diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ce29aa0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,34 @@ +command +pip_list + +__pycache__/ +*.py[cod] +*$py.class +.idea/ + +stable-diffusion/CompVis +stable-diffusion/src +stable-diffusion/latent_diffusion.egg-info + + +3DPortraitGAN_pyramid/models/*.pkl +3DPortraitGAN_pyramid/models/*.ckpt +3DPortraitGAN_pyramid/models/*.pt +3DPortraitGAN_pyramid/models/*.json +3DPortraitGAN_pyramid/out +3DPortraitGAN_pyramid/smplx_models/smpl/*.pkl +3DPortraitGAN_pyramid/training-runs + +stable-dreamfusion-3DPortrait/pretrained +stable-dreamfusion-3DPortrait/output +stable-dreamfusion-3DPortrait/smplx_models +stable-dreamfusion-3DPortrait/transfer_data + +command.py +temp.py + +*.pkl +*.pth +*.pt +*.pth.tar +./data_processing/data/J_regressor_extra.npy \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/.gitignore b/3DPortraitGAN_pyramid/.gitignore new file mode 100644 index 0000000..6041f0d --- /dev/null +++ b/3DPortraitGAN_pyramid/.gitignore @@ -0,0 +1,8 @@ +models +smplx_models +transfer_data +generate_inversion_results.py +training/dataset-ref.py +training-runs +.vscode +*.out \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/README.md b/3DPortraitGAN_pyramid/README.md new file mode 100644 index 0000000..a57b8c3 --- /dev/null +++ b/3DPortraitGAN_pyramid/README.md @@ -0,0 +1,59 @@ +# 3DPortraitGAN_pyramid Training + +**Note: Upon the acceptance of our [3DPortraitGAN](https://arxiv.org/abs/2307.14770), we plan to release our 360°PHQ dataset to facilitate reproducibility of research. We encourage you to utilize our provided pre-trained models. Stay tuned for updates! ** + + + +## Training + +```shell +cd 3DPortraitGAN_pyramid + +# stage 1 +python train.py \ + --outdir=./training-runs/stage1 --cfg=full-head \ + --data=$DATASET_PATH/360PHQ-512.zip --seg_data=$DATASET_PATH/360PHQ-512-mask.zip \ + --gpus=8 --batch=32 --gamma=5.0 --cbase=18432 --cmax=144 \ + --gamma_seg=5.0 --use_torgb_raw=1 --decoder_activation="none" \ + --bcg_reg_prob 0.2 --triplane_depth 3 --density_noise_fade_kimg 200 --density_reg 0 --back_repeat=1 \ + --gen_pose_cond=True --gpc_reg_prob=0.7 --mirror=True --data_rebalance=False --image-snap=25 --kimg=20000 \ + --neural_rendering_resolution_initial=64 \ + --pose_loss_weight=10 --input_pose_params_reg_loss_weight=5 --input_pose_params_reg_loss_kimg=200 \ + --train_g_pose_branch=True \ + --explicitly_symmetry=True \ + --metric_pose_sample_mode=G_predict + + +# stage 2 +python train.py \ + --outdir=./training-runs/stage2 --cfg=full-head \ + --data=$DATASET_PATH/360PHQ-512.zip --seg_data=$DATASET_PATH/360PHQ-512-mask.zip \ + --gpus=8 --batch=32 --gamma=5.0 --cbase=18432 --cmax=144 \ + --gamma_seg=5.0 --use_torgb_raw=1 --decoder_activation="none" \ + --bcg_reg_prob 0.2 --triplane_depth 3 --density_noise_fade_kimg 200 --density_reg 0 --back_repeat=1 \ + --gen_pose_cond=True --gpc_reg_prob=0.7 --mirror=True --data_rebalance=False --image-snap=25 --kimg=20000 \ + --neural_rendering_resolution_initial=64 \ + --pose_loss_weight=10 --input_pose_params_reg_loss_weight=5 --input_pose_params_reg_loss_kimg=200 \ + --train_g_pose_branch=False \ + --explicitly_symmetry=True \ + --metric_pose_sample_mode=D_predict \ + --resume=stage1.pkl --resume_kimg=NUM_KIMGS + +# stage 3 +python train.py \ + --outdir=./training-runs/stage3 --cfg=full-head \ + --data=$DATASET_PATH/360PHQ-512.zip --seg_data=$DATASET_PATH/360PHQ-512-mask.zip \ + --gpus=8 --batch=32 
--gamma=5.0 --cbase=18432 --cmax=144 \ + --gamma_seg=5.0 --use_torgb_raw=1 --decoder_activation="none" \ + --bcg_reg_prob 0.2 --triplane_depth 3 --density_noise_fade_kimg 200 --density_reg 0 --back_repeat=1 \ + --gen_pose_cond=True --gpc_reg_prob=0.7 --mirror=True --data_rebalance=False --image-snap=25 --kimg=20000 \ + --neural_rendering_resolution_initial=64 --neural_rendering_resolution_final=128 \ + --neural_rendering_resolution_fade_kimg=1000 \ + --pose_loss_weight=10 --input_pose_params_reg_loss_weight=5 --input_pose_params_reg_loss_kimg=200 \ + --train_g_pose_branch=False \ + --explicitly_symmetry=True \ + --metric_pose_sample_mode=D_predict \ + --resume=stage2.pkl --resume_kimg=NUM_KIMGS + +``` + diff --git a/3DPortraitGAN_pyramid/calc_metrics.py b/3DPortraitGAN_pyramid/calc_metrics.py new file mode 100644 index 0000000..9df0e5b --- /dev/null +++ b/3DPortraitGAN_pyramid/calc_metrics.py @@ -0,0 +1,232 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Calculate quality metrics for previous training run or pretrained network pickle.""" + +import os +import click +import json +import tempfile +import copy +import torch + +import dnnlib +import legacy +from metrics import metric_main +from metrics import metric_utils +from torch_utils import training_stats +from torch_utils import custom_ops +from torch_utils import misc +from torch_utils.ops import conv2d_gradfix + +#---------------------------------------------------------------------------- + +def subprocess_fn(rank, args, temp_dir): + dnnlib.util.Logger(should_flush=True) + + # Init torch.distributed. + if args.num_gpus > 1: + init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init')) + if os.name == 'nt': + init_method = 'file:///' + init_file.replace('\\', '/') + torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus) + else: + init_method = f'file://{init_file}' + torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus) + + # Init torch_utils. + sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None + training_stats.init_multiprocessing(rank=rank, sync_device=sync_device) + if rank != 0 or not args.verbose: + custom_ops.verbosity = 'none' + + # Configure torch. + device = torch.device('cuda', rank) + torch.backends.cuda.matmul.allow_tf32 = False + torch.backends.cudnn.allow_tf32 = False + conv2d_gradfix.enabled = True + + # Print network summary. 
+    G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
+    D = copy.deepcopy(args.D).eval().requires_grad_(False).to(device) if args.metric_pose_sample_mode == 'D_predict' else None
+
+    resample_filter = args.pose_predict_kwargs['resample_filter']
+    resample_filter = torch.tensor(resample_filter, device=device).to(torch.float32)
+
+    if rank == 0 and args.verbose:
+        z = torch.empty([1, G.z_dim], device=device)
+        c = torch.empty([1, G.c_dim], device=device)
+        misc.print_module_summary(G, [z, c])
+
+    # Calculate each metric.
+    for metric in args.metrics:
+        if rank == 0 and args.verbose:
+            print(f'Calculating {metric}...')
+        progress = metric_utils.ProgressMonitor(verbose=args.verbose)
+        # result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
+        #     num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
+        result_dict = metric_main.calc_metric(metric=metric,
+            G=G,
+            dataset_kwargs=args.dataset_kwargs,
+            num_gpus=args.num_gpus,
+            rank=rank,
+            device=device,
+            metric_pose_sample_mode=args.metric_pose_sample_mode,
+            progress=progress,
+            identical_c_p=args.identical_c_p,
+            D=D,
+            pose_predict_kwargs={
+                'neural_rendering_resolution': args.pose_predict_kwargs['neural_rendering_resolution'],
+                'blur_sigma': args.pose_predict_kwargs['blur_sigma'],
+                'resample_filter': resample_filter,
+                'filter_mode': args.pose_predict_kwargs['filter_mode']
+            } if args.metric_pose_sample_mode == 'D_predict' else None
+        )
+        if rank == 0:
+            metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
+        if rank == 0 and args.verbose:
+            print()
+
+    # Done.
+    if rank == 0 and args.verbose:
+        print('Exiting...')
+
+#----------------------------------------------------------------------------
+
+def parse_comma_separated_list(s):
+    if isinstance(s, list):
+        return s
+    if s is None or s.lower() == 'none' or s == '':
+        return []
+    return s.split(',')
+
+#----------------------------------------------------------------------------
+
+@click.command()
+@click.pass_context
+@click.option('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH', required=True)
+@click.option('pose_predict_kwargs', '--pose_predict_kwargs', help='JSON file with pose-prediction kwargs (neural_rendering_resolution, blur_sigma, resample_filter, filter_mode)', metavar='PATH', required=True)
+@click.option('--metric_pose_sample_mode', help='Pose sampling mode used when computing metrics', metavar='STR', type=click.Choice(['D_predict', 'G_predict']), required=False, default='G_predict')
+@click.option('--identical_c_p', help='Use identical c and p when sampling for metrics', type=bool, metavar='BOOL')
+
+@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True)
+@click.option('--data', help='Dataset to evaluate against [default: look up]', metavar='[ZIP|DIR]')
+@click.option('--seg_data', help='Segmentation (mask) dataset to evaluate against [default: look up]', metavar='[ZIP|DIR]')
+@click.option('--mirror', help='Enable dataset x-flips [default: look up]', type=bool, metavar='BOOL')
+@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True)
+@click.option('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True)
+
+def calc_metrics(ctx, network_pkl, pose_predict_kwargs, metric_pose_sample_mode, identical_c_p, metrics, data, seg_data, mirror, gpus, verbose):
+    """Calculate quality metrics for previous training run or pretrained network pickle.
+
+    Examples:
+
+    \b
+    # Previous training run: look up options automatically, save result to JSONL file.
+    python calc_metrics.py --metrics=eqt50k_int,eqr50k \\
+        --network=~/training-runs/00000-stylegan3-r-mydataset/network-snapshot-000000.pkl
+
+    \b
+    # Pre-trained network pickle: specify dataset explicitly, print result to stdout.
+    python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq-1024x1024.zip --mirror=1 \\
+        --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl
+
+    \b
+    Recommended metrics:
+      fid50k_full  Frechet inception distance against the full dataset.
+      kid50k_full  Kernel inception distance against the full dataset.
+      pr50k3_full  Precision and recall against the full dataset.
+      ppl2_wend    Perceptual path length in W, endpoints, full image.
+      eqt50k_int   Equivariance w.r.t. integer translation (EQ-T).
+      eqt50k_frac  Equivariance w.r.t. fractional translation (EQ-T_frac).
+      eqr50k       Equivariance w.r.t. rotation (EQ-R).
+
+    \b
+    Legacy metrics:
+      fid50k       Frechet inception distance against 50k real images.
+      kid50k       Kernel inception distance against 50k real images.
+      pr50k3       Precision and recall against 50k real images.
+      is50k        Inception score for CIFAR-10.
+    """
+    dnnlib.util.Logger(should_flush=True)
+
+    # Validate arguments.
+    args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose, metric_pose_sample_mode=metric_pose_sample_mode)
+    if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
+        ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
+    if not args.num_gpus >= 1:
+        ctx.fail('--gpus must be at least 1')
+
+    # Load network.
+    if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
+        ctx.fail('--network must point to a file or URL')
+    if args.verbose:
+        print(f'Loading network from "{network_pkl}"...')
+    with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
+        network_dict = legacy.load_network_pkl(f)
+        args.G = network_dict['G_ema'] # subclass of torch.nn.Module
+        args.D = network_dict['D_ema']
+
+    args.identical_c_p = identical_c_p
+
+    with open(pose_predict_kwargs, 'r') as f:
+        args.pose_predict_kwargs = json.load(f)
+
+    # Initialize dataset options.
+    if data is not None:
+        #args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
+        args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.MaskLabeledDataset',
+                                              img_path=data,
+                                              seg_path=seg_data,
+                                              back_repeat=1,
+                                              use_labels=True, max_size=None, xflip=True,
+                                              data_rebalance=False, data_rebalance_idx_file=None)
+    elif network_dict['training_set_kwargs'] is not None:
+        args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
+    else:
+        ctx.fail('Could not look up dataset options; please specify --data')
+
+    # Finalize dataset options.
+    args.dataset_kwargs.resolution = args.G.img_resolution
+    args.dataset_kwargs.use_labels = True
+
+    # Print dataset options.
+    if args.verbose:
+        print('Dataset options:')
+        print(json.dumps(args.dataset_kwargs, indent=2))
+
+    # Locate run dir.
+    args.run_dir = None
+    if os.path.isfile(network_pkl):
+        pkl_dir = os.path.dirname(network_pkl)
+        if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
+            args.run_dir = pkl_dir
+
+    # Launch processes.
+ if args.verbose: + print('Launching processes...') + torch.multiprocessing.set_start_method('spawn') + with tempfile.TemporaryDirectory() as temp_dir: + if args.num_gpus == 1: + subprocess_fn(rank=0, args=args, temp_dir=temp_dir) + else: + torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + calc_metrics() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/camera_utils.py b/3DPortraitGAN_pyramid/camera_utils.py new file mode 100644 index 0000000..4d4be88 --- /dev/null +++ b/3DPortraitGAN_pyramid/camera_utils.py @@ -0,0 +1,149 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +Helper functions for constructing camera parameter matrices. Primarily used in visualization and inference scripts. +""" + +import math + +import torch +import torch.nn as nn + +from training.volumetric_rendering import math_utils + +class GaussianCameraPoseSampler: + """ + Samples pitch and yaw from a Gaussian distribution and returns a camera pose. + Camera is specified as looking at the origin. + If horizontal and vertical stddev (specified in radians) are zero, gives a + deterministic camera pose with yaw=horizontal_mean, pitch=vertical_mean. + The coordinate system is specified with y-up, z-forward, x-left. + Horizontal mean is the azimuthal angle (rotation around y axis) in radians, + vertical mean is the polar angle (angle from the y axis) in radians. + A point along the z-axis has azimuthal_angle=0, polar_angle=pi/2. + + Example: + For a camera pose looking at the origin with the camera at position [0, 0, 1]: + cam2world = GaussianCameraPoseSampler.sample(math.pi/2, math.pi/2, radius=1) + """ + + @staticmethod + def sample(horizontal_mean, vertical_mean, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'): + h = torch.randn((batch_size, 1), device=device) * horizontal_stddev + horizontal_mean + v = torch.randn((batch_size, 1), device=device) * vertical_stddev + vertical_mean + v = torch.clamp(v, 1e-5, math.pi - 1e-5) + + theta = h + v = v / math.pi + phi = torch.arccos(1 - 2*v) + + camera_origins = torch.zeros((batch_size, 3), device=device) + + camera_origins[:, 0:1] = radius*torch.sin(phi) * torch.cos(math.pi-theta) + camera_origins[:, 2:3] = radius*torch.sin(phi) * torch.sin(math.pi-theta) + camera_origins[:, 1:2] = radius*torch.cos(phi) + + forward_vectors = math_utils.normalize_vecs(-camera_origins) + return create_cam2world_matrix(forward_vectors, camera_origins) + + +class LookAtPoseSampler: + """ + Same as GaussianCameraPoseSampler, except the + camera is specified as looking at 'lookat_position', a 3-vector. 
+ + Example: + For a camera pose looking at the origin with the camera at position [0, 0, 1]: + cam2world = LookAtPoseSampler.sample(math.pi/2, math.pi/2, torch.tensor([0, 0, 0]), radius=1) + """ + + @staticmethod + def sample(horizontal_mean, vertical_mean, lookat_position, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'): + h = torch.randn((batch_size, 1), device=device) * horizontal_stddev + horizontal_mean + v = torch.randn((batch_size, 1), device=device) * vertical_stddev + vertical_mean + v = torch.clamp(v, 1e-5, math.pi - 1e-5) + + theta = h + v = v / math.pi + phi = torch.arccos(1 - 2*v) + + camera_origins = torch.zeros((batch_size, 3), device=device) + + camera_origins[:, 0:1] = radius*torch.sin(phi) * torch.cos(math.pi-theta) + camera_origins[:, 2:3] = radius*torch.sin(phi) * torch.sin(math.pi-theta) + camera_origins[:, 1:2] = radius*torch.cos(phi) + + # forward_vectors = math_utils.normalize_vecs(-camera_origins) + forward_vectors = math_utils.normalize_vecs(lookat_position - camera_origins) + return create_cam2world_matrix(forward_vectors, camera_origins) + +class UniformCameraPoseSampler: + """ + Same as GaussianCameraPoseSampler, except the + pose is sampled from a uniform distribution with range +-[horizontal/vertical]_stddev. + + Example: + For a batch of random camera poses looking at the origin with yaw sampled from [-pi/2, +pi/2] radians: + + cam2worlds = UniformCameraPoseSampler.sample(math.pi/2, math.pi/2, horizontal_stddev=math.pi/2, radius=1, batch_size=16) + """ + + @staticmethod + def sample(horizontal_mean, vertical_mean, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'): + h = (torch.rand((batch_size, 1), device=device) * 2 - 1) * horizontal_stddev + horizontal_mean + v = (torch.rand((batch_size, 1), device=device) * 2 - 1) * vertical_stddev + vertical_mean + v = torch.clamp(v, 1e-5, math.pi - 1e-5) + + theta = h + v = v / math.pi + phi = torch.arccos(1 - 2*v) + + camera_origins = torch.zeros((batch_size, 3), device=device) + + camera_origins[:, 0:1] = radius*torch.sin(phi) * torch.cos(math.pi-theta) + camera_origins[:, 2:3] = radius*torch.sin(phi) * torch.sin(math.pi-theta) + camera_origins[:, 1:2] = radius*torch.cos(phi) + + forward_vectors = math_utils.normalize_vecs(-camera_origins) + return create_cam2world_matrix(forward_vectors, camera_origins) + +def create_cam2world_matrix(forward_vector, origin): + """ + Takes in the direction the camera is pointing and the camera origin and returns a cam2world matrix. + Works on batches of forward_vectors, origins. Assumes y-axis is up and that there is no camera roll. 
+ """ + + forward_vector = math_utils.normalize_vecs(forward_vector) + up_vector = torch.tensor([0, 1, 0], dtype=torch.float, device=origin.device).expand_as(forward_vector) + + right_vector = -math_utils.normalize_vecs(torch.cross(up_vector, forward_vector, dim=-1)) + up_vector = math_utils.normalize_vecs(torch.cross(forward_vector, right_vector, dim=-1)) + + rotation_matrix = torch.eye(4, device=origin.device).unsqueeze(0).repeat(forward_vector.shape[0], 1, 1) + rotation_matrix[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), axis=-1) + + translation_matrix = torch.eye(4, device=origin.device).unsqueeze(0).repeat(forward_vector.shape[0], 1, 1) + translation_matrix[:, :3, 3] = origin + cam2world = (translation_matrix @ rotation_matrix)[:, :, :] + assert(cam2world.shape[1:] == (4, 4)) + return cam2world + + +def FOV_to_intrinsics(fov_degrees, device='cpu'): + """ + Creates a 3x3 camera intrinsics matrix from the camera field of view, specified in degrees. + Note the intrinsics are returned as normalized by image size, rather than in pixel units. + Assumes principal point is at image center. + """ + + focal_length = float(1 / (math.tan(fov_degrees * 3.14159 / 360) * 1.414)) + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + return intrinsics \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/dataset_tool.py b/3DPortraitGAN_pyramid/dataset_tool.py new file mode 100644 index 0000000..fd5b41c --- /dev/null +++ b/3DPortraitGAN_pyramid/dataset_tool.py @@ -0,0 +1,458 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Tool for creating ZIP/PNG based datasets.""" + +import functools +import gzip +import io +import json +import os +import pickle +import re +import sys +import tarfile +import zipfile +from pathlib import Path +from typing import Callable, Optional, Tuple, Union + +import click +import numpy as np +import PIL.Image +from tqdm import tqdm + +#---------------------------------------------------------------------------- + +def error(msg): + print('Error: ' + msg) + sys.exit(1) + +#---------------------------------------------------------------------------- + +# def parse_tuple(s: str) -> Tuple[int, int]: +# '''Parse a 'M,N' or 'MxN' integer tuple. 
+# +# Example: +# '4x2' returns (4,2) +# '0,1' returns (0,1) +# ''' +# if m := re.match(r'^(\d+)[x,](\d+)$', s): +# return (int(m.group(1)), int(m.group(2))) +# raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +def maybe_min(a: int, b: Optional[int]) -> int: + if b is not None: + return min(a, b) + return a + +#---------------------------------------------------------------------------- + +def file_ext(name: Union[str, Path]) -> str: + return str(name).split('.')[-1] + +#---------------------------------------------------------------------------- + +def is_image_ext(fname: Union[str, Path]) -> bool: + ext = file_ext(fname).lower() + return f'.{ext}' in PIL.Image.EXTENSION # type: ignore + +#---------------------------------------------------------------------------- + +def open_image_folder(source_dir, *, max_images: Optional[int]): + input_images = [str(f) for f in sorted(Path(source_dir).rglob('*')) if is_image_ext(f) and os.path.isfile(f)] + + # Load labels. + labels = {} + meta_fname = os.path.join(source_dir, 'dataset.json') + if os.path.isfile(meta_fname): + with open(meta_fname, 'r') as file: + labels = json.load(file)['labels'] + if labels is not None: + labels = { x[0]: x[1] for x in labels } + else: + labels = {} + + max_idx = maybe_min(len(input_images), max_images) + + def iterate_images(): + for idx, fname in enumerate(input_images): + arch_fname = os.path.relpath(fname, source_dir) + arch_fname = arch_fname.replace('\\', '/') + img = np.array(PIL.Image.open(fname)) + yield dict(img=img, label=labels.get(arch_fname)) + if idx >= max_idx-1: + break + return max_idx, iterate_images() + +#---------------------------------------------------------------------------- + +def open_image_zip(source, *, max_images: Optional[int]): + with zipfile.ZipFile(source, mode='r') as z: + input_images = [str(f) for f in sorted(z.namelist()) if is_image_ext(f)] + + # Load labels. 
+ labels = {} + if 'dataset.json' in z.namelist(): + with z.open('dataset.json', 'r') as file: + labels = json.load(file)['labels'] + if labels is not None: + labels = { x[0]: x[1] for x in labels } + else: + labels = {} + + max_idx = maybe_min(len(input_images), max_images) + + def iterate_images(): + with zipfile.ZipFile(source, mode='r') as z: + for idx, fname in enumerate(input_images): + with z.open(fname, 'r') as file: + img = PIL.Image.open(file) # type: ignore + img = np.array(img) + yield dict(img=img, label=labels.get(fname)) + if idx >= max_idx-1: + break + return max_idx, iterate_images() + +#---------------------------------------------------------------------------- + +def open_lmdb(lmdb_dir: str, *, max_images: Optional[int]): + import cv2 # pip install opencv-python # pylint: disable=import-error + import lmdb # pip install lmdb # pylint: disable=import-error + + with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn: + max_idx = maybe_min(txn.stat()['entries'], max_images) + + def iterate_images(): + with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn: + for idx, (_key, value) in enumerate(txn.cursor()): + try: + try: + img = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), 1) + if img is None: + raise IOError('cv2.imdecode failed') + img = img[:, :, ::-1] # BGR => RGB + except IOError: + img = np.array(PIL.Image.open(io.BytesIO(value))) + yield dict(img=img, label=None) + if idx >= max_idx-1: + break + except: + print(sys.exc_info()[1]) + + return max_idx, iterate_images() + +#---------------------------------------------------------------------------- + +def open_cifar10(tarball: str, *, max_images: Optional[int]): + images = [] + labels = [] + + with tarfile.open(tarball, 'r:gz') as tar: + for batch in range(1, 6): + member = tar.getmember(f'cifar-10-batches-py/data_batch_{batch}') + with tar.extractfile(member) as file: + data = pickle.load(file, encoding='latin1') + images.append(data['data'].reshape(-1, 3, 32, 32)) + labels.append(data['labels']) + + images = np.concatenate(images) + labels = np.concatenate(labels) + images = images.transpose([0, 2, 3, 1]) # NCHW -> NHWC + assert images.shape == (50000, 32, 32, 3) and images.dtype == np.uint8 + assert labels.shape == (50000,) and labels.dtype in [np.int32, np.int64] + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + + max_idx = maybe_min(len(images), max_images) + + def iterate_images(): + for idx, img in enumerate(images): + yield dict(img=img, label=int(labels[idx])) + if idx >= max_idx-1: + break + + return max_idx, iterate_images() + +#---------------------------------------------------------------------------- + +def open_mnist(images_gz: str, *, max_images: Optional[int]): + labels_gz = images_gz.replace('-images-idx3-ubyte.gz', '-labels-idx1-ubyte.gz') + assert labels_gz != images_gz + images = [] + labels = [] + + with gzip.open(images_gz, 'rb') as f: + images = np.frombuffer(f.read(), np.uint8, offset=16) + with gzip.open(labels_gz, 'rb') as f: + labels = np.frombuffer(f.read(), np.uint8, offset=8) + + images = images.reshape(-1, 28, 28) + images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0) + assert images.shape == (60000, 32, 32) and images.dtype == np.uint8 + assert labels.shape == (60000,) and labels.dtype == np.uint8 + assert np.min(images) == 0 and np.max(images) == 255 + assert np.min(labels) == 0 and np.max(labels) == 9 + + max_idx = maybe_min(len(images), max_images) 
+ + def iterate_images(): + for idx, img in enumerate(images): + yield dict(img=img, label=int(labels[idx])) + if idx >= max_idx-1: + break + + return max_idx, iterate_images() + +#---------------------------------------------------------------------------- + +def make_transform( + transform: Optional[str], + output_width: Optional[int], + output_height: Optional[int] +) -> Callable[[np.ndarray], Optional[np.ndarray]]: + def scale(width, height, img): + w = img.shape[1] + h = img.shape[0] + if width == w and height == h: + return img + img = PIL.Image.fromarray(img) + ww = width if width is not None else w + hh = height if height is not None else h + img = img.resize((ww, hh), PIL.Image.LANCZOS) + return np.array(img) + + def center_crop(width, height, img): + crop = np.min(img.shape[:2]) + img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2] + img = PIL.Image.fromarray(img, 'RGB') + img = img.resize((width, height), PIL.Image.LANCZOS) + return np.array(img) + + def center_crop_wide(width, height, img): + ch = int(np.round(width * img.shape[0] / img.shape[1])) + if img.shape[1] < width or ch < height: + return None + + img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2] + img = PIL.Image.fromarray(img, 'RGB') + img = img.resize((width, height), PIL.Image.LANCZOS) + img = np.array(img) + + canvas = np.zeros([width, width, 3], dtype=np.uint8) + canvas[(width - height) // 2 : (width + height) // 2, :] = img + return canvas + + if transform is None: + return functools.partial(scale, output_width, output_height) + if transform == 'center-crop': + if (output_width is None) or (output_height is None): + error ('must specify --resolution=WxH when using ' + transform + 'transform') + return functools.partial(center_crop, output_width, output_height) + if transform == 'center-crop-wide': + if (output_width is None) or (output_height is None): + error ('must specify --resolution=WxH when using ' + transform + ' transform') + return functools.partial(center_crop_wide, output_width, output_height) + assert False, 'unknown transform' + +#---------------------------------------------------------------------------- + +def open_dataset(source, *, max_images: Optional[int]): + if os.path.isdir(source): + if source.rstrip('/').endswith('_lmdb'): + return open_lmdb(source, max_images=max_images) + else: + return open_image_folder(source, max_images=max_images) + elif os.path.isfile(source): + if os.path.basename(source) == 'cifar-10-python.tar.gz': + return open_cifar10(source, max_images=max_images) + elif os.path.basename(source) == 'train-images-idx3-ubyte.gz': + return open_mnist(source, max_images=max_images) + elif file_ext(source) == 'zip': + return open_image_zip(source, max_images=max_images) + else: + assert False, 'unknown archive type' + else: + error(f'Missing input file or directory: {source}') + +#---------------------------------------------------------------------------- + +def open_dest(dest: str) -> Tuple[str, Callable[[str, Union[bytes, str]], None], Callable[[], None]]: + dest_ext = file_ext(dest) + + if dest_ext == 'zip': + if os.path.dirname(dest) != '': + os.makedirs(os.path.dirname(dest), exist_ok=True) + zf = zipfile.ZipFile(file=dest, mode='w', compression=zipfile.ZIP_STORED) + def zip_write_bytes(fname: str, data: Union[bytes, str]): + zf.writestr(fname, data) + return '', zip_write_bytes, zf.close + else: + # If the output folder already exists, check that is is + # empty. 
+ # + # Note: creating the output directory is not strictly + # necessary as folder_write_bytes() also mkdirs, but it's better + # to give an error message earlier in case the dest folder + # somehow cannot be created. + if os.path.isdir(dest) and len(os.listdir(dest)) != 0: + error('--dest folder must be empty') + os.makedirs(dest, exist_ok=True) + + def folder_write_bytes(fname: str, data: Union[bytes, str]): + os.makedirs(os.path.dirname(fname), exist_ok=True) + with open(fname, 'wb') as fout: + if isinstance(data, str): + data = data.encode('utf8') + fout.write(data) + return dest, folder_write_bytes, lambda: None + +#---------------------------------------------------------------------------- + +@click.command() +@click.pass_context +@click.option('--source', help='Directory or archive name for input dataset', required=True, metavar='PATH') +@click.option('--dest', help='Output directory or archive name for output dataset', required=True, metavar='PATH') +@click.option('--max-images', help='Output only up to `max-images` images', type=int, default=None) +@click.option('--transform', help='Input crop/resize mode', type=click.Choice(['center-crop', 'center-crop-wide'])) +@click.option('--resolution', type=int) +def convert_dataset( + ctx: click.Context, + source: str, + dest: str, + max_images: Optional[int], + transform: Optional[str], + resolution: Optional[Tuple[int, int]] +): + """Convert an image dataset into a dataset archive usable with StyleGAN2 ADA PyTorch. + + The input dataset format is guessed from the --source argument: + + \b + --source *_lmdb/ Load LSUN dataset + --source cifar-10-python.tar.gz Load CIFAR-10 dataset + --source train-images-idx3-ubyte.gz Load MNIST dataset + --source path/ Recursively load all images from path/ + --source dataset.zip Recursively load all images from dataset.zip + + Specifying the output format and path: + + \b + --dest /path/to/dir Save output files under /path/to/dir + --dest /path/to/dataset.zip Save output files into /path/to/dataset.zip + + The output dataset format can be either an image folder or an uncompressed zip archive. + Zip archives makes it easier to move datasets around file servers and clusters, and may + offer better training performance on network file systems. + + Images within the dataset archive will be stored as uncompressed PNG. + Uncompressed PNGs can be efficiently decoded in the training loop. + + Class labels are stored in a file called 'dataset.json' that is stored at the + dataset root folder. This file has the following structure: + + \b + { + "labels": [ + ["00000/img00000000.png",6], + ["00000/img00000001.png",9], + ... repeated for every image in the dataset + ["00049/img00049999.png",1] + ] + } + + If the 'dataset.json' file cannot be found, the dataset is interpreted as + not containing class labels. + + Image scale/crop and resolution requirements: + + Output images must be square-shaped and they must all have the same power-of-two + dimensions. + + To scale arbitrary input image size to a specific width and height, use the + --resolution option. Output resolution will be either the original + input resolution (if resolution was not specified) or the one specified with + --resolution option. + + Use the --transform=center-crop or --transform=center-crop-wide options to apply a + center crop transform on the input image. These options should be used with the + --resolution option. 
For example: + + \b + python dataset_tool.py --source LSUN/raw/cat_lmdb --dest /tmp/lsun_cat \\ + --transform=center-crop-wide --resolution=512x384 + """ + + PIL.Image.init() # type: ignore + + if dest == '': + ctx.fail('--dest output filename or directory must not be an empty string') + + num_files, input_iter = open_dataset(source, max_images=max_images) + archive_root_dir, save_bytes, close_dest = open_dest(dest) + + resolution = (resolution,resolution) + transform_image = make_transform(transform, *resolution) + + dataset_attrs = None + + labels = [] + for idx, image in tqdm(enumerate(input_iter), total=num_files): + idx_str = f'{idx:010d}' + archive_fname = f'{idx // 1000:010d}/{idx_str}.png' + + # Apply crop and resize. + img = transform_image(image['img']) + + # Transform may drop images. + if img is None: + continue + + # Error check to require uniform image attributes across + # the whole dataset. + channels = img.shape[2] if img.ndim == 3 else 1 + cur_image_attrs = { + 'width': img.shape[1], + 'height': img.shape[0], + 'channels': channels + } + if dataset_attrs is None: + dataset_attrs = cur_image_attrs + width = dataset_attrs['width'] + height = dataset_attrs['height'] + if width != height: + error(f'Image dimensions after scale and crop are required to be square. Got {width}x{height}') + if dataset_attrs['channels'] not in [1, 3, 4]: + error('Input images must be stored as RGB or grayscale') + if width != 2 ** int(np.floor(np.log2(width))): + error('Image width/height after scale and crop are required to be power-of-two') + elif dataset_attrs != cur_image_attrs: + err = [f' dataset {k}/cur image {k}: {dataset_attrs[k]}/{cur_image_attrs[k]}' for k in dataset_attrs.keys()] # pylint: disable=unsubscriptable-object + error(f'Image {archive_fname} attributes must be equal across all images of the dataset. Got:\n' + '\n'.join(err)) + + # Save the image as an uncompressed PNG. + img = PIL.Image.fromarray(img, { 1: 'L', 3: 'RGB', 4: 'RGBA'}[channels]) + if channels == 4: img = img.convert('RGB') + image_bits = io.BytesIO() + img.save(image_bits, format='png', compress_level=0, optimize=False) + save_bytes(os.path.join(archive_root_dir, archive_fname), image_bits.getbuffer()) + labels.append([archive_fname, image['label']] if image['label'] is not None else None) + + metadata = { + 'labels': labels if all(x is not None for x in labels) else None + } + save_bytes(os.path.join(archive_root_dir, 'dataset.json'), json.dumps(metadata)) + close_dest() + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + convert_dataset() # pylint: disable=no-value-for-parameter diff --git a/3DPortraitGAN_pyramid/dnnlib/__init__.py b/3DPortraitGAN_pyramid/dnnlib/__init__.py new file mode 100644 index 0000000..dd91ed1 --- /dev/null +++ b/3DPortraitGAN_pyramid/dnnlib/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +from .util import EasyDict, make_cache_dir_path diff --git a/3DPortraitGAN_pyramid/dnnlib/util.py b/3DPortraitGAN_pyramid/dnnlib/util.py new file mode 100644 index 0000000..80b67c4 --- /dev/null +++ b/3DPortraitGAN_pyramid/dnnlib/util.py @@ -0,0 +1,493 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Miscellaneous utility classes and functions.""" + +import ctypes +import fnmatch +import importlib +import inspect +import numpy as np +import os +import shutil +import sys +import types +import io +import pickle +import re +import requests +import html +import hashlib +import glob +import tempfile +import urllib +import urllib.request +import uuid + +from distutils.util import strtobool +from typing import Any, List, Tuple, Union + + +# Util classes +# ------------------------------------------------------------------------------------------ + + +class EasyDict(dict): + """Convenience class that behaves like a dict but allows access with the attribute syntax.""" + + def __getattr__(self, name: str) -> Any: + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __setattr__(self, name: str, value: Any) -> None: + self[name] = value + + def __delattr__(self, name: str) -> None: + del self[name] + + +class Logger(object): + """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" + + def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): + self.file = None + + if file_name is not None: + self.file = open(file_name, file_mode) + + self.should_flush = should_flush + self.stdout = sys.stdout + self.stderr = sys.stderr + + sys.stdout = self + sys.stderr = self + + def __enter__(self) -> "Logger": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self.close() + + def write(self, text: Union[str, bytes]) -> None: + """Write text to stdout (and a file) and optionally flush.""" + if isinstance(text, bytes): + text = text.decode() + if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash + return + + if self.file is not None: + self.file.write(text) + + self.stdout.write(text) + + if self.should_flush: + self.flush() + + def flush(self) -> None: + """Flush written text to both stdout and a file, if open.""" + if self.file is not None: + self.file.flush() + + self.stdout.flush() + + def close(self) -> None: + """Flush, close possible files, and remove stdout/stderr mirroring.""" + self.flush() + + # if using multiple loggers, prevent closing in wrong order + if sys.stdout is self: + sys.stdout = self.stdout + if sys.stderr is self: + sys.stderr = self.stderr + + if self.file is not None: + self.file.close() + self.file = None + + +# Cache directories +# ------------------------------------------------------------------------------------------ + +_dnnlib_cache_dir = None + +def set_cache_dir(path: str) -> None: + global _dnnlib_cache_dir + _dnnlib_cache_dir = 
path + +def make_cache_dir_path(*paths: str) -> str: + if _dnnlib_cache_dir is not None: + return os.path.join(_dnnlib_cache_dir, *paths) + if 'DNNLIB_CACHE_DIR' in os.environ: + return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths) + if 'HOME' in os.environ: + return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths) + if 'USERPROFILE' in os.environ: + return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths) + return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths) + +# Small util functions +# ------------------------------------------------------------------------------------------ + + +def format_time(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) + else: + return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) + + +def format_time_brief(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60) + else: + return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24) + + +def ask_yes_no(question: str) -> bool: + """Ask the user the question until the user inputs a valid answer.""" + while True: + try: + print("{0} [y/n]".format(question)) + return strtobool(input().lower()) + except ValueError: + pass + + +def tuple_product(t: Tuple) -> Any: + """Calculate the product of the tuple elements.""" + result = 1 + + for v in t: + result *= v + + return result + + +_str_to_ctype = { + "uint8": ctypes.c_ubyte, + "uint16": ctypes.c_uint16, + "uint32": ctypes.c_uint32, + "uint64": ctypes.c_uint64, + "int8": ctypes.c_byte, + "int16": ctypes.c_int16, + "int32": ctypes.c_int32, + "int64": ctypes.c_int64, + "float32": ctypes.c_float, + "float64": ctypes.c_double +} + + +def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: + """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" + type_str = None + + if isinstance(type_obj, str): + type_str = type_obj + elif hasattr(type_obj, "__name__"): + type_str = type_obj.__name__ + elif hasattr(type_obj, "name"): + type_str = type_obj.name + else: + raise RuntimeError("Cannot infer type name from input") + + assert type_str in _str_to_ctype.keys() + + my_dtype = np.dtype(type_str) + my_ctype = _str_to_ctype[type_str] + + assert my_dtype.itemsize == ctypes.sizeof(my_ctype) + + return my_dtype, my_ctype + + +def is_pickleable(obj: Any) -> bool: + try: + with io.BytesIO() as stream: + pickle.dump(obj, stream) + return True + except: + return False + + +# Functionality to import modules/objects by name, and call functions by name +# ------------------------------------------------------------------------------------------ + +def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: + """Searches for the underlying module behind the name to some python object. 
+ Returns the module and the object name (original name with module part removed).""" + + # allow convenience shorthands, substitute them by full names + obj_name = re.sub("^np.", "numpy.", obj_name) + obj_name = re.sub("^tf.", "tensorflow.", obj_name) + + # list alternatives for (module_name, local_obj_name) + parts = obj_name.split(".") + name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] + + # try each alternative in turn + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + return module, local_obj_name + except: + pass + + # maybe some of the modules themselves contain errors? + for module_name, _local_obj_name in name_pairs: + try: + importlib.import_module(module_name) # may raise ImportError + except ImportError: + if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): + raise + + # maybe the requested attribute is missing? + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + except ImportError: + pass + + # we are out of luck, but we have no idea why + raise ImportError(obj_name) + + +def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: + """Traverses the object name and returns the last (rightmost) python object.""" + if obj_name == '': + return module + obj = module + for part in obj_name.split("."): + obj = getattr(obj, part) + return obj + + +def get_obj_by_name(name: str) -> Any: + """Finds the python object with the given name.""" + module, obj_name = get_module_from_obj_name(name) + return get_obj_from_module(module, obj_name) + + +def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: + """Finds the python object with the given name and calls it as a function.""" + assert func_name is not None + func_obj = get_obj_by_name(func_name) + assert callable(func_obj) + return func_obj(*args, **kwargs) + + +def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any: + """Finds the python class with the given name and constructs it with the given arguments.""" + return call_func_by_name(*args, func_name=class_name, **kwargs) + + +def get_module_dir_by_obj_name(obj_name: str) -> str: + """Get the directory path of the module containing the given object name.""" + module, _ = get_module_from_obj_name(obj_name) + return os.path.dirname(inspect.getfile(module)) + + +def is_top_level_function(obj: Any) -> bool: + """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" + return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ + + +def get_top_level_function_name(obj: Any) -> str: + """Return the fully-qualified name of a top-level function.""" + assert is_top_level_function(obj) + module = obj.__module__ + if module == '__main__': + module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0] + return module + "." + obj.__name__ + + +# File system helpers +# ------------------------------------------------------------------------------------------ + +def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: + """List all files recursively in a given directory while ignoring given file and directory names. 
+ Returns list of tuples containing both absolute and relative paths.""" + assert os.path.isdir(dir_path) + base_name = os.path.basename(os.path.normpath(dir_path)) + + if ignores is None: + ignores = [] + + result = [] + + for root, dirs, files in os.walk(dir_path, topdown=True): + for ignore_ in ignores: + dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] + + # dirs need to be edited in-place + for d in dirs_to_remove: + dirs.remove(d) + + files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] + + absolute_paths = [os.path.join(root, f) for f in files] + relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] + + if add_base_to_relative: + relative_paths = [os.path.join(base_name, p) for p in relative_paths] + + assert len(absolute_paths) == len(relative_paths) + result += zip(absolute_paths, relative_paths) + + return result + + +def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: + """Takes in a list of tuples of (src, dst) paths and copies files. + Will create all necessary directories.""" + for file in files: + target_dir_name = os.path.dirname(file[1]) + + # will create all intermediate-level directories + if not os.path.exists(target_dir_name): + os.makedirs(target_dir_name) + + shutil.copyfile(file[0], file[1]) + + +# URL helpers +# ------------------------------------------------------------------------------------------ + +def is_url(obj: Any, allow_file_urls: bool = False) -> bool: + """Determine whether the given object is a valid URL string.""" + if not isinstance(obj, str) or not "://" in obj: + return False + if allow_file_urls and obj.startswith('file://'): + return True + try: + res = requests.compat.urlparse(obj) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + except: + return False + return True + + +def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any: + """Download the given URL and return a binary-mode file object to access the data.""" + assert num_attempts >= 1 + assert not (return_filename and (not cache)) + + # Doesn't look like an URL scheme so interpret it as a local filename. + if not re.match('^[a-z]+://', url): + return url if return_filename else open(url, "rb") + + # Handle file URLs. This code handles unusual file:// patterns that + # arise on Windows: + # + # file:///c:/foo.txt + # + # which would translate to a local '/c:/foo.txt' filename that's + # invalid. Drop the forward slash for such pathnames. + # + # If you touch this code path, you should test it on both Linux and + # Windows. + # + # Some internet resources suggest using urllib.request.url2pathname() but + # but that converts forward slashes to backslashes and this causes + # its own set of problems. + if url.startswith('file://'): + filename = urllib.parse.urlparse(url).path + if re.match(r'^/[a-zA-Z]:', filename): + filename = filename[1:] + return filename if return_filename else open(filename, "rb") + + assert is_url(url) + + # Lookup from cache. 
+ if cache_dir is None: + cache_dir = make_cache_dir_path('downloads') + + url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() + if cache: + cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) + if len(cache_files) == 1: + filename = cache_files[0] + return filename if return_filename else open(filename, "rb") + + # Download. + url_name = None + url_data = None + with requests.Session() as session: + if verbose: + print("Downloading %s ..." % url, end="", flush=True) + for attempts_left in reversed(range(num_attempts)): + try: + with session.get(url) as res: + res.raise_for_status() + if len(res.content) == 0: + raise IOError("No data received") + + if len(res.content) < 8192: + content_str = res.content.decode("utf-8") + if "download_warning" in res.headers.get("Set-Cookie", ""): + links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] + if len(links) == 1: + url = requests.compat.urljoin(url, links[0]) + raise IOError("Google Drive virus checker nag") + if "Google Drive - Quota exceeded" in content_str: + raise IOError("Google Drive download quota exceeded -- please try again later") + + match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) + url_name = match[1] if match else url + url_data = res.content + if verbose: + print(" done") + break + except KeyboardInterrupt: + raise + except: + if not attempts_left: + if verbose: + print(" failed") + raise + if verbose: + print(".", end="", flush=True) + + # Save to cache. + if cache: + safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) + cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) + temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) + os.makedirs(cache_dir, exist_ok=True) + with open(temp_file, "wb") as f: + f.write(url_data) + os.replace(temp_file, cache_file) # atomic + if return_filename: + return cache_file + + # Return data as file object. + assert not return_filename + return io.BytesIO(url_data) diff --git a/3DPortraitGAN_pyramid/environment.yml b/3DPortraitGAN_pyramid/environment.yml new file mode 100644 index 0000000..bd2ed07 --- /dev/null +++ b/3DPortraitGAN_pyramid/environment.yml @@ -0,0 +1,38 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +name: 3DPortraitGAN +channels: + - pytorch + - nvidia +dependencies: + - python=3.9.16 + - pip + - numpy>=1.20 + - click>=8.0 + - pillow=8.3.1 + - scipy=1.7.1 + - requests=2.26.0 + - tqdm=4.62.2 + - ninja=1.10.2 + - matplotlib=3.4.2 + - imageio=2.9.0 + - pip: + - imgui==1.3.0 + - glfw==2.2.0 + - pyopengl==3.1.5 + - imageio-ffmpeg==0.4.3 + - pyspng + - psutil + - mrcfile + - tensorboard + - smplx==0.1.28 + - trimesh==3.21.4 + - chumpy==0.70 \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/gen_quality_improve_data_from_triplane.py b/3DPortraitGAN_pyramid/gen_quality_improve_data_from_triplane.py new file mode 100644 index 0000000..7410cb5 --- /dev/null +++ b/3DPortraitGAN_pyramid/gen_quality_improve_data_from_triplane.py @@ -0,0 +1,331 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Generate lerp videos using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import imageio +import numpy as np +import scipy.interpolate +import torch +from tqdm import tqdm +import mrcfile +import json +import legacy + +from camera_utils import LookAtPoseSampler,FOV_to_intrinsics +from torch_utils import misc +import glob +import PIL + +#---------------------------------------------------------------------------- + +def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True): + batch_size, channels, img_h, img_w = img.shape + if grid_w is None: + grid_w = batch_size // grid_h + assert batch_size == grid_w * grid_h + if float_to_uint8: + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.reshape(grid_h, grid_w, channels, img_h, img_w) + img = img.permute(2, 0, 3, 1, 4) + img = img.reshape(channels, grid_h * img_h, grid_w * img_w) + if chw_to_hwc: + img = img.permute(1, 2, 0) + if to_numpy: + img = img.cpu().numpy() + return img + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- + +def gen_interp_video(G, mp4: str, trigrid=None,ws=None, shuffle_seed=None, w_frames=60*4, kind='cubic', + grid_dims=(1,1), num_keyframes=None, wraps=2, 
psi=1, truncation_cutoff=14, + image_mode='image', gen_shapes=False, device=torch.device('cuda'), large_pose= False, + **video_kwargs): + grid_w = grid_dims[0] + grid_h = grid_dims[1] + + num_keyframes = 1 + + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + + cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, camera_lookat_point, radius=2.7, device=device) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c = c.repeat(len(ws), 1) + + p = torch.zeros([len(ws), 6], device=device) + + ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:]) + + # Interpolation. + grid = [] + for yi in range(grid_h): + row = [] + for xi in range(grid_w): + x = np.arange(-num_keyframes * wraps, num_keyframes * (wraps + 1)) + y = np.tile(ws[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1]) + interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0) + row.append(interp) + grid.append(row) + + # Render video. + video_out = imageio.get_writer(mp4, mode='I', fps=30, codec='libx264', **video_kwargs) + + + all_poses = [] + + if large_pose: + image_row = [] + + + for frame_idx in tqdm(range(num_keyframes * w_frames)): + imgs = [] + for yi in range(grid_h): + for xi in range(grid_w): + if large_pose: + # 0 - 2pi + cam2world_pose = LookAtPoseSampler.sample(np.pi / 2 + (frame_idx / w_frames) * 2 * np.pi, + np.pi / 2, + camera_lookat_point, radius=2.7, device=device) + else: + pitch_range = 0.25 + yaw_range = 0.35 + cam2world_pose = LookAtPoseSampler.sample(np.pi/2 + yaw_range * np.sin(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + np.pi/2 -0.05 + pitch_range * np.cos(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + camera_lookat_point, radius=2.7, device=device) + all_poses.append(cam2world_pose.squeeze().cpu().numpy()) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + interp = grid[yi][xi] + w = torch.from_numpy(interp(frame_idx / w_frames)).to(device) + + img = G.render_planes(ws=w.unsqueeze(0), planes=trigrid, c=c[0:1], noise_mode='const', neural_rendering_resolution=512,chunk = 4096)[image_mode][0] + + if image_mode == 'image_depth': + img = -img + img = (img - img.min()) / (img.max() - img.min()) * 2 - 1 + + imgs.append(img) + if large_pose and frame_idx % int(num_keyframes * w_frames//8) == 0: + image_row.append((img.permute(1, 2, 0) * 127.5 + 128).clamp(0, 255).to(torch.uint8)) + + video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h)) + video_out.close() + all_poses = np.stack(all_poses) + + if large_pose: + import PIL + image_row = torch.cat(image_row, 1).cpu().numpy() + PIL.Image.fromarray(image_row.astype(np.uint8)).save(mp4.replace('.mp4', '_large_pose.png')) + + + if gen_shapes: + print(all_poses.shape) + with open(mp4.replace('.mp4', '_trajectory.npy'), 'wb') as f: + np.save(f, all_poses) + +#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List[int]]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. 
+ + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]: + '''Parse a 'M,N' or 'MxN' integer tuple. + + Example: + '4x2' returns (4,2) + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + if m := re.match(r'^(\d+)[x,](\d+)$', s): + return (int(m.group(1)), int(m.group(2))) + raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +@click.option('--data_dir', help='Network pickle filename', required=True) +@click.option('--shuffle-seed', type=int, help='Random seed to use for shuffling seed order', default=None) +@click.option('--grid', type=parse_tuple, help='Grid width/height, e.g. \'4x3\' (default: 1x1)', default=(1,1)) +@click.option('--num-keyframes', type=int, help='Number of seeds to interpolate through. If not specified, determine based on the length of the seeds array given by --seeds.', default=None) +@click.option('--w-frames', type=int, help='Number of frames to interpolate between latents', default=120) +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--image_mode', help='Image mode', type=click.Choice(['image_depth', 'image_raw']), required=False, metavar='STR', default='image_raw', show_default=True) +@click.option('--sample_mult', 'sampling_multiplier', type=float, help='Multiplier for depth sampling in volume rendering', default=2, show_default=True) +@click.option('--nrr', type=int, help='Neural rendering resolution override', default=None, show_default=True) + +def generate_images( + network_pkl: str, + data_dir: str, + shuffle_seed: Optional[int], + truncation_psi: float, + truncation_cutoff: int, + grid: Tuple[int,int], + num_keyframes: Optional[int], + w_frames: int, + image_mode: str, + sampling_multiplier: float, + nrr: Optional[int], +): + print('Loading networks from "%s"...' 
% network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier) + + G.rendering_kwargs['ray_start'] = 2.35 + + + + print("Reloading Modules!") + from training.neural_renderer import TriPlaneGenerator + G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device) + misc.copy_params_and_buffers(G, G_new, require_all=False) + G_new.neural_rendering_resolution = G.neural_rendering_resolution + G_new.rendering_kwargs = G.rendering_kwargs + G = G_new + + G.set_batch_size(1) + + res_dir = data_dir + + outdir = os.path.join(res_dir, 'data') + os.makedirs(outdir, exist_ok=True) + if os.path.exists(os.path.join(outdir, 'camera_info.json')): + print('Camera info already exists, skipping generation.') + return + + if nrr is not None: G.neural_rendering_resolution = nrr + + if truncation_cutoff == 0: + truncation_psi = 1.0 # truncation cutoff of 0 means no truncation anyways + if truncation_psi == 1.0: + truncation_cutoff = 14 # no truncation so doesn't matter where we cutoff + + ckpt_path = os.path.join(res_dir, 'checkpoints/df.pth') + if not os.path.exists(ckpt_path): + print('No checkpoints found, skipping generation.') + return + + print('Loading checkpoints from "%s"...' % ckpt_path) + ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)['model'] + trigrid = { + 8: ckpt['trigrids_8'].to(device), + 16: ckpt['trigrids_16'].to(device), + 32: ckpt['trigrids_32'].to(device), + 64: ckpt['trigrids_64'].to(device), + 128: ckpt['trigrids_128'].to(device), + 256: ckpt['trigrids_256'].to(device), + 512: ckpt['trigrids_512'].to(device), + } + ws = ckpt['ws'].to(device) + + intrinsics = FOV_to_intrinsics(12.447863, device=device) + + cam_pivot = torch.tensor([0, 0.0649, 0], device=device) + cam_radius = G.rendering_kwargs.get('avg_camera_radius', 2.7) + + camera_info = {} + + # sample angle_y from -pi/2 to pi/2 uniformly, 30 steps + # sample angle_p from pi/2 - 15/180*pi to pi/2 + 30/180*pi + sample_idx = 0 + step = 8 + angle_ys = np.linspace(0, np.pi * 2, step) + angle_ps = [np.pi / 2 - 30 / 180 * np.pi, np.pi / 2, np.pi / 2 + 30 / 180 * np.pi] + # for angle_y, angle_p in [(-np.pi / 2, np.pi / 2), (0, np.pi / 2), (np.pi / 2, np.pi / 2)]: + for i in tqdm(range(step - 1)): + + angle_y = angle_ys[i] + for j in range(3): + angle_p = angle_ps[j] + + angle_p = angle_p + np.random.uniform(-np.pi / 180 * 5, np.pi / 180 * 5) + + cam2world_pose = LookAtPoseSampler.sample(angle_y, angle_p, cam_pivot, radius=cam_radius, device=device) + + camera_params = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + img = G.render_planes(ws=ws, planes=trigrid, c=camera_params, noise_mode='const', + neural_rendering_resolution=512, chunk=4096)[image_mode] + + img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/{sample_idx:04d}.png') + + camera_info[f'{sample_idx:04d}.png'] = camera_params.cpu().numpy().tolist() + + sample_idx += 1 + + with open(os.path.join(outdir, 'camera_info.json'), 'w') as f: + json.dump(camera_info, f) + + + + + +#---------------------------------------------------------------------------- + +if __name__ 
== "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/gen_samples.py b/3DPortraitGAN_pyramid/gen_samples.py new file mode 100644 index 0000000..f0ba73e --- /dev/null +++ b/3DPortraitGAN_pyramid/gen_samples.py @@ -0,0 +1,253 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Generate images and shapes using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import numpy as np +import PIL.Image +import torch +from tqdm import tqdm +import mrcfile + + +import legacy +from camera_utils import LookAtPoseSampler, FOV_to_intrinsics +from torch_utils import misc +from training.smpl_triplane import TriPlaneGenerator + + +#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. + + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]: + '''Parse a floating point 2-vector of syntax 'a,b'. 
+ + Example: + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + parts = s.split(',') + if len(parts) == 2: + return (float(parts[0]), float(parts[1])) + raise ValueError(f'cannot parse 2-vector {s}') + +#---------------------------------------------------------------------------- + +def make_transform(translate: Tuple[float,float], angle: float): + m = np.eye(3) + s = np.sin(angle/360.0*np.pi*2) + c = np.cos(angle/360.0*np.pi*2) + m[0][0] = c + m[0][1] = s + m[0][2] = translate[0] + m[1][0] = -s + m[1][1] = c + m[1][2] = translate[1] + return m + +#---------------------------------------------------------------------------- + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +@click.option('--seeds_num', type=int, help='List of random seeds (e.g., \'0,1,4-6\')', required=False) +@click.option('--seeds', type=parse_range, help='List of random seeds', required=False, metavar='LIST', default=[]) + +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)') +@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR') +@click.option('--shapes', help='Export shapes as .mrc files viewable in ChimeraX', type=bool, required=False, metavar='BOOL', default=False, show_default=True) +@click.option('--images', help='Export shapes as .mrc files viewable in ChimeraX', type=bool, required=False, metavar='BOOL', default=True, show_default=True) + +@click.option('--shape-res', help='', type=int, required=False, metavar='int', default=512, show_default=True) +@click.option('--fov-deg', help='Field of View of camera in degrees', type=int, required=False, metavar='float', default=12.447863, show_default=True) +@click.option('--shape-format', help='Shape Format', type=click.Choice(['.mrc', '.ply']), default='.mrc') +@click.option('--reload_modules', help='Overload persistent modules?', type=bool, required=False, metavar='BOOL', default=False, show_default=True) +def generate_images( + network_pkl: str, + seeds_num: int, + seeds: List[int], + truncation_psi: float, + truncation_cutoff: int, + outdir: str, + shapes: bool, + images: bool, + shape_res: int, + fov_deg: float, + shape_format: str, + class_idx: Optional[int], + reload_modules: bool, +): + """Generate 
images using pretrained network pickle. + + Examples: + + \b + # Generate an image using pre-trained FFHQ model. + python gen_samples.py --outdir=output --trunc=0.7 --seeds=0-5 --shapes=True\\ + --network=ffhq-rebalanced-128.pkl + """ + import random + if seeds == []: + seeds = random.sample(range(1000000), seeds_num) + print('Loading networks from "%s"...' % network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * 4) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance'] * 4) + + G.rendering_kwargs['ray_start'] = 2.35 + + + # Specify reload_modules=True if you want code modifications to take effect; otherwise uses pickled code + if reload_modules: + print("Reloading Modules!") + G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device) + misc.copy_params_and_buffers(G, G_new, require_all=True) + G_new.neural_rendering_resolution = G.neural_rendering_resolution + G_new.rendering_kwargs = G.rendering_kwargs + G = G_new + + os.makedirs(outdir, exist_ok=True) + + cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, torch.tensor([0, 0.0649, 0], device=device), radius=2.7, device=device) + intrinsics = FOV_to_intrinsics(fov_deg, device=device) + + cond_p = torch.zeros([1, 6], device=device) + cam_pivot = torch.tensor([0, 0.0649, 0], device=device) + cam_radius = G.rendering_kwargs.get('avg_camera_radius', 2.7) + conditioning_cam2world_pose = LookAtPoseSampler.sample(np.pi / 2, np.pi / 2, cam_pivot, + radius=cam_radius, device=device) + conditioning_params = torch.cat([conditioning_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + # Generate images. + for seed_idx, seed in enumerate(seeds): + print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds))) + z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device) + + if images: + imgs = [] + #angle_p = -0.2 + for angle_y, angle_p in [(-np.pi / 2, np.pi / 2), (0, np.pi / 2), (np.pi / 2, np.pi / 2)]: + + cam2world_pose = LookAtPoseSampler.sample(angle_y, angle_p, cam_pivot, radius=cam_radius, device=device) + + camera_params = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + + + ws = G.mapping(z, conditioning_params,cond_p, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff) + img = G.synthesis(ws, c=camera_params, noise_mode='const', apply_def=False, pose_params=None)['image'] + + img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + imgs.append(img) + + img = torch.cat(imgs, dim=2) + + PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png') + + if shapes: + # extract a shape.mrc with marching cubes. You can view the .mrc file using ChimeraX from UCSF. 
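+            # The block below queries the generator's density (sigma) on a dense
+            # shape_res**3 voxel grid in chunks of max_batch points, pads the cube
+            # border with a large negative value to suppress spurious surfaces at the
+            # volume boundary, and then writes the result either as a .ply mesh
+            # (via shape_utils.convert_sdf_samples_to_ply) or as a raw .mrc volume.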
+ max_batch=1000000 + + samples, voxel_origin, voxel_size = create_samples(N=shape_res, voxel_origin=[0, 0, 0], cube_length=G.rendering_kwargs['box_warp'] * 0.8)#.reshape(1, -1, 3) + samples = samples.to(z.device) + sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=z.device) + transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=z.device) + transformed_ray_directions_expanded[..., -1] = -1 + + head = 0 + with tqdm(total = samples.shape[1]) as pbar: + with torch.no_grad(): + while head < samples.shape[1]: + torch.manual_seed(0) + sigma = G.sample(samples[:, head:head+max_batch], transformed_ray_directions_expanded[:, :samples.shape[1]-head], z, conditioning_params, cond_p,truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, noise_mode='const')['sigma'] + sigmas[:, head:head+max_batch] = sigma + head += max_batch + pbar.update(max_batch) + + sigmas = sigmas.reshape((shape_res, shape_res, shape_res)).cpu().numpy() + sigmas = np.flip(sigmas, 0) + + # Trim the border of the extracted cube + pad = int(30 * shape_res / 256) + pad_value = -1000 + sigmas[:pad] = pad_value + sigmas[-pad:] = pad_value + sigmas[:, :pad] = pad_value + sigmas[:, -pad:] = pad_value + sigmas[:, :, :pad] = pad_value + sigmas[:, :, -pad:] = pad_value + + if shape_format == '.ply': + from shape_utils import convert_sdf_samples_to_ply + convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, os.path.join(outdir, f'seed{seed:04d}.ply'), level=15) + elif shape_format == '.mrc': # output mrc + with mrcfile.new_mmap(os.path.join(outdir, f'seed{seed:04d}.mrc'), overwrite=True, shape=sigmas.shape, mrc_mode=2) as mrc: + mrc.data[:] = sigmas + + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/gen_samples_with_pose_prediction.py b/3DPortraitGAN_pyramid/gen_samples_with_pose_prediction.py new file mode 100644 index 0000000..0a6e492 --- /dev/null +++ b/3DPortraitGAN_pyramid/gen_samples_with_pose_prediction.py @@ -0,0 +1,298 @@ + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import numpy as np +import PIL.Image +import torch +from tqdm import tqdm +import mrcfile + + +import legacy +from camera_utils import LookAtPoseSampler, FOV_to_intrinsics +from torch_utils import misc +from training.smpl_triplane import TriPlaneGenerator +os.environ['CUDA_LAUNCH_BLOCKING'] = '1' + + +#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. + + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]: + '''Parse a floating point 2-vector of syntax 'a,b'. 
+ + Example: + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + parts = s.split(',') + if len(parts) == 2: + return (float(parts[0]), float(parts[1])) + raise ValueError(f'cannot parse 2-vector {s}') + +#---------------------------------------------------------------------------- + +def make_transform(translate: Tuple[float,float], angle: float): + m = np.eye(3) + s = np.sin(angle/360.0*np.pi*2) + c = np.cos(angle/360.0*np.pi*2) + m[0][0] = c + m[0][1] = s + m[0][2] = translate[0] + m[1][0] = -s + m[1][1] = c + m[1][2] = translate[1] + return m + +#---------------------------------------------------------------------------- + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- +from torch_utils.ops import upfirdn2d +from training.dual_discriminator import filtered_resizing + +# return pose +def run_D_pose_prediction(img, c, blur_sigma=0,D = None): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div(blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + pose,_ = D.predict_pose( img, c) + return pose + +def get_pose_params(real_img,real_c,D = None,neural_rendering_resolution = None,blur_sigma = None,resample_filter = None, filter_mode = None): + + + + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=resample_filter, + filter_mode=filter_mode) + + if True: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw} + + # get pose_params from real image + real_img_tmp_image = real_img['image'].detach().requires_grad_(True) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(True) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw} + + predicted_real_pose = run_D_pose_prediction(real_img_tmp, real_c, blur_sigma=blur_sigma, D = D) + return predicted_real_pose + + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +@click.option('--test_data', help='Real data dir', required=True) +@click.option('--outdir', help='output dir', required=True) +@click.option('--seeds', type=parse_range, help='List of random seeds', required=True) +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, 
show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)') +@click.option('--fov-deg', help='Field of View of camera in degrees', type=int, required=False, metavar='float', default=12.447863, show_default=True) +@click.option('--shape-format', help='Shape Format', type=click.Choice(['.mrc', '.ply']), default='.mrc') +@click.option('--reload_modules', help='Overload persistent modules?', type=bool, required=False, metavar='BOOL', default=False, show_default=True) +def generate_images( + network_pkl: str, + test_data: str, + outdir: str, + seeds: List[int], + truncation_psi: float, + truncation_cutoff: int, + fov_deg: float, + shape_format: str, + class_idx: Optional[int], + reload_modules: bool, +): + """Generate images using pretrained network pickle. + + Examples: + + \b + # Generate an image using pre-trained FFHQ model. + python gen_samples.py --outdir=output --trunc=0.7 --seeds=0-5 --shapes=True\\ + --network=ffhq-rebalanced-128.pkl + """ + os.makedirs(outdir, exist_ok=True) + + print('Loading networks from "%s"...' % network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + resume_data = legacy.load_network_pkl(f) + print('resume_data',resume_data.keys()) + G = resume_data['G_ema'].to(device) # type: ignore + D = resume_data['D_ema'].to(device) # type: ignore + + G.set_batch_size(1) + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * 2) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance'] * 2) + + G.rendering_kwargs['ray_start'] = 2.35 + + + # get pose prediction kwargs + import json + pose_prediction_kwargs_path = network_pkl.replace('.pkl','-pose_predict_kwargs.json') # network-snapshot-001400.pkl + print('Loading pose_prediction_kwargs from "%s"...' 
% pose_prediction_kwargs_path) + with open(pose_prediction_kwargs_path, 'r') as f: + pose_predict_kwargs = json.load(f) + + + # read images + import glob + + real_image_paths = glob.glob(os.path.join(test_data, 'aligned_images/*')) + + + path = os.path.join(test_data, 'result.json') + with open(path, 'r') as f: + labels = json.load(f) + + poses = [] + cameras = [] + names = [] + from PIL import Image + intrinsics = np.reshape( + np.array([6.510416666666667, + 0.0, + 0.5, + 0.0, + 6.510416666666667, + 0.5, + 0.0, + 0.0, + 1.0]), (1, 9) + ) + + with torch.no_grad(): + for real_image_path in real_image_paths: + image = Image.open(real_image_path).convert('RGB') + image = image.resize((G.img_resolution, G.img_resolution), Image.BILINEAR) + image = np.array(image) + image = image.transpose(2, 0, 1) + image = torch.tensor(image, device=device) + image = image.to(device).to(torch.float32) / 127.5 - 1 + image = image.unsqueeze(0) + image_id = os.path.basename(real_image_path).split('.')[0] + + c = labels[os.path.basename(real_image_path)]['camera_pose'] + c = np.reshape(np.array(c),(1,16)) + c = np.concatenate((c, intrinsics), axis=1) + + c = torch.tensor(c, device=device).to(torch.float32) + resample_filter = pose_predict_kwargs['resample_filter'] + resample_filter = torch.tensor(resample_filter, device=device).to(torch.float32) + + p = get_pose_params(real_img=image, + real_c=c, + D=D, + neural_rendering_resolution=pose_predict_kwargs['neural_rendering_resolution'], + blur_sigma=pose_predict_kwargs['blur_sigma'], + resample_filter=resample_filter, + filter_mode=pose_predict_kwargs['filter_mode']) + + poses.append(p) + cameras.append(c) + names.append(image_id) + + # Specify reload_modules=True if you want code modifications to take effect; otherwise uses pickled code + if True: + print("Reloading Modules!") + G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device) + misc.copy_params_and_buffers(G, G_new, require_all=False) + G_new.neural_rendering_resolution = G.neural_rendering_resolution + G_new.rendering_kwargs = G.rendering_kwargs + G = G_new + G.set_batch_size(1) + + + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + cam2world_pose = LookAtPoseSampler.sample(np.pi / 2, np.pi / 2, camera_lookat_point, radius=2.7, device=device) + focal_length = 6.5104166 # if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + cond_c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + cond_p = torch.zeros([1, 6], device=device) + + + + for seed_idx, seed in enumerate(seeds): + + z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device) + + ws = G.mapping(z, cond_c, cond_p, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff) + for pose_idx in range(len(poses)): + p = poses[pose_idx] + c = cameras[pose_idx] + name = names[pose_idx] + + img = G.synthesis(ws, c=c, noise_mode='const', apply_def=True, pose_params=p)['image'] + + img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)[0].cpu().numpy() + + real_image_path = real_image_paths[pose_idx] + image = Image.open(real_image_path).convert('RGB') + image = image.resize((G.img_resolution, G.img_resolution), Image.BILINEAR) + img = np.concatenate((np.array(image), img), axis=1) + + PIL.Image.fromarray(img, 'RGB').save(f'{outdir}/{seed}_{name}.png') + + + + 
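+# Note on the conditioning used in generate_images() above: each camera label c is a
+# 25-dim vector (a flattened 4x4 cam2world matrix followed by a flattened 3x3
+# intrinsics matrix in normalized image coordinates), and each p is a 6-dim body-pose
+# vector predicted from the real image by the discriminator's pose branch
+# (get_pose_params / run_D_pose_prediction); passing apply_def=True together with
+# pose_params=p renders the generated sample under that predicted pose.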
+#---------------------------------------------------------------------------- + +if __name__ == "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/gen_videos.py b/3DPortraitGAN_pyramid/gen_videos.py new file mode 100644 index 0000000..f0aa839 --- /dev/null +++ b/3DPortraitGAN_pyramid/gen_videos.py @@ -0,0 +1,356 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Generate lerp videos using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import imageio +import numpy as np +import scipy.interpolate +import torch +from tqdm import tqdm +import mrcfile + +import legacy + +from camera_utils import LookAtPoseSampler +from torch_utils import misc +#---------------------------------------------------------------------------- + +def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True): + batch_size, channels, img_h, img_w = img.shape + if grid_w is None: + grid_w = batch_size // grid_h + assert batch_size == grid_w * grid_h + if float_to_uint8: + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.reshape(grid_h, grid_w, channels, img_h, img_w) + img = img.permute(2, 0, 3, 1, 4) + img = img.reshape(channels, grid_h * img_h, grid_w * img_w) + if chw_to_hwc: + img = img.permute(1, 2, 0) + if to_numpy: + img = img.cpu().numpy() + return img + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- + +def gen_interp_video(G, mp4: str, seeds, shuffle_seed=None, w_frames=60*4, kind='cubic', + grid_dims=(1,1), num_keyframes=None, wraps=2, psi=1, truncation_cutoff=14, + cfg='FFHQ', image_mode='image', gen_shapes=False, device=torch.device('cuda'), large_pose= False, + **video_kwargs): + grid_w = grid_dims[0] + grid_h = grid_dims[1] + + if num_keyframes is None: + if len(seeds) % (grid_w*grid_h) != 0: + raise ValueError('Number of input seeds must be divisible by grid W*H') + num_keyframes = len(seeds) // 
(grid_w*grid_h) + + all_seeds = np.zeros(num_keyframes*grid_h*grid_w, dtype=np.int64) + for idx in range(num_keyframes*grid_h*grid_w): + all_seeds[idx] = seeds[idx % len(seeds)] + + if shuffle_seed is not None: + rng = np.random.RandomState(seed=shuffle_seed) + rng.shuffle(all_seeds) + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + zs = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])).to(device) + + cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, camera_lookat_point, radius=2.7, device=device) + focal_length = 6.5104166 # if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c = c.repeat(len(zs), 1) + + p = torch.zeros([len(zs), 6], device=device) + + ws = G.mapping(z=zs, c=c, p=p,truncation_psi=psi, truncation_cutoff=truncation_cutoff) + _ = G.synthesis(ws[:1], c[:1]) # warm up + ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:]) + + # Interpolation. + grid = [] + for yi in range(grid_h): + row = [] + for xi in range(grid_w): + x = np.arange(-num_keyframes * wraps, num_keyframes * (wraps + 1)) + y = np.tile(ws[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1]) + interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0) + row.append(interp) + grid.append(row) + + # Render video. + max_batch = 10000000 + voxel_resolution = 512 + video_out = imageio.get_writer(mp4, mode='I', fps=30, codec='libx264', **video_kwargs) + + if gen_shapes: + outdir = 'interpolation_{}_{}/'.format(all_seeds[0], all_seeds[1]) + os.makedirs(outdir, exist_ok=True) + all_poses = [] + for frame_idx in tqdm(range(num_keyframes * w_frames)): + imgs = [] + for yi in range(grid_h): + for xi in range(grid_w): + if large_pose: + # 0 - 2pi + cam2world_pose = LookAtPoseSampler.sample(np.pi / 2 + (frame_idx / w_frames) * 2 * np.pi, + np.pi / 2, + camera_lookat_point, radius=2.7, device=device) + else: + pitch_range = 0.25 + yaw_range = 0.35 + cam2world_pose = LookAtPoseSampler.sample(np.pi/2 + yaw_range * np.sin(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + np.pi/2 -0.05 + pitch_range * np.cos(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + camera_lookat_point, radius=2.7, device=device) + all_poses.append(cam2world_pose.squeeze().cpu().numpy()) + focal_length = 6.5104166 if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + interp = grid[yi][xi] + w = torch.from_numpy(interp(frame_idx / w_frames)).to(device) + + entangle = 'camera' + if entangle == 'conditioning': + c_forward = torch.cat([LookAtPoseSampler.sample(np.pi/2, + np.pi/2, + camera_lookat_point, + radius=2.7, device=device).reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + w_c = G.mapping(z=zs[0:1], c=c[0:1], truncation_psi=psi, truncation_cutoff=truncation_cutoff) + img = G.synthesis(ws=w_c, c=c_forward, noise_mode='const')[image_mode][0] + elif entangle == 'camera': + img = G.synthesis(ws=w.unsqueeze(0), c=c[0:1], noise_mode='const')[image_mode][0] + elif entangle == 'both': + w_c = G.mapping(z=zs[0:1], c=c[0:1], truncation_psi=psi, truncation_cutoff=truncation_cutoff) + img = G.synthesis(ws=w_c, c=c[0:1], noise_mode='const')[image_mode][0] + + if image_mode == 'image_depth': + img = 
-img + img = (img - img.min()) / (img.max() - img.min()) * 2 - 1 + + imgs.append(img) + + if gen_shapes: + # generate shapes + print('Generating shape for frame %d / %d ...' % (frame_idx, num_keyframes * w_frames)) + + samples, voxel_origin, voxel_size = create_samples(N=voxel_resolution, voxel_origin=[0, 0, 0], cube_length=G.rendering_kwargs['box_warp']) + samples = samples.to(device) + sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=device) + transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=device) + transformed_ray_directions_expanded[..., -1] = -1 + + head = 0 + with tqdm(total = samples.shape[1]) as pbar: + with torch.no_grad(): + while head < samples.shape[1]: + torch.manual_seed(0) + sigma = G.sample_mixed(samples[:, head:head+max_batch], transformed_ray_directions_expanded[:, :samples.shape[1]-head], w.unsqueeze(0), truncation_psi=psi, noise_mode='const')['sigma'] + sigmas[:, head:head+max_batch] = sigma + head += max_batch + pbar.update(max_batch) + + sigmas = sigmas.reshape((voxel_resolution, voxel_resolution, voxel_resolution)).cpu().numpy() + sigmas = np.flip(sigmas, 0) + + pad = int(30 * voxel_resolution / 256) + pad_top = int(38 * voxel_resolution / 256) + sigmas[:pad] = 0 + sigmas[-pad:] = 0 + sigmas[:, :pad] = 0 + sigmas[:, -pad_top:] = 0 + sigmas[:, :, :pad] = 0 + sigmas[:, :, -pad:] = 0 + + output_ply = True + if output_ply: + from shape_utils import convert_sdf_samples_to_ply + convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, os.path.join(outdir, f'{frame_idx:04d}_shape.ply'), level=10) + else: # output mrc + with mrcfile.new_mmap(outdir + f'{frame_idx:04d}_shape.mrc', overwrite=True, shape=sigmas.shape, mrc_mode=2) as mrc: + mrc.data[:] = sigmas + + video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h)) + video_out.close() + all_poses = np.stack(all_poses) + + if gen_shapes: + print(all_poses.shape) + with open(mp4.replace('.mp4', '_trajectory.npy'), 'wb') as f: + np.save(f, all_poses) + +#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List[int]]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. + + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]: + '''Parse a 'M,N' or 'MxN' integer tuple. + + Example: + '4x2' returns (4,2) + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + if m := re.match(r'^(\d+)[x,](\d+)$', s): + return (int(m.group(1)), int(m.group(2))) + raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +@click.option('--seeds', type=parse_range, help='List of random seeds', required=True) +@click.option('--shuffle-seed', type=int, help='Random seed to use for shuffling seed order', default=None) +@click.option('--grid', type=parse_tuple, help='Grid width/height, e.g. 
\'4x3\' (default: 1x1)', default=(1,1)) +@click.option('--num-keyframes', type=int, help='Number of seeds to interpolate through. If not specified, determine based on the length of the seeds array given by --seeds.', default=None) +@click.option('--w-frames', type=int, help='Number of frames to interpolate between latents', default=120) +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--outdir', help='Output directory', type=str, required=True, metavar='DIR') +@click.option('--reload_modules', help='Overload persistent modules?', type=bool, required=False, metavar='BOOL', default=False, show_default=True) +@click.option('--cfg', help='Config', type=click.Choice(['FFHQ', 'AFHQ', 'Shapenet']), required=False, metavar='STR', default='FFHQ', show_default=True) +@click.option('--image_mode', help='Image mode', type=click.Choice(['image', 'image_depth', 'image_raw']), required=False, metavar='STR', default='image', show_default=True) +@click.option('--sample_mult', 'sampling_multiplier', type=float, help='Multiplier for depth sampling in volume rendering', default=2, show_default=True) +@click.option('--nrr', type=int, help='Neural rendering resolution override', default=None, show_default=True) +@click.option('--shapes', type=bool, help='Gen shapes for shape interpolation', default=False, show_default=True) +@click.option('--interpolate', type=bool, help='Interpolate between seeds', default=True, show_default=True) +@click.option('--large_pose', type=bool, help='Gen shapes for shape interpolation', default=False, show_default=True) + +def generate_images( + network_pkl: str, + seeds: List[int], + shuffle_seed: Optional[int], + truncation_psi: float, + truncation_cutoff: int, + grid: Tuple[int,int], + num_keyframes: Optional[int], + w_frames: int, + outdir: str, + reload_modules: bool, + cfg: str, + image_mode: str, + sampling_multiplier: float, + nrr: Optional[int], + shapes: bool, + interpolate: bool, + large_pose: bool, +): + """Render a latent vector interpolation video. + + Examples: + + \b + # Render a 4x2 grid of interpolations for seeds 0 through 31. + python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-31 --grid=4x2 \\ + --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl + + Animation length and seed keyframes: + + The animation length is either determined based on the --seeds value or explicitly + specified using the --num-keyframes option. + + When num keyframes is specified with --num-keyframes, the output video length + will be 'num_keyframes*w_frames' frames. + + If --num-keyframes is not specified, the number of seeds given with + --seeds must be divisible by grid size W*H (--grid). In this case the + output video length will be '# seeds/(w*h)*w_frames' frames. + """ + + if not os.path.exists(outdir): + os.makedirs(outdir, exist_ok=True) + + print('Loading networks from "%s"...' 
% network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier) + G.rendering_kwargs['depth_resolution_importance'] = int(G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier) + + G.rendering_kwargs['ray_start'] = 2.35 + + G.set_batch_size(1) + + + if nrr is not None: G.neural_rendering_resolution = nrr + + if truncation_cutoff == 0: + truncation_psi = 1.0 # truncation cutoff of 0 means no truncation anyways + if truncation_psi == 1.0: + truncation_cutoff = 14 # no truncation so doesn't matter where we cutoff + + if interpolate: + raise NotImplementedError + output = os.path.join(outdir, 'interpolation.mp4') + gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, seeds=seeds, shuffle_seed=shuffle_seed, psi=truncation_psi, truncation_cutoff=truncation_cutoff, cfg=cfg, image_mode=image_mode, gen_shapes=shapes) + else: + for seed in seeds: + if large_pose: + output = os.path.join(outdir, f'{seed}_large_pose.mp4') + else: + output = os.path.join(outdir, f'{seed}_small_pose.mp4') + seeds_ = [seed] + gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, seeds=seeds_, shuffle_seed=shuffle_seed, psi=truncation_psi, truncation_cutoff=truncation_cutoff, cfg=cfg, image_mode=image_mode,large_pose = large_pose) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/gen_videos_shapes_from_optimized_triplane.py b/3DPortraitGAN_pyramid/gen_videos_shapes_from_optimized_triplane.py new file mode 100644 index 0000000..31b5463 --- /dev/null +++ b/3DPortraitGAN_pyramid/gen_videos_shapes_from_optimized_triplane.py @@ -0,0 +1,364 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
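+#
+# This script loads a previously optimized trigrid pyramid (the most recent
+# checkpoint under <data_dir>/log/ckpt), re-renders a full-orbit "large pose"
+# video and a small head-motion video from it, and extracts a density volume
+# (.mrc) or mesh (.ply) of the reconstructed portrait.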
+ +"""Generate lerp videos using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import imageio +import numpy as np +import scipy.interpolate +import torch +from tqdm import tqdm +import mrcfile + +import legacy + +from camera_utils import LookAtPoseSampler +from torch_utils import misc +import glob +#---------------------------------------------------------------------------- + +def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True): + batch_size, channels, img_h, img_w = img.shape + if grid_w is None: + grid_w = batch_size // grid_h + assert batch_size == grid_w * grid_h + if float_to_uint8: + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.reshape(grid_h, grid_w, channels, img_h, img_w) + img = img.permute(2, 0, 3, 1, 4) + img = img.reshape(channels, grid_h * img_h, grid_w * img_w) + if chw_to_hwc: + img = img.permute(1, 2, 0) + if to_numpy: + img = img.cpu().numpy() + return img + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- + +def gen_interp_video(G, mp4: str, trigrid=None,ws=None, shuffle_seed=None, w_frames=60*4, kind='cubic', + grid_dims=(1,1), num_keyframes=None, wraps=2, psi=1, truncation_cutoff=14, + image_mode='image', gen_shapes=False, device=torch.device('cuda'), large_pose= False, + **video_kwargs): + grid_w = grid_dims[0] + grid_h = grid_dims[1] + + num_keyframes = 1 + + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + + cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, camera_lookat_point, radius=2.7, device=device) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c = c.repeat(len(ws), 1) + + p = torch.zeros([len(ws), 6], device=device) + + ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:]) + + # Interpolation. + grid = [] + for yi in range(grid_h): + row = [] + for xi in range(grid_w): + x = np.arange(-num_keyframes * wraps, num_keyframes * (wraps + 1)) + y = np.tile(ws[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1]) + interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0) + row.append(interp) + grid.append(row) + + # Render video. 
+ video_out = imageio.get_writer(mp4, mode='I', fps=30, codec='libx264', **video_kwargs) + + + all_poses = [] + + if large_pose: + image_row_8 = [] + image_row_4 = [] + + + for frame_idx in tqdm(range(num_keyframes * w_frames)): + imgs = [] + for yi in range(grid_h): + for xi in range(grid_w): + if large_pose: + # 0 - 2pi + cam2world_pose = LookAtPoseSampler.sample(np.pi / 2 + (frame_idx / w_frames) * 2 * np.pi, + np.pi / 2, + camera_lookat_point, radius=2.7, device=device) + else: + pitch_range = 0.25 + yaw_range = 0.35 + cam2world_pose = LookAtPoseSampler.sample(np.pi/2 + yaw_range * np.sin(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + np.pi/2 -0.05 + pitch_range * np.cos(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + camera_lookat_point, radius=2.7, device=device) + all_poses.append(cam2world_pose.squeeze().cpu().numpy()) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + interp = grid[yi][xi] + w = torch.from_numpy(interp(frame_idx / w_frames)).to(device) + + img = G.render_planes(ws=w.unsqueeze(0), planes=trigrid, c=c[0:1], noise_mode='const', neural_rendering_resolution=512,chunk = 4096)[image_mode][0] + + if image_mode == 'image_depth': + img = -img + img = (img - img.min()) / (img.max() - img.min()) * 2 - 1 + + imgs.append(img) + if large_pose and frame_idx % int(num_keyframes * w_frames//8) == 0: + image_row_8.append((img.permute(1, 2, 0) * 127.5 + 128).clamp(0, 255).to(torch.uint8)) + + if large_pose and frame_idx % int(num_keyframes * w_frames//4) == 0: + image_row_4.append((img.permute(1, 2, 0) * 127.5 + 128).clamp(0, 255).to(torch.uint8)) + + video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h)) + video_out.close() + all_poses = np.stack(all_poses) + + if large_pose: + import PIL + image_row_8 = torch.cat(image_row_8, 1).cpu().numpy() + PIL.Image.fromarray(image_row_8.astype(np.uint8)).save(mp4.replace('.mp4', '_final_8.png')) + + image_row_4 = torch.cat(image_row_4, 1).cpu().numpy() + PIL.Image.fromarray(image_row_4.astype(np.uint8)).save(mp4.replace('.mp4', '_final_4.png')) + + + if gen_shapes: + print(all_poses.shape) + with open(mp4.replace('.mp4', '_trajectory.npy'), 'wb') as f: + np.save(f, all_poses) + +#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List[int]]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. + + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]: + '''Parse a 'M,N' or 'MxN' integer tuple. 
+ + Example: + '4x2' returns (4,2) + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + if m := re.match(r'^(\d+)[x,](\d+)$', s): + return (int(m.group(1)), int(m.group(2))) + raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +@click.option('--data_dir', help='Network pickle filename', required=True) +@click.option('--shuffle-seed', type=int, help='Random seed to use for shuffling seed order', default=None) +@click.option('--grid', type=parse_tuple, help='Grid width/height, e.g. \'4x3\' (default: 1x1)', default=(1,1)) +@click.option('--num-keyframes', type=int, help='Number of seeds to interpolate through. If not specified, determine based on the length of the seeds array given by --seeds.', default=None) +@click.option('--w-frames', type=int, help='Number of frames to interpolate between latents', default=120) +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--image_mode', help='Image mode', type=click.Choice(['image_depth', 'image_raw']), required=False, metavar='STR', default='image_raw', show_default=True) +@click.option('--sample_mult', 'sampling_multiplier', type=float, help='Multiplier for depth sampling in volume rendering', default=2, show_default=True) +@click.option('--nrr', type=int, help='Neural rendering resolution override', default=None, show_default=True) + +def generate_images( + network_pkl: str, + data_dir: str, + shuffle_seed: Optional[int], + truncation_psi: float, + truncation_cutoff: int, + grid: Tuple[int,int], + num_keyframes: Optional[int], + w_frames: int, + image_mode: str, + sampling_multiplier: float, + nrr: Optional[int], +): + print('Loading networks from "%s"...' % network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier) + + G.rendering_kwargs['ray_start'] = 2.35 + + + + print("Reloading Modules!") + from training.smpl_triplane import TriPlaneGenerator + G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device) + misc.copy_params_and_buffers(G, G_new, require_all=True) + G_new.neural_rendering_resolution = G.neural_rendering_resolution + G_new.rendering_kwargs = G.rendering_kwargs + G = G_new + + G.set_batch_size(1) + res_dir = data_dir + + outdir = os.path.join(res_dir, 'results_final') + os.makedirs(outdir, exist_ok=True) + if not os.path.exists(os.path.join(res_dir, 'log/ckpt')): + print('WARNING: No checkpoints found in "%s"!' 
% os.path.join(res_dir, 'log/ckpt')) + return + + if nrr is not None: G.neural_rendering_resolution = nrr + + if truncation_cutoff == 0: + truncation_psi = 1.0 # truncation cutoff of 0 means no truncation anyways + if truncation_psi == 1.0: + truncation_cutoff = 14 # no truncation so doesn't matter where we cutoff + + ckpt_path = glob.glob(os.path.join(res_dir, 'log/ckpt/*')) + ckpt_path = sorted(ckpt_path) + ckpt_path = ckpt_path[-1] + if not os.path.exists(ckpt_path): + print('WARNING: No checkpoints found in "%s"!' % ckpt_path) + return + print('Loading checkpoints from "%s"...' % ckpt_path) + ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)['model'] + trigrid = { + 8: ckpt['trigrids_8'].to(device).detach(), + 16: ckpt['trigrids_16'].to(device).detach(), + 32: ckpt['trigrids_32'].to(device).detach(), + 64: ckpt['trigrids_64'].to(device).detach(), + 128: ckpt['trigrids_128'].to(device).detach(), + 256: ckpt['trigrids_256'].to(device).detach(), + 512: ckpt['trigrids_512'].to(device).detach(), + } + ws = ckpt['ws'].to(device) + + output = os.path.join(outdir, f'large_pose_final.mp4') + print('Generating video "%s"...' % output) + if not os.path.exists(output): + gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=240, + trigrid=trigrid, ws=ws, shuffle_seed=shuffle_seed, psi=truncation_psi, + truncation_cutoff=truncation_cutoff, image_mode=image_mode, large_pose=True) + output = os.path.join(outdir, f'small_pose_final.mp4') + print('Generating video "%s"...' % output) + if not os.path.exists(output): + gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=120, + trigrid=trigrid, ws=ws, shuffle_seed=shuffle_seed, psi=truncation_psi, + truncation_cutoff=truncation_cutoff, image_mode=image_mode, large_pose=False) + + print('Generating shapes...') + + shape_res = 512 + max_batch = 1000000 + shape_format = '.mrc' + + if shape_format == '.ply': + from shape_utils import convert_sdf_samples_to_ply + shape_path = os.path.join(outdir, f'shape.ply') + elif shape_format == '.mrc': # output mrc + shape_path = os.path.join(outdir, f'shape.mrc') + + if not os.path.exists(shape_path): + + samples, voxel_origin, voxel_size = create_samples(N=shape_res, voxel_origin=[0, 0, 0], + cube_length=0.9) # .reshape(1, -1, 3) + samples = samples.to(device) + sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=device) + transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=device) + transformed_ray_directions_expanded[..., -1] = -1 + + head = 0 + with tqdm(total=samples.shape[1]) as pbar: + with torch.no_grad(): + while head < samples.shape[1]: + torch.manual_seed(0) + sigma = G.sample_trigrid(samples[:, head:head + max_batch], + transformed_ray_directions_expanded[:, :samples.shape[1] - head], + planes=trigrid, truncation_psi=truncation_psi, + truncation_cutoff=truncation_cutoff, noise_mode='const')['sigma'] + sigmas[:, head:head + max_batch] = sigma + head += max_batch + pbar.update(max_batch) + + sigmas = sigmas.reshape((shape_res, shape_res, shape_res)).cpu().numpy() + sigmas = np.flip(sigmas, 0) + + # Trim the border of the extracted cube + pad = int(30 * shape_res / 256) + pad_value = -1000 + sigmas[:pad] = pad_value + sigmas[-pad:] = pad_value + sigmas[:, :pad] = pad_value + sigmas[:, -pad:] = pad_value + sigmas[:, :, :pad] = pad_value + sigmas[:, :, -pad:] = pad_value + + if shape_format == '.ply': + from shape_utils import 
convert_sdf_samples_to_ply + convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, + os.path.join(outdir, f'shape.ply'), level=15) + elif shape_format == '.mrc': # output mrc + with mrcfile.new_mmap(os.path.join(outdir, f'shape.mrc'), overwrite=True, shape=sigmas.shape, + mrc_mode=2) as mrc: + mrc.data[:] = sigmas + + + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/gen_videos_shapes_from_triplane.py b/3DPortraitGAN_pyramid/gen_videos_shapes_from_triplane.py new file mode 100644 index 0000000..2d436a3 --- /dev/null +++ b/3DPortraitGAN_pyramid/gen_videos_shapes_from_triplane.py @@ -0,0 +1,351 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Generate lerp videos using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import imageio +import numpy as np +import scipy.interpolate +import torch +from tqdm import tqdm +import mrcfile + +import legacy + +from camera_utils import LookAtPoseSampler +from torch_utils import misc +import glob +#---------------------------------------------------------------------------- + +def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True): + batch_size, channels, img_h, img_w = img.shape + if grid_w is None: + grid_w = batch_size // grid_h + assert batch_size == grid_w * grid_h + if float_to_uint8: + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.reshape(grid_h, grid_w, channels, img_h, img_w) + img = img.permute(2, 0, 3, 1, 4) + img = img.reshape(channels, grid_h * img_h, grid_w * img_w) + if chw_to_hwc: + img = img.permute(1, 2, 0) + if to_numpy: + img = img.cpu().numpy() + return img + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- + +def gen_interp_video(G, mp4: str, trigrid=None,ws=None, 
shuffle_seed=None, w_frames=60*4, kind='cubic', + grid_dims=(1,1), num_keyframes=None, wraps=2, psi=1, truncation_cutoff=14, + image_mode='image', gen_shapes=False, device=torch.device('cuda'), large_pose= False, + **video_kwargs): + grid_w = grid_dims[0] + grid_h = grid_dims[1] + + num_keyframes = 1 + + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + + cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, camera_lookat_point, radius=2.7, device=device) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c = c.repeat(len(ws), 1) + + p = torch.zeros([len(ws), 6], device=device) + + ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:]) + + # Interpolation. + grid = [] + for yi in range(grid_h): + row = [] + for xi in range(grid_w): + x = np.arange(-num_keyframes * wraps, num_keyframes * (wraps + 1)) + y = np.tile(ws[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1]) + interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0) + row.append(interp) + grid.append(row) + + # Render video. + video_out = imageio.get_writer(mp4, mode='I', fps=30, codec='libx264', **video_kwargs) + + + all_poses = [] + + if large_pose: + image_row = [] + + + for frame_idx in tqdm(range(num_keyframes * w_frames)): + imgs = [] + for yi in range(grid_h): + for xi in range(grid_w): + if large_pose: + # 0 - 2pi + cam2world_pose = LookAtPoseSampler.sample(np.pi / 2 + (frame_idx / w_frames) * 2 * np.pi, + np.pi / 2, + camera_lookat_point, radius=2.7, device=device) + else: + pitch_range = 0.25 + yaw_range = 0.35 + cam2world_pose = LookAtPoseSampler.sample(np.pi/2 + yaw_range * np.sin(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + np.pi/2 -0.05 + pitch_range * np.cos(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + camera_lookat_point, radius=2.7, device=device) + all_poses.append(cam2world_pose.squeeze().cpu().numpy()) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + interp = grid[yi][xi] + w = torch.from_numpy(interp(frame_idx / w_frames)).to(device) + + img = G.render_planes(ws=w.unsqueeze(0), planes=trigrid, c=c[0:1], noise_mode='const', neural_rendering_resolution=512,chunk = 4096)[image_mode][0] + + if image_mode == 'image_depth': + img = -img + img = (img - img.min()) / (img.max() - img.min()) * 2 - 1 + + imgs.append(img) + if large_pose and frame_idx % int(num_keyframes * w_frames//8) == 0: + image_row.append((img.permute(1, 2, 0) * 127.5 + 128).clamp(0, 255).to(torch.uint8)) + + video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h)) + video_out.close() + all_poses = np.stack(all_poses) + + if large_pose: + import PIL + image_row = torch.cat(image_row, 1).cpu().numpy() + PIL.Image.fromarray(image_row.astype(np.uint8)).save(mp4.replace('.mp4', '_large_pose.png')) + + + if gen_shapes: + print(all_poses.shape) + with open(mp4.replace('.mp4', '_trajectory.npy'), 'wb') as f: + np.save(f, all_poses) + +#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List[int]]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. 
+ + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]: + '''Parse a 'M,N' or 'MxN' integer tuple. + + Example: + '4x2' returns (4,2) + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + if m := re.match(r'^(\d+)[x,](\d+)$', s): + return (int(m.group(1)), int(m.group(2))) + raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +@click.option('--data_dir', help='Network pickle filename', required=True) +@click.option('--shuffle-seed', type=int, help='Random seed to use for shuffling seed order', default=None) +@click.option('--grid', type=parse_tuple, help='Grid width/height, e.g. \'4x3\' (default: 1x1)', default=(1,1)) +@click.option('--num-keyframes', type=int, help='Number of seeds to interpolate through. If not specified, determine based on the length of the seeds array given by --seeds.', default=None) +@click.option('--w-frames', type=int, help='Number of frames to interpolate between latents', default=120) +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--image_mode', help='Image mode', type=click.Choice(['image_depth', 'image_raw']), required=False, metavar='STR', default='image_raw', show_default=True) +@click.option('--sample_mult', 'sampling_multiplier', type=float, help='Multiplier for depth sampling in volume rendering', default=2, show_default=True) +@click.option('--nrr', type=int, help='Neural rendering resolution override', default=None, show_default=True) + +def generate_images( + network_pkl: str, + data_dir: str, + shuffle_seed: Optional[int], + truncation_psi: float, + truncation_cutoff: int, + grid: Tuple[int,int], + num_keyframes: Optional[int], + w_frames: int, + image_mode: str, + sampling_multiplier: float, + nrr: Optional[int], +): + print('Loading networks from "%s"...' 
% network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier) + + G.rendering_kwargs['ray_start'] = 2.35 + + + + print("Reloading Modules!") + from training.smpl_triplane import TriPlaneGenerator + G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device) + misc.copy_params_and_buffers(G, G_new, require_all=True) + G_new.neural_rendering_resolution = G.neural_rendering_resolution + G_new.rendering_kwargs = G.rendering_kwargs + G = G_new + + G.set_batch_size(1) + + + for res_dir in glob.glob(data_dir + '/*'): + outdir = os.path.join(res_dir, 'results') + os.makedirs(outdir, exist_ok=True) + + + + if nrr is not None: G.neural_rendering_resolution = nrr + + if truncation_cutoff == 0: + truncation_psi = 1.0 # truncation cutoff of 0 means no truncation anyways + if truncation_psi == 1.0: + truncation_cutoff = 14 # no truncation so doesn't matter where we cutoff + + ckpt_path = os.path.join(res_dir, 'checkpoints/df.pth') + if not os.path.exists(ckpt_path): + continue + print('Loading checkpoints from "%s"...' % ckpt_path) + ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)['model'] + trigrid = { + 8:ckpt['trigrids_8'].to(device), + 16:ckpt['trigrids_16'].to(device), + 32:ckpt['trigrids_32'].to(device), + 64:ckpt['trigrids_64'].to(device), + 128:ckpt['trigrids_128'].to(device), + 256:ckpt['trigrids_256'].to(device), + } + ws = ckpt['ws'].to(device) + + output = os.path.join(outdir, f'large_pose.mp4') + print('Generating video "%s"...' % output) + if not os.path.exists(output): + gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, + trigrid=trigrid,ws=ws, shuffle_seed=shuffle_seed, psi=truncation_psi, + truncation_cutoff=truncation_cutoff, image_mode=image_mode, large_pose=True) + output = os.path.join(outdir, f'small_pose.mp4') + print('Generating video "%s"...' 
% output) + if not os.path.exists(output): + gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, + trigrid=trigrid,ws=ws, shuffle_seed=shuffle_seed, psi=truncation_psi, + truncation_cutoff=truncation_cutoff, image_mode=image_mode, large_pose=False) + + print('Generating shapes...') + + shape_res = 512 + max_batch = 1000000 + shape_format = '.mrc' + + if shape_format == '.ply': + from shape_utils import convert_sdf_samples_to_ply + shape_path =os.path.join(outdir, f'shape.ply') + elif shape_format == '.mrc': # output mrc + shape_path = os.path.join(outdir, f'shape.mrc') + + if not os.path.exists(shape_path): + + samples, voxel_origin, voxel_size = create_samples(N=shape_res, voxel_origin=[0, 0, 0], + cube_length=0.9) # .reshape(1, -1, 3) + samples = samples.to(device) + sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=device) + transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=device) + transformed_ray_directions_expanded[..., -1] = -1 + + head = 0 + with tqdm(total=samples.shape[1]) as pbar: + with torch.no_grad(): + while head < samples.shape[1]: + torch.manual_seed(0) + sigma = G.sample_trigrid(samples[:, head:head + max_batch], + transformed_ray_directions_expanded[:, :samples.shape[1] - head], planes = trigrid, truncation_psi=truncation_psi, + truncation_cutoff=truncation_cutoff, noise_mode='const')['sigma'] + sigmas[:, head:head + max_batch] = sigma + head += max_batch + pbar.update(max_batch) + + sigmas = sigmas.reshape((shape_res, shape_res, shape_res)).cpu().numpy() + sigmas = np.flip(sigmas, 0) + + # Trim the border of the extracted cube + pad = int(30 * shape_res / 256) + pad_value = -1000 + sigmas[:pad] = pad_value + sigmas[-pad:] = pad_value + sigmas[:, :pad] = pad_value + sigmas[:, -pad:] = pad_value + sigmas[:, :, :pad] = pad_value + sigmas[:, :, -pad:] = pad_value + + if shape_format == '.ply': + from shape_utils import convert_sdf_samples_to_ply + convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, + os.path.join(outdir, f'shape.ply'), level=15) + elif shape_format == '.mrc': # output mrc + with mrcfile.new_mmap(os.path.join(outdir, f'shape.mrc'), overwrite=True, shape=sigmas.shape, + mrc_mode=2) as mrc: + mrc.data[:] = sigmas + + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/get_decoder_ckpt.py b/3DPortraitGAN_pyramid/get_decoder_ckpt.py new file mode 100644 index 0000000..f9057c3 --- /dev/null +++ b/3DPortraitGAN_pyramid/get_decoder_ckpt.py @@ -0,0 +1,333 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
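+
+# This script loads a trained network pickle, reports the parameter counts of
+# the super-resolution and backbone modules, and exports the state dicts of
+# G.decoder, G.torgb and G.pose_branch together with the generator
+# hyper-parameters to ./models/<pickle_name>_decoder.ckpt for downstream use.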
+ +"""Generate lerp videos using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import imageio +import numpy as np +import scipy.interpolate +import torch +from tqdm import tqdm +import mrcfile + +import legacy + +from camera_utils import LookAtPoseSampler +from torch_utils import misc +#---------------------------------------------------------------------------- + +def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True): + batch_size, channels, img_h, img_w = img.shape + if grid_w is None: + grid_w = batch_size // grid_h + assert batch_size == grid_w * grid_h + if float_to_uint8: + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.reshape(grid_h, grid_w, channels, img_h, img_w) + img = img.permute(2, 0, 3, 1, 4) + img = img.reshape(channels, grid_h * img_h, grid_w * img_w) + if chw_to_hwc: + img = img.permute(1, 2, 0) + if to_numpy: + img = img.cpu().numpy() + return img + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- + +def gen_interp_video(G, mp4: str, seeds, shuffle_seed=None, w_frames=60*4, kind='cubic', + grid_dims=(1,1), num_keyframes=None, wraps=2, psi=1, truncation_cutoff=14, + cfg='FFHQ', image_mode='image', gen_shapes=False, device=torch.device('cuda'), large_pose= False, + **video_kwargs): + grid_w = grid_dims[0] + grid_h = grid_dims[1] + + if num_keyframes is None: + if len(seeds) % (grid_w*grid_h) != 0: + raise ValueError('Number of input seeds must be divisible by grid W*H') + num_keyframes = len(seeds) // (grid_w*grid_h) + + all_seeds = np.zeros(num_keyframes*grid_h*grid_w, dtype=np.int64) + for idx in range(num_keyframes*grid_h*grid_w): + all_seeds[idx] = seeds[idx % len(seeds)] + + if shuffle_seed is not None: + rng = np.random.RandomState(seed=shuffle_seed) + rng.shuffle(all_seeds) + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + zs = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])).to(device) + + cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, camera_lookat_point, radius=2.7, device=device) + focal_length = 6.5104166 # if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c = c.repeat(len(zs), 1) + + p = torch.zeros([len(zs), 6], device=device) + + ws = G.mapping(z=zs, c=c, p=p,truncation_psi=psi, truncation_cutoff=truncation_cutoff) + _ = 
G.synthesis(ws[:1], c[:1]) # warm up + ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:]) + + # Interpolation. + grid = [] + for yi in range(grid_h): + row = [] + for xi in range(grid_w): + x = np.arange(-num_keyframes * wraps, num_keyframes * (wraps + 1)) + y = np.tile(ws[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1]) + interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0) + row.append(interp) + grid.append(row) + + # Render video. + max_batch = 10000000 + voxel_resolution = 512 + video_out = imageio.get_writer(mp4, mode='I', fps=30, codec='libx264', **video_kwargs) + + if gen_shapes: + outdir = 'interpolation_{}_{}/'.format(all_seeds[0], all_seeds[1]) + os.makedirs(outdir, exist_ok=True) + all_poses = [] + for frame_idx in tqdm(range(num_keyframes * w_frames)): + imgs = [] + for yi in range(grid_h): + for xi in range(grid_w): + if large_pose: + # 0 - 2pi + cam2world_pose = LookAtPoseSampler.sample(np.pi / 2 + (frame_idx / w_frames) * 2 * np.pi, + np.pi / 2, + camera_lookat_point, radius=2.7, device=device) + else: + pitch_range = 0.25 + yaw_range = 0.35 + cam2world_pose = LookAtPoseSampler.sample(np.pi/2 + yaw_range * np.sin(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + np.pi/2 -0.05 + pitch_range * np.cos(2 * np.pi * frame_idx / (num_keyframes * w_frames)), + camera_lookat_point, radius=2.7, device=device) + all_poses.append(cam2world_pose.squeeze().cpu().numpy()) + focal_length = 6.5104166 if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + interp = grid[yi][xi] + w = torch.from_numpy(interp(frame_idx / w_frames)).to(device) + + entangle = 'camera' + if entangle == 'conditioning': + c_forward = torch.cat([LookAtPoseSampler.sample(np.pi/2, + np.pi/2, + camera_lookat_point, + radius=2.7, device=device).reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + w_c = G.mapping(z=zs[0:1], c=c[0:1], truncation_psi=psi, truncation_cutoff=truncation_cutoff) + img = G.synthesis(ws=w_c, c=c_forward, noise_mode='const')[image_mode][0] + elif entangle == 'camera': + img = G.synthesis(ws=w.unsqueeze(0), c=c[0:1], noise_mode='const')[image_mode][0] + elif entangle == 'both': + w_c = G.mapping(z=zs[0:1], c=c[0:1], truncation_psi=psi, truncation_cutoff=truncation_cutoff) + img = G.synthesis(ws=w_c, c=c[0:1], noise_mode='const')[image_mode][0] + + if image_mode == 'image_depth': + img = -img + img = (img - img.min()) / (img.max() - img.min()) * 2 - 1 + + imgs.append(img) + + if gen_shapes: + # generate shapes + print('Generating shape for frame %d / %d ...' 
% (frame_idx, num_keyframes * w_frames)) + + samples, voxel_origin, voxel_size = create_samples(N=voxel_resolution, voxel_origin=[0, 0, 0], cube_length=G.rendering_kwargs['box_warp']) + samples = samples.to(device) + sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=device) + transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=device) + transformed_ray_directions_expanded[..., -1] = -1 + + head = 0 + with tqdm(total = samples.shape[1]) as pbar: + with torch.no_grad(): + while head < samples.shape[1]: + torch.manual_seed(0) + sigma = G.sample_mixed(samples[:, head:head+max_batch], transformed_ray_directions_expanded[:, :samples.shape[1]-head], w.unsqueeze(0), truncation_psi=psi, noise_mode='const')['sigma'] + sigmas[:, head:head+max_batch] = sigma + head += max_batch + pbar.update(max_batch) + + sigmas = sigmas.reshape((voxel_resolution, voxel_resolution, voxel_resolution)).cpu().numpy() + sigmas = np.flip(sigmas, 0) + + pad = int(30 * voxel_resolution / 256) + pad_top = int(38 * voxel_resolution / 256) + sigmas[:pad] = 0 + sigmas[-pad:] = 0 + sigmas[:, :pad] = 0 + sigmas[:, -pad_top:] = 0 + sigmas[:, :, :pad] = 0 + sigmas[:, :, -pad:] = 0 + + output_ply = True + if output_ply: + from shape_utils import convert_sdf_samples_to_ply + convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, os.path.join(outdir, f'{frame_idx:04d}_shape.ply'), level=10) + else: # output mrc + with mrcfile.new_mmap(outdir + f'{frame_idx:04d}_shape.mrc', overwrite=True, shape=sigmas.shape, mrc_mode=2) as mrc: + mrc.data[:] = sigmas + + video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h)) + video_out.close() + all_poses = np.stack(all_poses) + + if gen_shapes: + print(all_poses.shape) + with open(mp4.replace('.mp4', '_trajectory.npy'), 'wb') as f: + np.save(f, all_poses) + +#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List[int]]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. + + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]: + '''Parse a 'M,N' or 'MxN' integer tuple. + + Example: + '4x2' returns (4,2) + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + if m := re.match(r'^(\d+)[x,](\d+)$', s): + return (int(m.group(1)), int(m.group(2))) + raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +def extract_decoder_ckpt( + network_pkl: str, +): + """Render a latent vector interpolation video. + + Examples: + + \b + # Render a 4x2 grid of interpolations for seeds 0 through 31. 
+ python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-31 --grid=4x2 \\ + --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl + + Animation length and seed keyframes: + + The animation length is either determined based on the --seeds value or explicitly + specified using the --num-keyframes option. + + When num keyframes is specified with --num-keyframes, the output video length + will be 'num_keyframes*w_frames' frames. + + If --num-keyframes is not specified, the number of seeds given with + --seeds must be divisible by grid size W*H (--grid). In this case the + output video length will be '# seeds/(w*h)*w_frames' frames. + """ + + + print('Loading networks from "%s"...' % network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + # print the number of parameters of superresolution + print('Number of parameters of superresolution: %d' % sum(p.numel() for p in G.superresolution.parameters())) + print('Number of parameters of backbone: %d' % sum(p.numel() for p in G.backbone.parameters())) + # exit() # sum(p.numel() for p in model.parameters()) + # save the ckpt of G.decoder + decoder_ckpt = G.decoder.state_dict() + decoder_state_dict = {} + for k, v in decoder_ckpt.items(): + decoder_state_dict['decoder.'+k] = v + ckpt = decoder_state_dict + + torgb_ckpt = G.torgb.state_dict() + rgb_state_dict = {} + for k, v in torgb_ckpt.items(): + rgb_state_dict['torgb.'+k] = v + ckpt.update(rgb_state_dict) + + pose_branch_ckpt = G.pose_branch.state_dict() + pose_branch_state_dict = {} + for k, v in pose_branch_ckpt.items(): + pose_branch_state_dict['pose_branch.'+k] = v + ckpt.update(pose_branch_state_dict) + + # save parameters of G.decoder + + + params = {'z_dim': G.z_dim, + 'c_dim': G.c_dim, + 'w_dim': G.w_dim, + 'img_resolution': G.img_resolution, + 'img_channels': G.img_channels, + 'rendering_kwargs': G.rendering_kwargs, + 'batch_size': G.batch_size, + 'thickness': 0.25} + + print(params) + ckpt = {'params': params, # parameters of G.decoder + 'state_dict': ckpt} + print('save decoder ckpt to ./models/'+os.path.basename(network_pkl).replace('.pkl', '_decoder.ckpt')) + torch.save(ckpt, './models/'+os.path.basename(network_pkl).replace('.pkl', '_decoder.ckpt')) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + extract_decoder_ckpt() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/gui_utils/__init__.py b/3DPortraitGAN_pyramid/gui_utils/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/3DPortraitGAN_pyramid/gui_utils/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +# empty diff --git a/3DPortraitGAN_pyramid/gui_utils/gl_utils.py b/3DPortraitGAN_pyramid/gui_utils/gl_utils.py new file mode 100644 index 0000000..1312f02 --- /dev/null +++ b/3DPortraitGAN_pyramid/gui_utils/gl_utils.py @@ -0,0 +1,376 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import os +import functools +import contextlib +import numpy as np +import OpenGL.GL as gl +import OpenGL.GL.ARB.texture_float +import dnnlib + +#---------------------------------------------------------------------------- + +def init_egl(): + assert os.environ['PYOPENGL_PLATFORM'] == 'egl' # Must be set before importing OpenGL. + import OpenGL.EGL as egl + import ctypes + + # Initialize EGL. + display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) + assert display != egl.EGL_NO_DISPLAY + major = ctypes.c_int32() + minor = ctypes.c_int32() + ok = egl.eglInitialize(display, major, minor) + assert ok + assert major.value * 10 + minor.value >= 14 + + # Choose config. + config_attribs = [ + egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, + egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, + egl.EGL_NONE + ] + configs = (ctypes.c_int32 * 1)() + num_configs = ctypes.c_int32() + ok = egl.eglChooseConfig(display, config_attribs, configs, 1, num_configs) + assert ok + assert num_configs.value == 1 + config = configs[0] + + # Create dummy pbuffer surface. + surface_attribs = [ + egl.EGL_WIDTH, 1, + egl.EGL_HEIGHT, 1, + egl.EGL_NONE + ] + surface = egl.eglCreatePbufferSurface(display, config, surface_attribs) + assert surface != egl.EGL_NO_SURFACE + + # Setup GL context. 
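+    # (EGL is used so an OpenGL context can be created offscreen via the 1x1
+    # pbuffer surface above, i.e. without a windowing system; this is why the
+    # assert at the top requires PYOPENGL_PLATFORM='egl' to be set before
+    # OpenGL is imported.)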
+ ok = egl.eglBindAPI(egl.EGL_OPENGL_API) + assert ok + context = egl.eglCreateContext(display, config, egl.EGL_NO_CONTEXT, None) + assert context != egl.EGL_NO_CONTEXT + ok = egl.eglMakeCurrent(display, surface, surface, context) + assert ok + +#---------------------------------------------------------------------------- + +_texture_formats = { + ('uint8', 1): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_LUMINANCE, internalformat=gl.GL_LUMINANCE8), + ('uint8', 2): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_LUMINANCE_ALPHA, internalformat=gl.GL_LUMINANCE8_ALPHA8), + ('uint8', 3): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_RGB, internalformat=gl.GL_RGB8), + ('uint8', 4): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_RGBA, internalformat=gl.GL_RGBA8), + ('float32', 1): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_LUMINANCE, internalformat=OpenGL.GL.ARB.texture_float.GL_LUMINANCE32F_ARB), + ('float32', 2): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_LUMINANCE_ALPHA, internalformat=OpenGL.GL.ARB.texture_float.GL_LUMINANCE_ALPHA32F_ARB), + ('float32', 3): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_RGB, internalformat=gl.GL_RGB32F), + ('float32', 4): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_RGBA, internalformat=gl.GL_RGBA32F), +} + +def get_texture_format(dtype, channels): + return _texture_formats[(np.dtype(dtype).name, int(channels))] + +#---------------------------------------------------------------------------- + +def prepare_texture_data(image): + image = np.asarray(image) + if image.ndim == 2: + image = image[:, :, np.newaxis] + if image.dtype.name == 'float64': + image = image.astype('float32') + return image + +#---------------------------------------------------------------------------- + +def draw_pixels(image, *, pos=0, zoom=1, align=0, rint=True): + pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2]) + zoom = np.broadcast_to(np.asarray(zoom, dtype='float32'), [2]) + align = np.broadcast_to(np.asarray(align, dtype='float32'), [2]) + image = prepare_texture_data(image) + height, width, channels = image.shape + size = zoom * [width, height] + pos = pos - size * align + if rint: + pos = np.rint(pos) + fmt = get_texture_format(image.dtype, channels) + + gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_PIXEL_MODE_BIT) + gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT) + gl.glRasterPos2f(pos[0], pos[1]) + gl.glPixelZoom(zoom[0], -zoom[1]) + gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1) + gl.glDrawPixels(width, height, fmt.format, fmt.type, image) + gl.glPopClientAttrib() + gl.glPopAttrib() + +#---------------------------------------------------------------------------- + +def read_pixels(width, height, *, pos=0, dtype='uint8', channels=3): + pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2]) + dtype = np.dtype(dtype) + fmt = get_texture_format(dtype, channels) + image = np.empty([height, width, channels], dtype=dtype) + + gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT) + gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) + gl.glReadPixels(int(np.round(pos[0])), int(np.round(pos[1])), width, height, fmt.format, fmt.type, image) + gl.glPopClientAttrib() + return np.flipud(image) + +#---------------------------------------------------------------------------- + +class Texture: + def __init__(self, *, image=None, width=None, height=None, channels=None, dtype=None, bilinear=True, mipmap=True): + self.gl_id = None + self.bilinear = bilinear + self.mipmap = mipmap + + # Determine size and dtype. 
+ if image is not None: + image = prepare_texture_data(image) + self.height, self.width, self.channels = image.shape + self.dtype = image.dtype + else: + assert width is not None and height is not None + self.width = width + self.height = height + self.channels = channels if channels is not None else 3 + self.dtype = np.dtype(dtype) if dtype is not None else np.uint8 + + # Validate size and dtype. + assert isinstance(self.width, int) and self.width >= 0 + assert isinstance(self.height, int) and self.height >= 0 + assert isinstance(self.channels, int) and self.channels >= 1 + assert self.is_compatible(width=width, height=height, channels=channels, dtype=dtype) + + # Create texture object. + self.gl_id = gl.glGenTextures(1) + with self.bind(): + gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE) + gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE) + gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR if self.bilinear else gl.GL_NEAREST) + gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR_MIPMAP_LINEAR if self.mipmap else gl.GL_NEAREST) + self.update(image) + + def delete(self): + if self.gl_id is not None: + gl.glDeleteTextures([self.gl_id]) + self.gl_id = None + + def __del__(self): + try: + self.delete() + except: + pass + + @contextlib.contextmanager + def bind(self): + prev_id = gl.glGetInteger(gl.GL_TEXTURE_BINDING_2D) + gl.glBindTexture(gl.GL_TEXTURE_2D, self.gl_id) + yield + gl.glBindTexture(gl.GL_TEXTURE_2D, prev_id) + + def update(self, image): + if image is not None: + image = prepare_texture_data(image) + assert self.is_compatible(image=image) + with self.bind(): + fmt = get_texture_format(self.dtype, self.channels) + gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT) + gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1) + gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, fmt.internalformat, self.width, self.height, 0, fmt.format, fmt.type, image) + if self.mipmap: + gl.glGenerateMipmap(gl.GL_TEXTURE_2D) + gl.glPopClientAttrib() + + def draw(self, *, pos=0, zoom=1, align=0, rint=False, color=1, alpha=1, rounding=0): + zoom = np.broadcast_to(np.asarray(zoom, dtype='float32'), [2]) + size = zoom * [self.width, self.height] + with self.bind(): + gl.glPushAttrib(gl.GL_ENABLE_BIT) + gl.glEnable(gl.GL_TEXTURE_2D) + draw_rect(pos=pos, size=size, align=align, rint=rint, color=color, alpha=alpha, rounding=rounding) + gl.glPopAttrib() + + def is_compatible(self, *, image=None, width=None, height=None, channels=None, dtype=None): # pylint: disable=too-many-return-statements + if image is not None: + if image.ndim != 3: + return False + ih, iw, ic = image.shape + if not self.is_compatible(width=iw, height=ih, channels=ic, dtype=image.dtype): + return False + if width is not None and self.width != width: + return False + if height is not None and self.height != height: + return False + if channels is not None and self.channels != channels: + return False + if dtype is not None and self.dtype != dtype: + return False + return True + +#---------------------------------------------------------------------------- + +class Framebuffer: + def __init__(self, *, texture=None, width=None, height=None, channels=None, dtype=None, msaa=0): + self.texture = texture + self.gl_id = None + self.gl_color = None + self.gl_depth_stencil = None + self.msaa = msaa + + # Determine size and dtype. 
+ if texture is not None: + assert isinstance(self.texture, Texture) + self.width = texture.width + self.height = texture.height + self.channels = texture.channels + self.dtype = texture.dtype + else: + assert width is not None and height is not None + self.width = width + self.height = height + self.channels = channels if channels is not None else 4 + self.dtype = np.dtype(dtype) if dtype is not None else np.float32 + + # Validate size and dtype. + assert isinstance(self.width, int) and self.width >= 0 + assert isinstance(self.height, int) and self.height >= 0 + assert isinstance(self.channels, int) and self.channels >= 1 + assert width is None or width == self.width + assert height is None or height == self.height + assert channels is None or channels == self.channels + assert dtype is None or dtype == self.dtype + + # Create framebuffer object. + self.gl_id = gl.glGenFramebuffers(1) + with self.bind(): + + # Setup color buffer. + if self.texture is not None: + assert self.msaa == 0 + gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.texture.gl_id, 0) + else: + fmt = get_texture_format(self.dtype, self.channels) + self.gl_color = gl.glGenRenderbuffers(1) + gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.gl_color) + gl.glRenderbufferStorageMultisample(gl.GL_RENDERBUFFER, self.msaa, fmt.internalformat, self.width, self.height) + gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_RENDERBUFFER, self.gl_color) + + # Setup depth/stencil buffer. + self.gl_depth_stencil = gl.glGenRenderbuffers(1) + gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.gl_depth_stencil) + gl.glRenderbufferStorageMultisample(gl.GL_RENDERBUFFER, self.msaa, gl.GL_DEPTH24_STENCIL8, self.width, self.height) + gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_STENCIL_ATTACHMENT, gl.GL_RENDERBUFFER, self.gl_depth_stencil) + + def delete(self): + if self.gl_id is not None: + gl.glDeleteFramebuffers([self.gl_id]) + self.gl_id = None + if self.gl_color is not None: + gl.glDeleteRenderbuffers(1, [self.gl_color]) + self.gl_color = None + if self.gl_depth_stencil is not None: + gl.glDeleteRenderbuffers(1, [self.gl_depth_stencil]) + self.gl_depth_stencil = None + + def __del__(self): + try: + self.delete() + except: + pass + + @contextlib.contextmanager + def bind(self): + prev_fbo = gl.glGetInteger(gl.GL_FRAMEBUFFER_BINDING) + prev_rbo = gl.glGetInteger(gl.GL_RENDERBUFFER_BINDING) + gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.gl_id) + if self.width is not None and self.height is not None: + gl.glViewport(0, 0, self.width, self.height) + yield + gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, prev_fbo) + gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, prev_rbo) + + def blit(self, dst=None): + assert dst is None or isinstance(dst, Framebuffer) + with self.bind(): + gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0 if dst is None else dst.fbo) + gl.glBlitFramebuffer(0, 0, self.width, self.height, 0, 0, self.width, self.height, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) + +#---------------------------------------------------------------------------- + +def draw_shape(vertices, *, mode=gl.GL_TRIANGLE_FAN, pos=0, size=1, color=1, alpha=1): + assert vertices.ndim == 2 and vertices.shape[1] == 2 + pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2]) + size = np.broadcast_to(np.asarray(size, dtype='float32'), [2]) + color = np.broadcast_to(np.asarray(color, dtype='float32'), [3]) + alpha = np.clip(np.broadcast_to(np.asarray(alpha, dtype='float32'), []), 0, 1) + + 
gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT) + gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_TRANSFORM_BIT) + gl.glMatrixMode(gl.GL_MODELVIEW) + gl.glPushMatrix() + + gl.glEnableClientState(gl.GL_VERTEX_ARRAY) + gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY) + gl.glVertexPointer(2, gl.GL_FLOAT, 0, vertices) + gl.glTexCoordPointer(2, gl.GL_FLOAT, 0, vertices) + gl.glTranslate(pos[0], pos[1], 0) + gl.glScale(size[0], size[1], 1) + gl.glColor4f(color[0] * alpha, color[1] * alpha, color[2] * alpha, alpha) + gl.glDrawArrays(mode, 0, vertices.shape[0]) + + gl.glPopMatrix() + gl.glPopAttrib() + gl.glPopClientAttrib() + +#---------------------------------------------------------------------------- + +def draw_rect(*, pos=0, pos2=None, size=None, align=0, rint=False, color=1, alpha=1, rounding=0): + assert pos2 is None or size is None + pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2]) + pos2 = np.broadcast_to(np.asarray(pos2, dtype='float32'), [2]) if pos2 is not None else None + size = np.broadcast_to(np.asarray(size, dtype='float32'), [2]) if size is not None else None + size = size if size is not None else pos2 - pos if pos2 is not None else np.array([1, 1], dtype='float32') + pos = pos - size * align + if rint: + pos = np.rint(pos) + rounding = np.broadcast_to(np.asarray(rounding, dtype='float32'), [2]) + rounding = np.minimum(np.abs(rounding) / np.maximum(np.abs(size), 1e-8), 0.5) + if np.min(rounding) == 0: + rounding *= 0 + vertices = _setup_rect(float(rounding[0]), float(rounding[1])) + draw_shape(vertices, mode=gl.GL_TRIANGLE_FAN, pos=pos, size=size, color=color, alpha=alpha) + +@functools.lru_cache(maxsize=10000) +def _setup_rect(rx, ry): + t = np.linspace(0, np.pi / 2, 1 if max(rx, ry) == 0 else 64) + s = 1 - np.sin(t); c = 1 - np.cos(t) + x = [c * rx, 1 - s * rx, 1 - c * rx, s * rx] + y = [s * ry, c * ry, 1 - s * ry, 1 - c * ry] + v = np.stack([x, y], axis=-1).reshape(-1, 2) + return v.astype('float32') + +#---------------------------------------------------------------------------- + +def draw_circle(*, center=0, radius=100, hole=0, color=1, alpha=1): + hole = np.broadcast_to(np.asarray(hole, dtype='float32'), []) + vertices = _setup_circle(float(hole)) + draw_shape(vertices, mode=gl.GL_TRIANGLE_STRIP, pos=center, size=radius, color=color, alpha=alpha) + +@functools.lru_cache(maxsize=10000) +def _setup_circle(hole): + t = np.linspace(0, np.pi * 2, 128) + s = np.sin(t); c = np.cos(t) + v = np.stack([c, s, c * hole, s * hole], axis=-1).reshape(-1, 2) + return v.astype('float32') + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/gui_utils/glfw_window.py b/3DPortraitGAN_pyramid/gui_utils/glfw_window.py new file mode 100644 index 0000000..aeb96e8 --- /dev/null +++ b/3DPortraitGAN_pyramid/gui_utils/glfw_window.py @@ -0,0 +1,231 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import time +import glfw +import OpenGL.GL as gl +from . 
import gl_utils + +#---------------------------------------------------------------------------- + +class GlfwWindow: # pylint: disable=too-many-public-methods + def __init__(self, *, title='GlfwWindow', window_width=1920, window_height=1080, deferred_show=True, close_on_esc=True): + self._glfw_window = None + self._drawing_frame = False + self._frame_start_time = None + self._frame_delta = 0 + self._fps_limit = None + self._vsync = None + self._skip_frames = 0 + self._deferred_show = deferred_show + self._close_on_esc = close_on_esc + self._esc_pressed = False + self._drag_and_drop_paths = None + self._capture_next_frame = False + self._captured_frame = None + + # Create window. + glfw.init() + glfw.window_hint(glfw.VISIBLE, False) + self._glfw_window = glfw.create_window(width=window_width, height=window_height, title=title, monitor=None, share=None) + self._attach_glfw_callbacks() + self.make_context_current() + + # Adjust window. + self.set_vsync(False) + self.set_window_size(window_width, window_height) + if not self._deferred_show: + glfw.show_window(self._glfw_window) + + def close(self): + if self._drawing_frame: + self.end_frame() + if self._glfw_window is not None: + glfw.destroy_window(self._glfw_window) + self._glfw_window = None + #glfw.terminate() # Commented out to play it nice with other glfw clients. + + def __del__(self): + try: + self.close() + except: + pass + + @property + def window_width(self): + return self.content_width + + @property + def window_height(self): + return self.content_height + self.title_bar_height + + @property + def content_width(self): + width, _height = glfw.get_window_size(self._glfw_window) + return width + + @property + def content_height(self): + _width, height = glfw.get_window_size(self._glfw_window) + return height + + @property + def title_bar_height(self): + _left, top, _right, _bottom = glfw.get_window_frame_size(self._glfw_window) + return top + + @property + def monitor_width(self): + _, _, width, _height = glfw.get_monitor_workarea(glfw.get_primary_monitor()) + return width + + @property + def monitor_height(self): + _, _, _width, height = glfw.get_monitor_workarea(glfw.get_primary_monitor()) + return height + + @property + def frame_delta(self): + return self._frame_delta + + def set_title(self, title): + glfw.set_window_title(self._glfw_window, title) + + def set_window_size(self, width, height): + width = min(width, self.monitor_width) + height = min(height, self.monitor_height) + glfw.set_window_size(self._glfw_window, width, max(height - self.title_bar_height, 0)) + if width == self.monitor_width and height == self.monitor_height: + self.maximize() + + def set_content_size(self, width, height): + self.set_window_size(width, height + self.title_bar_height) + + def maximize(self): + glfw.maximize_window(self._glfw_window) + + def set_position(self, x, y): + glfw.set_window_pos(self._glfw_window, x, y + self.title_bar_height) + + def center(self): + self.set_position((self.monitor_width - self.window_width) // 2, (self.monitor_height - self.window_height) // 2) + + def set_vsync(self, vsync): + vsync = bool(vsync) + if vsync != self._vsync: + glfw.swap_interval(1 if vsync else 0) + self._vsync = vsync + + def set_fps_limit(self, fps_limit): + self._fps_limit = int(fps_limit) + + def should_close(self): + return glfw.window_should_close(self._glfw_window) or (self._close_on_esc and self._esc_pressed) + + def skip_frame(self): + self.skip_frames(1) + + def skip_frames(self, num): # Do not update window for the next N frames. 
+ self._skip_frames = max(self._skip_frames, int(num)) + + def is_skipping_frames(self): + return self._skip_frames > 0 + + def capture_next_frame(self): + self._capture_next_frame = True + + def pop_captured_frame(self): + frame = self._captured_frame + self._captured_frame = None + return frame + + def pop_drag_and_drop_paths(self): + paths = self._drag_and_drop_paths + self._drag_and_drop_paths = None + return paths + + def draw_frame(self): # To be overridden by subclass. + self.begin_frame() + # Rendering code goes here. + self.end_frame() + + def make_context_current(self): + if self._glfw_window is not None: + glfw.make_context_current(self._glfw_window) + + def begin_frame(self): + # End previous frame. + if self._drawing_frame: + self.end_frame() + + # Apply FPS limit. + if self._frame_start_time is not None and self._fps_limit is not None: + delay = self._frame_start_time - time.perf_counter() + 1 / self._fps_limit + if delay > 0: + time.sleep(delay) + cur_time = time.perf_counter() + if self._frame_start_time is not None: + self._frame_delta = cur_time - self._frame_start_time + self._frame_start_time = cur_time + + # Process events. + glfw.poll_events() + + # Begin frame. + self._drawing_frame = True + self.make_context_current() + + # Initialize GL state. + gl.glViewport(0, 0, self.content_width, self.content_height) + gl.glMatrixMode(gl.GL_PROJECTION) + gl.glLoadIdentity() + gl.glTranslate(-1, 1, 0) + gl.glScale(2 / max(self.content_width, 1), -2 / max(self.content_height, 1), 1) + gl.glMatrixMode(gl.GL_MODELVIEW) + gl.glLoadIdentity() + gl.glEnable(gl.GL_BLEND) + gl.glBlendFunc(gl.GL_ONE, gl.GL_ONE_MINUS_SRC_ALPHA) # Pre-multiplied alpha. + + # Clear. + gl.glClearColor(0, 0, 0, 1) + gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) + + def end_frame(self): + assert self._drawing_frame + self._drawing_frame = False + + # Skip frames if requested. + if self._skip_frames > 0: + self._skip_frames -= 1 + return + + # Capture frame if requested. + if self._capture_next_frame: + self._captured_frame = gl_utils.read_pixels(self.content_width, self.content_height) + self._capture_next_frame = False + + # Update window. + if self._deferred_show: + glfw.show_window(self._glfw_window) + self._deferred_show = False + glfw.swap_buffers(self._glfw_window) + + def _attach_glfw_callbacks(self): + glfw.set_key_callback(self._glfw_window, self._glfw_key_callback) + glfw.set_drop_callback(self._glfw_window, self._glfw_drop_callback) + + def _glfw_key_callback(self, _window, key, _scancode, action, _mods): + if action == glfw.PRESS and key == glfw.KEY_ESCAPE: + self._esc_pressed = True + + def _glfw_drop_callback(self, _window, paths): + self._drag_and_drop_paths = paths + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/gui_utils/imgui_utils.py b/3DPortraitGAN_pyramid/gui_utils/imgui_utils.py new file mode 100644 index 0000000..05a8357 --- /dev/null +++ b/3DPortraitGAN_pyramid/gui_utils/imgui_utils.py @@ -0,0 +1,171 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import contextlib +import imgui + +#---------------------------------------------------------------------------- + +def set_default_style(color_scheme='dark', spacing=9, indent=23, scrollbar=27): + s = imgui.get_style() + s.window_padding = [spacing, spacing] + s.item_spacing = [spacing, spacing] + s.item_inner_spacing = [spacing, spacing] + s.columns_min_spacing = spacing + s.indent_spacing = indent + s.scrollbar_size = scrollbar + s.frame_padding = [4, 3] + s.window_border_size = 1 + s.child_border_size = 1 + s.popup_border_size = 1 + s.frame_border_size = 1 + s.window_rounding = 0 + s.child_rounding = 0 + s.popup_rounding = 3 + s.frame_rounding = 3 + s.scrollbar_rounding = 3 + s.grab_rounding = 3 + + getattr(imgui, f'style_colors_{color_scheme}')(s) + c0 = s.colors[imgui.COLOR_MENUBAR_BACKGROUND] + c1 = s.colors[imgui.COLOR_FRAME_BACKGROUND] + s.colors[imgui.COLOR_POPUP_BACKGROUND] = [x * 0.7 + y * 0.3 for x, y in zip(c0, c1)][:3] + [1] + +#---------------------------------------------------------------------------- + +@contextlib.contextmanager +def grayed_out(cond=True): + if cond: + s = imgui.get_style() + text = s.colors[imgui.COLOR_TEXT_DISABLED] + grab = s.colors[imgui.COLOR_SCROLLBAR_GRAB] + back = s.colors[imgui.COLOR_MENUBAR_BACKGROUND] + imgui.push_style_color(imgui.COLOR_TEXT, *text) + imgui.push_style_color(imgui.COLOR_CHECK_MARK, *grab) + imgui.push_style_color(imgui.COLOR_SLIDER_GRAB, *grab) + imgui.push_style_color(imgui.COLOR_SLIDER_GRAB_ACTIVE, *grab) + imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND, *back) + imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_HOVERED, *back) + imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_ACTIVE, *back) + imgui.push_style_color(imgui.COLOR_BUTTON, *back) + imgui.push_style_color(imgui.COLOR_BUTTON_HOVERED, *back) + imgui.push_style_color(imgui.COLOR_BUTTON_ACTIVE, *back) + imgui.push_style_color(imgui.COLOR_HEADER, *back) + imgui.push_style_color(imgui.COLOR_HEADER_HOVERED, *back) + imgui.push_style_color(imgui.COLOR_HEADER_ACTIVE, *back) + imgui.push_style_color(imgui.COLOR_POPUP_BACKGROUND, *back) + yield + imgui.pop_style_color(14) + else: + yield + +#---------------------------------------------------------------------------- + +@contextlib.contextmanager +def item_width(width=None): + if width is not None: + imgui.push_item_width(width) + yield + imgui.pop_item_width() + else: + yield + +#---------------------------------------------------------------------------- + +def scoped_by_object_id(method): + def decorator(self, *args, **kwargs): + imgui.push_id(str(id(self))) + res = method(self, *args, **kwargs) + imgui.pop_id() + return res + return decorator + +#---------------------------------------------------------------------------- + +def button(label, width=0, enabled=True): + with grayed_out(not enabled): + clicked = imgui.button(label, width=width) + clicked = clicked and enabled + return clicked + +#---------------------------------------------------------------------------- + +def collapsing_header(text, visible=None, flags=0, default=False, enabled=True, show=True): + expanded = False + if show: + if default: + flags |= imgui.TREE_NODE_DEFAULT_OPEN + if not enabled: + flags |= imgui.TREE_NODE_LEAF + with grayed_out(not enabled): + expanded, visible = imgui.collapsing_header(text, visible=visible, 
flags=flags) + expanded = expanded and enabled + return expanded, visible + +#---------------------------------------------------------------------------- + +def popup_button(label, width=0, enabled=True): + if button(label, width, enabled): + imgui.open_popup(label) + opened = imgui.begin_popup(label) + return opened + +#---------------------------------------------------------------------------- + +def input_text(label, value, buffer_length, flags, width=None, help_text=''): + old_value = value + color = list(imgui.get_style().colors[imgui.COLOR_TEXT]) + if value == '': + color[-1] *= 0.5 + with item_width(width): + imgui.push_style_color(imgui.COLOR_TEXT, *color) + value = value if value != '' else help_text + changed, value = imgui.input_text(label, value, buffer_length, flags) + value = value if value != help_text else '' + imgui.pop_style_color(1) + if not flags & imgui.INPUT_TEXT_ENTER_RETURNS_TRUE: + changed = (value != old_value) + return changed, value + +#---------------------------------------------------------------------------- + +def drag_previous_control(enabled=True): + dragging = False + dx = 0 + dy = 0 + if imgui.begin_drag_drop_source(imgui.DRAG_DROP_SOURCE_NO_PREVIEW_TOOLTIP): + if enabled: + dragging = True + dx, dy = imgui.get_mouse_drag_delta() + imgui.reset_mouse_drag_delta() + imgui.end_drag_drop_source() + return dragging, dx, dy + +#---------------------------------------------------------------------------- + +def drag_button(label, width=0, enabled=True): + clicked = button(label, width=width, enabled=enabled) + dragging, dx, dy = drag_previous_control(enabled=enabled) + return clicked, dragging, dx, dy + +#---------------------------------------------------------------------------- + +def drag_hidden_window(label, x, y, width, height, enabled=True): + imgui.push_style_color(imgui.COLOR_WINDOW_BACKGROUND, 0, 0, 0, 0) + imgui.push_style_color(imgui.COLOR_BORDER, 0, 0, 0, 0) + imgui.set_next_window_position(x, y) + imgui.set_next_window_size(width, height) + imgui.begin(label, closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE)) + dragging, dx, dy = drag_previous_control(enabled=enabled) + imgui.end() + imgui.pop_style_color(2) + return dragging, dx, dy + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/gui_utils/imgui_window.py b/3DPortraitGAN_pyramid/gui_utils/imgui_window.py new file mode 100644 index 0000000..0e1a638 --- /dev/null +++ b/3DPortraitGAN_pyramid/gui_utils/imgui_window.py @@ -0,0 +1,105 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import os +import imgui +import imgui.integrations.glfw + +from . import glfw_window +from . import imgui_utils +from . 
import text_utils + +#---------------------------------------------------------------------------- + +class ImguiWindow(glfw_window.GlfwWindow): + def __init__(self, *, title='ImguiWindow', font=None, font_sizes=range(14,24), **glfw_kwargs): + if font is None: + font = text_utils.get_default_font() + font_sizes = {int(size) for size in font_sizes} + super().__init__(title=title, **glfw_kwargs) + + # Init fields. + self._imgui_context = None + self._imgui_renderer = None + self._imgui_fonts = None + self._cur_font_size = max(font_sizes) + + # Delete leftover imgui.ini to avoid unexpected behavior. + if os.path.isfile('imgui.ini'): + os.remove('imgui.ini') + + # Init ImGui. + self._imgui_context = imgui.create_context() + self._imgui_renderer = _GlfwRenderer(self._glfw_window) + self._attach_glfw_callbacks() + imgui.get_io().ini_saving_rate = 0 # Disable creating imgui.ini at runtime. + imgui.get_io().mouse_drag_threshold = 0 # Improve behavior with imgui_utils.drag_custom(). + self._imgui_fonts = {size: imgui.get_io().fonts.add_font_from_file_ttf(font, size) for size in font_sizes} + self._imgui_renderer.refresh_font_texture() + + def close(self): + self.make_context_current() + self._imgui_fonts = None + if self._imgui_renderer is not None: + self._imgui_renderer.shutdown() + self._imgui_renderer = None + if self._imgui_context is not None: + #imgui.destroy_context(self._imgui_context) # Commented out to avoid creating imgui.ini at the end. + self._imgui_context = None + super().close() + + def _glfw_key_callback(self, *args): + super()._glfw_key_callback(*args) + self._imgui_renderer.keyboard_callback(*args) + + @property + def font_size(self): + return self._cur_font_size + + @property + def spacing(self): + return round(self._cur_font_size * 0.4) + + def set_font_size(self, target): # Applied on next frame. + self._cur_font_size = min((abs(key - target), key) for key in self._imgui_fonts.keys())[1] + + def begin_frame(self): + # Begin glfw frame. + super().begin_frame() + + # Process imgui events. + self._imgui_renderer.mouse_wheel_multiplier = self._cur_font_size / 10 + if self.content_width > 0 and self.content_height > 0: + self._imgui_renderer.process_inputs() + + # Begin imgui frame. + imgui.new_frame() + imgui.push_font(self._imgui_fonts[self._cur_font_size]) + imgui_utils.set_default_style(spacing=self.spacing, indent=self.font_size, scrollbar=self.font_size+4) + + def end_frame(self): + imgui.pop_font() + imgui.render() + imgui.end_frame() + self._imgui_renderer.render(imgui.get_draw_data()) + super().end_frame() + +#---------------------------------------------------------------------------- +# Wrapper class for GlfwRenderer to fix a mouse wheel bug on Linux. + +class _GlfwRenderer(imgui.integrations.glfw.GlfwRenderer): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.mouse_wheel_multiplier = 1 + + def scroll_callback(self, window, x_offset, y_offset): + self.io.mouse_wheel += y_offset * self.mouse_wheel_multiplier + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/gui_utils/text_utils.py b/3DPortraitGAN_pyramid/gui_utils/text_utils.py new file mode 100644 index 0000000..e64a34d --- /dev/null +++ b/3DPortraitGAN_pyramid/gui_utils/text_utils.py @@ -0,0 +1,125 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import functools +from typing import Optional + +import dnnlib +import numpy as np +import PIL.Image +import PIL.ImageFont +import scipy.ndimage + +from . import gl_utils + +#---------------------------------------------------------------------------- + +def get_default_font(): + url = 'http://fonts.gstatic.com/s/opensans/v17/mem8YaGs126MiZpBA-U1UpcaXcl0Aw.ttf' # Open Sans regular + return dnnlib.util.open_url(url, return_filename=True) + +#---------------------------------------------------------------------------- + +@functools.lru_cache(maxsize=None) +def get_pil_font(font=None, size=32): + if font is None: + font = get_default_font() + return PIL.ImageFont.truetype(font=font, size=size) + +#---------------------------------------------------------------------------- + +def get_array(string, *, dropshadow_radius: int=None, **kwargs): + if dropshadow_radius is not None: + offset_x = int(np.ceil(dropshadow_radius*2/3)) + offset_y = int(np.ceil(dropshadow_radius*2/3)) + return _get_array_priv(string, dropshadow_radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) + else: + return _get_array_priv(string, **kwargs) + +@functools.lru_cache(maxsize=10000) +def _get_array_priv( + string: str, *, + size: int = 32, + max_width: Optional[int]=None, + max_height: Optional[int]=None, + min_size=10, + shrink_coef=0.8, + dropshadow_radius: int=None, + offset_x: int=None, + offset_y: int=None, + **kwargs +): + cur_size = size + array = None + while True: + if dropshadow_radius is not None: + # separate implementation for dropshadow text rendering + array = _get_array_impl_dropshadow(string, size=cur_size, radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) + else: + array = _get_array_impl(string, size=cur_size, **kwargs) + height, width, _ = array.shape + if (max_width is None or width <= max_width) and (max_height is None or height <= max_height) or (cur_size <= min_size): + break + cur_size = max(int(cur_size * shrink_coef), min_size) + return array + +#---------------------------------------------------------------------------- + +@functools.lru_cache(maxsize=10000) +def _get_array_impl(string, *, font=None, size=32, outline=0, outline_pad=3, outline_coef=3, outline_exp=2, line_pad: int=None): + pil_font = get_pil_font(font=font, size=size) + lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] + lines = [np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines] + width = max(line.shape[1] for line in lines) + lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines] + line_spacing = line_pad if line_pad is not None else size // 2 + lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:] + mask = np.concatenate(lines, axis=0) + alpha = mask + if outline > 0: + mask = np.pad(mask, int(np.ceil(outline * outline_pad)), mode='constant', constant_values=0) + alpha = mask.astype(np.float32) / 255 + alpha = 
scipy.ndimage.gaussian_filter(alpha, outline) + alpha = 1 - np.maximum(1 - alpha * outline_coef, 0) ** outline_exp + alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) + alpha = np.maximum(alpha, mask) + return np.stack([mask, alpha], axis=-1) + +#---------------------------------------------------------------------------- + +@functools.lru_cache(maxsize=10000) +def _get_array_impl_dropshadow(string, *, font=None, size=32, radius: int, offset_x: int, offset_y: int, line_pad: int=None, **kwargs): + assert (offset_x > 0) and (offset_y > 0) + pil_font = get_pil_font(font=font, size=size) + lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] + lines = [np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines] + width = max(line.shape[1] for line in lines) + lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines] + line_spacing = line_pad if line_pad is not None else size // 2 + lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:] + mask = np.concatenate(lines, axis=0) + alpha = mask + + mask = np.pad(mask, 2*radius + max(abs(offset_x), abs(offset_y)), mode='constant', constant_values=0) + alpha = mask.astype(np.float32) / 255 + alpha = scipy.ndimage.gaussian_filter(alpha, radius) + alpha = 1 - np.maximum(1 - alpha * 1.5, 0) ** 1.4 + alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) + alpha = np.pad(alpha, [(offset_y, 0), (offset_x, 0)], mode='constant')[:-offset_y, :-offset_x] + alpha = np.maximum(alpha, mask) + return np.stack([mask, alpha], axis=-1) + +#---------------------------------------------------------------------------- + +@functools.lru_cache(maxsize=10000) +def get_texture(string, bilinear=True, mipmap=True, **kwargs): + return gl_utils.Texture(image=get_array(string, **kwargs), bilinear=bilinear, mipmap=mipmap) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/latent_optimization_inversion.py b/3DPortraitGAN_pyramid/latent_optimization_inversion.py new file mode 100644 index 0000000..3c3165f --- /dev/null +++ b/3DPortraitGAN_pyramid/latent_optimization_inversion.py @@ -0,0 +1,242 @@ +import glob + +import numpy as np +import dnnlib +import legacy +from proj.projector import w_projector, w_plus_projector +from proj.configs import global_config, hyperparameters +from PIL import Image +import torch +import json +import os +from torch_utils.ops import upfirdn2d +from training.dual_discriminator import filtered_resizing + + +# ---------------------------------------------------------------------------- +class Space_Regulizer: + def __init__(self, original_G, lpips_net): + self.original_G = original_G + self.morphing_regulizer_alpha = hyperparameters.regulizer_alpha + self.lpips_loss = lpips_net + + def get_morphed_w_code(self, new_w_code, fixed_w): + interpolation_direction = new_w_code - fixed_w + interpolation_direction_norm = torch.norm(interpolation_direction, p=2) + direction_to_move = hyperparameters.regulizer_alpha * interpolation_direction / interpolation_direction_norm + result_w = fixed_w + direction_to_move + self.morphing_regulizer_alpha * fixed_w + (1 - self.morphing_regulizer_alpha) * new_w_code + + return result_w + + def get_image_from_ws(self, w_codes, G): + return torch.cat([G.synthesis(w_code, noise_mode='none', force_fp32=True) for w_code in w_codes]) + + def ball_holder_loss_lazy(self, new_G, num_of_sampled_latents, w_batch, use_wandb=False): + 
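+        # Locality regularizer: sample random latents, morph each one a small step
+        # toward the pivot w_batch (get_morphed_w_code above), and penalize the tuned
+        # generator new_G for drifting from the frozen original_G on those nearby codes
+        # via L2 and LPIPS terms. Note that l2_loss is defined below as a plain
+        # module-level function, so the L2 branch calls it as l2_loss(old_img, new_img).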
loss = 0.0 + + z_samples = np.random.randn(num_of_sampled_latents, self.original_G.z_dim) + w_samples = self.original_G.mapping(torch.from_numpy(z_samples).to(global_config.device), None, + truncation_psi=0.5) + territory_indicator_ws = [self.get_morphed_w_code(w_code.unsqueeze(0), w_batch) for w_code in w_samples] + + for w_code in territory_indicator_ws: + new_img = new_G.synthesis(w_code, noise_mode='none', force_fp32=True) + with torch.no_grad(): + old_img = self.original_G.synthesis(w_code, noise_mode='none', force_fp32=True) + + if hyperparameters.regulizer_l2_lambda > 0: + l2_loss_val = l2_loss.l2_loss(old_img, new_img) + + loss += l2_loss_val * hyperparameters.regulizer_l2_lambda + + if hyperparameters.regulizer_lpips_lambda > 0: + loss_lpips = self.lpips_loss(old_img, new_img) + loss_lpips = torch.mean(torch.squeeze(loss_lpips)) + + loss += loss_lpips * hyperparameters.regulizer_lpips_lambda + + return loss / len(territory_indicator_ws) + + def space_regulizer_loss(self, new_G, w_batch, use_wandb): + ret_val = self.ball_holder_loss_lazy(new_G, hyperparameters.latent_ball_num_of_samples, w_batch, use_wandb) + return ret_val + + + + + +def l2_loss(real_images, generated_images): + l2_criterion = torch.nn.MSELoss(reduction='mean') + loss = l2_criterion(real_images, generated_images) + return loss + + +def toogle_grad(model, flag=True): + for p in model.parameters(): + p.requires_grad = flag + + +def run_D_pose_prediction(img, c, blur_sigma=0, D=None): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div( + blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + pose, _ = D.predict_pose(img, c) + return pose + + +def get_pose_params(real_img, real_seg, real_c, D=None, neural_rendering_resolution=None, blur_sigma=None, + resample_filter=None, filter_mode=None): + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=resample_filter, + filter_mode=filter_mode) + + real_seg_raw = filtered_resizing(real_seg, size=neural_rendering_resolution, f=resample_filter, + filter_mode=filter_mode) + + if True: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw, 'image_mask': real_seg_raw} + + # get pose_params from real image + real_img_tmp_image = real_img['image'].detach().requires_grad_(True) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(True) + real_img_tmp_image_mask = real_img['image_mask'].detach().requires_grad_(True) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw, + 'image_mask': real_img_tmp_image_mask} + + predicted_real_pose = run_D_pose_prediction(real_img_tmp, real_c, blur_sigma=blur_sigma, D=D) + return predicted_real_pose + + +if __name__ == '__main__': + # input_dir + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--input_dir', type=str, default='input') + parser.add_argument('--model_pkl', type=str, default='input') + parser.add_argument('--pose_prediction_kwargs_path', type=str, default='input') + input_dir = parser.parse_args().input_dir + model_pkl = parser.parse_args().model_pkl + pose_prediction_kwargs_path = 
parser.parse_args().pose_prediction_kwargs_path + # ---------------------------------------------------------------------------- + sampling_multiplier = 2.0 + + print('Loading networks from "%s"...' % model_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(model_pkl) as f: + resume_data = legacy.load_network_pkl(f) + print('resume_data', resume_data.keys()) + G = resume_data['G_ema'].to(device) # type: ignore + D = resume_data['D_ema'].to(device) # type: ignore + + G.set_batch_size(1) + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier) + + G.rendering_kwargs['ray_start'] = 2.35 + + print('Loading pose_prediction_kwargs from "%s"...' % pose_prediction_kwargs_path) + with open(pose_prediction_kwargs_path, 'r') as f: + pose_predict_kwargs = json.load(f) + + + + + + camera_path = os.path.join(input_dir, 'result.json') + print('Loading camera pose from "%s"...' % camera_path) + with open(camera_path, 'r') as f: + camera_poses = json.load(f) + + print('Loading images from "%s"...' % input_dir) + image_base_dir = os.path.join(input_dir, 'aligned_images') + mask_base_path = os.path.join(input_dir, 'mask') + + images = glob.glob(os.path.join(image_base_dir, '*')) + + print('images', images) + for image_path in images: + image_name = os.path.basename(image_path) + mask_path = os.path.join(mask_base_path, image_name) + print('projecting image: "%s"' % image_path) + image = Image.open(image_path).convert('RGB') + mask = Image.open(mask_path) + # image_name = os.path.basename(paths_config.input_data_path) + camera_pose = camera_poses[image_name] + cam2world_pose = torch.tensor(camera_pose['camera_pose'], device=device) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + with torch.no_grad(): + image_p = image.resize((G.img_resolution, G.img_resolution), Image.BILINEAR) + image_p = np.array(image_p) + image_p = image_p.transpose(2, 0, 1) + image_p = torch.tensor(image_p, device=device) + image_p = image_p.to(device).to(torch.float32) / 127.5 - 1 + image_p = image_p.unsqueeze(0) + + mask_p = np.array(mask)[:, :, None] + mask_p = mask_p.transpose(2, 0, 1) + mask_p = torch.tensor(mask_p, device=device) + mask_p = mask_p.to(device).to(torch.float32) / 255.0 + mask_p = mask_p.unsqueeze(0) + + resample_filter = pose_predict_kwargs['resample_filter'] + resample_filter = torch.tensor(resample_filter, device=device).to(torch.float32) + + p = get_pose_params(real_img=image_p, + real_seg=mask_p, + real_c=c, + D=D, + neural_rendering_resolution=pose_predict_kwargs['neural_rendering_resolution'], + blur_sigma=pose_predict_kwargs['blur_sigma'], + resample_filter=resample_filter, + filter_mode=pose_predict_kwargs['filter_mode']) + + # ---------------------------------------------------------------------------- + image_name = image_name[:-4] + # coach = SingleIDCoach(None, False, c, p) + # coach.train(image=image, image_name=image_name[:-4]) + w_path_dir = f'{input_dir}/inversion' + os.makedirs(w_path_dir, exist_ok=True) + use_ball_holder = True + # for fname, image in tqdm(self.data_loader): + # image_name = fname[0] + + embedding_dir = f'{w_path_dir}/{image_name}' + os.makedirs(embedding_dir, exist_ok=True) + image.save(f'{embedding_dir}/original.png') + 
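+        # Pivot latent for this image: reuse a previously optimized code if one was
+        # cached in the embedding directory, otherwise optimize a new w pivot with
+        # w_projector.project() against the aligned image, its camera label c, and
+        # the body pose p predicted by the discriminator above.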
w_pivot = None + # if hyperparameters.use_last_w_pivots: + # w_pivot = self.load_inversions(w_path_dir, image_name) + # elif not hyperparameters.use_last_w_pivots or w_pivot is None: + # w_pivot = self.calc_inversions(image, image_name) + # image = torch.tensor(image, device=device) + if os.path.exists(f'{embedding_dir}/0.pt'): + w_pivot = torch.load(f'{embedding_dir}/0.pt').to(global_config.device) + else: + image = image.resize((G.img_resolution, G.img_resolution), Image.BILINEAR) + image = np.array(image) + image = image.transpose(2, 0, 1) + image = torch.tensor(image, device=device) + image = image.to(device).to(torch.float32) / 127.5 - 1 + image = image.unsqueeze(0) + id_image = torch.squeeze((image.to(global_config.device) + 1) / 2) * 255 + # id_image = torch.squeeze((image.to(global_config.device) + 1) / 2) * 255 + w_pivot = w_projector.project(G, c, p, embedding_dir, id_image, device=torch.device('cuda'), w_avg_samples=600, + num_steps=500, + w_name=image_name) + # w_pivot = w_pivot.detach().clone().to(global_config.device) + w_pivot = w_pivot.to(global_config.device) + torch.save(w_pivot, f'{embedding_dir}/inversion.pt') + diff --git a/3DPortraitGAN_pyramid/legacy.py b/3DPortraitGAN_pyramid/legacy.py new file mode 100644 index 0000000..f30944a --- /dev/null +++ b/3DPortraitGAN_pyramid/legacy.py @@ -0,0 +1,325 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Converting legacy network pickle into the new format.""" + +import click +import pickle +import re +import copy +import numpy as np +import torch +import dnnlib +from torch_utils import misc + +#---------------------------------------------------------------------------- + +def load_network_pkl(f, force_fp16=False): + data = _LegacyUnpickler(f).load() + + # Legacy TensorFlow pickle => convert. + if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data): + tf_G, tf_D, tf_Gs = data + G = convert_tf_generator(tf_G) + D = convert_tf_discriminator(tf_D) + G_ema = convert_tf_generator(tf_Gs) + data = dict(G=G, D=D, G_ema=G_ema) + + # Add missing fields. + if 'training_set_kwargs' not in data: + data['training_set_kwargs'] = None + if 'augment_pipe' not in data: + data['augment_pipe'] = None + + # Validate contents. + assert isinstance(data['G'], torch.nn.Module) + assert isinstance(data['D'], torch.nn.Module) + assert isinstance(data['G_ema'], torch.nn.Module) + assert isinstance(data['training_set_kwargs'], (dict, type(None))) + assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None))) + + # Force FP16. 
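+    # When requested, patch each network's init kwargs to run the last resolutions in
+    # FP16 (num_fp16_res=4, conv_clamp=256), rebuild the module with those kwargs, and
+    # copy the parameters and buffers over from the loaded network.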
+ if force_fp16: + for key in ['G', 'D', 'G_ema']: + old = data[key] + kwargs = copy.deepcopy(old.init_kwargs) + fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs) + fp16_kwargs.num_fp16_res = 4 + fp16_kwargs.conv_clamp = 256 + if kwargs != old.init_kwargs: + new = type(old)(**kwargs).eval().requires_grad_(False) + misc.copy_params_and_buffers(old, new, require_all=True) + data[key] = new + return data + +#---------------------------------------------------------------------------- + +class _TFNetworkStub(dnnlib.EasyDict): + pass + +class _LegacyUnpickler(pickle.Unpickler): + def find_class(self, module, name): + if module == 'dnnlib.tflib.network' and name == 'Network': + return _TFNetworkStub + return super().find_class(module, name) + +#---------------------------------------------------------------------------- + +def _collect_tf_params(tf_net): + # pylint: disable=protected-access + tf_params = dict() + def recurse(prefix, tf_net): + for name, value in tf_net.variables: + tf_params[prefix + name] = value + for name, comp in tf_net.components.items(): + recurse(prefix + name + '/', comp) + recurse('', tf_net) + return tf_params + +#---------------------------------------------------------------------------- + +def _populate_module_params(module, *patterns): + for name, tensor in misc.named_params_and_buffers(module): + found = False + value = None + for pattern, value_fn in zip(patterns[0::2], patterns[1::2]): + match = re.fullmatch(pattern, name) + if match: + found = True + if value_fn is not None: + value = value_fn(*match.groups()) + break + try: + assert found + if value is not None: + tensor.copy_(torch.from_numpy(np.array(value))) + except: + print(name, list(tensor.shape)) + raise + +#---------------------------------------------------------------------------- + +def convert_tf_generator(tf_G): + if tf_G.version < 4: + raise ValueError('TensorFlow pickle version too low') + + # Collect kwargs. + tf_kwargs = tf_G.static_kwargs + known_kwargs = set() + def kwarg(tf_name, default=None, none=None): + known_kwargs.add(tf_name) + val = tf_kwargs.get(tf_name, default) + return val if val is not None else none + + # Convert kwargs. + from training import networks_stylegan2 + network_class = networks_stylegan2.Generator + kwargs = dnnlib.EasyDict( + z_dim = kwarg('latent_size', 512), + c_dim = kwarg('label_size', 0), + w_dim = kwarg('dlatent_size', 512), + img_resolution = kwarg('resolution', 1024), + img_channels = kwarg('num_channels', 3), + channel_base = kwarg('fmap_base', 16384) * 2, + channel_max = kwarg('fmap_max', 512), + num_fp16_res = kwarg('num_fp16_res', 0), + conv_clamp = kwarg('conv_clamp', None), + architecture = kwarg('architecture', 'skip'), + resample_filter = kwarg('resample_kernel', [1,3,3,1]), + use_noise = kwarg('use_noise', True), + activation = kwarg('nonlinearity', 'lrelu'), + mapping_kwargs = dnnlib.EasyDict( + num_layers = kwarg('mapping_layers', 8), + embed_features = kwarg('label_fmaps', None), + layer_features = kwarg('mapping_fmaps', None), + activation = kwarg('mapping_nonlinearity', 'lrelu'), + lr_multiplier = kwarg('mapping_lrmul', 0.01), + w_avg_beta = kwarg('w_avg_beta', 0.995, none=1), + ), + ) + + # Check for unknown kwargs. 
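+    # The kwarg() calls below only mark run-time options from the TensorFlow pickle as
+    # "known" so the unknown-kwarg check does not reject them; their values are not
+    # carried over to the PyTorch network.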
+ kwarg('truncation_psi') + kwarg('truncation_cutoff') + kwarg('style_mixing_prob') + kwarg('structure') + kwarg('conditioning') + kwarg('fused_modconv') + unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) + if len(unknown_kwargs) > 0: + raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) + + # Collect params. + tf_params = _collect_tf_params(tf_G) + for name, value in list(tf_params.items()): + match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name) + if match: + r = kwargs.img_resolution // (2 ** int(match.group(1))) + tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value + kwargs.synthesis.kwargs.architecture = 'orig' + #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') + + # Convert params. + G = network_class(**kwargs).eval().requires_grad_(False) + # pylint: disable=unnecessary-lambda + # pylint: disable=f-string-without-interpolation + _populate_module_params(G, + r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'], + r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(), + r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'], + r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(), + r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'], + r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0], + r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1), + r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'], + r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0], + r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'], + r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(), + r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1, + r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1), + r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'], + r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0], + r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'], + r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(), + r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1, + r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1), + r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'], + r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0], + r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'], + r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(), + r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1, + r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1), + r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'], + 
r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(), + r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1, + r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1), + r'.*\.resample_filter', None, + r'.*\.act_filter', None, + ) + return G + +#---------------------------------------------------------------------------- + +def convert_tf_discriminator(tf_D): + if tf_D.version < 4: + raise ValueError('TensorFlow pickle version too low') + + # Collect kwargs. + tf_kwargs = tf_D.static_kwargs + known_kwargs = set() + def kwarg(tf_name, default=None): + known_kwargs.add(tf_name) + return tf_kwargs.get(tf_name, default) + + # Convert kwargs. + kwargs = dnnlib.EasyDict( + c_dim = kwarg('label_size', 0), + img_resolution = kwarg('resolution', 1024), + img_channels = kwarg('num_channels', 3), + architecture = kwarg('architecture', 'resnet'), + channel_base = kwarg('fmap_base', 16384) * 2, + channel_max = kwarg('fmap_max', 512), + num_fp16_res = kwarg('num_fp16_res', 0), + conv_clamp = kwarg('conv_clamp', None), + cmap_dim = kwarg('mapping_fmaps', None), + block_kwargs = dnnlib.EasyDict( + activation = kwarg('nonlinearity', 'lrelu'), + resample_filter = kwarg('resample_kernel', [1,3,3,1]), + freeze_layers = kwarg('freeze_layers', 0), + ), + mapping_kwargs = dnnlib.EasyDict( + num_layers = kwarg('mapping_layers', 0), + embed_features = kwarg('mapping_fmaps', None), + layer_features = kwarg('mapping_fmaps', None), + activation = kwarg('nonlinearity', 'lrelu'), + lr_multiplier = kwarg('mapping_lrmul', 0.1), + ), + epilogue_kwargs = dnnlib.EasyDict( + mbstd_group_size = kwarg('mbstd_group_size', None), + mbstd_num_channels = kwarg('mbstd_num_features', 1), + activation = kwarg('nonlinearity', 'lrelu'), + ), + ) + + # Check for unknown kwargs. + kwarg('structure') + kwarg('conditioning') + unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) + if len(unknown_kwargs) > 0: + raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) + + # Collect params. + tf_params = _collect_tf_params(tf_D) + for name, value in list(tf_params.items()): + match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name) + if match: + r = kwargs.img_resolution // (2 ** int(match.group(1))) + tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value + kwargs.architecture = 'orig' + #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') + + # Convert params. 
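+    # Instantiate the PyTorch discriminator and fill its parameters by matching each
+    # parameter name against a regex and looking up the corresponding TF variable
+    # (transposing weights into PyTorch's layout); resample filters are constant
+    # buffers and are left at their defaults (value_fn=None).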
+ from training import networks_stylegan2 + D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False) + # pylint: disable=unnecessary-lambda + # pylint: disable=f-string-without-interpolation + _populate_module_params(D, + r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1), + r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'], + r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1), + r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'], + r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1), + r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(), + r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'], + r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(), + r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'], + r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1), + r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'], + r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(), + r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'], + r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(), + r'b4\.out\.bias', lambda: tf_params[f'Output/bias'], + r'.*\.resample_filter', None, + ) + return D + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--source', help='Input pickle', required=True, metavar='PATH') +@click.option('--dest', help='Output pickle', required=True, metavar='PATH') +@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True) +def convert_network_pickle(source, dest, force_fp16): + """Convert legacy network pickle into the native PyTorch format. + + The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA. + It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks. + + Example: + + \b + python legacy.py \\ + --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\ + --dest=stylegan2-cat-config-f.pkl + """ + print(f'Loading "{source}"...') + with dnnlib.util.open_url(source) as f: + data = load_network_pkl(f, force_fp16=force_fp16) + print(f'Saving "{dest}"...') + with open(dest, 'wb') as f: + pickle.dump(data, f) + print('Done.') + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + convert_network_pickle() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/__init__.py b/3DPortraitGAN_pyramid/metrics/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# empty diff --git a/3DPortraitGAN_pyramid/metrics/equivariance.py b/3DPortraitGAN_pyramid/metrics/equivariance.py new file mode 100644 index 0000000..4609296 --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/equivariance.py @@ -0,0 +1,269 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Equivariance metrics (EQ-T, EQ-T_frac, and EQ-R) from the paper +"Alias-Free Generative Adversarial Networks".""" + +import copy +import numpy as np +import torch +import torch.fft +from torch_utils.ops import upfirdn2d +from . import metric_utils + +#---------------------------------------------------------------------------- +# Utilities. + +def sinc(x): + y = (x * np.pi).abs() + z = torch.sin(y) / y.clamp(1e-30, float('inf')) + return torch.where(y < 1e-30, torch.ones_like(x), z) + +def lanczos_window(x, a): + x = x.abs() / a + return torch.where(x < 1, sinc(x), torch.zeros_like(x)) + +def rotation_matrix(angle): + angle = torch.as_tensor(angle).to(torch.float32) + mat = torch.eye(3, device=angle.device) + mat[0, 0] = angle.cos() + mat[0, 1] = angle.sin() + mat[1, 0] = -angle.sin() + mat[1, 1] = angle.cos() + return mat + +#---------------------------------------------------------------------------- +# Apply integer translation to a batch of 2D images. Corresponds to the +# operator T_x in Appendix E.1. + +def apply_integer_translation(x, tx, ty): + _N, _C, H, W = x.shape + tx = torch.as_tensor(tx * W).to(dtype=torch.float32, device=x.device) + ty = torch.as_tensor(ty * H).to(dtype=torch.float32, device=x.device) + ix = tx.round().to(torch.int64) + iy = ty.round().to(torch.int64) + + z = torch.zeros_like(x) + m = torch.zeros_like(x) + if abs(ix) < W and abs(iy) < H: + y = x[:, :, max(-iy,0) : H+min(-iy,0), max(-ix,0) : W+min(-ix,0)] + z[:, :, max(iy,0) : H+min(iy,0), max(ix,0) : W+min(ix,0)] = y + m[:, :, max(iy,0) : H+min(iy,0), max(ix,0) : W+min(ix,0)] = 1 + return z, m + +#---------------------------------------------------------------------------- +# Apply integer translation to a batch of 2D images. Corresponds to the +# operator T_x in Appendix E.2. 
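+# apply_fractional_translation shifts a batch of images by a non-integer number of
+# pixels using separable windowed-sinc interpolation filters of support `a`, and
+# returns the translated images together with a validity mask.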
+ +def apply_fractional_translation(x, tx, ty, a=3): + _N, _C, H, W = x.shape + tx = torch.as_tensor(tx * W).to(dtype=torch.float32, device=x.device) + ty = torch.as_tensor(ty * H).to(dtype=torch.float32, device=x.device) + ix = tx.floor().to(torch.int64) + iy = ty.floor().to(torch.int64) + fx = tx - ix + fy = ty - iy + b = a - 1 + + z = torch.zeros_like(x) + zx0 = max(ix - b, 0) + zy0 = max(iy - b, 0) + zx1 = min(ix + a, 0) + W + zy1 = min(iy + a, 0) + H + if zx0 < zx1 and zy0 < zy1: + taps = torch.arange(a * 2, device=x.device) - b + filter_x = (sinc(taps - fx) * sinc((taps - fx) / a)).unsqueeze(0) + filter_y = (sinc(taps - fy) * sinc((taps - fy) / a)).unsqueeze(1) + y = x + y = upfirdn2d.filter2d(y, filter_x / filter_x.sum(), padding=[b,a,0,0]) + y = upfirdn2d.filter2d(y, filter_y / filter_y.sum(), padding=[0,0,b,a]) + y = y[:, :, max(b-iy,0) : H+b+a+min(-iy-a,0), max(b-ix,0) : W+b+a+min(-ix-a,0)] + z[:, :, zy0:zy1, zx0:zx1] = y + + m = torch.zeros_like(x) + mx0 = max(ix + a, 0) + my0 = max(iy + a, 0) + mx1 = min(ix - b, 0) + W + my1 = min(iy - b, 0) + H + if mx0 < mx1 and my0 < my1: + m[:, :, my0:my1, mx0:mx1] = 1 + return z, m + +#---------------------------------------------------------------------------- +# Construct an oriented low-pass filter that applies the appropriate +# bandlimit with respect to the input and output of the given affine 2D +# image transformation. + +def construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1): + assert a <= amax < aflt + mat = torch.as_tensor(mat).to(torch.float32) + + # Construct 2D filter taps in input & output coordinate spaces. + taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up) + yi, xi = torch.meshgrid(taps, taps) + xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2) + + # Convolution of two oriented 2D sinc filters. + fi = sinc(xi * cutoff_in) * sinc(yi * cutoff_in) + fo = sinc(xo * cutoff_out) * sinc(yo * cutoff_out) + f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real + + # Convolution of two oriented 2D Lanczos windows. + wi = lanczos_window(xi, a) * lanczos_window(yi, a) + wo = lanczos_window(xo, a) * lanczos_window(yo, a) + w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real + + # Construct windowed FIR filter. + f = f * w + + # Finalize. + c = (aflt - amax) * up + f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c] + f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up) + f = f / f.sum([0,2], keepdim=True) / (up ** 2) + f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1] + return f + +#---------------------------------------------------------------------------- +# Apply the given affine transformation to a batch of 2D images. + +def apply_affine_transformation(x, mat, up=4, **filter_kwargs): + _N, _C, H, W = x.shape + mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device) + + # Construct filter. + f = construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs) + assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1 + p = f.shape[0] // 2 + + # Construct sampling grid. + theta = mat.inverse() + theta[:2, 2] *= 2 + theta[0, 2] += 1 / up / W + theta[1, 2] += 1 / up / H + theta[0, :] *= W / (W + p / up * 2) + theta[1, :] *= H / (H + p / up * 2) + theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1]) + g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False) + + # Resample image. 
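+    # Upsample by `up` with the band-limiting filter f, then sample the result at the
+    # affine-transformed coordinates with bilinear grid_sample; the matching mask is
+    # produced the same way so out-of-range pixels can be ignored by the caller.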
+ y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p) + z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False) + + # Form mask. + m = torch.zeros_like(y) + c = p * 2 + 1 + m[:, :, c:-c, c:-c] = 1 + m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False) + return z, m + +#---------------------------------------------------------------------------- +# Apply fractional rotation to a batch of 2D images. Corresponds to the +# operator R_\alpha in Appendix E.3. + +def apply_fractional_rotation(x, angle, a=3, **filter_kwargs): + angle = torch.as_tensor(angle).to(dtype=torch.float32, device=x.device) + mat = rotation_matrix(angle) + return apply_affine_transformation(x, mat, a=a, amax=a*2, **filter_kwargs) + +#---------------------------------------------------------------------------- +# Modify the frequency content of a batch of 2D images as if they had undergo +# fractional rotation -- but without actually rotating them. Corresponds to +# the operator R^*_\alpha in Appendix E.3. + +def apply_fractional_pseudo_rotation(x, angle, a=3, **filter_kwargs): + angle = torch.as_tensor(angle).to(dtype=torch.float32, device=x.device) + mat = rotation_matrix(-angle) + f = construct_affine_bandlimit_filter(mat, a=a, amax=a*2, up=1, **filter_kwargs) + y = upfirdn2d.filter2d(x=x, f=f) + m = torch.zeros_like(y) + c = f.shape[0] // 2 + m[:, :, c:-c, c:-c] = 1 + return y, m + +#---------------------------------------------------------------------------- +# Compute the selected equivariance metrics for the given generator. + +def compute_equivariance_metrics(opts, num_samples, batch_size, translate_max=0.125, rotate_max=1, compute_eqt_int=False, compute_eqt_frac=False, compute_eqr=False): + assert compute_eqt_int or compute_eqt_frac or compute_eqr + + # Setup generator and labels. + G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device) + I = torch.eye(3, device=opts.device) + M = getattr(getattr(getattr(G, 'synthesis', None), 'input', None), 'transform', None) + if M is None: + raise ValueError('Cannot compute equivariance metrics; the given generator does not support user-specified image transformations') + c_iter = metric_utils.iterate_random_labels(opts=opts, batch_size=batch_size) + + # Sampling loop. + sums = None + progress = opts.progress.sub(tag='eq sampling', num_items=num_samples) + for batch_start in range(0, num_samples, batch_size * opts.num_gpus): + progress.update(batch_start) + s = [] + + # Randomize noise buffers, if any. + for name, buf in G.named_buffers(): + if name.endswith('.noise_const'): + buf.copy_(torch.randn_like(buf)) + + # Run mapping network. + z = torch.randn([batch_size, G.z_dim], device=opts.device) + c = next(c_iter) + ws = G.mapping(z=z, c=c) + + # Generate reference image. + M[:] = I + orig = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs) + + # Integer translation (EQ-T). + if compute_eqt_int: + t = (torch.rand(2, device=opts.device) * 2 - 1) * translate_max + t = (t * G.img_resolution).round() / G.img_resolution + M[:] = I + M[:2, 2] = -t + img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs) + ref, mask = apply_integer_translation(orig, t[0], t[1]) + s += [(ref - img).square() * mask, mask] + + # Fractional translation (EQ-T_frac). 
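+            # As with EQ-T above: draw a continuous offset, bake its inverse into the
+            # generator's input transform M, and compare against the reference image
+            # shifted with apply_fractional_translation; masked squared errors and mask
+            # sums are accumulated for the PSNR computation below.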
+ if compute_eqt_frac: + t = (torch.rand(2, device=opts.device) * 2 - 1) * translate_max + M[:] = I + M[:2, 2] = -t + img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs) + ref, mask = apply_fractional_translation(orig, t[0], t[1]) + s += [(ref - img).square() * mask, mask] + + # Rotation (EQ-R). + if compute_eqr: + angle = (torch.rand([], device=opts.device) * 2 - 1) * (rotate_max * np.pi) + M[:] = rotation_matrix(-angle) + img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs) + ref, ref_mask = apply_fractional_rotation(orig, angle) + pseudo, pseudo_mask = apply_fractional_pseudo_rotation(img, angle) + mask = ref_mask * pseudo_mask + s += [(ref - pseudo).square() * mask, mask] + + # Accumulate results. + s = torch.stack([x.to(torch.float64).sum() for x in s]) + sums = sums + s if sums is not None else s + progress.update(num_samples) + + # Compute PSNRs. + if opts.num_gpus > 1: + torch.distributed.all_reduce(sums) + sums = sums.cpu() + mses = sums[0::2] / sums[1::2] + psnrs = np.log10(2) * 20 - mses.log10() * 10 + psnrs = tuple(psnrs.numpy()) + return psnrs[0] if len(psnrs) == 1 else psnrs + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/frechet_inception_distance.py b/3DPortraitGAN_pyramid/metrics/frechet_inception_distance.py new file mode 100644 index 0000000..c2944eb --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/frechet_inception_distance.py @@ -0,0 +1,43 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Frechet Inception Distance (FID) from the paper +"GANs trained by a two time-scale update rule converge to a local Nash +equilibrium". Matches the original implementation by Heusel et al. at +https://github.com/bioinf-jku/TTUR/blob/master/fid.py""" + +import numpy as np +import scipy.linalg +from . import metric_utils + +#---------------------------------------------------------------------------- + +def compute_fid(opts, max_real, num_gen): + # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz + detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl' + detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer. 
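+    # FID between real and generated Inception-feature statistics:
+    #   FID = ||mu_r - mu_g||^2 + Tr(Sigma_r + Sigma_g - 2 * (Sigma_r @ Sigma_g)^(1/2))
+    # where (mu, Sigma) are the feature mean and covariance computed below.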
+ + mu_real, sigma_real = metric_utils.compute_feature_stats_for_dataset( + opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, + rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov() + + mu_gen, sigma_gen = metric_utils.compute_feature_stats_for_generator( + opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, + rel_lo=0, rel_hi=1, capture_mean_cov=True, max_items=num_gen).get_mean_cov() + + if opts.rank != 0: + return float('nan') + + m = np.square(mu_gen - mu_real).sum() + s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member + fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2)) + return float(fid) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/inception_score.py b/3DPortraitGAN_pyramid/metrics/inception_score.py new file mode 100644 index 0000000..1e5e247 --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/inception_score.py @@ -0,0 +1,40 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Inception Score (IS) from the paper "Improved techniques for training +GANs". Matches the original implementation by Salimans et al. at +https://github.com/openai/improved-gan/blob/master/inception_score/model.py""" + +import numpy as np +from . import metric_utils + +#---------------------------------------------------------------------------- + +def compute_is(opts, num_gen, num_splits): + # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz + detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl' + detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer. + + gen_probs = metric_utils.compute_feature_stats_for_generator( + opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, + capture_all=True, max_items=num_gen).get_all() + + if opts.rank != 0: + return float('nan'), float('nan') + + scores = [] + for i in range(num_splits): + part = gen_probs[i * num_gen // num_splits : (i + 1) * num_gen // num_splits] + kl = part * (np.log(part) - np.log(np.mean(part, axis=0, keepdims=True))) + kl = np.mean(np.sum(kl, axis=1)) + scores.append(np.exp(kl)) + return float(np.mean(scores)), float(np.std(scores)) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/kernel_inception_distance.py b/3DPortraitGAN_pyramid/metrics/kernel_inception_distance.py new file mode 100644 index 0000000..48906eb --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/kernel_inception_distance.py @@ -0,0 +1,48 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Kernel Inception Distance (KID) from the paper "Demystifying MMD +GANs". Matches the original implementation by Binkowski et al. at +https://github.com/mbinkowski/MMD-GAN/blob/master/gan/compute_scores.py""" + +import numpy as np +from . import metric_utils + +#---------------------------------------------------------------------------- + +def compute_kid(opts, max_real, num_gen, num_subsets, max_subset_size): + # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz + detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl' + detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer. + + real_features = metric_utils.compute_feature_stats_for_dataset( + opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, + rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all() + + gen_features = metric_utils.compute_feature_stats_for_generator( + opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, + rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all() + + if opts.rank != 0: + return float('nan') + + n = real_features.shape[1] + m = min(min(real_features.shape[0], gen_features.shape[0]), max_subset_size) + t = 0 + for _subset_idx in range(num_subsets): + x = gen_features[np.random.choice(gen_features.shape[0], m, replace=False)] + y = real_features[np.random.choice(real_features.shape[0], m, replace=False)] + a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3 + b = (x @ y.T / n + 1) ** 3 + t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m + kid = t / num_subsets / m + return float(kid) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/metric_main.py b/3DPortraitGAN_pyramid/metrics/metric_main.py new file mode 100644 index 0000000..ee00372 --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/metric_main.py @@ -0,0 +1,155 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Main API for computing and reporting quality metrics.""" + +import os +import time +import json +import torch +import dnnlib + +from . import metric_utils +from . import frechet_inception_distance +from . import kernel_inception_distance +from . import precision_recall +from . 
import perceptual_path_length +from . import inception_score +from . import equivariance + +#---------------------------------------------------------------------------- + +_metric_dict = dict() # name => fn + +def register_metric(fn): + assert callable(fn) + _metric_dict[fn.__name__] = fn + return fn + +def is_valid_metric(metric): + return metric in _metric_dict + +def list_valid_metrics(): + return list(_metric_dict.keys()) + +#---------------------------------------------------------------------------- + +def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full list of arguments. + assert is_valid_metric(metric) + opts = metric_utils.MetricOptions(**kwargs) + + # Calculate. + start_time = time.time() + results = _metric_dict[metric](opts) + total_time = time.time() - start_time + + # Broadcast results. + for key, value in list(results.items()): + if opts.num_gpus > 1: + value = torch.as_tensor(value, dtype=torch.float64, device=opts.device) + torch.distributed.broadcast(tensor=value, src=0) + value = float(value.cpu()) + results[key] = value + + # Decorate with metadata. + return dnnlib.EasyDict( + results = dnnlib.EasyDict(results), + metric = metric, + total_time = total_time, + total_time_str = dnnlib.util.format_time(total_time), + num_gpus = opts.num_gpus + ) + +#---------------------------------------------------------------------------- + +def report_metric(result_dict, run_dir=None, snapshot_pkl=None): + metric = result_dict['metric'] + assert is_valid_metric(metric) + if run_dir is not None and snapshot_pkl is not None: + snapshot_pkl = os.path.relpath(snapshot_pkl, run_dir) + + jsonl_line = json.dumps(dict(result_dict, snapshot_pkl=snapshot_pkl, timestamp=time.time())) + print(jsonl_line) + if run_dir is not None and os.path.isdir(run_dir): + with open(os.path.join(run_dir, f'metric-{metric}.jsonl'), 'at') as f: + f.write(jsonl_line + '\n') + +#---------------------------------------------------------------------------- +# Recommended metrics. 
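+# Each registered metric below tweaks the dataset/generator kwargs for its protocol and
+# returns a dict of named results. A minimal usage sketch (names such as G_ema,
+# training_set_kwargs, and the snapshot filename are hypothetical; additional options
+# like metric_pose_sample_mode or D may be required depending on the configuration):
+#
+#   result = calc_metric('fid50k_full', G=G_ema, dataset_kwargs=training_set_kwargs,
+#                        num_gpus=1, rank=0, device=torch.device('cuda'))
+#   report_metric(result, run_dir=run_dir, snapshot_pkl='network-snapshot.pkl')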
+ +@register_metric +def fid50k_full(opts): + opts.dataset_kwargs.update(max_size=None, xflip=True, back_repeat=1) + fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000) + return dict(fid50k_full=fid) + +@register_metric +def kid50k_full(opts): + opts.dataset_kwargs.update(max_size=None, xflip=False) + kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=50000, num_subsets=100, max_subset_size=1000) + return dict(kid50k_full=kid) + +@register_metric +def pr50k3_full(opts): + opts.dataset_kwargs.update(max_size=None, xflip=False) + precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000) + return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall) + +@register_metric +def ppl2_wend(opts): + ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, batch_size=2) + return dict(ppl2_wend=ppl) + +@register_metric +def eqt50k_int(opts): + opts.G_kwargs.update(force_fp32=True) + psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_int=True) + return dict(eqt50k_int=psnr) + +@register_metric +def eqt50k_frac(opts): + opts.G_kwargs.update(force_fp32=True) + psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_frac=True) + return dict(eqt50k_frac=psnr) + +@register_metric +def eqr50k(opts): + opts.G_kwargs.update(force_fp32=True) + psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqr=True) + return dict(eqr50k=psnr) + +#---------------------------------------------------------------------------- +# Legacy metrics. + +@register_metric +def fid50k(opts): + opts.dataset_kwargs.update(max_size=None) + fid = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000) + return dict(fid50k=fid) + +@register_metric +def kid50k(opts): + opts.dataset_kwargs.update(max_size=None) + kid = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000) + return dict(kid50k=kid) + +@register_metric +def pr50k3(opts): + opts.dataset_kwargs.update(max_size=None) + precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000) + return dict(pr50k3_precision=precision, pr50k3_recall=recall) + +@register_metric +def is50k(opts): + opts.dataset_kwargs.update(max_size=None, xflip=False) + mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10) + return dict(is50k_mean=mean, is50k_std=std) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/metric_utils.py b/3DPortraitGAN_pyramid/metrics/metric_utils.py new file mode 100644 index 0000000..5891537 --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/metric_utils.py @@ -0,0 +1,430 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Miscellaneous utilities used internally by the quality metrics.""" + +import os +import time +import hashlib +import pickle +import copy +import uuid +import numpy as np +import torch +import dnnlib + +#---------------------------------------------------------------------------- + +class MetricOptions: + def __init__(self, G=None, G_kwargs={}, dataset_kwargs={}, num_gpus=1, rank=0, device=None, progress=None, identical_c_p = True, + cache=True, metric_pose_sample_mode = None,D = None,pose_predict_kwargs = None): + assert 0 <= rank < num_gpus + self.G = G + self.G_kwargs = dnnlib.EasyDict(G_kwargs) + self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs) + self.num_gpus = num_gpus + self.rank = rank + self.device = device if device is not None else torch.device('cuda', rank) + self.progress = progress.sub() if progress is not None and rank == 0 else ProgressMonitor() + self.cache = cache + + self.metric_pose_sample_mode = metric_pose_sample_mode + self.D = D + self.pose_predict_kwargs = pose_predict_kwargs + + self.identical_c_p = identical_c_p + +#---------------------------------------------------------------------------- + +_feature_detector_cache = dict() + +def get_feature_detector_name(url): + return os.path.splitext(url.split('/')[-1])[0] + +def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False): + assert 0 <= rank < num_gpus + key = (url, device) + if key not in _feature_detector_cache: + is_leader = (rank == 0) + if not is_leader and num_gpus > 1: + torch.distributed.barrier() # leader goes first + with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f: + _feature_detector_cache[key] = pickle.load(f).to(device) + if is_leader and num_gpus > 1: + torch.distributed.barrier() # others follow + return _feature_detector_cache[key] + +#---------------------------------------------------------------------------- + +def iterate_random_labels(opts, batch_size): + if opts.G.c_dim == 0: + c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device) + while True: + yield c + else: + dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) + while True: + random_idx = [np.random.randint(len(dataset)) for _i in range(batch_size) ] + + + c = [dataset.get_label(idx) for idx in random_idx] + c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device) + + p = [dataset.get_coarse_pose(idx) for idx in random_idx] + p = torch.from_numpy(np.stack(p)).pin_memory().to(opts.device) + yield c,p + + +from torch_utils.ops import upfirdn2d +from training.dual_discriminator import filtered_resizing + +def run_D_pose_prediction(img, c, blur_sigma=0,D = None): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div(blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + pose,_ = D.predict_pose( img, c) + return pose + +def get_pose_params(real_img,real_seg, real_c,D = None,neural_rendering_resolution = None,blur_sigma = None,resample_filter = None, filter_mode = None): + + + + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=resample_filter, + filter_mode=filter_mode) + real_seg_raw = filtered_resizing(real_seg, 
size=neural_rendering_resolution, f=resample_filter, + filter_mode=filter_mode) + + if True: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw, 'image_mask': real_seg_raw} + + # get pose_params from real image + real_img_tmp_image = real_img['image'].detach().requires_grad_(True) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(True) + real_img_tmp_image_mask = real_img['image_mask'].detach().requires_grad_(True) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw, 'image_mask': real_img_tmp_image_mask} + + predicted_real_pose = run_D_pose_prediction(real_img_tmp, real_c, blur_sigma=blur_sigma, D = D) + return predicted_real_pose + +def iterate_random_labels_predicted_pose(opts, batch_size,G): + if opts.G.c_dim == 0: + c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device) + while True: + yield c + else: + dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) + while True: + random_idx = [np.random.randint(len(dataset)) for _i in range(batch_size) ] + + + c = [dataset.get_label(idx) for idx in random_idx] + c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device) + + z = torch.randn([batch_size, opts.G.z_dim], device=opts.device) + + p = G.get_pose_params(z,c) + + + yield c,p + +def iterate_random_labels_predicted_pose_D(opts, batch_size,D): + if opts.G.c_dim == 0: + c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device) + while True: + yield c + else: + dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) + while True: + random_idx = [np.random.randint(len(dataset)) for _i in range(batch_size) ] + + + c = [dataset.get_label(idx) for idx in random_idx] + c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device) + + # p = [dataset.get_coarse_pose(idx) for idx in random_idx] + # p = torch.from_numpy(np.stack(p)).pin_memory().to(opts.device) + + image = [dataset.get_image(idx) for idx in random_idx] + image = torch.from_numpy(np.stack(image)).pin_memory().to(opts.device) + image = image.to(torch.float32) / 127.5 - 1 + + + mask = [dataset._seg_dataset.get_image(idx) for idx in random_idx] + mask = torch.from_numpy(np.stack(mask)).pin_memory().to(opts.device) + mask = mask.to(torch.float32) / 255.0 + + + + p = get_pose_params( + real_img = image, + real_seg = mask, + real_c = c, + D = D, + blur_sigma = opts.pose_predict_kwargs['blur_sigma'], + neural_rendering_resolution= opts.pose_predict_kwargs['neural_rendering_resolution'], + resample_filter= opts.pose_predict_kwargs['resample_filter'], + filter_mode= opts.pose_predict_kwargs['filter_mode'], + ) + yield c,p + +# def iterate_random_poses(opts, batch_size): +# if opts.G.c_dim == 0: +# p = torch.zeros([batch_size, 6], device=opts.device) +# while True: +# yield p +# else: +# dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) +# while True: +# p = [dataset.get_coarse_pose(np.random.randint(len(dataset))) for _i in range(batch_size)] +# p = torch.from_numpy(np.stack(p)).pin_memory().to(opts.device) +# yield p +#---------------------------------------------------------------------------- + +class FeatureStats: + def __init__(self, capture_all=False, capture_mean_cov=False, max_items=None): + self.capture_all = capture_all + self.capture_mean_cov = capture_mean_cov + 
self.max_items = max_items + self.num_items = 0 + self.num_features = None + self.all_features = None + self.raw_mean = None + self.raw_cov = None + + def set_num_features(self, num_features): + if self.num_features is not None: + assert num_features == self.num_features + else: + self.num_features = num_features + self.all_features = [] + self.raw_mean = np.zeros([num_features], dtype=np.float64) + self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64) + + def is_full(self): + return (self.max_items is not None) and (self.num_items >= self.max_items) + + def append(self, x): + x = np.asarray(x, dtype=np.float32) + assert x.ndim == 2 + if (self.max_items is not None) and (self.num_items + x.shape[0] > self.max_items): + if self.num_items >= self.max_items: + return + x = x[:self.max_items - self.num_items] + + self.set_num_features(x.shape[1]) + self.num_items += x.shape[0] + if self.capture_all: + self.all_features.append(x) + if self.capture_mean_cov: + x64 = x.astype(np.float64) + self.raw_mean += x64.sum(axis=0) + self.raw_cov += x64.T @ x64 + + def append_torch(self, x, num_gpus=1, rank=0): + assert isinstance(x, torch.Tensor) and x.ndim == 2 + assert 0 <= rank < num_gpus + if num_gpus > 1: + ys = [] + for src in range(num_gpus): + y = x.clone() + torch.distributed.broadcast(y, src=src) + ys.append(y) + x = torch.stack(ys, dim=1).flatten(0, 1) # interleave samples + self.append(x.cpu().numpy()) + + def get_all(self): + assert self.capture_all + return np.concatenate(self.all_features, axis=0) + + def get_all_torch(self): + return torch.from_numpy(self.get_all()) + + def get_mean_cov(self): + assert self.capture_mean_cov + mean = self.raw_mean / self.num_items + cov = self.raw_cov / self.num_items + cov = cov - np.outer(mean, mean) + return mean, cov + + def save(self, pkl_file): + with open(pkl_file, 'wb') as f: + pickle.dump(self.__dict__, f) + + @staticmethod + def load(pkl_file): + with open(pkl_file, 'rb') as f: + s = dnnlib.EasyDict(pickle.load(f)) + obj = FeatureStats(capture_all=s.capture_all, max_items=s.max_items) + obj.__dict__.update(s) + return obj + +#---------------------------------------------------------------------------- + +class ProgressMonitor: + def __init__(self, tag=None, num_items=None, flush_interval=1000, verbose=False, progress_fn=None, pfn_lo=0, pfn_hi=1000, pfn_total=1000): + self.tag = tag + self.num_items = num_items + self.verbose = verbose + self.flush_interval = flush_interval + self.progress_fn = progress_fn + self.pfn_lo = pfn_lo + self.pfn_hi = pfn_hi + self.pfn_total = pfn_total + self.start_time = time.time() + self.batch_time = self.start_time + self.batch_items = 0 + if self.progress_fn is not None: + self.progress_fn(self.pfn_lo, self.pfn_total) + + def update(self, cur_items): + assert (self.num_items is None) or (cur_items <= self.num_items) + if (cur_items < self.batch_items + self.flush_interval) and (self.num_items is None or cur_items < self.num_items): + return + cur_time = time.time() + total_time = cur_time - self.start_time + time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1) + if (self.verbose) and (self.tag is not None): + print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}') + self.batch_time = cur_time + self.batch_items = cur_items + + if (self.progress_fn is not None) and (self.num_items is not None): + self.progress_fn(self.pfn_lo + (self.pfn_hi - self.pfn_lo) * (cur_items / self.num_items), 
self.pfn_total) + + def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1): + return ProgressMonitor( + tag = tag, + num_items = num_items, + flush_interval = flush_interval, + verbose = self.verbose, + progress_fn = self.progress_fn, + pfn_lo = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_lo, + pfn_hi = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_hi, + pfn_total = self.pfn_total, + ) + +#---------------------------------------------------------------------------- + +def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs): + dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) + if data_loader_kwargs is None: + data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2) + + # Try to lookup from cache. + cache_file = None + if opts.cache: + # Choose cache file name. + args = dict(dataset_kwargs=opts.dataset_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs) + md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8')) + cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{md5.hexdigest()}' + cache_file = dnnlib.make_cache_dir_path('gan-metrics', cache_tag + '.pkl') + + # Check if the file exists (all processes must agree). + flag = os.path.isfile(cache_file) if opts.rank == 0 else False + if opts.num_gpus > 1: + flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device) + torch.distributed.broadcast(tensor=flag, src=0) + flag = (float(flag.cpu()) != 0) + + # Load. + if flag: + return FeatureStats.load(cache_file) + + # Initialize. + num_items = len(dataset) + if max_items is not None: + num_items = min(num_items, max_items) + stats = FeatureStats(max_items=num_items, **stats_kwargs) + progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi) + detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose) + + # Main loop. + item_subset = [(i * opts.num_gpus + opts.rank) % num_items for i in range((num_items - 1) // opts.num_gpus + 1)] + for images, masks, _labels,_poses in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs): + if images.shape[1] == 1: + images = images.repeat([1, 3, 1, 1]) + features = detector(images.to(opts.device), **detector_kwargs) + stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank) + progress.update(stats.num_items) + + # Save to cache. + if cache_file is not None and opts.rank == 0: + os.makedirs(os.path.dirname(cache_file), exist_ok=True) + temp_file = cache_file + '.' + uuid.uuid4().hex + stats.save(temp_file) + os.replace(temp_file, cache_file) # atomic + return stats + +#---------------------------------------------------------------------------- + +def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, batch_gen=None, **stats_kwargs): + if batch_gen is None: + batch_gen = min(batch_size, 8) + assert batch_size % batch_gen == 0 + + # Setup generator and labels. 
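+    # Conditioning for each generated sample is a (camera label c, body pose p) pair. Depending on
+    # opts.metric_pose_sample_mode, poses come from the generator's own pose branch ('G_predict')
+    # or from the discriminator's pose predictor applied to real images ('D_predict'); when
+    # opts.identical_c_p is False, a second iterator supplies independent conditioning for mapping.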
+ G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device) + G.set_batch_size(batch_gen) + + if opts.metric_pose_sample_mode == 'G_predict': + label_iter = iterate_random_labels_predicted_pose(opts=opts, batch_size=batch_gen, G = G) + else: + D = copy.deepcopy(opts.D).eval().requires_grad_(False).to(opts.device) + label_iter = iterate_random_labels_predicted_pose_D(opts=opts, batch_size=batch_gen,D = D) + + if not opts.identical_c_p: + if opts.metric_pose_sample_mode == 'G_predict': + cond_label_iter = iterate_random_labels_predicted_pose(opts=opts, batch_size=batch_gen, G = G) + else: + D = copy.deepcopy(opts.D).eval().requires_grad_(False).to(opts.device) + cond_label_iter = iterate_random_labels_predicted_pose_D(opts=opts, batch_size=batch_gen,D = D) + + + # Initialize. + stats = FeatureStats(**stats_kwargs) + assert stats.max_items is not None + progress = opts.progress.sub(tag='generator features', num_items=stats.max_items, rel_lo=rel_lo, rel_hi=rel_hi) + detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose) + + # Main loop. + while not stats.is_full(): + images = [] + for _i in range(batch_size // batch_gen): + z = torch.randn([batch_gen, G.z_dim], device=opts.device) + + if opts.identical_c_p: + c,p = next(label_iter) + + img = G(z=z, c=c, pose_params = p,apply_def = True,**opts.G_kwargs)['image'] + else: + c,p = next(label_iter) + cond_c,cond_p = next(cond_label_iter) + ws = G.mapping(z, cond_c, cond_p) + img = G.synthesis(ws, c=c,apply_def = True, pose_params = p,**opts.G_kwargs )['image'] + + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) + images.append(img) + images = torch.cat(images) + if images.shape[1] == 1: + images = images.repeat([1, 3, 1, 1]) + features = detector(images, **detector_kwargs) + stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank) + progress.update(stats.num_items) + return stats + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/perceptual_path_length.py b/3DPortraitGAN_pyramid/metrics/perceptual_path_length.py new file mode 100644 index 0000000..5e58dac --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/perceptual_path_length.py @@ -0,0 +1,127 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Perceptual Path Length (PPL) from the paper "A Style-Based Generator +Architecture for Generative Adversarial Networks". Matches the original +implementation by Karras et al. at +https://github.com/NVlabs/stylegan/blob/master/metrics/perceptual_path_length.py""" + +import copy +import numpy as np +import torch +from . import metric_utils + +#---------------------------------------------------------------------------- + +# Spherical interpolation of a batch of vectors. 
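+# Given unit vectors a and b with angle theta = arccos(a.b), slerp(a, b, t) rotates a toward b by
+# t*theta within their common plane, i.e. a*cos(t*theta) + c*sin(t*theta), where c is the unit
+# component of b orthogonal to a; inputs and output are (re)normalized for numerical stability.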
+def slerp(a, b, t): + a = a / a.norm(dim=-1, keepdim=True) + b = b / b.norm(dim=-1, keepdim=True) + d = (a * b).sum(dim=-1, keepdim=True) + p = t * torch.acos(d) + c = b - d * a + c = c / c.norm(dim=-1, keepdim=True) + d = a * torch.cos(p) + c * torch.sin(p) + d = d / d.norm(dim=-1, keepdim=True) + return d + +#---------------------------------------------------------------------------- + +class PPLSampler(torch.nn.Module): + def __init__(self, G, G_kwargs, epsilon, space, sampling, crop, vgg16): + assert space in ['z', 'w'] + assert sampling in ['full', 'end'] + super().__init__() + self.G = copy.deepcopy(G) + self.G_kwargs = G_kwargs + self.epsilon = epsilon + self.space = space + self.sampling = sampling + self.crop = crop + self.vgg16 = copy.deepcopy(vgg16) + + def forward(self, c): + # Generate random latents and interpolation t-values. + t = torch.rand([c.shape[0]], device=c.device) * (1 if self.sampling == 'full' else 0) + z0, z1 = torch.randn([c.shape[0] * 2, self.G.z_dim], device=c.device).chunk(2) + + # Interpolate in W or Z. + if self.space == 'w': + w0, w1 = self.G.mapping(z=torch.cat([z0,z1]), c=torch.cat([c,c])).chunk(2) + wt0 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2)) + wt1 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2) + self.epsilon) + else: # space == 'z' + zt0 = slerp(z0, z1, t.unsqueeze(1)) + zt1 = slerp(z0, z1, t.unsqueeze(1) + self.epsilon) + wt0, wt1 = self.G.mapping(z=torch.cat([zt0,zt1]), c=torch.cat([c,c])).chunk(2) + + # Randomize noise buffers. + for name, buf in self.G.named_buffers(): + if name.endswith('.noise_const'): + buf.copy_(torch.randn_like(buf)) + + # Generate images. + img = self.G.synthesis(ws=torch.cat([wt0,wt1]), noise_mode='const', force_fp32=True, **self.G_kwargs) + + # Center crop. + if self.crop: + assert img.shape[2] == img.shape[3] + c = img.shape[2] // 8 + img = img[:, :, c*3 : c*7, c*2 : c*6] + + # Downsample to 256x256. + factor = self.G.img_resolution // 256 + if factor > 1: + img = img.reshape([-1, img.shape[1], img.shape[2] // factor, factor, img.shape[3] // factor, factor]).mean([3, 5]) + + # Scale dynamic range from [-1,1] to [0,255]. + img = (img + 1) * (255 / 2) + if self.G.img_channels == 1: + img = img.repeat([1, 3, 1, 1]) + + # Evaluate differential LPIPS. + lpips_t0, lpips_t1 = self.vgg16(img, resize_images=False, return_lpips=True).chunk(2) + dist = (lpips_t0 - lpips_t1).square().sum(1) / self.epsilon ** 2 + return dist + +#---------------------------------------------------------------------------- + +def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size): + vgg16_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/vgg16.pkl' + vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose) + + # Setup sampler and labels. + sampler = PPLSampler(G=opts.G, G_kwargs=opts.G_kwargs, epsilon=epsilon, space=space, sampling=sampling, crop=crop, vgg16=vgg16) + sampler.eval().requires_grad_(False).to(opts.device) + c_iter = metric_utils.iterate_random_labels(opts=opts, batch_size=batch_size) + + # Sampling loop. 
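+    # Each rank evaluates its own batches of perturbed latent pairs; per-batch distances are
+    # broadcast from every rank so that rank 0 ends up with all num_samples values before the
+    # 1st/99th-percentile filtering below.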
+ dist = [] + progress = opts.progress.sub(tag='ppl sampling', num_items=num_samples) + for batch_start in range(0, num_samples, batch_size * opts.num_gpus): + progress.update(batch_start) + x = sampler(next(c_iter)) + for src in range(opts.num_gpus): + y = x.clone() + if opts.num_gpus > 1: + torch.distributed.broadcast(y, src=src) + dist.append(y) + progress.update(num_samples) + + # Compute PPL. + if opts.rank != 0: + return float('nan') + dist = torch.cat(dist)[:num_samples].cpu().numpy() + lo = np.percentile(dist, 1, interpolation='lower') + hi = np.percentile(dist, 99, interpolation='higher') + ppl = np.extract(np.logical_and(dist >= lo, dist <= hi), dist).mean() + return float(ppl) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/metrics/precision_recall.py b/3DPortraitGAN_pyramid/metrics/precision_recall.py new file mode 100644 index 0000000..e33e85f --- /dev/null +++ b/3DPortraitGAN_pyramid/metrics/precision_recall.py @@ -0,0 +1,64 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Precision/Recall (PR) from the paper "Improved Precision and Recall +Metric for Assessing Generative Models". Matches the original implementation +by Kynkaanniemi et al. at +https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py""" + +import torch +from . 
import metric_utils + +#---------------------------------------------------------------------------- + +def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size): + assert 0 <= rank < num_gpus + num_cols = col_features.shape[0] + num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus + col_batches = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches]).chunk(num_batches) + dist_batches = [] + for col_batch in col_batches[rank :: num_gpus]: + dist_batch = torch.cdist(row_features.unsqueeze(0), col_batch.unsqueeze(0))[0] + for src in range(num_gpus): + dist_broadcast = dist_batch.clone() + if num_gpus > 1: + torch.distributed.broadcast(dist_broadcast, src=src) + dist_batches.append(dist_broadcast.cpu() if rank == 0 else None) + return torch.cat(dist_batches, dim=1)[:, :num_cols] if rank == 0 else None + +#---------------------------------------------------------------------------- + +def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size): + detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/vgg16.pkl' + detector_kwargs = dict(return_features=True) + + real_features = metric_utils.compute_feature_stats_for_dataset( + opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, + rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device) + + gen_features = metric_utils.compute_feature_stats_for_generator( + opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, + rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device) + + results = dict() + for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]: + kth = [] + for manifold_batch in manifold.split(row_batch_size): + dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size) + kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None) + kth = torch.cat(kth) if opts.rank == 0 else None + pred = [] + for probes_batch in probes.split(row_batch_size): + dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size) + pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None) + results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan') + return results['precision'], results['recall'] + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/optimize_trigrid.py b/3DPortraitGAN_pyramid/optimize_trigrid.py new file mode 100644 index 0000000..b67b9d5 --- /dev/null +++ b/3DPortraitGAN_pyramid/optimize_trigrid.py @@ -0,0 +1,297 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Generate lerp videos using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import imageio +import numpy as np +import scipy.interpolate +import torch +from tqdm import tqdm +import mrcfile +import json +import legacy + +from camera_utils import LookAtPoseSampler,FOV_to_intrinsics +from torch_utils import misc +import glob +import PIL +from torch.utils.data import DataLoader +import torch.nn.functional as F + +#---------------------------------------------------------------------------- + +class Dataset(torch.utils.data.Dataset): + def __init__(self, path): + camera_info_path = os.path.join(path, 'data', 'camera_info.json') + with open(camera_info_path, 'r') as f: + camera_info = json.load(f) + + self.camera_info = camera_info + + image_list = list(camera_info.keys()) + self.image_list = [] + for img_name in image_list: + if os.path.exists(os.path.join(path, 'update_data', img_name)): + self.image_list.append(img_name) + + self.image_dir = os.path.join(path, 'update_data') + + + + def __len__(self): + return len(self.image_list) + + def __getitem__(self, index): + img_name = self.image_list[index] + + img_path = os.path.join(self.image_dir, img_name) + + img = imageio.imread(img_path) + img = np.array(img).astype(np.float32) + img = img / 255.0 + # to -1,1 + img = img * 2 - 1 + img = torch.from_numpy(img) # [H, W, C] + + + camera_info = self.camera_info[img_name] + camera_info = torch.from_numpy(np.array(camera_info)).float().squeeze() + + return img, camera_info + + +def parse_range(s: Union[str, List[int]]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. + + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]: + '''Parse a 'M,N' or 'MxN' integer tuple. + + Example: + '4x2' returns (4,2) + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + if m := re.match(r'^(\d+)[x,](\d+)$', s): + return (int(m.group(1)), int(m.group(2))) + raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) +@click.option('--data_dir', help='Network pickle filename', required=True) +@click.option('--shuffle-seed', type=int, help='Random seed to use for shuffling seed order', default=None) +@click.option('--grid', type=parse_tuple, help='Grid width/height, e.g. \'4x3\' (default: 1x1)', default=(1,1)) +@click.option('--num-keyframes', type=int, help='Number of seeds to interpolate through. 
If not specified, determine based on the length of the seeds array given by --seeds.', default=None) +@click.option('--w-frames', type=int, help='Number of frames to interpolate between latents', default=120) +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--image_mode', help='Image mode', type=click.Choice(['image_depth', 'image_raw']), required=False, metavar='STR', default='image_raw', show_default=True) +@click.option('--sample_mult', 'sampling_multiplier', type=float, help='Multiplier for depth sampling in volume rendering', default=2, show_default=True) +@click.option('--nrr', type=int, help='Neural rendering resolution override', default=None, show_default=True) + +def generate_images( + network_pkl: str, + data_dir: str, + shuffle_seed: Optional[int], + truncation_psi: float, + truncation_cutoff: int, + grid: Tuple[int,int], + num_keyframes: Optional[int], + w_frames: int, + image_mode: str, + sampling_multiplier: float, + nrr: Optional[int], +): + print('Loading networks from "%s"...' % network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution']) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance']) + + G.rendering_kwargs['ray_start'] = 2.35 + + + + print("Reloading Modules!") + from training.neural_renderer import TriPlaneGenerator + G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device) + misc.copy_params_and_buffers(G, G_new, require_all=False) + G_new.neural_rendering_resolution = G.neural_rendering_resolution + G_new.rendering_kwargs = G.rendering_kwargs + G = G_new + + G.set_batch_size(1) + + intrinsics = FOV_to_intrinsics(12.447863, device=device) + cam_pivot = torch.tensor([0, 0.0649, 0], device=device) + cam_radius = G.rendering_kwargs.get('avg_camera_radius', 2.7) + default_cam2world_pose = LookAtPoseSampler.sample(np.pi / 2, np.pi / 2, cam_pivot, + radius=cam_radius, device=device) + default_cam_params = torch.cat([default_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + res_dir = data_dir + + update_data_dir = os.path.join(res_dir, 'update_data') + if not os.path.exists(update_data_dir): + print('update data not found for ', res_dir) + return + + print('optimize for ', res_dir) + + log_dir = os.path.join(res_dir, 'log') + os.makedirs(log_dir, exist_ok=True) + log_img_dir = os.path.join(log_dir, 'img') + os.makedirs(log_img_dir, exist_ok=True) + + log_ckpt_dir = os.path.join(log_dir, 'ckpt') + os.makedirs(log_ckpt_dir, exist_ok=True) + + + if nrr is not None: G.neural_rendering_resolution = nrr + + if truncation_cutoff == 0: + truncation_psi = 1.0 # truncation cutoff of 0 means no truncation anyways + if truncation_psi == 1.0: + truncation_cutoff = 14 # no truncation so doesn't matter where we cutoff + + ckpt_path = os.path.join(res_dir, 'checkpoints/df.pth') + if not os.path.exists(ckpt_path): + print('checkpoints not found for ', res_dir) + return + + print('Loading checkpoints from "%s"...' 
% ckpt_path) + ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)['model'] + trigrid = { + 8: ckpt['trigrids_8'].to(device).requires_grad_(True), + 16: ckpt['trigrids_16'].to(device).requires_grad_(True), + 32: ckpt['trigrids_32'].to(device).requires_grad_(True), + 64: ckpt['trigrids_64'].to(device).requires_grad_(True), + 128: ckpt['trigrids_128'].to(device).requires_grad_(True), + 256: ckpt['trigrids_256'].to(device).requires_grad_(True), + 512: ckpt['trigrids_512'].to(device).requires_grad_(True), + } + ws = ckpt['ws'].to(device) + + epoch_num = 19 + patch_resolution = 256 + lr = 1.0 + params = [ + {'params': trigrid[8], 'lr': lr}, + {'params': trigrid[16], 'lr': lr}, + {'params': trigrid[32], 'lr': lr}, + {'params': trigrid[64], 'lr': lr}, + {'params': trigrid[128], 'lr': lr}, + {'params': trigrid[256], 'lr': lr}, + {'params': trigrid[512], 'lr': lr}, + ] + # optimizer = torch.optim.Adam(params, betas=(0.9, 0.999)) + + from optimizer import Adan + + # Adan usually requires a larger LR + optimizer = Adan(params, eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, foreach=False) + + + dataset = Dataset(res_dir) + + data_loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=0, drop_last=True) + + for epoch in range(epoch_num): + print('epoch: ', epoch) + + for i, data in enumerate(data_loader): + print('iter: ', i) + image, cam = data + + gt_img = image.clone().detach().to(device).permute(0, 3, 1, 2) # 1, 3, 512, 512 [-1,1] + cam = cam.clone().detach().to(device) + #print('fetch data done') + # render + output = G.render_planes(ws=ws, planes=trigrid, c=cam, noise_mode='const', + neural_rendering_resolution=512, chunk=4096,render_bg = False, patch_resolution = patch_resolution) + + img = output['image_raw'] # 1, 3, 512, 512 [-1,1] + mask = output['image_mask'] # 1, 1, 512, 512 [0,1] + patch_info = output['patch_info'] + + + # L2 loss + + top, left = patch_info[0] + gt_img = gt_img[:, :, top:top + patch_resolution, left:left + patch_resolution] + + loss = torch.mean((img - gt_img) ** 2 * mask)*1e3 + print('loss: ', loss.item()) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + + + # save checkpoint + if epoch == epoch_num - 1: + ckpt = { + 'trigrids_8': trigrid[8].clone().detach(), + 'trigrids_16': trigrid[16].clone().detach(), + 'trigrids_32': trigrid[32].clone().detach(), + 'trigrids_64': trigrid[64].clone().detach(), + 'trigrids_128': trigrid[128].clone().detach(), + 'trigrids_256': trigrid[256].clone().detach(), + 'trigrids_512': trigrid[512].clone().detach(), + 'ws': ws, + } + + torch.save({'model': ckpt}, f'{log_ckpt_dir}/epoch_{epoch:05d}.pth') + + with torch.no_grad(): + output = G.render_planes(ws=ws, planes=trigrid, c=default_cam_params, noise_mode='const', + neural_rendering_resolution=512, chunk=4096, render_bg=False, + patch_resolution=None) + + img = output['image_raw'] # 1, 3, 512, 512 [-1,1] + img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + print('save image to ', f'{log_img_dir}/epoch_{epoch}.png') + PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{log_img_dir}/epoch_{epoch}.png') + + + + + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/optimizer.py b/3DPortraitGAN_pyramid/optimizer.py new file mode 100644 index 0000000..f5bb64f 
--- /dev/null +++ b/3DPortraitGAN_pyramid/optimizer.py @@ -0,0 +1,325 @@ +# Copyright 2022 Garena Online Private Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List + +import torch +from torch import Tensor +from torch.optim.optimizer import Optimizer + + +class Adan(Optimizer): + """ + Implements a pytorch variant of Adan + Adan was proposed in + Adan: Adaptive Nesterov Momentum Algorithm for + Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022. + https://arxiv.org/abs/2208.06677 + Arguments: + params (iterable): iterable of parameters to optimize or + dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float, flot], optional): coefficients used for + first- and second-order moments. (default: (0.98, 0.92, 0.99)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): decoupled weight decay + (L2 penalty) (default: 0) + max_grad_norm (float, optional): value used to clip + global grad norm (default: 0.0 no clip) + no_prox (bool): how to perform the decoupled weight decay + (default: False) + foreach (bool): if True would use torch._foreach implementation. + It's faster but uses slightly more memory. 
(default: True) + """ + def __init__(self, + params, + lr=1e-3, + betas=(0.98, 0.92, 0.99), + eps=1e-8, + weight_decay=0.0, + max_grad_norm=0.0, + no_prox=False, + foreach: bool = True): + if not 0.0 <= max_grad_norm: + raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm)) + if not 0.0 <= lr: + raise ValueError('Invalid learning rate: {}'.format(lr)) + if not 0.0 <= eps: + raise ValueError('Invalid epsilon value: {}'.format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError('Invalid beta parameter at index 0: {}'.format( + betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError('Invalid beta parameter at index 1: {}'.format( + betas[1])) + if not 0.0 <= betas[2] < 1.0: + raise ValueError('Invalid beta parameter at index 2: {}'.format( + betas[2])) + defaults = dict(lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm, + no_prox=no_prox, + foreach=foreach) + super().__init__(params, defaults) + + def __setstate__(self, state): + super(Adan, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('no_prox', False) + + @torch.no_grad() + def restart_opt(self): + for group in self.param_groups: + group['step'] = 0 + for p in group['params']: + if p.requires_grad: + state = self.state[p] + # State initialization + + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + # Exponential moving average of gradient difference + state['exp_avg_diff'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step.""" + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if self.defaults['max_grad_norm'] > 0: + device = self.param_groups[0]['params'][0].device + global_grad_norm = torch.zeros(1, device=device) + + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], + device=device) + for group in self.param_groups: + + for p in group['params']: + if p.grad is not None: + grad = p.grad + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + + clip_global_grad_norm = torch.clamp( + max_grad_norm / (global_grad_norm + group['eps']), + max=1.0).item() + else: + clip_global_grad_norm = 1.0 + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + exp_avg_diffs = [] + neg_pre_grads = [] + + beta1, beta2, beta3 = group['betas'] + # assume same step across group now to simplify things + # per parameter step can be easily support + # by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + bias_correction1 = 1.0 - beta1**group['step'] + bias_correction2 = 1.0 - beta2**group['step'] + bias_correction3 = 1.0 - beta3**group['step'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + grads.append(p.grad) + + state = self.state[p] + if len(state) == 0: + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + state['exp_avg_diff'] = torch.zeros_like(p) + + if 'neg_pre_grad' not in state or group['step'] == 1: + state['neg_pre_grad'] = p.grad.clone().mul_( + -clip_global_grad_norm) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + exp_avg_diffs.append(state['exp_avg_diff']) + neg_pre_grads.append(state['neg_pre_grad']) + + kwargs = dict( + 
params=params_with_grad, + grads=grads, + exp_avgs=exp_avgs, + exp_avg_sqs=exp_avg_sqs, + exp_avg_diffs=exp_avg_diffs, + neg_pre_grads=neg_pre_grads, + beta1=beta1, + beta2=beta2, + beta3=beta3, + bias_correction1=bias_correction1, + bias_correction2=bias_correction2, + bias_correction3_sqrt=math.sqrt(bias_correction3), + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + no_prox=group['no_prox'], + clip_global_grad_norm=clip_global_grad_norm, + ) + + if group['foreach']: + _multi_tensor_adan(**kwargs) + else: + _single_tensor_adan(**kwargs) + + return loss + + +def _single_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + exp_avg_diff = exp_avg_diffs[i] + neg_grad_or_diff = neg_pre_grads[i] + + grad.mul_(clip_global_grad_norm) + + # for memory saving, we use `neg_grad_or_diff` + # to get some temp variable in a inplace way + neg_grad_or_diff.add_(grad) + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t + exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, + alpha=1 - beta2) # diff_t + + neg_grad_or_diff.mul_(beta2).add_(grad) + exp_avg_sq.mul_(beta3).addcmul_(neg_grad_or_diff, + neg_grad_or_diff, + value=1 - beta3) # n_t + + denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps) + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + param.mul_(1 - lr * weight_decay) + param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + else: + param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + param.div_(1 + lr * weight_decay) + + neg_grad_or_diff.zero_().add_(grad, alpha=-1.0) + + +def _multi_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + if len(params) == 0: + return + + torch._foreach_mul_(grads, clip_global_grad_norm) + + # for memory saving, we use `neg_pre_grads` + # to get some temp variable in a inplace way + torch._foreach_add_(neg_pre_grads, grads) + + torch._foreach_mul_(exp_avgs, beta1) + torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t + + torch._foreach_mul_(exp_avg_diffs, beta2) + torch._foreach_add_(exp_avg_diffs, neg_pre_grads, + alpha=1 - beta2) # diff_t + + torch._foreach_mul_(neg_pre_grads, beta2) + torch._foreach_add_(neg_pre_grads, grads) + torch._foreach_mul_(exp_avg_sqs, beta3) + torch._foreach_addcmul_(exp_avg_sqs, + neg_pre_grads, + neg_pre_grads, + value=1 - beta3) # n_t + + denom = torch._foreach_sqrt(exp_avg_sqs) + torch._foreach_div_(denom, bias_correction3_sqrt) + torch._foreach_add_(denom, eps) + + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + 
torch._foreach_mul_(params, 1 - lr * weight_decay) + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, + exp_avg_diffs, + denom, + value=-step_size_diff) + else: + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, + exp_avg_diffs, + denom, + value=-step_size_diff) + torch._foreach_div_(params, 1 + lr * weight_decay) + torch._foreach_zero_(neg_pre_grads) + torch._foreach_add_(neg_pre_grads, grads, alpha=-1.0) \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/proj/configs/__init__.py b/3DPortraitGAN_pyramid/proj/configs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/3DPortraitGAN_pyramid/proj/configs/evaluation_config.py b/3DPortraitGAN_pyramid/proj/configs/evaluation_config.py new file mode 100644 index 0000000..16b621d --- /dev/null +++ b/3DPortraitGAN_pyramid/proj/configs/evaluation_config.py @@ -0,0 +1 @@ +evaluated_methods = ['e4e', 'SG2', 'SG2Plus'] \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/proj/configs/global_config.py b/3DPortraitGAN_pyramid/proj/configs/global_config.py new file mode 100644 index 0000000..c3cdaa4 --- /dev/null +++ b/3DPortraitGAN_pyramid/proj/configs/global_config.py @@ -0,0 +1,12 @@ +## Device +cuda_visible_devices = '0' +device = 'cuda:0' + +## Logs +training_step = 1 +image_rec_result_log_snapshot = 100 +pivotal_training_steps = 0 +model_snapshot_interval = 400 + +## Run name to be updated during PTI +run_name = 'test_pti' diff --git a/3DPortraitGAN_pyramid/proj/configs/hyperparameters.py b/3DPortraitGAN_pyramid/proj/configs/hyperparameters.py new file mode 100644 index 0000000..f5b2d01 --- /dev/null +++ b/3DPortraitGAN_pyramid/proj/configs/hyperparameters.py @@ -0,0 +1,28 @@ +## Architechture +lpips_type = 'alex' +first_inv_type = 'w' +optim_type = 'adam' + +## Locality regularization +latent_ball_num_of_samples = 1 +locality_regularization_interval = 1 +use_locality_regularization = False +regulizer_l2_lambda = 0.1 +regulizer_lpips_lambda = 0.1 +regulizer_alpha = 30 + +## Loss +pt_l2_lambda = 1 +pt_lpips_lambda = 1 + +## Steps +LPIPS_value_threshold = 0.06 +max_pti_steps = 350 +first_inv_steps = 450 +max_images_to_invert = 30 + +## Optimization +pti_learning_rate = 15e-4 #3e-4 +first_inv_lr = 5e-3 +train_batch_size = 1 +use_last_w_pivots = False diff --git a/3DPortraitGAN_pyramid/proj/projector/camera_utils.py b/3DPortraitGAN_pyramid/proj/projector/camera_utils.py new file mode 100644 index 0000000..d97a59b --- /dev/null +++ b/3DPortraitGAN_pyramid/proj/projector/camera_utils.py @@ -0,0 +1,145 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +Helper functions for constructing camera parameter matrices. Primarily used in visualization and inference scripts. +""" + +import math + +import torch +import torch.nn as nn + +from training.volumetric_rendering import math_utils + +class GaussianCameraPoseSampler: + """ + Samples pitch and yaw from a Gaussian distribution and returns a camera pose. 
+ Camera is specified as looking at the origin. + If horizontal and vertical stddev (specified in radians) are zero, gives a + deterministic camera pose with yaw=horizontal_mean, pitch=vertical_mean. + The coordinate system is specified with y-up, z-forward, x-left. + Horizontal mean is the azimuthal angle (rotation around y axis) in radians, + vertical mean is the polar angle (angle from the y axis) in radians. + A point along the z-axis has azimuthal_angle=0, polar_angle=pi/2. + Example: + For a camera pose looking at the origin with the camera at position [0, 0, 1]: + cam2world = GaussianCameraPoseSampler.sample(math.pi/2, math.pi/2, radius=1) + """ + + @staticmethod + def sample(horizontal_mean, vertical_mean, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'): + h = torch.randn((batch_size, 1), device=device) * horizontal_stddev + horizontal_mean + v = torch.randn((batch_size, 1), device=device) * vertical_stddev + vertical_mean + v = torch.clamp(v, 1e-5, math.pi - 1e-5) + + theta = h + v = v / math.pi + phi = torch.arccos(1 - 2*v) + + camera_origins = torch.zeros((batch_size, 3), device=device) + + camera_origins[:, 0:1] = radius*torch.sin(phi) * torch.cos(math.pi-theta) + camera_origins[:, 2:3] = radius*torch.sin(phi) * torch.sin(math.pi-theta) + camera_origins[:, 1:2] = radius*torch.cos(phi) + + forward_vectors = math_utils.normalize_vecs(-camera_origins) + return create_cam2world_matrix(forward_vectors, camera_origins) + + +class LookAtPoseSampler: + """ + Same as GaussianCameraPoseSampler, except the + camera is specified as looking at 'lookat_position', a 3-vector. + Example: + For a camera pose looking at the origin with the camera at position [0, 0, 1]: + cam2world = LookAtPoseSampler.sample(math.pi/2, math.pi/2, torch.tensor([0, 0, 0]), radius=1) + """ + + @staticmethod + def sample(horizontal_mean, vertical_mean, lookat_position, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'): + h = torch.randn((batch_size, 1), device=device) * horizontal_stddev + horizontal_mean + v = torch.randn((batch_size, 1), device=device) * vertical_stddev + vertical_mean + v = torch.clamp(v, 1e-5, math.pi - 1e-5) + + theta = h + v = v / math.pi + phi = torch.arccos(1 - 2*v) + + camera_origins = torch.zeros((batch_size, 3), device=device) + + camera_origins[:, 0:1] = radius*torch.sin(phi) * torch.cos(math.pi-theta) + camera_origins[:, 2:3] = radius*torch.sin(phi) * torch.sin(math.pi-theta) + camera_origins[:, 1:2] = radius*torch.cos(phi) + + # forward_vectors = math_utils.normalize_vecs(-camera_origins) + forward_vectors = math_utils.normalize_vecs(lookat_position - camera_origins) + return create_cam2world_matrix(forward_vectors, camera_origins) + +class UniformCameraPoseSampler: + """ + Same as GaussianCameraPoseSampler, except the + pose is sampled from a uniform distribution with range +-[horizontal/vertical]_stddev. 
+ Example: + For a batch of random camera poses looking at the origin with yaw sampled from [-pi/2, +pi/2] radians: + cam2worlds = UniformCameraPoseSampler.sample(math.pi/2, math.pi/2, horizontal_stddev=math.pi/2, radius=1, batch_size=16) + """ + + @staticmethod + def sample(horizontal_mean, vertical_mean, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'): + h = (torch.rand((batch_size, 1), device=device) * 2 - 1) * horizontal_stddev + horizontal_mean + v = (torch.rand((batch_size, 1), device=device) * 2 - 1) * vertical_stddev + vertical_mean + v = torch.clamp(v, 1e-5, math.pi - 1e-5) + + theta = h + v = v / math.pi + phi = torch.arccos(1 - 2*v) + + camera_origins = torch.zeros((batch_size, 3), device=device) + + camera_origins[:, 0:1] = radius*torch.sin(phi) * torch.cos(math.pi-theta) + camera_origins[:, 2:3] = radius*torch.sin(phi) * torch.sin(math.pi-theta) + camera_origins[:, 1:2] = radius*torch.cos(phi) + + forward_vectors = math_utils.normalize_vecs(-camera_origins) + return create_cam2world_matrix(forward_vectors, camera_origins) + +def create_cam2world_matrix(forward_vector, origin): + """ + Takes in the direction the camera is pointing and the camera origin and returns a cam2world matrix. + Works on batches of forward_vectors, origins. Assumes y-axis is up and that there is no camera roll. + """ + + forward_vector = math_utils.normalize_vecs(forward_vector) + up_vector = torch.tensor([0, 1, 0], dtype=torch.float, device=origin.device).expand_as(forward_vector) + + right_vector = -math_utils.normalize_vecs(torch.cross(up_vector, forward_vector, dim=-1)) + up_vector = math_utils.normalize_vecs(torch.cross(forward_vector, right_vector, dim=-1)) + + rotation_matrix = torch.eye(4, device=origin.device).unsqueeze(0).repeat(forward_vector.shape[0], 1, 1) + rotation_matrix[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), axis=-1) + + translation_matrix = torch.eye(4, device=origin.device).unsqueeze(0).repeat(forward_vector.shape[0], 1, 1) + translation_matrix[:, :3, 3] = origin + cam2world = (translation_matrix @ rotation_matrix)[:, :, :] + assert(cam2world.shape[1:] == (4, 4)) + return cam2world + + +def FOV_to_intrinsics(fov_degrees, device='cpu'): + """ + Creates a 3x3 camera intrinsics matrix from the camera field of view, specified in degrees. + Note the intrinsics are returned as normalized by image size, rather than in pixel units. + Assumes principal point is at image center. + """ + + focal_length = float(1 / (math.tan(fov_degrees * 3.14159 / 360) * 1.414)) + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + return intrinsics \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/proj/projector/w_projector.py b/3DPortraitGAN_pyramid/proj/projector/w_projector.py new file mode 100644 index 0000000..d3f2f5e --- /dev/null +++ b/3DPortraitGAN_pyramid/proj/projector/w_projector.py @@ -0,0 +1,189 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. 
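The projectors below condition on the 25-dimensional camera label used throughout the repository: a flattened 4x4 cam2world matrix concatenated with a flattened, normalized 3x3 intrinsics matrix. A minimal sketch built from the samplers defined above; the pivot, radius, and FOV mirror values used in optimize_trigrid.py and are illustrative defaults, not requirements:

```python
import torch
from camera_utils import LookAtPoseSampler, FOV_to_intrinsics

device = torch.device('cuda')
cam_pivot = torch.tensor([0, 0.0649, 0], device=device)          # look-at point used in this repo
cam2world = LookAtPoseSampler.sample(3.14159 / 2, 3.14159 / 2,    # frontal yaw / pitch (radians)
                                     cam_pivot, radius=2.7, device=device)
intrinsics = FOV_to_intrinsics(12.447863, device=device)          # normalized 3x3 intrinsics
# 16 cam2world entries + 9 intrinsics entries = 25-dim conditioning vector c
c = torch.cat([cam2world.reshape(-1, 16), intrinsics.reshape(-1, 9)], dim=1)
assert c.shape == (1, 25)
```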
+ +"""Project given image to the latent space of pretrained network pickle.""" + +import copy +import os +import numpy as np +import torch +import torch.nn.functional as F +from tqdm import tqdm +import dnnlib +import PIL +from camera_utils import LookAtPoseSampler + +def project( + G, + c, + p, + outdir, + target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution + *, + num_steps=1000, + w_avg_samples=10000, + initial_learning_rate=0.01, + initial_noise_factor=0.05, + lr_rampdown_length=0.25, + lr_rampup_length=0.05, + noise_ramp_length=0.75, + regularize_noise_weight=1e5, + verbose=False, + device: torch.device, + initial_w=None, + image_log_step=100, + w_name: str, + no_sr = False +): + os.makedirs(f'{outdir}/{w_name}_w', exist_ok=True) + outdir = f'{outdir}/{w_name}_w' + assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution) + + def logprint(*args): + if verbose: + print(*args) + + G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float() # type: ignore + + # Compute w stats. + w_avg_path = './w_avg.npy' + w_std_path = './w_std.npy' + if (not os.path.exists(w_avg_path)) or (not os.path.exists(w_std_path)): + print(f'Computing W midpoint and stddev using {w_avg_samples} samples...') + z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) + # c_samples = c.repeat(w_avg_samples, 1) + + # use avg look at point + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + cam2world_pose = LookAtPoseSampler.sample(3.14 / 2, 3.14 / 2, camera_lookat_point, + radius=2.7, device=device) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c_samples = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c_samples = c_samples.repeat(w_avg_samples, 1) + p_samples = p.repeat(w_avg_samples, 1) + w_samples = G.mapping(torch.from_numpy(z_samples).to(device), c_samples, p_samples, ) # [N, L, C] + w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] + w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] + # print('save w_avg to ./w_avg.npy') + # np.save('./w_avg.npy',w_avg) + w_avg_tensor = torch.from_numpy(w_avg).cuda() + w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 + + # np.save(w_avg_path, w_avg) + # np.save(w_std_path, w_std) + else: + # w_avg = np.load(w_avg_path) + # w_std = np.load(w_std_path) + raise Exception(' ') + + # z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) + # c_samples = c.repeat(w_avg_samples, 1) + # w_samples = G.mapping(torch.from_numpy(z_samples).to(device), c_samples) # [N, L, C] + # w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] + # w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] + # w_avg_tensor = torch.from_numpy(w_avg).cuda() + # w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 + + start_w = initial_w if initial_w is not None else w_avg + + # Setup noise inputs. + noise_bufs = {name: buf for (name, buf) in G.backbone.synthesis.named_buffers() if 'noise_const' in name} + + # Load VGG16 feature detector. + #url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt' + url = './models/vgg16.pt' + with dnnlib.util.open_url(url) as f: + vgg16 = torch.jit.load(f).eval().to(device) + + # Features for target image. 
+ target_images = target.unsqueeze(0).to(device).to(torch.float32) + if target_images.shape[2] > 256: + target_images = F.interpolate(target_images, size=(256, 256), mode='area') + target_features = vgg16(target_images, resize_images=False, return_lpips=True) + + start_w = np.repeat(start_w, G.backbone.mapping.num_ws, axis=1) + w_opt = torch.tensor(start_w, dtype=torch.float32, device=device, + requires_grad=True) # pylint: disable=not-callable + + optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), + lr=0.1) + + # Init noise. + for buf in noise_bufs.values(): + buf[:] = torch.randn_like(buf) + buf.requires_grad = True + + for step in tqdm(range(num_steps)): + + # Learning rate schedule. + t = step / num_steps + w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2 + lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length) + lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi) + lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length) + lr = initial_learning_rate * lr_ramp + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + # Synth images from opt_w. + w_noise = torch.randn_like(w_opt) * w_noise_scale + ws = (w_opt + w_noise) + # synth_images = G.synthesis(ws,c, noise_mode='const')['image'] + if no_sr: + synth_images = G.synthesis(ws, c=c, neural_rendering_resolution = 256, noise_mode='const', apply_def=True, pose_params=p)['image_raw'] + assert synth_images.shape[2] == 256 + else: + synth_images = G.synthesis(ws, c=c, noise_mode='const', apply_def=True, pose_params=p)['image'] + + if step % image_log_step == 0 or step == num_steps - 1: + with torch.no_grad(): + vis_img = (synth_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + + PIL.Image.fromarray(vis_img[0].cpu().numpy(), 'RGB').save(f'{outdir}/{step}.png') + + # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. + synth_images = (synth_images + 1) * (255 / 2) + if synth_images.shape[2] > 256: + synth_images = F.interpolate(synth_images, size=(256, 256), mode='area') + + # Features for synth images. + synth_features = vgg16(synth_images, resize_images=False, return_lpips=True) + dist = (target_features - synth_features).square().sum() + + # Noise regularization. + reg_loss = 0.0 + for v in noise_bufs.values(): + noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d() + while True: + reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2 + reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2 + if noise.shape[2] <= 8: + break + noise = F.avg_pool2d(noise, kernel_size=2) + loss = dist + reg_loss * regularize_noise_weight + + # if step % 10 == 0: + # with torch.no_grad(): + # print({f'step {step}, first projection _{w_name}': loss.detach().cpu()}) + + # Step + optimizer.zero_grad(set_to_none=True) + loss.backward() + optimizer.step() + logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}') + + # Normalize noise. + with torch.no_grad(): + for buf in noise_bufs.values(): + buf -= buf.mean() + buf *= buf.square().mean().rsqrt() + + del G + return w_opt \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/proj/projector/w_projector_with_pose_optim.py b/3DPortraitGAN_pyramid/proj/projector/w_projector_with_pose_optim.py new file mode 100644 index 0000000..69b4b6f --- /dev/null +++ b/3DPortraitGAN_pyramid/proj/projector/w_projector_with_pose_optim.py @@ -0,0 +1,206 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. 
All rights reserved. +# +# NVIDIA CORPORATION and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION is strictly prohibited. + +"""Project given image to the latent space of pretrained network pickle.""" + +import copy +import os +import numpy as np +import torch +import torch.nn.functional as F +from tqdm import tqdm +import dnnlib +import PIL +from camera_utils import LookAtPoseSampler + +def project( + G, + c, + p, + outdir, + target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution + *, + num_steps=1000, + w_avg_samples=10000, + initial_learning_rate=0.01, + initial_noise_factor=0.05, + lr_rampdown_length=0.25, + lr_rampup_length=0.05, + noise_ramp_length=0.75, + regularize_noise_weight=1e5, + verbose=False, + device: torch.device, + initial_w=None, + image_log_step=100, + w_name: str, + no_sr = False +): + os.makedirs(f'{outdir}/{w_name}_w', exist_ok=True) + outdir = f'{outdir}/{w_name}_w' + assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution) + + def logprint(*args): + if verbose: + print(*args) + + G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float() # type: ignore + + # Compute w stats. + w_avg_path = './w_avg.npy' + w_std_path = './w_std.npy' + if (not os.path.exists(w_avg_path)) or (not os.path.exists(w_std_path)): + print(f'Computing W midpoint and stddev using {w_avg_samples} samples...') + z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) + # c_samples = c.repeat(w_avg_samples, 1) + + # use avg look at point + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + cam2world_pose = LookAtPoseSampler.sample(3.14 / 2, 3.14 / 2, camera_lookat_point, + radius=2.7, device=device) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c_samples = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c_samples = c_samples.repeat(w_avg_samples, 1) + p_samples = p.repeat(w_avg_samples, 1) + w_samples = G.mapping(torch.from_numpy(z_samples).to(device), c_samples, p_samples, ) # [N, L, C] + w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] + w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] + # print('save w_avg to ./w_avg.npy') + # np.save('./w_avg.npy',w_avg) + w_avg_tensor = torch.from_numpy(w_avg).cuda() + w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 + + # np.save(w_avg_path, w_avg) + # np.save(w_std_path, w_std) + else: + # w_avg = np.load(w_avg_path) + # w_std = np.load(w_std_path) + raise Exception(' ') + + # z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) + # c_samples = c.repeat(w_avg_samples, 1) + # w_samples = G.mapping(torch.from_numpy(z_samples).to(device), c_samples) # [N, L, C] + # w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] + # w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] + # w_avg_tensor = torch.from_numpy(w_avg).cuda() + # w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 + + start_w = initial_w if initial_w is not None else w_avg + + # Setup noise inputs. 
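+    # The per-layer noise_const buffers of the StyleGAN2 backbone are optimized jointly with w
+    # (and, in this variant, with the body pose p); they are re-randomized below, penalized for
+    # spatial autocorrelation, and re-normalized after every optimization step.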
+ noise_bufs = {name: buf for (name, buf) in G.backbone.synthesis.named_buffers() if 'noise_const' in name} + + # Load VGG16 feature detector. + #url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt' + url = './models/vgg16.pt' + with dnnlib.util.open_url(url) as f: + vgg16 = torch.jit.load(f).eval().to(device) + + # Features for target image. + target_images = target.unsqueeze(0).to(device).to(torch.float32) + if target_images.shape[2] > 256: + target_images = F.interpolate(target_images, size=(256, 256), mode='area') + target_features = vgg16(target_images, resize_images=False, return_lpips=True) + + start_w = np.repeat(start_w, G.backbone.mapping.num_ws, axis=1) + w_opt = torch.tensor(start_w, dtype=torch.float32, device=device, + requires_grad=True) # pylint: disable=not-callable + + p_opt = p.requires_grad_(True) + + params = [{'params': w_opt, 'lr': 0.1}, + {'params': list(noise_bufs.values()), 'lr': 0.1}, + {'params': p_opt, 'lr': 0.002} + ] + + optimizer = torch.optim.Adam(params, betas=(0.9, 0.999)) + + # Init noise. + for buf in noise_bufs.values(): + buf[:] = torch.randn_like(buf) + buf.requires_grad = True + + for step in tqdm(range(num_steps)): + + # Learning rate schedule. + t = step / num_steps + w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2 + lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length) + lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi) + lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length) + lr = initial_learning_rate * lr_ramp + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + # Synth images from opt_w. + w_noise = torch.randn_like(w_opt) * w_noise_scale + ws = (w_opt + w_noise) + # synth_images = G.synthesis(ws,c, noise_mode='const')['image'] + if no_sr: + synth_images = G.synthesis(ws, c=c, neural_rendering_resolution = 256, noise_mode='const', apply_def=True, pose_params=p_opt)['image_raw'] + assert synth_images.shape[2] == 256 + else: + synth_images = G.synthesis(ws, c=c, noise_mode='const', apply_def=True, pose_params=p_opt)['image'] + + if step % image_log_step == 0 or step == num_steps - 1: + with torch.no_grad(): + vis_img = (synth_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + + PIL.Image.fromarray(vis_img[0].cpu().numpy(), 'RGB').save(f'{outdir}/{step}.png') + + if step == num_steps - 1: + frontal_c = torch.tensor([[1.0000e+00, 1.0505e-09, 4.3685e-08, -1.1805e-07, 0.0000e+00, + -9.9951e-01, 2.4033e-02, -1.1805e-07, 4.3714e-08, -2.4033e-02, + -9.9951e-01, 2.6992e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 1.0000e+00, 6.7287e+00, 0.0000e+00, 5.0000e-01, 0.0000e+00, + 6.7287e+00, 5.0000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+00]], device=device, dtype=torch.float32) + + synth_images = G.synthesis(ws, c=frontal_c, noise_mode='const', apply_def=False, pose_params=None)['image'] + vis_img = (synth_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8) + PIL.Image.fromarray(vis_img[0].cpu().numpy(), 'RGB').save(f'{outdir}/canonical.png') + + # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. + synth_images = (synth_images + 1) * (255 / 2) + if synth_images.shape[2] > 256: + synth_images = F.interpolate(synth_images, size=(256, 256), mode='area') + + # Features for synth images. 
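+        # Projection loss = squared LPIPS feature distance to the target plus the noise
+        # autocorrelation penalty accumulated below, weighted by regularize_noise_weight.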
+ synth_features = vgg16(synth_images, resize_images=False, return_lpips=True) + dist = (target_features - synth_features).square().sum() + + # Noise regularization. + reg_loss = 0.0 + for v in noise_bufs.values(): + noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d() + while True: + reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2 + reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2 + if noise.shape[2] <= 8: + break + noise = F.avg_pool2d(noise, kernel_size=2) + loss = dist + reg_loss * regularize_noise_weight + + # if step % 10 == 0: + # with torch.no_grad(): + # print({f'step {step}, first projection _{w_name}': loss.detach().cpu()}) + + # Step + optimizer.zero_grad(set_to_none=True) + loss.backward() + optimizer.step() + logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}') + + # Normalize noise. + with torch.no_grad(): + for buf in noise_bufs.values(): + buf -= buf.mean() + buf *= buf.square().mean().rsqrt() + + del G + return w_opt,p_opt \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/pyramid_trigrid_visualizer.py b/3DPortraitGAN_pyramid/pyramid_trigrid_visualizer.py new file mode 100644 index 0000000..c09afed --- /dev/null +++ b/3DPortraitGAN_pyramid/pyramid_trigrid_visualizer.py @@ -0,0 +1,321 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import click +import os + +import multiprocessing +import numpy as np +import imgui +import dnnlib +from gui_utils import imgui_window +from gui_utils import imgui_utils +from gui_utils import gl_utils +from gui_utils import text_utils +from viz import renderer +from viz import pickle_widget +from viz import pyramid_trigrid_widget +from viz import performance_widget +from viz import capture_widget +from viz import backbone_cache_widget +from viz import layer_widget +from viz import pose_widget +# from viz import body_pose_widget +from viz import zoom_widget +from viz import conditioning_pose_widget +from viz import render_type_widget +from viz import render_depth_sample_widget + +#---------------------------------------------------------------------------- + +class Visualizer(imgui_window.ImguiWindow): + def __init__(self, capture_dir=None): + super().__init__(title='Cat Machine', window_width=3840, window_height=2160) + + # Internals. + self._last_error_print = None + self._async_renderer = AsyncRenderer() + self._defer_rendering = 0 + self._tex_img = None + self._tex_obj = None + + # Widget interface. + self.args = dnnlib.EasyDict() + self.result = dnnlib.EasyDict() + self.pane_w = 0 + self.label_w = 0 + self.button_w = 0 + + # Widgets. 
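+        # Each widget below draws its own controls into the left pane and contributes its
+        # settings to self.args, which draw_frame() forwards to the (possibly asynchronous) renderer.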
+ self.pickle_widget = pickle_widget.PickleWidget(self) + self.pyramid_trigrid_widget = pyramid_trigrid_widget.PyramidTrigridWidget(self) + self.perf_widget = performance_widget.PerformanceWidget(self) + self.capture_widget = capture_widget.CaptureWidget(self) + self.backbone_cache_widget = backbone_cache_widget.BackboneCacheWidget(self) + self.layer_widget = layer_widget.LayerWidget(self) + self.pose_widget = pose_widget.PoseWidget(self) + # self.body_pose_widget = body_pose_widget.BodyPoseWidget(self) + self.zoom_widget = zoom_widget.ZoomWidget(self) + self.conditioning_pose_widget = conditioning_pose_widget.ConditioningPoseWidget(self) + self.render_type_widget = render_type_widget.RenderTypeWidget(self) + self.render_depth_sample_widget = render_depth_sample_widget.RenderDepthSampleWidget(self) + + if capture_dir is not None: + self.capture_widget.path = capture_dir + + # Initialize window. + self.set_position(0, 0) + self._adjust_font_size() + self.skip_frame() # Layout may change after first frame. + + def close(self): + super().close() + if self._async_renderer is not None: + self._async_renderer.close() + self._async_renderer = None + + def add_recent_pickle(self, pkl, ignore_errors=False): + self.pickle_widget.add_recent(pkl, ignore_errors=ignore_errors) + + def load_pickle(self, pkl, ignore_errors=False): + self.pickle_widget.load(pkl, ignore_errors=ignore_errors) + + def print_error(self, error): + error = str(error) + if error != self._last_error_print: + print('\n' + error + '\n') + self._last_error_print = error + + def defer_rendering(self, num_frames=1): + self._defer_rendering = max(self._defer_rendering, num_frames) + + def clear_result(self): + self._async_renderer.clear_result() + + def set_async(self, is_async): + if is_async != self._async_renderer.is_async: + self._async_renderer.set_async(is_async) + self.clear_result() + if 'image' in self.result: + self.result.message = 'Switching rendering process...' + self.defer_rendering() + + def _adjust_font_size(self): + old = self.font_size + self.set_font_size(min(self.content_width / 120, self.content_height / 60)) + if self.font_size != old: + self.skip_frame() # Layout changed. + + def draw_frame(self): + self.begin_frame() + self.args = dnnlib.EasyDict() + self.pane_w = self.font_size * 50 + self.button_w = self.font_size * 5 + self.label_w = round(self.font_size * 5.5) + + # Detect mouse dragging in the result area. + dragging, dx, dy = imgui_utils.drag_hidden_window('##result_area', x=self.pane_w, y=0, width=self.content_width-self.pane_w, height=self.content_height) + if dragging: + self.pose_widget.drag(dx, dy) + + # Begin control pane. + imgui.set_next_window_position(0, 0) + imgui.set_next_window_size(self.pane_w, self.content_height) + imgui.begin('##control_pane', closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE)) + + # Widgets. 
+ expanded, _visible = imgui_utils.collapsing_header('Network & latent', default=True) + self.pickle_widget(expanded) + self.pyramid_trigrid_widget(expanded) + self.pose_widget(expanded) + self.zoom_widget(expanded) + self.conditioning_pose_widget(expanded) + # self.body_pose_widget(expanded) + self.render_type_widget(expanded) + self.render_depth_sample_widget(expanded) + expanded, _visible = imgui_utils.collapsing_header('Performance & capture', default=True) + self.perf_widget(expanded) + self.capture_widget(expanded) + expanded, _visible = imgui_utils.collapsing_header('Layers & channels', default=True) + self.backbone_cache_widget(expanded) + self.layer_widget(expanded) + + # Render. + if self.is_skipping_frames(): + pass + elif self._defer_rendering > 0: + self._defer_rendering -= 1 + elif self.args.pkl is not None: + self._async_renderer.set_args(**self.args) + result = self._async_renderer.get_result() + if result is not None: + self.result = result + + # Display. + max_w = self.content_width - self.pane_w + max_h = self.content_height + pos = np.array([self.pane_w + max_w / 2, max_h / 2]) + if 'image' in self.result: + if self._tex_img is not self.result.image: + self._tex_img = self.result.image + if self._tex_obj is None or not self._tex_obj.is_compatible(image=self._tex_img): + self._tex_obj = gl_utils.Texture(image=self._tex_img, bilinear=False, mipmap=False) + else: + self._tex_obj.update(self._tex_img) + zoom = min(max_w / self._tex_obj.width, max_h / self._tex_obj.height) + # print(zoom) + zoom = np.floor(zoom) if zoom >= 1 else zoom + # zoom = 1 + self._tex_obj.draw(pos=pos, zoom=zoom, align=0.5, rint=True) + if 'error' in self.result: + self.print_error(self.result.error) + if 'message' not in self.result: + self.result.message = str(self.result.error) + if 'message' in self.result: + tex = text_utils.get_texture(self.result.message, size=self.font_size, max_width=max_w, max_height=max_h, outline=2) + tex.draw(pos=pos, align=0.5, rint=True, color=1) + + # End frame. 
+ self._adjust_font_size() + imgui.end() + self.end_frame() + +#---------------------------------------------------------------------------- + +class AsyncRenderer: + def __init__(self): + self._closed = False + self._is_async = False + self._cur_args = None + self._cur_result = None + self._cur_stamp = 0 + self._renderer_obj = None + self._args_queue = None + self._result_queue = None + self._process = None + + def close(self): + self._closed = True + self._renderer_obj = None + if self._process is not None: + self._process.terminate() + self._process = None + self._args_queue = None + self._result_queue = None + + @property + def is_async(self): + return self._is_async + + def set_async(self, is_async): + self._is_async = is_async + + def set_args(self, **args): + assert not self._closed + if args != self._cur_args: + if self._is_async: + self._set_args_async(**args) + else: + self._set_args_sync(**args) + self._cur_args = args + + def _set_args_async(self, **args): + if self._process is None: + self._args_queue = multiprocessing.Queue() + self._result_queue = multiprocessing.Queue() + try: + multiprocessing.set_start_method('spawn') + except RuntimeError: + pass + self._process = multiprocessing.Process(target=self._process_fn, args=(self._args_queue, self._result_queue), daemon=True) + self._process.start() + self._args_queue.put([args, self._cur_stamp]) + + def _set_args_sync(self, **args): + if self._renderer_obj is None: + self._renderer_obj = renderer.Renderer() + self._cur_result = self._renderer_obj.render(**args) + + def get_result(self): + assert not self._closed + if self._result_queue is not None: + while self._result_queue.qsize() > 0: + result, stamp = self._result_queue.get() + if stamp == self._cur_stamp: + self._cur_result = result + return self._cur_result + + def clear_result(self): + assert not self._closed + self._cur_args = None + self._cur_result = None + self._cur_stamp += 1 + + @staticmethod + def _process_fn(args_queue, result_queue): + renderer_obj = renderer.Renderer() + cur_args = None + cur_stamp = None + while True: + args, stamp = args_queue.get() + while args_queue.qsize() > 0: + args, stamp = args_queue.get() + if args != cur_args or stamp != cur_stamp: + result = renderer_obj.render(**args) + if 'error' in result: + result.error = renderer.CapturedException(result.error) + result_queue.put([result, stamp]) + cur_args = args + cur_stamp = stamp + +#---------------------------------------------------------------------------- + +@click.command() +@click.argument('pkls', metavar='PATH', nargs=-1) +@click.option('--capture-dir', help='Where to save screenshot captures', metavar='PATH', default=None) +@click.option('--browse-dir', help='Specify model path for the \'Browse...\' button', metavar='PATH') +def main( + pkls, + capture_dir, + browse_dir +): + """Interactive model visualizer. + + Optional PATH argument can be used specify which .pkl file to load. + """ + viz = Visualizer(capture_dir=capture_dir) + + if browse_dir is not None: + viz.pickle_widget.search_dirs = [browse_dir] + + # List pickles. 
+ pretrained = [ + 'https://api.ngc.nvidia.com/v2/models/nvidia/research/eg3d/versions/1/files/ffhq512-128.pkl', + 'https://api.ngc.nvidia.com/v2/models/nvidia/research/eg3d/versions/1/files/afhqcats512-128.pkl', + 'https://api.ngc.nvidia.com/v2/models/nvidia/research/eg3d/versions/1/files/ffhqrebalanced512-64.pkl', + 'https://api.ngc.nvidia.com/v2/models/nvidia/research/eg3d/versions/1/files/ffhqrebalanced512-128.pkl', + 'https://api.ngc.nvidia.com/v2/models/nvidia/research/eg3d/versions/1/files/shapenetcars128-64.pkl', + ] + + # Populate recent pickles list with pretrained model URLs. + for url in pretrained: + viz.add_recent_pickle(url) + + # Run. + while not viz.should_close(): + viz.draw_frame() + viz.close() + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + main() + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/run_inversion_with_pose_optimization.py b/3DPortraitGAN_pyramid/run_inversion_with_pose_optimization.py new file mode 100644 index 0000000..17ed6b6 --- /dev/null +++ b/3DPortraitGAN_pyramid/run_inversion_with_pose_optimization.py @@ -0,0 +1,273 @@ +import glob + +import numpy as np +import dnnlib +import legacy +from proj.projector import w_projector_with_pose_optim +from proj.configs import global_config, hyperparameters +from PIL import Image +import torch +import json +import os +from torch_utils.ops import upfirdn2d +from training.dual_discriminator import filtered_resizing + + +# ---------------------------------------------------------------------------- +class Space_Regulizer: + def __init__(self, original_G, lpips_net): + self.original_G = original_G + self.morphing_regulizer_alpha = hyperparameters.regulizer_alpha + self.lpips_loss = lpips_net + + def get_morphed_w_code(self, new_w_code, fixed_w): + interpolation_direction = new_w_code - fixed_w + interpolation_direction_norm = torch.norm(interpolation_direction, p=2) + direction_to_move = hyperparameters.regulizer_alpha * interpolation_direction / interpolation_direction_norm + result_w = fixed_w + direction_to_move + self.morphing_regulizer_alpha * fixed_w + (1 - self.morphing_regulizer_alpha) * new_w_code + + return result_w + + def get_image_from_ws(self, w_codes, G): + return torch.cat([G.synthesis(w_code, noise_mode='none', force_fp32=True) for w_code in w_codes]) + + def ball_holder_loss_lazy(self, new_G, num_of_sampled_latents, w_batch, use_wandb=False): + loss = 0.0 + + z_samples = np.random.randn(num_of_sampled_latents, self.original_G.z_dim) + w_samples = self.original_G.mapping(torch.from_numpy(z_samples).to(global_config.device), None, + truncation_psi=0.5) + territory_indicator_ws = [self.get_morphed_w_code(w_code.unsqueeze(0), w_batch) for w_code in w_samples] + + for w_code in territory_indicator_ws: + new_img = new_G.synthesis(w_code, noise_mode='none', force_fp32=True) + with torch.no_grad(): + old_img = self.original_G.synthesis(w_code, noise_mode='none', force_fp32=True) + + if hyperparameters.regulizer_l2_lambda > 0: + l2_loss_val = l2_loss.l2_loss(old_img, new_img) + + loss += l2_loss_val * hyperparameters.regulizer_l2_lambda + + if hyperparameters.regulizer_lpips_lambda > 0: + loss_lpips = 
self.lpips_loss(old_img, new_img) + loss_lpips = torch.mean(torch.squeeze(loss_lpips)) + + loss += loss_lpips * hyperparameters.regulizer_lpips_lambda + + return loss / len(territory_indicator_ws) + + def space_regulizer_loss(self, new_G, w_batch, use_wandb): + ret_val = self.ball_holder_loss_lazy(new_G, hyperparameters.latent_ball_num_of_samples, w_batch, use_wandb) + return ret_val + + +def l2_loss(real_images, generated_images): + l2_criterion = torch.nn.MSELoss(reduction='mean') + loss = l2_criterion(real_images, generated_images) + return loss + + +def toogle_grad(model, flag=True): + for p in model.parameters(): + p.requires_grad = flag + + +def run_D_pose_prediction(img, c, blur_sigma=0, D=None): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div( + blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + pose, _ = D.predict_pose(img, c) + return pose + + +def get_pose_params(real_img, real_seg, real_c, D=None, neural_rendering_resolution=None, blur_sigma=None, + resample_filter=None, filter_mode=None): + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=resample_filter, + filter_mode=filter_mode) + + real_seg_raw = filtered_resizing(real_seg, size=neural_rendering_resolution, f=resample_filter, + filter_mode=filter_mode) + + if True: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw, 'image_mask': real_seg_raw} + + # get pose_params from real image + real_img_tmp_image = real_img['image'].detach().requires_grad_(True) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(True) + real_img_tmp_image_mask = real_img['image_mask'].detach().requires_grad_(True) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw, + 'image_mask': real_img_tmp_image_mask} + + predicted_real_pose = run_D_pose_prediction(real_img_tmp, real_c, blur_sigma=blur_sigma, D=D) + return predicted_real_pose + + +if __name__ == '__main__': + # input_dir + import argparse + import random + parser = argparse.ArgumentParser() + + parser.add_argument('--model_pkl', type=str,default='./models/model.pkl') + parser.add_argument('--pose_prediction_kwargs_path', type=str,default='./models/model.json') + parser.add_argument('--inversion_name', type=str) + parser.add_argument('--with_pose_optim', action='store_true') + parser.add_argument('--test_data_dir', type=str, default='../test_data') + + opt = parser.parse_args() + model_pkl = opt.model_pkl + pose_prediction_kwargs_path = opt.pose_prediction_kwargs_path + inversion_name = opt.inversion_name + with_pose_optim = opt.with_pose_optim + test_data_dir = opt.test_data_dir + + sampling_multiplier = 2.0 + + print('Loading networks from "%s"...' 
% model_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(model_pkl) as f: + resume_data = legacy.load_network_pkl(f) + print('resume_data', resume_data.keys()) + G = resume_data['G_ema'].to(device) # type: ignore + D = resume_data['D_ema'].to(device) # type: ignore + + G.set_batch_size(1) + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier) + G.rendering_kwargs['depth_resolution_importance'] = int( + G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier) + + G.rendering_kwargs['ray_start'] = 2.35 + + print('Loading pose_prediction_kwargs from "%s"...' % pose_prediction_kwargs_path) + with open(pose_prediction_kwargs_path, 'r') as f: + pose_predict_kwargs = json.load(f) + + # teaser + todo = glob.glob(os.path.join(test_data_dir, '*')) + + for sub_dir in todo: + print(sub_dir) + samples_dir = os.path.join(sub_dir, 'samples') + new_crop_samples_dir = os.path.join(sub_dir, 'samples_new_crop') + new_crop_mask_samples_dir = os.path.join(sub_dir, 'samples_new_crop/mask') + res_dir = os.path.join(sub_dir, f'samples_new_crop/{inversion_name}') + if os.path.exists(new_crop_samples_dir) and len( + glob.glob(os.path.join(new_crop_samples_dir, f'{inversion_name}/*/inversion.pt'))) == 0: + input_dir = new_crop_samples_dir + + + # ---------------------------------------------------------------------------- + + + camera_path = os.path.join(input_dir, 'result.json') + print('Loading camera pose from "%s"...' % camera_path) + with open(camera_path, 'r') as f: + camera_poses = json.load(f) + + print('Loading images from "%s"...' % input_dir) + image_base_dir = os.path.join(input_dir, 'aligned_images') + mask_base_path = os.path.join(input_dir, 'mask') + + images = glob.glob(os.path.join(image_base_dir, '*')) + + print('images', images) + + for image_path in images[:1]: + image_name = os.path.basename(image_path) + mask_path = os.path.join(mask_base_path, image_name) + print('projecting image: "%s"' % image_path) + image = Image.open(image_path).convert('RGB') + mask = Image.open(mask_path) + # image_name = os.path.basename(paths_config.input_data_path) + camera_pose = camera_poses[image_name] + cam2world_pose = torch.tensor(camera_pose['camera_pose'], device=device) + focal_length = 6.5104166 + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + + with torch.no_grad(): + image_p = image.resize((G.img_resolution, G.img_resolution), Image.BILINEAR) + image_p = np.array(image_p) + image_p = image_p.transpose(2, 0, 1) + image_p = torch.tensor(image_p, device=device) + image_p = image_p.to(device).to(torch.float32) / 127.5 - 1 + image_p = image_p.unsqueeze(0) + + mask_p = np.array(mask)[:, :, None] + mask_p = mask_p.transpose(2, 0, 1) + mask_p = torch.tensor(mask_p, device=device) + mask_p = mask_p.to(device).to(torch.float32) / 255.0 + mask_p = mask_p.unsqueeze(0) + + resample_filter = pose_predict_kwargs['resample_filter'] + resample_filter = torch.tensor(resample_filter, device=device).to(torch.float32) + + p = get_pose_params(real_img=image_p, + real_seg=mask_p, + real_c=c, + D=D, + neural_rendering_resolution=pose_predict_kwargs['neural_rendering_resolution'], + blur_sigma=pose_predict_kwargs['blur_sigma'], + resample_filter=resample_filter, + filter_mode=pose_predict_kwargs['filter_mode']) + + # ---------------------------------------------------------------------------- + image_name = 
image_name[:-4] + # coach = SingleIDCoach(None, False, c, p) + # coach.train(image=image, image_name=image_name[:-4]) + w_path_dir = res_dir + os.makedirs(w_path_dir, exist_ok=True) + use_ball_holder = True + # for fname, image in tqdm(self.data_loader): + # image_name = fname[0] + + embedding_dir = f'{w_path_dir}/{image_name}' + os.makedirs(embedding_dir, exist_ok=True) + image.save(f'{embedding_dir}/original.png') + w_pivot = None + # if hyperparameters.use_last_w_pivots: + # w_pivot = self.load_inversions(w_path_dir, image_name) + # elif not hyperparameters.use_last_w_pivots or w_pivot is None: + # w_pivot = self.calc_inversions(image, image_name) + # image = torch.tensor(image, device=device) + if os.path.exists(f'{embedding_dir}/0.pt'): + w_pivot = torch.load(f'{embedding_dir}/0.pt').to(global_config.device) + else: + image = image.resize((G.img_resolution, G.img_resolution), Image.BILINEAR) + image = np.array(image) + image = image.transpose(2, 0, 1) + image = torch.tensor(image, device=device) + image = image.to(device).to(torch.float32) / 127.5 - 1 + image = image.unsqueeze(0) + id_image = torch.squeeze((image.to(global_config.device) + 1) / 2) * 255 + # id_image = torch.squeeze((image.to(global_config.device) + 1) / 2) * 255 + w_pivot, p_opt = w_projector_with_pose_optim.project(G, c, p, embedding_dir, id_image, + device=torch.device('cuda'), + w_avg_samples=600, + num_steps=500, + w_name=image_name, no_sr=False) + # w_pivot = w_pivot.detach().clone().to(global_config.device) + w_pivot = w_pivot.to(global_config.device) + torch.save(w_pivot, f'{embedding_dir}/inversion.pt') + + print('p_opt:', p_opt.shape) + + poses = { + 'pose': p_opt.detach().cpu().numpy().tolist(), + 'camera_pose': c.cpu().numpy().tolist() + } + with open(f'{embedding_dir}/pose.json', 'w') as f: + json.dump(poses, f) + + diff --git a/3DPortraitGAN_pyramid/run_trigrid_gen.py b/3DPortraitGAN_pyramid/run_trigrid_gen.py new file mode 100644 index 0000000..2e0c2e2 --- /dev/null +++ b/3DPortraitGAN_pyramid/run_trigrid_gen.py @@ -0,0 +1,264 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
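+
+# This script consumes the inversion.pt latents written by run_inversion_with_pose_optimization.py
+# (found under <test_data_dir>/*/samples_new_crop/<inversion_name>/*/) and exports the generator's
+# tri-grid features for each of them as inversion_trigrid.pkl next to the latent.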
+ +"""Generate lerp videos using pretrained network pickle.""" + +import os +import re +from typing import List, Optional, Tuple, Union + +import click +import dnnlib +import imageio +import numpy as np +import scipy.interpolate +import torch +from tqdm import tqdm +import mrcfile + +import legacy + +from camera_utils import LookAtPoseSampler +from torch_utils import misc +import pickle +#---------------------------------------------------------------------------- + +def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True): + batch_size, channels, img_h, img_w = img.shape + if grid_w is None: + grid_w = batch_size // grid_h + assert batch_size == grid_w * grid_h + if float_to_uint8: + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) + img = img.reshape(grid_h, grid_w, channels, img_h, img_w) + img = img.permute(2, 0, 3, 1, 4) + img = img.reshape(channels, grid_h * img_h, grid_w * img_w) + if chw_to_hwc: + img = img.permute(1, 2, 0) + if to_numpy: + img = img.cpu().numpy() + return img + +def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0): + # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle + voxel_origin = np.array(voxel_origin) - cube_length/2 + voxel_size = cube_length / (N - 1) + + overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor()) + samples = torch.zeros(N ** 3, 3) + + # transform first 3 columns + # to be the x, y, z index + samples[:, 2] = overall_index % N + samples[:, 1] = (overall_index.float() / N) % N + samples[:, 0] = ((overall_index.float() / N) / N) % N + + # transform first 3 columns + # to be the x, y, z coordinate + samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2] + samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1] + samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0] + + num_samples = N ** 3 + + return samples.unsqueeze(0), voxel_origin, voxel_size + +#---------------------------------------------------------------------------- + +def gen_trigrids(G, output: str, latend_code_path, shuffle_seed=None, w_frames=60*4, kind='cubic', + grid_dims=(1,1), num_keyframes=None, wraps=2, psi=1, truncation_cutoff=14, + cfg='FFHQ', image_mode='image', gen_shapes=False, device=torch.device('cuda'), + **video_kwargs): + grid_w = grid_dims[0] + grid_h = grid_dims[1] + + + + + camera_lookat_point = torch.tensor([0, 0.0649, 0], device=device) + # zs = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])).to(device) + ws = torch.load(latend_code_path, map_location=device) + print('ws shape', ws.shape) + + cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, camera_lookat_point, radius=2.7, device=device) + focal_length = 6.5104166 # if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) + c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) + c = c.repeat(len(ws), 1) + + p = torch.zeros([len(ws), 6], device=device) + + + trigrids,ws = G.gen_planes(ws=ws, c=c[0:1], noise_mode='const') + print('trigrids shape: ') + save_files = {'ws': ws} + for k in trigrids.keys(): + print(k, trigrids[k].shape) + save_files[f'trigrids_{trigrids[k].shape[3]}'] = trigrids[k] + print('save as', f'trigrids_{k}') + # save trigrids to output pkl file + with open(output, 'wb') as f: + pickle.dump(save_files, f) + + print(save_files.keys()) + + + + + + + + 
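+# A downstream consumer can read the file saved above with plain pickle: the dict holds the
+# broadcast latents under 'ws' and one tensor per pyramid level under keys of the form
+# 'trigrids_<n>', where <n> is taken from each tensor's shape[3]. Illustrative sketch, with a
+# hypothetical path:
+#   with open('inversion_trigrid.pkl', 'rb') as f:
+#       data = pickle.load(f)
+#   ws = data['ws']
+#   planes = {k: v for k, v in data.items() if k.startswith('trigrids_')}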
+#---------------------------------------------------------------------------- + +def parse_range(s: Union[str, List[int]]) -> List[int]: + '''Parse a comma separated list of numbers or ranges and return a list of ints. + + Example: '1,2,5-10' returns [1, 2, 5, 6, 7] + ''' + if isinstance(s, list): return s + ranges = [] + range_re = re.compile(r'^(\d+)-(\d+)$') + for p in s.split(','): + if m := range_re.match(p): + ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) + else: + ranges.append(int(p)) + return ranges + +#---------------------------------------------------------------------------- + +def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]: + '''Parse a 'M,N' or 'MxN' integer tuple. + + Example: + '4x2' returns (4,2) + '0,1' returns (0,1) + ''' + if isinstance(s, tuple): return s + if m := re.match(r'^(\d+)[x,](\d+)$', s): + return (int(m.group(1)), int(m.group(2))) + raise ValueError(f'cannot parse tuple {s}') + +#---------------------------------------------------------------------------- + +@click.command() +@click.option('--network', 'network_pkl', help='Network pickle filename',default='./models/model.pkl') +@click.option('--test_data_dir', help='Network pickle filename',default='../test_data') + +@click.option('--shuffle-seed', type=int, help='Random seed to use for shuffling seed order', default=None) +@click.option('--grid', type=parse_tuple, help='Grid width/height, e.g. \'4x3\' (default: 1x1)', default=(1,1)) +@click.option('--num-keyframes', type=int, help='Number of seeds to interpolate through. If not specified, determine based on the length of the seeds array given by --seeds.', default=None) +@click.option('--w-frames', type=int, help='Number of frames to interpolate between latents', default=120) +@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) +@click.option('--trunc-cutoff', 'truncation_cutoff', type=int, help='Truncation cutoff', default=14, show_default=True) +@click.option('--reload_modules', help='Overload persistent modules?', type=bool, required=False, metavar='BOOL', default=False, show_default=True) +@click.option('--cfg', help='Config', type=click.Choice(['FFHQ', 'AFHQ', 'Shapenet']), required=False, metavar='STR', default='FFHQ', show_default=True) +@click.option('--image_mode', help='Image mode', type=click.Choice(['image', 'image_depth', 'image_raw']), required=False, metavar='STR', default='image', show_default=True) +@click.option('--sample_mult', 'sampling_multiplier', type=float, help='Multiplier for depth sampling in volume rendering', default=2, show_default=True) +@click.option('--nrr', type=int, help='Neural rendering resolution override', default=None, show_default=True) +@click.option('--inversion_name', type=str, required=True) + +def generate_images( + network_pkl: str, + test_data_dir: str, + shuffle_seed: Optional[int], + truncation_psi: float, + truncation_cutoff: int, + grid: Tuple[int,int], + num_keyframes: Optional[int], + w_frames: int, + reload_modules: bool, + cfg: str, + image_mode: str, + sampling_multiplier: float, + nrr: Optional[int], + inversion_name: str, +): + """Render a latent vector interpolation video. + + Examples: + + \b + # Render a 4x2 grid of interpolations for seeds 0 through 31. 
+ python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-31 --grid=4x2 \\ + --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl + + Animation length and seed keyframes: + + The animation length is either determined based on the --seeds value or explicitly + specified using the --num-keyframes option. + + When num keyframes is specified with --num-keyframes, the output video length + will be 'num_keyframes*w_frames' frames. + + If --num-keyframes is not specified, the number of seeds given with + --seeds must be divisible by grid size W*H (--grid). In this case the + output video length will be '# seeds/(w*h)*w_frames' frames. + """ + + + + print('Loading networks from "%s"...' % network_pkl) + device = torch.device('cuda') + with dnnlib.util.open_url(network_pkl) as f: + G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore + + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier) + G.rendering_kwargs['depth_resolution_importance'] = int(G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier) + + G.rendering_kwargs['ray_start'] = 2.35 + + + + G.set_batch_size(1) + + from training.smpl_triplane import TriPlaneGenerator + + if True: + print("Reloading Modules!") + G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device) + misc.copy_params_and_buffers(G, G_new, require_all=True) + G_new.neural_rendering_resolution = G.neural_rendering_resolution + G_new.rendering_kwargs = G.rendering_kwargs + G = G_new + + import glob + + for path in glob.glob(os.path.join(test_data_dir,f'*/samples_new_crop/{inversion_name}/*/inversion.pt')): + outdir = os.path.dirname(path) + latend_code = f'{outdir}/inversion.pt' + + if not os.path.exists(outdir): + os.makedirs(outdir, exist_ok=True) + if nrr is not None: G.neural_rendering_resolution = nrr + + if truncation_cutoff == 0: + truncation_psi = 1.0 # truncation cutoff of 0 means no truncation anyways + if truncation_psi == 1.0: + truncation_cutoff = 14 # no truncation so doesn't matter where we cutoff + + + output = os.path.join(outdir, f'inversion_trigrid.pkl') + if os.path.exists(output): + print(f'Already exists: {output}') + continue + gen_trigrids(G=G, output=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, + latend_code_path=latend_code, shuffle_seed=shuffle_seed, psi=truncation_psi, + truncation_cutoff=truncation_cutoff, cfg=cfg, image_mode=image_mode) + + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + generate_images() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/segmentation_example.py b/3DPortraitGAN_pyramid/segmentation_example.py new file mode 100644 index 0000000..6d8628f --- /dev/null +++ b/3DPortraitGAN_pyramid/segmentation_example.py @@ -0,0 +1,108 @@ +import torch +from PIL import Image +from torchvision.transforms import ToPILImage +import glob +import os +from torchvision.models.segmentation import deeplabv3_resnet101 +from torchvision import transforms, utils +from tqdm import tqdm +import tempfile +import dnnlib +from torch_utils import training_stats +from torch_utils import custom_ops + +from torch.utils.data import dataset + + +class 
LoadData(dataset.Dataset): + + def __init__(self, base_path): + super(LoadData, self).__init__() + #base_path = 'F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion/output/2023-10-28-with-inversion-initialization/samples_new_crop' + paths = sorted(glob.glob(f'{base_path}/aligned_images/*')) + os.makedirs(f'{base_path}/mask', exist_ok=True) + self.paths = paths + + def __getitem__(self,idx): + image_path =self.paths[idx] + image = Image.open(image_path) + # Define the preprocessing transformation + preprocess = transforms.Compose([ + transforms.ToTensor() + ]) + + # Apply the transformation to the image + input_tensor = preprocess(image) + + return input_tensor, image_path + + def __len__(self): + return len(self.paths) + + +def get_mask(model, batch, cid): + normalized_batch = transforms.functional.normalize( + batch, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)) + output = model(normalized_batch)['out'] + # sem_classes = [ + # '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + # 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', + # 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' + # ] + # sem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)} + # cid = sem_class_to_idx['car'] + + normalized_masks = torch.nn.functional.softmax(output, dim=1) + + boolean_car_masks = (normalized_masks.argmax(1) == cid) + return boolean_car_masks.float() + + +def get_and_save_mask( device,base_path): + # data loder + batch_size = 8 + dataset = torch.utils.data.DataLoader( + dataset=LoadData(base_path), + batch_size=batch_size, + shuffle=False + ) + for input_tensor, image_paths in tqdm(dataset): + input_batch = input_tensor.to(device) # batxh, 3, 256, 256 + + # load segmentation net + seg_net = deeplabv3_resnet101(pretrained=True, progress=False).to(device) + seg_net.requires_grad_(False) + seg_net.eval() + + # 15 means human mask + mask = get_mask(seg_net, input_batch, 15) + print(mask.shape) # 16, 256, 256 + + mask = mask.unsqueeze(1) # 16, 1, 256, 256 + + for i in range(mask.shape[0]): + # Squeeze the tensor to remove unnecessary dimensions and convert to PIL Image + mask0 = mask[i:i+1] + mask_squeezed = torch.squeeze(mask0) + mask_image = ToPILImage()(mask_squeezed) + image_path = image_paths[i] + # Save as PNG + mask_path = image_path.replace('aligned_images', 'mask') + # /home/zjucadjin/dataset/pexels-256-new/0000000053/0000053992.png + # mask_dir = mask_path[:-len('/0000053992.png')] + # os.makedirs(mask_dir, exist_ok=True) + mask_image.save(mask_path) + + +def run(rank,base_path): + rank = rank + device = torch.device('cuda', rank) + get_and_save_mask(device,base_path) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--image_path', type=str, required=True) + parser.add_argument('--mask_path', type=str, required=True) + run(0, parser.parse_args().base_path) \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/shape_utils.py b/3DPortraitGAN_pyramid/shape_utils.py new file mode 100644 index 0000000..e16f6cc --- /dev/null +++ b/3DPortraitGAN_pyramid/shape_utils.py @@ -0,0 +1,124 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + + +""" +Utils for extracting 3D shapes using marching cubes. Based on code from DeepSDF (Park et al.) + +Takes as input an .mrc file and extracts a mesh. + +Ex. + python shape_utils.py my_shape.mrc +Ex. + python shape_utils.py myshapes_directory --level=12 +""" + + +import time +import plyfile +import glob +import logging +import numpy as np +import os +import random +import torch +import torch.utils.data +import trimesh +import skimage.measure +import argparse +import mrcfile +from tqdm import tqdm + + +def convert_sdf_samples_to_ply( + numpy_3d_sdf_tensor, + voxel_grid_origin, + voxel_size, + ply_filename_out, + offset=None, + scale=None, + level=0.0 +): + """ + Convert sdf samples to .ply + :param pytorch_3d_sdf_tensor: a torch.FloatTensor of shape (n,n,n) + :voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid + :voxel_size: float, the size of the voxels + :ply_filename_out: string, path of the filename to save to + This function adapted from: https://github.com/RobotLocomotion/spartan + """ + start_time = time.time() + + verts, faces, normals, values = np.zeros((0, 3)), np.zeros((0, 3)), np.zeros((0, 3)), np.zeros(0) + # try: + verts, faces, normals, values = skimage.measure.marching_cubes( + numpy_3d_sdf_tensor, level=level, spacing=[voxel_size] * 3 + ) + # except: + # pass + + # transform from voxel coordinates to camera coordinates + # note x and y are flipped in the output of marching_cubes + mesh_points = np.zeros_like(verts) + mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0] + mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1] + mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2] + + # apply additional offset and scale + if scale is not None: + mesh_points = mesh_points / scale + if offset is not None: + mesh_points = mesh_points - offset + + # try writing to the ply file + + num_verts = verts.shape[0] + num_faces = faces.shape[0] + + verts_tuple = np.zeros((num_verts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")]) + + for i in range(0, num_verts): + verts_tuple[i] = tuple(mesh_points[i, :]) + + faces_building = [] + for i in range(0, num_faces): + faces_building.append(((faces[i, :].tolist(),))) + faces_tuple = np.array(faces_building, dtype=[("vertex_indices", "i4", (3,))]) + + el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex") + el_faces = plyfile.PlyElement.describe(faces_tuple, "face") + + ply_data = plyfile.PlyData([el_verts, el_faces]) + ply_data.write(ply_filename_out) + print(f"wrote to {ply_filename_out}") + + +def convert_mrc(input_filename, output_filename, isosurface_level=1): + with mrcfile.open(input_filename) as mrc: + convert_sdf_samples_to_ply(np.transpose(mrc.data, (2, 1, 0)), [0, 0, 0], 1, output_filename, level=isosurface_level) + +if __name__ == '__main__': + start_time = time.time() + parser = argparse.ArgumentParser() + parser.add_argument('input_mrc_path') + parser.add_argument('--level', type=float, default=10, help="The isosurface level for marching cubes") + args = parser.parse_args() + + if os.path.isfile(args.input_mrc_path) and args.input_mrc_path.split('.')[-1] == 'ply': + output_obj_path = args.input_mrc_path.split('.mrc')[0] + '.ply' + convert_mrc(args.input_mrc_path, output_obj_path, isosurface_level=1) + + 
print(f"{time.time() - start_time:02f} s") + else: + assert os.path.isdir(args.input_mrc_path) + + for mrc_path in tqdm(glob.glob(os.path.join(args.input_mrc_path, '*.mrc'))): + output_obj_path = mrc_path.split('.mrc')[0] + '.ply' + convert_mrc(mrc_path, output_obj_path, isosurface_level=args.level) \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/torch_utils/__init__.py b/3DPortraitGAN_pyramid/torch_utils/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# empty diff --git a/3DPortraitGAN_pyramid/torch_utils/custom_ops.py b/3DPortraitGAN_pyramid/torch_utils/custom_ops.py new file mode 100644 index 0000000..ed2524f --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/custom_ops.py @@ -0,0 +1,159 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import glob +import hashlib +import importlib +import os +import re +import shutil +import uuid + +import torch +import torch.utils.cpp_extension +from torch.utils.file_baton import FileBaton + +#---------------------------------------------------------------------------- +# Global options. + +verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full' + +#---------------------------------------------------------------------------- +# Internal helper funcs. + +def _find_compiler_bindir(): + patterns = [ + 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin', + ] + for pattern in patterns: + matches = sorted(glob.glob(pattern)) + if len(matches): + return matches[-1] + return None + +#---------------------------------------------------------------------------- + +def _get_mangled_gpu_name(): + name = torch.cuda.get_device_name().lower() + out = [] + for c in name: + if re.match('[a-z0-9_-]+', c): + out.append(c) + else: + out.append('-') + return ''.join(out) + +#---------------------------------------------------------------------------- +# Main entry point for compiling and loading C++/CUDA plugins. 
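+# The CUDA/C++ ops under torch_utils/ops (e.g. upfirdn2d, bias_act) typically call get_plugin()
+# on first use, passing their .cpp/.cu/.h sources; compiled modules are cached per source digest
+# and per GPU name, so repeated runs skip recompilation.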
+ +_cached_plugins = dict() + +def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs): + assert verbosity in ['none', 'brief', 'full'] + if headers is None: + headers = [] + if source_dir is not None: + sources = [os.path.join(source_dir, fname) for fname in sources] + headers = [os.path.join(source_dir, fname) for fname in headers] + + # Already cached? + if module_name in _cached_plugins: + return _cached_plugins[module_name] + + # Print status. + if verbosity == 'full': + print(f'Setting up PyTorch plugin "{module_name}"...') + elif verbosity == 'brief': + print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True) + verbose_build = (verbosity == 'full') + + # Compile and load. + try: # pylint: disable=too-many-nested-blocks + # Make sure we can find the necessary compiler binaries. + if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0: + compiler_bindir = _find_compiler_bindir() + if compiler_bindir is None: + raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".') + os.environ['PATH'] += ';' + compiler_bindir + + # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either + # break the build or unnecessarily restrict what's available to nvcc. + # Unset it to let nvcc decide based on what's available on the + # machine. + os.environ['TORCH_CUDA_ARCH_LIST'] = '' + + # Incremental build md5sum trickery. Copies all the input source files + # into a cached build directory under a combined md5 digest of the input + # source files. Copying is done only if the combined digest has changed. + # This keeps input file timestamps and filenames the same as in previous + # extension builds, allowing for fast incremental rebuilds. + # + # This optimization is done only in case all the source files reside in + # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR + # environment variable is set (we take this as a signal that the user + # actually cares about this.) + # + # EDIT: We now do it regardless of TORCH_EXTENSIOS_DIR, in order to work + # around the *.cu dependency bug in ninja config. + # + all_source_files = sorted(sources + headers) + all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files) + if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ): + + # Compute combined hash digest for all source files. + hash_md5 = hashlib.md5() + for src in all_source_files: + with open(src, 'rb') as f: + hash_md5.update(f.read()) + + # Select cached build directory name. + source_digest = hash_md5.hexdigest() + build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access + cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}') + + if not os.path.isdir(cached_build_dir): + tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}' + os.makedirs(tmpdir) + for src in all_source_files: + shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src))) + try: + os.replace(tmpdir, cached_build_dir) # atomic + except OSError: + # source directory already exists, delete tmpdir and its contents. + shutil.rmtree(tmpdir) + if not os.path.isdir(cached_build_dir): raise + + # Compile. 
+ cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources] + torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir, + verbose=verbose_build, sources=cached_sources, **build_kwargs) + else: + torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs) + + # Load. + module = importlib.import_module(module_name) + + except: + if verbosity == 'brief': + print('Failed!') + raise + + # Print status and add to cache dict. + if verbosity == 'full': + print(f'Done setting up PyTorch plugin "{module_name}".') + elif verbosity == 'brief': + print('Done.') + _cached_plugins[module_name] = module + return module + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/misc.py b/3DPortraitGAN_pyramid/torch_utils/misc.py new file mode 100644 index 0000000..2fc93df --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/misc.py @@ -0,0 +1,270 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import re +import contextlib +import numpy as np +import torch +import warnings +import dnnlib + +#---------------------------------------------------------------------------- +# Cached construction of constant tensors. Avoids CPU=>GPU copy when the +# same constant is used multiple times. + +_constant_cache = dict() + +def constant(value, shape=None, dtype=None, device=None, memory_format=None): + value = np.asarray(value) + if shape is not None: + shape = tuple(shape) + if dtype is None: + dtype = torch.get_default_dtype() + if device is None: + device = torch.device('cpu') + if memory_format is None: + memory_format = torch.contiguous_format + + key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format) + tensor = _constant_cache.get(key, None) + if tensor is None: + tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) + if shape is not None: + tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) + tensor = tensor.contiguous(memory_format=memory_format) + _constant_cache[key] = tensor + return tensor + +#---------------------------------------------------------------------------- +# Replace NaN/Inf with specified numerical values. + +try: + nan_to_num = torch.nan_to_num # 1.8.0a0 +except AttributeError: + def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin + assert isinstance(input, torch.Tensor) + if posinf is None: + posinf = torch.finfo(input.dtype).max + if neginf is None: + neginf = torch.finfo(input.dtype).min + assert nan == 0 + return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out) + +#---------------------------------------------------------------------------- +# Symbolic assert. 
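A small usage sketch for the two helpers above (illustrative values only; `misc` refers to this module imported as `torch_utils.misc`):

```python
import torch
from torch_utils import misc

# constant() caches by (value, shape, dtype, device, memory_format), so identical
# calls return the very same tensor object instead of rebuilding it.
a = misc.constant([0.0, 1.0], device=torch.device('cpu'))
b = misc.constant([0.0, 1.0], device=torch.device('cpu'))
assert a is b

# nan_to_num() behaves like torch.nan_to_num on recent PyTorch and falls back to
# the clamp-based implementation above on older versions.
x = torch.tensor([float('nan'), float('inf'), float('-inf'), 1.0])
print(misc.nan_to_num(x, posinf=10.0, neginf=-10.0))  # [0., 10., -10., 1.] up to formatting
```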
+ +try: + symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access +except AttributeError: + symbolic_assert = torch.Assert # 1.7.0 + +#---------------------------------------------------------------------------- +# Context manager to temporarily suppress known warnings in torch.jit.trace(). +# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672 + +@contextlib.contextmanager +def suppress_tracer_warnings(): + flt = ('ignore', None, torch.jit.TracerWarning, None, 0) + warnings.filters.insert(0, flt) + yield + warnings.filters.remove(flt) + +#---------------------------------------------------------------------------- +# Assert that the shape of a tensor matches the given list of integers. +# None indicates that the size of a dimension is allowed to vary. +# Performs symbolic assertion when used in torch.jit.trace(). + +def assert_shape(tensor, ref_shape): + if tensor.ndim != len(ref_shape): + raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}') + for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)): + if ref_size is None: + pass + elif isinstance(ref_size, torch.Tensor): + with suppress_tracer_warnings(): # as_tensor results are registered as constants + symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}') + elif isinstance(size, torch.Tensor): + with suppress_tracer_warnings(): # as_tensor results are registered as constants + symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}') + elif size != ref_size: + raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}') + +#---------------------------------------------------------------------------- +# Function decorator that calls torch.autograd.profiler.record_function(). + +def profiled_function(fn): + def decorator(*args, **kwargs): + with torch.autograd.profiler.record_function(fn.__name__): + return fn(*args, **kwargs) + decorator.__name__ = fn.__name__ + return decorator + +#---------------------------------------------------------------------------- +# Sampler for torch.utils.data.DataLoader that loops over the dataset +# indefinitely, shuffling items as it goes. + +class InfiniteSampler(torch.utils.data.Sampler): + def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5): + assert len(dataset) > 0 + assert num_replicas > 0 + assert 0 <= rank < num_replicas + assert 0 <= window_size <= 1 + super().__init__(dataset) + self.dataset = dataset + self.rank = rank + self.num_replicas = num_replicas + self.shuffle = shuffle + self.seed = seed + self.window_size = window_size + + def __iter__(self): + order = np.arange(len(self.dataset)) + rnd = None + window = 0 + if self.shuffle: + rnd = np.random.RandomState(self.seed) + rnd.shuffle(order) + window = int(np.rint(order.size * self.window_size)) + + idx = 0 + while True: + i = idx % order.size + if idx % self.num_replicas == self.rank: + yield order[i] + if window >= 2: + j = (i - rnd.randint(window)) % order.size + order[i], order[j] = order[j], order[i] + idx += 1 + +#---------------------------------------------------------------------------- +# Utilities for operating with torch.nn.Module parameters and buffers. 
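A hedged usage sketch for `InfiniteSampler` and `assert_shape()` defined above; the dataset and shapes are made up purely for illustration.

```python
import torch
from torch_utils import misc

images = torch.randn(10, 3, 64, 64)
dataset = torch.utils.data.TensorDataset(images)
sampler = misc.InfiniteSampler(dataset, rank=0, num_replicas=1, shuffle=True, seed=0)
loader = iter(torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=4))

# The sampler never raises StopIteration, so a training loop just keeps calling next().
for _ in range(3):
    (batch,) = next(loader)
    misc.assert_shape(batch, [4, 3, None, None])  # None = "any size" for that dimension
```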
+ +def params_and_buffers(module): + assert isinstance(module, torch.nn.Module) + return list(module.parameters()) + list(module.buffers()) + +def named_params_and_buffers(module): + assert isinstance(module, torch.nn.Module) + return list(module.named_parameters()) + list(module.named_buffers()) + +def copy_params_and_buffers(src_module, dst_module, require_all=False): + assert isinstance(src_module, torch.nn.Module) + assert isinstance(dst_module, torch.nn.Module) + src_tensors = dict(named_params_and_buffers(src_module)) + for name, tensor in named_params_and_buffers(dst_module): + assert (name in src_tensors) or (not require_all) + if name in src_tensors: + tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad) + else: + print(f'{name} is not in src_module; keeping its randomly initialized value!') + +#---------------------------------------------------------------------------- +# Context manager for easily enabling/disabling DistributedDataParallel +# synchronization. + +@contextlib.contextmanager +def ddp_sync(module, sync): + assert isinstance(module, torch.nn.Module) + if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel): + yield + else: + with module.no_sync(): + yield + +#---------------------------------------------------------------------------- +# Check DistributedDataParallel consistency across processes. + +def check_ddp_consistency(module, ignore_regex=None): + assert isinstance(module, torch.nn.Module) + for name, tensor in named_params_and_buffers(module): + fullname = type(module).__name__ + '.' + name + if ignore_regex is not None and re.fullmatch(ignore_regex, fullname): + continue + tensor = tensor.detach() + if tensor.is_floating_point(): + tensor = nan_to_num(tensor) + other = tensor.clone() + torch.distributed.broadcast(tensor=other, src=0) + assert (tensor == other).all(), fullname + +#---------------------------------------------------------------------------- +# Print summary table of module hierarchy. + +def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True): + assert isinstance(module, torch.nn.Module) + assert not isinstance(module, torch.jit.ScriptModule) + assert isinstance(inputs, (tuple, list)) + + # Register hooks. + entries = [] + nesting = [0] + def pre_hook(_mod, _inputs): + nesting[0] += 1 + def post_hook(mod, _inputs, outputs): + nesting[0] -= 1 + if nesting[0] <= max_nesting: + outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs] + outputs = [t for t in outputs if isinstance(t, torch.Tensor)] + entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs)) + hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()] + hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()] + + # Run module. + outputs = module(*inputs) + for hook in hooks: + hook.remove() + + # Identify unique outputs, parameters, and buffers. + tensors_seen = set() + for e in entries: + e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen] + e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen] + e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen] + tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs} + + # Filter out redundant entries. + if skip_redundant: + entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)] + + # Construct table.
+ rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']] + rows += [['---'] * len(rows[0])] + param_total = 0 + buffer_total = 0 + submodule_names = {mod: name for name, mod in module.named_modules()} + for e in entries: + name = '' if e.mod is module else submodule_names[e.mod] + param_size = sum(t.numel() for t in e.unique_params) + buffer_size = sum(t.numel() for t in e.unique_buffers) + output_shapes = [str(list(t.shape)) for t in e.outputs] + output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs] + rows += [[ + name + (':0' if len(e.outputs) >= 2 else ''), + str(param_size) if param_size else '-', + str(buffer_size) if buffer_size else '-', + (output_shapes + ['-'])[0], + (output_dtypes + ['-'])[0], + ]] + for idx in range(1, len(e.outputs)): + rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]] + param_total += param_size + buffer_total += buffer_size + rows += [['---'] * len(rows[0])] + rows += [['Total', str(param_total), str(buffer_total), '-', '-']] + + # Print table. + widths = [max(len(cell) for cell in column) for column in zip(*rows)] + print() + for row in rows: + print(' '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths))) + print() + return outputs + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/__init__.py b/3DPortraitGAN_pyramid/torch_utils/ops/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# empty diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.cpp b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.cpp new file mode 100644 index 0000000..ee6f6d0 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.cpp @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. 
+ */ + +#include +#include +#include +#include "bias_act.h" + +//------------------------------------------------------------------------ + +static bool has_same_layout(torch::Tensor x, torch::Tensor y) +{ + if (x.dim() != y.dim()) + return false; + for (int64_t i = 0; i < x.dim(); i++) + { + if (x.size(i) != y.size(i)) + return false; + if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) + return false; + } + return true; +} + +//------------------------------------------------------------------------ + +static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp) +{ + // Validate arguments. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); + TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); + TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); + TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); + TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); + TORCH_CHECK(b.dim() == 1, "b must have rank 1"); + TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); + TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); + TORCH_CHECK(grad >= 0, "grad must be non-negative"); + + // Validate layout. + TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); + TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); + TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); + TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); + TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); + + // Create output tensor. + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + torch::Tensor y = torch::empty_like(x); + TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); + + // Initialize CUDA kernel parameters. + bias_act_kernel_params p; + p.x = x.data_ptr(); + p.b = (b.numel()) ? b.data_ptr() : NULL; + p.xref = (xref.numel()) ? xref.data_ptr() : NULL; + p.yref = (yref.numel()) ? yref.data_ptr() : NULL; + p.dy = (dy.numel()) ? dy.data_ptr() : NULL; + p.y = y.data_ptr(); + p.grad = grad; + p.act = act; + p.alpha = alpha; + p.gain = gain; + p.clamp = clamp; + p.sizeX = (int)x.numel(); + p.sizeB = (int)b.numel(); + p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; + + // Choose CUDA kernel. + void* kernel; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] + { + kernel = choose_bias_act_kernel(p); + }); + TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); + + // Launch CUDA kernel. 
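    // Worked example (illustrative numbers, not from the original source): for
    // p.sizeX = 256*64*64 = 1,048,576 elements, the launch below uses
    // blockSize = 4*32 = 128 threads per block with each thread handling loopX = 4
    // elements, giving gridSize = (1,048,576 - 1) / (4*128) + 1 = 2048 blocks.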
+ p.loopX = 4; + int blockSize = 4 * 32; + int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; + void* args[] = {&p}; + AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); + return y; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("bias_act", &bias_act); +} + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.cu b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.cu new file mode 100644 index 0000000..71ca390 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.cu @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include "bias_act.h" + +//------------------------------------------------------------------------ +// Helpers. + +template struct InternalType; +template <> struct InternalType { typedef double scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; + +//------------------------------------------------------------------------ +// CUDA kernel. + +template +__global__ void bias_act_kernel(bias_act_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + int G = p.grad; + scalar_t alpha = (scalar_t)p.alpha; + scalar_t gain = (scalar_t)p.gain; + scalar_t clamp = (scalar_t)p.clamp; + scalar_t one = (scalar_t)1; + scalar_t two = (scalar_t)2; + scalar_t expRange = (scalar_t)80; + scalar_t halfExpRange = (scalar_t)40; + scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946; + scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717; + + // Loop over elements. + int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x; + for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x) + { + // Load. + scalar_t x = (scalar_t)((const T*)p.x)[xi]; + scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0; + scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0; + scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0; + scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one; + scalar_t yy = (gain != 0) ? yref / gain : 0; + scalar_t y = 0; + + // Apply bias. + ((G == 0) ? x : xref) += b; + + // linear + if (A == 1) + { + if (G == 0) y = x; + if (G == 1) y = x; + } + + // relu + if (A == 2) + { + if (G == 0) y = (x > 0) ? x : 0; + if (G == 1) y = (yy > 0) ? x : 0; + } + + // lrelu + if (A == 3) + { + if (G == 0) y = (x > 0) ? x : x * alpha; + if (G == 1) y = (yy > 0) ? x : x * alpha; + } + + // tanh + if (A == 4) + { + if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); } + if (G == 1) y = x * (one - yy * yy); + if (G == 2) y = x * (one - yy * yy) * (-two * yy); + } + + // sigmoid + if (A == 5) + { + if (G == 0) y = (x < -expRange) ? 
0 : one / (exp(-x) + one); + if (G == 1) y = x * yy * (one - yy); + if (G == 2) y = x * yy * (one - yy) * (one - two * yy); + } + + // elu + if (A == 6) + { + if (G == 0) y = (x >= 0) ? x : exp(x) - one; + if (G == 1) y = (yy >= 0) ? x : x * (yy + one); + if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one); + } + + // selu + if (A == 7) + { + if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one); + if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha); + if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha); + } + + // softplus + if (A == 8) + { + if (G == 0) y = (x > expRange) ? x : log(exp(x) + one); + if (G == 1) y = x * (one - exp(-yy)); + if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); } + } + + // swish + if (A == 9) + { + if (G == 0) + y = (x < -expRange) ? 0 : x / (exp(-x) + one); + else + { + scalar_t c = exp(xref); + scalar_t d = c + one; + if (G == 1) + y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d); + else + y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d); + yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain; + } + } + + // Apply gain. + y *= gain * dy; + + // Clamp. + if (clamp >= 0) + { + if (G == 0) + y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp; + else + y = (yref > -clamp & yref < clamp) ? y : 0; + } + + // Store. + ((T*)p.y)[xi] = (T)y; + } +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template void* choose_bias_act_kernel(const bias_act_kernel_params& p) +{ + if (p.act == 1) return (void*)bias_act_kernel; + if (p.act == 2) return (void*)bias_act_kernel; + if (p.act == 3) return (void*)bias_act_kernel; + if (p.act == 4) return (void*)bias_act_kernel; + if (p.act == 5) return (void*)bias_act_kernel; + if (p.act == 6) return (void*)bias_act_kernel; + if (p.act == 7) return (void*)bias_act_kernel; + if (p.act == 8) return (void*)bias_act_kernel; + if (p.act == 9) return (void*)bias_act_kernel; + return NULL; +} + +//------------------------------------------------------------------------ +// Template specializations. + +template void* choose_bias_act_kernel (const bias_act_kernel_params& p); +template void* choose_bias_act_kernel (const bias_act_kernel_params& p); +template void* choose_bias_act_kernel (const bias_act_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.h b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.h new file mode 100644 index 0000000..8994bfb --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +//------------------------------------------------------------------------ +// CUDA kernel parameters. 
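// Added note for readability: the `act` field below selects the activation evaluated by
// bias_act.cu (matching cuda_idx in bias_act.py): 1 = linear, 2 = relu, 3 = lrelu,
// 4 = tanh, 5 = sigmoid, 6 = elu, 7 = selu, 8 = softplus, 9 = swish. The `grad` field
// selects 0 = forward pass, 1 = first-order gradient pass, 2 = second-order gradient pass.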
+ +struct bias_act_kernel_params +{ + const void* x; // [sizeX] + const void* b; // [sizeB] or NULL + const void* xref; // [sizeX] or NULL + const void* yref; // [sizeX] or NULL + const void* dy; // [sizeX] or NULL + void* y; // [sizeX] + + int grad; + int act; + float alpha; + float gain; + float clamp; + + int sizeX; + int sizeB; + int stepB; + int loopX; +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template void* choose_bias_act_kernel(const bias_act_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.py b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.py new file mode 100644 index 0000000..b1f4d39 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/bias_act.py @@ -0,0 +1,211 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Custom PyTorch ops for efficient bias and activation.""" + +import os +import numpy as np +import torch +import dnnlib + +from .. import custom_ops +from .. import misc + +#---------------------------------------------------------------------------- + +activation_funcs = { + 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), + 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), + 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), + 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), + 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), + 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), + 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), + 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), + 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), +} + +#---------------------------------------------------------------------------- + +_plugin = None +_null_tensor = torch.empty([0]) + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='bias_act_plugin', + sources=['bias_act.cpp', 'bias_act.cu'], + headers=['bias_act.h'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], + ) + return True + +#---------------------------------------------------------------------------- + +def bias_act(x, b=None, dim=1, act='linear', alpha=None, 
gain=None, clamp=None, impl='cuda'): + r"""Fused bias and activation function. + + Adds bias `b` to activation tensor `x`, evaluates activation function `act`, + and scales the result by `gain`. Each of the steps is optional. In most cases, + the fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports first and second order gradients, + but not third order gradients. + + Args: + x: Input activation tensor. Can be of any shape. + b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type + as `x`. The shape must be known, and it must match the dimension of `x` + corresponding to `dim`. + dim: The dimension in `x` corresponding to the elements of `b`. + The value of `dim` is ignored if `b` is not specified. + act: Name of the activation function to evaluate, or `"linear"` to disable. + Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. + See `activation_funcs` for a full list. `None` is not allowed. + alpha: Shape parameter for the activation function, or `None` to use the default. + gain: Scaling factor for the output tensor, or `None` to use default. + See `activation_funcs` for the default scaling of each activation function. + If unsure, consider specifying 1. + clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable + the clamping (default). + impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). + + Returns: + Tensor of the same shape and datatype as `x`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b) + return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None): + """Slow reference implementation of `bias_act()` using standard TensorFlow ops. + """ + assert isinstance(x, torch.Tensor) + assert clamp is None or clamp >= 0 + spec = activation_funcs[act] + alpha = float(alpha if alpha is not None else spec.def_alpha) + gain = float(gain if gain is not None else spec.def_gain) + clamp = float(clamp if clamp is not None else -1) + + # Add bias. + if b is not None: + assert isinstance(b, torch.Tensor) and b.ndim == 1 + assert 0 <= dim < x.ndim + assert b.shape[0] == x.shape[dim] + x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) + + # Evaluate activation function. + alpha = float(alpha) + x = spec.func(x, alpha=alpha) + + # Scale by gain. + gain = float(gain) + if gain != 1: + x = x * gain + + # Clamp. + if clamp >= 0: + x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type + return x + +#---------------------------------------------------------------------------- + +_bias_act_cuda_cache = dict() + +def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None): + """Fast CUDA implementation of `bias_act()` using custom ops. + """ + # Parse arguments. + assert clamp is None or clamp >= 0 + spec = activation_funcs[act] + alpha = float(alpha if alpha is not None else spec.def_alpha) + gain = float(gain if gain is not None else spec.def_gain) + clamp = float(clamp if clamp is not None else -1) + + # Lookup from cache. 
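    # Worked example (illustrative call, not from the original code): bias_act(x, b, act='lrelu')
    # reaches this point with dim=1 and the lrelu defaults alpha=0.2, gain=sqrt(2), clamp=-1,
    # so the key becomes (1, 'lrelu', 0.2, 1.4142135623730951, -1.0); later calls with the same
    # configuration reuse the cached autograd Function class built below.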
+ key = (dim, act, alpha, gain, clamp) + if key in _bias_act_cuda_cache: + return _bias_act_cuda_cache[key] + + # Forward op. + class BiasActCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, b): # pylint: disable=arguments-differ + ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format + x = x.contiguous(memory_format=ctx.memory_format) + b = b.contiguous() if b is not None else _null_tensor + y = x + if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor: + y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp) + ctx.save_for_backward( + x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, + b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, + y if 'y' in spec.ref else _null_tensor) + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + dy = dy.contiguous(memory_format=ctx.memory_format) + x, b, y = ctx.saved_tensors + dx = None + db = None + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + dx = dy + if act != 'linear' or gain != 1 or clamp >= 0: + dx = BiasActCudaGrad.apply(dy, x, b, y) + + if ctx.needs_input_grad[1]: + db = dx.sum([i for i in range(dx.ndim) if i != dim]) + + return dx, db + + # Backward op. + class BiasActCudaGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ + ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format + dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp) + ctx.save_for_backward( + dy if spec.has_2nd_grad else _null_tensor, + x, b, y) + return dx + + @staticmethod + def backward(ctx, d_dx): # pylint: disable=arguments-differ + d_dx = d_dx.contiguous(memory_format=ctx.memory_format) + dy, x, b, y = ctx.saved_tensors + d_dy = None + d_x = None + d_b = None + d_y = None + + if ctx.needs_input_grad[0]: + d_dy = BiasActCudaGrad.apply(d_dx, x, b, y) + + if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]): + d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp) + + if spec.has_2nd_grad and ctx.needs_input_grad[2]: + d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim]) + + return d_dy, d_x, d_b, d_y + + # Add to cache. + _bias_act_cuda_cache[key] = BiasActCuda + return BiasActCuda + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/conv2d_gradfix.py b/3DPortraitGAN_pyramid/torch_utils/ops/conv2d_gradfix.py new file mode 100644 index 0000000..9a177cc --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/conv2d_gradfix.py @@ -0,0 +1,199 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +"""Custom replacement for `torch.nn.functional.conv2d` that supports +arbitrarily high order gradients with zero performance penalty.""" + +import contextlib +import torch + +# pylint: disable=redefined-builtin +# pylint: disable=arguments-differ +# pylint: disable=protected-access + +#---------------------------------------------------------------------------- + +enabled = False # Enable the custom op by setting this to true. +weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights. + +@contextlib.contextmanager +def no_weight_gradients(disable=True): + global weight_gradients_disabled + old = weight_gradients_disabled + if disable: + weight_gradients_disabled = True + yield + weight_gradients_disabled = old + +#---------------------------------------------------------------------------- + +def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + if _should_use_custom_op(input): + return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) + return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) + +def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): + if _should_use_custom_op(input): + return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) + return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) + +#---------------------------------------------------------------------------- + +def _should_use_custom_op(input): + assert isinstance(input, torch.Tensor) + if (not enabled) or (not torch.backends.cudnn.enabled): + return False + if input.device.type != 'cuda': + return False + return True + +def _tuple_of_ints(xs, ndim): + xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim + assert len(xs) == ndim + assert all(isinstance(x, int) for x in xs) + return xs + +#---------------------------------------------------------------------------- + +_conv2d_gradfix_cache = dict() +_null_tensor = torch.empty([0]) + +def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): + # Parse arguments. + ndim = 2 + weight_shape = tuple(weight_shape) + stride = _tuple_of_ints(stride, ndim) + padding = _tuple_of_ints(padding, ndim) + output_padding = _tuple_of_ints(output_padding, ndim) + dilation = _tuple_of_ints(dilation, ndim) + + # Lookup from cache. + key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) + if key in _conv2d_gradfix_cache: + return _conv2d_gradfix_cache[key] + + # Validate arguments. + assert groups >= 1 + assert len(weight_shape) == ndim + 2 + assert all(stride[i] >= 1 for i in range(ndim)) + assert all(padding[i] >= 0 for i in range(ndim)) + assert all(dilation[i] >= 0 for i in range(ndim)) + if not transpose: + assert all(output_padding[i] == 0 for i in range(ndim)) + else: # transpose + assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim)) + + # Helpers. 
+ common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) + def calc_output_padding(input_shape, output_shape): + if transpose: + return [0, 0] + return [ + input_shape[i + 2] + - (output_shape[i + 2] - 1) * stride[i] + - (1 - 2 * padding[i]) + - dilation[i] * (weight_shape[i + 2] - 1) + for i in range(ndim) + ] + + # Forward & backward. + class Conv2d(torch.autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias): + assert weight.shape == weight_shape + ctx.save_for_backward( + input if weight.requires_grad else _null_tensor, + weight if input.requires_grad else _null_tensor, + ) + ctx.input_shape = input.shape + + # Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere). + if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0): + a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1]) + b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1) + c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2) + c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1) + c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3) + return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) + + # General case => cuDNN. + if transpose: + return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) + return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + input_shape = ctx.input_shape + grad_input = None + grad_weight = None + grad_bias = None + + if ctx.needs_input_grad[0]: + p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape) + op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) + grad_input = op.apply(grad_output, weight, None) + assert grad_input.shape == input_shape + + if ctx.needs_input_grad[1] and not weight_gradients_disabled: + grad_weight = Conv2dGradWeight.apply(grad_output, input, weight) + assert grad_weight.shape == weight_shape + + if ctx.needs_input_grad[2]: + grad_bias = grad_output.sum([0, 2, 3]) + + return grad_input, grad_weight, grad_bias + + # Gradient with respect to the weights. + class Conv2dGradWeight(torch.autograd.Function): + @staticmethod + def forward(ctx, grad_output, input, weight): + ctx.save_for_backward( + grad_output if input.requires_grad else _null_tensor, + input if grad_output.requires_grad else _null_tensor, + ) + ctx.grad_output_shape = grad_output.shape + ctx.input_shape = input.shape + + # Simple 1x1 convolution => cuBLAS (on both Volta and Ampere). + if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0): + a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) + b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) + c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape) + return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) + + # General case => cuDNN. 
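            # Added note: output_mask=[False, True, False] requests only the weight gradient
            # from aten::convolution_backward (the mask entries correspond to grad_input,
            # grad_weight, grad_bias), which is why the result is indexed with [1].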
+ return torch.ops.aten.convolution_backward(grad_output=grad_output, input=input, weight=weight, bias_sizes=None, stride=stride, padding=padding, dilation=dilation, transposed=transpose, output_padding=output_padding, groups=groups, output_mask=[False, True, False])[1] + + + @staticmethod + def backward(ctx, grad2_grad_weight): + grad_output, input = ctx.saved_tensors + grad_output_shape = ctx.grad_output_shape + input_shape = ctx.input_shape + grad2_grad_output = None + grad2_input = None + + if ctx.needs_input_grad[0]: + grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) + assert grad2_grad_output.shape == grad_output_shape + + if ctx.needs_input_grad[1]: + p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape) + op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) + grad2_input = op.apply(grad_output, grad2_grad_weight, None) + assert grad2_input.shape == input_shape + + return grad2_grad_output, grad2_input + + _conv2d_gradfix_cache[key] = Conv2d + return Conv2d + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/conv2d_resample.py b/3DPortraitGAN_pyramid/torch_utils/ops/conv2d_resample.py new file mode 100644 index 0000000..d46f4dd --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/conv2d_resample.py @@ -0,0 +1,145 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""2D convolution with optional up/downsampling.""" + +import torch + +from .. import misc +from . import conv2d_gradfix +from . import upfirdn2d +from .upfirdn2d import _parse_padding +from .upfirdn2d import _get_filter_size + +#---------------------------------------------------------------------------- + +def _get_weight_shape(w): + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + shape = [int(sz) for sz in w.shape] + misc.assert_shape(w, shape) + return shape + +#---------------------------------------------------------------------------- + +def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True): + """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations. + """ + _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w) + + # Flip weight if requested. + # Note: conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False). + if not flip_weight and (kw > 1 or kh > 1): + w = w.flip([2, 3]) + + # Execute using conv2d_gradfix. + op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d + return op(x, w, stride=stride, padding=padding, groups=groups) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False): + r"""2D convolution with optional up/downsampling. 
+ + Padding is performed only once at the beginning, not between the operations. + + Args: + x: Input tensor of shape + `[batch_size, in_channels, in_height, in_width]`. + w: Weight tensor of shape + `[out_channels, in_channels//groups, kernel_height, kernel_width]`. + f: Low-pass filter for up/downsampling. Must be prepared beforehand by + calling upfirdn2d.setup_filter(). None = identity (default). + up: Integer upsampling factor (default: 1). + down: Integer downsampling factor (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + groups: Split input channels into N groups (default: 1). + flip_weight: False = convolution, True = correlation (default: True). + flip_filter: False = convolution, True = correlation (default: False). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + # Validate arguments. + assert isinstance(x, torch.Tensor) and (x.ndim == 4) + assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype) + assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32) + assert isinstance(up, int) and (up >= 1) + assert isinstance(down, int) and (down >= 1) + assert isinstance(groups, int) and (groups >= 1) + out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w) + fw, fh = _get_filter_size(f) + px0, px1, py0, py1 = _parse_padding(padding) + + # Adjust padding to account for up/downsampling. + if up > 1: + px0 += (fw + up - 1) // 2 + px1 += (fw - up) // 2 + py0 += (fh + up - 1) // 2 + py1 += (fh - up) // 2 + if down > 1: + px0 += (fw - down + 1) // 2 + px1 += (fw - down) // 2 + py0 += (fh - down + 1) // 2 + py1 += (fh - down) // 2 + + # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve. + if kw == 1 and kh == 1 and (down > 1 and up == 1): + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + return x + + # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample. + if kw == 1 and kh == 1 and (up > 1 and down == 1): + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + return x + + # Fast path: downsampling only => use strided convolution. + if down > 1 and up == 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight) + return x + + # Fast path: upsampling with optional downsampling => use transpose strided convolution. 
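    # Worked example (illustrative numbers only): with a 3x3 kernel (kw = kh = 3), up = 2,
    # down = 1, a 4-tap filter (fw = fh = 4) and padding = 0, the adjustment above yields
    # px0 = py0 = 2 and px1 = py1 = 1; the branch below then subtracts kw-1 = 2 and kw-up = 1,
    # leaving zero padding for the stride-2 conv_transpose2d and for the upfirdn2d call that
    # applies the low-pass filter with gain = up**2 = 4.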
+ if up > 1: + if groups == 1: + w = w.transpose(0, 1) + else: + w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw) + w = w.transpose(1, 2) + w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw) + px0 -= kw - 1 + px1 -= kw - up + py0 -= kh - 1 + py1 -= kh - up + pxt = max(min(-px0, -px1), 0) + pyt = max(min(-py0, -py1), 0) + x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight)) + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter) + if down > 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + + # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d. + if up == 1 and down == 1: + if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0: + return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight) + + # Fallback: Generic reference implementation. + x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + if down > 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.cpp b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.cpp new file mode 100644 index 0000000..4f55466 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.cpp @@ -0,0 +1,304 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include +#include +#include "filtered_lrelu.h" + +//------------------------------------------------------------------------ + +static std::tuple filtered_lrelu( + torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, + int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) +{ + // Set CUDA device. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + + // Validate arguments. 
+ TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); + TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); + TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); + TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); + TORCH_CHECK(x.numel() > 0, "x is empty"); + TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); + TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); + TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); + TORCH_CHECK(fu.numel() > 0, "fu is empty"); + TORCH_CHECK(fd.numel() > 0, "fd is empty"); + TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); + TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); + + // Figure out how much shared memory is available on the device. + int maxSharedBytes = 0; + AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); + int sharedKB = maxSharedBytes >> 10; + + // Populate enough launch parameters to check if a CUDA kernel exists. + filtered_lrelu_kernel_params p; + p.up = up; + p.down = down; + p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. + p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); + filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); + if (!test_spec.exec) + { + // No kernel found - return empty tensors and indicate missing kernel with return code of -1. + return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); + } + + // Input/output element size. + int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; + + // Input sizes. + int64_t xw = (int)x.size(3); + int64_t xh = (int)x.size(2); + int64_t fut_w = (int)fu.size(-1) - 1; + int64_t fut_h = (int)fu.size(0) - 1; + int64_t fdt_w = (int)fd.size(-1) - 1; + int64_t fdt_h = (int)fd.size(0) - 1; + + // Logical size of upsampled buffer. + int64_t cw = xw * up + (px0 + px1) - fut_w; + int64_t ch = xh * up + (py0 + py1) - fut_h; + TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); + TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); + + // Compute output size and allocate. + int64_t yw = (cw - fdt_w + (down - 1)) / down; + int64_t yh = (ch - fdt_h + (down - 1)) / down; + TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); + TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); + torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); + + // Allocate sign tensor. + torch::Tensor so; + torch::Tensor s = si; + bool readSigns = !!s.numel(); + int64_t sw_active = 0; // Active width of sign tensor. + if (writeSigns) + { + sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. + int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. + int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. 
+ TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); + s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); + } + else if (readSigns) + sw_active = s.size(3) << 2; + + // Validate sign tensor if in use. + if (readSigns || writeSigns) + { + TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); + TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); + TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); + TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); + TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); + TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); + } + + // Populate rest of CUDA kernel parameters. + p.x = x.data_ptr(); + p.y = y.data_ptr(); + p.b = b.data_ptr(); + p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; + p.fu = fu.data_ptr(); + p.fd = fd.data_ptr(); + p.pad0 = make_int2(px0, py0); + p.gain = gain; + p.slope = slope; + p.clamp = clamp; + p.flip = (flip_filters) ? 1 : 0; + p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); + p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. + p.sOfs = make_int2(sx, sy); + p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. + + // x, y, b strides are in bytes. + p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); + p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); + p.bStride = sz * b.stride(0); + + // fu, fd strides are in elements. + p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); + p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); + + // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. + bool index64b = false; + if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; + if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; + if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; + if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; + if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; + if (s.numel() > INT_MAX) index64b = true; + + // Choose CUDA kernel. + filtered_lrelu_kernel_spec spec = { 0 }; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] + { + if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. + { + // Choose kernel based on index type, datatype and sign read/write modes. 
+ if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + } + }); + TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. + + // Launch CUDA kernel. + void* args[] = {&p}; + int bx = spec.numWarps * 32; + int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; + int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; + int gz = p.yShape.z * p.yShape.w; + + // Repeat multiple horizontal tiles in a CTA? + if (spec.xrep) + { + p.tilesXrep = spec.xrep; + p.tilesXdim = gx; + + gx = (gx + p.tilesXrep - 1) / p.tilesXrep; + std::swap(gx, gy); + } + else + { + p.tilesXrep = 0; + p.tilesXdim = 0; + } + + // Launch filter setup kernel. + AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); + + // Copy kernels to constant memory. + if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + + // Set cache and shared memory configurations for main kernel. + AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); + if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? + AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); + AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); + + // Launch main kernel. + const int maxSubGz = 65535; // CUDA maximum for block z dimension. + for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. + { + p.blockZofs = zofs; + int subGz = std::min(maxSubGz, gz - zofs); + AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); + } + + // Done. + return std::make_tuple(y, so, 0); +} + +//------------------------------------------------------------------------ + +static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) +{ + // Set CUDA device. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + + // Validate arguments. + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); + TORCH_CHECK(x.numel() > 0, "x is empty"); + TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); + + // Output signs if we don't have sign input. 
+ torch::Tensor so; + torch::Tensor s = si; + bool readSigns = !!s.numel(); + if (writeSigns) + { + int64_t sw = x.size(3); + sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. + s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); + } + + // Validate sign tensor if in use. + if (readSigns || writeSigns) + { + TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); + TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); + TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); + TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); + TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); + TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); + } + + // Initialize CUDA kernel parameters. + filtered_lrelu_act_kernel_params p; + p.x = x.data_ptr(); + p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; + p.gain = gain; + p.slope = slope; + p.clamp = clamp; + p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); + p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. + p.sOfs = make_int2(sx, sy); + + // Choose CUDA kernel. + void* func = 0; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] + { + if (writeSigns) + func = choose_filtered_lrelu_act_kernel(); + else if (readSigns) + func = choose_filtered_lrelu_act_kernel(); + else + func = choose_filtered_lrelu_act_kernel(); + }); + TORCH_CHECK(func, "internal error - CUDA kernel not found"); + + // Launch CUDA kernel. + void* args[] = {&p}; + int bx = 128; // 4 warps per block. + + // Logical size of launch = writeSigns ? p.s : p.x + uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; + uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; + uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. + gx = (gx - 1) / bx + 1; + + // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. + const uint32_t gmax = 65535; + gy = std::min(gy, gmax); + gz = std::min(gz, gmax); + + // Launch. + AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); + return so; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. + m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. +} + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.cu b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.cu new file mode 100644 index 0000000..aaac954 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.cu @@ -0,0 +1,1288 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. 
Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include "filtered_lrelu.h" +#include + +//------------------------------------------------------------------------ +// Helpers. + +enum // Filter modes. +{ + MODE_SUSD = 0, // Separable upsampling, separable downsampling. + MODE_FUSD = 1, // Full upsampling, separable downsampling. + MODE_SUFD = 2, // Separable upsampling, full downsampling. + MODE_FUFD = 3, // Full upsampling, full downsampling. +}; + +template struct InternalType; +template <> struct InternalType +{ + typedef double scalar_t; typedef double2 vec2_t; typedef double4 vec4_t; + __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_double2(0, 0); } + __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_double4(0, 0, 0, 0); } + __device__ __forceinline__ static double clamp(double x, double c) { return fmin(fmax(x, -c), c); } +}; +template <> struct InternalType +{ + typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; + __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } + __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } + __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } +}; +template <> struct InternalType +{ + typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; + __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } + __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } + __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } +}; + +#define MIN(A, B) ((A) < (B) ? (A) : (B)) +#define MAX(A, B) ((A) > (B) ? (A) : (B)) +#define CEIL_DIV(A, B) (((B)==1) ? (A) : \ + ((B)==2) ? ((int)((A)+1) >> 1) : \ + ((B)==4) ? ((int)((A)+3) >> 2) : \ + (((A) + ((A) > 0 ? (B) - 1 : 0)) / (B))) + +// This works only up to blocks of size 256 x 256 and for all N that are powers of two. +template __device__ __forceinline__ void fast_div_mod(int& x, int& y, unsigned int i) +{ + if ((N & (N-1)) && N <= 256) + y = (i * ((1<<24)/N + 1)) >> 24; // Assumes N <= 256, i < N*256. + else + y = i/N; + + x = i - y*N; +} + +// Type cast stride before reading it. +template __device__ __forceinline__ T get_stride(const int64_t& x) +{ + return *reinterpret_cast(&x); +} + +//------------------------------------------------------------------------ +// Filters, setup kernel, copying function. + +#define MAX_FILTER_SIZE 32 + +// Combined up/down filter buffers so that transfer can be done with one copy. +__device__ float g_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in global memory, written by setup kernel. +__device__ __constant__ float c_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in constant memory, read by main kernel. + +// Accessors to combined buffers to index up/down filters individually. +#define c_fu (c_fbuf) +#define c_fd (c_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) +#define g_fu (g_fbuf) +#define g_fd (g_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) + +// Set up filters into global memory buffer. 
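+// The filters arrive as ordinary device tensors with arbitrary strides, so a small setup kernel first
+// zero-pads them (and flips them when computing gradients) into the fixed-size global buffer g_fbuf;
+// copy_filters() then copies that buffer into the __constant__ c_fbuf, letting the main kernel fetch
+// every tap through the constant cache, which suits the uniform per-warp filter reads in the loops below.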
+static __global__ void setup_filters_kernel(filtered_lrelu_kernel_params p) +{ + for (int idx = threadIdx.x; idx < MAX_FILTER_SIZE * MAX_FILTER_SIZE; idx += blockDim.x) + { + int x, y; + fast_div_mod(x, y, idx); + + int fu_x = p.flip ? x : (p.fuShape.x - 1 - x); + int fu_y = p.flip ? y : (p.fuShape.y - 1 - y); + if (p.fuShape.y > 0) + g_fu[idx] = (x >= p.fuShape.x || y >= p.fuShape.y) ? 0.0f : p.fu[fu_x * p.fuStride.x + fu_y * p.fuStride.y]; + else + g_fu[idx] = (x >= p.fuShape.x || y > 0) ? 0.0f : p.fu[fu_x * p.fuStride.x]; + + int fd_x = p.flip ? x : (p.fdShape.x - 1 - x); + int fd_y = p.flip ? y : (p.fdShape.y - 1 - y); + if (p.fdShape.y > 0) + g_fd[idx] = (x >= p.fdShape.x || y >= p.fdShape.y) ? 0.0f : p.fd[fd_x * p.fdStride.x + fd_y * p.fdStride.y]; + else + g_fd[idx] = (x >= p.fdShape.x || y > 0) ? 0.0f : p.fd[fd_x * p.fdStride.x]; + } +} + +// Host function to copy filters written by setup kernel into constant buffer for main kernel. +template static cudaError_t copy_filters(cudaStream_t stream) +{ + void* src = 0; + cudaError_t err = cudaGetSymbolAddress(&src, g_fbuf); + if (err) return err; + return cudaMemcpyToSymbolAsync(c_fbuf, src, 2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE * sizeof(float), 0, cudaMemcpyDeviceToDevice, stream); +} + +//------------------------------------------------------------------------ +// Coordinate spaces: +// - Relative to input tensor: inX, inY, tileInX, tileInY +// - Relative to input tile: relInX, relInY, tileInW, tileInH +// - Relative to upsampled tile: relUpX, relUpY, tileUpW, tileUpH +// - Relative to output tile: relOutX, relOutY, tileOutW, tileOutH +// - Relative to output tensor: outX, outY, tileOutX, tileOutY +// +// Relationships between coordinate spaces: +// - inX = tileInX + relInX +// - inY = tileInY + relInY +// - relUpX = relInX * up + phaseInX +// - relUpY = relInY * up + phaseInY +// - relUpX = relOutX * down +// - relUpY = relOutY * down +// - outX = tileOutX + relOutX +// - outY = tileOutY + relOutY + +extern __shared__ char s_buf_raw[]; // When sharedKB <= 48, allocate shared memory statically inside the kernel, otherwise use the externally allocated shared memory buffer. + +template +static __global__ void filtered_lrelu_kernel(filtered_lrelu_kernel_params p) +{ + // Check that we don't try to support non-existing filter modes. 
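+    // Only up/down factors of 1, 2 and 4 are compiled; filter sizes must be multiples of the factor and
+    // at most MAX_FILTER_SIZE, a factor of 1 is only accepted as a 1x1 "full" filter, and a factor of 4
+    // is only supported with separable filters. As a concrete reading of the tile constants computed
+    // below: with up = down = 2, 8-tap filters and a 56x29 output tile, tileUpW x tileUpH comes out as
+    // 120x64 and tileInW x tileInH as 64x36.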
+ static_assert(up == 1 || up == 2 || up == 4, "only up=1, up=2, up=4 scales supported"); + static_assert(down == 1 || down == 2 || down == 4, "only down=1, down=2, down=4 scales supported"); + static_assert(fuSize >= up, "upsampling filter size must be at least upsampling factor"); + static_assert(fdSize >= down, "downsampling filter size must be at least downsampling factor"); + static_assert(fuSize % up == 0, "upsampling filter size must be divisible with upsampling factor"); + static_assert(fdSize % down == 0, "downsampling filter size must be divisible with downsampling factor"); + static_assert(fuSize <= MAX_FILTER_SIZE && fdSize <= MAX_FILTER_SIZE, "filter size greater than MAX_FILTER_SIZE"); + static_assert(up != 1 || (fuSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "up=1 supported only for 1x1 full filters"); + static_assert(down != 1 || (fdSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "down=1 supported only for 1x1 full filters"); + static_assert(!(up == 4 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "full filters not supported for up=4"); + static_assert(!(down == 4 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "full filters not supported for down=4"); + + // Static definitions. + typedef typename InternalType::scalar_t scalar_t; + typedef typename InternalType::vec2_t vec2_t; + typedef typename InternalType::vec4_t vec4_t; + const int tileUpW = (tileOutW * down + (fdSize - 1) - (down - 1) + 3) & ~3; // Upsampled tile width, rounded up to multiple of 4. + const int tileUpH = tileOutH * down + (fdSize - 1) - (down - 1); // Upsampled tile height. + const int tileInW = CEIL_DIV(tileUpW + (fuSize - 1), up); // Input tile width. + const int tileInH = CEIL_DIV(tileUpH + (fuSize - 1), up); // Input tile height. + const int tileUpH_up = CEIL_DIV(tileUpH, up) * up; // Upsampled tile height rounded up to a multiple of up. + const int tileInH_up = CEIL_DIV(tileUpH_up + (fuSize - 1), up); // For allocations only, to avoid shared memory read overruns with up=2 and up=4. + + // Merge 1x1 downsampling into last upsampling step for upf1 and ups2. + const bool downInline = (down == 1) && ((up == 1 && filterMode == MODE_FUFD) || (up == 2 && filterMode == MODE_SUFD)); + + // Sizes of logical buffers. + const int szIn = tileInH_up * tileInW; + const int szUpX = tileInH_up * tileUpW; + const int szUpXY = downInline ? 0 : (tileUpH * tileUpW); + const int szDownX = tileUpH * tileOutW; + + // Sizes for shared memory arrays. + const int s_buf0_size_base = + (filterMode == MODE_SUSD) ? MAX(szIn, szUpXY) : + (filterMode == MODE_FUSD) ? MAX(szIn, szDownX) : + (filterMode == MODE_SUFD) ? MAX(szIn, szUpXY) : + (filterMode == MODE_FUFD) ? szIn : + -1; + const int s_buf1_size_base = + (filterMode == MODE_SUSD) ? MAX(szUpX, szDownX) : + (filterMode == MODE_FUSD) ? szUpXY : + (filterMode == MODE_SUFD) ? szUpX : + (filterMode == MODE_FUFD) ? szUpXY : + -1; + + // Ensure U128 alignment. + const int s_buf0_size = (s_buf0_size_base + 3) & ~3; + const int s_buf1_size = (s_buf1_size_base + 3) & ~3; + + // Check at compile time that we don't use too much shared memory. + static_assert((s_buf0_size + s_buf1_size) * sizeof(scalar_t) <= (sharedKB << 10), "shared memory overflow"); + + // Declare shared memory arrays. + scalar_t* s_buf0; + scalar_t* s_buf1; + if (sharedKB <= 48) + { + // Allocate shared memory arrays here. + __shared__ scalar_t s_buf0_st[(sharedKB > 48) ? 
(1<<24) : (s_buf0_size + s_buf1_size)]; // Prevent launching if this isn't optimized away when unused. + s_buf0 = s_buf0_st; + s_buf1 = s_buf0 + s_buf0_size; + } + else + { + // Use the dynamically allocated shared memory array. + s_buf0 = (scalar_t*)s_buf_raw; + s_buf1 = s_buf0 + s_buf0_size; + } + + // Pointers to the buffers. + scalar_t* s_tileIn; // Input tile: [relInX * tileInH + relInY] + scalar_t* s_tileUpX; // After horizontal upsampling: [relInY * tileUpW + relUpX] + scalar_t* s_tileUpXY; // After upsampling: [relUpY * tileUpW + relUpX] + scalar_t* s_tileDownX; // After horizontal downsampling: [relUpY * tileOutW + relOutX] + if (filterMode == MODE_SUSD) + { + s_tileIn = s_buf0; + s_tileUpX = s_buf1; + s_tileUpXY = s_buf0; + s_tileDownX = s_buf1; + } + else if (filterMode == MODE_FUSD) + { + s_tileIn = s_buf0; + s_tileUpXY = s_buf1; + s_tileDownX = s_buf0; + } + else if (filterMode == MODE_SUFD) + { + s_tileIn = s_buf0; + s_tileUpX = s_buf1; + s_tileUpXY = s_buf0; + } + else if (filterMode == MODE_FUFD) + { + s_tileIn = s_buf0; + s_tileUpXY = s_buf1; + } + + // Allow large grids in z direction via per-launch offset. + int channelIdx = blockIdx.z + p.blockZofs; + int batchIdx = channelIdx / p.yShape.z; + channelIdx -= batchIdx * p.yShape.z; + + // Offset to output feature map. In bytes. + index_t mapOfsOut = channelIdx * get_stride(p.yStride.z) + batchIdx * get_stride(p.yStride.w); + + // Sign shift amount. + uint32_t signXo = ((threadIdx.x + p.sOfs.x) << 1) & 6; + + // Inner tile loop. + #pragma unroll 1 + for (int tileIdx = 0; !enableXrep || (tileIdx < MIN(p.tilesXrep, p.tilesXdim - p.tilesXrep * blockIdx.y)); tileIdx++) + { + // Locate output tile. + int tileX = enableXrep ? blockIdx.y * p.tilesXrep + tileIdx : blockIdx.x; + int tileOutX = tileX * tileOutW; + int tileOutY = (enableXrep ? blockIdx.x : blockIdx.y) * tileOutH; + + // Locate input tile. + int tmpX = tileOutX * down - p.pad0.x; + int tmpY = tileOutY * down - p.pad0.y; + int tileInX = CEIL_DIV(tmpX, up); + int tileInY = CEIL_DIV(tmpY, up); + const int phaseInX = tileInX * up - tmpX; + const int phaseInY = tileInY * up - tmpY; + + // Extra sync if input and output buffers are the same and we are not on first tile. + if (enableXrep && tileIdx > 0 && (filterMode == MODE_FUSD || (filterMode == MODE_SUFD && !downInline) || (filterMode == MODE_FUFD && downInline))) + __syncthreads(); + + // Load input tile & apply bias. Unrolled. + scalar_t b = (scalar_t)*(const T*)((const char*)p.b + (channelIdx * get_stride(p.bStride))); + index_t mapOfsIn = channelIdx * get_stride(p.xStride.z) + batchIdx * get_stride(p.xStride.w); + int idx = threadIdx.x; + const int loopCountIN = CEIL_DIV(tileInW * tileInH, threadsPerBlock); + #pragma unroll + for (int loop = 0; loop < loopCountIN; loop++) + { + int relInX, relInY; + fast_div_mod(relInX, relInY, idx); + int inX = tileInX + relInX; + int inY = tileInY + relInY; + scalar_t v = 0; + + if ((uint32_t)inX < p.xShape.x && (uint32_t)inY < p.xShape.y) + v = (scalar_t)*((const T*)((const char*)p.x + (inX * get_stride(p.xStride.x) + inY * get_stride(p.xStride.y) + mapOfsIn))) + b; + + bool skip = (loop == loopCountIN-1) && (idx >= tileInW * tileInH); + if (!skip) + s_tileIn[idx] = v; + + idx += threadsPerBlock; + } + + if (filterMode == MODE_SUSD || filterMode == MODE_SUFD) // Separable upsampling filter. + { + // Horizontal upsampling. 
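+            // The upsampling filter is applied in polyphase form: the output column at relInX*up + phase
+            // only needs every up-th tap of c_fu (fuSize/up taps in total), so the loops below stride the
+            // constant-memory filter by `up` and emit `up` adjacent outputs per input value read from
+            // shared memory.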
+ __syncthreads(); + if (up == 4) + { + for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) + { + int relUpX0, relInY; + fast_div_mod(relUpX0, relInY, idx); + int relInX0 = relUpX0 / up; + int src0 = relInX0 + tileInW * relInY; + int dst = relInY * tileUpW + relUpX0; + vec4_t v = InternalType::zero_vec4(); + scalar_t a = s_tileIn[src0]; + if (phaseInX == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.y += a * (scalar_t)c_fu[step * up + 3]; + v.z += a * (scalar_t)c_fu[step * up + 2]; + v.w += a * (scalar_t)c_fu[step * up + 1]; + } + } + else if (phaseInX == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.z += a * (scalar_t)c_fu[step * up + 3]; + v.w += a * (scalar_t)c_fu[step * up + 2]; + } + } + else if (phaseInX == 2) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 2]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + v.z += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.w += a * (scalar_t)c_fu[step * up + 3]; + } + } + else // (phaseInX == 3) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 3]; + v.y += a * (scalar_t)c_fu[step * up + 2]; + v.z += a * (scalar_t)c_fu[step * up + 1]; + v.w += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + } + } + s_tileUpX[dst+0] = v.x; + s_tileUpX[dst+1] = v.y; + s_tileUpX[dst+2] = v.z; + s_tileUpX[dst+3] = v.w; + } + } + else if (up == 2) + { + bool p0 = (phaseInX == 0); + for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) + { + int relUpX0, relInY; + fast_div_mod(relUpX0, relInY, idx); + int relInX0 = relUpX0 / up; + int src0 = relInX0 + tileInW * relInY; + int dst = relInY * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + scalar_t a = s_tileIn[src0]; + if (p0) // (phaseInX == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + } + } + else // (phaseInX == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + } + } + s_tileUpX[dst+0] = v.x; + s_tileUpX[dst+1] = v.y; + } + } + + // Vertical upsampling & nonlinearity. + + __syncthreads(); + int groupMask = 15 << ((threadIdx.x & 31) & ~3); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. + int sShapeMaxY = MIN(p.sShape.y, tileOutY * down + tileUpH); // Avoid out-of-tile sign writes. + if (up == 4) + { + minY -= 3; // Adjust according to block height. 
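+            // The vertical pass fuses the remaining upsampling with the activation: each column of results
+            // is scaled by up^2 * gain, run through the leaky ReLU and clamp, and, when signWrite is set,
+            // the per-element decisions are recorded so the backward pass can replay them. minY and
+            // sShapeMaxY restrict those writes to the rows this tile is responsible for, so overlapping
+            // tiles do not store the same sign bytes twice.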
+ for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) + { + int relUpX, relInY0; + fast_div_mod(relUpX, relInY0, idx); + int relUpY0 = relInY0 * up; + int src0 = relInY0 * tileUpW + relUpX; + int dst = relUpY0 * tileUpW + relUpX; + vec4_t v = InternalType::zero_vec4(); + + scalar_t a = s_tileUpX[src0]; + if (phaseInY == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.y += a * (scalar_t)c_fu[step * up + 3]; + v.z += a * (scalar_t)c_fu[step * up + 2]; + v.w += a * (scalar_t)c_fu[step * up + 1]; + } + } + else if (phaseInY == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.z += a * (scalar_t)c_fu[step * up + 3]; + v.w += a * (scalar_t)c_fu[step * up + 2]; + } + } + else if (phaseInY == 2) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 2]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + v.z += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.w += a * (scalar_t)c_fu[step * up + 3]; + } + } + else // (phaseInY == 3) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 3]; + v.y += a * (scalar_t)c_fu[step * up + 2]; + v.z += a * (scalar_t)c_fu[step * up + 1]; + v.w += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + } + } + + int x = tileOutX * down + relUpX; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + index_t si1 = si0 + p.sShape.x; + index_t si2 = si0 + p.sShape.x * 2; + index_t si3 = si0 + p.sShape.x * 3; + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + v.z *= (scalar_t)((float)up * (float)up * p.gain); + v.w *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + int sz = __float_as_uint(v.z) >> 31 << 16; + int sw = __float_as_uint(v.w) >> 31 << 24; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (sz) v.z *= p.slope; + if (sw) v.w *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType::clamp(v.z, p.clamp); } + if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType::clamp(v.w, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + // Combine signs. + uint32_t s = sx + sy + sw + sz; + s <<= (signX & 3) << 1; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. 
+ if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } + if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + int sz = __float_as_uint(v.z) >> 31 << 16; + int sw = __float_as_uint(v.w) >> 31 << 24; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (sz) v.z *= p.slope; + if (sw) v.w *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType::clamp(v.z, p.clamp); } + if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType::clamp(v.w, p.clamp); } + + // Combine signs. + uint32_t s = sx + sy + sw + sz; + s <<= (signX & 3) << 1; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } + if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + } + } + else if (signRead) // Read signs and apply. + { + if ((uint32_t)signXb < p.swLimit) + { + int ss = (signX & 3) << 1; + if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> ss; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } + if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> ss; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } + if ((uint32_t)(signY + 2) < p.sShape.y) { int s = p.s[si2] >> ss; if (s & 1) v.z *= p.slope; if (s & 2) v.z = 0.f; } + if ((uint32_t)(signY + 3) < p.sShape.y) { int s = p.s[si3] >> ss; if (s & 1) v.w *= p.slope; if (s & 2) v.w = 0.f; } + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + + s_tileUpXY[dst + 0 * tileUpW] = v.x; + if (relUpY0 + 1 < tileUpH) s_tileUpXY[dst + 1 * tileUpW] = v.y; + if (relUpY0 + 2 < tileUpH) s_tileUpXY[dst + 2 * tileUpW] = v.z; + if (relUpY0 + 3 < tileUpH) s_tileUpXY[dst + 3 * tileUpW] = v.w; + } + } + else if (up == 2) + { + minY -= 1; // Adjust according to block height. 
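+            // Same scheme as the up == 4 branch above, just with 2-wide vectors. When the 1x1 downsampling
+            // step has been folded into this pass (downInline), the activated values are scaled by the
+            // single tap c_fd[0] and written straight to the output tensor instead of the intermediate
+            // s_tileUpXY buffer.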
+ for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) + { + int relUpX, relInY0; + fast_div_mod(relUpX, relInY0, idx); + int relUpY0 = relInY0 * up; + int src0 = relInY0 * tileUpW + relUpX; + int dst = relUpY0 * tileUpW + relUpX; + vec2_t v = InternalType::zero_vec2(); + + scalar_t a = s_tileUpX[src0]; + if (phaseInY == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + } + } + else // (phaseInY == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + } + } + + int x = tileOutX * down + relUpX; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + index_t si1 = si0 + p.sShape.x; + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + // Combine signs. + int s = sx + sy; + s <<= signXo; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + + // Combine signs. + int s = sx + sy; + s <<= signXo; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + } + } + } + else if (signRead) // Read signs and apply. + { + if ((uint32_t)signXb < p.swLimit) + { + if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> signXo; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } + if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> signXo; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + } + + if (!downInline) + { + // Write into temporary buffer. 
+ s_tileUpXY[dst] = v.x; + if (relUpY0 < tileUpH - 1) + s_tileUpXY[dst + tileUpW] = v.y; + } + else + { + // Write directly into output buffer. + if ((uint32_t)x < p.yShape.x) + { + int ymax = MIN(p.yShape.y, tileUpH + tileOutY * down); + index_t ofs = x * get_stride(p.yStride.x) + y * get_stride(p.yStride.y) + mapOfsOut; + if ((uint32_t)y + 0 < p.yShape.y) *((T*)((char*)p.y + ofs)) = (T)(v.x * (scalar_t)c_fd[0]); + if ((uint32_t)y + 1 < ymax) *((T*)((char*)p.y + ofs + get_stride(p.yStride.y))) = (T)(v.y * (scalar_t)c_fd[0]); + } + } + } + } + } + else if (filterMode == MODE_FUSD || filterMode == MODE_FUFD) + { + // Full upsampling filter. + + if (up == 2) + { + // 2 x 2-wide. + __syncthreads(); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH + p.sOfs.y : 0; // Skip already written signs. + for (int idx = threadIdx.x * 4; idx < tileUpW * tileUpH; idx += blockDim.x * 4) + { + int relUpX0, relUpY0; + fast_div_mod(relUpX0, relUpY0, idx); + int relInX0 = CEIL_DIV(relUpX0 - phaseInX, up); + int relInY0 = CEIL_DIV(relUpY0 - phaseInY, up); + int src0 = relInX0 + tileInW * relInY0; + int tap0y = (relInY0 * up + phaseInY - relUpY0); + + #define X_LOOP(TAPY, PX) \ + for (int sx = 0; sx < fuSize / up; sx++) \ + { \ + v.x += a * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ + v.z += b * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 0) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ + v.y += a * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ + v.w += b * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 1) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ + } + + vec4_t v = InternalType::zero_vec4(); + if (tap0y == 0 && phaseInX == 0) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(0, 0) } + if (tap0y == 0 && phaseInX == 1) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(0, 1) } + if (tap0y == 1 && phaseInX == 0) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(1, 0) } + if (tap0y == 1 && phaseInX == 1) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(1, 1) } + + #undef X_LOOP + + int x = tileOutX * down + relUpX0; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + v.z *= (scalar_t)((float)up * (float)up * p.gain); + v.w *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. 
+ int sx = __float_as_uint(v.x) >> 31; + int sy = __float_as_uint(v.y) >> 31; + int sz = __float_as_uint(v.z) >> 31; + int sw = __float_as_uint(v.w) >> 31; + if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType::clamp(v.x, p.clamp); } + if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType::clamp(v.y, p.clamp); } + if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType::clamp(v.z, p.clamp); } + if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType::clamp(v.w, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31; + int sy = __float_as_uint(v.y) >> 31; + int sz = __float_as_uint(v.z) >> 31; + int sw = __float_as_uint(v.w) >> 31; + if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType::clamp(v.x, p.clamp); } + if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType::clamp(v.y, p.clamp); } + if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType::clamp(v.z, p.clamp); } + if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType::clamp(v.w, p.clamp); } + + p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + } + } + else if (signRead) // Read sign and apply. + { + if ((uint32_t)signY < p.sShape.y) + { + int s = 0; + if ((uint32_t)signXb < p.swLimit) s = p.s[si]; + if ((uint32_t)signXb + 1 < p.swLimit) s |= p.s[si + 1] << 8; + s >>= (signX & 3) << 1; + if (s & 0x01) v.x *= p.slope; if (s & 0x02) v.x = 0.f; + if (s & 0x04) v.y *= p.slope; if (s & 0x08) v.y = 0.f; + if (s & 0x10) v.z *= p.slope; if (s & 0x20) v.z = 0.f; + if (s & 0x40) v.w *= p.slope; if (s & 0x80) v.w = 0.f; + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + + s_tileUpXY[idx + 0] = v.x; + s_tileUpXY[idx + 1] = v.y; + s_tileUpXY[idx + 2] = v.z; + s_tileUpXY[idx + 3] = v.w; + } + } + else if (up == 1) + { + __syncthreads(); + uint32_t groupMask = 15 << ((threadIdx.x & 31) & ~3); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. + for (int idx = threadIdx.x; idx < tileUpW * tileUpH; idx += blockDim.x) + { + int relUpX0, relUpY0; + fast_div_mod(relUpX0, relUpY0, idx); + scalar_t v = s_tileIn[idx] * (scalar_t)c_fu[0]; // 1x1 filter. 
+ + int x = tileOutX * down + relUpX0; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + v *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write sign. + uint32_t s = 0; + uint32_t signXbit = (1u << signXo); + if (v < 0.f) + { + s = signXbit; + v *= p.slope; + } + if (fabsf(v) > p.clamp) + { + s = signXbit * 2; + v = InternalType::clamp(v, p.clamp); + } + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. + s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. + p.s[si] = s; // Write. + } + } + else + { + // Determine and write sign. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + uint32_t s = 0; + uint32_t signXbit = (1u << signXo); + if (v < 0.f) + { + s = signXbit; + v *= p.slope; + } + if (fabsf(v) > p.clamp) + { + s = signXbit * 2; + v = InternalType::clamp(v, p.clamp); + } + s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. + s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. + p.s[si] = s; // Write. + } + else + { + // Just compute the value. + if (v < 0.f) v *= p.slope; + v = InternalType::clamp(v, p.clamp); + } + } + } + else if (signRead) + { + // Read sign and apply if within sign tensor bounds. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y) + { + int s = p.s[si]; + s >>= signXo; + if (s & 1) v *= p.slope; + if (s & 2) v = 0.f; + } + } + else // Forward pass with no sign write. + { + if (v < 0.f) v *= p.slope; + v = InternalType::clamp(v, p.clamp); + } + + if (!downInline) // Write into temporary buffer. + s_tileUpXY[idx] = v; + else if ((uint32_t)x < p.yShape.x && (uint32_t)y < p.yShape.y) // Write directly into output buffer + *((T*)((char*)p.y + (x * get_stride(p.yStride.x) + y * get_stride(p.yStride.y) + mapOfsOut))) = (T)(v * (scalar_t)c_fd[0]); + } + } + } + + // Downsampling. + if (filterMode == MODE_SUSD || filterMode == MODE_FUSD) + { + // Horizontal downsampling. + __syncthreads(); + if (down == 4 && tileOutW % 4 == 0) + { + // Calculate 4 pixels at a time. + for (int idx = threadIdx.x * 4; idx < tileOutW * tileUpH; idx += blockDim.x * 4) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src0 = relUpY * tileUpW + relUpX0; + vec4_t v = InternalType::zero_vec4(); + #pragma unroll + for (int step = 0; step < fdSize; step++) + { + v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; + v.y += s_tileUpXY[src0 + 4 + step] * (scalar_t)c_fd[step]; + v.z += s_tileUpXY[src0 + 8 + step] * (scalar_t)c_fd[step]; + v.w += s_tileUpXY[src0 + 12 + step] * (scalar_t)c_fd[step]; + } + s_tileDownX[idx+0] = v.x; + s_tileDownX[idx+1] = v.y; + s_tileDownX[idx+2] = v.z; + s_tileDownX[idx+3] = v.w; + } + } + else if ((down == 2 || down == 4) && (tileOutW % 2 == 0)) + { + // Calculate 2 pixels at a time. 
+ for (int idx = threadIdx.x * 2; idx < tileOutW * tileUpH; idx += blockDim.x * 2) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src0 = relUpY * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + #pragma unroll + for (int step = 0; step < fdSize; step++) + { + v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; + v.y += s_tileUpXY[src0 + down + step] * (scalar_t)c_fd[step]; + } + s_tileDownX[idx+0] = v.x; + s_tileDownX[idx+1] = v.y; + } + } + else + { + // Calculate 1 pixel at a time. + for (int idx = threadIdx.x; idx < tileOutW * tileUpH; idx += blockDim.x) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src = relUpY * tileUpW + relUpX0; + scalar_t v = 0.f; + #pragma unroll + for (int step = 0; step < fdSize; step++) + v += s_tileUpXY[src + step] * (scalar_t)c_fd[step]; + s_tileDownX[idx] = v; + } + } + + // Vertical downsampling & store output tile. + __syncthreads(); + for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) + { + int relOutX, relOutY0; + fast_div_mod(relOutX, relOutY0, idx); + int relUpY0 = relOutY0 * down; + int src0 = relUpY0 * tileOutW + relOutX; + scalar_t v = 0; + #pragma unroll + for (int step = 0; step < fdSize; step++) + v += s_tileDownX[src0 + step * tileOutW] * (scalar_t)c_fd[step]; + + int outX = tileOutX + relOutX; + int outY = tileOutY + relOutY0; + + if (outX < p.yShape.x & outY < p.yShape.y) + *((T*)((char*)p.y + (outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut))) = (T)v; + } + } + else if (filterMode == MODE_SUFD || filterMode == MODE_FUFD) + { + // Full downsampling filter. + if (down == 2) + { + // 2-wide. + __syncthreads(); + for (int idx = threadIdx.x * 2; idx < tileOutW * tileOutH; idx += blockDim.x * 2) + { + int relOutX0, relOutY0; + fast_div_mod(relOutX0, relOutY0, idx); + int relUpX0 = relOutX0 * down; + int relUpY0 = relOutY0 * down; + int src0 = relUpY0 * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + #pragma unroll + for (int sy = 0; sy < fdSize; sy++) + #pragma unroll + for (int sx = 0; sx < fdSize; sx++) + { + v.x += s_tileUpXY[src0 + 0 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; + v.y += s_tileUpXY[src0 + 2 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; + } + + int outX = tileOutX + relOutX0; + int outY = tileOutY + relOutY0; + if ((uint32_t)outY < p.yShape.y) + { + index_t ofs = outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut; + if (outX + 0 < p.yShape.x) *((T*)((char*)p.y + ofs)) = (T)v.x; + if (outX + 1 < p.yShape.x) *((T*)((char*)p.y + ofs + get_stride(p.yStride.x))) = (T)v.y; + } + } + } + else if (down == 1 && !downInline) + { + // Thread per pixel. + __syncthreads(); + for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) + { + int relOutX0, relOutY0; + fast_div_mod(relOutX0, relOutY0, idx); + scalar_t v = s_tileUpXY[idx] * (scalar_t)c_fd[0]; // 1x1 filter. + + int outX = tileOutX + relOutX0; + int outY = tileOutY + relOutY0; + if ((uint32_t)outX < p.yShape.x && (uint32_t)outY < p.yShape.y) + *((T*)((char*)p.y + (outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut))) = (T)v; + } + } + } + + if (!enableXrep) + break; + } +} + +//------------------------------------------------------------------------ +// Compute activation function and signs for upsampled data tensor, modifying data tensor in-place. 
Used for accelerating the generic variant. +// Sign tensor is known to be contiguous, and p.x and p.s have the same z, w dimensions. 64-bit indexing is always used. + +template +static __global__ void filtered_lrelu_act_kernel(filtered_lrelu_act_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + + // Indexing. + int32_t x = threadIdx.x + blockIdx.x * blockDim.x; + int32_t ymax = signWrite ? p.sShape.y : p.xShape.y; + int32_t qmax = p.xShape.z * p.xShape.w; // Combined minibatch*channel maximum index. + + // Loop to accommodate oversized tensors. + for (int32_t q = blockIdx.z; q < qmax; q += gridDim.z) + for (int32_t y = blockIdx.y; y < ymax; y += gridDim.y) + { + // Extract z and w (channel, minibatch index). + int32_t w = q / p.xShape.z; + int32_t z = q - w * p.xShape.z; + + // Choose behavior based on sign read/write mode. + if (signWrite) + { + // Process value if in p.x. + uint32_t s = 0; + if (x < p.xShape.x && y < p.xShape.y) + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + + // Gain, LReLU, clamp. + v *= p.gain; + if (v < 0.f) + { + v *= p.slope; + s = 1; // Sign. + } + if (fabsf(v) > p.clamp) + { + v = InternalType::clamp(v, p.clamp); + s = 2; // Clamp. + } + + *pv = (T)v; // Write value. + } + + // Coalesce into threads 0 and 16 of warp. + uint32_t m = (threadIdx.x & 16) ? 0xffff0000u : 0x0000ffffu; + s <<= ((threadIdx.x & 15) << 1); // Shift into place. + s |= __shfl_xor_sync(m, s, 1); // Distribute. + s |= __shfl_xor_sync(m, s, 2); + s |= __shfl_xor_sync(m, s, 4); + s |= __shfl_xor_sync(m, s, 8); + + // Write signs if leader and in p.s. + if (!(threadIdx.x & 15) && x < p.sShape.x) // y is always in. + { + uint64_t is = x + p.sShape.x * (y + (int64_t)p.sShape.y * q); // Contiguous. + ((uint32_t*)p.s)[is >> 4] = s; + } + } + else if (signRead) + { + // Process value if in p.x. + if (x < p.xShape.x) // y is always in. + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + v *= p.gain; + + // Apply sign buffer offset. + uint32_t sx = x + p.sOfs.x; + uint32_t sy = y + p.sOfs.y; + + // Read and apply signs if we land inside valid region of sign buffer. + if (sx < p.sShape.x && sy < p.sShape.y) + { + uint64_t is = (sx >> 2) + (p.sShape.x >> 2) * (sy + (uint64_t)p.sShape.y * q); // Contiguous. + unsigned char s = p.s[is]; + s >>= (sx & 3) << 1; // Shift into place. + if (s & 1) // Sign? + v *= p.slope; + if (s & 2) // Clamp? + v = 0.f; + } + + *pv = (T)v; // Write value. + } + } + else + { + // Forward pass with no sign write. Process value if in p.x. + if (x < p.xShape.x) // y is always in. + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + v *= p.gain; + if (v < 0.f) + v *= p.slope; + if (fabsf(v) > p.clamp) + v = InternalType::clamp(v, p.clamp); + *pv = (T)v; // Write value. + } + } + } +} + +template void* choose_filtered_lrelu_act_kernel(void) +{ + return (void*)filtered_lrelu_act_kernel; +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB) +{ + filtered_lrelu_kernel_spec s = { 0 }; + + // Return the first matching kernel. 
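+    // Every CASE line below names one compiled specialization. A case matches on the shared-memory
+    // budget, on whether each filter is separable (fuShape.y / fdShape.y == 0) or full, and on the
+    // up/down factors with an upper bound on filter size; the trailing numbers are the tuned output tile
+    // size, warp count and horizontal-tile repeat. The first match is returned, which is why smaller
+    // filters and higher shared-memory variants are listed first; e.g. the "4t-ups2-downs2" entry runs a
+    // 56x29 output tile with 16 warps per block.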
+#define CASE(SH, U, FU, D, FD, MODE, TW, TH, W, XR, WS) \ + if (sharedKB >= SH) \ + if ((p.fuShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_SUFD)) || (p.fuShape.y > 0 && (MODE == MODE_FUSD || MODE == MODE_FUFD))) \ + if ((p.fdShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_FUSD)) || (p.fdShape.y > 0 && (MODE == MODE_SUFD || MODE == MODE_FUFD))) \ + if (p.up == U && p.fuShape.x <= FU && p.fuShape.y <= FU && p.down == D && p.fdShape.x <= FD && p.fdShape.y <= FD) \ + { \ + static_assert((D*TW % 4) == 0, "down * tileWidth must be divisible by 4"); \ + static_assert(FU % U == 0, "upscaling filter size must be multiple of upscaling factor"); \ + static_assert(FD % D == 0, "downscaling filter size must be multiple of downscaling factor"); \ + s.setup = (void*)setup_filters_kernel; \ + s.exec = (void*)filtered_lrelu_kernel; \ + s.tileOut = make_int2(TW, TH); \ + s.numWarps = W; \ + s.xrep = XR; \ + s.dynamicSharedKB = (SH == 48) ? 0 : SH; \ + return s; \ + } + + // Launch parameters for various kernel specializations. + // Small filters must be listed before large filters, otherwise the kernel for larger filter will always match first. + // Kernels that use more shared memory must be listed before those that use less, for the same reason. + + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/1,1, /*mode*/MODE_FUFD, /*tw,th,warps,xrep,wskip*/64, 178, 32, 0, 0) // 1t-upf1-downf1 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/152, 95, 16, 0, 0) // 4t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 22, 16, 0, 0) // 4t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 29, 16, 11, 0) // 4t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/60, 28, 16, 0, 0) // 4t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 28, 16, 0, 0) // 4t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 31, 16, 11, 0) // 4t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 36, 16, 0, 0) // 4t-ups4-downf2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 22, 16, 12, 0) // 4t-ups2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/29, 15, 16, 0, 0) // 4t-upf2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/96, 150, 28, 0, 0) // 6t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 35, 24, 0, 0) // 6t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 16, 10, 0) // 6t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/58, 28, 24, 8, 0) // 6t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/52, 28, 16, 0, 0) // 6t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 51, 16, 5, 0) // 6t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 56, 16, 6, 0) // 
6t-ups4-downf2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 18, 16, 12, 0) // 6t-ups2-downs4 + CASE(/*sharedKB*/96, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 31, 32, 6, 0) // 6t-upf2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 13, 24, 0, 0) // 6t-upf2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/148, 89, 24, 0, 0) // 8t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 31, 16, 5, 0) // 8t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 41, 16, 9, 0) // 8t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 26, 24, 0, 0) // 8t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 40, 16, 0, 0) // 8t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 24, 5, 0) // 8t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 50, 16, 0, 0) // 8t-ups4-downf2 + CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/24, 24, 32, 12, 1) // 8t-ups2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 13, 16, 10, 1) // 8t-ups2-downs4 + CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 28, 28, 4, 0) // 8t-upf2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 10, 24, 0, 0) // 8t-upf2-downs4 + + #undef CASE + return s; // No kernel found. +} + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.h b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.h new file mode 100644 index 0000000..f2bfd1d --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include + +//------------------------------------------------------------------------ +// CUDA kernel parameters. + +struct filtered_lrelu_kernel_params +{ + // These parameters decide which kernel to use. + int up; // upsampling ratio (1, 2, 4) + int down; // downsampling ratio (1, 2, 4) + int2 fuShape; // [size, 1] | [size, size] + int2 fdShape; // [size, 1] | [size, size] + + int _dummy; // Alignment. + + // Rest of the parameters. + const void* x; // Input tensor. + void* y; // Output tensor. + const void* b; // Bias tensor. + unsigned char* s; // Sign tensor in/out. NULL if unused. + const float* fu; // Upsampling filter. 
+ const float* fd; // Downsampling filter. + + int2 pad0; // Left/top padding. + float gain; // Additional gain factor. + float slope; // Leaky ReLU slope on negative side. + float clamp; // Clamp after nonlinearity. + int flip; // Filter kernel flip for gradient computation. + + int tilesXdim; // Original number of horizontal output tiles. + int tilesXrep; // Number of horizontal tiles per CTA. + int blockZofs; // Block z offset to support large minibatch, channel dimensions. + + int4 xShape; // [width, height, channel, batch] + int4 yShape; // [width, height, channel, batch] + int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused. + int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. + int swLimit; // Active width of sign tensor in bytes. + + longlong4 xStride; // Strides of all tensors except signs, same component order as shapes. + longlong4 yStride; // + int64_t bStride; // + longlong3 fuStride; // + longlong3 fdStride; // +}; + +struct filtered_lrelu_act_kernel_params +{ + void* x; // Input/output, modified in-place. + unsigned char* s; // Sign tensor in/out. NULL if unused. + + float gain; // Additional gain factor. + float slope; // Leaky ReLU slope on negative side. + float clamp; // Clamp after nonlinearity. + + int4 xShape; // [width, height, channel, batch] + longlong4 xStride; // Input/output tensor strides, same order as in shape. + int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused. + int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. +}; + +//------------------------------------------------------------------------ +// CUDA kernel specialization. + +struct filtered_lrelu_kernel_spec +{ + void* setup; // Function for filter kernel setup. + void* exec; // Function for main operation. + int2 tileOut; // Width/height of launch tile. + int numWarps; // Number of warps per thread block, determines launch block size. + int xrep; // For processing multiple horizontal tiles per thread block. + int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants. +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template void* choose_filtered_lrelu_act_kernel(void); +template cudaError_t copy_filters(cudaStream_t stream); + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.py b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.py new file mode 100644 index 0000000..2047b7e --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu.py @@ -0,0 +1,276 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import os +import numpy as np +import torch +import warnings + +from .. import custom_ops +from .. import misc +from . import upfirdn2d +from . 
import bias_act + +#---------------------------------------------------------------------------- + +_plugin = None + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='filtered_lrelu_plugin', + sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'], + headers=['filtered_lrelu.h', 'filtered_lrelu.cu'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], + ) + return True + +def _get_filter_size(f): + if f is None: + return 1, 1 + assert isinstance(f, torch.Tensor) + assert 1 <= f.ndim <= 2 + return f.shape[-1], f.shape[0] # width, height + +def _parse_padding(padding): + if isinstance(padding, int): + padding = [padding, padding] + assert isinstance(padding, (list, tuple)) + assert all(isinstance(x, (int, np.integer)) for x in padding) + padding = [int(x) for x in padding] + if len(padding) == 2: + px, py = padding + padding = [px, px, py, py] + px0, px1, py0, py1 = padding + return px0, px1, py0, py1 + +#---------------------------------------------------------------------------- + +def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'): + r"""Filtered leaky ReLU for a batch of 2D images. + + Performs the following sequence of operations for each channel: + + 1. Add channel-specific bias if provided (`b`). + + 2. Upsample the image by inserting N-1 zeros after each pixel (`up`). + + 3. Pad the image with the specified number of zeros on each side (`padding`). + Negative padding corresponds to cropping the image. + + 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it + so that the footprint of all output pixels lies within the input image. + + 5. Multiply each value by the provided gain factor (`gain`). + + 6. Apply leaky ReLU activation function to each value. + + 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided. + + 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking + it so that the footprint of all output pixels lies within the input image. + + 9. Downsample the image by keeping every Nth pixel (`down`). + + The fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports gradients of arbitrary order. + + Args: + x: Float32/float16/float64 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + fu: Float32 upsampling FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + fd: Float32 downsampling FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type + as `x`. The length of vector must must match the channel dimension of `x`. + up: Integer upsampling factor (default: 1). + down: Integer downsampling factor. (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + gain: Overall scaling factor for signal magnitude (default: sqrt(2)). + slope: Slope on the negative side of leaky ReLU (default: 0.2). + clamp: Maximum magnitude for leaky ReLU output (default: None). + flip_filter: False = convolution, True = correlation (default: False). 
+ impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0) + return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): + """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using + existing `upfirdn2n()` and `bias_act()` ops. + """ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + fu_w, fu_h = _get_filter_size(fu) + fd_w, fd_h = _get_filter_size(fd) + if b is not None: + assert isinstance(b, torch.Tensor) and b.dtype == x.dtype + misc.assert_shape(b, [x.shape[1]]) + assert isinstance(up, int) and up >= 1 + assert isinstance(down, int) and down >= 1 + px0, px1, py0, py1 = _parse_padding(padding) + assert gain == float(gain) and gain > 0 + assert slope == float(slope) and slope >= 0 + assert clamp is None or (clamp == float(clamp) and clamp >= 0) + + # Calculate output size. + batch_size, channels, in_h, in_w = x.shape + in_dtype = x.dtype + out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down + out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down + + # Compute using existing ops. + x = bias_act.bias_act(x=x, b=b) # Apply bias. + x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. + x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp. + x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample. + + # Check output shape & dtype. + misc.assert_shape(x, [batch_size, channels, out_h, out_w]) + assert x.dtype == in_dtype + return x + +#---------------------------------------------------------------------------- + +_filtered_lrelu_cuda_cache = dict() + +def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): + """Fast CUDA implementation of `filtered_lrelu()` using custom ops. + """ + assert isinstance(up, int) and up >= 1 + assert isinstance(down, int) and down >= 1 + px0, px1, py0, py1 = _parse_padding(padding) + assert gain == float(gain) and gain > 0 + gain = float(gain) + assert slope == float(slope) and slope >= 0 + slope = float(slope) + assert clamp is None or (clamp == float(clamp) and clamp >= 0) + clamp = float(clamp if clamp is not None else 'inf') + + # Lookup from cache. + key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter) + if key in _filtered_lrelu_cuda_cache: + return _filtered_lrelu_cuda_cache[key] + + # Forward op. + class FilteredLReluCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + + # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable). 
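+            # A default 1x1 all-ones FIR kernel acts as an identity filter, so the
+            # plugin can treat "no filter" and "explicit filter" uniformly. Squaring
+            # a separable single-tap filter (further below) yields the equivalent
+            # full 1x1 kernel, since a separable filter is applied once along each axis.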
+ if fu is None: + fu = torch.ones([1, 1], dtype=torch.float32, device=x.device) + if fd is None: + fd = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert 1 <= fu.ndim <= 2 + assert 1 <= fd.ndim <= 2 + + # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1. + if up == 1 and fu.ndim == 1 and fu.shape[0] == 1: + fu = fu.square()[None] + if down == 1 and fd.ndim == 1 and fd.shape[0] == 1: + fd = fd.square()[None] + + # Missing sign input tensor. + if si is None: + si = torch.empty([0]) + + # Missing bias tensor. + if b is None: + b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device) + + # Construct internal sign tensor only if gradients are needed. + write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad) + + # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout. + strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1] + if any(a < b for a, b in zip(strides[:-1], strides[1:])): + warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning) + + # Call C++/Cuda plugin if datatype is supported. + if x.dtype in [torch.float16, torch.float32]: + if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device): + warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning) + y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs) + else: + return_code = -1 + + # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because + # only the bit-packed sign tensor is retained for gradient computation. + if return_code < 0: + warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning) + + y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias. + y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. + so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place. + y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample. + + # Prepare for gradient computation. 
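+            # Only the FIR filters and the bit-packed sign tensor (the caller-supplied
+            # si, or the freshly written so) are retained; the input itself is not
+            # needed, because backward() below re-expresses the gradient as another
+            # filtered_lrelu with up/down swapped and fu/fd exchanged, with the stored
+            # signs recording which elements fell on the negative side of the activation.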
+ ctx.save_for_backward(fu, fd, (si if si.numel() else so)) + ctx.x_shape = x.shape + ctx.y_shape = y.shape + ctx.s_ofs = sx, sy + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + fu, fd, si = ctx.saved_tensors + _, _, xh, xw = ctx.x_shape + _, _, yh, yw = ctx.y_shape + sx, sy = ctx.s_ofs + dx = None # 0 + dfu = None; assert not ctx.needs_input_grad[1] + dfd = None; assert not ctx.needs_input_grad[2] + db = None # 3 + dsi = None; assert not ctx.needs_input_grad[4] + dsx = None; assert not ctx.needs_input_grad[5] + dsy = None; assert not ctx.needs_input_grad[6] + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]: + pp = [ + (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0, + xw * up - yw * down + px0 - (up - 1), + (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0, + xh * up - yh * down + py0 - (up - 1), + ] + gg = gain * (up ** 2) / (down ** 2) + ff = (not flip_filter) + sx = sx - (fu.shape[-1] - 1) + px0 + sy = sy - (fu.shape[0] - 1) + py0 + dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy) + + if ctx.needs_input_grad[3]: + db = dx.sum([0, 2, 3]) + + return dx, dfu, dfd, db, dsi, dsx, dsy + + # Add to cache. + _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda + return FilteredLReluCuda + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_ns.cu b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_ns.cu new file mode 100644 index 0000000..8a3eae4 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_ns.cu @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for no signs mode (no gradients required). + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. 
+template cudaError_t copy_filters(cudaStream_t stream); diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_rd.cu b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_rd.cu new file mode 100644 index 0000000..3cd43ec --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_rd.cu @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for sign read mode. + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. +template cudaError_t copy_filters(cudaStream_t stream); diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_wr.cu b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_wr.cu new file mode 100644 index 0000000..bc2fa06 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/filtered_lrelu_wr.cu @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for sign write mode. + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. 
+template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. +template cudaError_t copy_filters(cudaStream_t stream); diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/fma.py b/3DPortraitGAN_pyramid/torch_utils/ops/fma.py new file mode 100644 index 0000000..5458116 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/fma.py @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`.""" + +import torch + +#---------------------------------------------------------------------------- + +def fma(a, b, c): # => a * b + c + return _FusedMultiplyAdd.apply(a, b, c) + +#---------------------------------------------------------------------------- + +class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c + @staticmethod + def forward(ctx, a, b, c): # pylint: disable=arguments-differ + out = torch.addcmul(c, a, b) + ctx.save_for_backward(a, b) + ctx.c_shape = c.shape + return out + + @staticmethod + def backward(ctx, dout): # pylint: disable=arguments-differ + a, b = ctx.saved_tensors + c_shape = ctx.c_shape + da = None + db = None + dc = None + + if ctx.needs_input_grad[0]: + da = _unbroadcast(dout * b, a.shape) + + if ctx.needs_input_grad[1]: + db = _unbroadcast(dout * a, b.shape) + + if ctx.needs_input_grad[2]: + dc = _unbroadcast(dout, c_shape) + + return da, db, dc + +#---------------------------------------------------------------------------- + +def _unbroadcast(x, shape): + extra_dims = x.ndim - len(shape) + assert extra_dims >= 0 + dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] + if len(dim): + x = x.sum(dim=dim, keepdim=True) + if extra_dims: + x = x.reshape(-1, *x.shape[extra_dims+1:]) + assert x.shape == shape + return x + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/grid_sample_gradfix.py b/3DPortraitGAN_pyramid/torch_utils/ops/grid_sample_gradfix.py new file mode 100644 index 0000000..35d9472 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/grid_sample_gradfix.py @@ -0,0 +1,79 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Custom replacement for `torch.nn.functional.grid_sample` that +supports arbitrarily high order gradients between the input and output. 
+Only works on 2D images and assumes +`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" + +import torch + +# pylint: disable=redefined-builtin +# pylint: disable=arguments-differ +# pylint: disable=protected-access + +#---------------------------------------------------------------------------- + +enabled = False # Enable the custom op by setting this to true. + +#---------------------------------------------------------------------------- + +def grid_sample(input, grid): + if _should_use_custom_op(): + return _GridSample2dForward.apply(input, grid) + return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) + +#---------------------------------------------------------------------------- + +def _should_use_custom_op(): + return enabled + +#---------------------------------------------------------------------------- + +class _GridSample2dForward(torch.autograd.Function): + @staticmethod + def forward(ctx, input, grid): + assert input.ndim == 4 + assert grid.ndim == 4 + output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) + ctx.save_for_backward(input, grid) + return output + + @staticmethod + def backward(ctx, grad_output): + input, grid = ctx.saved_tensors + grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) + return grad_input, grad_grid + +#---------------------------------------------------------------------------- + +class _GridSample2dBackward(torch.autograd.Function): + @staticmethod + def forward(ctx, grad_output, input, grid): + op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') + grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) + ctx.save_for_backward(grid) + return grad_input, grad_grid + + @staticmethod + def backward(ctx, grad2_grad_input, grad2_grad_grid): + _ = grad2_grad_grid # unused + grid, = ctx.saved_tensors + grad2_grad_output = None + grad2_input = None + grad2_grid = None + + if ctx.needs_input_grad[0]: + grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) + + assert not ctx.needs_input_grad[2] + return grad2_grad_output, grad2_input, grad2_grid + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.cpp b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.cpp new file mode 100644 index 0000000..c1769c3 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.cpp @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include +#include +#include "upfirdn2d.h" + +//------------------------------------------------------------------------ + +static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain) +{ + // Validate arguments. 
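+    // The checks below require a CUDA-resident rank-4 input, a float32 rank-2 filter
+    // on the same device, and tensor footprints small enough for the kernels'
+    // 32-bit indexing.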
+ TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x"); + TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32"); + TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); + TORCH_CHECK(f.numel() <= INT_MAX, "f is too large"); + TORCH_CHECK(x.numel() > 0, "x has zero size"); + TORCH_CHECK(f.numel() > 0, "f has zero size"); + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(f.dim() == 2, "f must be rank 2"); + TORCH_CHECK((x.size(0)-1)*x.stride(0) + (x.size(1)-1)*x.stride(1) + (x.size(2)-1)*x.stride(2) + (x.size(3)-1)*x.stride(3) <= INT_MAX, "x memory footprint is too large"); + TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1"); + TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1"); + TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1"); + + // Create output tensor. + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx; + int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy; + TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1"); + torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format()); + TORCH_CHECK(y.numel() <= INT_MAX, "output is too large"); + TORCH_CHECK((y.size(0)-1)*y.stride(0) + (y.size(1)-1)*y.stride(1) + (y.size(2)-1)*y.stride(2) + (y.size(3)-1)*y.stride(3) <= INT_MAX, "output memory footprint is too large"); + + // Initialize CUDA kernel parameters. + upfirdn2d_kernel_params p; + p.x = x.data_ptr(); + p.f = f.data_ptr(); + p.y = y.data_ptr(); + p.up = make_int2(upx, upy); + p.down = make_int2(downx, downy); + p.pad0 = make_int2(padx0, pady0); + p.flip = (flip) ? 1 : 0; + p.gain = gain; + p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0)); + p.filterSize = make_int2((int)f.size(1), (int)f.size(0)); + p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0)); + p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); + p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0)); + p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z; + p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1; + + // Choose CUDA kernel. + upfirdn2d_kernel_spec spec; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] + { + spec = choose_upfirdn2d_kernel(p); + }); + + // Set looping options. + p.loopMajor = (p.sizeMajor - 1) / 16384 + 1; + p.loopMinor = spec.loopMinor; + p.loopX = spec.loopX; + p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1; + p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1; + + // Compute grid size. + dim3 blockSize, gridSize; + if (spec.tileOutW < 0) // large + { + blockSize = dim3(4, 32, 1); + gridSize = dim3( + ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor, + (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, + p.launchMajor); + } + else // small + { + blockSize = dim3(256, 1, 1); + gridSize = dim3( + ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor, + (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, + p.launchMajor); + } + + // Launch CUDA kernel. 
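+    // All parameters travel in a single upfirdn2d_kernel_params struct, passed by
+    // address to the specialization chosen above and launched on PyTorch's current
+    // CUDA stream.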
+ void* args[] = {&p}; + AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); + return y; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("upfirdn2d", &upfirdn2d); +} + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.cu b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.cu new file mode 100644 index 0000000..7d182d7 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.cu @@ -0,0 +1,388 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include "upfirdn2d.h" + +//------------------------------------------------------------------------ +// Helpers. + +template struct InternalType; +template <> struct InternalType { typedef double scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; + +static __device__ __forceinline__ int floor_div(int a, int b) +{ + int t = 1 - a / b; + return (a + t * b) / b - t; +} + +//------------------------------------------------------------------------ +// Generic CUDA implementation for large filters. + +template static __global__ void upfirdn2d_kernel_large(upfirdn2d_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + + // Calculate thread index. + int minorBase = blockIdx.x * blockDim.x + threadIdx.x; + int outY = minorBase / p.launchMinor; + minorBase -= outY * p.launchMinor; + int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y; + int majorBase = blockIdx.z * p.loopMajor; + if (outXBase >= p.outSize.x | outY >= p.outSize.y | majorBase >= p.sizeMajor) + return; + + // Setup Y receptive field. + int midY = outY * p.down.y + p.up.y - 1 - p.pad0.y; + int inY = min(max(floor_div(midY, p.up.y), 0), p.inSize.y); + int h = min(max(floor_div(midY + p.filterSize.y, p.up.y), 0), p.inSize.y) - inY; + int filterY = midY + p.filterSize.y - (inY + 1) * p.up.y; + if (p.flip) + filterY = p.filterSize.y - 1 - filterY; + + // Loop over major, minor, and X. + for (int majorIdx = 0, major = majorBase; majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) + for (int minorIdx = 0, minor = minorBase; minorIdx < p.loopMinor & minor < p.sizeMinor; minorIdx++, minor += p.launchMinor) + { + int nc = major * p.sizeMinor + minor; + int n = nc / p.inSize.z; + int c = nc - n * p.inSize.z; + for (int loopX = 0, outX = outXBase; loopX < p.loopX & outX < p.outSize.x; loopX++, outX += blockDim.y) + { + // Setup X receptive field. + int midX = outX * p.down.x + p.up.x - 1 - p.pad0.x; + int inX = min(max(floor_div(midX, p.up.x), 0), p.inSize.x); + int w = min(max(floor_div(midX + p.filterSize.x, p.up.x), 0), p.inSize.x) - inX; + int filterX = midX + p.filterSize.x - (inX + 1) * p.up.x; + if (p.flip) + filterX = p.filterSize.x - 1 - filterX; + + // Initialize pointers. 
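+            // xp walks the input receptive field and fp walks the filter taps; the
+            // filter step direction depends on p.flip, which distinguishes convolution
+            // from correlation.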
+ const T* xp = &((const T*)p.x)[inX * p.inStride.x + inY * p.inStride.y + c * p.inStride.z + n * p.inStride.w]; + const float* fp = &p.f[filterX * p.filterStride.x + filterY * p.filterStride.y]; + int filterStepX = ((p.flip) ? p.up.x : -p.up.x) * p.filterStride.x; + int filterStepY = ((p.flip) ? p.up.y : -p.up.y) * p.filterStride.y; + + // Inner loop. + scalar_t v = 0; + for (int y = 0; y < h; y++) + { + for (int x = 0; x < w; x++) + { + v += (scalar_t)(*xp) * (scalar_t)(*fp); + xp += p.inStride.x; + fp += filterStepX; + } + xp += p.inStride.y - w * p.inStride.x; + fp += filterStepY - w * filterStepX; + } + + // Store result. + v *= p.gain; + ((T*)p.y)[outX * p.outStride.x + outY * p.outStride.y + c * p.outStride.z + n * p.outStride.w] = (T)v; + } + } +} + +//------------------------------------------------------------------------ +// Specialized CUDA implementation for small filters. + +template +static __global__ void upfirdn2d_kernel_small(upfirdn2d_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + const int tileInW = ((tileOutW - 1) * downx + filterW - 1) / upx + 1; + const int tileInH = ((tileOutH - 1) * downy + filterH - 1) / upy + 1; + __shared__ volatile scalar_t sf[filterH][filterW]; + __shared__ volatile scalar_t sx[tileInH][tileInW][loopMinor]; + + // Calculate tile index. + int minorBase = blockIdx.x; + int tileOutY = minorBase / p.launchMinor; + minorBase -= tileOutY * p.launchMinor; + minorBase *= loopMinor; + tileOutY *= tileOutH; + int tileOutXBase = blockIdx.y * p.loopX * tileOutW; + int majorBase = blockIdx.z * p.loopMajor; + if (tileOutXBase >= p.outSize.x | tileOutY >= p.outSize.y | majorBase >= p.sizeMajor) + return; + + // Load filter (flipped). + for (int tapIdx = threadIdx.x; tapIdx < filterH * filterW; tapIdx += blockDim.x) + { + int fy = tapIdx / filterW; + int fx = tapIdx - fy * filterW; + scalar_t v = 0; + if (fx < p.filterSize.x & fy < p.filterSize.y) + { + int ffx = (p.flip) ? fx : p.filterSize.x - 1 - fx; + int ffy = (p.flip) ? fy : p.filterSize.y - 1 - fy; + v = (scalar_t)p.f[ffx * p.filterStride.x + ffy * p.filterStride.y]; + } + sf[fy][fx] = v; + } + + // Loop over major and X. + for (int majorIdx = 0, major = majorBase; majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) + { + int baseNC = major * p.sizeMinor + minorBase; + int n = baseNC / p.inSize.z; + int baseC = baseNC - n * p.inSize.z; + for (int loopX = 0, tileOutX = tileOutXBase; loopX < p.loopX & tileOutX < p.outSize.x; loopX++, tileOutX += tileOutW) + { + // Load input pixels. + int tileMidX = tileOutX * downx + upx - 1 - p.pad0.x; + int tileMidY = tileOutY * downy + upy - 1 - p.pad0.y; + int tileInX = floor_div(tileMidX, upx); + int tileInY = floor_div(tileMidY, upy); + __syncthreads(); + for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW * loopMinor; inIdx += blockDim.x) + { + int relC = inIdx; + int relInX = relC / loopMinor; + int relInY = relInX / tileInW; + relC -= relInX * loopMinor; + relInX -= relInY * tileInW; + int c = baseC + relC; + int inX = tileInX + relInX; + int inY = tileInY + relInY; + scalar_t v = 0; + if (inX >= 0 & inY >= 0 & inX < p.inSize.x & inY < p.inSize.y & c < p.inSize.z) + v = (scalar_t)((const T*)p.x)[inX * p.inStride.x + inY * p.inStride.y + c * p.inStride.z + n * p.inStride.w]; + sx[relInY][relInX][relC] = v; + } + + // Loop over output pixels. 
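+            // The filter taps (sf) and an input tile (sx) are now staged in shared
+            // memory; after the barrier, each thread produces its output pixels with
+            // the fully unrolled inner product below.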
+ __syncthreads(); + for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW * loopMinor; outIdx += blockDim.x) + { + int relC = outIdx; + int relOutX = relC / loopMinor; + int relOutY = relOutX / tileOutW; + relC -= relOutX * loopMinor; + relOutX -= relOutY * tileOutW; + int c = baseC + relC; + int outX = tileOutX + relOutX; + int outY = tileOutY + relOutY; + + // Setup receptive field. + int midX = tileMidX + relOutX * downx; + int midY = tileMidY + relOutY * downy; + int inX = floor_div(midX, upx); + int inY = floor_div(midY, upy); + int relInX = inX - tileInX; + int relInY = inY - tileInY; + int filterX = (inX + 1) * upx - midX - 1; // flipped + int filterY = (inY + 1) * upy - midY - 1; // flipped + + // Inner loop. + if (outX < p.outSize.x & outY < p.outSize.y & c < p.outSize.z) + { + scalar_t v = 0; + #pragma unroll + for (int y = 0; y < filterH / upy; y++) + #pragma unroll + for (int x = 0; x < filterW / upx; x++) + v += sx[relInY + y][relInX + x][relC] * sf[filterY + y * upy][filterX + x * upx]; + v *= p.gain; + ((T*)p.y)[outX * p.outStride.x + outY * p.outStride.y + c * p.outStride.z + n * p.outStride.w] = (T)v; + } + } + } + } +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p) +{ + int s = p.inStride.z, fx = p.filterSize.x, fy = p.filterSize.y; + upfirdn2d_kernel_spec spec = {(void*)upfirdn2d_kernel_large, -1,-1,1, 4}; // contiguous + if (s == 1) spec = {(void*)upfirdn2d_kernel_large, -1,-1,4, 1}; // channels_last + + // No up/downsampling. + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 7 && fy <= 7 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 5 && fy <= 5 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 3 && fy <= 3 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 24 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 7 && fy <= 7 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 5 && fy <= 5 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 3 && fy <= 3 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 24 && fy <= 
1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 16 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 8 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 2x upsampling. + if (p.up.x == 2 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + } + if (p.up.x == 2 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + } + if (p.up.x == 1 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 2x downsampling. 
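+    // As above, each branch picks a tile size matched to the filter extent, with
+    // separate entries for contiguous (s != 1) and channels-last (s == 1) layouts.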
+ if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 2) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 16,16,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 16,16,1, 1}; + if (s == 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + if (s == 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + if (s == 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 2) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + } + + // 4x upsampling. 
+ if (p.up.x == 4 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 32 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 32 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + } + if (p.up.x == 4 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + } + if (p.up.x == 1 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 4x downsampling (inefficient). + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 4 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,1,8, 1}; + if (s == 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,1,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 4) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 1,32,8, 1}; + if (s == 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 1,32,8, 1}; + } + return spec; +} + +//------------------------------------------------------------------------ +// Template specializations. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel (const upfirdn2d_kernel_params& p); +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel (const upfirdn2d_kernel_params& p); +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.h b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.h new file mode 100644 index 0000000..d5de893 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. 
Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include + +//------------------------------------------------------------------------ +// CUDA kernel parameters. + +struct upfirdn2d_kernel_params +{ + const void* x; + const float* f; + void* y; + + int2 up; + int2 down; + int2 pad0; + int flip; + float gain; + + int4 inSize; // [width, height, channel, batch] + int4 inStride; + int2 filterSize; // [width, height] + int2 filterStride; + int4 outSize; // [width, height, channel, batch] + int4 outStride; + int sizeMinor; + int sizeMajor; + + int loopMinor; + int loopMajor; + int loopX; + int launchMinor; + int launchMajor; +}; + +//------------------------------------------------------------------------ +// CUDA kernel specialization. + +struct upfirdn2d_kernel_spec +{ + void* kernel; + int tileOutW; + int tileOutH; + int loopMinor; + int loopX; +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.py b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.py new file mode 100644 index 0000000..5d63471 --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/ops/upfirdn2d.py @@ -0,0 +1,391 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Custom PyTorch ops for efficient resampling of 2D images.""" + +import os +import numpy as np +import torch + +from .. import custom_ops +from .. import misc +from . 
import conv2d_gradfix + +#---------------------------------------------------------------------------- + +_plugin = None + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='upfirdn2d_plugin', + sources=['upfirdn2d.cpp', 'upfirdn2d.cu'], + headers=['upfirdn2d.h'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], + ) + return True + +def _parse_scaling(scaling): + if isinstance(scaling, int): + scaling = [scaling, scaling] + assert isinstance(scaling, (list, tuple)) + assert all(isinstance(x, int) for x in scaling) + sx, sy = scaling + assert sx >= 1 and sy >= 1 + return sx, sy + +def _parse_padding(padding): + if isinstance(padding, int): + padding = [padding, padding] + assert isinstance(padding, (list, tuple)) + assert all(isinstance(x, int) for x in padding) + if len(padding) == 2: + padx, pady = padding + padding = [padx, padx, pady, pady] + padx0, padx1, pady0, pady1 = padding + return padx0, padx1, pady0, pady1 + +def _get_filter_size(f): + if f is None: + return 1, 1 + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + fw = f.shape[-1] + fh = f.shape[0] + with misc.suppress_tracer_warnings(): + fw = int(fw) + fh = int(fh) + misc.assert_shape(f, [fh, fw][:f.ndim]) + assert fw >= 1 and fh >= 1 + return fw, fh + +#---------------------------------------------------------------------------- + +def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): + r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. + + Args: + f: Torch tensor, numpy array, or python list of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), + `[]` (impulse), or + `None` (identity). + device: Result device (default: cpu). + normalize: Normalize the filter so that it retains the magnitude + for constant input signal (DC)? (default: True). + flip_filter: Flip the filter? (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + separable: Return a separable filter? (default: select automatically). + + Returns: + Float32 tensor of the shape + `[filter_height, filter_width]` (non-separable) or + `[filter_taps]` (separable). + """ + # Validate. + if f is None: + f = 1 + f = torch.as_tensor(f, dtype=torch.float32) + assert f.ndim in [0, 1, 2] + assert f.numel() > 0 + if f.ndim == 0: + f = f[np.newaxis] + + # Separable? + if separable is None: + separable = (f.ndim == 1 and f.numel() >= 8) + if f.ndim == 1 and not separable: + f = f.ger(f) + assert f.ndim == (1 if separable else 2) + + # Apply normalize, flip, gain, and device. + if normalize: + f /= f.sum() + if flip_filter: + f = f.flip(list(range(f.ndim))) + f = f * (gain ** (f.ndim / 2)) + f = f.to(device=device) + return f + +#---------------------------------------------------------------------------- + +def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Pad, upsample, filter, and downsample a batch of 2D images. + + Performs the following sequence of operations for each channel: + + 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). + + 2. Pad the image with the specified number of zeros on each side (`padding`). + Negative padding corresponds to cropping the image. + + 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it + so that the footprint of all output pixels lies within the input image. + + 4. Downsample the image by keeping every Nth pixel (`down`). 
+ + This sequence of operations bears close resemblance to scipy.signal.upfirdn(). + The fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports gradients of arbitrary order. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) + return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): + """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. + """ + # Validate arguments. + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + assert f.dtype == torch.float32 and not f.requires_grad + batch_size, num_channels, in_height, in_width = x.shape + upx, upy = _parse_scaling(up) + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + + # Check that upsampled buffer is not smaller than the filter. + upW = in_width * upx + padx0 + padx1 + upH = in_height * upy + pady0 + pady1 + assert upW >= f.shape[-1] and upH >= f.shape[0] + + # Upsample by inserting zeros. + x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) + x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) + x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) + + # Pad or crop. + x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) + x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] + + # Setup filter. + f = f * (gain ** (f.ndim / 2)) + f = f.to(x.dtype) + if not flip_filter: + f = f.flip(list(range(f.ndim))) + + # Convolve with the filter. + f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) + if f.ndim == 4: + x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) + else: + x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) + x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) + + # Downsample by throwing away pixels. 
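+    # Strided slicing keeps every downx-th column and downy-th row, completing the
+    # reference upsample -> pad -> filter -> downsample pipeline described in the
+    # upfirdn2d() docstring.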
+ x = x[:, :, ::downy, ::downx] + return x + +#---------------------------------------------------------------------------- + +_upfirdn2d_cuda_cache = dict() + +def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): + """Fast CUDA implementation of `upfirdn2d()` using custom ops. + """ + # Parse arguments. + upx, upy = _parse_scaling(up) + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + + # Lookup from cache. + key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) + if key in _upfirdn2d_cuda_cache: + return _upfirdn2d_cuda_cache[key] + + # Forward op. + class Upfirdn2dCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, f): # pylint: disable=arguments-differ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + if f.ndim == 1 and f.shape[0] == 1: + f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1. + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + y = x + if f.ndim == 2: + y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) + else: + y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0) + y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain) + ctx.save_for_backward(f) + ctx.x_shape = x.shape + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + f, = ctx.saved_tensors + _, _, ih, iw = ctx.x_shape + _, _, oh, ow = dy.shape + fw, fh = _get_filter_size(f) + p = [ + fw - padx0 - 1, + iw * upx - ow * downx + padx0 - upx + 1, + fh - pady0 - 1, + ih * upy - oh * downy + pady0 - upy + 1, + ] + dx = None + df = None + + if ctx.needs_input_grad[0]: + dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) + + assert not ctx.needs_input_grad[1] + return dx, df + + # Add to cache. + _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda + return Upfirdn2dCuda + +#---------------------------------------------------------------------------- + +def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Filter a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape matches the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + padding: Padding with respect to the output. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
+ """ + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + fw // 2, + padx1 + (fw - 1) // 2, + pady0 + fh // 2, + pady1 + (fh - 1) // 2, + ] + return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) + +#---------------------------------------------------------------------------- + +def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Upsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a multiple of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the output. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + upx, upy = _parse_scaling(up) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw + upx - 1) // 2, + padx1 + (fw - upx) // 2, + pady0 + (fh + upy - 1) // 2, + pady1 + (fh - upy) // 2, + ] + return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) + +#---------------------------------------------------------------------------- + +def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Downsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a fraction of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the input. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
+ """ + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw - downx + 1) // 2, + padx1 + (fw - downx) // 2, + pady0 + (fh - downy + 1) // 2, + pady1 + (fh - downy) // 2, + ] + return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/persistence.py b/3DPortraitGAN_pyramid/torch_utils/persistence.py new file mode 100644 index 0000000..1abf9cb --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/persistence.py @@ -0,0 +1,253 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Facilities for pickling Python code alongside other data. + +The pickled code is automatically imported into a separate Python module +during unpickling. This way, any previously exported pickles will remain +usable even if the original code is no longer available, or if the current +version of the code is not consistent with what was originally pickled.""" + +import sys +import pickle +import io +import inspect +import copy +import uuid +import types +import dnnlib + +#---------------------------------------------------------------------------- + +_version = 6 # internal version number +_decorators = set() # {decorator_class, ...} +_import_hooks = [] # [hook_function, ...] +_module_to_src_dict = dict() # {module: src, ...} +_src_to_module_dict = dict() # {src: module, ...} + +#---------------------------------------------------------------------------- + +def persistent_class(orig_class): + r"""Class decorator that extends a given class to save its source code + when pickled. + + Example: + + from torch_utils import persistence + + @persistence.persistent_class + class MyNetwork(torch.nn.Module): + def __init__(self, num_inputs, num_outputs): + super().__init__() + self.fc = MyLayer(num_inputs, num_outputs) + ... + + @persistence.persistent_class + class MyLayer(torch.nn.Module): + ... + + When pickled, any instance of `MyNetwork` and `MyLayer` will save its + source code alongside other internal state (e.g., parameters, buffers, + and submodules). This way, any previously exported pickle will remain + usable even if the class definitions have been modified or are no + longer available. + + The decorator saves the source code of the entire Python module + containing the decorated class. It does *not* save the source code of + any imported modules. Thus, the imported modules must be available + during unpickling, also including `torch_utils.persistence` itself. + + It is ok to call functions defined in the same module from the + decorated class. However, if the decorated class depends on other + classes defined in the same module, they must be decorated as well. + This is illustrated in the above example in the case of `MyLayer`. + + It is also possible to employ the decorator just-in-time before + calling the constructor. 
For example:
+
+        cls = MyLayer
+        if want_to_make_it_persistent:
+            cls = persistence.persistent_class(cls)
+        layer = cls(num_inputs, num_outputs)
+
+    As an additional feature, the decorator also keeps track of the
+    arguments that were used to construct each instance of the decorated
+    class. The arguments can be queried via `obj.init_args` and
+    `obj.init_kwargs`, and they are automatically pickled alongside other
+    object state. A typical use case is to first unpickle a previous
+    instance of a persistent class, and then upgrade it to use the latest
+    version of the source code:
+
+        with open('old_pickle.pkl', 'rb') as f:
+            old_net = pickle.load(f)
+        new_net = MyNetwork(*old_net.init_args, **old_net.init_kwargs)
+        misc.copy_params_and_buffers(old_net, new_net, require_all=True)
+    """
+    assert isinstance(orig_class, type)
+    if is_persistent(orig_class):
+        return orig_class
+
+    assert orig_class.__module__ in sys.modules
+    orig_module = sys.modules[orig_class.__module__]
+    orig_module_src = _module_to_src(orig_module)
+
+    class Decorator(orig_class):
+        _orig_module_src = orig_module_src
+        _orig_class_name = orig_class.__name__
+
+        def __init__(self, *args, **kwargs):
+            super().__init__(*args, **kwargs)
+            self._init_args = copy.deepcopy(args)
+            self._init_kwargs = copy.deepcopy(kwargs)
+            assert orig_class.__name__ in orig_module.__dict__
+            _check_pickleable(self.__reduce__())
+
+        @property
+        def init_args(self):
+            return copy.deepcopy(self._init_args)
+
+        @property
+        def init_kwargs(self):
+            return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
+
+        def __reduce__(self):
+            fields = list(super().__reduce__())
+            fields += [None] * max(3 - len(fields), 0)
+            if fields[0] is not _reconstruct_persistent_obj:
+                meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
+                fields[0] = _reconstruct_persistent_obj # reconstruct func
+                fields[1] = (meta,) # reconstruct args
+                fields[2] = None # state dict
+            return tuple(fields)
+
+    Decorator.__name__ = orig_class.__name__
+    _decorators.add(Decorator)
+    return Decorator
+
+#----------------------------------------------------------------------------
+
+def is_persistent(obj):
+    r"""Test whether the given object or class is persistent, i.e.,
+    whether it will save its source code when pickled.
+    """
+    try:
+        if obj in _decorators:
+            return True
+    except TypeError:
+        pass
+    return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
+
+#----------------------------------------------------------------------------
+
+def import_hook(hook):
+    r"""Register an import hook that is called whenever a persistent object
+    is being unpickled. A typical use case is to patch the pickled source
+    code to avoid errors and inconsistencies when the API of some imported
+    module has changed.
+
+    The hook should have the following signature:
+
+        hook(meta) -> modified meta
+
+    `meta` is an instance of `dnnlib.EasyDict` with the following fields:
+
+        type:        Type of the persistent object, e.g. `'class'`.
+        version:     Internal version number of `torch_utils.persistence`.
+        module_src:  Original source code of the Python module.
+        class_name:  Class name in the original Python module.
+        state:       Internal state of the object.
+
+    Example:
+
+        @persistence.import_hook
+        def wreck_my_network(meta):
+            if meta.class_name == 'MyNetwork':
+                print('MyNetwork is being imported.
I will wreck it!') + meta.module_src = meta.module_src.replace("True", "False") + return meta + """ + assert callable(hook) + _import_hooks.append(hook) + +#---------------------------------------------------------------------------- + +def _reconstruct_persistent_obj(meta): + r"""Hook that is called internally by the `pickle` module to unpickle + a persistent object. + """ + meta = dnnlib.EasyDict(meta) + meta.state = dnnlib.EasyDict(meta.state) + for hook in _import_hooks: + meta = hook(meta) + assert meta is not None + + assert meta.version == _version + module = _src_to_module(meta.module_src) + + assert meta.type == 'class' + orig_class = module.__dict__[meta.class_name] + decorator_class = persistent_class(orig_class) + obj = decorator_class.__new__(decorator_class) + + setstate = getattr(obj, '__setstate__', None) + if callable(setstate): + setstate(meta.state) # pylint: disable=not-callable + else: + obj.__dict__.update(meta.state) + return obj + +#---------------------------------------------------------------------------- + +def _module_to_src(module): + r"""Query the source code of a given Python module. + """ + src = _module_to_src_dict.get(module, None) + if src is None: + src = inspect.getsource(module) + _module_to_src_dict[module] = src + _src_to_module_dict[src] = module + return src + +def _src_to_module(src): + r"""Get or create a Python module for the given source code. + """ + module = _src_to_module_dict.get(src, None) + if module is None: + module_name = "_imported_module_" + uuid.uuid4().hex + module = types.ModuleType(module_name) + sys.modules[module_name] = module + _module_to_src_dict[module] = src + _src_to_module_dict[src] = module + exec(src, module.__dict__) # pylint: disable=exec-used + return module + +#---------------------------------------------------------------------------- + +def _check_pickleable(obj): + r"""Check that the given object is pickleable, raising an exception if + it is not. This function is expected to be considerably more efficient + than actually pickling the object. + """ + def recurse(obj): + if isinstance(obj, (list, tuple, set)): + return [recurse(x) for x in obj] + if isinstance(obj, dict): + return [[recurse(x), recurse(y)] for x, y in obj.items()] + if isinstance(obj, (str, int, float, bool, bytes, bytearray)): + return None # Python primitive types are pickleable. + if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']: + return None # NumPy arrays and PyTorch tensors are pickleable. + if is_persistent(obj): + return None # Persistent objects are pickleable, by virtue of the constructor check. + return obj + with io.BytesIO() as f: + pickle.dump(recurse(obj), f) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/torch_utils/training_stats.py b/3DPortraitGAN_pyramid/torch_utils/training_stats.py new file mode 100644 index 0000000..636dd7f --- /dev/null +++ b/3DPortraitGAN_pyramid/torch_utils/training_stats.py @@ -0,0 +1,270 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Facilities for reporting and collecting training statistics across +multiple processes and devices. The interface is designed to minimize +synchronization overhead as well as the amount of boilerplate in user +code.""" + +import re +import numpy as np +import torch +import dnnlib + +from . import misc + +#---------------------------------------------------------------------------- + +_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares] +_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction. +_counter_dtype = torch.float64 # Data type to use for the internal counters. +_rank = 0 # Rank of the current process. +_sync_device = None # Device to use for multiprocess communication. None = single-process. +_sync_called = False # Has _sync() been called yet? +_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor +_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor + +#---------------------------------------------------------------------------- + +def init_multiprocessing(rank, sync_device): + r"""Initializes `torch_utils.training_stats` for collecting statistics + across multiple processes. + + This function must be called after + `torch.distributed.init_process_group()` and before `Collector.update()`. + The call is not necessary if multi-process collection is not needed. + + Args: + rank: Rank of the current process. + sync_device: PyTorch device to use for inter-process + communication, or None to disable multi-process + collection. Typically `torch.device('cuda', rank)`. + """ + global _rank, _sync_device + assert not _sync_called + _rank = rank + _sync_device = sync_device + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def report(name, value): + r"""Broadcasts the given set of scalars to all interested instances of + `Collector`, across device and process boundaries. + + This function is expected to be extremely cheap and can be safely + called from anywhere in the training loop, loss function, or inside a + `torch.nn.Module`. + + Warning: The current implementation expects the set of unique names to + be consistent across processes. Please make sure that `report()` is + called at least once for each unique name by each process, and in the + same order. If a given process has no scalars to broadcast, it can do + `report(name, [])` (empty list). + + Args: + name: Arbitrary string specifying the name of the statistic. + Averages are accumulated separately for each unique name. + value: Arbitrary set of scalars. Can be a list, tuple, + NumPy array, PyTorch tensor, or Python scalar. + + Returns: + The same `value` that was passed in. 
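+
+    Example (illustrative; `gen_loss` stands for any scalar or tensor of scalars
+    produced by the training loop):
+
+        training_stats.report('Loss/G/loss', gen_loss)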
+ """ + if name not in _counters: + _counters[name] = dict() + + elems = torch.as_tensor(value) + if elems.numel() == 0: + return value + + elems = elems.detach().flatten().to(_reduce_dtype) + moments = torch.stack([ + torch.ones_like(elems).sum(), + elems.sum(), + elems.square().sum(), + ]) + assert moments.ndim == 1 and moments.shape[0] == _num_moments + moments = moments.to(_counter_dtype) + + device = moments.device + if device not in _counters[name]: + _counters[name][device] = torch.zeros_like(moments) + _counters[name][device].add_(moments) + return value + +#---------------------------------------------------------------------------- + +def report0(name, value): + r"""Broadcasts the given set of scalars by the first process (`rank = 0`), + but ignores any scalars provided by the other processes. + See `report()` for further details. + """ + report(name, value if _rank == 0 else []) + return value + +#---------------------------------------------------------------------------- + +class Collector: + r"""Collects the scalars broadcasted by `report()` and `report0()` and + computes their long-term averages (mean and standard deviation) over + user-defined periods of time. + + The averages are first collected into internal counters that are not + directly visible to the user. They are then copied to the user-visible + state as a result of calling `update()` and can then be queried using + `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the + internal counters for the next round, so that the user-visible state + effectively reflects averages collected between the last two calls to + `update()`. + + Args: + regex: Regular expression defining which statistics to + collect. The default is to collect everything. + keep_previous: Whether to retain the previous averages if no + scalars were collected on a given round + (default: True). + """ + def __init__(self, regex='.*', keep_previous=True): + self._regex = re.compile(regex) + self._keep_previous = keep_previous + self._cumulative = dict() + self._moments = dict() + self.update() + self._moments.clear() + + def names(self): + r"""Returns the names of all statistics broadcasted so far that + match the regular expression specified at construction time. + """ + return [name for name in _counters if self._regex.fullmatch(name)] + + def update(self): + r"""Copies current values of the internal counters to the + user-visible state and resets them for the next round. + + If `keep_previous=True` was specified at construction time, the + operation is skipped for statistics that have received no scalars + since the last update, retaining their previous averages. + + This method performs a number of GPU-to-CPU transfers and one + `torch.distributed.all_reduce()`. It is intended to be called + periodically in the main training loop, typically once every + N training steps. + """ + if not self._keep_previous: + self._moments.clear() + for name, cumulative in _sync(self.names()): + if name not in self._cumulative: + self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + delta = cumulative - self._cumulative[name] + self._cumulative[name].copy_(cumulative) + if float(delta[0]) != 0: + self._moments[name] = delta + + def _get_delta(self, name): + r"""Returns the raw moments that were accumulated for the given + statistic between the last two calls to `update()`, or zero if + no scalars were collected. 
+ """ + assert self._regex.fullmatch(name) + if name not in self._moments: + self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + return self._moments[name] + + def num(self, name): + r"""Returns the number of scalars that were accumulated for the given + statistic between the last two calls to `update()`, or zero if + no scalars were collected. + """ + delta = self._get_delta(name) + return int(delta[0]) + + def mean(self, name): + r"""Returns the mean of the scalars that were accumulated for the + given statistic between the last two calls to `update()`, or NaN if + no scalars were collected. + """ + delta = self._get_delta(name) + if int(delta[0]) == 0: + return float('nan') + return float(delta[1] / delta[0]) + + def std(self, name): + r"""Returns the standard deviation of the scalars that were + accumulated for the given statistic between the last two calls to + `update()`, or NaN if no scalars were collected. + """ + delta = self._get_delta(name) + if int(delta[0]) == 0 or not np.isfinite(float(delta[1])): + return float('nan') + if int(delta[0]) == 1: + return float(0) + mean = float(delta[1] / delta[0]) + raw_var = float(delta[2] / delta[0]) + return np.sqrt(max(raw_var - np.square(mean), 0)) + + def as_dict(self): + r"""Returns the averages accumulated between the last two calls to + `update()` as an `dnnlib.EasyDict`. The contents are as follows: + + dnnlib.EasyDict( + NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT), + ... + ) + """ + stats = dnnlib.EasyDict() + for name in self.names(): + stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name)) + return stats + + def __getitem__(self, name): + r"""Convenience getter. + `collector[name]` is a synonym for `collector.mean(name)`. + """ + return self.mean(name) + +#---------------------------------------------------------------------------- + +def _sync(names): + r"""Synchronize the global cumulative counters across devices and + processes. Called internally by `Collector.update()`. + """ + if len(names) == 0: + return [] + global _sync_called + _sync_called = True + + # Collect deltas within current rank. + deltas = [] + device = _sync_device if _sync_device is not None else torch.device('cpu') + for name in names: + delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device) + for counter in _counters[name].values(): + delta.add_(counter.to(device)) + counter.copy_(torch.zeros_like(counter)) + deltas.append(delta) + deltas = torch.stack(deltas) + + # Sum deltas across ranks. + if _sync_device is not None: + torch.distributed.all_reduce(deltas) + + # Update cumulative values. + deltas = deltas.cpu() + for idx, name in enumerate(names): + if name not in _cumulative: + _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + _cumulative[name].add_(deltas[idx]) + + # Return name-value pairs. + return [(name, _cumulative[name]) for name in names] + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/train.py b/3DPortraitGAN_pyramid/train.py new file mode 100644 index 0000000..d06d835 --- /dev/null +++ b/3DPortraitGAN_pyramid/train.py @@ -0,0 +1,501 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Train a GAN using the techniques described in the paper +"Efficient Geometry-aware 3D Generative Adversarial Networks." + +Code adapted from +"Alias-Free Generative Adversarial Networks".""" + +import os +import click +import re +import json +import tempfile +import torch + +import dnnlib +from training import training_loop +from metrics import metric_main +from torch_utils import training_stats +from torch_utils import custom_ops + +#---------------------------------------------------------------------------- + +def subprocess_fn(rank, c, temp_dir): + dnnlib.util.Logger(file_name=os.path.join(c.run_dir, 'log.txt'), file_mode='a', should_flush=True) + + # Init torch.distributed. + if c.num_gpus > 1: + init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init')) + if os.name == 'nt': + init_method = 'file:///' + init_file.replace('\\', '/') + torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=c.num_gpus) + else: + init_method = f'file://{init_file}' + torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=c.num_gpus) + + # Init torch_utils. + sync_device = torch.device('cuda', rank) if c.num_gpus > 1 else None + training_stats.init_multiprocessing(rank=rank, sync_device=sync_device) + if rank != 0: + custom_ops.verbosity = 'none' + + # Execute training loop. + training_loop.training_loop(rank=rank, **c) + +#---------------------------------------------------------------------------- + +def launch_training(c, desc, outdir, dry_run): + dnnlib.util.Logger(should_flush=True) + + # Pick output directory. + prev_run_dirs = [] + if os.path.isdir(outdir): + prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))] + prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs] + prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None] + cur_run_id = max(prev_run_ids, default=-1) + 1 + c.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{desc}') + assert not os.path.exists(c.run_dir) + + # Print options. + print() + print('Training options:') + print(json.dumps(c, indent=2)) + print() + print(f'Output directory: {c.run_dir}') + print(f'Number of GPUs: {c.num_gpus}') + print(f'Batch size: {c.batch_size} images') + print(f'Training duration: {c.total_kimg} kimg') + print(f'Dataset path (img): {c.training_set_kwargs.img_path}') + print(f'Dataset path (seg): {c.training_set_kwargs.seg_path}') + print(f'Dataset size: {c.training_set_kwargs.max_size} images') + print(f'Dataset resolution: {c.training_set_kwargs.resolution}') + print(f'Dataset labels: {c.training_set_kwargs.use_labels}') + print(f'Dataset x-flips: {c.training_set_kwargs.xflip}') + print() + + # Dry run? + if dry_run: + print('Dry run; exiting.') + return + + # Create output directory. + print('Creating output directory...') + os.makedirs(c.run_dir) + with open(os.path.join(c.run_dir, 'training_options.json'), 'wt') as f: + json.dump(c, f, indent=2) + + # Launch processes. 
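+    # Note: one worker process per GPU. With a single GPU the training loop runs in the
+    # current process; otherwise torch.multiprocessing.spawn() starts `num_gpus` processes
+    # that rendezvous via the file-based init method set up in subprocess_fn() above.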
+ print('Launching processes...') + torch.multiprocessing.set_start_method('spawn') + with tempfile.TemporaryDirectory() as temp_dir: + if c.num_gpus == 1: + subprocess_fn(rank=0, c=c, temp_dir=temp_dir) + else: + torch.multiprocessing.spawn(fn=subprocess_fn, args=(c, temp_dir), nprocs=c.num_gpus) + +#---------------------------------------------------------------------------- + +def init_dataset_kwargs(data,seg_data, data_rebalance,data_rebalance_idx_file,back_repeat): + try: + dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.MaskLabeledDataset', + img_path=data, + seg_path = seg_data, + back_repeat = back_repeat, + use_labels=True, max_size=None, xflip=True, + data_rebalance=data_rebalance,data_rebalance_idx_file=data_rebalance_idx_file) + dataset_obj = dnnlib.util.construct_class_by_name(**dataset_kwargs) # Subclass of training.dataset.Dataset. + dataset_kwargs.resolution = dataset_obj.resolution # Be explicit about resolution. + dataset_kwargs.use_labels = dataset_obj.has_labels # Be explicit about labels. + dataset_kwargs.max_size = len(dataset_obj) # Be explicit about dataset size. + return dataset_kwargs, dataset_obj.name + except IOError as err: + raise click.ClickException(f'--data: {err}') + +#---------------------------------------------------------------------------- + +def parse_comma_separated_list(s): + if isinstance(s, list): + return s + if s is None or s.lower() == 'none' or s == '': + return [] + return s.split(',') + +#---------------------------------------------------------------------------- + +@click.command() + +# Required. +@click.option('--outdir', help='Where to save the results', metavar='DIR', required=True) +@click.option('--cfg', help='Base configuration', type=str, required=True) +@click.option('--data', help='Training data', metavar='[ZIP|DIR]', type=str, required=True) + +@click.option('--gpus', help='Number of GPUs to use', metavar='INT', type=click.IntRange(min=1), required=True) +@click.option('--batch', help='Total batch size', metavar='INT', type=click.IntRange(min=1), required=True) +@click.option('--gamma', help='R1 regularization weight', metavar='FLOAT', type=click.FloatRange(min=0), required=True) + +# Optional features. +@click.option('--cond', help='Train conditional model', metavar='BOOL', type=bool, default=True, show_default=True) +@click.option('--mirror', help='Enable dataset x-flips', metavar='BOOL', type=bool, default=False, show_default=True) +@click.option('--aug', help='Augmentation mode', type=click.Choice(['noaug', 'ada', 'fixed']), default='noaug', show_default=True) +@click.option('--resume', help='Resume from given network pickle', metavar='[PATH|URL]', type=str) +@click.option('--resume_kimg', help='Resume from given kimg', metavar='INT', type=int, default=0) +@click.option('--freezed', help='Freeze first layers of D', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True) +@click.option('--data_rebalance', help='Enable dataset rebalance', metavar='BOOL', type=bool, default=False, show_default=True) +@click.option('--data_rebalance_idx_file', help='Enable dataset rebalance', metavar='BOOL', type=str, required=False,default = None) + +# Misc hyperparameters. 
+@click.option('--p', help='Probability for --aug=fixed', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.2, show_default=True) +@click.option('--target', help='Target value for --aug=ada', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.6, show_default=True) +@click.option('--batch-gpu', help='Limit batch size per GPU', metavar='INT', type=click.IntRange(min=1)) +@click.option('--cbase', help='Capacity multiplier', metavar='INT', type=click.IntRange(min=1), default=18432, show_default=True) +@click.option('--cmax', help='Max. feature maps', metavar='INT', type=click.IntRange(min=1), default=144, show_default=True) +@click.option('--glr', help='G learning rate [default: varies]', metavar='FLOAT', type=click.FloatRange(min=0)) +@click.option('--dlr', help='D learning rate', metavar='FLOAT', type=click.FloatRange(min=0), default=0.002, show_default=True) +@click.option('--map-depth', help='Mapping network depth [default: varies]', metavar='INT', type=click.IntRange(min=1), default=2, show_default=True) +@click.option('--mbstd-group', help='Minibatch std group size', metavar='INT', type=click.IntRange(min=1), default=4, show_default=True) + +# Misc settings. +@click.option('--desc', help='String to include in result dir name', metavar='STR', type=str) +@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True) +@click.option('--kimg', help='Total training duration', metavar='KIMG', type=click.IntRange(min=1), default=25000, show_default=True) +@click.option('--tick', help='How often to print progress', metavar='KIMG', type=click.IntRange(min=1), default=4, show_default=True) +@click.option('--snap', help='How often to save snapshots', metavar='TICKS', type=click.IntRange(min=1), default=50, show_default=True) +@click.option('--image-snap', help='How often to save snapshots', metavar='TICKS', type=click.IntRange(min=1), default=50, show_default=True) +@click.option('--seed', help='Random seed', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True) +# @click.option('--fp32', help='Disable mixed-precision', metavar='BOOL', type=bool, default=False, show_default=True) +@click.option('--nobench', help='Disable cuDNN benchmarking', metavar='BOOL', type=bool, default=False, show_default=True) +@click.option('--workers', help='DataLoader worker processes', metavar='INT', type=click.IntRange(min=1), default=3, show_default=True) +@click.option('-n','--dry-run', help='Print training options and exit', is_flag=True) + +# @click.option('--sr_module', help='Superresolution module', metavar='STR', type=str, required=True) +@click.option('--neural_rendering_resolution_initial', help='Resolution to render at', metavar='INT', type=click.IntRange(min=1), default=64, required=False) +@click.option('--neural_rendering_resolution_final', help='Final resolution to render at, if blending', metavar='INT', type=click.IntRange(min=1), required=False, default=None) +@click.option('--neural_rendering_resolution_fade_kimg', help='Kimg to blend resolution over', metavar='INT', type=click.IntRange(min=0), required=False, default=1000, show_default=True) + +@click.option('--blur_fade_kimg', help='Blur over how many', metavar='INT', type=click.IntRange(min=1), required=False, default=200) +@click.option('--gen_pose_cond', help='If true, enable generator pose conditioning.', metavar='BOOL', type=bool, required=False, default=False) +@click.option('--c-scale', help='Scale factor for 
generator pose conditioning.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=1)
+@click.option('--c-noise', help='Add noise for generator pose conditioning.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=0)
+@click.option('--gpc_reg_prob', help='Strength of swapping regularization. None means no generator pose conditioning, i.e. condition with zeros.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=0.5)
+@click.option('--gpc_reg_fade_kimg', help='Length of swapping prob fade', metavar='INT', type=click.IntRange(min=0), required=False, default=1000)
+
+@click.option('--disc_c_noise', help='Strength of discriminator pose conditioning regularization, in standard deviations.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=0)
+@click.option('--sr_noise_mode', help='Type of noise for superresolution', metavar='STR', type=click.Choice(['random', 'none']), required=False, default='none')
+@click.option('--resume_blur', help='Enable to blur even on resume', metavar='BOOL', type=bool, required=False, default=False)
+@click.option('--sr_num_fp16_res', help='Number of fp16 layers in superresolution', metavar='INT', type=click.IntRange(min=0), default=4, required=False, show_default=True)
+@click.option('--g_num_fp16_res', help='Number of fp16 layers in generator', metavar='INT', type=click.IntRange(min=0), default=0, required=False, show_default=True)
+@click.option('--d_num_fp16_res', help='Number of fp16 layers in discriminator', metavar='INT', type=click.IntRange(min=0), default=4, required=False, show_default=True)
+@click.option('--sr_first_cutoff', help='First cutoff for AF superresolution', metavar='INT', type=click.IntRange(min=2), default=2, required=False, show_default=True)
+@click.option('--sr_first_stopband', help='First stopband for AF superresolution', metavar='FLOAT', type=click.FloatRange(min=2), default=2**2.1, required=False, show_default=True)
+@click.option('--style_mixing_prob', help='Style-mixing regularization probability for training.', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0, required=False, show_default=True)
+@click.option('--sr-module', help='Superresolution module override', metavar='STR', type=str, required=False, default=None)
+@click.option('--density_reg', help='Density regularization strength.', metavar='FLOAT', type=click.FloatRange(min=0), default=0.25, required=False, show_default=True)
+@click.option('--density_reg_every', help='Interval for lazy density regularization.', metavar='INT', type=click.FloatRange(min=1), default=4, required=False, show_default=True)
+@click.option('--density_reg_p_dist', help='Distance at which to sample perturbed points for density regularization.', metavar='FLOAT', type=click.FloatRange(min=0), default=0.004, required=False, show_default=True)
+@click.option('--reg_type', help='Type of regularization', metavar='STR', type=click.Choice(['l1', 'l1-alt', 'monotonic-detach', 'monotonic-fixed', 'total-variation']), required=False, default='l1')
+@click.option('--decoder_lr_mul', help='Decoder learning rate multiplier.', metavar='FLOAT', type=click.FloatRange(min=0), default=1, required=False, show_default=True)
+
+@click.option('--thickness', help='Sample thickness around the head mesh.', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.25, show_default=True) # smpl head stride is ~ 0.2
+
+@click.option('--pose_loss_weight', help='Weight of the pose prediction loss.', metavar='FLOAT', type=click.FloatRange(min=0), default=1.0, show_default=True)
+@click.option('--input_pose_params_reg_loss_weight', help='Weight of the input pose parameter regularization loss.', metavar='FLOAT', type=click.FloatRange(min=0), default=0, show_default=True)
+@click.option('--input_pose_params_reg_loss_kimg', help='Duration (kimg) over which the input pose parameter regularization loss is applied.', metavar='INT', type=click.FloatRange(min=0), default=0, show_default=True)
+
+@click.option('--explicitly_symmetry', help='Explicitly enforce symmetry.', metavar='BOOL', type=bool, required=False, default=False)
+
+@click.option('--train_g_pose_branch', help='Train the pose prediction branch of the generator.', metavar='BOOL', type=bool, required=False, default=True)
+
+@click.option('--metric_pose_sample_mode', help='Pose sampling mode for metric evaluation.', metavar='STR', type=click.Choice(['D_predict', 'G_predict']), required=False, default='G_predict')
+
+# panohead
+@click.option('--seg_channels', help='Channels of masks for discriminator.', metavar='INT', type=click.IntRange(min=1), default=1, required=False, show_default=True)
+@click.option('--decoder_activation', help='Activation function for decoder.', metavar='STR', type=click.Choice(['sigmoid', 'lrelu', 'none']), default="sigmoid", required=False, show_default=True)
+@click.option('--use_torgb_raw', help='Use ToRGB for raw image output.', metavar='BOOL', type=bool, default=False, required=False, show_default=True)
+@click.option('--use_background', help='Use separate background generator.', metavar='BOOL', type=bool, default=True, required=False, show_default=True)
+@click.option('--bcg_reg_prob', help='Swapping probability of background w code.', metavar='FLOAT', type=click.FloatRange(min=0), default=0, required=False, show_default=True)
+@click.option('--seg_data', help='Training data (segmentation masks)', metavar='[ZIP|DIR]', type=str, required=True)
+@click.option('--gamma_seg', help='R1 regularization weight for segmentation masks', metavar='FLOAT', type=click.FloatRange(min=0), required=True)
+@click.option('--density_noise_fade_kimg', help='Kimg to add density noise.', metavar='INT', type=click.IntRange(min=0), default=0, required=False, show_default=True)
+@click.option('--triplane_depth', help='Grid depth of each tri-plane.', metavar='INT', type=click.IntRange(min=1), default=1, required=False, show_default=True)
+
+@click.option('--back_repeat', help='Number of times to repeat images with absolute yaw in [max(90, min_yaw), max_yaw].', metavar='INT', type=click.IntRange(min=1), default=1, required=False, show_default=True)
+@click.option('--radius_scale', help='Radius scale ratio.', metavar='FLOAT', type=click.FloatRange(min=0.0), default=0.7)
+
+
+def main(**kwargs):
+    """Train a GAN using the techniques described in the paper
+    "Alias-Free Generative Adversarial Networks".
+
+    Examples:
+
+    \b
+    # Train StyleGAN3-T for AFHQv2 using 8 GPUs.
+    python train.py --outdir=~/training-runs --cfg=stylegan3-t --data=~/datasets/afhqv2-512x512.zip \\
+        --gpus=8 --batch=32 --gamma=8.2 --mirror=1
+
+    \b
+    # Fine-tune StyleGAN3-R for MetFaces-U using 1 GPU, starting from the pre-trained FFHQ-U pickle.
+    python train.py --outdir=~/training-runs --cfg=stylegan3-r --data=~/datasets/metfacesu-1024x1024.zip \\
+        --gpus=8 --batch=32 --gamma=6.6 --mirror=1 --kimg=5000 --snap=5 \\
+        --resume=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-1024x1024.pkl
+
+    \b
+    # Train StyleGAN2 for FFHQ at 1024x1024 resolution using 8 GPUs.
+ python train.py --outdir=~/training-runs --cfg=stylegan2 --data=~/datasets/ffhq-1024x1024.zip \\ + --gpus=8 --batch=32 --gamma=10 --mirror=1 --aug=noaug + """ + + # Initialize config. + print('>>>>>>>>>>>>>> kwargs:', kwargs) + opts = dnnlib.EasyDict(kwargs) # Command line arguments. + c = dnnlib.EasyDict() # Main config dict. + c.G_kwargs = dnnlib.EasyDict(class_name=None, z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict()) + c.D_kwargs = dnnlib.EasyDict(class_name='training.networks_stylegan2.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict()) + c.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0,0.99], eps=1e-8) + c.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0,0.99], eps=1e-8) + c.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss') + c.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, prefetch_factor=2) + + + if opts.data_rebalance: + assert opts.data_rebalance_idx_file is not None + + if opts.data_rebalance: + raise NotImplementedError('data_rebalance is not implemented yet') + + # Training set. + c.training_set_kwargs, dataset_name = init_dataset_kwargs(data=opts.data, seg_data =opts.seg_data, data_rebalance = opts.data_rebalance,data_rebalance_idx_file = opts.data_rebalance_idx_file,back_repeat = opts.back_repeat) + if opts.cond and not c.training_set_kwargs.use_labels: + raise click.ClickException('--cond=True requires labels specified in dataset.json') + c.training_set_kwargs.use_labels = opts.cond + c.training_set_kwargs.xflip = opts.mirror + c.training_set_kwargs.data_rebalance = opts.data_rebalance + if opts.data_rebalance: + c.training_set_kwargs.data_rebalance_idx_file = opts.data_rebalance_idx_file + + + # Hyperparameters & settings. + c.num_gpus = opts.gpus + c.batch_size = opts.batch + c.batch_gpu = opts.batch_gpu or opts.batch // opts.gpus + c.G_kwargs.channel_base = c.D_kwargs.channel_base = opts.cbase + c.G_kwargs.channel_max = c.D_kwargs.channel_max = opts.cmax + c.G_kwargs.mapping_kwargs.num_layers = opts.map_depth + c.G_kwargs.batch_size = c.batch_gpu + c.D_kwargs.block_kwargs.freeze_layers = opts.freezed + c.D_kwargs.epilogue_kwargs.mbstd_group_size = opts.mbstd_group + c.loss_kwargs.r1_gamma = opts.gamma + c.loss_kwargs.r1_gamma_seg = opts.gamma_seg + c.G_opt_kwargs.lr = (0.002 if opts.cfg == 'stylegan2' else 0.0025) if opts.glr is None else opts.glr + c.D_opt_kwargs.lr = opts.dlr + c.metrics = opts.metrics + c.total_kimg = opts.kimg + c.kimg_per_tick = opts.tick + c.network_snapshot_ticks = opts.snap + c.image_snapshot_ticks = opts.image_snap + c.random_seed = c.training_set_kwargs.random_seed = opts.seed + c.data_loader_kwargs.num_workers = opts.workers + + # Sanity checks. + if c.batch_size % c.num_gpus != 0: + raise click.ClickException('--batch must be a multiple of --gpus') + if c.batch_size % (c.num_gpus * c.batch_gpu) != 0: + raise click.ClickException('--batch must be a multiple of --gpus times --batch-gpu') + if c.batch_gpu < c.D_kwargs.epilogue_kwargs.mbstd_group_size: + raise click.ClickException('--batch-gpu cannot be smaller than --mbstd') + if any(not metric_main.is_valid_metric(metric) for metric in c.metrics): + raise click.ClickException('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics())) + + # Base configuration. 
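+    # Note: the EMA half-life scales linearly with the batch size (10 kimg at batch 32),
+    # following the StyleGAN2/EG3D convention; the classes below select the tri-plane
+    # generator (training.smpl_triplane.TriPlaneGenerator) and the pose/shape-aware dual
+    # discriminator used by this repository.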
+ c.ema_kimg = c.batch_size * 10 / 32 + c.G_kwargs.class_name = 'training.smpl_triplane.TriPlaneGenerator' + c.D_kwargs.class_name = 'training.dual_discriminator.PoseShapeAwareDualDiscriminator' + + c.G_kwargs.fused_modconv_default = 'inference_only' # Speed up training by using regular convolutions instead of grouped convolutions. + c.loss_kwargs.filter_mode = 'antialiased' # Filter mode for raw images ['antialiased', 'none', float [0-1]] + c.D_kwargs.disc_c_noise = opts.disc_c_noise # Regularization for discriminator pose conditioning + + if c.training_set_kwargs.resolution == 512: + sr_module = 'training.superresolution.SuperresolutionHybrid8XDC' + elif c.training_set_kwargs.resolution == 256: + sr_module = 'training.superresolution.SuperresolutionHybrid4X' + elif c.training_set_kwargs.resolution == 128: + sr_module = 'training.superresolution.SuperresolutionHybrid2X' + else: + assert False, f"Unsupported resolution {c.training_set_kwargs.resolution}; make a new superresolution module" + + if opts.sr_module != None: + sr_module = opts.sr_module + + rendering_options = { + 'image_resolution': c.training_set_kwargs.resolution, + 'disparity_space_sampling': False, + 'clamp_mode': 'softplus', + 'superresolution_module': sr_module, + 'c_gen_conditioning_zero': not opts.gen_pose_cond, # if true, fill generator pose conditioning label with dummy zero vector + 'gpc_reg_prob': opts.gpc_reg_prob if opts.gen_pose_cond else None, + 'decoder_activation': opts.decoder_activation, # activation function for decoder + 'use_torgb_raw': opts.use_torgb_raw, # use ToRGB layer for raw image output + 'use_background': opts.use_background, # use separate background generator + 'triplane_depth': opts.triplane_depth, # grid depth of each of tri-plane + 'c_scale': opts.c_scale, # mutliplier for generator pose conditioning label + 'superresolution_noise_mode': opts.sr_noise_mode, # [random or none], whether to inject pixel noise into super-resolution layers + 'density_reg': opts.density_reg, # strength of density regularization + 'density_reg_p_dist': opts.density_reg_p_dist, # distance at which to sample perturbed points for density regularization + 'reg_type': opts.reg_type, # for experimenting with variations on density regularization + 'decoder_lr_mul': opts.decoder_lr_mul, # learning rate multiplier for decoder + 'sr_antialias': True, + 'radius_scale': opts.radius_scale, + + } + + # if opts.cfg == 'ffhq': + # rendering_options.update({ + # 'depth_resolution': 48, # number of uniform samples to take per ray. + # 'depth_resolution_importance': 48, # number of importance samples to take per ray. + # 'ray_start': 2.25, # near point along each ray to start taking samples. + # 'ray_end': 3.3, # far point along each ray to stop taking samples. + # 'box_warp': 1, # the side-length of the bounding box spanned by the tri-planes; box_warp=1 means [-0.5, -0.5, -0.5] -> [0.5, 0.5, 0.5]. + # 'avg_camera_radius': 2.7, # used only in the visualizer to specify camera orbit radius. + # 'avg_camera_pivot': [0, 0, 0.2], # used only in the visualizer to control center of camera rotation. 
+ # }) + # elif opts.cfg == 'afhq': + # rendering_options.update({ + # 'depth_resolution': 48, + # 'depth_resolution_importance': 48, + # 'ray_start': 2.25, + # 'ray_end': 3.3, + # 'box_warp': 1, + # 'avg_camera_radius': 2.7, + # 'avg_camera_pivot': [0, 0, -0.06], + # }) + # elif opts.cfg == 'shapenet': + # rendering_options.update({ + # 'depth_resolution': 64, + # 'depth_resolution_importance': 64, + # 'ray_start': 0.1, + # 'ray_end': 2.6, + # 'box_warp': 1.6, + # 'white_back': True, + # 'avg_camera_radius': 1.7, + # 'avg_camera_pivot': [0, 0, 0], + # }) + # el + if opts.cfg == 'full-head': + rendering_options.update({ + 'depth_resolution': 48, # number of uniform samples to take per ray. + 'depth_resolution_importance': 48, # number of importance samples to take per ray. + # 'ray_start': 2.25 * opts.radius_scale, # near point along each ray to start taking samples. + # 'ray_end': 3.3 * opts.radius_scale, # far point along each ray to stop taking samples. + 'ray_start': 2.25 + (2.7-2.25) * (1- opts.radius_scale), # near point along each ray to start taking samples. + 'ray_end': (3.3-2.7) * opts.radius_scale + 2.7 , # far point along each ray to stop taking samples. + 'box_warp': 1* opts.radius_scale, + #'c_gen_conditioning_zero': True, # disable camera condition on mapping network + + }) + else: + assert False, "Need to specify config" + + + + if opts.density_reg > 0: + c.G_reg_interval = opts.density_reg_every + c.G_kwargs.rendering_kwargs = rendering_options + c.G_kwargs.num_fp16_res = 0 + c.loss_kwargs.blur_init_sigma = 10 # Blur the images seen by the discriminator. + c.loss_kwargs.blur_fade_kimg = c.batch_size * opts.blur_fade_kimg / 32 # Fade out the blur during the first N kimg. + + c.loss_kwargs.density_noise_fade_kimg = opts.density_noise_fade_kimg + c.loss_kwargs.gpc_reg_prob = opts.gpc_reg_prob if opts.gen_pose_cond else None + c.loss_kwargs.gpc_reg_fade_kimg = opts.gpc_reg_fade_kimg + c.loss_kwargs.bcg_reg_prob = opts.bcg_reg_prob + c.loss_kwargs.dual_discrimination = True + c.loss_kwargs.neural_rendering_resolution_initial = opts.neural_rendering_resolution_initial + c.loss_kwargs.neural_rendering_resolution_final = opts.neural_rendering_resolution_final + c.loss_kwargs.neural_rendering_resolution_fade_kimg = opts.neural_rendering_resolution_fade_kimg + c.G_kwargs.sr_num_fp16_res = opts.sr_num_fp16_res + + c.G_kwargs.sr_kwargs = dnnlib.EasyDict(channel_base=opts.cbase, channel_max=opts.cmax, fused_modconv_default='inference_only') + + c.G_kwargs.thickness = opts.thickness + c.loss_kwargs.style_mixing_prob = opts.style_mixing_prob + c.loss_kwargs.thickness = opts.thickness + + c.metric_pose_sample_mode = opts.metric_pose_sample_mode + + + c.loss_kwargs.pose_loss_weight = opts.pose_loss_weight + + c.loss_kwargs.input_pose_params_reg_loss_weight = opts.input_pose_params_reg_loss_weight + c.loss_kwargs.input_pose_params_reg_loss_kimg = opts.input_pose_params_reg_loss_kimg + + c.train_g_pose_branch = opts.train_g_pose_branch + + c.G_kwargs.explicitly_symmetry = opts.explicitly_symmetry + c.D_kwargs.explicitly_symmetry = opts.explicitly_symmetry + + + # Augmentation. + if opts.aug != 'noaug': + c.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1) + if opts.aug == 'ada': + c.ada_target = opts.target + if opts.aug == 'fixed': + c.augment_p = opts.p + + # Resume. 
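+    # Note: when resuming from a pickle (e.g. stage 2/3 training in the README), the EMA
+    # ramp-up is disabled, ADA reacts faster, and the discriminator blur and
+    # generator-pose-conditioning ramp-ups are skipped unless --resume_blur is set.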
+ if opts.resume is not None: + c.resume_pkl = opts.resume + c.ada_kimg = 100 # Make ADA react faster at the beginning. + c.ema_rampup = None # Disable EMA rampup. + if not opts.resume_blur: + c.loss_kwargs.blur_init_sigma = 0 # Disable blur rampup. + c.loss_kwargs.gpc_reg_fade_kimg = 0 # Disable swapping rampup + c.resume_kimg = opts.resume_kimg + # Performance-related toggles. + # if opts.fp32: + # c.G_kwargs.num_fp16_res = c.D_kwargs.num_fp16_res = 0 + # c.G_kwargs.conv_clamp = c.D_kwargs.conv_clamp = None + c.G_kwargs.num_fp16_res = opts.g_num_fp16_res + c.G_kwargs.conv_clamp = 256 if opts.g_num_fp16_res > 0 else None + c.D_kwargs.num_fp16_res = opts.d_num_fp16_res + c.D_kwargs.conv_clamp = 256 if opts.d_num_fp16_res > 0 else None + + c.D_kwargs.seg_channels = opts.seg_channels + + if opts.nobench: + c.cudnn_benchmark = False + + # Description string. + desc = f'{opts.cfg:s}-{dataset_name:s}-gpus{c.num_gpus:d}-batch{c.batch_size:d}-gamma{c.loss_kwargs.r1_gamma}' + if opts.desc is not None: + desc += f'-{opts.desc}' + + # Launch. + launch_training(c=c, desc=desc, outdir=opts.outdir, dry_run=opts.dry_run) + +#---------------------------------------------------------------------------- + +if __name__ == "__main__": + main() # pylint: disable=no-value-for-parameter + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/training/__init__.py b/3DPortraitGAN_pyramid/training/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +# empty diff --git a/3DPortraitGAN_pyramid/training/aligned_smpl.py b/3DPortraitGAN_pyramid/training/aligned_smpl.py new file mode 100644 index 0000000..c635560 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/aligned_smpl.py @@ -0,0 +1,450 @@ + +import os.path as osp + +import numpy as np +import torch +from torch_utils import misc + +import trimesh +import pickle + + +import os +# os.environ["PYOPENGL_PLATFORM"] = "egl" +# check if on a Linux machine +if os.name == 'posix': # Linux + os.environ["PYOPENGL_PLATFORM"] = "osmesa" + +# os.environ["PYOPENGL_PLATFORM"] = "osmesa" +import pyrender + +class AlignedSMPL(torch.nn.Module): + def __init__(self, model,batch_size): + super().__init__() + self.batch_size = batch_size + smpl_joint_regressor = torch.from_numpy( + np.load('transfer_data/smpl_joint_regressor.npy')).float().cuda().contiguous() + self.register_buffer('smpl_joint_regressor', smpl_joint_regressor) + + self.model = model + faces = torch.from_numpy(self.model.faces.astype(np.int32)).cuda().long().contiguous() + self.register_buffer('faces', faces) + + + def set_model(self, model): + self.model = model + def set_batch_size(self, batch_size): + self.batch_size = batch_size + + def get_align_coordinate(self, vertices): + # 30 x 6890 + batch_size = vertices.shape[0] + smpl_joints = torch.bmm(self.smpl_joint_regressor[None, :, :].repeat(batch_size, 1, 1), vertices) + align_joint_coordinate = smpl_joints[:,12, None, :] # Neck + return align_joint_coordinate + + def render_mesh(self, img, mesh, face, cam_param, color=(1.0, 1.0, 0.9, 1.0), cam_pose=None): + # mesh + mesh = trimesh.Trimesh(mesh, face) + rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0]) + mesh.apply_transform(rot) + material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=color) + mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=False) + scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3)) + scene.add(mesh, 'mesh') + + focal, princpt = cam_param['focal'], cam_param['princpt'] + camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1]) + + if cam_pose is not None: + scene.add(camera, pose=cam_pose) + else: + scene.add(camera) + # scene.add(camera) + # print('camera pose in scene ', scene.get_pose(scene._main_camera_node)) + # renderer + renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0) + + # light + light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=0.8) + # light_pose = np.eye(4) + # light_pose[:3, 3] = np.array([0, -1, 1]) + # scene.add(light, pose=light_pose) + # light_pose[:3, 3] = np.array([0, 1, 1]) + # scene.add(light, pose=light_pose) + # light_pose[:3, 3] = np.array([1, 1, 2]) + # scene.add(light, pose=light_pose) + + light_pose = np.eye(4) + light_pose[:3, 3] = np.array([0, 0, -1]) + scene.add(light, pose=light_pose) + + scene.add(light, pose=cam_pose) + scene.add(light, pose=cam_pose) + scene.add(light, pose=cam_pose) + light_pose[:3, 3] = np.array([1, 1, -4]) + scene.add(light, pose=light_pose) + light_pose[:3, 3] = np.array([-1, 0, -1]) + scene.add(light, pose=light_pose) + light_pose[:3, 3] = np.array([0.2469, 1.8828, -2.4473]) + scene.add(light, pose=light_pose) + + # render + rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA) + rgb = rgb[:, :, :3].astype(np.float32) + valid_mask = (depth > 0)[:, :, None] + + # save to image + img = rgb * valid_mask + img * (1 
- valid_mask) + return img.astype(np.uint8) + + def render_depth(self, img, mesh, face, cam_param, color=(1.0, 1.0, 0.9, 1.0), cam_pose=None): + # mesh + mesh = trimesh.Trimesh(mesh, face) + rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0]) + mesh.apply_transform(rot) + material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=color) + mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=False) + scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3)) + scene.add(mesh, 'mesh') + + focal, princpt = cam_param['focal'], cam_param['princpt'] + camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1]) + + if cam_pose is not None: + scene.add(camera, pose=cam_pose) + else: + scene.add(camera) + # scene.add(camera) + # print('camera pose in scene ', scene.get_pose(scene._main_camera_node)) + # renderer + renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0) + + # render + rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA) + #rgb = rgb[:, :, :3].astype(np.float32) + valid_mask = (depth > 0)[:, :, None] + + # save to image + depth = depth * valid_mask + img * (1 - valid_mask) + return depth.astype(np.uint8) + + + def get_projected_vertex(self, mesh, world2screen_matrix): + # mesh = np.concatenate([mesh, np.ones((mesh.shape[0], 1))], axis=1) # N x 4 + mesh = torch.cat([mesh, torch.ones((mesh.shape[0], 1)).to(mesh.device)], dim=1) # N x 4 + points_image = world2screen_matrix @ mesh.T # 4,N + points_image = points_image[:3, :] # 3,N + + points_on_input_image = points_image / points_image[2, :] + points_on_input_image = points_on_input_image[:2, :].T # 30,2 + + return points_on_input_image + + + def generate_shaped_smpl(self, betas, scale, transl): + if betas is not None: + raise NotImplementedError + else: + betas = None + if scale is not None: + raise NotImplementedError + misc.assert_shape(scale, [self.batch_size, 1]) + else: + scale = torch.ones([self.batch_size, 1]).to(self.model.shapedirs.device) + if transl is not None: + raise NotImplementedError + misc.assert_shape(transl, [self.batch_size, 3]) + else: + transl = torch.zeros([self.batch_size, 3]).to(self.model.shapedirs.device) + + # body_pose_fill = torch.zeros((self.batch_size, 23, 3)).to(self.model.shapedirs.device) + # # 15 16 for shoulder, we hope the Hands naturally sagging + # body_pose_fill[:, 15, :] = torch.tensor([0.0, 0.0, -np.pi / 2]).to(self.model.shapedirs.device) + + # body_pose_fill[:, 16, :] = torch.tensor([0.0, 0.0, np.pi / 2]).to(self.model.shapedirs.device) + # body_pose_fill = body_pose_fill.reshape(self.batch_size, -1) + # apply beta, alignment, translation and scale + shaed_output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=None, + return_shaped=False + ) + vertices_no_pose = shaed_output.vertices + joints_no_pose = shaed_output.joints + + + align_joint_coordinate = self.get_align_coordinate(vertices_no_pose) # B,1,3 + vertices_no_pose -= align_joint_coordinate + joints_no_pose -= align_joint_coordinate + + vertices_no_pose += transl.view(self.batch_size, 1, 3) + joints_no_pose += transl.view(self.batch_size, 1, 3) + + vertices_no_pose *= scale.view(self.batch_size, 1, 1) + joints_no_pose *= scale.view(self.batch_size, 1, 1) + + nose_2d = joints_no_pose[:,86:90,:] # B, 4, 3 + eye_right_2d = joints_no_pose[:,95: 101,:] # B, 6, 3 + eye_left_2d = joints_no_pose[:,101: 107,:] 
# B, 6, 3 + + # points_3d = np.concatenate([nose_2d, eye_right_2d, eye_left_2d], axis=0) # 16 + face_points = torch.cat([nose_2d, eye_right_2d, eye_left_2d], dim=1) # B, 16, 3 + + #transformation_matrix = self.compute_transformation_matrix(face_points) + + res = { + 'vertices': vertices_no_pose, + 'align_joint_coordinate': align_joint_coordinate, + 'face_points': face_points, + } + return res + + def generate_posed_smpl(self, betas, scale, transl, body_pose, align_joint_coordinate): + batch_size = body_pose.shape[0] + if betas is not None: + raise NotImplementedError + else: + betas = None + if scale is not None: + raise NotImplementedError + misc.assert_shape(scale, [self.batch_size, 1]) + else: + scale = torch.ones([self.batch_size, 1]).to(self.model.shapedirs.device) + if transl is not None: + raise NotImplementedError + misc.assert_shape(transl, [self.batch_size, 3]) + else: + transl = torch.zeros([self.batch_size, 3]).to(self.model.shapedirs.device) + misc.assert_shape(body_pose, [self.batch_size, 6]) + + # apply beta, alignment, translation and scale + + # apply beta, pose, alignment, translation and scale + # mask pose except 11 and 14 + body_pose_fill = torch.zeros((self.batch_size, 23, 3)).to(self.model.shapedirs.device) + body_pose_fill[:, 11, :] = body_pose[:, :3] + body_pose_fill[:, 14, :] = body_pose[:, 3:] + + # # 15 16 for shoulder, we hope the Hands naturally sagging + # body_pose_fill[:, 15, :] = torch.tensor([0.0, 0.0, -np.pi / 2]).to(self.model.shapedirs.device) + # body_pose_fill[:, 16, :] = torch.tensor([0.0, 0.0, np.pi / 2]).to(self.model.shapedirs.device) + + + body_pose_fill = body_pose_fill.reshape(self.batch_size, -1) + + output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=body_pose_fill, + return_shaped=True + ) + vertices = output.vertices + joints = output.joints + + # align vertices and joints + vertices -= align_joint_coordinate + joints -= align_joint_coordinate + + # additional translation + vertices += transl.view(self.batch_size, 1, 3) + joints += transl.view(self.batch_size, 1, 3) + + # additional scale + vertices *= scale.view(self.batch_size, 1, 1) + joints *= scale.view(self.batch_size, 1, 1) + + nose_2d = joints[:, 86:90, :] # B, 4, 3 + eye_right_2d = joints[:, 95: 101, :] # B, 6, 3 + eye_left_2d = joints[:, 101: 107, :] # B, 6, 3 + + # points_3d = np.concatenate([nose_2d, eye_right_2d, eye_left_2d], axis=0) # 16 + face_points = torch.cat([nose_2d, eye_right_2d, eye_left_2d], dim=1) # B, 16, 3 + + res = { + 'vertices': vertices, + 'face_points': face_points + } + + return res + + + + def get_depth(self,vert, resolution=256, cameras=None): + + faces = self.model.faces + # compute the transformation matrix with eg3d + intrisics_standard_dict = {"focal": [5000.0 / 1024 * resolution / 0.75, 5000.0 / 1024 * resolution / 0.75], + "princpt": [resolution / 2, resolution / 2]} + # intrisics_standard = np.array( [[5000.0, 0.0, resolution/2, 0.0], [0.0, 5000.0, resolution/2.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) + # normalized_transformation_in_realworld = np.array(render_kwargs['world2camera_matrix']) + R = np.eye(3) + angle = np.pi + R[1, 1] = np.cos(angle) + R[1, 2] = -np.sin(angle) + R[2, 1] = np.sin(angle) + R[2, 2] = np.cos(angle) + + R = torch.from_numpy(R).float().to(self.model.shapedirs.device).unsqueeze(0).repeat(self.batch_size, 1, + 1) # self.batch_size x 3 x 3 + + vertices_pyrender = torch.matmul(vert, R) # 1 x 6890 x 3 + # normalized_camerapose_in_pyrender = 
np.array(render_kwargs['normalized_camerapose_in_pyrender']) + + # color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + images = [] + for i in range(self.batch_size): + camera_pose = cameras[i, :16].reshape(4, 4) + + camerapose_in_pyrender = np.linalg.inv(camera_pose) + camerapose_in_pyrender[[1, 2]] *= -1 + camerapose_in_pyrender = np.linalg.inv(camerapose_in_pyrender) + + # print(vertices_pyrender.shape, vertices_pyrender[i].shape,camerapose_in_pyrender.shape) + image_camera_rotate = self.render_depth(np.ones((resolution, resolution, 3)) * 255, + vertices_pyrender[i].detach().cpu().numpy(), faces, + intrisics_standard_dict, + color=(0.4, 0.5, 0.9, 1.0), + cam_pose=camerapose_in_pyrender) + + image_camera_rotate = image_camera_rotate[None, :, :, :] # 1 x 256 x 256 x 3 + image_camera_rotate = np.transpose(image_camera_rotate, (0, 3, 1, 2)) # 1 x 3 x 256 x 256 + images.append(image_camera_rotate) + + images = np.concatenate(images, axis=0) + return images + # + def get_visualization(self, shape_pose_params, resolution=256, cameras=None): + # apply beta, alignment, translation and scale + if 'betas' in shape_pose_params: + raise NotImplementedError + betas = shape_pose_params['betas'] + misc.assert_shape(betas, [self.batch_size, self.num_betas]) + else: + betas = None + # scale = shape_pose_params['scale'] + # transl = shape_pose_params['transl'] + if 'scale' in shape_pose_params: + raise NotImplementedError + scale = shape_pose_params['scale'] + misc.assert_shape(scale, [self.batch_size, 1]) + else: + scale = torch.ones([self.batch_size, 1]).to(self.model.shapedirs.device) + if 'transl' in shape_pose_params: + raise NotImplementedError + transl = shape_pose_params['transl'] + misc.assert_shape(transl, [self.batch_size, 3]) + else: + transl = torch.zeros([self.batch_size, 3]).to(self.model.shapedirs.device) + + + body_pose = shape_pose_params['pose'] + + + misc.assert_shape(scale, [self.batch_size, 1]) + misc.assert_shape(transl, [self.batch_size, 3]) + misc.assert_shape(body_pose, [self.batch_size, 6]) + + cameras = cameras.detach().cpu().numpy() # N, 25 + + shaed_output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=None, + return_shaped=False + ) + vertices_no_pose = shaed_output.vertices + faces = self.model.faces + + align_joint_coordinate = self.get_align_coordinate(vertices_no_pose) + vertices_no_pose = vertices_no_pose + vertices_no_pose -= align_joint_coordinate + + vertices_no_pose += transl.view(self.batch_size, 1, 3) + vertices_no_pose *= scale.view(self.batch_size, 1, 1) + + # apply beta, pose, alignment, translation and scale + # mask pose except 11 and 14 + body_pose_fill = torch.zeros((self.batch_size, 23, 3)).to(self.model.shapedirs.device) + body_pose_fill[:, 11, :] = body_pose[:, :3] + body_pose_fill[:, 14, :] = body_pose[:, 3:] + + # # 15 16 for shoulder, we hope the Hands naturally sagging + # body_pose_fill[:, 15, :] = torch.tensor([0.0, 0.0, -np.pi / 2]).to(self.model.shapedirs.device) + # body_pose_fill[:, 16, :] = torch.tensor([0.0, 0.0, np.pi / 2]).to(self.model.shapedirs.device) + + + + body_pose_fill = body_pose_fill.reshape(self.batch_size, -1) + + output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=body_pose_fill, + return_shaped=True + ) + vertices = output.vertices + joints = output.joints + + # align vertices and joints + vertices -= align_joint_coordinate + joints -= align_joint_coordinate + + # additional translation + vertices += transl.view(self.batch_size, 1, 3) + joints += 
transl.view(self.batch_size, 1, 3) + + # additional scale + vertices *= scale.view(self.batch_size, 1, 1) + joints *= scale.view(self.batch_size, 1, 1) + + # print(vertices[:,0].min(),vertices[:,0].max(),vertices[:,0].max() - vertices[:,0].min()) + # print(vertices[:,1].min(),vertices[:,1].max(),vertices[:,1].max() - vertices[:,1].min()) + # print(vertices[:,2].min(),vertices[:,2].max(),vertices[:,2].max() - vertices[:,2].min()) + + # nose_2d = joints[86:90] # 4 + # eye_right_2d = joints[95: 101] # 6 + # eye_left_2d = joints[101: 107] # 6 + + #points_3d = np.concatenate([nose_2d, eye_right_2d, eye_left_2d], axis=0) # 16 + #points_3d = torch.cat([nose_2d, eye_right_2d, eye_left_2d], dim=0) # 16 + + # compute the transformation matrix with eg3d + intrisics_standard_dict = {"focal": [5000.0/1024*resolution/0.75, 5000.0/1024*resolution/0.75], "princpt": [resolution/2, resolution/2]} + # intrisics_standard = np.array( [[5000.0, 0.0, resolution/2, 0.0], [0.0, 5000.0, resolution/2.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) + # normalized_transformation_in_realworld = np.array(render_kwargs['world2camera_matrix']) + R = np.eye(3) + angle = np.pi + R[1, 1] = np.cos(angle) + R[1, 2] = -np.sin(angle) + R[2, 1] = np.sin(angle) + R[2, 2] = np.cos(angle) + + R = torch.from_numpy(R).float().to(self.model.shapedirs.device).unsqueeze(0).repeat(self.batch_size, 1, 1) # self.batch_size x 3 x 3 + + vertices_pyrender = torch.matmul(vertices, R) # 1 x 6890 x 3 + #normalized_camerapose_in_pyrender = np.array(render_kwargs['normalized_camerapose_in_pyrender']) + + # color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + images = [] + for i in range(self.batch_size): + camera_pose = cameras[i,:16].reshape(4,4) + + camerapose_in_pyrender = np.linalg.inv(camera_pose) + camerapose_in_pyrender[[1,2]] *= -1 + camerapose_in_pyrender = np.linalg.inv(camerapose_in_pyrender) + + #print(vertices_pyrender.shape, vertices_pyrender[i].shape,camerapose_in_pyrender.shape) + image_camera_rotate = self.render_mesh(np.ones((resolution, resolution, 3)) * 255, + vertices_pyrender[i].detach().cpu().numpy(), faces, + intrisics_standard_dict, + color=(0.4, 0.5, 0.9, 1.0), + cam_pose=camerapose_in_pyrender) + + image_camera_rotate = image_camera_rotate[None, :, :, :] # 1 x 256 x 256 x 3 + image_camera_rotate = np.transpose(image_camera_rotate, (0, 3, 1, 2)) # 1 x 3 x 256 x 256 + images.append(image_camera_rotate) + + images = np.concatenate(images, axis=0) + return images diff --git a/3DPortraitGAN_pyramid/training/augment.py b/3DPortraitGAN_pyramid/training/augment.py new file mode 100644 index 0000000..7b00a4a --- /dev/null +++ b/3DPortraitGAN_pyramid/training/augment.py @@ -0,0 +1,441 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Augmentation pipeline from the paper +"Training Generative Adversarial Networks with Limited Data". +Matches the original implementation by Karras et al. 
at +https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py""" + +import numpy as np +import scipy.signal +import torch +from torch_utils import persistence +from torch_utils import misc +from torch_utils.ops import upfirdn2d +from torch_utils.ops import grid_sample_gradfix +from torch_utils.ops import conv2d_gradfix + +#---------------------------------------------------------------------------- +# Coefficients of various wavelet decomposition low-pass filters. + +wavelets = { + 'haar': [0.7071067811865476, 0.7071067811865476], + 'db1': [0.7071067811865476, 0.7071067811865476], + 'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025], + 'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569], + 'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523], + 'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125], + 'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017], + 'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236], + 'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161], + 'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025], + 'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569], + 'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427], + 'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728], + 'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148], + 'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255], + 'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 
0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609], +} + +#---------------------------------------------------------------------------- +# Helpers for constructing transformation matrices. + +def matrix(*rows, device=None): + assert all(len(row) == len(rows[0]) for row in rows) + elems = [x for row in rows for x in row] + ref = [x for x in elems if isinstance(x, torch.Tensor)] + if len(ref) == 0: + return misc.constant(np.asarray(rows), device=device) + assert device is None or device == ref[0].device + elems = [x if isinstance(x, torch.Tensor) else misc.constant(x, shape=ref[0].shape, device=ref[0].device) for x in elems] + return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1)) + +def translate2d(tx, ty, **kwargs): + return matrix( + [1, 0, tx], + [0, 1, ty], + [0, 0, 1], + **kwargs) + +def translate3d(tx, ty, tz, **kwargs): + return matrix( + [1, 0, 0, tx], + [0, 1, 0, ty], + [0, 0, 1, tz], + [0, 0, 0, 1], + **kwargs) + +def scale2d(sx, sy, **kwargs): + return matrix( + [sx, 0, 0], + [0, sy, 0], + [0, 0, 1], + **kwargs) + +def scale3d(sx, sy, sz, **kwargs): + return matrix( + [sx, 0, 0, 0], + [0, sy, 0, 0], + [0, 0, sz, 0], + [0, 0, 0, 1], + **kwargs) + +def rotate2d(theta, **kwargs): + return matrix( + [torch.cos(theta), torch.sin(-theta), 0], + [torch.sin(theta), torch.cos(theta), 0], + [0, 0, 1], + **kwargs) + +def rotate3d(v, theta, **kwargs): + vx = v[..., 0]; vy = v[..., 1]; vz = v[..., 2] + s = torch.sin(theta); c = torch.cos(theta); cc = 1 - c + return matrix( + [vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0], + [vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0], + [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0], + [0, 0, 0, 1], + **kwargs) + +def translate2d_inv(tx, ty, **kwargs): + return translate2d(-tx, -ty, **kwargs) + +def scale2d_inv(sx, sy, **kwargs): + return scale2d(1 / sx, 1 / sy, **kwargs) + +def rotate2d_inv(theta, **kwargs): + return rotate2d(-theta, **kwargs) + +#---------------------------------------------------------------------------- +# Versatile image augmentation pipeline from the paper +# "Training Generative Adversarial Networks with Limited Data". +# +# All augmentations are disabled by default; individual augmentations can +# be enabled by setting their probability multipliers to 1. + +@persistence.persistent_class +class AugmentPipe(torch.nn.Module): + def __init__(self, + xflip=0, rotate90=0, xint=0, xint_max=0.125, + scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125, + brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1, + imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1, + noise=0, cutout=0, noise_std=0.1, cutout_size=0.5, + ): + super().__init__() + self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability. + + # Pixel blitting. + self.xflip = float(xflip) # Probability multiplier for x-flip. + self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations. + self.xint = float(xint) # Probability multiplier for integer translation. + self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions. + + # General geometric transformations. 
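+ # These multipliers are consumed in forward() below: scale and aniso draw log2-normal
+ # factors (exp2 of a zero-mean normal with the given std), rotate draws a uniform angle in
+ # [-rotate_max*pi, rotate_max*pi], and every accepted transform is folded into the inverse
+ # homography G_inv, so the whole geometric pipeline is resolved by a single grid_sample call
+ # (bracketed by wavelet-filtered up/downsampling to suppress aliasing).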
+ self.scale = float(scale) # Probability multiplier for isotropic scaling. + self.rotate = float(rotate) # Probability multiplier for arbitrary rotation. + self.aniso = float(aniso) # Probability multiplier for anisotropic scaling. + self.xfrac = float(xfrac) # Probability multiplier for fractional translation. + self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling. + self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle. + self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling. + self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions. + + # Color transformations. + self.brightness = float(brightness) # Probability multiplier for brightness. + self.contrast = float(contrast) # Probability multiplier for contrast. + self.lumaflip = float(lumaflip) # Probability multiplier for luma flip. + self.hue = float(hue) # Probability multiplier for hue rotation. + self.saturation = float(saturation) # Probability multiplier for saturation. + self.brightness_std = float(brightness_std) # Standard deviation of brightness. + self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast. + self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle. + self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation. + + # Image-space filtering. + self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering. + self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands. + self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification. + + # Image-space corruptions. + self.noise = float(noise) # Probability multiplier for additive RGB noise. + self.cutout = float(cutout) # Probability multiplier for cutout. + self.noise_std = float(noise_std) # Standard deviation of additive RGB noise. + self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions. + + # Setup orthogonal lowpass filter for geometric augmentations. + self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6'])) + + # Construct filter bank for image-space filtering. + Hz_lo = np.asarray(wavelets['sym2']) # H(z) + Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z) + Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2 + Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2 + Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i) + for i in range(1, Hz_fbank.shape[0]): + Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1] + Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2]) + Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2 + self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32)) + + def forward(self, images, debug_percentile=None): + assert isinstance(images, torch.Tensor) and images.ndim == 4 + batch_size, num_channels, height, width = images.shape + device = images.device + if debug_percentile is not None: + debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device) + + # ------------------------------------- + # Select parameters for pixel blitting. 
+ # ------------------------------------- + + # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in + I_3 = torch.eye(3, device=device) + G_inv = I_3 + + # Apply x-flip with probability (xflip * strength). + if self.xflip > 0: + i = torch.floor(torch.rand([batch_size], device=device) * 2) + i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i)) + if debug_percentile is not None: + i = torch.full_like(i, torch.floor(debug_percentile * 2)) + G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1) + + # Apply 90 degree rotations with probability (rotate90 * strength). + if self.rotate90 > 0: + i = torch.floor(torch.rand([batch_size], device=device) * 4) + i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i)) + if debug_percentile is not None: + i = torch.full_like(i, torch.floor(debug_percentile * 4)) + G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i) + + # Apply integer translation with probability (xint * strength). + if self.xint > 0: + t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max + t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t)) + if debug_percentile is not None: + t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max) + G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height)) + + # -------------------------------------------------------- + # Select parameters for general geometric transformations. + # -------------------------------------------------------- + + # Apply isotropic scaling with probability (scale * strength). + if self.scale > 0: + s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std) + s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s)) + if debug_percentile is not None: + s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std)) + G_inv = G_inv @ scale2d_inv(s, s) + + # Apply pre-rotation with probability p_rot. + p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p + if self.rotate > 0: + theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max + theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta)) + if debug_percentile is not None: + theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max) + G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling. + + # Apply anisotropic scaling with probability (aniso * strength). + if self.aniso > 0: + s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std) + s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s)) + if debug_percentile is not None: + s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std)) + G_inv = G_inv @ scale2d_inv(s, 1 / s) + + # Apply post-rotation with probability p_rot. + if self.rotate > 0: + theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max + theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta)) + if debug_percentile is not None: + theta = torch.zeros_like(theta) + G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling. + + # Apply fractional translation with probability (xfrac * strength). 
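+ # Fractional offsets below are drawn from a zero-mean Gaussian with std xfrac_std, expressed
+ # as a fraction of the image size; unlike the integer translation above they are not rounded,
+ # so they produce genuine sub-pixel shifts once the image is resampled with G_inv.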
+ if self.xfrac > 0: + t = torch.randn([batch_size, 2], device=device) * self.xfrac_std + t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t)) + if debug_percentile is not None: + t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std) + G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height) + + # ---------------------------------- + # Execute geometric transformations. + # ---------------------------------- + + # Execute if the transform is not identity. + if G_inv is not I_3: + + # Calculate padding. + cx = (width - 1) / 2 + cy = (height - 1) / 2 + cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz] + cp = G_inv @ cp.t() # [batch, xyz, idx] + Hz_pad = self.Hz_geom.shape[0] // 4 + margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx] + margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1] + margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device) + margin = margin.max(misc.constant([0, 0] * 2, device=device)) + margin = margin.min(misc.constant([width-1, height-1] * 2, device=device)) + mx0, my0, mx1, my1 = margin.ceil().to(torch.int32) + + # Pad image and adjust origin. + images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect') + G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv + + # Upsample. + images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2) + G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device) + G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device) + + # Execute transformation. + shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2] + G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device) + grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False) + images = grid_sample_gradfix.grid_sample(images, grid) + + # Downsample and crop. + images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True) + + # -------------------------------------------- + # Select parameters for color transformations. + # -------------------------------------------- + + # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out + I_4 = torch.eye(4, device=device) + C = I_4 + + # Apply brightness with probability (brightness * strength). + if self.brightness > 0: + b = torch.randn([batch_size], device=device) * self.brightness_std + b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b)) + if debug_percentile is not None: + b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std) + C = translate3d(b, b, b) @ C + + # Apply contrast with probability (contrast * strength). + if self.contrast > 0: + c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std) + c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c)) + if debug_percentile is not None: + c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std)) + C = scale3d(c, c, c) @ C + + # Apply luma flip with probability (lumaflip * strength). + v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis. 
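+ # v = (1, 1, 1, 0) / sqrt(3) is the unit luma axis in homogeneous color space. The update
+ # below, C = (I_4 - 2 * v v^T * i) @ C with i in {0, 1}, is a Householder reflection about
+ # the plane orthogonal to v: for i = 1 it negates the luminance component of every color
+ # while leaving chroma unchanged, and for i = 0 it reduces to the identity.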
+ if self.lumaflip > 0: + i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2) + i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i)) + if debug_percentile is not None: + i = torch.full_like(i, torch.floor(debug_percentile * 2)) + C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection. + + # Apply hue rotation with probability (hue * strength). + if self.hue > 0 and num_channels > 1: + theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max + theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta)) + if debug_percentile is not None: + theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max) + C = rotate3d(v, theta) @ C # Rotate around v. + + # Apply saturation with probability (saturation * strength). + if self.saturation > 0 and num_channels > 1: + s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std) + s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s)) + if debug_percentile is not None: + s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std)) + C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C + + # ------------------------------ + # Execute color transformations. + # ------------------------------ + + # Execute if the transform is not identity. + if C is not I_4: + images = images.reshape([batch_size, num_channels, height * width]) + if num_channels == 3: + images = C[:, :3, :3] @ images + C[:, :3, 3:] + elif num_channels == 1: + C = C[:, :3, :].mean(dim=1, keepdims=True) + images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:] + elif num_channels == 6: + images[:, :3] = C[:, :3, :3] @ images[:, :3] + C[:, :3, 3:] + images[:, 3:] = C[:, :3, :3] @ images[:, 3:] + C[:, :3, 3:] + else: + raise ValueError('Image must be RGB (3 channels) or L (1 channel)') + images = images.reshape([batch_size, num_channels, height, width]) + + # ---------------------- + # Image-space filtering. + # ---------------------- + + if self.imgfilter > 0: + num_bands = self.Hz_fbank.shape[0] + assert len(self.imgfilter_bands) == num_bands + expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f). + + # Apply amplification for each band with probability (imgfilter * strength * band_strength). + g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity). + for i, band_strength in enumerate(self.imgfilter_bands): + t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std) + t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i)) + if debug_percentile is not None: + t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i) + t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector. + t[:, i] = t_i # Replace i'th element. + t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power. + g = g * t # Accumulate into global gain. + + # Construct combined amplification filter. 
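+ # g holds one gain per image and frequency band, normalized above so the expected power
+ # spectrum is preserved. Multiplying by Hz_fbank collapses the four band-pass kernels into a
+ # single 1-D filter per image, which is then applied separably (once along x, once along y)
+ # via the grouped convolutions below.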
+ Hz_prime = g @ self.Hz_fbank # [batch, tap] + Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap] + Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap] + + # Apply filter. + p = self.Hz_fbank.shape[1] // 2 + images = images.reshape([1, batch_size * num_channels, height, width]) + images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect') + images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels) + images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels) + images = images.reshape([batch_size, num_channels, height, width]) + + # ------------------------ + # Image-space corruptions. + # ------------------------ + + # Apply additive RGB noise with probability (noise * strength). + if self.noise > 0: + sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std + sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma)) + if debug_percentile is not None: + sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std) + images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma + + # Apply cutout with probability (cutout * strength). + if self.cutout > 0: + size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device) + size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size)) + center = torch.rand([batch_size, 2, 1, 1, 1], device=device) + if debug_percentile is not None: + size = torch.full_like(size, self.cutout_size) + center = torch.full_like(center, debug_percentile) + coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1]) + coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1]) + mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2) + mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2) + mask = torch.logical_or(mask_x, mask_y).to(torch.float32) + images = images * mask + + return images + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/training/crosssection_utils.py b/3DPortraitGAN_pyramid/training/crosssection_utils.py new file mode 100644 index 0000000..72d49f2 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/crosssection_utils.py @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
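+# sample_cross_section() below evaluates the generator's density field on a planar slice:
+# a (resolution x resolution) grid spanning [-w/2, w/2] on the plane whose first coordinate
+# is fixed to zero, returning sigma with shape [batch, 1, resolution, resolution].
+# Usage sketch (illustrative only; it assumes a generator G exposing sample_mixed() and
+# already-mapped latents ws, exactly as used inside the function):
+#
+#   sigma = sample_cross_section(G, ws, resolution=256, w=1.2)
+#   img = (sigma[0, 0] - sigma.min()) / (sigma.max() - sigma.min())  # normalize for viewing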
+ +import torch + +def sample_cross_section(G, ws, resolution=256, w=1.2): + axis=0 + A, B = torch.meshgrid(torch.linspace(w/2, -w/2, resolution, device=ws.device), torch.linspace(-w/2, w/2, resolution, device=ws.device), indexing='ij') + A, B = A.reshape(-1, 1), B.reshape(-1, 1) + C = torch.zeros_like(A) + coordinates = [A, B] + coordinates.insert(axis, C) + coordinates = torch.cat(coordinates, dim=-1).expand(ws.shape[0], -1, -1) + + sigma = G.sample_mixed(coordinates, torch.randn_like(coordinates), ws)['sigma'] + return sigma.reshape(-1, 1, resolution, resolution) + +# if __name__ == '__main__': +# sample_crossection(None) \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/dataset.py b/3DPortraitGAN_pyramid/training/dataset.py new file mode 100644 index 0000000..4c348ca --- /dev/null +++ b/3DPortraitGAN_pyramid/training/dataset.py @@ -0,0 +1,565 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Streaming images and labels from datasets created with dataset_tool.py.""" + +import os +import numpy as np +import zipfile +import PIL.Image +import json +import torch +import dnnlib +try: + import pyspng +except ImportError: + pyspng = None + +#---------------------------------------------------------------------------- + +def matrix2angle(R): + """ + https://github.com/sizhean/panohead + compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf + refined by: https://stackoverflow.com/questions/43364900/rotation-matrix-to-euler-angles-with-opencv + todo: check and debug + Args: + R: (3,3). rotation matrix + Returns: + x: yaw + y: pitch + z: roll + """ + if R[2, 0] > 0.998: + z = 0 + x = np.pi / 2 + y = z + atan2(-R[0, 1], -R[0, 2]) + elif R[2, 0] < -0.998: + z = 0 + x = -np.pi / 2 + y = -z + atan2(R[0, 1], R[0, 2]) + else: + x = asin(R[2, 0]) + y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x)) + z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x)) + + if abs(y) > np.pi/2: + if x > 0: + x = np.pi - x + else: + x = -np.pi - x + y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x)) + z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x)) + return x, y, z + + +def get_poseangle(eg3dparams): + ''' + https://github.com/sizhean/panohead + ''' + convert = np.array([ + [1, 0, 0, 0], + [0, -1, 0, 0], + [0, 0, -1, 0], + [0, 0, 0, 1], + ]).astype(np.float32) + + entry_cam = np.array([float(p) for p in eg3dparams][:16]).reshape((4,4)) + + world2cam = np.linalg.inv(entry_cam@convert) + pose = matrix2angle(world2cam[:3,:3]) + angle = [p * 180 / np.pi for p in pose] + + return angle + + + +class Dataset(torch.utils.data.Dataset): + def __init__(self, + name, # Name of the dataset. + raw_shape, # Shape of the raw image data (NCHW). + max_size = None, # Artificially limit the size of the dataset. None = no limit. Applied before xflip. 
+ use_labels = False, # Enable conditioning labels? False = label dimension is zero. + xflip = False, # Artificially double the size of the dataset via x-flips. Applied after max_size. + random_seed = 0, # Random seed to use when applying max_size. + rebal_raw_idx = None, # Rebalance the dataset by sampling from the raw_idx list + data_rebalance=False, # Rebalance the dataset by sampling from the raw_idx list + ): + self._name = name + self._raw_shape = list(raw_shape) + self._use_labels = use_labels + self._raw_labels = None + self._raw_poses = None + self._label_shape = None + self._pose_shape = None + + + if data_rebalance: + raise NotImplementedError + assert rebal_raw_idx is not None, "rebal_raw_idx must be provided if data_rebalance is True" + self._raw_idx = rebal_raw_idx + else: + self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64) + + + self._raw_idx = self._filter_samples() + + # Apply max_size. + if (max_size is not None) and (self._raw_idx.size > max_size): + raise NotImplementedError + np.random.RandomState(random_seed).shuffle(self._raw_idx) + self._raw_idx = np.sort(self._raw_idx[:max_size]) + + # Apply xflip. + self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8) + if xflip: + self._raw_idx = np.tile(self._raw_idx, 2) + self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)]) + + def _filter_samples(self): # to be overridden by subclass + raise NotImplementedError + + + def _get_raw_labels(self): + if self._raw_labels is None: + self._raw_labels,self._raw_poses = self._load_raw_labels() if self._use_labels else None + + if self._raw_labels is None: + raise Exception("_raw_labels is None") + self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32) + + assert isinstance(self._raw_labels, np.ndarray) + assert self._raw_labels.shape[0] == self._raw_shape[0] + assert self._raw_labels.dtype in [np.float32, np.int64] + if self._raw_labels.dtype == np.int64: + assert self._raw_labels.ndim == 1 + assert np.all(self._raw_labels >= 0) + self._raw_labels_std = self._raw_labels.std(0) + + + if self._raw_poses is None: + raise Exception("_raw_poses is None") + self._raw_poses = np.zeros([self._raw_poses[0], 0], dtype=np.float32) + + assert isinstance(self._raw_poses, np.ndarray) + assert self._raw_poses.shape[0] == self._raw_shape[0] + assert self._raw_poses.dtype in [np.float32, np.int64] + if self._raw_poses.dtype == np.int64: + assert self._raw_poses.ndim == 1 + assert np.all(self._raw_poses >= 0) + self._raw_poses_std = self._raw_poses.std(0) + + return self._raw_labels + + def _get_raw_poses(self): + if self._raw_poses is None: + _ = self._get_raw_labels() + #raise Exception("please run _get_raw_labels first") + + return self._raw_poses + + + def close(self): # to be overridden by subclass + pass + + def _load_raw_image(self, raw_idx): # to be overridden by subclass + raise NotImplementedError + + def _load_raw_labels(self): # to be overridden by subclass + raise NotImplementedError + + + def __getstate__(self): + return dict(self.__dict__, _raw_labels=None, _raw_poses=None) + + def __del__(self): + try: + self.close() + except: + pass + + def __len__(self): + return self._raw_idx.size + + + + + def __getitem__(self, idx): + + + label = self.get_label(idx) + pose = self.get_coarse_pose(idx) + + # image = self._load_raw_image(self._raw_idx[idx]) + # assert isinstance(image, np.ndarray) + # assert list(image.shape) == self.image_shape + # assert image.dtype == np.uint8 + # if self._xflip[idx]: + # assert image.ndim == 3 # CHW + # image = 
image[:, :, ::-1] + # # # flip label + # # label = self.flip_yaw(label) + # # # flip pose + # # pose[[1, 2, 4, 5]] *= -1 + + image = self.get_image(idx) + + + return image, label,pose + + def flip_yaw(self, c): + pose_matrix = c.copy() + flipped = pose_matrix[:16].reshape(4,4) + flipped[0, 1] *= -1 + flipped[0, 2] *= -1 + flipped[1, 0] *= -1 + flipped[2, 0] *= -1 + flipped[0, 3] *= -1 + + flipped = flipped.reshape(16) + pose_matrix[:16] = flipped + + return pose_matrix + + def get_image(self, idx): + image = self._load_raw_image(self._raw_idx[idx]) + assert isinstance(image, np.ndarray) + assert list(image.shape) == self.image_shape + assert image.dtype == np.uint8 + if self._xflip[idx]: + assert image.ndim == 3 # CHW + image = image[:, :, ::-1] + + return image.copy() + + + def get_label(self, idx): + label = self._get_raw_labels()[self._raw_idx[idx]].copy() + if label.dtype == np.int64: + onehot = np.zeros(self.label_shape, dtype=np.float32) + onehot[label] = 1 + label = onehot + + if self._xflip[idx]: + assert label.shape == (25,) + label[[1, 2, 3, 4, 8]] *= -1 + + return label + + def get_coarse_pose(self, idx): + pose = self._get_raw_poses()[self._raw_idx[idx]].copy() + if pose.dtype == np.int64: + raise TypeError("pose should be float32") + onehot = np.zeros(self.pose_shape, dtype=np.float32) + onehot[pose] = 1 + pose = onehot + + if self._xflip[idx]: + pose_flip = pose.copy() + pose_flip[[1, 2, 4, 5]] *= -1 + + return pose_flip + + else: + return pose + + + + def get_details(self, idx): + d = dnnlib.EasyDict() + d.raw_idx = int(self._raw_idx[idx]) + d.xflip = (int(self._xflip[idx]) != 0) + d.raw_label = self._get_raw_labels()[d.raw_idx].copy() + # d.pose = self.get_coarse_pose(idx).copy() + + return d + + def get_label_std(self): + return self._raw_labels_std + + @property + def name(self): + return self._name + + @property + def image_shape(self): + return list(self._raw_shape[1:]) + + @property + def num_channels(self): + assert len(self.image_shape) == 3 # CHW + return self.image_shape[0] + + @property + def resolution(self): + assert len(self.image_shape) == 3 # CHW + assert self.image_shape[1] == self.image_shape[2] + return self.image_shape[1] + + @property + def label_shape(self): + if self._label_shape is None: + raw_labels = self._get_raw_labels() + if raw_labels.dtype == np.int64: + self._label_shape = [int(np.max(raw_labels)) + 1] + else: + self._label_shape = raw_labels.shape[1:] + return list(self._label_shape) + + @property + def pose_shape(self): + if self._pose_shape is None: + self._get_raw_labels() + if self._raw_poses.dtype == np.int64: + self._pose_shape = [int(np.max(self._raw_poses)) + 1] + else: + self._pose_shape = self._raw_poses.shape[1:] + return list(self._pose_shape) + + + @property + def label_dim(self): + assert len(self.label_shape) == 1 + return self.label_shape[0] + + @property + def has_labels(self): + return any(x != 0 for x in self.label_shape) + + @property + def has_onehot_labels(self): + return self._get_raw_labels().dtype == np.int64 + +#---------------------------------------------------------------------------- + +class ImageFolderDataset(Dataset): + def __init__(self, + path, # Path to directory or zip. + back_repeat = None, + resolution = None, # Ensure specific resolution, None = highest available. + data_rebalance_idx_file = None, + **super_kwargs, # Additional arguments for the Dataset base class. 
+ ): + self.min_yaw = 0 + self.max_yaw = 180 + self.max_pitch = 90 + self.back_repeat = 1 if back_repeat is None else back_repeat + self._path = path + self._zipfile = None + + if os.path.isdir(self._path): + raise NotImplementedError('Does not support directories yet') + self._type = 'dir' + self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files} + elif self._file_ext(self._path) == '.zip': + self._type = 'zip' + self._all_fnames = set(self._get_zipfile().namelist()) + else: + raise IOError('Path must point to a directory or zip') + + if data_rebalance_idx_file is not None: + raise NotImplementedError('data_rebalance is not implemented yet') + rebal_idx_list_path =data_rebalance_idx_file + #print('load rebal_idx_list from ',rebal_idx_list_path) + with open(rebal_idx_list_path, 'r') as f: + rebal_raw_idx = json.load(f) + rebal_raw_idx = np.array(rebal_raw_idx) + else: + rebal_raw_idx = None + + + PIL.Image.init() + self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION) + if len(self._image_fnames) == 0: + raise IOError('No image files found in the specified path') + + name = os.path.splitext(os.path.basename(self._path))[0] + raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape) + if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution): + raise IOError('Image files do not match the specified resolution') + super().__init__(name=name, raw_shape=raw_shape, rebal_raw_idx = rebal_raw_idx,**super_kwargs) + + + def _filter_samples(self): + if self.back_repeat>1: + raw_labels = self._get_raw_labels()[self._raw_idx] + label_list = [] + for entry in raw_labels: + label_list.append(get_poseangle(entry)) + poses = np.array(label_list) + # find [min_yaw, max_yaw] boolean + valid = (np.abs(poses[:,0])>=self.min_yaw) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + # find back boolean: [max(90, self.min_yaw), max_yaw] + back_valid = (np.abs(poses[:,0])>= max(90, self.min_yaw)) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + if not np.all(valid): + print(f"filtering samples by pose: ratio = {valid.sum()}/{len(self._raw_idx)}") + # boolean to index + valid_idx = self._raw_idx[valid] + back_idx = self._raw_idx[back_valid] + front_idx = np.array(list(set(valid_idx) - set(back_idx))) + + front_num = valid.sum()-len(back_idx) + front_back_ratio_min = front_num/2/len(back_idx) + print(f"if back num be the half of front num, at least repeat ({int(front_back_ratio_min)}) times.") + back_repeat = max(int(front_num/2/len(back_idx)), self.back_repeat) + + + + + # TODO: support the repeat times < 1 + # repeat [max(90, self.min_yaw), max_yaw] for multiple times + back_repeat_idx = np.tile(back_idx, back_repeat) + # merge front index and repeated back index + new_idx = np.concatenate((front_idx, back_repeat_idx)) + print(f"Repeat {len(back_idx)} back images till abs({self.max_yaw}) degree {back_repeat} times") + return new_idx + else: + return self._raw_idx + @staticmethod + def _file_ext(fname): + return os.path.splitext(fname)[1].lower() + + def _get_zipfile(self): + assert self._type == 'zip' + if self._zipfile is None: + self._zipfile = zipfile.ZipFile(self._path) + return self._zipfile + + def _open_file(self, fname): + if self._type == 'dir': + return open(os.path.join(self._path, fname), 'rb') + if self._type == 'zip': + return 
self._get_zipfile().open(fname, 'r') + return None + + def close(self): + try: + if self._zipfile is not None: + self._zipfile.close() + finally: + self._zipfile = None + + def __getstate__(self): + return dict(super().__getstate__(), _zipfile=None) + + def _load_raw_image(self, raw_idx): + fname = self._image_fnames[raw_idx] + with self._open_file(fname) as f: + if pyspng is not None and self._file_ext(fname) == '.png': + image = pyspng.load(f.read()) + else: + image = np.array(PIL.Image.open(f)) + if image.ndim == 2: + image = image[:, :, np.newaxis] # HW => HWC + image = image.transpose(2, 0, 1) # HWC => CHW + return image + + def _load_raw_labels(self): + fname = 'dataset.json' + if fname not in self._all_fnames: + return None + with self._open_file(fname) as f: + labels = json.load(f)['labels'] + if labels is None: + return None + labels = dict(labels) + labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames] + labels = np.array(labels) + labels = np.squeeze(labels) + #print('labels shape', labels.shape) # N, 31 + labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim]) + + poses = labels[:,25:] + labels = labels[:,:25] + + # print('labels shape', labels.shape) # N, 25 + # print('poses shape', poses.shape) # N, 6 + + return labels, poses + + +#---------------------------------------------------------------------------- + + +class MaskLabeledDataset(ImageFolderDataset): + + def __init__(self, + img_path, # Path to directory or zip. + seg_path, # Path to directory or zip. + back_repeat = None, + **super_kwargs, # Additional arguments for the Dataset base class. + ): + self.min_yaw = 0 + self.max_yaw = 180 + self.max_pitch = 90 + self.back_repeat = 1 if back_repeat is None else back_repeat + super().__init__(path=img_path, back_repeat = None,**super_kwargs) + + self._seg_dataset = ImageFolderDataset(seg_path, **super_kwargs) + + # Build the mapping from seg fname to seg raw index + seg_dict = {os.path.splitext(fname)[0]: idx for idx, fname in enumerate(self._seg_dataset._image_fnames)} + + # Build the mapping from index to seg raw index + self._seg_raw_idx = [] + for raw_idx in self._raw_idx: + fname = self._image_fnames[raw_idx] + key = os.path.splitext(fname)[0] + self._seg_raw_idx.append(seg_dict[key]) + self._seg_raw_idx = np.array(self._seg_raw_idx) + + def _filter_samples(self): + if self.back_repeat>1: + raw_labels = self._get_raw_labels()[self._raw_idx] + label_list = [] + for entry in raw_labels: + label_list.append(get_poseangle(entry)) + poses = np.array(label_list) + # find [min_yaw, max_yaw] boolean + valid = (np.abs(poses[:,0])>=self.min_yaw) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + # find back boolean: [max(90, self.min_yaw), max_yaw] + back_valid = (np.abs(poses[:,0])>= max(90, self.min_yaw)) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + if not np.all(valid): + print(f"filtering samples by pose: ratio = {valid.sum()}/{len(self._raw_idx)}") + # boolean to index + valid_idx = self._raw_idx[valid] + back_idx = self._raw_idx[back_valid] + front_idx = np.array(list(set(valid_idx) - set(back_idx))) + + front_num = valid.sum()-len(back_idx) + front_back_ratio_min = front_num/2/len(back_idx) + print(f"if back num be the half of front num, at least repeat ({int(front_back_ratio_min)}) times.") + back_repeat = max(int(front_num/2/len(back_idx)), self.back_repeat) + + + + + # TODO: support the repeat times < 1 + # repeat [max(90, self.min_yaw), max_yaw] for multiple times + 
back_repeat_idx = np.tile(back_idx, back_repeat) + # merge front index and repeated back index + new_idx = np.concatenate((front_idx, back_repeat_idx)) + print(f"Repeat {len(back_idx)} back images till abs({self.max_yaw}) degree {back_repeat} times") + return new_idx + else: + return self._raw_idx + + + + def __getitem__(self, idx): + # already flipped in the ImageFolderDataset + image = self.get_image(idx) + mask = self._seg_dataset.get_image(idx) + label = self.get_label(idx) + pose = self.get_coarse_pose(idx) + + + return image.copy(), mask.copy(), label,pose + diff --git a/3DPortraitGAN_pyramid/training/dual_discriminator.py b/3DPortraitGAN_pyramid/training/dual_discriminator.py new file mode 100644 index 0000000..1c753ec --- /dev/null +++ b/3DPortraitGAN_pyramid/training/dual_discriminator.py @@ -0,0 +1,502 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Discriminator architectures from the paper +"Efficient Geometry-aware 3D Generative Adversarial Networks".""" + +import numpy as np +import torch +from torch_utils import persistence +from torch_utils.ops import upfirdn2d +from training.networks_stylegan2 import DiscriminatorBlock, MappingNetwork, DiscriminatorEpilogue + + +@persistence.persistent_class +class SingleDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + sr_upsample_factor=1, # Ignored for SingleDiscriminator + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. + epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. 
+ ): + super().__init__() + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + + def forward(self, img, c, update_emas=False, **block_kwargs): + img = img['image'] + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +# ---------------------------------------------------------------------------- + +def filtered_resizing(image_orig_tensor, size, f, filter_mode='antialiased'): + if filter_mode == 'antialiased': + ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False, antialias=True) + elif filter_mode == 'classic': + ada_filtered_64 = upfirdn2d.upsample2d(image_orig_tensor, f, up=2) + ada_filtered_64 = torch.nn.functional.interpolate(ada_filtered_64, size=(size * 2 + 2, size * 2 + 2), + mode='bilinear', align_corners=False) + ada_filtered_64 = upfirdn2d.downsample2d(ada_filtered_64, f, down=2, flip_filter=True, padding=-1) + elif filter_mode == 'none': + ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False) + elif type(filter_mode) == float: + assert 0 < filter_mode < 1 + + filtered = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False, antialias=True) + aliased = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False, antialias=False) + ada_filtered_64 = (1 - filter_mode) * aliased + (filter_mode) * filtered + + return ada_filtered_64 + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DualDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. 
+ channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + disc_c_noise=0, # Corrupt camera parameters with X std dev of noise before disc. pose conditioning. + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. + epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. + ): + super().__init__() + img_channels *= 2 + + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1, 3, 3, 1])) + self.disc_c_noise = disc_c_noise + + def forward(self, img, c, update_emas=False, **block_kwargs): + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + img = torch.cat([img['image'], image_raw], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + if self.disc_c_noise > 0: c += torch.randn_like(c) * c.std(0) * self.disc_c_noise + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DummyDualDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. 
+ cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. + epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. + ): + super().__init__() + img_channels *= 2 + + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1, 3, 3, 1])) + + self.raw_fade = 1 + + def forward(self, img, c, update_emas=False, **block_kwargs): + self.raw_fade = max(0, self.raw_fade - 1 / (500000 / 32)) + + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], + f=self.resample_filter) * self.raw_fade + img = torch.cat([img['image'], image_raw], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +# ---------------------------------------------------------------------------- +from training.networks_stylegan2 import FullyConnectedLayer + + +class PoseShapeAwareDualDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + seg_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + disc_c_noise=0, # Corrupt camera parameters with X std dev of noise before disc. pose conditioning. + explicitly_symmetry=False, + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. 
+ epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. + ): + super().__init__() + img_channels = img_channels * 2 + seg_channels + self.camera_param_dim = c_dim + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + self.pose_branch = DPoseBranch(num_betas=10, in_channel=channels_dict[4]*4*4) + self.c_dim += self.pose_branch.output_dim + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if self.c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if self.c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=self.c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1, 3, 3, 1])) + self.disc_c_noise = disc_c_noise + + self.explicitly_symmetry = explicitly_symmetry + + def flip_yaw(self, matrix): + flipped_matrix = matrix.clone() + flipped = flipped_matrix[:, :16].reshape(-1, 4, 4) + flipped[:, 0, 1] *= -1 + flipped[:, 0, 2] *= -1 + flipped[:, 1, 0] *= -1 + flipped[:, 2, 0] *= -1 + flipped[:, 0, 3] *= -1 + + flipped = flipped.reshape(-1, 16) + flipped_matrix[:, :16] = flipped.clone() + + return flipped_matrix + + def predict_pose(self, img, c,update_emas=False, **block_kwargs): + + + if self.explicitly_symmetry: + theta = torch.atan2(c[:, [11]], c[:, [3]]) # math.atan2(z, x) + is_left = (theta >= -np.pi / 2) & (theta <= np.pi / 2) + + img_flip = torch.flip(img['image'], dims=[3]) + img_flip_raw = torch.flip(img['image_raw'], dims=[3]) + seg_flip = torch.flip(img['image_mask'], dims=[3]) + + is_left_img = is_left.unsqueeze(2).unsqueeze(3) + input_img = torch.where(is_left_img, img_flip, img['image']) # if left, flip image + misc.assert_shape(input_img, img_flip.shape ) + + is_left_img_raw = is_left.unsqueeze(2).unsqueeze(3) + input_img_raw = torch.where(is_left_img_raw, img_flip_raw, img['image_raw']) # if left, flip image_raw + misc.assert_shape(input_img_raw, img_flip_raw.shape ) + + is_left_seg = is_left.unsqueeze(2).unsqueeze(3) + input_seg = torch.where(is_left_seg, seg_flip, img['image_mask']) # if left, flip seg + misc.assert_shape(input_seg, seg_flip.shape ) + + img = {'image': input_img, 'image_raw': input_img_raw, 'image_mask': input_seg} + + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + seg = filtered_resizing(img['image_mask'], size=img['image'].shape[-1], f=self.resample_filter) + seg = 2 * seg - 1 # normalize to [-1,1] + img = torch.cat([img['image'], image_raw, seg], 1) + + _ = update_emas # 
unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + + pose_branch_input_feature = self.b4.get_flatten_x(x, img) + pose_params = self.pose_branch(pose_branch_input_feature, c) + + flip_pose_params = pose_params.clone() + flip_pose_params[:, [1, 2, 4, 5]] *= -1 # flip y and z axis angles + + pose_params = torch.where(is_left, flip_pose_params, pose_params) + + + else: + raise NotImplementedError + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + seg = filtered_resizing(img['image_mask'], size=img['image'].shape[-1], f=self.resample_filter) + seg = 2 * seg - 1 # normalize to [-1,1] + img = torch.cat([img['image'], image_raw, seg], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + + pose_branch_input_feature = self.b4.get_flatten_x(x, img) + pose_params = self.pose_branch(pose_branch_input_feature, c) + + + return pose_params,pose_branch_input_feature + + def forward(self, img, c, gt_pose = None, update_emas=False, **block_kwargs): + + if self.explicitly_symmetry: + + pose_params,_ = self.predict_pose(img, c, update_emas, **block_kwargs) + + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + seg = filtered_resizing(img['image_mask'], size=img['image'].shape[-1], f=self.resample_filter) + seg = 2 * seg - 1 # normalize to [-1,1] + img = torch.cat([img['image'], image_raw, seg], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + + pose_branch_input_feature = self.b4.get_flatten_x(x, img) + + else: + raise NotImplementedError + pose_params, pose_branch_input_feature = self.predict_pose(img, c, update_emas, **block_kwargs) + + if gt_pose is not None: + #raise NotImplementedError + c = torch.cat([c, gt_pose], dim=1) + else: + pose_label = pose_params.detach() # detach + c = torch.cat([c, pose_label], dim=1) + + cmap = None + if self.c_dim > 0: + if self.disc_c_noise > 0: c += torch.randn_like(c) * c.std(0) * self.disc_c_noise + cmap = self.mapping(None, c) + # x = self.b4(x, img, cmap) + x = self.b4(flatten_x=pose_branch_input_feature, cmap=cmap) + return x, pose_params + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +from torch_utils import misc + + +class DPoseBranch(torch.nn.Module): + def __init__(self, num_betas, in_channel): + super().__init__() + self.num_betas = num_betas + hidden_dim = 64 + self.in_channel = in_channel + # + # predict_betas = predict_transl = predict_scale = False + # predict_pose = True + + out_dim = 6 + + # if predict_betas: + # out_dim += num_betas + # if predict_transl: + # out_dim += 3 + # if predict_scale: + # out_dim += 1 + # if predict_pose: + # out_dim += 6 + + self.in_channel += 25 # c dim + + self.output_dim = out_dim + self.net = torch.nn.Sequential( + # linear + # FullyConnectedLayer(self.in_channel, hidden_dim), + # torch.nn.LeakyReLU(0.2), + # FullyConnectedLayer(hidden_dim, self.output_dim) # betas, scale, transl, rots of neck and head + FullyConnectedLayer(self.in_channel, 2048, activation='lrelu'), + FullyConnectedLayer(2048, 512, activation='lrelu'), + FullyConnectedLayer(512, 128, activation='lrelu'), + FullyConnectedLayer(128, 32, activation='lrelu'), + 
FullyConnectedLayer(32, self.output_dim) + ) + + + def forward(self, feature, camera_parameters): + # misc.assert_shape(feature, [None, self.in_channel]) + # misc.assert_shape(camera_parameters, [None, 25]) + feature = torch.cat([feature, camera_parameters], dim=1) + + pose = self.net(feature) # (B, num_betas + 1 + 3 + 6) + + return pose \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/loss.py b/3DPortraitGAN_pyramid/training/loss.py new file mode 100644 index 0000000..9a9cfaa --- /dev/null +++ b/3DPortraitGAN_pyramid/training/loss.py @@ -0,0 +1,562 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Loss functions.""" + +import numpy as np +import torch +from torch_utils import training_stats +from torch_utils.ops import conv2d_gradfix +from torch_utils.ops import upfirdn2d +from training.dual_discriminator import filtered_resizing +from torch_utils import misc +import copy + + +# ---------------------------------------------------------------------------- + +class Loss: + def accumulate_gradients(self, phase, real_img, real_seg, real_c, real_pose, gen_z, gen_c, gen_pose,gain, cur_nimg, + cur_nimg_start): # to be overridden by subclass + raise NotImplementedError() + + +# ---------------------------------------------------------------------------- + +class StyleGAN2Loss(Loss): + def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, r1_gamma_seg=1000,style_mixing_prob=0, pl_weight=0, + density_noise_fade_kimg=0, + pl_batch_shrink=2, pl_decay=0.01, + pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0, r1_gamma_init=0, r1_gamma_fade_kimg=0, + neural_rendering_resolution_initial=64, neural_rendering_resolution_final=None, + neural_rendering_resolution_fade_kimg=0, + gpc_reg_fade_kimg=1000, gpc_reg_prob=None, dual_discrimination=False, filter_mode='antialiased', + thickness=None, + pose_loss_weight = None, input_pose_params_reg_loss_weight = None,input_pose_params_reg_loss_kimg = None, + rank=None,bcg_reg_prob=0 + ): + super().__init__() + self.device = device + self.G = G + self.D = D + self.augment_pipe = augment_pipe + self.r1_gamma = r1_gamma + self.r1_gamma_seg = r1_gamma_seg + self.style_mixing_prob = style_mixing_prob + self.pl_weight = pl_weight + self.pl_batch_shrink = pl_batch_shrink + self.pl_decay = pl_decay + self.pl_no_weight_grad = pl_no_weight_grad + self.pl_mean = torch.zeros([], device=device) + self.blur_init_sigma = blur_init_sigma + self.blur_fade_kimg = blur_fade_kimg + self.r1_gamma_init = r1_gamma_init + self.r1_gamma_fade_kimg = r1_gamma_fade_kimg + self.neural_rendering_resolution_initial = neural_rendering_resolution_initial + self.neural_rendering_resolution_final = neural_rendering_resolution_final + self.neural_rendering_resolution_fade_kimg = neural_rendering_resolution_fade_kimg + self.density_noise_fade_kimg = density_noise_fade_kimg + self.gpc_reg_fade_kimg = gpc_reg_fade_kimg + self.gpc_reg_prob = gpc_reg_prob + self.dual_discrimination = dual_discrimination + self.filter_mode = filter_mode + 
self.resample_filter = upfirdn2d.setup_filter([1, 3, 3, 1], device=device) + self.blur_raw_target = True + self.bcg_reg_prob = bcg_reg_prob + assert self.gpc_reg_prob is None or (0 <= self.gpc_reg_prob <= 1) + + + self.thickness = thickness + self.pose_loss_weight = pose_loss_weight + self.input_pose_params_reg_loss_weight = input_pose_params_reg_loss_weight + self.input_pose_params_reg_loss_kimg = input_pose_params_reg_loss_kimg + + + # for snap + self.swapping_prob = None + self.neural_rendering_resolution = None + self.blur_sigma = None + + + self.rank = rank + + def run_G(self, z, c, pose_params, swapping_prob, neural_rendering_resolution, update_emas=False): + if swapping_prob is not None: + c_swapped = torch.roll(c.clone(), 1, 0) + p_swapped = torch.roll(pose_params.clone(), 1, 0) + rand_ = torch.rand((c.shape[0], 1), device=c.device) + c_gen_conditioning = torch.where(rand_ < swapping_prob, c_swapped, c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, pose_params) + else: + c_gen_conditioning = torch.zeros_like(c) + pose_params_conditioning = torch.zeros([c.shape[0],6]).to(c.device) + + ws = self.G.mapping(z, c_gen_conditioning, pose_params_conditioning,update_emas=update_emas) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c,pose_params, update_emas=False)[:, cutoff:] + + if self.bcg_reg_prob > 0: + ws_swapped = torch.roll(ws.clone(), 1, 0) + ws_bcg = torch.where(torch.rand((ws.shape[0], 1, 1), device=ws.device) < self.bcg_reg_prob, ws_swapped, ws) + else: + ws_bcg = None + + + gen_output = self.G.synthesis(ws, c, neural_rendering_resolution=neural_rendering_resolution, + update_emas=update_emas, + apply_def=True, pose_params=pose_params,ws_bcg = ws_bcg + ) + return gen_output, ws + + + + def run_D(self, img, c, gt_pose=None, blur_sigma=0, blur_sigma_raw=0, update_emas=False): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div( + blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + + if self.augment_pipe is not None: + raise NotImplementedError + augmented_pair = self.augment_pipe(torch.cat([img['image'], + torch.nn.functional.interpolate(img['image_raw'], + size=img['image'].shape[2:], + mode='bilinear', + antialias=True)], + dim=1)) + img['image'] = augmented_pair[:, :img['image'].shape[1]] + img['image_raw'] = torch.nn.functional.interpolate(augmented_pair[:, img['image'].shape[1]:], + size=img['image_raw'].shape[2:], mode='bilinear', + antialias=True) + + logits, pose = self.D(img, c, gt_pose=gt_pose, update_emas=update_emas) + return logits, pose + + def run_D_pose_prediction(self, img, c, blur_sigma=0): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div( + blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + + if self.augment_pipe is not None: + augmented_pair = self.augment_pipe(torch.cat([img['image'], + torch.nn.functional.interpolate(img['image_raw'], + 
size=img['image'].shape[2:], + mode='bilinear', + antialias=True)], + dim=1)) + img['image'] = augmented_pair[:, :img['image'].shape[1]] + img['image_raw'] = torch.nn.functional.interpolate(augmented_pair[:, img['image'].shape[1]:], + size=img['image_raw'].shape[2:], mode='bilinear', + antialias=True) + + pose, _ = self.D.predict_pose(img, c) + return pose + + def get_pose_params_D(self, real_img, real_seg, real_c, cur_nimg): + blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), + 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0 + r1_gamma = self.r1_gamma + + alpha = min(cur_nimg / (self.gpc_reg_fade_kimg * 1e3), 1) if self.gpc_reg_fade_kimg > 0 else 1 + swapping_prob = (1 - alpha) * 1 + alpha * self.gpc_reg_prob if self.gpc_reg_prob is not None else None + + if not isinstance(real_img,dict): + if self.neural_rendering_resolution_final is not None: + alpha = min(cur_nimg / (self.neural_rendering_resolution_fade_kimg * 1e3), 1) + neural_rendering_resolution = int(np.rint(self.neural_rendering_resolution_initial * ( + 1 - alpha) + self.neural_rendering_resolution_final * alpha)) + else: + neural_rendering_resolution = self.neural_rendering_resolution_initial + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + real_seg_raw = filtered_resizing(real_seg, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + if self.blur_raw_target: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw, 'image_mask': real_seg_raw} + + else: + assert 'image_raw' in real_img.keys(), 'image_raw is not in real_img.keys()' + assert 'image' in real_img.keys(), 'image is not in real_img.keys()' + + + # get pose_params from real image + real_img_tmp_image = real_img['image'].detach().requires_grad_(True) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(True) + real_img_tmp_image_mask = real_img['image_mask'].detach().requires_grad_(True) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw, 'image_mask': real_img_tmp_image_mask} + + predicted_real_pose = self.run_D_pose_prediction(real_img_tmp, real_c, blur_sigma=blur_sigma) + return predicted_real_pose + + def get_pose_params_G(self,z,c): + predicted_pose = self.G.get_pose_params(z,c) + return predicted_pose + + def accumulate_gradients(self, phase, real_img, real_seg, real_c, real_pose, + gen_z, gen_c,gen_pose, + gain, cur_nimg, cur_nimg_start): + assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth'] + if self.G.rendering_kwargs.get('density_reg', 0) == 0: + phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase) + if self.r1_gamma == 0: + phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase) + blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), + 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0 + self.blur_sigma = blur_sigma + r1_gamma = self.r1_gamma + self.G.rendering_kwargs["density_noise"] = max(1 - cur_nimg / (self.density_noise_fade_kimg * 1e3), + 0) if self.density_noise_fade_kimg > 0 else 0 + + alpha = min(cur_nimg / (self.gpc_reg_fade_kimg * 1e3), 1) if self.gpc_reg_fade_kimg > 0 else 1 + swapping_prob = (1 - alpha) * 1 + alpha * self.gpc_reg_prob if self.gpc_reg_prob is not None 
else None + self.swapping_prob = swapping_prob + + if self.neural_rendering_resolution_final is not None: + alpha = min((cur_nimg-cur_nimg_start) / (self.neural_rendering_resolution_fade_kimg * 1e3), 1) + neural_rendering_resolution = int(np.rint(self.neural_rendering_resolution_initial * ( + 1 - alpha) + self.neural_rendering_resolution_final * alpha)) + else: + neural_rendering_resolution = self.neural_rendering_resolution_initial + + self.neural_rendering_resolution = neural_rendering_resolution + + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + real_seg_raw = filtered_resizing(real_seg, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + + + if self.blur_raw_target: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw, 'image_mask': real_seg_raw} + + + input_pose_params = self.get_pose_params_G(gen_z,gen_c) + + + for i in range(input_pose_params.shape[1]): + training_stats.report('pose_scale/input_pose_params_{}'.format(i), + (input_pose_params[:, i]).abs().mean() / np.pi * 180) + + + # Gmain: Maximize logits for generated images. + if phase in ['Gmain', 'Gboth']: + with torch.autograd.profiler.record_function('Gmain_forward'): + gen_img, _gen_ws = self.run_G(gen_z, gen_c, input_pose_params, swapping_prob=swapping_prob, + neural_rendering_resolution=neural_rendering_resolution) + + + gen_logits, predict_gen_pose = self.run_D(gen_img, gen_c, gt_pose=None, blur_sigma=blur_sigma) + training_stats.report('Loss/scores/fake_posed', gen_logits) + training_stats.report('Loss/signs/fake_posed', gen_logits.sign()) + loss_Gmain = torch.nn.functional.softplus(-gen_logits) + + # Lpreg + if self.input_pose_params_reg_loss_weight>0 and cur_nimg<(self.input_pose_params_reg_loss_kimg+200) * 1e3: + + if cur_nimg 0 and self.G.rendering_kwargs[ + 'reg_type'] == 'l1': + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, input_pose_params,update_emas=False)[:, cutoff:] + initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1 + perturbed_coordinates = initial_coordinates + torch.randn_like(initial_coordinates) * self.G.rendering_kwargs['density_reg_p_dist'] + all_coordinates = 
torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + TVloss = torch.nn.functional.l1_loss(sigma_initial, sigma_perturbed) * self.G.rendering_kwargs[ + 'density_reg'] + training_stats.report('Loss/G_reg/TVloss_L1', TVloss) + TVloss.mul(gain).backward() + + # Alternative density regularization + if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs[ + 'reg_type'] == 'monotonic-detach': + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + + initial_coordinates = torch.rand((ws.shape[0], 2000, 3), device=ws.device) * 2 - 1 # Front + + perturbed_coordinates = initial_coordinates + torch.tensor([0, 0, -1], device=ws.device) * (1/256) * self.G.rendering_kwargs['box_warp'] # Behind + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + monotonic_loss = torch.relu(sigma_initial.detach() - sigma_perturbed).mean() * 10 + monotonic_loss.mul(gain).backward() + + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + ws = self.G.mapping(gen_z, c_gen_conditioning,pose_params_conditioning, update_emas=False) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, input_pose_params,update_emas=False)[:, cutoff:] + initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1 + perturbed_coordinates = initial_coordinates + torch.randn_like(initial_coordinates) * (1/256) * self.G.rendering_kwargs['box_warp'] + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = 
self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + TVloss = torch.nn.functional.l1_loss(sigma_initial, sigma_perturbed) * self.G.rendering_kwargs[ + 'density_reg'] + training_stats.report('Loss/G_reg/TVloss_monotonic-detach', TVloss) + TVloss.mul(gain).backward() + + # Alternative density regularization + if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs[ + 'reg_type'] == 'monotonic-fixed': + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + + initial_coordinates = torch.rand((ws.shape[0], 2000, 3), device=ws.device) * 2 - 1 # Front + + perturbed_coordinates = initial_coordinates + torch.tensor([0, 0, -1], device=ws.device) * (1/256) * self.G.rendering_kwargs['box_warp'] # Behind + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + monotonic_loss = torch.relu(sigma_initial - sigma_perturbed).mean() * 10 + monotonic_loss.mul(gain).backward() + + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, input_pose_params,update_emas=False)[:, cutoff:] + initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1 + perturbed_coordinates = initial_coordinates + torch.randn_like(initial_coordinates) * (1/256) * self.G.rendering_kwargs['box_warp'] + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, 
torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + TVloss = torch.nn.functional.l1_loss(sigma_initial, sigma_perturbed) * self.G.rendering_kwargs[ + 'density_reg'] + training_stats.report('Loss/G_reg/TVloss_monotonic-fixed', TVloss) + TVloss.mul(gain).backward() + + # Dmain: Minimize logits for generated images. + loss_Dgen = 0 + if phase in ['Dmain', 'Dboth']: + with torch.autograd.profiler.record_function('Dgen_forward'): + + gen_img, _gen_ws = self.run_G(gen_z, gen_c, input_pose_params, swapping_prob=swapping_prob, + neural_rendering_resolution=neural_rendering_resolution, update_emas=True) + gen_logits, predict_gen_pose = self.run_D(gen_img, gen_c, gt_pose=None, blur_sigma=blur_sigma, + update_emas=True) + + training_stats.report('Loss/scores/fake', gen_logits) + training_stats.report('Loss/signs/fake', gen_logits.sign()) + loss_Dgen = torch.nn.functional.softplus( gen_logits) # -log (1 - sigmoid(gen_logits)) = log (1 + exp(gen_logits)) = softplus(gen_logits) + + pose_param_loss = (predict_gen_pose - input_pose_params).square().sum([1]) * self.pose_loss_weight + training_stats.report('Loss/D/Poseloss', pose_param_loss) + + for i in range(predict_gen_pose.shape[1]): + training_stats.report('Loss_pose/fake_{}'.format(i), + (predict_gen_pose[:, i] - input_pose_params[:, i]).abs().mean() / np.pi * 180) + training_stats.report('pose_scale/fake_{}'.format(i), + (predict_gen_pose[:, i]).abs().mean() / np.pi * 180) + + + + + with torch.autograd.profiler.record_function('Dgen_backward'): + (loss_Dgen + pose_param_loss).mean().mul(gain).backward() + + + # Dmain: Maximize logits for real images. + # Dr1: Apply R1 regularization. + if phase in ['Dmain', 'Dreg', 'Dboth']: + name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1' + with torch.autograd.profiler.record_function(name + '_forward'): + real_img_tmp_image = real_img['image'].detach().requires_grad_(phase in ['Dreg', 'Dboth']) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(phase in ['Dreg', 'Dboth']) + real_img_tmp_image_mask = real_img['image_mask'].detach().requires_grad_(phase in ['Dreg', 'Dboth']) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw, 'image_mask': real_img_tmp_image_mask} + + real_logits, predicted_real_pose = self.run_D(real_img_tmp, real_c, + gt_pose=None, + blur_sigma=blur_sigma) + + training_stats.report('Loss/scores/real', real_logits) + training_stats.report('Loss/signs/real', real_logits.sign()) + + + for i in range(predicted_real_pose.shape[1]): + training_stats.report('Loss_pose/real_{}'.format(i), ( + predicted_real_pose[:, i] - real_pose[:, i]).abs().mean() / np.pi * 180) + training_stats.report('pose_scale/real_{}'.format(i), + (predicted_real_pose[:, i]).abs().mean() / np.pi * 180) + + + loss_Dreal = 0 + if phase in ['Dmain', 'Dboth']: + loss_Dreal = torch.nn.functional.softplus( + -real_logits) # - log sigmoid(real_logits) = log (1 + exp(-real_logits)) = softplus(-real_logits) + training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal) + training_stats.report('Loss/D/loss_gen', loss_Dgen) + training_stats.report('Loss/D/loss_real', loss_Dreal) + + + # + + loss_Dr1 = 0 + if phase in ['Dreg', 'Dboth']: + if self.dual_discrimination: + with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients(): + r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], + 
inputs=[real_img_tmp['image'], real_img_tmp['image_raw'], real_img_tmp['image_mask']], + create_graph=True, only_inputs=True) + r1_grads_image = r1_grads[0] + r1_grads_image_raw = r1_grads[1] + r1_grads_image_mask = r1_grads[2] + r1_penalty = r1_grads_image.square().sum([1,2,3]) + r1_grads_image_raw.square().sum([1,2,3]) + r1_penalty_seg = r1_grads_image_mask.square().sum([1, 2, 3]) + else: # single discrimination + with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients(): + r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp['image'], real_img_tmp['image_mask']], + create_graph=True, only_inputs=True) + r1_grads_image = r1_grads[0] + r1_grads_image_mask = r1_grads[1] + r1_penalty = r1_grads_image.square().sum([1, 2, 3]) + r1_penalty_seg = r1_grads_image_mask.square().sum([1, 2, 3]) + loss_Dr1 = r1_penalty * (self.r1_gamma / 2) + r1_penalty_seg * (self.r1_gamma_seg / 2) + training_stats.report('Loss/r1_penalty', r1_penalty) + training_stats.report('Loss/r1_penalty_seg', r1_penalty_seg) + training_stats.report('Loss/D/reg', loss_Dr1) + + + with torch.autograd.profiler.record_function(name + '_backward'): + (loss_Dreal + loss_Dr1).mean().mul(gain).backward() + +# ---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/training/networks_stylegan2.py b/3DPortraitGAN_pyramid/training/networks_stylegan2.py new file mode 100644 index 0000000..cd56a4a --- /dev/null +++ b/3DPortraitGAN_pyramid/training/networks_stylegan2.py @@ -0,0 +1,1138 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Network architectures from the paper +"Analyzing and Improving the Image Quality of StyleGAN". +Matches the original implementation of configs E-F by Karras et al. at +https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py""" + +""" +3D-aware stylegan2 backbone architectures from the paper +"Mimic3D: Thriving 3D-Aware GANs via 3D-to-2D Imitation (ICCV 2023)" +https://github.com/SeanChenxy/Mimic3D/blob/main/training/networks_stylegan2.py +""" + +import numpy as np +import torch +from torch_utils import misc +from torch_utils import persistence +from torch_utils.ops import conv2d_resample +from torch_utils.ops import upfirdn2d +from torch_utils.ops import bias_act +from torch_utils.ops import fma + + +# ---------------------------------------------------------------------------- + +@misc.profiled_function +def normalize_2nd_moment(x, dim=1, eps=1e-8): + return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt() + + +# ---------------------------------------------------------------------------- + +@misc.profiled_function +def modulated_conv2d( + x, # Input tensor of shape [batch_size, in_channels, in_height, in_width]. + weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width]. 
+ styles, # Modulation coefficients of shape [batch_size, in_channels]. + noise=None, # Optional noise tensor to add to the output activations. + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + padding=0, # Padding with respect to the upsampled image. + resample_filter=None, + # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter(). + demodulate=True, # Apply weight demodulation? + flip_weight=True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d). + fused_modconv=True, # Perform modulation, convolution, and demodulation as a single fused operation? +): + batch_size = x.shape[0] + out_channels, in_channels, kh, kw = weight.shape + misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk] + misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] + misc.assert_shape(styles, [batch_size, in_channels]) # [NI] + + # Pre-normalize inputs to avoid FP16 overflow. + if x.dtype == torch.float16 and demodulate: + weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1, 2, 3], + keepdim=True)) # max_Ikk + styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I + + # Calculate per-sample weights and demodulation coefficients. + w = None + dcoefs = None + if demodulate or fused_modconv: + w = weight.unsqueeze(0) # [NOIkk] + w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk] + if demodulate: + dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO] + if demodulate and fused_modconv: + w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk] + + # Execute by scaling the activations before and after the convolution. + if not fused_modconv: + x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1) + x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, + padding=padding, flip_weight=flip_weight) + if demodulate and noise is not None: + x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)) + elif demodulate: + x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1) + elif noise is not None: + x = x.add_(noise.to(x.dtype)) + return x + + # Execute as one fused op using grouped convolution. + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + batch_size = int(batch_size) + misc.assert_shape(x, [batch_size, in_channels, None, None]) + x = x.reshape(1, -1, *x.shape[2:]) + w = w.reshape(-1, in_channels, kh, kw) + x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, + groups=batch_size, flip_weight=flip_weight) + x = x.reshape(batch_size, -1, *x.shape[2:]) + if noise is not None: + x = x.add_(noise) + return x + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class FullyConnectedLayer(torch.nn.Module): + def __init__(self, + in_features, # Number of input features. + out_features, # Number of output features. + bias=True, # Apply additive bias before the activation function? + activation='linear', # Activation function: 'relu', 'lrelu', etc. + lr_multiplier=1, # Learning rate multiplier. + bias_init=0, # Initial value for the additive bias. 
+ ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.activation = activation + self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier) + self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None + self.weight_gain = lr_multiplier / np.sqrt(in_features) + self.bias_gain = lr_multiplier + + def forward(self, x): + w = self.weight.to(x.dtype) * self.weight_gain + b = self.bias + if b is not None: + b = b.to(x.dtype) + if self.bias_gain != 1: + b = b * self.bias_gain + + if self.activation == 'linear' and b is not None: + x = torch.addmm(b.unsqueeze(0), x, w.t()) + else: + x = x.matmul(w.t()) + x = bias_act.bias_act(x, b, act=self.activation) + return x + + def extra_repr(self): + return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class Conv2dLayer(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. + kernel_size, # Width and height of the convolution kernel. + bias=True, # Apply additive bias before the activation function? + activation='linear', # Activation function: 'relu', 'lrelu', etc. + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output to +-X, None = disable clamping. + channels_last=False, # Expect the input to have memory_format=channels_last? + trainable=True, # Update the weights of this layer during training? + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.activation = activation + self.up = up + self.down = down + self.conv_clamp = conv_clamp + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.padding = kernel_size // 2 + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) + self.act_gain = bias_act.activation_funcs[activation].def_gain + + memory_format = torch.channels_last if channels_last else torch.contiguous_format + weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format) + bias = torch.zeros([out_channels]) if bias else None + if trainable: + self.weight = torch.nn.Parameter(weight) + self.bias = torch.nn.Parameter(bias) if bias is not None else None + else: + self.register_buffer('weight', weight) + if bias is not None: + self.register_buffer('bias', bias) + else: + self.bias = None + + def forward(self, x, gain=1): + w = self.weight * self.weight_gain + b = self.bias.to(x.dtype) if self.bias is not None else None + flip_weight = (self.up == 1) # slightly faster + x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, + padding=self.padding, flip_weight=flip_weight) + + act_gain = self.act_gain * gain + act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None + x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp) + return x + + def extra_repr(self): + return ' '.join([ + f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},', + f'up={self.up}, down={self.down}']) + + +# ---------------------------------------------------------------------------- + 
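+# Illustrative usage sketch (comment only; hypothetical shapes, not part of the original file).
+# Both FullyConnectedLayer and Conv2dLayer follow StyleGAN2's equalized learning-rate
+# convention: weights are stored at unit variance and rescaled at run time by a gain
+# proportional to 1/sqrt(fan_in) (lr_multiplier / sqrt(in_features) for the FC layer,
+# 1 / sqrt(in_channels * k^2) for the conv), with bias and activation fused via bias_act.
+#
+#   fc = FullyConnectedLayer(in_features=512, out_features=128, activation='lrelu')
+#   y = fc(torch.randn(4, 512))              # -> [4, 128]
+#
+#   conv = Conv2dLayer(in_channels=64, out_channels=128, kernel_size=3, up=2)
+#   z = conv(torch.randn(4, 64, 32, 32))     # -> [4, 128, 64, 64], upsampled with the [1, 3, 3, 1] filter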
+@persistence.persistent_class +class MappingNetwork(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality, 0 = no latent. + c_dim, # Conditioning label (C) dimensionality, 0 = no label. + w_dim, # Intermediate latent (W) dimensionality. + num_ws, # Number of intermediate latents to output, None = do not broadcast. + num_layers=8, # Number of mapping layers. + embed_features=None, # Label embedding dimensionality, None = same as w_dim. + layer_features=None, # Number of intermediate features in the mapping layers, None = same as w_dim. + activation='lrelu', # Activation function: 'relu', 'lrelu', etc. + lr_multiplier=0.01, # Learning rate multiplier for the mapping layers. + w_avg_beta=0.998, # Decay for tracking the moving average of W during training, None = do not track. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.num_ws = num_ws + self.num_layers = num_layers + self.w_avg_beta = w_avg_beta + + if embed_features is None: + embed_features = w_dim + if c_dim == 0: + embed_features = 0 + if layer_features is None: + layer_features = w_dim + features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim] + + if c_dim > 0: + self.embed = FullyConnectedLayer(c_dim, embed_features) + for idx in range(num_layers): + in_features = features_list[idx] + out_features = features_list[idx + 1] + layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier) + setattr(self, f'fc{idx}', layer) + + if num_ws is not None and w_avg_beta is not None: + self.register_buffer('w_avg', torch.zeros([w_dim])) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): + # Embed, normalize, and concat inputs. + x = None + with torch.autograd.profiler.record_function('input'): + if self.z_dim > 0: + misc.assert_shape(z, [None, self.z_dim]) + x = normalize_2nd_moment(z.to(torch.float32)) + if self.c_dim > 0: + misc.assert_shape(c, [None, self.c_dim]) + y = normalize_2nd_moment(self.embed(c.to(torch.float32))) + x = torch.cat([x, y], dim=1) if x is not None else y + + # Main layers. + for idx in range(self.num_layers): + layer = getattr(self, f'fc{idx}') + x = layer(x) + + # Update moving average of W. + if update_emas and self.w_avg_beta is not None: + with torch.autograd.profiler.record_function('update_w_avg'): + self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) + + # Broadcast. + if self.num_ws is not None: + with torch.autograd.profiler.record_function('broadcast'): + x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) + + # Apply truncation. + if truncation_psi != 1: + with torch.autograd.profiler.record_function('truncate'): + assert self.w_avg_beta is not None + if self.num_ws is None or truncation_cutoff is None: + x = self.w_avg.lerp(x, truncation_psi) + else: + x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) + return x + + def extra_repr(self): + return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisLayer(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this layer. + kernel_size=3, # Convolution kernel size. 
+ up=1, # Integer upsampling factor. + use_noise=True, # Enable noise input? + activation='lrelu', # Activation function: 'relu', 'lrelu', etc. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + channels_last=False, # Use channels_last format for the weights? + roll_out=None, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.w_dim = w_dim + self.resolution = resolution + self.up = up + self.use_noise = use_noise + self.activation = activation + self.conv_clamp = conv_clamp + self.roll_out = roll_out + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.padding = kernel_size // 2 + self.act_gain = bias_act.activation_funcs[activation].def_gain + + affine_scale = 1 + if self.roll_out in ['b', 'a']: + affine_scale = 9 + elif self.roll_out in ['s']: + affine_scale = 3 + self.affine = FullyConnectedLayer(w_dim, in_channels * affine_scale, bias_init=1) + memory_format = torch.channels_last if channels_last else torch.contiguous_format + self.weight = torch.nn.Parameter(torch.randn( + [out_channels, in_channels * (1, 3)[self.roll_out in ['b', 'a']], + kernel_size, kernel_size]).to(memory_format=memory_format)) + if use_noise: + self.register_buffer('noise_const', torch.randn([resolution, resolution * (1, 3)[self.roll_out == 'w']])) + self.noise_strength = torch.nn.Parameter(torch.zeros([])) + self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + + def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1, **_): + assert noise_mode in ['random', 'const', 'none'] + # noise_mode = 'const' + in_resolution = self.resolution // self.up + misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution * (1, 3)[self.roll_out == 'w']]) + styles = self.affine(w) + if self.roll_out in ['b', 'a', 's']: + styles = styles.view(styles.shape[0], 3, styles.shape[1] // 3).view(styles.shape[0] * 3, + styles.shape[1] // 3) + if self.roll_out in ['b', 'a', ]: + x = aware3d_att(x) if self.roll_out == 'a' else aware3d(x) + noise = None + if self.use_noise and noise_mode == 'random': + noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution * (1, 3)[self.roll_out == 'w']], + device=x.device) * self.noise_strength + if self.use_noise and noise_mode == 'const': + noise = self.noise_const * self.noise_strength + + flip_weight = (self.up == 1) # slightly faster + x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up, + padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, + fused_modconv=fused_modconv) + + act_gain = self.act_gain * gain + act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None + x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp) + return x + + def extra_repr(self): + return ' '.join([ + f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},', + f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}']) + + +def aware3d(x): + if isinstance(x, list): + x_xy, x_yz, x_zx = x + B, _, H, W = x_xy.shape + B *= 3 + else: + x_ = x.view(-1, 3, x.shape[1], x.shape[2], x.shape[3]) + x_xy, x_yz, x_zx = x_[:, 0], x_[:, 1], x_[:, 2] + B, _, H, W = x.shape + x_zy = x_yz.permute(0, 1, 3, 2) + x_xz = x_zx.permute(0, 1, 3, 2) + x_yx = x_xy.permute(0, 1, 3, 2) + + x_zy_pz = 
x_zy.mean(dim=-1, keepdim=True).repeat(1, 1, 1, x_xy.shape[-1]) + x_xz_pz = x_xz.mean(dim=-2, keepdim=True).repeat(1, 1, x_xy.shape[-2], 1) + x_xy_ = torch.cat([x_xy, x_zy_pz, x_xz_pz], 1) + + x_yx_px = x_yx.mean(dim=-2, keepdim=True).repeat(1, 1, x_yz.shape[-2], 1) + x_xz_px = x_xz.mean(dim=-1, keepdim=True).repeat(1, 1, 1, x_yz.shape[-1]) + x_yz_ = torch.cat([x_yx_px, x_yz, x_xz_px], 1) + + x_yx_py = x_yx.mean(dim=-1, keepdim=True).repeat(1, 1, 1, x_zx.shape[-1]) + x_zy_py = x_zy.mean(dim=-2, keepdim=True).repeat(1, 1, x_zx.shape[-2], 1) + x_zx_ = torch.cat([x_yx_py, x_zy_py, x_zx], 1) + + x = torch.cat([x_xy_[:, None], x_yz_[:, None], x_zx_[:, None]], 1).view(B, -1, H, W) + return x + + +def aware3d_att(x): + x_ = x.view(-1, 3, x.shape[1], x.shape[2], x.shape[3]) + x_cyx, x_czy, x_cxz = x_[:, 0], x_[:, 1], x_[:, 2] + + x_yxc = x_cyx.permute(0, 2, 3, 1) + x_ycz = x_czy.permute(0, 3, 1, 2) + x_yzc = x_czy.permute(0, 3, 2, 1) + x_yxz = torch.einsum('byxc,bycz->byxz', x_yxc, x_ycz) + x_yxz = torch.softmax(x_yxz, dim=-1) + x_cyx_f_czy = torch.einsum('byxz,byzc->byxc', x_yxz, x_yzc).permute(0, 3, 1, 2) + x_xyc = x_cyx.permute(0, 3, 2, 1) + x_xcz = x_cxz.permute(0, 2, 1, 3) + x_xzc = x_cxz.permute(0, 2, 3, 1) + x_xyz = torch.einsum('bxyc,bxcz->bxyz', x_xyc, x_xcz) + x_xyz = torch.softmax(x_xyz, dim=-1) + x_cyx_f_cxz = torch.einsum('bxyz,bxzc->bxyc', x_xyz, x_xzc).permute(0, 3, 2, 1) + x_cyx_ = torch.cat([x_cyx, x_cyx_f_czy, x_cyx_f_cxz], 1) + + x_zyc = x_czy.permute(0, 2, 3, 1) + x_zcx = x_cxz.permute(0, 3, 1, 2) + x_zxc = x_cxz.permute(0, 3, 2, 1) + x_zyx = torch.einsum('bzyc,bzcx->bzyx', x_zyc, x_zcx) + x_zyx = torch.softmax(x_zyx, dim=-1) + x_czy_f_cxz = torch.einsum('bzyx,bzxc->bzyc', x_zyx, x_zxc).permute(0, 3, 1, 2) + x_ycx = x_cyx.permute(0, 2, 1, 3) + x_yzx = torch.einsum('byzc,bycx->byzx', x_yzc, x_ycx) + x_yzx = torch.softmax(x_yzx, dim=-1) + x_czy_f_cyx = torch.einsum('byzx,byxc->byzc', x_yzx, x_yxc).permute(0, 3, 2, 1) + x_czy_ = torch.cat([x_czy, x_czy_f_cxz, x_czy_f_cyx], 1) + + x_xcy = x_cyx.permute(0, 3, 1, 2) + x_xzy = torch.einsum('bxzc,bxcy->bxzy', x_xzc, x_xcy) + x_xzy = torch.softmax(x_xzy, dim=-1) + x_cxz_f_cyx = torch.einsum('bxzy,bxyc->bxzc', x_xzy, x_xyc).permute(0, 3, 1, 2) + x_zcy = x_czy.permute(0, 2, 1, 3) + x_zxy = torch.einsum('bzxc,bzcy->bzxy', x_zxc, x_zcy) + x_zxy = torch.softmax(x_zxy, dim=-1) + x_cxz_f_czy = torch.einsum('bzxy,bzyc->bzxc', x_zxy, x_zyc).permute(0, 3, 2, 1) + x_cxz_ = torch.cat([x_cxz, x_cxz_f_cyx, x_cxz_f_czy], 1) + + x = torch.cat([x_cyx_[:, None], x_czy_[:, None], x_cxz_[:, None]], 1).view(x.shape[0], -1, x.shape[2], x.shape[3]) + return x + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class ToRGBLayer(torch.nn.Module): + def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False, + roll_out=None): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.w_dim = w_dim + self.conv_clamp = conv_clamp + self.roll_out = roll_out + + affine_scale = 1 + if self.roll_out in ['b', 'a']: + affine_scale = 9 + elif self.roll_out in ['s']: + affine_scale = 3 + self.affine = FullyConnectedLayer(w_dim, in_channels * affine_scale, bias_init=1) + memory_format = torch.channels_last if channels_last else torch.contiguous_format + self.weight = torch.nn.Parameter(torch.randn( + [out_channels, in_channels * (1, 3)[self.roll_out in ['b', 'a']], + kernel_size, kernel_size]).to(memory_format=memory_format)) 
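+ # `(1, 3)[cond]` is shorthand for `3 if cond else 1`: with roll_out in {'b', 'a'} each tri-plane
+ # is first concatenated with features aggregated from the other two planes (aware3d / aware3d_att
+ # defined above), so the modulated conv consumes 3x the input channels and the affine above emits
+ # per-plane styles (affine_scale = 9, reshaped in forward() to 3 planes x 3*in_channels).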
+ self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) + + def forward(self, x, w, fused_modconv=True): + styles = self.affine(w) * self.weight_gain + if self.roll_out in ['b', 'a', 's']: + styles = styles.view(styles.shape[0], 3, styles.shape[1] // 3).view(styles.shape[0] * 3, + styles.shape[1] // 3) + if self.roll_out in ['b', 'a', ]: + x = aware3d_att(x) if self.roll_out == 'a' else aware3d(x) + x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv) + x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp) + return x + + def extra_repr(self): + return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisBlock(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels, 0 = first block. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this block. + img_channels, # Number of output color channels. + is_last, # Is this the last block? + up=2, + architecture='skip', # Architecture: 'orig', 'skip', 'resnet'. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16=False, # Use FP16 for this block? + fp16_channels_last=False, # Use channels-last memory format with FP16? + fused_modconv_default=True, + # Default value of fused_modconv. 'inference_only' = True for inference, False for training. + roll_out=None, + **layer_kwargs, # Arguments for SynthesisLayer. 
+ ): + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.w_dim = w_dim + self.resolution = resolution + self.img_channels = img_channels + self.is_last = is_last + self.up = up + self.roll_out = roll_out + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = (use_fp16 and fp16_channels_last) + self.fused_modconv_default = fused_modconv_default + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.num_conv = 0 + self.num_torgb = 0 + + if in_channels == 0: + self.const = torch.nn.Parameter(torch.randn([(1, 3)[self.roll_out in ['b', 'a']], out_channels, resolution, + resolution * (1, 3)[self.roll_out == 'w']])) + + if in_channels != 0: + self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=self.up, + roll_out=roll_out, + resample_filter=resample_filter, conv_clamp=conv_clamp, + channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, roll_out=roll_out, + conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + if is_last or architecture == 'skip': + self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, + conv_clamp=conv_clamp, channels_last=self.channels_last, roll_out=self.roll_out) + self.num_torgb += 1 + + if in_channels != 0 and architecture == 'resnet': + self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, + resample_filter=resample_filter, channels_last=self.channels_last) + + def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs): + _ = update_emas # unused + misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim]) + w_iter = iter(ws.unbind(dim=1)) + if ws.device.type != 'cuda': + force_fp32 = True + dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format + if fused_modconv is None: + fused_modconv = self.fused_modconv_default + if fused_modconv == 'inference_only': + fused_modconv = (not self.training) + + # Input. + if self.in_channels == 0: + x = self.const.to(dtype=dtype, memory_format=memory_format) + x = x.repeat([ws.shape[0], 1, 1, 1]) + else: + misc.assert_shape(x, [None, self.in_channels, self.resolution // self.up, + self.resolution // self.up * (1, 3)[self.roll_out == 'w']]) + x = x.to(dtype=dtype, memory_format=memory_format) + + # Main layers. + if self.in_channels == 0: + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + elif self.architecture == 'resnet': + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) + x = y.add_(x) + else: + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + + # ToRGB. 
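+ # With the 'skip' architecture, the running RGB image is upsampled between resolutions and each block's ToRGB output is accumulated into it.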
+ if img is not None and self.up > 1: + misc.assert_shape(img, [None, self.img_channels, self.resolution // self.up, + self.resolution // self.up * (1, 3)[self.roll_out == 'w']]) + img = upfirdn2d.upsample2d(img, self.resample_filter) + if self.is_last or self.architecture == 'skip': + y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) + y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) + img = img.add_(y) if img is not None else y + + assert x.dtype == dtype + assert img is None or img.dtype == torch.float32 + return x, img + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class Hierarchy3DAwareSynthesisNetwork(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + **block_kwargs, # Arguments for SynthesisBlock. + ): + + aware3d_att=False + aware3d_res = [4,8,16,32,64,128,256] + add_block = 0 + + assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 + super().__init__() + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.num_fp16_res = num_fp16_res + self.add_block = add_block + self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)] + # channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + + self.num_ws = 0 + for res in self.block_resolutions: + in_channels = img_channels if res > 4 else 0 + out_channels = img_channels + use_fp16 = (res >= fp16_resolution) + is_last = (res == self.img_resolution) and self.add_block == 0 + block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, + img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs) + self.num_ws += block.num_conv + if is_last: + self.num_ws += block.num_torgb + setattr(self, f'b{res}', block) + if res in aware3d_res: + block3d = Aware3DBlock(img_channels, res, w_dim, aware3d_att, + block_kwargs.copy()) + setattr(self, f'b3d{res}', block3d) + + + def forward(self, ws, **block_kwargs): + block_ws = [] + with torch.autograd.profiler.record_function('split_ws'): + misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) + ws = ws.to(torch.float32) + w_idx = 0 + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) + w_idx += block.num_conv + + x = img = img3d = None + feature_maps = {} + last_has_block3d = False + for res, cur_ws in zip(self.block_resolutions, block_ws): + block = getattr(self, f'b{res}') + block3d = getattr(self, f'b3d{res}', None) + if last_has_block3d and block3d is None: + assert NotImplementedError + img = img3d.view(-1, 3, img3d.shape[-3], img.shape[-2], img.shape[-1]).view(img.shape) + # 2D Branch + x, img = block(x, img, cur_ws, **block_kwargs) # 2D Synthesis Block + + # 3D Branch + if block3d is not None: + last_has_block3d = True + img3d = block3d(img3d, img, cur_ws, block_kwargs) # 3D-Aware Block + 
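+ # Store the 3D-aware feature map produced for this resolution; list-valued (multi-scale) outputs are not supported on this path.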
if isinstance(img3d, list): + assert NotImplementedError + else: + feature_maps[res] = img3d + else: + assert NotImplementedError + + return feature_maps + + def extra_repr(self): + return ' '.join([ + f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', + f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', + f'num_fp16_res={self.num_fp16_res:d}']) + + +@persistence.persistent_class +class SR3DBlock(torch.nn.Module): + def __init__(self, img_channels, img_resolution, w_dim, block_kwargs): + super().__init__() + block_kwargs['roll_out'] = 's' + self.block2 = SynthesisBlock(img_channels // 3, img_channels // 3, w_dim=w_dim, resolution=img_resolution * 2, + up=2, + img_channels=32, is_last=True, use_fp16=False, **block_kwargs) + self.block3 = SynthesisBlock(img_channels // 3, img_channels // 3, w_dim=w_dim, resolution=img_resolution * 2, + up=1, + img_channels=32, is_last=True, use_fp16=False, **block_kwargs) + + def forward(self, img, ws): + ws = ws[:, -1:, :].repeat(1, 3, 1) + img = img.view(img.shape[0], 3, -1, img.shape[-2], img.shape[-1]).view(img.shape[0] * 3, -1, img.shape[-2], + img.shape[-1]) + x, img2 = self.block2(img, None, ws) + x, img3 = self.block3(img2, None, ws) + img2 = img2.view(-1, 3, img2.shape[-3], img2.shape[-2], img2.shape[-1]).view(-1, 3 * img2.shape[-3], + img2.shape[-2], img2.shape[-1]) + img3 = img3.view(-1, 3, img3.shape[-3], img3.shape[-2], img.shape[-1]).view(-1, 3 * img3.shape[-3], + img3.shape[-2], img3.shape[-1]) + + return [img2, img3] + + +# ---------------------------------------------------------------------------- +@persistence.persistent_class +class Aware3DBlock(torch.nn.Module): + + def __init__(self, img_channels, img_resolution, w_dim, aware3d_att, block_kwargs): + super().__init__() + block_kwargs['roll_out'] = ('b', 'a')[aware3d_att] + up = 2 + self.block = SynthesisBlock(img_channels // 3, img_channels // 3, w_dim=w_dim, resolution=img_resolution * up, + up=up, + img_channels=img_channels // 3, is_last=True, use_fp16=False, **block_kwargs) + + def forward(self, x, img, ws, block_kwargs): + img = img.view(img.shape[0], 3, -1, img.shape[-2], img.shape[-1]).view(img.shape[0] * 3, -1, img.shape[-2], + img.shape[-1]) + if x is not None: + img = img + x + + ws = ws[:, -1:, :].repeat(1, 3, 1) + _, img = self.block(img, None, ws, **block_kwargs) + return img + + +@persistence.persistent_class +class Hierarchy3DAwareGenerator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + mapping_kwargs={}, # Arguments for MappingNetwork. + **synthesis_kwargs, # Arguments for SynthesisNetwork. 
+ ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.synthesis = Hierarchy3DAwareSynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, + **synthesis_kwargs) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, + update_emas=update_emas) + img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + return img + + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisNetwork(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base = 32768, # Overall multiplier for the number of channels. + channel_max = 512, # Maximum number of channels in any layer. + num_fp16_res = 4, # Use FP16 for the N highest resolutions. + **block_kwargs, # Arguments for SynthesisBlock. + ): + assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 + super().__init__() + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.num_fp16_res = num_fp16_res + self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + self.num_ws = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res // 2] if res > 4 else 0 + out_channels = channels_dict[res] + use_fp16 = (res >= fp16_resolution) + is_last = (res == self.img_resolution) + block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, + img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs) + self.num_ws += block.num_conv + if is_last: + self.num_ws += block.num_torgb + setattr(self, f'b{res}', block) + + def forward(self, ws, **block_kwargs): + block_ws = [] + with torch.autograd.profiler.record_function('split_ws'): + misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) + ws = ws.to(torch.float32) + w_idx = 0 + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) + w_idx += block.num_conv + + x = img = None + for res, cur_ws in zip(self.block_resolutions, block_ws): + block = getattr(self, f'b{res}') + x, img = block(x, img, cur_ws, **block_kwargs) + return img + + def extra_repr(self): + return ' '.join([ + f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', + f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', + f'num_fp16_res={self.num_fp16_res:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class Generator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. 
+ img_channels, # Number of output color channels. + mapping_kwargs = {}, # Arguments for MappingNetwork. + **synthesis_kwargs, # Arguments for SynthesisNetwork. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + return img +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DiscriminatorBlock(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels, 0 = first block. + tmp_channels, # Number of intermediate channels. + out_channels, # Number of output channels. + resolution, # Resolution of this block. + img_channels, # Number of input color channels. + first_layer_idx, # Index of the first layer. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + activation='lrelu', # Activation function: 'relu', 'lrelu', etc. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16=False, # Use FP16 for this block? + fp16_channels_last=False, # Use channels-last memory format with FP16? + freeze_layers=0, # Freeze-D: Number of layers to freeze. 
+ ): + assert in_channels in [0, tmp_channels] + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.resolution = resolution + self.img_channels = img_channels + self.first_layer_idx = first_layer_idx + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = (use_fp16 and fp16_channels_last) + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + + self.num_layers = 0 + + def trainable_gen(): + while True: + layer_idx = self.first_layer_idx + self.num_layers + trainable = (layer_idx >= freeze_layers) + self.num_layers += 1 + yield trainable + + trainable_iter = trainable_gen() + + if in_channels == 0 or architecture == 'skip': + self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation, + trainable=next(trainable_iter), conv_clamp=conv_clamp, + channels_last=self.channels_last) + + self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation, + trainable=next(trainable_iter), conv_clamp=conv_clamp, + channels_last=self.channels_last) + + self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2, + trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, + channels_last=self.channels_last) + + if architecture == 'resnet': + self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2, + trainable=next(trainable_iter), resample_filter=resample_filter, + channels_last=self.channels_last) + + def forward(self, x, img, force_fp32=False): + if (x if x is not None else img).device.type != 'cuda': + force_fp32 = True + dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format + + # Input. + if x is not None: + misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) + x = x.to(dtype=dtype, memory_format=memory_format) + + # FromRGB. + if self.in_channels == 0 or self.architecture == 'skip': + misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) + img = img.to(dtype=dtype, memory_format=memory_format) + y = self.fromrgb(img) + x = x + y if x is not None else y + img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None + + # Main layers. + if self.architecture == 'resnet': + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x) + x = self.conv1(x, gain=np.sqrt(0.5)) + x = y.add_(x) + else: + x = self.conv0(x) + x = self.conv1(x) + + assert x.dtype == dtype + return x, img + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + + + +#---------------------------------------------------------------------------- + + +@persistence.persistent_class +class MinibatchStdLayer(torch.nn.Module): + def __init__(self, group_size, num_channels=1): + super().__init__() + self.group_size = group_size + self.num_channels = num_channels + + def forward(self, x): + N, C, H, W = x.shape + with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants + G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N + F = self.num_channels + c = C // F + + y = x.reshape(G, -1, F, c, H, + W) # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c. 
+ y = y - y.mean(dim=0) # [GnFcHW] Subtract mean over group. + y = y.square().mean(dim=0) # [nFcHW] Calc variance over group. + y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group. + y = y.mean(dim=[2, 3, 4]) # [nF] Take average over channels and pixels. + y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions. + y = y.repeat(G, 1, H, W) # [NFHW] Replicate over group and pixels. + x = torch.cat([x, y], dim=1) # [NCHW] Append to input as new channels. + return x + + def extra_repr(self): + return f'group_size={self.group_size}, num_channels={self.num_channels:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DiscriminatorEpilogue(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels. + cmap_dim, # Dimensionality of mapped conditioning label, 0 = no label. + resolution, # Resolution of this block. + img_channels, # Number of input color channels. + architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. + mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, None = entire minibatch. + mbstd_num_channels = 1, # Number of features for the minibatch standard deviation layer, 0 = disable. + activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. + conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. + ): + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.cmap_dim = cmap_dim + self.resolution = resolution + self.img_channels = img_channels + self.architecture = architecture + + if architecture == 'skip': + self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation) + self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None + self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp) + self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation) + self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim) + + + def get_flatten_x(self, x, img, force_fp32=False): + misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW] + _ = force_fp32 # unused + dtype = torch.float32 + memory_format = torch.contiguous_format + + # FromRGB. + x = x.to(dtype=dtype, memory_format=memory_format) + if self.architecture == 'skip': + misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) + img = img.to(dtype=dtype, memory_format=memory_format) + x = x + self.fromrgb(img) + + # Main layers. + if self.mbstd is not None: + x = self.mbstd(x) + x = self.conv(x) + + flatten_x = x.flatten(1) + + return flatten_x + + def forward(self, flatten_x, cmap, force_fp32=False): + # misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW] + # _ = force_fp32 # unused + # dtype = torch.float32 + # memory_format = torch.contiguous_format + # + # # FromRGB. + # x = x.to(dtype=dtype, memory_format=memory_format) + # if self.architecture == 'skip': + # misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) + # img = img.to(dtype=dtype, memory_format=memory_format) + # x = x + self.fromrgb(img) + # + # # Main layers. 
+ # if self.mbstd is not None: + # x = self.mbstd(x) + # x = self.conv(x) + + misc.assert_shape(flatten_x, [None, self.in_channels * self.resolution * self.resolution]) + dtype = torch.float32 + + x = self.fc(flatten_x) + x = self.out(x) + + # Conditioning. + if self.cmap_dim > 0: + misc.assert_shape(cmap, [None, self.cmap_dim]) + x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) + + assert x.dtype == dtype + return x + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class Discriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base = 32768, # Overall multiplier for the number of channels. + channel_max = 512, # Maximum number of channels in any layer. + num_fp16_res = 4, # Use FP16 for the N highest resolutions. + conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim = None, # Dimensionality of mapped conditioning label, None = default. + block_kwargs = {}, # Arguments for DiscriminatorBlock. + mapping_kwargs = {}, # Arguments for MappingNetwork. + epilogue_kwargs = {}, # Arguments for DiscriminatorEpilogue. + ): + super().__init__() + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs) + + def forward(self, img, c, update_emas=False, **block_kwargs): + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/networks_stylegan3.py b/3DPortraitGAN_pyramid/training/networks_stylegan3.py new file mode 
100644 index 0000000..40e5508 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/networks_stylegan3.py @@ -0,0 +1,517 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Generator architecture from the paper +"Alias-Free Generative Adversarial Networks".""" + +import numpy as np +import scipy.signal +import scipy.optimize +import torch +from torch_utils import misc +from torch_utils import persistence +from torch_utils.ops import conv2d_gradfix +from torch_utils.ops import filtered_lrelu +from torch_utils.ops import bias_act + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def modulated_conv2d( + x, # Input tensor: [batch_size, in_channels, in_height, in_width] + w, # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width] + s, # Style tensor: [batch_size, in_channels] + demodulate = True, # Apply weight demodulation? + padding = 0, # Padding: int or [padH, padW] + input_gain = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels] +): + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + batch_size = int(x.shape[0]) + out_channels, in_channels, kh, kw = w.shape + misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk] + misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] + misc.assert_shape(s, [batch_size, in_channels]) # [NI] + + # Pre-normalize inputs. + if demodulate: + w = w * w.square().mean([1,2,3], keepdim=True).rsqrt() + s = s * s.square().mean().rsqrt() + + # Modulate weights. + w = w.unsqueeze(0) # [NOIkk] + w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Demodulate weights. + if demodulate: + dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO] + w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Apply input scaling. + if input_gain is not None: + input_gain = input_gain.expand(batch_size, in_channels) # [NI] + w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Execute as one fused op using grouped convolution. + x = x.reshape(1, -1, *x.shape[2:]) + w = w.reshape(-1, in_channels, kh, kw) + x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size) + x = x.reshape(batch_size, -1, *x.shape[2:]) + return x + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class FullyConnectedLayer(torch.nn.Module): + def __init__(self, + in_features, # Number of input features. + out_features, # Number of output features. + activation = 'linear', # Activation function: 'relu', 'lrelu', etc. + bias = True, # Apply additive bias before the activation function? + lr_multiplier = 1, # Learning rate multiplier. + weight_init = 1, # Initial standard deviation of the weight tensor. + bias_init = 0, # Initial value of the additive bias. 
+ ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.activation = activation + self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) * (weight_init / lr_multiplier)) + bias_init = np.broadcast_to(np.asarray(bias_init, dtype=np.float32), [out_features]) + self.bias = torch.nn.Parameter(torch.from_numpy(bias_init / lr_multiplier)) if bias else None + self.weight_gain = lr_multiplier / np.sqrt(in_features) + self.bias_gain = lr_multiplier + + def forward(self, x): + w = self.weight.to(x.dtype) * self.weight_gain + b = self.bias + if b is not None: + b = b.to(x.dtype) + if self.bias_gain != 1: + b = b * self.bias_gain + if self.activation == 'linear' and b is not None: + x = torch.addmm(b.unsqueeze(0), x, w.t()) + else: + x = x.matmul(w.t()) + x = bias_act.bias_act(x, b, act=self.activation) + return x + + def extra_repr(self): + return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class MappingNetwork(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality, 0 = no labels. + w_dim, # Intermediate latent (W) dimensionality. + num_ws, # Number of intermediate latents to output. + num_layers = 2, # Number of mapping layers. + lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers. + w_avg_beta = 0.998, # Decay for tracking the moving average of W during training. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.num_ws = num_ws + self.num_layers = num_layers + self.w_avg_beta = w_avg_beta + + # Construct layers. + self.embed = FullyConnectedLayer(self.c_dim, self.w_dim) if self.c_dim > 0 else None + features = [self.z_dim + (self.w_dim if self.c_dim > 0 else 0)] + [self.w_dim] * self.num_layers + for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]): + layer = FullyConnectedLayer(in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier) + setattr(self, f'fc{idx}', layer) + self.register_buffer('w_avg', torch.zeros([w_dim])) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): + misc.assert_shape(z, [None, self.z_dim]) + if truncation_cutoff is None: + truncation_cutoff = self.num_ws + + # Embed, normalize, and concatenate inputs. + x = z.to(torch.float32) + x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt() + if self.c_dim > 0: + misc.assert_shape(c, [None, self.c_dim]) + y = self.embed(c.to(torch.float32)) + y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt() + x = torch.cat([x, y], dim=1) if x is not None else y + + # Execute layers. + for idx in range(self.num_layers): + x = getattr(self, f'fc{idx}')(x) + + # Update moving average of W. + if update_emas: + self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) + + # Broadcast and apply truncation. 
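+ # Replicate the single w vector across all num_ws slots; truncation then blends the first truncation_cutoff slots toward the tracked w_avg.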
+ x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) + if truncation_psi != 1: + x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) + return x + + def extra_repr(self): + return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisInput(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + channels, # Number of output channels. + size, # Output spatial size: int or [width, height]. + sampling_rate, # Output sampling rate. + bandwidth, # Output bandwidth. + ): + super().__init__() + self.w_dim = w_dim + self.channels = channels + self.size = np.broadcast_to(np.asarray(size), [2]) + self.sampling_rate = sampling_rate + self.bandwidth = bandwidth + + # Draw random frequencies from uniform 2D disc. + freqs = torch.randn([self.channels, 2]) + radii = freqs.square().sum(dim=1, keepdim=True).sqrt() + freqs /= radii * radii.square().exp().pow(0.25) + freqs *= bandwidth + phases = torch.rand([self.channels]) - 0.5 + + # Setup parameters and buffers. + self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels])) + self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0]) + self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image. + self.register_buffer('freqs', freqs) + self.register_buffer('phases', phases) + + def forward(self, w): + # Introduce batch dimension. + transforms = self.transform.unsqueeze(0) # [batch, row, col] + freqs = self.freqs.unsqueeze(0) # [batch, channel, xy] + phases = self.phases.unsqueeze(0) # [batch, channel] + + # Apply learned transformation. + t = self.affine(w) # t = (r_c, r_s, t_x, t_y) + t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y) + m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image. + m_r[:, 0, 0] = t[:, 0] # r'_c + m_r[:, 0, 1] = -t[:, 1] # r'_s + m_r[:, 1, 0] = t[:, 1] # r'_s + m_r[:, 1, 1] = t[:, 0] # r'_c + m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image. + m_t[:, 0, 2] = -t[:, 2] # t'_x + m_t[:, 1, 2] = -t[:, 3] # t'_y + transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform. + + # Transform frequencies. + phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2) + freqs = freqs @ transforms[:, :2, :2] + + # Dampen out-of-band frequencies that may occur due to the user-specified transform. + amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1) + + # Construct sampling grid. + theta = torch.eye(2, 3, device=w.device) + theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate + theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate + grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False) + + # Compute Fourier features. + x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel] + x = x + phases.unsqueeze(1).unsqueeze(2) + x = torch.sin(x * (np.pi * 2)) + x = x * amplitudes.unsqueeze(1).unsqueeze(2) + + # Apply trainable mapping. + weight = self.weight / np.sqrt(self.channels) + x = x @ weight.t() + + # Ensure correct shape. 
+ x = x.permute(0, 3, 1, 2) # [batch, channel, height, width] + misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])]) + return x + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},', + f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisLayer(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + is_torgb, # Is this the final ToRGB layer? + is_critically_sampled, # Does this layer use critical sampling? + use_fp16, # Does this layer use FP16? + + # Input & output specifications. + in_channels, # Number of input channels. + out_channels, # Number of output channels. + in_size, # Input spatial size: int or [width, height]. + out_size, # Output spatial size: int or [width, height]. + in_sampling_rate, # Input sampling rate (s). + out_sampling_rate, # Output sampling rate (s). + in_cutoff, # Input cutoff frequency (f_c). + out_cutoff, # Output cutoff frequency (f_c). + in_half_width, # Input transition band half-width (f_h). + out_half_width, # Output Transition band half-width (f_h). + + # Hyperparameters. + conv_kernel = 3, # Convolution kernel size. Ignored for final the ToRGB layer. + filter_size = 6, # Low-pass filter size relative to the lower resolution when up/downsampling. + lrelu_upsampling = 2, # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer. + use_radial_filters = False, # Use radially symmetric downsampling filter? Ignored for critically sampled layers. + conv_clamp = 256, # Clamp the output to [-X, +X], None = disable clamping. + magnitude_ema_beta = 0.999, # Decay rate for the moving average of input magnitudes. + ): + super().__init__() + self.w_dim = w_dim + self.is_torgb = is_torgb + self.is_critically_sampled = is_critically_sampled + self.use_fp16 = use_fp16 + self.in_channels = in_channels + self.out_channels = out_channels + self.in_size = np.broadcast_to(np.asarray(in_size), [2]) + self.out_size = np.broadcast_to(np.asarray(out_size), [2]) + self.in_sampling_rate = in_sampling_rate + self.out_sampling_rate = out_sampling_rate + self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling) + self.in_cutoff = in_cutoff + self.out_cutoff = out_cutoff + self.in_half_width = in_half_width + self.out_half_width = out_half_width + self.conv_kernel = 1 if is_torgb else conv_kernel + self.conv_clamp = conv_clamp + self.magnitude_ema_beta = magnitude_ema_beta + + # Setup parameters and buffers. + self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1) + self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel])) + self.bias = torch.nn.Parameter(torch.zeros([self.out_channels])) + self.register_buffer('magnitude_ema', torch.ones([])) + + # Design upsampling filter. + self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate)) + assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate + self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1 + self.register_buffer('up_filter', self.design_lowpass_filter( + numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate)) + + # Design downsampling filter. 
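+ # The internally upsampled signal is low-pass filtered back down to the output sampling rate; radially symmetric filters are used only for non-critically-sampled layers when enabled.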
+ self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate)) + assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate + self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1 + self.down_radial = use_radial_filters and not self.is_critically_sampled + self.register_buffer('down_filter', self.design_lowpass_filter( + numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial)) + + # Compute padding. + pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling. + pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling. + pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters. + pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3). + pad_hi = pad_total - pad_lo + self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])] + + def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False): + assert noise_mode in ['random', 'const', 'none'] # unused + misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])]) + misc.assert_shape(w, [x.shape[0], self.w_dim]) + + # Track input magnitude. + if update_emas: + with torch.autograd.profiler.record_function('update_magnitude_ema'): + magnitude_cur = x.detach().to(torch.float32).square().mean() + self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta)) + input_gain = self.magnitude_ema.rsqrt() + + # Execute affine layer. + styles = self.affine(w) + if self.is_torgb: + weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2)) + styles = styles * weight_gain + + # Execute modulated conv2d. + dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32 + x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles, + padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain) + + # Execute bias, filtered leaky ReLU, and clamping. + gain = 1 if self.is_torgb else np.sqrt(2) + slope = 1 if self.is_torgb else 0.2 + x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype), + up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp) + + # Ensure correct shape and dtype. + misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])]) + assert x.dtype == dtype + return x + + @staticmethod + def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False): + assert numtaps >= 1 + + # Identity filter. + if numtaps == 1: + return None + + # Separable Kaiser low-pass filter. + if not radial: + f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs) + return torch.as_tensor(f, dtype=torch.float32) + + # Radially symmetric jinc-based filter. 
+ x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs + r = np.hypot(*np.meshgrid(x, x)) + f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r) + beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2))) + w = np.kaiser(numtaps, beta) + f *= np.outer(w, w) + f /= np.sum(f) + return torch.as_tensor(f, dtype=torch.float32) + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},', + f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},', + f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},', + f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},', + f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},', + f'in_size={list(self.in_size)}, out_size={list(self.out_size)},', + f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisNetwork(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base = 32768, # Overall multiplier for the number of channels. + channel_max = 512, # Maximum number of channels in any layer. + num_layers = 14, # Total number of layers, excluding Fourier features and ToRGB. + num_critical = 2, # Number of critically sampled layers at the end. + first_cutoff = 2, # Cutoff frequency of the first layer (f_{c,0}). + first_stopband = 2**2.1, # Minimum stopband of the first layer (f_{t,0}). + last_stopband_rel = 2**0.3, # Minimum stopband of the last layer, expressed relative to the cutoff. + margin_size = 10, # Number of additional pixels outside the image. + output_scale = 0.25, # Scale factor for the output image. + num_fp16_res = 4, # Use FP16 for the N highest resolutions. + **layer_kwargs, # Arguments for SynthesisLayer. + ): + super().__init__() + self.w_dim = w_dim + self.num_ws = num_layers + 2 + self.img_resolution = img_resolution + self.img_channels = img_channels + self.num_layers = num_layers + self.num_critical = num_critical + self.margin_size = margin_size + self.output_scale = output_scale + self.num_fp16_res = num_fp16_res + + # Geometric progression of layer cutoffs and min. stopbands. + last_cutoff = self.img_resolution / 2 # f_{c,N} + last_stopband = last_cutoff * last_stopband_rel # f_{t,N} + exponents = np.minimum(np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1) + cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents # f_c[i] + stopbands = first_stopband * (last_stopband / first_stopband) ** exponents # f_t[i] + + # Compute remaining layer parameters. + sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i] + half_widths = np.maximum(stopbands, sampling_rates / 2) - cutoffs # f_h[i] + sizes = sampling_rates + self.margin_size * 2 + sizes[-2:] = self.img_resolution + channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max)) + channels[-1] = self.img_channels + + # Construct layers. 
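+ # Layer 0 is the Fourier-feature input; subsequent layers follow the cutoff/stopband schedule computed above, with the last num_critical layers critically sampled and the final layer acting as ToRGB.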
+ self.input = SynthesisInput( + w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]), + sampling_rate=sampling_rates[0], bandwidth=cutoffs[0]) + self.layer_names = [] + for idx in range(self.num_layers + 1): + prev = max(idx - 1, 0) + is_torgb = (idx == self.num_layers) + is_critically_sampled = (idx >= self.num_layers - self.num_critical) + use_fp16 = (sampling_rates[idx] * (2 ** self.num_fp16_res) > self.img_resolution) + layer = SynthesisLayer( + w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16, + in_channels=int(channels[prev]), out_channels= int(channels[idx]), + in_size=int(sizes[prev]), out_size=int(sizes[idx]), + in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]), + in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx], + in_half_width=half_widths[prev], out_half_width=half_widths[idx], + **layer_kwargs) + name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}' + setattr(self, name, layer) + self.layer_names.append(name) + + def forward(self, ws, **layer_kwargs): + misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) + ws = ws.to(torch.float32).unbind(dim=1) + + # Execute layers. + x = self.input(ws[0]) + for name, w in zip(self.layer_names, ws[1:]): + x = getattr(self, name)(x, w, **layer_kwargs) + if self.output_scale != 1: + x = x * self.output_scale + + # Ensure correct shape and dtype. + misc.assert_shape(x, [None, self.img_channels, self.img_resolution, self.img_resolution]) + x = x.to(torch.float32) + return x + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', + f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', + f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},', + f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class Generator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + mapping_kwargs = {}, # Arguments for MappingNetwork. + **synthesis_kwargs, # Arguments for SynthesisNetwork. 
+ ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + return img + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/training/neural_renderer.py b/3DPortraitGAN_pyramid/training/neural_renderer.py new file mode 100644 index 0000000..a4b5b8c --- /dev/null +++ b/3DPortraitGAN_pyramid/training/neural_renderer.py @@ -0,0 +1,245 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import math +import torch +from torch_utils import persistence +from training.networks_stylegan2 import ToRGBLayer, SynthesisNetwork + +from training.networks_stylegan2 import Hierarchy3DAwareGenerator as StyleGAN2Backbone +from training.volumetric_rendering.renderer import ImportanceRenderer +from training.volumetric_rendering.ray_sampler import RaySampler +import dnnlib +import numpy as np + + +@persistence.persistent_class +class TriPlaneGenerator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + sr_num_fp16_res = 0, + mapping_kwargs = {}, # Arguments for MappingNetwork. + rendering_kwargs = {}, + sr_kwargs = {}, + batch_size=1, + explicitly_symmetry=False, + thickness= 0.05, + **synthesis_kwargs, # Arguments for SynthesisNetwork. 
+ ): + super().__init__() + bcg_synthesis_kwargs = synthesis_kwargs.copy() + bcg_synthesis_kwargs["channel_base"] = 32768 + bcg_synthesis_kwargs["channel_max"] = 512 + + self.z_dim=z_dim + self.c_dim=c_dim + self.w_dim=w_dim + self.img_resolution=img_resolution + self.img_channels=img_channels + + self.trigrid_channel = 12 + self.decode_channel = 32 + + self.batch_size = batch_size + self.renderer = ImportanceRenderer(w_dim = w_dim, num_ws = 14, batch_size = self.batch_size,thickness =thickness,box_warp = rendering_kwargs['box_warp']) + self.ray_sampler = RaySampler() + + self.decoder = OSGDecoder(self.trigrid_channel, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), + 'decoder_output_dim': self.decode_channel, + 'decoder_activation': rendering_kwargs['decoder_activation']}) + + self.torgb = ToRGBLayer(self.decode_channel, 3, w_dim) if rendering_kwargs.get('use_torgb_raw', False) else None + + self.bcg_synthesis = SynthesisNetwork(w_dim, img_resolution=128, + img_channels=self.decode_channel, **bcg_synthesis_kwargs) if rendering_kwargs.get('use_background', False) else None + + self.neural_rendering_resolution = 64 + self.rendering_kwargs = rendering_kwargs + + self._last_planes = None + + self.explicitly_symmetry = explicitly_symmetry + + self.avg_c = torch.tensor([[ 1.0000e+00, 1.0505e-09, 4.3685e-08, -1.1805e-07, 0.0000e+00, + -9.9951e-01, 2.4033e-02, -1.1805e-07, 4.3714e-08, -2.4033e-02, + -9.9951e-01, 2.6992e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 1.0000e+00, 6.5104e+00, 0.0000e+00, 5.0000e-01, 0.0000e+00, + 6.5104e+00, 5.0000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+00]]).float().cuda() + + def flip_yaw(self, matrix): + flipped_matrix = matrix.clone() + flipped = flipped_matrix[:, :16].reshape(-1, 4, 4) + flipped[:, 0, 1] *= -1 + flipped[:, 0, 2] *= -1 + flipped[:, 1, 0] *= -1 + flipped[:, 2, 0] *= -1 + flipped[:, 0, 3] *= -1 + + flipped = flipped.reshape(-1, 16) + flipped_matrix[:, :16] = flipped.clone() + + return flipped_matrix + + + + def set_batch_size(self, batch_size): + self.renderer.set_batch_size(batch_size) + + def render_meshes(self,shape_pose_params,resolution,cameras): + + return self.renderer.render_meshes(shape_pose_params, resolution, cameras) + + + + def render_planes(self, ws, planes, c, neural_rendering_resolution=None, update_emas=False, chunk = None,render_bg = True,patch_resolution=None, + apply_def=False, pose_params = None,ws_bcg=None, + **synthesis_kwargs): + cam2world_matrix = c[:, :16].view(-1, 4, 4) + intrinsics = c[:, 16:25].view(-1, 3, 3) + + if neural_rendering_resolution is None: + neural_rendering_resolution = self.neural_rendering_resolution + + patch_info = [] + if patch_resolution is None: + # Create a batch of rays for volume rendering + ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution) + H = W = neural_rendering_resolution + else: + ray_origins, ray_directions,patch_info = self.ray_sampler.patch_forward(cam2world_matrix, intrinsics, + patch_resolution, + patch_scale=patch_resolution/neural_rendering_resolution) + H = W = patch_resolution + + + N, M, _ = ray_origins.shape + + + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + for res_k in planes: + # b, c, H,W + # planes[res_k] = planes[res_k].view(len(planes[res_k]), 3, -1, planes[res_k].shape[-2], planes[res_k].shape[-1]) + if len(planes[res_k].shape) == 4: + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, 
planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + if chunk is not None: + + feature_list, depth_list, weight_list = list(), list(), list() + for _ro, _rd in zip(torch.split(ray_origins, chunk, dim=1), torch.split(ray_directions, chunk, dim=1)): + render_output = self.renderer(planes, self.decoder, _ro, + _rd, self.rendering_kwargs, apply_def = apply_def, ws = ws, pose_params = pose_params ) # channels last + + _f = render_output['rgb_final'] + _d = render_output['depth_final'] + _w = render_output['weights'] + feature_list.append(_f) + depth_list.append(_d) + weight_list.append(_w) + feature_samples = torch.cat(feature_list, 1) + depth_samples = torch.cat(depth_list, 1) + weights_samples = torch.cat(weight_list, 1) + else: + + # Perform volume rendering + render_output = self.renderer(planes, self.decoder, ray_origins, + ray_directions, self.rendering_kwargs, apply_def = apply_def, ws = ws, pose_params = pose_params ) # channels last + # {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights.sum(2)} + feature_samples = render_output['rgb_final'] + depth_samples = render_output['depth_final'] + weights_samples = render_output['weights'] + + + # Reshape into 'raw' neural-rendered image + + feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous() + depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W) + weights_samples = weights_samples.permute(0, 2, 1).reshape(N, 1, H, W) + + if self.decoder.activation == "sigmoid": + feature_image = feature_image * 2 - 1 # Scale to (-1, 1), taken from ray marcher + # Generate Background + if self.bcg_synthesis and render_bg: + ws_bcg = ws[:,:self.bcg_synthesis.num_ws] if ws_bcg is None else ws_bcg[:,:self.bcg_synthesis.num_ws] + if ws_bcg.size(1) < self.bcg_synthesis.num_ws: + ws_bcg = torch.cat([ws_bcg, ws_bcg[:,-1:].repeat(1,self.bcg_synthesis.num_ws-ws_bcg.size(1),1)], 1) + bcg_image = self.bcg_synthesis(ws_bcg, update_emas=update_emas, **synthesis_kwargs) + bcg_image = torch.nn.functional.interpolate(bcg_image, size=feature_image.shape[2:], + mode='bilinear', align_corners=False, antialias=self.rendering_kwargs['sr_antialias']) + feature_image = feature_image + (1-weights_samples) * bcg_image + + # Generate Raw image + if self.torgb: + rgb_image = self.torgb(feature_image, ws[:,-1], fused_modconv=False) + rgb_image = rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + + else: + rgb_image = feature_image[:, :3] + + + mask_image = weights_samples * (1 + 2 * 0.001) - 0.001 + + return {'image_raw': rgb_image, 'image_depth': depth_image, "image_mask": mask_image,'patch_info':patch_info} + + + def sample_trigrid(self, coordinates, directions, planes, update_emas=False, **synthesis_kwargs): + # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes. 
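+ # Each trigrid entry is reshaped from (B*3, C, H, W) to (B, 3, C, H, W) so the renderer can index the three axis-aligned plane groups.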
+ # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + if len(planes[res_k].shape) == 4: + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + + + +from training.networks_stylegan2 import FullyConnectedLayer + +class OSGDecoder(torch.nn.Module): + def __init__(self, n_features, options): + super().__init__() + self.hidden_dim = 32 + + self.net = torch.nn.Sequential( + FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']), + torch.nn.Softplus(), + FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=options['decoder_lr_mul']) + ) + self.activation = options['decoder_activation'] + + def forward(self, sampled_features, ray_directions): + # Aggregate features + sampled_features = sampled_features.mean(1) + x = sampled_features + + N, M, C = x.shape + x = x.view(N*M, C) + + x = self.net(x) + x = x.view(N, M, -1) + rgb = x[..., 1:] + sigma = x[..., 0:1] + if self.activation == "sigmoid": + # Original EG3D + rgb = torch.sigmoid(rgb)*(1 + 2*0.001) - 0.001 + elif self.activation == "lrelu": + # StyleGAN2-style, use with toRGB + rgb = torch.nn.functional.leaky_relu(rgb, 0.2, inplace=True) * math.sqrt(2) + return {'rgb': rgb, 'sigma': sigma} + diff --git a/3DPortraitGAN_pyramid/training/smpl_triplane.py b/3DPortraitGAN_pyramid/training/smpl_triplane.py new file mode 100644 index 0000000..bd4ca20 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/smpl_triplane.py @@ -0,0 +1,492 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import math +import torch +from torch_utils import persistence +from training.networks_stylegan2 import ToRGBLayer, SynthesisNetwork + +from training.networks_stylegan2 import Hierarchy3DAwareGenerator as StyleGAN2Backbone +from training.volumetric_rendering.renderer import ImportanceRenderer +from training.volumetric_rendering.ray_sampler import RaySampler +import dnnlib + +""" +Mask guidance, background synthesis and tri-grid representation from the paper +"PanoHead: Geometry-Aware 3D Full-Head Synthesis in 360°" +https://github.com/SizheAn/PanoHead/blob/main/training/triplane.py +""" + +@persistence.persistent_class +class TriPlaneGenerator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + sr_num_fp16_res = 0, + mapping_kwargs = {}, # Arguments for MappingNetwork. 
+ rendering_kwargs = {}, + sr_kwargs = {}, + batch_size=1, + explicitly_symmetry=False, + thickness= 0.05, + **synthesis_kwargs, # Arguments for SynthesisNetwork. + ): + super().__init__() + bcg_synthesis_kwargs = synthesis_kwargs.copy() + bcg_synthesis_kwargs["channel_base"] = 32768 + bcg_synthesis_kwargs["channel_max"] = 512 + + self.z_dim=z_dim + self.c_dim=c_dim + self.w_dim=w_dim + self.img_resolution=img_resolution + self.img_channels=img_channels + + self.trigrid_channel = 12 + self.decode_channel = 32 + + self.batch_size = batch_size + self.renderer = ImportanceRenderer(w_dim = w_dim, num_ws = 14, batch_size = self.batch_size,thickness =thickness,box_warp = rendering_kwargs['box_warp']) + self.ray_sampler = RaySampler() + # self.backbone = StyleGAN2Backbone(z_dim, c_dim+6, w_dim, img_resolution=512, img_channels=self.trigrid_channel*3*rendering_kwargs['triplane_depth'], mapping_kwargs=mapping_kwargs, **synthesis_kwargs) + self.backbone = StyleGAN2Backbone(z_dim, c_dim + 6, w_dim, img_resolution=256, + img_channels=self.trigrid_channel * 3 * rendering_kwargs['triplane_depth'], + mapping_kwargs=mapping_kwargs, roll_out=None, + **synthesis_kwargs) # forbid roll_out in main G + + self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=self.decode_channel, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs) + self.decoder = OSGDecoder(self.trigrid_channel, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), + 'decoder_output_dim': self.decode_channel, + 'decoder_activation': rendering_kwargs['decoder_activation']}) + + self.torgb = ToRGBLayer(self.decode_channel, 3, w_dim) if rendering_kwargs.get('use_torgb_raw', False) else None + + self.bcg_synthesis = SynthesisNetwork(w_dim, img_resolution=self.superresolution.input_resolution, + img_channels=self.decode_channel, **bcg_synthesis_kwargs) if rendering_kwargs.get('use_background', False) else None + + self.pose_branch = GPoseBranch(z_dim = z_dim, c_dim = c_dim) + self.neural_rendering_resolution = 64 + self.rendering_kwargs = rendering_kwargs + + self._last_planes = None + + self.explicitly_symmetry = explicitly_symmetry + + self.avg_c = torch.tensor([[ 1.0000e+00, 1.0505e-09, 4.3685e-08, -1.1805e-07, 0.0000e+00, + -9.9951e-01, 2.4033e-02, -1.1805e-07, 4.3714e-08, -2.4033e-02, + -9.9951e-01, 2.6992e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 1.0000e+00, 6.5104e+00, 0.0000e+00, 5.0000e-01, 0.0000e+00, + 6.5104e+00, 5.0000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+00]]).float().cuda() + + self.plane_shapes = {} + + planes = self.backbone.synthesis(torch.zeros(4,self.backbone.synthesis.num_ws,w_dim), update_emas=False, **synthesis_kwargs) + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + for res_k in planes: + # b, c, H,W + # planes[res_k] = planes[res_k].view(len(planes[res_k]), 3, -1, planes[res_k].shape[-2], planes[res_k].shape[-1]) + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + if res_k not in self.plane_shapes: + self.plane_shapes[res_k] = planes[res_k].shape + + def flip_yaw(self, matrix): + flipped_matrix = matrix.clone() + flipped = flipped_matrix[:, :16].reshape(-1, 4, 4) + flipped[:, 0, 1] *= -1 + flipped[:, 0, 2] *= -1 + flipped[:, 1, 0] *= -1 + flipped[:, 2, 0] *= -1 + flipped[:, 0, 3] *= -1 + + flipped = 
flipped.reshape(-1, 16) + flipped_matrix[:, :16] = flipped.clone() + + return flipped_matrix + + def get_pose_params(self, z, c): + if self.explicitly_symmetry: + # check if c is a left face + theta = torch.atan2(c[:, [11]], c[:, [3]]) # math.atan2(z, x) + is_left = (theta >= -np.pi / 2) & (theta <= np.pi / 2) + + + flip_c = self.flip_yaw(c) + input_c = torch.where(is_left, flip_c, c) # if left, flip c + + pose_params = self.pose_branch(z, input_c) + + flip_pose_params = pose_params.clone() + flip_pose_params[:, [1, 2, 4, 5]] *= -1 # flip y and z axis angles + + pose_params = torch.where(is_left, flip_pose_params, pose_params) # if left, flip back pose_params + + return pose_params + else: + raise NotImplementedError + return self.pose_branch(z, c) + + def set_batch_size(self, batch_size): + self.renderer.set_batch_size(batch_size) + + def render_meshes(self,shape_pose_params,resolution,cameras): + + return self.renderer.render_meshes(shape_pose_params, resolution, cameras) + + + def mapping(self, z, c, p, truncation_psi=1, truncation_cutoff=None, update_emas=False): + if self.rendering_kwargs['c_gen_conditioning_zero']: + raise NotImplementedError + p = torch.zeros([c.shape[0], 6]).to(c.device) + c = self.avg_c.repeat(c.shape[0], 1).to(c.device) + c = torch.cat([c, p], dim=1) + + else: + + if p is None: + p = torch.zeros([c.shape[0],6]).to(c.device) + c = torch.cat([c,p],dim=1) + return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + + + def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, + apply_def=False, pose_params = None,ws_bcg=None, + **synthesis_kwargs): + cam2world_matrix = c[:, :16].view(-1, 4, 4) + intrinsics = c[:, 16:25].view(-1, 3, 3) + + if neural_rendering_resolution is None: + neural_rendering_resolution = self.neural_rendering_resolution + else: + self.neural_rendering_resolution = neural_rendering_resolution + + # Create a batch of rays for volume rendering + ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution) + + # Create triplanes by running StyleGAN backbone + N, M, _ = ray_origins.shape + if use_cached_backbone and self._last_planes is not None: + planes = self._last_planes + else: + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + + if cache_backbone: + self._last_planes = planes + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + for res_k in planes: + # b, c, H,W + # planes[res_k] = planes[res_k].view(len(planes[res_k]), 3, -1, planes[res_k].shape[-2], planes[res_k].shape[-1]) + planes[res_k] = planes[res_k].view(N, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + + # Perform volume rendering + render_output = self.renderer(planes, self.decoder, ray_origins, + ray_directions, self.rendering_kwargs, apply_def = apply_def, ws = ws, pose_params = pose_params ) # channels last + # {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights.sum(2)} + feature_samples = render_output['rgb_final'] + depth_samples = render_output['depth_final'] + weights_samples = render_output['weights'] + + + # Reshape into 'raw' neural-rendered image + H = W = self.neural_rendering_resolution + feature_image = feature_samples.permute(0, 2, 1).reshape(N, 
feature_samples.shape[-1], H, W).contiguous() + depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W) + weights_samples = weights_samples.permute(0, 2, 1).reshape(N, 1, H, W) + + # Run superresolution to get final image + if self.decoder.activation == "sigmoid": + feature_image = feature_image * 2 - 1 # Scale to (-1, 1), taken from ray marcher + # Generate Background + if self.bcg_synthesis: + ws_bcg = ws[:,:self.bcg_synthesis.num_ws] if ws_bcg is None else ws_bcg[:,:self.bcg_synthesis.num_ws] + if ws_bcg.size(1) < self.bcg_synthesis.num_ws: + ws_bcg = torch.cat([ws_bcg, ws_bcg[:,-1:].repeat(1,self.bcg_synthesis.num_ws-ws_bcg.size(1),1)], 1) + bcg_image = self.bcg_synthesis(ws_bcg, update_emas=update_emas, **synthesis_kwargs) + bcg_image = torch.nn.functional.interpolate(bcg_image, size=feature_image.shape[2:], + mode='bilinear', align_corners=False, antialias=self.rendering_kwargs['sr_antialias']) + feature_image = feature_image + (1-weights_samples) * bcg_image + + # Generate Raw image + if self.torgb: + rgb_image = self.torgb(feature_image, ws[:,-1], fused_modconv=False) + rgb_image = rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + + bcg_rgb_image = self.torgb(bcg_image, ws_bcg[:,-1], fused_modconv=False) + bcg_rgb_image = bcg_rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + else: + rgb_image = feature_image[:, :3] + bcg_rgb_image = bcg_image[:, :3] + # Run superresolution to get final image + sr_image = self.superresolution(rgb_image, feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'}) + + mask_image = weights_samples * (1 + 2 * 0.001) - 0.001 + + return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image, "image_mask": mask_image, "image_background":bcg_rgb_image} + + def gen_planes(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, + use_cached_backbone=False, + apply_def=False, pose_params=None, ws_bcg=None, + **synthesis_kwargs): + cam2world_matrix = c[:, :16].view(-1, 4, 4) + intrinsics = c[:, 16:25].view(-1, 3, 3) + + if neural_rendering_resolution is None: + neural_rendering_resolution = self.neural_rendering_resolution + else: + self.neural_rendering_resolution = neural_rendering_resolution + + # Create a batch of rays for volume rendering + ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution) + + # Create triplanes by running StyleGAN backbone + N, M, _ = ray_origins.shape + if use_cached_backbone and self._last_planes is not None: + planes = self._last_planes + else: + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + + if cache_backbone: + self._last_planes = planes + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + for res_k in planes: + # b, c, H,W + # planes[res_k] = planes[res_k].view(len(planes[res_k]), 3, -1, planes[res_k].shape[-2], planes[res_k].shape[-1]) + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + return planes,ws + + def render_planes(self, ws, planes, c, neural_rendering_resolution=None, update_emas=False, chunk = None, + apply_def=False, pose_params = None,ws_bcg=None, + **synthesis_kwargs): + cam2world_matrix = c[:, :16].view(-1, 4, 4) + intrinsics = c[:, 
16:25].view(-1, 3, 3) + + if neural_rendering_resolution is None: + neural_rendering_resolution = self.neural_rendering_resolution + + # Create a batch of rays for volume rendering + ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution) + + # Create triplanes by running StyleGAN backbone + N, M, _ = ray_origins.shape + + + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + for res_k in planes: + # b, c, H,W + # planes[res_k] = planes[res_k].view(len(planes[res_k]), 3, -1, planes[res_k].shape[-2], planes[res_k].shape[-1]) + if len(planes[res_k].shape) == 4: + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + if chunk is not None: + + feature_list, depth_list, weight_list = list(), list(), list() + for _ro, _rd in zip(torch.split(ray_origins, chunk, dim=1), torch.split(ray_directions, chunk, dim=1)): + render_output = self.renderer(planes, self.decoder, _ro, + _rd, self.rendering_kwargs, apply_def = apply_def, ws = ws, pose_params = pose_params ) # channels last + + _f = render_output['rgb_final'] + _d = render_output['depth_final'] + _w = render_output['weights'] + feature_list.append(_f) + depth_list.append(_d) + weight_list.append(_w) + feature_samples = torch.cat(feature_list, 1) + depth_samples = torch.cat(depth_list, 1) + weights_samples = torch.cat(weight_list, 1) + else: + + # Perform volume rendering + render_output = self.renderer(planes, self.decoder, ray_origins, + ray_directions, self.rendering_kwargs, apply_def = apply_def, ws = ws, pose_params = pose_params ) # channels last + # {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights.sum(2)} + feature_samples = render_output['rgb_final'] + depth_samples = render_output['depth_final'] + weights_samples = render_output['weights'] + + + # Reshape into 'raw' neural-rendered image + H = W = neural_rendering_resolution + feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous() + depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W) + weights_samples = weights_samples.permute(0, 2, 1).reshape(N, 1, H, W) + + # Run superresolution to get final image + if self.decoder.activation == "sigmoid": + feature_image = feature_image * 2 - 1 # Scale to (-1, 1), taken from ray marcher + # Generate Background + if self.bcg_synthesis: + ws_bcg = ws[:,:self.bcg_synthesis.num_ws] if ws_bcg is None else ws_bcg[:,:self.bcg_synthesis.num_ws] + if ws_bcg.size(1) < self.bcg_synthesis.num_ws: + ws_bcg = torch.cat([ws_bcg, ws_bcg[:,-1:].repeat(1,self.bcg_synthesis.num_ws-ws_bcg.size(1),1)], 1) + bcg_image = self.bcg_synthesis(ws_bcg, update_emas=update_emas, **synthesis_kwargs) + bcg_image = torch.nn.functional.interpolate(bcg_image, size=feature_image.shape[2:], + mode='bilinear', align_corners=False, antialias=self.rendering_kwargs['sr_antialias']) + feature_image = feature_image + (1-weights_samples) * bcg_image + + # Generate Raw image + if self.torgb: + rgb_image = self.torgb(feature_image, ws[:,-1], fused_modconv=False) + rgb_image = rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + + bcg_rgb_image = self.torgb(bcg_image, ws_bcg[:,-1], fused_modconv=False) + bcg_rgb_image = bcg_rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + else: + rgb_image = feature_image[:, :3] + bcg_rgb_image = bcg_image[:, :3] 
+ # Run superresolution to get final image + + mask_image = weights_samples * (1 + 2 * 0.001) - 0.001 + + return {'image_raw': rgb_image, 'image_depth': depth_image, "image_mask": mask_image, "image_background":bcg_rgb_image} + + def sample_trigrid(self, coordinates, directions, planes, update_emas=False, **synthesis_kwargs): + # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes. + # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + if len(planes[res_k].shape) == 4: + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + + def sample_ws(self, coordinates, directions, ws, update_emas=False, **synthesis_kwargs): + # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes. + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + def sample(self, coordinates, directions, z, c, p, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes. + ws = self.mapping(z, c, p,truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + # Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z' + planes = self.backbone.synthesis(ws, update_emas = update_emas, **synthesis_kwargs) + # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, + apply_def=False, pose_params=None, + **synthesis_kwargs): + # Render a batch of generated images. 
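+        # Map z -> ws conditioned on the camera label c and the (optional) body pose, then
+        # render; pose_params is forwarded so the volume can be deformed when apply_def=True.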
+ ws = self.mapping(z, c, pose_params,truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + # TODO + return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, + apply_def=apply_def, pose_params = pose_params, + **synthesis_kwargs) + + +from training.networks_stylegan2 import FullyConnectedLayer + +class OSGDecoder(torch.nn.Module): + def __init__(self, n_features, options): + super().__init__() + self.hidden_dim = 32 + + self.net = torch.nn.Sequential( + FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']), + torch.nn.Softplus(), + FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=options['decoder_lr_mul']) + ) + self.activation = options['decoder_activation'] + + def forward(self, sampled_features, ray_directions): + # Aggregate features + sampled_features = sampled_features.mean(1) + x = sampled_features + + N, M, C = x.shape + x = x.view(N*M, C) + + x = self.net(x) + x = x.view(N, M, -1) + rgb = x[..., 1:] + sigma = x[..., 0:1] + if self.activation == "sigmoid": + # Original EG3D + rgb = torch.sigmoid(rgb)*(1 + 2*0.001) - 0.001 + elif self.activation == "lrelu": + # StyleGAN2-style, use with toRGB + rgb = torch.nn.functional.leaky_relu(rgb, 0.2, inplace=True) * math.sqrt(2) + return {'rgb': rgb, 'sigma': sigma} + + +import numpy as np +class GPoseBranch(torch.nn.Module): + def __init__(self, z_dim, c_dim): + super().__init__() + hidden_dim = 64 + self.in_channel = z_dim + c_dim + # + # predict_betas = predict_transl = predict_scale = False + # predict_pose = True + + out_dim = 6 + + # if predict_betas: + # out_dim += num_betas + # if predict_transl: + # out_dim += 3 + # if predict_scale: + # out_dim += 1 + # if predict_pose: + # out_dim += 6 + + self.output_dim = out_dim + self.net = torch.nn.Sequential( + FullyConnectedLayer(self.in_channel, 128, activation='lrelu'), + FullyConnectedLayer(128, 32, activation='lrelu'), + FullyConnectedLayer(32, self.output_dim) + ) + + + def forward(self, z, c): + # misc.assert_shape(feature, [None, self.in_channel]) + # misc.assert_shape(camera_parameters, [None, 25]) + feature = torch.cat([z, c], dim=1) + + pose = self.net(feature) # (B, num_betas + 1 + 3 + 6) + + + return pose \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/superresolution.py b/3DPortraitGAN_pyramid/training/superresolution.py new file mode 100644 index 0000000..43321df --- /dev/null +++ b/3DPortraitGAN_pyramid/training/superresolution.py @@ -0,0 +1,292 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +"""Superresolution network architectures from the paper +"Efficient Geometry-aware 3D Generative Adversarial Networks".""" + +import torch +from training.networks_stylegan2 import Conv2dLayer, SynthesisLayer, ToRGBLayer +from torch_utils.ops import upfirdn2d +from torch_utils import persistence +from torch_utils import misc + +from training.networks_stylegan2 import SynthesisBlock +import numpy as np +from training.networks_stylegan3 import SynthesisLayer as AFSynthesisLayer + + +#---------------------------------------------------------------------------- + +# for 512x512 generation +@persistence.persistent_class +class SuperresolutionHybrid8X(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 512 + + use_fp16 = sr_num_fp16_res > 0 + self.input_resolution = 128 + self.sr_antialias = sr_antialias + self.block0 = SynthesisBlock(channels, 128, w_dim=512, resolution=256, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=512, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] != self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- + +# for 256x256 generation +@persistence.persistent_class +class SuperresolutionHybrid4X(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 256 + use_fp16 = sr_num_fp16_res > 0 + self.sr_antialias = sr_antialias + self.input_resolution = 128 + self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] < self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + 
+#---------------------------------------------------------------------------- + +# for 128 x 128 generation +@persistence.persistent_class +class SuperresolutionHybrid2X(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 128 + + use_fp16 = sr_num_fp16_res > 0 + self.input_resolution = 64 + self.sr_antialias = sr_antialias + self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=64, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=128, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] != self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- + +# TODO: Delete (here for backwards compatibility with old 256x256 models) +@persistence.persistent_class +class SuperresolutionHybridDeepfp32(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 256 + use_fp16 = sr_num_fp16_res > 0 + + self.input_resolution = 128 + self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] < self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisBlockNoUp(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels, 0 = first block. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this block. + img_channels, # Number of output color channels. + is_last, # Is this the last block? + architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'. 
+ resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. + conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16 = False, # Use FP16 for this block? + fp16_channels_last = False, # Use channels-last memory format with FP16? + fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training. + **layer_kwargs, # Arguments for SynthesisLayer. + ): + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.w_dim = w_dim + self.resolution = resolution + self.img_channels = img_channels + self.is_last = is_last + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = (use_fp16 and fp16_channels_last) + self.fused_modconv_default = fused_modconv_default + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.num_conv = 0 + self.num_torgb = 0 + + if in_channels == 0: + self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution])) + + if in_channels != 0: + self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, + conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, + conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + if is_last or architecture == 'skip': + self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, + conv_clamp=conv_clamp, channels_last=self.channels_last) + self.num_torgb += 1 + + if in_channels != 0 and architecture == 'resnet': + self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, + resample_filter=resample_filter, channels_last=self.channels_last) + + def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs): + _ = update_emas # unused + misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim]) + w_iter = iter(ws.unbind(dim=1)) + if ws.device.type != 'cuda': + force_fp32 = True + dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format + if fused_modconv is None: + fused_modconv = self.fused_modconv_default + if fused_modconv == 'inference_only': + fused_modconv = (not self.training) + + # Input. + if self.in_channels == 0: + x = self.const.to(dtype=dtype, memory_format=memory_format) + x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1]) + else: + misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) + x = x.to(dtype=dtype, memory_format=memory_format) + + # Main layers. + if self.in_channels == 0: + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + elif self.architecture == 'resnet': + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) + x = y.add_(x) + else: + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + + # ToRGB. 
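+        # 'skip' architecture: add this block's RGB contribution onto the running image.
+        # Unlike the standard SynthesisBlock, the running image is not upsampled here,
+        # since this block keeps the spatial resolution fixed.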
+ # if img is not None: + # misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2]) + # img = upfirdn2d.upsample2d(img, self.resample_filter) + if self.is_last or self.architecture == 'skip': + y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) + y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) + img = img.add_(y) if img is not None else y + + assert x.dtype == dtype + assert img is None or img.dtype == torch.float32 + return x, img + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + + +#---------------------------------------------------------------------------- + +# for 512x512 generation +@persistence.persistent_class +class SuperresolutionHybrid8XDC(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 512 + + use_fp16 = sr_num_fp16_res > 0 + self.input_resolution = 128 + self.sr_antialias = sr_antialias + self.block0 = SynthesisBlock(channels, 256, w_dim=512, resolution=256, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(256, 128, w_dim=512, resolution=512, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] != self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/training_loop.py b/3DPortraitGAN_pyramid/training/training_loop.py new file mode 100644 index 0000000..681de57 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/training_loop.py @@ -0,0 +1,714 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +"""Main training loop.""" + +import os +import random +import time +import copy +import json +import pickle +import psutil +import PIL.Image +import numpy as np +import torch +import dnnlib +from torch_utils import misc +from torch_utils import training_stats +from torch_utils.ops import conv2d_gradfix +from torch_utils.ops import grid_sample_gradfix + +import legacy +from metrics import metric_main,metric_utils +from camera_utils import LookAtPoseSampler +from training.crosssection_utils import sample_cross_section + +#---------------------------------------------------------------------------- + +def setup_snapshot_image_grid(training_set, random_seed=0): + rnd = np.random.RandomState(random_seed) + h = int(7680 * (training_set.image_shape[2]/512)) + w = int(4320 * (training_set.image_shape[2] / 512)) + gh = np.clip(h // training_set.image_shape[2], 7, 8) + gw = np.clip(w // training_set.image_shape[1], 4, 4) + + # No labels => show random subset of training samples. + # if not training_set.has_labels: + # all_indices = list(range(len(training_set))) + # rnd.shuffle(all_indices) + # grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)] + + # else: + # # Group training samples by label. + # label_groups = dict() # label => [idx, ...] + # for idx in range(len(training_set)): + # label = tuple(training_set.get_details(idx).raw_label.flat[::-1]) + # if label not in label_groups: + # label_groups[label] = [] + # label_groups[label].append(idx) + + # # Reorder. + # label_order = list(label_groups.keys()) + # rnd.shuffle(label_order) + # for label in label_order: + # rnd.shuffle(label_groups[label]) + + # # Organize into grid. + # grid_indices = [] + # for y in range(gh): + # label = label_order[y % len(label_order)] + # indices = label_groups[label] + # grid_indices += [indices[x % len(indices)] for x in range(gw)] + # label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))] + label_groups = dict() # label => [idx, ...] + for idx in range(len(training_set)): + label = tuple(training_set.get_details(idx).raw_label.flat[::-1]) + if label not in label_groups: + label_groups[label] = [] + label_groups[label].append(idx) + + # Reorder. + label_order = list(label_groups.keys()) + rnd.shuffle(label_order) + for label in label_order: + rnd.shuffle(label_groups[label]) + + # Organize into grid. + grid_indices = [] + for y in range(gh): + for x in range(gw//2): + label = label_order[(y + x*gh) % len(label_order)] + indices = list(set(label_groups[label])) + #grid_indices += [indices[x % len(indices)] for x in range(2)] + grid_indices += [indices[0], (indices[0]+ len(training_set)//2)%len(training_set) ] + label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))] + + + # Load data. 
+ images, segs, labels, poses = zip(*[training_set[i] for i in grid_indices]) + return (gw, gh), np.stack(images),np.stack(segs), np.stack(labels), np.stack(poses) + +#---------------------------------------------------------------------------- + +def save_image_grid(img, fname, drange, grid_size): + lo, hi = drange + img = np.asarray(img, dtype=np.float32) + img = (img - lo) * (255 / (hi - lo)) + img = np.rint(img).clip(0, 255).astype(np.uint8) + + gw, gh = grid_size + _N, C, H, W = img.shape + img = img.reshape([gh, gw, C, H, W]) + img = img.transpose(0, 3, 1, 4, 2) + img = img.reshape([gh * H, gw * W, C]) + + assert C in [1, 3] + if C == 1: + PIL.Image.fromarray(img[:, :, 0], 'L').save(fname) + if C == 3: + PIL.Image.fromarray(img, 'RGB').save(fname) + +#---------------------------------------------------------------------------- + +def training_loop( + run_dir = '.', # Output directory. + training_set_kwargs = {}, # Options for training set. + data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader. + G_kwargs = {}, # Options for generator network. + D_kwargs = {}, # Options for discriminator network. + G_opt_kwargs = {}, # Options for generator optimizer. + D_opt_kwargs = {}, # Options for discriminator optimizer. + augment_kwargs = None, # Options for augmentation pipeline. None = disable. + loss_kwargs = {}, # Options for loss function. + metrics = [], # Metrics to evaluate during training. + random_seed = 0, # Global random seed. + num_gpus = 1, # Number of GPUs participating in the training. + rank = 0, # Rank of the current process in [0, num_gpus[. + batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus. + batch_gpu = 4, # Number of samples processed at a time by one GPU. + ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights. + ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup. + G_reg_interval = None, # How often to perform regularization for G? None = disable lazy regularization. + D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization. + augment_p = 0, # Initial value of augmentation probability. + ada_target = None, # ADA target value. None = fixed p. + ada_interval = 4, # How often to perform ADA adjustment? + ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit. + total_kimg = 25000, # Total length of the training, measured in thousands of real images. + kimg_per_tick = 4, # Progress snapshot interval. + image_snapshot_ticks = 50, # How often to save image snapshots? None = disable. + network_snapshot_ticks = 50, # How often to save network snapshots? None = disable. + resume_pkl = None, # Network pickle to resume training from. + resume_kimg = 0, # First kimg to report when resuming training. + cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark? + abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks. + progress_fn = None, # Callback function for updating training progress. Called for all ranks. + train_g_pose_branch = None, + metric_pose_sample_mode = None, +): + print('Random seed: %d' % random_seed) + # Initialize. 
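+    # Seed every rank differently (random_seed * num_gpus + rank) so noise and data order
+    # are decorrelated across GPUs.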
+ start_time = time.time() + device = torch.device('cuda', rank) + np.random.seed(random_seed * num_gpus + rank) + torch.cuda.set_device(device) + torch.manual_seed(random_seed * num_gpus + rank) + torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed. + torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy. + torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy. + torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False # Improves numerical accuracy. + conv2d_gradfix.enabled = True # Improves training speed. # TODO: ENABLE + grid_sample_gradfix.enabled = False # Avoids errors with the augmentation pipe. + + # Load training set. + if rank == 0: + print('Loading training set...') + training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset + training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed) + training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs)) + if rank == 0: + print() + print('Num images: ', len(training_set)) + print('Image shape:', training_set.image_shape) + print('Label shape:', training_set.label_shape) + print('Pose shape:', training_set.pose_shape) + print() + print('>>>>>>>>>>>>>>> image_snapshot_ticks:', image_snapshot_ticks) + print('>>>>>>>>>>>>>>> network_snapshot_ticks:', network_snapshot_ticks) + + # Construct networks. + if rank == 0: + print('Constructing networks...') + common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels) + G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module + G.register_buffer('dataset_label_std', torch.tensor(training_set.get_label_std()).to(device)) + D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module + G_ema = copy.deepcopy(G).eval() + D_ema = copy.deepcopy(D).eval() + + # Resume from existing pickle. + if (resume_pkl is not None) and (rank == 0): + print(f'Resuming from "{resume_pkl}"') + with dnnlib.util.open_url(resume_pkl) as f: + resume_data = legacy.load_network_pkl(f) + for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]: + misc.copy_params_and_buffers(resume_data[name], module, require_all=False) + + if 'D_ema' in resume_data: + print(f'copy params of D_ema of "{resume_pkl} to D_ema') + misc.copy_params_and_buffers(resume_data['D_ema'], D_ema, require_all=False) + else: + print(f'copy params of D of "{resume_pkl} to D_ema') + misc.copy_params_and_buffers(resume_data['D'], D_ema, require_all=False) + + # Print network summary tables. + if rank == 0: + z = torch.empty([batch_gpu, G.z_dim], device=device) + c = torch.empty([batch_gpu, G.c_dim], device=device) + p = torch.empty([batch_gpu, 6], device=device) + img = misc.print_module_summary(G, [z, c, ]) + misc.print_module_summary(D, [img, c ]) + + print('plane_shapes:') + for res_k in G.plane_shapes: + print(res_k, G.plane_shapes[res_k]) + # Setup augmentation. 
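+    # The ADA pipeline is only constructed when augmentation is requested (augment_p > 0
+    # or an adaptive target is set); otherwise augment_pipe stays None.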
+ if rank == 0: + print('Setting up augmentation...') + augment_pipe = None + ada_stats = None + if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None): + augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module + augment_pipe.p.copy_(torch.as_tensor(augment_p)) + if ada_target is not None: + ada_stats = training_stats.Collector(regex='Loss/signs/real') + + # Distribute across GPUs. + if rank == 0: + print(f'Distributing across {num_gpus} GPUs...') + for module in [G, D, G_ema,D_ema, augment_pipe]: + if module is not None: + for param in misc.params_and_buffers(module): + if param.numel() > 0 and num_gpus > 1: + torch.distributed.broadcast(param, src=0) + + # Setup training phases. + if rank == 0: + print('Setting up training phases...') + loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe,rank = rank,**loss_kwargs) # subclass of training.loss.Loss + phases = [] + for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]: + params_list = [] + params_name_list = [] + for p_name, p in module.named_parameters(): + if name == 'G': + if 'aligned_SMPL' not in p_name: + if not train_g_pose_branch: + if 'pose_branch' not in p_name: + params_list.append(p) + params_name_list.append(p_name) + else: + params_list.append(p) + params_name_list.append(p_name) + else: + params_list.append(p) + params_name_list.append(p_name) + + + + if rank ==0: + print(f'params_name_list of {name}:',params_name_list) + + if reg_interval is None: + opt = dnnlib.util.construct_class_by_name(params=params_list, **opt_kwargs) # subclass of torch.optim.Optimizer + phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)] + + + else: # Lazy regularization. + mb_ratio = reg_interval / (reg_interval + 1) + opt_kwargs = dnnlib.EasyDict(opt_kwargs) + opt_kwargs.lr = opt_kwargs.lr * mb_ratio + opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas] + opt = dnnlib.util.construct_class_by_name(params=params_list, **opt_kwargs) # subclass of torch.optim.Optimizer + phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)] + phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)] + + + + for phase in phases: + phase.start_event = None + phase.end_event = None + if rank == 0: + phase.start_event = torch.cuda.Event(enable_timing=True) + phase.end_event = torch.cuda.Event(enable_timing=True) + print('phase: ',phase.name) + + # Export sample images. 
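+    # Build a fixed grid of real images/segmentations with matching latents, cameras and
+    # poses on rank 0; the same grid is reused for every image snapshot so results stay
+    # comparable across ticks.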
+ grid_size = None + grid_z = None + grid_c = None + if rank == 0: + print('Exporting sample images...') + grid_size, images,segs, labels,poses = setup_snapshot_image_grid(training_set=training_set,random_seed=random.randint(0, 1000000)) + save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size) + save_image_grid(segs, os.path.join(run_dir, 'segs.jpg'), drange=[0, 255], grid_size=grid_size) + grid_images = (torch.from_numpy(images).to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu) + grid_segs = (torch.from_numpy(segs).to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu) + + #grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu) + + if G.rendering_kwargs['c_gen_conditioning_zero']: + raise NotImplementedError + grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu) + else: + #raise NotImplementedError + grid_z = [] + for i in range(labels.shape[0]//2): + sample_z = torch.randn([1, G.z_dim], device=device) + grid_z.append(sample_z) + grid_z.append(sample_z) + grid_z = torch.cat(grid_z,dim=0).split(batch_gpu) + + + grid_c = torch.from_numpy(labels).to(device).split(batch_gpu) + grid_poses = torch.from_numpy(poses).to(device).split(batch_gpu) + + real_shape_real_pose = [] + for real_pose, c in zip(grid_poses, grid_c): + real_shape_pose_param = {'pose': real_pose} + real_shape_real_pose.append( + G_ema.render_meshes(real_shape_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + real_shape_real_pose = np.concatenate(real_shape_real_pose, axis=0) + save_image_grid(real_shape_real_pose, + os.path.join(run_dir, f'mesh_coarse_real_pose.png'), + drange=[0, 255], grid_size=grid_size) + #exit() + + # Initialize logs. + if rank == 0: + print('Initializing logs...') + stats_collector = training_stats.Collector(regex='.*') + stats_metrics = dict() + stats_jsonl = None + stats_tfevents = None + if rank == 0: + stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt') + try: + import torch.utils.tensorboard as tensorboard + stats_tfevents = tensorboard.SummaryWriter(run_dir) + except ImportError as err: + print('Skipping tfevents export:', err) + + # Train. + if rank == 0: + print(f'Training for {total_kimg} kimg...') + print() + cur_nimg = resume_kimg * 1000 + cur_tick = 0 + tick_start_nimg = cur_nimg + tick_start_time = time.time() + maintenance_time = tick_start_time - start_time + batch_idx = 0 + if progress_fn is not None: + progress_fn(0, total_kimg) + + + + while True: + # Fetch training data. 
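+        # Each iteration draws a real batch of (image, seg, camera, pose) and samples fresh
+        # z / camera / pose conditioning for every training phase.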
+ with torch.autograd.profiler.record_function('data_fetch'): + + phase_real_img, phase_real_seg, phase_real_c, phase_real_pose = next(training_set_iterator) + + + phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu) + phase_real_seg = (phase_real_seg.to(device).to(torch.float32) / 255.0).split(batch_gpu) + phase_real_c = phase_real_c.to(device).split(batch_gpu) + phase_real_pose = phase_real_pose.to(device).split(batch_gpu) + + all_gen_z = torch.randn([len(phases) * (batch_size // num_gpus), G.z_dim], device=device) # 4 * 8 + all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split((batch_size // num_gpus))] + + random_idx = [np.random.randint(len(training_set)) for _ in range(len(phases) * (batch_size // num_gpus))] + + + all_gen_c = [training_set.get_label(gen_idx) for gen_idx in random_idx] + all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device) + all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split((batch_size // num_gpus))] + + + all_gen_pose = [training_set.get_coarse_pose(gen_idx) for gen_idx in random_idx] + all_gen_pose = torch.from_numpy(np.stack(all_gen_pose)).pin_memory().to(device) + all_gen_pose = [phase_gen_pose.split(batch_gpu) for phase_gen_pose in all_gen_pose.split((batch_size // num_gpus))] + + assert len(phases) == len(all_gen_z) == len(all_gen_c) ==len(all_gen_pose) + # Execute training phases. + for phase, phase_gen_z,phase_gen_c,phase_gen_pose in zip(phases, all_gen_z,all_gen_c,all_gen_pose): # 4 + if batch_idx % phase.interval != 0: + continue + + + if phase.start_event is not None: + phase.start_event.record(torch.cuda.current_stream(device)) + + # Accumulate gradients. + phase.opt.zero_grad(set_to_none=True) + phase.module.requires_grad_(True) + for real_img, real_seg, real_c,real_pose, gen_z,gen_c,gen_pose in \ + zip(phase_real_img, phase_real_seg, phase_real_c, phase_real_pose, phase_gen_z,phase_gen_c,phase_gen_pose): + + loss.accumulate_gradients(phase=phase.name, real_img=real_img,real_seg = real_seg, real_c=real_c,real_pose = real_pose, + gen_z=gen_z,gen_c = gen_c, gen_pose = gen_pose, + + gain=phase.interval, cur_nimg=cur_nimg,cur_nimg_start = resume_kimg * 1000) + phase.module.requires_grad_(False) + + # Update weights. + with torch.autograd.profiler.record_function(phase.name + '_opt'): + + params = [param for param in phase.module.parameters() if param.numel() > 0 and param.grad is not None] + if len(params) > 0: + flat = torch.cat([param.grad.flatten() for param in params]) + if num_gpus > 1: + torch.distributed.all_reduce(flat) + flat /= num_gpus + misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat) + grads = flat.split([param.numel() for param in params]) + for param, grad in zip(params, grads): + param.grad = grad.reshape(param.shape) + phase.opt.step() + + + + # Phase done. + if phase.end_event is not None: + phase.end_event.record(torch.cuda.current_stream(device)) + + # Update G_ema. 
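+        # Maintain exponential moving averages of G and D; ema_rampup shortens the EMA
+        # half-life early in training so the averaged weights track the live ones faster.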
+ with torch.autograd.profiler.record_function('Gema'): + ema_nimg = ema_kimg * 1000 + if ema_rampup is not None: + ema_nimg = min(ema_nimg, cur_nimg * ema_rampup) + ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8)) + for p_ema, p in zip(G_ema.parameters(), G.parameters()): + p_ema.copy_(p.lerp(p_ema, ema_beta)) + for b_ema, b in zip(G_ema.buffers(), G.buffers()): + b_ema.copy_(b) + G_ema.neural_rendering_resolution = G.neural_rendering_resolution + G_ema.rendering_kwargs = G.rendering_kwargs.copy() + + with torch.autograd.profiler.record_function('Dema'): + ema_nimg = ema_kimg * 1000 + if ema_rampup is not None: + ema_nimg = min(ema_nimg, cur_nimg * ema_rampup) + ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8)) + for p_ema, p in zip(D_ema.parameters(), D.parameters()): + p_ema.copy_(p.lerp(p_ema, ema_beta)) + for b_ema, b in zip(D_ema.buffers(), D.buffers()): + b_ema.copy_(b) + + + # Update state. + cur_nimg += batch_size + batch_idx += 1 + + # Execute ADA heuristic. + if (ada_stats is not None) and (batch_idx % ada_interval == 0): + ada_stats.update() + adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000) + augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device))) + + # Perform maintenance tasks once per tick. + done = (cur_nimg >= total_kimg * 1000) + if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000): + continue + + # Print status line, accumulating the same information in training_stats. + tick_end_time = time.time() + fields = [] + fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"] + fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"] + fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"] + fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"] + fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"] + fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"] + fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"] + fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"] + fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"] + torch.cuda.reset_peak_memory_stats() + fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"] + + if loss.swapping_prob is not None: + fields += [f"swap prob {training_stats.report0('Progress/swap_prob', float(loss.swapping_prob)):.3f}"] + if loss.neural_rendering_resolution is not None: + fields += [f"render_res {training_stats.report0('Progress/rendering_res', float(loss.neural_rendering_resolution)):.3f}"] + # if loss.noise_alpha is not None: + # fields += [f"noise_alpha {training_stats.report0('Progress/noise_alpha', float(loss.noise_alpha)):.3f}"] + # if loss.noise_scale is not None: + # fields += [f"noise_scale {training_stats.report0('Progress/noise_scale', float(loss.noise_scale)):.3f}"] + + # if loss.predict_label_alpha is not None: + # fields += [f"predict_label_alpha 
{training_stats.report0('Progress/predict_label_alpha', float(loss.predict_label_alpha)):.3f}"] + + training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60)) + training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60)) + if rank == 0: + print(' '.join(fields)) + + # Check for abort. + if (not done) and (abort_fn is not None) and abort_fn(): + done = True + if rank == 0: + print() + print('Aborting...') + + + + + if (rank == 0) and ((image_snapshot_ticks is not None) and (done or (cur_tick % image_snapshot_ticks == 0) ) ): # or (cur_tick<50 and cur_tick % 5 == 0 ) ) # (cur_tick!=0) and + print('gen images...') + with torch.no_grad(): + predicted_real_pose_params_D = [] + for vis_real_img,vis_real_seg, vis_c in zip(grid_images,grid_segs, grid_c): + pose_param = loss.get_pose_params_D(vis_real_img,vis_real_seg, vis_c, cur_nimg) + predicted_real_pose_params_D.append(pose_param) + + predicted_fake_pose_params_G = [] + for vis_z, vis_c in zip(grid_z, grid_c): + pose_param = loss.get_pose_params_G(vis_z, vis_c) + predicted_fake_pose_params_G.append(pose_param) + + + real_pose_mesh = [] + for predicted_real_pose, c in zip(predicted_real_pose_params_D, grid_c): + real_pose_param = {'pose': predicted_real_pose} + real_pose_mesh.append( + G_ema.render_meshes(real_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + real_pose_mesh = np.concatenate(real_pose_mesh, axis=0) + save_image_grid(real_pose_mesh, + os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_mesh_real_pose_D.png'), + drange=[0, 255], grid_size=grid_size) + + + snap_pose = predicted_fake_pose_params_G + cond_c = torch.tensor([[ 1.0000e+00, 1.0505e-09, 4.3685e-08, -1.1805e-07, 0.0000e+00, + -9.9951e-01, 2.4033e-02, -1.1805e-07, 4.3714e-08, -2.4033e-02, + -9.9951e-01, 2.6992e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 1.0000e+00, 6.5104e+00, 0.0000e+00, 5.0000e-01, 0.0000e+00, + 6.5104e+00, 5.0000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+00]]).float().to(device) + + + #out = [G_ema(z=z, c=c, noise_mode='const',apply_def = True, pose_params = pose) for z, c, pose in zip(grid_z, grid_c, snap_pose)] + grid_ws = [G_ema.mapping(z, cond_c.expand(z.shape[0], -1),None) for z in grid_z] + out =[G_ema.synthesis(ws, c=c, noise_mode='const',apply_def = True, pose_params = pose) for ws, c,pose in zip(grid_ws, grid_c,snap_pose)] + images = torch.cat([o['image'].cpu() for o in out]).numpy() + #print('images range: ',np.max(images),np.min(images)) + images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy() + images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy() + images_alpha = torch.cat([o['image_mask'].cpu() for o in out]).numpy() + #background_raw = torch.cat([o['image_background'].cpu() for o in out]).numpy() + save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_0.png'), drange=[-1,1], grid_size=grid_size) + save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_2_raw.png'), drange=[-1,1], grid_size=grid_size) + save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_4_depth.png'), drange=[images_depth.min(), images_depth.max()], grid_size=grid_size) + save_image_grid(images_alpha, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_4_alpha.jpg'), drange=[0, 1], grid_size=grid_size) + #save_image_grid(background_raw, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_4_background.jpg'), drange=[-1, 1], grid_size=grid_size) + with torch.no_grad(): + predicted_fake_pose_params_D = [] 
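+                    # Re-estimate poses from the rendered fakes with the discriminator's pose
+                    # branch, purely for the mesh visualizations saved below.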
+ for o,vis_c,vis_pose in zip(out,grid_c,snap_pose): + pose_param = loss.get_pose_params_D(o['image'],o['image_mask'],vis_c, cur_nimg) + predicted_fake_pose_params_D.append(pose_param) + + fake_pose_mesh = [] + for predicted_fake_pose, c in zip(predicted_fake_pose_params_D, grid_c): + fake_pose_param = {'pose': predicted_fake_pose} + fake_pose_mesh.append( + G_ema.render_meshes(fake_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + fake_pose_mesh = np.concatenate(fake_pose_mesh, axis=0) + save_image_grid(fake_pose_mesh, + os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_mesh_fake_pose_D.png'), + drange=[0, 255], grid_size=grid_size) + + input_pose_mesh = [] + for input_pose, c in zip(predicted_fake_pose_params_G, grid_c): + input_pose_param = {'pose': input_pose} + input_pose_mesh.append( + G_ema.render_meshes(input_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + input_pose_mesh = np.concatenate(input_pose_mesh, axis=0) + save_image_grid(input_pose_mesh, + os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_mesh_input_pose_G.png'), + drange=[0, 255], grid_size=grid_size) + + + + + # no_pose_out = [G_ema(z=z, c=c, noise_mode='const', apply_def=False, pose_params=None) for z, c in zip(grid_z, grid_c)] + no_pose_out =[G_ema.synthesis(ws, c=c, noise_mode='const',apply_def = False, pose_params = None) for ws, c in zip(grid_ws, grid_c)] + images = torch.cat([o['image'].cpu() for o in no_pose_out]).numpy() + images_raw = torch.cat([o['image_raw'].cpu() for o in no_pose_out]).numpy() + images_depth = -torch.cat([o['image_depth'].cpu() for o in no_pose_out]).numpy() + save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_1_no_pose.png'), drange=[-1, 1], + grid_size=grid_size) + save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_3_no_pose_raw.png'), drange=[-1, 1], + grid_size=grid_size) + save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_5_no_pose_depth.png'), + drange=[images_depth.min(), images_depth.max()], grid_size=grid_size) + + + + # if (loss.fronzen_D is not None) and ((network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0)): + # if rank ==0 : + # print('update loss.fronzen_D...') + # misc.copy_params_and_buffers(D, loss.fronzen_D, require_all=True) + # Save network snapshot. + snapshot_pkl = None + snapshot_data = None + if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0): + snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs)) + for name, module in [('G', G), ('D', D), ('G_ema', G_ema), ('D_ema', D_ema), ('augment_pipe', augment_pipe)]: + if module is not None: + if num_gpus > 1: + misc.check_ddp_consistency(module, ignore_regex=r'.*\.[^.]+_(avg|ema)') + module = copy.deepcopy(module).eval().requires_grad_(False).cpu() + snapshot_data[name] = module + del module # conserve memory + snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl') + if rank == 0: + with open(snapshot_pkl, 'wb') as f: + pickle.dump(snapshot_data, f) + + pose_predict_kwargs = { + 'blur_sigma' : loss.blur_sigma, + 'neural_rendering_resolution': loss.neural_rendering_resolution, + 'resample_filter': loss.resample_filter.cpu().numpy().tolist(), + 'filter_mode': loss.filter_mode + } + with open(os.path.join(run_dir, f'pose_predict_kwargs-{cur_nimg//1000:06d}.json'), 'wt') as f: + json.dump(pose_predict_kwargs, f, indent=2) + + + # Evaluate metrics. 
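+        # With metric_pose_sample_mode == 'D_predict', the D snapshot and the current
+        # blur / neural-rendering settings are forwarded so the metric can re-estimate
+        # poses from rendered samples; for other modes these extra arguments stay None.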
+ if (cur_tick!=0) and (snapshot_data is not None) and (len(metrics) > 0): + if rank == 0: + print(run_dir) + print('Evaluating metrics...') + for metric in metrics: + progress = metric_utils.ProgressMonitor(verbose=True) + # result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'], + # dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, + # rank=rank, device=device, progress=progress + # ) + result_dict = metric_main.calc_metric(metric=metric, + G=snapshot_data['G_ema'], + dataset_kwargs=training_set_kwargs, + num_gpus=num_gpus, + rank=rank, + device=device, + metric_pose_sample_mode = metric_pose_sample_mode, + progress=progress, + D = snapshot_data['D'] if metric_pose_sample_mode == 'D_predict' else None, + pose_predict_kwargs = { + 'blur_sigma' : loss.blur_sigma, + 'neural_rendering_resolution': loss.neural_rendering_resolution, + 'resample_filter': loss.resample_filter, + 'filter_mode': loss.filter_mode + } if metric_pose_sample_mode == 'D_predict' else None + ) + + if rank == 0: + metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl) + stats_metrics.update(result_dict.results) + del snapshot_data # conserve memory + + # Collect statistics. + for phase in phases: + value = [] + if (phase.start_event is not None) and (phase.end_event is not None): + phase.end_event.synchronize() + value = phase.start_event.elapsed_time(phase.end_event) + training_stats.report0('Timing/' + phase.name, value) + stats_collector.update() + stats_dict = stats_collector.as_dict() + + # Update logs. + timestamp = time.time() + if stats_jsonl is not None: + fields = dict(stats_dict, timestamp=timestamp) + stats_jsonl.write(json.dumps(fields) + '\n') + stats_jsonl.flush() + if stats_tfevents is not None: + global_step = int(cur_nimg / 1e3) + walltime = timestamp - start_time + for name, value in stats_dict.items(): + stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime) + for name, value in stats_metrics.items(): + stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime) + stats_tfevents.flush() + if progress_fn is not None: + progress_fn(cur_nimg // 1000, total_kimg) + + # Update state. + cur_tick += 1 + tick_start_nimg = cur_nimg + tick_start_time = time.time() + maintenance_time = tick_start_time - tick_end_time + if done: + break + + # Done. + if rank == 0: + print() + print('Exiting...') + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/training/volumetric_rendering/__init__.py b/3DPortraitGAN_pyramid/training/volumetric_rendering/__init__.py new file mode 100644 index 0000000..daba665 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/volumetric_rendering/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +# empty \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/volumetric_rendering/math_utils.py b/3DPortraitGAN_pyramid/training/volumetric_rendering/math_utils.py new file mode 100644 index 0000000..4cf9d2b --- /dev/null +++ b/3DPortraitGAN_pyramid/training/volumetric_rendering/math_utils.py @@ -0,0 +1,118 @@ +# MIT License + +# Copyright (c) 2022 Petr Kellnhofer + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch + +def transform_vectors(matrix: torch.Tensor, vectors4: torch.Tensor) -> torch.Tensor: + """ + Left-multiplies MxM @ NxM. Returns NxM. + """ + res = torch.matmul(vectors4, matrix.T) + return res + + +def normalize_vecs(vectors: torch.Tensor) -> torch.Tensor: + """ + Normalize vector lengths. + """ + return vectors / (torch.norm(vectors, dim=-1, keepdim=True)) + +def torch_dot(x: torch.Tensor, y: torch.Tensor): + """ + Dot product of two tensors. + """ + return (x * y).sum(-1) + + +def get_ray_limits_box(rays_o: torch.Tensor, rays_d: torch.Tensor, box_side_length): + """ + Author: Petr Kellnhofer + Intersects rays with the [-1, 1] NDC volume. + Returns min and max distance of entry. + Returns -1 for no intersection. + https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection + """ + o_shape = rays_o.shape + rays_o = rays_o.detach().reshape(-1, 3) + rays_d = rays_d.detach().reshape(-1, 3) + + + bb_min = [-1*(box_side_length/2), -1*(box_side_length/2), -1*(box_side_length/2)] + bb_max = [1*(box_side_length/2), 1*(box_side_length/2), 1*(box_side_length/2)] + bounds = torch.tensor([bb_min, bb_max], dtype=rays_o.dtype, device=rays_o.device) + is_valid = torch.ones(rays_o.shape[:-1], dtype=bool, device=rays_o.device) + + # Precompute inverse for stability. + invdir = 1 / rays_d + sign = (invdir < 0).long() + + # Intersect with YZ plane. + tmin = (bounds.index_select(0, sign[..., 0])[..., 0] - rays_o[..., 0]) * invdir[..., 0] + tmax = (bounds.index_select(0, 1 - sign[..., 0])[..., 0] - rays_o[..., 0]) * invdir[..., 0] + + # Intersect with XZ plane. + tymin = (bounds.index_select(0, sign[..., 1])[..., 1] - rays_o[..., 1]) * invdir[..., 1] + tymax = (bounds.index_select(0, 1 - sign[..., 1])[..., 1] - rays_o[..., 1]) * invdir[..., 1] + + # Resolve parallel rays. + is_valid[torch.logical_or(tmin > tymax, tymin > tmax)] = False + + # Use the shortest intersection. 
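+    # Slab method: the ray enters the box at the largest of the per-axis near hits and
+    # leaves at the smallest of the per-axis far hits; whenever the running tmin exceeds
+    # the running tmax the ray misses the box and is marked invalid.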
+ tmin = torch.max(tmin, tymin) + tmax = torch.min(tmax, tymax) + + # Intersect with XY plane. + tzmin = (bounds.index_select(0, sign[..., 2])[..., 2] - rays_o[..., 2]) * invdir[..., 2] + tzmax = (bounds.index_select(0, 1 - sign[..., 2])[..., 2] - rays_o[..., 2]) * invdir[..., 2] + + # Resolve parallel rays. + is_valid[torch.logical_or(tmin > tzmax, tzmin > tmax)] = False + + # Use the shortest intersection. + tmin = torch.max(tmin, tzmin) + tmax = torch.min(tmax, tzmax) + + # Mark invalid. + tmin[torch.logical_not(is_valid)] = -1 + tmax[torch.logical_not(is_valid)] = -2 + + return tmin.reshape(*o_shape[:-1], 1), tmax.reshape(*o_shape[:-1], 1) + + +def linspace(start: torch.Tensor, stop: torch.Tensor, num: int): + """ + Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to end, inclusive. + Replicates but the multi-dimensional bahaviour of numpy.linspace in PyTorch. + """ + # create a tensor of 'num' steps from 0 to 1 + steps = torch.arange(num, dtype=torch.float32, device=start.device) / (num - 1) + + # reshape the 'steps' tensor to [-1, *([1]*start.ndim)] to allow for broadcastings + # - using 'steps.reshape([-1, *([1]*start.ndim)])' would be nice here but torchscript + # "cannot statically infer the expected size of a list in this contex", hence the code below + for i in range(start.ndim): + steps = steps.unsqueeze(-1) + + # the output starts at 'start' and increments until 'stop' in each dimension + out = start[None] + steps * (stop - start)[None] + + return out diff --git a/3DPortraitGAN_pyramid/training/volumetric_rendering/ray_marcher.py b/3DPortraitGAN_pyramid/training/volumetric_rendering/ray_marcher.py new file mode 100644 index 0000000..3c2d1ee --- /dev/null +++ b/3DPortraitGAN_pyramid/training/volumetric_rendering/ray_marcher.py @@ -0,0 +1,60 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +The ray marcher takes the raw output of the implicit representation and uses the volume rendering equation to produce composited colors and depths. +Based off of the implementation in MipNeRF (this one doesn't do any cone tracing though!) +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + +class MipRayMarcher2(nn.Module): + def __init__(self): + super().__init__() + + def run_forward(self, colors, densities, depths, rendering_options): + deltas = depths[:, :, 1:] - depths[:, :, :-1] + colors_mid = (colors[:, :, :-1] + colors[:, :, 1:]) / 2 + densities_mid = (densities[:, :, :-1] + densities[:, :, 1:]) / 2 + depths_mid = (depths[:, :, :-1] + depths[:, :, 1:]) / 2 + + + if rendering_options['clamp_mode'] == 'softplus': + densities_mid = F.softplus(densities_mid - 1) # activation bias of -1 makes things initialize better + else: + assert False, "MipRayMarcher only supports `clamp_mode`=`softplus`!" 
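+        # Emission-absorption volume rendering over the midpoint samples:
+        #   alpha_i = 1 - exp(-sigma_i * delta_i)
+        #   T_i     = prod_{j<i} (1 - alpha_j)        (transmittance, via the shifted cumprod)
+        #   w_i     = alpha_i * T_i
+        #   rgb     = sum_i w_i * c_i,   depth = sum_i w_i * d_i / sum_i w_i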
+ + density_delta = densities_mid * deltas + + alpha = 1 - torch.exp(-density_delta) + + alpha_shifted = torch.cat([torch.ones_like(alpha[:, :, :1]), 1-alpha + 1e-10], -2) + weights = alpha * torch.cumprod(alpha_shifted, -2)[:, :, :-1] + + composite_rgb = torch.sum(weights * colors_mid, -2) + weight_total = weights.sum(2) + composite_depth = torch.sum(weights * depths_mid, -2) / weight_total + + # clip the composite to min/max range of depths + composite_depth = torch.nan_to_num(composite_depth, float('inf')) + composite_depth = torch.clamp(composite_depth, torch.min(depths), torch.max(depths)) + + if rendering_options.get('white_back', False): + composite_rgb = composite_rgb + 1 - weight_total + + return composite_rgb, composite_depth, weights + + + def forward(self, colors, densities, depths, rendering_options): + composite_rgb, composite_depth, weights = self.run_forward(colors, densities, depths, rendering_options) + + return composite_rgb, composite_depth, weights \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/volumetric_rendering/ray_sampler.py b/3DPortraitGAN_pyramid/training/volumetric_rendering/ray_sampler.py new file mode 100644 index 0000000..80b10b0 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/volumetric_rendering/ray_sampler.py @@ -0,0 +1,119 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +The ray sampler is a module that takes in camera matrices and resolution and batches of rays. +Expects cam2world matrices that use the OpenCV camera coordinate system conventions. +""" + +import torch + +class RaySampler(torch.nn.Module): + def __init__(self): + super().__init__() + self.ray_origins_h, self.ray_directions, self.depths, self.image_coords, self.rendering_options = None, None, None, None, None + + + def forward(self, cam2world_matrix, intrinsics, resolution): + """ + Create batches of rays and return origins and directions. 
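+        One ray is generated per pixel of a resolution x resolution image using the pinhole
+        intrinsics (fx, fy, cx, cy and skew); origins and directions are returned in world space.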
+ + cam2world_matrix: (N, 4, 4) + intrinsics: (N, 3, 3) + resolution: int + + ray_origins: (N, M, 3) + ray_dirs: (N, M, 2) + """ + N, M = cam2world_matrix.shape[0], resolution**2 + cam_locs_world = cam2world_matrix[:, :3, 3] + fx = intrinsics[:, 0, 0] + fy = intrinsics[:, 1, 1] + cx = intrinsics[:, 0, 2] + cy = intrinsics[:, 1, 2] + sk = intrinsics[:, 0, 1] + + uv = torch.stack(torch.meshgrid(torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), indexing='ij')) * (1./resolution) + (0.5/resolution) + uv = uv.flip(0).reshape(2, -1).transpose(1, 0) + uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1) + + x_cam = uv[:, :, 0].view(N, -1) + y_cam = uv[:, :, 1].view(N, -1) + z_cam = torch.ones((N, M), device=cam2world_matrix.device) + + x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y_cam/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam + y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam + + cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1) + + world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3] + + ray_dirs = world_rel_points - cam_locs_world[:, None, :] + ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2) + + ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1) + + return ray_origins, ray_dirs + + def patch_forward(self, cam2world_matrix, intrinsics, resolution, patch_scale=1): + """ + Create batches of rays and return origins and directions. + + cam2world_matrix: (N, 4, 4) + intrinsics: (N, 3, 3) + resolution: int + + ray_origins: (N, M, 3) + ray_dirs: (N, M, 2) + """ + N, M = cam2world_matrix.shape[0], resolution ** 2 + cam_locs_world = cam2world_matrix[:, :3, 3] + fx = intrinsics[:, 0, 0] + fy = intrinsics[:, 1, 1] + cx = intrinsics[:, 0, 2] + cy = intrinsics[:, 1, 2] + sk = intrinsics[:, 0, 1] + + full_resolution = int(resolution / patch_scale) + patch_info = [] + uv = torch.stack( + torch.meshgrid(torch.arange(full_resolution, dtype=torch.float32, device=cam2world_matrix.device), + torch.arange(full_resolution, dtype=torch.float32, device=cam2world_matrix.device), + indexing='ij')) * (1. 
/ full_resolution) + (0.5 / full_resolution) + if full_resolution > resolution: + patch_uv = [] + for i in range(cam2world_matrix.shape[0]): + top = torch.randint(full_resolution - resolution + 1, ()).item() + left = torch.randint(full_resolution - resolution + 1, ()).item() + patch_uv.append(uv.clone()[None, :, top:top + resolution, left:left + resolution]) + patch_info.append((top, left)) + uv = torch.cat(patch_uv, 0) + else: + uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1, 1) + uv = uv.flip(1).reshape(cam2world_matrix.shape[0], 2, -1).transpose(2, 1) + + x_cam = uv[:, :, 0].view(N, -1) + y_cam = uv[:, :, 1].view(N, -1) + z_cam = torch.ones((N, M), device=cam2world_matrix.device) + + x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1) * sk.unsqueeze(-1) / fy.unsqueeze(-1) - sk.unsqueeze( + -1) * y_cam / fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam + y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam + + cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1) + + world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3] + + ray_dirs = world_rel_points - cam_locs_world[:, None, :] + ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2) + + ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1) + + return ray_origins, ray_dirs, patch_info \ No newline at end of file diff --git a/3DPortraitGAN_pyramid/training/volumetric_rendering/renderer.py b/3DPortraitGAN_pyramid/training/volumetric_rendering/renderer.py new file mode 100644 index 0000000..cd79ac8 --- /dev/null +++ b/3DPortraitGAN_pyramid/training/volumetric_rendering/renderer.py @@ -0,0 +1,600 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +The renderer is a module that takes in rays, decides where to sample along each +ray, and computes pixel colors using the volume rendering equation. +""" + +import math +import torch +from torch_utils import misc +from training.volumetric_rendering.ray_marcher import MipRayMarcher2 +from training.volumetric_rendering import math_utils +# from training.aligned_smplx import AlignedSMPLX +from training.aligned_smpl import AlignedSMPL +import trimesh +#from training.aligned_smpl import AlignedSMPL +import smplx +from kaolin.ops.mesh import index_vertices_by_faces +from kaolin.metrics.trianglemesh import point_to_mesh_distance + + + + +# def generate_planes(): +# """ +# Defines planes by the three vectors that form the "axes" of the +# plane. Should work with arbitrary number of planes and planes of +# arbitrary orientation. +# """ +# return torch.tensor([[[1, 0, 0], +# [0, 1, 0], +# [0, 0, 1]], +# [[1, 0, 0], +# [0, 0, 1], +# [0, 1, 0]], +# [[0, 0, 1], +# [1, 0, 0], +# [0, 1, 0]]], dtype=torch.float32) + +# correct tri-planes, see https://github.com/NVlabs/eg3d/issues/67 +def generate_planes(): + """ + Defines planes by the three vectors that form the "axes" of the + plane. 
Should work with arbitrary number of planes and planes of + arbitrary orientation. + """ + return torch.tensor([[[1, 0, 0], + [0, 1, 0], + [0, 0, 1]], + [[1, 0, 0], + [0, 0, 1], + [0, 1, 0]], + [[0, 1, 0], + [0, 0, 1], + [1, 0, 0]]], dtype=torch.float32) + +def project_onto_planes(planes, coordinates): + """ + Does a projection of a 3D point onto a batch of 2D planes, + returning 2D plane coordinates. + + Takes plane axes of shape n_planes, 3, 3 + # Takes coordinates of shape N, M, 3 + # returns projections of shape N*n_planes, M, 2 + """ + N, M, C = coordinates.shape + n_planes, _, _ = planes.shape + coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3) + inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3) + projections = torch.bmm(coordinates, inv_planes) + return projections + +def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None, triplane_depth=1,render_high_freq = True): + assert padding_mode == 'zeros' + output_features = None + + + _, M, _ = coordinates.shape + coordinates = (2 / box_warp) * coordinates # TODO: add specific box bounds + projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1).unsqueeze(2) # (N x n_planes) x 1 x 1 x M x 3 + for res_k in plane_features: + plane_feature = plane_features[res_k] + N, n_planes, CD, H, W = plane_feature.shape + # _, M, _ = coordinates.shape + C, D = CD // triplane_depth, triplane_depth + plane_feature = plane_feature.view(N * n_planes, C, D, H, W) + + # coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds + + # projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1).unsqueeze(2) # (N x n_planes) x 1 x 1 x M x 3 + output_feature = torch.nn.functional.grid_sample(plane_feature, projected_coordinates.float(), mode=mode, + padding_mode=padding_mode, align_corners=False).permute(0, + 4, + 3, + 2, + 1).reshape(N, n_planes, M, C) + if output_features is None: + output_features = output_feature + else: + output_features += output_feature + + output_features /= len(plane_features) + + return output_features +def sample_from_3dgrid(grid, coordinates): + """ + Expects coordinates in shape (batch_size, num_points_per_batch, 3) + Expects grid in shape (1, channels, H, W, D) + (Also works if grid has batch size) + Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels) + """ + batch_size, n_coords, n_dims = coordinates.shape + sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1), + coordinates.reshape(batch_size, 1, 1, -1, n_dims), + mode='bilinear', padding_mode='zeros', align_corners=False) + N, C, H, W, D = sampled_features.shape + sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C) + return sampled_features + +def triplane_crop_mask(xyz_unformatted, thresh, boxwarp, allow_bottom=True): + # bw,tc = boxwarp, thresh + bw = boxwarp + tc = boxwarp * thresh + device = xyz_unformatted.device + # xyz = 0.5 * (xyz_unformatted+1) * torch.tensor([-1,1,-1]).to(device)[None,None,:] + xyz = (xyz_unformatted) * torch.tensor([-1,1,-1]).to(device)[None,None,:] + ans = (xyz[:,:,[0,2]].abs() <= (bw/2-tc)).all(dim=-1,keepdim=True) + if allow_bottom: + ans = ans | ( + (xyz[:,:,1:2] <= -(bw/2-tc)) & + (xyz[:,:,[0,2]].abs() <= (bw/2-tc)).all(dim=-1,keepdim=True) + ) + return ~ans +def cull_clouds_mask(denities, thresh): + denities = 
torch.nn.functional.softplus(denities - 1) # activation bias of -1 makes things initialize better + alpha = 1 - torch.exp(-denities) + return alpha < thresh + + + +class ImportanceRenderer(torch.nn.Module): + def __init__(self, w_dim, num_ws,batch_size,thickness,box_warp): + super().__init__() + self.ray_marcher = MipRayMarcher2() + self.plane_axes = generate_planes() + self.batch_size = batch_size + self.num_betas = 10 + body_model_smpl = smplx.create('./smplx_models', + model_type='smpl', + gender='neutral', + use_compressed=False, + use_face_contour=True, + num_betas=self.num_betas, + num_expression_coeffs=10, + ext='npz', + batch_size = batch_size + ).cuda() + self.aligned_SMPL = AlignedSMPL(model=body_model_smpl,batch_size=batch_size) + + + + shaped_smpl_data = self.aligned_SMPL.generate_shaped_smpl( + betas=None, + scale=None, # shape_params['scale'], + transl=None, # shape_params['transl'] + ) + shaped_smpl = shaped_smpl_data['vertices'].detach().contiguous() + align_points = shaped_smpl_data['align_joint_coordinate'].detach().contiguous() + + self.register_buffer('shaped_smpl', shaped_smpl) + self.register_buffer('align_points', align_points) + + # shaped_smpl [B,N,3] + # filter points that outside box + box_side_length = box_warp + # shaped_smpl: B,N,3 + point_mask = shaped_smpl[0:1,:,0] > -box_side_length/2 # 1,N + point_mask = point_mask & (shaped_smpl[0:1,:,0] < box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,1] > -box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,1] < box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,2] > -box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,2] < box_side_length/2) + point_mask = point_mask.squeeze(0).cuda() # N + + faces = self.aligned_SMPL.faces # [20908, 3] + face_mask = torch.ones(faces.shape[0],dtype=torch.bool).cuda() # [20908] + for i in range(faces.shape[0]): + face_mask[i] = point_mask[faces[i,0]] and point_mask[faces[i,1]] and point_mask[faces[i,2]] + self.register_buffer('face_mask', face_mask) + + self.thickness = thickness + + # shaped_smpl [B,N,3] + # filter points that not on the head + # shaped_smpl: B,N,3 + + # + # point_mask = shaped_smpl[0:1, :, 1] > 0 # 1,N + + point_mask = shaped_smpl[0:1, :, 1] > 0.06 # 1,N + point_mask = point_mask & (shaped_smpl[0:1, :, 2] < -0.0) + + point_mask = point_mask.squeeze(0).cuda() # N + + faces = self.aligned_SMPL.faces # [20908, 3] + head_face_mask = torch.ones(faces.shape[0], dtype=torch.bool).cuda() # [20908] + for i in range(faces.shape[0]): + head_face_mask[i] = point_mask[faces[i, 0]] and point_mask[faces[i, 1]] and point_mask[faces[i, 2]] + self.register_buffer('head_face_mask', head_face_mask) + + self.back_head_depth = None + # + # print('head_face_mask shape:',head_face_mask.shape) + + + def set_batch_size(self,batch_size): + self.batch_size = batch_size + body_model_smpl = smplx.create('./smplx_models', + model_type='smpl', + gender='neutral', + use_compressed=False, + use_face_contour=True, + num_betas=self.num_betas, + num_expression_coeffs=10, + ext='npz', + batch_size=batch_size + ).to(self.aligned_SMPL.model.shapedirs.device) + self.aligned_SMPL.set_model(body_model_smpl) + self.aligned_SMPL.set_batch_size(batch_size) + shaped_smpl_data = self.aligned_SMPL.generate_shaped_smpl( + betas=None, + scale=None, # shape_params['scale'], + transl=None, # shape_params['transl'] + ) + shaped_smpl = shaped_smpl_data['vertices'].detach().contiguous() + align_points = 
shaped_smpl_data['align_joint_coordinate'].detach().contiguous() + self.register_buffer('shaped_smpl', shaped_smpl) + self.register_buffer('align_points', align_points) + + + def render_meshes(self, shape_pose_params,resolution,cameras): + images = self.aligned_SMPL.get_visualization(shape_pose_params, resolution, cameras) + return images + + + def get_deformed_coordinate(self, ws, pose_params, original_coordinate): + + + posed_smpl = self.aligned_SMPL.generate_posed_smpl(betas=None, + body_pose=pose_params, + scale=None, # shape_params['scale'], + transl=None, # shape_params['transl'], + align_joint_coordinate=self.align_points)['vertices'] + # misc.assert_shape(posed_smpl, [None, 10475, 3]) + + + mode = 'kaolin' + if mode == 'pytorch3d': + raise NotImplementedError + import pytorch3d.ops + #raise NotImplementedError + with torch.no_grad(): + + smpl_def_on_mesh = self.shaped_smpl - posed_smpl # [B, , 3] + + # find the nearest face in posed_smpl for each vertex in original_coordinate + knn_res = pytorch3d.ops.knn_points(p1=original_coordinate, p2=posed_smpl, K=1) + distance = knn_res[0] # [B, N, 1] + p1_index = knn_res[1].repeat(1, 1, 3) # [B, N, 3] + misc.assert_shape(p1_index, [original_coordinate.shape[0], original_coordinate.shape[1],3]) + + + DistToMesh = distance.squeeze(-1) # [B, N] + + SmplDef = smpl_def_on_mesh.gather(1, p1_index) # [B, N, 3] + mask = DistToMesh < self.thickness# [B, N] + + + scale = 5. + SmplDef1 = SmplDef / torch.exp(DistToMesh.unsqueeze(-1) * scale) # [B, N, 3] + + scale = DistToMesh.unsqueeze(-1) / (self.thickness * 2) * 20 + SmplDef2 = torch.zeros_like(SmplDef).to(SmplDef.device) + + SmplDef = torch.where(mask.unsqueeze(-1), SmplDef1, SmplDef2) # [B, N, 3] + elif mode == 'kaolin': + faces = self.aligned_SMPL.faces.clone() # [20908, 3] + faces = faces[self.face_mask, :] + # find the nearest face in shaped_smplx for each vertex in original_coordinate + vertex_faces = posed_smpl.clone() # [B, 6085, 3] + + with torch.no_grad(): + face_vertices = index_vertices_by_faces(vertex_faces, faces) + distance, index, dist_type = point_to_mesh_distance(original_coordinate, face_vertices) # B, N + distance = torch.sqrt(distance) # [B, N, 1] + selected_posed_smpl_vertices = [] + selected_shaped_smpl_vertices = [] + + for i in range(original_coordinate.shape[0]): + selected_face = faces[index[i]] + selected_posed_smpl_vertices.append(index_vertices_by_faces(posed_smpl[i:i + 1], + selected_face)) # [1, N, 3, 3] + selected_shaped_smpl_vertices.append(index_vertices_by_faces(self.shaped_smpl[i:i + 1], + selected_face)) # [1, N, 3, 3] + + selected_posed_smpl_vertices = torch.cat(selected_posed_smpl_vertices, dim=0) # [B, N, 3, 3] + selected_shaped_smpl_vertices = torch.cat(selected_shaped_smpl_vertices, dim=0) # [B, N, 3, 3] + + y_axes = torch.cross(selected_posed_smpl_vertices[:, :, 1, :] - selected_posed_smpl_vertices[:, :, 0, :], + selected_posed_smpl_vertices[:, :, 2, :] - selected_posed_smpl_vertices[:, :, 0, + :]) # [B, N, 3] + y_axes = y_axes / torch.norm(y_axes, dim=2, keepdim=True) # [B, N, 3] + + x_axes = selected_posed_smpl_vertices[:, :, 1, :] - selected_posed_smpl_vertices[:, :, 0, :] # [B, N, 3] + x_axes = x_axes / torch.norm(x_axes, dim=2, keepdim=True) # [B, N, 3] + + z_axes = torch.cross(x_axes, y_axes) # [B, N, 3] + + posed_smpl_coordinate = torch.stack( + [torch.sum((original_coordinate - selected_posed_smpl_vertices[:, :, 0, :]) * x_axes, dim=2), + torch.sum((original_coordinate - selected_posed_smpl_vertices[:, :, 0, :]) * y_axes, dim=2), + 
torch.sum((original_coordinate - selected_posed_smpl_vertices[:, :, 0, :]) * z_axes, dim=2)], + dim=2) # [B, N, 3] + del x_axes, y_axes, z_axes + y_axes = torch.cross(selected_shaped_smpl_vertices[:, :, 1, :] - selected_shaped_smpl_vertices[:, :, 0, :], + selected_shaped_smpl_vertices[:, :, 2, :] - selected_shaped_smpl_vertices[:, :, 0, :]) + y_axes = y_axes / torch.norm(y_axes, dim=2, keepdim=True) + + x_axes = selected_shaped_smpl_vertices[:, :, 1, :] - selected_shaped_smpl_vertices[:, :, 0, :] + x_axes = x_axes / torch.norm(x_axes, dim=2, keepdim=True) + + z_axes = torch.cross(x_axes, y_axes) + + new_coordinate = posed_smpl_coordinate[:, :, 0:1] * x_axes + \ + posed_smpl_coordinate[:, :, 1:2] * y_axes + \ + posed_smpl_coordinate[:, :, 2:3] * z_axes + \ + selected_shaped_smpl_vertices[:, :, 0, :] # [B, N, 3] + + SmplDef = new_coordinate - original_coordinate # [B, N, 3] + + DistToMesh = distance.unsqueeze(-1) # [B, N, 1] + + mask = DistToMesh < self.thickness # [B, N,1] + + SmplDef2 = torch.zeros_like(SmplDef).to(SmplDef.device) + SmplDef = torch.where(mask, SmplDef, SmplDef2) # [B, N, 3] + + else: + raise NotImplementedError + + original_coordinate = original_coordinate + SmplDef + return original_coordinate + + def forward(self, planes, decoder, ray_origins, ray_directions, rendering_options, apply_def = False, ws = None, pose_params = None, triplane_crop=0.1, cull_clouds=None, binarize_clouds=None ): + _ = ws + if apply_def: + assert pose_params is not None + else: + assert pose_params is None + + self.plane_axes = self.plane_axes.to(ray_origins.device) + + if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto': + ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp']) + is_ray_valid = ray_end > ray_start + if torch.any(is_ray_valid).item(): + ray_start[~is_ray_valid] = ray_start[is_ray_valid].min() + ray_end[~is_ray_valid] = ray_start[is_ray_valid].max() + depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling']) + else: + # Create stratified depth samples + depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling']) + + batch_size, num_rays, samples_per_ray, _ = depths_coarse.shape + + # Coarse Pass + sample_coordinates = (ray_origins.unsqueeze(-2) + depths_coarse * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3) + sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3) + # deform the sample_coordinates + if apply_def: + sample_coordinates = self.get_deformed_coordinate(None, pose_params, sample_coordinates) + + + out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options) + colors_coarse = out['rgb'] + densities_coarse = out['sigma'] + + xyz_coarse = out['xyz'] + + if triplane_crop: + # print(xyz_fine.amin(dim=(0,1))) + # print(xyz_fine.amax(dim=(0,1))) + cropmask = triplane_crop_mask(xyz_coarse, triplane_crop, rendering_options['box_warp']) + densities_coarse[cropmask] = -1e3 + if binarize_clouds: + ccmask = cull_clouds_mask(densities_coarse, binarize_clouds) + densities_coarse[ccmask] = -1e3 + densities_coarse[~ccmask] = 1e3 + elif cull_clouds: + ccmask = cull_clouds_mask(densities_coarse, cull_clouds) + densities_coarse[ccmask] = -1e3 + + colors_coarse = 
colors_coarse.reshape(batch_size, num_rays, samples_per_ray, colors_coarse.shape[-1]) + densities_coarse = densities_coarse.reshape(batch_size, num_rays, samples_per_ray, 1) + xyz_coarse = xyz_coarse.reshape(batch_size, num_rays, samples_per_ray, xyz_coarse.shape[-1]) + + # Fine Pass + N_importance = rendering_options['depth_resolution_importance'] + if N_importance > 0: + _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options) + + depths_fine = self.sample_importance(depths_coarse, weights, N_importance) + + sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, N_importance, -1).reshape(batch_size, -1, 3) + sample_coordinates = (ray_origins.unsqueeze(-2) + depths_fine * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3) + # deform the sample_coordinates + if apply_def: + sample_coordinates = self.get_deformed_coordinate(None, pose_params, sample_coordinates) + + out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options) + colors_fine = out['rgb'] + densities_fine = out['sigma'] + xyz_fine = out['xyz'] + if triplane_crop: + # print(xyz_fine.amin(dim=(0,1))) + # print(xyz_fine.amax(dim=(0,1))) + cropmask = triplane_crop_mask(xyz_fine, triplane_crop, rendering_options['box_warp']) + densities_fine[cropmask] = -1e3 + if binarize_clouds: + ccmask = cull_clouds_mask(densities_fine, binarize_clouds) + densities_fine[ccmask] = -1e3 + densities_fine[~ccmask] = 1e3 + elif cull_clouds: + ccmask = cull_clouds_mask(densities_fine, cull_clouds) + densities_fine[ccmask] = -1e3 + xyz_fine = xyz_fine.reshape(batch_size, num_rays, N_importance, xyz_fine.shape[-1]) + colors_fine = colors_fine.reshape(batch_size, num_rays, N_importance, colors_fine.shape[-1]) + densities_fine = densities_fine.reshape(batch_size, num_rays, N_importance, 1) + + # all_depths, all_colors, all_densities = self.unify_samples(depths_coarse, colors_coarse, densities_coarse, + # depths_fine, colors_fine, densities_fine) + all_depths, all_colors, all_densities, all_xyz = self.unify_samples( + depths_coarse, colors_coarse, densities_coarse, xyz_coarse, + depths_fine, colors_fine, densities_fine, xyz_fine, + ) + + # Aggregate + # rgb_final, depth_final, weights = self.ray_marcher(all_colors, all_densities, all_depths, rendering_options) + + all_colors_ = torch.cat([all_colors, all_xyz], dim=-1) + rgb_final_, depth_final, weights = self.ray_marcher(all_colors_, all_densities, all_depths, rendering_options) + rgb_final = rgb_final_[...,:-3] + xyz_final = rgb_final_[...,-3:] + else: + # rgb_final, depth_final, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options) + colors_coarse_ = torch.cat([colors_coarse, xyz_coarse], dim=-1) + rgb_final_, depth_final, weights = self.ray_marcher(colors_coarse_, densities_coarse, depths_coarse, rendering_options) + rgb_final = rgb_final_[...,:-3] + xyz_final = rgb_final_[...,-3:] + + + output = {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights.sum(2)} + + return output + + def run_model(self, planes, decoder, sample_coordinates, sample_directions, options): + self.plane_axes = self.plane_axes.to(planes[list(planes.keys())[0]].device) + sampled_features = sample_from_planes(self.plane_axes, planes, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'], triplane_depth=options['triplane_depth']) + + out = decoder(sampled_features, sample_directions) + if options.get('density_noise', 0) > 0: + out['sigma'] += 
torch.randn_like(out['sigma']) * options['density_noise'] + out['xyz'] = sample_coordinates#.permute(0,2,1)[...,None] + return out + + def sort_samples(self, all_depths, all_colors, all_densities): + _, indices = torch.sort(all_depths, dim=-2) + all_depths = torch.gather(all_depths, -2, indices) + all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1])) + all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1)) + return all_depths, all_colors, all_densities + + # def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2): + # all_depths = torch.cat([depths1, depths2], dim = -2) + # all_colors = torch.cat([colors1, colors2], dim = -2) + # all_densities = torch.cat([densities1, densities2], dim = -2) + + # _, indices = torch.sort(all_depths, dim=-2) + # all_depths = torch.gather(all_depths, -2, indices) + # all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1])) + # all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1)) + + # return all_depths, all_colors, all_densities + def unify_samples(self, depths1, colors1, densities1, xyz1, depths2, colors2, densities2, xyz2): + all_depths = torch.cat([depths1, depths2], dim = -2) + all_colors = torch.cat([colors1, colors2], dim = -2) + all_xyz = torch.cat([xyz1, xyz2], dim = -2) + all_densities = torch.cat([densities1, densities2], dim = -2) + + _, indices = torch.sort(all_depths, dim=-2) + all_depths = torch.gather(all_depths, -2, indices) + all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1])) + all_xyz = torch.gather(all_xyz, -2, indices.expand(-1, -1, -1, all_xyz.shape[-1])) + all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1)) + + return all_depths, all_colors, all_densities, all_xyz + + def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False): + """ + Return depths of approximately uniformly spaced samples along rays. + """ + N, M, _ = ray_origins.shape + if disparity_space_sampling: + depths_coarse = torch.linspace(0, + 1, + depth_resolution, + device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1) + depth_delta = 1/(depth_resolution - 1) + depths_coarse += torch.rand_like(depths_coarse) * depth_delta + depths_coarse = 1./(1./ray_start * (1. - depths_coarse) + 1./ray_end * depths_coarse) + else: + if type(ray_start) == torch.Tensor: + depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3) + depth_delta = (ray_end - ray_start) / (depth_resolution - 1) + depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None] + else: + depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1) + depth_delta = (ray_end - ray_start)/(depth_resolution - 1) + depths_coarse += torch.rand_like(depths_coarse) * depth_delta + + return depths_coarse + + def sample_importance(self, z_vals, weights, N_importance): + """ + Return depths of importance sampled points along rays. See NeRF importance sampling for more. 
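+        The coarse-pass weights are smoothed (max/avg pooling) and treated as a piecewise-
+        constant PDF over the coarse bin midpoints; new depths are drawn by inverting its
+        CDF in sample_pdf.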
+ """ + with torch.no_grad(): + batch_size, num_rays, samples_per_ray, _ = z_vals.shape + + z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray) + weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher + + # smooth weights + weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1) + weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze() + weights = weights + 0.01 + + z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:]) + importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1], + N_importance).detach().reshape(batch_size, num_rays, N_importance, 1) + return importance_z_vals + + def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5): + """ + Sample @N_importance samples from @bins with distribution defined by @weights. + Inputs: + bins: (N_rays, N_samples_+1) where N_samples_ is "the number of coarse samples per ray - 2" + weights: (N_rays, N_samples_) + N_importance: the number of samples to draw from the distribution + det: deterministic or not + eps: a small number to prevent division by zero + Outputs: + samples: the sampled samples + """ + N_rays, N_samples_ = weights.shape + weights = weights + eps # prevent division by zero (don't do inplace op!) + pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_) + cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function + cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1) + # padded to 0~1 inclusive + + if det: + u = torch.linspace(0, 1, N_importance, device=bins.device) + u = u.expand(N_rays, N_importance) + else: + u = torch.rand(N_rays, N_importance, device=bins.device) + u = u.contiguous() + + inds = torch.searchsorted(cdf, u, right=True) + below = torch.clamp_min(inds-1, 0) + above = torch.clamp_max(inds, N_samples_) + + inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance) + cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2) + bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2) + + denom = cdf_g[...,1]-cdf_g[...,0] + denom[denom 0: + self.defer_frames -= 1 + elif self.dump_image: + if 'image' in viz.result: + self.dump_png(viz.result.image) + self.dump_image = False + elif self.dump_gui: + viz.capture_next_frame() + self.dump_gui = False + captured_frame = viz.pop_captured_frame() + if captured_frame is not None: + self.dump_png(captured_frame) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/conditioning_pose_widget.py b/3DPortraitGAN_pyramid/viz/conditioning_pose_widget.py new file mode 100644 index 0000000..90ba693 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/conditioning_pose_widget.py @@ -0,0 +1,94 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +import numpy as np +import imgui +import dnnlib +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class ConditioningPoseWidget: + def __init__(self, viz): + self.viz = viz + self.pose = dnnlib.EasyDict(yaw=0, pitch=0, anim=False, speed=0.25) + self.pose_def = dnnlib.EasyDict(self.pose) + + self.neck_pose = dnnlib.EasyDict(x=0, y=0, z=0) + self.head_pose = dnnlib.EasyDict(x=0, y=0, z=0) + + def drag(self, dx, dy): + viz = self.viz + self.pose.yaw += -dx / viz.font_size * 3e-2 + self.pose.pitch += -dy / viz.font_size * 3e-2 + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + if show: + imgui.text('Cond Pose') + imgui.same_line(viz.label_w) + yaw = self.pose.yaw + pitch = self.pose.pitch + with imgui_utils.item_width(viz.font_size * 5): + changed, (new_yaw, new_pitch) = imgui.input_float2('##frac', yaw, pitch, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) + if changed: + self.pose.yaw = new_yaw + self.pose.pitch = new_pitch + imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) + _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) + if dragging: + self.drag(dx, dy) + imgui.same_line() + snapped = dnnlib.EasyDict(self.pose, yaw=round(self.pose.yaw, 1), pitch=round(self.pose.pitch, 1)) + if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.pose != snapped)): + self.pose = snapped + imgui.same_line() + if imgui_utils.button('Reset', width=-1, enabled=(self.pose != self.pose_def)): + self.pose = dnnlib.EasyDict(self.pose_def) + + imgui.text('Cond NeckPose') + imgui.same_line(viz.label_w) + neck_pose_x = self.neck_pose.x + neck_pose_y = self.neck_pose.y + neck_pose_z = self.neck_pose.z + with imgui_utils.item_width(viz.font_size * 10): + changed, (new_neck_pose_x, new_neck_pose_y, new_neck_pose_z) = \ + imgui.input_float3('##neck_pose', neck_pose_x, neck_pose_y, neck_pose_z, format='%+.2f', + flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) + if changed: + self.neck_pose.x = new_neck_pose_x + self.neck_pose.y = new_neck_pose_y + self.neck_pose.z = new_neck_pose_z + + imgui.text('Cond HeadPose') + imgui.same_line(viz.label_w) + head_pose_x = self.head_pose.x + head_pose_y = self.head_pose.y + head_pose_z = self.head_pose.z + with imgui_utils.item_width(viz.font_size * 10): + changed, (new_head_pose_x, new_head_pose_y, new_head_pose_z) = \ + imgui.input_float3('##head_pose', head_pose_x, head_pose_y, + head_pose_z, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) + if changed: + self.head_pose.x = new_head_pose_x + self.head_pose.y = new_head_pose_y + self.head_pose.z = new_head_pose_z + + + + + viz.args.conditioning_yaw = self.pose.yaw + viz.args.conditioning_pitch = self.pose.pitch + + viz.args.conditioning_body_pose = [self.neck_pose.x, self.neck_pose.y, self.neck_pose.z, self.head_pose.x, self.head_pose.y, self.head_pose.z] + + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/latent_widget.py b/3DPortraitGAN_pyramid/viz/latent_widget.py new file mode 100644 index 0000000..30ce50c --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/latent_widget.py @@ -0,0 +1,80 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import numpy as np +import imgui +import dnnlib +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class LatentWidget: + def __init__(self, viz): + self.viz = viz + self.latent = dnnlib.EasyDict(x=1, y=0, anim=False, speed=0.25) + self.latent_def = dnnlib.EasyDict(self.latent) + self.step_y = 100 + + def drag(self, dx, dy): + viz = self.viz + self.latent.x += dx / viz.font_size * 4e-2 + self.latent.y += dy / viz.font_size * 4e-2 + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + if show: + imgui.text('Latent') + imgui.same_line(viz.label_w) + seed = round(self.latent.x) + round(self.latent.y) * self.step_y + with imgui_utils.item_width(viz.font_size * 8): + changed, seed = imgui.input_int('##seed', seed, step=0) + if changed: + self.latent.x = seed + self.latent.y = 0 + imgui.same_line(viz.label_w + viz.font_size * 8 + viz.spacing) + frac_x = self.latent.x - round(self.latent.x) + frac_y = self.latent.y - round(self.latent.y) + with imgui_utils.item_width(viz.font_size * 5): + changed, (new_frac_x, new_frac_y) = imgui.input_float2('##frac', frac_x, frac_y, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) + if changed: + self.latent.x += new_frac_x - frac_x + self.latent.y += new_frac_y - frac_y + imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) + _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) + if dragging: + self.drag(dx, dy) + imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3) + _clicked, self.latent.anim = imgui.checkbox('Anim', self.latent.anim) + imgui.same_line(round(viz.font_size * 28.7)) + with imgui_utils.item_width(-2 - viz.button_w * 2 - viz.spacing * 2), imgui_utils.grayed_out(not self.latent.anim): + changed, speed = imgui.slider_float('##speed', self.latent.speed, -5, 5, format='Speed %.3f', power=3) + if changed: + self.latent.speed = speed + imgui.same_line() + snapped = dnnlib.EasyDict(self.latent, x=round(self.latent.x), y=round(self.latent.y)) + if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.latent != snapped)): + self.latent = snapped + imgui.same_line() + if imgui_utils.button('Reset', width=-1, enabled=(self.latent != self.latent_def)): + self.latent = dnnlib.EasyDict(self.latent_def) + + if self.latent.anim: + self.latent.x += viz.frame_delta * self.latent.speed + viz.args.w0_seeds = [] # [[seed, weight], ...] 
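+            # Bilinear blend of the four integer seeds surrounding the fractional (x, y)
+            # position: each neighbour's weight decays linearly with distance, the weights
+            # sum to 1, and dragging therefore interpolates smoothly between latents.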
+ for ofs_x, ofs_y in [[0, 0], [1, 0], [0, 1], [1, 1]]: + seed_x = np.floor(self.latent.x) + ofs_x + seed_y = np.floor(self.latent.y) + ofs_y + seed = (int(seed_x) + int(seed_y) * self.step_y) & ((1 << 32) - 1) + weight = (1 - abs(self.latent.x - seed_x)) * (1 - abs(self.latent.y - seed_y)) + if weight > 0: + viz.args.w0_seeds.append([seed, weight]) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/layer_widget.py b/3DPortraitGAN_pyramid/viz/layer_widget.py new file mode 100644 index 0000000..6da2585 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/layer_widget.py @@ -0,0 +1,185 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import imgui +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class LayerWidget: + def __init__(self, viz): + self.viz = viz + self.prev_layers = None + self.cur_layer = None + self.sel_channels = 3 + self.base_channel = 0 + self.img_scale_db = 0 + self.img_normalize = False + self.fft_show = False + self.fft_all = True + self.fft_range_db = 50 + self.fft_beta = 8 + self.refocus = False + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + layers = viz.result.get('layers', []) + if self.prev_layers != layers: + self.prev_layers = layers + self.refocus = True + layer = ([layer for layer in layers if layer.name == self.cur_layer] + [None])[0] + if layer is None and len(layers) > 0: + layer = layers[-1] + self.cur_layer = layer.name + num_channels = layer.shape[1] if layer is not None else 0 + base_channel_max = max(num_channels - self.sel_channels, 0) + + if show: + bg_color = [0.16, 0.29, 0.48, 0.2] + dim_color = list(imgui.get_style().colors[imgui.COLOR_TEXT]) + dim_color[-1] *= 0.5 + + # Begin list. + width = viz.font_size * 28 + height = imgui.get_text_line_height_with_spacing() * 12 + viz.spacing + imgui.push_style_var(imgui.STYLE_FRAME_PADDING, [0, 0]) + imgui.push_style_color(imgui.COLOR_CHILD_BACKGROUND, *bg_color) + imgui.push_style_color(imgui.COLOR_HEADER, 0, 0, 0, 0) + imgui.push_style_color(imgui.COLOR_HEADER_HOVERED, 0.16, 0.29, 0.48, 0.5) + imgui.push_style_color(imgui.COLOR_HEADER_ACTIVE, 0.16, 0.29, 0.48, 0.9) + imgui.begin_child('##list', width=width, height=height, border=True, flags=imgui.WINDOW_ALWAYS_VERTICAL_SCROLLBAR) + + # List items. + for layer in layers: + selected = (self.cur_layer == layer.name) + _opened, selected = imgui.selectable(f'##{layer.name}_selectable', selected) + imgui.same_line(viz.spacing) + _clicked, selected = imgui.checkbox(f'{layer.name}##radio', selected) + if selected: + self.cur_layer = layer.name + if self.refocus: + imgui.set_scroll_here() + viz.skip_frame() # Focus will change on next frame. 
+ self.refocus = False + imgui.same_line(width - viz.font_size * 13) + imgui.text_colored('x'.join(str(x) for x in layer.shape[2:]), *dim_color) + imgui.same_line(width - viz.font_size * 8) + imgui.text_colored(str(layer.shape[1]), *dim_color) + imgui.same_line(width - viz.font_size * 5) + imgui.text_colored(layer.dtype, *dim_color) + + # End list. + if len(layers) == 0: + imgui.text_colored('No layers found', *dim_color) + imgui.end_child() + imgui.pop_style_color(4) + imgui.pop_style_var(1) + + # Begin options. + imgui.same_line() + imgui.begin_child('##options', width=-1, height=height, border=False) + + # RGB & normalize. + rgb = (self.sel_channels == 3) + _clicked, rgb = imgui.checkbox('RGB', rgb) + self.sel_channels = 3 if rgb else 1 + imgui.same_line(viz.font_size * 4) + _clicked, self.img_normalize = imgui.checkbox('Normalize', self.img_normalize) + imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w) + if imgui_utils.button('Reset##img_flags', width=-1, enabled=(self.sel_channels != 3 or self.img_normalize)): + self.sel_channels = 3 + self.img_normalize = False + + # Image scale. + with imgui_utils.item_width(-1 - viz.button_w - viz.spacing): + _changed, self.img_scale_db = imgui.slider_float('##scale', self.img_scale_db, min_value=-40, max_value=40, format='Scale %+.1f dB') + imgui.same_line() + if imgui_utils.button('Reset##scale', width=-1, enabled=(self.img_scale_db != 0)): + self.img_scale_db = 0 + + # Base channel. + self.base_channel = min(max(self.base_channel, 0), base_channel_max) + narrow_w = imgui.get_text_line_height_with_spacing() + with imgui_utils.grayed_out(base_channel_max == 0): + with imgui_utils.item_width(-1 - viz.button_w - narrow_w * 2 - viz.spacing * 3): + _changed, self.base_channel = imgui.drag_int('##channel', self.base_channel, change_speed=0.05, min_value=0, max_value=base_channel_max, format=f'Channel %d/{num_channels}') + imgui.same_line() + if imgui_utils.button('-##channel', width=narrow_w): + self.base_channel -= 1 + imgui.same_line() + if imgui_utils.button('+##channel', width=narrow_w): + self.base_channel += 1 + imgui.same_line() + self.base_channel = min(max(self.base_channel, 0), base_channel_max) + if imgui_utils.button('Reset##channel', width=-1, enabled=(self.base_channel != 0 and base_channel_max > 0)): + self.base_channel = 0 + + # Stats. + stats = viz.result.get('stats', None) + stats = [f'{stats[idx]:g}' if stats is not None else 'N/A' for idx in range(6)] + rows = [ + ['Statistic', 'All channels', 'Selected'], + ['Mean', stats[0], stats[1]], + ['Std', stats[2], stats[3]], + ['Max', stats[4], stats[5]], + ] + height = imgui.get_text_line_height_with_spacing() * len(rows) + viz.spacing + imgui.push_style_color(imgui.COLOR_CHILD_BACKGROUND, *bg_color) + imgui.begin_child('##stats', width=-1, height=height, border=True) + for y, cols in enumerate(rows): + for x, col in enumerate(cols): + if x != 0: + imgui.same_line(viz.font_size * (4 + (x - 1) * 6)) + if x == 0 or y == 0: + imgui.text_colored(col, *dim_color) + else: + imgui.text(col) + imgui.end_child() + imgui.pop_style_color(1) + + # FFT & all. 
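+            # FFT display options: toggle a spectrum view of the current layer, either for
+            # all channels or only the selected ones, with adjustable dB range and Kaiser beta.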
+ _clicked, self.fft_show = imgui.checkbox('FFT', self.fft_show) + imgui.same_line(viz.font_size * 4) + with imgui_utils.grayed_out(not self.fft_show or base_channel_max == 0): + _clicked, self.fft_all = imgui.checkbox('All channels', self.fft_all) + imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w) + with imgui_utils.grayed_out(not self.fft_show): + if imgui_utils.button('Reset##fft_flags', width=-1, enabled=(self.fft_show or not self.fft_all)): + self.fft_show = False + self.fft_all = True + + # FFT range. + with imgui_utils.grayed_out(not self.fft_show): + with imgui_utils.item_width(-1 - viz.button_w - viz.spacing): + _changed, self.fft_range_db = imgui.slider_float('##fft_range_db', self.fft_range_db, min_value=0.1, max_value=100, format='Range +-%.1f dB') + imgui.same_line() + if imgui_utils.button('Reset##fft_range_db', width=-1, enabled=(self.fft_range_db != 50)): + self.fft_range_db = 50 + + # FFT beta. + with imgui_utils.grayed_out(not self.fft_show): + with imgui_utils.item_width(-1 - viz.button_w - viz.spacing): + _changed, self.fft_beta = imgui.slider_float('##fft_beta', self.fft_beta, min_value=0, max_value=50, format='Kaiser beta %.2f', power=2.63) + imgui.same_line() + if imgui_utils.button('Reset##fft_beta', width=-1, enabled=(self.fft_beta != 8)): + self.fft_beta = 8 + + # End options. + imgui.end_child() + + self.base_channel = min(max(self.base_channel, 0), base_channel_max) + viz.args.layer_name = self.cur_layer if len(layers) > 0 and self.cur_layer != layers[-1].name else None + viz.args.update(sel_channels=self.sel_channels, base_channel=self.base_channel, img_scale_db=self.img_scale_db, img_normalize=self.img_normalize) + viz.args.fft_show = self.fft_show + if self.fft_show: + viz.args.update(fft_all=self.fft_all, fft_range_db=self.fft_range_db, fft_beta=self.fft_beta) + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/performance_widget.py b/3DPortraitGAN_pyramid/viz/performance_widget.py new file mode 100644 index 0000000..deb208a --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/performance_widget.py @@ -0,0 +1,75 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +import array +import numpy as np +import imgui +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class PerformanceWidget: + def __init__(self, viz): + self.viz = viz + self.gui_times = [float('nan')] * 60 + self.render_times = [float('nan')] * 30 + self.fps_limit = 60 + self.use_vsync = False + self.is_async = False + self.force_fp32 = False + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + self.gui_times = self.gui_times[1:] + [viz.frame_delta] + if 'render_time' in viz.result: + self.render_times = self.render_times[1:] + [viz.result.render_time] + del viz.result.render_time + + if show: + imgui.text('GUI') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 8): + imgui.plot_lines('##gui_times', array.array('f', self.gui_times), scale_min=0) + imgui.same_line(viz.label_w + viz.font_size * 9) + t = [x for x in self.gui_times if x > 0] + t = np.mean(t) if len(t) > 0 else 0 + imgui.text(f'{t*1e3:.1f} ms' if t > 0 else 'N/A') + imgui.same_line(viz.label_w + viz.font_size * 14) + imgui.text(f'{1/t:.1f} FPS' if t > 0 else 'N/A') + imgui.same_line(viz.label_w + viz.font_size * 18 + viz.spacing * 3) + with imgui_utils.item_width(viz.font_size * 6): + _changed, self.fps_limit = imgui.input_int('FPS limit', self.fps_limit, flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) + self.fps_limit = min(max(self.fps_limit, 5), 1000) + imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w * 2 - viz.spacing) + _clicked, self.use_vsync = imgui.checkbox('Vertical sync', self.use_vsync) + + if show: + imgui.text('Render') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 8): + imgui.plot_lines('##render_times', array.array('f', self.render_times), scale_min=0) + imgui.same_line(viz.label_w + viz.font_size * 9) + t = [x for x in self.render_times if x > 0] + t = np.mean(t) if len(t) > 0 else 0 + imgui.text(f'{t*1e3:.1f} ms' if t > 0 else 'N/A') + imgui.same_line(viz.label_w + viz.font_size * 14) + imgui.text(f'{1/t:.1f} FPS' if t > 0 else 'N/A') + imgui.same_line(viz.label_w + viz.font_size * 18 + viz.spacing * 3) + _clicked, self.is_async = imgui.checkbox('Separate process', self.is_async) + imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w * 2 - viz.spacing) + _clicked, self.force_fp32 = imgui.checkbox('Force FP32', self.force_fp32) + + viz.set_fps_limit(self.fps_limit) + viz.set_vsync(self.use_vsync) + viz.set_async(self.is_async) + viz.args.force_fp32 = self.force_fp32 + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/pickle_widget.py b/3DPortraitGAN_pyramid/viz/pickle_widget.py new file mode 100644 index 0000000..e85a859 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/pickle_widget.py @@ -0,0 +1,172 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +import glob +import os +import re + +import dnnlib +import imgui +import numpy as np +from gui_utils import imgui_utils + +from . import renderer + +#---------------------------------------------------------------------------- + +def _locate_results(pattern): + return pattern + +#---------------------------------------------------------------------------- + +class PickleWidget: + def __init__(self, viz): + self.viz = viz + self.search_dirs = [] + self.cur_pkl = None + self.user_pkl = '' + self.recent_pkls = [] + self.browse_cache = dict() # {tuple(path, ...): [dnnlib.EasyDict(), ...], ...} + self.browse_refocus = False + self.load('', ignore_errors=True) + + def add_recent(self, pkl, ignore_errors=False): + try: + resolved = self.resolve_pkl(pkl) + if resolved not in self.recent_pkls: + self.recent_pkls.append(resolved) + except: + if not ignore_errors: + raise + + def load(self, pkl, ignore_errors=False): + viz = self.viz + viz.clear_result() + viz.skip_frame() # The input field will change on next frame. + try: + resolved = self.resolve_pkl(pkl) + name = resolved.replace('\\', '/').split('/')[-1] + self.cur_pkl = resolved + self.user_pkl = resolved + viz.result.message = f'Loading {name}...' + viz.defer_rendering() + if resolved in self.recent_pkls: + self.recent_pkls.remove(resolved) + self.recent_pkls.insert(0, resolved) + except: + self.cur_pkl = None + self.user_pkl = pkl + if pkl == '': + viz.result = dnnlib.EasyDict(message='No network pickle loaded') + else: + viz.result = dnnlib.EasyDict(error=renderer.CapturedException()) + if not ignore_errors: + raise + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + recent_pkls = [pkl for pkl in self.recent_pkls if pkl != self.user_pkl] + if show: + imgui.text('Pickle') + imgui.same_line(viz.label_w) + changed, self.user_pkl = imgui_utils.input_text('##pkl', self.user_pkl, 1024, + flags=(imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE), + width=(-1 - viz.button_w * 2 - viz.spacing * 2), + help_text=' | | | | /.pkl') + if changed: + self.load(self.user_pkl, ignore_errors=True) + if imgui.is_item_hovered() and not imgui.is_item_active() and self.user_pkl != '': + imgui.set_tooltip(self.user_pkl) + imgui.same_line() + if imgui_utils.button('Recent...', width=viz.button_w, enabled=(len(recent_pkls) != 0)): + imgui.open_popup('recent_pkls_popup') + imgui.same_line() + if imgui_utils.button('Browse...', enabled=len(self.search_dirs) > 0, width=-1): + imgui.open_popup('browse_pkls_popup') + self.browse_cache.clear() + self.browse_refocus = True + + if imgui.begin_popup('recent_pkls_popup'): + for pkl in recent_pkls: + clicked, _state = imgui.menu_item(pkl) + if clicked: + self.load(pkl, ignore_errors=True) + imgui.end_popup() + + if imgui.begin_popup('browse_pkls_popup'): + def recurse(parents): + key = tuple(parents) + items = self.browse_cache.get(key, None) + if items is None: + items = self.list_runs_and_pkls(parents) + self.browse_cache[key] = items + for item in items: + if item.type == 'run' and imgui.begin_menu(item.name): + recurse([item.path]) + imgui.end_menu() + if item.type == 'pkl': + clicked, _state = imgui.menu_item(item.name) + if clicked: + self.load(item.path, ignore_errors=True) + if len(items) == 0: + with imgui_utils.grayed_out(): + imgui.menu_item('No results found') + recurse(self.search_dirs) + if self.browse_refocus: + imgui.set_scroll_here() + viz.skip_frame() # Focus will change on next frame. 
+ self.browse_refocus = False + imgui.end_popup() + + paths = viz.pop_drag_and_drop_paths() + if paths is not None and len(paths) >= 1: + self.load(paths[0], ignore_errors=True) + + viz.args.pkl = self.cur_pkl + + def list_runs_and_pkls(self, parents): + items = [] + run_regex = re.compile(r'\d+-.*') + pkl_regex = re.compile(r'network-snapshot-\d+\.pkl') + for parent in set(parents): + if os.path.isdir(parent): + for entry in os.scandir(parent): + if entry.is_dir() and run_regex.fullmatch(entry.name): + items.append(dnnlib.EasyDict(type='run', name=entry.name, path=os.path.join(parent, entry.name))) + if entry.is_file() and pkl_regex.fullmatch(entry.name): + items.append(dnnlib.EasyDict(type='pkl', name=entry.name, path=os.path.join(parent, entry.name))) + + items = sorted(items, key=lambda item: (item.name.replace('_', ' '), item.path)) + return items + + def resolve_pkl(self, pattern): + assert isinstance(pattern, str) + assert pattern != '' + + # URL => return as is. + if dnnlib.util.is_url(pattern): + return pattern + + # Short-hand pattern => locate. + path = _locate_results(pattern) + + # Run dir => pick the last saved snapshot. + if os.path.isdir(path): + pkl_files = sorted(glob.glob(os.path.join(path, 'network-snapshot-*.pkl'))) + if len(pkl_files) == 0: + raise IOError(f'No network pickle found in "{path}"') + path = pkl_files[-1] + + # Normalize. + path = os.path.abspath(path) + return path + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/pose_widget.py b/3DPortraitGAN_pyramid/viz/pose_widget.py new file mode 100644 index 0000000..bcb1f17 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/pose_widget.py @@ -0,0 +1,92 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +import numpy as np +import imgui +import dnnlib +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class PoseWidget: + def __init__(self, viz): + self.viz = viz + self.pose = dnnlib.EasyDict(yaw=0, pitch=0, anim=False, speed=0.25) + self.pose_def = dnnlib.EasyDict(self.pose) + + self.lookat_point_choice = 0 + self.lookat_point_option = ['auto', 'ffhq', 'shapenet', 'afhq', 'manual'] + self.lookat_point_labels = ['Auto Detect', 'FFHQ Default', 'Shapenet Default', 'AFHQ Default', 'Manual'] + self.lookat_point = (0.0, 0.0, 0.2) + + def drag(self, dx, dy): + viz = self.viz + self.pose.yaw += -dx / viz.font_size * 3e-2 + self.pose.pitch += -dy / viz.font_size * 3e-2 + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + if show: + imgui.text('Pose') + imgui.same_line(viz.label_w) + yaw = self.pose.yaw + pitch = self.pose.pitch + with imgui_utils.item_width(viz.font_size * 5): + changed, (new_yaw, new_pitch) = imgui.input_float2('##pose', yaw, pitch, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) + if changed: + self.pose.yaw = new_yaw + self.pose.pitch = new_pitch + imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) + _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) + if dragging: + self.drag(dx, dy) + imgui.same_line() + snapped = dnnlib.EasyDict(self.pose, yaw=round(self.pose.yaw, 1), pitch=round(self.pose.pitch, 1)) + if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.pose != snapped)): + self.pose = snapped + imgui.same_line() + if imgui_utils.button('Reset', width=-1, enabled=(self.pose != self.pose_def)): + self.pose = dnnlib.EasyDict(self.pose_def) + + # New line starts here + imgui.text('LookAt Point') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 8): + _clicked, self.lookat_point_choice = imgui.combo('', self.lookat_point_choice, self.lookat_point_labels) + lookat_point = self.lookat_point_option[self.lookat_point_choice] + if lookat_point == 'auto': + self.lookat_point = None + if lookat_point == 'ffhq': + self.lookat_point = (0.0, 0.0, 0.2) + changes_enabled=False + if lookat_point == 'shapenet': + self.lookat_point = (0.0, 0.0, 0.0) + changes_enabled=False + if lookat_point == 'afhq': + self.lookat_point = (0.0, 0.0, 0.0) + changes_enabled=False + if lookat_point == 'manual': + if self.lookat_point is None: + self.lookat_point = (0.0, 0.0, 0.0) + changes_enabled=True + if lookat_point != 'auto': + imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) + with imgui_utils.item_width(viz.font_size * 16): + with imgui_utils.grayed_out(not changes_enabled): + _changed, self.lookat_point = imgui.input_float3('##lookat', *self.lookat_point, format='%.2f', flags=(imgui.INPUT_TEXT_READ_ONLY if not changes_enabled else 0)) + + + viz.args.yaw = self.pose.yaw + viz.args.pitch = self.pose.pitch + + viz.args.lookat_point = self.lookat_point + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/pyramid_trigrid_widget.py b/3DPortraitGAN_pyramid/viz/pyramid_trigrid_widget.py new file mode 100644 index 0000000..976733e --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/pyramid_trigrid_widget.py @@ -0,0 +1,150 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import glob +import os +import re + +import dnnlib +import imgui +import numpy as np +from gui_utils import imgui_utils + +from . import renderer + +#---------------------------------------------------------------------------- + +def _locate_results(pattern): + return pattern + +#---------------------------------------------------------------------------- + +class PyramidTrigridWidget: + def __init__(self, viz): + self.viz = viz + self.search_dirs = [] + self.cur_pyramid_trigrid = None + self.cur_ws = None + self.user_pth = '' + self.recent_pths = [] + self.browse_cache = dict() # {tuple(path, ...): [dnnlib.EasyDict(), ...], ...} + self.browse_refocus = False + self.load('', ignore_errors=True) + + def add_recent(self, pth, ignore_errors=False): + try: + resolved = self.resolve_pth(pth) + if resolved not in self.recent_pths: + self.recent_pths.append(resolved) + except: + if not ignore_errors: + raise + + def load(self, pth, ignore_errors=False): + viz = self.viz + viz.clear_result() + viz.skip_frame() # The input field will change on next frame. + try: + resolved = pth + name = resolved.replace('\\', '/').split('/')[-1] + self.cur_pth = resolved + self.user_pth = resolved + viz.result.message = f'Loading {name}...' + viz.defer_rendering() + if resolved in self.recent_pths: + self.recent_pths.remove(resolved) + self.recent_pths.insert(0, resolved) + except: + self.cur_pth = None + self.user_pth = pth + if pth == '': + viz.result = dnnlib.EasyDict(message='No pyramid tri-grid ckpt loaded') + else: + viz.result = dnnlib.EasyDict(error=renderer.CapturedException()) + if not ignore_errors: + raise + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + recent_pths = [pth for pth in self.recent_pths if pth != self.user_pth] + if show: + imgui.text('Pyramid Tri-Grid Ckpt:') + imgui.same_line(round(viz.font_size * 8.5)) + changed, self.user_pth = imgui_utils.input_text('##pth', self.user_pth, 1024, + flags=(imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE), + width=(-1 - viz.button_w * 2 - viz.spacing * 2), + help_text='.pth') + if changed: + self.load(self.user_pth, ignore_errors=True) + if imgui.is_item_hovered() and not imgui.is_item_active() and self.user_pth != '': + imgui.set_tooltip(self.user_pth) + + imgui.same_line() + if imgui_utils.button('Browse...', enabled=len(self.search_dirs) > 0, width=-1): + imgui.open_popup('browse_pths_popup') + self.browse_cache.clear() + self.browse_refocus = True + + if imgui.begin_popup('recent_pths_popup'): + for pth in recent_pths: + clicked, _state = imgui.menu_item(pth) + if clicked: + self.load(pth, ignore_errors=True) + imgui.end_popup() + + if imgui.begin_popup('browse_pths_popup'): + def recurse(parents): + key = tuple(parents) + items = self.browse_cache.get(key, None) + if items is None: + items = self.list_runs_and_pths(parents) + self.browse_cache[key] = items + for item in items: + if item.type == 'run' and imgui.begin_menu(item.name): + recurse([item.path]) + imgui.end_menu() + if item.type == 'pth': + clicked, _state = 
imgui.menu_item(item.name) + if clicked: + self.load(item.path, ignore_errors=True) + if len(items) == 0: + with imgui_utils.grayed_out(): + imgui.menu_item('No results found') + recurse(self.search_dirs) + if self.browse_refocus: + imgui.set_scroll_here() + viz.skip_frame() # Focus will change on next frame. + self.browse_refocus = False + imgui.end_popup() + + paths = viz.pop_drag_and_drop_paths() + if paths is not None and len(paths) >= 1: + self.load(paths[0], ignore_errors=True) + + viz.args.pyramid_tri_grid_ckpt = self.cur_pth + + def list_runs_and_pths(self, parents): + items = [] + run_regex = re.compile(r'\d+-.*') + pth_regex = re.compile(r'network-snapshot-\d+\.pth') + for parent in set(parents): + if os.path.isdir(parent): + for entry in os.scandir(parent): + if entry.is_dir() and run_regex.fullmatch(entry.name): + items.append(dnnlib.EasyDict(type='run', name=entry.name, path=os.path.join(parent, entry.name))) + if entry.is_file() and pth_regex.fullmatch(entry.name): + items.append(dnnlib.EasyDict(type='pth', name=entry.name, path=os.path.join(parent, entry.name))) + + items = sorted(items, key=lambda item: (item.name.replace('_', ' '), item.path)) + return items + + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/render_depth_sample_widget.py b/3DPortraitGAN_pyramid/viz/render_depth_sample_widget.py new file mode 100644 index 0000000..27c48f7 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/render_depth_sample_widget.py @@ -0,0 +1,40 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import imgui +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class RenderDepthSampleWidget: + def __init__(self, viz): + self.viz = viz + self.depth_mult = 2 + self.depth_importance_mult = 2 + self.render_types = [.5, 1, 2, 4] + self.labels = ['0.5x', '1x', '2x', '4x'] + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + + if show: + imgui.text('Render Type') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 4): + _clicked, self.depth_mult = imgui.combo('Depth Sample Multiplier', self.depth_mult, self.labels) + imgui.same_line(viz.label_w + viz.font_size * 16 + viz.spacing * 2) + with imgui_utils.item_width(viz.font_size * 4): + _clicked, self.depth_importance_mult = imgui.combo('Depth Sample Importance Multiplier', self.depth_importance_mult, self.labels) + + viz.args.depth_mult = self.render_types[self.depth_mult] + viz.args.depth_importance_mult = self.render_types[self.depth_importance_mult] + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/render_type_widget.py b/3DPortraitGAN_pyramid/viz/render_type_widget.py new file mode 100644 index 0000000..fcfff4e --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/render_type_widget.py @@ -0,0 +1,35 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import imgui +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class RenderTypeWidget: + def __init__(self, viz): + self.viz = viz + self.render_type = 0 + self.render_types = ['image_raw', 'image_depth'] + self.labels = ['RGB Image', 'Depth Image'] + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + + if show: + imgui.text('Render Type') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 10): + _clicked, self.render_type = imgui.combo('', self.render_type, self.labels) + + viz.args.render_type = self.render_types[self.render_type] + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/renderer.py b/3DPortraitGAN_pyramid/viz/renderer.py new file mode 100644 index 0000000..dc041ef --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/renderer.py @@ -0,0 +1,498 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import sys +import copy +import traceback +import numpy as np +import torch +import torch.fft +import torch.nn +import matplotlib.cm +import dnnlib +from torch_utils.ops import upfirdn2d +import legacy # pylint: disable=import-error + +from camera_utils import LookAtPoseSampler +import os + +#---------------------------------------------------------------------------- + +class CapturedException(Exception): + def __init__(self, msg=None): + if msg is None: + _type, value, _traceback = sys.exc_info() + assert value is not None + if isinstance(value, CapturedException): + msg = str(value) + else: + msg = traceback.format_exc() + assert isinstance(msg, str) + super().__init__(msg) + +#---------------------------------------------------------------------------- + +class CaptureSuccess(Exception): + def __init__(self, out): + super().__init__() + self.out = out + +#---------------------------------------------------------------------------- + +def _sinc(x): + y = (x * np.pi).abs() + z = torch.sin(y) / y.clamp(1e-30, float('inf')) + return torch.where(y < 1e-30, torch.ones_like(x), z) + +def _lanczos_window(x, a): + x = x.abs() / a + return torch.where(x < 1, _sinc(x), torch.zeros_like(x)) + +#---------------------------------------------------------------------------- + +def _construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1): + assert a <= amax < aflt + mat = torch.as_tensor(mat).to(torch.float32) + + # Construct 2D filter taps in input & output coordinate spaces. 
+ taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up) + yi, xi = torch.meshgrid(taps, taps) + xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2) + + # Convolution of two oriented 2D sinc filters. + fi = _sinc(xi * cutoff_in) * _sinc(yi * cutoff_in) + fo = _sinc(xo * cutoff_out) * _sinc(yo * cutoff_out) + f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real + + # Convolution of two oriented 2D Lanczos windows. + wi = _lanczos_window(xi, a) * _lanczos_window(yi, a) + wo = _lanczos_window(xo, a) * _lanczos_window(yo, a) + w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real + + # Construct windowed FIR filter. + f = f * w + + # Finalize. + c = (aflt - amax) * up + f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c] + f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up) + f = f / f.sum([0,2], keepdim=True) / (up ** 2) + f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1] + return f + +#---------------------------------------------------------------------------- + +def _apply_affine_transformation(x, mat, up=4, **filter_kwargs): + _N, _C, H, W = x.shape + mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device) + + # Construct filter. + f = _construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs) + assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1 + p = f.shape[0] // 2 + + # Construct sampling grid. + theta = mat.inverse() + theta[:2, 2] *= 2 + theta[0, 2] += 1 / up / W + theta[1, 2] += 1 / up / H + theta[0, :] *= W / (W + p / up * 2) + theta[1, :] *= H / (H + p / up * 2) + theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1]) + g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False) + + # Resample image. + y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p) + z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False) + + # Form mask. 
+ m = torch.zeros_like(y) + c = p * 2 + 1 + m[:, :, c:-c, c:-c] = 1 + m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False) + return z, m + +#---------------------------------------------------------------------------- + +class Renderer: + def __init__(self): + self._device = torch.device('cuda') + self._pkl_data = dict() # {pkl: dict | CapturedException, ...} + self._networks = dict() # {cache_key: torch.nn.Module, ...} + self._pinned_bufs = dict() # {(shape, dtype): torch.Tensor, ...} + self._cmaps = dict() # {name: torch.Tensor, ...} + self._is_timing = False + self._start_event = torch.cuda.Event(enable_timing=True) + self._end_event = torch.cuda.Event(enable_timing=True) + self._net_layers = dict() # {cache_key: [dnnlib.EasyDict, ...], ...} + + self.input_data = dict() + + def render(self, **args): + self._is_timing = True + self._start_event.record(torch.cuda.current_stream(self._device)) + res = dnnlib.EasyDict() + try: + self._render_impl(res, **args) + except: + res.error = CapturedException() + self._end_event.record(torch.cuda.current_stream(self._device)) + if 'image' in res: + res.image = self.to_cpu(res.image).numpy() + if 'stats' in res: + res.stats = self.to_cpu(res.stats).numpy() + if 'error' in res: + res.error = str(res.error) + if self._is_timing: + self._end_event.synchronize() + res.render_time = self._start_event.elapsed_time(self._end_event) * 1e-3 + self._is_timing = False + return res + + def get_pyramid_tri_grid_ws(self, pyramid_tri_grid_ckpt, device): + + data = self.input_data.get(pyramid_tri_grid_ckpt, None) + if data is None: + + print(f'Loading "{pyramid_tri_grid_ckpt}"... ', end='', flush=True) + ckpt = torch.load(pyramid_tri_grid_ckpt, map_location=lambda storage, loc: storage)['model'] + trigrid = { + 8: ckpt['trigrids_8'].to(device).detach(), + 16: ckpt['trigrids_16'].to(device).detach(), + 32: ckpt['trigrids_32'].to(device).detach(), + 64: ckpt['trigrids_64'].to(device).detach(), + 128: ckpt['trigrids_128'].to(device).detach(), + 256: ckpt['trigrids_256'].to(device).detach(), + 512: ckpt['trigrids_512'].to(device).detach(), + } + ws = ckpt['ws'].to(device) + print('Done.') + self.input_data[pyramid_tri_grid_ckpt] = {'trigrid': trigrid, 'ws': ws} + + else: + trigrid = data['trigrid'] + ws = data['ws'] + + return trigrid, ws + + + def get_network(self, pkl, key, **tweak_kwargs): + data = self._pkl_data.get(pkl, None) + if data is None: + print(f'Loading "{pkl}"... ', end='', flush=True) + try: + with dnnlib.util.open_url(pkl, verbose=False) as f: + data = legacy.load_network_pkl(f) + print('Done.') + except: + data = CapturedException() + print('Failed!') + self._pkl_data[pkl] = data + self._ignore_timing() + if isinstance(data, CapturedException): + raise data + + orig_net = data[key] + cache_key = (orig_net, self._device, tuple(sorted(tweak_kwargs.items()))) + net = self._networks.get(cache_key, None) + if net is None: + try: + net = copy.deepcopy(orig_net) + net = self._tweak_network(net, **tweak_kwargs) + net.to(self._device) + except: + net = CapturedException() + self._networks[cache_key] = net + self._ignore_timing() + if isinstance(net, CapturedException): + raise net + + + return net + + def _tweak_network(self, net): + # Print diagnostics. 
+ + # RELOAD_MODULES = False + # if RELOAD_MODULES: + # from training.triplane import TriPlaneGenerator + # from torch_utils import misc + # print("Reloading Modules!") + # net_new = TriPlaneGenerator(*net.init_args, **net.init_kwargs).eval().requires_grad_(False).to(self._device) + # misc.copy_params_and_buffers(net, net_new, require_all=True) + # net_new.neural_rendering_resolution = net.neural_rendering_resolution + # net_new.rendering_kwargs = net.rendering_kwargs + # net = net_new + # # net.rendering_kwargs['ray_start'] = 'auto' + # # net.rendering_kwargs['ray_end'] = 'auto' + # # net.rendering_kwargs['avg_camera_pivot'] = [0, 0, 0] + + if True: + print("Reloading Modules!") + from training.smpl_triplane import TriPlaneGenerator + from torch_utils import misc + print("Reloading Modules!") + init_kwargs = net.init_kwargs + print('G.init_args: ', net.init_args) + print('G.init_kwargs: ', init_kwargs) + G_new = TriPlaneGenerator(*net.init_args, **init_kwargs).eval().requires_grad_(False).to(self._device) + misc.copy_params_and_buffers(net, G_new, require_all=False) + G_new.neural_rendering_resolution = net.neural_rendering_resolution + G_new.rendering_kwargs = net.rendering_kwargs + G_new.batch_size = 1 + G_new.set_batch_size(1) + net = G_new + print('>>>> G batch: ', net.batch_size) + + + return net + + def _get_pinned_buf(self, ref): + key = (tuple(ref.shape), ref.dtype) + buf = self._pinned_bufs.get(key, None) + if buf is None: + buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory() + self._pinned_bufs[key] = buf + return buf + + def to_device(self, buf): + return self._get_pinned_buf(buf).copy_(buf).to(self._device) + + def to_cpu(self, buf): + return self._get_pinned_buf(buf).copy_(buf).clone() + + def _ignore_timing(self): + self._is_timing = False + + def _apply_cmap(self, x, name='viridis'): + cmap = self._cmaps.get(name, None) + if cmap is None: + cmap = matplotlib.cm.get_cmap(name) + cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3] + cmap = self.to_device(torch.from_numpy(cmap)) + self._cmaps[name] = cmap + hi = cmap.shape[0] - 1 + x = (x * hi + 0.5).clamp(0, hi).to(torch.int64) + x = torch.nn.functional.embedding(x, cmap) + return x + + + def _render_impl(self, res, + pkl = None, + pyramid_tri_grid_ckpt = None, + w0_seeds = [[0, 1]], + stylemix_idx = [], + stylemix_seed = 0, + trunc_psi = 1, + trunc_cutoff = 0, + random_seed = 0, + noise_mode = 'const', + force_fp32 = False, + layer_name = None, + sel_channels = 3, + base_channel = 0, + img_scale_db = 0, + img_normalize = False, + fft_show = False, + fft_all = True, + fft_range_db = 50, + fft_beta = 8, + input_transform = None, + untransform = False, + + yaw = 0, + pitch = 0, + lookat_point = (0, 0.0649, 0), + conditioning_yaw = 0, + conditioning_pitch = 0, + conditioning_body_pose = None, + body_pose = None, + focal_length = 4.2647, + render_type = 'image', + + do_backbone_caching = False, + + depth_mult = 1, + depth_importance_mult = 1, + ): + if not os.path.exists(pyramid_tri_grid_ckpt) or not os.path.exists(pkl): + res.error = 'Pyramid Tri-Grid or pkl file does not exist' + return + if body_pose is None: + body_pose = np.zeros((1, 6), dtype=np.float32) + else: + body_pose = np.array(body_pose, dtype=np.float32) + body_pose = np.reshape(body_pose, (1, -1)) + + + + + # Dig up network details. 
+ G = self.get_network(pkl, 'G_ema').eval().requires_grad_(False).to('cuda') + res.img_resolution = G.img_resolution + res.num_ws = G.backbone.num_ws + res.has_noise = any('noise_const' in name for name, _buf in G.backbone.named_buffers()) + res.has_input_transform = (hasattr(G.backbone, 'input') and hasattr(G.backbone.input, 'transform')) + + # set G rendering kwargs + if 'depth_resolution_default' not in G.rendering_kwargs: + G.rendering_kwargs['depth_resolution_default'] = G.rendering_kwargs['depth_resolution'] + G.rendering_kwargs['depth_resolution_importance_default'] = G.rendering_kwargs['depth_resolution_importance'] + + G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution_default'] * depth_mult) + G.rendering_kwargs['depth_resolution_importance'] = int(G.rendering_kwargs['depth_resolution_importance_default'] * depth_importance_mult) + + # G.init_kwargs.batch_size = 1 + + pyramid_tri_grid,ws = self.get_pyramid_tri_grid_ws(pyramid_tri_grid_ckpt,self._device) + + + # Set input transform. + if res.has_input_transform: + m = np.eye(3) + try: + if input_transform is not None: + m = np.linalg.inv(np.asarray(input_transform)) + except np.linalg.LinAlgError: + res.error = CapturedException() + G.synthesis.input.transform.copy_(torch.from_numpy(m)) + + # Generate random latents. + + if lookat_point is None: + #camera_pivot = torch.tensor(G.rendering_kwargs.get('avg_camera_pivot', (0, 0, 0))) + camera_pivot = torch.tensor([0, 0.0649, 0]) + else: + # override lookat point provided + camera_pivot = torch.tensor(lookat_point) + camera_radius = G.rendering_kwargs.get('avg_camera_radius', 2.7) + + + # Run mapping network. + # w_avg = G.mapping.w_avg + # Run synthesis network. + synthesis_kwargs = dnnlib.EasyDict(noise_mode=noise_mode, force_fp32=force_fp32, cache_backbone=do_backbone_caching) + torch.manual_seed(random_seed) + + # Set camera params + pose = LookAtPoseSampler.sample(3.14/2 + yaw, 3.14/2 + pitch, camera_pivot, radius=camera_radius) + intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]]) + c = torch.cat([pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1).to(ws.device) + + + + # if body pose is not 0: + if not (body_pose == 0).all(): + apply_def = True + body_pose = torch.tensor(body_pose).to(ws.device) + else: + apply_def = False + body_pose = None + + + out = self.run_tri_grid_render(G, ws,pyramid_tri_grid, c) + + + # Untransform. + if untransform and res.has_input_transform: + out, _mask = _apply_affine_transformation(out.to(torch.float32), G.synthesis.input.transform, amax=6) # Override amax to hit the fast path in upfirdn2d. + + # Select channels and compute statistics. + if type(out) == dict: + # is model output. query render type + out = out[render_type][0].to(torch.float32) + else: + out = out[0].to(torch.float32) + + if sel_channels > out.shape[0]: + sel_channels = 1 + base_channel = max(min(base_channel, out.shape[0] - sel_channels), 0) + sel = out[base_channel : base_channel + sel_channels] + res.stats = torch.stack([ + out.mean(), sel.mean(), + out.std(), sel.std(), + out.norm(float('inf')), sel.norm(float('inf')), + ]) + + # normalize if type is 'image_depth' + if render_type == 'image_depth': + out -= out.min() + out /= out.max() + + out -= .5 + out *= -2 + + # Scale and convert to uint8. 
+ img = sel + if img_normalize: + img = img / img.norm(float('inf'), dim=[1,2], keepdim=True).clip(1e-8, 1e8) + img = img * (10 ** (img_scale_db / 20)) + img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8).permute(1, 2, 0) + res.image = img + + # FFT. + if fft_show: + sig = out if fft_all else sel + sig = sig.to(torch.float32) + sig = sig - sig.mean(dim=[1,2], keepdim=True) + sig = sig * torch.kaiser_window(sig.shape[1], periodic=False, beta=fft_beta, device=self._device)[None, :, None] + sig = sig * torch.kaiser_window(sig.shape[2], periodic=False, beta=fft_beta, device=self._device)[None, None, :] + fft = torch.fft.fftn(sig, dim=[1,2]).abs().square().sum(dim=0) + fft = fft.roll(shifts=[fft.shape[0] // 2, fft.shape[1] // 2], dims=[0,1]) + fft = (fft / fft.mean()).log10() * 10 # dB + fft = self._apply_cmap((fft / fft_range_db + 1) / 2) + res.image = torch.cat([img.expand_as(fft), fft], dim=1) + + + + def run_synthesis_net(net, *args, capture_layer=None, **kwargs): # => out, layers + submodule_names = {mod: name for name, mod in net.named_modules()} + unique_names = set() + layers = [] + + def module_hook(module, _inputs, outputs): + outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs] + outputs = [out for out in outputs if isinstance(out, torch.Tensor) and out.ndim in [4, 5]] + for idx, out in enumerate(outputs): + if out.ndim == 5: # G-CNN => remove group dimension. + out = out.mean(2) + name = submodule_names[module] + if name == '': + name = 'output' + if len(outputs) > 1: + name += f':{idx}' + if name in unique_names: + suffix = 2 + while f'{name}_{suffix}' in unique_names: + suffix += 1 + name += f'_{suffix}' + unique_names.add(name) + shape = [int(x) for x in out.shape] + dtype = str(out.dtype).split('.')[-1] + layers.append(dnnlib.EasyDict(name=name, shape=shape, dtype=dtype)) + if name == capture_layer: + raise CaptureSuccess(out) + + hooks = [module.register_forward_hook(module_hook) for module in net.modules()] + try: + out = net.synthesis(*args, **kwargs) + except CaptureSuccess as e: + out = e.out + for hook in hooks: + hook.remove() + return out, layers + + @staticmethod + def run_tri_grid_render(net, w, trigrid,c): # => out, layers + out = net.render_planes(ws=w, planes=trigrid, c=c[0:1], noise_mode='const', + neural_rendering_resolution=256, chunk=4096) + return out + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/stylemix_widget.py b/3DPortraitGAN_pyramid/viz/stylemix_widget.py new file mode 100644 index 0000000..0b84d64 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/stylemix_widget.py @@ -0,0 +1,68 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +import imgui +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class StyleMixingWidget: + def __init__(self, viz): + self.viz = viz + self.seed_def = 1000 + self.seed = self.seed_def + self.animate = False + self.enables = [] + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + num_ws = viz.result.get('num_ws', 0) + num_enables = viz.result.get('num_ws', 18) + self.enables += [False] * max(num_enables - len(self.enables), 0) + + if show: + imgui.text('Stylemix') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 8), imgui_utils.grayed_out(num_ws == 0): + _changed, self.seed = imgui.input_int('##seed', self.seed) + imgui.same_line(viz.label_w + viz.font_size * 8 + viz.spacing) + with imgui_utils.grayed_out(num_ws == 0): + _clicked, self.animate = imgui.checkbox('Anim', self.animate) + + pos2 = imgui.get_content_region_max()[0] - 1 - viz.button_w + pos1 = pos2 - imgui.get_text_line_height() - viz.spacing + pos0 = viz.label_w + viz.font_size * 12 + imgui.push_style_var(imgui.STYLE_FRAME_PADDING, [0, 0]) + for idx in range(num_enables): + imgui.same_line(round(pos0 + (pos1 - pos0) * (idx / (num_enables - 1)))) + if idx == 0: + imgui.set_cursor_pos_y(imgui.get_cursor_pos_y() + 3) + with imgui_utils.grayed_out(num_ws == 0): + _clicked, self.enables[idx] = imgui.checkbox(f'##{idx}', self.enables[idx]) + if imgui.is_item_hovered(): + imgui.set_tooltip(f'{idx}') + imgui.pop_style_var(1) + + imgui.same_line(pos2) + imgui.set_cursor_pos_y(imgui.get_cursor_pos_y() - 3) + with imgui_utils.grayed_out(num_ws == 0): + if imgui_utils.button('Reset', width=-1, enabled=(self.seed != self.seed_def or self.animate or any(self.enables[:num_enables]))): + self.seed = self.seed_def + self.animate = False + self.enables = [False] * num_enables + + if any(self.enables[:num_ws]): + viz.args.stylemix_idx = [idx for idx, enable in enumerate(self.enables) if enable] + viz.args.stylemix_seed = self.seed & ((1 << 32) - 1) + if self.animate: + self.seed += 1 + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/trunc_noise_widget.py b/3DPortraitGAN_pyramid/viz/trunc_noise_widget.py new file mode 100644 index 0000000..c811d63 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/trunc_noise_widget.py @@ -0,0 +1,77 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +import imgui +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class TruncationNoiseWidget: + def __init__(self, viz): + self.viz = viz + self.prev_num_ws = 0 + self.trunc_psi = 0.12 + self.trunc_cutoff = 7 + self.noise_enable = True + self.noise_seed = 0 + self.noise_anim = False + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + num_ws = viz.result.get('num_ws', 0) + has_noise = viz.result.get('has_noise', False) + if num_ws > 0 and num_ws != self.prev_num_ws: + if self.trunc_cutoff > num_ws or self.trunc_cutoff == self.prev_num_ws: + self.trunc_cutoff = num_ws + self.prev_num_ws = num_ws + + if show: + imgui.text('Truncate') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 10), imgui_utils.grayed_out(num_ws == 0): + _changed, self.trunc_psi = imgui.slider_float('##psi', self.trunc_psi, -1, 2, format='Psi %.2f') + imgui.same_line() + if num_ws == 0: + imgui_utils.button('Cutoff 0', width=(viz.font_size * 8 + viz.spacing), enabled=False) + else: + with imgui_utils.item_width(viz.font_size * 8 + viz.spacing): + changed, new_cutoff = imgui.slider_int('##cutoff', self.trunc_cutoff, 0, num_ws, format='Cutoff %d') + if changed: + self.trunc_cutoff = min(max(new_cutoff, 0), num_ws) + + with imgui_utils.grayed_out(not has_noise): + imgui.same_line() + _clicked, self.noise_enable = imgui.checkbox('Noise##enable', self.noise_enable) + imgui.same_line(viz.font_size * 28.7) + with imgui_utils.grayed_out(not self.noise_enable): + with imgui_utils.item_width(-3 - viz.button_w - viz.spacing - viz.font_size * 4): + _changed, self.noise_seed = imgui.input_int('##seed', self.noise_seed) + imgui.same_line(spacing=0) + _clicked, self.noise_anim = imgui.checkbox('Anim##noise', self.noise_anim) + + is_def_trunc = (self.trunc_psi == 1 and self.trunc_cutoff == num_ws) + is_def_noise = (self.noise_enable and self.noise_seed == 0 and not self.noise_anim) + with imgui_utils.grayed_out(is_def_trunc and not has_noise): + imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w) + if imgui_utils.button('Reset', width=-1, enabled=(not is_def_trunc or not is_def_noise)): + self.prev_num_ws = num_ws + self.trunc_psi = 0.12 + self.trunc_cutoff = 7 + self.noise_enable = True + self.noise_seed = 0 + self.noise_anim = False + + if self.noise_anim: + self.noise_seed += 1 + viz.args.update(trunc_psi=self.trunc_psi, trunc_cutoff=self.trunc_cutoff, random_seed=self.noise_seed) + viz.args.noise_mode = ('none' if not self.noise_enable else 'const' if self.noise_seed == 0 else 'random') + +#---------------------------------------------------------------------------- diff --git a/3DPortraitGAN_pyramid/viz/zoom_widget.py b/3DPortraitGAN_pyramid/viz/zoom_widget.py new file mode 100644 index 0000000..40aad64 --- /dev/null +++ b/3DPortraitGAN_pyramid/viz/zoom_widget.py @@ -0,0 +1,43 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +from inspect import formatargvalues +import numpy as np +import imgui +import dnnlib +from gui_utils import imgui_utils + +#---------------------------------------------------------------------------- + +class ZoomWidget: + def __init__(self, viz): + self.viz = viz + self.fov = 12.447863 + self.fov_default = 12.447863 + + @imgui_utils.scoped_by_object_id + def __call__(self, show=True): + viz = self.viz + if show: + imgui.text('FOV') + imgui.same_line(viz.label_w) + with imgui_utils.item_width(viz.font_size * 10): + _changed, self.fov = imgui.slider_float('##fov', self.fov, 12, 45, format='%.2f Degrees') + + imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3) + snapped = round(self.fov) + if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.fov != snapped)): + self.fov = snapped + imgui.same_line() + if imgui_utils.button('Reset', width=-1, enabled=(abs(self.fov - self.fov_default)) > .01): + self.fov = self.fov_default + + viz.args.focal_length = float(1 / (np.tan(self.fov * 3.14159 / 360) * 1.414)) +#---------------------------------------------------------------------------- diff --git a/README.md b/README.md new file mode 100644 index 0000000..3ee0e34 --- /dev/null +++ b/README.md @@ -0,0 +1,399 @@ +# Portrait3D + +> **[SIGGRAPH 2024] Portrait3D: Text-Guided High-Quality 3D Portrait Generation Using Pyramid Representation and GANs Prior** +> +> [Yiqian Wu](https://onethousandwu.com/), [Hao Xu](https://xh38.github.io/), [Xiangjun Tang](https://yuyujunjun.github.io/), [Xien Chen](https://vision.cs.yale.edu/members/xien-chen.html), [Siyu Tang](https://inf.ethz.ch/people/person-detail.MjYyNzgw.TGlzdC8zMDQsLTg3NDc3NjI0MQ==.html), Zhebin Zhang, Chen Li, [Xiaogang Jin*](http://www.cad.zju.edu.cn/home/jin) + +![1f31e](assets/1f31e.png)[Paper]() ![1f431](assets/1f431.png)[Supplementary (Google Drive)]() ![1f98b](assets/1f98b.png)[Project Page]() + +This is the official code repository for our SIG'24 paper: "Portrait3D: Text-Guided High-Quality 3D Portrait Generation Using Pyramid Representation and GANs Prior". + +![Representative_Image](./assets/Representative_Image.jpg) + + +## News ✨ + +- Our paper has been **accepted by SIGGRAPH 2024** ![1f973](assets/1f973.png)! +- We have released all the source code and pre-trained models![1f389](./assets/1f389.png)! + + +## Requirements + +1. Tested on Python 3.8 +3. At least 12 GB of memory +4. Tested on NVIDIA RTX 3080Ti with 12 GB of memory (Windows, 1.5h per portrait) +5. Tested on NVIDIA RTX 4090 with 24 GB of memory (Linux, 0.5h per portrait) +6. CUDA>=11.6 + +## Installation + +Clone this repo to `$PROJECT_ROOT$`. 
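+For reference, a minimal clone step might look like the following (assuming the repository URL referenced later in this README; any local directory can serve as `$PROJECT_ROOT$`):
+
+```
+git clone https://github.com/oneThousand1000/Portrait3D.git
+cd Portrait3D  # this directory is referred to as $PROJECT_ROOT$ below
+```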
+ +**Create environment** + +``` +cd $PROJECT_ROOT$ +conda env create -f environment.yaml +conda activate text_to_3dportrait +``` + +**Torch and torchvision Installation** + +``` +pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 -f https://download.pytorch.org/whl/torch_stable.html +``` + +**OSMesa Dependencies (For Linux)** + +``` +sudo apt install libosmesa6 libosmesa6-dev +``` + +**Installing Additional Requirements** + +``` +pip install -r requirements.txt +``` + +**kaolin Installation** + +``` +pip install kaolin==0.13.0 -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-1.12.1_cu116.html +``` + +**Stable-diffusion Installation** + +``` +cd stable-diffusion +pip install -e . +cd .. +``` + + + +**SMPL Model Setup** + +1. Download [SMPL_python_v.1.0.0.zip](https://smpl.is.tue.mpg.de/download.php) (version 1.0.0 for Python 2.7 (female/male. 10 shape PCs) ). Save `basicModel_f_lbs_10_207_0_v1.0.0.pkl` to `3DPortraitGAN_pyramid/smplx_models/smpl/SMPL_FEMALE.pkl`, save `basicModel_m_lbs_10_207_0_v1.0.0.pkl` to `3DPortraitGAN_pyramid/smplx_models/smpl/SMPL_MALE.pkl`. + +2. Download [SMPLIFY_CODE_V2.ZIP](http://smplify.is.tue.mpg.de/), and save `basicModel_neutral_lbs_10_207_0_v1.0.0.pkl` to `3DPortraitGAN_pyramid/smplx_models/smpl/SMPL_NEUTRAL.pkl`. + +| Download Link | Save Path | +| ------------------------------------------------------------ | -------------------------------------------------------- | +| [basicModel_f_lbs_10_207_0_v1.0.0.pkl](https://smpl.is.tue.mpg.de/download.php) | 3DPortraitGAN_pyramid/smplx_models/smpl/SMPL_FEMALE.pkl | +| [basicModel_m_lbs_10_207_0_v1.0.0.pkl](https://smpl.is.tue.mpg.de/download.php) | 3DPortraitGAN_pyramid/smplx_models/smpl/SMPL_MALE.pkl | +| [basicModel_neutral_lbs_10_207_0_v1.0.0.pkl](http://smplify.is.tue.mpg.de/) | 3DPortraitGAN_pyramid/smplx_models/smpl/SMPL_NEUTRAL.pkl | + + + +## Inference + +### 3DPortraitGAN_pyramid Model + +Our 3DPortraitGAN_pyramid draws inspiration from the 3D-aware StyleGAN2 backbone implemented in [SeanChenxy/Mimic3D](https://github.com/SeanChenxy/Mimic3D), and integrates concepts of mask guidance, background synthesis, and tri-grid representation adapted from [SizheAn/PanoHead](https://github.com/SizheAn/PanoHead). We extend our sincere gratitude for these significant contributions! 
+ +#### (Recommended) Pretrained models + +Download the pre-trained model of 3DPortraitGAN_pyramid: + +| Download Link | Description | Save Path | +| ------------------------------------------------------------ | --------------------------------------------------- | ------------------------------ | +| [model_512.pkl](https://drive.google.com/file/d/1P6k4UwGGNmxa6-rQr2oyIOmAPiLAd_WE/view?usp=sharing) | Pre-trained model of 3DPortraitGAN_pyramid | ./3DPortraitGAN_pyramid/models | +| [model_512.json](https://drive.google.com/file/d/1R6FoQXi4PyIvXtOVoKRohfOXkEkWXdJb/view?usp=sharing) | Pose prediction parameters of 3DPortraitGAN_pyramid | ./3DPortraitGAN_pyramid/models | +| [decoder_512.ckpt](https://drive.google.com/file/d/1r0Lqu1TMm-1Pjj8K963RVM_y72OglJdu/view?usp=sharing) | Decoder checkpoint extracted from model_512.pkl | ./3DPortraitGAN_pyramid/models | +| [vgg16.pt](https://drive.google.com/file/d/1av5jH9jzuOobV9s2gyzx0w9a4xqco82H/view?usp=sharing) | vgg16 | ./3DPortraitGAN_pyramid/models | + +#### (Optional) Training + +Omit this section if utilizing the pre-trained 3DPortraitGAN_pyramid model aforementioned. + +For those interested in the training process, we kindly direct you to our training instructions available [here](https://github.com/oneThousand1000/Portrait3D/tree/main/3DPortraitGAN_pyramid). + + + +### Random Image Generation + +#### Preparing Prompts + +First, prepare your prompts. These should be organized in the following structure: + +``` +test_data +│ +└─── 001 +│ │ +│ └─── prompt.txt (should initiate with "upper body photo") +└─── 002 +│ │ +│ └─── prompt.txt (should initiate with "upper body photo") +└─── ... +``` + +An example is available in `$PROJECT_ROOT$/test_data`. + + + +#### Image generation + +Download the Realistic_Vision_V5.1_noVAE model [here](https://huggingface.co/SG161222/Realistic_Vision_V5.1_noVAE). + +We employ the original stable diffusion in this use case. To convert the diffusers-version model to the original-stable-diffusion-version, follow the steps below: + +``` +cd stable-diffusion + +activate text_to_3dportrait + +git clone git@github.com:huggingface/diffusers.git + +cd diffusers/scripts + +python convert_diffusers_to_original_stable_diffusion.py --model_path $PATH_of_Realistic_Vision_V5.1_noVAE$ --checkpoint_path $PATH_of_Realistic_Vision_V5.1_noVAE$/realisticVisionV51_v51VAE.ckpt + +cd ../../../ +``` + +Then randomly generate images: + +``` +cd stable-diffusion + +activate text_to_3dportrait + +python get_test_data_df.py --test_data_dir ../test_data --sample_num 6 --scale 5 --df_ckpt $PATH_of_Realistic_Vision_V5.1_noVAE$/realisticVisionV51_v51VAE.ckpt + +cd .. +``` + +The generated images will be stored at `$PROJECT_ROOT$/test_data/image_id/samples` + +**Note:** We discovered that using a smaller scale (for example, ` --scale 3`) tends to generate superior results for specific characters, like ''Tyrion Lannister in the Game of Thrones''. Feel free to experiment with different scales to improve the outcome. + + + +#### Image Processing + +Our image processing code is largely adapted from [hongsukchoi/3DCrowdNet_RELEASE](https://github.com/hongsukchoi/3DCrowdNet_RELEASE). 
+
+**Installation**
+
+```
+conda create -n portrait3d_data python=3.8
+
+activate portrait3d_data
+
+cd data_processing
+
+pip install torch==1.10.2+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/torch_stable.html
+
+pip install -r requirements.txt
+
+python -m pip install -e detectron2
+
+cd ..
+```
+
+
+
+For Windows:
+
+```
+pip install pywin32==306
+```
+
+
+
+For Windows users who encounter errors during detectron2 installation, please open an `x64 Native Tools Command Prompt` for Visual Studio and execute `python -m pip install -e detectron2`.
+
+
+
+**Pretrained models**
+
+| Download Link | Save Path |
+| ------------------------------------------------------------ | ------------------------------------------------------------ |
+| [R_101_FPN_DL_soft_s1x.pkl](https://drive.google.com/file/d/1rgrW9bAVbarft57mogUfawRSu2JCUKIT/view?usp=sharing) | `./data_processing/detectron2/projects/DensePose` |
+| [phi_smpl_27554_256.pkl](https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_smpl_27554_256.pkl) | `./data_processing/detectron2/projects/DensePose` |
+| [pose_higher_hrnet_w32_512.pth](https://drive.google.com/drive/folders/1zJbBbIHVQmHJp89t5CD1VF5TIzldpHXn) | `./data_processing/HigherHRNet-Human-Pose-Estimation/models/pytorch/pose_coco` |
+| [crowdhuman_yolov5m.pt](https://drive.google.com/file/d/1gglIwqxaH2iTvy6lZlXuAcMpd_U0GCUb/view?usp=sharing) | `./data_processing/yolov5_crowdhuman` |
+| [basicModel_neutral_lbs_10_207_0_v1.0.0.pkl](http://smplify.is.tue.mpg.de/) | `./data_processing/common/utils/smplpytorch/smplpytorch/native/models` |
+| [VPOSER_CKPT](https://drive.google.com/drive/folders/1KNw99d4-_6DqYXfBp2S3_4OMQ_nMW0uQ?usp=sharing) | `./data_processing/common/utils/human_model_files/smpl/VPOSER_CKPT` |
+| [J_regressor_extra.npy](https://drive.google.com/file/d/1B9e65ahe6TRGv7xE45sScREAAznw9H4t/view?usp=sharing) | `./data_processing/data` |
+| [demo_checkpoint.pth.tar](https://drive.google.com/drive/folders/1YYQHbtxvdljqZNo8CIyFOmZ5yXuwtEhm?usp=sharing) | `./data_processing/demo` |
+
+If you encounter `RuntimeError: Subtraction, the - operator, with a bool tensor is not supported.`, you may refer to [this issue](https://github.com/mks0601/I2L-MeshNet_RELEASE/issues/6#issuecomment-675152527) for a solution, or change L301~L304 of `anaconda3/lib/python3.8/site-packages/torchgeometry/core/conversion.py` to the following:
+
+```
+mask_c0 = mask_d2.float() * mask_d0_d1.float()
+mask_c1 = mask_d2.float() * (1 - mask_d0_d1.float())
+mask_c2 = (1 - mask_d2.float()) * mask_d0_nd1.float()
+mask_c3 = (1 - mask_d2.float()) * (1 - mask_d0_nd1.float())
+```
+
+
+
+Then process the randomly generated images to produce aligned images that follow the alignment setting of 3DPortraitGAN_pyramid:
+
+```
+cd data_processing
+
+activate portrait3d_data
+python preprocess_img_for_inversion.py --test_data_dir=$PROJECT_ROOT$/test_data
+
+cd ..
+```
+
+
+
+**Note:** Manually review and discard any subpar images located in `$PROJECT_ROOT$/test_data/image_id/samples_new_crop/aligned_images`. For optimal inversion results, it is recommended to keep an aligned image with a frontal view and a minimal body pose.
+
+
+
+### 3D Portrait Inversion
+
+**Inversion**
+
+Before proceeding further, always ensure that you have removed all unsatisfactory images from `test_data/image_id/samples_new_crop/aligned_images`. This step is crucial to prevent suboptimal results.
+
+Note that we only run projection for the first image in `test_data/image_id/samples_new_crop/aligned_images`.
+
+```
+cd 3DPortraitGAN_pyramid
+
+activate text_to_3dportrait
+
+python run_inversion_with_pose_optimization.py \
+    --model_pkl=./models/model_512.pkl \
+    --pose_prediction_kwargs_path=./models/model_512.json \
+    --test_data_dir=../test_data \
+    --inversion_name=final_inversion \
+    --with_pose_optim
+```
+
+
+
+**Generate Pyramid Tri-grid from Inversion Results**
+
+```
+python run_trigrid_gen.py \
+    --network=./models/model_512.pkl \
+    --inversion_name=final_inversion
+
+cd ..
+```
+
+
+
+### 3D Portrait Generation and Optimization
+
+Our 3D portrait generation and optimization code is largely adapted from [ashawkey/stable-dreamfusion](https://github.com/ashawkey/stable-dreamfusion). We express our gratitude for their significant contributions!
+
+```
+cd stable-dreamfusion-3DPortrait
+
+python portrait3d_main.py \
+    --trigrid_decoder_ckpt=../3DPortraitGAN_pyramid/models/decoder_512.ckpt \
+    --inversion_name=final_inversion \
+    --network_path=../3DPortraitGAN_pyramid/models/model_512.pkl \
+    --test_data_dir=../test_data \
+    --df_ckpt=$PATH_of_Realistic_Vision_V5.1_noVAE$
+```
+
+The results will be stored and organized as:
+
+```
+stable-dreamfusion-3DPortrait/output/text_to_3dportrait/image_id
+│
+└─── trigrid.pkl [Original pyramid tri-grid generated from inversion results]
+│
+└─── validation [SDS validation images]
+│
+└─── checkpoints [SDS checkpoints]
+│
+└─── run [SDS run file]
+│
+└─── results [SDS rendering results]
+│
+└─── data [21 rendered views, refer to Section 3.5 in our paper]
+│
+└─── update_data [21 refined views, refer to Section 3.5 in our paper]
+│
+└─── log [Pyramid tri-grid optimization log files, refer to Section 3.5 in our paper]
+│   │
+│   └─── ckpt
+│   │   │
+│   │   └─── epoch_00019.pth [Final pyramid tri-grid]
+│   └─── img
+│
+└─── results_final [Final rendering results]
+```
+
+
+
+## Results Gallery
+
+We offer a gallery of 300 3D portraits (with their corresponding prompts) generated by our method, all viewable and accessible on [huggingface](https://huggingface.co/datasets/onethousand/Portrait3D_gallery).
+
+```
+Portrait3D_gallery
+│
+└─── 000
+│   │
+│   └─── 000_pyramid_trigrid.pth [the pyramid tri-grid file]
+│   │
+│   └─── 000_prompt.txt [the prompt]
+│   │
+│   └─── 000_preview.png [the preview image]
+│   │
+│   └─── ...
+└─── 001
+│   │
+│   └─── ...
+└─── 002
+│   │
+│   └─── ...
+│
+└─── ...
+```
+
+To visualize these 3D portraits, use the following visualizer:
+
+```
+cd 3DPortraitGAN_pyramid
+
+activate text_to_3dportrait
+
+python pyramid_trigrid_visualizer.py
+```
+
+Input the path of your `model_512.pkl` into the `Pickle` field, and the pyramid tri-grid path into the `Pyramid Tri-Grid Ckpt` field.
+
+Please note that we **keep the neural rendering resolution at 256** for optimal rendering speed.
+
+
+Enjoy traversing through these results 😉!
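+
+If you prefer to inspect a downloaded gallery entry programmatically before opening the visualizer, a pyramid tri-grid checkpoint is a regular PyTorch file and can be loaded with `torch.load`. The snippet below is only an illustrative sketch: the internal key layout of the checkpoint is not documented here, so it simply reports whatever is stored.
+
+```python
+# inspect_gallery_entry.py -- illustrative only; the path follows the gallery tree above.
+import torch
+
+ckpt_path = "Portrait3D_gallery/000/000_pyramid_trigrid.pth"
+
+# torch.load returns whatever object was saved; for a quick look we report its
+# type and, if it is a dict, the top-level keys and tensor shapes.
+obj = torch.load(ckpt_path, map_location="cpu")
+print(type(obj))
+if isinstance(obj, dict):
+    for key, value in obj.items():
+        shape = tuple(value.shape) if torch.is_tensor(value) else type(value).__name__
+        print(f"{key}: {shape}")
+```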
+ + + +## Contact + +[onethousand@zju.edu.cn](mailto:onethousand@zju.edu.cn) / [onethousand1250@gmail.com](mailto:onethousand1250@gmail.com) + + + +## Citation + +If you find this project helpful to your research, please consider citing: + +``` +Coming soon. +``` + + + +## Acknowledgements + +The work is supported by the Information Technology Center and State Key Lab of CAD&CG, Zhejiang University. We extend our sincere gratitude for the generous provision of necessary computing resources. + +We also want to express our thanks to those in the open-source community for their valuable contributions. + + + diff --git a/assets/1f31e.png b/assets/1f31e.png new file mode 100644 index 0000000..322c9c1 Binary files /dev/null and b/assets/1f31e.png differ diff --git a/assets/1f389.png b/assets/1f389.png new file mode 100644 index 0000000..b796f8d Binary files /dev/null and b/assets/1f389.png differ diff --git a/assets/1f431.png b/assets/1f431.png new file mode 100644 index 0000000..ce018a0 Binary files /dev/null and b/assets/1f431.png differ diff --git a/assets/1f973.png b/assets/1f973.png new file mode 100644 index 0000000..0104196 Binary files /dev/null and b/assets/1f973.png differ diff --git a/assets/1f98b.png b/assets/1f98b.png new file mode 100644 index 0000000..27a9d93 Binary files /dev/null and b/assets/1f98b.png differ diff --git a/assets/Representative_Image.jpg b/assets/Representative_Image.jpg new file mode 100644 index 0000000..f59e861 Binary files /dev/null and b/assets/Representative_Image.jpg differ diff --git a/assets/gui.mp4 b/assets/gui.mp4 new file mode 100644 index 0000000..8e6ec78 Binary files /dev/null and b/assets/gui.mp4 differ diff --git a/assets/samples.mp4 b/assets/samples.mp4 new file mode 100644 index 0000000..4eb67a8 Binary files /dev/null and b/assets/samples.mp4 differ diff --git a/data_processing/.gitignore b/data_processing/.gitignore new file mode 100644 index 0000000..b188b7a --- /dev/null +++ b/data_processing/.gitignore @@ -0,0 +1,233 @@ +# Created by .ignore support plugin (hsz.mobi) +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +command + +samples/ +#demo + +HigherHRNet-Human-Pose-Estimation/models + +/demo/*.tar +demo/my_input +simple_HRNet/ +CID/ +process_input_images.py +# Custom +*.pkl +common/utils/human_model_files +data/*/data +data/*/annotations +data/*/images +data/*.npy +data/*/*.npy +output/ +*.pyc + +# C extensions +*.so + +.idea/* + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ + +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +### macOS template +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/dictionaries +.idea/**/shelf + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# CMake +cmake-build-debug/ +cmake-build-release/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +/simple-HRNet/ +/simple_HRNet/ + + +# detectron2 +detectron2/projects/DensePose/*.pkl + +# HRNet +/HigherHRNet-Human-Pose-Estimation/models/ + + +/low_resolution_data/ + +# yolo +/yolov5_crowdhuman/crowdhuman_yolov5m.pt + +# classifier +/classifier/*/*.pth + + +/common/utils/smplpytorch/smplpytorch/native/models/*.pkl \ No newline at end of file diff --git a/data_processing/.gitkeep b/data_processing/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/.gitignore b/data_processing/HigherHRNet-Human-Pose-Estimation/.gitignore new file mode 100644 index 0000000..c1b85b6 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/.gitignore @@ -0,0 +1,95 @@ +# IntelliJ project files +.idea +*.iml +out +gen + +### Vim template +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist +*~ + +### IPythonNotebook template +# Temporary data +.ipynb_checkpoints/ + +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# 
Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +#lib/ +#lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +*.ipynb +*.params +*.json +.vscode/ + +lib/pycocotools/_mask.c +lib/nms/cpu_nms.c + +output/* +models/* +log/* +data/* +external/ + +draws/ +plot/ + diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/LICENSE b/data_processing/HigherHRNet-Human-Pose-Estimation/LICENSE new file mode 100644 index 0000000..d4e6a8f --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 HRNet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/README.md b/data_processing/HigherHRNet-Human-Pose-Estimation/README.md new file mode 100644 index 0000000..d07031b --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/README.md @@ -0,0 +1,272 @@ +# [HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation (CVPR 2020)](https://arxiv.org/abs/1908.10357) + +## News +* \[2021/04/12\] Welcome to check out our recent work on bottom-up pose estimation (CVPR 2021) [HRNet-DEKR](https://github.com/HRNet/DEKR)! +* \[2020/07/05\] [A very nice blog](https://towardsdatascience.com/overview-of-human-pose-estimation-neural-networks-hrnet-higherhrnet-architectures-and-faq-1954b2f8b249) from Towards Data Science introducing HRNet and HigherHRNet for human pose estimation. +* \[2020/03/12\] Support train/test on the CrowdPose dataset. +* \[2020/02/24\] HigherHRNet is accepted to CVPR2020! +* \[2019/11/23\] Code and models for [HigherHRNet](https://arxiv.org/abs/1908.10357) are now released! 
+* \[2019/08/27\] HigherHRNet is now on [ArXiv](https://arxiv.org/abs/1908.10357). We will also release code and models, stay tuned! + +## Introduction +This is the official code of [HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation](https://arxiv.org/abs/1908.10357). +Bottom-up human pose estimation methods have difficulties in predicting the correct pose for small persons due to challenges in scale variation. In this paper, we present **HigherHRNet**: a novel bottom-up human pose estimation method for learning scale-aware representations using high-resolution feature pyramids. Equipped with multi-resolution supervision for training and multi-resolution aggregation for inference, the proposed approach is able to solve the scale variation challenge in *bottom-up multi-person* pose estimation and localize keypoints more precisely, especially for small person. The feature pyramid in HigherHRNet consists of feature map outputs from HRNet and upsampled higher-resolution outputs through a transposed convolution. HigherHRNet outperforms the previous best bottom-up method by 2.5% AP for medium person on COCO test-dev, showing its effectiveness in handling scale variation. Furthermore, HigherHRNet achieves new state-of-the-art result on COCO test-dev (70.5% AP) without using refinement or other post-processing techniques, surpassing all existing bottom-up methods. HigherHRNet even surpasses all top-down methods on CrowdPose test (67.6% AP), suggesting its robustness in crowded scene. + +![Illustrating the architecture of the proposed Higher-HRNet](/figures/arch_v2.png) + +## Main Results +### Results on COCO val2017 without multi-scale test +| Method | Backbone | Input size | #Params | GFLOPs | AP | Ap .5 | AP .75 | AP (M) | AP (L) | +|--------------------|----------|------------|---------|--------|-------|-------|--------|--------|--------| +| HigherHRNet | HRNet-w32 | 512 | 28.6M | 47.9 | 67.1 | 86.2 | 73.0 | 61.5 | 76.1 | +| HigherHRNet | HRNet-w32 | 640 | 28.6M | 74.8 | 68.5 | 87.1 | 74.7 | 64.3 | 75.3 | +| HigherHRNet | HRNet-w48 | 640 | 63.8M | 154.3 | 69.9 | 87.2 | 76.1 | 65.4 | 76.4 | + +### Results on COCO val2017 *with* multi-scale test +| Method | Backbone | Input size | #Params | GFLOPs | AP | Ap .5 | AP .75 | AP (M) | AP (L) | +|--------------------|----------|------------|---------|--------|-------|-------|--------|--------|--------| +| HigherHRNet | HRNet-w32 | 512 | 28.6M | 47.9 | 69.9 | 87.1 | 76.0 | 65.3 | 77.0 | +| HigherHRNet | HRNet-w32 | 640 | 28.6M | 74.8 | 70.6 | 88.1 | 76.9 | 66.6 | 76.5 | +| HigherHRNet | HRNet-w48 | 640 | 63.8M | 154.3 | 72.1 | 88.4 | 78.2 | 67.8 | 78.3 | + +### Results on COCO test-dev2017 without multi-scale test +| Method | Backbone | Input size | #Params | GFLOPs | AP | Ap .5 | AP .75 | AP (M) | AP (L) | +|--------------------|----------|------------|---------|--------|-------|-------|--------|--------|--------| +| OpenPose\* | - | - | - | - | 61.8 | 84.9 | 67.5 | 57.1 | 68.2 | +| Hourglass | Hourglass | 512 | 277.8M | 206.9 | 56.6 | 81.8 | 61.8 | 49.8 | 67.0 | +| PersonLab | ResNet-152 | 1401 | 68.7M | 405.5 | 66.5 | 88.0 | 72.6 | 62.4 | 72.3 | +| PifPaf | - | - | - | - | 66.7 | - | - | 62.4 | 72.9 | +| Bottom-up HRNet | HRNet-w32 | 512 | 28.5M | 38.9 | 64.1 | 86.3 | 70.4 | 57.4 | 73.9 | +| **HigherHRNet** | HRNet-w32 | 512 | 28.6M | 47.9 | 66.4 | 87.5 | 72.8 | 61.2 | 74.2 | +| **HigherHRNet** | HRNet-w48 | 
640 | 63.8M | 154.3 | **68.4** | **88.2** | **75.1** | **64.4** | **74.2** | + +### Results on COCO test-dev2017 *with* multi-scale test +| Method | Backbone | Input size | #Params | GFLOPs | AP | Ap .5 | AP .75 | AP (M) | AP (L) | +|--------------------|----------|------------|---------|--------|-------|-------|--------|--------|--------| +| Hourglass | Hourglass | 512 | 277.8M | 206.9 | 63.0 | 85.7 | 68.9 | 58.0 | 70.4 | +| Hourglass\* | Hourglass | 512 | 277.8M | 206.9 | 65.5 | 86.8 | 72.3 | 60.6 | 72.6 | +| PersonLab | ResNet-152 | 1401 | 68.7M | 405.5 | 68.7 | 89.0 | 75.4 | 64.1 | 75.5 | +| **HigherHRNet** | HRNet-w48 | 640 | 63.8M | 154.3 | **70.5** | **89.3** | **77.2** | **66.6** | **75.8** | + +### Results on CrowdPose test +| Method | AP | Ap .5 | AP .75 | AP (E) | AP (M) | AP (H) | +|--------------------|-------|-------|--------|--------|--------|--------| +| Mask-RCNN | 57.2 | 83.5 | 60.3 | 69.4 | 57.9 | 45.8 | +| AlphaPose | 61.0 | 81.3 | 66.0 | 71.2 | 61.4 | 51.1 | +| SPPE | 66.0. | 84.2 | 71.5 | 75.5 | 66.3 | 57.4 | +| OpenPose | - | - | - | 62.7 | 48.7 | 32.3 | +| **HigherHRNet** | 65.9 | 86.4 | 70.6 | 73.3 | 66.5 | 57.9 | +| **HigherHRNet+** | **67.6** | **87.4** | **72.6** | **75.8** | **68.1** | **58.9** | + +*Note: + indicates using multi-scale test.* + +## Environment +The code is developed using python 3.6 on Ubuntu 16.04. NVIDIA GPUs are needed. The code is developed and tested using 4 NVIDIA P100 GPU cards. Other platforms or GPU cards are not fully tested. + +## Quick start +### Installation +1. Install pytorch >= v1.1.0 following [official instruction](https://pytorch.org/). + - **Tested with pytorch v1.4.0** +2. Clone this repo, and we'll call the directory that you cloned as ${POSE_ROOT}. +3. Install dependencies: + ``` + pip install -r requirements.txt + ``` +4. Install [COCOAPI](https://github.com/cocodataset/cocoapi): + ``` + # COCOAPI=/path/to/clone/cocoapi + git clone https://github.com/cocodataset/cocoapi.git $COCOAPI + cd $COCOAPI/PythonAPI + # Install into global site-packages + make install + # Alternatively, if you do not have permissions or prefer + # not to install the COCO API into global site-packages + python3 setup.py install --user + ``` + Note that instructions like # COCOAPI=/path/to/install/cocoapi indicate that you should pick a path where you'd like to have the software cloned and then set an environment variable (COCOAPI in this case) accordingly. +5. Install [CrowdPoseAPI](https://github.com/Jeff-sjtu/CrowdPose) exactly the same as COCOAPI. + - **There is a bug in the CrowdPoseAPI, please reverse https://github.com/Jeff-sjtu/CrowdPose/commit/785e70d269a554b2ba29daf137354103221f479e** +6. Init output(training model output directory) and log(tensorboard log directory) directory: + + ``` + mkdir output + mkdir log + ``` + + Your directory tree should look like this: + + ``` + ${POSE_ROOT} + ├── data + ├── experiments + ├── lib + ├── log + ├── models + ├── output + ├── tools + ├── README.md + └── requirements.txt + ``` + +7. 
Download pretrained models from our model zoo([GoogleDrive](https://drive.google.com/open?id=1bdXVmYrSynPLSk5lptvgyQ8fhziobD50) or [OneDrive](https://1drv.ms/f/s!AhIXJn_J-blW4AwKRMklXVzndJT0)) + ``` + ${POSE_ROOT} + `-- models + `-- pytorch + |-- imagenet + | `-- hrnet_w32-36af842e.pth + `-- pose_coco + `-- pose_higher_hrnet_w32_512.pth + + ``` + +### Data preparation + +**For COCO data**, please download from [COCO download](http://cocodataset.org/#download), 2017 Train/Val is needed for COCO keypoints training and validation. +Download and extract them under {POSE_ROOT}/data, and make them look like this: +``` +${POSE_ROOT} +|-- data +`-- |-- coco + `-- |-- annotations + | |-- person_keypoints_train2017.json + | `-- person_keypoints_val2017.json + `-- images + |-- train2017 + | |-- 000000000009.jpg + | |-- 000000000025.jpg + | |-- 000000000030.jpg + | |-- ... + `-- val2017 + |-- 000000000139.jpg + |-- 000000000285.jpg + |-- 000000000632.jpg + |-- ... +``` + +**For CrowdPose data**, please download from [CrowdPose download](https://github.com/Jeff-sjtu/CrowdPose#dataset), Train/Val is needed for CrowdPose keypoints training and validation. +Download and extract them under {POSE_ROOT}/data, and make them look like this: +``` +${POSE_ROOT} +|-- data +`-- |-- crowd_pose + `-- |-- json + | |-- crowdpose_train.json + | |-- crowdpose_val.json + | |-- crowdpose_trainval.json (generated by tools/crowdpose_concat_train_val.py) + | `-- crowdpose_test.json + `-- images + |-- 100000.jpg + |-- 100001.jpg + |-- 100002.jpg + |-- 100003.jpg + |-- 100004.jpg + |-- 100005.jpg + |-- ... +``` +After downloading data, run `python tools/crowdpose_concat_train_val.py` under `${POSE_ROOT}` to create trainval set. + +### Training and Testing + +#### Testing on COCO val2017 dataset using model zoo's models ([GoogleDrive](https://drive.google.com/drive/folders/1X9-TzWpwbX2zQf2To8lB-ZQHMYviYYh6?usp=sharing)) + + +For single-scale testing: + +``` +python tools/valid.py \ + --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml \ + TEST.MODEL_FILE models/pytorch/pose_coco/pose_higher_hrnet_w32_512.pth +``` + +By default, we use horizontal flip. To test without flip: + +``` +python tools/valid.py \ + --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml \ + TEST.MODEL_FILE models/pytorch/pose_coco/pose_higher_hrnet_w32_512.pth \ + TEST.FLIP_TEST False +``` + +Multi-scale testing is also supported, although we do not report results in our paper: + +``` +python tools/valid.py \ + --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml \ + TEST.MODEL_FILE models/pytorch/pose_coco/pose_higher_hrnet_w32_512.pth \ + TEST.SCALE_FACTOR '[0.5, 1.0, 2.0]' +``` + + +#### Training on COCO train2017 dataset + +``` +python tools/dist_train.py \ + --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml +``` + +By default, it will use all available GPUs on the machine for training. 
To specify GPUs, use + +``` +CUDA_VISIBLE_DEVICES=0,1 python tools/dist_train.py \ + --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml +``` + +#### Mixed-precision training +Due to large input size for bottom-up methods, we use mixed-precision training to train our Higher-HRNet by using the following command: +``` +python tools/dist_train.py \ + --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml \ + FP16.ENABLED True FP16.DYNAMIC_LOSS_SCALE True +``` + +#### Synchronized BatchNorm training +If you have limited GPU memory, please try to reduce batch size and use SyncBN to train our Higher-HRNet by using the following command: +``` +python tools/dist_train.py \ + --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml \ + FP16.ENABLED True FP16.DYNAMIC_LOSS_SCALE True \ + MODEL.SYNC_BN True +``` + +Our code for mixed-precision training is borrowed from [NVIDIA Apex API](https://github.com/NVIDIA/apex). + +#### Training on CrowdPose trainval dataset + +``` +python tools/dist_train.py \ + --cfg experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3.yaml +``` + + +### Other applications +Many other dense prediction tasks, such as segmentation, face alignment and object detection, etc. have been benefited by HRNet. More information can be found at [Deep High-Resolution Representation Learning](https://jingdongwang2017.github.io/Projects/HRNet/). + +### Other implementations +[mmpose](https://github.com/open-mmlab/mmpose) + +## Citation +If you find this work or code is helpful in your research, please cite: +```` +@inproceedings{cheng2020bottom, + title={HigherHRNet: Scale-Aware Representation Learning for Bottom-Up Human Pose Estimation}, + author={Bowen Cheng and Bin Xiao and Jingdong Wang and Honghui Shi and Thomas S. 
Huang and Lei Zhang}, + booktitle={CVPR}, + year={2020} +} + +@inproceedings{SunXLW19, + title={Deep High-Resolution Representation Learning for Human Pose Estimation}, + author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang}, + booktitle={CVPR}, + year={2019} +} + +@article{wang2019deep, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Wang, Jingdong and Sun, Ke and Cheng, Tianheng and Jiang, Borui and Deng, Chaorui and Zhao, Yang and Liu, Dong and Mu, Yadong and Tan, Mingkui and Wang, Xinggang and Liu, Wenyu and Xiao, Bin}, + journal={TPAMI}, + year={2019} +} +```` + diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml new file mode 100644 index 0000000..c202448 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml @@ -0,0 +1,129 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: coco_kpt + DATASET_TEST: coco + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 512 + OUTPUT_SIZE: [128, 256] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 17 + ROOT: 'data/coco' + TEST: val2017 + TRAIN: train2017 +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: [0.001, 0.001] + PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: ['*'] + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + - 256 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 32 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 17 + PRETRAINED: 'models/pytorch/imagenet/hrnet_w32-36af842e.pth' + TAG_PER_JOINT: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: '' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 12 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w32_640_adam_lr1e-3.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w32_640_adam_lr1e-3.yaml new file mode 100644 index 0000000..d77e433 --- /dev/null +++ 
b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w32_640_adam_lr1e-3.yaml @@ -0,0 +1,132 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +FP16: + ENABLED: True + DYNAMIC_LOSS_SCALE: True +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: coco_kpt + DATASET_TEST: coco + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 640 + OUTPUT_SIZE: [160, 320] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 17 + ROOT: 'data/coco' + TEST: val2017 + TRAIN: train2017 +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: [0.001, 0.001] + PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: ['*'] + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + - 256 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 32 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 17 + PRETRAINED: 'models/pytorch/imagenet/hrnet_w32-36af842e.pth' + TAG_PER_JOINT: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: '' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 12 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w48_640_adam_lr1e-3.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w48_640_adam_lr1e-3.yaml new file mode 100644 index 0000000..f259608 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/coco/higher_hrnet/w48_640_adam_lr1e-3.yaml @@ -0,0 +1,132 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +FP16: + ENABLED: True + DYNAMIC_LOSS_SCALE: True +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: coco_kpt + DATASET_TEST: coco + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 640 + OUTPUT_SIZE: [160, 320] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 17 + ROOT: 'data/coco' + TEST: val2017 + TRAIN: train2017 +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: [0.001, 0.001] + 
PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: ['*'] + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + - 384 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 48 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 17 + PRETRAINED: 'models/pytorch/imagenet/hrnet_w48-8ef0771d.pth' + TAG_PER_JOINT: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: '' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 10 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3.yaml new file mode 100644 index 0000000..51cea9d --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3.yaml @@ -0,0 +1,129 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: crowd_pose_kpt + DATASET_TEST: crowd_pose + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 512 + OUTPUT_SIZE: [128, 256] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 14 + ROOT: 'data/crowd_pose' + TEST: test + TRAIN: trainval +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: [0.001, 0.001] + PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: ['*'] + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + - 256 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 32 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 14 + PRETRAINED: 'models/pytorch/imagenet/hrnet_w32-36af842e.pth' + TAG_PER_JOINT: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: 
'' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 12 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3_coco.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3_coco.yaml new file mode 100644 index 0000000..3309275 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3_coco.yaml @@ -0,0 +1,140 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: crowd_pose_kpt + DATASET_TEST: crowd_pose + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 512 + OUTPUT_SIZE: [128, 256] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 14 + ROOT: 'data/crowd_pose' + TEST: test + TRAIN: trainval +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: [0.001, 0.001] + PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: + - 'conv1' + - 'bn1' + - 'conv2' + - 'bn2' + - 'layer1' + - 'transition1' + - 'stage2' + - 'transition2' + - 'stage3' + - 'transition3' + - 'stage4' + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + - 256 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 32 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 14 + PRETRAINED: 'models/pytorch/pose_coco/pose_higher_hrnet_w32_512.pth' + TAG_PER_JOINT: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: '' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 12 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3_syncbn.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3_syncbn.yaml new file mode 100644 index 0000000..8da9464 --- /dev/null +++ 
b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_512_adam_lr1e-3_syncbn.yaml @@ -0,0 +1,130 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: crowd_pose_kpt + DATASET_TEST: crowd_pose + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 512 + OUTPUT_SIZE: [128, 256] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 14 + ROOT: 'data/crowd_pose' + TEST: test + TRAIN: trainval +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: [0.001, 0.001] + PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: ['*'] + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + - 256 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 32 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 14 + PRETRAINED: 'models/pytorch/imagenet/hrnet_w32-36af842e.pth' + TAG_PER_JOINT: True + SYNC_BN: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: '' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 12 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_640_adam_lr1e-3.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_640_adam_lr1e-3.yaml new file mode 100644 index 0000000..0b278ec --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w32_640_adam_lr1e-3.yaml @@ -0,0 +1,132 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +FP16: + ENABLED: True + DYNAMIC_LOSS_SCALE: True +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: crowd_pose_kpt + DATASET_TEST: crowd_pose + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 640 + OUTPUT_SIZE: [160, 320] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 14 + ROOT: 'data/crowd_pose' + TEST: test + TRAIN: trainval +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: 
[0.001, 0.001] + PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: ['*'] + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 32 + - 64 + - 128 + - 256 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 32 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 14 + PRETRAINED: 'models/pytorch/imagenet/hrnet_w32-36af842e.pth' + TAG_PER_JOINT: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: '' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 12 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w48_640_adam_lr1e-3.yaml b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w48_640_adam_lr1e-3.yaml new file mode 100644 index 0000000..162941f --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/experiments/crowd_pose/higher_hrnet/w48_640_adam_lr1e-3.yaml @@ -0,0 +1,132 @@ +AUTO_RESUME: True +DATA_DIR: '' +GPUS: (0,) +LOG_DIR: log +OUTPUT_DIR: output +PRINT_FREQ: 100 +FP16: + ENABLED: True + DYNAMIC_LOSS_SCALE: True +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + SIGMA: 2 + DATASET: crowd_pose_kpt + DATASET_TEST: crowd_pose + DATA_FORMAT: jpg + FLIP: 0.5 + INPUT_SIZE: 640 + OUTPUT_SIZE: [160, 320] + MAX_NUM_PEOPLE: 30 + MAX_ROTATION: 30 + MAX_SCALE: 1.5 + SCALE_TYPE: 'short' + MAX_TRANSLATE: 40 + MIN_SCALE: 0.75 + NUM_JOINTS: 14 + ROOT: 'data/crowd_pose' + TEST: test + TRAIN: trainval +DEBUG: + DEBUG: True + SAVE_BATCH_IMAGES_GT: False + SAVE_BATCH_IMAGES_PRED: False + SAVE_HEATMAPS_GT: True + SAVE_HEATMAPS_PRED: True + SAVE_TAGMAPS_PRED: True +LOSS: + NUM_STAGES: 2 + AE_LOSS_TYPE: exp + WITH_AE_LOSS: [True, False] + PUSH_LOSS_FACTOR: [0.001, 0.001] + PULL_LOSS_FACTOR: [0.001, 0.001] + WITH_HEATMAPS_LOSS: [True, True] + HEATMAPS_LOSS_FACTOR: [1.0, 1.0] +MODEL: + EXTRA: + FINAL_CONV_KERNEL: 1 + PRETRAINED_LAYERS: ['*'] + STEM_INPLANES: 64 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + - 384 + FUSE_METHOD: SUM + DECONV: + NUM_DECONVS: 1 + NUM_CHANNELS: + - 48 + KERNEL_SIZE: + - 4 + NUM_BASIC_BLOCKS: 4 + CAT_OUTPUT: + - True + INIT_WEIGHTS: True + NAME: pose_higher_hrnet + NUM_JOINTS: 14 + PRETRAINED: 'models/pytorch/imagenet/hrnet_w48-8ef0771d.pth' + 
TAG_PER_JOINT: True +TEST: + FLIP_TEST: True + IMAGES_PER_GPU: 1 + MODEL_FILE: '' + SCALE_FACTOR: [1] + DETECTION_THRESHOLD: 0.1 + WITH_HEATMAPS: (True, True) + WITH_AE: (True, False) + PROJECT2IMAGE: True + NMS_KERNEL: 5 + NMS_PADDING: 2 +TRAIN: + BEGIN_EPOCH: 0 + CHECKPOINT: '' + END_EPOCH: 300 + GAMMA1: 0.99 + GAMMA2: 0.0 + IMAGES_PER_GPU: 10 + LR: 0.001 + LR_FACTOR: 0.1 + LR_STEP: [200, 260] + MOMENTUM: 0.9 + NESTEROV: False + OPTIMIZER: adam + RESUME: False + SHUFFLE: True + WD: 0.0001 +WORKERS: 4 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/figures/arch_v2.png b/data_processing/HigherHRNet-Human-Pose-Estimation/figures/arch_v2.png new file mode 100644 index 0000000..77aac93 Binary files /dev/null and b/data_processing/HigherHRNet-Human-Pose-Estimation/figures/arch_v2.png differ diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/__init__.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/__init__.py new file mode 100644 index 0000000..aeeb6ba --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/__init__.py @@ -0,0 +1,9 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (Bin.Xiao@microsoft.com) +# ------------------------------------------------------------------------------ + +from .default import _C as cfg +from .default import update_config +from .default import check_config diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/default.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/default.py new file mode 100644 index 0000000..0ecf29b --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/default.py @@ -0,0 +1,219 @@ + +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from yacs.config import CfgNode as CN + +from .models import MODEL_EXTRAS + + +_C = CN() + +_C.OUTPUT_DIR = '' +_C.LOG_DIR = '' +_C.DATA_DIR = '' +_C.GPUS = (0,) +_C.WORKERS = 4 +_C.PRINT_FREQ = 20 +_C.AUTO_RESUME = False +_C.PIN_MEMORY = True +_C.RANK = 0 +_C.VERBOSE = True +_C.DIST_BACKEND = 'nccl' +_C.MULTIPROCESSING_DISTRIBUTED = True + +# FP16 training params +_C.FP16 = CN() +_C.FP16.ENABLED = False +_C.FP16.STATIC_LOSS_SCALE = 1.0 +_C.FP16.DYNAMIC_LOSS_SCALE = False + +# Cudnn related params +_C.CUDNN = CN() +_C.CUDNN.BENCHMARK = True +_C.CUDNN.DETERMINISTIC = False +_C.CUDNN.ENABLED = True + +# common params for NETWORK +_C.MODEL = CN() +_C.MODEL.NAME = 'pose_multi_resolution_net_v16' +_C.MODEL.INIT_WEIGHTS = True +_C.MODEL.PRETRAINED = '' +_C.MODEL.NUM_JOINTS = 17 +_C.MODEL.TAG_PER_JOINT = True +_C.MODEL.EXTRA = CN(new_allowed=True) +_C.MODEL.SYNC_BN = False + +_C.LOSS = CN() +_C.LOSS.NUM_STAGES = 1 +_C.LOSS.WITH_HEATMAPS_LOSS = (True,) +_C.LOSS.HEATMAPS_LOSS_FACTOR = (1.0,) +_C.LOSS.WITH_AE_LOSS = (True,) +_C.LOSS.AE_LOSS_TYPE = 'max' +_C.LOSS.PUSH_LOSS_FACTOR = (0.001,) +_C.LOSS.PULL_LOSS_FACTOR = (0.001,) + +# DATASET related params +_C.DATASET = CN() +_C.DATASET.ROOT = '' +_C.DATASET.DATASET = 'coco_kpt' +_C.DATASET.DATASET_TEST = 'coco' +_C.DATASET.NUM_JOINTS = 17 +_C.DATASET.MAX_NUM_PEOPLE = 30 +_C.DATASET.TRAIN = 'train2017' +_C.DATASET.TEST = 'val2017' +_C.DATASET.DATA_FORMAT = 'jpg' + +# training data augmentation +_C.DATASET.MAX_ROTATION = 30 +_C.DATASET.MIN_SCALE = 0.75 +_C.DATASET.MAX_SCALE = 1.25 +_C.DATASET.SCALE_TYPE = 'short' +_C.DATASET.MAX_TRANSLATE = 40 +_C.DATASET.INPUT_SIZE = 512 +_C.DATASET.OUTPUT_SIZE = [128, 256, 512] +_C.DATASET.FLIP = 0.5 + +# heatmap generator (default is OUTPUT_SIZE/64) +_C.DATASET.SIGMA = -1 +_C.DATASET.SCALE_AWARE_SIGMA = False +_C.DATASET.BASE_SIZE = 256.0 +_C.DATASET.BASE_SIGMA = 2.0 +_C.DATASET.INT_SIGMA = False + +_C.DATASET.WITH_CENTER = False + +# train +_C.TRAIN = CN() + +_C.TRAIN.LR_FACTOR = 0.1 +_C.TRAIN.LR_STEP = [90, 110] +_C.TRAIN.LR = 0.001 + +_C.TRAIN.OPTIMIZER = 'adam' +_C.TRAIN.MOMENTUM = 0.9 +_C.TRAIN.WD = 0.0001 +_C.TRAIN.NESTEROV = False +_C.TRAIN.GAMMA1 = 0.99 +_C.TRAIN.GAMMA2 = 0.0 + +_C.TRAIN.BEGIN_EPOCH = 0 +_C.TRAIN.END_EPOCH = 140 + +_C.TRAIN.RESUME = False +_C.TRAIN.CHECKPOINT = '' + +_C.TRAIN.IMAGES_PER_GPU = 32 +_C.TRAIN.SHUFFLE = True + +# testing +_C.TEST = CN() + +# size of images for each device +# _C.TEST.BATCH_SIZE = 32 +_C.TEST.IMAGES_PER_GPU = 32 +# Test Model Epoch +_C.TEST.FLIP_TEST = False +_C.TEST.ADJUST = True +_C.TEST.REFINE = True +_C.TEST.SCALE_FACTOR = [1] +# group +_C.TEST.DETECTION_THRESHOLD = 0.2 +_C.TEST.TAG_THRESHOLD = 1. 
+_C.TEST.USE_DETECTION_VAL = True +_C.TEST.IGNORE_TOO_MUCH = False +_C.TEST.MODEL_FILE = '' +_C.TEST.IGNORE_CENTER = True +_C.TEST.NMS_KERNEL = 3 +_C.TEST.NMS_PADDING = 1 +_C.TEST.PROJECT2IMAGE = False + +_C.TEST.WITH_HEATMAPS = (True,) +_C.TEST.WITH_AE = (True,) + +_C.TEST.LOG_PROGRESS = False + +# debug +_C.DEBUG = CN() +_C.DEBUG.DEBUG = True +_C.DEBUG.SAVE_BATCH_IMAGES_GT = False +_C.DEBUG.SAVE_BATCH_IMAGES_PRED = False +_C.DEBUG.SAVE_HEATMAPS_GT = True +_C.DEBUG.SAVE_HEATMAPS_PRED = True +_C.DEBUG.SAVE_TAGMAPS_PRED = True + + +def update_config(cfg, args): + cfg.defrost() + cfg.merge_from_file(args.cfg) + cfg.merge_from_list(args.opts) + + if not os.path.exists(cfg.DATASET.ROOT): + cfg.DATASET.ROOT = os.path.join( + cfg.DATA_DIR, cfg.DATASET.ROOT + ) + + cfg.MODEL.PRETRAINED = os.path.join( + cfg.DATA_DIR, cfg.MODEL.PRETRAINED + ) + + if cfg.TEST.MODEL_FILE: + cfg.TEST.MODEL_FILE = os.path.join( + cfg.DATA_DIR, cfg.TEST.MODEL_FILE + ) + + if cfg.DATASET.WITH_CENTER: + cfg.DATASET.NUM_JOINTS += 1 + cfg.MODEL.NUM_JOINTS = cfg.DATASET.NUM_JOINTS + + if not isinstance(cfg.DATASET.OUTPUT_SIZE, (list, tuple)): + cfg.DATASET.OUTPUT_SIZE = [cfg.DATASET.OUTPUT_SIZE] + if not isinstance(cfg.LOSS.WITH_HEATMAPS_LOSS, (list, tuple)): + cfg.LOSS.WITH_HEATMAPS_LOSS = (cfg.LOSS.WITH_HEATMAPS_LOSS) + + if not isinstance(cfg.LOSS.HEATMAPS_LOSS_FACTOR, (list, tuple)): + cfg.LOSS.HEATMAPS_LOSS_FACTOR = (cfg.LOSS.HEATMAPS_LOSS_FACTOR) + + if not isinstance(cfg.LOSS.WITH_AE_LOSS, (list, tuple)): + cfg.LOSS.WITH_AE_LOSS = (cfg.LOSS.WITH_AE_LOSS) + + if not isinstance(cfg.LOSS.PUSH_LOSS_FACTOR, (list, tuple)): + cfg.LOSS.PUSH_LOSS_FACTOR = (cfg.LOSS.PUSH_LOSS_FACTOR) + + if not isinstance(cfg.LOSS.PULL_LOSS_FACTOR, (list, tuple)): + cfg.LOSS.PULL_LOSS_FACTOR = (cfg.LOSS.PULL_LOSS_FACTOR) + + cfg.freeze() + + +def check_config(cfg): + assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.WITH_HEATMAPS_LOSS), \ + 'LOSS.NUM_SCALE should be the same as the length of LOSS.WITH_HEATMAPS_LOSS' + assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.HEATMAPS_LOSS_FACTOR), \ + 'LOSS.NUM_SCALE should be the same as the length of LOSS.HEATMAPS_LOSS_FACTOR' + assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.WITH_AE_LOSS), \ + 'LOSS.NUM_SCALE should be the same as the length of LOSS.WITH_AE_LOSS' + assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.PUSH_LOSS_FACTOR), \ + 'LOSS.NUM_SCALE should be the same as the length of LOSS.PUSH_LOSS_FACTOR' + assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.PULL_LOSS_FACTOR), \ + 'LOSS.NUM_SCALE should be the same as the length of LOSS.PULL_LOSS_FACTOR' + assert cfg.LOSS.NUM_STAGES == len(cfg.TEST.WITH_HEATMAPS), \ + 'LOSS.NUM_SCALE should be the same as the length of TEST.WITH_HEATMAPS' + assert cfg.LOSS.NUM_STAGES == len(cfg.TEST.WITH_AE), \ + 'LOSS.NUM_SCALE should be the same as the length of TEST.WITH_AE' + + +if __name__ == '__main__': + import sys + with open(sys.argv[1], 'w') as f: + print(_C, file=f) diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/models.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/models.py new file mode 100644 index 0000000..f8d258f --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/config/models.py @@ -0,0 +1,62 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Bin Xiao (leoxiaobin@gmail.com)
+# ------------------------------------------------------------------------------
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from yacs.config import CfgNode as CN
+
+
+# pose_multi_resolution_net related params
+POSE_HIGHER_RESOLUTION_NET = CN()
+POSE_HIGHER_RESOLUTION_NET.PRETRAINED_LAYERS = ['*']
+POSE_HIGHER_RESOLUTION_NET.STEM_INPLANES = 64
+POSE_HIGHER_RESOLUTION_NET.FINAL_CONV_KERNEL = 1
+
+POSE_HIGHER_RESOLUTION_NET.STAGE1 = CN()
+POSE_HIGHER_RESOLUTION_NET.STAGE1.NUM_MODULES = 1
+POSE_HIGHER_RESOLUTION_NET.STAGE1.NUM_BRANCHES = 1
+POSE_HIGHER_RESOLUTION_NET.STAGE1.NUM_BLOCKS = [4]
+POSE_HIGHER_RESOLUTION_NET.STAGE1.NUM_CHANNELS = [64]
+POSE_HIGHER_RESOLUTION_NET.STAGE1.BLOCK = 'BOTTLENECK'
+POSE_HIGHER_RESOLUTION_NET.STAGE1.FUSE_METHOD = 'SUM'
+
+POSE_HIGHER_RESOLUTION_NET.STAGE2 = CN()
+POSE_HIGHER_RESOLUTION_NET.STAGE2.NUM_MODULES = 1
+POSE_HIGHER_RESOLUTION_NET.STAGE2.NUM_BRANCHES = 2
+POSE_HIGHER_RESOLUTION_NET.STAGE2.NUM_BLOCKS = [4, 4]
+POSE_HIGHER_RESOLUTION_NET.STAGE2.NUM_CHANNELS = [24, 48]
+POSE_HIGHER_RESOLUTION_NET.STAGE2.BLOCK = 'BOTTLENECK'
+POSE_HIGHER_RESOLUTION_NET.STAGE2.FUSE_METHOD = 'SUM'
+
+POSE_HIGHER_RESOLUTION_NET.STAGE3 = CN()
+POSE_HIGHER_RESOLUTION_NET.STAGE3.NUM_MODULES = 1
+POSE_HIGHER_RESOLUTION_NET.STAGE3.NUM_BRANCHES = 3
+POSE_HIGHER_RESOLUTION_NET.STAGE3.NUM_BLOCKS = [4, 4, 4]
+POSE_HIGHER_RESOLUTION_NET.STAGE3.NUM_CHANNELS = [24, 48, 92]
+POSE_HIGHER_RESOLUTION_NET.STAGE3.BLOCK = 'BOTTLENECK'
+POSE_HIGHER_RESOLUTION_NET.STAGE3.FUSE_METHOD = 'SUM'
+
+POSE_HIGHER_RESOLUTION_NET.STAGE4 = CN()
+POSE_HIGHER_RESOLUTION_NET.STAGE4.NUM_MODULES = 1
+POSE_HIGHER_RESOLUTION_NET.STAGE4.NUM_BRANCHES = 4
+POSE_HIGHER_RESOLUTION_NET.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
+POSE_HIGHER_RESOLUTION_NET.STAGE4.NUM_CHANNELS = [24, 48, 92, 192]
+POSE_HIGHER_RESOLUTION_NET.STAGE4.BLOCK = 'BOTTLENECK'
+POSE_HIGHER_RESOLUTION_NET.STAGE4.FUSE_METHOD = 'SUM'
+
+POSE_HIGHER_RESOLUTION_NET.DECONV = CN()
+POSE_HIGHER_RESOLUTION_NET.DECONV.NUM_DCONVS = 2
+POSE_HIGHER_RESOLUTION_NET.DECONV.NUM_CHANNELS = [32, 32]
+POSE_HIGHER_RESOLUTION_NET.DECONV.NUM_BASIC_BLOCKS = 4
+POSE_HIGHER_RESOLUTION_NET.DECONV.KERNEL_SIZE = [2, 2]
+POSE_HIGHER_RESOLUTION_NET.DECONV.CAT_OUTPUT = [True, True]
+
+
+MODEL_EXTRAS = {
+    'pose_multi_resolution_net_v16': POSE_HIGHER_RESOLUTION_NET,
+}
diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/group.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/group.py
new file mode 100644
index 0000000..f09be8f
--- /dev/null
+++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/group.py
@@ -0,0 +1,283 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) Microsoft
+# Licensed under the MIT License.
+# Some code is from https://github.com/princeton-vl/pose-ae-train/blob/454d4ba113bbb9775d4dc259ef5e6c07c2ceed54/utils/group.py +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from munkres import Munkres +import numpy as np +import torch + + +def py_max_match(scores): + m = Munkres() + tmp = m.compute(scores) + tmp = np.array(tmp).astype(np.int32) + return tmp + + +def match_by_tag(inp, params): + assert isinstance(params, Params), 'params should be class Params()' + + tag_k, loc_k, val_k = inp + default_ = np.zeros((params.num_joints, 3 + tag_k.shape[2])) + + joint_dict = {} + tag_dict = {} + for i in range(params.num_joints): + idx = params.joint_order[i] + + tags = tag_k[idx] + joints = np.concatenate( + (loc_k[idx], val_k[idx, :, None], tags), 1 + ) + mask = joints[:, 2] > params.detection_threshold + tags = tags[mask] + joints = joints[mask] + + if joints.shape[0] == 0: + continue + + if i == 0 or len(joint_dict) == 0: + for tag, joint in zip(tags, joints): + key = tag[0] + joint_dict.setdefault(key, np.copy(default_))[idx] = joint + tag_dict[key] = [tag] + else: + grouped_keys = list(joint_dict.keys())[:params.max_num_people] + grouped_tags = [np.mean(tag_dict[i], axis=0) for i in grouped_keys] + + if params.ignore_too_much \ + and len(grouped_keys) == params.max_num_people: + continue + + diff = joints[:, None, 3:] - np.array(grouped_tags)[None, :, :] + diff_normed = np.linalg.norm(diff, ord=2, axis=2) + diff_saved = np.copy(diff_normed) + + if params.use_detection_val: + diff_normed = np.round(diff_normed) * 100 - joints[:, 2:3] + + num_added = diff.shape[0] + num_grouped = diff.shape[1] + + if num_added > num_grouped: + diff_normed = np.concatenate( + ( + diff_normed, + np.zeros((num_added, num_added-num_grouped))+1e10 + ), + axis=1 + ) + + pairs = py_max_match(diff_normed) + for row, col in pairs: + if ( + row < num_added + and col < num_grouped + and diff_saved[row][col] < params.tag_threshold + ): + key = grouped_keys[col] + joint_dict[key][idx] = joints[row] + tag_dict[key].append(tags[row]) + else: + key = tags[row][0] + joint_dict.setdefault(key, np.copy(default_))[idx] = \ + joints[row] + tag_dict[key] = [tags[row]] + + ans = np.array([joint_dict[i] for i in joint_dict]).astype(np.float32) + return ans + + +class Params(object): + def __init__(self, cfg): + self.num_joints = cfg.DATASET.NUM_JOINTS + self.max_num_people = cfg.DATASET.MAX_NUM_PEOPLE + + self.detection_threshold = cfg.TEST.DETECTION_THRESHOLD + self.tag_threshold = cfg.TEST.TAG_THRESHOLD + self.use_detection_val = cfg.TEST.USE_DETECTION_VAL + self.ignore_too_much = cfg.TEST.IGNORE_TOO_MUCH + + if cfg.DATASET.WITH_CENTER and cfg.TEST.IGNORE_CENTER: + self.num_joints -= 1 + + if cfg.DATASET.WITH_CENTER and not cfg.TEST.IGNORE_CENTER: + self.joint_order = [ + i-1 for i in [18, 1, 2, 3, 4, 5, 6, 7, 12, 13, 8, 9, 10, 11, 14, 15, 16, 17] + ] + else: + self.joint_order = [ + i-1 for i in [1, 2, 3, 4, 5, 6, 7, 12, 13, 8, 9, 10, 11, 14, 15, 16, 17] + ] + + +class HeatmapParser(object): + def __init__(self, cfg): + self.params = Params(cfg) + self.tag_per_joint = cfg.MODEL.TAG_PER_JOINT + self.pool = torch.nn.MaxPool2d( + cfg.TEST.NMS_KERNEL, 1, cfg.TEST.NMS_PADDING + ) + + def nms(self, det): + maxm = self.pool(det) + maxm 
= torch.eq(maxm, det).float() + det = det * maxm + return det + + def match(self, tag_k, loc_k, val_k): + match = lambda x: match_by_tag(x, self.params) + return list(map(match, zip(tag_k, loc_k, val_k))) + + def top_k(self, det, tag): + # det = torch.Tensor(det, requires_grad=False) + # tag = torch.Tensor(tag, requires_grad=False) + + det = self.nms(det) + num_images = det.size(0) + num_joints = det.size(1) + h = det.size(2) + w = det.size(3) + det = det.view(num_images, num_joints, -1) + val_k, ind = det.topk(self.params.max_num_people, dim=2) + + tag = tag.view(tag.size(0), tag.size(1), w*h, -1) + if not self.tag_per_joint: + tag = tag.expand(-1, self.params.num_joints, -1, -1) + + tag_k = torch.stack( + [ + torch.gather(tag[:, :, :, i], 2, ind) + for i in range(tag.size(3)) + ], + dim=3 + ) + + x = ind % w + y = (ind / w).long() + + ind_k = torch.stack((x, y), dim=3) + + ans = { + 'tag_k': tag_k.cpu().numpy(), + 'loc_k': ind_k.cpu().numpy(), + 'val_k': val_k.cpu().numpy() + } + + return ans + + def adjust(self, ans, det): + for batch_id, people in enumerate(ans): + for people_id, i in enumerate(people): + for joint_id, joint in enumerate(i): + if joint[2] > 0: + y, x = joint[0:2] + xx, yy = int(x), int(y) + #print(batch_id, joint_id, det[batch_id].shape) + tmp = det[batch_id][joint_id] + if tmp[xx, min(yy+1, tmp.shape[1]-1)] > tmp[xx, max(yy-1, 0)]: + y += 0.25 + else: + y -= 0.25 + + if tmp[min(xx+1, tmp.shape[0]-1), yy] > tmp[max(0, xx-1), yy]: + x += 0.25 + else: + x -= 0.25 + ans[batch_id][people_id, joint_id, 0:2] = (y+0.5, x+0.5) + return ans + + def refine(self, det, tag, keypoints): + """ + Given initial keypoint predictions, we identify missing joints + :param det: numpy.ndarray of size (17, 128, 128) + :param tag: numpy.ndarray of size (17, 128, 128) if not flip + :param keypoints: numpy.ndarray of size (17, 4) if not flip, last dim is (x, y, det score, tag score) + :return: + """ + if len(tag.shape) == 3: + # tag shape: (17, 128, 128, 1) + tag = tag[:, :, :, None] + + tags = [] + for i in range(keypoints.shape[0]): + if keypoints[i, 2] > 0: + # save tag value of detected keypoint + x, y = keypoints[i][:2].astype(np.int32) + tags.append(tag[i, y, x]) + + # mean tag of current detected people + prev_tag = np.mean(tags, axis=0) + ans = [] + + for i in range(keypoints.shape[0]): + # score of joints i at all position + tmp = det[i, :, :] + # distance of all tag values with mean tag of current detected people + tt = (((tag[i, :, :] - prev_tag[None, None, :]) ** 2).sum(axis=2) ** 0.5) + tmp2 = tmp - np.round(tt) + + # find maximum position + y, x = np.unravel_index(np.argmax(tmp2), tmp.shape) + xx = x + yy = y + # detection score at maximum position + val = tmp[y, x] + # offset by 0.5 + x += 0.5 + y += 0.5 + + # add a quarter offset + if tmp[yy, min(xx + 1, tmp.shape[1] - 1)] > tmp[yy, max(xx - 1, 0)]: + x += 0.25 + else: + x -= 0.25 + + if tmp[min(yy + 1, tmp.shape[0] - 1), xx] > tmp[max(0, yy - 1), xx]: + y += 0.25 + else: + y -= 0.25 + + ans.append((x, y, val)) + ans = np.array(ans) + + if ans is not None: + for i in range(det.shape[0]): + # add keypoint if it is not detected + if ans[i, 2] > 0 and keypoints[i, 2] == 0: + # if ans[i, 2] > 0.01 and keypoints[i, 2] == 0: + keypoints[i, :2] = ans[i, :2] + keypoints[i, 2] = ans[i, 2] + + return keypoints + + def parse(self, det, tag, adjust=True, refine=True): + ans = self.match(**self.top_k(det, tag)) + + if adjust: + ans = self.adjust(ans, det) + + scores = [i[:, 2].mean() for i in ans[0]] + + if refine: + ans = ans[0] + # for 
every detected person + for i in range(len(ans)): + det_numpy = det[0].cpu().numpy() + tag_numpy = tag[0].cpu().numpy() + if not self.tag_per_joint: + tag_numpy = np.tile( + tag_numpy, (self.params.num_joints, 1, 1, 1) + ) + ans[i] = self.refine(det_numpy, tag_numpy, ans[i]) + ans = [ans] + + return ans, scores diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/inference.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/inference.py new file mode 100644 index 0000000..fbc427e --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/inference.py @@ -0,0 +1,208 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import torch + +from dataset.transforms import FLIP_CONFIG + + +def get_outputs( + cfg, model, image, with_flip=False, + project2image=False, size_projected=None +): + outputs = [] + heatmaps = [] + tags = [] + + outputs.append(model(image)) + heatmaps.append(outputs[-1][:, :cfg.DATASET.NUM_JOINTS]) + tags.append(outputs[-1][:, cfg.DATASET.NUM_JOINTS:]) + + if with_flip: + outputs.append(model(torch.flip(image, [3]))) + outputs[-1] = torch.flip(outputs[-1], [3]) + heatmaps.append(outputs[-1][:, :cfg.DATASET.NUM_JOINTS]) + tags.append(outputs[-1][:, cfg.DATASET.NUM_JOINTS:]) + if 'coco' in cfg.DATASET.DATASET: + dataset_name = 'COCO' + elif 'crowd_pose' in cfg.DATASET.DATASET: + dataset_name = 'CROWDPOSE' + else: + raise ValueError('Please implement flip_index for new dataset: %s.' 
% cfg.DATASET.DATASET) + flip_index = FLIP_CONFIG[dataset_name + '_WITH_CENTER'] \ + if cfg.DATASET.WITH_CENTER else FLIP_CONFIG[dataset_name] + heatmaps[-1] = heatmaps[-1][:, flip_index, :, :] + if cfg.MODEL.TAG_PER_JOINT: + tags[-1] = tags[-1][:, flip_index, :, :] + + if cfg.DATASET.WITH_CENTER and cfg.TEST.IGNORE_CENTER: + heatmaps = [hms[:, :-1] for hms in heatmaps] + tags = [tms[:, :-1] for tms in tags] + + if project2image and size_projected: + heatmaps = [ + torch.nn.functional.interpolate( + hms, + size=(size_projected[1], size_projected[0]), + mode='bilinear', + align_corners=False + ) + for hms in heatmaps + ] + + tags = [ + torch.nn.functional.interpolate( + tms, + size=(size_projected[1], size_projected[0]), + mode='bilinear', + align_corners=False + ) + for tms in tags + ] + + return outputs, heatmaps, tags + + +def get_multi_stage_outputs( + cfg, model, image, with_flip=False, + project2image=False, size_projected=None +): + # outputs = [] + heatmaps_avg = 0 + num_heatmaps = 0 + heatmaps = [] + tags = [] + + outputs = model(image) + for i, output in enumerate(outputs): + if len(outputs) > 1 and i != len(outputs) - 1: + output = torch.nn.functional.interpolate( + output, + size=(outputs[-1].size(2), outputs[-1].size(3)), + mode='bilinear', + align_corners=False + ) + + offset_feat = cfg.DATASET.NUM_JOINTS \ + if cfg.LOSS.WITH_HEATMAPS_LOSS[i] else 0 + + if cfg.LOSS.WITH_HEATMAPS_LOSS[i] and cfg.TEST.WITH_HEATMAPS[i]: + heatmaps_avg += output[:, :cfg.DATASET.NUM_JOINTS] + num_heatmaps += 1 + + if cfg.LOSS.WITH_AE_LOSS[i] and cfg.TEST.WITH_AE[i]: + tags.append(output[:, offset_feat:]) + + if num_heatmaps > 0: + heatmaps.append(heatmaps_avg/num_heatmaps) + + if with_flip: + if 'coco' in cfg.DATASET.DATASET: + dataset_name = 'COCO' + elif 'crowd_pose' in cfg.DATASET.DATASET: + dataset_name = 'CROWDPOSE' + else: + raise ValueError('Please implement flip_index for new dataset: %s.' 
% cfg.DATASET.DATASET) + flip_index = FLIP_CONFIG[dataset_name + '_WITH_CENTER'] \ + if cfg.DATASET.WITH_CENTER else FLIP_CONFIG[dataset_name] + + heatmaps_avg = 0 + num_heatmaps = 0 + outputs_flip = model(torch.flip(image, [3])) + for i in range(len(outputs_flip)): + output = outputs_flip[i] + if len(outputs_flip) > 1 and i != len(outputs_flip) - 1: + output = torch.nn.functional.interpolate( + output, + size=(outputs_flip[-1].size(2), outputs_flip[-1].size(3)), + mode='bilinear', + align_corners=False + ) + output = torch.flip(output, [3]) + outputs.append(output) + + offset_feat = cfg.DATASET.NUM_JOINTS \ + if cfg.LOSS.WITH_HEATMAPS_LOSS[i] else 0 + + if cfg.LOSS.WITH_HEATMAPS_LOSS[i] and cfg.TEST.WITH_HEATMAPS[i]: + heatmaps_avg += \ + output[:, :cfg.DATASET.NUM_JOINTS][:, flip_index, :, :] + num_heatmaps += 1 + + if cfg.LOSS.WITH_AE_LOSS[i] and cfg.TEST.WITH_AE[i]: + tags.append(output[:, offset_feat:]) + if cfg.MODEL.TAG_PER_JOINT: + tags[-1] = tags[-1][:, flip_index, :, :] + + heatmaps.append(heatmaps_avg/num_heatmaps) + + if cfg.DATASET.WITH_CENTER and cfg.TEST.IGNORE_CENTER: + heatmaps = [hms[:, :-1] for hms in heatmaps] + tags = [tms[:, :-1] for tms in tags] + + if project2image and size_projected: + heatmaps = [ + torch.nn.functional.interpolate( + hms, + size=(size_projected[1], size_projected[0]), + mode='bilinear', + align_corners=False + ) + for hms in heatmaps + ] + + tags = [ + torch.nn.functional.interpolate( + tms, + size=(size_projected[1], size_projected[0]), + mode='bilinear', + align_corners=False + ) + for tms in tags + ] + + return outputs, heatmaps, tags + + +def aggregate_results( + cfg, scale_factor, final_heatmaps, tags_list, heatmaps, tags +): + if scale_factor == 1 or len(cfg.TEST.SCALE_FACTOR) == 1: + if final_heatmaps is not None and not cfg.TEST.PROJECT2IMAGE: + tags = [ + torch.nn.functional.interpolate( + tms, + size=(final_heatmaps.size(2), final_heatmaps.size(3)), + mode='bilinear', + align_corners=False + ) + for tms in tags + ] + for tms in tags: + tags_list.append(torch.unsqueeze(tms, dim=4)) + + heatmaps_avg = (heatmaps[0] + heatmaps[1])/2.0 if cfg.TEST.FLIP_TEST \ + else heatmaps[0] + + if final_heatmaps is None: + final_heatmaps = heatmaps_avg + elif cfg.TEST.PROJECT2IMAGE: + final_heatmaps += heatmaps_avg + else: + final_heatmaps += torch.nn.functional.interpolate( + heatmaps_avg, + size=(final_heatmaps.size(2), final_heatmaps.size(3)), + mode='bilinear', + align_corners=False + ) + + return final_heatmaps, tags_list diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/loss.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/loss.py new file mode 100644 index 0000000..ab580bb --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/loss.py @@ -0,0 +1,324 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import torch +import torch.nn as nn + + +logger = logging.getLogger(__name__) + + +def make_input(t, requires_grad=False, need_cuda=True): + inp = torch.autograd.Variable(t, requires_grad=requires_grad) + inp = inp.sum() + if need_cuda: + inp = inp.cuda() + return inp + + +class HeatmapLoss(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, pred, gt, mask): + assert pred.size() == gt.size() + loss = ((pred - gt)**2) * mask[:, None, :, :].expand_as(pred) + loss = loss.mean(dim=3).mean(dim=2).mean(dim=1) + # loss = loss.mean(dim=3).mean(dim=2).sum(dim=1) + return loss + + +class AELoss(nn.Module): + def __init__(self, loss_type): + super().__init__() + self.loss_type = loss_type + + def singleTagLoss(self, pred_tag, joints): + """ + associative embedding loss for one image + """ + tags = [] + pull = 0 + for joints_per_person in joints: + tmp = [] + for joint in joints_per_person: + if joint[1] > 0: + tmp.append(pred_tag[joint[0]]) + if len(tmp) == 0: + continue + tmp = torch.stack(tmp) + tags.append(torch.mean(tmp, dim=0)) + pull = pull + torch.mean((tmp - tags[-1].expand_as(tmp))**2) + + num_tags = len(tags) + if num_tags == 0: + return make_input(torch.zeros(1).float()), \ + make_input(torch.zeros(1).float()) + elif num_tags == 1: + return make_input(torch.zeros(1).float()), \ + pull/(num_tags) + + tags = torch.stack(tags) + + size = (num_tags, num_tags) + A = tags.expand(*size) + B = A.permute(1, 0) + + diff = A - B + + if self.loss_type == 'exp': + diff = torch.pow(diff, 2) + push = torch.exp(-diff) + push = torch.sum(push) - num_tags + elif self.loss_type == 'max': + diff = 1 - torch.abs(diff) + push = torch.clamp(diff, min=0).sum() - num_tags + else: + raise ValueError('Unkown ae loss type') + + return push/((num_tags - 1) * num_tags) * 0.5, \ + pull/(num_tags) + + def forward(self, tags, joints): + """ + accumulate the tag loss for each image in the batch + """ + pushes, pulls = [], [] + joints = joints.cpu().data.numpy() + batch_size = tags.size(0) + for i in range(batch_size): + push, pull = self.singleTagLoss(tags[i], joints[i]) + pushes.append(push) + pulls.append(pull) + return torch.stack(pushes), torch.stack(pulls) + + +class JointsMSELoss(nn.Module): + def __init__(self, use_target_weight): + super(JointsMSELoss, self).__init__() + self.criterion = nn.MSELoss(size_average=True) + self.use_target_weight = use_target_weight + + def forward(self, output, target, target_weight): + batch_size = output.size(0) + num_joints = output.size(1) + heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1) + heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1) + loss = 0 + + for idx in range(num_joints): + heatmap_pred = heatmaps_pred[idx].squeeze() + heatmap_gt = heatmaps_gt[idx].squeeze() + if self.use_target_weight: + loss += 0.5 * self.criterion( + heatmap_pred.mul(target_weight[:, idx]), + heatmap_gt.mul(target_weight[:, idx]) + ) + else: + loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) + + return loss / num_joints + + +class LossFactory(nn.Module): + def __init__(self, cfg): + super().__init__() + self.num_joints = cfg.DATASET.NUM_JOINTS + self.heatmaps_loss = None + self.ae_loss = None + 
self.heatmaps_loss_factor = 1.0 + self.push_loss_factor = 1.0 + self.pull_loss_factor = 1.0 + + if cfg.LOSS.WITH_HEATMAPS_LOSS: + self.heatmaps_loss = HeatmapLoss() + self.heatmaps_loss_factor = cfg.LOSS.HEATMAPS_LOSS_FACTOR + if cfg.LOSS.WITH_AE_LOSS: + self.ae_loss = AELoss(cfg.LOSS.AE_LOSS_TYPE) + self.push_loss_factor = cfg.LOSS.PUSH_LOSS_FACTOR + self.pull_loss_factor = cfg.LOSS.PULL_LOSS_FACTOR + + if not self.heatmaps_loss and not self.ae_loss: + logger.error('At least enable one loss!') + + def forward(self, outputs, heatmaps, masks, joints): + # TODO(bowen): outputs and heatmaps can be lists of same length + heatmaps_pred = outputs[:, :self.num_joints] + tags_pred = outputs[:, self.num_joints:] + + heatmaps_loss = None + push_loss = None + pull_loss = None + + if self.heatmaps_loss is not None: + heatmaps_loss = self.heatmaps_loss(heatmaps_pred, heatmaps, masks) + heatmaps_loss = heatmaps_loss * self.heatmaps_loss_factor + + if self.ae_loss is not None: + batch_size = tags_pred.size()[0] + tags_pred = tags_pred.contiguous().view(batch_size, -1, 1) + + push_loss, pull_loss = self.ae_loss(tags_pred, joints) + push_loss = push_loss * self.push_loss_factor + pull_loss = pull_loss * self.pull_loss_factor + + return [heatmaps_loss], [push_loss], [pull_loss] + + +class MultiLossFactory(nn.Module): + def __init__(self, cfg): + super().__init__() + # init check + self._init_check(cfg) + + self.num_joints = cfg.MODEL.NUM_JOINTS + self.num_stages = cfg.LOSS.NUM_STAGES + + self.heatmaps_loss = \ + nn.ModuleList( + [ + HeatmapLoss() + if with_heatmaps_loss else None + for with_heatmaps_loss in cfg.LOSS.WITH_HEATMAPS_LOSS + ] + ) + self.heatmaps_loss_factor = cfg.LOSS.HEATMAPS_LOSS_FACTOR + + self.ae_loss = \ + nn.ModuleList( + [ + AELoss(cfg.LOSS.AE_LOSS_TYPE) if with_ae_loss else None + for with_ae_loss in cfg.LOSS.WITH_AE_LOSS + ] + ) + self.push_loss_factor = cfg.LOSS.PUSH_LOSS_FACTOR + self.pull_loss_factor = cfg.LOSS.PULL_LOSS_FACTOR + + def forward(self, outputs, heatmaps, masks, joints): + # forward check + self._forward_check(outputs, heatmaps, masks, joints) + + heatmaps_losses = [] + push_losses = [] + pull_losses = [] + for idx in range(len(outputs)): + offset_feat = 0 + if self.heatmaps_loss[idx]: + heatmaps_pred = outputs[idx][:, :self.num_joints] + offset_feat = self.num_joints + + heatmaps_loss = self.heatmaps_loss[idx]( + heatmaps_pred, heatmaps[idx], masks[idx] + ) + heatmaps_loss = heatmaps_loss * self.heatmaps_loss_factor[idx] + heatmaps_losses.append(heatmaps_loss) + else: + heatmaps_losses.append(None) + + if self.ae_loss[idx]: + tags_pred = outputs[idx][:, offset_feat:] + batch_size = tags_pred.size()[0] + tags_pred = tags_pred.contiguous().view(batch_size, -1, 1) + + push_loss, pull_loss = self.ae_loss[idx]( + tags_pred, joints[idx] + ) + push_loss = push_loss * self.push_loss_factor[idx] + pull_loss = pull_loss * self.pull_loss_factor[idx] + + push_losses.append(push_loss) + pull_losses.append(pull_loss) + else: + push_losses.append(None) + pull_losses.append(None) + + return heatmaps_losses, push_losses, pull_losses + + def _init_check(self, cfg): + assert isinstance(cfg.LOSS.WITH_HEATMAPS_LOSS, (list, tuple)), \ + 'LOSS.WITH_HEATMAPS_LOSS should be a list or tuple' + assert isinstance(cfg.LOSS.HEATMAPS_LOSS_FACTOR, (list, tuple)), \ + 'LOSS.HEATMAPS_LOSS_FACTOR should be a list or tuple' + assert isinstance(cfg.LOSS.WITH_AE_LOSS, (list, tuple)), \ + 'LOSS.WITH_AE_LOSS should be a list or tuple' + assert isinstance(cfg.LOSS.PUSH_LOSS_FACTOR, (list, tuple)), \ + 
'LOSS.PUSH_LOSS_FACTOR should be a list or tuple'
+        assert isinstance(cfg.LOSS.PULL_LOSS_FACTOR, (list, tuple)), \
+            'LOSS.PULL_LOSS_FACTOR should be a list or tuple'
+        assert len(cfg.LOSS.WITH_HEATMAPS_LOSS) == cfg.LOSS.NUM_STAGES, \
+            'LOSS.WITH_HEATMAPS_LOSS and LOSS.NUM_STAGES should have same length, got {} vs {}.'.\
+                format(len(cfg.LOSS.WITH_HEATMAPS_LOSS), cfg.LOSS.NUM_STAGES)
+        assert len(cfg.LOSS.WITH_HEATMAPS_LOSS) == len(cfg.LOSS.HEATMAPS_LOSS_FACTOR), \
+            'LOSS.WITH_HEATMAPS_LOSS and LOSS.HEATMAPS_LOSS_FACTOR should have same length, got {} vs {}.'.\
+                format(len(cfg.LOSS.WITH_HEATMAPS_LOSS), len(cfg.LOSS.HEATMAPS_LOSS_FACTOR))
+        assert len(cfg.LOSS.WITH_AE_LOSS) == cfg.LOSS.NUM_STAGES, \
+            'LOSS.WITH_AE_LOSS and LOSS.NUM_STAGES should have same length, got {} vs {}.'.\
+                format(len(cfg.LOSS.WITH_AE_LOSS), cfg.LOSS.NUM_STAGES)
+        assert len(cfg.LOSS.WITH_AE_LOSS) == len(cfg.LOSS.PUSH_LOSS_FACTOR), \
+            'LOSS.WITH_AE_LOSS and LOSS.PUSH_LOSS_FACTOR should have same length, got {} vs {}.'. \
+                format(len(cfg.LOSS.WITH_AE_LOSS), len(cfg.LOSS.PUSH_LOSS_FACTOR))
+        assert len(cfg.LOSS.WITH_AE_LOSS) == len(cfg.LOSS.PULL_LOSS_FACTOR), \
+            'LOSS.WITH_AE_LOSS and LOSS.PULL_LOSS_FACTOR should have same length, got {} vs {}.'. \
+                format(len(cfg.LOSS.WITH_AE_LOSS), len(cfg.LOSS.PULL_LOSS_FACTOR))
+
+    def _forward_check(self, outputs, heatmaps, masks, joints):
+        assert isinstance(outputs, list), \
+            'outputs should be a list, got {} instead.'.format(type(outputs))
+        assert isinstance(heatmaps, list), \
+            'heatmaps should be a list, got {} instead.'.format(type(heatmaps))
+        assert isinstance(masks, list), \
+            'masks should be a list, got {} instead.'.format(type(masks))
+        assert isinstance(joints, list), \
+            'joints should be a list, got {} instead.'.format(type(joints))
+        assert len(outputs) == self.num_stages, \
+            'len(outputs) and num_stages should be the same, got {} vs {}.'.format(len(outputs), self.num_stages)
+        assert len(outputs) == len(heatmaps), \
+            'outputs and heatmaps should have same length, got {} vs {}.'.format(len(outputs), len(heatmaps))
+        assert len(outputs) == len(masks), \
+            'outputs and masks should have same length, got {} vs {}.'.format(len(outputs), len(masks))
+        assert len(outputs) == len(joints), \
+            'outputs and joints should have same length, got {} vs {}.'.format(len(outputs), len(joints))
+        assert len(outputs) == len(self.heatmaps_loss), \
+            'outputs and heatmaps_loss should have same length, got {} vs {}.'. \
+                format(len(outputs), len(self.heatmaps_loss))
+        assert len(outputs) == len(self.ae_loss), \
+            'outputs and ae_loss should have same length, got {} vs {}.'.
\ + format(len(outputs), len(self.ae_loss)) + + +def test_ae_loss(): + import numpy as np + t = torch.tensor( + np.arange(0, 32).reshape(1, 2, 4, 4).astype(np.float)*0.1, + requires_grad=True + ) + t.register_hook(lambda x: print('t', x)) + + ae_loss = AELoss(loss_type='exp') + + joints = np.zeros((2, 2, 2)) + joints[0, 0] = (3, 1) + joints[1, 0] = (10, 1) + joints[0, 1] = (22, 1) + joints[1, 1] = (30, 1) + joints = torch.LongTensor(joints) + joints = joints.view(1, 2, 2, 2) + + t = t.contiguous().view(1, -1, 1) + l = ae_loss(t, joints) + + print(l) + + +if __name__ == '__main__': + test_ae_loss() diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/trainer.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/trainer.py new file mode 100644 index 0000000..0a20940 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/core/trainer.py @@ -0,0 +1,137 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging +import os +import time + +from utils.utils import AverageMeter +from utils.vis import save_debug_images + + +def do_train(cfg, model, data_loader, loss_factory, optimizer, epoch, + output_dir, tb_log_dir, writer_dict, fp16=False): + logger = logging.getLogger("Training") + + batch_time = AverageMeter() + data_time = AverageMeter() + + heatmaps_loss_meter = [AverageMeter() for _ in range(cfg.LOSS.NUM_STAGES)] + push_loss_meter = [AverageMeter() for _ in range(cfg.LOSS.NUM_STAGES)] + pull_loss_meter = [AverageMeter() for _ in range(cfg.LOSS.NUM_STAGES)] + + # switch to train mode + model.train() + + end = time.time() + for i, (images, heatmaps, masks, joints) in enumerate(data_loader): + # measure data loading time + data_time.update(time.time() - end) + + # compute output + outputs = model(images) + + heatmaps = list(map(lambda x: x.cuda(non_blocking=True), heatmaps)) + masks = list(map(lambda x: x.cuda(non_blocking=True), masks)) + joints = list(map(lambda x: x.cuda(non_blocking=True), joints)) + + # loss = loss_factory(outputs, heatmaps, masks) + heatmaps_losses, push_losses, pull_losses = \ + loss_factory(outputs, heatmaps, masks, joints) + + loss = 0 + for idx in range(cfg.LOSS.NUM_STAGES): + if heatmaps_losses[idx] is not None: + heatmaps_loss = heatmaps_losses[idx].mean(dim=0) + heatmaps_loss_meter[idx].update( + heatmaps_loss.item(), images.size(0) + ) + loss = loss + heatmaps_loss + if push_losses[idx] is not None: + push_loss = push_losses[idx].mean(dim=0) + push_loss_meter[idx].update( + push_loss.item(), images.size(0) + ) + loss = loss + push_loss + if pull_losses[idx] is not None: + pull_loss = pull_losses[idx].mean(dim=0) + pull_loss_meter[idx].update( + pull_loss.item(), images.size(0) + ) + loss = loss + pull_loss + + # compute gradient and do update step + optimizer.zero_grad() + if fp16: + optimizer.backward(loss) + else: + loss.backward() + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % cfg.PRINT_FREQ == 0 and cfg.RANK == 0: + msg = 'Epoch: [{0}][{1}/{2}]\t' \ + 'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \ + 'Speed: {speed:.1f} samples/s\t' \ + 'Data: 
{data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
+                  '{heatmaps_loss}{push_loss}{pull_loss}'.format(
+                      epoch, i, len(data_loader),
+                      batch_time=batch_time,
+                      speed=images.size(0)/batch_time.val,
+                      data_time=data_time,
+                      heatmaps_loss=_get_loss_info(heatmaps_loss_meter, 'heatmaps'),
+                      push_loss=_get_loss_info(push_loss_meter, 'push'),
+                      pull_loss=_get_loss_info(pull_loss_meter, 'pull')
+                  )
+            logger.info(msg)
+
+            writer = writer_dict['writer']
+            global_steps = writer_dict['train_global_steps']
+            for idx in range(cfg.LOSS.NUM_STAGES):
+                writer.add_scalar(
+                    'train_stage{}_heatmaps_loss'.format(idx),
+                    heatmaps_loss_meter[idx].val,
+                    global_steps
+                )
+                writer.add_scalar(
+                    'train_stage{}_push_loss'.format(idx),
+                    push_loss_meter[idx].val,
+                    global_steps
+                )
+                writer.add_scalar(
+                    'train_stage{}_pull_loss'.format(idx),
+                    pull_loss_meter[idx].val,
+                    global_steps
+                )
+            writer_dict['train_global_steps'] = global_steps + 1
+
+            prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)
+            for scale_idx in range(len(outputs)):
+                prefix_scale = prefix + '_output_{}'.format(
+                    cfg.DATASET.OUTPUT_SIZE[scale_idx]
+                )
+                save_debug_images(
+                    cfg, images, heatmaps[scale_idx], masks[scale_idx],
+                    outputs[scale_idx], prefix_scale
+                )
+
+
+def _get_loss_info(loss_meters, loss_name):
+    msg = ''
+    for i, meter in enumerate(loss_meters):
+        msg += 'Stage{i}-{name}: {meter.val:.3e} ({meter.avg:.3e})\t'.format(
+            i=i, name=loss_name, meter=meter
+        )
+
+    return msg
diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/COCODataset.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/COCODataset.py
new file mode 100644
index 0000000..265c54a
--- /dev/null
+++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/COCODataset.py
@@ -0,0 +1,309 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) Microsoft
+# Licensed under the MIT License.
+# Written by Bin Xiao (leoxiaobin@gmail.com)
+# Modified by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from collections import defaultdict
+from collections import OrderedDict
+import logging
+import os
+import os.path
+
+import cv2
+import json_tricks as json
+import numpy as np
+from torch.utils.data import Dataset
+
+from pycocotools.cocoeval import COCOeval
+from utils import zipreader
+
+logger = logging.getLogger(__name__)
+
+
+class CocoDataset(Dataset):
+    """`MS Coco Detection `_ Dataset.
+
+    Args:
+        root (string): Root directory where dataset is located to.
+        dataset (string): Dataset name(train2017, val2017, test2017).
+        data_format(string): Data format for reading('jpg', 'zip')
+        transform (callable, optional): A function/transform that takes in an opencv image
+            and returns a transformed version. E.g, ``transforms.ToTensor``
+        target_transform (callable, optional): A function/transform that takes in the
+            target and transforms it.
+ """ + + def __init__(self, root, dataset, data_format, transform=None, + target_transform=None): + from pycocotools.coco import COCO + self.name = 'COCO' + self.root = root + self.dataset = dataset + self.data_format = data_format + self.coco = COCO(self._get_anno_file_name()) + self.ids = list(self.coco.imgs.keys()) + self.transform = transform + self.target_transform = target_transform + + cats = [cat['name'] + for cat in self.coco.loadCats(self.coco.getCatIds())] + self.classes = ['__background__'] + cats + logger.info('=> classes: {}'.format(self.classes)) + self.num_classes = len(self.classes) + self._class_to_ind = dict(zip(self.classes, range(self.num_classes))) + self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds())) + self._coco_ind_to_class_ind = dict( + [ + (self._class_to_coco_ind[cls], self._class_to_ind[cls]) + for cls in self.classes[1:] + ] + ) + + def _get_anno_file_name(self): + # example: root/annotations/person_keypoints_tran2017.json + # image_info_test-dev2017.json + if 'test' in self.dataset: + return os.path.join( + self.root, + 'annotations', + 'image_info_{}.json'.format( + self.dataset + ) + ) + else: + return os.path.join( + self.root, + 'annotations', + 'person_keypoints_{}.json'.format( + self.dataset + ) + ) + + def _get_image_path(self, file_name): + images_dir = os.path.join(self.root, 'images') + dataset = 'test2017' if 'test' in self.dataset else self.dataset + if self.data_format == 'zip': + return os.path.join(images_dir, dataset) + '.zip@' + file_name + else: + return os.path.join(images_dir, dataset, file_name) + + def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``. + """ + coco = self.coco + img_id = self.ids[index] + ann_ids = coco.getAnnIds(imgIds=img_id) + target = coco.loadAnns(ann_ids) + + file_name = coco.loadImgs(img_id)[0]['file_name'] + + if self.data_format == 'zip': + img = zipreader.imread( + self._get_image_path(file_name), + cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION + ) + else: + img = cv2.imread( + self._get_image_path(file_name), + cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION + ) + + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self): + return len(self.ids) + + def __repr__(self): + fmt_str = 'Dataset ' + self.__class__.__name__ + '\n' + fmt_str += ' Number of datapoints: {}\n'.format(self.__len__()) + fmt_str += ' Root Location: {}\n'.format(self.root) + tmp = ' Transforms (if any): ' + fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) + tmp = ' Target Transforms (if any): ' + fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) + return fmt_str + + def processKeypoints(self, keypoints): + tmp = keypoints.copy() + if keypoints[:, 2].max() > 0: + p = keypoints[keypoints[:, 2] > 0][:, :2].mean(axis=0) + num_keypoints = keypoints.shape[0] + for i in range(num_keypoints): + tmp[i][0:3] = [ + float(keypoints[i][0]), + float(keypoints[i][1]), + float(keypoints[i][2]) + ] + + return tmp + + def evaluate(self, cfg, preds, scores, output_dir, + *args, **kwargs): + ''' + Perform evaluation on COCO keypoint task + :param cfg: cfg dictionary + :param preds: prediction + :param output_dir: output directory + :param args: + :param kwargs: + 
:return: + ''' + res_folder = os.path.join(output_dir, 'results') + if not os.path.exists(res_folder): + os.makedirs(res_folder) + res_file = os.path.join( + res_folder, 'keypoints_%s_results.json' % self.dataset) + + # preds is a list of: image x person x (keypoints) + # keypoints: num_joints * 4 (x, y, score, tag) + kpts = defaultdict(list) + for idx, _kpts in enumerate(preds): + img_id = self.ids[idx] + file_name = self.coco.loadImgs(img_id)[0]['file_name'] + for idx_kpt, kpt in enumerate(_kpts): + area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1])) + kpt = self.processKeypoints(kpt) + # if self.with_center: + if cfg.DATASET.WITH_CENTER and not cfg.TEST.IGNORE_CENTER: + kpt = kpt[:-1] + + kpts[int(file_name[-16:-4])].append( + { + 'keypoints': kpt[:, 0:3], + 'score': scores[idx][idx_kpt], + 'tags': kpt[:, 3], + 'image': int(file_name[-16:-4]), + 'area': area + } + ) + + # rescoring and oks nms + oks_nmsed_kpts = [] + # image x person x (keypoints) + for img in kpts.keys(): + # person x (keypoints) + img_kpts = kpts[img] + # person x (keypoints) + # do not use nms, keep all detections + keep = [] + if len(keep) == 0: + oks_nmsed_kpts.append(img_kpts) + else: + oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep]) + + self._write_coco_keypoint_results( + oks_nmsed_kpts, res_file + ) + + if 'test' not in self.dataset: + info_str = self._do_python_keypoint_eval( + res_file, res_folder + ) + name_value = OrderedDict(info_str) + return name_value, name_value['AP'] + else: + return {'Null': 0}, 0 + + def _write_coco_keypoint_results(self, keypoints, res_file): + data_pack = [ + { + 'cat_id': self._class_to_coco_ind[cls], + 'cls_ind': cls_ind, + 'cls': cls, + 'ann_type': 'keypoints', + 'keypoints': keypoints + } + for cls_ind, cls in enumerate(self.classes) if not cls == '__background__' + ] + + results = self._coco_keypoint_results_one_category_kernel(data_pack[0]) + logger.info('=> Writing results json to %s' % res_file) + with open(res_file, 'w') as f: + json.dump(results, f, sort_keys=True, indent=4) + try: + json.load(open(res_file)) + except Exception: + content = [] + with open(res_file, 'r') as f: + for line in f: + content.append(line) + content[-1] = ']' + with open(res_file, 'w') as f: + for c in content: + f.write(c) + + def _coco_keypoint_results_one_category_kernel(self, data_pack): + cat_id = data_pack['cat_id'] + keypoints = data_pack['keypoints'] + cat_results = [] + num_joints = 17 + + for img_kpts in keypoints: + if len(img_kpts) == 0: + continue + + _key_points = np.array( + [img_kpts[k]['keypoints'] for k in range(len(img_kpts))] + ) + key_points = np.zeros( + (_key_points.shape[0], num_joints * 3), + dtype=np.float + ) + + for ipt in range(num_joints): + key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0] + key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1] + key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2] # keypoints score. 
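+            # The loop below packs each person's keypoints into a COCO-format result
+            # entry and derives a loose bounding box from the keypoint extremes.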
+ + for k in range(len(img_kpts)): + kpt = key_points[k].reshape((num_joints, 3)) + left_top = np.amin(kpt, axis=0) + right_bottom = np.amax(kpt, axis=0) + + w = right_bottom[0] - left_top[0] + h = right_bottom[1] - left_top[1] + + cat_results.append({ + 'image_id': img_kpts[k]['image'], + 'category_id': cat_id, + 'keypoints': list(key_points[k]), + 'score': img_kpts[k]['score'], + 'bbox': list([left_top[0], left_top[1], w, h]) + }) + + return cat_results + + def _do_python_keypoint_eval(self, res_file, res_folder): + coco_dt = self.coco.loadRes(res_file) + coco_eval = COCOeval(self.coco, coco_dt, 'keypoints') + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)'] + + info_str = [] + for ind, name in enumerate(stats_names): + info_str.append((name, coco_eval.stats[ind])) + # info_str.append(coco_eval.stats[ind]) + + return info_str diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/COCOKeypoints.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/COCOKeypoints.py new file mode 100644 index 0000000..9957720 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/COCOKeypoints.py @@ -0,0 +1,151 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import numpy as np + +import pycocotools +from .COCODataset import CocoDataset +from .target_generators import HeatmapGenerator + + +logger = logging.getLogger(__name__) + + +class CocoKeypoints(CocoDataset): + def __init__(self, + cfg, + dataset_name, + remove_images_without_annotations, + heatmap_generator, + joints_generator, + transforms=None): + super().__init__(cfg.DATASET.ROOT, + dataset_name, + cfg.DATASET.DATA_FORMAT) + + if cfg.DATASET.WITH_CENTER: + assert cfg.DATASET.NUM_JOINTS == 18, 'Number of joint with center for COCO is 18' + else: + assert cfg.DATASET.NUM_JOINTS == 17, 'Number of joint for COCO is 17' + + self.num_scales = self._init_check(heatmap_generator, joints_generator) + + self.num_joints = cfg.DATASET.NUM_JOINTS + self.with_center = cfg.DATASET.WITH_CENTER + self.num_joints_without_center = self.num_joints - 1 \ + if self.with_center else self.num_joints + self.scale_aware_sigma = cfg.DATASET.SCALE_AWARE_SIGMA + self.base_sigma = cfg.DATASET.BASE_SIGMA + self.base_size = cfg.DATASET.BASE_SIZE + self.int_sigma = cfg.DATASET.INT_SIGMA + + if remove_images_without_annotations: + self.ids = [ + img_id + for img_id in self.ids + if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0 + ] + + self.transforms = transforms + self.heatmap_generator = heatmap_generator + self.joints_generator = joints_generator + + def __getitem__(self, idx): + img, anno = super().__getitem__(idx) + + mask = self.get_mask(anno, idx) + + anno = [ + obj for obj in anno + if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0 + ] + + # TODO(bowen): to generate scale-aware sigma, modify `get_joints` to associate a sigma to each joint + joints = self.get_joints(anno) + + mask_list = [mask.copy() for _ in range(self.num_scales)] + 
joints_list = [joints.copy() for _ in range(self.num_scales)] + target_list = list() + + if self.transforms: + img, mask_list, joints_list = self.transforms( + img, mask_list, joints_list + ) + + for scale_id in range(self.num_scales): + target_t = self.heatmap_generator[scale_id](joints_list[scale_id]) + joints_t = self.joints_generator[scale_id](joints_list[scale_id]) + + target_list.append(target_t.astype(np.float32)) + mask_list[scale_id] = mask_list[scale_id].astype(np.float32) + joints_list[scale_id] = joints_t.astype(np.int32) + + return img, target_list, mask_list, joints_list + + def get_joints(self, anno): + num_people = len(anno) + + if self.scale_aware_sigma: + joints = np.zeros((num_people, self.num_joints, 4)) + else: + joints = np.zeros((num_people, self.num_joints, 3)) + + for i, obj in enumerate(anno): + joints[i, :self.num_joints_without_center, :3] = \ + np.array(obj['keypoints']).reshape([-1, 3]) + if self.with_center: + joints_sum = np.sum(joints[i, :-1, :2], axis=0) + num_vis_joints = len(np.nonzero(joints[i, :-1, 2])[0]) + if num_vis_joints > 0: + joints[i, -1, :2] = joints_sum / num_vis_joints + joints[i, -1, 2] = 1 + if self.scale_aware_sigma: + # get person box + box = obj['bbox'] + size = max(box[2], box[3]) + sigma = size / self.base_size * self.base_sigma + if self.int_sigma: + sigma = int(np.round(sigma + 0.5)) + assert sigma > 0, sigma + joints[i, :, 3] = sigma + + return joints + + def get_mask(self, anno, idx): + coco = self.coco + img_info = coco.loadImgs(self.ids[idx])[0] + + m = np.zeros((img_info['height'], img_info['width'])) + + for obj in anno: + if obj['iscrowd']: + rle = pycocotools.mask.frPyObjects( + obj['segmentation'], img_info['height'], img_info['width']) + m += pycocotools.mask.decode(rle) + elif obj['num_keypoints'] == 0: + rles = pycocotools.mask.frPyObjects( + obj['segmentation'], img_info['height'], img_info['width']) + for rle in rles: + m += pycocotools.mask.decode(rle) + + return m < 0.5 + + def _init_check(self, heatmap_generator, joints_generator): + assert isinstance(heatmap_generator, (list, tuple)), 'heatmap_generator should be a list or tuple' + assert isinstance(joints_generator, (list, tuple)), 'joints_generator should be a list or tuple' + assert len(heatmap_generator) == len(joints_generator), \ + 'heatmap_generator and joints_generator should have same length,'\ + 'got {} vs {}.'.format( + len(heatmap_generator), len(joints_generator) + ) + return len(heatmap_generator) diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/CrowdPoseDataset.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/CrowdPoseDataset.py new file mode 100644 index 0000000..3329aaf --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/CrowdPoseDataset.py @@ -0,0 +1,296 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Bowen Cheng (bcheng9@illinois.edu) and Bin Xiao (leoxiaobin@gmail.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import defaultdict +from collections import OrderedDict +import logging +import os +import os.path + +import cv2 +import json_tricks as json +import numpy as np +from torch.utils.data import Dataset + +from crowdposetools.cocoeval import COCOeval +from utils import zipreader + +logger = logging.getLogger(__name__) + + +class CrowdPoseDataset(Dataset): + """`CrowdPose`_ Dataset. + + Args: + root (string): Root directory where dataset is located to. + dataset (string): Dataset name(train2017, val2017, test2017). + data_format(string): Data format for reading('jpg', 'zip') + transform (callable, optional): A function/transform that takes in an opencv image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + + def __init__(self, root, dataset, data_format, transform=None, + target_transform=None): + from crowdposetools.coco import COCO + self.name = 'CROWDPOSE' + self.root = root + self.dataset = dataset + self.data_format = data_format + self.coco = COCO(self._get_anno_file_name()) + self.ids = list(self.coco.imgs.keys()) + self.transform = transform + self.target_transform = target_transform + + cats = [cat['name'] + for cat in self.coco.loadCats(self.coco.getCatIds())] + self.classes = ['__background__'] + cats + logger.info('=> classes: {}'.format(self.classes)) + self.num_classes = len(self.classes) + self._class_to_ind = dict(zip(self.classes, range(self.num_classes))) + self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds())) + self._coco_ind_to_class_ind = dict( + [ + (self._class_to_coco_ind[cls], self._class_to_ind[cls]) + for cls in self.classes[1:] + ] + ) + + def _get_anno_file_name(self): + # example: root/json/crowdpose_{train,val,test}.json + return os.path.join( + self.root, + 'json', + 'crowdpose_{}.json'.format( + self.dataset + ) + ) + + def _get_image_path(self, file_name): + images_dir = os.path.join(self.root, 'images') + if self.data_format == 'zip': + return images_dir + '.zip@' + file_name + else: + return os.path.join(images_dir, file_name) + + def __getitem__(self, index): + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``. 
+ """ + coco = self.coco + img_id = self.ids[index] + ann_ids = coco.getAnnIds(imgIds=img_id) + target = coco.loadAnns(ann_ids) + + file_name = coco.loadImgs(img_id)[0]['file_name'] + + if self.data_format == 'zip': + img = zipreader.imread( + self._get_image_path(file_name), + cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION + ) + else: + img = cv2.imread( + self._get_image_path(file_name), + cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION + ) + + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self): + return len(self.ids) + + def __repr__(self): + fmt_str = 'Dataset ' + self.__class__.__name__ + '\n' + fmt_str += ' Number of datapoints: {}\n'.format(self.__len__()) + fmt_str += ' Root Location: {}\n'.format(self.root) + tmp = ' Transforms (if any): ' + fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) + tmp = ' Target Transforms (if any): ' + fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) + return fmt_str + + def processKeypoints(self, keypoints): + tmp = keypoints.copy() + if keypoints[:, 2].max() > 0: + p = keypoints[keypoints[:, 2] > 0][:, :2].mean(axis=0) + num_keypoints = keypoints.shape[0] + for i in range(num_keypoints): + tmp[i][0:3] = [ + float(keypoints[i][0]), + float(keypoints[i][1]), + float(keypoints[i][2]) + ] + + return tmp + + def evaluate(self, cfg, preds, scores, output_dir, + *args, **kwargs): + ''' + Perform evaluation on COCO keypoint task + :param cfg: cfg dictionary + :param preds: prediction + :param output_dir: output directory + :param args: + :param kwargs: + :return: + ''' + res_folder = os.path.join(output_dir, 'results') + if not os.path.exists(res_folder): + os.makedirs(res_folder) + res_file = os.path.join( + res_folder, 'keypoints_%s_results.json' % self.dataset) + + # preds is a list of: image x person x (keypoints) + # keypoints: num_joints * 4 (x, y, score, tag) + kpts = defaultdict(list) + for idx, _kpts in enumerate(preds): + img_id = self.ids[idx] + file_name = self.coco.loadImgs(img_id)[0]['file_name'] + for idx_kpt, kpt in enumerate(_kpts): + area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1])) + kpt = self.processKeypoints(kpt) + # if self.with_center: + if cfg.DATASET.WITH_CENTER and not cfg.TEST.IGNORE_CENTER: + kpt = kpt[:-1] + + kpts[int(file_name.split('.')[0])].append( + { + 'keypoints': kpt[:, 0:3], + 'score': scores[idx][idx_kpt], + 'tags': kpt[:, 3], + 'image': int(file_name.split('.')[0]), + 'area': area + } + ) + + # rescoring and oks nms + oks_nmsed_kpts = [] + # image x person x (keypoints) + for img in kpts.keys(): + # person x (keypoints) + img_kpts = kpts[img] + # person x (keypoints) + # do not use nms, keep all detections + keep = [] + if len(keep) == 0: + oks_nmsed_kpts.append(img_kpts) + else: + oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep]) + + self._write_coco_keypoint_results( + oks_nmsed_kpts, res_file + ) + + # CrowdPose `test` set has annotation. 
+ info_str = self._do_python_keypoint_eval( + res_file, res_folder + ) + name_value = OrderedDict(info_str) + return name_value, name_value['AP'] + + def _write_coco_keypoint_results(self, keypoints, res_file): + data_pack = [ + { + 'cat_id': self._class_to_coco_ind[cls], + 'cls_ind': cls_ind, + 'cls': cls, + 'ann_type': 'keypoints', + 'keypoints': keypoints + } + for cls_ind, cls in enumerate(self.classes) if not cls == '__background__' + ] + + results = self._coco_keypoint_results_one_category_kernel(data_pack[0]) + logger.info('=> Writing results json to %s' % res_file) + with open(res_file, 'w') as f: + json.dump(results, f, sort_keys=True, indent=4) + try: + json.load(open(res_file)) + except Exception: + content = [] + with open(res_file, 'r') as f: + for line in f: + content.append(line) + content[-1] = ']' + with open(res_file, 'w') as f: + for c in content: + f.write(c) + + def _coco_keypoint_results_one_category_kernel(self, data_pack): + cat_id = data_pack['cat_id'] + keypoints = data_pack['keypoints'] + cat_results = [] + num_joints = 14 + + for img_kpts in keypoints: + if len(img_kpts) == 0: + continue + + _key_points = np.array( + [img_kpts[k]['keypoints'] for k in range(len(img_kpts))] + ) + key_points = np.zeros( + (_key_points.shape[0], num_joints * 3), + dtype=np.float + ) + + for ipt in range(num_joints): + key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0] + key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1] + key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2] # keypoints score. + + for k in range(len(img_kpts)): + kpt = key_points[k].reshape((num_joints, 3)) + left_top = np.amin(kpt, axis=0) + right_bottom = np.amax(kpt, axis=0) + + w = right_bottom[0] - left_top[0] + h = right_bottom[1] - left_top[1] + + cat_results.append({ + 'image_id': img_kpts[k]['image'], + 'category_id': cat_id, + 'keypoints': list(key_points[k]), + 'score': img_kpts[k]['score'], + 'bbox': list([left_top[0], left_top[1], w, h]) + }) + + return cat_results + + def _do_python_keypoint_eval(self, res_file, res_folder): + coco_dt = self.coco.loadRes(res_file) + coco_eval = COCOeval(self.coco, coco_dt, 'keypoints') + coco_eval.params.useSegm = None + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + stats_names = ['AP', 'Ap .5', 'AP .75', 'AR', 'AR .5', 'AR .75', 'AP (easy)', 'AP (medium)', 'AP (hard)'] + stats_index = [0, 1, 2, 5, 6, 7, 8, 9, 10] + + info_str = [] + for ind, name in enumerate(stats_names): + info_str.append((name, coco_eval.stats[stats_index[ind]])) + # info_str.append(coco_eval.stats[ind]) + + return info_str diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/CrowdPoseKeypoints.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/CrowdPoseKeypoints.py new file mode 100644 index 0000000..120b763 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/CrowdPoseKeypoints.py @@ -0,0 +1,139 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Bowen Cheng (bcheng9@illinois.edu) and Bin Xiao (leoxiaobin@gmail.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import numpy as np + +import crowdposetools +from .CrowdPoseDataset import CrowdPoseDataset +from .target_generators import HeatmapGenerator + + +logger = logging.getLogger(__name__) + + +class CrowdPoseKeypoints(CrowdPoseDataset): + def __init__(self, + cfg, + dataset_name, + remove_images_without_annotations, + heatmap_generator, + joints_generator, + transforms=None): + super().__init__(cfg.DATASET.ROOT, + dataset_name, + cfg.DATASET.DATA_FORMAT) + + if cfg.DATASET.WITH_CENTER: + assert cfg.DATASET.NUM_JOINTS == 15, 'Number of joint with center for CrowdPose is 15' + else: + assert cfg.DATASET.NUM_JOINTS == 14, 'Number of joint for CrowdPose is 14' + + self.num_scales = self._init_check(heatmap_generator, joints_generator) + + self.num_joints = cfg.DATASET.NUM_JOINTS + self.with_center = cfg.DATASET.WITH_CENTER + self.num_joints_without_center = self.num_joints - 1 \ + if self.with_center else self.num_joints + self.scale_aware_sigma = cfg.DATASET.SCALE_AWARE_SIGMA + self.base_sigma = cfg.DATASET.BASE_SIGMA + self.base_size = cfg.DATASET.BASE_SIZE + self.int_sigma = cfg.DATASET.INT_SIGMA + + if remove_images_without_annotations: + self.ids = [ + img_id + for img_id in self.ids + if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0 + ] + + self.transforms = transforms + self.heatmap_generator = heatmap_generator + self.joints_generator = joints_generator + + def __getitem__(self, idx): + img, anno = super().__getitem__(idx) + + mask = self.get_mask(anno, idx) + + anno = [ + obj for obj in anno + if obj['iscrowd'] == 0 or obj['num_keypoints'] > 0 + ] + + # TODO(bowen): to generate scale-aware sigma, modify `get_joints` to associate a sigma to each joint + joints = self.get_joints(anno) + + mask_list = [mask.copy() for _ in range(self.num_scales)] + joints_list = [joints.copy() for _ in range(self.num_scales)] + target_list = list() + + if self.transforms: + img, mask_list, joints_list = self.transforms( + img, mask_list, joints_list + ) + + for scale_id in range(self.num_scales): + target_t = self.heatmap_generator[scale_id](joints_list[scale_id]) + joints_t = self.joints_generator[scale_id](joints_list[scale_id]) + + target_list.append(target_t.astype(np.float32)) + mask_list[scale_id] = mask_list[scale_id].astype(np.float32) + joints_list[scale_id] = joints_t.astype(np.int32) + + return img, target_list, mask_list, joints_list + + def get_joints(self, anno): + num_people = len(anno) + + if self.scale_aware_sigma: + joints = np.zeros((num_people, self.num_joints, 4)) + else: + joints = np.zeros((num_people, self.num_joints, 3)) + + for i, obj in enumerate(anno): + joints[i, :self.num_joints_without_center, :3] = \ + np.array(obj['keypoints']).reshape([-1, 3]) + if self.with_center: + joints_sum = np.sum(joints[i, :-1, :2], axis=0) + num_vis_joints = len(np.nonzero(joints[i, :-1, 2])[0]) + if num_vis_joints > 0: + joints[i, -1, :2] = joints_sum / num_vis_joints + joints[i, -1, 2] = 1 + if self.scale_aware_sigma: + # get person box + box = obj['bbox'] + size = max(box[2], box[3]) + sigma = size / self.base_size * self.base_sigma + if self.int_sigma: + sigma = int(np.round(sigma + 0.5)) + assert sigma > 0, sigma + joints[i, :, 3] = sigma + + return joints + + def get_mask(self, anno, 
idx): + coco = self.coco + img_info = coco.loadImgs(self.ids[idx])[0] + + m = np.zeros((img_info['height'], img_info['width'])) + + return m < 0.5 + + def _init_check(self, heatmap_generator, joints_generator): + assert isinstance(heatmap_generator, (list, tuple)), 'heatmap_generator should be a list or tuple' + assert isinstance(joints_generator, (list, tuple)), 'joints_generator should be a list or tuple' + assert len(heatmap_generator) == len(joints_generator), \ + 'heatmap_generator and joints_generator should have same length,'\ + 'got {} vs {}.'.format( + len(heatmap_generator), len(joints_generator) + ) + return len(heatmap_generator) diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/__init__.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/__init__.py new file mode 100644 index 0000000..29af8b2 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/__init__.py @@ -0,0 +1,60 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# ------------------------------------------------------------------------------ + +from .COCOKeypoints import CocoKeypoints as coco +#from .CrowdPoseKeypoints import CrowdPoseKeypoints as crowd_pose +from .build import make_dataloader +from .build import make_test_dataloader + +# dataset dependent configuration for visualization +coco_part_labels = [ + 'nose', 'eye_l', 'eye_r', 'ear_l', 'ear_r', + 'sho_l', 'sho_r', 'elb_l', 'elb_r', 'wri_l', 'wri_r', + 'hip_l', 'hip_r', 'kne_l', 'kne_r', 'ank_l', 'ank_r' +] +coco_part_idx = { + b: a for a, b in enumerate(coco_part_labels) +} +coco_part_orders = [ + ('nose', 'eye_l'), ('eye_l', 'eye_r'), ('eye_r', 'nose'), + ('eye_l', 'ear_l'), ('eye_r', 'ear_r'), ('ear_l', 'sho_l'), + ('ear_r', 'sho_r'), ('sho_l', 'sho_r'), ('sho_l', 'hip_l'), + ('sho_r', 'hip_r'), ('hip_l', 'hip_r'), ('sho_l', 'elb_l'), + ('elb_l', 'wri_l'), ('sho_r', 'elb_r'), ('elb_r', 'wri_r'), + ('hip_l', 'kne_l'), ('kne_l', 'ank_l'), ('hip_r', 'kne_r'), + ('kne_r', 'ank_r') +] + +crowd_pose_part_labels = [ + 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', + 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', + 'left_knee', 'right_knee', 'left_ankle', 'right_ankle', + 'head', 'neck' +] +crowd_pose_part_idx = { + b: a for a, b in enumerate(crowd_pose_part_labels) +} +crowd_pose_part_orders = [ + ('head', 'neck'), ('neck', 'left_shoulder'), ('neck', 'right_shoulder'), + ('left_shoulder', 'right_shoulder'), ('left_shoulder', 'left_hip'), + ('right_shoulder', 'right_hip'), ('left_hip', 'right_hip'), ('left_shoulder', 'left_elbow'), + ('left_elbow', 'left_wrist'), ('right_shoulder', 'right_elbow'), ('right_elbow', 'right_wrist'), + ('left_hip', 'left_knee'), ('left_knee', 'left_ankle'), ('right_hip', 'right_knee'), + ('right_knee', 'right_ankle') +] + +VIS_CONFIG = { + 'COCO': { + 'part_labels': coco_part_labels, + 'part_idx': coco_part_idx, + 'part_orders': coco_part_orders + }, + 'CROWDPOSE': { + 'part_labels': crowd_pose_part_labels, + 'part_idx': crowd_pose_part_idx, + 'part_orders': crowd_pose_part_orders + } +} diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/build.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/build.py new file mode 100644 index 0000000..95be9eb --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/build.py @@ -0,0 +1,108 @@ +# 
------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch.utils.data + +# from .COCODataset import CocoDataset as coco +# from .COCOKeypoints import CocoKeypoints as coco_kpt +# from .CrowdPoseDataset import CrowdPoseDataset as crowd_pose +# from .CrowdPoseKeypoints import CrowdPoseKeypoints as crowd_pose_kpt +from .transforms import build_transforms +from .target_generators import HeatmapGenerator +from .target_generators import ScaleAwareHeatmapGenerator +from .target_generators import JointsGenerator + + +def build_dataset(cfg, is_train): + transforms = build_transforms(cfg, is_train) + + if cfg.DATASET.SCALE_AWARE_SIGMA: + _HeatmapGenerator = ScaleAwareHeatmapGenerator + else: + _HeatmapGenerator = HeatmapGenerator + + heatmap_generator = [ + _HeatmapGenerator( + output_size, cfg.DATASET.NUM_JOINTS, cfg.DATASET.SIGMA + ) for output_size in cfg.DATASET.OUTPUT_SIZE + ] + joints_generator = [ + JointsGenerator( + cfg.DATASET.MAX_NUM_PEOPLE, + cfg.DATASET.NUM_JOINTS, + output_size, + cfg.MODEL.TAG_PER_JOINT + ) for output_size in cfg.DATASET.OUTPUT_SIZE + ] + + dataset_name = cfg.DATASET.TRAIN if is_train else cfg.DATASET.TEST + + dataset = eval(cfg.DATASET.DATASET)( + cfg, + dataset_name, + is_train, + heatmap_generator, + joints_generator, + transforms + ) + + return dataset + + +def make_dataloader(cfg, is_train=True, distributed=False): + if is_train: + images_per_gpu = cfg.TRAIN.IMAGES_PER_GPU + shuffle = True + else: + images_per_gpu = cfg.TEST.IMAGES_PER_GPU + shuffle = False + images_per_batch = images_per_gpu * len(cfg.GPUS) + + dataset = build_dataset(cfg, is_train) + + if is_train and distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler( + dataset + ) + shuffle = False + else: + train_sampler = None + + data_loader = torch.utils.data.DataLoader( + dataset, + batch_size=images_per_batch, + shuffle=shuffle, + num_workers=cfg.WORKERS, + pin_memory=cfg.PIN_MEMORY, + sampler=train_sampler + ) + + return data_loader + + +def make_test_dataloader(cfg): + transforms = None + dataset = eval(cfg.DATASET.DATASET_TEST)( + cfg.DATASET.ROOT, + cfg.DATASET.TEST, + cfg.DATASET.DATA_FORMAT, + transforms + ) + + data_loader = torch.utils.data.DataLoader( + dataset, + batch_size=1, + shuffle=False, + num_workers=0, + pin_memory=False + ) + + return data_loader, dataset diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/target_generators/__init__.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/target_generators/__init__.py new file mode 100644 index 0000000..323fcb0 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/target_generators/__init__.py @@ -0,0 +1,5 @@ +from .target_generators import HeatmapGenerator +from .target_generators import ScaleAwareHeatmapGenerator +from .target_generators import JointsGenerator + +__all__ = ['HeatmapGenerator', 'ScaleAwareHeatmapGenerator', 'JointsGenerator'] diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/target_generators/target_generators.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/target_generators/target_generators.py new file 
mode 100644 index 0000000..e8e3165 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/target_generators/target_generators.py @@ -0,0 +1,115 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +class HeatmapGenerator(): + def __init__(self, output_res, num_joints, sigma=-1): + self.output_res = output_res + self.num_joints = num_joints + if sigma < 0: + sigma = self.output_res/64 + self.sigma = sigma + size = 6*sigma + 3 + x = np.arange(0, size, 1, float) + y = x[:, np.newaxis] + x0, y0 = 3*sigma + 1, 3*sigma + 1 + self.g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) + + def __call__(self, joints): + hms = np.zeros((self.num_joints, self.output_res, self.output_res), + dtype=np.float32) + sigma = self.sigma + for p in joints: + for idx, pt in enumerate(p): + if pt[2] > 0: + x, y = int(pt[0]), int(pt[1]) + if x < 0 or y < 0 or \ + x >= self.output_res or y >= self.output_res: + continue + + ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1)) + br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2)) + + c, d = max(0, -ul[0]), min(br[0], self.output_res) - ul[0] + a, b = max(0, -ul[1]), min(br[1], self.output_res) - ul[1] + + cc, dd = max(0, ul[0]), min(br[0], self.output_res) + aa, bb = max(0, ul[1]), min(br[1], self.output_res) + hms[idx, aa:bb, cc:dd] = np.maximum( + hms[idx, aa:bb, cc:dd], self.g[a:b, c:d]) + return hms + + +class ScaleAwareHeatmapGenerator(): + def __init__(self, output_res, num_joints): + self.output_res = output_res + self.num_joints = num_joints + + def get_gaussian_kernel(self, sigma): + size = 6*sigma + 3 + x = np.arange(0, size, 1, float) + y = x[:, np.newaxis] + x0, y0 = 3*sigma + 1, 3*sigma + 1 + g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) + return g + + def __call__(self, joints): + hms = np.zeros((self.num_joints, self.output_res, self.output_res), + dtype=np.float32) + for p in joints: + sigma = p[0, 3] + g = self.get_gaussian_kernel(sigma) + for idx, pt in enumerate(p): + if pt[2] > 0: + x, y = int(pt[0]), int(pt[1]) + if x < 0 or y < 0 or \ + x >= self.output_res or y >= self.output_res: + continue + + ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1)) + br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2)) + + c, d = max(0, -ul[0]), min(br[0], self.output_res) - ul[0] + a, b = max(0, -ul[1]), min(br[1], self.output_res) - ul[1] + + cc, dd = max(0, ul[0]), min(br[0], self.output_res) + aa, bb = max(0, ul[1]), min(br[1], self.output_res) + hms[idx, aa:bb, cc:dd] = np.maximum( + hms[idx, aa:bb, cc:dd], g[a:b, c:d]) + return hms + + +class JointsGenerator(): + def __init__(self, max_num_people, num_joints, output_res, tag_per_joint): + self.max_num_people = max_num_people + self.num_joints = num_joints + self.output_res = output_res + self.tag_per_joint = tag_per_joint + + def __call__(self, joints): + visible_nodes = np.zeros((self.max_num_people, self.num_joints, 2)) + output_res = self.output_res + for i in range(len(joints)): + tot = 0 + for idx, pt in enumerate(joints[i]): + x, y = int(pt[0]), int(pt[1]) + if 
pt[2] > 0 and x >= 0 and y >= 0 \ + and x < self.output_res and y < self.output_res: + if self.tag_per_joint: + visible_nodes[i][tot] = \ + (idx * output_res**2 + y * output_res + x, 1) + else: + visible_nodes[i][tot] = \ + (y * output_res + x, 1) + tot += 1 + return visible_nodes diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/__init__.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/__init__.py new file mode 100644 index 0000000..f8074b2 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/__init__.py @@ -0,0 +1,8 @@ +from .transforms import Compose +from .transforms import RandomAffineTransform +from .transforms import ToTensor +from .transforms import Normalize +from .transforms import RandomHorizontalFlip + +from .build import build_transforms +from .build import FLIP_CONFIG diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/build.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/build.py new file mode 100644 index 0000000..6b18320 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/build.py @@ -0,0 +1,85 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from . import transforms as T + + +FLIP_CONFIG = { + 'COCO': [ + 0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15 + ], + 'COCO_WITH_CENTER': [ + 0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 17 + ], + 'CROWDPOSE': [ + 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 12, 13 + ], + 'CROWDPOSE_WITH_CENTER': [ + 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 12, 13, 14 + ] +} + + +def build_transforms(cfg, is_train=True): + assert is_train is True, 'Please only use build_transforms for training.' + assert isinstance(cfg.DATASET.OUTPUT_SIZE, (list, tuple)), 'DATASET.OUTPUT_SIZE should be list or tuple' + if is_train: + max_rotation = cfg.DATASET.MAX_ROTATION + min_scale = cfg.DATASET.MIN_SCALE + max_scale = cfg.DATASET.MAX_SCALE + max_translate = cfg.DATASET.MAX_TRANSLATE + input_size = cfg.DATASET.INPUT_SIZE + output_size = cfg.DATASET.OUTPUT_SIZE + flip = cfg.DATASET.FLIP + scale_type = cfg.DATASET.SCALE_TYPE + else: + scale_type = cfg.DATASET.SCALE_TYPE + max_rotation = 0 + min_scale = 1 + max_scale = 1 + max_translate = 0 + input_size = 512 + output_size = [128] + flip = 0 + + # coco_flip_index = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + # if cfg.DATASET.WITH_CENTER: + # coco_flip_index.append(17) + if 'coco' in cfg.DATASET.DATASET: + dataset_name = 'COCO' + elif 'crowd_pose' in cfg.DATASET.DATASET: + dataset_name = 'CROWDPOSE' + else: + raise ValueError('Please implement flip_index for new dataset: %s.' 
% cfg.DATASET.DATASET) + if cfg.DATASET.WITH_CENTER: + coco_flip_index = FLIP_CONFIG[dataset_name + '_WITH_CENTER'] + else: + coco_flip_index = FLIP_CONFIG[dataset_name] + + transforms = T.Compose( + [ + T.RandomAffineTransform( + input_size, + output_size, + max_rotation, + min_scale, + max_scale, + scale_type, + max_translate, + scale_aware_sigma=cfg.DATASET.SCALE_AWARE_SIGMA + ), + T.RandomHorizontalFlip(coco_flip_index, output_size, flip), + T.ToTensor(), + T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ] + ) + + return transforms diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/transforms.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/transforms.py new file mode 100644 index 0000000..0be0ecc --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/dataset/transforms/transforms.py @@ -0,0 +1,182 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random + +import cv2 +import numpy as np +import torch +import torchvision +from torchvision.transforms import functional as F + + +class Compose(object): + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, mask, joints): + for t in self.transforms: + image, mask, joints = t(image, mask, joints) + return image, mask, joints + + def __repr__(self): + format_string = self.__class__.__name__ + "(" + for t in self.transforms: + format_string += "\n" + format_string += " {0}".format(t) + format_string += "\n)" + return format_string + + +class ToTensor(object): + def __call__(self, image, mask, joints): + return F.to_tensor(image), mask, joints + + +class Normalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, image, mask, joints): + image = F.normalize(image, mean=self.mean, std=self.std) + return image, mask, joints + + +class RandomHorizontalFlip(object): + def __init__(self, flip_index, output_size, prob=0.5): + self.flip_index = flip_index + self.prob = prob + self.output_size = output_size if isinstance(output_size, list) \ + else [output_size] + + def __call__(self, image, mask, joints): + assert isinstance(mask, list) + assert isinstance(joints, list) + assert len(mask) == len(joints) + assert len(mask) == len(self.output_size) + + if random.random() < self.prob: + image = image[:, ::-1] - np.zeros_like(image) + for i, _output_size in enumerate(self.output_size): + mask[i] = mask[i][:, ::-1] - np.zeros_like(mask[i]) + joints[i] = joints[i][:, self.flip_index] + joints[i][:, :, 0] = _output_size - joints[i][:, :, 0] - 1 + + return image, mask, joints + + +class RandomAffineTransform(object): + def __init__(self, + input_size, + output_size, + max_rotation, + min_scale, + max_scale, + scale_type, + max_translate, + scale_aware_sigma=False): + self.input_size = input_size + self.output_size = output_size if isinstance(output_size, list) \ + else [output_size] + + self.max_rotation = max_rotation + self.min_scale = min_scale + self.max_scale = max_scale + self.scale_type = scale_type + self.max_translate = max_translate + self.scale_aware_sigma = 
scale_aware_sigma + + def _get_affine_matrix(self, center, scale, res, rot=0): + # Generate transformation matrix + h = 200 * scale + t = np.zeros((3, 3)) + t[0, 0] = float(res[1]) / h + t[1, 1] = float(res[0]) / h + t[0, 2] = res[1] * (-float(center[0]) / h + .5) + t[1, 2] = res[0] * (-float(center[1]) / h + .5) + t[2, 2] = 1 + if not rot == 0: + rot = -rot # To match direction of rotation from cropping + rot_mat = np.zeros((3, 3)) + rot_rad = rot * np.pi / 180 + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + rot_mat[0, :2] = [cs, -sn] + rot_mat[1, :2] = [sn, cs] + rot_mat[2, 2] = 1 + # Need to rotate around center + t_mat = np.eye(3) + t_mat[0, 2] = -res[1]/2 + t_mat[1, 2] = -res[0]/2 + t_inv = t_mat.copy() + t_inv[:2, 2] *= -1 + t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t))) + return t + + def _affine_joints(self, joints, mat): + joints = np.array(joints) + shape = joints.shape + joints = joints.reshape(-1, 2) + return np.dot(np.concatenate( + (joints, joints[:, 0:1]*0+1), axis=1), mat.T).reshape(shape) + + def __call__(self, image, mask, joints): + assert isinstance(mask, list) + assert isinstance(joints, list) + assert len(mask) == len(joints) + assert len(mask) == len(self.output_size) + + height, width = image.shape[:2] + + center = np.array((width/2, height/2)) + if self.scale_type == 'long': + scale = max(height, width)/200 + elif self.scale_type == 'short': + scale = min(height, width)/200 + else: + raise ValueError('Unkonw scale type: {}'.format(self.scale_type)) + aug_scale = np.random.random() * (self.max_scale - self.min_scale) \ + + self.min_scale + scale *= aug_scale + aug_rot = (np.random.random() * 2 - 1) * self.max_rotation + + if self.max_translate > 0: + dx = np.random.randint( + -self.max_translate*scale, self.max_translate*scale) + dy = np.random.randint( + -self.max_translate*scale, self.max_translate*scale) + center[0] += dx + center[1] += dy + + for i, _output_size in enumerate(self.output_size): + mat_output = self._get_affine_matrix( + center, scale, (_output_size, _output_size), aug_rot + )[:2] + mask[i] = cv2.warpAffine( + (mask[i]*255).astype(np.uint8), mat_output, + (_output_size, _output_size) + ) / 255 + mask[i] = (mask[i] > 0.5).astype(np.float32) + + joints[i][:, :, 0:2] = self._affine_joints( + joints[i][:, :, 0:2], mat_output + ) + if self.scale_aware_sigma: + joints[i][:, :, 3] = joints[i][:, :, 3] / aug_scale + + mat_input = self._get_affine_matrix( + center, scale, (self.input_size, self.input_size), aug_rot + )[:2] + image = cv2.warpAffine( + image, mat_input, (self.input_size, self.input_size) + ) + + return image, mask, joints diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/__init__.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/fp16_optimizer.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/fp16_optimizer.py new file mode 100644 index 0000000..785aef0 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/fp16_optimizer.py @@ -0,0 +1,540 @@ +# ------------------------------------------------------------------------------ +# Based on: +# apex +# Copyright (c) NVIDIA +# Licence under The BSD 3-Clause "New" or "Revised" License +# https://github.com/NVIDIA/apex +# All rights reserved. 
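As a worked illustration of the `RandomAffineTransform` logic above (the 200-pixel scale convention in `_get_affine_matrix` and the homogeneous-coordinate mapping in `_affine_joints`), the following sketch re-derives the matrix for toy values. The image size, output resolution, and joint coordinates are made up; this is not the repository's API, just the same arithmetic in isolation.

```python
# Minimal re-derivation of the crop/affine logic above on toy data
# (assumes the same "person size in units of 200 px" scale convention).
import numpy as np

def get_affine_matrix(center, scale, res):
    # Maps image coordinates into a res[1] x res[0] output window.
    h = 200 * scale
    t = np.zeros((3, 3))
    t[0, 0] = res[1] / h
    t[1, 1] = res[0] / h
    t[0, 2] = res[1] * (-center[0] / h + .5)
    t[1, 2] = res[0] * (-center[1] / h + .5)
    t[2, 2] = 1
    return t

def affine_joints(joints_xy, mat):
    # Append a homogeneous 1 to every (x, y) and apply the 2x3 matrix.
    ones = np.ones((joints_xy.shape[0], 1))
    return np.dot(np.concatenate((joints_xy, ones), axis=1), mat.T)

center = np.array([320.0, 240.0])          # center of a 640x480 image
scale = max(480, 640) / 200                # 'long'-side scale convention
mat = get_affine_matrix(center, scale, (128, 128))[:2]

joints = np.array([[320.0, 240.0],         # a joint at the image center
                   [400.0, 300.0]])        # an off-center joint
print(affine_joints(joints, mat))
# -> [[64. 64.]   the centered joint lands at the middle of the 128x128 output
#     [80. 76.]]
```

Rotation and random translation in the actual transform only add a rotation matrix around the output center and an offset to `center`, respectively.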
+# +# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +# following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +# disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +# products derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Modified by Bowen Cheng +# ------------------------------------------------------------------------------ + +import torch +from torch import nn +from torch.autograd import Variable +from torch.nn.parameter import Parameter +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + +from .loss_scaler import DynamicLossScaler, LossScaler +from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm + + +# TODO: Update overflow check + downscale to use Carl's fused kernel. +class FP16_Optimizer(object): + """ + :class:`FP16_Optimizer` is designed to wrap an existing PyTorch optimizer, + and manage static or dynamic loss scaling and master weights in a manner transparent to the user. + For standard use, only two lines must be changed: creating the :class:`FP16_Optimizer` instance, + and changing the call to ``backward``. + Example:: + model = torch.nn.Linear(D_in, D_out).cuda().half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + # Name the FP16_Optimizer instance to replace the existing optimizer + # (recommended but not required): + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + # loss.backward() becomes: + optimizer.backward(loss) + ... + Example with dynamic loss scaling:: + ... + optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) + # optional arg to control dynamic loss scaling behavior + # dynamic_loss_args={'scale_window' : 500}) + # Usually, dynamic_loss_args is not necessary. + Args: + init_optimizer (torch.optim.optimizer): Existing optimizer created with the parameters to optimize. Internally, :class:`FP16_Optimizer` replaces the passed optimizer's fp16 parameters, if any, with fp32 master parameters copied from the original ones. 
:class:`FP16_Optimizer` also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy at the end of each :attr:`step`. + static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale gradients computed by the model. Any fp16 gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so ``static_loss_scale`` should not affect learning rate. + dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any ``static_loss_scale`` option. + dynamic_loss_args (dict, optional, default=None): Dict of kwargs that will be forwarded to the internal :class:`DynamicLossScaler` instance's constructor. Keys of this dict must match kwargs accepted by :class:`DynamicLossScaler`'s constructor. If ``dynamic_loss_args`` is unspecified, :class:`DynamicLossScaler`'s defaults will be used. + verbose (bool, optional, default=True): By default, FP16_Optimizer's constructor prints out the parameters and parameter groups it is ingesting, as a sanity check. If this becomes annoying (e.g. for large models), it can be disabled by passing ``verbose=False``. ``verbose=False`` will not disable printing when the loss scale is readjusted during dynamic loss scaling. + ``init_optimizer`` is expected to have been constructed in the ordinary way. + It is recommended (although not required) that the newly constructed :class:`FP16_Optimizer` instance be + named to replace ``init_optimizer``, for two reasons: + First, it means that references to the same name + later in the file will not have to change. + Second, :class:`FP16_Optimizer` reserves the right (as an implementation detail) to + modify ``init_optimizer``. If you do choose a unique name for the new + :class:`FP16_Optimizer` instance, you should only work with this new instance, + because the preexisting optimizer might no longer behave as expected. + ``init_optimizer`` may be any Pytorch optimizer. + It may contain a mixture of fp16 and fp32 parameters organized into any number of + ``param_groups`` with different hyperparameters. The :class:`FP16_Optimizer` constructor will + ingest these ``param_groups`` and remember them. + Calls to :: + loss.backward() + must be replaced with :: + optimizer.backward(loss) + because :class:`FP16_Optimizer` requires ownership of the backward pass to implement + loss scaling and copies to master gradients. + .. note:: + Loss scaling, either static or dynamic, is orthogonal to learning rate, because gradients + are downscaled before being applied. This means that adjusting the loss scale, or using + dynamic loss scaling, should not require retuning the learning rate or any other + hyperparameters. + **Advanced options** + **Closures**: :class:`FP16_Optimizer` can wrap a Pytorch optimizer that receives a closure. + See docstring for :attr:`step`. + **Gradient clipping**: Use :attr:`clip_master_grads`. + **Multiple losses**: If your model accumulates gradients from multiple losses, + this can be made more efficient by supplying ``update_master_grads=False`` + to :attr:`backward`. See docstring for :attr:`backward`. + **Manually adjusting loss scale**: The current loss scale can be retrieved or set via :: + print(optimizer.loss_scale) + optimizer.loss_scale = new_loss_scale + For static loss scaling, manually adjusting the loss scale over time is a reasonable + thing to do. 
During later epochs, gradients may become smaller, and a + higher loss scale may be required, analogous to scheduling the learning rate. Dynamic loss + scaling is more subtle (see :class:`DynamicLossScaler`) and in this case, manually adjusting + the loss scale is not recommended. + **Multi_GPU training**: If the wrapped ``init_optimizer`` was created from a model wrapped in + Pytorch DistributedDataParallel or Apex DistributedDataParallel, :class:`FP16_Optimizer` + should still work as intended. + """ + + def __init__(self, + init_optimizer, + static_loss_scale=1.0, + dynamic_loss_scale=False, + dynamic_loss_args=None, + verbose=True): + if not torch.cuda.is_available: + raise SystemError("Cannot use fp16 without CUDA.") + + self.verbose = verbose + + self.optimizer = init_optimizer + # init_state_dict sets up an alternative way to cast per-param state tensors. + # Stashing here in case https://github.com/pytorch/pytorch/issues/7733 makes it necessary. + # init_state_dict = init_optimizer.state_dict() + + self.fp16_groups = [] + self.fp32_from_fp16_groups = [] + self.fp32_from_fp32_groups = [] + for i, param_group in enumerate(self.optimizer.param_groups): + self.maybe_print("FP16_Optimizer processing param group {}:".format(i)) + fp16_params_this_group = [] + fp32_params_this_group = [] + fp32_from_fp16_params_this_group = [] + for i, param in enumerate(param_group['params']): + if param.requires_grad: + if param.type() == 'torch.cuda.HalfTensor': + self.maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}" + .format(param.size())) + fp16_params_this_group.append(param) + master_param = param.detach().clone().float() + master_param.requires_grad = True + param_group['params'][i] = master_param + fp32_from_fp16_params_this_group.append(master_param) + # Reset existing state dict key to the new master param. + # We still need to recast per-param state tensors, if any, to FP32. + if param in self.optimizer.state: + self.optimizer.state[master_param] = self.optimizer.state.pop(param) + elif param.type() == 'torch.cuda.FloatTensor': + self.maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}" + .format(param.size())) + fp32_params_this_group.append(param) + param_group['params'][i] = param + else: + raise TypeError("Wrapped parameters must be either " + "torch.cuda.FloatTensor or torch.cuda.HalfTensor. 
" + "Received {}".format(param.type())) + + self.fp16_groups.append(fp16_params_this_group) + self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group) + self.fp32_from_fp32_groups.append(fp32_params_this_group) + + # Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors + self.optimizer.load_state_dict(self.optimizer.state_dict()) + # alternative way to cast per-param state tensors: + # self.optimizer.load_state_dict(init_state_dict) + + if dynamic_loss_scale: + self.dynamic_loss_scale = True + if dynamic_loss_args is not None: + self.loss_scaler = DynamicLossScaler(**dynamic_loss_args) + else: + self.loss_scaler = DynamicLossScaler() + else: + self.dynamic_loss_scale = False + self.loss_scaler = LossScaler(static_loss_scale) + + self.overflow = False + self.first_closure_call_this_step = True + + self.clip_grad_norm = clip_grad_norm + + def maybe_print(self, msg): + if self.verbose: + print(msg) + + def __getstate__(self): + raise RuntimeError("FP16_Optimizer should be serialized using state_dict().") + + def __setstate__(self, state): + raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().") + + def zero_grad(self, set_grads_to_None=False): + """ + Zero fp32 and fp16 parameter grads. + """ + # In principle, only the .grad attributes of the model params need to be zeroed, + # because gradients are copied into the FP32 master params. However, we zero + # all gradients owned by the optimizer, just to be safe: + for group in self.optimizer.param_groups: + for p in group['params']: + if set_grads_to_None: + p.grad = None + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + # Zero fp16 gradients owned by the model: + for fp16_group in self.fp16_groups: + for param in fp16_group: + if set_grads_to_None: + param.grad = None + else: + if param.grad is not None: + param.grad.detach_() # as in torch.optim.optimizer.zero_grad() + param.grad.zero_() + + def _check_overflow(self): + params = [] + for group in self.fp16_groups: + for param in group: + params.append(param) + for group in self.fp32_from_fp32_groups: + for param in group: + params.append(param) + self.overflow = self.loss_scaler.has_overflow(params) + + def _update_scale(self, has_overflow=False): + self.loss_scaler.update_scale(has_overflow) + + def _master_params_to_model_params(self): + for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups): + master_params_to_model_params(fp16_group, fp32_from_fp16_group) + + # To consider: Integrate distributed with this wrapper by registering a hook on each variable + # that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream. + def _model_grads_to_master_grads(self): + for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups): + model_grads_to_master_grads(fp16_group, fp32_from_fp16_group) + + def _downscale_master(self): + if self.loss_scale != 1.0: + for group in self.optimizer.param_groups: + for param in group['params']: + if param.grad is not None: + param.grad.data.mul_(1. / self.loss_scale) + + def clip_master_grads(self, max_norm, norm_type=2): + """ + Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``. + Args: + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + Returns: + Total norm of the current fp32 gradients (viewed as a single vector). + .. 
warning:: + Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``). + """ + if not self.overflow: + fp32_params = [] + for param_group in self.optimizer.param_groups: + for param in param_group['params']: + fp32_params.append(param) + return self.clip_grad_norm(fp32_params, max_norm, norm_type) + else: + return -1 + + def state_dict(self): + """ + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. + Example:: + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + state_dict = {} + state_dict['loss_scaler'] = self.loss_scaler + state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale + state_dict['overflow'] = self.overflow + state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step + state_dict['optimizer_state_dict'] = self.optimizer.state_dict() + state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups + return state_dict + + def load_state_dict(self, state_dict): + """ + Loads a state_dict created by an earlier call to state_dict(). + If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. + Example:: + model = torch.nn.Linear(D_in, D_out).cuda().half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + # I think it should actually be ok to reload the optimizer before the model. + self.loss_scaler = state_dict['loss_scaler'] + self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] + self.overflow = state_dict['overflow'] + self.first_closure_call_this_step = state_dict['first_closure_call_this_step'] + self.optimizer.load_state_dict(state_dict['optimizer_state_dict']) + # At this point, the optimizer's references to the model's fp32 parameters are up to date. + # The optimizer's hyperparameters and internal buffers are also up to date. + # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still + # out of date. There are two options. + # 1: Refresh the master params from the model's fp16 params. + # This requires less storage but incurs precision loss. + # 2: Save and restore the fp32 master copies separately. + # We choose option 2. + # + # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device + # of their associated parameters, because it's possible those buffers might not exist yet in + # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been + # constructed in the same way as the one whose state_dict we are loading, the same master params + # are guaranteed to exist, so we can just copy_() from the saved master params. + for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']): + for current, saved in zip(current_group, saved_group): + current.data.copy_(saved.data) + + def step(self, closure=None): # could add clip option. 
+ """ + If no closure is supplied, :attr:`step` should be called after + ``fp16_optimizer_obj.backward(loss)``. + :attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to + :class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params + originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run + another forward pass using their model. + If a closure is supplied, :attr:`step` may be called without a prior call to + :attr:`backward(loss)`. + This control flow is identical to `ordinary Pytorch optimizer use`_ with closures. + However, the user should take care that any ``loss.backward()`` call within the closure + has been replaced by ``fp16_optimizer_obj.backward(loss)``. + Args: + closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss. + Example with closure:: + # optimizer is assumed to be an FP16_Optimizer object, previously constructed from an + # existing pytorch optimizer. + for input, target in dataset: + def closure(): + optimizer.zero_grad() + output = model(input) + loss = loss_fn(output, target) + # loss.backward() becomes: + optimizer.backward(loss) + return loss + optimizer.step(closure) + .. warning:: + Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling. + .. _`ordinary Pytorch optimizer use`: + http://pytorch.org/docs/master/optim.html#optimizer-step-closure + """ + + scale = self.loss_scaler.loss_scale + self._update_scale(self.overflow) + + if self.overflow: + # print("OVERFLOW! Skipping step. Attempted loss scale: {}, reducing to {}" + # .format(scale, self.loss_scale)) + return + + if closure is not None: + retval = self._step_with_closure(closure) + else: + retval = self.optimizer.step() + + self._master_params_to_model_params() + + return retval + + def _step_with_closure(self, closure): + def wrapped_closure(): + # helpful for debugging + # print("Calling wrapped_closure, first_closure_call_this_step = {}" + # .format(self.first_closure_call_this_step)) + if self.first_closure_call_this_step: + # We expect that the fp16 params are initially fresh on entering self.step(), + # so _master_params_to_model_params() is unnecessary the first time wrapped_closure() + # is called within self.optimizer.step(). + self.first_closure_call_this_step = False + else: + # If self.optimizer.step() internally calls wrapped_closure more than once, + # it may update the fp32 params after each call. However, self.optimizer + # doesn't know about the fp16 params at all. If the fp32 params get updated, + # we can't rely on self.optimizer to refresh the fp16 params. We need + # to handle that manually: + self._master_params_to_model_params() + # Our API expects the user to give us ownership of the backward() call by + # replacing all calls to loss.backward() with optimizer.backward(loss). + # This requirement holds whether or not the call to backward() is made within a closure. + # If the user is properly calling optimizer.backward(loss) within "closure," + # calling closure() here will give the fp32 master params fresh gradients + # for the optimizer to play with, so all wrapped_closure needs to do is call + # closure() and return the loss. 
+ temp_loss = closure() + while (self.overflow): + scale = self.loss_scaler.loss_scale + self._update_scale(self.overflow) + # print("OVERFLOW within closure! Skipping step. Attempted loss scale: {}, " + # "reducing to {}".format(scale, self.loss_scale)) + temp_loss = closure() + return temp_loss + + retval = self.optimizer.step(wrapped_closure) + + self.first_closure_call_this_step = True + + return retval + + def backward(self, loss, update_master_grads=True): + """ + :attr:`backward` performs the following conceptual steps: + 1. fp32_loss = loss.float() (see first Note below) + 2. scaled_loss = fp32_loss*loss_scale + 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined). + 4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32. + 5. Finally, master grads are divided by loss_scale. + In this way, after :attr:`backward`, the master params have fresh gradients, + and :attr:`step` may be called. + .. note:: + :attr:`backward` internally converts the loss to fp32 before applying the loss scale. + This provides some additional safety against overflow if the user has supplied an + fp16 loss value. + However, for maximum overflow safety, the user should + compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to + :attr:`backward`. + .. warning:: + The gradients found in a model's leaves after the call to + :attr:`backward` should not be regarded as valid in general, + because it's possible + they have been scaled (and in the case of dynamic loss scaling, + the scale factor may change over time). + If the user wants to inspect gradients after a call to :attr:`backward`, + only the master gradients should be regarded as valid. These can be retrieved via + :attr:`inspect_master_grad_data()`. + Args: + loss: The loss output by the user's model. loss may be either float or half (but see first Note above). + update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`. + Example:: + # Ordinary operation: + optimizer.backward(loss) + # Naive operation with multiple losses (technically valid, but less efficient): + # fp32 grads will be correct after the second call, but + # the first call incurs an unnecessary fp16->fp32 grad copy. + optimizer.backward(loss1) + optimizer.backward(loss2) + # More efficient way to handle multiple losses: + # The fp16->fp32 grad copy is delayed until fp16 grads from all + # losses have been accumulated. + optimizer.backward(loss1, update_master_grads=False) + optimizer.backward(loss2, update_master_grads=False) + optimizer.update_master_grads() + """ + # To consider: try multiple backward passes using retain_grad=True to find + # a loss scale that works. After you find a loss scale that works, do a final dummy + # backward pass with retain_graph=False to tear down the graph. Doing this would avoid + # discarding the iteration, but probably wouldn't improve overall efficiency. 
+ self.loss_scaler.backward(loss.float()) + if update_master_grads: + self.update_master_grads() + + def update_master_grads(self): + """ + Copy the ``.grad`` attribute from stored references to fp16 parameters to + the ``.grad`` attribute of the fp32 master parameters that are directly + updated by the optimizer. :attr:`update_master_grads` only needs to be called if + ``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``. + """ + if self.dynamic_loss_scale: + self._check_overflow() + if self.overflow: return + self._model_grads_to_master_grads() + self._downscale_master() + + def inspect_master_grad_data(self): + """ + When running with :class:`FP16_Optimizer`, + ``.grad`` attributes of a model's fp16 leaves should not be + regarded as truthful, because they might be scaled. + After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered, + the fp32 master params' ``.grad`` + attributes will contain valid gradients properly divided by the loss scale. However, + because :class:`FP16_Optimizer` flattens some parameters, accessing them may be + nonintuitive. :attr:`inspect_master_grad_data` + allows those gradients to be viewed with shapes corresponding to their associated model leaves. + Returns: + List of lists (one list for each parameter group). The list for each parameter group + is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group. + """ + raise NotImplementedError("Currently not implemented, working on it...") + fp32_grads_each_group = [] + if self.overflow: + print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. " + "Gradients are currently invalid (may be inf, nan, or stale). Returning None.") + return None + else: + return None + + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + return self.loss_scaler.loss_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) + + # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/fp16util.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/fp16util.py new file mode 100644 index 0000000..6010aa1 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/fp16util.py @@ -0,0 +1,188 @@ +# ------------------------------------------------------------------------------ +# Based on: +# apex +# Copyright (c) NVIDIA +# Licence under The BSD 3-Clause "New" or "Revised" License +# https://github.com/NVIDIA/apex +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +# following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following +# disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +# products derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Modified by Bowen Cheng +# ------------------------------------------------------------------------------ + +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + + +class tofp16(nn.Module): + """ + Model wrapper that implements:: + def forward(self, input): + return input.half() + """ + + def __init__(self): + super(tofp16, self).__init__() + + def forward(self, input): + return input.half() + + +class tofp32(nn.Module): + """ + Model wrapper that implements:: + def forward(self, input): + return input.half() + """ + + def __init__(self): + super(tofp32, self).__init__() + + def forward(self, input): + if isinstance(input, list): + return list(map(lambda x: x.float(), input)) + else: + return input.float() + + +def BN_convert_float(module): + ''' + Designed to work with network_to_half. + BatchNorm layers need parameters in single precision. + Find all layers and convert them back to float. This can't + be done with built in .apply as that function will apply + fn to all modules, parameters, and buffers. Thus we wouldn't + be able to guard the float conversion based on the module type. + ''' + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + module.float() + for child in module.children(): + BN_convert_float(child) + return module + + +def network_to_half(network): + """ + Convert model to half precision in a batchnorm-safe way. + """ + return nn.Sequential(tofp16(), BN_convert_float(network.half()), tofp32()) + + +def backwards_debug_hook(grad): + raise RuntimeError("master_params recieved a gradient in the backward pass!") + + +def prep_param_lists(model, flat_master=False): + """ + Creates a list of FP32 master parameters for a given model, as in + `Training Neural Networks with Mixed Precision: Real Examples`_. + Args: + model (torch.nn.Module): Existing Pytorch model + flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization. 
+ Returns: + A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element. + Example:: + model_params, master_params = prep_param_lists(model) + .. warning:: + Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`. + .. _`Training Neural Networks with Mixed Precision: Real Examples`: + http://on-demand.gputechconf.com/gtc/2018/video/S81012/ + """ + model_params = [param for param in model.parameters() if param.requires_grad] + + if flat_master: + # Give the user some more useful error messages + try: + # flatten_dense_tensors returns a contiguous flat array. + # http://pytorch.org/docs/master/_modules/torch/_utils.html + master_params = _flatten_dense_tensors([param.data for param in model_params]).float() + except: + print("Error in prep_param_lists: model may contain a mixture of parameters " + "of different types. Use flat_master=False, or use F16_Optimizer.") + raise + master_params = torch.nn.Parameter(master_params) + master_params.requires_grad = True + # master_params.register_hook(backwards_debug_hook) + if master_params.grad is None: + master_params.grad = master_params.new(*master_params.size()) + return model_params, [master_params] + else: + master_params = [param.clone().float().detach() for param in model_params] + for param in master_params: + param.requires_grad = True + return model_params, master_params + + +def model_grads_to_master_grads(model_params, master_params, flat_master=False): + """ + Copy model gradients to master gradients. + Args: + model_params: List of model parameters created by :func:`prep_param_lists`. + master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`. + """ + if flat_master: + # The flattening may incur one more deep copy than is necessary. + master_params[0].grad.data.copy_( + _flatten_dense_tensors([p.grad.data for p in model_params])) + else: + for model, master in zip(model_params, master_params): + if model.grad is not None: + if master.grad is None: + master.grad = Variable(master.data.new(*master.data.size())) + master.grad.data.copy_(model.grad.data) + else: + master.grad = None + + +def master_params_to_model_params(model_params, master_params, flat_master=False): + """ + Copy master parameters to model parameters. + Args: + model_params: List of model parameters created by :func:`prep_param_lists`. + master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`. 
+ """ + if flat_master: + for model, master in zip(model_params, + _unflatten_dense_tensors(master_params[0].data, model_params)): + model.data.copy_(master) + else: + for model, master in zip(model_params, master_params): + model.data.copy_(master.data) + + +# Backward compatibility fixes +def to_python_float(t): + if hasattr(t, 'item'): + return t.item() + else: + return t[0] + +TORCH_MAJOR = int(torch.__version__.split('.')[0]) +TORCH_MINOR = int(torch.__version__.split('.')[1]) +if TORCH_MAJOR == 0 and TORCH_MINOR <= 4: + clip_grad_norm = torch.nn.utils.clip_grad_norm +else: + clip_grad_norm = torch.nn.utils.clip_grad_norm_ diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/loss_scaler.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/loss_scaler.py new file mode 100644 index 0000000..e512af6 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/fp16_utils/loss_scaler.py @@ -0,0 +1,212 @@ +# ------------------------------------------------------------------------------ +# Based on: +# apex +# Copyright (c) NVIDIA +# Licence under The BSD 3-Clause "New" or "Revised" License +# https://github.com/NVIDIA/apex +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +# following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +# disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +# products derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Modified by Bowen Cheng +# ------------------------------------------------------------------------------ + +import torch + + +# item() is a recent addition, so this helps with backward compatibility. +def to_python_float(t): + if hasattr(t, 'item'): + return t.item() + else: + return t[0] + + +class LossScaler: + """ + Class that manages a static loss scale. This class is intended to interact with + :class:`FP16_Optimizer`, and should not be directly manipulated by the user. + Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to + :class:`FP16_Optimizer`'s constructor. 
+ Args: + scale (float, optional, default=1.0): The loss scale. + """ + + def __init__(self, scale=1): + self.cur_scale = scale + + # `params` is a list / generator of torch.Variable + def has_overflow(self, params): + return False + + # `x` is a torch.Tensor + def _has_inf_or_nan(x): + return False + + def update_scale(self, overflow): + pass + + @property + def loss_scale(self): + return self.cur_scale + + def scale_gradient(self, module, grad_in, grad_out): + return tuple(self.loss_scale * g for g in grad_in) + + def backward(self, loss): + scaled_loss = loss * self.loss_scale + scaled_loss.backward() + + +class DynamicLossScaler: + """ + Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler` + indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of + :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler` + operates, because the default options can be changed using the + the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor. + Loss scaling is designed to combat the problem of underflowing gradients encountered at long + times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss + scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are + encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has + occurred. + :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch, + and :class:`DynamicLossScaler` adjusts the loss scale to a lower value. + If a certain number of iterations occur without overflowing gradients detected, + :class:`DynamicLossScaler` increases the loss scale once more. + In this way :class:`DynamicLossScaler` attempts to "ride the edge" of + always using the highest loss scale possible without incurring overflow. + Args: + init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.` + scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``. + scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale. + """ + + def __init__(self, + init_scale=2 ** 32, + scale_factor=2., + scale_window=1000): + self.cur_scale = init_scale + self.cur_iter = 0 + self.last_overflow_iter = -1 + self.scale_factor = scale_factor + self.scale_window = scale_window + + # `params` is a list / generator of torch.Variable + def has_overflow(self, params): + for p in params: + # if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data): + # return True + if p.grad is not None and self._has_inf_or_nan(p.grad.data): + return True + + return False + + # `x` is a torch.Tensor + # def _has_inf_or_nan(x): + def _has_inf_or_nan(self, x): + try: + # if x is half, the .float() incurs an additional deep copy, but it's necessary if + # Pytorch's .sum() creates a one-element tensor of the same type as x + # (which is true for some recent version of pytorch). 
+ cpu_sum = float(x.float().sum()) + # More efficient version that can be used if .sum() returns a Python scalar + # cpu_sum = float(x.sum()) + except RuntimeError as instance: + # We want to check if inst is actually an overflow exception. + # RuntimeError could come from a different error. + # If so, we still want the exception to propagate. + if "value cannot be converted" not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: + return True + return False + + # `overflow` is boolean indicating whether the gradient overflowed + def update_scale(self, overflow): + if overflow: + # self.cur_scale /= self.scale_factor + self.cur_scale = max(self.cur_scale / self.scale_factor, 1) + self.last_overflow_iter = self.cur_iter + else: + if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0: + self.cur_scale *= self.scale_factor + self.cur_iter += 1 + + @property + def loss_scale(self): + return self.cur_scale + + def scale_gradient(self, module, grad_in, grad_out): + return tuple(self.loss_scale * g for g in grad_in) + + def backward(self, loss): + scaled_loss = loss * self.loss_scale + scaled_loss.backward() + + +############################################################## +# Example usage below here -- assuming it's in a separate file +############################################################## +""" +TO-DO separate out into an example. +if __name__ == "__main__": + import torch + from torch.autograd import Variable + from dynamic_loss_scaler import DynamicLossScaler + # N is batch size; D_in is input dimension; + # H is hidden dimension; D_out is output dimension. + N, D_in, H, D_out = 64, 1000, 100, 10 + # Create random Tensors to hold inputs and outputs, and wrap them in Variables. + x = Variable(torch.randn(N, D_in), requires_grad=False) + y = Variable(torch.randn(N, D_out), requires_grad=False) + w1 = Variable(torch.randn(D_in, H), requires_grad=True) + w2 = Variable(torch.randn(H, D_out), requires_grad=True) + parameters = [w1, w2] + learning_rate = 1e-6 + optimizer = torch.optim.SGD(parameters, lr=learning_rate) + loss_scaler = DynamicLossScaler() + for t in range(500): + y_pred = x.mm(w1).clamp(min=0).mm(w2) + loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale + print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale)) + print('Iter {} scaled loss: {}'.format(t, loss.data[0])) + print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale)) + # Run backprop + optimizer.zero_grad() + loss.backward() + # Check for overflow + has_overflow = DynamicLossScaler.has_overflow(parameters) + # If no overflow, unscale grad and update as usual + if not has_overflow: + for param in parameters: + param.grad.data.mul_(1. / loss_scaler.loss_scale) + optimizer.step() + # Otherwise, don't do anything -- ie, skip iteration + else: + print('OVERFLOW!') + # Update loss scale for next iteration + loss_scaler.update_scale(has_overflow) +""" diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/models/__init__.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/models/__init__.py new file mode 100644 index 0000000..11df676 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/models/__init__.py @@ -0,0 +1,11 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
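As a quick illustration of the `update_scale` logic above: the scale is divided by `scale_factor` whenever an overflow is reported and multiplied by it again after `scale_window` consecutive clean iterations. The sketch below simply drives the scaler with a hand-written overflow pattern; the numbers are illustrative only.

```python
# Drive DynamicLossScaler with a made-up overflow pattern to show how the loss
# scale reacts: halved on overflow, doubled after `scale_window` clean steps.
from fp16_utils.loss_scaler import DynamicLossScaler

scaler = DynamicLossScaler(init_scale=2 ** 16, scale_factor=2., scale_window=4)
for step, overflow in enumerate([False, False, True, False, False, False, False]):
    print(step, scaler.loss_scale, 'overflow' if overflow else 'ok')
    scaler.update_scale(overflow)
# The overflow at step 2 drops the scale to 2**15; after four clean iterations
# (steps 3-6) it is raised back to 2**16.
```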
+# Written by Bin Xiao (Bin.Xiao@microsoft.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import models.pose_higher_hrnet diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/models/pose_higher_hrnet.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/models/pose_higher_hrnet.py new file mode 100644 index 0000000..cd6255a --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/models/pose_higher_hrnet.py @@ -0,0 +1,570 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import logging + +import torch +import torch.nn as nn + + +BN_MOMENTUM = 0.1 +logger = logging.getLogger(__name__) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, + bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion, + momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, + num_channels, fuse_method, multi_scale_output=True): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, 
num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(True) + + def _check_branches(self, num_branches, blocks, num_blocks, + num_inchannels, num_channels): + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( + num_branches, len(num_blocks)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( + num_branches, len(num_channels)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_inchannels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( + num_branches, len(num_inchannels)) + logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, + stride=1): + downsample = None + if stride != 1 or \ + self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.num_inchannels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(num_channels[branch_index] * block.expansion, + momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append(block(self.num_inchannels[branch_index], + num_channels[branch_index], stride, downsample)) + self.num_inchannels[branch_index] = \ + num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], + num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_inchannels[i], + 1, + 1, + 0, + bias=False), + nn.BatchNorm2d(num_inchannels[i]), + nn.Upsample(scale_factor=2**(j-i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = [] + for k in range(i-j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_outchannels_conv3x3, + 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_outchannels_conv3x3, + 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3), + nn.ReLU(True))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] 
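The fuse layers built above implement the HRNet exchange unit: for output branch `i`, every lower-resolution branch `j > i` is channel-matched with a 1x1 conv and nearest-upsampled, every higher-resolution branch `j < i` is downsampled with strided 3x3 convs, and the results are summed. A toy two-branch version (with arbitrary channel counts, not the configured ones) is sketched below.

```python
# Toy two-branch version of the fusion rule in _make_fuse_layers / forward above.
# Channel counts and spatial sizes are arbitrary, not taken from any config.
import torch
import torch.nn as nn

x_hi = torch.randn(1, 32, 64, 64)    # branch 0: full resolution
x_lo = torch.randn(1, 64, 32, 32)    # branch 1: half resolution

# branch 1 -> branch 0 (j > i): 1x1 conv to match channels, then 2x nearest upsample
up = nn.Sequential(nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32),
                   nn.Upsample(scale_factor=2, mode='nearest'))
# branch 0 -> branch 1 (j < i): strided 3x3 conv halves the resolution
down = nn.Sequential(nn.Conv2d(32, 64, 3, 2, 1, bias=False), nn.BatchNorm2d(64))

y0 = torch.relu(x_hi + up(x_lo))      # fused full-resolution feature map
y1 = torch.relu(down(x_hi) + x_lo)    # fused half-resolution feature map
print(y0.shape, y1.shape)             # [1, 32, 64, 64] and [1, 64, 32, 32]
```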
+ + for i in range(len(self.fuse_layers)): + y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class PoseHigherResolutionNet(nn.Module): + + def __init__(self, cfg, **kwargs): + self.inplanes = 64 + extra = cfg.MODEL.EXTRA + super(PoseHigherResolutionNet, self).__init__() + + # stem net + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.layer1 = self._make_layer(Bottleneck, 64, 4) + + self.stage2_cfg = cfg['MODEL']['EXTRA']['STAGE2'] + num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels)) + ] + self.transition1 = self._make_transition_layer([256], num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['MODEL']['EXTRA']['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels)) + ] + self.transition2 = self._make_transition_layer( + pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['MODEL']['EXTRA']['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels)) + ] + self.transition3 = self._make_transition_layer( + pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels, multi_scale_output=False) + + self.final_layers = self._make_final_layers(cfg, pre_stage_channels[0]) + self.deconv_layers = self._make_deconv_layers( + cfg, pre_stage_channels[0]) + + self.num_deconvs = extra.DECONV.NUM_DECONVS + self.deconv_config = cfg.MODEL.EXTRA.DECONV + self.loss_config = cfg.LOSS + + self.pretrained_layers = cfg['MODEL']['EXTRA']['PRETRAINED_LAYERS'] + + def _make_final_layers(self, cfg, input_channels): + dim_tag = cfg.MODEL.NUM_JOINTS if cfg.MODEL.TAG_PER_JOINT else 1 + extra = cfg.MODEL.EXTRA + + final_layers = [] + output_channels = cfg.MODEL.NUM_JOINTS + dim_tag \ + if cfg.LOSS.WITH_AE_LOSS[0] else cfg.MODEL.NUM_JOINTS + final_layers.append(nn.Conv2d( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=extra.FINAL_CONV_KERNEL, + stride=1, + padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0 + )) + + deconv_cfg = extra.DECONV + for i in range(deconv_cfg.NUM_DECONVS): + input_channels = deconv_cfg.NUM_CHANNELS[i] + output_channels = cfg.MODEL.NUM_JOINTS + dim_tag \ + if cfg.LOSS.WITH_AE_LOSS[i+1] else cfg.MODEL.NUM_JOINTS + final_layers.append(nn.Conv2d( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=extra.FINAL_CONV_KERNEL, + stride=1, + padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0 + )) + + return nn.ModuleList(final_layers) + + def _make_deconv_layers(self, cfg, input_channels): + dim_tag = cfg.MODEL.NUM_JOINTS if 
cfg.MODEL.TAG_PER_JOINT else 1 + extra = cfg.MODEL.EXTRA + deconv_cfg = extra.DECONV + + deconv_layers = [] + for i in range(deconv_cfg.NUM_DECONVS): + if deconv_cfg.CAT_OUTPUT[i]: + final_output_channels = cfg.MODEL.NUM_JOINTS + dim_tag \ + if cfg.LOSS.WITH_AE_LOSS[i] else cfg.MODEL.NUM_JOINTS + input_channels += final_output_channels + output_channels = deconv_cfg.NUM_CHANNELS[i] + deconv_kernel, padding, output_padding = \ + self._get_deconv_cfg(deconv_cfg.KERNEL_SIZE[i]) + + layers = [] + layers.append(nn.Sequential( + nn.ConvTranspose2d( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=deconv_kernel, + stride=2, + padding=padding, + output_padding=output_padding, + bias=False), + nn.BatchNorm2d(output_channels, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True) + )) + for _ in range(cfg.MODEL.EXTRA.DECONV.NUM_BASIC_BLOCKS): + layers.append(nn.Sequential( + BasicBlock(output_channels, output_channels), + )) + deconv_layers.append(nn.Sequential(*layers)) + input_channels = output_channels + + return nn.ModuleList(deconv_layers) + + def _get_deconv_cfg(self, deconv_kernel): + if deconv_kernel == 4: + padding = 1 + output_padding = 0 + elif deconv_kernel == 3: + padding = 1 + output_padding = 1 + elif deconv_kernel == 2: + padding = 0 + output_padding = 0 + + return deconv_kernel, padding, output_padding + + def _make_transition_layer( + self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], + num_channels_cur_layer[i], + 3, + 1, + 1, + bias=False), + nn.BatchNorm2d(num_channels_cur_layer[i]), + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv3x3s = [] + for j in range(i+1-num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] \ + if j == i-num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d( + inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, + multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + if not multi_scale_output and i == num_modules - 1: + reset_multi_scale_output = False + else: + reset_multi_scale_output = True + + 
modules.append( + HighResolutionModule( + num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + fuse_method, + reset_multi_scale_output) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['NUM_BRANCHES']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['NUM_BRANCHES']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['NUM_BRANCHES']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + final_outputs = [] + x = y_list[0] + y = self.final_layers[0](x) + final_outputs.append(y) + + for i in range(self.num_deconvs): + if self.deconv_config.CAT_OUTPUT[i]: + x = torch.cat((x, y), 1) + + x = self.deconv_layers[i](x) + y = self.final_layers[i+1](x) + final_outputs.append(y) + + return final_outputs + + def init_weights(self, pretrained='', verbose=True): + logger.info('=> init weights from normal distribution') + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, std=0.001) + for name, _ in m.named_parameters(): + if name in ['bias']: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + nn.init.normal_(m.weight, std=0.001) + for name, _ in m.named_parameters(): + if name in ['bias']: + nn.init.constant_(m.bias, 0) + + parameters_names = set() + for name, _ in self.named_parameters(): + parameters_names.add(name) + + buffers_names = set() + for name, _ in self.named_buffers(): + buffers_names.add(name) + + if os.path.isfile(pretrained): + pretrained_state_dict = torch.load(pretrained) + logger.info('=> loading pretrained model {}'.format(pretrained)) + + need_init_state_dict = {} + for name, m in pretrained_state_dict.items(): + if name.split('.')[0] in self.pretrained_layers \ + or self.pretrained_layers[0] is '*': + if name in parameters_names or name in buffers_names: + if verbose: + logger.info( + '=> init {} from {}'.format(name, pretrained) + ) + need_init_state_dict[name] = m + self.load_state_dict(need_init_state_dict, strict=False) + + +def get_pose_net(cfg, is_train, **kwargs): + model = PoseHigherResolutionNet(cfg, **kwargs) + + if is_train and cfg.MODEL.INIT_WEIGHTS: + model.init_weights(cfg.MODEL.PRETRAINED, verbose=cfg.VERBOSE) + + return model diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/transforms.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/transforms.py new file mode 100644 index 0000000..8f366d9 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/transforms.py @@ -0,0 +1,202 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
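`get_pose_net` is the factory the tools scripts use to build the network from a yacs experiment config. The sketch below builds it directly and inspects the multi-resolution outputs; it assumes `lib/` is on `sys.path`, and the yaml path is a placeholder for whichever HigherHRNet experiment file is used.

```python
# Build the network from an experiment config and inspect its outputs.
# The yaml path is a placeholder; any HigherHRNet experiment config should do.
import torch
import models                      # lib/models, exposes models.pose_higher_hrnet
from config import cfg             # lib/config, a yacs CfgNode

cfg.defrost()
cfg.merge_from_file('experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml')
cfg.freeze()

net = models.pose_higher_hrnet.get_pose_net(cfg, is_train=False)

with torch.no_grad():
    outputs = net(torch.randn(1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE))

# `outputs` is a list: heatmaps (plus tag maps, if enabled) at 1/4 resolution,
# followed by one higher-resolution map per deconv stage.
for o in outputs:
    print(o.shape)
```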
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import cv2 + + +def flip_back(output_flipped, matched_parts): + ''' + ouput_flipped: numpy.ndarray(batch_size, num_joints, height, width) + ''' + assert output_flipped.ndim == 4,\ + 'output_flipped should be [batch_size, num_joints, height, width]' + + output_flipped = output_flipped[:, :, :, ::-1] + + for pair in matched_parts: + tmp = output_flipped[:, pair[0], :, :].copy() + output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :] + output_flipped[:, pair[1], :, :] = tmp + + return output_flipped + + +def fliplr_joints(joints, joints_vis, width, matched_parts): + """ + flip coords + """ + # Flip horizontal + joints[:, 0] = width - joints[:, 0] - 1 + + # Change left-right parts + for pair in matched_parts: + joints[pair[0], :], joints[pair[1], :] = \ + joints[pair[1], :], joints[pair[0], :].copy() + joints_vis[pair[0], :], joints_vis[pair[1], :] = \ + joints_vis[pair[1], :], joints_vis[pair[0], :].copy() + + return joints*joints_vis, joints_vis + + +def transform_preds(coords, center, scale, output_size): + # target_coords = np.zeros(coords.shape) + target_coords = coords.copy() + trans = get_affine_transform(center, scale, 0, output_size, inv=1) + for p in range(coords.shape[0]): + target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) + return target_coords + + +def get_affine_transform(center, + scale, + rot, + output_size, + shift=np.array([0, 0], dtype=np.float32), + inv=0): + if not isinstance(scale, np.ndarray) and not isinstance(scale, list): + print(scale) + scale = np.array([scale, scale]) + + scale_tmp = scale * 200.0 + src_w = scale_tmp[0] + dst_w = output_size[0] + dst_h = output_size[1] + + rot_rad = np.pi * rot / 180 + src_dir = get_dir([0, src_w * -0.5], rot_rad) + dst_dir = np.array([0, dst_w * -0.5], np.float32) + + src = np.zeros((3, 2), dtype=np.float32) + dst = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale_tmp * shift + src[1, :] = center + src_dir + scale_tmp * shift + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir + + src[2:, :] = get_3rd_point(src[0, :], src[1, :]) + dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + return trans + + +def affine_transform(pt, t): + new_pt = np.array([pt[0], pt[1], 1.]).T + new_pt = np.dot(t, new_pt) + return new_pt[:2] + + +def get_3rd_point(a, b): + direct = a - b + return b + np.array([-direct[1], direct[0]], dtype=np.float32) + + +def get_dir(src_point, rot_rad): + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + + src_result = [0, 0] + src_result[0] = src_point[0] * cs - src_point[1] * sn + src_result[1] = src_point[0] * sn + src_point[1] * cs + + return src_result + + +def crop(img, center, scale, output_size, rot=0): + trans = get_affine_transform(center, scale, rot, output_size) + + dst_img = cv2.warpAffine(img, + trans, + (int(output_size[0]), int(output_size[1])), + flags=cv2.INTER_LINEAR) + + return dst_img + + +def resize(image, input_size): + h, w, _ = image.shape + + center = np.array([int(w/2.0+0.5), int(h/2.0+0.5)]) + if w < h: + w_resized = input_size + h_resized = 
int((input_size / w * h + 63) // 64 * 64) + scale_w = w / 200.0 + scale_h = h_resized / w_resized * w / 200.0 + else: + h_resized = input_size + w_resized = int((input_size / h * w + 63) // 64 * 64) + scale_h = h / 200.0 + scale_w = w_resized / h_resized * h / 200.0 + + scale = np.array([scale_w, scale_h]) + trans = get_affine_transform(center, scale, 0, (w_resized, h_resized)) + + image_resized = cv2.warpAffine( + image, + trans, + (int(w_resized), int(h_resized)) + ) + + return image_resized, center, scale + + +def get_multi_scale_size(image, input_size, current_scale, min_scale): + h, w, _ = image.shape + center = np.array([int(w / 2.0 + 0.5), int(h / 2.0 + 0.5)]) + + # calculate the size for min_scale + min_input_size = int((min_scale * input_size + 63)//64 * 64) + if w < h: + w_resized = int(min_input_size * current_scale / min_scale) + h_resized = int( + int((min_input_size/w*h+63)//64*64)*current_scale/min_scale + ) + scale_w = w / 200.0 + scale_h = h_resized / w_resized * w / 200.0 + else: + h_resized = int(min_input_size * current_scale / min_scale) + w_resized = int( + int((min_input_size/h*w+63)//64*64)*current_scale/min_scale + ) + scale_h = h / 200.0 + scale_w = w_resized / h_resized * h / 200.0 + + return (w_resized, h_resized), center, np.array([scale_w, scale_h]) + + +def resize_align_multi_scale(image, input_size, current_scale, min_scale): + size_resized, center, scale = get_multi_scale_size( + image, input_size, current_scale, min_scale + ) + trans = get_affine_transform(center, scale, 0, size_resized) + + image_resized = cv2.warpAffine( + image, + trans, + size_resized + # (int(w_resized), int(h_resized)) + ) + + return image_resized, center, scale + + +def get_final_preds(grouped_joints, center, scale, heatmap_size): + final_results = [] + for person in grouped_joints[0]: + joints = np.zeros((person.shape[0], 3)) + joints = transform_preds(person, center, scale, heatmap_size) + final_results.append(joints) + + return final_results diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/utils.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/utils.py new file mode 100644 index 0000000..197ded5 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/utils.py @@ -0,0 +1,238 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
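At test time the pipeline resizes the input with `resize_align_multi_scale`, runs the network, and then maps heatmap-space coordinates back into the original image with `transform_preds` / `get_final_preds`. A self-contained round-trip sketch (synthetic image, made-up joint coordinates) follows.

```python
# Round-trip sketch: resize an image for inference, then map heatmap-space joint
# coordinates back to the original frame. Image and coordinates are synthetic.
import numpy as np
from utils.transforms import resize_align_multi_scale, transform_preds

image = np.zeros((480, 640, 3), dtype=np.uint8)              # H x W x 3 input
image_resized, center, scale = resize_align_multi_scale(image, 512, 1, 1)

# The network's finest heatmaps are at 1/4 of the resized image.
heatmap_size = (image_resized.shape[1] // 4, image_resized.shape[0] // 4)
joints_heatmap = np.array([[10.0, 20.0], [30.0, 40.0]])      # (num_joints, 2) in heatmap space
joints_original = transform_preds(joints_heatmap, center, scale, heatmap_size)
print(joints_original)                                       # coordinates in the 640x480 image
```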
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import logging +import time +from collections import namedtuple +from pathlib import Path + +import torch +import torch.optim as optim +import torch.nn as nn + + +def setup_logger(final_output_dir, rank, phase): + time_str = time.strftime('%Y-%m-%d-%H-%M') + log_file = '{}_{}_rank{}.log'.format(phase, time_str, rank) + final_log_file = os.path.join(final_output_dir, log_file) + head = '%(asctime)-15s %(message)s' + # logging.basicConfig(format=head) + logging.basicConfig(filename=str(final_log_file), + format=head) + logger = logging.getLogger() + logger.setLevel(logging.INFO) + console = logging.StreamHandler() + logging.getLogger('').addHandler(console) + + return logger, time_str + + +def create_logger(cfg, cfg_name, phase='train'): + root_output_dir = Path(cfg.OUTPUT_DIR) + # set up logger + if not root_output_dir.exists() and cfg.RANK == 0: + print('=> creating {}'.format(root_output_dir)) + root_output_dir.mkdir() + else: + while not root_output_dir.exists(): + print('=> wait for {} created'.format(root_output_dir)) + time.sleep(30) + + dataset = cfg.DATASET.DATASET + dataset = dataset.replace(':', '_') + model = cfg.MODEL.NAME + cfg_name = os.path.basename(cfg_name).split('.')[0] + + final_output_dir = root_output_dir / dataset / model / cfg_name + + if cfg.RANK == 0: + print('=> creating {}'.format(final_output_dir)) + final_output_dir.mkdir(parents=True, exist_ok=True) + else: + while not final_output_dir.exists(): + print('=> wait for {} created'.format(final_output_dir)) + time.sleep(5) + + logger, time_str = setup_logger(final_output_dir, cfg.RANK, phase) + + tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \ + (cfg_name + '_' + time_str) + + print('=> creating {}'.format(tensorboard_log_dir)) + tensorboard_log_dir.mkdir(parents=True, exist_ok=True) + + return logger, str(final_output_dir), str(tensorboard_log_dir) + + +def get_optimizer(cfg, model): + optimizer = None + if cfg.TRAIN.OPTIMIZER == 'sgd': + optimizer = optim.SGD( + model.parameters(), + lr=cfg.TRAIN.LR, + momentum=cfg.TRAIN.MOMENTUM, + weight_decay=cfg.TRAIN.WD, + nesterov=cfg.TRAIN.NESTEROV + ) + elif cfg.TRAIN.OPTIMIZER == 'adam': + optimizer = optim.Adam( + model.parameters(), + lr=cfg.TRAIN.LR + ) + + return optimizer + + +def save_checkpoint(states, is_best, output_dir, + filename='checkpoint.pth.tar'): + torch.save(states, os.path.join(output_dir, filename)) + + if is_best and 'state_dict' in states: + torch.save( + states['best_state_dict'], + os.path.join(output_dir, 'model_best.pth.tar') + ) + + +def get_model_summary(model, *input_tensors, item_length=26, verbose=True): + """ + :param model: + :param input_tensors: + :param item_length: + :return: + """ + + summary = [] + + ModuleDetails = namedtuple( + "Layer", ["name", "input_size", "output_size", "num_parameters", "multiply_adds"]) + hooks = [] + layer_instances = {} + + def add_hooks(module): + + def hook(module, input, output): + class_name = str(module.__class__.__name__) + + instance_index = 1 + if class_name not in layer_instances: + layer_instances[class_name] = instance_index + else: + instance_index = layer_instances[class_name] + 1 + layer_instances[class_name] = instance_index + + layer_name = class_name + "_" + str(instance_index) + + params = 0 + + if class_name.find("Conv") 
!= -1 or class_name.find("BatchNorm") != -1 or \ + class_name.find("Linear") != -1: + for param_ in module.parameters(): + params += param_.view(-1).size(0) + + flops = "Not Available" + if class_name.find("Conv") != -1 and hasattr(module, "weight"): + flops = ( + torch.prod( + torch.LongTensor(list(module.weight.data.size()))) * + torch.prod( + torch.LongTensor(list(output.size())[2:]))).item() + elif isinstance(module, nn.Linear): + flops = (torch.prod(torch.LongTensor(list(output.size()))) \ + * input[0].size(1)).item() + + if isinstance(input[0], list): + input = input[0] + if isinstance(output, list): + output = output[0] + + summary.append( + ModuleDetails( + name=layer_name, + input_size=list(input[0].size()), + output_size=list(output.size()), + num_parameters=params, + multiply_adds=flops) + ) + + if not isinstance(module, nn.ModuleList) \ + and not isinstance(module, nn.Sequential) \ + and module != model: + hooks.append(module.register_forward_hook(hook)) + + model.eval() + model.apply(add_hooks) + + space_len = item_length + + model(*input_tensors) + for hook in hooks: + hook.remove() + + details = '' + if verbose: + details = "Model Summary" + \ + os.linesep + \ + "Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}".format( + ' ' * (space_len - len("Name")), + ' ' * (space_len - len("Input Size")), + ' ' * (space_len - len("Output Size")), + ' ' * (space_len - len("Parameters")), + ' ' * (space_len - len("Multiply Adds (Flops)"))) \ + + os.linesep + '-' * space_len * 5 + os.linesep + params_sum = 0 + flops_sum = 0 + for layer in summary: + params_sum += layer.num_parameters + if layer.multiply_adds != "Not Available": + flops_sum += layer.multiply_adds + if verbose: + details += "{}{}{}{}{}{}{}{}{}{}".format( + layer.name, + ' ' * (space_len - len(layer.name)), + layer.input_size, + ' ' * (space_len - len(str(layer.input_size))), + layer.output_size, + ' ' * (space_len - len(str(layer.output_size))), + layer.num_parameters, + ' ' * (space_len - len(str(layer.num_parameters))), + layer.multiply_adds, + ' ' * (space_len - len(str(layer.multiply_adds)))) \ + + os.linesep + '-' * space_len * 5 + os.linesep + + details += os.linesep \ + + "Total Parameters: {:,}".format(params_sum) \ + + os.linesep + '-' * space_len * 5 + os.linesep + details += "Total Multiply Adds (For Convolution and Linear Layers only): {:,}".format(flops_sum) \ + + os.linesep + '-' * space_len * 5 + os.linesep + details += "Number of Layers" + os.linesep + for layer in layer_instances: + details += "{} : {} layers ".format(layer, layer_instances[layer]) + + return details + + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count if self.count != 0 else 0 diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/vis.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/vis.py new file mode 100644 index 0000000..69a1f77 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/vis.py @@ -0,0 +1,238 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
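`AverageMeter` is the small bookkeeping class the training loop uses for running statistics: `update(val, n)` adds `n` samples with value `val`, and `avg` is the weighted mean over everything seen so far. A quick sketch with made-up batch losses:

```python
# AverageMeter usage with made-up batch losses.
from utils.utils import AverageMeter

losses = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    losses.update(batch_loss, n=batch_size)

print('last={:.3f} avg={:.3f}'.format(losses.val, losses.avg))
# avg = (0.9*32 + 0.7*32 + 0.5*16) / 80 = 0.74
```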
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import cv2 +import numpy as np +import torchvision + +from dataset import VIS_CONFIG + + +def add_joints(image, joints, color, dataset='COCO'): + part_idx = VIS_CONFIG[dataset]['part_idx'] + part_orders = VIS_CONFIG[dataset]['part_orders'] + + def link(a, b, color): + if part_idx[a] < joints.shape[0] and part_idx[b] < joints.shape[0]: + jointa = joints[part_idx[a]] + jointb = joints[part_idx[b]] + if jointa[2] > 0 and jointb[2] > 0: + cv2.line( + image, + (int(jointa[0]), int(jointa[1])), + (int(jointb[0]), int(jointb[1])), + color, + 2 + ) + + # add joints + for joint in joints: + if joint[2] > 0: + cv2.circle(image, (int(joint[0]), int(joint[1])), 1, color, 2) + + # add link + for pair in part_orders: + link(pair[0], pair[1], color) + + return image + + +def save_valid_image(image, joints, file_name, dataset='COCO'): + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + + for person in joints: + color = np.random.randint(0, 255, size=3) + color = [int(i) for i in color] + add_joints(image, person, color, dataset=dataset) + + cv2.imwrite(file_name, image) + + +def make_heatmaps(image, heatmaps): + heatmaps = heatmaps.mul(255)\ + .clamp(0, 255)\ + .byte()\ + .cpu().numpy() + + num_joints, height, width = heatmaps.shape + image_resized = cv2.resize(image, (int(width), int(height))) + + image_grid = np.zeros((height, (num_joints+1)*width, 3), dtype=np.uint8) + + for j in range(num_joints): + # add_joints(image_resized, joints[:, j, :]) + heatmap = heatmaps[j, :, :] + colored_heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET) + image_fused = colored_heatmap*0.7 + image_resized*0.3 + + width_begin = width * (j+1) + width_end = width * (j+2) + image_grid[:, width_begin:width_end, :] = image_fused + + image_grid[:, 0:width, :] = image_resized + + return image_grid + + +def make_tagmaps(image, tagmaps): + num_joints, height, width = tagmaps.shape + image_resized = cv2.resize(image, (int(width), int(height))) + + image_grid = np.zeros((height, (num_joints+1)*width, 3), dtype=np.uint8) + + for j in range(num_joints): + tagmap = tagmaps[j, :, :] + min = float(tagmap.min()) + max = float(tagmap.max()) + tagmap = tagmap.add(-min)\ + .div(max - min + 1e-5)\ + .mul(255)\ + .clamp(0, 255)\ + .byte()\ + .cpu()\ + .numpy() + + colored_tagmap = cv2.applyColorMap(tagmap, cv2.COLORMAP_JET) + image_fused = colored_tagmap*0.9 + image_resized*0.1 + + width_begin = width * (j+1) + width_end = width * (j+2) + image_grid[:, width_begin:width_end, :] = image_fused + + image_grid[:, 0:width, :] = image_resized + + return image_grid + + +def save_batch_image_with_joints(batch_image, batch_joints, batch_joints_vis, + file_name, nrow=8, padding=2): + ''' + batch_image: [batch_size, channel, height, width] + batch_joints: [batch_size, num_joints, 3], + batch_joints_vis: [batch_size, num_joints, 1], + } + ''' + grid = torchvision.utils.make_grid(batch_image, nrow, padding, True) + ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy() + ndarr = cv2.cvtColor(ndarr, cv2.COLOR_RGB2BGR) + + nmaps = batch_image.size(0) + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height = int(batch_image.size(2) + padding) + width = int(batch_image.size(3) + padding) + k = 0 + for y in 
range(ymaps): + for x in range(xmaps): + if k >= nmaps: + break + joints = batch_joints[k] + joints_vis = batch_joints_vis[k] + + for joint, joint_vis in zip(joints, joints_vis): + joint[0] = x * width + padding + joint[0] + joint[1] = y * height + padding + joint[1] + if joint_vis[0]: + cv2.circle( + ndarr, + (int(joint[0]), int(joint[1])), + 2, + [255, 0, 0], + 2 + ) + k = k + 1 + cv2.imwrite(file_name, ndarr) + + +def save_batch_maps( + batch_image, + batch_maps, + batch_mask, + file_name, + map_type='heatmap', + normalize=True +): + if normalize: + batch_image = batch_image.clone() + min = float(batch_image.min()) + max = float(batch_image.max()) + + batch_image.add_(-min).div_(max - min + 1e-5) + + batch_size = batch_maps.size(0) + num_joints = batch_maps.size(1) + map_height = batch_maps.size(2) + map_width = batch_maps.size(3) + + grid_image = np.zeros( + (batch_size*map_height, (num_joints+1)*map_width, 3), + dtype=np.uint8 + ) + + for i in range(batch_size): + image = batch_image[i].mul(255)\ + .clamp(0, 255)\ + .byte()\ + .permute(1, 2, 0)\ + .cpu().numpy() + + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + maps = batch_maps[i] + + if map_type == 'heatmap': + image_with_hms = make_heatmaps(image, maps) + elif map_type == 'tagmap': + image_with_hms = make_tagmaps(image, maps) + + height_begin = map_height * i + height_end = map_height * (i + 1) + + grid_image[height_begin:height_end, :, :] = image_with_hms + if batch_mask is not None: + mask = np.expand_dims(batch_mask[i].byte().cpu().numpy(), -1) + grid_image[height_begin:height_end, :map_width, :] = \ + grid_image[height_begin:height_end, :map_width, :] * mask + + cv2.imwrite(file_name, grid_image) + + +def save_debug_images( + config, + batch_images, + batch_heatmaps, + batch_masks, + batch_outputs, + prefix +): + if not config.DEBUG.DEBUG: + return + + num_joints = config.DATASET.NUM_JOINTS + batch_pred_heatmaps = batch_outputs[:, :num_joints, :, :] + batch_pred_tagmaps = batch_outputs[:, num_joints:, :, :] + + if config.DEBUG.SAVE_HEATMAPS_GT and batch_heatmaps is not None: + file_name = '{}_hm_gt.jpg'.format(prefix) + save_batch_maps( + batch_images, batch_heatmaps, batch_masks, file_name, 'heatmap' + ) + if config.DEBUG.SAVE_HEATMAPS_PRED: + file_name = '{}_hm_pred.jpg'.format(prefix) + save_batch_maps( + batch_images, batch_pred_heatmaps, batch_masks, file_name, 'heatmap' + ) + if config.DEBUG.SAVE_TAGMAPS_PRED: + file_name = '{}_tag_pred.jpg'.format(prefix) + save_batch_maps( + batch_images, batch_pred_tagmaps, batch_masks, file_name, 'tagmap' + ) diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/zipreader.py b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/zipreader.py new file mode 100644 index 0000000..7be1d68 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/lib/utils/zipreader.py @@ -0,0 +1,70 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
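`save_valid_image` is the simplest entry point into these visualisation helpers: it takes an RGB image and a list of per-person keypoint arrays (x, y, confidence) and writes a skeleton overlay to disk. The sketch below uses random keypoints and assumes the `dataset` package (which provides `VIS_CONFIG`) is importable, as it is inside `lib/`.

```python
# Dump a COCO-style skeleton overlay with save_valid_image. Keypoints are random;
# in practice they come from the grouping stage as one (K, 3) array per person.
import numpy as np
from utils.vis import save_valid_image

image = np.zeros((256, 256, 3), dtype=np.uint8)      # RGB image
person = np.zeros((17, 3))                           # 17 COCO keypoints: x, y, confidence
person[:, 0] = np.random.uniform(0, 256, 17)
person[:, 1] = np.random.uniform(0, 256, 17)
person[:, 2] = 1.0                                   # > 0 means "draw this joint"

save_valid_image(image, [person], 'debug_pose.jpg', dataset='COCO')
```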
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +import xml.etree.ElementTree as ET + +import cv2 +import numpy as np + +_im_zfile = [] +_xml_path_zip = [] +_xml_zfile = [] + + +def imread(filename, flags=cv2.IMREAD_COLOR): + global _im_zfile + path = filename + pos_at = path.index('@') + if pos_at == -1: + print("character '@' is not found from the given path '%s'"%(path)) + assert 0 + path_zip = path[0: pos_at] + path_img = path[pos_at + 1:] + if not os.path.isfile(path_zip): + print("zip file '%s' is not found"%(path_zip)) + assert 0 + for i in range(len(_im_zfile)): + if _im_zfile[i]['path'] == path_zip: + data = _im_zfile[i]['zipfile'].read(path_img) + return cv2.imdecode(np.frombuffer(data, np.uint8), flags) + + _im_zfile.append({ + 'path': path_zip, + 'zipfile': zipfile.ZipFile(path_zip, 'r') + }) + data = _im_zfile[-1]['zipfile'].read(path_img) + + return cv2.imdecode(np.frombuffer(data, np.uint8), flags) + + +def xmlread(filename): + global _xml_path_zip + global _xml_zfile + path = filename + pos_at = path.index('@') + if pos_at == -1: + print("character '@' is not found from the given path '%s'"%(path)) + assert 0 + path_zip = path[0: pos_at] + path_xml = path[pos_at + 2:] + if not os.path.isfile(path_zip): + print("zip file '%s' is not found"%(path_zip)) + assert 0 + for i in xrange(len(_xml_path_zip)): + if _xml_path_zip[i] == path_zip: + data = _xml_zfile[i].open(path_xml) + return ET.fromstring(data.read()) + _xml_path_zip.append(path_zip) + print("read new xml file '%s'"%(path_zip)) + _xml_zfile.append(zipfile.ZipFile(path_zip, 'r')) + data = _xml_zfile[-1].open(path_xml) + return ET.fromstring(data.read()) diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/requirements.txt b/data_processing/HigherHRNet-Human-Pose-Estimation/requirements.txt new file mode 100644 index 0000000..e91f702 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/requirements.txt @@ -0,0 +1,13 @@ +EasyDict==1.7 +opencv-python +Cython +scipy +pandas +pyyaml +json_tricks +scikit-image +tensorboardX +yacs +cffi +munkres +tqdm diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/tools/_init_paths.py b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/_init_paths.py new file mode 100644 index 0000000..e6fd60e --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/_init_paths.py @@ -0,0 +1,23 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
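`zipreader.imread` reads images directly out of a zip archive using paths of the form `<zip file>@<member>`, caching the opened `ZipFile` so repeated reads from the same archive stay cheap. The archive and member names in this sketch are hypothetical.

```python
# Read an image straight from a zip archive (hypothetical paths).
from utils import zipreader

img = zipreader.imread('data/coco/images.zip@train2017/000000000009.jpg')
print(img.shape)   # decoded via cv2.imdecode, so BGR, H x W x 3 by default
```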
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os.path as osp +import sys + + +def add_path(path): + if path not in sys.path: + sys.path.insert(0, path) + + +this_dir = osp.dirname(__file__) + +lib_path = osp.join(this_dir, '..', 'lib') +add_path(lib_path) diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/tools/crowdpose_concat_train_val.py b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/crowdpose_concat_train_val.py new file mode 100644 index 0000000..514cf9f --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/crowdpose_concat_train_val.py @@ -0,0 +1,49 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import json +import os + + +def parse_args(): + parser = argparse.ArgumentParser(description='Concat CrowdPose train and val') + + parser.add_argument('--data_dir', + help='data directory containing json annotation file', + default='data/crowd_pose/json', + type=str) + + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + train_dataset = json.load(open(os.path.join(args.data_dir, 'crowdpose_train.json'))) + val_dataset = json.load(open(os.path.join(args.data_dir, 'crowdpose_val.json'))) + + trainval_dataset = {} + trainval_dataset['categories'] = train_dataset['categories'] + trainval_dataset['images'] = [] + trainval_dataset['images'].extend(train_dataset['images']) + trainval_dataset['images'].extend(val_dataset['images']) + trainval_dataset['annotations'] = [] + trainval_dataset['annotations'].extend(train_dataset['annotations']) + trainval_dataset['annotations'].extend(val_dataset['annotations']) + + with open(os.path.join(args.data_dir, 'crowdpose_trainval.json'), 'w') as f: + json.dump(trainval_dataset, f) + + +if __name__ == '__main__': + main() diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/tools/dist_train.py b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/dist_train.py new file mode 100644 index 0000000..6d662aa --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/dist_train.py @@ -0,0 +1,319 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
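Scripts under `tools/` begin with `import _init_paths`, which exists purely for its side effect of prepending `../lib` to `sys.path` so the `lib/` packages resolve. A minimal sketch, assuming it is run from the `tools/` directory:

```python
# Minimal sketch of the path bootstrap used by the tools/ scripts
# (run from the tools/ directory so _init_paths is importable).
import _init_paths  # noqa: F401 -- side effect: adds ../lib to sys.path

import models                 # now resolves to lib/models
from config import cfg        # and lib/config
print(type(cfg))
```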
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import pprint +import shutil +import warnings + +import torch +import torch.backends.cudnn as cudnn +import torch.distributed as dist +import torch.multiprocessing as mp +import torch.nn as nn +import torch.nn.parallel +import torch.optim +import torch.utils.data +import torch.utils.data.distributed +from tensorboardX import SummaryWriter + +import _init_paths +import models + +from config import cfg +from config import update_config +from core.loss import MultiLossFactory +from core.trainer import do_train +from dataset import make_dataloader +from fp16_utils.fp16util import network_to_half +from fp16_utils.fp16_optimizer import FP16_Optimizer +from utils.utils import create_logger +from utils.utils import get_optimizer +from utils.utils import save_checkpoint +from utils.utils import setup_logger + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train keypoints network') + # general + parser.add_argument('--cfg', + help='experiment configure file name', + required=True, + type=str) + + parser.add_argument('opts', + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER) + + # distributed training + parser.add_argument('--gpu', + help='gpu id for multiprocessing training', + type=str) + parser.add_argument('--world-size', + default=1, + type=int, + help='number of nodes for distributed training') + parser.add_argument('--dist-url', + default='tcp://127.0.0.1:23456', + type=str, + help='url used to set up distributed training') + parser.add_argument('--rank', + default=0, + type=int, + help='node rank for distributed training') + + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + update_config(cfg, args) + + cfg.defrost() + cfg.RANK = args.rank + cfg.freeze() + + logger, final_output_dir, tb_log_dir = create_logger( + cfg, args.cfg, 'train' + ) + + logger.info(pprint.pformat(args)) + logger.info(cfg) + + if args.gpu is not None: + warnings.warn('You have chosen a specific GPU. 
This will completely ' + 'disable data parallelism.') + + if args.dist_url == "env://" and args.world_size == -1: + args.world_size = int(os.environ["WORLD_SIZE"]) + + args.distributed = args.world_size > 1 or cfg.MULTIPROCESSING_DISTRIBUTED + + ngpus_per_node = torch.cuda.device_count() + if cfg.MULTIPROCESSING_DISTRIBUTED: + # Since we have ngpus_per_node processes per node, the total world_size + # needs to be adjusted accordingly + args.world_size = ngpus_per_node * args.world_size + # Use torch.multiprocessing.spawn to launch distributed processes: the + # main_worker process function + mp.spawn( + main_worker, + nprocs=ngpus_per_node, + args=(ngpus_per_node, args, final_output_dir, tb_log_dir) + ) + else: + # Simply call main_worker function + main_worker( + ','.join([str(i) for i in cfg.GPUS]), + ngpus_per_node, + args, + final_output_dir, + tb_log_dir + ) + + +def main_worker( + gpu, ngpus_per_node, args, final_output_dir, tb_log_dir +): + # cudnn related setting + cudnn.benchmark = cfg.CUDNN.BENCHMARK + torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC + torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED + + if cfg.FP16.ENABLED: + assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled." + + if cfg.FP16.STATIC_LOSS_SCALE != 1.0: + if not cfg.FP16.ENABLED: + print("Warning: if --fp16 is not used, static_loss_scale will be ignored.") + + args.gpu = gpu + + if args.gpu is not None: + print("Use GPU: {} for training".format(args.gpu)) + + if args.distributed: + if args.dist_url == "env://" and args.rank == -1: + args.rank = int(os.environ["RANK"]) + if cfg.MULTIPROCESSING_DISTRIBUTED: + # For multiprocessing distributed training, rank needs to be the + # global rank among all the processes + args.rank = args.rank * ngpus_per_node + gpu + print('Init process group: dist_url: {}, world_size: {}, rank: {}'. + format(args.dist_url, args.world_size, args.rank)) + dist.init_process_group( + backend=cfg.DIST_BACKEND, + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank + ) + + update_config(cfg, args) + + # setup logger + logger, _ = setup_logger(final_output_dir, args.rank, 'train') + + model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')( + cfg, is_train=True + ) + + # copy model file + if not cfg.MULTIPROCESSING_DISTRIBUTED or ( + cfg.MULTIPROCESSING_DISTRIBUTED + and args.rank % ngpus_per_node == 0 + ): + this_dir = os.path.dirname(__file__) + shutil.copy2( + os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'), + final_output_dir + ) + + writer_dict = { + 'writer': SummaryWriter(log_dir=tb_log_dir), + 'train_global_steps': 0, + 'valid_global_steps': 0, + } + + if not cfg.MULTIPROCESSING_DISTRIBUTED or ( + cfg.MULTIPROCESSING_DISTRIBUTED + and args.rank % ngpus_per_node == 0 + ): + dump_input = torch.rand( + (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE) + ) + writer_dict['writer'].add_graph(model, (dump_input, )) + # logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE)) + + if cfg.FP16.ENABLED: + model = network_to_half(model) + + if cfg.MODEL.SYNC_BN and not args.distributed: + print('Warning: Sync BatchNorm is only supported in distributed training.') + + if args.distributed: + if cfg.MODEL.SYNC_BN: + model = nn.SyncBatchNorm.convert_sync_batchnorm(model) + # For multiprocessing distributed, DistributedDataParallel constructor + # should always set the single device scope, otherwise, + # DistributedDataParallel will use all available devices. 
+ if args.gpu is not None: + torch.cuda.set_device(args.gpu) + model.cuda(args.gpu) + # When using a single GPU per process and per + # DistributedDataParallel, we need to divide the batch size + # ourselves based on the total number of GPUs we have + # args.workers = int(args.workers / ngpus_per_node) + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.gpu] + ) + else: + model.cuda() + # DistributedDataParallel will divide and allocate batch_size to all + # available GPUs if device_ids are not set + model = torch.nn.parallel.DistributedDataParallel(model) + elif args.gpu is not None: + torch.cuda.set_device(args.gpu) + model = model.cuda(args.gpu) + else: + model = torch.nn.DataParallel(model).cuda() + + # define loss function (criterion) and optimizer + loss_factory = MultiLossFactory(cfg).cuda() + + # Data loading code + train_loader = make_dataloader( + cfg, is_train=True, distributed=args.distributed + ) + logger.info(train_loader.dataset) + + best_perf = -1 + best_model = False + last_epoch = -1 + optimizer = get_optimizer(cfg, model) + + if cfg.FP16.ENABLED: + optimizer = FP16_Optimizer( + optimizer, + static_loss_scale=cfg.FP16.STATIC_LOSS_SCALE, + dynamic_loss_scale=cfg.FP16.DYNAMIC_LOSS_SCALE + ) + + begin_epoch = cfg.TRAIN.BEGIN_EPOCH + checkpoint_file = os.path.join( + final_output_dir, 'checkpoint.pth.tar') + if cfg.AUTO_RESUME and os.path.exists(checkpoint_file): + logger.info("=> loading checkpoint '{}'".format(checkpoint_file)) + checkpoint = torch.load(checkpoint_file) + begin_epoch = checkpoint['epoch'] + best_perf = checkpoint['perf'] + last_epoch = checkpoint['epoch'] + model.load_state_dict(checkpoint['state_dict']) + + optimizer.load_state_dict(checkpoint['optimizer']) + logger.info("=> loaded checkpoint '{}' (epoch {})".format( + checkpoint_file, checkpoint['epoch'])) + + if cfg.FP16.ENABLED: + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer.optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR, + last_epoch=last_epoch + ) + else: + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR, + last_epoch=last_epoch + ) + + for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH): + # train one epoch + do_train(cfg, model, train_loader, loss_factory, optimizer, epoch, + final_output_dir, tb_log_dir, writer_dict, fp16=cfg.FP16.ENABLED) + + # In PyTorch 1.1.0 and later, you should call `lr_scheduler.step()` after `optimizer.step()`. 
+ lr_scheduler.step() + + perf_indicator = epoch + if perf_indicator >= best_perf: + best_perf = perf_indicator + best_model = True + else: + best_model = False + + if not cfg.MULTIPROCESSING_DISTRIBUTED or ( + cfg.MULTIPROCESSING_DISTRIBUTED + and args.rank == 0 + ): + logger.info('=> saving checkpoint to {}'.format(final_output_dir)) + save_checkpoint({ + 'epoch': epoch + 1, + 'model': cfg.MODEL.NAME, + 'state_dict': model.state_dict(), + 'best_state_dict': model.module.state_dict(), + 'perf': perf_indicator, + 'optimizer': optimizer.state_dict(), + }, best_model, final_output_dir) + + final_model_state_file = os.path.join( + final_output_dir, 'final_state{}.pth.tar'.format(gpu) + ) + + logger.info('saving final model state to {}'.format( + final_model_state_file)) + torch.save(model.module.state_dict(), final_model_state_file) + writer_dict['writer'].close() + + +if __name__ == '__main__': + main() diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/tools/get_keypoints.py b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/get_keypoints.py new file mode 100644 index 0000000..8bc0b0c --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/get_keypoints.py @@ -0,0 +1,264 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import json +import argparse +import os +import pprint +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn.parallel +import torch.optim +import torch.utils.data +import torch.utils.data.distributed +import torchvision.transforms +import torch.multiprocessing +from tqdm import tqdm + +import _init_paths +import models + +from config import cfg +from config import check_config +from config import update_config +from core.inference import get_multi_stage_outputs +from core.inference import aggregate_results +from core.group import HeatmapParser +# from dataset import make_test_dataloader +from fp16_utils.fp16util import network_to_half +from utils.utils import create_logger +from utils.utils import get_model_summary +# from utils.vis import save_debug_images +from utils.vis import save_valid_image +from utils.transforms import resize_align_multi_scale +from utils.transforms import get_final_preds +from utils.transforms import get_multi_scale_size +import glob +import cv2 +torch.multiprocessing.set_sharing_strategy('file_system') +from urllib.request import urlretrieve,build_opener,install_opener + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test keypoints network') + # general + parser.add_argument('--cfg', + help='experiment configure file name', + required=True, + type=str) + parser.add_argument('--input_dir', + help='experiment configure file name', + required=True, + type=str) + + parser.add_argument('opts', + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER) + + args = parser.parse_args() + + return args + + +# markdown format output +def _print_name_value(logger, name_value, full_arch_name): + names = name_value.keys() + values = name_value.values() + num_values = len(name_value) + logger.info( + '| Arch ' + + ' '.join(['| {}'.format(name) for 
name in names]) + + ' |' + ) + logger.info('|---' * (num_values+1) + '|') + + if len(full_arch_name) > 15: + full_arch_name = full_arch_name[:8] + '...' + logger.info( + '| ' + full_arch_name + ' ' + + ' '.join(['| {:.3f}'.format(value) for value in values]) + + ' |' + ) + + +def main(): + args = parse_args() + update_config(cfg, args) + check_config(cfg) + + logger, final_output_dir, tb_log_dir = create_logger( + cfg, args.cfg, 'valid' + ) + + logger.info(pprint.pformat(args)) + #logger.info(cfg) + + # cudnn related setting + cudnn.benchmark = cfg.CUDNN.BENCHMARK + torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC + torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED + + model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')( + cfg, is_train=False + ) + + dump_input = torch.rand( + (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE) + ) + #logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE)) + + if cfg.FP16.ENABLED: + model = network_to_half(model) + + if cfg.TEST.MODEL_FILE: + logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE)) + model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True) + else: + model_state_file = os.path.join( + final_output_dir, 'model_best.pth.tar' + ) + logger.info('=> loading model from {}'.format(model_state_file)) + model.load_state_dict(torch.load(model_state_file)) + + model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda() + model.eval() + + # data_loader, test_dataset = make_test_dataloader(cfg) + + if cfg.MODEL.NAME == 'pose_hourglass': + transforms = torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + ] + ) + else: + transforms = torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize( + mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225] + ) + ] + ) + + parser = HeatmapParser(cfg) + results = {} + + json_save_path = os.path.join(args.input_dir,'2d_pose_result_hrnet.json') + print('json_save_path', json_save_path) + + if os.path.exists(json_save_path): + with open(json_save_path, 'r') as f: + results = json.load(f) + + meta_save_path = os.path.join(args.input_dir, 'meta_data.json') + print('meta_save_path', meta_save_path) + + if os.path.exists(meta_save_path): + with open(meta_save_path, 'r') as f: + meta_data = json.load(f) + + + + # pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None + i = 0 + for image_path in tqdm(glob.glob(os.path.join(args.input_dir, 'images','*'))): + if os.path.basename(image_path) in results: + continue + try: + img = cv2.imread( + image_path, + cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION + ) + image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + except: + os.remove(image_path) + + # + # assert 1 == images.size(0), 'Test batch size should be 1' + # + # image = images[0].cpu().numpy() + # size at scale 1.0 + base_size, center, scale = get_multi_scale_size( + image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR) + ) + + with torch.no_grad(): + final_heatmaps = None + tags_list = [] + for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)): + input_size = cfg.DATASET.INPUT_SIZE + image_resized, center, scale = resize_align_multi_scale( + image, input_size, s, min(cfg.TEST.SCALE_FACTOR) + ) + image_resized = transforms(image_resized) + image_resized = image_resized.unsqueeze(0).cuda() + + outputs, heatmaps, tags = get_multi_stage_outputs( + cfg, model, image_resized, cfg.TEST.FLIP_TEST, + cfg.TEST.PROJECT2IMAGE, base_size + ) + + final_heatmaps, 
tags_list = aggregate_results( + cfg, s, final_heatmaps, tags_list, heatmaps, tags + ) + + final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR)) + tags = torch.cat(tags_list, dim=4) + grouped, scores = parser.parse( + final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE + ) + + final_results = get_final_preds( + grouped, center, scale, + [final_heatmaps.size(3), final_heatmaps.size(2)] + ) + + prefix = '{}_{}'.format(os.path.join(final_output_dir, 'result_valid'), i) + # logger.info('=> write {}'.format(prefix)) + #save_valid_image(image, final_results, '{}.jpg'.format(prefix), dataset='COCO') + + # save_debug_images(cfg, image_resized, None, None, outputs, prefix) + + res = np.zeros((len(final_results), 17, 5)) + + for person_id,kpts in enumerate(final_results): + res[person_id, :, :] = kpts + + + results[os.path.basename(image_path)] = res.tolist() + + i += 1 + + + + + + + with open(json_save_path, 'w') as f: + json.dump(results, f) + + + # name_values, _ = test_dataset.evaluate( + # cfg, all_preds, all_scores, final_output_dir + # ) + # + # if isinstance(name_values, list): + # for name_value in name_values: + # _print_name_value(logger, name_value, cfg.MODEL.NAME) + # else: + # _print_name_value(logger, name_values, cfg.MODEL.NAME) + + +if __name__ == '__main__': + main() diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/tools/test.py b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/test.py new file mode 100644 index 0000000..651bb05 --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/test.py @@ -0,0 +1,231 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import json +import argparse +import os +import pprint +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn.parallel +import torch.optim +import torch.utils.data +import torch.utils.data.distributed +import torchvision.transforms +import torch.multiprocessing +from tqdm import tqdm + +import _init_paths +import models + +from config import cfg +from config import check_config +from config import update_config +from core.inference import get_multi_stage_outputs +from core.inference import aggregate_results +from core.group import HeatmapParser +# from dataset import make_test_dataloader +from fp16_utils.fp16util import network_to_half +from utils.utils import create_logger +from utils.utils import get_model_summary +# from utils.vis import save_debug_images +from utils.vis import save_valid_image +from utils.transforms import resize_align_multi_scale +from utils.transforms import get_final_preds +from utils.transforms import get_multi_scale_size +import glob +import cv2 +torch.multiprocessing.set_sharing_strategy('file_system') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test keypoints network') + # general + parser.add_argument('--cfg', + help='experiment configure file name', + required=True, + type=str) + + parser.add_argument('opts', + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER) + + args = parser.parse_args() + + return args + + +# markdown format output +def 
_print_name_value(logger, name_value, full_arch_name): + names = name_value.keys() + values = name_value.values() + num_values = len(name_value) + logger.info( + '| Arch ' + + ' '.join(['| {}'.format(name) for name in names]) + + ' |' + ) + logger.info('|---' * (num_values+1) + '|') + + if len(full_arch_name) > 15: + full_arch_name = full_arch_name[:8] + '...' + logger.info( + '| ' + full_arch_name + ' ' + + ' '.join(['| {:.3f}'.format(value) for value in values]) + + ' |' + ) + + +def main(): + args = parse_args() + update_config(cfg, args) + check_config(cfg) + + logger, final_output_dir, tb_log_dir = create_logger( + cfg, args.cfg, 'valid' + ) + + logger.info(pprint.pformat(args)) + logger.info(cfg) + + # cudnn related setting + cudnn.benchmark = cfg.CUDNN.BENCHMARK + torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC + torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED + + model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')( + cfg, is_train=False + ) + + dump_input = torch.rand( + (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE) + ) + logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE)) + + if cfg.FP16.ENABLED: + model = network_to_half(model) + + if cfg.TEST.MODEL_FILE: + logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE)) + model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True) + else: + model_state_file = os.path.join( + final_output_dir, 'model_best.pth.tar' + ) + logger.info('=> loading model from {}'.format(model_state_file)) + model.load_state_dict(torch.load(model_state_file)) + + model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda() + model.eval() + + # data_loader, test_dataset = make_test_dataloader(cfg) + + if cfg.MODEL.NAME == 'pose_hourglass': + transforms = torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + ] + ) + else: + transforms = torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize( + mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225] + ) + ] + ) + + parser = HeatmapParser(cfg) + results = {} + + # pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None + for i, image_path in enumerate(glob.glob('F:/full-head-dataset\skeleton_estimation/3DCrowdNet_RELEASE\demo\my_input\images/*')): + img = cv2.imread( + image_path, + cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION + ) + image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + + # + # assert 1 == images.size(0), 'Test batch size should be 1' + # + # image = images[0].cpu().numpy() + # size at scale 1.0 + base_size, center, scale = get_multi_scale_size( + image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR) + ) + + with torch.no_grad(): + final_heatmaps = None + tags_list = [] + for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)): + input_size = cfg.DATASET.INPUT_SIZE + image_resized, center, scale = resize_align_multi_scale( + image, input_size, s, min(cfg.TEST.SCALE_FACTOR) + ) + image_resized = transforms(image_resized) + image_resized = image_resized.unsqueeze(0).cuda() + + outputs, heatmaps, tags = get_multi_stage_outputs( + cfg, model, image_resized, cfg.TEST.FLIP_TEST, + cfg.TEST.PROJECT2IMAGE, base_size + ) + + final_heatmaps, tags_list = aggregate_results( + cfg, s, final_heatmaps, tags_list, heatmaps, tags + ) + + final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR)) + tags = torch.cat(tags_list, dim=4) + grouped, scores = parser.parse( + final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE + ) + + 
final_results = get_final_preds( + grouped, center, scale, + [final_heatmaps.size(3), final_heatmaps.size(2)] + ) + + prefix = '{}_{}'.format(os.path.join(final_output_dir, 'result_valid'), i) + # logger.info('=> write {}'.format(prefix)) + #save_valid_image(image, final_results, '{}.jpg'.format(prefix), dataset='COCO') + + # save_debug_images(cfg, image_resized, None, None, outputs, prefix) + + res = np.zeros((len(final_results), 17, 5)) + + for person_id,kpts in enumerate(final_results): + res[person_id, :, :] = kpts + + + results[os.path.basename(image_path)] = res.tolist() + + with open('F:/full-head-dataset\skeleton_estimation/3DCrowdNet_RELEASE\demo\my_input/2d_pose_result_hrnet.json', 'w') as f: + json.dump(results, f) + + + # name_values, _ = test_dataset.evaluate( + # cfg, all_preds, all_scores, final_output_dir + # ) + # + # if isinstance(name_values, list): + # for name_value in name_values: + # _print_name_value(logger, name_value, cfg.MODEL.NAME) + # else: + # _print_name_value(logger, name_values, cfg.MODEL.NAME) + + +if __name__ == '__main__': + main() diff --git a/data_processing/HigherHRNet-Human-Pose-Estimation/tools/valid.py b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/valid.py new file mode 100644 index 0000000..97cf22f --- /dev/null +++ b/data_processing/HigherHRNet-Human-Pose-Estimation/tools/valid.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os +import pprint + +import torch +import torch.backends.cudnn as cudnn +import torch.nn.parallel +import torch.optim +import torch.utils.data +import torch.utils.data.distributed +import torchvision.transforms +import torch.multiprocessing +from tqdm import tqdm + +import _init_paths +import models + +from config import cfg +from config import check_config +from config import update_config +from core.inference import get_multi_stage_outputs +from core.inference import aggregate_results +from core.group import HeatmapParser +from dataset import make_test_dataloader +from fp16_utils.fp16util import network_to_half +from utils.utils import create_logger +from utils.utils import get_model_summary +from utils.vis import save_debug_images +from utils.vis import save_valid_image +from utils.transforms import resize_align_multi_scale +from utils.transforms import get_final_preds +from utils.transforms import get_multi_scale_size + +torch.multiprocessing.set_sharing_strategy('file_system') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test keypoints network') + # general + parser.add_argument('--cfg', + help='experiment configure file name', + required=True, + type=str) + + parser.add_argument('opts', + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER) + + args = parser.parse_args() + + return args + + +# markdown format output +def _print_name_value(logger, name_value, full_arch_name): + names = name_value.keys() + values = name_value.values() + num_values = len(name_value) + logger.info( + '| Arch ' + + ' '.join(['| {}'.format(name) for name in names]) + + ' |' + ) + logger.info('|---' * (num_values+1) + '|') + + if 
len(full_arch_name) > 15: + full_arch_name = full_arch_name[:8] + '...' + logger.info( + '| ' + full_arch_name + ' ' + + ' '.join(['| {:.3f}'.format(value) for value in values]) + + ' |' + ) + + +def main(): + args = parse_args() + update_config(cfg, args) + check_config(cfg) + + logger, final_output_dir, tb_log_dir = create_logger( + cfg, args.cfg, 'valid' + ) + + logger.info(pprint.pformat(args)) + logger.info(cfg) + + # cudnn related setting + cudnn.benchmark = cfg.CUDNN.BENCHMARK + torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC + torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED + + model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')( + cfg, is_train=False + ) + + dump_input = torch.rand( + (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE) + ) + logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE)) + + if cfg.FP16.ENABLED: + model = network_to_half(model) + + if cfg.TEST.MODEL_FILE: + logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE)) + model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True) + else: + model_state_file = os.path.join( + final_output_dir, 'model_best.pth.tar' + ) + logger.info('=> loading model from {}'.format(model_state_file)) + model.load_state_dict(torch.load(model_state_file)) + + model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda() + model.eval() + + data_loader, test_dataset = make_test_dataloader(cfg) + + if cfg.MODEL.NAME == 'pose_hourglass': + transforms = torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + ] + ) + else: + transforms = torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize( + mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225] + ) + ] + ) + + parser = HeatmapParser(cfg) + all_preds = [] + all_scores = [] + + pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None + for i, (images, annos) in enumerate(data_loader): + assert 1 == images.size(0), 'Test batch size should be 1' + + image = images[0].cpu().numpy() + # size at scale 1.0 + base_size, center, scale = get_multi_scale_size( + image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR) + ) + + with torch.no_grad(): + final_heatmaps = None + tags_list = [] + for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)): + input_size = cfg.DATASET.INPUT_SIZE + image_resized, center, scale = resize_align_multi_scale( + image, input_size, s, min(cfg.TEST.SCALE_FACTOR) + ) + image_resized = transforms(image_resized) + image_resized = image_resized.unsqueeze(0).cuda() + + outputs, heatmaps, tags = get_multi_stage_outputs( + cfg, model, image_resized, cfg.TEST.FLIP_TEST, + cfg.TEST.PROJECT2IMAGE, base_size + ) + + final_heatmaps, tags_list = aggregate_results( + cfg, s, final_heatmaps, tags_list, heatmaps, tags + ) + + final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR)) + tags = torch.cat(tags_list, dim=4) + grouped, scores = parser.parse( + final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE + ) + + final_results = get_final_preds( + grouped, center, scale, + [final_heatmaps.size(3), final_heatmaps.size(2)] + ) + + if cfg.TEST.LOG_PROGRESS: + pbar.update() + + if i % cfg.PRINT_FREQ == 0: + prefix = '{}_{}'.format(os.path.join(final_output_dir, 'result_valid'), i) + # logger.info('=> write {}'.format(prefix)) + save_valid_image(image, final_results, '{}.jpg'.format(prefix), dataset=test_dataset.name) + # save_debug_images(cfg, image_resized, None, None, outputs, prefix) + + 
all_preds.append(final_results) + all_scores.append(scores) + + if cfg.TEST.LOG_PROGRESS: + pbar.close() + + name_values, _ = test_dataset.evaluate( + cfg, all_preds, all_scores, final_output_dir + ) + + if isinstance(name_values, list): + for name_value in name_values: + _print_name_value(logger, name_value, cfg.MODEL.NAME) + else: + _print_name_value(logger, name_values, cfg.MODEL.NAME) + + +if __name__ == '__main__': + main() diff --git a/data_processing/LICENSE b/data_processing/LICENSE new file mode 100644 index 0000000..9eebacf --- /dev/null +++ b/data_processing/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Hongsuk Choi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/data_processing/MANIQA/config.py b/data_processing/MANIQA/config.py new file mode 100644 index 0000000..49adf80 --- /dev/null +++ b/data_processing/MANIQA/config.py @@ -0,0 +1,12 @@ +import json + +""" configuration json """ +class Config(dict): + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + @classmethod + def load(cls, file): + with open(file, 'r') as f: + config = json.loads(f.read()) + return Config(config) \ No newline at end of file diff --git a/data_processing/MANIQA/delete_images.py b/data_processing/MANIQA/delete_images.py new file mode 100644 index 0000000..6032224 --- /dev/null +++ b/data_processing/MANIQA/delete_images.py @@ -0,0 +1,30 @@ +import os +import glob +import argparse +parser = argparse.ArgumentParser(description='Test keypoints network') + # general + +parser.add_argument('--input_dir', + help='experiment configure file name', + required=True, + type=str) +args = parser.parse_args() +count = 0 +for image_path in glob.glob(os.path.join(args.input_dir, 'aligned_images/*')): + image_name = os.path.basename(image_path) + if not os.path.exists(os.path.join(args.input_dir, 'visualization',image_name)): + os.remove(image_path) + count+=1 +print(count) +count = 0 +#for image_path in glob.glob('G:/full-head-dataset/pexels/00000000/visualization/*'): +for image_path in glob.glob(os.path.join(args.input_dir, 'visualization/*')): + image_name = os.path.basename(image_path) + #if not os.path.exists('G:/full-head-dataset/pexels/00000000/aligned_images/' + image_name): + if not os.path.exists(os.path.join(args.input_dir, 'aligned_images',image_name)): + os.remove(image_path) + count+=1 + + + +print(count) \ No newline at end of file diff --git a/data_processing/MANIQA/delete_raw_images.py b/data_processing/MANIQA/delete_raw_images.py new file mode 100644 index 0000000..0cd7161 --- 
/dev/null +++ b/data_processing/MANIQA/delete_raw_images.py @@ -0,0 +1,35 @@ +import os +import glob +import argparse +import json +import numpy as np +parser = argparse.ArgumentParser(description='Test keypoints network') + # general + +parser.add_argument('--input_dir', + help='experiment configure file name', + required=True, + type=str) +args = parser.parse_args() +count = 0 +for image_path in glob.glob(os.path.join(args.input_dir, 'images/*')): + image_name = os.path.basename(image_path).split('.')[0] + #print(os.path.join(args.input_dir, 'aligned_images',image_name + '*')) + if len(glob.glob(os.path.join(args.input_dir, 'aligned_images',image_name + '*'))) == 0: + os.remove(image_path) + print(image_path) + count+=1 +print(count) + +json_save_path = os.path.join(args.input_dir, 'result.json') +with open(json_save_path, 'r') as f: + results = json.load(f) +# check: +for image_path in glob.glob(os.path.join(args.input_dir, 'aligned_images/*')): + raw_image_name = results[os.path.basename(image_path)]['raw_image_name'] + if not os.path.exists(os.path.join(args.input_dir, 'images',raw_image_name)): + raise Exception(image_path) + + + + diff --git a/data_processing/MANIQA/imagedups.py b/data_processing/MANIQA/imagedups.py new file mode 100644 index 0000000..2248dc1 --- /dev/null +++ b/data_processing/MANIQA/imagedups.py @@ -0,0 +1,137 @@ +#!python +import argparse +from PIL import Image +import os +import sys +import imagehash +import progressbar +import multiprocessing as mp +import numpy as np +import cv2 +def dupes(config): + hmap = {} + paths = config['paths'] + subdirs = [] + if config['recurse']: + for path in paths: + for root, dirs, _ in os.walk(path): + for name in dirs: + subdirs.append(os.path.join(root, name)) + paths += subdirs + files = [] + for path in paths: + fs = os.listdir(path) + for f in fs: + fpath = os.path.join(path, f) + if os.path.isdir(fpath): + continue + files.append(fpath) + + num_cores = int(mp.cpu_count()) + pool = mp.Pool(num_cores) + manager = mp.Manager() + managed_locker = manager.Lock() + managed_dict = manager.dict() + results = [pool.apply_async(async_hash, args=(fpath, managed_dict, managed_locker)) for fpath in files] + + pbar = progressbar.ProgressBar(max_value=len(files)) + for i, p in enumerate(results): + p.get() + pbar.update(i) + pbar.finish() + + count = 0 + for k, v in managed_dict.items(): + if len(v) == 1: + continue + + # show image in v + if config['show']: + images = [] + for fpath in v: + images.append(Image.open(fpath)) + images = [np.array(image) for image in images] + images = np.concatenate(images, axis=1) + images = cv2.cvtColor(images, cv2.COLOR_RGB2BGR) + images = cv2.resize(images, (images.shape[1] // 4, images.shape[0] // 4)) + cv2.imshow('images', images) + cv2.waitKey(0) + + for idx, fpath in enumerate(v): + if idx == 0: + if not config['quiet']: + #print("[+]", fpath, os.path.getsize(fpath)) + pass + else: + if not config['quiet']: + pass + #print("[-]", fpath, os.path.getsize(fpath)) + + confirm = config['noprompt'] + + + + if not config['noprompt'] and config['delete']: + print("Delete %s? 
[y/n]") + confirm = sys.stdin.readline().strip() == 'y' + if config['delete'] and confirm: + count += 1 + os.unlink(fpath) + # if not config['quiet']: + # print() + + + print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Deleted %d files" % count) +def async_hash(fpath, result_dict, result_lock): + try: + h = imagehash.average_hash(Image.open(fpath)) + h = "%s" % h + sims = result_dict.get(h, []) + sims.append(fpath) + with result_lock: + result_dict[h] = sims + except Exception as e: + pass + +def main(args=None): + parser = argparse.ArgumentParser( + prog="imagedups", + description="""Find/Delete duplicated images + + imagedups [options] -p DIRECTORY... + """, + epilog=""" + inspire by fdupes + """, formatter_class=argparse.RawDescriptionHelpFormatter) + + parser.add_argument('-d', '--delete', dest='delete', default=False, action='store_true', + help='Delete duplicated files, keep one image only') + parser.add_argument('-r', '--recurse', dest='recurse', default=False, action='store_true', + help='For every directory given follow subdirectories encountered within') + parser.add_argument('-N', '--noprompt', dest='noprompt', default=False, action='store_true', + help='''Together with --delete, preserve the first file in +each set of duplicates and delete the rest without +prompting the user + ''') + parser.add_argument('-w', '--show', dest='show', default=False, action='store_true', + help='''Together with --delete, preserve the first file in + each set of duplicates and delete the rest without + prompting the user + ''') + parser.add_argument('-q', '--quiet', dest='quiet', default=False, action='store_true', + help='Hide progress indicator') + parser.add_argument('--minsize', dest='minsize', type=int, + help='Consider only files greater than or equal to SIZE bytes') + parser.add_argument('--maxsize', dest='maxsize', type=int, + help='Consider only files less than or equal to SIZE bytes') + parser.add_argument('-p', '--path', dest='paths', nargs='+', type=str, required=True) + + if args is not None: + config = vars(parser.parse_args(args)) + else: + config = vars(parser.parse_args()) + + dupes(config) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/data_processing/MANIQA/remove_blurr_images.py b/data_processing/MANIQA/remove_blurr_images.py new file mode 100644 index 0000000..d245531 --- /dev/null +++ b/data_processing/MANIQA/remove_blurr_images.py @@ -0,0 +1,34 @@ +import os + +import glob +import cv2 +import shutil +from tqdm import tqdm + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser(description='Test keypoints network') + # general + + parser.add_argument('--input_dir', + help='experiment configure file name', + required=True, + type=str) + + args = parser.parse_args() + + + + image_list = glob.glob(os.path.join(args.input_dir, 'aligned_images/*')) + + for image_path in tqdm(image_list): + # data load + # model defination + image = cv2.imread(image_path) + img2gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + imageVar = cv2.Laplacian(img2gray, cv2.CV_64F).var() + if imageVar < 4: + os.remove(image_path) + diff --git a/data_processing/MANIQA/timm/__init__.py b/data_processing/MANIQA/timm/__init__.py new file mode 100644 index 0000000..04ec7e5 --- /dev/null +++ b/data_processing/MANIQA/timm/__init__.py @@ -0,0 +1,4 @@ +from .version import __version__ +from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ + is_scriptable, is_exportable, set_scriptable, set_exportable, has_model_default_key, 
is_model_default_key, \ + get_model_default_value, is_model_pretrained diff --git a/data_processing/MANIQA/timm/data/__init__.py b/data_processing/MANIQA/timm/data/__init__.py new file mode 100644 index 0000000..7d3cb2b --- /dev/null +++ b/data_processing/MANIQA/timm/data/__init__.py @@ -0,0 +1,12 @@ +from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ + rand_augment_transform, auto_augment_transform +from .config import resolve_data_config +from .constants import * +from .dataset import ImageDataset, IterableImageDataset, AugMixDataset +from .dataset_factory import create_dataset +from .loader import create_loader +from .mixup import Mixup, FastCollateMixup +from .parsers import create_parser +from .real_labels import RealLabelsImagenet +from .transforms import * +from .transforms_factory import create_transform \ No newline at end of file diff --git a/data_processing/MANIQA/timm/data/constants.py b/data_processing/MANIQA/timm/data/constants.py new file mode 100644 index 0000000..d6d4a01 --- /dev/null +++ b/data_processing/MANIQA/timm/data/constants.py @@ -0,0 +1,7 @@ +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) +IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) +IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) +IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) +IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) diff --git a/data_processing/MANIQA/timm/models/__init__.py b/data_processing/MANIQA/timm/models/__init__.py new file mode 100644 index 0000000..2ef4918 --- /dev/null +++ b/data_processing/MANIQA/timm/models/__init__.py @@ -0,0 +1,59 @@ +from .beit import * +from .byoanet import * +from .byobnet import * +from .cait import * +from .coat import * +from .convit import * +from .convmixer import * +from .convnext import * +from .crossvit import * +from .cspnet import * +from .densenet import * +from .dla import * +from .dpn import * +from .efficientnet import * +from .ghostnet import * +from .gluon_resnet import * +from .gluon_xception import * +from .hardcorenas import * +from .hrnet import * +from .inception_resnet_v2 import * +from .inception_v3 import * +from .inception_v4 import * +from .levit import * +from .mlp_mixer import * +from .mobilenetv3 import * +from .nasnet import * +from .nest import * +from .nfnet import * +from .pit import * +from .pnasnet import * +from .regnet import * +from .res2net import * +from .resnest import * +from .resnet import * +from .resnetv2 import * +from .rexnet import * +from .selecsls import * +from .senet import * +from .sknet import * +from .swin_transformer import * +from .tnt import * +from .tresnet import * +from .twins import * +from .vgg import * +from .visformer import * +from .vision_transformer import * +from .vision_transformer_hybrid import * +from .vovnet import * +from .xception import * +from .xception_aligned import * +from .xcit import * + +from .factory import create_model, split_model_name, safe_model_name +from .helpers import load_checkpoint, resume_checkpoint, model_parameters +from .layers import TestTimePoolHead, apply_test_time_pool +from .layers import convert_splitbn_model +from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable, is_no_jit, set_no_jit +from .registry import register_model, model_entrypoint, list_models, is_model, list_modules, is_model_in_modules,\ + has_model_default_key, is_model_default_key, get_model_default_value, is_model_pretrained diff --git 
a/data_processing/MANIQA/timm/models/convnext.py b/data_processing/MANIQA/timm/models/convnext.py new file mode 100644 index 0000000..5f75647 --- /dev/null +++ b/data_processing/MANIQA/timm/models/convnext.py @@ -0,0 +1,427 @@ +""" ConvNeXt + +Paper: `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf + +Original code and weights from https://github.com/facebookresearch/ConvNeXt, original copyright below + +Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman +""" +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# This source code is licensed under the MIT license +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_module +from .helpers import named_apply, build_model_with_cfg +from .layers import trunc_normal_, ClassifierHead, SelectAdaptivePool2d, DropPath, ConvMlp, Mlp +from .registry import register_model + + +__all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + convnext_tiny=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth"), + convnext_small=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth"), + convnext_base=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth"), + convnext_large=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth"), + + convnext_tiny_hnf=_cfg(url=''), + + convnext_base_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth'), + convnext_large_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth'), + convnext_xlarge_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth'), + + convnext_base_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + convnext_large_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + convnext_xlarge_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + + convnext_base_in22k=_cfg( + 
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", num_classes=21841), + convnext_large_in22k=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", num_classes=21841), + convnext_xlarge_in22k=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", num_classes=21841), +) + + +def _is_contiguous(tensor: torch.Tensor) -> bool: + # jit is oh so lovely :/ + # if torch.jit.is_tracing(): + # return True + if torch.jit.is_scripting(): + return tensor.is_contiguous() + else: + return tensor.is_contiguous(memory_format=torch.contiguous_format) + + +@register_notrace_module +class LayerNorm2d(nn.LayerNorm): + r""" LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W). + """ + + def __init__(self, normalized_shape, eps=1e-6): + super().__init__(normalized_shape, eps=eps) + + def forward(self, x) -> torch.Tensor: + if _is_contiguous(x): + return F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) + else: + s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True) + x = (x - u) * torch.rsqrt(s + self.eps) + x = x * self.weight[:, None, None] + self.bias[:, None, None] + return x + + +class ConvNeXtBlock(nn.Module): + """ ConvNeXt Block + There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + + Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate + choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear + is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW. + + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + ls_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + + def __init__(self, dim, drop_path=0., ls_init_value=1e-6, conv_mlp=False, mlp_ratio=4, norm_layer=None): + super().__init__() + if not norm_layer: + norm_layer = partial(LayerNorm2d, eps=1e-6) if conv_mlp else partial(nn.LayerNorm, eps=1e-6) + mlp_layer = ConvMlp if conv_mlp else Mlp + self.use_conv_mlp = conv_mlp + self.conv_dw = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv + self.norm = norm_layer(dim) + self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=nn.GELU) + self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.conv_dw(x) + if self.use_conv_mlp: + x = self.norm(x) + x = self.mlp(x) + else: + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + x = self.mlp(x) + x = x.permute(0, 3, 1, 2) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + x = self.drop_path(x) + shortcut + return x + + +class ConvNeXtStage(nn.Module): + + def __init__( + self, in_chs, out_chs, stride=2, depth=2, dp_rates=None, ls_init_value=1.0, conv_mlp=False, + norm_layer=None, cl_norm_layer=None, cross_stage=False): + super().__init__() + + if in_chs != out_chs or stride > 1: + self.downsample = nn.Sequential( + norm_layer(in_chs), + nn.Conv2d(in_chs, out_chs, kernel_size=stride, stride=stride), + ) + else: + self.downsample = nn.Identity() + + dp_rates = dp_rates or [0.] * depth + self.blocks = nn.Sequential(*[ConvNeXtBlock( + dim=out_chs, drop_path=dp_rates[j], ls_init_value=ls_init_value, conv_mlp=conv_mlp, + norm_layer=norm_layer if conv_mlp else cl_norm_layer) + for j in range(depth)] + ) + + def forward(self, x): + x = self.downsample(x) + x = self.blocks(x) + return x + + +class ConvNeXt(nn.Module): + r""" ConvNeXt + A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf + + Args: + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3] + dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768] + drop_rate (float): Head dropout rate + drop_path_rate (float): Stochastic depth rate. Default: 0. + ls_init_value (float): Init value for Layer Scale. Default: 1e-6. + head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. 
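+        Example configurations (mirroring the model registry below):
+        convnext_tiny uses depths=(3, 3, 9, 3) with dims=(96, 192, 384, 768),
+        and convnext_base uses depths=(3, 3, 27, 3) with dims=(128, 256, 512, 1024).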
+ """ + + def __init__( + self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, patch_size=4, + depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), ls_init_value=1e-6, conv_mlp=False, + head_init_scale=1., head_norm_first=False, norm_layer=None, drop_rate=0., drop_path_rate=0., + ): + super().__init__() + assert output_stride == 32 + if norm_layer is None: + norm_layer = partial(LayerNorm2d, eps=1e-6) + cl_norm_layer = norm_layer if conv_mlp else partial(nn.LayerNorm, eps=1e-6) + else: + assert conv_mlp,\ + 'If a norm_layer is specified, conv MLP must be used so all norm expect rank-4, channels-first input' + cl_norm_layer = norm_layer + + self.num_classes = num_classes + self.drop_rate = drop_rate + self.feature_info = [] + + # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size), + norm_layer(dims[0]) + ) + + self.stages = nn.Sequential() + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + curr_stride = patch_size + prev_chs = dims[0] + stages = [] + # 4 feature resolution stages, each consisting of multiple residual blocks + for i in range(4): + stride = 2 if i > 0 else 1 + # FIXME support dilation / output_stride + curr_stride *= stride + out_chs = dims[i] + stages.append(ConvNeXtStage( + prev_chs, out_chs, stride=stride, + depth=depths[i], dp_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, + norm_layer=norm_layer, cl_norm_layer=cl_norm_layer) + ) + prev_chs = out_chs + # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + + self.num_features = prev_chs + if head_norm_first: + # norm -> global pool -> fc ordering, like most other nets (not compat with FB weights) + self.norm_pre = norm_layer(self.num_features) # final norm layer, before pooling + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + else: + # pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights) + self.norm_pre = nn.Identity() + self.head = nn.Sequential(OrderedDict([ + ('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), + ('norm', norm_layer(self.num_features)), + ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()) + ])) + + named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes=0, global_pool='avg'): + if isinstance(self.head, ClassifierHead): + # norm -> global pool -> fc + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + else: + # pool -> norm -> fc + self.head = nn.Sequential(OrderedDict([ + ('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), + ('norm', self.head.norm), + ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()) + ])) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm_pre(x) + return x + + def forward(self, x): + x = self.forward_features(x) + 
x = self.head(x) + return x + + +def _init_weights(module, name=None, head_init_scale=1.0): + if isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02) + nn.init.constant_(module.bias, 0) + elif isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=.02) + nn.init.constant_(module.bias, 0) + if name and 'head.' in name: + module.weight.data.mul_(head_init_scale) + module.bias.data.mul_(head_init_scale) + + +def checkpoint_filter_fn(state_dict, model): + """ Remap FB checkpoints -> timm """ + if 'model' in state_dict: + state_dict = state_dict['model'] + out_dict = {} + import re + for k, v in state_dict.items(): + k = k.replace('downsample_layers.0.', 'stem.') + k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) + k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) + k = k.replace('dwconv', 'conv_dw') + k = k.replace('pwconv', 'mlp.fc') + k = k.replace('head.', 'head.fc.') + if k.startswith('norm.'): + k = k.replace('norm', 'head.norm') + if v.ndim == 2 and 'head' not in k: + model_shape = model.state_dict()[k].shape + v = v.reshape(model_shape) + out_dict[k] = v + return out_dict + + +def _create_convnext(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + ConvNeXt, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs) + return model + + +@register_model +def convnext_tiny(pretrained=False, **kwargs): + model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs) + model = _create_convnext('convnext_tiny', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_tiny_hnf(pretrained=False, **kwargs): + model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, **kwargs) + model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_small(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs) + model = _create_convnext('convnext_small', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_base(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + model = _create_convnext('convnext_base', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_large(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + model = _create_convnext('convnext_large', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_base_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + model = _create_convnext('convnext_base_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_large_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + model = _create_convnext('convnext_large_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_xlarge_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs) + model = _create_convnext('convnext_xlarge_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def 
convnext_base_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + model = _create_convnext('convnext_base_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_large_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + model = _create_convnext('convnext_large_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_xlarge_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs) + model = _create_convnext('convnext_xlarge_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_base_in22k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + model = _create_convnext('convnext_base_in22k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_large_in22k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + model = _create_convnext('convnext_large_in22k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_xlarge_in22k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs) + model = _create_convnext('convnext_xlarge_in22k', pretrained=pretrained, **model_args) + return model + + + diff --git a/data_processing/MANIQA/timm/models/crossvit.py b/data_processing/MANIQA/timm/models/crossvit.py new file mode 100644 index 0000000..acf9a47 --- /dev/null +++ b/data_processing/MANIQA/timm/models/crossvit.py @@ -0,0 +1,519 @@ +""" CrossViT Model + +@inproceedings{ + chen2021crossvit, + title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}}, + author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda}, + booktitle={International Conference on Computer Vision (ICCV)}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.14899 +Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py + +NOTE: model names have been renamed from originals to represent actual input res all *_224 -> *_240 and *_384 -> *_408 + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" + +# Copyright IBM All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + + +""" +Modifed from Timm. 
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + +""" +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.hub +from functools import partial +from typing import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, trunc_normal_, _assert +from .registry import register_model +from .vision_transformer import Mlp, Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'), + 'classifier': ('head.0', 'head.1'), + **kwargs + } + + +default_cfgs = { + 'crossvit_15_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_224.pth'), + 'crossvit_15_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_15_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_18_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_224.pth'), + 'crossvit_18_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_18_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_9_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_224.pth'), + 'crossvit_9_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_base_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_base_224.pth'), + 'crossvit_small_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_small_224.pth'), + 'crossvit_tiny_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_tiny_224.pth'), +} + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, 
multi_conv=False): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + if multi_conv: + if patch_size[0] == 12: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), + ) + elif patch_size[0] == 16: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), + ) + else: + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class CrossAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.wq = nn.Linear(dim, dim, bias=qkv_bias) + self.wk = nn.Linear(dim, dim, bias=qkv_bias) + self.wv = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # B1C -> B1H(C/H) -> BH1(C/H) + q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class CrossAttentionBlock(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = CrossAttention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x[:, 0:1, ...] 
+ self.drop_path(self.attn(self.norm1(x))) + + return x + + +class MultiScaleBlock(nn.Module): + + def __init__(self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + + num_branches = len(dim) + self.num_branches = num_branches + # different branch could have different embedding size, the first one is the base + self.blocks = nn.ModuleList() + for d in range(num_branches): + tmp = [] + for i in range(depth[d]): + tmp.append(Block( + dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer)) + if len(tmp) != 0: + self.blocks.append(nn.Sequential(*tmp)) + + if len(self.blocks) == 0: + self.blocks = None + + self.projs = nn.ModuleList() + for d in range(num_branches): + if dim[d] == dim[(d + 1) % num_branches] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])] + self.projs.append(nn.Sequential(*tmp)) + + self.fusion = nn.ModuleList() + for d in range(num_branches): + d_ = (d + 1) % num_branches + nh = num_heads[d_] + if depth[-1] == 0: # backward capability: + self.fusion.append( + CrossAttentionBlock( + dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) + else: + tmp = [] + for _ in range(depth[-1]): + tmp.append(CrossAttentionBlock( + dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) + self.fusion.append(nn.Sequential(*tmp)) + + self.revert_projs = nn.ModuleList() + for d in range(num_branches): + if dim[(d + 1) % num_branches] == dim[d] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(), + nn.Linear(dim[(d + 1) % num_branches], dim[d])] + self.revert_projs.append(nn.Sequential(*tmp)) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + + outs_b = [] + for i, block in enumerate(self.blocks): + outs_b.append(block(x[i])) + + # only take the cls token out + proj_cls_token = torch.jit.annotate(List[torch.Tensor], []) + for i, proj in enumerate(self.projs): + proj_cls_token.append(proj(outs_b[i][:, 0:1, ...])) + + # cross attention + outs = [] + for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)): + tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1) + tmp = fusion(tmp) + reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...]) + tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1) + outs.append(tmp) + return outs + + +def _compute_num_patches(img_size, patches): + return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)] + + +@register_notrace_function +def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False): # annotations for torchscript + """ + Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing. + Args: + x (Tensor): input image + ss (tuple[int, int]): height and width to scale to + crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. 
Defaults to False + Returns: + Tensor: the "scaled" image batch tensor + """ + H, W = x.shape[-2:] + if H != ss[0] or W != ss[1]: + if crop_scale and ss[0] <= H and ss[1] <= W: + cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.)) + x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]] + else: + x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False) + return x + + +class CrossViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000, + embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2., 2., 4.), + qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), multi_conv=False, crop_scale=False, + ): + super().__init__() + + self.num_classes = num_classes + self.img_size = to_2tuple(img_size) + img_scale = to_2tuple(img_scale) + self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] + self.crop_scale = crop_scale # crop instead of interpolate for scale + num_patches = _compute_num_patches(self.img_size_scaled, patch_size) + self.num_branches = len(patch_size) + self.embed_dim = embed_dim + self.num_features = embed_dim[0] # to pass the tests + self.patch_embed = nn.ModuleList() + + # hard-coded for torch jit script + for i in range(self.num_branches): + setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) + setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) + + for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim): + self.patch_embed.append( + PatchEmbed(img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv)) + + self.pos_drop = nn.Dropout(p=drop_rate) + + total_depth = sum([sum(x[-2:]) for x in depth]) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule + dpr_ptr = 0 + self.blocks = nn.ModuleList() + for idx, block_cfg in enumerate(depth): + curr_depth = max(block_cfg[:-1]) + block_cfg[-1] + dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] + blk = MultiScaleBlock( + embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer) + dpr_ptr += curr_depth + self.blocks.append(blk) + + self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) + self.head = nn.ModuleList([ + nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() + for i in range(self.num_branches)]) + + for i in range(self.num_branches): + trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02) + trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + out = set() + for i in range(self.num_branches): + out.add(f'cls_token_{i}') + pe = getattr(self, f'pos_embed_{i}', None) + if pe is not None and pe.requires_grad: + out.add(f'pos_embed_{i}') + return out + + def get_classifier(self): + return self.head + + def reset_classifier(self, 
num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.ModuleList( + [nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in + range(self.num_branches)]) + + def forward_features(self, x): + B = x.shape[0] + xs = [] + for i, patch_embed in enumerate(self.patch_embed): + x_ = x + ss = self.img_size_scaled[i] + x_ = scale_image(x_, ss, self.crop_scale) + x_ = patch_embed(x_) + cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script + cls_tokens = cls_tokens.expand(B, -1, -1) + x_ = torch.cat((cls_tokens, x_), dim=1) + pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script + x_ = x_ + pos_embed + x_ = self.pos_drop(x_) + xs.append(x_) + + for i, blk in enumerate(self.blocks): + xs = blk(xs) + + # NOTE: was before branch token section, move to here to assure all branch token are before layer norm + xs = [norm(xs[i]) for i, norm in enumerate(self.norm)] + return [xo[:, 0] for xo in xs] + + def forward(self, x): + xs = self.forward_features(x) + ce_logits = [head(xs[i]) for i, head in enumerate(self.head)] + if not isinstance(self.head[0], nn.Identity): + ce_logits = torch.mean(torch.stack(ce_logits, dim=0), dim=0) + return ce_logits + + +def _create_crossvit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + def pretrained_filter_fn(state_dict): + new_state_dict = {} + for key in state_dict.keys(): + if 'pos_embed' in key or 'cls_token' in key: + new_key = key.replace(".", "_") + else: + new_key = key + new_state_dict[new_key] = state_dict[key] + return new_state_dict + + return build_model_with_cfg( + CrossViT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=pretrained_filter_fn, + **kwargs) + + +@register_model +def crossvit_tiny_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[3, 3], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_small_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[6, 6], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_base_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[12, 12], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], 
[1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **model_args) + return model diff --git a/data_processing/MANIQA/timm/models/cspnet.py b/data_processing/MANIQA/timm/models/cspnet.py new file mode 100644 index 0000000..4feb341 --- /dev/null +++ b/data_processing/MANIQA/timm/models/cspnet.py @@ -0,0 +1,460 @@ +"""PyTorch CspNet + +A PyTorch implementation of Cross Stage Partial Networks including: +* CSPResNet50 +* CSPResNeXt50 +* CSPDarkNet53 +* and DarkNet53 for good measure + +Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + +Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct, DropPath, create_attn, get_norm_act_layer 
+from .registry import register_model + + +__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.887, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'cspresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), + 'cspresnet50d': _cfg(url=''), + 'cspresnet50w': _cfg(url=''), + 'cspresnext50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth', + input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.875 # FIXME I trained this at 224x224, not 256 like ref impl + ), + 'cspresnext50_iabn': _cfg(url=''), + 'cspdarknet53': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), + 'cspdarknet53_iabn': _cfg(url=''), + 'darknet53': _cfg(url=''), +} + + +model_cfgs = dict( + cspresnet50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50d=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50w=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(1.,) * 4, + bottle_ratio=(0.25,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspresnext50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + groups=(32,) * 4, + exp_ratio=(1.,) * 4, + bottle_ratio=(1.,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspdarknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + exp_ratio=(2.,) + (1.,) * 4, + bottle_ratio=(0.5,) + (1.0,) * 4, + block_ratio=(1.,) + (0.5,) * 4, + down_growth=True, + ) + ), + darknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + bottle_ratio=(0.5,) * 5, + block_ratio=(1.,) * 5, + ) + ) +) + + +def create_stem( + in_chans=3, out_chs=32, kernel_size=3, stride=2, pool='', + act_layer=None, norm_layer=None, aa_layer=None): + stem = nn.Sequential() + if not isinstance(out_chs, (tuple, list)): + out_chs = [out_chs] + assert len(out_chs) + in_c = in_chans + for i, out_c in enumerate(out_chs): + conv_name = f'conv{i + 1}' + stem.add_module(conv_name, ConvBnAct( + in_c, out_c, kernel_size, stride=stride if i == 0 else 1, + act_layer=act_layer, 
norm_layer=norm_layer)) + in_c = out_c + last_conv = conv_name + if pool: + if aa_layer is not None: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + stem.add_module('aa', aa_layer(channels=in_c, stride=2)) + else: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + return stem, dict(num_chs=in_c, reduction=stride, module='.'.join(['stem', last_conv])) + + +class ResBottleneck(nn.Module): + """ ResNe(X)t Bottleneck Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.25, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_last=False, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResBottleneck, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn2 = create_attn(attn_layer, channels=mid_chs) if not attn_last else None + self.conv3 = ConvBnAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) + self.attn3 = create_attn(attn_layer, channels=out_chs) if attn_last else None + self.drop_path = drop_path + self.act3 = act_layer(inplace=True) + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn2 is not None: + x = self.attn2(x) + x = self.conv3(x) + if self.attn3 is not None: + x = self.attn3(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + # FIXME partial shortcut needed if first block handled as per original, not used for my current impl + #x[:, :shortcut.size(1)] += shortcut + x = self.act3(x) + return x + + +class DarkBlock(nn.Module): + """ DarkNet Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, + drop_block=None, drop_path=None): + super(DarkBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn = create_attn(attn_layer, channels=out_chs) + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + return x + + +class CrossStage(nn.Module): + """Cross Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., exp_ratio=1., + groups=1, first_dilation=None, down_growth=False, cross_linear=False, block_dpr=None, + block_fn=ResBottleneck, **block_kwargs): + super(CrossStage, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + exp_chs = int(round(out_chs * exp_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), 
norm_layer=block_kwargs.get('norm_layer')) + + if stride != 1 or first_dilation != dilation: + self.conv_down = ConvBnAct( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=block_kwargs.get('aa_layer', None), **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = None + prev_chs = in_chs + + # FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. Also, + # there is also special case for the first stage for some of the model that results in uneven split + # across the two paths. I did it this way for simplicity for now. + self.conv_exp = ConvBnAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # output of conv_exp is always split in two + + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + # transition convs + self.conv_transition_b = ConvBnAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) + self.conv_transition = ConvBnAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + if self.conv_down is not None: + x = self.conv_down(x) + x = self.conv_exp(x) + split = x.shape[1] // 2 + xs, xb = x[:, :split], x[:, split:] + xb = self.blocks(xb) + xb = self.conv_transition_b(xb).contiguous() + out = self.conv_transition(torch.cat([xs, xb], dim=1)) + return out + + +class DarkStage(nn.Module): + """DarkNet stage.""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., groups=1, + first_dilation=None, block_fn=ResBottleneck, block_dpr=None, **block_kwargs): + super(DarkStage, self).__init__() + first_dilation = first_dilation or dilation + + self.conv_down = ConvBnAct( + in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'), + aa_layer=block_kwargs.get('aa_layer', None)) + + prev_chs = out_chs + block_out_chs = int(round(out_chs * block_ratio)) + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + def forward(self, x): + x = self.conv_down(x) + x = self.blocks(x) + return x + + +def _cfg_to_stage_args(cfg, curr_stride=2, output_stride=32, drop_path_rate=0.): + # get per stage args for stage and containing blocks, calculate strides to meet target output_stride + num_stages = len(cfg['depth']) + if 'groups' not in cfg: + cfg['groups'] = (1,) * num_stages + if 'down_growth' in cfg and not isinstance(cfg['down_growth'], (list, tuple)): + cfg['down_growth'] = (cfg['down_growth'],) * num_stages + if 'cross_linear' in cfg and not isinstance(cfg['cross_linear'], (list, tuple)): + cfg['cross_linear'] = (cfg['cross_linear'],) * num_stages + cfg['block_dpr'] = [None] * num_stages if not drop_path_rate else \ + [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg['depth'])).split(cfg['depth'])] + stage_strides = [] + stage_dilations = [] + stage_first_dilations = [] + dilation = 1 + for cfg_stride in cfg['stride']: + stage_first_dilations.append(dilation) + if 
curr_stride >= output_stride: + dilation *= cfg_stride + stride = 1 + else: + stride = cfg_stride + curr_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + cfg['stride'] = stage_strides + cfg['dilation'] = stage_dilations + cfg['first_dilation'] = stage_first_dilations + stage_args = [dict(zip(cfg.keys(), values)) for values in zip(*cfg.values())] + return stage_args + + +class CspNet(nn.Module): + """Cross Stage Partial base model. + + Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + + NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the + darknet impl. I did it this way for simplicity and less special cases. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + act_layer=nn.LeakyReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_path_rate=0., + zero_init_last_bn=True, stage_fn=CrossStage, block_fn=ResBottleneck): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + + # Construct the stem + self.stem, stem_feat_info = create_stem(in_chans, **cfg['stem'], **layer_args) + self.feature_info = [stem_feat_info] + prev_chs = stem_feat_info['num_chs'] + curr_stride = stem_feat_info['reduction'] # reduction does not include pool + if cfg['stem']['pool']: + curr_stride *= 2 + + # Construct the stages + per_stage_args = _cfg_to_stage_args( + cfg['stage'], curr_stride=curr_stride, output_stride=output_stride, drop_path_rate=drop_path_rate) + self.stages = nn.Sequential() + for i, sa in enumerate(per_stage_args): + self.stages.add_module( + str(i), stage_fn(prev_chs, **sa, **layer_args, block_fn=block_fn)) + prev_chs = sa['out_chs'] + curr_stride *= sa['stride'] + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + + # Construct the head + self.num_features = prev_chs + self.head = ClassifierHead( + in_chs=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_cspnet(variant, pretrained=False, **kwargs): + cfg_variant = variant.split('_')[0] + # NOTE: DarkNet is one of few models with stride==1 features w/ 6 out_indices [0..5] + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5) if 'darknet' in variant else (0, 1, 2, 3, 4)) + return build_model_with_cfg( + CspNet, 
variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + + +@register_model +def cspresnet50(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50d(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50w(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50(pretrained=False, **kwargs): + return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspresnext50_iabn', pretrained=pretrained, norm_layer=norm_layer, **kwargs) + + +@register_model +def cspdarknet53(pretrained=False, **kwargs): + return _create_cspnet('cspdarknet53', pretrained=pretrained, block_fn=DarkBlock, **kwargs) + + +@register_model +def cspdarknet53_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspdarknet53_iabn', pretrained=pretrained, block_fn=DarkBlock, norm_layer=norm_layer, **kwargs) + + +@register_model +def darknet53(pretrained=False, **kwargs): + return _create_cspnet('darknet53', pretrained=pretrained, block_fn=DarkBlock, stage_fn=DarkStage, **kwargs) diff --git a/data_processing/MANIQA/timm/models/densenet.py b/data_processing/MANIQA/timm/models/densenet.py new file mode 100644 index 0000000..38a1972 --- /dev/null +++ b/data_processing/MANIQA/timm/models/densenet.py @@ -0,0 +1,387 @@ +"""Pytorch Densenet implementation w/ tweaks +This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with +fixed kwargs passthrough and addition of dynamic global avg/max pool. 
+""" +import re +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from torch.jit.annotations import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, create_norm_act, BlurPool2d, create_classifier +from .registry import register_model + +__all__ = ['DenseNet'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.conv0', 'classifier': 'classifier', + } + + +default_cfgs = { + 'densenet121': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth'), + 'densenet121d': _cfg(url=''), + 'densenetblur121d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth'), + 'densenet169': _cfg(url='https://download.pytorch.org/models/densenet169-b2777c0a.pth'), + 'densenet201': _cfg(url='https://download.pytorch.org/models/densenet201-c1103571.pth'), + 'densenet161': _cfg(url='https://download.pytorch.org/models/densenet161-8d451a50.pth'), + 'densenet264': _cfg(url=''), + 'densenet264d_iabn': _cfg(url=''), + 'tv_densenet121': _cfg(url='https://download.pytorch.org/models/densenet121-a639ec97.pth'), +} + + +class DenseLayer(nn.Module): + def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, + drop_rate=0., memory_efficient=False): + super(DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('conv1', nn.Conv2d( + num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('conv2', nn.Conv2d( + bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + return cp.checkpoint(closure, *x) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + # torchscript does not yet support *args, so we overload method + # allowing it to take either a List[Tensor] or single 
Tensor + def forward(self, x): # noqa: F811 + if isinstance(x, torch.Tensor): + prev_features = [x] + else: + prev_features = x + + if self.memory_efficient and self.any_requires_grad(prev_features): + if torch.jit.is_scripting(): + raise Exception("Memory Efficient not supported in JIT") + bottleneck_output = self.call_checkpoint_bottleneck(prev_features) + else: + bottleneck_output = self.bottleneck_fn(prev_features) + + new_features = self.conv2(self.norm2(bottleneck_output)) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.ModuleDict): + _version = 2 + + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=nn.ReLU, + drop_rate=0., memory_efficient=False): + super(DenseBlock, self).__init__() + for i in range(num_layers): + layer = DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.items(): + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None): + super(DenseTransition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('conv', nn.Conv2d( + num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + if aa_layer is not None: + self.add_module('pool', aa_layer(num_output_features, stride=2)) + else: + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" `_ + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), bn_size=4, stem_type='', + num_classes=1000, in_chans=3, global_pool='avg', + norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, memory_efficient=False, + aa_stem_only=True): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(DenseNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type # 3x3 deep stem + num_init_features = growth_rate * 2 + if aa_layer is None: + stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + stem_pool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=num_init_features, stride=2)]) + if deep_stem: + stem_chs_1 = stem_chs_2 = growth_rate + if 'tiered' in stem_type: + stem_chs_1 = 3 * (growth_rate // 4) + stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), + ('norm0', norm_layer(stem_chs_1)), + ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), + ('norm1', norm_layer(stem_chs_2)), + ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), + ('norm2', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + else: + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + self.feature_info = [ + dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] + current_stride = 4 + + # DenseBlocks + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + module_name = f'denseblock{(i + 1)}' + self.features.add_module(module_name, block) + num_features = num_features + num_layers * growth_rate + transition_aa_layer = None if aa_stem_only else aa_layer + if i != len(block_config) - 1: + self.feature_info += [ + dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] + current_stride *= 2 + trans = DenseTransition( + num_input_features=num_features, num_output_features=num_features // 2, + norm_layer=norm_layer, aa_layer=transition_aa_layer) + self.features.add_module(f'transition{i + 1}', trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] + self.num_features = num_features + + # Linear layer + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + # Official init from torch repo. 
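+ # Conv2d weights: Kaiming normal; BatchNorm2d: weight=1, bias=0; Linear: bias=0 (Linear weights keep their default init).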
+ for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + # both classifier and block drop? + # if self.drop_rate > 0.: + # x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +def _filter_torchvision_pretrained(state_dict): + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + return state_dict + + +def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): + kwargs['growth_rate'] = growth_rate + kwargs['block_config'] = block_config + return build_model_with_cfg( + DenseNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, + **kwargs) + + +@register_model +def densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenetblur121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, stem_type='deep', + aa_layer=BlurPool2d, **kwargs) + return model + + +@register_model +def densenet121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121d', growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', + pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet169(pretrained=False, **kwargs): + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet169', growth_rate=32, block_config=(6, 12, 32, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet201(pretrained=False, **kwargs): + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet201', growth_rate=32, block_config=(6, 12, 48, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet161(pretrained=False, **kwargs): + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet161', growth_rate=48, block_config=(6, 12, 36, 24), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264(pretrained=False, **kwargs): + r"""Densenet-264 model from + `"Densely Connected Convolutional Networks" ` + """ + 
model = _create_densenet( + 'densenet264', growth_rate=48, block_config=(6, 12, 64, 48), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264d_iabn(pretrained=False, **kwargs): + r"""Densenet-264 model with deep stem and Inplace-ABN + """ + def norm_act_fn(num_features, **kwargs): + return create_norm_act('iabn', num_features, **kwargs) + model = _create_densenet( + 'densenet264d_iabn', growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep', + norm_layer=norm_act_fn, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tv_densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model with original Torchvision weights, from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'tv_densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/dla.py b/data_processing/MANIQA/timm/models/dla.py new file mode 100644 index 0000000..f6e4dd2 --- /dev/null +++ b/data_processing/MANIQA/timm/models/dla.py @@ -0,0 +1,443 @@ +""" Deep Layer Aggregation and DLA w/ Res2Net +DLA original adapted from Official Pytorch impl at: +DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 + +Res2Net additions from: https://github.com/gasvn/Res2Net/ +Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['DLA'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'base_layer.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'), + 'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'), + 'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'), + 'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'), + 'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'), + 'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'), + 'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'), + 'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'), + 'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'), + 'dla169': 
_cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'), + 'dla60_res2net': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'), + 'dla60_res2next': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'), +} + + +class DlaBasic(nn.Module): + """DLA Basic""" + + def __init__(self, inplanes, planes, stride=1, dilation=1, **_): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) + self.bn2 = nn.BatchNorm2d(planes) + self.stride = stride + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottleneck(nn.Module): + """DLA/DLA-X Bottleneck""" + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes) + self.conv2 = nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, + bias=False, dilation=dilation, groups=cardinality) + self.bn2 = nn.BatchNorm2d(mid_planes) + self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottle2neck(nn.Module): + """ Res2Net/Res2NeXT DLA Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py + """ + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): + super(DlaBottle2neck, self).__init__() + self.is_first = stride > 1 + self.scale = scale + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + self.width = mid_planes + + self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes * scale) + + num_scale_convs = max(1, scale - 1) + convs = [] + bns = [] + for _ in range(num_scale_convs): + convs.append(nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, + padding=dilation, dilation=dilation, groups=cardinality, bias=False)) + bns.append(nn.BatchNorm2d(mid_planes)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + self.pool = 
nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + + self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + sp = spx[i] if i == 0 or self.is_first else sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + spo.append(self.pool(spx[-1]) if self.is_first else spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaRoot(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, shortcut): + super(DlaRoot, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.shortcut = shortcut + + def forward(self, *x): + children = x + x = self.conv(torch.cat(x, 1)) + x = self.bn(x) + if self.shortcut: + x += children[0] + x = self.relu(x) + + return x + + +class DlaTree(nn.Module): + def __init__(self, levels, block, in_channels, out_channels, stride=1, + dilation=1, cardinality=1, base_width=64, + level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False): + super(DlaTree, self).__init__() + if root_dim == 0: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() + self.project = nn.Identity() + cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) + if levels == 1: + self.tree1 = block(in_channels, out_channels, stride, **cargs) + self.tree2 = block(out_channels, out_channels, 1, **cargs) + if in_channels != out_channels: + # NOTE the official impl/weights have project layers in levels > 1 case that are never + # used, I've moved the project layer here to avoid wasted params but old checkpoints will + # need strict=False while loading. 
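+ # The shortcut projection below is a 1x1 conv + BatchNorm that maps in_channels to out_channels for the residual add.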
+ self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), + nn.BatchNorm2d(out_channels)) + else: + cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) + self.tree1 = DlaTree( + levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs) + self.tree2 = DlaTree( + levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs) + if levels == 1: + self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) + self.level_root = level_root + self.root_dim = root_dim + self.levels = levels + + def forward(self, x, shortcut=None, children=None): + children = [] if children is None else children + bottom = self.downsample(x) + shortcut = self.project(bottom) + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, shortcut) + if self.levels == 1: + x2 = self.tree2(x1) + x = self.root(x2, x1, *children) + else: + children.append(x1) + x = self.tree2(x1, children=children) + return x + + +class DLA(nn.Module): + def __init__(self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, + drop_rate=0.0, global_pool='avg'): + super(DLA, self).__init__() + self.channels = channels + self.num_classes = num_classes + self.cardinality = cardinality + self.base_width = base_width + self.drop_rate = drop_rate + assert output_stride == 32 # FIXME support dilation + + self.base_layer = nn.Sequential( + nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), + nn.BatchNorm2d(channels[0]), + nn.ReLU(inplace=True)) + self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) + self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) + cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) + self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) + self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) + self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) + self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) + self.feature_info = [ + dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level + dict(num_chs=channels[1], reduction=2, module='level1'), + dict(num_chs=channels[2], reduction=4, module='level2'), + dict(num_chs=channels[3], reduction=8, module='level3'), + dict(num_chs=channels[4], reduction=16, module='level4'), + dict(num_chs=channels[5], reduction=32, module='level5'), + ] + + self.num_features = channels[-1] + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): + modules = [] + for i in range(convs): + modules.extend([ + nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, + padding=dilation, bias=False, dilation=dilation), + nn.BatchNorm2d(planes), + nn.ReLU(inplace=True)]) + inplanes = planes + return nn.Sequential(*modules) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + x = self.base_layer(x) + x = self.level0(x) + x = self.level1(x) + x = self.level2(x) + x = self.level3(x) + x = self.level4(x) + x = self.level5(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + x = self.flatten(x) + return x + + +def _create_dla(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DLA, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=False, + feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), + **kwargs) + + +@register_model +def dla60_res2net(pretrained=False, **kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=1, base_width=28, **kwargs) + return _create_dla('dla60_res2net', pretrained, **model_kwargs) + + +@register_model +def dla60_res2next(pretrained=False,**kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=8, base_width=4, **kwargs) + return _create_dla('dla60_res2next', pretrained, **model_kwargs) + + +@register_model +def dla34(pretrained=False, **kwargs): # DLA-34 + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], + block=DlaBasic, **kwargs) + return _create_dla('dla34', pretrained, **model_kwargs) + + +@register_model +def dla46_c(pretrained=False, **kwargs): # DLA-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, **kwargs) + return _create_dla('dla46_c', pretrained, **model_kwargs) + + +@register_model +def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla46x_c', pretrained, **model_kwargs) + + +@register_model +def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x_c', pretrained, **model_kwargs) + + +@register_model +def dla60(pretrained=False, **kwargs): # DLA-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, **kwargs) + return _create_dla('dla60', pretrained, **model_kwargs) + + +@register_model +def dla60x(pretrained=False, **kwargs): # DLA-X-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + 
block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x', pretrained, **model_kwargs) + + +@register_model +def dla102(pretrained=False, **kwargs): # DLA-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla102', pretrained, **model_kwargs) + + +@register_model +def dla102x(pretrained=False, **kwargs): # DLA-X-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x', pretrained, **model_kwargs) + + +@register_model +def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x2', pretrained, **model_kwargs) + + +@register_model +def dla169(pretrained=False, **kwargs): # DLA-169 + model_kwargs = dict( + levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla169', pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/dpn.py b/data_processing/MANIQA/timm/models/dpn.py new file mode 100644 index 0000000..c4e380b --- /dev/null +++ b/data_processing/MANIQA/timm/models/dpn.py @@ -0,0 +1,317 @@ +""" PyTorch implementation of DualPathNetworks +Based on original MXNet implementation https://github.com/cypw/DPNs with +many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. + +This implementation is compatible with the pretrained weights from cypw's MXNet implementation. 
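+
+Each DualPathBlock below carries two parallel feature paths: a ResNet-style residual path (element-wise sum)
+and a DenseNet-style dense path (channel concatenation), which is the core idea of DPN.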
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict +from functools import partial +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, ConvBnAct, create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['DPN'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, + 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'dpn68': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68-66bebafa7.pth'), + 'dpn68b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dpn68b_ra-a31ca160.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'dpn92': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn92_extra-b040e4a9b.pth'), + 'dpn98': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn98-5b90dec4d.pth'), + 'dpn131': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn131-71dfe43e0.pth'), + 'dpn107': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn107_extra-1ac7121e2.pth') +} + + +class CatBnAct(nn.Module): + def __init__(self, in_chs, norm_layer=BatchNormAct2d): + super(CatBnAct, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + def forward(self, x): + if isinstance(x, tuple): + x = torch.cat(x, dim=1) + return self.bn(x) + + +class BnActConv2d(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): + super(BnActConv2d, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) + + def forward(self, x): + return self.conv(self.bn(x)) + + +class DualPathBlock(nn.Module): + def __init__( + self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False): + super(DualPathBlock, self).__init__() + self.num_1x1_c = num_1x1_c + self.inc = inc + self.b = b + if block_type == 'proj': + self.key_stride = 1 + self.has_proj = True + elif block_type == 'down': + self.key_stride = 2 + self.has_proj = True + else: + assert block_type == 'normal' + self.key_stride = 1 + self.has_proj = False + + self.c1x1_w_s1 = None + self.c1x1_w_s2 = None + if self.has_proj: + # Using different member names here to allow easier parameter key matching for conversion + if self.key_stride == 2: + self.c1x1_w_s2 
= BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) + else: + self.c1x1_w_s1 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) + + self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) + self.c3x3_b = BnActConv2d( + in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) + if b: + self.c1x1_c = CatBnAct(in_chs=num_3x3_b) + self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) + self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) + else: + self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) + self.c1x1_c1 = None + self.c1x1_c2 = None + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + pass + + def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: + if isinstance(x, tuple): + x_in = torch.cat(x, dim=1) + else: + x_in = x + if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: + # self.has_proj == False, torchscript requires condition on module == None + x_s1 = x[0] + x_s2 = x[1] + else: + # self.has_proj == True + if self.c1x1_w_s1 is not None: + # self.key_stride = 1 + x_s = self.c1x1_w_s1(x_in) + else: + # self.key_stride = 2 + x_s = self.c1x1_w_s2(x_in) + x_s1 = x_s[:, :self.num_1x1_c, :, :] + x_s2 = x_s[:, self.num_1x1_c:, :, :] + x_in = self.c1x1_a(x_in) + x_in = self.c3x3_b(x_in) + x_in = self.c1x1_c(x_in) + if self.c1x1_c1 is not None: + # self.b == True, using None check for torchscript compat + out1 = self.c1x1_c1(x_in) + out2 = self.c1x1_c2(x_in) + else: + out1 = x_in[:, :self.num_1x1_c, :, :] + out2 = x_in[:, self.num_1x1_c:, :, :] + resid = x_s1 + out1 + dense = torch.cat([x_s2, out2], dim=1) + return resid, dense + + +class DPN(nn.Module): + def __init__(self, small=False, num_init_features=64, k_r=96, groups=32, + b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), output_stride=32, + num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', fc_act=nn.ELU): + super(DPN, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.b = b + assert output_stride == 32 # FIXME look into dilation support + norm_layer = partial(BatchNormAct2d, eps=.001) + fc_norm_layer = partial(BatchNormAct2d, eps=.001, act_layer=fc_act, inplace=False) + bw_factor = 1 if small else 4 + blocks = OrderedDict() + + # conv1 + blocks['conv1_1'] = ConvBnAct( + in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) + blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] + + # conv2 + bw = 64 * bw_factor + inc = inc_sec[0] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[0] + 1): + blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] + + # conv3 + bw = 128 * bw_factor + inc = inc_sec[1] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc 
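+        # After a 'proj'/'down' block the residual path is bw channels wide and the dense path is
+        # 3 * inc (2 * inc from the projection split plus inc from the block output); every
+        # following 'normal' block appends another inc channels to the dense path.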
+ for i in range(2, k_sec[1] + 1): + blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] + + # conv4 + bw = 256 * bw_factor + inc = inc_sec[2] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[2] + 1): + blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] + + # conv5 + bw = 512 * bw_factor + inc = inc_sec[3] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[3] + 1): + blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] + + blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) + + self.num_features = in_chs + self.features = nn.Sequential(blocks) + + # Using 1x1 conv for the FC layer to allow the extra pooling scheme + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + x = self.flatten(x) + return x + + +def _create_dpn(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DPN, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_concat=True, flatten_sequential=True), + **kwargs) + + +@register_model +def dpn68(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn68b(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68b', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn92(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=64, k_r=96, groups=32, + k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), **kwargs) + return _create_dpn('dpn92', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn98(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=96, k_r=160, groups=40, + k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn98', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn131(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, 
k_r=160, groups=40, + k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn131', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn107(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=200, groups=50, + k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128), **kwargs) + return _create_dpn('dpn107', pretrained=pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/efficientnet.py b/data_processing/MANIQA/timm/models/efficientnet.py new file mode 100644 index 0000000..cb65ffb --- /dev/null +++ b/data_processing/MANIQA/timm/models/efficientnet.py @@ -0,0 +1,2318 @@ +""" The EfficientNet Family in PyTorch + +An implementation of EfficienNet that covers variety of related models with efficient architectures: + +* EfficientNet-V2 + - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + +* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) + - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 + - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 + - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 + - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 + +* MixNet (Small, Medium, and Large) + - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 + +* MNasNet B1, A1 (SE), Small + - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 + +* FBNet-C + - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 + +* Single-Path NAS Pixel1 + - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 + +* TinyNet + - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 + - Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch + +* And likely more... + +The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available +by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing +the models and weights open source! 
+ +Hacked together by / Copyright 2019, Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['EfficientNet', 'EfficientNetFeatures'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mnasnet_050': _cfg(url=''), + 'mnasnet_075': _cfg(url=''), + 'mnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth'), + 'mnasnet_140': _cfg(url=''), + + 'semnasnet_050': _cfg(url=''), + 'semnasnet_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth'), + 'semnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth'), + 'semnasnet_140': _cfg(url=''), + 'mnasnet_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth'), + + 'mobilenetv2_035': _cfg( + url=''), + 'mobilenetv2_050': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth', + interpolation='bicubic', + ), + 'mobilenetv2_075': _cfg( + url=''), + 'mobilenetv2_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth'), + 'mobilenetv2_110d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth'), + 'mobilenetv2_120d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth'), + 'mobilenetv2_140': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth'), + + 'fbnetc_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + interpolation='bilinear'), + 'spnasnet_100': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + interpolation='bilinear'), + + # NOTE experimenting with alternate attention + 'efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth'), + 'efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + test_input_size=(3, 256, 256), crop_pct=1.0), + 'efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0), + 'efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', + input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0), + 'efficientnet_b5': _cfg( + url='', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'efficientnet_b6': _cfg( + url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'efficientnet_b7': _cfg( + url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'efficientnet_b8': _cfg( + url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + 'efficientnet_l2': _cfg( + url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), + + 'efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth'), + 'efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_el': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_es_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_es_pruned75.pth'), + 'efficientnet_el_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el_pruned70.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_cc_b0_4e': _cfg(url=''), + 'efficientnet_cc_b0_8e': _cfg(url=''), + 'efficientnet_cc_b1_8e': _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'efficientnet_lite0': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth'), + 'efficientnet_lite1': _cfg( + url='', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_lite2': _cfg( + url='', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'efficientnet_lite3': _cfg( + url='', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_lite4': _cfg( + url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + + 'efficientnet_b1_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b2_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b3_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'gc_efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'efficientnetv2_rw_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_rw_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + + 'efficientnetv2_s': _cfg( + url='', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_m': _cfg( + url='', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + 'efficientnetv2_l': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'efficientnetv2_xl': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnet_b0': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ap': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + 
input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_l2_ns_475': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), + 'tf_efficientnet_l2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), + + 'tf_efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), ), + 'tf_efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_el': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'tf_efficientnet_cc_b0_4e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b0_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b1_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'tf_efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite1': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), + 'tf_efficientnet_lite4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), + + 'tf_efficientnetv2_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', + mean=(0.5, 
0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', + input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), + 'tf_efficientnetv2_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', + input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), + 'tf_efficientnetv2_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', + input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), + 'tf_efficientnetv2_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), + + 'mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth'), + 'mixnet_m': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'), + 'mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth'), + 'mixnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth'), + 'mixnet_xxl': _cfg(), + + 'tf_mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'), + 'tf_mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'), + 'tf_mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'), + + "tinynet_a": _cfg( + input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth'), + "tinynet_b": _cfg( + input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth'), + "tinynet_c": _cfg( + input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth'), + "tinynet_d": _cfg( + input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth'), + "tinynet_e": _cfg( + input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth'), +} + + +class EfficientNet(nn.Module): + """ (Generic) EfficientNet + + A flexible and performant PyTorch implementation of efficient network architectures, including: + * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 + * EfficientNet B0-B8, L2 + * EfficientNet-EdgeTPU + * EfficientNet-CondConv + * MixNet S, M, L, XL + * MnasNet A1, B1, and small + * MobileNet-V2 + * FBNet C + * Single-Path NAS Pixel1 + + """ + + def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, + output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, + se_layer=None, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(EfficientNet, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = 
EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type) + self.bn2 = norm_layer(self.num_features) + self.act2 = act_layer(inplace=True) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool]) + layers.extend([nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.conv_head(x) + x = self.bn2(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class EfficientNetFeatures(nn.Module): + """ EfficientNet Feature Extractor + + A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation + and object detection models. 
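+
+    forward() returns a List[torch.Tensor] with one feature map per requested entry in out_indices.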
+ """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, + act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(EfficientNetFeatures, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, + feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_effnet(variant, pretrained=False, **kwargs): + features_only = False + model_cls = EfficientNet + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') + model_cls = EfficientNetFeatures + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-a1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r2_k3_s2_e6_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r4_k3_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r3_k5_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), + num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)), + stem_size=32, + fix_stem=fix_stem_head, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu6'), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but is not 100% clear + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-EdgeTPU model + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu + """ + + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + 
act_layer=resolve_act_layer(kwargs, 'relu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_condconv( + variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): + """Creates an EfficientNet-CondConv model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + # NOTE unlike official impl, this one uses `cc` option where x is the base number of experts for each stage and + # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'swish'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + act_layer=resolve_act_layer(kwargs, 'relu6'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_base( + variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 base model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + arch_def = [ + ['cn_r1_k3_s1_e1_c16_skip'], + ['er_r2_k3_s2_e4_c32'], + 
['er_r2_k3_s2_e4_c48'], + ['ir_r3_k3_s2_e4_c96_se0.25'], + ['ir_r5_k3_s1_e6_c112_se0.25'], + ['ir_r8_k3_s2_e6_c192_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_s( + variant, channel_multiplier=1.0, depth_multiplier=1.0, rw=False, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Small model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + + NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, + before ref the impl was released. + """ + arch_def = [ + ['cn_r2_k3_s1_e1_c24_skip'], + ['er_r4_k3_s2_e4_c48'], + ['er_r4_k3_s2_e4_c64'], + ['ir_r6_k3_s2_e4_c128_se0.25'], + ['ir_r9_k3_s1_e6_c160_se0.25'], + ['ir_r15_k3_s2_e6_c256_se0.25'], + ] + num_features = 1280 + if rw: + # my original variant, based on paper figure differs from the official release + arch_def[0] = ['er_r2_k3_s1_e1_c24'] + arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] + num_features = 1792 + + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(num_features), + stem_size=24, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Medium model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r3_k3_s1_e1_c24_skip'], + ['er_r5_k3_s2_e4_c48'], + ['er_r5_k3_s2_e4_c80'], + ['ir_r7_k3_s2_e4_c160_se0.25'], + ['ir_r14_k3_s1_e6_c176_se0.25'], + ['ir_r18_k3_s2_e6_c304_se0.25'], + ['ir_r5_k3_s1_e6_c512_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models 
and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r7_k3_s2_e4_c64'], + ['er_r7_k3_s2_e4_c96'], + ['ir_r10_k3_s2_e4_c192_se0.25'], + ['ir_r19_k3_s1_e6_c224_se0.25'], + ['ir_r25_k3_s2_e6_c384_se0.25'], + ['ir_r7_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Xtra-Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r8_k3_s2_e4_c64'], + ['er_r8_k3_s2_e4_c96'], + ['ir_r16_k3_s2_e4_c192_se0.25'], + ['ir_r24_k3_s1_e6_c256_se0.25'], + ['ir_r32_k3_s2_e6_c512_se0.25'], + ['ir_r8_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. 
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_tinynet( + variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs +): + """Creates a TinyNet model. + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=max(1280, round_channels(1280, model_width, 8, None)), + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=model_width), + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mnasnet_050(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_075(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_100(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_b1(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + return mnasnet_100(pretrained, **kwargs) + + +@register_model +def mnasnet_140(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_050(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_075(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. 
""" + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_100(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_a1(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + return semnasnet_100(pretrained, **kwargs) + + +@register_model +def semnasnet_140(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_small(pretrained=False, **kwargs): + """ MNASNet Small, depth multiplier of 1.0. """ + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_035(pretrained=False, **kwargs): + """ MobileNet V2 w/ 0.35 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_050(pretrained=False, **kwargs): + """ MobileNet V2 w/ 0.5 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_075(pretrained=False, **kwargs): + """ MobileNet V2 w/ 0.75 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_100(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_140(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_110d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_120d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetc_100(pretrained=False, **kwargs): + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def spnasnet_100(pretrained=False, **kwargs): + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + 
model = _gen_efficientnet( + 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2a(pretrained=False, **kwargs): + """ EfficientNet-B2 @ 288x288 w/ 1.0 test crop""" + # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now + return efficientnet_b2(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3a(pretrained=False, **kwargs): + """ EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct """ + # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now + return efficientnet_b3(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_l2(pretrained=False, **kwargs): + """ EfficientNet-L2.""" + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. 
""" + model = _gen_efficientnet_edge( + 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_es_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. """ + model = _gen_efficientnet_edge( + 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. """ + model = _gen_efficientnet_edge( + 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_el_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, 
pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1_pruned(pretrained=False, **kwargs): + """ EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + variant = 'efficientnet_b1_pruned' + model = _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2_pruned(pretrained=False, **kwargs): + """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3_pruned(pretrained=False, **kwargs): + """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) + return model + + +@register_model +def gc_efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, + rw=False, se_layer='gc', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small (RW variant). + NOTE: This is my initial (pre official code release) w/ some differences. + See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding + """ + model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium (RW variant). 
+ """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. """ + model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. """ + model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. """ + model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_xl(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. """ + model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ap(pretrained=False, **kwargs): + """ EfficientNet-B0 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ap(pretrained=False, **kwargs): + """ EfficientNet-B1 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ap(pretrained=False, **kwargs): + """ EfficientNet-B2 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ap(pretrained=False, **kwargs): + """ EfficientNet-B3 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ap(pretrained=False, **kwargs): + """ EfficientNet-B4 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ap(pretrained=False, **kwargs): + """ EfficientNet-B5 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ap(pretrained=False, **kwargs): + """ EfficientNet-B6 AdvProp. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ap(pretrained=False, **kwargs): + """ EfficientNet-B7 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8_ap(pretrained=False, **kwargs): + """ EfficientNet-B8 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ns(pretrained=False, **kwargs): + """ EfficientNet-B0 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ns(pretrained=False, **kwargs): + """ EfficientNet-B1 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ns(pretrained=False, **kwargs): + """ EfficientNet-B2 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ns(pretrained=False, **kwargs): + """ EfficientNet-B3 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ns(pretrained=False, **kwargs): + """ EfficientNet-B4 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ns(pretrained=False, **kwargs): + """ EfficientNet-B5 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ns(pretrained=False, **kwargs): + """ EfficientNet-B6 NoisyStudent. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ns(pretrained=False, **kwargs): + """ EfficientNet-B7 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + + +@register_model +def tf_efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large w/ ImageNet-21k pretrained weights. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b0(pretrained=False, **kwargs): + """ EfficientNet-V2-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b1(pretrained=False, **kwargs): + """ EfficientNet-V2-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b2(pretrained=False, **kwargs): + """ EfficientNet-V2-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b3(pretrained=False, **kwargs): + """ EfficientNet-V2-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. + """ + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. + """ + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. + """ + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xl(pretrained=False, **kwargs): + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xxl(pretrained=False, **kwargs): + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_a(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_b(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_c(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_d(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_e(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/efficientnet_blocks.py b/data_processing/MANIQA/timm/models/efficientnet_blocks.py new file mode 100644 index 0000000..aef6629 --- /dev/null +++ b/data_processing/MANIQA/timm/models/efficientnet_blocks.py @@ -0,0 +1,323 @@ +""" EfficientNet, MobileNetV3, etc Blocks + +Hacked together by / Copyright 2019, Ross Wightman +""" + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from .layers import create_conv2d, drop_path, make_divisible, create_act_layer +from .layers.activations import sigmoid + +__all__ = [ + 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual'] + + +class SqueezeExcite(nn.Module): + """ Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family + + Args: + in_chs (int): input channels to layer + rd_ratio (float): ratio of squeeze reduction + act_layer (nn.Module): activation layer of containing block + gate_layer (Callable): attention gate function + force_act_layer (nn.Module): override block's activation fn if this is set/bound + rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs + """ + + def __init__( + self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU, + gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None): + super(SqueezeExcite, self).__init__() + if rd_channels is None: + rd_round_fn = rd_round_fn or round + rd_channels = rd_round_fn(in_chs * rd_ratio) + act_layer = force_act_layer or act_layer + self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) + self.act1 = create_act_layer(act_layer, inplace=True) + self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se) + + +class ConvBnAct(nn.Module): + """ Conv + Norm Layer + Activation w/ optional skip connection + """ + def __init__( + self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='', + skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, 
drop_path_rate=0.): + super(ConvBnAct, self).__init__() + self.has_residual = skip and stride == 1 and in_chs == out_chs + self.drop_path_rate = drop_path_rate + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(out_chs) + self.act1 = act_layer(inplace=True) + + def feature_info(self, location): + if location == 'expansion': # output of conv after act, same as block coutput + info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv.out_channels) + return info + + def forward(self, x): + shortcut = x + x = self.conv(x) + x = self.bn1(x) + x = self.act1(x) + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion + (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. + """ + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + se_layer=None, drop_path_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.drop_path_rate = drop_path_rate + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity() + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs) + self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PW + info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) + return info + + def forward(self, x): + shortcut = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE + + Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often + referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in + * MNasNet - https://arxiv.org/abs/1807.11626 + * EfficientNet - https://arxiv.org/abs/1905.11946 + * MobileNet-V3 - https://arxiv.org/abs/1905.02244 + """ + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, 
pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.): + super(InvertedResidual, self).__init__() + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_layer(mid_chs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs) + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + shortcut = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + + return x + + +class CondConvResidual(InvertedResidual): + """ Inverted residual block w/ CondConv routing""" + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + + super(CondConvResidual, self).__init__( + in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type, + act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs, + drop_path_rate=drop_path_rate) + + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + shortcut = x + + # CondConv routing + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + + # Point-wise expansion + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class EdgeResidual(nn.Module): + """ Residual block with expansion convolution followed by pointwise-linear w/ 
stride + + Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML` + - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + + This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers + * MobileDet - https://arxiv.org/abs/2004.14525 + * EfficientNet-X - https://arxiv.org/abs/2102.05610 + * EfficientNet-V2 - https://arxiv.org/abs/2104.00298 + """ + + def __init__( + self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, pad_type='', + force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.): + super(EdgeResidual, self).__init__() + if force_in_chs > 0: + mid_chs = make_divisible(force_in_chs * exp_ratio) + else: + mid_chs = make_divisible(in_chs * exp_ratio) + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Expansion convolution + self.conv_exp = create_conv2d( + in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(mid_chs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs) + + def feature_info(self, location): + if location == 'expansion': # after SE, before PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + shortcut = x + + # Expansion convolution + x = self.conv_exp(x) + x = self.bn1(x) + x = self.act1(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn2(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + + return x diff --git a/data_processing/MANIQA/timm/models/efficientnet_builder.py b/data_processing/MANIQA/timm/models/efficientnet_builder.py new file mode 100644 index 0000000..8192f4e --- /dev/null +++ b/data_processing/MANIQA/timm/models/efficientnet_builder.py @@ -0,0 +1,463 @@ +""" EfficientNet, MobileNetV3, etc Builder + +Assembles EfficieNet and related network feature blocks from string definitions. +Handles stride, dilation calculations, and selects feature extraction points. 
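A minimal usage sketch (illustrative only; the arch strings and channel counts below are
made-up examples rather than a real model definition, and `nn` refers to `torch.nn`):

    arch_def = [['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r2_k5_s2_e6_c40_se0.25']]
    block_args = decode_arch_def(arch_def, depth_multiplier=1.0)
    builder = EfficientNetBuilder(output_stride=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d)
    stages = builder(in_chs=32, model_block_args=block_args)  # list of nn.Sequential stacks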
+ +Hacked together by / Copyright 2019, Ross Wightman +""" + +import logging +import math +import re +from copy import deepcopy +from functools import partial + +import torch.nn as nn + +from .efficientnet_blocks import * +from .layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible + +__all__ = ["EfficientNetBuilder", "decode_arch_def", "efficientnet_init_weights", + 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] + +_logger = logging.getLogger(__name__) + + +_DEBUG_BUILDER = False + +# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per +# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay) +# NOTE: momentum varies btw .99 and .9997 depending on source +# .99 in official TF TPU impl +# .9997 (/w .999 in search space) for paper +BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 +BN_EPS_TF_DEFAULT = 1e-3 +_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) + + +def get_bn_args_tf(): + return _BN_ARGS_TF.copy() + + +def resolve_bn_args(kwargs): + bn_args = {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + + +def resolve_act_layer(kwargs, default='relu'): + return get_act_layer(kwargs.pop('act_layer', default)) + + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit) + + +def _log_info_if(msg, condition): + if condition: + _logger.info(msg) + + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + + +def _decode_block_str(block_str): + """ Decode block definition string + + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments. 
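    Example (illustrative values; the exact keys follow the parsing logic below):
        'ir_r2_k3_s2_e4_c64_se0.25' decodes to num_repeat=2 and block args roughly equal to
        dict(block_type='ir', dw_kernel_size=3, exp_kernel_size=1, pw_kernel_size=1,
             out_chs=64, exp_ratio=4.0, se_ratio=0.25, stride=2, act_layer=None, noskip=False)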
+ Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + skip = None + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + skip = False # force no skip connection + elif op == 'skip': + skip = True # force a skip connection + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = get_act_layer('relu') + elif v == 'r6': + value = get_act_layer('relu6') + elif v == 'hs': + value = get_act_layer('hard_swish') + elif v == 'sw': + value = get_act_layer('swish') # aka SiLU + elif v == 'mi': + value = get_act_layer('mish') + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or skip is False, + ) + elif block_type == 'er': + block_args = dict( + block_type=block_type, + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + force_in_chs=force_in_chs, + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + skip=skip is True, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. 
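    A worked example with illustrative numbers: repeats=[1, 2, 2] and depth_multiplier=1.5
    under the default 'ceil' truncation gives num_repeat_scaled = ceil(5 * 1.5) = 8; allocating
    in reverse order yields 2 -> 3, 2 -> 3, 1 -> 2, i.e. repeats_scaled = [2, 3, 3].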
+ """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False): + arch_args = [] + if isinstance(depth_multiplier, tuple): + assert len(depth_multiplier) == len(arch_def) + else: + depth_multiplier = (depth_multiplier,) * len(arch_def) + for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): + arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) + else: + arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) + return arch_args + + +class EfficientNetBuilder: + """ Build Trunk Blocks + + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and + https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + + """ + def __init__(self, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=False, + act_layer=None, norm_layer=None, se_layer=None, drop_path_rate=0., feature_location=''): + self.output_stride = output_stride + self.pad_type = pad_type + self.round_chs_fn = round_chs_fn + self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs + self.act_layer = act_layer + self.norm_layer = norm_layer + self.se_layer = get_attn(se_layer) + try: + self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg + self.se_has_ratio = True + except TypeError: + self.se_has_ratio = False + self.drop_path_rate = drop_path_rate + if feature_location == 
'depthwise': + # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense + _logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") + feature_location = 'expansion' + self.feature_location = feature_location + assert feature_location in ('bottleneck', 'expansion', '') + self.verbose = _DEBUG_BUILDER + + # state updated during build, consumed by model + self.in_chs = None + self.features = [] + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self.round_chs_fn(ba['out_chs']) + if 'force_in_chs' in ba and ba['force_in_chs']: + # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl + ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + ba['norm_layer'] = self.norm_layer + ba['drop_path_rate'] = drop_path_rate + if bt != 'cn': + se_ratio = ba.pop('se_ratio') + if se_ratio and self.se_layer is not None: + if not self.se_from_exp: + # adjust se_ratio by expansion ratio if calculating se channels from block input + se_ratio /= ba.get('exp_ratio', 1.0) + if self.se_has_ratio: + ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) + else: + ba['se_layer'] = self.se_layer + + if bt == 'ir': + _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = EdgeResidual(**ba) + elif bt == 'cn': + _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + _log_info_if('Building model trunk with %d stages...' 
% len(model_block_args), self.verbose) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + stages = [] + if model_block_args[0][0]['stride'] > 1: + # if the first block starts with a stride, we need to extract first level feat from stem + feature_info = dict( + module='act1', num_chs=in_chs, stage=0, reduction=current_stride, + hook_type='forward' if self.feature_location != 'bottleneck' else '') + self.features.append(feature_info) + + # outer list of block_args defines the stacks + for stack_idx, stack_args in enumerate(model_block_args): + last_stack = stack_idx + 1 == len(model_block_args) + _log_info_if('Stack: {}'.format(stack_idx), self.verbose) + assert isinstance(stack_args, list) + + blocks = [] + # each stack (stage of blocks) contains a list of block arguments + for block_idx, block_args in enumerate(stack_args): + last_block = block_idx + 1 == len(stack_args) + _log_info_if(' Block: {}'.format(block_idx), self.verbose) + + assert block_args['stride'] in (1, 2) + if block_idx >= 1: # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + extract_features = False + if last_block: + next_stack_idx = stack_idx + 1 + extract_features = next_stack_idx >= len(model_block_args) or \ + model_block_args[next_stack_idx][0]['stride'] > 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride), self.verbose) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_info = dict( + stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location)) + module_name = f'blocks.{stack_idx}.{block_idx}' + leaf_name = feature_info.get('module', '') + feature_info['module'] = '.'.join([module_name, leaf_name]) if leaf_name else module_name + self.features.append(feature_info) + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. 
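    For example (illustrative numbers only), a plain 3x3 conv with 64 output channels and
    groups=1 has fan_out = 3 * 3 * 64 = 576, so its weights are drawn from
    N(0, sqrt(2 / 576)) ~= N(0, 0.059) under this scheme.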
+ + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + nn.init.uniform_(m.weight, -init_range, init_range) + nn.init.zeros_(m.bias) + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) + diff --git a/data_processing/MANIQA/timm/models/factory.py b/data_processing/MANIQA/timm/models/factory.py new file mode 100644 index 0000000..6d3fd98 --- /dev/null +++ b/data_processing/MANIQA/timm/models/factory.py @@ -0,0 +1,79 @@ +from .registry import is_model, is_model_in_modules, model_entrypoint +from .helpers import load_checkpoint +from .layers import set_layer_config +from .hub import load_model_config_from_hf + + +def split_model_name(model_name): + model_split = model_name.split(':', 1) + if len(model_split) == 1: + return '', model_split[0] + else: + source_name, model_name = model_split + assert source_name in ('timm', 'hf_hub') + return source_name, model_name + + +def safe_model_name(model_name, remove_source=True): + def make_safe(name): + return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') + if remove_source: + model_name = split_model_name(model_name)[-1] + return make_safe(model_name) + + +def create_model( + model_name, + pretrained=False, + checkpoint_path='', + scriptable=None, + exportable=None, + no_jit=None, + **kwargs): + """Create a model + + Args: + model_name (str): name of model to instantiate + pretrained (bool): load pretrained ImageNet-1k weights if true + checkpoint_path (str): path of checkpoint to load after model is initialized + scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet) + exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet) + no_jit (bool): set layer config so that model doesn't utilize jit scripted layers (so far activations only) + + Keyword Args: + drop_rate (float): dropout rate for training (default: 0.0) + global_pool (str): global pool type (default: 'avg') + **: other kwargs are model specific + """ + source_name, model_name = 
split_model_name(model_name) + + # handle backwards compat with drop_connect -> drop_path change + drop_connect_rate = kwargs.pop('drop_connect_rate', None) + if drop_connect_rate is not None and kwargs.get('drop_path_rate', None) is None: + print("WARNING: 'drop_connect' as an argument is deprecated, please use 'drop_path'." + " Setting drop_path to %f." % drop_connect_rate) + kwargs['drop_path_rate'] = drop_connect_rate + + # Parameters that aren't supported by all models or are intended to only override model defaults if set + # should default to None in command line args/cfg. Remove them if they are present and not set so that + # non-supporting models don't break and default args remain in effect. + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + if source_name == 'hf_hub': + # For model names specified in the form `hf_hub:path/architecture_name#revision`, + # load model weights + default_cfg from Hugging Face hub. + hf_default_cfg, model_name = load_model_config_from_hf(model_name) + kwargs['external_default_cfg'] = hf_default_cfg # FIXME revamp default_cfg interface someday + + if is_model(model_name): + create_fn = model_entrypoint(model_name) + else: + raise RuntimeError('Unknown model (%s)' % model_name) + + with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): + model = create_fn(pretrained=pretrained, **kwargs) + + if checkpoint_path: + load_checkpoint(model, checkpoint_path) + + return model diff --git a/data_processing/MANIQA/timm/models/features.py b/data_processing/MANIQA/timm/models/features.py new file mode 100644 index 0000000..b1d6890 --- /dev/null +++ b/data_processing/MANIQA/timm/models/features.py @@ -0,0 +1,284 @@ +""" PyTorch Feature Extraction Helpers + +A collection of classes, functions, modules to help extract features from models +and provide a common interface for describing them. 
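A minimal sketch of intended usage (illustrative; assumes `backbone` exposes a compatible
`feature_info` attribute, as the models in this package do):

    feat_net = FeatureListNet(backbone, out_indices=(1, 2, 3, 4), flatten_sequential=True)
    feats = feat_net(torch.randn(1, 3, 224, 224))  # list of feature maps, shallow to deep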
+ +The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter +https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict, defaultdict +from copy import deepcopy +from functools import partial +from typing import Dict, List, Tuple + +import torch +import torch.nn as nn + + +class FeatureInfo: + + def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]): + prev_reduction = 1 + for fi in feature_info: + # sanity check the mandatory fields, there may be additional fields depending on the model + assert 'num_chs' in fi and fi['num_chs'] > 0 + assert 'reduction' in fi and fi['reduction'] >= prev_reduction + prev_reduction = fi['reduction'] + assert 'module' in fi + self.out_indices = out_indices + self.info = feature_info + + def from_other(self, out_indices: Tuple[int]): + return FeatureInfo(deepcopy(self.info), out_indices) + + def get(self, key, idx=None): + """ Get value by key at specified index (indices) + if idx == None, returns value for key at each output index + if idx is an integer, return value for that feature module index (ignoring output indices) + if idx is a list/tupple, return value for each module index (ignoring output indices) + """ + if idx is None: + return [self.info[i][key] for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i][key] for i in idx] + else: + return self.info[idx][key] + + def get_dicts(self, keys=None, idx=None): + """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None) + """ + if idx is None: + if keys is None: + return [self.info[i] for i in self.out_indices] + else: + return [{k: self.info[i][k] for k in keys} for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx] + else: + return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys} + + def channels(self, idx=None): + """ feature channels accessor + """ + return self.get('num_chs', idx) + + def reduction(self, idx=None): + """ feature reduction (output stride) accessor + """ + return self.get('reduction', idx) + + def module_name(self, idx=None): + """ feature module name accessor + """ + return self.get('module', idx) + + def __getitem__(self, item): + return self.info[item] + + def __len__(self): + return len(self.info) + + +class FeatureHooks: + """ Feature Hook Helper + + This module helps with the setup and extraction of hooks for extracting features from + internal nodes in a model by node name. This works quite well in eager Python but needs + redesign for torcscript. 
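    A minimal sketch (module names are hypothetical and must exist inside the wrapped model):

        hooks = [dict(module='blocks.1', hook_type='forward'), dict(module='blocks.3')]
        feature_hooks = FeatureHooks(hooks, model.named_modules())
        _ = model(x)                              # run a forward pass to populate the hooks
        out = feature_hooks.get_output(x.device)  # OrderedDict keyed by module name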
+ """ + + def __init__(self, hooks, named_modules, out_map=None, default_hook_type='forward'): + # setup feature hooks + modules = {k: v for k, v in named_modules} + for i, h in enumerate(hooks): + hook_name = h['module'] + m = modules[hook_name] + hook_id = out_map[i] if out_map else hook_name + hook_fn = partial(self._collect_output_hook, hook_id) + hook_type = h['hook_type'] if 'hook_type' in h else default_hook_type + if hook_type == 'forward_pre': + m.register_forward_pre_hook(hook_fn) + elif hook_type == 'forward': + m.register_forward_hook(hook_fn) + else: + assert False, "Unsupported hook type" + self._feature_outputs = defaultdict(OrderedDict) + + def _collect_output_hook(self, hook_id, *args): + x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre + if isinstance(x, tuple): + x = x[0] # unwrap input tuple + self._feature_outputs[x.device][hook_id] = x + + def get_output(self, device) -> Dict[str, torch.tensor]: + output = self._feature_outputs[device] + self._feature_outputs[device] = OrderedDict() # clear after reading + return output + + +def _module_list(module, flatten_sequential=False): + # a yield/iter would be better for this but wouldn't be compatible with torchscript + ml = [] + for name, module in module.named_children(): + if flatten_sequential and isinstance(module, nn.Sequential): + # first level of Sequential containers is flattened into containing model + for child_name, child_module in module.named_children(): + combined = [name, child_name] + ml.append(('_'.join(combined), '.'.join(combined), child_module)) + else: + ml.append((name, name, module)) + return ml + + +def _get_feature_info(net, out_indices): + feature_info = getattr(net, 'feature_info') + if isinstance(feature_info, FeatureInfo): + return feature_info.from_other(out_indices) + elif isinstance(feature_info, (list, tuple)): + return FeatureInfo(net.feature_info, out_indices) + else: + assert False, "Provided feature_info is not valid" + + +def _get_return_layers(feature_info, out_map): + module_names = feature_info.module_name() + return_layers = {} + for i, name in enumerate(module_names): + return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] + return return_layers + + +class FeatureDictNet(nn.ModuleDict): + """ Feature extractor with OrderedDict return + + Wrap a model and extract features as specified by the out indices, the network is + partially re-built from contained modules. + + There is a strong assumption that the modules have been registered into the model in the same + order as they are used. There should be no reuse of the same nn.Module more than once, including + trivial modules like `self.relu = nn.ReLU`. + + Only submodules that are directly assigned to the model class (`model.feature1`) or at most + one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured. 
+ All Sequential containers that are directly assigned to the original model will have their + modules assigned to this module with the name `model.features.1` being changed to `model.features_1` + + Arguments: + model (nn.Module): model from which we will extract the features + out_indices (tuple[int]): model output indices to extract features for + out_map (sequence): list or tuple specifying desired return id for each out index, + otherwise str(index) is used + feature_concat (bool): whether to concatenate intermediate features that are lists or tuples + vs select element [0] + flatten_sequential (bool): whether to flatten sequential modules assigned to model + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureDictNet, self).__init__() + self.feature_info = _get_feature_info(model, out_indices) + self.concat = feature_concat + self.return_layers = {} + return_layers = _get_return_layers(self.feature_info, out_map) + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = set(return_layers.keys()) + layers = OrderedDict() + for new_name, old_name, module in modules: + layers[new_name] = module + if old_name in remaining: + # return id has to be consistently str type for torchscript + self.return_layers[new_name] = str(return_layers[old_name]) + remaining.remove(old_name) + if not remaining: + break + assert not remaining and len(self.return_layers) == len(return_layers), \ + f'Return layers ({remaining}) are not present in model' + self.update(layers) + + def _collect(self, x) -> (Dict[str, torch.Tensor]): + out = OrderedDict() + for name, module in self.items(): + x = module(x) + if name in self.return_layers: + out_id = self.return_layers[name] + if isinstance(x, (tuple, list)): + # If model tap is a tuple or list, concat or select first element + # FIXME this may need to be more generic / flexible for some nets + out[out_id] = torch.cat(x, 1) if self.concat else x[0] + else: + out[out_id] = x + return out + + def forward(self, x) -> Dict[str, torch.Tensor]: + return self._collect(x) + + +class FeatureListNet(FeatureDictNet): + """ Feature extractor with list return + + See docstring for FeatureDictNet above, this class exists only to appease Torchscript typing constraints. + In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool. + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureListNet, self).__init__( + model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat, + flatten_sequential=flatten_sequential) + + def forward(self, x) -> (List[torch.Tensor]): + return list(self._collect(x).values()) + + +class FeatureHookNet(nn.ModuleDict): + """ FeatureHookNet + + Wrap a model and extract features specified by the out indices using forward/forward-pre hooks. + + If `no_rewrite` is True, features are extracted via hooks without modifying the underlying + network in any way. + + If `no_rewrite` is False, the model will be re-written as in the + FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one. 
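    A minimal sketch (illustrative; assumes `model` exposes a compatible `feature_info`):

        hook_net = FeatureHookNet(model, out_indices=(2, 3, 4), out_as_dict=True)
        feats = hook_net(torch.randn(1, 3, 224, 224))  # dict of hooked feature maps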
+ + FIXME this does not currently work with Torchscript, see FeatureHooks class + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False, + feature_concat=False, flatten_sequential=False, default_hook_type='forward'): + super(FeatureHookNet, self).__init__() + assert not torch.jit.is_scripting() + self.feature_info = _get_feature_info(model, out_indices) + self.out_as_dict = out_as_dict + layers = OrderedDict() + hooks = [] + if no_rewrite: + assert not flatten_sequential + if hasattr(model, 'reset_classifier'): # make sure classifier is removed? + model.reset_classifier(0) + layers['body'] = model + hooks.extend(self.feature_info.get_dicts()) + else: + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = {f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type + for f in self.feature_info.get_dicts()} + for new_name, old_name, module in modules: + layers[new_name] = module + for fn, fm in module.named_modules(prefix=old_name): + if fn in remaining: + hooks.append(dict(module=fn, hook_type=remaining[fn])) + del remaining[fn] + if not remaining: + break + assert not remaining, f'Return layers ({remaining}) are not present in model' + self.update(layers) + self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map) + + def forward(self, x): + for name, module in self.items(): + x = module(x) + out = self.hooks.get_output(x.device) + return out if self.out_as_dict else list(out.values()) diff --git a/data_processing/MANIQA/timm/models/fx_features.py b/data_processing/MANIQA/timm/models/fx_features.py new file mode 100644 index 0000000..5a25ee3 --- /dev/null +++ b/data_processing/MANIQA/timm/models/fx_features.py @@ -0,0 +1,73 @@ +""" PyTorch FX Based Feature Extraction Helpers +Using https://pytorch.org/vision/stable/feature_extraction.html +""" +from typing import Callable +from torch import nn + +from .features import _get_feature_info + +try: + from torchvision.models.feature_extraction import create_feature_extractor + has_fx_feature_extraction = True +except ImportError: + has_fx_feature_extraction = False + +# Layers we went to treat as leaf modules +from .layers import Conv2dSame, ScaledStdConv2dSame, BatchNormAct2d, BlurPool2d, CondConv2d, StdConv2dSame, DropPath +from .layers.non_local_attn import BilinearAttnTransform +from .layers.pool2d_same import MaxPool2dSame, AvgPool2dSame + +# NOTE: By default, any modules from timm.models.layers that we want to treat as leaf modules go here +# BUT modules from timm.models should use the registration mechanism below +_leaf_modules = { + BatchNormAct2d, # reason: flow control for jit scripting + BilinearAttnTransform, # reason: flow control t <= 1 + BlurPool2d, # reason: TypeError: F.conv2d received Proxy in groups=x.shape[1] + # Reason: get_same_padding has a max which raises a control flow error + Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame, + CondConv2d, # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0]) + DropPath, # reason: TypeError: rand recieved Proxy in `size` argument +} + +try: + from .layers import InplaceAbn + _leaf_modules.add(InplaceAbn) +except ImportError: + pass + + +def register_notrace_module(module: nn.Module): + """ + Any module not under timm.models.layers should get this decorator if we don't want to trace through it. 
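    A minimal sketch (the module class here is hypothetical; data-dependent control flow in
    `forward` is exactly the kind of thing that should stay untraced):

        @register_notrace_module
        class MyControlFlowBlock(nn.Module):
            def forward(self, x):
                return x if x.shape[1] > 1 else x * 2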
+ """ + _leaf_modules.add(module) + return module + + +# Functions we want to autowrap (treat them as leaves) +_autowrap_functions = set() + + +def register_notrace_function(func: Callable): + """ + Decorator for functions which ought not to be traced through + """ + _autowrap_functions.add(func) + return func + + +class FeatureGraphNet(nn.Module): + def __init__(self, model, out_indices, out_map=None): + super().__init__() + assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' + self.feature_info = _get_feature_info(model, out_indices) + if out_map is not None: + assert len(out_map) == len(out_indices) + return_nodes = {info['module']: out_map[i] if out_map is not None else info['module'] + for i, info in enumerate(self.feature_info) if i in out_indices} + self.graph_module = create_feature_extractor( + model, return_nodes, + tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}) + + def forward(self, x): + return list(self.graph_module(x).values()) diff --git a/data_processing/MANIQA/timm/models/ghostnet.py b/data_processing/MANIQA/timm/models/ghostnet.py new file mode 100644 index 0000000..3b6f90a --- /dev/null +++ b/data_processing/MANIQA/timm/models/ghostnet.py @@ -0,0 +1,276 @@ +""" +An implementation of GhostNet Model as defined in: +GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907 +The train script of the model is similar to that of MobileNetV3 +Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import SelectAdaptivePool2d, Linear, make_divisible +from .efficientnet_blocks import SqueezeExcite, ConvBnAct +from .helpers import build_model_with_cfg +from .registry import register_model + + +__all__ = ['GhostNet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'ghostnet_050': _cfg(url=''), + 'ghostnet_100': _cfg( + url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'), + 'ghostnet_130': _cfg(url=''), +} + + +_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) + + +class GhostModule(nn.Module): + def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True): + super(GhostModule, self).__init__() + self.oup = oup + init_channels = math.ceil(oup / ratio) + new_channels = init_channels * (ratio - 1) + + self.primary_conv = nn.Sequential( + nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False), + nn.BatchNorm2d(init_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False), + nn.BatchNorm2d(new_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + def 
forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + out = torch.cat([x1, x2], dim=1) + return out[:, :self.oup, :, :] + + +class GhostBottleneck(nn.Module): + """ Ghost bottleneck w/ optional SE""" + + def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, + stride=1, act_layer=nn.ReLU, se_ratio=0.): + super(GhostBottleneck, self).__init__() + has_se = se_ratio is not None and se_ratio > 0. + self.stride = stride + + # Point-wise expansion + self.ghost1 = GhostModule(in_chs, mid_chs, relu=True) + + # Depth-wise convolution + if self.stride > 1: + self.conv_dw = nn.Conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) + self.bn_dw = nn.BatchNorm2d(mid_chs) + else: + self.conv_dw = None + self.bn_dw = None + + # Squeeze-and-excitation + self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None + + # Point-wise linear projection + self.ghost2 = GhostModule(mid_chs, out_chs, relu=False) + + # shortcut + if in_chs == out_chs and self.stride == 1: + self.shortcut = nn.Sequential() + else: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), + nn.BatchNorm2d(in_chs), + nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + shortcut = x + + # 1st ghost bottleneck + x = self.ghost1(x) + + # Depth-wise convolution + if self.conv_dw is not None: + x = self.conv_dw(x) + x = self.bn_dw(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # 2nd ghost bottleneck + x = self.ghost2(x) + + x += self.shortcut(shortcut) + return x + + +class GhostNet(nn.Module): + def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2, in_chans=3, output_stride=32, global_pool='avg'): + super(GhostNet, self).__init__() + # setting of inverted residual blocks + assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' + self.cfgs = cfgs + self.num_classes = num_classes + self.dropout = dropout + self.feature_info = [] + + # building first layer + stem_chs = make_divisible(16 * width, 4) + self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) + self.bn1 = nn.BatchNorm2d(stem_chs) + self.act1 = nn.ReLU(inplace=True) + prev_chs = stem_chs + + # building inverted residual blocks + stages = nn.ModuleList([]) + block = GhostBottleneck + stage_idx = 0 + net_stride = 2 + for cfg in self.cfgs: + layers = [] + s = 1 + for k, exp_size, c, se_ratio, s in cfg: + out_chs = make_divisible(c * width, 4) + mid_chs = make_divisible(exp_size * width, 4) + layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio)) + prev_chs = out_chs + if s > 1: + net_stride *= 2 + self.feature_info.append(dict( + num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) + stages.append(nn.Sequential(*layers)) + stage_idx += 1 + + out_chs = make_divisible(exp_size * width, 4) + stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) + self.pool_dim = prev_chs = out_chs + + self.blocks = nn.Sequential(*stages) + + # building last several layers + self.num_features = out_chs = 1280 + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) + self.act2 = nn.ReLU(inplace=True) + self.flatten = nn.Flatten(1) if global_pool 
else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.pool_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.dropout > 0.: + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.classifier(x) + return x + + +def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): + """ + Constructs a GhostNet model + """ + cfgs = [ + # k, t, c, SE, s + # stage1 + [[3, 16, 16, 0, 1]], + # stage2 + [[3, 48, 24, 0, 2]], + [[3, 72, 24, 0, 1]], + # stage3 + [[5, 72, 40, 0.25, 2]], + [[5, 120, 40, 0.25, 1]], + # stage4 + [[3, 240, 80, 0, 2]], + [[3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 0.25, 1], + [3, 672, 112, 0.25, 1] + ], + # stage5 + [[5, 672, 160, 0.25, 2]], + [[5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1] + ] + ] + model_kwargs = dict( + cfgs=cfgs, + width=width, + **kwargs, + ) + return build_model_with_cfg( + GhostNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **model_kwargs) + + +@register_model +def ghostnet_050(pretrained=False, **kwargs): + """ GhostNet-0.5x """ + model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_100(pretrained=False, **kwargs): + """ GhostNet-1.0x """ + model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_130(pretrained=False, **kwargs): + """ GhostNet-1.3x """ + model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/gluon_resnet.py b/data_processing/MANIQA/timm/models/gluon_resnet.py new file mode 100644 index 0000000..027a10b --- /dev/null +++ b/data_processing/MANIQA/timm/models/gluon_resnet.py @@ -0,0 +1,248 @@ +"""Pytorch impl of MxNet Gluon ResNet/(SE)ResNeXt variants +This file evolved from https://github.com/pytorch/vision 'resnet.py' with (SE)-ResNeXt additions +and ports of Gluon variations (https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnet.py) +by Ross Wightman +""" + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SEModule +from .registry import register_model +from .resnet import ResNet, Bottleneck, BasicBlock + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': 
IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'gluon_resnet18_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), + 'gluon_resnet34_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), + 'gluon_resnet50_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), + 'gluon_resnet101_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), + 'gluon_resnet152_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), + 'gluon_resnet50_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', + first_conv='conv1.0'), + 'gluon_resnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), + 'gluon_resnext101_32x4d': 
_cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), + 'gluon_resnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), + 'gluon_seresnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), + 'gluon_seresnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), + 'gluon_seresnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), + 'gluon_senet154': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', + first_conv='conv1.0'), +} + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def gluon_resnet18_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('gluon_resnet18_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet34_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet34_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet50_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('gluon_resnet101_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('gluon_resnet152_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. 
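+
+    Illustrative usage only (assuming this vendored ``timm`` package is importable,
+    e.g. with ``data_processing/MANIQA`` on ``sys.path``): the variant is registered
+    via ``@register_model``, so it can be built through the standard factory:
+
+        >>> import timm
+        >>> model = timm.create_model('gluon_resnet152_v1c', pretrained=False)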
+ """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet50_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet101_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet152_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnet101_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1s', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt50-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-32x4d model. 
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_64x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-64x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_senet154(pretrained=False, **kwargs): + """Constructs an SENet-154 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_senet154', pretrained, **model_args) diff --git a/data_processing/MANIQA/timm/models/gluon_xception.py b/data_processing/MANIQA/timm/models/gluon_xception.py new file mode 100644 index 0000000..fbd668a --- /dev/null +++ b/data_processing/MANIQA/timm/models/gluon_xception.py @@ -0,0 +1,246 @@ +"""Pytorch impl of Gluon Xception +This is a port of the Gluon Xception code and weights, itself ported from a PyTorch DeepLab impl. + +Gluon model: (https://gluon-cv.mxnet.io/_modules/gluoncv/model_zoo/xception.html) +Original PyTorch DeepLab impl: https://github.com/jfzhang95/pytorch-deeplab-xception + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier, get_padding +from .registry import register_model + +__all__ = ['Xception65'] + +default_cfgs = { + 'gluon_xception65': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth', + 'input_size': (3, 299, 299), + 'crop_pct': 0.903, + 'pool_size': (10, 10), + 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + }, +} + +""" PADDING NOTES +The original PyTorch and Gluon impl of these models dutifully reproduced the +aligned padding added to Tensorflow models for Deeplab. This padding was compensating +for Tensorflow 'SAME' padding. PyTorch symmetric padding behaves the way we'd want it to. 
+""" + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + padding = get_padding(kernel_size, stride, dilation) + self.conv_dw = nn.Conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(num_features=inplanes) + # pointwise convolution + self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias) + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn(x) + x = self.conv_pw(x) + return x + + +class Block(nn.Module): + def __init__(self, inplanes, planes, stride=1, dilation=1, start_with_relu=True, norm_layer=None): + super(Block, self).__init__() + if isinstance(planes, (list, tuple)): + assert len(planes) == 3 + else: + planes = (planes,) * 3 + outplanes = planes[-1] + + if outplanes != inplanes or stride != 1: + self.skip = nn.Sequential() + self.skip.add_module('conv1', nn.Conv2d( + inplanes, outplanes, 1, stride=stride, bias=False)), + self.skip.add_module('bn1', norm_layer(num_features=outplanes)) + else: + self.skip = None + + rep = OrderedDict() + for i in range(3): + rep['act%d' % (i + 1)] = nn.ReLU(inplace=True) + rep['conv%d' % (i + 1)] = SeparableConv2d( + inplanes, planes[i], 3, stride=stride if i == 2 else 1, dilation=dilation, norm_layer=norm_layer) + rep['bn%d' % (i + 1)] = norm_layer(planes[i]) + inplanes = planes[i] + + if not start_with_relu: + del rep['act1'] + else: + rep['act1'] = nn.ReLU(inplace=False) + self.rep = nn.Sequential(rep) + + def forward(self, x): + skip = x + if self.skip is not None: + skip = self.skip(skip) + x = self.rep(x) + skip + return x + + +class Xception65(nn.Module): + """Modified Aligned Xception. 
+ + NOTE: only the 65 layer version is included here, the 71 layer variant + was not correct and had no pretrained weights + """ + + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d, + drop_rate=0., global_pool='avg'): + super(Xception65, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_dilation = 1 + exit_dilation = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_dilation = 1 + exit_dilation = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_dilation = 2 + exit_dilation = (2, 4) + else: + raise NotImplementedError + + # Entry flow + self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = norm_layer(num_features=32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = norm_layer(num_features=64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block1_act = nn.ReLU(inplace=True) + self.block2 = Block(128, 256, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block3 = Block(256, 728, stride=entry_block3_stride, norm_layer=norm_layer) + + # Middle flow + self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block( + 728, 728, stride=1, dilation=middle_dilation, norm_layer=norm_layer)) for i in range(4, 20)])) + + # Exit flow + self.block20 = Block( + 728, (728, 1024, 1024), stride=exit_block20_stride, dilation=exit_dilation[0], norm_layer=norm_layer) + self.block20_act = nn.ReLU(inplace=True) + + self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn3 = norm_layer(num_features=1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn4 = norm_layer(num_features=1536) + self.act4 = nn.ReLU(inplace=True) + + self.num_features = 2048 + self.conv5 = SeparableConv2d( + 1536, self.num_features, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn5 = norm_layer(num_features=self.num_features) + self.act5 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block1_act'), + dict(num_chs=256, reduction=8, module='block3.rep.act1'), + dict(num_chs=728, reduction=16, module='block20.rep.act1'), + dict(num_chs=2048, reduction=32, module='act5'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block1_act(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.mid(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.block20_act(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + 
x = self.act4(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.act5(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + F.dropout(x, self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _create_gluon_xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + Xception65, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook'), + **kwargs) + + +@register_model +def gluon_xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + return _create_gluon_xception('gluon_xception65', pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/hardcorenas.py b/data_processing/MANIQA/timm/models/hardcorenas.py new file mode 100644 index 0000000..9988a04 --- /dev/null +++ b/data_processing/MANIQA/timm/models/hardcorenas.py @@ -0,0 +1,152 @@ +from functools import partial + +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import get_act_fn +from .mobilenetv3 import MobileNetV3, MobileNetV3Features +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hardcorenas_a': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_A_Green_38ms_75.9_23474aeb.pth'), + 'hardcorenas_b': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_B_Green_40ms_76.5_1f882d1e.pth'), + 'hardcorenas_c': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_C_Green_44ms_77.1_d4148c9e.pth'), + 'hardcorenas_d': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_D_Green_50ms_77.4_23e3cdde.pth'), + 'hardcorenas_e': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_E_Green_55ms_77.9_90f20e8a.pth'), + 'hardcorenas_f': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_F_Green_60ms_78.1_2855edf1.pth'), +} + + +def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs): + """Creates a hardcorenas model + + Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS + Paper: https://arxiv.org/abs/2102.11646 + + """ + num_features = 1280 + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=32, + norm_layer=partial(nn.BatchNorm2d, 
**resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=se_layer, + **kwargs, + ) + + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if model_kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hardcorenas_a(pretrained=False, **kwargs): + """ hardcorenas_A """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_b(pretrained=False, **kwargs): + """ hardcorenas_B """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], + ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_c(pretrained=False, **kwargs): + """ hardcorenas_C """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', + 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_d(pretrained=False, **kwargs): + """ hardcorenas_D """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], + ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', 
arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_e(pretrained=False, **kwargs): + """ hardcorenas_E """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', + 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_f(pretrained=False, **kwargs): + """ hardcorenas_F """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k3_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/helpers.py b/data_processing/MANIQA/timm/models/helpers.py new file mode 100644 index 0000000..880fcc6 --- /dev/null +++ b/data_processing/MANIQA/timm/models/helpers.py @@ -0,0 +1,518 @@ +""" Model creation / weight loading / state_dict helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import os +import math +from collections import OrderedDict +from copy import deepcopy +from typing import Any, Callable, Optional, Tuple + +import torch +import torch.nn as nn +from torch.hub import load_state_dict_from_url + +from .features import FeatureListNet, FeatureDictNet, FeatureHookNet +from .fx_features import FeatureGraphNet +from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf +from .layers import Conv2dSame, Linear + + +_logger = logging.getLogger(__name__) + + +def load_state_dict(checkpoint_path, use_ema=False): + if checkpoint_path and os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict_key = '' + if isinstance(checkpoint, dict): + if use_ema and checkpoint.get('state_dict_ema', None) is not None: + state_dict_key = 'state_dict_ema' + elif use_ema and checkpoint.get('model_ema', None) is not None: + state_dict_key = 'model_ema' + elif 'state_dict' in checkpoint: + state_dict_key = 'state_dict' + elif 'model' in checkpoint: + state_dict_key = 'model' + if state_dict_key: + state_dict = checkpoint[state_dict_key] + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + # strip `module.` prefix + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + state_dict = new_state_dict + else: + state_dict = checkpoint + _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) + return state_dict + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise 
FileNotFoundError() + + +def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True): + if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): + # numpy checkpoint, try to load via model specific load_pretrained fn + if hasattr(model, 'load_pretrained'): + model.load_pretrained(checkpoint_path) + else: + raise NotImplementedError('Model cannot load numpy checkpoint') + return + state_dict = load_state_dict(checkpoint_path, use_ema) + model.load_state_dict(state_dict, strict=strict) + + +def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True): + resume_epoch = None + if os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + if log_info: + _logger.info('Restoring model state from checkpoint...') + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict'].items(): + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + + if optimizer is not None and 'optimizer' in checkpoint: + if log_info: + _logger.info('Restoring optimizer state from checkpoint...') + optimizer.load_state_dict(checkpoint['optimizer']) + + if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: + if log_info: + _logger.info('Restoring AMP loss scaler state from checkpoint...') + loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) + + if 'epoch' in checkpoint: + resume_epoch = checkpoint['epoch'] + if 'version' in checkpoint and checkpoint['version'] > 1: + resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save + + if log_info: + _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) + else: + model.load_state_dict(checkpoint) + if log_info: + _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) + return resume_epoch + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False): + r"""Loads a custom (read non .pth) weight file + + Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls + a passed in custom load fun, or the `load_pretrained` model member fn. + + If the object is already present in `model_dir`, it's deserialized and returned. + The default value of `model_dir` is ``/checkpoints`` where + `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`. + + Args: + model: The instantiated model to load weights into + default_cfg (dict): Default pretrained model cfg + load_fn: An external stand alone fn that loads weights into provided model, otherwise a fn named + 'laod_pretrained' on the model will be called if it exists + progress (bool, optional): whether or not to display a progress bar to stderr. Default: False + check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention + ``filename-.ext`` where ```` is the first eight or more + digits of the SHA256 hash of the contents of the file. The hash is used to + ensure unique names and to verify the contents of the file. Default: False + """ + default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {} + pretrained_url = default_cfg.get('url', None) + if not pretrained_url: + _logger.warning("No pretrained weights exist for this model. 
Using random initialization.") + return + cached_file = download_cached_file(default_cfg['url'], check_hash=check_hash, progress=progress) + + if load_fn is not None: + load_fn(model, cached_file) + elif hasattr(model, 'load_pretrained'): + model.load_pretrained(cached_file) + else: + _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") + + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight + + +def load_pretrained(model, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False): + """ Load pretrained checkpoint + + Args: + model (nn.Module) : PyTorch model module + default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset + num_classes (int): num_classes for model + in_chans (int): in_chans for model + filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args) + strict (bool): strict load of checkpoint + progress (bool): enable progress bar for weight download + + """ + default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {} + pretrained_url = default_cfg.get('url', None) + hf_hub_id = default_cfg.get('hf_hub', None) + if not pretrained_url and not hf_hub_id: + _logger.warning("No pretrained weights exist for this model. 
Using random initialization.") + return + if pretrained_url: + _logger.info(f'Loading pretrained weights from url ({pretrained_url})') + state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu') + elif hf_hub_id and has_hf_hub(necessary=True): + _logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})') + state_dict = load_state_dict_from_hf(hf_hub_id) + if filter_fn is not None: + # for backwards compat with filter fn that take one arg, try one first, the two + try: + state_dict = filter_fn(state_dict) + except TypeError: + state_dict = filter_fn(state_dict, model) + + input_convs = default_cfg.get('first_conv', None) + if input_convs is not None and in_chans != 3: + if isinstance(input_convs, str): + input_convs = (input_convs,) + for input_conv_name in input_convs: + weight_name = input_conv_name + '.weight' + try: + state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) + _logger.info( + f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') + except NotImplementedError as e: + del state_dict[weight_name] + strict = False + _logger.warning( + f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') + + classifiers = default_cfg.get('classifier', None) + label_offset = default_cfg.get('label_offset', 0) + if classifiers is not None: + if isinstance(classifiers, str): + classifiers = (classifiers,) + if num_classes != default_cfg['num_classes']: + for classifier_name in classifiers: + # completely discard fully connected if model num_classes doesn't match pretrained weights + state_dict.pop(classifier_name + '.weight', None) + state_dict.pop(classifier_name + '.bias', None) + strict = False + elif label_offset > 0: + for classifier_name in classifiers: + # special case for pretrained weights with an extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] + + model.load_state_dict(state_dict, strict=strict) + + +def extract_layer(model, layer): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + if not hasattr(model, 'module') and layer[0] == 'module': + layer = layer[1:] + for l in layer: + if hasattr(module, l): + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + else: + return module + return module + + +def set_layer(model, layer, val): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + lst_index = 0 + module2 = module + for l in layer: + if hasattr(module2, l): + if not l.isdigit(): + module2 = getattr(module2, l) + else: + module2 = module2[int(l)] + lst_index += 1 + lst_index -= 1 + for l in layer[:lst_index]: + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + l = layer[lst_index] + setattr(module, l, val) + + +def adapt_model_from_string(parent_module, model_string): + separator = '***' + state_dict = {} + lst_shape = model_string.split(separator) + for k in lst_shape: + k = k.split(':') + key = k[0] + shape = k[1][1:-1].split(',') + if shape[0] != '': + state_dict[key] = [int(i) for i in shape] + + new_module = deepcopy(parent_module) + for n, m in 
parent_module.named_modules(): + old_module = extract_layer(parent_module, n) + if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame): + if isinstance(old_module, Conv2dSame): + conv = Conv2dSame + else: + conv = nn.Conv2d + s = state_dict[n + '.weight'] + in_channels = s[1] + out_channels = s[0] + g = 1 + if old_module.groups > 1: + in_channels = out_channels + g = in_channels + new_conv = conv( + in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size, + bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation, + groups=g, stride=old_module.stride) + set_layer(new_module, n, new_conv) + if isinstance(old_module, nn.BatchNorm2d): + new_bn = nn.BatchNorm2d( + num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + set_layer(new_module, n, new_bn) + if isinstance(old_module, nn.Linear): + # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer? + num_features = state_dict[n + '.weight'][1] + new_fc = Linear( + in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None) + set_layer(new_module, n, new_fc) + if hasattr(new_module, 'num_features'): + new_module.num_features = num_features + new_module.eval() + parent_module.eval() + + return new_module + + +def adapt_model_from_file(parent_module, model_variant): + adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt') + with open(adapt_file, 'r') as f: + return adapt_model_from_string(parent_module, f.read().strip()) + + +def default_cfg_for_features(default_cfg): + default_cfg = deepcopy(default_cfg) + # remove default pretrained cfg fields that don't have much relevance for feature backbone + to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size? + for tr in to_remove: + default_cfg.pop(tr, None) + return default_cfg + + +def overlay_external_default_cfg(default_cfg, kwargs): + """ Overlay 'external_default_cfg' in kwargs on top of default_cfg arg. 
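+
+    When an external cfg is supplied (e.g. one loaded alongside hub weights), it takes
+    precedence: the built-in 'url' / 'hf_hub' entries are dropped first so that the
+    weight source always comes from the external cfg.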
+ """ + external_default_cfg = kwargs.pop('external_default_cfg', None) + if external_default_cfg: + default_cfg.pop('url', None) # url should come from external cfg + default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg + default_cfg.update(external_default_cfg) + + +def set_default_kwargs(kwargs, names, default_cfg): + for n in names: + # for legacy reasons, model __init__args uses img_size + in_chans as separate args while + # default_cfg has one input_size=(C, H ,W) entry + if n == 'img_size': + input_size = default_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[-2:]) + elif n == 'in_chans': + input_size = default_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[0]) + else: + default_val = default_cfg.get(n, None) + if default_val is not None: + kwargs.setdefault(n, default_cfg[n]) + + +def filter_kwargs(kwargs, names): + if not kwargs or not names: + return + for n in names: + kwargs.pop(n, None) + + +def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter): + """ Update the default_cfg and kwargs before passing to model + + FIXME this sequence of overlay default_cfg, set default kwargs, filter kwargs + could/should be replaced by an improved configuration mechanism + + Args: + default_cfg: input default_cfg (updated in-place) + kwargs: keyword args passed to model build fn (updated in-place) + kwargs_filter: keyword arg keys that must be removed before model __init__ + """ + # Overlay default cfg values from `external_default_cfg` if it exists in kwargs + overlay_external_default_cfg(default_cfg, kwargs) + # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs) + default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') + if default_cfg.get('fixed_input_size', False): + # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size + default_kwarg_names += ('img_size',) + set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg) + # Filter keyword args for task specific model variants (some 'features only' models, etc.) 
+ filter_kwargs(kwargs, names=kwargs_filter) + + +def build_model_with_cfg( + model_cls: Callable, + variant: str, + pretrained: bool, + default_cfg: dict, + model_cfg: Optional[Any] = None, + feature_cfg: Optional[dict] = None, + pretrained_strict: bool = True, + pretrained_filter_fn: Optional[Callable] = None, + pretrained_custom_load: bool = False, + kwargs_filter: Optional[Tuple[str]] = None, + **kwargs): + """ Build model with specified default_cfg and optional model_cfg + + This helper fn aids in the construction of a model including: + * handling default_cfg and associated pretained weight loading + * passing through optional model_cfg for models with config based arch spec + * features_only model adaptation + * pruning config / model adaptation + + Args: + model_cls (nn.Module): model class + variant (str): model variant name + pretrained (bool): load pretrained weights + default_cfg (dict): model's default pretrained/task config + model_cfg (Optional[Dict]): model's architecture config + feature_cfg (Optional[Dict]: feature extraction adapter config + pretrained_strict (bool): load pretrained weights strictly + pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights + pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights + kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model + **kwargs: model args passed through to model __init__ + """ + pruned = kwargs.pop('pruned', False) + features = False + feature_cfg = feature_cfg or {} + default_cfg = deepcopy(default_cfg) if default_cfg else {} + update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter) + default_cfg.setdefault('architecture', variant) + + # Setup for feature extraction wrapper done at end of this fn + if kwargs.pop('features_only', False): + features = True + feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) + if 'out_indices' in kwargs: + feature_cfg['out_indices'] = kwargs.pop('out_indices') + + # Build the model + model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs) + model.default_cfg = default_cfg + + if pruned: + model = adapt_model_from_file(model, variant) + + # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats + num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) + if pretrained: + if pretrained_custom_load: + load_custom_pretrained(model) + else: + load_pretrained( + model, + num_classes=num_classes_pretrained, + in_chans=kwargs.get('in_chans', 3), + filter_fn=pretrained_filter_fn, + strict=pretrained_strict) + + # Wrap the model in a feature extraction module if enabled + if features: + feature_cls = FeatureListNet + if 'feature_cls' in feature_cfg: + feature_cls = feature_cfg.pop('feature_cls') + if isinstance(feature_cls, str): + feature_cls = feature_cls.lower() + if 'hook' in feature_cls: + feature_cls = FeatureHookNet + elif feature_cls == 'fx': + feature_cls = FeatureGraphNet + else: + assert False, f'Unknown feature class {feature_cls}' + model = feature_cls(model, **feature_cfg) + model.default_cfg = default_cfg_for_features(default_cfg) # add back default_cfg + + return model + + +def model_parameters(model, exclude_head=False): + if exclude_head: + # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering + return [p for p in model.parameters()][:-2] + else: + return model.parameters() + + +def named_apply(fn: Callable, module: 
nn.Module, name='', depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +def named_modules(module: nn.Module, name='', depth_first=True, include_root=False): + if not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules( + module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + yield name, module diff --git a/data_processing/MANIQA/timm/models/hrnet.py b/data_processing/MANIQA/timm/models/hrnet.py new file mode 100644 index 0000000..c56964f --- /dev/null +++ b/data_processing/MANIQA/timm/models/hrnet.py @@ -0,0 +1,836 @@ +""" HRNet + +Copied from https://github.com/HRNet/HRNet-Image-Classification + +Original header: + Copyright (c) Microsoft + Licensed under the MIT License. + Written by Bin Xiao (Bin.Xiao@microsoft.com) + Modified by Ke Sun (sunk@mail.ustc.edu.cn) +""" +import logging +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .features import FeatureInfo +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_classifier +from .registry import register_model +from .resnet import BasicBlock, Bottleneck # leveraging ResNet blocks w/ additional features like SE + +_BN_MOMENTUM = 0.1 +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hrnet_w18_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth'), + 'hrnet_w18_small_v2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth'), + 'hrnet_w18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth'), + 'hrnet_w30': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth'), + 'hrnet_w32': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth'), + 'hrnet_w40': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth'), + 'hrnet_w44': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth'), + 'hrnet_w48': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth'), + 'hrnet_w64': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth'), +} + +cfg_cls = dict( + hrnet_w18_small=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(1,), + NUM_CHANNELS=(32,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(16, 32), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=1, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(16, 32, 64), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=1, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(16, 32, 64, 128), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18_small_v2=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(2,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=3, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=2, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w30=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(30, 60), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(30, 60, 120), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(30, 60, 120, 240), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w32=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(32, 64), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(32, 64, 128), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + 
NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(32, 64, 128, 256), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w40=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(40, 80), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(40, 80, 160), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(40, 80, 160, 320), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w44=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(44, 88), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(44, 88, 176), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(44, 88, 176, 352), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w48=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(48, 96), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(48, 96, 192), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(48, 96, 192, 384), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w64=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(64, 128), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(64, 128, 256), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(64, 128, 256, 512), + FUSE_METHOD='SUM', + ), + ) +) + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, + num_channels, fuse_method, multi_scale_output=True): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.fuse_act = nn.ReLU(False) + + def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): + error_msg = '' + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks)) + elif num_branches != len(num_channels): + error_msg = 
'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels)) + elif num_branches != len(num_inchannels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels)) + if error_msg: + _logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): + downsample = None + if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample)] + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + for i in range(num_branches): + branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return nn.Identity() + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), + nn.BatchNorm2d(num_inchannels[i], momentum=_BN_MOMENTUM), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(nn.Identity()) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x: List[torch.Tensor]): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i, branch in enumerate(self.branches): + x[i] = branch(x[i]) + + x_fuse = [] + for i, fuse_outer in enumerate(self.fuse_layers): + y = x[0] if i == 0 else fuse_outer[0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + fuse_outer[j](x[j]) + x_fuse.append(self.fuse_act(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, head='classification'): + super(HighResolutionNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + + stem_width = cfg['STEM_WIDTH'] + self.conv1 = nn.Conv2d(in_chans, stem_width, 
kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) + self.act1 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) + self.act2 = nn.ReLU(inplace=True) + + self.stage1_cfg = cfg['STAGE1'] + num_channels = self.stage1_cfg['NUM_CHANNELS'][0] + block = blocks_dict[self.stage1_cfg['BLOCK']] + num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + stage1_out_channel = block.expansion * num_channels + + self.stage2_cfg = cfg['STAGE2'] + num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) + + self.head = head + self.head_channels = None # set if _make_head called + if head == 'classification': + # Classification Head + self.num_features = 2048 + self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + elif head == 'incre': + self.num_features = 2048 + self.incre_modules, _, _ = self._make_head(pre_stage_channels, True) + else: + self.incre_modules = None + self.num_features = 256 + + curr_stride = 2 + # module names aren't actually valid here, hook or FeatureNet based extraction would not work + self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] + for i, c in enumerate(self.head_channels if self.head_channels else num_channels): + curr_stride *= 2 + c = c * 4 if self.head_channels else c # head block expansion factor of 4 + self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] + + self.init_weights() + + def _make_head(self, pre_stage_channels, incre_only=False): + head_block = Bottleneck + self.head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = [] + for i, channels in enumerate(pre_stage_channels): + incre_modules.append(self._make_layer(head_block, channels, self.head_channels[i], 1, stride=1)) + incre_modules = nn.ModuleList(incre_modules) + if incre_only: + return incre_modules, None, None + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = self.head_channels[i] * head_block.expansion + out_channels = 
self.head_channels[i + 1] * head_block.expansion + downsamp_module = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), + nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d( + in_channels=self.head_channels[3] * head_block.expansion, + out_channels=self.num_features, kernel_size=1, stride=1, padding=0 + ), + nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + return incre_modules, downsamp_modules, final_layer + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), + nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(inplanes, planes, stride, downsample)] + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + reset_multi_scale_output = multi_scale_output or i < num_modules - 1 + modules.append(HighResolutionModule( + num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + 
self.num_features, self.num_classes, pool_type=global_pool) + + def stages(self, x) -> List[torch.Tensor]: + x = self.layer1(x) + + xl = [t(x) for i, t in enumerate(self.transition1)] + yl = self.stage2(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] + yl = self.stage3(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] + yl = self.stage4(xl) + return yl + + def forward_features(self, x): + # Stem + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + # Stages + yl = self.stages(x) + + # Classification Head + y = self.incre_modules[0](yl[0]) + for i, down in enumerate(self.downsamp_modules): + y = self.incre_modules[i + 1](yl[i + 1]) + down(y) + y = self.final_layer(y) + return y + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +class HighResolutionNetFeatures(HighResolutionNet): + """HighResolutionNet feature extraction + + The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. + It would be more complicated to use the FeatureNet helpers. + + The `feature_location=incre` allows grabbing increased channel count features using part of the + classification head. If `feature_location=''` the default HRNet features are returned. First stem + conv is used for stride 2 features. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, + feature_location='incre', out_indices=(0, 1, 2, 3, 4)): + assert feature_location in ('incre', '') + super(HighResolutionNetFeatures, self).__init__( + cfg, in_chans=in_chans, num_classes=num_classes, global_pool=global_pool, + drop_rate=drop_rate, head=feature_location) + self.feature_info = FeatureInfo(self.feature_info, out_indices) + self._out_idx = {i for i in out_indices} + + def forward_features(self, x): + assert False, 'Not supported' + + def forward(self, x) -> List[torch.tensor]: + out = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + if 0 in self._out_idx: + out.append(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + x = self.stages(x) + if self.incre_modules is not None: + x = [incre(f) for f, incre in zip(x, self.incre_modules)] + for i, f in enumerate(x): + if i + 1 in self._out_idx: + out.append(f) + return out + + +def _create_hrnet(variant, pretrained, **model_kwargs): + model_cls = HighResolutionNet + features_only = False + kwargs_filter = None + if model_kwargs.pop('features_only', False): + model_cls = HighResolutionNetFeatures + kwargs_filter = ('num_classes', 'global_pool') + features_only = True + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfg_cls[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hrnet_w18_small(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) + + +@register_model +def hrnet_w18_small_v2(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) + + +@register_model +def hrnet_w18(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18', 
pretrained, **kwargs) + + +@register_model +def hrnet_w30(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w30', pretrained, **kwargs) + + +@register_model +def hrnet_w32(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w32', pretrained, **kwargs) + + +@register_model +def hrnet_w40(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w40', pretrained, **kwargs) + + +@register_model +def hrnet_w44(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w44', pretrained, **kwargs) + + +@register_model +def hrnet_w48(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w48', pretrained, **kwargs) + + +@register_model +def hrnet_w64(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w64', pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/hub.py b/data_processing/MANIQA/timm/models/hub.py new file mode 100644 index 0000000..65e7ba9 --- /dev/null +++ b/data_processing/MANIQA/timm/models/hub.py @@ -0,0 +1,171 @@ +import json +import logging +import os +from functools import partial +from pathlib import Path +from typing import Union + +import torch +from torch.hub import HASH_REGEX, download_url_to_file, urlparse +try: + from torch.hub import get_dir +except ImportError: + from torch.hub import _get_torch_home as get_dir + +from timm import __version__ +try: + from huggingface_hub import HfApi, HfFolder, Repository, cached_download, hf_hub_url + cached_download = partial(cached_download, library_name="timm", library_version=__version__) + _has_hf_hub = True +except ImportError: + cached_download = None + _has_hf_hub = False + +_logger = logging.getLogger(__name__) + + +def get_cache_dir(child_dir=''): + """ + Returns the location of the directory where models are cached (and creates it if necessary). + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + hub_dir = get_dir() + child_dir = () if not child_dir else (child_dir,) + model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) + os.makedirs(model_dir, exist_ok=True) + return model_dir + + +def download_cached_file(url, check_hash=True, progress=False): + parts = urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(get_cache_dir(), filename) + if not os.path.exists(cached_file): + _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file(url, cached_file, hash_prefix, progress=progress) + return cached_file + + +def has_hf_hub(necessary=False): + if not _has_hf_hub and necessary: + # if no HF Hub module installed and it is necessary to continue, raise error + raise RuntimeError( + 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') + return _has_hf_hub + + +def hf_split(hf_id): + rev_split = hf_id.split('@') + assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.' 
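+    # e.g. 'org/model@main' -> ('org/model', 'main'); with no '@' the revision below stays None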
+ hf_model_id = rev_split[0] + hf_revision = rev_split[-1] if len(rev_split) > 1 else None + return hf_model_id, hf_revision + + +def load_cfg_from_json(json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + +def _download_from_hf(model_id: str, filename: str): + hf_model_id, hf_revision = hf_split(model_id) + url = hf_hub_url(hf_model_id, filename, revision=hf_revision) + return cached_download(url, cache_dir=get_cache_dir('hf')) + + +def load_model_config_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'config.json') + default_cfg = load_cfg_from_json(cached_file) + default_cfg['hf_hub'] = model_id # insert hf_hub id for pretrained weight load during model creation + model_name = default_cfg.get('architecture') + return default_cfg, model_name + + +def load_state_dict_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'pytorch_model.bin') + state_dict = torch.load(cached_file, map_location='cpu') + return state_dict + + +def save_for_hf(model, save_directory, model_config=None): + assert has_hf_hub(True) + model_config = model_config or {} + save_directory = Path(save_directory) + save_directory.mkdir(exist_ok=True, parents=True) + + weights_path = save_directory / 'pytorch_model.bin' + torch.save(model.state_dict(), weights_path) + + config_path = save_directory / 'config.json' + hf_config = model.default_cfg + hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes) + hf_config['num_features'] = model_config.pop('num_features', model.num_features) + hf_config['labels'] = model_config.pop('labels', [f"LABEL_{i}" for i in range(hf_config['num_classes'])]) + hf_config.update(model_config) + + with config_path.open('w') as f: + json.dump(hf_config, f, indent=2) + + +def push_to_hf_hub( + model, + local_dir, + repo_namespace_or_url=None, + commit_message='Add model', + use_auth_token=True, + git_email=None, + git_user=None, + revision=None, + model_config=None, +): + if repo_namespace_or_url: + repo_owner, repo_name = repo_namespace_or_url.rstrip('/').split('/')[-2:] + else: + if isinstance(use_auth_token, str): + token = use_auth_token + else: + token = HfFolder.get_token() + + if token is None: + raise ValueError( + "You must login to the Hugging Face hub on this computer by typing `transformers-cli login` and " + "entering your credentials to use `use_auth_token=True`. Alternatively, you can pass your own " + "token as the `use_auth_token` argument." + ) + + repo_owner = HfApi().whoami(token)['name'] + repo_name = Path(local_dir).name + + repo_url = f'https://huggingface.co/{repo_owner}/{repo_name}' + + repo = Repository( + local_dir, + clone_from=repo_url, + use_auth_token=use_auth_token, + git_user=git_user, + git_email=git_email, + revision=revision, + ) + + # Prepare a default model card that includes the necessary tags to enable inference. + readme_text = f'---\ntags:\n- image-classification\n- timm\nlibrary_tag: timm\n---\n# Model card for {repo_name}' + with repo.commit(commit_message): + # Save model weights and config. + save_for_hf(model, repo.local_dir, model_config=model_config) + + # Save a model card if it doesn't exist. 
+ readme_path = Path(repo.local_dir) / 'README.md' + if not readme_path.exists(): + readme_path.write_text(readme_text) + + return repo.git_remote_url() diff --git a/data_processing/MANIQA/timm/models/inception_resnet_v2.py b/data_processing/MANIQA/timm/models/inception_resnet_v2.py new file mode 100644 index 0000000..7167284 --- /dev/null +++ b/data_processing/MANIQA/timm/models/inception_resnet_v2.py @@ -0,0 +1,358 @@ +""" Pytorch Inception-Resnet-V2 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionResnetV2'] + +default_cfgs = { + # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz + 'inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + }, + # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz + 'ens_adv_inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=.001) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed_5b(nn.Module): + def __init__(self): + super(Mixed_5b, self).__init__() + + self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(192, 48, kernel_size=1, stride=1), + BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(192, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(192, 64, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = 
self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block35(nn.Module): + def __init__(self, scale=1.0): + super(Block35, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1), + BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1) + ) + + self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_6a(nn.Module): + def __init__(self): + super(Mixed_6a, self).__init__() + + self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Block17(nn.Module): + def __init__(self, scale=1.0): + super(Block17, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 128, kernel_size=1, stride=1), + BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_7a(nn.Module): + def __init__(self): + super(Mixed_7a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), + BasicConv2d(288, 320, kernel_size=3, stride=2) + ) + + self.branch3 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block8(nn.Module): + + def __init__(self, scale=1.0, no_relu=False): + super(Block8, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(2080, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), + BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + ) + + self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) + self.relu = None if no_relu else 
nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + if self.relu is not None: + out = self.relu(out) + return out + + +class InceptionResnetV2(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg'): + super(InceptionResnetV2, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + assert output_stride == 32 + + self.conv2d_1a = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) + self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1) + self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] + + self.maxpool_3a = nn.MaxPool2d(3, stride=2) + self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) + self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) + self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] + + self.maxpool_5a = nn.MaxPool2d(3, stride=2) + self.mixed_5b = Mixed_5b() + self.repeat = nn.Sequential( + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17) + ) + self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] + + self.mixed_6a = Mixed_6a() + self.repeat_1 = nn.Sequential( + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10) + ) + self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] + + self.mixed_7a = Mixed_7a() + self.repeat_2 = nn.Sequential( + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20) + ) + self.block8 = Block8(no_relu=True) + self.conv2d_7b = BasicConv2d(2080, self.num_features, kernel_size=1, stride=1) + self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] + + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.classif + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv2d_1a(x) + x = self.conv2d_2a(x) + x = self.conv2d_2b(x) + x = self.maxpool_3a(x) + x = self.conv2d_3b(x) + x = self.conv2d_4a(x) + x = self.maxpool_5a(x) + x = self.mixed_5b(x) + x = self.repeat(x) + x = self.mixed_6a(x) + x = self.repeat_1(x) + x = self.mixed_7a(x) + x = self.repeat_2(x) + x = self.block8(x) + x = self.conv2d_7b(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classif(x) + return 
x + + +def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionResnetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def inception_resnet_v2(pretrained=False, **kwargs): + r"""InceptionResnetV2 model architecture from the + `"InceptionV4, Inception-ResNet..." ` paper. + """ + return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) + + +@register_model +def ens_adv_inception_resnet_v2(pretrained=False, **kwargs): + r""" Ensemble Adversarially trained InceptionResnetV2 model architecture + As per https://arxiv.org/abs/1705.07204 and + https://github.com/tensorflow/models/tree/master/research/adv_imagenet_models. + """ + return _create_inception_resnet_v2('ens_adv_inception_resnet_v2', pretrained=pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/inception_v3.py b/data_processing/MANIQA/timm/models/inception_v3.py new file mode 100644 index 0000000..cbb1107 --- /dev/null +++ b/data_processing/MANIQA/timm/models/inception_v3.py @@ -0,0 +1,470 @@ +""" Inception-V3 + +Originally from torchvision Inception3 model +Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import trunc_normal_, create_classifier, Linear + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # original PyTorch weights, ported from Tensorflow but modified + 'inception_v3': _cfg( + url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth', + has_aux=True), # checkpoint has aux logit layer weights + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + 'tf_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_inception_v3-e0069de4.pth', + num_classes=1000, has_aux=False, label_offset=1), + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + 'adv_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth', + num_classes=1000, has_aux=False, label_offset=1), + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + 'gluon_inception_v3': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_inception_v3-9f746940.pth', + mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults + std=IMAGENET_DEFAULT_STD, # also works well with inception defaults + has_aux=False, + ) +} + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features, conv_block=None): + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7, conv_block=None): + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = 
self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionE, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes, conv_block=None): + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + 
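+        # the 'stddev' attributes set below are weight-init hints: InceptionV3.__init__ reads m.stddev
+        # (via hasattr) when applying trunc_normal_ to Conv2d/Linear modules, falling back to 0.1 otherwise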
self.conv1.stddev = 0.01 + self.fc = Linear(768, num_classes) + self.fc.stddev = 0.001 + + def forward(self, x): + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) + + +class InceptionV3(nn.Module): + """Inception-V3 with no AuxLogits + FIXME two class defs are redundant, but less screwing around with torchsript fussyness and inconsistent returns + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=False): + super(InceptionV3, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.aux_logits = aux_logits + + self.Conv2d_1a_3x3 = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1) + self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3) + self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = InceptionA(192, pool_features=32) + self.Mixed_5c = InceptionA(256, pool_features=64) + self.Mixed_5d = InceptionA(288, pool_features=64) + self.Mixed_6a = InceptionB(288) + self.Mixed_6b = InceptionC(768, channels_7x7=128) + self.Mixed_6c = InceptionC(768, channels_7x7=160) + self.Mixed_6d = InceptionC(768, channels_7x7=160) + self.Mixed_6e = InceptionC(768, channels_7x7=192) + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes) + else: + self.AuxLogits = None + self.Mixed_7a = InceptionD(768) + self.Mixed_7b = InceptionE(1280) + self.Mixed_7c = InceptionE(2048) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), + dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), + dict(num_chs=288, reduction=8, module='Mixed_5d'), + dict(num_chs=768, reduction=16, module='Mixed_6e'), + dict(num_chs=2048, reduction=32, module='Mixed_7c'), + ] + + self.num_features = 2048 + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + trunc_normal_(m.weight, std=stddev) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward_preaux(self, x): + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + x = self.Pool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) + # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.Pool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 17 x 17 + x 
= self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + return x + + def forward_postaux(self, x): + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) + # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + return x + + def forward_features(self, x): + x = self.forward_preaux(x) + x = self.forward_postaux(x) + return x + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +class InceptionV3Aux(InceptionV3): + """InceptionV3 with AuxLogits + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=True): + super(InceptionV3Aux, self).__init__( + num_classes, in_chans, drop_rate, global_pool, aux_logits) + + def forward_features(self, x): + x = self.forward_preaux(x) + aux = self.AuxLogits(x) if self.training else None + x = self.forward_postaux(x) + return x, aux + + def forward(self, x): + x, aux = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x, aux + + +def _create_inception_v3(variant, pretrained=False, **kwargs): + default_cfg = default_cfgs[variant] + aux_logits = kwargs.pop('aux_logits', False) + if aux_logits: + assert not kwargs.pop('features_only', False) + model_cls = InceptionV3Aux + load_strict = default_cfg['has_aux'] + else: + model_cls = InceptionV3 + load_strict = not default_cfg['has_aux'] + return build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfg, + pretrained_strict=load_strict, + **kwargs) + + +@register_model +def inception_v3(pretrained=False, **kwargs): + # original PyTorch weights, ported from Tensorflow but modified + model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + model = _create_inception_v3('tf_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def adv_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + model = _create_inception_v3('adv_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def gluon_inception_v3(pretrained=False, **kwargs): + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + model = _create_inception_v3('gluon_inception_v3', pretrained=pretrained, **kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/inception_v4.py b/data_processing/MANIQA/timm/models/inception_v4.py new file mode 100644 index 0000000..cc899e1 --- /dev/null +++ 
b/data_processing/MANIQA/timm/models/inception_v4.py @@ -0,0 +1,316 @@ +""" Pytorch Inception-V4 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionV4'] + +default_cfgs = { + 'inception_v4': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'features.0.conv', 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=0.001) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed3a(nn.Module): + def __init__(self): + super(Mixed3a, self).__init__() + self.maxpool = nn.MaxPool2d(3, stride=2) + self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) + + def forward(self, x): + x0 = self.maxpool(x) + x1 = self.conv(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed4a(nn.Module): + def __init__(self): + super(Mixed4a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed5a(nn.Module): + def __init__(self): + super(Mixed5a, self).__init__() + self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) + self.maxpool = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.conv(x) + x1 = self.maxpool(x) + out = torch.cat((x0, x1), 1) + return out + + +class InceptionA(nn.Module): + def __init__(self): + super(InceptionA, self).__init__() + self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(384, 96, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = 
self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionA(nn.Module): + def __init__(self): + super(ReductionA, self).__init__() + self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), + BasicConv2d(224, 256, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionB(nn.Module): + def __init__(self): + super(InceptionB, self).__init__() + self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1024, 128, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionB(nn.Module): + def __init__(self): + super(ReductionB, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(320, 320, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionC(nn.Module): + def __init__(self): + super(InceptionC, self).__init__() + + self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) + + self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)) + self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1536, 256, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + + x1_0 = self.branch1_0(x) + x1_1a = self.branch1_1a(x1_0) + x1_1b = 
self.branch1_1b(x1_0) + x1 = torch.cat((x1_1a, x1_1b), 1) + + x2_0 = self.branch2_0(x) + x2_1 = self.branch2_1(x2_0) + x2_2 = self.branch2_2(x2_1) + x2_3a = self.branch2_3a(x2_2) + x2_3b = self.branch2_3b(x2_2) + x2 = torch.cat((x2_3a, x2_3b), 1) + + x3 = self.branch3(x) + + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class InceptionV4(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg'): + super(InceptionV4, self).__init__() + assert output_stride == 32 + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + + self.features = nn.Sequential( + BasicConv2d(in_chans, 32, kernel_size=3, stride=2), + BasicConv2d(32, 32, kernel_size=3, stride=1), + BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), + Mixed3a(), + Mixed4a(), + Mixed5a(), + InceptionA(), + InceptionA(), + InceptionA(), + InceptionA(), + ReductionA(), # Mixed6a + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + ReductionB(), # Mixed7a + InceptionC(), + InceptionC(), + InceptionC(), + ) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='features.2'), + dict(num_chs=160, reduction=4, module='features.3'), + dict(num_chs=384, reduction=8, module='features.9'), + dict(num_chs=1024, reduction=16, module='features.17'), + dict(num_chs=1536, reduction=32, module='features.21'), + ] + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_inception_v4(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionV4, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def inception_v4(pretrained=False, **kwargs): + return _create_inception_v4('inception_v4', pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/layers/__init__.py b/data_processing/MANIQA/timm/models/layers/__init__.py new file mode 100644 index 0000000..706d9dc --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/__init__.py @@ -0,0 +1,40 @@ +from .activations import * +from .adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from .blur_pool import BlurPool2d +from .classifier import ClassifierHead, create_classifier +from .cond_conv2d import CondConv2d, get_condconv_initializer +from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ + set_layer_config +from .conv2d_same import Conv2dSame, conv2d_same +from .conv_bn_act import ConvBnAct +from .create_act import create_act_layer, get_act_layer, get_act_fn +from .create_attn import get_attn, create_attn +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act +from .drop import DropBlock2d, DropPath, drop_block_2d, 
drop_path +from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from .evo_norm import EvoNormBatch2d, EvoNormSample2d +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible +from .inplace_abn import InplaceAbn +from .linear import Linear +from .mixed_conv2d import MixedConv2d +from .mlp import Mlp, GluMlp, GatedMlp, ConvMlp +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .norm import GroupNorm, LayerNorm2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .padding import get_padding, get_same_padding, pad_same +from .patch_embed import PatchEmbed +from .pool2d_same import AvgPool2dSame, create_pool2d +from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from .selective_kernel import SelectiveKernel +from .separable_conv import SeparableConv2d, SeparableConvBnAct +from .space_to_depth import SpaceToDepthModule +from .split_attn import SplitAttn +from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from .test_time_pool import TestTimePoolHead, apply_test_time_pool +from .trace_utils import _assert, _float_to_int +from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_ diff --git a/data_processing/MANIQA/timm/models/layers/activations.py b/data_processing/MANIQA/timm/models/layers/activations.py new file mode 100644 index 0000000..e16b3bd --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/activations.py @@ -0,0 +1,145 @@ +""" Activations + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +def swish(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + + +class Swish(nn.Module): + def __init__(self, inplace: bool = False): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + + +def mish(x, inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + NOTE: I don't have a working inplace variant + """ + return x.mul(F.softplus(x).tanh()) + + +class Mish(nn.Module): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + def __init__(self, inplace: bool = False): + super(Mish, self).__init__() + + def forward(self, x): + return mish(x) + + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Sigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + + +def tanh(x, inplace: bool = False): + return x.tanh_() if inplace else x.tanh() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Tanh(nn.Module): + def __init__(self, inplace: bool = False): + super(Tanh, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.tanh_() if self.inplace else x.tanh() + + +def hard_swish(x, inplace: bool = False): + inner = F.relu6(x + 3.).div_(6.) + return x.mul_(inner) if inplace else x.mul(inner) + + +class HardSwish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_swish(x, self.inplace) + + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. 
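+
+# NOTE: the in-place branches of the functional forms above mutate `x` directly (e.g. `x.add_(3.)`),
+# so they are only safe when `x` is not needed elsewhere for autograd; an illustrative call is
+# `y = hard_sigmoid(feat, inplace=True)`, which reuses feat's storage.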
+ + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + + +def hard_mish(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + if inplace: + return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) + else: + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_mish(x, self.inplace) + + +class PReLU(nn.PReLU): + """Applies PReLU (w/ dummy inplace arg) + """ + def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: + super(PReLU, self).__init__(num_parameters=num_parameters, init=init) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.prelu(input, self.weight) + + +def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: + return F.gelu(x) + + +class GELU(nn.Module): + """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) + """ + def __init__(self, inplace: bool = False): + super(GELU, self).__init__() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.gelu(input) diff --git a/data_processing/MANIQA/timm/models/layers/activations_jit.py b/data_processing/MANIQA/timm/models/layers/activations_jit.py new file mode 100644 index 0000000..b4a5165 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/activations_jit.py @@ -0,0 +1,90 @@ +""" Activations + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul(x.sigmoid()) + + +@torch.jit.script +def mish_jit(x, _inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class SwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishJit, self).__init__() + + def forward(self, x): + return swish_jit(x) + + +class MishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(MishJit, self).__init__() + + def forward(self, x): + return mish_jit(x) + + +@torch.jit.script +def hard_sigmoid_jit(x, inplace: bool = False): + # return F.relu6(x + 3.) / 6. + return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? 
+ + +class HardSigmoidJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidJit, self).__init__() + + def forward(self, x): + return hard_sigmoid_jit(x) + + +@torch.jit.script +def hard_swish_jit(x, inplace: bool = False): + # return x * (F.relu6(x + 3.) / 6) + return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishJit, self).__init__() + + def forward(self, x): + return hard_swish_jit(x) + + +@torch.jit.script +def hard_mish_jit(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishJit, self).__init__() + + def forward(self, x): + return hard_mish_jit(x) diff --git a/data_processing/MANIQA/timm/models/layers/activations_me.py b/data_processing/MANIQA/timm/models/layers/activations_me.py new file mode 100644 index 0000000..9a12bb7 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/activations_me.py @@ -0,0 +1,218 @@ +""" Activations (memory-efficient w/ custom autograd) + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +These activations are not compatible with jit scripting or ONNX export of the model, please use either +the JIT or basic versions of the activations. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit_fwd(x): + return x.mul(torch.sigmoid(x)) + + +@torch.jit.script +def swish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) + + +class SwishJitAutoFn(torch.autograd.Function): + """ torch.jit.script optimised Swish w/ memory-efficient checkpoint + Inspired by conversation btw Jeremy Howard & Adam Pazske + https://twitter.com/jeremyphoward/status/1188251041835315200 + """ + @staticmethod + def symbolic(g, x): + return g.op("Mul", x, g.op("Sigmoid", x)) + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_jit_bwd(x, grad_output) + + +def swish_me(x, inplace=False): + return SwishJitAutoFn.apply(x) + + +class SwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishMe, self).__init__() + + def forward(self, x): + return SwishJitAutoFn.apply(x) + + +@torch.jit.script +def mish_jit_fwd(x): + return x.mul(torch.tanh(F.softplus(x))) + + +@torch.jit.script +def mish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + x_tanh_sp = F.softplus(x).tanh() + return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) + + +class MishJitAutoFn(torch.autograd.Function): + """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + A memory efficient, jit scripted variant of Mish + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return 
mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return mish_jit_bwd(x, grad_output) + + +def mish_me(x, inplace=False): + return MishJitAutoFn.apply(x) + + +class MishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(MishMe, self).__init__() + + def forward(self, x): + return MishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_sigmoid_jit_fwd(x, inplace: bool = False): + return (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_sigmoid_jit_bwd(x, grad_output): + m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. + return grad_output * m + + +class HardSigmoidJitAutoFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_sigmoid_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_sigmoid_jit_bwd(x, grad_output) + + +def hard_sigmoid_me(x, inplace: bool = False): + return HardSigmoidJitAutoFn.apply(x) + + +class HardSigmoidMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidMe, self).__init__() + + def forward(self, x): + return HardSigmoidJitAutoFn.apply(x) + + +@torch.jit.script +def hard_swish_jit_fwd(x): + return x * (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_swish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= 3.) + m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) + return grad_output * m + + +class HardSwishJitAutoFn(torch.autograd.Function): + """A memory efficient, jit-scripted HardSwish activation""" + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_swish_jit_bwd(x, grad_output) + + @staticmethod + def symbolic(g, self): + input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) + hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + return g.op("Mul", self, hardtanh_) + + +def hard_swish_me(x, inplace=False): + return HardSwishJitAutoFn.apply(x) + + +class HardSwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishMe, self).__init__() + + def forward(self, x): + return HardSwishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_mish_jit_fwd(x): + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +@torch.jit.script +def hard_mish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= -2.) + m = torch.where((x >= -2.) 
& (x <= 0.), x + 1., m) + return grad_output * m + + +class HardMishJitAutoFn(torch.autograd.Function): + """ A memory efficient, jit scripted variant of Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_mish_jit_bwd(x, grad_output) + + +def hard_mish_me(x, inplace: bool = False): + return HardMishJitAutoFn.apply(x) + + +class HardMishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishMe, self).__init__() + + def forward(self, x): + return HardMishJitAutoFn.apply(x) + + + diff --git a/data_processing/MANIQA/timm/models/layers/adaptive_avgmax_pool.py b/data_processing/MANIQA/timm/models/layers/adaptive_avgmax_pool.py new file mode 100644 index 0000000..ebc6ada --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/adaptive_avgmax_pool.py @@ -0,0 +1,118 @@ +""" PyTorch selectable adaptive pooling +Adaptive pooling with the ability to select the type of pooling from: + * 'avg' - Average pooling + * 'max' - Max pooling + * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 + * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim + +Both a functional and a nn.Module version of the pooling is provided. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class FastAdaptiveAvgPool2d(nn.Module): + def __init__(self, flatten=False): + super(FastAdaptiveAvgPool2d, self).__init__() + self.flatten = flatten + + def forward(self, x): + return x.mean((2, 3), keepdim=not self.flatten) + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, 
output_size=1, pool_type='fast', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing + self.flatten = nn.Flatten(1) if flatten else nn.Identity() + if pool_type == '': + self.pool = nn.Identity() # pass through + elif pool_type == 'fast': + assert output_size == 1 + self.pool = FastAdaptiveAvgPool2d(flatten) + self.flatten = nn.Identity() + elif pool_type == 'avg': + self.pool = nn.AdaptiveAvgPool2d(output_size) + elif pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + + def is_identity(self): + return not self.pool_type + + def forward(self, x): + x = self.pool(x) + x = self.flatten(x) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'pool_type=' + self.pool_type \ + + ', flatten=' + str(self.flatten) + ')' + diff --git a/data_processing/MANIQA/timm/models/layers/attention_pool2d.py b/data_processing/MANIQA/timm/models/layers/attention_pool2d.py new file mode 100644 index 0000000..66e49b8 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/attention_pool2d.py @@ -0,0 +1,182 @@ +""" Attention Pool 2D + +Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. + +Based on idea in CLIP by OpenAI, licensed Apache 2.0 +https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from typing import List, Union, Tuple + +import torch +import torch.nn as nn + +from .helpers import to_2tuple +from .weight_init import trunc_normal_ + + +def rot(x): + return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) + + +def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): + return x * cos_emb + rot(x) * sin_emb + + +def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): + if isinstance(x, torch.Tensor): + x = [x] + return [t * cos_emb + rot(t) * sin_emb for t in x] + + +class RotaryEmbedding(nn.Module): + """ Rotary position embedding + + NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not + been well tested, and will likely change. It will be moved to its own file. 
+ + The following impl/resources were referenced for this impl: + * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py + * https://blog.eleuther.ai/rotary-embeddings/ + """ + def __init__(self, dim, max_freq=4): + super().__init__() + self.dim = dim + self.register_buffer('bands', 2 ** torch.linspace(0., max_freq - 1, self.dim // 4), persistent=False) + + def get_embed(self, shape: torch.Size, device: torch.device = None, dtype: torch.dtype = None): + """ + NOTE: shape arg should include spatial dim only + """ + device = device or self.bands.device + dtype = dtype or self.bands.dtype + if not isinstance(shape, torch.Size): + shape = torch.Size(shape) + N = shape.numel() + grid = torch.stack(torch.meshgrid( + [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in shape]), dim=-1).unsqueeze(-1) + emb = grid * math.pi * self.bands + sin = emb.sin().reshape(N, -1).repeat_interleave(2, -1) + cos = emb.cos().reshape(N, -1).repeat_interleave(2, -1) + return sin, cos + + def forward(self, x): + # assuming channel-first tensor where spatial dim are >= 2 + sin_emb, cos_emb = self.get_embed(x.shape[2:]) + return apply_rot_embed(x, sin_emb, cos_emb) + + +class RotAttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from + train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW + """ + def __init__( + self, + in_features: int, + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + embed_dim = embed_dim or in_features + out_features = out_features or in_features + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + assert embed_dim % num_heads == 0 + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + self.pos_embed = RotaryEmbedding(self.head_dim) + + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + sin_emb, cos_emb = self.pos_embed.get_embed(x.shape[2:]) + x = x.reshape(B, -1, N).permute(0, 2, 1) + + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + + qc, q = q[:, :, :1], q[:, :, 1:] + q = apply_rot_embed(q, sin_emb, cos_emb) + q = torch.cat([qc, q], dim=2) + + kc, k = k[:, :, :1], k[:, :, 1:] + k = apply_rot_embed(k, sin_emb, cos_emb) + k = torch.cat([kc, k], dim=2) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] + + +class AttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. 
+ This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + It was based on impl in CLIP by OpenAI + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. + """ + def __init__( + self, + in_features: int, + feat_size: Union[int, Tuple[int, int]], + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + + embed_dim = embed_dim or in_features + out_features = out_features or in_features + assert embed_dim % num_heads == 0 + self.feat_size = to_2tuple(feat_size) + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + + spatial_dim = self.feat_size[0] * self.feat_size[1] + self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) + trunc_normal_(self.pos_embed, std=in_features ** -0.5) + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + assert self.feat_size[0] == H + assert self.feat_size[1] == W + x = x.reshape(B, -1, N).permute(0, 2, 1) + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + x = x + self.pos_embed.unsqueeze(0).to(x.dtype) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] diff --git a/data_processing/MANIQA/timm/models/layers/blur_pool.py b/data_processing/MANIQA/timm/models/layers/blur_pool.py new file mode 100644 index 0000000..ca4ce75 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/blur_pool.py @@ -0,0 +1,42 @@ +""" +BlurPool layer inspired by + - Kornia's Max_BlurPool2d + - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` + +Hacked together by Chris Ha and Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from .padding import get_padding + + +class BlurPool2d(nn.Module): + r"""Creates a module that computes blurs and downsample a given feature map. + See :cite:`zhang2019shiftinvar` for more details. + Corresponds to the Downsample class, which does blurring and subsampling + + Args: + channels = Number of input channels + filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. + stride (int): downsampling filter stride + + Returns: + torch.Tensor: the transformed tensor. 
+ """ + def __init__(self, channels, filt_size=3, stride=2) -> None: + super(BlurPool2d, self).__init__() + assert filt_size > 1 + self.channels = channels + self.filt_size = filt_size + self.stride = stride + self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 + coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) + blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) + self.register_buffer('filt', blur_filter, persistent=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.pad(x, self.padding, 'reflect') + return F.conv2d(x, self.filt, stride=self.stride, groups=x.shape[1]) diff --git a/data_processing/MANIQA/timm/models/layers/bottleneck_attn.py b/data_processing/MANIQA/timm/models/layers/bottleneck_attn.py new file mode 100644 index 0000000..c3db464 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/bottleneck_attn.py @@ -0,0 +1,157 @@ +""" Bottleneck Self Attention (Bottleneck Transformers) + +Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + +@misc{2101.11605, +Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, +Title = {Bottleneck Transformers for Visual Recognition}, +Year = {2021}, +} + +Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + +This impl is a WIP but given that it is based on the ref gist likely not too far off. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, heads, height, width, dim) + rel_k: (2 * width - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, 2 * W -1) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, W - 1]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) + x = x_pad[:, :W, W - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + """ + def __init__(self, feat_size, dim_head, scale): + super().__init__() + self.height, self.width = to_2tuple(feat_size) + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) + self.width_rel = 
nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(B, self.height, self.width, -1) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, HW, HW) + return rel_logits + + +class BottleneckAttn(nn.Module): + """ Bottleneck Attention + Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). + num_heads (int): parallel attention heads (default: 4) + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, + qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): + super().__init__() + assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + + self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) + + # NOTE I'm only supporting relative pos embedding for now + self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.pos_embed.height, '') + _assert(W == self.pos_embed.width, '') + + x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W + + # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v + # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. + q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) + k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k + v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) + + if self.scale_pos_embed: + attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W + else: + attn = (q @ k) * self.scale + self.pos_embed(q) + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W + out = self.pool(out) + return out diff --git a/data_processing/MANIQA/timm/models/layers/cbam.py b/data_processing/MANIQA/timm/models/layers/cbam.py new file mode 100644 index 0000000..bacf5cf --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/cbam.py @@ -0,0 +1,112 @@ +""" CBAM (sort-of) Attention + +Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 + +WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on +some tasks, especially fine-grained it seems. I may end up removing this impl. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn +import torch.nn.functional as F + +from .conv_bn_act import ConvBnAct +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible + + +class ChannelAttn(nn.Module): + """ Original CBAM channel attention module, currently avg + max pool variant only. 
+ """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(ChannelAttn, self).__init__() + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) + self.act = act_layer(inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) + x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) + return x * self.gate(x_avg + x_max) + + +class LightChannelAttn(ChannelAttn): + """An experimental 'lightweight' that sums avg + max pool first + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightChannelAttn, self).__init__( + channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) + + def forward(self, x): + x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) + x_attn = self.fc2(self.act(self.fc1(x_pool))) + return x * F.sigmoid(x_attn) + + +class SpatialAttn(nn.Module): + """ Original CBAM spatial attention module + """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(SpatialAttn, self).__init__() + self.conv = ConvBnAct(2, 1, kernel_size, act_layer=None) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class LightSpatialAttn(nn.Module): + """An experimental 'lightweight' variant that sums avg_pool and max_pool results. 
+ """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(LightSpatialAttn, self).__init__() + self.conv = ConvBnAct(1, 1, kernel_size, act_layer=None) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class CbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(CbamModule, self).__init__() + self.channel = ChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + + +class LightCbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightCbamModule, self).__init__() + self.channel = LightChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = LightSpatialAttn(spatial_kernel_size) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + diff --git a/data_processing/MANIQA/timm/models/layers/classifier.py b/data_processing/MANIQA/timm/models/layers/classifier.py new file mode 100644 index 0000000..798748d --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/classifier.py @@ -0,0 +1,54 @@ +""" Classifier head and layer factory + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn +from torch.nn import functional as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d + + +def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + assert num_classes == 0 or use_conv,\ + 'Pooling can only be disabled if classifier is also removed or conv classifier is used' + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def _create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + fc = nn.Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): + global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) + fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): + super(ClassifierHead, self).__init__() + self.drop_rate = drop_rate + self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv) + self.fc = 
_create_fc(num_pooled_features, num_classes, use_conv=use_conv) + self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() + + def forward(self, x): + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + x = self.fc(x) + x = self.flatten(x) + return x diff --git a/data_processing/MANIQA/timm/models/layers/cond_conv2d.py b/data_processing/MANIQA/timm/models/layers/cond_conv2d.py new file mode 100644 index 0000000..8b4bbca --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/cond_conv2d.py @@ -0,0 +1,122 @@ +""" PyTorch Conditionally Parameterized Convolution (CondConv) + +Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference +(https://arxiv.org/abs/1904.04971) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +from functools import partial +import numpy as np +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .helpers import to_2tuple +from .conv2d_same import conv2d_same +from .padding import get_padding_value + + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + + +class CondConv2d(nn.Module): + """ Conditionally Parameterized Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = to_2tuple(padding_val) + self.dilation = to_2tuple(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = 
np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + x = x.view(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out diff --git a/data_processing/MANIQA/timm/models/layers/config.py b/data_processing/MANIQA/timm/models/layers/config.py new file mode 100644 index 0000000..f07b9d7 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/config.py @@ -0,0 +1,115 @@ +""" Model / Layer Config singleton state +""" +from typing import Any, Optional + +__all__ = [ + 'is_exportable', 'is_scriptable', 'is_no_jit', + 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' +] + +# Set to True if prefer to have layers with no jit optimization (includes activations) +_NO_JIT = False + +# Set to True if prefer to have activation layers with no jit optimization +# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying +# the jit flags so far are activations. This will change as more layers are updated and/or added. 
+_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. + """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False diff --git a/data_processing/MANIQA/timm/models/layers/conv2d_same.py b/data_processing/MANIQA/timm/models/layers/conv2d_same.py new file mode 100644 index 0000000..75f0f98 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/conv2d_same.py @@ -0,0 +1,42 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .padding import pad_same, get_padding_value + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding 
= kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/data_processing/MANIQA/timm/models/layers/conv_bn_act.py b/data_processing/MANIQA/timm/models/layers/conv_bn_act.py new file mode 100644 index 0000000..33005c3 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/conv_bn_act.py @@ -0,0 +1,40 @@ +""" Conv2d + BN + Act + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class ConvBnAct(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, + drop_block=None): + super(ConvBnAct, self).__init__() + use_aa = aa_layer is not None + + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else None + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.aa is not None: + x = self.aa(x) + return x diff --git a/data_processing/MANIQA/timm/models/layers/create_act.py b/data_processing/MANIQA/timm/models/layers/create_act.py new file mode 100644 index 0000000..aa55769 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/create_act.py @@ -0,0 +1,153 @@ +""" Activation Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Union, Callable, Type + +from .activations import * +from .activations_jit import * +from .activations_me import * +from .config import is_exportable, is_scriptable, is_no_jit + +# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. +# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. +# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. 
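+# For example (illustrative): with PyTorch >= 1.7, get_act_layer('swish') below typically resolves to
+# nn.SiLU, while on older versions it falls back to one of the hand-written variants
+# (SwishMe / SwishJit / Swish) chosen according to the no_jit / exportable / scriptable flags.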
+_has_silu = 'silu' in dir(torch.nn.functional) +_has_hardswish = 'hardswish' in dir(torch.nn.functional) +_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) +_has_mish = 'mish' in dir(torch.nn.functional) + + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=F.mish if _has_mish else mish, + relu=F.relu, + relu6=F.relu6, + leaky_relu=F.leaky_relu, + elu=F.elu, + celu=F.celu, + selu=F.selu, + gelu=gelu, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, + hard_swish=F.hardswish if _has_hardswish else hard_swish, + hard_mish=hard_mish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=F.mish if _has_mish else mish_jit, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, + hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, + hard_mish=hard_mish_jit +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=F.mish if _has_mish else mish_me, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, + hard_swish=F.hardswish if _has_hardswish else hard_swish_me, + hard_mish=hard_mish_me, +) + +_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) +for a in _ACT_FNS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=nn.Mish if _has_mish else Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + elu=nn.ELU, + prelu=PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=GELU, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, + hard_swish=nn.Hardswish if _has_hardswish else HardSwish, + hard_mish=HardMish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + swish=nn.SiLU if _has_silu else SwishJit, + mish=nn.Mish if _has_mish else MishJit, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, + hard_mish=HardMishJit +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=nn.Mish if _has_mish else MishMe, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, + hard_mish=HardMishMe, +) + +_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) +for a in _ACT_LAYERS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +def get_act_fn(name: Union[Callable, str] = 'relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. 
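+
+    Example (illustrative): get_act_fn('hard_swish') returns F.hardswish when the native op is
+    available, otherwise one of the hand-written variants (memory-efficient, jit, or plain)
+    depending on the no_jit / exportable / scriptable flags.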
+ """ + if not name: + return None + if isinstance(name, Callable): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + # If not exporting or scripting the model, first look for a memory-efficient version with + # custom autograd, then fallback + if name in _ACT_FN_ME: + return _ACT_FN_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_FN_JIT: + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if isinstance(name, type): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + if name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return Swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_LAYER_JIT: + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + +def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): + act_layer = get_act_layer(name) + if act_layer is None: + return None + return act_layer(**kwargs) if inplace is None else act_layer(inplace=inplace, **kwargs) diff --git a/data_processing/MANIQA/timm/models/layers/create_attn.py b/data_processing/MANIQA/timm/models/layers/create_attn.py new file mode 100644 index 0000000..028c0f7 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/create_attn.py @@ -0,0 +1,89 @@ +""" Attention Factory + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from functools import partial + +from .bottleneck_attn import BottleneckAttn +from .cbam import CbamModule, LightCbamModule +from .eca import EcaModule, CecaModule +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .halo_attn import HaloAttn +from .lambda_layer import LambdaLayer +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .selective_kernel import SelectiveKernel +from .split_attn import SplitAttn +from .squeeze_excite import SEModule, EffectiveSEModule + + +def get_attn(attn_type): + if isinstance(attn_type, torch.nn.Module): + return attn_type + module_cls = None + if attn_type is not None: + if isinstance(attn_type, str): + attn_type = attn_type.lower() + # Lightweight attention modules (channel and/or coarse spatial). + # Typically added to existing network architecture blocks in addition to existing convolutions. 
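+            # e.g. (illustrative): get_attn('se') -> SEModule, get_attn('eca') -> EcaModule,
+            # get_attn('cbam') -> CbamModule; create_attn('se', channels) instantiates it directly.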
+ if attn_type == 'se': + module_cls = SEModule + elif attn_type == 'ese': + module_cls = EffectiveSEModule + elif attn_type == 'eca': + module_cls = EcaModule + elif attn_type == 'ecam': + module_cls = partial(EcaModule, use_mlp=True) + elif attn_type == 'ceca': + module_cls = CecaModule + elif attn_type == 'ge': + module_cls = GatherExcite + elif attn_type == 'gc': + module_cls = GlobalContext + elif attn_type == 'gca': + module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) + elif attn_type == 'cbam': + module_cls = CbamModule + elif attn_type == 'lcbam': + module_cls = LightCbamModule + + # Attention / attention-like modules w/ significant params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'sk': + module_cls = SelectiveKernel + elif attn_type == 'splat': + module_cls = SplitAttn + + # Self-attention / attention-like modules w/ significant compute and/or params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'lambda': + return LambdaLayer + elif attn_type == 'bottleneck': + return BottleneckAttn + elif attn_type == 'halo': + return HaloAttn + elif attn_type == 'nl': + module_cls = NonLocalAttn + elif attn_type == 'bat': + module_cls = BatNonLocalAttn + + # Woops! + else: + assert False, "Invalid attn module (%s)" % attn_type + elif isinstance(attn_type, bool): + if attn_type: + module_cls = SEModule + else: + module_cls = attn_type + return module_cls + + +def create_attn(attn_type, channels, **kwargs): + module_cls = get_attn(attn_type) + if module_cls is not None: + # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels + return module_cls(channels, **kwargs) + return None diff --git a/data_processing/MANIQA/timm/models/layers/create_conv2d.py b/data_processing/MANIQA/timm/models/layers/create_conv2d.py new file mode 100644 index 0000000..3a0cc03 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/create_conv2d.py @@ -0,0 +1,31 @@ +""" Create Conv2d Factory Method + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from .mixed_conv2d import MixedConv2d +from .cond_conv2d import CondConv2d +from .conv2d_same import create_conv2d_pad + + +def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + assert 'groups' not in kwargs # MixedConv groups are defined by kernel list + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. 
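A short, hedged sketch of the attention factory above; `'se'` and `'eca'` are lightweight channel-attention modules that only need the channel count. The import path assumes the vendored `timm` package is importable.

```python
# Hedged sketch of the create_attn factory above (vendored timm assumed importable).
import torch
from timm.models.layers.create_attn import create_attn

x = torch.randn(2, 64, 16, 16)
se = create_attn('se', 64)       # SEModule: squeeze-and-excitation channel attention
eca = create_attn('eca', 64)     # EcaModule: efficient channel attention
print(se(x).shape, eca(x).shape)  # both keep torch.Size([2, 64, 16, 16])
```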
+ m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 + groups = in_channels if depthwise else kwargs.pop('groups', 1) + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + return m diff --git a/data_processing/MANIQA/timm/models/layers/create_norm_act.py b/data_processing/MANIQA/timm/models/layers/create_norm_act.py new file mode 100644 index 0000000..5b56294 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/create_norm_act.py @@ -0,0 +1,83 @@ +""" NormAct (Normalizaiton + Activation Layer) Factory + +Create norm + act combo modules that attempt to be backwards compatible with separate norm + act +isntances in models. Where these are used it will be possible to swap separate BN + act layers with +combined modules like IABN or EvoNorms. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import types +import functools + +import torch +import torch.nn as nn + +from .evo_norm import EvoNormBatch2d, EvoNormSample2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .inplace_abn import InplaceAbn + +_NORM_ACT_TYPES = {BatchNormAct2d, GroupNormAct, EvoNormBatch2d, EvoNormSample2d, InplaceAbn} +_NORM_ACT_REQUIRES_ARG = {BatchNormAct2d, GroupNormAct, InplaceAbn} # requires act_layer arg to define act type + + +def get_norm_act_layer(layer_class): + layer_class = layer_class.replace('_', '').lower() + if layer_class.startswith("batchnorm"): + layer = BatchNormAct2d + elif layer_class.startswith("groupnorm"): + layer = GroupNormAct + elif layer_class == "evonormbatch": + layer = EvoNormBatch2d + elif layer_class == "evonormsample": + layer = EvoNormSample2d + elif layer_class == "iabn" or layer_class == "inplaceabn": + layer = InplaceAbn + else: + assert False, "Invalid norm_act layer (%s)" % layer_class + return layer + + +def create_norm_act(layer_type, num_features, apply_act=True, jit=False, **kwargs): + layer_parts = layer_type.split('-') # e.g. batchnorm-leaky_relu + assert len(layer_parts) in (1, 2) + layer = get_norm_act_layer(layer_parts[0]) + #activation_class = layer_parts[1].lower() if len(layer_parts) > 1 else '' # FIXME support string act selection? 
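A hedged sketch of the conv factory above: a list `kernel_size` selects `MixedConv2d`, an int gives a plain padded conv, and `depthwise=True` sets `groups = in_channels`. Shapes are illustrative; the vendored `timm` package is assumed importable.

```python
# Hedged sketch of create_conv2d above (vendored timm assumed importable).
import torch
from timm.models.layers.create_conv2d import create_conv2d

x = torch.randn(1, 32, 28, 28)
conv = create_conv2d(32, 64, kernel_size=3)                  # regular conv, 'same'-style padding
dw = create_conv2d(32, 32, kernel_size=5, depthwise=True)    # depthwise conv, groups=32
mixed = create_conv2d(32, 64, kernel_size=[3, 5, 7])         # MixedConv2d, one kernel per channel group
print(conv(x).shape, dw(x).shape, mixed(x).shape)            # spatial dims preserved at stride 1
```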
+ layer_instance = layer(num_features, apply_act=apply_act, **kwargs) + if jit: + layer_instance = torch.jit.script(layer_instance) + return layer_instance + + +def convert_norm_act(norm_layer, act_layer): + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) + norm_act_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_act_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + norm_act_layer = get_norm_act_layer(norm_layer) + elif norm_layer in _NORM_ACT_TYPES: + norm_act_layer = norm_layer + elif isinstance(norm_layer, types.FunctionType): + # if function type, must be a lambda/fn that creates a norm_act layer + norm_act_layer = norm_layer + else: + type_name = norm_layer.__name__.lower() + if type_name.startswith('batchnorm'): + norm_act_layer = BatchNormAct2d + elif type_name.startswith('groupnorm'): + norm_act_layer = GroupNormAct + else: + assert False, f"No equivalent norm_act layer for {type_name}" + + if norm_act_layer in _NORM_ACT_REQUIRES_ARG: + # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. + # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/data_processing/MANIQA/timm/models/layers/drop.py b/data_processing/MANIQA/timm/models/layers/drop.py new file mode 100644 index 0000000..90c1933 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/drop.py @@ -0,0 +1,164 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. + +Papers: +DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) + +Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) + +Code: +DropBlock impl inspired by two Tensorflow impl that I liked: + - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 + - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def drop_block_2d( + x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, + with_noise: bool = False, inplace: bool = False, batchwise: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. This layer has been tested on a few training + runs with success, but needs further validation and possibly optimization for lower runtime impact. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + # seed_drop_rate, the gamma parameter + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + # Forces the block to be inside the feature map. 
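A hedged sketch of the norm+act factory above: a combined BatchNorm+activation module can be built by name, or an existing `(norm_layer, act_layer)` pair converted to a NormAct constructor. The vendored `timm` package is assumed importable.

```python
# Hedged sketch of create_norm_act / convert_norm_act above.
import torch
import torch.nn as nn
from timm.models.layers.create_norm_act import create_norm_act, convert_norm_act

bn_act = create_norm_act('batchnorm', num_features=32, apply_act=True, act_layer=nn.ReLU)
x = torch.randn(2, 32, 8, 8)
print(bn_act(x).shape)                      # torch.Size([2, 32, 8, 8])

# convert_norm_act maps a plain (norm_layer, act_layer) pair to a combined NormAct constructor
norm_act_ctor = convert_norm_act(norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU)
print(type(norm_act_ctor(32)).__name__)     # BatchNormAct2d
```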
+ w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device)) + valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ + ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) + valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) + + if batchwise: + # one mask for whole batch, quite a bit faster + uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) + else: + uniform_noise = torch.rand_like(x) + block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) + block_mask = -F.max_pool2d( + -block_mask, + kernel_size=clipped_block_size, # block_size, + stride=1, + padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) + else: + x = x * block_mask + normal_noise * (1 - block_mask) + else: + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +def drop_block_fast_2d( + x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, + gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid + block mask at edges. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + block_mask = torch.empty_like(x).bernoulli_(gamma) + block_mask = F.max_pool2d( + block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.empty_like(x).normal_() + if inplace: + x.mul_(1. - block_mask).add_(normal_noise * block_mask) + else: + x = x * (1. - block_mask) + normal_noise * block_mask + else: + block_mask = 1 - block_mask + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +class DropBlock2d(nn.Module): + """ DropBlock. 
See https://arxiv.org/pdf/1810.12890.pdf + """ + def __init__(self, + drop_prob=0.1, + block_size=7, + gamma_scale=1.0, + with_noise=False, + inplace=False, + batchwise=False, + fast=True): + super(DropBlock2d, self).__init__() + self.drop_prob = drop_prob + self.gamma_scale = gamma_scale + self.block_size = block_size + self.with_noise = with_noise + self.inplace = inplace + self.batchwise = batchwise + self.fast = fast # FIXME finish comparisons of fast vs not + + def forward(self, x): + if not self.training or not self.drop_prob: + return x + if self.fast: + return drop_block_fast_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace) + else: + return drop_block_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + + +def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
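A brief, hedged sketch of the functional `drop_path` defined above (the `DropPath` module that follows simply wraps it); probabilities and shapes are illustrative, and the vendored `timm` package is assumed importable.

```python
# Hedged sketch of stochastic depth via drop_path above.
import torch
from timm.models.layers.drop import drop_path

x = torch.randn(8, 64, 14, 14)
y = drop_path(x, drop_prob=0.25, training=True)   # each sample dropped with p=0.25, survivors scaled by 1/0.75
dropped = (y.flatten(1).abs().sum(dim=1) == 0).sum().item()
print(y.shape, dropped)                           # shape unchanged; ~2 of 8 samples fully zeroed in expectation
```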
+ """ + def __init__(self, drop_prob=None, scale_by_keep=True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) diff --git a/data_processing/MANIQA/timm/models/layers/eca.py b/data_processing/MANIQA/timm/models/layers/eca.py new file mode 100644 index 0000000..e29be6a --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+ kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + def __init__( + self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', + rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False): + super(EcaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + assert kernel_size % 2 == 1 + padding = (kernel_size - 1) // 2 + if use_mlp: + # NOTE 'mlp' mode is a timm experiment, not in paper + assert channels is not None + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) + act_layer = act_layer or nn.ReLU + self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) + self.act = create_act_layer(act_layer) + self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) + else: + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.act = None + self.conv2 = None + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv + y = self.conv(y) + if self.conv2 is not None: + y = self.act(y) + y = self.conv2(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +EfficientChannelAttn = EcaModule # alias + + +class CecaModule(nn.Module): + """Constructs a circular ECA module. + + ECA module where the conv uses circular padding rather than zero padding. + Unlike the spatial dimension, the channels do not have inherent ordering nor + locality. Although this module in essence, applies such an assumption, it is unnecessary + to limit the channels on either "edge" from being circularly adapted to each other. + This will fundamentally increase connectivity and possibly increase performance metrics + (accuracy, robustness), without significantly impacting resource metrics + (parameter size, throughput,latency, etc) + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
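A hedged sketch of the ECA module above: a 1d conv over the pooled channel descriptor, with the kernel size either fixed or derived from the channel count. The vendored `timm` package is assumed importable.

```python
# Hedged sketch of EcaModule above.
import torch
from timm.models.layers.eca import EcaModule

x = torch.randn(2, 512, 7, 7)
eca_fixed = EcaModule(kernel_size=3)     # channels not given: use the provided kernel size
eca_adapt = EcaModule(channels=512)      # kernel from t = |log2(C) + beta| / gamma (5 for C=512 with defaults)
print(eca_fixed(x).shape, eca_adapt(x).shape)  # both torch.Size([2, 512, 7, 7])
```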
+ kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + + def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): + super(CecaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + has_act = act_layer is not None + assert kernel_size % 2 == 1 + + # PyTorch circular padding mode is buggy as of pytorch 1.4 + # see https://github.com/pytorch/pytorch/pull/17240 + # implement manual circular padding + self.padding = (kernel_size - 1) // 2 + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) + # Manually implement circular padding, F.pad does not seemed to be bugged + y = F.pad(y, (self.padding, self.padding), mode='circular') + y = self.conv(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +CircularEfficientChannelAttn = CecaModule diff --git a/data_processing/MANIQA/timm/models/layers/evo_norm.py b/data_processing/MANIQA/timm/models/layers/evo_norm.py new file mode 100644 index 0000000..6ef0c88 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/evo_norm.py @@ -0,0 +1,81 @@ +"""EvoNormB0 (Batched) and EvoNormS0 (Sample) in PyTorch + +An attempt at getting decent performing EvoNorms running in PyTorch. +While currently faster than other impl, still quite a ways off the built-in BN +in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed). + +Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn + +from .trace_utils import _assert + + +class EvoNormBatch2d(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, drop_block=None): + super(EvoNormBatch2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(num_features), requires_grad=True) + self.v = nn.Parameter(torch.ones(num_features), requires_grad=True) if apply_act else None + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_type = x.dtype + if self.v is not None: + running_var = self.running_var.view(1, -1, 1, 1) + if self.training: + var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True) + n = x.numel() / x.shape[1] + running_var = var.detach() * self.momentum * (n / (n - 1)) + running_var * (1 - self.momentum) + self.running_var.copy_(running_var.view(self.running_var.shape)) + else: + var = running_var + v = self.v.to(dtype=x_type).reshape(1, -1, 1, 1) + d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type) + d = d.max((var + self.eps).sqrt().to(dtype=x_type)) + x = x / d + return x * self.weight.view(1, -1, 1, 1) + self.bias.view(1, -1, 1, 1) + + +class EvoNormSample2d(nn.Module): + def __init__(self, num_features, apply_act=True, groups=32, eps=1e-5, drop_block=None): + super(EvoNormSample2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(num_features), requires_grad=True) + self.v = nn.Parameter(torch.ones(num_features), requires_grad=True) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + B, C, H, W = x.shape + _assert(C % self.groups == 0, '') + if self.v is not None: + n = x * (x * self.v.view(1, -1, 1, 1)).sigmoid() + x = x.reshape(B, self.groups, -1) + x = n.reshape(B, self.groups, -1) / (x.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt() + x = x.reshape(B, C, H, W) + return x * self.weight.view(1, -1, 1, 1) + self.bias.view(1, -1, 1, 1) diff --git a/data_processing/MANIQA/timm/models/layers/gather_excite.py b/data_processing/MANIQA/timm/models/layers/gather_excite.py new file mode 100644 index 0000000..2d60dc9 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/gather_excite.py @@ -0,0 +1,90 @@ +""" Gather-Excite Attention Block + +Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 + +Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet + +I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another +impl that covers all of the cases. 
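A hedged sketch of the two EvoNorm variants above; channel counts must be divisible by `groups` for the sample variant, and the vendored `timm` package is assumed importable.

```python
# Hedged sketch of the EvoNorm layers above.
import torch
from timm.models.layers.evo_norm import EvoNormBatch2d, EvoNormSample2d

x = torch.randn(2, 64, 8, 8)
enb = EvoNormBatch2d(64)                 # B0: batch statistics + running_var buffer
ens = EvoNormSample2d(64, groups=32)     # S0: per-sample grouped statistics; C must divide by groups
print(enb(x).shape, ens(x).shape)        # both torch.Size([2, 64, 8, 8])
```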
+ +NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math + +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .create_conv2d import create_conv2d +from .helpers import make_divisible +from .mlp import ConvMlp + + +class GatherExcite(nn.Module): + """ Gather-Excite Attention Module + """ + def __init__( + self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, + rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): + super(GatherExcite, self).__init__() + self.add_maxpool = add_maxpool + act_layer = get_act_layer(act_layer) + self.extent = extent + if extra_params: + self.gather = nn.Sequential() + if extent == 0: + assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' + self.gather.add_module( + 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) + else: + assert extent % 2 == 0 + num_conv = int(math.log2(extent)) + for i in range(num_conv): + self.gather.add_module( + f'conv{i + 1}', + create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) + if i != num_conv - 1: + self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) + else: + self.gather = None + if self.extent == 0: + self.gk = 0 + self.gs = 0 + else: + assert extent % 2 == 0 + self.gk = self.extent * 2 - 1 + self.gs = self.extent + + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
+ self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + size = x.shape[-2:] + if self.gather is not None: + x_ge = self.gather(x) + else: + if self.extent == 0: + # global extent + x_ge = x.mean(dim=(2, 3), keepdims=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) + else: + x_ge = F.avg_pool2d( + x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) + x_ge = self.mlp(x_ge) + if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: + x_ge = F.interpolate(x_ge, size=size) + return x * self.gate(x_ge) diff --git a/data_processing/MANIQA/timm/models/layers/global_context.py b/data_processing/MANIQA/timm/models/layers/global_context.py new file mode 100644 index 0000000..de7fb5c --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/global_context.py @@ -0,0 +1,67 @@ +""" Global Context Attention Block + +Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` + - https://arxiv.org/abs/1904.11492 + +Official code consulted as reference: https://github.com/xvjiarui/GCNet + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible +from .mlp import ConvMlp +from .norm import LayerNorm2d + + +class GlobalContext(nn.Module): + + def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, + rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): + super(GlobalContext, self).__init__() + act_layer = get_act_layer(act_layer) + + self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None + + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
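A hedged sketch of the Gather-Excite block above: `extent=0` gathers globally (SE-like behaviour), while a positive even extent gathers locally via pooling. Shapes are illustrative; the vendored `timm` package is assumed importable.

```python
# Hedged sketch of GatherExcite above.
import torch
from timm.models.layers.gather_excite import GatherExcite

x = torch.randn(2, 64, 14, 14)
ge_global = GatherExcite(64)             # extent=0, extra_params=False: global mean gather + MLP excite
ge_local = GatherExcite(64, extent=4)    # parameter-free local gather: avg_pool2d, kernel 2*extent-1, stride extent
print(ge_global(x).shape, ge_local(x).shape)   # both torch.Size([2, 64, 14, 14])
```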
+ if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git a/data_processing/MANIQA/timm/models/layers/halo_attn.py b/data_processing/MANIQA/timm/models/layers/halo_attn.py new file mode 100644 index 0000000..f2ac64f --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/halo_attn.py @@ -0,0 +1,233 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. 
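A hedged sketch of the GCNet-style GlobalContext block above: attention-pooled context modulates the features multiplicatively by default, or additively when `fuse_add=True`. The vendored `timm` package is assumed importable.

```python
# Hedged sketch of GlobalContext above.
import torch
from timm.models.layers.global_context import GlobalContext

x = torch.randn(2, 64, 14, 14)
gc = GlobalContext(64)                                        # use_attn=True, fuse_scale=True, fuse_add=False
gc_add = GlobalContext(64, fuse_add=True, fuse_scale=False)   # additive fusion variant ('gca' in create_attn)
print(gc(x).shape, gc_add(x).shape)                           # both torch.Size([2, 64, 14, 14])
```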
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, height, width, dim) + rel_k: (2 * window - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + rel_size = rel_k.shape[0] + win_size = (rel_size + 1) // 2 + + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, rel_size) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, rel_size - W]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, rel_size) + x = x_pad[:, :W, win_size - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + """ + def __init__(self, block_size, win_size, dim_head, scale): + """ + Args: + block_size (int): block size + win_size (int): neighbourhood window size + dim_head (int): attention head dim + scale (float): scale factor (for init) + """ + super().__init__() + self.block_size = block_size + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, BB, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, BB, HW, -1) + return rel_logits + + +class HaloAttn(nn.Module): + """ Halo Attention + + Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. 
+ * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda) + stride: output stride of the module, query downscaled if > 1 (default: 1). + num_heads: parallel attention heads (default: 8). + dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + block_size (int): size of blocks. (default: 8) + halo_size (int): size of halo overlap. (default: 3) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool) : add bias to q, k, and v projections + avg_down (bool): use average pool downsample instead of strided query blocks + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, + qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + assert stride in (1, 2) + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + self.block_size = self.block_size_ds = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.block_stride = 1 + use_avg_pool = False + if stride > 1: + use_avg_pool = avg_down or block_size % stride != 0 + self.block_stride = 1 if use_avg_pool else stride + self.block_size_ds = self.block_size // self.block_stride + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. 
+ self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H % self.block_size == 0, '') + _assert(W % self.block_size == 0, '') + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + + q = self.q(x) + # unfold + q = q.reshape( + -1, self.dim_head_qk, + num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not + # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. + # FIXME figure out how to switch impl between this and conv2d if XLA being used. + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) + k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) + # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v + + if self.scale_pos_embed: + attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale + else: + attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) + # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks + # fold + out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) + out = out.permute(0, 3, 1, 4, 2).contiguous().view( + B, self.dim_out_v, H // self.block_stride, W // self.block_stride) + # B, dim_out, H // block_stride, W // block_stride + out = self.pool(out) + return out + + +""" Three alternatives for overlapping windows. + +`.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() + + if is_xla: + # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is + # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. 
+ WW = self.win_size ** 2 + pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) + kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) + elif self.stride_tricks: + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + kv = kv.as_strided(( + B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + else: + kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + + kv = kv.reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) +""" diff --git a/data_processing/MANIQA/timm/models/layers/helpers.py b/data_processing/MANIQA/timm/models/layers/helpers.py new file mode 100644 index 0000000..cc54ca7 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/helpers.py @@ -0,0 +1,31 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < round_limit * v: + new_v += divisor + return new_v diff --git a/data_processing/MANIQA/timm/models/layers/inplace_abn.py b/data_processing/MANIQA/timm/models/layers/inplace_abn.py new file mode 100644 index 0000000..3aae7cf --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/inplace_abn.py @@ -0,0 +1,87 @@ +import torch +from torch import nn as nn + +try: + from inplace_abn.functions import inplace_abn, inplace_abn_sync + has_iabn = True +except ImportError: + has_iabn = False + + def inplace_abn(x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): + raise ImportError( + "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") + + def inplace_abn_sync(**kwargs): + inplace_abn(**kwargs) + + +class InplaceAbn(nn.Module): + """Activated Batch Normalization + + This gathers a BatchNorm and an activation function in a single module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + act_layer : str or nn.Module type + Name or type of the activation functions, one of: `leaky_relu`, `elu` + act_param : float + Negative slope for the `leaky_relu` activation. 
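A hedged usage sketch for the `HaloAttn` block defined above: the input spatial dims must be divisible by `block_size` and `dim_out` by `num_heads`; `stride=2` additionally downsamples. The vendored `timm` package is assumed importable.

```python
# Hedged sketch of HaloAttn above: H and W must be divisible by block_size.
import torch
from timm.models.layers.halo_attn import HaloAttn

x = torch.randn(2, 64, 32, 32)
attn = HaloAttn(dim=64, num_heads=8, block_size=8, halo_size=3)       # stride=1 keeps resolution
attn_ds = HaloAttn(dim=64, dim_out=128, num_heads=8, stride=2)        # stride=2 halves H and W
print(attn(x).shape, attn_ds(x).shape)   # (2, 64, 32, 32) and (2, 128, 16, 16)
```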
+ """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, + act_layer="leaky_relu", act_param=0.01, drop_block=None): + super(InplaceAbn, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + if apply_act: + if isinstance(act_layer, str): + assert act_layer in ('leaky_relu', 'elu', 'identity', '') + self.act_name = act_layer if act_layer else 'identity' + else: + # convert act layer passed as type to string + if act_layer == nn.ELU: + self.act_name = 'elu' + elif act_layer == nn.LeakyReLU: + self.act_name = 'leaky_relu' + elif act_layer == nn.Identity: + self.act_name = 'identity' + else: + assert False, f'Invalid act layer {act_layer.__name__} for IABN' + else: + self.act_name = 'identity' + self.act_param = act_param + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + output = inplace_abn( + x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.act_name, self.act_param) + if isinstance(output, tuple): + output = output[0] + return output diff --git a/data_processing/MANIQA/timm/models/layers/lambda_layer.py b/data_processing/MANIQA/timm/models/layers/lambda_layer.py new file mode 100644 index 0000000..e50b43c --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/lambda_layer.py @@ -0,0 +1,133 @@ +""" Lambda Layer + +Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + +@misc{2102.08602, +Author = {Irwan Bello}, +Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, +Year = {2021}, +} + +Status: +This impl is a WIP. Code snippets in the paper were used as reference but +good chance some details are missing/wrong. + +I've only implemented local lambda conv based pos embeddings. + +For a PyTorch impl that includes other embedding options checkout +https://github.com/lucidrains/lambda-networks + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ + + +def rel_pos_indices(size): + size = to_2tuple(size) + pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) + rel_pos = pos[:, None, :] - pos[:, :, None] + rel_pos[0] += size[0] - 1 + rel_pos[1] += size[1] - 1 + return rel_pos # 2, H * W, H * W + + +class LambdaLayer(nn.Module): + """Lambda Layer + + Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + + NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. 
+ + The internal dimensions of the lambda module are controlled via the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query (q) and key (k) dimension are determined by + * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None + * q = num_heads * dim_head, k = dim_head + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W + stride (int): output stride of the module, avg pool used if stride == 2 + num_heads (int): parallel attention heads. + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, + qk_ratio=1.0, qkv_bias=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0, ' should be divided by num_heads' + self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.num_heads = num_heads + self.dim_v = dim_out // num_heads + + self.qkv = nn.Conv2d( + dim, + num_heads * self.dim_qk + self.dim_qk + self.dim_v, + kernel_size=1, bias=qkv_bias) + self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) + self.norm_v = nn.BatchNorm2d(self.dim_v) + + if r is not None: + # local lambda convolution for pos + self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) + self.pos_emb = None + self.rel_pos_indices = None + else: + # relative pos embedding + assert feat_size is not None + feat_size = to_2tuple(feat_size) + rel_size = [2 * s - 1 for s in feat_size] + self.conv_lambda = None + self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) + self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + if self.conv_lambda is not None: + trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) + if self.pos_emb is not None: + trunc_normal_(self.pos_emb, std=.02) + + def forward(self, x): + B, C, H, W = x.shape + M = H * W + qkv = self.qkv(x) + q, k, v = torch.split(qkv, [ + self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) + q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K + v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V + k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M + + content_lam = k @ v # B, K, V + content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V + + if self.pos_emb is None: + position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K + position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, 
self.dim_v).transpose(2, 3) # B, 1, M, K, V + else: + # FIXME relative pos embedding path not fully verified + pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) + position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V + position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V + + out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W + out = self.pool(out) + return out diff --git a/data_processing/MANIQA/timm/models/layers/linear.py b/data_processing/MANIQA/timm/models/layers/linear.py new file mode 100644 index 0000000..38fe338 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. + """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/data_processing/MANIQA/timm/models/layers/median_pool.py b/data_processing/MANIQA/timm/models/layers/median_pool.py new file mode 100644 index 0000000..40bd71a --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. 
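A hedged sketch of the lambda layer above using the local (conv) position lambdas (`r=9`), so no `feat_size` is needed; the vendored `timm` package is assumed importable.

```python
# Hedged sketch of LambdaLayer above.
import torch
from timm.models.layers.lambda_layer import LambdaLayer

x = torch.randn(2, 64, 16, 16)
lam = LambdaLayer(dim=64, num_heads=4, r=9)   # dim_out defaults to dim; dim_head defaults to 16
print(lam(x).shape)                           # torch.Size([2, 64, 16, 16])
```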
+ + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/data_processing/MANIQA/timm/models/layers/mixed_conv2d.py b/data_processing/MANIQA/timm/models/layers/mixed_conv2d.py new file mode 100644 index 0000000..fa0ce56 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/data_processing/MANIQA/timm/models/layers/mlp.py b/data_processing/MANIQA/timm/models/layers/mlp.py new file mode 100644 index 0000000..a85e28d --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/mlp.py @@ -0,0 +1,119 @@ +""" MLP module w/ dropout and configurable activation layer + +Hacked together by / 
Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GluMlp(nn.Module): + """ MLP w/ GLU style gating + See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + assert hidden_features % 2 == 0 + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features // 2, out_features) + self.drop2 = nn.Dropout(drop_probs[1]) + + def init_weights(self): + # override init of fc1 w/ gate portion set to weight near zero, bias=1 + fc1_mid = self.fc1.bias.shape[0] // 2 + nn.init.ones_(self.fc1.bias[fc1_mid:]) + nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x, gates = x.chunk(2, dim=-1) + x = x * self.act(gates) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GatedMlp(nn.Module): + """ MLP as used in gMLP + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + gate_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if gate_layer is not None: + assert hidden_features % 2 == 0 + self.gate = gate_layer(hidden_features) + hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
+ else: + self.gate = nn.Identity() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.gate(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x diff --git a/data_processing/MANIQA/timm/models/layers/non_local_attn.py b/data_processing/MANIQA/timm/models/layers/non_local_attn.py new file mode 100644 index 0000000..881fa36 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/non_local_attn.py @@ -0,0 +1,145 @@ +""" Bilinear-Attention-Transform and Non-Local Attention + +Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` + - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html +Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification +""" +import torch +from torch import nn +from torch.nn import functional as F + +from .conv_bn_act import ConvBnAct +from .helpers import make_divisible +from .trace_utils import _assert + + +class NonLocalAttn(nn.Module): + """Spatial NL block for image classification. + + This was adapted from https://github.com/BA-Transform/BAT-Image-Classification + Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. 
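+
+    Rough usage sketch (editor's addition, not in the upstream timm source):
+        attn = NonLocalAttn(in_channels=64)    # reduction channels default to 64 * 1/8 = 8
+        y = attn(torch.randn(2, 64, 32, 32))   # output keeps the input shape (2, 64, 32, 32)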
+ """ + + def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): + super(NonLocalAttn, self).__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.scale = in_channels ** -0.5 if use_scale else 1.0 + self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) + self.norm = nn.BatchNorm2d(in_channels) + self.reset_parameters() + + def forward(self, x): + shortcut = x + + t = self.t(x) + p = self.p(x) + g = self.g(x) + + B, C, H, W = t.size() + t = t.view(B, C, -1).permute(0, 2, 1) + p = p.view(B, C, -1) + g = g.view(B, C, -1).permute(0, 2, 1) + + att = torch.bmm(t, p) * self.scale + att = F.softmax(att, dim=2) + x = torch.bmm(att, g) + + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.z(x) + x = self.norm(x) + shortcut + + return x + + def reset_parameters(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + if len(list(m.parameters())) > 1: + nn.init.constant_(m.bias, 0.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + + +class BilinearAttnTransform(nn.Module): + + def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(BilinearAttnTransform, self).__init__() + + self.conv1 = ConvBnAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) + self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) + self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) + self.conv2 = ConvBnAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.block_size = block_size + self.groups = groups + self.in_channels = in_channels + + def resize_mat(self, x, t: int): + B, C, block_size, block_size1 = x.shape + _assert(block_size == block_size1, '') + if t <= 1: + return x + x = x.view(B * C, -1, 1, 1) + x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) + x = x.view(B * C, block_size, block_size, t, t) + x = torch.cat(torch.split(x, 1, dim=1), dim=3) + x = torch.cat(torch.split(x, 1, dim=2), dim=4) + x = x.view(B, C, block_size * t, block_size * t) + return x + + def forward(self, x): + _assert(x.shape[-1] % self.block_size == 0, '') + _assert(x.shape[-2] % self.block_size == 0, '') + B, C, H, W = x.shape + out = self.conv1(x) + rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) + cp = F.adaptive_max_pool2d(out, (1, self.block_size)) + p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + p = p / p.sum(dim=3, keepdim=True) + q = q / q.sum(dim=2, keepdim=True) + p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + p = p.view(B, C, self.block_size, self.block_size) + q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // 
self.groups, self.block_size, self.block_size).contiguous() + q = q.view(B, C, self.block_size, self.block_size) + p = self.resize_mat(p, H // self.block_size) + q = self.resize_mat(q, W // self.block_size) + y = p.matmul(x) + y = y.matmul(q) + + y = self.conv2(y) + return y + + +class BatNonLocalAttn(nn.Module): + """ BAT + Adapted from: https://github.com/BA-Transform/BAT-Image-Classification + """ + + def __init__( + self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): + super().__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.conv1 = ConvBnAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) + self.conv2 = ConvBnAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.dropout = nn.Dropout2d(p=drop_rate) + + def forward(self, x): + xl = self.conv1(x) + y = self.ba(xl) + y = self.conv2(y) + y = self.dropout(y) + return y + x diff --git a/data_processing/MANIQA/timm/models/layers/norm.py b/data_processing/MANIQA/timm/models/layers/norm.py new file mode 100644 index 0000000..8529742 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/norm.py @@ -0,0 +1,24 @@ +""" Normalization layers and wrappers +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class GroupNorm(nn.GroupNorm): + def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): + # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN + super().__init__(num_groups, num_channels, eps=eps, affine=affine) + + def forward(self, x): + return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + + +class LayerNorm2d(nn.LayerNorm): + """ LayerNorm for channels of '2D' spatial BCHW tensors """ + def __init__(self, num_channels): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) diff --git a/data_processing/MANIQA/timm/models/layers/norm_act.py b/data_processing/MANIQA/timm/models/layers/norm_act.py new file mode 100644 index 0000000..2e15181 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/norm_act.py @@ -0,0 +1,85 @@ +""" Normalization + Activation Layers +""" +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .create_act import get_act_layer + + +class BatchNormAct2d(nn.BatchNorm2d): + """BatchNorm + Activation + + This module performs BatchNorm + Activation in a manner that will remain backwards + compatible with weights trained with separate bn, act. This is why we inherit from BN + instead of composing it as a .bn member. 
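+
+    A minimal sketch (editor's note, not in the original docstring): BatchNormAct2d(64, act_layer='relu')
+    behaves like nn.BatchNorm2d(64) followed by nn.ReLU(inplace=True), while its state_dict keys stay
+    identical to the plain BatchNorm layer, so weights trained with a separate bn + act still load.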
+ """ + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None): + super(BatchNormAct2d, self).__init__( + num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) + if isinstance(act_layer, str): + act_layer = get_act_layer(act_layer) + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def _forward_jit(self, x): + """ A cut & paste of the contents of the PyTorch BatchNorm2d forward function + """ + # exponential_average_factor is self.momentum set to + # (when it is available) only so that if gets updated + # in ONNX graph when this node is exported to ONNX. + if self.momentum is None: + exponential_average_factor = 0.0 + else: + exponential_average_factor = self.momentum + + if self.training and self.track_running_stats: + # TODO: if statement only here to tell the jit to skip emitting this when it is None + if self.num_batches_tracked is not None: + self.num_batches_tracked += 1 + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + x = F.batch_norm( + x, self.running_mean, self.running_var, self.weight, self.bias, + self.training or not self.track_running_stats, + exponential_average_factor, self.eps) + return x + + @torch.jit.ignore + def _forward_python(self, x): + return super(BatchNormAct2d, self).forward(x) + + def forward(self, x): + # FIXME cannot call parent forward() and maintain jit.script compatibility? + if torch.jit.is_scripting(): + x = self._forward_jit(x) + else: + x = self._forward_python(x) + x = self.act(x) + return x + + +class GroupNormAct(nn.GroupNorm): + # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args + def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None): + super(GroupNormAct, self).__init__(num_groups, num_channels, eps=eps, affine=affine) + if isinstance(act_layer, str): + act_layer = get_act_layer(act_layer) + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + x = self.act(x) + return x diff --git a/data_processing/MANIQA/timm/models/layers/padding.py b/data_processing/MANIQA/timm/models/layers/padding.py new file mode 100644 index 0000000..34afc37 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/padding.py @@ -0,0 +1,56 @@ +""" Padding Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from typing import List, Tuple + +import torch.nn.functional as F + + +# Calculate symmetric padding for a convolution +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution +def get_same_padding(x: int, k: int, s: int, d: int): + return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) + + +# Can SAME padding for given args be done statically? 
+def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): + ih, iw = x.size()[-2:] + pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/data_processing/MANIQA/timm/models/layers/patch_embed.py b/data_processing/MANIQA/timm/models/layers/patch_embed.py new file mode 100644 index 0000000..6a7face --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/patch_embed.py @@ -0,0 +1,39 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. + +Based on the impl in https://github.com/google-research/vision_transformer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple +from .trace_utils import _assert + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") + _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x diff --git a/data_processing/MANIQA/timm/models/layers/pool2d_same.py b/data_processing/MANIQA/timm/models/layers/pool2d_same.py new file mode 100644 index 0000000..4c2a1c4 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers 
import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = False, count_include_pad: bool = True): + # FIXME how to deal with count_include_pad vs not for external padding? + x = pad_same(x, kernel_size, stride) + return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + +class AvgPool2dSame(nn.AvgPool2d): + """ Tensorflow like 'SAME' wrapper for 2D average pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride) + return F.avg_pool2d( + x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) + + +def max_pool2d_same( + x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + dilation: List[int] = (1, 1), ceil_mode: bool = False): + x = pad_same(x, kernel_size, stride, value=-float('inf')) + return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) + + +class MaxPool2dSame(nn.MaxPool2d): + """ Tensorflow like 'SAME' wrapper for 2D max pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) + return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) + + +def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): + stride = stride or kernel_size + padding = kwargs.pop('padding', '') + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) + if is_dynamic: + if pool_type == 'avg': + return AvgPool2dSame(kernel_size, stride=stride, **kwargs) + elif pool_type == 'max': + return MaxPool2dSame(kernel_size, stride=stride, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' + else: + if pool_type == 'avg': + return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + elif pool_type == 'max': + return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' diff --git a/data_processing/MANIQA/timm/models/layers/selective_kernel.py b/data_processing/MANIQA/timm/models/layers/selective_kernel.py new file mode 100644 index 0000000..1aeb929 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/selective_kernel.py @@ -0,0 +1,120 @@ +""" Selective Kernel Convolution/Attention + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn + +from .conv_bn_act import ConvBnAct +from .helpers import make_divisible +from .trace_utils import _assert + + +def _kernel_valid(k): + if isinstance(k, (list, tuple)): + for ki in k: + return _kernel_valid(ki) + assert k >= 3 and k % 2 + + +class SelectiveKernelAttn(nn.Module): + def __init__(self, channels, num_paths=2, attn_channels=32, + act_layer=nn.ReLU, 
norm_layer=nn.BatchNorm2d): + """ Selective Kernel Attention Module + + Selective Kernel attention mechanism factored out into its own module. + + """ + super(SelectiveKernelAttn, self).__init__() + self.num_paths = num_paths + self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) + self.bn = norm_layer(attn_channels) + self.act = act_layer(inplace=True) + self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) + + def forward(self, x): + _assert(x.shape[1] == self.num_paths, '') + x = x.sum(1).mean((2, 3), keepdim=True) + x = self.fc_reduce(x) + x = self.bn(x) + x = self.act(x) + x = self.fc_select(x) + B, C, H, W = x.shape + x = x.view(B, self.num_paths, C // self.num_paths, H, W) + x = torch.softmax(x, dim=1) + return x + + +class SelectiveKernel(nn.Module): + + def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, + rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, + drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None): + """ Selective Kernel Convolution Module + + As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. + + Largest change is the input split, which divides the input channels across each convolution path, this can + be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps + the parameter count from ballooning when the convolutions themselves don't have groups, but still provides + a noteworthy increase in performance over similar param count models without this attention layer. -Ross W + + Args: + in_channels (int): module input (feature) channel count + out_channels (int): module output (feature) channel count + kernel_size (int, list): kernel size for each convolution branch + stride (int): stride for convolutions + dilation (int): dilation for module as a whole, impacts dilation of each branch + groups (int): number of groups for each branch + rd_ratio (int, float): reduction factor for attention features + keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations + split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, + can be viewed as grouping by path, output expands to module out_channels count + drop_block (nn.Module): drop block module + act_layer (nn.Module): activation layer to use + norm_layer (nn.Module): batchnorm/norm layer to use + """ + super(SelectiveKernel, self).__init__() + out_channels = out_channels or in_channels + kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 
5x5 -> 3x3 + dilation + _kernel_valid(kernel_size) + if not isinstance(kernel_size, list): + kernel_size = [kernel_size] * 2 + if keep_3x3: + dilation = [dilation * (k - 1) // 2 for k in kernel_size] + kernel_size = [3] * len(kernel_size) + else: + dilation = [dilation] * len(kernel_size) + self.num_paths = len(kernel_size) + self.in_channels = in_channels + self.out_channels = out_channels + self.split_input = split_input + if self.split_input: + assert in_channels % self.num_paths == 0 + in_channels = in_channels // self.num_paths + groups = min(out_channels, groups) + + conv_kwargs = dict( + stride=stride, groups=groups, drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, + aa_layer=aa_layer) + self.paths = nn.ModuleList([ + ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) + for k, d in zip(kernel_size, dilation)]) + + attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) + self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) + self.drop_block = drop_block + + def forward(self, x): + if self.split_input: + x_split = torch.split(x, self.in_channels // self.num_paths, 1) + x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] + else: + x_paths = [op(x) for op in self.paths] + x = torch.stack(x_paths, dim=1) + x_attn = self.attn(x) + x = x * x_attn + x = torch.sum(x, dim=1) + return x diff --git a/data_processing/MANIQA/timm/models/layers/separable_conv.py b/data_processing/MANIQA/timm/models/layers/separable_conv.py new file mode 100644 index 0000000..1ddcb4e --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/separable_conv.py @@ -0,0 +1,73 @@ +""" Depthwise Separable Conv Modules + +Basic DWS convs. Other variations of DWS exist with batch norm or activations between the +DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. 
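+
+Usage sketch (editor's addition, not in the upstream source):
+    conv = SeparableConv2d(32, 64, kernel_size=3, padding='same')  # depthwise 3x3 then pointwise 1x1
+    y = conv(torch.randn(1, 32, 56, 56))                           # -> shape (1, 64, 56, 56)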
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class SeparableConvBnAct(nn.Module): + """ Separable Conv w/ trailing Norm and Activation + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, + apply_act=True, drop_block=None): + super(SeparableConvBnAct, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + if self.bn is not None: + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + """ Separable Conv + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1): + super(SeparableConv2d, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + return x diff --git a/data_processing/MANIQA/timm/models/layers/space_to_depth.py b/data_processing/MANIQA/timm/models/layers/space_to_depth.py new file mode 100644 index 0000000..a7e8e0b --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/space_to_depth.py @@ -0,0 +1,53 @@ +import torch +import torch.nn as nn + + +class SpaceToDepth(nn.Module): + def __init__(self, block_size=4): + super().__init__() + assert block_size == 4 + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +@torch.jit.script +class SpaceToDepthJit(object): + def __call__(self, x: torch.Tensor): + # assuming hard-coded that block_size==4 for acceleration + N, C, H, W = x.size() + x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) + return x + + +class SpaceToDepthModule(nn.Module): + def __init__(self, no_jit=False): + super().__init__() + if not no_jit: + self.op = SpaceToDepthJit() + else: + self.op = SpaceToDepth() + + def forward(self, x): + return self.op(x) + + +class DepthToSpace(nn.Module): + + def __init__(self, 
block_size): + super().__init__() + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) + x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) + return x diff --git a/data_processing/MANIQA/timm/models/layers/split_attn.py b/data_processing/MANIQA/timm/models/layers/split_attn.py new file mode 100644 index 0000000..dde601b --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/split_attn.py @@ -0,0 +1,85 @@ +""" Split Attention Conv2d (for ResNeSt Models) + +Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt + +Modified for torchscript compat, performance, and consistency with timm by Ross Wightman +""" +import torch +import torch.nn.functional as F +from torch import nn + +from .helpers import make_divisible + + +class RadixSoftmax(nn.Module): + def __init__(self, radix, cardinality): + super(RadixSoftmax, self).__init__() + self.radix = radix + self.cardinality = cardinality + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttn(nn.Module): + """Split-Attention (aka Splat) + """ + def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, + dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs): + super(SplitAttn, self).__init__() + out_channels = out_channels or in_channels + self.radix = radix + self.drop_block = drop_block + mid_chs = out_channels * radix + if rd_channels is None: + attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) + else: + attn_chs = rd_channels * radix + + padding = kernel_size // 2 if padding is None else padding + self.conv = nn.Conv2d( + in_channels, mid_chs, kernel_size, stride, padding, dilation, + groups=groups * radix, bias=bias, **kwargs) + self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() + self.act0 = act_layer(inplace=True) + self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) + self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() + self.act1 = act_layer(inplace=True) + self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) + self.rsoftmax = RadixSoftmax(radix, groups) + + def forward(self, x): + x = self.conv(x) + x = self.bn0(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act0(x) + + B, RC, H, W = x.shape + if self.radix > 1: + x = x.reshape((B, self.radix, RC // self.radix, H, W)) + x_gap = x.sum(dim=1) + else: + x_gap = x + x_gap = x_gap.mean((2, 3), keepdim=True) + x_gap = self.fc1(x_gap) + x_gap = self.bn1(x_gap) + x_gap = self.act1(x_gap) + x_attn = self.fc2(x_gap) + + x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) + if self.radix > 1: + out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) + else: + out = x * x_attn + return out.contiguous() diff --git a/data_processing/MANIQA/timm/models/layers/split_batchnorm.py 
b/data_processing/MANIQA/timm/models/layers/split_batchnorm.py new file mode 100644 index 0000000..830781b --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/split_batchnorm.py @@ -0,0 +1,75 @@ +""" Split BatchNorm + +A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through +a separate BN layer. The first split is passed through the parent BN layers with weight/bias +keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' +namespace. + +This allows easily removing the auxiliary BN layers after training to efficiently +achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, +'Disentangled Learning via An Auxiliary BN' + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn + + +class SplitBatchNorm2d(torch.nn.BatchNorm2d): + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, + track_running_stats=True, num_splits=2): + super().__init__(num_features, eps, momentum, affine, track_running_stats) + assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' + self.num_splits = num_splits + self.aux_bn = nn.ModuleList([ + nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) + + def forward(self, input: torch.Tensor): + if self.training: # aux BN only relevant while training + split_size = input.shape[0] // self.num_splits + assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" + split_input = input.split(split_size) + x = [super().forward(split_input[0])] + for i, a in enumerate(self.aux_bn): + x.append(a(split_input[i + 1])) + return torch.cat(x, dim=0) + else: + return super().forward(input) + + +def convert_splitbn_model(module, num_splits=2): + """ + Recursively traverse module and its children to replace all instances of + ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. 
+ Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/data_processing/MANIQA/timm/models/layers/squeeze_excite.py b/data_processing/MANIQA/timm/models/layers/squeeze_excite.py new file mode 100644 index 0000000..e5da29e --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/squeeze_excite.py @@ -0,0 +1,74 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. +Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). +Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
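+        # e.g. channels=256 with the default rd_ratio=1/16 gives rd_channels=16
+        # (make_divisible keeps the reduced width a multiple of rd_divisor=8)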
+ self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=True) + self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc1(x_se) + x_se = self.act(self.bn(x_se)) + x_se = self.fc2(x_se) + return x * self.gate(x_se) + + +SqueezeExcite = SEModule # alias + + +class EffectiveSEModule(nn.Module): + """ 'Effective Squeeze-Excitation + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + """ + def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): + super(EffectiveSEModule, self).__init__() + self.add_maxpool = add_maxpool + self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.gate(x_se) + + +EffectiveSqueezeExcite = EffectiveSEModule # alias diff --git a/data_processing/MANIQA/timm/models/layers/std_conv.py b/data_processing/MANIQA/timm/models/layers/std_conv.py new file mode 100644 index 0000000..d896ba5 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/std_conv.py @@ -0,0 +1,133 @@ +""" Convolution with Weight Standardization (StdConv and ScaledStdConv) + +StdConv: +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +Code: https://github.com/joe-siyuan-qiao/WeightStandardization + +ScaledStdConv: +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Hacked together by / copyright Ross Wightman, 2021. +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .padding import get_padding, get_padding_value, pad_same + + +class StdConv2d(nn.Conv2d): + """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. 
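+
+    Editor's note (not in the upstream docstring): each forward pass re-standardizes the kernel
+    per output channel (zero mean, unit variance over in_channels x kH x kW) using the F.batch_norm
+    trick below; the stored `weight` remains an ordinary Conv2d parameter, so checkpoints stay
+    interchangeable with plain nn.Conv2d.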
+ + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=False, eps=1e-6): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class StdConv2dSame(nn.Conv2d): + """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=False, eps=1e-6): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class ScaledStdConv2d(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization. + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
+ """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class ScaledStdConv2dSame(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) diff --git a/data_processing/MANIQA/timm/models/layers/test_time_pool.py b/data_processing/MANIQA/timm/models/layers/test_time_pool.py new file mode 100644 index 0000000..98c0bf5 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/test_time_pool.py @@ -0,0 +1,52 @@ +""" Test Time Pooling (Average-Max Pool) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +from torch import nn +import torch.nn.functional as F + +from .adaptive_avgmax_pool import adaptive_avgmax_pool2d + + +_logger = logging.getLogger(__name__) + + +class TestTimePoolHead(nn.Module): + def __init__(self, base, original_pool=7): + super(TestTimePoolHead, self).__init__() + self.base = base + self.original_pool = original_pool + base_fc = self.base.get_classifier() + if isinstance(base_fc, nn.Conv2d): + self.fc = base_fc + else: + self.fc = nn.Conv2d( + self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) + self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) + self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) + self.base.reset_classifier(0) # delete original fc layer + + def forward(self, x): + x = self.base.forward_features(x) + x = F.avg_pool2d(x, 
kernel_size=self.original_pool, stride=1) + x = self.fc(x) + x = adaptive_avgmax_pool2d(x, 1) + return x.view(x.size(0), -1) + + +def apply_test_time_pool(model, config, use_test_size=True): + test_time_pool = False + if not hasattr(model, 'default_cfg') or not model.default_cfg: + return model, False + if use_test_size and 'test_input_size' in model.default_cfg: + df_input_size = model.default_cfg['test_input_size'] + else: + df_input_size = model.default_cfg['input_size'] + if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: + _logger.info('Target input size %s > pretrained default %s, using test time pooling' % + (str(config['input_size'][-2:]), str(df_input_size[-2:]))) + model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) + test_time_pool = True + return model, test_time_pool diff --git a/data_processing/MANIQA/timm/models/layers/trace_utils.py b/data_processing/MANIQA/timm/models/layers/trace_utils.py new file mode 100644 index 0000000..8397072 --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/trace_utils.py @@ -0,0 +1,13 @@ +try: + from torch import _assert +except ImportError: + def _assert(condition: bool, message: str): + assert condition, message + + +def _float_to_int(x: float) -> int: + """ + Symbolic tracing helper to substitute for inbuilt `int`. + Hint: Inbuilt `int` can't accept an argument of type `Proxy` + """ + return int(x) diff --git a/data_processing/MANIQA/timm/models/layers/weight_init.py b/data_processing/MANIQA/timm/models/layers/weight_init.py new file mode 100644 index 0000000..305a2fd --- /dev/null +++ b/data_processing/MANIQA/timm/models/layers/weight_init.py @@ -0,0 +1,89 @@ +import torch +import math +import warnings + +from torch.nn.init import _calculate_fan_in_and_fan_out + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. 
The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + if mode == 'fan_in': + denom = fan_in + elif mode == 'fan_out': + denom = fan_out + elif mode == 'fan_avg': + denom = (fan_in + fan_out) / 2 + + variance = scale / denom + + if distribution == "truncated_normal": + # constant is stddev of standard normal truncated to (-2, 2) + trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) + elif distribution == "normal": + tensor.normal_(std=math.sqrt(variance)) + elif distribution == "uniform": + bound = math.sqrt(3 * variance) + tensor.uniform_(-bound, bound) + else: + raise ValueError(f"invalid distribution {distribution}") + + +def lecun_normal_(tensor): + variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') diff --git a/data_processing/MANIQA/timm/models/levit.py b/data_processing/MANIQA/timm/models/levit.py new file mode 100644 index 0000000..4f400fd --- /dev/null +++ b/data_processing/MANIQA/timm/models/levit.py @@ -0,0 +1,563 @@ +""" LeViT + +Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference` + - https://arxiv.org/abs/2104.01136 + +@article{graham2021levit, + title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference}, + author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze}, + journal={arXiv preprint arXiv:22104.01136}, + year={2021} +} + +Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright bellow. + +This version combines both conv/linear models and fixes torchscript compatibility. + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" + +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. 
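+
+# Editor's sketch (not part of the upstream file): the @register_model constructors below are
+# meant to be built through timm's factory, assuming this vendored copy is importable as `timm`:
+#
+#   import timm, torch
+#   model = timm.create_model('levit_128s', pretrained=False, num_classes=10)
+#   out = model(torch.randn(1, 3, 224, 224))   # with distillation=True, training mode may return a tuple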
+ +# Modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# Copyright 2020 Ross Wightman, Apache-2.0 License +import itertools +from copy import deepcopy +from functools import partial +from typing import Dict + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_ntuple, get_act_layer +from .vision_transformer import trunc_normal_ +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.0.c', 'classifier': ('head.l', 'head_dist.l'), + **kwargs + } + + +default_cfgs = dict( + levit_128s=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth' + ), + levit_128=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth' + ), + levit_192=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth' + ), + levit_256=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth' + ), + levit_384=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth' + ), +) + +model_cfgs = dict( + levit_128s=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), + levit_128=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), + levit_192=dict( + embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), + levit_256=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), + levit_384=dict( + embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), +) + +__all__ = ['Levit'] + + +@register_model +def levit_128s(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128s', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_128(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_192(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_192', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_256(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_256', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_384(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_384', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +class ConvNorm(nn.Sequential): + def __init__( + self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1, resolution=-10000): + super().__init__() + self.add_module('c', nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False)) + bn = nn.BatchNorm2d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() 
+ def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Conv2d( + w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, + padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class LinearNorm(nn.Sequential): + def __init__(self, a, b, bn_weight_init=1, resolution=-100000): + super().__init__() + self.add_module('c', nn.Linear(a, b, bias=False)) + bn = nn.BatchNorm1d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + l, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[:, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + x = self.c(x) + return self.bn(x.flatten(0, 1)).reshape_as(x) + + +class NormLinear(nn.Sequential): + def __init__(self, a, b, bias=True, std=0.02): + super().__init__() + self.add_module('bn', nn.BatchNorm1d(a)) + l = nn.Linear(a, b, bias=bias) + trunc_normal_(l.weight, std=std) + if bias: + nn.init.constant_(l.bias, 0) + self.add_module('l', l) + + @torch.no_grad() + def fuse(self): + bn, l = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[None, :] + if l.bias is None: + b = b @ self.l.weight.T + else: + b = (l.weight @ b[:, None]).view(-1) + self.l.bias + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +def stem_b16(in_chs, out_chs, activation, resolution=224): + return nn.Sequential( + ConvNorm(in_chs, out_chs // 8, 3, 2, 1, resolution=resolution), + activation(), + ConvNorm(out_chs // 8, out_chs // 4, 3, 2, 1, resolution=resolution // 2), + activation(), + ConvNorm(out_chs // 4, out_chs // 2, 3, 2, 1, resolution=resolution // 4), + activation(), + ConvNorm(out_chs // 2, out_chs, 3, 2, 1, resolution=resolution // 8)) + + +class Residual(nn.Module): + def __init__(self, m, drop): + super().__init__() + self.m = m + self.drop = drop + + def forward(self, x): + if self.training and self.drop > 0: + return x + self.m(x) * torch.rand( + x.size(0), 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() + else: + return x + self.m(x) + + +class Subsample(nn.Module): + def __init__(self, stride, resolution): + super().__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, N, C = x.shape + x = x.view(B, self.resolution, self.resolution, C)[:, ::self.stride, ::self.stride] + return x.reshape(B, -1, C) + + +class Attention(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, dim, key_dim, num_heads=8, attn_ratio=4, act_layer=None, resolution=14, use_conv=False): + super().__init__() + + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + h = self.dh + nh_kd * 2 + self.qkv = ln_layer(dim, h, 
resolution=resolution) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, dim, bn_weight_init=0, resolution=resolution)) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N)) + self.ab = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): # x (B,C,H,W) + if self.use_conv: + B, C, H, W = x.shape + q, k, v = self.qkv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.d], dim=2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) + else: + B, N, C = x.shape + qkv = self.qkv(x) + q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class AttentionSubsample(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2, + act_layer=None, stride=2, resolution=14, resolution_=7, use_conv=False): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = self.d * self.num_heads + self.attn_ratio = attn_ratio + self.resolution_ = resolution_ + self.resolution_2 = resolution_ ** 2 + self.use_conv = use_conv + if self.use_conv: + ln_layer = ConvNorm + sub_layer = partial(nn.AvgPool2d, kernel_size=1, padding=0) + else: + ln_layer = LinearNorm + sub_layer = partial(Subsample, resolution=resolution) + + h = self.dh + nh_kd + self.kv = ln_layer(in_dim, h, resolution=resolution) + self.q = nn.Sequential( + sub_layer(stride=stride), + ln_layer(in_dim, nh_kd, resolution=resolution_)) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, out_dim, resolution=resolution_)) + + self.stride = stride + self.resolution = resolution + points = list(itertools.product(range(resolution), range(resolution))) + points_ = list(itertools.product(range(resolution_), range(resolution_))) + N = len(points) + N_ = len(points_) + attention_offsets = {} + idxs = [] + for p1 in points_: + for p2 in points: + size = 1 + offset = ( + abs(p1[0] * stride - p2[0] + (size - 1) / 2), + abs(p1[1] * stride - p2[1] + (size - 1) / 2)) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + 
self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N_, N)) + self.ab = {} # per-device attention_biases cache + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): + if self.use_conv: + B, C, H, W = x.shape + k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.d], dim=2) + q = self.q(x).view(B, self.num_heads, self.key_dim, self.resolution_2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).reshape(B, -1, self.resolution_, self.resolution_) + else: + B, N, C = x.shape + k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.d], dim=3) + k = k.permute(0, 2, 1, 3) # BHNC + v = v.permute(0, 2, 1, 3) # BHNC + q = self.q(x).view(B, self.resolution_2, self.num_heads, self.key_dim).permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh) + x = self.proj(x) + return x + + +class Levit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + + NOTE: distillation is defaulted to True since pretrained weights use it, will cause problems + w/ train scripts that don't take tuple outputs, + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=(192,), + key_dim=64, + depth=(12,), + num_heads=(3,), + attn_ratio=2, + mlp_ratio=2, + hybrid_backbone=None, + down_ops=None, + act_layer='hard_swish', + attn_act_layer='hard_swish', + distillation=True, + use_conv=False, + drop_rate=0., + drop_path_rate=0.): + super().__init__() + act_layer = get_act_layer(act_layer) + attn_act_layer = get_act_layer(attn_act_layer) + if isinstance(img_size, tuple): + # FIXME origin impl passes single img/res dim through whole hierarchy, + # not sure this model will be used enough to spend time fixing it. 
+ assert img_size[0] == img_size[1] + img_size = img_size[0] + self.num_classes = num_classes + self.num_features = embed_dim[-1] + self.embed_dim = embed_dim + N = len(embed_dim) + assert len(depth) == len(num_heads) == N + key_dim = to_ntuple(N)(key_dim) + attn_ratio = to_ntuple(N)(attn_ratio) + mlp_ratio = to_ntuple(N)(mlp_ratio) + down_ops = down_ops or ( + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + ('Subsample', key_dim[0], embed_dim[0] // key_dim[0], 4, 2, 2), + ('Subsample', key_dim[0], embed_dim[1] // key_dim[1], 4, 2, 2), + ('',) + ) + self.distillation = distillation + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + + self.patch_embed = hybrid_backbone or stem_b16(in_chans, embed_dim[0], activation=act_layer) + + self.blocks = [] + resolution = img_size // patch_size + for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)): + for _ in range(dpth): + self.blocks.append( + Residual( + Attention( + ed, kd, nh, attn_ratio=ar, act_layer=attn_act_layer, + resolution=resolution, use_conv=use_conv), + drop_path_rate)) + if mr > 0: + h = int(ed * mr) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(ed, h, resolution=resolution), + act_layer(), + ln_layer(h, ed, bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + if do[0] == 'Subsample': + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + resolution_ = (resolution - 1) // do[5] + 1 + self.blocks.append( + AttentionSubsample( + *embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2], + attn_ratio=do[3], act_layer=attn_act_layer, stride=do[5], + resolution=resolution, resolution_=resolution_, use_conv=use_conv)) + resolution = resolution_ + if do[4] > 0: # mlp_ratio + h = int(embed_dim[i + 1] * do[4]) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(embed_dim[i + 1], h, resolution=resolution), + act_layer(), + ln_layer(h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + self.blocks = nn.Sequential(*self.blocks) + + # Classifier head + self.head = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distillation: + self.head_dist = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + def get_classifier(self): + if self.head_dist is None: + return self.head + else: + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool='', distillation=None): + self.num_classes = num_classes + self.head = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + if distillation is not None: + self.distillation = distillation + if self.distillation: + self.head_dist = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + else: + self.head_dist = None + + def forward_features(self, x): + x = self.patch_embed(x) + if not self.use_conv: + x = x.flatten(2).transpose(1, 2) + x = self.blocks(x) + x = x.mean((-2, -1)) if self.use_conv else x.mean(1) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x), self.head_dist(x) + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + # during inference, return the average of both classifier predictions + return (x + 
x_dist) / 2 + else: + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + D = model.state_dict() + for k in state_dict.keys(): + if k in D and D[k].ndim == 4 and state_dict[k].ndim == 2: + state_dict[k] = state_dict[k][:, :, None, None] + return state_dict + + +def create_levit(variant, pretrained=False, default_cfg=None, fuse=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model_cfg = dict(**model_cfgs[variant], **kwargs) + model = build_model_with_cfg( + Levit, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **model_cfg) + #if fuse: + # utils.replace_batchnorm(model) + return model + diff --git a/data_processing/MANIQA/timm/models/mlp_mixer.py b/data_processing/MANIQA/timm/models/mlp_mixer.py new file mode 100644 index 0000000..727b655 --- /dev/null +++ b/data_processing/MANIQA/timm/models/mlp_mixer.py @@ -0,0 +1,659 @@ +""" MLP-Mixer, ResMLP, and gMLP in PyTorch + +This impl originally based on MLP-Mixer paper. + +Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py + +Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + +@article{tolstikhin2021, + title={MLP-Mixer: An all-MLP Architecture for Vision}, + author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, + Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey}, + journal={arXiv preprint arXiv:2105.01601}, + year={2021} +} + +Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP + +Code: https://github.com/facebookresearch/deit +Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 +@misc{touvron2021resmlp, + title={ResMLP: Feedforward networks for image classification with data-efficient training}, + author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and + Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou}, + year={2021}, + eprint={2105.03404}, +} + +Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 +@misc{liu2021pay, + title={Pay Attention to MLPs}, + author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le}, + year={2021}, + eprint={2105.08050}, +} + +A thank you to paper authors for releasing code and weights. 
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg, named_apply +from .layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + mixer_s32_224=_cfg(), + mixer_s16_224=_cfg(), + mixer_b32_224=_cfg(), + mixer_b16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth', + ), + mixer_b16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', + num_classes=21843 + ), + mixer_l32_224=_cfg(), + mixer_l16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth', + ), + mixer_l16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', + num_classes=21843 + ), + + # Mixer ImageNet-21K-P pretraining + mixer_b16_224_miil_in21k=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil_in21k.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + mixer_b16_224_miil=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), + + gmixer_12_224=_cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + gmixer_24_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_224=_cfg( + 
url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_big_24_224_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + gmlp_ti16_224=_cfg(), + gmlp_s16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', + ), + gmlp_b16_224=_cfg(), +) + + +class MixerBlock(nn.Module): + """ Residual Block w/ token mixing and channel MLPs + Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + def __init__( + self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] + self.norm1 = norm_layer(dim) + self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Affine(nn.Module): + def __init__(self, dim): + super().__init__() + self.alpha = nn.Parameter(torch.ones((1, 1, dim))) + self.beta = nn.Parameter(torch.zeros((1, 1, dim))) + + def forward(self, x): + return torch.addcmul(self.beta, self.alpha, x) + + +class ResBlock(nn.Module): + """ Residual MLP block w/ LayerScale and Affine 'norm' + + Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, + act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm1 = norm_layer(dim) + self.linear_tokens = nn.Linear(seq_len, seq_len) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) + self.ls1 = nn.Parameter(init_values * torch.ones(dim)) + self.ls2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) + return x + + +class SpatialGatingUnit(nn.Module): + """ Spatial Gating Unit + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): + super().__init__() + gate_dim = dim // 2 + self.norm = norm_layer(gate_dim) + self.proj = nn.Linear(seq_len, seq_len) + + def init_weights(self): + # special init for the projection gate, called as override by base model init + nn.init.normal_(self.proj.weight, std=1e-6) + nn.init.ones_(self.proj.bias) + + def forward(self, x): + u, v = x.chunk(2, dim=-1) + v = self.norm(v) + v = self.proj(v.transpose(-1, -2)) + return u * v.transpose(-1, -2) + + +class SpatialGatingBlock(nn.Module): + """ Residual Block w/ Spatial Gating + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm = norm_layer(dim) + sgu = partial(SpatialGatingUnit, seq_len=seq_len) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.mlp_channels(self.norm(x))) + return x + + +class MlpMixer(nn.Module): + + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + patch_size=16, + num_blocks=8, + embed_dim=512, + mlp_ratio=(0.5, 4.0), + block_layer=MixerBlock, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + ): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None) + # FIXME drop_path (stochastic depth scaling rule or all the same?) + self.blocks = nn.Sequential(*[ + block_layer( + embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, + act_layer=act_layer, drop=drop_rate, drop_path=drop_path_rate) + for _ in range(num_blocks)]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. + named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + x = self.norm(x) + x = x.mean(dim=1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + """ Mixer weight initialization (trying to match Flax defaults) + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # like MLP init in vit (my original init) + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + # NOTE if a parent module contains init_weights method, it can override the init of the + # child modules as this will be called in depth-first order. 
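+ # e.g. SpatialGatingUnit.init_weights (defined above) relies on this to override the generic nn.Linear init of its gate projection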
+ module.init_weights() + + +def checkpoint_filter_fn(state_dict, model): + """ Remap checkpoints if needed """ + if 'patch_embed.proj.weight' in state_dict: + # Remap FB ResMlp models -> timm + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('patch_embed.', 'stem.') + k = k.replace('attn.', 'linear_tokens.') + k = k.replace('mlp.', 'mlp_channels.') + k = k.replace('gamma_', 'ls') + if k.endswith('.alpha') or k.endswith('.beta'): + v = v.reshape(1, 1, -1) + out_dict[k] = v + return out_dict + return state_dict + + +def _create_mixer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for MLP-Mixer models.') + + model = build_model_with_cfg( + MlpMixer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def mixer_s32_224(pretrained=False, **kwargs): + """ Mixer-S/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_s16_224(pretrained=False, **kwargs): + """ Mixer-S/16 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b32_224(pretrained=False, **kwargs): + """ Mixer-B/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224_in21k(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-21k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l32_224(pretrained=False, **kwargs): + """ Mixer-L/32 224x224. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l16_224(pretrained=False, **kwargs): + """ Mixer-L/16 224x224. 
ImageNet-1k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l16_224_in21k(pretrained=False, **kwargs): + """ Mixer-L/16 224x224. ImageNet-21k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l16_224_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224_miil(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-21k pretrained weights. + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_miil', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224_miil_in21k(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_miil_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_12_224(pretrained=False, **kwargs): + """ Glu-Mixer-12 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_24_224(pretrained=False, **kwargs): + """ Glu-Mixer-24 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: 
Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_distilled_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_distilled_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224_in22ft1k(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def 
resmlp_12_224_dino(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224_dino', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224_dino(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224_dino', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_ti16_224(pretrained=False, **kwargs): + """ gMLP-Tiny + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_s16_224(pretrained=False, **kwargs): + """ gMLP-Small + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_b16_224(pretrained=False, **kwargs): + """ gMLP-Base + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) + return model diff --git a/data_processing/MANIQA/timm/models/mobilenetv3.py b/data_processing/MANIQA/timm/models/mobilenetv3.py new file mode 100644 index 0000000..8047412 --- /dev/null +++ b/data_processing/MANIQA/timm/models/mobilenetv3.py @@ -0,0 +1,679 @@ +""" MobileNet V3 + +A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. 
+ +Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 + +Hacked together by / Copyright 2019, Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import SelectAdaptivePool2d, Linear, create_conv2d, get_act_fn, hard_sigmoid +from .registry import register_model + +__all__ = ['MobileNetV3', 'MobileNetV3Features'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mobilenetv3_large_075': _cfg(url=''), + 'mobilenetv3_large_100': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'), + 'mobilenetv3_large_100_miil': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_1k_miil_78_0.pth'), + 'mobilenetv3_large_100_miil_in21k': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_in21k_miil.pth', num_classes=11221), + + 'mobilenetv3_small_050': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth', + interpolation='bicubic'), + 'mobilenetv3_small_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth', + interpolation='bicubic'), + 'mobilenetv3_small_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth', + interpolation='bicubic'), + + 'mobilenetv3_rw': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + interpolation='bicubic'), + + 'tf_mobilenetv3_large_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_100': _cfg( + url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'fbnetv3_b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth', + test_input_size=(3, 256, 256), crop_pct=0.95), + 'fbnetv3_d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth', + test_input_size=(3, 256, 256), crop_pct=0.95), + 'fbnetv3_g': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth', + input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95), + + "lcnet_035": _cfg(), + "lcnet_050": _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth', + interpolation='bicubic', + ), + "lcnet_075": _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth', + interpolation='bicubic', + ), + "lcnet_100": _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth', + interpolation='bicubic', + ), + "lcnet_150": _cfg(), +} + + +class MobileNetV3(nn.Module): + """ MobiletNet-V3 + + Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific + 'efficient head', where global pooling is done before the head convolution without a final batch-norm + layer before the classifier. 
+ + Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 + + Other architectures utilizing MobileNet-V3 efficient head that are supported by this impl include: + * HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class) + * FBNet-V3 - https://arxiv.org/abs/2006.02049 + * LCNet - https://arxiv.org/abs/2109.15099 + """ + + def __init__( + self, block_args, num_classes=1000, in_chans=3, stem_size=16, fix_stem=False, num_features=1280, + head_bias=True, pad_type='', act_layer=None, norm_layer=None, se_layer=None, se_from_exp=True, + round_chs_fn=round_channels, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(MobileNetV3, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + num_pooled_chs = head_chs * self.global_pool.feat_mult() + self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.global_pool, self.conv_head, self.act2]) + layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class MobileNetV3Features(nn.Module): + """ MobileNetV3 Feature Extractor + + A work-in-progress feature extraction 
module for MobileNet-V3 to use as a backbone for segmentation + and object detection models. + """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=16, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, + se_from_exp=True, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(MobileNetV3Features, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, + drop_path_rate=drop_path_rate, feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_mnv3(variant, pretrained=False, **kwargs): + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
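+ (for reference, mobilenetv3_rw below builds this variant with channel_multiplier=1.0)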
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + head_bias=False, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + if 'small' in variant: + num_features = 1024 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16'], + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], + # stage 2, 28x28 in + ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], + # stage 3, 14x14 in + ['ir_r2_k3_s1_e3_c48'], + # stage 4, 14x14in + ['ir_r3_k3_s2_e6_c96'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu + # stage 2, 28x28 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish + # stage 3, 14x14 in + ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish + # stage 4, 14x14in + ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], # hard-swish + ] + else: + num_features = 1280 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k3_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112'], + # stage 5, 14x14in + ['ir_r3_k3_s2_e6_c160'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = 
dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=16, + fix_stem=channel_multiplier < 0.75, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetv3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNetV3 + Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining` + - https://arxiv.org/abs/2006.02049 + FIXME untested, this is a preliminary impl of some FBNet-V3 variants. + """ + vl = variant.split('_')[-1] + if vl in ('a', 'b'): + stem_size = 16 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'], + ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'], + ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'], + ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'], + ['cn_r1_k1_s1_c1344'], + ] + elif vl == 'd': + stem_size = 24 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'], + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'], + ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'], + ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'], + ['cn_r1_k1_s1_c1440'], + ] + elif vl == 'g': + stem_size = 32 + arch_def = [ + ['ds_r3_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'], + ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'], + ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'], + ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'], + ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'], + ['cn_r1_k1_s1_c1728'], + ] + else: + raise NotImplementedError + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95) + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn) + act_layer = resolve_act_layer(kwargs, 'hard_swish') + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1984, + head_bias=False, + stem_size=stem_size, + round_chs_fn=round_chs_fn, + se_from_exp=False, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_lcnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ LCNet + Essentially a MobileNet-V3 crossed with a MobileNet-V1 + + Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099 + + Args: + channel_multiplier: multiplier to number of channels per layer.
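+ (for reference, the lcnet_* models below build this arch with channel_multiplier between 0.35 and 1.5)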
+ """ + arch_def = [ + # stage 0, 112x112 in + ['dsa_r1_k3_s1_c32'], + # stage 1, 112x112 in + ['dsa_r2_k3_s2_c64'], + # stage 2, 56x56 in + ['dsa_r2_k3_s2_c128'], + # stage 3, 28x28 in + ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'], + # stage 4, 14x14in + ['dsa_r4_k5_s1_c256'], + # stage 5, 14x14in + ['dsa_r2_k5_s2_c512_se0.25'], + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU), + num_features=1280, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil(pretrained=False, **kwargs): + """ MobileNet V3 + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil_in21k(pretrained=False, **kwargs): + """ MobileNet V3, 21k pretraining + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil_in21k', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_050(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_rw(pretrained=False, **kwargs): + """ MobileNet V3 """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = 
BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_b(pretrained=False, **kwargs): + """ FBNetV3-B """ + model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_d(pretrained=False, **kwargs): + """ FBNetV3-D """ + model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_g(pretrained=False, **kwargs): + """ FBNetV3-G """ + model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_035(pretrained=False, **kwargs): + """ PP-LCNet 0.35""" + model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_050(pretrained=False, **kwargs): + """ PP-LCNet 0.5""" + model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_075(pretrained=False, **kwargs): + """ PP-LCNet 1.0""" + model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_100(pretrained=False, **kwargs): + """ PP-LCNet 1.0""" + model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_150(pretrained=False, **kwargs): + """ PP-LCNet 1.5""" + model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/nasnet.py b/data_processing/MANIQA/timm/models/nasnet.py new file mode 100644 index 0000000..2afe82c --- /dev/null +++ b/data_processing/MANIQA/timm/models/nasnet.py @@ -0,0 +1,567 @@ +""" NasNet-A (Large) + nasnetalarge implementation grabbed from Cadene's pretrained models + https://github.com/Cadene/pretrained-models.pytorch +""" +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['NASNetALarge'] + +default_cfgs = { + 'nasnetalarge': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv0.conv', + 'classifier': 
'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=0) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) + self.act_2 = nn.ReLU(inplace=True) + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class CellStem0(nn.Module): + def __init__(self, stem_size, num_channels=42, pad_type=''): + super(CellStem0, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x): + x1 = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x1) + x_comb_iter_0_right = self.comb_iter_0_right(x) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x1) + x_comb_iter_1_right = self.comb_iter_1_right(x) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x1) + x_comb_iter_2_right = self.comb_iter_2_right(x) + x_comb_iter_2 = x_comb_iter_2_left 
+ x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x1) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem1(nn.Module): + + def __init__(self, stem_size, num_channels, pad_type=''): + super(CellStem1, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x_conv0, x_stem_0): + x_left = self.conv_1x1(x_stem_0) + + x_relu = self.act(x_conv0) + # path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2(x_relu) + # final path + x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_right) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_left) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_left) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class FirstCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(FirstCell, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, 
out_chs_right, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_relu = self.act(x_prev) + x_path1 = self.path_1(x_relu) + x_path2 = self.path_2(x_relu) + x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NormalCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(NormalCell, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + 
x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell0(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell0, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell1(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell1, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = 
BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NASNetALarge(nn.Module): + """NASNetALarge (6 @ 4032) """ + + def __init__(self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, + num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same'): + super(NASNetALarge, self).__init__() + self.num_classes = num_classes + self.stem_size = stem_size + self.num_features = num_features + self.channel_multiplier = channel_multiplier + self.drop_rate = drop_rate + assert output_stride == 32 + + channels = self.num_features // 24 + # 24 is default value for the architecture + + self.conv0 = ConvBnAct( + in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) + self.cell_stem_1 = CellStem1( + self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) + + self.cell_0 = FirstCell( + in_chs_left=channels, out_chs_left=channels // 2, + in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_1 = NormalCell( + in_chs_left=2 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_2 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_3 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_4 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_5 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * 
channels, out_chs_right=channels, pad_type=pad_type) + + self.reduction_cell_0 = ReductionCell0( + in_chs_left=6 * channels, out_chs_left=2 * channels, + in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_6 = FirstCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_7 = NormalCell( + in_chs_left=8 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_8 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_9 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_10 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_11 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + + self.reduction_cell_1 = ReductionCell1( + in_chs_left=12 * channels, out_chs_left=4 * channels, + in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_12 = FirstCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_13 = NormalCell( + in_chs_left=16 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_14 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_15 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_16 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_17 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.act = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv0'), + dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), + dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), + dict(num_chs=4032, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv0 = self.conv0(x) + + x_stem_0 = self.cell_stem_0(x_conv0) + x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) + + x_cell_0 = self.cell_0(x_stem_1, x_stem_0) + x_cell_1 = self.cell_1(x_cell_0, x_stem_1) + x_cell_2 = self.cell_2(x_cell_1, x_cell_0) + x_cell_3 = self.cell_3(x_cell_2, x_cell_1) + x_cell_4 = self.cell_4(x_cell_3, x_cell_2) + x_cell_5 = self.cell_5(x_cell_4, x_cell_3) + + 
x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) + x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) + x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) + x_cell_8 = self.cell_8(x_cell_7, x_cell_6) + x_cell_9 = self.cell_9(x_cell_8, x_cell_7) + x_cell_10 = self.cell_10(x_cell_9, x_cell_8) + x_cell_11 = self.cell_11(x_cell_10, x_cell_9) + + x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) + x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) + x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) + x_cell_14 = self.cell_14(x_cell_13, x_cell_12) + x_cell_15 = self.cell_15(x_cell_14, x_cell_13) + x_cell_16 = self.cell_16(x_cell_15, x_cell_14) + x_cell_17 = self.cell_17(x_cell_16, x_cell_15) + x = self.act(x_cell_17) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_nasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + NASNetALarge, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def nasnetalarge(pretrained=False, **kwargs): + """NASNet-A large model architecture. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_nasnet('nasnetalarge', pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/nest.py b/data_processing/MANIQA/timm/models/nest.py new file mode 100644 index 0000000..22cf609 --- /dev/null +++ b/data_processing/MANIQA/timm/models/nest.py @@ -0,0 +1,465 @@ +""" Nested Transformer (NesT) in PyTorch + +A PyTorch implement of Aggregating Nested Transformers as described in: + +'Aggregating Nested Transformers' + - https://arxiv.org/abs/2105.12723 + +The official Jax code is released and available at https://github.com/google-research/nested-transformer. 
The weights +have been converted with convert/convert_nest_flax.py + +Acknowledgments: +* The paper authors for sharing their research, code, and model weights +* Ross Wightman's existing code off which I based this + +Copyright 2021 Alexander Soare +""" + +import collections.abc +import logging +import math +from functools import partial + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, named_apply +from .layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_ +from .layers import _assert +from .layers import create_conv2d, create_pool2d, to_ntuple +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], + 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # (weights from official Google JAX impl) + 'nest_base': _cfg(), + 'nest_small': _cfg(), + 'nest_tiny': _cfg(), + 'jx_nest_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_base-8bc41011.pth'), + 'jx_nest_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_small-422eaded.pth'), + 'jx_nest_tiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_tiny-e3428fb9.pth'), +} + + +class Attention(nn.Module): + """ + This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with + an extra "image block" dim + """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + """ + x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) + """ + B, T, N, C = x.shape + # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) + qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale # (B, H, T, N, N) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, T, N, C'), permute -> (B, T, N, C', H) + x = (attn @ v).permute(0, 2, 3, 4, 1).reshape(B, T, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x # (B, T, N, C) + + +class TransformerLayer(nn.Module): + """ + This is much like `.vision_transformer.Block` but: + - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") + - Uses modified Attention layer that handles the "block" dimension + """ + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, 
drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + y = self.norm1(x) + x = x + self.drop_path(self.attn(y)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvPool(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): + super().__init__() + self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) + self.norm = norm_layer(out_channels) + self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) + + def forward(self, x): + """ + x is expected to have shape (B, C, H, W) + """ + _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') + _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') + x = self.conv(x) + # Layer norm done over channel dim only + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + x = self.pool(x) + return x # (B, C, H//2, W//2) + + +def blockify(x, block_size: int): + """image to blocks + Args: + x (Tensor): with shape (B, H, W, C) + block_size (int): edge length of a single square block in units of H, W + """ + B, H, W, C = x.shape + _assert(H % block_size == 0, '`block_size` must divide input height evenly') + _assert(W % block_size == 0, '`block_size` must divide input width evenly') + grid_height = H // block_size + grid_width = W // block_size + x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) + x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) + return x # (B, T, N, C) + + +@register_notrace_function # reason: int receives Proxy +def deblockify(x, block_size: int): + """blocks to image + Args: + x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block + block_size (int): edge length of a single square block in units of desired H, W + """ + B, T, _, C = x.shape + grid_size = int(math.sqrt(T)) + height = width = grid_size * block_size + x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) + x = x.transpose(2, 3).reshape(B, height, width, C) + return x # (B, H, W, C) + + +class NestLevel(nn.Module): + """ Single hierarchical level of a Nested Transformer + """ + def __init__( + self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, + mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rates=[], + norm_layer=None, act_layer=None, pad_type=''): + super().__init__() + self.block_size = block_size + self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) + + if prev_embed_dim is not None: + self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) + else: + self.pool = nn.Identity() + + # Transformer encoder + if len(drop_path_rates): + assert len(drop_path_rates) == depth, 'Must provide as many drop path rates as there are transformer layers' + self.transformer_encoder = nn.Sequential(*[ + TransformerLayer( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, 
drop_path=drop_path_rates[i], + norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + + def forward(self, x): + """ + expects x as (B, C, H, W) + """ + x = self.pool(x) + x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer + x = blockify(x, self.block_size) # (B, T, N, C') + x = x + self.pos_embed + x = self.transformer_encoder(x) # (B, T, N, C') + x = deblockify(x, self.block_size) # (B, H', W', C') + # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage + return x.permute(0, 3, 1, 2) # (B, C, H', W') + + +class Nest(nn.Module): + """ Nested Transformer (NesT) + + A PyTorch impl of : `Aggregating Nested Transformers` + - https://arxiv.org/abs/2105.12723 + """ + + def __init__(self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), + num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, + pad_type='', weight_init='', global_pool='avg'): + """ + Args: + img_size (int, tuple): input image size + in_chans (int): number of input channels + patch_size (int): patch size + num_levels (int): number of block hierarchies (T_d in the paper) + embed_dims (int, tuple): embedding dimensions of each level + num_heads (int, tuple): number of attention heads for each level + depths (int, tuple): number of transformer layers for each level + num_classes (int): number of classes for classification head + mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer for transformer layers + act_layer: (nn.Module): activation layer in MLP of transformer layers + pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME + weight_init: (str): weight init scheme + global_pool: (str): type of pooling operation to apply to final feature map + + Notes: + - Default values follow NesT-B from the original Jax code. + - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. + - For those following the paper, Table A1 may have errors! 
+ - https://github.com/google-research/nested-transformer/issues/2 + """ + super().__init__() + + for param_name in ['embed_dims', 'num_heads', 'depths']: + param_value = locals()[param_name] + if isinstance(param_value, collections.abc.Sequence): + assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' + + embed_dims = to_ntuple(num_levels)(embed_dims) + num_heads = to_ntuple(num_levels)(num_heads) + depths = to_ntuple(num_levels)(depths) + self.num_classes = num_classes + self.num_features = embed_dims[-1] + self.feature_info = [] + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + self.drop_rate = drop_rate + self.num_levels = num_levels + if isinstance(img_size, collections.abc.Sequence): + assert img_size[0] == img_size[1], 'Model only handles square inputs' + img_size = img_size[0] + assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' + self.patch_size = patch_size + + # Number of blocks at each level + self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() + assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ + 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' + + # Block edge size in units of patches + # Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the + # number of blocks along edge of image + self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) + + # Patch embedding + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False) + self.num_patches = self.patch_embed.num_patches + self.seq_length = self.num_patches // self.num_blocks[0] + + # Build up each hierarchical level + levels = [] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + prev_dim = None + curr_stride = 4 + for i in range(len(self.num_blocks)): + dim = embed_dims[i] + levels.append(NestLevel( + self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, + mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, dp_rates[i], norm_layer, act_layer, pad_type=pad_type)) + self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] + prev_dim = dim + curr_stride *= 2 + self.levels = nn.Sequential(*levels) + + # Final normalization layer + self.norm = norm_layer(embed_dims[-1]) + + # Classifier + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. 
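+        # 'nlhb' = negative log head bias: setting the classifier bias to -log(num_classes)
+        # makes the untrained head predict a roughly uniform class distribution.
+        # Position embeddings get per-level truncated-normal init below; all other modules
+        # are initialised via _init_nest_weights.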
+ for level in self.levels: + trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) + named_apply(partial(_init_nest_weights, head_bias=head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + return {f'level.{i}.pos_embed' for i in range(len(self.levels))} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + """ x shape (B, C, H, W) + """ + x = self.patch_embed(x) + x = self.levels(x) + # Layer norm done over channel dim only (to NHWC and back) + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + + def forward(self, x): + """ x shape (B, C, H, W) + """ + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.head(x) + + +def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): + """ NesT weight initialization + Can replicate Jax implementation. Otherwise follows vision_transformer.py + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + nn.init.constant_(module.bias, head_bias) + else: + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): + nn.init.zeros_(module.bias) + nn.init.ones_(module.weight) + + +def resize_pos_embed(posemb, posemb_new): + """ + Rescale the grid of position embeddings when loading from state_dict + Expected shape of position embeddings is (1, T, N, C), and considers only square images + """ + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + seq_length_old = posemb.shape[2] + num_blocks_new, seq_length_new = posemb_new.shape[1:3] + size_new = int(math.sqrt(num_blocks_new*seq_length_new)) + # First change to (1, C, H, W) + posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) + posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) + # Now change to new (1, T, N, C) + posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ resize positional embeddings of pretrained weights """ + pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] + for k in pos_embed_keys: + if state_dict[k].shape != getattr(model, k).shape: + state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) + return state_dict + + +def _create_nest(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + Nest, variant, pretrained, + default_cfg=default_cfg, + feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + +@register_model +def nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224 + """ + model_kwargs = dict( + embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_base', pretrained=pretrained, 
**model_kwargs) + return model + + +@register_model +def nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('jx_nest_tiny', pretrained=pretrained, **model_kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/nfnet.py b/data_processing/MANIQA/timm/models/nfnet.py new file mode 100644 index 0000000..973cbd6 --- /dev/null +++ b/data_processing/MANIQA/timm/models/nfnet.py @@ -0,0 +1,968 @@ +""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models + +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + +Paper: `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Status: +* These models are a work in progress, experiments ongoing. +* Pretrained weights for two models so far, more to come. +* Model details updated to closer match official JAX code now that it's released +* NF-ResNet, NF-RegNet-B, and NFNet-F models supported + +Hacked together by / copyright Ross Wightman, 2021. 
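+
+NOTE: the paper's NFNet-F results also rely on Adaptive Gradient Clipping (AGC) during
+training; this module only defines the model architectures and configs.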
+""" +import math +from dataclasses import dataclass, field +from collections import OrderedDict +from typing import Tuple, Optional +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_module +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame,\ + get_act_layer, get_act_fn, get_attn, make_divisible + + +def _dcfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + dm_nfnet_f0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', + pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9), + dm_nfnet_f1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91), + dm_nfnet_f2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92), + dm_nfnet_f3=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94), + dm_nfnet_f4=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', + pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951), + dm_nfnet_f5=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', + pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954), + dm_nfnet_f6=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', + pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956), + + nfnet_f0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + 
nfnet_f6=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_f0s=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1s=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2s=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3s=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4s=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5s=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6s=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7s=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', + hf_hub='timm/eca_nfnet_l0', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), crop_pct=1.0), + eca_nfnet_l2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), crop_pct=1.0), + eca_nfnet_l3=_dcfg( + url='', + pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), crop_pct=1.0), + + nf_regnet_b0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), + nf_regnet_b1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec + nf_regnet_b2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), + nf_regnet_b3=_dcfg( + url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), + nf_regnet_b4=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), + nf_regnet_b5=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), + + nf_resnet26=_dcfg(url='', first_conv='stem.conv'), + nf_resnet50=_dcfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), + nf_resnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_seresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_ecaresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet101=_dcfg(url='', first_conv='stem.conv'), +) + + +@dataclass +class NfCfg: + depths: Tuple[int, int, int, int] + channels: Tuple[int, int, int, int] + alpha: float = 0.2 + stem_type: str = '3x3' + stem_chs: Optional[int] = None + group_size: Optional[int] = None + attn_layer: Optional[str] = None + attn_kwargs: dict = None + attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used + width_factor: float = 1.0 + bottle_ratio: float = 0.5 + num_features: int = 0 # num out_channels for final conv, no final_conv if 0 + ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal + reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle + extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models + gamma_in_act: bool = False + same_padding: bool = False + std_conv_eps: float = 1e-5 + skipinit: bool = False # disabled by default, non-trivial performance impact + zero_init_fc: bool = False + act_layer: str = 'silu' + + +def _nfres_cfg( + depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None): + attn_kwargs = attn_kwargs or {} + cfg = NfCfg( + depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, + group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): + num_features = 1280 * channels[-1] // 440 + attn_kwargs = dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, + num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs) + return cfg + + +def _nfnet_cfg( + depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2., + act_layer='gelu', attn_layer='se', attn_kwargs=None): + num_features = int(channels[-1] * feat_mult) + attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, + bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, + attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True): + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128, + bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit, + num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5)) + return cfg + + + +model_cfgs = dict( + # NFNet-F models w/ GELU compatible with DeepMind weights + dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), + dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 
4, 12, 6)), + dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), + dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), + dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), + dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), + dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), + + # NFNet-F models w/ GELU (I will likely deprecate/remove these models and just keep dm_ ver for GELU) + nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), + nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), + nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), + nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), + nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), + nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), + nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), + nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), + + # NFNet-F models w/ SiLU (much faster in PyTorch) + nfnet_f0s=_nfnet_cfg(depths=(1, 2, 6, 3), act_layer='silu'), + nfnet_f1s=_nfnet_cfg(depths=(2, 4, 12, 6), act_layer='silu'), + nfnet_f2s=_nfnet_cfg(depths=(3, 6, 18, 9), act_layer='silu'), + nfnet_f3s=_nfnet_cfg(depths=(4, 8, 24, 12), act_layer='silu'), + nfnet_f4s=_nfnet_cfg(depths=(5, 10, 30, 15), act_layer='silu'), + nfnet_f5s=_nfnet_cfg(depths=(6, 12, 36, 18), act_layer='silu'), + nfnet_f6s=_nfnet_cfg(depths=(7, 14, 42, 21), act_layer='silu'), + nfnet_f7s=_nfnet_cfg(depths=(8, 16, 48, 24), act_layer='silu'), + + # Experimental 'light' versions of NFNet-F that are a little leaner + nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), + eca_nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l1=_nfnet_cfg( + depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l2=_nfnet_cfg( + depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l3=_nfnet_cfg( + depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + + # EffNet influenced RegNet defs. + # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8.
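+ # Worked example of the head sizing in _nfreg_cfg above: num_features = 1280 * channels[-1] // 440,
+ # e.g. nf_regnet_b4 (channels[-1]=616) -> 1280*616//440 = 1792 and nf_regnet_b5 (channels[-1]=704) -> 2048,
+ # before the width_factor=0.75 and ch_div rounding in NormFreeNet are applied.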
+ nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), + nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), + nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), + nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), + nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), + nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), + # FIXME add B6-B8 + + # ResNet (preact, D style deep stem/avg down) defs + nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), + nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), + nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), + + nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + + nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), + +) + + +class GammaAct(nn.Module): + def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): + super().__init__() + self.act_fn = get_act_fn(act_type) + self.gamma = gamma + self.inplace = inplace + + def forward(self, x): + return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) + + +def act_with_gamma(act_type, gamma: float = 1.): + def _create(inplace=False): + return GammaAct(act_type, gamma=gamma, inplace=inplace) + return _create + + +class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, conv_layer=ScaledStdConv2d): + """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + + def forward(self, x): + return self.conv(self.pool(x)) + + +@register_notrace_module # reason: mul_ causes FX to drop a relevant node. https://github.com/pytorch/pytorch/issues/68301 +class NormFreeBlock(nn.Module): + """Normalization-Free pre-activation block. 
+ """ + + def __init__( + self, in_chs, out_chs=None, stride=1, dilation=1, first_dilation=None, + alpha=1.0, beta=1.0, bottle_ratio=0.25, group_size=None, ch_div=1, reg=True, extra_conv=False, + skipinit=False, attn_layer=None, attn_gain=2.0, act_layer=None, conv_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + out_chs = out_chs or in_chs + # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet + mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) + groups = 1 if not group_size else mid_chs // group_size + if group_size and group_size % ch_div == 0: + mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error + self.alpha = alpha + self.beta = beta + self.attn_gain = attn_gain + + if in_chs != out_chs or stride != 1 or dilation != first_dilation: + self.downsample = DownsampleAvg( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer) + else: + self.downsample = None + + self.act1 = act_layer() + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.act2 = act_layer(inplace=True) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + if extra_conv: + self.act2b = act_layer(inplace=True) + self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) + else: + self.act2b = None + self.conv2b = None + if reg and attn_layer is not None: + self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 + else: + self.attn = None + self.act3 = act_layer() + self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) + if not reg and attn_layer is not None: + self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 + else: + self.attn_last = None + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None + + def forward(self, x): + out = self.act1(x) * self.beta + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(out) + + # residual branch + out = self.conv1(out) + out = self.conv2(self.act2(out)) + if self.conv2b is not None: + out = self.conv2b(self.act2b(out)) + if self.attn is not None: + out = self.attn_gain * self.attn(out) + out = self.conv3(self.act3(out)) + if self.attn_last is not None: + out = self.attn_gain * self.attn_last(out) + out = self.drop_path(out) + + if self.skipinit_gain is not None: + out.mul_(self.skipinit_gain) # this slows things down more than expected, TBD + out = out * self.alpha + shortcut + return out + + +def create_stem(in_chs, out_chs, stem_type='', conv_layer=None, act_layer=None, preact_feature=True): + stem_stride = 2 + stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') + stem = OrderedDict() + assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') + if 'deep' in stem_type: + if 'quad' in stem_type: + # 4 deep conv stack as in NFNet-F models + assert not 'pool' in stem_type + stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) + strides = (2, 1, 1, 2) + stem_stride = 4 + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') + else: + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 
2, out_chs // 2, out_chs) # 'D' ResNets + strides = (2, 1, 1) + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') + last_idx = len(stem_chs) - 1 + for i, (c, s) in enumerate(zip(stem_chs, strides)): + stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) + if i != last_idx: + stem[f'act{i + 2}'] = act_layer(inplace=True) + in_chs = c + elif '3x3' in stem_type: + # 3x3 stem conv as in RegNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) + else: + # 7x7 stem conv as in ResNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + + if 'pool' in stem_type: + stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) + stem_stride = 4 + + return nn.Sequential(stem), stem_stride, stem_feature + + +# from https://github.com/deepmind/deepmind-research/tree/master/nfnets +_nonlin_gamma = dict( + identity=1.0, + celu=1.270926833152771, + elu=1.2716004848480225, + gelu=1.7015043497085571, + leaky_relu=1.70590341091156, + log_sigmoid=1.9193484783172607, + log_softmax=1.0002083778381348, + relu=1.7139588594436646, + relu6=1.7131484746932983, + selu=1.0008515119552612, + sigmoid=4.803835391998291, + silu=1.7881293296813965, + softsign=2.338853120803833, + softplus=1.9203323125839233, + tanh=1.5939117670059204, +) + + +class NormFreeNet(nn.Module): + """ Normalization-Free Network + + As described in : + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + and + `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 + + This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and + the (preact) ResNet models described earlier in the paper. + + There are a few differences: + * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), + this changes channel dim and param counts slightly from the paper models + * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance + impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. + * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but + apply it in each activation. This is slightly slower, numerically different, but matches official impl. + * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput + for what it is/does. Approx 8-10% throughput loss. + """ + def __init__(self, cfg: NfCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." 
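+ # The _nonlin_gamma table above holds variance-preserving gains: for z ~ N(0, 1), gamma is roughly
+ # 1 / std(act(z)) (identity -> 1.0, ReLU -> ~1.71, tanh -> ~1.59), so applying gamma in the activation
+ # (gamma_in_act) or folding it into ScaledStdConv2d keeps activation variance approximately unit.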
+ conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d + if cfg.gamma_in_act: + act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) + conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) + else: + act_layer = get_act_layer(cfg.act_layer) + conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) + attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + + stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) + self.stem, stem_stride, stem_feat = create_stem( + in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer) + + self.feature_info = [stem_feat] + drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + prev_chs = stem_chs + net_stride = stem_stride + dilation = 1 + expected_var = 1.0 + stages = [] + for stage_idx, stage_depth in enumerate(cfg.depths): + stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx in range(cfg.depths[stage_idx]): + first_block = block_idx == 0 and stage_idx == 0 + out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) + blocks += [NormFreeBlock( + in_chs=prev_chs, out_chs=out_chs, + alpha=cfg.alpha, + beta=1. / expected_var ** 0.5, + stride=stride if block_idx == 0 else 1, + dilation=dilation, + first_dilation=first_dilation, + group_size=cfg.group_size, + bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, + ch_div=cfg.ch_div, + reg=cfg.reg, + extra_conv=cfg.extra_conv, + skipinit=cfg.skipinit, + attn_layer=attn_layer, + attn_gain=cfg.attn_gain, + act_layer=act_layer, + conv_layer=conv_layer, + drop_path_rate=drop_path_rates[stage_idx][block_idx], + )] + if block_idx == 0: + expected_var = 1. # expected var is reset after first block of each stage + expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance + first_dilation = dilation + prev_chs = out_chs + self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] + stages += [nn.Sequential(*blocks)] + self.stages = nn.Sequential(*stages) + + if cfg.num_features: + # The paper NFRegNet models have an EfficientNet-like final head convolution. 
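+ # e.g. the NFNet-F configs above yield num_features = 2 * 1536 = 3072 (width_factor=1.0),
+ # eca_nfnet_l0 (feat_mult=1.5) yields 2304, and nf_regnet_b0 ends up at 0.75 * 1280 = 960.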
+ self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) + self.final_conv = conv_layer(prev_chs, self.num_features, 1) + self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.final_act = act_layer(inplace=cfg.num_features > 0) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + for n, m in self.named_modules(): + if 'fc' in n and isinstance(m, nn.Linear): + if cfg.zero_init_fc: + nn.init.zeros_(m.weight) + else: + nn.init.normal_(m.weight, 0., .01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') + if m.bias is not None: + nn.init.zeros_(m.bias) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.final_conv(x) + x = self.final_act(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_normfreenet(variant, pretrained=False, **kwargs): + model_cfg = model_cfgs[variant] + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + NormFreeNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfg, + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def dm_nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 (DeepMind weight compatible) + 
`High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7(pretrained=False, **kwargs): + """ NFNet-F7 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0s(pretrained=False, **kwargs): + """ NFNet-F0 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0s', pretrained=pretrained, **kwargs) + + +@register_model +def 
nfnet_f1s(pretrained=False, **kwargs): + """ NFNet-F1 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2s(pretrained=False, **kwargs): + """ NFNet-F2 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3s(pretrained=False, **kwargs): + """ NFNet-F3 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4s(pretrained=False, **kwargs): + """ NFNet-F4 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5s(pretrained=False, **kwargs): + """ NFNet-F5 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6s(pretrained=False, **kwargs): + """ NFNet-F6 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7s(pretrained=False, **kwargs): + """ NFNet-F7 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_l0(pretrained=False, **kwargs): + """ NFNet-L0b w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio + """ + return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l0(pretrained=False, **kwargs): + """ ECA-NFNet-L0 w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l1(pretrained=False, **kwargs): + """ ECA-NFNet-L1 w/ SiLU + My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l2(pretrained=False, **kwargs): + """ ECA-NFNet-L2 w/ SiLU + My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l3(pretrained=False, **kwargs): + """ ECA-NFNet-L3 w/ SiLU + My 
experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b0(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B0 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b1(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B1 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b2(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B2 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b3(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B3 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b4(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B4 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b5(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B5 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet26(pretrained=False, **kwargs): + """ Normalization-Free ResNet-26 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet50(pretrained=False, **kwargs): + """ Normalization-Free ResNet-50 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet101(pretrained=False, **kwargs): + """ Normalization-Free ResNet-101 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) + + +@register_model +def 
nf_seresnet26(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet26 + """ + return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet50(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet50 + """ + return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet101(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet101 + """ + return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet26(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet26 + """ + return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet50(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet50 + """ + return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet101(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet101 + """ + return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/pit.py b/data_processing/MANIQA/timm/models/pit.py new file mode 100644 index 0000000..460824e --- /dev/null +++ b/data_processing/MANIQA/timm/models/pit.py @@ -0,0 +1,384 @@ +""" Pooling-based Vision Transformer (PiT) in PyTorch + +A PyTorch implementation of Pooling-based Vision Transformers as described in +'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 + +This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below. + +Modifications for timm by / Copyright 2020 Ross Wightman +""" +# PiT +# Copyright 2021-present NAVER Corp.
+# Apache License v2.0 + +import math +import re +from copy import deepcopy +from functools import partial +from typing import Tuple + +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import trunc_normal_, to_2tuple +from .registry import register_model +from .vision_transformer import Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # deit models (FB weights) + 'pit_ti_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'), + 'pit_xs_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'), + 'pit_s_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'), + 'pit_b_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'), + 'pit_ti_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth', + classifier=('head', 'head_dist')), + 'pit_xs_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth', + classifier=('head', 'head_dist')), + 'pit_s_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth', + classifier=('head', 'head_dist')), + 'pit_b_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth', + classifier=('head', 'head_dist')), +} + + +class SequentialTuple(nn.Sequential): + """ This module exists to work around torchscript typing issues list -> list""" + def __init__(self, *args): + super(SequentialTuple, self).__init__(*args) + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + for module in self: + x = module(x) + return x + + +class Transformer(nn.Module): + def __init__( + self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None): + super(Transformer, self).__init__() + self.layers = nn.ModuleList([]) + embed_dim = base_dim * heads + + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, + num_heads=heads, + mlp_ratio=mlp_ratio, + qkv_bias=True, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_prob[i], + norm_layer=partial(nn.LayerNorm, eps=1e-6) + ) + for i in range(depth)]) + + self.pool = pool + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + x, cls_tokens = x + B, C, H, 
W = x.shape + token_length = cls_tokens.shape[1] + + x = x.flatten(2).transpose(1, 2) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.blocks(x) + + cls_tokens = x[:, :token_length] + x = x[:, token_length:] + x = x.transpose(1, 2).reshape(B, C, H, W) + + if self.pool is not None: + x, cls_tokens = self.pool(x, cls_tokens) + return x, cls_tokens + + +class ConvHeadPooling(nn.Module): + def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): + super(ConvHeadPooling, self).__init__() + + self.conv = nn.Conv2d( + in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, + padding_mode=padding_mode, groups=in_feature) + self.fc = nn.Linear(in_feature, out_feature) + + def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: + + x = self.conv(x) + cls_token = self.fc(cls_token) + + return x, cls_token + + +class ConvEmbedding(nn.Module): + def __init__(self, in_channels, out_channels, patch_size, stride, padding): + super(ConvEmbedding, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True) + + def forward(self, x): + x = self.conv(x) + return x + + +class PoolingVisionTransformer(nn.Module): + """ Pooling-based Vision Transformer + + A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers' + - https://arxiv.org/abs/2103.16302 + """ + def __init__(self, img_size, patch_size, stride, base_dims, depth, heads, + mlp_ratio, num_classes=1000, in_chans=3, distilled=False, + attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0): + super(PoolingVisionTransformer, self).__init__() + + padding = 0 + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1) + width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1) + + self.base_dims = base_dims + self.heads = heads + self.num_classes = num_classes + self.num_tokens = 2 if distilled else 1 + + self.patch_size = patch_size + self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width)) + self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding) + + self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0])) + self.pos_drop = nn.Dropout(p=drop_rate) + + transformers = [] + # stochastic depth decay rule + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)] + for stage in range(len(depth)): + pool = None + if stage < len(heads) - 1: + pool = ConvHeadPooling( + base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2) + transformers += [Transformer( + base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage]) + ] + self.transformers = SequentialTuple(*transformers) + self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6) + self.num_features = self.embed_dim = base_dims[-1] * heads[-1] + + # Classifier head + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if 
isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + if self.head_dist is not None: + return self.head, self.head_dist + else: + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.head_dist is not None: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x + self.pos_embed) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + x, cls_tokens = self.transformers((x, cls_tokens)) + cls_tokens = self.norm(cls_tokens) + if self.head_dist is not None: + return cls_tokens[:, 0], cls_tokens[:, 1] + else: + return cls_tokens[:, 0] + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + return (x + x_dist) / 2 + else: + return self.head(x) + + +def checkpoint_filter_fn(state_dict, model): + """ preprocess checkpoints """ + out_dict = {} + p_blocks = re.compile(r'pools\.(\d)\.') + for k, v in state_dict.items(): + # FIXME need to update resize for PiT impl + # if k == 'pos_embed' and v.shape != model.pos_embed.shape: + # # To resize pos embedding when using model at different size from pretrained weights + # v = resize_pos_embed(v, model.pos_embed) + k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k) + out_dict[k] = v + return out_dict + + +def _create_pit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + PoolingVisionTransformer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def pit_b_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_b_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_s_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_xs_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_ti_224', pretrained, **model_kwargs) + + +@register_model +def pit_b_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_b_distilled_224', pretrained, 
**model_kwargs) + + +@register_model +def pit_s_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs) \ No newline at end of file diff --git a/data_processing/MANIQA/timm/models/pnasnet.py b/data_processing/MANIQA/timm/models/pnasnet.py new file mode 100644 index 0000000..9991815 --- /dev/null +++ b/data_processing/MANIQA/timm/models/pnasnet.py @@ -0,0 +1,350 @@ +""" + pnasnet5large implementation grabbed from Cadene's pretrained models + Additional credit to https://github.com/creafz + + https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py + +""" +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['PNASNet5Large'] + +default_cfgs = { + 'pnasnet5large': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv_0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=padding) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=padding) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) + self.act_2 = nn.ReLU() + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=padding) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, 
eps=0.001) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class FactorizedReduction(nn.Module): + + def __init__(self, in_channels, out_channels, padding=''): + super(FactorizedReduction, self).__init__() + self.act = nn.ReLU() + self.path_1 = nn.Sequential(OrderedDict([ + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.path_2 = nn.Sequential(OrderedDict([ + ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x_path1 = self.path_1(x) + x_path2 = self.path_2(x) + out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + return out + + +class CellBase(nn.Module): + + def cell_forward(self, x_left, x_right): + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) + x_comb_iter_3_right = self.comb_iter_3_right(x_right) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_left) + if self.comb_iter_4_right is not None: + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + else: + x_comb_iter_4_right = x_right + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem0(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(CellStem0, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_0_right = nn.Sequential(OrderedDict([ + ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), + ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), + ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)), + ])) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, 
out_chs_right, kernel_size=5, stride=2, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, padding=pad_type) + self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) + + def forward(self, x_left): + x_right = self.conv_1x1(x_left) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class Cell(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='', + is_reduction=False, match_prev_layer_dims=False): + super(Cell, self).__init__() + + # If `is_reduction` is set to `True` stride 2 is used for + # convolution and pooling layers to reduce the spatial size of + # the output of a cell approximately by a factor of 2. + stride = 2 if is_reduction else 1 + + # If `match_prev_layer_dimensions` is set to `True` + # `FactorizedReduction` is used to reduce the spatial size + # of the left input of a cell approximately by a factor of 2. + self.match_prev_layer_dimensions = match_prev_layer_dims + if match_prev_layer_dims: + self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) + else: + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) + self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) + if is_reduction: + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) + else: + self.comb_iter_4_right = None + + def forward(self, x_left, x_right): + x_left = self.conv_prev_1x1(x_left) + x_right = self.conv_1x1(x_right) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class PNASNet5Large(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type=''): + super(PNASNet5Large, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.num_features = 4320 + assert output_stride == 32 + + self.conv_0 = ConvBnAct( + in_chans, 96, kernel_size=3, stride=2, padding=0, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + in_chs_left=96, 
out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) + + self.cell_stem_1 = Cell( + in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, + match_prev_layer_dims=True, is_reduction=True) + self.cell_0 = Cell( + in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_1 = Cell( + in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_2 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_3 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + + self.cell_4 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type, + is_reduction=True) + self.cell_5 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_6 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + self.cell_7 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + + self.cell_8 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type, + is_reduction=True) + self.cell_9 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_10 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.cell_11 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.act = nn.ReLU() + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv_0'), + dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), + dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), + dict(num_chs=4320, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv_0 = self.conv_0(x) + x_stem_0 = self.cell_stem_0(x_conv_0) + x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) + x_cell_0 = self.cell_0(x_stem_0, x_stem_1) + x_cell_1 = self.cell_1(x_stem_1, x_cell_0) + x_cell_2 = self.cell_2(x_cell_0, x_cell_1) + x_cell_3 = self.cell_3(x_cell_1, x_cell_2) + x_cell_4 = self.cell_4(x_cell_2, x_cell_3) + x_cell_5 = self.cell_5(x_cell_3, x_cell_4) + x_cell_6 = self.cell_6(x_cell_4, x_cell_5) + x_cell_7 = self.cell_7(x_cell_5, x_cell_6) + x_cell_8 = self.cell_8(x_cell_6, x_cell_7) + x_cell_9 = self.cell_9(x_cell_7, x_cell_8) + x_cell_10 = self.cell_10(x_cell_8, x_cell_9) + x_cell_11 = self.cell_11(x_cell_9, x_cell_10) + x = self.act(x_cell_11) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_pnasnet(variant, pretrained=False, **kwargs): + return 
build_model_with_cfg( + PNASNet5Large, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def pnasnet5large(pretrained=False, **kwargs): + r"""PNASNet-5 model architecture from the + `"Progressive Neural Architecture Search" + `_ paper. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/pruned/ecaresnet101d_pruned.txt b/data_processing/MANIQA/timm/models/pruned/ecaresnet101d_pruned.txt new file mode 100644 index 0000000..2589b2f --- /dev/null +++ b/data_processing/MANIQA/timm/models/pruned/ecaresnet101d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 
1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 
1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042] \ No newline at end of file diff --git a/data_processing/MANIQA/timm/models/pruned/ecaresnet50d_pruned.txt b/data_processing/MANIQA/timm/models/pruned/ecaresnet50d_pruned.txt new file mode 100644 index 0000000..9a8b2bf --- /dev/null +++ b/data_processing/MANIQA/timm/models/pruned/ecaresnet50d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 
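Each of these `pruned/*.txt` listings is a single line encoding one `parameter_name:[shape]` entry per tensor, with entries joined by `***` (an empty `[]` marks scalar buffers such as `num_batches_tracked`). The sketch below shows one way such a listing could be parsed back into a name-to-shape mapping; `parse_pruned_txt` is a hypothetical helper written for illustration, not timm's own pruning loader:

```python
# Illustrative parser for the pruned-model shape listings in this directory.
# parse_pruned_txt is a hypothetical helper, not part of timm's API.
from typing import Dict, List


def parse_pruned_txt(text: str) -> Dict[str, List[int]]:
    shapes = {}
    for entry in text.strip().split('***'):
        if not entry:
            continue
        name, shape_str = entry.split(':', 1)
        shape_str = shape_str.strip().strip('[]')
        # scalar buffers such as num_batches_tracked are stored as "[]"
        shapes[name] = [int(d) for d in shape_str.split(',')] if shape_str else []
    return shapes


# Example using the first few entries of ecaresnet101d_pruned.txt:
sample = "conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***bn1.weight:[64]"
print(parse_pruned_txt(sample))
# {'conv1.0.weight': [32, 3, 3, 3], 'conv1.1.weight': [32], 'bn1.weight': [64]}
```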
1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 
1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022] \ No newline at end of file diff --git a/data_processing/MANIQA/timm/models/pruned/efficientnet_b1_pruned.txt b/data_processing/MANIQA/timm/models/pruned/efficientnet_b1_pruned.txt new file mode 100644 index 0000000..0972b52 --- /dev/null +++ b/data_processing/MANIQA/timm/models/pruned/efficientnet_b1_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 
1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 
3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 
1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 
1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 
1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000] \ No newline at end of file diff --git a/data_processing/MANIQA/timm/models/pruned/efficientnet_b2_pruned.txt b/data_processing/MANIQA/timm/models/pruned/efficientnet_b2_pruned.txt new file mode 100644 index 0000000..6e3fade --- /dev/null +++ b/data_processing/MANIQA/timm/models/pruned/efficientnet_b2_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 
1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 
5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 
1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 
1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 
1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000] \ No newline at end of file diff --git 
a/data_processing/MANIQA/timm/models/pruned/efficientnet_b3_pruned.txt b/data_processing/MANIQA/timm/models/pruned/efficientnet_b3_pruned.txt new file mode 100644 index 0000000..4897817 --- /dev/null +++ b/data_processing/MANIQA/timm/models/pruned/efficientnet_b3_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 
3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 
1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 
1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 
5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 
1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 
1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000] \ No newline at end of file diff --git a/data_processing/MANIQA/timm/models/registry.py b/data_processing/MANIQA/timm/models/registry.py new file mode 100644 index 0000000..f92219b --- /dev/null +++ b/data_processing/MANIQA/timm/models/registry.py @@ -0,0 +1,149 @@ +""" Model Registry +Hacked together by / Copyright 2020 Ross Wightman +""" + +import sys +import re +import fnmatch +from collections import defaultdict +from copy import deepcopy + +__all__ = ['list_models', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', + 'is_model_default_key', 'has_model_default_key', 'get_model_default_value', 'is_model_pretrained'] + +_module_to_models = defaultdict(set) # dict of sets to check membership of model in module +_model_to_module = {} # mapping of model names to module names +_model_entrypoints = {} # mapping of model names to entrypoint fns +_model_has_pretrained = set() # set of model names that have pretrained weight url present +_model_default_cfgs = dict() # central repo for model default_cfgs + + +def register_model(fn): + # lookup containing module + mod = sys.modules[fn.__module__] + module_name_split = fn.__module__.split('.') + module_name = module_name_split[-1] if len(module_name_split) else '' + + # add model to __all__ in module + model_name = fn.__name__ + if hasattr(mod, '__all__'): + mod.__all__.append(model_name) + else: + mod.__all__ = [model_name] + + # add entries to registry dict/sets + _model_entrypoints[model_name] = fn + _model_to_module[model_name] = module_name + _module_to_models[module_name].add(model_name) + has_pretrained = False # check if model has a pretrained url to allow filtering on this + if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs: + # this will catch all models that have entrypoint matching cfg key, but miss any aliasing + # entrypoints or non-matching combos + has_pretrained = 'url' in mod.default_cfgs[model_name] and 'http' in mod.default_cfgs[model_name]['url'] + _model_default_cfgs[model_name] = deepcopy(mod.default_cfgs[model_name]) + if has_pretrained: + _model_has_pretrained.add(model_name) + return fn + + +def _natural_key(string_): + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def list_models(filter='', module='', pretrained=False, exclude_filters='', name_matches_cfg=False): + """ Return list of available model names, sorted alphabetically + + Args: + filter (str) - Wildcard filter string that works with fnmatch + module (str) - Limit model selection to a specific sub-module (ie 'gen_efficientnet') + pretrained (bool) - Include only models with pretrained weights if True + exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter + name_matches_cfg (bool) - Include only models w/ model_name matching default_cfg name (excludes some aliases) + + Example: + model_list('gluon_resnet*') -- returns all models starting with 'gluon_resnet' + model_list('*resnext*, 'resnet') -- returns all models with 'resnext' in 'resnet' module + """ + if module: + all_models = list(_module_to_models[module]) + else: + all_models = 
_model_entrypoints.keys() + if filter: + models = [] + include_filters = filter if isinstance(filter, (tuple, list)) else [filter] + for f in include_filters: + include_models = fnmatch.filter(all_models, f) # include these models + if len(include_models): + models = set(models).union(include_models) + else: + models = all_models + if exclude_filters: + if not isinstance(exclude_filters, (tuple, list)): + exclude_filters = [exclude_filters] + for xf in exclude_filters: + exclude_models = fnmatch.filter(models, xf) # exclude these models + if len(exclude_models): + models = set(models).difference(exclude_models) + if pretrained: + models = _model_has_pretrained.intersection(models) + if name_matches_cfg: + models = set(_model_default_cfgs).intersection(models) + return list(sorted(models, key=_natural_key)) + + +def is_model(model_name): + """ Check if a model name exists + """ + return model_name in _model_entrypoints + + +def model_entrypoint(model_name): + """Fetch a model entrypoint for specified model name + """ + return _model_entrypoints[model_name] + + +def list_modules(): + """ Return list of module names that contain models / model entrypoints + """ + modules = _module_to_models.keys() + return list(sorted(modules)) + + +def is_model_in_modules(model_name, module_names): + """Check if a model exists within a subset of modules + Args: + model_name (str) - name of model to check + module_names (tuple, list, set) - names of modules to search in + """ + assert isinstance(module_names, (tuple, list, set)) + return any(model_name in _module_to_models[n] for n in module_names) + + +def has_model_default_key(model_name, cfg_key): + """ Query model default_cfgs for existence of a specific key. + """ + if model_name in _model_default_cfgs and cfg_key in _model_default_cfgs[model_name]: + return True + return False + + +def is_model_default_key(model_name, cfg_key): + """ Return truthy value for specified model default_cfg key, False if does not exist. + """ + if model_name in _model_default_cfgs and _model_default_cfgs[model_name].get(cfg_key, False): + return True + return False + + +def get_model_default_value(model_name, cfg_key): + """ Get a specific model default_cfg value by key. None if it doesn't exist. + """ + if model_name in _model_default_cfgs: + return _model_default_cfgs[model_name].get(cfg_key, None) + else: + return None + + +def is_model_pretrained(model_name): + return model_name in _model_has_pretrained diff --git a/data_processing/MANIQA/timm/models/regnet.py b/data_processing/MANIQA/timm/models/regnet.py new file mode 100644 index 0000000..6a38107 --- /dev/null +++ b/data_processing/MANIQA/timm/models/regnet.py @@ -0,0 +1,494 @@ +"""RegNet + +Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +Based on original PyTorch impl linked above, but re-wrote to use my own blocks (adapted from ResNet here) +and cleaned up with more descriptive variable names. 
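
The registry module added above is what the `@register_model` decorators in this file (and the other model files below) hook into: importing a model module records each builder in `_model_entrypoints`, after which models can be filtered and instantiated by name. A minimal sketch of that round trip, assuming the vendored package mirrors upstream timm and is importable from `data_processing/MANIQA` as `timm`:

```python
import timm.models  # importing the package runs the @register_model decorators

from timm.models.registry import is_model, list_models, model_entrypoint

# Wildcard filtering over everything registered so far (pretrained=True keeps only
# entries whose default_cfg carries a download URL).
print(list_models('regnety_*', pretrained=True))

# Resolve a registered name to its builder function and instantiate it.
if is_model('regnety_032'):
    builder = model_entrypoint('regnety_032')
    model = builder(pretrained=False, num_classes=10)  # extra kwargs flow into build_model_with_cfg
```
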
+ +Weights from original impl have been modified +* first layer from BGR -> RGB as most PyTorch models are +* removed training specific dict entries from checkpoints and keep model state_dict only +* remap names to match the ones here + +Hacked together by / Copyright 2020 Ross Wightman +""" +import numpy as np +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, AvgPool2dSame, ConvBnAct, SEModule, DropPath +from .registry import register_model + + +def _mcfg(**kwargs): + cfg = dict(se_ratio=0., bottle_ratio=1., stem_width=32) + cfg.update(**kwargs) + return cfg + + +# Model FLOPS = three trailing digits * 10^8 +model_cfgs = dict( + regnetx_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13), + regnetx_004=_mcfg(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22), + regnetx_006=_mcfg(w0=48, wa=36.97, wm=2.24, group_w=24, depth=16), + regnetx_008=_mcfg(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16), + regnetx_016=_mcfg(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18), + regnetx_032=_mcfg(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25), + regnetx_040=_mcfg(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23), + regnetx_064=_mcfg(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17), + regnetx_080=_mcfg(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23), + regnetx_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19), + regnetx_160=_mcfg(w0=216, wa=55.59, wm=2.1, group_w=128, depth=22), + regnetx_320=_mcfg(w0=320, wa=69.86, wm=2.0, group_w=168, depth=23), + regnety_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13, se_ratio=0.25), + regnety_004=_mcfg(w0=48, wa=27.89, wm=2.09, group_w=8, depth=16, se_ratio=0.25), + regnety_006=_mcfg(w0=48, wa=32.54, wm=2.32, group_w=16, depth=15, se_ratio=0.25), + regnety_008=_mcfg(w0=56, wa=38.84, wm=2.4, group_w=16, depth=14, se_ratio=0.25), + regnety_016=_mcfg(w0=48, wa=20.71, wm=2.65, group_w=24, depth=27, se_ratio=0.25), + regnety_032=_mcfg(w0=80, wa=42.63, wm=2.66, group_w=24, depth=21, se_ratio=0.25), + regnety_040=_mcfg(w0=96, wa=31.41, wm=2.24, group_w=64, depth=22, se_ratio=0.25), + regnety_064=_mcfg(w0=112, wa=33.22, wm=2.27, group_w=72, depth=25, se_ratio=0.25), + regnety_080=_mcfg(w0=192, wa=76.82, wm=2.19, group_w=56, depth=17, se_ratio=0.25), + regnety_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, se_ratio=0.25), + regnety_160=_mcfg(w0=200, wa=106.23, wm=2.48, group_w=112, depth=18, se_ratio=0.25), + regnety_320=_mcfg(w0=232, wa=115.89, wm=2.53, group_w=232, depth=20, se_ratio=0.25), +) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + regnetx_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth'), + regnetx_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth'), + regnetx_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth'), + 
regnetx_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth'), + regnetx_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth'), + regnetx_032=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth'), + regnetx_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth'), + regnetx_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth'), + regnetx_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth'), + regnetx_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth'), + regnetx_160=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth'), + regnetx_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth'), + regnety_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth'), + regnety_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth'), + regnety_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth'), + regnety_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth'), + regnety_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth'), + regnety_032=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth'), + regnety_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth'), + regnety_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth'), + 
regnety_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth'), + regnety_160=_cfg( + url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth', # from Facebook DeiT GitHub repository + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth'), +) + + +def quantize_float(f, q): + """Converts a float to closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_widths_groups_comp(widths, bottle_ratios, groups): + """Adjusts the compatibility of widths and groups.""" + bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)] + bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)] + widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)] + return widths, groups + + +def generate_regnet(width_slope, width_initial, width_mult, depth, q=8): + """Generates per block widths from RegNet parameters.""" + assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % q == 0 + widths_cont = np.arange(depth) * width_slope + width_initial + width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) + widths = width_initial * np.power(width_mult, width_exps) + widths = np.round(np.divide(widths, q)) * q + num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1 + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages, max_stage, widths_cont + + +class Bottleneck(nn.Module): + """ RegNet Bottleneck + + This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from + after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels. 
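
As a sanity check on the width-schedule logic in `generate_regnet` above, here is a minimal, self-contained re-derivation (NumPy only) using the `regnetx_002` parameters from `model_cfgs`; the expected stage split is the published RegNetX-200MF schedule:

```python
import numpy as np

# Standalone re-derivation of the per-block width schedule for the regnetx_002 config
# above (w0=24, wa=36.44, wm=2.49, depth=13); mirrors generate_regnet() in this file.
w0, wa, wm, depth, q = 24, 36.44, 2.49, 13, 8
widths_cont = np.arange(depth) * wa + w0                      # linear widths u_j = w0 + wa * j
width_exps = np.round(np.log(widths_cont / w0) / np.log(wm))  # snap each width to a power of wm
widths = np.round(w0 * np.power(wm, width_exps) / q) * q      # quantize to a multiple of q=8
stage_widths, stage_depths = np.unique(widths.astype(int), return_counts=True)
print(stage_widths, stage_depths)
# Expected (matches the published RegNetX-200MF schedule): widths [24, 56, 152, 368], depths [1, 1, 4, 7]
```
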
+ """ + + def __init__(self, in_chs, out_chs, stride=1, dilation=1, bottleneck_ratio=1, group_width=1, se_ratio=0.25, + downsample=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, + drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + bottleneck_chs = int(round(out_chs * bottleneck_ratio)) + groups = bottleneck_chs // group_width + + cargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, bottleneck_chs, kernel_size=1, **cargs) + self.conv2 = ConvBnAct( + bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation, + groups=groups, **cargs) + if se_ratio: + se_channels = int(round(in_chs * se_ratio)) + self.se = SEModule(bottleneck_chs, rd_channels=se_channels) + else: + self.se = None + cargs['act_layer'] = None + self.conv3 = ConvBnAct(bottleneck_chs, out_chs, kernel_size=1, **cargs) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + x = self.conv3(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + return x + + +def downsample_conv( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + dilation = dilation if kernel_size > 1 else 1 + return ConvBnAct( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, act_layer=None) + + +def downsample_avg( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. 
This is not in RegNet space but I might experiment.""" + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + pool = nn.Identity() + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + return nn.Sequential(*[ + pool, ConvBnAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, act_layer=None)]) + + +class RegStage(nn.Module): + """Stage (sequence of blocks w/ the same output shape).""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio, group_width, + block_fn=Bottleneck, se_ratio=0., drop_path_rates=None, drop_block=None): + super(RegStage, self).__init__() + block_kwargs = {} # FIXME setup to pass various aa, norm, act layer common args + first_dilation = 1 if dilation in (1, 2) else 2 + for i in range(depth): + block_stride = stride if i == 0 else 1 + block_in_chs = in_chs if i == 0 else out_chs + block_dilation = first_dilation if i == 0 else dilation + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + if (block_in_chs != out_chs) or (block_stride != 1): + proj_block = downsample_conv(block_in_chs, out_chs, 1, block_stride, block_dilation) + else: + proj_block = None + + name = "b{}".format(i + 1) + self.add_module( + name, block_fn( + block_in_chs, out_chs, block_stride, block_dilation, bottle_ratio, group_width, se_ratio, + downsample=proj_block, drop_block=drop_block, drop_path=drop_path, **block_kwargs) + ) + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +class RegNet(nn.Module): + """RegNet model. + + Paper: https://arxiv.org/abs/2003.13678 + Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + drop_path_rate=0., zero_init_last_bn=True): + super().__init__() + # TODO add drop block, drop path, anti-aliasing, custom bn/act args + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + # Construct the stem + stem_width = cfg['stem_width'] + self.stem = ConvBnAct(in_chans, stem_width, 3, stride=2) + self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] + + # Construct the stages + prev_width = stem_width + curr_stride = 2 + stage_params = self._get_stage_params(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate) + se_ratio = cfg['se_ratio'] + for i, stage_args in enumerate(stage_params): + stage_name = "s{}".format(i + 1) + self.add_module(stage_name, RegStage(prev_width, **stage_args, se_ratio=se_ratio)) + prev_width = stage_args['out_chs'] + curr_stride *= stage_args['stride'] + self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] + + # Construct the head + self.num_features = prev_width + self.head = ClassifierHead( + in_chs=prev_width, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 
mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def _get_stage_params(self, cfg, default_stride=2, output_stride=32, drop_path_rate=0.): + # Generate RegNet ws per block + w_a, w_0, w_m, d = cfg['wa'], cfg['w0'], cfg['wm'], cfg['depth'] + widths, num_stages, _, _ = generate_regnet(w_a, w_0, w_m, d) + + # Convert to per stage format + stage_widths, stage_depths = np.unique(widths, return_counts=True) + + # Use the same group width, bottleneck mult and stride for each stage + stage_groups = [cfg['group_w'] for _ in range(num_stages)] + stage_bottle_ratios = [cfg['bottle_ratio'] for _ in range(num_stages)] + stage_strides = [] + stage_dilations = [] + net_stride = 2 + dilation = 1 + for _ in range(num_stages): + if net_stride >= output_stride: + dilation *= default_stride + stride = 1 + else: + stride = default_stride + net_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + stage_dpr = np.split(np.linspace(0, drop_path_rate, d), np.cumsum(stage_depths[:-1])) + + # Adjust the compatibility of ws and gws + stage_widths, stage_groups = adjust_widths_groups_comp(stage_widths, stage_bottle_ratios, stage_groups) + param_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_width', 'drop_path_rates'] + stage_params = [ + dict(zip(param_names, params)) for params in + zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_bottle_ratios, stage_groups, + stage_dpr)] + return stage_params + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + for block in list(self.children())[:-1]: + x = block(x) + return x + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +def _filter_fn(state_dict): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + if 'model' in state_dict: + # For DeiT trained regnety_160 pretraiend model + state_dict = state_dict['model'] + return state_dict + + +def _create_regnet(variant, pretrained, **kwargs): + return build_model_with_cfg( + RegNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + pretrained_filter_fn=_filter_fn, + **kwargs) + + +@register_model +def regnetx_002(pretrained=False, **kwargs): + """RegNetX-200MF""" + return _create_regnet('regnetx_002', pretrained, **kwargs) + + +@register_model +def regnetx_004(pretrained=False, **kwargs): + """RegNetX-400MF""" + return _create_regnet('regnetx_004', pretrained, **kwargs) + + +@register_model +def regnetx_006(pretrained=False, **kwargs): + """RegNetX-600MF""" + return _create_regnet('regnetx_006', pretrained, **kwargs) + + +@register_model +def regnetx_008(pretrained=False, **kwargs): + """RegNetX-800MF""" + return _create_regnet('regnetx_008', pretrained, **kwargs) + + +@register_model +def regnetx_016(pretrained=False, **kwargs): + """RegNetX-1.6GF""" + return _create_regnet('regnetx_016', pretrained, **kwargs) + + +@register_model +def regnetx_032(pretrained=False, **kwargs): + """RegNetX-3.2GF""" + return _create_regnet('regnetx_032', pretrained, **kwargs) + + +@register_model +def regnetx_040(pretrained=False, **kwargs): + """RegNetX-4.0GF""" + return _create_regnet('regnetx_040', pretrained, **kwargs) + + +@register_model +def 
regnetx_064(pretrained=False, **kwargs): + """RegNetX-6.4GF""" + return _create_regnet('regnetx_064', pretrained, **kwargs) + + +@register_model +def regnetx_080(pretrained=False, **kwargs): + """RegNetX-8.0GF""" + return _create_regnet('regnetx_080', pretrained, **kwargs) + + +@register_model +def regnetx_120(pretrained=False, **kwargs): + """RegNetX-12GF""" + return _create_regnet('regnetx_120', pretrained, **kwargs) + + +@register_model +def regnetx_160(pretrained=False, **kwargs): + """RegNetX-16GF""" + return _create_regnet('regnetx_160', pretrained, **kwargs) + + +@register_model +def regnetx_320(pretrained=False, **kwargs): + """RegNetX-32GF""" + return _create_regnet('regnetx_320', pretrained, **kwargs) + + +@register_model +def regnety_002(pretrained=False, **kwargs): + """RegNetY-200MF""" + return _create_regnet('regnety_002', pretrained, **kwargs) + + +@register_model +def regnety_004(pretrained=False, **kwargs): + """RegNetY-400MF""" + return _create_regnet('regnety_004', pretrained, **kwargs) + + +@register_model +def regnety_006(pretrained=False, **kwargs): + """RegNetY-600MF""" + return _create_regnet('regnety_006', pretrained, **kwargs) + + +@register_model +def regnety_008(pretrained=False, **kwargs): + """RegNetY-800MF""" + return _create_regnet('regnety_008', pretrained, **kwargs) + + +@register_model +def regnety_016(pretrained=False, **kwargs): + """RegNetY-1.6GF""" + return _create_regnet('regnety_016', pretrained, **kwargs) + + +@register_model +def regnety_032(pretrained=False, **kwargs): + """RegNetY-3.2GF""" + return _create_regnet('regnety_032', pretrained, **kwargs) + + +@register_model +def regnety_040(pretrained=False, **kwargs): + """RegNetY-4.0GF""" + return _create_regnet('regnety_040', pretrained, **kwargs) + + +@register_model +def regnety_064(pretrained=False, **kwargs): + """RegNetY-6.4GF""" + return _create_regnet('regnety_064', pretrained, **kwargs) + + +@register_model +def regnety_080(pretrained=False, **kwargs): + """RegNetY-8.0GF""" + return _create_regnet('regnety_080', pretrained, **kwargs) + + +@register_model +def regnety_120(pretrained=False, **kwargs): + """RegNetY-12GF""" + return _create_regnet('regnety_120', pretrained, **kwargs) + + +@register_model +def regnety_160(pretrained=False, **kwargs): + """RegNetY-16GF""" + return _create_regnet('regnety_160', pretrained, **kwargs) + + +@register_model +def regnety_320(pretrained=False, **kwargs): + """RegNetY-32GF""" + return _create_regnet('regnety_320', pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/res2net.py b/data_processing/MANIQA/timm/models/res2net.py new file mode 100644 index 0000000..282baba --- /dev/null +++ b/data_processing/MANIQA/timm/models/res2net.py @@ -0,0 +1,216 @@ +""" Res2Net and Res2NeXt +Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ +Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .resnet import ResNet + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 
'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'res2net50_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth'), + 'res2net50_48w_2s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth'), + 'res2net50_14w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth'), + 'res2net50_26w_6s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth'), + 'res2net50_26w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth'), + 'res2net101_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth'), + 'res2next50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth'), +} + + +class Bottle2neck(nn.Module): + """ Res2Net/Res2NeXT Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py + """ + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, + act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_): + super(Bottle2neck, self).__init__() + self.scale = scale + self.is_first = stride > 1 or downsample is not None + self.num_scales = max(1, scale - 1) + width = int(math.floor(planes * (base_width / 64.0))) * cardinality + self.width = width + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) + self.bn1 = norm_layer(width * scale) + + convs = [] + bns = [] + for i in range(self.num_scales): + convs.append(nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False)) + bns.append(norm_layer(width)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + # FIXME this should probably have count_include_pad=False, but hurts original weights + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + else: + self.pool = None + + self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + self.se = attn_layer(outplanes) if attn_layer is not None else None + + self.relu = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0] # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp = 
conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: + # self.is_first == True, None check for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + if self.se is not None: + out = self.se(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.relu(out) + + return out + + +def _create_res2net(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def res2net50_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net50_26w_4s', pretrained, **model_args) + + +@register_model +def res2net101_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-101 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net101_26w_4s', pretrained, **model_args) + + +@register_model +def res2net50_26w_6s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w6s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6), **kwargs) + return _create_res2net('res2net50_26w_6s', pretrained, **model_args) + + +@register_model +def res2net50_26w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w8s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_26w_8s', pretrained, **model_args) + + +@register_model +def res2net50_48w_2s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 48w2s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2), **kwargs) + return _create_res2net('res2net50_48w_2s', pretrained, **model_args) + + +@register_model +def res2net50_14w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 14w8s model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_14w_8s', pretrained, **model_args) + + +@register_model +def res2next50(pretrained=False, **kwargs): + """Construct Res2NeXt-50 4s + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2next50', pretrained, **model_args) diff --git a/data_processing/MANIQA/timm/models/resnest.py b/data_processing/MANIQA/timm/models/resnest.py new file mode 100644 index 0000000..31eebd8 --- /dev/null +++ b/data_processing/MANIQA/timm/models/resnest.py @@ -0,0 +1,237 @@ +""" ResNeSt Models + +Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang + +Modified for torchscript compat, and consistency with timm by Ross Wightman +""" +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SplitAttn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1.0', 'classifier': 'fc', + **kwargs + } + +default_cfgs = { + 'resnest14d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth'), + 'resnest26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth'), + 'resnest50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth'), + 'resnest101e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnest200e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), + 'resnest269e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth', + input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), + 'resnest50d_4s2x40d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth', + interpolation='bicubic'), + 'resnest50d_1s4x24d': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth', + interpolation='bicubic') +} + + +class ResNestBottleneck(nn.Module): + """ResNet Bottleneck + """ + # pylint: disable=unused-argument + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResNestBottleneck, self).__init__() + assert reduce_first == 1 # not supported + assert attn_layer is None # not supported + assert aa_layer is None # TODO not yet supported + assert drop_path is None # TODO not yet supported + + group_width = int(planes * (base_width / 64.)) * cardinality + first_dilation = first_dilation or dilation + if avd and (stride > 1 or is_first): + avd_stride = stride + stride = 1 + else: + avd_stride = 0 + self.radix = radix + self.drop_block = drop_block + + self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) + self.bn1 = norm_layer(group_width) + self.act1 = act_layer(inplace=True) + self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None + + if self.radix >= 1: + self.conv2 = SplitAttn( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_block=drop_block) + self.bn2 = nn.Identity() + self.act2 = nn.Identity() + else: + self.conv2 = nn.Conv2d( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(group_width) + self.act2 = act_layer(inplace=True) + self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None + + self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) + self.bn3 = norm_layer(planes*4) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act1(out) + + if self.avd_first is not None: + out = self.avd_first(out) + + out = self.conv2(out) + out = self.bn2(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act2(out) + + if self.avd_last is not None: + out = self.avd_last(out) + + out = self.conv3(out) + out = self.bn3(out) + if self.drop_block is not None: + out = self.drop_block(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.act3(out) + return out + + +def _create_resnest(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnest14d(pretrained=False, **kwargs): + """ ResNeSt-14d model. Weights ported from GluonCV. 
+ """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[1, 1, 1, 1], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest14d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest26d(pretrained=False, **kwargs): + """ ResNeSt-26d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[2, 2, 2, 2], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest26d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d(pretrained=False, **kwargs): + """ ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest101e(pretrained=False, **kwargs): + """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 23, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest101e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest200e(pretrained=False, **kwargs): + """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 24, 36, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest200e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest269e(pretrained=False, **kwargs): + """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. 
+ """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 30, 48, 8], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest269e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_4s2x40d(pretrained=False, **kwargs): + """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, + block_args=dict(radix=4, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_1s4x24d(pretrained=False, **kwargs): + """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, + block_args=dict(radix=1, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/resnet.py b/data_processing/MANIQA/timm/models/resnet.py new file mode 100644 index 0000000..f0ce507 --- /dev/null +++ b/data_processing/MANIQA/timm/models/resnet.py @@ -0,0 +1,1547 @@ +"""PyTorch ResNet + +This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with +additional dropout and dynamic global avg/max pool. 
+ +ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman + +Copyright 2019, Ross Wightman +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, create_attn, get_attn, create_classifier +from .registry import register_model + +__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # ResNet and Wide ResNet + 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'), + 'resnet18d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), + 'resnet34d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth', + interpolation='bicubic'), + 'resnet26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'resnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet50t': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet101d': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet200': _cfg(url='', interpolation='bicubic'), + 'resnet200d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'), + 'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'), + 'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'), + 'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'), + 'wide_resnet50_2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth', + interpolation='bicubic'), + 'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'), + + # ResNets w/ alternative norm layers + 'resnet50_gn': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth', + crop_pct=0.94, interpolation='bicubic'), + + # ResNeXt + 'resnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnext50d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'resnext101_32x4d': _cfg(url=''), + 'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'), + 'resnext101_64x4d': _cfg(url=''), + 'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'), + + # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags + # from 
https://github.com/facebookresearch/WSL-Images + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. + 'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'), + 'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'), + 'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'), + 'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'), + + # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. + 'ssl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'), + 'ssl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'), + 'ssl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'), + 'ssl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'), + 'ssl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'), + 'ssl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'), + + # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. 
+ 'swsl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'), + 'swsl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'), + 'swsl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'), + 'swsl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'), + 'swsl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'), + 'swsl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'), + + # Squeeze-Excitation ResNets, to eventually replace the models in senet.py + 'seresnet18': _cfg( + url='', + interpolation='bicubic'), + 'seresnet34': _cfg( + url='', + interpolation='bicubic'), + 'seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth', + interpolation='bicubic'), + 'seresnet50t': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnet101': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320) + ), + 'seresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'seresnet269d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + + + # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py + 'seresnext26d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext26t_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth', + interpolation='bicubic'), + 'seresnext101_32x4d': _cfg( + url='', + interpolation='bicubic'), + 'seresnext101_32x8d': _cfg( + url='', + interpolation='bicubic'), + 'senet154': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), 
+ + # Efficient Channel Attention ResNets + 'ecaresnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnetlight': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth', + interpolation='bicubic'), + 'ecaresnet50d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnet101d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnet101d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'ecaresnet269d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), + crop_pct=1.0, test_input_size=(3, 352, 352)), + + # Efficient Channel Attention ResNeXts + 'ecaresnext26t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnext50t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + + # ResNets with anti-aliasing blur pool + 'resnetblur18': _cfg( + interpolation='bicubic'), + 'resnetblur50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth', + interpolation='bicubic'), + 'resnetblur50d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnetblur101d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnetaa50d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnetaa101d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'seresnetaa50d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + + # ResNet-RS models + 'resnetrs50': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', + input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs200': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs200_ema-623d2f59.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs270': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs350': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs420': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), + interpolation='bicubic', first_conv='conv1.0'), +} + + +def get_padding(kernel_size, stride, dilation=1): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +def create_aa(aa_layer, channels, stride=2, enable=True): + if not aa_layer or not enable: + return None + return aa_layer(stride) if issubclass(aa_layer, nn.AvgPool2d) else aa_layer(channels=channels, stride=stride) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(BasicBlock, self).__init__() + + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock does not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d( + inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, 
padding=first_dilation, + dilation=first_dilation, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa) + + self.conv2 = nn.Conv2d( + first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) + self.bn2 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act2 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn2.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act2(x) + + return x + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + + self.conv2 = nn.Conv2d( + first_planes, width, kernel_size=3, stride=1 if use_aa else stride, + padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(width) + self.act2 = act_layer(inplace=True) + self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa) + + self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act2(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv3(x) + x = self.bn3(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + + return x + + +def downsample_conv( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if 
stride == 1 and dilation == 1 else kernel_size + first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 + p = get_padding(kernel_size, stride, first_dilation) + + return nn.Sequential(*[ + nn.Conv2d( + in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), + norm_layer(out_channels) + ]) + + +def downsample_avg( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + if stride == 1 and dilation == 1: + pool = nn.Identity() + else: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + + return nn.Sequential(*[ + pool, + nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), + norm_layer(out_channels) + ]) + + +def drop_blocks(drop_block_rate=0.): + return [ + None, None, + DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None, + DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None] + + +def make_blocks( + block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32, + down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs): + stages = [] + feature_info = [] + net_num_blocks = sum(block_repeats) + net_block_idx = 0 + net_stride = 4 + dilation = prev_dilation = 1 + for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): + stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it + stride = 1 if stage_idx == 0 else 2 + if net_stride >= output_stride: + dilation *= stride + stride = 1 + else: + net_stride *= stride + + downsample = None + if stride != 1 or inplanes != planes * block_fn.expansion: + down_kwargs = dict( + in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, + stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer')) + downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) + + block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) + blocks = [] + for block_idx in range(num_blocks): + downsample = downsample if block_idx == 0 else None + stride = stride if block_idx == 0 else 1 + block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule + blocks.append(block_fn( + inplanes, planes, stride, downsample, first_dilation=prev_dilation, + drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs)) + prev_dilation = dilation + inplanes = planes * block_fn.expansion + net_block_idx += 1 + + stages.append((stage_name, nn.Sequential(*blocks))) + feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) + + return stages, feature_info + + +class ResNet(nn.Module): + """ResNet / ResNeXt / SE-ResNeXt / SE-Net + + This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that + * have > 1 stride in the 3x3 conv layer of bottleneck + * have conv-bn-act ordering + + This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s + variants included in the MXNet Gluon ResNetV1b model. 
The C and D variants are also discussed in the + 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. + + ResNet variants (the same modifications can be used in SE/ResNeXt models as well): + * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b + * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) + * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample + * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample + * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) + * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample + * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample + + ResNeXt + * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths + * same c,d, e, s variants as ResNet can be enabled + + SE-ResNeXt + * normal - 7x7 stem, stem_width = 64 + * same c, d, e, s variants as ResNet can be enabled + + SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, + reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block + + Parameters + ---------- + block : Block + Class for the residual block. Options are BasicBlockGl, BottleneckGl. + layers : list of int + Numbers of layers in each block + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + cardinality : int, default 1 + Number of convolution groups for 3x3 conv in Bottleneck. + base_width : int, default 64 + Factor determining bottleneck channels. `planes * base_width / 64 * cardinality` + stem_width : int, default 64 + Number of channels in stem convolutions + stem_type : str, default '' + The type of stem: + * '', default - a single 7x7 conv with a width of stem_width + * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 + * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 + block_reduce_first: int, default 1 + Reduction factor for first convolution output width of residual blocks, + 1 for all archs except senets, where 2 + down_kernel_size: int, default 1 + Kernel size of residual block downsampling path, 1x1 for most archs, 3x3 for senets + avg_down : bool, default False + Whether to use average pooling for projection skip connection between stages/downsample. + output_stride : int, default 32 + Set the output stride of the network, 32, 16, or 8. Typically used in segmentation. + act_layer : nn.Module, activation layer + norm_layer : nn.Module, normalization layer + aa_layer : nn.Module, anti-aliasing layer + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, block, layers, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, + output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0., + drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None): + block_args = block_args or dict() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + super(ResNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type + inplanes = stem_width * 2 if deep_stem else 64 + if deep_stem: + stem_chs = (stem_width, stem_width) + if 'tiered' in stem_type: + stem_chs = (3 * (stem_width // 4), stem_width) + self.conv1 = nn.Sequential(*[ + nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), + norm_layer(stem_chs[0]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), + norm_layer(stem_chs[1]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) + else: + self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = norm_layer(inplanes) + self.act1 = act_layer(inplace=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] + + # Stem pooling. The name 'maxpool' remains for weight compatibility. + if replace_stem_pool: + self.maxpool = nn.Sequential(*filter(None, [ + nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), + create_aa(aa_layer, channels=inplanes, stride=2), + norm_layer(inplanes), + act_layer(inplace=True) + ])) + else: + if aa_layer is not None: + if issubclass(aa_layer, nn.AvgPool2d): + self.maxpool = aa_layer(2) + else: + self.maxpool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=inplanes, stride=2)]) + else: + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # Feature Blocks + channels = [64, 128, 256, 512] + stage_modules, stage_feature_info = make_blocks( + block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, + output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, + down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, + drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args) + for stage in stage_modules: + self.add_module(*stage) # layer1, layer2, etc + self.feature_info.extend(stage_feature_info) + + # Head (Pooling and Classifier) + self.num_features = 512 * block.expansion + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(zero_init_last_bn=zero_init_last_bn) + + def init_weights(self, zero_init_last_bn=True): + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, 
self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + x = self.fc(x) + return x + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet18', pretrained, **model_args) + + +@register_model +def resnet18d(pretrained=False, **kwargs): + """Constructs a ResNet-18-D model. + """ + model_args = dict( + block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet18d', pretrained, **model_args) + + +@register_model +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet34', pretrained, **model_args) + + +@register_model +def resnet34d(pretrained=False, **kwargs): + """Constructs a ResNet-34-D model. + """ + model_args = dict( + block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet34d', pretrained, **model_args) + + +@register_model +def resnet26(pretrained=False, **kwargs): + """Constructs a ResNet-26 model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet26', pretrained, **model_args) + + +@register_model +def resnet26t(pretrained=False, **kwargs): + """Constructs a ResNet-26-T model. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet26t', pretrained, **model_args) + + +@register_model +def resnet26d(pretrained=False, **kwargs): + """Constructs a ResNet-26-D model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet26d', pretrained, **model_args) + + +@register_model +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50', pretrained, **model_args) + + +@register_model +def resnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet50d', pretrained, **model_args) + + +@register_model +def resnet50t(pretrained=False, **kwargs): + """Constructs a ResNet-50-T model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet50t', pretrained, **model_args) + + +@register_model +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('resnet101', pretrained, **model_args) + + +@register_model +def resnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet101d', pretrained, **model_args) + + +@register_model +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('resnet152', pretrained, **model_args) + + +@register_model +def resnet152d(pretrained=False, **kwargs): + """Constructs a ResNet-152-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet152d', pretrained, **model_args) + + +@register_model +def resnet200(pretrained=False, **kwargs): + """Constructs a ResNet-200 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs) + return _create_resnet('resnet200', pretrained, **model_args) + + +@register_model +def resnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet200d', pretrained, **model_args) + + +@register_model +def tv_resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model with original Torchvision weights. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet34', pretrained, **model_args) + + +@register_model +def tv_resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with original Torchvision weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet50', pretrained, **model_args) + + +@register_model +def tv_resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('tv_resnet101', pretrained, **model_args) + + +@register_model +def tv_resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('tv_resnet152', pretrained, **model_args) + + +@register_model +def wide_resnet50_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-50-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet50_2', pretrained, **model_args) + + +@register_model +def wide_resnet101_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-101-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet101_2', pretrained, **model_args) + + +@register_model +def resnet50_gn(pretrained=False, **kwargs): + """Constructs a ResNet-50 model w/ GroupNorm + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50_gn', pretrained, norm_layer=GroupNorm, **model_args) + + +@register_model +def resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext50_32x4d', pretrained, **model_args) + + +@register_model +def resnext50d_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnext50d_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext101_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x8d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('resnext101_32x8d', pretrained, **model_args) + + +@register_model +def resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt101-64x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('resnext101_64x4d', pretrained, **model_args) + + +@register_model +def tv_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model with original Torchvision weights. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x32d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs) + return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x48d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs) + return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args) + + +@register_model +def ssl_resnet18(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('ssl_resnet18', pretrained, **model_args) + + +@register_model +def ssl_resnet50(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('ssl_resnet50', pretrained, **model_args) + + +@register_model +def 
ssl_resnext50_32x4d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x4d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def swsl_resnet18(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised Resnet-18 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('swsl_resnet18', pretrained, **model_args) + + +@register_model +def swsl_resnet50(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. 
+ `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('swsl_resnet50', pretrained, **model_args) + + +@register_model +def swsl_resnext50_32x4d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x4d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ecaresnet26t(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet26t', pretrained, **model_args) + + +@register_model +def ecaresnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with eca. 
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d', pretrained, **model_args) + + +@register_model +def resnetrs50(pretrained=False, **kwargs): + """Constructs a ResNet-RS-50 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs50', pretrained, **model_args) + + +@register_model +def resnetrs101(pretrained=False, **kwargs): + """Constructs a ResNet-RS-101 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs101', pretrained, **model_args) + + +@register_model +def resnetrs152(pretrained=False, **kwargs): + """Constructs a ResNet-RS-152 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs152', pretrained, **model_args) + + +@register_model +def resnetrs200(pretrained=False, **kwargs): + """Constructs a ResNet-RS-200 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs200', pretrained, **model_args) + + +@register_model +def resnetrs270(pretrained=False, **kwargs): + """Constructs a ResNet-RS-270 model. 
+ Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs270', pretrained, **model_args) + + + +@register_model +def resnetrs350(pretrained=False, **kwargs): + """Constructs a ResNet-RS-350 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs350', pretrained, **model_args) + + +@register_model +def resnetrs420(pretrained=False, **kwargs): + """Constructs a ResNet-RS-420 model + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs420', pretrained, **model_args) + + +@register_model +def ecaresnet50d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet50t(pretrained=False, **kwargs): + """Constructs an ECA-ResNet-50-T model. + Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50t', pretrained, **model_args) + + +@register_model +def ecaresnetlight(pretrained=False, **kwargs): + """Constructs a ResNet-50-D light model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnetlight', pretrained, **model_args) + + +@register_model +def ecaresnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with eca. 
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d', pretrained, **model_args) + + +@register_model +def ecaresnet101d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet200d', pretrained, **model_args) + + +@register_model +def ecaresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet269d', pretrained, **model_args) + + +@register_model +def ecaresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def ecaresnext50t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-50-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. 
This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args) + + +@register_model +def resnetblur18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model with blur anti-aliasing + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur18', pretrained, **model_args) + + +@register_model +def resnetblur50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with blur anti-aliasing + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur50', pretrained, **model_args) + + +@register_model +def resnetblur50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with blur anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetblur50d', pretrained, **model_args) + + +@register_model +def resnetblur101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with blur anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=BlurPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetblur101d', pretrained, **model_args) + + +@register_model +def resnetaa50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with avgpool anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetaa50d', pretrained, **model_args) + + +@register_model +def resnetaa101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with avgpool anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=nn.AvgPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetaa101d', pretrained, **model_args) + + +@register_model +def seresnetaa50d(pretrained=False, **kwargs): + """Constructs a SE=ResNet-50-D model with avgpool anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, + stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnetaa50d', pretrained, **model_args) + + +@register_model +def seresnet18(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet18', pretrained, **model_args) + + +@register_model +def seresnet34(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet34', pretrained, **model_args) + + +@register_model +def seresnet50(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50', pretrained, **model_args) + + +@register_model +def seresnet50t(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', 
avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50t', pretrained, **model_args) + + +@register_model +def seresnet101(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet101', pretrained, **model_args) + + +@register_model +def seresnet152(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152', pretrained, **model_args) + + +@register_model +def seresnet152d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152d', pretrained, **model_args) + + +@register_model +def seresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet200d', pretrained, **model_args) + + +@register_model +def seresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet269d', pretrained, **model_args) + + +@register_model +def seresnext26d_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-D model.` + This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for + combination of deep stem and avg_pool in downsample. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26d_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNet-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26tn_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-T model. + NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note + so keeping this def for backwards compat with any uses out there. Old 't' model is lost. 
+ """ + return seresnext26t_32x4d(pretrained=pretrained, **kwargs) + + +@register_model +def seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x8d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x8d', pretrained, **model_args) + + +@register_model +def senet154(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('senet154', pretrained, **model_args) diff --git a/data_processing/MANIQA/timm/models/resnetv2.py b/data_processing/MANIQA/timm/models/resnetv2.py new file mode 100644 index 0000000..e38eaf5 --- /dev/null +++ b/data_processing/MANIQA/timm/models/resnetv2.py @@ -0,0 +1,672 @@ +"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. + +A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfoer (BiT) source code +at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have +been included here as pretrained models from their original .NPZ checkpoints. + +Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transfomers (ViT) and +extra padding support to allow porting of official Hybrid ResNet pretrained weights from +https://github.com/google-research/vision_transformer + +Thanks to the Google team for the above two repositories and associated papers: +* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370 +* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929 +* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + +Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020. +""" +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
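+
+# A minimal sketch (illustrative only, not part of the upstream file) of the pre-activation
+# ordering with weight-standardized convs described in the module docstring above: the
+# normalization and activation run *before* each convolution, e.g.
+#
+#   norm = GroupNormAct(64, num_groups=32)    # GroupNorm fused with its activation
+#   conv = StdConv2d(64, 64, 3, padding=1)    # conv whose weights are standardized
+#   y = conv(norm(x))                         # preact: norm/act first, then conv
+#
+# The non-preact Bottleneck defined further down keeps the usual conv -> norm -> act order
+# for use as a ViT hybrid backbone.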
+ +from collections import OrderedDict # pylint: disable=g-importing-member + +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .registry import register_model +from .layers import GroupNormAct, BatchNormAct2d, EvoNormBatch2d, EvoNormSample2d,\ + ClassifierHead, DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # pretrained on imagenet21k, finetuned on imagenet1k + 'resnetv2_50x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_50x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x2_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x4_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz', + input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0), # only one at 480x480? 
+ + # trained on imagenet-21k + 'resnetv2_50x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1.npz', + num_classes=21843), + 'resnetv2_50x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3.npz', + num_classes=21843), + 'resnetv2_101x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1.npz', + num_classes=21843), + 'resnetv2_101x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3.npz', + num_classes=21843), + 'resnetv2_152x2_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2.npz', + num_classes=21843), + 'resnetv2_152x4_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4.npz', + num_classes=21843), + + 'resnetv2_50x1_bit_distilled': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R50x1_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher_384': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_384.npz', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), + + 'resnetv2_50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetv2_50_a1h-000cdf49.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnetv2_50d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50t': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetv2_101_a1h-5d01f016.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnetv2_101d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_152': _cfg( + interpolation='bicubic'), + 'resnetv2_152d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + + 'resnetv2_50d_gn': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50d_evob': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50d_evos': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), +} + + +def make_div(v, divisor=8): + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + + Follows the implementation of "Identity Mappings in Deep Residual Networks": + https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua + + Except it puts the stride on 3x3 conv when available. 
+ """ + + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.norm1 = norm_layer(in_chs) + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm2 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm3 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv3.weight) + + def forward(self, x): + x_preact = self.norm1(x) + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x_preact) + + # residual branch + x = self.conv1(x_preact) + x = self.conv2(self.norm2(x)) + x = self.conv3(self.norm3(x)) + x = self.drop_path(x) + return x + shortcut + + +class Bottleneck(nn.Module): + """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. + """ + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + act_layer = act_layer or nn.ReLU + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, preact=False, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm1 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm2 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.norm3 = norm_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.act3 = act_layer(inplace=True) + + def zero_init_last(self): + nn.init.zeros_(self.norm3.weight) + + def forward(self, x): + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + # residual + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.conv3(x) + x = self.norm3(x) + x = self.drop_path(x) + x = self.act3(x + shortcut) + return x + + +class DownsampleConv(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, + conv_layer=None, norm_layer=None): + super(DownsampleConv, self).__init__() + self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(x)) + + 
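+# A minimal sketch (illustrative only, not from the upstream timm source) of how the two
+# projection shortcuts differ: DownsampleConv above reduces resolution with a strided 1x1
+# conv, while DownsampleAvg below average-pools first and then applies a stride-1 1x1 conv,
+# which is what the 'D'-style variants select via avg_down=True. Channel and stride values
+# here are assumptions for illustration:
+#
+#   conv_proj = DownsampleConv(64, 256, stride=2, preact=True, conv_layer=StdConv2d,
+#                              norm_layer=partial(GroupNormAct, num_groups=32))
+#   avg_proj = DownsampleAvg(64, 256, stride=2, preact=True, conv_layer=StdConv2d,
+#                            norm_layer=partial(GroupNormAct, num_groups=32))
+#   # both map (N, 64, 56, 56) -> (N, 256, 28, 28); only the downsampling operator differs
+
+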
+class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, + preact=True, conv_layer=None, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(self.pool(x))) + + +class ResNetStage(nn.Module): + """ResNet Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1, + avg_down=False, block_dpr=None, block_fn=PreActBottleneck, + act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs): + super(ResNetStage, self).__init__() + first_dilation = 1 if dilation in (1, 2) else 2 + layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) + proj_layer = DownsampleAvg if avg_down else DownsampleConv + prev_chs = in_chs + self.blocks = nn.Sequential() + for block_idx in range(depth): + drop_path_rate = block_dpr[block_idx] if block_dpr else 0. + stride = stride if block_idx == 0 else 1 + self.blocks.add_module(str(block_idx), block_fn( + prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, + first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate, + **layer_kwargs, **block_kwargs)) + prev_chs = out_chs + first_dilation = dilation + proj_layer = None + + def forward(self, x): + x = self.blocks(x) + return x + + +def is_stem_deep(stem_type): + return any([s in stem_type for s in ('deep', 'tiered')]) + + +def create_resnetv2_stem( + in_chs, out_chs=64, stem_type='', preact=True, + conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32)): + stem = OrderedDict() + assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') + + # NOTE conv padding mode can be changed by overriding the conv_layer def + if is_stem_deep(stem_type): + # A 3 deep 3x3 conv stack as in ResNet V1D models + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets + stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) + stem['norm1'] = norm_layer(stem_chs[0]) + stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) + stem['norm2'] = norm_layer(stem_chs[1]) + stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) + if not preact: + stem['norm3'] = norm_layer(out_chs) + else: + # The usual 7x7 stem conv + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + if not preact: + stem['norm'] = norm_layer(out_chs) + + if 'fixed' in stem_type: + # 'fixed' SAME padding approximation that is used in BiT models + stem['pad'] = nn.ConstantPad2d(1, 0.) 
+ stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + elif 'same' in stem_type: + # full, input size based 'SAME' padding, used in ViT Hybrid model + stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same') + else: + # the usual PyTorch symmetric padding + stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + return nn.Sequential(stem) + + +class ResNetV2(nn.Module): + """Implementation of Pre-activation (v2) ResNet mode. + """ + + def __init__( + self, layers, channels=(256, 512, 1024, 2048), + num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True, + act_layer=nn.ReLU, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32), + drop_rate=0., drop_path_rate=0., zero_init_last=False): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + wf = width_factor + + self.feature_info = [] + stem_chs = make_div(stem_chs * wf) + self.stem = create_resnetv2_stem( + in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer) + stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm' + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat)) + + prev_chs = stem_chs + curr_stride = 4 + dilation = 1 + block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] + block_fn = PreActBottleneck if preact else Bottleneck + self.stages = nn.Sequential() + for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)): + out_chs = make_div(c * wf) + stride = 1 if stage_idx == 0 else 2 + if curr_stride >= output_stride: + dilation *= stride + stride = 1 + stage = ResNetStage( + prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down, + act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn) + prev_chs = out_chs + curr_stride *= stride + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')] + self.stages.add_module(str(stage_idx), stage) + + self.num_features = prev_chs + self.norm = norm_layer(self.num_features) if preact else nn.Identity() + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + self.init_weights(zero_init_last=zero_init_last) + + def init_weights(self, zero_init_last=True): + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix='resnet/'): + _load_weights(self, checkpoint_path, prefix) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str = '', zero_init_last=True): + if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode='fan_out', 
nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +@torch.no_grad() +def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): + import numpy as np + + def t2p(conv_weights): + """Possibly convert HWIO to OIHW.""" + if conv_weights.ndim == 4: + conv_weights = conv_weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(conv_weights) + + weights = np.load(checkpoint_path) + stem_conv_w = adapt_input_conv( + model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) + model.stem.conv.weight.copy_(stem_conv_w) + model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) + model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) + if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ + model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: + model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) + model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) + for i, (sname, stage) in enumerate(model.stages.named_children()): + for j, (bname, block) in enumerate(stage.blocks.named_children()): + cname = 'standardized_conv2d' + block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' + block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) + block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) + block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) + block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) + block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) + block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) + block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) + block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) + block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) + if block.downsample is not None: + w = weights[f'{block_prefix}a/proj/{cname}/kernel'] + block.downsample.conv.weight.copy_(t2p(w)) + + +def _create_resnetv2(variant, pretrained=False, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ResNetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + pretrained_custom_load='_bit' in variant, + **kwargs) + + +def _create_resnetv2_bit(variant, pretrained=False, **kwargs): + return _create_resnetv2( + variant, pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-8), **kwargs) + + +@register_model +def resnetv2_50x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x1_bitm', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm', 
pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bit_distilled(pretrained=False, **kwargs): + """ ResNetV2-50x1-BiT Distilled + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_50x1_bit_distilled', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher(pretrained=False, **kwargs): + """ ResNetV2-152x2-BiT Teacher + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher_384(pretrained=False, **kwargs): + """ ResNetV2-152xx-BiT Teacher @ 384x384 + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher_384', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_50(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model 
+def resnetv2_50d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50t(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50t', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='tiered', avg_down=True, **kwargs) + + +@register_model +def resnetv2_101(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_101d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101d', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_152(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_152d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152d', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +# Experimental configs (may change / be removed) + +@register_model +def resnetv2_50d_gn(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_gn', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50d_evob(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_evob', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormBatch2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50d_evos(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_evos', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormSample2d, + stem_type='deep', avg_down=True, **kwargs) diff --git a/data_processing/MANIQA/timm/models/rexnet.py b/data_processing/MANIQA/timm/models/rexnet.py new file mode 100644 index 0000000..f27ce5d --- /dev/null +++ b/data_processing/MANIQA/timm/models/rexnet.py @@ -0,0 +1,239 @@ +""" ReXNet + +A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - +https://arxiv.org/abs/2007.00992 + +Adapted from original impl at https://github.com/clovaai/rexnet +Copyright (c) 2020-present NAVER Corp. 
MIT license + +Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman +Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn +from functools import partial +from math import ceil + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, create_act_layer, ConvBnAct, DropPath, make_divisible, SEModule +from .registry import register_model +from .efficientnet_builder import efficientnet_init_weights + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + rexnet_100=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth'), + rexnet_130=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth'), + rexnet_150=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth'), + rexnet_200=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth'), + rexnetr_100=_cfg( + url=''), + rexnetr_130=_cfg( + url=''), + rexnetr_150=_cfg( + url=''), + rexnetr_200=_cfg( + url=''), +) + +SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) + + +class LinearBottleneck(nn.Module): + def __init__(self, in_chs, out_chs, stride, exp_ratio=1.0, se_ratio=0., ch_div=1, + act_layer='swish', dw_act_layer='relu6', drop_path=None): + super(LinearBottleneck, self).__init__() + self.use_shortcut = stride == 1 and in_chs <= out_chs + self.in_channels = in_chs + self.out_channels = out_chs + + if exp_ratio != 1.: + dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) + self.conv_exp = ConvBnAct(in_chs, dw_chs, act_layer=act_layer) + else: + dw_chs = in_chs + self.conv_exp = None + + self.conv_dw = ConvBnAct(dw_chs, dw_chs, 3, stride=stride, groups=dw_chs, apply_act=False) + if se_ratio > 0: + self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) + else: + self.se = None + self.act_dw = create_act_layer(dw_act_layer) + + self.conv_pwl = ConvBnAct(dw_chs, out_chs, 1, apply_act=False) + self.drop_path = drop_path + + def feat_channels(self, exp=False): + return self.conv_dw.out_channels if exp else self.out_channels + + def forward(self, x): + shortcut = x + if self.conv_exp is not None: + x = self.conv_exp(x) + x = self.conv_dw(x) + if self.se is not None: + x = self.se(x) + x = self.act_dw(x) + x = self.conv_pwl(x) + if self.use_shortcut: + if self.drop_path is not None: + x = self.drop_path(x) + x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1) + return x + + +def _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=0., ch_div=1): + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) + 
exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) + depth = sum(layers[:]) * 3 + base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs + + # The following channel configuration is a simple instance to make each layer become an expand layer. + out_chs_list = [] + for i in range(depth // 3): + out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) + base_chs += final_chs / (depth // 3 * 1.0) + + se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) + + return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) + + +def _build_blocks( + block_cfg, prev_chs, width_mult, ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_path_rate=0.): + feat_chs = [prev_chs] + feature_info = [] + curr_stride = 2 + features = [] + num_blocks = len(block_cfg) + for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): + if stride > 1: + fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] + curr_stride *= stride + block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule + drop_path = DropPath(block_dpr) if block_dpr > 0. else None + features.append(LinearBottleneck( + in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, se_ratio=se_ratio, + ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path)) + prev_chs = chs + feat_chs += [features[-1].feat_channels()] + pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] + features.append(ConvBnAct(prev_chs, pen_chs, act_layer=act_layer)) + return features, feature_info + + +class ReXNetV1(nn.Module): + def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, + initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, se_ratio=1/12., + ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_rate=0.2, drop_path_rate=0.): + super(ReXNetV1, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + + assert output_stride == 32 # FIXME support dilation + stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 + stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) + self.stem = ConvBnAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) + + block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) + features, self.feature_info = _build_blocks( + block_cfg, stem_chs, width_mult, ch_div, act_layer, dw_act_layer, drop_path_rate) + self.num_features = features[-1].out_channels + self.features = nn.Sequential(*features) + + self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) + + efficientnet_init_weights(self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_rexnet(variant, pretrained, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ReXNetV1, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + **kwargs) + + 
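+# A minimal sketch (illustrative only, not part of the upstream file) of what _block_cfg
+# yields for the default ReXNet-1.0x settings used by rexnet_100 below; the numbers are read
+# off the code above rather than taken from a separate reference:
+#
+#   cfg = _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=1/12.)
+#   # 16 (out_chs, exp_ratio, stride, se_ratio) tuples, one per LinearBottleneck;
+#   # cfg[0] == (16, 1, 1, 0.0), later blocks use exp_ratio 6, and the first block of each
+#   # stage takes that stage's stride from [1, 2, 2, 2, 1, 2].
+
+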
+@register_model +def rexnet_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x""" + return _create_rexnet('rexnet_100', pretrained, **kwargs) + + +@register_model +def rexnet_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x""" + return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) + + +@register_model +def rexnet_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x""" + return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) + + +@register_model +def rexnet_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x""" + return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) + + +@register_model +def rexnetr_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) + + +@register_model +def rexnetr_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) + + +@register_model +def rexnetr_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) + + +@register_model +def rexnetr_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) diff --git a/data_processing/MANIQA/timm/models/selecsls.py b/data_processing/MANIQA/timm/models/selecsls.py new file mode 100644 index 0000000..1f3379d --- /dev/null +++ b/data_processing/MANIQA/timm/models/selecsls.py @@ -0,0 +1,362 @@ +"""PyTorch SelecSLS Net example for ImageNet Classification +License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) +Author: Dushyant Mehta (@mehtadushy) + +SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D +Human Pose Estimation with a Single RGB Camera, Mehta et al." 
+https://arxiv.org/abs/1907.00837 + +Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models +and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SelecSLS'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'selecsls42': _cfg( + url='', + interpolation='bicubic'), + 'selecsls42b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth', + interpolation='bicubic'), + 'selecsls60': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth', + interpolation='bicubic'), + 'selecsls60b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth', + interpolation='bicubic'), + 'selecsls84': _cfg( + url='', + interpolation='bicubic'), +} + + +class SequentialList(nn.Sequential): + + def __init__(self, *args): + super(SequentialList, self).__init__(*args) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (List[torch.Tensor]) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (List[torch.Tensor]) + pass + + def forward(self, x) -> List[torch.Tensor]: + for module in self: + x = module(x) + return x + + +class SelectSeq(nn.Module): + def __init__(self, mode='index', index=0): + super(SelectSeq, self).__init__() + self.mode = mode + self.index = index + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor]) -> (torch.Tensor) + pass + + def forward(self, x) -> torch.Tensor: + if self.mode == 'index': + return x[self.index] + else: + return torch.cat(x, dim=1) + + +def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): + if padding is None: + padding = ((stride - 1) + dilation * (k - 1)) // 2 + return nn.Sequential( + nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(out_chs), + nn.ReLU(inplace=True) + ) + + +class SelecSLSBlock(nn.Module): + def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): + super(SelecSLSBlock, self).__init__() + self.stride = stride + self.is_first = is_first + assert stride in [1, 2] + + # Process input with 4 conv blocks with the same number of input and output channels + self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, 
dilation=dilation) + self.conv2 = conv_bn(mid_chs, mid_chs, 1) + self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) + self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + if not isinstance(x, list): + x = [x] + assert len(x) in [1, 2] + + d1 = self.conv1(x[0]) + d2 = self.conv3(self.conv2(d1)) + d3 = self.conv5(self.conv4(d2)) + if self.is_first: + out = self.conv6(torch.cat([d1, d2, d3], 1)) + return [out, out] + else: + return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] + + +class SelecSLS(nn.Module): + """SelecSLS42 / SelecSLS60 / SelecSLS84 + + Parameters + ---------- + cfg : network config dictionary specifying block type, feature, and head args + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(SelecSLS, self).__init__() + + self.stem = conv_bn(in_chans, 32, stride=2) + self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) + self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way + self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) + self.num_features = cfg['num_features'] + self.feature_info = cfg['feature_info'] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) 
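+
+    # A brief, illustrative note (not from the upstream file) on the flow implemented by the
+    # methods below: the stem halves the input resolution, `features` is a SequentialList whose
+    # SelecSLSBlock modules pass a [main, skip] pair of tensors along, `from_seq` (SelectSeq)
+    # picks the main tensor back out, and the `head` conv stack feeds global pooling and `fc`.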
+ + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + x = self.head(self.from_seq(x)) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _create_selecsls(variant, pretrained, **kwargs): + cfg = {} + feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] + if variant.startswith('selecsls42'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 144, 144, True, 2), + (144, 144, 144, 288, False, 1), + (288, 0, 304, 304, True, 2), + (304, 304, 304, 480, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.3'), + dict(num_chs=480, reduction=16, module='features.5'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls42b': + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant.startswith('selecsls60'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 128, 128, True, 2), + (128, 128, 128, 128, False, 1), + (128, 128, 128, 288, False, 1), + (288, 0, 288, 288, True, 2), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 416, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.4'), + dict(num_chs=416, reduction=16, module='features.8'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls60b': + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant == 'selecsls84': + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 144, False, 1), + (144, 0, 144, 144, True, 2), + (144, 144, 144, 
144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 304, False, 1), + (304, 0, 304, 304, True, 2), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 512, False, 1), + ] + feature_info.extend([ + dict(num_chs=144, reduction=4, module='features.1'), + dict(num_chs=304, reduction=8, module='features.6'), + dict(num_chs=512, reduction=16, module='features.12'), + ]) + # Head can be replaced with alternative configurations depending on the problem + cfg['head'] = [ + (512, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 3, 1), + ] + cfg['num_features'] = 1280 + feature_info.extend([ + dict(num_chs=1024, reduction=32, module='head.1'), + dict(num_chs=1280, reduction=64, module='head.3') + ]) + else: + raise ValueError('Invalid net configuration ' + variant + ' !!!') + cfg['feature_info'] = feature_info + + # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? + return build_model_with_cfg( + SelecSLS, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfg, + feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def selecsls42(pretrained=False, **kwargs): + """Constructs a SelecSLS42 model. + """ + return _create_selecsls('selecsls42', pretrained, **kwargs) + + +@register_model +def selecsls42b(pretrained=False, **kwargs): + """Constructs a SelecSLS42_B model. + """ + return _create_selecsls('selecsls42b', pretrained, **kwargs) + + +@register_model +def selecsls60(pretrained=False, **kwargs): + """Constructs a SelecSLS60 model. + """ + return _create_selecsls('selecsls60', pretrained, **kwargs) + + +@register_model +def selecsls60b(pretrained=False, **kwargs): + """Constructs a SelecSLS60_B model. + """ + return _create_selecsls('selecsls60b', pretrained, **kwargs) + + +@register_model +def selecsls84(pretrained=False, **kwargs): + """Constructs a SelecSLS84 model. + """ + return _create_selecsls('selecsls84', pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/senet.py b/data_processing/MANIQA/timm/models/senet.py new file mode 100644 index 0000000..3d0ba7b --- /dev/null +++ b/data_processing/MANIQA/timm/models/senet.py @@ -0,0 +1,467 @@ +""" +SEResNet implementation from Cadene's pretrained models +https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py +Additional credit to https://github.com/creafz + +Original model: https://github.com/hujie-frank/SENet + +ResNet code gently borrowed from +https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py + +FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate +support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. 
+""" +import math +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SENet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', + **kwargs + } + + +default_cfgs = { + 'legacy_senet154': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'), + 'legacy_seresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', + interpolation='bicubic'), + 'legacy_seresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), + 'legacy_seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), + 'legacy_seresnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), + 'legacy_seresnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), + 'legacy_seresnext26_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', + interpolation='bicubic'), + 'legacy_seresnext50_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'), + 'legacy_seresnext101_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'), +} + + +def _weight_init(m): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = x.mean((2, 3), keepdim=True) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. 
+ """ + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes * 2) + self.conv2 = nn.Conv2d( + planes * 2, planes * 4, kernel_size=3, stride=stride, + padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes * 4) + self.conv3 = nn.Conv2d( + planes * 4, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=1, bias=False, stride=stride) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. 
+ """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None, base_width=4): + super(SEResNeXtBottleneck, self).__init__() + width = math.floor(planes * (base_width / 64)) * groups + self.conv1 = nn.Conv2d( + inplanes, width, kernel_size=1, bias=False, stride=1) + self.bn1 = nn.BatchNorm2d(width) + self.conv2 = nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(width) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBlock, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes, reduction=reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SENet(nn.Module): + + def __init__(self, block, layers, groups, reduction, drop_rate=0.2, + in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1, + downsample_padding=0, num_classes=1000, global_pool='avg'): + """ + Parameters + ---------- + block (nn.Module): Bottleneck class. + - For SENet154: SEBottleneck + - For SE-ResNet models: SEResNetBottleneck + - For SE-ResNeXt models: SEResNeXtBottleneck + layers (list of ints): Number of residual blocks for 4 layers of the + network (layer1...layer4). + groups (int): Number of groups for the 3x3 convolution in each + bottleneck block. + - For SENet154: 64 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 32 + reduction (int): Reduction ratio for Squeeze-and-Excitation modules. + - For all models: 16 + dropout_p (float or None): Drop probability for the Dropout layer. + If `None` the Dropout layer is not used. + - For SENet154: 0.2 + - For SE-ResNet models: None + - For SE-ResNeXt models: None + inplanes (int): Number of input channels for layer1. + - For SENet154: 128 + - For SE-ResNet models: 64 + - For SE-ResNeXt models: 64 + input_3x3 (bool): If `True`, use three 3x3 convolutions instead of + a single 7x7 convolution in layer0. + - For SENet154: True + - For SE-ResNet models: False + - For SE-ResNeXt models: False + downsample_kernel_size (int): Kernel size for downsampling convolutions + in layer2, layer3 and layer4. + - For SENet154: 3 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 1 + downsample_padding (int): Padding for downsampling convolutions in + layer2, layer3 and layer4. + - For SENet154: 1 + - For SE-ResNet models: 0 + - For SE-ResNeXt models: 0 + num_classes (int): Number of outputs in `last_linear` layer. 
+ - For all models: 1000 + """ + super(SENet, self).__init__() + self.inplanes = inplanes + self.num_classes = num_classes + self.drop_rate = drop_rate + if input_3x3: + layer0_modules = [ + ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), + ('bn1', nn.BatchNorm2d(64)), + ('relu1', nn.ReLU(inplace=True)), + ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), + ('bn2', nn.BatchNorm2d(64)), + ('relu2', nn.ReLU(inplace=True)), + ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), + ('bn3', nn.BatchNorm2d(inplanes)), + ('relu3', nn.ReLU(inplace=True)), + ] + else: + layer0_modules = [ + ('conv1', nn.Conv2d( + in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), + ('bn1', nn.BatchNorm2d(inplanes)), + ('relu1', nn.ReLU(inplace=True)), + ] + self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) + # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. + self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] + self.num_features = 512 * block.expansion + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + _weight_init(m) + + def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, + downsample_kernel_size=1, downsample_padding=0): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, + stride=stride, padding=downsample_padding, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.layer0(x) + x = 
self.pool0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def logits(self, x): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.logits(x) + return x + + +def _create_senet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + SENet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def legacy_seresnet18(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet18', pretrained, **model_args) + + +@register_model +def legacy_seresnet34(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet34', pretrained, **model_args) + + +@register_model +def legacy_seresnet50(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet50', pretrained, **model_args) + + +@register_model +def legacy_seresnet101(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet101', pretrained, **model_args) + + +@register_model +def legacy_seresnet152(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet152', pretrained, **model_args) + + +@register_model +def legacy_senet154(pretrained=False, **kwargs): + model_args = dict( + block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, + downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) + return _create_senet('legacy_senet154', pretrained, **model_args) + + +@register_model +def legacy_seresnext26_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/data_processing/MANIQA/timm/models/sknet.py b/data_processing/MANIQA/timm/models/sknet.py new file mode 100644 index 0000000..4dc2aa5 --- /dev/null +++ b/data_processing/MANIQA/timm/models/sknet.py @@ -0,0 +1,215 @@ +""" Selective Kernel Networks (ResNet base) + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +This was inspired by reading 'Compounding the Performance Improvements...' 
(https://arxiv.org/abs/2001.06268) +and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer +to the original paper with some modifications of my own to better balance param count vs accuracy. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +from torch import nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SelectiveKernel, ConvBnAct, create_attn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'skresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'), + 'skresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'), + 'skresnet50': _cfg(), + 'skresnet50d': _cfg( + first_conv='conv1.0'), + 'skresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'), +} + + +class SelectiveKernelBasic(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(SelectiveKernelBasic, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock doest not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = SelectiveKernel( + inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs) + conv_kwargs['act_layer'] = None + self.conv2 = ConvBnAct( + first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +class SelectiveKernelBottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, 
first_dilation=None, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, + drop_block=None, drop_path=None): + super(SelectiveKernelBottleneck, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) + self.conv2 = SelectiveKernel( + first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, + **conv_kwargs, **sk_kwargs) + conv_kwargs['act_layer'] = None + self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +def _create_skresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def skresnet18(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-18 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet18', pretrained, **model_args) + + +@register_model +def skresnet34(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-34 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet34', pretrained, **model_args) + + +@register_model +def skresnet50(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50', pretrained, **model_args) + + +@register_model +def skresnet50d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50-D model. 
+ + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50d', pretrained, **model_args) + + +@register_model +def skresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to + the SKNet-50 model in the Select Kernel Paper + """ + sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnext50_32x4d', pretrained, **model_args) + diff --git a/data_processing/MANIQA/timm/models/swin_transformer.py b/data_processing/MANIQA/timm/models/swin_transformer.py new file mode 100644 index 0000000..37b08ba --- /dev/null +++ b/data_processing/MANIQA/timm/models/swin_transformer.py @@ -0,0 +1,657 @@ +""" Swin Transformer +A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` + - https://arxiv.org/pdf/2103.14030 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import logging +import math +from copy import deepcopy +from typing import Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .layers import _assert +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn, _init_vit_weights + + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (my experiments) + 'swin_base_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_base_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth', + ), + + 'swin_large_patch4_window12_384': _cfg( 
+ url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_large_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth', + ), + + 'swin_small_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth', + ), + + 'swin_tiny_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth', + ), + + 'swin_base_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_base_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', + num_classes=21841), + + 'swin_large_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_large_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth', + num_classes=21841), + +} + + +def window_partition(x, window_size: int): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: int, H: int, W: int): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask: Optional[torch.Tensor] = None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. 
Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + _assert(L == H * W, "input feature has wrong size") + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + _assert(L == H * W, "input feature has wrong size") + _assert(H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even.") + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
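+
+    Blocks within the stage alternate between regular window attention (shift_size=0 at
+    even indices) and shifted window attention (shift_size=window_size // 2 at odd
+    indices); the optional `downsample` module (PatchMerging) halves the spatial
+    resolution at the end of the stage.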
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if not torch.jit.is_scripting() and self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + +class SwinTransformer(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, + embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), + window_size=7, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, weight_init='', **kwargs): + super().__init__() + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + self.patch_grid = self.patch_embed.grid_size + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + else: + self.absolute_pos_embed = None + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + layers = [] + for i_layer in range(self.num_layers): + layers += [BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + ] + self.layers = nn.Sequential(*layers) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1d(1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0. 
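+        # 'nlhb' variants use a negative-log head bias (-log(num_classes)) for the
+        # classifier initialisation, mirroring the JAX ViT init; the value computed here
+        # is only consumed by _init_vit_weights in the 'jax*' branch below.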
+ if weight_init.startswith('jax'): + for n, m in self.named_modules(): + _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True) + else: + self.apply(_init_vit_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.absolute_pos_embed is not None: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + x = self.layers(x) + # x = self.norm(x) # B L C + # x = self.avgpool(x.transpose(1, 2)) # B C 1 + # x = torch.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + # x = self.head(x) + return x + + +def _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs): + if default_cfg is None: + default_cfg = deepcopy(default_cfgs[variant]) + overlay_external_default_cfg(default_cfg, kwargs) + default_num_classes = default_cfg['num_classes'] + default_img_size = default_cfg['input_size'][-2:] + + num_classes = kwargs.pop('num_classes', default_num_classes) + img_size = kwargs.pop('img_size', default_img_size) + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + SwinTransformer, variant, pretrained, + default_cfg=default_cfg, + img_size=img_size, + num_classes=num_classes, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + + +@register_model +def swin_base_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_small_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-S @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), 
num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_tiny_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-T @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-B @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-B @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-L @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-L @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/tnt.py b/data_processing/MANIQA/timm/models/tnt.py new file mode 100644 index 0000000..d52f9ce --- /dev/null +++ b/data_processing/MANIQA/timm/models/tnt.py @@ -0,0 +1,272 @@ +""" Transformer in Transformer (TNT) in PyTorch + +A PyTorch implement of TNT as described in +'Transformer in Transformer' - https://arxiv.org/abs/2103.00112 + +The official mindspore code is released and available at +https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT +""" +import math +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.helpers import build_model_with_cfg +from timm.models.layers import Mlp, DropPath, trunc_normal_ +from timm.models.layers.helpers import to_2tuple +from timm.models.layers import _assert +from timm.models.registry import register_model +from timm.models.vision_transformer import resize_pos_embed + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'pixel_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'tnt_s_patch16_224': _cfg( + 
url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), + 'tnt_b_patch16_224': _cfg( + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), +} + + +class Attention(nn.Module): + """ Multi-Head Attention + """ + def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.hidden_dim = hidden_dim + self.num_heads = num_heads + head_dim = hidden_dim // num_heads + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + + self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop, inplace=True) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop, inplace=True) + + def forward(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple) + v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + """ TNT Block + """ + def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4., + qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(in_dim) + self.attn_in = Attention( + in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.norm_mlp_in = norm_layer(in_dim) + self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4), + out_features=in_dim, act_layer=act_layer, drop=drop) + + self.norm1_proj = norm_layer(in_dim) + self.proj = nn.Linear(in_dim * num_pixel, dim, bias=True) + # Outer transformer + self.norm_out = norm_layer(dim) + self.attn_out = Attention( + dim, dim, num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm_mlp = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), + out_features=dim, act_layer=act_layer, drop=drop) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed))) + pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) + # outer + B, N, C = patch_embed.size() + patch_embed = torch.cat( + [patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))], + dim=1) + patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) + patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Module): + """ Image to Pixel Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # grid_size property necessary for resizing positional embedding + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + num_patches = (self.grid_size[0]) * (self.grid_size[1]) + self.img_size = img_size + self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) + return x + + +class TNT(nn.Module): + """ Transformer in Transformer - https://arxiv.org/abs/2103.00112 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12, + num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.pixel_embed = PixelEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = norm_layer(num_pixel * in_dim) + self.proj = nn.Linear(num_pixel * in_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pixel_pos = nn.Parameter(torch.zeros(1, in_dim, new_patch_size[0], new_patch_size[1])) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, 
drop_path_rate, depth)] # stochastic depth decay rule + blocks = [] + for i in range(depth): + blocks.append(Block( + dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head, + mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[i], norm_layer=norm_layer)) + self.blocks = nn.ModuleList(blocks) + self.norm = norm_layer(embed_dim) + + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'patch_pos', 'pixel_pos', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.pos_drop(patch_embed) + + for blk in self.blocks: + pixel_embed, patch_embed = blk(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return patch_embed[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + if state_dict['patch_pos'].shape != model.patch_pos.shape: + state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'], + model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size) + return state_dict + + +def _create_tnt(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + TNT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def tnt_s_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def tnt_b_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **model_cfg) + return model diff --git a/data_processing/MANIQA/timm/models/tresnet.py b/data_processing/MANIQA/timm/models/tresnet.py new file mode 100644 index 0000000..372bfb7 --- /dev/null +++ b/data_processing/MANIQA/timm/models/tresnet.py @@ -0,0 +1,297 @@ +""" +TResNet: High Performance GPU-Dedicated Architecture 
+https://arxiv.org/pdf/2003.13630.pdf + +Original model: https://github.com/mrT23/TResNet + +""" +from collections import OrderedDict + +import torch +import torch.nn as nn + +from .helpers import build_model_with_cfg +from .layers import SpaceToDepthModule, BlurPool2d, InplaceAbn, ClassifierHead, SEModule +from .registry import register_model + +__all__ = ['tresnet_m', 'tresnet_l', 'tresnet_xl'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': (0, 0, 0), 'std': (1, 1, 1), + 'first_conv': 'body.conv1.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'tresnet_m': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_1k_miil_83_1.pth'), + 'tresnet_m_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_miil_in21k.pth', num_classes=11221), + 'tresnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth'), + 'tresnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth'), + 'tresnet_m_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth'), + 'tresnet_l_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth'), + 'tresnet_xl_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_448-8c1815de.pth') +} + + +def IABN2Float(module: nn.Module) -> nn.Module: + """If `module` is IABN don't use half precision.""" + if isinstance(module, InplaceAbn): + module.float() + for child in module.children(): + IABN2Float(child) + return module + + +def conv2d_iabn(ni, nf, stride, kernel_size=3, groups=1, act_layer="leaky_relu", act_param=1e-2): + return nn.Sequential( + nn.Conv2d( + ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False), + InplaceAbn(nf, act_layer=act_layer, act_param=act_param) + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None): + super(BasicBlock, self).__init__() + if stride == 1: + self.conv1 = conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3) + else: + if aa_layer is None: + self.conv1 = conv2d_iabn(inplanes, planes, stride=2, act_param=1e-3) + else: + self.conv1 = nn.Sequential( + conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + self.conv2 = conv2d_iabn(planes, planes, stride=1, act_layer="identity") + self.relu = 
nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + rd_chs = max(planes * self.expansion // 4, 64) + self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + + if self.se is not None: + out = self.se(out) + + out += shortcut + out = self.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, + act_layer="leaky_relu", aa_layer=None): + super(Bottleneck, self).__init__() + self.conv1 = conv2d_iabn( + inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer, act_param=1e-3) + if stride == 1: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3) + else: + if aa_layer is None: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=2, act_layer=act_layer, act_param=1e-3) + else: + self.conv2 = nn.Sequential( + conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + reduction_chs = max(planes * self.expansion // 8, 64) + self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None + + self.conv3 = conv2d_iabn( + planes, planes * self.expansion, kernel_size=1, stride=1, act_layer="identity") + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + if self.se is not None: + out = self.se(out) + + out = self.conv3(out) + out = out + shortcut # no inplace + out = self.relu(out) + + return out + + +class TResNet(nn.Module): + def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, global_pool='fast', drop_rate=0.): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(TResNet, self).__init__() + + aa_layer = BlurPool2d + + # TResnet stages + self.inplanes = int(64 * width_factor) + self.planes = int(64 * width_factor) + conv1 = conv2d_iabn(in_chans * 16, self.planes, stride=1, kernel_size=3) + layer1 = self._make_layer( + BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer) # 56x56 + layer2 = self._make_layer( + BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer) # 28x28 + layer3 = self._make_layer( + Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer) # 14x14 + layer4 = self._make_layer( + Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer) # 7x7 + + # body + self.body = nn.Sequential(OrderedDict([ + ('SpaceToDepth', SpaceToDepthModule()), + ('conv1', conv1), + ('layer1', layer1), + ('layer2', layer2), + ('layer3', layer3), + ('layer4', layer4)])) + + self.feature_info = [ + dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? 
+ dict(num_chs=self.planes, reduction=4, module='body.layer1'), + dict(num_chs=self.planes * 2, reduction=8, module='body.layer2'), + dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), + dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), + ] + + # head + self.num_features = (self.planes * 8) * Bottleneck.expansion + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + # model initilization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') + elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # residual connections special initialization + for m in self.modules(): + if isinstance(m, BasicBlock): + m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight)) # BN to zero + if isinstance(m, Bottleneck): + m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight)) # BN to zero + if isinstance(m, nn.Linear): + m.weight.data.normal_(0, 0.01) + + def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + layers = [] + if stride == 2: + # avg pooling before 1x1 conv + layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) + layers += [conv2d_iabn( + self.inplanes, planes * block.expansion, kernel_size=1, stride=1, act_layer="identity")] + downsample = nn.Sequential(*layers) + + layers = [] + layers.append(block( + self.inplanes, planes, stride, downsample, use_se=use_se, aa_layer=aa_layer)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block(self.inplanes, planes, use_se=use_se, aa_layer=aa_layer)) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='fast'): + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + return self.body(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_tresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + TResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def tresnet_m(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_miil_in21k(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m_miil_in21k', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + 
return _create_tresnet('tresnet_m_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl_448', pretrained=pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/twins.py b/data_processing/MANIQA/timm/models/twins.py new file mode 100644 index 0000000..67a939d --- /dev/null +++ b/data_processing/MANIQA/timm/models/twins.py @@ -0,0 +1,424 @@ +""" Twins +A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` + - https://arxiv.org/pdf/2104.13840.pdf + +Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below + +""" +# -------------------------------------------------------- +# Twins +# Copyright (c) 2021 Meituan +# Licensed under The Apache 2.0 License [see LICENSE for details] +# Written by Xinjie Li, Xiangxiang Chu +# -------------------------------------------------------- +import math +from copy import deepcopy +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import Mlp, DropPath, to_2tuple, trunc_normal_ +from .fx_features import register_notrace_module +from .registry import register_model +from .vision_transformer import Attention +from .helpers import build_model_with_cfg + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'twins_pcpvt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth', + ), + 'twins_pcpvt_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth', + ), + 'twins_pcpvt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth', + ), + 'twins_svt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth', + ), + 'twins_svt_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth', + ), + 'twins_svt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth', + ), +} + +Size_ = Tuple[int, int] + + +@register_notrace_module 
# reason: FX can't symbolically trace control flow in forward method +class LocallyGroupedAttn(nn.Module): + """ LSA: self attention within a group + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1): + assert ws != 1 + super(LocallyGroupedAttn, self).__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.ws = ws + + def forward(self, x, size: Size_): + # There are two implementations for this function, zero padding or mask. We don't observe obvious difference for + # both. You can choose any one, we recommend forward_padding because it's neat. However, + # the masking implementation is more reasonable and accurate. + B, N, C = x.shape + H, W = size + x = x.view(B, H, W, C) + pad_l = pad_t = 0 + pad_r = (self.ws - W % self.ws) % self.ws + pad_b = (self.ws - H % self.ws) % self.ws + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + _h, _w = Hp // self.ws, Wp // self.ws + x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) + qkv = self.qkv(x).reshape( + B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + x = x.reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + # def forward_mask(self, x, size: Size_): + # B, N, C = x.shape + # H, W = size + # x = x.view(B, H, W, C) + # pad_l = pad_t = 0 + # pad_r = (self.ws - W % self.ws) % self.ws + # pad_b = (self.ws - H % self.ws) % self.ws + # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + # _, Hp, Wp, _ = x.shape + # _h, _w = Hp // self.ws, Wp // self.ws + # mask = torch.zeros((1, Hp, Wp), device=x.device) + # mask[:, -pad_b:, :].fill_(1) + # mask[:, :, -pad_r:].fill_(1) + # + # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C + # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws) + # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws + # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0)) + # qkv = self.qkv(x).reshape( + # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + # # n_h, B, _w*_h, nhead, ws*ws, dim + # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head + # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws + # attn = attn + attn_mask.unsqueeze(2) + # attn = attn.softmax(dim=-1) + # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head + # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + # if pad_r > 0 or pad_b > 0: + # x = x[:, :H, :W, :].contiguous() + # x = x.reshape(B, N, C) + # x = self.proj(x) + # x = self.proj_drop(x) + # return x + + +class 
GlobalSubSampleAttn(nn.Module): + """ GSA: using a key to summarize the information for a group to be efficient. + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=True) + self.kv = nn.Linear(dim, dim * 2, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + + def forward(self, x, size: Size_): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr is not None: + x = x.permute(0, 2, 1).reshape(B, C, *size) + x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None): + super().__init__() + self.norm1 = norm_layer(dim) + if ws is None: + self.attn = Attention(dim, num_heads, False, None, attn_drop, drop) + elif ws == 1: + self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio) + else: + self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Size_): + x = x + self.drop_path(self.attn(self.norm1(x), size)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PosConv(nn.Module): + # PEG from https://arxiv.org/abs/2102.10882 + def __init__(self, in_chans, embed_dim=768, stride=1): + super(PosConv, self).__init__() + self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) + self.stride = stride + + def forward(self, x, size: Size_): + B, N, C = x.shape + cnn_feat_token = x.transpose(1, 2).view(B, C, *size) + x = self.proj(cnn_feat_token) + if self.stride == 1: + x += cnn_feat_token + x = x.flatten(2).transpose(1, 2) + return x + + def no_weight_decay(self): + return ['proj.%d.weight' % i for i in range(4)] + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ + f"img_size {img_size} should be divided by patch_size {patch_size}." 
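(Editor's aside, not part of the diff: the PatchEmbed defined here is just a strided convolution that cuts the image into non-overlapping patches, which is why the divisibility assert above is required. A minimal sketch under assumed, hypothetical sizes — 224x224 input, patch size 4, embed dim 64:)

```python
import torch
import torch.nn as nn

# Illustrative only: mirrors the PatchEmbed idea with assumed sizes.
img, patch, dim = 224, 4, 64
proj = nn.Conv2d(3, dim, kernel_size=patch, stride=patch)  # one output position per non-overlapping patch

x = torch.randn(1, 3, img, img)
tokens = proj(x).flatten(2).transpose(1, 2)  # (1, 56*56, 64): one 64-d token per 4x4 patch
out_size = (img // patch, img // patch)      # spatial grid kept alongside the flattened tokens
print(tokens.shape, out_size)                # torch.Size([1, 3136, 64]) (56, 56)
```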
+ self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x) -> Tuple[torch.Tensor, Size_]: + B, C, H, W = x.shape + + x = self.proj(x).flatten(2).transpose(1, 2) + x = self.norm(x) + out_size = (H // self.patch_size[0], W // self.patch_size[1]) + + return x, out_size + + +class Twins(nn.Module): + """ Twins Vision Transformer (Revisiting Spatial Attention) + + Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git + """ + def __init__( + self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dims=(64, 128, 256, 512), + num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None, + block_cls=Block): + super().__init__() + self.num_classes = num_classes + self.depths = depths + self.embed_dims = embed_dims + self.num_features = embed_dims[-1] + + img_size = to_2tuple(img_size) + prev_chs = in_chans + self.patch_embeds = nn.ModuleList() + self.pos_drops = nn.ModuleList() + for i in range(len(depths)): + self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i])) + self.pos_drops.append(nn.Dropout(p=drop_rate)) + prev_chs = embed_dims[i] + img_size = tuple(t // patch_size for t in img_size) + patch_size = 2 + + self.blocks = nn.ModuleList() + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + cur = 0 + for k in range(len(depths)): + _block = nn.ModuleList([block_cls( + dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k], + ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])]) + self.blocks.append(_block) + cur += depths[k] + + self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims]) + + self.norm = norm_layer(self.num_features) + + # classification head + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # init weights + self.apply(self._init_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return set(['pos_block.'
+ n for n, p in self.pos_block.named_parameters()]) + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + + def forward_features(self, x): + B = x.shape[0] + for i, (embed, drop, blocks, pos_blk) in enumerate( + zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): + x, size = embed(x) + x = drop(x) + for j, blk in enumerate(blocks): + x = blk(x, size) + if j == 0: + x = pos_blk(x, size) # PEG here + if i < len(self.depths) - 1: + x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() + x = self.norm(x) + return x.mean(dim=1) # GAP here + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_twins(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Twins, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def twins_pcpvt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, 
embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs) diff --git a/data_processing/MANIQA/timm/models/vgg.py b/data_processing/MANIQA/timm/models/vgg.py new file mode 100644 index 0000000..ccaa21d --- /dev/null +++ b/data_processing/MANIQA/timm/models/vgg.py @@ -0,0 +1,263 @@ +"""VGG + +Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for +timm functionality. + +Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Union, List, Dict, Any, cast + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .fx_features import register_notrace_module +from .layers import ClassifierHead +from .registry import register_model + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'vgg11': _cfg(url='https://download.pytorch.org/models/vgg11-bbd30ac9.pth'), + 'vgg13': _cfg(url='https://download.pytorch.org/models/vgg13-c768596a.pth'), + 'vgg16': _cfg(url='https://download.pytorch.org/models/vgg16-397923af.pth'), + 'vgg19': _cfg(url='https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'), + 'vgg11_bn': _cfg(url='https://download.pytorch.org/models/vgg11_bn-6002323d.pth'), + 'vgg13_bn': _cfg(url='https://download.pytorch.org/models/vgg13_bn-abd245e5.pth'), + 'vgg16_bn': _cfg(url='https://download.pytorch.org/models/vgg16_bn-6c64b313.pth'), + 'vgg19_bn': _cfg(url='https://download.pytorch.org/models/vgg19_bn-c79401a0.pth'), +} + + +cfgs: Dict[str, List[Union[str, int]]] = { + 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class ConvMlp(nn.Module): + + def __init__(self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, + drop_rate: float = 0.2, act_layer: nn.Module = None, conv_layer: nn.Module = None): + super(ConvMlp, self).__init__() + self.input_kernel_size = kernel_size + mid_features = int(out_features * mlp_ratio) + self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) + self.act1 = act_layer(True) + self.drop = 
nn.Dropout(drop_rate) + self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) + self.act2 = act_layer(True) + + def forward(self, x): + if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: + # keep the input size >= 7x7 + output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) + x = F.adaptive_avg_pool2d(x, output_size) + x = self.fc1(x) + x = self.act1(x) + x = self.drop(x) + x = self.fc2(x) + x = self.act2(x) + return x + + +class VGG(nn.Module): + + def __init__( + self, + cfg: List[Any], + num_classes: int = 1000, + in_chans: int = 3, + output_stride: int = 32, + mlp_ratio: float = 1.0, + act_layer: nn.Module = nn.ReLU, + conv_layer: nn.Module = nn.Conv2d, + norm_layer: nn.Module = None, + global_pool: str = 'avg', + drop_rate: float = 0., + ) -> None: + super(VGG, self).__init__() + assert output_stride == 32 + self.num_classes = num_classes + self.num_features = 4096 + self.drop_rate = drop_rate + self.feature_info = [] + prev_chs = in_chans + net_stride = 1 + pool_layer = nn.MaxPool2d + layers: List[nn.Module] = [] + for v in cfg: + last_idx = len(layers) - 1 + if v == 'M': + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) + layers += [pool_layer(kernel_size=2, stride=2)] + net_stride *= 2 + else: + v = cast(int, v) + conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) + if norm_layer is not None: + layers += [conv2d, norm_layer(v), act_layer(inplace=True)] + else: + layers += [conv2d, act_layer(inplace=True)] + prev_chs = v + self.features = nn.Sequential(*layers) + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) + self.pre_logits = ConvMlp( + prev_chs, self.num_features, 7, mlp_ratio=mlp_ratio, + drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer) + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + self._initialize_weights() + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + x = self.pre_logits(x) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.head(x) + return x + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def _filter_fn(state_dict): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + k_r = k + k_r = k_r.replace('classifier.0', 'pre_logits.fc1') + k_r = k_r.replace('classifier.3', 'pre_logits.fc2') + k_r = k_r.replace('classifier.6', 'head.fc') + if 'classifier.0.weight' in k: + v = v.reshape(-1, 512, 7, 7) + if 'classifier.3.weight' in k: + v = v.reshape(-1, 4096, 1, 1) + out_dict[k_r] = v + return out_dict + + +def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: 
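(Editor's aside, not part of the diff: despite its one-line docstring, the `_filter_fn` just above remaps the torchvision classifier weights — `classifier.0`/`classifier.3` become the `pre_logits` ConvMlp and `classifier.6` becomes `head.fc` — and the reshape to `(-1, 512, 7, 7)` works because a Linear layer on a flattened 7x7 feature map is equivalent to a 7x7 convolution. A small sanity-check sketch, with assumed shapes matching torchvision's VGG classifier:)

```python
import torch
import torch.nn as nn

# Assumed shapes: torchvision VGG classifier.0 is Linear(512 * 7 * 7, 4096).
lin = nn.Linear(512 * 7 * 7, 4096)
conv = nn.Conv2d(512, 4096, kernel_size=7)
conv.weight.data = lin.weight.data.reshape(4096, 512, 7, 7)  # same reshape as _filter_fn applies
conv.bias.data = lin.bias.data

feat = torch.randn(1, 512, 7, 7)
out_linear = lin(feat.flatten(1))    # (1, 4096)
out_conv = conv(feat).flatten(1)     # (1, 4096): the 7x7 map collapses to a single position
print(torch.allclose(out_linear, out_conv, atol=1e-4))  # True, up to float error
```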
+ cfg = variant.split('_')[0] + # NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5] + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5)) + model = build_model_with_cfg( + VGG, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfgs[cfg], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + pretrained_filter_fn=_filter_fn, + **kwargs) + return model + + +@register_model +def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg11', pretrained=pretrained, **model_args) + + +@register_model +def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg13', pretrained=pretrained, **model_args) + + +@register_model +def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg16', pretrained=pretrained, **model_args) + + +@register_model +def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg19', pretrained=pretrained, **model_args) + + +@register_model +def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args) \ No newline at end of file diff --git a/data_processing/MANIQA/timm/models/visformer.py b/data_processing/MANIQA/timm/models/visformer.py new file mode 100644 index 0000000..62f7730 --- /dev/null +++ b/data_processing/MANIQA/timm/models/visformer.py @@ -0,0 +1,413 @@ +""" Visformer + +Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533 + +From original at
https://github.com/danczs/Visformer + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier +from .registry import register_model + + +__all__ = ['Visformer'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + visformer_tiny=_cfg(), + visformer_small=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/visformer_small-839e1f5b.pth' + ), +) + + +class SpatialMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, + act_layer=nn.GELU, drop=0., group=8, spatial_conv=False): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + drop_probs = to_2tuple(drop) + + self.in_features = in_features + self.out_features = out_features + self.spatial_conv = spatial_conv + if self.spatial_conv: + if group < 2: # net setting + hidden_features = in_features * 5 // 6 + else: + hidden_features = in_features * 2 + self.hidden_features = hidden_features + self.group = group + self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) + self.act1 = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if self.spatial_conv: + self.conv2 = nn.Conv2d( + hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) + self.act2 = act_layer() + else: + self.conv2 = None + self.act2 = None + self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) + self.drop3 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.conv1(x) + x = self.act1(x) + x = self.drop1(x) + if self.conv2 is not None: + x = self.conv2(x) + x = self.act2(x) + x = self.conv3(x) + x = self.drop3(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.): + super().__init__() + self.dim = dim + self.num_heads = num_heads + head_dim = round(dim // num_heads * head_dim_ratio) + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, C, H, W = x.shape + x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) + q, k, v = x[0], x[1], x[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__(self, dim, num_heads, 
head_dim_ratio=1., mlp_ratio=4., + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d, + group=8, attn_disabled=False, spatial_conv=False): + super().__init__() + self.spatial_conv = spatial_conv + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + if attn_disabled: + self.norm1 = None + self.attn = None + else: + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=drop) + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = SpatialMlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, + group=group, spatial_conv=spatial_conv) # new setting + + def forward(self, x): + if self.attn is not None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Visformer(nn.Module): + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, + depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=LayerNorm2d, attn_stage='111', pos_embed=True, spatial_conv='111', + vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None): + super().__init__() + img_size = to_2tuple(img_size) + self.num_classes = num_classes + self.embed_dim = embed_dim + self.init_channels = init_channels + self.img_size = img_size + self.vit_stem = vit_stem + self.conv_init = conv_init + if isinstance(depth, (list, tuple)): + self.stage_num1, self.stage_num2, self.stage_num3 = depth + depth = sum(depth) + else: + self.stage_num1 = self.stage_num3 = depth // 3 + self.stage_num2 = depth - self.stage_num1 - self.stage_num3 + self.pos_embed = pos_embed + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + # stage 1 + if self.vit_stem: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // patch_size for x in img_size] + else: + if self.init_channels is None: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 2) for x in img_size] + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(self.init_channels), + nn.ReLU(inplace=True) + ) + img_size = [x // 2 for x in img_size] + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 4) for x in img_size] + + if self.pos_embed: + if self.vit_stem: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + else: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) + self.pos_drop = nn.Dropout(p=drop_rate) + self.stage1 = nn.ModuleList([ + Block( + dim=embed_dim//2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1') + ) + for i in range(self.stage_num1) + ]) + + # stage2 + if not self.vit_stem: + self.patch_embed2 = PatchEmbed( + 
img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 8) for x in img_size] + if self.pos_embed: + self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + self.stage2 = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1') + ) + for i in range(self.stage_num1, self.stage_num1+self.stage_num2) + ]) + + # stage 3 + if not self.vit_stem: + self.patch_embed3 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, + embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 8) for x in img_size] + if self.pos_embed: + self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) + self.stage3 = nn.ModuleList([ + Block( + dim=embed_dim*2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1') + ) + for i in range(self.stage_num1+self.stage_num2, depth) + ]) + + # head + self.num_features = embed_dim if self.vit_stem else embed_dim * 2 + self.norm = norm_layer(self.num_features) + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # weights init + if self.pos_embed: + trunc_normal_(self.pos_embed1, std=0.02) + if not self.vit_stem: + trunc_normal_(self.pos_embed2, std=0.02) + trunc_normal_(self.pos_embed3, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + if self.conv_init: + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + else: + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0.) 
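(Editor's aside, not part of the diff: a usage sketch for the models this file registers. It assumes `data_processing/MANIQA` is on `PYTHONPATH` so the vendored `timm` package resolves; the constructors defined below can then be called directly, or through timm's usual `create_model` factory if the vendored copy includes it:)

```python
import torch
from timm.models.visformer import visformer_tiny  # registered below via @register_model

model = visformer_tiny(pretrained=False)  # no pretrained URL is set for this variant in default_cfgs
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000]): 1000-way ImageNet head by default
```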
+ + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + if self.stem is not None: + x = self.stem(x) + + # stage 1 + x = self.patch_embed1(x) + if self.pos_embed: + x = x + self.pos_embed1 + x = self.pos_drop(x) + for b in self.stage1: + x = b(x) + + # stage 2 + if not self.vit_stem: + x = self.patch_embed2(x) + if self.pos_embed: + x = x + self.pos_embed2 + x = self.pos_drop(x) + for b in self.stage2: + x = b(x) + + # stage3 + if not self.vit_stem: + x = self.patch_embed3(x) + if self.pos_embed: + x = x + self.pos_embed3 + x = self.pos_drop(x) + for b in self.stage3: + x = b(x) + + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + x = self.head(x) + return x + + +def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg( + Visformer, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def visformer_tiny(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def visformer_small(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_small', pretrained=pretrained, **model_cfg) + return model + + +# @register_model +# def visformer_net1(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net2(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net3(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net4(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net5(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# spatial_conv='111', vit_stem=False, 
conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net6(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net7(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model + + + + diff --git a/data_processing/MANIQA/timm/models/vision_transformer.py b/data_processing/MANIQA/timm/models/vision_transformer.py new file mode 100644 index 0000000..d80cb96 --- /dev/null +++ b/data_processing/MANIQA/timm/models/vision_transformer.py @@ -0,0 +1,1045 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +The official jax code is released and available at https://github.com/google-research/vision_transformer + +DeiT model defs and weights from https://github.com/facebookresearch/deit, +paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 + +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... 
check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert + +Hacked together by / Copyright 2020, Ross Wightman +""" +import math +import logging +import numpy as np +from functools import partial +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_ +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +class RandomMaskingGenerator: + def __init__(self, input_size, mask_ratio): + if not isinstance(input_size, tuple): + input_size = (input_size,) * 2 + + self.height, self.width = input_size + + self.num_patches = self.height * self.width # total number of patches, i.e. 196 for a 14x14 grid + self.num_mask = int(mask_ratio * self.num_patches) # 196 * 0.75 + + def __repr__(self): + repr_str = "Masks: total patches {}, mask patches {}".format( + self.num_patches, self.num_mask + ) + return repr_str + + def __call__(self): + mask = np.hstack([ + np.zeros(self.num_patches - self.num_mask), + np.ones(self.num_mask), + ]) + np.random.shuffle(mask) + return mask + + +default_cfgs = { + # patch models (weights from official Google JAX impl) + 'vit_tiny_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_tiny_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', +
input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_base_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_base_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch32_224': _cfg( + url='', # no official model weights for this combo, only for in21k + ), + 'vit_large_patch32_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + + 'vit_huge_patch14_224': _cfg(url=''), + 'vit_giant_patch14_224': _cfg(url=''), + 'vit_gigantic_patch14_224': _cfg(url=''), + + 'vit_base2_patch32_256': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), + + # patch models, imagenet21k (weights from official Google JAX impl) + 'vit_tiny_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch16_224_in21k': _cfg( + 
url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_large_patch32_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', + num_classes=21843), + 'vit_large_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', + num_classes=21843), + 'vit_huge_patch14_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', + hf_hub='timm/vit_huge_patch14_224_in21k', + num_classes=21843), + + # SAM trained models (https://arxiv.org/abs/2106.01548) + 'vit_base_patch32_224_sam': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz'), + 'vit_base_patch16_224_sam': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz'), + + # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) + 'vit_small_patch16_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_small_patch8_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch16_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch8_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + + # deit models (FB weights) + 'deit_tiny_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_small_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_384': _cfg( + 
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0), + 'deit_tiny_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_small_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0, + classifier=('head', 'head_dist')), + + # ViT ImageNet-21K-P pretraining by MILL + 'vit_base_patch16_224_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + 'vit_base_patch16_224_miil': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm' + '/vit_base_patch16_224_1k_miil_84_4.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), +} + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + + Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` + - https://arxiv.org/abs/2012.12877 + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, + act_layer=None, weight_init=''): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + distilled (bool): model includes a distillation token and head as in DeiT models + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + weight_init: (str): weight init scheme + """ + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 2 if distilled else 1 + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.patch_embed = embed_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Representation layer + if representation_size and not distilled: + self.num_features = representation_size + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh()) + ])) + else: + self.pre_logits = nn.Identity() + + # Classifier head(s) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if 
distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + trunc_normal_(self.pos_embed, std=.02) + if self.dist_token is not None: + trunc_normal_(self.dist_token, std=.02) + if mode.startswith('jax'): + # leave cls token as zeros to match jax impl + named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self) + else: + trunc_normal_(self.cls_token, std=.02) + self.apply(_init_vit_weights) + + def _init_weights(self, m): + # this fn left here for compat with downstream users + _init_vit_weights(m) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'dist_token'} + + def get_classifier(self): + if self.dist_token is None: + return self.head + else: + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.num_tokens == 2: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks + if self.dist_token is None: + x = torch.cat((cls_token, x), dim=1) + else: + x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) + x = self.pos_drop(x + self.pos_embed) + x = self.blocks(x) + x = self.norm(x) + # if self.dist_token is None: + # return self.pre_logits(x[:, 0]) + # else: + # return x[:, 0], x[:, 1] + return x + + def forward(self, x): + x = self.forward_features(x) + # if self.head_dist is not None: + # x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple + # if self.training and not torch.jit.is_scripting(): + # # during inference, return the average of both classifier predictions + # return x, x_dist + # else: + # return (x + x_dist) / 2 + # else: + # x = self.head(x) + return x[:, 1:] + + +def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False): + """ ViT weight initialization + * When called without n, head_bias, jax_impl args it will behave exactly the same + as my original init for compatibility with prev hparam / downstream use cases (ie DeiT). 
+ * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + elif name.startswith('pre_logits'): + lecun_normal_(module.weight) + nn.init.zeros_(module.bias) + else: + if jax_impl: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + else: + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif jax_impl and isinstance(module, nn.Conv2d): + # NOTE conv was left to pytorch default in my original init + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): + nn.init.zeros_(module.bias) + nn.init.ones_(module.weight) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + 
if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + # Rescale the grid of position embeddings when loading from state_dict. Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + _logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new) + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == 'pos_embed' and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + out_dict[k] = v + return out_dict + + +def 
_create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + # NOTE this extra code to support handling of repr size for in21k pretrained models + default_num_classes = default_cfg['num_classes'] + num_classes = kwargs.get('num_classes', default_num_classes) + repr_size = kwargs.pop('representation_size', None) + if repr_size is not None and num_classes != default_num_classes: + # Remove representation layer if fine-tuning. This may not always be the desired action, + # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? + _logger.warning("Removing representation layer for fine-tuning.") + repr_size = None + + model = build_model_with_cfg( + VisionTransformer, variant, pretrained, + default_cfg=default_cfg, + representation_size=repr_size, + pretrained_filter_fn=checkpoint_filter_fn, + pretrained_custom_load='npz' in default_cfg['url'], + **kwargs) + return model + + +@register_model +def vit_tiny_patch16_224(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_384(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) @ 384x384. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) at 384x384. + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). 
+ ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base2_patch32_256(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) + # FIXME experiment + """ + model_kwargs = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, **kwargs) + model = _create_vision_transformer('vit_base2_patch32_256', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. 
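+    NOTE: registered via @register_model like the other variants here; typically instantiated by name through timm's create_model factory.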
+ """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + """ + model_kwargs = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_giant_patch14_224(pretrained=False, **kwargs): + """ ViT-Giant model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + """ + model_kwargs = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_gigantic_patch14_224(pretrained=False, **kwargs): + """ ViT-Gigantic model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + """ + model_kwargs = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_gigantic_patch14_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16). 
+ ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. 
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224_in21k(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_sam(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_sam', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_sam(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) w/ SAM pretrained weights. 
Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_sam', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224_dino(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch8_224_dino(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/8) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch8_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_dino(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) /w DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224_dino(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/8) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('deit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_small_patch16_224(pretrained=False, **kwargs): + """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('deit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_224(pretrained=False, **kwargs): + """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). 
+ ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_384(pretrained=False, **kwargs): + """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer( + 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_small_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer( + 'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_384(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). 
+ Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs) + return model diff --git a/data_processing/MANIQA/timm/models/vision_transformer_hybrid.py b/data_processing/MANIQA/timm/models/vision_transformer_hybrid.py new file mode 100644 index 0000000..d46297e --- /dev/null +++ b/data_processing/MANIQA/timm/models/vision_transformer_hybrid.py @@ -0,0 +1,363 @@ +""" Hybrid Vision Transformer (ViT) in PyTorch + +A PyTorch implement of the Hybrid Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.TODO + +NOTE These hybrid model definitions depend on code in vision_transformer.py. +They were moved here to keep file sizes sane. 
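+The hybrid variants replace the usual linear patch embedding with feature maps from a CNN backbone, via the HybridEmbed module defined below.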
+ +Hacked together by / Copyright 2020, Ross Wightman +""" +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import StdConv2dSame, StdConv2d, to_2tuple +from .resnet import resnet26d, resnet50d +from .resnetv2 import ResNetV2, create_resnetv2_stem +from .registry import register_model +from timm.models.vision_transformer import _create_vision_transformer + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # hybrid in-1k models (weights from official JAX impl where they exist) + 'vit_tiny_r_s16_p8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + first_conv='patch_embed.backbone.conv'), + 'vit_tiny_r_s16_p8_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_r26_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', + ), + 'vit_small_r26_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_r26_s32_224': _cfg(), + 'vit_base_r50_s16_224': _cfg(), + 'vit_base_r50_s16_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_r50_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz' + ), + 'vit_large_r50_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0 + ), + + # hybrid in-21k models (weights from official Google JAX impl where they exist) + 'vit_tiny_r_s16_p8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv'), + 'vit_small_r26_s32_224_in21k': _cfg( + 
url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + 'vit_base_r50_s16_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', + num_classes=21843, crop_pct=0.9), + 'vit_large_r50_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + + # hybrid models (using timm resnet backbones) + 'vit_small_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_small_resnet50d_s16_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet50d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), +} + + +class HybridEmbed(nn.Module): + """ CNN Feature Map Embedding + Extract feature map from CNN, flatten, project to embedding dim. + """ + def __init__(self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768): + super().__init__() + assert isinstance(backbone, nn.Module) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # NOTE Most reliable way of determining output dims is to run forward pass + training = backbone.training + if training: + backbone.eval() + o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + o = o[-1] # last feature if backbone outputs list/tuple of features + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 + self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs): + embed_layer = partial(HybridEmbed, backbone=backbone) + kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set + return _create_vision_transformer( + variant, pretrained=pretrained, embed_layer=embed_layer, default_cfg=default_cfgs[variant], **kwargs) + + +def _resnetv2(layers=(3, 4, 9), **kwargs): + """ ResNet-V2 backbone helper""" + padding_same = kwargs.get('padding_same', True) + stem_type = 'same' if padding_same else '' + conv_layer = 
partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) + if len(layers): + backbone = ResNetV2( + layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), + preact=False, stem_type=stem_type, conv_layer=conv_layer) + else: + backbone = create_resnetv2_stem( + kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) + return backbone + + +@register_model +def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_384(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-B/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224(pretrained=False, **kwargs): + """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_384(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. 
+ """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_384(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_384(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_r50_s32_384(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_224_in21k(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid. ImageNet-21k. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224_in21k(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2(layers=(3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_224_in21k(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_224_in21k(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. 
+ """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet50d_s16_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model \ No newline at end of file diff --git a/data_processing/MANIQA/timm/models/vovnet.py b/data_processing/MANIQA/timm/models/vovnet.py new file mode 100644 index 0000000..ec5b3e8 --- /dev/null +++ b/data_processing/MANIQA/timm/models/vovnet.py @@ -0,0 +1,406 @@ +""" VoVNet (V1 & V2) + +Papers: +* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 +* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Looked at https://github.com/youngwanLEE/vovnet-detectron2 & +https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +for some reference, rewrote most of the code. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .registry import register_model +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, SeparableConvBnAct, BatchNormAct2d, ClassifierHead, DropPath,\ + create_attn, create_norm_act, get_norm_act_layer + + +# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & +# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +model_cfgs = dict( + vovnet39a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=False, + depthwise=False, + attn='', + ), + vovnet57a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=False, + depthwise=False, + attn='', + + ), + ese_vovnet19b_slim_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + + ), + ese_vovnet19b_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + ), + ese_vovnet19b_slim=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet19b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet57b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet99b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 3, 9, 3], + residual=True, + depthwise=False, + attn='ese', + ), + eca_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='eca', + ), +) +model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] +model_cfgs['ese_vovnet99b_iabn'] = model_cfgs['ese_vovnet99b'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + 
vovnet39a=_cfg(url=''), + vovnet57a=_cfg(url=''), + ese_vovnet19b_slim_dw=_cfg(url=''), + ese_vovnet19b_dw=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet19b_dw-a8741004.pth'), + ese_vovnet19b_slim=_cfg(url=''), + ese_vovnet39b=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet39b-f912fe73.pth'), + ese_vovnet57b=_cfg(url=''), + ese_vovnet99b=_cfg(url=''), + eca_vovnet39b=_cfg(url=''), + ese_vovnet39b_evos=_cfg(url=''), + ese_vovnet99b_iabn=_cfg(url=''), +) + + +class SequentialAppendList(nn.Sequential): + def __init__(self, *args): + super(SequentialAppendList, self).__init__(*args) + + def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: + for i, module in enumerate(self): + if i == 0: + concat_list.append(module(x)) + else: + concat_list.append(module(concat_list[-1])) + x = torch.cat(concat_list, dim=1) + return x + + +class OsaBlock(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, + depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None): + super(OsaBlock, self).__init__() + + self.residual = residual + self.depthwise = depthwise + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + next_in_chs = in_chs + if self.depthwise and next_in_chs != mid_chs: + assert not residual + self.conv_reduction = ConvBnAct(next_in_chs, mid_chs, 1, **conv_kwargs) + else: + self.conv_reduction = None + + mid_convs = [] + for i in range(layer_per_block): + if self.depthwise: + conv = SeparableConvBnAct(mid_chs, mid_chs, **conv_kwargs) + else: + conv = ConvBnAct(next_in_chs, mid_chs, 3, **conv_kwargs) + next_in_chs = mid_chs + mid_convs.append(conv) + self.conv_mid = SequentialAppendList(*mid_convs) + + # feature aggregation + next_in_chs = in_chs + layer_per_block * mid_chs + self.conv_concat = ConvBnAct(next_in_chs, out_chs, **conv_kwargs) + + if attn: + self.attn = create_attn(attn, out_chs) + else: + self.attn = None + + self.drop_path = drop_path + + def forward(self, x): + output = [x] + if self.conv_reduction is not None: + x = self.conv_reduction(x) + x = self.conv_mid(x, output) + x = self.conv_concat(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.residual: + x = x + output[0] + return x + + +class OsaStage(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, + residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, + drop_path_rates=None): + super(OsaStage, self).__init__() + + if downsample: + self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + else: + self.pool = None + + blocks = [] + for i in range(block_per_stage): + last_block = i == block_per_stage - 1 + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + blocks += [OsaBlock( + in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, + attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path) + ] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.pool is not None: + x = self.pool(x) + x = self.blocks(x) + return x + + +class 
VovNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0., stem_stride=4, + output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rate=0.): + """ VovNet (v2) + """ + super(VovNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert stem_stride in (4, 2) + assert output_stride == 32 # FIXME support dilation + + stem_chs = cfg["stem_chs"] + stage_conv_chs = cfg["stage_conv_chs"] + stage_out_chs = cfg["stage_out_chs"] + block_per_stage = cfg["block_per_stage"] + layer_per_block = cfg["layer_per_block"] + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + # Stem module + last_stem_stride = stem_stride // 2 + conv_type = SeparableConvBnAct if cfg["depthwise"] else ConvBnAct + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), + conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), + conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), + ]) + self.feature_info = [dict( + num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] + current_stride = stem_stride + + # OSA stages + stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) + in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] + stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) + stages = [] + for i in range(4): # num_stages + downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 + stages += [OsaStage( + in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, + downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args) + ] + self.num_features = stage_out_chs[i] + current_stride *= 2 if downsample else 1 + self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) 
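+            # NOTE: Linear layers (the classifier head) keep PyTorch's default weight init here; only the bias is zeroed below.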
+ elif isinstance(m, nn.Linear): + nn.init.zeros_(m.bias) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + return self.stages(x) + + def forward(self, x): + x = self.forward_features(x) + return self.head(x) + + +def _create_vovnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + VovNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def vovnet39a(pretrained=False, **kwargs): + return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) + + +@register_model +def vovnet57a(pretrained=False, **kwargs): + return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim_dw(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_dw(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet39b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet57b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet99b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) + + +@register_model +def eca_vovnet39b(pretrained=False, **kwargs): + return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) + + +# Experimental Models + +@register_model +def ese_vovnet39b_evos(pretrained=False, **kwargs): + def norm_act_fn(num_features, **nkwargs): + return create_norm_act('EvoNormSample', num_features, jit=False, **nkwargs) + return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs) + + +@register_model +def ese_vovnet99b_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_vovnet( + 'ese_vovnet99b_iabn', pretrained=pretrained, norm_layer=norm_layer, act_layer=nn.LeakyReLU, **kwargs) diff --git a/data_processing/MANIQA/timm/models/xception.py b/data_processing/MANIQA/timm/models/xception.py new file mode 100644 index 0000000..86f558c --- /dev/null +++ b/data_processing/MANIQA/timm/models/xception.py @@ -0,0 +1,232 @@ +""" +Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) + +@author: tstandley +Adapted by cadene + +Creates an Xception Model as defined in: + +Francois Chollet +Xception: Deep Learning with Depthwise Separable Convolutions +https://arxiv.org/pdf/1610.02357.pdf + +This weights ported from the Keras implementation. 
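+
+As a quick reference, the preprocessing notes below (resize to 333, center crop to 299, normalize with a mean/std
+of 0.5) correspond roughly to the following torchvision validation pipeline (illustrative sketch only):
+
+    from torchvision import transforms
+    val_transform = transforms.Compose([
+        transforms.Resize(333),
+        transforms.CenterCrop(299),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+    ])
+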
Achieves the following performance on the validation set: + +Loss:0.9173 Prec@1:78.892 Prec@5:94.292 + +REMEMBER to set your image size to 3x299x299 for both test and validation + +normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + +The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 +""" + +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['Xception'] + +default_cfgs = { + 'xception': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', + 'input_size': (3, 299, 299), + 'pool_size': (10, 10), + 'crop_pct': 0.8975, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } +} + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d( + in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_channels != in_channels or strides != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_channels) + else: + self.skip = None + + rep = [] + for i in range(reps): + if grow_first: + inc = in_channels if i == 0 else out_channels + outc = out_channels + else: + inc = in_channels + outc = in_channels if i < (reps - 1) else out_channels + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) + rep.append(nn.BatchNorm2d(outc)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception, self).__init__() + self.drop_rate = drop_rate + self.global_pool = global_pool + self.num_classes = num_classes + self.num_features = 2048 + + self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, 2, 2, start_with_relu=False) + self.block2 = Block(128, 256, 2, 2) + self.block3 = Block(256, 
728, 2, 2) + + self.block4 = Block(728, 728, 3, 1) + self.block5 = Block(728, 728, 3, 1) + self.block6 = Block(728, 728, 3, 1) + self.block7 = Block(728, 728, 3, 1) + + self.block8 = Block(728, 728, 3, 1) + self.block9 = Block(728, 728, 3, 1) + self.block10 = Block(728, 728, 3, 1) + self.block11 = Block(728, 728, 3, 1) + + self.block12 = Block(728, 1024, 2, 2, grow_first=False) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1) + self.bn4 = nn.BatchNorm2d(self.num_features) + self.act4 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block2.rep.0'), + dict(num_chs=256, reduction=8, module='block3.rep.0'), + dict(num_chs=728, reduction=16, module='block12.rep.0'), + dict(num_chs=2048, reduction=32, module='act4'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # #------- init weights -------- + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.act4(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + F.dropout(x, self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + Xception, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook'), + **kwargs) + + +@register_model +def xception(pretrained=False, **kwargs): + return _xception('xception', pretrained=pretrained, **kwargs) diff --git a/data_processing/MANIQA/timm/models/xception_aligned.py b/data_processing/MANIQA/timm/models/xception_aligned.py new file mode 100644 index 0000000..ea7f5c0 --- /dev/null +++ b/data_processing/MANIQA/timm/models/xception_aligned.py @@ -0,0 +1,238 @@ +"""Pytorch impl of Aligned Xception 41, 65, 71 + +This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at +https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md + +Hacked together by / Copyright 2020 Ross Wightman +""" +from functools import partial + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct, create_conv2d +from .layers.helpers import to_3tuple 
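+# NOTE: to_3tuple broadcasts a scalar out_chs into a (c1, c2, c3) tuple, so each XceptionModule below may be
+# configured with either a single int or an explicit per-conv channel tuple.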
+from .registry import register_model + +__all__ = ['XceptionAligned'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10), + 'crop_pct': 0.903, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + xception41=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth'), + xception65=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth'), + xception71=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth'), +) + + +class SeparableConv2d(nn.Module): + def __init__( + self, inplanes, planes, kernel_size=3, stride=1, dilation=1, padding='', + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + self.conv_dw = create_conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + self.bn_dw = norm_layer(inplanes) + if act_layer is not None: + self.act_dw = act_layer(inplace=True) + else: + self.act_dw = None + + # pointwise convolution + self.conv_pw = create_conv2d(inplanes, planes, kernel_size=1) + self.bn_pw = norm_layer(planes) + if act_layer is not None: + self.act_pw = act_layer(inplace=True) + else: + self.act_pw = None + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn_dw(x) + if self.act_dw is not None: + x = self.act_dw(x) + x = self.conv_pw(x) + x = self.bn_pw(x) + if self.act_pw is not None: + x = self.act_pw(x) + return x + + +class XceptionModule(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, pad_type='', + start_with_relu=True, no_skip=False, act_layer=nn.ReLU, norm_layer=None): + super(XceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = ConvBnAct( + in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, act_layer=None) + else: + self.shortcut = None + + separable_act_layer = None if start_with_relu else act_layer + self.stack = nn.Sequential() + for i in range(3): + if start_with_relu: + self.stack.add_module(f'act{i + 1}', nn.ReLU(inplace=i > 0)) + self.stack.add_module(f'conv{i + 1}', SeparableConv2d( + in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, + act_layer=separable_act_layer, norm_layer=norm_layer)) + in_chs = out_chs[i] + + def forward(self, x): + skip = x + x = self.stack(x) + if self.shortcut is not None: + skip = self.shortcut(skip) + if not self.no_skip: + x = x + skip + return x + + +class XceptionAligned(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, block_cfg, num_classes=1000, in_chans=3, output_stride=32, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_rate=0., global_pool='avg'): + super(XceptionAligned, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert 
output_stride in (8, 16, 32) + + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), + ConvBnAct(32, 64, kernel_size=3, stride=1, **layer_args) + ]) + + curr_dilation = 1 + curr_stride = 2 + self.feature_info = [] + self.blocks = nn.Sequential() + for i, b in enumerate(block_cfg): + b['dilation'] = curr_dilation + if b['stride'] > 1: + self.feature_info += [dict( + num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=f'blocks.{i}.stack.act3')] + next_stride = curr_stride * b['stride'] + if next_stride > output_stride: + curr_dilation *= b['stride'] + b['stride'] = 1 + else: + curr_stride = next_stride + self.blocks.add_module(str(i), XceptionModule(**b, **layer_args)) + self.num_features = self.blocks[-1].out_channels + + self.feature_info += [dict( + num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))] + + self.head = ClassifierHead( + in_chs=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + XceptionAligned, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), + **kwargs) + + +@register_model +def xception41(pretrained=False, **kwargs): + """ Modified Aligned Xception-41 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception41', pretrained=pretrained, **model_args) + + +@register_model +def xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception65', pretrained=pretrained, **model_args) + + +@register_model +def xception71(pretrained=False, **kwargs): + """ Modified Aligned Xception-71 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=1), + dict(in_chs=256, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=1), + dict(in_chs=728, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit 
flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception71', pretrained=pretrained, **model_args) diff --git a/data_processing/MANIQA/timm/models/xcit.py b/data_processing/MANIQA/timm/models/xcit.py new file mode 100644 index 0000000..9ad162e --- /dev/null +++ b/data_processing/MANIQA/timm/models/xcit.py @@ -0,0 +1,814 @@ +""" Cross-Covariance Image Transformer (XCiT) in PyTorch + +Paper: + - https://arxiv.org/abs/2106.09681 + +Same as the official implementation, with some minor adaptations, original copyright below + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. + +import math +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .vision_transformer import _cfg, Mlp +from .registry import register_model +from .layers import DropPath, trunc_normal_, to_2tuple +from .cait import ClassAttn +from .fx_features import register_notrace_module + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # Patch size 16 + 'xcit_nano_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), + 'xcit_nano_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), + 'xcit_nano_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), + 'xcit_tiny_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), + 'xcit_tiny_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), + 'xcit_tiny_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), + 'xcit_tiny_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p16_224': 
_cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), + 'xcit_small_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), + 'xcit_small_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), + 'xcit_small_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), + 'xcit_small_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), + 'xcit_medium_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), + 'xcit_medium_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), + 'xcit_large_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), + 'xcit_large_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), + + # Patch size 8 + 'xcit_nano_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), + 'xcit_nano_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), + 'xcit_nano_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), + 'xcit_tiny_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), + 'xcit_tiny_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), + 'xcit_tiny_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), + 'xcit_tiny_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', 
input_size=(3, 384, 384)), + 'xcit_small_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), + 'xcit_small_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), + 'xcit_small_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), + 'xcit_small_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), + 'xcit_small_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), + 'xcit_medium_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), + 'xcit_medium_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), + 'xcit_large_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), + 'xcit_large_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)), +} + + +@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method +class PositionalEncodingFourier(nn.Module): + """ + Positional encoding relying on a fourier kernel matching the one used in the "Attention is all of Need" paper. 
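+    Roughly: row and column indices are normalized to (0, 2*pi], divided by geometrically spaced frequencies
+    dim_t = temperature ** (2 * (i // 2) / hidden_dim), passed through interleaved sin/cos, concatenated along
+    the channel axis and projected to `dim` with a 1x1 convolution (see forward() below).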
+ Based on the official XCiT code + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + """ + + def __init__(self, hidden_dim=32, dim=768, temperature=10000): + super().__init__() + self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) + self.scale = 2 * math.pi + self.temperature = temperature + self.hidden_dim = hidden_dim + self.dim = dim + self.eps = 1e-6 + + def forward(self, B: int, H: int, W: int): + device = self.token_projection.weight.device + y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W) + x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1) + y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.token_projection(pos) + return pos.repeat(B, 1, 1, 1) # (B, C, H, W) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution + batch norm""" + return torch.nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), + nn.BatchNorm2d(out_planes) + ) + + +class ConvPatchEmbed(nn.Module): + """Image to Patch Embedding using multiple convolutional layers""" + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): + super().__init__() + img_size = to_2tuple(img_size) + num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + if patch_size == 16: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 8, 2), + act_layer(), + conv3x3(embed_dim // 8, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 2, embed_dim, 2), + ) + elif patch_size == 8: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 2, embed_dim, 2), + ) + else: + raise('For convolutional projection, patch size has to be in [8, 16]') + + def forward(self, x): + x = self.proj(x) + Hp, Wp = x.shape[2], x.shape[3] + x = x.flatten(2).transpose(1, 2) # (B, N, C) + return x, (Hp, Wp) + + +class LPI(nn.Module): + """ + Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the + implicit communication performed by the block diagonal scatter attention. 
Implemented using 2 layers of separable + 3x3 convolutions with GeLU and BatchNorm2d + """ + + def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): + super().__init__() + out_features = out_features or in_features + + padding = kernel_size // 2 + + self.conv1 = torch.nn.Conv2d( + in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) + self.act = act_layer() + self.bn = nn.BatchNorm2d(in_features) + self.conv2 = torch.nn.Conv2d( + in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) + + def forward(self, x, H: int, W: int): + B, N, C = x.shape + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.conv1(x) + x = self.act(x) + x = self.bn(x) + x = self.conv2(x) + x = x.reshape(B, C, N).permute(0, 2, 1) + return x + + +class ClassAttentionBlock(nn.Module): + """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False): + super().__init__() + self.norm1 = norm_layer(dim) + + self.attn = ClassAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + if eta is not None: # LayerScale Initialization (no layerscale when None) + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + else: + self.gamma1, self.gamma2 = 1.0, 1.0 + + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + self.tokens_norm = tokens_norm + + def forward(self, x): + x_norm1 = self.norm1(x) + x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) + x = x + self.drop_path(self.gamma1 * x_attn) + if self.tokens_norm: + x = self.norm2(x) + else: + x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) + x_res = x + cls_token = x[:, 0:1] + cls_token = self.gamma2 * self.mlp(cls_token) + x = torch.cat([cls_token, x[:, 1:]], dim=1) + x = x_res + self.drop_path(x) + return x + + +class XCA(nn.Module): + """ Cross-Covariance Attention (XCA) + Operation where the channels are updated using a weighted sum. 
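+    (That is, attention is computed over the d_h x d_h channel dimension rather than the N x N token dimension,
+    so the cost grows linearly with the number of tokens.)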
The weights are obtained from the (softmax + normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + # Paper section 3.2 l2-Normalization and temperature scaling + q = torch.nn.functional.normalize(q, dim=-1) + k = torch.nn.functional.normalize(k, dim=-1) + attn = (q @ k.transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, C', N), permute -> (B, N, H, C') + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class XCABlock(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm3 = norm_layer(dim) + self.local_mp = LPI(in_features=dim, act_layer=act_layer) + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + + def forward(self, x, H: int, W: int): + x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) + # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + +class XCiT(nn.Module): + """ + Based on timm and DeiT code bases + https://github.com/rwightman/pytorch-image-models/tree/master/timm + https://github.com/facebookresearch/deit/ + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False): + """ + Args: + img_size (int, tuple): input image size + patch_size (int): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of 
attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate (constant across all layers) + norm_layer: (nn.Module): normalization layer + cls_attn_layers: (int) Depth of Class attention layers + use_pos_embed: (bool) whether to use positional encoding + eta: (float) layerscale initialization value + tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA + + Notes: + - Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch + interaction (class LPI) and the patch embedding (class ConvPatchEmbed) + """ + super().__init__() + img_size = to_2tuple(img_size) + assert (img_size[0] % patch_size == 0) and (img_size[0] % patch_size == 0), \ + '`patch_size` should divide image dimensions evenly' + + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.patch_embed = ConvPatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.use_pos_embed = use_pos_embed + if use_pos_embed: + self.pos_embed = PositionalEncodingFourier(dim=embed_dim) + self.pos_drop = nn.Dropout(p=drop_rate) + + self.blocks = nn.ModuleList([ + XCABlock( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta) + for _ in range(depth)]) + + self.cls_attn_blocks = nn.ModuleList([ + ClassAttentionBlock( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm) + for _ in range(cls_attn_layers)]) + + # Classifier head + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Init weights + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + # x is (B, N, C). 
(Hp, Hw) is (height in units of patches, width in units of patches) + x, (Hp, Wp) = self.patch_embed(x) + + if self.use_pos_embed: + # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x, Hp, Wp) + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for blk in self.cls_attn_blocks: + x = blk(x) + + x = self.norm(x)[:, 0] + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + # For consistency with timm's transformer models while being compatible with official weights source we rename + # pos_embeder to pos_embed. Also account for use_pos_embed == False + use_pos_embed = getattr(model, 'pos_embed', None) is not None + pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] + for k in pos_embed_keys: + if use_pos_embed: + state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) + else: + del state_dict[k] + # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors + # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v + if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): + num_ca_blocks = len(model.cls_attn_blocks) + for i in range(num_ca_blocks): + qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') + qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] + qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) + if qkv_bias is not None: + qkv_bias = qkv_bias.reshape(3, -1) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] + return state_dict + + +def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + XCiT, variant, pretrained, default_cfg=default_cfg, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384, **kwargs) + model = _create_xcit('xcit_nano_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, 
**kwargs) + model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, 
depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +# Patch size 8x8 models +@register_model +def xcit_nano_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224(pretrained=False, 
**kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def 
xcit_large_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model diff --git a/data_processing/MANIQA/timm/optim/__init__.py b/data_processing/MANIQA/timm/optim/__init__.py new file mode 100644 index 0000000..7ee4958 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/__init__.py @@ -0,0 +1,15 @@ +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .adamw import AdamW +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP +from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs diff --git a/data_processing/MANIQA/timm/optim/adabelief.py b/data_processing/MANIQA/timm/optim/adabelief.py new file mode 100644 index 0000000..951d715 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/adabelief.py @@ -0,0 +1,201 @@ +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdaBelief(Optimizer): + r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-16) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + decoupled_decay (boolean, optional): (default: True) If set as True, then + the optimizer uses decoupled weight decay as in AdamW + fixed_decay (boolean, optional): (default: False) This is used when weight_decouple + is set as True. + When fixed_decay == True, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay$. + When fixed_decay == False, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the + weight decay ratio decreases with learning rate (lr). 
+ rectify (boolean, optional): (default: True) If set as True, then perform the rectified + update similar to RAdam + degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update + when variance of gradient is high + reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 + + For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer' + For example train/args for EfficientNet see these gists + - link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037 + - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3 + """ + + def __init__( + self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, + decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True): + + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + + if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): + for param in params: + if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): + param['buffer'] = [[None, None, None] for _ in range(10)] + + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, + degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify, + fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)]) + super(AdaBelief, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdaBelief, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def reset(self): + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + amsgrad = group['amsgrad'] + + # State initialization + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
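+
+        Example (illustrative sketch of the closure protocol; ``model``, ``loss_fn``,
+        ``inputs`` and ``targets`` are placeholders, not part of this module)::
+
+            optimizer = AdaBelief(model.parameters(), lr=1e-3)
+
+            def closure():
+                optimizer.zero_grad()
+                loss = loss_fn(model(inputs), targets)
+                loss.backward()
+                return loss
+
+            optimizer.step(closure)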
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError( + 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + state = self.state[p] + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p_fp32) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p_fp32) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p_fp32) + + # perform weight decay, check if decoupled weight decay + if group['decoupled_decay']: + if not group['fixed_decay']: + p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) + else: + p_fp32.mul_(1.0 - group['weight_decay']) + else: + if group['weight_decay'] != 0: + grad.add_(p_fp32, alpha=group['weight_decay']) + + # get current state variable + exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Update first and second moment running average + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + grad_residual = grad - exp_avg + exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) + + if amsgrad: + max_exp_avg_var = state['max_exp_avg_var'] + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) + + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + # update + if not group['rectify']: + # Default update + step_size = group['lr'] / bias_correction1 + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + # Rectified update, forked from RAdam + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + elif group['degenerated_to_sgd']: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + if num_sma >= 5: + denom = exp_avg_var.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) + elif step_size > 0: + p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/data_processing/MANIQA/timm/optim/adafactor.py b/data_processing/MANIQA/timm/optim/adafactor.py new file mode 100644 index 0000000..0605743 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/adafactor.py @@ -0,0 +1,167 @@ +""" Adafactor Optimizer + +Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + +Original header/copyright below. + +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import torch +import math + + +class Adafactor(torch.optim.Optimizer): + """Implements Adafactor algorithm. + This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` + (see https://arxiv.org/abs/1804.04235) + + Note that this optimizer internally adjusts the learning rate depending on the + *scale_parameter*, *relative_step* and *warmup_init* options. + + To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. 
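+
+    In this implementation ``relative_step`` is inferred from ``lr`` (it is True only when no
+    external ``lr`` is given), so a manual schedule only needs an explicit ``lr`` together with
+    ``scale_parameter=False``. Illustrative sketch (``model`` is a placeholder)::
+
+        optimizer = Adafactor(model.parameters(), lr=1e-3, scale_parameter=False, warmup_init=False)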
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and parameter scale respectively (default: (1e-30, 1e-3)) + clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) + decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) + beta1 (float): coefficient used for computing running averages of gradient (default: None) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) + warmup_init (bool): time-dependent learning rate computation depends on + whether warm-up initialization is being used (default: False) + """ + + def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0, + decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): + relative_step = not lr + if warmup_init and not relative_step: + raise ValueError('warmup_init requires relative_step=True') + + beta1 = None if betas is None else betas[0] # make it compat with standard betas arg + defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, + beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, + relative_step=relative_step, warmup_init=warmup_init) + super(Adafactor, self).__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + if param_group['relative_step']: + min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 + lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) + param_scale = 1.0 + if param_group['scale_parameter']: + param_scale = max(param_group['eps_scale'], param_state['RMS']) + param_group['lr'] = lr_t * param_scale + return param_group['lr'] + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group['beta1'] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError('Adafactor does not support sparse gradients.') + + state = self.state[p] + + factored, use_first_moment = self._get_options(group, grad.shape) + # State Initialization + if len(state) == 0: + state['step'] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(grad) + if factored: + state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) + state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) + else: + state['exp_avg_sq'] = torch.zeros_like(grad) + + state['RMS'] = 0 + else: + if use_first_moment: + state['exp_avg'] = state['exp_avg'].to(grad) + if factored: + state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) + state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) + else: + state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + state['step'] += 1 + state['RMS'] = self._rms(p_fp32) + lr_t = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) + update = grad ** 2 + group['eps'] + if factored: + exp_avg_sq_row = state['exp_avg_sq_row'] + exp_avg_sq_col = state['exp_avg_sq_col'] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state['exp_avg_sq'] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) + update.mul_(lr_t) + + if use_first_moment: + exp_avg = state['exp_avg'] + exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) + update = exp_avg + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) + + p_fp32.add_(-update) + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/data_processing/MANIQA/timm/optim/adahessian.py b/data_processing/MANIQA/timm/optim/adahessian.py new file mode 100644 index 0000000..985c67c --- /dev/null +++ b/data_processing/MANIQA/timm/optim/adahessian.py @@ -0,0 +1,156 @@ +""" AdaHessian Optimizer + +Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py +Originally licensed MIT, Copyright 2020, David Samuel +""" +import torch + + +class Adahessian(torch.optim.Optimizer): + """ + Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): learning rate (default: 0.1) + betas ((float, float), optional): coefficients used for computing running averages of gradient and the + squared hessian trace (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay 
(L2 penalty) (default: 0.0) + hessian_power (float, optional): exponent of the hessian trace (default: 1.0) + update_each (int, optional): compute the hessian trace approximation only after *this* number of steps + (to save time) (default: 1) + n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1) + """ + + def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, + hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= hessian_power <= 1.0: + raise ValueError(f"Invalid Hessian power value: {hessian_power}") + + self.n_samples = n_samples + self.update_each = update_each + self.avg_conv_kernel = avg_conv_kernel + + # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training + self.seed = 2147483647 + self.generator = torch.Generator().manual_seed(self.seed) + + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) + super(Adahessian, self).__init__(params, defaults) + + for p in self.get_params(): + p.hess = 0.0 + self.state[p]["hessian step"] = 0 + + @property + def is_second_order(self): + return True + + def get_params(self): + """ + Gets all parameters in all param_groups with gradients + """ + + return (p for group in self.param_groups for p in group['params'] if p.requires_grad) + + def zero_hessian(self): + """ + Zeros out the accumalated hessian traces. + """ + + for p in self.get_params(): + if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: + p.hess.zero_() + + @torch.no_grad() + def set_hessian(self): + """ + Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. + """ + + params = [] + for p in filter(lambda p: p.grad is not None, self.get_params()): + if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step + params.append(p) + self.state[p]["hessian step"] += 1 + + if len(params) == 0: + return + + if self.generator.device != params[0].device: # hackish way of casting the generator to the right device + self.generator = torch.Generator(params[0].device).manual_seed(self.seed) + + grads = [p.grad for p in params] + + for i in range(self.n_samples): + # Rademacher distribution {-1.0, 1.0} + zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] + h_zs = torch.autograd.grad( + grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) + for h_z, z, p in zip(h_zs, zs, params): + p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step. 
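+
+        Note (illustrative): ``set_hessian`` differentiates through the existing gradients to
+        form the Hutchinson trace estimate, so the training loop is expected to call
+        ``loss.backward(create_graph=True)`` before ``step()``; callers can detect this via the
+        ``is_second_order`` property above. Sketch, with ``model``, ``loss_fn``, ``inputs`` and
+        ``targets`` as placeholders::
+
+            loss = loss_fn(model(inputs), targets)
+            loss.backward(create_graph=True)
+            optimizer.step()
+            optimizer.zero_grad()
+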
+ Arguments: + closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) + """ + + loss = None + if closure is not None: + loss = closure() + + self.zero_hessian() + self.set_hessian() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None or p.hess is None: + continue + + if self.avg_conv_kernel and p.dim() == 4: + p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() + + # Perform correct stepweight decay as in AdamW + p.mul_(1 - group['lr'] * group['weight_decay']) + + state = self.state[p] + + # State initialization + if len(state) == 1: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of Hessian diagonal square values + state['exp_hessian_diag_sq'] = torch.zeros_like(p) + + exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] + beta1, beta2 = group['betas'] + state['step'] += 1 + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) + exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) + + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + k = group['hessian_power'] + denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) + + # make update + step_size = group['lr'] / bias_correction1 + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/data_processing/MANIQA/timm/optim/adamp.py b/data_processing/MANIQA/timm/optim/adamp.py new file mode 100644 index 0000000..ee18763 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/adamp.py @@ -0,0 +1,105 @@ +""" +AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer +import math + + +def _channel_view(x) -> torch.Tensor: + return x.reshape(x.size(0), -1) + + +def _layer_view(x) -> torch.Tensor: + return x.reshape(1, -1) + + +def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): + wd = 1. 
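+    # For each view of the parameter (per-channel, then whole-layer): if the gradient is nearly
+    # orthogonal to the weight direction (max cosine similarity below delta / sqrt(view size)),
+    # remove the component of the update along the weight direction and scale weight decay by
+    # wd_ratio; otherwise leave the update and weight decay unchanged.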
+ expand_size = (-1,) + (1,) * (len(p.shape) - 1) + for view_func in [_channel_view, _layer_view]: + param_view = view_func(p) + grad_view = view_func(grad) + cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() + + # FIXME this is a problem for PyTorch XLA + if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): + p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) + wd = wd_ratio + return perturb, wd + + return perturb, wd + + +class AdamP(Optimizer): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + delta=delta, wd_ratio=wd_ratio, nesterov=nesterov) + super(AdamP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + grad = p.grad + beta1, beta2 = group['betas'] + nesterov = group['nesterov'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Adam + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + step_size = group['lr'] / bias_correction1 + + if nesterov: + perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom + else: + perturb = exp_avg / denom + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if group['weight_decay'] > 0: + p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio) + + # Step + p.add_(perturb, alpha=-step_size) + + return loss diff --git a/data_processing/MANIQA/timm/optim/adamw.py b/data_processing/MANIQA/timm/optim/adamw.py new file mode 100644 index 0000000..66478bc --- /dev/null +++ b/data_processing/MANIQA/timm/optim/adamw.py @@ -0,0 +1,122 @@ +""" AdamW Optimizer +Impl copied from PyTorch master + +NOTE: Builtin optim.AdamW is used by the factory, this impl only serves as a Python based reference, will be removed +someday +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdamW(Optimizer): + r"""Implements AdamW algorithm. + + The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. + The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. 
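+
+    The decay here is decoupled from the gradient-based update: each step first scales the
+    parameter in place (``p.data.mul_(1 - lr * weight_decay)``) and only then applies the Adam
+    moment update, rather than adding an L2 term to the gradient (see ``step`` below).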
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=1e-2, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad) + super(AdamW, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamW, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # Perform stepweight decay + p.data.mul_(1 - group['lr'] * group['weight_decay']) + + # Perform optimization step + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. 
for normalizing running avg. of gradient + denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + step_size = group['lr'] / bias_correction1 + + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/data_processing/MANIQA/timm/optim/lamb.py b/data_processing/MANIQA/timm/optim/lamb.py new file mode 100644 index 0000000..12c7c49 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/lamb.py @@ -0,0 +1,192 @@ +""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
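+
+# Update sketch (illustrative summary of the step() implementation below): with bias-corrected
+# Adam moments m_hat and v_hat, weight decay wd and a per-layer trust ratio,
+#     update = m_hat / (sqrt(v_hat) + eps) + wd * p
+#     trust_ratio = ||p|| / ||update||        (taken as 1 when either norm is zero)
+#     p <- p - lr * trust_ratio * update
+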
+import math + +import torch +from torch.optim import Optimizer + + +class Lamb(Optimizer): + """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB + reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py + + LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0) + trust_clip (bool): enable LAMBC trust ratio clipping (default: False) + always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + + .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__( + self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6, + weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False): + defaults = dict( + lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, + grad_averaging=grad_averaging, max_grad_norm=max_grad_norm, + trust_clip=trust_clip, always_adapt=always_adapt) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where( + global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1 ** group['step'] + bias_correction2 = 1 - beta2 ** group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on parameters that are + # excluded from weight decay, unless always_adapt == True, then always enabled. 
+ w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, w_norm / g_norm, one_tensor), + one_tensor, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.minimum(trust_ratio, one_tensor) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/data_processing/MANIQA/timm/optim/lars.py b/data_processing/MANIQA/timm/optim/lars.py new file mode 100644 index 0000000..38ca9e0 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/lars.py @@ -0,0 +1,135 @@ +""" PyTorch LARS / LARC Optimizer + +An implementation of LARS (SGD) + LARC in PyTorch + +Based on: + * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + +Additional cleanup and modifications to properly support PyTorch XLA. + +Copyright 2021 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer + + +class Lars(Optimizer): + """ LARS for PyTorch + + Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf + + Args: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate (default: 1.0). + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) + eps (float): eps for division denominator (default: 1e-8) + trust_clip (bool): enable LARC trust ratio clipping (default: False) + always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) + """ + + def __init__( + self, + params, + lr=1.0, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + trust_coeff=0.001, + eps=1e-8, + trust_clip=False, + always_adapt=False, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError("Nesterov momentum requires a momentum and zero dampening") + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + trust_coeff=trust_coeff, + eps=eps, + trust_clip=trust_clip, + always_adapt=always_adapt, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("nesterov", False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + trust_coeff = group['trust_coeff'] + eps = group['eps'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + + # apply LARS LR adaptation, LARC clipping, weight decay + # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + if weight_decay != 0 or group['always_adapt']: + w_norm = p.norm(2.0) + g_norm = grad.norm(2.0) + trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, trust_ratio, one_tensor), + one_tensor, + ) + if group['trust_clip']: + trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) + grad.add_(p, alpha=weight_decay) + grad.mul_(trust_ratio) + + # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + if momentum != 0: + param_state = self.state[p] + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = torch.clone(grad).detach() + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + grad = grad.add(buf, alpha=momentum) + else: + grad = buf + + p.add_(grad, alpha=-group['lr']) + + return loss \ No newline at end of file diff --git a/data_processing/MANIQA/timm/optim/lookahead.py b/data_processing/MANIQA/timm/optim/lookahead.py new file mode 100644 index 0000000..462c3ac --- /dev/null +++ b/data_processing/MANIQA/timm/optim/lookahead.py @@ -0,0 +1,61 @@ +""" Lookahead Optimizer Wrapper. 
+Implementation modified from: https://github.com/alphadl/lookahead.pytorch +Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer +from collections import defaultdict + + +class Lookahead(Optimizer): + def __init__(self, base_optimizer, alpha=0.5, k=6): + # NOTE super().__init__() not called on purpose + if not 0.0 <= alpha <= 1.0: + raise ValueError(f'Invalid slow update rate: {alpha}') + if not 1 <= k: + raise ValueError(f'Invalid lookahead steps: {k}') + defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) + self._base_optimizer = base_optimizer + self.param_groups = base_optimizer.param_groups + self.defaults = base_optimizer.defaults + self.defaults.update(defaults) + self.state = defaultdict(dict) + # manually add our defaults to the param groups + for name, default in defaults.items(): + for group in self._base_optimizer.param_groups: + group.setdefault(name, default) + + @torch.no_grad() + def update_slow(self, group): + for fast_p in group["params"]: + if fast_p.grad is None: + continue + param_state = self._base_optimizer.state[fast_p] + if 'lookahead_slow_buff' not in param_state: + param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) + param_state['lookahead_slow_buff'].copy_(fast_p) + slow = param_state['lookahead_slow_buff'] + slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) + fast_p.copy_(slow) + + def sync_lookahead(self): + for group in self._base_optimizer.param_groups: + self.update_slow(group) + + @torch.no_grad() + def step(self, closure=None): + loss = self._base_optimizer.step(closure) + for group in self._base_optimizer.param_groups: + group['lookahead_step'] += 1 + if group['lookahead_step'] % group['lookahead_k'] == 0: + self.update_slow(group) + return loss + + def state_dict(self): + return self._base_optimizer.state_dict() + + def load_state_dict(self, state_dict): + self._base_optimizer.load_state_dict(state_dict) + self.param_groups = self._base_optimizer.param_groups diff --git a/data_processing/MANIQA/timm/optim/madgrad.py b/data_processing/MANIQA/timm/optim/madgrad.py new file mode 100644 index 0000000..a76713b --- /dev/null +++ b/data_processing/MANIQA/timm/optim/madgrad.py @@ -0,0 +1,184 @@ +""" PyTorch MADGRAD optimizer + +MADGRAD: https://arxiv.org/abs/2101.11075 + +Code from: https://github.com/facebookresearch/madgrad +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import TYPE_CHECKING, Any, Callable, Optional + +import torch +import torch.optim + +if TYPE_CHECKING: + from torch.optim.optimizer import _params_t +else: + _params_t = Any + + +class MADGRAD(torch.optim.Optimizer): + """ + MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic + Optimization. + + .. _MADGRAD: https://arxiv.org/abs/2101.11075 + + MADGRAD is a general purpose optimizer that can be used in place of SGD or + Adam may converge faster and generalize better. Currently GPU-only. 
+ Typically, the same learning rate schedule that is used for SGD or Adam may + be used. The overall learning rate is not comparable to either method and + should be determined by a hyper-parameter sweep. + + MADGRAD requires less weight decay than other methods, often as little as + zero. Momentum values used for SGD or Adam's beta1 should work here also. + + On sparse problems both weight_decay and momentum should be set to 0. + + Arguments: + params (iterable): + Iterable of parameters to optimize or dicts defining parameter groups. + lr (float): + Learning rate (default: 1e-2). + momentum (float): + Momentum value in the range [0,1) (default: 0.9). + weight_decay (float): + Weight decay, i.e. a L2 penalty (default: 0). + eps (float): + Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). + """ + + def __init__( + self, + params: _params_t, + lr: float = 1e-2, + momentum: float = 0.9, + weight_decay: float = 0, + eps: float = 1e-6, + decoupled_decay: bool = False, + ): + if momentum < 0 or momentum >= 1: + raise ValueError(f"Momentum {momentum} must be in the range [0,1]") + if lr <= 0: + raise ValueError(f"Learning rate {lr} must be positive") + if weight_decay < 0: + raise ValueError(f"Weight decay {weight_decay} must be non-negative") + if eps < 0: + raise ValueError(f"Eps must be non-negative") + + defaults = dict( + lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) + super().__init__(params, defaults) + + @property + def supports_memory_efficient_fp16(self) -> bool: + return False + + @property + def supports_flat_params(self) -> bool: + return True + + @torch.no_grad() + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + eps = group['eps'] + lr = group['lr'] + eps + weight_decay = group['weight_decay'] + momentum = group['momentum'] + ck = 1 - momentum + + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if momentum != 0.0 and grad.is_sparse: + raise RuntimeError("momentum != 0 is not compatible with sparse gradients") + + state = self.state[p] + if len(state) == 0: + state['step'] = 0 + state['grad_sum_sq'] = torch.zeros_like(p) + state['s'] = torch.zeros_like(p) + if momentum != 0: + state['x0'] = torch.clone(p).detach() + + state['step'] += 1 + grad_sum_sq = state['grad_sum_sq'] + s = state['s'] + lamb = lr * math.sqrt(state['step']) + + # Apply weight decay + if weight_decay != 0: + if group['decoupled_decay']: + p.mul_(1.0 - group['lr'] * weight_decay) + else: + if grad.is_sparse: + raise RuntimeError("weight_decay option is not compatible with sparse gradients") + grad.add_(p, alpha=weight_decay) + + if grad.is_sparse: + grad = grad.coalesce() + grad_val = grad._values() + + p_masked = p.sparse_mask(grad) + grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) + s_masked = s.sparse_mask(grad) + + # Compute x_0 from other known quantities + rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) + x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) + + # Dense + sparse op + grad_sq = grad * grad + grad_sum_sq.add_(grad_sq, alpha=lamb) + grad_sum_sq_masked.add_(grad_sq, alpha=lamb) + + rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) + + s.add_(grad, alpha=lamb) + s_masked._values().add_(grad_val, alpha=lamb) + + # update masked copy of p + p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) + # Copy updated masked p to dense p using an add operation + p_masked._values().add_(p_kp1_masked_vals, alpha=-1) + p.add_(p_masked, alpha=-1) + else: + if momentum == 0: + # Compute x_0 from other known quantities + rms = grad_sum_sq.pow(1 / 3).add_(eps) + x0 = p.addcdiv(s, rms, value=1) + else: + x0 = state['x0'] + + # Accumulate second moments + grad_sum_sq.addcmul_(grad, grad, value=lamb) + rms = grad_sum_sq.pow(1 / 3).add_(eps) + + # Update s + s.add_(grad, alpha=lamb) + + # Step + if momentum == 0: + p.copy_(x0.addcdiv(s, rms, value=-1)) + else: + z = x0.addcdiv(s, rms, value=-1) + + # p is a moving average of z + p.mul_(1 - ck).add_(z, alpha=ck) + + return loss diff --git a/data_processing/MANIQA/timm/optim/nadam.py b/data_processing/MANIQA/timm/optim/nadam.py new file mode 100644 index 0000000..6268d5d --- /dev/null +++ b/data_processing/MANIQA/timm/optim/nadam.py @@ -0,0 +1,92 @@ +import math + +import torch +from torch.optim.optimizer import Optimizer + + +class Nadam(Optimizer): + """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). + + It has been proposed in `Incorporating Nesterov Momentum into Adam`__. 
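+
+    The step applies a warming momentum schedule (controlled by ``schedule_decay``) and a
+    Nesterov-style look-ahead on the first moment. Illustrative construction (``model`` is a
+    placeholder)::
+
+        optimizer = Nadam(model.parameters(), lr=2e-3, schedule_decay=4e-3)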
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + schedule_decay (float, optional): momentum schedule decay (default: 4e-3) + + __ http://cs229.stanford.edu/proj2015/054_report.pdf + __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf + + Originally taken from: https://github.com/pytorch/pytorch/pull/1408 + NOTE: Has potential issues but does work well on some problems. + """ + + def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, schedule_decay=4e-3): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay) + super(Nadam, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['m_schedule'] = 1. + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Warming momentum schedule + m_schedule = state['m_schedule'] + schedule_decay = group['schedule_decay'] + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + eps = group['eps'] + state['step'] += 1 + t = state['step'] + bias_correction2 = 1 - beta2 ** t + + if group['weight_decay'] != 0: + grad = grad.add(p, alpha=group['weight_decay']) + + momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) + momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay))) + m_schedule_new = m_schedule * momentum_cache_t + m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 + state['m_schedule'] = m_schedule_new + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) + p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) + p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next)) + + return loss diff --git a/data_processing/MANIQA/timm/optim/nvnovograd.py b/data_processing/MANIQA/timm/optim/nvnovograd.py new file mode 100644 index 0000000..fda3f4a --- /dev/null +++ b/data_processing/MANIQA/timm/optim/nvnovograd.py @@ -0,0 +1,120 @@ +""" Nvidia NovoGrad Optimizer. 
+Original impl by Nvidia from Jasper example: + - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper +Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` + - https://arxiv.org/abs/1905.11286 +""" + +import torch +from torch.optim.optimizer import Optimizer +import math + + +class NvNovoGrad(Optimizer): + """ + Implements Novograd algorithm. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.95, 0.98)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging: gradient averaging + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + """ + + def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8, + weight_decay=0, grad_averaging=False, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + amsgrad=amsgrad) + + super(NvNovoGrad, self).__init__(params, defaults) + + def __setstate__(self, state): + super(NvNovoGrad, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Sparse gradients are not supported.') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + norm = torch.sum(torch.pow(grad, 2)) + + if exp_avg_sq == 0: + exp_avg_sq.copy_(norm) + else: + exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. 
of gradient + denom = max_exp_avg_sq.sqrt().add_(group['eps']) + else: + denom = exp_avg_sq.sqrt().add_(group['eps']) + + grad.div_(denom) + if group['weight_decay'] != 0: + grad.add_(p, alpha=group['weight_decay']) + if group['grad_averaging']: + grad.mul_(1 - beta1) + exp_avg.mul_(beta1).add_(grad) + + p.add_(exp_avg, alpha=-group['lr']) + + return loss diff --git a/data_processing/MANIQA/timm/optim/optim_factory.py b/data_processing/MANIQA/timm/optim/optim_factory.py new file mode 100644 index 0000000..e174915 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/optim_factory.py @@ -0,0 +1,217 @@ +""" Optimizer Factory w/ Custom Weight Decay +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional + +import torch +import torch.nn as nn +import torch.optim as optim + +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP + +try: + from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD + has_apex = True +except ImportError: + has_apex = False + + +def add_weight_decay(model, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + no_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0.}, + {'params': decay, 'weight_decay': weight_decay}] + + +def optimizer_kwargs(cfg): + """ cfg/argparse to kwargs helper + Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. + """ + kwargs = dict( + opt=cfg.opt, + lr=cfg.lr, + weight_decay=cfg.weight_decay, + momentum=cfg.momentum) + if getattr(cfg, 'opt_eps', None) is not None: + kwargs['eps'] = cfg.opt_eps + if getattr(cfg, 'opt_betas', None) is not None: + kwargs['betas'] = cfg.opt_betas + if getattr(cfg, 'opt_args', None) is not None: + kwargs.update(cfg.opt_args) + return kwargs + + +def create_optimizer(args, model, filter_bias_and_bn=True): + """ Legacy optimizer factory for backwards compatibility. + NOTE: Use create_optimizer_v2 for new code. + """ + return create_optimizer_v2( + model, + **optimizer_kwargs(cfg=args), + filter_bias_and_bn=filter_bias_and_bn, + ) + + +def create_optimizer_v2( + model_or_params, + opt: str = 'sgd', + lr: Optional[float] = None, + weight_decay: float = 0., + momentum: float = 0.9, + filter_bias_and_bn: bool = True, + **kwargs): + """ Create an optimizer. + + TODO currently the model is passed in and all parameters are selected for optimization. 
+ For more general use an interface that allows selection of parameters to optimize and lr groups, one of: + * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion + * expose the parameters interface and leave it up to caller + + Args: + model_or_params (nn.Module): model containing parameters to optimize + opt: name of optimizer to create + lr: initial learning rate + weight_decay: weight decay to apply in optimizer + momentum: momentum for momentum based optimizers (others may use betas via kwargs) + filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay + **kwargs: extra optimizer specific kwargs to pass through + + Returns: + Optimizer + """ + if isinstance(model_or_params, nn.Module): + # a model was passed in, extract parameters and add weight decays to appropriate layers + if weight_decay and filter_bias_and_bn: + skip = {} + if hasattr(model_or_params, 'no_weight_decay'): + skip = model_or_params.no_weight_decay() + parameters = add_weight_decay(model_or_params, weight_decay, skip) + weight_decay = 0. + else: + parameters = model_or_params.parameters() + else: + # iterable of parameters or param groups passed in + parameters = model_or_params + + opt_lower = opt.lower() + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' + + opt_args = dict(weight_decay=weight_decay, **kwargs) + if lr is not None: + opt_args.setdefault('lr', lr) + + # basic SGD & related + if opt_lower == 'sgd' or opt_lower == 'nesterov': + # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'momentum': + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'sgdp': + optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args) + + # adaptive + elif opt_lower == 'adam': + optimizer = optim.Adam(parameters, **opt_args) + elif opt_lower == 'adamw': + optimizer = optim.AdamW(parameters, **opt_args) + elif opt_lower == 'adamp': + optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) + elif opt_lower == 'nadam': + try: + # NOTE PyTorch >= 1.10 should have native NAdam + optimizer = optim.Nadam(parameters, **opt_args) + except AttributeError: + optimizer = Nadam(parameters, **opt_args) + elif opt_lower == 'radam': + optimizer = RAdam(parameters, **opt_args) + elif opt_lower == 'adamax': + optimizer = optim.Adamax(parameters, **opt_args) + elif opt_lower == 'adabelief': + optimizer = AdaBelief(parameters, rectify=False, **opt_args) + elif opt_lower == 'radabelief': + optimizer = AdaBelief(parameters, rectify=True, **opt_args) + elif opt_lower == 'adadelta': + optimizer = optim.Adadelta(parameters, **opt_args) + elif opt_lower == 'adagrad': + opt_args.setdefault('eps', 1e-8) + optimizer = optim.Adagrad(parameters, **opt_args) + elif opt_lower == 'adafactor': + optimizer = Adafactor(parameters, **opt_args) + elif opt_lower == 'lamb': + optimizer = Lamb(parameters, **opt_args) + elif opt_lower == 'lambc': + optimizer = Lamb(parameters, trust_clip=True, **opt_args) + elif opt_lower == 'larc': + optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args) + elif opt_lower == 'lars': + optimizer = Lars(parameters, momentum=momentum, **opt_args) + 
elif opt_lower == 'nlarc': + optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args) + elif opt_lower == 'nlars': + optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'madgrad': + optimizer = MADGRAD(parameters, momentum=momentum, **opt_args) + elif opt_lower == 'madgradw': + optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args) + elif opt_lower == 'novograd' or opt_lower == 'nvnovograd': + optimizer = NvNovoGrad(parameters, **opt_args) + elif opt_lower == 'rmsprop': + optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args) + elif opt_lower == 'rmsproptf': + optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args) + + # second order + elif opt_lower == 'adahessian': + optimizer = Adahessian(parameters, **opt_args) + + # NVIDIA fused optimizers, require APEX to be installed + elif opt_lower == 'fusedsgd': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'fusedmomentum': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'fusedadam': + optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) + elif opt_lower == 'fusedadamw': + optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) + elif opt_lower == 'fusedlamb': + optimizer = FusedLAMB(parameters, **opt_args) + elif opt_lower == 'fusednovograd': + opt_args.setdefault('betas', (0.95, 0.98)) + optimizer = FusedNovoGrad(parameters, **opt_args) + + else: + assert False and "Invalid optimizer" + raise ValueError + + if len(opt_split) > 1: + if opt_split[0] == 'lookahead': + optimizer = Lookahead(optimizer) + + return optimizer diff --git a/data_processing/MANIQA/timm/optim/radam.py b/data_processing/MANIQA/timm/optim/radam.py new file mode 100644 index 0000000..eb8d22e --- /dev/null +++ b/data_processing/MANIQA/timm/optim/radam.py @@ -0,0 +1,89 @@ +"""RAdam Optimizer. 
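To make the name-based dispatch in the `create_optimizer_v2` factory above concrete, here is a hedged usage sketch. The model, hyper-parameters, and import path are placeholders chosen for illustration.

```python
import torch.nn as nn

# Assumed import path for the factory defined above.
from timm.optim.optim_factory import create_optimizer_v2

model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 2))

# Plain AdamW; bias and other 1-d params are excluded from weight decay by default.
opt = create_optimizer_v2(model, opt='adamw', lr=3e-4, weight_decay=0.05)

# A 'lookahead_' prefix wraps the chosen base optimizer with Lookahead after creation.
opt_la = create_optimizer_v2(model, opt='lookahead_radam', lr=1e-3)
```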
+Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam +Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class RAdam(Optimizer): + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + buffer=[[None, None, None] for _ in range(10)]) + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RAdam, self).__setstate__(state) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_fp32 = p.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + + state['step'] += 1 + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = group['lr'] * math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + else: + step_size = group['lr'] / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) + + # more conservative since it's an approximated value + if num_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + p_fp32.add_(exp_avg, alpha=-step_size) + + p.copy_(p_fp32) + + return loss diff --git a/data_processing/MANIQA/timm/optim/rmsprop_tf.py b/data_processing/MANIQA/timm/optim/rmsprop_tf.py new file mode 100644 index 0000000..0817887 --- /dev/null +++ b/data_processing/MANIQA/timm/optim/rmsprop_tf.py @@ -0,0 +1,139 @@ +""" RMSProp modified to behave like Tensorflow impl + +Originally cut & paste from PyTorch RMSProp +https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py +Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE + +Modifications Copyright 2021 Ross Wightman +""" + +import torch +from torch.optim import Optimizer + + +class RMSpropTF(Optimizer): + """Implements RMSprop algorithm (TensorFlow style epsilon) + + NOTE: This is a direct cut-and-paste 
of PyTorch RMSprop with eps applied before sqrt + and a few other modifications to closer match Tensorflow for matching hyper-params. + + Noteworthy changes include: + 1. Epsilon applied inside square-root + 2. square_avg initialized to ones + 3. LR scaling of update accumulated in momentum buffer + + Proposed by G. Hinton in his + `course `_. + + The centered version first appears in `Generating Sequences + With Recurrent Neural Networks `_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + momentum (float, optional): momentum factor (default: 0) + alpha (float, optional): smoothing (decay) constant (default: 0.9) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + centered (bool, optional) : if ``True``, compute the centered RMSProp, + the gradient is normalized by an estimation of its variance + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101 + lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer + update as per defaults in Tensorflow + + """ + + def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False, + decoupled_decay=False, lr_in_momentum=True): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= momentum: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= alpha: + raise ValueError("Invalid alpha value: {}".format(alpha)) + + defaults = dict( + lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, + decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) + super(RMSpropTF, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RMSpropTF, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('momentum', 0) + group.setdefault('centered', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('RMSprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero + if group['momentum'] > 0: + state['momentum_buffer'] = torch.zeros_like(p) + if group['centered']: + state['grad_avg'] = torch.zeros_like(p) + + square_avg = state['square_avg'] + one_minus_alpha = 1. - group['alpha'] + + state['step'] += 1 + + if group['weight_decay'] != 0: + if group['decoupled_decay']: + p.mul_(1. 
- group['lr'] * group['weight_decay']) + else: + grad = grad.add(p, alpha=group['weight_decay']) + + # Tensorflow order of ops for updating squared avg + square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) + # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt + # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original + else: + avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + # Tensorflow accumulates the LR scaling in the momentum buffer + if group['lr_in_momentum']: + buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr']) + p.add_(-buf) + else: + # PyTorch scales the param update by LR + buf.mul_(group['momentum']).addcdiv_(grad, avg) + p.add_(buf, alpha=-group['lr']) + else: + p.addcdiv_(grad, avg, value=-group['lr']) + + return loss diff --git a/data_processing/MANIQA/timm/optim/sgdp.py b/data_processing/MANIQA/timm/optim/sgdp.py new file mode 100644 index 0000000..baf05fa --- /dev/null +++ b/data_processing/MANIQA/timm/optim/sgdp.py @@ -0,0 +1,70 @@ +""" +SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer, required +import math + +from .adamp import projection + + +class SGDP(Optimizer): + def __init__(self, params, lr=required, momentum=0, dampening=0, + weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1): + defaults = dict( + lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, + nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) + super(SGDP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['momentum'] = torch.zeros_like(p) + + # SGD + buf = state['momentum'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + d_p = grad + momentum * buf + else: + d_p = buf + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if weight_decay != 0: + p.mul_(1. 
- group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum)) + + # Step + p.add_(d_p, alpha=-group['lr']) + + return loss diff --git a/data_processing/MANIQA/timm/scheduler/__init__.py b/data_processing/MANIQA/timm/scheduler/__init__.py new file mode 100644 index 0000000..f1961b8 --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/__init__.py @@ -0,0 +1,8 @@ +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + +from .scheduler_factory import create_scheduler diff --git a/data_processing/MANIQA/timm/scheduler/cosine_lr.py b/data_processing/MANIQA/timm/scheduler/cosine_lr.py new file mode 100644 index 0000000..84ee349 --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/cosine_lr.py @@ -0,0 +1,119 @@ +""" Cosine Scheduler + +Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class CosineLRScheduler(Scheduler): + """ + Cosine decay with restarts. + This is described in the paper https://arxiv.org/abs/1608.03983. + + Inspiration from + https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - 
(self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/data_processing/MANIQA/timm/scheduler/multistep_lr.py b/data_processing/MANIQA/timm/scheduler/multistep_lr.py new file mode 100644 index 0000000..a5d5fe1 --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/multistep_lr.py @@ -0,0 +1,65 @@ +""" MultiStep LR Scheduler + +Basic multi step LR schedule with warmup, noise. +""" +import torch +import bisect +from timm.scheduler.scheduler import Scheduler +from typing import List + +class MultiStepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: List[int], + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def get_curr_decay_steps(self, t): + # find where in the array t goes, + # assumes self.decay_t is sorted + return bisect.bisect_right(self.decay_t, t+1) + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/data_processing/MANIQA/timm/scheduler/plateau_lr.py b/data_processing/MANIQA/timm/scheduler/plateau_lr.py new file mode 100644 index 0000000..4f2cacb --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/plateau_lr.py @@ -0,0 +1,113 @@ +""" Plateau Scheduler + +Adapts PyTorch plateau scheduler and allows application of noise, warmup. 
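As a usage sketch for the cosine schedule defined above: the optimizer, epoch count, and warmup settings below are placeholders, and stepping with the index of the next epoch at the end of each epoch matches the explicit epoch-count convention these schedulers use.

```python
import torch
import torch.nn as nn

from timm.scheduler.cosine_lr import CosineLRScheduler  # assumed import path

model = nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

sched = CosineLRScheduler(
    optimizer,
    t_initial=90,        # length of the (single) cosine cycle, in epochs
    lr_min=1e-5,
    warmup_t=5,          # 5 warmup epochs
    warmup_lr_init=1e-4,
)

for epoch in range(sched.get_cycle_length()):
    # ... train for one epoch ...
    sched.step(epoch + 1)  # compute the LR for the upcoming epoch
```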
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +from .scheduler import Scheduler + + +class PlateauLRScheduler(Scheduler): + """Decay the LR by a factor every time the validation loss plateaus.""" + + def __init__(self, + optimizer, + decay_rate=0.1, + patience_t=10, + verbose=True, + threshold=1e-4, + cooldown_t=0, + warmup_t=0, + warmup_lr_init=0, + lr_min=0, + mode='max', + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize=True, + ): + super().__init__(optimizer, 'lr', initialize=initialize) + + self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, + patience=patience_t, + factor=decay_rate, + verbose=verbose, + threshold=threshold, + cooldown=cooldown_t, + mode=mode, + min_lr=lr_min + ) + + self.noise_range = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + self.restore_lr = None + + def state_dict(self): + return { + 'best': self.lr_scheduler.best, + 'last_epoch': self.lr_scheduler.last_epoch, + } + + def load_state_dict(self, state_dict): + self.lr_scheduler.best = state_dict['best'] + if 'last_epoch' in state_dict: + self.lr_scheduler.last_epoch = state_dict['last_epoch'] + + # override the base class step fn completely + def step(self, epoch, metric=None): + if epoch <= self.warmup_t: + lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] + super().update_groups(lrs) + else: + if self.restore_lr is not None: + # restore actual LR from before our last noise perturbation before stepping base + for i, param_group in enumerate(self.optimizer.param_groups): + param_group['lr'] = self.restore_lr[i] + self.restore_lr = None + + self.lr_scheduler.step(metric, epoch) # step the base scheduler + + if self.noise_range is not None: + if isinstance(self.noise_range, (list, tuple)): + apply_noise = self.noise_range[0] <= epoch < self.noise_range[1] + else: + apply_noise = epoch >= self.noise_range + if apply_noise: + self._apply_noise(epoch) + + def _apply_noise(self, epoch): + g = torch.Generator() + g.manual_seed(self.noise_seed + epoch) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + + # apply the noise on top of previous LR, cache the old value so we can restore for normal + # stepping of base scheduler + restore_lr = [] + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + restore_lr.append(old_lr) + new_lr = old_lr + old_lr * noise + param_group['lr'] = new_lr + self.restore_lr = restore_lr diff --git a/data_processing/MANIQA/timm/scheduler/poly_lr.py b/data_processing/MANIQA/timm/scheduler/poly_lr.py new file mode 100644 index 0000000..9c351be --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/poly_lr.py @@ -0,0 +1,116 @@ +""" Polynomial Scheduler + +Polynomial LR schedule with warmup, noise. 
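A hedged sketch of driving the plateau scheduler above; unlike the time-based schedulers it is stepped with a validation metric. All values below are placeholders.

```python
import torch
import torch.nn as nn

from timm.scheduler.plateau_lr import PlateauLRScheduler  # assumed import path

model = nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

sched = PlateauLRScheduler(
    optimizer,
    decay_rate=0.5,   # halve the LR on plateau
    patience_t=3,     # epochs without improvement before decaying
    mode='min',       # the tracked metric is a loss (lower is better)
    lr_min=1e-5,
)

for epoch in range(20):
    # ... train ...
    val_loss = 1.0  # stand-in for a real validation loss
    sched.step(epoch + 1, metric=val_loss)
```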
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging + +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class PolyLRScheduler(Scheduler): + """ Polynomial LR Scheduler w/ warmup, noise, and k-decay + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + power: float = 0.5, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.power = power + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/data_processing/MANIQA/timm/scheduler/scheduler.py b/data_processing/MANIQA/timm/scheduler/scheduler.py new file mode 100644 index 0000000..21d5150 --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/scheduler.py @@ -0,0 +1,105 @@ +from typing import Dict, Any + +import torch + + +class Scheduler: + """ Parameter Scheduler Base Class + A scheduler base class that can be 
used to schedule any optimizer parameter groups. + + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + + The schedulers built on this should try to remain as stateless as possible (for simplicity). + + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? 
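+ # Optional LR-noise configuration; it only takes effect when noise_range_t is set and is applied on top of scheduled values in _add_noise() below.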
+ self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + def get_epoch_values(self, epoch: int): + return None + + def get_update_values(self, num_updates: int): + return None + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self.get_epoch_values(epoch) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self.get_update_values(num_updates) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + if apply_noise: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + lrs = [v + v * noise for v in lrs] + return lrs diff --git a/data_processing/MANIQA/timm/scheduler/scheduler_factory.py b/data_processing/MANIQA/timm/scheduler/scheduler_factory.py new file mode 100644 index 0000000..72a979c --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/scheduler_factory.py @@ -0,0 +1,107 @@ +""" Scheduler Factory +Hacked together by / Copyright 2021 Ross Wightman +""" +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + + +def create_scheduler(args, optimizer): + num_epochs = args.epochs + + if getattr(args, 'lr_noise', None) is not None: + lr_noise = getattr(args, 'lr_noise') + if isinstance(lr_noise, (list, tuple)): + noise_range = [n * num_epochs for n in lr_noise] + if len(noise_range) == 1: + noise_range = noise_range[0] + else: + noise_range = lr_noise * num_epochs + else: + noise_range = None + noise_args = dict( + noise_range_t=noise_range, + noise_pct=getattr(args, 'lr_noise_pct', 0.67), + noise_std=getattr(args, 'lr_noise_std', 1.), + noise_seed=getattr(args, 'seed', 42), + ) + cycle_args = dict( + cycle_mul=getattr(args, 'lr_cycle_mul', 1.), + cycle_decay=getattr(args, 'lr_cycle_decay', 0.1), + cycle_limit=getattr(args, 'lr_cycle_limit', 1), + ) + + lr_scheduler = None + if args.sched == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 
'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'tanh': + lr_scheduler = TanhLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + t_in_epochs=True, + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'multistep': + lr_scheduler = MultiStepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'plateau': + mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max' + lr_scheduler = PlateauLRScheduler( + optimizer, + decay_rate=args.decay_rate, + patience_t=args.patience_epochs, + lr_min=args.min_lr, + mode=mode, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + cooldown_t=0, + **noise_args, + ) + elif args.sched == 'poly': + lr_scheduler = PolyLRScheduler( + optimizer, + power=args.decay_rate, # overloading 'decay_rate' as polynomial power + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + + return lr_scheduler, num_epochs diff --git a/data_processing/MANIQA/timm/scheduler/step_lr.py b/data_processing/MANIQA/timm/scheduler/step_lr.py new file mode 100644 index 0000000..f797e1a --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/step_lr.py @@ -0,0 +1,63 @@ +""" Step Scheduler + +Basic step LR schedule with warmup, noise. 
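For reference, a sketch of calling the scheduler factory above with an argparse-style namespace. The field values are placeholders; only the fields read by the `'cosine'` branch are populated here.

```python
from types import SimpleNamespace

import torch
import torch.nn as nn

from timm.scheduler.scheduler_factory import create_scheduler  # assumed import path

args = SimpleNamespace(
    epochs=100,
    sched='cosine',
    min_lr=1e-5,
    warmup_lr=1e-4,
    warmup_epochs=5,
    cooldown_epochs=10,
)

model = nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

lr_scheduler, num_epochs = create_scheduler(args, optimizer)
# With these placeholder values, num_epochs is the cycle length plus cooldown (100 + 10).
```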
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import torch + +from .scheduler import Scheduler + + +class StepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: float, + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/data_processing/MANIQA/timm/scheduler/tanh_lr.py b/data_processing/MANIQA/timm/scheduler/tanh_lr.py new file mode 100644 index 0000000..f2d3c9c --- /dev/null +++ b/data_processing/MANIQA/timm/scheduler/tanh_lr.py @@ -0,0 +1,117 @@ +""" TanH Scheduler + +TanH schedule with warmup, cycle/restarts, noise. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class TanhLRScheduler(Scheduler): + """ + Hyberbolic-Tangent decay with restarts. 
+ This is described in the paper https://arxiv.org/abs/1806.01593 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lb: float = -7., + ub: float = 3., + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + assert lb < ub + assert cycle_limit >= 0 + assert warmup_t >= 0 + assert warmup_lr_init >= 0 + self.lb = lb + self.ub = ub + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + if self.warmup_t: + t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + if i < self.cycle_limit: + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + + tr = t_curr / t_i + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. 
- tr) + self.ub * tr)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/data_processing/MANIQA/timm/utils/__init__.py b/data_processing/MANIQA/timm/utils/__init__.py new file mode 100644 index 0000000..b8cef32 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/__init__.py @@ -0,0 +1,13 @@ +from .agc import adaptive_clip_grad +from .checkpoint_saver import CheckpointSaver +from .clip_grad import dispatch_clip_grad +from .cuda import ApexScaler, NativeScaler +from .distributed import distribute_bn, reduce_tensor +from .jit import set_jit_legacy, set_jit_fuser +from .log import setup_default_logging, FormatterNoInfo +from .metrics import AverageMeter, accuracy +from .misc import natural_key, add_bool_arg +from .model import unwrap_model, get_state_dict, freeze, unfreeze +from .model_ema import ModelEma, ModelEmaV2 +from .random import random_seed +from .summary import update_summary, get_outdir diff --git a/data_processing/MANIQA/timm/utils/agc.py b/data_processing/MANIQA/timm/utils/agc.py new file mode 100644 index 0000000..f514017 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/agc.py @@ -0,0 +1,42 @@ +""" Adaptive Gradient Clipping + +An impl of AGC, as per (https://arxiv.org/abs/2102.06171): + +@article{brock2021high, + author={Andrew Brock and Soham De and Samuel L. 
Smith and Karen Simonyan}, + title={High-Performance Large-Scale Image Recognition Without Normalization}, + journal={arXiv preprint arXiv:}, + year={2021} +} + +Code references: + * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets + * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch + + +def unitwise_norm(x, norm_type=2.0): + if x.ndim <= 1: + return x.norm(norm_type) + else: + # works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor + # might need special cases for other weights (possibly MHA) where this may not be true + return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True) + + +def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + for p in parameters: + if p.grad is None: + continue + p_data = p.detach() + g_data = p.grad.detach() + max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor) + grad_norm = unitwise_norm(g_data, norm_type=norm_type) + clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6)) + new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad) + p.grad.detach().copy_(new_grads) diff --git a/data_processing/MANIQA/timm/utils/checkpoint_saver.py b/data_processing/MANIQA/timm/utils/checkpoint_saver.py new file mode 100644 index 0000000..6aad74e --- /dev/null +++ b/data_processing/MANIQA/timm/utils/checkpoint_saver.py @@ -0,0 +1,150 @@ +""" Checkpoint Saver + +Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. 
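A quick sketch of applying the AGC function above between `backward()` and the optimizer step. The model, data, and `clip_factor` are placeholders.

```python
import torch
import torch.nn as nn

from timm.utils.agc import adaptive_clip_grad  # assumed import path

model = nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

loss = model(torch.randn(4, 10)).pow(2).mean()
loss.backward()

# Clip each parameter's gradient relative to that parameter's own norm (unit-wise).
adaptive_clip_grad(model.parameters(), clip_factor=0.01, eps=1e-3)
optimizer.step()
```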
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import glob +import operator +import os +import logging + +import torch + +from .model import unwrap_model, get_state_dict + + +_logger = logging.getLogger(__name__) + + +class CheckpointSaver: + def __init__( + self, + model, + optimizer, + args=None, + model_ema=None, + amp_scaler=None, + checkpoint_prefix='checkpoint', + recovery_prefix='recovery', + checkpoint_dir='', + recovery_dir='', + decreasing=False, + max_history=10, + unwrap_fn=unwrap_model): + + # objects to save state_dicts of + self.model = model + self.optimizer = optimizer + self.args = args + self.model_ema = model_ema + self.amp_scaler = amp_scaler + + # state + self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness + self.best_epoch = None + self.best_metric = None + self.curr_recovery_file = '' + self.last_recovery_file = '' + + # config + self.checkpoint_dir = checkpoint_dir + self.recovery_dir = recovery_dir + self.save_prefix = checkpoint_prefix + self.recovery_prefix = recovery_prefix + self.extension = '.pth.tar' + self.decreasing = decreasing # a lower metric is better if True + self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs + self.max_history = max_history + self.unwrap_fn = unwrap_fn + assert self.max_history >= 1 + + def save_checkpoint(self, epoch, metric=None): + assert epoch >= 0 + tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) + last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) + self._save(tmp_save_path, epoch, metric) + if os.path.exists(last_save_path): + os.unlink(last_save_path) # required for Windows support. + os.rename(tmp_save_path, last_save_path) + worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None + if (len(self.checkpoint_files) < self.max_history + or metric is None or self.cmp(metric, worst_file[1])): + if len(self.checkpoint_files) >= self.max_history: + self._cleanup_checkpoints(1) + filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension + save_path = os.path.join(self.checkpoint_dir, filename) + os.link(last_save_path, save_path) + self.checkpoint_files.append((save_path, metric)) + self.checkpoint_files = sorted( + self.checkpoint_files, key=lambda x: x[1], + reverse=not self.decreasing) # sort in descending order if a lower metric is not better + + checkpoints_str = "Current checkpoints:\n" + for c in self.checkpoint_files: + checkpoints_str += ' {}\n'.format(c) + _logger.info(checkpoints_str) + + if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): + self.best_epoch = epoch + self.best_metric = metric + best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) + if os.path.exists(best_save_path): + os.unlink(best_save_path) + os.link(last_save_path, best_save_path) + + return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) + + def _save(self, save_path, epoch, metric=None): + save_state = { + 'epoch': epoch, + 'arch': type(self.model).__name__.lower(), + 'state_dict': get_state_dict(self.model, self.unwrap_fn), + 'optimizer': self.optimizer.state_dict(), + 'version': 2, # version < 2 increments epoch before save + } + if self.args is not None: + save_state['arch'] = self.args.model + save_state['args'] = self.args + if self.amp_scaler is not None: + save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() + if self.model_ema is not None: + 
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) + if metric is not None: + save_state['metric'] = metric + torch.save(save_state, save_path) + + def _cleanup_checkpoints(self, trim=0): + trim = min(len(self.checkpoint_files), trim) + delete_index = self.max_history - trim + if delete_index < 0 or len(self.checkpoint_files) <= delete_index: + return + to_delete = self.checkpoint_files[delete_index:] + for d in to_delete: + try: + _logger.debug("Cleaning checkpoint: {}".format(d)) + os.remove(d[0]) + except Exception as e: + _logger.error("Exception '{}' while deleting checkpoint".format(e)) + self.checkpoint_files = self.checkpoint_files[:delete_index] + + def save_recovery(self, epoch, batch_idx=0): + assert epoch >= 0 + filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension + save_path = os.path.join(self.recovery_dir, filename) + self._save(save_path, epoch) + if os.path.exists(self.last_recovery_file): + try: + _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) + os.remove(self.last_recovery_file) + except Exception as e: + _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) + self.last_recovery_file = self.curr_recovery_file + self.curr_recovery_file = save_path + + def find_recovery(self): + recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) + files = glob.glob(recovery_path + '*' + self.extension) + files = sorted(files) + return files[0] if len(files) else '' diff --git a/data_processing/MANIQA/timm/utils/clip_grad.py b/data_processing/MANIQA/timm/utils/clip_grad.py new file mode 100644 index 0000000..7eb4069 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/clip_grad.py @@ -0,0 +1,23 @@ +import torch + +from timm.utils.agc import adaptive_clip_grad + + +def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): + """ Dispatch to gradient clipping method + + Args: + parameters (Iterable): model parameters to clip + value (float): clipping value/factor/norm, mode dependant + mode (str): clipping mode, one of 'norm', 'value', 'agc' + norm_type (float): p-norm, default 2.0 + """ + if mode == 'norm': + torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) + elif mode == 'value': + torch.nn.utils.clip_grad_value_(parameters, value) + elif mode == 'agc': + adaptive_clip_grad(parameters, value, norm_type=norm_type) + else: + assert False, f"Unknown clip mode ({mode})." 
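For orientation, the sketch below shows how `dispatch_clip_grad` (and, via `mode='agc'`, the `adaptive_clip_grad` helper above) is typically called between the backward pass and the optimizer step. It is a minimal, hypothetical example and not part of this diff: the toy model, data, and optimizer are placeholders, and the import assumes the vendored MANIQA copy of `timm` is importable on the Python path.

```python
import torch
import torch.nn as nn

from timm.utils.clip_grad import dispatch_clip_grad  # vendored module above

# Toy model and optimizer, purely for illustration.
model = nn.Linear(16, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x, y = torch.randn(8, 16), torch.randn(8, 4)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()

# mode='norm'  -> torch.nn.utils.clip_grad_norm_
# mode='value' -> torch.nn.utils.clip_grad_value_
# mode='agc'   -> adaptive_clip_grad, with `value` acting as the clip factor
dispatch_clip_grad(model.parameters(), value=0.01, mode='agc')

optimizer.step()
optimizer.zero_grad()
```

Note that the meaning of `value` depends on the mode: with `mode='agc'` it is the per-unit clip factor (0.01 is the default in the code above), whereas with `mode='norm'` it is a global gradient-norm bound.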
+ diff --git a/data_processing/MANIQA/timm/utils/cuda.py b/data_processing/MANIQA/timm/utils/cuda.py new file mode 100644 index 0000000..9e7bddf --- /dev/null +++ b/data_processing/MANIQA/timm/utils/cuda.py @@ -0,0 +1,55 @@ +""" CUDA / AMP utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +try: + from apex import amp + has_apex = True +except ImportError: + amp = None + has_apex = False + +from .clip_grad import dispatch_clip_grad + + +class ApexScaler: + state_dict_key = "amp" + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward(create_graph=create_graph) + if clip_grad is not None: + dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) + optimizer.step() + + def state_dict(self): + if 'state_dict' in amp.__dict__: + return amp.state_dict() + + def load_state_dict(self, state_dict): + if 'load_state_dict' in amp.__dict__: + amp.load_state_dict(state_dict) + + +class NativeScaler: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + self._scaler.scale(loss).backward(create_graph=create_graph) + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) + self._scaler.step(optimizer) + self._scaler.update() + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) diff --git a/data_processing/MANIQA/timm/utils/distributed.py b/data_processing/MANIQA/timm/utils/distributed.py new file mode 100644 index 0000000..3c5dba8 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/distributed.py @@ -0,0 +1,28 @@ +""" Distributed training/validation utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import distributed as dist + +from .model import unwrap_model + + +def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + + +def distribute_bn(model, world_size, reduce=False): + # ensure every node has the same running bn stats + for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): + if ('running_mean' in bn_name) or ('running_var' in bn_name): + if reduce: + # average bn stats across whole group + torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) + bn_buf /= float(world_size) + else: + # broadcast bn stats from rank 0 to whole group + torch.distributed.broadcast(bn_buf, 0) diff --git a/data_processing/MANIQA/timm/utils/jit.py b/data_processing/MANIQA/timm/utils/jit.py new file mode 100644 index 0000000..6039823 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/jit.py @@ -0,0 +1,50 @@ +""" JIT scripting/tracing utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os + +import torch + + +def set_jit_legacy(): + """ Set JIT executor to legacy w/ support for op fusion + This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes + in the JIT exectutor. These API are not supported so could change. + """ + # + assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!" 
+ torch._C._jit_set_profiling_executor(False) + torch._C._jit_set_profiling_mode(False) + torch._C._jit_override_can_fuse_on_gpu(True) + #torch._C._jit_set_texpr_fuser_enabled(True) + + +def set_jit_fuser(fuser): + if fuser == "te": + # default fuser should be == 'te' + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(True) + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(True) + torch._C._jit_set_texpr_fuser_enabled(True) + elif fuser == "old" or fuser == "legacy": + torch._C._jit_set_profiling_executor(False) + torch._C._jit_set_profiling_mode(False) + torch._C._jit_override_can_fuse_on_gpu(True) + torch._C._jit_set_texpr_fuser_enabled(False) + elif fuser == "nvfuser" or fuser == "nvf": + os.environ['PYTORCH_CUDA_FUSER_DISABLE_FALLBACK'] = '1' + os.environ['PYTORCH_CUDA_FUSER_DISABLE_FMA'] = '1' + os.environ['PYTORCH_CUDA_FUSER_JIT_OPT_LEVEL'] = '0' + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(True) + torch._C._jit_can_fuse_on_cpu() + torch._C._jit_can_fuse_on_gpu() + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_nvfuser_guard_mode(True) + torch._C._jit_set_nvfuser_enabled(True) + else: + assert False, f"Invalid jit fuser ({fuser})" diff --git a/data_processing/MANIQA/timm/utils/log.py b/data_processing/MANIQA/timm/utils/log.py new file mode 100644 index 0000000..c99469e --- /dev/null +++ b/data_processing/MANIQA/timm/utils/log.py @@ -0,0 +1,28 @@ +""" Logging helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import logging.handlers + + +class FormatterNoInfo(logging.Formatter): + def __init__(self, fmt='%(levelname)s: %(message)s'): + logging.Formatter.__init__(self, fmt) + + def format(self, record): + if record.levelno == logging.INFO: + return str(record.getMessage()) + return logging.Formatter.format(self, record) + + +def setup_default_logging(default_level=logging.INFO, log_path=''): + console_handler = logging.StreamHandler() + console_handler.setFormatter(FormatterNoInfo()) + logging.root.addHandler(console_handler) + logging.root.setLevel(default_level) + if log_path: + file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) + file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") + file_handler.setFormatter(file_formatter) + logging.root.addHandler(file_handler) diff --git a/data_processing/MANIQA/timm/utils/metrics.py b/data_processing/MANIQA/timm/utils/metrics.py new file mode 100644 index 0000000..9fdbe13 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/metrics.py @@ -0,0 +1,32 @@ +""" Eval metrics and related + +Hacked together by / Copyright 2020 Ross Wightman +""" + + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = min(max(topk), output.size()[1]) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.reshape(1, -1).expand_as(pred)) + return [correct[:min(k, 
maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk] diff --git a/data_processing/MANIQA/timm/utils/misc.py b/data_processing/MANIQA/timm/utils/misc.py new file mode 100644 index 0000000..39c0097 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/misc.py @@ -0,0 +1,18 @@ +""" Misc utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import re + + +def natural_key(string_): + """See http://www.codinghorror.com/blog/archives/001018.html""" + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def add_bool_arg(parser, name, default=False, help=''): + dest_name = name.replace('-', '_') + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) + group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) + parser.set_defaults(**{dest_name: default}) diff --git a/data_processing/MANIQA/timm/utils/model.py b/data_processing/MANIQA/timm/utils/model.py new file mode 100644 index 0000000..b95c453 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/model.py @@ -0,0 +1,273 @@ +""" Model / state_dict utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import fnmatch + +import torch +from torchvision.ops.misc import FrozenBatchNorm2d + +from .model_ema import ModelEma + + +def unwrap_model(model): + if isinstance(model, ModelEma): + return unwrap_model(model.ema) + else: + return model.module if hasattr(model, 'module') else model + + +def get_state_dict(model, unwrap_fn=unwrap_model): + return unwrap_fn(model).state_dict() + + +def avg_sq_ch_mean(model, input, output): + """ calculate average channel square mean of output activations + """ + return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item() + + +def avg_ch_var(model, input, output): + """ calculate average channel variance of output activations + """ + return torch.mean(output.var(axis=[0, 2, 3])).item() + + +def avg_ch_var_residual(model, input, output): + """ calculate average channel variance of output activations + """ + return torch.mean(output.var(axis=[0, 2, 3])).item() + + +class ActivationStatsHook: + """Iterates through each of `model`'s modules and matches modules using unix pattern + matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is + a match. + + Arguments: + model (nn.Module): model from which we will extract the activation stats + hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string + matching with the name of model's modules. + hook_fns (List[Callable]): List of hook functions to be registered at every + module in `layer_names`. + + Inspiration from https://docs.fast.ai/callback.hook.html. + + Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example + on how to plot Signal Propogation Plots using `ActivationStatsHook`. 
+ """ + + def __init__(self, model, hook_fn_locs, hook_fns): + self.model = model + self.hook_fn_locs = hook_fn_locs + self.hook_fns = hook_fns + if len(hook_fn_locs) != len(hook_fns): + raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ + their lengths are different.") + self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) + for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): + self.register_hook(hook_fn_loc, hook_fn) + + def _create_hook(self, hook_fn): + def append_activation_stats(module, input, output): + out = hook_fn(module, input, output) + self.stats[hook_fn.__name__].append(out) + + return append_activation_stats + + def register_hook(self, hook_fn_loc, hook_fn): + for name, module in self.model.named_modules(): + if not fnmatch.fnmatch(name, hook_fn_loc): + continue + module.register_forward_hook(self._create_hook(hook_fn)) + + +def extract_spp_stats( + model, + hook_fn_locs, + hook_fns, + input_shape=[8, 3, 224, 224]): + """Extract average square channel mean and variance of activations during + forward pass to plot Signal Propogation Plots (SPP). + + Paper: https://arxiv.org/abs/2101.08692 + + Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 + """ + x = torch.normal(0., 1., input_shape) + hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) + _ = model(x) + return hook.stats + + +def freeze_batch_norm_2d(module): + """ + Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is + itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and + returned. Otherwise, the module is walked recursively and submodules are converted in place. + + Args: + module (torch.nn.Module): Any PyTorch module. + + Returns: + torch.nn.Module: Resulting module + + Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 + """ + res = module + if isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): + res = FrozenBatchNorm2d(module.num_features) + res.num_features = module.num_features + res.affine = module.affine + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + else: + for name, child in module.named_children(): + new_child = freeze_batch_norm_2d(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def unfreeze_batch_norm_2d(module): + """ + Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself and instance + of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked + recursively and submodules are converted in place. + + Args: + module (torch.nn.Module): Any PyTorch module. 
+ + Returns: + torch.nn.Module: Resulting module + + Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 + """ + res = module + if isinstance(module, FrozenBatchNorm2d): + res = torch.nn.BatchNorm2d(module.num_features) + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + else: + for name, child in module.named_children(): + new_child = unfreeze_batch_norm_2d(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'): + """ + Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is + done in place. + Args: + root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced. + submodules (list[str]): List of modules for which the parameters will be (un)frozen. They are to be provided as + named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list + means that the whole root module will be (un)frozen. Defaults to [] + include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers. + Defaults to `True`. + mode (bool): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`. + """ + assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"' + + if isinstance(root_module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): + # Raise assertion here because we can't convert it in place + raise AssertionError( + "You have provided a batch norm layer as the `root module`. Please use " + "`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.") + + if isinstance(submodules, str): + submodules = [submodules] + + named_modules = submodules + submodules = [root_module.get_submodule(m) for m in submodules] + + if not len(submodules): + named_modules, submodules = list(zip(*root_module.named_children())) + + for n, m in zip(named_modules, submodules): + # (Un)freeze parameters + for p in m.parameters(): + p.requires_grad = False if mode == 'freeze' else True + if include_bn_running_stats: + # Helper to add submodule specified as a named_module + def _add_submodule(module, name, submodule): + split = name.rsplit('.', 1) + if len(split) > 1: + module.get_submodule(split[0]).add_module(split[1], submodule) + else: + module.add_module(name, submodule) + + # Freeze batch norm + if mode == 'freeze': + res = freeze_batch_norm_2d(m) + # It's possible that `m` is a type of BatchNorm in itself, in which case `unfreeze_batch_norm_2d` won't + # convert it in place, but will return the converted result. In this case `res` holds the converted + # result and we may try to re-assign the named module + if isinstance(m, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): + _add_submodule(root_module, n, res) + # Unfreeze batch norm + else: + res = unfreeze_batch_norm_2d(m) + # Ditto. 
See note above in mode == 'freeze' branch + if isinstance(m, FrozenBatchNorm2d): + _add_submodule(root_module, n, res) + + +def freeze(root_module, submodules=[], include_bn_running_stats=True): + """ + Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. + Args: + root_module (nn.Module): Root module relative to which `submodules` are referenced. + submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as + named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list + means that the whole root module will be frozen. Defaults to `[]`. + include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and + `SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning, + it's good practice to freeze batch norm stats. And note that these are different to the affine parameters + which are just normal PyTorch parameters. Defaults to `True`. + + Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`. + + Examples:: + + >>> model = timm.create_model('resnet18') + >>> # Freeze up to and including layer2 + >>> submodules = [n for n, _ in model.named_children()] + >>> print(submodules) + ['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc'] + >>> freeze(model, submodules[:submodules.index('layer2') + 1]) + >>> # Check for yourself that it works as expected + >>> print(model.layer2[0].conv1.weight.requires_grad) + False + >>> print(model.layer3[0].conv1.weight.requires_grad) + True + >>> # Unfreeze + >>> unfreeze(model) + """ + _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze") + + +def unfreeze(root_module, submodules=[], include_bn_running_stats=True): + """ + Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. + Args: + root_module (nn.Module): Root module relative to which `submodules` are referenced. + submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided + as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty + list means that the whole root module will be unfrozen. Defaults to `[]`. + include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers. + These will be converted to `BatchNorm2d` in place. Defaults to `True`. + + See example in docstring for `freeze`. + """ + _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze") diff --git a/data_processing/MANIQA/timm/utils/model_ema.py b/data_processing/MANIQA/timm/utils/model_ema.py new file mode 100644 index 0000000..073d5c5 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/model_ema.py @@ -0,0 +1,126 @@ +""" Exponential Moving Average (EMA) of model updates + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn + +_logger = logging.getLogger(__name__) + + +class ModelEma: + """ Model Exponential Moving Average (DEPRECATED) + + Keep a moving average of everything in the model state_dict (parameters and buffers). + This version is deprecated, it does not work with scripted models. 
Will be removed eventually. + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + def __init__(self, model, decay=0.9999, device='', resume=''): + # make a copy of the model for accumulating moving average of weights + self.ema = deepcopy(model) + self.ema.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if device: + self.ema.to(device=device) + self.ema_has_module = hasattr(self.ema, 'module') + if resume: + self._load_checkpoint(resume) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def _load_checkpoint(self, checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + assert isinstance(checkpoint, dict) + if 'state_dict_ema' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict_ema'].items(): + # ema model may have been wrapped by DataParallel, and need module prefix + if self.ema_has_module: + name = 'module.' + k if not k.startswith('module') else k + else: + name = k + new_state_dict[name] = v + self.ema.load_state_dict(new_state_dict) + _logger.info("Loaded state_dict_ema") + else: + _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") + + def update(self, model): + # correct a mismatch in state dict keys + needs_module = hasattr(model, 'module') and not self.ema_has_module + with torch.no_grad(): + msd = model.state_dict() + for k, ema_v in self.ema.state_dict().items(): + if needs_module: + k = 'module.' + k + model_v = msd[k].detach() + if self.device: + model_v = model_v.to(device=self.device) + ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) + + +class ModelEmaV2(nn.Module): + """ Model Exponential Moving Average V2 + + Keep a moving average of everything in the model state_dict (parameters and buffers). + V2 of this module is simpler, it does not match params/buffers based on name but simply + iterates in order. It works with torchscript (JIT of full model). + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. 
+ + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + def __init__(self, model, decay=0.9999, device=None): + super(ModelEmaV2, self).__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if self.device is not None: + self.module.to(device=device) + + def _update(self, model, update_fn): + with torch.no_grad(): + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if self.device is not None: + model_v = model_v.to(device=self.device) + ema_v.copy_(update_fn(ema_v, model_v)) + + def update(self, model): + self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) diff --git a/data_processing/MANIQA/timm/utils/random.py b/data_processing/MANIQA/timm/utils/random.py new file mode 100644 index 0000000..a967998 --- /dev/null +++ b/data_processing/MANIQA/timm/utils/random.py @@ -0,0 +1,9 @@ +import random +import numpy as np +import torch + + +def random_seed(seed=42, rank=0): + torch.manual_seed(seed + rank) + np.random.seed(seed + rank) + random.seed(seed + rank) diff --git a/data_processing/MANIQA/timm/utils/summary.py b/data_processing/MANIQA/timm/utils/summary.py new file mode 100644 index 0000000..9f5af9a --- /dev/null +++ b/data_processing/MANIQA/timm/utils/summary.py @@ -0,0 +1,39 @@ +""" Summary utilities + +Hacked together by / Copyright 2020 Ross Wightman +""" +import csv +import os +from collections import OrderedDict +try: + import wandb +except ImportError: + pass + +def get_outdir(path, *paths, inc=False): + outdir = os.path.join(path, *paths) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif inc: + count = 1 + outdir_inc = outdir + '-' + str(count) + while os.path.exists(outdir_inc): + count = count + 1 + outdir_inc = outdir + '-' + str(count) + assert count < 100 + outdir = outdir_inc + os.makedirs(outdir) + return outdir + + +def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False): + rowd = OrderedDict(epoch=epoch) + rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) + rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) + if log_wandb: + wandb.log(rowd) + with open(filename, mode='a') as cf: + dw = csv.DictWriter(cf, fieldnames=rowd.keys()) + if write_header: # first iteration (epoch == 1 can't be used) + dw.writeheader() + dw.writerow(rowd) diff --git a/data_processing/MANIQA/utils/inference_process.py b/data_processing/MANIQA/utils/inference_process.py new file mode 100644 index 0000000..9e93e48 --- /dev/null +++ b/data_processing/MANIQA/utils/inference_process.py @@ -0,0 +1,129 @@ +import torch +import numpy as np + + +def sort_file(file_path): + f2 = open(file_path, "r") + lines = f2.readlines() + ret = [] + for line in lines: + line = line[:-1] + ret.append(line) + ret.sort() + + with open('./output.txt', 'w') as f: + for i in ret: + f.write(i + '\n') + + +def five_point_crop(idx, d_img, config): + new_h = config.crop_size + new_w = config.crop_size 
+ b, c, h, w = d_img.shape + if idx == 0: + top = 0 + left = 0 + elif idx == 1: + top = 0 + left = w - new_w + elif idx == 2: + top = h - new_h + left = 0 + elif idx == 3: + top = h - new_h + left = w - new_w + elif idx == 4: + center_h = h // 2 + center_w = w // 2 + top = center_h - new_h // 2 + left = center_w - new_w // 2 + d_img_org = crop_image(top, left, config.crop_size, img=d_img) + + return d_img_org + + +def random_crop(d_img, config): + b, c, h, w = d_img.shape + top = np.random.randint(0, h - config.crop_size) + left = np.random.randint(0, w - config.crop_size) + d_img_org = crop_image(top, left, config.crop_size, img=d_img) + return d_img_org + + +def crop_image(top, left, patch_size, img=None): + tmp_img = img[:, :, top:top + patch_size, left:left + patch_size] + return tmp_img + + +class RandCrop(object): + def __init__(self, patch_size): + self.patch_size = patch_size + + def __call__(self, sample): + # r_img : C x H x W (numpy) + d_img = sample['d_img_org'] + d_name = sample['d_name'] + + c, h, w = d_img.shape + new_h = self.patch_size + new_w = self.patch_size + + top = np.random.randint(0, h - new_h) + left = np.random.randint(0, w - new_w) + ret_d_img = d_img[:, top: top + new_h, left: left + new_w] + sample = { + 'd_img_org': ret_d_img, + 'd_name': d_name + } + + return sample + + +class Normalize(object): + def __init__(self, mean, var): + self.mean = mean + self.var = var + + def __call__(self, sample): + # r_img: C x H x W (numpy) + d_img = sample['d_img_org'] + d_name = sample['d_name'] + + d_img = (d_img - self.mean) / self.var + + sample = {'d_img_org': d_img, 'd_name': d_name} + return sample + + +class RandHorizontalFlip(object): + def __init__(self): + pass + + def __call__(self, sample): + d_img = sample['d_img_org'] + d_name = sample['d_name'] + prob_lr = np.random.random() + # np.fliplr needs HxWxC + if prob_lr > 0.5: + d_img = np.fliplr(d_img).copy() + + sample = { + 'd_img_org': d_img, + 'd_name': d_name + } + return sample + + +class ToTensor(object): + def __init__(self): + pass + + def __call__(self, sample): + d_img = sample['d_img_org'] + d_name = sample['d_name'] + d_img = torch.from_numpy(d_img).type(torch.FloatTensor) + sample = { + 'd_img_org': d_img, + 'd_name': d_name + } + return sample \ No newline at end of file diff --git a/data_processing/MANIQA/utils/process.py b/data_processing/MANIQA/utils/process.py new file mode 100644 index 0000000..8acad63 --- /dev/null +++ b/data_processing/MANIQA/utils/process.py @@ -0,0 +1,239 @@ +import torch +import numpy as np + + +def random_crop(d_img, config): + b, c, h, w = d_img.shape + top = np.random.randint(0, h - config.crop_size) + left = np.random.randint(0, w - config.crop_size) + d_img_org = crop_image(top, left, config.crop_size, img=d_img) + return d_img_org + + +def crop_image(top, left, patch_size, img=None): + tmp_img = img[:, :, top:top + patch_size, left:left + patch_size] + return tmp_img + + +def five_point_crop(idx, d_img, config): + new_h = config.crop_size + new_w = config.crop_size + b, c, h, w = d_img.shape + if idx == 0: + top = 0 + left = 0 + elif idx == 1: + top = 0 + left = w - new_w + elif idx == 2: + top = h - new_h + left = 0 + elif idx == 3: + top = h - new_h + left = w - new_w + elif idx == 4: + center_h = h // 2 + center_w = w // 2 + top = center_h - new_h // 2 + left = center_w - new_w // 2 + d_img_org = crop_image(top, left, config.crop_size, img=d_img) + + return d_img_org + + +def split_dataset_koniq10k(txt_file_name, split_seed=20): + np.random.seed(split_seed) + 
object_data = [] + with open(txt_file_name, 'r') as listFile: + for line in listFile: + dis, score = line.split() + dis = dis + if dis not in object_data: + object_data.append(dis) + + np.random.shuffle(object_data) + np.random.seed(20) + + l = len(object_data) + train_name = object_data[:int(l * 0.8)] + val_name = object_data[int(l * 0.8):] + return train_name, val_name + + +def split_dataset_kadid10k(txt_file_name, split_seed=20): + np.random.seed(split_seed) + object_data = [] + with open(txt_file_name, 'r') as listFile: + for line in listFile: + dis, score = line.split() + dis = dis[:-1] + if dis[1:3] not in object_data: + object_data.append(dis[1:3]) + + np.random.shuffle(object_data) + np.random.seed(20) + + l = len(object_data) + train_name = object_data[:int(l * 0.8)] + val_name = object_data[int(l * 0.8):] + return train_name, val_name + + +def split_dataset_tid2013(txt_file_name, split_seed=20): + np.random.seed(split_seed) + object_data = [] + with open(txt_file_name, 'r') as listFile: + for line in listFile: + score, dis = line.split() + if dis[1:3] not in object_data: + object_data.append(dis[1:3]) + + np.random.shuffle(object_data) + np.random.seed(20) + + l = len(object_data) + train_name = object_data[:int(l * 0.8)] + val_name = object_data[int(l * 0.8):] + return train_name, val_name + + +def split_dataset_live(txt_file_name, split_seed=20): + np.random.seed(split_seed) + object_data = [] + with open(txt_file_name, 'r') as listFile: + for line in listFile: + i1, i2, ref, dis, score, h, w = line.split() + if ref[8:] not in object_data: + object_data.append(ref[8:]) + + np.random.shuffle(object_data) + np.random.seed(20) + + l = len(object_data) + train_name = object_data[:int(l * 0.8)] + val_name = object_data[int(l * 0.8):] + return train_name, val_name + + +def split_dataset_csiq(txt_file_name, split_seed=20): + np.random.seed(split_seed) + object_data = [] + with open(txt_file_name, 'r') as listFile: + for line in listFile: + dis, score= line.split() + dis_name, dis_type, idx_img, _ = dis.split(".") + if dis_name not in object_data: + object_data.append(dis_name) + + np.random.shuffle(object_data) + np.random.seed(20) + + l = len(object_data) + train_name = object_data[:int(l * 0.8)] + val_name = object_data[int(l * 0.8):] + return train_name, val_name + + +class RandCrop(object): + def __init__(self, patch_size): + self.patch_size = patch_size + + def __call__(self, sample): + # r_img : C x H x W (numpy) + d_img = sample['d_img_org'] + score = sample['score'] + + c, h, w = d_img.shape + new_h = self.patch_size + new_w = self.patch_size + + # For koniq10k + if h == new_h and w == new_w: + ret_d_img = d_img + else: + top = np.random.randint(0, h - new_h) + left = np.random.randint(0, w - new_w) + ret_d_img = d_img[:, top: top + new_h, left: left + new_w] + + sample = { + 'd_img_org': ret_d_img, + 'score': score + } + return sample + + +class Normalize(object): + def __init__(self, mean, var): + self.mean = mean + self.var = var + + def __call__(self, sample): + # r_img: C x H x W (numpy) + d_img = sample['d_img_org'] + score = sample['score'] + d_img = (d_img - self.mean) / self.var + sample = {'d_img_org': d_img, 'score': score} + return sample + + +class RandHorizontalFlip(object): + def __init__(self, prob_aug): + self.prob_aug = prob_aug + + def __call__(self, sample): + d_img = sample['d_img_org'] + score = sample['score'] + + p_aug = np.array([self.prob_aug, 1 - self.prob_aug]) + prob_lr = np.random.choice([1, 0], p=p_aug.ravel()) + + if prob_lr > 0.5: + d_img = 
np.fliplr(d_img).copy() + + sample = { + 'd_img_org': d_img, + 'score': score + } + return sample + + +class RandRotation(object): + def __init__(self, prob_aug=0.5): + self.prob_aug = prob_aug + self.aug_count = 0 + + def __call__(self, sample): + d_img = sample['d_img_org'] + score = sample['score'] + + p_aug = np.array([self.prob_aug, 1 - self.prob_aug]) + prob_lr = np.random.choice([1, 0], p=p_aug.ravel()) + + if prob_lr > 0.5: + p = np.array([0.33, 0.33, 0.34]) + idx = np.random.choice([1, 2, 3], p=p.ravel()) + d_img = np.rot90(d_img, idx, axes=(1, 2)).copy() + self.aug_count += 1 + + sample = { + 'd_img_org': d_img, + 'score': score, + 'aug_count': self.aug_count + } + return sample + + +class ToTensor(object): + def __init__(self): + pass + + def __call__(self, sample): + d_img = sample['d_img_org'] + score = sample['score'] + d_img = torch.from_numpy(d_img).type(torch.FloatTensor) + score = torch.from_numpy(score).type(torch.FloatTensor) + sample = { + 'd_img_org': d_img, + 'score': score + } + return sample \ No newline at end of file diff --git a/data_processing/assets/3dpw.png b/data_processing/assets/3dpw.png new file mode 100644 index 0000000..a80dd50 Binary files /dev/null and b/data_processing/assets/3dpw.png differ diff --git a/data_processing/assets/3dpw_crowd.png b/data_processing/assets/3dpw_crowd.png new file mode 100644 index 0000000..0bb148f Binary files /dev/null and b/data_processing/assets/3dpw_crowd.png differ diff --git a/data_processing/assets/directory.md b/data_processing/assets/directory.md new file mode 100644 index 0000000..5ce3b09 --- /dev/null +++ b/data_processing/assets/directory.md @@ -0,0 +1,99 @@ +## Directory +### Root +The `${ROOT}` is described as below. +``` +${ROOT} +|-- assets +|-- common +|-- data +|-- demo +|-- main +|-- output +|-- tool +``` +* `assets` contains config files to reproduce results and some materials used in this repository. +* `data` contains data loading codes and soft links to images and annotations directories. +* `demo` contains demo codes. +* `common` contains kernel codes for I2L-MeshNet. +* `main` contains high-level codes for training or testing the network. +* `output` contains the current experiment's log, trained models, visualized outputs, and test result (only for MuPoTS). +* `tool` contains codes for auxiliary tasks. + +### Data +You need to follow directory structure of the `data` as below. 
+``` +${ROOT} +|-- data +| |-- J_regressor_extra.npy +| |-- CrowdPose +| | |-- annotations +| | |-- images +| |-- Human36M +| | |-- images +| | |-- annotations +| | |-- J_regressor_h36m_correct.npy +| |-- MuCo +| | |-- data +| | | |-- augmented_set +| | | |-- unaugmented_set +| | | |-- MuCo-3DHP.json +| | | |-- smpl_param.json +| |-- MSCOCO +| | |-- images +| | | |-- train2017 +| | | |-- val2017 +| | |-- annotations +| | |-- J_regressor_coco_hip_smpl.npy +| |-- MPII +| | |-- data +| | | |-- annotations +| | | |-- images +| |-- PW3D +| | |-- data +| | | |-- 3DPW_latest_train.json +| | | |-- 3DPW_latest_validation.json +| | | |-- 3DPW_latest_test.json +| | | |-- 3DPW_validation_crowd_hhrnet_result.json +| | | |-- imageFiles +``` +* Download `J_regressor_*.npy` [[data](https://drive.google.com/drive/folders/187Azod6z13-dS7W5wHerCTgniHYet-yh?usp=sharing)] +* Download CrowdPose data [[data](https://drive.google.com/drive/folders/1qV5Cx5DJLhJVXlfB0vmQrB3ndJXsTZVM?usp=sharing)] +* Download Human3.6M parsed data and SMPL parameters [[data](https://drive.google.com/drive/folders/1r0B9I3XxIIW_jsXjYinDpL6NFcxTZart?usp=share_link)][[SMPL parameters from SMPLify-X](https://drive.google.com/drive/folders/12fCumEgs9PXT-dAaOGq0EDpl9dGKKorF?usp=share_link)] +* Download MuCo parsed/composited data and SMPL parameters [[data](https://drive.google.com/drive/folders/1dfhFa1kBHYKLTKuprNc7xixt3yyKEky5?usp=share_link)][[SMPL parameters from SMPLify-X](https://drive.google.com/drive/folders/1Wm1_6tn1u-_RE1iUlibIWfS75O79aJRz?usp=share_link)] +* Download MS COCO [[data](https://cocodataset.org/#download)] +* Download MPII parsed data [[data](https://drive.google.com/drive/folders/1zQZpfNu0s19tA7Z1SmulP1cDaVfNDDd3?usp=sharing)] +* Download 3DPW parsed data [[data](https://drive.google.com/drive/folders/1_wi6G6h-JFfb9HGccysJwI02zc_S2DVJ?usp=sharing)] +* Download MS COCO / MPII / CrowdPose SMPL parameters from [NeuralAnnot](https://github.com/mks0601/NeuralAnnot_RELEASE) +* All annotation files follow [MS COCO format](http://cocodataset.org/#format-data). +* If you want to add your own dataset, you have to convert it to [MS COCO format](http://cocodataset.org/#format-data). + +If you have a problem with 'Download limit' problem when tried to download dataset from google drive link, please try this trick. +``` +* Go the shared folder, which contains files you want to copy to your drive +* Select all the files you want to copy +* In the upper right corner click on three vertical dots and select “make a copy” +* Then, the file is copied to your personal google drive account. You can download it from your personal account. +``` + + +### Pytorch SMPL layer and VPoser +* For the SMPL layer, I used [smplpytorch](https://github.com/gulvarol/smplpytorch). The repo is already included in `common/utils/smplpytorch`. 
+* Download `basicModel_f_lbs_10_207_0_v1.0.0.pkl`, `basicModel_m_lbs_10_207_0_v1.0.0.pkl`, and `basicModel_neutral_lbs_10_207_0_v1.0.0.pkl` from [here](https://smpl.is.tue.mpg.de/download.php) (female & male) and [here](http://smplify.is.tue.mpg.de/) (neutral) to `${ROOT}/smplpytorch/smplpytorch/native/models`. +* Download [VPoser](https://github.com/nghorbani/human_body_prior) from [here](https://drive.google.com/drive/folders/1KNw99d4-_6DqYXfBp2S3_4OMQ_nMW0uQ?usp=sharing) and place it under `${ROOT}/common/utils/human_model_files/smpl/VPOSERR_CKPT`. + +### Output +* Create the `output` folder as a soft link (recommended) rather than a regular folder, because it can take up a large amount of storage. +* The experiments' directory structure will be created as below. +``` +${ROOT} +|-- output +| |-- ${current_experiment_name} +| | |-- log +| | |-- checkpoint +| | |-- result +| | |-- vis +``` +* `log` folder contains the training log file. +* `checkpoint` folder contains saved checkpoints for each epoch. +* `result` folder contains the final estimation files of MuPoTS generated in the testing stage. +* `vis` folder contains visualized results. diff --git a/data_processing/assets/front_figure.png b/data_processing/assets/front_figure.png new file mode 100644 index 0000000..296b860 Binary files /dev/null and b/data_processing/assets/front_figure.png differ diff --git a/data_processing/assets/running.md b/data_processing/assets/running.md new file mode 100644 index 0000000..d65eaa1 --- /dev/null +++ b/data_processing/assets/running.md @@ -0,0 +1,27 @@ +## Running 3DCrowdNet +In this repository, we provide training and testing code for 3DPW-Crowd (Table 5) and 3DPW (Table 8). +We use the pre-trained ResNet-50 weights of [xiao2018simple](https://github.com/microsoft/human-pose-estimation.pytorch) to achieve faster convergence, but you can get the same result by training longer. +Download the [weight file](https://drive.google.com/drive/folders/1UsntO3wdIHOiajcb8oicMhQ82SmFvulp?usp=sharing) and place it under `${ROOT}/tool/`. + +### Train +Use the appropriate config file to reproduce results. +For example, to reproduce 3DPW-Crowd (Table 5), run +```bash +python train.py --amp --continue --gpu 0-3 --cfg ../assets/yaml/3dpw_crowd.yml +``` +Remove `--continue` if you don't want to use the pre-trained ResNet-50 weights. +Add the `--exp_dir` argument to resume training. + +> Note: The CUDA version can affect training time. Training normally takes about 2 hours per epoch with CUDA 10.1, but 4~6 hours per epoch with CUDA 10.2. The PyTorch version is 1.6.0. + +### Test +Download the experiment directories from [here](https://drive.google.com/drive/folders/19ntGuC0zaXQa3cCN_2Ox_hWYX3nLLP2J?usp=sharing) and place them under `${ROOT}/output/`. +To evaluate on 3DPW-Crowd (Table 5), run +```bash +python test.py --gpu 0-3 --cfg ../assets/yaml/3dpw_crowd.yml --exp_dir ../output/exp_03-28_18:26 --test_epoch 6 +``` +To evaluate on 3DPW (Table 8), run +```bash +python test.py --gpu 0-3 --cfg ../assets/yaml/3dpw.yml --exp_dir ../output/exp_04-06_23:43 --test_epoch 10 +``` +You can replace `--exp_dir` with your own experiment directories.
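As a bridge to the yaml configs that follow, here is a small illustrative sketch of how their `lr`, `lr_dec_epoch`, and `lr_dec_factor` fields produce a stepwise learning-rate schedule; it mirrors the `Trainer.set_lr` logic in `data_processing/common/base.py` further down in this diff. The helper function is hypothetical, meant only to make the decay arithmetic concrete, and uses the values from `3dpw_crowd.yml`.

```python
def lr_at_epoch(epoch, base_lr=1e-4, lr_dec_epoch=(4, 6), lr_dec_factor=10):
    # Count how many decay milestones have been reached, then divide
    # the base learning rate by lr_dec_factor once per milestone.
    passed = sum(1 for e in lr_dec_epoch if epoch >= e)
    return base_lr / (lr_dec_factor ** passed)

# With lr=0.0001, lr_dec_epoch=[4, 6], lr_dec_factor=10 (3dpw_crowd.yml):
# epochs 0-3 -> 1e-4, epochs 4-5 -> 1e-5, epochs 6-9 -> 1e-6
print([lr_at_epoch(e) for e in range(10)])
```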
\ No newline at end of file diff --git a/data_processing/assets/yaml/3dpw.yml b/data_processing/assets/yaml/3dpw.yml new file mode 100644 index 0000000..fcf1ed3 --- /dev/null +++ b/data_processing/assets/yaml/3dpw.yml @@ -0,0 +1,10 @@ +trainset_3d: ['Human36M', 'MuCo'] +trainset_2d: ['MSCOCO', 'MPII'] +testset: 'PW3D' + +lr_dec_epoch: [10] +end_epoch: 11 +lr: 0.001 +lr_backbone: 0.0001 +lr_dec_factor: 10 + diff --git a/data_processing/assets/yaml/3dpw_crowd.yml b/data_processing/assets/yaml/3dpw_crowd.yml new file mode 100644 index 0000000..e10df3e --- /dev/null +++ b/data_processing/assets/yaml/3dpw_crowd.yml @@ -0,0 +1,13 @@ +trainset_3d: ['Human36M', 'MuCo'] +trainset_2d: ['MSCOCO', 'CrowdPose','MPII'] +testset: 'PW3D' + +lr_dec_epoch: [4,6] +end_epoch: 10 +lr: 0.0001 +lr_backbone: 0.0001 +lr_dec_factor: 10 + +crowd: True + +finetune: True \ No newline at end of file diff --git a/data_processing/common/base.py b/data_processing/common/base.py new file mode 100644 index 0000000..e0c65b5 --- /dev/null +++ b/data_processing/common/base.py @@ -0,0 +1,199 @@ +import os +import os.path as osp +import math +import time +import glob +import abc +from torch.utils.data import DataLoader +import torch.optim +import torchvision.transforms as transforms +from timer import Timer +from logger import colorlogger +from torch.nn.parallel.data_parallel import DataParallel +from config import cfg +from model import get_model +from dataset import MultipleDatasets +import os +import cv2 + +dataset_list = ['CrowdPose', 'Human36M', 'MPII', 'MSCOCO', 'MuCo', 'PW3D'] +for i in range(len(dataset_list)): + exec('from ' + dataset_list[i] + ' import ' + dataset_list[i]) + + +class Base(object): + __metaclass__ = abc.ABCMeta + + def __init__(self, log_name='logs.txt'): + self.cur_epoch = 0 + + # timer + self.tot_timer = Timer() + self.gpu_timer = Timer() + self.read_timer = Timer() + + # logger + self.logger = colorlogger(cfg.log_dir, log_name=log_name) + + @abc.abstractmethod + def _make_batch_generator(self): + return + + @abc.abstractmethod + def _make_model(self): + return + + +class Trainer(Base): + def __init__(self): + super(Trainer, self).__init__(log_name = 'train_logs.txt') + + def get_optimizer(self, model): + optimizer = torch.optim.Adam([ + {'params': model.module.backbone.parameters(), 'lr': cfg.lr_backbone}, + {'params': model.module.pose2feat.parameters()}, + {'params': model.module.position_net.parameters()}, + {'params': model.module.rotation_net.parameters()}, + ], + lr=cfg.lr) + print('The parameters of backbone, pose2feat, position_net, rotation_net, are added to the optimizer.') + + return optimizer + + def save_model(self, state, epoch,itr = None): + if itr is None: + file_path = osp.join(cfg.model_dir, 'snapshot_{}.pth.tar'.format(str(epoch))) + else: + file_path = osp.join(cfg.model_dir, 'snapshot_{}_{}.pth.tar'.format(str(epoch), str(itr))) + torch.save(state, file_path) + self.logger.info("Write snapshot into {}".format(file_path)) + + def save_visualization(self, inputs, targets, meta_info, epoch,itr): + viz_predicts = self.model.module.get_visualization(inputs, targets, meta_info) + + for idx,viz in enumerate(viz_predicts): + file_path = osp.join(cfg.vis_dir, f'epoch_{epoch:05d}_itr_{itr:05d}_sample_{idx}.png') + if idx ==0: + self.logger.info(f'Write visualization into {file_path}') + cv2.imwrite(file_path, viz) + + def load_model(self, model, optimizer): + model_file_list = glob.glob(osp.join(cfg.model_dir,'*.pth.tar')) + cur_epoch = max([int(file_name[file_name.find('snapshot_') + 9 : 
file_name.find('.pth.tar')]) for file_name in model_file_list]) + ckpt_path = osp.join(cfg.model_dir, 'snapshot_' + str(cur_epoch) + '.pth.tar') + ckpt = torch.load(ckpt_path) + start_epoch = ckpt['epoch'] + 1 + + + model.load_state_dict(ckpt['network'], strict=False) + #optimizer.load_state_dict(ckpt['optimizer']) + + self.logger.info('Load checkpoint from {}'.format(ckpt_path)) + return start_epoch, model, optimizer + + def set_lr(self, epoch): + for e in cfg.lr_dec_epoch: + if epoch < e: + break + if epoch < cfg.lr_dec_epoch[-1]: + idx = cfg.lr_dec_epoch.index(e) + for g in self.optimizer.param_groups: + g['lr'] = cfg.lr / (cfg.lr_dec_factor ** idx) + else: + for g in self.optimizer.param_groups: + g['lr'] = cfg.lr / (cfg.lr_dec_factor ** len(cfg.lr_dec_epoch)) + + def get_lr(self): + for g in self.optimizer.param_groups: + cur_lr = g['lr'] + return cur_lr + + def _make_batch_generator(self): + # data load and construct batch generator + self.logger.info("Creating dataset...") + trainset3d_loader = [] + for i in range(len(cfg.trainset_3d)): + print(f'Creating 3d dataset {cfg.trainset_3d[i]}...') + trainset3d_loader.append(eval(cfg.trainset_3d[i])(transforms.ToTensor(), "train")) + trainset2d_loader = [] + for i in range(len(cfg.trainset_2d)): + print(f'Creating 2d dataset {cfg.trainset_2d[i]}...') + trainset2d_loader.append(eval(cfg.trainset_2d[i])(transforms.ToTensor(), "train")) + + if len(trainset3d_loader) > 0 and len(trainset2d_loader) > 0: + self.vertex_num = trainset3d_loader[0].vertex_num + self.joint_num = trainset3d_loader[0].joint_num + trainset3d_loader = MultipleDatasets(trainset3d_loader, make_same_len=False) + trainset2d_loader = MultipleDatasets(trainset2d_loader, make_same_len=False) + trainset_loader = MultipleDatasets([trainset3d_loader, trainset2d_loader], make_same_len=True) + elif len(trainset3d_loader) > 0: + self.vertex_num = trainset3d_loader[0].vertex_num + self.joint_num = trainset3d_loader[0].joint_num + trainset_loader = MultipleDatasets(trainset3d_loader, make_same_len=False) + elif len(trainset2d_loader) > 0: + self.vertex_num = trainset2d_loader[0].vertex_num + self.joint_num = trainset2d_loader[0].joint_num + trainset_loader = MultipleDatasets(trainset2d_loader, make_same_len=False) + else: + assert 0, "Both 3D training set and 2D training set have zero length." 
+ + self.itr_per_epoch = math.ceil(len(trainset_loader) / cfg.num_gpus / cfg.train_batch_size) + self.batch_generator = DataLoader(dataset=trainset_loader, batch_size=cfg.num_gpus*cfg.train_batch_size, shuffle=True, num_workers=cfg.num_thread, pin_memory=True) + + def _make_model(self): + # prepare network + self.logger.info("Creating graph and optimizer...") + model = get_model(self.vertex_num, self.joint_num, 'train') + model = DataParallel(model).cuda() + optimizer = self.get_optimizer(model) + if cfg.continue_train: + start_epoch, model, optimizer = self.load_model(model, optimizer) + if cfg.finetune: + start_epoch = 0 + else: + start_epoch = 0 + model.train() + + self.start_epoch = start_epoch + self.model = model + self.optimizer = optimizer + + +class Tester(Base): + def __init__(self, test_epoch): + self.test_epoch = int(test_epoch) + super(Tester, self).__init__(log_name = 'test_logs.txt') + + def _make_batch_generator(self): + # data load and construct batch generator + self.logger.info("Creating dataset...") + testset_loader = eval(cfg.testset)(transforms.ToTensor(), "test") + batch_generator = DataLoader(dataset=testset_loader, batch_size=cfg.num_gpus*cfg.test_batch_size, shuffle=False, num_workers=cfg.num_thread, pin_memory=True) + + self.testset = testset_loader + self.vertex_num = testset_loader.vertex_num + self.joint_num = testset_loader.joint_num + self.batch_generator = batch_generator + + def _make_model(self): + model_path = os.path.join(cfg.model_dir, 'snapshot_%d.pth.tar' % self.test_epoch) + assert os.path.exists(model_path), 'Cannot find model at ' + model_path + self.logger.info('Load checkpoint from {}'.format(model_path)) + + # prepare network + self.logger.info("Creating graph...") + model = get_model(self.vertex_num, self.joint_num, 'test') + model = DataParallel(model).cuda() + ckpt = torch.load(model_path) + model.load_state_dict(ckpt['network'], strict=False) + model.eval() + + self.model = model + + def _evaluate(self, outs, cur_sample_idx): + eval_result = self.testset.evaluate(outs, cur_sample_idx) + return eval_result + + def _print_eval_result(self, eval_result): + self.testset.print_eval_result(eval_result) + diff --git a/data_processing/common/logger.py b/data_processing/common/logger.py new file mode 100644 index 0000000..a117118 --- /dev/null +++ b/data_processing/common/logger.py @@ -0,0 +1,50 @@ +import logging +import os + +OK = '\033[92m' +WARNING = '\033[93m' +FAIL = '\033[91m' +END = '\033[0m' + +PINK = '\033[95m' +BLUE = '\033[94m' +GREEN = OK +RED = FAIL +WHITE = END +YELLOW = WARNING + +class colorlogger(): + def __init__(self, log_dir, log_name='train_logs.txt'): + # set log + self._logger = logging.getLogger(log_name) + self._logger.setLevel(logging.INFO) + log_file = os.path.join(log_dir, log_name) + if not os.path.exists(log_dir): + os.makedirs(log_dir) + file_log = logging.FileHandler(log_file, mode='a') + file_log.setLevel(logging.INFO) + console_log = logging.StreamHandler() + console_log.setLevel(logging.INFO) + formatter = logging.Formatter( + "{}%(asctime)s{} %(message)s".format(GREEN, END), + "%m-%d %H:%M:%S") + file_log.setFormatter(formatter) + console_log.setFormatter(formatter) + self._logger.addHandler(file_log) + self._logger.addHandler(console_log) + + def debug(self, msg): + self._logger.debug(str(msg)) + + def info(self, msg): + self._logger.info(str(msg)) + + def warning(self, msg): + self._logger.warning(WARNING + 'WRN: ' + str(msg) + END) + + def critical(self, msg): + self._logger.critical(RED + 'CRI: ' + str(msg) + 
END) + + def error(self, msg): + self._logger.error(RED + 'ERR: ' + str(msg) + END) + diff --git a/data_processing/common/nets/layer.py b/data_processing/common/nets/layer.py new file mode 100644 index 0000000..11c41db --- /dev/null +++ b/data_processing/common/nets/layer.py @@ -0,0 +1,110 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F + +from config import cfg + +def make_linear_layers(feat_dims, relu_final=True, use_bn=False): + layers = [] + for i in range(len(feat_dims)-1): + layers.append(nn.Linear(feat_dims[i], feat_dims[i+1])) + + # Do not use ReLU for final estimation + if i < len(feat_dims)-2 or (i == len(feat_dims)-2 and relu_final): + if use_bn: + layers.append(nn.BatchNorm1d(feat_dims[i+1])) + layers.append(nn.ReLU(inplace=True)) + + return nn.Sequential(*layers) + +def make_conv_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True): + layers = [] + for i in range(len(feat_dims)-1): + layers.append( + nn.Conv2d( + in_channels=feat_dims[i], + out_channels=feat_dims[i+1], + kernel_size=kernel, + stride=stride, + padding=padding + )) + # Do not use BN and ReLU for final estimation + if i < len(feat_dims)-2 or (i == len(feat_dims)-2 and bnrelu_final): + layers.append(nn.BatchNorm2d(feat_dims[i+1])) + layers.append(nn.ReLU(inplace=True)) + + return nn.Sequential(*layers) + +def make_conv1d_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True): + layers = [] + for i in range(len(feat_dims)-1): + layers.append( + nn.Conv1d( + in_channels=feat_dims[i], + out_channels=feat_dims[i+1], + kernel_size=kernel, + stride=stride, + padding=padding + )) + # Do not use BN and ReLU for final estimation + if i < len(feat_dims)-2 or (i == len(feat_dims)-2 and bnrelu_final): + layers.append(nn.BatchNorm1d(feat_dims[i+1])) + layers.append(nn.ReLU(inplace=True)) + + return nn.Sequential(*layers) + +def make_deconv_layers(feat_dims, bnrelu_final=True): + layers = [] + for i in range(len(feat_dims)-1): + layers.append( + nn.ConvTranspose2d( + in_channels=feat_dims[i], + out_channels=feat_dims[i+1], + kernel_size=4, + stride=2, + padding=1, + output_padding=0, + bias=False)) + + # Do not use BN and ReLU for final estimation + if i < len(feat_dims)-2 or (i == len(feat_dims)-2 and bnrelu_final): + layers.append(nn.BatchNorm2d(feat_dims[i+1])) + layers.append(nn.ReLU(inplace=True)) + + return nn.Sequential(*layers) + + +class GraphConvBlock(nn.Module): + def __init__(self, adj, dim_in, dim_out): + super(GraphConvBlock, self).__init__() + self.adj = adj + self.vertex_num = adj.shape[0] + self.fcbn_list = nn.ModuleList([nn.Sequential(*[nn.Linear(dim_in, dim_out), nn.BatchNorm1d(dim_out)]) for _ in range(self.vertex_num)]) + + def forward(self, feat): + batch_size = feat.shape[0] + + # apply kernel for each vertex + feat = torch.stack([fcbn(feat[:,i,:]) for i,fcbn in enumerate(self.fcbn_list)],1) + + # apply adj + adj = self.adj.cuda()[None,:,:].repeat(batch_size,1,1) + feat = torch.bmm(adj, feat) + + # apply activation function + out = F.relu(feat) + return out + + +class GraphResBlock(nn.Module): + def __init__(self, adj, dim): + super(GraphResBlock, self).__init__() + self.adj = adj + self.graph_block1 = GraphConvBlock(adj, dim, dim) + self.graph_block2 = GraphConvBlock(adj, dim, dim) + + def forward(self, feat): + feat_out = self.graph_block1(feat) + feat_out = self.graph_block2(feat_out) + out = feat_out + feat + return out diff --git a/data_processing/common/nets/loss.py b/data_processing/common/nets/loss.py new file mode 100644 index 
0000000..0de65eb --- /dev/null +++ b/data_processing/common/nets/loss.py @@ -0,0 +1,82 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F +import numpy as np +from config import cfg + +class CoordLoss(nn.Module): + def __init__(self): + super(CoordLoss, self).__init__() + + def forward(self, coord_out, coord_gt, valid, is_3D=None): + loss = torch.abs(coord_out - coord_gt) * valid + if is_3D is not None: + loss_z = loss[:,:,2:] * is_3D[:,None,None].float() + loss = torch.cat((loss[:,:,:2], loss_z),2) + + return loss + +class ParamLoss(nn.Module): + def __init__(self): + super(ParamLoss, self).__init__() + + def forward(self, param_out, param_gt, valid): + loss = torch.abs(param_out - param_gt) * valid + return loss + +class NormalVectorLoss(nn.Module): + def __init__(self, face): + super(NormalVectorLoss, self).__init__() + self.face = face + + def forward(self, coord_out, coord_gt, valid): + face = torch.LongTensor(self.face).cuda() + + v1_out = coord_out[:,face[:,1],:] - coord_out[:,face[:,0],:] + v1_out = F.normalize(v1_out, p=2, dim=2) # L2 normalize to make unit vector + v2_out = coord_out[:,face[:,2],:] - coord_out[:,face[:,0],:] + v2_out = F.normalize(v2_out, p=2, dim=2) # L2 normalize to make unit vector + v3_out = coord_out[:,face[:,2],:] - coord_out[:,face[:,1],:] + v3_out = F.normalize(v3_out, p=2, dim=2) # L2 nroamlize to make unit vector + + v1_gt = coord_gt[:,face[:,1],:] - coord_gt[:,face[:,0],:] + v1_gt = F.normalize(v1_gt, p=2, dim=2) # L2 normalize to make unit vector + v2_gt = coord_gt[:,face[:,2],:] - coord_gt[:,face[:,0],:] + v2_gt = F.normalize(v2_gt, p=2, dim=2) # L2 normalize to make unit vector + normal_gt = torch.cross(v1_gt, v2_gt, dim=2) + normal_gt = F.normalize(normal_gt, p=2, dim=2) # L2 normalize to make unit vector + + valid_mask = valid[:,face[:,0],:] * valid[:,face[:,1],:] * valid[:,face[:,2],:] + + cos1 = torch.abs(torch.sum(v1_out * normal_gt, 2, keepdim=True)) * valid_mask + cos2 = torch.abs(torch.sum(v2_out * normal_gt, 2, keepdim=True)) * valid_mask + cos3 = torch.abs(torch.sum(v3_out * normal_gt, 2, keepdim=True)) * valid_mask + loss = torch.cat((cos1, cos2, cos3),1) + return loss + +class EdgeLengthLoss(nn.Module): + def __init__(self, face): + super(EdgeLengthLoss, self).__init__() + self.face = face + + def forward(self, coord_out, coord_gt, valid): + face = torch.LongTensor(self.face).cuda() + + d1_out = torch.sqrt(torch.sum((coord_out[:,face[:,0],:] - coord_out[:,face[:,1],:])**2,2,keepdim=True)) + d2_out = torch.sqrt(torch.sum((coord_out[:,face[:,0],:] - coord_out[:,face[:,2],:])**2,2,keepdim=True)) + d3_out = torch.sqrt(torch.sum((coord_out[:,face[:,1],:] - coord_out[:,face[:,2],:])**2,2,keepdim=True)) + + d1_gt = torch.sqrt(torch.sum((coord_gt[:,face[:,0],:] - coord_gt[:,face[:,1],:])**2,2,keepdim=True)) + d2_gt = torch.sqrt(torch.sum((coord_gt[:,face[:,0],:] - coord_gt[:,face[:,2],:])**2,2,keepdim=True)) + d3_gt = torch.sqrt(torch.sum((coord_gt[:,face[:,1],:] - coord_gt[:,face[:,2],:])**2,2,keepdim=True)) + + valid_mask_1 = valid[:,face[:,0],:] * valid[:,face[:,1],:] + valid_mask_2 = valid[:,face[:,0],:] * valid[:,face[:,2],:] + valid_mask_3 = valid[:,face[:,1],:] * valid[:,face[:,2],:] + + diff1 = torch.abs(d1_out - d1_gt) * valid_mask_1 + diff2 = torch.abs(d2_out - d2_gt) * valid_mask_2 + diff3 = torch.abs(d3_out - d3_gt) * valid_mask_3 + loss = torch.cat((diff1, diff2, diff3),1) + return loss + diff --git a/data_processing/common/nets/module.py b/data_processing/common/nets/module.py new file mode 100644 index 
0000000..c604e12 --- /dev/null +++ b/data_processing/common/nets/module.py @@ -0,0 +1,152 @@ +import os.path as osp +import torch +import torch.nn as nn +from torch.nn import functional as F +from config import cfg +from human_body_prior.tools.model_loader import load_vposer +import torchgeometry as tgm +from nets.layer import make_conv_layers, make_deconv_layers, make_conv1d_layers, make_linear_layers, GraphConvBlock, GraphResBlock +from utils.mano import MANO +from utils.smpl import SMPL + + +class Pose2Feat(nn.Module): + def __init__(self, joint_num): + super(Pose2Feat, self).__init__() + self.joint_num = joint_num + self.conv = make_conv_layers([64+joint_num,64]) + + def forward(self, img_feat, joint_heatmap): + feat = torch.cat((img_feat, joint_heatmap),1) + feat = self.conv(feat) + return feat + + +class PositionNet(nn.Module): + def __init__(self): + super(PositionNet, self).__init__() + if 'FreiHAND' in cfg.trainset_3d + cfg.trainset_2d + [cfg.testset]: + self.human_model = MANO() + self.joint_num = self.human_model.graph_joint_num + else: + self.human_model = SMPL() + self.joint_num = self.human_model.graph_joint_num + + self.hm_shape = [cfg.output_hm_shape[0] // 8, cfg.output_hm_shape[1] // 8, cfg.output_hm_shape[2] // 8] + self.conv = make_conv_layers([2048, self.joint_num * self.hm_shape[0]], kernel=1, stride=1, padding=0, bnrelu_final=False) + + def soft_argmax_3d(self, heatmap3d): + heatmap3d = heatmap3d.reshape((-1, self.joint_num, self.hm_shape[0] * self.hm_shape[1] * self.hm_shape[2])) + heatmap3d = F.softmax(heatmap3d, 2) + heatmap3d = heatmap3d.reshape((-1, self.joint_num, self.hm_shape[0], self.hm_shape[1], self.hm_shape[2])) + + accu_x = heatmap3d.sum(dim=(2, 3)) + accu_y = heatmap3d.sum(dim=(2, 4)) + accu_z = heatmap3d.sum(dim=(3, 4)) + + accu_x = accu_x * torch.arange(self.hm_shape[2]).float().cuda()[None, None, :] + accu_y = accu_y * torch.arange(self.hm_shape[1]).float().cuda()[None, None, :] + accu_z = accu_z * torch.arange(self.hm_shape[0]).float().cuda()[None, None, :] + + accu_x = accu_x.sum(dim=2, keepdim=True) + accu_y = accu_y.sum(dim=2, keepdim=True) + accu_z = accu_z.sum(dim=2, keepdim=True) + + coord_out = torch.cat((accu_x, accu_y, accu_z), dim=2) + return coord_out + + def forward(self, img_feat): + # joint heatmap + joint_heatmap = self.conv(img_feat).view(-1, self.joint_num, self.hm_shape[0], self.hm_shape[1], self.hm_shape[2]) + + # joint coord + joint_coord = self.soft_argmax_3d(joint_heatmap) + + # joint score sampling + scores = [] + joint_heatmap = joint_heatmap.view(-1, self.joint_num, self.hm_shape[0] * self.hm_shape[1] * self.hm_shape[2]) + joint_heatmap = F.softmax(joint_heatmap, 2) + joint_heatmap = joint_heatmap.view(-1, self.joint_num, self.hm_shape[0], self.hm_shape[1], self.hm_shape[2]) + for j in range(self.joint_num): + x = joint_coord[:, j, 0] / (self.hm_shape[2] - 1) * 2 - 1 + y = joint_coord[:, j, 1] / (self.hm_shape[1] - 1) * 2 - 1 + z = joint_coord[:, j, 2] / (self.hm_shape[0] - 1) * 2 - 1 + grid = torch.stack((x, y, z), 1)[:, None, None, None, :] + score_j = F.grid_sample(joint_heatmap[:, j, None, :, :, :], grid, align_corners=True)[:, 0, 0, 0, 0] # (batch_size) + scores.append(score_j) + scores = torch.stack(scores) # (joint_num, batch_size) + joint_score = scores.permute(1, 0)[:, :, None] # (batch_size, joint_num, 1) + return joint_coord, joint_score + + +class RotationNet(nn.Module): + def __init__(self): + super(RotationNet, self).__init__() + + if 'FreiHAND' in cfg.trainset_3d + cfg.trainset_2d + [cfg.testset]: + 
self.human_model = MANO() + self.joint_num = self.human_model.graph_joint_num + self.graph_adj = torch.from_numpy(self.human_model.graph_adj).float() + else: + self.human_model = SMPL() + self.joint_num = self.human_model.graph_joint_num + self.graph_adj = torch.from_numpy(self.human_model.graph_adj).float() + + # graph convs + self.graph_block = nn.Sequential(*[\ + GraphConvBlock(self.graph_adj, 2048+4, 128), + GraphResBlock(self.graph_adj, 128), + GraphResBlock(self.graph_adj, 128), + GraphResBlock(self.graph_adj, 128), + GraphResBlock(self.graph_adj, 128)]) + + self.hm_shape = [cfg.output_hm_shape[0] // 8, cfg.output_hm_shape[1] // 8, cfg.output_hm_shape[2] // 8] + + self.root_pose_out = make_linear_layers([self.joint_num*128, 6], relu_final=False) + self.pose_out = make_linear_layers([self.joint_num*128, self.human_model.vposer_code_dim], relu_final=False) # vposer latent code + self.shape_out = make_linear_layers([self.joint_num*128, self.human_model.shape_param_dim], relu_final=False) + self.cam_out = make_linear_layers([self.joint_num*128,3], relu_final=False) + + def sample_image_feature(self, img_feat, joint_coord_img): + img_feat_joints = [] + for j in range(self.joint_num): + x = joint_coord_img [: ,j,0] / (self.hm_shape[2]-1) * 2 - 1 + y = joint_coord_img [: ,j,1] / (self.hm_shape[1]-1) * 2 - 1 + grid = torch.stack( (x, y),1) [:,None,None,:] + img_feat = img_feat.float() + img_feat_j = F.grid_sample(img_feat, grid, align_corners=True) [: , : , 0, 0] # (batch_size, channel_dim) + img_feat_joints.append(img_feat_j) + img_feat_joints = torch.stack(img_feat_joints) # (joint_num, batch_size, channel_dim) + img_feat_joints = img_feat_joints.permute(1, 0 ,2) # (batch_size, joint_num, channel_dim) + return img_feat_joints + + def forward(self, img_feat, joint_coord_img, joint_score): + # pose parameter + img_feat_joints = self.sample_image_feature(img_feat, joint_coord_img) + feat = torch.cat((img_feat_joints, joint_coord_img, joint_score),2) + feat = self.graph_block(feat) + root_pose = self.root_pose_out(feat.view(-1,self.joint_num*128)) + pose_param = self.pose_out(feat.view(-1,self.joint_num*128)) + # shape parameter + shape_param = self.shape_out(feat.view(-1,self.joint_num*128)) + # camera parameter + cam_param = self.cam_out(feat.view(-1,self.joint_num*128)) + + return root_pose, pose_param, shape_param, cam_param + + +class Vposer(nn.Module): + def __init__(self): + super(Vposer, self).__init__() + self.vposer, _ = load_vposer(osp.join(cfg.human_model_path, 'smpl', 'VPOSER_CKPT'), vp_model='snapshot') + self.vposer.eval() + + def forward(self, z): + batch_size = z.shape[0] + body_pose = self.vposer.decode(z, output_type='aa').view(batch_size,-1 ).view(-1,24-3,3) # without root, R_Hand, L_Hand + zero_pose = torch.zeros((batch_size,1,3)).float().cuda() + + # attach zero hand poses + body_pose = torch.cat((body_pose, zero_pose, zero_pose),1) + body_pose = body_pose.view(batch_size,-1) + return body_pose diff --git a/data_processing/common/nets/resnet.py b/data_processing/common/nets/resnet.py new file mode 100644 index 0000000..4c4e6d1 --- /dev/null +++ b/data_processing/common/nets/resnet.py @@ -0,0 +1,81 @@ +import torch +import torch.nn as nn +from torchvision.models.resnet import BasicBlock, Bottleneck +from torchvision.models.resnet import model_urls + +class ResNetBackbone(nn.Module): + + def __init__(self, resnet_type): + + resnet_spec = {18: (BasicBlock, [2, 2, 2, 2], [64, 64, 128, 256, 512], 'resnet18'), + 34: (BasicBlock, [3, 4, 6, 3], [64, 64, 128, 256, 512], 
'resnet34'), + 50: (Bottleneck, [3, 4, 6, 3], [64, 256, 512, 1024, 2048], 'resnet50'), + 101: (Bottleneck, [3, 4, 23, 3], [64, 256, 512, 1024, 2048], 'resnet101'), + 152: (Bottleneck, [3, 8, 36, 3], [64, 256, 512, 1024, 2048], 'resnet152')} + block, layers, channels, name = resnet_spec[resnet_type] + + self.name = name + self.inplanes = 64 + super(ResNetBackbone, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + nn.init.normal_(m.weight, mean=0, std=0.001) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x, skip_early=False): + if not skip_early: + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + return x + + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + + return x4 + + def init_weights(self): + org_resnet = torch.utils.model_zoo.load_url(model_urls[self.name]) + # drop orginal resnet fc layer, add 'None' in case of no fc layer, that will raise error + org_resnet.pop('fc.weight', None) + org_resnet.pop('fc.bias', None) + + self.load_state_dict(org_resnet) + print("Initialize resnet from model zoo") + + diff --git a/data_processing/common/timer.py b/data_processing/common/timer.py new file mode 100644 index 0000000..7152ae9 --- /dev/null +++ b/data_processing/common/timer.py @@ -0,0 +1,38 @@ +# -------------------------------------------------------- +# Fast R-CNN +# Copyright (c) 2015 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ross Girshick +# -------------------------------------------------------- + +import time + +class Timer(object): + """A simple timer.""" + def __init__(self): + self.total_time = 0. + self.calls = 0 + self.start_time = 0. + self.diff = 0. + self.average_time = 0. 
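+ # warm-up counter: the first 10 toc() calls return the raw elapsed time and are not accumulated into the running average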
+ self.warm_up = 0 + + def tic(self): + # using time.time instead of time.clock because time time.clock + # does not normalize for multithreading + self.start_time = time.time() + + def toc(self, average=True): + self.diff = time.time() - self.start_time + if self.warm_up < 10: + self.warm_up += 1 + return self.diff + else: + self.total_time += self.diff + self.calls += 1 + self.average_time = self.total_time / self.calls + + if average: + return self.average_time + else: + return self.diff diff --git a/data_processing/common/utils/__init__.py b/data_processing/common/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/common/utils/dir.py b/data_processing/common/utils/dir.py new file mode 100644 index 0000000..410433d --- /dev/null +++ b/data_processing/common/utils/dir.py @@ -0,0 +1,11 @@ +import os +import sys + +def make_folder(folder_name): + if not os.path.exists(folder_name): + os.makedirs(folder_name) + +def add_pypath(path): + if path not in sys.path: + sys.path.insert(0, path) + diff --git a/data_processing/common/utils/mano.py b/data_processing/common/utils/mano.py new file mode 100644 index 0000000..04fe1d2 --- /dev/null +++ b/data_processing/common/utils/mano.py @@ -0,0 +1,36 @@ +import numpy as np +import torch +import os.path as osp +import json +from config import cfg + +import sys +sys.path.insert(0, cfg.mano_path) +import manopth +from manopth.manolayer import ManoLayer + +class MANO(object): + def __init__(self): + self.layer = self.get_layer() + self.vertex_num = 778 + self.face = self.layer.th_faces.numpy() + self.joint_regressor = self.layer.th_J_regressor.numpy() + + self.joint_num = 21 + self.joints_name = ('Wrist', 'Thumb_1', 'Thumb_2', 'Thumb_3', 'Thumb_4', 'Index_1', 'Index_2', 'Index_3', 'Index_4', 'Middle_1', 'Middle_2', 'Middle_3', 'Middle_4', 'Ring_1', 'Ring_2', 'Ring_3', 'Ring_4', 'Pinky_1', 'Pinky_2', 'Pinky_3', 'Pinly_4') + self.skeleton = ( (0,1), (0,5), (0,9), (0,13), (0,17), (1,2), (2,3), (3,4), (5,6), (6,7), (7,8), (9,10), (10,11), (11,12), (13,14), (14,15), (15,16), (17,18), (18,19), (19,20) ) + self.root_joint_idx = self.joints_name.index('Wrist') + + # add fingertips to joint_regressor + self.fingertip_vertex_idx = [745, 317, 444, 556, 673] # mesh vertex idx (right hand) + thumbtip_onehot = np.array([1 if i == 745 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + indextip_onehot = np.array([1 if i == 317 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + middletip_onehot = np.array([1 if i == 445 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + ringtip_onehot = np.array([1 if i == 556 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + pinkytip_onehot = np.array([1 if i == 673 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + self.joint_regressor = np.concatenate((self.joint_regressor, thumbtip_onehot, indextip_onehot, middletip_onehot, ringtip_onehot, pinkytip_onehot)) + self.joint_regressor = self.joint_regressor[[0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20],:] + + def get_layer(self): + return ManoLayer(mano_root=osp.join(cfg.mano_path, 'mano', 'models'), flat_hand_mean=False, use_pca=False) # load right hand MANO model + diff --git a/data_processing/common/utils/manopth/.gitignore b/data_processing/common/utils/manopth/.gitignore new file mode 100644 index 0000000..5d99a91 --- /dev/null 
+++ b/data_processing/common/utils/manopth/.gitignore @@ -0,0 +1,12 @@ +*.sw* +*.bak +*_bak.py + +.cache/ +__pycache__/ +build/ +dist/ +manopth_hassony2.egg-info/ + +mano/models +assets/mano_layer.svg diff --git a/data_processing/common/utils/manopth/LICENSE b/data_processing/common/utils/manopth/LICENSE new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/data_processing/common/utils/manopth/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. 
+ + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/data_processing/common/utils/manopth/README.md b/data_processing/common/utils/manopth/README.md new file mode 100644 index 0000000..07ba23d --- /dev/null +++ b/data_processing/common/utils/manopth/README.md @@ -0,0 +1,135 @@ +Manopth +======= + +[MANO](http://mano.is.tue.mpg.de) layer for [PyTorch](https://pytorch.org/) (tested with v0.4 and v1.x) + +ManoLayer is a differentiable PyTorch layer that deterministically maps from pose and shape parameters to hand joints and vertices. +It can be integrated into any architecture as a differentiable layer to predict hand meshes. + +![image](assets/mano_layer.png) + +ManoLayer takes **batched** hand pose and shape vectors and outputs corresponding hand joints and vertices. 
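+
+As a rough sketch of that integration (not part of the manopth examples: the `HandDecoder` module, the `feat_dim` of 512, and the random input below are made up for illustration, and it assumes the MANO pickles have been downloaded into `mano/models` as described under Installation), a small head can regress pose and shape and let ManoLayer decode them, with gradients flowing back through the layer:
+
+```python
+import torch
+from torch import nn
+from manopth.manolayer import ManoLayer
+
+class HandDecoder(nn.Module):
+    """Hypothetical regressor: image features -> MANO pose/shape -> hand mesh."""
+    def __init__(self, feat_dim=512, ncomps=6):
+        super().__init__()
+        self.pose_head = nn.Linear(feat_dim, ncomps + 3)  # 3 extra values: global axis-angle rotation
+        self.shape_head = nn.Linear(feat_dim, 10)          # 10 shape betas
+        self.mano = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=ncomps)
+
+    def forward(self, feat):
+        pose = self.pose_head(feat)
+        shape = self.shape_head(feat)
+        verts, joints = self.mano(pose, shape)  # differentiable w.r.t. pose and shape
+        return verts, joints
+
+decoder = HandDecoder()
+verts, joints = decoder(torch.rand(4, 512))  # batch of 4 feature vectors
+torch.norm(verts).backward()                 # gradients reach both heads through ManoLayer
+```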
+ +The code is mostly a PyTorch port of the original [MANO](http://mano.is.tue.mpg.de) model from [chumpy](https://github.com/mattloper/chumpy) to [PyTorch](https://pytorch.org/). +It therefore builds directly upon the work of Javier Romero, Dimitrios Tzionas and Michael J. Black. + +This layer was developped and used for the paper *Learning joint reconstruction of hands and manipulated objects* for CVPR19. +See [project page](https://github.com/hassony2/obman) and [demo+training code](https://github.com/hassony2/obman_train). + + +It [reuses](https://github.com/hassony2/manopth/blob/master/manopth/rodrigues_layer.py) [part of the great code](https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py) from the [Pytorch layer for the SMPL body model](https://github.com/MandyMo/pytorch_HMR/blob/master/README.md) by Zhang Xiong ([MandyMo](https://github.com/MandyMo)) to compute the rotation utilities ! + +It also includes in `mano/webuser` partial content of files from the original [MANO](http://mano.is.tue.mpg.de) code ([posemapper.py](mano/webuser/posemapper.py), [serialization.py](mano/webuser/serialization.py), [lbs.py](mano/webuser/lbs.py), [verts.py](mano/webuser/verts.py), [smpl_handpca_wrapper_HAND_only.py](mano/webuser/smpl_handpca_wrapper_HAND_only.py)). + +If you find this code useful for your research, consider citing: + +- the original [MANO](http://mano.is.tue.mpg.de) publication: + +``` +@article{MANO:SIGGRAPHASIA:2017, + title = {Embodied Hands: Modeling and Capturing Hands and Bodies Together}, + author = {Romero, Javier and Tzionas, Dimitrios and Black, Michael J.}, + journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)}, + publisher = {ACM}, + month = nov, + year = {2017}, + url = {http://doi.acm.org/10.1145/3130800.3130883}, + month_numeric = {11} +} +``` + +- the publication this PyTorch port was developped for: + +``` +@INPROCEEDINGS{hasson19_obman, + title = {Learning joint reconstruction of hands and manipulated objects}, + author = {Hasson, Yana and Varol, G{\"u}l and Tzionas, Dimitris and Kalevatykh, Igor and Black, Michael J. and Laptev, Ivan and Schmid, Cordelia}, + booktitle = {CVPR}, + year = {2019} +} +``` + +The training code associated with this paper, compatible with manopth can be found [here](https://github.com/hassony2/obman_train). The release includes a model trained on a variety of hand datasets. 
+ +# Installation + +## Get code and dependencies + +- `git clone https://github.com/hassony2/manopth` +- `cd manopth` +- Install the dependencies listed in [environment.yml](environment.yml) + - In an existing conda environment, `conda env update -f environment.yml` + - In a new environment, `conda env create -f environment.yml`, will create a conda environment named `manopth` + +## Download MANO pickle data-structures + +- Go to [MANO website](http://mano.is.tue.mpg.de/) +- Create an account by clicking *Sign Up* and provide your information +- Download Models and Code (the downloaded file should have the format `mano_v*_*.zip`). Note that all code and data from this download falls under the [MANO license](http://mano.is.tue.mpg.de/license). +- unzip and copy the `models` folder into the `manopth/mano` folder +- Your folder structure should look like this: +``` +manopth/ + mano/ + models/ + MANO_LEFT.pkl + MANO_RIGHT.pkl + ... + manopth/ + __init__.py + ... +``` + +To check that everything is going well, run `python examples/manopth_mindemo.py`, which should generate from a random hand using the MANO layer ! + +## Install `manopth` package + +To be able to import and use `ManoLayer` in another project, go to your `manopth` folder and run `pip install .` + + +`cd /path/to/other/project` + +You can now use `from manopth import ManoLayer` in this other project! + +# Usage + +## Minimal usage script + +See [examples/manopth_mindemo.py](examples/manopth_mindemo.py) + +Simple forward pass with random pose and shape parameters through MANO layer + +```python +import torch +from manopth.manolayer import ManoLayer +from manopth import demo + +batch_size = 10 +# Select number of principal components for pose space +ncomps = 6 + +# Initialize MANO layer +mano_layer = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=ncomps) + +# Generate random shape parameters +random_shape = torch.rand(batch_size, 10) +# Generate random pose parameters, including 3 values for global axis-angle rotation +random_pose = torch.rand(batch_size, ncomps + 3) + +# Forward pass through MANO layer +hand_verts, hand_joints = mano_layer(random_pose, random_shape) +demo.display_hand({'verts': hand_verts, 'joints': hand_joints}, mano_faces=mano_layer.th_faces) +``` + +Result : + +![random hand](assets/random_hand.png) + +## Demo + +With more options, forward and backward pass, and a loop for quick profiling, look at [examples/manopth_demo.py](examples/manopth_demo.py). 
+ +You can run it locally with: + +`python examples/manopth_demo.py` + diff --git a/data_processing/common/utils/manopth/assets/mano_layer.png b/data_processing/common/utils/manopth/assets/mano_layer.png new file mode 100644 index 0000000..2365263 Binary files /dev/null and b/data_processing/common/utils/manopth/assets/mano_layer.png differ diff --git a/data_processing/common/utils/manopth/assets/random_hand.png b/data_processing/common/utils/manopth/assets/random_hand.png new file mode 100644 index 0000000..1331322 Binary files /dev/null and b/data_processing/common/utils/manopth/assets/random_hand.png differ diff --git a/data_processing/common/utils/manopth/environment.yml b/data_processing/common/utils/manopth/environment.yml new file mode 100644 index 0000000..667ae3a --- /dev/null +++ b/data_processing/common/utils/manopth/environment.yml @@ -0,0 +1,12 @@ +name: manopth + +dependencies: + - opencv + - python=3.7 + - matplotlib + - numpy + - pytorch + - tqdm + - git + - pip: + - git+https://github.com/hassony2/chumpy.git diff --git a/data_processing/common/utils/manopth/examples/manopth_demo.py b/data_processing/common/utils/manopth/examples/manopth_demo.py new file mode 100644 index 0000000..72a5186 --- /dev/null +++ b/data_processing/common/utils/manopth/examples/manopth_demo.py @@ -0,0 +1,91 @@ +import argparse + +from matplotlib import pyplot as plt +from mpl_toolkits.mplot3d import Axes3D +import torch +from tqdm import tqdm + +from manopth import argutils +from manopth.manolayer import ManoLayer +from manopth.demo import display_hand + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--batch_size', default=1, type=int) + parser.add_argument('--cuda', action='store_true') + parser.add_argument( + '--no_display', + action='store_true', + help="Disable display output of ManoLayer given random inputs") + parser.add_argument('--side', default='left', choices=['left', 'right']) + parser.add_argument('--random_shape', action='store_true', help="Random hand shape") + parser.add_argument('--rand_mag', type=float, default=1, help="Controls pose variability") + parser.add_argument( + '--flat_hand_mean', + action='store_true', + help="Use flat hand as mean instead of average hand pose") + parser.add_argument( + '--iters', + type=int, + default=1, + help= + "Use for quick profiling of forward and backward pass accross ManoLayer" + ) + parser.add_argument('--mano_root', default='mano/models') + parser.add_argument('--root_rot_mode', default='axisang', choices=['rot6d', 'axisang']) + parser.add_argument('--no_pca', action='store_true', help="Give axis-angle or rotation matrix as inputs instead of PCA coefficients") + parser.add_argument('--joint_rot_mode', default='axisang', choices=['rotmat', 'axisang'], help="Joint rotation inputs") + parser.add_argument( + '--mano_ncomps', default=6, type=int, help="Number of PCA components") + args = parser.parse_args() + + argutils.print_args(args) + + layer = ManoLayer( + flat_hand_mean=args.flat_hand_mean, + side=args.side, + mano_root=args.mano_root, + ncomps=args.mano_ncomps, + use_pca=not args.no_pca, + root_rot_mode=args.root_rot_mode, + joint_rot_mode=args.joint_rot_mode) + if args.root_rot_mode == 'axisang': + rot = 3 + else: + rot = 6 + print(rot) + if args.no_pca: + args.mano_ncomps = 45 + + # Generate random pose coefficients + pose_params = args.rand_mag * torch.rand(args.batch_size, args.mano_ncomps + rot) + pose_params.requires_grad = True + if 
args.random_shape: + shape = torch.rand(args.batch_size, 10) + else: + shape = torch.zeros(1) # Hack to act like None for PyTorch JIT + if args.cuda: + pose_params = pose_params.cuda() + shape = shape.cuda() + layer.cuda() + + # Loop for forward/backward quick profiling + for idx in tqdm(range(args.iters)): + # Forward pass + verts, Jtr = layer(pose_params, th_betas=shape) + + # Backward pass + loss = torch.norm(verts) + loss.backward() + + if not args.no_display: + verts, Jtr = layer(pose_params, th_betas=shape) + joints = Jtr.cpu().detach() + verts = verts.cpu().detach() + + # Draw obtained vertices and joints + display_hand({ + 'verts': verts, + 'joints': joints + }, + mano_faces=layer.th_faces) diff --git a/data_processing/common/utils/manopth/examples/manopth_mindemo.py b/data_processing/common/utils/manopth/examples/manopth_mindemo.py new file mode 100644 index 0000000..10098a0 --- /dev/null +++ b/data_processing/common/utils/manopth/examples/manopth_mindemo.py @@ -0,0 +1,24 @@ +import torch +from manopth.manolayer import ManoLayer +from manopth import demo + +batch_size = 10 +# Select number of principal components for pose space +ncomps = 6 + +# Initialize MANO layer +mano_layer = ManoLayer( + mano_root='mano/models', use_pca=True, ncomps=ncomps, flat_hand_mean=False) + +# Generate random shape parameters +random_shape = torch.rand(batch_size, 10) +# Generate random pose parameters, including 3 values for global axis-angle rotation +random_pose = torch.rand(batch_size, ncomps + 3) + +# Forward pass through MANO layer +hand_verts, hand_joints = mano_layer(random_pose, random_shape) +demo.display_hand({ + 'verts': hand_verts, + 'joints': hand_joints +}, + mano_faces=mano_layer.th_faces) diff --git a/data_processing/common/utils/manopth/mano/__init__.py b/data_processing/common/utils/manopth/mano/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/common/utils/manopth/mano/webuser/__init__.py b/data_processing/common/utils/manopth/mano/webuser/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/common/utils/manopth/mano/webuser/lbs.py b/data_processing/common/utils/manopth/mano/webuser/lbs.py new file mode 100644 index 0000000..5acaf84 --- /dev/null +++ b/data_processing/common/utils/manopth/mano/webuser/lbs.py @@ -0,0 +1,84 @@ +''' +Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. +This software is provided for research purposes only. +By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license + +More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. +For comments or questions, please email us at: mano@tue.mpg.de + + +About this file: +================ +This file defines a wrapper for the loading functions of the MANO model. + +Modules included: +- load_model: + loads the MANO model from a given file location (i.e. a .pkl file location), + or a dictionary object. 
+ +''' + + +from mano.webuser.posemapper import posemap +import chumpy +import numpy as np + + +def global_rigid_transformation(pose, J, kintree_table, xp): + results = {} + pose = pose.reshape((-1, 3)) + id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])} + parent = { + i: id_to_col[kintree_table[0, i]] + for i in range(1, kintree_table.shape[1]) + } + + if xp == chumpy: + from mano.webuser.posemapper import Rodrigues + rodrigues = lambda x: Rodrigues(x) + else: + import cv2 + rodrigues = lambda x: cv2.Rodrigues(x)[0] + + with_zeros = lambda x: xp.vstack((x, xp.array([[0.0, 0.0, 0.0, 1.0]]))) + results[0] = with_zeros( + xp.hstack((rodrigues(pose[0, :]), J[0, :].reshape((3, 1))))) + + for i in range(1, kintree_table.shape[1]): + results[i] = results[parent[i]].dot( + with_zeros( + xp.hstack((rodrigues(pose[i, :]), ((J[i, :] - J[parent[i], :] + ).reshape((3, 1))))))) + + pack = lambda x: xp.hstack([np.zeros((4, 3)), x.reshape((4, 1))]) + + results = [results[i] for i in sorted(results.keys())] + results_global = results + + if True: + results2 = [ + results[i] - (pack(results[i].dot(xp.concatenate(((J[i, :]), 0))))) + for i in range(len(results)) + ] + results = results2 + result = xp.dstack(results) + return result, results_global + + +def verts_core(pose, v, J, weights, kintree_table, want_Jtr=False, xp=chumpy): + A, A_global = global_rigid_transformation(pose, J, kintree_table, xp) + T = A.dot(weights.T) + + rest_shape_h = xp.vstack((v.T, np.ones((1, v.shape[0])))) + + v = (T[:, 0, :] * rest_shape_h[0, :].reshape( + (1, -1)) + T[:, 1, :] * rest_shape_h[1, :].reshape( + (1, -1)) + T[:, 2, :] * rest_shape_h[2, :].reshape( + (1, -1)) + T[:, 3, :] * rest_shape_h[3, :].reshape((1, -1))).T + + v = v[:, :3] + + if not want_Jtr: + return v + Jtr = xp.vstack([g[:3, 3] for g in A_global]) + return (v, Jtr) diff --git a/data_processing/common/utils/manopth/mano/webuser/posemapper.py b/data_processing/common/utils/manopth/mano/webuser/posemapper.py new file mode 100644 index 0000000..9a9ae42 --- /dev/null +++ b/data_processing/common/utils/manopth/mano/webuser/posemapper.py @@ -0,0 +1,55 @@ +''' +Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. +This software is provided for research purposes only. +By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license + +More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. +For comments or questions, please email us at: mano@tue.mpg.de + + +About this file: +================ +This file defines a wrapper for the loading functions of the MANO model. + +Modules included: +- load_model: + loads the MANO model from a given file location (i.e. a .pkl file location), + or a dictionary object. 
+ +''' + + +import chumpy as ch +import numpy as np +import cv2 + + +class Rodrigues(ch.Ch): + dterms = 'rt' + + def compute_r(self): + return cv2.Rodrigues(self.rt.r)[0] + + def compute_dr_wrt(self, wrt): + if wrt is self.rt: + return cv2.Rodrigues(self.rt.r)[1].T + + +def lrotmin(p): + if isinstance(p, np.ndarray): + p = p.ravel()[3:] + return np.concatenate( + [(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() + for pp in p.reshape((-1, 3))]).ravel() + if p.ndim != 2 or p.shape[1] != 3: + p = p.reshape((-1, 3)) + p = p[1:] + return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() + for pp in p]).ravel() + + +def posemap(s): + if s == 'lrotmin': + return lrotmin + else: + raise Exception('Unknown posemapping: %s' % (str(s), )) diff --git a/data_processing/common/utils/manopth/mano/webuser/serialization.py b/data_processing/common/utils/manopth/mano/webuser/serialization.py new file mode 100644 index 0000000..9cbdd7e --- /dev/null +++ b/data_processing/common/utils/manopth/mano/webuser/serialization.py @@ -0,0 +1,94 @@ +''' +Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. +This software is provided for research purposes only. +By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license + +More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. +For comments or questions, please email us at: mano@tue.mpg.de + + +About this file: +================ +This file defines a wrapper for the loading functions of the MANO model. + +Modules included: +- load_model: + loads the MANO model from a given file location (i.e. a .pkl file location), + or a dictionary object. 
+ +''' + + +__all__ = ['load_model', 'save_model'] + +import numpy as np +import pickle +import chumpy as ch +from chumpy.ch import MatVecMult +from mano.webuser.posemapper import posemap +from mano.webuser.verts import verts_core + +def ready_arguments(fname_or_dict): + + if not isinstance(fname_or_dict, dict): + dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1') + else: + dd = fname_or_dict + + backwards_compatibility_replacements(dd) + + want_shapemodel = 'shapedirs' in dd + nposeparms = dd['kintree_table'].shape[1] * 3 + + if 'trans' not in dd: + dd['trans'] = np.zeros(3) + if 'pose' not in dd: + dd['pose'] = np.zeros(nposeparms) + if 'shapedirs' in dd and 'betas' not in dd: + dd['betas'] = np.zeros(dd['shapedirs'].shape[-1]) + + for s in [ + 'v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', + 'betas', 'J' + ]: + if (s in dd) and not hasattr(dd[s], 'dterms'): + dd[s] = ch.array(dd[s]) + + if want_shapemodel: + dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template'] + v_shaped = dd['v_shaped'] + J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0]) + J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1]) + J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2]) + dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T + dd['v_posed'] = v_shaped + dd['posedirs'].dot( + posemap(dd['bs_type'])(dd['pose'])) + else: + dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot( + posemap(dd['bs_type'])(dd['pose'])) + + return dd + + +def load_model(fname_or_dict): + dd = ready_arguments(fname_or_dict) + + args = { + 'pose': dd['pose'], + 'v': dd['v_posed'], + 'J': dd['J'], + 'weights': dd['weights'], + 'kintree_table': dd['kintree_table'], + 'xp': ch, + 'want_Jtr': True, + 'bs_style': dd['bs_style'] + } + + result, Jtr = verts_core(**args) + result = result + dd['trans'].reshape((1, 3)) + result.J_transformed = Jtr + dd['trans'].reshape((1, 3)) + + for k, v in dd.items(): + setattr(result, k, v) + + return result diff --git a/data_processing/common/utils/manopth/mano/webuser/smpl_handpca_wrapper_HAND_only.py b/data_processing/common/utils/manopth/mano/webuser/smpl_handpca_wrapper_HAND_only.py new file mode 100644 index 0000000..de279f9 --- /dev/null +++ b/data_processing/common/utils/manopth/mano/webuser/smpl_handpca_wrapper_HAND_only.py @@ -0,0 +1,150 @@ +''' +Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. +This software is provided for research purposes only. +By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license + +More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. +For comments or questions, please email us at: mano@tue.mpg.de + + +About this file: +================ +This file defines a wrapper for the loading functions of the MANO model. + +Modules included: +- load_model: + loads the MANO model from a given file location (i.e. a .pkl file location), + or a dictionary object. 
+ +''' + + +def ready_arguments(fname_or_dict, posekey4vposed='pose'): + import numpy as np + import pickle + import chumpy as ch + from chumpy.ch import MatVecMult + from mano.webuser.posemapper import posemap + + if not isinstance(fname_or_dict, dict): + dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1') + # dd = pickle.load(open(fname_or_dict, 'rb')) + else: + dd = fname_or_dict + + want_shapemodel = 'shapedirs' in dd + nposeparms = dd['kintree_table'].shape[1] * 3 + + if 'trans' not in dd: + dd['trans'] = np.zeros(3) + if 'pose' not in dd: + dd['pose'] = np.zeros(nposeparms) + if 'shapedirs' in dd and 'betas' not in dd: + dd['betas'] = np.zeros(dd['shapedirs'].shape[-1]) + + for s in [ + 'v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', + 'betas', 'J' + ]: + if (s in dd) and not hasattr(dd[s], 'dterms'): + dd[s] = ch.array(dd[s]) + + assert (posekey4vposed in dd) + if want_shapemodel: + dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template'] + v_shaped = dd['v_shaped'] + J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0]) + J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1]) + J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2]) + dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T + pose_map_res = posemap(dd['bs_type'])(dd[posekey4vposed]) + dd['v_posed'] = v_shaped + dd['posedirs'].dot(pose_map_res) + else: + pose_map_res = posemap(dd['bs_type'])(dd[posekey4vposed]) + dd_add = dd['posedirs'].dot(pose_map_res) + dd['v_posed'] = dd['v_template'] + dd_add + + return dd + + +def load_model(fname_or_dict, ncomps=6, flat_hand_mean=False, v_template=None): + ''' This model loads the fully articulable HAND SMPL model, + and replaces the pose DOFS by ncomps from PCA''' + + from mano.webuser.verts import verts_core + import numpy as np + import chumpy as ch + import pickle + import scipy.sparse as sp + np.random.seed(1) + + if not isinstance(fname_or_dict, dict): + smpl_data = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1') + # smpl_data = pickle.load(open(fname_or_dict, 'rb')) + else: + smpl_data = fname_or_dict + + rot = 3 # for global orientation!!! 
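+
+    # The pose vector exposed below has (rot + ncomps) entries: the first `rot` (= 3)
+    # values are the global axis-angle orientation, and the remaining `ncomps` values
+    # are coefficients in the PCA hand-pose space. The full 45-dim articulation is
+    # recovered as hands_mean + pose_coeffs[rot:] @ hands_components[:ncomps].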
+ + hands_components = smpl_data['hands_components'] + hands_mean = np.zeros(hands_components.shape[ + 1]) if flat_hand_mean else smpl_data['hands_mean'] + hands_coeffs = smpl_data['hands_coeffs'][:, :ncomps] + + selected_components = np.vstack((hands_components[:ncomps])) + hands_mean = hands_mean.copy() + + pose_coeffs = ch.zeros(rot + selected_components.shape[0]) + full_hand_pose = pose_coeffs[rot:(rot + ncomps)].dot(selected_components) + + smpl_data['fullpose'] = ch.concatenate((pose_coeffs[:rot], + hands_mean + full_hand_pose)) + smpl_data['pose'] = pose_coeffs + + Jreg = smpl_data['J_regressor'] + if not sp.issparse(Jreg): + smpl_data['J_regressor'] = (sp.csc_matrix( + (Jreg.data, (Jreg.row, Jreg.col)), shape=Jreg.shape)) + + # slightly modify ready_arguments to make sure that it uses the fullpose + # (which will NOT be pose) for the computation of posedirs + dd = ready_arguments(smpl_data, posekey4vposed='fullpose') + + # create the smpl formula with the fullpose, + # but expose the PCA coefficients as smpl.pose for compatibility + args = { + 'pose': dd['fullpose'], + 'v': dd['v_posed'], + 'J': dd['J'], + 'weights': dd['weights'], + 'kintree_table': dd['kintree_table'], + 'xp': ch, + 'want_Jtr': True, + 'bs_style': dd['bs_style'], + } + + result_previous, meta = verts_core(**args) + + result = result_previous + dd['trans'].reshape((1, 3)) + result.no_translation = result_previous + + if meta is not None: + for field in ['Jtr', 'A', 'A_global', 'A_weighted']: + if (hasattr(meta, field)): + setattr(result, field, getattr(meta, field)) + + setattr(result, 'Jtr', meta) + if hasattr(result, 'Jtr'): + result.J_transformed = result.Jtr + dd['trans'].reshape((1, 3)) + + for k, v in dd.items(): + setattr(result, k, v) + + if v_template is not None: + result.v_template[:] = v_template + + return result + + +if __name__ == '__main__': + load_model() diff --git a/data_processing/common/utils/manopth/mano/webuser/verts.py b/data_processing/common/utils/manopth/mano/webuser/verts.py new file mode 100644 index 0000000..5fd9550 --- /dev/null +++ b/data_processing/common/utils/manopth/mano/webuser/verts.py @@ -0,0 +1,124 @@ +''' +Copyright 2017 Javier Romero, Dimitrios Tzionas, Michael J Black and the Max Planck Gesellschaft. All rights reserved. +This software is provided for research purposes only. +By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license + +More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de. +For comments or questions, please email us at: mano@tue.mpg.de + + +About this file: +================ +This file defines a wrapper for the loading functions of the MANO model. + +Modules included: +- load_model: + loads the MANO model from a given file location (i.e. a .pkl file location), + or a dictionary object. 
+ +''' + + +import chumpy +import mano.webuser.lbs as lbs +from mano.webuser.posemapper import posemap +import scipy.sparse as sp +from chumpy.ch import MatVecMult + + +def ischumpy(x): + return hasattr(x, 'dterms') + + +def verts_decorated(trans, + pose, + v_template, + J_regressor, + weights, + kintree_table, + bs_style, + f, + bs_type=None, + posedirs=None, + betas=None, + shapedirs=None, + want_Jtr=False): + + for which in [ + trans, pose, v_template, weights, posedirs, betas, shapedirs + ]: + if which is not None: + assert ischumpy(which) + + v = v_template + + if shapedirs is not None: + if betas is None: + betas = chumpy.zeros(shapedirs.shape[-1]) + v_shaped = v + shapedirs.dot(betas) + else: + v_shaped = v + + if posedirs is not None: + v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose)) + else: + v_posed = v_shaped + + v = v_posed + + if sp.issparse(J_regressor): + J_tmpx = MatVecMult(J_regressor, v_shaped[:, 0]) + J_tmpy = MatVecMult(J_regressor, v_shaped[:, 1]) + J_tmpz = MatVecMult(J_regressor, v_shaped[:, 2]) + J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T + else: + assert (ischumpy(J)) + + assert (bs_style == 'lbs') + result, Jtr = lbs.verts_core( + pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy) + + tr = trans.reshape((1, 3)) + result = result + tr + Jtr = Jtr + tr + + result.trans = trans + result.f = f + result.pose = pose + result.v_template = v_template + result.J = J + result.J_regressor = J_regressor + result.weights = weights + result.kintree_table = kintree_table + result.bs_style = bs_style + result.bs_type = bs_type + if posedirs is not None: + result.posedirs = posedirs + result.v_posed = v_posed + if shapedirs is not None: + result.shapedirs = shapedirs + result.betas = betas + result.v_shaped = v_shaped + if want_Jtr: + result.J_transformed = Jtr + return result + + +def verts_core(pose, + v, + J, + weights, + kintree_table, + bs_style, + want_Jtr=False, + xp=chumpy): + + if xp == chumpy: + assert (hasattr(pose, 'dterms')) + assert (hasattr(v, 'dterms')) + assert (hasattr(J, 'dterms')) + assert (hasattr(weights, 'dterms')) + + assert (bs_style == 'lbs') + result = lbs.verts_core(pose, v, J, weights, kintree_table, want_Jtr, xp) + return result diff --git a/data_processing/common/utils/manopth/manopth/__init__.py b/data_processing/common/utils/manopth/manopth/__init__.py new file mode 100644 index 0000000..e27cf86 --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/__init__.py @@ -0,0 +1 @@ +name = 'manopth' diff --git a/data_processing/common/utils/manopth/manopth/argutils.py b/data_processing/common/utils/manopth/manopth/argutils.py new file mode 100644 index 0000000..7e86eb0 --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/argutils.py @@ -0,0 +1,51 @@ +import datetime +import os +import pickle +import subprocess +import sys + + +def print_args(args): + opts = vars(args) + print('======= Options ========') + for k, v in sorted(opts.items()): + print('{}: {}'.format(k, v)) + print('========================') + + +def save_args(args, save_folder, opt_prefix='opt', verbose=True): + opts = vars(args) + # Create checkpoint folder + if not os.path.exists(save_folder): + os.makedirs(save_folder, exist_ok=True) + + # Save options + opt_filename = '{}.txt'.format(opt_prefix) + opt_path = os.path.join(save_folder, opt_filename) + with open(opt_path, 'a') as opt_file: + opt_file.write('====== Options ======\n') + for k, v in sorted(opts.items()): + opt_file.write( + '{option}: {value}\n'.format(option=str(k), value=str(v))) 
+ opt_file.write('=====================\n') + opt_file.write('launched {} at {}\n'.format( + str(sys.argv[0]), str(datetime.datetime.now()))) + + # Add git info + label = subprocess.check_output(["git", "describe", + "--always"]).strip() + if subprocess.call( + ["git", "branch"], + stderr=subprocess.STDOUT, + stdout=open(os.devnull, 'w')) == 0: + opt_file.write('=== Git info ====\n') + opt_file.write('{}\n'.format(label)) + commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']) + opt_file.write('commit : {}\n'.format(commit.strip())) + + opt_picklename = '{}.pkl'.format(opt_prefix) + opt_picklepath = os.path.join(save_folder, opt_picklename) + with open(opt_picklepath, 'wb') as opt_file: + pickle.dump(opts, opt_file) + if verbose: + print('Saved options to {}'.format(opt_path)) diff --git a/data_processing/common/utils/manopth/manopth/demo.py b/data_processing/common/utils/manopth/manopth/demo.py new file mode 100644 index 0000000..0bca468 --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/demo.py @@ -0,0 +1,59 @@ +from matplotlib import pyplot as plt +from mpl_toolkits.mplot3d import Axes3D +from mpl_toolkits.mplot3d.art3d import Poly3DCollection +import numpy as np +import torch + +from manopth.manolayer import ManoLayer + + +def generate_random_hand(batch_size=1, ncomps=6, mano_root='mano/models'): + nfull_comps = ncomps + 3 # Add global orientation dims to PCA + random_pcapose = torch.rand(batch_size, nfull_comps) + mano_layer = ManoLayer(mano_root=mano_root) + verts, joints = mano_layer(random_pcapose) + return {'verts': verts, 'joints': joints, 'faces': mano_layer.th_faces} + + +def display_hand(hand_info, mano_faces=None, ax=None, alpha=0.2, batch_idx=0, show=True): + """ + Displays hand batch_idx in batch of hand_info, hand_info as returned by + generate_random_hand + """ + if ax is None: + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + verts, joints = hand_info['verts'][batch_idx], hand_info['joints'][ + batch_idx] + if mano_faces is None: + ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.1) + else: + mesh = Poly3DCollection(verts[mano_faces], alpha=alpha) + face_color = (141 / 255, 184 / 255, 226 / 255) + edge_color = (50 / 255, 50 / 255, 50 / 255) + mesh.set_edgecolor(edge_color) + mesh.set_facecolor(face_color) + ax.add_collection3d(mesh) + ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], color='r') + cam_equal_aspect_3d(ax, verts.numpy()) + if show: + plt.show() + + +def cam_equal_aspect_3d(ax, verts, flip_x=False): + """ + Centers view on cuboid containing hand and flips y and z axis + and fixes azimuth + """ + extents = np.stack([verts.min(0), verts.max(0)], axis=1) + sz = extents[:, 1] - extents[:, 0] + centers = np.mean(extents, axis=1) + maxsize = max(abs(sz)) + r = maxsize / 2 + if flip_x: + ax.set_xlim(centers[0] + r, centers[0] - r) + else: + ax.set_xlim(centers[0] - r, centers[0] + r) + # Invert y and z axis + ax.set_ylim(centers[1] + r, centers[1] - r) + ax.set_zlim(centers[2] + r, centers[2] - r) diff --git a/data_processing/common/utils/manopth/manopth/manolayer.py b/data_processing/common/utils/manopth/manopth/manolayer.py new file mode 100644 index 0000000..24c6a71 --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/manolayer.py @@ -0,0 +1,273 @@ +import os + +import numpy as np +import torch +from torch.nn import Module + +from mano.webuser.smpl_handpca_wrapper_HAND_only import ready_arguments +from manopth import rodrigues_layer, rotproj, rot6d +from manopth.tensutils import 
(th_posemap_axisang, th_with_zeros, th_pack, + subtract_flat_id, make_list) + + +class ManoLayer(Module): + __constants__ = [ + 'use_pca', 'rot', 'ncomps', 'ncomps', 'kintree_parents', 'check', + 'side', 'center_idx', 'joint_rot_mode' + ] + + def __init__(self, + center_idx=None, + flat_hand_mean=True, + ncomps=6, + side='right', + mano_root='mano/models', + use_pca=True, + root_rot_mode='axisang', + joint_rot_mode='axisang', + robust_rot=False): + """ + Args: + center_idx: index of center joint in our computations, + if -1 centers on estimate of palm as middle of base + of middle finger and wrist + flat_hand_mean: if True, (0, 0, 0, ...) pose coefficients match + flat hand, else match average hand pose + mano_root: path to MANO pkl files for left and right hand + ncomps: number of PCA components form pose space (<45) + side: 'right' or 'left' + use_pca: Use PCA decomposition for pose space. + joint_rot_mode: 'axisang' or 'rotmat', ignored if use_pca + """ + super().__init__() + + self.center_idx = center_idx + self.robust_rot = robust_rot + if root_rot_mode == 'axisang': + self.rot = 3 + else: + self.rot = 6 + self.flat_hand_mean = flat_hand_mean + self.side = side + self.use_pca = use_pca + self.joint_rot_mode = joint_rot_mode + self.root_rot_mode = root_rot_mode + if use_pca: + self.ncomps = ncomps + else: + self.ncomps = 45 + + if side == 'right': + self.mano_path = os.path.join(mano_root, 'MANO_RIGHT.pkl') + elif side == 'left': + self.mano_path = os.path.join(mano_root, 'MANO_LEFT.pkl') + + smpl_data = ready_arguments(self.mano_path) + + hands_components = smpl_data['hands_components'] + + self.smpl_data = smpl_data + + self.register_buffer('th_betas', + torch.Tensor(smpl_data['betas'].r).unsqueeze(0)) + self.register_buffer('th_shapedirs', + torch.Tensor(smpl_data['shapedirs'].r)) + self.register_buffer('th_posedirs', + torch.Tensor(smpl_data['posedirs'].r)) + self.register_buffer( + 'th_v_template', + torch.Tensor(smpl_data['v_template'].r).unsqueeze(0)) + self.register_buffer( + 'th_J_regressor', + torch.Tensor(np.array(smpl_data['J_regressor'].toarray()))) + self.register_buffer('th_weights', + torch.Tensor(smpl_data['weights'].r)) + self.register_buffer('th_faces', + torch.Tensor(smpl_data['f'].astype(np.int32)).long()) + + # Get hand mean + hands_mean = np.zeros(hands_components.shape[1] + ) if flat_hand_mean else smpl_data['hands_mean'] + hands_mean = hands_mean.copy() + th_hands_mean = torch.Tensor(hands_mean).unsqueeze(0) + if self.use_pca or self.joint_rot_mode == 'axisang': + # Save as axis-angle + self.register_buffer('th_hands_mean', th_hands_mean) + selected_components = hands_components[:ncomps] + self.register_buffer('th_selected_comps', + torch.Tensor(selected_components)) + else: + th_hands_mean_rotmat = rodrigues_layer.batch_rodrigues( + th_hands_mean.view(15, 3)).reshape(15, 3, 3) + self.register_buffer('th_hands_mean_rotmat', th_hands_mean_rotmat) + + # Kinematic chain params + self.kintree_table = smpl_data['kintree_table'] + parents = list(self.kintree_table[0].tolist()) + self.kintree_parents = parents + + def forward(self, + th_pose_coeffs, + th_betas=torch.zeros(1), + th_trans=torch.zeros(1), + root_palm=torch.Tensor([0]), + share_betas=torch.Tensor([0]), + ): + """ + Args: + th_trans (Tensor (batch_size x ncomps)): if provided, applies trans to joints and vertices + th_betas (Tensor (batch_size x 10)): if provided, uses given shape parameters for hand shape + else centers on root joint (9th joint) + root_palm: return palm as hand root instead of wrist + """ 
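+
+        # Overview of the steps below:
+        #   1. map the input pose coefficients (PCA / axis-angle / rotation matrices)
+        #      to a root rotation plus 15 per-joint rotation matrices,
+        #   2. apply shape and pose blend shapes to the template mesh,
+        #   3. compose the per-finger kinematic chains of rigid transforms and skin the
+        #      vertices with the linear blend skinning weights,
+        #   4. append fingertip vertices to the joints, recenter or translate if requested,
+        #      and scale the outputs to millimeters.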
+ # if len(th_pose_coeffs) == 0: + # return th_pose_coeffs.new_empty(0), th_pose_coeffs.new_empty(0) + + batch_size = th_pose_coeffs.shape[0] + # Get axis angle from PCA components and coefficients + if self.use_pca or self.joint_rot_mode == 'axisang': + # Remove global rot coeffs + th_hand_pose_coeffs = th_pose_coeffs[:, self.rot:self.rot + + self.ncomps] + if self.use_pca: + # PCA components --> axis angles + th_full_hand_pose = th_hand_pose_coeffs.mm(self.th_selected_comps) + else: + th_full_hand_pose = th_hand_pose_coeffs + + # Concatenate back global rot + th_full_pose = torch.cat([ + th_pose_coeffs[:, :self.rot], + self.th_hands_mean + th_full_hand_pose + ], 1) + if self.root_rot_mode == 'axisang': + # compute rotation matrixes from axis-angle while skipping global rotation + th_pose_map, th_rot_map = th_posemap_axisang(th_full_pose) + root_rot = th_rot_map[:, :9].view(batch_size, 3, 3) + th_rot_map = th_rot_map[:, 9:] + th_pose_map = th_pose_map[:, 9:] + else: + # th_posemap offsets by 3, so add offset or 3 to get to self.rot=6 + th_pose_map, th_rot_map = th_posemap_axisang(th_full_pose[:, 6:]) + if self.robust_rot: + root_rot = rot6d.robust_compute_rotation_matrix_from_ortho6d(th_full_pose[:, :6]) + else: + root_rot = rot6d.compute_rotation_matrix_from_ortho6d(th_full_pose[:, :6]) + else: + assert th_pose_coeffs.dim() == 4, ( + 'When not self.use_pca, ' + 'th_pose_coeffs should have 4 dims, got {}'.format( + th_pose_coeffs.dim())) + assert th_pose_coeffs.shape[2:4] == (3, 3), ( + 'When not self.use_pca, th_pose_coeffs have 3x3 matrix for two' + 'last dims, got {}'.format(th_pose_coeffs.shape[2:4])) + th_pose_rots = rotproj.batch_rotprojs(th_pose_coeffs) + th_rot_map = th_pose_rots[:, 1:].view(batch_size, -1) + th_pose_map = subtract_flat_id(th_rot_map) + root_rot = th_pose_rots[:, 0] + + # Full axis angle representation with root joint + if th_betas is None or th_betas.numel() == 1: + th_v_shaped = torch.matmul(self.th_shapedirs, + self.th_betas.transpose(1, 0)).permute( + 2, 0, 1) + self.th_v_template + th_j = torch.matmul(self.th_J_regressor, th_v_shaped).repeat( + batch_size, 1, 1) + + else: + if share_betas: + th_betas = th_betas.mean(0, keepdim=True).expand(th_betas.shape[0], 10) + th_v_shaped = torch.matmul(self.th_shapedirs, + th_betas.transpose(1, 0)).permute( + 2, 0, 1) + self.th_v_template + th_j = torch.matmul(self.th_J_regressor, th_v_shaped) + # th_pose_map should have shape 20x135 + + th_v_posed = th_v_shaped + torch.matmul( + self.th_posedirs, th_pose_map.transpose(0, 1)).permute(2, 0, 1) + # Final T pose with transformation done ! 
+ + # Global rigid transformation + + root_j = th_j[:, 0, :].contiguous().view(batch_size, 3, 1) + root_trans = th_with_zeros(torch.cat([root_rot, root_j], 2)) + + all_rots = th_rot_map.view(th_rot_map.shape[0], 15, 3, 3) + lev1_idxs = [1, 4, 7, 10, 13] + lev2_idxs = [2, 5, 8, 11, 14] + lev3_idxs = [3, 6, 9, 12, 15] + lev1_rots = all_rots[:, [idx - 1 for idx in lev1_idxs]] + lev2_rots = all_rots[:, [idx - 1 for idx in lev2_idxs]] + lev3_rots = all_rots[:, [idx - 1 for idx in lev3_idxs]] + lev1_j = th_j[:, lev1_idxs] + lev2_j = th_j[:, lev2_idxs] + lev3_j = th_j[:, lev3_idxs] + + # From base to tips + # Get lev1 results + all_transforms = [root_trans.unsqueeze(1)] + lev1_j_rel = lev1_j - root_j.transpose(1, 2) + lev1_rel_transform_flt = th_with_zeros(torch.cat([lev1_rots, lev1_j_rel.unsqueeze(3)], 3).view(-1, 3, 4)) + root_trans_flt = root_trans.unsqueeze(1).repeat(1, 5, 1, 1).view(root_trans.shape[0] * 5, 4, 4) + lev1_flt = torch.matmul(root_trans_flt, lev1_rel_transform_flt) + all_transforms.append(lev1_flt.view(all_rots.shape[0], 5, 4, 4)) + + # Get lev2 results + lev2_j_rel = lev2_j - lev1_j + lev2_rel_transform_flt = th_with_zeros(torch.cat([lev2_rots, lev2_j_rel.unsqueeze(3)], 3).view(-1, 3, 4)) + lev2_flt = torch.matmul(lev1_flt, lev2_rel_transform_flt) + all_transforms.append(lev2_flt.view(all_rots.shape[0], 5, 4, 4)) + + # Get lev3 results + lev3_j_rel = lev3_j - lev2_j + lev3_rel_transform_flt = th_with_zeros(torch.cat([lev3_rots, lev3_j_rel.unsqueeze(3)], 3).view(-1, 3, 4)) + lev3_flt = torch.matmul(lev2_flt, lev3_rel_transform_flt) + all_transforms.append(lev3_flt.view(all_rots.shape[0], 5, 4, 4)) + + reorder_idxs = [0, 1, 6, 11, 2, 7, 12, 3, 8, 13, 4, 9, 14, 5, 10, 15] + th_results = torch.cat(all_transforms, 1)[:, reorder_idxs] + th_results_global = th_results + + joint_js = torch.cat([th_j, th_j.new_zeros(th_j.shape[0], 16, 1)], 2) + tmp2 = torch.matmul(th_results, joint_js.unsqueeze(3)) + th_results2 = (th_results - torch.cat([tmp2.new_zeros(*tmp2.shape[:2], 4, 3), tmp2], 3)).permute(0, 2, 3, 1) + + th_T = torch.matmul(th_results2, self.th_weights.transpose(0, 1)) + + th_rest_shape_h = torch.cat([ + th_v_posed.transpose(2, 1), + torch.ones((batch_size, 1, th_v_posed.shape[1]), + dtype=th_T.dtype, + device=th_T.device), + ], 1) + + th_verts = (th_T * th_rest_shape_h.unsqueeze(1)).sum(2).transpose(2, 1) + th_verts = th_verts[:, :, :3] + th_jtr = th_results_global[:, :, :3, 3] + # In addition to MANO reference joints we sample vertices on each finger + # to serve as finger tips + if self.side == 'right': + tips = th_verts[:, [745, 317, 444, 556, 673]] + else: + tips = th_verts[:, [745, 317, 445, 556, 673]] + if bool(root_palm): + palm = (th_verts[:, 95] + th_verts[:, 22]).unsqueeze(1) / 2 + th_jtr = torch.cat([palm, th_jtr[:, 1:]], 1) + th_jtr = torch.cat([th_jtr, tips], 1) + + # Reorder joints to match visualization utilities + th_jtr = th_jtr[:, [0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20]] + + if th_trans is None or bool(torch.norm(th_trans) == 0): + if self.center_idx is not None: + center_joint = th_jtr[:, self.center_idx].unsqueeze(1) + th_jtr = th_jtr - center_joint + th_verts = th_verts - center_joint + else: + th_jtr = th_jtr + th_trans.unsqueeze(1) + th_verts = th_verts + th_trans.unsqueeze(1) + + # Scale to milimeters + th_verts = th_verts * 1000 + th_jtr = th_jtr * 1000 + return th_verts, th_jtr diff --git a/data_processing/common/utils/manopth/manopth/rodrigues_layer.py b/data_processing/common/utils/manopth/manopth/rodrigues_layer.py 
new file mode 100644 index 0000000..bb5ac1e --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/rodrigues_layer.py @@ -0,0 +1,89 @@ +""" +This part reuses code from https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py +which is part of a PyTorch port of SMPL. +Thanks to Zhang Xiong (MandyMo) for making this great code available on github ! +""" + +import argparse +from torch.autograd import gradcheck +import torch +from torch.autograd import Variable + +from manopth import argutils + + +def quat2mat(quat): + """Convert quaternion coefficients to rotation matrix. + Args: + quat: size = [batch_size, 4] 4 <===>(w, x, y, z) + Returns: + Rotation matrix corresponding to the quaternion -- size = [batch_size, 3, 3] + """ + norm_quat = quat + norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) + w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, + 2], norm_quat[:, + 3] + + batch_size = quat.size(0) + + w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) + wx, wy, wz = w * x, w * y, w * z + xy, xz, yz = x * y, x * z, y * z + + rotMat = torch.stack([ + w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, + w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, + w2 - x2 - y2 + z2 + ], + dim=1).view(batch_size, 3, 3) + return rotMat + + +def batch_rodrigues(axisang): + #axisang N x 3 + axisang_norm = torch.norm(axisang + 1e-8, p=2, dim=1) + angle = torch.unsqueeze(axisang_norm, -1) + axisang_normalized = torch.div(axisang, angle) + angle = angle * 0.5 + v_cos = torch.cos(angle) + v_sin = torch.sin(angle) + quat = torch.cat([v_cos, v_sin * axisang_normalized], dim=1) + rot_mat = quat2mat(quat) + rot_mat = rot_mat.view(rot_mat.shape[0], 9) + return rot_mat + + +def th_get_axis_angle(vector): + angle = torch.norm(vector, 2, 1) + axes = vector / angle.unsqueeze(1) + return axes, angle + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--batch_size', default=1, type=int) + parser.add_argument('--cuda', action='store_true') + args = parser.parse_args() + + argutils.print_args(args) + + n_components = 6 + rot = 3 + inputs = torch.rand(args.batch_size, rot) + inputs_var = Variable(inputs.double(), requires_grad=True) + if args.cuda: + inputs = inputs.cuda() + # outputs = batch_rodrigues(inputs) + test_function = gradcheck(batch_rodrigues, (inputs_var, )) + print('batch test passed !') + + inputs = torch.rand(rot) + inputs_var = Variable(inputs.double(), requires_grad=True) + test_function = gradcheck(th_cv2_rod_sub_id.apply, (inputs_var, )) + print('th_cv2_rod test passed') + + inputs = torch.rand(rot) + inputs_var = Variable(inputs.double(), requires_grad=True) + test_th = gradcheck(th_cv2_rod.apply, (inputs_var, )) + print('th_cv2_rod_id test passed !') diff --git a/data_processing/common/utils/manopth/manopth/rot6d.py b/data_processing/common/utils/manopth/manopth/rot6d.py new file mode 100644 index 0000000..c1d60ef --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/rot6d.py @@ -0,0 +1,71 @@ +import torch + + +def compute_rotation_matrix_from_ortho6d(poses): + """ + Code from + https://github.com/papagina/RotationContinuity + On the Continuity of Rotation Representations in Neural Networks + Zhou et al. 
CVPR19 + https://zhouyisjtu.github.io/project_rotation/rotation.html + """ + x_raw = poses[:, 0:3] # batch*3 + y_raw = poses[:, 3:6] # batch*3 + + x = normalize_vector(x_raw) # batch*3 + z = cross_product(x, y_raw) # batch*3 + z = normalize_vector(z) # batch*3 + y = cross_product(z, x) # batch*3 + + x = x.view(-1, 3, 1) + y = y.view(-1, 3, 1) + z = z.view(-1, 3, 1) + matrix = torch.cat((x, y, z), 2) # batch*3*3 + return matrix + +def robust_compute_rotation_matrix_from_ortho6d(poses): + """ + Instead of making 2nd vector orthogonal to first + create a base that takes into account the two predicted + directions equally + """ + x_raw = poses[:, 0:3] # batch*3 + y_raw = poses[:, 3:6] # batch*3 + + x = normalize_vector(x_raw) # batch*3 + y = normalize_vector(y_raw) # batch*3 + middle = normalize_vector(x + y) + orthmid = normalize_vector(x - y) + x = normalize_vector(middle + orthmid) + y = normalize_vector(middle - orthmid) + # Their scalar product should be small ! + # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001 + z = normalize_vector(cross_product(x, y)) + + x = x.view(-1, 3, 1) + y = y.view(-1, 3, 1) + z = z.view(-1, 3, 1) + matrix = torch.cat((x, y, z), 2) # batch*3*3 + # Check for reflection in matrix ! If found, flip last vector TODO + assert (torch.stack([torch.det(mat) for mat in matrix ])< 0).sum() == 0 + return matrix + + +def normalize_vector(v): + batch = v.shape[0] + v_mag = torch.sqrt(v.pow(2).sum(1)) # batch + v_mag = torch.max(v_mag, v.new([1e-8])) + v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1]) + v = v/v_mag + return v + + +def cross_product(u, v): + batch = u.shape[0] + i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1] + j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2] + k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0] + + out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1) + + return out diff --git a/data_processing/common/utils/manopth/manopth/rotproj.py b/data_processing/common/utils/manopth/manopth/rotproj.py new file mode 100644 index 0000000..91a601d --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/rotproj.py @@ -0,0 +1,21 @@ +import torch + + +def batch_rotprojs(batches_rotmats): + proj_rotmats = [] + for batch_idx, batch_rotmats in enumerate(batches_rotmats): + proj_batch_rotmats = [] + for rot_idx, rotmat in enumerate(batch_rotmats): + # GPU implementation of svd is VERY slow + # ~ 2 10^-3 per hit vs 5 10^-5 on cpu + U, S, V = rotmat.cpu().svd() + rotmat = torch.matmul(U, V.transpose(0, 1)) + orth_det = rotmat.det() + # Remove reflection + if orth_det < 0: + rotmat[:, 2] = -1 * rotmat[:, 2] + + rotmat = rotmat.cuda() + proj_batch_rotmats.append(rotmat) + proj_rotmats.append(torch.stack(proj_batch_rotmats)) + return torch.stack(proj_rotmats) diff --git a/data_processing/common/utils/manopth/manopth/tensutils.py b/data_processing/common/utils/manopth/manopth/tensutils.py new file mode 100644 index 0000000..0c64c78 --- /dev/null +++ b/data_processing/common/utils/manopth/manopth/tensutils.py @@ -0,0 +1,47 @@ +import torch + +from manopth import rodrigues_layer + + +def th_posemap_axisang(pose_vectors): + rot_nb = int(pose_vectors.shape[1] / 3) + pose_vec_reshaped = pose_vectors.contiguous().view(-1, 3) + rot_mats = rodrigues_layer.batch_rodrigues(pose_vec_reshaped) + rot_mats = rot_mats.view(pose_vectors.shape[0], rot_nb * 9) + pose_maps = subtract_flat_id(rot_mats) + return pose_maps, rot_mats + + +def th_with_zeros(tensor): + batch_size = tensor.shape[0] + padding = 
tensor.new([0.0, 0.0, 0.0, 1.0]) + padding.requires_grad = False + + concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] + cat_res = torch.cat(concat_list, 1) + return cat_res + + +def th_pack(tensor): + batch_size = tensor.shape[0] + padding = tensor.new_zeros((batch_size, 4, 3)) + padding.requires_grad = False + pack_list = [padding, tensor] + pack_res = torch.cat(pack_list, 2) + return pack_res + + +def subtract_flat_id(rot_mats): + # Subtracts identity as a flattened tensor + rot_nb = int(rot_mats.shape[1] / 9) + id_flat = torch.eye( + 3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat( + rot_mats.shape[0], rot_nb) + # id_flat.requires_grad = False + results = rot_mats - id_flat + return results + + +def make_list(tensor): + # type: (List[int]) -> List[int] + return tensor diff --git a/data_processing/common/utils/manopth/setup.py b/data_processing/common/utils/manopth/setup.py new file mode 100644 index 0000000..fb3ebdf --- /dev/null +++ b/data_processing/common/utils/manopth/setup.py @@ -0,0 +1,45 @@ +from setuptools import find_packages, setup +import warnings + +DEPENDENCY_PACKAGE_NAMES = ["matplotlib", "torch", "tqdm", "numpy", "cv2", + "chumpy"] + + +def check_dependencies(): + missing_dependencies = [] + for package_name in DEPENDENCY_PACKAGE_NAMES: + try: + __import__(package_name) + except ImportError: + missing_dependencies.append(package_name) + + if missing_dependencies: + warnings.warn( + 'Missing dependencies: {}. We recommend you follow ' + 'the installation instructions at ' + 'https://github.com/hassony2/manopth#installation'.format( + missing_dependencies)) + + +with open("README.md", "r") as fh: + long_description = fh.read() + +check_dependencies() + +setup( + name="manopth", + version="0.0.1", + author="Yana Hasson", + author_email="yana.hasson.inria@gmail.com", + packages=find_packages(exclude=('tests',)), + python_requires=">=3.5.0", + description="PyTorch mano layer", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/hassony2/manopth", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU GENERAL PUBLIC LICENSE", + "Operating System :: OS Independent", + ], +) diff --git a/data_processing/common/utils/manopth/test/test_demo.py b/data_processing/common/utils/manopth/test/test_demo.py new file mode 100644 index 0000000..c378d14 --- /dev/null +++ b/data_processing/common/utils/manopth/test/test_demo.py @@ -0,0 +1,12 @@ +import torch + +from manopth.demo import generate_random_hand + + +def test_generate_random_hand(): + batch_size = 3 + hand_info = generate_random_hand(batch_size=batch_size, ncomps=6) + verts = hand_info['verts'] + joints = hand_info['joints'] + assert verts.shape == (batch_size, 778, 3) + assert joints.shape == (batch_size, 21, 3) diff --git a/data_processing/common/utils/occluder.py b/data_processing/common/utils/occluder.py new file mode 100644 index 0000000..e6f66e2 --- /dev/null +++ b/data_processing/common/utils/occluder.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python + +import functools +import os.path +import random +import sys +import xml.etree.ElementTree +import numpy as np +import matplotlib.pyplot as plt +import skimage.data +import cv2 +import PIL.Image +import pickle + + + + +def load_pascal_occluder(pascal_voc_root_path): + occluders = [] + structuring_element = 
cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8)) + + annotation_paths = list_filepaths(os.path.join(pascal_voc_root_path, 'Annotations')) + for annotation_path in annotation_paths: + xml_root = xml.etree.ElementTree.parse(annotation_path).getroot() + is_segmented = (xml_root.find('segmented').text != '0') + + if not is_segmented: + continue + + boxes = [] + for i_obj, obj in enumerate(xml_root.findall('object')): + is_person = (obj.find('name').text == 'person') + is_difficult = (obj.find('difficult').text != '0') + is_truncated = (obj.find('truncated').text != '0') + if not is_difficult and not is_truncated: + bndbox = obj.find('bndbox') + box = [int(bndbox.find(s).text) for s in ['xmin', 'ymin', 'xmax', 'ymax']] + boxes.append((i_obj, box)) + + if not boxes: + continue + + im_filename = xml_root.find('filename').text + seg_filename = im_filename.replace('jpg', 'png') + + im_path = os.path.join(pascal_voc_root_path, 'JPEGImages', im_filename) + seg_path = os.path.join(pascal_voc_root_path, 'SegmentationObject', seg_filename) + + im = np.asarray(PIL.Image.open(im_path)) + labels = np.asarray(PIL.Image.open(seg_path)) + + for i_obj, (xmin, ymin, xmax, ymax) in boxes: + object_mask = (labels[ymin:ymax, xmin:xmax] == i_obj + 1).astype(np.uint8) * 255 + object_image = im[ymin:ymax, xmin:xmax] + if cv2.countNonZero(object_mask) < 500: + # Ignore small objects + continue + + # Reduce the opacity of the mask along the border for smoother blending + eroded = cv2.erode(object_mask, structuring_element) + object_mask[eroded < object_mask] = 192 + object_with_mask = np.concatenate([object_image, object_mask[..., np.newaxis]], axis=-1) + + if object_with_mask.size == 0: + continue + + # Downscale for efficiency + object_with_mask = resize_by_factor(object_with_mask, 0.5) + occluders.append(object_with_mask) + + print("total # of occluders: ", len(occluders)) + return occluders + +def load_coco_person_occluder(data_path, data_split): + img_dir_path = os.path.join(data_path, f'{data_split}2017') + part_seg_path = os.path.join(data_path, 'densepose_output', 'DensePose_maskRCNN_output') + + dp_dict = load_dp_result(part_seg_path, data_split) + print("loaded dp result..., total imgs: ", len(dp_dict.keys())) + from densepose.data.structures import DensePoseResult + from timer import Timer + load_timer = Timer() + + occluders = [] + structuring_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8)) + for img_name in dp_dict.keys(): + img_path = os.path.join(img_dir_path, img_name) + load_timer.tic() + img = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) + img = img[:, :, ::-1].copy() + # img = np.asarray(PIL.Image.open(img_path)) + load_timer.toc() + + dp_outputs = dp_dict[img_name] + + for output in dp_outputs: + encoded_dp = output['dp'] + iuv_arr = DensePoseResult.decode_png_data(*encoded_dp) + _, h, w = iuv_arr.shape + dp_bbox = output['bbox'] + xmin, ymin = int(dp_bbox[0] + 0.5), int(dp_bbox[1] + 0.5) + xmax, ymax = xmin+w, ymin+h + + object_mask = (iuv_arr[0] != 0).astype(np.uint8) * 255 + object_image = img[ymin:ymax, xmin:xmax] + if cv2.countNonZero(object_mask) < 5000: + # Ignore small objects or low resolution objects + continue + + # Reduce the opacity of the mask along the border for smoother blending + eroded = cv2.erode(object_mask, structuring_element) + object_mask[eroded < object_mask] = 192 + object_with_mask = np.concatenate([object_image, object_mask[..., np.newaxis]], axis=-1) + + if object_with_mask.size == 0: + continue + + # Downscale for efficiency 
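+            # (the occluder keeps its RGB + alpha-mask layout; paste_over later uses the
+            #  alpha channel to blend each occluder into the target image)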
+ object_with_mask = resize_by_factor(object_with_mask, 0.5) + occluders.append(object_with_mask) + + if len(occluders) > 5000: + break + + print("img load time: ", load_timer.total_time) + print("total # of occluders: ", len(occluders)) + return occluders + +def load_dp_result(part_seg_path, data_split): + print(f'Load DensePose Result of COCO {data_split} set') + data_path = os.path.join(part_seg_path, f'coco_{data_split}.pkl') + with open(data_path, 'rb') as f: + raw_data_list = pickle.load(f) + + data_dict = {} + for rd in raw_data_list: + key = rd['file_name'].split('/')[-1] + scores = rd['scores'] + pred_data_list = [] + for idx in range(len(scores)): + if scores[idx] > 0.5: + pred_data = {} + pred_data['bbox'] = rd['pred_boxes_XYXY'][idx] + pred_data['dp'] = rd['pred_densepose'].results[idx] + pred_data_list.append(pred_data) + data_dict[key] = pred_data_list + return data_dict + +def occlude_with_objects(im, occluders): + """Returns an augmented version of `im`, containing some occluders from the Pascal VOC dataset.""" + + result = im.copy() + width_height = np.asarray([im.shape[1], im.shape[0]]) + count = np.random.randint(1, 5) + + for _ in range(count): + occluder = random.choice(occluders) + im_scale_factor = min(width_height) / max(occluder.shape[:2]) + + random_scale_factor = np.random.uniform(0.2, 0.5) + scale_factor = random_scale_factor * im_scale_factor + + try: + occluder = resize_by_factor(occluder, scale_factor) + except Exception as e: + print("error") + continue + + # center = np.random.uniform([0, 0], width_height) + center = np.random.uniform(width_height/8, width_height/8*7) + paste_over(im_src=occluder, im_dst=result, center=center) + + return result + + +def paste_over(im_src, im_dst, center): + """Pastes `im_src` onto `im_dst` at a specified position, with alpha blending, in place. + Locations outside the bounds of `im_dst` are handled as expected (only a part or none of + `im_src` becomes visible). + Args: + im_src: The RGBA image to be pasted onto `im_dst`. Its size can be arbitrary. + im_dst: The target image. + alpha: A float (0.0-1.0) array of the same size as `im_src` controlling the alpha blending + at each pixel. Large values mean more visibility for `im_src`. + center: coordinates in `im_dst` where the center of `im_src` should be placed. + """ + + width_height_src = np.asarray([im_src.shape[1], im_src.shape[0]]) + width_height_dst = np.asarray([im_dst.shape[1], im_dst.shape[0]]) + + center = np.round(center).astype(np.int32) + raw_start_dst = center - width_height_src // 2 + raw_end_dst = raw_start_dst + width_height_src + + start_dst = np.clip(raw_start_dst, 0, width_height_dst) + end_dst = np.clip(raw_end_dst, 0, width_height_dst) + region_dst = im_dst[start_dst[1]:end_dst[1], start_dst[0]:end_dst[0]] + + start_src = start_dst - raw_start_dst + end_src = width_height_src + (end_dst - raw_end_dst) + region_src = im_src[start_src[1]:end_src[1], start_src[0]:end_src[0]] + color_src = region_src[..., 0:3] + alpha = region_src[..., 3:].astype(np.float32)/255 + + im_dst[start_dst[1]:end_dst[1], start_dst[0]:end_dst[0]] = ( + alpha * color_src + (1 - alpha) * region_dst) + + return im_dst + + +def resize_by_factor(im, factor): + """Returns a copy of `im` resized by `factor`, using bilinear interp for up and area interp + for downscaling. 
+ """ + new_size = tuple(np.round(np.array([im.shape[1], im.shape[0]]) * factor).astype(int)) + interp = cv2.INTER_LINEAR if factor > 1.0 else cv2.INTER_AREA + return cv2.resize(im, new_size, fx=factor, fy=factor, interpolation=interp) + + +def list_filepaths(dirpath): + names = os.listdir(dirpath) + paths = [os.path.join(dirpath, name) for name in names] + return sorted(filter(os.path.isfile, paths)) + + + +def main(): + """Demo of how to use the code""" + + # path = 'something/something/VOCtrainval_11-May-2012/VOCdevkit/VOC2012' + path = sys.argv[1] + + print('Loading occluders from Pascal VOC dataset...') + occluders = load_pascal_occluder(pascal_voc_root_path=path) + print('Found {} suitable objects'.format(len(occluders))) + + original_im = cv2.resize(skimage.data.astronaut(), (256, 256)) + fig, axarr = plt.subplots(3, 3, figsize=(7, 7)) + for ax in axarr.ravel(): + occluded_im = occlude_with_objects(original_im, occluders) + ax.imshow(occluded_im, interpolation="none") + ax.axis('off') + + fig.tight_layout(h_pad=0) + # plt.savefig('examples.jpg', dpi=150, bbox_inches='tight') + plt.show() + + +if __name__ == '__main__': + dp_path = '/home/redarknight/projects/detectron2/projects/DensePose/' + sys.path.insert(0, dp_path) + occluder = load_coco_person_occluder('/media/disk2/hongsuk/data/COCO/2017/', data_split='train') + # img = occlude_with_objects(dummy, occluder) diff --git a/data_processing/common/utils/posefix.py b/data_processing/common/utils/posefix.py new file mode 100644 index 0000000..e577f55 --- /dev/null +++ b/data_processing/common/utils/posefix.py @@ -0,0 +1,298 @@ +import math +import random +import numpy as np +from easydict import EasyDict as edict + +# coco joints +kps_sigmas = np.array([ + .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, + .87, .89, .89]) / 10.0 +num_kps = 17 +kps_symmetry = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16)) + + +def affine_transform(pt, t): + new_pt = np.array([pt[0], pt[1], 1.]).T + new_pt = np.dot(t, new_pt) + return new_pt[:2] + + +def replace_joint_img(joint_img_coco, bbox, near_joints, num_overlap, trans): + xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3] + pt1 = affine_transform(np.array([xmin, ymin]), trans) + pt2 = affine_transform(np.array([xmax, ymin]), trans) + pt3 = affine_transform(np.array([xmax, ymax]), trans) + area = math.sqrt(pow(pt2[0] - pt1[0], 2) + pow(pt2[1] - pt1[1], 2)) * math.sqrt( + pow(pt3[0] - pt2[0], 2) + pow(pt3[1] - pt2[1], 2)) + joint_img_coco[:17, :] = synthesize_pose(joint_img_coco[:17, :], near_joints[:, :17, :], area, num_overlap) + return joint_img_coco + + +def synthesize_pose(joints, near_joints, area, num_overlap): + def get_dist_wrt_ks(ks, area): + vars = (kps_sigmas * 2) ** 2 + return np.sqrt(-2 * area * vars * np.log(ks)) + + ks_10_dist = get_dist_wrt_ks(0.10, area) + ks_50_dist = get_dist_wrt_ks(0.50, area) + ks_85_dist = get_dist_wrt_ks(0.85, area) + + synth_joints = joints.copy() + + num_valid_joint = np.sum(joints[:, 2] > 0) + + N = 500 + for j in range(num_kps): + + # source keypoint position candidates to generate error on that (gt, swap, inv, swap+inv) + coord_list = [] + # on top of gt + gt_coord = np.expand_dims(synth_joints[j, :2], 0) + coord_list.append(gt_coord) + # on top of swap gt + swap_coord = near_joints[near_joints[:, j, 2] > 0, j, :2] + coord_list.append(swap_coord) + # on top of inv gt, swap inv gt + pair_exist = False + for (q, w) in kps_symmetry: + if j == q or j == w: + if j == q: + pair_idx 
= w + else: + pair_idx = q + pair_exist = True + if pair_exist and (joints[pair_idx, 2] > 0): + inv_coord = np.expand_dims(synth_joints[pair_idx, :2], 0) + coord_list.append(inv_coord) + else: + coord_list.append(np.empty([0, 2])) + + if pair_exist: + swap_inv_coord = near_joints[near_joints[:, pair_idx, 2] > 0, pair_idx, :2] + coord_list.append(swap_inv_coord) + else: + coord_list.append(np.empty([0, 2])) + + tot_coord_list = np.concatenate(coord_list) + + assert len(coord_list) == 4 + + # jitter error + synth_jitter = np.zeros(3) + if num_valid_joint <= 10: + if j == 0 or (j >= 13 and j <= 16): # nose, ankle, knee + jitter_prob = 0.15 + elif (j >= 1 and j <= 10): # ear, eye, upper body + jitter_prob = 0.20 + else: # hip + jitter_prob = 0.25 + else: + if j == 0 or (j >= 13 and j <= 16): # nose, ankle, knee + jitter_prob = 0.10 + elif (j >= 1 and j <= 10): # ear, eye, upper body + jitter_prob = 0.15 + else: # hip + jitter_prob = 0.20 + angle = np.random.uniform(0, 2 * math.pi, [N]) + r = np.random.uniform(ks_85_dist[j], ks_50_dist[j], [N]) + jitter_idx = 0 # gt + x = tot_coord_list[jitter_idx][0] + r * np.cos(angle) + y = tot_coord_list[jitter_idx][1] + r * np.sin(angle) + dist_mask = True + for i in range(len(tot_coord_list)): + if i == jitter_idx: + continue + dist_mask = np.logical_and(dist_mask, + np.sqrt((tot_coord_list[i][0] - x) ** 2 + (tot_coord_list[i][1] - y) ** 2) > r) + x = x[dist_mask].reshape(-1) + y = y[dist_mask].reshape(-1) + if len(x) > 0: + rand_idx = random.randrange(0, len(x)) + synth_jitter[0] = x[rand_idx] + synth_jitter[1] = y[rand_idx] + synth_jitter[2] = 1 + + # miss error + synth_miss = np.zeros(3) + if num_valid_joint <= 5: + if j >= 0 and j <= 4: # face + miss_prob = 0.15 + elif j == 5 or j == 6 or j == 15 or j == 16: # shoulder, ankle + miss_prob = 0.20 + else: # other parts + miss_prob = 0.25 + elif num_valid_joint <= 10: + if j >= 0 and j <= 4: # face + miss_prob = 0.10 + elif j == 5 or j == 6 or j == 15 or j == 16: # shoulder, ankle + miss_prob = 0.13 + else: # other parts + miss_prob = 0.15 + else: + if j >= 0 and j <= 4: # face + miss_prob = 0.02 + elif j == 5 or j == 6 or j == 15 or j == 16: # shoulder, ankle + miss_prob = 0.05 + else: # other parts + miss_prob = 0.10 + + miss_pt_list = [] + for miss_idx in range(len(tot_coord_list)): + angle = np.random.uniform(0, 2 * math.pi, [4 * N]) + r = np.random.uniform(ks_50_dist[j], ks_10_dist[j], [4 * N]) + x = tot_coord_list[miss_idx][0] + r * np.cos(angle) + y = tot_coord_list[miss_idx][1] + r * np.sin(angle) + dist_mask = True + for i in range(len(tot_coord_list)): + if i == miss_idx: + continue + dist_mask = np.logical_and(dist_mask, + np.sqrt((tot_coord_list[i][0] - x) ** 2 + (tot_coord_list[i][1] - y) ** 2) > + ks_50_dist[j]) + x = x[dist_mask].reshape(-1) + y = y[dist_mask].reshape(-1) + if len(x) > 0: + if miss_idx == 0: + coord = np.transpose(np.vstack([x, y]), [1, 0]) + miss_pt_list.append(coord) + else: + rand_idx = np.random.choice(range(len(x)), size=len(x) // 4) + x = np.take(x, rand_idx) + y = np.take(y, rand_idx) + coord = np.transpose(np.vstack([x, y]), [1, 0]) + miss_pt_list.append(coord) + if len(miss_pt_list) > 0: + miss_pt_list = np.concatenate(miss_pt_list, axis=0).reshape(-1, 2) + rand_idx = random.randrange(0, len(miss_pt_list)) + synth_miss[0] = miss_pt_list[rand_idx][0] + synth_miss[1] = miss_pt_list[rand_idx][1] + synth_miss[2] = 1 + + # inversion prob + synth_inv = np.zeros(3) + if j <= 4: # face + inv_prob = 0.01 + elif j >= 5 and j <= 10: # upper body + inv_prob = 0.03 + else: # 
lower body + inv_prob = 0.06 + if pair_exist and joints[pair_idx, 2] > 0: + angle = np.random.uniform(0, 2 * math.pi, [N]) + r = np.random.uniform(0, ks_50_dist[j], [N]) + inv_idx = (len(coord_list[0]) + len(coord_list[1])) + x = tot_coord_list[inv_idx][0] + r * np.cos(angle) + y = tot_coord_list[inv_idx][1] + r * np.sin(angle) + dist_mask = True + for i in range(len(tot_coord_list)): + if i == inv_idx: + continue + dist_mask = np.logical_and(dist_mask, np.sqrt( + (tot_coord_list[i][0] - x) ** 2 + (tot_coord_list[i][1] - y) ** 2) > r) + x = x[dist_mask].reshape(-1) + y = y[dist_mask].reshape(-1) + if len(x) > 0: + rand_idx = random.randrange(0, len(x)) + synth_inv[0] = x[rand_idx] + synth_inv[1] = y[rand_idx] + synth_inv[2] = 1 + + # swap prob + synth_swap = np.zeros(3) + swap_exist = (len(coord_list[1]) > 0) or (len(coord_list[3]) > 0) + if (num_valid_joint <= 10 and num_overlap > 0) or (num_valid_joint <= 15 and num_overlap >= 3): + if j >= 0 and j <= 4: # face + swap_prob = 0.02 + elif j >= 5 and j <= 10: # upper body + swap_prob = 0.15 + else: # lower body + swap_prob = 0.10 + else: + if j >= 0 and j <= 4: # face + swap_prob = 0.01 + elif j >= 5 and j <= 10: # upper body + swap_prob = 0.06 + else: # lower body + swap_prob = 0.03 + if swap_exist: + + swap_pt_list = [] + for swap_idx in range(len(tot_coord_list)): + if swap_idx == 0 or swap_idx == len(coord_list[0]) + len(coord_list[1]): + continue + angle = np.random.uniform(0, 2 * math.pi, [N]) + r = np.random.uniform(0, ks_50_dist[j], [N]) + x = tot_coord_list[swap_idx][0] + r * np.cos(angle) + y = tot_coord_list[swap_idx][1] + r * np.sin(angle) + dist_mask = True + for i in range(len(tot_coord_list)): + if i == 0 or i == len(coord_list[0]) + len(coord_list[1]): + dist_mask = np.logical_and(dist_mask, np.sqrt( + (tot_coord_list[i][0] - x) ** 2 + (tot_coord_list[i][1] - y) ** 2) > r) + x = x[dist_mask].reshape(-1) + y = y[dist_mask].reshape(-1) + if len(x) > 0: + coord = np.transpose(np.vstack([x, y]), [1, 0]) + swap_pt_list.append(coord) + if len(swap_pt_list) > 0: + swap_pt_list = np.concatenate(swap_pt_list, axis=0).reshape(-1, 2) + rand_idx = random.randrange(0, len(swap_pt_list)) + synth_swap[0] = swap_pt_list[rand_idx][0] + synth_swap[1] = swap_pt_list[rand_idx][1] + synth_swap[2] = 1 + + # TEMP + # jitter_prob, miss_prob, inv_prob, swap_prob = jitter_prob * 0.5, miss_prob * 0.5, inv_prob * 0.5, swap_prob + + # good prob + synth_good = np.zeros(3) + good_prob = 1 - (jitter_prob + miss_prob + inv_prob + swap_prob) + assert good_prob >= 0 + angle = np.random.uniform(0, 2 * math.pi, [N // 4]) + r = np.random.uniform(0, ks_85_dist[j], [N // 4]) + good_idx = 0 # gt + x = tot_coord_list[good_idx][0] + r * np.cos(angle) + y = tot_coord_list[good_idx][1] + r * np.sin(angle) + dist_mask = True + for i in range(len(tot_coord_list)): + if i == good_idx: + continue + dist_mask = np.logical_and(dist_mask, + np.sqrt((tot_coord_list[i][0] - x) ** 2 + (tot_coord_list[i][1] - y) ** 2) > r) + x = x[dist_mask].reshape(-1) + y = y[dist_mask].reshape(-1) + if len(x) > 0: + rand_idx = random.randrange(0, len(x)) + synth_good[0] = x[rand_idx] + synth_good[1] = y[rand_idx] + synth_good[2] = 1 + + if synth_jitter[2] == 0: + jitter_prob = 0 + if synth_inv[2] == 0: + inv_prob = 0 + if synth_swap[2] == 0: + swap_prob = 0 + if synth_miss[2] == 0: + miss_prob = 0 + if synth_good[2] == 0: + good_prob = 0 + + normalizer = jitter_prob + miss_prob + inv_prob + swap_prob + good_prob + if normalizer == 0: + synth_joints[j] = 0 + continue + + jitter_prob = 
jitter_prob / normalizer + miss_prob = miss_prob / normalizer + inv_prob = inv_prob / normalizer + swap_prob = swap_prob / normalizer + good_prob = good_prob / normalizer + + prob_list = [jitter_prob, miss_prob, inv_prob, swap_prob, good_prob] + synth_list = [synth_jitter, synth_miss, synth_inv, synth_swap, synth_good] + sampled_idx = np.random.choice(5, 1, p=prob_list)[0] + synth_joints[j] = synth_list[sampled_idx] + + assert synth_joints[j, 2] != 0 + + return synth_joints diff --git a/data_processing/common/utils/preprocessing.py b/data_processing/common/utils/preprocessing.py new file mode 100644 index 0000000..9a99b6e --- /dev/null +++ b/data_processing/common/utils/preprocessing.py @@ -0,0 +1,306 @@ +import numpy as np +import cv2 +import random +from config import cfg +import math + + + +def load_img(path, order='RGB'): + img = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) + if not isinstance(img, np.ndarray): + raise IOError("Fail to read %s" % path) + + if order=='RGB': + img = img[:,:,::-1].copy() + + img = img.astype(np.float32) + return img + +def get_bbox(joint_img, joint_valid): + + x_img, y_img = joint_img[:,0], joint_img[:,1] + x_img = x_img[joint_valid==1]; y_img = y_img[joint_valid==1]; + xmin = min(x_img); ymin = min(y_img); xmax = max(x_img); ymax = max(y_img); + + x_center = (xmin+xmax)/2.; width = xmax-xmin; + xmin = x_center - 0.5*width*1.2 + xmax = x_center + 0.5*width*1.2 + + y_center = (ymin+ymax)/2.; height = ymax-ymin; + ymin = y_center - 0.5*height*1.2 + ymax = y_center + 0.5*height*1.2 + + bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(np.float32) + return bbox + +def compute_iou(src_roi, dst_roi): + # IoU calculate with GTs + xmin = np.maximum(dst_roi[:, 0], src_roi[:, 0]) + ymin = np.maximum(dst_roi[:, 1], src_roi[:, 1]) + xmax = np.minimum(dst_roi[:, 0] + dst_roi[:, 2], src_roi[:, 0] + src_roi[:, 2]) + ymax = np.minimum(dst_roi[:, 1] + dst_roi[:, 3], src_roi[:, 1] + src_roi[:, 3]) + + interArea = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin) + + boxAArea = dst_roi[:, 2] * dst_roi[:, 3] + boxBArea = np.tile(src_roi[:, 2] * src_roi[:, 3], (len(dst_roi), 1)) + sumArea = boxAArea + boxBArea + + iou = interArea / (sumArea - interArea + 1e-5) + + return iou + +# def trunc_bbox(bbox): +# if False and random.random() >= 0.3: +# return bbox +# else: +# x, y, w, h = bbox +# x_aug_range, y_aug_range = w/2, h/2 +# x_aug, y_aug = random.random() * x_aug_range, random.random() * y_aug_range +# +# if random.random() <= 0.5: +# x, y = x+x_aug, y+y_aug +# else: # good +# w, h = w-x_aug, h-y_aug +# +# return [x,y,w,h] + +def trunc_tight_bbox(tight_bbox, img, is_full_body): + xmin, ymin, width, height = tight_bbox + xmax = xmin + width + ymax = ymin + height + + height = height * 1.2 + y_center = (ymin + ymax) / 2 + + ymin = y_center - 0.5 * height + ymax = y_center + 0.5 * height + + if is_full_body: + + crop_half_bottom = random.random()<0.8 + else: + crop_half_bottom = False + + ymin = ymin + height * 0.1 * random.random() # 0.0 ~ 0.1 + if crop_half_bottom: # for is_full_body, we only preserve its upper body (or crop the bottom body) + cropped_height = height * 0.25 + height * 0.25 * random.random() # 0.25 ~ 0.5 + ymax = ymin + cropped_height # 0.25 ~ 0.6 + # lower + else: # prob_preserve_more_than_half + cropped_height = height * 0.5 + height * 0.3 * random.random() # 0.5 ~ 0.8 + ymax = ymin + cropped_height # 0.5 ~ 0.9 + + tight_bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(np.float32) + + # Since we crop the 
tight bbox to simulate upper body, we need to set the lower part to 0 + img[int(ymax):, :, :] = 0 + img[:int(ymin), :, :] = 0 + + return tight_bbox, img + +def process_bbox(bbox, img_width, img_height, is_3dpw_test=False): + # sanitize bboxes + x, y, w, h = bbox + x1 = np.max((0, x)) + y1 = np.max((0, y)) + x2 = np.min((img_width - 1, x1 + np.max((0, w - 1)))) + y2 = np.min((img_height - 1, y1 + np.max((0, h - 1)))) + if is_3dpw_test: + bbox = np.array([x1, y1, x2-x1, y2-y1], dtype=np.float32) + else: + if w*h > 0 and x2 >= x1 and y2 >= y1: + bbox = np.array([x1, y1, x2-x1, y2-y1], dtype=np.float32) + else: + return None + + # aspect ratio preserving bbox + w = bbox[2] + h = bbox[3] + c_x = bbox[0] + w/2. + c_y = bbox[1] + h/2. + aspect_ratio = cfg.input_img_shape[1]/cfg.input_img_shape[0] + if w > aspect_ratio * h: + h = w / aspect_ratio + elif w < aspect_ratio * h: + w = h * aspect_ratio + bbox[2] = w*1.25 + bbox[3] = h*1.25 + bbox[0] = c_x - bbox[2]/2. + bbox[1] = c_y - bbox[3]/2. + + return bbox +def get_aug_config(exclude_flip): + scale_factor = 0.25 + rot_factor = 30 + color_factor = 0.2 + + scale = np.clip(np.random.randn(), -1.0, 1.0) * scale_factor + 1.0 + rot = np.clip(np.random.randn(), -2.0, + 2.0) * rot_factor if random.random() <= 0.2 else 0 + c_up = 1.0 + color_factor + c_low = 1.0 - color_factor + color_scale = np.array([random.uniform(c_low, c_up), random.uniform(c_low, c_up), random.uniform(c_low, c_up)]) + if exclude_flip: + do_flip = False + else: + do_flip = random.random() <= 0.5 + + do_crop_bbox = random.random() <= 0.7 + + return scale, rot, color_scale, do_flip , do_crop_bbox + + +def augmentation(img, tight_bbox, data_split, exclude_flip=False,is_full_body=False): + if data_split == 'train': + scale, rot, color_scale, do_flip, do_crop_bbox = get_aug_config(exclude_flip, ) + else: + scale, rot, color_scale, do_flip, do_crop_bbox = 1.0, 0.0, np.array([1, 1, 1]), False, False + + orig_tight_bbox = tight_bbox.copy() + if do_crop_bbox: + tight_bbox, img = trunc_tight_bbox(tight_bbox, img, is_full_body=is_full_body) + + bbox = process_bbox(tight_bbox, img.shape[1], img.shape[0]) + + + + ''' + bbox_viz = cv2.rectangle(img.copy(), (int(orig_tight_bbox[0]), int(orig_tight_bbox[1])), (int(orig_tight_bbox[0]+orig_tight_bbox[2]), int(orig_tight_bbox[1]+orig_tight_bbox[3])), (0,255,0), 2) + bbox_viz = cv2.rectangle(bbox_viz, (int(tight_bbox[0]), int(tight_bbox[1])), (int(tight_bbox[0]+tight_bbox[2]), int(tight_bbox[1]+tight_bbox[3])), (0,0,255), 2) + bbox_viz = cv2.rectangle(bbox_viz, (int(bbox[0]), int(bbox[1])), (int(bbox[0]+bbox[2]), int(bbox[1]+bbox[3])), (255,0,0), 2) + cv2.imshow('bbox', bbox_viz/255) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + #''' + img, trans, inv_trans = generate_patch_image(img, bbox, scale, rot, do_flip, cfg.input_img_shape) + + ''' + cv2.imshow('aug', img/255) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + #''' + + img = np.clip(img * color_scale[None, None, :], 0, 255) + return img, trans, inv_trans, rot, do_flip, bbox + + +def generate_patch_image(cvimg, bbox, scale, rot, do_flip, out_shape, enable_padding=False): + img = cvimg.copy() + img_height, img_width, img_channels = img.shape + + + + bb_c_x = float(bbox[0] + 0.5*bbox[2]) + bb_c_y = float(bbox[1] + 0.5*bbox[3]) + bb_width = float(bbox[2]) + bb_height = float(bbox[3]) + + if do_flip: + img = img[:, ::-1, :] + bb_c_x = img_width - bb_c_x - 1 + + if enable_padding and (bbox[0]<0 or bbox[1]<0 or bbox[0]+bbox[2]>img_width or bbox[1]+bbox[3]>img_height): + 
assert do_flip == False + trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot) + # print('trans:',trans.shape,trans) + # img_patch = cv2.warpAffine(img, trans, (int(out_shape[1]), int(out_shape[0])), flags=cv2.INTER_LINEAR) + # img_patch = img_patch.astype(np.float32) + inv_trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot, + inv=True) + # reflection padding + # top, bottom, left, right + padding_top = max(int(-bbox[1]),0) + padding_bottom = max(int(bbox[1]+bbox[3]-img_height),0) + padding_left = max(int(-bbox[0]),0) + padding_right = max(int(bbox[0]+bbox[2]-img_width),0) + img_padding = cv2.copyMakeBorder(img, padding_top, padding_bottom, padding_left, padding_right, cv2.BORDER_REFLECT) + #print(img_padding.shape,np.pad(img.astype(np.float32), ((padding_top, padding_bottom), (padding_left, padding_right), (0, 0)), 'reflect').shape) + blur_size = int(img.shape[0]//512*5)//2*2 +1 + + img_padding = img_padding.astype(np.float32) + h, w, _ = img_padding.shape + y, x, _ = np.ogrid[:h, :w, :1] + pad = [padding_left+1e-6, padding_top+1e-6, padding_right+1e-6, padding_bottom+1e-6] + mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), + 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3])) + + low_res = cv2.resize(img_padding, (0, 0), fx=0.1, fy=0.1, interpolation=cv2.INTER_AREA) + # blur = qsize * 0.02 * 0.1 + low_res = cv2.GaussianBlur(low_res, (blur_size, blur_size), 0) + low_res = cv2.resize(low_res, (img_padding.shape[1], img_padding.shape[0]), interpolation=cv2.INTER_LANCZOS4).astype(np.float32) + # cv2.imshow('low_res', cv2.resize(low_res, (0, 0), fx=0.5, fy=0.5).astype(np.uint8)) + # cv2.waitKey(0) + img_padding += (low_res - img_padding) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) + median = cv2.resize(img_padding.astype(np.uint8), (0, 0), fx=0.1, fy=0.1, interpolation=cv2.INTER_AREA) + median = np.median(median, axis=(0, 1)) + img_padding += (median - img_padding) * np.clip(mask, 0.0, 1.0) + img_padding = np.uint8(np.clip(np.rint(img_padding), 0, 255)) + + # cv2.imshow('img_padding', cv2.resize(img_padding, (0, 0), fx=0.5, fy=0.5).astype(np.uint8)) + # cv2.waitKey(0) + + temp_bbox = np.array([padding_left+bbox[0], padding_top+bbox[1], bbox[2], bbox[3]]) + temp_bb_c_x = float(temp_bbox[0] + 0.5 * temp_bbox[2]) + temp_bb_c_y = float(temp_bbox[1] + 0.5 * temp_bbox[3]) + temp_bb_width = float(temp_bbox[2]) + temp_bb_height = float(temp_bbox[3]) + temp_trans = gen_trans_from_patch_cv(temp_bb_c_x, temp_bb_c_y, temp_bb_width,temp_bb_height, out_shape[1], out_shape[0], scale, rot) + img_patch = cv2.warpAffine(img_padding, temp_trans, (int(out_shape[1]), int(out_shape[0])), flags=cv2.INTER_LINEAR) + + else: + trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot) + # print('trans:',trans.shape,trans) + img_patch = cv2.warpAffine(img, trans, (int(out_shape[1]), int(out_shape[0])), flags=cv2.INTER_LINEAR) + img_patch = img_patch.astype(np.float32) + inv_trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot, + inv=True) + + return img_patch, trans, inv_trans + +def rotate_2d(pt_2d, rot_rad): + x = pt_2d[0] + y = pt_2d[1] + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + xx = x * cs - y * sn + yy = x * sn + y * cs + return np.array([xx, yy], dtype=np.float32) + +def gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, 
dst_height, scale, rot, inv=False): + # augment size with scale + src_w = src_width * scale + src_h = src_height * scale + src_center = np.array([c_x, c_y], dtype=np.float32) + + # augment rotation + rot_rad = np.pi * rot / 180 + src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad) + src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad) + + dst_w = dst_width + dst_h = dst_height + dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32) + dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32) + dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32) + + src = np.zeros((3, 2), dtype=np.float32) + src[0, :] = src_center + src[1, :] = src_center + src_downdir + src[2, :] = src_center + src_rightdir + + dst = np.zeros((3, 2), dtype=np.float32) + dst[0, :] = dst_center + dst[1, :] = dst_center + dst_downdir + dst[2, :] = dst_center + dst_rightdir + + if inv: + trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + trans = trans.astype(np.float32) + return trans + diff --git a/data_processing/common/utils/renderer.py b/data_processing/common/utils/renderer.py new file mode 100644 index 0000000..13021ea --- /dev/null +++ b/data_processing/common/utils/renderer.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- + +# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is +# holder of all proprietary rights on this computer program. +# You can only use this computer program if you have closed +# a license agreement with MPG or you get the right to use the computer +# program from someone who is authorized to grant you that right. +# Any use of the computer program without a valid license is prohibited and +# liable to prosecution. +# +# Copyright©2019 Max-Planck-Gesellschaft zur Förderung +# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute +# for Intelligent Systems. All rights reserved. 
+# +# Contact: ps-license@tuebingen.mpg.de +import math +import trimesh +import pyrender +import numpy as np +from pyrender.constants import RenderFlags + + +class WeakPerspectiveCamera(pyrender.Camera): + def __init__(self, + scale, + translation, + znear=pyrender.camera.DEFAULT_Z_NEAR, + zfar=None, + name=None): + super(WeakPerspectiveCamera, self).__init__( + znear=znear, + zfar=zfar, + name=name, + ) + self.scale = scale + self.translation = translation + + def get_projection_matrix(self, width=None, height=None): + P = np.eye(4) + P[0, 0] = self.scale[0] + P[1, 1] = self.scale[1] + P[0, 3] = self.translation[0] * self.scale[0] + P[1, 3] = -self.translation[1] * self.scale[1] + P[2, 2] = -1 + return P + + +class Renderer: + def __init__(self, face, resolution=(224,224), orig_img=False, wireframe=False): + self.resolution = resolution + + self.faces = face + self.orig_img = orig_img + self.wireframe = wireframe + self.renderer = pyrender.OffscreenRenderer( + viewport_width=self.resolution[0], + viewport_height=self.resolution[1], + point_size=1.0 + ) + + # set the scene + self.scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3)) + + # light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=0.8) + light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=0.8) + + light_pose = np.eye(4) + light_pose[:3, 3] = [0, -1, 1] + self.scene.add(light, pose=light_pose) + + light_pose[:3, 3] = [0, 1, 1] + self.scene.add(light, pose=light_pose) + + light_pose[:3, 3] = [1, 1, 2] + self.scene.add(light, pose=light_pose) + + def render(self, img, verts, cam, angle=None, axis=None, mesh_filename=None, color=[1.0, 1.0, 0.9], rotate=False): + + mesh = trimesh.Trimesh(vertices=verts, faces=self.faces, process=False) + + Rx = trimesh.transformations.rotation_matrix(math.radians(180), [1, 0, 0]) + mesh.apply_transform(Rx) + + if rotate: + rot = trimesh.transformations.rotation_matrix( + np.radians(60), [0, 1, 0]) + mesh.apply_transform(rot) + + if mesh_filename is not None: + mesh.export(mesh_filename) + + if angle and axis: + R = trimesh.transformations.rotation_matrix(math.radians(angle), axis) + mesh.apply_transform(R) + + sx, sy, tx, ty = cam + + camera = WeakPerspectiveCamera( + scale=[sx, sy], + translation=[tx, ty], + zfar=1000. 
+ ) + + material = pyrender.MetallicRoughnessMaterial( + metallicFactor=0.0, + alphaMode='OPAQUE', + smooth=True, + wireframe=True, + roughnessFactor=1.0, + emissiveFactor=(0.1, 0.1, 0.1), + baseColorFactor=(color[0], color[1], color[2], 1.0) + ) + # material = pyrender.MetallicRoughnessMaterial( + # metallicFactor=0.2, + # alphaMode='OPAQUE', + # baseColorFactor=(0.8, 0.3, 0.3, 1.0)) + + mesh = pyrender.Mesh.from_trimesh(mesh, material=material) + + mesh_node = self.scene.add(mesh, 'mesh') + + camera_pose = np.eye(4) + cam_node = self.scene.add(camera, pose=camera_pose) + + if self.wireframe: + render_flags = RenderFlags.RGBA | RenderFlags.ALL_WIREFRAME + else: + render_flags = RenderFlags.RGBA + + rgb, depth = self.renderer.render(self.scene, flags=render_flags) + valid_mask = (depth > 0)[:, :, np.newaxis] + output_img = rgb * valid_mask + (1 - valid_mask) * img + image = output_img.astype(np.uint8) + + self.scene.remove_node(mesh_node) + self.scene.remove_node(cam_node) + + return image diff --git a/data_processing/common/utils/smpl.py b/data_processing/common/utils/smpl.py new file mode 100644 index 0000000..be4e125 --- /dev/null +++ b/data_processing/common/utils/smpl.py @@ -0,0 +1,66 @@ +import numpy as np +import torch +import os.path as osp +import json +from config import cfg + +import sys +sys.path.insert(0, cfg.smpl_path) +from smplpytorch.pytorch.smpl_layer import SMPL_Layer +from utils.transforms import build_adj, normalize_adj, transform_joint_to_other_db + + +class SMPL(object): + def __init__(self): + self.layer = {'neutral': self.get_layer(), + #'male': self.get_layer('male'), 'female': self.get_layer('female') + } + self.vertex_num = 6890 + self.face = self.layer['neutral'].th_faces.numpy() + self.joint_regressor = self.layer['neutral'].th_J_regressor.numpy() + self.shape_param_dim = 10 + self.vposer_code_dim = 32 + + # add nose, L/R eye, L/R ear, + self.face_kps_vertex = (331, 2802, 6262, 3489, 3990) # mesh vertex idx + nose_onehot = np.array([1 if i == 331 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + left_eye_onehot = np.array([1 if i == 2802 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + right_eye_onehot = np.array([1 if i == 6262 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + left_ear_onehot = np.array([1 if i == 3489 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + right_ear_onehot = np.array([1 if i == 3990 else 0 for i in range(self.joint_regressor.shape[1])], dtype=np.float32).reshape(1,-1) + self.joint_regressor = np.concatenate((self.joint_regressor, nose_onehot, left_eye_onehot, right_eye_onehot, left_ear_onehot, right_ear_onehot)) + # add head top + self.joint_regressor_extra = np.load(osp.join(cfg.data_dir, 'J_regressor_extra.npy')) + self.joint_regressor = np.concatenate((self.joint_regressor, self.joint_regressor_extra[3:4, :])).astype(np.float32) + + self.orig_joint_num = 24 + self.joint_num = 30 # original: 24. 
manually add nose, L/R eye, L/R ear, head top + self.joints_name = ('Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', + 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') + self.flip_pairs = ( (1,2), (4,5), (7,8), (10,11), (13,14), (16,17), (18,19), (20,21), (22,23) , (25,26), (27,28) ) + self.skeleton = ( (0,1), (1,4), (4,7), (7,10), (0,2), (2,5), (5,8), (8,11), (0,3), (3,6), (6,9), (9,14), (14,17), (17,19), (19, 21), (21,23), (9,13), (13,16), (16,18), (18,20), (20,22), (9,12), (12,24), (24,15), (24,25), (24,26), (25,27), (26,28), (24,29) ) + self.root_joint_idx = self.joints_name.index('Pelvis') + + # joint set for PositionNet prediction + self.graph_joint_num = 15 + self.graph_joints_name = ('Pelvis', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Neck', 'Head_top', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist') + self.graph_flip_pairs = ((1, 2), (3, 4), (5, 6), (9, 10), (11, 12), (13, 14)) + self.graph_skeleton = ((0, 1), (1, 3), (3, 5), (0, 2), (2, 4), (4, 6), (0, 7), (7, 8), (7, 9), (9, 11), (11, 13), (7, 10), (10, 12), (12, 14)) + # construct graph adj + self.graph_adj = self.get_graph_adj() + + def reduce_joint_set(self, joint): + new_joint = [] + for name in self.graph_joints_name: + idx = self.joints_name.index(name) + new_joint.append(joint[:,idx,:]) + new_joint = torch.stack(new_joint,1) + return new_joint + + def get_graph_adj(self): + adj_mat = build_adj(self.graph_joint_num, self.graph_skeleton, self.graph_flip_pairs) + normalized_adj = normalize_adj(adj_mat) + return normalized_adj + + def get_layer(self, gender='neutral'): + return SMPL_Layer(gender=gender, model_root=cfg.smpl_path + '/smplpytorch/native/models') diff --git a/data_processing/common/utils/smplpytorch/LICENSE b/data_processing/common/utils/smplpytorch/LICENSE new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/data_processing/common/utils/smplpytorch/README.md b/data_processing/common/utils/smplpytorch/README.md new file mode 100644 index 0000000..1bf0d9f --- /dev/null +++ b/data_processing/common/utils/smplpytorch/README.md @@ -0,0 +1,67 @@ +SMPL layer for PyTorch +======= + +[SMPL](http://smpl.is.tue.mpg.de) human body [\[1\]](#references) layer for [PyTorch](https://pytorch.org/) (tested with v0.4 and v1.x) +is a differentiable PyTorch layer that deterministically maps from pose and shape parameters to human body joints and vertices. +It can be integrated into any architecture as a differentiable layer to predict body meshes. +The code is adapted from the [manopth](https://github.com/hassony2/manopth) repository by [Yana Hasson](https://github.com/hassony2). + +
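As a minimal sketch of how the layer is used (this mirrors the bundled `demo.py` and assumes the SMPL pickle files have been placed under `smplpytorch/native/models` as described in the Setup section below), pose and shape parameters can be pushed through the layer and gradients propagated back to them:

```python
import torch
from smplpytorch.pytorch.smpl_layer import SMPL_Layer

# Load the neutral SMPL model; the .pkl files are resolved relative to model_root.
smpl_layer = SMPL_Layer(center_idx=0, gender='neutral',
                        model_root='smplpytorch/native/models')

# 72 axis-angle pose values and 10 shape betas for a batch of one.
pose_params = (0.2 * torch.rand(1, 72)).requires_grad_()
shape_params = (0.03 * torch.rand(1, 10)).requires_grad_()

verts, joints = smpl_layer(pose_params, th_betas=shape_params)
print(verts.shape, joints.shape)  # torch.Size([1, 6890, 3]) torch.Size([1, 24, 3])

# The layer is differentiable, so a loss on joints/vertices back-propagates
# to the pose and shape parameters (or to a network that predicts them).
loss = (joints ** 2).sum()
loss.backward()
print(pose_params.grad.shape, shape_params.grad.shape)
```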

+![smpl](assets/image.png)

+ + +## Setup + +### 1. The `smplpytorch` package +* **Run without installing:** You will need to install the dependencies listed in [environment.yml](environment.yml): + * `conda env update -f environment.yml` in an existing environment, or + * `conda env create -f environment.yml`, for a new `smplpytorch` environment +* **Install:** To import `SMPL_Layer` in another project with `from smplpytorch.pytorch.smpl_layer import SMPL_Layer` do one of the following. + * Option 1: This should automatically install the dependencies. + ``` bash + git clone https://github.com/gulvarol/smplpytorch.git + cd smplpytorch + pip install . + ``` + * Option 2: You can install `smplpytorch` from [PyPI](https://pypi.org/project/smplpytorch/). Additionally, you might need to install [chumpy](https://github.com/hassony2/chumpy.git). + ``` bash + pip install smplpytorch + ``` + +### 2. Download SMPL pickle files + * Download the models from the [SMPL website](http://smpl.is.tue.mpg.de/) by choosing "SMPL for Python users". Note that you need to comply with the [SMPL model license](http://smpl.is.tue.mpg.de/license_model). + * Extract and copy the `models` folder into the `smplpytorch/native/` folder (or set the `model_root` parameter accordingly). + +## Demo + +Forward pass the randomly created pose and shape parameters from the SMPL layer and display the human body mesh and joints: + +`python demo.py` + +## Acknowledgements +The code **largely** builds on the [manopth](https://github.com/hassony2/manopth) repository from [Yana Hasson](https://github.com/hassony2), which implements the [MANO](http://mano.is.tue.mpg.de) hand model [\[2\]](#references) layer. + +The code is a PyTorch port of the original [SMPL](http://smpl.is.tue.mpg.de) model from [chumpy](https://github.com/mattloper/chumpy). It builds on the work of [Loper](https://github.com/mattloper) et al. [\[1\]](#references). + +The code [reuses](https://github.com/gulvarol/smpl/pytorch/rodrigues_layer.py) [part of the code](https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py) by [Zhang Xiong](https://github.com/MandyMo) to compute the rotation utilities. + +If you find this code useful for your research, please cite the original [SMPL](http://smpl.is.tue.mpg.de) publication: + +``` +@article{SMPL:2015, + author = {Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J.}, + title = {{SMPL}: A Skinned Multi-Person Linear Model}, + journal = {ACM Trans. Graphics (Proc. SIGGRAPH Asia)}, + number = {6}, + pages = {248:1--248:16}, + volume = {34}, + year = {2015} +} +``` + +## References + +\[1\] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. 
Black, "SMPL: A Skinned Multi-Person Linear Model," SIGGRAPH Asia, 2015. + +\[2\] Javier Romero, Dimitrios Tzionas, and Michael J. Black, "Embodied Hands: Modeling and Capturing Hands and Bodies Together," SIGGRAPH Asia, 2017. diff --git a/data_processing/common/utils/smplpytorch/assets/image.png b/data_processing/common/utils/smplpytorch/assets/image.png new file mode 100644 index 0000000..c73c891 Binary files /dev/null and b/data_processing/common/utils/smplpytorch/assets/image.png differ diff --git a/data_processing/common/utils/smplpytorch/demo.py b/data_processing/common/utils/smplpytorch/demo.py new file mode 100644 index 0000000..f506974 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/demo.py @@ -0,0 +1,38 @@ +import torch + +from smplpytorch.pytorch.smpl_layer import SMPL_Layer +from display_utils import display_model + + +if __name__ == '__main__': + cuda = False + batch_size = 1 + + # Create the SMPL layer + smpl_layer = SMPL_Layer( + center_idx=0, + gender='neutral', + model_root='smplpytorch/native/models') + + # Generate random pose and shape parameters + pose_params = torch.rand(batch_size, 72) * 0.2 + shape_params = torch.rand(batch_size, 10) * 0.03 + + # GPU mode + if cuda: + pose_params = pose_params.cuda() + shape_params = shape_params.cuda() + smpl_layer.cuda() + + # Forward from the SMPL layer + verts, Jtr = smpl_layer(pose_params, th_betas=shape_params) + + # Draw output vertices and joints + display_model( + {'verts': verts.cpu().detach(), + 'joints': Jtr.cpu().detach()}, + model_faces=smpl_layer.th_faces, + with_joints=True, + kintree_table=smpl_layer.kintree_table, + savepath='image.png', + show=True) diff --git a/data_processing/common/utils/smplpytorch/display_utils.py b/data_processing/common/utils/smplpytorch/display_utils.py new file mode 100644 index 0000000..2a4b13a --- /dev/null +++ b/data_processing/common/utils/smplpytorch/display_utils.py @@ -0,0 +1,74 @@ +from matplotlib import pyplot as plt +from mpl_toolkits.mplot3d import Axes3D +from mpl_toolkits.mplot3d.art3d import Poly3DCollection +# plt.switch_backend('agg') + + +def display_model( + model_info, + model_faces=None, + with_joints=False, + kintree_table=None, + ax=None, + batch_idx=0, + show=True, + savepath=None): + """ + Displays mesh batch_idx in batch of model_info, model_info as returned by + generate_random_model + """ + if ax is None: + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + verts, joints = model_info['verts'][batch_idx], model_info['joints'][ + batch_idx] + if model_faces is None: + ax.scatter(verts[:, 0], verts[:, 1], verts[:, 2], alpha=0.2) + else: + mesh = Poly3DCollection(verts[model_faces], alpha=0.2) + face_color = (141 / 255, 184 / 255, 226 / 255) + edge_color = (50 / 255, 50 / 255, 50 / 255) + mesh.set_edgecolor(edge_color) + mesh.set_facecolor(face_color) + ax.add_collection3d(mesh) + if with_joints: + draw_skeleton(joints, kintree_table=kintree_table, ax=ax) + ax.set_xlabel('X') + ax.set_ylabel('Y') + ax.set_zlabel('Z') + ax.set_xlim(-0.7, 0.7) + ax.set_ylim(-0.7, 0.7) + ax.set_zlim(-0.7, 0.7) + ax.view_init(azim=-90, elev=100) + fig.subplots_adjust(left=0, right=1, bottom=0, top=1) + if savepath: + print('Saving figure at {}.'.format(savepath)) + plt.savefig(savepath, bbox_inches='tight', pad_inches=0) + if show: + plt.show() + return ax + + +def draw_skeleton(joints3D, kintree_table, ax=None, with_numbers=True): + if ax is None: + fig = plt.figure(frameon=False) + ax = fig.add_subplot(111, projection='3d') + else: + ax = ax + + colors = [] 
+ left_right_mid = ['r', 'g', 'b'] + kintree_colors = [2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1] + for c in kintree_colors: + colors += left_right_mid[c] + # For each 24 joint + for i in range(1, kintree_table.shape[1]): + j1 = kintree_table[0][i] + j2 = kintree_table[1][i] + ax.plot([joints3D[j1, 0], joints3D[j2, 0]], + [joints3D[j1, 1], joints3D[j2, 1]], + [joints3D[j1, 2], joints3D[j2, 2]], + color=colors[i], linestyle='-', linewidth=2, marker='o', markersize=5) + if with_numbers: + ax.text(joints3D[j2, 0], joints3D[j2, 1], joints3D[j2, 2], j2) + return ax diff --git a/data_processing/common/utils/smplpytorch/environment.yml b/data_processing/common/utils/smplpytorch/environment.yml new file mode 100644 index 0000000..487c7ed --- /dev/null +++ b/data_processing/common/utils/smplpytorch/environment.yml @@ -0,0 +1,11 @@ +name: smplpytorch + +dependencies: + - opencv + - python=3.7 + - matplotlib + - numpy + - pytorch + - pip + - pip: + - git+https://github.com/hassony2/chumpy.git diff --git a/data_processing/common/utils/smplpytorch/image.png b/data_processing/common/utils/smplpytorch/image.png new file mode 100644 index 0000000..c73c891 Binary files /dev/null and b/data_processing/common/utils/smplpytorch/image.png differ diff --git a/data_processing/common/utils/smplpytorch/setup.py b/data_processing/common/utils/smplpytorch/setup.py new file mode 100644 index 0000000..e0ced55 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/setup.py @@ -0,0 +1,30 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +REQUIREMENTS = [ + "opencv-python", + "matplotlib", + "numpy", + "torch", + "chumpy @ git+ssh://git@github.com/hassony2/chumpy"] + +setuptools.setup( + name="smplpytorch", + version="0.0.1", + author="Gul Varol", + author_email="gulvarols@gmail.com", + python_requires=">=3.5.0", + install_requires=REQUIREMENTS, + description="SMPL human body layer for PyTorch is a differentiable PyTorch layer", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/gulvarol/smplpytorch", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Operating System :: OS Independent", + ], +) diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/__init__.py b/data_processing/common/utils/smplpytorch/smplpytorch/__init__.py new file mode 100644 index 0000000..f3d4e53 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/smplpytorch/__init__.py @@ -0,0 +1 @@ +name = "smplpytorch" diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/native/__init__.py b/data_processing/common/utils/smplpytorch/smplpytorch/native/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/native/models/README.md b/data_processing/common/utils/smplpytorch/smplpytorch/native/models/README.md new file mode 100644 index 0000000..9e113e9 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/smplpytorch/native/models/README.md @@ -0,0 +1 @@ +Here copy the .pkl model files. 
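For reference, `SMPL_Layer` (see `smpl_layer.py` later in this patch) looks the model files up by name relative to its `model_root` argument, so after downloading from the SMPL website this folder is expected to contain files along these lines (names as hard-coded in `SMPL_Layer.__init__`; they may differ slightly depending on the SMPL release you downloaded):

```
smplpytorch/native/models/
├── basicModel_neutral_lbs_10_207_0_v1.0.0.pkl
├── basicModel_f_lbs_10_207_0_v1.0.0.pkl
└── basicModel_m_lbs_10_207_0_v1.0.0.pkl
```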
diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/__init__.py b/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/posemapper.py b/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/posemapper.py new file mode 100644 index 0000000..88a2ed7 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/posemapper.py @@ -0,0 +1,31 @@ +import chumpy as ch +import numpy as np +import cv2 + + +class Rodrigues(ch.Ch): + dterms = 'rt' + + def compute_r(self): + return cv2.Rodrigues(self.rt.r)[0] + + def compute_dr_wrt(self, wrt): + if wrt is self.rt: + return cv2.Rodrigues(self.rt.r)[1].T + + +def lrotmin(p): + if isinstance(p, np.ndarray): + p = p.ravel()[3:] + return np.concatenate([(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() for pp in p.reshape((-1, 3))]).ravel() + if p.ndim != 2 or p.shape[1] != 3: + p = p.reshape((-1, 3)) + p = p[1:] + return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel() + + +def posemap(s): + if s == 'lrotmin': + return lrotmin + else: + raise Exception('Unknown posemapping: %s' % (str(s),)) diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/serialization.py b/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/serialization.py new file mode 100644 index 0000000..9bf16ee --- /dev/null +++ b/data_processing/common/utils/smplpytorch/smplpytorch/native/webuser/serialization.py @@ -0,0 +1,39 @@ +def ready_arguments(fname_or_dict): + import numpy as np + import pickle + import chumpy as ch + from chumpy.ch import MatVecMult + from smplpytorch.native.webuser.posemapper import posemap + + if not isinstance(fname_or_dict, dict): + dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1') + # dd = pickle.load(open(fname_or_dict, 'rb')) + else: + dd = fname_or_dict + + want_shapemodel = 'shapedirs' in dd + nposeparms = dd['kintree_table'].shape[1] * 3 + + if 'trans' not in dd: + dd['trans'] = np.zeros(3) + if 'pose' not in dd: + dd['pose'] = np.zeros(nposeparms) + if 'shapedirs' in dd and 'betas' not in dd: + dd['betas'] = np.zeros(dd['shapedirs'].shape[-1]) + + for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']: + if (s in dd) and not hasattr(dd[s], 'dterms'): + dd[s] = ch.array(dd[s]) + + if want_shapemodel: + dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template'] + v_shaped = dd['v_shaped'] + J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0]) + J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1]) + J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2]) + dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T + dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose'])) + else: + dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose'])) + + return dd diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/__init__.py b/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/rodrigues_layer.py b/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/rodrigues_layer.py new file mode 100644 index 0000000..46ae478 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/rodrigues_layer.py @@ 
-0,0 +1,85 @@ +""" +This part reuses code from https://github.com/MandyMo/pytorch_HMR/blob/master/src/util.py +which is part of a PyTorch port of SMPL. +Thanks to Zhang Xiong (MandyMo) for making this great code available on github ! +""" + +import argparse +from torch.autograd import gradcheck +import torch +from torch.autograd import Variable + + +def quat2mat(quat): + """Convert quaternion coefficients to rotation matrix. + Args: + quat: size = [batch_size, 4] 4 <===>(w, x, y, z) + Returns: + Rotation matrix corresponding to the quaternion -- size = [batch_size, 3, 3] + """ + norm_quat = quat + norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True) + w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, + 2], norm_quat[:, + 3] + + batch_size = quat.size(0) + + w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2) + wx, wy, wz = w * x, w * y, w * z + xy, xz, yz = x * y, x * z, y * z + + rotMat = torch.stack([ + w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, + w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, + w2 - x2 - y2 + z2 + ], + dim=1).view(batch_size, 3, 3) + return rotMat + + +def batch_rodrigues(axisang): + #axisang N x 3 + axisang_norm = torch.norm(axisang + 1e-8, p=2, dim=1) + angle = torch.unsqueeze(axisang_norm, -1) + axisang_normalized = torch.div(axisang, angle) + angle = angle * 0.5 + v_cos = torch.cos(angle) + v_sin = torch.sin(angle) + quat = torch.cat([v_cos, v_sin * axisang_normalized], dim=1) + rot_mat = quat2mat(quat) + rot_mat = rot_mat.view(rot_mat.shape[0], 9) + return rot_mat + + +def th_get_axis_angle(vector): + angle = torch.norm(vector, 2, 1) + axes = vector / angle.unsqueeze(1) + return axes, angle + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--batch_size', default=1, type=int) + parser.add_argument('--cuda', action='store_true') + args = parser.parse_args() + + n_components = 6 + rot = 3 + inputs = torch.rand(args.batch_size, rot) + inputs_var = Variable(inputs.double(), requires_grad=True) + if args.cuda: + inputs = inputs.cuda() + # outputs = batch_rodrigues(inputs) + test_function = gradcheck(batch_rodrigues, (inputs_var, )) + print('batch test passed !') + + inputs = torch.rand(rot) + inputs_var = Variable(inputs.double(), requires_grad=True) + test_function = gradcheck(th_cv2_rod_sub_id.apply, (inputs_var, )) + print('th_cv2_rod test passed') + + inputs = torch.rand(rot) + inputs_var = Variable(inputs.double(), requires_grad=True) + test_th = gradcheck(th_cv2_rod.apply, (inputs_var, )) + print('th_cv2_rod_id test passed !') diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/smpl_layer.py b/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/smpl_layer.py new file mode 100644 index 0000000..5e74741 --- /dev/null +++ b/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/smpl_layer.py @@ -0,0 +1,156 @@ +import os + +import numpy as np +import torch +from torch.nn import Module + +from smplpytorch.native.webuser.serialization import ready_arguments +from smplpytorch.pytorch import rodrigues_layer +from smplpytorch.pytorch.tensutils import (th_posemap_axisang, th_with_zeros, th_pack, make_list, subtract_flat_id) + + +class SMPL_Layer(Module): + __constants__ = ['kintree_parents', 'gender', 'center_idx', 'num_joints'] + + def __init__(self, + center_idx=None, + gender='neutral', + model_root='smpl/native/models'): + """ + Args: + center_idx: index of center 
joint in our computations, + model_root: path to pkl files for the model + gender: 'neutral' (default) or 'female' or 'male' + """ + super().__init__() + + self.center_idx = center_idx + self.gender = gender + + if gender == 'neutral': + self.model_path = os.path.join(model_root, 'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl') + elif gender == 'female': + self.model_path = os.path.join(model_root, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl') + elif gender == 'male': + self.model_path = os.path.join(model_root, 'basicModel_m_lbs_10_207_0_v1.0.0.pkl') + + smpl_data = ready_arguments(self.model_path) + self.smpl_data = smpl_data + + self.register_buffer('th_betas', + torch.Tensor(smpl_data['betas'].r).unsqueeze(0)) + self.register_buffer('th_shapedirs', + torch.Tensor(smpl_data['shapedirs'].r)) + self.register_buffer('th_posedirs', + torch.Tensor(smpl_data['posedirs'].r)) + self.register_buffer( + 'th_v_template', + torch.Tensor(smpl_data['v_template'].r).unsqueeze(0)) + self.register_buffer( + 'th_J_regressor', + torch.Tensor(np.array(smpl_data['J_regressor'].toarray()))) + self.register_buffer('th_weights', + torch.Tensor(smpl_data['weights'].r)) + self.register_buffer('th_faces', + torch.Tensor(smpl_data['f'].astype(np.int32)).long()) + + # Kinematic chain params + self.kintree_table = smpl_data['kintree_table'] + parents = list(self.kintree_table[0].tolist()) + self.kintree_parents = parents + self.num_joints = len(parents) # 24 + + def forward(self, + th_pose_axisang, + th_betas=torch.zeros(1), + th_trans=torch.zeros(1)): + """ + Args: + th_pose_axisang (Tensor (batch_size x 72)): pose parameters in axis-angle representation + th_betas (Tensor (batch_size x 10)): if provided, uses given shape parameters + th_trans (Tensor (batch_size x 3)): if provided, applies trans to joints and vertices + """ + + batch_size = th_pose_axisang.shape[0] + # Convert axis-angle representation to rotation matrix rep. + th_pose_rotmat = th_posemap_axisang(th_pose_axisang) + # Take out the first rotmat (global rotation) + root_rot = th_pose_rotmat[:, :9].view(batch_size, 3, 3) + # Take out the remaining rotmats (23 joints) + th_pose_rotmat = th_pose_rotmat[:, 9:] + th_pose_map = subtract_flat_id(th_pose_rotmat) + + # Below does: v_shaped = v_template + shapedirs * betas + # If shape parameters are not provided + if th_betas is None or bool(torch.norm(th_betas) == 0): + th_v_shaped = self.th_v_template + torch.matmul( + self.th_shapedirs, self.th_betas.transpose(1, 0)).permute(2, 0, 1) + th_j = torch.matmul(self.th_J_regressor, th_v_shaped).repeat( + batch_size, 1, 1) + else: + th_v_shaped = self.th_v_template + torch.matmul( + self.th_shapedirs, th_betas.transpose(1, 0)).permute(2, 0, 1) + th_j = torch.matmul(self.th_J_regressor, th_v_shaped) + + # Below does: v_posed = v_shaped + posedirs * pose_map + th_v_posed = th_v_shaped + torch.matmul( + self.th_posedirs, th_pose_map.transpose(0, 1)).permute(2, 0, 1) + # Final T pose with transformation done! 
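+        # The remainder of forward() is standard linear blend skinning:
+        # per-joint global 4x4 transforms are accumulated by walking the
+        # kinematic chain in self.kintree_parents, each joint's rest-pose
+        # position is subtracted so the transforms act relative to the rest
+        # pose, and the result is blended with the per-vertex weights in
+        # th_weights to produce the posed vertices th_verts and joints th_jtr.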
+ + # Global rigid transformation + th_results = [] + + root_j = th_j[:, 0, :].contiguous().view(batch_size, 3, 1) + th_results.append(th_with_zeros(torch.cat([root_rot, root_j], 2))) + + # Rotate each part + for i in range(self.num_joints - 1): + i_val = int(i + 1) + joint_rot = th_pose_rotmat[:, (i_val - 1) * 9:i_val * + 9].contiguous().view(batch_size, 3, 3) + joint_j = th_j[:, i_val, :].contiguous().view(batch_size, 3, 1) + parent = make_list(self.kintree_parents)[i_val] + parent_j = th_j[:, parent, :].contiguous().view(batch_size, 3, 1) + joint_rel_transform = th_with_zeros( + torch.cat([joint_rot, joint_j - parent_j], 2)) + th_results.append( + torch.matmul(th_results[parent], joint_rel_transform)) + th_results_global = th_results + + th_results2 = torch.zeros((batch_size, 4, 4, self.num_joints), + dtype=root_j.dtype, + device=root_j.device) + + for i in range(self.num_joints): + padd_zero = torch.zeros(1, dtype=th_j.dtype, device=th_j.device) + joint_j = torch.cat( + [th_j[:, i], + padd_zero.view(1, 1).repeat(batch_size, 1)], 1) + tmp = torch.bmm(th_results[i], joint_j.unsqueeze(2)) + th_results2[:, :, :, i] = th_results[i] - th_pack(tmp) + + th_T = torch.matmul(th_results2, self.th_weights.transpose(0, 1)) + + th_rest_shape_h = torch.cat([ + th_v_posed.transpose(2, 1), + torch.ones((batch_size, 1, th_v_posed.shape[1]), + dtype=th_T.dtype, + device=th_T.device), + ], 1) + + th_verts = (th_T * th_rest_shape_h.unsqueeze(1)).sum(2).transpose(2, 1) + th_verts = th_verts[:, :, :3] + th_jtr = torch.stack(th_results_global, dim=1)[:, :, :3, 3] + + # If translation is not provided + if th_trans is None or bool(torch.norm(th_trans) == 0): + if self.center_idx is not None: + center_joint = th_jtr[:, self.center_idx].unsqueeze(1) + th_jtr = th_jtr - center_joint + th_verts = th_verts - center_joint + else: + th_jtr = th_jtr + th_trans.unsqueeze(1) + th_verts = th_verts + th_trans.unsqueeze(1) + + # Vertices and joints in meters + return th_verts, th_jtr diff --git a/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/tensutils.py b/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/tensutils.py new file mode 100644 index 0000000..092e60d --- /dev/null +++ b/data_processing/common/utils/smplpytorch/smplpytorch/pytorch/tensutils.py @@ -0,0 +1,53 @@ +import torch + +from smplpytorch.pytorch import rodrigues_layer + + +def th_posemap_axisang(pose_vectors): + ''' + Converts axis-angle to rotmat + pose_vectors (Tensor (batch_size x 72)): pose parameters in axis-angle representation + ''' + rot_nb = int(pose_vectors.shape[1] / 3) + rot_mats = [] + for joint_idx in range(rot_nb): + axis_ang = pose_vectors[:, joint_idx * 3:(joint_idx + 1) * 3] + rot_mat = rodrigues_layer.batch_rodrigues(axis_ang) + rot_mats.append(rot_mat) + + rot_mats = torch.cat(rot_mats, 1) + return rot_mats + + +def th_with_zeros(tensor): + batch_size = tensor.shape[0] + padding = tensor.new([0.0, 0.0, 0.0, 1.0]) + padding.requires_grad = False + + concat_list = [tensor, padding.view(1, 1, 4).repeat(batch_size, 1, 1)] + cat_res = torch.cat(concat_list, 1) + return cat_res + + +def th_pack(tensor): + batch_size = tensor.shape[0] + padding = tensor.new_zeros((batch_size, 4, 3)) + padding.requires_grad = False + pack_list = [padding, tensor] + pack_res = torch.cat(pack_list, 2) + return pack_res + + +def subtract_flat_id(rot_mats): + # Subtracts identity as a flattened tensor + id_flat = torch.eye( + 3, dtype=rot_mats.dtype, device=rot_mats.device).view(1, 9).repeat( + rot_mats.shape[0], 23) # [rot_mats.shape[0], 
23x9 = 207] + # id_flat.requires_grad = False + results = rot_mats - id_flat + return results + + +def make_list(tensor): + # type: (List[int]) -> List[int] + return tensor diff --git a/data_processing/common/utils/transforms.py b/data_processing/common/utils/transforms.py new file mode 100644 index 0000000..0d69f60 --- /dev/null +++ b/data_processing/common/utils/transforms.py @@ -0,0 +1,126 @@ +import torch +import numpy as np +from config import cfg +import torchgeometry as tgm +from torch.nn import functional as F + + +def denorm_joints(pose_out_img, body_bb2img_trans): + pose_out_img[:, 0] = pose_out_img[:, 0] / cfg.output_hm_shape[2] * cfg.input_img_shape[1] + pose_out_img[:, 1] = pose_out_img[:, 1] / cfg.output_hm_shape[1] * cfg.input_img_shape[0] + pose_out_img_xy1 = np.concatenate((pose_out_img[:, :2], np.ones_like(pose_out_img[:, :1])), 1) + pose_out_img[:, :2] = np.dot(body_bb2img_trans, pose_out_img_xy1.transpose(1, 0)).transpose(1, 0)[:, :2] + + return pose_out_img + +def cam2pixel(cam_coord, f, c): + x = cam_coord[:,0] / cam_coord[:,2] * f[0] + c[0] + y = cam_coord[:,1] / cam_coord[:,2] * f[1] + c[1] + z = cam_coord[:,2] + return np.stack((x,y,z),1) + +def pixel2cam(pixel_coord, f, c): + x = (pixel_coord[:,0] - c[0]) / f[0] * pixel_coord[:,2] + y = (pixel_coord[:,1] - c[1]) / f[1] * pixel_coord[:,2] + z = pixel_coord[:,2] + return np.stack((x,y,z),1) + +def world2cam(world_coord, R, t): + cam_coord = np.dot(R, world_coord.transpose(1,0)).transpose(1,0) + t.reshape(1,3) + return cam_coord + +def cam2world(cam_coord, R, t): + world_coord = np.dot(np.linalg.inv(R), (cam_coord - t.reshape(1,3)).transpose(1,0)).transpose(1,0) + return world_coord + +def rigid_transform_3D(A, B): + n, dim = A.shape + centroid_A = np.mean(A, axis = 0) + centroid_B = np.mean(B, axis = 0) + H = np.dot(np.transpose(A - centroid_A), B - centroid_B) / n + U, s, V = np.linalg.svd(H) + R = np.dot(np.transpose(V), np.transpose(U)) + if np.linalg.det(R) < 0: + s[-1] = -s[-1] + V[2] = -V[2] + R = np.dot(np.transpose(V), np.transpose(U)) + + varP = np.var(A, axis=0).sum() + c = 1/varP * np.sum(s) + + t = -np.dot(c*R, np.transpose(centroid_A)) + np.transpose(centroid_B) + return c, R, t + +def rigid_align(A, B): + c, R, t = rigid_transform_3D(A, B) + A2 = np.transpose(np.dot(c*R, np.transpose(A))) + t + return A2 + +def transform_joint_to_other_db(src_joint, src_name, dst_name): + src_joint_num = len(src_name) + dst_joint_num = len(dst_name) + + new_joint = np.zeros(((dst_joint_num,) + src_joint.shape[1:]), dtype=np.float32) + for src_idx in range(len(src_name)): + name = src_name[src_idx] + if name in dst_name: + dst_idx = dst_name.index(name) + new_joint[dst_idx] = src_joint[src_idx] + + return new_joint + +def build_adj(vertex_num, skeleton, flip_pairs): + adj_matrix = np.zeros((vertex_num, vertex_num)) + for line in skeleton: + adj_matrix[line] = 1 + adj_matrix[line[1], line[0]] = 1 + for pair in flip_pairs: + adj_matrix[pair] = 1 + adj_matrix[pair[1], pair[0]] = 1 + return adj_matrix + +def normalize_adj(adj): + vertex_num = adj.shape[0] + adj_self = adj + np.eye(vertex_num) + D = np.diag(adj_self.sum(0)) + np.spacing(np.array(0)) + _D = 1 / np.sqrt(D) + _D = _D * np.eye(vertex_num) # make diagonal matrix + normalized_adj = np.dot(np.dot(_D, adj_self), _D) + return normalized_adj + +def rot6d_to_axis_angle(x): + batch_size = x.shape[0] + + x = x.view(-1, 3, 2) + a1 = x[:, :, 0] + a2 = x[:, :, 1] + b1 = F.normalize(a1) + b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1) + b3 = 
torch.cross(b1, b2) + rot_mat = torch.stack((b1, b2, b3), dim=-1) # 3x3 rotation matrix + + rot_mat = torch.cat([rot_mat, torch.zeros((batch_size, 3, 1)).cuda().float()], 2) # 3x4 rotation matrix + axis_angle = tgm.rotation_matrix_to_angle_axis(rot_mat).reshape(-1, 3) # axis-angle + axis_angle[torch.isnan(axis_angle)] = 0.0 + return axis_angle + + +def convert_crop_cam_to_orig_img(cam, bbox, img_width, img_height): + ''' + Convert predicted camera from cropped image coordinates + to original image coordinates + :param cam (ndarray, shape=(3,)): weak perspective camera in cropped img coordinates + :param bbox (ndarray, shape=(4,)): bbox coordinates (c_x, c_y, h) + :param img_width (int): original image width + :param img_height (int): original image height + :return: + ''' + cx, cy, h = bbox[:,0], bbox[:,1], bbox[:,2] + hw, hh = img_width / 2., img_height / 2. + sx = cam[:,0] * (1. / (img_width / h)) + sy = cam[:,0] * (1. / (img_height / h)) + tx = ((cx - hw) / hw / sx) + cam[:,1] + ty = ((cy - hh) / hh / sy) + cam[:,2] + orig_cam = np.stack([sx, sy, tx, ty]).T + return orig_cam + diff --git a/data_processing/common/utils/vis.py b/data_processing/common/utils/vis.py new file mode 100644 index 0000000..d0913b7 --- /dev/null +++ b/data_processing/common/utils/vis.py @@ -0,0 +1,268 @@ +import os +import cv2 +import numpy as np +from mpl_toolkits.mplot3d import Axes3D +import matplotlib.pyplot as plt +import matplotlib as mpl +import trimesh +#os.environ['PYOPENGL_PLATFORM'] = 'egl' # comment it if use windows +import pyrender + + +def vis_bbox(img, bbox, alpha=1): + + kp_mask = np.copy(img) + bbox = bbox.astype(np.int32) # x, y, w, h + + b1 = bbox[0], bbox[1] + b2 = bbox[0] + bbox[2], bbox[1] + b3 = bbox[0] + bbox[2], bbox[1] + bbox[3] + b4 = bbox[0], bbox[1] + bbox[3] + + cv2.line(kp_mask, b1, b2, color=(255, 255, 0), thickness=1, lineType=cv2.LINE_AA) + cv2.line(kp_mask, b2, b3, color=(255, 255, 0), thickness=1, lineType=cv2.LINE_AA) + cv2.line(kp_mask, b3, b4, color=(255, 255, 0), thickness=1, lineType=cv2.LINE_AA) + cv2.line(kp_mask, b4, b1, color=(255, 255, 0), thickness=1, lineType=cv2.LINE_AA) + + return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + + +def vis_coco_skeleton(img, kps, kps_lines, alpha=1): + colors = [ + # face + (255/255, 153/255, 51/255), + (255/255, 153/255, 51/255), + (255/255, 153/255, 51/255), + (255/255, 153/255, 51/255), + + # left arm + (102/255, 255/255, 102/255), + (51/255, 255/255, 51/255), + + # right leg + (255 / 255, 102 / 255, 255 / 255), + (255 / 255, 51 / 255, 255 / 255), + + + # left leg + + (255 / 255, 102 / 255, 102 / 255), + (255 / 255, 51 / 255, 51 / 255), + + # shoulder-thorax, hip-pevlis, + (153/255, 255/255, 153/255), # l shoulder - thorax + (153/255, 204/255, 255/255), # r shoulder - thorax + (255/255, 153/255, 153/255), # l hip - pelvis + (255/255, 153/255, 255/255), # r hip -pelvis + + # center body line + (255/255, 204/255, 153/255), + (255/255, 178/255, 102/255), + + # right arm + (102 / 255, 178 / 255, 255 / 255), + (51 / 255, 153 / 255, 255 / 255), + ] + + colors = [[c[2]*255,c[1]*255,c[0]*255] for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + kp_mask = np.copy(img) + + line_thick = 5 #13 + circle_rad = 5 #10 + circle_thick = 5 #7 + + # Draw the keypoints. 
+ for l in range(len(kps_lines)): + i1 = kps_lines[l][0] + i2 = kps_lines[l][1] + p1 = kps[0, i1].astype(np.int32), kps[1, i1].astype(np.int32) + p2 = kps[0, i2].astype(np.int32), kps[1, i2].astype(np.int32) + cv2.line( + kp_mask, p1, p2, + color=colors[l], thickness=line_thick, lineType=cv2.LINE_AA) + cv2.circle( + kp_mask, p1, + radius=circle_rad, color=colors[l], thickness=circle_thick, lineType=cv2.LINE_AA) + cv2.circle( + kp_mask, p2, + radius=circle_rad, color=colors[l], thickness=circle_thick, lineType=cv2.LINE_AA) + + # Blend the keypoints. + return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + + +def vis_keypoints_with_skeleton(img, kps, kps_lines, kp_thresh=0.4, alpha=1, kps_scores=None): + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(kps_lines) + 2)] + colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + kp_mask = np.copy(img) + + # Draw the keypoints. + for l in range(len(kps_lines)): + i1 = kps_lines[l][0] + i2 = kps_lines[l][1] + p1 = kps[0, i1].astype(np.int32), kps[1, i1].astype(np.int32) + p2 = kps[0, i2].astype(np.int32), kps[1, i2].astype(np.int32) + if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh: + cv2.line( + kp_mask, p1, p2, + color=colors[l], thickness=2, lineType=cv2.LINE_AA) + if kps[2, i1] > kp_thresh: + cv2.circle( + kp_mask, p1, + radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA) + cv2.putText(kp_mask, str([l][0]), p1, cv2.FONT_HERSHEY_SIMPLEX, 0.5, colors[l]) + if kps[2, i2] > kp_thresh: + cv2.circle( + kp_mask, p2, + radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA) + cv2.putText(kp_mask, str([l][1]), p2, cv2.FONT_HERSHEY_SIMPLEX, 0.5, colors[l]) + if kps_scores is not None: + cv2.putText(kp_mask, str(kps_scores[i2, 0]), p2, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) + + # Blend the keypoints. + return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + +def vis_keypoints(img, kps, alpha=1, kps_vis=None): + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(kps) + 2)] + colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + kp_mask = np.copy(img) + + # Draw the keypoints. + for i in range(len(kps)): + p = kps[i][0].astype(np.int32), kps[i][1].astype(np.int32) + cv2.circle(kp_mask, p, radius=3, color=colors[i], thickness=-1, lineType=cv2.LINE_AA) + if kps_vis is not None: + cv2.putText(kp_mask, str(kps_vis[i, 0]), p, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) + else: + cv2.putText(kp_mask, str(i), p, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) + + # Blend the keypoints. + return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + +def vis_mesh(img, mesh_vertex, alpha=0.5): + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(mesh_vertex))] + colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + mask = np.copy(img) + + # Draw the mesh + for i in range(len(mesh_vertex)): + p = mesh_vertex[i][0].astype(np.int32), mesh_vertex[i][1].astype(np.int32) + cv2.circle(mask, p, radius=1, color=colors[i], thickness=-1, lineType=cv2.LINE_AA) + + # Blend the keypoints. 
+ return cv2.addWeighted(img, 1.0 - alpha, mask, alpha, 0) + +def vis_3d_skeleton(kpt_3d, kpt_3d_vis, kps_lines, filename=None): + + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(kps_lines) + 2)] + colors = [np.array((c[2], c[1], c[0])) for c in colors] + + for l in range(len(kps_lines)): + i1 = kps_lines[l][0] + i2 = kps_lines[l][1] + x = np.array([kpt_3d[i1,0], kpt_3d[i2,0]]) + y = np.array([kpt_3d[i1,1], kpt_3d[i2,1]]) + z = np.array([kpt_3d[i1,2], kpt_3d[i2,2]]) + + if kpt_3d_vis[i1,0] > 0 and kpt_3d_vis[i2,0] > 0: + ax.plot(x, z, -y, c=colors[l], linewidth=2) + if kpt_3d_vis[i1,0] > 0: + ax.scatter(kpt_3d[i1,0], kpt_3d[i1,2], -kpt_3d[i1,1], c=colors[l], marker='o') + if kpt_3d_vis[i2,0] > 0: + ax.scatter(kpt_3d[i2,0], kpt_3d[i2,2], -kpt_3d[i2,1], c=colors[l], marker='o') + + if filename is None: + ax.set_title('3D vis') + else: + ax.set_title(filename) + + ax.set_xlabel('X Label') + ax.set_ylabel('Z Label') + ax.set_zlabel('Y Label') + ax.legend() + + plt.show() + cv2.waitKey(0) + +def save_obj(v, f, file_name='output.obj'): + obj_file = open(file_name, 'w') + for i in range(len(v)): + obj_file.write('v ' + str(v[i][0]) + ' ' + str(v[i][1]) + ' ' + str(v[i][2]) + '\n') + for i in range(len(f)): + obj_file.write('f ' + str(f[i][0]+1) + '/' + str(f[i][0]+1) + ' ' + str(f[i][1]+1) + '/' + str(f[i][1]+1) + ' ' + str(f[i][2]+1) + '/' + str(f[i][2]+1) + '\n') + obj_file.close() + +def render_mesh(img, mesh, face, cam_param, color=(1.0, 1.0, 0.9, 1.0),cam_pose = None): + # mesh + mesh = trimesh.Trimesh(mesh, face) + rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0]) + mesh.apply_transform(rot) + material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=color) + mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=False) + scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3)) + scene.add(mesh, 'mesh') + + focal, princpt = cam_param['focal'], cam_param['princpt'] + camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1]) + + + if cam_pose is not None: + scene.add(camera, pose=cam_pose) + else: + scene.add(camera) + # scene.add(camera) + # print('camera pose in scene ', scene.get_pose(scene._main_camera_node)) + # renderer + renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0) + + # light + light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=0.8) + # light_pose = np.eye(4) + # light_pose[:3, 3] = np.array([0, -1, 1]) + # scene.add(light, pose=light_pose) + # light_pose[:3, 3] = np.array([0, 1, 1]) + # scene.add(light, pose=light_pose) + # light_pose[:3, 3] = np.array([1, 1, 2]) + # scene.add(light, pose=light_pose) + + light_pose = np.eye(4) + light_pose[:3, 3] = np.array([0, 0, -1]) + scene.add(light, pose=light_pose) + + scene.add(light, pose=cam_pose) + scene.add(light, pose=cam_pose) + scene.add(light, pose=cam_pose) + light_pose[:3, 3] = np.array([1, 1, -4]) + scene.add(light, pose=light_pose) + light_pose[:3, 3] = np.array([-1, 0, -1]) + scene.add(light, pose=light_pose) + light_pose[:3, 3] = np.array([0.2469, 1.8828, -2.4473]) + scene.add(light, pose=light_pose) + + # render + rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA) + rgb = rgb[:, :, :3].astype(np.float32) + valid_mask = 
(depth > 0)[:, :, None] + + # save to image + img = rgb * valid_mask + img * (1 - valid_mask) + return img.astype(np.uint8) \ No newline at end of file diff --git a/data_processing/data/CrowdPose/CrowdPose.py b/data_processing/data/CrowdPose/CrowdPose.py new file mode 100644 index 0000000..c169603 --- /dev/null +++ b/data_processing/data/CrowdPose/CrowdPose.py @@ -0,0 +1,335 @@ +import os +import os.path as osp +import numpy as np +from config import cfg +import copy +import json +import scipy.io as sio +import cv2 +import random +import math +import torch +import transforms3d +from pycocotools.coco import COCO + +from utils.posefix import replace_joint_img +from utils.smpl import SMPL +from utils.preprocessing import load_img, process_bbox, augmentation, compute_iou, get_bbox +from utils.vis import vis_keypoints, vis_mesh, save_obj, vis_keypoints_with_skeleton +from utils.transforms import world2cam, cam2pixel, pixel2cam, transform_joint_to_other_db + + +class CrowdPose(torch.utils.data.Dataset): + def __init__(self, transform, data_split): + self.transform = transform + self.data_split = data_split + self.img_path = osp.join(cfg.data_dir, 'CrowdPose', 'images') + self.annot_path = osp.join(cfg.data_dir, 'CrowdPose', 'annotations') + self.target_data_split = 'val' + self.fitting_thr = 5.0 # pixel in cfg.output_hm_shape space + + # mscoco skeleton + self.coco_joint_num = 18 # original: 17, manually added pelvis + self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis') + self.coco_skeleton = ((1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12)) + self.coco_flip_pairs = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16)) + self.coco_joint_regressor = np.load(osp.join(cfg.data_dir, 'MSCOCO', 'J_regressor_coco_hip_smpl.npy')) + + # crowdpose skeleton + self.crowdpose_jonit_num = 14+1 # manually added pelvis + self.crowdpose_joints_name = ('L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Head_top', 'Neck', 'Pelvis') + self.crowdpose_skeleton = ((0,2), (0,13), (1,3), (1,13), (2,4), (3,5), (6,14), (7,14), (6,8), (7,9), (8,10), (9,11), (12,13), (13,14) ) + self.crowdpose_flip_pairs = ((0, 1), (1, 2), (3, 4), (5, 6), (6, 7), (8, 9), (10, 11)) + self.crowdpose_coco_common_jidx = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14) # for posefix, exclude pelvis + + # smpl skeleton + self.smpl = SMPL() + self.face = self.smpl.face + self.joint_regressor = self.smpl.joint_regressor + self.vertex_num = self.smpl.vertex_num + self.joint_num = self.smpl.joint_num + self.joints_name = self.smpl.joints_name + self.flip_pairs = self.smpl.flip_pairs + self.skeleton = self.smpl.skeleton + self.root_joint_idx = self.smpl.root_joint_idx + self.face_kps_vertex = self.smpl.face_kps_vertex + + self.datalist = self.load_data() + print("crowdpose data len: ", len(self.datalist)) + + def add_pelvis(self, joint_coord): + lhip_idx = self.crowdpose_joints_name.index('L_Hip') + rhip_idx = self.crowdpose_joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx, 2] * joint_coord[rhip_idx, 2] # joint_valid + pelvis = pelvis.reshape(1, 3) + joint_coord = np.concatenate((joint_coord, pelvis)) + return joint_coord + + def 
load_data(self): + datalist = [] + if self.data_split == 'train': + split_list = ['train'] if self.data_split == 'train' else [self.target_data_split] + + datalist = [] + for split in split_list: + db = COCO(osp.join(self.annot_path, f'crowdpose_{split}.json')) + # smpl parameter load + with open(osp.join(self.annot_path, f'CrowdPose_{split}_SMPL_NeuralAnnot.json'), 'r') as f: + smpl_params = json.load(f) + + for iid in db.imgs.keys(): + aids = db.getAnnIds([iid]) + + tmplist = [] + for aid in aids: + ann = db.anns[aid] + img = db.loadImgs(ann['image_id'])[0] + img_path = osp.join(self.img_path, img['file_name']) + # bbox + if split != 'val': # correct reversed img width,height info + width, height = img['height'], img['width'] + else: + width, height = img['width'], img['height'] + + if sum(ann['keypoints']) == 0: + continue + + # bbox + # tight_bbox = np.array(ann['bbox']) + # bbox = process_bbox(tight_bbox, width, height) + # if bbox is None: continue + + # joint coordinates + joint_img = np.array(ann['keypoints'], dtype=np.float32).reshape(-1, 3) + joint_img = self.add_pelvis(joint_img) + joint_valid = (joint_img[:, 2].copy().reshape(-1, 1) > 0).astype(np.float32) + joint_img[:, 2] = joint_valid[:, 0] # for posefix, only good for 2d datasets + + # bbox + if cfg.use_bbox_in_ann: + tight_bbox = np.array(ann['bbox']) + else: + tight_bbox = get_bbox(joint_img, np.ones_like(joint_img[:, 0]), crop_bottom_body=True) + # bbox = process_bbox(tight_bbox, width, height) + # if bbox is None: continue + + if str(aid) in smpl_params: + smpl_param = smpl_params[str(aid)] + if smpl_param['fit_err'] < self.fitting_thr: + smpl_param = None + else: + smpl_param = None + + tmplist.append({'img_path': img_path, + 'img_shape': (height, width), + #'bbox': bbox, + 'tight_bbox': tight_bbox, 'joint_img': joint_img, 'joint_valid': joint_valid, 'neural_annot_result': smpl_param}) + + for i, person in enumerate(tmplist): + tight_bbox = person['tight_bbox'] + + # for swap + num_overlap = 0 + near_joints = [] + other_persons = tmplist[:i] + tmplist[i + 1:] + for other in other_persons: + other_bbox = other['tight_bbox'] + iou = compute_iou(tight_bbox[None, :], other_bbox[None, :]) + if iou < 0.1: + continue + num_overlap += 1 + other_joint = transform_joint_to_other_db(other['joint_img'], self.crowdpose_joints_name, self.coco_joints_name) + near_joints.append(other_joint) + + person['num_overlap'] = num_overlap + person['near_joints'] = near_joints + + datalist.extend(tmplist) + + return datalist + + def get_smpl_coord(self, smpl_param, cam_param, do_flip, img_shape): + pose, shape, trans = smpl_param['pose'], smpl_param['shape'], smpl_param['trans'] + smpl_pose = torch.FloatTensor(pose).view(1, -1); + smpl_shape = torch.FloatTensor(shape).view(1, -1); # smpl parameters (pose: 72 dimension, shape: 10 dimension) + smpl_trans = torch.FloatTensor(trans).view(1, -1) # translation vector + + # flip smpl pose parameter (axis-angle) + if do_flip: + smpl_pose = smpl_pose.view(-1, 3) + for pair in self.flip_pairs: + if pair[0] < len(smpl_pose) and pair[1] < len(smpl_pose): # face keypoints are already included in self.flip_pairs. However, they are not included in smpl_pose. 
+ smpl_pose[pair[0], :], smpl_pose[pair[1], :] = smpl_pose[pair[1], :].clone(), smpl_pose[pair[0], :].clone() + smpl_pose[:, 1:3] *= -1; # multiply -1 to y and z axis of axis-angle + smpl_pose = smpl_pose.view(1, -1) + + # get mesh and joint coordinates + smpl_mesh_coord, smpl_joint_coord = self.smpl.layer['neutral'](smpl_pose, smpl_shape, smpl_trans) + + # incorporate face keypoints + smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1, 3); + # smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1,3) + # smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex,:].reshape(-1,3) + # smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord)) + smpl_joint_coord = np.dot(self.joint_regressor, smpl_mesh_coord) + + # flip translation + if do_flip: # avg of old and new root joint should be image center. + focal, princpt = cam_param['focal'], cam_param['princpt'] + flip_trans_x = 2 * (((img_shape[1] - 1) / 2. - princpt[0]) / focal[0] * (smpl_joint_coord[self.root_joint_idx, 2])) - 2 * smpl_joint_coord[self.root_joint_idx][0] + smpl_mesh_coord[:, 0] += flip_trans_x + smpl_joint_coord[:, 0] += flip_trans_x + + # change to mean shape if beta is too far from it + smpl_shape[(smpl_shape.abs() > 3).any(dim=1)] = 0. + + return smpl_mesh_coord, smpl_joint_coord, smpl_pose[0].numpy(), smpl_shape[0].numpy() + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_path, img_shape, tight_bbox = data['img_path'], data['img_shape'], data['tight_bbox'] + # check if image is full body + # self.crowdpose_jonit_num = 14+1 # manually added pelvis + # self.crowdpose_joints_name = ('L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Head_top', 'Neck', 'Pelvis') + # data['joint_valid'].shape = (15, 1) + is_full_body = np.sum( + data['joint_valid'][6:12, :]) > 3 # 6:12 is the index of L_Hip, R_Hip, L_Knee, R_Knee, L_Ankle, R_Ankle + + # image load and affine transform + img = load_img(img_path) + img, img2bb_trans, bb2img_trans, rot, do_flip,bbox = augmentation(img, tight_bbox, self.data_split,is_full_body = is_full_body) + img = self.transform(img.astype(np.float32)) / 255. 
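+        # The training branch below (1) flips and affine-transforms the 2D
+        # CrowdPose keypoints into the cropped image, (2) rescales them to the
+        # output heatmap resolution and records which joints fall outside it,
+        # (3) remaps them to the SMPL joint ordering, and (4) applies the
+        # PoseFix-style replacement (replace_joint_img), which synthesizes
+        # noisy input joints (using overlapping persons' keypoints for swap
+        # errors) before they are fed to the network.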
+ + if self.data_split == 'train': + # coco gt + crowdpose_joint_img = data['joint_img'] + crowdpose_joint_valid = data['joint_valid'] + if do_flip: + crowdpose_joint_img[:, 0] = img_shape[1] - 1 - crowdpose_joint_img[:, 0] + for pair in self.crowdpose_flip_pairs: + crowdpose_joint_img[pair[0], :], crowdpose_joint_img[pair[1], :] = crowdpose_joint_img[pair[1], :].copy(), crowdpose_joint_img[pair[0], :].copy() + crowdpose_joint_valid[pair[0], :], crowdpose_joint_valid[pair[1], :] = crowdpose_joint_valid[pair[1], :].copy(), crowdpose_joint_valid[pair[0], :].copy() + + crowdpose_joint_img_xy1 = np.concatenate((crowdpose_joint_img[:, :2], np.ones_like(crowdpose_joint_img[:, :1])), 1) + crowdpose_joint_img[:, :2] = np.dot(img2bb_trans, crowdpose_joint_img_xy1.transpose(1, 0)).transpose(1, 0) + # for swap + if len(data['near_joints']) > 0: + near_joint_list = [] + for nj in data['near_joints']: + near_joint = np.ones((self.coco_joint_num, 3), dtype=np.float32) + nj_xy1 = np.concatenate((nj[:, :2], np.ones_like(nj[:, :1])), axis=1) + near_joint[:, :2] = np.dot(img2bb_trans, nj_xy1.transpose(1, 0)).transpose(1, 0) + near_joint_list.append(near_joint) + near_joints = np.asarray(near_joint_list, dtype=np.float32) + else: + near_joints = np.zeros((1, self.coco_joint_num, 3), dtype=np.float32) + + input_crowdpose_joint_img = crowdpose_joint_img.copy() + crowdpose_joint_img[:, 0] = crowdpose_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + crowdpose_joint_img[:, 1] = crowdpose_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + # check truncation + crowdpose_joint_trunc = crowdpose_joint_valid * ((crowdpose_joint_img[:, 0] >= 0) * (crowdpose_joint_img[:, 0] < cfg.output_hm_shape[2]) * (crowdpose_joint_img[:, 1] >= 0) * (crowdpose_joint_img[:, 1] < cfg.output_hm_shape[1])).reshape( + -1, 1).astype(np.float32) + + # transform coco joints to target db joints + crowdpose_joint_img = transform_joint_to_other_db(crowdpose_joint_img, self.crowdpose_joints_name, self.joints_name) + crowdpose_joint_cam = np.zeros((self.joint_num, 3), dtype=np.float32) # dummy + crowdpose_joint_valid = transform_joint_to_other_db(crowdpose_joint_valid, self.crowdpose_joints_name, self.joints_name) + crowdpose_joint_trunc = transform_joint_to_other_db(crowdpose_joint_trunc, self.crowdpose_joints_name, self.joints_name) + + # apply PoseFix + tmp_joint_img = transform_joint_to_other_db(input_crowdpose_joint_img, self.crowdpose_joints_name, self.coco_joints_name) + tmp_joint_img = replace_joint_img(tmp_joint_img, data['tight_bbox'], near_joints, data['num_overlap'], img2bb_trans) + tmp_joint_img = transform_joint_to_other_db(tmp_joint_img, self.coco_joints_name, self.crowdpose_joints_name) + input_crowdpose_joint_img[self.crowdpose_coco_common_jidx, :2] = tmp_joint_img[self.crowdpose_coco_common_jidx, :2] + """ + # debug PoseFix result + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), input_crowdpose_joint_img.T, self.crowdpose_skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + # import pdb; pdb.set_trace() + """ + input_crowdpose_joint_img[:, 0] = input_crowdpose_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + input_crowdpose_joint_img[:, 1] = input_crowdpose_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + input_crowdpose_joint_img = transform_joint_to_other_db(input_crowdpose_joint_img, self.crowdpose_joints_name, self.joints_name) + + neural_annot_result = 
data['neural_annot_result'] + if neural_annot_result is not None: + # use fitted mesh + smpl_param, cam_param = neural_annot_result['smpl_param'], neural_annot_result['cam_param'] + smpl_mesh_cam, smpl_joint_cam, smpl_pose, smpl_shape = self.get_smpl_coord(smpl_param, cam_param, do_flip, img_shape) + smpl_coord_cam = np.concatenate((smpl_mesh_cam, smpl_joint_cam)) + smpl_coord_img = cam2pixel(smpl_coord_cam, cam_param['focal'], cam_param['princpt']) + + # x,y affine transform, root-relative depth + smpl_coord_img_xy1 = np.concatenate((smpl_coord_img[:, :2], np.ones_like(smpl_coord_img[:, 0:1])), 1) + smpl_coord_img[:, :2] = np.dot(img2bb_trans, smpl_coord_img_xy1.transpose(1, 0)).transpose(1, 0)[:, :2] + """ + # vis smpl + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), smpl_coord_img[6890:].T, self.skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + """ + smpl_coord_img[:, 2] = smpl_coord_img[:, 2] - smpl_coord_cam[self.vertex_num + self.root_joint_idx][2] + smpl_coord_img[:, 0] = smpl_coord_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + smpl_coord_img[:, 1] = smpl_coord_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + smpl_coord_img[:, 2] = (smpl_coord_img[:, 2] / (cfg.bbox_3d_size / 2) + 1) / 2. * cfg.output_hm_shape[0] + + # check truncation + smpl_trunc = ( + (smpl_coord_img[:, 0] >= 0) * (smpl_coord_img[:, 0] < cfg.output_hm_shape[2]) * (smpl_coord_img[:, 1] >= 0) * (smpl_coord_img[:, 1] < cfg.output_hm_shape[1]) * (smpl_coord_img[:, 2] >= 0) * ( + smpl_coord_img[:, 2] < cfg.output_hm_shape[0])).reshape(-1, 1).astype(np.float32) + + # split mesh and joint coordinates + smpl_mesh_img = smpl_coord_img[:self.vertex_num]; + smpl_joint_img = smpl_coord_img[self.vertex_num:]; + smpl_mesh_trunc = smpl_trunc[:self.vertex_num]; + smpl_joint_trunc = smpl_trunc[self.vertex_num:]; + + # already checked in load_data() + is_valid_fit = True + + else: + smpl_joint_img = np.zeros((self.joint_num, 3), dtype=np.float32) # dummy + smpl_joint_cam = np.zeros((self.joint_num, 3), dtype=np.float32) # dummy + smpl_mesh_img = np.zeros((self.vertex_num, 3), dtype=np.float32) # dummy + smpl_pose = np.zeros((72), dtype=np.float32) # dummy + smpl_shape = np.zeros((10), dtype=np.float32) # dummy + smpl_joint_trunc = np.zeros((self.joint_num, 1), dtype=np.float32) + smpl_mesh_trunc = np.zeros((self.vertex_num, 1), dtype=np.float32) + is_valid_fit = False + + # 3D data rotation augmentation + rot_aug_mat = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0], [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0], [0, 0, 1]], dtype=np.float32) + # parameter + smpl_pose = smpl_pose.reshape(-1, 3) + root_pose = smpl_pose[self.root_joint_idx, :] + root_pose, _ = cv2.Rodrigues(root_pose) + root_pose, _ = cv2.Rodrigues(np.dot(rot_aug_mat, root_pose)) + smpl_pose[self.root_joint_idx] = root_pose.reshape(3) + smpl_pose = smpl_pose.reshape(-1) + # smpl coordinate + smpl_joint_cam = smpl_joint_cam - smpl_joint_cam[self.root_joint_idx, None] # root-relative + smpl_joint_cam = np.dot(rot_aug_mat, smpl_joint_cam.transpose(1, 0)).transpose(1, 0) + + # SMPL pose parameter validity + smpl_param_valid = np.ones((self.smpl.orig_joint_num, 3), dtype=np.float32) + for name in ('L_Ankle', 'R_Ankle', 'L_Toe', 'R_Toe', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand'): + smpl_param_valid[self.joints_name.index(name)] = 0 + smpl_param_valid = smpl_param_valid.reshape(-1) + + inputs = {'img': img, 'joints': 
input_crowdpose_joint_img[:, :2], 'joints_mask': crowdpose_joint_trunc} + targets = {'orig_joint_img': crowdpose_joint_img, 'fit_joint_img': smpl_joint_img, 'orig_joint_cam': crowdpose_joint_cam, 'fit_joint_cam': smpl_joint_cam, 'pose_param': smpl_pose, 'shape_param': smpl_shape} + meta_info = {'orig_joint_valid': crowdpose_joint_valid, 'orig_joint_trunc': crowdpose_joint_trunc, 'fit_param_valid': smpl_param_valid, 'fit_joint_trunc': smpl_joint_trunc, 'is_valid_fit': float(is_valid_fit),'bbox': bbox, + 'is_3D': float(False)} + return inputs, targets, meta_info + diff --git a/data_processing/data/Human36M/Human36M.py b/data_processing/data/Human36M/Human36M.py new file mode 100644 index 0000000..2f78ad2 --- /dev/null +++ b/data_processing/data/Human36M/Human36M.py @@ -0,0 +1,482 @@ +import os +import os.path as osp +import numpy as np +import torch +import cv2 +import random +import json +import math +import copy +import transforms3d +from pycocotools.coco import COCO +from config import cfg +from utils.posefix import replace_joint_img +from utils.smpl import SMPL +from utils.preprocessing import load_img, get_bbox, process_bbox, generate_patch_image, augmentation +from utils.transforms import world2cam, cam2pixel, pixel2cam, rigid_align, transform_joint_to_other_db +from utils.vis import vis_keypoints, vis_mesh, save_obj, vis_keypoints_with_skeleton + + +class Human36M(torch.utils.data.Dataset): + def __init__(self, transform, data_split): + self.transform = transform + self.data_split = data_split + self.img_dir = osp.join(cfg.data_dir, 'Human36M', 'images') + self.annot_path = osp.join(cfg.data_dir, 'Human36M', 'annotations') + self.human_bbox_root_dir = osp.join(cfg.data_dir, 'Human36M', 'rootnet_output', 'bbox_root_human36m_output.json') + self.action_name = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Posing', 'Purchases', 'Sitting', 'SittingDown', 'Smoking', 'Photo', 'Waiting', 'Walking', 'WalkDog', 'WalkTogether'] + self.fitting_thr = 25 # milimeter + + # COCO joint set + self.coco_joint_num = 17 # original: 17 + self.coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', + 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle') + + # H36M joint set + self.h36m_joint_num = 17 + self.h36m_joints_name = ('Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Torso', 'Neck', 'Nose', 'Head_top', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist') + self.h36m_flip_pairs = ( (1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13) ) + self.h36m_skeleton = ( (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6) ) + self.h36m_root_joint_idx = self.h36m_joints_name.index('Pelvis') + self.h36m_eval_joint = (1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16) + self.h36m_joint_regressor = np.load(osp.join(cfg.data_dir, 'Human36M', 'J_regressor_h36m_correct.npy')) + self.h36m_coco_common_jidx = (1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16) # for posefix, exclude pelvis + + # SMPL joint set + self.smpl = SMPL() + self.face = self.smpl.face + self.joint_regressor = self.smpl.joint_regressor + self.vertex_num = self.smpl.vertex_num + self.joint_num = self.smpl.joint_num + self.joints_name = self.smpl.joints_name + self.flip_pairs = self.smpl.flip_pairs + self.skeleton = self.smpl.skeleton + self.root_joint_idx = self.smpl.root_joint_idx + self.face_kps_vertex = 
self.smpl.face_kps_vertex + + self.datalist = self.load_data() + print("h36m data len: ", len(self.datalist)) + + def get_subsampling_ratio(self): + if self.data_split == 'train': + return 5 + elif self.data_split == 'test': + return 64 + else: + assert 0, print('Unknown subset') + + def get_subject(self): + if self.data_split == 'train': + subject = [1,5,6,7,8] + elif self.data_split == 'test': + subject = [9,11] + else: + assert 0, print("Unknown subset") + + return subject + + def load_data(self): + subject_list = self.get_subject() + sampling_ratio = self.get_subsampling_ratio() + + # aggregate annotations from each subject + db = COCO() + cameras = {} + joints = {} + smpl_params = {} + for subject in subject_list: + # data load + with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_data.json'),'r') as f: + annot = json.load(f) + if len(db.dataset) == 0: + for k,v in annot.items(): + db.dataset[k] = v + else: + for k,v in annot.items(): + db.dataset[k] += v + # camera load + with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_camera.json'),'r') as f: + cameras[str(subject)] = json.load(f) + # joint coordinate load + with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_joint_3d.json'),'r') as f: + joints[str(subject)] = json.load(f) + # smpl parameter load + with open(osp.join(self.annot_path, 'Human36M_subject' + str(subject) + '_smpl_param.json'),'r') as f: + smpl_params[str(subject)] = json.load(f) + db.createIndex() + + if self.data_split == 'test' and not cfg.use_gt_info: + print("Get bounding box and root from " + self.human_bbox_root_dir) + bbox_root_result = {} + with open(self.human_bbox_root_dir) as f: + annot = json.load(f) + for i in range(len(annot)): + bbox_root_result[str(annot[i]['image_id'])] = {'bbox': np.array(annot[i]['bbox']), 'root': np.array(annot[i]['root_cam'])} + else: + print("Get bounding box and root from groundtruth") + + datalist = [] + for aid in db.anns.keys(): + ann = db.anns[aid] + image_id = ann['image_id'] + img = db.loadImgs(image_id)[0] + img_path = osp.join(self.img_dir, img['file_name']) + img_shape = (img['height'], img['width']) + + # check subject and frame_idx + frame_idx = img['frame_idx']; + if frame_idx % sampling_ratio != 0: + continue + + # check smpl parameter exist + subject = img['subject']; action_idx = img['action_idx']; subaction_idx = img['subaction_idx']; frame_idx = img['frame_idx']; + try: + smpl_param = smpl_params[str(subject)][str(action_idx)][str(subaction_idx)][str(frame_idx)] + except KeyError: + smpl_param = None + + # camera parameter + cam_idx = img['cam_idx'] + cam_param = cameras[str(subject)][str(cam_idx)] + R,t,f,c = np.array(cam_param['R'], dtype=np.float32), np.array(cam_param['t'], dtype=np.float32), np.array(cam_param['f'], dtype=np.float32), np.array(cam_param['c'], dtype=np.float32) + cam_param = {'R': R, 't': t, 'focal': f, 'princpt': c} + + # only use frontal camera following previous works (HMR and SPIN) + if self.data_split == 'test' and str(cam_idx) != '4': + continue + + # project world coordinate to cam, image coordinate space + joint_world = np.array(joints[str(subject)][str(action_idx)][str(subaction_idx)][str(frame_idx)], dtype=np.float32) + joint_cam = world2cam(joint_world, R, t) + joint_img = cam2pixel(joint_cam, f, c) + joint_valid = np.ones((self.h36m_joint_num,1)) + + if cfg.use_bbox_in_ann: + tight_bbox = np.array(ann['bbox']) + else: + tight_bbox = get_bbox(joint_img, np.ones_like(joint_img[:, 0]), crop_bottom_body=True) + if 
self.data_split == 'test' and not cfg.use_gt_info: + bbox = bbox_root_result[str(image_id)]['bbox'] # bbox should be aspect ratio preserved-extended. It is done in RootNet. + root_joint_depth = bbox_root_result[str(image_id)]['root'][2] + datalist.append({ + 'img_path': img_path, + 'img_id': image_id, + 'img_shape': img_shape, + 'bbox': bbox, + 'tight_bbox': tight_bbox, + 'joint_img': joint_img, + 'joint_cam': joint_cam, + 'joint_valid': joint_valid, + 'smpl_param': smpl_param, + 'root_joint_depth': root_joint_depth, + 'cam_param': cam_param, + 'num_overlap': 0, + 'near_joints': np.zeros((1, self.coco_joint_num, 3), dtype=np.float32) # coco_joint_num + + }) + else: + # bbox = process_bbox(np.array(ann['bbox']), img['width'], img['height']) + # if bbox is None: continue + root_joint_depth = joint_cam[self.h36m_root_joint_idx][2] + + datalist.append({ + 'img_path': img_path, + 'img_id': image_id, + 'img_shape': img_shape, + #'bbox': bbox, + 'tight_bbox': tight_bbox, + 'joint_img': joint_img, + 'joint_cam': joint_cam, + 'joint_valid': joint_valid, + 'smpl_param': smpl_param, + 'root_joint_depth': root_joint_depth, + 'cam_param': cam_param, + 'num_overlap': 0, + 'near_joints': np.zeros((1, self.coco_joint_num, 3), dtype=np.float32) # coco_joint_num + + }) + + return datalist + + def get_smpl_coord(self, smpl_param, cam_param, do_flip, img_shape): + pose, shape, trans = smpl_param['pose'], smpl_param['shape'], smpl_param['trans'] + smpl_pose = torch.FloatTensor(pose).view(-1,3); smpl_shape = torch.FloatTensor(shape).view(1,-1); # smpl parameters (pose: 72 dimension, shape: 10 dimension) + R, t = np.array(cam_param['R'], dtype=np.float32).reshape(3,3), np.array(cam_param['t'], dtype=np.float32).reshape(3) # camera rotation and translation + + # merge root pose and camera rotation + root_pose = smpl_pose[self.root_joint_idx,:].numpy() + root_pose, _ = cv2.Rodrigues(root_pose) + root_pose, _ = cv2.Rodrigues(np.dot(R,root_pose)) + smpl_pose[self.root_joint_idx] = torch.from_numpy(root_pose).view(3) + + # flip smpl pose parameter (axis-angle) + if do_flip: + for pair in self.flip_pairs: + if pair[0] < len(smpl_pose) and pair[1] < len(smpl_pose): # face keypoints are already included in self.flip_pairs. However, they are not included in smpl_pose. 
+ smpl_pose[pair[0], :], smpl_pose[pair[1], :] = smpl_pose[pair[1], :].clone(), smpl_pose[pair[0], :].clone() + smpl_pose[:,1:3] *= -1; # multiply -1 to y and z axis of axis-angle + smpl_pose = smpl_pose.view(1,-1) + + # get mesh and joint coordinates + smpl_mesh_coord, smpl_joint_coord = self.smpl.layer['neutral'](smpl_pose, smpl_shape) + + # incorporate face keypoints + smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1,3); + # smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1,3) + # smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex,:].reshape(-1,3) + # smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord)) + smpl_joint_coord = np.dot(self.joint_regressor, smpl_mesh_coord) + + # compenstate rotation (translation from origin to root joint was not cancled) + smpl_trans = np.array(trans, dtype=np.float32).reshape(3) # translation vector from smpl coordinate to h36m world coordinate + smpl_trans = np.dot(R, smpl_trans[:,None]).reshape(1,3) + t.reshape(1,3)/1000 + root_joint_coord = smpl_joint_coord[self.root_joint_idx].reshape(1,3) + smpl_trans = smpl_trans - root_joint_coord + np.dot(R, root_joint_coord.transpose(1,0)).transpose(1,0) + smpl_mesh_coord = smpl_mesh_coord + smpl_trans + smpl_joint_coord = smpl_joint_coord + smpl_trans + + # flip translation + if do_flip: # avg of old and new root joint should be image center. + focal, princpt = cam_param['focal'], cam_param['princpt'] + flip_trans_x = 2 * (((img_shape[1] - 1)/2. - princpt[0]) / focal[0] * (smpl_joint_coord[self.root_joint_idx,2] * 1000)) / 1000 - 2 * smpl_joint_coord[self.root_joint_idx][0] + smpl_mesh_coord[:,0] += flip_trans_x + smpl_joint_coord[:,0] += flip_trans_x + + # change to mean shape if beta is too far from it + smpl_shape[(smpl_shape.abs() > 3).any(dim=1)] = 0. + + # meter -> milimeter + smpl_mesh_coord *= 1000; smpl_joint_coord *= 1000; + return smpl_mesh_coord, smpl_joint_coord, smpl_pose[0].numpy(), smpl_shape[0].numpy() + + def get_fitting_error(self, h36m_joint, smpl_mesh, do_flip): + h36m_joint = h36m_joint - h36m_joint[self.h36m_root_joint_idx,None,:] # root-relative + if do_flip: + h36m_joint[:,0] = -h36m_joint[:,0] + for pair in self.h36m_flip_pairs: + h36m_joint[pair[0],:] , h36m_joint[pair[1],:] = h36m_joint[pair[1],:].copy(), h36m_joint[pair[0],:].copy() + + h36m_from_smpl = np.dot(self.h36m_joint_regressor, smpl_mesh) + h36m_from_smpl = h36m_from_smpl - np.mean(h36m_from_smpl,0)[None,:] + np.mean(h36m_joint,0)[None,:] # translation alignment + + error = np.sqrt(np.sum((h36m_joint - h36m_from_smpl)**2,1)).mean() + return error + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_path, img_shape, tight_bbox, smpl_param, cam_param = data['img_path'], data['img_shape'], data['tight_bbox'], data['smpl_param'], data['cam_param'] + + # img + img = load_img(img_path) + img, img2bb_trans, bb2img_trans, rot, do_flip,bbox = augmentation(img, tight_bbox, self.data_split,is_full_body = True ) # always full body + img = self.transform(img.astype(np.float32))/255. 
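# ------------------------------------------------------------------
# Illustrative sketch (editorial addition, never called by the loader):
# the training branch below repeatedly maps joints through three spaces,
#   original image -> cropped input (img2bb_trans) -> heatmap voxels.
# The toy helper uses made-up numbers (256x256 crop, 64x64x64 heatmap)
# instead of cfg, purely to make that arithmetic explicit.
def _demo_img_to_heatmap_mapping():
    import numpy as np
    input_img_shape = (256, 256)      # (H, W), stand-in for cfg.input_img_shape
    output_hm_shape = (64, 64, 64)    # (D, H, W), stand-in for cfg.output_hm_shape
    img2bb = np.array([[0.5, 0.0, -10.0],
                       [0.0, 0.5, -20.0]], dtype=np.float32)       # toy 2x3 affine
    joint = np.array([[120.0, 200.0, 1.0]], dtype=np.float32)      # (x, y, extra)
    # x,y affine transform into the cropped-input pixel space
    xy1 = np.concatenate((joint[:, :2], np.ones_like(joint[:, :1])), 1)
    joint[:, :2] = np.dot(img2bb, xy1.transpose(1, 0)).transpose(1, 0)
    # rescale pixels to heatmap cells, as done for h36m_joint_img below
    joint[:, 0] = joint[:, 0] / input_img_shape[1] * output_hm_shape[2]
    joint[:, 1] = joint[:, 1] / input_img_shape[0] * output_hm_shape[1]
    return joint  # [[12.5, 20.0, 1.0]]
# ------------------------------------------------------------------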
+ + if self.data_split == 'train': + # h36m gt + h36m_joint_img = data['joint_img'] + h36m_joint_cam = data['joint_cam'] + h36m_joint_cam = h36m_joint_cam - h36m_joint_cam[self.h36m_root_joint_idx,None,:] # root-relative + h36m_joint_valid = data['joint_valid'] + if do_flip: + h36m_joint_cam[:,0] = -h36m_joint_cam[:,0] + h36m_joint_img[:,0] = img_shape[1] - 1 - h36m_joint_img[:,0] + for pair in self.h36m_flip_pairs: + h36m_joint_img[pair[0],:], h36m_joint_img[pair[1],:] = h36m_joint_img[pair[1],:].copy(), h36m_joint_img[pair[0],:].copy() + h36m_joint_cam[pair[0],:], h36m_joint_cam[pair[1],:] = h36m_joint_cam[pair[1],:].copy(), h36m_joint_cam[pair[0],:].copy() + h36m_joint_valid[pair[0],:], h36m_joint_valid[pair[1],:] = h36m_joint_valid[pair[1],:].copy(), h36m_joint_valid[pair[0],:].copy() + + h36m_joint_img_xy1 = np.concatenate((h36m_joint_img[:,:2], np.ones_like(h36m_joint_img[:,:1])),1) + h36m_joint_img[:,:2] = np.dot(img2bb_trans, h36m_joint_img_xy1.transpose(1,0)).transpose(1,0) + input_h36m_joint_img = h36m_joint_img.copy() + h36m_joint_img[:,0] = h36m_joint_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + h36m_joint_img[:,1] = h36m_joint_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + h36m_joint_img[:,2] = h36m_joint_img[:,2] - h36m_joint_img[self.h36m_root_joint_idx][2] # root-relative + h36m_joint_img[:,2] = (h36m_joint_img[:,2] / (cfg.bbox_3d_size * 1000 / 2) + 1)/2. * cfg.output_hm_shape[0] # change cfg.bbox_3d_size from meter to milimeter + + # check truncation + h36m_joint_trunc = h36m_joint_valid * ((h36m_joint_img[:,0] >= 0) * (h36m_joint_img[:,0] < cfg.output_hm_shape[2]) * \ + (h36m_joint_img[:,1] >= 0) * (h36m_joint_img[:,1] < cfg.output_hm_shape[1]) * \ + (h36m_joint_img[:,2] >= 0) * (h36m_joint_img[:,2] < cfg.output_hm_shape[0])).reshape(-1,1).astype(np.float32) + + """ + print(f'{img_path} trunc:\n', h36m_joint_trunc.nonzero()) + tmp_coord = h36m_joint_img[:, :2] * np.array([[cfg.input_img_shape[1] / cfg.output_hm_shape[2], cfg.input_img_shape[0]/ cfg.output_hm_shape[1]]]) + newimg = vis_keypoints(img.numpy().transpose(1,2,0), tmp_coord) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + """ + + # transform h36m joints to target db joints + h36m_joint_img = transform_joint_to_other_db(h36m_joint_img, self.h36m_joints_name, self.joints_name) + h36m_joint_cam = transform_joint_to_other_db(h36m_joint_cam, self.h36m_joints_name, self.joints_name) + h36m_joint_valid = transform_joint_to_other_db(h36m_joint_valid, self.h36m_joints_name, self.joints_name) + h36m_joint_trunc = transform_joint_to_other_db(h36m_joint_trunc, self.h36m_joints_name, self.joints_name) + + # apply PoseFix + input_h36m_joint_img[:, 2] = 1 # joint valid + tmp_joint_img = transform_joint_to_other_db(input_h36m_joint_img, self.h36m_joints_name, self.coco_joints_name) + tmp_joint_img = replace_joint_img(tmp_joint_img, data['tight_bbox'], data['near_joints'], data['num_overlap'], img2bb_trans) + tmp_joint_img = transform_joint_to_other_db(tmp_joint_img, self.coco_joints_name, self.h36m_joints_name) + input_h36m_joint_img[self.h36m_coco_common_jidx, :2] = tmp_joint_img[self.h36m_coco_common_jidx, :2] + """ + # debug PoseFix result + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), input_h36m_joint_img.T, self.h36m_skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + """ + input_h36m_joint_img[:, 0] = input_h36m_joint_img[:, 0] / 
cfg.input_img_shape[1] * cfg.output_hm_shape[2] + input_h36m_joint_img[:, 1] = input_h36m_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + input_h36m_joint_img = transform_joint_to_other_db(input_h36m_joint_img, self.h36m_joints_name, self.joints_name) + joint_mask = h36m_joint_trunc + + if smpl_param is not None: + # smpl coordinates + smpl_mesh_cam, smpl_joint_cam, smpl_pose, smpl_shape = self.get_smpl_coord(smpl_param, cam_param, do_flip, img_shape) + smpl_coord_cam = np.concatenate((smpl_mesh_cam, smpl_joint_cam)) + focal, princpt = cam_param['focal'], cam_param['princpt'] + smpl_coord_img = cam2pixel(smpl_coord_cam, focal, princpt) + + """ + # vis smpl joint coord + tmpimg = cv2.imread(img_path) + newimg = vis_keypoints(tmpimg, smpl_coord_img[6890:]) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + """ + + # affine transform x,y coordinates, root-relative depth + smpl_coord_img_xy1 = np.concatenate((smpl_coord_img[:,:2], np.ones_like(smpl_coord_img[:,:1])),1) + smpl_coord_img[:,:2] = np.dot(img2bb_trans, smpl_coord_img_xy1.transpose(1,0)).transpose(1,0)[:,:2] + smpl_coord_img[:,2] = smpl_coord_img[:,2] - smpl_coord_cam[self.vertex_num + self.root_joint_idx][2] + # coordinates voxelize + smpl_coord_img[:,0] = smpl_coord_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + smpl_coord_img[:,1] = smpl_coord_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + smpl_coord_img[:,2] = (smpl_coord_img[:,2] / (cfg.bbox_3d_size * 1000 / 2) + 1)/2. * cfg.output_hm_shape[0] # change cfg.bbox_3d_size from meter to milimeter + + # check truncation + smpl_trunc = ((smpl_coord_img[:,0] >= 0) * (smpl_coord_img[:,0] < cfg.output_hm_shape[2]) * \ + (smpl_coord_img[:,1] >= 0) * (smpl_coord_img[:,1] < cfg.output_hm_shape[1]) * \ + (smpl_coord_img[:,2] >= 0) * (smpl_coord_img[:,2] < cfg.output_hm_shape[0])).reshape(-1,1).astype(np.float32) + + # split mesh and joint coordinates + smpl_mesh_img = smpl_coord_img[:self.vertex_num]; smpl_joint_img = smpl_coord_img[self.vertex_num:]; + smpl_mesh_trunc = smpl_trunc[:self.vertex_num]; smpl_joint_trunc = smpl_trunc[self.vertex_num:]; + + # if fitted mesh is too far from h36m gt, discard it + is_valid_fit = True + error = self.get_fitting_error(data['joint_cam'], smpl_mesh_cam, do_flip) + if error > self.fitting_thr: + is_valid_fit = False + + else: + smpl_joint_img = np.zeros((self.joint_num,3), dtype=np.float32) # dummy + smpl_joint_cam = np.zeros((self.joint_num,3), dtype=np.float32) # dummy + smpl_mesh_img = np.zeros((self.vertex_num,3), dtype=np.float32) # dummy + smpl_pose = np.zeros((72), dtype=np.float32) # dummy + smpl_shape = np.zeros((10), dtype=np.float32) # dummy + smpl_joint_trunc = np.zeros((self.joint_num,1), dtype=np.float32) # dummy + smpl_mesh_trunc = np.zeros((self.vertex_num,1), dtype=np.float32) # dummy + is_valid_fit = False + + # 3D data rotation augmentation + rot_aug_mat = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0], + [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0], + [0, 0, 1]], dtype=np.float32) + # h36m coordinate + h36m_joint_cam = np.dot(rot_aug_mat, h36m_joint_cam.transpose(1,0)).transpose(1,0) / 1000 # milimeter to meter + # parameter + smpl_pose = smpl_pose.reshape(-1,3) + root_pose = smpl_pose[self.root_joint_idx,:] + root_pose, _ = cv2.Rodrigues(root_pose) + root_pose, _ = cv2.Rodrigues(np.dot(rot_aug_mat,root_pose)) + smpl_pose[self.root_joint_idx] = root_pose.reshape(3) + smpl_pose = 
smpl_pose.reshape(-1) + # smpl coordinate + smpl_joint_cam = smpl_joint_cam - smpl_joint_cam[self.root_joint_idx,None] # root-relative + smpl_joint_cam = np.dot(rot_aug_mat, smpl_joint_cam.transpose(1,0)).transpose(1,0) / 1000 # milimeter to meter + + # SMPL pose parameter validity + smpl_param_valid = np.ones((self.smpl.orig_joint_num, 3), dtype=np.float32) + for name in ('L_Ankle', 'R_Ankle', 'L_Toe', 'R_Toe', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand'): + smpl_param_valid[self.joints_name.index(name)] = 0 + smpl_param_valid = smpl_param_valid.reshape(-1) + + inputs = {'img': img, 'joints': input_h36m_joint_img[:, :2], 'joints_mask': joint_mask} + targets = {'orig_joint_img': h36m_joint_img, 'fit_joint_img': smpl_joint_img, 'orig_joint_cam': h36m_joint_cam, 'fit_joint_cam': smpl_joint_cam, 'pose_param': smpl_pose, 'shape_param': smpl_shape} + meta_info = {'orig_joint_valid': h36m_joint_valid, 'orig_joint_trunc': h36m_joint_trunc, 'fit_param_valid': smpl_param_valid, 'fit_joint_trunc': smpl_joint_trunc, 'is_valid_fit': float(is_valid_fit), 'bbox': bbox, + 'is_3D': float(True)} + return inputs, targets, meta_info + else: + inputs = {'img': img} + targets = {} + meta_info = {'bb2img_trans': bb2img_trans} + return inputs, targets, meta_info + + def evaluate(self, outs, cur_sample_idx): + + annots = self.datalist + sample_num = len(outs) + eval_result = {'mpjpe_lixel': [], 'pa_mpjpe_lixel': [], 'mpjpe_param': [], 'pa_mpjpe_param': []} + for n in range(sample_num): + annot = annots[cur_sample_idx + n] + out = outs[n] + + # mesh from lixel + # x,y: resize to input image space and perform bbox to image affine transform + mesh_out_img = out['mesh_coord_img'] + mesh_out_img[:,0] = mesh_out_img[:,0] / cfg.output_hm_shape[2] * cfg.input_img_shape[1] + mesh_out_img[:,1] = mesh_out_img[:,1] / cfg.output_hm_shape[1] * cfg.input_img_shape[0] + mesh_out_img_xy1 = np.concatenate((mesh_out_img[:,:2], np.ones_like(mesh_out_img[:,:1])),1) + mesh_out_img[:,:2] = np.dot(out['bb2img_trans'], mesh_out_img_xy1.transpose(1,0)).transpose(1,0)[:,:2] + # z: devoxelize and translate to absolute depth + root_joint_depth = annot['root_joint_depth'] + mesh_out_img[:,2] = (mesh_out_img[:,2] / cfg.output_hm_shape[0] * 2. 
- 1) * (cfg.bbox_3d_size * 1000 / 2) + mesh_out_img[:,2] = mesh_out_img[:,2] + root_joint_depth + # camera back-projection + cam_param = annot['cam_param'] + focal, princpt = cam_param['focal'], cam_param['princpt'] + mesh_out_cam = pixel2cam(mesh_out_img, focal, princpt) + + # h36m joint from gt mesh + pose_coord_gt_h36m = annot['joint_cam'] + pose_coord_gt_h36m = pose_coord_gt_h36m - pose_coord_gt_h36m[self.h36m_root_joint_idx,None] # root-relative + pose_coord_gt_h36m = pose_coord_gt_h36m[self.h36m_eval_joint,:] + + # h36m joint from lixel mesh + pose_coord_out_h36m = np.dot(self.h36m_joint_regressor, mesh_out_cam) + pose_coord_out_h36m = pose_coord_out_h36m - pose_coord_out_h36m[self.h36m_root_joint_idx,None] # root-relative + pose_coord_out_h36m = pose_coord_out_h36m[self.h36m_eval_joint,:] + pose_coord_out_h36m_aligned = rigid_align(pose_coord_out_h36m, pose_coord_gt_h36m) + eval_result['mpjpe_lixel'].append(np.sqrt(np.sum((pose_coord_out_h36m - pose_coord_gt_h36m)**2,1)).mean()) + eval_result['pa_mpjpe_lixel'].append(np.sqrt(np.sum((pose_coord_out_h36m_aligned - pose_coord_gt_h36m)**2,1)).mean()) + + vis = False + if vis: + filename = annot['img_path'].split('/')[-1][:-4] + + img = load_img(annot['img_path'])[:,:,::-1] + img = vis_mesh(img, mesh_out_img, 0.5) + cv2.imwrite(filename + '.jpg', img) + + save_obj(mesh_out_cam, self.smpl.face, filename + '.obj') + + return eval_result + + def print_eval_result(self, eval_result): + print('MPJPE from lixel mesh: %.2f mm' % np.mean(eval_result['mpjpe_lixel'])) + print('PA MPJPE from lixel mesh: %.2f mm' % np.mean(eval_result['pa_mpjpe_lixel'])) + + print('MPJPE from param mesh: %.2f mm' % np.mean(eval_result['mpjpe_param'])) + print('PA MPJPE from param mesh: %.2f mm' % np.mean(eval_result['pa_mpjpe_param'])) diff --git a/data_processing/data/MPII/MPII.py b/data_processing/data/MPII/MPII.py new file mode 100644 index 0000000..4441017 --- /dev/null +++ b/data_processing/data/MPII/MPII.py @@ -0,0 +1,295 @@ +import os +import os.path as osp +import numpy as np +from config import cfg +import copy +import json +import cv2 +import torch +from pycocotools.coco import COCO + +from utils.posefix import replace_joint_img +from utils.preprocessing import compute_iou, process_bbox, load_img, augmentation,get_bbox +from utils.smpl import SMPL +from utils.transforms import transform_joint_to_other_db, cam2pixel +from utils.vis import vis_keypoints_with_skeleton + + +class MPII(torch.utils.data.Dataset): + def __init__(self, transform, data_split): + self.transform = transform + self.data_split = data_split + self.img_path = osp.join(cfg.data_dir, 'MPII', 'data') + self.annot_path = osp.join(cfg.data_dir, 'MPII', 'data', 'annotations') + + # mpii skeleton + self.mpii_joint_num = 16 + self.mpii_joints_name = ('R_Ankle', 'R_Knee', 'R_Hip', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Thorax', 'Neck', 'Head_top', 'R_Wrist', 'R_Elbow', 'R_Shoulder', 'L_Shoulder', 'L_Elbow', 'L_Wrist') + self.mpii_flip_pairs = ((0, 5), (1, 4), (2, 3), (10, 15), (11, 14), (12, 13)) + self.mpii_skeleton = ((0,1), (1,2), (2,6), (3,6), (3, 4), (4, 5), (6, 7), (7, 8), (8, 9), (10, 11), (11, 12) , (7, 12), (7, 13), (13, 14), (14, 15)) + + # smpl skeleton + self.smpl = SMPL() + self.face = self.smpl.face + self.joint_regressor = self.smpl.joint_regressor + self.vertex_num = self.smpl.vertex_num + self.joint_num = self.smpl.joint_num + self.joints_name = self.smpl.joints_name + self.flip_pairs = self.smpl.flip_pairs + self.skeleton = self.smpl.skeleton + self.root_joint_idx = 
self.smpl.root_joint_idx + self.face_kps_vertex = self.smpl.face_kps_vertex + + self.coco_joint_num = 18 # original: 17, manually added pelvis + self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis') + self.mpii_coco_common_idx = (0, 1, 2, 3, 4, 5, 6, 10, 11, 12, 13, 14, 15) + + self.datalist = self.load_data() + print("mpii data len: ", len(self.datalist)) + + def load_data(self): + db = COCO(osp.join(self.annot_path, 'train.json')) + with open(osp.join(self.annot_path, 'MPII_train_SMPL_NeuralAnnot.json')) as f: + smpl_params = json.load(f) + + datalist = [] + for iid in db.imgs.keys(): + aids = db.getAnnIds([iid]) + + tmplist = [] + for aid in aids: + ann = db.anns[aid] + img = db.loadImgs(ann['image_id'])[0] + img_path = osp.join(self.img_path, img['file_name']) + width, height = img['width'], img['height'] + + # bbox + # tight_bbox = np.array(ann['bbox']) + # bbox = process_bbox(tight_bbox, width, height) + # if bbox is None: continue + + # joint coordinates + joint_img = np.array(ann['keypoints'], dtype=np.float32).reshape(-1, 3) + joint_valid = (joint_img[:, 2].copy().reshape(-1, 1) > 0).astype(np.float32) + joint_img[:, 2] = joint_valid[:, 0] # for posefix, only good for 2d datasets + + # bbox + if cfg.use_bbox_in_ann: + tight_bbox = np.array(ann['bbox']) + else: + tight_bbox = get_bbox(joint_img, np.ones_like(joint_img[:, 0]), crop_bottom_body=True) + + # smpl parameter + if str(aid) in smpl_params: + smpl_param = smpl_params[str(aid)] + else: + smpl_param = None + + tmplist.append({'img_path': img_path, 'img_shape': (height, width), + #'bbox': bbox, + 'tight_bbox': tight_bbox, 'joint_img': joint_img, 'joint_valid': joint_valid, 'smpl_param': smpl_param}) + + for i, person in enumerate(tmplist): + tight_bbox = person['tight_bbox'] + + # for swap + num_overlap = 0 + near_joints = [] + other_persons = tmplist[:i] + tmplist[i + 1:] + for other in other_persons: + other_bbox = other['tight_bbox'] + iou = compute_iou(tight_bbox[None, :], other_bbox[None, :]) + if iou < 0.1: + continue + num_overlap += 1 + other_joint = transform_joint_to_other_db(other['joint_img'], self.mpii_joints_name, self.coco_joints_name) + near_joints.append(other_joint) + + person['num_overlap'] = num_overlap + person['near_joints'] = near_joints + + datalist.extend(tmplist) + """ + if num_overlap > 2: + tmpimg = cv2.imread(img_path) + newimg = vis_keypoints_with_skeleton(tmpimg, joint_img.T, self.mpii_skeleton) + cv2.imshow(f'{img_path}', newimg / 255) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; + pdb.set_trace() + """ + + return datalist + + def get_smpl_coord(self, smpl_param, cam_param, do_flip, img_shape): + pose, shape, trans = smpl_param['pose'], smpl_param['shape'], smpl_param['trans'] + smpl_pose = torch.FloatTensor(pose).view(1, -1); + smpl_shape = torch.FloatTensor(shape).view(1, -1); # smpl parameters (pose: 72 dimension, shape: 10 dimension) + smpl_trans = torch.FloatTensor(trans).view(1, -1) # translation vector + + # flip smpl pose parameter (axis-angle) + if do_flip: + smpl_pose = smpl_pose.view(-1, 3) + for pair in self.flip_pairs: + if pair[0] < len(smpl_pose) and pair[1] < len(smpl_pose): # face keypoints are already included in self.flip_pairs. However, they are not included in smpl_pose. 
+ smpl_pose[pair[0], :], smpl_pose[pair[1], :] = smpl_pose[pair[1], :].clone(), smpl_pose[pair[0], :].clone() + smpl_pose[:, 1:3] *= -1; # multiply -1 to y and z axis of axis-angle + smpl_pose = smpl_pose.view(1, -1) + + # get mesh and joint coordinates + smpl_mesh_coord, smpl_joint_coord = self.smpl.layer['neutral'](smpl_pose, smpl_shape, smpl_trans) + + # incorporate face keypoints + smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1, 3); + # smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1,3) + # smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex,:].reshape(-1,3) + # smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord)) + smpl_joint_coord = np.dot(self.joint_regressor, smpl_mesh_coord) + + # flip translation + if do_flip: # avg of old and new root joint should be image center. + focal, princpt = cam_param['focal'], cam_param['princpt'] + flip_trans_x = 2 * (((img_shape[1] - 1) / 2. - princpt[0]) / focal[0] * (smpl_joint_coord[self.root_joint_idx, 2])) - 2 * smpl_joint_coord[self.root_joint_idx][0] + smpl_mesh_coord[:, 0] += flip_trans_x + smpl_joint_coord[:, 0] += flip_trans_x + + # change to mean shape if beta is too far from it + smpl_shape[(smpl_shape.abs() > 3).any(dim=1)] = 0. + + return smpl_mesh_coord, smpl_joint_coord, smpl_pose[0].numpy(), smpl_shape[0].numpy() + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_path, img_shape, tight_bbox = data['img_path'], data['img_shape'], data['tight_bbox'] + # check if image is full body + # self.mpii_joint_num = 16 + # self.mpii_joints_name = ('R_Ankle', 'R_Knee', 'R_Hip', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Thorax', 'Neck', 'Head_top', 'R_Wrist', 'R_Elbow', 'R_Shoulder', 'L_Shoulder', 'L_Elbow', 'L_Wrist') + # data['joint_valid'].shape = (15, 1) + is_full_body = np.sum( + data['joint_valid'][0:6, :]) > 3 # 0:6 is the index of R_Ankle, R_Knee, R_Hip, L_Hip, L_Knee, L_Ankle + + # image load and affine transform + img = load_img(img_path) + img, img2bb_trans, bb2img_trans, rot, do_flip,bbox = augmentation(img, tight_bbox, self.data_split, is_full_body= is_full_body) + img = self.transform(img.astype(np.float32)) / 255. 
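# ------------------------------------------------------------------
# Illustrative sketch (editorial addition, unused helper): the repeated
# transform_joint_to_other_db(...) calls below re-order joints from one
# skeleton's naming convention into another's. The toy version here only
# assumes that behaviour -- copy by joint name, leave joints absent from
# the source skeleton as zeros -- using two tiny made-up skeletons.
def _demo_joint_name_mapping():
    import numpy as np
    src_names = ('Head_top', 'Neck', 'Pelvis')
    dst_names = ('Pelvis', 'Neck', 'L_Hip')       # 'L_Hip' has no source joint
    src_joints = np.array([[0., 0.], [1., 1.], [2., 2.]], dtype=np.float32)
    dst_joints = np.zeros((len(dst_names), 2), dtype=np.float32)
    for i, name in enumerate(dst_names):
        if name in src_names:
            dst_joints[i] = src_joints[src_names.index(name)]
    return dst_joints  # [[2,2], [1,1], [0,0]] -- L_Hip stays zero
# ------------------------------------------------------------------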
+ + # mpii gt + mpii_joint_img = data['joint_img'] + mpii_joint_valid = data['joint_valid'] + if do_flip: + mpii_joint_img[:, 0] = img_shape[1] - 1 - mpii_joint_img[:, 0] + for pair in self.mpii_flip_pairs: + mpii_joint_img[pair[0], :], mpii_joint_img[pair[1], :] = mpii_joint_img[pair[1], :].copy(), mpii_joint_img[pair[0], :].copy() + mpii_joint_valid[pair[0], :], mpii_joint_valid[pair[1], :] = mpii_joint_valid[pair[1], :].copy(), mpii_joint_valid[pair[0], :].copy() + + mpii_joint_img_xy1 = np.concatenate((mpii_joint_img[:, :2], np.ones_like(mpii_joint_img[:, :1])), 1) + mpii_joint_img[:, :2] = np.dot(img2bb_trans, mpii_joint_img_xy1.transpose(1, 0)).transpose(1, 0) + # for swap + if len(data['near_joints']) > 0: + near_joint_list = [] + for nj in data['near_joints']: + near_joint = np.ones((self.coco_joint_num, 3), dtype=np.float32) + nj_xy1 = np.concatenate((nj[:, :2], np.ones_like(nj[:, :1])), axis=1) + near_joint[:, :2] = np.dot(img2bb_trans, nj_xy1.transpose(1, 0)).transpose(1, 0) + near_joint_list.append(near_joint) + near_joints = np.asarray(near_joint_list, dtype=np.float32) + else: + near_joints = np.zeros((1, self.coco_joint_num, 3), dtype=np.float32) + + input_mpii_joint_img = mpii_joint_img.copy() + mpii_joint_img[:, 0] = mpii_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + mpii_joint_img[:, 1] = mpii_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + # check truncation + mpii_joint_trunc = mpii_joint_valid * ( + (mpii_joint_img[:, 0] >= 0) * (mpii_joint_img[:, 0] < cfg.output_hm_shape[2]) * (mpii_joint_img[:, 1] >= 0) * (mpii_joint_img[:, 1] < cfg.output_hm_shape[1])).reshape(-1, + 1).astype(np.float32) + + # transform coco joints to target db joints + mpii_joint_img = transform_joint_to_other_db(mpii_joint_img, self.mpii_joints_name, self.joints_name) + mpii_joint_cam = np.zeros((self.joint_num, 3), dtype=np.float32) # dummy + mpii_joint_valid = transform_joint_to_other_db(mpii_joint_valid, self.mpii_joints_name, self.joints_name) + mpii_joint_trunc = transform_joint_to_other_db(mpii_joint_trunc, self.mpii_joints_name, self.joints_name) + + # apply PoseFix + tmp_joint_img = transform_joint_to_other_db(input_mpii_joint_img, self.mpii_joints_name, self.coco_joints_name) + tmp_joint_img = replace_joint_img(tmp_joint_img, data['tight_bbox'], near_joints, data['num_overlap'], img2bb_trans) + tmp_joint_img = transform_joint_to_other_db(tmp_joint_img, self.coco_joints_name, self.mpii_joints_name) + input_mpii_joint_img[self.mpii_coco_common_idx, :2] = tmp_joint_img[self.mpii_coco_common_idx, :2] + """ + # debug PoseFix result + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), input_mpii_joint_img.T, self.mpii_skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + """ + input_mpii_joint_img[:, 0] = input_mpii_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + input_mpii_joint_img[:, 1] = input_mpii_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + input_mpii_joint_img = transform_joint_to_other_db(input_mpii_joint_img, self.mpii_joints_name, self.joints_name) + + smpl_param = data['smpl_param'] + if smpl_param is not None: + # use fitted mesh + smpl_param, cam_param = smpl_param['smpl_param'], smpl_param['cam_param'] + smpl_mesh_cam, smpl_joint_cam, smpl_pose, smpl_shape = self.get_smpl_coord(smpl_param, cam_param, do_flip, img_shape) + smpl_coord_cam = np.concatenate((smpl_mesh_cam, smpl_joint_cam)) + 
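# ------------------------------------------------------------------
# Illustrative sketch (editorial addition, unused helper): cam2pixel below
# projects camera-space points to pixels; this sketch assumes the usual
# pinhole model x = fx*X/Z + cx, y = fy*Y/Z + cy, with z kept as depth.
def _demo_pinhole_projection():
    import numpy as np
    focal, princpt = (1000.0, 1000.0), (128.0, 128.0)        # toy intrinsics
    pt_cam = np.array([[0.2, -0.1, 2.0]], dtype=np.float32)  # camera-space point
    x = pt_cam[:, 0] / pt_cam[:, 2] * focal[0] + princpt[0]  # 228.0
    y = pt_cam[:, 1] / pt_cam[:, 2] * focal[1] + princpt[1]  # 78.0
    return np.stack((x, y, pt_cam[:, 2]), 1)
# ------------------------------------------------------------------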
smpl_coord_img = cam2pixel(smpl_coord_cam, cam_param['focal'], cam_param['princpt']) + + # x,y affine transform, root-relative depth + smpl_coord_img_xy1 = np.concatenate((smpl_coord_img[:, :2], np.ones_like(smpl_coord_img[:, 0:1])), 1) + smpl_coord_img[:, :2] = np.dot(img2bb_trans, smpl_coord_img_xy1.transpose(1, 0)).transpose(1, 0)[:, :2] + smpl_coord_img[:, 2] = smpl_coord_img[:, 2] - smpl_coord_cam[self.vertex_num + self.root_joint_idx][2] + smpl_coord_img[:, 0] = smpl_coord_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + smpl_coord_img[:, 1] = smpl_coord_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + smpl_coord_img[:, 2] = (smpl_coord_img[:, 2] / (cfg.bbox_3d_size / 2) + 1) / 2. * cfg.output_hm_shape[0] + + # check truncation + smpl_trunc = ((smpl_coord_img[:, 0] >= 0) * (smpl_coord_img[:, 0] < cfg.output_hm_shape[2]) * (smpl_coord_img[:, 1] >= 0) * (smpl_coord_img[:, 1] < cfg.output_hm_shape[1]) * (smpl_coord_img[:, 2] >= 0) * ( + smpl_coord_img[:, 2] < cfg.output_hm_shape[0])).reshape(-1, 1).astype(np.float32) + + # split mesh and joint coordinates + smpl_joint_img = smpl_coord_img[self.vertex_num:]; + smpl_joint_trunc = smpl_trunc[self.vertex_num:]; + + """ + # vis smpl joint coord + # tmpimg = cv2.imread(img_path) + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), (smpl_joint_img.T)*4, self.skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + """ + + # if fitted mesh is too far from h36m gt, discard it + is_valid_fit = True + else: + smpl_joint_img = np.zeros((self.joint_num, 3), dtype=np.float32) # dummy + smpl_joint_cam = np.zeros((self.joint_num, 3), dtype=np.float32) # dummy + smpl_pose = np.zeros((72), dtype=np.float32) # dummy + smpl_shape = np.zeros((10), dtype=np.float32) # dummy + smpl_joint_trunc = np.zeros((self.joint_num, 1), dtype=np.float32) + is_valid_fit = False + + # SMPL pose parameter validity + smpl_param_valid = np.ones((self.smpl.orig_joint_num, 3), dtype=np.float32) + for name in ('L_Ankle', 'R_Ankle', 'L_Toe', 'R_Toe', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand'): + smpl_param_valid[self.joints_name.index(name)] = 0 + smpl_param_valid = smpl_param_valid.reshape(-1) + + inputs = {'img': img, 'joints': input_mpii_joint_img[:, :2], 'joints_mask': mpii_joint_trunc} + targets = {'orig_joint_img': mpii_joint_img, 'fit_joint_img': smpl_joint_img, 'orig_joint_cam': mpii_joint_cam, 'fit_joint_cam': smpl_joint_cam, 'pose_param': smpl_pose, 'shape_param': smpl_shape} + meta_info = {'orig_joint_valid': mpii_joint_valid, 'orig_joint_trunc': mpii_joint_trunc, 'fit_param_valid': smpl_param_valid, 'fit_joint_trunc': smpl_joint_trunc,'bbox': bbox, + 'is_valid_fit': float(is_valid_fit), 'is_3D': float(False)} + + return inputs, targets, meta_info + + diff --git a/data_processing/data/MSCOCO/MSCOCO.py b/data_processing/data/MSCOCO/MSCOCO.py new file mode 100644 index 0000000..7b7c42e --- /dev/null +++ b/data_processing/data/MSCOCO/MSCOCO.py @@ -0,0 +1,415 @@ +import os +import os.path as osp +import numpy as np +from config import cfg +import copy +import json +import scipy.io as sio +import cv2 +import random +import math +import torch +import transforms3d +from pycocotools.coco import COCO + +from utils.posefix import replace_joint_img +from utils.smpl import SMPL +from utils.preprocessing import load_img, process_bbox, augmentation, compute_iou,get_bbox +from utils.vis import vis_keypoints, vis_mesh, save_obj, vis_keypoints_with_skeleton, 
vis_bbox, render_mesh +from utils.transforms import world2cam, cam2pixel, pixel2cam, transform_joint_to_other_db + + + + +class MSCOCO(torch.utils.data.Dataset): + def __init__(self, transform, data_split): + self.transform = transform + self.data_split = 'train' if data_split == 'train' else 'val' + self.img_path = osp.join(cfg.data_dir, 'MSCOCO', 'images') + self.annot_path = osp.join(cfg.data_dir, 'MSCOCO', 'annotations') + self.rootnet_output_path = osp.join(cfg.data_dir, 'MSCOCO', 'rootnet_output', 'bbox_root_coco_output.json') + self.fitting_thr = 3.0 # pixel in cfg.output_hm_shape space + + # mscoco skeleton + self.coco_joint_num = 18 # original: 17, manually added pelvis + self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis') + self.coco_skeleton = ( (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12) ) + self.coco_flip_pairs = ( (1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16) ) + self.coco_joint_regressor = np.load(osp.join(cfg.data_dir, 'MSCOCO', 'J_regressor_coco_hip_smpl.npy')) + + # smpl skeleton + self.smpl = SMPL() + self.face = self.smpl.face + self.joint_regressor = self.smpl.joint_regressor + self.vertex_num = self.smpl.vertex_num + self.joint_num = self.smpl.joint_num + self.joints_name = self.smpl.joints_name + self.flip_pairs = self.smpl.flip_pairs + self.skeleton = self.smpl.skeleton + self.root_joint_idx = self.smpl.root_joint_idx + self.face_kps_vertex = self.smpl.face_kps_vertex + + self.datalist = self.load_data() + print("coco data len: ", len(self.datalist)) + + def add_pelvis(self, joint_coord): + lhip_idx = self.coco_joints_name.index('L_Hip') + rhip_idx = self.coco_joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx,2] * joint_coord[rhip_idx,2] # joint_valid + pelvis = pelvis.reshape(1, 3) + joint_coord = np.concatenate((joint_coord, pelvis)) + return joint_coord + + def load_data(self): + db = COCO(osp.join(self.annot_path, 'person_keypoints_' + self.data_split + '2017.json')) + with open(osp.join(self.annot_path, 'MSCOCO_train_SMPL_NeuralAnnot.json')) as f: + # with open(osp.join(self.annot_path, 'coco_smplifyx_train.json')) as f: + smpl_params = json.load(f) + + datalist = [] + if self.data_split == 'train': + for iid in db.imgs.keys(): + aids = db.getAnnIds([iid]) + + tmplist = [] + for aid in aids: + ann = db.anns[aid] + img = db.loadImgs(ann['image_id'])[0] + imgname = osp.join('train2017', img['file_name']) + img_path = osp.join(self.img_path, imgname) + width, height = img['width'], img['height'] + + if ann['iscrowd'] or (ann['num_keypoints'] == 0): + continue + + # bbox + # tight_bbox = np.array(ann['bbox']) + # bbox = process_bbox(tight_bbox, width, height) + # if bbox is None: continue + + # joint coordinates + joint_img = np.array(ann['keypoints'], dtype=np.float32).reshape(-1, 3) + joint_img = self.add_pelvis(joint_img) + joint_valid = (joint_img[:, 2].copy().reshape(-1, 1) > 0).astype(np.float32) + joint_img[:, 2] = joint_valid[:, 0] # for posefix, only good for 2d datasets + + # bbox + if cfg.use_bbox_in_ann: + tight_bbox = np.array(ann['bbox']) + else: + tight_bbox = get_bbox(joint_img, np.ones_like(joint_img[:, 0]), crop_bottom_body=True) + # bbox = process_bbox(tight_bbox, width, height) + # if bbox is None: 
continue + + + if str(aid) in smpl_params: + smpl_param = smpl_params[str(aid)] + else: + smpl_param = None + + tmplist.append({ + 'img_path': img_path, + 'img_shape': (height, width), + #'bbox': bbox, + 'tight_bbox': tight_bbox, + 'joint_img': joint_img, + 'joint_valid': joint_valid, + 'smpl_param': smpl_param + }) + + for i, person in enumerate(tmplist): + tight_bbox = person['tight_bbox'] + + # for swap + num_overlap = 0 + near_joints = [] + other_persons = tmplist[:i] + tmplist[i + 1:] + for other in other_persons: + other_tight_bbox = other['tight_bbox'] + iou = compute_iou(tight_bbox[None, :], other_tight_bbox[None, :]) + if iou < 0.1: + continue + num_overlap += 1 + near_joints.append(other['joint_img']) + + person['num_overlap'] = num_overlap + person['near_joints'] = near_joints + + datalist.extend(tmplist) + + else: + for aid in db.anns.keys(): + ann = db.anns[aid] + img = db.loadImgs(ann['image_id'])[0] + imgname = osp.join('val2017', img['file_name']) + img_path = osp.join(self.img_path, imgname) + width, height = img['width'], img['height'] + + if ann['iscrowd'] or (ann['num_keypoints'] == 0): + continue + + # bbox + tight_bbox = np.array(ann['bbox']) + bbox = process_bbox(tight_bbox, width, height) + if bbox is None: continue + + # joint coordinates + joint_img = np.array(ann['keypoints'], dtype=np.float32).reshape(-1, 3) + joint_img = self.add_pelvis(joint_img) + joint_valid = (joint_img[:, 2].copy().reshape(-1, 1) > 0).astype(np.float32) + joint_img[:, 2] = joint_valid[:, 0] # for posefix, only good for 2d datasets + + smpl_param = None + + datalist.append({'img_path': img_path, 'img_shape': (height, width), 'bbox': bbox, 'tight_bbox': tight_bbox, 'joint_img': joint_img, 'joint_valid': joint_valid, 'smpl_param': smpl_param}) + + if len(datalist) > 100: + break + + return datalist + + def get_smpl_coord(self, smpl_param, cam_param, do_flip, img_shape): + pose, shape, trans = smpl_param['pose'], smpl_param['shape'], smpl_param['trans'] + smpl_pose = torch.FloatTensor(pose).view(1,-1); smpl_shape = torch.FloatTensor(shape).view(1,-1); # smpl parameters (pose: 72 dimension, shape: 10 dimension) + smpl_trans = torch.FloatTensor(trans).view(1,-1) # translation vector + + # flip smpl pose parameter (axis-angle) + if do_flip: + smpl_pose = smpl_pose.view(-1,3) + for pair in self.flip_pairs: + if pair[0] < len(smpl_pose) and pair[1] < len(smpl_pose): # face keypoints are already included in self.flip_pairs. However, they are not included in smpl_pose. + smpl_pose[pair[0], :], smpl_pose[pair[1], :] = smpl_pose[pair[1], :].clone(), smpl_pose[pair[0], :].clone() + smpl_pose[:,1:3] *= -1; # multiply -1 to y and z axis of axis-angle + smpl_pose = smpl_pose.view(1,-1) + + # get mesh and joint coordinates + smpl_mesh_coord, smpl_joint_coord = self.smpl.layer['neutral'](smpl_pose, smpl_shape, smpl_trans) + + # incorporate face keypoints + smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1,3); + # smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1,3) + # smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex,:].reshape(-1,3) + # smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord)) + smpl_joint_coord = np.dot(self.joint_regressor, smpl_mesh_coord) + + # flip translation + if do_flip: # avg of old and new root joint should be image center. + focal, princpt = cam_param['focal'], cam_param['princpt'] + flip_trans_x = 2 * (((img_shape[1] - 1)/2. 
- princpt[0]) / focal[0] * (smpl_joint_coord[self.root_joint_idx,2])) - 2 * smpl_joint_coord[self.root_joint_idx][0] + smpl_mesh_coord[:,0] += flip_trans_x + smpl_joint_coord[:,0] += flip_trans_x + + # change to mean shape if beta is too far from it + smpl_shape[(smpl_shape.abs() > 3).any(dim=1)] = 0. + + return smpl_mesh_coord, smpl_joint_coord, smpl_pose[0].numpy(), smpl_shape[0].numpy() + + def get_fitting_error(self, coco_joint, smpl_mesh, cam_param, img2bb_trans, coco_joint_valid): + # get coco joint from smpl mesh + coco_from_smpl = np.dot(self.coco_joint_regressor, smpl_mesh) + coco_from_smpl = self.add_pelvis(coco_from_smpl) # z-axis component will be removed + coco_from_smpl = cam2pixel(coco_from_smpl, cam_param['focal'], cam_param['princpt']) + coco_from_smpl_xy1 = np.concatenate((coco_from_smpl[:,:2], np.ones_like(coco_from_smpl[:,0:1])),1) + coco_from_smpl[:,:2] = np.dot(img2bb_trans, coco_from_smpl_xy1.transpose(1,0)).transpose(1,0) + coco_from_smpl[:,0] = coco_from_smpl[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + coco_from_smpl[:,1] = coco_from_smpl[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + # mask joint coordinates + coco_joint = coco_joint[:,:2][np.tile(coco_joint_valid,(1,2))==1].reshape(-1,2) + coco_from_smpl = coco_from_smpl[:,:2][np.tile(coco_joint_valid,(1,2))==1].reshape(-1,2) + + error = np.sqrt(np.sum((coco_joint - coco_from_smpl)**2,1)).mean() + return error + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_path, img_shape, tight_bbox = data['img_path'], data['img_shape'], data['tight_bbox'] + # check if image is full body + # self.coco_joint_num = 18 # original: 17, manually added pelvis + # self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis') + # data['joint_valid'].shape = (18, 1) + is_full_body = np.sum( + data['joint_valid'][11:17, :]) > 3 # 11:17 = L_Hip, R_Hip, L_Knee, R_Knee, L_Ankle, R_Ankle + + # image load and affine transform + img = load_img(img_path) + img, img2bb_trans, bb2img_trans, rot, do_flip,bbox = augmentation(img, tight_bbox, self.data_split,is_full_body = is_full_body) + img = self.transform(img.astype(np.float32))/255. 
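# ------------------------------------------------------------------
# Illustrative sketch (editorial addition, unused helper): the truncation
# masks computed in the training branch below simply AND the annotated
# validity flag with an "inside the heatmap volume" test. Toy numbers only;
# cfg is not touched.
def _demo_truncation_mask():
    import numpy as np
    hm_w, hm_h = 64, 64                                    # stand-ins for cfg.output_hm_shape[2], [1]
    joint_hm = np.array([[12.5, 20.0], [-3.0, 10.0]], dtype=np.float32)
    joint_valid = np.array([[1.0], [1.0]], dtype=np.float32)
    inside = ((joint_hm[:, 0] >= 0) * (joint_hm[:, 0] < hm_w) *
              (joint_hm[:, 1] >= 0) * (joint_hm[:, 1] < hm_h)).reshape(-1, 1)
    return joint_valid * inside.astype(np.float32)         # [[1.], [0.]]
# ------------------------------------------------------------------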
+ + if self.data_split == 'train': + # coco gt + coco_joint_img = data['joint_img'] + coco_joint_valid = data['joint_valid'] + if do_flip: + coco_joint_img[:,0] = img_shape[1] - 1 - coco_joint_img[:,0] + for pair in self.coco_flip_pairs: + coco_joint_img[pair[0],:], coco_joint_img[pair[1],:] = coco_joint_img[pair[1],:].copy(), coco_joint_img[pair[0],:].copy() + coco_joint_valid[pair[0],:], coco_joint_valid[pair[1],:] = coco_joint_valid[pair[1],:].copy(), coco_joint_valid[pair[0],:].copy() + + coco_joint_img_xy1 = np.concatenate((coco_joint_img[:,:2], np.ones_like(coco_joint_img[:,:1])),1) + coco_joint_img[:,:2] = np.dot(img2bb_trans, coco_joint_img_xy1.transpose(1,0)).transpose(1,0) + # for swap + if len(data['near_joints']) > 0: + near_joint_list = [] + for nj in data['near_joints']: + near_joint = np.ones((self.coco_joint_num, 3), dtype=np.float32) + nj_xy1 = np.concatenate((nj[:, :2], np.ones_like(nj[:, :1])), axis=1) + near_joint[:, :2] = np.dot(img2bb_trans, nj_xy1.transpose(1, 0)).transpose(1, 0) + near_joint_list.append(near_joint) + near_joints = np.asarray(near_joint_list, dtype=np.float32) + else: + near_joints = np.zeros((1, self.coco_joint_num, 3), dtype=np.float32) + + input_coco_joint_img = coco_joint_img.copy() + coco_joint_img[:,0] = coco_joint_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + coco_joint_img[:,1] = coco_joint_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + # backup for calculating fitting error + _coco_joint_img = coco_joint_img.copy() + _coco_joint_valid = coco_joint_valid.copy() + + # check truncation + coco_joint_trunc = coco_joint_valid * ((coco_joint_img[:,0] >= 0) * (coco_joint_img[:,0] < cfg.output_hm_shape[2]) * \ + (coco_joint_img[:,1] >= 0) * (coco_joint_img[:,1] < cfg.output_hm_shape[1])).reshape(-1,1).astype(np.float32) + + # transform coco joints to target db joints + coco_joint_img = transform_joint_to_other_db(coco_joint_img, self.coco_joints_name, self.joints_name) + coco_joint_cam = np.zeros((self.joint_num,3), dtype=np.float32) # dummy + coco_joint_valid = transform_joint_to_other_db(coco_joint_valid, self.coco_joints_name, self.joints_name) + coco_joint_trunc = transform_joint_to_other_db(coco_joint_trunc, self.coco_joints_name, self.joints_name) + + # apply PoseFix + input_coco_joint_img = replace_joint_img(input_coco_joint_img, data['tight_bbox'], near_joints, data['num_overlap'], img2bb_trans) + """ + # debug PoseFix result + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), input_coco_joint_img.T, self.coco_skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + """ + input_coco_joint_img[:, 0] = input_coco_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + input_coco_joint_img[:,1] = input_coco_joint_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + input_coco_joint_img = transform_joint_to_other_db(input_coco_joint_img, self.coco_joints_name, self.joints_name) + joint_mask = coco_joint_trunc + + smpl_param = data['smpl_param'] + if smpl_param is not None: + # use fitted mesh + smpl_param, cam_param = smpl_param['smpl_param'], smpl_param['cam_param'] + smpl_mesh_cam, smpl_joint_cam, smpl_pose, smpl_shape = self.get_smpl_coord(smpl_param, cam_param, do_flip, img_shape) + smpl_coord_cam = np.concatenate((smpl_mesh_cam, smpl_joint_cam)) + smpl_coord_img = cam2pixel(smpl_coord_cam, cam_param['focal'], cam_param['princpt']) + + # x,y affine transform, root-relative depth + 
smpl_coord_img_xy1 = np.concatenate((smpl_coord_img[:,:2], np.ones_like(smpl_coord_img[:,0:1])),1) + smpl_coord_img[:,:2] = np.dot(img2bb_trans, smpl_coord_img_xy1.transpose(1,0)).transpose(1,0)[:,:2] + smpl_coord_img[:,2] = smpl_coord_img[:,2] - smpl_coord_cam[self.vertex_num + self.root_joint_idx][2] + smpl_coord_img[:,0] = smpl_coord_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + smpl_coord_img[:,1] = smpl_coord_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + smpl_coord_img[:,2] = (smpl_coord_img[:,2] / (cfg.bbox_3d_size / 2) + 1)/2. * cfg.output_hm_shape[0] + + # check truncation + smpl_trunc = ((smpl_coord_img[:,0] >= 0) * (smpl_coord_img[:,0] < cfg.output_hm_shape[2]) * \ + (smpl_coord_img[:,1] >= 0) * (smpl_coord_img[:,1] < cfg.output_hm_shape[1]) * \ + (smpl_coord_img[:,2] >= 0) * (smpl_coord_img[:,2] < cfg.output_hm_shape[0])).reshape(-1,1).astype(np.float32) + + # split mesh and joint coordinates + smpl_mesh_img = smpl_coord_img[:self.vertex_num]; smpl_joint_img = smpl_coord_img[self.vertex_num:]; + smpl_mesh_trunc = smpl_trunc[:self.vertex_num]; smpl_joint_trunc = smpl_trunc[self.vertex_num:]; + + # if fitted mesh is too far from h36m gt, discard it + is_valid_fit = True + # error = self.get_fitting_error(_coco_joint_img, smpl_mesh_cam, cam_param, img2bb_trans, _coco_joint_valid) + # if error > self.fitting_thr: + # is_valid_fit = False + + else: + smpl_joint_img = np.zeros((self.joint_num,3), dtype=np.float32) # dummy + smpl_joint_cam = np.zeros((self.joint_num,3), dtype=np.float32) # dummy + smpl_mesh_img = np.zeros((self.vertex_num,3), dtype=np.float32) # dummy + smpl_pose = np.zeros((72), dtype=np.float32) # dummy + smpl_shape = np.zeros((10), dtype=np.float32) # dummy + smpl_joint_trunc = np.zeros((self.joint_num,1), dtype=np.float32) + smpl_mesh_trunc = np.zeros((self.vertex_num,1), dtype=np.float32) + is_valid_fit = False + + # 3D data rotation augmentation + rot_aug_mat = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0], + [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0], + [0, 0, 1]], dtype=np.float32) + # parameter + smpl_pose = smpl_pose.reshape(-1,3) + root_pose = smpl_pose[self.root_joint_idx,:] + root_pose, _ = cv2.Rodrigues(root_pose) + root_pose, _ = cv2.Rodrigues(np.dot(rot_aug_mat,root_pose)) + smpl_pose[self.root_joint_idx] = root_pose.reshape(3) + smpl_pose = smpl_pose.reshape(-1) + # smpl coordinate + smpl_joint_cam = smpl_joint_cam - smpl_joint_cam[self.root_joint_idx,None] # root-relative + smpl_joint_cam = np.dot(rot_aug_mat, smpl_joint_cam.transpose(1,0)).transpose(1,0) + + # SMPL pose parameter validity + smpl_param_valid = np.ones((self.smpl.orig_joint_num, 3), dtype=np.float32) + for name in ('L_Ankle', 'R_Ankle', 'L_Toe', 'R_Toe', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand'): + smpl_param_valid[self.joints_name.index(name)] = 0 + smpl_param_valid = smpl_param_valid.reshape(-1) + + inputs = {'img': img, 'joints': input_coco_joint_img[:, :2], 'joints_mask': joint_mask} + targets = {'orig_joint_img': coco_joint_img, 'fit_joint_img': smpl_joint_img, 'orig_joint_cam': coco_joint_cam, 'fit_joint_cam': smpl_joint_cam, 'pose_param': smpl_pose, 'shape_param': smpl_shape} + meta_info = {'orig_joint_valid': coco_joint_valid, 'orig_joint_trunc': coco_joint_trunc, 'fit_param_valid': smpl_param_valid, 'fit_joint_trunc': smpl_joint_trunc, 'is_valid_fit': float(is_valid_fit), 'bbox': bbox, + 'is_3D': float(False)} + return inputs, targets, meta_info + else: + # coco gt + coco_joint_img = data['joint_img'] + 
coco_joint_valid = data['joint_valid'] + + coco_joint_img_xy1 = np.concatenate((coco_joint_img[:, :2], np.ones_like(coco_joint_img[:, :1])), 1) + coco_joint_img[:, :2] = np.dot(img2bb_trans, coco_joint_img_xy1.transpose(1, 0)).transpose(1, 0) + coco_joint_img[:, 0] = coco_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + coco_joint_img[:, 1] = coco_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + # check truncation + coco_joint_trunc = coco_joint_valid * ((coco_joint_img[:, 0] >= 0) * (coco_joint_img[:, 0] < cfg.output_hm_shape[2]) * (coco_joint_img[:, 1] >= 0) * (coco_joint_img[:, 1] < cfg.output_hm_shape[1])).reshape( + -1, 1).astype(np.float32) + + coco_joint_img = transform_joint_to_other_db(coco_joint_img, self.coco_joints_name, self.joints_name) + coco_joint_trunc = transform_joint_to_other_db(coco_joint_trunc, self.coco_joints_name, self.joints_name) + + inputs = {'img': img, 'joints': coco_joint_img, 'joints_mask': coco_joint_trunc} + targets = {} + meta_info = {'bbox': bbox} + return inputs, targets, meta_info + + def evaluate(self, outs, cur_sample_idx): + + annots = self.datalist + sample_num = len(outs) + eval_result = {} + for n in range(sample_num): + annot = annots[cur_sample_idx + n] + out = outs[n] + + img = cv2.imread(annot['img_path']) + mesh_cam_render = out['mesh_cam_render'] + bbox = out['bbox'] + princpt = (bbox[0]+bbox[2]/2, bbox[1]+bbox[3]/2) + img = vis_bbox(img, bbox, alpha=1) + + rendered_img = render_mesh(img, mesh_cam_render, self.face, {'focal': cfg.focal, 'princpt': princpt}) + + cv2.imshow(annot['img_path'], rendered_img/255) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + + return eval_result + + def print_eval_result(self, eval_result): + pass diff --git a/data_processing/data/MuCo/MuCo.py b/data_processing/data/MuCo/MuCo.py new file mode 100644 index 0000000..cd1d479 --- /dev/null +++ b/data_processing/data/MuCo/MuCo.py @@ -0,0 +1,354 @@ +import os +import os.path as osp +import numpy as np +import torch +import cv2 +import random +import json +import math +import copy +import pickle +import transforms3d +from pycocotools.coco import COCO +from config import cfg +from utils.posefix import replace_joint_img +from utils.smpl import SMPL +from utils.preprocessing import load_img, get_bbox, process_bbox, generate_patch_image, augmentation, compute_iou +from utils.transforms import world2cam, cam2pixel, pixel2cam, rigid_align, transform_joint_to_other_db +from utils.vis import vis_keypoints, vis_mesh, save_obj, vis_keypoints_with_skeleton, vis_bbox +import transforms3d + + +class MuCo(torch.utils.data.Dataset): + def __init__(self, transform, data_split): + self.transform = transform + self.data_split = data_split + self.img_dir = osp.join(cfg.data_dir, 'MuCo', 'data') + self.annot_path = osp.join(cfg.data_dir, 'MuCo', 'data', 'MuCo-3DHP.json') + self.smpl_param_path = osp.join(cfg.data_dir, 'MuCo', 'data', 'smpl_param.json') + self.fitting_thr = 25 # milimeter + + # COCO joint set + self.coco_joint_num = 17 # original: 17 + self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle') + + # MuCo joint set + self.muco_joint_num = 21 + self.muco_joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 
'L_Toe') + self.muco_flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18), (19, 20) ) + self.muco_skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) ) + self.muco_root_joint_idx = self.muco_joints_name.index('Pelvis') + self.muco_coco_common_jidx = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13) + + # H36M joint set + self.h36m_joint_regressor = np.load(osp.join(cfg.data_dir, 'Human36M', 'J_regressor_h36m_correct.npy')) # use h36m joint regrssor (only use subset from original muco joint set) + self.h36m_flip_pairs = ( (1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13) ) + self.h36m_joints_name = ('Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Torso', 'Neck', 'Nose', 'Head_top', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist') + self.h36m_root_joint_idx = self.h36m_joints_name.index('Pelvis') + + # SMPL joint set + self.smpl = SMPL() + self.face = self.smpl.face + self.joint_regressor = self.smpl.joint_regressor + self.vertex_num = self.smpl.vertex_num + self.joint_num = self.smpl.joint_num + self.joints_name = self.smpl.joints_name + self.flip_pairs = self.smpl.flip_pairs + self.skeleton = self.smpl.skeleton + self.root_joint_idx = self.smpl.root_joint_idx + self.face_kps_vertex = self.smpl.face_kps_vertex + + self.datalist = self.load_data() + print("muco data len: ", len(self.datalist)) + + def load_data(self): + if self.data_split == 'train': + db = COCO(self.annot_path) + with open(self.smpl_param_path) as f: + smpl_params = json.load(f) + else: + print('Unknown data subset') + assert 0 + + datalist = [] + for iid in db.imgs.keys(): + img = db.imgs[iid] + img_id = img["id"] + img_width, img_height = img['width'], img['height'] + imgname = img['file_name'] + img_path = osp.join(self.img_dir, imgname) + focal = img["f"] + princpt = img["c"] + cam_param = {'focal': focal, 'princpt': princpt} + + # crop the closest person to the camera + ann_ids = db.getAnnIds(img_id) + anns = db.loadAnns(ann_ids) + + root_depths = [ann['keypoints_cam'][self.muco_root_joint_idx][2] for ann in anns] + closest_pid = root_depths.index(min(root_depths)) + pid_list = [closest_pid] + for pid in pid_list: + joint_cam = np.array(anns[pid]['keypoints_cam']) + joint_img = np.array(anns[pid]['keypoints_img']) + joint_img = np.concatenate([joint_img, joint_cam[:,2:]],1) + joint_valid = np.ones((self.muco_joint_num,1)) + + if cfg.use_bbox_in_ann: + tight_bbox = np.array(anns[pid]['bbox']) + else: + tight_bbox = get_bbox(joint_img, np.ones_like(joint_img[:, 0]), crop_bottom_body=True) + + # for swap + num_overlap = 0 + near_joints = [] + other_persons = anns[:pid] + anns[pid+1:] + for other in other_persons: + other_tight_bbox = np.array(other['bbox']) + iou = compute_iou(tight_bbox[None, :], other_tight_bbox[None, :]) + if iou < 0.1: + continue + num_overlap += 1 + other_joint = np.array(other['keypoints_img']) + other_joint = np.concatenate((other_joint, np.ones_like(other_joint[:, :1])), axis=1) + other_joint = transform_joint_to_other_db(other_joint, self.muco_joints_name, self.coco_joints_name) + near_joints.append(other_joint) + if num_overlap == 0: + near_joints = [] + + # bbox = process_bbox(tight_bbox, img_width, img_height) + # if bbox is None: continue + + # check smpl parameter exist + try: + smpl_param = smpl_params[str(ann_ids[pid])] + except KeyError: + smpl_param = None + + datalist.append({ + 
'img_path': img_path, + 'img_shape': (img_height, img_width), + #'bbox': bbox, + 'tight_bbox': tight_bbox, + 'joint_img': joint_img, + 'joint_cam': joint_cam, + 'joint_valid': joint_valid, + 'cam_param': cam_param, + 'smpl_param': smpl_param, + 'near_joints': near_joints, + 'num_overlap': num_overlap + }) + + return datalist + + def get_smpl_coord(self, smpl_param, cam_param, do_flip, img_shape): + pose, shape, trans = smpl_param['pose'], smpl_param['shape'], smpl_param['trans'] + smpl_pose = torch.FloatTensor(pose).view(1,-1); smpl_shape = torch.FloatTensor(shape).view(1,-1); # smpl parameters (pose: 72 dimension, shape: 10 dimension) + smpl_trans = torch.FloatTensor(trans).view(1,3) # translation vector from smpl coordinate to muco world coordinate + + # flip smpl pose parameter (axis-angle) + if do_flip: + smpl_pose = smpl_pose.view(-1,3) + for pair in self.flip_pairs: + if pair[0] < len(smpl_pose) and pair[1] < len(smpl_pose): # face keypoints are already included in self.flip_pairs. However, they are not included in smpl_pose. + smpl_pose[pair[0], :], smpl_pose[pair[1], :] = smpl_pose[pair[1], :].clone(), smpl_pose[pair[0], :].clone() + smpl_pose[:,1:3] *= -1; # multiply -1 to y and z axis of axis-angle + smpl_pose = smpl_pose.view(1,-1) + + # get mesh and joint coordinates + smpl_mesh_coord, smpl_joint_coord = self.smpl.layer['neutral'](smpl_pose, smpl_shape, smpl_trans) + + # incorporate face keypoints + smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1,3); + # smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1,3) + # smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex,:].reshape(-1,3) + # smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord)) + smpl_joint_coord = np.dot(self.joint_regressor, smpl_mesh_coord) + + # flip translation + if do_flip: # avg of old and new root joint should be image center. + focal, princpt = cam_param['focal'], cam_param['princpt'] + flip_trans_x = 2 * (((img_shape[1] - 1)/2. - princpt[0]) / focal[0] * (smpl_joint_coord[self.root_joint_idx,2] * 1000)) / 1000 - 2 * smpl_joint_coord[self.root_joint_idx][0] + smpl_mesh_coord[:,0] += flip_trans_x + smpl_joint_coord[:,0] += flip_trans_x + + # change to mean shape if beta is too far from it + smpl_shape[(smpl_shape.abs() > 3).any(dim=1)] = 0. 
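+ # note: shape vectors with any |beta| > 3 were reset to zero (the mean SMPL shape) in the line above to avoid extreme fits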
+ + # meter -> milimeter + smpl_mesh_coord *= 1000; smpl_joint_coord *= 1000; + return smpl_mesh_coord, smpl_joint_coord, smpl_pose[0].numpy(), smpl_shape[0].numpy() + + def get_fitting_error(self, muco_joint, smpl_mesh, do_flip): + muco_joint = muco_joint.copy() + muco_joint = muco_joint - muco_joint[self.muco_root_joint_idx,None,:] # root-relative + if do_flip: + muco_joint[:,0] = -muco_joint[:,0] + for pair in self.muco_flip_pairs: + muco_joint[pair[0],:] , muco_joint[pair[1],:] = muco_joint[pair[1],:].copy(), muco_joint[pair[0],:].copy() + muco_joint_valid = np.ones((self.muco_joint_num,3), dtype=np.float32) + + # transform to h36m joint set + h36m_joint = transform_joint_to_other_db(muco_joint, self.muco_joints_name, self.h36m_joints_name) + h36m_joint_valid = transform_joint_to_other_db(muco_joint_valid, self.muco_joints_name, self.h36m_joints_name) + h36m_joint = h36m_joint[h36m_joint_valid==1].reshape(-1,3) + + h36m_from_smpl = np.dot(self.h36m_joint_regressor, smpl_mesh) + h36m_from_smpl = h36m_from_smpl[h36m_joint_valid==1].reshape(-1,3) + h36m_from_smpl = h36m_from_smpl - np.mean(h36m_from_smpl,0)[None,:] + np.mean(h36m_joint,0)[None,:] # translation alignment + error = np.sqrt(np.sum((h36m_joint - h36m_from_smpl)**2,1)).mean() + return error + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_path, img_shape, tight_bbox, smpl_param, cam_param = data['img_path'], data['img_shape'], data['tight_bbox'], data['smpl_param'], data['cam_param'] + + # img + img = load_img(img_path) + img, img2bb_trans, bb2img_trans, rot, do_flip,bbox = augmentation(img, tight_bbox, self.data_split,is_full_body = True) # always full body + img = self.transform(img.astype(np.float32))/255. 
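+ # the flip and crop parameters returned by augmentation() above are reused below so the MuCo 3D ground truth stays aligned with the augmented image crop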
+ + # muco gt + muco_joint_img = data['joint_img'] + muco_joint_cam = data['joint_cam'] + muco_joint_cam = muco_joint_cam - muco_joint_cam[self.muco_root_joint_idx,None,:] # root-relative + muco_joint_valid = data['joint_valid'] + if do_flip: + muco_joint_img[:,0] = img_shape[1] - 1 - muco_joint_img[:,0] + muco_joint_cam[:,0] = -muco_joint_cam[:,0] + for pair in self.muco_flip_pairs: + muco_joint_img[pair[0],:], muco_joint_img[pair[1],:] = muco_joint_img[pair[1],:].copy(), muco_joint_img[pair[0],:].copy() + muco_joint_cam[pair[0],:], muco_joint_cam[pair[1],:] = muco_joint_cam[pair[1],:].copy(), muco_joint_cam[pair[0],:].copy() + muco_joint_valid[pair[0],:], muco_joint_valid[pair[1],:] = muco_joint_valid[pair[1],:].copy(), muco_joint_valid[pair[0],:].copy() + + muco_joint_img_xy1 = np.concatenate((muco_joint_img[:,:2], np.ones_like(muco_joint_img[:,:1])),1) + muco_joint_img[:,:2] = np.dot(img2bb_trans, muco_joint_img_xy1.transpose(1,0)).transpose(1,0) + # for swap + if len(data['near_joints']) > 0: + near_joint_list = [] + for nj in data['near_joints']: + near_joint = np.ones((self.coco_joint_num, 3), dtype=np.float32) + nj_xy1 = np.concatenate((nj[:, :2], np.ones_like(nj[:, :1])), axis=1) + near_joint[:, :2] = np.dot(img2bb_trans, nj_xy1.transpose(1,0)).transpose(1,0) + near_joint_list.append(near_joint) + near_joints = np.asarray(near_joint_list, dtype=np.float32) + else: + near_joints = np.zeros((1, self.coco_joint_num, 3), dtype=np.float32) + + input_muco_joint_img = muco_joint_img.copy() + muco_joint_img[:,0] = muco_joint_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + muco_joint_img[:,1] = muco_joint_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + muco_joint_img[:,2] = muco_joint_img[:,2] - muco_joint_img[self.muco_root_joint_idx][2] # root-relative + muco_joint_img[:,2] = (muco_joint_img[:,2] / (cfg.bbox_3d_size * 1000 / 2) + 1)/2. 
* cfg.output_hm_shape[0] # change cfg.bbox_3d_size from meter to milimeter + + # check truncation + muco_joint_trunc = muco_joint_valid * ((muco_joint_img[:,0] >= 0) * (muco_joint_img[:,0] < cfg.output_hm_shape[2]) * \ + (muco_joint_img[:,1] >= 0) * (muco_joint_img[:,1] < cfg.output_hm_shape[1]) * \ + (muco_joint_img[:,2] >= 0) * (muco_joint_img[:,2] < cfg.output_hm_shape[0])).reshape(-1,1).astype(np.float32) + + # transform muco joints to target db joints + muco_joint_img = transform_joint_to_other_db(muco_joint_img, self.muco_joints_name, self.joints_name) + muco_joint_cam = transform_joint_to_other_db(muco_joint_cam, self.muco_joints_name, self.joints_name) + muco_joint_valid = transform_joint_to_other_db(muco_joint_valid, self.muco_joints_name, self.joints_name) + muco_joint_trunc = transform_joint_to_other_db(muco_joint_trunc, self.muco_joints_name, self.joints_name) + + # apply PoseFix + input_muco_joint_img[:, 2] = 1 # joint valid + tmp_joint_img = transform_joint_to_other_db(input_muco_joint_img, self.muco_joints_name, self.coco_joints_name) + tmp_joint_img = replace_joint_img(tmp_joint_img, data['tight_bbox'], near_joints, data['num_overlap'], img2bb_trans) + tmp_joint_img = transform_joint_to_other_db(tmp_joint_img, self.coco_joints_name, self.muco_joints_name) + input_muco_joint_img[self.muco_coco_common_jidx, :2] = tmp_joint_img[self.muco_coco_common_jidx, :2] + """ + # debug PoseFix result + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), input_muco_joint_img.T, self.muco_skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + """ + input_muco_joint_img[:, 0] = input_muco_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + input_muco_joint_img[:, 1] = input_muco_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + input_muco_joint_img = transform_joint_to_other_db(input_muco_joint_img, self.muco_joints_name, self.joints_name) + + if smpl_param is not None: + # smpl coordinates + smpl_mesh_cam, smpl_joint_cam, smpl_pose, smpl_shape = self.get_smpl_coord(smpl_param, cam_param, do_flip, img_shape) + smpl_coord_cam = np.concatenate((smpl_mesh_cam, smpl_joint_cam)) + focal, princpt = cam_param['focal'], cam_param['princpt'] + smpl_coord_img = cam2pixel(smpl_coord_cam, focal, princpt) + + # affine transform x,y coordinates. root-relative depth + smpl_coord_img_xy1 = np.concatenate((smpl_coord_img[:,:2], np.ones_like(smpl_coord_img[:,:1])),1) + smpl_coord_img[:,:2] = np.dot(img2bb_trans, smpl_coord_img_xy1.transpose(1,0)).transpose(1,0)[:,:2] + """ + # vis smpl + newimg = vis_keypoints_with_skeleton(img.numpy().transpose(1, 2, 0), smpl_coord_img[6890:].T, self.skeleton) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + """ + smpl_coord_img[:,2] = smpl_coord_img[:,2] - smpl_coord_cam[self.vertex_num + self.root_joint_idx][2] + smpl_coord_img[:,0] = smpl_coord_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + smpl_coord_img[:,1] = smpl_coord_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + smpl_coord_img[:,2] = (smpl_coord_img[:,2] / (cfg.bbox_3d_size * 1000 / 2) + 1)/2. 
* cfg.output_hm_shape[0] # change cfg.bbox_3d_size from meter to milimeter + + # check truncation + smpl_trunc = ((smpl_coord_img[:,0] >= 0) * (smpl_coord_img[:,0] < cfg.output_hm_shape[2]) * \ + (smpl_coord_img[:,1] >= 0) * (smpl_coord_img[:,1] < cfg.output_hm_shape[1]) * \ + (smpl_coord_img[:,2] >= 0) * (smpl_coord_img[:,2] < cfg.output_hm_shape[0])).reshape(-1,1).astype(np.float32) + + # split mesh and joint coordinates + smpl_mesh_img = smpl_coord_img[:self.vertex_num]; smpl_joint_img = smpl_coord_img[self.vertex_num:]; + smpl_mesh_trunc = smpl_trunc[:self.vertex_num]; smpl_joint_trunc = smpl_trunc[self.vertex_num:]; + + # if fitted mesh is too far from muco gt, discard it + is_valid_fit = True + error = self.get_fitting_error(data['joint_cam'], smpl_mesh_cam, do_flip) + if error > self.fitting_thr: + is_valid_fit = False + + else: + smpl_joint_img = np.zeros((self.joint_num,3), dtype=np.float32) # dummy + smpl_joint_cam = np.zeros((self.joint_num,3), dtype=np.float32) # dummy + smpl_mesh_img = np.zeros((self.vertex_num,3), dtype=np.float32) # dummy + smpl_pose = np.zeros((72), dtype=np.float32) # dummy + smpl_shape = np.zeros((10), dtype=np.float32) # dummy + smpl_joint_trunc = np.zeros((self.joint_num,1), dtype=np.float32) # dummy + smpl_mesh_trunc = np.zeros((self.vertex_num,1), dtype=np.float32) # dummy + is_valid_fit = False + + # 3D data rotation augmentation + rot_aug_mat = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0], + [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0], + [0, 0, 1]], dtype=np.float32) + # muco coordinate + muco_joint_cam = np.dot(rot_aug_mat, muco_joint_cam.transpose(1,0)).transpose(1,0) / 1000 # milimeter to meter + # parameter + smpl_pose = smpl_pose.reshape(-1,3) + root_pose = smpl_pose[self.root_joint_idx,:] + root_pose, _ = cv2.Rodrigues(root_pose) + root_pose, _ = cv2.Rodrigues(np.dot(rot_aug_mat,root_pose)) + smpl_pose[self.root_joint_idx] = root_pose.reshape(3) + smpl_pose = smpl_pose.reshape(-1) + # smpl coordinate + smpl_joint_cam = smpl_joint_cam - smpl_joint_cam[self.root_joint_idx,None] # root-relative + smpl_joint_cam = np.dot(rot_aug_mat, smpl_joint_cam.transpose(1,0)).transpose(1,0) / 1000 # milimeter to meter + + # SMPL pose parameter validity + smpl_param_valid = np.ones((self.smpl.orig_joint_num, 3), dtype=np.float32) + for name in ('L_Ankle', 'R_Ankle', 'L_Toe', 'R_Toe', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand'): + smpl_param_valid[self.joints_name.index(name)] = 0 + smpl_param_valid = smpl_param_valid.reshape(-1) + + inputs = {'img': img, 'joints': input_muco_joint_img[:, :2], 'joints_mask': muco_joint_trunc} + targets = {'orig_joint_img': muco_joint_img, 'fit_joint_img': smpl_joint_img, 'orig_joint_cam': muco_joint_cam, 'fit_joint_cam': smpl_joint_cam, 'pose_param': smpl_pose, 'shape_param': smpl_shape} + meta_info = {'orig_joint_valid': muco_joint_valid, 'orig_joint_trunc': muco_joint_trunc, 'fit_param_valid': smpl_param_valid, 'fit_joint_trunc': smpl_joint_trunc, 'is_valid_fit': float(is_valid_fit), 'bbox': bbox, + 'is_3D': float(True)} + + return inputs, targets, meta_info + + diff --git a/data_processing/data/MuPoTs/MuPoTs.py b/data_processing/data/MuPoTs/MuPoTs.py new file mode 100644 index 0000000..0ef7943 --- /dev/null +++ b/data_processing/data/MuPoTs/MuPoTs.py @@ -0,0 +1,311 @@ +import torch +import copy +import os +import os.path as osp +import scipy.io as sio +import numpy as np +from pycocotools.coco import COCO +from config import cfg +import json +import cv2 +import random +import math + +from 
utils.smpl import SMPL +from utils.transforms import pixel2cam, transform_joint_to_other_db, cam2pixel +from utils.preprocessing import load_img, augmentation, process_bbox, get_bbox +from utils.vis import vis_keypoints, vis_3d_skeleton, vis_keypoints_with_skeleton + + +class MuPoTs(torch.utils.data.Dataset): + def __init__(self, transform, data_split): + self.transform = transform + self.data_split = data_split + self.img_dir = osp.join(cfg.data_dir, 'MuPoTs', 'data', 'MultiPersonTestSet') + self.test_annot_path = osp.join(cfg.data_dir, 'MuPoTs', 'data', 'MuPoTS-3D.json') + self.hhrnet_result_path = osp.join(cfg.data_dir, 'MuPoTs', 'data', 'MuPoTs_test_hhrnet_result.json') + self.hhrnet_thr = 0.1 + self.openpose_result_path = osp.join(cfg.data_dir, 'MuPoTs', 'data', 'MuPoTs_test_openpose_result.json') + self.openpose_thr = 0.05 + + # SMPL joint set + self.smpl = SMPL() + self.face = self.smpl.face + self.joint_regressor = self.smpl.joint_regressor + self.vertex_num = self.smpl.vertex_num + self.joint_num = self.smpl.joint_num + self.joints_name = self.smpl.joints_name + self.skeleton = self.smpl.skeleton + self.root_joint_idx = self.smpl.root_joint_idx + self.face_kps_vertex = self.smpl.face_kps_vertex + + # MuCo-3DHP + self.muco_joint_num = 21 + self.muco_joints_name = ( + 'Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', + 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', + 'L_Toe') + + # MuPoTS + self.mupots_joint_num = 17 + self.mupots_joints_name = ( + 'Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', + 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head') # + self.mupots_flip_pairs = ((2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13)) + self.mupots_skeleton = ( + (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (11, 12), (12, 13), (1, 2), (2, 3), + (3, 4), (1, 5), (5, 6), (6, 7)) + self.mupots_eval_joint = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + self.mupots_root_idx = self.mupots_joints_name.index('Pelvis') + + # H36M joint set + # Spine Thorax, Head + self.h36m_joint_num = 17 + self.h36m_joints_name = ('Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Spine', 'Thorax', 'Head', 'Head_top', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist') + self.h36m_flip_pairs = ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13)) + self.h36m_skeleton = ((0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6)) + self.h36m_root_joint_idx = self.h36m_joints_name.index('Pelvis') + self.h36m_eval_joint = (1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16) + # self.h36m_joint_regressor = np.load(osp.join('..', 'data', 'Human36M', 'J_regressor_h36m_from_pav.npy')) #'J_regressor_h36m_correct.npy')) + # self.h36m_pav_joints_name = ('Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Hip', 'R_Knee', 'R_Ankle', 'Spine', 'Thorax', 'Head', 'Head_top', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist') + # self.h36m_joint_regressor = transform_joint_to_other_db(self.h36m_joint_regressor, self.h36m_pav_joints_name, self.h36m_joints_name) + + # MPI-INF-3DHP joint set + self.mpii3d_joint_num = 17 + self.mpii3d_joints_name = ( + 'Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', + 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 
'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', + 'Head') + self.mpii3d_flip_pairs = ((2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13)) + self.mpii3d_smpl_regressor = np.load(osp.join(cfg.data_dir, 'MPI_INF_3DHP', 'J_regressor_mi_smpl.npy'))[:17] + self.mpii3d_root_idx = self.mpii3d_joints_name.index('Pelvis') + + # MSCOCO joint set + self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') + # OpenPose joint set + self.openpose_joints_name = ('Nose', 'Neck', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Eye', 'L_Eye', 'R_Ear', 'L_Ear', 'Pelvis') + + self.datalist = self.load_data() + print('mupots data len: ', len(self.datalist)) + + def add_pelvis(self, joint_coord, joints_name): + lhip_idx = joints_name.index('L_Hip') + rhip_idx = joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx,2] * joint_coord[rhip_idx,2] # confidence for openpose + pelvis = pelvis.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, pelvis))#, neck)) + + return joint_coord + + def add_neck(self, joint_coord, joints_name): + lshoulder_idx = joints_name.index('L_Shoulder') + rshoulder_idx = joints_name.index('R_Shoulder') + neck = (joint_coord[lshoulder_idx, :] + joint_coord[rshoulder_idx, :]) * 0.5 + neck[2] = joint_coord[lshoulder_idx, 2] * joint_coord[rshoulder_idx, 2] + neck = neck.reshape(1,3) + + joint_coord = np.concatenate((joint_coord, neck)) + + return joint_coord + + def load_data(self): + if self.data_split != 'test': + print('Unknown data subset') + assert 0 + + with open(self.hhrnet_result_path) as f: + hhrnet_result = json.load(f) + with open(self.openpose_result_path) as f: + openpose_result = json.load(f) + + data = [] + db = COCO(self.test_annot_path) + + count_dummy = 0 + # use gt bbox and root + print("Get bounding box and root from groundtruth") + for aid in db.anns.keys(): + ann = db.anns[aid] + if ann['is_valid'] == 0: + continue + + image_id = ann['image_id'] + img = db.loadImgs(image_id)[0] + img_path = osp.join(self.img_dir, img['file_name']) + fx, fy, cx, cy = img['intrinsic'] + f = np.array([fx, fy]); + c = np.array([cx, cy]); + + joint_cam = np.array(ann['keypoints_cam']) + root_cam = joint_cam[self.mupots_root_idx] + + joint_img = np.array(ann['keypoints_img']) + joint_img = np.concatenate([joint_img, joint_cam[:, 2:]], 1) + joint_img[:, 2] = joint_img[:, 2] - root_cam[2] + joint_valid = np.ones((self.mupots_joint_num, 1)) + + hhrnetpose = np.array(hhrnet_result[str(aid)]['coco_joints']) + hhrnetpose = self.add_pelvis(hhrnetpose, self.coco_joints_name) + hhrnetpose = self.add_neck(hhrnetpose, self.coco_joints_name) + + openpose = np.array(openpose_result[str(aid)]['coco_joints']) + openpose = self.add_pelvis(openpose, self.openpose_joints_name) + + if openpose.sum() == 0: + count_dummy += 1 + bbox = np.array(ann['bbox']) + img_width, img_height = img['width'], img['height'] + # bbox = process_bbox(bbox, img_width, img_height) + # if bbox is None: continue + + data.append({ + 'img_path': img_path, + 'img_shape': (img_height, img_width), + 'bbox': bbox, + 'tight_bbox': np.array(ann['bbox']), + 'joint_img': joint_img, # [org_img_x, org_img_y, depth - root_depth] + 'joint_cam': joint_cam, # [X, Y, Z] in camera coordinate + 'joint_valid': joint_valid, + 
'root_cam': root_cam, # [X, Y, Z] in camera coordinate + 'f': f, + 'c': c, + 'hhrnetpose': hhrnetpose, + 'openpose': openpose + }) + + print("dummy predictions: ", count_dummy) + return data + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_path = data['img_path'] + + input_joint_name = 'openpose' + if input_joint_name == 'gt': + joint_coord_img = data['joint_img'] + joint_coord_img[:, 2] = 1 + joint_valid = data['joint_valid'] + joint_coord_img = transform_joint_to_other_db(joint_coord_img, self.mupots_joints_name, self.joints_name) + joint_valid = transform_joint_to_other_db(joint_valid, self.mupots_joints_name, self.joints_name) + elif input_joint_name == 'hhrnet': + joint_coord_img = data['hhrnetpose'] + joint_valid = (joint_coord_img[:, 2:] > self.hhrnet_thr) + joint_coord_img = transform_joint_to_other_db(joint_coord_img, self.coco_joints_name, self.joints_name) + joint_valid = transform_joint_to_other_db(joint_valid, self.coco_joints_name, self.joints_name) + elif input_joint_name == 'openpose': + joint_coord_img = data['openpose'] + joint_valid = (joint_coord_img[:, 2:] > self.openpose_thr) + joint_coord_img = transform_joint_to_other_db(joint_coord_img, self.openpose_joints_name, self.joints_name) + joint_valid = transform_joint_to_other_db(joint_valid, self.openpose_joints_name, self.joints_name) + + # get bbox from joints + try: + bbox = get_bbox(joint_coord_img, joint_valid[:, 0]) + except: # in case of perfect occlusion + bbox = data['bbox'] + img_height, img_width = data['img_shape'] + bbox = process_bbox(bbox.copy(), img_width, img_height, is_3dpw_test=True) + + # img + img = load_img(img_path) + img, img2bb_trans, bb2img_trans, _, _, _ = augmentation(img, bbox, self.data_split) + img = self.transform(img.astype(np.float32)) / 255. 
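+ # img2bb_trans from the crop above is applied to the detected 2D joints below; they are then rescaled to cfg.output_hm_shape and masked where they fall outside the crop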
+ + """ + # debug + img = cv2.imread(img_path) + input_img = vis_keypoints_with_skeleton(img, joint_coord_img.T, self.skeleton, kp_thresh=0.1, alpha=1, kps_scores=joint_coord_img[:, 2:].round(3)) + cv2.imshow('mupots', input_img) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + # import pdb; + # pdb.set_trace() + """ + + # x,y affine transform, root-relative depth + joint_coord_img_xy1 = np.concatenate((joint_coord_img[:, :2], np.ones_like(joint_coord_img[:, 0:1])), 1) + joint_coord_img[:, :2] = np.dot(img2bb_trans, joint_coord_img_xy1.transpose(1, 0)).transpose(1, 0)[:, :2] + joint_coord_img[:, 0] = joint_coord_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + joint_coord_img[:, 1] = joint_coord_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + # check truncation + joints_mask = joint_valid * ( + (joint_coord_img[:, 0] >= 0) * (joint_coord_img[:, 0] < cfg.output_hm_shape[2]) * (joint_coord_img[:, 1] >= 0) * (joint_coord_img[:, 1] < cfg.output_hm_shape[1])).reshape(-1, 1).astype(np.float32) + + inputs = {'img': img, 'joints': joint_coord_img, 'joints_mask': joints_mask} + targets = {} + meta_info = {'bbox': bbox, + 'bb2img_trans': bb2img_trans, 'img2bb_trans': img2bb_trans} + return inputs, targets, meta_info + + def evaluate(self, outs, cur_sample_idx): + gts = self.datalist + sample_num = len(outs) + joint_num = self.mupots_joint_num + + pred_2d_save = {} + pred_3d_save = {} + for n in range(sample_num): + gt = gts[cur_sample_idx+n] + f = gt['f'] + c = gt['c'] + gt_3d_root = gt['root_cam'] + img_name = gt['img_path'].split('/') + img_name = img_name[-2] + '_' + img_name[-1].split('.')[0] # e.g., TS1_img_0001 + + # h36m joint from output mesh + out = outs[n] + mesh_out_cam = out['smpl_mesh_cam'] * 1000 + pred = np.dot(self.mpii3d_smpl_regressor, mesh_out_cam) + pred = pred - pred[self.mpii3d_root_idx, None] # root-relative + pred_3d_kpt = transform_joint_to_other_db(pred, self.mpii3d_joints_name, self.mupots_joints_name) + pred_3d_kpt += gt_3d_root + + pred_3d_save.setdefault(img_name + '_3d', []).append(pred_3d_kpt) + + pred_2d_kpt = cam2pixel(pred_3d_kpt, f, c) + pred_2d_save.setdefault(img_name + '_2d', []).append(pred_2d_kpt[:, :2]) + + vis = False + if vis: + cvimg = cv2.imread(gt['img_path'], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) + filename = str(random.randrange(1, 500)) + + pred_2d_kpt[:, 2] = 1 + # tmpimg = vis_keypoints(cvimg, pred_2d_kpt, alpha=1) + tmpimg = vis_keypoints_with_skeleton(cvimg, pred_2d_kpt.T, self.mupots_skeleton, kp_thresh=0.1, alpha=1) + # cv2.imwrite(filename + '_output.jpg', tmpimg) + cv2.imshow('mupots', tmpimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + + eval_result = {**pred_2d_save, **pred_3d_save} + return eval_result + + def print_eval_result(self, eval_result): + pred_2d_save = {} + pred_3d_save = {} + + for k, v in eval_result.items(): + if '2d' in k: + key = k.split('_2d')[0] + pred_2d_save[key] = v + elif '3d' in k: + key = k.split('_3d')[0] + pred_3d_save[key] = v + + result_dir = osp.join(cfg.result_dir, 'MuPoTs') + output_path = osp.join(result_dir, 'preds_2d_kpt_mupots.mat') + sio.savemat(output_path, pred_2d_save) + print("Testing result is saved at " + output_path) + output_path = osp.join(result_dir, 'preds_3d_kpt_mupots.mat') + sio.savemat(output_path, pred_3d_save) + print("Testing result is saved at " + output_path) \ No newline at end of file diff --git a/data_processing/data/PW3D/PW3D.py b/data_processing/data/PW3D/PW3D.py new file mode 
100644 index 0000000..76f4375 --- /dev/null +++ b/data_processing/data/PW3D/PW3D.py @@ -0,0 +1,373 @@ +import os +import os.path as osp +import numpy as np +import torch +import cv2 +import random +import json +import math +import copy +import transforms3d +from pycocotools.coco import COCO +from config import cfg +from utils.renderer import Renderer +from utils.smpl import SMPL +from utils.preprocessing import load_img, get_bbox, process_bbox, generate_patch_image, augmentation +from utils.transforms import cam2pixel, pixel2cam, rigid_align, transform_joint_to_other_db, denorm_joints, convert_crop_cam_to_orig_img +from utils.vis import vis_keypoints, vis_mesh, save_obj, vis_keypoints_with_skeleton, vis_bbox, render_mesh + + +class PW3D(torch.utils.data.Dataset): + def __init__(self, transform, data_split): + self.transform = transform + self.data_split ='validation' if cfg.crowd else 'test' # data_split + self.data_path = osp.join(cfg.data_dir, 'PW3D', 'data') + self.human_bbox_root_dir = osp.join(cfg.data_dir, 'PW3D', 'rootnet_output', 'bbox_root_pw3d_output.json') + + # SMPL joint set + self.smpl = SMPL() + self.face = self.smpl.face + self.joint_regressor = self.smpl.joint_regressor + self.vertex_num = self.smpl.vertex_num + self.joint_num = self.smpl.joint_num + self.joints_name = self.smpl.joints_name + self.skeleton = self.smpl.skeleton + self.root_joint_idx = self.smpl.root_joint_idx + self.face_kps_vertex = self.smpl.face_kps_vertex + + # H36M joint set + self.h36m_joints_name = ('Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Torso', 'Neck', 'Nose', 'Head_top', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist') + self.h36m_root_joint_idx = self.h36m_joints_name.index('Pelvis') + self.h36m_eval_joint = (1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16) + self.h36m_joint_regressor = np.load(osp.join(cfg.data_dir, 'Human36M', 'J_regressor_h36m_correct.npy')) + + # mscoco skeleton + self.coco_joint_num = 18+1 # original: 17, manually added pelvis + self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') + self.coco_skeleton = ( (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12) ) + self.coco_flip_pairs = ( (1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16) ) + self.coco_joint_regressor = np.load(osp.join(cfg.data_dir, 'MSCOCO', 'J_regressor_coco_hip_smpl.npy')) + + self.openpose_joints_name = ('Nose', 'Neck', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Eye', 'L_Eye', 'R_Ear', 'L_Ear', 'Pelvis') + self.conf_thr = 0.05 + + self.datalist = self.load_data() + print("3dpw data len: ", len(self.datalist)) + + def add_pelvis(self, joint_coord, joints_name): + lhip_idx = joints_name.index('L_Hip') + rhip_idx = joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx,2] * joint_coord[rhip_idx,2] # confidence for openpose + pelvis = pelvis.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, pelvis)) + + return joint_coord + + def add_neck(self, joint_coord, joints_name): + lshoulder_idx = joints_name.index('L_Shoulder') + rshoulder_idx = joints_name.index('R_Shoulder') + neck = (joint_coord[lshoulder_idx, :] + 
joint_coord[rshoulder_idx, :]) * 0.5 + neck[2] = joint_coord[lshoulder_idx, 2] * joint_coord[rshoulder_idx, 2] + neck = neck.reshape(1,3) + + joint_coord = np.concatenate((joint_coord, neck)) + + return joint_coord + + def load_data(self): + + db = COCO(osp.join(self.data_path, '3DPW_latest_' + self.data_split + '.json')) + if self.data_split == 'test' and not cfg.use_gt_info: + print("Get bounding box and root from " + self.human_bbox_root_dir) + bbox_root_result = {} + with open(self.human_bbox_root_dir) as f: + annot = json.load(f) + for i in range(len(annot)): + ann_id = str(annot[i]['ann_id']) + bbox_root_result[ann_id] = {'bbox': np.array(annot[i]['bbox']), 'root': np.array(annot[i]['root_cam'])} + elif cfg.crowd: + with open(osp.join(self.data_path, f'3DPW_{self.data_split}_crowd_hhrnet_result.json')) as f: + hhrnet_result = json.load(f) + print("Load Higher-HRNet input") + + else: + print("Load OpenPose input") + + hhrnet_count = 0 + datalist = [] + for aid in db.anns.keys(): + aid = int(aid) + + ann = db.anns[aid] + image_id = ann['image_id'] + img = db.loadImgs(image_id)[0] + img_width, img_height = img['width'], img['height'] + sequence_name = img['sequence'] + img_name = img['file_name'] + + if cfg.crowd and self.data_split=='validation': + if 'courtyard_hug_00' not in sequence_name and 'courtyard_dancing_00' not in sequence_name: + continue + + img_path = osp.join(self.data_path, 'imageFiles', sequence_name, img_name) + cam_param = {k: np.array(v, dtype=np.float32) for k,v in img['cam_param'].items()} + smpl_param = ann['smpl_param'] + + if self.data_split == 'test' and not cfg.use_gt_info: + bbox = bbox_root_result[str(aid)]['bbox'] # bbox should be aspect ratio preserved-extended. It is done in RootNet. + root_joint_depth = bbox_root_result[str(aid)]['root'][2] + else: + ann['bbox'] = np.array(ann['bbox'], dtype=np.float32) + + bbox = process_bbox(ann['bbox'], img['width'], img['height']) + if bbox is None: continue + root_joint_depth = None + + openpose = np.array(ann['openpose_result'], dtype=np.float32).reshape(-1, 3) + openpose = self.add_pelvis(openpose, self.openpose_joints_name) + pose_score_thr = self.conf_thr + + hhrnetpose = None + if cfg.crowd and self.data_split=='validation': + try: + hhrnetpose = np.array(hhrnet_result[str(aid)]['coco_joints']) + hhrnetpose = self.add_pelvis(hhrnetpose, self.coco_joints_name) + hhrnetpose = self.add_neck(hhrnetpose, self.coco_joints_name) + hhrnet_count += 1 + + except: + hhrnetpose = openpose + hhrnetpose = transform_joint_to_other_db(hhrnetpose, self.openpose_joints_name, self.coco_joints_name) + + datalist.append({ + 'ann_id': aid, + 'img_path': img_path, + 'img_shape': (img_height, img_width), + 'bbox': bbox, + 'tight_bbox': ann['bbox'], + 'smpl_param': smpl_param, + 'cam_param': cam_param, + 'root_joint_depth': root_joint_depth, + 'pose_score_thr': pose_score_thr, + 'openpose': openpose, + 'hhrnetpose': hhrnetpose + }) + + print("check hhrnet input: ", hhrnet_count) + return datalist + + def get_smpl_coord(self, smpl_param): + pose, shape, trans, gender = smpl_param['pose'], smpl_param['shape'], smpl_param['trans'], smpl_param['gender'] + smpl_pose = torch.FloatTensor(pose).view(1,-1); smpl_shape = torch.FloatTensor(shape).view(1,-1); # smpl parameters (pose: 72 dimension, shape: 10 dimension) + smpl_trans = torch.FloatTensor(trans).view(-1,3) # translation vector from smpl coordinate to 3dpw camera coordinate + + # TEMP + # gender = 'neutral' + # get mesh and joint coordinates + smpl_mesh_coord, smpl_joint_coord = 
self.smpl.layer[gender](smpl_pose, smpl_shape, smpl_trans) + + # incorporate face keypoints + smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1,3); + # smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1,3) + # smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex,:].reshape(-1,3) + # smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord)) + smpl_joint_coord = np.dot(self.joint_regressor, smpl_mesh_coord) + + return smpl_mesh_coord, smpl_joint_coord + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + data = copy.deepcopy(self.datalist[idx]) + aid, img_path, bbox, smpl_param, cam_param = data['ann_id'], data['img_path'], data['bbox'], data['smpl_param'], data['cam_param'] + + # get gt img joint from smpl coordinates + smpl_mesh_cam, smpl_joint_cam = self.get_smpl_coord(smpl_param) + smpl_coord_img = cam2pixel(smpl_joint_cam, cam_param['focal'], cam_param['princpt']) + joint_coord_img = smpl_coord_img + joint_valid = np.ones_like(joint_coord_img[:, :1], dtype=np.float32) + + if cfg.crowd and self.data_split == 'validation': + # get input joint img from higher hrnet + joint_coord_img = data['hhrnetpose'] + joint_coord_img = transform_joint_to_other_db(joint_coord_img, self.coco_joints_name, self.joints_name) + else: + # get input joint img from openpose + joint_coord_img = data['openpose'] + joint_coord_img = transform_joint_to_other_db(joint_coord_img, self.openpose_joints_name, self.joints_name) + pose_thr = data['pose_score_thr'] + joint_valid[joint_coord_img[:, 2] <= pose_thr] = 0 + + # get bbox from joints + bbox = get_bbox(joint_coord_img, joint_valid[:, 0]) + img_height, img_width = data['img_shape'] + bbox = process_bbox(bbox.copy(), img_width, img_height, is_3dpw_test=True) + bbox = data['bbox'] if bbox is None else bbox + + # img + img = load_img(img_path) + img, img2bb_trans, bb2img_trans, _, _ = augmentation(img, bbox, self.data_split) + img = self.transform(img.astype(np.float32))/255. 
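+ # the same crop transform is applied to the input 2D pose next; joint_trunc keeps only the joints that land inside the heatmap grid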
+ + """ + # vis + joint_coord_img = transform_joint_to_other_db(joint_coord_img, self.joints_name, self.crowdpose_joints_name) + img = cv2.imread(img_path) + input_img = vis_keypoints_with_skeleton(img, joint_coord_img.T, self.crowdpose_skeleton, kp_thresh=self.openpose_thr, alpha=1, kps_scores=joint_coord_img[:,2:]) + cv2.imshow('open pose', input_img) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + + # smpl_coord_img[:, 2] = 1 + # input_img = vis_keypoints_with_skeleton(img_copy, smpl_coord_img.T, self.skeleton, kp_thresh=0.4, alpha=1) + # cv2.imshow('smpl gt', input_img/255) + # cv2.waitKey(0) + # cv2.destroyAllWindows() + # cv2.waitKey(1) + """ + + # x,y affine transform, root-relative depth + joint_coord_img_xy1 = np.concatenate((joint_coord_img[:, :2], np.ones_like(joint_coord_img[:, 0:1])), 1) + joint_coord_img[:, :2] = np.dot(img2bb_trans, joint_coord_img_xy1.transpose(1, 0)).transpose(1, 0)[:, :2] + joint_coord_img[:, 0] = joint_coord_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + joint_coord_img[:, 1] = joint_coord_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + # check truncation + joint_trunc = joint_valid * ( + (joint_coord_img[:, 0] >= 0) * (joint_coord_img[:, 0] < cfg.output_hm_shape[2]) * \ + (joint_coord_img[:, 1] >= 0) * (joint_coord_img[:, 1] < cfg.output_hm_shape[1])).reshape(-1, 1).astype(np.float32) + + """ + print(f'{img_path} trunc:\n', joint_trunc.nonzero()) + tmp_coord = joint_coord_img[:, :2] * np.array([[cfg.input_img_shape[1] / cfg.output_hm_shape[2], cfg.input_img_shape[0]/ cfg.output_hm_shape[1]]]) + newimg = vis_keypoints(img.numpy().transpose(1,2,0), tmp_coord) + cv2.imshow(f'{img_path}', newimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + """ + + inputs = {'img': img, 'joints': joint_coord_img, 'joints_mask': joint_trunc} + targets = {'smpl_mesh_cam': smpl_mesh_cam} + meta_info = {'bb2img_trans': bb2img_trans, 'img2bb_trans': img2bb_trans, 'bbox': bbox, 'tight_bbox': data['tight_bbox'], 'aid': aid} + return inputs, targets, meta_info + + def evaluate(self, outs, cur_sample_idx): + annots = self.datalist + sample_num = len(outs) + eval_result = {'mpjpe': [], 'pa_mpjpe': [], 'mpvpe': []} + for n in range(sample_num): + annot = annots[cur_sample_idx + n] + out = outs[n] + + # h36m joint from gt mesh + mesh_gt_cam = out['smpl_mesh_cam_target'] + pose_coord_gt_h36m = np.dot(self.h36m_joint_regressor, mesh_gt_cam) + # debug + root_h36m_gt = pose_coord_gt_h36m[self.h36m_root_joint_idx, :] + pose_gt_img = cam2pixel(pose_coord_gt_h36m, annot['cam_param']['focal'], annot['cam_param']['princpt']) + pose_gt_img = transform_joint_to_other_db(pose_gt_img, self.h36m_joints_name, self.smpl.graph_joints_name) + + pose_coord_gt_h36m = pose_coord_gt_h36m - pose_coord_gt_h36m[self.h36m_root_joint_idx, None] # root-relative + pose_coord_gt_h36m = pose_coord_gt_h36m[self.h36m_eval_joint, :] + mesh_gt_cam -= np.dot(self.joint_regressor, mesh_gt_cam)[0, None, :] + + # TEMP: use PositionNet output + # pose_out_img = out['joint_img'] + # pose_out_img = denorm_joints(pose_out_img, out['bb2img_trans']) + # pose_out_img[:, 2] = (pose_out_img[:, 2] / cfg.output_hm_shape[0] * 2. 
- 1) * (cfg.bbox_3d_size / 2) + root_h36m_gt[None, 2] + # pose_out_cam = pixel2cam(pose_out_img, annot['cam_param']['focal'], annot['cam_param']['princpt']) + # pose_coord_out_h36m = transform_joint_to_other_db(pose_out_cam, self.smpl.graph_joints_name, self.h36m_joints_name) + + # h36m joint from output mesh + mesh_out_cam = out['smpl_mesh_cam'] + pose_coord_out_h36m = np.dot(self.h36m_joint_regressor, mesh_out_cam) + # # debug + # pose_out_img = cam2pixel(pose_coord_out_h36m + root_h36m_gt, annot['cam_param']['focal'], annot['cam_param']['princpt']) + # pose_out_img = transform_joint_to_other_db(pose_out_img, self.h36m_joints_name, self.smpl.graph_joints_name) + + pose_coord_out_h36m = pose_coord_out_h36m - pose_coord_out_h36m[self.h36m_root_joint_idx, None] # root-relative + pose_coord_out_h36m = pose_coord_out_h36m[self.h36m_eval_joint, :] + pose_coord_out_h36m_aligned = rigid_align(pose_coord_out_h36m, pose_coord_gt_h36m) + + eval_result['mpjpe'].append(np.sqrt( + np.sum((pose_coord_out_h36m - pose_coord_gt_h36m) ** 2, 1)).mean() * 1000) # meter -> milimeter + eval_result['pa_mpjpe'].append(np.sqrt(np.sum((pose_coord_out_h36m_aligned - pose_coord_gt_h36m) ** 2, + 1)).mean() * 1000) # meter -> milimeter + mesh_out_cam -= np.dot(self.joint_regressor, mesh_out_cam)[0, None, :] + + # compute MPVPE + mesh_error = np.sqrt(np.sum((mesh_gt_cam - mesh_out_cam) ** 2, 1)).mean() * 1000 + eval_result['mpvpe'].append(mesh_error) + + if cfg.render: + img = cv2.imread(annot['img_path']) + mesh_cam_render = out['mesh_cam_render'] + bbox = out['bbox'] + princpt = (bbox[0]+bbox[2]/2, bbox[1]+bbox[3]/2) + img = vis_bbox(img, bbox, alpha=1) + + rendered_img = render_mesh(img, mesh_cam_render, self.face, {'focal': cfg.focal, 'princpt': princpt}) + + cv2.imshow(annot['img_path'], rendered_img/255) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + + if cfg.vis: + img = cv2.imread(annot['img_path']) + bbox_to_vis = out['bbox'] + + # vis input 2d pose + # pose_out_img = out['input_joints'] + # pose_out_img = denorm_joints(pose_out_img, out['bb2img_trans']) + # pose_scores = pose_out_img[:, 2:].round(3) + # newimg = vis_keypoints_with_skeleton(img.copy(), pose_out_img.T, self.skeleton, kp_thresh=self.openpose_thr, alpha=1, kps_scores=pose_scores) + # newimg = vis_bbox(newimg, bbox_to_vis, alpha=1) + # cv2.imwrite(f'./{annot["img_path"].split("_")[-1][:-4]}_{out["aid"]}_input_2dpose.jpg', newimg) + + # vis PositionNet output + pose_out_img = out['joint_img'] + pose_scores = (out['joint_score']).round(3) + pose_out_img = denorm_joints(pose_out_img, out['bb2img_trans']) + pose_out_img = np.concatenate((pose_out_img, pose_out_img[:, :1]), axis=1) + newimg = vis_keypoints_with_skeleton(img.copy(), pose_out_img.T, self.smpl.graph_skeleton, kp_thresh=0.4, alpha=1, kps_scores=pose_scores) + newimg = vis_bbox(newimg, bbox_to_vis, alpha=1) + cv2.imwrite(f'./{annot["img_path"].split("_")[-1][:-4]}_{out["aid"]}_positionnet.jpg', newimg) + + # vis RotationNet output + pose_out_img = out['joint_proj'] + + pose_out_img = denorm_joints(pose_out_img, out['bb2img_trans']) + pose_out_img = np.concatenate((pose_out_img, pose_out_img[:, :1]), axis=1) + newimg = vis_keypoints_with_skeleton(img.copy(), pose_out_img.T, self.skeleton, + kp_thresh=0.4, alpha=1) + newimg = vis_bbox(newimg, bbox_to_vis, alpha=1) + cv2.imwrite(f'./{annot["img_path"].split("_")[-1][:-4]}_{out["aid"]}_final.jpg', newimg) + + save_obj(mesh_out_cam, self.face, f'./{annot["img_path"].split("_")[-1][:-4]}_{out["aid"]}_final.obj') + + # vis gt + 
pose_gt_img[:, 2] = 1 + newimg = vis_keypoints_with_skeleton(img.copy(), pose_gt_img.T, self.smpl.graph_skeleton, + kp_thresh=0.4, alpha=1) + newimg = vis_bbox(newimg, bbox_to_vis, alpha=1) + cv2.imwrite(f'./{annot["img_path"].split("_")[-1][:-4]}_{out["aid"]}_gt.jpg', newimg) + + save_obj(mesh_gt_cam, self.face, f'./{annot["img_path"].split("_")[-1][:-4]}_{out["aid"]}_gt.obj') + + return eval_result + + def print_eval_result(self, eval_result): + print('MPJPE from mesh: %.2f mm' % np.mean(eval_result['mpjpe'])) + print('PA MPJPE from mesh: %.2f mm' % np.mean(eval_result['pa_mpjpe'])) + print('MPVPE from mesh: %.2f mm' % np.mean(eval_result['mpvpe'])) + + + + diff --git a/data_processing/data/dataset.py b/data_processing/data/dataset.py new file mode 100644 index 0000000..e8fc95e --- /dev/null +++ b/data_processing/data/dataset.py @@ -0,0 +1,40 @@ +import random +import numpy as np +from torch.utils.data.dataset import Dataset +from config import cfg + +class MultipleDatasets(Dataset): + def __init__(self, dbs, make_same_len=True): + self.dbs = dbs + self.db_num = len(self.dbs) + self.max_db_data_num = max([len(db) for db in dbs]) + self.db_len_cumsum = np.cumsum([len(db) for db in dbs]) + self.make_same_len = make_same_len + + def __len__(self): + # all dbs have the same length + if self.make_same_len: + return self.max_db_data_num * self.db_num + # each db has different length + else: + return sum([len(db) for db in self.dbs]) + + def __getitem__(self, index): + if self.make_same_len: + db_idx = index // self.max_db_data_num + data_idx = index % self.max_db_data_num + if data_idx >= len(self.dbs[db_idx]) * (self.max_db_data_num // len(self.dbs[db_idx])): # last batch: random sampling + data_idx = random.randint(0,len(self.dbs[db_idx])-1) + else: # before last batch: use modular + data_idx = data_idx % len(self.dbs[db_idx]) + else: + for i in range(self.db_num): + if index < self.db_len_cumsum[i]: + db_idx = i + break + if db_idx == 0: + data_idx = index + else: + data_idx = index - self.db_len_cumsum[db_idx-1] + + return self.dbs[db_idx][data_idx] diff --git a/data_processing/demo/backup/template_mesh.npy b/data_processing/demo/backup/template_mesh.npy new file mode 100644 index 0000000..333dbb6 Binary files /dev/null and b/data_processing/demo/backup/template_mesh.npy differ diff --git a/data_processing/demo/backup/template_mesh_in_pyrender.npy b/data_processing/demo/backup/template_mesh_in_pyrender.npy new file mode 100644 index 0000000..e16e4e5 Binary files /dev/null and b/data_processing/demo/backup/template_mesh_in_pyrender.npy differ diff --git a/data_processing/demo/demo.py b/data_processing/demo/demo.py new file mode 100644 index 0000000..84c4f4f --- /dev/null +++ b/data_processing/demo/demo.py @@ -0,0 +1,224 @@ +import glob +import sys +import os +import os.path as osp +import argparse +import numpy as np +import cv2 +import colorsys +import json +import random +import torch +import torchvision.transforms as transforms +from torch.nn.parallel.data_parallel import DataParallel +import torch.backends.cudnn as cudnn +import matplotlib.pyplot as plt + + +sys.path.insert(0, osp.join('..', 'main')) +sys.path.insert(0, osp.join('..', 'data')) +sys.path.insert(0, osp.join('..', 'common')) +from config import cfg +from model import get_model +from utils.preprocessing import process_bbox, generate_patch_image, get_bbox +from utils.transforms import pixel2cam, cam2pixel, transform_joint_to_other_db +from utils.vis import vis_mesh, save_obj, render_mesh, vis_coco_skeleton 
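+# add cfg.smpl_path to the import path before loading the SMPL wrapper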
+sys.path.insert(0, cfg.smpl_path) +from utils.smpl import SMPL + + + +def add_pelvis(joint_coord, joints_name): + lhip_idx = joints_name.index('L_Hip') + rhip_idx = joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx, 2] * joint_coord[rhip_idx, 2] # confidence for openpose + pelvis = pelvis.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, pelvis)) + + return joint_coord + +def add_neck(joint_coord, joints_name): + lshoulder_idx = joints_name.index('L_Shoulder') + rshoulder_idx = joints_name.index('R_Shoulder') + neck = (joint_coord[lshoulder_idx, :] + joint_coord[rshoulder_idx, :]) * 0.5 + neck[2] = joint_coord[lshoulder_idx, 2] * joint_coord[rshoulder_idx, 2] + neck = neck.reshape(1,3) + + joint_coord = np.concatenate((joint_coord, neck)) + + return joint_coord + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, dest='gpu_ids') + parser.add_argument('--model_path', type=str, default='demo_checkpoint.pth.tar') + parser.add_argument('--img_idx', type=str, default='101570') + + args = parser.parse_args() + + # test gpus + if not args.gpu_ids: + assert 0, print("Please set proper gpu ids") + + if '-' in args.gpu_ids: + gpus = args.gpu_ids.split('-') + gpus[0] = int(gpus[0]) + gpus[1] = int(gpus[1]) + 1 + args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) + + return args + +# argument parsing +args = parse_args() +cfg.set_args(args.gpu_ids, is_test=True) +cfg.render = True +cudnn.benchmark = True + +# SMPL joint set +joint_num = 30 # original: 24. manually add nose, L/R eye, L/R ear, head top +joints_name = ( +'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', +'Neck', 'L_Thorax', 'R_Thorax', +'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', +'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') +flip_pairs = ( +(1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) +skeleton = ( +(0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), (17, 19), +(19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), (24, 25), (24, 26), +(25, 27), (26, 28), (24, 29)) + +# SMPl mesh +vertex_num = 6890 +smpl = SMPL() +face = smpl.face + +# other joint set +coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') +coco_skeleton = ( +(1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), +(11, 17), (12,17), (17,18)) + +vis_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Thorax', 'Pelvis') +vis_skeleton = ((0, 1), (0, 2), (2, 4), (1, 3), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 17), (6, 17), (11, 18), (12, 18), (17, 18), (17, 0), (6, 8), (8, 10),) + +# snapshot load +model_path = args.model_path +assert osp.exists(model_path), 'Cannot find model at ' + model_path +print('Load checkpoint from {}'.format(model_path)) +model = get_model(vertex_num, joint_num, 'test') + +model = DataParallel(model).cuda() +ckpt = 
torch.load(model_path) +model.load_state_dict(ckpt['network'], strict=False) +model.eval() + +# prepare input image +transform = transforms.ToTensor() +pose2d_result_path = './input/2d_pose_result.json' +with open(pose2d_result_path) as f: + pose2d_result = json.load(f) + +img_dir = './input/images' +for img_name in sorted(pose2d_result.keys()): + img_path = osp.join(img_dir, img_name) + original_img = cv2.imread(img_path) + input = original_img.copy() + input2 = original_img.copy() + original_img_height, original_img_width = original_img.shape[:2] + coco_joint_list = pose2d_result[img_name] + + if args.img_idx not in img_name: + continue + + drawn_joints = [] + c = coco_joint_list + # manually assign the order of output meshes + # coco_joint_list = [c[2], c[0], c[1], c[4], c[3]] + + for idx in range(len(coco_joint_list)): + """ 2D pose input setting & hard-coding for filtering """ + pose_thr = 0.1 + coco_joint_img = np.asarray(coco_joint_list[idx])[:, :3] + coco_joint_img = add_pelvis(coco_joint_img, coco_joints_name) + coco_joint_img = add_neck(coco_joint_img, coco_joints_name) + coco_joint_valid = (coco_joint_img[:, 2].copy().reshape(-1, 1) > pose_thr).astype(np.float32) + + # filter inaccurate inputs + det_score = sum(coco_joint_img[:, 2]) + if det_score < 1.0: + continue + if len(coco_joint_img[:, 2:].nonzero()[0]) < 1: + continue + # filter the same targets + tmp_joint_img = coco_joint_img.copy() + continue_check = False + for ddx in range(len(drawn_joints)): + drawn_joint_img = drawn_joints[ddx] + drawn_joint_val = (drawn_joint_img[:, 2].copy().reshape(-1, 1) > pose_thr).astype(np.float32) + diff = np.abs(tmp_joint_img[:, :2] - drawn_joint_img[:, :2]) * coco_joint_valid * drawn_joint_val + diff = diff[diff != 0] + if diff.size == 0: + continue_check = True + elif diff.mean() < 20: + continue_check = True + if continue_check: + continue + drawn_joints.append(tmp_joint_img) + + """ Prepare model input """ + # prepare bbox + bbox = get_bbox(coco_joint_img, coco_joint_valid[:, 0]) # xmin, ymin, width, height + bbox = process_bbox(bbox, original_img_width, original_img_height) + if bbox is None: + continue + img, img2bb_trans, bb2img_trans = generate_patch_image(input2[:,:,::-1], bbox, 1.0, 0.0, False, cfg.input_img_shape) + img = transform(img.astype(np.float32))/255 + img = img.cuda()[None,:,:,:] + + coco_joint_img_xy1 = np.concatenate((coco_joint_img[:, :2], np.ones_like(coco_joint_img[:, :1])), 1) + coco_joint_img[:, :2] = np.dot(img2bb_trans, coco_joint_img_xy1.transpose(1, 0)).transpose(1, 0) + coco_joint_img[:, 0] = coco_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + coco_joint_img[:, 1] = coco_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + coco_joint_img = transform_joint_to_other_db(coco_joint_img, coco_joints_name, joints_name) + coco_joint_valid = transform_joint_to_other_db(coco_joint_valid, coco_joints_name, joints_name) + coco_joint_valid[coco_joint_img[:, 2] <= pose_thr] = 0 + + # check truncation + coco_joint_trunc = coco_joint_valid * ((coco_joint_img[:, 0] >= 0) * (coco_joint_img[:, 0] < cfg.output_hm_shape[2]) * (coco_joint_img[:, 1] >= 0) * (coco_joint_img[:, 1] < cfg.output_hm_shape[1])).reshape( + -1, 1).astype(np.float32) + coco_joint_img, coco_joint_trunc, bbox = torch.from_numpy(coco_joint_img).cuda()[None, :, :], torch.from_numpy(coco_joint_trunc).cuda()[None, :, :], torch.from_numpy(bbox).cuda()[None, :] + + """ Model forward """ + inputs = {'img': img, 'joints': coco_joint_img, 'joints_mask': coco_joint_trunc} + 
targets = {} + meta_info = {'bbox': bbox} + with torch.no_grad(): + out = model(inputs, targets, meta_info, 'test') + + # draw output mesh + mesh_cam_render = out['mesh_cam_render'][0].cpu().numpy() + bbox = out['bbox'][0].cpu().numpy() + princpt = (bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2) + # original_img = vis_bbox(original_img, bbox, alpha=1) # for debug + + # generate random color + color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + original_img = render_mesh(original_img, mesh_cam_render, face, {'focal': cfg.focal, 'princpt': princpt}, color=color) + + # Save output mesh + output_dir = 'output' + file_name = f'{output_dir}/{img_path.split("/")[-1][:-4]}_{idx}.jpg' + print("file name: ", file_name) + save_obj(mesh_cam_render, face, file_name=f'{output_dir}/{img_path.split("/")[-1][:-4]}_{idx}.obj') + cv2.imwrite(file_name, original_img) + + # Draw input 2d pose + tmp_joint_img[-1], tmp_joint_img[-2] = tmp_joint_img[-2].copy(), tmp_joint_img[-1].copy() + input = vis_coco_skeleton(input, tmp_joint_img.T, vis_skeleton) + cv2.imwrite(file_name[:-4] + '_2dpose.jpg', input) + + diff --git a/data_processing/demo/extract_camera_parameter.py b/data_processing/demo/extract_camera_parameter.py new file mode 100644 index 0000000..863e450 --- /dev/null +++ b/data_processing/demo/extract_camera_parameter.py @@ -0,0 +1,589 @@ +import glob +import shutil +import sys +import os +import os.path as osp +import argparse + +import matplotlib.pyplot as plt +import numpy as np +import cv2 +import colorsys +import json +import random +import torch +import torchvision.transforms as transforms +from torch.nn.parallel.data_parallel import DataParallel +import torch.backends.cudnn as cudnn +import pyrender + +sys.path.insert(0, osp.join('..', 'main')) +sys.path.insert(0, osp.join('..', 'data')) +sys.path.insert(0, osp.join('..', 'common')) +from config import cfg +from model import get_model +from utils.preprocessing import process_bbox, generate_patch_image, get_bbox +from utils.transforms import pixel2cam, cam2pixel, transform_joint_to_other_db +from utils.vis import vis_mesh, save_obj, render_mesh, vis_coco_skeleton +import atexit +sys.path.insert(0, cfg.smpl_path) +from utils.smpl import SMPL + +import os +# os.environ["PYOPENGL_PLATFORM"] = "egl" +# check if on a Linux machine +if os.name == 'posix': # Linux + os.environ["PYOPENGL_PLATFORM"] = "egl" +def add_pelvis(joint_coord, joints_name): + lhip_idx = joints_name.index('L_Hip') + rhip_idx = joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx, 2] * joint_coord[rhip_idx, 2] # confidence for openpose + pelvis = pelvis.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, pelvis)) + + return joint_coord + + +def add_neck(joint_coord, joints_name): + lshoulder_idx = joints_name.index('L_Shoulder') + rshoulder_idx = joints_name.index('R_Shoulder') + neck = (joint_coord[lshoulder_idx, :] + joint_coord[rshoulder_idx, :]) * 0.5 + neck[2] = joint_coord[lshoulder_idx, 2] * joint_coord[rshoulder_idx, 2] + neck = neck.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, neck)) + + return joint_coord + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, dest='gpu_ids') + parser.add_argument('--model_path', type=str, default='demo_checkpoint.pth.tar') + parser.add_argument('--input_dir', type=str, default='') + parser.add_argument('--output_dir', type=str, default='output') + parser.add_argument('--data_dir', 
type=str, default='101570') + parser.add_argument('--crop_image_size', type=int, default=1024) + parser.add_argument('--debug', type=int, default=0) + + args = parser.parse_args() + + # test gpus + if not args.gpu_ids: + assert 0, print("Please set proper gpu ids") + + if '-' in args.gpu_ids: + gpus = args.gpu_ids.split('-') + gpus[0] = int(gpus[0]) + gpus[1] = int(gpus[1]) + 1 + args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) + + return args + + +def bad_image_vis(image, joint,vis_skeleton): + joint[-1], joint[-2] = joint[-2].copy(), joint[-1].copy() + image = vis_coco_skeleton(image, joint.T, vis_skeleton) + image = cv2.resize(image, (512, int(image.shape[0]/image.shape[1] *512))) + return image + + + + +# argument parsing +args = parse_args() +cfg.set_args(args.gpu_ids, is_test=True) +cfg.set_data_dir(args.data_dir) +cfg.render = True +cudnn.benchmark = True + +# SMPL joint set +joint_num = 30 # original: 24. manually add nose, L/R eye, L/R ear, head top +joints_name = ( + 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', + 'Neck', 'L_Thorax', 'R_Thorax', + 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', + 'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') +flip_pairs = ( + (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) +skeleton = ( + (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), + (17, 19), + (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), (24, 25), + (24, 26), + (25, 27), (26, 28), (24, 29)) + +# SMPl mesh +vertex_num = 6890 +smpl = SMPL() +face = smpl.face + +alpha = 0.8 + +# other joint set +coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') +coco_skeleton = ( + (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), + (5, 6), + (11, 17), (12, 17), (17, 18)) + +vis_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Thorax', 'Pelvis') +vis_skeleton = ( + (0, 1), (0, 2), (2, 4), (1, 3), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 17), (6, 17), (11, 18), + (12, 18), (17, 18), (17, 0), (6, 8), (8, 10),) + +# snapshot load +model_path = args.model_path +assert osp.exists(model_path), 'Cannot find model at ' + model_path +print('Load checkpoint from {}'.format(model_path)) +model = get_model(vertex_num, joint_num, 'test') + +model = DataParallel(model).cuda() +ckpt = torch.load(model_path) +model.load_state_dict(ckpt['network'], strict=False) +model.eval() + +# prepare input image +transform = transforms.ToTensor() +pose2d_result_path = os.path.join(args.input_dir, '2d_pose_result_hrnet.json') +with open(pose2d_result_path) as f: + pose2d_result = json.load(f) + + +head_bbox_path = os.path.join(args.input_dir, 'head_bbox_yolov5_crowdhuman.json') +with open(head_bbox_path) as f: + head_bbox_result = json.load(f) + +img_dir = os.path.join(args.input_dir, 'images') + +output_dir = args.output_dir +print('>>>>>>> output_dir', output_dir) +os.makedirs(output_dir, exist_ok=True) + +aligned_images_dir = 
os.path.join(output_dir, 'aligned_images') +os.makedirs(aligned_images_dir, exist_ok=True) + +bad_images_dir = os.path.join(output_dir, 'bad_images') +os.makedirs(bad_images_dir, exist_ok=True) + +visualization_dir = os.path.join(output_dir, 'visualization') +os.makedirs(visualization_dir, exist_ok=True) + + +result_json_path = os.path.join(output_dir, 'result.json') +if os.path.exists(result_json_path): + with open(result_json_path, 'r') as f: + result_json = json.load(f) +else: + result_json = {} + +def exit_function(): + global result_json + with open(result_json_path, 'w') as f: + json.dump(result_json, f) + print('结束') + +atexit.register(exit_function) + + + +if not os.path.exists('./template_mesh.npy'): + print( + f'save template mesh (shape {model.module.template_mesh_cam_render_no_flip.cpu().numpy().shape}) to ./template_mesh.npy') + np.save('./template_mesh.npy', model.module.template_mesh_cam_render_no_flip.cpu().numpy()) + template_mesh = model.module.template_mesh_cam_render_no_flip.cpu().numpy() +else: + print('load template_mesh from ', './template_mesh.npy') + template_mesh = np.load('./template_mesh.npy') + + +if not os.path.exists('./template_mesh_in_pyrender.npy'): + print( + f'save template mesh (shape {model.module.template_mesh_cam_render.cpu().numpy().shape}) to ./template_mesh_in_pyrender.npy') + np.save('./template_mesh_in_pyrender.npy', model.module.template_mesh_cam_render.cpu().numpy()) + + +min_box_stride = 50 + +model.module.set_min_box_stride(min_box_stride) + +image_list = glob.glob(os.path.join(img_dir, "*")) +for img_idx,img_path in enumerate(image_list): + + print(f'{img_idx}/{len(image_list)}',img_path) + original_img = cv2.imread(img_path) + img_name = os.path.basename(img_path) + if img_name not in pose2d_result or img_name not in head_bbox_result: + raise ValueError('please generate 2d pose result and head bbox result for all images first!') + # print(img_name) + # debug + # if img_name.split('.')[0] not in ['pexels-photo-15829424']: + # continue + + original_img_height, original_img_width = original_img.shape[:2] + coco_joint_list = pose2d_result[img_name] + head_bbox_list = head_bbox_result[img_name] + if len(coco_joint_list) > 50: + coco_joint_list = coco_joint_list[:50] + head_bbox_list = head_bbox_list[:50] + + assert len(coco_joint_list) == len(head_bbox_list), 'len(coco_joint_list) != len(head_bbox_list)' + + drawn_joints = [] + c = coco_joint_list + + result_count = 0 + + used_joints = [] + + for idx in range(len(coco_joint_list)): + + image_name = os.path.basename(img_path).split('.')[0] + file_name = f'{image_name}_{idx}.jpg' + + + if f'{image_name}_{idx}.png' in result_json or f'{image_name}_{idx}_h.png' in result_json or f'{image_name}_{idx}_s.png' in result_json: + result_count += 1 + continue + + + image = original_img.copy() + input = original_img.copy() + input2 = original_img.copy() + """ 2D pose input setting & hard-coding for filtering """ + pose_thr = 0.05 + coco_joint_img = np.asarray(coco_joint_list[idx])[:, :3] + + # if there is a similar joint in used_joints, skip this joint + if len(used_joints) > 0: + for joint in used_joints: + #print(np.linalg.norm(joint - coco_joint_img)/ np.linalg.norm(coco_joint_img)) + distance = max( + max(coco_joint_img[:, 0])-min(coco_joint_img[:, 0]), + max(coco_joint_img[:, 1])-min(coco_joint_img[:, 1]) + ) + #print( np.linalg.norm(joint - coco_joint_img)/ distance) + if np.linalg.norm(joint - coco_joint_img)/ distance < 0.15: + print('skip similar', np.linalg.norm(joint - coco_joint_img) / 
np.linalg.norm(coco_joint_img)) + continue + used_joints.append(coco_joint_img) + + coco_joint_img = add_pelvis(coco_joint_img, coco_joints_name) + coco_joint_img = add_neck(coco_joint_img, coco_joints_name) + coco_joint_valid = (coco_joint_img[:, 2].copy().reshape(-1, 1) > pose_thr).astype(np.float32) + + """head bbox""" + head_bbox = head_bbox_list[idx] + + # if len(head_bbox)<4: + # # bad_vis = bad_image_vis(image, coco_joint_img.copy(), vis_skeleton) + # # cv2.imwrite(os.path.join(bad_images_dir, file_name), bad_vis) + # continue + # filter inaccurate inputs + det_score = sum(coco_joint_img[:, 2]) + if det_score < 0.3: + print('skip low det score', det_score) + continue + if len(coco_joint_img[:, 2:].nonzero()[0]) < 1: + print('skip no det score', det_score) + continue + # filter the same targets + tmp_joint_img = coco_joint_img.copy() + continue_check = False + for ddx in range(len(drawn_joints)): + drawn_joint_img = drawn_joints[ddx] + drawn_joint_val = (drawn_joint_img[:, 2].copy().reshape(-1, 1) > pose_thr).astype(np.float32) + diff = np.abs(tmp_joint_img[:, :2] - drawn_joint_img[:, :2]) * coco_joint_valid * drawn_joint_val + diff = diff[diff != 0] + if diff.size == 0: + continue_check = True + elif diff.mean() < 20: + continue_check = True + if continue_check: + print('skip continue_check') + # bad_vis = bad_image_vis(image, coco_joint_img.copy(), vis_skeleton) + # cv2.imwrite(os.path.join(bad_images_dir, file_name), bad_vis) + continue + + + drawn_joints.append(tmp_joint_img) + + tmp_joint_img[-1], tmp_joint_img[-2] = tmp_joint_img[-2].copy(), tmp_joint_img[-1].copy() + + + + + """ Prepare model input """ + # prepare bbox + # bbox = get_bbox(coco_joint_img, coco_joint_valid[:, 0]) # xmin, ymin, width, height + bbox = get_bbox(coco_joint_img, np.ones_like(coco_joint_valid[:, 0])) + if bbox[2] < min_box_stride or bbox[3] < min_box_stride: + print('skip too small bbox', bbox[2], bbox[3]) + continue + orig_bbox = bbox.copy() + bbox = process_bbox(bbox, original_img_width, original_img_height) + if bbox is None: + print('skip invalid bbox') + continue + img, img2bb_trans, bb2img_trans = generate_patch_image(input2[:, :, ::-1], bbox, 1.0, 0.0, False, + cfg.input_img_shape) + img = transform(img.astype(np.float32)) / 255 + img = img.cuda()[None, :, :, :] + + coco_joint_img_xy1 = np.concatenate((coco_joint_img[:, :2], np.ones_like(coco_joint_img[:, :1])), 1) + coco_joint_img[:, :2] = np.dot(img2bb_trans, coco_joint_img_xy1.transpose(1, 0)).transpose(1, 0) + coco_joint_img[:, 0] = coco_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + coco_joint_img[:, 1] = coco_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + coco_joint_img = transform_joint_to_other_db(coco_joint_img, coco_joints_name, joints_name) + coco_joint_valid = transform_joint_to_other_db(coco_joint_valid, coco_joints_name, joints_name) + coco_joint_valid[coco_joint_img[:, 2] <= pose_thr] = 0 + + # check truncation + coco_joint_trunc = coco_joint_valid * ( + (coco_joint_img[:, 0] >= 0) * (coco_joint_img[:, 0] < cfg.output_hm_shape[2]) * ( + coco_joint_img[:, 1] >= 0) * (coco_joint_img[:, 1] < cfg.output_hm_shape[1])).reshape( + -1, 1).astype(np.float32) + coco_joint_img, coco_joint_trunc, bbox = torch.from_numpy(coco_joint_img).cuda()[None, :, :], torch.from_numpy( + coco_joint_trunc).cuda()[None, :, :], torch.from_numpy(bbox).cuda()[None, :] + + """ Model forward """ + inputs = {'img': img, 'joints': coco_joint_img, 'joints_mask': coco_joint_trunc} + targets = {} + meta_info = {'bbox': 
bbox} + with torch.no_grad(): + out = model(inputs, targets, meta_info, 'test') + + + #print("file name: ", file_name) + + bbox = out['bbox'][0].cpu().numpy() + princpt = (bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2) + + + + color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + + intrisics_full_image_dict = {'focal': cfg.focal, 'princpt': princpt} + camera_to_render_template_in_pyrender = out['camera_to_render_template_in_pyrender'].cpu().numpy() + camera_pose_in_pyrender = out['camera_pose_in_pyrender'].cpu().numpy() + + + + if args.debug: + viz = [original_img] + + image_mesh = render_mesh(image, out['mesh_cam_render'][0].cpu().numpy(), face, + intrisics_full_image_dict, + color=color) + + viz.append((image_mesh * alpha + original_img * (1.0 - alpha)).astype(np.uint8)) + + image_template = render_mesh(image.copy(), model.module.template_mesh_cam_render[0].cpu().numpy(), face, + intrisics_full_image_dict, + color=color, cam_pose=camera_to_render_template_in_pyrender) + viz.append((image_template * alpha + original_img * (1.0 - alpha)).astype(np.uint8)) + + image_camera_rotate = render_mesh(image.copy(),model.module.template_mesh_cam_render[0].cpu().numpy(), face, + intrisics_full_image_dict, + color=color, cam_pose=camera_pose_in_pyrender) + viz.append((image_camera_rotate * alpha + original_img * (1.0 - alpha)).astype(np.uint8)) + + + viz_full_image = np.concatenate(viz, axis=0 if original_img.shape[1] > original_img.shape[0] else 1) + viz_full_image = cv2.resize(viz_full_image, (int( viz_full_image.shape[1] / viz_full_image.shape[0] * 853),853 )) + + + + + + + + crop_image_size = args.crop_image_size + crop_outputs = model.module.crop_and_process_camera_matrix(out, + original_img.copy(), + joint_2d= tmp_joint_img, # used to realign + crop_image_size=crop_image_size, + model_input_bbox=bbox, + head_bbox = head_bbox) + model_input_bbox = bbox.copy() + if len(crop_outputs) == 0: + continue + if len(crop_outputs) == 1: + save_keys = [f'{image_name}_{idx}.png'] + else: + save_keys = [f'{image_name}_{idx}_h.png' ,f'{image_name}_{idx}_s.png'] + + + for crop_idx in range(len(crop_outputs)): + crop_output = crop_outputs[crop_idx] + if crop_output is None: + continue + save_key = save_keys[crop_idx] + intrisics_crop = np.eye(4) + intrisics_crop[0, 0] = crop_output['intrisics']['focal'][0] + intrisics_crop[1, 1] = crop_output['intrisics']['focal'][1] + intrisics_crop[0, 2] = crop_output['intrisics']['princpt'][0] + intrisics_crop[1, 2] = crop_output['intrisics']['princpt'][1] + intrisics_crop_dict = {'focal': (intrisics_crop[0, 0], intrisics_crop[1, 1]), + 'princpt': [intrisics_crop[0, 2], intrisics_crop[1, 2]]} + + intrisics_standard = np.eye(4) + intrisics_standard[0, 0] = cfg.focal[0] + intrisics_standard[1, 1] = cfg.focal[1] + intrisics_standard[0, 2] = crop_image_size / 2 + intrisics_standard[1, 2] = crop_image_size / 2 + intrisics_standard_dict = {'focal': cfg.focal, 'princpt': [crop_image_size / 2, crop_image_size / 2]} + + normalized_camerapose_in_pyrender = out['normalized_camerapose_in_pyrender'] + normalized_transformation_in_realworld = out['normalized_transformation_in_realworld'] + camerapose_in_realworld = np.linalg.inv(normalized_transformation_in_realworld) + + + # realign image + + + viz = [crop_output['cropped_image']] + + # image_mesh = render_mesh(crop_output['cropped_image'], + # out['mesh_cam_render'][0].cpu().numpy(), face, + # intrisics_crop_dict, + # color=color) + image_mesh = 
render_mesh(crop_output['cropped_image'].copy(),out['neck_head_rotated_template_mesh'][0].cpu().numpy(), face, + intrisics_standard_dict, + color=color, + cam_pose=normalized_camerapose_in_pyrender) + + #image_mesh,_,_ = generate_patch_image(image_mesh, crop_output['bbox'], 1.0, 0.0, False, (crop_image_size,crop_image_size)) + viz.append((image_mesh * alpha + crop_output['cropped_image'] * (1.0-alpha)).astype(np.uint8)) + + # image_template = render_mesh(crop_output['cropped_image'].copy(), + # model.module.template_mesh_cam_render[0].cpu().numpy(), face, + # intrisics_crop_dict, + # color=color, + # cam_pose=camera_to_render_template_in_pyrender) + # viz.append((image_template * alpha + crop_output['cropped_image'] * (1.0-alpha)).astype(np.uint8)) + + # image_camera_rotate = render_mesh(crop_output['cropped_image'].copy(), + # model.module.template_mesh_cam_render[0].cpu().numpy(), face, + # intrisics_standard_dict, + # color=color, + # cam_pose=normalized_camerapose_in_pyrender) + # viz.append((image_camera_rotate * alpha + crop_output['cropped_image'] * (1.0-alpha)).astype(np.uint8)) + + + if args.debug: + projected_vertexes = model.module.get_projected_vertex(torch.from_numpy(template_mesh).float().cuda(), + intrisics_standard @ normalized_transformation_in_realworld) + + vertex_vis = crop_output['cropped_image'].copy() + + + camera_forward_direction = (camerapose_in_realworld[:3, :3] @ np.reshape(np.array([0, 0, 1]), (3, 1)))[:, 0] # 3,1 + camera_position = camerapose_in_realworld[:3, 3] # 3,1 + + not_pass_check = 0 + in_screen = 0 + for i in range(projected_vertexes.shape[0]): + if projected_vertexes[i, 0] < 0 or projected_vertexes[i, 0] >= vertex_vis.shape[1] or \ + projected_vertexes[i, 1] < 0 or projected_vertexes[i, 1] >= vertex_vis.shape[0]: + continue + # print(template_mesh[0, i, :].shape, camera_position.shape, camera_forward_direction.shape) + check = np.sum((template_mesh[0, i, :] - camera_position) * camera_forward_direction) + in_screen += 1 + if check < 0: + not_pass_check += 1 + cv2.circle(vertex_vis, (int(projected_vertexes[i, 0]), int(projected_vertexes[i, 1])), 5, (255, 255, 255), -1) + + viz.append(vertex_vis) + + if not_pass_check == in_screen: + raise Exception('all vertexes are before camera') + + + + # tmp_joint_img 19 x 2 + # rescale tmp_joint_img accroding to bbox + tmp_joint_img_on_croppped_image = tmp_joint_img.copy() + tmp_joint_img_on_croppped_image[:, 0] = tmp_joint_img[:, 0] - crop_output['bbox'][0] + tmp_joint_img_on_croppped_image[:, 1] = tmp_joint_img[:, 1] - crop_output['bbox'][1] + tmp_joint_img_on_croppped_image*= crop_image_size/crop_output['bbox'][2] + + skeleton_vis = vis_coco_skeleton(crop_output['cropped_image'].copy(), tmp_joint_img_on_croppped_image.T, vis_skeleton) + if len(head_bbox['bbox']) ==4 and crop_idx == 0: + tmp_head_bbox = np.array(head_bbox['bbox'].copy()) + tmp_head_bbox[0] = head_bbox['bbox'][0] - crop_output['bbox'][0] + tmp_head_bbox[1] = head_bbox['bbox'][1] - crop_output['bbox'][1] + tmp_head_bbox *= crop_image_size / crop_output['bbox'][2] + cv2.rectangle(skeleton_vis, (int(tmp_head_bbox[0]), int(tmp_head_bbox[1])), + (int(tmp_head_bbox[0] + tmp_head_bbox[2]), int(tmp_head_bbox[1] + tmp_head_bbox[3])), + (0, 255, 0), 4) + + viz.append(skeleton_vis) + + viz = np.concatenate(viz, axis=0 ) + if args.debug: + viz = cv2.resize(viz, (int(viz.shape[1]/viz.shape[0] * viz_full_image.shape[0]), viz_full_image.shape[0])) + viz = np.concatenate([viz_full_image, viz], axis=1) + else: + viz = cv2.resize(viz, 
(viz.shape[1]//6,viz.shape[0]//6)) + + cv2.imwrite(os.path.join(visualization_dir, save_key),viz) + + #''' + + cv2.imwrite(os.path.join(aligned_images_dir,save_key), crop_output['cropped_image']) + + # final ========================================= + res = { + 'bbox':crop_output['bbox'].tolist(), + + 'coco_joint': tmp_joint_img.tolist(), + 'model_input_bbox': model_input_bbox.tolist(), + 'raw_image_name': img_name, + + # real world + 'intrisics': intrisics_standard.tolist(), + 'intrisics_dict': intrisics_standard_dict, + 'world2camera_matrix': normalized_transformation_in_realworld.tolist(), + 'camera_pose': camerapose_in_realworld.tolist(), + + + # pyrender + # original + 'intrisics_full_image_dict': intrisics_full_image_dict, + 'camera_to_render_template_in_pyrender':camera_to_render_template_in_pyrender.tolist(), + 'camera_pose_in_pyrender':camera_pose_in_pyrender.tolist(), + + #crop + 'intrisics_crop_dict': intrisics_crop_dict, + 'normalized_camerapose_in_pyrender': normalized_camerapose_in_pyrender.tolist(), + + + + # smpl + 'smpl_pose': out['smpl_pose'].cpu().numpy().tolist(), + 'smpl_shape': out['smpl_shape'].cpu().numpy().tolist(), + 'cam_trans': out['cam_trans'].cpu().numpy().tolist(), + + + } + + result_json[save_key] = res + + result_count += 1 + + if result_count == 0: + print(f">>>>>>> No result in {img_path}!") + shutil.move(img_path, os.path.join(bad_images_dir, os.path.basename(img_path))) + # ============================================== + + +with open(result_json_path, 'w') as f: + json.dump(result_json, f) diff --git a/data_processing/demo/generate_visualization.py b/data_processing/demo/generate_visualization.py new file mode 100644 index 0000000..c51b5c9 --- /dev/null +++ b/data_processing/demo/generate_visualization.py @@ -0,0 +1,293 @@ +import glob +import sys +import os +import os.path as osp +import argparse + +import matplotlib.pyplot as plt +import numpy as np +import cv2 +import colorsys +import json +import random +import torch +import torchvision.transforms as transforms +from torch.nn.parallel.data_parallel import DataParallel +import torch.backends.cudnn as cudnn +import pyrender + +sys.path.insert(0, osp.join('..', 'main')) +sys.path.insert(0, osp.join('..', 'data')) +sys.path.insert(0, osp.join('..', 'common')) +from config import cfg +# from model import get_model +# from utils.preprocessing import process_bbox, generate_patch_image, get_bbox +# from utils.transforms import pixel2cam, cam2pixel, transform_joint_to_other_db +from utils.vis import vis_mesh, save_obj, render_mesh, vis_coco_skeleton + +sys.path.insert(0, cfg.smpl_path) +from utils.smpl import SMPL + + +def add_pelvis(joint_coord, joints_name): + lhip_idx = joints_name.index('L_Hip') + rhip_idx = joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx, 2] * joint_coord[rhip_idx, 2] # confidence for openpose + pelvis = pelvis.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, pelvis)) + + return joint_coord + + +def add_neck(joint_coord, joints_name): + lshoulder_idx = joints_name.index('L_Shoulder') + rshoulder_idx = joints_name.index('R_Shoulder') + neck = (joint_coord[lshoulder_idx, :] + joint_coord[rshoulder_idx, :]) * 0.5 + neck[2] = joint_coord[lshoulder_idx, 2] * joint_coord[rshoulder_idx, 2] + neck = neck.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, neck)) + + return joint_coord + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, 
dest='gpu_ids') + parser.add_argument('--model_path', type=str, default='demo_checkpoint.pth.tar') + parser.add_argument('--input_dir', type=str, default='') + parser.add_argument('--output_dir', type=str, default='output') + parser.add_argument('--data_dir', type=str, default='101570') + + args = parser.parse_args() + + # test gpus + if not args.gpu_ids: + assert 0, print("Please set proper gpu ids") + + if '-' in args.gpu_ids: + gpus = args.gpu_ids.split('-') + gpus[0] = int(gpus[0]) + gpus[1] = int(gpus[1]) + 1 + args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) + + return args + + +def get_projected_vertex(mesh, world2screen_matrix): + mesh = mesh[0,...] + mesh = np.concatenate([mesh, np.ones((mesh.shape[0], 1))], axis=1) # 6890 x 4 + points_image = world2screen_matrix @ mesh.T # 4,6890 + points_image = points_image[:3, :] # 3,6890 + + points_on_input_image = points_image / points_image[2, :] + points_on_input_image = points_on_input_image[:2, :].T # 30,2 + + return points_on_input_image + +def flip_yaw(pose_matrix): + flipped = pose_matrix.copy() + flipped[0, 1] *= -1 + flipped[0, 2] *= -1 + flipped[1, 0] *= -1 + flipped[2, 0] *= -1 + flipped[0, 3] *= -1 + return flipped +# argument parsing +args = parse_args() +cfg.set_args(args.gpu_ids, is_test=True) +cfg.set_data_dir(args.data_dir) +cfg.render = True +cudnn.benchmark = True + +# SMPL joint set +joint_num = 30 # original: 24. manually add nose, L/R eye, L/R ear, head top +joints_name = ( + 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', + 'Neck', 'L_Thorax', 'R_Thorax', + 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', + 'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') +flip_pairs = ( + (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) +skeleton = ( + (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), + (17, 19), + (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), (24, 25), + (24, 26), + (25, 27), (26, 28), (24, 29)) + +# SMPl mesh +vertex_num = 6890 +smpl = SMPL() +face = smpl.face +alpha = 0.8 +# other joint set +coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') +coco_skeleton = ( + (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), + (5, 6), + (11, 17), (12, 17), (17, 18)) + +vis_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Thorax', 'Pelvis') +vis_skeleton = ( + (0, 1), (0, 2), (2, 4), (1, 3), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 17), (6, 17), (11, 18), + (12, 18), (17, 18), (17, 0), (6, 8), (8, 10),) + +human_model_layer = smpl.layer['neutral'].cuda() + + +# prepare input image +transform = transforms.ToTensor() +pose2d_result_path = os.path.join(args.input_dir, '2d_pose_result_hrnet.json') +with open(pose2d_result_path) as f: + pose2d_result = json.load(f) + +img_dir = os.path.join(args.input_dir, 'images') + +debug = True + +output_dir = args.output_dir +print('>>>>>>> output_dir', output_dir) +os.makedirs(output_dir, 
exist_ok=True) +aligned_images_dir = os.path.join(output_dir, 'aligned_images') +visualization_dir = os.path.join(output_dir, 'visualization_debug') +os.makedirs(visualization_dir, exist_ok=True) + +result_json_path = os.path.join(output_dir, 'result.json') +with open(result_json_path, 'r') as f: + result_json = json.load(f) + +template_mesh_in_pyrender = np.load('./template_mesh_in_pyrender.npy') +print('template_mesh_in_pyrender.shape', template_mesh_in_pyrender.shape) +template_mesh = np.load('./template_mesh.npy') +print('template_mesh.shape', template_mesh.shape) + + + +from model import get_model +model_path = args.model_path +assert osp.exists(model_path), 'Cannot find model at ' + model_path +print('Load checkpoint from {}'.format(model_path)) +model = get_model(vertex_num, joint_num, 'test') + +model = DataParallel(model).cuda() +ckpt = torch.load(model_path) +model.load_state_dict(ckpt['network'], strict=False) +model.eval() + + +for aligned_image_name in result_json.keys(): + color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + meta_info = result_json[aligned_image_name] + + visualization_path = os.path.join(visualization_dir, aligned_image_name) + aligned_image_path = os.path.join(aligned_images_dir, aligned_image_name) + aligned_image = cv2.imread(aligned_image_path) + + + + # crop image + viz = [aligned_image] + mesh_cam_render, _ = human_model_layer(torch.from_numpy(np.array(meta_info['smpl_pose'])).float().cuda(), + torch.from_numpy(np.array(meta_info['smpl_shape'])).float().cuda(), + torch.from_numpy(np.array(meta_info['cam_trans'])).float().cuda()) + image_mesh = render_mesh(aligned_image.copy(), + mesh_cam_render[0].cpu().numpy(), face, + meta_info['intrisics_crop_dict'], + color=color) + # image_mesh,_,_ = generate_patch_image(image_mesh, crop_output['bbox'], 1.0, 0.0, False, (crop_image_size,crop_image_size)) + viz.append((image_mesh * alpha + aligned_image.copy() * (1.0 - alpha)).astype(np.uint8)) + + + image_camera_rotate = render_mesh(aligned_image.copy(), + template_mesh_in_pyrender[0], face, + meta_info['intrisics_dict'], + color=color, cam_pose=meta_info['normalized_camerapose_in_pyrender']) + viz.append((image_camera_rotate * alpha + aligned_image.copy() * (1.0-alpha)).astype(np.uint8)) + + + + + + # + projected_vertexes = get_projected_vertex(template_mesh, np.array(meta_info['intrisics']) @ np.array(meta_info['world2camera_matrix'])) + vertex_vis = aligned_image.copy() + camera_pose = np.array(meta_info['camera_pose']) + camera_forward_direction = (camera_pose[:3, :3] @ np.reshape(np.array([0, 0, 1]),(3,1)))[:,0] # 3,1 + camera_position = camera_pose[:, 3:4][:3, 0] # 34,1 + not_pass_check = 0 + in_screen = 0 + for i in range(projected_vertexes.shape[0]): + if projected_vertexes[i, 0] < 0 or projected_vertexes[i, 0] >= vertex_vis.shape[1] or \ + projected_vertexes[i, 1] < 0 or projected_vertexes[i, 1] >= vertex_vis.shape[0]: + continue + check = np.sum((template_mesh[0, i, :3] - camera_position) * camera_forward_direction) + in_screen += 1 + if check < 0: + not_pass_check += 1 + cv2.circle(vertex_vis, (int(projected_vertexes[i, 0]), int(projected_vertexes[i, 1])), 6, (255, 0, 0), -1) + print('check', not_pass_check, in_screen) + if not_pass_check == in_screen: + raise Exception('all vertexes are before camera') + viz.append(vertex_vis) + + # flip image + flip_camerapose_in_pyrender = np.array(meta_info['normalized_camerapose_in_pyrender']) + flip_camerapose_in_pyrender = flip_yaw(flip_camerapose_in_pyrender) + + image_camera_rotate_flip = 
render_mesh(cv2.flip(aligned_image.copy(), 1), + template_mesh_in_pyrender[0], face, + meta_info['intrisics_dict'], + color=color, cam_pose=flip_camerapose_in_pyrender) + viz.append((image_camera_rotate_flip * alpha + cv2.flip(aligned_image.copy(), 1) * (1.0 - alpha)).astype(np.uint8)) + + + # flip + camera_pose = np.array(meta_info['camera_pose']) + flip_camera_pose = flip_yaw(camera_pose) + + + flip_world2camera_matrix = np.linalg.inv(flip_camera_pose) + + projected_vertexes = get_projected_vertex(template_mesh, np.array(meta_info['intrisics']) @ flip_world2camera_matrix) # + # select head & neck vertexes + template_align_joint_coorinate = model.module.template_align_joint_coorinate.cpu().numpy() # 30, 6890 + template_mesh_cam_render = model.module.template_mesh_cam_render.cpu().numpy() # 6890, 3 + # template_mesh_cam_render -template_align_joint_coorinate > 0 + print(template_mesh_cam_render.shape) + selected_vertexes = np.where( template_mesh_cam_render[0,:,1]<0 )[0] + print(selected_vertexes.shape) + projected_vertexes = projected_vertexes[selected_vertexes, :] + print(projected_vertexes.shape) + + + vertex_vis = cv2.flip(aligned_image.copy(), 1) + camera_pose = flip_camera_pose + camera_forward_direction = (camera_pose[:3, :3] @ np.reshape(np.array([0, 0, 1]), (3, 1)))[:, 0] # 3,1 + camera_position = camera_pose[:, 3:4][:3, 0] # 34,1 + not_pass_check = 0 + in_screen = 0 + for i in range(projected_vertexes.shape[0]): + if projected_vertexes[i, 0] < 0 or projected_vertexes[i, 0] >= vertex_vis.shape[1] or \ + projected_vertexes[i, 1] < 0 or projected_vertexes[i, 1] >= vertex_vis.shape[0]: + continue + check = np.sum((template_mesh[0, i, :3] - camera_position) * camera_forward_direction) + in_screen += 1 + if check < 0: + not_pass_check += 1 + cv2.circle(vertex_vis, (int(projected_vertexes[i, 0]), int(projected_vertexes[i, 1])), 6, (255, 0, 0), -1) + print('check', not_pass_check, in_screen) + if not_pass_check == in_screen: + raise Exception('all vertexes are before camera') + + + + viz.append(vertex_vis) + viz = np.concatenate(viz, axis=0) + cv2.imwrite(visualization_path, cv2.resize(viz, (viz.shape[1] //4, viz.shape[0] //4))) diff --git a/data_processing/demo/my_demo.py b/data_processing/demo/my_demo.py new file mode 100644 index 0000000..6e4979b --- /dev/null +++ b/data_processing/demo/my_demo.py @@ -0,0 +1,293 @@ +import glob +import sys +import os +import os.path as osp +import argparse +import numpy as np +import cv2 +import colorsys +import json +import random +import torch +import torchvision.transforms as transforms +from torch.nn.parallel.data_parallel import DataParallel +import torch.backends.cudnn as cudnn +import matplotlib.pyplot as plt + +sys.path.insert(0, osp.join('..', 'main')) +sys.path.insert(0, osp.join('..', 'data')) +sys.path.insert(0, osp.join('..', 'common')) +from config import cfg +from model import get_model +from utils.preprocessing import process_bbox, generate_patch_image, get_bbox +from utils.transforms import pixel2cam, cam2pixel, transform_joint_to_other_db +from utils.vis import vis_mesh, save_obj, render_mesh, vis_coco_skeleton + +sys.path.insert(0, cfg.smpl_path) +from utils.smpl import SMPL + + +def add_pelvis(joint_coord, joints_name): + lhip_idx = joints_name.index('L_Hip') + rhip_idx = joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx, 2] * joint_coord[rhip_idx, 2] # confidence for openpose + pelvis = pelvis.reshape(1, 3) + + joint_coord = 
np.concatenate((joint_coord, pelvis)) + + return joint_coord + + +def add_neck(joint_coord, joints_name): + lshoulder_idx = joints_name.index('L_Shoulder') + rshoulder_idx = joints_name.index('R_Shoulder') + neck = (joint_coord[lshoulder_idx, :] + joint_coord[rshoulder_idx, :]) * 0.5 + neck[2] = joint_coord[lshoulder_idx, 2] * joint_coord[rshoulder_idx, 2] + neck = neck.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, neck)) + + return joint_coord + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, dest='gpu_ids') + parser.add_argument('--model_path', type=str, default='demo_checkpoint.pth.tar') + parser.add_argument('--img_name', type=str, default='101570') + parser.add_argument('--input_dir', type=str, default='101570') + parser.add_argument('--data_dir', type=str, default='101570') + + args = parser.parse_args() + + # test gpus + if not args.gpu_ids: + assert 0, print("Please set proper gpu ids") + + if '-' in args.gpu_ids: + gpus = args.gpu_ids.split('-') + gpus[0] = int(gpus[0]) + gpus[1] = int(gpus[1]) + 1 + args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) + + return args + + +# argument parsing +args = parse_args() +cfg.set_args(args.gpu_ids, is_test=True) +cfg.set_data_dir(args.data_dir) +cfg.render = True +cudnn.benchmark = True + +# SMPL joint set +joint_num = 30 # original: 24. manually add nose, L/R eye, L/R ear, head top +joints_name = ( + 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', + 'Neck', 'L_Thorax', 'R_Thorax', + 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', + 'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') +flip_pairs = ( + (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) +skeleton = ( + (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), + (17, 19), + (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), (24, 25), + (24, 26), + (25, 27), (26, 28), (24, 29)) + +# SMPl mesh +vertex_num = 6890 +smpl = SMPL() +face = smpl.face + +# other joint set +coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') +coco_skeleton = ( + (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), + (5, 6), + (11, 17), (12, 17), (17, 18)) + +vis_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Thorax', 'Pelvis') +vis_skeleton = ( + (0, 1), (0, 2), (2, 4), (1, 3), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 17), (6, 17), (11, 18), + (12, 18), (17, 18), (17, 0), (6, 8), (8, 10),) + +# snapshot load +model_path = args.model_path +assert osp.exists(model_path), 'Cannot find model at ' + model_path +print('Load checkpoint from {}'.format(model_path)) +model = get_model(vertex_num, joint_num, 'test') + +model = DataParallel(model).cuda() +ckpt = torch.load(model_path) +model.load_state_dict(ckpt['network'], strict=False) +model.eval() + +# prepare input image +transform = transforms.ToTensor() +pose2d_result_path = os.path.join(args.input_dir, 
'2d_pose_result_hrnet.json') +with open(pose2d_result_path) as f: + pose2d_result = json.load(f) + +img_dir = os.path.join(args.input_dir, 'images') + +output_dir = 'output' +os.makedirs(output_dir, exist_ok=True) +# img_name = args.img_name +# if img_name not in pose2d_result: +# print('missing pose2d! ') +# exit() +# +# img_path = osp.join(img_dir, img_name) +# + +for img_path in glob.glob(os.path.join(img_dir, "*")): + original_img = cv2.imread(img_path) + img_name = os.path.basename(img_path) + + if img_name.split('.')[0] not in ['arun-4ZpNFwSV7sY-unsplash','taylor-brandon-QAnqDU_fTz0-unsplash']: + continue + + + original_img_height, original_img_width = original_img.shape[:2] + coco_joint_list = pose2d_result[img_name] + + drawn_joints = [] + c = coco_joint_list + # manually assign the order of output meshes + # coco_joint_list = [c[2], c[0], c[1], c[4], c[3]] + + result_count = 0 + + + + + for idx in range(len(coco_joint_list)): + image = original_img.copy() + input = original_img.copy() + input2 = original_img.copy() + """ 2D pose input setting & hard-coding for filtering """ + pose_thr = 0.05 + coco_joint_img = np.asarray(coco_joint_list[idx])[:, :3] + coco_joint_img = add_pelvis(coco_joint_img, coco_joints_name) + coco_joint_img = add_neck(coco_joint_img, coco_joints_name) + coco_joint_valid = (coco_joint_img[:, 2].copy().reshape(-1, 1) > pose_thr).astype(np.float32) + + # filter inaccurate inputs + det_score = sum(coco_joint_img[:, 2]) + if det_score < 0.3: + # print(f'det_score = ({det_score})!') + continue + if len(coco_joint_img[:, 2:].nonzero()[0]) < 1: + #print('len(coco_joint_img[:, 2:].nonzero()[0]) < 1!') + continue + # filter the same targets + tmp_joint_img = coco_joint_img.copy() + continue_check = False + for ddx in range(len(drawn_joints)): + drawn_joint_img = drawn_joints[ddx] + drawn_joint_val = (drawn_joint_img[:, 2].copy().reshape(-1, 1) > pose_thr).astype(np.float32) + diff = np.abs(tmp_joint_img[:, :2] - drawn_joint_img[:, :2]) * coco_joint_valid * drawn_joint_val + diff = diff[diff != 0] + if diff.size == 0: + continue_check = True + elif diff.mean() < 20: + continue_check = True + if continue_check: + #print('continue_check failed!') + continue + drawn_joints.append(tmp_joint_img) + + """ Prepare model input """ + # prepare bbox + # bbox = get_bbox(coco_joint_img, coco_joint_valid[:, 0]) # xmin, ymin, width, height + bbox = get_bbox(coco_joint_img, np.ones_like(coco_joint_valid[:, 0])) + + orig_bbox = bbox.copy() + bbox = process_bbox(bbox, original_img_width, original_img_height) + if bbox is None: + #print('bbox is None!') + continue + img, img2bb_trans, bb2img_trans = generate_patch_image(input2[:, :, ::-1], bbox, 1.0, 0.0, False, + cfg.input_img_shape) + img = transform(img.astype(np.float32)) / 255 + img = img.cuda()[None, :, :, :] + + coco_joint_img_xy1 = np.concatenate((coco_joint_img[:, :2], np.ones_like(coco_joint_img[:, :1])), 1) + coco_joint_img[:, :2] = np.dot(img2bb_trans, coco_joint_img_xy1.transpose(1, 0)).transpose(1, 0) + coco_joint_img[:, 0] = coco_joint_img[:, 0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + coco_joint_img[:, 1] = coco_joint_img[:, 1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + + coco_joint_img = transform_joint_to_other_db(coco_joint_img, coco_joints_name, joints_name) + coco_joint_valid = transform_joint_to_other_db(coco_joint_valid, coco_joints_name, joints_name) + coco_joint_valid[coco_joint_img[:, 2] <= pose_thr] = 0 + + # check truncation + coco_joint_trunc = coco_joint_valid * ( + (coco_joint_img[:, 0] 
>= 0) * (coco_joint_img[:, 0] < cfg.output_hm_shape[2]) * ( + coco_joint_img[:, 1] >= 0) * (coco_joint_img[:, 1] < cfg.output_hm_shape[1])).reshape( + -1, 1).astype(np.float32) + coco_joint_img, coco_joint_trunc, bbox = torch.from_numpy(coco_joint_img).cuda()[None, :, :], torch.from_numpy( + coco_joint_trunc).cuda()[None, :, :], torch.from_numpy(bbox).cuda()[None, :] + + """ Model forward """ + inputs = {'img': img, 'joints': coco_joint_img, 'joints_mask': coco_joint_trunc} + targets = {} + meta_info = {'bbox': bbox} + with torch.no_grad(): + out = model(inputs, targets, meta_info, 'test') + + # draw output mesh + mesh_cam_render = out['mesh_cam_render'][0].cpu().numpy() + bbox = out['bbox'][0].cpu().numpy() + princpt = (bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2) + # original_img = vis_bbox(original_img, bbox, alpha=1) # for debug + + # generate random color + color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + image = render_mesh(image, mesh_cam_render, face, {'focal': cfg.focal, 'princpt': princpt}, + color=color) + + # img = img[0].cpu().numpy().transpose(1, 2, 0)[:, :, ::-1] * 255 + # img = render_mesh(img, out['mesh_cam_render_crop'][0].cpu().numpy(), face, {'focal': cfg.focal, 'princpt': cfg.princpt}, + # color=color) + # Save output mesh + + image_name = os.path.basename(img_path).split('.')[0] + file_name = f'{output_dir}/{image_name}_{idx}.jpg' + #print("file name: ", file_name) + + # save_obj(mesh_cam_render, face, file_name=f'{output_dir}/{image_name}_{idx}.obj') + + # cv2.imwrite(file_name, (image*0.7+original_img*0.3).astype(np.uint8)) + + # Draw input 2d pose + tmp_joint_img[-1], tmp_joint_img[-2] = tmp_joint_img[-2].copy(), tmp_joint_img[-1].copy() + input = vis_coco_skeleton(input, tmp_joint_img.T, vis_skeleton) + # cv2.imwrite(file_name[:-4] + f'_2dpose_{idx}.jpg', input) + input = cv2.rectangle(input, (int(orig_bbox[0]), int(orig_bbox[1])), + (int(orig_bbox[0] + orig_bbox[2]), int(orig_bbox[1] + orig_bbox[3])), (0, 0, 255), 2) + input = cv2.rectangle(input, (int(bbox[0]), int(bbox[1])), (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])), + (0, 255, 0), 2) + + viz = np.concatenate([original_img, (image * 0.9 + original_img * 0.1).astype(np.uint8), input], + axis=0 if original_img.shape[1] > original_img.shape[0] else 1) + cv2.imwrite(file_name[:-4] + f'_viz_{idx}.jpg', viz) + + result_count += 1 + + + if result_count == 0: + print(f">>>>>>> No result in {img_path}!") + + + # cv2.imwrite(file_name[:-4] + f'_viz_crop_{idx}.jpg', img) + + + diff --git a/data_processing/demo/new_crop_use_densepose.py b/data_processing/demo/new_crop_use_densepose.py new file mode 100644 index 0000000..f591b1a --- /dev/null +++ b/data_processing/demo/new_crop_use_densepose.py @@ -0,0 +1,484 @@ +import glob +import shutil +import sys +import os +import os.path as osp +import argparse + +import matplotlib.pyplot as plt +import numpy as np +import cv2 +import colorsys +import json +import random +import torch +import torchvision.transforms as transforms +from torch.nn.parallel.data_parallel import DataParallel +import torch.backends.cudnn as cudnn +import pyrender + +sys.path.insert(0, osp.join('..', 'main')) +sys.path.insert(0, osp.join('..', 'data')) +sys.path.insert(0, osp.join('..', 'common')) +from config import cfg +# from model import get_model +# from utils.preprocessing import process_bbox, generate_patch_image, get_bbox +# from utils.transforms import pixel2cam, cam2pixel, transform_joint_to_other_db +from utils.vis import vis_mesh, save_obj, render_mesh, vis_coco_skeleton + 
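# new_crop_use_densepose.py: refines the earlier crops by matching DensePose vertex indices (read from the images in the 'seg' folder) against head vertices of the neck/head-rotated template mesh projected with the stored camera, then re-crops the raw image and writes an updated result.json (bbox, camera_pose, smpl_pose, raw_image_name).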
+sys.path.insert(0, cfg.smpl_path) +from utils.smpl import SMPL + +import os +# os.environ["PYOPENGL_PLATFORM"] = "egl" +# check if on a Linux machine +if os.name == 'posix': # Linux + os.environ["PYOPENGL_PLATFORM"] = "egl" +def add_pelvis(joint_coord, joints_name): + lhip_idx = joints_name.index('L_Hip') + rhip_idx = joints_name.index('R_Hip') + pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5 + pelvis[2] = joint_coord[lhip_idx, 2] * joint_coord[rhip_idx, 2] # confidence for openpose + pelvis = pelvis.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, pelvis)) + + return joint_coord + + +def add_neck(joint_coord, joints_name): + lshoulder_idx = joints_name.index('L_Shoulder') + rshoulder_idx = joints_name.index('R_Shoulder') + neck = (joint_coord[lshoulder_idx, :] + joint_coord[rshoulder_idx, :]) * 0.5 + neck[2] = joint_coord[lshoulder_idx, 2] * joint_coord[rshoulder_idx, 2] + neck = neck.reshape(1, 3) + + joint_coord = np.concatenate((joint_coord, neck)) + + return joint_coord + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, dest='gpu_ids') + parser.add_argument('--model_path', type=str, default='demo_checkpoint.pth.tar') + parser.add_argument('--input_dir', type=str, default='') + parser.add_argument('--output_dir', type=str, default='output') + parser.add_argument('--data_dir', type=str, default='101570') + + args = parser.parse_args() + + # test gpus + if not args.gpu_ids: + assert 0, print("Please set proper gpu ids") + + if '-' in args.gpu_ids: + gpus = args.gpu_ids.split('-') + gpus[0] = int(gpus[0]) + gpus[1] = int(gpus[1]) + 1 + args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) + + return args + + +def get_projected_vertex(mesh, world2screen_matrix): + mesh = mesh[0, ...] + mesh = np.concatenate([mesh, np.ones((mesh.shape[0], 1))], axis=1) # 6890 x 4 + # mesh = torch.cat([mesh, torch.ones((mesh.shape[0], 1))], dim=1) + points_image = world2screen_matrix @ mesh.T # 4,6890 + points_image = points_image[:3, :] # 3,6890 + + points_on_input_image = points_image / points_image[2, :] + points_on_input_image = points_on_input_image[:2, :].T # 30,2 + + return points_on_input_image + + +def flip_yaw(pose_matrix): + flipped = pose_matrix.copy() + flipped[0, 1] *= -1 + flipped[0, 2] *= -1 + flipped[1, 0] *= -1 + flipped[2, 0] *= -1 + flipped[0, 3] *= -1 + return flipped + + +# argument parsing +args = parse_args() +cfg.set_args(args.gpu_ids, is_test=True) +cfg.set_data_dir(args.data_dir) +cfg.render = True +cudnn.benchmark = True + +# SMPL joint set +joint_num = 30 # original: 24. 
manually add nose, L/R eye, L/R ear, head top +joints_name = ( + 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', + 'Neck', 'L_Thorax', 'R_Thorax', + 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', + 'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') +flip_pairs = ( + (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) +skeleton = ( + (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), + (17, 19), + (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), (24, 25), + (24, 26), + (25, 27), (26, 28), (24, 29)) + +# SMPl mesh +vertex_num = 6890 +smpl = SMPL() +face = smpl.face +alpha = 0.8 +# other joint set +coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') +coco_skeleton = ( + (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), + (5, 6), + (11, 17), (12, 17), (17, 18)) + +vis_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Thorax', 'Pelvis') +vis_skeleton = ( + (0, 1), (0, 2), (2, 4), (1, 3), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 17), (6, 17), (11, 18), + (12, 18), (17, 18), (17, 0), (6, 8), (8, 10),) + +human_model_layer = smpl.layer['neutral'].cuda() + +# prepare input image +transform = transforms.ToTensor() +# pose2d_result_path = os.path.join(args.input_dir, '2d_pose_result_hrnet.json') +# with open(pose2d_result_path) as f: +# pose2d_result = json.load(f) + +img_dir = os.path.join(args.input_dir, 'images') + +debug = True + +input_aligned_images_dir = os.path.join(args.input_dir, 'aligned_images') + +output_dir = args.output_dir +print('>>>>>>> output_dir', output_dir) +os.makedirs(output_dir, exist_ok=True) + +result_json_path = os.path.join(args.input_dir, 'result.json') +with open(result_json_path, 'r') as f: + result_json = json.load(f) + +template_mesh_in_pyrender = np.load('./template_mesh_in_pyrender.npy') +print('template_mesh_in_pyrender.shape', template_mesh_in_pyrender.shape) +template_mesh = np.load('./template_mesh.npy') +print('template_mesh.shape', template_mesh.shape) + +from model import get_model + +model_path = args.model_path +assert osp.exists(model_path), 'Cannot find model at ' + model_path +print('Load checkpoint from {}'.format(model_path)) +model = get_model(vertex_num, joint_num, 'test') + +model = DataParallel(model).cuda() +ckpt = torch.load(model_path) +model.load_state_dict(ckpt['network'], strict=False) +model.eval() + +from utils.preprocessing import generate_patch_image + +output_aligned_images_dir = os.path.join(output_dir, 'aligned_images') +output_visualization_dir = os.path.join(output_dir, 'visualization') +os.makedirs(output_aligned_images_dir, exist_ok=True) +os.makedirs(output_visualization_dir, exist_ok=True) + +output_result_json_path = os.path.join(output_dir, 'result.json') +if os.path.exists(output_result_json_path): + with open(output_result_json_path, 'r') as f: + output_result_json = json.load(f) +else: + output_result_json = {} + + +def exit_function(): + global 
output_result_json + with open(output_result_json_path, 'w') as f: + json.dump(output_result_json, f, indent=4) + print('结束') + + +import atexit +from tqdm import tqdm + +atexit.register(exit_function) + +color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) +template_mesh_cam_render = model.module.template_mesh_cam_render.cpu().numpy() # 6890, 3 + +head_vertexes_index = np.where(template_mesh_cam_render[0, :, 1] < -0.0649)[0] + +# +# align_joint_name = 'Neck' +# align_joint_index = model.module.human_model.joints_name.index(align_joint_name) +# print(align_joint_indexmodel.module.joint_regressor.max(), model.module.joint_regressor.shape) +# neck_vertexes_index = np.where(model.module.joint_regressor[align_joint_index,:] == 1 )[0] +# print('neck_vertexes_index', neck_vertexes_index , neck_vertexes_index.shape) + +for input_aligned_image_path in tqdm(glob.glob(os.path.join(input_aligned_images_dir, '*'))): + aligned_image_name = osp.basename(input_aligned_image_path) + + dense_pose_img_paths = glob.glob(osp.join(args.input_dir, 'seg', aligned_image_name.replace('.png', '*.png'))) + original_aligned_image_path = osp.join(args.input_dir, 'aligned_images', aligned_image_name) + original_visualization_path = osp.join(args.input_dir, 'visualization', aligned_image_name) + + meta_info = result_json[aligned_image_name] + intrisics_dict = meta_info['intrisics_dict'] + intrisics_dict['focal'][0] = intrisics_dict['focal'][0] / 0.75 + intrisics_dict['focal'][1] = intrisics_dict['focal'][1] / 0.75 + + camera_pose = np.array(meta_info['camera_pose']) + + raw_image_name = meta_info['raw_image_name'] + raw_image_path = osp.join(img_dir, raw_image_name) + raw_image = cv2.imread(raw_image_path) + + if len(dense_pose_img_paths) == 0: + res_meta_info = {} + save_key = os.path.basename(input_aligned_image_path) + original_bbox = meta_info['bbox'] + + stride = original_bbox[2] + center = np.array([original_bbox[0] + stride / 2, original_bbox[1] + stride / 2]) + + stride = 0.75 * stride + + new_bbox = [center[0] - stride / 2, center[1] - stride / 2, stride, stride] + + new_aligned_image, img2bb_trans, bb2img_trans = generate_patch_image(raw_image, new_bbox, 1.0, 0.0, False, + (1024, 1024), + enable_padding=True) + viz = [new_aligned_image] + body_pose_param = torch.from_numpy(np.array(meta_info['smpl_pose'])).float().cuda() + body_pose_param = body_pose_param.reshape(-1, 24, 3) + body_pose_param = body_pose_param[:, 1:, :] + mesh_cam_render = model.module.get_neck_head_rotated_template_mesh(body_pose_param) + image_camera_rotate = render_mesh(new_aligned_image.copy(), + mesh_cam_render[0].cpu().numpy(), face, + intrisics_dict, + color=color, cam_pose=meta_info['normalized_camerapose_in_pyrender']) + viz.append((image_camera_rotate * alpha + new_aligned_image.copy() * (1.0 - alpha)).astype(np.uint8)) + + viz = np.concatenate(viz, axis=0).astype(np.uint8) + viz = cv2.resize(viz, (viz.shape[1] // 4, viz.shape[0] // 4)) + + # output_aligned_images_dir = os.path.join(output_dir, 'aligned_images') + # output_visualization_dir = os.path.join(output_dir, 'visualization') + new_aligned_image_path = os.path.join(output_aligned_images_dir, save_key) + cv2.imwrite(new_aligned_image_path, new_aligned_image) + + new_visualization_path = os.path.join(output_visualization_dir, save_key) + cv2.imwrite(new_visualization_path, viz) + + res_meta_info['bbox'] = new_bbox + res_meta_info['camera_pose'] = camera_pose.tolist() + res_meta_info['smpl_pose'] = meta_info['smpl_pose'] + res_meta_info['raw_image_name'] = raw_image_name + + 
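        # No DensePose segmentation exists for this crop, so only a tighter 0.75x re-crop around the same center is saved, reusing the previously estimated camera pose and SMPL pose. +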
output_result_json[save_key] = res_meta_info + + for dense_pose_img_path in dense_pose_img_paths: + res_meta_info = {} + save_key = os.path.basename(dense_pose_img_path) + if save_key in output_result_json: + continue + dense_pose_img_ = cv2.imread(dense_pose_img_path).astype(np.int32) + + dense_pose_index_ = dense_pose_img_[:, :, 0] * 255 + dense_pose_img_[:, :, 1] + dense_pose_index_[dense_pose_img_[:, :, 2] == 255] = -1 + + # print('dense_pose_index', dense_pose_index.shape, dense_pose_index.min(), dense_pose_index.max() ) + # mask out dense_pose_index that not in head_vertexes_index + # for i in range(dense_pose_index.shape[0]): + # for j in range(dense_pose_index.shape[1]): + # if dense_pose_index[i,j] not in head_vertexes_index: + # dense_pose_index[i,j] = -1 + # dense_pose_img[i,j,:] = 255 + # dense_pose_index = np.ones_like(dense_pose_index_)*-1 + # dense_pose_img = np.ones_like(dense_pose_img_)*255 + + dense_pose_2d_points = np.ones((head_vertexes_index.shape[0], 2)) * -1 + + for i, selected_vertex in enumerate(head_vertexes_index): + mask = dense_pose_index_ == selected_vertex + # dense_pose_index[mask] = selected_vertex + # dense_pose_img[mask, :] = dense_pose_img_[mask, :] + if mask.sum() == 0: + continue + dense_pose_2d_points[i, :] = np.array([np.mean(np.where(mask)[1]), np.mean(np.where(mask)[0])]) + # cv2.circle(dense_pose_img, (int(dense_pose_2d_points[i,0]), int(dense_pose_2d_points[i,1])), 6, (0,255,0), -1) + + # dense_pose_img = dense_pose_img.astype(np.uint8) + valid_head_vertexes_index = np.where(dense_pose_2d_points[:, 0] != -1)[0] + dense_pose_2d_points = dense_pose_2d_points[valid_head_vertexes_index, :] + + if dense_pose_2d_points.shape[0] == 0: + continue + + # project smpl mesh to img: + + # mesh_cam_render, _ = human_model_layer(torch.from_numpy(np.array(meta_info['smpl_pose'])).float().cuda(), + # torch.from_numpy(np.array(meta_info['smpl_shape'])).float().cuda(), + # torch.from_numpy(np.array(meta_info['cam_trans'])).float().cuda()) + # + # mesh_cam_render = mesh_cam_render.cpu().numpy() + body_pose_param = torch.from_numpy(np.array(meta_info['smpl_pose'])).float().cuda() + body_pose_param = body_pose_param.reshape(-1, 24, 3) + body_pose_param = body_pose_param[:, 1:, :] + mesh_cam_render = model.module.get_neck_head_rotated_template_mesh(body_pose_param) + + mesh_proj = torch.matmul(mesh_cam_render, model.module.template_mesh_R[:3, :3]).cpu().numpy() + + intrisics = np.array(meta_info['intrisics']) + # optimize trans and scale + transl = np.array([0, 0]).reshape(1, 2) + scale = np.array([1]).reshape(1, 1) + + proj_matrix = np.array(intrisics) @ np.array(meta_info['world2camera_matrix']) + projected_vertexes = get_projected_vertex(mesh_proj, proj_matrix) + moved_projected_vertexes = projected_vertexes * scale + transl + + projected_vertexes = moved_projected_vertexes[head_vertexes_index, :][valid_head_vertexes_index, :] + + # vertex_vis = dense_pose_img.copy() + # for i in range(projected_vertexes.shape[0]): + # if projected_vertexes[i, 0] < 0 or projected_vertexes[i, 0] >= vertex_vis.shape[1] or \ + # projected_vertexes[i, 1] < 0 or projected_vertexes[i, 1] >= vertex_vis.shape[0]: + # continue + # cv2.circle(vertex_vis, (int(projected_vertexes[i, 0]), int(projected_vertexes[i, 1])), 6, (255, 0, 0), -1) + # cv2.imshow('vertex_vis', vertex_vis) + # cv2.waitKey(0) + + # print('dense_pose_2d_points', dense_pose_2d_points.shape) + # print('projected_vertexes', projected_vertexes.shape) + # try to align projected_vertexes to dense_pose_index + height_dense_pose = 
dense_pose_2d_points[:, 1].max() - dense_pose_2d_points[:, 1].min() + width_dense_pose = dense_pose_2d_points[:, 0].max() - dense_pose_2d_points[:, 0].min() + new_center = np.array([1024 / 2, 1024 / 2]).reshape(1, 2) + + height_projected_vertexes = projected_vertexes[:, 1].max() - projected_vertexes[:, 1].min() + width_projected_vertexes = projected_vertexes[:, 0].max() - projected_vertexes[:, 0].min() + + scale = max(height_projected_vertexes / height_dense_pose, width_projected_vertexes / width_dense_pose) + + scale = max(scale, 0.85) + scale = min(scale, 2) + + dense_pose_2d_points = dense_pose_2d_points * scale + new_center = new_center * scale + + center_dense_pose = np.array([dense_pose_2d_points[:, 0].mean(), dense_pose_2d_points[:, 1].mean()]).reshape(1, + 2) + center_projected_vertexes = np.array( + [projected_vertexes[:, 0].mean(), projected_vertexes[:, 1].mean()]).reshape(1, 2) + + transl = center_projected_vertexes - center_dense_pose + + dense_pose_2d_points = dense_pose_2d_points + transl + new_center = new_center + transl + + # vertex_vis = np.ones_like(dense_pose_img)*255 + # for i in range(projected_vertexes.shape[0]): + # if projected_vertexes[i, 0] < 0 or projected_vertexes[i, 0] >= vertex_vis.shape[1] or \ + # projected_vertexes[i, 1] < 0 or projected_vertexes[i, 1] >= vertex_vis.shape[0]: + # continue + # cv2.circle(vertex_vis, (int(projected_vertexes[i, 0]), int(projected_vertexes[i, 1])), 6, (255, 0, 0), -1) + # cv2.circle(vertex_vis, (int(dense_pose_2d_points[i, 0]), int(dense_pose_2d_points[i, 1])), 6, (0, 255, 0), -1) + # + # cv2.imshow('vertex_vis', vertex_vis) + # cv2.waitKey(0) + + original_bbox = meta_info['bbox'] + stride = original_bbox[2] + center = np.array([original_bbox[0] + stride / 2, original_bbox[1] + stride / 2]) + + new_stride = stride / scale + + # transl_ = transl/1024 + + # new_center_on_crop_image = np.array([1024/2, 1024/2]) + transl + new_center = center - (new_center - np.array([1024 / 2, 1024 / 2]).reshape(1, 2)) / 1024 * stride + new_center = new_center.reshape(-1) + # print('new_center', new_center) + # print('new_stride', new_stride) + new_stride = new_stride * 0.75 + new_bbox = [new_center[0] - new_stride / 2, new_center[1] - new_stride / 2, new_stride, new_stride] + meta_info['bbox'] = new_bbox + + try: + new_aligned_image, img2bb_trans, bb2img_trans = generate_patch_image(raw_image, new_bbox, 1.0, 0.0, False, + (1024, 1024), + enable_padding=True) + except: + continue + viz = [new_aligned_image] + image_camera_rotate = render_mesh(new_aligned_image.copy(), + mesh_cam_render[0].cpu().numpy(), face, + intrisics_dict, + color=color, cam_pose=meta_info['normalized_camerapose_in_pyrender']) + viz.append((image_camera_rotate * alpha + new_aligned_image.copy() * (1.0 - alpha)).astype(np.uint8)) + + viz = np.concatenate(viz, axis=0).astype(np.uint8) + viz = cv2.resize(viz, (viz.shape[1] // 4, viz.shape[0] // 4)) + + # output_aligned_images_dir = os.path.join(output_dir, 'aligned_images') + # output_visualization_dir = os.path.join(output_dir, 'visualization') + new_aligned_image_path = os.path.join(output_aligned_images_dir, save_key) + cv2.imwrite(new_aligned_image_path, new_aligned_image) + + new_visualization_path = os.path.join(output_visualization_dir, save_key) + cv2.imwrite(new_visualization_path, viz) + + res_meta_info['bbox'] = new_bbox + res_meta_info['camera_pose'] = camera_pose.tolist() + res_meta_info['smpl_pose'] = meta_info['smpl_pose'] + res_meta_info['raw_image_name'] = raw_image_name + + output_result_json[save_key] = 
res_meta_info + + # print(save_key,scale) + +with open(output_result_json_path, 'w') as f: + json.dump(output_result_json, f, indent=4) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/data_processing/demo/remove_bad_vis.py b/data_processing/demo/remove_bad_vis.py new file mode 100644 index 0000000..c13afda --- /dev/null +++ b/data_processing/demo/remove_bad_vis.py @@ -0,0 +1,252 @@ +import glob +import sys +import os +import os.path as osp +import argparse + +import matplotlib.pyplot as plt +import numpy as np +import cv2 +import colorsys +import json +import random +import torch +import torchvision.transforms as transforms +from torch.nn.parallel.data_parallel import DataParallel +import torch.backends.cudnn as cudnn +import pyrender +import glob +import sys +import os +import os.path as osp +import argparse + +import matplotlib.pyplot as plt +import numpy as np +import cv2 +import colorsys +import json +import random +import torch +import torchvision.transforms as transforms +from torch.nn.parallel.data_parallel import DataParallel +import torch.backends.cudnn as cudnn +import pyrender + +sys.path.insert(0, osp.join('..', 'main')) +sys.path.insert(0, osp.join('..', 'data')) +sys.path.insert(0, osp.join('..', 'common')) +from config import cfg +from tqdm import tqdm +from utils.vis import vis_mesh, save_obj, render_mesh, vis_coco_skeleton + +sys.path.insert(0, cfg.smpl_path) +from utils.smpl import SMPL +from model import get_model + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, dest='gpu_ids', default='0') + parser.add_argument('--model_path', type=str, default='demo_checkpoint.pth.tar') + parser.add_argument('--input_dir', type=str, default='') + + parser.add_argument('--data_dir', type=str, + default='E:\project/3DCrowdNet_upper_body-main\data') + + args = parser.parse_args() + + # test gpus + if not args.gpu_ids: + assert 0, print("Please set proper gpu ids") + + if '-' in args.gpu_ids: + gpus = args.gpu_ids.split('-') + gpus[0] = int(gpus[0]) + gpus[1] = int(gpus[1]) + 1 + args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) + + return args + + +args = parse_args() +cfg.set_args(args.gpu_ids, is_test=True) +cfg.set_data_dir(args.data_dir) +cfg.render = True +cudnn.benchmark = True + +# SMPL joint set +joint_num = 30 # original: 24. 
manually add nose, L/R eye, L/R ear, head top +joints_name = ( + 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', + 'Neck', 'L_Thorax', 'R_Thorax', + 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', + 'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') +flip_pairs = ( + (1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28)) +skeleton = ( + (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17), + (17, 19), + (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 24), (24, 15), (24, 25), + (24, 26), + (25, 27), (26, 28), (24, 29)) + +# SMPl mesh +vertex_num = 6890 +smpl = SMPL() +face = smpl.face +alpha = 0.9 +# other joint set +coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck') +coco_skeleton = ( + (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), + (5, 6), + (11, 17), (12, 17), (17, 18)) + +vis_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', + 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Thorax', 'Pelvis') +vis_skeleton = ( + (0, 1), (0, 2), (2, 4), (1, 3), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 17), (6, 17), (11, 18), + (12, 18), (17, 18), (17, 0), (6, 8), (8, 10),) + +human_model_layer = smpl.layer['neutral'].cuda() + +# snapshot load +model_path = args.model_path +assert osp.exists(model_path), 'Cannot find model at ' + model_path +print('Load checkpoint from {}'.format(model_path)) +model = get_model(vertex_num, joint_num, 'test') + +model = DataParallel(model).cuda() +ckpt = torch.load(model_path) +model.load_state_dict(ckpt['network'], strict=False) +model.eval() + + +def get_projected_vertex(mesh, world2screen_matrix): + mesh = mesh[0, ...] 
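+    # lift the vertices to homogeneous coordinates, apply the world-to-screen projection,
+    # then divide by the depth row to obtain 2D pixel coordinates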
+ mesh = np.concatenate([mesh, np.ones((mesh.shape[0], 1))], axis=1) # 6890 x 4 + points_image = world2screen_matrix @ mesh.T # 4,6890 + points_image = points_image[:3, :] # 3,6890 + + points_on_input_image = points_image / points_image[2, :] + points_on_input_image = points_on_input_image[:2, :].T # 30,2 + + return points_on_input_image + + +import shutil + +path = args.input_dir +bad_path = os.path.join(args.input_dir,'bad_aligned_images') +os.makedirs(bad_path, exist_ok=True) +image_list = glob.glob(os.path.join(args.input_dir,'aligned_images/*')) # + +result_json_path = os.path.join(path, 'result.json') +with open(result_json_path, 'r') as f: + result_json = json.load(f) + +for image_path in tqdm(image_list): + aligned_image_name = os.path.basename(image_path) + + vis1_path = os.path.join(path, 'aligned_images', aligned_image_name) + + + meta_info = result_json[aligned_image_name] + bbox1 = meta_info['bbox'] + + coco_joint = np.array(meta_info['coco_joint']) + coco_joint1 = coco_joint.copy() + coco_joint1[:, 0] = coco_joint1[:, 0] - bbox1[0] + coco_joint1[:, 1] = coco_joint1[:, 1] - bbox1[1] + coco_joint1 *= 1024 / bbox1[2] + + # vis1 = cv2.imread(image_path) + + pose_params_input = torch.from_numpy(np.array(meta_info['smpl_pose'])).float().cuda().view(1, 24, 3) + pose_params_input = pose_params_input[:, 1:, :] + + joints_3d = model.module.get_neck_head_rotated_template_mesh_joint(pose_params_input).cpu().numpy() + # print(joints_3d.shape) + projected_joints = get_projected_vertex(joints_3d, np.array(meta_info['intrisics']) @ np.array( + meta_info['world2camera_matrix'])) + # print(projected_joints.shape) + + vis1 = cv2.imread(image_path) + # joint_names = ['Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear'] + # for j in range(5): + # cv2.circle(vis1, (int(coco_joint1[j, 0]), int(coco_joint1[j, 1])), 3, (0, 0, 255), -1) + # #cv2.circle(vis2, (int(coco_joint2[j, 0]), int(coco_joint2[j, 1])), 3, (0, 0, 255), -1) + # cv2.putText(vis1, joint_names[j], (int(coco_joint1[j, 0]), int(coco_joint1[j, 1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) + # #cv2.putText(vis2, joint_names[j], (int(coco_joint2[j, 0]), int(coco_joint2[j, 1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) + # + # for j in range(24,29): + # cv2.circle(vis1, (int(projected_joints[j, 0]), int(projected_joints[j, 1])), 3, (0, 255, 0), -1) + # #cv2.circle(vis2, (int(projected_joints[j, 0]), int(projected_joints[j, 1])), 3, (0, 255, 0), -1) + # cv2.putText(vis1, joint_names[j-24], (int(projected_joints[j, 0]), int(projected_joints[j, 1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1) + # #cv2.putText(vis2, joint_names[j-24], (int(projected_joints[j, 0]), int(projected_joints[j, 1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1) + + coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', + 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle') + + smpl_joints_name = ( + 'Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', + 'Neck', 'L_Thorax', 'R_Thorax', + 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', + 'R_Eye', 'L_Ear', 'R_Ear', 'Head_top') + + selected_joint_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder') + + selected_coco_idx = [coco_joints_name.index(joint_name) for joint_name in selected_joint_name] + selected_smpl_idx = [smpl_joints_name.index(joint_name) for joint_name 
in selected_joint_name] + + distance = 0 + count = 0 + for i in range(len(selected_joint_name)): + if coco_joint1[selected_coco_idx[i], 2] > 0.1: + distance += np.linalg.norm(coco_joint1[selected_coco_idx[i], :2] - projected_joints[selected_smpl_idx[i], :]) + count += 1 + + cv2.circle(vis1, (int(coco_joint1[selected_coco_idx[i], 0]), int(coco_joint1[selected_coco_idx[i], 1])), 3, (0, 0, 255), -1) + cv2.putText(vis1, selected_joint_name[i], (int(coco_joint1[selected_coco_idx[i], 0]), int(coco_joint1[selected_coco_idx[i], 1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) + cv2.circle(vis1, (int(projected_joints[selected_smpl_idx[i], 0]), int(projected_joints[selected_smpl_idx[i], 1])), 3, (0, 255, 0), -1) + cv2.putText(vis1, selected_joint_name[i], (int(projected_joints[selected_smpl_idx[i], 0]), int(projected_joints[selected_smpl_idx[i], 1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1) + + if count > 0: + distance /= count + else: + shutil.move(vis1_path, os.path.join(bad_path, os.path.basename(vis1_path))) + + if distance > 150: + shutil.move(vis1_path, os.path.join(bad_path, os.path.basename(vis1_path))) + + + + + + + + # cv2.imshow('vis1', vis1) + # cv2.waitKey(0) + # cv2.imshow('vis2', vis2) + # cv2.waitKey(0) + # exit() + + # distance1 = np.linalg.norm(coco_joint1[:5, :2] - projected_joints[24:29, :]) + # + # # if distance1 > distance2: + # # print('move', vis1_path, 'to', os.path.join(bad_path, os.path.basename(vis1_path))) + # # shutil.move(vis1_path, os.path.join(bad_path, os.path.basename(vis1_path))) + # # else: + # # print('move', vis2_path, 'to', os.path.join(bad_path, os.path.basename(vis2_path))) + # # shutil.move(vis2_path, os.path.join(bad_path, os.path.basename(vis2_path))) + # + # if distance1 > 50: + # cv2.imshow('vis1', vis1) + # cv2.waitKey(0) + + # exit() diff --git a/data_processing/demo/template_mesh.npy b/data_processing/demo/template_mesh.npy new file mode 100644 index 0000000..f132f73 Binary files /dev/null and b/data_processing/demo/template_mesh.npy differ diff --git a/data_processing/demo/template_mesh_in_pyrender.npy b/data_processing/demo/template_mesh_in_pyrender.npy new file mode 100644 index 0000000..5ec3e50 Binary files /dev/null and b/data_processing/demo/template_mesh_in_pyrender.npy differ diff --git a/data_processing/detectron2/.clang-format b/data_processing/detectron2/.clang-format new file mode 100644 index 0000000..39b1b3d --- /dev/null +++ b/data_processing/detectron2/.clang-format @@ -0,0 +1,85 @@ +AccessModifierOffset: -1 +AlignAfterOpenBracket: AlwaysBreak +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: true +AlignOperands: false +AlignTrailingComments: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false 
+BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: false +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ForEachMacros: [ FOR_EACH, FOR_EACH_R, FOR_EACH_RANGE, ] +IncludeCategories: + - Regex: '^<.*\.h(pp)?>' + Priority: 1 + - Regex: '^<.*' + Priority: 2 + - Regex: '.*' + Priority: 3 +IndentCaseLabels: true +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: false +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Left +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never diff --git a/data_processing/detectron2/.flake8 b/data_processing/detectron2/.flake8 new file mode 100644 index 0000000..28881e4 --- /dev/null +++ b/data_processing/detectron2/.flake8 @@ -0,0 +1,15 @@ +# This is an example .flake8 config, used when developing *Black* itself. +# Keep in sync with setup.cfg which is used for source packages. + +[flake8] +ignore = W503, E203, E221, C901, C408, E741, C407, B017, F811, C101, EXE001, EXE002 +max-line-length = 100 +max-complexity = 18 +select = B,C,E,F,W,T4,B9 +exclude = build +per-file-ignores = + **/__init__.py:F401,F403,E402 + **/configs/**.py:F401,E402 + configs/**.py:F401,E402 + **/tests/config/**.py:F401,E402 + tests/config/**.py:F401,E402 diff --git a/data_processing/detectron2/.gitignore b/data_processing/detectron2/.gitignore new file mode 100644 index 0000000..9953d9b --- /dev/null +++ b/data_processing/detectron2/.gitignore @@ -0,0 +1,53 @@ +# output dir +output +instant_test_output +inference_test_output + + +*.png +*.json +*.diff +*.jpg +!/projects/DensePose/doc/images/*.jpg + +# compilation and distribution +__pycache__ +_ext +*.pyc +*.pyd +*.so +*.dll +*.egg-info/ +build/ +dist/ +wheels/ + +# pytorch/python/numpy formats +*.pth +*.pkl +*.npy +*.ts +model_ts*.txt + +# ipython/jupyter notebooks +*.ipynb +**/.ipynb_checkpoints/ + +# Editor temporaries +*.swn +*.swo +*.swp +*~ + +# editor settings +.idea +.vscode +_darcs + +# project dirs +/detectron2/model_zoo/configs +/datasets/* +!/datasets/*.* +/projects/*/datasets +/models +/snippet diff --git a/data_processing/detectron2/GETTING_STARTED.md b/data_processing/detectron2/GETTING_STARTED.md new file mode 100644 index 0000000..404b0c8 --- /dev/null +++ b/data_processing/detectron2/GETTING_STARTED.md @@ -0,0 +1,79 @@ +## Getting Started with Detectron2 + +This document provides a brief intro of the usage of builtin command-line tools in detectron2. 
+
+For a tutorial that involves actual coding with the API,
+see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
+which covers how to run inference with an
+existing model, and how to train a builtin model on a custom dataset.
+
+
+### Inference Demo with Pre-trained Models
+
+1. Pick a model and its config file from
+  [model zoo](MODEL_ZOO.md),
+  for example, `mask_rcnn_R_50_FPN_3x.yaml`.
+2. We provide `demo.py` that can run a demo with builtin configs. Run it with:
+```
+cd demo/
+python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
+  --input input1.jpg input2.jpg \
+  [--other-options]
+  --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
+```
+The configs are made for training, so we need to point `MODEL.WEIGHTS` to a model from the model zoo for evaluation.
+This command will run the inference and show visualizations in an OpenCV window.
+
+For details of the command line arguments, see `demo.py -h` or look at its source code
+to understand its behavior. Some common arguments are:
+* To run __on your webcam__, replace `--input files` with `--webcam`.
+* To run __on a video__, replace `--input files` with `--video-input video.mp4`.
+* To run __on CPU__, add `MODEL.DEVICE cpu` after `--opts`.
+* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`.
+
+
+### Training & Evaluation in Command Line
+
+We provide two scripts, "tools/plain_train_net.py" and "tools/train_net.py",
+that can train all the configs provided in detectron2. You may want to
+use them as a reference to write your own training script.
+
+Compared to "train_net.py", "plain_train_net.py" supports fewer default
+features. It also includes fewer abstractions and is therefore easier to extend with custom
+logic.
+
+To train a model with "train_net.py", first
+set up the corresponding datasets following
+[datasets/README.md](./datasets/README.md),
+then run:
+```
+cd tools/
+./train_net.py --num-gpus 8 \
+  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
+```
+
+The configs are made for 8-GPU training.
+To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.:
+```
+./train_net.py \
+  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
+  --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025
+```
+
+To evaluate a model's performance, use
+```
+./train_net.py \
+  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
+  --eval-only MODEL.WEIGHTS /path/to/checkpoint_file
+```
+For more options, see `./train_net.py -h`.
+
+### Use Detectron2 APIs in Your Code
+
+See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
+to learn how to use detectron2 APIs to:
+1. run inference with an existing model
+2. train a builtin model on a custom dataset
+
+See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/main/projects)
+for more ways to build your project on detectron2.
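+
+As a minimal illustration of the API usage described above (a sketch, not part of the official tutorial), the snippet below assumes detectron2 is installed, an image `input.jpg` exists in the working directory, and a GPU is available unless the `MODEL.DEVICE` line is uncommented:
+
+```python
+# Minimal inference sketch with the detectron2 Python API.
+import cv2
+from detectron2 import model_zoo
+from detectron2.config import get_cfg
+from detectron2.engine import DefaultPredictor
+
+cfg = get_cfg()
+cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
+cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
+cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # confidence threshold for reported detections
+# cfg.MODEL.DEVICE = "cpu"  # uncomment to run without a GPU
+
+predictor = DefaultPredictor(cfg)
+image = cv2.imread("input.jpg")   # BGR image, the format DefaultPredictor expects
+outputs = predictor(image)        # dict with an "instances" field
+print(outputs["instances"].pred_classes)
+print(outputs["instances"].pred_boxes)
+```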
diff --git a/data_processing/detectron2/INSTALL.md b/data_processing/detectron2/INSTALL.md new file mode 100644 index 0000000..f522e6f --- /dev/null +++ b/data_processing/detectron2/INSTALL.md @@ -0,0 +1,261 @@ +## Installation + +### Requirements +- Linux or macOS with Python ≥ 3.7 +- PyTorch ≥ 1.8 and [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation. + Install them together at [pytorch.org](https://pytorch.org) to make sure of this +- OpenCV is optional but needed by demo and visualization + + +### Build Detectron2 from Source + +gcc & g++ ≥ 5.4 are required. [ninja](https://ninja-build.org/) is optional but recommended for faster build. +After having them, run: +``` +python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' +# (add --user if you don't have permission) + +# Or, to install it from a local clone: +git clone https://github.com/facebookresearch/detectron2.git +python -m pip install -e detectron2 + +# On macOS, you may need to prepend the above commands with a few environment variables: +CC=clang CXX=clang++ ARCHFLAGS="-arch x86_64" python -m pip install ... +``` + +To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the +old build first. You often need to rebuild detectron2 after reinstalling PyTorch. + +### Install Pre-Built Detectron2 (Linux only) + +Choose from this table to install [v0.6 (Oct 2021)](https://github.com/facebookresearch/detectron2/releases): + +
CUDA torch 1.10torch 1.9torch 1.8
11.3
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu113/torch1.10/index.html
+
11.1
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.10/index.html
+
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.9/index.html
+
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html
+
10.2
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.10/index.html
+
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html
+
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.8/index.html
+
10.1
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html
+
cpu
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.10/index.html
+
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.9/index.html
+
install
python -m pip install detectron2 -f \
+  https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html
+
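+
+After installing a wheel (or building from source), a quick sanity check of the environment is the sketch below (it only assumes that PyTorch and detectron2 are importable); `python -m detectron2.utils.collect_env` prints a more detailed report:
+
+```python
+# Verify that detectron2 is importable and see which PyTorch/CUDA build it runs against.
+import torch
+import detectron2
+print(torch.__version__, torch.version.cuda, detectron2.__version__)
+print(torch.cuda.is_available())
+```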
+ +Note that: +1. The pre-built packages have to be used with corresponding version of CUDA and the official package of PyTorch. + Otherwise, please build detectron2 from source. +2. New packages are released every few months. Therefore, packages may not contain latest features in the main + branch and may not be compatible with the main branch of a research project that uses detectron2 + (e.g. those in [projects](projects)). + +### Common Installation Issues + +Click each issue for its solutions: + +
+ +Undefined symbols that looks like "TH..","at::Tensor...","torch..." + +
+ +This usually happens when detectron2 or torchvision is not +compiled with the version of PyTorch you're running. + +If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them +following [pytorch.org](http://pytorch.org). So the versions will match. + +If the error comes from a pre-built detectron2, check [release notes](https://github.com/facebookresearch/detectron2/releases), +uninstall and reinstall the correct pre-built detectron2 that matches pytorch version. + +If the error comes from detectron2 or torchvision that you built manually from source, +remove files you built (`build/`, `**/*.so`) and rebuild it so it can pick up the version of pytorch currently in your environment. + +If the above instructions do not resolve this problem, please provide an environment (e.g. a dockerfile) that can reproduce the issue. +
+ +
+ +Missing torch dynamic libraries, OR segmentation fault immediately when using detectron2. + +This usually happens when detectron2 or torchvision is not +compiled with the version of PyTorch you're running. See the previous common issue for the solution. +
+ +
+ +Undefined C++ symbols (e.g. "GLIBCXX..") or C++ symbols not found. + +
+Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime. + +This often happens with old anaconda. +It may help to run `conda update libgcc` to upgrade its runtime. + +The fundamental solution is to avoid the mismatch, either by compiling using older version of C++ +compiler, or run the code with proper C++ runtime. +To run the code with a specific C++ runtime, you can use environment variable `LD_PRELOAD=/path/to/libstdc++.so`. + +
+ +
+ +"nvcc not found" or "Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available". + +
+CUDA is not found when building detectron2. +You should make sure + +``` +python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)' +``` + +print `(True, a directory with cuda)` at the time you build detectron2. + +Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config. +
+ +
+ +"invalid device function" or "no kernel image is available for execution". + +
+Two possibilities: + +* You build detectron2 with one version of CUDA but run it with a different version. + + To check whether it is the case, + use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. + In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" + to contain cuda libraries of the same version. + + When they are inconsistent, + you need to either install a different build of PyTorch (or build by yourself) + to match your local CUDA installation, or install a different version of CUDA to match PyTorch. + +* PyTorch/torchvision/Detectron2 is not built for the correct GPU SM architecture (aka. compute capability). + + The architecture included by PyTorch/detectron2/torchvision is available in the "architecture flags" in + `python -m detectron2.utils.collect_env`. It must include + the architecture of your GPU, which can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus). + + If you're using pre-built PyTorch/detectron2/torchvision, they have included support for most popular GPUs already. + If not supported, you need to build them from source. + + When building detectron2/torchvision from source, they detect the GPU device and build for only the device. + This means the compiled code may not work on a different GPU device. + To recompile them for the correct architecture, remove all installed/compiled files, + and rebuild them with the `TORCH_CUDA_ARCH_LIST` environment variable set properly. + For example, `export TORCH_CUDA_ARCH_LIST="6.0;7.0"` makes it compile for both P100s and V100s. +
+ +
+ +Undefined CUDA symbols; Cannot open libcudart.so + +
+The version of NVCC you use to build detectron2 or torchvision does +not match the version of CUDA you are running with. +This often happens when using anaconda's CUDA runtime. + +Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. +In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" +to contain cuda libraries of the same version. + +When they are inconsistent, +you need to either install a different build of PyTorch (or build by yourself) +to match your local CUDA installation, or install a different version of CUDA to match PyTorch. +
+ + +
+ +C++ compilation errors from NVCC / NVRTC, or "Unsupported gpu architecture" + +
+A few possibilities: + +1. Local CUDA/NVCC version has to match the CUDA version of your PyTorch. Both can be found in `python collect_env.py` + (download from [here](./detectron2/utils/collect_env.py)). + When they are inconsistent, you need to either install a different build of PyTorch (or build by yourself) + to match your local CUDA installation, or install a different version of CUDA to match PyTorch. + +2. Local CUDA/NVCC version shall support the SM architecture (a.k.a. compute capability) of your GPU. + The capability of your GPU can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus). + The capability supported by NVCC is listed at [here](https://gist.github.com/ax3l/9489132). + If your NVCC version is too old, this can be workaround by setting environment variable + `TORCH_CUDA_ARCH_LIST` to a lower, supported capability. + +3. The combination of NVCC and GCC you use is incompatible. You need to change one of their versions. + See [here](https://gist.github.com/ax3l/9489132) for some valid combinations. + Notably, CUDA<=10.1.105 doesn't support GCC>7.3. + + The CUDA/GCC version used by PyTorch can be found by `print(torch.__config__.show())`. + +
+ + +
+ +"ImportError: cannot import name '_C'". + +
+Please build and install detectron2 following the instructions above. + +Or, if you are running code from detectron2's root directory, `cd` to a different one. +Otherwise you may not import the code that you installed. +
+ + +
+ +Any issue on windows. + +
+ +Detectron2 is continuously built on windows with [CircleCI](https://app.circleci.com/pipelines/github/facebookresearch/detectron2?branch=main). +However we do not provide official support for it. +PRs that improves code compatibility on windows are welcome. +
+ +
+ +ONNX conversion segfault after some "TraceWarning". + +
+The ONNX package is compiled with a too old compiler. + +Please build and install ONNX from its source code using a compiler +whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`). +
+ + +
+ +"library not found for -lstdc++" on older version of MacOS + +
+ +See [this stackoverflow answer](https://stackoverflow.com/questions/56083725/macos-build-issues-lstdc-not-found-while-building-python-package). + +
+ + +### Installation inside specific environments: + +* __Colab__: see our [Colab Tutorial](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) + which has step-by-step instructions. + +* __Docker__: The official [Dockerfile](docker) installs detectron2 with a few simple commands. diff --git a/data_processing/detectron2/LICENSE b/data_processing/detectron2/LICENSE new file mode 100644 index 0000000..cd1b070 --- /dev/null +++ b/data_processing/detectron2/LICENSE @@ -0,0 +1,202 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. 
For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. 
You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. 
+ +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/data_processing/detectron2/MODEL_ZOO.md b/data_processing/detectron2/MODEL_ZOO.md new file mode 100644 index 0000000..69db272 --- /dev/null +++ b/data_processing/detectron2/MODEL_ZOO.md @@ -0,0 +1,1052 @@ +# Detectron2 Model Zoo and Baselines + +## Introduction + +This file documents a large collection of baselines trained +with detectron2 in Sep-Oct, 2019. +All numbers were obtained on [Big Basin](https://engineering.fb.com/data-center-engineering/introducing-big-basin-our-next-generation-ai-hardware/) +servers with 8 NVIDIA V100 GPUs & NVLink. The speed numbers are periodically updated with latest PyTorch/CUDA/cuDNN versions. +You can access these models from code using [detectron2.model_zoo](https://detectron2.readthedocs.io/modules/model_zoo.html) APIs. + +In addition to these official baseline models, you can find more models in [projects/](projects/). + +#### How to Read the Tables +* The "Name" column contains a link to the config file. Models can be reproduced using `tools/train_net.py` with the corresponding yaml config file, + or `tools/lazyconfig_train_net.py` for python config files. +* Training speed is averaged across the entire training. + We keep updating the speed with latest version of detectron2/pytorch/etc., + so they might be different from the `metrics` file. + Training speed for multi-machine jobs is not provided. +* Inference speed is measured by `tools/train_net.py --eval-only`, or [inference_on_dataset()](https://detectron2.readthedocs.io/modules/evaluation.html#detectron2.evaluation.inference_on_dataset), + with batch size 1 in detectron2 directly. + Measuring it with custom code may introduce other overhead. + Actual deployment in production should in general be faster than the given inference + speed due to more optimizations. +* The *model id* column is provided for ease of reference. + To check downloaded file integrity, any model on this page contains its md5 prefix in its file name. +* Training curves and other statistics can be found in `metrics` for each model. + +#### Common Settings for COCO Models +* All COCO models were trained on `train2017` and evaluated on `val2017`. +* The default settings are __not directly comparable__ with Detectron's standard settings. 
+ For example, our default training data augmentation uses scale jittering in addition to horizontal flipping. + + To make fair comparisons with Detectron's settings, see + [Detectron1-Comparisons](configs/Detectron1-Comparisons/) for accuracy comparison, + and [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html) + for speed comparison. +* For Faster/Mask R-CNN, we provide baselines based on __3 different backbone combinations__: + * __FPN__: Use a ResNet+FPN backbone with standard conv and FC heads for mask and box prediction, + respectively. It obtains the best + speed/accuracy tradeoff, but the other two are still useful for research. + * __C4__: Use a ResNet conv4 backbone with conv5 head. The original baseline in the Faster R-CNN paper. + * __DC5__ (Dilated-C5): Use a ResNet conv5 backbone with dilations in conv5, and standard conv and FC heads + for mask and box prediction, respectively. + This is used by the Deformable ConvNet paper. +* Most models are trained with the 3x schedule (~37 COCO epochs). + Although 1x models are heavily under-trained, we provide some ResNet-50 models with the 1x (~12 COCO epochs) + training schedule for comparison when doing quick research iteration. + +#### ImageNet Pretrained Models + +It's common to initialize from backbone models pre-trained on ImageNet classification tasks. The following backbone models are available: + +* [R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl): converted copy of [MSRA's original ResNet-50](https://github.com/KaimingHe/deep-residual-networks) model. +* [R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl): converted copy of [MSRA's original ResNet-101](https://github.com/KaimingHe/deep-residual-networks) model. +* [X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl): ResNeXt-101-32x8d model trained with Caffe2 at FB. +* [R-50.pkl (torchvision)](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/torchvision/R-50.pkl): converted copy of [torchvision's ResNet-50](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.resnet50) model. + More details can be found in [the conversion script](tools/convert-torchvision-to-d2.py). + +Note that the above models have __different__ format from those provided in Detectron: we do not fuse BatchNorm into an affine layer. +Pretrained models in Detectron's format can still be used. For example: +* [X-152-32x8d-IN5k.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl): + ResNeXt-152-32x8d model trained on ImageNet-5k with Caffe2 at FB (see ResNeXt paper for details on ImageNet-5k). +* [R-50-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47261647/R-50-GN.pkl): + ResNet-50 with Group Normalization. 
+* [R-101-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47592356/R-101-GN.pkl): + ResNet-101 with Group Normalization. + +These models require slightly different settings regarding normalization and architecture. See the model zoo configs for reference. + +#### License + +All models available for download through this document are licensed under the +[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/). + +### COCO Object Detection Baselines + +#### Faster R-CNN: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
model iddownload
R50-C41x0.5510.1024.835.7137257644model | metrics
R50-DC51x0.3800.0685.037.3137847829model | metrics
R50-FPN1x0.2100.0383.037.9137257794model | metrics
R50-C43x0.5430.1044.838.4137849393model | metrics
R50-DC53x0.3780.0705.039.0137849425model | metrics
R50-FPN3x0.2090.0383.040.2137849458model | metrics
R101-C43x0.6190.1395.941.1138204752model | metrics
R101-DC53x0.4520.0866.140.6138204841model | metrics
R101-FPN3x0.2860.0514.142.0137851257model | metrics
X101-FPN3x0.6380.0986.743.0139173657model | metrics
+ +#### RetinaNet: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
model iddownload
R501x0.2050.0414.137.4190397773model | metrics
R503x0.2050.0414.138.7190397829model | metrics
R1013x0.2910.0545.240.4190397697model | metrics
+ + +#### RPN & Fast R-CNN: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
prop.
AR
model iddownload
RPN R50-C41x0.1300.0341.551.6137258005model | metrics
RPN R50-FPN1x0.1860.0322.758.0137258492model | metrics
Fast R-CNN R50-FPN1x0.1400.0292.637.8137635226model | metrics
+ +### COCO Instance Segmentation Baselines with Mask R-CNN + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
mask
AP
model iddownload
R50-C41x0.5840.1105.236.832.2137259246model | metrics
R50-DC51x0.4710.0766.538.334.2137260150model | metrics
R50-FPN1x0.2610.0433.438.635.2137260431model | metrics
R50-C43x0.5750.1115.239.834.4137849525model | metrics
R50-DC53x0.4700.0766.540.035.9137849551model | metrics
R50-FPN3x0.2610.0433.441.037.2137849600model | metrics
R101-C43x0.6520.1456.342.636.7138363239model | metrics
R101-DC53x0.5450.0927.641.937.3138363294model | metrics
R101-FPN3x0.3400.0564.642.938.6138205316model | metrics
X101-FPN3x0.6900.1037.244.339.5139653917model | metrics
+ + + +#### New baselines using Large-Scale Jitter and Longer Training Schedule + +The following baselines of COCO Instance Segmentation with Mask R-CNN are generated +using a longer training schedule and large-scale jitter as described in Google's +[Simple Copy-Paste Data Augmentation](https://arxiv.org/pdf/2012.07177.pdf) paper. These +models are trained from scratch using random initialization. These baselines exceed the +previous Mask R-CNN baselines. + +In the following table, one epoch consists of training on 118000 COCO images. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Nameepochstrain
time
(s/im)
inference
time
(s/im)
box
AP
mask
AP
model iddownload
R50-FPN1000.3760.06944.640.342047764model | metrics
R50-FPN2000.3760.06946.341.742047638model | metrics
R50-FPN4000.3760.06947.442.542019571model | metrics
R101-FPN1000.5180.07346.441.642025812model | metrics
R101-FPN2000.5180.07348.043.142131867model | metrics
R101-FPN4000.5180.07348.943.742073830model | metrics
regnetx_4gf_dds_FPN1000.4740.07146.041.342047771model | metrics
regnetx_4gf_dds_FPN2000.4740.07148.143.142132721model | metrics
regnetx_4gf_dds_FPN4000.4740.07148.643.542025447model | metrics
regnety_4gf_dds_FPN1000.4870.07346.141.642047784model | metrics
regnety_4gf_dds_FPN2000.4870.07247.843.042047642model | metrics
regnety_4gf_dds_FPN4000.4870.07248.243.342045954model | metrics
+ +### COCO Person Keypoint Detection Baselines with Keypoint R-CNN + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
kp.
AP
model iddownload
R50-FPN1x0.3150.0725.053.664.0137261548model | metrics
R50-FPN3x0.3160.0665.055.465.5137849621model | metrics
R101-FPN3x0.3900.0766.156.466.1138363331model | metrics
X101-FPN3x0.7380.1218.757.366.0139686956model | metrics
+ +### COCO Panoptic Segmentation Baselines with Panoptic FPN + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
mask
AP
PQmodel iddownload
R50-FPN1x0.3040.0534.837.634.739.4139514544model | metrics
R50-FPN3x0.3020.0534.840.036.541.5139514569model | metrics
R101-FPN3x0.3920.0666.042.438.543.0139514519model | metrics
+ + +### LVIS Instance Segmentation Baselines with Mask R-CNN + +Mask R-CNN baselines on the [LVIS dataset](https://lvisdataset.org), v0.5. +These baselines are described in Table 3(c) of the [LVIS paper](https://arxiv.org/abs/1908.03195). + +NOTE: the 1x schedule here has the same amount of __iterations__ as the COCO 1x baselines. +They are roughly 24 epochs of LVISv0.5 data. +The final results of these configs have large variance across different runs. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
mask
AP
model iddownload
R50-FPN1x0.2920.1077.123.624.4144219072model | metrics
R101-FPN1x0.3710.1147.825.625.9144219035model | metrics
X101-FPN1x0.7120.15110.226.727.1144219108model | metrics
+ + + +### Cityscapes & Pascal VOC Baselines + +Simple baselines for +* Mask R-CNN on Cityscapes instance segmentation (initialized from COCO pre-training, then trained on Cityscapes fine annotations only) +* Faster R-CNN on PASCAL VOC object detection (trained on VOC 2007 train+val + VOC 2012 train+val, tested on VOC 2007 using 11-point interpolated AP) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Nametrain
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
box
AP50
mask
AP
model iddownload
R50-FPN, Cityscapes0.2400.0784.436.5142423278model | metrics
R50-C4, VOC0.5370.0814.851.980.3142202221model | metrics
+ + + +### Other Settings + +Ablations for Deformable Conv and Cascade R-CNN: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
mask
AP
model iddownload
Baseline R50-FPN1x0.2610.0433.438.635.2137260431model | metrics
Deformable Conv1x0.3420.0483.541.537.5138602867model | metrics
Cascade R-CNN1x0.3170.0524.042.136.4138602847model | metrics
Baseline R50-FPN3x0.2610.0433.441.037.2137849600model | metrics
Deformable Conv3x0.3490.0473.542.738.5144998336model | metrics
Cascade R-CNN3x0.3280.0534.044.338.5144998488model | metrics
+ + +Ablations for normalization methods, and a few models trained from scratch following [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883). +(Note: The baseline uses `2fc` head while the others use [`4conv1fc` head](https://arxiv.org/abs/1803.08494)) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Namelr
sched
train
time
(s/iter)
inference
time
(s/im)
train
mem
(GB)
box
AP
mask
AP
model iddownload
Baseline R50-FPN3x0.2610.0433.441.037.2137849600model | metrics
GN3x0.3090.0605.642.638.6138602888model | metrics
SyncBN3x0.3450.0535.541.937.8169527823model | metrics
GN (from scratch)3x0.3380.0617.239.936.6138602908model | metrics
GN (from scratch)9xN/A0.0617.243.739.6183808979model | metrics
SyncBN (from scratch)9xN/A0.0557.243.639.3184226666model | metrics
+ + +A few very large models trained for a long time, for demo purposes. They are trained using multiple machines: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Nameinference
time
(s/im)
train
mem
(GB)
box
AP
mask
AP
PQmodel iddownload
Panoptic FPN R1010.09811.447.441.346.1139797668model | metrics
Mask R-CNN X1520.23415.150.244.018131413model | metrics
above + test-time aug.51.945.9
diff --git a/data_processing/detectron2/README.md b/data_processing/detectron2/README.md new file mode 100644 index 0000000..75db3c5 --- /dev/null +++ b/data_processing/detectron2/README.md @@ -0,0 +1,68 @@ + + + + Support Ukraine - Help Provide Humanitarian Aid to Ukraine. + + +Detectron2 is Facebook AI Research's next generation library +that provides state-of-the-art detection and segmentation algorithms. +It is the successor of +[Detectron](https://github.com/facebookresearch/Detectron/) +and [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/). +It supports a number of computer vision research projects and production applications in Facebook. + +
+ +## Learn More about Detectron2 + +Explain Like I’m 5: Detectron2 | Using Machine Learning with Detectron2 +:-------------------------:|:-------------------------: +[![Explain Like I’m 5: Detectron2](https://img.youtube.com/vi/1oq1Ye7dFqc/0.jpg)](https://www.youtube.com/watch?v=1oq1Ye7dFqc) | [![Using Machine Learning with Detectron2](https://img.youtube.com/vi/eUSgtfK4ivk/0.jpg)](https://www.youtube.com/watch?v=eUSgtfK4ivk) + +## What's New +* Includes new capabilities such as panoptic segmentation, Densepose, Cascade R-CNN, rotated bounding boxes, PointRend, + DeepLab, ViTDet, MViTv2 etc. +* Used as a library to support building [research projects](projects/) on top of it. +* Models can be exported to TorchScript format or Caffe2 format for deployment. +* It [trains much faster](https://detectron2.readthedocs.io/notes/benchmarks.html). + +See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-/) +to see more demos and learn about detectron2. + +## Installation + +See [installation instructions](https://detectron2.readthedocs.io/tutorials/install.html). + +## Getting Started + +See [Getting Started with Detectron2](https://detectron2.readthedocs.io/tutorials/getting_started.html), +and the [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +to learn about basic usage. + +Learn more at our [documentation](https://detectron2.readthedocs.org). +And see [projects/](projects/) for some projects that are built on top of detectron2. + +## Model Zoo and Baselines + +We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md). + +## License + +Detectron2 is released under the [Apache 2.0 license](LICENSE). + +## Citing Detectron2 + +If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry. 
+ +```BibTeX +@misc{wu2019detectron2, + author = {Yuxin Wu and Alexander Kirillov and Francisco Massa and + Wan-Yen Lo and Ross Girshick}, + title = {Detectron2}, + howpublished = {\url{https://github.com/facebookresearch/detectron2}}, + year = {2019} +} +``` diff --git a/data_processing/detectron2/configs/Base-RCNN-C4.yaml b/data_processing/detectron2/configs/Base-RCNN-C4.yaml new file mode 100644 index 0000000..fbf34a0 --- /dev/null +++ b/data_processing/detectron2/configs/Base-RCNN-C4.yaml @@ -0,0 +1,18 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + RPN: + PRE_NMS_TOPK_TEST: 6000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "Res5ROIHeads" +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/data_processing/detectron2/configs/Base-RCNN-DilatedC5.yaml b/data_processing/detectron2/configs/Base-RCNN-DilatedC5.yaml new file mode 100644 index 0000000..c0d6d16 --- /dev/null +++ b/data_processing/detectron2/configs/Base-RCNN-DilatedC5.yaml @@ -0,0 +1,31 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + RESNETS: + OUT_FEATURES: ["res5"] + RES5_DILATION: 2 + RPN: + IN_FEATURES: ["res5"] + PRE_NMS_TOPK_TEST: 6000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["res5"] + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/data_processing/detectron2/configs/Base-RCNN-FPN.yaml b/data_processing/detectron2/configs/Base-RCNN-FPN.yaml new file mode 100644 index 0000000..3e020f2 --- /dev/null +++ b/data_processing/detectron2/configs/Base-RCNN-FPN.yaml @@ -0,0 +1,42 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
+ POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/data_processing/detectron2/configs/Base-RetinaNet.yaml b/data_processing/detectron2/configs/Base-RetinaNet.yaml new file mode 100644 index 0000000..8b45b98 --- /dev/null +++ b/data_processing/detectron2/configs/Base-RetinaNet.yaml @@ -0,0 +1,25 @@ +MODEL: + META_ARCHITECTURE: "RetinaNet" + BACKBONE: + NAME: "build_retinanet_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"] + FPN: + IN_FEATURES: ["res3", "res4", "res5"] + RETINANET: + IOU_THRESHOLDS: [0.4, 0.5] + IOU_LABELS: [0, -1, 1] + SMOOTH_L1_LOSS_BETA: 0.0 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/data_processing/detectron2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..773ac10 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,17 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + LOAD_PROPOSALS: True + RESNETS: + DEPTH: 50 + PROPOSAL_GENERATOR: + NAME: "PrecomputedProposals" +DATASETS: + TRAIN: ("coco_2017_train",) + PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_train_box_proposals_21bc3a.pkl", ) + TEST: ("coco_2017_val",) + PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) +DATALOADER: + # proposals are part of the dataset_dicts, and take a lot of RAM + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml new file mode 100644 index 0000000..db142cd --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml new file mode 100644 index 0000000..bceb6b3 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git 
a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000..57a098f --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml new file mode 100644 index 0000000..f961301 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml new file mode 100644 index 0000000..bc51bce --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml new file mode 100644 index 0000000..0fe96f5 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml new file mode 100644 index 0000000..33fadeb --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..3262019 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000..4139518 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git 
a/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000..9c9b5ab --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,13 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: False + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Detection/fcos_R_50_FPN_1x.py b/data_processing/detectron2/configs/COCO-Detection/fcos_R_50_FPN_1x.py new file mode 100644 index 0000000..86f83c6 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/fcos_R_50_FPN_1x.py @@ -0,0 +1,11 @@ +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco import dataloader +from ..common.models.fcos import model +from ..common.train import train + +dataloader.train.mapper.use_instance_mask = False +optimizer.lr = 0.01 + +model.backbone.bottom_up.freeze_at = 2 +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/data_processing/detectron2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml new file mode 100644 index 0000000..4abb1b9 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py new file mode 100644 index 0000000..43057a8 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py @@ -0,0 +1,11 @@ +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco import dataloader +from ..common.models.retinanet import model +from ..common.train import train + +dataloader.train.mapper.use_instance_mask = False +model.backbone.bottom_up.freeze_at = 2 +optimizer.lr = 0.01 + +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml new file mode 100644 index 0000000..4a24ce3 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml new file mode 100644 index 0000000..3b5412d --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 
+SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml b/data_processing/detectron2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml new file mode 100644 index 0000000..e048211 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + META_ARCHITECTURE: "ProposalNetwork" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + RPN: + PRE_NMS_TOPK_TEST: 12000 + POST_NMS_TOPK_TEST: 2000 diff --git a/data_processing/detectron2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..dc9c952 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "ProposalNetwork" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + RPN: + POST_NMS_TOPK_TEST: 2000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml new file mode 100644 index 0000000..1a94cc4 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml new file mode 100644 index 0000000..67b70cf --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000..1935a30 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py new file mode 100644 index 0000000..22016be --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py @@ -0,0 +1,8 @@ +from ..common.train import train +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco import dataloader +from ..common.models.mask_rcnn_c4 import model + +model.backbone.freeze_at = 2 +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git 
a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml new file mode 100644 index 0000000..a9aeb4e --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml new file mode 100644 index 0000000..38ed867 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml new file mode 100644 index 0000000..b13eefa --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml new file mode 100644 index 0000000..d401016 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py new file mode 100644 index 0000000..40844dd --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py @@ -0,0 +1,8 @@ +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco import dataloader +from ..common.models.mask_rcnn_fpn import model +from ..common.train import train + +model.backbone.bottom_up.freeze_at = 2 +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..d50fb86 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou.yaml new file mode 100644 index 0000000..bec680e --- 
/dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou.yaml @@ -0,0 +1,12 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + RPN: + BBOX_REG_LOSS_TYPE: "giou" + BBOX_REG_LOSS_WEIGHT: 2.0 + ROI_BOX_HEAD: + BBOX_REG_LOSS_TYPE: "giou" + BBOX_REG_LOSS_WEIGHT: 10.0 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000..be7d06b --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000..d14c63f --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,13 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py new file mode 100644 index 0000000..d7bbdd7 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py @@ -0,0 +1,34 @@ +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco import dataloader +from ..common.models.mask_rcnn_fpn import model +from ..common.train import train + +from detectron2.config import LazyCall as L +from detectron2.modeling.backbone import RegNet +from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock + + +# Replace default ResNet with RegNetX-4GF from the DDS paper. 
Config source: +# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnetx/RegNetX-4.0GF_dds_8gpu.yaml#L4-L9 # noqa +model.backbone.bottom_up = L(RegNet)( + stem_class=SimpleStem, + stem_width=32, + block_class=ResBottleneckBlock, + depth=23, + w_a=38.65, + w_0=96, + w_m=2.43, + group_width=40, + freeze_at=2, + norm="FrozenBN", + out_features=["s1", "s2", "s3", "s4"], +) +model.pixel_std = [57.375, 57.120, 58.395] + +optimizer.weight_decay = 5e-5 +train.init_checkpoint = ( + "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth" +) +# RegNets benefit from enabling cudnn benchmark mode +train.cudnn_benchmark = True diff --git a/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py new file mode 100644 index 0000000..72c6b7a --- /dev/null +++ b/data_processing/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py @@ -0,0 +1,35 @@ +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco import dataloader +from ..common.models.mask_rcnn_fpn import model +from ..common.train import train + +from detectron2.config import LazyCall as L +from detectron2.modeling.backbone import RegNet +from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock + + +# Replace default ResNet with RegNetY-4GF from the DDS paper. Config source: +# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa +model.backbone.bottom_up = L(RegNet)( + stem_class=SimpleStem, + stem_width=32, + block_class=ResBottleneckBlock, + depth=22, + w_a=31.41, + w_0=96, + w_m=2.24, + group_width=64, + se_ratio=0.25, + freeze_at=2, + norm="FrozenBN", + out_features=["s1", "s2", "s3", "s4"], +) +model.pixel_std = [57.375, 57.120, 58.395] + +optimizer.weight_decay = 5e-5 +train.init_checkpoint = ( + "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth" +) +# RegNets benefit from enabling cudnn benchmark mode +train.cudnn_benchmark = True diff --git a/data_processing/detectron2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml b/data_processing/detectron2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml new file mode 100644 index 0000000..4e03944 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml @@ -0,0 +1,15 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + KEYPOINT_ON: True + ROI_HEADS: + NUM_CLASSES: 1 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 0.5 # Keypoint AP degrades (though box AP improves) when using plain L1 loss + RPN: + # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. + # 1000 proposals per-image is found to hurt box AP. + # Therefore we increase it to 1500 per-image. 
+ POST_NMS_TOPK_TRAIN: 1500 +DATASETS: + TRAIN: ("keypoints_coco_2017_train",) + TEST: ("keypoints_coco_2017_val",) diff --git a/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000..9309535 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py new file mode 100644 index 0000000..1aad53b --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py @@ -0,0 +1,8 @@ +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco_keypoint import dataloader +from ..common.models.keypoint_rcnn_fpn import model +from ..common.train import train + +model.backbone.bottom_up.freeze_at = 2 +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..7bf85cf --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000..a07f243 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000..d4bfa20 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml b/data_processing/detectron2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml new file mode 100644 index 0000000..f00d54b --- /dev/null +++ b/data_processing/detectron2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + MASK_ON: True + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_train_panoptic_separated",) + TEST: 
("coco_2017_val_panoptic_separated",) +DATALOADER: + FILTER_EMPTY_ANNOTATIONS: False diff --git a/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml new file mode 100644 index 0000000..0e01f6f --- /dev/null +++ b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py new file mode 100644 index 0000000..40cf181 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py @@ -0,0 +1,8 @@ +from ..common.optim import SGD as optimizer +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.data.coco_panoptic_separated import dataloader +from ..common.models.panoptic_fpn import model +from ..common.train import train + +model.backbone.bottom_up.freeze_at = 2 +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" diff --git a/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml new file mode 100644 index 0000000..6afa2c1 --- /dev/null +++ b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml new file mode 100644 index 0000000..b956b3f --- /dev/null +++ b/data_processing/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml b/data_processing/detectron2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml new file mode 100644 index 0000000..1a7aaeb --- /dev/null +++ b/data_processing/detectron2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml @@ -0,0 +1,27 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + # WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + # For better, more stable performance initialize from COCO + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" + MASK_ON: True + ROI_HEADS: + NUM_CLASSES: 8 +# This is similar to the setting used in Mask R-CNN paper, Appendix A +# But there are some differences, e.g., we did not initialize the output +# layer using the corresponding classes from COCO +INPUT: + MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 2048 + MAX_SIZE_TEST: 2048 +DATASETS: + TRAIN: ("cityscapes_fine_instance_seg_train",) + TEST: ("cityscapes_fine_instance_seg_val",) +SOLVER: + BASE_LR: 0.01 + STEPS: (18000,) + MAX_ITER: 24000 + IMS_PER_BATCH: 8 +TEST: + 
EVAL_PERIOD: 8000 diff --git a/data_processing/detectron2/configs/Detectron1-Comparisons/README.md b/data_processing/detectron2/configs/Detectron1-Comparisons/README.md new file mode 100644 index 0000000..924fd00 --- /dev/null +++ b/data_processing/detectron2/configs/Detectron1-Comparisons/README.md @@ -0,0 +1,84 @@ + +Detectron2 model zoo's experimental settings and a few implementation details are different from Detectron. + +The differences in implementation details are shared in +[Compatibility with Other Libraries](../../docs/notes/compatibility.md). + +The differences in model zoo's experimental settings include: +* Use scale augmentation during training. This improves AP with lower training cost. +* Use L1 loss instead of smooth L1 loss for simplicity. This sometimes improves box AP but may + affect other AP. +* Use `POOLER_SAMPLING_RATIO=0` instead of 2. This does not significantly affect AP. +* Use `ROIAlignV2`. This does not significantly affect AP. + +In this directory, we provide a few configs that __do not__ have the above changes. +They mimic Detectron's behavior as close as possible, +and provide a fair comparison of accuracy and speed against Detectron. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | kp. AP | model id | download |
|---|---|---|---|---|---|---|---|---|---|
| Faster R-CNN | 1x | 0.219 | 0.038 | 3.1 | 36.9 | | | 137781054 | model \| metrics |
| Keypoint R-CNN | 1x | 0.313 | 0.071 | 5.0 | 53.1 | | 64.2 | 137781195 | model \| metrics |
| Mask R-CNN | 1x | 0.273 | 0.043 | 3.4 | 37.8 | 34.9 | | 137781281 | model \| metrics |
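As a rough Python equivalent (illustrative only), the Detectron1-style settings reduce to a handful of overrides on the standard baseline, mirroring `faster_rcnn_R_50_FPN_noaug_1x.yaml` added further down in this diff:

```python
# Sketch: Detectron1-compatible overrides on top of the R50-FPN baseline.
from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(
    model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
)

cfg.INPUT.MIN_SIZE_TRAIN = (800,)                 # no scale augmentation
cfg.MODEL.RPN.SMOOTH_L1_BETA = 0.1111             # smooth L1 instead of plain L1
cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 1.0
cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 2  # instead of 0
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlign"   # instead of ROIAlignV2
```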
+ +## Comparisons: + +* Faster R-CNN: Detectron's AP is 36.7, similar to ours. +* Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a Detectron's + [bug](https://github.com/facebookresearch/Detectron/issues/459) lead to a drop in box AP, and can be + compensated back by some parameter tuning. +* Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. We're 1 AP better in mask AP, due to more correct implementation. + See [this article](https://ppwwyyxx.com/blog/2021/Where-are-Pixels/) for details. + +For speed comparison, see [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html). diff --git a/data_processing/detectron2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml b/data_processing/detectron2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml new file mode 100644 index 0000000..6ce77f1 --- /dev/null +++ b/data_processing/detectron2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml @@ -0,0 +1,17 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. + RPN: + SMOOTH_L1_BETA: 0.1111 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/data_processing/detectron2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..aacf868 --- /dev/null +++ b/data_processing/detectron2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,27 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + RPN: + SMOOTH_L1_BETA: 0.1111 + # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2 + # 1000 proposals per-image is found to hurt box AP. + # Therefore we increase it to 1500 per-image. + POST_NMS_TOPK_TRAIN: 1500 +DATASETS: + TRAIN: ("keypoints_coco_2017_train",) + TEST: ("keypoints_coco_2017_val",) diff --git a/data_processing/detectron2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml b/data_processing/detectron2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml new file mode 100644 index 0000000..4ea86a8 --- /dev/null +++ b/data_processing/detectron2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. 
+ RPN: + SMOOTH_L1_BETA: 0.1111 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + ROI_MASK_HEAD: + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml b/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml new file mode 100644 index 0000000..f0c3a1b --- /dev/null +++ b/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..64b4caa --- /dev/null +++ b/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml b/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml new file mode 100644 index 0000000..c8b822c --- /dev/null +++ b/data_processing/detectron2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml @@ -0,0 +1,23 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml b/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml new file mode 100644 index 0000000..ca4dd97 --- /dev/null +++ b/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml @@ -0,0 +1,22 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1203 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 
768, 800) +DATASETS: + TRAIN: ("lvis_v1_train",) + TEST: ("lvis_v1_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..f313295 --- /dev/null +++ b/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,22 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1203 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v1_train",) + TEST: ("lvis_v1_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml b/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml new file mode 100644 index 0000000..f6528f7 --- /dev/null +++ b/data_processing/detectron2/configs/LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml @@ -0,0 +1,26 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1203 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v1_train",) + TEST: ("lvis_v1_val",) +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000..abb33b6 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + RPN: + POST_NMS_TOPK_TRAIN: 2000 diff --git a/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml b/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000..e2201ad --- /dev/null +++ b/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,15 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git 
a/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml b/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml new file mode 100644 index 0000000..fc117f6 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml @@ -0,0 +1,36 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k" + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 152 + DEFORM_ON_PER_STAGE: [False, True, True, True] + ROI_HEADS: + NAME: "CascadeROIHeads" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "GN" + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + NUM_CONV: 8 + NORM: "GN" + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + IMS_PER_BATCH: 128 + STEPS: (35000, 45000) + MAX_ITER: 50000 + BASE_LR: 0.16 +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 + CROP: + ENABLED: True +TEST: + EVAL_PERIOD: 2500 diff --git a/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml new file mode 100644 index 0000000..4c3b767 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + CLS_AGNOSTIC_MASK: True diff --git a/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml new file mode 100644 index 0000000..04ff988 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 + DEFORM_MODULATED: False diff --git a/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml new file mode 100644 index 0000000..68c0ca5 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 + DEFORM_MODULATED: False +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 0000000..74d274e --- /dev/null +++ b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,21 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-50-GN" + MASK_ON: True + RESNETS: + DEPTH: 50 + NORM: "GN" + STRIDE_IN_1X1: False + FPN: + NORM: "GN" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "GN" + ROI_MASK_HEAD: + NORM: "GN" +SOLVER: + # 3x schedule + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git 
a/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml new file mode 100644 index 0000000..11ebb07 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml @@ -0,0 +1,24 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + STRIDE_IN_1X1: True + FPN: + NORM: "SyncBN" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "SyncBN" + ROI_MASK_HEAD: + NORM: "SyncBN" +SOLVER: + # 3x schedule + STEPS: (210000, 250000) + MAX_ITER: 270000 +TEST: + PRECISE_BN: + ENABLED: True diff --git a/data_processing/detectron2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py b/data_processing/detectron2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py new file mode 100644 index 0000000..bdd49a4 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py @@ -0,0 +1,152 @@ +# An example config to train a mmdetection model using detectron2. + +from ..common.data.coco import dataloader +from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier +from ..common.optim import SGD as optimizer +from ..common.train import train +from ..common.data.constants import constants + +from detectron2.modeling.mmdet_wrapper import MMDetDetector +from detectron2.config import LazyCall as L + +model = L(MMDetDetector)( + detector=dict( + type="MaskRCNN", + pretrained="torchvision://resnet50", + backbone=dict( + type="ResNet", + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type="BN", requires_grad=True), + norm_eval=True, + style="pytorch", + ), + neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), + rpn_head=dict( + type="RPNHead", + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type="AnchorGenerator", + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64], + ), + bbox_coder=dict( + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + ), + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", loss_weight=1.0), + ), + roi_head=dict( + type="StandardRoIHead", + bbox_roi_extractor=dict( + type="SingleRoIExtractor", + roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + ), + bbox_head=dict( + type="Shared2FCBBoxHead", + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + ), + reg_class_agnostic=False, + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", loss_weight=1.0), + ), + mask_roi_extractor=dict( + type="SingleRoIExtractor", + roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + ), + mask_head=dict( + type="FCNMaskHead", + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0), + ), + ), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type="MaxIoUAssigner", + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + 
ignore_iof_thr=-1, + ), + sampler=dict( + type="RandomSampler", + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False, + ), + allowed_border=-1, + pos_weight=-1, + debug=False, + ), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type="nms", iou_threshold=0.7), + min_bbox_size=0, + ), + rcnn=dict( + assigner=dict( + type="MaxIoUAssigner", + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1, + ), + sampler=dict( + type="RandomSampler", + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + ), + mask_size=28, + pos_weight=-1, + debug=False, + ), + ), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type="nms", iou_threshold=0.7), + min_bbox_size=0, + ), + rcnn=dict( + score_thr=0.05, + nms=dict(type="nms", iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5, + ), + ), + ), + pixel_mean=constants.imagenet_rgb256_mean, + pixel_std=constants.imagenet_rgb256_std, +) + +dataloader.train.mapper.image_format = "RGB" # torchvision pretrained model +train.init_checkpoint = None # pretrained model is loaded inside backbone diff --git a/data_processing/detectron2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml b/data_processing/detectron2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml new file mode 100644 index 0000000..34016ce --- /dev/null +++ b/data_processing/detectron2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml @@ -0,0 +1,26 @@ +# A large PanopticFPN for demo purposes. +# Use GN on backbone to support semantic seg. +# Use Cascade + Deform Conv to improve localization. +_BASE_: "../COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-101-GN" + RESNETS: + DEPTH: 101 + NORM: "GN" + DEFORM_ON_PER_STAGE: [False, True, True, True] + STRIDE_IN_1X1: False + FPN: + NORM: "GN" + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + NORM: "GN" + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + STEPS: (105000, 125000) + MAX_ITER: 135000 + IMS_PER_BATCH: 32 + BASE_LR: 0.04 diff --git a/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml b/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 0000000..f340028 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,13 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" +MODEL: + # Train from random initialization. + WEIGHTS: "" + # It makes sense to divide by STD when training from scratch + # But it seems to make no difference on the results and C2's models didn't do this. + # So we keep things consistent with C2. + # PIXEL_STD: [57.375, 57.12, 58.395] + MASK_ON: True + BACKBONE: + FREEZE_AT: 0 +# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 +# to learn what you need for training from scratch. 
diff --git a/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml b/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml new file mode 100644 index 0000000..d90c9ff --- /dev/null +++ b/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml @@ -0,0 +1,19 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" +MODEL: + PIXEL_STD: [57.375, 57.12, 58.395] + WEIGHTS: "" + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False + BACKBONE: + FREEZE_AT: 0 +SOLVER: + # 9x schedule + IMS_PER_BATCH: 64 # 4x the standard + STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k + MAX_ITER: 202500 # 90k * 9 / 4 + BASE_LR: 0.08 +TEST: + EVAL_PERIOD: 2500 +# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 +# to learn what you need for training from scratch. diff --git a/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml b/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml new file mode 100644 index 0000000..60d4e42 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml @@ -0,0 +1,19 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_syncbn.yaml" +MODEL: + PIXEL_STD: [57.375, 57.12, 58.395] + WEIGHTS: "" + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False + BACKBONE: + FREEZE_AT: 0 +SOLVER: + # 9x schedule + IMS_PER_BATCH: 64 # 4x the standard + STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k + MAX_ITER: 202500 # 90k * 9 / 4 + BASE_LR: 0.08 +TEST: + EVAL_PERIOD: 2500 +# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 +# to learn what you need for training from scratch. diff --git a/data_processing/detectron2/configs/Misc/semantic_R_50_FPN_1x.yaml b/data_processing/detectron2/configs/Misc/semantic_R_50_FPN_1x.yaml new file mode 100644 index 0000000..ac256e1 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/semantic_R_50_FPN_1x.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_train_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/data_processing/detectron2/configs/Misc/torchvision_imagenet_R_50.py b/data_processing/detectron2/configs/Misc/torchvision_imagenet_R_50.py new file mode 100644 index 0000000..0d75305 --- /dev/null +++ b/data_processing/detectron2/configs/Misc/torchvision_imagenet_R_50.py @@ -0,0 +1,150 @@ +""" +An example config file to train a ImageNet classifier with detectron2. +Model and dataloader both come from torchvision. +This shows how to use detectron2 as a general engine for any new models and tasks. 
+ +To run, use the following command: + +python tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \ + --num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/ + +""" + + +import torch +from torch import nn +from torch.nn import functional as F +from omegaconf import OmegaConf +import torchvision +from torchvision.transforms import transforms as T +from torchvision.models.resnet import ResNet, Bottleneck +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2.solver import WarmupParamScheduler +from detectron2.solver.build import get_default_optimizer_params +from detectron2.config import LazyCall as L +from detectron2.model_zoo import get_config +from detectron2.data.samplers import TrainingSampler, InferenceSampler +from detectron2.evaluation import DatasetEvaluator +from detectron2.utils import comm + + +""" +Note: Here we put reusable code (models, evaluation, data) together with configs just as a +proof-of-concept, to easily demonstrate what's needed to train a ImageNet classifier in detectron2. +Writing code in configs offers extreme flexibility but is often not a good engineering practice. +In practice, you might want to put code in your project and import them instead. +""" + + +def build_data_loader(dataset, batch_size, num_workers, training=True): + return torch.utils.data.DataLoader( + dataset, + sampler=(TrainingSampler if training else InferenceSampler)(len(dataset)), + batch_size=batch_size, + num_workers=num_workers, + pin_memory=True, + ) + + +class ClassificationNet(nn.Module): + def __init__(self, model: nn.Module): + super().__init__() + self.model = model + + @property + def device(self): + return list(self.model.parameters())[0].device + + def forward(self, inputs): + image, label = inputs + pred = self.model(image.to(self.device)) + if self.training: + label = label.to(self.device) + return F.cross_entropy(pred, label) + else: + return pred + + +class ClassificationAcc(DatasetEvaluator): + def reset(self): + self.corr = self.total = 0 + + def process(self, inputs, outputs): + image, label = inputs + self.corr += (outputs.argmax(dim=1).cpu() == label.cpu()).sum().item() + self.total += len(label) + + def evaluate(self): + all_corr_total = comm.all_gather([self.corr, self.total]) + corr = sum(x[0] for x in all_corr_total) + total = sum(x[1] for x in all_corr_total) + return {"accuracy": corr / total} + + +# --- End of code that could be in a project and be imported + + +dataloader = OmegaConf.create() +dataloader.train = L(build_data_loader)( + dataset=L(torchvision.datasets.ImageNet)( + root="/path/to/imagenet", + split="train", + transform=L(T.Compose)( + transforms=[ + L(T.RandomResizedCrop)(size=224), + L(T.RandomHorizontalFlip)(), + T.ToTensor(), + L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + ] + ), + ), + batch_size=256 // 8, + num_workers=4, + training=True, +) + +dataloader.test = L(build_data_loader)( + dataset=L(torchvision.datasets.ImageNet)( + root="${...train.dataset.root}", + split="val", + transform=L(T.Compose)( + transforms=[ + L(T.Resize)(size=256), + L(T.CenterCrop)(size=224), + T.ToTensor(), + L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + ] + ), + ), + batch_size=256 // 8, + num_workers=4, + training=False, +) + +dataloader.evaluator = L(ClassificationAcc)() + +model = L(ClassificationNet)( + model=(ResNet)(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=True) +) + + +optimizer = L(torch.optim.SGD)( + 
params=L(get_default_optimizer_params)(), + lr=0.1, + momentum=0.9, + weight_decay=1e-4, +) + +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01, 0.001], milestones=[30, 60, 90, 100] + ), + warmup_length=1 / 100, + warmup_factor=0.1, +) + + +train = get_config("common/train.py").train +train.init_checkpoint = None +train.max_iter = 100 * 1281167 // 256 diff --git a/data_processing/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml b/data_processing/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml new file mode 100644 index 0000000..ea2a6ba --- /dev/null +++ b/data_processing/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 20 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') + TEST: ('voc_2007_test',) +SOLVER: + STEPS: (12000, 16000) + MAX_ITER: 18000 # 17.4 epochs + WARMUP_ITERS: 100 diff --git a/data_processing/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml b/data_processing/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml new file mode 100644 index 0000000..e554cab --- /dev/null +++ b/data_processing/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 20 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') + TEST: ('voc_2007_test',) +SOLVER: + STEPS: (12000, 16000) + MAX_ITER: 18000 # 17.4 epochs + WARMUP_ITERS: 100 diff --git a/data_processing/detectron2/configs/common/README.md b/data_processing/detectron2/configs/common/README.md new file mode 100644 index 0000000..912cc29 --- /dev/null +++ b/data_processing/detectron2/configs/common/README.md @@ -0,0 +1,6 @@ +This directory provides definitions for a few common models, dataloaders, scheduler, +and optimizers that are often used in training. +The definition of these objects are provided in the form of lazy instantiation: +their arguments can be edited by users before constructing the objects. + +They can be imported, or loaded by `model_zoo.get_config` API in users' own configs. diff --git a/data_processing/detectron2/configs/common/coco_schedule.py b/data_processing/detectron2/configs/common/coco_schedule.py new file mode 100644 index 0000000..355e66a --- /dev/null +++ b/data_processing/detectron2/configs/common/coco_schedule.py @@ -0,0 +1,47 @@ +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2.config import LazyCall as L +from detectron2.solver import WarmupParamScheduler + + +def default_X_scheduler(num_X): + """ + Returns the config for a default multi-step LR scheduler such as "1x", "3x", + commonly referred to in papers, where every 1x has the total length of 1440k + training images (~12 COCO epochs). LR is decayed twice at the end of training + following the strategy defined in "Rethinking ImageNet Pretraining", Sec 4. 
+ + Args: + num_X: a positive real number + + Returns: + DictConfig: configs that define the multiplier for LR during training + """ + # total number of iterations assuming 16 batch size, using 1440000/16=90000 + total_steps_16bs = num_X * 90000 + + if num_X <= 2: + scheduler = L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + # note that scheduler is scale-invariant. This is equivalent to + # milestones=[6, 8, 9] + milestones=[60000, 80000, 90000], + ) + else: + scheduler = L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs], + ) + return L(WarmupParamScheduler)( + scheduler=scheduler, + warmup_length=1000 / total_steps_16bs, + warmup_method="linear", + warmup_factor=0.001, + ) + + +lr_multiplier_1x = default_X_scheduler(1) +lr_multiplier_2x = default_X_scheduler(2) +lr_multiplier_3x = default_X_scheduler(3) +lr_multiplier_6x = default_X_scheduler(6) +lr_multiplier_9x = default_X_scheduler(9) diff --git a/data_processing/detectron2/configs/common/data/coco.py b/data_processing/detectron2/configs/common/data/coco.py new file mode 100644 index 0000000..703c438 --- /dev/null +++ b/data_processing/detectron2/configs/common/data/coco.py @@ -0,0 +1,48 @@ +from omegaconf import OmegaConf + +import detectron2.data.transforms as T +from detectron2.config import LazyCall as L +from detectron2.data import ( + DatasetMapper, + build_detection_test_loader, + build_detection_train_loader, + get_detection_dataset_dicts, +) +from detectron2.evaluation import COCOEvaluator + +dataloader = OmegaConf.create() + +dataloader.train = L(build_detection_train_loader)( + dataset=L(get_detection_dataset_dicts)(names="coco_2017_train"), + mapper=L(DatasetMapper)( + is_train=True, + augmentations=[ + L(T.ResizeShortestEdge)( + short_edge_length=(640, 672, 704, 736, 768, 800), + sample_style="choice", + max_size=1333, + ), + L(T.RandomFlip)(horizontal=True), + ], + image_format="BGR", + use_instance_mask=True, + ), + total_batch_size=16, + num_workers=4, +) + +dataloader.test = L(build_detection_test_loader)( + dataset=L(get_detection_dataset_dicts)(names="coco_2017_val", filter_empty=False), + mapper=L(DatasetMapper)( + is_train=False, + augmentations=[ + L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333), + ], + image_format="${...train.mapper.image_format}", + ), + num_workers=4, +) + +dataloader.evaluator = L(COCOEvaluator)( + dataset_name="${..test.dataset.names}", +) diff --git a/data_processing/detectron2/configs/common/data/coco_keypoint.py b/data_processing/detectron2/configs/common/data/coco_keypoint.py new file mode 100644 index 0000000..b4ceb06 --- /dev/null +++ b/data_processing/detectron2/configs/common/data/coco_keypoint.py @@ -0,0 +1,13 @@ +from detectron2.data.detection_utils import create_keypoint_hflip_indices + +from .coco import dataloader + +dataloader.train.dataset.min_keypoints = 1 +dataloader.train.dataset.names = "keypoints_coco_2017_train" +dataloader.test.dataset.names = "keypoints_coco_2017_val" + +dataloader.train.mapper.update( + use_instance_mask=False, + use_keypoint=True, + keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names), +) diff --git a/data_processing/detectron2/configs/common/data/coco_panoptic_separated.py b/data_processing/detectron2/configs/common/data/coco_panoptic_separated.py new file mode 100644 index 0000000..5ccbc77 --- /dev/null +++ b/data_processing/detectron2/configs/common/data/coco_panoptic_separated.py @@ -0,0 +1,26 @@ +from 
detectron2.config import LazyCall as L +from detectron2.evaluation import ( + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, + SemSegEvaluator, +) + +from .coco import dataloader + +dataloader.train.dataset.names = "coco_2017_train_panoptic_separated" +dataloader.train.dataset.filter_empty = False +dataloader.test.dataset.names = "coco_2017_val_panoptic_separated" + + +dataloader.evaluator = [ + L(COCOEvaluator)( + dataset_name="${...test.dataset.names}", + ), + L(SemSegEvaluator)( + dataset_name="${...test.dataset.names}", + ), + L(COCOPanopticEvaluator)( + dataset_name="${...test.dataset.names}", + ), +] diff --git a/data_processing/detectron2/configs/common/data/constants.py b/data_processing/detectron2/configs/common/data/constants.py new file mode 100644 index 0000000..be11cb5 --- /dev/null +++ b/data_processing/detectron2/configs/common/data/constants.py @@ -0,0 +1,9 @@ +constants = dict( + imagenet_rgb256_mean=[123.675, 116.28, 103.53], + imagenet_rgb256_std=[58.395, 57.12, 57.375], + imagenet_bgr256_mean=[103.530, 116.280, 123.675], + # When using pre-trained models in Detectron1 or any MSRA models, + # std has been absorbed into its conv1 weights, so the std needs to be set 1. + # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) + imagenet_bgr256_std=[1.0, 1.0, 1.0], +) diff --git a/data_processing/detectron2/configs/common/models/cascade_rcnn.py b/data_processing/detectron2/configs/common/models/cascade_rcnn.py new file mode 100644 index 0000000..c7372a8 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/cascade_rcnn.py @@ -0,0 +1,36 @@ +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads + +from .mask_rcnn_fpn import model + +# arguments that don't exist for Cascade R-CNN +[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] + +model.roi_heads.update( + _target_=CascadeROIHeads, + box_heads=[ + L(FastRCNNConvFCHead)( + input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[], + fc_dims=[1024, 1024], + ) + for k in range(3) + ], + box_predictors=[ + L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + test_score_thresh=0.05, + box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), + cls_agnostic_bbox_reg=True, + num_classes="${...num_classes}", + ) + for (w1, w2) in [(10, 5), (20, 10), (30, 15)] + ], + proposal_matchers=[ + L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) + for th in [0.5, 0.6, 0.7] + ], +) diff --git a/data_processing/detectron2/configs/common/models/fcos.py b/data_processing/detectron2/configs/common/models/fcos.py new file mode 100644 index 0000000..1c75202 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/fcos.py @@ -0,0 +1,23 @@ +from detectron2.modeling.meta_arch.fcos import FCOS, FCOSHead + +from .retinanet import model + +model._target_ = FCOS + +del model.anchor_generator +del model.box2box_transform +del model.anchor_matcher +del model.input_format + +# Use P5 instead of C5 to compute P6/P7 +# (Sec 2.2 of https://arxiv.org/abs/2006.09214) +model.backbone.top_block.in_feature = "p5" +model.backbone.top_block.in_channels = 256 + +# New score threshold determined based on sqrt(cls_score * centerness) 
+model.test_score_thresh = 0.2 +model.test_nms_thresh = 0.6 + +model.head._target_ = FCOSHead +del model.head.num_anchors +model.head.norm = "GN" diff --git a/data_processing/detectron2/configs/common/models/keypoint_rcnn_fpn.py b/data_processing/detectron2/configs/common/models/keypoint_rcnn_fpn.py new file mode 100644 index 0000000..56b3994 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/keypoint_rcnn_fpn.py @@ -0,0 +1,33 @@ +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling.poolers import ROIPooler +from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead + +from .mask_rcnn_fpn import model + +[model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]] + +model.roi_heads.update( + num_classes=1, + keypoint_in_features=["p2", "p3", "p4", "p5"], + keypoint_pooler=L(ROIPooler)( + output_size=14, + scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), + sampling_ratio=0, + pooler_type="ROIAlignV2", + ), + keypoint_head=L(KRCNNConvDeconvUpsampleHead)( + input_shape=ShapeSpec(channels=256, width=14, height=14), + num_keypoints=17, + conv_dims=[512] * 8, + loss_normalizer="visible", + ), +) + +# Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. +# 1000 proposals per-image is found to hurt box AP. +# Therefore we increase it to 1500 per-image. +model.proposal_generator.post_nms_topk = (1500, 1000) + +# Keypoint AP degrades (though box AP improves) when using plain L1 loss +model.roi_heads.box_predictor.smooth_l1_beta = 0.5 diff --git a/data_processing/detectron2/configs/common/models/mask_rcnn_c4.py b/data_processing/detectron2/configs/common/models/mask_rcnn_c4.py new file mode 100644 index 0000000..902d5b1 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/mask_rcnn_c4.py @@ -0,0 +1,90 @@ +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling.meta_arch import GeneralizedRCNN +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator +from detectron2.modeling.backbone import BasicStem, BottleneckBlock, ResNet +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.poolers import ROIPooler +from detectron2.modeling.proposal_generator import RPN, StandardRPNHead +from detectron2.modeling.roi_heads import ( + FastRCNNOutputLayers, + MaskRCNNConvUpsampleHead, + Res5ROIHeads, +) + +from ..data.constants import constants + +model = L(GeneralizedRCNN)( + backbone=L(ResNet)( + stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), + stages=L(ResNet.make_default_stages)( + depth=50, + stride_in_1x1=True, + norm="FrozenBN", + ), + out_features=["res4"], + ), + proposal_generator=L(RPN)( + in_features=["res4"], + head=L(StandardRPNHead)(in_channels=1024, num_anchors=15), + anchor_generator=L(DefaultAnchorGenerator)( + sizes=[[32, 64, 128, 256, 512]], + aspect_ratios=[0.5, 1.0, 2.0], + strides=[16], + offset=0.0, + ), + anchor_matcher=L(Matcher)( + thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True + ), + box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]), + batch_size_per_image=256, + positive_fraction=0.5, + pre_nms_topk=(12000, 6000), + post_nms_topk=(2000, 1000), + nms_thresh=0.7, + ), + roi_heads=L(Res5ROIHeads)( + num_classes=80, + batch_size_per_image=512, + positive_fraction=0.25, + proposal_matcher=L(Matcher)( + thresholds=[0.5], 
labels=[0, 1], allow_low_quality_matches=False + ), + in_features=["res4"], + pooler=L(ROIPooler)( + output_size=14, + scales=(1.0 / 16,), + sampling_ratio=0, + pooler_type="ROIAlignV2", + ), + res5=L(ResNet.make_stage)( + block_class=BottleneckBlock, + num_blocks=3, + stride_per_block=[2, 1, 1], + in_channels=1024, + bottleneck_channels=512, + out_channels=2048, + norm="FrozenBN", + stride_in_1x1=True, + ), + box_predictor=L(FastRCNNOutputLayers)( + input_shape=L(ShapeSpec)(channels="${...res5.out_channels}", height=1, width=1), + test_score_thresh=0.05, + box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)), + num_classes="${..num_classes}", + ), + mask_head=L(MaskRCNNConvUpsampleHead)( + input_shape=L(ShapeSpec)( + channels="${...res5.out_channels}", + width="${...pooler.output_size}", + height="${...pooler.output_size}", + ), + num_classes="${..num_classes}", + conv_dims=[256], + ), + ), + pixel_mean=constants.imagenet_bgr256_mean, + pixel_std=constants.imagenet_bgr256_std, + input_format="BGR", +) diff --git a/data_processing/detectron2/configs/common/models/mask_rcnn_fpn.py b/data_processing/detectron2/configs/common/models/mask_rcnn_fpn.py new file mode 100644 index 0000000..5e5c501 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/mask_rcnn_fpn.py @@ -0,0 +1,95 @@ +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling.meta_arch import GeneralizedRCNN +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator +from detectron2.modeling.backbone.fpn import LastLevelMaxPool +from detectron2.modeling.backbone import BasicStem, FPN, ResNet +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.poolers import ROIPooler +from detectron2.modeling.proposal_generator import RPN, StandardRPNHead +from detectron2.modeling.roi_heads import ( + StandardROIHeads, + FastRCNNOutputLayers, + MaskRCNNConvUpsampleHead, + FastRCNNConvFCHead, +) + +from ..data.constants import constants + +model = L(GeneralizedRCNN)( + backbone=L(FPN)( + bottom_up=L(ResNet)( + stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), + stages=L(ResNet.make_default_stages)( + depth=50, + stride_in_1x1=True, + norm="FrozenBN", + ), + out_features=["res2", "res3", "res4", "res5"], + ), + in_features="${.bottom_up.out_features}", + out_channels=256, + top_block=L(LastLevelMaxPool)(), + ), + proposal_generator=L(RPN)( + in_features=["p2", "p3", "p4", "p5", "p6"], + head=L(StandardRPNHead)(in_channels=256, num_anchors=3), + anchor_generator=L(DefaultAnchorGenerator)( + sizes=[[32], [64], [128], [256], [512]], + aspect_ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64], + offset=0.0, + ), + anchor_matcher=L(Matcher)( + thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True + ), + box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]), + batch_size_per_image=256, + positive_fraction=0.5, + pre_nms_topk=(2000, 1000), + post_nms_topk=(1000, 1000), + nms_thresh=0.7, + ), + roi_heads=L(StandardROIHeads)( + num_classes=80, + batch_size_per_image=512, + positive_fraction=0.25, + proposal_matcher=L(Matcher)( + thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False + ), + box_in_features=["p2", "p3", "p4", "p5"], + box_pooler=L(ROIPooler)( + output_size=7, + scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), + sampling_ratio=0, + pooler_type="ROIAlignV2", + ), + box_head=L(FastRCNNConvFCHead)( + 
input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[], + fc_dims=[1024, 1024], + ), + box_predictor=L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + test_score_thresh=0.05, + box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)), + num_classes="${..num_classes}", + ), + mask_in_features=["p2", "p3", "p4", "p5"], + mask_pooler=L(ROIPooler)( + output_size=14, + scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), + sampling_ratio=0, + pooler_type="ROIAlignV2", + ), + mask_head=L(MaskRCNNConvUpsampleHead)( + input_shape=ShapeSpec(channels=256, width=14, height=14), + num_classes="${..num_classes}", + conv_dims=[256, 256, 256, 256, 256], + ), + ), + pixel_mean=constants.imagenet_bgr256_mean, + pixel_std=constants.imagenet_bgr256_std, + input_format="BGR", +) diff --git a/data_processing/detectron2/configs/common/models/mask_rcnn_vitdet.py b/data_processing/detectron2/configs/common/models/mask_rcnn_vitdet.py new file mode 100644 index 0000000..d6f5244 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/mask_rcnn_vitdet.py @@ -0,0 +1,59 @@ +from functools import partial +import torch.nn as nn +from detectron2.config import LazyCall as L +from detectron2.modeling import ViT, SimpleFeaturePyramid +from detectron2.modeling.backbone.fpn import LastLevelMaxPool + +from .mask_rcnn_fpn import model +from ..data.constants import constants + +model.pixel_mean = constants.imagenet_rgb256_mean +model.pixel_std = constants.imagenet_rgb256_std +model.input_format = "RGB" + +# Base +embed_dim, depth, num_heads, dp = 768, 12, 12, 0.1 +# Creates Simple Feature Pyramid from ViT backbone +model.backbone = L(SimpleFeaturePyramid)( + net=L(ViT)( # Single-scale ViT backbone + img_size=1024, + patch_size=16, + embed_dim=embed_dim, + depth=depth, + num_heads=num_heads, + drop_path_rate=dp, + window_size=14, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + window_block_indexes=[ + # 2, 5, 8 11 for global attention + 0, + 1, + 3, + 4, + 6, + 7, + 9, + 10, + ], + residual_block_indexes=[], + use_rel_pos=True, + out_feature="last_feat", + ), + in_feature="${.net.out_feature}", + out_channels=256, + scale_factors=(4.0, 2.0, 1.0, 0.5), + top_block=L(LastLevelMaxPool)(), + norm="LN", + square_pad=1024, +) + +model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "LN" + +# 2conv in RPN: +model.proposal_generator.head.conv_dims = [-1, -1] + +# 4conv1fc box head +model.roi_heads.box_head.conv_dims = [256, 256, 256, 256] +model.roi_heads.box_head.fc_dims = [1024] diff --git a/data_processing/detectron2/configs/common/models/panoptic_fpn.py b/data_processing/detectron2/configs/common/models/panoptic_fpn.py new file mode 100644 index 0000000..88f55d2 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/panoptic_fpn.py @@ -0,0 +1,20 @@ +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling import PanopticFPN +from detectron2.modeling.meta_arch.semantic_seg import SemSegFPNHead + +from .mask_rcnn_fpn import model + +model._target_ = PanopticFPN +model.sem_seg_head = L(SemSegFPNHead)( + input_shape={ + f: L(ShapeSpec)(stride=s, channels="${....backbone.out_channels}") + for f, s in zip(["p2", "p3", "p4", "p5"], [4, 8, 16, 32]) + }, + ignore_value=255, + num_classes=54, # COCO stuff + 1 + conv_dims=128, + common_stride=4, + loss_weight=0.5, + norm="GN", +) diff --git a/data_processing/detectron2/configs/common/models/retinanet.py 
b/data_processing/detectron2/configs/common/models/retinanet.py new file mode 100644 index 0000000..784e531 --- /dev/null +++ b/data_processing/detectron2/configs/common/models/retinanet.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling.meta_arch import RetinaNet +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator +from detectron2.modeling.backbone.fpn import LastLevelP6P7 +from detectron2.modeling.backbone import BasicStem, FPN, ResNet +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.meta_arch.retinanet import RetinaNetHead + +from ..data.constants import constants + +model = L(RetinaNet)( + backbone=L(FPN)( + bottom_up=L(ResNet)( + stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), + stages=L(ResNet.make_default_stages)( + depth=50, + stride_in_1x1=True, + norm="FrozenBN", + ), + out_features=["res3", "res4", "res5"], + ), + in_features=["res3", "res4", "res5"], + out_channels=256, + top_block=L(LastLevelP6P7)(in_channels=2048, out_channels="${..out_channels}"), + ), + head=L(RetinaNetHead)( + # Shape for each input feature map + input_shape=[ShapeSpec(channels=256)] * 5, + num_classes="${..num_classes}", + conv_dims=[256, 256, 256, 256], + prior_prob=0.01, + num_anchors=9, + ), + anchor_generator=L(DefaultAnchorGenerator)( + sizes=[[x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]], + aspect_ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128], + offset=0.0, + ), + box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]), + anchor_matcher=L(Matcher)( + thresholds=[0.4, 0.5], labels=[0, -1, 1], allow_low_quality_matches=True + ), + num_classes=80, + head_in_features=["p3", "p4", "p5", "p6", "p7"], + focal_loss_alpha=0.25, + focal_loss_gamma=2.0, + pixel_mean=constants.imagenet_bgr256_mean, + pixel_std=constants.imagenet_bgr256_std, + input_format="BGR", +) diff --git a/data_processing/detectron2/configs/common/optim.py b/data_processing/detectron2/configs/common/optim.py new file mode 100644 index 0000000..6cf43e8 --- /dev/null +++ b/data_processing/detectron2/configs/common/optim.py @@ -0,0 +1,28 @@ +import torch + +from detectron2.config import LazyCall as L +from detectron2.solver.build import get_default_optimizer_params + +SGD = L(torch.optim.SGD)( + params=L(get_default_optimizer_params)( + # params.model is meant to be set to the model object, before instantiating + # the optimizer. + weight_decay_norm=0.0 + ), + lr=0.02, + momentum=0.9, + weight_decay=1e-4, +) + + +AdamW = L(torch.optim.AdamW)( + params=L(get_default_optimizer_params)( + # params.model is meant to be set to the model object, before instantiating + # the optimizer. 
+ base_lr="${..lr}", + weight_decay_norm=0.0, + ), + lr=1e-4, + betas=(0.9, 0.999), + weight_decay=0.1, +) diff --git a/data_processing/detectron2/configs/common/train.py b/data_processing/detectron2/configs/common/train.py new file mode 100644 index 0000000..b6ed02b --- /dev/null +++ b/data_processing/detectron2/configs/common/train.py @@ -0,0 +1,18 @@ +# Common training-related configs that are designed for "tools/lazyconfig_train_net.py" +# You can use your own instead, together with your own train_net.py +train = dict( + output_dir="./output", + init_checkpoint="", + max_iter=90000, + amp=dict(enabled=False), # options for Automatic Mixed Precision + ddp=dict( # options for DistributedDataParallel + broadcast_buffers=False, + find_unused_parameters=False, + fp16_compression=False, + ), + checkpointer=dict(period=5000, max_to_keep=100), # options for PeriodicCheckpointer + eval_period=5000, + log_period=20, + device="cuda" + # ... +) diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py new file mode 100644 index 0000000..3740e9b --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py @@ -0,0 +1,9 @@ +from .mask_rcnn_R_50_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +model.backbone.bottom_up.stages.depth = 101 diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py new file mode 100644 index 0000000..18e5f07 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_R_101_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 2 # 100ep -> 200ep + +lr_multiplier.scheduler.milestones = [ + milestone * 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py new file mode 100644 index 0000000..63c54ee --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_R_101_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 4 # 100ep -> 400ep + +lr_multiplier.scheduler.milestones = [ + milestone * 4 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py new file mode 100644 index 0000000..df7a2ae --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py @@ -0,0 +1,72 @@ +import detectron2.data.transforms as T +from detectron2.config.lazy import LazyCall as L +from detectron2.layers.batch_norm import NaiveSyncBatchNorm +from detectron2.solver import WarmupParamScheduler +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from ..common.data.coco import dataloader +from ..common.models.mask_rcnn_fpn import model +from ..common.optim import SGD as optimizer +from ..common.train import train + 
+# train from scratch +train.init_checkpoint = "" +train.amp.enabled = True +train.ddp.fp16_compression = True +model.backbone.bottom_up.freeze_at = 0 + +# SyncBN +# fmt: off +model.backbone.bottom_up.stem.norm = \ + model.backbone.bottom_up.stages.norm = \ + model.backbone.norm = "SyncBN" + +# Using NaiveSyncBatchNorm becase heads may have empty input. That is not supported by +# torch.nn.SyncBatchNorm. We can remove this after +# https://github.com/pytorch/pytorch/issues/36530 is fixed. +model.roi_heads.box_head.conv_norm = \ + model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c, + stats_mode="N") +# fmt: on + +# 2conv in RPN: +# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950 +model.proposal_generator.head.conv_dims = [-1, -1] + +# 4conv1fc box head +model.roi_heads.box_head.conv_dims = [256, 256, 256, 256] +model.roi_heads.box_head.fc_dims = [1024] + +# resize_and_crop_image in: +# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950 +image_size = 1024 +dataloader.train.mapper.augmentations = [ + L(T.ResizeScale)( + min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size + ), + L(T.FixedSizeCrop)(crop_size=(image_size, image_size)), + L(T.RandomFlip)(horizontal=True), +] + +# recompute boxes due to cropping +dataloader.train.mapper.recompute_boxes = True + +# larger batch-size. +dataloader.train.total_batch_size = 64 + +# Equivalent to 100 epochs. 
+# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep +train.max_iter = 184375 + +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[163889, 177546], + num_updates=train.max_iter, + ), + warmup_length=500 / train.max_iter, + warmup_factor=0.067, +) + +optimizer.lr = 0.1 +optimizer.weight_decay = 4e-5 diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py new file mode 100644 index 0000000..2a7c376 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_R_50_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 2 # 100ep -> 200ep + +lr_multiplier.scheduler.milestones = [ + milestone * 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py new file mode 100644 index 0000000..97586b8 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_R_50_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 4 # 100ep -> 400ep + +lr_multiplier.scheduler.milestones = [ + milestone * 4 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py new file mode 100644 index 0000000..2ca1ede --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_R_50_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter //= 2 # 100ep -> 50ep + +lr_multiplier.scheduler.milestones = [ + milestone // 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py new file mode 100644 index 0000000..ef0b6d1 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py @@ -0,0 +1,29 @@ +from .mask_rcnn_R_50_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) +from detectron2.config import LazyCall as L +from detectron2.modeling.backbone import RegNet +from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock + +# Config source: +# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py # noqa +model.backbone.bottom_up = L(RegNet)( + stem_class=SimpleStem, + stem_width=32, + block_class=ResBottleneckBlock, + depth=23, + w_a=38.65, + w_0=96, + w_m=2.43, + group_width=40, + norm="SyncBN", + out_features=["s1", "s2", "s3", "s4"], +) +model.pixel_std = [57.375, 57.120, 58.395] + +# RegNets benefit from enabling cudnn benchmark mode 
+train.cudnn_benchmark = True diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py new file mode 100644 index 0000000..731320e --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 2 # 100ep -> 200ep + +lr_multiplier.scheduler.milestones = [ + milestone * 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py new file mode 100644 index 0000000..8f369a2 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 4 # 100ep -> 400ep + +lr_multiplier.scheduler.milestones = [ + milestone * 4 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py new file mode 100644 index 0000000..ba2c327 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py @@ -0,0 +1,30 @@ +from .mask_rcnn_R_50_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) +from detectron2.config import LazyCall as L +from detectron2.modeling.backbone import RegNet +from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock + +# Config source: +# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py # noqa +model.backbone.bottom_up = L(RegNet)( + stem_class=SimpleStem, + stem_width=32, + block_class=ResBottleneckBlock, + depth=22, + w_a=31.41, + w_0=96, + w_m=2.24, + group_width=64, + se_ratio=0.25, + norm="SyncBN", + out_features=["s1", "s2", "s3", "s4"], +) +model.pixel_std = [57.375, 57.120, 58.395] + +# RegNets benefit from enabling cudnn benchmark mode +train.cudnn_benchmark = True diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py new file mode 100644 index 0000000..b867cc8 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 2 # 100ep -> 200ep + +lr_multiplier.scheduler.milestones = [ + milestone * 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py 
b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py new file mode 100644 index 0000000..7b86ea8 --- /dev/null +++ b/data_processing/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py @@ -0,0 +1,14 @@ +from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +train.max_iter *= 4 # 100ep -> 400ep + +lr_multiplier.scheduler.milestones = [ + milestone * 4 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/configs/quick_schedules/README.md b/data_processing/detectron2/configs/quick_schedules/README.md new file mode 100644 index 0000000..4e6c82e --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/README.md @@ -0,0 +1,8 @@ +These are quick configs for performance or accuracy regression tracking purposes. + +* `*instance_test.yaml`: can train on 2 GPUs. They are used to test whether the training can + successfully finish. They are not expected to produce reasonable training results. +* `*inference_acc_test.yaml`: They should be run using `--eval-only`. They run inference using pre-trained models and verify + the results are as expected. +* `*training_acc_test.yaml`: They should be trained on 8 GPUs. They finish in about an hour and verify the training accuracy + is within the normal range. diff --git a/data_processing/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..fc5a411 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://Misc/cascade_mask_rcnn_R_50_FPN_3x/144998488/model_final_480dd8.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 50.18, 0.02], ["segm", "AP", 43.87, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..e41a0fe --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..a2f37e5 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/model_final_e5f7ce.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 45.70, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml 
b/data_processing/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..52fc0ec --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,15 @@ +_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) + TEST: ("coco_2017_val_100",) + PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..14cf2aa --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl" +DATASETS: + TEST: ("keypoints_coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 52.47, 0.02], ["keypoints", "AP", 67.36, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..3dd209f --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,16 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + ROI_HEADS: + NUM_CLASSES: 1 +DATASETS: + TRAIN: ("keypoints_coco_2017_val_100",) + TEST: ("keypoints_coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml new file mode 100644 index 0000000..4b92392 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml @@ -0,0 +1,30 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: False + LOSS_WEIGHT: 4.0 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss + RPN: + SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss +DATASETS: + TRAIN: ("keypoints_coco_2017_val",) + TEST: ("keypoints_coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + WARMUP_FACTOR: 0.33333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 55.35, 1.0], ["keypoints", "AP", 76.91, 1.0]] diff --git 
a/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000..9bd9628 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,28 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss + RPN: + SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss +DATASETS: + TRAIN: ("keypoints_coco_2017_val",) + TEST: ("keypoints_coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + WARMUP_FACTOR: 0.33333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 53.5, 1.0], ["keypoints", "AP", 72.4, 1.0]] diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml new file mode 100644 index 0000000..ab6e698 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.001 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "value" + CLIP_VALUE: 1.0 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml new file mode 100644 index 0000000..b2d5b7f --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/model_final_4ce675.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.37, 0.02], ["segm", "AP", 40.99, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml new file mode 100644 index 0000000..6c4f121 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.001 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml new file mode 100644 index 0000000..f68dd8f --- /dev/null +++ 
b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml @@ -0,0 +1,22 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val",) + TEST: ("coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (600,) + MAX_SIZE_TRAIN: 1000 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1000 +SOLVER: + IMS_PER_BATCH: 8 # base uses 16 + WARMUP_FACTOR: 0.33333 + WARMUP_ITERS: 100 + STEPS: (11000, 11600) + MAX_ITER: 12000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 41.88, 0.7], ["segm", "AP", 33.79, 0.5]] diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml new file mode 100644 index 0000000..e3ce6cf --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.44, 0.02], ["segm", "AP", 42.94, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..e5454bf --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,10 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.34, 0.02], ["segm", "AP", 42.67, 0.02], ["bbox_TTA", "AP", 49.11, 0.02], ["segm_TTA", "AP", 45.04, 0.02]] + AUG: + ENABLED: True + MIN_SIZES: (700, 800) # to save some time diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..6dbfcde --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml new file mode 100644 index 0000000..52f7876 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml @@ -0,0 +1,6 @@ +_BASE_: "./mask_rcnn_R_50_FPN_training_acc_test.yaml" +MODEL: + ROI_BOX_HEAD: + TRAIN_ON_PRED_BOXES: True +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 42.6, 1.0], ["segm", "AP", 35.8, 0.8]] diff --git a/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml 
b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000..aadae4c --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,21 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val",) + TEST: ("coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (600,) + MAX_SIZE_TRAIN: 1000 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1000 +SOLVER: + WARMUP_FACTOR: 0.3333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 42.5, 1.0], ["segm", "AP", 35.8, 0.8]] diff --git a/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml new file mode 100644 index 0000000..70874e3 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl" +DATASETS: + TEST: ("coco_2017_val_100_panoptic_separated",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 46.47, 0.02], ["segm", "AP", 43.39, 0.02], ["sem_seg", "mIoU", 42.55, 0.02], ["panoptic_seg", "PQ", 38.99, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml new file mode 100644 index 0000000..7cdee7b --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_val_100_panoptic_separated",) + TEST: ("coco_2017_val_100_panoptic_separated",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 1 diff --git a/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml new file mode 100644 index 0000000..f3bbf30 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_val_panoptic_separated",) + TEST: ("coco_2017_val_panoptic_separated",) +SOLVER: + BASE_LR: 0.01 + WARMUP_FACTOR: 0.001 + WARMUP_ITERS: 500 + STEPS: (5500,) + MAX_ITER: 7000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 46.70, 1.1], ["segm", "AP", 39.0, 0.7], ["sem_seg", "mIoU", 64.73, 1.3], ["panoptic_seg", "PQ", 48.13, 0.8]] diff --git a/data_processing/detectron2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..cb666c1 --- /dev/null +++ 
b/data_processing/detectron2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/retinanet_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/retinanet_R_50_FPN_3x/190397829/model_final_5bd44e.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 44.45, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..8d95c1f --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../COCO-Detection/retinanet_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..c7c3f90 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["box_proposals", "AR@1000", 58.16, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..402d432 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + STEPS: (30,) + MAX_ITER: 40 + BASE_LR: 0.005 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..bca7498 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://semantic_R_50_FPN_1x/111802073/model_final_c18079783c55a94968edc28b7101c5f0.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TEST: ("coco_2017_val_100_panoptic_stuffonly",) +TEST: + EXPECTED_RESULTS: [["sem_seg", "mIoU", 39.53, 0.02], ["sem_seg", "mACC", 51.50, 0.02]] diff --git a/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml b/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..14ab606 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 
+DATASETS: + TRAIN: ("coco_2017_val_100_panoptic_stuffonly",) + TEST: ("coco_2017_val_100_panoptic_stuffonly",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml b/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000..1f78d77 --- /dev/null +++ b/data_processing/detectron2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_val_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) +SOLVER: + BASE_LR: 0.01 + WARMUP_FACTOR: 0.001 + WARMUP_ITERS: 300 + STEPS: (5500,) + MAX_ITER: 7000 +TEST: + EXPECTED_RESULTS: [["sem_seg", "mIoU", 76.51, 1.0], ["sem_seg", "mACC", 83.25, 1.0]] +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/data_processing/detectron2/datasets/README.md b/data_processing/detectron2/datasets/README.md new file mode 100644 index 0000000..0eb44cc --- /dev/null +++ b/data_processing/detectron2/datasets/README.md @@ -0,0 +1,140 @@ +# Use Builtin Datasets + +A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog) +for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc). +This document explains how to setup the builtin datasets so they can be used by the above APIs. +[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`, +and how to add new datasets to them. + +Detectron2 has builtin support for a few datasets. +The datasets are assumed to exist in a directory specified by the environment variable +`DETECTRON2_DATASETS`. +Under this directory, detectron2 will look for datasets in the structure described below, if needed. +``` +$DETECTRON2_DATASETS/ + coco/ + lvis/ + cityscapes/ + VOC20{07,12}/ +``` + +You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`. +If left unset, the default is `./datasets` relative to your current working directory. + +The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) +contains configs and models that use these builtin datasets. + +## Expected dataset structure for [COCO instance/keypoint detection](https://cocodataset.org/#download): + +``` +coco/ + annotations/ + instances_{train,val}2017.json + person_keypoints_{train,val}2017.json + {train,val}2017/ + # image files that are mentioned in the corresponding json +``` + +You can use the 2014 version of the dataset as well. + +Some of the builtin tests (`dev/run_*_tests.sh`) uses a tiny version of the COCO dataset, +which you can download with `./datasets/prepare_for_tests.sh`. 
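
As a quick sanity check after arranging the COCO files, the catalogs mentioned at the top of this document can be queried directly. This is a minimal sketch, not part of detectron2's own files; it assumes detectron2 (with pycocotools) is installed, that `DETECTRON2_DATASETS` points at the directory laid out above, and it uses the builtin dataset name `coco_2017_val`:

```python
import os

# Must be set before detectron2.data is imported, since the builtin
# datasets are registered against $DETECTRON2_DATASETS at import time.
os.environ.setdefault("DETECTRON2_DATASETS", "/path/to/datasets")

from detectron2.data import DatasetCatalog, MetadataCatalog

# Reads annotations/instances_val2017.json and returns a list[dict]
# in detectron2's standard dataset format.
records = DatasetCatalog.get("coco_2017_val")

# Metadata (class names, colors, ...) is available even without the images.
meta = MetadataCatalog.get("coco_2017_val")
print(len(records), meta.thing_classes[:5])
```

If this prints the record count and the first few class names, the COCO layout is usable by the training and evaluation scripts.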
+ +## Expected dataset structure for PanopticFPN: + +Extract panoptic annotations from [COCO website](https://cocodataset.org/#download) +into the following structure: +``` +coco/ + annotations/ + panoptic_{train,val}2017.json + panoptic_{train,val}2017/ # png annotations + panoptic_stuff_{train,val}2017/ # generated by the script mentioned below +``` + +Install panopticapi by: +``` +pip install git+https://github.com/cocodataset/panopticapi.git +``` +Then, run `python datasets/prepare_panoptic_fpn.py`, to extract semantic annotations from panoptic annotations. + +## Expected dataset structure for [LVIS instance segmentation](https://www.lvisdataset.org/dataset): +``` +coco/ + {train,val,test}2017/ +lvis/ + lvis_v0.5_{train,val}.json + lvis_v0.5_image_info_test.json + lvis_v1_{train,val}.json + lvis_v1_image_info_test{,_challenge}.json +``` + +Install lvis-api by: +``` +pip install git+https://github.com/lvis-dataset/lvis-api.git +``` + +To evaluate models trained on the COCO dataset using LVIS annotations, +run `python datasets/prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations. + +## Expected dataset structure for [cityscapes](https://www.cityscapes-dataset.com/downloads/): +``` +cityscapes/ + gtFine/ + train/ + aachen/ + color.png, instanceIds.png, labelIds.png, polygons.json, + labelTrainIds.png + ... + val/ + test/ + # below are generated Cityscapes panoptic annotation + cityscapes_panoptic_train.json + cityscapes_panoptic_train/ + cityscapes_panoptic_val.json + cityscapes_panoptic_val/ + cityscapes_panoptic_test.json + cityscapes_panoptic_test/ + leftImg8bit/ + train/ + val/ + test/ +``` +Install cityscapes scripts by: +``` +pip install git+https://github.com/mcordts/cityscapesScripts.git +``` + +Note: to create labelTrainIds.png, first prepare the above structure, then run cityscapesescript with: +``` +CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py +``` +These files are not needed for instance segmentation. + +Note: to generate Cityscapes panoptic dataset, run cityscapesescript with: +``` +CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createPanopticImgs.py +``` +These files are not needed for semantic and instance segmentation. + +## Expected dataset structure for [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html): +``` +VOC20{07,12}/ + Annotations/ + ImageSets/ + Main/ + trainval.txt + test.txt + # train.txt or val.txt, if you use these splits + JPEGImages/ +``` + +## Expected dataset structure for [ADE20k Scene Parsing](http://sceneparsing.csail.mit.edu/): +``` +ADEChallengeData2016/ + annotations/ + annotations_detectron2/ + images/ + objectInfo150.txt +``` +The directory `annotations_detectron2` is generated by running `python datasets/prepare_ade20k_sem_seg.py`. 
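
Several of the structures above are easy to get subtly wrong, so a small pre-flight check can save a failed run. The sketch below is not part of detectron2; the paths it checks simply mirror the layout listed in this README, and the helper name is made up for illustration:

```python
import os
from pathlib import Path


def check_builtin_layout(root: str | None = None) -> None:
    """Print which of the expected builtin-dataset paths are present."""
    root_dir = Path(root or os.getenv("DETECTRON2_DATASETS", "datasets"))
    expected = [
        "coco/annotations/instances_val2017.json",
        "coco/val2017",
        "coco/annotations/panoptic_val2017.json",          # PanopticFPN
        "lvis/lvis_v1_val.json",                           # LVIS
        "cityscapes/gtFine",                               # Cityscapes
        "VOC2007/Annotations",                             # Pascal VOC
        "ADEChallengeData2016/annotations_detectron2",     # ADE20k (generated)
    ]
    for rel in expected:
        path = root_dir / rel
        status = "ok" if path.exists() else "MISSING"
        print(f"{status:>7}  {path}")


if __name__ == "__main__":
    check_builtin_layout()
```

A "MISSING" line for a dataset you do not intend to use is harmless; only the entries required by your chosen config need to exist.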
diff --git a/data_processing/detectron2/datasets/prepare_ade20k_sem_seg.py b/data_processing/detectron2/datasets/prepare_ade20k_sem_seg.py new file mode 100644 index 0000000..8b4a58d --- /dev/null +++ b/data_processing/detectron2/datasets/prepare_ade20k_sem_seg.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import os +from pathlib import Path +import tqdm +from PIL import Image + + +def convert(input, output): + img = np.asarray(Image.open(input)) + assert img.dtype == np.uint8 + img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1 + Image.fromarray(img).save(output) + + +if __name__ == "__main__": + dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016" + for name in ["training", "validation"]: + annotation_dir = dataset_dir / "annotations" / name + output_dir = dataset_dir / "annotations_detectron2" / name + output_dir.mkdir(parents=True, exist_ok=True) + for file in tqdm.tqdm(list(annotation_dir.iterdir())): + output_file = output_dir / file.name + convert(file, output_file) diff --git a/data_processing/detectron2/datasets/prepare_cocofied_lvis.py b/data_processing/detectron2/datasets/prepare_cocofied_lvis.py new file mode 100644 index 0000000..245c884 --- /dev/null +++ b/data_processing/detectron2/datasets/prepare_cocofied_lvis.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import copy +import json +import os +from collections import defaultdict + +# This mapping is extracted from the official LVIS mapping: +# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json +COCO_SYNSET_CATEGORIES = [ + {"synset": "person.n.01", "coco_cat_id": 1}, + {"synset": "bicycle.n.01", "coco_cat_id": 2}, + {"synset": "car.n.01", "coco_cat_id": 3}, + {"synset": "motorcycle.n.01", "coco_cat_id": 4}, + {"synset": "airplane.n.01", "coco_cat_id": 5}, + {"synset": "bus.n.01", "coco_cat_id": 6}, + {"synset": "train.n.01", "coco_cat_id": 7}, + {"synset": "truck.n.01", "coco_cat_id": 8}, + {"synset": "boat.n.01", "coco_cat_id": 9}, + {"synset": "traffic_light.n.01", "coco_cat_id": 10}, + {"synset": "fireplug.n.01", "coco_cat_id": 11}, + {"synset": "stop_sign.n.01", "coco_cat_id": 13}, + {"synset": "parking_meter.n.01", "coco_cat_id": 14}, + {"synset": "bench.n.01", "coco_cat_id": 15}, + {"synset": "bird.n.01", "coco_cat_id": 16}, + {"synset": "cat.n.01", "coco_cat_id": 17}, + {"synset": "dog.n.01", "coco_cat_id": 18}, + {"synset": "horse.n.01", "coco_cat_id": 19}, + {"synset": "sheep.n.01", "coco_cat_id": 20}, + {"synset": "beef.n.01", "coco_cat_id": 21}, + {"synset": "elephant.n.01", "coco_cat_id": 22}, + {"synset": "bear.n.01", "coco_cat_id": 23}, + {"synset": "zebra.n.01", "coco_cat_id": 24}, + {"synset": "giraffe.n.01", "coco_cat_id": 25}, + {"synset": "backpack.n.01", "coco_cat_id": 27}, + {"synset": "umbrella.n.01", "coco_cat_id": 28}, + {"synset": "bag.n.04", "coco_cat_id": 31}, + {"synset": "necktie.n.01", "coco_cat_id": 32}, + {"synset": "bag.n.06", "coco_cat_id": 33}, + {"synset": "frisbee.n.01", "coco_cat_id": 34}, + {"synset": "ski.n.01", "coco_cat_id": 35}, + {"synset": "snowboard.n.01", "coco_cat_id": 36}, + {"synset": "ball.n.06", "coco_cat_id": 37}, + {"synset": "kite.n.03", "coco_cat_id": 38}, + {"synset": "baseball_bat.n.01", "coco_cat_id": 39}, + {"synset": "baseball_glove.n.01", "coco_cat_id": 40}, + 
{"synset": "skateboard.n.01", "coco_cat_id": 41}, + {"synset": "surfboard.n.01", "coco_cat_id": 42}, + {"synset": "tennis_racket.n.01", "coco_cat_id": 43}, + {"synset": "bottle.n.01", "coco_cat_id": 44}, + {"synset": "wineglass.n.01", "coco_cat_id": 46}, + {"synset": "cup.n.01", "coco_cat_id": 47}, + {"synset": "fork.n.01", "coco_cat_id": 48}, + {"synset": "knife.n.01", "coco_cat_id": 49}, + {"synset": "spoon.n.01", "coco_cat_id": 50}, + {"synset": "bowl.n.03", "coco_cat_id": 51}, + {"synset": "banana.n.02", "coco_cat_id": 52}, + {"synset": "apple.n.01", "coco_cat_id": 53}, + {"synset": "sandwich.n.01", "coco_cat_id": 54}, + {"synset": "orange.n.01", "coco_cat_id": 55}, + {"synset": "broccoli.n.01", "coco_cat_id": 56}, + {"synset": "carrot.n.01", "coco_cat_id": 57}, + {"synset": "frank.n.02", "coco_cat_id": 58}, + {"synset": "pizza.n.01", "coco_cat_id": 59}, + {"synset": "doughnut.n.02", "coco_cat_id": 60}, + {"synset": "cake.n.03", "coco_cat_id": 61}, + {"synset": "chair.n.01", "coco_cat_id": 62}, + {"synset": "sofa.n.01", "coco_cat_id": 63}, + {"synset": "pot.n.04", "coco_cat_id": 64}, + {"synset": "bed.n.01", "coco_cat_id": 65}, + {"synset": "dining_table.n.01", "coco_cat_id": 67}, + {"synset": "toilet.n.02", "coco_cat_id": 70}, + {"synset": "television_receiver.n.01", "coco_cat_id": 72}, + {"synset": "laptop.n.01", "coco_cat_id": 73}, + {"synset": "mouse.n.04", "coco_cat_id": 74}, + {"synset": "remote_control.n.01", "coco_cat_id": 75}, + {"synset": "computer_keyboard.n.01", "coco_cat_id": 76}, + {"synset": "cellular_telephone.n.01", "coco_cat_id": 77}, + {"synset": "microwave.n.02", "coco_cat_id": 78}, + {"synset": "oven.n.01", "coco_cat_id": 79}, + {"synset": "toaster.n.02", "coco_cat_id": 80}, + {"synset": "sink.n.01", "coco_cat_id": 81}, + {"synset": "electric_refrigerator.n.01", "coco_cat_id": 82}, + {"synset": "book.n.01", "coco_cat_id": 84}, + {"synset": "clock.n.01", "coco_cat_id": 85}, + {"synset": "vase.n.01", "coco_cat_id": 86}, + {"synset": "scissors.n.01", "coco_cat_id": 87}, + {"synset": "teddy.n.01", "coco_cat_id": 88}, + {"synset": "hand_blower.n.01", "coco_cat_id": 89}, + {"synset": "toothbrush.n.01", "coco_cat_id": 90}, +] + + +def cocofy_lvis(input_filename, output_filename): + """ + Filter LVIS instance segmentation annotations to remove all categories that are not included in + COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in + the output json are the incontiguous COCO dataset ids. + + Args: + input_filename (str): path to the LVIS json file. + output_filename (str): path to the COCOfied json file. 
+ """ + + with open(input_filename, "r") as f: + lvis_json = json.load(f) + + lvis_annos = lvis_json.pop("annotations") + cocofied_lvis = copy.deepcopy(lvis_json) + lvis_json["annotations"] = lvis_annos + + # Mapping from lvis cat id to coco cat id via synset + lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]} + synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES} + # Synsets that we will keep in the dataset + synsets_to_keep = set(synset_to_coco_cat_id.keys()) + coco_cat_id_with_instances = defaultdict(int) + + new_annos = [] + ann_id = 1 + for ann in lvis_annos: + lvis_cat_id = ann["category_id"] + synset = lvis_cat_id_to_synset[lvis_cat_id] + if synset not in synsets_to_keep: + continue + coco_cat_id = synset_to_coco_cat_id[synset] + new_ann = copy.deepcopy(ann) + new_ann["category_id"] = coco_cat_id + new_ann["id"] = ann_id + ann_id += 1 + new_annos.append(new_ann) + coco_cat_id_with_instances[coco_cat_id] += 1 + cocofied_lvis["annotations"] = new_annos + + for image in cocofied_lvis["images"]: + for key in ["not_exhaustive_category_ids", "neg_category_ids"]: + new_category_list = [] + for lvis_cat_id in image[key]: + synset = lvis_cat_id_to_synset[lvis_cat_id] + if synset not in synsets_to_keep: + continue + coco_cat_id = synset_to_coco_cat_id[synset] + new_category_list.append(coco_cat_id) + coco_cat_id_with_instances[coco_cat_id] += 1 + image[key] = new_category_list + + coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys()) + + new_categories = [] + for cat in lvis_json["categories"]: + synset = cat["synset"] + if synset not in synsets_to_keep: + continue + coco_cat_id = synset_to_coco_cat_id[synset] + if coco_cat_id not in coco_cat_id_with_instances: + continue + new_cat = copy.deepcopy(cat) + new_cat["id"] = coco_cat_id + new_categories.append(new_cat) + cocofied_lvis["categories"] = new_categories + + with open(output_filename, "w") as f: + json.dump(cocofied_lvis, f) + print("{} is COCOfied and stored in {}.".format(input_filename, output_filename)) + + +if __name__ == "__main__": + dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis") + for s in ["lvis_v0.5_train", "lvis_v0.5_val"]: + print("Start COCOfing {}.".format(s)) + cocofy_lvis( + os.path.join(dataset_dir, "{}.json".format(s)), + os.path.join(dataset_dir, "{}_cocofied.json".format(s)), + ) diff --git a/data_processing/detectron2/datasets/prepare_for_tests.sh b/data_processing/detectron2/datasets/prepare_for_tests.sh new file mode 100644 index 0000000..67e875a --- /dev/null +++ b/data_processing/detectron2/datasets/prepare_for_tests.sh @@ -0,0 +1,31 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. + +# Download the mini dataset (coco val2017_100, with only 100 images) +# to be used in unittests & integration tests. + +cd "${0%/*}" + +BASE=https://dl.fbaipublicfiles.com/detectron2 +ROOT=${DETECTRON2_DATASETS:-./} +ROOT=${ROOT/#\~/$HOME} # expand ~ to HOME +mkdir -p $ROOT/coco/annotations + +for anno in instances_val2017_100 \ + person_keypoints_val2017_100 ; do + + dest=$ROOT/coco/annotations/$anno.json + [[ -s $dest ]] && { + echo "$dest exists. Skipping ..." + } || { + wget $BASE/annotations/coco/$anno.json -O $dest + } +done + +dest=$ROOT/coco/val2017_100.tgz +[[ -d $ROOT/coco/val2017 ]] && { + echo "$ROOT/coco/val2017 exists. Skipping ..." 
+} || { + wget $BASE/annotations/coco/val2017_100.tgz -O $dest + tar xzf $dest -C $ROOT/coco/ && rm -f $dest +} diff --git a/data_processing/detectron2/datasets/prepare_panoptic_fpn.py b/data_processing/detectron2/datasets/prepare_panoptic_fpn.py new file mode 100644 index 0000000..597d791 --- /dev/null +++ b/data_processing/detectron2/datasets/prepare_panoptic_fpn.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import functools +import json +import multiprocessing as mp +import numpy as np +import os +import time +from fvcore.common.download import download +from panopticapi.utils import rgb2id +from PIL import Image + +from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES + + +def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map): + panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32) + panoptic = rgb2id(panoptic) + output = np.zeros_like(panoptic, dtype=np.uint8) + 255 + for seg in segments: + cat_id = seg["category_id"] + new_cat_id = id_map[cat_id] + output[panoptic == seg["id"]] = new_cat_id + Image.fromarray(output).save(output_semantic) + + +def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories): + """ + Create semantic segmentation annotations from panoptic segmentation + annotations, to be used by PanopticFPN. + + It maps all thing categories to class 0, and maps all unlabeled pixels to class 255. + It maps all stuff categories to contiguous ids starting from 1. + + Args: + panoptic_json (str): path to the panoptic json file, in COCO's format. + panoptic_root (str): a directory with panoptic annotation files, in COCO's format. + sem_seg_root (str): a directory to output semantic annotation files + categories (list[dict]): category metadata. Each dict needs to have: + "id": corresponds to the "category_id" in the json annotations + "isthing": 0 or 1 + """ + os.makedirs(sem_seg_root, exist_ok=True) + + stuff_ids = [k["id"] for k in categories if k["isthing"] == 0] + thing_ids = [k["id"] for k in categories if k["isthing"] == 1] + id_map = {} # map from category id to id in the output semantic annotation + assert len(stuff_ids) <= 254 + for i, stuff_id in enumerate(stuff_ids): + id_map[stuff_id] = i + 1 + for thing_id in thing_ids: + id_map[thing_id] = 0 + id_map[0] = 255 + + with open(panoptic_json) as f: + obj = json.load(f) + + pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4)) + + def iter_annotations(): + for anno in obj["annotations"]: + file_name = anno["file_name"] + segments = anno["segments_info"] + input = os.path.join(panoptic_root, file_name) + output = os.path.join(sem_seg_root, file_name) + yield input, output, segments + + print("Start writing to {} ...".format(sem_seg_root)) + start = time.time() + pool.starmap( + functools.partial(_process_panoptic_to_semantic, id_map=id_map), + iter_annotations(), + chunksize=100, + ) + print("Finished. 
time: {:.2f}s".format(time.time() - start)) + + +if __name__ == "__main__": + dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco") + for s in ["val2017", "train2017"]: + separate_coco_semantic_from_panoptic( + os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)), + os.path.join(dataset_dir, "panoptic_{}".format(s)), + os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)), + COCO_CATEGORIES, + ) + + # Prepare val2017_100 for quick testing: + + dest_dir = os.path.join(dataset_dir, "annotations/") + URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir) + with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f: + obj = json.load(f) + + def link_val100(dir_full, dir_100): + print("Creating " + dir_100 + " ...") + os.makedirs(dir_100, exist_ok=True) + for img in obj["images"]: + basename = os.path.splitext(img["file_name"])[0] + src = os.path.join(dir_full, basename + ".png") + dst = os.path.join(dir_100, basename + ".png") + src = os.path.relpath(src, start=dir_100) + os.symlink(src, dst) + + link_val100( + os.path.join(dataset_dir, "panoptic_val2017"), + os.path.join(dataset_dir, "panoptic_val2017_100"), + ) + + link_val100( + os.path.join(dataset_dir, "panoptic_stuff_val2017"), + os.path.join(dataset_dir, "panoptic_stuff_val2017_100"), + ) diff --git a/data_processing/detectron2/demo/README.md b/data_processing/detectron2/demo/README.md new file mode 100644 index 0000000..133d8d3 --- /dev/null +++ b/data_processing/detectron2/demo/README.md @@ -0,0 +1,8 @@ + +## Detectron2 Demo + +We provide a command line tool to run a simple demo of builtin configs. +The usage is explained in [GETTING_STARTED.md](../GETTING_STARTED.md). + +See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-) +for a high-quality demo generated with this tool. diff --git a/data_processing/detectron2/demo/demo.py b/data_processing/detectron2/demo/demo.py new file mode 100644 index 0000000..4baa876 --- /dev/null +++ b/data_processing/detectron2/demo/demo.py @@ -0,0 +1,188 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import argparse +import glob +import multiprocessing as mp +import numpy as np +import os +import tempfile +import time +import warnings +import cv2 +import tqdm + +from detectron2.config import get_cfg +from detectron2.data.detection_utils import read_image +from detectron2.utils.logger import setup_logger + +from predictor import VisualizationDemo + +# constants +WINDOW_NAME = "COCO detections" + + +def setup_cfg(args): + # load config from file and command-line arguments + cfg = get_cfg() + # To use demo for Panoptic-DeepLab, please uncomment the following two lines. 
+ # from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa + # add_panoptic_deeplab_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + # Set score_threshold for builtin models + cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold + cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold + cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold + cfg.freeze() + return cfg + + +def get_parser(): + parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs") + parser.add_argument( + "--config-file", + default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml", + metavar="FILE", + help="path to config file", + ) + parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.") + parser.add_argument("--video-input", help="Path to video file.") + parser.add_argument( + "--input", + nargs="+", + help="A list of space separated input images; " + "or a single glob pattern such as 'directory/*.jpg'", + ) + parser.add_argument( + "--output", + help="A file or directory to save output visualizations. " + "If not given, will show output in an OpenCV window.", + ) + + parser.add_argument( + "--confidence-threshold", + type=float, + default=0.5, + help="Minimum score for instance predictions to be shown", + ) + parser.add_argument( + "--opts", + help="Modify config options using the command-line 'KEY VALUE' pairs", + default=[], + nargs=argparse.REMAINDER, + ) + return parser + + +def test_opencv_video_format(codec, file_ext): + with tempfile.TemporaryDirectory(prefix="video_format_test") as dir: + filename = os.path.join(dir, "test_file" + file_ext) + writer = cv2.VideoWriter( + filename=filename, + fourcc=cv2.VideoWriter_fourcc(*codec), + fps=float(30), + frameSize=(10, 10), + isColor=True, + ) + [writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)] + writer.release() + if os.path.isfile(filename): + return True + return False + + +if __name__ == "__main__": + mp.set_start_method("spawn", force=True) + args = get_parser().parse_args() + setup_logger(name="fvcore") + logger = setup_logger() + logger.info("Arguments: " + str(args)) + + cfg = setup_cfg(args) + + demo = VisualizationDemo(cfg) + + if args.input: + if len(args.input) == 1: + args.input = glob.glob(os.path.expanduser(args.input[0])) + assert args.input, "The input path(s) was not found" + for path in tqdm.tqdm(args.input, disable=not args.output): + # use PIL, to be consistent with evaluation + img = read_image(path, format="BGR") + start_time = time.time() + predictions, visualized_output = demo.run_on_image(img) + logger.info( + "{}: {} in {:.2f}s".format( + path, + "detected {} instances".format(len(predictions["instances"])) + if "instances" in predictions + else "finished", + time.time() - start_time, + ) + ) + + if args.output: + if os.path.isdir(args.output): + assert os.path.isdir(args.output), args.output + out_filename = os.path.join(args.output, os.path.basename(path)) + else: + assert len(args.input) == 1, "Please specify a directory with args.output" + out_filename = args.output + visualized_output.save(out_filename) + else: + cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) + cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1]) + if cv2.waitKey(0) == 27: + break # esc to quit + elif args.webcam: + assert args.input is None, "Cannot have both --input and --webcam!" 
+ assert args.output is None, "output not yet supported with --webcam!" + cam = cv2.VideoCapture(0) + for vis in tqdm.tqdm(demo.run_on_video(cam)): + cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) + cv2.imshow(WINDOW_NAME, vis) + if cv2.waitKey(1) == 27: + break # esc to quit + cam.release() + cv2.destroyAllWindows() + elif args.video_input: + video = cv2.VideoCapture(args.video_input) + width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) + frames_per_second = video.get(cv2.CAP_PROP_FPS) + num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + basename = os.path.basename(args.video_input) + codec, file_ext = ( + ("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4") + ) + if codec == ".mp4v": + warnings.warn("x264 codec not available, switching to mp4v") + if args.output: + if os.path.isdir(args.output): + output_fname = os.path.join(args.output, basename) + output_fname = os.path.splitext(output_fname)[0] + file_ext + else: + output_fname = args.output + assert not os.path.isfile(output_fname), output_fname + output_file = cv2.VideoWriter( + filename=output_fname, + # some installation of opencv may not support x264 (due to its license), + # you can try other format (e.g. MPEG) + fourcc=cv2.VideoWriter_fourcc(*codec), + fps=float(frames_per_second), + frameSize=(width, height), + isColor=True, + ) + assert os.path.isfile(args.video_input) + for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames): + if args.output: + output_file.write(vis_frame) + else: + cv2.namedWindow(basename, cv2.WINDOW_NORMAL) + cv2.imshow(basename, vis_frame) + if cv2.waitKey(1) == 27: + break # esc to quit + video.release() + if args.output: + output_file.release() + else: + cv2.destroyAllWindows() diff --git a/data_processing/detectron2/demo/predictor.py b/data_processing/detectron2/demo/predictor.py new file mode 100644 index 0000000..7b7ebd3 --- /dev/null +++ b/data_processing/detectron2/demo/predictor.py @@ -0,0 +1,220 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import atexit +import bisect +import multiprocessing as mp +from collections import deque +import cv2 +import torch + +from detectron2.data import MetadataCatalog +from detectron2.engine.defaults import DefaultPredictor +from detectron2.utils.video_visualizer import VideoVisualizer +from detectron2.utils.visualizer import ColorMode, Visualizer + + +class VisualizationDemo(object): + def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False): + """ + Args: + cfg (CfgNode): + instance_mode (ColorMode): + parallel (bool): whether to run the model in different processes from visualization. + Useful since the visualization logic can be slow. + """ + self.metadata = MetadataCatalog.get( + cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused" + ) + self.cpu_device = torch.device("cpu") + self.instance_mode = instance_mode + + self.parallel = parallel + if parallel: + num_gpu = torch.cuda.device_count() + self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu) + else: + self.predictor = DefaultPredictor(cfg) + + def run_on_image(self, image): + """ + Args: + image (np.ndarray): an image of shape (H, W, C) (in BGR order). + This is the format used by OpenCV. + + Returns: + predictions (dict): the output of the model. + vis_output (VisImage): the visualized image output. + """ + vis_output = None + predictions = self.predictor(image) + # Convert image from OpenCV BGR format to Matplotlib RGB format. 
+ image = image[:, :, ::-1] + visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode) + if "panoptic_seg" in predictions: + panoptic_seg, segments_info = predictions["panoptic_seg"] + vis_output = visualizer.draw_panoptic_seg_predictions( + panoptic_seg.to(self.cpu_device), segments_info + ) + else: + if "sem_seg" in predictions: + vis_output = visualizer.draw_sem_seg( + predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) + ) + if "instances" in predictions: + instances = predictions["instances"].to(self.cpu_device) + vis_output = visualizer.draw_instance_predictions(predictions=instances) + + return predictions, vis_output + + def _frame_from_video(self, video): + while video.isOpened(): + success, frame = video.read() + if success: + yield frame + else: + break + + def run_on_video(self, video): + """ + Visualizes predictions on frames of the input video. + + Args: + video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be + either a webcam or a video file. + + Yields: + ndarray: BGR visualizations of each video frame. + """ + video_visualizer = VideoVisualizer(self.metadata, self.instance_mode) + + def process_predictions(frame, predictions): + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + if "panoptic_seg" in predictions: + panoptic_seg, segments_info = predictions["panoptic_seg"] + vis_frame = video_visualizer.draw_panoptic_seg_predictions( + frame, panoptic_seg.to(self.cpu_device), segments_info + ) + elif "instances" in predictions: + predictions = predictions["instances"].to(self.cpu_device) + vis_frame = video_visualizer.draw_instance_predictions(frame, predictions) + elif "sem_seg" in predictions: + vis_frame = video_visualizer.draw_sem_seg( + frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) + ) + + # Converts Matplotlib RGB format to OpenCV BGR format + vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR) + return vis_frame + + frame_gen = self._frame_from_video(video) + if self.parallel: + buffer_size = self.predictor.default_buffer_size + + frame_data = deque() + + for cnt, frame in enumerate(frame_gen): + frame_data.append(frame) + self.predictor.put(frame) + + if cnt >= buffer_size: + frame = frame_data.popleft() + predictions = self.predictor.get() + yield process_predictions(frame, predictions) + + while len(frame_data): + frame = frame_data.popleft() + predictions = self.predictor.get() + yield process_predictions(frame, predictions) + else: + for frame in frame_gen: + yield process_predictions(frame, self.predictor(frame)) + + +class AsyncPredictor: + """ + A predictor that runs the model asynchronously, possibly on >1 GPUs. + Because rendering the visualization takes considerably amount of time, + this helps improve throughput a little bit when rendering videos. 
+ """ + + class _StopToken: + pass + + class _PredictWorker(mp.Process): + def __init__(self, cfg, task_queue, result_queue): + self.cfg = cfg + self.task_queue = task_queue + self.result_queue = result_queue + super().__init__() + + def run(self): + predictor = DefaultPredictor(self.cfg) + + while True: + task = self.task_queue.get() + if isinstance(task, AsyncPredictor._StopToken): + break + idx, data = task + result = predictor(data) + self.result_queue.put((idx, result)) + + def __init__(self, cfg, num_gpus: int = 1): + """ + Args: + cfg (CfgNode): + num_gpus (int): if 0, will run on CPU + """ + num_workers = max(num_gpus, 1) + self.task_queue = mp.Queue(maxsize=num_workers * 3) + self.result_queue = mp.Queue(maxsize=num_workers * 3) + self.procs = [] + for gpuid in range(max(num_gpus, 1)): + cfg = cfg.clone() + cfg.defrost() + cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" + self.procs.append( + AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue) + ) + + self.put_idx = 0 + self.get_idx = 0 + self.result_rank = [] + self.result_data = [] + + for p in self.procs: + p.start() + atexit.register(self.shutdown) + + def put(self, image): + self.put_idx += 1 + self.task_queue.put((self.put_idx, image)) + + def get(self): + self.get_idx += 1 # the index needed for this request + if len(self.result_rank) and self.result_rank[0] == self.get_idx: + res = self.result_data[0] + del self.result_data[0], self.result_rank[0] + return res + + while True: + # make sure the results are returned in the correct order + idx, res = self.result_queue.get() + if idx == self.get_idx: + return res + insert = bisect.bisect(self.result_rank, idx) + self.result_rank.insert(insert, idx) + self.result_data.insert(insert, res) + + def __len__(self): + return self.put_idx - self.get_idx + + def __call__(self, image): + self.put(image) + return self.get() + + def shutdown(self): + for _ in self.procs: + self.task_queue.put(AsyncPredictor._StopToken()) + + @property + def default_buffer_size(self): + return len(self.procs) * 5 diff --git a/data_processing/detectron2/detectron2/__init__.py b/data_processing/detectron2/detectron2/__init__.py new file mode 100644 index 0000000..bdd994b --- /dev/null +++ b/data_processing/detectron2/detectron2/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .utils.env import setup_environment + +setup_environment() + + +# This line will be programatically read/write by setup.py. +# Leave them at the bottom of this file and don't touch them. +__version__ = "0.6" diff --git a/data_processing/detectron2/detectron2/checkpoint/__init__.py b/data_processing/detectron2/detectron2/checkpoint/__init__.py new file mode 100644 index 0000000..99da046 --- /dev/null +++ b/data_processing/detectron2/detectron2/checkpoint/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +# File: + + +from . 
import catalog as _UNUSED # register the handler +from .detection_checkpoint import DetectionCheckpointer +from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer + +__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"] diff --git a/data_processing/detectron2/detectron2/checkpoint/c2_model_loading.py b/data_processing/detectron2/detectron2/checkpoint/c2_model_loading.py new file mode 100644 index 0000000..c6de2a3 --- /dev/null +++ b/data_processing/detectron2/detectron2/checkpoint/c2_model_loading.py @@ -0,0 +1,412 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import re +from typing import Dict, List +import torch +from tabulate import tabulate + + +def convert_basic_c2_names(original_keys): + """ + Apply some basic name conversion to names in C2 weights. + It only deals with typical backbone models. + + Args: + original_keys (list[str]): + Returns: + list[str]: The same number of strings matching those in original_keys. + """ + layer_keys = copy.deepcopy(original_keys) + layer_keys = [ + {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys + ] # some hard-coded mappings + + layer_keys = [k.replace("_", ".") for k in layer_keys] + layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] + layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] + # Uniform both bn and gn names to "norm" + layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] + + # stem + layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] + # to avoid mis-matching with "conv1" in other components (e.g. 
detection head) + layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] + + # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) + # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] + # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] + # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] + # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] + + # blocks + layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] + layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] + layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] + layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] + + # DensePose substitutions + layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] + layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] + layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] + layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] + layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] + return layer_keys + + +def convert_c2_detectron_names(weights): + """ + Map Caffe2 Detectron weight names to Detectron2 names. + + Args: + weights (dict): name -> tensor + + Returns: + dict: detectron2 names -> tensor + dict: detectron2 names -> C2 names + """ + logger = logging.getLogger(__name__) + logger.info("Renaming Caffe2 weights ......") + original_keys = sorted(weights.keys()) + layer_keys = copy.deepcopy(original_keys) + + layer_keys = convert_basic_c2_names(layer_keys) + + # -------------------------------------------------------------------------- + # RPN hidden representation conv + # -------------------------------------------------------------------------- + # FPN case + # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then + # shared for all other levels, hence the appearance of "fpn2" + layer_keys = [ + k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys + ] + # Non-FPN case + layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # RPN box transformation conv + # -------------------------------------------------------------------------- + # FPN case (see note above about "fpn2") + layer_keys = [ + k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") + for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + # Non-FPN case + layer_keys = [ + k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + + # -------------------------------------------------------------------------- + # Fast R-CNN box head + # -------------------------------------------------------------------------- + layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] + layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] + layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] + layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] + # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s + layer_keys = 
[re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # FPN lateral and output convolutions + # -------------------------------------------------------------------------- + def fpn_map(name): + """ + Look for keys with the following patterns: + 1) Starts with "fpn.inner." + Example: "fpn.inner.res2.2.sum.lateral.weight" + Meaning: These are lateral pathway convolutions + 2) Starts with "fpn.res" + Example: "fpn.res2.2.sum.weight" + Meaning: These are FPN output convolutions + """ + splits = name.split(".") + norm = ".norm" if "norm" in splits else "" + if name.startswith("fpn.inner."): + # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] + stage = int(splits[2][len("res") :]) + return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) + elif name.startswith("fpn.res"): + # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] + stage = int(splits[1][len("res") :]) + return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) + return name + + layer_keys = [fpn_map(k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # Mask R-CNN mask head + # -------------------------------------------------------------------------- + # roi_heads.StandardROIHeads case + layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] + layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] + layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] + # roi_heads.Res5ROIHeads case + layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Keypoint R-CNN head + # -------------------------------------------------------------------------- + # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" + layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] + layer_keys = [ + k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys + ] + layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Done with replacements + # -------------------------------------------------------------------------- + assert len(set(layer_keys)) == len(layer_keys) + assert len(original_keys) == len(layer_keys) + + new_weights = {} + new_keys_to_original_keys = {} + for orig, renamed in zip(original_keys, layer_keys): + new_keys_to_original_keys[renamed] = orig + if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): + # remove the meaningless prediction weight for background class + new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 + new_weights[renamed] = weights[orig][new_start_idx:] + logger.info( + "Remove prediction weight for background class in {}. 
The shape changes from " + "{} to {}.".format( + renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) + ) + ) + elif renamed.startswith("cls_score."): + # move weights of bg class from original index 0 to last index + logger.info( + "Move classification weights for background class in {} from index 0 to " + "index {}.".format(renamed, weights[orig].shape[0] - 1) + ) + new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) + else: + new_weights[renamed] = weights[orig] + + return new_weights, new_keys_to_original_keys + + +# Note the current matching is not symmetric. +# it assumes model_state_dict will have longer names. +def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): + """ + Match names between the two state-dict, and returns a new chkpt_state_dict with names + converted to match model_state_dict with heuristics. The returned dict can be later + loaded with fvcore checkpointer. + If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 + model and will be renamed at first. + + Strategy: suppose that the models that we will create will have prefixes appended + to each of its keys, for example due to an extra level of nesting that the original + pre-trained weights from ImageNet won't contain. For example, model.state_dict() + might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains + res2.conv1.weight. We thus want to match both parameters together. + For that, we look for each model weight, look among all loaded keys if there is one + that is a suffix of the current weight name, and use it if that's the case. + If multiple matches exist, take the one with longest size + of the corresponding name. For example, for the same model as before, the pretrained + weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, + we want to match backbone[0].body.conv1.weight to conv1.weight, and + backbone[0].body.res2.conv1.weight to res2.conv1.weight. + """ + model_keys = sorted(model_state_dict.keys()) + if c2_conversion: + ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) + # original_keys: the name in the original dict (before renaming) + else: + original_keys = {x: x for x in ckpt_state_dict.keys()} + ckpt_keys = sorted(ckpt_state_dict.keys()) + + def match(a, b): + # Matched ckpt_key should be a complete (starts with '.') suffix. + # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, + # but matches whatever_conv1 or mesh_head.whatever_conv1. + return a == b or a.endswith("." 
+ b) + + # get a matrix of string matches, where each (i, j) entry correspond to the size of the + # ckpt_key string, if it matches + match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] + match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) + # use the matched one with longest size in case of multiple matches + max_match_size, idxs = match_matrix.max(1) + # remove indices that correspond to no-match + idxs[max_match_size == 0] = -1 + + logger = logging.getLogger(__name__) + # matched_pairs (matched checkpoint key --> matched model key) + matched_keys = {} + result_state_dict = {} + for idx_model, idx_ckpt in enumerate(idxs.tolist()): + if idx_ckpt == -1: + continue + key_model = model_keys[idx_model] + key_ckpt = ckpt_keys[idx_ckpt] + value_ckpt = ckpt_state_dict[key_ckpt] + shape_in_model = model_state_dict[key_model].shape + + if shape_in_model != value_ckpt.shape: + logger.warning( + "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( + key_ckpt, value_ckpt.shape, key_model, shape_in_model + ) + ) + logger.warning( + "{} will not be loaded. Please double check and see if this is desired.".format( + key_ckpt + ) + ) + continue + + assert key_model not in result_state_dict + result_state_dict[key_model] = value_ckpt + if key_ckpt in matched_keys: # already added to matched_keys + logger.error( + "Ambiguity found for {} in checkpoint!" + "It matches at least two keys in the model ({} and {}).".format( + key_ckpt, key_model, matched_keys[key_ckpt] + ) + ) + raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") + + matched_keys[key_ckpt] = key_model + + # logging: + matched_model_keys = sorted(matched_keys.values()) + if len(matched_model_keys) == 0: + logger.warning("No weights in checkpoint matched with model.") + return ckpt_state_dict + common_prefix = _longest_common_prefix(matched_model_keys) + rev_matched_keys = {v: k for k, v in matched_keys.items()} + original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} + + model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) + table = [] + memo = set() + for key_model in matched_model_keys: + if key_model in memo: + continue + if key_model in model_key_groups: + group = model_key_groups[key_model] + memo |= set(group) + shapes = [tuple(model_state_dict[k].shape) for k in group] + table.append( + ( + _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", + _group_str([original_keys[k] for k in group]), + " ".join([str(x).replace(" ", "") for x in shapes]), + ) + ) + else: + key_checkpoint = original_keys[key_model] + shape = str(tuple(model_state_dict[key_model].shape)) + table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) + table_str = tabulate( + table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] + ) + logger.info( + "Following weights matched with " + + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + + ":\n" + + table_str + ) + + unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] + for k in unmatched_ckpt_keys: + result_state_dict[k] = ckpt_state_dict[k] + return result_state_dict + + +def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): + """ + Params in the same submodule are grouped together. 
+ + Args: + keys: names of all parameters + original_names: mapping from parameter name to their name in the checkpoint + + Returns: + dict[name -> all other names in the same group] + """ + + def _submodule_name(key): + pos = key.rfind(".") + if pos < 0: + return None + prefix = key[: pos + 1] + return prefix + + all_submodules = [_submodule_name(k) for k in keys] + all_submodules = [x for x in all_submodules if x] + all_submodules = sorted(all_submodules, key=len) + + ret = {} + for prefix in all_submodules: + group = [k for k in keys if k.startswith(prefix)] + if len(group) <= 1: + continue + original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) + if len(original_name_lcp) == 0: + # don't group weights if original names don't share prefix + continue + + for k in group: + if k in ret: + continue + ret[k] = group + return ret + + +def _longest_common_prefix(names: List[str]) -> str: + """ + ["abc.zfg", "abc.zef"] -> "abc." + """ + names = [n.split(".") for n in names] + m1, m2 = min(names), max(names) + ret = [a for a, b in zip(m1, m2) if a == b] + ret = ".".join(ret) + "." if len(ret) else "" + return ret + + +def _longest_common_prefix_str(names: List[str]) -> str: + m1, m2 = min(names), max(names) + lcp = [] + for a, b in zip(m1, m2): + if a == b: + lcp.append(a) + else: + break + lcp = "".join(lcp) + return lcp + + +def _group_str(names: List[str]) -> str: + """ + Turn "common1", "common2", "common3" into "common{1,2,3}" + """ + lcp = _longest_common_prefix_str(names) + rest = [x[len(lcp) :] for x in names] + rest = "{" + ",".join(rest) + "}" + ret = lcp + rest + + # add some simplification for BN specifically + ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") + ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") + return ret diff --git a/data_processing/detectron2/detectron2/checkpoint/catalog.py b/data_processing/detectron2/detectron2/checkpoint/catalog.py new file mode 100644 index 0000000..9a85736 --- /dev/null +++ b/data_processing/detectron2/detectron2/checkpoint/catalog.py @@ -0,0 +1,115 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging + +from detectron2.utils.file_io import PathHandler, PathManager + + +class ModelCatalog(object): + """ + Store mappings from names to third-party models. + """ + + S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron" + + # MSRA models have STRIDE_IN_1X1=True. False otherwise. + # NOTE: all BN models here have fused BN into an affine layer. + # As a result, you should only load them to a model with "FrozenBN". + # Loading them to a model with regular BN or SyncBN is wrong. + # Even when loaded to FrozenBN, it is still different from affine by an epsilon, + # which should be negligible for training. + # NOTE: all models here uses PIXEL_STD=[1,1,1] + # NOTE: Most of the BN models here are no longer used. We use the + # re-converted pre-trained models under detectron2 model zoo instead. 
+ C2_IMAGENET_MODELS = { + "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl", + "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl", + "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl", + "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl", + "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl", + "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl", + "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl", + } + + C2_DETECTRON_PATH_FORMAT = ( + "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950 + ) + + C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival" + C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival" + + # format: {model_name} -> part of the url + C2_DETECTRON_MODELS = { + "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950 + "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950 + "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950 + "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950 + "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950 + "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950 + "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950 + "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950 + "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950 + "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950 + "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950 + "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950 + "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950 + } + + @staticmethod + def get(name): + if name.startswith("Caffe2Detectron/COCO"): + return ModelCatalog._get_c2_detectron_baseline(name) + if name.startswith("ImageNetPretrained/"): + return ModelCatalog._get_c2_imagenet_pretrained(name) + raise RuntimeError("model not present in the catalog: {}".format(name)) + + @staticmethod + def _get_c2_imagenet_pretrained(name): + prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX + name = name[len("ImageNetPretrained/") :] + name = ModelCatalog.C2_IMAGENET_MODELS[name] + url = "/".join([prefix, name]) + return url + + @staticmethod + def _get_c2_detectron_baseline(name): + name = name[len("Caffe2Detectron/COCO/") :] + url = ModelCatalog.C2_DETECTRON_MODELS[name] + if "keypoint_rcnn" in name: + dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS + else: + dataset = ModelCatalog.C2_DATASET_COCO + + if "35998355/rpn_R-50-C4_1x" in name: + # this one model is somehow different from others .. 
+ type = "rpn" + else: + type = "generalized_rcnn" + + # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`. + url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format( + prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset + ) + return url + + +class ModelCatalogHandler(PathHandler): + """ + Resolve URL like catalog://. + """ + + PREFIX = "catalog://" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path, **kwargs): + logger = logging.getLogger(__name__) + catalog_path = ModelCatalog.get(path[len(self.PREFIX) :]) + logger.info("Catalog entry {} points to {}".format(path, catalog_path)) + return PathManager.get_local_path(catalog_path, **kwargs) + + def _open(self, path, mode="r", **kwargs): + return PathManager.open(self._get_local_path(path), mode, **kwargs) + + +PathManager.register_handler(ModelCatalogHandler()) diff --git a/data_processing/detectron2/detectron2/checkpoint/detection_checkpoint.py b/data_processing/detectron2/detectron2/checkpoint/detection_checkpoint.py new file mode 100644 index 0000000..cecb1fc --- /dev/null +++ b/data_processing/detectron2/detectron2/checkpoint/detection_checkpoint.py @@ -0,0 +1,143 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import os +import pickle +from urllib.parse import parse_qs, urlparse +import torch +from fvcore.common.checkpoint import Checkpointer +from torch.nn.parallel import DistributedDataParallel + +import detectron2.utils.comm as comm +from detectron2.utils.file_io import PathManager + +from .c2_model_loading import align_and_update_state_dicts + + +class DetectionCheckpointer(Checkpointer): + """ + Same as :class:`Checkpointer`, but is able to: + 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models. + 2. correctly load checkpoints that are only available on the master worker + """ + + def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables): + is_main_process = comm.is_main_process() + super().__init__( + model, + save_dir, + save_to_disk=is_main_process if save_to_disk is None else save_to_disk, + **checkpointables, + ) + self.path_manager = PathManager + self._parsed_url_during_load = None + + def load(self, path, *args, **kwargs): + assert self._parsed_url_during_load is None + need_sync = False + logger = logging.getLogger(__name__) + logger.info("[DetectionCheckpointer] Loading from {} ...".format(path)) + + if path and isinstance(self.model, DistributedDataParallel): + path = self.path_manager.get_local_path(path) + has_file = os.path.isfile(path) + all_has_file = comm.all_gather(has_file) + if not all_has_file[0]: + raise OSError(f"File {path} not found on main worker.") + if not all(all_has_file): + logger.warning( + f"Not all workers can read checkpoint {path}. " + "Training may fail to fully resume." + ) + # TODO: broadcast the checkpoint file contents from main + # worker, and load from it instead. 
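Editorial usage note, not part of the upstream files: the handler registered above lets `catalog://` checkpoint paths resolve through `PathManager`, and `DetectionCheckpointer` then applies the Caffe2 name-matching heuristics. A minimal sketch, assuming the usual `detectron2.checkpoint` exports and with `model` as a placeholder for any project model:

```python
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.checkpoint.catalog import ModelCatalog

# ModelCatalog.get only builds the download URL; nothing is fetched here.
url = ModelCatalog.get("ImageNetPretrained/MSRA/R-50")
# -> https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl

model = ...  # placeholder: any torch.nn.Module built by the project
checkpointer = DetectionCheckpointer(model, save_dir="./output")
# catalog:// paths are resolved by ModelCatalogHandler via PathManager,
# fetched to a local cache, then loaded with the name-matching heuristics.
checkpointer.load("catalog://ImageNetPretrained/MSRA/R-50")
```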
+ need_sync = True + if not has_file: + path = None # don't load if not readable + + if path: + parsed_url = urlparse(path) + self._parsed_url_during_load = parsed_url + path = parsed_url._replace(query="").geturl() # remove query from filename + path = self.path_manager.get_local_path(path) + ret = super().load(path, *args, **kwargs) + + if need_sync: + logger.info("Broadcasting model states from main worker ...") + self.model._sync_params_and_buffers() + self._parsed_url_during_load = None # reset to None + return ret + + def _load_file(self, filename): + if filename.endswith(".pkl"): + with PathManager.open(filename, "rb") as f: + data = pickle.load(f, encoding="latin1") + if "model" in data and "__author__" in data: + # file is in Detectron2 model zoo format + self.logger.info("Reading a file from '{}'".format(data["__author__"])) + return data + else: + # assume file is from Caffe2 / Detectron1 model zoo + if "blobs" in data: + # Detection models have "blobs", but ImageNet models don't + data = data["blobs"] + data = {k: v for k, v in data.items() if not k.endswith("_momentum")} + return {"model": data, "__author__": "Caffe2", "matching_heuristics": True} + elif filename.endswith(".pyth"): + # assume file is from pycls; no one else seems to use the ".pyth" extension + with PathManager.open(filename, "rb") as f: + data = torch.load(f) + assert ( + "model_state" in data + ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'." + model_state = { + k: v + for k, v in data["model_state"].items() + if not k.endswith("num_batches_tracked") + } + return {"model": model_state, "__author__": "pycls", "matching_heuristics": True} + + loaded = self._torch_load(filename) + if "model" not in loaded: + loaded = {"model": loaded} + assert self._parsed_url_during_load is not None, "`_load_file` must be called inside `load`" + parsed_url = self._parsed_url_during_load + queries = parse_qs(parsed_url.query) + if queries.pop("matching_heuristics", "False") == ["True"]: + loaded["matching_heuristics"] = True + if len(queries) > 0: + raise ValueError( + f"Unsupported query remaining: f{queries}, orginal filename: {parsed_url.geturl()}" + ) + return loaded + + def _torch_load(self, f): + return super()._load_file(f) + + def _load_model(self, checkpoint): + if checkpoint.get("matching_heuristics", False): + self._convert_ndarray_to_tensor(checkpoint["model"]) + # convert weights by name-matching heuristics + checkpoint["model"] = align_and_update_state_dicts( + self.model.state_dict(), + checkpoint["model"], + c2_conversion=checkpoint.get("__author__", None) == "Caffe2", + ) + # for non-caffe2 models, use standard ways to load it + incompatible = super()._load_model(checkpoint) + + model_buffers = dict(self.model.named_buffers(recurse=False)) + for k in ["pixel_mean", "pixel_std"]: + # Ignore missing key message about pixel_mean/std. + # Though they may be missing in old checkpoints, they will be correctly + # initialized from config anyway. + if k in model_buffers: + try: + incompatible.missing_keys.remove(k) + except ValueError: + pass + for k in incompatible.unexpected_keys[:]: + # Ignore unexpected keys about cell anchors. They exist in old checkpoints + # but now they are non-persistent buffers and will not be in new checkpoints. 
+ if "anchor_generator.cell_anchors" in k: + incompatible.unexpected_keys.remove(k) + return incompatible diff --git a/data_processing/detectron2/detectron2/config/__init__.py b/data_processing/detectron2/detectron2/config/__init__.py new file mode 100644 index 0000000..4e648e6 --- /dev/null +++ b/data_processing/detectron2/detectron2/config/__init__.py @@ -0,0 +1,24 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .compat import downgrade_config, upgrade_config +from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable +from .instantiate import instantiate +from .lazy import LazyCall, LazyConfig + +__all__ = [ + "CfgNode", + "get_cfg", + "global_cfg", + "set_global_cfg", + "downgrade_config", + "upgrade_config", + "configurable", + "instantiate", + "LazyCall", + "LazyConfig", +] + + +from detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/data_processing/detectron2/detectron2/config/compat.py b/data_processing/detectron2/detectron2/config/compat.py new file mode 100644 index 0000000..11a08c4 --- /dev/null +++ b/data_processing/detectron2/detectron2/config/compat.py @@ -0,0 +1,229 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Backward compatibility of configs. + +Instructions to bump version: ++ It's not needed to bump version if new keys are added. + It's only needed when backward-incompatible changes happen + (i.e., some existing keys disappear, or the meaning of a key changes) ++ To bump version, do the following: + 1. Increment _C.VERSION in defaults.py + 2. Add a converter in this file. + + Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X, + and a function "downgrade" which in-place downgrades config from X to X-1 + + In each function, VERSION is left unchanged. + + Each converter assumes that its input has the relevant keys + (i.e., the input is not a partial config). + 3. Run the tests (test_config.py) to make sure the upgrade & downgrade + functions are consistent. +""" + +import logging +from typing import List, Optional, Tuple + +from .config import CfgNode as CN +from .defaults import _C + +__all__ = ["upgrade_config", "downgrade_config"] + + +def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN: + """ + Upgrade a config from its current version to a newer version. + + Args: + cfg (CfgNode): + to_version (int): defaults to the latest version. + """ + cfg = cfg.clone() + if to_version is None: + to_version = _C.VERSION + + assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version): + converter = globals()["ConverterV" + str(k + 1)] + converter.upgrade(cfg) + cfg.VERSION = k + 1 + return cfg + + +def downgrade_config(cfg: CN, to_version: int) -> CN: + """ + Downgrade a config from its current version to an older version. + + Args: + cfg (CfgNode): + to_version (int): + + Note: + A general downgrade of arbitrary configs is not always possible due to the + different functionalities in different versions. + The purpose of downgrade is only to recover the defaults in old versions, + allowing it to load an old partial yaml config. + Therefore, the implementation only needs to fill in the default values + in the old version when a general downgrade is not possible. 
+ """ + cfg = cfg.clone() + assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version, -1): + converter = globals()["ConverterV" + str(k)] + converter.downgrade(cfg) + cfg.VERSION = k - 1 + return cfg + + +def guess_version(cfg: CN, filename: str) -> int: + """ + Guess the version of a partial config where the VERSION field is not specified. + Returns the version, or the latest if cannot make a guess. + + This makes it easier for users to migrate. + """ + logger = logging.getLogger(__name__) + + def _has(name: str) -> bool: + cur = cfg + for n in name.split("."): + if n not in cur: + return False + cur = cur[n] + return True + + # Most users' partial configs have "MODEL.WEIGHT", so guess on it + ret = None + if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"): + ret = 1 + + if ret is not None: + logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret)) + else: + ret = _C.VERSION + logger.warning( + "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format( + filename, ret + ) + ) + return ret + + +def _rename(cfg: CN, old: str, new: str) -> None: + old_keys = old.split(".") + new_keys = new.split(".") + + def _set(key_seq: List[str], val: str) -> None: + cur = cfg + for k in key_seq[:-1]: + if k not in cur: + cur[k] = CN() + cur = cur[k] + cur[key_seq[-1]] = val + + def _get(key_seq: List[str]) -> CN: + cur = cfg + for k in key_seq: + cur = cur[k] + return cur + + def _del(key_seq: List[str]) -> None: + cur = cfg + for k in key_seq[:-1]: + cur = cur[k] + del cur[key_seq[-1]] + if len(cur) == 0 and len(key_seq) > 1: + _del(key_seq[:-1]) + + _set(new_keys, _get(old_keys)) + _del(old_keys) + + +class _RenameConverter: + """ + A converter that handles simple rename. + """ + + RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name) + + @classmethod + def upgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME: + _rename(cfg, old, new) + + @classmethod + def downgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME[::-1]: + _rename(cfg, new, old) + + +class ConverterV1(_RenameConverter): + RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")] + + +class ConverterV2(_RenameConverter): + """ + A large bulk of rename, before public release. 
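The converters above are built on the private `_rename` helper; a minimal editorial sketch of its in-place behavior (the key names are just examples):

```python
from detectron2.config.compat import _rename
from detectron2.config.config import CfgNode as CN

cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.WEIGHT = "model_final.pkl"

# Moves the value from the old dotted path to the new one and deletes the old key.
_rename(cfg, "MODEL.WEIGHT", "MODEL.WEIGHTS")
assert cfg.MODEL.WEIGHTS == "model_final.pkl"
assert "WEIGHT" not in cfg.MODEL
```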
+ """ + + RENAME = [ + ("MODEL.WEIGHT", "MODEL.WEIGHTS"), + ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"), + ( + "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT", + "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH", + ), + ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"), + ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"), + ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"), + ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"), + ("TEST.AUG_ON", "TEST.AUG.ENABLED"), + ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"), + ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"), + ("TEST.AUG_FLIP", "TEST.AUG.FLIP"), + ] + + @classmethod + def upgrade(cls, cfg: CN) -> None: + super().upgrade(cfg) + + if cfg.MODEL.META_ARCHITECTURE == "RetinaNet": + _rename( + cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS" + ) + _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"] + else: + _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS") + _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"] + + @classmethod + def downgrade(cls, cfg: CN) -> None: + super().downgrade(cfg) + + _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS") + _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES") + cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS + cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES + cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version diff --git a/data_processing/detectron2/detectron2/config/config.py b/data_processing/detectron2/detectron2/config/config.py new file mode 100644 index 0000000..49a55b1 --- /dev/null +++ b/data_processing/detectron2/detectron2/config/config.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import functools +import inspect +import logging +from fvcore.common.config import CfgNode as _CfgNode + +from detectron2.utils.file_io import PathManager + + +class CfgNode(_CfgNode): + """ + The same as `fvcore.common.config.CfgNode`, but different in: + + 1. Use unsafe yaml loading by default. + Note that this may lead to arbitrary code execution: you must not + load a config file from untrusted sources before manually inspecting + the content of the file. + 2. Support config versioning. + When attempting to merge an old config, it will convert the old config automatically. + + .. automethod:: clone + .. automethod:: freeze + .. automethod:: defrost + .. 
automethod:: is_frozen + .. automethod:: load_yaml_with_base + .. automethod:: merge_from_list + .. automethod:: merge_from_other_cfg + """ + + @classmethod + def _open_cfg(cls, filename): + return PathManager.open(filename, "r") + + # Note that the default value of allow_unsafe is changed to True + def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: + """ + Load content from the given config file and merge it into self. + + Args: + cfg_filename: config filename + allow_unsafe: allow unsafe yaml syntax + """ + assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" + loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) + loaded_cfg = type(self)(loaded_cfg) + + # defaults.py needs to import CfgNode + from .defaults import _C + + latest_ver = _C.VERSION + assert ( + latest_ver == self.VERSION + ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" + + logger = logging.getLogger(__name__) + + loaded_ver = loaded_cfg.get("VERSION", None) + if loaded_ver is None: + from .compat import guess_version + + loaded_ver = guess_version(loaded_cfg, cfg_filename) + assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( + loaded_ver, self.VERSION + ) + + if loaded_ver == self.VERSION: + self.merge_from_other_cfg(loaded_cfg) + else: + # compat.py needs to import CfgNode + from .compat import upgrade_config, downgrade_config + + logger.warning( + "Loading an old v{} config file '{}' by automatically upgrading to v{}. " + "See docs/CHANGELOG.md for instructions to update your files.".format( + loaded_ver, cfg_filename, self.VERSION + ) + ) + # To convert, first obtain a full config at an old version + old_self = downgrade_config(self, to_version=loaded_ver) + old_self.merge_from_other_cfg(loaded_cfg) + new_config = upgrade_config(old_self) + self.clear() + self.update(new_config) + + def dump(self, *args, **kwargs): + """ + Returns: + str: a yaml string representation of the config + """ + # to make it show up in docs + return super().dump(*args, **kwargs) + + +global_cfg = CfgNode() + + +def get_cfg() -> CfgNode: + """ + Get a copy of the default config. + + Returns: + a detectron2 CfgNode instance. + """ + from .defaults import _C + + return _C.clone() + + +def set_global_cfg(cfg: CfgNode) -> None: + """ + Let the global config point to the given cfg. + + Assume that the given "cfg" has the key "KEY", after calling + `set_global_cfg(cfg)`, the key can be accessed by: + :: + from detectron2.config import global_cfg + print(global_cfg.KEY) + + By using a hacky global config, you can access these configs anywhere, + without having to pass the config object or the values deep into the code. + This is a hacky feature introduced for quick prototyping / research exploration. + """ + global global_cfg + global_cfg.clear() + global_cfg.update(cfg) + + +def configurable(init_func=None, *, from_config=None): + """ + Decorate a function or a class's __init__ method so that it can be called + with a :class:`CfgNode` object using a :func:`from_config` function that translates + :class:`CfgNode` to arguments. 
+ + Examples: + :: + # Usage 1: Decorator on __init__: + class A: + @configurable + def __init__(self, a, b=2, c=3): + pass + + @classmethod + def from_config(cls, cfg): # 'cfg' must be the first argument + # Returns kwargs to be passed to __init__ + return {"a": cfg.A, "b": cfg.B} + + a1 = A(a=1, b=2) # regular construction + a2 = A(cfg) # construct with a cfg + a3 = A(cfg, b=3, c=4) # construct with extra overwrite + + # Usage 2: Decorator on any function. Needs an extra from_config argument: + @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) + def a_func(a, b=2, c=3): + pass + + a1 = a_func(a=1, b=2) # regular call + a2 = a_func(cfg) # call with a cfg + a3 = a_func(cfg, b=3, c=4) # call with extra overwrite + + Args: + init_func (callable): a class's ``__init__`` method in usage 1. The + class must have a ``from_config`` classmethod which takes `cfg` as + the first argument. + from_config (callable): the from_config function in usage 2. It must take `cfg` + as its first argument. + """ + + if init_func is not None: + assert ( + inspect.isfunction(init_func) + and from_config is None + and init_func.__name__ == "__init__" + ), "Incorrect use of @configurable. Check API documentation for examples." + + @functools.wraps(init_func) + def wrapped(self, *args, **kwargs): + try: + from_config_func = type(self).from_config + except AttributeError as e: + raise AttributeError( + "Class with @configurable must have a 'from_config' classmethod." + ) from e + if not inspect.ismethod(from_config_func): + raise TypeError("Class with @configurable must have a 'from_config' classmethod.") + + if _called_with_cfg(*args, **kwargs): + explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) + init_func(self, **explicit_args) + else: + init_func(self, *args, **kwargs) + + return wrapped + + else: + if from_config is None: + return configurable # @configurable() is made equivalent to @configurable + assert inspect.isfunction( + from_config + ), "from_config argument of configurable must be a function!" + + def wrapper(orig_func): + @functools.wraps(orig_func) + def wrapped(*args, **kwargs): + if _called_with_cfg(*args, **kwargs): + explicit_args = _get_args_from_config(from_config, *args, **kwargs) + return orig_func(**explicit_args) + else: + return orig_func(*args, **kwargs) + + wrapped.from_config = from_config + return wrapped + + return wrapper + + +def _get_args_from_config(from_config_func, *args, **kwargs): + """ + Use `from_config` to obtain explicit arguments. 
+ + Returns: + dict: arguments to be used for cls.__init__ + """ + signature = inspect.signature(from_config_func) + if list(signature.parameters.keys())[0] != "cfg": + if inspect.isfunction(from_config_func): + name = from_config_func.__name__ + else: + name = f"{from_config_func.__self__}.from_config" + raise TypeError(f"{name} must take 'cfg' as the first argument!") + support_var_arg = any( + param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] + for param in signature.parameters.values() + ) + if support_var_arg: # forward all arguments to from_config, if from_config accepts them + ret = from_config_func(*args, **kwargs) + else: + # forward supported arguments to from_config + supported_arg_names = set(signature.parameters.keys()) + extra_kwargs = {} + for name in list(kwargs.keys()): + if name not in supported_arg_names: + extra_kwargs[name] = kwargs.pop(name) + ret = from_config_func(*args, **kwargs) + # forward the other arguments to __init__ + ret.update(extra_kwargs) + return ret + + +def _called_with_cfg(*args, **kwargs): + """ + Returns: + bool: whether the arguments contain CfgNode and should be considered + forwarded to from_config. + """ + from omegaconf import DictConfig + + if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): + return True + if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): + return True + # `from_config`'s first argument is forced to be "cfg". + # So the above check covers all cases. + return False diff --git a/data_processing/detectron2/detectron2/config/defaults.py b/data_processing/detectron2/detectron2/config/defaults.py new file mode 100644 index 0000000..bd2a5f6 --- /dev/null +++ b/data_processing/detectron2/detectron2/config/defaults.py @@ -0,0 +1,650 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .config import CfgNode as CN + +# NOTE: given the new config system +# (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html), +# we will stop adding new functionalities to default CfgNode. + +# ----------------------------------------------------------------------------- +# Convention about Training / Test specific parameters +# ----------------------------------------------------------------------------- +# Whenever an argument can be either used for training or for testing, the +# corresponding name will be post-fixed by a _TRAIN for a training parameter, +# or _TEST for a test-specific parameter. +# For example, the number of images during training will be +# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be +# IMAGES_PER_BATCH_TEST + +# ----------------------------------------------------------------------------- +# Config definition +# ----------------------------------------------------------------------------- + +_C = CN() + +# The version number, to upgrade from old configs to new ones if any +# changes happen. It's recommended to keep a VERSION in your config file. +_C.VERSION = 2 + +_C.MODEL = CN() +_C.MODEL.LOAD_PROPOSALS = False +_C.MODEL.MASK_ON = False +_C.MODEL.KEYPOINT_ON = False +_C.MODEL.DEVICE = "cuda" +_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" + +# Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file +# to be loaded to the model. You can find available models in the model zoo. +_C.MODEL.WEIGHTS = "" + +# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR). 
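Editorial usage sketch of the CfgNode API defined above, ahead of the long list of defaults that follows; the YAML filename is a placeholder:

```python
from detectron2.config import get_cfg

cfg = get_cfg()                               # a copy of the defaults below
cfg.merge_from_file("my_experiment.yaml")     # older VERSIONs are auto-upgraded
cfg.merge_from_list(["SOLVER.BASE_LR", "0.0025"])
cfg.freeze()
```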
+# To train on images of different number of channels, just set different mean & std. +# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] +_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] +# When using pre-trained models in Detectron1 or any MSRA models, +# std has been absorbed into its conv1 weights, so the std needs to be set 1. +# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) +_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] + + +# ----------------------------------------------------------------------------- +# INPUT +# ----------------------------------------------------------------------------- +_C.INPUT = CN() +# By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge. +# Please refer to ResizeShortestEdge for detailed definition. +# Size of the smallest side of the image during training +_C.INPUT.MIN_SIZE_TRAIN = (800,) +# Sample size of smallest side by choice or random selection from range give by +# INPUT.MIN_SIZE_TRAIN +_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" +# Maximum size of the side of the image during training +_C.INPUT.MAX_SIZE_TRAIN = 1333 +# Size of the smallest side of the image during testing. Set to zero to disable resize in testing. +_C.INPUT.MIN_SIZE_TEST = 800 +# Maximum size of the side of the image during testing +_C.INPUT.MAX_SIZE_TEST = 1333 +# Mode for flipping images used in data augmentation during training +# choose one of ["horizontal, "vertical", "none"] +_C.INPUT.RANDOM_FLIP = "horizontal" + +# `True` if cropping is used for data augmentation during training +_C.INPUT.CROP = CN({"ENABLED": False}) +# Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation. +_C.INPUT.CROP.TYPE = "relative_range" +# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of +# pixels if CROP.TYPE is "absolute" +_C.INPUT.CROP.SIZE = [0.9, 0.9] + + +# Whether the model needs RGB, YUV, HSV etc. +# Should be one of the modes defined here, as we use PIL to read the image: +# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes +# with BGR being the one exception. One can set image format to BGR, we will +# internally use RGB for conversion and flip the channels over +_C.INPUT.FORMAT = "BGR" +# The ground truth mask format that the model will use. +# Mask R-CNN supports either "polygon" or "bitmask" as ground truth. +_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask" + + +# ----------------------------------------------------------------------------- +# Dataset +# ----------------------------------------------------------------------------- +_C.DATASETS = CN() +# List of the dataset names for training. Must be registered in DatasetCatalog +# Samples from these datasets will be merged and used as one dataset. +_C.DATASETS.TRAIN = () +# List of the pre-computed proposal files for training, which must be consistent +# with datasets listed in DATASETS.TRAIN. +_C.DATASETS.PROPOSAL_FILES_TRAIN = () +# Number of top scoring precomputed proposals to keep for training +_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 +# List of the dataset names for testing. Must be registered in DatasetCatalog +_C.DATASETS.TEST = () +# List of the pre-computed proposal files for test, which must be consistent +# with datasets listed in DATASETS.TEST. 
+_C.DATASETS.PROPOSAL_FILES_TEST = () +# Number of top scoring precomputed proposals to keep for test +_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 + +# ----------------------------------------------------------------------------- +# DataLoader +# ----------------------------------------------------------------------------- +_C.DATALOADER = CN() +# Number of data loading threads +_C.DATALOADER.NUM_WORKERS = 4 +# If True, each batch should contain only images for which the aspect ratio +# is compatible. This groups portrait images together, and landscape images +# are not batched with portrait images. +_C.DATALOADER.ASPECT_RATIO_GROUPING = True +# Options: TrainingSampler, RepeatFactorTrainingSampler +_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" +# Repeat threshold for RepeatFactorTrainingSampler +_C.DATALOADER.REPEAT_THRESHOLD = 0.0 +# Tf True, when working on datasets that have instance annotations, the +# training dataloader will filter out images without associated annotations +_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True + +# ---------------------------------------------------------------------------- # +# Backbone options +# ---------------------------------------------------------------------------- # +_C.MODEL.BACKBONE = CN() + +_C.MODEL.BACKBONE.NAME = "build_resnet_backbone" +# Freeze the first several stages so they are not trained. +# There are 5 stages in ResNet. The first is a convolution, and the following +# stages are each group of residual blocks. +_C.MODEL.BACKBONE.FREEZE_AT = 2 + + +# ---------------------------------------------------------------------------- # +# FPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.FPN = CN() +# Names of the input feature maps to be used by FPN +# They must have contiguous power of 2 strides +# e.g., ["res2", "res3", "res4", "res5"] +_C.MODEL.FPN.IN_FEATURES = [] +_C.MODEL.FPN.OUT_CHANNELS = 256 + +# Options: "" (no norm), "GN" +_C.MODEL.FPN.NORM = "" + +# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg" +_C.MODEL.FPN.FUSE_TYPE = "sum" + + +# ---------------------------------------------------------------------------- # +# Proposal generator options +# ---------------------------------------------------------------------------- # +_C.MODEL.PROPOSAL_GENERATOR = CN() +# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals" +_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" +# Proposal height and width both need to be greater than MIN_SIZE +# (a the scale used during training or inference) +_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 + + +# ---------------------------------------------------------------------------- # +# Anchor generator options +# ---------------------------------------------------------------------------- # +_C.MODEL.ANCHOR_GENERATOR = CN() +# The generator can be any name in the ANCHOR_GENERATOR registry +_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" +# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input. +# Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for +# IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1. +# When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES. +_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] +# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect +# ratios are generated by an anchor generator. +# Format: list[list[float]]. 
ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W) +# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true, +# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used +# for all IN_FEATURES. +_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] +# Anchor angles. +# list[list[float]], the angle in degrees, for each input feature map. +# ANGLES[i] specifies the list of angles for IN_FEATURES[i]. +_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] +# Relative offset between the center of the first anchor and the top-left corner of the image +# Value has to be in [0, 1). Recommend to use 0.5, which means half stride. +# The value is not expected to affect model accuracy. +_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 + +# ---------------------------------------------------------------------------- # +# RPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.RPN = CN() +_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY + +# Names of the input feature maps to be used by RPN +# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN +_C.MODEL.RPN.IN_FEATURES = ["res4"] +# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels +# Set to -1 or a large value, e.g. 100000, to disable pruning anchors +_C.MODEL.RPN.BOUNDARY_THRESH = -1 +# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD] +# Minimum overlap required between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD +# ==> positive RPN example: 1) +# Maximum overlap allowed between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD +# ==> negative RPN example: 0) +# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD) +# are ignored (-1) +_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] +_C.MODEL.RPN.IOU_LABELS = [0, -1, 1] +# Number of regions per image used to train RPN +_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 +# Target fraction of foreground (positive) examples per RPN minibatch +_C.MODEL.RPN.POSITIVE_FRACTION = 0.5 +# Options are: "smooth_l1", "giou", "diou", "ciou" +_C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1" +_C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0 +# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets +_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 +_C.MODEL.RPN.LOSS_WEIGHT = 1.0 +# Number of top scoring RPN proposals to keep before applying NMS +# When FPN is used, this is *per FPN level* (not total) +_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 +_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 +# Number of top scoring RPN proposals to keep after applying NMS +# When FPN is used, this limit is applied per level and then again to the union +# of proposals from all levels +# NOTE: When FPN is used, the meaning of this config is different from Detectron1. +# It means per-batch topk in Detectron1, but per-image topk here. +# See the "find_top_rpn_proposals" function for details. +_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 +_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 +# NMS threshold used on RPN proposals +_C.MODEL.RPN.NMS_THRESH = 0.7 +# Set this to -1 to use the same number of output channels as input channels. 
+_C.MODEL.RPN.CONV_DIMS = [-1] + +# ---------------------------------------------------------------------------- # +# ROI HEADS options +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_HEADS = CN() +_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" +# Number of foreground classes +_C.MODEL.ROI_HEADS.NUM_CLASSES = 80 +# Names of the input feature maps to be used by ROI heads +# Currently all heads (box, mask, ...) use the same input feature map list +# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN +_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] +# IOU overlap ratios [IOU_THRESHOLD] +# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD) +# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD) +_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] +_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] +# RoI minibatch size *per image* (number of regions of interest [ROIs]) during training +# Total number of RoIs per training minibatch = +# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH +# E.g., a common configuration is: 512 * 16 = 8192 +_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 +# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0) +_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 + +# Only used on test mode + +# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to +# balance obtaining high recall with not having too many low precision +# detections that will slow down inference post processing steps (like NMS) +# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down +# inference. +_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 +# Overlap threshold used for non-maximum suppression (suppress boxes with +# IoU >= this threshold) +_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 +# If True, augment proposals with ground-truth boxes before sampling proposals to +# train ROI heads. +_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True + +# ---------------------------------------------------------------------------- # +# Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_HEAD = CN() +# C4 don't use head name option +# Options for non-C4 models: FastRCNNConvFCHead, +_C.MODEL.ROI_BOX_HEAD.NAME = "" +# Options are: "smooth_l1", "giou", "diou", "ciou" +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1" +# The final scaling coefficient on the box regression loss, used to balance the magnitude of its +# gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`. +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0 +# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets +# These are empirically chosen to approximately lead to unit variance targets +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 +_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + +_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 +# Hidden layer dimension for FC layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 +_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 +# Channel dimension for Conv layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 +# Normalization method for the convolution layers. 
+# Options: "" (no norm), "GN", "SyncBN". +_C.MODEL.ROI_BOX_HEAD.NORM = "" +# Whether to use class agnostic for bbox regression +_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False +# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes. +_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False + +# Federated loss can be used to improve the training of LVIS +_C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False +# Sigmoid cross entrophy is used with federated loss +_C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False +# The power value applied to image_count when calcualting frequency weight +_C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER = 0.5 +# Number of classes to keep in total +_C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES = 50 + +# ---------------------------------------------------------------------------- # +# Cascaded Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_CASCADE_HEAD = CN() +# The number of cascade stages is implicitly defined by the length of the following two configs. +_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( + (10.0, 10.0, 5.0, 5.0), + (20.0, 20.0, 10.0, 10.0), + (30.0, 30.0, 15.0, 15.0), +) +_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) + + +# ---------------------------------------------------------------------------- # +# Mask Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_MASK_HEAD = CN() +_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" +_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 +_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head +_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 +# Normalization method for the convolution layers. +# Options: "" (no norm), "GN", "SyncBN". +_C.MODEL.ROI_MASK_HEAD.NORM = "" +# Whether to use class agnostic for mask prediction +_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" + + +# ---------------------------------------------------------------------------- # +# Keypoint Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_KEYPOINT_HEAD = CN() +_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 +_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) +_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO. + +# Images with too few (or no) keypoints are excluded from training. +_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 +# Normalize by the total number of visible keypoints in the minibatch if True. +# Otherwise, normalize by the total number of keypoints that could ever exist +# in the minibatch. +# The keypoint softmax loss is only calculated on visible keypoints. +# Since the number of visible keypoints can vary significantly between +# minibatches, this has the effect of up-weighting the importance of +# minibatches with few visible keypoints. (Imagine the extreme case of +# only one visible keypoint versus N: in the case of N, each one +# contributes 1/N to the gradient compared to the single keypoint +# determining the gradient direction). 
Instead, we can normalize the +# loss by the total number of keypoints, if it were the case that all +# keypoints were visible in a full minibatch. (Returning to the example, +# this means that the one visible keypoint contributes as much as each +# of the N keypoints.) +_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True +# Multi-task loss weight to use for keypoints +# Recommended values: +# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True +# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False +_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0 +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2" + +# ---------------------------------------------------------------------------- # +# Semantic Segmentation Head +# ---------------------------------------------------------------------------- # +_C.MODEL.SEM_SEG_HEAD = CN() +_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead" +_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"] +# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for +# the correposnding pixel. +_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255 +# Number of classes in the semantic segmentation head +_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54 +# Number of channels in the 3x3 convs inside semantic-FPN heads. +_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128 +# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride. +_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4 +# Normalization method for the convolution layers. Options: "" (no norm), "GN". +_C.MODEL.SEM_SEG_HEAD.NORM = "GN" +_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0 + +_C.MODEL.PANOPTIC_FPN = CN() +# Scaling of all losses from instance detection / segmentation head. +_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0 + +# options when combining instance & semantic segmentation outputs +_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used +_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5 +_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096 +_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5 + + +# ---------------------------------------------------------------------------- # +# RetinaNet Head +# ---------------------------------------------------------------------------- # +_C.MODEL.RETINANET = CN() + +# This is the number of foreground classes. +_C.MODEL.RETINANET.NUM_CLASSES = 80 + +_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"] + +# Convolutions to use in the cls and bbox tower +# NOTE: this doesn't include the last conv for logits +_C.MODEL.RETINANET.NUM_CONVS = 4 + +# IoU overlap ratio [bg, fg] for labeling anchors. +# Anchors with < bg are labeled negative (0) +# Anchors with >= bg and < fg are ignored (-1) +# Anchors with >= fg are labeled positive (1) +_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5] +_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1] + +# Prior prob for rare case (i.e. foreground) at the beginning of training. +# This is used to set the bias for the logits layer of the classifier subnet. +# This improves training stability in the case of heavy class imbalance. 
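A short editorial note on `PRIOR_PROB` below: RetinaNet-style heads typically turn it into an initial bias for the classification logits so that the predicted foreground probability starts near the prior, which is what stabilizes early training under heavy class imbalance. A sketch of that arithmetic (assumed initialization scheme, not quoted from this diff):

```python
import math

prior_prob = 0.01
bias = -math.log((1 - prior_prob) / prior_prob)   # ~ -4.595
# sigmoid(bias) recovers the prior, so positive predictions start out rare.
assert abs(1.0 / (1.0 + math.exp(-bias)) - prior_prob) < 1e-9
```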
+_C.MODEL.RETINANET.PRIOR_PROB = 0.01 + +# Inference cls score threshold, only anchors with score > INFERENCE_TH are +# considered for inference (to improve speed) +_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05 +# Select topk candidates before NMS +_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000 +_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5 + +# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets +_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) + +# Loss parameters +_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0 +_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25 +_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1 +# Options are: "smooth_l1", "giou", "diou", "ciou" +_C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1" + +# One of BN, SyncBN, FrozenBN, GN +# Only supports GN until unshared norm is implemented +_C.MODEL.RETINANET.NORM = "" + + +# ---------------------------------------------------------------------------- # +# ResNe[X]t options (ResNets = {ResNet, ResNeXt} +# Note that parts of a resnet may be used for both the backbone and the head +# These options apply to both +# ---------------------------------------------------------------------------- # +_C.MODEL.RESNETS = CN() + +_C.MODEL.RESNETS.DEPTH = 50 +_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone + +# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt +_C.MODEL.RESNETS.NUM_GROUPS = 1 + +# Options: FrozenBN, GN, "SyncBN", "BN" +_C.MODEL.RESNETS.NORM = "FrozenBN" + +# Baseline width of each group. +# Scaling this parameters will scale the width of all bottleneck layers. +_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 + +# Place the stride 2 conv on the 1x1 filter +# Use True only for the original MSRA ResNet; use False for C2 and Torch models +_C.MODEL.RESNETS.STRIDE_IN_1X1 = True + +# Apply dilation in stage "res5" +_C.MODEL.RESNETS.RES5_DILATION = 1 + +# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet +# For R18 and R34, this needs to be set to 64 +_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 +_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 + +# Apply Deformable Convolution in stages +# Specify if apply deform_conv on Res2, Res3, Res4, Res5 +_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] +# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168); +# Use False for DeformableV1. +_C.MODEL.RESNETS.DEFORM_MODULATED = False +# Number of groups in deformable conv. +_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 + + +# ---------------------------------------------------------------------------- # +# Solver +# ---------------------------------------------------------------------------- # +_C.SOLVER = CN() + +# Options: WarmupMultiStepLR, WarmupCosineLR. +# See detectron2/solver/build.py for definition. +_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" + +_C.SOLVER.MAX_ITER = 40000 + +_C.SOLVER.BASE_LR = 0.001 +# The end lr, only used by WarmupCosineLR +_C.SOLVER.BASE_LR_END = 0.0 + +_C.SOLVER.MOMENTUM = 0.9 + +_C.SOLVER.NESTEROV = False + +_C.SOLVER.WEIGHT_DECAY = 0.0001 +# The weight decay that's applied to parameters of normalization layers +# (typically the affine transformation) +_C.SOLVER.WEIGHT_DECAY_NORM = 0.0 + +_C.SOLVER.GAMMA = 0.1 +# The iteration number to decrease learning rate by GAMMA. 
+_C.SOLVER.STEPS = (30000,) +# Number of decays in WarmupStepWithFixedGammaLR schedule +_C.SOLVER.NUM_DECAYS = 3 + +_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 +_C.SOLVER.WARMUP_ITERS = 1000 +_C.SOLVER.WARMUP_METHOD = "linear" +# Whether to rescale the interval for the learning schedule after warmup +_C.SOLVER.RESCALE_INTERVAL = False + +# Save a checkpoint after every this number of iterations +_C.SOLVER.CHECKPOINT_PERIOD = 5000 + +# Number of images per batch across all machines. This is also the number +# of training images per step (i.e. per iteration). If we use 16 GPUs +# and IMS_PER_BATCH = 32, each GPU will see 2 images per batch. +# May be adjusted automatically if REFERENCE_WORLD_SIZE is set. +_C.SOLVER.IMS_PER_BATCH = 16 + +# The reference number of workers (GPUs) this config is meant to train with. +# It takes no effect when set to 0. +# With a non-zero value, it will be used by DefaultTrainer to compute a desired +# per-worker batch size, and then scale the other related configs (total batch size, +# learning rate, etc) to match the per-worker batch size. +# See documentation of `DefaultTrainer.auto_scale_workers` for details: +_C.SOLVER.REFERENCE_WORLD_SIZE = 0 + +# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for +# biases. This is not useful (at least for recent models). You should avoid +# changing these and they exist only to reproduce Detectron v1 training if +# desired. +_C.SOLVER.BIAS_LR_FACTOR = 1.0 +_C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY + +# Gradient clipping +_C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False}) +# Type of gradient clipping, currently 2 values are supported: +# - "value": the absolute values of elements of each gradients are clipped +# - "norm": the norm of the gradient for each parameter is clipped thus +# affecting all elements in the parameter +_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" +# Maximum absolute value used for clipping gradients +_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 +# Floating point number p for L-p norm to be used with the "norm" +# gradient clipping type; for L-inf, please specify .inf +_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 + +# Enable automatic mixed precision for training +# Note that this does not change model's inference behavior. +# To use AMP in inference, run inference under autocast() +_C.SOLVER.AMP = CN({"ENABLED": False}) + +# ---------------------------------------------------------------------------- # +# Specific test options +# ---------------------------------------------------------------------------- # +_C.TEST = CN() +# For end-to-end tests to verify the expected accuracy. +# Each item is [task, metric, value, tolerance] +# e.g.: [['bbox', 'AP', 38.5, 0.2]] +_C.TEST.EXPECTED_RESULTS = [] +# The period (in terms of steps) to evaluate the model during training. +# Set to 0 to disable. +_C.TEST.EVAL_PERIOD = 0 +# The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval +# When empty, it will use the defaults in COCO. +# Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. +_C.TEST.KEYPOINT_OKS_SIGMAS = [] +# Maximum number of detections to return per image during inference (100 is +# based on the limit established for the COCO dataset). 
+_C.TEST.DETECTIONS_PER_IMAGE = 100 + +_C.TEST.AUG = CN({"ENABLED": False}) +_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) +_C.TEST.AUG.MAX_SIZE = 4000 +_C.TEST.AUG.FLIP = True + +_C.TEST.PRECISE_BN = CN({"ENABLED": False}) +_C.TEST.PRECISE_BN.NUM_ITER = 200 + +# ---------------------------------------------------------------------------- # +# Misc options +# ---------------------------------------------------------------------------- # +# Directory where output files are written +_C.OUTPUT_DIR = "./output" +# Set seed to negative to fully randomize everything. +# Set seed to positive to use a fixed seed. Note that a fixed seed increases +# reproducibility but does not guarantee fully deterministic behavior. +# Disabling all parallelism further increases reproducibility. +_C.SEED = -1 +# Benchmark different cudnn algorithms. +# If input images have very different sizes, this option will have large overhead +# for about 10k iterations. It usually hurts total time, but can benefit for certain models. +# If input images have the same or similar sizes, benchmark is often helpful. +_C.CUDNN_BENCHMARK = False +# The period (in terms of steps) for minibatch visualization at train time. +# Set to 0 to disable. +_C.VIS_PERIOD = 0 + +# global config is for quick hack purposes. +# You can set them in command line or config files, +# and access it with: +# +# from detectron2.config import global_cfg +# print(global_cfg.HACK) +# +# Do not commit any configs into it. +_C.GLOBAL = CN() +_C.GLOBAL.HACK = 1.0 diff --git a/data_processing/detectron2/detectron2/config/instantiate.py b/data_processing/detectron2/detectron2/config/instantiate.py new file mode 100644 index 0000000..05ee2c7 --- /dev/null +++ b/data_processing/detectron2/detectron2/config/instantiate.py @@ -0,0 +1,88 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import collections.abc as abc +import dataclasses +import logging +from typing import Any + +from detectron2.utils.registry import _convert_target_to_string, locate + +__all__ = ["dump_dataclass", "instantiate"] + + +def dump_dataclass(obj: Any): + """ + Dump a dataclass recursively into a dict that can be later instantiated. + + Args: + obj: a dataclass object + + Returns: + dict + """ + assert dataclasses.is_dataclass(obj) and not isinstance( + obj, type + ), "dump_dataclass() requires an instance of a dataclass." + ret = {"_target_": _convert_target_to_string(type(obj))} + for f in dataclasses.fields(obj): + v = getattr(obj, f.name) + if dataclasses.is_dataclass(v): + v = dump_dataclass(v) + if isinstance(v, (list, tuple)): + v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] + ret[f.name] = v + return ret + + +def instantiate(cfg): + """ + Recursively instantiate objects defined in dictionaries by + "_target_" and arguments. + + Args: + cfg: a dict-like object with "_target_" that defines the caller, and + other keys that define the arguments + + Returns: + object instantiated by cfg + """ + from omegaconf import ListConfig, DictConfig, OmegaConf + + if isinstance(cfg, ListConfig): + lst = [instantiate(x) for x in cfg] + return ListConfig(lst, flags={"allow_objects": True}) + if isinstance(cfg, list): + # Specialize for list, because many classes take + # list[objects] as arguments, such as ResNet, DatasetMapper + return [instantiate(x) for x in cfg] + + # If input is a DictConfig backed by dataclasses (i.e. omegaconf's structured config), + # instantiate it to the actual dataclass. 
+ if isinstance(cfg, DictConfig) and dataclasses.is_dataclass(cfg._metadata.object_type): + return OmegaConf.to_object(cfg) + + if isinstance(cfg, abc.Mapping) and "_target_" in cfg: + # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, + # but faster: https://github.com/facebookresearch/hydra/issues/1200 + cfg = {k: instantiate(v) for k, v in cfg.items()} + cls = cfg.pop("_target_") + cls = instantiate(cls) + + if isinstance(cls, str): + cls_name = cls + cls = locate(cls_name) + assert cls is not None, cls_name + else: + try: + cls_name = cls.__module__ + "." + cls.__qualname__ + except Exception: + # target could be anything, so the above could fail + cls_name = str(cls) + assert callable(cls), f"_target_ {cls} does not define a callable object" + try: + return cls(**cfg) + except TypeError: + logger = logging.getLogger(__name__) + logger.error(f"Error when instantiating {cls_name}!") + raise + return cfg # return as-is if don't know what to do diff --git a/data_processing/detectron2/detectron2/config/lazy.py b/data_processing/detectron2/detectron2/config/lazy.py new file mode 100644 index 0000000..ea93e86 --- /dev/null +++ b/data_processing/detectron2/detectron2/config/lazy.py @@ -0,0 +1,436 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import ast +import builtins +import collections.abc as abc +import importlib +import inspect +import logging +import os +import uuid +from contextlib import contextmanager +from copy import deepcopy +from dataclasses import is_dataclass +from typing import List, Tuple, Union +import cloudpickle +import yaml +from omegaconf import DictConfig, ListConfig, OmegaConf, SCMode + +from detectron2.utils.file_io import PathManager +from detectron2.utils.registry import _convert_target_to_string + +__all__ = ["LazyCall", "LazyConfig"] + + +class LazyCall: + """ + Wrap a callable so that when it's called, the call will not be executed, + but returns a dict that describes the call. + + LazyCall object has to be called with only keyword arguments. Positional + arguments are not yet supported. + + Examples: + :: + from detectron2.config import instantiate, LazyCall + + layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32) + layer_cfg.out_channels = 64 # can edit it afterwards + layer = instantiate(layer_cfg) + """ + + def __init__(self, target): + if not (callable(target) or isinstance(target, (str, abc.Mapping))): + raise TypeError( + f"target of LazyCall must be a callable or defines a callable! Got {target}" + ) + self._target = target + + def __call__(self, **kwargs): + if is_dataclass(self._target): + # omegaconf object cannot hold dataclass type + # https://github.com/omry/omegaconf/issues/784 + target = _convert_target_to_string(self._target) + else: + target = self._target + kwargs["_target_"] = target + + return DictConfig(content=kwargs, flags={"allow_objects": True}) + + +def _visit_dict_config(cfg, func): + """ + Apply func recursively to all DictConfig in cfg. 
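The `LazyCall` docstring above already shows the intended pattern; for completeness, an editorial sketch of how it pairs with `instantiate`, including the plain-dict `"_target_"` form handled earlier in instantiate.py:

```python
import torch.nn as nn
from detectron2.config import LazyCall as L, instantiate

conv_cfg = L(nn.Conv2d)(in_channels=16, out_channels=32, kernel_size=3)
conv_cfg.out_channels = 64            # still plain config, nothing built yet
conv = instantiate(conv_cfg)          # -> nn.Conv2d(16, 64, kernel_size=3)

# Plain mappings with a "_target_" key go through the same code path.
relu = instantiate({"_target_": "torch.nn.ReLU", "inplace": True})
```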
+ """ + if isinstance(cfg, DictConfig): + func(cfg) + for v in cfg.values(): + _visit_dict_config(v, func) + elif isinstance(cfg, ListConfig): + for v in cfg: + _visit_dict_config(v, func) + + +def _validate_py_syntax(filename): + # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py + with PathManager.open(filename, "r") as f: + content = f.read() + try: + ast.parse(content) + except SyntaxError as e: + raise SyntaxError(f"Config file {filename} has syntax error!") from e + + +def _cast_to_config(obj): + # if given a dict, return DictConfig instead + if isinstance(obj, dict): + return DictConfig(obj, flags={"allow_objects": True}) + return obj + + +_CFG_PACKAGE_NAME = "detectron2._cfg_loader" +""" +A namespace to put all imported config into. +""" + + +def _random_package_name(filename): + # generate a random package name when loading config files + return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename) + + +@contextmanager +def _patch_import(): + """ + Enhance relative import statements in config files, so that they: + 1. locate files purely based on relative location, regardless of packages. + e.g. you can import file without having __init__ + 2. do not cache modules globally; modifications of module states has no side effect + 3. support other storage system through PathManager, so config files can be in the cloud + 4. imported dict are turned into omegaconf.DictConfig automatically + """ + old_import = builtins.__import__ + + def find_relative_file(original_file, relative_import_path, level): + # NOTE: "from . import x" is not handled. Because then it's unclear + # if such import should produce `x` as a python module or DictConfig. + # This can be discussed further if needed. + relative_import_err = """ +Relative import of directories is not allowed within config files. +Within a config file, relative import can only import other config files. +""".replace( + "\n", " " + ) + if not len(relative_import_path): + raise ImportError(relative_import_err) + + cur_file = os.path.dirname(original_file) + for _ in range(level - 1): + cur_file = os.path.dirname(cur_file) + cur_name = relative_import_path.lstrip(".") + for part in cur_name.split("."): + cur_file = os.path.join(cur_file, part) + if not cur_file.endswith(".py"): + cur_file += ".py" + if not PathManager.isfile(cur_file): + cur_file_no_suffix = cur_file[: -len(".py")] + if PathManager.isdir(cur_file_no_suffix): + raise ImportError(f"Cannot import from {cur_file_no_suffix}." + relative_import_err) + else: + raise ImportError( + f"Cannot import name {relative_import_path} from " + f"{original_file}: {cur_file} does not exist." 
+ ) + return cur_file + + def new_import(name, globals=None, locals=None, fromlist=(), level=0): + if ( + # Only deal with relative imports inside config files + level != 0 + and globals is not None + and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME) + ): + cur_file = find_relative_file(globals["__file__"], name, level) + _validate_py_syntax(cur_file) + spec = importlib.machinery.ModuleSpec( + _random_package_name(cur_file), None, origin=cur_file + ) + module = importlib.util.module_from_spec(spec) + module.__file__ = cur_file + with PathManager.open(cur_file) as f: + content = f.read() + exec(compile(content, cur_file, "exec"), module.__dict__) + for name in fromlist: # turn imported dict into DictConfig automatically + val = _cast_to_config(module.__dict__[name]) + module.__dict__[name] = val + return module + return old_import(name, globals, locals, fromlist=fromlist, level=level) + + builtins.__import__ = new_import + yield new_import + builtins.__import__ = old_import + + +class LazyConfig: + """ + Provide methods to save, load, and overrides an omegaconf config object + which may contain definition of lazily-constructed objects. + """ + + @staticmethod + def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None): + """ + Similar to :meth:`load()`, but load path relative to the caller's + source file. + + This has the same functionality as a relative import, except that this method + accepts filename as a string, so more characters are allowed in the filename. + """ + caller_frame = inspect.stack()[1] + caller_fname = caller_frame[0].f_code.co_filename + assert caller_fname != "", "load_rel Unable to find caller" + caller_dir = os.path.dirname(caller_fname) + filename = os.path.join(caller_dir, filename) + return LazyConfig.load(filename, keys) + + @staticmethod + def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None): + """ + Load a config file. + + Args: + filename: absolute path or relative path w.r.t. the current working directory + keys: keys to load and return. If not given, return all keys + (whose values are config objects) in a dict. + """ + has_keys = keys is not None + filename = filename.replace("/./", "/") # redundant + if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]: + raise ValueError(f"Config file {filename} has to be a python or yaml file.") + if filename.endswith(".py"): + _validate_py_syntax(filename) + + with _patch_import(): + # Record the filename + module_namespace = { + "__file__": filename, + "__package__": _random_package_name(filename), + } + with PathManager.open(filename) as f: + content = f.read() + # Compile first with filename to: + # 1. make filename appears in stacktrace + # 2. 
make load_rel able to find its parent's (possibly remote) location + exec(compile(content, filename, "exec"), module_namespace) + + ret = module_namespace + else: + with PathManager.open(filename) as f: + obj = yaml.unsafe_load(f) + ret = OmegaConf.create(obj, flags={"allow_objects": True}) + + if has_keys: + if isinstance(keys, str): + return _cast_to_config(ret[keys]) + else: + return tuple(_cast_to_config(ret[a]) for a in keys) + else: + if filename.endswith(".py"): + # when not specified, only load those that are config objects + ret = DictConfig( + { + name: _cast_to_config(value) + for name, value in ret.items() + if isinstance(value, (DictConfig, ListConfig, dict)) + and not name.startswith("_") + }, + flags={"allow_objects": True}, + ) + return ret + + @staticmethod + def save(cfg, filename: str): + """ + Save a config object to a yaml file. + Note that when the config dictionary contains complex objects (e.g. lambda), + it can't be saved to yaml. In that case we will print an error and + attempt to save to a pkl file instead. + + Args: + cfg: an omegaconf config object + filename: yaml file name to save the config file + """ + logger = logging.getLogger(__name__) + try: + cfg = deepcopy(cfg) + except Exception: + pass + else: + # if it's deep-copyable, then... + def _replace_type_by_name(x): + if "_target_" in x and callable(x._target_): + try: + x._target_ = _convert_target_to_string(x._target_) + except AttributeError: + pass + + # not necessary, but makes yaml looks nicer + _visit_dict_config(cfg, _replace_type_by_name) + + save_pkl = False + try: + dict = OmegaConf.to_container( + cfg, + # Do not resolve interpolation when saving, i.e. do not turn ${a} into + # actual values when saving. + resolve=False, + # Save structures (dataclasses) in a format that can be instantiated later. + # Without this option, the type information of the dataclass will be erased. + structured_config_mode=SCMode.INSTANTIATE, + ) + dumped = yaml.dump(dict, default_flow_style=None, allow_unicode=True, width=9999) + with PathManager.open(filename, "w") as f: + f.write(dumped) + + try: + _ = yaml.unsafe_load(dumped) # test that it is loadable + except Exception: + logger.warning( + "The config contains objects that cannot serialize to a valid yaml. " + f"{filename} is human-readable but cannot be loaded." + ) + save_pkl = True + except Exception: + logger.exception("Unable to serialize the config to yaml. Error:") + save_pkl = True + + if save_pkl: + new_filename = filename + ".pkl" + try: + # retry by pickle + with PathManager.open(new_filename, "wb") as f: + cloudpickle.dump(cfg, f) + logger.warning(f"Config is saved using cloudpickle at {new_filename}.") + except Exception: + pass + + @staticmethod + def apply_overrides(cfg, overrides: List[str]): + """ + In-place override contents of cfg. + + Args: + cfg: an omegaconf config object + overrides: list of strings in the format of "a=b" to override configs. + See https://hydra.cc/docs/next/advanced/override_grammar/basic/ + for syntax. + + Returns: + the cfg object + """ + + def safe_update(cfg, key, value): + parts = key.split(".") + for idx in range(1, len(parts)): + prefix = ".".join(parts[:idx]) + v = OmegaConf.select(cfg, prefix, default=None) + if v is None: + break + if not OmegaConf.is_config(v): + raise KeyError( + f"Trying to update key {key}, but {prefix} " + f"is not a config, but has type {type(v)}." 
+ ) + OmegaConf.update(cfg, key, value, merge=True) + + try: + from hydra.core.override_parser.overrides_parser import OverridesParser + + has_hydra = True + except ImportError: + has_hydra = False + + if has_hydra: + parser = OverridesParser.create() + overrides = parser.parse_overrides(overrides) + for o in overrides: + key = o.key_or_group + value = o.value() + if o.is_delete(): + # TODO support this + raise NotImplementedError("deletion is not yet a supported override") + safe_update(cfg, key, value) + else: + # Fallback. Does not support all the features and error checking like hydra. + for o in overrides: + key, value = o.split("=") + try: + value = eval(value, {}) + except NameError: + pass + safe_update(cfg, key, value) + return cfg + + @staticmethod + def to_py(cfg, prefix: str = "cfg."): + """ + Try to convert a config object into Python-like psuedo code. + + Note that perfect conversion is not always possible. So the returned + results are mainly meant to be human-readable, and not meant to be executed. + + Args: + cfg: an omegaconf config object + prefix: root name for the resulting code (default: "cfg.") + + + Returns: + str of formatted Python code + """ + import black + + cfg = OmegaConf.to_container(cfg, resolve=True) + + def _to_str(obj, prefix=None, inside_call=False): + if prefix is None: + prefix = [] + if isinstance(obj, abc.Mapping) and "_target_" in obj: + # Dict representing a function call + target = _convert_target_to_string(obj.pop("_target_")) + args = [] + for k, v in sorted(obj.items()): + args.append(f"{k}={_to_str(v, inside_call=True)}") + args = ", ".join(args) + call = f"{target}({args})" + return "".join(prefix) + call + elif isinstance(obj, abc.Mapping) and not inside_call: + # Dict that is not inside a call is a list of top-level config objects that we + # render as one object per line with dot separated prefixes + key_list = [] + for k, v in sorted(obj.items()): + if isinstance(v, abc.Mapping) and "_target_" not in v: + key_list.append(_to_str(v, prefix=prefix + [k + "."])) + else: + key = "".join(prefix) + k + key_list.append(f"{key}={_to_str(v)}") + return "\n".join(key_list) + elif isinstance(obj, abc.Mapping): + # Dict that is inside a call is rendered as a regular dict + return ( + "{" + + ",".join( + f"{repr(k)}: {_to_str(v, inside_call=inside_call)}" + for k, v in sorted(obj.items()) + ) + + "}" + ) + elif isinstance(obj, list): + return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]" + else: + return repr(obj) + + py_str = _to_str(cfg, prefix=[prefix]) + try: + return black.format_str(py_str, mode=black.Mode()) + except black.InvalidInput: + return py_str diff --git a/data_processing/detectron2/detectron2/data/__init__.py b/data_processing/detectron2/detectron2/data/__init__.py new file mode 100644 index 0000000..259f669 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from . import transforms # isort:skip + +from .build import ( + build_batch_data_loader, + build_detection_test_loader, + build_detection_train_loader, + get_detection_dataset_dicts, + load_proposals_into_dataset, + print_instances_class_histogram, +) +from .catalog import DatasetCatalog, MetadataCatalog, Metadata +from .common import DatasetFromList, MapDataset, ToIterableDataset +from .dataset_mapper import DatasetMapper + +# ensure the builtin datasets are registered +from . 
import datasets, samplers # isort:skip + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/detectron2/data/benchmark.py b/data_processing/detectron2/detectron2/data/benchmark.py new file mode 100644 index 0000000..ac2f372 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/benchmark.py @@ -0,0 +1,225 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +from itertools import count +from typing import List, Tuple +import torch +import tqdm +from fvcore.common.timer import Timer + +from detectron2.utils import comm + +from .build import build_batch_data_loader +from .common import DatasetFromList, MapDataset +from .samplers import TrainingSampler + +logger = logging.getLogger(__name__) + + +class _EmptyMapDataset(torch.utils.data.Dataset): + """ + Map anything to emptiness. + """ + + def __init__(self, dataset): + self.ds = dataset + + def __len__(self): + return len(self.ds) + + def __getitem__(self, idx): + _ = self.ds[idx] + return [0] + + +def iter_benchmark( + iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60 +) -> Tuple[float, List[float]]: + """ + Benchmark an iterator/iterable for `num_iter` iterations with an extra + `warmup` iterations of warmup. + End early if `max_time_seconds` time is spent on iterations. + + Returns: + float: average time (seconds) per iteration + list[float]: time spent on each iteration. Sometimes useful for further analysis. + """ + num_iter, warmup = int(num_iter), int(warmup) + + iterator = iter(iterator) + for _ in range(warmup): + next(iterator) + timer = Timer() + all_times = [] + for curr_iter in tqdm.trange(num_iter): + start = timer.seconds() + if start > max_time_seconds: + num_iter = curr_iter + break + next(iterator) + all_times.append(timer.seconds() - start) + avg = timer.seconds() / num_iter + return avg, all_times + + +class DataLoaderBenchmark: + """ + Some common benchmarks that help understand perf bottleneck of a standard dataloader + made of dataset, mapper and sampler. + """ + + def __init__( + self, + dataset, + *, + mapper, + sampler=None, + total_batch_size, + num_workers=0, + max_time_seconds: int = 90, + ): + """ + Args: + max_time_seconds (int): maximum time to spent for each benchmark + other args: same as in `build.py:build_detection_train_loader` + """ + if isinstance(dataset, list): + dataset = DatasetFromList(dataset, copy=False, serialize=True) + if sampler is None: + sampler = TrainingSampler(len(dataset)) + + self.dataset = dataset + self.mapper = mapper + self.sampler = sampler + self.total_batch_size = total_batch_size + self.num_workers = num_workers + self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size() + + self.max_time_seconds = max_time_seconds + + def _benchmark(self, iterator, num_iter, warmup, msg=None): + avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds) + if msg is not None: + self._log_time(msg, avg, all_times) + return avg, all_times + + def _log_time(self, msg, avg, all_times, distributed=False): + percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]] + if not distributed: + logger.info( + f"{msg}: avg={1.0/avg:.1f} it/s, " + f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, " + f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s." 
+ ) + return + avg_per_gpu = comm.all_gather(avg) + percentiles_per_gpu = comm.all_gather(percentiles) + if comm.get_rank() > 0: + return + for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu): + logger.info( + f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, " + f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, " + f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s." + ) + + def benchmark_dataset(self, num_iter, warmup=5): + """ + Benchmark the speed of taking raw samples from the dataset. + """ + + def loader(): + while True: + for k in self.sampler: + yield self.dataset[k] + + self._benchmark(loader(), num_iter, warmup, "Dataset Alone") + + def benchmark_mapper(self, num_iter, warmup=5): + """ + Benchmark the speed of taking raw samples from the dataset and map + them in a single process. + """ + + def loader(): + while True: + for k in self.sampler: + yield self.mapper(self.dataset[k]) + + self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)") + + def benchmark_workers(self, num_iter, warmup=10): + """ + Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers]. + """ + candidates = [0, 1] + if self.num_workers not in candidates: + candidates.append(self.num_workers) + + dataset = MapDataset(self.dataset, self.mapper) + for n in candidates: + loader = build_batch_data_loader( + dataset, + self.sampler, + self.total_batch_size, + num_workers=n, + ) + self._benchmark( + iter(loader), + num_iter * max(n, 1), + warmup * max(n, 1), + f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})", + ) + del loader + + def benchmark_IPC(self, num_iter, warmup=10): + """ + Benchmark the dataloader where each worker outputs nothing. This + eliminates the IPC overhead compared to the regular dataloader. + + PyTorch multiprocessing's IPC only optimizes for torch tensors. + Large numpy arrays or other data structure may incur large IPC overhead. + """ + n = self.num_workers + dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper)) + loader = build_batch_data_loader( + dataset, self.sampler, self.total_batch_size, num_workers=n + ) + self._benchmark( + iter(loader), + num_iter * max(n, 1), + warmup * max(n, 1), + f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm", + ) + + def benchmark_distributed(self, num_iter, warmup=10): + """ + Benchmark the dataloader in each distributed worker, and log results of + all workers. This helps understand the final performance as well as + the variances among workers. + + It also prints startup time (first iter) of the dataloader. + """ + gpu = comm.get_world_size() + dataset = MapDataset(self.dataset, self.mapper) + n = self.num_workers + loader = build_batch_data_loader( + dataset, self.sampler, self.total_batch_size, num_workers=n + ) + + timer = Timer() + loader = iter(loader) + next(loader) + startup_time = timer.seconds() + logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time)) + + comm.synchronize() + + avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1)) + del loader + self._log_time( + f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})", + avg, + all_times, + True, + ) diff --git a/data_processing/detectron2/detectron2/data/build.py b/data_processing/detectron2/detectron2/data/build.py new file mode 100644 index 0000000..3fa2c6b --- /dev/null +++ b/data_processing/detectron2/detectron2/data/build.py @@ -0,0 +1,556 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
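For context, the `DataLoaderBenchmark` utility above is typically driven as sketched below; `dataset_dicts` and `mapper` are placeholders for whatever dataset list and mapper callable the caller already has, so this is not a self-contained run:

```python
from detectron2.data.benchmark import DataLoaderBenchmark

# `dataset_dicts` is a list of dataset dicts and `mapper` a callable such as
# DatasetMapper(cfg, True); both are assumed to exist in the caller's scope.
bench = DataLoaderBenchmark(
    dataset_dicts,
    mapper=mapper,
    total_batch_size=16,
    num_workers=4,
)
bench.benchmark_dataset(num_iter=100)   # raw sample access
bench.benchmark_mapper(num_iter=100)    # single-process mapping
bench.benchmark_workers(num_iter=100)   # full dataloader with 0/1/4 workers
```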
+import itertools +import logging +import numpy as np +import operator +import pickle +from typing import Any, Callable, Dict, List, Optional, Union +import torch +import torch.utils.data as torchdata +from tabulate import tabulate +from termcolor import colored + +from detectron2.config import configurable +from detectron2.structures import BoxMode +from detectron2.utils.comm import get_world_size +from detectron2.utils.env import seed_all_rng +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import _log_api_usage, log_first_n + +from .catalog import DatasetCatalog, MetadataCatalog +from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset +from .dataset_mapper import DatasetMapper +from .detection_utils import check_metadata_consistency +from .samplers import ( + InferenceSampler, + RandomSubsetTrainingSampler, + RepeatFactorTrainingSampler, + TrainingSampler, +) + +""" +This file contains the default logic to build a dataloader for training or testing. +""" + +__all__ = [ + "build_batch_data_loader", + "build_detection_train_loader", + "build_detection_test_loader", + "get_detection_dataset_dicts", + "load_proposals_into_dataset", + "print_instances_class_histogram", +] + + +def filter_images_with_only_crowd_annotations(dataset_dicts): + """ + Filter out images with none annotations or only crowd annotations + (i.e., images without non-crowd annotations). + A common training-time preprocessing on COCO dataset. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + + Returns: + list[dict]: the same format, but filtered. + """ + num_before = len(dataset_dicts) + + def valid(anns): + for ann in anns: + if ann.get("iscrowd", 0) == 0: + return True + return False + + dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] + num_after = len(dataset_dicts) + logger = logging.getLogger(__name__) + logger.info( + "Removed {} images with no usable annotations. {} images left.".format( + num_before - num_after, num_after + ) + ) + return dataset_dicts + + +def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): + """ + Filter out images with too few number of keypoints. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + + Returns: + list[dict]: the same format as dataset_dicts, but filtered. + """ + num_before = len(dataset_dicts) + + def visible_keypoints_in_image(dic): + # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility + annotations = dic["annotations"] + return sum( + (np.array(ann["keypoints"][2::3]) > 0).sum() + for ann in annotations + if "keypoints" in ann + ) + + dataset_dicts = [ + x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image + ] + num_after = len(dataset_dicts) + logger = logging.getLogger(__name__) + logger.info( + "Removed {} images with fewer than {} keypoints.".format( + num_before - num_after, min_keypoints_per_image + ) + ) + return dataset_dicts + + +def load_proposals_into_dataset(dataset_dicts, proposal_file): + """ + Load precomputed object proposals into the dataset. + + The proposal file should be a pickled dict with the following keys: + + - "ids": list[int] or list[str], the image ids + - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id + - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores + corresponding to the boxes. + - "bbox_mode": the BoxMode of the boxes array. 
Defaults to ``BoxMode.XYXY_ABS``. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + proposal_file (str): file path of pre-computed proposals, in pkl format. + + Returns: + list[dict]: the same format as dataset_dicts, but added proposal field. + """ + logger = logging.getLogger(__name__) + logger.info("Loading proposals from: {}".format(proposal_file)) + + with PathManager.open(proposal_file, "rb") as f: + proposals = pickle.load(f, encoding="latin1") + + # Rename the key names in D1 proposal files + rename_keys = {"indexes": "ids", "scores": "objectness_logits"} + for key in rename_keys: + if key in proposals: + proposals[rename_keys[key]] = proposals.pop(key) + + # Fetch the indexes of all proposals that are in the dataset + # Convert image_id to str since they could be int. + img_ids = set({str(record["image_id"]) for record in dataset_dicts}) + id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} + + # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' + bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS + + for record in dataset_dicts: + # Get the index of the proposal + i = id_to_index[str(record["image_id"])] + + boxes = proposals["boxes"][i] + objectness_logits = proposals["objectness_logits"][i] + # Sort the proposals in descending order of the scores + inds = objectness_logits.argsort()[::-1] + record["proposal_boxes"] = boxes[inds] + record["proposal_objectness_logits"] = objectness_logits[inds] + record["proposal_bbox_mode"] = bbox_mode + + return dataset_dicts + + +def print_instances_class_histogram(dataset_dicts, class_names): + """ + Args: + dataset_dicts (list[dict]): list of dataset dicts. + class_names (list[str]): list of class names (zero-indexed). + """ + num_classes = len(class_names) + hist_bins = np.arange(num_classes + 1) + histogram = np.zeros((num_classes,), dtype=np.int) + for entry in dataset_dicts: + annos = entry["annotations"] + classes = np.asarray( + [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int + ) + if len(classes): + assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" + assert ( + classes.max() < num_classes + ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" + histogram += np.histogram(classes, bins=hist_bins)[0] + + N_COLS = min(6, len(class_names) * 2) + + def short_name(x): + # make long class names shorter. useful for lvis + if len(x) > 13: + return x[:11] + ".." + return x + + data = list( + itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) + ) + total_num_instances = sum(data[1::2]) + data.extend([None] * (N_COLS - (len(data) % N_COLS))) + if num_classes > 1: + data.extend(["total", total_num_instances]) + data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) + table = tabulate( + data, + headers=["category", "#instances"] * (N_COLS // 2), + tablefmt="pipe", + numalign="left", + stralign="center", + ) + log_first_n( + logging.INFO, + "Distribution of instances among all {} categories:\n".format(num_classes) + + colored(table, "cyan"), + key="message", + ) + + +def get_detection_dataset_dicts( + names, + filter_empty=True, + min_keypoints=0, + proposal_files=None, + check_consistency=True, +): + """ + Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. 
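For reference, a proposal file in the format `load_proposals_into_dataset` expects (see its docstring above) could be written as in this sketch; the ids and box values are purely illustrative:

```python
import pickle

import numpy as np

from detectron2.structures import BoxMode

proposals = {
    "ids": [101, 102],  # image ids matching the dataset dicts' "image_id"
    "boxes": [
        np.array([[0, 0, 50, 80], [10, 10, 60, 90]], dtype=np.float32),
        np.array([[5, 5, 40, 40]], dtype=np.float32),
    ],
    "objectness_logits": [
        np.array([2.0, 0.5], dtype=np.float32),
        np.array([1.2], dtype=np.float32),
    ],
    "bbox_mode": BoxMode.XYXY_ABS,
}
with open("proposals.pkl", "wb") as f:
    pickle.dump(proposals, f)
```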
+ + Args: + names (str or list[str]): a dataset name or a list of dataset names + filter_empty (bool): whether to filter out images without instance annotations + min_keypoints (int): filter out images with fewer keypoints than + `min_keypoints`. Set to 0 to do nothing. + proposal_files (list[str]): if given, a list of object proposal files + that match each dataset in `names`. + check_consistency (bool): whether to check if datasets have consistent metadata. + + Returns: + list[dict]: a list of dicts following the standard dataset dict format. + """ + if isinstance(names, str): + names = [names] + assert len(names), names + dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] + + if isinstance(dataset_dicts[0], torchdata.Dataset): + if len(dataset_dicts) > 1: + # ConcatDataset does not work for iterable style dataset. + # We could support concat for iterable as well, but it's often + # not a good idea to concat iterables anyway. + return torchdata.ConcatDataset(dataset_dicts) + return dataset_dicts[0] + + for dataset_name, dicts in zip(names, dataset_dicts): + assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) + + if proposal_files is not None: + assert len(names) == len(proposal_files) + # load precomputed proposals from proposal files + dataset_dicts = [ + load_proposals_into_dataset(dataset_i_dicts, proposal_file) + for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) + ] + + dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) + + has_instances = "annotations" in dataset_dicts[0] + if filter_empty and has_instances: + dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) + if min_keypoints > 0 and has_instances: + dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) + + if check_consistency and has_instances: + try: + class_names = MetadataCatalog.get(names[0]).thing_classes + check_metadata_consistency("thing_classes", names) + print_instances_class_histogram(dataset_dicts, class_names) + except AttributeError: # class names are not available for this dataset + pass + + assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) + return dataset_dicts + + +def build_batch_data_loader( + dataset, + sampler, + total_batch_size, + *, + aspect_ratio_grouping=False, + num_workers=0, + collate_fn=None, +): + """ + Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: + 1. support aspect ratio grouping options + 2. use no "batch collation", because this is common for detection training + + Args: + dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. + sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. + Must be provided iff. ``dataset`` is a map-style dataset. + total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see + :func:`build_detection_train_loader`. + + Returns: + iterable[list]. Length of each list is the batch size of the current + GPU. Each element in the list comes from the dataset. 
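Putting the helpers above together, a registered dataset can be turned into training dicts and then batched. The dataset name below is hypothetical and must already be registered in `DatasetCatalog`; the identity mapper stands in for a real `DatasetMapper`:

```python
from detectron2.data import (
    DatasetFromList,
    MapDataset,
    build_batch_data_loader,
    get_detection_dataset_dicts,
)
from detectron2.data.samplers import TrainingSampler

dicts = get_detection_dataset_dicts("my_dataset_train", filter_empty=True)
dataset = MapDataset(DatasetFromList(dicts, copy=False), lambda d: d)  # identity mapper for illustration
loader = build_batch_data_loader(
    dataset,
    TrainingSampler(len(dataset)),
    total_batch_size=16,
    num_workers=2,
)
batch = next(iter(loader))  # a list of per-image dicts (16 on a single GPU)
```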
+ """ + world_size = get_world_size() + assert ( + total_batch_size > 0 and total_batch_size % world_size == 0 + ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( + total_batch_size, world_size + ) + batch_size = total_batch_size // world_size + + if isinstance(dataset, torchdata.IterableDataset): + assert sampler is None, "sampler must be None if dataset is IterableDataset" + else: + dataset = ToIterableDataset(dataset, sampler) + + if aspect_ratio_grouping: + data_loader = torchdata.DataLoader( + dataset, + num_workers=num_workers, + collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements + worker_init_fn=worker_init_reset_seed, + ) # yield individual mapped dict + data_loader = AspectRatioGroupedDataset(data_loader, batch_size) + if collate_fn is None: + return data_loader + return MapDataset(data_loader, collate_fn) + else: + return torchdata.DataLoader( + dataset, + batch_size=batch_size, + drop_last=True, + num_workers=num_workers, + collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, + worker_init_fn=worker_init_reset_seed, + ) + + +def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): + if dataset is None: + dataset = get_detection_dataset_dicts( + cfg.DATASETS.TRAIN, + filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, + min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE + if cfg.MODEL.KEYPOINT_ON + else 0, + proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, + ) + _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) + + if mapper is None: + mapper = DatasetMapper(cfg, True) + + if sampler is None: + sampler_name = cfg.DATALOADER.SAMPLER_TRAIN + logger = logging.getLogger(__name__) + if isinstance(dataset, torchdata.IterableDataset): + logger.info("Not using any sampler since the dataset is IterableDataset.") + sampler = None + else: + logger.info("Using training sampler {}".format(sampler_name)) + if sampler_name == "TrainingSampler": + sampler = TrainingSampler(len(dataset)) + elif sampler_name == "RepeatFactorTrainingSampler": + repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( + dataset, cfg.DATALOADER.REPEAT_THRESHOLD + ) + sampler = RepeatFactorTrainingSampler(repeat_factors) + elif sampler_name == "RandomSubsetTrainingSampler": + sampler = RandomSubsetTrainingSampler( + len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO + ) + else: + raise ValueError("Unknown training sampler: {}".format(sampler_name)) + + return { + "dataset": dataset, + "sampler": sampler, + "mapper": mapper, + "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, + "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, + "num_workers": cfg.DATALOADER.NUM_WORKERS, + } + + +@configurable(from_config=_train_loader_from_config) +def build_detection_train_loader( + dataset, + *, + mapper, + sampler=None, + total_batch_size, + aspect_ratio_grouping=True, + num_workers=0, + collate_fn=None, +): + """ + Build a dataloader for object detection with some default features. + + Args: + dataset (list or torch.utils.data.Dataset): a list of dataset dicts, + or a pytorch dataset (either map-style or iterable). It can be obtained + by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. + mapper (callable): a callable which takes a sample (dict) from dataset and + returns the format to be consumed by the model. + When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. 
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces + indices to be applied on ``dataset``. + If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`, + which coordinates an infinite random shuffle sequence across all workers. + Sampler must be None if ``dataset`` is iterable. + total_batch_size (int): total batch size across all workers. + aspect_ratio_grouping (bool): whether to group images with similar + aspect ratio for efficiency. When enabled, it requires each + element in dataset be a dict with keys "width" and "height". + num_workers (int): number of parallel data loading workers + collate_fn: a function that determines how to do batching, same as the argument of + `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of + data. No collation is OK for small batch size and simple data structures. + If your batch size is large and each sample contains too many small tensors, + it's more efficient to collate them in data loader. + + Returns: + torch.utils.data.DataLoader: + a dataloader. Each output from it is a ``list[mapped_element]`` of length + ``total_batch_size / num_workers``, where ``mapped_element`` is produced + by the ``mapper``. + """ + if isinstance(dataset, list): + dataset = DatasetFromList(dataset, copy=False) + if mapper is not None: + dataset = MapDataset(dataset, mapper) + + if isinstance(dataset, torchdata.IterableDataset): + assert sampler is None, "sampler must be None if dataset is IterableDataset" + else: + if sampler is None: + sampler = TrainingSampler(len(dataset)) + assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}" + return build_batch_data_loader( + dataset, + sampler, + total_batch_size, + aspect_ratio_grouping=aspect_ratio_grouping, + num_workers=num_workers, + collate_fn=collate_fn, + ) + + +def _test_loader_from_config(cfg, dataset_name, mapper=None): + """ + Uses the given `dataset_name` argument (instead of the names in cfg), because the + standard practice is to evaluate each test set individually (not combining them). + """ + if isinstance(dataset_name, str): + dataset_name = [dataset_name] + + dataset = get_detection_dataset_dicts( + dataset_name, + filter_empty=False, + proposal_files=[ + cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name + ] + if cfg.MODEL.LOAD_PROPOSALS + else None, + ) + if mapper is None: + mapper = DatasetMapper(cfg, False) + return { + "dataset": dataset, + "mapper": mapper, + "num_workers": cfg.DATALOADER.NUM_WORKERS, + "sampler": InferenceSampler(len(dataset)) + if not isinstance(dataset, torchdata.IterableDataset) + else None, + } + + +@configurable(from_config=_test_loader_from_config) +def build_detection_test_loader( + dataset: Union[List[Any], torchdata.Dataset], + *, + mapper: Callable[[Dict[str, Any]], Any], + sampler: Optional[torchdata.Sampler] = None, + batch_size: int = 1, + num_workers: int = 0, + collate_fn: Optional[Callable[[List[Any]], Any]] = None, +) -> torchdata.DataLoader: + """ + Similar to `build_detection_train_loader`, with default batch size = 1, + and sampler = :class:`InferenceSampler`. This sampler coordinates all workers + to produce the exact set of all samples. + + Args: + dataset: a list of dataset dicts, + or a pytorch dataset (either map-style or iterable). They can be obtained + by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. 
+ mapper: a callable which takes a sample (dict) from dataset + and returns the format to be consumed by the model. + When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. + sampler: a sampler that produces + indices to be applied on ``dataset``. Default to :class:`InferenceSampler`, + which splits the dataset across all workers. Sampler must be None + if `dataset` is iterable. + batch_size: the batch size of the data loader to be created. + Default to 1 image per worker since this is the standard when reporting + inference time in papers. + num_workers: number of parallel data loading workers + collate_fn: same as the argument of `torch.utils.data.DataLoader`. + Defaults to do no collation and return a list of data. + + Returns: + DataLoader: a torch DataLoader, that loads the given detection + dataset, with test-time transformation and batching. + + Examples: + :: + data_loader = build_detection_test_loader( + DatasetRegistry.get("my_test"), + mapper=DatasetMapper(...)) + + # or, instantiate with a CfgNode: + data_loader = build_detection_test_loader(cfg, "my_test") + """ + if isinstance(dataset, list): + dataset = DatasetFromList(dataset, copy=False) + if mapper is not None: + dataset = MapDataset(dataset, mapper) + if isinstance(dataset, torchdata.IterableDataset): + assert sampler is None, "sampler must be None if dataset is IterableDataset" + else: + if sampler is None: + sampler = InferenceSampler(len(dataset)) + return torchdata.DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + drop_last=False, + num_workers=num_workers, + collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, + ) + + +def trivial_batch_collator(batch): + """ + A batch collator that does nothing. + """ + return batch + + +def worker_init_reset_seed(worker_id): + initial_seed = torch.initial_seed() % 2**31 + seed_all_rng(initial_seed + worker_id) diff --git a/data_processing/detectron2/detectron2/data/catalog.py b/data_processing/detectron2/detectron2/data/catalog.py new file mode 100644 index 0000000..45c110c --- /dev/null +++ b/data_processing/detectron2/detectron2/data/catalog.py @@ -0,0 +1,236 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import types +from collections import UserDict +from typing import List + +from detectron2.utils.logger import log_first_n + +__all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"] + + +class _DatasetCatalog(UserDict): + """ + A global dictionary that stores information about the datasets and how to obtain them. + + It contains a mapping from strings + (which are names that identify a dataset, e.g. "coco_2014_train") + to a function which parses the dataset and returns the samples in the + format of `list[dict]`. + + The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details) + if used with the data loader functionalities in `data/build.py,data/detection_transform.py`. + + The purpose of having this catalog is to make it easy to choose + different datasets, by just using the strings in the config. + """ + + def register(self, name, func): + """ + Args: + name (str): the name that identifies a dataset, e.g. "coco_2014_train". + func (callable): a callable which takes no arguments and returns a list of dicts. + It must return the same results if called multiple times. + """ + assert callable(func), "You must register a function with `DatasetCatalog.register`!" 
+ assert name not in self, "Dataset '{}' is already registered!".format(name) + self[name] = func + + def get(self, name): + """ + Call the registered function and return its results. + + Args: + name (str): the name that identifies a dataset, e.g. "coco_2014_train". + + Returns: + list[dict]: dataset annotations. + """ + try: + f = self[name] + except KeyError as e: + raise KeyError( + "Dataset '{}' is not registered! Available datasets are: {}".format( + name, ", ".join(list(self.keys())) + ) + ) from e + return f() + + def list(self) -> List[str]: + """ + List all registered datasets. + + Returns: + list[str] + """ + return list(self.keys()) + + def remove(self, name): + """ + Alias of ``pop``. + """ + self.pop(name) + + def __str__(self): + return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys())) + + __repr__ = __str__ + + +DatasetCatalog = _DatasetCatalog() +DatasetCatalog.__doc__ = ( + _DatasetCatalog.__doc__ + + """ + .. automethod:: detectron2.data.catalog.DatasetCatalog.register + .. automethod:: detectron2.data.catalog.DatasetCatalog.get +""" +) + + +class Metadata(types.SimpleNamespace): + """ + A class that supports simple attribute setter/getter. + It is intended for storing metadata of a dataset and make it accessible globally. + + Examples: + :: + # somewhere when you load the data: + MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"] + + # somewhere when you print statistics or visualize: + classes = MetadataCatalog.get("mydataset").thing_classes + """ + + # the name of the dataset + # set default to N/A so that `self.name` in the errors will not trigger getattr again + name: str = "N/A" + + _RENAMED = { + "class_names": "thing_classes", + "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id", + "stuff_class_names": "stuff_classes", + } + + def __getattr__(self, key): + if key in self._RENAMED: + log_first_n( + logging.WARNING, + "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]), + n=10, + ) + return getattr(self, self._RENAMED[key]) + + # "name" exists in every metadata + if len(self.__dict__) > 1: + raise AttributeError( + "Attribute '{}' does not exist in the metadata of dataset '{}'. Available " + "keys are {}.".format(key, self.name, str(self.__dict__.keys())) + ) + else: + raise AttributeError( + f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': " + "metadata is empty." + ) + + def __setattr__(self, key, val): + if key in self._RENAMED: + log_first_n( + logging.WARNING, + "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]), + n=10, + ) + setattr(self, self._RENAMED[key], val) + + # Ensure that metadata of the same name stays consistent + try: + oldval = getattr(self, key) + assert oldval == val, ( + "Attribute '{}' in the metadata of '{}' cannot be set " + "to a different value!\n{} != {}".format(key, self.name, oldval, val) + ) + except AttributeError: + super().__setattr__(key, val) + + def as_dict(self): + """ + Returns all the metadata as a dict. + Note that modifications to the returned dict will not reflect on the Metadata object. + """ + return copy.copy(self.__dict__) + + def set(self, **kwargs): + """ + Set multiple metadata with kwargs. + """ + for k, v in kwargs.items(): + setattr(self, k, v) + return self + + def get(self, key, default=None): + """ + Access an attribute and return its value if exists. + Otherwise return default. 
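The two catalogs and the `Metadata` namespace above are normally used together; the dataset name and record contents in this sketch are hypothetical:

```python
from detectron2.data import DatasetCatalog, MetadataCatalog

def get_my_dicts():
    # Must return the same result every time it is called.
    return [
        {
            "file_name": "images/0001.jpg",
            "image_id": 0,
            "height": 480,
            "width": 640,
            "annotations": [],
        }
    ]

DatasetCatalog.register("my_dataset_train", get_my_dicts)
MetadataCatalog.get("my_dataset_train").thing_classes = ["person"]

dicts = DatasetCatalog.get("my_dataset_train")   # calls get_my_dicts()
meta = MetadataCatalog.get("my_dataset_train")   # same singleton as above
```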
+ """ + try: + return getattr(self, key) + except AttributeError: + return default + + +class _MetadataCatalog(UserDict): + """ + MetadataCatalog is a global dictionary that provides access to + :class:`Metadata` of a given dataset. + + The metadata associated with a certain name is a singleton: once created, the + metadata will stay alive and will be returned by future calls to ``get(name)``. + + It's like global variables, so don't abuse it. + It's meant for storing knowledge that's constant and shared across the execution + of the program, e.g.: the class names in COCO. + """ + + def get(self, name): + """ + Args: + name (str): name of a dataset (e.g. coco_2014_train). + + Returns: + Metadata: The :class:`Metadata` instance associated with this name, + or create an empty one if none is available. + """ + assert len(name) + r = super().get(name, None) + if r is None: + r = self[name] = Metadata(name=name) + return r + + def list(self): + """ + List all registered metadata. + + Returns: + list[str]: keys (names of datasets) of all registered metadata + """ + return list(self.keys()) + + def remove(self, name): + """ + Alias of ``pop``. + """ + self.pop(name) + + def __str__(self): + return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys())) + + __repr__ = __str__ + + +MetadataCatalog = _MetadataCatalog() +MetadataCatalog.__doc__ = ( + _MetadataCatalog.__doc__ + + """ + .. automethod:: detectron2.data.catalog.MetadataCatalog.get +""" +) diff --git a/data_processing/detectron2/detectron2/data/common.py b/data_processing/detectron2/detectron2/data/common.py new file mode 100644 index 0000000..bf24b1d --- /dev/null +++ b/data_processing/detectron2/detectron2/data/common.py @@ -0,0 +1,301 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import copy +import itertools +import logging +import numpy as np +import pickle +import random +from typing import Callable, Union +import torch +import torch.utils.data as data +from torch.utils.data.sampler import Sampler + +from detectron2.utils.serialize import PicklableWrapper + +__all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"] + +logger = logging.getLogger(__name__) + + +def _shard_iterator_dataloader_worker(iterable): + # Shard the iterable if we're currently inside pytorch dataloader worker. + worker_info = data.get_worker_info() + if worker_info is None or worker_info.num_workers == 1: + # do nothing + yield from iterable + else: + yield from itertools.islice(iterable, worker_info.id, None, worker_info.num_workers) + + +class _MapIterableDataset(data.IterableDataset): + """ + Map a function over elements in an IterableDataset. + + Similar to pytorch's MapIterDataPipe, but support filtering when map_func + returns None. + + This class is not public-facing. Will be called by `MapDataset`. + """ + + def __init__(self, dataset, map_func): + self._dataset = dataset + self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work + + def __len__(self): + return len(self._dataset) + + def __iter__(self): + for x in map(self._map_func, self._dataset): + if x is not None: + yield x + + +class MapDataset(data.Dataset): + """ + Map a function over the elements in a dataset. + """ + + def __init__(self, dataset, map_func): + """ + Args: + dataset: a dataset where map function is applied. Can be either + map-style or iterable dataset. When given an iterable dataset, + the returned object will also be an iterable dataset. 
+ map_func: a callable which maps the element in dataset. map_func can + return None to skip the data (e.g. in case of errors). + How None is handled depends on the style of `dataset`. + If `dataset` is map-style, it randomly tries other elements. + If `dataset` is iterable, it skips the data and tries the next. + """ + self._dataset = dataset + self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work + + self._rng = random.Random(42) + self._fallback_candidates = set(range(len(dataset))) + + def __new__(cls, dataset, map_func): + is_iterable = isinstance(dataset, data.IterableDataset) + if is_iterable: + return _MapIterableDataset(dataset, map_func) + else: + return super().__new__(cls) + + def __getnewargs__(self): + return self._dataset, self._map_func + + def __len__(self): + return len(self._dataset) + + def __getitem__(self, idx): + retry_count = 0 + cur_idx = int(idx) + + while True: + data = self._map_func(self._dataset[cur_idx]) + if data is not None: + self._fallback_candidates.add(cur_idx) + return data + + # _map_func fails for this idx, use a random new index from the pool + retry_count += 1 + self._fallback_candidates.discard(cur_idx) + cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0] + + if retry_count >= 3: + logger = logging.getLogger(__name__) + logger.warning( + "Failed to apply `_map_func` for idx: {}, retry count: {}".format( + idx, retry_count + ) + ) + + +class _TorchSerializedList(object): + """ + A list-like object whose items are serialized and stored in a torch tensor. When + launching a process that uses TorchSerializedList with "fork" start method, + the subprocess can read the same buffer without triggering copy-on-access. When + launching a process that uses TorchSerializedList with "spawn/forkserver" start + method, the list will be pickled by a special ForkingPickler registered by PyTorch + that moves data to shared memory. In both cases, this allows parent and child + processes to share RAM for the list data, hence avoids the issue in + https://github.com/pytorch/pytorch/issues/13246. + + See also https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/ + on how it works. 
+ """ + + def __init__(self, lst: list): + self._lst = lst + + def _serialize(data): + buffer = pickle.dumps(data, protocol=-1) + return np.frombuffer(buffer, dtype=np.uint8) + + logger.info( + "Serializing {} elements to byte tensors and concatenating them all ...".format( + len(self._lst) + ) + ) + self._lst = [_serialize(x) for x in self._lst] + self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64) + self._addr = torch.from_numpy(np.cumsum(self._addr)) + self._lst = torch.from_numpy(np.concatenate(self._lst)) + logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024**2)) + + def __len__(self): + return len(self._addr) + + def __getitem__(self, idx): + start_addr = 0 if idx == 0 else self._addr[idx - 1].item() + end_addr = self._addr[idx].item() + bytes = memoryview(self._lst[start_addr:end_addr].numpy()) + + # @lint-ignore PYTHONPICKLEISBAD + return pickle.loads(bytes) + + +_DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = _TorchSerializedList + + +@contextlib.contextmanager +def set_default_dataset_from_list_serialize_method(new): + """ + Context manager for using custom serialize function when creating DatasetFromList + """ + + global _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD + orig = _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD + _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = new + yield + _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = orig + + +class DatasetFromList(data.Dataset): + """ + Wrap a list to a torch Dataset. It produces elements of the list as data. + """ + + def __init__( + self, + lst: list, + copy: bool = True, + serialize: Union[bool, Callable] = True, + ): + """ + Args: + lst (list): a list which contains elements to produce. + copy (bool): whether to deepcopy the element when producing it, + so that the result can be modified in place without affecting the + source in the list. + serialize (bool or callable): whether to serialize the stroage to other + backend. If `True`, the default serialize method will be used, if given + a callable, the callable will be used as serialize method. + """ + self._lst = lst + self._copy = copy + if not isinstance(serialize, (bool, Callable)): + raise TypeError(f"Unsupported type for argument `serailzie`: {serialize}") + self._serialize = serialize is not False + + if self._serialize: + serialize_method = ( + serialize + if isinstance(serialize, Callable) + else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD + ) + logger.info(f"Serializing the dataset using: {serialize_method}") + self._lst = serialize_method(self._lst) + + def __len__(self): + return len(self._lst) + + def __getitem__(self, idx): + if self._copy and not self._serialize: + return copy.deepcopy(self._lst[idx]) + else: + return self._lst[idx] + + +class ToIterableDataset(data.IterableDataset): + """ + Convert an old indices-based (also called map-style) dataset + to an iterable-style dataset. + """ + + def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool = True): + """ + Args: + dataset: an old-style dataset with ``__getitem__`` + sampler: a cheap iterable that produces indices to be applied on ``dataset``. + shard_sampler: whether to shard the sampler based on the current pytorch data loader + worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple + workers, it is responsible for sharding its data based on worker id so that workers + don't produce identical data. 
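As a small illustration of the serialized wrapper and `DatasetFromList` above (the records are arbitrary):

```python
from detectron2.data import DatasetFromList

records = [{"image_id": i, "label": i % 2} for i in range(100)]

# serialize=True (the default) packs the records into one pickled byte tensor,
# so forked dataloader workers share the memory instead of copying the list.
ds = DatasetFromList(records, copy=False, serialize=True)
assert len(ds) == 100
assert ds[3]["image_id"] == 3
```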
+ + Most samplers (like our TrainingSampler) do not shard based on dataloader worker id + and this argument should be set to True. But certain samplers may be already + sharded, in that case this argument should be set to False. + """ + assert not isinstance(dataset, data.IterableDataset), dataset + assert isinstance(sampler, Sampler), sampler + self.dataset = dataset + self.sampler = sampler + self.shard_sampler = shard_sampler + + def __iter__(self): + if not self.shard_sampler: + sampler = self.sampler + else: + # With map-style dataset, `DataLoader(dataset, sampler)` runs the + # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))` + # will run sampler in every of the N worker. So we should only keep 1/N of the ids on + # each worker. The assumption is that sampler is cheap to iterate so it's fine to + # discard ids in workers. + sampler = _shard_iterator_dataloader_worker(self.sampler) + for idx in sampler: + yield self.dataset[idx] + + def __len__(self): + return len(self.sampler) + + +class AspectRatioGroupedDataset(data.IterableDataset): + """ + Batch data that have similar aspect ratio together. + In this implementation, images whose aspect ratio < (or >) 1 will + be batched together. + This improves training speed because the images then need less padding + to form a batch. + + It assumes the underlying dataset produces dicts with "width" and "height" keys. + It will then produce a list of original dicts with length = batch_size, + all with similar aspect ratios. + """ + + def __init__(self, dataset, batch_size): + """ + Args: + dataset: an iterable. Each element must be a dict with keys + "width" and "height", which will be used to batch data. + batch_size (int): + """ + self.dataset = dataset + self.batch_size = batch_size + self._buckets = [[] for _ in range(2)] + # Hard-coded two aspect ratio groups: w > h and w < h. + # Can add support for more aspect ratio groups, but doesn't seem useful + + def __iter__(self): + for d in self.dataset: + w, h = d["width"], d["height"] + bucket_id = 0 if w > h else 1 + bucket = self._buckets[bucket_id] + bucket.append(d) + if len(bucket) == self.batch_size: + data = bucket[:] + # Clear bucket first, because code after yield is not + # guaranteed to execute + del bucket[:] + yield data diff --git a/data_processing/detectron2/detectron2/data/dataset_mapper.py b/data_processing/detectron2/detectron2/data/dataset_mapper.py new file mode 100644 index 0000000..a8714f7 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/dataset_mapper.py @@ -0,0 +1,191 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import numpy as np +from typing import List, Optional, Union +import torch + +from detectron2.config import configurable + +from . import detection_utils as utils +from . import transforms as T + +""" +This file contains the default mapping that's applied to "dataset dicts". +""" + +__all__ = ["DatasetMapper"] + + +class DatasetMapper: + """ + A callable which takes a dataset dict in Detectron2 Dataset format, + and map it into a format used by the model. + + This is the default callable to be used to map your dataset dict into training data. + You may need to follow it to implement your own one for customized logic, + such as a different way to read or transform images. + See :doc:`/tutorials/data_loading` for details. + + The callable currently does the following: + + 1. Read the image from "file_name" + 2. 
Applies cropping/geometric transforms to the image and annotations + 3. Prepare data and annotations to Tensor and :class:`Instances` + """ + + @configurable + def __init__( + self, + is_train: bool, + *, + augmentations: List[Union[T.Augmentation, T.Transform]], + image_format: str, + use_instance_mask: bool = False, + use_keypoint: bool = False, + instance_mask_format: str = "polygon", + keypoint_hflip_indices: Optional[np.ndarray] = None, + precomputed_proposal_topk: Optional[int] = None, + recompute_boxes: bool = False, + ): + """ + NOTE: this interface is experimental. + + Args: + is_train: whether it's used in training or inference + augmentations: a list of augmentations or deterministic transforms to apply + image_format: an image format supported by :func:`detection_utils.read_image`. + use_instance_mask: whether to process instance segmentation annotations, if available + use_keypoint: whether to process keypoint annotations if available + instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation + masks into this format. + keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` + precomputed_proposal_topk: if given, will load pre-computed + proposals from dataset_dict and keep the top k proposals for each image. + recompute_boxes: whether to overwrite bounding box annotations + by computing tight bounding boxes from instance mask annotations. + """ + if recompute_boxes: + assert use_instance_mask, "recompute_boxes requires instance masks" + # fmt: off + self.is_train = is_train + self.augmentations = T.AugmentationList(augmentations) + self.image_format = image_format + self.use_instance_mask = use_instance_mask + self.instance_mask_format = instance_mask_format + self.use_keypoint = use_keypoint + self.keypoint_hflip_indices = keypoint_hflip_indices + self.proposal_topk = precomputed_proposal_topk + self.recompute_boxes = recompute_boxes + # fmt: on + logger = logging.getLogger(__name__) + mode = "training" if is_train else "inference" + logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") + + @classmethod + def from_config(cls, cfg, is_train: bool = True): + augs = utils.build_augmentation(cfg, is_train) + if cfg.INPUT.CROP.ENABLED and is_train: + augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) + recompute_boxes = cfg.MODEL.MASK_ON + else: + recompute_boxes = False + + ret = { + "is_train": is_train, + "augmentations": augs, + "image_format": cfg.INPUT.FORMAT, + "use_instance_mask": cfg.MODEL.MASK_ON, + "instance_mask_format": cfg.INPUT.MASK_FORMAT, + "use_keypoint": cfg.MODEL.KEYPOINT_ON, + "recompute_boxes": recompute_boxes, + } + + if cfg.MODEL.KEYPOINT_ON: + ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) + + if cfg.MODEL.LOAD_PROPOSALS: + ret["precomputed_proposal_topk"] = ( + cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN + if is_train + else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST + ) + return ret + + def _transform_annotations(self, dataset_dict, transforms, image_shape): + # USER: Modify this if you want to keep them for some reason. 
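A common way to obtain the mapper above is through the project config; a minimal sketch with detectron2's default config (all values come from `from_config`, and the commented-out call shows a hypothetical record):

```python
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper

cfg = get_cfg()                              # default detectron2 config
mapper = DatasetMapper(cfg, is_train=True)   # constructed via from_config()

# A dataset dict with a readable "file_name" (path is made up) would then be
# mapped into the model's input format:
# example = mapper({"file_name": "images/0001.jpg", "image_id": 0,
#                   "height": 480, "width": 640, "annotations": []})
```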
+ for anno in dataset_dict["annotations"]: + if not self.use_instance_mask: + anno.pop("segmentation", None) + if not self.use_keypoint: + anno.pop("keypoints", None) + + # USER: Implement additional transformations if you have other types of data + annos = [ + utils.transform_instance_annotations( + obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices + ) + for obj in dataset_dict.pop("annotations") + if obj.get("iscrowd", 0) == 0 + ] + instances = utils.annotations_to_instances( + annos, image_shape, mask_format=self.instance_mask_format + ) + + # After transforms such as cropping are applied, the bounding box may no longer + # tightly bound the object. As an example, imagine a triangle object + # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight + # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to + # the intersection of original bounding box and the cropping box. + if self.recompute_boxes: + instances.gt_boxes = instances.gt_masks.get_bounding_boxes() + dataset_dict["instances"] = utils.filter_empty_instances(instances) + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + # USER: Write your own image loading if it's not from a file + image = utils.read_image(dataset_dict["file_name"], format=self.image_format) + utils.check_image_size(dataset_dict, image) + + # USER: Remove if you don't do semantic/panoptic segmentation. + if "sem_seg_file_name" in dataset_dict: + sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) + else: + sem_seg_gt = None + + aug_input = T.AugInput(image, sem_seg=sem_seg_gt) + transforms = self.augmentations(aug_input) + image, sem_seg_gt = aug_input.image, aug_input.sem_seg + + image_shape = image.shape[:2] # h, w + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + if sem_seg_gt is not None: + dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) + + # USER: Remove if you don't use pre-computed proposals. + # Most users would not need this feature. + if self.proposal_topk is not None: + utils.transform_proposals( + dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk + ) + + if not self.is_train: + # USER: Modify this if you want to keep them for some reason. + dataset_dict.pop("annotations", None) + dataset_dict.pop("sem_seg_file_name", None) + return dataset_dict + + if "annotations" in dataset_dict: + self._transform_annotations(dataset_dict, transforms, image_shape) + + return dataset_dict diff --git a/data_processing/detectron2/detectron2/data/datasets/README.md b/data_processing/detectron2/detectron2/data/datasets/README.md new file mode 100644 index 0000000..9fb3e4f --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/README.md @@ -0,0 +1,9 @@ + + +### Common Datasets + +The dataset implemented here do not need to load the data into the final format. +It should provide the minimal data structure needed to use the dataset, so it can be very efficient. 
+ +For example, for an image dataset, just provide the file names and labels, but don't read the images. +Let the downstream decide how to read. diff --git a/data_processing/detectron2/detectron2/data/datasets/__init__.py b/data_processing/detectron2/detectron2/data/datasets/__init__.py new file mode 100644 index 0000000..a44bedc --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json +from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated +from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta +from .pascal_voc import load_voc_instances, register_pascal_voc +from . import builtin as _builtin # ensure the builtin datasets are registered + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/detectron2/data/datasets/builtin.py b/data_processing/detectron2/detectron2/data/datasets/builtin.py new file mode 100644 index 0000000..c3a68aa --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/builtin.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + + +""" +This file registers pre-defined datasets at hard-coded paths, and their metadata. + +We hard-code metadata for common datasets. This will enable: +1. Consistency check when loading the datasets +2. Use models on these standard datasets directly and run demos, + without having to download the dataset annotations + +We hard-code some paths to the dataset that's assumed to +exist in "./datasets/". + +Users SHOULD NOT use this file to create new dataset / metadata for new dataset. +To add new dataset, refer to the tutorial "docs/DATASETS.md". 
+""" + +import os + +from detectron2.data import DatasetCatalog, MetadataCatalog + +from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata +from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic +from .cityscapes_panoptic import register_all_cityscapes_panoptic +from .coco import load_sem_seg, register_coco_instances +from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated +from .lvis import get_lvis_instances_meta, register_lvis_instances +from .pascal_voc import register_pascal_voc + +# ==== Predefined datasets and splits for COCO ========== + +_PREDEFINED_SPLITS_COCO = {} +_PREDEFINED_SPLITS_COCO["coco"] = { + "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"), + "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"), + "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"), + "coco_2014_valminusminival": ( + "coco/val2014", + "coco/annotations/instances_valminusminival2014.json", + ), + "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"), + "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"), + "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"), + "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"), + "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"), +} + +_PREDEFINED_SPLITS_COCO["coco_person"] = { + "keypoints_coco_2014_train": ( + "coco/train2014", + "coco/annotations/person_keypoints_train2014.json", + ), + "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"), + "keypoints_coco_2014_minival": ( + "coco/val2014", + "coco/annotations/person_keypoints_minival2014.json", + ), + "keypoints_coco_2014_valminusminival": ( + "coco/val2014", + "coco/annotations/person_keypoints_valminusminival2014.json", + ), + "keypoints_coco_2017_train": ( + "coco/train2017", + "coco/annotations/person_keypoints_train2017.json", + ), + "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"), + "keypoints_coco_2017_val_100": ( + "coco/val2017", + "coco/annotations/person_keypoints_val2017_100.json", + ), +} + + +_PREDEFINED_SPLITS_COCO_PANOPTIC = { + "coco_2017_train_panoptic": ( + # This is the original panoptic annotation directory + "coco/panoptic_train2017", + "coco/annotations/panoptic_train2017.json", + # This directory contains semantic annotations that are + # converted from panoptic annotations. + # It is used by PanopticFPN. + # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py + # to create these directories. + "coco/panoptic_stuff_train2017", + ), + "coco_2017_val_panoptic": ( + "coco/panoptic_val2017", + "coco/annotations/panoptic_val2017.json", + "coco/panoptic_stuff_val2017", + ), + "coco_2017_val_100_panoptic": ( + "coco/panoptic_val2017_100", + "coco/annotations/panoptic_val2017_100.json", + "coco/panoptic_stuff_val2017_100", + ), +} + + +def register_all_coco(root): + for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items(): + for key, (image_root, json_file) in splits_per_dataset.items(): + # Assume pre-defined datasets live in `./datasets`. 
+ register_coco_instances( + key, + _get_builtin_metadata(dataset_name), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + for ( + prefix, + (panoptic_root, panoptic_json, semantic_root), + ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items(): + prefix_instances = prefix[: -len("_panoptic")] + instances_meta = MetadataCatalog.get(prefix_instances) + image_root, instances_json = instances_meta.image_root, instances_meta.json_file + # The "separated" version of COCO panoptic segmentation dataset, + # e.g. used by Panoptic FPN + register_coco_panoptic_separated( + prefix, + _get_builtin_metadata("coco_panoptic_separated"), + image_root, + os.path.join(root, panoptic_root), + os.path.join(root, panoptic_json), + os.path.join(root, semantic_root), + instances_json, + ) + # The "standard" version of COCO panoptic segmentation dataset, + # e.g. used by Panoptic-DeepLab + register_coco_panoptic( + prefix, + _get_builtin_metadata("coco_panoptic_standard"), + image_root, + os.path.join(root, panoptic_root), + os.path.join(root, panoptic_json), + instances_json, + ) + + +# ==== Predefined datasets and splits for LVIS ========== + + +_PREDEFINED_SPLITS_LVIS = { + "lvis_v1": { + "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"), + "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"), + "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"), + "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"), + }, + "lvis_v0.5": { + "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"), + "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"), + "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"), + "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"), + }, + "lvis_v0.5_cocofied": { + "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"), + "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"), + }, +} + + +def register_all_lvis(root): + for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items(): + for key, (image_root, json_file) in splits_per_dataset.items(): + register_lvis_instances( + key, + get_lvis_instances_meta(dataset_name), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + +# ==== Predefined splits for raw cityscapes images =========== +_RAW_CITYSCAPES_SPLITS = { + "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"), + "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"), + "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"), +} + + +def register_all_cityscapes(root): + for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items(): + meta = _get_builtin_metadata("cityscapes") + image_dir = os.path.join(root, image_dir) + gt_dir = os.path.join(root, gt_dir) + + inst_key = key.format(task="instance_seg") + DatasetCatalog.register( + inst_key, + lambda x=image_dir, y=gt_dir: load_cityscapes_instances( + x, y, from_json=True, to_polygons=True + ), + ) + MetadataCatalog.get(inst_key).set( + image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta + ) + + sem_key = key.format(task="sem_seg") + DatasetCatalog.register( + sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y) + ) + MetadataCatalog.get(sem_key).set( + image_dir=image_dir, + gt_dir=gt_dir, + evaluator_type="cityscapes_sem_seg", + 
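+            # Pixels labeled 255 in the ground truth are treated as "ignore" and are
+            # excluded from semantic-segmentation evaluation.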
ignore_label=255, + **meta, + ) + + +# ==== Predefined splits for PASCAL VOC =========== +def register_all_pascal_voc(root): + SPLITS = [ + ("voc_2007_trainval", "VOC2007", "trainval"), + ("voc_2007_train", "VOC2007", "train"), + ("voc_2007_val", "VOC2007", "val"), + ("voc_2007_test", "VOC2007", "test"), + ("voc_2012_trainval", "VOC2012", "trainval"), + ("voc_2012_train", "VOC2012", "train"), + ("voc_2012_val", "VOC2012", "val"), + ] + for name, dirname, split in SPLITS: + year = 2007 if "2007" in name else 2012 + register_pascal_voc(name, os.path.join(root, dirname), split, year) + MetadataCatalog.get(name).evaluator_type = "pascal_voc" + + +def register_all_ade20k(root): + root = os.path.join(root, "ADEChallengeData2016") + for name, dirname in [("train", "training"), ("val", "validation")]: + image_dir = os.path.join(root, "images", dirname) + gt_dir = os.path.join(root, "annotations_detectron2", dirname) + name = f"ade20k_sem_seg_{name}" + DatasetCatalog.register( + name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg") + ) + MetadataCatalog.get(name).set( + stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:], + image_root=image_dir, + sem_seg_root=gt_dir, + evaluator_type="sem_seg", + ignore_label=255, + ) + + +# True for open source; +# Internally at fb, we register them elsewhere +if __name__.endswith(".builtin"): + # Assume pre-defined datasets live in `./datasets`. + _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets")) + register_all_coco(_root) + register_all_lvis(_root) + register_all_cityscapes(_root) + register_all_cityscapes_panoptic(_root) + register_all_pascal_voc(_root) + register_all_ade20k(_root) diff --git a/data_processing/detectron2/detectron2/data/datasets/builtin_meta.py b/data_processing/detectron2/detectron2/data/datasets/builtin_meta.py new file mode 100644 index 0000000..63c7a1a --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/builtin_meta.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +Note: +For your custom dataset, there is no need to hard-code metadata anywhere in the code. +For example, for COCO-format dataset, metadata will be obtained automatically +when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways +during loading. + +However, we hard-coded metadata for a few common dataset here. +The only goal is to allow users who don't have these dataset to use pre-trained models. +Users don't have to download a COCO json (which contains metadata), in order to visualize a +COCO model (with correct class names and colors). 
+""" + + +# All coco categories, together with their nice-looking visualization colors +# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json +COCO_CATEGORIES = [ + {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, + {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, + {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, + {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, + {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, + {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, + {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, + {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, + {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, + {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, + {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, + {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, + {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, + {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, + {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, + {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, + {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, + {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, + {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, + {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, + {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, + {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, + {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, + {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, + {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, + {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, + {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, + {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, + {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, + {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, + {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, + {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, + {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, + {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, + {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, + {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, + {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, + {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, + {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, + {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, + {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, + {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, + {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, + {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, + {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, + {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, + {"color": [74, 65, 105], "isthing": 1, 
"id": 52, "name": "banana"}, + {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, + {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, + {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, + {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, + {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, + {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, + {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, + {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, + {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, + {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, + {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, + {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, + {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, + {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, + {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, + {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, + {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, + {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, + {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, + {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, + {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, + {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, + {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, + {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, + {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, + {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, + {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, + {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, + {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, + {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, + {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, + {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, + {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, + {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, + {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, + {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, + {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, + {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, + {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, + {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, + {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, + {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, + {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, + {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, + {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, + {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, + {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, + {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, + {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, + {"color": 
[255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, + {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, + {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, + {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, + {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, + {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, + {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, + {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, + {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, + {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, + {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, + {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, + {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, + {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, + {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, + {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, + {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, + {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, + {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, + {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, + {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, + {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, + {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, + {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, + {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, + {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, + {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, + {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, + {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, + {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, + {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, + {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, + {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, + {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, + {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, + {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, + {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, +] + +# fmt: off +COCO_PERSON_KEYPOINT_NAMES = ( + "nose", + "left_eye", "right_eye", + "left_ear", "right_ear", + "left_shoulder", "right_shoulder", + "left_elbow", "right_elbow", + "left_wrist", "right_wrist", + "left_hip", "right_hip", + "left_knee", "right_knee", + "left_ankle", "right_ankle", +) +# fmt: on + +# Pairs of keypoints that should be exchanged under horizontal flipping +COCO_PERSON_KEYPOINT_FLIP_MAP = ( + ("left_eye", "right_eye"), + ("left_ear", "right_ear"), + ("left_shoulder", "right_shoulder"), + ("left_elbow", "right_elbow"), + ("left_wrist", "right_wrist"), + ("left_hip", "right_hip"), + ("left_knee", "right_knee"), + ("left_ankle", "right_ankle"), +) + +# rules for pairs of keypoints to draw a line 
between, and the line color to use. +KEYPOINT_CONNECTION_RULES = [ + # face + ("left_ear", "left_eye", (102, 204, 255)), + ("right_ear", "right_eye", (51, 153, 255)), + ("left_eye", "nose", (102, 0, 204)), + ("nose", "right_eye", (51, 102, 255)), + # upper-body + ("left_shoulder", "right_shoulder", (255, 128, 0)), + ("left_shoulder", "left_elbow", (153, 255, 204)), + ("right_shoulder", "right_elbow", (128, 229, 255)), + ("left_elbow", "left_wrist", (153, 255, 153)), + ("right_elbow", "right_wrist", (102, 255, 224)), + # lower-body + ("left_hip", "right_hip", (255, 102, 0)), + ("left_hip", "left_knee", (255, 255, 77)), + ("right_hip", "right_knee", (153, 255, 204)), + ("left_knee", "left_ankle", (191, 255, 128)), + ("right_knee", "right_ankle", (255, 195, 77)), +] + +# All Cityscapes categories, together with their nice-looking visualization colors +# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa +CITYSCAPES_CATEGORIES = [ + {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"}, + {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"}, + {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"}, + {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"}, + {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"}, + {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"}, + {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"}, + {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"}, + {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"}, + {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"}, + {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"}, + {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"}, + {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"}, + {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"}, + {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"}, + {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"}, + {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"}, + {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"}, + {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"}, +] + +# fmt: off +ADE20K_SEM_SEG_CATEGORIES = [ + "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, 
span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa +] +# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore +# fmt: on + + +def _get_coco_instances_meta(): + thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1] + thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1] + assert len(thing_ids) == 80, len(thing_ids) + # Mapping from the incontiguous COCO category id to an id in [0, 79] + thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)} + thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1] + ret = { + "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, + "thing_classes": thing_classes, + "thing_colors": thing_colors, + } + return ret + + +def _get_coco_panoptic_separated_meta(): + """ + Returns metadata for "separated" version of the panoptic segmentation dataset. + """ + stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0] + assert len(stuff_ids) == 53, len(stuff_ids) + + # For semantic segmentation, this mapping maps from contiguous stuff id + # (in [0, 53], used in models) to ids in the dataset (used for processing results) + # The id 0 is mapped to an extra category "thing". 
+ stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)} + # When converting COCO panoptic annotations to semantic annotations + # We label the "thing" category to 0 + stuff_dataset_id_to_contiguous_id[0] = 0 + + # 54 names for COCO stuff categories (including "things") + stuff_classes = ["things"] + [ + k["name"].replace("-other", "").replace("-merged", "") + for k in COCO_CATEGORIES + if k["isthing"] == 0 + ] + + # NOTE: I randomly picked a color for things + stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0] + ret = { + "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id, + "stuff_classes": stuff_classes, + "stuff_colors": stuff_colors, + } + ret.update(_get_coco_instances_meta()) + return ret + + +def _get_builtin_metadata(dataset_name): + if dataset_name == "coco": + return _get_coco_instances_meta() + if dataset_name == "coco_panoptic_separated": + return _get_coco_panoptic_separated_meta() + elif dataset_name == "coco_panoptic_standard": + meta = {} + # The following metadata maps contiguous id from [0, #thing categories + + # #stuff categories) to their names and colors. We have to replica of the + # same name and color under "thing_*" and "stuff_*" because the current + # visualization function in D2 handles thing and class classes differently + # due to some heuristic used in Panoptic FPN. We keep the same naming to + # enable reusing existing visualization functions. + thing_classes = [k["name"] for k in COCO_CATEGORIES] + thing_colors = [k["color"] for k in COCO_CATEGORIES] + stuff_classes = [k["name"] for k in COCO_CATEGORIES] + stuff_colors = [k["color"] for k in COCO_CATEGORIES] + + meta["thing_classes"] = thing_classes + meta["thing_colors"] = thing_colors + meta["stuff_classes"] = stuff_classes + meta["stuff_colors"] = stuff_colors + + # Convert category id for training: + # category id: like semantic segmentation, it is the class id for each + # pixel. Since there are some classes not used in evaluation, the category + # id is not always contiguous and thus we have two set of category ids: + # - original category id: category id in the original dataset, mainly + # used for evaluation. + # - contiguous category id: [0, #classes), in order to train the linear + # softmax classifier. 
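+        # e.g. "person" (dataset id 1, the first entry of COCO_CATEGORIES) receives
+        # contiguous id 0, and stuff categories such as "banner" (dataset id 92) simply
+        # continue the same 0-based numbering after the 80 thing classes.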
+ thing_dataset_id_to_contiguous_id = {} + stuff_dataset_id_to_contiguous_id = {} + + for i, cat in enumerate(COCO_CATEGORIES): + if cat["isthing"]: + thing_dataset_id_to_contiguous_id[cat["id"]] = i + else: + stuff_dataset_id_to_contiguous_id[cat["id"]] = i + + meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id + meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id + + return meta + elif dataset_name == "coco_person": + return { + "thing_classes": ["person"], + "keypoint_names": COCO_PERSON_KEYPOINT_NAMES, + "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP, + "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES, + } + elif dataset_name == "cityscapes": + # fmt: off + CITYSCAPES_THING_CLASSES = [ + "person", "rider", "car", "truck", + "bus", "train", "motorcycle", "bicycle", + ] + CITYSCAPES_STUFF_CLASSES = [ + "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", + "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", + "truck", "bus", "train", "motorcycle", "bicycle", + ] + # fmt: on + return { + "thing_classes": CITYSCAPES_THING_CLASSES, + "stuff_classes": CITYSCAPES_STUFF_CLASSES, + } + raise KeyError("No built-in metadata for dataset {}".format(dataset_name)) diff --git a/data_processing/detectron2/detectron2/data/datasets/cityscapes.py b/data_processing/detectron2/detectron2/data/datasets/cityscapes.py new file mode 100644 index 0000000..1e84a5b --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/cityscapes.py @@ -0,0 +1,329 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import functools +import json +import logging +import multiprocessing as mp +import numpy as np +import os +from itertools import chain +import pycocotools.mask as mask_util +from PIL import Image + +from detectron2.structures import BoxMode +from detectron2.utils.comm import get_world_size +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import setup_logger + +try: + import cv2 # noqa +except ImportError: + # OpenCV is an optional dependency at the moment + pass + + +logger = logging.getLogger(__name__) + + +def _get_cityscapes_files(image_dir, gt_dir): + files = [] + # scan through the directory + cities = PathManager.ls(image_dir) + logger.info(f"{len(cities)} cities found in '{image_dir}'.") + for city in cities: + city_img_dir = os.path.join(image_dir, city) + city_gt_dir = os.path.join(gt_dir, city) + for basename in PathManager.ls(city_img_dir): + image_file = os.path.join(city_img_dir, basename) + + suffix = "leftImg8bit.png" + assert basename.endswith(suffix), basename + basename = basename[: -len(suffix)] + + instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png") + label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png") + json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json") + + files.append((image_file, instance_file, label_file, json_file)) + assert len(files), "No images found in {}".format(image_dir) + for f in files[0]: + assert PathManager.isfile(f), f + return files + + +def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train". + from_json (bool): whether to read annotations from the raw json file or the png files. 
+ to_polygons (bool): whether to represent the segmentation as polygons + (COCO's format) instead of masks (cityscapes's format). + + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + """ + if from_json: + assert to_polygons, ( + "Cityscapes's json annotations are in polygon format. " + "Converting to mask format is not supported now." + ) + files = _get_cityscapes_files(image_dir, gt_dir) + + logger.info("Preprocessing cityscapes annotations ...") + # This is still not fast: all workers will execute duplicate works and will + # take up to 10m on a 8GPU server. + pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4)) + + ret = pool.map( + functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons), + files, + ) + logger.info("Loaded {} images from {}".format(len(ret), image_dir)) + + # Map cityscape ids to contiguous ids + from cityscapesscripts.helpers.labels import labels + + labels = [l for l in labels if l.hasInstances and not l.ignoreInEval] + dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)} + for dict_per_image in ret: + for anno in dict_per_image["annotations"]: + anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]] + return ret + + +def load_cityscapes_semantic(image_dir, gt_dir): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train". + + Returns: + list[dict]: a list of dict, each has "file_name" and + "sem_seg_file_name". + """ + ret = [] + # gt_dir is small and contain many small files. make sense to fetch to local first + gt_dir = PathManager.get_local_path(gt_dir) + for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir): + label_file = label_file.replace("labelIds", "labelTrainIds") + + with PathManager.open(json_file, "r") as f: + jsonobj = json.load(f) + ret.append( + { + "file_name": image_file, + "sem_seg_file_name": label_file, + "height": jsonobj["imgHeight"], + "width": jsonobj["imgWidth"], + } + ) + assert len(ret), f"No images found in {image_dir}!" + assert PathManager.isfile( + ret[0]["sem_seg_file_name"] + ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa + return ret + + +def _cityscapes_files_to_dict(files, from_json, to_polygons): + """ + Parse cityscapes annotation files to a instance segmentation dataset dict. + + Args: + files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file) + from_json (bool): whether to read annotations from the raw json file or the png files. + to_polygons (bool): whether to represent the segmentation as polygons + (COCO's format) instead of masks (cityscapes's format). + + Returns: + A dict in Detectron2 Dataset format. + """ + from cityscapesscripts.helpers.labels import id2label, name2label + + image_file, instance_id_file, _, json_file = files + + annos = [] + + if from_json: + from shapely.geometry import MultiPolygon, Polygon + + with PathManager.open(json_file, "r") as f: + jsonobj = json.load(f) + ret = { + "file_name": image_file, + "image_id": os.path.basename(image_file), + "height": jsonobj["imgHeight"], + "width": jsonobj["imgWidth"], + } + + # `polygons_union` contains the union of all valid polygons. + polygons_union = Polygon() + + # CityscapesScripts draw the polygons in sequential order + # and each polygon *overwrites* existing ones. 
See + # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa + # We use reverse order, and each polygon *avoids* early ones. + # This will resolve the ploygon overlaps in the same way as CityscapesScripts. + for obj in jsonobj["objects"][::-1]: + if "deleted" in obj: # cityscapes data format specific + continue + label_name = obj["label"] + + try: + label = name2label[label_name] + except KeyError: + if label_name.endswith("group"): # crowd area + label = name2label[label_name[: -len("group")]] + else: + raise + if label.id < 0: # cityscapes data format + continue + + # Cityscapes's raw annotations uses integer coordinates + # Therefore +0.5 here + poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5 + # CityscapesScript uses PIL.ImageDraw.polygon to rasterize + # polygons for evaluation. This function operates in integer space + # and draws each pixel whose center falls into the polygon. + # Therefore it draws a polygon which is 0.5 "fatter" in expectation. + # We therefore dilate the input polygon by 0.5 as our input. + poly = Polygon(poly_coord).buffer(0.5, resolution=4) + + if not label.hasInstances or label.ignoreInEval: + # even if we won't store the polygon it still contributes to overlaps resolution + polygons_union = polygons_union.union(poly) + continue + + # Take non-overlapping part of the polygon + poly_wo_overlaps = poly.difference(polygons_union) + if poly_wo_overlaps.is_empty: + continue + polygons_union = polygons_union.union(poly) + + anno = {} + anno["iscrowd"] = label_name.endswith("group") + anno["category_id"] = label.id + + if isinstance(poly_wo_overlaps, Polygon): + poly_list = [poly_wo_overlaps] + elif isinstance(poly_wo_overlaps, MultiPolygon): + poly_list = poly_wo_overlaps.geoms + else: + raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps)) + + poly_coord = [] + for poly_el in poly_list: + # COCO API can work only with exterior boundaries now, hence we store only them. + # TODO: store both exterior and interior boundaries once other parts of the + # codebase support holes in polygons. 
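+                # Flatten each exterior ring into the COCO polygon format
+                # [x0, y0, x1, y1, ...] expected by downstream consumers.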
+ poly_coord.append(list(chain(*poly_el.exterior.coords))) + anno["segmentation"] = poly_coord + (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds + + anno["bbox"] = (xmin, ymin, xmax, ymax) + anno["bbox_mode"] = BoxMode.XYXY_ABS + + annos.append(anno) + else: + # See also the official annotation parsing scripts at + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa + with PathManager.open(instance_id_file, "rb") as f: + inst_image = np.asarray(Image.open(f), order="F") + # ids < 24 are stuff labels (filtering them first is about 5% faster) + flattened_ids = np.unique(inst_image[inst_image >= 24]) + + ret = { + "file_name": image_file, + "image_id": os.path.basename(image_file), + "height": inst_image.shape[0], + "width": inst_image.shape[1], + } + + for instance_id in flattened_ids: + # For non-crowd annotations, instance_id // 1000 is the label_id + # Crowd annotations have <1000 instance ids + label_id = instance_id // 1000 if instance_id >= 1000 else instance_id + label = id2label[label_id] + if not label.hasInstances or label.ignoreInEval: + continue + + anno = {} + anno["iscrowd"] = instance_id < 1000 + anno["category_id"] = label.id + + mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F") + + inds = np.nonzero(mask) + ymin, ymax = inds[0].min(), inds[0].max() + xmin, xmax = inds[1].min(), inds[1].max() + anno["bbox"] = (xmin, ymin, xmax, ymax) + if xmax <= xmin or ymax <= ymin: + continue + anno["bbox_mode"] = BoxMode.XYXY_ABS + if to_polygons: + # This conversion comes from D4809743 and D5171122, + # when Mask-RCNN was first developed. + contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[ + -2 + ] + polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3] + # opencv's can produce invalid polygons + if len(polygons) == 0: + continue + anno["segmentation"] = polygons + else: + anno["segmentation"] = mask_util.encode(mask[:, :, None])[0] + annos.append(anno) + ret["annotations"] = annos + return ret + + +if __name__ == "__main__": + """ + Test the cityscapes dataset loader. 
+ + Usage: + python -m detectron2.data.datasets.cityscapes \ + cityscapes/leftImg8bit/train cityscapes/gtFine/train + """ + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("image_dir") + parser.add_argument("gt_dir") + parser.add_argument("--type", choices=["instance", "semantic"], default="instance") + args = parser.parse_args() + from detectron2.data.catalog import Metadata + from detectron2.utils.visualizer import Visualizer + from cityscapesscripts.helpers.labels import labels + + logger = setup_logger(name=__name__) + + dirname = "cityscapes-data-vis" + os.makedirs(dirname, exist_ok=True) + + if args.type == "instance": + dicts = load_cityscapes_instances( + args.image_dir, args.gt_dir, from_json=True, to_polygons=True + ) + logger.info("Done loading {} samples.".format(len(dicts))) + + thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval] + meta = Metadata().set(thing_classes=thing_classes) + + else: + dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir) + logger.info("Done loading {} samples.".format(len(dicts))) + + stuff_classes = [k.name for k in labels if k.trainId != 255] + stuff_colors = [k.color for k in labels if k.trainId != 255] + meta = Metadata().set(stuff_classes=stuff_classes, stuff_colors=stuff_colors) + + for d in dicts: + img = np.array(Image.open(PathManager.open(d["file_name"], "rb"))) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + # cv2.imshow("a", vis.get_image()[:, :, ::-1]) + # cv2.waitKey() + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/data_processing/detectron2/detectron2/data/datasets/cityscapes_panoptic.py b/data_processing/detectron2/detectron2/data/datasets/cityscapes_panoptic.py new file mode 100644 index 0000000..48c136f --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/cityscapes_panoptic.py @@ -0,0 +1,187 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import json +import logging +import os + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES +from detectron2.utils.file_io import PathManager + +""" +This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog. 
+""" + + +logger = logging.getLogger(__name__) + + +def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info): + files = [] + # scan through the directory + cities = PathManager.ls(image_dir) + logger.info(f"{len(cities)} cities found in '{image_dir}'.") + image_dict = {} + for city in cities: + city_img_dir = os.path.join(image_dir, city) + for basename in PathManager.ls(city_img_dir): + image_file = os.path.join(city_img_dir, basename) + + suffix = "_leftImg8bit.png" + assert basename.endswith(suffix), basename + basename = os.path.basename(basename)[: -len(suffix)] + + image_dict[basename] = image_file + + for ann in json_info["annotations"]: + image_file = image_dict.get(ann["image_id"], None) + assert image_file is not None, "No image {} found for annotation {}".format( + ann["image_id"], ann["file_name"] + ) + label_file = os.path.join(gt_dir, ann["file_name"]) + segments_info = ann["segments_info"] + + files.append((image_file, label_file, segments_info)) + + assert len(files), "No images found in {}".format(image_dir) + assert PathManager.isfile(files[0][0]), files[0][0] + assert PathManager.isfile(files[0][1]), files[0][1] + return files + + +def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., + "~/cityscapes/gtFine/cityscapes_panoptic_train". + gt_json (str): path to the json file. e.g., + "~/cityscapes/gtFine/cityscapes_panoptic_train.json". + meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id" + and "stuff_dataset_id_to_contiguous_id" to map category ids to + contiguous ids for training. + + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + """ + + def _convert_category_id(segment_info, meta): + if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: + segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + else: + segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + return segment_info + + assert os.path.exists( + gt_json + ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa + with open(gt_json) as f: + json_info = json.load(f) + files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info) + ret = [] + for image_file, label_file, segments_info in files: + sem_label_file = ( + image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png" + ) + segments_info = [_convert_category_id(x, meta) for x in segments_info] + ret.append( + { + "file_name": image_file, + "image_id": "_".join( + os.path.splitext(os.path.basename(image_file))[0].split("_")[:3] + ), + "sem_seg_file_name": sem_label_file, + "pan_seg_file_name": label_file, + "segments_info": segments_info, + } + ) + assert len(ret), f"No images found in {image_dir}!" 
+ assert PathManager.isfile( + ret[0]["sem_seg_file_name"] + ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa + assert PathManager.isfile( + ret[0]["pan_seg_file_name"] + ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa + return ret + + +_RAW_CITYSCAPES_PANOPTIC_SPLITS = { + "cityscapes_fine_panoptic_train": ( + "cityscapes/leftImg8bit/train", + "cityscapes/gtFine/cityscapes_panoptic_train", + "cityscapes/gtFine/cityscapes_panoptic_train.json", + ), + "cityscapes_fine_panoptic_val": ( + "cityscapes/leftImg8bit/val", + "cityscapes/gtFine/cityscapes_panoptic_val", + "cityscapes/gtFine/cityscapes_panoptic_val.json", + ), + # "cityscapes_fine_panoptic_test": not supported yet +} + + +def register_all_cityscapes_panoptic(root): + meta = {} + # The following metadata maps contiguous id from [0, #thing categories + + # #stuff categories) to their names and colors. We have to replica of the + # same name and color under "thing_*" and "stuff_*" because the current + # visualization function in D2 handles thing and class classes differently + # due to some heuristic used in Panoptic FPN. We keep the same naming to + # enable reusing existing visualization functions. + thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES] + thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES] + stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES] + stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES] + + meta["thing_classes"] = thing_classes + meta["thing_colors"] = thing_colors + meta["stuff_classes"] = stuff_classes + meta["stuff_colors"] = stuff_colors + + # There are three types of ids in cityscapes panoptic segmentation: + # (1) category id: like semantic segmentation, it is the class id for each + # pixel. Since there are some classes not used in evaluation, the category + # id is not always contiguous and thus we have two set of category ids: + # - original category id: category id in the original dataset, mainly + # used for evaluation. + # - contiguous category id: [0, #classes), in order to train the classifier + # (2) instance id: this id is used to differentiate different instances from + # the same category. For "stuff" classes, the instance id is always 0; for + # "thing" classes, the instance id starts from 1 and 0 is reserved for + # ignored instances (e.g. crowd annotation). + # (3) panoptic id: this is the compact id that encode both category and + # instance id by: category_id * 1000 + instance_id. 
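+    # For illustration: with this encoding a panoptic id of 26001 decodes to
+    # category_id 26 ("car") with instance_id 1, while a "stuff" pixel such as
+    # "road" (id 7) is simply stored as 7000.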
+ thing_dataset_id_to_contiguous_id = {} + stuff_dataset_id_to_contiguous_id = {} + + for k in CITYSCAPES_CATEGORIES: + if k["isthing"] == 1: + thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"] + else: + stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"] + + meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id + meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id + + for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items(): + image_dir = os.path.join(root, image_dir) + gt_dir = os.path.join(root, gt_dir) + gt_json = os.path.join(root, gt_json) + + DatasetCatalog.register( + key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta) + ) + MetadataCatalog.get(key).set( + panoptic_root=gt_dir, + image_root=image_dir, + panoptic_json=gt_json, + gt_dir=gt_dir.replace("cityscapes_panoptic_", ""), + evaluator_type="cityscapes_panoptic_seg", + ignore_label=255, + label_divisor=1000, + **meta, + ) diff --git a/data_processing/detectron2/detectron2/data/datasets/coco.py b/data_processing/detectron2/detectron2/data/datasets/coco.py new file mode 100644 index 0000000..ed4f7cc --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/coco.py @@ -0,0 +1,539 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import datetime +import io +import json +import logging +import numpy as np +import os +import shutil +import pycocotools.mask as mask_util +from fvcore.common.timer import Timer +from iopath.common.file_io import file_lock +from PIL import Image + +from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes +from detectron2.utils.file_io import PathManager + +from .. import DatasetCatalog, MetadataCatalog + +""" +This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". +""" + + +logger = logging.getLogger(__name__) + +__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"] + + +def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None): + """ + Load a json file with COCO's instances annotation format. + Currently supports instance detection, instance segmentation, + and person keypoints annotations. + + Args: + json_file (str): full path to the json file in COCO instances annotation format. + image_root (str or path-like): the directory where the images in this json file exists. + dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). + When provided, this function will also do the following: + + * Put "thing_classes" into the metadata associated with this dataset. + * Map the category ids into a contiguous range (needed by standard dataset format), + and add "thing_dataset_id_to_contiguous_id" to the metadata associated + with this dataset. + + This option should usually be provided, unless users need to load + the original json content and apply more processing manually. + extra_annotation_keys (list[str]): list of per-annotation keys that should also be + loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", + "category_id", "segmentation"). The values for these keys will be returned as-is. + For example, the densepose annotations are loaded in this way. + + Returns: + list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See + `Using Custom Datasets `_ ) when `dataset_name` is not None. 
+ If `dataset_name` is None, the returned `category_ids` may be + incontiguous and may not conform to the Detectron2 standard format. + + Notes: + 1. This function does not read the image files. + The results do not have the "image" field. + """ + from pycocotools.coco import COCO + + timer = Timer() + json_file = PathManager.get_local_path(json_file) + with contextlib.redirect_stdout(io.StringIO()): + coco_api = COCO(json_file) + if timer.seconds() > 1: + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) + + id_map = None + if dataset_name is not None: + meta = MetadataCatalog.get(dataset_name) + cat_ids = sorted(coco_api.getCatIds()) + cats = coco_api.loadCats(cat_ids) + # The categories in a custom json file may not be sorted. + thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] + meta.thing_classes = thing_classes + + # In COCO, certain category ids are artificially removed, + # and by convention they are always ignored. + # We deal with COCO's id issue and translate + # the category ids to contiguous ids in [0, 80). + + # It works by looking at the "categories" field in the json, therefore + # if users' own json also have incontiguous ids, we'll + # apply this mapping as well but print a warning. + if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): + if "coco" not in dataset_name: + logger.warning( + """ +Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. +""" + ) + id_map = {v: i for i, v in enumerate(cat_ids)} + meta.thing_dataset_id_to_contiguous_id = id_map + + # sort indices for reproducible results + img_ids = sorted(coco_api.imgs.keys()) + # imgs is a list of dicts, each looks something like: + # {'license': 4, + # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', + # 'file_name': 'COCO_val2014_000000001268.jpg', + # 'height': 427, + # 'width': 640, + # 'date_captured': '2013-11-17 05:57:24', + # 'id': 1268} + imgs = coco_api.loadImgs(img_ids) + # anns is a list[list[dict]], where each dict is an annotation + # record for an object. The inner list enumerates the objects in an image + # and the outer list enumerates over images. Example of anns[0]: + # [{'segmentation': [[192.81, + # 247.09, + # ... + # 219.03, + # 249.06]], + # 'area': 1035.749, + # 'iscrowd': 0, + # 'image_id': 1268, + # 'bbox': [192.81, 224.8, 74.73, 33.43], + # 'category_id': 16, + # 'id': 42986}, + # ...] + anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] + total_num_valid_anns = sum([len(x) for x in anns]) + total_num_anns = len(coco_api.anns) + if total_num_valid_anns < total_num_anns: + logger.warning( + f"{json_file} contains {total_num_anns} annotations, but only " + f"{total_num_valid_anns} of them match to images in the file." + ) + + if "minival" not in json_file: + # The popular valminusminival & minival annotations for COCO2014 contain this bug. + # However the ratio of buggy annotations there is tiny and does not affect accuracy. + # Therefore we explicitly white-list them. 
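Stepping back to the contiguous-id remapping built earlier in this function (`id_map = {v: i for i, v in enumerate(cat_ids)}`), a minimal sketch with hypothetical category ids shows how ids with gaps are compacted into [0, #classes):

```python
# Hypothetical, non-contiguous category ids as they might appear in a json.
cat_ids = sorted([1, 2, 4, 7])

# Same construction as in load_coco_json: original id -> contiguous id.
id_map = {v: i for i, v in enumerate(cat_ids)}
assert id_map == {1: 0, 2: 1, 4: 2, 7: 3}
```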
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( + json_file + ) + + imgs_anns = list(zip(imgs, anns)) + logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) + + dataset_dicts = [] + + ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) + + num_instances_without_valid_segmentation = 0 + + for (img_dict, anno_dict_list) in imgs_anns: + record = {} + record["file_name"] = os.path.join(image_root, img_dict["file_name"]) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + image_id = record["image_id"] = img_dict["id"] + + objs = [] + for anno in anno_dict_list: + # Check that the image_id in this annotation is the same as + # the image_id we're looking at. + # This fails only when the data parsing logic or the annotation file is buggy. + + # The original COCO valminusminival2014 & minival2014 annotation files + # actually contains bugs that, together with certain ways of using COCO API, + # can trigger this assertion. + assert anno["image_id"] == image_id + + assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' + + obj = {key: anno[key] for key in ann_keys if key in anno} + if "bbox" in obj and len(obj["bbox"]) == 0: + raise ValueError( + f"One annotation of image {image_id} contains empty 'bbox' value! " + "This json does not have valid COCO format." + ) + + segm = anno.get("segmentation", None) + if segm: # either list[list[float]] or dict(RLE) + if isinstance(segm, dict): + if isinstance(segm["counts"], list): + # convert to compressed RLE + segm = mask_util.frPyObjects(segm, *segm["size"]) + else: + # filter out invalid polygons (< 3 points) + segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + if len(segm) == 0: + num_instances_without_valid_segmentation += 1 + continue # ignore this instance + obj["segmentation"] = segm + + keypts = anno.get("keypoints", None) + if keypts: # list[int] + for idx, v in enumerate(keypts): + if idx % 3 != 2: + # COCO's segmentation coordinates are floating points in [0, H or W], + # but keypoint coordinates are integers in [0, H-1 or W-1] + # Therefore we assume the coordinates are "pixel indices" and + # add 0.5 to convert to floating point coordinates. + keypts[idx] = v + 0.5 + obj["keypoints"] = keypts + + obj["bbox_mode"] = BoxMode.XYWH_ABS + if id_map: + annotation_category_id = obj["category_id"] + try: + obj["category_id"] = id_map[annotation_category_id] + except KeyError as e: + raise KeyError( + f"Encountered category_id={annotation_category_id} " + "but this id does not exist in 'categories' of the json file." + ) from e + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + + if num_instances_without_valid_segmentation > 0: + logger.warning( + "Filtered out {} instances without valid segmentation. ".format( + num_instances_without_valid_segmentation + ) + + "There might be issues in your dataset generation process. Please " + "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" + ) + return dataset_dicts + + +def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): + """ + Load semantic segmentation datasets. 
All files under "gt_root" with "gt_ext" extension are + treated as ground truth annotations and all files under "image_root" with "image_ext" extension + as input images. Ground truth and input images are matched using file paths relative to + "gt_root" and "image_root" respectively without taking into account file extensions. + This works for COCO as well as some other datasets. + + Args: + gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation + annotations are stored as images with integer values in pixels that represent + corresponding semantic labels. + image_root (str): the directory where the input images are. + gt_ext (str): file extension for ground truth annotations. + image_ext (str): file extension for input images. + + Returns: + list[dict]: + a list of dicts in detectron2 standard format without instance-level + annotation. + + Notes: + 1. This function does not read the image and ground truth files. + The results do not have the "image" and "sem_seg" fields. + """ + + # We match input images with ground truth based on their relative filepaths (without file + # extensions) starting from 'image_root' and 'gt_root' respectively. + def file2id(folder_path, file_path): + # extract relative path starting from `folder_path` + image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) + # remove file extension + image_id = os.path.splitext(image_id)[0] + return image_id + + input_files = sorted( + (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), + key=lambda file_path: file2id(image_root, file_path), + ) + gt_files = sorted( + (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), + key=lambda file_path: file2id(gt_root, file_path), + ) + + assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) + + # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images + if len(input_files) != len(gt_files): + logger.warn( + "Directory {} and {} has {} and {} files, respectively.".format( + image_root, gt_root, len(input_files), len(gt_files) + ) + ) + input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] + gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] + intersect = list(set(input_basenames) & set(gt_basenames)) + # sort, otherwise each worker may obtain a list[dict] in different order + intersect = sorted(intersect) + logger.warn("Will use their intersection of {} files.".format(len(intersect))) + input_files = [os.path.join(image_root, f + image_ext) for f in intersect] + gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] + + logger.info( + "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) + ) + + dataset_dicts = [] + for (img_path, gt_path) in zip(input_files, gt_files): + record = {} + record["file_name"] = img_path + record["sem_seg_file_name"] = gt_path + dataset_dicts.append(record) + + return dataset_dicts + + +def convert_to_coco_dict(dataset_name): + """ + Convert an instance detection/segmentation or keypoint detection dataset + in detectron2's standard format into COCO json format. 
+ + Generic dataset description can be found here: + https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset + + COCO data format description can be found here: + http://cocodataset.org/#format-data + + Args: + dataset_name (str): + name of the source dataset + Must be registered in DatasetCatalog and in detectron2's standard format + Must have corresponding metadata "thing_classes" + Returns: + coco_dict: serializable dict in COCO json format + """ + + dataset_dicts = DatasetCatalog.get(dataset_name) + metadata = MetadataCatalog.get(dataset_name) + + # unmap the category mapping ids for COCO + if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} + reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa + else: + reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa + + categories = [ + {"id": reverse_id_mapper(id), "name": name} + for id, name in enumerate(metadata.thing_classes) + ] + + logger.info("Converting dataset dicts into COCO format") + coco_images = [] + coco_annotations = [] + + for image_id, image_dict in enumerate(dataset_dicts): + coco_image = { + "id": image_dict.get("image_id", image_id), + "width": int(image_dict["width"]), + "height": int(image_dict["height"]), + "file_name": str(image_dict["file_name"]), + } + coco_images.append(coco_image) + + anns_per_image = image_dict.get("annotations", []) + for annotation in anns_per_image: + # create a new dict with only COCO fields + coco_annotation = {} + + # COCO requirement: XYWH box format for axis-aligned and XYWHA for rotated + bbox = annotation["bbox"] + if isinstance(bbox, np.ndarray): + if bbox.ndim != 1: + raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") + bbox = bbox.tolist() + if len(bbox) not in [4, 5]: + raise ValueError(f"bbox has to have length 4 or 5.
Got {bbox}.") + from_bbox_mode = annotation["bbox_mode"] + to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS + bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) + + # COCO requirement: instance area + if "segmentation" in annotation: + # Computing areas for instances by counting the pixels + segmentation = annotation["segmentation"] + # TODO: check segmentation type: RLE, BinaryMask or Polygon + if isinstance(segmentation, list): + polygons = PolygonMasks([segmentation]) + area = polygons.area()[0].item() + elif isinstance(segmentation, dict): # RLE + area = mask_util.area(segmentation).item() + else: + raise TypeError(f"Unknown segmentation type {type(segmentation)}!") + else: + # Computing areas using bounding boxes + if to_bbox_mode == BoxMode.XYWH_ABS: + bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) + area = Boxes([bbox_xy]).area()[0].item() + else: + area = RotatedBoxes([bbox]).area()[0].item() + + if "keypoints" in annotation: + keypoints = annotation["keypoints"] # list[int] + for idx, v in enumerate(keypoints): + if idx % 3 != 2: + # COCO's segmentation coordinates are floating points in [0, H or W], + # but keypoint coordinates are integers in [0, H-1 or W-1] + # For COCO format consistency we substract 0.5 + # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163 + keypoints[idx] = v - 0.5 + if "num_keypoints" in annotation: + num_keypoints = annotation["num_keypoints"] + else: + num_keypoints = sum(kp > 0 for kp in keypoints[2::3]) + + # COCO requirement: + # linking annotations to images + # "id" field must start with 1 + coco_annotation["id"] = len(coco_annotations) + 1 + coco_annotation["image_id"] = coco_image["id"] + coco_annotation["bbox"] = [round(float(x), 3) for x in bbox] + coco_annotation["area"] = float(area) + coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0)) + coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"])) + + # Add optional fields + if "keypoints" in annotation: + coco_annotation["keypoints"] = keypoints + coco_annotation["num_keypoints"] = num_keypoints + + if "segmentation" in annotation: + seg = coco_annotation["segmentation"] = annotation["segmentation"] + if isinstance(seg, dict): # RLE + counts = seg["counts"] + if not isinstance(counts, str): + # make it json-serializable + seg["counts"] = counts.decode("ascii") + + coco_annotations.append(coco_annotation) + + logger.info( + "Conversion finished, " + f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}" + ) + + info = { + "date_created": str(datetime.datetime.now()), + "description": "Automatically generated COCO json file for Detectron2.", + } + coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None} + if len(coco_annotations) > 0: + coco_dict["annotations"] = coco_annotations + return coco_dict + + +def convert_to_coco_json(dataset_name, output_file, allow_cached=True): + """ + Converts dataset into COCO format and saves it to a json file. + dataset_name must be registered in DatasetCatalog and in detectron2's standard format. 
+ + Args: + dataset_name: + reference from the config file to the catalogs + must be registered in DatasetCatalog and in detectron2's standard format + output_file: path of json file that will be saved to + allow_cached: if json file is already present then skip conversion + """ + + # TODO: The dataset or the conversion script *may* change, + # a checksum would be useful for validating the cached data + + PathManager.mkdirs(os.path.dirname(output_file)) + with file_lock(output_file): + if PathManager.exists(output_file) and allow_cached: + logger.warning( + f"Using previously cached COCO format annotations at '{output_file}'. " + "You need to clear the cache file if your dataset has been modified." + ) + else: + logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)") + coco_dict = convert_to_coco_dict(dataset_name) + + logger.info(f"Caching COCO format annotations at '{output_file}' ...") + tmp_file = output_file + ".tmp" + with PathManager.open(tmp_file, "w") as f: + json.dump(coco_dict, f) + shutil.move(tmp_file, output_file) + + +def register_coco_instances(name, metadata, json_file, image_root): + """ + Register a dataset in COCO's json annotation format for + instance detection, instance segmentation and keypoint detection. + (i.e., Type 1 and 2 in http://cocodataset.org/#format-data. + `instances*.json` and `person_keypoints*.json` in the dataset). + + This is an example of how to register a new dataset. + You can do something similar to this function, to register new datasets. + + Args: + name (str): the name that identifies a dataset, e.g. "coco_2014_train". + metadata (dict): extra metadata associated with this dataset. You can + leave it as an empty dict. + json_file (str): path to the json instance annotation file. + image_root (str or path-like): directory which contains all the images. + """ + assert isinstance(name, str), name + assert isinstance(json_file, (str, os.PathLike)), json_file + assert isinstance(image_root, (str, os.PathLike)), image_root + # 1. register a function which returns dicts + DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name)) + + # 2. Optionally, add metadata about this dataset, + # since they might be useful in evaluation, visualization or logging + MetadataCatalog.get(name).set( + json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata + ) + + +if __name__ == "__main__": + """ + Test the COCO json dataset loader. 
+ + Usage: + python -m detectron2.data.datasets.coco \ + path/to/json path/to/image_root dataset_name + + "dataset_name" can be "coco_2014_minival_100", or other + pre-registered ones + """ + from detectron2.utils.logger import setup_logger + from detectron2.utils.visualizer import Visualizer + import detectron2.data.datasets # noqa # add pre-defined metadata + import sys + + logger = setup_logger(name=__name__) + assert sys.argv[3] in DatasetCatalog.list() + meta = MetadataCatalog.get(sys.argv[3]) + + dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3]) + logger.info("Done loading {} samples.".format(len(dicts))) + + dirname = "coco-data-vis" + os.makedirs(dirname, exist_ok=True) + for d in dicts: + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/data_processing/detectron2/detectron2/data/datasets/coco_panoptic.py b/data_processing/detectron2/detectron2/data/datasets/coco_panoptic.py new file mode 100644 index 0000000..b8dae44 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/coco_panoptic.py @@ -0,0 +1,228 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import json +import os + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.utils.file_io import PathManager + +from .coco import load_coco_json, load_sem_seg + +__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"] + + +def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/coco/train2017". + gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017". + json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json". + + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + """ + + def _convert_category_id(segment_info, meta): + if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: + segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + segment_info["isthing"] = True + else: + segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + segment_info["isthing"] = False + return segment_info + + with PathManager.open(json_file) as f: + json_info = json.load(f) + + ret = [] + for ann in json_info["annotations"]: + image_id = int(ann["image_id"]) + # TODO: currently we assume image and label has the same filename but + # different extension, and images have extension ".jpg" for COCO. Need + # to make image extension a user-provided argument if we extend this + # function to support other COCO-like datasets. + image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg") + label_file = os.path.join(gt_dir, ann["file_name"]) + segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]] + ret.append( + { + "file_name": image_file, + "image_id": image_id, + "pan_seg_file_name": label_file, + "segments_info": segments_info, + } + ) + assert len(ret), f"No images found in {image_dir}!" 
+ assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"] + assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"] + return ret + + +def register_coco_panoptic( + name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None +): + """ + Register a "standard" version of COCO panoptic segmentation dataset named `name`. + The dictionaries in this registered dataset follows detectron2's standard format. + Hence it's called "standard". + + Args: + name (str): the name that identifies a dataset, + e.g. "coco_2017_train_panoptic" + metadata (dict): extra metadata associated with this dataset. + image_root (str): directory which contains all the images + panoptic_root (str): directory which contains panoptic annotation images in COCO format + panoptic_json (str): path to the json panoptic annotation file in COCO format + sem_seg_root (none): not used, to be consistent with + `register_coco_panoptic_separated`. + instances_json (str): path to the json instance annotation file + """ + panoptic_name = name + DatasetCatalog.register( + panoptic_name, + lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata), + ) + MetadataCatalog.get(panoptic_name).set( + panoptic_root=panoptic_root, + image_root=image_root, + panoptic_json=panoptic_json, + json_file=instances_json, + evaluator_type="coco_panoptic_seg", + ignore_label=255, + label_divisor=1000, + **metadata, + ) + + +def register_coco_panoptic_separated( + name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json +): + """ + Register a "separated" version of COCO panoptic segmentation dataset named `name`. + The annotations in this registered dataset will contain both instance annotations and + semantic annotations, each with its own contiguous ids. Hence it's called "separated". + + It follows the setting used by the PanopticFPN paper: + + 1. The instance annotations directly come from polygons in the COCO + instances annotation task, rather than from the masks in the COCO panoptic annotations. + + The two format have small differences: + Polygons in the instance annotations may have overlaps. + The mask annotations are produced by labeling the overlapped polygons + with depth ordering. + + 2. The semantic annotations are converted from panoptic annotations, where + all "things" are assigned a semantic id of 0. + All semantic categories will therefore have ids in contiguous + range [1, #stuff_categories]. + + This function will also register a pure semantic segmentation dataset + named ``name + '_stuffonly'``. + + Args: + name (str): the name that identifies a dataset, + e.g. "coco_2017_train_panoptic" + metadata (dict): extra metadata associated with this dataset. + image_root (str): directory which contains all the images + panoptic_root (str): directory which contains panoptic annotation images + panoptic_json (str): path to the json panoptic annotation file + sem_seg_root (str): directory which contains all the ground truth segmentation annotations. 
+ instances_json (str): path to the json instance annotation file + """ + panoptic_name = name + "_separated" + DatasetCatalog.register( + panoptic_name, + lambda: merge_to_panoptic( + load_coco_json(instances_json, image_root, panoptic_name), + load_sem_seg(sem_seg_root, image_root), + ), + ) + MetadataCatalog.get(panoptic_name).set( + panoptic_root=panoptic_root, + image_root=image_root, + panoptic_json=panoptic_json, + sem_seg_root=sem_seg_root, + json_file=instances_json, # TODO rename + evaluator_type="coco_panoptic_seg", + ignore_label=255, + **metadata, + ) + + semantic_name = name + "_stuffonly" + DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root)) + MetadataCatalog.get(semantic_name).set( + sem_seg_root=sem_seg_root, + image_root=image_root, + evaluator_type="sem_seg", + ignore_label=255, + **metadata, + ) + + +def merge_to_panoptic(detection_dicts, sem_seg_dicts): + """ + Create dataset dicts for panoptic segmentation, by + merging two dicts using "file_name" field to match their entries. + + Args: + detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation. + sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation. + + Returns: + list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in + both detection_dicts and sem_seg_dicts that correspond to the same image. + The function assumes that the same key in different dicts has the same value. + """ + results = [] + sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts} + assert len(sem_seg_file_to_entry) > 0 + + for det_dict in detection_dicts: + dic = copy.copy(det_dict) + dic.update(sem_seg_file_to_entry[dic["file_name"]]) + results.append(dic) + return results + + +if __name__ == "__main__": + """ + Test the COCO panoptic dataset loader. + + Usage: + python -m detectron2.data.datasets.coco_panoptic \ + path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10 + + "dataset_name" can be "coco_2017_train_panoptic", or other + pre-registered ones + """ + from detectron2.utils.logger import setup_logger + from detectron2.utils.visualizer import Visualizer + import detectron2.data.datasets # noqa # add pre-defined metadata + import sys + from PIL import Image + import numpy as np + + logger = setup_logger(name=__name__) + assert sys.argv[4] in DatasetCatalog.list() + meta = MetadataCatalog.get(sys.argv[4]) + + dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict()) + logger.info("Done loading {} samples.".format(len(dicts))) + + dirname = "coco-data-vis" + os.makedirs(dirname, exist_ok=True) + num_imgs_to_vis = int(sys.argv[5]) + for i, d in enumerate(dicts): + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) + if i + 1 >= num_imgs_to_vis: + break diff --git a/data_processing/detectron2/detectron2/data/datasets/lvis.py b/data_processing/detectron2/detectron2/data/datasets/lvis.py new file mode 100644 index 0000000..576d962 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/lvis.py @@ -0,0 +1,241 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
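For reference, a minimal sketch of how `merge_to_panoptic` (defined in coco_panoptic.py above) pairs records: detection dicts and semantic-segmentation dicts that share the same "file_name" are merged into one dict. The file names below are hypothetical, and the import path assumes detectron2 is installed:

```python
from detectron2.data.datasets.coco_panoptic import merge_to_panoptic

# Hypothetical dataset dicts that refer to the same image.
detection_dicts = [{"file_name": "img1.jpg", "annotations": []}]
sem_seg_dicts = [{"file_name": "img1.jpg", "sem_seg_file_name": "img1_sem.png"}]

merged = merge_to_panoptic(detection_dicts, sem_seg_dicts)
# Each merged record carries the keys from both inputs.
assert merged[0]["sem_seg_file_name"] == "img1_sem.png"
assert "annotations" in merged[0]
```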
+import logging +import os +from fvcore.common.timer import Timer + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.structures import BoxMode +from detectron2.utils.file_io import PathManager + +from .builtin_meta import _get_coco_instances_meta +from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES +from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES +from .lvis_v1_category_image_count import LVIS_CATEGORY_IMAGE_COUNT as LVIS_V1_CATEGORY_IMAGE_COUNT + +""" +This file contains functions to parse LVIS-format annotations into dicts in the +"Detectron2 format". +""" + +logger = logging.getLogger(__name__) + +__all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"] + + +def register_lvis_instances(name, metadata, json_file, image_root): + """ + Register a dataset in LVIS's json annotation format for instance detection and segmentation. + + Args: + name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train". + metadata (dict): extra metadata associated with this dataset. It can be an empty dict. + json_file (str): path to the json instance annotation file. + image_root (str or path-like): directory which contains all the images. + """ + DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name)) + MetadataCatalog.get(name).set( + json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata + ) + + +def load_lvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None): + """ + Load a json file in LVIS's annotation format. + + Args: + json_file (str): full path to the LVIS json annotation file. + image_root (str): the directory where the images in this json file exists. + dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train"). + If provided, this function will put "thing_classes" into the metadata + associated with this dataset. + extra_annotation_keys (list[str]): list of per-annotation keys that should also be + loaded into the dataset dict (besides "bbox", "bbox_mode", "category_id", + "segmentation"). The values for these keys will be returned as-is. + + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + + Notes: + 1. This function does not read the image files. + The results do not have the "image" field. + """ + from lvis import LVIS + + json_file = PathManager.get_local_path(json_file) + + timer = Timer() + lvis_api = LVIS(json_file) + if timer.seconds() > 1: + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) + + if dataset_name is not None: + meta = get_lvis_instances_meta(dataset_name) + MetadataCatalog.get(dataset_name).set(**meta) + + # sort indices for reproducible results + img_ids = sorted(lvis_api.imgs.keys()) + # imgs is a list of dicts, each looks something like: + # {'license': 4, + # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', + # 'file_name': 'COCO_val2014_000000001268.jpg', + # 'height': 427, + # 'width': 640, + # 'date_captured': '2013-11-17 05:57:24', + # 'id': 1268} + imgs = lvis_api.load_imgs(img_ids) + # anns is a list[list[dict]], where each dict is an annotation + # record for an object. The inner list enumerates the objects in an image + # and the outer list enumerates over images. Example of anns[0]: + # [{'segmentation': [[192.81, + # 247.09, + # ... 
+ # 219.03, + # 249.06]], + # 'area': 1035.749, + # 'image_id': 1268, + # 'bbox': [192.81, 224.8, 74.73, 33.43], + # 'category_id': 16, + # 'id': 42986}, + # ...] + anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] + + # Sanity check that each annotation has a unique id + ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format( + json_file + ) + + imgs_anns = list(zip(imgs, anns)) + + logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)) + + if extra_annotation_keys: + logger.info( + "The following extra annotation keys will be loaded: {} ".format(extra_annotation_keys) + ) + else: + extra_annotation_keys = [] + + def get_file_name(img_root, img_dict): + # Determine the path including the split folder ("train2017", "val2017", "test2017") from + # the coco_url field. Example: + # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg' + split_folder, file_name = img_dict["coco_url"].split("/")[-2:] + return os.path.join(img_root + split_folder, file_name) + + dataset_dicts = [] + + for (img_dict, anno_dict_list) in imgs_anns: + record = {} + record["file_name"] = get_file_name(image_root, img_dict) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", []) + record["neg_category_ids"] = img_dict.get("neg_category_ids", []) + image_id = record["image_id"] = img_dict["id"] + + objs = [] + for anno in anno_dict_list: + # Check that the image_id in this annotation is the same as + # the image_id we're looking at. + # This fails only when the data parsing logic or the annotation file is buggy. + assert anno["image_id"] == image_id + obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} + # LVIS data loader can be used to load COCO dataset categories. In this case `meta` + # variable will have a field with COCO-specific category mapping. + if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta: + obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][anno["category_id"]] + else: + obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed + segm = anno["segmentation"] # list[list[float]] + # filter out invalid polygons (< 3 points) + valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + assert len(segm) == len( + valid_segm + ), "Annotation contains an invalid polygon with < 3 points" + assert len(segm) > 0 + obj["segmentation"] = segm + for extra_ann_key in extra_annotation_keys: + obj[extra_ann_key] = anno[extra_ann_key] + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + + return dataset_dicts + + +def get_lvis_instances_meta(dataset_name): + """ + Load LVIS metadata. + + Args: + dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5"). 
+ + Returns: + dict: LVIS metadata with keys: thing_classes + """ + if "cocofied" in dataset_name: + return _get_coco_instances_meta() + if "v0.5" in dataset_name: + return _get_lvis_instances_meta_v0_5() + elif "v1" in dataset_name: + return _get_lvis_instances_meta_v1() + raise ValueError("No built-in metadata for dataset {}".format(dataset_name)) + + +def _get_lvis_instances_meta_v0_5(): + assert len(LVIS_V0_5_CATEGORIES) == 1230 + cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES] + assert min(cat_ids) == 1 and max(cat_ids) == len( + cat_ids + ), "Category ids are not in [1, #categories], as expected" + # Ensure that the category list is sorted by id + lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"]) + thing_classes = [k["synonyms"][0] for k in lvis_categories] + meta = {"thing_classes": thing_classes} + return meta + + +def _get_lvis_instances_meta_v1(): + assert len(LVIS_V1_CATEGORIES) == 1203 + cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES] + assert min(cat_ids) == 1 and max(cat_ids) == len( + cat_ids + ), "Category ids are not in [1, #categories], as expected" + # Ensure that the category list is sorted by id + lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"]) + thing_classes = [k["synonyms"][0] for k in lvis_categories] + meta = {"thing_classes": thing_classes, "class_image_count": LVIS_V1_CATEGORY_IMAGE_COUNT} + return meta + + +if __name__ == "__main__": + """ + Test the LVIS json dataset loader. + + Usage: + python -m detectron2.data.datasets.lvis \ + path/to/json path/to/image_root dataset_name vis_limit + """ + import sys + import numpy as np + from detectron2.utils.logger import setup_logger + from PIL import Image + import detectron2.data.datasets # noqa # add pre-defined metadata + from detectron2.utils.visualizer import Visualizer + + logger = setup_logger(name=__name__) + meta = MetadataCatalog.get(sys.argv[3]) + + dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3]) + logger.info("Done loading {} samples.".format(len(dicts))) + + dirname = "lvis-data-vis" + os.makedirs(dirname, exist_ok=True) + for d in dicts[: int(sys.argv[4])]: + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/data_processing/detectron2/detectron2/data/datasets/lvis_v0_5_categories.py b/data_processing/detectron2/detectron2/data/datasets/lvis_v0_5_categories.py new file mode 100644 index 0000000..d3dab61 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/lvis_v0_5_categories.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
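For reference, a minimal sketch of how the LVIS metadata helpers above are typically used (the dataset name is hypothetical; the import path assumes detectron2 is installed):

```python
from detectron2.data.datasets.lvis import get_lvis_instances_meta

# "v1" in the name selects the LVIS v1 metadata defined above.
meta = get_lvis_instances_meta("lvis_v1_train")
assert len(meta["thing_classes"]) == 1203
assert "class_image_count" in meta
```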
+# Autogen with +# with open("lvis_v0.5_val.json", "r") as f: +# a = json.load(f) +# c = a["categories"] +# for x in c: +# del x["image_count"] +# del x["instance_count"] +# LVIS_CATEGORIES = repr(c) + " # noqa" + +# fmt: off +LVIS_CATEGORIES = [{'frequency': 'r', 'id': 1, 'synset': 'acorn.n.01', 'synonyms': ['acorn'], 'def': 'nut from an oak tree', 'name': 'acorn'}, {'frequency': 'c', 'id': 2, 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'id': 3, 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'id': 4, 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'c', 'id': 5, 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'id': 6, 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'r', 'id': 7, 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'id': 8, 'synset': 'almond.n.02', 'synonyms': ['almond'], 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'id': 9, 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'r', 'id': 10, 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'id': 11, 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'id': 12, 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'id': 13, 'synset': 'apple.n.01', 'synonyms': ['apple'], 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'id': 14, 'synset': 'apple_juice.n.01', 'synonyms': ['apple_juice'], 'def': 'the juice of apples', 'name': 'apple_juice'}, {'frequency': 'r', 'id': 15, 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'id': 16, 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'id': 17, 'synset': 'apron.n.01', 'synonyms': ['apron'], 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'id': 18, 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'c', 'id': 19, 'synset': 'armband.n.02', 'synonyms': ['armband'], 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'id': 20, 
'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'id': 21, 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'id': 22, 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'id': 23, 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'id': 24, 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'id': 25, 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'id': 26, 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'id': 27, 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'c', 'id': 28, 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'id': 29, 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'id': 30, 'synset': 'awning.n.01', 'synonyms': ['awning'], 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'id': 31, 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'f', 'id': 32, 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'id': 33, 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'id': 34, 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'id': 35, 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'id': 36, 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'id': 37, 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'id': 38, 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 
'id': 39, 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'id': 40, 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'id': 41, 'synset': 'ball.n.06', 'synonyms': ['ball'], 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'id': 42, 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'id': 43, 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'id': 44, 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'id': 45, 'synset': 'banana.n.02', 'synonyms': ['banana'], 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'r', 'id': 46, 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'id': 47, 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'c', 'id': 48, 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'id': 49, 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'id': 50, 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'id': 51, 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'id': 52, 'synset': 'barge.n.01', 'synonyms': ['barge'], 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'id': 53, 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'id': 54, 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'id': 55, 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'id': 56, 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'id': 57, 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'id': 58, 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'id': 59, 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'def': 'a cap with a 
bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'id': 60, 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'id': 61, 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'id': 62, 'synset': 'basket.n.03', 'synonyms': ['basketball_hoop'], 'def': 'metal hoop supporting a net through which players try to throw the basketball', 'name': 'basketball_hoop'}, {'frequency': 'c', 'id': 63, 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'id': 64, 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'r', 'id': 65, 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'id': 66, 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'id': 67, 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'id': 68, 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'id': 69, 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'id': 70, 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'id': 71, 'synset': 'battery.n.02', 'synonyms': ['battery'], 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'id': 72, 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'id': 73, 'synset': 'bead.n.01', 'synonyms': ['bead'], 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'r', 'id': 74, 'synset': 'beaker.n.01', 'synonyms': ['beaker'], 'def': 'a flatbottomed jar made of glass or plastic; used for chemistry', 'name': 'beaker'}, {'frequency': 'c', 'id': 75, 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'id': 76, 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'id': 77, 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'id': 78, 'synset': 'bear.n.01', 'synonyms': ['bear'], 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'id': 79, 'synset': 'bed.n.01', 'synonyms': ['bed'], 'def': 'a piece of furniture that provides a place to 
sleep', 'name': 'bed'}, {'frequency': 'c', 'id': 80, 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'id': 81, 'synset': 'beef.n.01', 'synonyms': ['cow'], 'def': 'cattle that are reared for their meat', 'name': 'cow'}, {'frequency': 'c', 'id': 82, 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'id': 83, 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'id': 84, 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'id': 85, 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'id': 86, 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'id': 87, 'synset': 'bell.n.01', 'synonyms': ['bell'], 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'id': 88, 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'id': 89, 'synset': 'belt.n.02', 'synonyms': ['belt'], 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'id': 90, 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'id': 91, 'synset': 'bench.n.01', 'synonyms': ['bench'], 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'id': 92, 'synset': 'beret.n.01', 'synonyms': ['beret'], 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'id': 93, 'synset': 'bib.n.02', 'synonyms': ['bib'], 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'id': 94, 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'id': 95, 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'id': 96, 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'c', 'id': 97, 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'id': 98, 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'id': 99, 'synset': 'bird.n.01', 'synonyms': ['bird'], 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'r', 'id': 100, 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'r', 'id': 101, 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 
'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'id': 102, 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'id': 103, 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'id': 104, 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'id': 105, 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'id': 106, 'synset': 'biscuit.n.01', 'synonyms': ['biscuit_(bread)'], 'def': 'small round bread leavened with baking-powder or soda', 'name': 'biscuit_(bread)'}, {'frequency': 'r', 'id': 107, 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'id': 108, 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'id': 109, 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'id': 110, 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'id': 111, 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'id': 112, 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'id': 113, 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'c', 'id': 114, 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'c', 'id': 115, 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'id': 116, 'synset': 'boar.n.02', 'synonyms': ['boar'], 'def': 'an uncastrated male hog', 'name': 'boar'}, {'frequency': 'r', 'id': 117, 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'id': 118, 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'c', 'id': 119, 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'r', 'id': 120, 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'id': 121, 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'def': 'egg cooked briefly in the shell in gently boiling water', 
'name': 'boiled_egg'}, {'frequency': 'r', 'id': 122, 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'id': 123, 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'id': 124, 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'id': 125, 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'id': 126, 'synset': 'book.n.01', 'synonyms': ['book'], 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'r', 'id': 127, 'synset': 'book_bag.n.01', 'synonyms': ['book_bag'], 'def': 'a bag in which students carry their books', 'name': 'book_bag'}, {'frequency': 'c', 'id': 128, 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'id': 129, 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'id': 130, 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'id': 131, 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'id': 132, 'synset': 'boot.n.01', 'synonyms': ['boot'], 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'id': 133, 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'id': 134, 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'id': 135, 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'id': 136, 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'id': 137, 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'id': 138, 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'id': 139, 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'id': 140, 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'id': 141, 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'id': 142, 'synset': 'bowling_ball.n.01', 
'synonyms': ['bowling_ball'], 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'r', 'id': 143, 'synset': 'bowling_pin.n.01', 'synonyms': ['bowling_pin'], 'def': 'a club-shaped wooden object used in bowling', 'name': 'bowling_pin'}, {'frequency': 'r', 'id': 144, 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'id': 145, 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'id': 146, 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'id': 147, 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'id': 148, 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'id': 149, 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'r', 'id': 150, 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'c', 'id': 151, 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'id': 152, 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'c', 'id': 153, 'synset': 'bristle_brush.n.01', 'synonyms': ['bristle_brush'], 'def': 'a brush that is made with the short stiff hairs of an animal or plant', 'name': 'bristle_brush'}, {'frequency': 'f', 'id': 154, 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'id': 155, 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'id': 156, 'synset': 'broom.n.01', 'synonyms': ['broom'], 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'id': 157, 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'id': 158, 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'id': 159, 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'id': 160, 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'id': 161, 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'id': 162, 'synset': 'bull.n.11', 'synonyms': ['bull'], 
'def': 'mature male cow', 'name': 'bull'}, {'frequency': 'r', 'id': 163, 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'id': 164, 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'id': 165, 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'id': 166, 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'id': 167, 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'id': 168, 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'r', 'id': 169, 'synset': 'bully_beef.n.01', 'synonyms': ['corned_beef', 'corn_beef'], 'def': 'beef cured or pickled in brine', 'name': 'corned_beef'}, {'frequency': 'f', 'id': 170, 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'id': 171, 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'id': 172, 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'id': 173, 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'id': 174, 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'id': 175, 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'c', 'id': 176, 'synset': 'butcher_knife.n.01', 'synonyms': ['butcher_knife'], 'def': 'a large sharp knife for cutting or trimming meat', 'name': 'butcher_knife'}, {'frequency': 'c', 'id': 177, 'synset': 'butter.n.01', 'synonyms': ['butter'], 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'id': 178, 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'id': 179, 'synset': 'button.n.01', 'synonyms': ['button'], 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'id': 180, 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'id': 181, 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 
'cabana'}, {'frequency': 'r', 'id': 182, 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'id': 183, 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'id': 184, 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'id': 185, 'synset': 'cake.n.03', 'synonyms': ['cake'], 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'id': 186, 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'id': 187, 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'id': 188, 'synset': 'calf.n.01', 'synonyms': ['calf'], 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'id': 189, 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'id': 190, 'synset': 'camel.n.01', 'synonyms': ['camel'], 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'id': 191, 'synset': 'camera.n.01', 'synonyms': ['camera'], 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'id': 192, 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'id': 193, 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'id': 194, 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'id': 195, 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'r', 'id': 196, 'synset': 'candelabrum.n.01', 'synonyms': ['candelabrum', 'candelabra'], 'def': 'branched candlestick; ornamental; has several lights', 'name': 'candelabrum'}, {'frequency': 'f', 'id': 197, 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'id': 198, 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'id': 199, 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'id': 200, 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'id': 201, 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'id': 202, 'synset': 'canister.n.02', 
'synonyms': ['canister', 'cannister'], 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'r', 'id': 203, 'synset': 'cannon.n.02', 'synonyms': ['cannon'], 'def': 'heavy gun fired from a tank', 'name': 'cannon'}, {'frequency': 'c', 'id': 204, 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'r', 'id': 205, 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'id': 206, 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'c', 'id': 207, 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'id': 208, 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'r', 'id': 209, 'synset': 'cape.n.02', 'synonyms': ['cape'], 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'id': 210, 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'id': 211, 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'id': 212, 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'def': 'a wheeled vehicle adapted to the rails of railroad', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'id': 213, 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'id': 214, 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'id': 215, 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'id': 216, 'synset': 'card.n.03', 'synonyms': ['card'], 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'r', 'id': 217, 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'id': 218, 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'id': 219, 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'id': 220, 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'id': 221, 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'c', 'id': 222, 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'id': 223, 'synset': 'cart.n.01', 'synonyms': ['cart'], 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'id': 224, 'synset': 'carton.n.02', 'synonyms': ['carton'], 'def': 'a box made of cardboard; opens by flaps on top', 'name': 'carton'}, {'frequency': 'c', 'id': 225, 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'id': 226, 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'id': 227, 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'id': 228, 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'id': 229, 'synset': 'cat.n.01', 'synonyms': ['cat'], 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'c', 'id': 230, 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'r', 'id': 231, 'synset': 'caviar.n.01', 'synonyms': ['caviar', 'caviare'], 'def': "salted roe of sturgeon or other large fish; usually served as an hors d'oeuvre", 'name': 'caviar'}, {'frequency': 'c', 'id': 232, 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'id': 233, 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'c', 'id': 234, 'synset': 'celery.n.01', 'synonyms': ['celery'], 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'id': 235, 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'id': 236, 'synset': 
'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 'chain_mail'}, {'frequency': 'f', 'id': 237, 'synset': 'chair.n.01', 'synonyms': ['chair'], 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'id': 238, 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'id': 239, 'synset': 'champagne.n.01', 'synonyms': ['champagne'], 'def': 'a white sparkling wine produced in Champagne or resembling that produced there', 'name': 'champagne'}, {'frequency': 'f', 'id': 240, 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'id': 241, 'synset': 'chap.n.04', 'synonyms': ['chap'], 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'id': 242, 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'id': 243, 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'id': 244, 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'id': 245, 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'r', 'id': 246, 'synset': 'chest_of_drawers.n.01', 'synonyms': ['chest_of_drawers_(furniture)', 'bureau_(furniture)', 'chest_(furniture)'], 'def': 'furniture with drawers for keeping clothes', 'name': 'chest_of_drawers_(furniture)'}, {'frequency': 'c', 'id': 247, 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'id': 248, 'synset': 'chicken_wire.n.01', 'synonyms': ['chicken_wire'], 'def': 'a galvanized wire network with a hexagonal mesh; used to build fences', 'name': 'chicken_wire'}, {'frequency': 'r', 'id': 249, 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'r', 'id': 250, 'synset': 'chihuahua.n.03', 'synonyms': ['Chihuahua'], 'def': 'an old breed of tiny short-haired dog with protruding eyes from Mexico', 'name': 'Chihuahua'}, {'frequency': 'r', 'id': 251, 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'id': 252, 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'id': 253, 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'id': 254, 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, 
{'frequency': 'r', 'id': 255, 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'id': 256, 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'id': 257, 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'id': 258, 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'id': 259, 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 'id': 260, 'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'def': 'necklace that fits tightly around the neck', 'name': 'choker'}, {'frequency': 'f', 'id': 261, 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'c', 'id': 262, 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'id': 263, 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'id': 264, 'synset': 'chute.n.02', 'synonyms': ['slide'], 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'id': 265, 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'id': 266, 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'c', 'id': 267, 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'id': 268, 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'id': 269, 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'id': 270, 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'r', 'id': 271, 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'id': 272, 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'id': 273, 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'id': 274, 'synset': 'clip.n.03', 'synonyms': ['clip'], 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'id': 275, 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'def': 'a small 
writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'f', 'id': 276, 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'id': 277, 'synset': 'clock_tower.n.01', 'synonyms': ['clock_tower'], 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'id': 278, 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'id': 279, 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'id': 280, 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'id': 281, 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'id': 282, 'synset': 'coat.n.01', 'synonyms': ['coat'], 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'id': 283, 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'r', 'id': 284, 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'id': 285, 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'c', 'id': 286, 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'r', 'id': 287, 'synset': 'coffee_filter.n.01', 'synonyms': ['coffee_filter'], 'def': 'filter (usually of paper) that passes the coffee and retains the coffee grounds', 'name': 'coffee_filter'}, {'frequency': 'f', 'id': 288, 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'id': 289, 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'id': 290, 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'id': 291, 'synset': 'coil.n.05', 'synonyms': ['coil'], 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'id': 292, 'synset': 'coin.n.01', 'synonyms': ['coin'], 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'r', 'id': 293, 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'id': 294, 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'id': 295, 'synset': 'coloring_material.n.01', 
'synonyms': ['coloring_material', 'colouring_material'], 'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'id': 296, 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'id': 297, 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'id': 298, 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'f', 'id': 299, 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'r', 'id': 300, 'synset': 'concrete_mixer.n.01', 'synonyms': ['concrete_mixer', 'cement_mixer'], 'def': 'a machine with a large revolving drum in which cement/concrete is mixed', 'name': 'concrete_mixer'}, {'frequency': 'f', 'id': 301, 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'id': 302, 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'id': 303, 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'id': 304, 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'c', 'id': 305, 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'id': 306, 'synset': 'cookie_jar.n.01', 'synonyms': ['cookie_jar', 'cooky_jar'], 'def': 'a jar in which cookies are kept (and sometimes money is hidden)', 'name': 'cookie_jar'}, {'frequency': 'r', 'id': 307, 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'id': 308, 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'c', 'id': 309, 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'id': 310, 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'r', 'id': 311, 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'c', 'id': 312, 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'def': 'ears of corn that can be prepared and served for human food', 'name': 'edible_corn'}, {'frequency': 'r', 'id': 313, 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 'id': 314, 'synset': 'cornet.n.01', 'synonyms': ['cornet', 
'horn', 'trumpet'], 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'id': 315, 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'id': 316, 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'r', 'id': 317, 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'r', 'id': 318, 'synset': 'cos.n.02', 'synonyms': ['romaine_lettuce'], 'def': 'lettuce with long dark-green leaves in a loosely packed elongated head', 'name': 'romaine_lettuce'}, {'frequency': 'c', 'id': 319, 'synset': 'costume.n.04', 'synonyms': ['costume'], 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'id': 320, 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'id': 321, 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'r', 'id': 322, 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'id': 323, 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'r', 'id': 324, 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'c', 'id': 325, 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'id': 326, 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'id': 327, 'synset': 'crate.n.01', 'synonyms': ['crate'], 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'r', 'id': 328, 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'id': 329, 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'r', 'id': 330, 'synset': 'credit_card.n.01', 'synonyms': ['credit_card', 'charge_card', 'debit_card'], 'def': 'a card, usually plastic, used to pay for goods and services', 'name': 'credit_card'}, {'frequency': 'c', 'id': 331, 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'id': 332, 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'id': 333, 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'def': 'an earthen jar (made of baked clay)', 'name': 'crock_pot'}, {'frequency': 'f', 'id': 334, 'synset': 
'crossbar.n.01', 'synonyms': ['crossbar'], 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'id': 335, 'synset': 'crouton.n.01', 'synonyms': ['crouton'], 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'r', 'id': 336, 'synset': 'crow.n.01', 'synonyms': ['crow'], 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'c', 'id': 337, 'synset': 'crown.n.04', 'synonyms': ['crown'], 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'id': 338, 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'id': 339, 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'id': 340, 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'c', 'id': 341, 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'r', 'id': 342, 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'id': 343, 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'r', 'id': 344, 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'id': 345, 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'id': 346, 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'id': 347, 'synset': 'cup.n.01', 'synonyms': ['cup'], 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'id': 348, 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'def': 'a metal vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'c', 'id': 349, 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'id': 350, 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'id': 351, 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'id': 352, 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, {'frequency': 'f', 'id': 353, 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'id': 354, 'synset': 
'custard.n.01', 'synonyms': ['custard'], 'def': 'sweetened mixture of milk and eggs baked or boiled or frozen', 'name': 'custard'}, {'frequency': 'c', 'id': 355, 'synset': 'cutter.n.06', 'synonyms': ['cutting_tool'], 'def': 'a cutting implement; a tool for cutting', 'name': 'cutting_tool'}, {'frequency': 'r', 'id': 356, 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'id': 357, 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'id': 358, 'synset': 'dachshund.n.01', 'synonyms': ['dachshund', 'dachsie', 'badger_dog'], 'def': 'small long-bodied short-legged breed of dog having a short sleek coat and long drooping ears', 'name': 'dachshund'}, {'frequency': 'r', 'id': 359, 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'id': 360, 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'id': 361, 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'id': 362, 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'id': 363, 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'id': 364, 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'id': 365, 'synset': 'desk.n.01', 'synonyms': ['desk'], 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'id': 366, 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'id': 367, 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'id': 368, 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'def': 'a daily written record of (usually personal) experiences and observations', 'name': 'diary'}, {'frequency': 'r', 'id': 369, 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'id': 370, 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'id': 371, 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'id': 372, 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'c', 'id': 373, 'synset': 'dish.n.01', 'synonyms': ['dish'], 'def': 'a piece of dishware normally used as a container for 
holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'id': 374, 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, {'frequency': 'c', 'id': 375, 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'def': 'a cloth for washing dishes', 'name': 'dishrag'}, {'frequency': 'c', 'id': 376, 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'id': 377, 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'id': 378, 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid'], 'def': 'a low-sudsing detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'r', 'id': 379, 'synset': 'diskette.n.01', 'synonyms': ['diskette', 'floppy', 'floppy_disk'], 'def': 'a small plastic magnetic disk enclosed in a stiff envelope used to store data', 'name': 'diskette'}, {'frequency': 'c', 'id': 380, 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'c', 'id': 381, 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'id': 382, 'synset': 'dog.n.01', 'synonyms': ['dog'], 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'id': 383, 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'c', 'id': 384, 'synset': 'doll.n.01', 'synonyms': ['doll'], 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'id': 385, 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'id': 386, 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'id': 387, 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'r', 'id': 388, 'synset': 'domino.n.03', 'synonyms': ['eye_mask'], 'def': 'a mask covering the upper part of the face but with holes for the eyes', 'name': 'eye_mask'}, {'frequency': 'r', 'id': 389, 'synset': 'doorbell.n.01', 'synonyms': ['doorbell', 'buzzer'], 'def': 'a button at an outer door that gives a ringing or buzzing signal when pushed', 'name': 'doorbell'}, {'frequency': 'f', 'id': 390, 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'id': 391, 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'id': 392, 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'id': 393, 'synset': 'dove.n.01', 'synonyms': ['dove'], 
'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'id': 394, 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'id': 395, 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'def': 'a boxlike container in a piece of furniture; made so as to slide in and out', 'name': 'drawer'}, {'frequency': 'c', 'id': 396, 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'id': 397, 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'id': 398, 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'c', 'id': 399, 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'c', 'id': 400, 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'id': 401, 'synset': 'drill.n.01', 'synonyms': ['drill'], 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'id': 402, 'synset': 'drinking_fountain.n.01', 'synonyms': ['drinking_fountain'], 'def': 'a public fountain to provide a jet of drinking water', 'name': 'drinking_fountain'}, {'frequency': 'r', 'id': 403, 'synset': 'drone.n.04', 'synonyms': ['drone'], 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'id': 404, 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'id': 405, 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'id': 406, 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'id': 407, 'synset': 'duck.n.01', 'synonyms': ['duck'], 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'r', 'id': 408, 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'id': 409, 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'id': 410, 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'def': 'a large cylindrical bag of heavy cloth', 'name': 'duffel_bag'}, {'frequency': 'r', 'id': 411, 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'id': 412, 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'id': 413, 'synset': 'dustpan.n.02', 
'synonyms': ['dustpan'], 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'r', 'id': 414, 'synset': 'dutch_oven.n.02', 'synonyms': ['Dutch_oven'], 'def': 'iron or earthenware cooking pot; used for stews', 'name': 'Dutch_oven'}, {'frequency': 'c', 'id': 415, 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'id': 416, 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'id': 417, 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'def': 'a soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'id': 418, 'synset': 'earring.n.01', 'synonyms': ['earring'], 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'id': 419, 'synset': 'easel.n.01', 'synonyms': ['easel'], 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'id': 420, 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'id': 421, 'synset': 'eel.n.01', 'synonyms': ['eel'], 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'id': 422, 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'id': 423, 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'id': 424, 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'id': 425, 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'id': 426, 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'id': 427, 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'id': 428, 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'id': 429, 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'r', 'id': 430, 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'id': 431, 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'id': 432, 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'id': 433, 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 
'id': 434, 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'id': 435, 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'id': 436, 'synset': 'fan.n.01', 'synonyms': ['fan'], 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'id': 437, 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'id': 438, 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'id': 439, 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'id': 440, 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'r', 'id': 441, 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'id': 442, 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'id': 443, 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'id': 444, 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'id': 445, 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'id': 446, 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'id': 447, 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'c', 'id': 448, 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'c', 'id': 449, 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'id': 450, 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'id': 451, 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'id': 452, 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 
'hydrant'], 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'c', 'id': 453, 'synset': 'fish.n.01', 'synonyms': ['fish'], 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'r', 'id': 454, 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'id': 455, 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'def': 'a transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'r', 'id': 456, 'synset': 'fishing_boat.n.01', 'synonyms': ['fishing_boat', 'fishing_vessel'], 'def': 'a vessel for fishing', 'name': 'fishing_boat'}, {'frequency': 'c', 'id': 457, 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'id': 458, 'synset': 'flag.n.01', 'synonyms': ['flag'], 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'id': 459, 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'id': 460, 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'id': 461, 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'r', 'id': 462, 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'id': 463, 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'id': 464, 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'id': 465, 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'id': 466, 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'id': 467, 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'id': 468, 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'r', 'id': 469, 'synset': 'foal.n.01', 'synonyms': ['foal'], 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'id': 470, 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'id': 471, 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'id': 472, 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 
'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'id': 473, 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'id': 474, 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'id': 475, 'synset': 'fork.n.01', 'synonyms': ['fork'], 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'r', 'id': 476, 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'r', 'id': 477, 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'r', 'id': 478, 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'id': 479, 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'def': 'anything that freshens', 'name': 'freshener'}, {'frequency': 'f', 'id': 480, 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'id': 481, 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'id': 482, 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'r', 'id': 483, 'synset': 'fruit_salad.n.01', 'synonyms': ['fruit_salad'], 'def': 'salad composed of fruits', 'name': 'fruit_salad'}, {'frequency': 'c', 'id': 484, 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'id': 485, 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'id': 486, 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'c', 'id': 487, 'synset': 'futon.n.01', 'synonyms': ['futon'], 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'id': 488, 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'id': 489, 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'id': 490, 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'id': 491, 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'id': 492, 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 
'gargle'}, {'frequency': 'r', 'id': 493, 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'id': 494, 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'id': 495, 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'r', 'id': 496, 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'def': 'small swift graceful antelope of Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'id': 497, 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'id': 498, 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'c', 'id': 499, 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'id': 500, 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'id': 501, 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'id': 502, 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'id': 503, 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'id': 504, 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'id': 505, 'synset': 'globe.n.03', 'synonyms': ['globe'], 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'id': 506, 'synset': 'glove.n.02', 'synonyms': ['glove'], 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'id': 507, 'synset': 'goat.n.01', 'synonyms': ['goat'], 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'id': 508, 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'id': 509, 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'r', 'id': 510, 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'id': 511, 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'id': 512, 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'def': 'long narrow flat-bottomed boat propelled by 
sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'id': 513, 'synset': 'goose.n.01', 'synonyms': ['goose'], 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'id': 514, 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'id': 515, 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'r', 'id': 516, 'synset': 'gown.n.04', 'synonyms': ['surgical_gown', 'scrubs_(surgical_clothing)'], 'def': 'protective garment worn by surgeons during operations', 'name': 'surgical_gown'}, {'frequency': 'f', 'id': 517, 'synset': 'grape.n.01', 'synonyms': ['grape'], 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'r', 'id': 518, 'synset': 'grasshopper.n.01', 'synonyms': ['grasshopper'], 'def': 'plant-eating insect with hind legs adapted for leaping', 'name': 'grasshopper'}, {'frequency': 'c', 'id': 519, 'synset': 'grater.n.01', 'synonyms': ['grater'], 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'id': 520, 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'id': 521, 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'c', 'id': 522, 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'c', 'id': 523, 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'id': 524, 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'r', 'id': 525, 'synset': 'grillroom.n.01', 'synonyms': ['grillroom', 'grill_(restaurant)'], 'def': 'a restaurant where food is cooked on a grill', 'name': 'grillroom'}, {'frequency': 'r', 'id': 526, 'synset': 'grinder.n.04', 'synonyms': ['grinder_(tool)'], 'def': 'a machine tool that polishes metal', 'name': 'grinder_(tool)'}, {'frequency': 'r', 'id': 527, 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'id': 528, 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'id': 529, 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'r', 'id': 530, 'synset': 'guacamole.n.01', 'synonyms': ['guacamole'], 'def': 'a dip made of mashed avocado mixed with chopped onions and other seasonings', 'name': 'guacamole'}, {'frequency': 'f', 'id': 531, 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'id': 532, 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 
'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'id': 533, 'synset': 'gun.n.01', 'synonyms': ['gun'], 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'r', 'id': 534, 'synset': 'hair_spray.n.01', 'synonyms': ['hair_spray'], 'def': 'substance sprayed on the hair to hold it in place', 'name': 'hair_spray'}, {'frequency': 'c', 'id': 535, 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'id': 536, 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'id': 537, 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'f', 'id': 538, 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 'id': 539, 'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'id': 540, 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'r', 'id': 541, 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'id': 542, 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'r', 'id': 543, 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'c', 'id': 544, 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'id': 545, 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'id': 546, 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'id': 547, 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'id': 548, 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'id': 549, 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'id': 550, 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'id': 551, 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'id': 552, 'synset': 
'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'id': 553, 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'id': 554, 'synset': 'hat.n.01', 'synonyms': ['hat'], 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'id': 555, 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'r', 'id': 556, 'synset': 'hatch.n.03', 'synonyms': ['hatch'], 'def': 'a movable barrier covering a hatchway', 'name': 'hatch'}, {'frequency': 'c', 'id': 557, 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'def': 'a garment that covers the head and face', 'name': 'veil'}, {'frequency': 'f', 'id': 558, 'synset': 'headband.n.01', 'synonyms': ['headband'], 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'id': 559, 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'def': 'a vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'id': 560, 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'id': 561, 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'id': 562, 'synset': 'headset.n.01', 'synonyms': ['headset'], 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'id': 563, 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'r', 'id': 564, 'synset': 'hearing_aid.n.02', 'synonyms': ['hearing_aid'], 'def': 'an acoustic device used to direct sound to the ear of a hearing-impaired person', 'name': 'hearing_aid'}, {'frequency': 'c', 'id': 565, 'synset': 'heart.n.02', 'synonyms': ['heart'], 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'id': 566, 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'id': 567, 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'id': 568, 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'id': 569, 'synset': 'heron.n.02', 'synonyms': ['heron'], 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'id': 570, 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'id': 571, 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'def': 'a joint that holds two parts together so that one can swing 
relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'id': 572, 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'id': 573, 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'id': 574, 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'id': 575, 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'id': 576, 'synset': 'honey.n.01', 'synonyms': ['honey'], 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'id': 577, 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'id': 578, 'synset': 'hook.n.05', 'synonyms': ['hook'], 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'f', 'id': 579, 'synset': 'horse.n.01', 'synonyms': ['horse'], 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'id': 580, 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'id': 581, 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'id': 582, 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'id': 583, 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'id': 584, 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'id': 585, 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'r', 'id': 586, 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'id': 587, 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'c', 'id': 588, 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'id': 589, 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'id': 590, 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'id': 591, 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'def': 'an appliance included in some electric refrigerators for making ice cubes', 
'name': 'ice_maker'}, {'frequency': 'r', 'id': 592, 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'id': 593, 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'r', 'id': 594, 'synset': 'ice_tea.n.01', 'synonyms': ['ice_tea', 'iced_tea'], 'def': 'strong tea served over ice', 'name': 'ice_tea'}, {'frequency': 'c', 'id': 595, 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'id': 596, 'synset': 'incense.n.01', 'synonyms': ['incense'], 'def': 'a substance that produces a fragrant odor when burned', 'name': 'incense'}, {'frequency': 'r', 'id': 597, 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'c', 'id': 598, 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'id': 599, 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 'smoothing_iron_(for_clothing)'], 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'r', 'id': 600, 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'id': 601, 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'r', 'id': 602, 'synset': 'jam.n.01', 'synonyms': ['jam'], 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'id': 603, 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'id': 604, 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'id': 605, 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'id': 606, 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'id': 607, 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'c', 'id': 608, 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'id': 609, 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'r', 'id': 610, 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'id': 611, 'synset': 'kayak.n.01', 'synonyms': 
['kayak'], 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'id': 612, 'synset': 'keg.n.02', 'synonyms': ['keg'], 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'id': 613, 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'id': 614, 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'id': 615, 'synset': 'key.n.01', 'synonyms': ['key'], 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'id': 616, 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'r', 'id': 617, 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'def': 'a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'id': 618, 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'id': 619, 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'c', 'id': 620, 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'id': 621, 'synset': 'kite.n.03', 'synonyms': ['kite'], 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'id': 622, 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'id': 623, 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'id': 624, 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'id': 625, 'synset': 'knife.n.01', 'synonyms': ['knife'], 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'id': 626, 'synset': 'knight.n.02', 'synonyms': ['knight_(chess_piece)', 'horse_(chess_piece)'], 'def': 'a chess game piece shaped to resemble the head of a horse', 'name': 'knight_(chess_piece)'}, {'frequency': 'r', 'id': 627, 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'id': 628, 'synset': 'knob.n.02', 'synonyms': ['knob'], 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'id': 629, 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'id': 630, 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'id': 631, 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'def': 'a light coat worn to protect 
clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'id': 632, 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'id': 633, 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'r', 'id': 634, 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'c', 'id': 635, 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'id': 636, 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'id': 637, 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'id': 638, 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'def': 'a metal post supporting an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'id': 639, 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'id': 640, 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'id': 641, 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'id': 642, 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'id': 643, 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'c', 'id': 644, 'synset': 'latch.n.02', 'synonyms': ['latch'], 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'id': 645, 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'id': 646, 'synset': 'leather.n.01', 'synonyms': ['leather'], 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'id': 647, 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'id': 648, 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'f', 'id': 649, 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'id': 650, 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'id': 651, 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'def': 'leafy plant 
commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'id': 652, 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'id': 653, 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'id': 654, 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'id': 655, 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'def': 'glass bulb or tube shaped electric device that emits light (DO NOT MARK LAMPS AS A WHOLE)', 'name': 'lightbulb'}, {'frequency': 'r', 'id': 656, 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'def': 'a metallic conductor that is attached to a high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'c', 'id': 657, 'synset': 'lime.n.06', 'synonyms': ['lime'], 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'id': 658, 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'r', 'id': 659, 'synset': 'linen.n.02', 'synonyms': ['linen_paper'], 'def': 'a high-quality paper made of linen fibers or with a linen finish', 'name': 'linen_paper'}, {'frequency': 'c', 'id': 660, 'synset': 'lion.n.01', 'synonyms': ['lion'], 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'id': 661, 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'c', 'id': 662, 'synset': 'lipstick.n.01', 'synonyms': ['lipstick', 'lip_rouge'], 'def': 'makeup that is used to color the lips', 'name': 'lipstick'}, {'frequency': 'r', 'id': 663, 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'def': 'an alcoholic beverage that is distilled rather than fermented', 'name': 'liquor'}, {'frequency': 'r', 'id': 664, 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'r', 'id': 665, 'synset': 'loafer.n.02', 'synonyms': ['Loafer_(type_of_shoe)'], 'def': 'a low leather step-in shoe', 'name': 'Loafer_(type_of_shoe)'}, {'frequency': 'f', 'id': 666, 'synset': 'log.n.01', 'synonyms': ['log'], 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'id': 667, 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'c', 'id': 668, 'synset': 'lotion.n.01', 'synonyms': ['lotion'], 'def': 'any of various cosmetic preparations that are applied to the skin', 'name': 'lotion'}, {'frequency': 'f', 'id': 669, 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'id': 670, 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 
'r', 'id': 671, 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'id': 672, 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'id': 673, 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'r', 'id': 674, 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'c', 'id': 675, 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'id': 676, 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'id': 677, 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'c', 'id': 678, 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'id': 679, 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'def': 'a container (usually in a barn or stable) from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'id': 680, 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'c', 'id': 681, 'synset': 'map.n.01', 'synonyms': ['map'], 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'c', 'id': 682, 'synset': 'marker.n.03', 'synonyms': ['marker'], 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'id': 683, 'synset': 'martini.n.01', 'synonyms': ['martini'], 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'id': 684, 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'id': 685, 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'id': 686, 'synset': 'masher.n.02', 'synonyms': ['masher'], 'def': 'a kitchen utensil used for mashing (e.g. 
potatoes)', 'name': 'masher'}, {'frequency': 'f', 'id': 687, 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'id': 688, 'synset': 'mast.n.01', 'synonyms': ['mast'], 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'id': 689, 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'id': 690, 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'id': 691, 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'id': 692, 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'id': 693, 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'id': 694, 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'id': 695, 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'r', 'id': 696, 'synset': 'melon.n.01', 'synonyms': ['melon'], 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'id': 697, 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'id': 698, 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'id': 699, 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'def': 'kitchen appliance that cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'id': 700, 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'c', 'id': 701, 'synset': 'milk.n.01', 'synonyms': ['milk'], 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'f', 'id': 702, 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'id': 703, 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'id': 704, 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'id': 705, 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'id': 706, 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'def': 'a kitchen utensil that is used for mixing 
foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'id': 707, 'synset': 'money.n.03', 'synonyms': ['money'], 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'id': 708, 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'id': 709, 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'id': 710, 'synset': 'motor.n.01', 'synonyms': ['motor'], 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'id': 711, 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'id': 712, 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'r', 'id': 713, 'synset': 'motorboat.n.01', 'synonyms': ['motorboat', 'powerboat'], 'def': 'a boat propelled by an internal-combustion engine', 'name': 'motorboat'}, {'frequency': 'f', 'id': 714, 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'id': 715, 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'r', 'id': 716, 'synset': 'mouse.n.01', 'synonyms': ['mouse_(animal_rodent)'], 'def': 'a small rodent with pointed snouts and small ears on elongated bodies with slender usually hairless tails', 'name': 'mouse_(animal_rodent)'}, {'frequency': 'f', 'id': 717, 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'def': 'a computer input device that controls an on-screen pointer', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'id': 718, 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 'mousepad'}, {'frequency': 'c', 'id': 719, 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'id': 720, 'synset': 'mug.n.04', 'synonyms': ['mug'], 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'id': 721, 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'id': 722, 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'r', 'id': 723, 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'id': 724, 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'r', 'id': 725, 'synset': 'nameplate.n.01', 'synonyms': ['nameplate'], 'def': 'a plate bearing a name', 'name': 'nameplate'}, 
{'frequency': 'f', 'id': 726, 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'id': 727, 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'id': 728, 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'id': 729, 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'r', 'id': 730, 'synset': 'needle.n.03', 'synonyms': ['needle'], 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'id': 731, 'synset': 'nest.n.01', 'synonyms': ['nest'], 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'r', 'id': 732, 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'id': 733, 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'id': 734, 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'r', 'id': 735, 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'id': 736, 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'id': 737, 'synset': 'notepad.n.01', 'synonyms': ['notepad'], 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'c', 'id': 738, 'synset': 'nut.n.03', 'synonyms': ['nut'], 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'id': 739, 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'c', 'id': 740, 'synset': 'oar.n.01', 'synonyms': ['oar'], 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'id': 741, 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'id': 742, 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'id': 743, 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'id': 744, 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'def': 'oil from olives', 'name': 'olive_oil'}, 
{'frequency': 'r', 'id': 745, 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'id': 746, 'synset': 'onion.n.01', 'synonyms': ['onion'], 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'id': 747, 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'id': 748, 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'r', 'id': 749, 'synset': 'oregano.n.01', 'synonyms': ['oregano', 'marjoram'], 'def': 'aromatic Eurasian perennial herb used in cooking and baking', 'name': 'oregano'}, {'frequency': 'c', 'id': 750, 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'c', 'id': 751, 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'def': 'thick cushion used as a seat', 'name': 'ottoman'}, {'frequency': 'c', 'id': 752, 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'id': 753, 'synset': 'owl.n.01', 'synonyms': ['owl'], 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'id': 754, 'synset': 'packet.n.03', 'synonyms': ['packet'], 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'id': 755, 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'id': 756, 'synset': 'pad.n.04', 'synonyms': ['pad'], 'def': 'a flat mass of soft material used for protection, stuffing, or comfort', 'name': 'pad'}, {'frequency': 'c', 'id': 757, 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'def': 'a short light oar used without an oarlock to propel a canoe or small boat', 'name': 'paddle'}, {'frequency': 'c', 'id': 758, 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'r', 'id': 759, 'synset': 'paintbox.n.01', 'synonyms': ['paintbox'], 'def': "a box containing a collection of cubes or tubes of artists' paint", 'name': 'paintbox'}, {'frequency': 'c', 'id': 760, 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'id': 761, 'synset': 'painting.n.01', 'synonyms': ['painting'], 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'c', 'id': 762, 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'id': 763, 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'id': 764, 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'def': 'cooking utensil consisting of a wide 
metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'id': 765, 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'id': 766, 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'id': 767, 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'id': 768, 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'r', 'id': 769, 'synset': 'paper_clip.n.01', 'synonyms': ['paperclip'], 'def': 'a wire or plastic clip for holding sheets of paper together', 'name': 'paperclip'}, {'frequency': 'f', 'id': 770, 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'id': 771, 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'id': 772, 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'id': 773, 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'id': 774, 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'r', 'id': 775, 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'id': 776, 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'r', 'id': 777, 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'r', 'id': 778, 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'id': 779, 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'id': 780, 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'id': 781, 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'id': 782, 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'r', 'id': 783, 'synset': 'passport.n.02', 'synonyms': ['passport'], 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home 
country', 'name': 'passport'}, {'frequency': 'f', 'id': 784, 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'id': 785, 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'id': 786, 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'id': 787, 'synset': 'peach.n.03', 'synonyms': ['peach'], 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'id': 788, 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'c', 'id': 789, 'synset': 'pear.n.01', 'synonyms': ['pear'], 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'r', 'id': 790, 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'id': 791, 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'id': 792, 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'id': 793, 'synset': 'pen.n.01', 'synonyms': ['pen'], 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'c', 'id': 794, 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'id': 795, 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'id': 796, 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'id': 797, 'synset': 'pendulum.n.01', 'synonyms': ['pendulum'], 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'id': 798, 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'id': 799, 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'id': 800, 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'c', 'id': 801, 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'id': 802, 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'id': 803, 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'def': 'a toiletry that emits and diffuses a fragrant odor', 
'name': 'perfume'}, {'frequency': 'r', 'id': 804, 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'id': 805, 'synset': 'person.n.01', 'synonyms': ['baby', 'child', 'boy', 'girl', 'man', 'woman', 'person', 'human'], 'def': 'a human being', 'name': 'baby'}, {'frequency': 'r', 'id': 806, 'synset': 'pet.n.01', 'synonyms': ['pet'], 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'r', 'id': 807, 'synset': 'petfood.n.01', 'synonyms': ['petfood', 'pet-food'], 'def': 'food prepared for animal pets', 'name': 'petfood'}, {'frequency': 'r', 'id': 808, 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'id': 809, 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'id': 810, 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'c', 'id': 811, 'synset': 'piano.n.01', 'synonyms': ['piano'], 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'id': 812, 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'id': 813, 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'id': 814, 'synset': 'pie.n.01', 'synonyms': ['pie'], 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'id': 815, 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'id': 816, 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'def': "a child's coin bank (often shaped like a pig)", 'name': 'piggy_bank'}, {'frequency': 'f', 'id': 817, 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'id': 818, 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'id': 819, 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'id': 820, 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'id': 821, 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'id': 822, 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a 
stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'id': 823, 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'id': 824, 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'id': 825, 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'r', 'id': 826, 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'id': 827, 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'id': 828, 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'id': 829, 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'id': 830, 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'id': 831, 'synset': 'plate.n.04', 'synonyms': ['plate'], 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'id': 832, 'synset': 'platter.n.01', 'synonyms': ['platter'], 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'id': 833, 'synset': 'playing_card.n.01', 'synonyms': ['playing_card'], 'def': 'one of a pack of cards that are used to play card games', 'name': 'playing_card'}, {'frequency': 'r', 'id': 834, 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'id': 835, 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'id': 836, 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'id': 837, 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'id': 838, 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'id': 839, 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'id': 840, 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'r', 'id': 841, 'synset': 
'police_van.n.01', 'synonyms': ['police_van', 'police_wagon', 'paddy_wagon', 'patrol_wagon'], 'def': 'van used by police to transport prisoners', 'name': 'police_van'}, {'frequency': 'f', 'id': 842, 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'id': 843, 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'id': 844, 'synset': 'pony.n.05', 'synonyms': ['pony'], 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'id': 845, 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'id': 846, 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'r', 'id': 847, 'synset': 'portrait.n.02', 'synonyms': ['portrait', 'portrayal'], 'def': 'any likeness of a person, in any medium', 'name': 'portrait'}, {'frequency': 'c', 'id': 848, 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'id': 849, 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'id': 850, 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'id': 851, 'synset': 'pot.n.01', 'synonyms': ['pot'], 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'id': 852, 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'id': 853, 'synset': 'potato.n.01', 'synonyms': ['potato'], 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'id': 854, 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'id': 855, 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'id': 856, 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'r', 'id': 857, 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'id': 858, 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'f', 'id': 859, 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'id': 860, 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'def': 'a weapon that is forcibly thrown or projected at a targets', 'name': 
'projectile_(weapon)'}, {'frequency': 'c', 'id': 861, 'synset': 'projector.n.02', 'synonyms': ['projector'], 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'id': 862, 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'id': 863, 'synset': 'prune.n.01', 'synonyms': ['prune'], 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'id': 864, 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'id': 865, 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'id': 866, 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'id': 867, 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'id': 868, 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'id': 869, 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'def': 'a tool for making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'id': 870, 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'r', 'id': 871, 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'id': 872, 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'id': 873, 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'id': 874, 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'id': 875, 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'id': 876, 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'id': 877, 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'id': 878, 'synset': 'radar.n.01', 'synonyms': ['radar'], 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'c', 'id': 879, 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, 
{'frequency': 'c', 'id': 880, 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'id': 881, 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'id': 882, 'synset': 'raft.n.01', 'synonyms': ['raft'], 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'id': 883, 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'id': 884, 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'id': 885, 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'id': 886, 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'id': 887, 'synset': 'rat.n.01', 'synonyms': ['rat'], 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'id': 888, 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'def': 'a blade that has very sharp edge', 'name': 'razorblade'}, {'frequency': 'c', 'id': 889, 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'id': 890, 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'def': 'car mirror that reflects the view out of the rear window', 'name': 'rearview_mirror'}, {'frequency': 'c', 'id': 891, 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'id': 892, 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'r', 'id': 893, 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'r', 'id': 894, 'synset': 'red_cabbage.n.02', 'synonyms': ['red_cabbage'], 'def': 'compact head of purplish-red leaves', 'name': 'red_cabbage'}, {'frequency': 'f', 'id': 895, 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'id': 896, 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'id': 897, 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the 
snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'id': 898, 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'r', 'id': 899, 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'id': 900, 'synset': 'ring.n.08', 'synonyms': ['ring'], 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'id': 901, 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'id': 902, 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'id': 903, 'synset': 'robe.n.01', 'synonyms': ['robe'], 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'id': 904, 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'id': 905, 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'id': 906, 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'id': 907, 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'def': 'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'id': 908, 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'id': 909, 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'id': 910, 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'id': 911, 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'id': 912, 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'id': 913, 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'id': 914, 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'id': 915, 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'id': 916, 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'c', 'id': 917, 'synset': 'sail.n.01', 'synonyms': ['sail'], 'def': 'a large piece of fabric by means of 
which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'c', 'id': 918, 'synset': 'salad.n.01', 'synonyms': ['salad'], 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'id': 919, 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'r', 'id': 920, 'synset': 'salami.n.01', 'synonyms': ['salami'], 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'r', 'id': 921, 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'id': 922, 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'r', 'id': 923, 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'id': 924, 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'id': 925, 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, {'frequency': 'f', 'id': 926, 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'id': 927, 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'id': 928, 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'id': 929, 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'id': 930, 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'id': 931, 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'id': 932, 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'id': 933, 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'id': 934, 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'id': 935, 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'id': 936, 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, 
{'frequency': 'f', 'id': 937, 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'c', 'id': 938, 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'c', 'id': 939, 'synset': 'scrambled_eggs.n.01', 'synonyms': ['scrambled_eggs'], 'def': 'eggs beaten and cooked to a soft firm consistency while stirring', 'name': 'scrambled_eggs'}, {'frequency': 'r', 'id': 940, 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'r', 'id': 941, 'synset': 'scratcher.n.03', 'synonyms': ['scratcher'], 'def': 'a device used for scratching', 'name': 'scratcher'}, {'frequency': 'c', 'id': 942, 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'c', 'id': 943, 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'id': 944, 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'r', 'id': 945, 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'r', 'id': 946, 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'id': 947, 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'def': 'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'id': 948, 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'r', 'id': 949, 'synset': 'seedling.n.01', 'synonyms': ['seedling'], 'def': 'young plant or tree grown from a seed', 'name': 'seedling'}, {'frequency': 'c', 'id': 950, 'synset': 'serving_dish.n.01', 'synonyms': ['serving_dish'], 'def': 'a dish used for serving food', 'name': 'serving_dish'}, {'frequency': 'r', 'id': 951, 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'r', 'id': 952, 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'id': 953, 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'r', 'id': 954, 'synset': 'shark.n.01', 'synonyms': ['shark'], 'def': 'typically large carnivorous fishes with sharpe teeth', 'name': 'shark'}, {'frequency': 'r', 'id': 955, 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'id': 956, 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'id': 957, 'synset': 
'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'id': 958, 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'def': 'toiletry consisting that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'id': 959, 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'id': 960, 'synset': 'shears.n.01', 'synonyms': ['shears'], 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'id': 961, 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'id': 962, 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'id': 963, 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'r', 'id': 964, 'synset': 'shield.n.02', 'synonyms': ['shield'], 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'id': 965, 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'id': 966, 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'c', 'id': 967, 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'id': 968, 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'id': 969, 'synset': 'short_pants.n.01', 'synonyms': ['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'id': 970, 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'c', 'id': 971, 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'id': 972, 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'id': 973, 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'f', 'id': 974, 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'id': 975, 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'r', 'id': 976, 'synset': 'sieve.n.01', 'synonyms': ['sieve', 
'screen_(sieve)'], 'def': 'a strainer for separating lumps from powdered material or grading particles', 'name': 'sieve'}, {'frequency': 'f', 'id': 977, 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'id': 978, 'synset': 'silo.n.01', 'synonyms': ['silo'], 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'id': 979, 'synset': 'sink.n.01', 'synonyms': ['sink'], 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'id': 980, 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'id': 981, 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'id': 982, 'synset': 'ski.n.01', 'synonyms': ['ski'], 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'id': 983, 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'id': 984, 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'id': 985, 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'id': 986, 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'c', 'id': 987, 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'id': 988, 'synset': 'sleeping_bag.n.01', 'synonyms': ['sleeping_bag'], 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'id': 989, 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'id': 990, 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'id': 991, 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'id': 992, 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'id': 993, 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'id': 994, 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'id': 995, 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'def': 'tracked vehicle for 
travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'id': 996, 'synset': 'soap.n.01', 'synonyms': ['soap'], 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'id': 997, 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'id': 998, 'synset': 'sock.n.01', 'synonyms': ['sock'], 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'r', 'id': 999, 'synset': 'soda_fountain.n.02', 'synonyms': ['soda_fountain'], 'def': 'an apparatus for dispensing soda water', 'name': 'soda_fountain'}, {'frequency': 'r', 'id': 1000, 'synset': 'soda_water.n.01', 'synonyms': ['carbonated_water', 'club_soda', 'seltzer', 'sparkling_water'], 'def': 'effervescent beverage artificially charged with carbon dioxide', 'name': 'carbonated_water'}, {'frequency': 'f', 'id': 1001, 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'id': 1002, 'synset': 'softball.n.01', 'synonyms': ['softball'], 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'id': 1003, 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'id': 1004, 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'c', 'id': 1005, 'synset': 'soup.n.01', 'synonyms': ['soup'], 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'id': 1006, 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'id': 1007, 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'id': 1008, 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'id': 1009, 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 'soybean_milk', 'soymilk'], 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'id': 1010, 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'id': 1011, 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'id': 1012, 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'id': 1013, 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'id': 1014, 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 
'eyeglasses', 'glasses'], 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'id': 1015, 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'r', 'id': 1016, 'synset': 'spider.n.01', 'synonyms': ['spider'], 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'c', 'id': 1017, 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'id': 1018, 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'id': 1019, 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'id': 1020, 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'id': 1021, 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'c', 'id': 1022, 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'r', 'id': 1023, 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'def': 'echinoderms characterized by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'id': 1024, 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'id': 1025, 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'id': 1026, 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'r', 'id': 1027, 'synset': 'steamer.n.02', 'synonyms': ['steamer_(kitchen_appliance)'], 'def': 'a cooking utensil that can be used to cook food by steaming it', 'name': 'steamer_(kitchen_appliance)'}, {'frequency': 'f', 'id': 1028, 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'id': 1029, 'synset': 'stencil.n.01', 'synonyms': ['stencil'], 'def': 'a sheet of material (metal, plastic, etc.) 
that has been perforated with a pattern; ink or paint can pass through the perforations to create the printed pattern on the surface below', 'name': 'stencil'}, {'frequency': 'r', 'id': 1030, 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'id': 1031, 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'id': 1032, 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'id': 1033, 'synset': 'stew.n.02', 'synonyms': ['stew'], 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'id': 1034, 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'id': 1035, 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'c', 'id': 1036, 'synset': 'stocking.n.01', 'synonyms': ['stockings_(leg_wear)'], 'def': 'close-fitting hosiery to cover the foot and leg; come in matched pairs', 'name': 'stockings_(leg_wear)'}, {'frequency': 'f', 'id': 1037, 'synset': 'stool.n.01', 'synonyms': ['stool'], 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'id': 1038, 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'id': 1039, 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'id': 1040, 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'id': 1041, 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'id': 1042, 'synset': 'strap.n.01', 'synonyms': ['strap'], 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'id': 1043, 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'id': 1044, 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'id': 1045, 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'id': 1046, 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'def': 'a lamp supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'id': 1047, 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'id': 1048, 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'def': 'a pointed tool for writing or drawing or engraving', 'name': 
'stylus'}, {'frequency': 'r', 'id': 1049, 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'id': 1050, 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'id': 1051, 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'c', 'id': 1052, 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'id': 1053, 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'id': 1054, 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'id': 1055, 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'r', 'id': 1056, 'synset': 'sunscreen.n.01', 'synonyms': ['sunscreen', 'sunblock'], 'def': 'a cream spread on the skin; contains a chemical to filter out ultraviolet light and so protect from sunburn', 'name': 'sunscreen'}, {'frequency': 'f', 'id': 1057, 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'id': 1058, 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'id': 1059, 'synset': 'swab.n.02', 'synonyms': ['mop'], 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'id': 1060, 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'id': 1061, 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'id': 1062, 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'id': 1063, 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'id': 1064, 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'id': 1065, 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 'c', 'id': 1066, 'synset': 'sword.n.01', 'synonyms': ['sword'], 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'id': 1067, 
'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'id': 1068, 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'id': 1069, 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'id': 1070, 'synset': 'table.n.02', 'synonyms': ['table'], 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'id': 1071, 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'id': 1072, 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'id': 1073, 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'id': 1074, 'synset': 'taco.n.02', 'synonyms': ['taco'], 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'id': 1075, 'synset': 'tag.n.02', 'synonyms': ['tag'], 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'id': 1076, 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'id': 1077, 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'id': 1078, 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'c', 'id': 1079, 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'id': 1080, 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'c', 'id': 1081, 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'id': 1082, 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'id': 1083, 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'id': 1084, 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'id': 1085, 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'def': 'a cloth having a 
crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'id': 1086, 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'r', 'id': 1087, 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'id': 1088, 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'id': 1089, 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'c', 'id': 1090, 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'id': 1091, 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'id': 1092, 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'def': 'electronic device for communicating by voice over long distances', 'name': 'telephone'}, {'frequency': 'c', 'id': 1093, 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'id': 1094, 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'id': 1095, 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'id': 1096, 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'id': 1097, 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'id': 1098, 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'id': 1099, 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'id': 1100, 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'id': 1101, 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'id': 1102, 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'c', 'id': 1103, 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'def': 'a regulator for automatically regulating temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'id': 1104, 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'def': 'a small metal cap to protect the finger while sewing; 
can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'id': 1105, 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'id': 1106, 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'id': 1107, 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'id': 1108, 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'id': 1109, 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'id': 1110, 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'id': 1111, 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'r', 'id': 1112, 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'id': 1113, 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'id': 1114, 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'id': 1115, 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'c', 'id': 1116, 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'id': 1117, 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'id': 1118, 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'id': 1119, 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'c', 'id': 1120, 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'id': 1121, 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'id': 1122, 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'def': 'small brush; has long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'id': 1123, 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, 
{'frequency': 'c', 'id': 1124, 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'c', 'id': 1125, 'synset': 'top.n.09', 'synonyms': ['cover'], 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'id': 1126, 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'id': 1127, 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'id': 1128, 'synset': 'towel.n.01', 'synonyms': ['towel'], 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'id': 1129, 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'id': 1130, 'synset': 'toy.n.03', 'synonyms': ['toy'], 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'id': 1131, 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'id': 1132, 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'r', 'id': 1133, 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'c', 'id': 1134, 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'id': 1135, 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'id': 1136, 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'id': 1137, 'synset': 'tray.n.01', 'synonyms': ['tray'], 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'id': 1138, 'synset': 'tree_house.n.01', 'synonyms': ['tree_house'], 'def': '(NOT A TREE) a PLAYHOUSE built in the branches of a tree', 'name': 'tree_house'}, {'frequency': 'r', 'id': 1139, 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'id': 1140, 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'r', 'id': 1141, 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'def': 'a vehicle with three 
wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'c', 'id': 1142, 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'id': 1143, 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'id': 1144, 'synset': 'truck.n.01', 'synonyms': ['truck'], 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'id': 1145, 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'id': 1146, 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'id': 1147, 'synset': 'tub.n.02', 'synonyms': ['vat'], 'def': 'a large open vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'id': 1148, 'synset': 'turban.n.01', 'synonyms': ['turban'], 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'r', 'id': 1149, 'synset': 'turkey.n.01', 'synonyms': ['turkey_(bird)'], 'def': 'large gallinaceous bird with fan-shaped tail; widely domesticated for food', 'name': 'turkey_(bird)'}, {'frequency': 'c', 'id': 1150, 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'id': 1151, 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'id': 1152, 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'r', 'id': 1153, 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'r', 'id': 1154, 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'id': 1155, 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'c', 'id': 1156, 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'id': 1157, 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'c', 'id': 1158, 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'r', 'id': 1159, 'synset': 'urn.n.01', 'synonyms': ['urn'], 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'id': 1160, 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'c', 'id': 1161, 'synset': 
'valve.n.03', 'synonyms': ['valve'], 'def': 'control consisting of a mechanical device for controlling the flow of a fluid', 'name': 'valve'}, {'frequency': 'f', 'id': 1162, 'synset': 'vase.n.01', 'synonyms': ['vase'], 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'id': 1163, 'synset': 'vending_machine.n.01', 'synonyms': ['vending_machine'], 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'id': 1164, 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'c', 'id': 1165, 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'id': 1166, 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'id': 1167, 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'id': 1168, 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'r', 'id': 1169, 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'id': 1170, 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'id': 1171, 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'id': 1172, 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'id': 1173, 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'id': 1174, 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'id': 1175, 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'id': 1176, 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'id': 1177, 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'c', 'id': 1178, 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'id': 1179, 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'id': 1180, 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'def': 'a tall piece of furniture that provides 
storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'id': 1181, 'synset': 'wasabi.n.02', 'synonyms': ['wasabi'], 'def': 'the thick green root of the wasabi plant that the Japanese use in cooking and that tastes like strong horseradish', 'name': 'wasabi'}, {'frequency': 'c', 'id': 1182, 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'id': 1183, 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'id': 1184, 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'id': 1185, 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'id': 1186, 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'id': 1187, 'synset': 'water_filter.n.01', 'synonyms': ['water_filter'], 'def': 'a filter to remove impurities from the water supply', 'name': 'water_filter'}, {'frequency': 'r', 'id': 1188, 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'r', 'id': 1189, 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'id': 1190, 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'id': 1191, 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'id': 1192, 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'id': 1193, 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'id': 1194, 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'c', 'id': 1195, 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'id': 1196, 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'id': 1197, 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'id': 1198, 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'def': 'a rich cake with two or more 
tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'id': 1199, 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'def': 'a ring given to the bride and/or groom at the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'id': 1200, 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'id': 1201, 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'id': 1202, 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'id': 1203, 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'r', 'id': 1204, 'synset': 'whiskey.n.01', 'synonyms': ['whiskey'], 'def': 'a liquor made from fermented mash of grain', 'name': 'whiskey'}, {'frequency': 'r', 'id': 1205, 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'r', 'id': 1206, 'synset': 'wick.n.02', 'synonyms': ['wick'], 'def': 'a loosely woven cord in a candle or oil lamp that is lit on fire', 'name': 'wick'}, {'frequency': 'c', 'id': 1207, 'synset': 'wig.n.01', 'synonyms': ['wig'], 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'id': 1208, 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'id': 1209, 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'def': 'a mill that is powered by the wind', 'name': 'windmill'}, {'frequency': 'c', 'id': 1210, 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'id': 1211, 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'id': 1212, 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'id': 1213, 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'r', 'id': 1214, 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'id': 1215, 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'r', 'id': 1216, 'synset': 'wing_chair.n.01', 'synonyms': ['wing_chair'], 'def': 'easy chair having wings on each side of a high back', 'name': 'wing_chair'}, {'frequency': 'c', 'id': 1217, 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 
'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'id': 1218, 'synset': 'wok.n.01', 'synonyms': ['wok'], 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'id': 1219, 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'id': 1220, 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'id': 1221, 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 'name': 'wreath'}, {'frequency': 'c', 'id': 1222, 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'c', 'id': 1223, 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'id': 1224, 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'r', 'id': 1225, 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'r', 'id': 1226, 'synset': 'yak.n.02', 'synonyms': ['yak'], 'def': 'large long-haired wild ox of Tibet often domesticated', 'name': 'yak'}, {'frequency': 'c', 'id': 1227, 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'r', 'id': 1228, 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'id': 1229, 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'id': 1230, 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa +# fmt: on diff --git a/data_processing/detectron2/detectron2/data/datasets/lvis_v1_categories.py b/data_processing/detectron2/detectron2/data/datasets/lvis_v1_categories.py new file mode 100644 index 0000000..7374e69 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/lvis_v1_categories.py @@ -0,0 +1,16 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# Autogen with +# with open("lvis_v1_val.json", "r") as f: +# a = json.load(f) +# c = a["categories"] +# for x in c: +# del x["image_count"] +# del x["instance_count"] +# LVIS_CATEGORIES = repr(c) + " # noqa" +# with open("/tmp/lvis_categories.py", "wt") as f: +# f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}") +# Then paste the contents of that file below + +# fmt: off +LVIS_CATEGORIES = [{'frequency': 'c', 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'id': 1, 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'id': 2, 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'id': 3, 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'f', 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'id': 4, 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'id': 5, 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'c', 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'id': 6, 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'synset': 'almond.n.02', 'synonyms': ['almond'], 'id': 7, 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'id': 8, 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'c', 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'id': 9, 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'id': 10, 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'id': 11, 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'synset': 'apple.n.01', 'synonyms': ['apple'], 'id': 12, 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'id': 13, 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'id': 14, 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'synset': 'apron.n.01', 'synonyms': ['apron'], 'id': 15, 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'id': 16, 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'r', 'synset': 'arctic.n.02', 'synonyms': ['arctic_(type_of_shoe)', 'galosh', 'golosh', 'rubber_(type_of_shoe)', 'gumshoe'], 'id': 17, 'def': 'a waterproof overshoe that protects shoes from water or snow', 'name': 'arctic_(type_of_shoe)'}, {'frequency': 'c', 'synset': 
'armband.n.02', 'synonyms': ['armband'], 'id': 18, 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'id': 19, 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'id': 20, 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'id': 21, 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'id': 22, 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'id': 23, 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'id': 24, 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'id': 25, 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'id': 26, 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'f', 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'id': 27, 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'id': 28, 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'synset': 'awning.n.01', 'synonyms': ['awning'], 'id': 29, 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'id': 30, 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'r', 'synset': 'baboon.n.01', 'synonyms': ['baboon'], 'id': 31, 'def': 'large terrestrial monkeys having doglike muzzles', 'name': 'baboon'}, {'frequency': 'f', 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'id': 32, 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'id': 33, 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'id': 34, 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'id': 35, 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'id': 36, 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'id': 
37, 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'id': 38, 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'id': 39, 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'id': 40, 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'synset': 'ball.n.06', 'synonyms': ['ball'], 'id': 41, 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'id': 42, 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'id': 43, 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'id': 44, 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'synset': 'banana.n.02', 'synonyms': ['banana'], 'id': 45, 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'c', 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'id': 46, 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'id': 47, 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'f', 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'id': 48, 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'id': 49, 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'id': 50, 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'id': 51, 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'synset': 'barge.n.01', 'synonyms': ['barge'], 'id': 52, 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'id': 53, 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'id': 54, 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'id': 55, 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'id': 56, 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'id': 57, 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 
'f', 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'id': 58, 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'id': 59, 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'id': 60, 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'id': 61, 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'id': 62, 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'id': 63, 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'c', 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'id': 64, 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'id': 65, 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'id': 66, 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'id': 67, 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'id': 68, 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'id': 69, 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'synset': 'battery.n.02', 'synonyms': ['battery'], 'id': 70, 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'id': 71, 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'synset': 'bead.n.01', 'synonyms': ['bead'], 'id': 72, 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'c', 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'id': 73, 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'id': 74, 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'id': 75, 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'synset': 'bear.n.01', 'synonyms': ['bear'], 'id': 76, 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'synset': 'bed.n.01', 'synonyms': ['bed'], 'id': 77, 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'r', 'synset': 'bedpan.n.01', 'synonyms': 
['bedpan'], 'id': 78, 'def': 'a shallow vessel used by a bedridden patient for defecation and urination', 'name': 'bedpan'}, {'frequency': 'f', 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'id': 79, 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'synset': 'beef.n.01', 'synonyms': ['cow'], 'id': 80, 'def': 'cattle/cow', 'name': 'cow'}, {'frequency': 'f', 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'id': 81, 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'id': 82, 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'id': 83, 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'id': 84, 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'id': 85, 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'synset': 'bell.n.01', 'synonyms': ['bell'], 'id': 86, 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'id': 87, 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'synset': 'belt.n.02', 'synonyms': ['belt'], 'id': 88, 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'id': 89, 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'synset': 'bench.n.01', 'synonyms': ['bench'], 'id': 90, 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'synset': 'beret.n.01', 'synonyms': ['beret'], 'id': 91, 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'synset': 'bib.n.02', 'synonyms': ['bib'], 'id': 92, 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'id': 93, 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'id': 94, 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'id': 95, 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'f', 'synset': 'billboard.n.01', 'synonyms': ['billboard'], 'id': 96, 'def': 'large outdoor signboard', 'name': 'billboard'}, {'frequency': 'c', 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'id': 97, 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'id': 98, 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'synset': 'bird.n.01', 'synonyms': ['bird'], 'id': 99, 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'c', 'synset': 
'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'id': 100, 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'c', 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'id': 101, 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'id': 102, 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'id': 103, 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'id': 104, 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'id': 105, 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'id': 106, 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'id': 107, 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'synset': 'blackberry.n.01', 'synonyms': ['blackberry'], 'id': 108, 'def': 'large sweet black or very dark purple edible aggregate fruit', 'name': 'blackberry'}, {'frequency': 'f', 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'id': 109, 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'id': 110, 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'id': 111, 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'id': 112, 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'id': 113, 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'f', 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'id': 114, 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'f', 'synset': 'blouse.n.01', 'synonyms': ['blouse'], 'id': 115, 'def': 'a top worn by women', 'name': 'blouse'}, {'frequency': 'f', 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'id': 116, 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'id': 117, 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'id': 118, 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'r', 'synset': 'bob.n.05', 'synonyms': ['bob', 'bobber', 'bobfloat'], 'id': 119, 'def': 'a small float usually made of cork; attached to a fishing line', 'name': 'bob'}, {'frequency': 'c', 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'id': 120, 'def': 'a thing around which thread/tape/film 
or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'c', 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'id': 121, 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'id': 122, 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'id': 123, 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'id': 124, 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'id': 125, 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'id': 126, 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'synset': 'book.n.01', 'synonyms': ['book'], 'id': 127, 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'c', 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'id': 128, 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'id': 129, 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'id': 130, 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'id': 131, 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'synset': 'boot.n.01', 'synonyms': ['boot'], 'id': 132, 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'id': 133, 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'id': 134, 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'id': 135, 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'id': 136, 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'id': 137, 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'id': 138, 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'id': 139, 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'id': 140, 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 
'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'id': 141, 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'id': 142, 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'f', 'synset': 'box.n.01', 'synonyms': ['box'], 'id': 143, 'def': 'a (usually rectangular) container; may have a lid', 'name': 'box'}, {'frequency': 'r', 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'id': 144, 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'id': 145, 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'id': 146, 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'id': 147, 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'id': 148, 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'id': 149, 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'f', 'synset': 'bread.n.01', 'synonyms': ['bread'], 'id': 150, 'def': 'food made from dough of flour or meal and usually raised with yeast or baking powder and then baked', 'name': 'bread'}, {'frequency': 'r', 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'id': 151, 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'f', 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'id': 152, 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'id': 153, 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'f', 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'id': 154, 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'id': 155, 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'synset': 'broom.n.01', 'synonyms': ['broom'], 'id': 156, 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'id': 157, 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'id': 158, 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'id': 159, 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'id': 160, 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, 
{'frequency': 'r', 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'id': 161, 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'synset': 'bull.n.11', 'synonyms': ['horned_cow'], 'id': 162, 'def': 'a cow with horns', 'name': 'bull'}, {'frequency': 'c', 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'id': 163, 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'id': 164, 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'id': 165, 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'id': 166, 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'id': 167, 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'id': 168, 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'f', 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'id': 169, 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'id': 170, 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'id': 171, 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'id': 172, 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'id': 173, 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'id': 174, 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'f', 'synset': 'butter.n.01', 'synonyms': ['butter'], 'id': 175, 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'id': 176, 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'synset': 'button.n.01', 'synonyms': ['button'], 'id': 177, 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'id': 178, 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'id': 179, 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'c', 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 
'id': 180, 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'id': 181, 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'id': 182, 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'synset': 'cake.n.03', 'synonyms': ['cake'], 'id': 183, 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'id': 184, 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'id': 185, 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'synset': 'calf.n.01', 'synonyms': ['calf'], 'id': 186, 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'id': 187, 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'synset': 'camel.n.01', 'synonyms': ['camel'], 'id': 188, 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'synset': 'camera.n.01', 'synonyms': ['camera'], 'id': 189, 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'id': 190, 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'id': 191, 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'id': 192, 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'id': 193, 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'f', 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'id': 194, 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'id': 195, 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'id': 196, 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'id': 197, 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'id': 198, 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'id': 199, 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'c', 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'id': 200, 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 
'name': 'canoe'}, {'frequency': 'c', 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'id': 201, 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'id': 202, 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'f', 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'id': 203, 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'id': 204, 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'c', 'synset': 'cape.n.02', 'synonyms': ['cape'], 'id': 205, 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'id': 206, 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'id': 207, 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'id': 208, 'def': 'a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'id': 209, 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'id': 210, 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'id': 211, 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'synset': 'card.n.03', 'synonyms': ['card'], 'id': 212, 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'c', 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'id': 213, 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'id': 214, 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'id': 215, 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'id': 216, 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'id': 217, 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'f', 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'id': 218, 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'synset': 'cart.n.01', 'synonyms': ['cart'], 'id': 219, 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'synset': 'carton.n.02', 'synonyms': ['carton'], 'id': 220, 'def': 'a container made of cardboard for holding food or drink', 'name': 'carton'}, {'frequency': 'c', 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'id': 221, 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'id': 222, 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'id': 223, 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'id': 224, 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'synset': 'cat.n.01', 'synonyms': ['cat'], 'id': 225, 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'f', 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'id': 226, 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'c', 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'id': 227, 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'id': 228, 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'f', 'synset': 'celery.n.01', 'synonyms': ['celery'], 'id': 229, 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'id': 230, 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'id': 231, 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 
'chain_mail'}, {'frequency': 'f', 'synset': 'chair.n.01', 'synonyms': ['chair'], 'id': 232, 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'id': 233, 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'synset': 'chalice.n.01', 'synonyms': ['chalice'], 'id': 234, 'def': 'a bowl-shaped drinking vessel; especially the Eucharistic cup', 'name': 'chalice'}, {'frequency': 'f', 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'id': 235, 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'synset': 'chap.n.04', 'synonyms': ['chap'], 'id': 236, 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'id': 237, 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'id': 238, 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'id': 239, 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'id': 240, 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'c', 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'id': 241, 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'id': 242, 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'c', 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'id': 243, 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'id': 244, 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'id': 245, 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'id': 246, 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'id': 247, 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'id': 248, 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'id': 249, 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'id': 250, 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'id': 251, 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 
'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'id': 252, 'def': 'shirt collar, animal collar, or tight-fitting necklace', 'name': 'choker'}, {'frequency': 'f', 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'id': 253, 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'f', 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'id': 254, 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'id': 255, 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'synset': 'chute.n.02', 'synonyms': ['slide'], 'id': 256, 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'id': 257, 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'id': 258, 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'f', 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'id': 259, 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'id': 260, 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'id': 261, 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'id': 262, 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'c', 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'id': 263, 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'id': 264, 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'synset': 'cleat.n.02', 'synonyms': ['cleat_(for_securing_rope)'], 'id': 265, 'def': 'a fastener (usually with two projecting horns) around which a rope can be secured', 'name': 'cleat_(for_securing_rope)'}, {'frequency': 'r', 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'id': 266, 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'synset': 'clip.n.03', 'synonyms': ['clip'], 'id': 267, 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'id': 268, 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'r', 'synset': 'clipper.n.03', 'synonyms': ['clippers_(for_plants)'], 'id': 269, 'def': 'shears for cutting grass or shrubbery (often used in the plural)', 'name': 'clippers_(for_plants)'}, {'frequency': 'r', 'synset': 'cloak.n.02', 'synonyms': ['cloak'], 'id': 270, 'def': 'a loose outer garment', 'name': 'cloak'}, {'frequency': 'f', 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'id': 271, 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'synset': 
'clock_tower.n.01', 'synonyms': ['clock_tower'], 'id': 272, 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'id': 273, 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'id': 274, 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'id': 275, 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'id': 276, 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'synset': 'coat.n.01', 'synonyms': ['coat'], 'id': 277, 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'id': 278, 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'c', 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'id': 279, 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'id': 280, 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'r', 'synset': 'cockroach.n.01', 'synonyms': ['cockroach'], 'id': 281, 'def': 'any of numerous chiefly nocturnal insects; some are domestic pests', 'name': 'cockroach'}, {'frequency': 'r', 'synset': 'cocoa.n.01', 'synonyms': ['cocoa_(beverage)', 'hot_chocolate_(beverage)', 'drinking_chocolate'], 'id': 282, 'def': 'a beverage made from cocoa powder and milk and sugar; usually drunk hot', 'name': 'cocoa_(beverage)'}, {'frequency': 'c', 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'id': 283, 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'f', 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'id': 284, 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'id': 285, 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'id': 286, 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'synset': 'coil.n.05', 'synonyms': ['coil'], 'id': 287, 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'synset': 'coin.n.01', 'synonyms': ['coin'], 'id': 288, 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'c', 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'id': 289, 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'id': 290, 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'id': 291, 
'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'id': 292, 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'id': 293, 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'id': 294, 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'r', 'synset': 'compass.n.01', 'synonyms': ['compass'], 'id': 295, 'def': 'navigational instrument for finding directions', 'name': 'compass'}, {'frequency': 'f', 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'id': 296, 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'f', 'synset': 'condiment.n.01', 'synonyms': ['condiment'], 'id': 297, 'def': 'a preparation (a sauce or relish or spice) to enhance flavor or enjoyment', 'name': 'condiment'}, {'frequency': 'f', 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'id': 298, 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'id': 299, 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'id': 300, 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'id': 301, 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'r', 'synset': 'cooker.n.01', 'synonyms': ['cooker'], 'id': 302, 'def': 'a utensil for cooking', 'name': 'cooker'}, {'frequency': 'f', 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'id': 303, 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'id': 304, 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'id': 305, 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'f', 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'id': 306, 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'id': 307, 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'c', 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'id': 308, 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'f', 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'id': 309, 'def': 'ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)', 'name': 'edible_corn'}, {'frequency': 'r', 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'id': 310, 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 
'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'id': 311, 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'id': 312, 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'id': 313, 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'c', 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'id': 314, 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'c', 'synset': 'costume.n.04', 'synonyms': ['costume'], 'id': 315, 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'id': 316, 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'id': 317, 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'c', 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'id': 318, 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'id': 319, 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'c', 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'id': 320, 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'r', 'synset': 'crab.n.05', 'synonyms': ['crabmeat'], 'id': 321, 'def': 'the edible flesh of any of various crabs', 'name': 'crabmeat'}, {'frequency': 'c', 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'id': 322, 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'id': 323, 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'synset': 'crate.n.01', 'synonyms': ['crate'], 'id': 324, 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'c', 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'id': 325, 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'id': 326, 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'c', 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'id': 327, 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'id': 328, 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'id': 329, 'def': 'an earthen jar (made of baked clay) or a modern electric crockpot', 'name': 'crock_pot'}, {'frequency': 'f', 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'id': 330, 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'synset': 'crouton.n.01', 'synonyms': 
['crouton'], 'id': 331, 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'c', 'synset': 'crow.n.01', 'synonyms': ['crow'], 'id': 332, 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'r', 'synset': 'crowbar.n.01', 'synonyms': ['crowbar', 'wrecking_bar', 'pry_bar'], 'id': 333, 'def': 'a heavy iron lever with one end forged into a wedge', 'name': 'crowbar'}, {'frequency': 'c', 'synset': 'crown.n.04', 'synonyms': ['crown'], 'id': 334, 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'id': 335, 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'id': 336, 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'id': 337, 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'f', 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'id': 338, 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'c', 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'id': 339, 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'id': 340, 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'c', 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'id': 341, 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'id': 342, 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'id': 343, 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'synset': 'cup.n.01', 'synonyms': ['cup'], 'id': 344, 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'id': 345, 'def': 'a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'f', 'synset': 'cupboard.n.01', 'synonyms': ['cupboard', 'closet'], 'id': 346, 'def': 'a small room (or recess) or cabinet used for storage space', 'name': 'cupboard'}, {'frequency': 'f', 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'id': 347, 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'id': 348, 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'id': 349, 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'id': 350, 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, 
{'frequency': 'f', 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'id': 351, 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'id': 352, 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'id': 353, 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'id': 354, 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'synset': 'dalmatian.n.02', 'synonyms': ['dalmatian'], 'id': 355, 'def': 'a large breed having a smooth white coat with black or brown spots', 'name': 'dalmatian'}, {'frequency': 'c', 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'id': 356, 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'id': 357, 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'id': 358, 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'id': 359, 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'id': 360, 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'synset': 'desk.n.01', 'synonyms': ['desk'], 'id': 361, 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'id': 362, 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'id': 363, 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'id': 364, 'def': 'yearly planner book', 'name': 'diary'}, {'frequency': 'r', 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'id': 365, 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'id': 366, 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'id': 367, 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'id': 368, 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'f', 'synset': 'dish.n.01', 'synonyms': ['dish'], 'id': 369, 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'id': 370, 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, 
{'frequency': 'c', 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'id': 371, 'def': 'a cloth for washing dishes or cleaning in general', 'name': 'dishrag'}, {'frequency': 'f', 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'id': 372, 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'id': 373, 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid', 'dishsoap'], 'id': 374, 'def': 'dishsoap or dish detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'f', 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'id': 375, 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'r', 'synset': 'diving_board.n.01', 'synonyms': ['diving_board'], 'id': 376, 'def': 'a springboard from which swimmers can dive', 'name': 'diving_board'}, {'frequency': 'f', 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'id': 377, 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'synset': 'dog.n.01', 'synonyms': ['dog'], 'id': 378, 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'id': 379, 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'f', 'synset': 'doll.n.01', 'synonyms': ['doll'], 'id': 380, 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'id': 381, 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'synset': 'dollhouse.n.01', 'synonyms': ['dollhouse', "doll's_house"], 'id': 382, 'def': "a house so small that it is likened to a child's plaything", 'name': 'dollhouse'}, {'frequency': 'c', 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'id': 383, 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'id': 384, 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'f', 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'id': 385, 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'id': 386, 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'id': 387, 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'synset': 'dove.n.01', 'synonyms': ['dove'], 'id': 388, 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'id': 389, 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'id': 390, 'def': 'a boxlike container in a piece of furniture; made so as to slide in and 
out', 'name': 'drawer'}, {'frequency': 'c', 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'id': 391, 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'id': 392, 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'id': 393, 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'f', 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'id': 394, 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'f', 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'id': 395, 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'synset': 'drill.n.01', 'synonyms': ['drill'], 'id': 396, 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'synset': 'drone.n.04', 'synonyms': ['drone'], 'id': 397, 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'id': 398, 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'id': 399, 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'id': 400, 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'synset': 'duck.n.01', 'synonyms': ['duck'], 'id': 401, 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'c', 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'id': 402, 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'id': 403, 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'id': 404, 'def': 'a large cylindrical bag of heavy cloth (does not include suitcases)', 'name': 'duffel_bag'}, {'frequency': 'r', 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'id': 405, 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'id': 406, 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'id': 407, 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'c', 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'id': 408, 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'id': 409, 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'id': 410, 'def': 'a 
soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'synset': 'earring.n.01', 'synonyms': ['earring'], 'id': 411, 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'synset': 'easel.n.01', 'synonyms': ['easel'], 'id': 412, 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'id': 413, 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'synset': 'eel.n.01', 'synonyms': ['eel'], 'id': 414, 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'id': 415, 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'id': 416, 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'id': 417, 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'id': 418, 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'id': 419, 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'id': 420, 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'id': 421, 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'id': 422, 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'c', 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'id': 423, 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'id': 424, 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'id': 425, 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'id': 426, 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'id': 427, 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'id': 428, 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'synset': 'fan.n.01', 'synonyms': ['fan'], 'id': 429, 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'id': 430, 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'id': 
431, 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'id': 432, 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'id': 433, 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'c', 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'id': 434, 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'id': 435, 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'id': 436, 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'id': 437, 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'id': 438, 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'id': 439, 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'id': 440, 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'f', 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'id': 441, 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'f', 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'id': 442, 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'id': 443, 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'id': 444, 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'id': 445, 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'r', 'synset': 'first-aid_kit.n.01', 'synonyms': ['first-aid_kit'], 'id': 446, 'def': 'kit consisting of a set of bandages and medicines for giving first aid', 'name': 'first-aid_kit'}, {'frequency': 'f', 'synset': 'fish.n.01', 'synonyms': ['fish'], 'id': 447, 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'c', 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'id': 448, 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'id': 449, 'def': 'a 
transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'c', 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'id': 450, 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'synset': 'flag.n.01', 'synonyms': ['flag'], 'id': 451, 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'id': 452, 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'id': 453, 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'id': 454, 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'c', 'synset': 'flap.n.01', 'synonyms': ['flap'], 'id': 455, 'def': 'any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing', 'name': 'flap'}, {'frequency': 'r', 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'id': 456, 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'id': 457, 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'id': 458, 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'id': 459, 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'id': 460, 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'id': 461, 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'id': 462, 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'c', 'synset': 'foal.n.01', 'synonyms': ['foal'], 'id': 463, 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'id': 464, 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'id': 465, 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'id': 466, 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'id': 467, 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'id': 468, 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'synset': 
'fork.n.01', 'synonyms': ['fork'], 'id': 469, 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'c', 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'id': 470, 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'c', 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'id': 471, 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'c', 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'id': 472, 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'id': 473, 'def': 'anything that freshens air by removing or covering odor', 'name': 'freshener'}, {'frequency': 'f', 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'id': 474, 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'id': 475, 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'id': 476, 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'f', 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'id': 477, 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'id': 478, 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'id': 479, 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'r', 'synset': 'futon.n.01', 'synonyms': ['futon'], 'id': 480, 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'id': 481, 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'id': 482, 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'id': 483, 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'id': 484, 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'id': 485, 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'id': 486, 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'id': 487, 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'id': 488, 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'c', 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'id': 489, 'def': 'small swift graceful antelope of 
Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'id': 490, 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'id': 491, 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'r', 'synset': 'generator.n.02', 'synonyms': ['generator'], 'id': 492, 'def': 'engine that converts mechanical energy into electrical energy by electromagnetic induction', 'name': 'generator'}, {'frequency': 'c', 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'id': 493, 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'id': 494, 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'id': 495, 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'id': 496, 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'id': 497, 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'id': 498, 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'synset': 'globe.n.03', 'synonyms': ['globe'], 'id': 499, 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'synset': 'glove.n.02', 'synonyms': ['glove'], 'id': 500, 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'synset': 'goat.n.01', 'synonyms': ['goat'], 'id': 501, 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'id': 502, 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'id': 503, 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'c', 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'id': 504, 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'id': 505, 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'id': 506, 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'synset': 'goose.n.01', 'synonyms': ['goose'], 'id': 507, 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'id': 508, 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 
'id': 509, 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'f', 'synset': 'grape.n.01', 'synonyms': ['grape'], 'id': 510, 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'c', 'synset': 'grater.n.01', 'synonyms': ['grater'], 'id': 511, 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'id': 512, 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'id': 513, 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'f', 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'id': 514, 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'f', 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'id': 515, 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'id': 516, 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'f', 'synset': 'grill.n.02', 'synonyms': ['grill', 'grille', 'grillwork', 'radiator_grille'], 'id': 517, 'def': 'a framework of metal bars used as a partition or a grate', 'name': 'grill'}, {'frequency': 'r', 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'id': 518, 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'id': 519, 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'id': 520, 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'f', 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'id': 521, 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'id': 522, 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'synset': 'gun.n.01', 'synonyms': ['gun'], 'id': 523, 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'f', 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'id': 524, 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'id': 525, 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'id': 526, 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'r', 'synset': 'halter.n.03', 'synonyms': ['halter_top'], 'id': 527, 'def': "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", 'name': 'halter_top'}, {'frequency': 'f', 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'id': 528, 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 
'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'id': 529, 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'id': 530, 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'c', 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'id': 531, 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'id': 532, 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'c', 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'id': 533, 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'f', 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'id': 534, 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'id': 535, 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'id': 536, 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'id': 537, 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'id': 538, 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'id': 539, 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'id': 540, 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'id': 541, 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'id': 542, 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'id': 543, 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'synset': 'hat.n.01', 'synonyms': ['hat'], 'id': 544, 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'id': 545, 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'c', 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'id': 546, 'def': 'a garment that covers the head OR face', 'name': 'veil'}, {'frequency': 'f', 'synset': 'headband.n.01', 'synonyms': ['headband'], 'id': 547, 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'id': 548, 'def': 'a 
vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'id': 549, 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'id': 550, 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'synset': 'headset.n.01', 'synonyms': ['headset'], 'id': 551, 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'id': 552, 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'c', 'synset': 'heart.n.02', 'synonyms': ['heart'], 'id': 553, 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'id': 554, 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'id': 555, 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'id': 556, 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'synset': 'heron.n.02', 'synonyms': ['heron'], 'id': 557, 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'id': 558, 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'id': 559, 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'id': 560, 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'id': 561, 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'id': 562, 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'id': 563, 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'synset': 'honey.n.01', 'synonyms': ['honey'], 'id': 564, 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'id': 565, 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'synset': 'hook.n.05', 'synonyms': ['hook'], 'id': 566, 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'r', 'synset': 'hookah.n.01', 'synonyms': ['hookah', 'narghile', 'nargileh', 'sheesha', 'shisha', 'water_pipe'], 'id': 567, 
'def': 'a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water', 'name': 'hookah'}, {'frequency': 'r', 'synset': 'hornet.n.01', 'synonyms': ['hornet'], 'id': 568, 'def': 'large stinging wasp', 'name': 'hornet'}, {'frequency': 'f', 'synset': 'horse.n.01', 'synonyms': ['horse'], 'id': 569, 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'id': 570, 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'id': 571, 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'id': 572, 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'id': 573, 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'id': 574, 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'id': 575, 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'c', 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'id': 576, 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'id': 577, 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'f', 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'id': 578, 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'id': 579, 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'id': 580, 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'id': 581, 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'id': 582, 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'id': 583, 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'c', 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'id': 584, 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'id': 585, 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'f', 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'id': 586, 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 
'smoothing_iron_(for_clothing)'], 'id': 587, 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'c', 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'id': 588, 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'id': 589, 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'c', 'synset': 'jam.n.01', 'synonyms': ['jam'], 'id': 590, 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'synset': 'jar.n.01', 'synonyms': ['jar'], 'id': 591, 'def': 'a vessel (usually cylindrical) with a wide mouth and without handles', 'name': 'jar'}, {'frequency': 'f', 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'id': 592, 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'id': 593, 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'id': 594, 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'id': 595, 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'id': 596, 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'r', 'synset': 'jewel.n.01', 'synonyms': ['jewel', 'gem', 'precious_stone'], 'id': 597, 'def': 'a precious or semiprecious stone incorporated into a piece of jewelry', 'name': 'jewel'}, {'frequency': 'c', 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'id': 598, 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'id': 599, 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'c', 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'id': 600, 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'id': 601, 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'synset': 'keg.n.02', 'synonyms': ['keg'], 'id': 602, 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'id': 603, 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'id': 604, 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'synset': 'key.n.01', 'synonyms': ['key'], 'id': 605, 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'id': 606, 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'c', 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'id': 607, 'def': 'a knee-length pleated tartan 
skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'id': 608, 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'id': 609, 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'r', 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'id': 610, 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'synset': 'kite.n.03', 'synonyms': ['kite'], 'id': 611, 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'id': 612, 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'id': 613, 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'id': 614, 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'synset': 'knife.n.01', 'synonyms': ['knife'], 'id': 615, 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'id': 616, 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'synset': 'knob.n.02', 'synonyms': ['knob'], 'id': 617, 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'id': 618, 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'id': 619, 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'id': 620, 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'id': 621, 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'id': 622, 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'c', 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'id': 623, 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'f', 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'id': 624, 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'id': 625, 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'id': 626, 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'id': 627, 'def': 'a metal post supporting 
an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'id': 628, 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'id': 629, 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'id': 630, 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'id': 631, 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'id': 632, 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'f', 'synset': 'latch.n.02', 'synonyms': ['latch'], 'id': 633, 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'id': 634, 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'synset': 'leather.n.01', 'synonyms': ['leather'], 'id': 635, 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'id': 636, 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'id': 637, 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'r', 'synset': 'legume.n.02', 'synonyms': ['legume'], 'id': 638, 'def': 'the fruit or seed of bean or pea plants', 'name': 'legume'}, {'frequency': 'f', 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'id': 639, 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'id': 640, 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'id': 641, 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'id': 642, 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'id': 643, 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'id': 644, 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'id': 645, 'def': 'lightblub/source of light', 'name': 'lightbulb'}, {'frequency': 'r', 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'id': 646, 'def': 'a metallic conductor that is attached to a 
high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'f', 'synset': 'lime.n.06', 'synonyms': ['lime'], 'id': 647, 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'id': 648, 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'c', 'synset': 'lion.n.01', 'synonyms': ['lion'], 'id': 649, 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'id': 650, 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'r', 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'id': 651, 'def': 'liquor or beer', 'name': 'liquor'}, {'frequency': 'c', 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'id': 652, 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'f', 'synset': 'log.n.01', 'synonyms': ['log'], 'id': 653, 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'id': 654, 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'f', 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'id': 655, 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'id': 656, 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'id': 657, 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'id': 658, 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'id': 659, 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'c', 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'id': 660, 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'f', 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'id': 661, 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'synset': 'mallard.n.01', 'synonyms': ['mallard'], 'id': 662, 'def': 'wild dabbling duck from which domestic ducks are descended', 'name': 'mallard'}, {'frequency': 'r', 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'id': 663, 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'id': 664, 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'r', 'synset': 'manatee.n.01', 'synonyms': ['manatee'], 'id': 665, 'def': 'sirenian mammal of tropical coastal waters of America', 'name': 'manatee'}, {'frequency': 'c', 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'id': 666, 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'id': 667, 'def': 'a container (usually in a barn or stable) 
from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'id': 668, 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'f', 'synset': 'map.n.01', 'synonyms': ['map'], 'id': 669, 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'f', 'synset': 'marker.n.03', 'synonyms': ['marker'], 'id': 670, 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'synset': 'martini.n.01', 'synonyms': ['martini'], 'id': 671, 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'id': 672, 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'id': 673, 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'synset': 'masher.n.02', 'synonyms': ['masher'], 'id': 674, 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'id': 675, 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'synset': 'mast.n.01', 'synonyms': ['mast'], 'id': 676, 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'id': 677, 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'id': 678, 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'id': 679, 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'id': 680, 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'id': 681, 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'id': 682, 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'id': 683, 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'c', 'synset': 'melon.n.01', 'synonyms': ['melon'], 'id': 684, 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'id': 685, 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'id': 686, 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'id': 687, 'def': 'kitchen appliance that 
cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'id': 688, 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'f', 'synset': 'milk.n.01', 'synonyms': ['milk'], 'id': 689, 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'r', 'synset': 'milk_can.n.01', 'synonyms': ['milk_can'], 'id': 690, 'def': 'can for transporting milk', 'name': 'milk_can'}, {'frequency': 'r', 'synset': 'milkshake.n.01', 'synonyms': ['milkshake'], 'id': 691, 'def': 'frothy drink of milk and flavoring and sometimes fruit or ice cream', 'name': 'milkshake'}, {'frequency': 'f', 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'id': 692, 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'id': 693, 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'id': 694, 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'id': 695, 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'id': 696, 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'synset': 'money.n.03', 'synonyms': ['money'], 'id': 697, 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'id': 698, 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'id': 699, 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'synset': 'motor.n.01', 'synonyms': ['motor'], 'id': 700, 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'id': 701, 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'id': 702, 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'f', 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'id': 703, 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'id': 704, 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'f', 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'id': 705, 'def': 'a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'id': 706, 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 
'mousepad'}, {'frequency': 'c', 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'id': 707, 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'synset': 'mug.n.04', 'synonyms': ['mug'], 'id': 708, 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'id': 709, 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'id': 710, 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'c', 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'id': 711, 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'id': 712, 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'f', 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'id': 713, 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'id': 714, 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'id': 715, 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'id': 716, 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'c', 'synset': 'needle.n.03', 'synonyms': ['needle'], 'id': 717, 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'synset': 'nest.n.01', 'synonyms': ['nest'], 'id': 718, 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'f', 'synset': 'newspaper.n.01', 'synonyms': ['newspaper', 'paper_(newspaper)'], 'id': 719, 'def': 'a daily or weekly publication on folded sheets containing news, articles, and advertisements', 'name': 'newspaper'}, {'frequency': 'c', 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'id': 720, 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'id': 721, 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'id': 722, 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'c', 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'id': 723, 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'id': 724, 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'synset': 'notepad.n.01', 'synonyms': 
['notepad'], 'id': 725, 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'f', 'synset': 'nut.n.03', 'synonyms': ['nut'], 'id': 726, 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'id': 727, 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'f', 'synset': 'oar.n.01', 'synonyms': ['oar'], 'id': 728, 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'id': 729, 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'id': 730, 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'id': 731, 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'id': 732, 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'id': 733, 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'synset': 'onion.n.01', 'synonyms': ['onion'], 'id': 734, 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'id': 735, 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'id': 736, 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'c', 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'id': 737, 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'f', 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'id': 738, 'def': 'a thick standalone cushion used as a seat or footrest, often next to a chair', 'name': 'ottoman'}, {'frequency': 'f', 'synset': 'oven.n.01', 'synonyms': ['oven'], 'id': 739, 'def': 'kitchen appliance used for baking or roasting', 'name': 'oven'}, {'frequency': 'c', 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'id': 740, 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'synset': 'owl.n.01', 'synonyms': ['owl'], 'id': 741, 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'synset': 'packet.n.03', 'synonyms': ['packet'], 'id': 742, 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'id': 743, 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'synset': 'pad.n.04', 'synonyms': ['pad'], 'id': 744, 'def': 'mostly arm/knee pads labeled', 'name': 'pad'}, {'frequency': 'f', 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'id': 745, 'def': 'a short light oar used without an oarlock to propel a canoe or small 
boat', 'name': 'paddle'}, {'frequency': 'c', 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'id': 746, 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'c', 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'id': 747, 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'synset': 'painting.n.01', 'synonyms': ['painting'], 'id': 748, 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'f', 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'id': 749, 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'id': 750, 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'id': 751, 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'id': 752, 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'id': 753, 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'id': 754, 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'id': 755, 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'f', 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'id': 756, 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'id': 757, 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'id': 758, 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'id': 759, 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'id': 760, 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'c', 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'id': 761, 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'id': 762, 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'c', 'synset': 'parasol.n.01', 'synonyms': ['parasol', 'sunshade'], 'id': 763, 'def': 'a handheld collapsible source of shade', 'name': 'parasol'}, {'frequency': 'r', 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'id': 764, 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'c', 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'id': 765, 
'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'id': 766, 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'id': 767, 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'id': 768, 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'id': 769, 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'c', 'synset': 'passport.n.02', 'synonyms': ['passport'], 'id': 770, 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'id': 771, 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'id': 772, 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'id': 773, 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'synset': 'peach.n.03', 'synonyms': ['peach'], 'id': 774, 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'id': 775, 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'f', 'synset': 'pear.n.01', 'synonyms': ['pear'], 'id': 776, 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'c', 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'id': 777, 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'synset': 'peg.n.04', 'synonyms': ['wooden_leg', 'pegleg'], 'id': 778, 'def': 'a prosthesis that replaces a missing leg', 'name': 'wooden_leg'}, {'frequency': 'r', 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'id': 779, 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'id': 780, 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'synset': 'pen.n.01', 'synonyms': ['pen'], 'id': 781, 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'f', 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'id': 782, 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'id': 783, 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'id': 784, 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'synset': 'pendulum.n.01', 'synonyms': 
['pendulum'], 'id': 785, 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'id': 786, 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'id': 787, 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'id': 788, 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'f', 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'id': 789, 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'id': 790, 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'id': 791, 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'id': 792, 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'synset': 'person.n.01', 'synonyms': ['person', 'baby', 'child', 'boy', 'girl', 'man', 'woman', 'human'], 'id': 793, 'def': 'a human being', 'name': 'person'}, {'frequency': 'c', 'synset': 'pet.n.01', 'synonyms': ['pet'], 'id': 794, 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'c', 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'id': 795, 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'id': 796, 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'id': 797, 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'f', 'synset': 'piano.n.01', 'synonyms': ['piano'], 'id': 798, 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'id': 799, 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'id': 800, 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'synset': 'pie.n.01', 'synonyms': ['pie'], 'id': 801, 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'id': 802, 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'id': 803, 'def': "a child's coin bank (often shaped like a pig)", 'name': 
'piggy_bank'}, {'frequency': 'f', 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'id': 804, 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'id': 805, 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'id': 806, 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'id': 807, 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'id': 808, 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'id': 809, 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'id': 810, 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'id': 811, 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'id': 812, 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'c', 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'id': 813, 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'id': 814, 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'id': 815, 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'id': 816, 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. 
tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'id': 817, 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'synset': 'plate.n.04', 'synonyms': ['plate'], 'id': 818, 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'synset': 'platter.n.01', 'synonyms': ['platter'], 'id': 819, 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'id': 820, 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'id': 821, 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'id': 822, 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'synset': 'plume.n.02', 'synonyms': ['plume'], 'id': 823, 'def': 'a feather or cluster of feathers worn as an ornament', 'name': 'plume'}, {'frequency': 'r', 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'id': 824, 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'id': 825, 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'id': 826, 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'id': 827, 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'f', 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'id': 828, 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'id': 829, 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'synset': 'pony.n.05', 'synonyms': ['pony'], 'id': 830, 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'id': 831, 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'id': 832, 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'c', 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'id': 833, 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'id': 834, 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'id': 
835, 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'synset': 'pot.n.01', 'synonyms': ['pot'], 'id': 836, 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'id': 837, 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'synset': 'potato.n.01', 'synonyms': ['potato'], 'id': 838, 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'id': 839, 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'id': 840, 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'id': 841, 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'c', 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'id': 842, 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'id': 843, 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'c', 'synset': 'pretzel.n.01', 'synonyms': ['pretzel'], 'id': 844, 'def': 'glazed and salted cracker typically in the shape of a loose knot', 'name': 'pretzel'}, {'frequency': 'f', 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'id': 845, 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'id': 846, 'def': 'a weapon that is forcibly thrown or projected at a targets', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'synset': 'projector.n.02', 'synonyms': ['projector'], 'id': 847, 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'id': 848, 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'synset': 'prune.n.01', 'synonyms': ['prune'], 'id': 849, 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'id': 850, 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'id': 851, 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'id': 852, 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'id': 853, 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'id': 854, 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'id': 855, 'def': 'a tool for 
making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'id': 856, 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'c', 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'id': 857, 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'id': 858, 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'id': 859, 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'id': 860, 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'id': 861, 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'id': 862, 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'id': 863, 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'synset': 'radar.n.01', 'synonyms': ['radar'], 'id': 864, 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'f', 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'id': 865, 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'id': 866, 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'id': 867, 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'synset': 'raft.n.01', 'synonyms': ['raft'], 'id': 868, 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'id': 869, 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'id': 870, 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'id': 871, 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'id': 872, 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'synset': 'rat.n.01', 'synonyms': ['rat'], 'id': 873, 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'id': 874, 'def': 'a blade that has very sharp edge', 'name': 
'razorblade'}, {'frequency': 'c', 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'id': 875, 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'id': 876, 'def': 'vehicle mirror (side or rearview)', 'name': 'rearview_mirror'}, {'frequency': 'c', 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'id': 877, 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'id': 878, 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'c', 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'id': 879, 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'f', 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'id': 880, 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'id': 881, 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'id': 882, 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'id': 883, 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'c', 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'id': 884, 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'synset': 'ring.n.08', 'synonyms': ['ring'], 'id': 885, 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'id': 886, 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'id': 887, 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'synset': 'robe.n.01', 'synonyms': ['robe'], 'id': 888, 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'id': 889, 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'synset': 'rodent.n.01', 'synonyms': ['rodent'], 'id': 890, 'def': 'relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing', 'name': 'rodent'}, {'frequency': 'r', 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'id': 891, 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'id': 892, 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'id': 893, 'def': 
'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'id': 894, 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'id': 895, 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'id': 896, 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'id': 897, 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'id': 898, 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'id': 899, 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'id': 900, 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'id': 901, 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'id': 902, 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'f', 'synset': 'sail.n.01', 'synonyms': ['sail'], 'id': 903, 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'f', 'synset': 'salad.n.01', 'synonyms': ['salad'], 'id': 904, 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'id': 905, 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'c', 'synset': 'salami.n.01', 'synonyms': ['salami'], 'id': 906, 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'c', 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'id': 907, 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'id': 908, 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'c', 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'id': 909, 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'id': 910, 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'id': 911, 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, 
{'frequency': 'f', 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'id': 912, 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'id': 913, 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'id': 914, 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'id': 915, 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'id': 916, 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'id': 917, 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'id': 918, 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'id': 919, 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'id': 920, 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'id': 921, 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'id': 922, 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'id': 923, 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'f', 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'id': 924, 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'r', 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'id': 925, 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'c', 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'id': 926, 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'f', 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'id': 927, 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'id': 928, 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'c', 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'id': 929, 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'c', 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'id': 930, 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'id': 931, 'def': 
'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'id': 932, 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'c', 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'id': 933, 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'c', 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'id': 934, 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'id': 935, 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'c', 'synset': 'shark.n.01', 'synonyms': ['shark'], 'id': 936, 'def': 'typically large carnivorous fishes with sharpe teeth', 'name': 'shark'}, {'frequency': 'r', 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'id': 937, 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'id': 938, 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'id': 939, 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'id': 940, 'def': 'toiletry consisting that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'id': 941, 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'synset': 'shears.n.01', 'synonyms': ['shears'], 'id': 942, 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'id': 943, 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'id': 944, 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'id': 945, 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'c', 'synset': 'shield.n.02', 'synonyms': ['shield'], 'id': 946, 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'id': 947, 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'id': 948, 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'f', 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'id': 949, 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'id': 950, 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'synset': 'short_pants.n.01', 'synonyms': 
['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'id': 951, 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'id': 952, 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'f', 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'id': 953, 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'id': 954, 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'id': 955, 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'r', 'synset': 'shower_cap.n.01', 'synonyms': ['shower_cap'], 'id': 956, 'def': 'a tight cap worn to keep hair dry while showering', 'name': 'shower_cap'}, {'frequency': 'f', 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'id': 957, 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'id': 958, 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'f', 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'id': 959, 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'synset': 'silo.n.01', 'synonyms': ['silo'], 'id': 960, 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'synset': 'sink.n.01', 'synonyms': ['sink'], 'id': 961, 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'id': 962, 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'id': 963, 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'synset': 'ski.n.01', 'synonyms': ['ski'], 'id': 964, 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'id': 965, 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'id': 966, 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'id': 967, 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'id': 968, 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'r', 'synset': 'skullcap.n.01', 'synonyms': ['skullcap'], 'id': 969, 'def': 'rounded brimless cap fitting the crown of the head', 'name': 'skullcap'}, {'frequency': 'c', 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'id': 970, 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'synset': 'sleeping_bag.n.01', 'synonyms': 
['sleeping_bag'], 'id': 971, 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'id': 972, 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'id': 973, 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'id': 974, 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'id': 975, 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'id': 976, 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'id': 977, 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'id': 978, 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'synset': 'soap.n.01', 'synonyms': ['soap'], 'id': 979, 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'id': 980, 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'synset': 'sock.n.01', 'synonyms': ['sock'], 'id': 981, 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'f', 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'id': 982, 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'synset': 'softball.n.01', 'synonyms': ['softball'], 'id': 983, 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'id': 984, 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'id': 985, 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'f', 'synset': 'soup.n.01', 'synonyms': ['soup'], 'id': 986, 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'id': 987, 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'id': 988, 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'id': 989, 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 
'soybean_milk', 'soymilk'], 'id': 990, 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'id': 991, 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'id': 992, 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'id': 993, 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'id': 994, 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'id': 995, 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'id': 996, 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'c', 'synset': 'spider.n.01', 'synonyms': ['spider'], 'id': 997, 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'r', 'synset': 'spiny_lobster.n.02', 'synonyms': ['crawfish', 'crayfish'], 'id': 998, 'def': 'large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters', 'name': 'crawfish'}, {'frequency': 'c', 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'id': 999, 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'id': 1000, 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'id': 1001, 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'id': 1002, 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'synset': 'squid.n.01', 'synonyms': ['squid_(food)', 'calamari', 'calamary'], 'id': 1003, 'def': '(Italian cuisine) squid prepared as food', 'name': 'squid_(food)'}, {'frequency': 'c', 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'id': 1004, 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'r', 'synset': 'stagecoach.n.01', 'synonyms': ['stagecoach'], 'id': 1005, 'def': 'a large coach-and-four formerly used to carry passengers and mail on regular routes between towns', 'name': 'stagecoach'}, {'frequency': 'c', 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'id': 1006, 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'c', 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'id': 1007, 'def': 'echinoderms characterized 
by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'id': 1008, 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'id': 1009, 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'id': 1010, 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'f', 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'id': 1011, 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'id': 1012, 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'id': 1013, 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'id': 1014, 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'synset': 'stew.n.02', 'synonyms': ['stew'], 'id': 1015, 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'id': 1016, 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'id': 1017, 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'f', 'synset': 'stool.n.01', 'synonyms': ['stool'], 'id': 1018, 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'id': 1019, 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'id': 1020, 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'id': 1021, 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'id': 1022, 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'synset': 'strap.n.01', 'synonyms': ['strap'], 'id': 1023, 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'id': 1024, 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'id': 1025, 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'id': 1026, 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'id': 1027, 'def': 'a lamp 
supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'id': 1028, 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'id': 1029, 'def': 'a pointed tool for writing or drawing or engraving, including pens', 'name': 'stylus'}, {'frequency': 'r', 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'id': 1030, 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'id': 1031, 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'id': 1032, 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'f', 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'id': 1033, 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'id': 1034, 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'id': 1035, 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'id': 1036, 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'f', 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'id': 1037, 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'id': 1038, 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'synset': 'swab.n.02', 'synonyms': ['mop'], 'id': 1039, 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'id': 1040, 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'id': 1041, 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'id': 1042, 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'id': 1043, 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'id': 1044, 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'id': 1045, 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 
'c', 'synset': 'sword.n.01', 'synonyms': ['sword'], 'id': 1046, 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'id': 1047, 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'id': 1048, 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'id': 1049, 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'synset': 'table.n.02', 'synonyms': ['table'], 'id': 1050, 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'id': 1051, 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'id': 1052, 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'id': 1053, 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'synset': 'taco.n.02', 'synonyms': ['taco'], 'id': 1054, 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'synset': 'tag.n.02', 'synonyms': ['tag'], 'id': 1055, 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'id': 1056, 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'id': 1057, 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'id': 1058, 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'f', 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'id': 1059, 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'id': 1060, 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'f', 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'id': 1061, 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'id': 1062, 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'id': 1063, 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 
'id': 1064, 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'id': 1065, 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'id': 1066, 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'c', 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'id': 1067, 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'id': 1068, 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'id': 1069, 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'f', 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'id': 1070, 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'id': 1071, 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'id': 1072, 'def': 'electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)', 'name': 'telephone'}, {'frequency': 'c', 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'id': 1073, 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'id': 1074, 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'id': 1075, 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'id': 1076, 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'id': 1077, 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'id': 1078, 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'id': 1079, 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'id': 1080, 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'id': 1081, 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'id': 1082, 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'f', 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'id': 1083, 'def': 'a regulator for automatically regulating 
temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'id': 1084, 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'id': 1085, 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'id': 1086, 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'id': 1087, 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'id': 1088, 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'id': 1089, 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'id': 1090, 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'id': 1091, 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'c', 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'id': 1092, 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'id': 1093, 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'id': 1094, 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'id': 1095, 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'f', 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'id': 1096, 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'id': 1097, 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'id': 1098, 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'id': 1099, 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'f', 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'id': 1100, 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'id': 1101, 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'id': 1102, 'def': 'small brush; has 
long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'id': 1103, 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'f', 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'id': 1104, 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'f', 'synset': 'top.n.09', 'synonyms': ['cover'], 'id': 1105, 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'id': 1106, 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'id': 1107, 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'synset': 'towel.n.01', 'synonyms': ['towel'], 'id': 1108, 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'id': 1109, 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'synset': 'toy.n.03', 'synonyms': ['toy'], 'id': 1110, 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'id': 1111, 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'id': 1112, 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'c', 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'id': 1113, 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'f', 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'id': 1114, 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'id': 1115, 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'id': 1116, 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'synset': 'tray.n.01', 'synonyms': ['tray'], 'id': 1117, 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'id': 1118, 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'id': 1119, 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'c', 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'id': 
1120, 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'f', 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'id': 1121, 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'id': 1122, 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'synset': 'truck.n.01', 'synonyms': ['truck'], 'id': 1123, 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'id': 1124, 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'id': 1125, 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'synset': 'tub.n.02', 'synonyms': ['vat'], 'id': 1126, 'def': 'a large vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'synset': 'turban.n.01', 'synonyms': ['turban'], 'id': 1127, 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'c', 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'id': 1128, 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'id': 1129, 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'id': 1130, 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'c', 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'id': 1131, 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'c', 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'id': 1132, 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'id': 1133, 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'f', 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'id': 1134, 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'id': 1135, 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'f', 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'id': 1136, 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'c', 'synset': 'urn.n.01', 'synonyms': ['urn'], 'id': 1137, 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'id': 1138, 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'f', 'synset': 'vase.n.01', 'synonyms': ['vase'], 'id': 1139, 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'synset': 
'vending_machine.n.01', 'synonyms': ['vending_machine'], 'id': 1140, 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'id': 1141, 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'f', 'synset': 'vest.n.01', 'synonyms': ['vest', 'waistcoat'], 'id': 1142, 'def': "a man's sleeveless garment worn underneath a coat", 'name': 'vest'}, {'frequency': 'c', 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'id': 1143, 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'id': 1144, 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'id': 1145, 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'id': 1146, 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'c', 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'id': 1147, 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'id': 1148, 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'id': 1149, 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'id': 1150, 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'id': 1151, 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'id': 1152, 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'id': 1153, 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'id': 1154, 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'id': 1155, 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'f', 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'id': 1156, 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'id': 1157, 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'id': 1158, 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'synset': 'washbasin.n.01', 'synonyms': ['washbasin', 
'basin_(for_washing)', 'washbowl', 'washstand', 'handbasin'], 'id': 1159, 'def': 'a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face', 'name': 'washbasin'}, {'frequency': 'c', 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'id': 1160, 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'id': 1161, 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'id': 1162, 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'id': 1163, 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'id': 1164, 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'id': 1165, 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'c', 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'id': 1166, 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'id': 1167, 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'id': 1168, 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'id': 1169, 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'id': 1170, 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'id': 1171, 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'f', 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'id': 1172, 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'id': 1173, 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'id': 1174, 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'id': 1175, 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'id': 1176, 'def': 'a ring given to the bride and/or groom at 
the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'id': 1177, 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'id': 1178, 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'id': 1179, 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'id': 1180, 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'c', 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'id': 1181, 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'c', 'synset': 'wig.n.01', 'synonyms': ['wig'], 'id': 1182, 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'id': 1183, 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'id': 1184, 'def': 'A mill or turbine that is powered by wind', 'name': 'windmill'}, {'frequency': 'c', 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'id': 1185, 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'id': 1186, 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'id': 1187, 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'id': 1188, 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'c', 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'id': 1189, 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'id': 1190, 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'f', 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'id': 1191, 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'synset': 'wok.n.01', 'synonyms': ['wok'], 'id': 1192, 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'id': 1193, 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'id': 1194, 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'id': 1195, 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 
'name': 'wreath'}, {'frequency': 'c', 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'id': 1196, 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'f', 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'id': 1197, 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'id': 1198, 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'c', 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'id': 1199, 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'c', 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'id': 1200, 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'c', 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'id': 1201, 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'id': 1202, 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'id': 1203, 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa +# fmt: on diff --git a/data_processing/detectron2/detectron2/data/datasets/lvis_v1_category_image_count.py b/data_processing/detectron2/detectron2/data/datasets/lvis_v1_category_image_count.py new file mode 100644 index 0000000..31bf0cf --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/lvis_v1_category_image_count.py @@ -0,0 +1,20 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# Autogen with +# with open("lvis_v1_train.json", "r") as f: +# a = json.load(f) +# c = a["categories"] +# for x in c: +# del x["name"] +# del x["instance_count"] +# del x["def"] +# del x["synonyms"] +# del x["frequency"] +# del x["synset"] +# LVIS_CATEGORY_IMAGE_COUNT = repr(c) + " # noqa" +# with open("/tmp/lvis_category_image_count.py", "wt") as f: +# f.write(f"LVIS_CATEGORY_IMAGE_COUNT = {LVIS_CATEGORY_IMAGE_COUNT}") +# Then paste the contents of that file below + +# fmt: off +LVIS_CATEGORY_IMAGE_COUNT = [{'id': 1, 'image_count': 64}, {'id': 2, 'image_count': 364}, {'id': 3, 'image_count': 1911}, {'id': 4, 'image_count': 149}, {'id': 5, 'image_count': 29}, {'id': 6, 'image_count': 26}, {'id': 7, 'image_count': 59}, {'id': 8, 'image_count': 22}, {'id': 9, 'image_count': 12}, {'id': 10, 'image_count': 28}, {'id': 11, 'image_count': 505}, {'id': 12, 'image_count': 1207}, {'id': 13, 'image_count': 4}, {'id': 14, 'image_count': 10}, {'id': 15, 'image_count': 500}, {'id': 16, 'image_count': 33}, {'id': 17, 'image_count': 3}, {'id': 18, 'image_count': 44}, {'id': 19, 'image_count': 561}, {'id': 20, 'image_count': 8}, {'id': 21, 'image_count': 9}, {'id': 22, 'image_count': 33}, {'id': 23, 'image_count': 1883}, {'id': 24, 'image_count': 98}, {'id': 25, 'image_count': 70}, {'id': 26, 'image_count': 46}, {'id': 27, 'image_count': 117}, {'id': 28, 'image_count': 41}, {'id': 29, 'image_count': 1395}, {'id': 30, 'image_count': 7}, {'id': 31, 'image_count': 1}, {'id': 32, 'image_count': 314}, {'id': 33, 'image_count': 31}, {'id': 34, 'image_count': 1905}, {'id': 35, 'image_count': 1859}, {'id': 36, 'image_count': 1623}, {'id': 37, 'image_count': 47}, {'id': 38, 'image_count': 3}, {'id': 39, 'image_count': 3}, {'id': 40, 'image_count': 1}, {'id': 41, 'image_count': 305}, {'id': 42, 'image_count': 6}, {'id': 43, 'image_count': 210}, {'id': 44, 'image_count': 36}, {'id': 45, 'image_count': 1787}, {'id': 46, 'image_count': 17}, {'id': 47, 'image_count': 51}, {'id': 48, 'image_count': 138}, {'id': 49, 'image_count': 3}, {'id': 50, 'image_count': 1470}, {'id': 51, 'image_count': 3}, {'id': 52, 'image_count': 2}, {'id': 53, 'image_count': 186}, {'id': 54, 'image_count': 76}, {'id': 55, 'image_count': 26}, {'id': 56, 'image_count': 303}, {'id': 57, 'image_count': 738}, {'id': 58, 'image_count': 1799}, {'id': 59, 'image_count': 1934}, {'id': 60, 'image_count': 1609}, {'id': 61, 'image_count': 1622}, {'id': 62, 'image_count': 41}, {'id': 63, 'image_count': 4}, {'id': 64, 'image_count': 11}, {'id': 65, 'image_count': 270}, {'id': 66, 'image_count': 349}, {'id': 67, 'image_count': 42}, {'id': 68, 'image_count': 823}, {'id': 69, 'image_count': 6}, {'id': 70, 'image_count': 48}, {'id': 71, 'image_count': 3}, {'id': 72, 'image_count': 42}, {'id': 73, 'image_count': 24}, {'id': 74, 'image_count': 16}, {'id': 75, 'image_count': 605}, {'id': 76, 'image_count': 646}, {'id': 77, 'image_count': 1765}, {'id': 78, 'image_count': 2}, {'id': 79, 'image_count': 125}, {'id': 80, 'image_count': 1420}, {'id': 81, 'image_count': 140}, {'id': 82, 'image_count': 4}, {'id': 83, 'image_count': 322}, {'id': 84, 'image_count': 60}, {'id': 85, 'image_count': 2}, {'id': 86, 'image_count': 231}, {'id': 87, 'image_count': 333}, {'id': 88, 'image_count': 1941}, {'id': 89, 'image_count': 367}, {'id': 90, 'image_count': 1922}, {'id': 91, 'image_count': 18}, {'id': 92, 'image_count': 81}, {'id': 93, 'image_count': 1}, {'id': 94, 'image_count': 1852}, {'id': 95, 'image_count': 430}, {'id': 96, 'image_count': 247}, {'id': 97, 'image_count': 
94}, {'id': 98, 'image_count': 21}, {'id': 99, 'image_count': 1821}, {'id': 100, 'image_count': 16}, {'id': 101, 'image_count': 12}, {'id': 102, 'image_count': 25}, {'id': 103, 'image_count': 41}, {'id': 104, 'image_count': 244}, {'id': 105, 'image_count': 7}, {'id': 106, 'image_count': 1}, {'id': 107, 'image_count': 40}, {'id': 108, 'image_count': 40}, {'id': 109, 'image_count': 104}, {'id': 110, 'image_count': 1671}, {'id': 111, 'image_count': 49}, {'id': 112, 'image_count': 243}, {'id': 113, 'image_count': 2}, {'id': 114, 'image_count': 242}, {'id': 115, 'image_count': 271}, {'id': 116, 'image_count': 104}, {'id': 117, 'image_count': 8}, {'id': 118, 'image_count': 1758}, {'id': 119, 'image_count': 1}, {'id': 120, 'image_count': 48}, {'id': 121, 'image_count': 14}, {'id': 122, 'image_count': 40}, {'id': 123, 'image_count': 1}, {'id': 124, 'image_count': 37}, {'id': 125, 'image_count': 1510}, {'id': 126, 'image_count': 6}, {'id': 127, 'image_count': 1903}, {'id': 128, 'image_count': 70}, {'id': 129, 'image_count': 86}, {'id': 130, 'image_count': 7}, {'id': 131, 'image_count': 5}, {'id': 132, 'image_count': 1406}, {'id': 133, 'image_count': 1901}, {'id': 134, 'image_count': 15}, {'id': 135, 'image_count': 28}, {'id': 136, 'image_count': 6}, {'id': 137, 'image_count': 494}, {'id': 138, 'image_count': 234}, {'id': 139, 'image_count': 1922}, {'id': 140, 'image_count': 1}, {'id': 141, 'image_count': 35}, {'id': 142, 'image_count': 5}, {'id': 143, 'image_count': 1828}, {'id': 144, 'image_count': 8}, {'id': 145, 'image_count': 63}, {'id': 146, 'image_count': 1668}, {'id': 147, 'image_count': 4}, {'id': 148, 'image_count': 95}, {'id': 149, 'image_count': 17}, {'id': 150, 'image_count': 1567}, {'id': 151, 'image_count': 2}, {'id': 152, 'image_count': 103}, {'id': 153, 'image_count': 50}, {'id': 154, 'image_count': 1309}, {'id': 155, 'image_count': 6}, {'id': 156, 'image_count': 92}, {'id': 157, 'image_count': 19}, {'id': 158, 'image_count': 37}, {'id': 159, 'image_count': 4}, {'id': 160, 'image_count': 709}, {'id': 161, 'image_count': 9}, {'id': 162, 'image_count': 82}, {'id': 163, 'image_count': 15}, {'id': 164, 'image_count': 3}, {'id': 165, 'image_count': 61}, {'id': 166, 'image_count': 51}, {'id': 167, 'image_count': 5}, {'id': 168, 'image_count': 13}, {'id': 169, 'image_count': 642}, {'id': 170, 'image_count': 24}, {'id': 171, 'image_count': 255}, {'id': 172, 'image_count': 9}, {'id': 173, 'image_count': 1808}, {'id': 174, 'image_count': 31}, {'id': 175, 'image_count': 158}, {'id': 176, 'image_count': 80}, {'id': 177, 'image_count': 1884}, {'id': 178, 'image_count': 158}, {'id': 179, 'image_count': 2}, {'id': 180, 'image_count': 12}, {'id': 181, 'image_count': 1659}, {'id': 182, 'image_count': 7}, {'id': 183, 'image_count': 834}, {'id': 184, 'image_count': 57}, {'id': 185, 'image_count': 174}, {'id': 186, 'image_count': 95}, {'id': 187, 'image_count': 27}, {'id': 188, 'image_count': 22}, {'id': 189, 'image_count': 1391}, {'id': 190, 'image_count': 90}, {'id': 191, 'image_count': 40}, {'id': 192, 'image_count': 445}, {'id': 193, 'image_count': 21}, {'id': 194, 'image_count': 1132}, {'id': 195, 'image_count': 177}, {'id': 196, 'image_count': 4}, {'id': 197, 'image_count': 17}, {'id': 198, 'image_count': 84}, {'id': 199, 'image_count': 55}, {'id': 200, 'image_count': 30}, {'id': 201, 'image_count': 25}, {'id': 202, 'image_count': 2}, {'id': 203, 'image_count': 125}, {'id': 204, 'image_count': 1135}, {'id': 205, 'image_count': 19}, {'id': 206, 'image_count': 72}, {'id': 207, 'image_count': 1926}, 
{'id': 208, 'image_count': 159}, {'id': 209, 'image_count': 7}, {'id': 210, 'image_count': 1}, {'id': 211, 'image_count': 13}, {'id': 212, 'image_count': 35}, {'id': 213, 'image_count': 18}, {'id': 214, 'image_count': 8}, {'id': 215, 'image_count': 6}, {'id': 216, 'image_count': 35}, {'id': 217, 'image_count': 1222}, {'id': 218, 'image_count': 103}, {'id': 219, 'image_count': 28}, {'id': 220, 'image_count': 63}, {'id': 221, 'image_count': 28}, {'id': 222, 'image_count': 5}, {'id': 223, 'image_count': 7}, {'id': 224, 'image_count': 14}, {'id': 225, 'image_count': 1918}, {'id': 226, 'image_count': 133}, {'id': 227, 'image_count': 16}, {'id': 228, 'image_count': 27}, {'id': 229, 'image_count': 110}, {'id': 230, 'image_count': 1895}, {'id': 231, 'image_count': 4}, {'id': 232, 'image_count': 1927}, {'id': 233, 'image_count': 8}, {'id': 234, 'image_count': 1}, {'id': 235, 'image_count': 263}, {'id': 236, 'image_count': 10}, {'id': 237, 'image_count': 2}, {'id': 238, 'image_count': 3}, {'id': 239, 'image_count': 87}, {'id': 240, 'image_count': 9}, {'id': 241, 'image_count': 71}, {'id': 242, 'image_count': 13}, {'id': 243, 'image_count': 18}, {'id': 244, 'image_count': 2}, {'id': 245, 'image_count': 5}, {'id': 246, 'image_count': 45}, {'id': 247, 'image_count': 1}, {'id': 248, 'image_count': 23}, {'id': 249, 'image_count': 32}, {'id': 250, 'image_count': 4}, {'id': 251, 'image_count': 1}, {'id': 252, 'image_count': 858}, {'id': 253, 'image_count': 661}, {'id': 254, 'image_count': 168}, {'id': 255, 'image_count': 210}, {'id': 256, 'image_count': 65}, {'id': 257, 'image_count': 4}, {'id': 258, 'image_count': 2}, {'id': 259, 'image_count': 159}, {'id': 260, 'image_count': 31}, {'id': 261, 'image_count': 811}, {'id': 262, 'image_count': 1}, {'id': 263, 'image_count': 42}, {'id': 264, 'image_count': 27}, {'id': 265, 'image_count': 2}, {'id': 266, 'image_count': 5}, {'id': 267, 'image_count': 95}, {'id': 268, 'image_count': 32}, {'id': 269, 'image_count': 1}, {'id': 270, 'image_count': 1}, {'id': 271, 'image_count': 1844}, {'id': 272, 'image_count': 897}, {'id': 273, 'image_count': 31}, {'id': 274, 'image_count': 23}, {'id': 275, 'image_count': 1}, {'id': 276, 'image_count': 202}, {'id': 277, 'image_count': 746}, {'id': 278, 'image_count': 44}, {'id': 279, 'image_count': 14}, {'id': 280, 'image_count': 26}, {'id': 281, 'image_count': 1}, {'id': 282, 'image_count': 2}, {'id': 283, 'image_count': 25}, {'id': 284, 'image_count': 238}, {'id': 285, 'image_count': 592}, {'id': 286, 'image_count': 26}, {'id': 287, 'image_count': 5}, {'id': 288, 'image_count': 42}, {'id': 289, 'image_count': 13}, {'id': 290, 'image_count': 46}, {'id': 291, 'image_count': 1}, {'id': 292, 'image_count': 8}, {'id': 293, 'image_count': 34}, {'id': 294, 'image_count': 5}, {'id': 295, 'image_count': 1}, {'id': 296, 'image_count': 1871}, {'id': 297, 'image_count': 717}, {'id': 298, 'image_count': 1010}, {'id': 299, 'image_count': 679}, {'id': 300, 'image_count': 3}, {'id': 301, 'image_count': 4}, {'id': 302, 'image_count': 1}, {'id': 303, 'image_count': 166}, {'id': 304, 'image_count': 2}, {'id': 305, 'image_count': 266}, {'id': 306, 'image_count': 101}, {'id': 307, 'image_count': 6}, {'id': 308, 'image_count': 14}, {'id': 309, 'image_count': 133}, {'id': 310, 'image_count': 2}, {'id': 311, 'image_count': 38}, {'id': 312, 'image_count': 95}, {'id': 313, 'image_count': 1}, {'id': 314, 'image_count': 12}, {'id': 315, 'image_count': 49}, {'id': 316, 'image_count': 5}, {'id': 317, 'image_count': 5}, {'id': 318, 'image_count': 16}, {'id': 
319, 'image_count': 216}, {'id': 320, 'image_count': 12}, {'id': 321, 'image_count': 1}, {'id': 322, 'image_count': 54}, {'id': 323, 'image_count': 5}, {'id': 324, 'image_count': 245}, {'id': 325, 'image_count': 12}, {'id': 326, 'image_count': 7}, {'id': 327, 'image_count': 35}, {'id': 328, 'image_count': 36}, {'id': 329, 'image_count': 32}, {'id': 330, 'image_count': 1027}, {'id': 331, 'image_count': 10}, {'id': 332, 'image_count': 12}, {'id': 333, 'image_count': 1}, {'id': 334, 'image_count': 67}, {'id': 335, 'image_count': 71}, {'id': 336, 'image_count': 30}, {'id': 337, 'image_count': 48}, {'id': 338, 'image_count': 249}, {'id': 339, 'image_count': 13}, {'id': 340, 'image_count': 29}, {'id': 341, 'image_count': 14}, {'id': 342, 'image_count': 236}, {'id': 343, 'image_count': 15}, {'id': 344, 'image_count': 1521}, {'id': 345, 'image_count': 25}, {'id': 346, 'image_count': 249}, {'id': 347, 'image_count': 139}, {'id': 348, 'image_count': 2}, {'id': 349, 'image_count': 2}, {'id': 350, 'image_count': 1890}, {'id': 351, 'image_count': 1240}, {'id': 352, 'image_count': 1}, {'id': 353, 'image_count': 9}, {'id': 354, 'image_count': 1}, {'id': 355, 'image_count': 3}, {'id': 356, 'image_count': 11}, {'id': 357, 'image_count': 4}, {'id': 358, 'image_count': 236}, {'id': 359, 'image_count': 44}, {'id': 360, 'image_count': 19}, {'id': 361, 'image_count': 1100}, {'id': 362, 'image_count': 7}, {'id': 363, 'image_count': 69}, {'id': 364, 'image_count': 2}, {'id': 365, 'image_count': 8}, {'id': 366, 'image_count': 5}, {'id': 367, 'image_count': 227}, {'id': 368, 'image_count': 6}, {'id': 369, 'image_count': 106}, {'id': 370, 'image_count': 81}, {'id': 371, 'image_count': 17}, {'id': 372, 'image_count': 134}, {'id': 373, 'image_count': 312}, {'id': 374, 'image_count': 8}, {'id': 375, 'image_count': 271}, {'id': 376, 'image_count': 2}, {'id': 377, 'image_count': 103}, {'id': 378, 'image_count': 1938}, {'id': 379, 'image_count': 574}, {'id': 380, 'image_count': 120}, {'id': 381, 'image_count': 2}, {'id': 382, 'image_count': 2}, {'id': 383, 'image_count': 13}, {'id': 384, 'image_count': 29}, {'id': 385, 'image_count': 1710}, {'id': 386, 'image_count': 66}, {'id': 387, 'image_count': 1008}, {'id': 388, 'image_count': 1}, {'id': 389, 'image_count': 3}, {'id': 390, 'image_count': 1942}, {'id': 391, 'image_count': 19}, {'id': 392, 'image_count': 1488}, {'id': 393, 'image_count': 46}, {'id': 394, 'image_count': 106}, {'id': 395, 'image_count': 115}, {'id': 396, 'image_count': 19}, {'id': 397, 'image_count': 2}, {'id': 398, 'image_count': 1}, {'id': 399, 'image_count': 28}, {'id': 400, 'image_count': 9}, {'id': 401, 'image_count': 192}, {'id': 402, 'image_count': 12}, {'id': 403, 'image_count': 21}, {'id': 404, 'image_count': 247}, {'id': 405, 'image_count': 6}, {'id': 406, 'image_count': 64}, {'id': 407, 'image_count': 7}, {'id': 408, 'image_count': 40}, {'id': 409, 'image_count': 542}, {'id': 410, 'image_count': 2}, {'id': 411, 'image_count': 1898}, {'id': 412, 'image_count': 36}, {'id': 413, 'image_count': 4}, {'id': 414, 'image_count': 1}, {'id': 415, 'image_count': 191}, {'id': 416, 'image_count': 6}, {'id': 417, 'image_count': 41}, {'id': 418, 'image_count': 39}, {'id': 419, 'image_count': 46}, {'id': 420, 'image_count': 1}, {'id': 421, 'image_count': 1451}, {'id': 422, 'image_count': 1878}, {'id': 423, 'image_count': 11}, {'id': 424, 'image_count': 82}, {'id': 425, 'image_count': 18}, {'id': 426, 'image_count': 1}, {'id': 427, 'image_count': 7}, {'id': 428, 'image_count': 3}, {'id': 429, 'image_count': 
575}, {'id': 430, 'image_count': 1907}, {'id': 431, 'image_count': 8}, {'id': 432, 'image_count': 4}, {'id': 433, 'image_count': 32}, {'id': 434, 'image_count': 11}, {'id': 435, 'image_count': 4}, {'id': 436, 'image_count': 54}, {'id': 437, 'image_count': 202}, {'id': 438, 'image_count': 32}, {'id': 439, 'image_count': 3}, {'id': 440, 'image_count': 130}, {'id': 441, 'image_count': 119}, {'id': 442, 'image_count': 141}, {'id': 443, 'image_count': 29}, {'id': 444, 'image_count': 525}, {'id': 445, 'image_count': 1323}, {'id': 446, 'image_count': 2}, {'id': 447, 'image_count': 113}, {'id': 448, 'image_count': 16}, {'id': 449, 'image_count': 7}, {'id': 450, 'image_count': 35}, {'id': 451, 'image_count': 1908}, {'id': 452, 'image_count': 353}, {'id': 453, 'image_count': 18}, {'id': 454, 'image_count': 14}, {'id': 455, 'image_count': 77}, {'id': 456, 'image_count': 8}, {'id': 457, 'image_count': 37}, {'id': 458, 'image_count': 1}, {'id': 459, 'image_count': 346}, {'id': 460, 'image_count': 19}, {'id': 461, 'image_count': 1779}, {'id': 462, 'image_count': 23}, {'id': 463, 'image_count': 25}, {'id': 464, 'image_count': 67}, {'id': 465, 'image_count': 19}, {'id': 466, 'image_count': 28}, {'id': 467, 'image_count': 4}, {'id': 468, 'image_count': 27}, {'id': 469, 'image_count': 1861}, {'id': 470, 'image_count': 11}, {'id': 471, 'image_count': 13}, {'id': 472, 'image_count': 13}, {'id': 473, 'image_count': 32}, {'id': 474, 'image_count': 1767}, {'id': 475, 'image_count': 42}, {'id': 476, 'image_count': 17}, {'id': 477, 'image_count': 128}, {'id': 478, 'image_count': 1}, {'id': 479, 'image_count': 9}, {'id': 480, 'image_count': 10}, {'id': 481, 'image_count': 4}, {'id': 482, 'image_count': 9}, {'id': 483, 'image_count': 18}, {'id': 484, 'image_count': 41}, {'id': 485, 'image_count': 28}, {'id': 486, 'image_count': 3}, {'id': 487, 'image_count': 65}, {'id': 488, 'image_count': 9}, {'id': 489, 'image_count': 23}, {'id': 490, 'image_count': 24}, {'id': 491, 'image_count': 1}, {'id': 492, 'image_count': 2}, {'id': 493, 'image_count': 59}, {'id': 494, 'image_count': 48}, {'id': 495, 'image_count': 17}, {'id': 496, 'image_count': 1877}, {'id': 497, 'image_count': 18}, {'id': 498, 'image_count': 1920}, {'id': 499, 'image_count': 50}, {'id': 500, 'image_count': 1890}, {'id': 501, 'image_count': 99}, {'id': 502, 'image_count': 1530}, {'id': 503, 'image_count': 3}, {'id': 504, 'image_count': 11}, {'id': 505, 'image_count': 19}, {'id': 506, 'image_count': 3}, {'id': 507, 'image_count': 63}, {'id': 508, 'image_count': 5}, {'id': 509, 'image_count': 6}, {'id': 510, 'image_count': 233}, {'id': 511, 'image_count': 54}, {'id': 512, 'image_count': 36}, {'id': 513, 'image_count': 10}, {'id': 514, 'image_count': 124}, {'id': 515, 'image_count': 101}, {'id': 516, 'image_count': 3}, {'id': 517, 'image_count': 363}, {'id': 518, 'image_count': 3}, {'id': 519, 'image_count': 30}, {'id': 520, 'image_count': 18}, {'id': 521, 'image_count': 199}, {'id': 522, 'image_count': 97}, {'id': 523, 'image_count': 32}, {'id': 524, 'image_count': 121}, {'id': 525, 'image_count': 16}, {'id': 526, 'image_count': 12}, {'id': 527, 'image_count': 2}, {'id': 528, 'image_count': 214}, {'id': 529, 'image_count': 48}, {'id': 530, 'image_count': 26}, {'id': 531, 'image_count': 13}, {'id': 532, 'image_count': 4}, {'id': 533, 'image_count': 11}, {'id': 534, 'image_count': 123}, {'id': 535, 'image_count': 7}, {'id': 536, 'image_count': 200}, {'id': 537, 'image_count': 91}, {'id': 538, 'image_count': 9}, {'id': 539, 'image_count': 72}, {'id': 540, 
'image_count': 1886}, {'id': 541, 'image_count': 4}, {'id': 542, 'image_count': 1}, {'id': 543, 'image_count': 1}, {'id': 544, 'image_count': 1932}, {'id': 545, 'image_count': 4}, {'id': 546, 'image_count': 56}, {'id': 547, 'image_count': 854}, {'id': 548, 'image_count': 755}, {'id': 549, 'image_count': 1843}, {'id': 550, 'image_count': 96}, {'id': 551, 'image_count': 7}, {'id': 552, 'image_count': 74}, {'id': 553, 'image_count': 66}, {'id': 554, 'image_count': 57}, {'id': 555, 'image_count': 44}, {'id': 556, 'image_count': 1905}, {'id': 557, 'image_count': 4}, {'id': 558, 'image_count': 90}, {'id': 559, 'image_count': 1635}, {'id': 560, 'image_count': 8}, {'id': 561, 'image_count': 5}, {'id': 562, 'image_count': 50}, {'id': 563, 'image_count': 545}, {'id': 564, 'image_count': 20}, {'id': 565, 'image_count': 193}, {'id': 566, 'image_count': 285}, {'id': 567, 'image_count': 3}, {'id': 568, 'image_count': 1}, {'id': 569, 'image_count': 1904}, {'id': 570, 'image_count': 294}, {'id': 571, 'image_count': 3}, {'id': 572, 'image_count': 5}, {'id': 573, 'image_count': 24}, {'id': 574, 'image_count': 2}, {'id': 575, 'image_count': 2}, {'id': 576, 'image_count': 16}, {'id': 577, 'image_count': 8}, {'id': 578, 'image_count': 154}, {'id': 579, 'image_count': 66}, {'id': 580, 'image_count': 1}, {'id': 581, 'image_count': 24}, {'id': 582, 'image_count': 1}, {'id': 583, 'image_count': 4}, {'id': 584, 'image_count': 75}, {'id': 585, 'image_count': 6}, {'id': 586, 'image_count': 126}, {'id': 587, 'image_count': 24}, {'id': 588, 'image_count': 22}, {'id': 589, 'image_count': 1872}, {'id': 590, 'image_count': 16}, {'id': 591, 'image_count': 423}, {'id': 592, 'image_count': 1927}, {'id': 593, 'image_count': 38}, {'id': 594, 'image_count': 3}, {'id': 595, 'image_count': 1945}, {'id': 596, 'image_count': 35}, {'id': 597, 'image_count': 1}, {'id': 598, 'image_count': 13}, {'id': 599, 'image_count': 9}, {'id': 600, 'image_count': 14}, {'id': 601, 'image_count': 37}, {'id': 602, 'image_count': 3}, {'id': 603, 'image_count': 4}, {'id': 604, 'image_count': 100}, {'id': 605, 'image_count': 195}, {'id': 606, 'image_count': 1}, {'id': 607, 'image_count': 12}, {'id': 608, 'image_count': 24}, {'id': 609, 'image_count': 489}, {'id': 610, 'image_count': 10}, {'id': 611, 'image_count': 1689}, {'id': 612, 'image_count': 42}, {'id': 613, 'image_count': 81}, {'id': 614, 'image_count': 894}, {'id': 615, 'image_count': 1868}, {'id': 616, 'image_count': 7}, {'id': 617, 'image_count': 1567}, {'id': 618, 'image_count': 10}, {'id': 619, 'image_count': 8}, {'id': 620, 'image_count': 7}, {'id': 621, 'image_count': 629}, {'id': 622, 'image_count': 89}, {'id': 623, 'image_count': 15}, {'id': 624, 'image_count': 134}, {'id': 625, 'image_count': 4}, {'id': 626, 'image_count': 1802}, {'id': 627, 'image_count': 595}, {'id': 628, 'image_count': 1210}, {'id': 629, 'image_count': 48}, {'id': 630, 'image_count': 418}, {'id': 631, 'image_count': 1846}, {'id': 632, 'image_count': 5}, {'id': 633, 'image_count': 221}, {'id': 634, 'image_count': 10}, {'id': 635, 'image_count': 7}, {'id': 636, 'image_count': 76}, {'id': 637, 'image_count': 22}, {'id': 638, 'image_count': 10}, {'id': 639, 'image_count': 341}, {'id': 640, 'image_count': 1}, {'id': 641, 'image_count': 705}, {'id': 642, 'image_count': 1900}, {'id': 643, 'image_count': 188}, {'id': 644, 'image_count': 227}, {'id': 645, 'image_count': 861}, {'id': 646, 'image_count': 6}, {'id': 647, 'image_count': 115}, {'id': 648, 'image_count': 5}, {'id': 649, 'image_count': 43}, {'id': 650, 
'image_count': 14}, {'id': 651, 'image_count': 6}, {'id': 652, 'image_count': 15}, {'id': 653, 'image_count': 1167}, {'id': 654, 'image_count': 15}, {'id': 655, 'image_count': 994}, {'id': 656, 'image_count': 28}, {'id': 657, 'image_count': 2}, {'id': 658, 'image_count': 338}, {'id': 659, 'image_count': 334}, {'id': 660, 'image_count': 15}, {'id': 661, 'image_count': 102}, {'id': 662, 'image_count': 1}, {'id': 663, 'image_count': 8}, {'id': 664, 'image_count': 1}, {'id': 665, 'image_count': 1}, {'id': 666, 'image_count': 28}, {'id': 667, 'image_count': 91}, {'id': 668, 'image_count': 260}, {'id': 669, 'image_count': 131}, {'id': 670, 'image_count': 128}, {'id': 671, 'image_count': 3}, {'id': 672, 'image_count': 10}, {'id': 673, 'image_count': 39}, {'id': 674, 'image_count': 2}, {'id': 675, 'image_count': 925}, {'id': 676, 'image_count': 354}, {'id': 677, 'image_count': 31}, {'id': 678, 'image_count': 10}, {'id': 679, 'image_count': 215}, {'id': 680, 'image_count': 71}, {'id': 681, 'image_count': 43}, {'id': 682, 'image_count': 28}, {'id': 683, 'image_count': 34}, {'id': 684, 'image_count': 16}, {'id': 685, 'image_count': 273}, {'id': 686, 'image_count': 2}, {'id': 687, 'image_count': 999}, {'id': 688, 'image_count': 4}, {'id': 689, 'image_count': 107}, {'id': 690, 'image_count': 2}, {'id': 691, 'image_count': 1}, {'id': 692, 'image_count': 454}, {'id': 693, 'image_count': 9}, {'id': 694, 'image_count': 1901}, {'id': 695, 'image_count': 61}, {'id': 696, 'image_count': 91}, {'id': 697, 'image_count': 46}, {'id': 698, 'image_count': 1402}, {'id': 699, 'image_count': 74}, {'id': 700, 'image_count': 421}, {'id': 701, 'image_count': 226}, {'id': 702, 'image_count': 10}, {'id': 703, 'image_count': 1720}, {'id': 704, 'image_count': 261}, {'id': 705, 'image_count': 1337}, {'id': 706, 'image_count': 293}, {'id': 707, 'image_count': 62}, {'id': 708, 'image_count': 814}, {'id': 709, 'image_count': 407}, {'id': 710, 'image_count': 6}, {'id': 711, 'image_count': 16}, {'id': 712, 'image_count': 7}, {'id': 713, 'image_count': 1791}, {'id': 714, 'image_count': 2}, {'id': 715, 'image_count': 1915}, {'id': 716, 'image_count': 1940}, {'id': 717, 'image_count': 13}, {'id': 718, 'image_count': 16}, {'id': 719, 'image_count': 448}, {'id': 720, 'image_count': 12}, {'id': 721, 'image_count': 18}, {'id': 722, 'image_count': 4}, {'id': 723, 'image_count': 71}, {'id': 724, 'image_count': 189}, {'id': 725, 'image_count': 74}, {'id': 726, 'image_count': 103}, {'id': 727, 'image_count': 3}, {'id': 728, 'image_count': 110}, {'id': 729, 'image_count': 5}, {'id': 730, 'image_count': 9}, {'id': 731, 'image_count': 15}, {'id': 732, 'image_count': 25}, {'id': 733, 'image_count': 7}, {'id': 734, 'image_count': 647}, {'id': 735, 'image_count': 824}, {'id': 736, 'image_count': 100}, {'id': 737, 'image_count': 47}, {'id': 738, 'image_count': 121}, {'id': 739, 'image_count': 731}, {'id': 740, 'image_count': 73}, {'id': 741, 'image_count': 49}, {'id': 742, 'image_count': 23}, {'id': 743, 'image_count': 4}, {'id': 744, 'image_count': 62}, {'id': 745, 'image_count': 118}, {'id': 746, 'image_count': 99}, {'id': 747, 'image_count': 40}, {'id': 748, 'image_count': 1036}, {'id': 749, 'image_count': 105}, {'id': 750, 'image_count': 21}, {'id': 751, 'image_count': 229}, {'id': 752, 'image_count': 7}, {'id': 753, 'image_count': 72}, {'id': 754, 'image_count': 9}, {'id': 755, 'image_count': 10}, {'id': 756, 'image_count': 328}, {'id': 757, 'image_count': 468}, {'id': 758, 'image_count': 1}, {'id': 759, 'image_count': 2}, {'id': 760, 
'image_count': 24}, {'id': 761, 'image_count': 11}, {'id': 762, 'image_count': 72}, {'id': 763, 'image_count': 17}, {'id': 764, 'image_count': 10}, {'id': 765, 'image_count': 17}, {'id': 766, 'image_count': 489}, {'id': 767, 'image_count': 47}, {'id': 768, 'image_count': 93}, {'id': 769, 'image_count': 1}, {'id': 770, 'image_count': 12}, {'id': 771, 'image_count': 228}, {'id': 772, 'image_count': 5}, {'id': 773, 'image_count': 76}, {'id': 774, 'image_count': 71}, {'id': 775, 'image_count': 30}, {'id': 776, 'image_count': 109}, {'id': 777, 'image_count': 14}, {'id': 778, 'image_count': 1}, {'id': 779, 'image_count': 8}, {'id': 780, 'image_count': 26}, {'id': 781, 'image_count': 339}, {'id': 782, 'image_count': 153}, {'id': 783, 'image_count': 2}, {'id': 784, 'image_count': 3}, {'id': 785, 'image_count': 8}, {'id': 786, 'image_count': 47}, {'id': 787, 'image_count': 8}, {'id': 788, 'image_count': 6}, {'id': 789, 'image_count': 116}, {'id': 790, 'image_count': 69}, {'id': 791, 'image_count': 13}, {'id': 792, 'image_count': 6}, {'id': 793, 'image_count': 1928}, {'id': 794, 'image_count': 79}, {'id': 795, 'image_count': 14}, {'id': 796, 'image_count': 7}, {'id': 797, 'image_count': 20}, {'id': 798, 'image_count': 114}, {'id': 799, 'image_count': 221}, {'id': 800, 'image_count': 502}, {'id': 801, 'image_count': 62}, {'id': 802, 'image_count': 87}, {'id': 803, 'image_count': 4}, {'id': 804, 'image_count': 1912}, {'id': 805, 'image_count': 7}, {'id': 806, 'image_count': 186}, {'id': 807, 'image_count': 18}, {'id': 808, 'image_count': 4}, {'id': 809, 'image_count': 3}, {'id': 810, 'image_count': 7}, {'id': 811, 'image_count': 1413}, {'id': 812, 'image_count': 7}, {'id': 813, 'image_count': 12}, {'id': 814, 'image_count': 248}, {'id': 815, 'image_count': 4}, {'id': 816, 'image_count': 1881}, {'id': 817, 'image_count': 529}, {'id': 818, 'image_count': 1932}, {'id': 819, 'image_count': 50}, {'id': 820, 'image_count': 3}, {'id': 821, 'image_count': 28}, {'id': 822, 'image_count': 10}, {'id': 823, 'image_count': 5}, {'id': 824, 'image_count': 5}, {'id': 825, 'image_count': 18}, {'id': 826, 'image_count': 14}, {'id': 827, 'image_count': 1890}, {'id': 828, 'image_count': 660}, {'id': 829, 'image_count': 8}, {'id': 830, 'image_count': 25}, {'id': 831, 'image_count': 10}, {'id': 832, 'image_count': 218}, {'id': 833, 'image_count': 36}, {'id': 834, 'image_count': 16}, {'id': 835, 'image_count': 808}, {'id': 836, 'image_count': 479}, {'id': 837, 'image_count': 1404}, {'id': 838, 'image_count': 307}, {'id': 839, 'image_count': 57}, {'id': 840, 'image_count': 28}, {'id': 841, 'image_count': 80}, {'id': 842, 'image_count': 11}, {'id': 843, 'image_count': 92}, {'id': 844, 'image_count': 20}, {'id': 845, 'image_count': 194}, {'id': 846, 'image_count': 23}, {'id': 847, 'image_count': 52}, {'id': 848, 'image_count': 673}, {'id': 849, 'image_count': 2}, {'id': 850, 'image_count': 2}, {'id': 851, 'image_count': 1}, {'id': 852, 'image_count': 2}, {'id': 853, 'image_count': 8}, {'id': 854, 'image_count': 80}, {'id': 855, 'image_count': 3}, {'id': 856, 'image_count': 3}, {'id': 857, 'image_count': 15}, {'id': 858, 'image_count': 2}, {'id': 859, 'image_count': 10}, {'id': 860, 'image_count': 386}, {'id': 861, 'image_count': 65}, {'id': 862, 'image_count': 3}, {'id': 863, 'image_count': 35}, {'id': 864, 'image_count': 5}, {'id': 865, 'image_count': 180}, {'id': 866, 'image_count': 99}, {'id': 867, 'image_count': 49}, {'id': 868, 'image_count': 28}, {'id': 869, 'image_count': 1}, {'id': 870, 'image_count': 52}, {'id': 871, 
'image_count': 36}, {'id': 872, 'image_count': 70}, {'id': 873, 'image_count': 6}, {'id': 874, 'image_count': 29}, {'id': 875, 'image_count': 24}, {'id': 876, 'image_count': 1115}, {'id': 877, 'image_count': 61}, {'id': 878, 'image_count': 18}, {'id': 879, 'image_count': 18}, {'id': 880, 'image_count': 665}, {'id': 881, 'image_count': 1096}, {'id': 882, 'image_count': 29}, {'id': 883, 'image_count': 8}, {'id': 884, 'image_count': 14}, {'id': 885, 'image_count': 1622}, {'id': 886, 'image_count': 2}, {'id': 887, 'image_count': 3}, {'id': 888, 'image_count': 32}, {'id': 889, 'image_count': 55}, {'id': 890, 'image_count': 1}, {'id': 891, 'image_count': 10}, {'id': 892, 'image_count': 10}, {'id': 893, 'image_count': 47}, {'id': 894, 'image_count': 3}, {'id': 895, 'image_count': 29}, {'id': 896, 'image_count': 342}, {'id': 897, 'image_count': 25}, {'id': 898, 'image_count': 1469}, {'id': 899, 'image_count': 521}, {'id': 900, 'image_count': 347}, {'id': 901, 'image_count': 35}, {'id': 902, 'image_count': 7}, {'id': 903, 'image_count': 207}, {'id': 904, 'image_count': 108}, {'id': 905, 'image_count': 2}, {'id': 906, 'image_count': 34}, {'id': 907, 'image_count': 12}, {'id': 908, 'image_count': 10}, {'id': 909, 'image_count': 13}, {'id': 910, 'image_count': 361}, {'id': 911, 'image_count': 1023}, {'id': 912, 'image_count': 782}, {'id': 913, 'image_count': 2}, {'id': 914, 'image_count': 5}, {'id': 915, 'image_count': 247}, {'id': 916, 'image_count': 221}, {'id': 917, 'image_count': 4}, {'id': 918, 'image_count': 8}, {'id': 919, 'image_count': 158}, {'id': 920, 'image_count': 3}, {'id': 921, 'image_count': 752}, {'id': 922, 'image_count': 64}, {'id': 923, 'image_count': 707}, {'id': 924, 'image_count': 143}, {'id': 925, 'image_count': 1}, {'id': 926, 'image_count': 49}, {'id': 927, 'image_count': 126}, {'id': 928, 'image_count': 76}, {'id': 929, 'image_count': 11}, {'id': 930, 'image_count': 11}, {'id': 931, 'image_count': 4}, {'id': 932, 'image_count': 39}, {'id': 933, 'image_count': 11}, {'id': 934, 'image_count': 13}, {'id': 935, 'image_count': 91}, {'id': 936, 'image_count': 14}, {'id': 937, 'image_count': 5}, {'id': 938, 'image_count': 3}, {'id': 939, 'image_count': 10}, {'id': 940, 'image_count': 18}, {'id': 941, 'image_count': 9}, {'id': 942, 'image_count': 6}, {'id': 943, 'image_count': 951}, {'id': 944, 'image_count': 2}, {'id': 945, 'image_count': 1}, {'id': 946, 'image_count': 19}, {'id': 947, 'image_count': 1942}, {'id': 948, 'image_count': 1916}, {'id': 949, 'image_count': 139}, {'id': 950, 'image_count': 43}, {'id': 951, 'image_count': 1969}, {'id': 952, 'image_count': 5}, {'id': 953, 'image_count': 134}, {'id': 954, 'image_count': 74}, {'id': 955, 'image_count': 381}, {'id': 956, 'image_count': 1}, {'id': 957, 'image_count': 381}, {'id': 958, 'image_count': 6}, {'id': 959, 'image_count': 1826}, {'id': 960, 'image_count': 28}, {'id': 961, 'image_count': 1635}, {'id': 962, 'image_count': 1967}, {'id': 963, 'image_count': 16}, {'id': 964, 'image_count': 1926}, {'id': 965, 'image_count': 1789}, {'id': 966, 'image_count': 401}, {'id': 967, 'image_count': 1968}, {'id': 968, 'image_count': 1167}, {'id': 969, 'image_count': 1}, {'id': 970, 'image_count': 56}, {'id': 971, 'image_count': 17}, {'id': 972, 'image_count': 1}, {'id': 973, 'image_count': 58}, {'id': 974, 'image_count': 9}, {'id': 975, 'image_count': 8}, {'id': 976, 'image_count': 1124}, {'id': 977, 'image_count': 31}, {'id': 978, 'image_count': 16}, {'id': 979, 'image_count': 491}, {'id': 980, 'image_count': 432}, {'id': 981, 
'image_count': 1945}, {'id': 982, 'image_count': 1899}, {'id': 983, 'image_count': 5}, {'id': 984, 'image_count': 28}, {'id': 985, 'image_count': 7}, {'id': 986, 'image_count': 146}, {'id': 987, 'image_count': 1}, {'id': 988, 'image_count': 25}, {'id': 989, 'image_count': 22}, {'id': 990, 'image_count': 1}, {'id': 991, 'image_count': 10}, {'id': 992, 'image_count': 9}, {'id': 993, 'image_count': 308}, {'id': 994, 'image_count': 4}, {'id': 995, 'image_count': 1969}, {'id': 996, 'image_count': 45}, {'id': 997, 'image_count': 12}, {'id': 998, 'image_count': 1}, {'id': 999, 'image_count': 85}, {'id': 1000, 'image_count': 1127}, {'id': 1001, 'image_count': 11}, {'id': 1002, 'image_count': 60}, {'id': 1003, 'image_count': 1}, {'id': 1004, 'image_count': 16}, {'id': 1005, 'image_count': 1}, {'id': 1006, 'image_count': 65}, {'id': 1007, 'image_count': 13}, {'id': 1008, 'image_count': 655}, {'id': 1009, 'image_count': 51}, {'id': 1010, 'image_count': 1}, {'id': 1011, 'image_count': 673}, {'id': 1012, 'image_count': 5}, {'id': 1013, 'image_count': 36}, {'id': 1014, 'image_count': 54}, {'id': 1015, 'image_count': 5}, {'id': 1016, 'image_count': 8}, {'id': 1017, 'image_count': 305}, {'id': 1018, 'image_count': 297}, {'id': 1019, 'image_count': 1053}, {'id': 1020, 'image_count': 223}, {'id': 1021, 'image_count': 1037}, {'id': 1022, 'image_count': 63}, {'id': 1023, 'image_count': 1881}, {'id': 1024, 'image_count': 507}, {'id': 1025, 'image_count': 333}, {'id': 1026, 'image_count': 1911}, {'id': 1027, 'image_count': 1765}, {'id': 1028, 'image_count': 1}, {'id': 1029, 'image_count': 5}, {'id': 1030, 'image_count': 1}, {'id': 1031, 'image_count': 9}, {'id': 1032, 'image_count': 2}, {'id': 1033, 'image_count': 151}, {'id': 1034, 'image_count': 82}, {'id': 1035, 'image_count': 1931}, {'id': 1036, 'image_count': 41}, {'id': 1037, 'image_count': 1895}, {'id': 1038, 'image_count': 24}, {'id': 1039, 'image_count': 22}, {'id': 1040, 'image_count': 35}, {'id': 1041, 'image_count': 69}, {'id': 1042, 'image_count': 962}, {'id': 1043, 'image_count': 588}, {'id': 1044, 'image_count': 21}, {'id': 1045, 'image_count': 825}, {'id': 1046, 'image_count': 52}, {'id': 1047, 'image_count': 5}, {'id': 1048, 'image_count': 5}, {'id': 1049, 'image_count': 5}, {'id': 1050, 'image_count': 1860}, {'id': 1051, 'image_count': 56}, {'id': 1052, 'image_count': 1582}, {'id': 1053, 'image_count': 7}, {'id': 1054, 'image_count': 2}, {'id': 1055, 'image_count': 1562}, {'id': 1056, 'image_count': 1885}, {'id': 1057, 'image_count': 1}, {'id': 1058, 'image_count': 5}, {'id': 1059, 'image_count': 137}, {'id': 1060, 'image_count': 1094}, {'id': 1061, 'image_count': 134}, {'id': 1062, 'image_count': 29}, {'id': 1063, 'image_count': 22}, {'id': 1064, 'image_count': 522}, {'id': 1065, 'image_count': 50}, {'id': 1066, 'image_count': 68}, {'id': 1067, 'image_count': 16}, {'id': 1068, 'image_count': 40}, {'id': 1069, 'image_count': 35}, {'id': 1070, 'image_count': 135}, {'id': 1071, 'image_count': 1413}, {'id': 1072, 'image_count': 772}, {'id': 1073, 'image_count': 50}, {'id': 1074, 'image_count': 1015}, {'id': 1075, 'image_count': 1}, {'id': 1076, 'image_count': 65}, {'id': 1077, 'image_count': 1900}, {'id': 1078, 'image_count': 1302}, {'id': 1079, 'image_count': 1977}, {'id': 1080, 'image_count': 2}, {'id': 1081, 'image_count': 29}, {'id': 1082, 'image_count': 36}, {'id': 1083, 'image_count': 138}, {'id': 1084, 'image_count': 4}, {'id': 1085, 'image_count': 67}, {'id': 1086, 'image_count': 26}, {'id': 1087, 'image_count': 25}, {'id': 1088, 
'image_count': 33}, {'id': 1089, 'image_count': 37}, {'id': 1090, 'image_count': 50}, {'id': 1091, 'image_count': 270}, {'id': 1092, 'image_count': 12}, {'id': 1093, 'image_count': 316}, {'id': 1094, 'image_count': 41}, {'id': 1095, 'image_count': 224}, {'id': 1096, 'image_count': 105}, {'id': 1097, 'image_count': 1925}, {'id': 1098, 'image_count': 1021}, {'id': 1099, 'image_count': 1213}, {'id': 1100, 'image_count': 172}, {'id': 1101, 'image_count': 28}, {'id': 1102, 'image_count': 745}, {'id': 1103, 'image_count': 187}, {'id': 1104, 'image_count': 147}, {'id': 1105, 'image_count': 136}, {'id': 1106, 'image_count': 34}, {'id': 1107, 'image_count': 41}, {'id': 1108, 'image_count': 636}, {'id': 1109, 'image_count': 570}, {'id': 1110, 'image_count': 1149}, {'id': 1111, 'image_count': 61}, {'id': 1112, 'image_count': 1890}, {'id': 1113, 'image_count': 18}, {'id': 1114, 'image_count': 143}, {'id': 1115, 'image_count': 1517}, {'id': 1116, 'image_count': 7}, {'id': 1117, 'image_count': 943}, {'id': 1118, 'image_count': 6}, {'id': 1119, 'image_count': 1}, {'id': 1120, 'image_count': 11}, {'id': 1121, 'image_count': 101}, {'id': 1122, 'image_count': 1909}, {'id': 1123, 'image_count': 800}, {'id': 1124, 'image_count': 1}, {'id': 1125, 'image_count': 44}, {'id': 1126, 'image_count': 3}, {'id': 1127, 'image_count': 44}, {'id': 1128, 'image_count': 31}, {'id': 1129, 'image_count': 7}, {'id': 1130, 'image_count': 20}, {'id': 1131, 'image_count': 11}, {'id': 1132, 'image_count': 13}, {'id': 1133, 'image_count': 1924}, {'id': 1134, 'image_count': 113}, {'id': 1135, 'image_count': 2}, {'id': 1136, 'image_count': 139}, {'id': 1137, 'image_count': 12}, {'id': 1138, 'image_count': 37}, {'id': 1139, 'image_count': 1866}, {'id': 1140, 'image_count': 47}, {'id': 1141, 'image_count': 1468}, {'id': 1142, 'image_count': 729}, {'id': 1143, 'image_count': 24}, {'id': 1144, 'image_count': 1}, {'id': 1145, 'image_count': 10}, {'id': 1146, 'image_count': 3}, {'id': 1147, 'image_count': 14}, {'id': 1148, 'image_count': 4}, {'id': 1149, 'image_count': 29}, {'id': 1150, 'image_count': 4}, {'id': 1151, 'image_count': 70}, {'id': 1152, 'image_count': 46}, {'id': 1153, 'image_count': 14}, {'id': 1154, 'image_count': 48}, {'id': 1155, 'image_count': 1855}, {'id': 1156, 'image_count': 113}, {'id': 1157, 'image_count': 1}, {'id': 1158, 'image_count': 1}, {'id': 1159, 'image_count': 10}, {'id': 1160, 'image_count': 54}, {'id': 1161, 'image_count': 1923}, {'id': 1162, 'image_count': 630}, {'id': 1163, 'image_count': 31}, {'id': 1164, 'image_count': 69}, {'id': 1165, 'image_count': 7}, {'id': 1166, 'image_count': 11}, {'id': 1167, 'image_count': 1}, {'id': 1168, 'image_count': 30}, {'id': 1169, 'image_count': 50}, {'id': 1170, 'image_count': 45}, {'id': 1171, 'image_count': 28}, {'id': 1172, 'image_count': 114}, {'id': 1173, 'image_count': 193}, {'id': 1174, 'image_count': 21}, {'id': 1175, 'image_count': 91}, {'id': 1176, 'image_count': 31}, {'id': 1177, 'image_count': 1469}, {'id': 1178, 'image_count': 1924}, {'id': 1179, 'image_count': 87}, {'id': 1180, 'image_count': 77}, {'id': 1181, 'image_count': 11}, {'id': 1182, 'image_count': 47}, {'id': 1183, 'image_count': 21}, {'id': 1184, 'image_count': 47}, {'id': 1185, 'image_count': 70}, {'id': 1186, 'image_count': 1838}, {'id': 1187, 'image_count': 19}, {'id': 1188, 'image_count': 531}, {'id': 1189, 'image_count': 11}, {'id': 1190, 'image_count': 941}, {'id': 1191, 'image_count': 113}, {'id': 1192, 'image_count': 26}, {'id': 1193, 'image_count': 5}, {'id': 1194, 'image_count': 
56}, {'id': 1195, 'image_count': 73}, {'id': 1196, 'image_count': 32}, {'id': 1197, 'image_count': 128}, {'id': 1198, 'image_count': 623}, {'id': 1199, 'image_count': 12}, {'id': 1200, 'image_count': 52}, {'id': 1201, 'image_count': 11}, {'id': 1202, 'image_count': 1674}, {'id': 1203, 'image_count': 81}] # noqa +# fmt: on diff --git a/data_processing/detectron2/detectron2/data/datasets/pascal_voc.py b/data_processing/detectron2/detectron2/data/datasets/pascal_voc.py new file mode 100644 index 0000000..dbbf82c --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/pascal_voc.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import numpy as np +import os +import xml.etree.ElementTree as ET +from typing import List, Tuple, Union + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.structures import BoxMode +from detectron2.utils.file_io import PathManager + +__all__ = ["load_voc_instances", "register_pascal_voc"] + + +# fmt: off +CLASS_NAMES = ( + "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", + "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", + "pottedplant", "sheep", "sofa", "train", "tvmonitor" +) +# fmt: on + + +def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]): + """ + Load Pascal VOC detection annotations to Detectron2 format. + + Args: + dirname: Contain "Annotations", "ImageSets", "JPEGImages" + split (str): one of "train", "test", "val", "trainval" + class_names: list or tuple of class names + """ + with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f: + fileids = np.loadtxt(f, dtype=str) + + # Needs to read many small annotation files; it makes sense to do this locally. + annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/")) + dicts = [] + for fileid in fileids: + anno_file = os.path.join(annotation_dirname, fileid + ".xml") + jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg") + + with PathManager.open(anno_file) as f: + tree = ET.parse(f) + + r = { + "file_name": jpeg_file, + "image_id": fileid, + "height": int(tree.findall("./size/height")[0].text), + "width": int(tree.findall("./size/width")[0].text), + } + instances = [] + + for obj in tree.findall("object"): + cls = obj.find("name").text + # We include "difficult" samples in training. + # Based on limited experiments, they don't hurt accuracy. + # difficult = int(obj.find("difficult").text) + # if difficult == 1: + # continue + bbox = obj.find("bndbox") + bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] + # Original annotations are integers in the range [1, W or H] + # Assuming they mean 1-based pixel indices (inclusive), + # a box with annotation (xmin=1, xmax=W) covers the whole image.
+ # In coordinate space this is represented by (xmin=0, xmax=W) + bbox[0] -= 1.0 + bbox[1] -= 1.0 + instances.append( + {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS} + ) + r["annotations"] = instances + dicts.append(r) + return dicts + + +def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES): + DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names)) + MetadataCatalog.get(name).set( + thing_classes=list(class_names), dirname=dirname, year=year, split=split + ) diff --git a/data_processing/detectron2/detectron2/data/datasets/register_coco.py b/data_processing/detectron2/detectron2/data/datasets/register_coco.py new file mode 100644 index 0000000..e564438 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/datasets/register_coco.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .coco import register_coco_instances # noqa +from .coco_panoptic import register_coco_panoptic_separated # noqa diff --git a/data_processing/detectron2/detectron2/data/detection_utils.py b/data_processing/detectron2/detectron2/data/detection_utils.py new file mode 100644 index 0000000..ada19bd --- /dev/null +++ b/data_processing/detectron2/detectron2/data/detection_utils.py @@ -0,0 +1,659 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +Common data processing utilities that are used in a +typical object detection data pipeline. +""" +import logging +import numpy as np +from typing import List, Union +import pycocotools.mask as mask_util +import torch +from PIL import Image + +from detectron2.structures import ( + BitMasks, + Boxes, + BoxMode, + Instances, + Keypoints, + PolygonMasks, + RotatedBoxes, + polygons_to_bitmask, +) +from detectron2.utils.file_io import PathManager + +from . import transforms as T +from .catalog import MetadataCatalog + +__all__ = [ + "SizeMismatchError", + "convert_image_to_rgb", + "check_image_size", + "transform_proposals", + "transform_instance_annotations", + "annotations_to_instances", + "annotations_to_instances_rotated", + "build_augmentation", + "build_transform_gen", + "create_keypoint_hflip_indices", + "filter_empty_instances", + "read_image", +] + + +class SizeMismatchError(ValueError): + """ + When loaded image has difference width/height compared with annotation. + """ + + +# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601 +_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]] +_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]] + +# https://www.exiv2.org/tags.html +_EXIF_ORIENT = 274 # exif 'Orientation' tag + + +def convert_PIL_to_numpy(image, format): + """ + Convert PIL image to numpy array of target format. 
+ + Args: + image (PIL.Image): a PIL image + format (str): the format of output image + + Returns: + (np.ndarray): also see `read_image` + """ + if format is not None: + # PIL only supports RGB, so convert to RGB and flip channels over below + conversion_format = format + if format in ["BGR", "YUV-BT.601"]: + conversion_format = "RGB" + image = image.convert(conversion_format) + image = np.asarray(image) + # PIL squeezes out the channel dimension for "L", so make it HWC + if format == "L": + image = np.expand_dims(image, -1) + + # handle formats not supported by PIL + elif format == "BGR": + # flip channels if needed + image = image[:, :, ::-1] + elif format == "YUV-BT.601": + image = image / 255.0 + image = np.dot(image, np.array(_M_RGB2YUV).T) + + return image + + +def convert_image_to_rgb(image, format): + """ + Convert an image from given format to RGB. + + Args: + image (np.ndarray or Tensor): an HWC image + format (str): the format of input image, also see `read_image` + + Returns: + (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8 + """ + if isinstance(image, torch.Tensor): + image = image.cpu().numpy() + if format == "BGR": + image = image[:, :, [2, 1, 0]] + elif format == "YUV-BT.601": + image = np.dot(image, np.array(_M_YUV2RGB).T) + image = image * 255.0 + else: + if format == "L": + image = image[:, :, 0] + image = image.astype(np.uint8) + image = np.asarray(Image.fromarray(image, mode=format).convert("RGB")) + return image + + +def _apply_exif_orientation(image): + """ + Applies the exif orientation correctly. + + This code exists per the bug: + https://github.com/python-pillow/Pillow/issues/3973 + with the function `ImageOps.exif_transpose`. The Pillow source raises errors with + various methods, especially `tobytes` + + Function based on: + https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59 + https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527 + + Args: + image (PIL.Image): a PIL image + + Returns: + (PIL.Image): the PIL image with exif orientation applied, if applicable + """ + if not hasattr(image, "getexif"): + return image + + try: + exif = image.getexif() + except Exception: # https://github.com/facebookresearch/detectron2/issues/1885 + exif = None + + if exif is None: + return image + + orientation = exif.get(_EXIF_ORIENT) + + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + + if method is not None: + return image.transpose(method) + return image + + +def read_image(file_name, format=None): + """ + Read an image into the given format. + Will apply rotation and flipping if the image has such exif information. + + Args: + file_name (str): image file path + format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601". + + Returns: + image (np.ndarray): + an HWC image in the given format, which is 0-255, uint8 for + supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601. 
+ """ + with PathManager.open(file_name, "rb") as f: + image = Image.open(f) + + # work around this bug: https://github.com/python-pillow/Pillow/issues/3973 + image = _apply_exif_orientation(image) + return convert_PIL_to_numpy(image, format) + + +def check_image_size(dataset_dict, image): + """ + Raise an error if the image does not match the size specified in the dict. + """ + if "width" in dataset_dict or "height" in dataset_dict: + image_wh = (image.shape[1], image.shape[0]) + expected_wh = (dataset_dict["width"], dataset_dict["height"]) + if not image_wh == expected_wh: + raise SizeMismatchError( + "Mismatched image shape{}, got {}, expect {}.".format( + " for image " + dataset_dict["file_name"] + if "file_name" in dataset_dict + else "", + image_wh, + expected_wh, + ) + + " Please check the width/height in your annotation." + ) + + # To ensure bbox always remap to original image size + if "width" not in dataset_dict: + dataset_dict["width"] = image.shape[1] + if "height" not in dataset_dict: + dataset_dict["height"] = image.shape[0] + + +def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0): + """ + Apply transformations to the proposals in dataset_dict, if any. + + Args: + dataset_dict (dict): a dict read from the dataset, possibly + contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode" + image_shape (tuple): height, width + transforms (TransformList): + proposal_topk (int): only keep top-K scoring proposals + min_box_size (int): proposals with either side smaller than this + threshold are removed + + The input dict is modified in-place, with abovementioned keys removed. A new + key "proposals" will be added. Its value is an `Instances` + object which contains the transformed proposals in its field + "proposal_boxes" and "objectness_logits". + """ + if "proposal_boxes" in dataset_dict: + # Transform proposal boxes + boxes = transforms.apply_box( + BoxMode.convert( + dataset_dict.pop("proposal_boxes"), + dataset_dict.pop("proposal_bbox_mode"), + BoxMode.XYXY_ABS, + ) + ) + boxes = Boxes(boxes) + objectness_logits = torch.as_tensor( + dataset_dict.pop("proposal_objectness_logits").astype("float32") + ) + + boxes.clip(image_shape) + keep = boxes.nonempty(threshold=min_box_size) + boxes = boxes[keep] + objectness_logits = objectness_logits[keep] + + proposals = Instances(image_shape) + proposals.proposal_boxes = boxes[:proposal_topk] + proposals.objectness_logits = objectness_logits[:proposal_topk] + dataset_dict["proposals"] = proposals + + +def get_bbox(annotation): + """ + Get bbox from data + Args: + annotation (dict): dict of instance annotations for a single instance. + Returns: + bbox (ndarray): x1, y1, x2, y2 coordinates + """ + # bbox is 1d (per-instance bounding box) + bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) + return bbox + + +def transform_instance_annotations( + annotation, transforms, image_size, *, keypoint_hflip_indices=None +): + """ + Apply transforms to box, segmentation and keypoints annotations of a single instance. + + It will use `transforms.apply_box` for the box, and + `transforms.apply_coords` for segmentation polygons & keypoints. + If you need anything more specially designed for each data structure, + you'll need to implement your own version of this function or the transforms. + + Args: + annotation (dict): dict of instance annotations for a single instance. 
+ It will be modified in-place. + transforms (TransformList or list[Transform]): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + + Returns: + dict: + the same input dict with fields "bbox", "segmentation", "keypoints" + transformed according to `transforms`. + The "bbox_mode" field will be set to XYXY_ABS. + """ + if isinstance(transforms, (tuple, list)): + transforms = T.TransformList(transforms) + # bbox is 1d (per-instance bounding box) + bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) + # clip transformed bbox to image size + bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0) + annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1]) + annotation["bbox_mode"] = BoxMode.XYXY_ABS + + if "segmentation" in annotation: + # each instance contains 1 or more polygons + segm = annotation["segmentation"] + if isinstance(segm, list): + # polygons + polygons = [np.asarray(p).reshape(-1, 2) for p in segm] + annotation["segmentation"] = [ + p.reshape(-1) for p in transforms.apply_polygons(polygons) + ] + elif isinstance(segm, dict): + # RLE + mask = mask_util.decode(segm) + mask = transforms.apply_segmentation(mask) + assert tuple(mask.shape[:2]) == image_size + annotation["segmentation"] = mask + else: + raise ValueError( + "Cannot transform segmentation of type '{}'!" + "Supported types are: polygons as list[list[float] or ndarray]," + " COCO-style RLE as a dict.".format(type(segm)) + ) + + if "keypoints" in annotation: + keypoints = transform_keypoint_annotations( + annotation["keypoints"], transforms, image_size, keypoint_hflip_indices + ) + annotation["keypoints"] = keypoints + + return annotation + + +def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None): + """ + Transform keypoint annotations of an image. + If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0) + + Args: + keypoints (list[float]): Nx3 float in Detectron2's Dataset format. + Each point is represented by (x, y, visibility). + transforms (TransformList): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + When `transforms` includes horizontal flip, will use the index + mapping to flip keypoints. + """ + # (N*3,) -> (N, 3) + keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3) + keypoints_xy = transforms.apply_coords(keypoints[:, :2]) + + # Set all out-of-boundary points to "unlabeled" + inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1])) + inside = inside.all(axis=1) + keypoints[:, :2] = keypoints_xy + keypoints[:, 2][~inside] = 0 + + # This assumes that HorizFlipTransform is the only one that does flip + do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 + + # Alternative way: check if probe points was horizontally flipped. 
+ # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]]) + # probe_aug = transforms.apply_coords(probe.copy()) + # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa + + # If flipped, swap each keypoint with its opposite-handed equivalent + if do_hflip: + if keypoint_hflip_indices is None: + raise ValueError("Cannot flip keypoints without providing flip indices!") + if len(keypoints) != len(keypoint_hflip_indices): + raise ValueError( + "Keypoint data has {} points, but metadata " + "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices)) + ) + keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :] + + # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0 + keypoints[keypoints[:, 2] == 0] = 0 + return keypoints + + +def annotations_to_instances(annos, image_size, mask_format="polygon"): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. + image_size (tuple): height, width + + Returns: + Instances: + It will contain fields "gt_boxes", "gt_classes", + "gt_masks", "gt_keypoints", if they can be obtained from `annos`. + This is the format that builtin models expect. + """ + boxes = ( + np.stack( + [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos] + ) + if len(annos) + else np.zeros((0, 4)) + ) + target = Instances(image_size) + target.gt_boxes = Boxes(boxes) + + classes = [int(obj["category_id"]) for obj in annos] + classes = torch.tensor(classes, dtype=torch.int64) + target.gt_classes = classes + + if len(annos) and "segmentation" in annos[0]: + segms = [obj["segmentation"] for obj in annos] + if mask_format == "polygon": + try: + masks = PolygonMasks(segms) + except ValueError as e: + raise ValueError( + "Failed to use mask_format=='polygon' from the given annotations!" + ) from e + else: + assert mask_format == "bitmask", mask_format + masks = [] + for segm in segms: + if isinstance(segm, list): + # polygon + masks.append(polygons_to_bitmask(segm, *image_size)) + elif isinstance(segm, dict): + # COCO RLE + masks.append(mask_util.decode(segm)) + elif isinstance(segm, np.ndarray): + assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format( + segm.ndim + ) + # mask array + masks.append(segm) + else: + raise ValueError( + "Cannot convert segmentation of type '{}' to BitMasks!" + "Supported types are: polygons as list[list[float] or ndarray]," + " COCO-style RLE as a dict, or a binary segmentation mask " + " in a 2D numpy array of shape HxW.".format(type(segm)) + ) + # torch.from_numpy does not support array with negative stride. + masks = BitMasks( + torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks]) + ) + target.gt_masks = masks + + if len(annos) and "keypoints" in annos[0]: + kpts = [obj.get("keypoints", []) for obj in annos] + target.gt_keypoints = Keypoints(kpts) + + return target + + +def annotations_to_instances_rotated(annos, image_size): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + Compared to `annotations_to_instances`, this function is for rotated boxes only + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. 
+ image_size (tuple): height, width + + Returns: + Instances: + Containing fields "gt_boxes", "gt_classes", + if they can be obtained from `annos`. + This is the format that builtin models expect. + """ + boxes = [obj["bbox"] for obj in annos] + target = Instances(image_size) + boxes = target.gt_boxes = RotatedBoxes(boxes) + boxes.clip(image_size) + + classes = [obj["category_id"] for obj in annos] + classes = torch.tensor(classes, dtype=torch.int64) + target.gt_classes = classes + + return target + + +def filter_empty_instances( + instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False +): + """ + Filter out empty instances in an `Instances` object. + + Args: + instances (Instances): + by_box (bool): whether to filter out instances with empty boxes + by_mask (bool): whether to filter out instances with empty masks + box_threshold (float): minimum width and height to be considered non-empty + return_mask (bool): whether to return boolean mask of filtered instances + + Returns: + Instances: the filtered instances. + tensor[bool], optional: boolean mask of filtered instances + """ + assert by_box or by_mask + r = [] + if by_box: + r.append(instances.gt_boxes.nonempty(threshold=box_threshold)) + if instances.has("gt_masks") and by_mask: + r.append(instances.gt_masks.nonempty()) + + # TODO: can also filter visible keypoints + + if not r: + return instances + m = r[0] + for x in r[1:]: + m = m & x + if return_mask: + return instances[m], m + return instances[m] + + +def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]: + """ + Args: + dataset_names: list of dataset names + + Returns: + list[int]: a list of size=#keypoints, storing the + horizontally-flipped keypoint indices. + """ + if isinstance(dataset_names, str): + dataset_names = [dataset_names] + + check_metadata_consistency("keypoint_names", dataset_names) + check_metadata_consistency("keypoint_flip_map", dataset_names) + + meta = MetadataCatalog.get(dataset_names[0]) + names = meta.keypoint_names + # TODO flip -> hflip + flip_map = dict(meta.keypoint_flip_map) + flip_map.update({v: k for k, v in flip_map.items()}) + flipped_names = [i if i not in flip_map else flip_map[i] for i in names] + flip_indices = [names.index(i) for i in flipped_names] + return flip_indices + + +def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0): + """ + Get frequency weight for each class sorted by class id. + We now calcualte freqency weight using image_count to the power freq_weight_power. + + Args: + dataset_names: list of dataset names + freq_weight_power: power value + """ + if isinstance(dataset_names, str): + dataset_names = [dataset_names] + + check_metadata_consistency("class_image_count", dataset_names) + + meta = MetadataCatalog.get(dataset_names[0]) + class_freq_meta = meta.class_image_count + class_freq = torch.tensor( + [c["image_count"] for c in sorted(class_freq_meta, key=lambda x: x["id"])] + ) + class_freq_weight = class_freq.float() ** freq_weight_power + return class_freq_weight + + +def gen_crop_transform_with_instance(crop_size, image_size, instance): + """ + Generate a CropTransform so that the cropping region contains + the center of the given instance. + + Args: + crop_size (tuple): h, w in pixels + image_size (tuple): h, w + instance (dict): an annotation dict of one instance, in Detectron2's + dataset format. 
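
To make the federated-loss weighting above concrete without registering a dataset in `MetadataCatalog`, the snippet below reproduces the core of `get_fed_loss_cls_weights` on made-up `class_image_count` metadata; every number here is hypothetical.

```python
import torch

class_image_count = [  # hypothetical metadata entries
    {"id": 0, "image_count": 100},
    {"id": 1, "image_count": 25},
    {"id": 2, "image_count": 4},
]
freq_weight_power = 0.5

class_freq = torch.tensor(
    [c["image_count"] for c in sorted(class_image_count, key=lambda x: x["id"])]
)
# weight_c = image_count_c ** power -> tensor([10., 5., 2.]) for power = 0.5
class_freq_weight = class_freq.float() ** freq_weight_power
print(class_freq_weight)
```
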
+ """ + crop_size = np.asarray(crop_size, dtype=np.int32) + bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS) + center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5 + assert ( + image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1] + ), "The annotation bounding box is outside of the image!" + assert ( + image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1] + ), "Crop size is larger than image size!" + + min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0) + max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0) + max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32)) + + y0 = np.random.randint(min_yx[0], max_yx[0] + 1) + x0 = np.random.randint(min_yx[1], max_yx[1] + 1) + return T.CropTransform(x0, y0, crop_size[1], crop_size[0]) + + +def check_metadata_consistency(key, dataset_names): + """ + Check that the datasets have consistent metadata. + + Args: + key (str): a metadata key + dataset_names (list[str]): a list of dataset names + + Raises: + AttributeError: if the key does not exist in the metadata + ValueError: if the given datasets do not have the same metadata values defined by key + """ + if len(dataset_names) == 0: + return + logger = logging.getLogger(__name__) + entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names] + for idx, entry in enumerate(entries_per_dataset): + if entry != entries_per_dataset[0]: + logger.error( + "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry)) + ) + logger.error( + "Metadata '{}' for dataset '{}' is '{}'".format( + key, dataset_names[0], str(entries_per_dataset[0]) + ) + ) + raise ValueError("Datasets have different metadata '{}'!".format(key)) + + +def build_augmentation(cfg, is_train): + """ + Create a list of default :class:`Augmentation` from config. + Now it includes resizing and flipping. + + Returns: + list[Augmentation] + """ + if is_train: + min_size = cfg.INPUT.MIN_SIZE_TRAIN + max_size = cfg.INPUT.MAX_SIZE_TRAIN + sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + else: + min_size = cfg.INPUT.MIN_SIZE_TEST + max_size = cfg.INPUT.MAX_SIZE_TEST + sample_style = "choice" + augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)] + if is_train and cfg.INPUT.RANDOM_FLIP != "none": + augmentation.append( + T.RandomFlip( + horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal", + vertical=cfg.INPUT.RANDOM_FLIP == "vertical", + ) + ) + return augmentation + + +build_transform_gen = build_augmentation +""" +Alias for backward-compatibility. +""" diff --git a/data_processing/detectron2/detectron2/data/samplers/__init__.py b/data_processing/detectron2/detectron2/data/samplers/__init__.py new file mode 100644 index 0000000..85c9f1a --- /dev/null +++ b/data_processing/detectron2/detectron2/data/samplers/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from .distributed_sampler import ( + InferenceSampler, + RandomSubsetTrainingSampler, + RepeatFactorTrainingSampler, + TrainingSampler, +) + +from .grouped_batch_sampler import GroupedBatchSampler + +__all__ = [ + "GroupedBatchSampler", + "TrainingSampler", + "RandomSubsetTrainingSampler", + "InferenceSampler", + "RepeatFactorTrainingSampler", +] diff --git a/data_processing/detectron2/detectron2/data/samplers/distributed_sampler.py b/data_processing/detectron2/detectron2/data/samplers/distributed_sampler.py new file mode 100644 index 0000000..a098e6a --- /dev/null +++ b/data_processing/detectron2/detectron2/data/samplers/distributed_sampler.py @@ -0,0 +1,278 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import logging +import math +from collections import defaultdict +from typing import Optional +import torch +from torch.utils.data.sampler import Sampler + +from detectron2.utils import comm + +logger = logging.getLogger(__name__) + + +class TrainingSampler(Sampler): + """ + In training, we only care about the "infinite stream" of training data. + So this sampler produces an infinite stream of indices and + all workers cooperate to correctly shuffle the indices and sample different indices. + + The samplers in each worker effectively produces `indices[worker_id::num_workers]` + where `indices` is an infinite stream of indices consisting of + `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True) + or `range(size) + range(size) + ...` (if shuffle is False) + + Note that this sampler does not shard based on pytorch DataLoader worker id. + A sampler passed to pytorch DataLoader is used only with map-style dataset + and will not be executed inside workers. + But if this sampler is used in a way that it gets execute inside a dataloader + worker, then extra work needs to be done to shard its outputs based on worker id. + This is required so that workers don't produce identical data. + :class:`ToIterableDataset` implements this logic. + This note is true for all samplers in detectron2. + """ + + def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + shuffle (bool): whether to shuffle the indices or not + seed (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + if not isinstance(size, int): + raise TypeError(f"TrainingSampler(size=) expects an int. Got type {type(size)}.") + if size <= 0: + raise ValueError(f"TrainingSampler(size=) expects a positive int. Got {size}.") + self._size = size + self._shuffle = shuffle + if seed is None: + seed = comm.shared_random_seed() + self._seed = int(seed) + + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + + def __iter__(self): + start = self._rank + yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) + while True: + if self._shuffle: + yield from torch.randperm(self._size, generator=g).tolist() + else: + yield from torch.arange(self._size).tolist() + + +class RandomSubsetTrainingSampler(TrainingSampler): + """ + Similar to TrainingSampler, but only sample a random subset of indices. + This is useful when you want to estimate the accuracy vs data-number curves by + training the model with different subset_ratio. 
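
A minimal sketch of how `TrainingSampler` behaves, assuming a single-process (non-distributed) run so that rank 0 sees every index. The sampler is an infinite stream, so a few indices are taken with `itertools.islice`.

```python
import itertools

from detectron2.data.samplers import TrainingSampler

# Seeded shuffle over a dataset of 10 items; iteration never terminates on its own.
sampler = TrainingSampler(size=10, shuffle=True, seed=0)
first_eight = list(itertools.islice(iter(sampler), 8))
print(first_eight)  # a prefix of a permutation of range(10); exact order depends on the seed
```

In a multi-GPU job, each rank receives the strided slice `indices[rank::world_size]` of the same shuffled stream, which is how the workers cooperate without duplicating samples.
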
+ """ + + def __init__( + self, + size: int, + subset_ratio: float, + shuffle: bool = True, + seed_shuffle: Optional[int] = None, + seed_subset: Optional[int] = None, + ): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + subset_ratio (float): the ratio of subset data to sample from the underlying dataset + shuffle (bool): whether to shuffle the indices or not + seed_shuffle (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + seed_subset (int): the seed to randomize the subset to be sampled. + Must be the same across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle) + + assert 0.0 < subset_ratio <= 1.0 + self._size_subset = int(size * subset_ratio) + assert self._size_subset > 0 + if seed_subset is None: + seed_subset = comm.shared_random_seed() + self._seed_subset = int(seed_subset) + + # randomly generate the subset indexes to be sampled from + g = torch.Generator() + g.manual_seed(self._seed_subset) + indexes_randperm = torch.randperm(self._size, generator=g) + self._indexes_subset = indexes_randperm[: self._size_subset] + + logger.info("Using RandomSubsetTrainingSampler......") + logger.info(f"Randomly sample {self._size_subset} data from the original {self._size} data") + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__() + while True: + if self._shuffle: + # generate a random permutation to shuffle self._indexes_subset + randperm = torch.randperm(self._size_subset, generator=g) + yield from self._indexes_subset[randperm].tolist() + else: + yield from self._indexes_subset.tolist() + + +class RepeatFactorTrainingSampler(Sampler): + """ + Similar to TrainingSampler, but a sample may appear more times than others based + on its "repeat factor". This is suitable for training on class imbalanced datasets like LVIS. + """ + + def __init__(self, repeat_factors, *, shuffle=True, seed=None): + """ + Args: + repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's + full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``. + shuffle (bool): whether to shuffle the indices or not + seed (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + self._shuffle = shuffle + if seed is None: + seed = comm.shared_random_seed() + self._seed = int(seed) + + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + + # Split into whole number (_int_part) and fractional (_frac_part) parts. + self._int_part = torch.trunc(repeat_factors) + self._frac_part = repeat_factors - self._int_part + + @staticmethod + def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh): + """ + Compute (fractional) per-image repeat factors based on category frequency. + The repeat factor for an image is a function of the frequency of the rarest + category labeled in that image. The "frequency of category c" in [0, 1] is defined + as the fraction of images in the training set (without repeats) in which category c + appears. + See :paper:`lvis` (>= v2) Appendix B.2. 
+ + Args: + dataset_dicts (list[dict]): annotations in Detectron2 dataset format. + repeat_thresh (float): frequency threshold below which data is repeated. + If the frequency is half of `repeat_thresh`, the image will be + repeated twice. + + Returns: + torch.Tensor: + the i-th element is the repeat factor for the dataset image at index i. + """ + # 1. For each category c, compute the fraction of images that contain it: f(c) + category_freq = defaultdict(int) + for dataset_dict in dataset_dicts: # For each image (without repeats) + cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} + for cat_id in cat_ids: + category_freq[cat_id] += 1 + num_images = len(dataset_dicts) + for k, v in category_freq.items(): + category_freq[k] = v / num_images + + # 2. For each category c, compute the category-level repeat factor: + # r(c) = max(1, sqrt(t / f(c))) + category_rep = { + cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) + for cat_id, cat_freq in category_freq.items() + } + + # 3. For each image I, compute the image-level repeat factor: + # r(I) = max_{c in I} r(c) + rep_factors = [] + for dataset_dict in dataset_dicts: + cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} + rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0) + rep_factors.append(rep_factor) + + return torch.tensor(rep_factors, dtype=torch.float32) + + def _get_epoch_indices(self, generator): + """ + Create a list of dataset indices (with repeats) to use for one epoch. + + Args: + generator (torch.Generator): pseudo random number generator used for + stochastic rounding. + + Returns: + torch.Tensor: list of dataset indices to use in one epoch. Each index + is repeated based on its calculated repeat factor. + """ + # Since repeat factors are fractional, we use stochastic rounding so + # that the target repeat factor is achieved in expectation over the + # course of training + rands = torch.rand(len(self._frac_part), generator=generator) + rep_factors = self._int_part + (rands < self._frac_part).float() + # Construct a list of indices in which we repeat images as specified + indices = [] + for dataset_index, rep_factor in enumerate(rep_factors): + indices.extend([dataset_index] * int(rep_factor.item())) + return torch.tensor(indices, dtype=torch.int64) + + def __iter__(self): + start = self._rank + yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) + while True: + # Sample indices with repeats determined by stochastic rounding; each + # "epoch" may have a slightly different size due to the rounding. + indices = self._get_epoch_indices(g) + if self._shuffle: + randperm = torch.randperm(len(indices), generator=g) + yield from indices[randperm].tolist() + else: + yield from indices.tolist() + + +class InferenceSampler(Sampler): + """ + Produce indices for inference across all workers. + Inference needs to run on the __exact__ set of samples, + therefore when the total number of samples is not divisible by the number of workers, + this sampler produces different number of samples on different workers. 
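
Worked numbers for the LVIS-style repeat factors computed above: `f(c)` is the fraction of images containing category `c`, `r(c) = max(1, sqrt(repeat_thresh / f(c)))`, and each image gets the maximum `r(c)` over its categories. The annotations below are toy records with only the fields this static method reads.

```python
from detectron2.data.samplers import RepeatFactorTrainingSampler

dataset_dicts = [  # hypothetical Detectron2-format annotations
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 0}, {"category_id": 1}]},
    {"annotations": [{"category_id": 0}]},
]
# f(0) = 4/4 = 1.0, f(1) = 1/4 = 0.25; with repeat_thresh = 0.5:
# r(0) = max(1, sqrt(0.5 / 1.0)) = 1.0, r(1) = max(1, sqrt(0.5 / 0.25)) ~= 1.414
rep = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
    dataset_dicts, repeat_thresh=0.5
)
print(rep)  # tensor([1.0000, 1.0000, 1.4142, 1.0000])
```
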
+ """ + + def __init__(self, size: int): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + """ + self._size = size + assert size > 0 + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + self._local_indices = self._get_local_indices(size, self._world_size, self._rank) + + @staticmethod + def _get_local_indices(total_size, world_size, rank): + shard_size = total_size // world_size + left = total_size % world_size + shard_sizes = [shard_size + int(r < left) for r in range(world_size)] + + begin = sum(shard_sizes[:rank]) + end = min(sum(shard_sizes[: rank + 1]), total_size) + return range(begin, end) + + def __iter__(self): + yield from self._local_indices + + def __len__(self): + return len(self._local_indices) diff --git a/data_processing/detectron2/detectron2/data/samplers/grouped_batch_sampler.py b/data_processing/detectron2/detectron2/data/samplers/grouped_batch_sampler.py new file mode 100644 index 0000000..5b24773 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/samplers/grouped_batch_sampler.py @@ -0,0 +1,47 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from torch.utils.data.sampler import BatchSampler, Sampler + + +class GroupedBatchSampler(BatchSampler): + """ + Wraps another sampler to yield a mini-batch of indices. + It enforces that the batch only contain elements from the same group. + It also tries to provide mini-batches which follows an ordering which is + as close as possible to the ordering from the original sampler. + """ + + def __init__(self, sampler, group_ids, batch_size): + """ + Args: + sampler (Sampler): Base sampler. + group_ids (list[int]): If the sampler produces indices in range [0, N), + `group_ids` must be a list of `N` ints which contains the group id of each sample. + The group ids must be a set of integers in the range [0, num_groups). + batch_size (int): Size of mini-batch. + """ + if not isinstance(sampler, Sampler): + raise ValueError( + "sampler should be an instance of " + "torch.utils.data.Sampler, but got sampler={}".format(sampler) + ) + self.sampler = sampler + self.group_ids = np.asarray(group_ids) + assert self.group_ids.ndim == 1 + self.batch_size = batch_size + groups = np.unique(self.group_ids).tolist() + + # buffer the indices of each group until batch size is reached + self.buffer_per_group = {k: [] for k in groups} + + def __iter__(self): + for idx in self.sampler: + group_id = self.group_ids[idx] + group_buffer = self.buffer_per_group[group_id] + group_buffer.append(idx) + if len(group_buffer) == self.batch_size: + yield group_buffer[:] # yield a copy of the list + del group_buffer[:] + + def __len__(self): + raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.") diff --git a/data_processing/detectron2/detectron2/data/transforms/__init__.py b/data_processing/detectron2/detectron2/data/transforms/__init__.py new file mode 100644 index 0000000..ab3c63b --- /dev/null +++ b/data_processing/detectron2/detectron2/data/transforms/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from fvcore.transforms.transform import Transform, TransformList # order them first +from fvcore.transforms.transform import * +from .transform import * +from .augmentation import * +from .augmentation_impl import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +from detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/data_processing/detectron2/detectron2/data/transforms/augmentation.py b/data_processing/detectron2/detectron2/data/transforms/augmentation.py new file mode 100644 index 0000000..63dd41a --- /dev/null +++ b/data_processing/detectron2/detectron2/data/transforms/augmentation.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import inspect +import numpy as np +import pprint +from typing import Any, List, Optional, Tuple, Union +from fvcore.transforms.transform import Transform, TransformList + +""" +See "Data Augmentation" tutorial for an overview of the system: +https://detectron2.readthedocs.io/tutorials/augmentation.html +""" + + +__all__ = [ + "Augmentation", + "AugmentationList", + "AugInput", + "TransformGen", + "apply_transform_gens", + "StandardAugInput", + "apply_augmentations", +] + + +def _check_img_dtype(img): + assert isinstance(img, np.ndarray), "[Augmentation] Needs an numpy array, but got a {}!".format( + type(img) + ) + assert not isinstance(img.dtype, np.integer) or ( + img.dtype == np.uint8 + ), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format( + img.dtype + ) + assert img.ndim in [2, 3], img.ndim + + +def _get_aug_input_args(aug, aug_input) -> List[Any]: + """ + Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``. + """ + if aug.input_args is None: + # Decide what attributes are needed automatically + prms = list(inspect.signature(aug.get_transform).parameters.items()) + # The default behavior is: if there is one parameter, then its "image" + # (work automatically for majority of use cases, and also avoid BC breaking), + # Otherwise, use the argument names. + if len(prms) == 1: + names = ("image",) + else: + names = [] + for name, prm in prms: + if prm.kind in ( + inspect.Parameter.VAR_POSITIONAL, + inspect.Parameter.VAR_KEYWORD, + ): + raise TypeError( + f""" \ +The default implementation of `{type(aug)}.__call__` does not allow \ +`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \ +If arguments are unknown, reimplement `__call__` instead. \ +""" + ) + names.append(name) + aug.input_args = tuple(names) + + args = [] + for f in aug.input_args: + try: + args.append(getattr(aug_input, f)) + except AttributeError as e: + raise AttributeError( + f"{type(aug)}.get_transform needs input attribute '{f}', " + f"but it is not an attribute of {type(aug_input)}!" + ) from e + return args + + +class Augmentation: + """ + Augmentation defines (often random) policies/strategies to generate :class:`Transform` + from data. It is often used for pre-processing of input data. + + A "policy" that generates a :class:`Transform` may, in the most general case, + need arbitrary information from input data in order to determine what transforms + to apply. Therefore, each :class:`Augmentation` instance defines the arguments + needed by its :meth:`get_transform` method. When called with the positional arguments, + the :meth:`get_transform` method executes the policy. 
+ + Note that :class:`Augmentation` defines the policies to create a :class:`Transform`, + but not how to execute the actual transform operations to those data. + Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform. + + The returned `Transform` object is meant to describe deterministic transformation, which means + it can be re-applied on associated data, e.g. the geometry of an image and its segmentation + masks need to be transformed together. + (If such re-application is not needed, then determinism is not a crucial requirement.) + """ + + input_args: Optional[Tuple[str]] = None + """ + Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``. + By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only + contain "image". As long as the argument name convention is followed, there is no need for + users to touch this attribute. + """ + + def _init(self, params=None): + if params: + for k, v in params.items(): + if k != "self" and not k.startswith("_"): + setattr(self, k, v) + + def get_transform(self, *args) -> Transform: + """ + Execute the policy based on input data, and decide what transform to apply to inputs. + + Args: + args: Any fixed-length positional arguments. By default, the name of the arguments + should exist in the :class:`AugInput` to be used. + + Returns: + Transform: Returns the deterministic transform to apply to the input. + + Examples: + :: + class MyAug: + # if a policy needs to know both image and semantic segmentation + def get_transform(image, sem_seg) -> T.Transform: + pass + tfm: Transform = MyAug().get_transform(image, sem_seg) + new_image = tfm.apply_image(image) + + Notes: + Users can freely use arbitrary new argument names in custom + :meth:`get_transform` method, as long as they are available in the + input data. In detectron2 we use the following convention: + + * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or + floating point in range [0, 1] or [0, 255]. + * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes + of N instances. Each is in XYXY format in unit of absolute coordinates. + * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel. + + We do not specify convention for other types and do not include builtin + :class:`Augmentation` that uses other types in detectron2. + """ + raise NotImplementedError + + def __call__(self, aug_input) -> Transform: + """ + Augment the given `aug_input` **in-place**, and return the transform that's used. + + This method will be called to apply the augmentation. In most augmentation, it + is enough to use the default implementation, which calls :meth:`get_transform` + using the inputs. But a subclass can overwrite it to have more complicated logic. + + Args: + aug_input (AugInput): an object that has attributes needed by this augmentation + (defined by ``self.get_transform``). Its ``transform`` method will be called + to in-place transform it. + + Returns: + Transform: the transform that is applied on the input. + """ + args = _get_aug_input_args(self, aug_input) + tfm = self.get_transform(*args) + assert isinstance(tfm, (Transform, TransformList)), ( + f"{type(self)}.get_transform must return an instance of Transform! " + f"Got {type(tfm)} instead." + ) + aug_input.transform(tfm) + return tfm + + def _rand_range(self, low=1.0, high=None, size=None): + """ + Uniform float random number between low and high. 
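
The contract documented above can be summarized with a tiny custom policy: subclass `Augmentation`, and let `get_transform()` return a deterministic `Transform` based on the inputs it declares (here just `image`). `RandomHFlipHalf` is a hypothetical example class, not part of detectron2.

```python
import numpy as np
from fvcore.transforms.transform import HFlipTransform, NoOpTransform

from detectron2.data import transforms as T


class RandomHFlipHalf(T.Augmentation):
    """Hypothetical example: flip horizontally with probability 0.5."""

    def get_transform(self, image):
        h, w = image.shape[:2]
        # _rand_range() is the Augmentation helper: a uniform float in [0, 1).
        return HFlipTransform(w) if self._rand_range() < 0.5 else NoOpTransform()


image = np.zeros((480, 640, 3), dtype=np.uint8)
tfm = RandomHFlipHalf().get_transform(image)  # a deterministic Transform
new_image = tfm.apply_image(image)            # the same tfm can be re-applied to masks, boxes, ...
```
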
+ """ + if high is None: + low, high = 0, low + if size is None: + size = [] + return np.random.uniform(low, high, size) + + def __repr__(self): + """ + Produce something like: + "MyAugmentation(field1={self.field1}, field2={self.field2})" + """ + try: + sig = inspect.signature(self.__init__) + classname = type(self).__name__ + argstr = [] + for name, param in sig.parameters.items(): + assert ( + param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD + ), "The default __repr__ doesn't support *args or **kwargs" + assert hasattr(self, name), ( + "Attribute {} not found! " + "Default __repr__ only works if attributes match the constructor.".format(name) + ) + attr = getattr(self, name) + default = param.default + if default is attr: + continue + attr_str = pprint.pformat(attr) + if "\n" in attr_str: + # don't show it if pformat decides to use >1 lines + attr_str = "..." + argstr.append("{}={}".format(name, attr_str)) + return "{}({})".format(classname, ", ".join(argstr)) + except AssertionError: + return super().__repr__() + + __str__ = __repr__ + + +class _TransformToAug(Augmentation): + def __init__(self, tfm: Transform): + self.tfm = tfm + + def get_transform(self, *args): + return self.tfm + + def __repr__(self): + return repr(self.tfm) + + __str__ = __repr__ + + +def _transform_to_aug(tfm_or_aug): + """ + Wrap Transform into Augmentation. + Private, used internally to implement augmentations. + """ + assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug + if isinstance(tfm_or_aug, Augmentation): + return tfm_or_aug + else: + return _TransformToAug(tfm_or_aug) + + +class AugmentationList(Augmentation): + """ + Apply a sequence of augmentations. + + It has ``__call__`` method to apply the augmentations. + + Note that :meth:`get_transform` method is impossible (will throw error if called) + for :class:`AugmentationList`, because in order to apply a sequence of augmentations, + the kth augmentation must be applied first, to provide inputs needed by the (k+1)th + augmentation. + """ + + def __init__(self, augs): + """ + Args: + augs (list[Augmentation or Transform]): + """ + super().__init__() + self.augs = [_transform_to_aug(x) for x in augs] + + def __call__(self, aug_input) -> TransformList: + tfms = [] + for x in self.augs: + tfm = x(aug_input) + tfms.append(tfm) + return TransformList(tfms) + + def __repr__(self): + msgs = [str(x) for x in self.augs] + return "AugmentationList[{}]".format(", ".join(msgs)) + + __str__ = __repr__ + + +class AugInput: + """ + Input that can be used with :meth:`Augmentation.__call__`. + This is a standard implementation for the majority of use cases. + This class provides the standard attributes **"image", "boxes", "sem_seg"** + defined in :meth:`__init__` and they may be needed by different augmentations. + Most augmentation policies do not need attributes beyond these three. + + After applying augmentations to these attributes (using :meth:`AugInput.transform`), + the returned transforms can then be used to transform other data structures that users have. + + Examples: + :: + input = AugInput(image, boxes=boxes) + tfms = augmentation(input) + transformed_image = input.image + transformed_boxes = input.boxes + transformed_other_data = tfms.apply_other(other_data) + + An extended project that works with new data types may implement augmentation policies + that need other inputs. An algorithm may need to transform inputs in a way different + from the standard approach defined in this class. 
In those rare situations, users can + implement a class similar to this class, that satify the following condition: + + * The input must provide access to these data in the form of attribute access + (``getattr``). For example, if an :class:`Augmentation` to be applied needs "image" + and "sem_seg" arguments, its input must have the attribute "image" and "sem_seg". + * The input must have a ``transform(tfm: Transform) -> None`` method which + in-place transforms all its attributes. + """ + + # TODO maybe should support more builtin data types here + def __init__( + self, + image: np.ndarray, + *, + boxes: Optional[np.ndarray] = None, + sem_seg: Optional[np.ndarray] = None, + ): + """ + Args: + image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or + floating point in range [0, 1] or [0, 255]. The meaning of C is up + to users. + boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode + sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element + is an integer label of pixel. + """ + _check_img_dtype(image) + self.image = image + self.boxes = boxes + self.sem_seg = sem_seg + + def transform(self, tfm: Transform) -> None: + """ + In-place transform all attributes of this class. + + By "in-place", it means after calling this method, accessing an attribute such + as ``self.image`` will return transformed data. + """ + self.image = tfm.apply_image(self.image) + if self.boxes is not None: + self.boxes = tfm.apply_box(self.boxes) + if self.sem_seg is not None: + self.sem_seg = tfm.apply_segmentation(self.sem_seg) + + def apply_augmentations( + self, augmentations: List[Union[Augmentation, Transform]] + ) -> TransformList: + """ + Equivalent of ``AugmentationList(augmentations)(self)`` + """ + return AugmentationList(augmentations)(self) + + +def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs): + """ + Use ``T.AugmentationList(augmentations)(inputs)`` instead. + """ + if isinstance(inputs, np.ndarray): + # handle the common case of image-only Augmentation, also for backward compatibility + image_only = True + inputs = AugInput(inputs) + else: + image_only = False + tfms = inputs.apply_augmentations(augmentations) + return inputs.image if image_only else inputs, tfms + + +apply_transform_gens = apply_augmentations +""" +Alias for backward-compatibility. +""" + +TransformGen = Augmentation +""" +Alias for Augmentation, since it is something that generates :class:`Transform`s +""" + +StandardAugInput = AugInput +""" +Alias for compatibility. It's not worth the complexity to have two classes. +""" diff --git a/data_processing/detectron2/detectron2/data/transforms/augmentation_impl.py b/data_processing/detectron2/detectron2/data/transforms/augmentation_impl.py new file mode 100644 index 0000000..cc270cd --- /dev/null +++ b/data_processing/detectron2/detectron2/data/transforms/augmentation_impl.py @@ -0,0 +1,736 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Implement many useful :class:`Augmentation`. 
+""" +import numpy as np +import sys +from numpy import random +from typing import Tuple +import torch +from fvcore.transforms.transform import ( + BlendTransform, + CropTransform, + HFlipTransform, + NoOpTransform, + PadTransform, + Transform, + TransformList, + VFlipTransform, +) +from PIL import Image + +from detectron2.structures import Boxes, pairwise_iou + +from .augmentation import Augmentation, _transform_to_aug +from .transform import ExtentTransform, ResizeTransform, RotationTransform + +__all__ = [ + "FixedSizeCrop", + "RandomApply", + "RandomBrightness", + "RandomContrast", + "RandomCrop", + "RandomExtent", + "RandomFlip", + "RandomSaturation", + "RandomLighting", + "RandomRotation", + "Resize", + "ResizeScale", + "ResizeShortestEdge", + "RandomCrop_CategoryAreaConstraint", + "RandomResize", + "MinIoURandomCrop", +] + + +class RandomApply(Augmentation): + """ + Randomly apply an augmentation with a given probability. + """ + + def __init__(self, tfm_or_aug, prob=0.5): + """ + Args: + tfm_or_aug (Transform, Augmentation): the transform or augmentation + to be applied. It can either be a `Transform` or `Augmentation` + instance. + prob (float): probability between 0.0 and 1.0 that + the wrapper transformation is applied + """ + super().__init__() + self.aug = _transform_to_aug(tfm_or_aug) + assert 0.0 <= prob <= 1.0, f"Probablity must be between 0.0 and 1.0 (given: {prob})" + self.prob = prob + + def get_transform(self, *args): + do = self._rand_range() < self.prob + if do: + return self.aug.get_transform(*args) + else: + return NoOpTransform() + + def __call__(self, aug_input): + do = self._rand_range() < self.prob + if do: + return self.aug(aug_input) + else: + return NoOpTransform() + + +class RandomFlip(Augmentation): + """ + Flip the image horizontally or vertically with the given probability. + """ + + def __init__(self, prob=0.5, *, horizontal=True, vertical=False): + """ + Args: + prob (float): probability of flip. + horizontal (boolean): whether to apply horizontal flipping + vertical (boolean): whether to apply vertical flipping + """ + super().__init__() + + if horizontal and vertical: + raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.") + if not horizontal and not vertical: + raise ValueError("At least one of horiz or vert has to be True!") + self._init(locals()) + + def get_transform(self, image): + h, w = image.shape[:2] + do = self._rand_range() < self.prob + if do: + if self.horizontal: + return HFlipTransform(w) + elif self.vertical: + return VFlipTransform(h) + else: + return NoOpTransform() + + +class Resize(Augmentation): + """Resize image to a fixed target size""" + + def __init__(self, shape, interp=Image.BILINEAR): + """ + Args: + shape: (h, w) tuple or a int + interp: PIL interpolation method + """ + if isinstance(shape, int): + shape = (shape, shape) + shape = tuple(shape) + self._init(locals()) + + def get_transform(self, image): + return ResizeTransform( + image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp + ) + + +class ResizeShortestEdge(Augmentation): + """ + Resize the image while keeping the aspect ratio unchanged. + It attempts to scale the shorter edge to the given `short_edge_length`, + as long as the longer edge does not exceed `max_size`. + If `max_size` is reached, then downscale so that the longer edge does not exceed max_size. 
+ """ + + @torch.jit.unused + def __init__( + self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR + ): + """ + Args: + short_edge_length (list[int]): If ``sample_style=="range"``, + a [min, max] interval from which to sample the shortest edge length. + If ``sample_style=="choice"``, a list of shortest edge lengths to sample from. + max_size (int): maximum allowed longest edge length. + sample_style (str): either "range" or "choice". + """ + super().__init__() + assert sample_style in ["range", "choice"], sample_style + + self.is_range = sample_style == "range" + if isinstance(short_edge_length, int): + short_edge_length = (short_edge_length, short_edge_length) + if self.is_range: + assert len(short_edge_length) == 2, ( + "short_edge_length must be two values using 'range' sample style." + f" Got {short_edge_length}!" + ) + self._init(locals()) + + @torch.jit.unused + def get_transform(self, image): + h, w = image.shape[:2] + if self.is_range: + size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) + else: + size = np.random.choice(self.short_edge_length) + if size == 0: + return NoOpTransform() + + newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size) + return ResizeTransform(h, w, newh, neww, self.interp) + + @staticmethod + def get_output_shape( + oldh: int, oldw: int, short_edge_length: int, max_size: int + ) -> Tuple[int, int]: + """ + Compute the output size given input size and target short edge length. + """ + h, w = oldh, oldw + size = short_edge_length * 1.0 + scale = size / min(h, w) + if h < w: + newh, neww = size, scale * w + else: + newh, neww = scale * h, size + if max(newh, neww) > max_size: + scale = max_size * 1.0 / max(newh, neww) + newh = newh * scale + neww = neww * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return (newh, neww) + + +class ResizeScale(Augmentation): + """ + Takes target size as input and randomly scales the given target size between `min_scale` + and `max_scale`. It then scales the input image such that it fits inside the scaled target + box, keeping the aspect ratio constant. + This implements the resize part of the Google's 'resize_and_crop' data augmentation: + https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127 + """ + + def __init__( + self, + min_scale: float, + max_scale: float, + target_height: int, + target_width: int, + interp: int = Image.BILINEAR, + ): + """ + Args: + min_scale: minimum image scale range. + max_scale: maximum image scale range. + target_height: target image height. + target_width: target image width. + interp: image interpolation method. + """ + super().__init__() + self._init(locals()) + + def _get_resize(self, image: np.ndarray, scale: float) -> Transform: + input_size = image.shape[:2] + + # Compute new target size given a scale. + target_size = (self.target_height, self.target_width) + target_scale_size = np.multiply(target_size, scale) + + # Compute actual rescaling applied to input image and output size. 
+ output_scale = np.minimum( + target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1] + ) + output_size = np.round(np.multiply(input_size, output_scale)).astype(int) + + return ResizeTransform( + input_size[0], input_size[1], output_size[0], output_size[1], self.interp + ) + + def get_transform(self, image: np.ndarray) -> Transform: + random_scale = np.random.uniform(self.min_scale, self.max_scale) + return self._get_resize(image, random_scale) + + +class RandomRotation(Augmentation): + """ + This method returns a copy of this image, rotated the given + number of degrees counter clockwise around the given center. + """ + + def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None): + """ + Args: + angle (list[float]): If ``sample_style=="range"``, + a [min, max] interval from which to sample the angle (in degrees). + If ``sample_style=="choice"``, a list of angles to sample from + expand (bool): choose if the image should be resized to fit the whole + rotated image (default), or simply cropped + center (list[[float, float]]): If ``sample_style=="range"``, + a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center, + [0, 0] being the top left of the image and [1, 1] the bottom right. + If ``sample_style=="choice"``, a list of centers to sample from + Default: None, which means that the center of rotation is the center of the image + center has no effect if expand=True because it only affects shifting + """ + super().__init__() + assert sample_style in ["range", "choice"], sample_style + self.is_range = sample_style == "range" + if isinstance(angle, (float, int)): + angle = (angle, angle) + if center is not None and isinstance(center[0], (float, int)): + center = (center, center) + self._init(locals()) + + def get_transform(self, image): + h, w = image.shape[:2] + center = None + if self.is_range: + angle = np.random.uniform(self.angle[0], self.angle[1]) + if self.center is not None: + center = ( + np.random.uniform(self.center[0][0], self.center[1][0]), + np.random.uniform(self.center[0][1], self.center[1][1]), + ) + else: + angle = np.random.choice(self.angle) + if self.center is not None: + center = np.random.choice(self.center) + + if center is not None: + center = (w * center[0], h * center[1]) # Convert to absolute coordinates + + if angle % 360 == 0: + return NoOpTransform() + + return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp) + + +class FixedSizeCrop(Augmentation): + """ + If `crop_size` is smaller than the input image size, then it uses a random crop of + the crop size. If `crop_size` is larger than the input image size, then it pads + the right and the bottom of the image to the crop size if `pad` is True, otherwise + it returns the smaller image. + """ + + def __init__( + self, + crop_size: Tuple[int], + pad: bool = True, + pad_value: float = 128.0, + seg_pad_value: int = 255, + ): + """ + Args: + crop_size: target image (height, width). + pad: if True, will pad images smaller than `crop_size` up to `crop_size` + pad_value: the padding value to the image. + seg_pad_value: the padding value to the segmentation mask. + """ + super().__init__() + self._init(locals()) + + def _get_crop(self, image: np.ndarray) -> Transform: + # Compute the image scale and scaled size. + input_size = image.shape[:2] + output_size = self.crop_size + + # Add random crop if the image is scaled up. 
+ max_offset = np.subtract(input_size, output_size) + max_offset = np.maximum(max_offset, 0) + offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0)) + offset = np.round(offset).astype(int) + return CropTransform( + offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0] + ) + + def _get_pad(self, image: np.ndarray) -> Transform: + # Compute the image scale and scaled size. + input_size = image.shape[:2] + output_size = self.crop_size + + # Add padding if the image is scaled down. + pad_size = np.subtract(output_size, input_size) + pad_size = np.maximum(pad_size, 0) + original_size = np.minimum(input_size, output_size) + return PadTransform( + 0, + 0, + pad_size[1], + pad_size[0], + original_size[1], + original_size[0], + self.pad_value, + self.seg_pad_value, + ) + + def get_transform(self, image: np.ndarray) -> TransformList: + transforms = [self._get_crop(image)] + if self.pad: + transforms.append(self._get_pad(image)) + return TransformList(transforms) + + +class RandomCrop(Augmentation): + """ + Randomly crop a rectangle region out of an image. + """ + + def __init__(self, crop_type: str, crop_size): + """ + Args: + crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range". + crop_size (tuple[float, float]): two floats, explained below. + + - "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of + size (H, W). crop size should be in (0, 1] + - "relative_range": uniformly sample two values from [crop_size[0], 1] + and [crop_size[1]], 1], and use them as in "relative" crop type. + - "absolute" crop a (crop_size[0], crop_size[1]) region from input image. + crop_size must be smaller than the input image size. + - "absolute_range", for an input of size (H, W), uniformly sample H_crop in + [crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])]. + Then crop a region (H_crop, W_crop). 
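
To make the four `crop_type` conventions concrete, here are worked numbers for a 480x640 (H, W) image. The two "range" modes sample randomly, so only their output ranges are deterministic; this is a sketch, not part of the library.

```python
from detectron2.data import transforms as T

h, w = 480, 640
print(T.RandomCrop("relative", (0.5, 0.5)).get_crop_size((h, w)))        # (240, 320)
print(T.RandomCrop("absolute", (256, 256)).get_crop_size((h, w)))        # (256, 256)
# "relative_range" samples each ratio uniformly from [0.5, 1.0] before scaling;
# "absolute_range" samples H_crop in [256, min(H, 512)] and W_crop in [256, min(W, 512)]:
print(T.RandomCrop("relative_range", (0.5, 0.5)).get_crop_size((h, w)))  # between (240, 320) and (480, 640)
print(T.RandomCrop("absolute_range", (256, 512)).get_crop_size((h, w)))  # H in [256, 480], W in [256, 512]
```
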
+ """ + # TODO style of relative_range and absolute_range are not consistent: + # one takes (h, w) but another takes (min, max) + super().__init__() + assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"] + self._init(locals()) + + def get_transform(self, image): + h, w = image.shape[:2] + croph, cropw = self.get_crop_size((h, w)) + assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self) + h0 = np.random.randint(h - croph + 1) + w0 = np.random.randint(w - cropw + 1) + return CropTransform(w0, h0, cropw, croph) + + def get_crop_size(self, image_size): + """ + Args: + image_size (tuple): height, width + + Returns: + crop_size (tuple): height, width in absolute pixels + """ + h, w = image_size + if self.crop_type == "relative": + ch, cw = self.crop_size + return int(h * ch + 0.5), int(w * cw + 0.5) + elif self.crop_type == "relative_range": + crop_size = np.asarray(self.crop_size, dtype=np.float32) + ch, cw = crop_size + np.random.rand(2) * (1 - crop_size) + return int(h * ch + 0.5), int(w * cw + 0.5) + elif self.crop_type == "absolute": + return (min(self.crop_size[0], h), min(self.crop_size[1], w)) + elif self.crop_type == "absolute_range": + assert self.crop_size[0] <= self.crop_size[1] + ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) + cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) + return ch, cw + else: + raise NotImplementedError("Unknown crop type {}".format(self.crop_type)) + + +class RandomCrop_CategoryAreaConstraint(Augmentation): + """ + Similar to :class:`RandomCrop`, but find a cropping window such that no single category + occupies a ratio of more than `single_category_max_area` in semantic segmentation ground + truth, which can cause unstability in training. The function attempts to find such a valid + cropping window for at most 10 times. + """ + + def __init__( + self, + crop_type: str, + crop_size, + single_category_max_area: float = 1.0, + ignored_category: int = None, + ): + """ + Args: + crop_type, crop_size: same as in :class:`RandomCrop` + single_category_max_area: the maximum allowed area ratio of a + category. Set to 1.0 to disable + ignored_category: allow this category in the semantic segmentation + ground truth to exceed the area ratio. Usually set to the category + that's ignored in training. + """ + self.crop_aug = RandomCrop(crop_type, crop_size) + self._init(locals()) + + def get_transform(self, image, sem_seg): + if self.single_category_max_area >= 1.0: + return self.crop_aug.get_transform(image) + else: + h, w = sem_seg.shape + for _ in range(10): + crop_size = self.crop_aug.get_crop_size((h, w)) + y0 = np.random.randint(h - crop_size[0] + 1) + x0 = np.random.randint(w - crop_size[1] + 1) + sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]] + labels, cnt = np.unique(sem_seg_temp, return_counts=True) + if self.ignored_category is not None: + cnt = cnt[labels != self.ignored_category] + if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area: + break + crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0]) + return crop_tfm + + +class RandomExtent(Augmentation): + """ + Outputs an image by cropping a random "subrect" of the source image. + + The subrect can be parameterized to include pixels outside the source image, + in which case they will be set to zeros (i.e. black). The size of the output + image will vary with the size of the random subrect. 
+ """ + + def __init__(self, scale_range, shift_range): + """ + Args: + output_size (h, w): Dimensions of output image + scale_range (l, h): Range of input-to-output size scaling factor + shift_range (x, y): Range of shifts of the cropped subrect. The rect + is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)], + where (w, h) is the (width, height) of the input image. Set each + component to zero to crop at the image's center. + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + img_h, img_w = image.shape[:2] + + # Initialize src_rect to fit the input image. + src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h]) + + # Apply a random scaling to the src_rect. + src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1]) + + # Apply a random shift to the coordinates origin. + src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5) + src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5) + + # Map src_rect coordinates into image coordinates (center at corner). + src_rect[0::2] += 0.5 * img_w + src_rect[1::2] += 0.5 * img_h + + return ExtentTransform( + src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]), + output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])), + ) + + +class RandomContrast(Augmentation): + """ + Randomly transforms image contrast. + + Contrast intensity is uniformly sampled in (intensity_min, intensity_max). + - intensity < 1 will reduce contrast + - intensity = 1 will preserve the input image + - intensity > 1 will increase contrast + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation + intensity_max (float): Maximum augmentation + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + w = np.random.uniform(self.intensity_min, self.intensity_max) + return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w) + + +class RandomBrightness(Augmentation): + """ + Randomly transforms image brightness. + + Brightness intensity is uniformly sampled in (intensity_min, intensity_max). + - intensity < 1 will reduce brightness + - intensity = 1 will preserve the input image + - intensity > 1 will increase brightness + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation + intensity_max (float): Maximum augmentation + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + w = np.random.uniform(self.intensity_min, self.intensity_max) + return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w) + + +class RandomSaturation(Augmentation): + """ + Randomly transforms saturation of an RGB image. + Input images are assumed to have 'RGB' channel order. + + Saturation intensity is uniformly sampled in (intensity_min, intensity_max). 
+ - intensity < 1 will reduce saturation (make the image more grayscale) + - intensity = 1 will preserve the input image + - intensity > 1 will increase saturation + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation (1 preserves input). + intensity_max (float): Maximum augmentation (1 preserves input). + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + assert image.shape[-1] == 3, "RandomSaturation only works on RGB images" + w = np.random.uniform(self.intensity_min, self.intensity_max) + grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis] + return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w) + + +class RandomLighting(Augmentation): + """ + The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet. + Input images are assumed to have 'RGB' channel order. + + The degree of color jittering is randomly sampled via a normal distribution, + with standard deviation given by the scale parameter. + """ + + def __init__(self, scale): + """ + Args: + scale (float): Standard deviation of principal component weighting. + """ + super().__init__() + self._init(locals()) + self.eigen_vecs = np.array( + [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]] + ) + self.eigen_vals = np.array([0.2175, 0.0188, 0.0045]) + + def get_transform(self, image): + assert image.shape[-1] == 3, "RandomLighting only works on RGB images" + weights = np.random.normal(scale=self.scale, size=3) + return BlendTransform( + src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0 + ) + + +class RandomResize(Augmentation): + """Randomly resize image to a target size in shape_list""" + + def __init__(self, shape_list, interp=Image.BILINEAR): + """ + Args: + shape_list: a list of shapes in (h, w) + interp: PIL interpolation method + """ + self.shape_list = shape_list + self._init(locals()) + + def get_transform(self, image): + shape_idx = np.random.randint(low=0, high=len(self.shape_list)) + h, w = self.shape_list[shape_idx] + return ResizeTransform(image.shape[0], image.shape[1], h, w, self.interp) + + +class MinIoURandomCrop(Augmentation): + """Random crop the image & bboxes, the cropped patches have minimum IoU + requirement with original image & bboxes, the IoU threshold is randomly + selected from min_ious. + + Args: + min_ious (tuple): minimum IoU threshold for all intersections with + bounding boxes + min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, + where a >= min_crop_size) + mode_trials: number of trials for sampling min_ious threshold + crop_trials: number of trials for sampling crop_size after cropping + """ + + def __init__( + self, + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3, + mode_trials=1000, + crop_trials=50, + ): + self.min_ious = min_ious + self.sample_mode = (1, *min_ious, 0) + self.min_crop_size = min_crop_size + self.mode_trials = mode_trials + self.crop_trials = crop_trials + + def get_transform(self, image, boxes): + """Call function to crop images and bounding boxes with minimum IoU + constraint. 
+ + Args: + boxes: ground truth boxes in (x1, y1, x2, y2) format + """ + if boxes is None: + return NoOpTransform() + h, w, c = image.shape + for _ in range(self.mode_trials): + mode = random.choice(self.sample_mode) + self.mode = mode + if mode == 1: + return NoOpTransform() + + min_iou = mode + for _ in range(self.crop_trials): + new_w = random.uniform(self.min_crop_size * w, w) + new_h = random.uniform(self.min_crop_size * h, h) + + # h / w in [0.5, 2] + if new_h / new_w < 0.5 or new_h / new_w > 2: + continue + + left = random.uniform(w - new_w) + top = random.uniform(h - new_h) + + patch = np.array((int(left), int(top), int(left + new_w), int(top + new_h))) + # Line or point crop is not allowed + if patch[2] == patch[0] or patch[3] == patch[1]: + continue + overlaps = pairwise_iou( + Boxes(patch.reshape(-1, 4)), Boxes(boxes.reshape(-1, 4)) + ).reshape(-1) + if len(overlaps) > 0 and overlaps.min() < min_iou: + continue + + # center of boxes should inside the crop img + # only adjust boxes and instance masks when the gt is not empty + if len(overlaps) > 0: + # adjust boxes + def is_center_of_bboxes_in_patch(boxes, patch): + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = ( + (center[:, 0] > patch[0]) + * (center[:, 1] > patch[1]) + * (center[:, 0] < patch[2]) + * (center[:, 1] < patch[3]) + ) + return mask + + mask = is_center_of_bboxes_in_patch(boxes, patch) + if not mask.any(): + continue + return CropTransform(int(left), int(top), int(new_w), int(new_h)) diff --git a/data_processing/detectron2/detectron2/data/transforms/transform.py b/data_processing/detectron2/detectron2/data/transforms/transform.py new file mode 100644 index 0000000..de44b99 --- /dev/null +++ b/data_processing/detectron2/detectron2/data/transforms/transform.py @@ -0,0 +1,351 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +See "Data Augmentation" tutorial for an overview of the system: +https://detectron2.readthedocs.io/tutorials/augmentation.html +""" + +import numpy as np +import torch +import torch.nn.functional as F +from fvcore.transforms.transform import ( + CropTransform, + HFlipTransform, + NoOpTransform, + Transform, + TransformList, +) +from PIL import Image + +try: + import cv2 # noqa +except ImportError: + # OpenCV is an optional dependency at the moment + pass + +__all__ = [ + "ExtentTransform", + "ResizeTransform", + "RotationTransform", + "ColorTransform", + "PILColorTransform", +] + + +class ExtentTransform(Transform): + """ + Extracts a subregion from the source image and scales it to the output size. + + The fill color is used to map pixels from the source rect that fall outside + the source image. 
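+
+    For example (coordinates are illustrative), the following extracts the 100x100
+    region whose top-left corner is at (10, 20) and rescales it to 64x64::
+
+        tfm = ExtentTransform(src_rect=(10, 20, 110, 120), output_size=(64, 64))
+        patch = tfm.apply_image(img)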
+ + See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform + """ + + def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0): + """ + Args: + src_rect (x0, y0, x1, y1): src coordinates + output_size (h, w): dst image size + interp: PIL interpolation methods + fill: Fill color used when src_rect extends outside image + """ + super().__init__() + self._set_attributes(locals()) + + def apply_image(self, img, interp=None): + h, w = self.output_size + if len(img.shape) > 2 and img.shape[2] == 1: + pil_image = Image.fromarray(img[:, :, 0], mode="L") + else: + pil_image = Image.fromarray(img) + pil_image = pil_image.transform( + size=(w, h), + method=Image.EXTENT, + data=self.src_rect, + resample=interp if interp else self.interp, + fill=self.fill, + ) + ret = np.asarray(pil_image) + if len(img.shape) > 2 and img.shape[2] == 1: + ret = np.expand_dims(ret, -1) + return ret + + def apply_coords(self, coords): + # Transform image center from source coordinates into output coordinates + # and then map the new origin to the corner of the output image. + h, w = self.output_size + x0, y0, x1, y1 = self.src_rect + new_coords = coords.astype(np.float32) + new_coords[:, 0] -= 0.5 * (x0 + x1) + new_coords[:, 1] -= 0.5 * (y0 + y1) + new_coords[:, 0] *= w / (x1 - x0) + new_coords[:, 1] *= h / (y1 - y0) + new_coords[:, 0] += 0.5 * w + new_coords[:, 1] += 0.5 * h + return new_coords + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=Image.NEAREST) + return segmentation + + +class ResizeTransform(Transform): + """ + Resize the image to a target size. + """ + + def __init__(self, h, w, new_h, new_w, interp=None): + """ + Args: + h, w (int): original image size + new_h, new_w (int): new image size + interp: PIL interpolation methods, defaults to bilinear. 
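+
+        A small sketch (shapes are illustrative)::
+
+            tfm = ResizeTransform(480, 640, 240, 320)  # halve a 480x640 image
+            small = tfm.apply_image(img)               # img must be 480x640
+            pts = tfm.apply_coords(coords)             # coords: Nx2 float array of (x, y)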
+ """ + # TODO decide on PIL vs opencv + super().__init__() + if interp is None: + interp = Image.BILINEAR + self._set_attributes(locals()) + + def apply_image(self, img, interp=None): + assert img.shape[:2] == (self.h, self.w) + assert len(img.shape) <= 4 + interp_method = interp if interp is not None else self.interp + + if img.dtype == np.uint8: + if len(img.shape) > 2 and img.shape[2] == 1: + pil_image = Image.fromarray(img[:, :, 0], mode="L") + else: + pil_image = Image.fromarray(img) + pil_image = pil_image.resize((self.new_w, self.new_h), interp_method) + ret = np.asarray(pil_image) + if len(img.shape) > 2 and img.shape[2] == 1: + ret = np.expand_dims(ret, -1) + else: + # PIL only supports uint8 + if any(x < 0 for x in img.strides): + img = np.ascontiguousarray(img) + img = torch.from_numpy(img) + shape = list(img.shape) + shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:] + img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw + _PIL_RESIZE_TO_INTERPOLATE_MODE = { + Image.NEAREST: "nearest", + Image.BILINEAR: "bilinear", + Image.BICUBIC: "bicubic", + } + mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method] + align_corners = None if mode == "nearest" else False + img = F.interpolate( + img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners + ) + shape[:2] = (self.new_h, self.new_w) + ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c) + + return ret + + def apply_coords(self, coords): + coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w) + coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h) + return coords + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=Image.NEAREST) + return segmentation + + def inverse(self): + return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp) + + +class RotationTransform(Transform): + """ + This method returns a copy of this image, rotated the given + number of degrees counter clockwise around its center. 
+ """ + + def __init__(self, h, w, angle, expand=True, center=None, interp=None): + """ + Args: + h, w (int): original image size + angle (float): degrees for rotation + expand (bool): choose if the image should be resized to fit the whole + rotated image (default), or simply cropped + center (tuple (width, height)): coordinates of the rotation center + if left to None, the center will be fit to the center of each image + center has no effect if expand=True because it only affects shifting + interp: cv2 interpolation method, default cv2.INTER_LINEAR + """ + super().__init__() + image_center = np.array((w / 2, h / 2)) + if center is None: + center = image_center + if interp is None: + interp = cv2.INTER_LINEAR + abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle)))) + if expand: + # find the new width and height bounds + bound_w, bound_h = np.rint( + [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin] + ).astype(int) + else: + bound_w, bound_h = w, h + + self._set_attributes(locals()) + self.rm_coords = self.create_rotation_matrix() + # Needed because of this problem https://github.com/opencv/opencv/issues/11784 + self.rm_image = self.create_rotation_matrix(offset=-0.5) + + def apply_image(self, img, interp=None): + """ + img should be a numpy array, formatted as Height * Width * Nchannels + """ + if len(img) == 0 or self.angle % 360 == 0: + return img + assert img.shape[:2] == (self.h, self.w) + interp = interp if interp is not None else self.interp + return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp) + + def apply_coords(self, coords): + """ + coords should be a N * 2 array-like, containing N couples of (x, y) points + """ + coords = np.asarray(coords, dtype=float) + if len(coords) == 0 or self.angle % 360 == 0: + return coords + return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :] + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST) + return segmentation + + def create_rotation_matrix(self, offset=0): + center = (self.center[0] + offset, self.center[1] + offset) + rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1) + if self.expand: + # Find the coordinates of the center of rotation in the new image + # The only point for which we know the future coordinates is the center of the image + rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :] + new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center + # shift the rotation center to the new coordinates + rm[:, 2] += new_center + return rm + + def inverse(self): + """ + The inverse is to rotate it back with expand, and crop to get the original shape. + """ + if not self.expand: # Not possible to inverse if a part of the image is lost + raise NotImplementedError() + rotation = RotationTransform( + self.bound_h, self.bound_w, -self.angle, True, None, self.interp + ) + crop = CropTransform( + (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h + ) + return TransformList([rotation, crop]) + + +class ColorTransform(Transform): + """ + Generic wrapper for any photometric transforms. + These transformations should only affect the color space and + not the coordinate space of the image (e.g. 
annotation + coordinates such as bounding boxes should not be changed) + """ + + def __init__(self, op): + """ + Args: + op (Callable): operation to be applied to the image, + which takes in an ndarray and returns an ndarray. + """ + if not callable(op): + raise ValueError("op parameter should be callable") + super().__init__() + self._set_attributes(locals()) + + def apply_image(self, img): + return self.op(img) + + def apply_coords(self, coords): + return coords + + def inverse(self): + return NoOpTransform() + + def apply_segmentation(self, segmentation): + return segmentation + + +class PILColorTransform(ColorTransform): + """ + Generic wrapper for PIL Photometric image transforms, + which affect the color space and not the coordinate + space of the image + """ + + def __init__(self, op): + """ + Args: + op (Callable): operation to be applied to the image, + which takes in a PIL Image and returns a transformed + PIL Image. + For reference on possible operations see: + - https://pillow.readthedocs.io/en/stable/ + """ + if not callable(op): + raise ValueError("op parameter should be callable") + super().__init__(op) + + def apply_image(self, img): + img = Image.fromarray(img) + return np.asarray(super().apply_image(img)) + + +def HFlip_rotated_box(transform, rotated_boxes): + """ + Apply the horizontal flip transform on rotated boxes. + + Args: + rotated_boxes (ndarray): Nx5 floating point array of + (x_center, y_center, width, height, angle_degrees) format + in absolute coordinates. + """ + # Transform x_center + rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0] + # Transform angle + rotated_boxes[:, 4] = -rotated_boxes[:, 4] + return rotated_boxes + + +def Resize_rotated_box(transform, rotated_boxes): + """ + Apply the resizing transform on rotated boxes. For details of how these (approximation) + formulas are derived, please refer to :meth:`RotatedBoxes.scale`. + + Args: + rotated_boxes (ndarray): Nx5 floating point array of + (x_center, y_center, width, height, angle_degrees) format + in absolute coordinates. + """ + scale_factor_x = transform.new_w * 1.0 / transform.w + scale_factor_y = transform.new_h * 1.0 / transform.h + rotated_boxes[:, 0] *= scale_factor_x + rotated_boxes[:, 1] *= scale_factor_y + theta = rotated_boxes[:, 4] * np.pi / 180.0 + c = np.cos(theta) + s = np.sin(theta) + rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s)) + rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c)) + rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi + + return rotated_boxes + + +HFlipTransform.register_type("rotated_box", HFlip_rotated_box) +ResizeTransform.register_type("rotated_box", Resize_rotated_box) + +# not necessary any more with latest fvcore +NoOpTransform.register_type("rotated_box", lambda t, x: x) diff --git a/data_processing/detectron2/detectron2/engine/__init__.py b/data_processing/detectron2/detectron2/engine/__init__.py new file mode 100644 index 0000000..08a6157 --- /dev/null +++ b/data_processing/detectron2/detectron2/engine/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +from .launch import * +from .train_loop import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) +# but still make them available here +from .hooks import * +from .defaults import * diff --git a/data_processing/detectron2/detectron2/engine/defaults.py b/data_processing/detectron2/detectron2/engine/defaults.py new file mode 100644 index 0000000..5b95257 --- /dev/null +++ b/data_processing/detectron2/detectron2/engine/defaults.py @@ -0,0 +1,715 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +This file contains components with some default boilerplate logic user may need +in training / testing. They will not work for everyone, but many users may find them useful. + +The behavior of functions/classes in this file is subject to change, +since they are meant to represent the "common default behavior" people need in their projects. +""" + +import argparse +import logging +import os +import sys +import weakref +from collections import OrderedDict +from typing import Optional +import torch +from fvcore.nn.precise_bn import get_bn_modules +from omegaconf import OmegaConf +from torch.nn.parallel import DistributedDataParallel + +import detectron2.data.transforms as T +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import CfgNode, LazyConfig +from detectron2.data import ( + MetadataCatalog, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.evaluation import ( + DatasetEvaluator, + inference_on_dataset, + print_csv_format, + verify_results, +) +from detectron2.modeling import build_model +from detectron2.solver import build_lr_scheduler, build_optimizer +from detectron2.utils import comm +from detectron2.utils.collect_env import collect_env_info +from detectron2.utils.env import seed_all_rng +from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import setup_logger + +from . import hooks +from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase + +__all__ = [ + "create_ddp_model", + "default_argument_parser", + "default_setup", + "default_writers", + "DefaultPredictor", + "DefaultTrainer", +] + + +def create_ddp_model(model, *, fp16_compression=False, **kwargs): + """ + Create a DistributedDataParallel model if there are >1 processes. + + Args: + model: a torch.nn.Module + fp16_compression: add fp16 compression hooks to the ddp object. + See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook + kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. + """ # noqa + if comm.get_world_size() == 1: + return model + if "device_ids" not in kwargs: + kwargs["device_ids"] = [comm.get_local_rank()] + ddp = DistributedDataParallel(model, **kwargs) + if fp16_compression: + from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks + + ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook) + return ddp + + +def default_argument_parser(epilog=None): + """ + Create a parser with some common arguments used by detectron2 users. + + Args: + epilog (str): epilog passed to ArgumentParser describing the usage. 
+ + Returns: + argparse.ArgumentParser: + """ + parser = argparse.ArgumentParser( + epilog=epilog + or f""" +Examples: + +Run on single machine: + $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml + +Change some config options: + $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001 + +Run on multiple machines: + (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url [--other-flags] + (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url [--other-flags] +""", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument( + "--resume", + action="store_true", + help="Whether to attempt to resume from the checkpoint directory. " + "See documentation of `DefaultTrainer.resume_or_load()` for what it means.", + ) + parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") + parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") + parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") + parser.add_argument( + "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" + ) + + # PyTorch still may leave orphan processes in multi-gpu training. + # Therefore we use a deterministic way to obtain port, + # so that users are aware of orphan processes by seeing the port occupied. + port = 2**15 + 2**14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2**14 + parser.add_argument( + "--dist-url", + default="tcp://127.0.0.1:{}".format(port), + help="initialization URL for pytorch distributed backend. See " + "https://pytorch.org/docs/stable/distributed.html for details.", + ) + parser.add_argument( + "opts", + help=""" +Modify config options at the end of the command. For Yacs configs, use +space-separated "PATH.KEY VALUE" pairs. +For python-based LazyConfig, use "path.key=value". + """.strip(), + default=None, + nargs=argparse.REMAINDER, + ) + return parser + + +def _try_get_key(cfg, *keys, default=None): + """ + Try select keys from cfg until the first key that exists. Otherwise return default. + """ + if isinstance(cfg, CfgNode): + cfg = OmegaConf.create(cfg.dump()) + for k in keys: + none = object() + p = OmegaConf.select(cfg, k, default=none) + if p is not none: + return p + return default + + +def _highlight(code, filename): + try: + import pygments + except ImportError: + return code + + from pygments.lexers import Python3Lexer, YamlLexer + from pygments.formatters import Terminal256Formatter + + lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer() + code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai")) + return code + + +def default_setup(cfg, args): + """ + Perform some basic common setups at the beginning of a job, including: + + 1. Set up the detectron2 logger + 2. Log basic information about environment, cmdline arguments, and config + 3. 
Backup the config to the output directory + + Args: + cfg (CfgNode or omegaconf.DictConfig): the full config to be used + args (argparse.NameSpace): the command line arguments to be logged + """ + output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir") + if comm.is_main_process() and output_dir: + PathManager.mkdirs(output_dir) + + rank = comm.get_rank() + setup_logger(output_dir, distributed_rank=rank, name="fvcore") + logger = setup_logger(output_dir, distributed_rank=rank) + + logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size())) + logger.info("Environment info:\n" + collect_env_info()) + + logger.info("Command line arguments: " + str(args)) + if hasattr(args, "config_file") and args.config_file != "": + logger.info( + "Contents of args.config_file={}:\n{}".format( + args.config_file, + _highlight(PathManager.open(args.config_file, "r").read(), args.config_file), + ) + ) + + if comm.is_main_process() and output_dir: + # Note: some of our scripts may expect the existence of + # config.yaml in output directory + path = os.path.join(output_dir, "config.yaml") + if isinstance(cfg, CfgNode): + logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml"))) + with PathManager.open(path, "w") as f: + f.write(cfg.dump()) + else: + LazyConfig.save(cfg, path) + logger.info("Full config saved to {}".format(path)) + + # make sure each worker has a different, yet deterministic seed if specified + seed = _try_get_key(cfg, "SEED", "train.seed", default=-1) + seed_all_rng(None if seed < 0 else seed + rank) + + # cudnn benchmark has large overhead. It shouldn't be used considering the small size of + # typical validation set. + if not (hasattr(args, "eval_only") and args.eval_only): + torch.backends.cudnn.benchmark = _try_get_key( + cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False + ) + + +def default_writers(output_dir: str, max_iter: Optional[int] = None): + """ + Build a list of :class:`EventWriter` to be used. + It now consists of a :class:`CommonMetricPrinter`, + :class:`TensorboardXWriter` and :class:`JSONWriter`. + + Args: + output_dir: directory to store JSON metrics and tensorboard events + max_iter: the total number of iterations + + Returns: + list[EventWriter]: a list of :class:`EventWriter` objects. + """ + PathManager.mkdirs(output_dir) + return [ + # It may not always print what you want to see, since it prints "common" metrics only. + CommonMetricPrinter(max_iter), + JSONWriter(os.path.join(output_dir, "metrics.json")), + TensorboardXWriter(output_dir), + ] + + +class DefaultPredictor: + """ + Create a simple end-to-end predictor with the given config that runs on + single device for a single input image. + + Compared to using the model directly, this class does the following additions: + + 1. Load checkpoint from `cfg.MODEL.WEIGHTS`. + 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`. + 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`. + 4. Take one input image and produce a single output, instead of a batch. + + This is meant for simple demo purposes, so it does the above steps automatically. + This is not meant for benchmarks or running complicated inference logic. + If you'd like to do anything more complicated, please refer to its source code as + examples to build and use the model manually. + + Attributes: + metadata (Metadata): the metadata of the underlying dataset, obtained from + cfg.DATASETS.TEST. 
+ + Examples: + :: + pred = DefaultPredictor(cfg) + inputs = cv2.imread("input.jpg") + outputs = pred(inputs) + """ + + def __init__(self, cfg): + self.cfg = cfg.clone() # cfg can be modified by model + self.model = build_model(self.cfg) + self.model.eval() + if len(cfg.DATASETS.TEST): + self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) + + checkpointer = DetectionCheckpointer(self.model) + checkpointer.load(cfg.MODEL.WEIGHTS) + + self.aug = T.ResizeShortestEdge( + [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST + ) + + self.input_format = cfg.INPUT.FORMAT + assert self.input_format in ["RGB", "BGR"], self.input_format + + def __call__(self, original_image): + """ + Args: + original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). + + Returns: + predictions (dict): + the output of the model for one image only. + See :doc:`/tutorials/models` for details about the format. + """ + with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258 + # Apply pre-processing to image. + if self.input_format == "RGB": + # whether the model expects BGR inputs or RGB + original_image = original_image[:, :, ::-1] + height, width = original_image.shape[:2] + image = self.aug.get_transform(original_image).apply_image(original_image) + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) + + inputs = {"image": image, "height": height, "width": width} + predictions = self.model([inputs])[0] + return predictions + + +class DefaultTrainer(TrainerBase): + """ + A trainer with default training logic. It does the following: + + 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader + defined by the given config. Create a LR scheduler defined by the config. + 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when + `resume_or_load` is called. + 3. Register a few common hooks defined by the config. + + It is created to simplify the **standard model training workflow** and reduce code boilerplate + for users who only need the standard training workflow, with standard features. + It means this class makes *many assumptions* about your training logic that + may easily become invalid in a new research. In fact, any assumptions beyond those made in the + :class:`SimpleTrainer` are too much for research. + + The code of this class has been annotated about restrictive assumptions it makes. + When they do not work for you, you're encouraged to: + + 1. Overwrite methods of this class, OR: + 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and + nothing else. You can then add your own hooks if needed. OR: + 3. Write your own training loop similar to `tools/plain_train_net.py`. + + See the :doc:`/tutorials/training` tutorials for more details. + + Note that the behavior of this class, like other functions/classes in + this file, is not stable, since it is meant to represent the "common default behavior". + It is only guaranteed to work well with the standard models and training workflow in detectron2. + To obtain more stable behavior, write your own training logic with other public APIs. 
+ + Examples: + :: + trainer = DefaultTrainer(cfg) + trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS + trainer.train() + + Attributes: + scheduler: + checkpointer (DetectionCheckpointer): + cfg (CfgNode): + """ + + def __init__(self, cfg): + """ + Args: + cfg (CfgNode): + """ + super().__init__() + logger = logging.getLogger("detectron2") + if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2 + setup_logger() + cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size()) + + # Assume these objects must be constructed in this order. + model = self.build_model(cfg) + optimizer = self.build_optimizer(cfg, model) + data_loader = self.build_train_loader(cfg) + + model = create_ddp_model(model, broadcast_buffers=False) + self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)( + model, data_loader, optimizer + ) + + self.scheduler = self.build_lr_scheduler(cfg, optimizer) + self.checkpointer = DetectionCheckpointer( + # Assume you want to save checkpoints together with logs/statistics + model, + cfg.OUTPUT_DIR, + trainer=weakref.proxy(self), + ) + self.start_iter = 0 + self.max_iter = cfg.SOLVER.MAX_ITER + self.cfg = cfg + + self.register_hooks(self.build_hooks()) + + def resume_or_load(self, resume=True): + """ + If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by + a `last_checkpoint` file), resume from the file. Resuming means loading all + available states (eg. optimizer and scheduler) and update iteration counter + from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used. + + Otherwise, this is considered as an independent training. The method will load model + weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start + from iteration 0. + + Args: + resume (bool): whether to do resume or not + """ + self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume) + if resume and self.checkpointer.has_checkpoint(): + # The checkpoint stores the training iteration that just finished, thus we start + # at the next iteration + self.start_iter = self.iter + 1 + + def build_hooks(self): + """ + Build a list of default hooks, including timing, evaluation, + checkpointing, lr scheduling, precise BN, writing events. + + Returns: + list[HookBase]: + """ + cfg = self.cfg.clone() + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN + + ret = [ + hooks.IterationTimer(), + hooks.LRScheduler(), + hooks.PreciseBN( + # Run at the same freq as (but before) evaluation. + cfg.TEST.EVAL_PERIOD, + self.model, + # Build a new data loader to not affect training + self.build_train_loader(cfg), + cfg.TEST.PRECISE_BN.NUM_ITER, + ) + if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) + else None, + ] + + # Do PreciseBN before checkpointer, because it updates the model and need to + # be saved by checkpointer. + # This is not always the best: if checkpointing has a different frequency, + # some checkpoints may have more precise statistics than others. + if comm.is_main_process(): + ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) + + def test_and_save_results(): + self._last_eval_results = self.test(self.cfg, self.model) + return self._last_eval_results + + # Do evaluation after checkpointer, because then if it fails, + # we can use the saved checkpoint to debug. 
+ ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) + + if comm.is_main_process(): + # Here the default print/log frequency of each writer is used. + # run writers in the end, so that evaluation metrics are written + ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) + return ret + + def build_writers(self): + """ + Build a list of writers to be used using :func:`default_writers()`. + If you'd like a different list of writers, you can overwrite it in + your trainer. + + Returns: + list[EventWriter]: a list of :class:`EventWriter` objects. + """ + return default_writers(self.cfg.OUTPUT_DIR, self.max_iter) + + def train(self): + """ + Run training. + + Returns: + OrderedDict of results, if evaluation is enabled. Otherwise None. + """ + super().train(self.start_iter, self.max_iter) + if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process(): + assert hasattr( + self, "_last_eval_results" + ), "No evaluation results obtained during training!" + verify_results(self.cfg, self._last_eval_results) + return self._last_eval_results + + def run_step(self): + self._trainer.iter = self.iter + self._trainer.run_step() + + def state_dict(self): + ret = super().state_dict() + ret["_trainer"] = self._trainer.state_dict() + return ret + + def load_state_dict(self, state_dict): + super().load_state_dict(state_dict) + self._trainer.load_state_dict(state_dict["_trainer"]) + + @classmethod + def build_model(cls, cfg): + """ + Returns: + torch.nn.Module: + + It now calls :func:`detectron2.modeling.build_model`. + Overwrite it if you'd like a different model. + """ + model = build_model(cfg) + logger = logging.getLogger(__name__) + logger.info("Model:\n{}".format(model)) + return model + + @classmethod + def build_optimizer(cls, cfg, model): + """ + Returns: + torch.optim.Optimizer: + + It now calls :func:`detectron2.solver.build_optimizer`. + Overwrite it if you'd like a different optimizer. + """ + return build_optimizer(cfg, model) + + @classmethod + def build_lr_scheduler(cls, cfg, optimizer): + """ + It now calls :func:`detectron2.solver.build_lr_scheduler`. + Overwrite it if you'd like a different scheduler. + """ + return build_lr_scheduler(cfg, optimizer) + + @classmethod + def build_train_loader(cls, cfg): + """ + Returns: + iterable + + It now calls :func:`detectron2.data.build_detection_train_loader`. + Overwrite it if you'd like a different data loader. + """ + return build_detection_train_loader(cfg) + + @classmethod + def build_test_loader(cls, cfg, dataset_name): + """ + Returns: + iterable + + It now calls :func:`detectron2.data.build_detection_test_loader`. + Overwrite it if you'd like a different data loader. + """ + return build_detection_test_loader(cfg, dataset_name) + + @classmethod + def build_evaluator(cls, cfg, dataset_name): + """ + Returns: + DatasetEvaluator or None + + It is not implemented by default. + """ + raise NotImplementedError( + """ +If you want DefaultTrainer to automatically run evaluation, +please implement `build_evaluator()` in subclasses (see train_net.py for example). +Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example). +""" + ) + + @classmethod + def test(cls, cfg, model, evaluators=None): + """ + Evaluate the given model. The given model is expected to already contain + weights to evaluate. + + Args: + cfg (CfgNode): + model (nn.Module): + evaluators (list[DatasetEvaluator] or None): if None, will call + :meth:`build_evaluator`. 
Otherwise, must have the same length as + ``cfg.DATASETS.TEST``. + + Returns: + dict: a dict of result metrics + """ + logger = logging.getLogger(__name__) + if isinstance(evaluators, DatasetEvaluator): + evaluators = [evaluators] + if evaluators is not None: + assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( + len(cfg.DATASETS.TEST), len(evaluators) + ) + + results = OrderedDict() + for idx, dataset_name in enumerate(cfg.DATASETS.TEST): + data_loader = cls.build_test_loader(cfg, dataset_name) + # When evaluators are passed in as arguments, + # implicitly assume that evaluators can be created before data_loader. + if evaluators is not None: + evaluator = evaluators[idx] + else: + try: + evaluator = cls.build_evaluator(cfg, dataset_name) + except NotImplementedError: + logger.warn( + "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " + "or implement its `build_evaluator` method." + ) + results[dataset_name] = {} + continue + results_i = inference_on_dataset(model, data_loader, evaluator) + results[dataset_name] = results_i + if comm.is_main_process(): + assert isinstance( + results_i, dict + ), "Evaluator must return a dict on the main process. Got {} instead.".format( + results_i + ) + logger.info("Evaluation results for {} in csv format:".format(dataset_name)) + print_csv_format(results_i) + + if len(results) == 1: + results = list(results.values())[0] + return results + + @staticmethod + def auto_scale_workers(cfg, num_workers: int): + """ + When the config is defined for certain number of workers (according to + ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of + workers currently in use, returns a new cfg where the total batch size + is scaled so that the per-GPU batch size stays the same as the + original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``. + + Other config options are also scaled accordingly: + * training steps and warmup steps are scaled inverse proportionally. + * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`. + + For example, with the original config like the following: + + .. code-block:: yaml + + IMS_PER_BATCH: 16 + BASE_LR: 0.1 + REFERENCE_WORLD_SIZE: 8 + MAX_ITER: 5000 + STEPS: (4000,) + CHECKPOINT_PERIOD: 1000 + + When this config is used on 16 GPUs instead of the reference number 8, + calling this method will return a new config with: + + .. code-block:: yaml + + IMS_PER_BATCH: 32 + BASE_LR: 0.2 + REFERENCE_WORLD_SIZE: 16 + MAX_ITER: 2500 + STEPS: (2000,) + CHECKPOINT_PERIOD: 500 + + Note that both the original config and this new config can be trained on 16 GPUs. + It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``). + + Returns: + CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``. + """ + old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE + if old_world_size == 0 or old_world_size == num_workers: + return cfg + cfg = cfg.clone() + frozen = cfg.is_frozen() + cfg.defrost() + + assert ( + cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0 + ), "Invalid REFERENCE_WORLD_SIZE in config!" 
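+        # Linear scaling rule: the total batch size and base LR grow with the number
+        # of workers, while iteration-based quantities (max iter, warmup, LR steps,
+        # eval and checkpoint periods) shrink by the same factor, so the per-GPU
+        # batch size and the total number of images seen stay unchanged.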
+ scale = num_workers / old_world_size + bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale)) + lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale + max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale)) + warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale)) + cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS) + cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale)) + cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale)) + cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant + logger = logging.getLogger(__name__) + logger.info( + f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, " + f"max_iter={max_iter}, warmup={warmup_iter}." + ) + + if frozen: + cfg.freeze() + return cfg + + +# Access basic attributes from the underlying trainer +for _attr in ["model", "data_loader", "optimizer"]: + setattr( + DefaultTrainer, + _attr, + property( + # getter + lambda self, x=_attr: getattr(self._trainer, x), + # setter + lambda self, value, x=_attr: setattr(self._trainer, x, value), + ), + ) diff --git a/data_processing/detectron2/detectron2/engine/hooks.py b/data_processing/detectron2/detectron2/engine/hooks.py new file mode 100644 index 0000000..fc37af0 --- /dev/null +++ b/data_processing/detectron2/detectron2/engine/hooks.py @@ -0,0 +1,690 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import datetime +import itertools +import logging +import math +import operator +import os +import tempfile +import time +import warnings +from collections import Counter +import torch +from fvcore.common.checkpoint import Checkpointer +from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer +from fvcore.common.param_scheduler import ParamScheduler +from fvcore.common.timer import Timer +from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats + +import detectron2.utils.comm as comm +from detectron2.evaluation.testing import flatten_results_dict +from detectron2.solver import LRMultiplier +from detectron2.solver import LRScheduler as _LRScheduler +from detectron2.utils.events import EventStorage, EventWriter +from detectron2.utils.file_io import PathManager + +from .train_loop import HookBase + +__all__ = [ + "CallbackHook", + "IterationTimer", + "PeriodicWriter", + "PeriodicCheckpointer", + "BestCheckpointer", + "LRScheduler", + "AutogradProfiler", + "EvalHook", + "PreciseBN", + "TorchProfiler", + "TorchMemoryStats", +] + + +""" +Implement some common hooks. +""" + + +class CallbackHook(HookBase): + """ + Create a hook using callback functions provided by the user. + """ + + def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None): + """ + Each argument is a function that takes one argument: the trainer. + """ + self._before_train = before_train + self._before_step = before_step + self._after_step = after_step + self._after_train = after_train + + def before_train(self): + if self._before_train: + self._before_train(self.trainer) + + def after_train(self): + if self._after_train: + self._after_train(self.trainer) + # The functions may be closures that hold reference to the trainer + # Therefore, delete them to avoid circular reference. 
+ del self._before_train, self._after_train + del self._before_step, self._after_step + + def before_step(self): + if self._before_step: + self._before_step(self.trainer) + + def after_step(self): + if self._after_step: + self._after_step(self.trainer) + + +class IterationTimer(HookBase): + """ + Track the time spent for each iteration (each run_step call in the trainer). + Print a summary in the end of training. + + This hook uses the time between the call to its :meth:`before_step` + and :meth:`after_step` methods. + Under the convention that :meth:`before_step` of all hooks should only + take negligible amount of time, the :class:`IterationTimer` hook should be + placed at the beginning of the list of hooks to obtain accurate timing. + """ + + def __init__(self, warmup_iter=3): + """ + Args: + warmup_iter (int): the number of iterations at the beginning to exclude + from timing. + """ + self._warmup_iter = warmup_iter + self._step_timer = Timer() + self._start_time = time.perf_counter() + self._total_timer = Timer() + + def before_train(self): + self._start_time = time.perf_counter() + self._total_timer.reset() + self._total_timer.pause() + + def after_train(self): + logger = logging.getLogger(__name__) + total_time = time.perf_counter() - self._start_time + total_time_minus_hooks = self._total_timer.seconds() + hook_time = total_time - total_time_minus_hooks + + num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter + + if num_iter > 0 and total_time_minus_hooks > 0: + # Speed is meaningful only after warmup + # NOTE this format is parsed by grep in some scripts + logger.info( + "Overall training speed: {} iterations in {} ({:.4f} s / it)".format( + num_iter, + str(datetime.timedelta(seconds=int(total_time_minus_hooks))), + total_time_minus_hooks / num_iter, + ) + ) + + logger.info( + "Total training time: {} ({} on hooks)".format( + str(datetime.timedelta(seconds=int(total_time))), + str(datetime.timedelta(seconds=int(hook_time))), + ) + ) + + def before_step(self): + self._step_timer.reset() + self._total_timer.resume() + + def after_step(self): + # +1 because we're in after_step, the current step is done + # but not yet counted + iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1 + if iter_done >= self._warmup_iter: + sec = self._step_timer.seconds() + self.trainer.storage.put_scalars(time=sec) + else: + self._start_time = time.perf_counter() + self._total_timer.reset() + + self._total_timer.pause() + + +class PeriodicWriter(HookBase): + """ + Write events to EventStorage (by calling ``writer.write()``) periodically. + + It is executed every ``period`` iterations and after the last iteration. + Note that ``period`` does not affect how data is smoothed by each writer. + """ + + def __init__(self, writers, period=20): + """ + Args: + writers (list[EventWriter]): a list of EventWriter objects + period (int): + """ + self._writers = writers + for w in writers: + assert isinstance(w, EventWriter), w + self._period = period + + def after_step(self): + if (self.trainer.iter + 1) % self._period == 0 or ( + self.trainer.iter == self.trainer.max_iter - 1 + ): + for writer in self._writers: + writer.write() + + def after_train(self): + for writer in self._writers: + # If any new data is found (e.g. produced by other after_train), + # write them before closing + writer.write() + writer.close() + + +class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase): + """ + Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook. 
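+
+    A typical use (names assume a :class:`DefaultTrainer`-style setup)::
+
+        trainer.register_hooks(
+            [hooks.PeriodicCheckpointer(trainer.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)]
+        )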
+ + Note that when used as a hook, + it is unable to save additional data other than what's defined + by the given `checkpointer`. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def before_train(self): + self.max_iter = self.trainer.max_iter + + def after_step(self): + # No way to use **kwargs + self.step(self.trainer.iter) + + +class BestCheckpointer(HookBase): + """ + Checkpoints best weights based off given metric. + + This hook should be used in conjunction to and executed after the hook + that produces the metric, e.g. `EvalHook`. + """ + + def __init__( + self, + eval_period: int, + checkpointer: Checkpointer, + val_metric: str, + mode: str = "max", + file_prefix: str = "model_best", + ) -> None: + """ + Args: + eval_period (int): the period `EvalHook` is set to run. + checkpointer: the checkpointer object used to save checkpoints. + val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50" + mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be + maximized or minimized, e.g. for "bbox/AP50" it should be "max" + file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best" + """ + self._logger = logging.getLogger(__name__) + self._period = eval_period + self._val_metric = val_metric + assert mode in [ + "max", + "min", + ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.' + if mode == "max": + self._compare = operator.gt + else: + self._compare = operator.lt + self._checkpointer = checkpointer + self._file_prefix = file_prefix + self.best_metric = None + self.best_iter = None + + def _update_best(self, val, iteration): + if math.isnan(val) or math.isinf(val): + return False + self.best_metric = val + self.best_iter = iteration + return True + + def _best_checking(self): + metric_tuple = self.trainer.storage.latest().get(self._val_metric) + if metric_tuple is None: + self._logger.warning( + f"Given val metric {self._val_metric} does not seem to be computed/stored." + "Will not be checkpointing based on it." + ) + return + else: + latest_metric, metric_iter = metric_tuple + + if self.best_metric is None: + if self._update_best(latest_metric, metric_iter): + additional_state = {"iteration": metric_iter} + self._checkpointer.save(f"{self._file_prefix}", **additional_state) + self._logger.info( + f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps" + ) + elif self._compare(latest_metric, self.best_metric): + additional_state = {"iteration": metric_iter} + self._checkpointer.save(f"{self._file_prefix}", **additional_state) + self._logger.info( + f"Saved best model as latest eval score for {self._val_metric} is " + f"{latest_metric:0.5f}, better than last best score " + f"{self.best_metric:0.5f} @ iteration {self.best_iter}." + ) + self._update_best(latest_metric, metric_iter) + else: + self._logger.info( + f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, " + f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}." 
+ ) + + def after_step(self): + # same conditions as `EvalHook` + next_iter = self.trainer.iter + 1 + if ( + self._period > 0 + and next_iter % self._period == 0 + and next_iter != self.trainer.max_iter + ): + self._best_checking() + + def after_train(self): + # same conditions as `EvalHook` + if self.trainer.iter + 1 >= self.trainer.max_iter: + self._best_checking() + + +class LRScheduler(HookBase): + """ + A hook which executes a torch builtin LR scheduler and summarizes the LR. + It is executed after every iteration. + """ + + def __init__(self, optimizer=None, scheduler=None): + """ + Args: + optimizer (torch.optim.Optimizer): + scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler): + if a :class:`ParamScheduler` object, it defines the multiplier over the base LR + in the optimizer. + + If any argument is not given, will try to obtain it from the trainer. + """ + self._optimizer = optimizer + self._scheduler = scheduler + + def before_train(self): + self._optimizer = self._optimizer or self.trainer.optimizer + if isinstance(self.scheduler, ParamScheduler): + self._scheduler = LRMultiplier( + self._optimizer, + self.scheduler, + self.trainer.max_iter, + last_iter=self.trainer.iter - 1, + ) + self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer) + + @staticmethod + def get_best_param_group_id(optimizer): + # NOTE: some heuristics on what LR to summarize + # summarize the param group with most parameters + largest_group = max(len(g["params"]) for g in optimizer.param_groups) + + if largest_group == 1: + # If all groups have one parameter, + # then find the most common initial LR, and use it for summary + lr_count = Counter([g["lr"] for g in optimizer.param_groups]) + lr = lr_count.most_common()[0][0] + for i, g in enumerate(optimizer.param_groups): + if g["lr"] == lr: + return i + else: + for i, g in enumerate(optimizer.param_groups): + if len(g["params"]) == largest_group: + return i + + def after_step(self): + lr = self._optimizer.param_groups[self._best_param_group_id]["lr"] + self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False) + self.scheduler.step() + + @property + def scheduler(self): + return self._scheduler or self.trainer.scheduler + + def state_dict(self): + if isinstance(self.scheduler, _LRScheduler): + return self.scheduler.state_dict() + return {} + + def load_state_dict(self, state_dict): + if isinstance(self.scheduler, _LRScheduler): + logger = logging.getLogger(__name__) + logger.info("Loading scheduler from state_dict ...") + self.scheduler.load_state_dict(state_dict) + + +class TorchProfiler(HookBase): + """ + A hook which runs `torch.profiler.profile`. + + Examples: + :: + hooks.TorchProfiler( + lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR + ) + + The above example will run the profiler for iteration 10~20 and dump + results to ``OUTPUT_DIR``. We did not profile the first few iterations + because they are typically slower than the rest. + The result files can be loaded in the ``chrome://tracing`` page in chrome browser, + and the tensorboard visualizations can be visualized using + ``tensorboard --logdir OUTPUT_DIR/log`` + """ + + def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True): + """ + Args: + enable_predicate (callable[trainer -> bool]): a function which takes a trainer, + and returns whether to enable the profiler. + It will be called once every step, and can be used to select which steps to profile. 
+ output_dir (str): the output directory to dump tracing files. + activities (iterable): same as in `torch.profiler.profile`. + save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/ + """ + self._enable_predicate = enable_predicate + self._activities = activities + self._output_dir = output_dir + self._save_tensorboard = save_tensorboard + + def before_step(self): + if self._enable_predicate(self.trainer): + if self._save_tensorboard: + on_trace_ready = torch.profiler.tensorboard_trace_handler( + os.path.join( + self._output_dir, + "log", + "profiler-tensorboard-iter{}".format(self.trainer.iter), + ), + f"worker{comm.get_rank()}", + ) + else: + on_trace_ready = None + self._profiler = torch.profiler.profile( + activities=self._activities, + on_trace_ready=on_trace_ready, + record_shapes=True, + profile_memory=True, + with_stack=True, + with_flops=True, + ) + self._profiler.__enter__() + else: + self._profiler = None + + def after_step(self): + if self._profiler is None: + return + self._profiler.__exit__(None, None, None) + if not self._save_tensorboard: + PathManager.mkdirs(self._output_dir) + out_file = os.path.join( + self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter) + ) + if "://" not in out_file: + self._profiler.export_chrome_trace(out_file) + else: + # Support non-posix filesystems + with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d: + tmp_file = os.path.join(d, "tmp.json") + self._profiler.export_chrome_trace(tmp_file) + with open(tmp_file) as f: + content = f.read() + with PathManager.open(out_file, "w") as f: + f.write(content) + + +class AutogradProfiler(TorchProfiler): + """ + A hook which runs `torch.autograd.profiler.profile`. + + Examples: + :: + hooks.AutogradProfiler( + lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR + ) + + The above example will run the profiler for iteration 10~20 and dump + results to ``OUTPUT_DIR``. We did not profile the first few iterations + because they are typically slower than the rest. + The result files can be loaded in the ``chrome://tracing`` page in chrome browser. + + Note: + When used together with NCCL on older version of GPUs, + autograd profiler may cause deadlock because it unnecessarily allocates + memory on every device it sees. The memory management calls, if + interleaved with NCCL calls, lead to deadlock on GPUs that do not + support ``cudaLaunchCooperativeKernelMultiDevice``. + """ + + def __init__(self, enable_predicate, output_dir, *, use_cuda=True): + """ + Args: + enable_predicate (callable[trainer -> bool]): a function which takes a trainer, + and returns whether to enable the profiler. + It will be called once every step, and can be used to select which steps to profile. + output_dir (str): the output directory to dump tracing files. + use_cuda (bool): same as in `torch.autograd.profiler.profile`. + """ + warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.") + self._enable_predicate = enable_predicate + self._use_cuda = use_cuda + self._output_dir = output_dir + + def before_step(self): + if self._enable_predicate(self.trainer): + self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda) + self._profiler.__enter__() + else: + self._profiler = None + + +class EvalHook(HookBase): + """ + Run an evaluation function periodically, and at the end of training. + + It is executed every ``eval_period`` iterations and after the last iteration. 
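+
+    A minimal sketch (the evaluation function below is a stand-in)::
+
+        def eval_fn():
+            # must return a (possibly nested) dict of scalar metrics
+            return {"bbox": {"AP50": 0.5}}
+
+        trainer.register_hooks([hooks.EvalHook(cfg.TEST.EVAL_PERIOD, eval_fn)])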
+ """ + + def __init__(self, eval_period, eval_function, eval_after_train=True): + """ + Args: + eval_period (int): the period to run `eval_function`. Set to 0 to + not evaluate periodically (but still evaluate after the last iteration + if `eval_after_train` is True). + eval_function (callable): a function which takes no arguments, and + returns a nested dict of evaluation metrics. + eval_after_train (bool): whether to evaluate after the last iteration + + Note: + This hook must be enabled in all or none workers. + If you would like only certain workers to perform evaluation, + give other workers a no-op function (`eval_function=lambda: None`). + """ + self._period = eval_period + self._func = eval_function + self._eval_after_train = eval_after_train + + def _do_eval(self): + results = self._func() + + if results: + assert isinstance( + results, dict + ), "Eval function must return a dict. Got {} instead.".format(results) + + flattened_results = flatten_results_dict(results) + for k, v in flattened_results.items(): + try: + v = float(v) + except Exception as e: + raise ValueError( + "[EvalHook] eval_function should return a nested dict of float. " + "Got '{}: {}' instead.".format(k, v) + ) from e + self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False) + + # Evaluation may take different time among workers. + # A barrier make them start the next iteration together. + comm.synchronize() + + def after_step(self): + next_iter = self.trainer.iter + 1 + if self._period > 0 and next_iter % self._period == 0: + # do the last eval in after_train + if next_iter != self.trainer.max_iter: + self._do_eval() + + def after_train(self): + # This condition is to prevent the eval from running after a failed training + if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter: + self._do_eval() + # func is likely a closure that holds reference to the trainer + # therefore we clean it to avoid circular reference in the end + del self._func + + +class PreciseBN(HookBase): + """ + The standard implementation of BatchNorm uses EMA in inference, which is + sometimes suboptimal. + This class computes the true average of statistics rather than the moving average, + and put true averages to every BN layer in the given model. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def __init__(self, period, model, data_loader, num_iter): + """ + Args: + period (int): the period this hook is run, or 0 to not run during training. + The hook will always run in the end of training. + model (nn.Module): a module whose all BN layers in training mode will be + updated by precise BN. + Note that user is responsible for ensuring the BN layers to be + updated are in training mode when this hook is triggered. + data_loader (iterable): it will produce data to be run by `model(data)`. + num_iter (int): number of iterations used to compute the precise + statistics. + """ + self._logger = logging.getLogger(__name__) + if len(get_bn_modules(model)) == 0: + self._logger.info( + "PreciseBN is disabled because model does not contain BN layers in training mode." 
+ ) + self._disabled = True + return + + self._model = model + self._data_loader = data_loader + self._num_iter = num_iter + self._period = period + self._disabled = False + + self._data_iter = None + + def after_step(self): + next_iter = self.trainer.iter + 1 + is_final = next_iter == self.trainer.max_iter + if is_final or (self._period > 0 and next_iter % self._period == 0): + self.update_stats() + + def update_stats(self): + """ + Update the model with precise statistics. Users can manually call this method. + """ + if self._disabled: + return + + if self._data_iter is None: + self._data_iter = iter(self._data_loader) + + def data_loader(): + for num_iter in itertools.count(1): + if num_iter % 100 == 0: + self._logger.info( + "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter) + ) + # This way we can reuse the same iterator + yield next(self._data_iter) + + with EventStorage(): # capture events in a new storage to discard them + self._logger.info( + "Running precise-BN for {} iterations... ".format(self._num_iter) + + "Note that this could produce different statistics every time." + ) + update_bn_stats(self._model, data_loader(), self._num_iter) + + +class TorchMemoryStats(HookBase): + """ + Writes pytorch's cuda memory statistics periodically. + """ + + def __init__(self, period=20, max_runs=10): + """ + Args: + period (int): Output stats each 'period' iterations + max_runs (int): Stop the logging after 'max_runs' + """ + + self._logger = logging.getLogger(__name__) + self._period = period + self._max_runs = max_runs + self._runs = 0 + + def after_step(self): + if self._runs > self._max_runs: + return + + if (self.trainer.iter + 1) % self._period == 0 or ( + self.trainer.iter == self.trainer.max_iter - 1 + ): + if torch.cuda.is_available(): + max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0 + reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0 + max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 + allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0 + + self._logger.info( + ( + " iter: {} " + " max_reserved_mem: {:.0f}MB " + " reserved_mem: {:.0f}MB " + " max_allocated_mem: {:.0f}MB " + " allocated_mem: {:.0f}MB " + ).format( + self.trainer.iter, + max_reserved_mb, + reserved_mb, + max_allocated_mb, + allocated_mb, + ) + ) + + self._runs += 1 + if self._runs == self._max_runs: + mem_summary = torch.cuda.memory_summary() + self._logger.info("\n" + mem_summary) + + torch.cuda.reset_peak_memory_stats() diff --git a/data_processing/detectron2/detectron2/engine/launch.py b/data_processing/detectron2/detectron2/engine/launch.py new file mode 100644 index 0000000..7052c50 --- /dev/null +++ b/data_processing/detectron2/detectron2/engine/launch.py @@ -0,0 +1,123 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +from datetime import timedelta +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + +from detectron2.utils import comm + +__all__ = ["DEFAULT_TIMEOUT", "launch"] + +DEFAULT_TIMEOUT = timedelta(minutes=30) + + +def _find_free_port(): + import socket + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # Binding to port 0 will cause the OS to find an available port for us + sock.bind(("", 0)) + port = sock.getsockname()[1] + sock.close() + # NOTE: there is still a chance the port could be taken by other processes. + return port + + +def launch( + main_func, + # Should be num_processes_per_machine, but kept for compatibility. 
+ num_gpus_per_machine, + num_machines=1, + machine_rank=0, + dist_url=None, + args=(), + timeout=DEFAULT_TIMEOUT, +): + """ + Launch multi-process or distributed training. + This function must be called on all machines involved in the training. + It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine. + + Args: + main_func: a function that will be called by `main_func(*args)` + num_gpus_per_machine (int): number of processes per machine. When + using GPUs, this should be the number of GPUs. + num_machines (int): the total number of machines + machine_rank (int): the rank of this machine + dist_url (str): url to connect to for distributed jobs, including protocol + e.g. "tcp://127.0.0.1:8686". + Can be set to "auto" to automatically select a free port on localhost + timeout (timedelta): timeout of the distributed workers + args (tuple): arguments passed to main_func + """ + world_size = num_machines * num_gpus_per_machine + if world_size > 1: + # https://github.com/pytorch/pytorch/pull/14391 + # TODO prctl in spawned processes + + if dist_url == "auto": + assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." + port = _find_free_port() + dist_url = f"tcp://127.0.0.1:{port}" + if num_machines > 1 and dist_url.startswith("file://"): + logger = logging.getLogger(__name__) + logger.warning( + "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://" + ) + + mp.start_processes( + _distributed_worker, + nprocs=num_gpus_per_machine, + args=( + main_func, + world_size, + num_gpus_per_machine, + machine_rank, + dist_url, + args, + timeout, + ), + daemon=False, + ) + else: + main_func(*args) + + +def _distributed_worker( + local_rank, + main_func, + world_size, + num_gpus_per_machine, + machine_rank, + dist_url, + args, + timeout=DEFAULT_TIMEOUT, +): + has_gpu = torch.cuda.is_available() + if has_gpu: + assert num_gpus_per_machine <= torch.cuda.device_count() + global_rank = machine_rank * num_gpus_per_machine + local_rank + try: + dist.init_process_group( + backend="NCCL" if has_gpu else "GLOO", + init_method=dist_url, + world_size=world_size, + rank=global_rank, + timeout=timeout, + ) + except Exception as e: + logger = logging.getLogger(__name__) + logger.error("Process group URL: {}".format(dist_url)) + raise e + + # Setup the local process group. + comm.create_local_process_group(num_gpus_per_machine) + if has_gpu: + torch.cuda.set_device(local_rank) + + # synchronize is needed here to prevent a possible timeout after calling init_process_group + # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 + comm.synchronize() + + main_func(*args) diff --git a/data_processing/detectron2/detectron2/engine/train_loop.py b/data_processing/detectron2/detectron2/engine/train_loop.py new file mode 100644 index 0000000..2f6b96d --- /dev/null +++ b/data_processing/detectron2/detectron2/engine/train_loop.py @@ -0,0 +1,528 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
+import concurrent.futures +import logging +import numpy as np +import time +import weakref +from typing import List, Mapping, Optional +import torch +from torch.nn.parallel import DataParallel, DistributedDataParallel + +import detectron2.utils.comm as comm +from detectron2.utils.events import EventStorage, get_event_storage +from detectron2.utils.logger import _log_api_usage + +__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"] + + +class HookBase: + """ + Base class for hooks that can be registered with :class:`TrainerBase`. + + Each hook can implement 4 methods. The way they are called is demonstrated + in the following snippet: + :: + hook.before_train() + for iter in range(start_iter, max_iter): + hook.before_step() + trainer.run_step() + hook.after_step() + iter += 1 + hook.after_train() + + Notes: + 1. In the hook method, users can access ``self.trainer`` to access more + properties about the context (e.g., model, current iteration, or config + if using :class:`DefaultTrainer`). + + 2. A hook that does something in :meth:`before_step` can often be + implemented equivalently in :meth:`after_step`. + If the hook takes non-trivial time, it is strongly recommended to + implement the hook in :meth:`after_step` instead of :meth:`before_step`. + The convention is that :meth:`before_step` should only take negligible time. + + Following this convention will allow hooks that do care about the difference + between :meth:`before_step` and :meth:`after_step` (e.g., timer) to + function properly. + + """ + + trainer: "TrainerBase" = None + """ + A weak reference to the trainer object. Set by the trainer when the hook is registered. + """ + + def before_train(self): + """ + Called before the first iteration. + """ + pass + + def after_train(self): + """ + Called after the last iteration. + """ + pass + + def before_step(self): + """ + Called before each iteration. + """ + pass + + def after_backward(self): + """ + Called after the backward pass of each iteration. + """ + pass + + def after_step(self): + """ + Called after each iteration. + """ + pass + + def state_dict(self): + """ + Hooks are stateless by default, but can be made checkpointable by + implementing `state_dict` and `load_state_dict`. + """ + return {} + + +class TrainerBase: + """ + Base class for iterative trainer with hooks. + + The only assumption we made here is: the training runs in a loop. + A subclass can implement what the loop is. + We made no assumptions about the existence of dataloader, optimizer, model, etc. + + Attributes: + iter(int): the current iteration. + + start_iter(int): The iteration to start with. + By convention the minimum possible value is 0. + + max_iter(int): The iteration to end training. + + storage(EventStorage): An EventStorage that's opened during the course of training. + """ + + def __init__(self) -> None: + self._hooks: List[HookBase] = [] + self.iter: int = 0 + self.start_iter: int = 0 + self.max_iter: int + self.storage: EventStorage + _log_api_usage("trainer." + self.__class__.__name__) + + def register_hooks(self, hooks: List[Optional[HookBase]]) -> None: + """ + Register hooks to the trainer. The hooks are executed in the order + they are registered. + + Args: + hooks (list[Optional[HookBase]]): list of hooks + """ + hooks = [h for h in hooks if h is not None] + for h in hooks: + assert isinstance(h, HookBase) + # To avoid circular reference, hooks and trainer cannot own each other. 
+ # This normally does not matter, but will cause memory leak if the + # involved objects contain __del__: + # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/ + h.trainer = weakref.proxy(self) + self._hooks.extend(hooks) + + def train(self, start_iter: int, max_iter: int): + """ + Args: + start_iter, max_iter (int): See docs above + """ + logger = logging.getLogger(__name__) + logger.info("Starting training from iteration {}".format(start_iter)) + + self.iter = self.start_iter = start_iter + self.max_iter = max_iter + + with EventStorage(start_iter) as self.storage: + try: + self.before_train() + for self.iter in range(start_iter, max_iter): + self.before_step() + self.run_step() + self.after_step() + # self.iter == max_iter can be used by `after_train` to + # tell whether the training successfully finished or failed + # due to exceptions. + self.iter += 1 + except Exception: + logger.exception("Exception during training:") + raise + finally: + self.after_train() + + def before_train(self): + for h in self._hooks: + h.before_train() + + def after_train(self): + self.storage.iter = self.iter + for h in self._hooks: + h.after_train() + + def before_step(self): + # Maintain the invariant that storage.iter == trainer.iter + # for the entire execution of each step + self.storage.iter = self.iter + + for h in self._hooks: + h.before_step() + + def after_backward(self): + for h in self._hooks: + h.after_backward() + + def after_step(self): + for h in self._hooks: + h.after_step() + + def run_step(self): + raise NotImplementedError + + def state_dict(self): + ret = {"iteration": self.iter} + hooks_state = {} + for h in self._hooks: + sd = h.state_dict() + if sd: + name = type(h).__qualname__ + if name in hooks_state: + # TODO handle repetitive stateful hooks + continue + hooks_state[name] = sd + if hooks_state: + ret["hooks"] = hooks_state + return ret + + def load_state_dict(self, state_dict): + logger = logging.getLogger(__name__) + self.iter = state_dict["iteration"] + for key, value in state_dict.get("hooks", {}).items(): + for h in self._hooks: + try: + name = type(h).__qualname__ + except AttributeError: + continue + if name == key: + h.load_state_dict(value) + break + else: + logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.") + + +class SimpleTrainer(TrainerBase): + """ + A simple trainer for the most common type of task: + single-cost single-optimizer single-data-source iterative optimization, + optionally using data-parallelism. + It assumes that every step, you: + + 1. Compute the loss with a data from the data_loader. + 2. Compute the gradients with the above loss. + 3. Update the model with the optimizer. + + All other tasks during training (checkpointing, logging, evaluation, LR schedule) + are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`. + + If you want to do anything fancier than this, + either subclass TrainerBase and implement your own `run_step`, + or write your own training loop. + """ + + def __init__( + self, + model, + data_loader, + optimizer, + gather_metric_period=1, + zero_grad_before_forward=False, + async_write_metrics=False, + ): + """ + Args: + model: a torch Module. Takes a data from data_loader and returns a + dict of losses. + data_loader: an iterable. Contains data to be used to call model. + optimizer: a torch optimizer. + gather_metric_period: an int. 
Every gather_metric_period iterations + the metrics are gathered from all the ranks to rank 0 and logged. + zero_grad_before_forward: whether to zero the gradients before the forward. + async_write_metrics: bool. If True, then write metrics asynchronously to improve + training speed + """ + super().__init__() + + """ + We set the model to training mode in the trainer. + However it's valid to train a model that's in eval mode. + If you want your model (or a submodule of it) to behave + like evaluation during training, you can overwrite its train() method. + """ + model.train() + + self.model = model + self.data_loader = data_loader + # to access the data loader iterator, call `self._data_loader_iter` + self._data_loader_iter_obj = None + self.optimizer = optimizer + self.gather_metric_period = gather_metric_period + self.zero_grad_before_forward = zero_grad_before_forward + self.async_write_metrics = async_write_metrics + # create a thread pool that can execute non critical logic in run_step asynchronically + # use only 1 worker so tasks will be executred in order of submitting. + self.concurrent_executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + + def run_step(self): + """ + Implement the standard training logic described above. + """ + assert self.model.training, "[SimpleTrainer] model was changed to eval mode!" + start = time.perf_counter() + """ + If you want to do something with the data, you can wrap the dataloader. + """ + data = next(self._data_loader_iter) + data_time = time.perf_counter() - start + + if self.zero_grad_before_forward: + """ + If you need to accumulate gradients or do something similar, you can + wrap the optimizer with your custom `zero_grad()` method. + """ + self.optimizer.zero_grad() + + """ + If you want to do something with the losses, you can wrap the model. + """ + loss_dict = self.model(data) + if isinstance(loss_dict, torch.Tensor): + losses = loss_dict + loss_dict = {"total_loss": loss_dict} + else: + losses = sum(loss_dict.values()) + if not self.zero_grad_before_forward: + """ + If you need to accumulate gradients or do something similar, you can + wrap the optimizer with your custom `zero_grad()` method. + """ + self.optimizer.zero_grad() + losses.backward() + + self.after_backward() + + if self.async_write_metrics: + # write metrics asynchronically + self.concurrent_executor.submit( + self._write_metrics, loss_dict, data_time, iter=self.iter + ) + else: + self._write_metrics(loss_dict, data_time) + + """ + If you need gradient clipping/scaling or other processing, you can + wrap the optimizer with your custom `step()` method. But it is + suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4 + """ + self.optimizer.step() + + @property + def _data_loader_iter(self): + # only create the data loader iterator when it is used + if self._data_loader_iter_obj is None: + self._data_loader_iter_obj = iter(self.data_loader) + return self._data_loader_iter_obj + + def reset_data_loader(self, data_loader_builder): + """ + Delete and replace the current data loader with a new one, which will be created + by calling `data_loader_builder` (without argument). 
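+
+        For example (illustrative; the builder shown is a placeholder for whatever
+        zero-argument callable the caller uses):
+        ``trainer.reset_data_loader(lambda: build_detection_train_loader(cfg))``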
+ """ + del self.data_loader + data_loader = data_loader_builder() + self.data_loader = data_loader + self._data_loader_iter_obj = None + + def _write_metrics( + self, + loss_dict: Mapping[str, torch.Tensor], + data_time: float, + prefix: str = "", + iter: Optional[int] = None, + ) -> None: + logger = logging.getLogger(__name__) + + iter = self.iter if iter is None else iter + if (iter + 1) % self.gather_metric_period == 0: + try: + SimpleTrainer.write_metrics(loss_dict, data_time, iter, prefix) + except Exception: + logger.exception("Exception in writing metrics: ") + raise + + @staticmethod + def write_metrics( + loss_dict: Mapping[str, torch.Tensor], + data_time: float, + cur_iter: int, + prefix: str = "", + ) -> None: + """ + Args: + loss_dict (dict): dict of scalar losses + data_time (float): time taken by the dataloader iteration + prefix (str): prefix for logging keys + """ + metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()} + metrics_dict["data_time"] = data_time + + # Gather metrics among all workers for logging + # This assumes we do DDP-style training, which is currently the only + # supported method in detectron2. + all_metrics_dict = comm.gather(metrics_dict) + + if comm.is_main_process(): + storage = get_event_storage() + + # data_time among workers can have high variance. The actual latency + # caused by data_time is the maximum among workers. + data_time = np.max([x.pop("data_time") for x in all_metrics_dict]) + storage.put_scalar("data_time", data_time, cur_iter=cur_iter) + + # average the rest metrics + metrics_dict = { + k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys() + } + total_losses_reduced = sum(metrics_dict.values()) + if not np.isfinite(total_losses_reduced): + raise FloatingPointError( + f"Loss became infinite or NaN at iteration={cur_iter}!\n" + f"loss_dict = {metrics_dict}" + ) + + storage.put_scalar( + "{}total_loss".format(prefix), total_losses_reduced, cur_iter=cur_iter + ) + if len(metrics_dict) > 1: + storage.put_scalars(cur_iter=cur_iter, **metrics_dict) + + def state_dict(self): + ret = super().state_dict() + ret["optimizer"] = self.optimizer.state_dict() + return ret + + def load_state_dict(self, state_dict): + super().load_state_dict(state_dict) + self.optimizer.load_state_dict(state_dict["optimizer"]) + + def after_train(self): + super().after_train() + self.concurrent_executor.shutdown(wait=True) + + +class AMPTrainer(SimpleTrainer): + """ + Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision + in the training loop. + """ + + def __init__( + self, + model, + data_loader, + optimizer, + gather_metric_period=1, + zero_grad_before_forward=False, + grad_scaler=None, + precision: torch.dtype = torch.float16, + log_grad_scaler: bool = False, + async_write_metrics=False, + ): + """ + Args: + model, data_loader, optimizer, gather_metric_period, zero_grad_before_forward, + async_write_metrics: same as in :class:`SimpleTrainer`. + grad_scaler: torch GradScaler to automatically scale gradients. + precision: torch.dtype as the target precision to cast to in computations + """ + unsupported = "AMPTrainer does not support single-process multi-device training!" 
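+        # For example, a model wrapped as DistributedDataParallel(model, device_ids=[local_rank])
+        # is supported, whereas DataParallel or a DDP wrapper with several device_ids is not.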
+ if isinstance(model, DistributedDataParallel): + assert not (model.device_ids and len(model.device_ids) > 1), unsupported + assert not isinstance(model, DataParallel), unsupported + + super().__init__( + model, data_loader, optimizer, gather_metric_period, zero_grad_before_forward + ) + + if grad_scaler is None: + from torch.cuda.amp import GradScaler + + grad_scaler = GradScaler() + self.grad_scaler = grad_scaler + self.precision = precision + self.log_grad_scaler = log_grad_scaler + + def run_step(self): + """ + Implement the AMP training logic. + """ + assert self.model.training, "[AMPTrainer] model was changed to eval mode!" + assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!" + from torch.cuda.amp import autocast + + start = time.perf_counter() + data = next(self._data_loader_iter) + data_time = time.perf_counter() - start + + if self.zero_grad_before_forward: + self.optimizer.zero_grad() + with autocast(dtype=self.precision): + loss_dict = self.model(data) + if isinstance(loss_dict, torch.Tensor): + losses = loss_dict + loss_dict = {"total_loss": loss_dict} + else: + losses = sum(loss_dict.values()) + + if not self.zero_grad_before_forward: + self.optimizer.zero_grad() + + self.grad_scaler.scale(losses).backward() + + if self.log_grad_scaler: + storage = get_event_storage() + storage.put_scalar("[metric]grad_scaler", self.grad_scaler.get_scale()) + + self.after_backward() + + if self.async_write_metrics: + # write metrics asynchronically + self.concurrent_executor.submit( + self._write_metrics, loss_dict, data_time, iter=self.iter + ) + else: + self._write_metrics(loss_dict, data_time) + + self.grad_scaler.step(self.optimizer) + self.grad_scaler.update() + + def state_dict(self): + ret = super().state_dict() + ret["grad_scaler"] = self.grad_scaler.state_dict() + return ret + + def load_state_dict(self, state_dict): + super().load_state_dict(state_dict) + self.grad_scaler.load_state_dict(state_dict["grad_scaler"]) diff --git a/data_processing/detectron2/detectron2/evaluation/__init__.py b/data_processing/detectron2/detectron2/evaluation/__init__.py new file mode 100644 index 0000000..d96609e --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator +from .coco_evaluation import COCOEvaluator +from .rotated_coco_evaluation import RotatedCOCOEvaluator +from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset +from .lvis_evaluation import LVISEvaluator +from .panoptic_evaluation import COCOPanopticEvaluator +from .pascal_voc_evaluation import PascalVOCDetectionEvaluator +from .sem_seg_evaluation import SemSegEvaluator +from .testing import print_csv_format, verify_results + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/detectron2/evaluation/cityscapes_evaluation.py b/data_processing/detectron2/detectron2/evaluation/cityscapes_evaluation.py new file mode 100644 index 0000000..9cc7888 --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/cityscapes_evaluation.py @@ -0,0 +1,197 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import glob +import logging +import numpy as np +import os +import tempfile +from collections import OrderedDict +import torch +from PIL import Image + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm +from detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + + +class CityscapesEvaluator(DatasetEvaluator): + """ + Base class for evaluation using cityscapes API. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): the name of the dataset. + It must have the following metadata associated with it: + "thing_classes", "gt_dir". + """ + self._metadata = MetadataCatalog.get(dataset_name) + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + def reset(self): + self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") + self._temp_dir = self._working_dir.name + # All workers will write to the same results directory + # TODO this does not work in distributed training + assert ( + comm.get_local_size() == comm.get_world_size() + ), "CityscapesEvaluator currently do not work with multiple machines." + self._temp_dir = comm.all_gather(self._temp_dir)[0] + if self._temp_dir != self._working_dir.name: + self._working_dir.cleanup() + self._logger.info( + "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) + ) + + +class CityscapesInstanceEvaluator(CityscapesEvaluator): + """ + Evaluate instance segmentation results on cityscapes dataset using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. + """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import name2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") + + if "instances" in output: + output = output["instances"].to(self._cpu_device) + num_instances = len(output) + with open(pred_txt, "w") as fout: + for i in range(num_instances): + pred_class = output.pred_classes[i] + classes = self._metadata.thing_classes[pred_class] + class_id = name2label[classes].id + score = output.scores[i] + mask = output.pred_masks[i].numpy().astype("uint8") + png_filename = os.path.join( + self._temp_dir, basename + "_{}_{}.png".format(i, classes) + ) + + Image.fromarray(mask * 255).save(png_filename) + fout.write( + "{} {} {}\n".format(os.path.basename(png_filename), class_id, score) + ) + else: + # Cityscapes requires a prediction file for every ground truth image. + with open(pred_txt, "w") as fout: + pass + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP" and "AP50". 
+ """ + comm.synchronize() + if comm.get_rank() > 0: + return + import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + )["averages"] + + ret = OrderedDict() + ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} + self._working_dir.cleanup() + return ret + + +class CityscapesSemSegEvaluator(CityscapesEvaluator): + """ + Evaluate semantic segmentation results on cityscapes dataset using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. + """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import trainId2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_filename = os.path.join(self._temp_dir, basename + "_pred.png") + + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy() + pred = 255 * np.ones(output.shape, dtype=np.uint8) + for train_id, label in trainId2label.items(): + if label.ignoreInEval: + continue + pred[output == train_id] = label.id + Image.fromarray(pred).save(pred_filename) + + def evaluate(self): + comm.synchronize() + if comm.get_rank() > 0: + return + # Load the Cityscapes eval script *after* setting the required env var, + # since the script reads CITYSCAPES_DATASET into global variables at load time. 
+ import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + ) + ret = OrderedDict() + ret["sem_seg"] = { + "IoU": 100.0 * results["averageScoreClasses"], + "iIoU": 100.0 * results["averageScoreInstClasses"], + "IoU_sup": 100.0 * results["averageScoreCategories"], + "iIoU_sup": 100.0 * results["averageScoreInstCategories"], + } + self._working_dir.cleanup() + return ret diff --git a/data_processing/detectron2/detectron2/evaluation/coco_evaluation.py b/data_processing/detectron2/detectron2/evaluation/coco_evaluation.py new file mode 100644 index 0000000..fe8142c --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/coco_evaluation.py @@ -0,0 +1,722 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import copy +import io +import itertools +import json +import logging +import numpy as np +import os +import pickle +from collections import OrderedDict +import pycocotools.mask as mask_util +import torch +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval +from tabulate import tabulate + +import detectron2.utils.comm as comm +from detectron2.config import CfgNode +from detectron2.data import MetadataCatalog +from detectron2.data.datasets.coco import convert_to_coco_json +from detectron2.structures import Boxes, BoxMode, pairwise_iou +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import create_small_table + +from .evaluator import DatasetEvaluator + +try: + from detectron2.evaluation.fast_eval_api import COCOeval_opt +except ImportError: + COCOeval_opt = COCOeval + + +class COCOEvaluator(DatasetEvaluator): + """ + Evaluate AR for object proposals, AP for instance detection/segmentation, AP + for keypoint detection outputs using COCO's metrics. + See http://cocodataset.org/#detection-eval and + http://cocodataset.org/#keypoints-eval to understand its metrics. + The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means + the metric cannot be computed (e.g. due to no predictions made). + + In addition to COCO, this evaluator is able to support any bounding box detection, + instance segmentation, or keypoint detection dataset. 
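+
+    Example (an illustrative sketch; the dataset name, data loader and model are placeholders):
+    ::
+        evaluator = COCOEvaluator("coco_2017_val", output_dir="./output")
+        results = inference_on_dataset(model, val_loader, evaluator)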
+ """ + + def __init__( + self, + dataset_name, + tasks=None, + distributed=True, + output_dir=None, + *, + max_dets_per_image=None, + use_fast_impl=True, + kpt_oks_sigmas=(), + allow_cached_coco=True, + ): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + It must have either the following corresponding metadata: + + "json_file": the path to the COCO format annotation + + Or it must be in detectron2's standard dataset format + so it can be converted to COCO format automatically. + tasks (tuple[str]): tasks that can be evaluated under the given + configuration. A task is one of "bbox", "segm", "keypoints". + By default, will infer this automatically from predictions. + distributed (True): if True, will collect results from all ranks and run evaluation + in the main process. + Otherwise, will only evaluate the results in the current process. + output_dir (str): optional, an output directory to dump all + results predicted on the dataset. The dump contains two files: + + 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and + contains all the results in the format they are produced by the model. + 2. "coco_instances_results.json" a json file in COCO's result format. + max_dets_per_image (int): limit on the maximum number of detections per image. + By default in COCO, this limit is to 100, but this can be customized + to be greater, as is needed in evaluation metrics AP fixed and AP pool + (see https://arxiv.org/pdf/2102.01066.pdf) + This doesn't affect keypoint evaluation. + use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. + Although the results should be very close to the official implementation in COCO + API, it is still recommended to compute results with the official API for use in + papers. The faster implementation also uses more RAM. + kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. + See http://cocodataset.org/#keypoints-eval + When empty, it will use the defaults in COCO. + Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. + allow_cached_coco (bool): Whether to use cached coco json from previous validation + runs. You should set this to False if you need to use different validation data. + Defaults to True. + """ + self._logger = logging.getLogger(__name__) + self._distributed = distributed + self._output_dir = output_dir + + if use_fast_impl and (COCOeval_opt is COCOeval): + self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") + use_fast_impl = False + self._use_fast_impl = use_fast_impl + + # COCOeval requires the limit on the number of detections per image (maxDets) to be a list + # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the + # 3rd element (100) is used as the limit on the number of detections per image when + # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, + # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. 
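+        # For example, max_dets_per_image=300 becomes maxDets=[1, 10, 300] for COCOeval.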
+ if max_dets_per_image is None: + max_dets_per_image = [1, 10, 100] + else: + max_dets_per_image = [1, 10, max_dets_per_image] + self._max_dets_per_image = max_dets_per_image + + if tasks is not None and isinstance(tasks, CfgNode): + kpt_oks_sigmas = ( + tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas + ) + self._logger.warn( + "COCO Evaluator instantiated using config, this is deprecated behavior." + " Please pass in explicit arguments instead." + ) + self._tasks = None # Infering it from predictions should be better + else: + self._tasks = tasks + + self._cpu_device = torch.device("cpu") + + self._metadata = MetadataCatalog.get(dataset_name) + if not hasattr(self._metadata, "json_file"): + if output_dir is None: + raise ValueError( + "output_dir must be provided to COCOEvaluator " + "for datasets not in COCO format." + ) + self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") + + cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") + self._metadata.json_file = cache_path + convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) + + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + + # Test set json files do not contain annotations (evaluation must be + # performed using the COCO evaluation server). + self._do_evaluation = "annotations" in self._coco_api.dataset + if self._do_evaluation: + self._kpt_oks_sigmas = kpt_oks_sigmas + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + if len(prediction) > 1: + self._predictions.append(prediction) + + def evaluate(self, img_ids=None): + """ + Args: + img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset + """ + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return {} + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "proposals" in predictions[0]: + self._eval_box_proposals(predictions) + if "instances" in predictions[0]: + self._eval_predictions(predictions, img_ids=img_ids) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _tasks_from_predictions(self, predictions): + """ + Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. 
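+
+        For example, predictions that carry "segmentation" fields but no "keypoints"
+        yield ``["bbox", "segm"]``.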
+ """ + tasks = {"bbox"} + for pred in predictions: + if "segmentation" in pred: + tasks.add("segm") + if "keypoints" in pred: + tasks.add("keypoints") + return sorted(tasks) + + def _eval_predictions(self, predictions, img_ids=None): + """ + Evaluate predictions. Fill self._results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + tasks = self._tasks or self._tasks_from_predictions(coco_results) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id + all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) + num_classes = len(all_contiguous_ids) + assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 + + reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} + for result in coco_results: + category_id = result["category_id"] + assert category_id < num_classes, ( + f"A prediction has class={category_id}, " + f"but the dataset only has {num_classes} classes and " + f"predicted class id should be in [0, {num_classes - 1}]." + ) + result["category_id"] = reverse_id_mapping[category_id] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info( + "Evaluating predictions with {} COCO API...".format( + "unofficial" if self._use_fast_impl else "official" + ) + ) + for task in sorted(tasks): + assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" + coco_eval = ( + _evaluate_predictions_on_coco( + self._coco_api, + coco_results, + task, + kpt_oks_sigmas=self._kpt_oks_sigmas, + cocoeval_fn=COCOeval_opt if self._use_fast_impl else COCOeval, + img_ids=img_ids, + max_dets_per_image=self._max_dets_per_image, + ) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _eval_box_proposals(self, predictions): + """ + Evaluate the box proposals in predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. 
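+            # The dump is a dict with keys "boxes", "objectness_logits", "ids" and
+            # "bbox_mode", written to <output_dir>/box_proposals.pkl; it can be read
+            # back later, e.g. with pickle.load(open(path, "rb")).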
+ bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + def _derive_coco_results(self, coco_eval, iou_type, class_names=None): + """ + Derive the desired score numbers from summarized COCOeval. + + Args: + coco_eval (None or COCOEval): None represents no predictions from model. + iou_type (str): + class_names (None or list[str]): if provided, will use it to predict + per-category AP. + + Returns: + a dict of {metric name: score} + """ + + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], + "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], + "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], + }[iou_type] + + if coco_eval is None: + self._logger.warn("No predictions from the model!") + return {metric: float("nan") for metric in metrics} + + # the standard metrics + results = { + metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") + for idx, metric in enumerate(metrics) + } + self._logger.info( + "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) + ) + if not np.isfinite(sum(results.values())): + self._logger.info("Some metrics cannot be computed and is shown as NaN.") + + if class_names is None or len(class_names) <= 1: + return results + # Compute per-category AP + # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa + precisions = coco_eval.eval["precision"] + # precision has dims (iou, recall, cls, area range, max dets) + assert len(class_names) == precisions.shape[2] + + results_per_category = [] + for idx, name in enumerate(class_names): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + precision = precisions[:, :, idx, 0, -1] + precision = precision[precision > -1] + ap = np.mean(precision) if precision.size else float("nan") + results_per_category.append(("{}".format(name), float(ap * 100))) + + # tabulate it + N_COLS = min(6, len(results_per_category) * 2) + results_flatten = list(itertools.chain(*results_per_category)) + results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) + table = tabulate( + results_2d, + tablefmt="pipe", + floatfmt=".3f", + headers=["category", "AP"] * (N_COLS // 2), + numalign="left", + ) + self._logger.info("Per-category {} AP: \n".format(iou_type) + table) + + results.update({"AP-" + 
name: ap for name, ap in results_per_category}) + return results + + +def instances_to_coco_json(instances, img_id): + """ + Dump an "Instances" object to a COCO-format json that's used for evaluation. + + Args: + instances (Instances): + img_id (int): the image id + + Returns: + list[dict]: list of json annotations in COCO format. + """ + num_instance = len(instances) + if num_instance == 0: + return [] + + boxes = instances.pred_boxes.tensor.numpy() + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + boxes = boxes.tolist() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + + has_mask = instances.has("pred_masks") + if has_mask: + # use RLE to encode the masks, because they are too large and takes memory + # since this evaluator stores outputs of the entire dataset + rles = [ + mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] + for mask in instances.pred_masks + ] + for rle in rles: + # "counts" is an array encoded by mask_util as a byte-stream. Python3's + # json writer which always produces strings cannot serialize a bytestream + # unless you decode it. Thankfully, utf-8 works out (which is also what + # the pycocotools/_mask.pyx does). + rle["counts"] = rle["counts"].decode("utf-8") + + has_keypoints = instances.has("pred_keypoints") + if has_keypoints: + keypoints = instances.pred_keypoints + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": boxes[k], + "score": scores[k], + } + if has_mask: + result["segmentation"] = rles[k] + if has_keypoints: + # In COCO annotations, + # keypoints coordinates are pixel indices. + # However our predictions are floating point coordinates. + # Therefore we subtract 0.5 to be consistent with the annotation format. + # This is the inverse of data loading logic in `datasets/coco.py`. + keypoints[k][:, :2] -= 0.5 + result["keypoints"] = keypoints[k].flatten().tolist() + results.append(result) + return results + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official COCO API recall evaluation code. However, + it produces slightly different results. 
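+
+    Returns:
+        dict: with keys "ar", "recalls", "thresholds", "gt_overlaps" and "num_pos".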
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0**2, 1e5**2], # all + [0**2, 32**2], # small + [32**2, 96**2], # medium + [96**2, 1e5**2], # large + [96**2, 128**2], # 96-128 + [128**2, 256**2], # 128-256 + [256**2, 512**2], # 256-512 + [512**2, 1e5**2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) + anno = coco_api.loadAnns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + for obj in anno + if obj["iscrowd"] == 0 + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = ( + torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) + ) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_coco( + coco_gt, + coco_results, + iou_type, + kpt_oks_sigmas=None, + cocoeval_fn=COCOeval_opt, + img_ids=None, + max_dets_per_image=None, +): + """ + Evaluate the coco results using COCOEval API. 
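+
+    Returns:
+        a COCOeval (or COCOeval_opt) object on which ``evaluate()``, ``accumulate()``
+        and ``summarize()`` have already been run.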
+ """ + assert len(coco_results) > 0 + + if iou_type == "segm": + coco_results = copy.deepcopy(coco_results) + # When evaluating mask AP, if the results contain bbox, cocoapi will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in coco_results: + c.pop("bbox", None) + + coco_dt = coco_gt.loadRes(coco_results) + coco_eval = cocoeval_fn(coco_gt, coco_dt, iou_type) + # For COCO, the default max_dets_per_image is [1, 10, 100]. + if max_dets_per_image is None: + max_dets_per_image = [1, 10, 100] # Default from COCOEval + else: + assert ( + len(max_dets_per_image) >= 3 + ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3" + # In the case that user supplies a custom input for max_dets_per_image, + # apply COCOevalMaxDets to evaluate AP with the custom input. + if max_dets_per_image[2] != 100: + coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type) + if iou_type != "keypoints": + coco_eval.params.maxDets = max_dets_per_image + + if img_ids is not None: + coco_eval.params.imgIds = img_ids + + if iou_type == "keypoints": + # Use the COCO default keypoint OKS sigmas unless overrides are specified + if kpt_oks_sigmas: + assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!" + coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) + # COCOAPI requires every detection and every gt to have keypoints, so + # we just take the first entry from both + num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 + num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 + num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) + assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( + f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. " + f"Ground truth contains {num_keypoints_gt} keypoints. " + f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " + "They have to agree with each other. For meaning of OKS, please refer to " + "http://cocodataset.org/#keypoints-eval." 
+ ) + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval + + +class COCOevalMaxDets(COCOeval): + """ + Modified version of COCOeval for evaluating AP with a custom + maxDets (by default for COCO, maxDets is 100) + """ + + def summarize(self): + """ + Compute and display summary metrics for evaluation results given + a custom value for max_dets_per_image + """ + + def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): + p = self.params + iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" + titleStr = "Average Precision" if ap == 1 else "Average Recall" + typeStr = "(AP)" if ap == 1 else "(AR)" + iouStr = ( + "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) + if iouThr is None + else "{:0.2f}".format(iouThr) + ) + + aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval["precision"] + # IoU + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval["recall"] + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)) + return mean_s + + def _summarizeDets(): + stats = np.zeros((12,)) + # Evaluate AP using the custom limit on maximum detections per image + stats[0] = _summarize(1, maxDets=self.params.maxDets[2]) + stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) + stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) + stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) + stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) + stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) + stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) + stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) + return stats + + def _summarizeKps(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=0.5) + stats[2] = _summarize(1, maxDets=20, iouThr=0.75) + stats[3] = _summarize(1, maxDets=20, areaRng="medium") + stats[4] = _summarize(1, maxDets=20, areaRng="large") + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=0.5) + stats[7] = _summarize(0, maxDets=20, iouThr=0.75) + stats[8] = _summarize(0, maxDets=20, areaRng="medium") + stats[9] = _summarize(0, maxDets=20, areaRng="large") + return stats + + if not self.eval: + raise Exception("Please run accumulate() first") + iouType = self.params.iouType + if iouType == "segm" or iouType == "bbox": + summarize = _summarizeDets + elif iouType == "keypoints": + summarize = _summarizeKps + self.stats = summarize() + + def __str__(self): + self.summarize() diff --git a/data_processing/detectron2/detectron2/evaluation/evaluator.py b/data_processing/detectron2/detectron2/evaluation/evaluator.py new file mode 100644 index 0000000..baf9960 --- /dev/null +++ 
b/data_processing/detectron2/detectron2/evaluation/evaluator.py @@ -0,0 +1,224 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import datetime +import logging +import time +from collections import OrderedDict, abc +from contextlib import ExitStack, contextmanager +from typing import List, Union +import torch +from torch import nn + +from detectron2.utils.comm import get_world_size, is_main_process +from detectron2.utils.logger import log_every_n_seconds + + +class DatasetEvaluator: + """ + Base class for a dataset evaluator. + + The function :func:`inference_on_dataset` runs the model over + all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. + + This class will accumulate information of the inputs/outputs (by :meth:`process`), + and produce evaluation results in the end (by :meth:`evaluate`). + """ + + def reset(self): + """ + Preparation for a new round of evaluation. + Should be called before starting a round of evaluation. + """ + pass + + def process(self, inputs, outputs): + """ + Process the pair of inputs and outputs. + If they contain batches, the pairs can be consumed one-by-one using `zip`: + + .. code-block:: python + + for input_, output in zip(inputs, outputs): + # do evaluation on single input/output pair + ... + + Args: + inputs (list): the inputs that's used to call the model. + outputs (list): the return value of `model(inputs)` + """ + pass + + def evaluate(self): + """ + Evaluate/summarize the performance, after processing all input/output pairs. + + Returns: + dict: + A new evaluator class can return a dict of arbitrary format + as long as the user can process the results. + In our train_net.py, we expect the following format: + + * key: the name of the task (e.g., bbox) + * value: a dict of {metric name: score}, e.g.: {"AP50": 80} + """ + pass + + +class DatasetEvaluators(DatasetEvaluator): + """ + Wrapper class to combine multiple :class:`DatasetEvaluator` instances. + + This class dispatches every evaluation call to + all of its :class:`DatasetEvaluator`. + """ + + def __init__(self, evaluators): + """ + Args: + evaluators (list): the evaluators to combine. + """ + super().__init__() + self._evaluators = evaluators + + def reset(self): + for evaluator in self._evaluators: + evaluator.reset() + + def process(self, inputs, outputs): + for evaluator in self._evaluators: + evaluator.process(inputs, outputs) + + def evaluate(self): + results = OrderedDict() + for evaluator in self._evaluators: + result = evaluator.evaluate() + if is_main_process() and result is not None: + for k, v in result.items(): + assert ( + k not in results + ), "Different evaluators produce results with the same key {}".format(k) + results[k] = v + return results + + +def inference_on_dataset( + model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None] +): + """ + Run model on the data_loader and evaluate the metrics with evaluator. + Also benchmark the inference speed of `model.__call__` accurately. + The model will be used in eval mode. + + Args: + model (callable): a callable which takes an object from + `data_loader` and returns some outputs. + + If it's an nn.Module, it will be temporarily set to `eval` mode. + If you wish to evaluate a model in `training` mode instead, you can + wrap the given model and override its behavior of `.eval()` and `.train()`. + data_loader: an iterable object with a length. + The elements it generates will be the inputs to the model. + evaluator: the evaluator(s) to run. 
Use `None` if you only want to benchmark, + but don't want to do any evaluation. + + Returns: + The return value of `evaluator.evaluate()` + """ + num_devices = get_world_size() + logger = logging.getLogger(__name__) + logger.info("Start inference on {} batches".format(len(data_loader))) + + total = len(data_loader) # inference data loader must have a fixed length + if evaluator is None: + # create a no-op evaluator + evaluator = DatasetEvaluators([]) + if isinstance(evaluator, abc.MutableSequence): + evaluator = DatasetEvaluators(evaluator) + evaluator.reset() + + num_warmup = min(5, total - 1) + start_time = time.perf_counter() + total_data_time = 0 + total_compute_time = 0 + total_eval_time = 0 + with ExitStack() as stack: + if isinstance(model, nn.Module): + stack.enter_context(inference_context(model)) + stack.enter_context(torch.no_grad()) + + start_data_time = time.perf_counter() + for idx, inputs in enumerate(data_loader): + total_data_time += time.perf_counter() - start_data_time + if idx == num_warmup: + start_time = time.perf_counter() + total_data_time = 0 + total_compute_time = 0 + total_eval_time = 0 + + start_compute_time = time.perf_counter() + outputs = model(inputs) + if torch.cuda.is_available(): + torch.cuda.synchronize() + total_compute_time += time.perf_counter() - start_compute_time + + start_eval_time = time.perf_counter() + evaluator.process(inputs, outputs) + total_eval_time += time.perf_counter() - start_eval_time + + iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) + data_seconds_per_iter = total_data_time / iters_after_start + compute_seconds_per_iter = total_compute_time / iters_after_start + eval_seconds_per_iter = total_eval_time / iters_after_start + total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start + if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: + eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1))) + log_every_n_seconds( + logging.INFO, + ( + f"Inference done {idx + 1}/{total}. " + f"Dataloading: {data_seconds_per_iter:.4f} s/iter. " + f"Inference: {compute_seconds_per_iter:.4f} s/iter. " + f"Eval: {eval_seconds_per_iter:.4f} s/iter. " + f"Total: {total_seconds_per_iter:.4f} s/iter. " + f"ETA={eta}" + ), + n=5, + ) + start_data_time = time.perf_counter() + + # Measure the time only for this worker (before the synchronization barrier) + total_time = time.perf_counter() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + # NOTE this format is parsed by grep + logger.info( + "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( + total_time_str, total_time / (total - num_warmup), num_devices + ) + ) + total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) + logger.info( + "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( + total_compute_time_str, total_compute_time / (total - num_warmup), num_devices + ) + ) + + results = evaluator.evaluate() + # An evaluator may return None when not in main process. + # Replace it by an empty dict instead to make it easier for downstream code to handle + if results is None: + results = {} + return results + + +@contextmanager +def inference_context(model): + """ + A context where the model is temporarily changed to eval mode, + and restored to previous mode afterwards. 
+ + Args: + model: a torch Module + """ + training_mode = model.training + model.eval() + yield + model.train(training_mode) diff --git a/data_processing/detectron2/detectron2/evaluation/fast_eval_api.py b/data_processing/detectron2/detectron2/evaluation/fast_eval_api.py new file mode 100644 index 0000000..2eb202b --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/fast_eval_api.py @@ -0,0 +1,121 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import numpy as np +import time +from pycocotools.cocoeval import COCOeval + +from detectron2 import _C + +logger = logging.getLogger(__name__) + + +class COCOeval_opt(COCOeval): + """ + This is a slightly modified version of the original COCO API, where the functions evaluateImg() + and accumulate() are implemented in C++ to speedup evaluation + """ + + def evaluate(self): + """ + Run per image evaluation on given images and store results in self.evalImgs_cpp, a + datastructure that isn't readable from Python but is used by a c++ implementation of + accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure + self.evalImgs because this datastructure is a computational bottleneck. + :return: None + """ + tic = time.time() + + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = "segm" if p.useSegm == 1 else "bbox" + logger.info("Evaluate annotation type *{}*".format(p.iouType)) + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() # bottleneck + + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType == "segm" or p.iouType == "bbox": + computeIoU = self.computeIoU + elif p.iouType == "keypoints": + computeIoU = self.computeOks + self.ious = { + (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds + } # bottleneck + + maxDet = p.maxDets[-1] + + # <<<< Beginning of code differences with original COCO API + def convert_instances_to_cpp(instances, is_det=False): + # Convert annotations for a list of instances in an image to a format that's fast + # to access in C++ + instances_cpp = [] + for instance in instances: + instance_cpp = _C.InstanceAnnotation( + int(instance["id"]), + instance["score"] if is_det else instance.get("score", 0.0), + instance["area"], + bool(instance.get("iscrowd", 0)), + bool(instance.get("ignore", 0)), + ) + instances_cpp.append(instance_cpp) + return instances_cpp + + # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++ + ground_truth_instances = [ + [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds] + for imgId in p.imgIds + ] + detected_instances = [ + [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds] + for imgId in p.imgIds + ] + ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds] + + if not p.useCats: + # For each image, flatten per-category lists into a single list + ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances] + detected_instances = [[[o for c in i for o in c]] for i in detected_instances] + + # Call C++ implementation of self.evaluateImgs() + self._evalImgs_cpp = _C.COCOevalEvaluateImages( + p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances + ) + self._evalImgs = None + + self._paramsEval 
= copy.deepcopy(self.params)
+        toc = time.time()
+        logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
+        # >>>> End of code differences with original COCO API
+
+    def accumulate(self):
+        """
+        Accumulate per image evaluation results and store the result in self.eval. Does not
+        support changing parameter settings from those used by self.evaluate()
+        """
+        logger.info("Accumulating evaluation results...")
+        tic = time.time()
+        assert hasattr(
+            self, "_evalImgs_cpp"
+        ), "evaluate() must be called before accumulate() is called."
+
+        self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
+
+        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
+        self.eval["recall"] = np.array(self.eval["recall"]).reshape(
+            self.eval["counts"][:1] + self.eval["counts"][2:]
+        )
+
+        # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
+        # num_area_ranges X num_max_detections
+        self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
+        self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
+        toc = time.time()
+        logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
diff --git a/data_processing/detectron2/detectron2/evaluation/lvis_evaluation.py b/data_processing/detectron2/detectron2/evaluation/lvis_evaluation.py
new file mode 100644
index 0000000..6cc854a
--- /dev/null
+++ b/data_processing/detectron2/detectron2/evaluation/lvis_evaluation.py
@@ -0,0 +1,380 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import itertools
+import json
+import logging
+import os
+import pickle
+from collections import OrderedDict
+import torch
+
+import detectron2.utils.comm as comm
+from detectron2.config import CfgNode
+from detectron2.data import MetadataCatalog
+from detectron2.structures import Boxes, BoxMode, pairwise_iou
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import create_small_table
+
+from .coco_evaluation import instances_to_coco_json
+from .evaluator import DatasetEvaluator
+
+
+class LVISEvaluator(DatasetEvaluator):
+    """
+    Evaluate object proposal and instance detection/segmentation outputs using
+    LVIS's metrics and evaluation API.
+    """
+
+    def __init__(
+        self,
+        dataset_name,
+        tasks=None,
+        distributed=True,
+        output_dir=None,
+        *,
+        max_dets_per_image=None,
+    ):
+        """
+        Args:
+            dataset_name (str): name of the dataset to be evaluated.
+                It must have the following corresponding metadata:
+                "json_file": the path to the LVIS format annotation
+            tasks (tuple[str]): tasks that can be evaluated under the given
+                configuration. A task is one of "bbox", "segm".
+                By default, will infer this automatically from predictions.
+            distributed (True): if True, will collect results from all ranks for evaluation.
+                Otherwise, will evaluate the results in the current process.
+            output_dir (str): optional, an output directory to dump results.
+            max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
+                This limit, by default of the LVIS dataset, is 300.
+        """
+        from lvis import LVIS
+
+        self._logger = logging.getLogger(__name__)
+
+        if tasks is not None and isinstance(tasks, CfgNode):
+            self._logger.warn(
+                "COCO Evaluator instantiated using config, this is deprecated behavior."
+                " Please pass in explicit arguments instead."
+ ) + self._tasks = None # Infering it from predictions should be better + else: + self._tasks = tasks + + self._distributed = distributed + self._output_dir = output_dir + self._max_dets_per_image = max_dets_per_image + + self._cpu_device = torch.device("cpu") + + self._metadata = MetadataCatalog.get(dataset_name) + json_file = PathManager.get_local_path(self._metadata.json_file) + self._lvis_api = LVIS(json_file) + # Test set json files do not contain annotations (evaluation must be + # performed using the LVIS evaluation server). + self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0 + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a LVIS model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def evaluate(self): + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[LVISEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "proposals" in predictions[0]: + self._eval_box_proposals(predictions) + if "instances" in predictions[0]: + self._eval_predictions(predictions) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _tasks_from_predictions(self, predictions): + for pred in predictions: + if "segmentation" in pred: + return ("bbox", "segm") + return ("bbox",) + + def _eval_predictions(self, predictions): + """ + Evaluate predictions. Fill self._results with the metrics of the tasks. + + Args: + predictions (list[dict]): list of outputs from the model + """ + self._logger.info("Preparing results in the LVIS format ...") + lvis_results = list(itertools.chain(*[x["instances"] for x in predictions])) + tasks = self._tasks or self._tasks_from_predictions(lvis_results) + + # LVIS evaluator can be used to evaluate results for COCO dataset categories. + # In this case `_metadata` variable will have a field with COCO-specific category mapping. 
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in lvis_results: + result["category_id"] = reverse_id_mapping[result["category_id"]] + else: + # unmap the category ids for LVIS (from 0-indexed to 1-indexed) + for result in lvis_results: + result["category_id"] += 1 + + if self._output_dir: + file_path = os.path.join(self._output_dir, "lvis_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(lvis_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + for task in sorted(tasks): + res = _evaluate_predictions_on_lvis( + self._lvis_api, + lvis_results, + task, + max_dets_per_image=self._max_dets_per_image, + class_names=self._metadata.get("thing_classes"), + ) + self._results[task] = res + + def _eval_box_proposals(self, predictions): + """ + Evaluate the box proposals in predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. + bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official LVIS API recall evaluation code. However, + it produces slightly different results. 
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0**2, 1e5**2], # all + [0**2, 32**2], # small + [32**2, 96**2], # medium + [96**2, 1e5**2], # large + [96**2, 128**2], # 96-128 + [128**2, 256**2], # 128-256 + [256**2, 512**2], # 256-512 + [512**2, 1e5**2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]]) + anno = lvis_api.load_anns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = ( + torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) + ) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_lvis( + lvis_gt, lvis_results, iou_type, max_dets_per_image=None, class_names=None +): + """ + Args: + iou_type (str): + max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP + This limit, by default of the LVIS dataset, is 300. + class_names (None or list[str]): if provided, will use it to predict + per-category AP. 
+ + Returns: + a dict of {metric name: score} + """ + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + }[iou_type] + + logger = logging.getLogger(__name__) + + if len(lvis_results) == 0: # TODO: check if needed + logger.warn("No predictions from the model!") + return {metric: float("nan") for metric in metrics} + + if iou_type == "segm": + lvis_results = copy.deepcopy(lvis_results) + # When evaluating mask AP, if the results contain bbox, LVIS API will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in lvis_results: + c.pop("bbox", None) + + if max_dets_per_image is None: + max_dets_per_image = 300 # Default for LVIS dataset + + from lvis import LVISEval, LVISResults + + logger.info(f"Evaluating with max detections per image = {max_dets_per_image}") + lvis_results = LVISResults(lvis_gt, lvis_results, max_dets=max_dets_per_image) + lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type) + lvis_eval.run() + lvis_eval.print_results() + + # Pull the standard metrics from the LVIS results + results = lvis_eval.get_results() + results = {metric: float(results[metric] * 100) for metric in metrics} + logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results)) + return results diff --git a/data_processing/detectron2/detectron2/evaluation/panoptic_evaluation.py b/data_processing/detectron2/detectron2/evaluation/panoptic_evaluation.py new file mode 100644 index 0000000..9fb3462 --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/panoptic_evaluation.py @@ -0,0 +1,199 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import io +import itertools +import json +import logging +import numpy as np +import os +import tempfile +from collections import OrderedDict +from typing import Optional +from PIL import Image +from tabulate import tabulate + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm +from detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + +logger = logging.getLogger(__name__) + + +class COCOPanopticEvaluator(DatasetEvaluator): + """ + Evaluate Panoptic Quality metrics on COCO using PanopticAPI. + It saves panoptic segmentation prediction in `output_dir` + + It contains a synchronize call and has to be called from all workers. + """ + + def __init__(self, dataset_name: str, output_dir: Optional[str] = None): + """ + Args: + dataset_name: name of the dataset + output_dir: output directory to save results for evaluation. + """ + self._metadata = MetadataCatalog.get(dataset_name) + self._thing_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + self._stuff_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items() + } + + self._output_dir = output_dir + if self._output_dir is not None: + PathManager.mkdirs(self._output_dir) + + def reset(self): + self._predictions = [] + + def _convert_category_id(self, segment_info): + isthing = segment_info.pop("isthing", None) + if isthing is None: + # the model produces panoptic category id directly. 
No more conversion needed + return segment_info + if isthing is True: + segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + else: + segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + return segment_info + + def process(self, inputs, outputs): + from panopticapi.utils import id2rgb + + for input, output in zip(inputs, outputs): + panoptic_img, segments_info = output["panoptic_seg"] + panoptic_img = panoptic_img.cpu().numpy() + if segments_info is None: + # If "segments_info" is None, we assume "panoptic_img" is a + # H*W int32 image storing the panoptic_id in the format of + # category_id * label_divisor + instance_id. We reserve -1 for + # VOID label, and add 1 to panoptic_img since the official + # evaluation script uses 0 for VOID label. + label_divisor = self._metadata.label_divisor + segments_info = [] + for panoptic_label in np.unique(panoptic_img): + if panoptic_label == -1: + # VOID region. + continue + pred_class = panoptic_label // label_divisor + isthing = ( + pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values() + ) + segments_info.append( + { + "id": int(panoptic_label) + 1, + "category_id": int(pred_class), + "isthing": bool(isthing), + } + ) + # Official evaluation script uses 0 for VOID label. + panoptic_img += 1 + + file_name = os.path.basename(input["file_name"]) + file_name_png = os.path.splitext(file_name)[0] + ".png" + with io.BytesIO() as out: + Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG") + segments_info = [self._convert_category_id(x) for x in segments_info] + self._predictions.append( + { + "image_id": input["image_id"], + "file_name": file_name_png, + "png_string": out.getvalue(), + "segments_info": segments_info, + } + ) + + def evaluate(self): + comm.synchronize() + + self._predictions = comm.gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not comm.is_main_process(): + return + + # PanopticApi requires local files + gt_json = PathManager.get_local_path(self._metadata.panoptic_json) + gt_folder = PathManager.get_local_path(self._metadata.panoptic_root) + + with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir: + logger.info("Writing all panoptic predictions to {} ...".format(pred_dir)) + for p in self._predictions: + with open(os.path.join(pred_dir, p["file_name"]), "wb") as f: + f.write(p.pop("png_string")) + + with open(gt_json, "r") as f: + json_data = json.load(f) + json_data["annotations"] = self._predictions + + output_dir = self._output_dir or pred_dir + predictions_json = os.path.join(output_dir, "predictions.json") + with PathManager.open(predictions_json, "w") as f: + f.write(json.dumps(json_data)) + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + gt_json, + PathManager.get_local_path(predictions_json), + gt_folder=gt_folder, + pred_folder=pred_dir, + ) + + res = {} + res["PQ"] = 100 * pq_res["All"]["pq"] + res["SQ"] = 100 * pq_res["All"]["sq"] + res["RQ"] = 100 * pq_res["All"]["rq"] + res["PQ_th"] = 100 * pq_res["Things"]["pq"] + res["SQ_th"] = 100 * pq_res["Things"]["sq"] + res["RQ_th"] = 100 * pq_res["Things"]["rq"] + res["PQ_st"] = 100 * pq_res["Stuff"]["pq"] + res["SQ_st"] = 100 * pq_res["Stuff"]["sq"] + res["RQ_st"] = 100 * pq_res["Stuff"]["rq"] + + results = OrderedDict({"panoptic_seg": res}) + _print_panoptic_results(pq_res) + + return results + + +def 
_print_panoptic_results(pq_res): + headers = ["", "PQ", "SQ", "RQ", "#categories"] + data = [] + for name in ["All", "Things", "Stuff"]: + row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] + data.append(row) + table = tabulate( + data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" + ) + logger.info("Panoptic Evaluation Results:\n" + table) + + +if __name__ == "__main__": + from detectron2.utils.logger import setup_logger + + logger = setup_logger() + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--gt-json") + parser.add_argument("--gt-dir") + parser.add_argument("--pred-json") + parser.add_argument("--pred-dir") + args = parser.parse_args() + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir + ) + _print_panoptic_results(pq_res) diff --git a/data_processing/detectron2/detectron2/evaluation/pascal_voc_evaluation.py b/data_processing/detectron2/detectron2/evaluation/pascal_voc_evaluation.py new file mode 100644 index 0000000..88bb42e --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/pascal_voc_evaluation.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +import numpy as np +import os +import tempfile +import xml.etree.ElementTree as ET +from collections import OrderedDict, defaultdict +from functools import lru_cache +import torch + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm +from detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + + +class PascalVOCDetectionEvaluator(DatasetEvaluator): + """ + Evaluate Pascal VOC style AP for Pascal VOC dataset. + It contains a synchronization, therefore has to be called from all ranks. + + Note that the concept of AP can be implemented in different ways and may not + produce identical results. This class mimics the implementation of the official + Pascal VOC Matlab API, and should produce similar but not identical results to the + official API. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): name of the dataset, e.g., "voc_2007_test" + """ + self._dataset_name = dataset_name + meta = MetadataCatalog.get(dataset_name) + + # Too many tiny files, download all to local for speed. 
+ annotation_dir_local = PathManager.get_local_path( + os.path.join(meta.dirname, "Annotations/") + ) + self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml") + self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt") + self._class_names = meta.thing_classes + assert meta.year in [2007, 2012], meta.year + self._is_2007 = meta.year == 2007 + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + def reset(self): + self._predictions = defaultdict(list) # class name -> list of prediction strings + + def process(self, inputs, outputs): + for input, output in zip(inputs, outputs): + image_id = input["image_id"] + instances = output["instances"].to(self._cpu_device) + boxes = instances.pred_boxes.tensor.numpy() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + for box, score, cls in zip(boxes, scores, classes): + xmin, ymin, xmax, ymax = box + # The inverse of data loading logic in `datasets/pascal_voc.py` + xmin += 1 + ymin += 1 + self._predictions[cls].append( + f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}" + ) + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75". + """ + all_predictions = comm.gather(self._predictions, dst=0) + if not comm.is_main_process(): + return + predictions = defaultdict(list) + for predictions_per_rank in all_predictions: + for clsid, lines in predictions_per_rank.items(): + predictions[clsid].extend(lines) + del all_predictions + + self._logger.info( + "Evaluating {} using {} metric. " + "Note that results do not use the official Matlab API.".format( + self._dataset_name, 2007 if self._is_2007 else 2012 + ) + ) + + with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname: + res_file_template = os.path.join(dirname, "{}.txt") + + aps = defaultdict(list) # iou -> ap per class + for cls_id, cls_name in enumerate(self._class_names): + lines = predictions.get(cls_id, [""]) + + with open(res_file_template.format(cls_name), "w") as f: + f.write("\n".join(lines)) + + for thresh in range(50, 100, 5): + rec, prec, ap = voc_eval( + res_file_template, + self._anno_file_template, + self._image_set_path, + cls_name, + ovthresh=thresh / 100.0, + use_07_metric=self._is_2007, + ) + aps[thresh].append(ap * 100) + + ret = OrderedDict() + mAP = {iou: np.mean(x) for iou, x in aps.items()} + ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]} + return ret + + +############################################################################## +# +# Below code is modified from +# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py +# -------------------------------------------------------- +# Fast/er R-CNN +# Licensed under The MIT License [see LICENSE for details] +# Written by Bharath Hariharan +# -------------------------------------------------------- + +"""Python implementation of the PASCAL VOC devkit's AP evaluation code.""" + + +@lru_cache(maxsize=None) +def parse_rec(filename): + """Parse a PASCAL VOC xml file.""" + with PathManager.open(filename) as f: + tree = ET.parse(f) + objects = [] + for obj in tree.findall("object"): + obj_struct = {} + obj_struct["name"] = obj.find("name").text + obj_struct["pose"] = obj.find("pose").text + obj_struct["truncated"] = int(obj.find("truncated").text) + obj_struct["difficult"] = int(obj.find("difficult").text) + bbox 
= obj.find("bndbox") + obj_struct["bbox"] = [ + int(bbox.find("xmin").text), + int(bbox.find("ymin").text), + int(bbox.find("xmax").text), + int(bbox.find("ymax").text), + ] + objects.append(obj_struct) + + return objects + + +def voc_ap(rec, prec, use_07_metric=False): + """Compute VOC AP given precision and recall. If use_07_metric is true, uses + the VOC 07 11-point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0.0 + for t in np.arange(0.0, 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11.0 + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.0], rec, [1.0])) + mpre = np.concatenate(([0.0], prec, [0.0])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False): + """rec, prec, ap = voc_eval(detpath, + annopath, + imagesetfile, + classname, + [ovthresh], + [use_07_metric]) + + Top level function that does the PASCAL VOC evaluation. + + detpath: Path to detections + detpath.format(classname) should produce the detection results file. + annopath: Path to annotations + annopath.format(imagename) should be the xml annotations file. + imagesetfile: Text file containing the list of images, one image per line. + classname: Category name (duh) + [ovthresh]: Overlap threshold (default = 0.5) + [use_07_metric]: Whether to use VOC07's 11 point AP computation + (default False) + """ + # assumes detections are in detpath.format(classname) + # assumes annotations are in annopath.format(imagename) + # assumes imagesetfile is a text file with each line an image name + + # first load gt + # read list of images + with PathManager.open(imagesetfile, "r") as f: + lines = f.readlines() + imagenames = [x.strip() for x in lines] + + # load annots + recs = {} + for imagename in imagenames: + recs[imagename] = parse_rec(annopath.format(imagename)) + + # extract gt objects for this class + class_recs = {} + npos = 0 + for imagename in imagenames: + R = [obj for obj in recs[imagename] if obj["name"] == classname] + bbox = np.array([x["bbox"] for x in R]) + difficult = np.array([x["difficult"] for x in R]).astype(bool) + # difficult = np.array([False for x in R]).astype(bool) # treat all "difficult" as GT + det = [False] * len(R) + npos = npos + sum(~difficult) + class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det} + + # read dets + detfile = detpath.format(classname) + with open(detfile, "r") as f: + lines = f.readlines() + + splitlines = [x.strip().split(" ") for x in lines] + image_ids = [x[0] for x in splitlines] + confidence = np.array([float(x[1]) for x in splitlines]) + BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + BB = BB[sorted_ind, :] + image_ids = [image_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) + fp = np.zeros(nd) + for d in range(nd): + R = class_recs[image_ids[d]] + bb = BB[d, :].astype(float) + ovmax = -np.inf + BBGT = R["bbox"].astype(float) + + if BBGT.size > 0: + # compute 
overlaps + # intersection + ixmin = np.maximum(BBGT[:, 0], bb[0]) + iymin = np.maximum(BBGT[:, 1], bb[1]) + ixmax = np.minimum(BBGT[:, 2], bb[2]) + iymax = np.minimum(BBGT[:, 3], bb[3]) + iw = np.maximum(ixmax - ixmin + 1.0, 0.0) + ih = np.maximum(iymax - iymin + 1.0, 0.0) + inters = iw * ih + + # union + uni = ( + (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) + - inters + ) + + overlaps = inters / uni + ovmax = np.max(overlaps) + jmax = np.argmax(overlaps) + + if ovmax > ovthresh: + if not R["difficult"][jmax]: + if not R["det"][jmax]: + tp[d] = 1.0 + R["det"][jmax] = 1 + else: + fp[d] = 1.0 + else: + fp[d] = 1.0 + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + rec = tp / float(npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = voc_ap(rec, prec, use_07_metric) + + return rec, prec, ap diff --git a/data_processing/detectron2/detectron2/evaluation/rotated_coco_evaluation.py b/data_processing/detectron2/detectron2/evaluation/rotated_coco_evaluation.py new file mode 100644 index 0000000..ea6d1b3 --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/rotated_coco_evaluation.py @@ -0,0 +1,207 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import json +import numpy as np +import os +import torch +from pycocotools.cocoeval import COCOeval, maskUtils + +from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated +from detectron2.utils.file_io import PathManager + +from .coco_evaluation import COCOEvaluator + + +class RotatedCOCOeval(COCOeval): + @staticmethod + def is_rotated(box_list): + if type(box_list) == np.ndarray: + return box_list.shape[1] == 5 + elif type(box_list) == list: + if box_list == []: # cannot decide the box_dim + return False + return np.all( + np.array( + [ + (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray)) + for obj in box_list + ] + ) + ) + return False + + @staticmethod + def boxlist_to_tensor(boxlist, output_box_dim): + if type(boxlist) == np.ndarray: + box_tensor = torch.from_numpy(boxlist) + elif type(boxlist) == list: + if boxlist == []: + return torch.zeros((0, output_box_dim), dtype=torch.float32) + else: + box_tensor = torch.FloatTensor(boxlist) + else: + raise Exception("Unrecognized boxlist type") + + input_box_dim = box_tensor.shape[1] + if input_box_dim != output_box_dim: + if input_box_dim == 4 and output_box_dim == 5: + box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS) + else: + raise Exception( + "Unable to convert from {}-dim box to {}-dim box".format( + input_box_dim, output_box_dim + ) + ) + return box_tensor + + def compute_iou_dt_gt(self, dt, gt, is_crowd): + if self.is_rotated(dt) or self.is_rotated(gt): + # TODO: take is_crowd into consideration + assert all(c == 0 for c in is_crowd) + dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5)) + gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5)) + return pairwise_iou_rotated(dt, gt) + else: + # This is the same as the classical COCO evaluation + return maskUtils.iou(dt, gt, is_crowd) + + def computeIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and 
len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + assert p.iouType == "bbox", "unsupported iouType for iou computation" + + g = [g["bbox"] for g in gt] + d = [d["bbox"] for d in dt] + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in gt] + + # Note: this function is copied from cocoeval.py in cocoapi + # and the major difference is here. + ious = self.compute_iou_dt_gt(d, g, iscrowd) + return ious + + +class RotatedCOCOEvaluator(COCOEvaluator): + """ + Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs, + with rotated boxes support. + Note: this uses IOU only and does not consider angle differences. + """ + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + + prediction["instances"] = self.instances_to_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def instances_to_json(self, instances, img_id): + num_instance = len(instances) + if num_instance == 0: + return [] + + boxes = instances.pred_boxes.tensor.numpy() + if boxes.shape[1] == 4: + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + boxes = boxes.tolist() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": boxes[k], + "score": scores[k], + } + + results.append(result) + return results + + def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused + """ + Evaluate predictions on the given tasks. + Fill self._results with the metrics of the tasks. 
+ """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in coco_results: + result["category_id"] = reverse_id_mapping[result["category_id"]] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + + assert self._tasks is None or set(self._tasks) == { + "bbox" + }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported" + coco_eval = ( + self._evaluate_predictions_on_coco(self._coco_api, coco_results) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + task = "bbox" + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _evaluate_predictions_on_coco(self, coco_gt, coco_results): + """ + Evaluate the coco results using COCOEval API. + """ + assert len(coco_results) > 0 + + coco_dt = coco_gt.loadRes(coco_results) + + # Only bbox is supported for now + coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox") + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval diff --git a/data_processing/detectron2/detectron2/evaluation/sem_seg_evaluation.py b/data_processing/detectron2/detectron2/evaluation/sem_seg_evaluation.py new file mode 100644 index 0000000..3735de6 --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/sem_seg_evaluation.py @@ -0,0 +1,265 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import json +import logging +import numpy as np +import os +from collections import OrderedDict +from typing import Optional, Union +import pycocotools.mask as mask_util +import torch +from PIL import Image + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.utils.comm import all_gather, is_main_process, synchronize +from detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + +_CV2_IMPORTED = True +try: + import cv2 # noqa +except ImportError: + # OpenCV is an optional dependency at the moment + _CV2_IMPORTED = False + + +def load_image_into_numpy_array( + filename: str, + copy: bool = False, + dtype: Optional[Union[np.dtype, str]] = None, +) -> np.ndarray: + with PathManager.open(filename, "rb") as f: + array = np.array(Image.open(f), copy=copy, dtype=dtype) + return array + + +class SemSegEvaluator(DatasetEvaluator): + """ + Evaluate semantic segmentation metrics. + """ + + def __init__( + self, + dataset_name, + distributed=True, + output_dir=None, + *, + sem_seg_loading_fn=load_image_into_numpy_array, + num_classes=None, + ignore_label=None, + ): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + distributed (bool): if True, will collect results from all ranks for evaluation. + Otherwise, will evaluate the results in the current process. + output_dir (str): an output directory to dump results. 
+ sem_seg_loading_fn: function to read sem seg file and load into numpy array. + Default provided, but projects can customize. + num_classes, ignore_label: deprecated argument + """ + self._logger = logging.getLogger(__name__) + if num_classes is not None: + self._logger.warn( + "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata." + ) + if ignore_label is not None: + self._logger.warn( + "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata." + ) + self._dataset_name = dataset_name + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + + self.input_file_to_gt_file = { + dataset_record["file_name"]: dataset_record["sem_seg_file_name"] + for dataset_record in DatasetCatalog.get(dataset_name) + } + + meta = MetadataCatalog.get(dataset_name) + # Dict that maps contiguous training ids to COCO category ids + try: + c2d = meta.stuff_dataset_id_to_contiguous_id + self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} + except AttributeError: + self._contiguous_id_to_dataset_id = None + self._class_names = meta.stuff_classes + self.sem_seg_loading_fn = sem_seg_loading_fn + self._num_classes = len(meta.stuff_classes) + if num_classes is not None: + assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}" + self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label + + # This is because cv2.erode did not work for int datatype. Only works for uint8. + self._compute_boundary_iou = True + if not _CV2_IMPORTED: + self._compute_boundary_iou = False + self._logger.warn( + """Boundary IoU calculation requires OpenCV. B-IoU metrics are + not going to be computed because OpenCV is not available to import.""" + ) + if self._num_classes >= np.iinfo(np.uint8).max: + self._compute_boundary_iou = False + self._logger.warn( + f"""SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation! + B-IoU metrics are not going to be computed. Max allowed value (exclusive) + for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}. + The number of classes of dataset {self._dataset_name} is {self._num_classes}""" + ) + + def reset(self): + self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64) + self._b_conf_matrix = np.zeros( + (self._num_classes + 1, self._num_classes + 1), dtype=np.int64 + ) + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a model. + It is a list of dicts. Each dict corresponds to an image and + contains keys like "height", "width", "file_name". + outputs: the outputs of a model. It is either list of semantic segmentation predictions + (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic + segmentation prediction in the same format. 
+ """ + for input, output in zip(inputs, outputs): + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) + pred = np.array(output, dtype=np.int) + gt_filename = self.input_file_to_gt_file[input["file_name"]] + gt = self.sem_seg_loading_fn(gt_filename, dtype=np.int) + + gt[gt == self._ignore_label] = self._num_classes + + self._conf_matrix += np.bincount( + (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), + minlength=self._conf_matrix.size, + ).reshape(self._conf_matrix.shape) + + if self._compute_boundary_iou: + b_gt = self._mask_to_boundary(gt.astype(np.uint8)) + b_pred = self._mask_to_boundary(pred.astype(np.uint8)) + + self._b_conf_matrix += np.bincount( + (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1), + minlength=self._conf_matrix.size, + ).reshape(self._conf_matrix.shape) + + self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) + + def evaluate(self): + """ + Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): + + * Mean intersection-over-union averaged across classes (mIoU) + * Frequency Weighted IoU (fwIoU) + * Mean pixel accuracy averaged across classes (mACC) + * Pixel Accuracy (pACC) + """ + if self._distributed: + synchronize() + conf_matrix_list = all_gather(self._conf_matrix) + b_conf_matrix_list = all_gather(self._b_conf_matrix) + self._predictions = all_gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not is_main_process(): + return + + self._conf_matrix = np.zeros_like(self._conf_matrix) + for conf_matrix in conf_matrix_list: + self._conf_matrix += conf_matrix + + self._b_conf_matrix = np.zeros_like(self._b_conf_matrix) + for b_conf_matrix in b_conf_matrix_list: + self._b_conf_matrix += b_conf_matrix + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(self._predictions)) + + acc = np.full(self._num_classes, np.nan, dtype=np.float) + iou = np.full(self._num_classes, np.nan, dtype=np.float) + tp = self._conf_matrix.diagonal()[:-1].astype(np.float) + pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) + class_weights = pos_gt / np.sum(pos_gt) + pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) + acc_valid = pos_gt > 0 + acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] + union = pos_gt + pos_pred - tp + iou_valid = np.logical_and(acc_valid, union > 0) + iou[iou_valid] = tp[iou_valid] / union[iou_valid] + macc = np.sum(acc[acc_valid]) / np.sum(acc_valid) + miou = np.sum(iou[iou_valid]) / np.sum(iou_valid) + fiou = np.sum(iou[iou_valid] * class_weights[iou_valid]) + pacc = np.sum(tp) / np.sum(pos_gt) + + if self._compute_boundary_iou: + b_iou = np.full(self._num_classes, np.nan, dtype=np.float) + b_tp = self._b_conf_matrix.diagonal()[:-1].astype(np.float) + b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(np.float) + b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(np.float) + b_union = b_pos_gt + b_pos_pred - b_tp + b_iou_valid = b_union > 0 + b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid] + + res = {} + res["mIoU"] = 100 * miou + res["fwIoU"] = 100 * fiou + for i, name in enumerate(self._class_names): + res[f"IoU-{name}"] = 100 * iou[i] + if self._compute_boundary_iou: + res[f"BoundaryIoU-{name}"] = 100 * b_iou[i] + res[f"min(IoU, B-Iou)-{name}"] 
= 100 * min(iou[i], b_iou[i]) + res["mACC"] = 100 * macc + res["pACC"] = 100 * pacc + for i, name in enumerate(self._class_names): + res[f"ACC-{name}"] = 100 * acc[i] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(res, f) + results = OrderedDict({"sem_seg": res}) + self._logger.info(results) + return results + + def encode_json_sem_seg(self, sem_seg, input_file_name): + """ + Convert semantic segmentation to COCO stuff format with segments encoded as RLEs. + See http://cocodataset.org/#format-results + """ + json_list = [] + for label in np.unique(sem_seg): + if self._contiguous_id_to_dataset_id is not None: + assert ( + label in self._contiguous_id_to_dataset_id + ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) + dataset_id = self._contiguous_id_to_dataset_id[label] + else: + dataset_id = int(label) + mask = (sem_seg == label).astype(np.uint8) + mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] + mask_rle["counts"] = mask_rle["counts"].decode("utf-8") + json_list.append( + {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle} + ) + return json_list + + def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02): + assert mask.ndim == 2, "mask_to_boundary expects a 2-dimensional image" + h, w = mask.shape + diag_len = np.sqrt(h**2 + w**2) + dilation = max(1, int(round(dilation_ratio * diag_len))) + kernel = np.ones((3, 3), dtype=np.uint8) + + padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0) + eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation) + eroded_mask = eroded_mask_with_padding[1:-1, 1:-1] + boundary = mask - eroded_mask + return boundary diff --git a/data_processing/detectron2/detectron2/evaluation/testing.py b/data_processing/detectron2/detectron2/evaluation/testing.py new file mode 100644 index 0000000..9e5ae62 --- /dev/null +++ b/data_processing/detectron2/detectron2/evaluation/testing.py @@ -0,0 +1,85 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +import pprint +import sys +from collections.abc import Mapping + + +def print_csv_format(results): + """ + Print main metrics in a format similar to Detectron, + so that they are easy to copypaste into a spreadsheet. + + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + unordered dict can also be printed, but in arbitrary order + """ + assert isinstance(results, Mapping) or not len(results), results + logger = logging.getLogger(__name__) + for task, res in results.items(): + if isinstance(res, Mapping): + # Don't print "AP-category" metrics since they are usually not tracked. 
+ important_res = [(k, v) for k, v in res.items() if "-" not in k] + logger.info("copypaste: Task: {}".format(task)) + logger.info("copypaste: " + ",".join([k[0] for k in important_res])) + logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res])) + else: + logger.info(f"copypaste: {task}={res}") + + +def verify_results(cfg, results): + """ + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + + Returns: + bool: whether the verification succeeds or not + """ + expected_results = cfg.TEST.EXPECTED_RESULTS + if not len(expected_results): + return True + + ok = True + for task, metric, expected, tolerance in expected_results: + actual = results[task].get(metric, None) + if actual is None: + ok = False + continue + if not np.isfinite(actual): + ok = False + continue + diff = abs(actual - expected) + if diff > tolerance: + ok = False + + logger = logging.getLogger(__name__) + if not ok: + logger.error("Result verification failed!") + logger.error("Expected Results: " + str(expected_results)) + logger.error("Actual Results: " + pprint.pformat(results)) + + sys.exit(1) + else: + logger.info("Results verification passed.") + return ok + + +def flatten_results_dict(results): + """ + Expand a hierarchical dict of scalars into a flat dict of scalars. + If results[k1][k2][k3] = v, the returned dict will have the entry + {"k1/k2/k3": v}. + + Args: + results (dict): + """ + r = {} + for k, v in results.items(): + if isinstance(v, Mapping): + v = flatten_results_dict(v) + for kk, vv in v.items(): + r[k + "/" + kk] = vv + else: + r[k] = v + return r diff --git a/data_processing/detectron2/detectron2/export/README.md b/data_processing/detectron2/detectron2/export/README.md new file mode 100644 index 0000000..c86ff62 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/README.md @@ -0,0 +1,15 @@ + +This directory contains code to prepare a detectron2 model for deployment. +Currently it supports exporting a detectron2 model to TorchScript, ONNX, or (deprecated) Caffe2 format. + +Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. + + +### Acknowledgements + +Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools. + +Thanks to Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who +help export Detectron2 models to TorchScript. + +Thanks to ONNX Converter team at Microsoft who help export Detectron2 models to ONNX. 
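The `SemSegEvaluator.evaluate()` code earlier in this diff reduces a `(num_classes + 1) x (num_classes + 1)` confusion matrix (the extra row/column absorbs ignored pixels) into mIoU, fwIoU, mACC and pACC. Below is a minimal NumPy sketch of that reduction, using a hypothetical 2-class matrix rather than the evaluator's own state:

```python
import numpy as np

# Hypothetical confusion matrix for 2 classes plus one "ignore" slot:
# rows = predicted class, columns = ground-truth class (last row/col collects ignored pixels).
conf = np.array([
    [50.0,  5.0, 0.0],
    [10.0, 35.0, 0.0],
    [ 0.0,  0.0, 0.0],
])

tp = conf.diagonal()[:-1]              # true positives per class
pos_gt = conf[:-1, :-1].sum(axis=0)    # ground-truth pixels per class
pos_pred = conf[:-1, :-1].sum(axis=1)  # predicted pixels per class
union = pos_gt + pos_pred - tp

acc = np.where(pos_gt > 0, tp / np.maximum(pos_gt, 1), np.nan)
iou = np.where(union > 0, tp / np.maximum(union, 1), np.nan)
class_weights = pos_gt / pos_gt.sum()

miou = np.nanmean(iou)                 # mIoU: mean IoU over valid classes
fiou = np.nansum(iou * class_weights)  # fwIoU: frequency-weighted IoU
macc = np.nanmean(acc)                 # mACC: mean per-class pixel accuracy
pacc = tp.sum() / pos_gt.sum()         # pACC: overall pixel accuracy

print(f"mIoU={100 * miou:.1f} fwIoU={100 * fiou:.1f} mACC={100 * macc:.1f} pACC={100 * pacc:.1f}")
```

With this matrix the sketch prints mIoU ≈ 73.5, fwIoU ≈ 74.2, mACC ≈ 85.4 and pACC = 85.0, following the same formulas as `evaluate()`.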
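`flatten_results_dict` and `print_csv_format` from `testing.py` above are small reporting utilities: the first flattens nested result dicts into `"task/metric"` keys, the second logs a copy-pasteable CSV line per task while skipping per-category metrics (keys containing `-`). A short usage sketch, assuming the vendored `detectron2` package is importable and using made-up numbers:

```python
import logging

from detectron2.evaluation.testing import flatten_results_dict, print_csv_format

logging.basicConfig(level=logging.INFO)  # print_csv_format writes through the logging module

results = {"sem_seg": {"mIoU": 73.4, "fwIoU": 74.2, "IoU-person": 80.1}, "lr": 0.001}

print(flatten_results_dict(results))
# {'sem_seg/mIoU': 73.4, 'sem_seg/fwIoU': 74.2, 'sem_seg/IoU-person': 80.1, 'lr': 0.001}

print_csv_format({"sem_seg": results["sem_seg"]})
# copypaste: Task: sem_seg
# copypaste: mIoU,fwIoU           <- "IoU-person" is skipped because its key contains "-"
# copypaste: 73.4000,74.2000
```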
diff --git a/data_processing/detectron2/detectron2/export/__init__.py b/data_processing/detectron2/detectron2/export/__init__.py new file mode 100644 index 0000000..5a58758 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import warnings + +from .flatten import TracingAdapter +from .torchscript import dump_torchscript_IR, scripting_with_instances + +try: + from caffe2.proto import caffe2_pb2 as _tmp + from caffe2.python import core + + # caffe2 is optional +except ImportError: + pass +else: + from .api import * + + +# TODO: Update ONNX Opset version and run tests when a newer PyTorch is supported +STABLE_ONNX_OPSET_VERSION = 11 + + +def add_export_config(cfg): + warnings.warn( + "add_export_config has been deprecated and behaves as no-op function.", DeprecationWarning + ) + return cfg + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/detectron2/export/api.py b/data_processing/detectron2/detectron2/export/api.py new file mode 100644 index 0000000..1a272fe --- /dev/null +++ b/data_processing/detectron2/detectron2/export/api.py @@ -0,0 +1,230 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import os +import torch +from caffe2.proto import caffe2_pb2 +from torch import nn + +from detectron2.config import CfgNode +from detectron2.utils.file_io import PathManager + +from .caffe2_inference import ProtobufDetectionModel +from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format +from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph + +__all__ = [ + "Caffe2Model", + "Caffe2Tracer", +] + + +class Caffe2Tracer: + """ + Make a detectron2 model traceable with Caffe2 operators. + This class creates a traceable version of a detectron2 model which: + + 1. Rewrite parts of the model using ops in Caffe2. Note that some ops do + not have GPU implementation in Caffe2. + 2. Remove post-processing and only produce raw layer outputs + + After making a traceable model, the class provide methods to export such a + model to different deployment formats. + Exported graph produced by this class take two input tensors: + + 1. (1, C, H, W) float "data" which is an image (usually in [0, 255]). + (H, W) often has to be padded to multiple of 32 (depend on the model + architecture). + 2. 1x3 float "im_info", each row of which is (height, width, 1.0). + Height and width are true image shapes before padding. + + The class currently only supports models using builtin meta architectures. + Batch inference is not supported, and contributions are welcome. + """ + + def __init__(self, cfg: CfgNode, model: nn.Module, inputs): + """ + Args: + cfg (CfgNode): a detectron2 config used to construct caffe2-compatible model. + model (nn.Module): An original pytorch model. Must be among a few official models + in detectron2 that can be converted to become caffe2-compatible automatically. + Weights have to be already loaded to this model. + inputs: sample inputs that the given model takes for inference. + Will be used to trace the model. For most models, random inputs with + no detected objects will not work as they lead to wrong traces. 
+ """ + assert isinstance(cfg, CfgNode), cfg + assert isinstance(model, torch.nn.Module), type(model) + + # TODO make it support custom models, by passing in c2 model directly + C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE] + self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model)) + self.inputs = inputs + self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs) + + def export_caffe2(self): + """ + Export the model to Caffe2's protobuf format. + The returned object can be saved with its :meth:`.save_protobuf()` method. + The result can be loaded and executed using Caffe2 runtime. + + Returns: + :class:`Caffe2Model` + """ + from .caffe2_export import export_caffe2_detection_model + + predict_net, init_net = export_caffe2_detection_model( + self.traceable_model, self.traceable_inputs + ) + return Caffe2Model(predict_net, init_net) + + def export_onnx(self): + """ + Export the model to ONNX format. + Note that the exported model contains custom ops only available in caffe2, therefore it + cannot be directly executed by other runtime (such as onnxruntime or TensorRT). + Post-processing or transformation passes may be applied on the model to accommodate + different runtimes, but we currently do not provide support for them. + + Returns: + onnx.ModelProto: an onnx model. + """ + from .caffe2_export import export_onnx_model as export_onnx_model_impl + + return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,)) + + def export_torchscript(self): + """ + Export the model to a ``torch.jit.TracedModule`` by tracing. + The returned object can be saved to a file by ``.save()``. + + Returns: + torch.jit.TracedModule: a torch TracedModule + """ + logger = logging.getLogger(__name__) + logger.info("Tracing the model with torch.jit.trace ...") + with torch.no_grad(): + return torch.jit.trace(self.traceable_model, (self.traceable_inputs,)) + + +class Caffe2Model(nn.Module): + """ + A wrapper around the traced model in Caffe2's protobuf format. + The exported graph has different inputs/outputs from the original Pytorch + model, as explained in :class:`Caffe2Tracer`. This class wraps around the + exported graph to simulate the same interface as the original Pytorch model. + It also provides functions to save/load models in Caffe2's format.' + + Examples: + :: + c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2() + inputs = [{"image": img_tensor_CHW}] + outputs = c2_model(inputs) + orig_outputs = torch_model(inputs) + """ + + def __init__(self, predict_net, init_net): + super().__init__() + self.eval() # always in eval mode + self._predict_net = predict_net + self._init_net = init_net + self._predictor = None + + __init__.__HIDE_SPHINX_DOC__ = True + + @property + def predict_net(self): + """ + caffe2.core.Net: the underlying caffe2 predict net + """ + return self._predict_net + + @property + def init_net(self): + """ + caffe2.core.Net: the underlying caffe2 init net + """ + return self._init_net + + def save_protobuf(self, output_dir): + """ + Save the model as caffe2's protobuf format. + It saves the following files: + + * "model.pb": definition of the graph. Can be visualized with + tools like `netron `_. + * "model_init.pb": model parameters + * "model.pbtxt": human-readable definition of the graph. Not + needed for deployment. + + Args: + output_dir (str): the output directory to save protobuf files. 
+ """ + logger = logging.getLogger(__name__) + logger.info("Saving model to {} ...".format(output_dir)) + if not PathManager.exists(output_dir): + PathManager.mkdirs(output_dir) + + with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f: + f.write(self._predict_net.SerializeToString()) + with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f: + f.write(str(self._predict_net)) + with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f: + f.write(self._init_net.SerializeToString()) + + def save_graph(self, output_file, inputs=None): + """ + Save the graph as SVG format. + + Args: + output_file (str): a SVG file + inputs: optional inputs given to the model. + If given, the inputs will be used to run the graph to record + shape of every tensor. The shape information will be + saved together with the graph. + """ + from .caffe2_export import run_and_save_graph + + if inputs is None: + save_graph(self._predict_net, output_file, op_only=False) + else: + size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0) + device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii") + inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device) + inputs = [x.cpu().numpy() for x in inputs] + run_and_save_graph(self._predict_net, self._init_net, inputs, output_file) + + @staticmethod + def load_protobuf(dir): + """ + Args: + dir (str): a directory used to save Caffe2Model with + :meth:`save_protobuf`. + The files "model.pb" and "model_init.pb" are needed. + + Returns: + Caffe2Model: the caffe2 model loaded from this directory. + """ + predict_net = caffe2_pb2.NetDef() + with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f: + predict_net.ParseFromString(f.read()) + + init_net = caffe2_pb2.NetDef() + with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f: + init_net.ParseFromString(f.read()) + + return Caffe2Model(predict_net, init_net) + + def __call__(self, inputs): + """ + An interface that wraps around a Caffe2 model and mimics detectron2's models' + input/output format. See details about the format at :doc:`/tutorials/models`. + This is used to compare the outputs of caffe2 model with its original torch model. + + Due to the extra conversion between Pytorch/Caffe2, this method is not meant for + benchmark. Because of the conversion, this method also has dependency + on detectron2 in order to convert to detectron2's output format. + """ + if self._predictor is None: + self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net) + return self._predictor(inputs) diff --git a/data_processing/detectron2/detectron2/export/c10.py b/data_processing/detectron2/detectron2/export/c10.py new file mode 100644 index 0000000..e9a3ee3 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/c10.py @@ -0,0 +1,571 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import math +from typing import Dict +import torch +import torch.nn.functional as F + +from detectron2.layers import ShapeSpec, cat +from detectron2.layers.roi_align_rotated import ROIAlignRotated +from detectron2.modeling import poolers +from detectron2.modeling.proposal_generator import rpn +from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference +from detectron2.structures import Boxes, ImageList, Instances, Keypoints, RotatedBoxes + +from .shared import alias, to_device + + +""" +This file contains caffe2-compatible implementation of several detectron2 components. 
+""" + + +class Caffe2Boxes(Boxes): + """ + Representing a list of detectron2.structures.Boxes from minibatch, each box + is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector + (batch index + 5 coordinates) for RotatedBoxes. + """ + + def __init__(self, tensor): + assert isinstance(tensor, torch.Tensor) + assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size() + # TODO: make tensor immutable when dim is Nx5 for Boxes, + # and Nx6 for RotatedBoxes? + self.tensor = tensor + + +# TODO clean up this class, maybe just extend Instances +class InstancesList(object): + """ + Tensor representation of a list of Instances object for a batch of images. + + When dealing with a batch of images with Caffe2 ops, a list of bboxes + (instances) are usually represented by single Tensor with size + (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is + for providing common functions to convert between these two representations. + """ + + def __init__(self, im_info, indices, extra_fields=None): + # [N, 3] -> (H, W, Scale) + self.im_info = im_info + # [N,] -> indice of batch to which the instance belongs + self.indices = indices + # [N, ...] + self.batch_extra_fields = extra_fields or {} + + self.image_size = self.im_info + + def get_fields(self): + """like `get_fields` in the Instances object, + but return each field in tensor representations""" + ret = {} + for k, v in self.batch_extra_fields.items(): + # if isinstance(v, torch.Tensor): + # tensor_rep = v + # elif isinstance(v, (Boxes, Keypoints)): + # tensor_rep = v.tensor + # else: + # raise ValueError("Can't find tensor representation for: {}".format()) + ret[k] = v + return ret + + def has(self, name): + return name in self.batch_extra_fields + + def set(self, name, value): + # len(tensor) is a bad practice that generates ONNX constants during tracing. + # Although not a problem for the `assert` statement below, torch ONNX exporter + # still raises a misleading warning as it does not this call comes from `assert` + if isinstance(value, Boxes): + data_len = value.tensor.shape[0] + elif isinstance(value, torch.Tensor): + data_len = value.shape[0] + else: + data_len = len(value) + if len(self.batch_extra_fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self.batch_extra_fields[name] = value + + def __getattr__(self, name): + if name not in self.batch_extra_fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self.batch_extra_fields[name] + + def __len__(self): + return len(self.indices) + + def flatten(self): + ret = [] + for _, v in self.batch_extra_fields.items(): + if isinstance(v, (Boxes, Keypoints)): + ret.append(v.tensor) + else: + ret.append(v) + return ret + + @staticmethod + def to_d2_instances_list(instances_list): + """ + Convert InstancesList to List[Instances]. The input `instances_list` can + also be a List[Instances], in this case this method is a non-op. 
+ """ + if not isinstance(instances_list, InstancesList): + assert all(isinstance(x, Instances) for x in instances_list) + return instances_list + + ret = [] + for i, info in enumerate(instances_list.im_info): + instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())])) + + ids = instances_list.indices == i + for k, v in instances_list.batch_extra_fields.items(): + if isinstance(v, torch.Tensor): + instances.set(k, v[ids]) + continue + elif isinstance(v, Boxes): + instances.set(k, v[ids, -4:]) + continue + + target_type, tensor_source = v + assert isinstance(tensor_source, torch.Tensor) + assert tensor_source.shape[0] == instances_list.indices.shape[0] + tensor_source = tensor_source[ids] + + if issubclass(target_type, Boxes): + instances.set(k, Boxes(tensor_source[:, -4:])) + elif issubclass(target_type, Keypoints): + instances.set(k, Keypoints(tensor_source)) + elif issubclass(target_type, torch.Tensor): + instances.set(k, tensor_source) + else: + raise ValueError("Can't handle targe type: {}".format(target_type)) + + ret.append(instances) + return ret + + +class Caffe2Compatible(object): + """ + A model can inherit this class to indicate that it can be traced and deployed with caffe2. + """ + + def _get_tensor_mode(self): + return self._tensor_mode + + def _set_tensor_mode(self, v): + self._tensor_mode = v + + tensor_mode = property(_get_tensor_mode, _set_tensor_mode) + """ + If true, the model expects C2-style tensor only inputs/outputs format. + """ + + +class Caffe2RPN(Caffe2Compatible, rpn.RPN): + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + ret = super(Caffe2Compatible, cls).from_config(cfg, input_shape) + assert tuple(cfg.MODEL.RPN.BBOX_REG_WEIGHTS) == (1.0, 1.0, 1.0, 1.0) or tuple( + cfg.MODEL.RPN.BBOX_REG_WEIGHTS + ) == (1.0, 1.0, 1.0, 1.0, 1.0) + return ret + + def _generate_proposals( + self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None + ): + assert isinstance(images, ImageList) + if self.tensor_mode: + im_info = images.image_sizes + else: + im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to( + images.tensor.device + ) + assert isinstance(im_info, torch.Tensor) + + rpn_rois_list = [] + rpn_roi_probs_list = [] + for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip( + objectness_logits_pred, + anchor_deltas_pred, + [b for (n, b) in self.anchor_generator.cell_anchors.named_buffers()], + self.anchor_generator.strides, + ): + scores = scores.detach() + bbox_deltas = bbox_deltas.detach() + + rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals( + scores, + bbox_deltas, + im_info, + cell_anchors_tensor, + spatial_scale=1.0 / feat_stride, + pre_nms_topN=self.pre_nms_topk[self.training], + post_nms_topN=self.post_nms_topk[self.training], + nms_thresh=self.nms_thresh, + min_size=self.min_box_size, + # correct_transform_coords=True, # deprecated argument + angle_bound_on=True, # Default + angle_bound_lo=-180, + angle_bound_hi=180, + clip_angle_thresh=1.0, # Default + legacy_plus_one=False, + ) + rpn_rois_list.append(rpn_rois) + rpn_roi_probs_list.append(rpn_roi_probs) + + # For FPN in D2, in RPN all proposals from different levels are concated + # together, ranked and picked by top post_nms_topk. Then in ROIPooler + # it calculates level_assignments and calls the RoIAlign from + # the corresponding level. 
+ + if len(objectness_logits_pred) == 1: + rpn_rois = rpn_rois_list[0] + rpn_roi_probs = rpn_roi_probs_list[0] + else: + assert len(rpn_rois_list) == len(rpn_roi_probs_list) + rpn_post_nms_topN = self.post_nms_topk[self.training] + + device = rpn_rois_list[0].device + input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)] + + # TODO remove this after confirming rpn_max_level/rpn_min_level + # is not needed in CollectRpnProposals. + feature_strides = list(self.anchor_generator.strides) + rpn_min_level = int(math.log2(feature_strides[0])) + rpn_max_level = int(math.log2(feature_strides[-1])) + assert (rpn_max_level - rpn_min_level + 1) == len( + rpn_rois_list + ), "CollectRpnProposals requires continuous levels" + + rpn_rois = torch.ops._caffe2.CollectRpnProposals( + input_list, + # NOTE: in current implementation, rpn_max_level and rpn_min_level + # are not needed, only the subtraction of two matters and it + # can be infer from the number of inputs. Keep them now for + # consistency. + rpn_max_level=2 + len(rpn_rois_list) - 1, + rpn_min_level=2, + rpn_post_nms_topN=rpn_post_nms_topN, + ) + rpn_rois = to_device(rpn_rois, device) + rpn_roi_probs = [] + + proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode) + return proposals, {} + + def forward(self, images, features, gt_instances=None): + assert not self.training + features = [features[f] for f in self.in_features] + objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features) + return self._generate_proposals( + images, + objectness_logits_pred, + anchor_deltas_pred, + gt_instances, + ) + + @staticmethod + def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode): + proposals = InstancesList( + im_info=im_info, + indices=rpn_rois[:, 0], + extra_fields={ + "proposal_boxes": Caffe2Boxes(rpn_rois), + "objectness_logits": (torch.Tensor, rpn_roi_probs), + }, + ) + if not tensor_mode: + proposals = InstancesList.to_d2_instances_list(proposals) + else: + proposals = [proposals] + return proposals + + +class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler): + @staticmethod + def c2_preprocess(box_lists): + assert all(isinstance(x, Boxes) for x in box_lists) + if all(isinstance(x, Caffe2Boxes) for x in box_lists): + # input is pure-tensor based + assert len(box_lists) == 1 + pooler_fmt_boxes = box_lists[0].tensor + else: + pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists) + return pooler_fmt_boxes + + def forward(self, x, box_lists): + assert not self.training + + pooler_fmt_boxes = self.c2_preprocess(box_lists) + num_level_assignments = len(self.level_poolers) + + if num_level_assignments == 1: + if isinstance(self.level_poolers[0], ROIAlignRotated): + c2_roi_align = torch.ops._caffe2.RoIAlignRotated + aligned = True + else: + c2_roi_align = torch.ops._caffe2.RoIAlign + aligned = self.level_poolers[0].aligned + + x0 = x[0] + if x0.is_quantized: + x0 = x0.dequantize() + + out = c2_roi_align( + x0, + pooler_fmt_boxes, + order="NCHW", + spatial_scale=float(self.level_poolers[0].spatial_scale), + pooled_h=int(self.output_size[0]), + pooled_w=int(self.output_size[1]), + sampling_ratio=int(self.level_poolers[0].sampling_ratio), + aligned=aligned, + ) + return out + + device = pooler_fmt_boxes.device + assert ( + self.max_level - self.min_level + 1 == 4 + ), "Currently DistributeFpnProposals only support 4 levels" + fpn_outputs = torch.ops._caffe2.DistributeFpnProposals( + to_device(pooler_fmt_boxes, "cpu"), + roi_canonical_scale=self.canonical_box_size, + 
roi_canonical_level=self.canonical_level, + roi_max_level=self.max_level, + roi_min_level=self.min_level, + legacy_plus_one=False, + ) + fpn_outputs = [to_device(x, device) for x in fpn_outputs] + + rois_fpn_list = fpn_outputs[:-1] + rois_idx_restore_int32 = fpn_outputs[-1] + + roi_feat_fpn_list = [] + for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers): + if isinstance(pooler, ROIAlignRotated): + c2_roi_align = torch.ops._caffe2.RoIAlignRotated + aligned = True + else: + c2_roi_align = torch.ops._caffe2.RoIAlign + aligned = bool(pooler.aligned) + + if x_level.is_quantized: + x_level = x_level.dequantize() + + roi_feat_fpn = c2_roi_align( + x_level, + roi_fpn, + order="NCHW", + spatial_scale=float(pooler.spatial_scale), + pooled_h=int(self.output_size[0]), + pooled_w=int(self.output_size[1]), + sampling_ratio=int(pooler.sampling_ratio), + aligned=aligned, + ) + roi_feat_fpn_list.append(roi_feat_fpn) + + roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0) + assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, ( + "Caffe2 export requires tracing with a model checkpoint + input that can produce valid" + " detections. But no detections were obtained with the given checkpoint and input!" + ) + roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32) + return roi_feat + + +def caffe2_fast_rcnn_outputs_inference(tensor_mode, box_predictor, predictions, proposals): + """equivalent to FastRCNNOutputLayers.inference""" + num_classes = box_predictor.num_classes + score_thresh = box_predictor.test_score_thresh + nms_thresh = box_predictor.test_nms_thresh + topk_per_image = box_predictor.test_topk_per_image + is_rotated = len(box_predictor.box2box_transform.weights) == 5 + + if is_rotated: + box_dim = 5 + assert box_predictor.box2box_transform.weights[4] == 1, ( + "The weights for Rotated BBoxTransform in C2 have only 4 dimensions," + + " thus enforcing the angle weight to be 1 for now" + ) + box2box_transform_weights = box_predictor.box2box_transform.weights[:4] + else: + box_dim = 4 + box2box_transform_weights = box_predictor.box2box_transform.weights + + class_logits, box_regression = predictions + if num_classes + 1 == class_logits.shape[1]: + class_prob = F.softmax(class_logits, -1) + else: + assert num_classes == class_logits.shape[1] + class_prob = F.sigmoid(class_logits) + # BoxWithNMSLimit will infer num_classes from the shape of the class_prob + # So append a zero column as placeholder for the background class + class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1) + + assert box_regression.shape[1] % box_dim == 0 + cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1 + + input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1 + + proposal_boxes = proposals[0].proposal_boxes + if isinstance(proposal_boxes, Caffe2Boxes): + rois = Caffe2Boxes.cat([p.proposal_boxes for p in proposals]) + elif isinstance(proposal_boxes, RotatedBoxes): + rois = RotatedBoxes.cat([p.proposal_boxes for p in proposals]) + elif isinstance(proposal_boxes, Boxes): + rois = Boxes.cat([p.proposal_boxes for p in proposals]) + else: + raise NotImplementedError( + 'Expected proposals[0].proposal_boxes to be type "Boxes", ' + f"instead got {type(proposal_boxes)}" + ) + + device, dtype = rois.tensor.device, rois.tensor.dtype + if input_tensor_mode: + im_info = proposals[0].image_size + rois = rois.tensor + else: + im_info = torch.tensor([[sz[0], sz[1], 1.0] for sz in [x.image_size for x in 
proposals]]) + batch_ids = cat( + [ + torch.full((b, 1), i, dtype=dtype, device=device) + for i, b in enumerate(len(p) for p in proposals) + ], + dim=0, + ) + rois = torch.cat([batch_ids, rois.tensor], dim=1) + + roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform( + to_device(rois, "cpu"), + to_device(box_regression, "cpu"), + to_device(im_info, "cpu"), + weights=box2box_transform_weights, + apply_scale=True, + rotated=is_rotated, + angle_bound_on=True, + angle_bound_lo=-180, + angle_bound_hi=180, + clip_angle_thresh=1.0, + legacy_plus_one=False, + ) + roi_pred_bbox = to_device(roi_pred_bbox, device) + roi_batch_splits = to_device(roi_batch_splits, device) + + nms_outputs = torch.ops._caffe2.BoxWithNMSLimit( + to_device(class_prob, "cpu"), + to_device(roi_pred_bbox, "cpu"), + to_device(roi_batch_splits, "cpu"), + score_thresh=float(score_thresh), + nms=float(nms_thresh), + detections_per_im=int(topk_per_image), + soft_nms_enabled=False, + soft_nms_method="linear", + soft_nms_sigma=0.5, + soft_nms_min_score_thres=0.001, + rotated=is_rotated, + cls_agnostic_bbox_reg=cls_agnostic_bbox_reg, + input_boxes_include_bg_cls=False, + output_classes_include_bg_cls=False, + legacy_plus_one=False, + ) + roi_score_nms = to_device(nms_outputs[0], device) + roi_bbox_nms = to_device(nms_outputs[1], device) + roi_class_nms = to_device(nms_outputs[2], device) + roi_batch_splits_nms = to_device(nms_outputs[3], device) + roi_keeps_nms = to_device(nms_outputs[4], device) + roi_keeps_size_nms = to_device(nms_outputs[5], device) + if not tensor_mode: + roi_class_nms = roi_class_nms.to(torch.int64) + + roi_batch_ids = cat( + [ + torch.full((b, 1), i, dtype=dtype, device=device) + for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms) + ], + dim=0, + ) + + roi_class_nms = alias(roi_class_nms, "class_nms") + roi_score_nms = alias(roi_score_nms, "score_nms") + roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms") + roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms") + roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms") + roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms") + + results = InstancesList( + im_info=im_info, + indices=roi_batch_ids[:, 0], + extra_fields={ + "pred_boxes": Caffe2Boxes(roi_bbox_nms), + "scores": roi_score_nms, + "pred_classes": roi_class_nms, + }, + ) + + if not tensor_mode: + results = InstancesList.to_d2_instances_list(results) + batch_splits = roi_batch_splits_nms.int().tolist() + kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits)) + else: + results = [results] + kept_indices = [roi_keeps_nms] + + return results, kept_indices + + +class Caffe2FastRCNNOutputsInference: + def __init__(self, tensor_mode): + self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode + + def __call__(self, box_predictor, predictions, proposals): + return caffe2_fast_rcnn_outputs_inference( + self.tensor_mode, box_predictor, predictions, proposals + ) + + +def caffe2_mask_rcnn_inference(pred_mask_logits, pred_instances): + """equivalent to mask_head.mask_rcnn_inference""" + if all(isinstance(x, InstancesList) for x in pred_instances): + assert len(pred_instances) == 1 + mask_probs_pred = pred_mask_logits.sigmoid() + mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs") + pred_instances[0].set("pred_masks", mask_probs_pred) + else: + mask_rcnn_inference(pred_mask_logits, pred_instances) + + +class Caffe2MaskRCNNInference: + def __call__(self, pred_mask_logits, pred_instances): + return 
caffe2_mask_rcnn_inference(pred_mask_logits, pred_instances) + + +def caffe2_keypoint_rcnn_inference(use_heatmap_max_keypoint, pred_keypoint_logits, pred_instances): + # just return the keypoint heatmap for now, + # there will be option to call HeatmapMaxKeypointOp + output = alias(pred_keypoint_logits, "kps_score") + if all(isinstance(x, InstancesList) for x in pred_instances): + assert len(pred_instances) == 1 + if use_heatmap_max_keypoint: + device = output.device + output = torch.ops._caffe2.HeatmapMaxKeypoint( + to_device(output, "cpu"), + pred_instances[0].pred_boxes.tensor, + should_output_softmax=True, # worth make it configerable? + ) + output = to_device(output, device) + output = alias(output, "keypoints_out") + pred_instances[0].set("pred_keypoints", output) + return pred_keypoint_logits + + +class Caffe2KeypointRCNNInference: + def __init__(self, use_heatmap_max_keypoint): + self.use_heatmap_max_keypoint = use_heatmap_max_keypoint + + def __call__(self, pred_keypoint_logits, pred_instances): + return caffe2_keypoint_rcnn_inference( + self.use_heatmap_max_keypoint, pred_keypoint_logits, pred_instances + ) diff --git a/data_processing/detectron2/detectron2/export/caffe2_export.py b/data_processing/detectron2/detectron2/export/caffe2_export.py new file mode 100644 index 0000000..d609c27 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/caffe2_export.py @@ -0,0 +1,203 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import copy +import io +import logging +import numpy as np +from typing import List +import onnx +import onnx.optimizer +import torch +from caffe2.proto import caffe2_pb2 +from caffe2.python import core +from caffe2.python.onnx.backend import Caffe2Backend +from tabulate import tabulate +from termcolor import colored +from torch.onnx import OperatorExportTypes + +from .shared import ( + ScopedWS, + construct_init_net_from_params, + fuse_alias_placeholder, + fuse_copy_between_cpu_and_gpu, + get_params_from_init_net, + group_norm_replace_aten_with_caffe2, + infer_device_type, + remove_dead_end_ops, + remove_reshape_for_fc, + save_graph, +) + +logger = logging.getLogger(__name__) + + +def export_onnx_model(model, inputs): + """ + Trace and export a model to onnx format. 
+ + Args: + model (nn.Module): + inputs (tuple[args]): the model will be called by `model(*inputs)` + + Returns: + an onnx model + """ + assert isinstance(model, torch.nn.Module) + + # make sure all modules are in eval mode, onnx may change the training state + # of the module if the states are not consistent + def _check_eval(module): + assert not module.training + + model.apply(_check_eval) + + # Export the model to ONNX + with torch.no_grad(): + with io.BytesIO() as f: + torch.onnx.export( + model, + inputs, + f, + operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK, + # verbose=True, # NOTE: uncomment this for debugging + # export_params=True, + ) + onnx_model = onnx.load_from_string(f.getvalue()) + + return onnx_model + + +def _op_stats(net_def): + type_count = {} + for t in [op.type for op in net_def.op]: + type_count[t] = type_count.get(t, 0) + 1 + type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet + type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count + return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list) + + +def _assign_device_option( + predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor] +): + """ + ONNX exported network doesn't have concept of device, assign necessary + device option for each op in order to make it runable on GPU runtime. + """ + + def _get_device_type(torch_tensor): + assert torch_tensor.device.type in ["cpu", "cuda"] + assert torch_tensor.device.index == 0 + return torch_tensor.device.type + + def _assign_op_device_option(net_proto, net_ssa, blob_device_types): + for op, ssa_i in zip(net_proto.op, net_ssa): + if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]: + op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) + else: + devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]] + assert all(d == devices[0] for d in devices) + if devices[0] == "cuda": + op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) + + # update ops in predict_net + predict_net_input_device_types = { + (name, 0): _get_device_type(tensor) + for name, tensor in zip(predict_net.external_input, tensor_inputs) + } + predict_net_device_types = infer_device_type( + predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch" + ) + predict_net_ssa, _ = core.get_ssa(predict_net) + _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types) + + # update ops in init_net + init_net_ssa, versions = core.get_ssa(init_net) + init_net_output_device_types = { + (name, versions[name]): predict_net_device_types[(name, 0)] + for name in init_net.external_output + } + init_net_device_types = infer_device_type( + init_net, known_status=init_net_output_device_types, device_name_style="pytorch" + ) + _assign_op_device_option(init_net, init_net_ssa, init_net_device_types) + + +def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]): + """ + Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX. + + Arg: + model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py + tensor_inputs: a list of tensors that caffe2 model takes as input. 
+ """ + model = copy.deepcopy(model) + assert isinstance(model, torch.nn.Module) + assert hasattr(model, "encode_additional_info") + + # Export via ONNX + logger.info( + "Exporting a {} model via ONNX ...".format(type(model).__name__) + + " Some warnings from ONNX are expected and are usually not to worry about." + ) + onnx_model = export_onnx_model(model, (tensor_inputs,)) + # Convert ONNX model to Caffe2 protobuf + init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model) + ops_table = [[op.type, op.input, op.output] for op in predict_net.op] + table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe") + logger.info( + "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan") + ) + + # Apply protobuf optimization + fuse_alias_placeholder(predict_net, init_net) + if any(t.device.type != "cpu" for t in tensor_inputs): + fuse_copy_between_cpu_and_gpu(predict_net) + remove_dead_end_ops(init_net) + _assign_device_option(predict_net, init_net, tensor_inputs) + params, device_options = get_params_from_init_net(init_net) + predict_net, params = remove_reshape_for_fc(predict_net, params) + init_net = construct_init_net_from_params(params, device_options) + group_norm_replace_aten_with_caffe2(predict_net) + + # Record necessary information for running the pb model in Detectron2 system. + model.encode_additional_info(predict_net, init_net) + + logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net))) + logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net))) + + return predict_net, init_net + + +def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path): + """ + Run the caffe2 model on given inputs, recording the shape and draw the graph. + + predict_net/init_net: caffe2 model. + tensor_inputs: a list of tensors that caffe2 model takes as input. + graph_save_path: path for saving graph of exported model. + """ + + logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path)) + save_graph(predict_net, graph_save_path, op_only=False) + + # Run the exported Caffe2 net + logger.info("Running ONNX exported model ...") + with ScopedWS("__ws_tmp__", True) as ws: + ws.RunNetOnce(init_net) + initialized_blobs = set(ws.Blobs()) + uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs] + for name, blob in zip(uninitialized, tensor_inputs): + ws.FeedBlob(name, blob) + + try: + ws.RunNetOnce(predict_net) + except RuntimeError as e: + logger.warning("Encountered RuntimeError: \n{}".format(str(e))) + + ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()} + blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)} + + logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path)) + save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes) + + return ws_blobs diff --git a/data_processing/detectron2/detectron2/export/caffe2_inference.py b/data_processing/detectron2/detectron2/export/caffe2_inference.py new file mode 100644 index 0000000..deb886c --- /dev/null +++ b/data_processing/detectron2/detectron2/export/caffe2_inference.py @@ -0,0 +1,161 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import logging +import numpy as np +from itertools import count +import torch +from caffe2.proto import caffe2_pb2 +from caffe2.python import core + +from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format +from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type + +logger = logging.getLogger(__name__) + + +# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ====== +class ProtobufModel(torch.nn.Module): + """ + Wrapper of a caffe2's protobuf model. + It works just like nn.Module, but running caffe2 under the hood. + Input/Output are tuple[tensor] that match the caffe2 net's external_input/output. + """ + + _ids = count(0) + + def __init__(self, predict_net, init_net): + logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...") + super().__init__() + assert isinstance(predict_net, caffe2_pb2.NetDef) + assert isinstance(init_net, caffe2_pb2.NetDef) + # create unique temporary workspace for each instance + self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids)) + self.net = core.Net(predict_net) + + logger.info("Running init_net once to fill the parameters ...") + with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws: + ws.RunNetOnce(init_net) + uninitialized_external_input = [] + for blob in self.net.Proto().external_input: + if blob not in ws.Blobs(): + uninitialized_external_input.append(blob) + ws.CreateBlob(blob) + ws.CreateNet(self.net) + + self._error_msgs = set() + self._input_blobs = uninitialized_external_input + + def _infer_output_devices(self, inputs): + """ + Returns: + list[str]: list of device for each external output + """ + + def _get_device_type(torch_tensor): + assert torch_tensor.device.type in ["cpu", "cuda"] + assert torch_tensor.device.index == 0 + return torch_tensor.device.type + + predict_net = self.net.Proto() + input_device_types = { + (name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs) + } + device_type_map = infer_device_type( + predict_net, known_status=input_device_types, device_name_style="pytorch" + ) + ssa, versions = core.get_ssa(predict_net) + versioned_outputs = [(name, versions[name]) for name in predict_net.external_output] + output_devices = [device_type_map[outp] for outp in versioned_outputs] + return output_devices + + def forward(self, inputs): + """ + Args: + inputs (tuple[torch.Tensor]) + + Returns: + tuple[torch.Tensor] + """ + assert len(inputs) == len(self._input_blobs), ( + f"Length of inputs ({len(inputs)}) " + f"doesn't match the required input blobs: {self._input_blobs}" + ) + + with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws: + for b, tensor in zip(self._input_blobs, inputs): + ws.FeedBlob(b, tensor) + + try: + ws.RunNet(self.net.Proto().name) + except RuntimeError as e: + if not str(e) in self._error_msgs: + self._error_msgs.add(str(e)) + logger.warning("Encountered new RuntimeError: \n{}".format(str(e))) + logger.warning("Catch the error and use partial results.") + + c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output] + # Remove outputs of current run, this is necessary in order to + # prevent fetching the result from previous run if the model fails + # in the middle. + for b in self.net.Proto().external_output: + # Needs to create uninitialized blob to make the net runable. + # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b), + # but there'no such API. 
+ ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).") + + # Cast output to torch.Tensor on the desired device + output_devices = ( + self._infer_output_devices(inputs) + if any(t.device.type != "cpu" for t in inputs) + else ["cpu" for _ in self.net.Proto().external_output] + ) + + outputs = [] + for name, c2_output, device in zip( + self.net.Proto().external_output, c2_outputs, output_devices + ): + if not isinstance(c2_output, np.ndarray): + raise RuntimeError( + "Invalid output for blob {}, received: {}".format(name, c2_output) + ) + outputs.append(torch.tensor(c2_output).to(device=device)) + return tuple(outputs) + + +class ProtobufDetectionModel(torch.nn.Module): + """ + A class works just like a pytorch meta arch in terms of inference, but running + caffe2 model under the hood. + """ + + def __init__(self, predict_net, init_net, *, convert_outputs=None): + """ + Args: + predict_net, init_net (core.Net): caffe2 nets + convert_outptus (callable): a function that converts caffe2 + outputs to the same format of the original pytorch model. + By default, use the one defined in the caffe2 meta_arch. + """ + super().__init__() + self.protobuf_model = ProtobufModel(predict_net, init_net) + self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0) + self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii") + + if convert_outputs is None: + meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN") + meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")] + self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net) + else: + self._convert_outputs = convert_outputs + + def _convert_inputs(self, batched_inputs): + # currently all models convert inputs in the same way + return convert_batched_inputs_to_c2_format( + batched_inputs, self.size_divisibility, self.device + ) + + def forward(self, batched_inputs): + c2_inputs = self._convert_inputs(batched_inputs) + c2_results = self.protobuf_model(c2_inputs) + c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results)) + return self._convert_outputs(batched_inputs, c2_inputs, c2_results) diff --git a/data_processing/detectron2/detectron2/export/caffe2_modeling.py b/data_processing/detectron2/detectron2/export/caffe2_modeling.py new file mode 100644 index 0000000..3e675c4 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/caffe2_modeling.py @@ -0,0 +1,420 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import functools +import io +import struct +import types +import torch + +from detectron2.modeling import meta_arch +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.roi_heads import keypoint_head +from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes + +from .c10 import Caffe2Compatible +from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn +from .shared import ( + alias, + check_set_pb_arg, + get_pb_arg_floats, + get_pb_arg_valf, + get_pb_arg_vali, + get_pb_arg_vals, + mock_torch_nn_functional_interpolate, +) + + +def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False): + """ + A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor]) + to detectron2's format (i.e. list of Instances instance). + This only works when the model follows the Caffe2 detectron's naming convention. + + Args: + image_sizes (List[List[int, int]]): [H, W] of every image. 
+ tensor_outputs (Dict[str, Tensor]): external_output to its tensor. + + force_mask_on (Bool): if true, the it make sure there'll be pred_masks even + if the mask is not found from tensor_outputs (usually due to model crash) + """ + + results = [Instances(image_size) for image_size in image_sizes] + + batch_splits = tensor_outputs.get("batch_splits", None) + if batch_splits: + raise NotImplementedError() + assert len(image_sizes) == 1 + result = results[0] + + bbox_nms = tensor_outputs["bbox_nms"] + score_nms = tensor_outputs["score_nms"] + class_nms = tensor_outputs["class_nms"] + # Detection will always success because Conv support 0-batch + assert bbox_nms is not None + assert score_nms is not None + assert class_nms is not None + if bbox_nms.shape[1] == 5: + result.pred_boxes = RotatedBoxes(bbox_nms) + else: + result.pred_boxes = Boxes(bbox_nms) + result.scores = score_nms + result.pred_classes = class_nms.to(torch.int64) + + mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None) + if mask_fcn_probs is not None: + # finish the mask pred + mask_probs_pred = mask_fcn_probs + num_masks = mask_probs_pred.shape[0] + class_pred = result.pred_classes + indices = torch.arange(num_masks, device=class_pred.device) + mask_probs_pred = mask_probs_pred[indices, class_pred][:, None] + result.pred_masks = mask_probs_pred + elif force_mask_on: + # NOTE: there's no way to know the height/width of mask here, it won't be + # used anyway when batch size is 0, so just set them to 0. + result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8) + + keypoints_out = tensor_outputs.get("keypoints_out", None) + kps_score = tensor_outputs.get("kps_score", None) + if keypoints_out is not None: + # keypoints_out: [N, 4, #kypoints], where 4 is in order of (x, y, score, prob) + keypoints_tensor = keypoints_out + # NOTE: it's possible that prob is not calculated if "should_output_softmax" + # is set to False in HeatmapMaxKeypoint, so just using raw score, seems + # it doesn't affect mAP. TODO: check more carefully. + keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]] + result.pred_keypoints = keypoint_xyp + elif kps_score is not None: + # keypoint heatmap to sparse data structure + pred_keypoint_logits = kps_score + keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result]) + + return results + + +def _cast_to_f32(f64): + return struct.unpack("f", struct.pack("f", f64))[0] + + +def set_caffe2_compatible_tensor_mode(model, enable=True): + def _fn(m): + if isinstance(m, Caffe2Compatible): + m.tensor_mode = enable + + model.apply(_fn) + + +def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device): + """ + See get_caffe2_inputs() below. + """ + assert all(isinstance(x, dict) for x in batched_inputs) + assert all(x["image"].dim() == 3 for x in batched_inputs) + + images = [x["image"] for x in batched_inputs] + images = ImageList.from_tensors(images, size_divisibility) + + im_info = [] + for input_per_image, image_size in zip(batched_inputs, images.image_sizes): + target_height = input_per_image.get("height", image_size[0]) + target_width = input_per_image.get("width", image_size[1]) # noqa + # NOTE: The scale inside im_info is kept as convention and for providing + # post-processing information if further processing is needed. For + # current Caffe2 model definitions that don't include post-processing inside + # the model, this number is not used. 
+ # NOTE: There can be a slight difference between width and height + # scales, using a single number can results in numerical difference + # compared with D2's post-processing. + scale = target_height / image_size[0] + im_info.append([image_size[0], image_size[1], scale]) + im_info = torch.Tensor(im_info) + + return images.tensor.to(device), im_info.to(device) + + +class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module): + """ + Base class for caffe2-compatible implementation of a meta architecture. + The forward is traceable and its traced graph can be converted to caffe2 + graph through ONNX. + """ + + def __init__(self, cfg, torch_model, enable_tensor_mode=True): + """ + Args: + cfg (CfgNode): + torch_model (nn.Module): the detectron2 model (meta_arch) to be + converted. + """ + super().__init__() + self._wrapped_model = torch_model + self.eval() + set_caffe2_compatible_tensor_mode(self, enable_tensor_mode) + + def get_caffe2_inputs(self, batched_inputs): + """ + Convert pytorch-style structured inputs to caffe2-style inputs that + are tuples of tensors. + + Args: + batched_inputs (list[dict]): inputs to a detectron2 model + in its standard format. Each dict has "image" (CHW tensor), and optionally + "height" and "width". + + Returns: + tuple[Tensor]: + tuple of tensors that will be the inputs to the + :meth:`forward` method. For existing models, the first + is an NCHW tensor (padded and batched); the second is + a im_info Nx3 tensor, where the rows are + (height, width, unused legacy parameter) + """ + return convert_batched_inputs_to_c2_format( + batched_inputs, + self._wrapped_model.backbone.size_divisibility, + self._wrapped_model.device, + ) + + def encode_additional_info(self, predict_net, init_net): + """ + Save extra metadata that will be used by inference in the output protobuf. + """ + pass + + def forward(self, inputs): + """ + Run the forward in caffe2-style. It has to use caffe2-compatible ops + and the method will be used for tracing. + + Args: + inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`. + They will be the inputs of the converted caffe2 graph. + + Returns: + tuple[Tensor]: output tensors. They will be the outputs of the + converted caffe2 graph. + """ + raise NotImplementedError + + def _caffe2_preprocess_image(self, inputs): + """ + Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward. + It normalizes the input images, and the final caffe2 graph assumes the + inputs have been batched already. + """ + data, im_info = inputs + data = alias(data, "data") + im_info = alias(im_info, "im_info") + mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std + normalized_data = (data - mean) / std + normalized_data = alias(normalized_data, "normalized_data") + + # Pack (data, im_info) into ImageList which is recognized by self.inference. + images = ImageList(tensor=normalized_data, image_sizes=im_info) + return images + + @staticmethod + def get_outputs_converter(predict_net, init_net): + """ + Creates a function that converts outputs of the caffe2 model to + detectron2's standard format. + The function uses information in `predict_net` and `init_net` that are + available at inferene time. Therefore the function logic can be used in inference. 
+ + The returned function has the following signature: + + def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs + + Where + + * batched_inputs (list[dict]): the original input format of the meta arch + * c2_inputs (tuple[Tensor]): the caffe2 inputs. + * c2_results (dict[str, Tensor]): the caffe2 output format, + corresponding to the outputs of the :meth:`forward` function. + * detectron2_outputs: the original output format of the meta arch. + + This function can be used to compare the outputs of the original meta arch and + the converted caffe2 graph. + + Returns: + callable: a callable of the above signature. + """ + raise NotImplementedError + + +class Caffe2GeneralizedRCNN(Caffe2MetaArch): + def __init__(self, cfg, torch_model, enable_tensor_mode=True): + assert isinstance(torch_model, meta_arch.GeneralizedRCNN) + torch_model = patch_generalized_rcnn(torch_model) + super().__init__(cfg, torch_model, enable_tensor_mode) + + try: + use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT + except AttributeError: + use_heatmap_max_keypoint = False + self.roi_heads_patcher = ROIHeadsPatcher( + self._wrapped_model.roi_heads, use_heatmap_max_keypoint + ) + if self.tensor_mode: + self.roi_heads_patcher.patch_roi_heads() + + def encode_additional_info(self, predict_net, init_net): + size_divisibility = self._wrapped_model.backbone.size_divisibility + check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) + check_set_pb_arg( + predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") + ) + check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN") + + @mock_torch_nn_functional_interpolate() + def forward(self, inputs): + if not self.tensor_mode: + return self._wrapped_model.inference(inputs) + images = self._caffe2_preprocess_image(inputs) + features = self._wrapped_model.backbone(images.tensor) + proposals, _ = self._wrapped_model.proposal_generator(images, features) + detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) + return tuple(detector_results[0].flatten()) + + @staticmethod + def get_outputs_converter(predict_net, init_net): + def f(batched_inputs, c2_inputs, c2_results): + _, im_info = c2_inputs + image_sizes = [[int(im[0]), int(im[1])] for im in im_info] + results = assemble_rcnn_outputs_by_name(image_sizes, c2_results) + return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) + + return f + + +class Caffe2RetinaNet(Caffe2MetaArch): + def __init__(self, cfg, torch_model): + assert isinstance(torch_model, meta_arch.RetinaNet) + super().__init__(cfg, torch_model) + + @mock_torch_nn_functional_interpolate() + def forward(self, inputs): + assert self.tensor_mode + images = self._caffe2_preprocess_image(inputs) + + # explicitly return the images sizes to avoid removing "im_info" by ONNX + # since it's not used in the forward path + return_tensors = [images.image_sizes] + + features = self._wrapped_model.backbone(images.tensor) + features = [features[f] for f in self._wrapped_model.head_in_features] + for i, feature_i in enumerate(features): + features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True) + return_tensors.append(features[i]) + + pred_logits, pred_anchor_deltas = self._wrapped_model.head(features) + for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)): + return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i))) + return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i))) + 
+ return tuple(return_tensors) + + def encode_additional_info(self, predict_net, init_net): + size_divisibility = self._wrapped_model.backbone.size_divisibility + check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) + check_set_pb_arg( + predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") + ) + check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet") + + # Inference parameters: + check_set_pb_arg( + predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh) + ) + check_set_pb_arg( + predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates + ) + check_set_pb_arg( + predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh) + ) + check_set_pb_arg( + predict_net, + "max_detections_per_image", + "i", + self._wrapped_model.max_detections_per_image, + ) + + check_set_pb_arg( + predict_net, + "bbox_reg_weights", + "floats", + [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights], + ) + self._encode_anchor_generator_cfg(predict_net) + + def _encode_anchor_generator_cfg(self, predict_net): + # serialize anchor_generator for future use + serialized_anchor_generator = io.BytesIO() + torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator) + # Ideally we can put anchor generating inside the model, then we don't + # need to store this information. + bytes = serialized_anchor_generator.getvalue() + check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes) + + @staticmethod + def get_outputs_converter(predict_net, init_net): + self = types.SimpleNamespace() + serialized_anchor_generator = io.BytesIO( + get_pb_arg_vals(predict_net, "serialized_anchor_generator", None) + ) + self.anchor_generator = torch.load(serialized_anchor_generator) + bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None) + self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights)) + self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None) + self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None) + self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None) + self.max_detections_per_image = get_pb_arg_vali( + predict_net, "max_detections_per_image", None + ) + + # hack to reuse inference code from RetinaNet + for meth in [ + "forward_inference", + "inference_single_image", + "_transpose_dense_predictions", + "_decode_multi_level_predictions", + "_decode_per_level_predictions", + ]: + setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self)) + + def f(batched_inputs, c2_inputs, c2_results): + _, im_info = c2_inputs + image_sizes = [[int(im[0]), int(im[1])] for im in im_info] + dummy_images = ImageList( + torch.randn( + ( + len(im_info), + 3, + ) + + tuple(image_sizes[0]) + ), + image_sizes, + ) + + num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")]) + pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)] + pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)] + + # For each feature level, feature should have the same batch size and + # spatial dimension as the box_cls and box_delta. 
+ dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits] + # self.num_classess can be inferred + self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4) + + results = self.forward_inference( + dummy_images, dummy_features, [pred_logits, pred_anchor_deltas] + ) + return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) + + return f + + +META_ARCH_CAFFE2_EXPORT_TYPE_MAP = { + "GeneralizedRCNN": Caffe2GeneralizedRCNN, + "RetinaNet": Caffe2RetinaNet, +} diff --git a/data_processing/detectron2/detectron2/export/caffe2_patch.py b/data_processing/detectron2/detectron2/export/caffe2_patch.py new file mode 100644 index 0000000..2da70ae --- /dev/null +++ b/data_processing/detectron2/detectron2/export/caffe2_patch.py @@ -0,0 +1,189 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import contextlib +from unittest import mock +import torch + +from detectron2.modeling import poolers +from detectron2.modeling.proposal_generator import rpn +from detectron2.modeling.roi_heads import keypoint_head, mask_head +from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers + +from .c10 import ( + Caffe2Compatible, + Caffe2FastRCNNOutputsInference, + Caffe2KeypointRCNNInference, + Caffe2MaskRCNNInference, + Caffe2ROIPooler, + Caffe2RPN, + caffe2_fast_rcnn_outputs_inference, + caffe2_keypoint_rcnn_inference, + caffe2_mask_rcnn_inference, +) + + +class GenericMixin(object): + pass + + +class Caffe2CompatibleConverter(object): + """ + A GenericUpdater which implements the `create_from` interface, by modifying + module object and assign it with another class replaceCls. + """ + + def __init__(self, replaceCls): + self.replaceCls = replaceCls + + def create_from(self, module): + # update module's class to the new class + assert isinstance(module, torch.nn.Module) + if issubclass(self.replaceCls, GenericMixin): + # replaceCls should act as mixin, create a new class on-the-fly + new_class = type( + "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), + (self.replaceCls, module.__class__), + {}, # {"new_method": lambda self: ...}, + ) + module.__class__ = new_class + else: + # replaceCls is complete class, this allow arbitrary class swap + module.__class__ = self.replaceCls + + # initialize Caffe2Compatible + if isinstance(module, Caffe2Compatible): + module.tensor_mode = False + + return module + + +def patch(model, target, updater, *args, **kwargs): + """ + recursively (post-order) update all modules with the target type and its + subclasses, make a initialization/composition/inheritance/... via the + updater.create_from. 
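`Caffe2CompatibleConverter.create_from` swaps an existing module's `__class__`, building a mixin class on the fly with `type()` when the replacement acts as a `GenericMixin`. A rough, standalone illustration of that idea, using `nn.Linear` only as a convenient stand-in:

```python
import torch
from torch import nn


class LoggingMixin:
    def forward(self, x):
        # extra behaviour layered on top of the original forward()
        print("patched forward on", type(self).__name__)
        return super().forward(x)


lin = nn.Linear(4, 2)
# Create "LoggingMixinMixedWithLinear" on the fly and swap the instance's class,
# mirroring what create_from does when replaceCls is a GenericMixin subclass.
lin.__class__ = type("LoggingMixinMixedWithLinear", (LoggingMixin, nn.Linear), {})
out = lin(torch.randn(1, 4))  # prints, then runs the original nn.Linear.forward
```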
+ """ + for name, module in model.named_children(): + model._modules[name] = patch(module, target, updater, *args, **kwargs) + if isinstance(model, target): + return updater.create_from(model, *args, **kwargs) + return model + + +def patch_generalized_rcnn(model): + ccc = Caffe2CompatibleConverter + model = patch(model, rpn.RPN, ccc(Caffe2RPN)) + model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) + + return model + + +@contextlib.contextmanager +def mock_fastrcnn_outputs_inference( + tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers +): + with mock.patch.object( + box_predictor_type, + "inference", + autospec=True, + side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +@contextlib.contextmanager +def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): + with mock.patch( + "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +@contextlib.contextmanager +def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): + with mock.patch( + "{}.keypoint_rcnn_inference".format(patched_module), + side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint), + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +class ROIHeadsPatcher: + def __init__(self, heads, use_heatmap_max_keypoint): + self.heads = heads + self.use_heatmap_max_keypoint = use_heatmap_max_keypoint + self.previous_patched = {} + + @contextlib.contextmanager + def mock_roi_heads(self, tensor_mode=True): + """ + Patching several inference functions inside ROIHeads and its subclasses + + Args: + tensor_mode (bool): whether the inputs/outputs are caffe2's tensor + format or not. Default to True. + """ + # NOTE: this requries the `keypoint_rcnn_inference` and `mask_rcnn_inference` + # are called inside the same file as BaseXxxHead due to using mock.patch. 
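The helpers above wrap `mock.patch` as context managers; `mock_roi_heads` below then enters a variable number of them through a single `contextlib.ExitStack`. A small standalone sketch of that pattern (patching `math.sqrt` is purely illustrative):

```python
import contextlib
import math
from unittest import mock


def fake_sqrt(x):
    return 42.0


patches = [mock.patch("math.sqrt", side_effect=fake_sqrt)]  # could hold several patches

with contextlib.ExitStack() as stack:
    for mgr in patches:
        stack.enter_context(mgr)       # enter however many managers were collected
    assert math.sqrt(9) == 42.0        # the patched function is active here

assert math.sqrt(9) == 3.0             # and restored once the stack exits
```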
+ kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__ + mask_head_mod = mask_head.BaseMaskRCNNHead.__module__ + + mock_ctx_managers = [ + mock_fastrcnn_outputs_inference( + tensor_mode=tensor_mode, + check=True, + box_predictor_type=type(self.heads.box_predictor), + ) + ] + if getattr(self.heads, "keypoint_on", False): + mock_ctx_managers += [ + mock_keypoint_rcnn_inference( + tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint + ) + ] + if getattr(self.heads, "mask_on", False): + mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)] + + with contextlib.ExitStack() as stack: # python 3.3+ + for mgr in mock_ctx_managers: + stack.enter_context(mgr) + yield + + def patch_roi_heads(self, tensor_mode=True): + self.previous_patched["box_predictor"] = self.heads.box_predictor.inference + self.previous_patched["keypoint_rcnn"] = keypoint_head.keypoint_rcnn_inference + self.previous_patched["mask_rcnn"] = mask_head.mask_rcnn_inference + + def patched_fastrcnn_outputs_inference(predictions, proposal): + return caffe2_fast_rcnn_outputs_inference( + True, self.heads.box_predictor, predictions, proposal + ) + + self.heads.box_predictor.inference = patched_fastrcnn_outputs_inference + + if getattr(self.heads, "keypoint_on", False): + + def patched_keypoint_rcnn_inference(pred_keypoint_logits, pred_instances): + return caffe2_keypoint_rcnn_inference( + self.use_heatmap_max_keypoint, pred_keypoint_logits, pred_instances + ) + + keypoint_head.keypoint_rcnn_inference = patched_keypoint_rcnn_inference + + if getattr(self.heads, "mask_on", False): + + def patched_mask_rcnn_inference(pred_mask_logits, pred_instances): + return caffe2_mask_rcnn_inference(pred_mask_logits, pred_instances) + + mask_head.mask_rcnn_inference = patched_mask_rcnn_inference + + def unpatch_roi_heads(self): + self.heads.box_predictor.inference = self.previous_patched["box_predictor"] + keypoint_head.keypoint_rcnn_inference = self.previous_patched["keypoint_rcnn"] + mask_head.mask_rcnn_inference = self.previous_patched["mask_rcnn"] diff --git a/data_processing/detectron2/detectron2/export/flatten.py b/data_processing/detectron2/detectron2/export/flatten.py new file mode 100644 index 0000000..f5ba429 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/flatten.py @@ -0,0 +1,330 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import collections +from dataclasses import dataclass +from typing import Callable, List, Optional, Tuple +import torch +from torch import nn + +from detectron2.structures import Boxes, Instances, ROIMasks +from detectron2.utils.registry import _convert_target_to_string, locate + +from .torchscript_patch import patch_builtin_len + + +@dataclass +class Schema: + """ + A Schema defines how to flatten a possibly hierarchical object into tuple of + primitive objects, so it can be used as inputs/outputs of PyTorch's tracing. + + PyTorch does not support tracing a function that produces rich output + structures (e.g. dict, Instances, Boxes). To trace such a function, we + flatten the rich object into tuple of tensors, and return this tuple of tensors + instead. Meanwhile, we also need to know how to "rebuild" the original object + from the flattened results, so we can evaluate the flattened results. + A Schema defines how to flatten an object, and while flattening it, it records + necessary schemas so that the object can be rebuilt using the flattened outputs. + + The flattened object and the schema object is returned by ``.flatten`` classmethod. 
+ Then the original object can be rebuilt with the ``__call__`` method of schema. + + A Schema is a dataclass that can be serialized easily. + """ + + # inspired by FetchMapper in tensorflow/python/client/session.py + + @classmethod + def flatten(cls, obj): + raise NotImplementedError + + def __call__(self, values): + raise NotImplementedError + + @staticmethod + def _concat(values): + ret = () + sizes = [] + for v in values: + assert isinstance(v, tuple), "Flattened results must be a tuple" + ret = ret + v + sizes.append(len(v)) + return ret, sizes + + @staticmethod + def _split(values, sizes): + if len(sizes): + expected_len = sum(sizes) + assert ( + len(values) == expected_len + ), f"Values has length {len(values)} but expect length {expected_len}." + ret = [] + for k in range(len(sizes)): + begin, end = sum(sizes[:k]), sum(sizes[: k + 1]) + ret.append(values[begin:end]) + return ret + + +@dataclass +class ListSchema(Schema): + schemas: List[Schema] # the schemas that define how to flatten each element in the list + sizes: List[int] # the flattened length of each element + + def __call__(self, values): + values = self._split(values, self.sizes) + if len(values) != len(self.schemas): + raise ValueError( + f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!" + ) + values = [m(v) for m, v in zip(self.schemas, values)] + return list(values) + + @classmethod + def flatten(cls, obj): + res = [flatten_to_tuple(k) for k in obj] + values, sizes = cls._concat([k[0] for k in res]) + return values, cls([k[1] for k in res], sizes) + + +@dataclass +class TupleSchema(ListSchema): + def __call__(self, values): + return tuple(super().__call__(values)) + + +@dataclass +class IdentitySchema(Schema): + def __call__(self, values): + return values[0] + + @classmethod + def flatten(cls, obj): + return (obj,), cls() + + +@dataclass +class DictSchema(ListSchema): + keys: List[str] + + def __call__(self, values): + values = super().__call__(values) + return dict(zip(self.keys, values)) + + @classmethod + def flatten(cls, obj): + for k in obj.keys(): + if not isinstance(k, str): + raise KeyError("Only support flattening dictionaries if keys are str.") + keys = sorted(obj.keys()) + values = [obj[k] for k in keys] + ret, schema = ListSchema.flatten(values) + return ret, cls(schema.schemas, schema.sizes, keys) + + +@dataclass +class InstancesSchema(DictSchema): + def __call__(self, values): + image_size, fields = values[-1], values[:-1] + fields = super().__call__(fields) + return Instances(image_size, **fields) + + @classmethod + def flatten(cls, obj): + ret, schema = super().flatten(obj.get_fields()) + size = obj.image_size + if not isinstance(size, torch.Tensor): + size = torch.tensor(size) + return ret + (size,), schema + + +@dataclass +class TensorWrapSchema(Schema): + """ + For classes that are simple wrapper of tensors, e.g. + Boxes, RotatedBoxes, BitMasks + """ + + class_name: str + + def __call__(self, values): + return locate(self.class_name)(values[0]) + + @classmethod + def flatten(cls, obj): + return (obj.tensor,), cls(_convert_target_to_string(type(obj))) + + +# if more custom structures needed in the future, can allow +# passing in extra schemas for custom types +def flatten_to_tuple(obj): + """ + Flatten an object so it can be used for PyTorch tracing. + Also returns how to rebuild the original object from the flattened outputs. 
+ + Returns: + res (tuple): the flattened results that can be used as tracing outputs + schema: an object with a ``__call__`` method such that ``schema(res) == obj``. + It is a pure dataclass that can be serialized. + """ + schemas = [ + ((str, bytes), IdentitySchema), + (list, ListSchema), + (tuple, TupleSchema), + (collections.abc.Mapping, DictSchema), + (Instances, InstancesSchema), + ((Boxes, ROIMasks), TensorWrapSchema), + ] + for klass, schema in schemas: + if isinstance(obj, klass): + F = schema + break + else: + F = IdentitySchema + + return F.flatten(obj) + + +class TracingAdapter(nn.Module): + """ + A model may take rich input/output format (e.g. dict or custom classes), + but `torch.jit.trace` requires tuple of tensors as input/output. + This adapter flattens input/output format of a model so it becomes traceable. + + It also records the necessary schema to rebuild model's inputs/outputs from flattened + inputs/outputs. + + Example: + :: + outputs = model(inputs) # inputs/outputs may be rich structure + adapter = TracingAdapter(model, inputs) + + # can now trace the model, with adapter.flattened_inputs, or another + # tuple of tensors with the same length and meaning + traced = torch.jit.trace(adapter, adapter.flattened_inputs) + + # traced model can only produce flattened outputs (tuple of tensors) + flattened_outputs = traced(*adapter.flattened_inputs) + # adapter knows the schema to convert it back (new_outputs == outputs) + new_outputs = adapter.outputs_schema(flattened_outputs) + """ + + flattened_inputs: Tuple[torch.Tensor] = None + """ + Flattened version of inputs given to this class's constructor. + """ + + inputs_schema: Schema = None + """ + Schema of the inputs given to this class's constructor. + """ + + outputs_schema: Schema = None + """ + Schema of the output produced by calling the given model with inputs. + """ + + def __init__( + self, + model: nn.Module, + inputs, + inference_func: Optional[Callable] = None, + allow_non_tensor: bool = False, + ): + """ + Args: + model: an nn.Module + inputs: An input argument or a tuple of input arguments used to call model. + After flattening, it has to only consist of tensors. + inference_func: a callable that takes (model, *inputs), calls the + model with inputs, and return outputs. By default it + is ``lambda model, *inputs: model(*inputs)``. Can be override + if you need to call the model differently. + allow_non_tensor: allow inputs/outputs to contain non-tensor objects. + This option will filter out non-tensor objects to make the + model traceable, but ``inputs_schema``/``outputs_schema`` cannot be + used anymore because inputs/outputs cannot be rebuilt from pure tensors. + This is useful when you're only interested in the single trace of + execution (e.g. for flop count), but not interested in + generalizing the traced graph to new inputs. 
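A quick sketch of `flatten_to_tuple` on a nested container (the particular dict layout is made up; it assumes this module is importable as `detectron2.export.flatten`):

```python
import torch

from detectron2.export.flatten import flatten_to_tuple

obj = {"scores": torch.rand(5), "boxes": (torch.zeros(5, 4), torch.arange(5))}
flat, schema = flatten_to_tuple(obj)

# every element of `flat` is a plain tensor, suitable as tracing inputs/outputs
assert all(isinstance(t, torch.Tensor) for t in flat)

# the schema knows how to rebuild the original structure from the flat tuple
rebuilt = schema(flat)
assert set(rebuilt.keys()) == {"scores", "boxes"}
```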
+ """ + super().__init__() + if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)): + model = model.module + self.model = model + if not isinstance(inputs, tuple): + inputs = (inputs,) + self.inputs = inputs + self.allow_non_tensor = allow_non_tensor + + if inference_func is None: + inference_func = lambda model, *inputs: model(*inputs) # noqa + self.inference_func = inference_func + + self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs) + + if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs): + return + if self.allow_non_tensor: + self.flattened_inputs = tuple( + [x for x in self.flattened_inputs if isinstance(x, torch.Tensor)] + ) + self.inputs_schema = None + else: + for input in self.flattened_inputs: + if not isinstance(input, torch.Tensor): + raise ValueError( + "Inputs for tracing must only contain tensors. " + f"Got a {type(input)} instead." + ) + + def forward(self, *args: torch.Tensor): + with torch.no_grad(), patch_builtin_len(): + if self.inputs_schema is not None: + inputs_orig_format = self.inputs_schema(args) + else: + if len(args) != len(self.flattened_inputs) or any( + x is not y for x, y in zip(args, self.flattened_inputs) + ): + raise ValueError( + "TracingAdapter does not contain valid inputs_schema." + " So it cannot generalize to other inputs and must be" + " traced with `.flattened_inputs`." + ) + inputs_orig_format = self.inputs + + outputs = self.inference_func(self.model, *inputs_orig_format) + flattened_outputs, schema = flatten_to_tuple(outputs) + + flattened_output_tensors = tuple( + [x for x in flattened_outputs if isinstance(x, torch.Tensor)] + ) + if len(flattened_output_tensors) < len(flattened_outputs): + if self.allow_non_tensor: + flattened_outputs = flattened_output_tensors + self.outputs_schema = None + else: + raise ValueError( + "Model cannot be traced because some model outputs " + "cannot flatten to tensors." + ) + else: # schema is valid + if self.outputs_schema is None: + self.outputs_schema = schema + else: + assert self.outputs_schema == schema, ( + "Model should always return outputs with the same " + "structure so it can be traced!" + ) + return flattened_outputs + + def _create_wrapper(self, traced_model): + """ + Return a function that has an input/output interface the same as the + original model, but it calls the given traced model under the hood. + """ + + def forward(*args): + flattened_inputs, _ = flatten_to_tuple(args) + flattened_outputs = traced_model(*flattened_inputs) + return self.outputs_schema(flattened_outputs) + + return forward diff --git a/data_processing/detectron2/detectron2/export/shared.py b/data_processing/detectron2/detectron2/export/shared.py new file mode 100644 index 0000000..53ba933 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/shared.py @@ -0,0 +1,1039 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
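To make the adapter concrete, here is a hedged end-to-end sketch of tracing a toy model whose inputs and outputs are dicts (`DictModel` is a made-up example, not a detectron2 class):

```python
import torch
from torch import nn

from detectron2.export.flatten import TracingAdapter


class DictModel(nn.Module):
    """Toy model with dict inputs/outputs, which torch.jit.trace cannot handle directly."""

    def forward(self, batch):
        x = batch["x"]
        return {"doubled": x * 2, "halved": x / 2}


model = DictModel().eval()
inputs = ({"x": torch.randn(3)},)            # the tuple of arguments passed to the model

adapter = TracingAdapter(model, inputs)       # flattens the dict argument into tensors
traced = torch.jit.trace(adapter, adapter.flattened_inputs)

flat_out = traced(*adapter.flattened_inputs)  # tuple of tensors
rebuilt = adapter.outputs_schema(flat_out)    # back to {"doubled": ..., "halved": ...}
assert torch.allclose(rebuilt["doubled"], inputs[0]["x"] * 2)
```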
+ +import collections +import copy +import functools +import logging +import numpy as np +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from unittest import mock +import caffe2.python.utils as putils +import torch +import torch.nn.functional as F +from caffe2.proto import caffe2_pb2 +from caffe2.python import core, net_drawer, workspace +from torch.nn.functional import interpolate as interp + +logger = logging.getLogger(__name__) + + +# ==== torch/utils_toffee/cast.py ======================================= + + +def to_device(t, device_str): + """ + This function is a replacement of .to(another_device) such that it allows the + casting to be traced properly by explicitly calling the underlying copy ops. + It also avoids introducing unncessary op when casting to the same device. + """ + src = t.device + dst = torch.device(device_str) + + if src == dst: + return t + elif src.type == "cuda" and dst.type == "cpu": + return torch.ops._caffe2.CopyGPUToCPU(t) + elif src.type == "cpu" and dst.type == "cuda": + return torch.ops._caffe2.CopyCPUToGPU(t) + else: + raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst)) + + +# ==== torch/utils_toffee/interpolate.py ======================================= + + +# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py +def BilinearInterpolation(tensor_in, up_scale): + assert up_scale % 2 == 0, "Scale should be even" + + def upsample_filt(size): + factor = (size + 1) // 2 + if size % 2 == 1: + center = factor - 1 + else: + center = factor - 0.5 + + og = np.ogrid[:size, :size] + return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) + + kernel_size = int(up_scale) * 2 + bil_filt = upsample_filt(kernel_size) + + dim = int(tensor_in.shape[1]) + kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32) + kernel[range(dim), range(dim), :, :] = bil_filt + + tensor_out = F.conv_transpose2d( + tensor_in, + weight=to_device(torch.Tensor(kernel), tensor_in.device), + bias=None, + stride=int(up_scale), + padding=int(up_scale / 2), + ) + + return tensor_out + + +# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if +# using dynamic `scale_factor` rather than static `size`. (T43166860) +# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly. +def onnx_compatibale_interpolate( + input, size=None, scale_factor=None, mode="nearest", align_corners=None +): + # NOTE: The input dimensions are interpreted in the form: + # `mini-batch x channels x [optional depth] x [optional height] x width`. + if size is None and scale_factor is not None: + if input.dim() == 4: + if isinstance(scale_factor, (int, float)): + height_scale, width_scale = (scale_factor, scale_factor) + else: + assert isinstance(scale_factor, (tuple, list)) + assert len(scale_factor) == 2 + height_scale, width_scale = scale_factor + + assert not align_corners, "No matching C2 op for align_corners == True" + if mode == "nearest": + return torch.ops._caffe2.ResizeNearest( + input, order="NCHW", width_scale=width_scale, height_scale=height_scale + ) + elif mode == "bilinear": + logger.warning( + "Use F.conv_transpose2d for bilinear interpolate" + " because there's no such C2 op, this may cause significant" + " slowdown and the boundary pixels won't be as same as" + " using F.interpolate due to padding." 
+ ) + assert height_scale == width_scale + return BilinearInterpolation(input, up_scale=height_scale) + logger.warning("Output size is not static, it might cause ONNX conversion issue") + + return interp(input, size, scale_factor, mode, align_corners) + + +def mock_torch_nn_functional_interpolate(): + def decorator(func): + @functools.wraps(func) + def _mock_torch_nn_functional_interpolate(*args, **kwargs): + if torch.onnx.is_in_onnx_export(): + with mock.patch( + "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate + ): + return func(*args, **kwargs) + else: + return func(*args, **kwargs) + + return _mock_torch_nn_functional_interpolate + + return decorator + + +# ==== torch/utils_caffe2/ws_utils.py ========================================== + + +class ScopedWS(object): + def __init__(self, ws_name, is_reset, is_cleanup=False): + self.ws_name = ws_name + self.is_reset = is_reset + self.is_cleanup = is_cleanup + self.org_ws = "" + + def __enter__(self): + self.org_ws = workspace.CurrentWorkspace() + if self.ws_name is not None: + workspace.SwitchWorkspace(self.ws_name, True) + if self.is_reset: + workspace.ResetWorkspace() + + return workspace + + def __exit__(self, *args): + if self.is_cleanup: + workspace.ResetWorkspace() + if self.ws_name is not None: + workspace.SwitchWorkspace(self.org_ws) + + +def fetch_any_blob(name): + bb = None + try: + bb = workspace.FetchBlob(name) + except TypeError: + bb = workspace.FetchInt8Blob(name) + except Exception as e: + logger.error("Get blob {} error: {}".format(name, e)) + + return bb + + +# ==== torch/utils_caffe2/protobuf.py ========================================== + + +def get_pb_arg(pb, arg_name): + for x in pb.arg: + if x.name == arg_name: + return x + return None + + +def get_pb_arg_valf(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.f if arg is not None else default_val + + +def get_pb_arg_floats(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(map(float, arg.floats)) if arg is not None else default_val + + +def get_pb_arg_ints(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(map(int, arg.ints)) if arg is not None else default_val + + +def get_pb_arg_vali(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.i if arg is not None else default_val + + +def get_pb_arg_vals(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.s if arg is not None else default_val + + +def get_pb_arg_valstrings(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(arg.strings) if arg is not None else default_val + + +def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False): + arg = get_pb_arg(pb, arg_name) + if arg is None: + arg = putils.MakeArgument(arg_name, arg_value) + assert hasattr(arg, arg_attr) + pb.arg.extend([arg]) + if allow_override and getattr(arg, arg_attr) != arg_value: + logger.warning( + "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value) + ) + setattr(arg, arg_attr, arg_value) + else: + assert arg is not None + assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format( + getattr(arg, arg_attr), arg_value + ) + + +def _create_const_fill_op_from_numpy(name, tensor, device_option=None): + assert type(tensor) == np.ndarray + kTypeNameMapper = { + np.dtype("float32"): "GivenTensorFill", + np.dtype("int32"): "GivenTensorIntFill", + np.dtype("int64"): "GivenTensorInt64Fill", + np.dtype("uint8"): 
"GivenTensorStringFill", + } + + args_dict = {} + if tensor.dtype == np.dtype("uint8"): + args_dict.update({"values": [str(tensor.data)], "shape": [1]}) + else: + args_dict.update({"values": tensor, "shape": tensor.shape}) + + if device_option is not None: + args_dict["device_option"] = device_option + + return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict) + + +def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor): + assert type(int8_tensor) == workspace.Int8Tensor + kTypeNameMapper = { + np.dtype("int32"): "Int8GivenIntTensorFill", + np.dtype("uint8"): "Int8GivenTensorFill", + } + + tensor = int8_tensor.data + assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")] + values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor + + return core.CreateOperator( + kTypeNameMapper[tensor.dtype], + [], + [name], + values=values, + shape=tensor.shape, + Y_scale=int8_tensor.scale, + Y_zero_point=int8_tensor.zero_point, + ) + + +def create_const_fill_op( + name: str, + blob: Union[np.ndarray, workspace.Int8Tensor], + device_option: Optional[caffe2_pb2.DeviceOption] = None, +) -> caffe2_pb2.OperatorDef: + """ + Given a blob object, return the Caffe2 operator that creates this blob + as constant. Currently support NumPy tensor and Caffe2 Int8Tensor. + """ + + tensor_type = type(blob) + assert tensor_type in [ + np.ndarray, + workspace.Int8Tensor, + ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format( + name, type(blob) + ) + + if tensor_type == np.ndarray: + return _create_const_fill_op_from_numpy(name, blob, device_option) + elif tensor_type == workspace.Int8Tensor: + assert device_option is None + return _create_const_fill_op_from_c2_int8_tensor(name, blob) + + +def construct_init_net_from_params( + params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None +) -> caffe2_pb2.NetDef: + """ + Construct the init_net from params dictionary + """ + init_net = caffe2_pb2.NetDef() + device_options = device_options or {} + for name, blob in params.items(): + if isinstance(blob, str): + logger.warning( + ( + "Blob {} with type {} is not supported in generating init net," + " skipped.".format(name, type(blob)) + ) + ) + continue + init_net.op.extend( + [create_const_fill_op(name, blob, device_option=device_options.get(name, None))] + ) + init_net.external_output.append(name) + return init_net + + +def get_producer_map(ssa): + """ + Return dict from versioned blob to (i, j), + where i is index of producer op, j is the index of output of that op. + """ + producer_map = {} + for i in range(len(ssa)): + outputs = ssa[i][1] + for j, outp in enumerate(outputs): + producer_map[outp] = (i, j) + return producer_map + + +def get_consumer_map(ssa): + """ + Return dict from versioned blob to list of (i, j), + where i is index of consumer op, j is the index of input of that op. + """ + consumer_map = collections.defaultdict(list) + for i in range(len(ssa)): + inputs = ssa[i][0] + for j, inp in enumerate(inputs): + consumer_map[inp].append((i, j)) + return consumer_map + + +def get_params_from_init_net( + init_net: caffe2_pb2.NetDef, +) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]: + """ + Take the output blobs from init_net by running it. 
+ Outputs: + params: dict from blob name to numpy array + device_options: dict from blob name to the device option of its creating op + """ + # NOTE: this assumes that the params is determined by producer op with the + # only exception be CopyGPUToCPU which is CUDA op but returns CPU tensor. + def _get_device_option(producer_op): + if producer_op.type == "CopyGPUToCPU": + return caffe2_pb2.DeviceOption() + else: + return producer_op.device_option + + with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws: + ws.RunNetOnce(init_net) + params = {b: fetch_any_blob(b) for b in init_net.external_output} + ssa, versions = core.get_ssa(init_net) + producer_map = get_producer_map(ssa) + device_options = { + b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]]) + for b in init_net.external_output + } + return params, device_options + + +def _updater_raise(op, input_types, output_types): + raise RuntimeError( + "Failed to apply updater for op {} given input_types {} and" + " output_types {}".format(op, input_types, output_types) + ) + + +def _generic_status_identifier( + predict_net: caffe2_pb2.NetDef, + status_updater: Callable, + known_status: Dict[Tuple[str, int], Any], +) -> Dict[Tuple[str, int], Any]: + """ + Statically infer the status of each blob, the status can be such as device type + (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here + is versioned blob (Tuple[str, int]) in the format compatible with ssa. + Inputs: + predict_net: the caffe2 network + status_updater: a callable, given an op and the status of its input/output, + it returns the updated status of input/output. `None` is used for + representing unknown status. + known_status: a dict containing known status, used as initialization. + Outputs: + A dict mapping from versioned blob to its status + """ + ssa, versions = core.get_ssa(predict_net) + versioned_ext_input = [(b, 0) for b in predict_net.external_input] + versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output] + all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa]) + + allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output) + assert all(k in allowed_vbs for k in known_status) + assert all(v is not None for v in known_status.values()) + _known_status = copy.deepcopy(known_status) + + def _check_and_update(key, value): + assert value is not None + if key in _known_status: + if not _known_status[key] == value: + raise RuntimeError( + "Confilict status for {}, existing status {}, new status {}".format( + key, _known_status[key], value + ) + ) + _known_status[key] = value + + def _update_i(op, ssa_i): + versioned_inputs = ssa_i[0] + versioned_outputs = ssa_i[1] + + inputs_status = [_known_status.get(b, None) for b in versioned_inputs] + outputs_status = [_known_status.get(b, None) for b in versioned_outputs] + + new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status) + + for versioned_blob, status in zip( + versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status + ): + if status is not None: + _check_and_update(versioned_blob, status) + + for op, ssa_i in zip(predict_net.op, ssa): + _update_i(op, ssa_i) + for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)): + _update_i(op, ssa_i) + + # NOTE: This strictly checks all the blob from predict_net must be assgined + # a known status. However sometimes it's impossible (eg. 
having deadend op), + # we may relax this constraint if + for k in all_versioned_blobs: + if k not in _known_status: + raise NotImplementedError( + "Can not infer the status for {}. Currently only support the case where" + " a single forward and backward pass can identify status for all blobs.".format(k) + ) + + return _known_status + + +def infer_device_type( + predict_net: caffe2_pb2.NetDef, + known_status: Dict[Tuple[str, int], Any], + device_name_style: str = "caffe2", +) -> Dict[Tuple[str, int], str]: + """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob""" + + assert device_name_style in ["caffe2", "pytorch"] + _CPU_STR = "cpu" + _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda" + + def _copy_cpu_to_gpu_updater(op, input_types, output_types): + if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR: + _updater_raise(op, input_types, output_types) + return ([_CPU_STR], [_GPU_STR]) + + def _copy_gpu_to_cpu_updater(op, input_types, output_types): + if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR: + _updater_raise(op, input_types, output_types) + return ([_GPU_STR], [_CPU_STR]) + + def _other_ops_updater(op, input_types, output_types): + non_none_types = [x for x in input_types + output_types if x is not None] + if len(non_none_types) > 0: + the_type = non_none_types[0] + if not all(x == the_type for x in non_none_types): + _updater_raise(op, input_types, output_types) + else: + the_type = None + return ([the_type for _ in op.input], [the_type for _ in op.output]) + + def _device_updater(op, *args, **kwargs): + return { + "CopyCPUToGPU": _copy_cpu_to_gpu_updater, + "CopyGPUToCPU": _copy_gpu_to_cpu_updater, + }.get(op.type, _other_ops_updater)(op, *args, **kwargs) + + return _generic_status_identifier(predict_net, _device_updater, known_status) + + +# ==== torch/utils_caffe2/vis.py =============================================== + + +def _modify_blob_names(ops, blob_rename_f): + ret = [] + + def _replace_list(blob_list, replaced_list): + del blob_list[:] + blob_list.extend(replaced_list) + + for x in ops: + cur = copy.deepcopy(x) + _replace_list(cur.input, list(map(blob_rename_f, cur.input))) + _replace_list(cur.output, list(map(blob_rename_f, cur.output))) + ret.append(cur) + + return ret + + +def _rename_blob(name, blob_sizes, blob_ranges): + def _list_to_str(bsize): + ret = ", ".join([str(x) for x in bsize]) + ret = "[" + ret + "]" + return ret + + ret = name + if blob_sizes is not None and name in blob_sizes: + ret += "\n" + _list_to_str(blob_sizes[name]) + if blob_ranges is not None and name in blob_ranges: + ret += "\n" + _list_to_str(blob_ranges[name]) + + return ret + + +# graph_name could not contain word 'graph' +def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None): + blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges) + return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f) + + +def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None): + graph = None + ops = net.op + if blob_rename_func is not None: + ops = _modify_blob_names(ops, blob_rename_func) + if not op_only: + graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB") + else: + graph = net_drawer.GetPydotGraphMinimal( + ops, graph_name, rankdir="TB", minimal_dependency=True + ) + + try: + par_dir = os.path.dirname(file_name) + if not os.path.exists(par_dir): + os.makedirs(par_dir) + + format = 
os.path.splitext(os.path.basename(file_name))[-1] + if format == ".png": + graph.write_png(file_name) + elif format == ".pdf": + graph.write_pdf(file_name) + elif format == ".svg": + graph.write_svg(file_name) + else: + print("Incorrect format {}".format(format)) + except Exception as e: + print("Error when writing graph to image {}".format(e)) + + return graph + + +# ==== torch/utils_toffee/aten_to_caffe2.py ==================================== + + +def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef): + """ + For ONNX exported model, GroupNorm will be represented as ATen op, + this can be a drop in replacement from ATen to GroupNorm + """ + count = 0 + for op in predict_net.op: + if op.type == "ATen": + op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3 + if op_name and op_name.decode() == "group_norm": + op.arg.remove(get_pb_arg(op, "operator")) + + if get_pb_arg_vali(op, "cudnn_enabled", None): + op.arg.remove(get_pb_arg(op, "cudnn_enabled")) + + num_groups = get_pb_arg_vali(op, "num_groups", None) + if num_groups is not None: + op.arg.remove(get_pb_arg(op, "num_groups")) + check_set_pb_arg(op, "group", "i", num_groups) + + op.type = "GroupNorm" + count += 1 + if count > 1: + logger.info("Replaced {} ATen operator to GroupNormOp".format(count)) + + +# ==== torch/utils_toffee/alias.py ============================================= + + +def alias(x, name, is_backward=False): + if not torch.onnx.is_in_onnx_export(): + return x + assert isinstance(x, torch.Tensor) + return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward) + + +def fuse_alias_placeholder(predict_net, init_net): + """Remove AliasWithName placeholder and rename the input/output of it""" + # First we finish all the re-naming + for i, op in enumerate(predict_net.op): + if op.type == "AliasWithName": + assert len(op.input) == 1 + assert len(op.output) == 1 + name = get_pb_arg_vals(op, "name", None).decode() + is_backward = bool(get_pb_arg_vali(op, "is_backward", 0)) + rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward) + rename_op_output(predict_net, i, 0, name) + + # Remove AliasWithName, should be very safe since it's a non-op + new_ops = [] + for op in predict_net.op: + if op.type != "AliasWithName": + new_ops.append(op) + else: + # safety check + assert op.input == op.output + assert op.input[0] == op.arg[0].s.decode() + del predict_net.op[:] + predict_net.op.extend(new_ops) + + +# ==== torch/utils_caffe2/graph_transform.py =================================== + + +class IllegalGraphTransformError(ValueError): + """When a graph transform function call can't be executed.""" + + +def _rename_versioned_blob_in_proto( + proto: caffe2_pb2.NetDef, + old_name: str, + new_name: str, + version: int, + ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]], + start_versions: Dict[str, int], + end_versions: Dict[str, int], +): + """In given proto, rename all blobs with matched version""" + # Operater list + for op, i_th_ssa in zip(proto.op, ssa): + versioned_inputs, versioned_outputs = i_th_ssa + for i in range(len(op.input)): + if versioned_inputs[i] == (old_name, version): + op.input[i] = new_name + for i in range(len(op.output)): + if versioned_outputs[i] == (old_name, version): + op.output[i] = new_name + # external_input + if start_versions.get(old_name, 0) == version: + for i in range(len(proto.external_input)): + if proto.external_input[i] == old_name: + proto.external_input[i] = new_name + # external_output + if end_versions.get(old_name, 0) == 
version: + for i in range(len(proto.external_output)): + if proto.external_output[i] == old_name: + proto.external_output[i] = new_name + + +def rename_op_input( + predict_net: caffe2_pb2.NetDef, + init_net: caffe2_pb2.NetDef, + op_id: int, + input_id: int, + new_name: str, + from_producer: bool = False, +): + """ + Rename the op_id-th operator in predict_net, change it's input_id-th input's + name to the new_name. It also does automatic re-route and change + external_input and init_net if necessary. + - It requires the input is only consumed by this op. + - This function modifies predict_net and init_net in-place. + - When from_producer is enable, this also updates other operators that consumes + the same input. Be cautious because may trigger unintended behavior. + """ + assert isinstance(predict_net, caffe2_pb2.NetDef) + assert isinstance(init_net, caffe2_pb2.NetDef) + + init_net_ssa, init_net_versions = core.get_ssa(init_net) + predict_net_ssa, predict_net_versions = core.get_ssa( + predict_net, copy.deepcopy(init_net_versions) + ) + + versioned_inputs, versioned_outputs = predict_net_ssa[op_id] + old_name, version = versioned_inputs[input_id] + + if from_producer: + producer_map = get_producer_map(predict_net_ssa) + if not (old_name, version) in producer_map: + raise NotImplementedError( + "Can't find producer, the input {} is probably from" + " init_net, this is not supported yet.".format(old_name) + ) + producer = producer_map[(old_name, version)] + rename_op_output(predict_net, producer[0], producer[1], new_name) + return + + def contain_targets(op_ssa): + return (old_name, version) in op_ssa[0] + + is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa] + if sum(is_consumer) > 1: + raise IllegalGraphTransformError( + ( + "Input '{}' of operator(#{}) are consumed by other ops, please use" + + " rename_op_output on the producer instead. Offending op: \n{}" + ).format(old_name, op_id, predict_net.op[op_id]) + ) + + # update init_net + _rename_versioned_blob_in_proto( + init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions + ) + # update predict_net + _rename_versioned_blob_in_proto( + predict_net, + old_name, + new_name, + version, + predict_net_ssa, + init_net_versions, + predict_net_versions, + ) + + +def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str): + """ + Rename the op_id-th operator in predict_net, change it's output_id-th input's + name to the new_name. It also does automatic re-route and change + external_output and if necessary. + - It allows multiple consumers of its output. + - This function modifies predict_net in-place, doesn't need init_net. + """ + assert isinstance(predict_net, caffe2_pb2.NetDef) + + ssa, blob_versions = core.get_ssa(predict_net) + + versioned_inputs, versioned_outputs = ssa[op_id] + old_name, version = versioned_outputs[output_id] + + # update predict_net + _rename_versioned_blob_in_proto( + predict_net, old_name, new_name, version, ssa, {}, blob_versions + ) + + +def get_sub_graph_external_input_output( + predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int] +) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]: + """ + Return the list of external input/output of sub-graph, + each element is tuple of the name and corresponding version in predict_net. + + external input/output is defined the same way as caffe2 NetDef. 
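A rough sketch of the intent on a two-op chain where only the first op belongs to the sub-graph; it assumes caffe2 is available (as this module already requires), and the exact version numbers in the returned tuples follow `core.get_ssa`'s counting:

```python
from caffe2.proto import caffe2_pb2
from caffe2.python import core

from detectron2.export.shared import get_sub_graph_external_input_output

net = caffe2_pb2.NetDef()
net.op.extend([
    core.CreateOperator("Relu", ["x"], ["y"]),   # op 0
    core.CreateOperator("Relu", ["y"], ["z"]),   # op 1
])
net.external_input.append("x")
net.external_output.append("z")

# Sub-graph = {op 0}: "x" is its external input; "y" is its external output,
# because op 1 (outside the sub-graph) still consumes it.
ext_inputs, ext_outputs = get_sub_graph_external_input_output(net, [0])
print(ext_inputs, ext_outputs)
```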
+ """ + ssa, versions = core.get_ssa(predict_net) + + all_inputs = [] + all_outputs = [] + for op_id in sub_graph_op_indices: + all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs] + all_outputs += list(ssa[op_id][1]) # ssa output won't repeat + + # for versioned blobs, external inputs are just those blob in all_inputs + # but not in all_outputs + ext_inputs = [inp for inp in all_inputs if inp not in all_outputs] + + # external outputs are essentially outputs of this subgraph that are used + # outside of this sub-graph (including predict_net.external_output) + all_other_inputs = sum( + (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices), + [(outp, versions[outp]) for outp in predict_net.external_output], + ) + ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)] + + return ext_inputs, ext_outputs + + +class DiGraph: + """A DAG representation of caffe2 graph, each vertice is a versioned blob.""" + + def __init__(self): + self.vertices = set() + self.graph = collections.defaultdict(list) + + def add_edge(self, u, v): + self.graph[u].append(v) + self.vertices.add(u) + self.vertices.add(v) + + # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/ + def get_all_paths(self, s, d): + visited = {k: False for k in self.vertices} + path = [] + all_paths = [] + + def _get_all_paths_util(graph, u, d, visited, path): + visited[u] = True + path.append(u) + if u == d: + all_paths.append(copy.deepcopy(path)) + else: + for i in graph[u]: + if not visited[i]: + _get_all_paths_util(graph, i, d, visited, path) + path.pop() + visited[u] = False + + _get_all_paths_util(self.graph, s, d, visited, path) + return all_paths + + @staticmethod + def from_ssa(ssa): + graph = DiGraph() + for op_id in range(len(ssa)): + for inp in ssa[op_id][0]: + for outp in ssa[op_id][1]: + graph.add_edge(inp, outp) + return graph + + +def _get_dependency_chain(ssa, versioned_target, versioned_source): + """ + Return the index list of relevant operator to produce target blob from source blob, + if there's no dependency, return empty list. + """ + + # finding all paths between nodes can be O(N!), thus we can only search + # in the subgraph using the op starting from the first consumer of source blob + # to the producer of the target blob. + consumer_map = get_consumer_map(ssa) + producer_map = get_producer_map(ssa) + start_op = min(x[0] for x in consumer_map[versioned_source]) - 15 + end_op = ( + producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op + ) + sub_graph_ssa = ssa[start_op : end_op + 1] + if len(sub_graph_ssa) > 30: + logger.warning( + "Subgraph bebetween {} and {} is large (from op#{} to op#{}), it" + " might take non-trival time to find all paths between them.".format( + versioned_source, versioned_target, start_op, end_op + ) + ) + + dag = DiGraph.from_ssa(sub_graph_ssa) + paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends + ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths] + return sorted(set().union(*[set(ops) for ops in ops_in_paths])) + + +def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]: + """ + Idenfity the reshape sub-graph in a protobuf. + The reshape sub-graph is defined as matching the following pattern: + + (input_blob) -> Op_1 -> ... 
-> Op_N -> (new_shape) -─┐ + └-------------------------------------------> Reshape -> (output_blob) + + Return: + List of sub-graphs, each sub-graph is represented as a list of indices + of the relavent ops, [Op_1, Op_2, ..., Op_N, Reshape] + """ + + ssa, _ = core.get_ssa(predict_net) + + ret = [] + for i, op in enumerate(predict_net.op): + if op.type == "Reshape": + assert len(op.input) == 2 + input_ssa = ssa[i][0] + data_source = input_ssa[0] + shape_source = input_ssa[1] + op_indices = _get_dependency_chain(ssa, shape_source, data_source) + ret.append(op_indices + [i]) + return ret + + +def remove_reshape_for_fc(predict_net, params): + """ + In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape + a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping + doesn't work well with ONNX and Int8 tools, and cause using extra + ops (eg. ExpandDims) that might not be available on mobile. + Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape + after exporting ONNX model. + """ + from caffe2.python import core + + # find all reshape sub-graph that can be removed, which is now all Reshape + # sub-graph whose output is only consumed by FC. + # TODO: to make it safer, we may need the actually value to better determine + # if a Reshape before FC is removable. + reshape_sub_graphs = identify_reshape_sub_graph(predict_net) + sub_graphs_to_remove = [] + for reshape_sub_graph in reshape_sub_graphs: + reshape_op_id = reshape_sub_graph[-1] + assert predict_net.op[reshape_op_id].type == "Reshape" + ssa, _ = core.get_ssa(predict_net) + reshape_output = ssa[reshape_op_id][1][0] + consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]] + if all(predict_net.op[consumer].type == "FC" for consumer in consumers): + # safety check if the sub-graph is isolated, for this reshape sub-graph, + # it means it has one non-param external input and one external output. + ext_inputs, ext_outputs = get_sub_graph_external_input_output( + predict_net, reshape_sub_graph + ) + non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0] + if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1: + sub_graphs_to_remove.append(reshape_sub_graph) + + # perform removing subgraph by: + # 1: rename the Reshape's output to its input, then the graph can be + # seen as in-place itentify, meaning whose external input/output are the same. + # 2: simply remove those ops. 
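For context, the reshape sub-graphs targeted here usually come from the common PyTorch idiom of flattening a 4D feature map before an `nn.Linear`; a hypothetical module that would produce such a pattern when exported to ONNX:

```python
import torch
from torch import nn


class ToyHead(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(16, 4)

    def forward(self, x):                  # x: (N, C, H, W)
        x = x.view(x.size(0), -1)          # dynamic reshape -> Shape/.../Reshape ops in the export
        return self.fc(x)


torch.onnx.export(ToyHead(), torch.randn(2, 1, 4, 4), "toy_head.onnx")
```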
+ remove_op_ids = [] + params_to_remove = [] + for sub_graph in sub_graphs_to_remove: + logger.info( + "Remove Reshape sub-graph:\n{}".format( + "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph]) + ) + ) + reshape_op_id = sub_graph[-1] + new_reshap_output = predict_net.op[reshape_op_id].input[0] + rename_op_output(predict_net, reshape_op_id, 0, new_reshap_output) + ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph) + non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0] + params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0] + assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1 + assert ext_outputs[0][0] == non_params_ext_inputs[0][0] + assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1 + remove_op_ids.extend(sub_graph) + params_to_remove.extend(params_ext_inputs) + + predict_net = copy.deepcopy(predict_net) + new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids] + del predict_net.op[:] + predict_net.op.extend(new_ops) + for versioned_params in params_to_remove: + name = versioned_params[0] + logger.info("Remove params: {} from init_net and predict_net.external_input".format(name)) + del params[name] + predict_net.external_input.remove(name) + + return predict_net, params + + +def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef): + """ + In-place fuse extra copy ops between cpu/gpu for the following case: + a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1 + -CopyBToA> c2 -NextOp2-> d2 + The fused network will look like: + a -NextOp1-> d1 + -NextOp2-> d2 + """ + + _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"] + + def _fuse_once(predict_net): + ssa, blob_versions = core.get_ssa(predict_net) + consumer_map = get_consumer_map(ssa) + versioned_external_output = [ + (name, blob_versions[name]) for name in predict_net.external_output + ] + + for op_id, op in enumerate(predict_net.op): + if op.type in _COPY_OPS: + fw_copy_versioned_output = ssa[op_id][1][0] + consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]] + reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)] + + is_fusable = ( + len(consumer_ids) > 0 + and fw_copy_versioned_output not in versioned_external_output + and all( + predict_net.op[_op_id].type == reverse_op_type + and ssa[_op_id][1][0] not in versioned_external_output + for _op_id in consumer_ids + ) + ) + + if is_fusable: + for rv_copy_op_id in consumer_ids: + # making each NextOp uses "a" directly and removing Copy ops + rs_copy_versioned_output = ssa[rv_copy_op_id][1][0] + next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0] + predict_net.op[next_op_id].input[inp_id] = op.input[0] + # remove CopyOps + new_ops = [ + op + for i, op in enumerate(predict_net.op) + if i != op_id and i not in consumer_ids + ] + del predict_net.op[:] + predict_net.op.extend(new_ops) + return True + + return False + + # _fuse_once returns False is nothing can be fused + while _fuse_once(predict_net): + pass + + +def remove_dead_end_ops(net_def: caffe2_pb2.NetDef): + """remove ops if its output is not used or not in external_output""" + ssa, versions = core.get_ssa(net_def) + versioned_external_output = [(name, versions[name]) for name in net_def.external_output] + consumer_map = get_consumer_map(ssa) + removed_op_ids = set() + + def _is_dead_end(versioned_blob): + return not ( + versioned_blob in versioned_external_output + or ( + len(consumer_map[versioned_blob]) > 0 + and all(x[0] not in removed_op_ids for x in 
consumer_map[versioned_blob]) + ) + ) + + for i, ssa_i in reversed(list(enumerate(ssa))): + versioned_outputs = ssa_i[1] + if all(_is_dead_end(outp) for outp in versioned_outputs): + removed_op_ids.add(i) + + # simply removing those deadend ops should have no effect to external_output + new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids] + del net_def.op[:] + net_def.op.extend(new_ops) diff --git a/data_processing/detectron2/detectron2/export/torchscript.py b/data_processing/detectron2/detectron2/export/torchscript.py new file mode 100644 index 0000000..24fe59b --- /dev/null +++ b/data_processing/detectron2/detectron2/export/torchscript.py @@ -0,0 +1,132 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import os +import torch + +from detectron2.utils.file_io import PathManager + +from .torchscript_patch import freeze_training_mode, patch_instances + +__all__ = ["scripting_with_instances", "dump_torchscript_IR"] + + +def scripting_with_instances(model, fields): + """ + Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since + attributes of :class:`Instances` are "dynamically" added in eager mode,it is difficult + for scripting to support it out of the box. This function is made to support scripting + a model that uses :class:`Instances`. It does the following: + + 1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``, + but with all attributes been "static". + The attributes need to be statically declared in the ``fields`` argument. + 2. Register ``new_Instances``, and force scripting compiler to + use it when trying to compile ``Instances``. + + After this function, the process will be reverted. User should be able to script another model + using different fields. + + Example: + Assume that ``Instances`` in the model consist of two attributes named + ``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and + :class:`Tensor` respectively during inference. You can call this function like: + :: + fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor} + torchscipt_model = scripting_with_instances(model, fields) + + Note: + It only support models in evaluation mode. + + Args: + model (nn.Module): The input model to be exported by scripting. + fields (Dict[str, type]): Attribute names and corresponding type that + ``Instances`` will use in the model. Note that all attributes used in ``Instances`` + need to be added, regardless of whether they are inputs/outputs of the model. + Data type not defined in detectron2 is not supported for now. + + Returns: + torch.jit.ScriptModule: the model in torchscript format + """ + assert ( + not model.training + ), "Currently we only support exporting models in evaluation mode to torchscript" + + with freeze_training_mode(model), patch_instances(fields): + scripted_model = torch.jit.script(model) + return scripted_model + + +# alias for old name +export_torchscript_with_instances = scripting_with_instances + + +def dump_torchscript_IR(model, dir): + """ + Dump IR of a TracedModule/ScriptModule/Function in various format (code, graph, + inlined graph). Useful for debugging. + + Args: + model (TracedModule/ScriptModule/ScriptFUnction): traced or scripted module + dir (str): output directory to dump files. 
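For example, one might dump the IR of a scripted toy model as below (the model and output directory are placeholders):

```python
import torch
from torch import nn

from detectron2.export.torchscript import dump_torchscript_IR

scripted = torch.jit.script(nn.Sequential(nn.Linear(8, 8), nn.ReLU()))
# writes model_ts_code.txt, model_ts_IR.txt, model_ts_IR_inlined.txt and model.txt
dump_torchscript_IR(scripted, "./ts_dump")
```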
+ """ + dir = os.path.expanduser(dir) + PathManager.mkdirs(dir) + + def _get_script_mod(mod): + if isinstance(mod, torch.jit.TracedModule): + return mod._actual_script_module + return mod + + # Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code + with PathManager.open(os.path.join(dir, "model_ts_code.txt"), "w") as f: + + def get_code(mod): + # Try a few ways to get code using private attributes. + try: + # This contains more information than just `mod.code` + return _get_script_mod(mod)._c.code + except AttributeError: + pass + try: + return mod.code + except AttributeError: + return None + + def dump_code(prefix, mod): + code = get_code(mod) + name = prefix or "root model" + if code is None: + f.write(f"Could not found code for {name} (type={mod.original_name})\n") + f.write("\n") + else: + f.write(f"\nCode for {name}, type={mod.original_name}:\n") + f.write(code) + f.write("\n") + f.write("-" * 80) + + for name, m in mod.named_children(): + dump_code(prefix + "." + name, m) + + if isinstance(model, torch.jit.ScriptFunction): + f.write(get_code(model)) + else: + dump_code("", model) + + def _get_graph(model): + try: + # Recursively dump IR of all modules + return _get_script_mod(model)._c.dump_to_str(True, False, False) + except AttributeError: + return model.graph.str() + + with PathManager.open(os.path.join(dir, "model_ts_IR.txt"), "w") as f: + f.write(_get_graph(model)) + + # Dump IR of the entire graph (all submodules inlined) + with PathManager.open(os.path.join(dir, "model_ts_IR_inlined.txt"), "w") as f: + f.write(str(model.inlined_graph)) + + if not isinstance(model, torch.jit.ScriptFunction): + # Dump the model structure in pytorch style + with PathManager.open(os.path.join(dir, "model.txt"), "w") as f: + f.write(str(model)) diff --git a/data_processing/detectron2/detectron2/export/torchscript_patch.py b/data_processing/detectron2/detectron2/export/torchscript_patch.py new file mode 100644 index 0000000..da9b324 --- /dev/null +++ b/data_processing/detectron2/detectron2/export/torchscript_patch.py @@ -0,0 +1,406 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import os +import sys +import tempfile +from contextlib import ExitStack, contextmanager +from copy import deepcopy +from unittest import mock +import torch +from torch import nn + +# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964 +import detectron2 # noqa F401 +from detectron2.structures import Boxes, Instances +from detectron2.utils.env import _import_file + +_counter = 0 + + +def _clear_jit_cache(): + from torch.jit._recursive import concrete_type_store + from torch.jit._state import _jit_caching_layer + + concrete_type_store.type_store.clear() # for modules + _jit_caching_layer.clear() # for free functions + + +def _add_instances_conversion_methods(newInstances): + """ + Add from_instances methods to the scripted Instances class. 
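For instance, a sketch assuming `fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}` and that `ScriptedInst` is the class yielded by `patch_instances(fields)` (the variable name is hypothetical):

```python
import torch

from detectron2.structures import Boxes, Instances

inst = Instances((480, 640))
inst.proposal_boxes = Boxes(torch.rand(5, 4))
inst.objectness_logits = torch.rand(5)

# Copy every field of the eager Instances onto the scripted, statically-typed class.
scripted_inst = ScriptedInst.from_instances(inst)
```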
+ """ + cls_name = newInstances.__name__ + + @torch.jit.unused + def from_instances(instances: Instances): + """ + Create scripted Instances from original Instances + """ + fields = instances.get_fields() + image_size = instances.image_size + ret = newInstances(image_size) + for name, val in fields.items(): + assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}" + setattr(ret, name, deepcopy(val)) + return ret + + newInstances.from_instances = from_instances + + +@contextmanager +def patch_instances(fields): + """ + A contextmanager, under which the Instances class in detectron2 is replaced + by a statically-typed scriptable class, defined by `fields`. + See more in `scripting_with_instances`. + """ + + with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile( + mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False + ) as f: + try: + # Objects that use Instances should not reuse previously-compiled + # results in cache, because `Instances` could be a new class each time. + _clear_jit_cache() + + cls_name, s = _gen_instance_module(fields) + f.write(s) + f.flush() + f.close() + + module = _import(f.name) + new_instances = getattr(module, cls_name) + _ = torch.jit.script(new_instances) + # let torchscript think Instances was scripted already + Instances.__torch_script_class__ = True + # let torchscript find new_instances when looking for the jit type of Instances + Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances) + + _add_instances_conversion_methods(new_instances) + yield new_instances + finally: + try: + del Instances.__torch_script_class__ + del Instances._jit_override_qualname + except AttributeError: + pass + sys.modules.pop(module.__name__) + + +def _gen_instance_class(fields): + """ + Args: + fields (dict[name: type]) + """ + + class _FieldType: + def __init__(self, name, type_): + assert isinstance(name, str), f"Field name must be str, got {name}" + self.name = name + self.type_ = type_ + self.annotation = f"{type_.__module__}.{type_.__name__}" + + fields = [_FieldType(k, v) for k, v in fields.items()] + + def indent(level, s): + return " " * 4 * level + s + + lines = [] + + global _counter + _counter += 1 + + cls_name = "ScriptedInstances{}".format(_counter) + + field_names = tuple(x.name for x in fields) + extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields]) + lines.append( + f""" +class {cls_name}: + def __init__(self, image_size: Tuple[int, int], {extra_args}): + self.image_size = image_size + self._field_names = {field_names} +""" + ) + + for f in fields: + lines.append( + indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})") + ) + + for f in fields: + lines.append( + f""" + @property + def {f.name}(self) -> {f.annotation}: + # has to use a local for type refinement + # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement + t = self._{f.name} + assert t is not None, "{f.name} is None and cannot be accessed!" 
+ return t + + @{f.name}.setter + def {f.name}(self, value: {f.annotation}) -> None: + self._{f.name} = value +""" + ) + + # support method `__len__` + lines.append( + """ + def __len__(self) -> int: +""" + ) + for f in fields: + lines.append( + f""" + t = self._{f.name} + if t is not None: + return len(t) +""" + ) + lines.append( + """ + raise NotImplementedError("Empty Instances does not support __len__!") +""" + ) + + # support method `has` + lines.append( + """ + def has(self, name: str) -> bool: +""" + ) + for f in fields: + lines.append( + f""" + if name == "{f.name}": + return self._{f.name} is not None +""" + ) + lines.append( + """ + return False +""" + ) + + # support method `to` + none_args = ", None" * len(fields) + lines.append( + f""" + def to(self, device: torch.device) -> "{cls_name}": + ret = {cls_name}(self.image_size{none_args}) +""" + ) + for f in fields: + if hasattr(f.type_, "to"): + lines.append( + f""" + t = self._{f.name} + if t is not None: + ret._{f.name} = t.to(device) +""" + ) + else: + # For now, ignore fields that cannot be moved to devices. + # Maybe can support other tensor-like classes (e.g. __torch_function__) + pass + lines.append( + """ + return ret +""" + ) + + # support method `getitem` + none_args = ", None" * len(fields) + lines.append( + f""" + def __getitem__(self, item) -> "{cls_name}": + ret = {cls_name}(self.image_size{none_args}) +""" + ) + for f in fields: + lines.append( + f""" + t = self._{f.name} + if t is not None: + ret._{f.name} = t[item] +""" + ) + lines.append( + """ + return ret +""" + ) + + # support method `cat` + # this version does not contain checks that all instances have same size and fields + none_args = ", None" * len(fields) + lines.append( + f""" + def cat(self, instances: List["{cls_name}"]) -> "{cls_name}": + ret = {cls_name}(self.image_size{none_args}) +""" + ) + for f in fields: + lines.append( + f""" + t = self._{f.name} + if t is not None: + values: List[{f.annotation}] = [x.{f.name} for x in instances] + if torch.jit.isinstance(t, torch.Tensor): + ret._{f.name} = torch.cat(values, dim=0) + else: + ret._{f.name} = t.cat(values) +""" + ) + lines.append( + """ + return ret""" + ) + + # support method `get_fields()` + lines.append( + """ + def get_fields(self) -> Dict[str, Tensor]: + ret = {} + """ + ) + for f in fields: + if f.type_ == Boxes: + stmt = "t.tensor" + elif f.type_ == torch.Tensor: + stmt = "t" + else: + stmt = f'assert False, "unsupported type {str(f.type_)}"' + lines.append( + f""" + t = self._{f.name} + if t is not None: + ret["{f.name}"] = {stmt} + """ + ) + lines.append( + """ + return ret""" + ) + return cls_name, os.linesep.join(lines) + + +def _gen_instance_module(fields): + # TODO: find a more automatic way to enable import of other classes + s = """ +from copy import deepcopy +import torch +from torch import Tensor +import typing +from typing import * + +import detectron2 +from detectron2.structures import Boxes, Instances + +""" + + cls_name, cls_def = _gen_instance_class(fields) + s += cls_def + return cls_name, s + + +def _import(path): + return _import_file( + "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True + ) + + +@contextmanager +def patch_builtin_len(modules=()): + """ + Patch the builtin len() function of a few detectron2 modules + to use __len__ instead, because __len__ does not convert values to + integers and therefore is friendly to tracing. 
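+
+    Usage sketch (the traced model and example inputs are placeholders):
+    ::
+        with patch_builtin_len():
+            traced = torch.jit.trace(model, (inputs,))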
+ + Args: + modules (list[stsr]): names of extra modules to patch len(), in + addition to those in detectron2. + """ + + def _new_len(obj): + return obj.__len__() + + with ExitStack() as stack: + MODULES = [ + "detectron2.modeling.roi_heads.fast_rcnn", + "detectron2.modeling.roi_heads.mask_head", + "detectron2.modeling.roi_heads.keypoint_head", + ] + list(modules) + ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES] + for m in ctxs: + m.side_effect = _new_len + yield + + +def patch_nonscriptable_classes(): + """ + Apply patches on a few nonscriptable detectron2 classes. + Should not have side-effects on eager usage. + """ + # __prepare_scriptable__ can also be added to models for easier maintenance. + # But it complicates the clean model code. + + from detectron2.modeling.backbone import ResNet, FPN + + # Due to https://github.com/pytorch/pytorch/issues/36061, + # we change backbone to use ModuleList for scripting. + # (note: this changes param names in state_dict) + + def prepare_resnet(self): + ret = deepcopy(self) + ret.stages = nn.ModuleList(ret.stages) + for k in self.stage_names: + delattr(ret, k) + return ret + + ResNet.__prepare_scriptable__ = prepare_resnet + + def prepare_fpn(self): + ret = deepcopy(self) + ret.lateral_convs = nn.ModuleList(ret.lateral_convs) + ret.output_convs = nn.ModuleList(ret.output_convs) + for name, _ in self.named_children(): + if name.startswith("fpn_"): + delattr(ret, name) + return ret + + FPN.__prepare_scriptable__ = prepare_fpn + + # Annotate some attributes to be constants for the purpose of scripting, + # even though they are not constants in eager mode. + from detectron2.modeling.roi_heads import StandardROIHeads + + if hasattr(StandardROIHeads, "__annotations__"): + # copy first to avoid editing annotations of base class + StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__) + StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool] + StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool] + + +# These patches are not supposed to have side-effects. +patch_nonscriptable_classes() + + +@contextmanager +def freeze_training_mode(model): + """ + A context manager that annotates the "training" attribute of every submodule + to constant, so that the training codepath in these modules can be + meta-compiled away. Upon exiting, the annotations are reverted. + """ + classes = {type(x) for x in model.modules()} + # __constants__ is the old way to annotate constants and not compatible + # with __annotations__ . + classes = {x for x in classes if not hasattr(x, "__constants__")} + for cls in classes: + cls.__annotations__["training"] = torch.jit.Final[bool] + yield + for cls in classes: + cls.__annotations__["training"] = bool diff --git a/data_processing/detectron2/detectron2/layers/__init__.py b/data_processing/detectron2/detectron2/layers/__init__.py new file mode 100644 index 0000000..761a3d1 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm, CycleBatchNormList +from .deform_conv import DeformConv, ModulatedDeformConv +from .mask_ops import paste_masks_in_image +from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated +from .roi_align import ROIAlign, roi_align +from .roi_align_rotated import ROIAlignRotated, roi_align_rotated +from .shape_spec import ShapeSpec +from .wrappers import ( + BatchNorm2d, + Conv2d, + ConvTranspose2d, + cat, + interpolate, + Linear, + nonzero_tuple, + cross_entropy, + empty_input_loss_func_wrapper, + shapes_to_tensor, + move_device_like, +) +from .blocks import CNNBlockBase, DepthwiseSeparableConv2d +from .aspp import ASPP +from .losses import ciou_loss, diou_loss + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/detectron2/layers/aspp.py b/data_processing/detectron2/detectron2/layers/aspp.py new file mode 100644 index 0000000..14861aa --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/aspp.py @@ -0,0 +1,144 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from copy import deepcopy +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from .batch_norm import get_norm +from .blocks import DepthwiseSeparableConv2d +from .wrappers import Conv2d + + +class ASPP(nn.Module): + """ + Atrous Spatial Pyramid Pooling (ASPP). + """ + + def __init__( + self, + in_channels, + out_channels, + dilations, + *, + norm, + activation, + pool_kernel_size=None, + dropout: float = 0.0, + use_depthwise_separable_conv=False, + ): + """ + Args: + in_channels (int): number of input channels for ASPP. + out_channels (int): number of output channels. + dilations (list): a list of 3 dilations in ASPP. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. norm is + applied to all conv layers except the conv following + global average pooling. + activation (callable): activation function. + pool_kernel_size (tuple, list): the average pooling size (kh, kw) + for image pooling layer in ASPP. If set to None, it always + performs global average pooling. If not None, it must be + divisible by the shape of inputs in forward(). It is recommended + to use a fixed input feature size in training, and set this + option to match this size, so that it performs global average + pooling in training, and the size of the pooling window stays + consistent in inference. + dropout (float): apply dropout on the output of ASPP. It is used in + the official DeepLab implementation with a rate of 0.1: + https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa + use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d + for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`. 
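+
+        Example (a minimal sketch; channel sizes, dilations and the activation
+        are illustrative, not defaults):
+        ::
+            aspp = ASPP(256, 256, [6, 12, 18], norm="BN", activation=F.relu)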
+ """ + super(ASPP, self).__init__() + assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations)) + self.pool_kernel_size = pool_kernel_size + self.dropout = dropout + use_bias = norm == "" + self.convs = nn.ModuleList() + # conv 1x1 + self.convs.append( + Conv2d( + in_channels, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + ) + weight_init.c2_xavier_fill(self.convs[-1]) + # atrous convs + for dilation in dilations: + if use_depthwise_separable_conv: + self.convs.append( + DepthwiseSeparableConv2d( + in_channels, + out_channels, + kernel_size=3, + padding=dilation, + dilation=dilation, + norm1=norm, + activation1=deepcopy(activation), + norm2=norm, + activation2=deepcopy(activation), + ) + ) + else: + self.convs.append( + Conv2d( + in_channels, + out_channels, + kernel_size=3, + padding=dilation, + dilation=dilation, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + ) + weight_init.c2_xavier_fill(self.convs[-1]) + # image pooling + # We do not add BatchNorm because the spatial resolution is 1x1, + # the original TF implementation has BatchNorm. + if pool_kernel_size is None: + image_pooling = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), + ) + else: + image_pooling = nn.Sequential( + nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1), + Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), + ) + weight_init.c2_xavier_fill(image_pooling[1]) + self.convs.append(image_pooling) + + self.project = Conv2d( + 5 * out_channels, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + weight_init.c2_xavier_fill(self.project) + + def forward(self, x): + size = x.shape[-2:] + if self.pool_kernel_size is not None: + if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]: + raise ValueError( + "`pool_kernel_size` must be divisible by the shape of inputs. " + "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size) + ) + res = [] + for conv in self.convs: + res.append(conv(x)) + res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False) + res = torch.cat(res, dim=1) + res = self.project(res) + res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res + return res diff --git a/data_processing/detectron2/detectron2/layers/batch_norm.py b/data_processing/detectron2/detectron2/layers/batch_norm.py new file mode 100644 index 0000000..f594587 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/batch_norm.py @@ -0,0 +1,320 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +import torch.distributed as dist +from fvcore.nn.distributed import differentiable_all_reduce +from torch import nn +from torch.nn import functional as F + +from detectron2.utils import comm, env + +from .wrappers import BatchNorm2d + + +class FrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + It contains non-trainable buffers called + "weight" and "bias", "running_mean", "running_var", + initialized to perform identity transformation. + + The pre-trained backbone models from Caffe2 only contain "weight" and "bias", + which are computed from the original four parameters of BN. 
+ The affine transform `x * weight + bias` will perform the equivalent + computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. + When loading a backbone model from Caffe2, "running_mean" and "running_var" + will be left unchanged as identity transformation. + + Other pre-trained backbone models may contain all 4 parameters. + + The forward is implemented by `F.batch_norm(..., training=False)`. + """ + + _version = 3 + + def __init__(self, num_features, eps=1e-5): + super().__init__() + self.num_features = num_features + self.eps = eps + self.register_buffer("weight", torch.ones(num_features)) + self.register_buffer("bias", torch.zeros(num_features)) + self.register_buffer("running_mean", torch.zeros(num_features)) + self.register_buffer("running_var", torch.ones(num_features) - eps) + self.register_buffer("num_batches_tracked", None) + + def forward(self, x): + if x.requires_grad: + # When gradients are needed, F.batch_norm will use extra memory + # because its backward op computes gradients for weight/bias as well. + scale = self.weight * (self.running_var + self.eps).rsqrt() + bias = self.bias - self.running_mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + out_dtype = x.dtype # may be half + return x * scale.to(out_dtype) + bias.to(out_dtype) + else: + # When gradients are not needed, F.batch_norm is a single fused op + # and provide more optimization opportunities. + return F.batch_norm( + x, + self.running_mean, + self.running_var, + self.weight, + self.bias, + training=False, + eps=self.eps, + ) + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + # No running_mean/var in early versions + # This will silent the warnings + if prefix + "running_mean" not in state_dict: + state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) + if prefix + "running_var" not in state_dict: + state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) + + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + def __repr__(self): + return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) + + @classmethod + def convert_frozen_batchnorm(cls, module): + """ + Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. + + Args: + module (torch.nn.Module): + + Returns: + If module is BatchNorm/SyncBatchNorm, returns a new module. + Otherwise, in-place convert module and return it. 
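+
+        Example (a minimal sketch)::
+
+            model = FrozenBatchNorm2d.convert_frozen_batchnorm(model)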
+ + Similar to convert_sync_batchnorm in + https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py + """ + bn_module = nn.modules.batchnorm + bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) + res = module + if isinstance(module, bn_module): + res = cls(module.num_features) + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + res.num_batches_tracked = module.num_batches_tracked + else: + for name, child in module.named_children(): + new_child = cls.convert_frozen_batchnorm(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def get_norm(norm, out_channels): + """ + Args: + norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; + or a callable that takes a channel number and returns + the normalization layer as a nn.Module. + + Returns: + nn.Module or None: the normalization layer + """ + if norm is None: + return None + if isinstance(norm, str): + if len(norm) == 0: + return None + norm = { + "BN": BatchNorm2d, + # Fixed in https://github.com/pytorch/pytorch/pull/36382 + "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm, + "FrozenBN": FrozenBatchNorm2d, + "GN": lambda channels: nn.GroupNorm(32, channels), + # for debugging: + "nnSyncBN": nn.SyncBatchNorm, + "naiveSyncBN": NaiveSyncBatchNorm, + # expose stats_mode N as an option to caller, required for zero-len inputs + "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"), + "LN": lambda channels: LayerNorm(channels), + }[norm] + return norm(out_channels) + + +class NaiveSyncBatchNorm(BatchNorm2d): + """ + In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient + when the batch size on each worker is different. + (e.g., when scale augmentation is used, or when it is applied to mask head). + + This is a slower but correct alternative to `nn.SyncBatchNorm`. + + Note: + There isn't a single definition of Sync BatchNorm. + + When ``stats_mode==""``, this module computes overall statistics by using + statistics of each worker with equal weight. The result is true statistics + of all samples (as if they are all on one worker) only when all workers + have the same (N, H, W). This mode does not support inputs with zero batch size. + + When ``stats_mode=="N"``, this module computes overall statistics by weighting + the statistics of each worker by their ``N``. The result is true statistics + of all samples (as if they are all on one worker) only when all workers + have the same (H, W). It is slower than ``stats_mode==""``. + + Even though the result of this module may not be the true statistics of all samples, + it may still be reasonable because it might be preferrable to assign equal weights + to all workers, regardless of their (H, W) dimension, instead of putting larger weight + on larger images. From preliminary experiments, little difference is found between such + a simplified implementation and an accurate computation of overall mean & variance. 
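+
+    Example (the channel count is illustrative)::
+
+        norm_layer = NaiveSyncBatchNorm(64, stats_mode="N")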
+ """ + + def __init__(self, *args, stats_mode="", **kwargs): + super().__init__(*args, **kwargs) + assert stats_mode in ["", "N"] + self._stats_mode = stats_mode + + def forward(self, input): + if comm.get_world_size() == 1 or not self.training: + return super().forward(input) + + B, C = input.shape[0], input.shape[1] + + half_input = input.dtype == torch.float16 + if half_input: + # fp16 does not have good enough numerics for the reduction here + input = input.float() + mean = torch.mean(input, dim=[0, 2, 3]) + meansqr = torch.mean(input * input, dim=[0, 2, 3]) + + if self._stats_mode == "": + assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' + vec = torch.cat([mean, meansqr], dim=0) + vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size()) + mean, meansqr = torch.split(vec, C) + momentum = self.momentum + else: + if B == 0: + vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype) + vec = vec + input.sum() # make sure there is gradient w.r.t input + else: + vec = torch.cat( + [ + mean, + meansqr, + torch.ones([1], device=mean.device, dtype=mean.dtype), + ], + dim=0, + ) + vec = differentiable_all_reduce(vec * B) + + total_batch = vec[-1].detach() + momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0 + mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero + + var = meansqr - mean * mean + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + + self.running_mean += momentum * (mean.detach() - self.running_mean) + self.running_var += momentum * (var.detach() - self.running_var) + ret = input * scale + bias + if half_input: + ret = ret.half() + return ret + + +class CycleBatchNormList(nn.ModuleList): + """ + Implement domain-specific BatchNorm by cycling. + + When a BatchNorm layer is used for multiple input domains or input + features, it might need to maintain a separate test-time statistics + for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`. + + This module implements it by using N separate BN layers + and it cycles through them every time a forward() is called. + + NOTE: The caller of this module MUST guarantee to always call + this module by multiple of N times. Otherwise its test-time statistics + will be incorrect. + """ + + def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs): + """ + Args: + length: number of BatchNorm layers to cycle. + bn_class: the BatchNorm class to use + kwargs: arguments of the BatchNorm class, such as num_features. + """ + self._affine = kwargs.pop("affine", True) + super().__init__([bn_class(**kwargs, affine=False) for k in range(length)]) + if self._affine: + # shared affine, domain-specific BN + channels = self[0].num_features + self.weight = nn.Parameter(torch.ones(channels)) + self.bias = nn.Parameter(torch.zeros(channels)) + self._pos = 0 + + def forward(self, x): + ret = self[self._pos](x) + self._pos = (self._pos + 1) % len(self) + + if self._affine: + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + return ret * w + b + else: + return ret + + def extra_repr(self): + return f"affine={self._affine}" + + +class LayerNorm(nn.Module): + """ + A LayerNorm variant, popularized by Transformers, that performs point-wise mean and + variance normalization over the channel dimension for inputs that have shape + (batch_size, channels, height, width). 
+ https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa B950 + """ + + def __init__(self, normalized_shape, eps=1e-6): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.normalized_shape = (normalized_shape,) + + def forward(self, x): + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/data_processing/detectron2/detectron2/layers/blocks.py b/data_processing/detectron2/detectron2/layers/blocks.py new file mode 100644 index 0000000..1995a4b --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/blocks.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import fvcore.nn.weight_init as weight_init +from torch import nn + +from .batch_norm import FrozenBatchNorm2d, get_norm +from .wrappers import Conv2d + + +""" +CNN building blocks. +""" + + +class CNNBlockBase(nn.Module): + """ + A CNN block is assumed to have input channels, output channels and a stride. + The input and output of `forward()` method must be NCHW tensors. + The method can perform arbitrary computation but must match the given + channels and stride specification. + + Attribute: + in_channels (int): + out_channels (int): + stride (int): + """ + + def __init__(self, in_channels, out_channels, stride): + """ + The `__init__` method of any subclass should also contain these arguments. + + Args: + in_channels (int): + out_channels (int): + stride (int): + """ + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + + def freeze(self): + """ + Make this block not trainable. + This method sets all parameters to `requires_grad=False`, + and convert all BatchNorm layers to FrozenBatchNorm + + Returns: + the block itself + """ + for p in self.parameters(): + p.requires_grad = False + FrozenBatchNorm2d.convert_frozen_batchnorm(self) + return self + + +class DepthwiseSeparableConv2d(nn.Module): + """ + A kxk depthwise convolution + a 1x1 convolution. + + In :paper:`xception`, norm & activation are applied on the second conv. + :paper:`mobilenet` uses norm & activation on both convs. + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size=3, + padding=1, + dilation=1, + *, + norm1=None, + activation1=None, + norm2=None, + activation2=None, + ): + """ + Args: + norm1, norm2 (str or callable): normalization for the two conv layers. + activation1, activation2 (callable(Tensor) -> Tensor): activation + function for the two conv layers. 
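+
+        Example (a minimal sketch; channel sizes and activations are illustrative):
+        ::
+            conv = DepthwiseSeparableConv2d(
+                64, 128, norm1="BN", activation1=nn.ReLU(), norm2="BN", activation2=nn.ReLU()
+            )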
+ """ + super().__init__() + self.depthwise = Conv2d( + in_channels, + in_channels, + kernel_size=kernel_size, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=not norm1, + norm=get_norm(norm1, in_channels), + activation=activation1, + ) + self.pointwise = Conv2d( + in_channels, + out_channels, + kernel_size=1, + bias=not norm2, + norm=get_norm(norm2, out_channels), + activation=activation2, + ) + + # default initialization + weight_init.c2_msra_fill(self.depthwise) + weight_init.c2_msra_fill(self.pointwise) + + def forward(self, x): + return self.pointwise(self.depthwise(x)) diff --git a/data_processing/detectron2/detectron2/layers/csrc/README.md b/data_processing/detectron2/detectron2/layers/csrc/README.md new file mode 100644 index 0000000..778ed3d --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/README.md @@ -0,0 +1,7 @@ + + +To add a new Op: + +1. Create a new directory +2. Implement new ops there +3. Delcare its Python interface in `vision.cpp`. diff --git a/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h new file mode 100644 index 0000000..03f4211 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h @@ -0,0 +1,115 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); +#endif + +// Interface for Python +inline at::Tensor ROIAlignRotated_forward( + const at::Tensor& input, + const at::Tensor& rois, + const double spatial_scale, + const int64_t pooled_height, + const int64_t pooled_width, + const int64_t sampling_ratio) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return ROIAlignRotated_forward_cuda( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + return ROIAlignRotated_forward_cpu( + input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); +} + +inline at::Tensor ROIAlignRotated_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const double spatial_scale, + const int64_t pooled_height, + const int64_t pooled_width, + const int64_t batch_size, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t sampling_ratio) { + if (grad.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return 
ROIAlignRotated_backward_cuda( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + return ROIAlignRotated_backward_cpu( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp b/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp new file mode 100644 index 0000000..2a3d305 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp @@ -0,0 +1,522 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include "ROIAlignRotated.h" + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { +template +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +template +void pre_calc_for_bilinear_interpolate( + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int iy_upper, + const int ix_upper, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + T roi_center_h, + T roi_center_w, + T cos_theta, + T sin_theta, + std::vector>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < iy_upper; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < ix_upper; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + // In image space, (y, x) is the order for Right Handed System, + // and this is essentially multiplying the point by a rotation matrix + // to rotate it counterclockwise through angle theta. + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y < 0) { + y = 0; + } + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indices + PreCalc pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +template +void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +inline void add(T* address, const T& val) { + *address += val; +} + +} // namespace + +template +void ROIAlignRotatedForward( + const int nthreads, + const T* input, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* output) { + int n_rois = nthreads / channels / pooled_width / pooled_height; + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + // we want to precalculate indices and weights shared by all channels, + // this is the key point of optimization + std::vector> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_bin_grid_h, + roi_bin_grid_w, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + roi_center_h, + roi_center_w, + cos_theta, + sin_theta, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + PreCalc pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template +void ROIAlignRotatedBackward( + const int nthreads, + // may not be contiguous. should index using n_stride, etc + const T* grad_output, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* grad_input, + const T* rois, + const int n_stride, + const int c_stride, + const int h_stride, + const int w_stride) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? 
sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // for +} // ROIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cpu"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + + if (output.numel() == 0) { + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedForward( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + return output; +} + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, 
rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_backward_cpu"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedBackward( + grad.numel(), + grad.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + return grad_input; +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu b/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu new file mode 100644 index 0000000..fca1865 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu @@ -0,0 +1,443 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { + +template +__device__ T bilinear_interpolate( + const T* input, + const int height, + const int width, + T y, + T x) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__device__ void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +} // namespace + +template +__global__ void RoIAlignRotatedForward( + const int nthreads, + const T* input, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (inte gral) pooling inside a bin + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T val = bilinear_interpolate(offset_input, height, width, y, x); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +template +__global__ void RoIAlignRotatedBackwardFeature( + const int nthreads, + const T* top_diff, + const int num_rois, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd( + offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd( + offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd( + offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_forward_cuda"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + auto output = at::empty( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(output_size), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + RoIAlignRotatedForward<<>>( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, 
rois_t{rois, "rois", 2}; + at::CheckedFrom c = "ROIAlign_backward_cuda"; + at::checkAllSameGPU(c, {grad_t, rois_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(grad.numel()), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + grad.scalar_type(), "ROIAlignRotated_backward", [&] { + RoIAlignRotatedBackwardFeature<<>>( + grad.numel(), + grad_.data_ptr(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h new file mode 100644 index 0000000..3bf383b --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h @@ -0,0 +1,35 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +at::Tensor box_iou_rotated_cpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2); + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor box_iou_rotated_cuda( + const at::Tensor& boxes1, + const at::Tensor& boxes2); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor box_iou_rotated( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); + if (boxes1.device().is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + + return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp new file mode 100644 index 0000000..c843487 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
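+//
+// A minimal usage sketch of the box_iou_rotated() dispatcher declared in
+// box_iou_rotated.h. The (N, 5) shape and the [x_ctr, y_ctr, w, h, angle_deg]
+// row layout are taken from the kernels below; the tensors themselves are
+// only illustrative:
+//
+//   at::Tensor boxes1 = at::rand({8, 5});  // 8 rotated boxes
+//   at::Tensor boxes2 = at::rand({4, 5});  // 4 rotated boxes
+//   at::Tensor ious = detectron2::box_iou_rotated(boxes1, boxes2);  // (8, 4) IoU matrix
+//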
+#include "box_iou_rotated.h" +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +template +void box_iou_rotated_cpu_kernel( + const at::Tensor& boxes1, + const at::Tensor& boxes2, + at::Tensor& ious) { + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + for (int i = 0; i < num_boxes1; i++) { + for (int j = 0; j < num_boxes2; j++) { + ious[i * num_boxes2 + j] = single_box_iou_rotated( + boxes1[i].data_ptr(), boxes2[j].data_ptr()); + } + } +} + +at::Tensor box_iou_rotated_cpu( + // input must be contiguous: + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + box_iou_rotated_cpu_kernel(boxes1, boxes2, ious); + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + return ious.reshape(shape); +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu new file mode 100644 index 0000000..952710e --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu @@ -0,0 +1,130 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +// 2D block with 32 * 16 = 512 threads per block +const int BLOCK_DIM_X = 32; +const int BLOCK_DIM_Y = 16; + +template +__global__ void box_iou_rotated_cuda_kernel( + const int n_boxes1, + const int n_boxes2, + const T* dev_boxes1, + const T* dev_boxes2, + T* dev_ious) { + const int row_start = blockIdx.x * blockDim.x; + const int col_start = blockIdx.y * blockDim.y; + + const int row_size = min(n_boxes1 - row_start, blockDim.x); + const int col_size = min(n_boxes2 - col_start, blockDim.y); + + __shared__ float block_boxes1[BLOCK_DIM_X * 5]; + __shared__ float block_boxes2[BLOCK_DIM_Y * 5]; + + // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y + if (threadIdx.x < row_size && threadIdx.y == 0) { + block_boxes1[threadIdx.x * 5 + 0] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 0]; + block_boxes1[threadIdx.x * 5 + 1] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 1]; + block_boxes1[threadIdx.x * 5 + 2] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 2]; + block_boxes1[threadIdx.x * 5 + 3] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 3]; + block_boxes1[threadIdx.x * 5 + 4] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 4]; + } + + if (threadIdx.x < col_size && threadIdx.y == 0) { + block_boxes2[threadIdx.x * 5 + 0] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 0]; + block_boxes2[threadIdx.x * 5 + 1] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 1]; + block_boxes2[threadIdx.x * 5 + 2] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 2]; + block_boxes2[threadIdx.x * 5 + 3] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 3]; + block_boxes2[threadIdx.x * 5 + 4] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size && threadIdx.y < col_size) { + int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y; + dev_ious[offset] = single_box_iou_rotated( + block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5); + } +} + +at::Tensor box_iou_rotated_cuda( + // input must be contiguous + const at::Tensor& boxes1, + const 
at::Tensor& boxes2) { + using scalar_t = float; + AT_ASSERTM( + boxes1.scalar_type() == at::kFloat, "boxes1 must be a float tensor"); + AT_ASSERTM( + boxes2.scalar_type() == at::kFloat, "boxes2 must be a float tensor"); + AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor"); + AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(boxes1.device()); + + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + bool transpose = false; + if (num_boxes1 > 0 && num_boxes2 > 0) { + scalar_t *data1 = boxes1.data_ptr(), + *data2 = boxes2.data_ptr(); + + if (num_boxes2 > 65535 * BLOCK_DIM_Y) { + AT_ASSERTM( + num_boxes1 <= 65535 * BLOCK_DIM_Y, + "Too many boxes for box_iou_rotated_cuda!"); + // x dim is allowed to be large, but y dim cannot, + // so we transpose the two to avoid "invalid configuration argument" + // error. We assume one of them is small. Otherwise the result is hard to + // fit in memory anyway. + std::swap(num_boxes1, num_boxes2); + std::swap(data1, data2); + transpose = true; + } + + const int blocks_x = + at::cuda::ATenCeilDiv(static_cast(num_boxes1), BLOCK_DIM_X); + const int blocks_y = + at::cuda::ATenCeilDiv(static_cast(num_boxes2), BLOCK_DIM_Y); + + dim3 blocks(blocks_x, blocks_y); + dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + box_iou_rotated_cuda_kernel<<>>( + num_boxes1, + num_boxes2, + data1, + data2, + (scalar_t*)ious.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + } + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + if (transpose) { + return ious.view(shape).t(); + } else { + return ious.view(shape); + } +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h new file mode 100644 index 0000000..b54a5dd --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h @@ -0,0 +1,370 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once + +#include +#include + +#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 +// Designates functions callable from the host (CPU) and the device (GPU) +#define HOST_DEVICE __host__ __device__ +#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ +#else +#include +#define HOST_DEVICE +#define HOST_DEVICE_INLINE HOST_DEVICE inline +#endif + +namespace detectron2 { + +namespace { + +template +struct RotatedBox { + T x_ctr, y_ctr, w, h, a; +}; + +template +struct Point { + T x, y; + HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} + HOST_DEVICE_INLINE Point operator+(const Point& p) const { + return Point(x + p.x, y + p.y); + } + HOST_DEVICE_INLINE Point& operator+=(const Point& p) { + x += p.x; + y += p.y; + return *this; + } + HOST_DEVICE_INLINE Point operator-(const Point& p) const { + return Point(x - p.x, y - p.y); + } + HOST_DEVICE_INLINE Point operator*(const T coeff) const { + return Point(x * coeff, y * coeff); + } +}; + +template +HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { + return A.x * B.x + A.y * B.y; +} + +// R: result type. 
can be different from input type +template +HOST_DEVICE_INLINE R cross_2d(const Point& A, const Point& B) { + return static_cast(A.x) * static_cast(B.y) - + static_cast(B.x) * static_cast(A.y); +} + +template +HOST_DEVICE_INLINE void get_rotated_vertices( + const RotatedBox& box, + Point (&pts)[4]) { + // M_PI / 180. == 0.01745329251 + double theta = box.a * 0.01745329251; + T cosTheta2 = (T)cos(theta) * 0.5f; + T sinTheta2 = (T)sin(theta) * 0.5f; + + // y: top --> down; x: left --> right + pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w; + pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; + pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w; + pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; + pts[2].x = 2 * box.x_ctr - pts[0].x; + pts[2].y = 2 * box.y_ctr - pts[0].y; + pts[3].x = 2 * box.x_ctr - pts[1].x; + pts[3].y = 2 * box.y_ctr - pts[1].y; +} + +template +HOST_DEVICE_INLINE int get_intersection_points( + const Point (&pts1)[4], + const Point (&pts2)[4], + Point (&intersections)[24]) { + // Line vector + // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] + Point vec1[4], vec2[4]; + for (int i = 0; i < 4; i++) { + vec1[i] = pts1[(i + 1) % 4] - pts1[i]; + vec2[i] = pts2[(i + 1) % 4] - pts2[i]; + } + + // When computing the intersection area, it doesn't hurt if we have + // more (duplicated/approximate) intersections/vertices than needed, + // while it can cause drastic difference if we miss an intersection/vertex. + // Therefore, we add an epsilon to relax the comparisons between + // the float point numbers that decide the intersection points. + double EPS = 1e-5; + + // Line test - test all line combos for intersection + int num = 0; // number of intersections + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + // Solve for 2x2 Ax=b + T det = cross_2d(vec2[j], vec1[i]); + + // This takes care of parallel lines + if (fabs(det) <= 1e-14) { + continue; + } + + auto vec12 = pts2[j] - pts1[i]; + + T t1 = cross_2d(vec2[j], vec12) / det; + T t2 = cross_2d(vec1[i], vec12) / det; + + if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) { + intersections[num++] = pts1[i] + vec1[i] * t1; + } + } + } + + // Check for vertices of rect1 inside rect2 + { + const auto& AB = vec2[0]; + const auto& DA = vec2[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + // assume ABCD is the rectangle, and P is the point to be judged + // P is inside ABCD iff. 
P's projection on AB lies within AB + // and P's projection on AD lies within AD + + auto AP = pts1[i] - pts2[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && + (APdotAD < ADdotAD + EPS)) { + intersections[num++] = pts1[i]; + } + } + } + + // Reverse the check - check for vertices of rect2 inside rect1 + { + const auto& AB = vec1[0]; + const auto& DA = vec1[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + auto AP = pts2[i] - pts1[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && + (APdotAD < ADdotAD + EPS)) { + intersections[num++] = pts2[i]; + } + } + } + + return num; +} + +template +HOST_DEVICE_INLINE int convex_hull_graham( + const Point (&p)[24], + const int& num_in, + Point (&q)[24], + bool shift_to_zero = false) { + assert(num_in >= 2); + + // Step 1: + // Find point with minimum y + // if more than 1 points have the same minimum y, + // pick the one with the minimum x. + int t = 0; + for (int i = 1; i < num_in; i++) { + if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { + t = i; + } + } + auto& start = p[t]; // starting point + + // Step 2: + // Subtract starting point from every points (for sorting in the next step) + for (int i = 0; i < num_in; i++) { + q[i] = p[i] - start; + } + + // Swap the starting point to position 0 + auto tmp = q[0]; + q[0] = q[t]; + q[t] = tmp; + + // Step 3: + // Sort point 1 ~ num_in according to their relative cross-product values + // (essentially sorting according to angles) + // If the angles are the same, sort according to their distance to origin + T dist[24]; +#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 + // compute distance to origin before sort, and sort them together with the + // points + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + + // CUDA version + // In the future, we can potentially use thrust + // for sorting here to improve speed (though not guaranteed) + for (int i = 1; i < num_in - 1; i++) { + for (int j = i + 1; j < num_in; j++) { + T crossProduct = cross_2d(q[i], q[j]); + if ((crossProduct < -1e-6) || + (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { + auto q_tmp = q[i]; + q[i] = q[j]; + q[j] = q_tmp; + auto dist_tmp = dist[i]; + dist[i] = dist[j]; + dist[j] = dist_tmp; + } + } + } +#else + // CPU version + std::sort( + q + 1, q + num_in, [](const Point& A, const Point& B) -> bool { + T temp = cross_2d(A, B); + if (fabs(temp) < 1e-6) { + return dot_2d(A, A) < dot_2d(B, B); + } else { + return temp > 0; + } + }); + // compute distance to origin after sort, since the points are now different. + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } +#endif + + // Step 4: + // Make sure there are at least 2 points (that don't overlap with each other) + // in the stack + int k; // index of the non-overlapped second point + for (k = 1; k < num_in; k++) { + if (dist[k] > 1e-8) { + break; + } + } + if (k == num_in) { + // We reach the end, which means the convex hull is just one point + q[0] = p[t]; + return 1; + } + q[1] = q[k]; + int m = 2; // 2 points in the stack + // Step 5: + // Finally we can start the scanning process. 
+ // When a non-convex relationship between the 3 points is found + // (either concave shape or duplicated points), + // we pop the previous point from the stack + // until the 3-point relationship is convex again, or + // until the stack only contains two points + for (int i = k + 1; i < num_in; i++) { + while (m > 1) { + auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2]; + // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) - + // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we + // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means + // round to nearest floating point). + if (q1.x * q2.y >= q2.x * q1.y) + m--; + else + break; + } + // Using double also helps, but float can solve the issue for now. + // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) + // >= 0) { + // m--; + // } + q[m++] = q[i]; + } + + // Step 6 (Optional): + // In general sense we need the original coordinates, so we + // need to shift the points back (reverting Step 2) + // But if we're only interested in getting the area/perimeter of the shape + // We can simply return. + if (!shift_to_zero) { + for (int i = 0; i < m; i++) { + q[i] += start; + } + } + + return m; +} + +template +HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { + if (m <= 2) { + return 0; + } + + T area = 0; + for (int i = 1; i < m - 1; i++) { + area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); + } + + return area / 2.0; +} + +template +HOST_DEVICE_INLINE T rotated_boxes_intersection( + const RotatedBox& box1, + const RotatedBox& box2) { + // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned + // from rotated_rect_intersection_pts + Point intersectPts[24], orderedPts[24]; + + Point pts1[4]; + Point pts2[4]; + get_rotated_vertices(box1, pts1); + get_rotated_vertices(box2, pts2); + + int num = get_intersection_points(pts1, pts2, intersectPts); + + if (num <= 2) { + return 0.0; + } + + // Convex Hull to order the intersection points in clockwise order and find + // the contour area. + int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); + return polygon_area(orderedPts, num_convex); +} + +} // namespace + +template +HOST_DEVICE_INLINE T +single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) { + // shift center to the middle point to achieve higher precision in result + RotatedBox box1, box2; + auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; + auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; + box1.x_ctr = box1_raw[0] - center_shift_x; + box1.y_ctr = box1_raw[1] - center_shift_y; + box1.w = box1_raw[2]; + box1.h = box1_raw[3]; + box1.a = box1_raw[4]; + box2.x_ctr = box2_raw[0] - center_shift_x; + box2.y_ctr = box2_raw[1] - center_shift_y; + box2.w = box2_raw[2]; + box2.h = box2_raw[3]; + box2.a = box2_raw[4]; + + T area1 = box1.w * box1.h; + T area2 = box2.w * box2.h; + if (area1 < 1e-14 || area2 < 1e-14) { + return 0.f; + } + + T intersection = rotated_boxes_intersection(box1, box2); + T iou = intersection / (area1 + area2 - intersection); + return iou; +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.cpp b/data_processing/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.cpp new file mode 100644 index 0000000..0a5b7b9 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.cpp @@ -0,0 +1,507 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
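+//
+// Rough call order, as assumed from the declarations in cocoeval.h:
+// EvaluateImages() matches detections to ground truth for every
+// (image, category, area range) combination and returns one ImageEvaluation
+// per combination; Accumulate() then folds those results into the flattened
+// precision / recall / score buffers consumed by the Python COCO API.
+//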
+#include "cocoeval.h" +#include +#include +#include +#include + +using namespace pybind11::literals; + +namespace detectron2 { + +namespace COCOeval { + +// Sort detections from highest score to lowest, such that +// detection_instances[detection_sorted_indices[t]] >= +// detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match +// original COCO API +void SortInstancesByDetectionScore( + const std::vector& detection_instances, + std::vector* detection_sorted_indices) { + detection_sorted_indices->resize(detection_instances.size()); + std::iota( + detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); + std::stable_sort( + detection_sorted_indices->begin(), + detection_sorted_indices->end(), + [&detection_instances](size_t j1, size_t j2) { + return detection_instances[j1].score > detection_instances[j2].score; + }); +} + +// Partition the ground truth objects based on whether or not to ignore them +// based on area +void SortInstancesByIgnore( + const std::array& area_range, + const std::vector& ground_truth_instances, + std::vector* ground_truth_sorted_indices, + std::vector* ignores) { + ignores->clear(); + ignores->reserve(ground_truth_instances.size()); + for (auto o : ground_truth_instances) { + ignores->push_back( + o.ignore || o.area < area_range[0] || o.area > area_range[1]); + } + + ground_truth_sorted_indices->resize(ground_truth_instances.size()); + std::iota( + ground_truth_sorted_indices->begin(), + ground_truth_sorted_indices->end(), + 0); + std::stable_sort( + ground_truth_sorted_indices->begin(), + ground_truth_sorted_indices->end(), + [&ignores](size_t j1, size_t j2) { + return (int)(*ignores)[j1] < (int)(*ignores)[j2]; + }); +} + +// For each IOU threshold, greedily match each detected instance to a ground +// truth instance (if possible) and store the results +void MatchDetectionsToGroundTruth( + const std::vector& detection_instances, + const std::vector& detection_sorted_indices, + const std::vector& ground_truth_instances, + const std::vector& ground_truth_sorted_indices, + const std::vector& ignores, + const std::vector>& ious, + const std::vector& iou_thresholds, + const std::array& area_range, + ImageEvaluation* results) { + // Initialize memory to store return data matches and ignore + const int num_iou_thresholds = iou_thresholds.size(); + const int num_ground_truth = ground_truth_sorted_indices.size(); + const int num_detections = detection_sorted_indices.size(); + std::vector ground_truth_matches( + num_iou_thresholds * num_ground_truth, 0); + std::vector& detection_matches = results->detection_matches; + std::vector& detection_ignores = results->detection_ignores; + std::vector& ground_truth_ignores = results->ground_truth_ignores; + detection_matches.resize(num_iou_thresholds * num_detections, 0); + detection_ignores.resize(num_iou_thresholds * num_detections, false); + ground_truth_ignores.resize(num_ground_truth); + for (auto g = 0; g < num_ground_truth; ++g) { + ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; + } + + for (auto t = 0; t < num_iou_thresholds; ++t) { + for (auto d = 0; d < num_detections; ++d) { + // information about best match so far (match=-1 -> unmatched) + double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); + int match = -1; + for (auto g = 0; g < num_ground_truth; ++g) { + // if this ground truth instance is already matched and not a + // crowd, it cannot be matched to another detection + if (ground_truth_matches[t * num_ground_truth + g] > 0 && + 
!ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { + continue; + } + + // if detected instance matched to a regular ground truth + // instance, we can break on the first ground truth instance + // tagged as ignore (because they are sorted by the ignore tag) + if (match >= 0 && !ground_truth_ignores[match] && + ground_truth_ignores[g]) { + break; + } + + // if IOU overlap is the best so far, store the match appropriately + if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { + best_iou = ious[d][ground_truth_sorted_indices[g]]; + match = g; + } + } + // if match was made, store id of match for both detection and + // ground truth + if (match >= 0) { + detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; + detection_matches[t * num_detections + d] = + ground_truth_instances[ground_truth_sorted_indices[match]].id; + ground_truth_matches[t * num_ground_truth + match] = + detection_instances[detection_sorted_indices[d]].id; + } + + // set unmatched detections outside of area range to ignore + const InstanceAnnotation& detection = + detection_instances[detection_sorted_indices[d]]; + detection_ignores[t * num_detections + d] = + detection_ignores[t * num_detections + d] || + (detection_matches[t * num_detections + d] == 0 && + (detection.area < area_range[0] || detection.area > area_range[1])); + } + } + + // store detection score results + results->detection_scores.resize(detection_sorted_indices.size()); + for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { + results->detection_scores[d] = + detection_instances[detection_sorted_indices[d]].score; + } +} + +std::vector EvaluateImages( + const std::vector>& area_ranges, + int max_detections, + const std::vector& iou_thresholds, + const ImageCategoryInstances>& image_category_ious, + const ImageCategoryInstances& + image_category_ground_truth_instances, + const ImageCategoryInstances& + image_category_detection_instances) { + const int num_area_ranges = area_ranges.size(); + const int num_images = image_category_ground_truth_instances.size(); + const int num_categories = + image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; + std::vector detection_sorted_indices; + std::vector ground_truth_sorted_indices; + std::vector ignores; + std::vector results_all( + num_images * num_area_ranges * num_categories); + + // Store results for each image, category, and area range combination. 
Results + // for each IOU threshold are packed into the same ImageEvaluation object + for (auto i = 0; i < num_images; ++i) { + for (auto c = 0; c < num_categories; ++c) { + const std::vector<InstanceAnnotation>& ground_truth_instances = + image_category_ground_truth_instances[i][c]; + const std::vector<InstanceAnnotation>& detection_instances = + image_category_detection_instances[i][c]; + + SortInstancesByDetectionScore( + detection_instances, &detection_sorted_indices); + if ((int)detection_sorted_indices.size() > max_detections) { + detection_sorted_indices.resize(max_detections); + } + + for (size_t a = 0; a < area_ranges.size(); ++a) { + SortInstancesByIgnore( + area_ranges[a], + ground_truth_instances, + &ground_truth_sorted_indices, + &ignores); + + MatchDetectionsToGroundTruth( + detection_instances, + detection_sorted_indices, + ground_truth_instances, + ground_truth_sorted_indices, + ignores, + image_category_ious[i][c], + iou_thresholds, + area_ranges[a], + &results_all + [c * num_area_ranges * num_images + a * num_images + i]); + } + } + } + + return results_all; +} + +// Convert a python list to a vector +template <typename T> +std::vector<T> list_to_vec(const py::list& l) { + std::vector<T> v(py::len(l)); + for (int i = 0; i < (int)py::len(l); ++i) { + v[i] = l[i].cast<T>(); + } + return v; +} + +// Helper function to Accumulate() +// Considers the evaluation results applicable to a particular category, area +// range, and max_detections parameter setting, which begin at +// evaluations[evaluation_index]. Extracts a sorted list of length n of all +// applicable detection instances concatenated across all images in the dataset, +// which are represented by the outputs evaluation_indices, detection_scores, +// image_detection_indices, and detection_sorted_indices--all of which are +// length n. evaluation_indices[i] stores the applicable index into +// evaluations[] for instance i, which has detection score detection_score[i], +// and is the image_detection_indices[i]'th of the list of detections +// for the image containing i.
detection_sorted_indices[] defines a sorted +// permutation of the 3 other outputs +int BuildSortedDetectionList( + const std::vector& evaluations, + const int64_t evaluation_index, + const int64_t num_images, + const int max_detections, + std::vector* evaluation_indices, + std::vector* detection_scores, + std::vector* detection_sorted_indices, + std::vector* image_detection_indices) { + assert(evaluations.size() >= evaluation_index + num_images); + + // Extract a list of object instances of the applicable category, area + // range, and max detections requirements such that they can be sorted + image_detection_indices->clear(); + evaluation_indices->clear(); + detection_scores->clear(); + image_detection_indices->reserve(num_images * max_detections); + evaluation_indices->reserve(num_images * max_detections); + detection_scores->reserve(num_images * max_detections); + int num_valid_ground_truth = 0; + for (auto i = 0; i < num_images; ++i) { + const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; + + for (int d = 0; + d < (int)evaluation.detection_scores.size() && d < max_detections; + ++d) { // detected instances + evaluation_indices->push_back(evaluation_index + i); + image_detection_indices->push_back(d); + detection_scores->push_back(evaluation.detection_scores[d]); + } + for (auto ground_truth_ignore : evaluation.ground_truth_ignores) { + if (!ground_truth_ignore) { + ++num_valid_ground_truth; + } + } + } + + // Sort detections by decreasing score, using stable sort to match + // python implementation + detection_sorted_indices->resize(detection_scores->size()); + std::iota( + detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); + std::stable_sort( + detection_sorted_indices->begin(), + detection_sorted_indices->end(), + [&detection_scores](size_t j1, size_t j2) { + return (*detection_scores)[j1] > (*detection_scores)[j2]; + }); + + return num_valid_ground_truth; +} + +// Helper function to Accumulate() +// Compute a precision recall curve given a sorted list of detected instances +// encoded in evaluations, evaluation_indices, detection_scores, +// detection_sorted_indices, image_detection_indices (see +// BuildSortedDetectionList()). Using vectors precisions and recalls +// and temporary storage, output the results into precisions_out, recalls_out, +// and scores_out, which are large buffers containing many precion/recall curves +// for all possible parameter settings, with precisions_out_index and +// recalls_out_index defining the applicable indices to store results. 
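+// As a quick illustration of the computation below (the detection outcomes
+// here are hypothetical, not from the COCO API): suppose the score-sorted
+// detections match as {TP, FP, TP} against 2 valid ground truth instances.
+// The running sums then give
+//   recalls    = {1/2, 1/2, 1}
+//   precisions = {1, 1/2, 2/3}
+// and the backward "envelope" pass that follows replaces each precision with
+// the maximum over later entries, yielding the non-increasing sequence
+// {1, 2/3, 2/3} before it is sampled at the recall thresholds.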
+void ComputePrecisionRecallCurve( + const int64_t precisions_out_index, + const int64_t precisions_out_stride, + const int64_t recalls_out_index, + const std::vector& recall_thresholds, + const int iou_threshold_index, + const int num_iou_thresholds, + const int num_valid_ground_truth, + const std::vector& evaluations, + const std::vector& evaluation_indices, + const std::vector& detection_scores, + const std::vector& detection_sorted_indices, + const std::vector& image_detection_indices, + std::vector* precisions, + std::vector* recalls, + std::vector* precisions_out, + std::vector* scores_out, + std::vector* recalls_out) { + assert(recalls_out->size() > recalls_out_index); + + // Compute precision/recall for each instance in the sorted list of detections + int64_t true_positives_sum = 0, false_positives_sum = 0; + precisions->clear(); + recalls->clear(); + precisions->reserve(detection_sorted_indices.size()); + recalls->reserve(detection_sorted_indices.size()); + assert(!evaluations.empty() || detection_sorted_indices.empty()); + for (auto detection_sorted_index : detection_sorted_indices) { + const ImageEvaluation& evaluation = + evaluations[evaluation_indices[detection_sorted_index]]; + const auto num_detections = + evaluation.detection_matches.size() / num_iou_thresholds; + const auto detection_index = iou_threshold_index * num_detections + + image_detection_indices[detection_sorted_index]; + assert(evaluation.detection_matches.size() > detection_index); + assert(evaluation.detection_ignores.size() > detection_index); + const int64_t detection_match = + evaluation.detection_matches[detection_index]; + const bool detection_ignores = + evaluation.detection_ignores[detection_index]; + const auto true_positive = detection_match > 0 && !detection_ignores; + const auto false_positive = detection_match == 0 && !detection_ignores; + if (true_positive) { + ++true_positives_sum; + } + if (false_positive) { + ++false_positives_sum; + } + + const double recall = + static_cast(true_positives_sum) / num_valid_ground_truth; + recalls->push_back(recall); + const int64_t num_valid_detections = + true_positives_sum + false_positives_sum; + const double precision = num_valid_detections > 0 + ? static_cast(true_positives_sum) / num_valid_detections + : 0.0; + precisions->push_back(precision); + } + + (*recalls_out)[recalls_out_index] = !recalls->empty() ? 
recalls->back() : 0; + + for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { + if ((*precisions)[i] > (*precisions)[i - 1]) { + (*precisions)[i - 1] = (*precisions)[i]; + } + } + + // Sample the per instance precision/recall list at each recall threshold + for (size_t r = 0; r < recall_thresholds.size(); ++r) { + // first index in recalls >= recall_thresholds[r] + std::vector::iterator low = std::lower_bound( + recalls->begin(), recalls->end(), recall_thresholds[r]); + size_t precisions_index = low - recalls->begin(); + + const auto results_ind = precisions_out_index + r * precisions_out_stride; + assert(results_ind < precisions_out->size()); + assert(results_ind < scores_out->size()); + if (precisions_index < precisions->size()) { + (*precisions_out)[results_ind] = (*precisions)[precisions_index]; + (*scores_out)[results_ind] = + detection_scores[detection_sorted_indices[precisions_index]]; + } else { + (*precisions_out)[results_ind] = 0; + (*scores_out)[results_ind] = 0; + } + } +} +py::dict Accumulate( + const py::object& params, + const std::vector& evaluations) { + const std::vector recall_thresholds = + list_to_vec(params.attr("recThrs")); + const std::vector max_detections = + list_to_vec(params.attr("maxDets")); + const int num_iou_thresholds = py::len(params.attr("iouThrs")); + const int num_recall_thresholds = py::len(params.attr("recThrs")); + const int num_categories = params.attr("useCats").cast() == 1 + ? py::len(params.attr("catIds")) + : 1; + const int num_area_ranges = py::len(params.attr("areaRng")); + const int num_max_detections = py::len(params.attr("maxDets")); + const int num_images = py::len(params.attr("imgIds")); + + std::vector precisions_out( + num_iou_thresholds * num_recall_thresholds * num_categories * + num_area_ranges * num_max_detections, + -1); + std::vector recalls_out( + num_iou_thresholds * num_categories * num_area_ranges * + num_max_detections, + -1); + std::vector scores_out( + num_iou_thresholds * num_recall_thresholds * num_categories * + num_area_ranges * num_max_detections, + -1); + + // Consider the list of all detected instances in the entire dataset in one + // large list. evaluation_indices, detection_scores, + // image_detection_indices, and detection_sorted_indices all have the same + // length as this list, such that each entry corresponds to one detected + // instance + std::vector evaluation_indices; // indices into evaluations[] + std::vector detection_scores; // detection scores of each instance + std::vector detection_sorted_indices; // sorted indices of all + // instances in the dataset + std::vector + image_detection_indices; // indices into the list of detected instances in + // the same image as each instance + std::vector precisions, recalls; + + for (auto c = 0; c < num_categories; ++c) { + for (auto a = 0; a < num_area_ranges; ++a) { + for (auto m = 0; m < num_max_detections; ++m) { + // The COCO PythonAPI assumes evaluations[] (the return value of + // COCOeval::EvaluateImages() is one long list storing results for each + // combination of category, area range, and image id, with categories in + // the outermost loop and images in the innermost loop. 
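+ // The flat index computed just below follows that layout: for category c
+ // and area range a, the per-image block starts at
+ //   c * (num_area_ranges * num_images) + a * num_images.
+ // A small sketch with assumed sizes: with num_area_ranges = 4 and
+ // num_images = 2, the block for (c = 1, a = 2) starts at 1 * 8 + 2 * 2 = 12
+ // and covers flat indices 12 and 13.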
+ const int64_t evaluations_index = + c * num_area_ranges * num_images + a * num_images; + int num_valid_ground_truth = BuildSortedDetectionList( + evaluations, + evaluations_index, + num_images, + max_detections[m], + &evaluation_indices, + &detection_scores, + &detection_sorted_indices, + &image_detection_indices); + + if (num_valid_ground_truth == 0) { + continue; + } + + for (auto t = 0; t < num_iou_thresholds; ++t) { + // recalls_out is a flattened vectors representing a + // num_iou_thresholds X num_categories X num_area_ranges X + // num_max_detections matrix + const int64_t recalls_out_index = + t * num_categories * num_area_ranges * num_max_detections + + c * num_area_ranges * num_max_detections + + a * num_max_detections + m; + + // precisions_out and scores_out are flattened vectors + // representing a num_iou_thresholds X num_recall_thresholds X + // num_categories X num_area_ranges X num_max_detections matrix + const int64_t precisions_out_stride = + num_categories * num_area_ranges * num_max_detections; + const int64_t precisions_out_index = t * num_recall_thresholds * + num_categories * num_area_ranges * num_max_detections + + c * num_area_ranges * num_max_detections + + a * num_max_detections + m; + + ComputePrecisionRecallCurve( + precisions_out_index, + precisions_out_stride, + recalls_out_index, + recall_thresholds, + t, + num_iou_thresholds, + num_valid_ground_truth, + evaluations, + evaluation_indices, + detection_scores, + detection_sorted_indices, + image_detection_indices, + &precisions, + &recalls, + &precisions_out, + &scores_out, + &recalls_out); + } + } + } + } + + time_t rawtime; + struct tm local_time; + std::array buffer; + time(&rawtime); +#ifdef _WIN32 + localtime_s(&local_time, &rawtime); +#else + localtime_r(&rawtime, &local_time); +#endif + strftime( + buffer.data(), 200, "%Y-%m-%d %H:%num_max_detections:%S", &local_time); + return py::dict( + "params"_a = params, + "counts"_a = std::vector( + {num_iou_thresholds, + num_recall_thresholds, + num_categories, + num_area_ranges, + num_max_detections}), + "date"_a = buffer, + "precision"_a = precisions_out, + "recall"_a = recalls_out, + "scores"_a = scores_out); +} + +} // namespace COCOeval + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.h b/data_processing/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.h new file mode 100644 index 0000000..db246e4 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/cocoeval/cocoeval.h @@ -0,0 +1,88 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once + +#include +#include +#include +#include +#include + +namespace py = pybind11; + +namespace detectron2 { + +namespace COCOeval { + +// Annotation data for a single object instance in an image +struct InstanceAnnotation { + InstanceAnnotation( + uint64_t id, + double score, + double area, + bool is_crowd, + bool ignore) + : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {} + uint64_t id; + double score = 0.; + double area = 0.; + bool is_crowd = false; + bool ignore = false; +}; + +// Stores intermediate results for evaluating detection results for a single +// image that has D detected instances and G ground truth instances. 
This stores +// matches between detected and ground truth instances +struct ImageEvaluation { + // For each of the D detected instances, the id of the matched ground truth + // instance, or 0 if unmatched + std::vector detection_matches; + + // The detection score of each of the D detected instances + std::vector detection_scores; + + // Marks whether or not each of G instances was ignored from evaluation (e.g., + // because it's outside area_range) + std::vector ground_truth_ignores; + + // Marks whether or not each of D instances was ignored from evaluation (e.g., + // because it's outside aRng) + std::vector detection_ignores; +}; + +template +using ImageCategoryInstances = std::vector>>; + +// C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each +// combination of image, category, area range settings, and IOU thresholds to +// evaluate, it matches detected instances to ground truth instances and stores +// the results into a vector of ImageEvaluation results, which will be +// interpreted by the COCOeval::Accumulate() function to produce precion-recall +// curves. The parameters of nested vectors have the following semantics: +// image_category_ious[i][c][d][g] is the intersection over union of the d'th +// detected instance and g'th ground truth instance of +// category category_ids[c] in image image_ids[i] +// image_category_ground_truth_instances[i][c] is a vector of ground truth +// instances in image image_ids[i] of category category_ids[c] +// image_category_detection_instances[i][c] is a vector of detected +// instances in image image_ids[i] of category category_ids[c] +std::vector EvaluateImages( + const std::vector>& area_ranges, // vector of 2-tuples + int max_detections, + const std::vector& iou_thresholds, + const ImageCategoryInstances>& image_category_ious, + const ImageCategoryInstances& + image_category_ground_truth_instances, + const ImageCategoryInstances& + image_category_detection_instances); + +// C++ implementation of COCOeval.accumulate(), which generates precision +// recall curves for each set of category, IOU threshold, detection area range, +// and max number of detections parameters. It is assumed that the parameter +// evaluations is the return value of the functon COCOeval::EvaluateImages(), +// which was called with the same parameter settings params +py::dict Accumulate( + const py::object& params, + const std::vector& evalutations); + +} // namespace COCOeval +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/cuda_version.cu b/data_processing/detectron2/detectron2/layers/csrc/cuda_version.cu new file mode 100644 index 0000000..6dfe1b9 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/cuda_version.cu @@ -0,0 +1,26 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +#include + +namespace detectron2 { +int get_cudart_version() { +// Not a ROCM platform: Either HIP is not used, or +// it is used, but platform is not ROCM (i.e. it is CUDA) +#if !defined(__HIP_PLATFORM_HCC__) + return CUDART_VERSION; +#else + int version = 0; + +#if HIP_VERSION_MAJOR != 0 + // Create a convention similar to that of CUDA, as assumed by other + // parts of the code. 
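+  // For example (purely illustrative), HIP 3.7 would be reported as
+  // 3 * 100 + 7 = 307 under this convention, echoing at a smaller scale how
+  // CUDART_VERSION packs its major and minor components.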
+ + version = HIP_VERSION_MINOR; + version += (HIP_VERSION_MAJOR * 100); +#else + hipRuntimeGetVersion(&version); +#endif + return version; +#endif +} +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv.h b/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv.h new file mode 100644 index 0000000..965c1bf --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv.h @@ -0,0 +1,377 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +#if defined(WITH_CUDA) || defined(WITH_HIP) +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step); + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias); + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias); + +#endif + +inline int deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_forward_cuda( + input, + weight, + offset, + output, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline int 
deform_conv_backward_input( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (gradOutput.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_input_cuda( + input, + offset, + gradOutput, + gradInput, + gradOffset, + weight, + columns, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline int deform_conv_backward_filter( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + if (gradOutput.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_parameters_cuda( + input, + offset, + gradOutput, + gradWeight, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + scale, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline void modulated_deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_forward( + input, + weight, + bias, + ones, + offset, + mask, + output, + columns, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline void modulated_deform_conv_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + 
if (grad_output.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_backward( + input, + weight, + bias, + ones, + offset, + mask, + columns, + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda.cu b/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda.cu new file mode 100644 index 0000000..2072bb8 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda.cu @@ -0,0 +1,1223 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp +// Original license: Apache 2.0 + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c +// Original license: Apache 2.0 + +#include + +#include "deform_conv.h" + +#include +#include + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col); + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im); + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_offset); + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col); + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + 
const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask); + +void shape_check( + at::Tensor input, + at::Tensor offset, + at::Tensor* gradOutput, + at::Tensor weight, + int kH, + int kW, + int dH, + int dW, + int padH, + int padW, + int dilationH, + int dilationW, + int group, + int deformable_group) { + TORCH_CHECK( + weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " + "but got: %s", + weight.ndimension()); + + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + TORCH_CHECK( + kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", + kH, + kW); + + TORCH_CHECK( + (weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", + kH, + kW, + weight.size(2), + weight.size(3)); + + TORCH_CHECK( + dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", + dH, + dW); + + TORCH_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, + dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + TORCH_CHECK( + ndim == 3 || ndim == 4, + "3D or 4D input tensor expected but got: %s", + ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + TORCH_CHECK( + nInputPlane % deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, + inputHeight, + inputWidth, + nOutputPlane, + outputHeight, + outputWidth); + + TORCH_CHECK( + input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, + input.size(1)); + + TORCH_CHECK( + (inputHeight + 2 * padH >= kH && inputWidth + 2 * padW >= kW), + "input image is smaller than kernel"); + + TORCH_CHECK( + (offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, + outputWidth, + offset.size(2), + offset.size(3)); + + TORCH_CHECK( + (offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + TORCH_CHECK( + gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, + gradOutput->size(dimf)); + + TORCH_CHECK( + (gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, + outputWidth, + gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check( + input, + offset, + NULL, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + at::Tensor output_buffer = at::zeros( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}, + output.options()); + + output_buffer = output_buffer.view( + {output_buffer.size(0), + group, + 
output_buffer.size(1) / group, + output_buffer.size(2), + output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = output_buffer.view( + {output_buffer.size(0), + output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), + output_buffer.size(4)}); + + output_buffer = output_buffer.view( + {batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + shape_check( + input, + offset, + &gradOutput, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + input = input.view( + 
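+  // The batch is processed in chunks of im2col_step images: these
+  // (batchSize / im2col_step, im2col_step, ...) views let each loop
+  // iteration below run one GEMM per group and the col2im scatters on a
+  // whole chunk at a time.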
{batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + gradOffset = gradOffset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), + group, + gradOutput.size(1) / group, + gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), + gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4), + gradOutput.size(5)}); + + deformable_col2im_coord( + columns, + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradOffset[elt]); + + deformable_col2im( + columns, + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check( + input, + offset, + &gradOutput, + gradWeight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), 
gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = gradOutputBuffer.view( + {batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + gradOutputBuffer.copy_(gradOutput); + // gradOutput is not contiguous, so we do reshape (instead of view) next + gradOutputBuffer = gradOutputBuffer.reshape( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + group, + gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), + gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = gradWeight.view( + {group, + gradWeight.size(0) / group, + gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_( + gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), + 1.0, + scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), + gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view( + {gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const 
int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + shape_check( + input, + offset, + NULL, + weight, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group); + + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + // mask shape check + TORCH_CHECK( + (mask.size(2) == height_out && mask.size(3) == width_out), + "invalid spatial size of mask, expected height: %d width: %d, but " + "got height: %d width: %d", + height_out, + width_out, + mask.size(2), + mask.size(3)); + + TORCH_CHECK( + (mask.size(1) == deformable_group * kernel_h * kernel_w), + "invalid number of channels of mask"); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... 
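+    // "ones" is only (re)allocated here; this forward path adds the bias by
+    // broadcasting at the end, while the backward pass reduces grad_output
+    // against this buffer to produce grad_bias.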
+ ones = at::ones({height_out, width_out}, input.options()); + } + + // resize output + output = output.view({batch, channels_out, height_out, width_out}).zero_(); + // resize temporary columns + columns = at::zeros( + {channels * kernel_h * kernel_w, 1 * height_out * width_out}, + input.options()); + + output = output.view( + {output.size(0), + group, + output.size(1) / group, + output.size(2), + output.size(3)}); + + for (int b = 0; b < batch; b++) { + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + // divide into group + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + + for (int g = 0; g < group; g++) { + output[b][g] = output[b][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output[b][g]); + } + + weight = weight.view( + {weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + output = output.view( + {output.size(0), + output.size(1) * output.size(2), + output.size(3), + output.size(4)}); + + if (with_bias) { + output += bias.view({1, bias.size(0), 1, 1}); + } +} + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + shape_check( + input, + offset, + &grad_output, + weight, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group); + + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + // mask shape check + TORCH_CHECK( + (mask.size(2) == height_out && mask.size(3) == width_out), + "invalid spatial size of mask, expected height: %d width: %d, but " + "got height: %d width: %d", + height_out, + width_out, + mask.size(2), + mask.size(3)); + + TORCH_CHECK( + (mask.size(1) == deformable_group * kernel_h * kernel_w), + "invalid number of channels of mask"); + + if 
(ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros( + {channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = grad_output.view( + {grad_output.size(0), + group, + grad_output.size(1) / group, + grad_output.size(2), + grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view( + {weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_input[b]); + + // gradient w.r.t. 
weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view( + {group, + grad_weight.size(0) / group, + grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view( + {grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view( + {grad_output.size(0) * grad_output.size(1), + grad_output.size(2), + grad_output.size(3), + grad_output.size(4)}); +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu b/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu new file mode 100644 index 0000000..f299c7a --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu @@ -0,0 +1,1288 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu +// Original license: Apache 2.0 +// clang-format off + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. + * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +#include +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + + +namespace { + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; + +inline int GET_BLOCKS(const int N) { + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +} + +template +__device__ scalar_t deformable_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int 
argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col 
+ w_col; + // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const scalar_t map_h = i * dilation_h + offset_h; + // const scalar_t map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = deformable_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < 
width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + + +template +__global__ void deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const 
int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im) { + // todo: make sure parallel_imgs is passed in correctly + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = + channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int 
parallel_imgs, + const int deformable_group, + at::Tensor grad_offset) { + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * + deformable_group * parallel_imgs; + int channel_per_deformable_group = + channels * ksize_h * ksize_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + + deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + 2 * ksize_h * ksize_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_); + })); +} + +} // namespace detectron2 + + +template +__device__ scalar_t dmcn_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low 
= floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void modulated_deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const scalar_t* data_mask_ptr = data_mask + + (b_col * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const int data_mask_hw_ptr = + ((i * 
kernel_w + j) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const float map_h = i * dilation_h + offset_h; + // const float map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = dmcn_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + // data_col_ptr += height_col * width_col; + } + } + } +} + +template +__global__ void modulated_deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int 
cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void modulated_deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset, + scalar_t* grad_mask) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const int data_mask_hw_ptr = + (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } else { + mval += data_col_ptr[col_pos] * + dmcn_im2col_bilinear( + data_im_ptr + cnt * height * width, + width, + height, + width, + inv_h, + inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, + inv_w, + 
height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * + // height_col + h) * width_col + w], mask_req, mval); + grad_mask + [(((b * deformable_group + deformable_group_index) * kernel_h * + kernel_w + + offset_c / 2) * + height_col + + h) * + width_col + + w] = mval; + } +} + + +namespace detectron2 { + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + modulated_deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + data_mask_, + height_im, + width_im, + kernel_h, + kenerl_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_im2col_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im) { + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = + channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + modulated_deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + 
data_col_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * + kernel_w * deformable_group; + const int channel_per_deformable_group = + channels * kernel_h * kernel_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + scalar_t* grad_mask_ = grad_mask.data_ptr(); + + modulated_deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + 2 * kernel_h * kernel_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_, + grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_coord_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h b/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h new file mode 100644 index 0000000..12aca38 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
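+// Declares the CPU and CUDA entry points for rotated-box NMS, plus an
+// inline nms_rotated() wrapper that dispatches on the device of the input
+// tensors and errors out if a CUDA tensor reaches a CPU-only build.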
+#pragma once +#include + +namespace detectron2 { + +at::Tensor nms_rotated_cpu( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold); + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor nms_rotated_cuda( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor nms_rotated( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold) { + assert(dets.device().is_cuda() == scores.device().is_cuda()); + if (dets.device().is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return nms_rotated_cuda( + dets.contiguous(), scores.contiguous(), iou_threshold); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + + return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold); +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp b/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp new file mode 100644 index 0000000..d7556e6 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp @@ -0,0 +1,75 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include "../box_iou_rotated/box_iou_rotated_utils.h" +#include "nms_rotated.h" + +namespace detectron2 { + +template +at::Tensor nms_rotated_cpu_kernel( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold) { + // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel, + // however, the code in this function is much shorter because + // we delegate the IoU computation for rotated boxes to + // the single_box_iou_rotated function in box_iou_rotated_utils.h + AT_ASSERTM(dets.device().is_cpu(), "dets must be a CPU tensor"); + AT_ASSERTM(scores.device().is_cpu(), "scores must be a CPU tensor"); + AT_ASSERTM( + dets.scalar_type() == scores.scalar_type(), + "dets should have the same type as scores"); + + if (dets.numel() == 0) { + return at::empty({0}, dets.options().dtype(at::kLong)); + } + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto ndets = dets.size(0); + at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); + at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); + + auto suppressed = suppressed_t.data_ptr(); + auto keep = keep_t.data_ptr(); + auto order = order_t.data_ptr(); + + int64_t num_to_keep = 0; + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) { + continue; + } + + keep[num_to_keep++] = i; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) { + continue; + } + + auto ovr = single_box_iou_rotated( + dets[i].data_ptr(), dets[j].data_ptr()); + if (ovr >= iou_threshold) { + suppressed[j] = 1; + } + } + } + return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); +} + +at::Tensor nms_rotated_cpu( + // input must be contiguous + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold) { + auto result = at::empty({0}, dets.options()); + + AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] { + result = nms_rotated_cpu_kernel(dets, scores, iou_threshold); + }); + return result; +} + +} // namespace detectron2 diff --git 
a/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu b/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu new file mode 100644 index 0000000..2a3db5c --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu @@ -0,0 +1,145 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include +#ifdef WITH_CUDA +#include "../box_iou_rotated/box_iou_rotated_utils.h" +#endif +// TODO avoid this when pytorch supports "same directory" hipification +#ifdef WITH_HIP +#include "box_iou_rotated/box_iou_rotated_utils.h" +#endif + +using namespace detectron2; + +namespace { +int const threadsPerBlock = sizeof(unsigned long long) * 8; +} + +template +__global__ void nms_rotated_cuda_kernel( + const int n_boxes, + const double iou_threshold, + const T* dev_boxes, + unsigned long long* dev_mask) { + // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + // Compared to nms_cuda_kernel, where each box is represented with 4 values + // (x1, y1, x2, y2), each rotated box is represented with 5 values + // (x_center, y_center, width, height, angle_degrees) here. + __shared__ T block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const T* cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + // Instead of devIoU used by original horizontal nms, here + // we use the single_box_iou_rotated function from box_iou_rotated_utils.h + if (single_box_iou_rotated(cur_box, block_boxes + i * 5) > + iou_threshold) { + t |= 1ULL << i; + } + } + const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +namespace detectron2 { + +at::Tensor nms_rotated_cuda( + // input must be contiguous + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + // using scalar_t = float; + AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); + AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(dets.device()); + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + auto dets_sorted = dets.index_select(0, order_t); + + auto dets_num = dets.size(0); + + const int col_blocks = + at::cuda::ATenCeilDiv(static_cast(dets_num), threadsPerBlock); + + at::Tensor mask = + at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); + + dim3 blocks(col_blocks, 
col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES( + dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { + nms_rotated_cuda_kernel<<>>( + dets_num, + iou_threshold, + dets_sorted.data_ptr(), + (unsigned long long*)mask.data_ptr()); + }); + + at::Tensor mask_cpu = mask.to(at::kCPU); + unsigned long long* mask_host = + (unsigned long long*)mask_cpu.data_ptr(); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + at::Tensor keep = + at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); + int64_t* keep_out = keep.data_ptr(); + + int num_to_keep = 0; + for (int i = 0; i < dets_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long* p = mask_host + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + AT_CUDA_CHECK(cudaGetLastError()); + return order_t.index( + {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) + .to(order_t.device(), keep.scalar_type())}); +} + +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/csrc/vision.cpp b/data_processing/detectron2/detectron2/layers/csrc/vision.cpp new file mode 100644 index 0000000..c9a2cd4 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/csrc/vision.cpp @@ -0,0 +1,117 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +#include +#include "ROIAlignRotated/ROIAlignRotated.h" +#include "box_iou_rotated/box_iou_rotated.h" +#include "cocoeval/cocoeval.h" +#include "deformable/deform_conv.h" +#include "nms_rotated/nms_rotated.h" + +namespace detectron2 { + +#if defined(WITH_CUDA) || defined(WITH_HIP) +extern int get_cudart_version(); +#endif + +std::string get_cuda_version() { +#if defined(WITH_CUDA) || defined(WITH_HIP) + std::ostringstream oss; + +#if defined(WITH_CUDA) + oss << "CUDA "; +#else + oss << "HIP "; +#endif + + // copied from + // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 + auto printCudaStyleVersion = [&](int v) { + oss << (v / 1000) << "." << (v / 10 % 100); + if (v % 10 != 0) { + oss << "." << (v % 10); + } + }; + printCudaStyleVersion(get_cudart_version()); + return oss.str(); +#else // neither CUDA nor HIP + return std::string("not available"); +#endif +} + +bool has_cuda() { +#if defined(WITH_CUDA) + return true; +#else + return false; +#endif +} + +// similar to +// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp +std::string get_compiler_version() { + std::ostringstream ss; +#if defined(__GNUC__) +#ifndef __clang__ + +#if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8)) +#error "GCC >= 4.9 is required!" +#endif + + { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } +#endif +#endif + +#if defined(__clang_major__) + { + ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." 
+ << __clang_patchlevel__; + } +#endif + +#if defined(_MSC_VER) + { ss << "MSVC " << _MSC_FULL_VER; } +#endif + return ss.str(); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); + m.def("get_cuda_version", &get_cuda_version, "get_cuda_version"); + m.def("has_cuda", &has_cuda, "has_cuda"); + + m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); + m.def( + "deform_conv_backward_input", + &deform_conv_backward_input, + "deform_conv_backward_input"); + m.def( + "deform_conv_backward_filter", + &deform_conv_backward_filter, + "deform_conv_backward_filter"); + m.def( + "modulated_deform_conv_forward", + &modulated_deform_conv_forward, + "modulated_deform_conv_forward"); + m.def( + "modulated_deform_conv_backward", + &modulated_deform_conv_backward, + "modulated_deform_conv_backward"); + + m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); + m.def( + "COCOevalEvaluateImages", + &COCOeval::EvaluateImages, + "COCOeval::EvaluateImages"); + pybind11::class_(m, "InstanceAnnotation") + .def(pybind11::init()); + pybind11::class_(m, "ImageEvaluation") + .def(pybind11::init<>()); +} + +TORCH_LIBRARY(detectron2, m) { + m.def("nms_rotated", &nms_rotated); + m.def("box_iou_rotated", &box_iou_rotated); + m.def("roi_align_rotated_forward", &ROIAlignRotated_forward); + m.def("roi_align_rotated_backward", &ROIAlignRotated_backward); +} +} // namespace detectron2 diff --git a/data_processing/detectron2/detectron2/layers/deform_conv.py b/data_processing/detectron2/detectron2/layers/deform_conv.py new file mode 100644 index 0000000..dffb720 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/deform_conv.py @@ -0,0 +1,514 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from functools import lru_cache +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair +from torchvision.ops import deform_conv2d + +from detectron2.utils.develop import create_dummy_class, create_dummy_func + +from .wrappers import _NewEmptyTensorOp + + +class _DeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64, + ): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim()) + ) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride) + ) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + # TODO: let torchvision support full features of our deformconv. + if deformable_groups != 1: + raise NotImplementedError( + "Deformable Conv with deformable_groups != 1 is not supported on CPUs!" 
+ ) + return deform_conv2d( + input, offset, weight, stride=stride, padding=padding, dilation=dilation + ) + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + _C.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + _C.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + _C.deform_conv_backward_filter( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + 1, + cur_im2col_step, + ) + + return grad_input, grad_offset, grad_weight, None, None, None, None, None, None + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + "x".join(map(str, output_size)) + ) + ) + return output_size + + @staticmethod + @lru_cache(maxsize=128) + def _cal_im2col_step(input_size, default_size): + """ + Calculate proper im2col step size, which should be divisible by input_size and not larger + than prefer_size. Meanwhile the step size should be as large as possible to be more + efficient. So we choose the largest one among all divisors of input_size which are smaller + than prefer_size. + :param input_size: input batch size . + :param default_size: default preferred im2col step size. + :return: the largest proper step size. 
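+        Illustrative example: with default_size=64, input_size=100 returns 50
+        (the largest quotient input_size // step that does not exceed 64),
+        while any input_size <= 64 is returned unchanged.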
+ """ + if input_size <= default_size: + return input_size + best_step = 1 + for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)): + if input_size % step == 0: + if input_size // step <= default_size: + return input_size // step + best_step = step + + return best_step + + +class _ModulatedDeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + ): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + if ( + weight.requires_grad + or mask.requires_grad + or offset.requires_grad + or input.requires_grad + ): + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + _C.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + output, + ctx._bufs[1], + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + _C.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + if not ctx.with_bias: + grad_bias = None + + return ( + grad_input, + grad_offset, + grad_mask, + grad_weight, + grad_bias, + None, + None, + None, + None, + None, + ) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, kernel_w = weight.shape[2:4] + height_out = ( + height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1) + ) // ctx.stride + 1 + width_out = ( + width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1) + ) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = _DeformConv.apply +modulated_deform_conv = _ModulatedDeformConv.apply + + +class DeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=False, + norm=None, + activation=None, + ): + """ + Deformable convolution from :paper:`deformconv`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. 
+ norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(DeformConv, self).__init__() + + assert not bias + assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( + in_channels, groups + ) + assert ( + out_channels % groups == 0 + ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) + ) + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + + def forward(self, x, offset): + if x.numel() == 0: + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. + # This computes the height and width of the output tensor + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = deform_conv( + x, + offset, + self.weight, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=False" + return tmpstr + + +class ModulatedDeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True, + norm=None, + activation=None, + ): + """ + Modulated deformable convolution from :paper:`deformconv2`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. 
+ norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(ModulatedDeformConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.deformable_groups = deformable_groups + self.with_bias = bias + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, x, offset, mask): + if x.numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = modulated_deform_conv( + x, + offset, + mask, + self.weight, + self.bias, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=" + str(self.with_bias) + return tmpstr + + +try: + from detectron2 import _C +except ImportError: + # TODO: register ops natively so there is no need to import _C. + _msg = "detectron2 is not compiled successfully, please build following the instructions!" + _args = ("detectron2._C", _msg) + DeformConv = create_dummy_class("DeformConv", *_args) + ModulatedDeformConv = create_dummy_class("ModulatedDeformConv", *_args) + deform_conv = create_dummy_func("deform_conv", *_args) + modulated_deform_conv = create_dummy_func("modulated_deform_conv", *_args) diff --git a/data_processing/detectron2/detectron2/layers/losses.py b/data_processing/detectron2/detectron2/layers/losses.py new file mode 100644 index 0000000..850a852 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/losses.py @@ -0,0 +1,133 @@ +import math +import torch + + +def diou_loss( + boxes1: torch.Tensor, + boxes2: torch.Tensor, + reduction: str = "none", + eps: float = 1e-7, +) -> torch.Tensor: + """ + Distance Intersection over Union Loss (Zhaohui Zheng et. al) + https://arxiv.org/abs/1911.08287 + Args: + boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. 
+ eps (float): small number to prevent division by zero + """ + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # TODO: use torch._assert_async() when pytorch 1.8 support is dropped + assert (x2 >= x1).all(), "bad box: x1 larger than x2" + assert (y2 >= y1).all(), "bad box: y1 larger than y2" + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsct = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps + iou = intsct / union + + # smallest enclosing box + xc1 = torch.min(x1, x1g) + yc1 = torch.min(y1, y1g) + xc2 = torch.max(x2, x2g) + yc2 = torch.max(y2, y2g) + diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps + + # centers of boxes + x_p = (x2 + x1) / 2 + y_p = (y2 + y1) / 2 + x_g = (x1g + x2g) / 2 + y_g = (y1g + y2g) / 2 + distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) + + # Eqn. (7) + loss = 1 - iou + (distance / diag_len) + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss + + +def ciou_loss( + boxes1: torch.Tensor, + boxes2: torch.Tensor, + reduction: str = "none", + eps: float = 1e-7, +) -> torch.Tensor: + """ + Complete Intersection over Union Loss (Zhaohui Zheng et. al) + https://arxiv.org/abs/1911.08287 + Args: + boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + eps (float): small number to prevent division by zero + """ + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # TODO: use torch._assert_async() when pytorch 1.8 support is dropped + assert (x2 >= x1).all(), "bad box: x1 larger than x2" + assert (y2 >= y1).all(), "bad box: y1 larger than y2" + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsct = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps + iou = intsct / union + + # smallest enclosing box + xc1 = torch.min(x1, x1g) + yc1 = torch.min(y1, y1g) + xc2 = torch.max(x2, x2g) + yc2 = torch.max(y2, y2g) + diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps + + # centers of boxes + x_p = (x2 + x1) / 2 + y_p = (y2 + y1) / 2 + x_g = (x1g + x2g) / 2 + y_g = (y1g + y2g) / 2 + distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) + + # width and height of boxes + w_pred = x2 - x1 + h_pred = y2 - y1 + w_gt = x2g - x1g + h_gt = y2g - y1g + v = (4 / (math.pi**2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) + with torch.no_grad(): + alpha = v / (1 - iou + v + eps) + + # Eqn. 
(10) + loss = 1 - iou + (distance / diag_len) + alpha * v + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss diff --git a/data_processing/detectron2/detectron2/layers/mask_ops.py b/data_processing/detectron2/detectron2/layers/mask_ops.py new file mode 100644 index 0000000..990d04a --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/mask_ops.py @@ -0,0 +1,275 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Tuple +import torch +from PIL import Image +from torch.nn import functional as F + +__all__ = ["paste_masks_in_image"] + + +BYTES_PER_FLOAT = 4 +# TODO: This memory limit may be too much or too little. It would be better to +# determine it based on available resources. +GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit + + +def _do_paste_mask(masks, boxes, img_h: int, img_w: int, skip_empty: bool = True): + """ + Args: + masks: N, 1, H, W + boxes: N, 4 + img_h, img_w (int): + skip_empty (bool): only paste masks within the region that + tightly bound all boxes, and returns the results this region only. + An important optimization for CPU. + + Returns: + if skip_empty == False, a mask of shape (N, img_h, img_w) + if skip_empty == True, a mask of shape (N, h', w'), and the slice + object for the corresponding region. + """ + # On GPU, paste all masks together (up to chunk size) + # by using the entire image to sample the masks + # Compared to pasting them one by one, + # this has more operations but is faster on COCO-scale dataset. + device = masks.device + + if skip_empty and not torch.jit.is_scripting(): + x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( + dtype=torch.int32 + ) + x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) + y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) + else: + x0_int, y0_int = 0, 0 + x1_int, y1_int = img_w, img_h + x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 + + N = masks.shape[0] + + img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 + img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 + img_y = (img_y - y0) / (y1 - y0) * 2 - 1 + img_x = (img_x - x0) / (x1 - x0) * 2 - 1 + # img_x, img_y have shapes (N, w), (N, h) + + gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) + gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) + grid = torch.stack([gx, gy], dim=3) + + if not torch.jit.is_scripting(): + if not masks.dtype.is_floating_point: + masks = masks.float() + img_masks = F.grid_sample(masks, grid.to(masks.dtype), align_corners=False) + + if skip_empty and not torch.jit.is_scripting(): + return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) + else: + return img_masks[:, 0], () + + +# Annotate boxes as Tensor (but not Boxes) in order to use scripting +@torch.jit.script_if_tracing +def paste_masks_in_image( + masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5 +): + """ + Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. + The location, height, and width for pasting each mask is determined by their + corresponding bounding boxes in boxes. + + Note: + This is a complicated but more accurate implementation. In actual deployment, it is + often enough to use a faster but less accurate implementation. 
+ See :func:`paste_mask_in_image_old` in this file for an alternative implementation. + + Args: + masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of + detected object instances in the image and Hmask, Wmask are the mask width and mask + height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. + boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4). + boxes[i] and masks[i] correspond to the same object instance. + image_shape (tuple): height, width + threshold (float): A threshold in [0, 1] for converting the (soft) masks to + binary masks. + + Returns: + img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the + number of detected object instances and Himage, Wimage are the image width + and height. img_masks[i] is a binary mask for object instance i. + """ + + assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" + N = len(masks) + if N == 0: + return masks.new_empty((0,) + image_shape, dtype=torch.uint8) + if not isinstance(boxes, torch.Tensor): + boxes = boxes.tensor + device = boxes.device + assert len(boxes) == N, boxes.shape + + img_h, img_w = image_shape + + # The actual implementation split the input into chunks, + # and paste them chunk by chunk. + if device.type == "cpu" or torch.jit.is_scripting(): + # CPU is most efficient when they are pasted one by one with skip_empty=True + # so that it performs minimal number of operations. + num_chunks = N + else: + # GPU benefits from parallelism for larger chunks, but may have memory issue + # int(img_h) because shape may be tensors in tracing + num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) + assert ( + num_chunks <= N + ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" + chunks = torch.chunk(torch.arange(N, device=device), num_chunks) + + img_masks = torch.zeros( + N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 + ) + for inds in chunks: + masks_chunk, spatial_inds = _do_paste_mask( + masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" + ) + + if threshold >= 0: + masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) + else: + # for visualization and debugging + masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) + + if torch.jit.is_scripting(): # Scripting does not use the optimized codepath + img_masks[inds] = masks_chunk + else: + img_masks[(inds,) + spatial_inds] = masks_chunk + return img_masks + + +# The below are the original paste function (from Detectron1) which has +# larger quantization error. +# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. + + +def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): + """ + Paste a single mask in an image. + This is a per-box implementation of :func:`paste_masks_in_image`. + This function has larger quantization error due to incorrect pixel + modeling and is not used any more. + + Args: + mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single + object instance. Values are in [0, 1]. + box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners + of the object instance. + img_h, img_w (int): Image height and width. + threshold (float): Mask binarization threshold in [0, 1]. + + Returns: + im_mask (Tensor): + The resized and binarized object mask pasted into the original + image plane (a tensor of shape (img_h, img_w)). 
+ """ + # Conversion from continuous box coordinates to discrete pixel coordinates + # via truncation (cast to int32). This determines which pixels to paste the + # mask onto. + box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion + # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to + # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 + # pixels (not x1 - x0 pixels). + samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width + samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height + + # Resample the mask from it's original grid to the new samples_w x samples_h grid + mask = Image.fromarray(mask.cpu().numpy()) + mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) + mask = np.array(mask, copy=False) + + if threshold >= 0: + mask = np.array(mask > threshold, dtype=np.uint8) + mask = torch.from_numpy(mask) + else: + # for visualization and debugging, we also + # allow it to return an unmodified mask + mask = torch.from_numpy(mask * 255).to(torch.uint8) + + im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) + x_0 = max(box[0], 0) + x_1 = min(box[2] + 1, img_w) + y_0 = max(box[1], 0) + y_1 = min(box[3] + 1, img_h) + + im_mask[y_0:y_1, x_0:x_1] = mask[ + (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) + ] + return im_mask + + +# Our pixel modeling requires extrapolation for any continuous +# coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks, +# we would like this extrapolation to be an interpolation between boundary values and zero, +# instead of using absolute zero or boundary values. +# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: +# masks, scale = pad_masks(masks[:, 0, :, :], 1) +# boxes = scale_boxes(boxes.tensor, scale) + + +def pad_masks(masks, padding): + """ + Args: + masks (tensor): A tensor of shape (B, M, M) representing B masks. + padding (int): Number of cells to pad on all sides. + + Returns: + The padded masks and the scale factor of the padding size / original size. + """ + B = masks.shape[0] + M = masks.shape[-1] + pad2 = 2 * padding + scale = float(M + pad2) / M + padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) + padded_masks[:, padding:-padding, padding:-padding] = masks + return padded_masks, scale + + +def scale_boxes(boxes, scale): + """ + Args: + boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 + coords representing the corners x0, y0, x1, y1, + scale (float): The box scaling factor. + + Returns: + Scaled boxes. + """ + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half *= scale + h_half *= scale + + scaled_boxes = torch.zeros_like(boxes) + scaled_boxes[:, 0] = x_c - w_half + scaled_boxes[:, 2] = x_c + w_half + scaled_boxes[:, 1] = y_c - h_half + scaled_boxes[:, 3] = y_c + h_half + return scaled_boxes + + +@torch.jit.script_if_tracing +def _paste_masks_tensor_shape( + masks: torch.Tensor, + boxes: torch.Tensor, + image_shape: Tuple[torch.Tensor, torch.Tensor], + threshold: float = 0.5, +): + """ + A wrapper of paste_masks_in_image where image_shape is Tensor. + During tracing, shapes might be tensors instead of ints. The Tensor->int + conversion should be scripted rather than traced. 
+ """ + return paste_masks_in_image(masks, boxes, (int(image_shape[0]), int(image_shape[1])), threshold) diff --git a/data_processing/detectron2/detectron2/layers/nms.py b/data_processing/detectron2/detectron2/layers/nms.py new file mode 100644 index 0000000..1019e7f --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/nms.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import torch +from torchvision.ops import boxes as box_ops +from torchvision.ops import nms # noqa . for compatibility + + +def batched_nms( + boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float +): + """ + Same as torchvision.ops.boxes.batched_nms, but with float(). + """ + assert boxes.shape[-1] == 4 + # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311) + # to decide whether to use coordinate trick or for loop to implement batched_nms. So we + # just call it directly. + # Fp16 does not have enough range for batched NMS, so adding float(). + return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold) + + +# Note: this function (nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future +def nms_rotated(boxes: torch.Tensor, scores: torch.Tensor, iou_threshold: float): + """ + Performs non-maximum suppression (NMS) on the rotated boxes according + to their intersection-over-union (IoU). + + Rotated NMS iteratively removes lower scoring rotated boxes which have an + IoU greater than iou_threshold with another (higher scoring) rotated box. + + Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as + RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they + can be representing completely different objects in certain tasks, e.g., OCR. + + As for the question of whether rotated-NMS should treat them as faraway boxes + even though their IOU is 1, it depends on the application and/or ground truth annotation. + + As an extreme example, consider a single character v and the square box around it. + + If the angle is 0 degree, the object (text) would be read as 'v'; + + If the angle is 90 degrees, the object (text) would become '>'; + + If the angle is 180 degrees, the object (text) would become '^'; + + If the angle is 270/-90 degrees, the object (text) would become '<' + + All of these cases have IoU of 1 to each other, and rotated NMS that only + uses IoU as criterion would only keep one of them with the highest score - + which, practically, still makes sense in most cases because typically + only one of theses orientations is the correct one. Also, it does not matter + as much if the box is only used to classify the object (instead of transcribing + them with a sequential OCR recognition model) later. + + On the other hand, when we use IoU to filter proposals that are close to the + ground truth during training, we should definitely take the angle into account if + we know the ground truth is labeled with the strictly correct orientation (as in, + upside-down words are annotated with -180 degrees even though they can be covered + with a 0/90/-90 degree box, etc.) + + The way the original dataset is annotated also matters. 
For example, if the dataset + is a 4-point polygon dataset that does not enforce ordering of vertices/orientation, + we can estimate a minimum rotated bounding box to this polygon, but there's no way + we can tell the correct angle with 100% confidence (as shown above, there could be 4 different + rotated boxes, with angles differed by 90 degrees to each other, covering the exactly + same region). In that case we have to just use IoU to determine the box + proximity (as many detection benchmarks (even for text) do) unless there're other + assumptions we can make (like width is always larger than height, or the object is not + rotated by more than 90 degrees CCW/CW, etc.) + + In summary, not considering angles in rotated NMS seems to be a good option for now, + but we should be aware of its implications. + + Args: + boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in + (x_center, y_center, width, height, angle_degrees) format. + scores (Tensor[N]): Scores for each one of the rotated boxes + iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold + + Returns: + keep (Tensor): int64 tensor with the indices of the elements that have been kept + by Rotated NMS, sorted in decreasing order of scores + """ + return torch.ops.detectron2.nms_rotated(boxes, scores, iou_threshold) + + +# Note: this function (batched_nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future + + +@torch.jit.script_if_tracing +def batched_nms_rotated( + boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float +): + """ + Performs non-maximum suppression in a batched fashion. + + Each index value correspond to a category, and NMS + will not be applied between elements of different categories. + + Args: + boxes (Tensor[N, 5]): + boxes where NMS will be performed. They + are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format + scores (Tensor[N]): + scores for each one of the boxes + idxs (Tensor[N]): + indices of the categories for each one of the boxes. + iou_threshold (float): + discards all overlapping boxes + with IoU < iou_threshold + + Returns: + Tensor: + int64 tensor with the indices of the elements that have been kept + by NMS, sorted in decreasing order of scores + """ + assert boxes.shape[-1] == 5 + + if boxes.numel() == 0: + return torch.empty((0,), dtype=torch.int64, device=boxes.device) + boxes = boxes.float() # fp16 does not have enough range for batched NMS + # Strategy: in order to perform NMS independently per class, + # we add an offset to all the boxes. The offset is dependent + # only on the class idx, and is large enough so that boxes + # from different classes do not overlap + + # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate, + # which won't handle negative coordinates correctly. + # Here by using min_coordinate we can make sure the negative coordinates are + # correctly handled. 
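+    # Sketch of the offset trick used below: each class k has its box centers
+    # shifted by k * (max_coordinate - min_coordinate + 1), so boxes belonging
+    # to different classes can never overlap and a single nms_rotated call
+    # performs per-class suppression in one pass.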
+ max_coordinate = ( + torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).max() + min_coordinate = ( + torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).min() + offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1) + boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes + boxes_for_nms[:, :2] += offsets[:, None] + keep = nms_rotated(boxes_for_nms, scores, iou_threshold) + return keep diff --git a/data_processing/detectron2/detectron2/layers/roi_align.py b/data_processing/detectron2/detectron2/layers/roi_align.py new file mode 100644 index 0000000..163462e --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/roi_align.py @@ -0,0 +1,74 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from torch import nn +from torchvision.ops import roi_align + + +# NOTE: torchvision's RoIAlign has a different default aligned=False +class ROIAlign(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + aligned (bool): if False, use the legacy implementation in + Detectron. If True, align the results more perfectly. + + Note: + The meaning of aligned=True: + + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). But the original + roi_align (aligned=False) does not subtract the 0.5 when computing neighboring + pixel indices and therefore it uses pixels with a slightly incorrect alignment + (relative to our pixel model) when performing bilinear interpolation. + + With `aligned=True`, + we first appropriately scale the ROI and then shift it by -0.5 + prior to calling roi_align. This produces the correct neighbors; see + detectron2/tests/test_roi_align.py for verification. + + The difference does not make a difference to the model's performance if + ROIAlign is used together with conv layers. + """ + super().__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + self.aligned = aligned + + from torchvision import __version__ + + version = tuple(int(x) for x in __version__.split(".")[:2]) + # https://github.com/pytorch/vision/pull/2438 + assert version >= (0, 7), "Require torchvision >= 0.7" + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. 
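+
+        Illustrative sketch (variable names are placeholders, not part of the API)::
+
+            pooler = ROIAlign((7, 7), spatial_scale=1.0 / 16, sampling_ratio=0)
+            pooled = pooler(features, rois)   # -> (num_rois, C, 7, 7)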
+ """ + assert rois.dim() == 2 and rois.size(1) == 5 + if input.is_quantized: + input = input.dequantize() + return roi_align( + input, + rois.to(dtype=input.dtype), + self.output_size, + self.spatial_scale, + self.sampling_ratio, + self.aligned, + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ", aligned=" + str(self.aligned) + tmpstr += ")" + return tmpstr diff --git a/data_processing/detectron2/detectron2/layers/roi_align_rotated.py b/data_processing/detectron2/detectron2/layers/roi_align_rotated.py new file mode 100644 index 0000000..2a52399 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/roi_align_rotated.py @@ -0,0 +1,100 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + + +class _ROIAlignRotated(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): + ctx.save_for_backward(roi) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + ctx.input_shape = input.size() + output = torch.ops.detectron2.roi_align_rotated_forward( + input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + (rois,) = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + sampling_ratio = ctx.sampling_ratio + bs, ch, h, w = ctx.input_shape + grad_input = torch.ops.detectron2.roi_align_rotated_backward( + grad_output, + rois, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + h, + w, + sampling_ratio, + ) + return grad_input, None, None, None, None, None + + +roi_align_rotated = _ROIAlignRotated.apply + + +class ROIAlignRotated(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + + Note: + ROIAlignRotated supports continuous coordinate by default: + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). + """ + super(ROIAlignRotated, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx6 boxes. First column is the index into N. + The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees). + """ + assert rois.dim() == 2 and rois.size(1) == 6 + orig_dtype = input.dtype + if orig_dtype == torch.float16: + input = input.float() + rois = rois.float() + output_size = _pair(self.output_size) + + # Scripting for Autograd is currently unsupported. 
+ # This is a quick fix without having to rewrite code on the C++ side + if torch.jit.is_scripting() or torch.jit.is_tracing(): + return torch.ops.detectron2.roi_align_rotated_forward( + input, rois, self.spatial_scale, output_size[0], output_size[1], self.sampling_ratio + ).to(dtype=orig_dtype) + + return roi_align_rotated( + input, rois, self.output_size, self.spatial_scale, self.sampling_ratio + ).to(dtype=orig_dtype) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ")" + return tmpstr diff --git a/data_processing/detectron2/detectron2/layers/rotated_boxes.py b/data_processing/detectron2/detectron2/layers/rotated_boxes.py new file mode 100644 index 0000000..03f73b3 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/rotated_boxes.py @@ -0,0 +1,21 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from __future__ import absolute_import, division, print_function, unicode_literals +import torch + + +def pairwise_iou_rotated(boxes1, boxes2): + """ + Return intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in + (x_center, y_center, width, height, angle) format. + + Arguments: + boxes1 (Tensor[N, 5]) + boxes2 (Tensor[M, 5]) + + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2) diff --git a/data_processing/detectron2/detectron2/layers/shape_spec.py b/data_processing/detectron2/detectron2/layers/shape_spec.py new file mode 100644 index 0000000..8dac3c5 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/shape_spec.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +from dataclasses import dataclass +from typing import Optional + + +@dataclass +class ShapeSpec: + """ + A simple structure that contains basic shape specification about a tensor. + It is often used as the auxiliary inputs/outputs of models, + to complement the lack of shape inference ability among pytorch modules. + """ + + channels: Optional[int] = None + height: Optional[int] = None + width: Optional[int] = None + stride: Optional[int] = None diff --git a/data_processing/detectron2/detectron2/layers/wrappers.py b/data_processing/detectron2/detectron2/layers/wrappers.py new file mode 100644 index 0000000..fb3cb38 --- /dev/null +++ b/data_processing/detectron2/detectron2/layers/wrappers.py @@ -0,0 +1,162 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Wrappers around on some nn functions, mainly to support empty tensors. + +Ideally, add support directly in PyTorch to empty tensors in those functions. + +These can be removed once https://github.com/pytorch/pytorch/issues/12013 +is implemented +""" + +import warnings +from typing import List, Optional +import torch +from torch.nn import functional as F + +from detectron2.utils.env import TORCH_VERSION + + +def shapes_to_tensor(x: List[int], device: Optional[torch.device] = None) -> torch.Tensor: + """ + Turn a list of integer scalars or integer Tensor scalars into a vector, + in a way that's both traceable and scriptable. + + In tracing, `x` should be a list of scalar Tensor, so the output can trace to the inputs. + In scripting or eager, `x` should be a list of int. 
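+
+    In eager mode this reduces to ``torch.as_tensor``; e.g. (illustrative)
+    ``shapes_to_tensor([2, 3])`` gives ``tensor([2, 3])``.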
+ """ + if torch.jit.is_scripting(): + return torch.as_tensor(x, device=device) + if torch.jit.is_tracing(): + assert all( + [isinstance(t, torch.Tensor) for t in x] + ), "Shape should be tensor during tracing!" + # as_tensor should not be used in tracing because it records a constant + ret = torch.stack(x) + if ret.device != device: # avoid recording a hard-coded device if not necessary + ret = ret.to(device=device) + return ret + return torch.as_tensor(x, device=device) + + +def check_if_dynamo_compiling(): + if TORCH_VERSION >= (1, 14): + from torch._dynamo import is_compiling + + return is_compiling() + else: + return False + + +def cat(tensors: List[torch.Tensor], dim: int = 0): + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +def empty_input_loss_func_wrapper(loss_func): + def wrapped_loss_func(input, target, *, reduction="mean", **kwargs): + """ + Same as `loss_func`, but returns 0 (instead of nan) for empty inputs. + """ + if target.numel() == 0 and reduction == "mean": + return input.sum() * 0.0 # connect the gradient + return loss_func(input, target, reduction=reduction, **kwargs) + + return wrapped_loss_func + + +cross_entropy = empty_input_loss_func_wrapper(F.cross_entropy) + + +class _NewEmptyTensorOp(torch.autograd.Function): + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return _NewEmptyTensorOp.apply(grad, shape), None + + +class Conv2d(torch.nn.Conv2d): + """ + A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. + """ + + def __init__(self, *args, **kwargs): + """ + Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: + + Args: + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + + It assumes that norm layer is used before activation. + """ + norm = kwargs.pop("norm", None) + activation = kwargs.pop("activation", None) + super().__init__(*args, **kwargs) + + self.norm = norm + self.activation = activation + + def forward(self, x): + # torchscript does not support SyncBatchNorm yet + # https://github.com/pytorch/pytorch/issues/40507 + # and we skip these codes in torchscript since: + # 1. currently we only support torchscript in evaluation mode + # 2. features needed by exporting module to torchscript are added in PyTorch 1.6 or + # later version, `Conv2d` in these PyTorch versions has already supported empty inputs. + if not torch.jit.is_scripting(): + # Dynamo doesn't support context managers yet + is_dynamo_compiling = check_if_dynamo_compiling() + if not is_dynamo_compiling: + with warnings.catch_warnings(record=True): + if x.numel() == 0 and self.training: + # https://github.com/pytorch/pytorch/issues/12013 + assert not isinstance( + self.norm, torch.nn.SyncBatchNorm + ), "SyncBatchNorm does not support empty inputs!" 
+ + x = F.conv2d( + x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + +ConvTranspose2d = torch.nn.ConvTranspose2d +BatchNorm2d = torch.nn.BatchNorm2d +interpolate = F.interpolate +Linear = torch.nn.Linear + + +def nonzero_tuple(x): + """ + A 'as_tuple=True' version of torch.nonzero to support torchscript. + because of https://github.com/pytorch/pytorch/issues/38718 + """ + if torch.jit.is_scripting(): + if x.dim() == 0: + return x.unsqueeze(0).nonzero().unbind(1) + return x.nonzero().unbind(1) + else: + return x.nonzero(as_tuple=True) + + +@torch.jit.script_if_tracing +def move_device_like(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor: + """ + Tracing friendly way to cast tensor to another tensor's device. Device will be treated + as constant during tracing, scripting the casting process as whole can workaround this issue. + """ + return src.to(dst.device) diff --git a/data_processing/detectron2/detectron2/model_zoo/__init__.py b/data_processing/detectron2/detectron2/model_zoo/__init__.py new file mode 100644 index 0000000..6204208 --- /dev/null +++ b/data_processing/detectron2/detectron2/model_zoo/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Model Zoo API for Detectron2: a collection of functions to create common model architectures +listed in `MODEL_ZOO.md `_, +and optionally load their pre-trained weights. +""" + +from .model_zoo import get, get_config_file, get_checkpoint_url, get_config + +__all__ = ["get_checkpoint_url", "get", "get_config_file", "get_config"] diff --git a/data_processing/detectron2/detectron2/model_zoo/model_zoo.py b/data_processing/detectron2/detectron2/model_zoo/model_zoo.py new file mode 100644 index 0000000..5b90bc9 --- /dev/null +++ b/data_processing/detectron2/detectron2/model_zoo/model_zoo.py @@ -0,0 +1,213 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import os +from typing import Optional +import pkg_resources +import torch + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate +from detectron2.modeling import build_model + + +class _ModelZooUrls(object): + """ + Mapping from names to officially released Detectron2 pre-trained models. 
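`nonzero_tuple` defined above only dispatches between the scripted and eager forms of `torch.nonzero`; a short sketch of the eager behaviour:

```python
import torch
from detectron2.layers.wrappers import nonzero_tuple

mask = torch.tensor([[True, False],
                     [False, True]])
rows, cols = nonzero_tuple(mask)     # equivalent to mask.nonzero(as_tuple=True) in eager mode
print(rows.tolist(), cols.tolist())  # [0, 1] [0, 1]
```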
+ """ + + S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + + # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl + CONFIG_PATH_TO_URL_SUFFIX = { + # COCO Detection with Faster R-CNN + "COCO-Detection/faster_rcnn_R_50_C4_1x": "137257644/model_final_721ade.pkl", + "COCO-Detection/faster_rcnn_R_50_DC5_1x": "137847829/model_final_51d356.pkl", + "COCO-Detection/faster_rcnn_R_50_FPN_1x": "137257794/model_final_b275ba.pkl", + "COCO-Detection/faster_rcnn_R_50_C4_3x": "137849393/model_final_f97cb7.pkl", + "COCO-Detection/faster_rcnn_R_50_DC5_3x": "137849425/model_final_68d202.pkl", + "COCO-Detection/faster_rcnn_R_50_FPN_3x": "137849458/model_final_280758.pkl", + "COCO-Detection/faster_rcnn_R_101_C4_3x": "138204752/model_final_298dad.pkl", + "COCO-Detection/faster_rcnn_R_101_DC5_3x": "138204841/model_final_3e0943.pkl", + "COCO-Detection/faster_rcnn_R_101_FPN_3x": "137851257/model_final_f6e8b1.pkl", + "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x": "139173657/model_final_68b088.pkl", + # COCO Detection with RetinaNet + "COCO-Detection/retinanet_R_50_FPN_1x": "190397773/model_final_bfca0b.pkl", + "COCO-Detection/retinanet_R_50_FPN_3x": "190397829/model_final_5bd44e.pkl", + "COCO-Detection/retinanet_R_101_FPN_3x": "190397697/model_final_971ab9.pkl", + # COCO Detection with RPN and Fast R-CNN + "COCO-Detection/rpn_R_50_C4_1x": "137258005/model_final_450694.pkl", + "COCO-Detection/rpn_R_50_FPN_1x": "137258492/model_final_02ce48.pkl", + "COCO-Detection/fast_rcnn_R_50_FPN_1x": "137635226/model_final_e5f7ce.pkl", + # COCO Instance Segmentation Baselines with Mask R-CNN + "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x": "137259246/model_final_9243eb.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x": "137260150/model_final_4f86c3.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "137260431/model_final_a54504.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x": "137849525/model_final_4ce675.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x": "137849551/model_final_84107b.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x": "137849600/model_final_f10217.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x": "138363239/model_final_a2914c.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x": "138363294/model_final_0464b7.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x": "138205316/model_final_a3ec72.pkl", + "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x": "139653917/model_final_2d9806.pkl", # noqa + # New baselines using Large-Scale Jitter and Longer Training Schedule + "new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ": "42047764/model_final_bb69de.pkl", + "new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ": "42047638/model_final_89a8d3.pkl", + "new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ": "42019571/model_final_14d201.pkl", + "new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ": "42025812/model_final_4f7b58.pkl", + "new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ": "42131867/model_final_0bb7ae.pkl", + "new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ": "42073830/model_final_f96b26.pkl", + "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ": "42047771/model_final_b7fbab.pkl", # noqa + "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ": "42132721/model_final_5d87c1.pkl", # noqa + "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ": "42025447/model_final_f1362d.pkl", # noqa + "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ": "42047784/model_final_6ba57e.pkl", # noqa + 
"new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ": "42047642/model_final_27b9c1.pkl", # noqa + "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ": "42045954/model_final_ef3a80.pkl", # noqa + # COCO Person Keypoint Detection Baselines with Keypoint R-CNN + "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x": "137261548/model_final_04e291.pkl", + "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x": "137849621/model_final_a6e10b.pkl", + "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x": "138363331/model_final_997cc7.pkl", + "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x": "139686956/model_final_5ad38f.pkl", + # COCO Panoptic Segmentation Baselines with Panoptic FPN + "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x": "139514544/model_final_dbfeb4.pkl", + "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x": "139514569/model_final_c10459.pkl", + "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x": "139514519/model_final_cafdb1.pkl", + # LVIS Instance Segmentation Baselines with Mask R-CNN + "LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "144219072/model_final_571f7c.pkl", # noqa + "LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x": "144219035/model_final_824ab5.pkl", # noqa + "LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x": "144219108/model_final_5e3439.pkl", # noqa + # Cityscapes & Pascal VOC Baselines + "Cityscapes/mask_rcnn_R_50_FPN": "142423278/model_final_af9cf5.pkl", + "PascalVOC-Detection/faster_rcnn_R_50_C4": "142202221/model_final_b1acc2.pkl", + # Other Settings + "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5": "138602867/model_final_65c703.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5": "144998336/model_final_821d0b.pkl", + "Misc/cascade_mask_rcnn_R_50_FPN_1x": "138602847/model_final_e9d89b.pkl", + "Misc/cascade_mask_rcnn_R_50_FPN_3x": "144998488/model_final_480dd8.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_syncbn": "169527823/model_final_3b3c51.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_gn": "138602888/model_final_dc5d9e.pkl", + "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn": "138602908/model_final_01ca85.pkl", + "Misc/scratch_mask_rcnn_R_50_FPN_9x_gn": "183808979/model_final_da7b4c.pkl", + "Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn": "184226666/model_final_5ce33e.pkl", + "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x": "139797668/model_final_be35db.pkl", + "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv": "18131413/model_0039999_e76410.pkl", # noqa + # D1 Comparisons + "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x": "137781054/model_final_7ab50c.pkl", # noqa + "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x": "137781281/model_final_62ca52.pkl", # noqa + "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x": "137781195/model_final_cce136.pkl", + } + + @staticmethod + def query(config_path: str) -> Optional[str]: + """ + Args: + config_path: relative config filename + """ + name = config_path.replace(".yaml", "").replace(".py", "") + if name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX: + suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name] + return _ModelZooUrls.S3_PREFIX + name + "/" + suffix + return None + + +def get_checkpoint_url(config_path): + """ + Returns the URL to the model trained using the given config + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + Returns: + str: a URL to the model + """ + url = _ModelZooUrls.query(config_path) + if url is None: + raise RuntimeError("Pretrained model for {} is not available!".format(config_path)) + return url + + +def 
get_config_file(config_path): + """ + Returns path to a builtin config file. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + Returns: + str: the real path to the config file. + """ + cfg_file = pkg_resources.resource_filename( + "detectron2.model_zoo", os.path.join("configs", config_path) + ) + if not os.path.exists(cfg_file): + raise RuntimeError("{} not available in Model Zoo!".format(config_path)) + return cfg_file + + +def get_config(config_path, trained: bool = False): + """ + Returns a config object for a model in model zoo. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights. + If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used + instead; this will typically (though not always) initialize a subset of weights using + an ImageNet pre-trained model, while randomly initializing the other weights. + + Returns: + CfgNode or omegaconf.DictConfig: a config object + """ + cfg_file = get_config_file(config_path) + if cfg_file.endswith(".yaml"): + cfg = get_cfg() + cfg.merge_from_file(cfg_file) + if trained: + cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path) + return cfg + elif cfg_file.endswith(".py"): + cfg = LazyConfig.load(cfg_file) + if trained: + url = get_checkpoint_url(config_path) + if "train" in cfg and "init_checkpoint" in cfg.train: + cfg.train.init_checkpoint = url + else: + raise NotImplementedError + return cfg + + +def get(config_path, trained: bool = False, device: Optional[str] = None): + """ + Get a model specified by relative path under Detectron2's official ``configs/`` directory. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + trained (bool): see :func:`get_config`. + device (str or None): overwrite the device in config, if given. + + Returns: + nn.Module: a detectron2 model. Will be in training mode. + + Example: + :: + from detectron2 import model_zoo + model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) + """ + cfg = get_config(config_path, trained) + if device is None and not torch.cuda.is_available(): + device = "cpu" + if device is not None and isinstance(cfg, CfgNode): + cfg.MODEL.DEVICE = device + + if isinstance(cfg, CfgNode): + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + else: + model = instantiate(cfg.model) + if device is not None: + model = model.to(device) + if "train" in cfg and "init_checkpoint" in cfg.train: + DetectionCheckpointer(model).load(cfg.train.init_checkpoint) + return model diff --git a/data_processing/detectron2/detectron2/modeling/__init__.py b/data_processing/detectron2/detectron2/modeling/__init__.py new file mode 100644 index 0000000..4d949e2 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/__init__.py @@ -0,0 +1,64 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
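The three `model_zoo` entry points documented above compose naturally; a small usage sketch following their docstrings (the `trained=True` call to `get` downloads the released weights on first use):

```python
from detectron2 import model_zoo

path = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
url = model_zoo.get_checkpoint_url(path)        # URL of the released weights
cfg = model_zoo.get_config(path, trained=True)  # CfgNode with MODEL.WEIGHTS set to that URL
model = model_zoo.get(path, trained=True)       # built model with weights loaded, in training mode
```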
+from detectron2.layers import ShapeSpec + +from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY +from .backbone import ( + BACKBONE_REGISTRY, + FPN, + Backbone, + ResNet, + ResNetBlockBase, + build_backbone, + build_resnet_backbone, + make_stage, + ViT, + SimpleFeaturePyramid, + get_vit_lr_decay_rate, + MViT, + SwinTransformer, +) +from .meta_arch import ( + META_ARCH_REGISTRY, + SEM_SEG_HEADS_REGISTRY, + GeneralizedRCNN, + PanopticFPN, + ProposalNetwork, + RetinaNet, + SemanticSegmentor, + build_model, + build_sem_seg_head, + FCOS, +) +from .postprocessing import detector_postprocess +from .proposal_generator import ( + PROPOSAL_GENERATOR_REGISTRY, + build_proposal_generator, + RPN_HEAD_REGISTRY, + build_rpn_head, +) +from .roi_heads import ( + ROI_BOX_HEAD_REGISTRY, + ROI_HEADS_REGISTRY, + ROI_KEYPOINT_HEAD_REGISTRY, + ROI_MASK_HEAD_REGISTRY, + ROIHeads, + StandardROIHeads, + BaseMaskRCNNHead, + BaseKeypointRCNNHead, + FastRCNNOutputLayers, + build_box_head, + build_keypoint_head, + build_mask_head, + build_roi_heads, +) +from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA +from .mmdet_wrapper import MMDetBackbone, MMDetDetector + +_EXCLUDE = {"ShapeSpec"} +__all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] + + +from detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/data_processing/detectron2/detectron2/modeling/anchor_generator.py b/data_processing/detectron2/detectron2/modeling/anchor_generator.py new file mode 100644 index 0000000..ac94e72 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/anchor_generator.py @@ -0,0 +1,386 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import collections +import math +from typing import List +import torch +from torch import nn + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec, move_device_like +from detectron2.structures import Boxes, RotatedBoxes +from detectron2.utils.registry import Registry + +ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR") +ANCHOR_GENERATOR_REGISTRY.__doc__ = """ +Registry for modules that creates object detection anchors for feature maps. + +The registered object will be called with `obj(cfg, input_shape)`. 
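As the registry docstring above states, registered objects are called as `obj(cfg, input_shape)`. A hedged sketch of registering a custom generator; `MyAnchorGenerator` is a hypothetical name used only for illustration, not part of detectron2:

```python
from torch import nn
from detectron2.modeling import ANCHOR_GENERATOR_REGISTRY


@ANCHOR_GENERATOR_REGISTRY.register()
class MyAnchorGenerator(nn.Module):
    def __init__(self, cfg, input_shape):  # the registry calls obj(cfg, input_shape)
        super().__init__()
        self.strides = [s.stride for s in input_shape]

    def forward(self, features):
        # A real implementation returns one Boxes object per feature map.
        return []

# Selected at build time via cfg.MODEL.ANCHOR_GENERATOR.NAME = "MyAnchorGenerator".
```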
+""" + + +class BufferList(nn.Module): + """ + Similar to nn.ParameterList, but for buffers + """ + + def __init__(self, buffers): + super().__init__() + for i, buffer in enumerate(buffers): + # Use non-persistent buffer so the values are not saved in checkpoint + self.register_buffer(str(i), buffer, persistent=False) + + def __len__(self): + return len(self._buffers) + + def __iter__(self): + return iter(self._buffers.values()) + + +def _create_grid_offsets( + size: List[int], stride: int, offset: float, target_device_tensor: torch.Tensor +): + grid_height, grid_width = size + shifts_x = move_device_like( + torch.arange(offset * stride, grid_width * stride, step=stride, dtype=torch.float32), + target_device_tensor, + ) + shifts_y = move_device_like( + torch.arange(offset * stride, grid_height * stride, step=stride, dtype=torch.float32), + target_device_tensor, + ) + + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + return shift_x, shift_y + + +def _broadcast_params(params, num_features, name): + """ + If one size (or aspect ratio) is specified and there are multiple feature + maps, we "broadcast" anchors of that single size (or aspect ratio) + over all feature maps. + + If params is list[float], or list[list[float]] with len(params) == 1, repeat + it num_features time. + + Returns: + list[list[float]]: param for each feature + """ + assert isinstance( + params, collections.abc.Sequence + ), f"{name} in anchor generator has to be a list! Got {params}." + assert len(params), f"{name} in anchor generator cannot be empty!" + if not isinstance(params[0], collections.abc.Sequence): # params is list[float] + return [params] * num_features + if len(params) == 1: + return list(params) * num_features + assert len(params) == num_features, ( + f"Got {name} of length {len(params)} in anchor generator, " + f"but the number of input features is {num_features}!" + ) + return params + + +@ANCHOR_GENERATOR_REGISTRY.register() +class DefaultAnchorGenerator(nn.Module): + """ + Compute anchors in the standard ways described in + "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks". + """ + + box_dim: torch.jit.Final[int] = 4 + """ + the dimension of each anchor box. + """ + + @configurable + def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5): + """ + This interface is experimental. + + Args: + sizes (list[list[float]] or list[float]): + If ``sizes`` is list[list[float]], ``sizes[i]`` is the list of anchor sizes + (i.e. sqrt of anchor area) to use for the i-th feature map. + If ``sizes`` is list[float], ``sizes`` is used for all feature maps. + Anchor sizes are given in absolute lengths in units of + the input image; they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]] or list[float]): list of aspect ratios + (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. + strides (list[int]): stride of each input feature. + offset (float): Relative offset between the center of the first anchor and the top-left + corner of the image. Value has to be in [0, 1). + Recommend to use 0.5, which means half stride. 
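A minimal sketch of the "broadcast" rule described above, constructing `DefaultAnchorGenerator` directly with keyword arguments (one size / aspect-ratio list shared across three feature maps; the numbers are illustrative):

```python
import torch
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator

gen = DefaultAnchorGenerator(
    sizes=[[32, 64, 128]],            # a single entry, broadcast over all feature maps
    aspect_ratios=[[0.5, 1.0, 2.0]],
    strides=[4, 8, 16],
)
print(gen.num_anchors)                # [9, 9, 9]: 3 sizes x 3 aspect ratios per location

feats = [torch.zeros(1, 8, s, s) for s in (64, 32, 16)]
anchors = gen(feats)                  # list of Boxes, one per feature map
print(len(anchors[0]))                # 64 * 64 * 9 = 36864 anchors on the first map
```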
+ """ + super().__init__() + + self.strides = strides + self.num_features = len(self.strides) + sizes = _broadcast_params(sizes, self.num_features, "sizes") + aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios) + + self.offset = offset + assert 0.0 <= self.offset < 1.0, self.offset + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + return { + "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, + "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, + "strides": [x.stride for x in input_shape], + "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, + } + + def _calculate_anchors(self, sizes, aspect_ratios): + cell_anchors = [ + self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios) + ] + return BufferList(cell_anchors) + + @property + @torch.jit.unused + def num_cell_anchors(self): + """ + Alias of `num_anchors`. + """ + return self.num_anchors + + @property + @torch.jit.unused + def num_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios and 5 sizes, the number of anchors is 15. + (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config) + + In standard RPN models, `num_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def _grid_anchors(self, grid_sizes: List[List[int]]): + """ + Returns: + list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4 + """ + anchors = [] + # buffers() not supported by torchscript. use named_buffers() instead + buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()] + for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers): + shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) + + anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) + + return anchors + + def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): + """ + Generate a tensor storing canonical anchor boxes, which are all anchor + boxes of different sizes and aspect_ratios centered at (0, 0). + We can later build the set of anchors for a full feature map by + shifting and tiling these tensors (see `meth:_grid_anchors`). + + Args: + sizes (tuple[float]): + aspect_ratios (tuple[float]]): + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes + in XYXY format. + """ + + # This is different from the anchor generator defined in the original Faster R-CNN + # code or Detectron. They yield the same AP, however the old version defines cell + # anchors in a less natural way with a shift relative to the feature grid and + # quantization that results in slightly different sizes for different aspect ratios. + # See also https://github.com/facebookresearch/Detectron/issues/227 + + anchors = [] + for size in sizes: + area = size**2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... 
+ # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 + anchors.append([x0, y0, x1, y1]) + return torch.tensor(anchors) + + def forward(self, features: List[torch.Tensor]): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[Boxes]: a list of Boxes containing all the anchors for each feature map + (i.e. the cell anchors repeated over all locations in the feature map). + The number of anchors of each feature map is Hi x Wi x num_cell_anchors, + where Hi, Wi are resolution of the feature map divided by anchor stride. + """ + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) + return [Boxes(x) for x in anchors_over_all_feature_maps] + + +@ANCHOR_GENERATOR_REGISTRY.register() +class RotatedAnchorGenerator(nn.Module): + """ + Compute rotated anchors used by Rotated RPN (RRPN), described in + "Arbitrary-Oriented Scene Text Detection via Rotation Proposals". + """ + + box_dim: int = 5 + """ + the dimension of each anchor box. + """ + + @configurable + def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5): + """ + This interface is experimental. + + Args: + sizes (list[list[float]] or list[float]): + If sizes is list[list[float]], sizes[i] is the list of anchor sizes + (i.e. sqrt of anchor area) to use for the i-th feature map. + If sizes is list[float], the sizes are used for all feature maps. + Anchor sizes are given in absolute lengths in units of + the input image; they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]] or list[float]): list of aspect ratios + (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. + strides (list[int]): stride of each input feature. + angles (list[list[float]] or list[float]): list of angles (in degrees CCW) + to use for anchors. Same "broadcast" rule for `sizes` applies. + offset (float): Relative offset between the center of the first anchor and the top-left + corner of the image. Value has to be in [0, 1). + Recommend to use 0.5, which means half stride. + """ + super().__init__() + + self.strides = strides + self.num_features = len(self.strides) + sizes = _broadcast_params(sizes, self.num_features, "sizes") + aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") + angles = _broadcast_params(angles, self.num_features, "angles") + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles) + + self.offset = offset + assert 0.0 <= self.offset < 1.0, self.offset + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + return { + "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, + "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, + "strides": [x.stride for x in input_shape], + "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, + "angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES, + } + + def _calculate_anchors(self, sizes, aspect_ratios, angles): + cell_anchors = [ + self.generate_cell_anchors(size, aspect_ratio, angle).float() + for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles) + ] + return BufferList(cell_anchors) + + @property + def num_cell_anchors(self): + """ + Alias of `num_anchors`. 
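A worked instance of the size / aspect-ratio algebra used by `generate_cell_anchors` above (pure arithmetic, no detectron2 needed):

```python
import math

s, a = 64.0, 2.0            # "size" (sqrt of anchor area) and aspect ratio a = h / w
w = math.sqrt(s * s / a)    # ~45.25
h = a * w                   # ~90.51

assert abs(w * h - s * s) < 1e-6  # the anchor keeps the requested area s * s
assert abs(h / w - a) < 1e-6      # ... and the requested aspect ratio
```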
+ """ + return self.num_anchors + + @property + def num_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios, 2 sizes and 5 angles, the number of anchors is 30. + (See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS + and ANCHOR_GENERATOR.ANGLES in config) + + In standard RRPN models, `num_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def _grid_anchors(self, grid_sizes): + anchors = [] + for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): + shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors) + zeros = torch.zeros_like(shift_x) + shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1) + + anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5)) + + return anchors + + def generate_cell_anchors( + self, + sizes=(32, 64, 128, 256, 512), + aspect_ratios=(0.5, 1, 2), + angles=(-90, -60, -30, 0, 30, 60, 90), + ): + """ + Generate a tensor storing canonical anchor boxes, which are all anchor + boxes of different sizes, aspect_ratios, angles centered at (0, 0). + We can later build the set of anchors for a full feature map by + shifting and tiling these tensors (see `meth:_grid_anchors`). + + Args: + sizes (tuple[float]): + aspect_ratios (tuple[float]]): + angles (tuple[float]]): + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5) + storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format. + """ + anchors = [] + for size in sizes: + area = size**2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... + # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + anchors.extend([0, 0, w, h, a] for a in angles) + + return torch.tensor(anchors) + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map + (i.e. the cell anchors repeated over all locations in the feature map). + The number of anchors of each feature map is Hi x Wi x num_cell_anchors, + where Hi, Wi are resolution of the feature map divided by anchor stride. + """ + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) + return [RotatedBoxes(x) for x in anchors_over_all_feature_maps] + + +def build_anchor_generator(cfg, input_shape): + """ + Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. + """ + anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME + return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape) diff --git a/data_processing/detectron2/detectron2/modeling/backbone/__init__.py b/data_processing/detectron2/detectron2/modeling/backbone/__init__.py new file mode 100644 index 0000000..5b3358a --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
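`build_anchor_generator` above only looks up `cfg.MODEL.ANCHOR_GENERATOR.NAME` in the registry; a sketch of the config-driven path, relying on detectron2's default config values:

```python
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_anchor_generator

cfg = get_cfg()  # defaults already set NAME = "DefaultAnchorGenerator"
input_shape = [ShapeSpec(channels=256, stride=s) for s in (8, 16, 32)]
anchor_gen = build_anchor_generator(cfg, input_shape)
print(anchor_gen.num_anchors)   # one entry per feature map
```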
+from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip + +from .backbone import Backbone +from .fpn import FPN +from .regnet import RegNet +from .resnet import ( + BasicStem, + ResNet, + ResNetBlockBase, + build_resnet_backbone, + make_stage, + BottleneckBlock, +) +from .vit import ViT, SimpleFeaturePyramid, get_vit_lr_decay_rate +from .mvit import MViT +from .swin import SwinTransformer + +__all__ = [k for k in globals().keys() if not k.startswith("_")] +# TODO can expose more resnet blocks after careful consideration diff --git a/data_processing/detectron2/detectron2/modeling/backbone/backbone.py b/data_processing/detectron2/detectron2/modeling/backbone/backbone.py new file mode 100644 index 0000000..e1c765a --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/backbone.py @@ -0,0 +1,74 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from abc import ABCMeta, abstractmethod +from typing import Dict +import torch.nn as nn + +from detectron2.layers import ShapeSpec + +__all__ = ["Backbone"] + + +class Backbone(nn.Module, metaclass=ABCMeta): + """ + Abstract base class for network backbones. + """ + + def __init__(self): + """ + The `__init__` method of any subclass can specify its own set of arguments. + """ + super().__init__() + + @abstractmethod + def forward(self): + """ + Subclasses must override this method, but adhere to the same return type. + + Returns: + dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor + """ + pass + + @property + def size_divisibility(self) -> int: + """ + Some backbones require the input height and width to be divisible by a + specific integer. This is typically true for encoder / decoder type networks + with lateral connection (e.g., FPN) for which feature maps need to match + dimension in the "bottom up" and "top down" paths. Set to 0 if no specific + input size divisibility is required. + """ + return 0 + + @property + def padding_constraints(self) -> Dict[str, int]: + """ + This property is a generalization of size_divisibility. Some backbones and training + recipes require specific padding constraints, such as enforcing divisibility by a specific + integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter + in :paper:vitdet). `padding_constraints` contains these optional items like: + { + "size_divisibility": int, + "square_size": int, + # Future options are possible + } + `size_divisibility` will read from here if presented and `square_size` indicates the + square padding size if `square_size` > 0. + + TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints + could be generalized as TypedDict (Python 3.8+) to support more types in the future. + """ + return {} + + def output_shape(self): + """ + Returns: + dict[str->ShapeSpec] + """ + # this is a backward-compatible default + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } diff --git a/data_processing/detectron2/detectron2/modeling/backbone/build.py b/data_processing/detectron2/detectron2/modeling/backbone/build.py new file mode 100644 index 0000000..af02141 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/build.py @@ -0,0 +1,33 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
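A hedged sketch of the `Backbone` contract described above: a toy subclass (`ToyBackbone` is illustrative only) that returns a named feature map and reports its shape spec:

```python
import torch
from torch import nn
from detectron2.layers import ShapeSpec
from detectron2.modeling import Backbone


class ToyBackbone(Backbone):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 16, kernel_size=3, stride=4, padding=1)

    def forward(self, x):
        return {"toy1": self.conv(x)}   # dict[str -> Tensor], as the base class requires

    def output_shape(self):
        return {"toy1": ShapeSpec(channels=16, stride=4)}


feats = ToyBackbone()(torch.randn(1, 3, 64, 64))
print(feats["toy1"].shape)              # torch.Size([1, 16, 16, 16])
```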
+from detectron2.layers import ShapeSpec +from detectron2.utils.registry import Registry + +from .backbone import Backbone + +BACKBONE_REGISTRY = Registry("BACKBONE") +BACKBONE_REGISTRY.__doc__ = """ +Registry for backbones, which extract feature maps from images + +The registered object must be a callable that accepts two arguments: + +1. A :class:`detectron2.config.CfgNode` +2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. + +Registered object must return instance of :class:`Backbone`. +""" + + +def build_backbone(cfg, input_shape=None): + """ + Build a backbone from `cfg.MODEL.BACKBONE.NAME`. + + Returns: + an instance of :class:`Backbone` + """ + if input_shape is None: + input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) + + backbone_name = cfg.MODEL.BACKBONE.NAME + backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) + assert isinstance(backbone, Backbone) + return backbone diff --git a/data_processing/detectron2/detectron2/modeling/backbone/fpn.py b/data_processing/detectron2/detectron2/modeling/backbone/fpn.py new file mode 100644 index 0000000..19d24e1 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/fpn.py @@ -0,0 +1,268 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.layers import Conv2d, ShapeSpec, get_norm + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY +from .resnet import build_resnet_backbone + +__all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"] + + +class FPN(Backbone): + """ + This module implements :paper:`FPN`. + It creates pyramid features built on top of some input feature maps. + """ + + _fuse_type: torch.jit.Final[str] + + def __init__( + self, + bottom_up, + in_features, + out_channels, + norm="", + top_block=None, + fuse_type="sum", + square_pad=0, + ): + """ + Args: + bottom_up (Backbone): module representing the bottom up subnetwork. + Must be a subclass of :class:`Backbone`. The multi-scale feature + maps generated by the bottom up network, and listed in `in_features`, + are used to generate FPN levels. + in_features (list[str]): names of the input feature maps coming + from the backbone to which FPN is attached. For example, if the + backbone produces ["res2", "res3", "res4"], any *contiguous* sublist + of these may be used; order must be from high to low resolution. + out_channels (int): number of channels in the output feature maps. + norm (str): the normalization to use. + top_block (nn.Module or None): if provided, an extra operation will + be performed on the output of the last (smallest resolution) + FPN output, and the result will extend the result list. The top_block + further downsamples the feature map. It must have an attribute + "num_levels", meaning the number of extra FPN levels added by + this block, and "in_feature", which is a string representing + its input feature (e.g., p5). + fuse_type (str): types for fusing the top down features and the lateral + ones. It can be "sum" (default), which sums up element-wise; or "avg", + which takes the element-wise mean of the two. + square_pad (int): If > 0, require input images to be padded to specific square size. + """ + super(FPN, self).__init__() + assert isinstance(bottom_up, Backbone) + assert in_features, in_features + + # Feature map strides and channels from the bottom up network (e.g. 
ResNet) + input_shapes = bottom_up.output_shape() + strides = [input_shapes[f].stride for f in in_features] + in_channels_per_feature = [input_shapes[f].channels for f in in_features] + + _assert_strides_are_log2_contiguous(strides) + lateral_convs = [] + output_convs = [] + + use_bias = norm == "" + for idx, in_channels in enumerate(in_channels_per_feature): + lateral_norm = get_norm(norm, out_channels) + output_norm = get_norm(norm, out_channels) + + lateral_conv = Conv2d( + in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm + ) + output_conv = Conv2d( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm=output_norm, + ) + weight_init.c2_xavier_fill(lateral_conv) + weight_init.c2_xavier_fill(output_conv) + stage = int(math.log2(strides[idx])) + self.add_module("fpn_lateral{}".format(stage), lateral_conv) + self.add_module("fpn_output{}".format(stage), output_conv) + + lateral_convs.append(lateral_conv) + output_convs.append(output_conv) + # Place convs into top-down order (from low to high resolution) + # to make the top-down computation in forward clearer. + self.lateral_convs = lateral_convs[::-1] + self.output_convs = output_convs[::-1] + self.top_block = top_block + self.in_features = tuple(in_features) + self.bottom_up = bottom_up + # Return feature names are "p", like ["p2", "p3", ..., "p6"] + self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides} + # top block output feature maps. + if self.top_block is not None: + for s in range(stage, stage + self.top_block.num_levels): + self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) + + self._out_features = list(self._out_feature_strides.keys()) + self._out_feature_channels = {k: out_channels for k in self._out_features} + self._size_divisibility = strides[-1] + self._square_pad = square_pad + assert fuse_type in {"avg", "sum"} + self._fuse_type = fuse_type + + @property + def size_divisibility(self): + return self._size_divisibility + + @property + def padding_constraints(self): + return {"square_size": self._square_pad} + + def forward(self, x): + """ + Args: + input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to + feature map tensor for each feature level in high to low resolution order. + + Returns: + dict[str->Tensor]: + mapping from feature map name to FPN feature map tensor + in high to low resolution order. Returned feature names follow the FPN + paper convention: "p", where stage has stride = 2 ** stage e.g., + ["p2", "p3", ..., "p6"]. 
+ """ + bottom_up_features = self.bottom_up(x) + results = [] + prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]]) + results.append(self.output_convs[0](prev_features)) + + # Reverse feature maps into top-down order (from low to high resolution) + for idx, (lateral_conv, output_conv) in enumerate( + zip(self.lateral_convs, self.output_convs) + ): + # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336 + # Therefore we loop over all modules but skip the first one + if idx > 0: + features = self.in_features[-idx - 1] + features = bottom_up_features[features] + top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest") + lateral_features = lateral_conv(features) + prev_features = lateral_features + top_down_features + if self._fuse_type == "avg": + prev_features /= 2 + results.insert(0, output_conv(prev_features)) + + if self.top_block is not None: + if self.top_block.in_feature in bottom_up_features: + top_block_in_feature = bottom_up_features[self.top_block.in_feature] + else: + top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] + results.extend(self.top_block(top_block_in_feature)) + assert len(self._out_features) == len(results) + return {f: res for f, res in zip(self._out_features, results)} + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + +def _assert_strides_are_log2_contiguous(strides): + """ + Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2". + """ + for i, stride in enumerate(strides[1:], 1): + assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format( + stride, strides[i - 1] + ) + + +class LastLevelMaxPool(nn.Module): + """ + This module is used in the original FPN to generate a downsampled + P6 feature from P5. + """ + + def __init__(self): + super().__init__() + self.num_levels = 1 + self.in_feature = "p5" + + def forward(self, x): + return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)] + + +class LastLevelP6P7(nn.Module): + """ + This module is used in RetinaNet to generate extra layers, P6 and P7 from + C5 feature. + """ + + def __init__(self, in_channels, out_channels, in_feature="res5"): + super().__init__() + self.num_levels = 2 + self.in_feature = in_feature + self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) + self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) + for module in [self.p6, self.p7]: + weight_init.c2_xavier_fill(module) + + def forward(self, c5): + p6 = self.p6(c5) + p7 = self.p7(F.relu(p6)) + return [p6, p7] + + +@BACKBONE_REGISTRY.register() +def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
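A sketch of `build_resnet_fpn_backbone` in use, going through `build_backbone` with a stock config (no weights are loaded here; the input size must respect the backbone's `size_divisibility`):

```python
import torch
from detectron2 import model_zoo
from detectron2.modeling import build_backbone

cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
backbone = build_backbone(cfg)                 # dispatches to build_resnet_fpn_backbone
print(backbone.output_shape().keys())          # p2 ... p6, each with 256 output channels

feats = backbone(torch.randn(1, 3, 224, 224))  # 224 is divisible by size_divisibility = 32
print({k: tuple(v.shape) for k, v in feats.items()})
```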
+ """ + bottom_up = build_resnet_backbone(cfg, input_shape) + in_features = cfg.MODEL.FPN.IN_FEATURES + out_channels = cfg.MODEL.FPN.OUT_CHANNELS + backbone = FPN( + bottom_up=bottom_up, + in_features=in_features, + out_channels=out_channels, + norm=cfg.MODEL.FPN.NORM, + top_block=LastLevelMaxPool(), + fuse_type=cfg.MODEL.FPN.FUSE_TYPE, + ) + return backbone + + +@BACKBONE_REGISTRY.register() +def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. + """ + bottom_up = build_resnet_backbone(cfg, input_shape) + in_features = cfg.MODEL.FPN.IN_FEATURES + out_channels = cfg.MODEL.FPN.OUT_CHANNELS + in_channels_p6p7 = bottom_up.output_shape()["res5"].channels + backbone = FPN( + bottom_up=bottom_up, + in_features=in_features, + out_channels=out_channels, + norm=cfg.MODEL.FPN.NORM, + top_block=LastLevelP6P7(in_channels_p6p7, out_channels), + fuse_type=cfg.MODEL.FPN.FUSE_TYPE, + ) + return backbone diff --git a/data_processing/detectron2/detectron2/modeling/backbone/mvit.py b/data_processing/detectron2/detectron2/modeling/backbone/mvit.py new file mode 100644 index 0000000..50667a8 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/mvit.py @@ -0,0 +1,448 @@ +import logging +import numpy as np +import torch +import torch.nn as nn + +from .backbone import Backbone +from .utils import ( + PatchEmbed, + add_decomposed_rel_pos, + get_abs_pos, + window_partition, + window_unpartition, +) + +logger = logging.getLogger(__name__) + + +__all__ = ["MViT"] + + +def attention_pool(x, pool, norm=None): + # (B, H, W, C) -> (B, C, H, W) + x = x.permute(0, 3, 1, 2) + x = pool(x) + # (B, C, H1, W1) -> (B, H1, W1, C) + x = x.permute(0, 2, 3, 1) + if norm: + x = norm(x) + + return x + + +class MultiScaleAttention(nn.Module): + """Multiscale Multi-head Attention block.""" + + def __init__( + self, + dim, + dim_out, + num_heads, + qkv_bias=True, + norm_layer=nn.LayerNorm, + pool_kernel=(3, 3), + stride_q=1, + stride_kv=1, + residual_pooling=True, + window_size=0, + use_rel_pos=False, + rel_pos_zero_init=True, + input_size=None, + ): + """ + Args: + dim (int): Number of input channels. + dim_out (int): Number of output channels. + num_heads (int): Number of attention heads. + qkv_bias (bool: If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + pool_kernel (tuple): kernel size for qkv pooling layers. + stride_q (int): stride size for q pooling layer. + stride_kv (int): stride size for kv pooling layer. + residual_pooling (bool): If true, enable residual pooling. + use_rel_pos (bool): If True, add relative postional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (int or None): Input resolution. 
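`attention_pool` above operates on channels-last tensors, as used throughout MViT's attention; a small shape sketch (the pooling module here is an arbitrary illustrative choice, not the one MViT builds internally):

```python
import torch
from torch import nn
from detectron2.modeling.backbone.mvit import attention_pool

x = torch.randn(2, 14, 14, 8)                        # (B, H, W, C), channels last
pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
y = attention_pool(x, pool)                          # permutes to NCHW, pools, permutes back
print(y.shape)                                       # torch.Size([2, 7, 7, 8])
```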
+ """ + super().__init__() + self.num_heads = num_heads + head_dim = dim_out // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) + self.proj = nn.Linear(dim_out, dim_out) + + # qkv pooling + pool_padding = [k // 2 for k in pool_kernel] + dim_conv = dim_out // num_heads + self.pool_q = nn.Conv2d( + dim_conv, + dim_conv, + pool_kernel, + stride=stride_q, + padding=pool_padding, + groups=dim_conv, + bias=False, + ) + self.norm_q = norm_layer(dim_conv) + self.pool_k = nn.Conv2d( + dim_conv, + dim_conv, + pool_kernel, + stride=stride_kv, + padding=pool_padding, + groups=dim_conv, + bias=False, + ) + self.norm_k = norm_layer(dim_conv) + self.pool_v = nn.Conv2d( + dim_conv, + dim_conv, + pool_kernel, + stride=stride_kv, + padding=pool_padding, + groups=dim_conv, + bias=False, + ) + self.norm_v = norm_layer(dim_conv) + + self.window_size = window_size + if window_size: + self.q_win_size = window_size // stride_q + self.kv_win_size = window_size // stride_kv + self.residual_pooling = residual_pooling + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + # initialize relative positional embeddings + assert input_size[0] == input_size[1] + size = input_size[0] + rel_dim = 2 * max(size // stride_q, size // stride_kv) - 1 + self.rel_pos_h = nn.Parameter(torch.zeros(rel_dim, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_dim, head_dim)) + + if not rel_pos_zero_init: + nn.init.trunc_normal_(self.rel_pos_h, std=0.02) + nn.init.trunc_normal_(self.rel_pos_w, std=0.02) + + def forward(self, x): + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H, W, C) + qkv = self.qkv(x).reshape(B, H, W, 3, self.num_heads, -1).permute(3, 0, 4, 1, 2, 5) + # q, k, v with shape (B * nHead, H, W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H, W, -1).unbind(0) + + q = attention_pool(q, self.pool_q, self.norm_q) + k = attention_pool(k, self.pool_k, self.norm_k) + v = attention_pool(v, self.pool_v, self.norm_v) + + ori_q = q + if self.window_size: + q, q_hw_pad = window_partition(q, self.q_win_size) + k, kv_hw_pad = window_partition(k, self.kv_win_size) + v, _ = window_partition(v, self.kv_win_size) + q_hw = (self.q_win_size, self.q_win_size) + kv_hw = (self.kv_win_size, self.kv_win_size) + else: + q_hw = q.shape[1:3] + kv_hw = k.shape[1:3] + + q = q.view(q.shape[0], np.prod(q_hw), -1) + k = k.view(k.shape[0], np.prod(kv_hw), -1) + v = v.view(v.shape[0], np.prod(kv_hw), -1) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, q_hw, kv_hw) + + attn = attn.softmax(dim=-1) + x = attn @ v + + x = x.view(x.shape[0], q_hw[0], q_hw[1], -1) + + if self.window_size: + x = window_unpartition(x, self.q_win_size, q_hw_pad, ori_q.shape[1:3]) + + if self.residual_pooling: + x += ori_q + + H, W = x.shape[1], x.shape[2] + x = x.view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +class MultiScaleBlock(nn.Module): + """Multiscale Transformer blocks""" + + def __init__( + self, + dim, + dim_out, + num_heads, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + qkv_pool_kernel=(3, 3), + stride_q=1, + stride_kv=1, + residual_pooling=True, + window_size=0, + use_rel_pos=False, + rel_pos_zero_init=True, + input_size=None, + ): + """ + Args: + dim (int): Number of input channels. + dim_out (int): Number of output channels. 
+ num_heads (int): Number of attention heads in the MViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + qkv_pool_kernel (tuple): kernel size for qkv pooling layers. + stride_q (int): stride size for q pooling layer. + stride_kv (int): stride size for kv pooling layer. + residual_pooling (bool): If true, enable residual pooling. + window_size (int): Window size for window attention blocks. If it equals 0, then not + use window attention. + use_rel_pos (bool): If True, add relative postional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (int or None): Input resolution. + """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = MultiScaleAttention( + dim, + dim_out, + num_heads=num_heads, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + pool_kernel=qkv_pool_kernel, + stride_q=stride_q, + stride_kv=stride_kv, + residual_pooling=residual_pooling, + window_size=window_size, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size, + ) + + from timm.models.layers import DropPath, Mlp + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim_out) + self.mlp = Mlp( + in_features=dim_out, + hidden_features=int(dim_out * mlp_ratio), + out_features=dim_out, + act_layer=act_layer, + ) + + if dim != dim_out: + self.proj = nn.Linear(dim, dim_out) + + if stride_q > 1: + kernel_skip = stride_q + 1 + padding_skip = int(kernel_skip // 2) + self.pool_skip = nn.MaxPool2d(kernel_skip, stride_q, padding_skip, ceil_mode=False) + + def forward(self, x): + x_norm = self.norm1(x) + x_block = self.attn(x_norm) + + if hasattr(self, "proj"): + x = self.proj(x_norm) + if hasattr(self, "pool_skip"): + x = attention_pool(x, self.pool_skip) + + x = x + self.drop_path(x_block) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class MViT(Backbone): + """ + This module implements Multiscale Vision Transformer (MViT) backbone in :paper:'mvitv2'. + """ + + def __init__( + self, + img_size=224, + patch_kernel=(7, 7), + patch_stride=(4, 4), + patch_padding=(3, 3), + in_chans=3, + embed_dim=96, + depth=16, + num_heads=1, + last_block_indexes=(0, 2, 11, 15), + qkv_pool_kernel=(3, 3), + adaptive_kv_stride=4, + adaptive_window_size=56, + residual_pooling=True, + mlp_ratio=4.0, + qkv_bias=True, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + use_abs_pos=False, + use_rel_pos=True, + rel_pos_zero_init=True, + use_act_checkpoint=False, + pretrain_img_size=224, + pretrain_use_cls_token=True, + out_features=("scale2", "scale3", "scale4", "scale5"), + ): + """ + Args: + img_size (int): Input image size. + patch_kernel (tuple): kernel size for patch embedding. + patch_stride (tuple): stride size for patch embedding. + patch_padding (tuple): padding size for patch embedding. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of MViT. + num_heads (int): Number of base attention heads in each MViT block. + last_block_indexes (tuple): Block indexes for last blocks in each stage. + qkv_pool_kernel (tuple): kernel size for qkv pooling layers. + adaptive_kv_stride (int): adaptive stride size for kv pooling. 
+ adaptive_window_size (int): adaptive window size for window attention blocks. + residual_pooling (bool): If true, enable residual pooling. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path_rate (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative postional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + use_act_checkpoint (bool): If True, use activation checkpointing. + pretrain_img_size (int): input image size for pretraining models. + pretrain_use_cls_token (bool): If True, pretrainig models use class token. + out_features (tuple): name of the feature maps from each stage. + """ + super().__init__() + self.pretrain_use_cls_token = pretrain_use_cls_token + + self.patch_embed = PatchEmbed( + kernel_size=patch_kernel, + stride=patch_stride, + padding=patch_padding, + in_chans=in_chans, + embed_dim=embed_dim, + ) + + if use_abs_pos: + # Initialize absoluate positional embedding with pretrain image size. + num_patches = (pretrain_img_size // patch_stride[0]) * ( + pretrain_img_size // patch_stride[1] + ) + num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim)) + else: + self.pos_embed = None + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + dim_out = embed_dim + stride_kv = adaptive_kv_stride + window_size = adaptive_window_size + input_size = (img_size // patch_stride[0], img_size // patch_stride[1]) + stage = 2 + stride = patch_stride[0] + self._out_feature_strides = {} + self._out_feature_channels = {} + self.blocks = nn.ModuleList() + for i in range(depth): + # Multiply stride_kv by 2 if it's the last block of stage2 and stage3. + if i == last_block_indexes[1] or i == last_block_indexes[2]: + stride_kv_ = stride_kv * 2 + else: + stride_kv_ = stride_kv + # hybrid window attention: global attention in last three stages. 
+ window_size_ = 0 if i in last_block_indexes[1:] else window_size + block = MultiScaleBlock( + dim=embed_dim, + dim_out=dim_out, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + qkv_pool_kernel=qkv_pool_kernel, + stride_q=2 if i - 1 in last_block_indexes else 1, + stride_kv=stride_kv_, + residual_pooling=residual_pooling, + window_size=window_size_, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size, + ) + if use_act_checkpoint: + # TODO: use torch.utils.checkpoint + from fairscale.nn.checkpoint import checkpoint_wrapper + + block = checkpoint_wrapper(block) + self.blocks.append(block) + + embed_dim = dim_out + if i in last_block_indexes: + name = f"scale{stage}" + if name in out_features: + self._out_feature_channels[name] = dim_out + self._out_feature_strides[name] = stride + self.add_module(f"{name}_norm", norm_layer(dim_out)) + + dim_out *= 2 + num_heads *= 2 + stride_kv = max(stride_kv // 2, 1) + stride *= 2 + stage += 1 + if i - 1 in last_block_indexes: + window_size = window_size // 2 + input_size = [s // 2 for s in input_size] + + self._out_features = out_features + self._last_block_indexes = last_block_indexes + + if self.pos_embed is not None: + nn.init.trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + x = self.patch_embed(x) + + if self.pos_embed is not None: + x = x + get_abs_pos(self.pos_embed, self.pretrain_use_cls_token, x.shape[1:3]) + + outputs = {} + stage = 2 + for i, blk in enumerate(self.blocks): + x = blk(x) + if i in self._last_block_indexes: + name = f"scale{stage}" + if name in self._out_features: + x_out = getattr(self, f"{name}_norm")(x) + outputs[name] = x_out.permute(0, 3, 1, 2) + stage += 1 + + return outputs diff --git a/data_processing/detectron2/detectron2/modeling/backbone/regnet.py b/data_processing/detectron2/detectron2/modeling/backbone/regnet.py new file mode 100644 index 0000000..3533d63 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/regnet.py @@ -0,0 +1,452 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Implementation of RegNet models from :paper:`dds` and :paper:`scaling`. + +This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications. +Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify +model loading. +""" + +import numpy as np +from torch import nn + +from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm + +from .backbone import Backbone + +__all__ = [ + "AnyNet", + "RegNet", + "ResStem", + "SimpleStem", + "VanillaBlock", + "ResBasicBlock", + "ResBottleneckBlock", +] + + +def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False): + """Helper for building a conv2d layer.""" + assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." 
+ s, p, g, b = stride, (k - 1) // 2, groups, bias + return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b) + + +def gap2d(): + """Helper for building a global average pooling layer.""" + return nn.AdaptiveAvgPool2d((1, 1)) + + +def pool2d(k, *, stride=1): + """Helper for building a pool2d layer.""" + assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." + return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2) + + +def init_weights(m): + """Performs ResNet-style weight initialization.""" + if isinstance(m, nn.Conv2d): + # Note that there is no bias due to BN + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + m.weight.data.normal_(mean=0.0, std=0.01) + m.bias.data.zero_() + + +class ResStem(CNNBlockBase): + """ResNet stem for ImageNet: 7x7, BN, AF, MaxPool.""" + + def __init__(self, w_in, w_out, norm, activation_class): + super().__init__(w_in, w_out, 4) + self.conv = conv2d(w_in, w_out, 7, stride=2) + self.bn = get_norm(norm, w_out) + self.af = activation_class() + self.pool = pool2d(3, stride=2) + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class SimpleStem(CNNBlockBase): + """Simple stem for ImageNet: 3x3, BN, AF.""" + + def __init__(self, w_in, w_out, norm, activation_class): + super().__init__(w_in, w_out, 2) + self.conv = conv2d(w_in, w_out, 3, stride=2) + self.bn = get_norm(norm, w_out) + self.af = activation_class() + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class SE(nn.Module): + """Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.""" + + def __init__(self, w_in, w_se, activation_class): + super().__init__() + self.avg_pool = gap2d() + self.f_ex = nn.Sequential( + conv2d(w_in, w_se, 1, bias=True), + activation_class(), + conv2d(w_se, w_in, 1, bias=True), + nn.Sigmoid(), + ) + + def forward(self, x): + return x * self.f_ex(self.avg_pool(x)) + + +class VanillaBlock(CNNBlockBase): + """Vanilla block: [3x3 conv, BN, Relu] x2.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, _params): + super().__init__(w_in, w_out, stride) + self.a = conv2d(w_in, w_out, 3, stride=stride) + self.a_bn = get_norm(norm, w_out) + self.a_af = activation_class() + self.b = conv2d(w_out, w_out, 3) + self.b_bn = get_norm(norm, w_out) + self.b_af = activation_class() + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class BasicTransform(nn.Module): + """Basic transformation: [3x3 conv, BN, Relu] x2.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, _params): + super().__init__() + self.a = conv2d(w_in, w_out, 3, stride=stride) + self.a_bn = get_norm(norm, w_out) + self.a_af = activation_class() + self.b = conv2d(w_out, w_out, 3) + self.b_bn = get_norm(norm, w_out) + self.b_bn.final_bn = True + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class ResBasicBlock(CNNBlockBase): + """Residual basic block: x + f(x), f = basic transform.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, params): + super().__init__(w_in, w_out, stride) + self.proj, self.bn = None, None + if (w_in != w_out) or (stride != 1): + self.proj = conv2d(w_in, w_out, 1, stride=stride) + self.bn = get_norm(norm, w_out) + self.f = BasicTransform(w_in, w_out, stride, 
norm, activation_class, params) + self.af = activation_class() + + def forward(self, x): + x_p = self.bn(self.proj(x)) if self.proj else x + return self.af(x_p + self.f(x)) + + +class BottleneckTransform(nn.Module): + """Bottleneck transformation: 1x1, 3x3 [+SE], 1x1.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, params): + super().__init__() + w_b = int(round(w_out * params["bot_mul"])) + w_se = int(round(w_in * params["se_r"])) + groups = w_b // params["group_w"] + self.a = conv2d(w_in, w_b, 1) + self.a_bn = get_norm(norm, w_b) + self.a_af = activation_class() + self.b = conv2d(w_b, w_b, 3, stride=stride, groups=groups) + self.b_bn = get_norm(norm, w_b) + self.b_af = activation_class() + self.se = SE(w_b, w_se, activation_class) if w_se else None + self.c = conv2d(w_b, w_out, 1) + self.c_bn = get_norm(norm, w_out) + self.c_bn.final_bn = True + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class ResBottleneckBlock(CNNBlockBase): + """Residual bottleneck block: x + f(x), f = bottleneck transform.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, params): + super().__init__(w_in, w_out, stride) + self.proj, self.bn = None, None + if (w_in != w_out) or (stride != 1): + self.proj = conv2d(w_in, w_out, 1, stride=stride) + self.bn = get_norm(norm, w_out) + self.f = BottleneckTransform(w_in, w_out, stride, norm, activation_class, params) + self.af = activation_class() + + def forward(self, x): + x_p = self.bn(self.proj(x)) if self.proj else x + return self.af(x_p + self.f(x)) + + +class AnyStage(nn.Module): + """AnyNet stage (sequence of blocks w/ the same output shape).""" + + def __init__(self, w_in, w_out, stride, d, block_class, norm, activation_class, params): + super().__init__() + for i in range(d): + block = block_class(w_in, w_out, stride, norm, activation_class, params) + self.add_module("b{}".format(i + 1), block) + stride, w_in = 1, w_out + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +class AnyNet(Backbone): + """AnyNet model. See :paper:`dds`.""" + + def __init__( + self, + *, + stem_class, + stem_width, + block_class, + depths, + widths, + group_widths, + strides, + bottleneck_ratios, + se_ratio, + activation_class, + freeze_at=0, + norm="BN", + out_features=None, + ): + """ + Args: + stem_class (callable): A callable taking 4 arguments (channels in, channels out, + normalization, callable returning an activation function) that returns another + callable implementing the stem module. + stem_width (int): The number of output channels that the stem produces. + block_class (callable): A callable taking 6 arguments (channels in, channels out, + stride, normalization, callable returning an activation function, a dict of + block-specific parameters) that returns another callable implementing the repeated + block module. + depths (list[int]): Number of blocks in each stage. + widths (list[int]): For each stage, the number of output channels of each block. + group_widths (list[int]): For each stage, the number of channels per group in group + convolution, if the block uses group convolution. + strides (list[int]): The stride that each network stage applies to its input. + bottleneck_ratios (list[float]): For each stage, the ratio of the number of bottleneck + channels to the number of block input channels (or, equivalently, output channels), + if the block uses a bottleneck. 
+        se_ratio (float): The ratio of the number of channels used inside the squeeze-excitation
+            (SE) module to its number of input channels, if the block uses SE.
+        activation_class (callable): A callable taking no arguments that returns another
+            callable implementing an activation function.
+        freeze_at (int): The number of stages at the beginning to freeze.
+            See :meth:`freeze` for detailed explanation.
+        norm (str or callable): normalization for all conv layers.
+            See :func:`layers.get_norm` for supported format.
+        out_features (list[str]): name of the layers whose outputs should
+            be returned in forward. RegNets use "stem" and "s1", "s2", etc. for the stages after
+            the stem. If None, will return the output of the last layer.
+        """
+        super().__init__()
+        self.stem = stem_class(3, stem_width, norm, activation_class)
+
+        current_stride = self.stem.stride
+        self._out_feature_strides = {"stem": current_stride}
+        self._out_feature_channels = {"stem": self.stem.out_channels}
+        self.stages_and_names = []
+        prev_w = stem_width
+
+        for i, (d, w, s, b, g) in enumerate(
+            zip(depths, widths, strides, bottleneck_ratios, group_widths)
+        ):
+            params = {"bot_mul": b, "group_w": g, "se_r": se_ratio}
+            stage = AnyStage(prev_w, w, s, d, block_class, norm, activation_class, params)
+            name = "s{}".format(i + 1)
+            self.add_module(name, stage)
+            self.stages_and_names.append((stage, name))
+            self._out_feature_strides[name] = current_stride = int(
+                current_stride * np.prod([k.stride for k in stage.children()])
+            )
+            self._out_feature_channels[name] = list(stage.children())[-1].out_channels
+            prev_w = w
+
+        self.apply(init_weights)
+
+        if out_features is None:
+            out_features = [name]
+        self._out_features = out_features
+        assert len(self._out_features)
+        children = [x[0] for x in self.named_children()]
+        for out_feature in self._out_features:
+            assert out_feature in children, "Available children: {} does not include {}".format(
+                ", ".join(children), out_feature
+            )
+        self.freeze(freeze_at)
+
+    def forward(self, x):
+        """
+        Args:
+            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
+
+        Returns:
+            dict[str->Tensor]: names and the corresponding features
+        """
+        assert x.dim() == 4, f"Model takes an input of shape (N, C, H, W). Got {x.shape} instead!"
+        outputs = {}
+        x = self.stem(x)
+        if "stem" in self._out_features:
+            outputs["stem"] = x
+        for stage, name in self.stages_and_names:
+            x = stage(x)
+            if name in self._out_features:
+                outputs[name] = x
+        return outputs
+
+    def output_shape(self):
+        return {
+            name: ShapeSpec(
+                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
+            )
+            for name in self._out_features
+        }
+
+    def freeze(self, freeze_at=0):
+        """
+        Freeze the first several stages of the model. Commonly used in fine-tuning.
+
+        Layers that produce the same feature map spatial size are defined as one
+        "stage" by :paper:`FPN`.
+
+        Args:
+            freeze_at (int): number of stages to freeze.
+                `1` means freezing the stem. `2` means freezing the stem and
+                one residual stage, etc.
+ + Returns: + nn.Module: this model itself + """ + if freeze_at >= 1: + self.stem.freeze() + for idx, (stage, _) in enumerate(self.stages_and_names, start=2): + if freeze_at >= idx: + for block in stage.children(): + block.freeze() + return self + + +def adjust_block_compatibility(ws, bs, gs): + """Adjusts the compatibility of widths, bottlenecks, and groups.""" + assert len(ws) == len(bs) == len(gs) + assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs)) + vs = [int(max(1, w * b)) for w, b in zip(ws, bs)] + gs = [int(min(g, v)) for g, v in zip(gs, vs)] + ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)] + vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)] + ws = [int(v / b) for v, b in zip(vs, bs)] + assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs)) + return ws, bs, gs + + +def generate_regnet_parameters(w_a, w_0, w_m, d, q=8): + """Generates per stage widths and depths from RegNet parameters.""" + assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0 + # Generate continuous per-block ws + ws_cont = np.arange(d) * w_a + w_0 + # Generate quantized per-block ws + ks = np.round(np.log(ws_cont / w_0) / np.log(w_m)) + ws_all = w_0 * np.power(w_m, ks) + ws_all = np.round(np.divide(ws_all, q)).astype(int) * q + # Generate per stage ws and ds (assumes ws_all are sorted) + ws, ds = np.unique(ws_all, return_counts=True) + # Compute number of actual stages and total possible stages + num_stages, total_stages = len(ws), ks.max() + 1 + # Convert numpy arrays to lists and return + ws, ds, ws_all, ws_cont = (x.tolist() for x in (ws, ds, ws_all, ws_cont)) + return ws, ds, num_stages, total_stages, ws_all, ws_cont + + +class RegNet(AnyNet): + """RegNet model. See :paper:`dds`.""" + + def __init__( + self, + *, + stem_class, + stem_width, + block_class, + depth, + w_a, + w_0, + w_m, + group_width, + stride=2, + bottleneck_ratio=1.0, + se_ratio=0.0, + activation_class=None, + freeze_at=0, + norm="BN", + out_features=None, + ): + """ + Build a RegNet from the parameterization described in :paper:`dds` Section 3.3. + + Args: + See :class:`AnyNet` for arguments that are not listed here. + depth (int): Total number of blocks in the RegNet. + w_a (float): Factor by which block width would increase prior to quantizing block widths + by stage. See :paper:`dds` Section 3.3. + w_0 (int): Initial block width. See :paper:`dds` Section 3.3. + w_m (float): Parameter controlling block width quantization. + See :paper:`dds` Section 3.3. + group_width (int): Number of channels per group in group convolution, if the block uses + group convolution. + bottleneck_ratio (float): The ratio of the number of bottleneck channels to the number + of block input channels (or, equivalently, output channels), if the block uses a + bottleneck. + stride (int): The stride that each network stage applies to its input. 
+ """ + ws, ds = generate_regnet_parameters(w_a, w_0, w_m, depth)[0:2] + ss = [stride for _ in ws] + bs = [bottleneck_ratio for _ in ws] + gs = [group_width for _ in ws] + ws, bs, gs = adjust_block_compatibility(ws, bs, gs) + + def default_activation_class(): + return nn.ReLU(inplace=True) + + super().__init__( + stem_class=stem_class, + stem_width=stem_width, + block_class=block_class, + depths=ds, + widths=ws, + strides=ss, + group_widths=gs, + bottleneck_ratios=bs, + se_ratio=se_ratio, + activation_class=default_activation_class + if activation_class is None + else activation_class, + freeze_at=freeze_at, + norm=norm, + out_features=out_features, + ) diff --git a/data_processing/detectron2/detectron2/modeling/backbone/resnet.py b/data_processing/detectron2/detectron2/modeling/backbone/resnet.py new file mode 100644 index 0000000..5b8e842 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/resnet.py @@ -0,0 +1,694 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.layers import ( + CNNBlockBase, + Conv2d, + DeformConv, + ModulatedDeformConv, + ShapeSpec, + get_norm, +) + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY + +__all__ = [ + "ResNetBlockBase", + "BasicBlock", + "BottleneckBlock", + "DeformBottleneckBlock", + "BasicStem", + "ResNet", + "make_stage", + "build_resnet_backbone", +] + + +class BasicBlock(CNNBlockBase): + """ + The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`, + with two 3x3 conv layers and a projection shortcut if needed. + """ + + def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"): + """ + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + stride (int): Stride for the first conv. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. + """ + super().__init__(in_channels, out_channels, stride) + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + self.conv1 = Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + self.conv2 = Conv2d( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + out = self.conv2(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class BottleneckBlock(CNNBlockBase): + """ + The standard bottleneck residual block used by ResNet-50, 101 and 152 + defined in :paper:`ResNet`. It contains 3 conv layers with kernels + 1x1, 3x3, 1x1, and a projection shortcut if needed. + """ + + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + ): + """ + Args: + bottleneck_channels (int): number of output channels for the 3x3 + "bottleneck" conv layers. 
+ num_groups (int): number of groups for the 3x3 conv layer. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. + stride_in_1x1 (bool): when stride>1, whether to put stride in the + first 1x1 convolution or the bottleneck 3x3 convolution. + dilation (int): the dilation rate of the 3x3 conv layer. + """ + super().__init__(in_channels, out_channels, stride) + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + # The original MSRA ResNet models have stride in the first 1x1 conv + # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have + # stride in the 3x3 conv + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + # Zero-initialize the last normalization in each residual branch, + # so that at the beginning, the residual branch starts with zeros, + # and each residual block behaves like an identity. + # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "For BN layers, the learnable scaling coefficient γ is initialized + # to be 1, except for each residual block's last BN + # where γ is initialized to be 0." + + # nn.init.constant_(self.conv3.norm.weight, 0) + # TODO this somehow hurts performance when training GN models from scratch. + # Add it as an option when we need to use this code to train a backbone. + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + out = self.conv2(out) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class DeformBottleneckBlock(CNNBlockBase): + """ + Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv ` + in the 3x3 convolution. 
+ """ + + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + deform_modulated=False, + deform_num_groups=1, + ): + super().__init__(in_channels, out_channels, stride) + self.deform_modulated = deform_modulated + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + if deform_modulated: + deform_conv_op = ModulatedDeformConv + # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size + offset_channels = 27 + else: + deform_conv_op = DeformConv + offset_channels = 18 + + self.conv2_offset = Conv2d( + bottleneck_channels, + offset_channels * deform_num_groups, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + dilation=dilation, + ) + self.conv2 = deform_conv_op( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + deformable_groups=deform_num_groups, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + nn.init.constant_(self.conv2_offset.weight, 0) + nn.init.constant_(self.conv2_offset.bias, 0) + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + if self.deform_modulated: + offset_mask = self.conv2_offset(out) + offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) + offset = torch.cat((offset_x, offset_y), dim=1) + mask = mask.sigmoid() + out = self.conv2(out, offset, mask) + else: + offset = self.conv2_offset(out) + out = self.conv2(out, offset) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class BasicStem(CNNBlockBase): + """ + The standard ResNet stem (layers before the first residual block), + with a conv, relu and max_pool. + """ + + def __init__(self, in_channels=3, out_channels=64, norm="BN"): + """ + Args: + norm (str or callable): norm after the first conv layer. + See :func:`layers.get_norm` for supported format. + """ + super().__init__(in_channels, out_channels, 4) + self.in_channels = in_channels + self.conv1 = Conv2d( + in_channels, + out_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False, + norm=get_norm(norm, out_channels), + ) + weight_init.c2_msra_fill(self.conv1) + + def forward(self, x): + x = self.conv1(x) + x = F.relu_(x) + x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) + return x + + +class ResNet(Backbone): + """ + Implement :paper:`ResNet`. + """ + + def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0): + """ + Args: + stem (nn.Module): a stem module + stages (list[list[CNNBlockBase]]): several (typically 4) stages, + each contains multiple :class:`CNNBlockBase`. 
+ num_classes (None or int): if None, will not perform classification. + Otherwise, will create a linear layer. + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "linear", or "res2" ... + If None, will return the output of the last layer. + freeze_at (int): The number of stages at the beginning to freeze. + see :meth:`freeze` for detailed explanation. + """ + super().__init__() + self.stem = stem + self.num_classes = num_classes + + current_stride = self.stem.stride + self._out_feature_strides = {"stem": current_stride} + self._out_feature_channels = {"stem": self.stem.out_channels} + + self.stage_names, self.stages = [], [] + + if out_features is not None: + # Avoid keeping unused layers in this module. They consume extra memory + # and may cause allreduce to fail + num_stages = max( + [{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features] + ) + stages = stages[:num_stages] + for i, blocks in enumerate(stages): + assert len(blocks) > 0, len(blocks) + for block in blocks: + assert isinstance(block, CNNBlockBase), block + + name = "res" + str(i + 2) + stage = nn.Sequential(*blocks) + + self.add_module(name, stage) + self.stage_names.append(name) + self.stages.append(stage) + + self._out_feature_strides[name] = current_stride = int( + current_stride * np.prod([k.stride for k in blocks]) + ) + self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels + self.stage_names = tuple(self.stage_names) # Make it static for scripting + + if num_classes is not None: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.linear = nn.Linear(curr_channels, num_classes) + + # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "The 1000-way fully-connected layer is initialized by + # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." + nn.init.normal_(self.linear.weight, std=0.01) + name = "linear" + + if out_features is None: + out_features = [name] + self._out_features = out_features + assert len(self._out_features) + children = [x[0] for x in self.named_children()] + for out_feature in self._out_features: + assert out_feature in children, "Available children: {}".format(", ".join(children)) + self.freeze(freeze_at) + + def forward(self, x): + """ + Args: + x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. + + Returns: + dict[str->Tensor]: names and the corresponding features + """ + assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!" + outputs = {} + x = self.stem(x) + if "stem" in self._out_features: + outputs["stem"] = x + for name, stage in zip(self.stage_names, self.stages): + x = stage(x) + if name in self._out_features: + outputs[name] = x + if self.num_classes is not None: + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.linear(x) + if "linear" in self._out_features: + outputs["linear"] = x + return outputs + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + def freeze(self, freeze_at=0): + """ + Freeze the first several stages of the ResNet. Commonly used in + fine-tuning. + + Layers that produce the same feature map spatial size are defined as one + "stage" by :paper:`FPN`. + + Args: + freeze_at (int): number of stages to freeze. + `1` means freezing the stem. `2` means freezing the stem and + one residual stage, etc. 
+ + Returns: + nn.Module: this ResNet itself + """ + if freeze_at >= 1: + self.stem.freeze() + for idx, stage in enumerate(self.stages, start=2): + if freeze_at >= idx: + for block in stage.children(): + block.freeze() + return self + + @staticmethod + def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs): + """ + Create a list of blocks of the same type that forms one ResNet stage. + + Args: + block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this + stage. A module of this type must not change spatial resolution of inputs unless its + stride != 1. + num_blocks (int): number of blocks in this stage + in_channels (int): input channels of the entire stage. + out_channels (int): output channels of **every block** in the stage. + kwargs: other arguments passed to the constructor of + `block_class`. If the argument name is "xx_per_block", the + argument is a list of values to be passed to each block in the + stage. Otherwise, the same argument is passed to every block + in the stage. + + Returns: + list[CNNBlockBase]: a list of block module. + + Examples: + :: + stage = ResNet.make_stage( + BottleneckBlock, 3, in_channels=16, out_channels=64, + bottleneck_channels=16, num_groups=1, + stride_per_block=[2, 1, 1], + dilations_per_block=[1, 1, 2] + ) + + Usually, layers that produce the same feature map spatial size are defined as one + "stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should + all be 1. + """ + blocks = [] + for i in range(num_blocks): + curr_kwargs = {} + for k, v in kwargs.items(): + if k.endswith("_per_block"): + assert len(v) == num_blocks, ( + f"Argument '{k}' of make_stage should have the " + f"same length as num_blocks={num_blocks}." + ) + newk = k[: -len("_per_block")] + assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" + curr_kwargs[newk] = v[i] + else: + curr_kwargs[k] = v + + blocks.append( + block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs) + ) + in_channels = out_channels + return blocks + + @staticmethod + def make_default_stages(depth, block_class=None, **kwargs): + """ + Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152). + If it doesn't create the ResNet variant you need, please use :meth:`make_stage` + instead for fine-grained customization. + + Args: + depth (int): depth of ResNet + block_class (type): the CNN block class. Has to accept + `bottleneck_channels` argument for depth > 50. + By default it is BasicBlock or BottleneckBlock, based on the + depth. + kwargs: + other arguments to pass to `make_stage`. Should not contain + stride and channels, as they are predefined for each depth. + + Returns: + list[list[CNNBlockBase]]: modules in all stages; see arguments of + :class:`ResNet.__init__`. 
+ """ + num_blocks_per_stage = { + 18: [2, 2, 2, 2], + 34: [3, 4, 6, 3], + 50: [3, 4, 6, 3], + 101: [3, 4, 23, 3], + 152: [3, 8, 36, 3], + }[depth] + if block_class is None: + block_class = BasicBlock if depth < 50 else BottleneckBlock + if depth < 50: + in_channels = [64, 64, 128, 256] + out_channels = [64, 128, 256, 512] + else: + in_channels = [64, 256, 512, 1024] + out_channels = [256, 512, 1024, 2048] + ret = [] + for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels): + if depth >= 50: + kwargs["bottleneck_channels"] = o // 4 + ret.append( + ResNet.make_stage( + block_class=block_class, + num_blocks=n, + stride_per_block=[s] + [1] * (n - 1), + in_channels=i, + out_channels=o, + **kwargs, + ) + ) + return ret + + +ResNetBlockBase = CNNBlockBase +""" +Alias for backward compatibiltiy. +""" + + +def make_stage(*args, **kwargs): + """ + Deprecated alias for backward compatibiltiy. + """ + return ResNet.make_stage(*args, **kwargs) + + +@BACKBONE_REGISTRY.register() +def build_resnet_backbone(cfg, input_shape): + """ + Create a ResNet instance from config. + + Returns: + ResNet: a :class:`ResNet` instance. + """ + # need registration of new blocks/stems? + norm = cfg.MODEL.RESNETS.NORM + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + + # fmt: off + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + # fmt: on + assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) + + num_blocks_per_stage = { + 18: [2, 2, 2, 2], + 34: [3, 4, 6, 3], + 50: [3, 4, 6, 3], + 101: [3, 4, 23, 3], + 152: [3, 8, 36, 3], + }[depth] + + if depth in [18, 34]: + assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34" + assert not any( + deform_on_per_stage + ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34" + assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34" + assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34" + + stages = [] + + for idx, stage_idx in enumerate(range(2, 6)): + # res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper + dilation = res5_dilation if stage_idx == 5 else 1 + first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), + "in_channels": in_channels, + "out_channels": out_channels, + "norm": norm, + } + # Use BasicBlock for R18 and R34. 
+ if depth in [18, 34]: + stage_kargs["block_class"] = BasicBlock + else: + stage_kargs["bottleneck_channels"] = bottleneck_channels + stage_kargs["stride_in_1x1"] = stride_in_1x1 + stage_kargs["dilation"] = dilation + stage_kargs["num_groups"] = num_groups + if deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + blocks = ResNet.make_stage(**stage_kargs) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at) diff --git a/data_processing/detectron2/detectron2/modeling/backbone/swin.py b/data_processing/detectron2/detectron2/modeling/backbone/swin.py new file mode 100644 index 0000000..780b6fc --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/swin.py @@ -0,0 +1,695 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Implementation of Swin models from :paper:`swin`. + +This code is adapted from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py with minimal modifications. # noqa +-------------------------------------------------------- +Swin Transformer +Copyright (c) 2021 Microsoft +Licensed under The MIT License [see LICENSE for details] +Written by Ze Liu, Yutong Lin, Yixuan Wei +-------------------------------------------------------- +LICENSE: https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/461e003166a8083d0b620beacd4662a2df306bd6/LICENSE +""" + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from detectron2.modeling.backbone.backbone import Backbone + +_to_2tuple = nn.modules.utils._ntuple(2) + + +class Mlp(nn.Module): + """Multilayer perceptron.""" + + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class 
WindowAttention(nn.Module):
+    """Window based multi-head self attention (W-MSA) module with relative position bias.
+    It supports both shifted and non-shifted windows.
+    Args:
+        dim (int): Number of input channels.
+        window_size (tuple[int]): The height and width of the window.
+        num_heads (int): Number of attention heads.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value.
+            Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
+        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
+    """
+
+    def __init__(
+        self,
+        dim,
+        window_size,
+        num_heads,
+        qkv_bias=True,
+        qk_scale=None,
+        attn_drop=0.0,
+        proj_drop=0.0,
+    ):
+
+        super().__init__()
+        self.dim = dim
+        self.window_size = window_size  # Wh, Ww
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = qk_scale or head_dim**-0.5
+
+        # define a parameter table of relative position bias
+        self.relative_position_bias_table = nn.Parameter(
+            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
+        )  # 2*Wh-1 * 2*Ww-1, nH
+
+        # get pair-wise relative position index for each token inside the window
+        coords_h = torch.arange(self.window_size[0])
+        coords_w = torch.arange(self.window_size[1])
+        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
+        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
+        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
+        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
+        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
+        relative_coords[:, :, 1] += self.window_size[1] - 1
+        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
+        self.register_buffer("relative_position_index", relative_position_index)
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+        nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02)
+        self.softmax = nn.Softmax(dim=-1)
+
+    def forward(self, x, mask=None):
+        """Forward function.
+ Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B_, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1) + ].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 + ) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1 + ).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """Swin Transformer Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__( + self, + dim, + num_heads, + window_size=7, + shift_size=0, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=_to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + + if drop_path > 0.0: + from timm.models.layers import DropPath + + self.drop_path = DropPath(drop_path) + else: + self.drop_path = nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop + ) + + self.H = None + self.W = None + + def forward(self, x, mask_matrix): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + mask_matrix: Attention mask for cyclic shift. 
+ """ + B, L, C = x.shape + H, W = self.H, self.W + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size + ) # nW*B, window_size, window_size, C + x_windows = x_windows.view( + -1, self.window_size * self.window_size, C + ) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + """Patch Merging Layer + Args: + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + """ + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + + # padding + pad_input = (H % 2 == 1) or (W % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + """A basic Swin Transformer layer for one stage. + Args: + dim (int): Number of feature channels + depth (int): Depths of this stage. + num_heads (int): Number of attention head. + window_size (int): Local window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. 
+ Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__( + self, + dim, + depth, + num_heads, + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False, + ): + super().__init__() + self.window_size = window_size + self.shift_size = window_size // 2 + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + """ + + # calculate attention mask for SW-MSA + Hp = int(np.ceil(H / self.window_size)) * self.window_size + Wp = int(np.ceil(W / self.window_size)) * self.window_size + img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 + h_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + w_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size + ) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( + attn_mask == 0, float(0.0) + ) + + for blk in self.blocks: + blk.H, blk.W = H, W + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, attn_mask) + else: + x = blk(x, attn_mask) + if self.downsample is not None: + x_down = self.downsample(x, H, W) + Wh, Ww = (H + 1) // 2, (W + 1) // 2 + return x, H, W, x_down, Wh, Ww + else: + return x, H, W, x, H, W + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding + Args: + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. 
Default: None
+    """
+
+    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+        super().__init__()
+        patch_size = _to_2tuple(patch_size)
+        self.patch_size = patch_size
+
+        self.in_chans = in_chans
+        self.embed_dim = embed_dim
+
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+        if norm_layer is not None:
+            self.norm = norm_layer(embed_dim)
+        else:
+            self.norm = None
+
+    def forward(self, x):
+        """Forward function."""
+        # padding
+        _, _, H, W = x.size()
+        if W % self.patch_size[1] != 0:
+            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
+        if H % self.patch_size[0] != 0:
+            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
+
+        x = self.proj(x)  # B C Wh Ww
+        if self.norm is not None:
+            Wh, Ww = x.size(2), x.size(3)
+            x = x.flatten(2).transpose(1, 2)
+            x = self.norm(x)
+            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
+
+        return x
+
+
+class SwinTransformer(Backbone):
+    """Swin Transformer backbone.
+    A PyTorch impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted
+    Windows` - https://arxiv.org/pdf/2103.14030
+    Args:
+        pretrain_img_size (int): Input image size for training the pretrained model,
+            used in absolute position embedding. Default 224.
+        patch_size (int | tuple(int)): Patch size. Default: 4.
+        in_chans (int): Number of input image channels. Default: 3.
+        embed_dim (int): Number of linear projection output channels. Default: 96.
+        depths (tuple[int]): Depths of each Swin Transformer stage.
+        num_heads (tuple[int]): Number of attention heads of each stage.
+        window_size (int): Window size. Default: 7.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
+        drop_rate (float): Dropout rate.
+        attn_drop_rate (float): Attention dropout rate. Default: 0.
+        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
+        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
+        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
+        out_indices (Sequence[int]): Output from which stages.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            -1 means not freezing any parameters.
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+ """ + + def __init__( + self, + pretrain_img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.2, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + use_checkpoint=False, + ): + super().__init__() + + self.pretrain_img_size = pretrain_img_size + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None, + ) + + # absolute position embedding + if self.ape: + pretrain_img_size = _to_2tuple(pretrain_img_size) + patch_size = _to_2tuple(patch_size) + patches_resolution = [ + pretrain_img_size[0] // patch_size[0], + pretrain_img_size[1] // patch_size[1], + ] + + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) + ) + nn.init.trunc_normal_(self.absolute_pos_embed, std=0.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2**i_layer), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + ) + self.layers.append(layer) + + num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)] + self.num_features = num_features + + # add a norm layer for each output + for i_layer in out_indices: + layer = norm_layer(num_features[i_layer]) + layer_name = f"norm{i_layer}" + self.add_module(layer_name, layer) + + self._freeze_stages() + self._out_features = ["p{}".format(i) for i in self.out_indices] + self._out_feature_channels = { + "p{}".format(i): self.embed_dim * 2**i for i in self.out_indices + } + self._out_feature_strides = {"p{}".format(i): 2 ** (i + 2) for i in self.out_indices} + self._size_devisibility = 32 + + self.apply(self._init_weights) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + if self.frozen_stages >= 1 and self.ape: + self.absolute_pos_embed.requires_grad = False + + if self.frozen_stages >= 2: + self.pos_drop.eval() + for i in range(0, self.frozen_stages - 1): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @property + def size_divisibility(self): + return self._size_divisibility + + def forward(self, x): + 
"""Forward function.""" + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + if self.ape: + # interpolate the position embedding to the corresponding size + absolute_pos_embed = F.interpolate( + self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" + ) + x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C + else: + x = x.flatten(2).transpose(1, 2) + x = self.pos_drop(x) + + outs = {} + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + + if i in self.out_indices: + norm_layer = getattr(self, f"norm{i}") + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs["p{}".format(i)] = out + + return outs diff --git a/data_processing/detectron2/detectron2/modeling/backbone/utils.py b/data_processing/detectron2/detectron2/modeling/backbone/utils.py new file mode 100644 index 0000000..2b89a4c --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/utils.py @@ -0,0 +1,186 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = [ + "window_partition", + "window_unpartition", + "add_decomposed_rel_pos", + "get_abs_pos", + "PatchEmbed", +] + + +def window_partition(x, window_size): + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition(windows, window_size, pad_hw, hw): + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size, k_size, rel_pos): + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. 
+ rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size): + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. + """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn = ( + attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + ).view(B, q_h * q_w, k_h * k_w) + + return attn + + +def get_abs_pos(abs_pos, has_cls_token, hw): + """ + Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token + dimension for the original embeddings. + Args: + abs_pos (Tensor): absolute positional embeddings with (1, num_position, C). + has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token. + hw (Tuple): size of input image tokens. + + Returns: + Absolute positional embeddings after processing with shape (1, H, W, C) + """ + h, w = hw + if has_cls_token: + abs_pos = abs_pos[:, 1:] + xy_num = abs_pos.shape[1] + size = int(math.sqrt(xy_num)) + assert size * size == xy_num + + if size != h or size != w: + new_abs_pos = F.interpolate( + abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2), + size=(h, w), + mode="bicubic", + align_corners=False, + ) + + return new_abs_pos.permute(0, 2, 3, 1) + else: + return abs_pos.reshape(1, h, w, -1) + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768 + ): + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. 
+ """ + super().__init__() + + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x): + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/data_processing/detectron2/detectron2/modeling/backbone/vit.py b/data_processing/detectron2/detectron2/modeling/backbone/vit.py new file mode 100644 index 0000000..31cc28a --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/backbone/vit.py @@ -0,0 +1,524 @@ +import logging +import math +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn as nn + +from detectron2.layers import CNNBlockBase, Conv2d, get_norm +from detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous + +from .backbone import Backbone +from .utils import ( + PatchEmbed, + add_decomposed_rel_pos, + get_abs_pos, + window_partition, + window_unpartition, +) + +logger = logging.getLogger(__name__) + + +__all__ = ["ViT", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"] + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=True, + use_rel_pos=False, + rel_pos_zero_init=True, + input_size=None, + ): + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool: If True, add a learnable bias to query, key, value. + rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + if not rel_pos_zero_init: + nn.init.trunc_normal_(self.rel_pos_h, std=0.02) + nn.init.trunc_normal_(self.rel_pos_w, std=0.02) + + def forward(self, x): + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) + + attn = attn.softmax(dim=-1) + x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +class ResBottleneckBlock(CNNBlockBase): + """ + The standard bottleneck residual block without the last activation layer. + It contains 3 conv layers with kernels 1x1, 3x3, 1x1. + """ + + def __init__( + self, + in_channels, + out_channels, + bottleneck_channels, + norm="LN", + act_layer=nn.GELU, + ): + """ + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + bottleneck_channels (int): number of output channels for the 3x3 + "bottleneck" conv layers. + norm (str or callable): normalization for all conv layers. 
+ See :func:`layers.get_norm` for supported format. + act_layer (callable): activation for all conv layers. + """ + super().__init__(in_channels, out_channels, 1) + + self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False) + self.norm1 = get_norm(norm, bottleneck_channels) + self.act1 = act_layer() + + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + 3, + padding=1, + bias=False, + ) + self.norm2 = get_norm(norm, bottleneck_channels) + self.act2 = act_layer() + + self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False) + self.norm3 = get_norm(norm, out_channels) + + for layer in [self.conv1, self.conv2, self.conv3]: + weight_init.c2_msra_fill(layer) + for layer in [self.norm1, self.norm2]: + layer.weight.data.fill_(1.0) + layer.bias.data.zero_() + # zero init last norm layer. + self.norm3.weight.data.zero_() + self.norm3.bias.data.zero_() + + def forward(self, x): + out = x + for layer in self.children(): + out = layer(out) + + out = x + out + return out + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + use_rel_pos=False, + rel_pos_zero_init=True, + window_size=0, + use_residual_block=False, + input_size=None, + ): + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then not + use window attention. + use_residual_block (bool): If True, use a residual block after the MLP block. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. 
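+
+        Example (illustrative sketch; the sizes are assumptions)::
+
+            blk = Block(dim=768, num_heads=12, window_size=14, input_size=(64, 64))
+            y = blk(torch.zeros(2, 64, 64, 768))  # output keeps the (B, H, W, C) shape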
+ """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + ) + + from timm.models.layers import DropPath, Mlp + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer) + + self.window_size = window_size + + self.use_residual_block = use_residual_block + if use_residual_block: + # Use a residual block with bottleneck channel as dim // 2 + self.residual = ResBottleneckBlock( + in_channels=dim, + out_channels=dim, + bottleneck_channels=dim // 2, + norm="LN", + act_layer=act_layer, + ) + + def forward(self, x): + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + if self.use_residual_block: + x = self.residual(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + + return x + + +class ViT(Backbone): + """ + This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`. + "Exploring Plain Vision Transformer Backbones for Object Detection", + https://arxiv.org/abs/2203.16527 + """ + + def __init__( + self, + img_size=1024, + patch_size=16, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + use_abs_pos=True, + use_rel_pos=False, + rel_pos_zero_init=True, + window_size=0, + window_block_indexes=(), + residual_block_indexes=(), + use_act_checkpoint=False, + pretrain_img_size=224, + pretrain_use_cls_token=True, + out_feature="last_feat", + ): + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path_rate (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + window_block_indexes (list): Indexes for blocks using window attention. + residual_block_indexes (list): Indexes for blocks using conv propagation. + use_act_checkpoint (bool): If True, use activation checkpointing. + pretrain_img_size (int): input image size for pretraining models. + pretrain_use_cls_token (bool): If True, pretrainig models use class token. + out_feature (str): name of the feature from the last block. 
+ """ + super().__init__() + self.pretrain_use_cls_token = pretrain_use_cls_token + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size) + num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim)) + else: + self.pos_embed = None + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i in window_block_indexes else 0, + use_residual_block=i in residual_block_indexes, + input_size=(img_size // patch_size, img_size // patch_size), + ) + if use_act_checkpoint: + # TODO: use torch.utils.checkpoint + from fairscale.nn.checkpoint import checkpoint_wrapper + + block = checkpoint_wrapper(block) + self.blocks.append(block) + + self._out_feature_channels = {out_feature: embed_dim} + self._out_feature_strides = {out_feature: patch_size} + self._out_features = [out_feature] + + if self.pos_embed is not None: + nn.init.trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + x = self.patch_embed(x) + if self.pos_embed is not None: + x = x + get_abs_pos( + self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2]) + ) + + for blk in self.blocks: + x = blk(x) + + outputs = {self._out_features[0]: x.permute(0, 3, 1, 2)} + return outputs + + +class SimpleFeaturePyramid(Backbone): + """ + This module implements SimpleFeaturePyramid in :paper:`vitdet`. + It creates pyramid features built on top of the input feature map. + """ + + def __init__( + self, + net, + in_feature, + out_channels, + scale_factors, + top_block=None, + norm="LN", + square_pad=0, + ): + """ + Args: + net (Backbone): module representing the subnetwork backbone. + Must be a subclass of :class:`Backbone`. + in_feature (str): names of the input feature maps coming + from the net. + out_channels (int): number of channels in the output feature maps. + scale_factors (list[float]): list of scaling factors to upsample or downsample + the input features for creating pyramid features. + top_block (nn.Module or None): if provided, an extra operation will + be performed on the output of the last (smallest resolution) + pyramid output, and the result will extend the result list. The top_block + further downsamples the feature map. It must have an attribute + "num_levels", meaning the number of extra pyramid levels added by + this block, and "in_feature", which is a string representing + its input feature (e.g., p5). + norm (str): the normalization to use. + square_pad (int): If > 0, require input images to be padded to specific square size. 
+ """ + super(SimpleFeaturePyramid, self).__init__() + assert isinstance(net, Backbone) + + self.scale_factors = scale_factors + + input_shapes = net.output_shape() + strides = [int(input_shapes[in_feature].stride / scale) for scale in scale_factors] + _assert_strides_are_log2_contiguous(strides) + + dim = input_shapes[in_feature].channels + self.stages = [] + use_bias = norm == "" + for idx, scale in enumerate(scale_factors): + out_dim = dim + if scale == 4.0: + layers = [ + nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2), + get_norm(norm, dim // 2), + nn.GELU(), + nn.ConvTranspose2d(dim // 2, dim // 4, kernel_size=2, stride=2), + ] + out_dim = dim // 4 + elif scale == 2.0: + layers = [nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2)] + out_dim = dim // 2 + elif scale == 1.0: + layers = [] + elif scale == 0.5: + layers = [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + raise NotImplementedError(f"scale_factor={scale} is not supported yet.") + + layers.extend( + [ + Conv2d( + out_dim, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + ), + Conv2d( + out_channels, + out_channels, + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + ), + ] + ) + layers = nn.Sequential(*layers) + + stage = int(math.log2(strides[idx])) + self.add_module(f"simfp_{stage}", layers) + self.stages.append(layers) + + self.net = net + self.in_feature = in_feature + self.top_block = top_block + # Return feature names are "p", like ["p2", "p3", ..., "p6"] + self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides} + # top block output feature maps. + if self.top_block is not None: + for s in range(stage, stage + self.top_block.num_levels): + self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) + + self._out_features = list(self._out_feature_strides.keys()) + self._out_feature_channels = {k: out_channels for k in self._out_features} + self._size_divisibility = strides[-1] + self._square_pad = square_pad + + @property + def padding_constraints(self): + return { + "size_divisiblity": self._size_divisibility, + "square_size": self._square_pad, + } + + def forward(self, x): + """ + Args: + x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. + + Returns: + dict[str->Tensor]: + mapping from feature map name to pyramid feature map tensor + in high to low resolution order. Returned feature names follow the FPN + convention: "p", where stage has stride = 2 ** stage e.g., + ["p2", "p3", ..., "p6"]. + """ + bottom_up_features = self.net(x) + features = bottom_up_features[self.in_feature] + results = [] + + for stage in self.stages: + results.append(stage(features)) + + if self.top_block is not None: + if self.top_block.in_feature in bottom_up_features: + top_block_in_feature = bottom_up_features[self.top_block.in_feature] + else: + top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] + results.extend(self.top_block(top_block_in_feature)) + assert len(self._out_features) == len(results) + return {f: res for f, res in zip(self._out_features, results)} + + +def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12): + """ + Calculate lr decay rate for different ViT blocks. + Args: + name (string): parameter name. + lr_decay_rate (float): base lr decay rate. + num_layers (int): number of ViT blocks. + + Returns: + lr decay rate for the given parameter. 
+ """ + layer_id = num_layers + 1 + if name.startswith("backbone"): + if ".pos_embed" in name or ".patch_embed" in name: + layer_id = 0 + elif ".blocks." in name and ".residual." not in name: + layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1 + + return lr_decay_rate ** (num_layers + 1 - layer_id) diff --git a/data_processing/detectron2/detectron2/modeling/box_regression.py b/data_processing/detectron2/detectron2/modeling/box_regression.py new file mode 100644 index 0000000..b24c123 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/box_regression.py @@ -0,0 +1,369 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List, Tuple, Union +import torch +from fvcore.nn import giou_loss, smooth_l1_loss +from torch.nn import functional as F + +from detectron2.layers import cat, ciou_loss, diou_loss +from detectron2.structures import Boxes + +# Value for clamping large dw and dh predictions. The heuristic is that we clamp +# such that dw and dh are no larger than what would transform a 16px box into a +# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). +_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16) + + +__all__ = ["Box2BoxTransform", "Box2BoxTransformRotated", "Box2BoxTransformLinear"] + + +@torch.jit.script +class Box2BoxTransform(object): + """ + The box-to-box transform defined in R-CNN. The transformation is parameterized + by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height + by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). + """ + + def __init__( + self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP + ): + """ + Args: + weights (4-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set + such that the deltas have unit variance; now they are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. + """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): source boxes, e.g., object proposals + target_boxes (Tensor): target of the transformation, e.g., ground-truth + boxes. 
+ """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_widths = src_boxes[:, 2] - src_boxes[:, 0] + src_heights = src_boxes[:, 3] - src_boxes[:, 1] + src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths + src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights + + target_widths = target_boxes[:, 2] - target_boxes[:, 0] + target_heights = target_boxes[:, 3] - target_boxes[:, 1] + target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths + target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights + + wx, wy, ww, wh = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + + deltas = torch.stack((dx, dy, dw, dh), dim=1) + assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. + deltas[i] represents k potentially different class-specific + box transformations for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 4) + """ + deltas = deltas.float() # ensure fp32 for decoding precision + boxes = boxes.to(deltas.dtype) + + widths = boxes[:, 2] - boxes[:, 0] + heights = boxes[:, 3] - boxes[:, 1] + ctr_x = boxes[:, 0] + 0.5 * widths + ctr_y = boxes[:, 1] + 0.5 * heights + + wx, wy, ww, wh = self.weights + dx = deltas[:, 0::4] / wx + dy = deltas[:, 1::4] / wy + dw = deltas[:, 2::4] / ww + dh = deltas[:, 3::4] / wh + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] + pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] + pred_w = torch.exp(dw) * widths[:, None] + pred_h = torch.exp(dh) * heights[:, None] + + x1 = pred_ctr_x - 0.5 * pred_w + y1 = pred_ctr_y - 0.5 * pred_h + x2 = pred_ctr_x + 0.5 * pred_w + y2 = pred_ctr_y + 0.5 * pred_h + pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1) + return pred_boxes.reshape(deltas.shape) + + +@torch.jit.script +class Box2BoxTransformRotated(object): + """ + The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized + by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height + by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height), + and rotate a box's angle by da (radians). + Note: angles of deltas are in radians while angles of boxes are in degrees. + """ + + def __init__( + self, + weights: Tuple[float, float, float, float, float], + scale_clamp: float = _DEFAULT_SCALE_CLAMP, + ): + """ + Args: + weights (5-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh, da) deltas. These are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. + """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used + to transform the `src_boxes` into the `target_boxes`. 
That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): Nx5 source boxes, e.g., object proposals + target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1) + + target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind( + target_boxes, dim=1 + ) + + wx, wy, ww, wh, wa = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + # Angles of deltas are in radians while angles of boxes are in degrees. + # the conversion to radians serve as a way to normalize the values + da = target_angles - src_angles + da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180) + da *= wa * math.pi / 180.0 + + deltas = torch.stack((dx, dy, dw, dh, da), dim=1) + assert ( + (src_widths > 0).all().item() + ), "Input boxes to Box2BoxTransformRotated are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*5). + deltas[i] represents box transformation for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 5) + """ + assert deltas.shape[1] % 5 == 0 and boxes.shape[1] == 5 + + boxes = boxes.to(deltas.dtype).unsqueeze(2) + + ctr_x = boxes[:, 0] + ctr_y = boxes[:, 1] + widths = boxes[:, 2] + heights = boxes[:, 3] + angles = boxes[:, 4] + + wx, wy, ww, wh, wa = self.weights + + dx = deltas[:, 0::5] / wx + dy = deltas[:, 1::5] / wy + dw = deltas[:, 2::5] / ww + dh = deltas[:, 3::5] / wh + da = deltas[:, 4::5] / wa + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0::5] = dx * widths + ctr_x # x_ctr + pred_boxes[:, 1::5] = dy * heights + ctr_y # y_ctr + pred_boxes[:, 2::5] = torch.exp(dw) * widths # width + pred_boxes[:, 3::5] = torch.exp(dh) * heights # height + + # Following original RRPN implementation, + # angles of deltas are in radians while angles of boxes are in degrees. + pred_angle = da * 180.0 / math.pi + angles + pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180) + + pred_boxes[:, 4::5] = pred_angle + + return pred_boxes + + +class Box2BoxTransformLinear(object): + """ + The linear box-to-box transform defined in FCOS. The transformation is parameterized + by the distance from the center of (square) src box to 4 edges of the target box. + """ + + def __init__(self, normalize_by_size=True): + """ + Args: + normalize_by_size: normalize deltas by the size of src (anchor) boxes. + """ + self.normalize_by_size = normalize_by_size + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx1, dy1, dx2, dy2) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true. + The center of src must be inside target boxes. 
+ + Args: + src_boxes (Tensor): square source boxes, e.g., anchors + target_boxes (Tensor): target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_ctr_x = 0.5 * (src_boxes[:, 0] + src_boxes[:, 2]) + src_ctr_y = 0.5 * (src_boxes[:, 1] + src_boxes[:, 3]) + + target_l = src_ctr_x - target_boxes[:, 0] + target_t = src_ctr_y - target_boxes[:, 1] + target_r = target_boxes[:, 2] - src_ctr_x + target_b = target_boxes[:, 3] - src_ctr_y + + deltas = torch.stack((target_l, target_t, target_r, target_b), dim=1) + if self.normalize_by_size: + stride_w = src_boxes[:, 2] - src_boxes[:, 0] + stride_h = src_boxes[:, 3] - src_boxes[:, 1] + strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1) + deltas = deltas / strides + + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx1, dy1, dx2, dy2) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. + deltas[i] represents k potentially different class-specific + box transformations for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 4) + """ + # Ensure the output is a valid box. See Sec 2.1 of https://arxiv.org/abs/2006.09214 + deltas = F.relu(deltas) + boxes = boxes.to(deltas.dtype) + + ctr_x = 0.5 * (boxes[:, 0] + boxes[:, 2]) + ctr_y = 0.5 * (boxes[:, 1] + boxes[:, 3]) + if self.normalize_by_size: + stride_w = boxes[:, 2] - boxes[:, 0] + stride_h = boxes[:, 3] - boxes[:, 1] + strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1) + deltas = deltas * strides + + l = deltas[:, 0::4] + t = deltas[:, 1::4] + r = deltas[:, 2::4] + b = deltas[:, 3::4] + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0::4] = ctr_x[:, None] - l # x1 + pred_boxes[:, 1::4] = ctr_y[:, None] - t # y1 + pred_boxes[:, 2::4] = ctr_x[:, None] + r # x2 + pred_boxes[:, 3::4] = ctr_y[:, None] + b # y2 + return pred_boxes + + +def _dense_box_regression_loss( + anchors: List[Union[Boxes, torch.Tensor]], + box2box_transform: Box2BoxTransform, + pred_anchor_deltas: List[torch.Tensor], + gt_boxes: List[torch.Tensor], + fg_mask: torch.Tensor, + box_reg_loss_type="smooth_l1", + smooth_l1_beta=0.0, +): + """ + Compute loss for dense multi-level box regression. + Loss is accumulated over ``fg_mask``. + + Args: + anchors: #lvl anchor boxes, each is (HixWixA, 4) + pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4) + gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A)) + fg_mask: the foreground boolean mask of shape (N, R) to compute loss on + box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou", + "diou", "ciou". + smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to + use L1 loss. 
Only used when `box_reg_loss_type` is "smooth_l1" + """ + if isinstance(anchors[0], Boxes): + anchors = type(anchors[0]).cat(anchors).tensor # (R, 4) + else: + anchors = cat(anchors) + if box_reg_loss_type == "smooth_l1": + gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes] + gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4) + loss_box_reg = smooth_l1_loss( + cat(pred_anchor_deltas, dim=1)[fg_mask], + gt_anchor_deltas[fg_mask], + beta=smooth_l1_beta, + reduction="sum", + ) + elif box_reg_loss_type == "giou": + pred_boxes = [ + box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1) + ] + loss_box_reg = giou_loss( + torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum" + ) + elif box_reg_loss_type == "diou": + pred_boxes = [ + box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1) + ] + loss_box_reg = diou_loss( + torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum" + ) + elif box_reg_loss_type == "ciou": + pred_boxes = [ + box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1) + ] + loss_box_reg = ciou_loss( + torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum" + ) + else: + raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'") + return loss_box_reg diff --git a/data_processing/detectron2/detectron2/modeling/matcher.py b/data_processing/detectron2/detectron2/modeling/matcher.py new file mode 100644 index 0000000..c7597ca --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/matcher.py @@ -0,0 +1,127 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import List +import torch + +from detectron2.layers import nonzero_tuple + + +# TODO: the name is too general +class Matcher(object): + """ + This class assigns to each predicted "element" (e.g., a box) a ground-truth + element. Each predicted element will have exactly zero or one matches; each + ground-truth element may be matched to zero or more predicted elements. + + The matching is determined by the MxN match_quality_matrix, that characterizes + how well each (ground-truth, prediction)-pair match each other. For example, + if the elements are boxes, this matrix may contain box intersection-over-union + overlap values. + + The matcher returns (a) a vector of length N containing the index of the + ground-truth element m in [0, M) that matches to prediction n in [0, N). + (b) a vector of length N containing the labels for each prediction. + """ + + def __init__( + self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False + ): + """ + Args: + thresholds (list): a list of thresholds used to stratify predictions + into levels. + labels (list): a list of values to label predictions belonging at + each level. A label can be one of {-1, 0, 1} signifying + {ignore, negative class, positive class}, respectively. + allow_low_quality_matches (bool): if True, produce additional matches + for predictions with maximum match quality lower than high_threshold. + See set_low_quality_matches_ for more details. + + For example, + thresholds = [0.3, 0.5] + labels = [0, -1, 1] + All predictions with iou < 0.3 will be marked with 0 and + thus will be considered as false positives while training. + All predictions with 0.3 <= iou < 0.5 will be marked with -1 and + thus will be ignored. 
+ All predictions with 0.5 <= iou will be marked with 1 and + thus will be considered as true positives. + """ + # Add -inf and +inf to first and last position in thresholds + thresholds = thresholds[:] + assert thresholds[0] > 0 + thresholds.insert(0, -float("inf")) + thresholds.append(float("inf")) + # Currently torchscript does not support all + generator + assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])]) + assert all([l in [-1, 0, 1] for l in labels]) + assert len(labels) == len(thresholds) - 1 + self.thresholds = thresholds + self.labels = labels + self.allow_low_quality_matches = allow_low_quality_matches + + def __call__(self, match_quality_matrix): + """ + Args: + match_quality_matrix (Tensor[float]): an MxN tensor, containing the + pairwise quality between M ground-truth elements and N predicted + elements. All elements must be >= 0 (due to the us of `torch.nonzero` + for selecting indices in :meth:`set_low_quality_matches_`). + + Returns: + matches (Tensor[int64]): a vector of length N, where matches[i] is a matched + ground-truth index in [0, M) + match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates + whether a prediction is a true or false positive or ignored + """ + assert match_quality_matrix.dim() == 2 + if match_quality_matrix.numel() == 0: + default_matches = match_quality_matrix.new_full( + (match_quality_matrix.size(1),), 0, dtype=torch.int64 + ) + # When no gt boxes exist, we define IOU = 0 and therefore set labels + # to `self.labels[0]`, which usually defaults to background class 0 + # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds + default_match_labels = match_quality_matrix.new_full( + (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 + ) + return default_matches, default_match_labels + + assert torch.all(match_quality_matrix >= 0) + + # match_quality_matrix is M (gt) x N (predicted) + # Max over gt elements (dim 0) to find best gt candidate for each prediction + matched_vals, matches = match_quality_matrix.max(dim=0) + + match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) + + for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): + low_high = (matched_vals >= low) & (matched_vals < high) + match_labels[low_high] = l + + if self.allow_low_quality_matches: + self.set_low_quality_matches_(match_labels, match_quality_matrix) + + return matches, match_labels + + def set_low_quality_matches_(self, match_labels, match_quality_matrix): + """ + Produce additional matches for predictions that have only low-quality matches. + Specifically, for each ground-truth G find the set of predictions that have + maximum overlap with it (including ties); for each prediction in that set, if + it is unmatched, then match it to the ground-truth G. + + This function implements the RPN assignment case (i) in Sec. 3.1.2 of + :paper:`Faster R-CNN`. + """ + # For each gt, find the prediction with which it has highest quality + highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) + # Find the highest quality match available, even if it is low, including ties. + # Note that the matches qualities must be positive due to the use of + # `torch.nonzero`. + _, pred_inds_with_highest_quality = nonzero_tuple( + match_quality_matrix == highest_quality_foreach_gt[:, None] + ) + # If an anchor was labeled positive only due to a low-quality match + # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B. 
+ # This follows the implementation in Detectron, and is found to have no significant impact. + match_labels[pred_inds_with_highest_quality] = 1 diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/__init__.py b/data_processing/detectron2/detectron2/modeling/meta_arch/__init__.py new file mode 100644 index 0000000..6b06681 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +from .build import META_ARCH_REGISTRY, build_model # isort:skip + +from .panoptic_fpn import PanopticFPN + +# import all the meta_arch, so they will be registered +from .rcnn import GeneralizedRCNN, ProposalNetwork +from .dense_detector import DenseDetector +from .retinanet import RetinaNet +from .fcos import FCOS +from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head + + +__all__ = list(globals().keys()) diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/build.py b/data_processing/detectron2/detectron2/modeling/meta_arch/build.py new file mode 100644 index 0000000..3427215 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/build.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch + +from detectron2.utils.logger import _log_api_usage +from detectron2.utils.registry import Registry + +META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip +META_ARCH_REGISTRY.__doc__ = """ +Registry for meta-architectures, i.e. the whole model. + +The registered object will be called with `obj(cfg)` +and expected to return a `nn.Module` object. +""" + + +def build_model(cfg): + """ + Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. + Note that it does not load any weights from ``cfg``. + """ + meta_arch = cfg.MODEL.META_ARCHITECTURE + model = META_ARCH_REGISTRY.get(meta_arch)(cfg) + model.to(torch.device(cfg.MODEL.DEVICE)) + _log_api_usage("modeling.meta_arch." + meta_arch) + return model diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/dense_detector.py b/data_processing/detectron2/detectron2/modeling/meta_arch/dense_detector.py new file mode 100644 index 0000000..33066b6 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/dense_detector.py @@ -0,0 +1,294 @@ +import numpy as np +from typing import Dict, List, Optional, Tuple +import torch +from torch import Tensor, nn + +from detectron2.data.detection_utils import convert_image_to_rgb +from detectron2.layers import move_device_like +from detectron2.modeling import Backbone +from detectron2.structures import Boxes, ImageList, Instances +from detectron2.utils.events import get_event_storage + +from ..postprocessing import detector_postprocess + + +def permute_to_N_HWA_K(tensor, K: int): + """ + Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K) + """ + assert tensor.dim() == 4, tensor.shape + N, _, H, W = tensor.shape + tensor = tensor.view(N, -1, K, H, W) + tensor = tensor.permute(0, 3, 4, 1, 2) + tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K) + return tensor + + +class DenseDetector(nn.Module): + """ + Base class for dense detector. We define a dense detector as a fully-convolutional model that + makes per-pixel (i.e. dense) predictions. 
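+
+    Concrete detectors (e.g. :class:`RetinaNet`, :class:`FCOS`) provide
+    ``forward_training`` and ``forward_inference``; this base class only handles
+    image preprocessing, backbone feature extraction and result post-processing.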
+ """ + + def __init__( + self, + backbone: Backbone, + head: nn.Module, + head_in_features: Optional[List[str]] = None, + *, + pixel_mean, + pixel_std, + ): + """ + Args: + backbone: backbone module + head: head module + head_in_features: backbone features to use in head. Default to all backbone features. + pixel_mean (Tuple[float]): + Values to be used for image normalization (BGR order). + To train on images of different number of channels, set different mean & std. + Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] + pixel_std (Tuple[float]): + When using pre-trained models in Detectron1 or any MSRA models, + std has been absorbed into its conv1 weights, so the std needs to be set 1. + Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) + """ + super().__init__() + + self.backbone = backbone + self.head = head + if head_in_features is None: + shapes = self.backbone.output_shape() + self.head_in_features = sorted(shapes.keys(), key=lambda x: shapes[x].stride) + else: + self.head_in_features = head_in_features + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self): + return self.pixel_mean.device + + def _move_to_current_device(self, x): + return move_device_like(x, self.pixel_mean) + + def forward(self, batched_inputs: List[Dict[str, Tensor]]): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances: Instances + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the + loss. Used during training only. In inference, the standard output format, described + in :doc:`/tutorials/models`. + """ + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + features = [features[f] for f in self.head_in_features] + predictions = self.head(features) + + if self.training: + assert not torch.jit.is_scripting(), "Not supported" + assert "instances" in batched_inputs[0], "Instance annotations are missing in training!" + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + return self.forward_training(images, features, predictions, gt_instances) + else: + results = self.forward_inference(images, features, predictions) + if torch.jit.is_scripting(): + return results + + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + + def forward_training(self, images, features, predictions, gt_instances): + raise NotImplementedError() + + def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]): + """ + Normalize, pad and batch the input images. 
+ """ + images = [self._move_to_current_device(x["image"]) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors( + images, + self.backbone.size_divisibility, + padding_constraints=self.backbone.padding_constraints, + ) + return images + + def _transpose_dense_predictions( + self, predictions: List[List[Tensor]], dims_per_anchor: List[int] + ) -> List[List[Tensor]]: + """ + Transpose the dense per-level predictions. + + Args: + predictions: a list of outputs, each is a list of per-level + predictions with shape (N, Ai x K, Hi, Wi), where N is the + number of images, Ai is the number of anchors per location on + level i, K is the dimension of predictions per anchor. + dims_per_anchor: the value of K for each predictions. e.g. 4 for + box prediction, #classes for classification prediction. + + Returns: + List[List[Tensor]]: each prediction is transposed to (N, Hi x Wi x Ai, K). + """ + assert len(predictions) == len(dims_per_anchor) + res: List[List[Tensor]] = [] + for pred, dim_per_anchor in zip(predictions, dims_per_anchor): + pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred] + res.append(pred) + return res + + def _ema_update(self, name: str, value: float, initial_value: float, momentum: float = 0.9): + """ + Apply EMA update to `self.name` using `value`. + + This is mainly used for loss normalizer. In Detectron1, loss is normalized by number + of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a + large variance and using it lead to lower performance. Therefore we maintain an EMA of + #foreground to stabilize the normalizer. + + Args: + name: name of the normalizer + value: the new value to update + initial_value: the initial value to start with + momentum: momentum of EMA + + Returns: + float: the updated EMA value + """ + if hasattr(self, name): + old = getattr(self, name) + else: + old = initial_value + new = old * momentum + value * (1 - momentum) + setattr(self, name, new) + return new + + def _decode_per_level_predictions( + self, + anchors: Boxes, + pred_scores: Tensor, + pred_deltas: Tensor, + score_thresh: float, + topk_candidates: int, + image_size: Tuple[int, int], + ) -> Instances: + """ + Decode boxes and classification predictions of one featuer level, by + the following steps: + 1. filter the predictions based on score threshold and top K scores. + 2. transform the box regression outputs + 3. return the predicted scores, classes and boxes + + Args: + anchors: Boxes, anchor for this feature level + pred_scores: HxWxA,K + pred_deltas: HxWxA,4 + + Returns: + Instances: with field "scores", "pred_boxes", "pred_classes". + """ + # Apply two filtering to make NMS faster. + # 1. Keep boxes with confidence score higher than threshold + keep_idxs = pred_scores > score_thresh + pred_scores = pred_scores[keep_idxs] + topk_idxs = torch.nonzero(keep_idxs) # Kx2 + + # 2. 
Keep top k top scoring boxes only + topk_idxs_size = topk_idxs.shape[0] + if isinstance(topk_idxs_size, Tensor): + # It's a tensor in tracing + num_topk = torch.clamp(topk_idxs_size, max=topk_candidates) + else: + num_topk = min(topk_idxs_size, topk_candidates) + pred_scores, idxs = pred_scores.topk(num_topk) + topk_idxs = topk_idxs[idxs] + + anchor_idxs, classes_idxs = topk_idxs.unbind(dim=1) + + pred_boxes = self.box2box_transform.apply_deltas( + pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs] + ) + return Instances( + image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs + ) + + def _decode_multi_level_predictions( + self, + anchors: List[Boxes], + pred_scores: List[Tensor], + pred_deltas: List[Tensor], + score_thresh: float, + topk_candidates: int, + image_size: Tuple[int, int], + ) -> Instances: + """ + Run `_decode_per_level_predictions` for all feature levels and concat the results. + """ + predictions = [ + self._decode_per_level_predictions( + anchors_i, + box_cls_i, + box_reg_i, + self.test_score_thresh, + self.test_topk_candidates, + image_size, + ) + # Iterate over every feature level + for box_cls_i, box_reg_i, anchors_i in zip(pred_scores, pred_deltas, anchors) + ] + return predictions[0].cat(predictions) # 'Instances.cat' is not scriptale but this is + + def visualize_training(self, batched_inputs, results): + """ + A function used to visualize ground truth images and final network predictions. + It shows ground truth bounding boxes on the original image and up to 20 + predicted object bounding boxes on the original image. + + Args: + batched_inputs (list): a list that contains input to the model. + results (List[Instances]): a list of #images elements returned by forward_inference(). + """ + from detectron2.utils.visualizer import Visualizer + + assert len(batched_inputs) == len( + results + ), "Cannot visualize inputs and results of different sizes" + storage = get_event_storage() + max_boxes = 20 + + image_index = 0 # only visualize a single image + img = batched_inputs[image_index]["image"] + img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) + v_gt = Visualizer(img, None) + v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes) + anno_img = v_gt.get_image() + processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1]) + predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy() + + v_pred = Visualizer(img, None) + v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes]) + prop_img = v_pred.get_image() + vis_img = np.vstack((anno_img, prop_img)) + vis_img = vis_img.transpose(2, 0, 1) + vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results" + storage.put_image(vis_name, vis_img) diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/fcos.py b/data_processing/detectron2/detectron2/modeling/meta_arch/fcos.py new file mode 100644 index 0000000..7e7140b --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/fcos.py @@ -0,0 +1,328 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
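+
+# FCOS is an anchor-free, single-stage detector: every FPN location predicts a
+# class score, a 4-d distance-to-the-box-edges regression and a centerness score.
+# In this implementation each location is represented as a degenerate "anchor"
+# box whose size equals the FPN stride, so the shared DenseDetector utilities
+# can be reused unchanged.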
+ +import logging +from typing import List, Optional, Tuple +import torch +from fvcore.nn import sigmoid_focal_loss_jit +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import ShapeSpec, batched_nms +from detectron2.structures import Boxes, ImageList, Instances, pairwise_point_box_distance +from detectron2.utils.events import get_event_storage + +from ..anchor_generator import DefaultAnchorGenerator +from ..backbone import Backbone +from ..box_regression import Box2BoxTransformLinear, _dense_box_regression_loss +from .dense_detector import DenseDetector +from .retinanet import RetinaNetHead + +__all__ = ["FCOS"] + +logger = logging.getLogger(__name__) + + +class FCOS(DenseDetector): + """ + Implement FCOS in :paper:`fcos`. + """ + + def __init__( + self, + *, + backbone: Backbone, + head: nn.Module, + head_in_features: Optional[List[str]] = None, + box2box_transform=None, + num_classes, + center_sampling_radius: float = 1.5, + focal_loss_alpha=0.25, + focal_loss_gamma=2.0, + test_score_thresh=0.2, + test_topk_candidates=1000, + test_nms_thresh=0.6, + max_detections_per_image=100, + pixel_mean, + pixel_std, + ): + """ + Args: + center_sampling_radius: radius of the "center" of a groundtruth box, + within which all anchor points are labeled positive. + Other arguments mean the same as in :class:`RetinaNet`. + """ + super().__init__( + backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std + ) + + self.num_classes = num_classes + + # FCOS uses one anchor point per location. + # We represent the anchor point by a box whose size equals the anchor stride. + feature_shapes = backbone.output_shape() + fpn_strides = [feature_shapes[k].stride for k in self.head_in_features] + self.anchor_generator = DefaultAnchorGenerator( + sizes=[[k] for k in fpn_strides], aspect_ratios=[1.0], strides=fpn_strides + ) + + # FCOS parameterizes box regression by a linear transform, + # where predictions are normalized by anchor stride (equal to anchor size). + if box2box_transform is None: + box2box_transform = Box2BoxTransformLinear(normalize_by_size=True) + self.box2box_transform = box2box_transform + + self.center_sampling_radius = float(center_sampling_radius) + + # Loss parameters: + self.focal_loss_alpha = focal_loss_alpha + self.focal_loss_gamma = focal_loss_gamma + + # Inference parameters: + self.test_score_thresh = test_score_thresh + self.test_topk_candidates = test_topk_candidates + self.test_nms_thresh = test_nms_thresh + self.max_detections_per_image = max_detections_per_image + + def forward_training(self, images, features, predictions, gt_instances): + # Transpose the Hi*Wi*A dimension to the middle: + pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions( + predictions, [self.num_classes, 4, 1] + ) + anchors = self.anchor_generator(features) + gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) + return self.losses( + anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness + ) + + @torch.no_grad() + def _match_anchors(self, gt_boxes: Boxes, anchors: List[Boxes]): + """ + Match ground-truth boxes to a set of multi-level anchors. + + Args: + gt_boxes: Ground-truth boxes from instances of an image. + anchors: List of anchors for each feature map (of different scales). + + Returns: + torch.Tensor + A tensor of shape `(M, R)`, given `M` ground-truth boxes and total + `R` anchor points from all feature levels, indicating the quality + of match between m-th box and r-th anchor. 
Higher value indicates + better match. + """ + # Naming convention: (M = ground-truth boxes, R = anchor points) + # Anchor points are represented as square boxes of size = stride. + num_anchors_per_level = [len(x) for x in anchors] + anchors = Boxes.cat(anchors) # (R, 4) + anchor_centers = anchors.get_centers() # (R, 2) + anchor_sizes = anchors.tensor[:, 2] - anchors.tensor[:, 0] # (R, ) + + lower_bound = anchor_sizes * 4 + lower_bound[: num_anchors_per_level[0]] = 0 + upper_bound = anchor_sizes * 8 + upper_bound[-num_anchors_per_level[-1] :] = float("inf") + + gt_centers = gt_boxes.get_centers() + + # FCOS with center sampling: anchor point must be close enough to + # ground-truth box center. + center_dists = (anchor_centers[None, :, :] - gt_centers[:, None, :]).abs_() + sampling_regions = self.center_sampling_radius * anchor_sizes[None, :] + + match_quality_matrix = center_dists.max(dim=2).values < sampling_regions + + pairwise_dist = pairwise_point_box_distance(anchor_centers, gt_boxes) + pairwise_dist = pairwise_dist.permute(1, 0, 2) # (M, R, 4) + + # The original FCOS anchor matching rule: anchor point must be inside GT. + match_quality_matrix &= pairwise_dist.min(dim=2).values > 0 + + # Multilevel anchor matching in FCOS: each anchor is only responsible + # for certain scale range. + pairwise_dist = pairwise_dist.max(dim=2).values + match_quality_matrix &= (pairwise_dist > lower_bound[None, :]) & ( + pairwise_dist < upper_bound[None, :] + ) + # Match the GT box with minimum area, if there are multiple GT matches. + gt_areas = gt_boxes.area() # (M, ) + + match_quality_matrix = match_quality_matrix.to(torch.float32) + match_quality_matrix *= 1e8 - gt_areas[:, None] + return match_quality_matrix # (M, R) + + @torch.no_grad() + def label_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]): + """ + Same interface as :meth:`RetinaNet.label_anchors`, but implemented with FCOS + anchor matching rule. + + Unlike RetinaNet, there are no ignored anchors. + """ + + gt_labels, matched_gt_boxes = [], [] + + for inst in gt_instances: + if len(inst) > 0: + match_quality_matrix = self._match_anchors(inst.gt_boxes, anchors) + + # Find matched ground-truth box per anchor. Un-matched anchors are + # assigned -1. This is equivalent to using an anchor matcher as used + # in R-CNN/RetinaNet: `Matcher(thresholds=[1e-5], labels=[0, 1])` + match_quality, matched_idxs = match_quality_matrix.max(dim=0) + matched_idxs[match_quality < 1e-5] = -1 + + matched_gt_boxes_i = inst.gt_boxes.tensor[matched_idxs.clip(min=0)] + gt_labels_i = inst.gt_classes[matched_idxs.clip(min=0)] + + # Anchors with matched_idxs = -1 are labeled background. + gt_labels_i[matched_idxs < 0] = self.num_classes + else: + matched_gt_boxes_i = torch.zeros_like(Boxes.cat(anchors).tensor) + gt_labels_i = torch.full( + (len(matched_gt_boxes_i),), + fill_value=self.num_classes, + dtype=torch.long, + device=matched_gt_boxes_i.device, + ) + + gt_labels.append(gt_labels_i) + matched_gt_boxes.append(matched_gt_boxes_i) + + return gt_labels, matched_gt_boxes + + def losses( + self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness + ): + """ + This method is almost identical to :meth:`RetinaNet.losses`, with an extra + "loss_centerness" in the returned dict. 
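+
+        All three losses are summed over the foreground anchors of the batch and
+        divided by an EMA-smoothed foreground count (``loss_normalizer``, see
+        :meth:`DenseDetector._ema_update`).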
+ """ + num_images = len(gt_labels) + gt_labels = torch.stack(gt_labels) # (M, R) + + pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) + num_pos_anchors = pos_mask.sum().item() + get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) + normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 300) + + # classification and regression loss + gt_labels_target = F.one_hot(gt_labels, num_classes=self.num_classes + 1)[ + :, :, :-1 + ] # no loss for the last (background) class + loss_cls = sigmoid_focal_loss_jit( + torch.cat(pred_logits, dim=1), + gt_labels_target.to(pred_logits[0].dtype), + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) + + loss_box_reg = _dense_box_regression_loss( + anchors, + self.box2box_transform, + pred_anchor_deltas, + gt_boxes, + pos_mask, + box_reg_loss_type="giou", + ) + + ctrness_targets = self.compute_ctrness_targets(anchors, gt_boxes) # (M, R) + pred_centerness = torch.cat(pred_centerness, dim=1).squeeze(dim=2) # (M, R) + ctrness_loss = F.binary_cross_entropy_with_logits( + pred_centerness[pos_mask], ctrness_targets[pos_mask], reduction="sum" + ) + return { + "loss_fcos_cls": loss_cls / normalizer, + "loss_fcos_loc": loss_box_reg / normalizer, + "loss_fcos_ctr": ctrness_loss / normalizer, + } + + def compute_ctrness_targets(self, anchors: List[Boxes], gt_boxes: List[torch.Tensor]): + anchors = Boxes.cat(anchors).tensor # Rx4 + reg_targets = [self.box2box_transform.get_deltas(anchors, m) for m in gt_boxes] + reg_targets = torch.stack(reg_targets, dim=0) # NxRx4 + if len(reg_targets) == 0: + return reg_targets.new_zeros(len(reg_targets)) + left_right = reg_targets[:, :, [0, 2]] + top_bottom = reg_targets[:, :, [1, 3]] + ctrness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( + top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0] + ) + return torch.sqrt(ctrness) + + def forward_inference( + self, + images: ImageList, + features: List[torch.Tensor], + predictions: List[List[torch.Tensor]], + ): + pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions( + predictions, [self.num_classes, 4, 1] + ) + anchors = self.anchor_generator(features) + + results: List[Instances] = [] + for img_idx, image_size in enumerate(images.image_sizes): + scores_per_image = [ + # Multiply and sqrt centerness & classification scores + # (See eqn. 4 in https://arxiv.org/abs/2006.09214) + torch.sqrt(x[img_idx].sigmoid_() * y[img_idx].sigmoid_()) + for x, y in zip(pred_logits, pred_centerness) + ] + deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] + results_per_image = self.inference_single_image( + anchors, scores_per_image, deltas_per_image, image_size + ) + results.append(results_per_image) + return results + + def inference_single_image( + self, + anchors: List[Boxes], + box_cls: List[torch.Tensor], + box_delta: List[torch.Tensor], + image_size: Tuple[int, int], + ): + """ + Identical to :meth:`RetinaNet.inference_single_image. + """ + pred = self._decode_multi_level_predictions( + anchors, + box_cls, + box_delta, + self.test_score_thresh, + self.test_topk_candidates, + image_size, + ) + keep = batched_nms( + pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh + ) + return pred[keep[: self.max_detections_per_image]] + + +class FCOSHead(RetinaNetHead): + """ + The head used in :paper:`fcos`. 
It adds an additional centerness + prediction branch on top of :class:`RetinaNetHead`. + """ + + def __init__(self, *, input_shape: List[ShapeSpec], conv_dims: List[int], **kwargs): + super().__init__(input_shape=input_shape, conv_dims=conv_dims, num_anchors=1, **kwargs) + # Unlike original FCOS, we do not add an additional learnable scale layer + # because it's found to have no benefits after normalizing regression targets by stride. + self._num_features = len(input_shape) + self.ctrness = nn.Conv2d(conv_dims[-1], 1, kernel_size=3, stride=1, padding=1) + torch.nn.init.normal_(self.ctrness.weight, std=0.01) + torch.nn.init.constant_(self.ctrness.bias, 0) + + def forward(self, features): + assert len(features) == self._num_features + logits = [] + bbox_reg = [] + ctrness = [] + for feature in features: + logits.append(self.cls_score(self.cls_subnet(feature))) + bbox_feature = self.bbox_subnet(feature) + bbox_reg.append(self.bbox_pred(bbox_feature)) + ctrness.append(self.ctrness(bbox_feature)) + return logits, bbox_reg, ctrness diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py b/data_processing/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py new file mode 100644 index 0000000..b31e1c8 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py @@ -0,0 +1,269 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +from typing import Dict, List +import torch +from torch import nn + +from detectron2.config import configurable +from detectron2.structures import ImageList + +from ..postprocessing import detector_postprocess, sem_seg_postprocess +from .build import META_ARCH_REGISTRY +from .rcnn import GeneralizedRCNN +from .semantic_seg import build_sem_seg_head + +__all__ = ["PanopticFPN"] + + +@META_ARCH_REGISTRY.register() +class PanopticFPN(GeneralizedRCNN): + """ + Implement the paper :paper:`PanopticFPN`. + """ + + @configurable + def __init__( + self, + *, + sem_seg_head: nn.Module, + combine_overlap_thresh: float = 0.5, + combine_stuff_area_thresh: float = 4096, + combine_instances_score_thresh: float = 0.5, + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + sem_seg_head: a module for the semantic segmentation head. + combine_overlap_thresh: combine masks into one instances if + they have enough overlap + combine_stuff_area_thresh: ignore stuff areas smaller than this threshold + combine_instances_score_thresh: ignore instances whose score is + smaller than this threshold + + Other arguments are the same as :class:`GeneralizedRCNN`. + """ + super().__init__(**kwargs) + self.sem_seg_head = sem_seg_head + # options when combining instance & semantic outputs + self.combine_overlap_thresh = combine_overlap_thresh + self.combine_stuff_area_thresh = combine_stuff_area_thresh + self.combine_instances_score_thresh = combine_instances_score_thresh + + @classmethod + def from_config(cls, cfg): + ret = super().from_config(cfg) + ret.update( + { + "combine_overlap_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH, + "combine_stuff_area_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT, + "combine_instances_score_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH, # noqa + } + ) + ret["sem_seg_head"] = build_sem_seg_head(cfg, ret["backbone"].output_shape()) + logger = logging.getLogger(__name__) + if not cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED: + logger.warning( + "PANOPTIC_FPN.COMBINED.ENABLED is no longer used. 
" + " model.inference(do_postprocess=) should be used to toggle postprocessing." + ) + if cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT != 1.0: + w = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT + logger.warning( + "PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT should be replaced by weights on each ROI head." + ) + + def update_weight(x): + if isinstance(x, dict): + return {k: v * w for k, v in x.items()} + else: + return x * w + + roi_heads = ret["roi_heads"] + roi_heads.box_predictor.loss_weight = update_weight(roi_heads.box_predictor.loss_weight) + roi_heads.mask_head.loss_weight = update_weight(roi_heads.mask_head.loss_weight) + return ret + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + + For now, each item in the list is a dict that contains: + + * "image": Tensor, image in (C, H, W) format. + * "instances": Instances + * "sem_seg": semantic segmentation ground truth. + * Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + each dict has the results for one image. The dict contains the following keys: + + * "instances": see :meth:`GeneralizedRCNN.forward` for its format. + * "sem_seg": see :meth:`SemanticSegmentor.forward` for its format. + * "panoptic_seg": See the return value of + :func:`combine_semantic_and_instance_outputs` for its format. + """ + if not self.training: + return self.inference(batched_inputs) + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + + assert "sem_seg" in batched_inputs[0] + gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs] + gt_sem_seg = ImageList.from_tensors( + gt_sem_seg, + self.backbone.size_divisibility, + self.sem_seg_head.ignore_value, + self.backbone.padding_constraints, + ).tensor + sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg) + + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + detector_results, detector_losses = self.roi_heads( + images, features, proposals, gt_instances + ) + + losses = sem_seg_losses + losses.update(proposal_losses) + losses.update(detector_losses) + return losses + + def inference(self, batched_inputs: List[Dict[str, torch.Tensor]], do_postprocess: bool = True): + """ + Run inference on the given inputs. + + Args: + batched_inputs (list[dict]): same as in :meth:`forward` + do_postprocess (bool): whether to apply post-processing on the outputs. + + Returns: + When do_postprocess=True, see docs in :meth:`forward`. + Otherwise, returns a (list[Instances], list[Tensor]) that contains + the raw detector outputs, and raw semantic segmentation outputs. 
+ """ + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + sem_seg_results, sem_seg_losses = self.sem_seg_head(features, None) + proposals, _ = self.proposal_generator(images, features, None) + detector_results, _ = self.roi_heads(images, features, proposals, None) + + if do_postprocess: + processed_results = [] + for sem_seg_result, detector_result, input_per_image, image_size in zip( + sem_seg_results, detector_results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) + detector_r = detector_postprocess(detector_result, height, width) + + processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) + + panoptic_r = combine_semantic_and_instance_outputs( + detector_r, + sem_seg_r.argmax(dim=0), + self.combine_overlap_thresh, + self.combine_stuff_area_thresh, + self.combine_instances_score_thresh, + ) + processed_results[-1]["panoptic_seg"] = panoptic_r + return processed_results + else: + return detector_results, sem_seg_results + + +def combine_semantic_and_instance_outputs( + instance_results, + semantic_results, + overlap_threshold, + stuff_area_thresh, + instances_score_thresh, +): + """ + Implement a simple combining logic following + "combine_semantic_and_instance_predictions.py" in panopticapi + to produce panoptic segmentation outputs. + + Args: + instance_results: output of :func:`detector_postprocess`. + semantic_results: an (H, W) tensor, each element is the contiguous semantic + category id + + Returns: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. + segments_info (list[dict]): Describe each segment in `panoptic_seg`. + Each dict contains keys "id", "category_id", "isthing". 
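+
+        An illustrative (made-up) example of ``segments_info``, following the
+        keys produced below::
+
+            segments_info = [
+                {"id": 1, "isthing": True, "score": 0.98, "category_id": 0, "instance_id": 3},
+                {"id": 2, "isthing": False, "category_id": 21, "area": 52810},
+            ]
+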
+ """ + panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) + + # sort instance outputs by scores + sorted_inds = torch.argsort(-instance_results.scores) + + current_segment_id = 0 + segments_info = [] + + instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) + + # Add instances one-by-one, check for overlaps with existing ones + for inst_id in sorted_inds: + score = instance_results.scores[inst_id].item() + if score < instances_score_thresh: + break + mask = instance_masks[inst_id] # H,W + mask_area = mask.sum().item() + + if mask_area == 0: + continue + + intersect = (mask > 0) & (panoptic_seg > 0) + intersect_area = intersect.sum().item() + + if intersect_area * 1.0 / mask_area > overlap_threshold: + continue + + if intersect_area > 0: + mask = mask & (panoptic_seg == 0) + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": True, + "score": score, + "category_id": instance_results.pred_classes[inst_id].item(), + "instance_id": inst_id.item(), + } + ) + + # Add semantic results to remaining empty areas + semantic_labels = torch.unique(semantic_results).cpu().tolist() + for semantic_label in semantic_labels: + if semantic_label == 0: # 0 is a special "thing" class + continue + mask = (semantic_results == semantic_label) & (panoptic_seg == 0) + mask_area = mask.sum().item() + if mask_area < stuff_area_thresh: + continue + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": False, + "category_id": semantic_label, + "area": mask_area, + } + ) + + return panoptic_seg, segments_info diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/rcnn.py b/data_processing/detectron2/detectron2/modeling/meta_arch/rcnn.py new file mode 100644 index 0000000..edcbda5 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/rcnn.py @@ -0,0 +1,341 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +from typing import Dict, List, Optional, Tuple +import torch +from torch import nn + +from detectron2.config import configurable +from detectron2.data.detection_utils import convert_image_to_rgb +from detectron2.layers import move_device_like +from detectron2.structures import ImageList, Instances +from detectron2.utils.events import get_event_storage +from detectron2.utils.logger import log_first_n + +from ..backbone import Backbone, build_backbone +from ..postprocessing import detector_postprocess +from ..proposal_generator import build_proposal_generator +from ..roi_heads import build_roi_heads +from .build import META_ARCH_REGISTRY + +__all__ = ["GeneralizedRCNN", "ProposalNetwork"] + + +@META_ARCH_REGISTRY.register() +class GeneralizedRCNN(nn.Module): + """ + Generalized R-CNN. Any models that contains the following three components: + 1. Per-image feature extraction (aka backbone) + 2. Region proposal generation + 3. 
Per-region feature extraction and prediction + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + proposal_generator: nn.Module, + roi_heads: nn.Module, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + input_format: Optional[str] = None, + vis_period: int = 0, + ): + """ + Args: + backbone: a backbone module, must follow detectron2's backbone interface + proposal_generator: a module that generates proposals using backbone features + roi_heads: a ROI head that performs per-region computation + pixel_mean, pixel_std: list or tuple with #channels element, representing + the per-channel mean and std to be used to normalize the input image + input_format: describe the meaning of channels of input. Needed by visualization + vis_period: the period to run visualization. Set to 0 to disable. + """ + super().__init__() + self.backbone = backbone + self.proposal_generator = proposal_generator + self.roi_heads = roi_heads + + self.input_format = input_format + self.vis_period = vis_period + if vis_period > 0: + assert input_format is not None, "input_format is required for visualization!" + + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + assert ( + self.pixel_mean.shape == self.pixel_std.shape + ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + return { + "backbone": backbone, + "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), + "roi_heads": build_roi_heads(cfg, backbone.output_shape()), + "input_format": cfg.INPUT.FORMAT, + "vis_period": cfg.VIS_PERIOD, + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + } + + @property + def device(self): + return self.pixel_mean.device + + def _move_to_current_device(self, x): + return move_device_like(x, self.pixel_mean) + + def visualize_training(self, batched_inputs, proposals): + """ + A function used to visualize images and proposals. It shows ground truth + bounding boxes on the original image and up to 20 top-scoring predicted + object proposals on the original image. Users can implement different + visualization functions for different models. + + Args: + batched_inputs (list): a list that contains input to the model. + proposals (list): a list that contains predicted proposals. Both + batched_inputs and proposals should have the same length. + """ + from detectron2.utils.visualizer import Visualizer + + storage = get_event_storage() + max_vis_prop = 20 + + for input, prop in zip(batched_inputs, proposals): + img = input["image"] + img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) + v_gt = Visualizer(img, None) + v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes) + anno_img = v_gt.get_image() + box_size = min(len(prop.proposal_boxes), max_vis_prop) + v_pred = Visualizer(img, None) + v_pred = v_pred.overlay_instances( + boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy() + ) + prop_img = v_pred.get_image() + vis_img = np.concatenate((anno_img, prop_img), axis=1) + vis_img = vis_img.transpose(2, 0, 1) + vis_name = "Left: GT bounding boxes; Right: Predicted proposals" + storage.put_image(vis_name, vis_img) + break # only visualize one image in a batch + + def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . 
+ Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances (optional): groundtruth :class:`Instances` + * proposals (optional): :class:`Instances`, precomputed proposals. + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "instances" whose value is a :class:`Instances`. + The :class:`Instances` object has the following keys: + "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints" + """ + if not self.training: + return self.inference(batched_inputs) + + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + + if self.proposal_generator is not None: + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + proposal_losses = {} + + _, detector_losses = self.roi_heads(images, features, proposals, gt_instances) + if self.vis_period > 0: + storage = get_event_storage() + if storage.iter % self.vis_period == 0: + self.visualize_training(batched_inputs, proposals) + + losses = {} + losses.update(detector_losses) + losses.update(proposal_losses) + return losses + + def inference( + self, + batched_inputs: List[Dict[str, torch.Tensor]], + detected_instances: Optional[List[Instances]] = None, + do_postprocess: bool = True, + ): + """ + Run inference on the given inputs. + + Args: + batched_inputs (list[dict]): same as in :meth:`forward` + detected_instances (None or list[Instances]): if not None, it + contains an `Instances` object per image. The `Instances` + object contains "pred_boxes" and "pred_classes" which are + known boxes in the image. + The inference will then skip the detection of bounding boxes, + and only predict other per-ROI outputs. + do_postprocess (bool): whether to apply post-processing on the outputs. + + Returns: + When do_postprocess=True, same as in :meth:`forward`. + Otherwise, a list[Instances] containing raw network outputs. + """ + assert not self.training + + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + + if detected_instances is None: + if self.proposal_generator is not None: + proposals, _ = self.proposal_generator(images, features, None) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + + results, _ = self.roi_heads(images, features, proposals, None) + else: + detected_instances = [x.to(self.device) for x in detected_instances] + results = self.roi_heads.forward_with_given_boxes(features, detected_instances) + + if do_postprocess: + assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess." + return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes) + return results + + def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]): + """ + Normalize, pad and batch the input images. 
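+
+        Roughly equivalent to the following sketch, where ``mean`` / ``std``
+        stand for the registered ``pixel_mean`` / ``pixel_std`` buffers::
+
+            images = [(x["image"].to(device) - mean) / std for x in batched_inputs]
+            images = ImageList.from_tensors(images, backbone.size_divisibility)
+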
+ """ + images = [self._move_to_current_device(x["image"]) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors( + images, + self.backbone.size_divisibility, + padding_constraints=self.backbone.padding_constraints, + ) + return images + + @staticmethod + def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]], image_sizes): + """ + Rescale the output instances to the target size. + """ + # note: private function; subject to changes + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + instances, batched_inputs, image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + + +@META_ARCH_REGISTRY.register() +class ProposalNetwork(nn.Module): + """ + A meta architecture that only predicts object proposals. + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + proposal_generator: nn.Module, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + ): + """ + Args: + backbone: a backbone module, must follow detectron2's backbone interface + proposal_generator: a module that generates proposals using backbone features + pixel_mean, pixel_std: list or tuple with #channels element, representing + the per-channel mean and std to be used to normalize the input image + """ + super().__init__() + self.backbone = backbone + self.proposal_generator = proposal_generator + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + return { + "backbone": backbone, + "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + } + + @property + def device(self): + return self.pixel_mean.device + + def _move_to_current_device(self, x): + return move_device_like(x, self.pixel_mean) + + def forward(self, batched_inputs): + """ + Args: + Same as in :class:`GeneralizedRCNN.forward` + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "proposals" whose value is a + :class:`Instances` with keys "proposal_boxes" and "objectness_logits". + """ + images = [self._move_to_current_device(x["image"]) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors( + images, + self.backbone.size_divisibility, + padding_constraints=self.backbone.padding_constraints, + ) + features = self.backbone(images.tensor) + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + # In training, the proposals are not useful at all but we generate them anyway. + # This makes RPN-only models about 5% slower. 
+ if self.training: + return proposal_losses + + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + proposals, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"proposals": r}) + return processed_results diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/retinanet.py b/data_processing/detectron2/detectron2/modeling/meta_arch/retinanet.py new file mode 100644 index 0000000..bd72a8e --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/retinanet.py @@ -0,0 +1,439 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import math +from typing import List, Tuple +import torch +from fvcore.nn import sigmoid_focal_loss_jit +from torch import Tensor, nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm +from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage + +from ..anchor_generator import build_anchor_generator +from ..backbone import Backbone, build_backbone +from ..box_regression import Box2BoxTransform, _dense_box_regression_loss +from ..matcher import Matcher +from .build import META_ARCH_REGISTRY +from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa + +__all__ = ["RetinaNet"] + + +logger = logging.getLogger(__name__) + + +@META_ARCH_REGISTRY.register() +class RetinaNet(DenseDetector): + """ + Implement RetinaNet in :paper:`RetinaNet`. + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + head: nn.Module, + head_in_features, + anchor_generator, + box2box_transform, + anchor_matcher, + num_classes, + focal_loss_alpha=0.25, + focal_loss_gamma=2.0, + smooth_l1_beta=0.0, + box_reg_loss_type="smooth_l1", + test_score_thresh=0.05, + test_topk_candidates=1000, + test_nms_thresh=0.5, + max_detections_per_image=100, + pixel_mean, + pixel_std, + vis_period=0, + input_format="BGR", + ): + """ + NOTE: this interface is experimental. + + Args: + backbone: a backbone module, must follow detectron2's backbone interface + head (nn.Module): a module that predicts logits and regression deltas + for each level from a list of per-level features + head_in_features (Tuple[str]): Names of the input feature maps to be used in head + anchor_generator (nn.Module): a module that creates anchors from a + list of features. Usually an instance of :class:`AnchorGenerator` + box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to + instance boxes + anchor_matcher (Matcher): label the anchors by matching them with ground truth. + num_classes (int): number of classes. Used to label background proposals. 
+ + # Loss parameters: + focal_loss_alpha (float): focal_loss_alpha + focal_loss_gamma (float): focal_loss_gamma + smooth_l1_beta (float): smooth_l1_beta + box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" + + # Inference parameters: + test_score_thresh (float): Inference cls score threshold, only anchors with + score > INFERENCE_TH are considered for inference (to improve speed) + test_topk_candidates (int): Select topk candidates before NMS + test_nms_thresh (float): Overlap threshold used for non-maximum suppression + (suppress boxes with IoU >= this threshold) + max_detections_per_image (int): + Maximum number of detections to return per image during inference + (100 is based on the limit established for the COCO dataset). + + pixel_mean, pixel_std: see :class:`DenseDetector`. + """ + super().__init__( + backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std + ) + self.num_classes = num_classes + + # Anchors + self.anchor_generator = anchor_generator + self.box2box_transform = box2box_transform + self.anchor_matcher = anchor_matcher + + # Loss parameters: + self.focal_loss_alpha = focal_loss_alpha + self.focal_loss_gamma = focal_loss_gamma + self.smooth_l1_beta = smooth_l1_beta + self.box_reg_loss_type = box_reg_loss_type + # Inference parameters: + self.test_score_thresh = test_score_thresh + self.test_topk_candidates = test_topk_candidates + self.test_nms_thresh = test_nms_thresh + self.max_detections_per_image = max_detections_per_image + # Vis parameters + self.vis_period = vis_period + self.input_format = input_format + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + backbone_shape = backbone.output_shape() + feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] + head = RetinaNetHead(cfg, feature_shapes) + anchor_generator = build_anchor_generator(cfg, feature_shapes) + return { + "backbone": backbone, + "head": head, + "anchor_generator": anchor_generator, + "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), + "anchor_matcher": Matcher( + cfg.MODEL.RETINANET.IOU_THRESHOLDS, + cfg.MODEL.RETINANET.IOU_LABELS, + allow_low_quality_matches=True, + ), + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, + "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, + # Loss parameters: + "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, + "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, + "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, + "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, + # Inference parameters: + "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, + "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, + "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, + "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, + # Vis parameters + "vis_period": cfg.VIS_PERIOD, + "input_format": cfg.INPUT.FORMAT, + } + + def forward_training(self, images, features, predictions, gt_instances): + # Transpose the Hi*Wi*A dimension to the middle: + pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( + predictions, [self.num_classes, 4] + ) + anchors = self.anchor_generator(features) + gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) + return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) + + def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, 
gt_boxes): + """ + Args: + anchors (list[Boxes]): a list of #feature level Boxes + gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. + Their shapes are (N, R) and (N, R, 4), respectively, where R is + the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) + pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the + list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). + Where K is the number of classes used in `pred_logits`. + + Returns: + dict[str, Tensor]: + mapping from a named loss to a scalar tensor storing the loss. + Used during training only. The dict keys are: "loss_cls" and "loss_box_reg" + """ + num_images = len(gt_labels) + gt_labels = torch.stack(gt_labels) # (N, R) + + valid_mask = gt_labels >= 0 + pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) + num_pos_anchors = pos_mask.sum().item() + get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) + normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) + + # classification and regression loss + gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ + :, :-1 + ] # no loss for the last (background) class + loss_cls = sigmoid_focal_loss_jit( + cat(pred_logits, dim=1)[valid_mask], + gt_labels_target.to(pred_logits[0].dtype), + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) + + loss_box_reg = _dense_box_regression_loss( + anchors, + self.box2box_transform, + pred_anchor_deltas, + gt_boxes, + pos_mask, + box_reg_loss_type=self.box_reg_loss_type, + smooth_l1_beta=self.smooth_l1_beta, + ) + + return { + "loss_cls": loss_cls / normalizer, + "loss_box_reg": loss_box_reg / normalizer, + } + + @torch.no_grad() + def label_anchors(self, anchors, gt_instances): + """ + Args: + anchors (list[Boxes]): A list of #feature level Boxes. + The Boxes contains anchors of this image on the specific feature level. + gt_instances (list[Instances]): a list of N `Instances`s. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. + + Returns: + list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is + the total number of anchors across all feature maps (sum(Hi * Wi * A)). + Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. + + list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors + across feature maps. The values are the matched gt boxes for each anchor. + Values are undefined for those anchors not labeled as foreground. + """ + anchors = Boxes.cat(anchors) # Rx4 + + gt_labels = [] + matched_gt_boxes = [] + for gt_per_image in gt_instances: + match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) + matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) + del match_quality_matrix + + if len(gt_per_image) > 0: + matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] + + gt_labels_i = gt_per_image.gt_classes[matched_idxs] + # Anchors with label 0 are treated as background. + gt_labels_i[anchor_labels == 0] = self.num_classes + # Anchors with label -1 are ignored. 
+ gt_labels_i[anchor_labels == -1] = -1 + else: + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes + + gt_labels.append(gt_labels_i) + matched_gt_boxes.append(matched_gt_boxes_i) + + return gt_labels, matched_gt_boxes + + def forward_inference( + self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] + ): + pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( + predictions, [self.num_classes, 4] + ) + anchors = self.anchor_generator(features) + + results: List[Instances] = [] + for img_idx, image_size in enumerate(images.image_sizes): + scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] + deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] + results_per_image = self.inference_single_image( + anchors, scores_per_image, deltas_per_image, image_size + ) + results.append(results_per_image) + return results + + def inference_single_image( + self, + anchors: List[Boxes], + box_cls: List[Tensor], + box_delta: List[Tensor], + image_size: Tuple[int, int], + ): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Arguments: + anchors (list[Boxes]): list of #feature levels. Each entry contains + a Boxes object, which contains all the anchors in that feature level. + box_cls (list[Tensor]): list of #feature levels. Each entry contains + tensor of size (H x W x A, K) + box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. + image_size (tuple(H, W)): a tuple of the image height and width. + + Returns: + Same as `inference`, but for only one image. + """ + pred = self._decode_multi_level_predictions( + anchors, + box_cls, + box_delta, + self.test_score_thresh, + self.test_topk_candidates, + image_size, + ) + keep = batched_nms( # per-class NMS + pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh + ) + return pred[keep[: self.max_detections_per_image]] + + +class RetinaNetHead(nn.Module): + """ + The head used in RetinaNet for object classification and box regression. + It has two subnets for the two tasks, with a common structure but separate parameters. + """ + + @configurable + def __init__( + self, + *, + input_shape: List[ShapeSpec], + num_classes, + num_anchors, + conv_dims: List[int], + norm="", + prior_prob=0.01, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (List[ShapeSpec]): input shape + num_classes (int): number of classes. Used to label background proposals. + num_anchors (int): number of generated anchors + conv_dims (List[int]): dimensions for each convolution layer + norm (str or callable): + Normalization for conv layers except for the two output layers. + See :func:`detectron2.layers.get_norm` for supported types. + prior_prob (float): Prior weight for computing bias + """ + super().__init__() + + self._num_features = len(input_shape) + if norm == "BN" or norm == "SyncBN": + logger.info( + f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." + ) + bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm + + def norm(c): + return CycleBatchNormList( + length=self._num_features, bn_class=bn_class, num_features=c + ) + + else: + norm_name = str(type(get_norm(norm, 32))) + if "BN" in norm_name: + logger.warning( + f"Shared BatchNorm (type={norm_name}) may not work well in RetinaNetHead." 
+ ) + + cls_subnet = [] + bbox_subnet = [] + for in_channels, out_channels in zip( + [input_shape[0].channels] + list(conv_dims), conv_dims + ): + cls_subnet.append( + nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + ) + if norm: + cls_subnet.append(get_norm(norm, out_channels)) + cls_subnet.append(nn.ReLU()) + bbox_subnet.append( + nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + ) + if norm: + bbox_subnet.append(get_norm(norm, out_channels)) + bbox_subnet.append(nn.ReLU()) + + self.cls_subnet = nn.Sequential(*cls_subnet) + self.bbox_subnet = nn.Sequential(*bbox_subnet) + self.cls_score = nn.Conv2d( + conv_dims[-1], num_anchors * num_classes, kernel_size=3, stride=1, padding=1 + ) + self.bbox_pred = nn.Conv2d( + conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1 + ) + + # Initialization + for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]: + for layer in modules.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + # Use prior in model initialization to improve stability + bias_value = -(math.log((1 - prior_prob) / prior_prob)) + torch.nn.init.constant_(self.cls_score.bias, bias_value) + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors + assert ( + len(set(num_anchors)) == 1 + ), "Using different number of anchors between levels is not currently supported!" + num_anchors = num_anchors[0] + + return { + "input_shape": input_shape, + "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, + "conv_dims": [input_shape[0].channels] * cfg.MODEL.RETINANET.NUM_CONVS, + "prior_prob": cfg.MODEL.RETINANET.PRIOR_PROB, + "norm": cfg.MODEL.RETINANET.NORM, + "num_anchors": num_anchors, + } + + def forward(self, features: List[Tensor]): + """ + Arguments: + features (list[Tensor]): FPN feature map tensors in high to low resolution. + Each tensor in the list correspond to different feature levels. + + Returns: + logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). + The tensor predicts the classification probability + at each spatial position for each of the A anchors and K object + classes. + bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). + The tensor predicts 4-vector (dx,dy,dw,dh) box + regression values for every anchor. These values are the + relative offset between the anchor and the ground truth box. + """ + assert len(features) == self._num_features + logits = [] + bbox_reg = [] + for feature in features: + logits.append(self.cls_score(self.cls_subnet(feature))) + bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature))) + return logits, bbox_reg diff --git a/data_processing/detectron2/detectron2/modeling/meta_arch/semantic_seg.py b/data_processing/detectron2/detectron2/modeling/meta_arch/semantic_seg.py new file mode 100644 index 0000000..fefbecf --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/meta_arch/semantic_seg.py @@ -0,0 +1,267 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import numpy as np +from typing import Callable, Dict, Optional, Tuple, Union +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ShapeSpec, get_norm +from detectron2.structures import ImageList +from detectron2.utils.registry import Registry + +from ..backbone import Backbone, build_backbone +from ..postprocessing import sem_seg_postprocess +from .build import META_ARCH_REGISTRY + +__all__ = [ + "SemanticSegmentor", + "SEM_SEG_HEADS_REGISTRY", + "SemSegFPNHead", + "build_sem_seg_head", +] + + +SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS") +SEM_SEG_HEADS_REGISTRY.__doc__ = """ +Registry for semantic segmentation heads, which make semantic segmentation predictions +from feature maps. +""" + + +@META_ARCH_REGISTRY.register() +class SemanticSegmentor(nn.Module): + """ + Main class for semantic segmentation architectures. + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + sem_seg_head: nn.Module, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + ): + """ + Args: + backbone: a backbone module, must follow detectron2's backbone interface + sem_seg_head: a module that predicts semantic segmentation from backbone features + pixel_mean, pixel_std: list or tuple with #channels element, representing + the per-channel mean and std to be used to normalize the input image + """ + super().__init__() + self.backbone = backbone + self.sem_seg_head = sem_seg_head + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) + return { + "backbone": backbone, + "sem_seg_head": sem_seg_head, + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + } + + @property + def device(self): + return self.pixel_mean.device + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + + For now, each item in the list is a dict that contains: + + * "image": Tensor, image in (C, H, W) format. + * "sem_seg": semantic segmentation ground truth + * Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model (may be different + from input resolution), used in inference. + + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "sem_seg" whose value is a + Tensor that represents the + per-pixel segmentation prediced by the head. + The prediction has shape KxHxW that represents the logits of + each class for each pixel. 
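+                For example, a per-pixel label map can be obtained from one
+                output dict ``out`` via ``out["sem_seg"].argmax(dim=0)``.
+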
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors( + images, + self.backbone.size_divisibility, + padding_constraints=self.backbone.padding_constraints, + ) + + features = self.backbone(images.tensor) + + if "sem_seg" in batched_inputs[0]: + targets = [x["sem_seg"].to(self.device) for x in batched_inputs] + targets = ImageList.from_tensors( + targets, + self.backbone.size_divisibility, + self.sem_seg_head.ignore_value, + self.backbone.padding_constraints, + ).tensor + else: + targets = None + results, losses = self.sem_seg_head(features, targets) + + if self.training: + return losses + + processed_results = [] + for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = sem_seg_postprocess(result, image_size, height, width) + processed_results.append({"sem_seg": r}) + return processed_results + + +def build_sem_seg_head(cfg, input_shape): + """ + Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. + """ + name = cfg.MODEL.SEM_SEG_HEAD.NAME + return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +@SEM_SEG_HEADS_REGISTRY.register() +class SemSegFPNHead(nn.Module): + """ + A semantic segmentation head described in :paper:`PanopticFPN`. + It takes a list of FPN features as input, and applies a sequence of + 3x3 convs and upsampling to scale all of them to the stride defined by + ``common_stride``. Then these features are added and used to make final + predictions by another 1x1 conv layer. + """ + + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + num_classes: int, + conv_dims: int, + common_stride: int, + loss_weight: float = 1.0, + norm: Optional[Union[str, Callable]] = None, + ignore_value: int = -1, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape: shapes (channels and stride) of the input features + num_classes: number of classes to predict + conv_dims: number of output channels for the intermediate conv layers. + common_stride: the common stride that all features will be upscaled to + loss_weight: loss weight + norm (str or callable): normalization for all conv layers + ignore_value: category id to be ignored during training. 
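+
+        As an example of the scale heads built below: a feature at stride 32
+        with ``common_stride=4`` gets ``max(1, log2(32) - log2(4)) = 3``
+        3x3 conv blocks, each followed by a 2x bilinear upsample, so its
+        output is scaled up to stride 4.
+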
+ """ + super().__init__() + input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) + if not len(input_shape): + raise ValueError("SemSegFPNHead(input_shape=) cannot be empty!") + self.in_features = [k for k, v in input_shape] + feature_strides = [v.stride for k, v in input_shape] + feature_channels = [v.channels for k, v in input_shape] + + self.ignore_value = ignore_value + self.common_stride = common_stride + self.loss_weight = loss_weight + + self.scale_heads = [] + for in_feature, stride, channels in zip( + self.in_features, feature_strides, feature_channels + ): + head_ops = [] + head_length = max(1, int(np.log2(stride) - np.log2(self.common_stride))) + for k in range(head_length): + norm_module = get_norm(norm, conv_dims) + conv = Conv2d( + channels if k == 0 else conv_dims, + conv_dims, + kernel_size=3, + stride=1, + padding=1, + bias=not norm, + norm=norm_module, + activation=F.relu, + ) + weight_init.c2_msra_fill(conv) + head_ops.append(conv) + if stride != self.common_stride: + head_ops.append( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) + ) + self.scale_heads.append(nn.Sequential(*head_ops)) + self.add_module(in_feature, self.scale_heads[-1]) + self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) + weight_init.c2_msra_fill(self.predictor) + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + return { + "input_shape": { + k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + }, + "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + "conv_dims": cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM, + "common_stride": cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, + "norm": cfg.MODEL.SEM_SEG_HEAD.NORM, + "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, + } + + def forward(self, features, targets=None): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + x = self.layers(features) + if self.training: + return None, self.losses(x, targets) + else: + x = F.interpolate( + x, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + return x, {} + + def layers(self, features): + for i, f in enumerate(self.in_features): + if i == 0: + x = self.scale_heads[i](features[f]) + else: + x = x + self.scale_heads[i](features[f]) + x = self.predictor(x) + return x + + def losses(self, predictions, targets): + predictions = predictions.float() # https://github.com/pytorch/pytorch/issues/48163 + predictions = F.interpolate( + predictions, + scale_factor=self.common_stride, + mode="bilinear", + align_corners=False, + ) + loss = F.cross_entropy( + predictions, targets, reduction="mean", ignore_index=self.ignore_value + ) + losses = {"loss_sem_seg": loss * self.loss_weight} + return losses diff --git a/data_processing/detectron2/detectron2/modeling/mmdet_wrapper.py b/data_processing/detectron2/detectron2/modeling/mmdet_wrapper.py new file mode 100644 index 0000000..293b3e9 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/mmdet_wrapper.py @@ -0,0 +1,273 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import itertools +import logging +import numpy as np +from collections import OrderedDict +from collections.abc import Mapping +from typing import Dict, List, Optional, Tuple, Union +import torch +from omegaconf import DictConfig, OmegaConf +from torch import Tensor, nn + +from detectron2.layers import ShapeSpec +from detectron2.structures import BitMasks, Boxes, ImageList, Instances +from detectron2.utils.events import get_event_storage + +from .backbone import Backbone + +logger = logging.getLogger(__name__) + + +def _to_container(cfg): + """ + mmdet will assert the type of dict/list. + So convert omegaconf objects to dict/list. + """ + if isinstance(cfg, DictConfig): + cfg = OmegaConf.to_container(cfg, resolve=True) + from mmcv.utils import ConfigDict + + return ConfigDict(cfg) + + +class MMDetBackbone(Backbone): + """ + Wrapper of mmdetection backbones to use in detectron2. + + mmdet backbones produce list/tuple of tensors, while detectron2 backbones + produce a dict of tensors. This class wraps the given backbone to produce + output in detectron2's convention, so it can be used in place of detectron2 + backbones. + """ + + def __init__( + self, + backbone: Union[nn.Module, Mapping], + neck: Union[nn.Module, Mapping, None] = None, + *, + output_shapes: List[ShapeSpec], + output_names: Optional[List[str]] = None, + ): + """ + Args: + backbone: either a backbone module or a mmdet config dict that defines a + backbone. The backbone takes a 4D image tensor and returns a + sequence of tensors. + neck: either a backbone module or a mmdet config dict that defines a + neck. The neck takes outputs of backbone and returns a + sequence of tensors. If None, no neck is used. + output_shapes: shape for every output of the backbone (or neck, if given). + stride and channels are often needed. + output_names: names for every output of the backbone (or neck, if given). + By default, will use "out0", "out1", ... + """ + super().__init__() + if isinstance(backbone, Mapping): + from mmdet.models import build_backbone + + backbone = build_backbone(_to_container(backbone)) + self.backbone = backbone + + if isinstance(neck, Mapping): + from mmdet.models import build_neck + + neck = build_neck(_to_container(neck)) + self.neck = neck + + # "Neck" weights, if any, are part of neck itself. This is the interface + # of mmdet so we follow it. Reference: + # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py + logger.info("Initializing mmdet backbone weights...") + self.backbone.init_weights() + # train() in mmdet modules is non-trivial, and has to be explicitly + # called. Reference: + # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py + self.backbone.train() + if self.neck is not None: + logger.info("Initializing mmdet neck weights ...") + if isinstance(self.neck, nn.Sequential): + for m in self.neck: + m.init_weights() + else: + self.neck.init_weights() + self.neck.train() + + self._output_shapes = output_shapes + if not output_names: + output_names = [f"out{i}" for i in range(len(output_shapes))] + self._output_names = output_names + + def forward(self, x) -> Dict[str, Tensor]: + outs = self.backbone(x) + if self.neck is not None: + outs = self.neck(outs) + assert isinstance( + outs, (list, tuple) + ), "mmdet backbone should return a list/tuple of tensors!" 
+ if len(outs) != len(self._output_shapes): + raise ValueError( + "Length of output_shapes does not match outputs from the mmdet backbone: " + f"{len(outs)} != {len(self._output_shapes)}" + ) + return {k: v for k, v in zip(self._output_names, outs)} + + def output_shape(self) -> Dict[str, ShapeSpec]: + return {k: v for k, v in zip(self._output_names, self._output_shapes)} + + +class MMDetDetector(nn.Module): + """ + Wrapper of a mmdetection detector model, for detection and instance segmentation. + Input/output formats of this class follow detectron2's convention, so a + mmdetection model can be trained and evaluated in detectron2. + """ + + def __init__( + self, + detector: Union[nn.Module, Mapping], + *, + # Default is 32 regardless of model: + # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets + size_divisibility=32, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + ): + """ + Args: + detector: a mmdet detector, or a mmdet config dict that defines a detector. + size_divisibility: pad input images to multiple of this number + pixel_mean: per-channel mean to normalize input image + pixel_std: per-channel stddev to normalize input image + """ + super().__init__() + if isinstance(detector, Mapping): + from mmdet.models import build_detector + + detector = build_detector(_to_container(detector)) + self.detector = detector + self.detector.init_weights() + self.size_divisibility = size_divisibility + + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + assert ( + self.pixel_mean.shape == self.pixel_std.shape + ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" 
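+
+    # Illustrative construction sketch (hypothetical values, not part of the
+    # upstream code): ``detector`` may be an mmdet config dict, e.g.
+    #
+    #     model = MMDetDetector(
+    #         detector=mmdet_cfg.model,  # assumes an mmcv Config loaded elsewhere
+    #         pixel_mean=(103.530, 116.280, 123.675),
+    #         pixel_std=(1.0, 1.0, 1.0),
+    #     )
+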
+ + def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor + metas = [] + rescale = {"height" in x for x in batched_inputs} + if len(rescale) != 1: + raise ValueError("Some inputs have original height/width, but some don't!") + rescale = list(rescale)[0] + output_shapes = [] + for input in batched_inputs: + meta = {} + c, h, w = input["image"].shape + meta["img_shape"] = meta["ori_shape"] = (h, w, c) + if rescale: + scale_factor = np.array( + [w / input["width"], h / input["height"]] * 2, dtype="float32" + ) + ori_shape = (input["height"], input["width"]) + output_shapes.append(ori_shape) + meta["ori_shape"] = ori_shape + (c,) + else: + scale_factor = 1.0 + output_shapes.append((h, w)) + meta["scale_factor"] = scale_factor + meta["flip"] = False + padh, padw = images.shape[-2:] + meta["pad_shape"] = (padh, padw, c) + metas.append(meta) + + if self.training: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + if gt_instances[0].has("gt_masks"): + from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks + + def convert_mask(m, shape): + # mmdet mask format + if isinstance(m, BitMasks): + return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) + else: + return mm_PolygonMasks(m.polygons, shape[0], shape[1]) + + gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] + losses_and_metrics = self.detector.forward_train( + images, + metas, + [x.gt_boxes.tensor for x in gt_instances], + [x.gt_classes for x in gt_instances], + gt_masks=gt_masks, + ) + else: + losses_and_metrics = self.detector.forward_train( + images, + metas, + [x.gt_boxes.tensor for x in gt_instances], + [x.gt_classes for x in gt_instances], + ) + return _parse_losses(losses_and_metrics) + else: + results = self.detector.simple_test(images, metas, rescale=rescale) + results = [ + {"instances": _convert_mmdet_result(r, shape)} + for r, shape in zip(results, output_shapes) + ] + return results + + @property + def device(self): + return self.pixel_mean.device + + +# Reference: show_result() in +# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py +def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: + if isinstance(result, tuple): + bbox_result, segm_result = result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] + else: + bbox_result, segm_result = result, None + + bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 + bboxes, scores = bboxes[:, :4], bboxes[:, -1] + labels = [ + torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) + ] + labels = torch.cat(labels) + inst = Instances(shape) + inst.pred_boxes = Boxes(bboxes) + inst.scores = scores + inst.pred_classes = labels + + if segm_result is not None and len(labels) > 0: + segm_result = list(itertools.chain(*segm_result)) + segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] + segm_result = torch.stack(segm_result, dim=0) + inst.pred_masks = segm_result + return inst + + +# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py +def _parse_losses(losses: Dict[str, Tensor]) -> 
Dict[str, Tensor]: + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError(f"{loss_name} is not a tensor or list of tensors") + + if "loss" not in loss_name: + # put metrics to storage; don't return them + storage = get_event_storage() + value = log_vars.pop(loss_name).cpu().item() + storage.put_scalar(loss_name, value) + return log_vars diff --git a/data_processing/detectron2/detectron2/modeling/poolers.py b/data_processing/detectron2/detectron2/modeling/poolers.py new file mode 100644 index 0000000..3393794 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/poolers.py @@ -0,0 +1,263 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List, Optional +import torch +from torch import nn +from torchvision.ops import RoIPool + +from detectron2.layers import ROIAlign, ROIAlignRotated, cat, nonzero_tuple, shapes_to_tensor +from detectron2.structures import Boxes +from detectron2.utils.tracing import assert_fx_safe, is_fx_tracing + +""" +To export ROIPooler to torchscript, in this file, variables that should be annotated with +`Union[List[Boxes], List[RotatedBoxes]]` are only annotated with `List[Boxes]`. + +TODO: Correct these annotations when torchscript support `Union`. +https://github.com/pytorch/pytorch/issues/41412 +""" + +__all__ = ["ROIPooler"] + + +def assign_boxes_to_levels( + box_lists: List[Boxes], + min_level: int, + max_level: int, + canonical_box_size: int, + canonical_level: int, +): + """ + Map each box in `box_lists` to a feature map level index and return the assignment + vector. + + Args: + box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, + where N is the number of images in the batch. + min_level (int): Smallest feature map level index. The input is considered index 0, + the output of stage 1 is index 1, and so. + max_level (int): Largest feature map level index. + canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). + canonical_level (int): The feature map level index on which a canonically-sized box + should be placed. + + Returns: + A tensor of length M, where M is the total number of boxes aggregated over all + N batch images. The memory layout corresponds to the concatenation of boxes + from all images. Each element is the feature map index, as an offset from + `self.min_level`, for the corresponding box (so value i means the box is at + `self.min_level + i`). 
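+
+    Following Eqn.(1) of the FPN paper (as implemented below), a box is
+    assigned to ``floor(canonical_level + log2(sqrt(box_area) / canonical_box_size))``,
+    clamped to ``[min_level, max_level]``. For example, with canonical_level=4
+    and canonical_box_size=224, a 112x112 box maps to ``floor(4 + log2(0.5)) = 3``.
+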
+ """ + box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists])) + # Eqn.(1) in FPN paper + level_assignments = torch.floor( + canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8) + ) + # clamp level to (min, max), in case the box size is too large or too small + # for the available feature maps + level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) + return level_assignments.to(torch.int64) - min_level + + +# script the module to avoid hardcoded device type +@torch.jit.script_if_tracing +def _convert_boxes_to_pooler_format(boxes: torch.Tensor, sizes: torch.Tensor) -> torch.Tensor: + sizes = sizes.to(device=boxes.device) + indices = torch.repeat_interleave( + torch.arange(len(sizes), dtype=boxes.dtype, device=boxes.device), sizes + ) + return cat([indices[:, None], boxes], dim=1) + + +def convert_boxes_to_pooler_format(box_lists: List[Boxes]): + """ + Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops + (see description under Returns). + + Args: + box_lists (list[Boxes] | list[RotatedBoxes]): + A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. + + Returns: + When input is list[Boxes]: + A tensor of shape (M, 5), where M is the total number of boxes aggregated over all + N batch images. + The 5 columns are (batch index, x0, y0, x1, y1), where batch index + is the index in [0, N) identifying which batch image the box with corners at + (x0, y0, x1, y1) comes from. + When input is list[RotatedBoxes]: + A tensor of shape (M, 6), where M is the total number of boxes aggregated over all + N batch images. + The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees), + where batch index is the index in [0, N) identifying which batch image the + rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from. + """ + boxes = torch.cat([x.tensor for x in box_lists], dim=0) + # __len__ returns Tensor in tracing. + sizes = shapes_to_tensor([x.__len__() for x in box_lists]) + return _convert_boxes_to_pooler_format(boxes, sizes) + + +@torch.jit.script_if_tracing +def _create_zeros( + batch_target: Optional[torch.Tensor], + channels: int, + height: int, + width: int, + like_tensor: torch.Tensor, +) -> torch.Tensor: + batches = batch_target.shape[0] if batch_target is not None else 0 + sizes = (batches, channels, height, width) + return torch.zeros(sizes, dtype=like_tensor.dtype, device=like_tensor.device) + + +class ROIPooler(nn.Module): + """ + Region of interest feature map pooler that supports pooling from one or more + feature maps. + """ + + def __init__( + self, + output_size, + scales, + sampling_ratio, + pooler_type, + canonical_box_size=224, + canonical_level=4, + ): + """ + Args: + output_size (int, tuple[int] or list[int]): output size of the pooled region, + e.g., 14 x 14. If tuple or list is given, the length must be 2. + scales (list[float]): The scale for each low-level pooling op relative to + the input image. For a feature map with stride s relative to the input + image, scale is defined as 1/s. The stride must be power of 2. + When there are multiple scales, they must form a pyramid, i.e. they must be + a monotically decreasing geometric sequence with a factor of 1/2. + sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. + pooler_type (string): Name of the type of pooling operation that should be applied. + For instance, "ROIPool" or "ROIAlignV2". + canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). 
The default + is heuristically defined as 224 pixels in the FPN paper (based on ImageNet + pre-training). + canonical_level (int): The feature map level index from which a canonically-sized box + should be placed. The default is defined as level 4 (stride=16) in the FPN paper, + i.e., a box of size 224x224 will be placed on the feature with stride=16. + The box placement for all boxes will be determined from their sizes w.r.t + canonical_box_size. For example, a box whose area is 4x that of a canonical box + should be used to pool features from feature level ``canonical_level+1``. + + Note that the actual input feature maps given to this module may not have + sufficiently many levels for the input boxes. If the boxes are too large or too + small for the input feature maps, the closest level will be used. + """ + super().__init__() + + if isinstance(output_size, int): + output_size = (output_size, output_size) + assert len(output_size) == 2 + assert isinstance(output_size[0], int) and isinstance(output_size[1], int) + self.output_size = output_size + + if pooler_type == "ROIAlign": + self.level_poolers = nn.ModuleList( + ROIAlign( + output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False + ) + for scale in scales + ) + elif pooler_type == "ROIAlignV2": + self.level_poolers = nn.ModuleList( + ROIAlign( + output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True + ) + for scale in scales + ) + elif pooler_type == "ROIPool": + self.level_poolers = nn.ModuleList( + RoIPool(output_size, spatial_scale=scale) for scale in scales + ) + elif pooler_type == "ROIAlignRotated": + self.level_poolers = nn.ModuleList( + ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) + for scale in scales + ) + else: + raise ValueError("Unknown pooler type: {}".format(pooler_type)) + + # Map scale (defined as 1 / stride) to its feature map level under the + # assumption that stride is a power of 2. + min_level = -(math.log2(scales[0])) + max_level = -(math.log2(scales[-1])) + assert math.isclose(min_level, int(min_level)) and math.isclose( + max_level, int(max_level) + ), "Featuremap stride is not power of 2!" + self.min_level = int(min_level) + self.max_level = int(max_level) + assert ( + len(scales) == self.max_level - self.min_level + 1 + ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!" + assert 0 <= self.min_level and self.min_level <= self.max_level + self.canonical_level = canonical_level + assert canonical_box_size > 0 + self.canonical_box_size = canonical_box_size + + def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]): + """ + Args: + x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those + used to construct this module. + box_lists (list[Boxes] | list[RotatedBoxes]): + A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. + The box coordinates are defined on the original image and + will be scaled by the `scales` argument of :class:`ROIPooler`. + + Returns: + Tensor: + A tensor of shape (M, C, output_size, output_size) where M is the total number of + boxes aggregated over all N batch images and C is the number of channels in `x`. 
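+
+        Example (illustrative only; assumes two feature maps with strides 4 and 8):
+            pooler = ROIPooler(output_size=7, scales=(1.0 / 4, 1.0 / 8),
+                               sampling_ratio=0, pooler_type="ROIAlignV2")
+            # features: [p2, p3], each of shape (N, C, Hi, Wi); boxes: a list of N Boxes
+            # in image coordinates; pooler(features, boxes) returns an (M, C, 7, 7) tensor.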
+ """ + num_level_assignments = len(self.level_poolers) + + if not is_fx_tracing(): + torch._assert( + isinstance(x, list) and isinstance(box_lists, list), + "Arguments to pooler must be lists", + ) + assert_fx_safe( + len(x) == num_level_assignments, + "unequal value, num_level_assignments={}, but x is list of {} Tensors".format( + num_level_assignments, len(x) + ), + ) + assert_fx_safe( + len(box_lists) == x[0].size(0), + "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format( + x[0].size(0), len(box_lists) + ), + ) + if len(box_lists) == 0: + return _create_zeros(None, x[0].shape[1], *self.output_size, x[0]) + + pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists) + + if num_level_assignments == 1: + return self.level_poolers[0](x[0], pooler_fmt_boxes) + + level_assignments = assign_boxes_to_levels( + box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level + ) + + num_channels = x[0].shape[1] + output_size = self.output_size[0] + + output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0]) + + for level, pooler in enumerate(self.level_poolers): + inds = nonzero_tuple(level_assignments == level)[0] + pooler_fmt_boxes_level = pooler_fmt_boxes[inds] + # Use index_put_ instead of advance indexing, to avoid pytorch/issues/49852 + output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level)) + + return output diff --git a/data_processing/detectron2/detectron2/modeling/postprocessing.py b/data_processing/detectron2/detectron2/modeling/postprocessing.py new file mode 100644 index 0000000..8451260 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/postprocessing.py @@ -0,0 +1,100 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +from torch.nn import functional as F + +from detectron2.structures import Instances, ROIMasks + + +# perhaps should rename to "resize_instance" +def detector_postprocess( + results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5 +): + """ + Resize the output instances. + The input images are often resized when entering an object detector. + As a result, we often need the outputs of the detector in a different + resolution from its inputs. + + This function will resize the raw outputs of an R-CNN detector + to produce outputs according to the desired output resolution. + + Args: + results (Instances): the raw outputs from the detector. + `results.image_size` contains the input image resolution the detector sees. + This object might be modified in-place. + output_height, output_width: the desired output resolution. + Returns: + Instances: the resized output from the model, based on the output resolution + """ + if isinstance(output_width, torch.Tensor): + # This shape might (but not necessarily) be tensors during tracing. + # Converts integer tensors to float temporaries to ensure true + # division is performed when computing scale_x and scale_y. 
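+        # Keeping new_size as a stacked tensor (rather than a Python tuple) preserves
+        # dynamic output shapes when the model is traced.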
+ output_width_tmp = output_width.float() + output_height_tmp = output_height.float() + new_size = torch.stack([output_height, output_width]) + else: + new_size = (output_height, output_width) + output_width_tmp = output_width + output_height_tmp = output_height + + scale_x, scale_y = ( + output_width_tmp / results.image_size[1], + output_height_tmp / results.image_size[0], + ) + results = Instances(new_size, **results.get_fields()) + + if results.has("pred_boxes"): + output_boxes = results.pred_boxes + elif results.has("proposal_boxes"): + output_boxes = results.proposal_boxes + else: + output_boxes = None + assert output_boxes is not None, "Predictions must contain boxes!" + + output_boxes.scale(scale_x, scale_y) + output_boxes.clip(results.image_size) + + results = results[output_boxes.nonempty()] + + if results.has("pred_masks"): + if isinstance(results.pred_masks, ROIMasks): + roi_masks = results.pred_masks + else: + # pred_masks is a tensor of shape (N, 1, M, M) + roi_masks = ROIMasks(results.pred_masks[:, 0, :, :]) + results.pred_masks = roi_masks.to_bitmasks( + results.pred_boxes, output_height, output_width, mask_threshold + ).tensor # TODO return ROIMasks/BitMask object in the future + + if results.has("pred_keypoints"): + results.pred_keypoints[:, :, 0] *= scale_x + results.pred_keypoints[:, :, 1] *= scale_y + + return results + + +def sem_seg_postprocess(result, img_size, output_height, output_width): + """ + Return semantic segmentation predictions in the original resolution. + + The input images are often resized when entering semantic segmentor. Moreover, in same + cases, they also padded inside segmentor to be divisible by maximum network stride. + As a result, we often need the predictions of the segmentor in a different + resolution from its inputs. + + Args: + result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W), + where C is the number of classes, and H, W are the height and width of the prediction. + img_size (tuple): image size that segmentor is taking as input. + output_height, output_width: the desired output resolution. + + Returns: + semantic segmentation prediction (Tensor): A tensor of the shape + (C, output_height, output_width) that contains per-pixel soft predictions. + """ + result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1) + result = F.interpolate( + result, size=(output_height, output_width), mode="bilinear", align_corners=False + )[0] + return result diff --git a/data_processing/detectron2/detectron2/modeling/proposal_generator/__init__.py b/data_processing/detectron2/detectron2/modeling/proposal_generator/__init__.py new file mode 100644 index 0000000..3f4e4df --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/proposal_generator/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator +from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead + +__all__ = list(globals().keys()) diff --git a/data_processing/detectron2/detectron2/modeling/proposal_generator/build.py b/data_processing/detectron2/detectron2/modeling/proposal_generator/build.py new file mode 100644 index 0000000..34eb12d --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/proposal_generator/build.py @@ -0,0 +1,24 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from detectron2.utils.registry import Registry + +PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") +PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ +Registry for proposal generator, which produces object proposals from feature maps. + +The registered object will be called with `obj(cfg, input_shape)`. +The call should return a `nn.Module` object. +""" + +from . import rpn, rrpn # noqa F401 isort:skip + + +def build_proposal_generator(cfg, input_shape): + """ + Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. + The name can be "PrecomputedProposals" to use no proposal generator. + """ + name = cfg.MODEL.PROPOSAL_GENERATOR.NAME + if name == "PrecomputedProposals": + return None + + return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape) diff --git a/data_processing/detectron2/detectron2/modeling/proposal_generator/proposal_utils.py b/data_processing/detectron2/detectron2/modeling/proposal_generator/proposal_utils.py new file mode 100644 index 0000000..0fdf5dc --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/proposal_generator/proposal_utils.py @@ -0,0 +1,205 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import math +from typing import List, Tuple, Union +import torch + +from detectron2.layers import batched_nms, cat, move_device_like +from detectron2.structures import Boxes, Instances + +logger = logging.getLogger(__name__) + + +def _is_tracing(): + # (fixed in TORCH_VERSION >= 1.9) + if torch.jit.is_scripting(): + # https://github.com/pytorch/pytorch/issues/47379 + return False + else: + return torch.jit.is_tracing() + + +def find_top_rpn_proposals( + proposals: List[torch.Tensor], + pred_objectness_logits: List[torch.Tensor], + image_sizes: List[Tuple[int, int]], + nms_thresh: float, + pre_nms_topk: int, + post_nms_topk: int, + min_box_size: float, + training: bool, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` + highest scoring proposals among all the feature maps for each image. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). + image_sizes (list[tuple]): sizes (h, w) for each image + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_size (float): minimum proposal box side length in pixels (absolute units + wrt input images). + training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + list[Instances]: list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i, sorted by their + objectness score in descending order. + """ + num_images = len(image_sizes) + device = ( + proposals[0].device + if torch.jit.is_scripting() + else ("cpu" if torch.jit.is_tracing() else proposals[0].device) + ) + + # 1. 
Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = move_device_like(torch.arange(num_images, device=device), proposals[0]) + for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)): + Hi_Wi_A = logits_i.shape[1] + if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing + num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk) + else: + num_proposals_i = min(Hi_Wi_A, pre_nms_topk) + + topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append( + move_device_like( + torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device), + proposals[0], + ) + ) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. + results: List[Instances] = [] + for n, image_size in enumerate(image_sizes): + boxes = Boxes(topk_proposals[n]) + scores_per_img = topk_scores[n] + lvl = level_ids + + valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img) + if not valid_mask.all(): + if training: + raise FloatingPointError( + "Predicted boxes or scores contain Inf/NaN. Training has diverged." + ) + boxes = boxes[valid_mask] + scores_per_img = scores_per_img[valid_mask] + lvl = lvl[valid_mask] + boxes.clip(image_size) + + # filter empty boxes + keep = boxes.nonempty(threshold=min_box_size) + if _is_tracing() or keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep] + + keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh) + # In Detectron1, there was different behavior during training vs. testing. + # (https://github.com/facebookresearch/Detectron/issues/459) + # During training, topk is over the proposals from *all* images in the training batch. + # During testing, it is over the proposals for each image separately. + # As a result, the training behavior becomes batch-dependent, + # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. + # This bug is addressed in Detectron2 to make the behavior independent of batch size. + keep = keep[:post_nms_topk] # keep is already sorted + + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] + results.append(res) + return results + + +def add_ground_truth_to_proposals( + gt: Union[List[Instances], List[Boxes]], proposals: List[Instances] +) -> List[Instances]: + """ + Call `add_ground_truth_to_proposals_single_image` for all images. + + Args: + gt(Union[List[Instances], List[Boxes]): list of N elements. Element i is a Instances + representing the ground-truth for image i. + proposals (list[Instances]): list of N elements. Element i is a Instances + representing the proposals for image i. + + Returns: + list[Instances]: list of N Instances. Each is the proposals for the image, + with field "proposal_boxes" and "objectness_logits". 
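+
+        For example (illustrative numbers), with proposals from 4 feature maps and
+        pre_nms_topk=1000, up to 4000 candidates per image enter NMS; post_nms_topk
+        then caps the total number of proposals kept per image afterwards.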
+ """ + assert gt is not None + + if len(proposals) != len(gt): + raise ValueError("proposals and gt should have the same length as the number of images!") + if len(proposals) == 0: + return proposals + + return [ + add_ground_truth_to_proposals_single_image(gt_i, proposals_i) + for gt_i, proposals_i in zip(gt, proposals) + ] + + +def add_ground_truth_to_proposals_single_image( + gt: Union[Instances, Boxes], proposals: Instances +) -> Instances: + """ + Augment `proposals` with `gt`. + + Args: + Same as `add_ground_truth_to_proposals`, but with gt and proposals + per image. + + Returns: + Same as `add_ground_truth_to_proposals`, but for only one image. + """ + if isinstance(gt, Boxes): + # convert Boxes to Instances + gt = Instances(proposals.image_size, gt_boxes=gt) + + gt_boxes = gt.gt_boxes + device = proposals.objectness_logits.device + # Assign all ground-truth boxes an objectness logit corresponding to + # P(object) = sigmoid(logit) =~ 1. + gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10))) + gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device) + + # Concatenating gt_boxes with proposals requires them to have the same fields + gt_proposal = Instances(proposals.image_size, **gt.get_fields()) + gt_proposal.proposal_boxes = gt_boxes + gt_proposal.objectness_logits = gt_logits + + for key in proposals.get_fields().keys(): + assert gt_proposal.has( + key + ), "The attribute '{}' in `proposals` does not exist in `gt`".format(key) + + # NOTE: Instances.cat only use fields from the first item. Extra fields in latter items + # will be thrown away. + new_proposals = Instances.cat([proposals, gt_proposal]) + + return new_proposals diff --git a/data_processing/detectron2/detectron2/modeling/proposal_generator/rpn.py b/data_processing/detectron2/detectron2/modeling/proposal_generator/rpn.py new file mode 100644 index 0000000..99cd536 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/proposal_generator/rpn.py @@ -0,0 +1,533 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import Dict, List, Optional, Tuple, Union +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ShapeSpec, cat +from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage +from detectron2.utils.memory import retry_if_cuda_oom +from detectron2.utils.registry import Registry + +from ..anchor_generator import build_anchor_generator +from ..box_regression import Box2BoxTransform, _dense_box_regression_loss +from ..matcher import Matcher +from ..sampling import subsample_labels +from .build import PROPOSAL_GENERATOR_REGISTRY +from .proposal_utils import find_top_rpn_proposals + +RPN_HEAD_REGISTRY = Registry("RPN_HEAD") +RPN_HEAD_REGISTRY.__doc__ = """ +Registry for RPN heads, which take feature maps and perform +objectness classification and bounding box regression for anchors. + +The registered object will be called with `obj(cfg, input_shape)`. +The call should return a `nn.Module` object. +""" + + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + L: number of feature maps per image on which RPN is run + A: number of cell anchors (must be the same for all feature maps) + Hi, Wi: height and width of the i-th feature map + B: size of the box parameterization + +Naming convention: + + objectness: refers to the binary classification of an anchor as object vs. not object. 
+ + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`), or 5d for rotated boxes. + + pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use + sigmoid(pred_objectness_logits) to estimate P(object). + + gt_labels: ground-truth binary classification labels for objectness + + pred_anchor_deltas: predicted box2box transform deltas + + gt_anchor_deltas: ground-truth box2box transform deltas +""" + + +def build_rpn_head(cfg, input_shape): + """ + Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`. + """ + name = cfg.MODEL.RPN.HEAD_NAME + return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +@RPN_HEAD_REGISTRY.register() +class StandardRPNHead(nn.Module): + """ + Standard RPN classification and regression heads described in :paper:`Faster R-CNN`. + Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts + objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas + specifying how to deform each anchor into an object proposal. + """ + + @configurable + def __init__( + self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,) + ): + """ + NOTE: this interface is experimental. + + Args: + in_channels (int): number of input feature channels. When using multiple + input features, they must have the same number of channels. + num_anchors (int): number of anchors to predict for *each spatial position* + on the feature map. The total number of anchors for each + feature map will be `num_anchors * H * W`. + box_dim (int): dimension of a box, which is also the number of box regression + predictions to make for each anchor. An axis aligned box has + box_dim=4, while a rotated box has box_dim=5. + conv_dims (list[int]): a list of integers representing the output channels + of N conv layers. Set it to -1 to use the same number of output channels + as input channels. + """ + super().__init__() + cur_channels = in_channels + # Keeping the old variable names and structure for backwards compatiblity. + # Otherwise the old checkpoints will fail to load. + if len(conv_dims) == 1: + out_channels = cur_channels if conv_dims[0] == -1 else conv_dims[0] + # 3x3 conv for the hidden representation + self.conv = self._get_rpn_conv(cur_channels, out_channels) + cur_channels = out_channels + else: + self.conv = nn.Sequential() + for k, conv_dim in enumerate(conv_dims): + out_channels = cur_channels if conv_dim == -1 else conv_dim + if out_channels <= 0: + raise ValueError( + f"Conv output channels should be greater than 0. Got {out_channels}" + ) + conv = self._get_rpn_conv(cur_channels, out_channels) + self.conv.add_module(f"conv{k}", conv) + cur_channels = out_channels + # 1x1 conv for predicting objectness logits + self.objectness_logits = nn.Conv2d(cur_channels, num_anchors, kernel_size=1, stride=1) + # 1x1 conv for predicting box2box transform deltas + self.anchor_deltas = nn.Conv2d(cur_channels, num_anchors * box_dim, kernel_size=1, stride=1) + + # Keeping the order of weights initialization same for backwards compatiblility. 
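+        # All conv layers created above (the shared 3x3 conv and the two 1x1 prediction
+        # convs) are initialized below with normal(std=0.01) weights and zero bias.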
+ for layer in self.modules(): + if isinstance(layer, nn.Conv2d): + nn.init.normal_(layer.weight, std=0.01) + nn.init.constant_(layer.bias, 0) + + def _get_rpn_conv(self, in_channels, out_channels): + return Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + activation=nn.ReLU(), + ) + + @classmethod + def from_config(cls, cfg, input_shape): + # Standard RPN is shared across levels: + in_channels = [s.channels for s in input_shape] + assert len(set(in_channels)) == 1, "Each level must have the same channel!" + in_channels = in_channels[0] + + # RPNHead should take the same input as anchor generator + # NOTE: it assumes that creating an anchor generator does not have unwanted side effect. + anchor_generator = build_anchor_generator(cfg, input_shape) + num_anchors = anchor_generator.num_anchors + box_dim = anchor_generator.box_dim + assert ( + len(set(num_anchors)) == 1 + ), "Each level must have the same number of anchors per spatial position" + return { + "in_channels": in_channels, + "num_anchors": num_anchors[0], + "box_dim": box_dim, + "conv_dims": cfg.MODEL.RPN.CONV_DIMS, + } + + def forward(self, features: List[torch.Tensor]): + """ + Args: + features (list[Tensor]): list of feature maps + + Returns: + list[Tensor]: A list of L elements. + Element i is a tensor of shape (N, A, Hi, Wi) representing + the predicted objectness logits for all anchors. A is the number of cell anchors. + list[Tensor]: A list of L elements. Element i is a tensor of shape + (N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors + to proposals. + """ + pred_objectness_logits = [] + pred_anchor_deltas = [] + for x in features: + t = self.conv(x) + pred_objectness_logits.append(self.objectness_logits(t)) + pred_anchor_deltas.append(self.anchor_deltas(t)) + return pred_objectness_logits, pred_anchor_deltas + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RPN(nn.Module): + """ + Region Proposal Network, introduced by :paper:`Faster R-CNN`. + """ + + @configurable + def __init__( + self, + *, + in_features: List[str], + head: nn.Module, + anchor_generator: nn.Module, + anchor_matcher: Matcher, + box2box_transform: Box2BoxTransform, + batch_size_per_image: int, + positive_fraction: float, + pre_nms_topk: Tuple[float, float], + post_nms_topk: Tuple[float, float], + nms_thresh: float = 0.7, + min_box_size: float = 0.0, + anchor_boundary_thresh: float = -1.0, + loss_weight: Union[float, Dict[str, float]] = 1.0, + box_reg_loss_type: str = "smooth_l1", + smooth_l1_beta: float = 0.0, + ): + """ + NOTE: this interface is experimental. + + Args: + in_features (list[str]): list of names of input features to use + head (nn.Module): a module that predicts logits and regression deltas + for each level from a list of per-level features + anchor_generator (nn.Module): a module that creates anchors from a + list of features. Usually an instance of :class:`AnchorGenerator` + anchor_matcher (Matcher): label the anchors by matching them with ground truth. + box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to + instance boxes + batch_size_per_image (int): number of anchors per image to sample for training + positive_fraction (float): fraction of foreground anchors to sample for training + pre_nms_topk (tuple[float]): (train, test) that represents the + number of top k proposals to select before NMS, in + training and testing. 
+ post_nms_topk (tuple[float]): (train, test) that represents the + number of top k proposals to select after NMS, in + training and testing. + nms_thresh (float): NMS threshold used to de-duplicate the predicted proposals + min_box_size (float): remove proposal boxes with any side smaller than this threshold, + in the unit of input image pixels + anchor_boundary_thresh (float): legacy option + loss_weight (float|dict): weights to use for losses. Can be single float for weighting + all rpn losses together, or a dict of individual weightings. Valid dict keys are: + "loss_rpn_cls" - applied to classification loss + "loss_rpn_loc" - applied to box regression loss + box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou". + smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to + use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1" + """ + super().__init__() + self.in_features = in_features + self.rpn_head = head + self.anchor_generator = anchor_generator + self.anchor_matcher = anchor_matcher + self.box2box_transform = box2box_transform + self.batch_size_per_image = batch_size_per_image + self.positive_fraction = positive_fraction + # Map from self.training state to train/test settings + self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]} + self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]} + self.nms_thresh = nms_thresh + self.min_box_size = float(min_box_size) + self.anchor_boundary_thresh = anchor_boundary_thresh + if isinstance(loss_weight, float): + loss_weight = {"loss_rpn_cls": loss_weight, "loss_rpn_loc": loss_weight} + self.loss_weight = loss_weight + self.box_reg_loss_type = box_reg_loss_type + self.smooth_l1_beta = smooth_l1_beta + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + in_features = cfg.MODEL.RPN.IN_FEATURES + ret = { + "in_features": in_features, + "min_box_size": cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE, + "nms_thresh": cfg.MODEL.RPN.NMS_THRESH, + "batch_size_per_image": cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, + "positive_fraction": cfg.MODEL.RPN.POSITIVE_FRACTION, + "loss_weight": { + "loss_rpn_cls": cfg.MODEL.RPN.LOSS_WEIGHT, + "loss_rpn_loc": cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT, + }, + "anchor_boundary_thresh": cfg.MODEL.RPN.BOUNDARY_THRESH, + "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS), + "box_reg_loss_type": cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE, + "smooth_l1_beta": cfg.MODEL.RPN.SMOOTH_L1_BETA, + } + + ret["pre_nms_topk"] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST) + ret["post_nms_topk"] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST) + + ret["anchor_generator"] = build_anchor_generator(cfg, [input_shape[f] for f in in_features]) + ret["anchor_matcher"] = Matcher( + cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True + ) + ret["head"] = build_rpn_head(cfg, [input_shape[f] for f in in_features]) + return ret + + def _subsample_labels(self, label): + """ + Randomly sample a subset of positive and negative examples, and overwrite + the label vector to the ignore value (-1) for all elements that are not + included in the sample. + + Args: + labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. 
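+
+        Returns:
+            Tensor: the same label vector, relabeled so that entries left out of the
+            sample are -1, sampled negatives are 0, and sampled positives are 1.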
+ """ + pos_idx, neg_idx = subsample_labels( + label, self.batch_size_per_image, self.positive_fraction, 0 + ) + # Fill with the ignore label (-1), then set positive and negative labels + label.fill_(-1) + label.scatter_(0, pos_idx, 1) + label.scatter_(0, neg_idx, 0) + return label + + @torch.jit.unused + @torch.no_grad() + def label_and_sample_anchors( + self, anchors: List[Boxes], gt_instances: List[Instances] + ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: + """ + Args: + anchors (list[Boxes]): anchors for each feature map. + gt_instances: the ground-truth instances for each image. + + Returns: + list[Tensor]: + List of #img tensors. i-th element is a vector of labels whose length is + the total number of anchors across all feature maps R = sum(Hi * Wi * A). + Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative + class; 1 = positive class. + list[Tensor]: + i-th element is a Rx4 tensor. The values are the matched gt boxes for each + anchor. Values are undefined for those anchors not labeled as 1. + """ + anchors = Boxes.cat(anchors) + + gt_boxes = [x.gt_boxes for x in gt_instances] + image_sizes = [x.image_size for x in gt_instances] + del gt_instances + + gt_labels = [] + matched_gt_boxes = [] + for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes): + """ + image_size_i: (h, w) for the i-th image + gt_boxes_i: ground-truth boxes for i-th image + """ + + match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors) + matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) + # Matching is memory-expensive and may result in CPU tensors. But the result is small + gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) + del match_quality_matrix + + if self.anchor_boundary_thresh >= 0: + # Discard anchors that go out of the boundaries of the image + # NOTE: This is legacy functionality that is turned off by default in Detectron2 + anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh) + gt_labels_i[~anchors_inside_image] = -1 + + # A vector of labels (-1, 0, 1) for each anchor + gt_labels_i = self._subsample_labels(gt_labels_i) + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + else: + # TODO wasted indexing computation for ignored boxes + matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor + + gt_labels.append(gt_labels_i) # N,AHW + matched_gt_boxes.append(matched_gt_boxes_i) + return gt_labels, matched_gt_boxes + + @torch.jit.unused + def losses( + self, + anchors: List[Boxes], + pred_objectness_logits: List[torch.Tensor], + gt_labels: List[torch.Tensor], + pred_anchor_deltas: List[torch.Tensor], + gt_boxes: List[torch.Tensor], + ) -> Dict[str, torch.Tensor]: + """ + Return the losses from a set of RPN predictions and their associated ground-truth. + + Args: + anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each + has shape (Hi*Wi*A, B), where B is box dimension (4 or 5). + pred_objectness_logits (list[Tensor]): A list of L elements. + Element i is a tensor of shape (N, Hi*Wi*A) representing + the predicted objectness logits for all anchors. + gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`. + pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape + (N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors + to proposals. 
+ gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`. + + Returns: + dict[loss name -> loss value]: A dict mapping from loss name to loss value. + Loss names are: `loss_rpn_cls` for objectness classification and + `loss_rpn_loc` for proposal localization. + """ + num_images = len(gt_labels) + gt_labels = torch.stack(gt_labels) # (N, sum(Hi*Wi*Ai)) + + # Log the number of positive/negative anchors per-image that's used in training + pos_mask = gt_labels == 1 + num_pos_anchors = pos_mask.sum().item() + num_neg_anchors = (gt_labels == 0).sum().item() + storage = get_event_storage() + storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images) + storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images) + + localization_loss = _dense_box_regression_loss( + anchors, + self.box2box_transform, + pred_anchor_deltas, + gt_boxes, + pos_mask, + box_reg_loss_type=self.box_reg_loss_type, + smooth_l1_beta=self.smooth_l1_beta, + ) + + valid_mask = gt_labels >= 0 + objectness_loss = F.binary_cross_entropy_with_logits( + cat(pred_objectness_logits, dim=1)[valid_mask], + gt_labels[valid_mask].to(torch.float32), + reduction="sum", + ) + normalizer = self.batch_size_per_image * num_images + losses = { + "loss_rpn_cls": objectness_loss / normalizer, + # The original Faster R-CNN paper uses a slightly different normalizer + # for loc loss. But it doesn't matter in practice + "loss_rpn_loc": localization_loss / normalizer, + } + losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()} + return losses + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + gt_instances: Optional[List[Instances]] = None, + ): + """ + Args: + images (ImageList): input images of length `N` + features (dict[str, Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + gt_instances (list[Instances], optional): a length `N` list of `Instances`s. + Each `Instances` stores ground-truth instances for the corresponding image. + + Returns: + proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits" + loss: dict[Tensor] or None + """ + features = [features[f] for f in self.in_features] + anchors = self.anchor_generator(features) + + pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) + # Transpose the Hi*Wi*A dimension to the middle: + pred_objectness_logits = [ + # (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) + score.permute(0, 2, 3, 1).flatten(1) + for score in pred_objectness_logits + ] + pred_anchor_deltas = [ + # (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B) + x.view(x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1]) + .permute(0, 3, 4, 1, 2) + .flatten(1, -2) + for x in pred_anchor_deltas + ] + + if self.training: + assert gt_instances is not None, "RPN requires gt_instances in training!" 
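+            # Label anchors against the ground truth and compute the RPN losses
+            # (objectness classification and anchor-delta regression).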
+ gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances) + losses = self.losses( + anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes + ) + else: + losses = {} + proposals = self.predict_proposals( + anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes + ) + return proposals, losses + + def predict_proposals( + self, + anchors: List[Boxes], + pred_objectness_logits: List[torch.Tensor], + pred_anchor_deltas: List[torch.Tensor], + image_sizes: List[Tuple[int, int]], + ): + """ + Decode all the predicted box regression deltas to proposals. Find the top proposals + by applying NMS and removing boxes that are too small. + + Returns: + proposals (list[Instances]): list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i, sorted by their + objectness score in descending order. + """ + # The proposals are treated as fixed for joint training with roi heads. + # This approach ignores the derivative w.r.t. the proposal boxes’ coordinates that + # are also network responses. + with torch.no_grad(): + pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas) + return find_top_rpn_proposals( + pred_proposals, + pred_objectness_logits, + image_sizes, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_size, + self.training, + ) + + def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]): + """ + Transform anchors into proposals by applying the predicted anchor deltas. + + Returns: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape + (N, Hi*Wi*A, B) + """ + N = pred_anchor_deltas[0].shape[0] + proposals = [] + # For each feature map + for anchors_i, pred_anchor_deltas_i in zip(anchors, pred_anchor_deltas): + B = anchors_i.tensor.size(1) + pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B) + # Expand anchors to shape (N*Hi*Wi*A, B) + anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B) + proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) + # Append feature map proposals with shape (N, Hi*Wi*A, B) + proposals.append(proposals_i.view(N, -1, B)) + return proposals diff --git a/data_processing/detectron2/detectron2/modeling/proposal_generator/rrpn.py b/data_processing/detectron2/detectron2/modeling/proposal_generator/rrpn.py new file mode 100644 index 0000000..1a3cd28 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/proposal_generator/rrpn.py @@ -0,0 +1,209 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import logging +from typing import Dict, List +import torch + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec, batched_nms_rotated, cat +from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated +from detectron2.utils.memory import retry_if_cuda_oom + +from ..box_regression import Box2BoxTransformRotated +from .build import PROPOSAL_GENERATOR_REGISTRY +from .proposal_utils import _is_tracing +from .rpn import RPN + +logger = logging.getLogger(__name__) + + +def find_top_rrpn_proposals( + proposals, + pred_objectness_logits, + image_sizes, + nms_thresh, + pre_nms_topk, + post_nms_topk, + min_box_size, + training, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. 
Return the `post_nms_topk` + highest scoring proposals among all the feature maps if `training` is True, + otherwise, returns the highest `post_nms_topk` scoring proposals for each + feature map. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). + image_sizes (list[tuple]): sizes (h, w) for each image + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. + When RRPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RRPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_size(float): minimum proposal box side length in pixels (absolute units wrt + input images). + training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + proposals (list[Instances]): list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i. + """ + num_images = len(image_sizes) + device = proposals[0].device + + # 1. Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = torch.arange(num_images, device=device) + for level_id, proposals_i, logits_i in zip( + itertools.count(), proposals, pred_objectness_logits + ): + Hi_Wi_A = logits_i.shape[1] + if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing + num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk) + else: + num_proposals_i = min(Hi_Wi_A, pre_nms_topk) + + topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. + results = [] + for n, image_size in enumerate(image_sizes): + boxes = RotatedBoxes(topk_proposals[n]) + scores_per_img = topk_scores[n] + lvl = level_ids + + valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img) + if not valid_mask.all(): + if training: + raise FloatingPointError( + "Predicted boxes or scores contain Inf/NaN. Training has diverged." + ) + boxes = boxes[valid_mask] + scores_per_img = scores_per_img[valid_mask] + lvl = lvl[valid_mask] + boxes.clip(image_size) + + # filter empty boxes + keep = boxes.nonempty(threshold=min_box_size) + if _is_tracing() or keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], lvl[keep]) + + keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh) + # In Detectron1, there was different behavior during training vs. testing. 
+ # (https://github.com/facebookresearch/Detectron/issues/459) + # During training, topk is over the proposals from *all* images in the training batch. + # During testing, it is over the proposals for each image separately. + # As a result, the training behavior becomes batch-dependent, + # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. + # This bug is addressed in Detectron2 to make the behavior independent of batch size. + keep = keep[:post_nms_topk] + + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] + results.append(res) + return results + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RRPN(RPN): + """ + Rotated Region Proposal Network described in :paper:`RRPN`. + """ + + @configurable + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.anchor_boundary_thresh >= 0: + raise NotImplementedError( + "anchor_boundary_thresh is a legacy option not implemented for RRPN." + ) + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + ret = super().from_config(cfg, input_shape) + ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + return ret + + @torch.no_grad() + def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]): + """ + Args: + anchors (list[RotatedBoxes]): anchors for each feature map. + gt_instances: the ground-truth instances for each image. + + Returns: + list[Tensor]: + List of #img tensors. i-th element is a vector of labels whose length is + the total number of anchors across feature maps. Label values are in {-1, 0, 1}, + with meanings: -1 = ignore; 0 = negative class; 1 = positive class. + list[Tensor]: + i-th element is a Nx5 tensor, where N is the total number of anchors across + feature maps. The values are the matched gt boxes for each anchor. + Values are undefined for those anchors not labeled as 1. + """ + anchors = RotatedBoxes.cat(anchors) + + gt_boxes = [x.gt_boxes for x in gt_instances] + del gt_instances + + gt_labels = [] + matched_gt_boxes = [] + for gt_boxes_i in gt_boxes: + """ + gt_boxes_i: ground-truth boxes for i-th image + """ + match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors) + matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) + # Matching is memory-expensive and may result in CPU tensors. 
But the result is small + gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) + + # A vector of labels (-1, 0, 1) for each anchor + gt_labels_i = self._subsample_labels(gt_labels_i) + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + else: + # TODO wasted indexing computation for ignored boxes + matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor + + gt_labels.append(gt_labels_i) # N,AHW + matched_gt_boxes.append(matched_gt_boxes_i) + return gt_labels, matched_gt_boxes + + @torch.no_grad() + def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes): + pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas) + return find_top_rrpn_proposals( + pred_proposals, + pred_objectness_logits, + image_sizes, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_size, + self.training, + ) diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/__init__.py b/data_processing/detectron2/detectron2/modeling/roi_heads/__init__.py new file mode 100644 index 0000000..d13e9c5 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead +from .keypoint_head import ( + ROI_KEYPOINT_HEAD_REGISTRY, + build_keypoint_head, + BaseKeypointRCNNHead, + KRCNNConvDeconvUpsampleHead, +) +from .mask_head import ( + ROI_MASK_HEAD_REGISTRY, + build_mask_head, + BaseMaskRCNNHead, + MaskRCNNConvUpsampleHead, +) +from .roi_heads import ( + ROI_HEADS_REGISTRY, + ROIHeads, + Res5ROIHeads, + StandardROIHeads, + build_roi_heads, + select_foreground_proposals, +) +from .cascade_rcnn import CascadeROIHeads +from .rotated_fast_rcnn import RROIHeads +from .fast_rcnn import FastRCNNOutputLayers + +from . import cascade_rcnn # isort:skip + +__all__ = list(globals().keys()) diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/box_head.py b/data_processing/detectron2/detectron2/modeling/roi_heads/box_head.py new file mode 100644 index 0000000..5d0370b --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/box_head.py @@ -0,0 +1,118 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import List +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ShapeSpec, get_norm +from detectron2.utils.registry import Registry + +__all__ = ["FastRCNNConvFCHead", "build_box_head", "ROI_BOX_HEAD_REGISTRY"] + +ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD") +ROI_BOX_HEAD_REGISTRY.__doc__ = """ +Registry for box heads, which make box predictions from per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +# To get torchscript support, we make the head a subclass of `nn.Sequential`. +# Therefore, to add new layers in this head class, please make sure they are +# added in the order they will be used in forward(). +@ROI_BOX_HEAD_REGISTRY.register() +class FastRCNNConvFCHead(nn.Sequential): + """ + A head with several 3x3 conv layers (each followed by norm & relu) and then + several fc layers (each followed by relu). 
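+
+    Example (illustrative; mirrors the common no-conv, two-fc box head):
+        head = FastRCNNConvFCHead(
+            ShapeSpec(channels=256, height=7, width=7), conv_dims=[], fc_dims=[1024, 1024]
+        )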
+ """ + + @configurable + def __init__( + self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm="" + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature. + conv_dims (list[int]): the output dimensions of the conv layers + fc_dims (list[int]): the output dimensions of the fc layers + conv_norm (str or callable): normalization for the conv layers. + See :func:`detectron2.layers.get_norm` for supported types. + """ + super().__init__() + assert len(conv_dims) + len(fc_dims) > 0 + + self._output_size = (input_shape.channels, input_shape.height, input_shape.width) + + self.conv_norm_relus = [] + for k, conv_dim in enumerate(conv_dims): + conv = Conv2d( + self._output_size[0], + conv_dim, + kernel_size=3, + padding=1, + bias=not conv_norm, + norm=get_norm(conv_norm, conv_dim), + activation=nn.ReLU(), + ) + self.add_module("conv{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + self._output_size = (conv_dim, self._output_size[1], self._output_size[2]) + + self.fcs = [] + for k, fc_dim in enumerate(fc_dims): + if k == 0: + self.add_module("flatten", nn.Flatten()) + fc = nn.Linear(int(np.prod(self._output_size)), fc_dim) + self.add_module("fc{}".format(k + 1), fc) + self.add_module("fc_relu{}".format(k + 1), nn.ReLU()) + self.fcs.append(fc) + self._output_size = fc_dim + + for layer in self.conv_norm_relus: + weight_init.c2_msra_fill(layer) + for layer in self.fcs: + weight_init.c2_xavier_fill(layer) + + @classmethod + def from_config(cls, cfg, input_shape): + num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV + conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM + num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC + fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM + return { + "input_shape": input_shape, + "conv_dims": [conv_dim] * num_conv, + "fc_dims": [fc_dim] * num_fc, + "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM, + } + + def forward(self, x): + for layer in self: + x = layer(x) + return x + + @property + @torch.jit.unused + def output_shape(self): + """ + Returns: + ShapeSpec: the output feature shape + """ + o = self._output_size + if isinstance(o, int): + return ShapeSpec(channels=o) + else: + return ShapeSpec(channels=o[0], height=o[1], width=o[2]) + + +def build_box_head(cfg, input_shape): + """ + Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_BOX_HEAD.NAME + return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/cascade_rcnn.py b/data_processing/detectron2/detectron2/modeling/roi_heads/cascade_rcnn.py new file mode 100644 index 0000000..a0ca70f --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/cascade_rcnn.py @@ -0,0 +1,299 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
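+# Cascade R-CNN ROI heads: a sequence of box heads, each matched to ground truth with
+# its own (typically increasing) IoU threshold, so that later stages refine the boxes
+# predicted by earlier ones.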
+from typing import List +import torch +from torch import nn +from torch.autograd.function import Function + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec +from detectron2.structures import Boxes, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage + +from ..box_regression import Box2BoxTransform +from ..matcher import Matcher +from ..poolers import ROIPooler +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference +from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads + + +class _ScaleGradient(Function): + @staticmethod + def forward(ctx, input, scale): + ctx.scale = scale + return input + + @staticmethod + def backward(ctx, grad_output): + return grad_output * ctx.scale, None + + +@ROI_HEADS_REGISTRY.register() +class CascadeROIHeads(StandardROIHeads): + """ + The ROI heads that implement :paper:`Cascade R-CNN`. + """ + + @configurable + def __init__( + self, + *, + box_in_features: List[str], + box_pooler: ROIPooler, + box_heads: List[nn.Module], + box_predictors: List[nn.Module], + proposal_matchers: List[Matcher], + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + box_pooler (ROIPooler): pooler that extracts region features from given boxes + box_heads (list[nn.Module]): box head for each cascade stage + box_predictors (list[nn.Module]): box predictor for each cascade stage + proposal_matchers (list[Matcher]): matcher with different IoU thresholds to + match boxes with ground truth for each stage. The first matcher matches + RPN proposals with ground truth, the other matchers use boxes predicted + by the previous stage as proposals and match them with ground truth. + """ + assert "proposal_matcher" not in kwargs, ( + "CascadeROIHeads takes 'proposal_matchers=' for each stage instead " + "of one 'proposal_matcher='." + ) + # The first matcher matches RPN proposals with ground truth, done in the base class + kwargs["proposal_matcher"] = proposal_matchers[0] + num_stages = self.num_cascade_stages = len(box_heads) + box_heads = nn.ModuleList(box_heads) + box_predictors = nn.ModuleList(box_predictors) + assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!" + assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!" + super().__init__( + box_in_features=box_in_features, + box_pooler=box_pooler, + box_head=box_heads, + box_predictor=box_predictors, + **kwargs, + ) + self.proposal_matchers = proposal_matchers + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + ret.pop("proposal_matcher") + return ret + + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS + cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS + assert len(cascade_bbox_reg_weights) == len(cascade_ious) + assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \ + "CascadeROIHeads only support class-agnostic regression now!" 
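+        # The first cascade stage labels RPN proposals, so its IoU threshold must match
+        # the one configured for the base ROI heads.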
+ assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0] + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + pooled_shape = ShapeSpec( + channels=in_channels, width=pooler_resolution, height=pooler_resolution + ) + + box_heads, box_predictors, proposal_matchers = [], [], [] + for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights): + box_head = build_box_head(cfg, pooled_shape) + box_heads.append(box_head) + box_predictors.append( + FastRCNNOutputLayers( + cfg, + box_head.output_shape, + box2box_transform=Box2BoxTransform(weights=bbox_reg_weights), + ) + ) + proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False)) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_heads": box_heads, + "box_predictors": box_predictors, + "proposal_matchers": proposal_matchers, + } + + def forward(self, images, features, proposals, targets=None): + del images + if self.training: + proposals = self.label_and_sample_proposals(proposals, targets) + + if self.training: + # Need targets to box head + losses = self._forward_box(features, proposals, targets) + losses.update(self._forward_mask(features, proposals)) + losses.update(self._forward_keypoint(features, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features, proposals) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def _forward_box(self, features, proposals, targets=None): + """ + Args: + features, targets: the same as in + Same as in :meth:`ROIHeads.forward`. + proposals (list[Instances]): the per-image object proposals with + their matching ground truth. + Each has fields "proposal_boxes", and "objectness_logits", + "gt_classes", "gt_boxes". + """ + features = [features[f] for f in self.box_in_features] + head_outputs = [] # (predictor, predictions, proposals) + prev_pred_boxes = None + image_sizes = [x.image_size for x in proposals] + for k in range(self.num_cascade_stages): + if k > 0: + # The output boxes of the previous stage are used to create the input + # proposals of the next stage. + proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes) + if self.training: + proposals = self._match_and_label_boxes(proposals, k, targets) + predictions = self._run_stage(features, proposals, k) + prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals) + head_outputs.append((self.box_predictor[k], predictions, proposals)) + + if self.training: + losses = {} + storage = get_event_storage() + for stage, (predictor, predictions, proposals) in enumerate(head_outputs): + with storage.name_scope("stage{}".format(stage)): + stage_losses = predictor.losses(predictions, proposals) + losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()}) + return losses + else: + # Each is a list[Tensor] of length #image. 
Each tensor is Ri x (K+1) + scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs] + + # Average the scores across heads + scores = [ + sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages) + for scores_per_image in zip(*scores_per_stage) + ] + # Use the boxes of the last head + predictor, predictions, proposals = head_outputs[-1] + boxes = predictor.predict_boxes(predictions, proposals) + pred_instances, _ = fast_rcnn_inference( + boxes, + scores, + image_sizes, + predictor.test_score_thresh, + predictor.test_nms_thresh, + predictor.test_topk_per_image, + ) + return pred_instances + + @torch.no_grad() + def _match_and_label_boxes(self, proposals, stage, targets): + """ + Match proposals with groundtruth using the matcher at the given stage. + Label the proposals as foreground or background based on the match. + + Args: + proposals (list[Instances]): One Instances for each image, with + the field "proposal_boxes". + stage (int): the current stage + targets (list[Instances]): the ground truth instances + + Returns: + list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes" + """ + num_fg_samples, num_bg_samples = [], [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + # proposal_labels are 0 or 1 + matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix) + if len(targets_per_image) > 0: + gt_classes = targets_per_image.gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[proposal_labels == 0] = self.num_classes + gt_boxes = targets_per_image.gt_boxes[matched_idxs] + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + gt_boxes = Boxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4)) + ) + proposals_per_image.gt_classes = gt_classes + proposals_per_image.gt_boxes = gt_boxes + + num_fg_samples.append((proposal_labels == 1).sum().item()) + num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1]) + + # Log the number of fg/bg samples in each stage + storage = get_event_storage() + storage.put_scalar( + "stage{}/roi_head/num_fg_samples".format(stage), + sum(num_fg_samples) / len(num_fg_samples), + ) + storage.put_scalar( + "stage{}/roi_head/num_bg_samples".format(stage), + sum(num_bg_samples) / len(num_bg_samples), + ) + return proposals + + def _run_stage(self, features, proposals, stage): + """ + Args: + features (list[Tensor]): #lvl input features to ROIHeads + proposals (list[Instances]): #image Instances, with the field "proposal_boxes" + stage (int): the current stage + + Returns: + Same output as `FastRCNNOutputLayers.forward()`. + """ + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + # The original implementation averages the losses among heads, + # but scale up the parameter gradients of the heads. + # This is equivalent to adding the losses among heads, + # but scale down the gradients on features. 
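+        # Concretely: with num_cascade_stages == 3 the per-stage losses are
+        # summed (L0 + L1 + L2) while _ScaleGradient scales the gradient that
+        # flows from each stage's pooled features back into the shared feature
+        # maps by 1/3, so the backbone sees the same gradient as if the losses
+        # had been averaged, while each stage's head still receives the full
+        # gradient of its own loss.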
+ if self.training: + box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages) + box_features = self.box_head[stage](box_features) + return self.box_predictor[stage](box_features) + + def _create_proposals_from_boxes(self, boxes, image_sizes): + """ + Args: + boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4 + image_sizes (list[tuple]): list of image shapes in (h, w) + + Returns: + list[Instances]: per-image proposals with the given boxes. + """ + # Just like RPN, the proposals should not have gradients + boxes = [Boxes(b.detach()) for b in boxes] + proposals = [] + for boxes_per_image, image_size in zip(boxes, image_sizes): + boxes_per_image.clip(image_size) + if self.training: + # do not filter empty boxes at inference time, + # because the scores from each stage need to be aligned and added later + boxes_per_image = boxes_per_image[boxes_per_image.nonempty()] + prop = Instances(image_size) + prop.proposal_boxes = boxes_per_image + proposals.append(prop) + return proposals diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py b/data_processing/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py new file mode 100644 index 0000000..039e249 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py @@ -0,0 +1,569 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +from typing import Callable, Dict, List, Optional, Tuple, Union +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.data.detection_utils import get_fed_loss_cls_weights +from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple +from detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss +from detectron2.structures import Boxes, Instances +from detectron2.utils.events import get_event_storage + +__all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"] + + +logger = logging.getLogger(__name__) + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + R: number of ROIs, combined over all images, in the minibatch + Ri: number of ROIs in image i + K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. + +Naming convention: + + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`). + + pred_class_logits: predicted class scores in [-inf, +inf]; use + softmax(pred_class_logits) to estimate P(class). + + gt_classes: ground-truth classification labels in [0, K], where [0, K) represent + foreground object classes and K represents the background class. + + pred_proposal_deltas: predicted box2box transform deltas for transforming proposals + to detection box predictions. + + gt_proposal_deltas: ground-truth box2box transform deltas +""" + + +def fast_rcnn_inference( + boxes: List[torch.Tensor], + scores: List[torch.Tensor], + image_shapes: List[Tuple[int, int]], + score_thresh: float, + nms_thresh: float, + topk_per_image: int, +): + """ + Call `fast_rcnn_inference_single_image` for all images. + + Args: + boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic + boxes for each image. Element i has shape (Ri, K * 4) if doing + class-specific regression, or (Ri, 4) if doing class-agnostic + regression, where Ri is the number of predicted objects for image i. 
+ This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. + scores (list[Tensor]): A list of Tensors of predicted class scores for each image. + Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. + image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. + score_thresh (float): Only return detections with a confidence score exceeding this + threshold. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + instances: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confidence detections. + kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates + the corresponding boxes/scores index in [0, Ri) from the input, for image i. + """ + result_per_image = [ + fast_rcnn_inference_single_image( + boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image + ) + for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) + ] + return [x[0] for x in result_per_image], [x[1] for x in result_per_image] + + +def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"): + """ + Log the classification metrics to EventStorage. + + Args: + pred_logits: Rx(K+1) logits. The last column is for background class. + gt_classes: R labels + """ + num_instances = gt_classes.numel() + if num_instances == 0: + return + pred_classes = pred_logits.argmax(dim=1) + bg_class_ind = pred_logits.shape[1] - 1 + + fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind) + num_fg = fg_inds.nonzero().numel() + fg_gt_classes = gt_classes[fg_inds] + fg_pred_classes = pred_classes[fg_inds] + + num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel() + num_accurate = (pred_classes == gt_classes).nonzero().numel() + fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel() + + storage = get_event_storage() + storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances) + if num_fg > 0: + storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg) + storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg) + + +def fast_rcnn_inference_single_image( + boxes, + scores, + image_shape: Tuple[int, int], + score_thresh: float, + nms_thresh: float, + topk_per_image: int, +): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Args: + Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes + per image. + + Returns: + Same as `fast_rcnn_inference`, but for only one image. + """ + valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) + if not valid_mask.all(): + boxes = boxes[valid_mask] + scores = scores[valid_mask] + + scores = scores[:, :-1] + num_bbox_reg_classes = boxes.shape[1] // 4 + # Convert to Boxes to use the `clip` function ... + boxes = Boxes(boxes.reshape(-1, 4)) + boxes.clip(image_shape) + boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 + + # 1. Filter results based on detection scores. It can make NMS more efficient + # by filtering out low-confidence detections. + filter_mask = scores > score_thresh # R x K + # R' x 2. 
First column contains indices of the R predictions; + # Second column contains indices of classes. + filter_inds = filter_mask.nonzero() + if num_bbox_reg_classes == 1: + boxes = boxes[filter_inds[:, 0], 0] + else: + boxes = boxes[filter_mask] + scores = scores[filter_mask] + + # 2. Apply NMS for each class independently. + keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) + if topk_per_image >= 0: + keep = keep[:topk_per_image] + boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] + + result = Instances(image_shape) + result.pred_boxes = Boxes(boxes) + result.scores = scores + result.pred_classes = filter_inds[:, 1] + return result, filter_inds[:, 0] + + +class FastRCNNOutputLayers(nn.Module): + """ + Two linear layers for predicting Fast R-CNN outputs: + + 1. proposal-to-detection box regression deltas + 2. classification scores + """ + + @configurable + def __init__( + self, + input_shape: ShapeSpec, + *, + box2box_transform, + num_classes: int, + test_score_thresh: float = 0.0, + test_nms_thresh: float = 0.5, + test_topk_per_image: int = 100, + cls_agnostic_bbox_reg: bool = False, + smooth_l1_beta: float = 0.0, + box_reg_loss_type: str = "smooth_l1", + loss_weight: Union[float, Dict[str, float]] = 1.0, + use_fed_loss: bool = False, + use_sigmoid_ce: bool = False, + get_fed_loss_cls_weights: Optional[Callable] = None, + fed_loss_num_classes: int = 50, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature to this module + box2box_transform (Box2BoxTransform or Box2BoxTransformRotated): + num_classes (int): number of foreground classes + test_score_thresh (float): threshold to filter predictions results. + test_nms_thresh (float): NMS threshold for prediction results. + test_topk_per_image (int): number of top predictions to produce per image. + cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression + smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if + `box_reg_loss_type` is "smooth_l1" + box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou", + "diou", "ciou" + loss_weight (float|dict): weights to use for losses. Can be single float for weighting + all losses, or a dict of individual weightings. Valid dict keys are: + * "loss_cls": applied to classification loss + * "loss_box_reg": applied to box regression loss + use_fed_loss (bool): whether to use federated loss which samples additional negative + classes to calculate the loss + use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary + cross entropy with logits. This could be used together with federated loss + get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency + weight power, and returns the probabilities to sample negative classes for + federated loss. 
The implementation can be found in + detectron2/data/detection_utils.py + fed_loss_num_classes (int): number of federated classes to keep in total + """ + super().__init__() + if isinstance(input_shape, int): # some backward compatibility + input_shape = ShapeSpec(channels=input_shape) + self.num_classes = num_classes + input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) + # prediction layer for num_classes foreground classes and one background class (hence + 1) + self.cls_score = nn.Linear(input_size, num_classes + 1) + num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes + box_dim = len(box2box_transform.weights) + self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) + + nn.init.normal_(self.cls_score.weight, std=0.01) + nn.init.normal_(self.bbox_pred.weight, std=0.001) + for l in [self.cls_score, self.bbox_pred]: + nn.init.constant_(l.bias, 0) + + self.box2box_transform = box2box_transform + self.smooth_l1_beta = smooth_l1_beta + self.test_score_thresh = test_score_thresh + self.test_nms_thresh = test_nms_thresh + self.test_topk_per_image = test_topk_per_image + self.box_reg_loss_type = box_reg_loss_type + if isinstance(loss_weight, float): + loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight} + self.loss_weight = loss_weight + self.use_fed_loss = use_fed_loss + self.use_sigmoid_ce = use_sigmoid_ce + self.fed_loss_num_classes = fed_loss_num_classes + + if self.use_fed_loss: + assert self.use_sigmoid_ce, "Please use sigmoid cross entropy loss with federated loss" + fed_loss_cls_weights = get_fed_loss_cls_weights() + assert ( + len(fed_loss_cls_weights) == self.num_classes + ), "Please check the provided fed_loss_cls_weights. Their size should match num_classes" + self.register_buffer("fed_loss_cls_weights", fed_loss_cls_weights) + + @classmethod + def from_config(cls, cfg, input_shape): + return { + "input_shape": input_shape, + "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS), + # fmt: off + "num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES, + "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, + "smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA, + "test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST, + "test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, + "test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE, + "box_reg_loss_type" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE, + "loss_weight" : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # noqa + "use_fed_loss" : cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS, + "use_sigmoid_ce" : cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE, + "get_fed_loss_cls_weights" : lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER), # noqa + "fed_loss_num_classes" : cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES, + # fmt: on + } + + def forward(self, x): + """ + Args: + x: per-region features of shape (N, ...) for N bounding boxes to predict. + + Returns: + (Tensor, Tensor): + First tensor: shape (N,K+1), scores for each of the N box. Each row contains the + scores for K object categories and 1 background class. + + Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4), + or (N,4) for class-agnostic regression. 
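+
+        Illustrative example (the 256x7x7 pooled features and 80 classes are
+        assumptions, not values fixed by this module)::
+
+            output_layers = FastRCNNOutputLayers(
+                ShapeSpec(channels=256, height=7, width=7),
+                box2box_transform=Box2BoxTransform(weights=(10.0, 10.0, 5.0, 5.0)),
+                num_classes=80,
+            )
+            x = torch.randn(512, 256, 7, 7)   # 512 pooled region features
+            scores, deltas = output_layers(x)
+            # scores: (512, 81)   80 foreground classes + background
+            # deltas: (512, 320)  80 * 4, or (512, 4) with cls_agnostic_bbox_reg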
+ """ + if x.dim() > 2: + x = torch.flatten(x, start_dim=1) + scores = self.cls_score(x) + proposal_deltas = self.bbox_pred(x) + return scores, proposal_deltas + + def losses(self, predictions, proposals): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were used + to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``, + ``gt_classes`` are expected. + + Returns: + Dict[str, Tensor]: dict of losses + """ + scores, proposal_deltas = predictions + + # parse classification outputs + gt_classes = ( + cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0) + ) + _log_classification_stats(scores, gt_classes) + + # parse box regression outputs + if len(proposals): + proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4 + assert not proposal_boxes.requires_grad, "Proposals should not require gradients!" + # If "gt_boxes" does not exist, the proposals must be all negative and + # should not be included in regression loss computation. + # Here we just use proposal_boxes as an arbitrary placeholder because its + # value won't be used in self.box_reg_loss(). + gt_boxes = cat( + [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals], + dim=0, + ) + else: + proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device) + + if self.use_sigmoid_ce: + loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes) + else: + loss_cls = cross_entropy(scores, gt_classes, reduction="mean") + + losses = { + "loss_cls": loss_cls, + "loss_box_reg": self.box_reg_loss( + proposal_boxes, gt_boxes, proposal_deltas, gt_classes + ), + } + return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()} + + # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py # noqa + # with slight modifications + def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight): + """ + Args: + gt_classes: a long tensor of shape R that contains the gt class label of each proposal. + num_fed_loss_classes: minimum number of classes to keep when calculating federated loss. + Will sample negative classes if number of unique gt_classes is smaller than this value. + num_classes: number of foreground classes + weight: probabilities used to sample negative classes + + Returns: + Tensor: + classes to keep when calculating the federated loss, including both unique gt + classes and sampled negative classes. 
+ """ + unique_gt_classes = torch.unique(gt_classes) + prob = unique_gt_classes.new_ones(num_classes + 1).float() + prob[-1] = 0 + if len(unique_gt_classes) < num_fed_loss_classes: + prob[:num_classes] = weight.float().clone() + prob[unique_gt_classes] = 0 + sampled_negative_classes = torch.multinomial( + prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False + ) + fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes]) + else: + fed_loss_classes = unique_gt_classes + return fed_loss_classes + + # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py#L113 # noqa + # with slight modifications + def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes): + """ + Args: + pred_class_logits: shape (N, K+1), scores for each of the N box. Each row contains the + scores for K object categories and 1 background class + gt_classes: a long tensor of shape R that contains the gt class label of each proposal. + """ + if pred_class_logits.numel() == 0: + return pred_class_logits.new_zeros([1])[0] + + N = pred_class_logits.shape[0] + K = pred_class_logits.shape[1] - 1 + + target = pred_class_logits.new_zeros(N, K + 1) + target[range(len(gt_classes)), gt_classes] = 1 + target = target[:, :K] + + cls_loss = F.binary_cross_entropy_with_logits( + pred_class_logits[:, :-1], target, reduction="none" + ) + + if self.use_fed_loss: + fed_loss_classes = self.get_fed_loss_classes( + gt_classes, + num_fed_loss_classes=self.fed_loss_num_classes, + num_classes=K, + weight=self.fed_loss_cls_weights, + ) + fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1) + fed_loss_classes_mask[fed_loss_classes] = 1 + fed_loss_classes_mask = fed_loss_classes_mask[:K] + weight = fed_loss_classes_mask.view(1, K).expand(N, K).float() + else: + weight = 1 + + loss = torch.sum(cls_loss * weight) / N + return loss + + def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes): + """ + Args: + proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5). + pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)). + gt_classes is a long tensor of shape R, the gt class label of each proposal. + R shall be the number of proposals. + """ + box_dim = proposal_boxes.shape[1] # 4 or 5 + # Regression loss is only computed for foreground proposals (those matched to a GT) + fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0] + if pred_deltas.shape[1] == box_dim: # cls-agnostic regression + fg_pred_deltas = pred_deltas[fg_inds] + else: + fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[ + fg_inds, gt_classes[fg_inds] + ] + + loss_box_reg = _dense_box_regression_loss( + [proposal_boxes[fg_inds]], + self.box2box_transform, + [fg_pred_deltas.unsqueeze(0)], + [gt_boxes[fg_inds]], + ..., + self.box_reg_loss_type, + self.smooth_l1_beta, + ) + + # The reg loss is normalized using the total number of regions (R), not the number + # of foreground regions even though the box regression loss is only defined on + # foreground regions. Why? Because doing so gives equal training influence to + # each foreground example. 
To see how, consider two different minibatches: + # (1) Contains a single foreground region + # (2) Contains 100 foreground regions + # If we normalize by the number of foreground regions, the single example in + # minibatch (1) will be given 100 times as much influence as each foreground + # example in minibatch (2). Normalizing by the total number of regions, R, + # means that the single example in minibatch (1) and each of the 100 examples + # in minibatch (2) are given equal influence. + return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty + + def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were + used to compute predictions. The ``proposal_boxes`` field is expected. + + Returns: + list[Instances]: same as `fast_rcnn_inference`. + list[Tensor]: same as `fast_rcnn_inference`. + """ + boxes = self.predict_boxes(predictions, proposals) + scores = self.predict_probs(predictions, proposals) + image_shapes = [x.image_size for x in proposals] + return fast_rcnn_inference( + boxes, + scores, + image_shapes, + self.test_score_thresh, + self.test_nms_thresh, + self.test_topk_per_image, + ) + + def predict_boxes_for_gt_classes(self, predictions, proposals): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were used + to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected. + + Returns: + list[Tensor]: + A list of Tensors of predicted boxes for GT classes in case of + class-specific box head. Element i of the list has shape (Ri, B), where Ri is + the number of proposals for image i and B is the box dimension (4 or 5) + """ + if not len(proposals): + return [] + scores, proposal_deltas = predictions + proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) + N, B = proposal_boxes.shape + predict_boxes = self.box2box_transform.apply_deltas( + proposal_deltas, proposal_boxes + ) # Nx(KxB) + + K = predict_boxes.shape[1] // B + if K > 1: + gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0) + # Some proposals are ignored or have a background class. Their gt_classes + # cannot be used as index. + gt_classes = gt_classes.clamp_(0, K - 1) + + predict_boxes = predict_boxes.view(N, K, B)[ + torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes + ] + num_prop_per_image = [len(p) for p in proposals] + return predict_boxes.split(num_prop_per_image) + + def predict_boxes( + self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances] + ): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were + used to compute predictions. The ``proposal_boxes`` field is expected. + + Returns: + list[Tensor]: + A list of Tensors of predicted class-specific or class-agnostic boxes + for each image. 
Element i has shape (Ri, K * B) or (Ri, B), where Ri is + the number of proposals for image i and B is the box dimension (4 or 5) + """ + if not len(proposals): + return [] + _, proposal_deltas = predictions + num_prop_per_image = [len(p) for p in proposals] + proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) + predict_boxes = self.box2box_transform.apply_deltas( + proposal_deltas, + proposal_boxes, + ) # Nx(KxB) + return predict_boxes.split(num_prop_per_image) + + def predict_probs( + self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances] + ): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were + used to compute predictions. + + Returns: + list[Tensor]: + A list of Tensors of predicted class probabilities for each image. + Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i. + """ + scores, _ = predictions + num_inst_per_image = [len(p) for p in proposals] + if self.use_sigmoid_ce: + probs = scores.sigmoid() + else: + probs = F.softmax(scores, dim=-1) + return probs.split(num_inst_per_image, dim=0) diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/keypoint_head.py b/data_processing/detectron2/detectron2/modeling/roi_heads/keypoint_head.py new file mode 100644 index 0000000..e0acc13 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/keypoint_head.py @@ -0,0 +1,272 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import List +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate +from detectron2.structures import Instances, heatmaps_to_keypoints +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +_TOTAL_SKIPPED = 0 + + +__all__ = [ + "ROI_KEYPOINT_HEAD_REGISTRY", + "build_keypoint_head", + "BaseKeypointRCNNHead", + "KRCNNConvDeconvUpsampleHead", +] + + +ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD") +ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """ +Registry for keypoint heads, which make keypoint predictions from per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +def build_keypoint_head(cfg, input_shape): + """ + Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME + return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer): + """ + Arguments: + pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number + of instances in the batch, K is the number of keypoints, and S is the side length + of the keypoint heatmap. The values are spatial logits. + instances (list[Instances]): A list of M Instances, where M is the batch size. + These instances are predictions from the model + that are in 1:1 correspondence with pred_keypoint_logits. + Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint` + instance. + normalizer (float): Normalize the loss by this amount. + If not specified, we normalize by the number of visible keypoints in the minibatch. + + Returns a scalar tensor containing the loss. 
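+
+    Illustrative shapes (COCO's 17 keypoints and a 56x56 heatmap, the usual
+    detectron2 defaults, are assumed): pred_keypoint_logits is (N, 17, 56, 56);
+    it is flattened to (N * 17, 56 * 56) and scored with a per-keypoint softmax
+    cross-entropy against the index of the ground-truth heatmap bin, restricted
+    to visible keypoints.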
+ """ + heatmaps = [] + valid = [] + + keypoint_side_len = pred_keypoint_logits.shape[2] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + keypoints = instances_per_image.gt_keypoints + heatmaps_per_image, valid_per_image = keypoints.to_heatmap( + instances_per_image.proposal_boxes.tensor, keypoint_side_len + ) + heatmaps.append(heatmaps_per_image.view(-1)) + valid.append(valid_per_image.view(-1)) + + if len(heatmaps): + keypoint_targets = cat(heatmaps, dim=0) + valid = cat(valid, dim=0).to(dtype=torch.uint8) + valid = torch.nonzero(valid).squeeze(1) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if len(heatmaps) == 0 or valid.numel() == 0: + global _TOTAL_SKIPPED + _TOTAL_SKIPPED += 1 + storage = get_event_storage() + storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False) + return pred_keypoint_logits.sum() * 0 + + N, K, H, W = pred_keypoint_logits.shape + pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W) + + keypoint_loss = F.cross_entropy( + pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum" + ) + + # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch + if normalizer is None: + normalizer = valid.numel() + keypoint_loss /= normalizer + + return keypoint_loss + + +def keypoint_rcnn_inference(pred_keypoint_logits: torch.Tensor, pred_instances: List[Instances]): + """ + Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score) + and add it to the `pred_instances` as a `pred_keypoints` field. + + Args: + pred_keypoint_logits (Tensor): A tensor of shape (R, K, S, S) where R is the total number + of instances in the batch, K is the number of keypoints, and S is the side length of + the keypoint heatmap. The values are spatial logits. + pred_instances (list[Instances]): A list of N Instances, where N is the number of images. + + Returns: + None. Each element in pred_instances will contain extra "pred_keypoints" and + "pred_keypoint_heatmaps" fields. "pred_keypoints" is a tensor of shape + (#instance, K, 3) where the last dimension corresponds to (x, y, score). + The scores are larger than 0. "pred_keypoint_heatmaps" contains the raw + keypoint logits as passed to this function. + """ + # flatten all bboxes from all images together (list[Boxes] -> Rx4 tensor) + bboxes_flat = cat([b.pred_boxes.tensor for b in pred_instances], dim=0) + + pred_keypoint_logits = pred_keypoint_logits.detach() + keypoint_results = heatmaps_to_keypoints(pred_keypoint_logits, bboxes_flat.detach()) + num_instances_per_image = [len(i) for i in pred_instances] + keypoint_results = keypoint_results[:, :, [0, 1, 3]].split(num_instances_per_image, dim=0) + heatmap_results = pred_keypoint_logits.split(num_instances_per_image, dim=0) + + for keypoint_results_per_image, heatmap_results_per_image, instances_per_image in zip( + keypoint_results, heatmap_results, pred_instances + ): + # keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score) + # heatmap_results_per_image is (num instances)x(num keypoints)x(side)x(side) + instances_per_image.pred_keypoints = keypoint_results_per_image + instances_per_image.pred_keypoint_heatmaps = heatmap_results_per_image + + +class BaseKeypointRCNNHead(nn.Module): + """ + Implement the basic Keypoint R-CNN losses and inference logic described in + Sec. 5 of :paper:`Mask R-CNN`. 
+ """ + + @configurable + def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0): + """ + NOTE: this interface is experimental. + + Args: + num_keypoints (int): number of keypoints to predict + loss_weight (float): weight to multiple on the keypoint loss + loss_normalizer (float or str): + If float, divide the loss by `loss_normalizer * #images`. + If 'visible', the loss is normalized by the total number of + visible keypoints across images. + """ + super().__init__() + self.num_keypoints = num_keypoints + self.loss_weight = loss_weight + assert loss_normalizer == "visible" or isinstance(loss_normalizer, float), loss_normalizer + self.loss_normalizer = loss_normalizer + + @classmethod + def from_config(cls, cfg, input_shape): + ret = { + "loss_weight": cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT, + "num_keypoints": cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS, + } + normalize_by_visible = ( + cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS + ) # noqa + if not normalize_by_visible: + batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE + positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION + ret["loss_normalizer"] = ( + ret["num_keypoints"] * batch_size_per_image * positive_sample_fraction + ) + else: + ret["loss_normalizer"] = "visible" + return ret + + def forward(self, x, instances: List[Instances]): + """ + Args: + x: input 4D region feature(s) provided by :class:`ROIHeads`. + instances (list[Instances]): contains the boxes & labels corresponding + to the input features. + Exact format is up to its caller to decide. + Typically, this is the foreground instances in training, with + "proposal_boxes" field and other gt annotations. + In inference, it contains boxes that are already predicted. + + Returns: + A dict of losses if in training. The predicted "instances" if in inference. + """ + x = self.layers(x) + if self.training: + num_images = len(instances) + normalizer = ( + None if self.loss_normalizer == "visible" else num_images * self.loss_normalizer + ) + return { + "loss_keypoint": keypoint_rcnn_loss(x, instances, normalizer=normalizer) + * self.loss_weight + } + else: + keypoint_rcnn_inference(x, instances) + return instances + + def layers(self, x): + """ + Neural network layers that makes predictions from regional input features. + """ + raise NotImplementedError + + +# To get torchscript support, we make the head a subclass of `nn.Sequential`. +# Therefore, to add new layers in this head class, please make sure they are +# added in the order they will be used in forward(). +@ROI_KEYPOINT_HEAD_REGISTRY.register() +class KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead, nn.Sequential): + """ + A standard keypoint head containing a series of 3x3 convs, followed by + a transpose convolution and bilinear interpolation for upsampling. + It is described in Sec. 5 of :paper:`Mask R-CNN`. + """ + + @configurable + def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature + conv_dims: an iterable of output channel counts for each conv in the head + e.g. (512, 512, 512) for three convs outputting 512 channels. 
+ """ + super().__init__(num_keypoints=num_keypoints, **kwargs) + + # default up_scale to 2.0 (this can be made an option) + up_scale = 2.0 + in_channels = input_shape.channels + + for idx, layer_channels in enumerate(conv_dims, 1): + module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1) + self.add_module("conv_fcn{}".format(idx), module) + self.add_module("conv_fcn_relu{}".format(idx), nn.ReLU()) + in_channels = layer_channels + + deconv_kernel = 4 + self.score_lowres = ConvTranspose2d( + in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1 + ) + self.up_scale = up_scale + + for name, param in self.named_parameters(): + if "bias" in name: + nn.init.constant_(param, 0) + elif "weight" in name: + # Caffe2 implementation uses MSRAFill, which in fact + # corresponds to kaiming_normal_ in PyTorch + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + ret["input_shape"] = input_shape + ret["conv_dims"] = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS + return ret + + def layers(self, x): + for layer in self: + x = layer(x) + x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False) + return x diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/mask_head.py b/data_processing/detectron2/detectron2/modeling/roi_heads/mask_head.py new file mode 100644 index 0000000..1eff8f7 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/mask_head.py @@ -0,0 +1,298 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import List +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm +from detectron2.layers.wrappers import move_device_like +from detectron2.structures import Instances +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +__all__ = [ + "BaseMaskRCNNHead", + "MaskRCNNConvUpsampleHead", + "build_mask_head", + "ROI_MASK_HEAD_REGISTRY", +] + + +ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD") +ROI_MASK_HEAD_REGISTRY.__doc__ = """ +Registry for mask heads, which predicts instance masks given +per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +@torch.jit.unused +def mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0): + """ + Compute the mask prediction loss defined in the Mask R-CNN paper. + + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. These instances are in 1:1 + correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, + ...) associated with each instance are stored in fields. + vis_period (int): the period (in steps) to dump visualization. + + Returns: + mask_loss (Tensor): A scalar tensor containing the loss. 
+ """ + cls_agnostic_mask = pred_mask_logits.size(1) == 1 + total_num_masks = pred_mask_logits.size(0) + mask_side_len = pred_mask_logits.size(2) + assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!" + + gt_classes = [] + gt_masks = [] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + if not cls_agnostic_mask: + gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) + gt_classes.append(gt_classes_per_image) + + gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize( + instances_per_image.proposal_boxes.tensor, mask_side_len + ).to(device=pred_mask_logits.device) + # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len + gt_masks.append(gt_masks_per_image) + + if len(gt_masks) == 0: + return pred_mask_logits.sum() * 0 + + gt_masks = cat(gt_masks, dim=0) + + if cls_agnostic_mask: + pred_mask_logits = pred_mask_logits[:, 0] + else: + indices = torch.arange(total_num_masks) + gt_classes = cat(gt_classes, dim=0) + pred_mask_logits = pred_mask_logits[indices, gt_classes] + + if gt_masks.dtype == torch.bool: + gt_masks_bool = gt_masks + else: + # Here we allow gt_masks to be float as well (depend on the implementation of rasterize()) + gt_masks_bool = gt_masks > 0.5 + gt_masks = gt_masks.to(dtype=torch.float32) + + # Log the training accuracy (using gt classes and sigmoid(0.0) == 0.5 threshold) + mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool + mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0)) + num_positive = gt_masks_bool.sum().item() + false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max( + gt_masks_bool.numel() - num_positive, 1.0 + ) + false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0) + + storage = get_event_storage() + storage.put_scalar("mask_rcnn/accuracy", mask_accuracy) + storage.put_scalar("mask_rcnn/false_positive", false_positive) + storage.put_scalar("mask_rcnn/false_negative", false_negative) + if vis_period > 0 and storage.iter % vis_period == 0: + pred_masks = pred_mask_logits.sigmoid() + vis_masks = torch.cat([pred_masks, gt_masks], axis=2) + name = "Left: mask prediction; Right: mask GT" + for idx, vis_mask in enumerate(vis_masks): + vis_mask = torch.stack([vis_mask] * 3, axis=0) + storage.put_image(name + f" ({idx})", vis_mask) + + mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean") + return mask_loss + + +def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]): + """ + Convert pred_mask_logits to estimated foreground probability masks while also + extracting only the masks for the predicted classes in pred_instances. For each + predicted box, the mask of the same class is attached to the instance by adding a + new "pred_masks" field to pred_instances. + + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. + pred_instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. Each Instances must have field "pred_classes". + + Returns: + None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, + Wmask) for predicted class. 
Note that the masks are returned as a soft (non-quantized) + masks the resolution predicted by the network; post-processing steps, such as resizing + the predicted masks to the original image resolution and/or binarizing them, is left + to the caller. + """ + cls_agnostic_mask = pred_mask_logits.size(1) == 1 + + if cls_agnostic_mask: + mask_probs_pred = pred_mask_logits.sigmoid() + else: + # Select masks corresponding to the predicted classes + num_masks = pred_mask_logits.shape[0] + class_pred = cat([i.pred_classes for i in pred_instances]) + device = ( + class_pred.device + if torch.jit.is_scripting() + else ("cpu" if torch.jit.is_tracing() else class_pred.device) + ) + indices = move_device_like(torch.arange(num_masks, device=device), class_pred) + mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() + # mask_probs_pred.shape: (B, 1, Hmask, Wmask) + + num_boxes_per_image = [len(i) for i in pred_instances] + mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0) + + for prob, instances in zip(mask_probs_pred, pred_instances): + instances.pred_masks = prob # (1, Hmask, Wmask) + + +class BaseMaskRCNNHead(nn.Module): + """ + Implement the basic Mask R-CNN losses and inference logic described in :paper:`Mask R-CNN` + """ + + @configurable + def __init__(self, *, loss_weight: float = 1.0, vis_period: int = 0): + """ + NOTE: this interface is experimental. + + Args: + loss_weight (float): multiplier of the loss + vis_period (int): visualization period + """ + super().__init__() + self.vis_period = vis_period + self.loss_weight = loss_weight + + @classmethod + def from_config(cls, cfg, input_shape): + return {"vis_period": cfg.VIS_PERIOD} + + def forward(self, x, instances: List[Instances]): + """ + Args: + x: input region feature(s) provided by :class:`ROIHeads`. + instances (list[Instances]): contains the boxes & labels corresponding + to the input features. + Exact format is up to its caller to decide. + Typically, this is the foreground instances in training, with + "proposal_boxes" field and other gt annotations. + In inference, it contains boxes that are already predicted. + + Returns: + A dict of losses in training. The predicted "instances" in inference. + """ + x = self.layers(x) + if self.training: + return {"loss_mask": mask_rcnn_loss(x, instances, self.vis_period) * self.loss_weight} + else: + mask_rcnn_inference(x, instances) + return instances + + def layers(self, x): + """ + Neural network layers that makes predictions from input features. + """ + raise NotImplementedError + + +# To get torchscript support, we make the head a subclass of `nn.Sequential`. +# Therefore, to add new layers in this head class, please make sure they are +# added in the order they will be used in forward(). +@ROI_MASK_HEAD_REGISTRY.register() +class MaskRCNNConvUpsampleHead(BaseMaskRCNNHead, nn.Sequential): + """ + A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`). + Predictions are made with a final 1x1 conv layer. + """ + + @configurable + def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature + num_classes (int): the number of foreground classes (i.e. background is not + included). 1 if using class agnostic prediction. + conv_dims (list[int]): a list of N>0 integers representing the output dimensions + of N-1 conv layers and the last upsample layer. 
+ conv_norm (str or callable): normalization for the conv layers. + See :func:`detectron2.layers.get_norm` for supported types. + """ + super().__init__(**kwargs) + assert len(conv_dims) >= 1, "conv_dims have to be non-empty!" + + self.conv_norm_relus = [] + + cur_channels = input_shape.channels + for k, conv_dim in enumerate(conv_dims[:-1]): + conv = Conv2d( + cur_channels, + conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=not conv_norm, + norm=get_norm(conv_norm, conv_dim), + activation=nn.ReLU(), + ) + self.add_module("mask_fcn{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + cur_channels = conv_dim + + self.deconv = ConvTranspose2d( + cur_channels, conv_dims[-1], kernel_size=2, stride=2, padding=0 + ) + self.add_module("deconv_relu", nn.ReLU()) + cur_channels = conv_dims[-1] + + self.predictor = Conv2d(cur_channels, num_classes, kernel_size=1, stride=1, padding=0) + + for layer in self.conv_norm_relus + [self.deconv]: + weight_init.c2_msra_fill(layer) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.predictor.weight, std=0.001) + if self.predictor.bias is not None: + nn.init.constant_(self.predictor.bias, 0) + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM + num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV + ret.update( + conv_dims=[conv_dim] * (num_conv + 1), # +1 for ConvTranspose + conv_norm=cfg.MODEL.ROI_MASK_HEAD.NORM, + input_shape=input_shape, + ) + if cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK: + ret["num_classes"] = 1 + else: + ret["num_classes"] = cfg.MODEL.ROI_HEADS.NUM_CLASSES + return ret + + def layers(self, x): + for layer in self: + x = layer(x) + return x + + +def build_mask_head(cfg, input_shape): + """ + Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_MASK_HEAD.NAME + return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/roi_heads.py b/data_processing/detectron2/detectron2/modeling/roi_heads/roi_heads.py new file mode 100644 index 0000000..13dd57a --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/roi_heads.py @@ -0,0 +1,877 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import inspect +import logging +import numpy as np +from typing import Dict, List, Optional, Tuple +import torch +from torch import nn + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec, nonzero_tuple +from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +from ..backbone.resnet import BottleneckBlock, ResNet +from ..matcher import Matcher +from ..poolers import ROIPooler +from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals +from ..sampling import subsample_labels +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers +from .keypoint_head import build_keypoint_head +from .mask_head import build_mask_head + +ROI_HEADS_REGISTRY = Registry("ROI_HEADS") +ROI_HEADS_REGISTRY.__doc__ = """ +Registry for ROI heads in a generalized R-CNN model. +ROIHeads take feature maps and region proposals, and +perform per-region computation. + +The registered object will be called with `obj(cfg, input_shape)`. +The call is expected to return an :class:`ROIHeads`. 
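+
+Registering a custom ROI heads class (illustrative sketch)::
+
+    @ROI_HEADS_REGISTRY.register()
+    class MyROIHeads(StandardROIHeads):
+        ...
+
+    # then select it with cfg.MODEL.ROI_HEADS.NAME = "MyROIHeads"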
+""" + +logger = logging.getLogger(__name__) + + +def build_roi_heads(cfg, input_shape): + """ + Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. + """ + name = cfg.MODEL.ROI_HEADS.NAME + return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +def select_foreground_proposals( + proposals: List[Instances], bg_label: int +) -> Tuple[List[Instances], List[torch.Tensor]]: + """ + Given a list of N Instances (for N images), each containing a `gt_classes` field, + return a list of Instances that contain only instances with `gt_classes != -1 && + gt_classes != bg_label`. + + Args: + proposals (list[Instances]): A list of N Instances, where N is the number of + images in the batch. + bg_label: label index of background class. + + Returns: + list[Instances]: N Instances, each contains only the selected foreground instances. + list[Tensor]: N boolean vector, correspond to the selection mask of + each Instances object. True for selected instances. + """ + assert isinstance(proposals, (list, tuple)) + assert isinstance(proposals[0], Instances) + assert proposals[0].has("gt_classes") + fg_proposals = [] + fg_selection_masks = [] + for proposals_per_image in proposals: + gt_classes = proposals_per_image.gt_classes + fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) + fg_idxs = fg_selection_mask.nonzero().squeeze(1) + fg_proposals.append(proposals_per_image[fg_idxs]) + fg_selection_masks.append(fg_selection_mask) + return fg_proposals, fg_selection_masks + + +def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: + """ + Args: + proposals (list[Instances]): a list of N Instances, where N is the + number of images. + + Returns: + proposals: only contains proposals with at least one visible keypoint. + + Note that this is still slightly different from Detectron. + In Detectron, proposals for training keypoint head are re-sampled from + all the proposals with IOU>threshold & >=1 visible keypoint. + + Here, the proposals are first sampled from all proposals with + IOU>threshold, then proposals with no visible keypoint are filtered out. + This strategy seems to make no difference on Detectron and is easier to implement. + """ + ret = [] + all_num_fg = [] + for proposals_per_image in proposals: + # If empty/unannotated image (hard negatives), skip filtering for train + if len(proposals_per_image) == 0: + ret.append(proposals_per_image) + continue + gt_keypoints = proposals_per_image.gt_keypoints.tensor + # #fg x K x 3 + vis_mask = gt_keypoints[:, :, 2] >= 1 + xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] + proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 + kp_in_box = ( + (xs >= proposal_boxes[:, :, 0]) + & (xs <= proposal_boxes[:, :, 2]) + & (ys >= proposal_boxes[:, :, 1]) + & (ys <= proposal_boxes[:, :, 3]) + ) + selection = (kp_in_box & vis_mask).any(dim=1) + selection_idxs = nonzero_tuple(selection)[0] + all_num_fg.append(selection_idxs.numel()) + ret.append(proposals_per_image[selection_idxs]) + + storage = get_event_storage() + storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) + return ret + + +class ROIHeads(torch.nn.Module): + """ + ROIHeads perform all per-region computation in an R-CNN. + + It typically contains logic to + + 1. (in training only) match proposals with ground truth and sample them + 2. crop the regions and extract per-region features using proposals + 3. 
make per-region predictions with different heads + + It can have many variants, implemented as subclasses of this class. + This base class contains the logic to match/sample proposals. + But it is not necessary to inherit this class if the sampling logic is not needed. + """ + + @configurable + def __init__( + self, + *, + num_classes, + batch_size_per_image, + positive_fraction, + proposal_matcher, + proposal_append_gt=True, + ): + """ + NOTE: this interface is experimental. + + Args: + num_classes (int): number of foreground classes (i.e. background is not included) + batch_size_per_image (int): number of proposals to sample for training + positive_fraction (float): fraction of positive (foreground) proposals + to sample for training. + proposal_matcher (Matcher): matcher that matches proposals and ground truth + proposal_append_gt (bool): whether to include ground truth as proposals as well + """ + super().__init__() + self.batch_size_per_image = batch_size_per_image + self.positive_fraction = positive_fraction + self.num_classes = num_classes + self.proposal_matcher = proposal_matcher + self.proposal_append_gt = proposal_append_gt + + @classmethod + def from_config(cls, cfg): + return { + "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, + "positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION, + "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES, + "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT, + # Matcher to assign box proposals to gt boxes + "proposal_matcher": Matcher( + cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS, + cfg.MODEL.ROI_HEADS.IOU_LABELS, + allow_low_quality_matches=False, + ), + } + + def _sample_proposals( + self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Based on the matching between N proposals and M groundtruth, + sample the proposals and set their classification labels. + + Args: + matched_idxs (Tensor): a vector of length N, each is the best-matched + gt index in [0, M) for each proposal. + matched_labels (Tensor): a vector of length N, the matcher's label + (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. + gt_classes (Tensor): a vector of length M. + + Returns: + Tensor: a vector of indices of sampled proposals. Each is in [0, N). + Tensor: a vector of the same length, the classification label for + each sampled proposal. Each sample is labeled as either a category in + [0, num_classes) or the background (num_classes). + """ + has_gt = gt_classes.numel() > 0 + # Get the corresponding GT for each proposal + if has_gt: + gt_classes = gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[matched_labels == 0] = self.num_classes + # Label ignore proposals (-1 label) + gt_classes[matched_labels == -1] = -1 + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + + sampled_fg_idxs, sampled_bg_idxs = subsample_labels( + gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes + ) + + sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) + return sampled_idxs, gt_classes[sampled_idxs] + + @torch.no_grad() + def label_and_sample_proposals( + self, proposals: List[Instances], targets: List[Instances] + ) -> List[Instances]: + """ + Prepare some proposals to be used to train the ROI heads. + It performs box matching between `proposals` and `targets`, and assigns + training labels to the proposals. 
+ It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth + boxes, with a fraction of positives that is no larger than + ``self.positive_fraction``. + + Args: + See :meth:`ROIHeads.forward` + + Returns: + list[Instances]: + length `N` list of `Instances`s containing the proposals + sampled for training. Each `Instances` has the following fields: + + - proposal_boxes: the proposal boxes + - gt_boxes: the ground-truth box that the proposal is assigned to + (this is only meaningful if the proposal has a label > 0; if label = 0 + then the ground-truth box is random) + + Other fields such as "gt_classes", "gt_masks", that's included in `targets`. + """ + # Augment proposals with ground-truth boxes. + # In the case of learned proposals (e.g., RPN), when training starts + # the proposals will be low quality due to random initialization. + # It's possible that none of these initial + # proposals have high enough overlap with the gt objects to be used + # as positive examples for the second stage components (box head, + # cls head, mask head). Adding the gt boxes to the set of proposals + # ensures that the second stage components will have some positive + # examples from the start of training. For RPN, this augmentation improves + # convergence and empirically improves box AP on COCO by about 0.5 + # points (under one tested configuration). + if self.proposal_append_gt: + proposals = add_ground_truth_to_proposals(targets, proposals) + + proposals_with_gt = [] + + num_fg_samples = [] + num_bg_samples = [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + has_gt = len(targets_per_image) > 0 + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) + sampled_idxs, gt_classes = self._sample_proposals( + matched_idxs, matched_labels, targets_per_image.gt_classes + ) + + # Set target attributes of the sampled proposals: + proposals_per_image = proposals_per_image[sampled_idxs] + proposals_per_image.gt_classes = gt_classes + + if has_gt: + sampled_targets = matched_idxs[sampled_idxs] + # We index all the attributes of targets that start with "gt_" + # and have not been added to proposals yet (="gt_classes"). + # NOTE: here the indexing waste some compute, because heads + # like masks, keypoints, etc, will filter the proposals again, + # (by foreground/background, or number of keypoints in the image, etc) + # so we essentially index the data twice. + for (trg_name, trg_value) in targets_per_image.get_fields().items(): + if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name): + proposals_per_image.set(trg_name, trg_value[sampled_targets]) + # If no GT is given in the image, we don't know what a dummy gt value can be. + # Therefore the returned proposals won't have any gt_* fields, except for a + # gt_classes full of background label. 
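For readers unfamiliar with the matching step used above, here is a minimal sketch of how `pairwise_iou` and `Matcher` cooperate, assuming detectron2 is installed; the boxes and threshold are made up:

```python
import torch
from detectron2.structures import Boxes, pairwise_iou
from detectron2.modeling.matcher import Matcher

gt = Boxes(torch.tensor([[0., 0., 10., 10.]]))                          # one ground-truth box
props = Boxes(torch.tensor([[1., 1., 9., 9.], [20., 20., 30., 30.]]))   # two proposals

iou = pairwise_iou(gt, props)            # shape (num_gt, num_proposals)
matcher = Matcher(thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False)
matched_idxs, matched_labels = matcher(iou)
# matched_labels: 1 for the overlapping proposal, 0 (background) for the far-away one.
print(matched_idxs, matched_labels)
```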
+ + num_bg_samples.append((gt_classes == self.num_classes).sum().item()) + num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) + proposals_with_gt.append(proposals_per_image) + + # Log the number of fg/bg samples that are selected for training ROI heads + storage = get_event_storage() + storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) + storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) + + return proposals_with_gt + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + proposals: List[Instances], + targets: Optional[List[Instances]] = None, + ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: + """ + Args: + images (ImageList): + features (dict[str,Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + proposals (list[Instances]): length `N` list of `Instances`. The i-th + `Instances` contains object proposals for the i-th input image, + with fields "proposal_boxes" and "objectness_logits". + targets (list[Instances], optional): length `N` list of `Instances`. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. Specify `targets` during training only. + It may have the following fields: + + - gt_boxes: the bounding box of each instance. + - gt_classes: the label for each instance with a category ranging in [0, #class]. + - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance. + - gt_keypoints: NxKx3, the groud-truth keypoints for each instance. + + Returns: + list[Instances]: length `N` list of `Instances` containing the + detected instances. Returned during inference only; may be [] during training. + + dict[str->Tensor]: + mapping from a named loss to a tensor storing the loss. Used during training only. + """ + raise NotImplementedError() + + +@ROI_HEADS_REGISTRY.register() +class Res5ROIHeads(ROIHeads): + """ + The ROIHeads in a typical "C4" R-CNN model, where + the box and mask head share the cropping and + the per-region feature computation by a Res5 block. + See :paper:`ResNet` Appendix A. + """ + + @configurable + def __init__( + self, + *, + in_features: List[str], + pooler: ROIPooler, + res5: nn.Module, + box_predictor: nn.Module, + mask_head: Optional[nn.Module] = None, + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + in_features (list[str]): list of backbone feature map names to use for + feature extraction + pooler (ROIPooler): pooler to extra region features from backbone + res5 (nn.Sequential): a CNN to compute per-region features, to be used by + ``box_predictor`` and ``mask_head``. Typically this is a "res5" + block from a ResNet. + box_predictor (nn.Module): make box predictions from the feature. + Should have the same interface as :class:`FastRCNNOutputLayers`. 
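As a usage note for the registry mechanism shown earlier, plugging in a custom head is a one-line registration plus a config change. A hedged sketch; the subclass here is hypothetical:

```python
from detectron2.config import get_cfg
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads, build_roi_heads

@ROI_HEADS_REGISTRY.register()
class MyROIHeads(StandardROIHeads):
    """Hypothetical subclass; reuses StandardROIHeads behavior unchanged."""
    pass

cfg = get_cfg()
cfg.MODEL.ROI_HEADS.NAME = "MyROIHeads"   # resolved by name through the registry
# roi_heads = build_roi_heads(cfg, backbone.output_shape())  # requires a built backbone
```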
+ mask_head (nn.Module): transform features to make mask predictions + """ + super().__init__(**kwargs) + self.in_features = in_features + self.pooler = pooler + if isinstance(res5, (list, tuple)): + res5 = nn.Sequential(*res5) + self.res5 = res5 + self.box_predictor = box_predictor + self.mask_on = mask_head is not None + if self.mask_on: + self.mask_head = mask_head + + @classmethod + def from_config(cls, cfg, input_shape): + # fmt: off + ret = super().from_config(cfg) + in_features = ret["in_features"] = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + pooler_scales = (1.0 / input_shape[in_features[0]].stride, ) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + mask_on = cfg.MODEL.MASK_ON + # fmt: on + assert not cfg.MODEL.KEYPOINT_ON + assert len(in_features) == 1 + + ret["pooler"] = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + + # Compatbility with old moco code. Might be useful. + # See notes in StandardROIHeads.from_config + if not inspect.ismethod(cls._build_res5_block): + logger.warning( + "The behavior of _build_res5_block may change. " + "Please do not depend on private methods." + ) + cls._build_res5_block = classmethod(cls._build_res5_block) + + ret["res5"], out_channels = cls._build_res5_block(cfg) + ret["box_predictor"] = FastRCNNOutputLayers( + cfg, ShapeSpec(channels=out_channels, height=1, width=1) + ) + + if mask_on: + ret["mask_head"] = build_mask_head( + cfg, + ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution), + ) + return ret + + @classmethod + def _build_res5_block(cls, cfg): + # fmt: off + stage_channel_factor = 2 ** 3 # res5 is 8x res2 + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group * stage_channel_factor + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + norm = cfg.MODEL.RESNETS.NORM + assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \ + "Deformable conv is not yet supported in res5 head." + # fmt: on + + blocks = ResNet.make_stage( + BottleneckBlock, + 3, + stride_per_block=[2, 1, 1], + in_channels=out_channels // 2, + bottleneck_channels=bottleneck_channels, + out_channels=out_channels, + num_groups=num_groups, + norm=norm, + stride_in_1x1=stride_in_1x1, + ) + return nn.Sequential(*blocks), out_channels + + def _shared_roi_transform(self, features: List[torch.Tensor], boxes: List[Boxes]): + x = self.pooler(features, boxes) + return self.res5(x) + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + proposals: List[Instances], + targets: Optional[List[Instances]] = None, + ): + """ + See :meth:`ROIHeads.forward`. 
+ """ + del images + + if self.training: + assert targets + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + proposal_boxes = [x.proposal_boxes for x in proposals] + box_features = self._shared_roi_transform( + [features[f] for f in self.in_features], proposal_boxes + ) + predictions = self.box_predictor(box_features.mean(dim=[2, 3])) + + if self.training: + del features + losses = self.box_predictor.losses(predictions, proposals) + if self.mask_on: + proposals, fg_selection_masks = select_foreground_proposals( + proposals, self.num_classes + ) + # Since the ROI feature transform is shared between boxes and masks, + # we don't need to recompute features. The mask loss is only defined + # on foreground proposals, so we need to select out the foreground + # features. + mask_features = box_features[torch.cat(fg_selection_masks, dim=0)] + del box_features + losses.update(self.mask_head(mask_features, proposals)) + return [], losses + else: + pred_instances, _ = self.box_predictor.inference(predictions, proposals) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ) -> List[Instances]: + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + instances (Instances): + the same `Instances` object, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + + if self.mask_on: + feature_list = [features[f] for f in self.in_features] + x = self._shared_roi_transform(feature_list, [x.pred_boxes for x in instances]) + return self.mask_head(x, instances) + else: + return instances + + +@ROI_HEADS_REGISTRY.register() +class StandardROIHeads(ROIHeads): + """ + It's "standard" in a sense that there is no ROI transform sharing + or feature sharing between tasks. + Each head independently processes the input features by each head's + own pooler and head. + + This class is used by most models, such as FPN and C5. + To implement more models, you can subclass it and implement a different + :meth:`forward()` or a head. + """ + + @configurable + def __init__( + self, + *, + box_in_features: List[str], + box_pooler: ROIPooler, + box_head: nn.Module, + box_predictor: nn.Module, + mask_in_features: Optional[List[str]] = None, + mask_pooler: Optional[ROIPooler] = None, + mask_head: Optional[nn.Module] = None, + keypoint_in_features: Optional[List[str]] = None, + keypoint_pooler: Optional[ROIPooler] = None, + keypoint_head: Optional[nn.Module] = None, + train_on_pred_boxes: bool = False, + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + box_in_features (list[str]): list of feature names to use for the box head. + box_pooler (ROIPooler): pooler to extra region features for box head + box_head (nn.Module): transform features to make box predictions + box_predictor (nn.Module): make box predictions from the feature. + Should have the same interface as :class:`FastRCNNOutputLayers`. + mask_in_features (list[str]): list of feature names to use for the mask + pooler or mask head. None if not using mask head. 
+ mask_pooler (ROIPooler): pooler to extract region features from image features. + The mask head will then take region features to make predictions. + If None, the mask head will directly take the dict of image features + defined by `mask_in_features` + mask_head (nn.Module): transform features to make mask predictions + keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask_*``. + train_on_pred_boxes (bool): whether to use proposal boxes or + predicted boxes from the box head to train other heads. + """ + super().__init__(**kwargs) + # keep self.in_features for backward compatibility + self.in_features = self.box_in_features = box_in_features + self.box_pooler = box_pooler + self.box_head = box_head + self.box_predictor = box_predictor + + self.mask_on = mask_in_features is not None + if self.mask_on: + self.mask_in_features = mask_in_features + self.mask_pooler = mask_pooler + self.mask_head = mask_head + + self.keypoint_on = keypoint_in_features is not None + if self.keypoint_on: + self.keypoint_in_features = keypoint_in_features + self.keypoint_pooler = keypoint_pooler + self.keypoint_head = keypoint_head + + self.train_on_pred_boxes = train_on_pred_boxes + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg) + ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES + # Subclasses that have not been updated to use from_config style construction + # may have overridden _init_*_head methods. In this case, those overridden methods + # will not be classmethods and we need to avoid trying to call them here. + # We test for this with ismethod which only returns True for bound methods of cls. + # Such subclasses will need to handle calling their overridden _init_*_head methods. + if inspect.ismethod(cls._init_box_head): + ret.update(cls._init_box_head(cfg, input_shape)) + if inspect.ismethod(cls._init_mask_head): + ret.update(cls._init_mask_head(cfg, input_shape)) + if inspect.ismethod(cls._init_keypoint_head): + ret.update(cls._init_keypoint_head(cfg, input_shape)) + return ret + + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + # fmt: on + + # If StandardROIHeads is applied on multiple feature maps (as in FPN), + # then we share the same predictors and therefore the channel counts must be the same + in_channels = [input_shape[f].channels for f in in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + # Here we split "box head" and "box predictor", which is mainly due to historical reasons. + # They are used together so the "box predictor" layers should be part of the "box head". + # New subclasses of ROIHeads do not need "box predictor"s. 
+ box_head = build_box_head( + cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) + ) + box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_head": box_head, + "box_predictor": box_predictor, + } + + @classmethod + def _init_mask_head(cls, cfg, input_shape): + if not cfg.MODEL.MASK_ON: + return {} + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features][0] + + ret = {"mask_in_features": in_features} + ret["mask_pooler"] = ( + ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + if pooler_type + else None + ) + if pooler_type: + shape = ShapeSpec( + channels=in_channels, width=pooler_resolution, height=pooler_resolution + ) + else: + shape = {f: input_shape[f] for f in in_features} + ret["mask_head"] = build_mask_head(cfg, shape) + return ret + + @classmethod + def _init_keypoint_head(cls, cfg, input_shape): + if not cfg.MODEL.KEYPOINT_ON: + return {} + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) # noqa + sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features][0] + + ret = {"keypoint_in_features": in_features} + ret["keypoint_pooler"] = ( + ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + if pooler_type + else None + ) + if pooler_type: + shape = ShapeSpec( + channels=in_channels, width=pooler_resolution, height=pooler_resolution + ) + else: + shape = {f: input_shape[f] for f in in_features} + ret["keypoint_head"] = build_keypoint_head(cfg, shape) + return ret + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + proposals: List[Instances], + targets: Optional[List[Instances]] = None, + ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: + """ + See :class:`ROIHeads.forward`. + """ + del images + if self.training: + assert targets, "'targets' argument is required during training" + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + if self.training: + losses = self._forward_box(features, proposals) + # Usually the original proposals used by the box head are used by the mask, keypoint + # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes + # predicted by the box head. + losses.update(self._forward_mask(features, proposals)) + losses.update(self._forward_keypoint(features, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features, proposals) + # During inference cascaded prediction is used: the mask and keypoints heads are only + # applied to the top scoring box detections. 
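For orientation, the mask and keypoint branches built above are enabled purely through config keys. A hedged sketch of the relevant settings; the values are detectron2 defaults chosen only for illustration:

```python
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.MODEL.ROI_HEADS.NAME = "StandardROIHeads"
cfg.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3", "p4", "p5"]  # FPN levels shared by all branches
cfg.MODEL.MASK_ON = True            # makes from_config include the _init_mask_head outputs
cfg.MODEL.KEYPOINT_ON = False       # keypoint branch stays disabled
cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
```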
+ pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ) -> List[Instances]: + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + This is useful for downstream tasks where a box is known, but need to obtain + other attributes (outputs of other heads). + Test-time augmentation also uses this. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + list[Instances]: + the same `Instances` objects, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + + instances = self._forward_mask(features, instances) + instances = self._forward_keypoint(features, instances) + return instances + + def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]): + """ + Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`, + the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + proposals (list[Instances]): the per-image object proposals with + their matching ground truth. + Each has fields "proposal_boxes", and "objectness_logits", + "gt_classes", "gt_boxes". + + Returns: + In training, a dict of losses. + In inference, a list of `Instances`, the predicted instances. + """ + features = [features[f] for f in self.box_in_features] + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + box_features = self.box_head(box_features) + predictions = self.box_predictor(box_features) + del box_features + + if self.training: + losses = self.box_predictor.losses(predictions, proposals) + # proposals is modified in-place below, so losses must be computed first. + if self.train_on_pred_boxes: + with torch.no_grad(): + pred_boxes = self.box_predictor.predict_boxes_for_gt_classes( + predictions, proposals + ) + for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes): + proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image) + return losses + else: + pred_instances, _ = self.box_predictor.inference(predictions, proposals) + return pred_instances + + def _forward_mask(self, features: Dict[str, torch.Tensor], instances: List[Instances]): + """ + Forward logic of the mask prediction branch. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + instances (list[Instances]): the per-image instances to train/predict masks. + In training, they can be the proposals. + In inference, they can be the boxes predicted by R-CNN box head. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_masks" and return it. + """ + if not self.mask_on: + return {} if self.training else instances + + if self.training: + # head is only trained on positive proposals. 
+ instances, _ = select_foreground_proposals(instances, self.num_classes) + + if self.mask_pooler is not None: + features = [features[f] for f in self.mask_in_features] + boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances] + features = self.mask_pooler(features, boxes) + else: + features = {f: features[f] for f in self.mask_in_features} + return self.mask_head(features, instances) + + def _forward_keypoint(self, features: Dict[str, torch.Tensor], instances: List[Instances]): + """ + Forward logic of the keypoint prediction branch. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + instances (list[Instances]): the per-image instances to train/predict keypoints. + In training, they can be the proposals. + In inference, they can be the boxes predicted by R-CNN box head. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_keypoints" and return it. + """ + if not self.keypoint_on: + return {} if self.training else instances + + if self.training: + # head is only trained on positive proposals with >=1 visible keypoints. + instances, _ = select_foreground_proposals(instances, self.num_classes) + instances = select_proposals_with_visible_keypoints(instances) + + if self.keypoint_pooler is not None: + features = [features[f] for f in self.keypoint_in_features] + boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances] + features = self.keypoint_pooler(features, boxes) + else: + features = {f: features[f] for f in self.keypoint_in_features} + return self.keypoint_head(features, instances) diff --git a/data_processing/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py b/data_processing/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py new file mode 100644 index 0000000..1e7bfab --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py @@ -0,0 +1,271 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +import torch + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec, batched_nms_rotated +from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated +from detectron2.utils.events import get_event_storage + +from ..box_regression import Box2BoxTransformRotated +from ..poolers import ROIPooler +from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers +from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads + +logger = logging.getLogger(__name__) + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + R: number of ROIs, combined over all images, in the minibatch + Ri: number of ROIs in image i + K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. + +Naming convention: + + deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransformRotated`). + + pred_class_logits: predicted class scores in [-inf, +inf]; use + softmax(pred_class_logits) to estimate P(class). + + gt_classes: ground-truth classification labels in [0, K], where [0, K) represent + foreground object classes and K represents the background class. + + pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals + to detection box predictions. 
+ + gt_proposal_deltas: ground-truth rotated box2box transform deltas +""" + + +def fast_rcnn_inference_rotated( + boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image +): + """ + Call `fast_rcnn_inference_single_image_rotated` for all images. + + Args: + boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic + boxes for each image. Element i has shape (Ri, K * 5) if doing + class-specific regression, or (Ri, 5) if doing class-agnostic + regression, where Ri is the number of predicted objects for image i. + This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. + scores (list[Tensor]): A list of Tensors of predicted class scores for each image. + Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. + image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. + score_thresh (float): Only return detections with a confidence score exceeding this + threshold. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + instances: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confidence detections. + kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates + the corresponding boxes/scores index in [0, Ri) from the input, for image i. + """ + result_per_image = [ + fast_rcnn_inference_single_image_rotated( + boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image + ) + for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) + ] + return [x[0] for x in result_per_image], [x[1] for x in result_per_image] + + +@torch.no_grad() +def fast_rcnn_inference_single_image_rotated( + boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image +): + """ + Single-image inference. Return rotated bounding-box detection results by thresholding + on scores and applying rotated non-maximum suppression (Rotated NMS). + + Args: + Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes + per image. + + Returns: + Same as `fast_rcnn_inference_rotated`, but for only one image. + """ + valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) + if not valid_mask.all(): + boxes = boxes[valid_mask] + scores = scores[valid_mask] + + B = 5 # box dimension + scores = scores[:, :-1] + num_bbox_reg_classes = boxes.shape[1] // B + # Convert to Boxes to use the `clip` function ... + boxes = RotatedBoxes(boxes.reshape(-1, B)) + boxes.clip(image_shape) + boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B + # Filter results based on detection scores + filter_mask = scores > score_thresh # R x K + # R' x 2. First column contains indices of the R predictions; + # Second column contains indices of classes. 
+ filter_inds = filter_mask.nonzero() + if num_bbox_reg_classes == 1: + boxes = boxes[filter_inds[:, 0], 0] + else: + boxes = boxes[filter_mask] + scores = scores[filter_mask] + + # Apply per-class Rotated NMS + keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh) + if topk_per_image >= 0: + keep = keep[:topk_per_image] + boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] + + result = Instances(image_shape) + result.pred_boxes = RotatedBoxes(boxes) + result.scores = scores + result.pred_classes = filter_inds[:, 1] + + return result, filter_inds[:, 0] + + +class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers): + """ + Two linear layers for predicting Rotated Fast R-CNN outputs. + """ + + @classmethod + def from_config(cls, cfg, input_shape): + args = super().from_config(cfg, input_shape) + args["box2box_transform"] = Box2BoxTransformRotated( + weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS + ) + return args + + def inference(self, predictions, proposals): + """ + Returns: + list[Instances]: same as `fast_rcnn_inference_rotated`. + list[Tensor]: same as `fast_rcnn_inference_rotated`. + """ + boxes = self.predict_boxes(predictions, proposals) + scores = self.predict_probs(predictions, proposals) + image_shapes = [x.image_size for x in proposals] + + return fast_rcnn_inference_rotated( + boxes, + scores, + image_shapes, + self.test_score_thresh, + self.test_nms_thresh, + self.test_topk_per_image, + ) + + +@ROI_HEADS_REGISTRY.register() +class RROIHeads(StandardROIHeads): + """ + This class is used by Rotated Fast R-CNN to detect rotated boxes. + For now, it only supports box predictions but not mask or keypoints. + """ + + @configurable + def __init__(self, **kwargs): + """ + NOTE: this interface is experimental. + """ + super().__init__(**kwargs) + assert ( + not self.mask_on and not self.keypoint_on + ), "Mask/Keypoints not supported in Rotated ROIHeads." + assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!" + + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + # fmt: on + assert pooler_type in ["ROIAlignRotated"], pooler_type + # assume all channel counts are equal + in_channels = [input_shape[f].channels for f in in_features][0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + box_head = build_box_head( + cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) + ) + # This line is the only difference v.s. StandardROIHeads + box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_head": box_head, + "box_predictor": box_predictor, + } + + @torch.no_grad() + def label_and_sample_proposals(self, proposals, targets): + """ + Prepare some proposals to be used to train the RROI heads. + It performs box matching between `proposals` and `targets`, and assigns + training labels to the proposals. + It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes, + with a fraction of positives that is no larger than `self.positive_sample_fraction. 
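To actually select this rotated head, the box pooler and regression weights must be switched to their rotated variants in the config. A hedged sketch; the weight values follow the 5-parameter (dx, dy, dw, dh, da) convention and are only illustrative:

```python
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.MODEL.ROI_HEADS.NAME = "RROIHeads"                  # rotated ROI heads from the registry
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated"  # required by RROIHeads._init_box_head
cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0, 1.0)  # 5 weights for (dx, dy, dw, dh, da)
cfg.MODEL.MASK_ON = False                               # masks/keypoints are not supported here
cfg.MODEL.KEYPOINT_ON = False
```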
+ + Args: + See :meth:`StandardROIHeads.forward` + + Returns: + list[Instances]: length `N` list of `Instances`s containing the proposals + sampled for training. Each `Instances` has the following fields: + - proposal_boxes: the rotated proposal boxes + - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to + (this is only meaningful if the proposal has a label > 0; if label = 0 + then the ground-truth box is random) + - gt_classes: the ground-truth classification lable for each proposal + """ + if self.proposal_append_gt: + proposals = add_ground_truth_to_proposals(targets, proposals) + + proposals_with_gt = [] + + num_fg_samples = [] + num_bg_samples = [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + has_gt = len(targets_per_image) > 0 + match_quality_matrix = pairwise_iou_rotated( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) + sampled_idxs, gt_classes = self._sample_proposals( + matched_idxs, matched_labels, targets_per_image.gt_classes + ) + + proposals_per_image = proposals_per_image[sampled_idxs] + proposals_per_image.gt_classes = gt_classes + + if has_gt: + sampled_targets = matched_idxs[sampled_idxs] + proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets] + + num_bg_samples.append((gt_classes == self.num_classes).sum().item()) + num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) + proposals_with_gt.append(proposals_per_image) + + # Log the number of fg/bg samples that are selected for training ROI heads + storage = get_event_storage() + storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) + storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) + + return proposals_with_gt diff --git a/data_processing/detectron2/detectron2/modeling/sampling.py b/data_processing/detectron2/detectron2/modeling/sampling.py new file mode 100644 index 0000000..a2d0f66 --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/sampling.py @@ -0,0 +1,54 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch + +from detectron2.layers import nonzero_tuple + +__all__ = ["subsample_labels"] + + +def subsample_labels( + labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int +): + """ + Return `num_samples` (or fewer, if not enough found) + random samples from `labels` which is a mixture of positives & negatives. + It will try to return as many positives as possible without + exceeding `positive_fraction * num_samples`, and then try to + fill the remaining slots with negatives. + + Args: + labels (Tensor): (N, ) label vector with values: + * -1: ignore + * bg_label: background ("negative") class + * otherwise: one or more foreground ("positive") classes + num_samples (int): The total number of labels with value >= 0 to return. + Values that are not sampled will be filled with -1 (ignore). + positive_fraction (float): The number of subsampled labels with values > 0 + is `min(num_positives, int(positive_fraction * num_samples))`. The number + of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`. + In order words, if there are not enough positives, the sample is filled with + negatives. If there are also not enough negatives, then as many elements are + sampled as is possible. + bg_label (int): label index of background ("negative") class. + + Returns: + pos_idx, neg_idx (Tensor): + 1D vector of indices. 
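A small worked example of the sampling contract described here, assuming detectron2 is installed; the label vector is made up and 80 plays the role of the background label:

```python
import torch
from detectron2.modeling.sampling import subsample_labels

# 6 proposals: two foreground (classes 1 and 2), three background (label 80), one ignored (-1).
labels = torch.tensor([1, 80, 80, -1, 2, 80])
pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=80)
# At most 2 positives (0.5 * 4); the remaining budget is filled with background indices.
print(pos_idx.numel(), neg_idx.numel())   # 2 2
```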
The total length of both is `num_samples` or fewer. + """ + positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0] + negative = nonzero_tuple(labels == bg_label)[0] + + num_pos = int(num_samples * positive_fraction) + # protect against not enough positive examples + num_pos = min(positive.numel(), num_pos) + num_neg = num_samples - num_pos + # protect against not enough negative examples + num_neg = min(negative.numel(), num_neg) + + # randomly select positive and negative examples + perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] + perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] + + pos_idx = positive[perm1] + neg_idx = negative[perm2] + return pos_idx, neg_idx diff --git a/data_processing/detectron2/detectron2/modeling/test_time_augmentation.py b/data_processing/detectron2/detectron2/modeling/test_time_augmentation.py new file mode 100644 index 0000000..373e6bf --- /dev/null +++ b/data_processing/detectron2/detectron2/modeling/test_time_augmentation.py @@ -0,0 +1,307 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import numpy as np +from contextlib import contextmanager +from itertools import count +from typing import List +import torch +from fvcore.transforms import HFlipTransform, NoOpTransform +from torch import nn +from torch.nn.parallel import DistributedDataParallel + +from detectron2.config import configurable +from detectron2.data.detection_utils import read_image +from detectron2.data.transforms import ( + RandomFlip, + ResizeShortestEdge, + ResizeTransform, + apply_augmentations, +) +from detectron2.structures import Boxes, Instances + +from .meta_arch import GeneralizedRCNN +from .postprocessing import detector_postprocess +from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image + +__all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"] + + +class DatasetMapperTTA: + """ + Implement test-time augmentation for detection data. + It is a callable which takes a dataset dict from a detection dataset, + and returns a list of dataset dicts where the images + are augmented from the input image by the transformations defined in the config. + This is used for test-time augmentation. + """ + + @configurable + def __init__(self, min_sizes: List[int], max_size: int, flip: bool): + """ + Args: + min_sizes: list of short-edge size to resize the image to + max_size: maximum height or width of resized images + flip: whether to apply flipping augmentation + """ + self.min_sizes = min_sizes + self.max_size = max_size + self.flip = flip + + @classmethod + def from_config(cls, cfg): + return { + "min_sizes": cfg.TEST.AUG.MIN_SIZES, + "max_size": cfg.TEST.AUG.MAX_SIZE, + "flip": cfg.TEST.AUG.FLIP, + } + + def __call__(self, dataset_dict): + """ + Args: + dict: a dict in standard model input format. See tutorials for details. + + Returns: + list[dict]: + a list of dicts, which contain augmented version of the input image. + The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``. + Each dict has field "transforms" which is a TransformList, + containing the transforms that are used to generate this image. 
+ """ + numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy() + shape = numpy_image.shape + orig_shape = (dataset_dict["height"], dataset_dict["width"]) + if shape[:2] != orig_shape: + # It transforms the "original" image in the dataset to the input image + pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1]) + else: + pre_tfm = NoOpTransform() + + # Create all combinations of augmentations to use + aug_candidates = [] # each element is a list[Augmentation] + for min_size in self.min_sizes: + resize = ResizeShortestEdge(min_size, self.max_size) + aug_candidates.append([resize]) # resize only + if self.flip: + flip = RandomFlip(prob=1.0) + aug_candidates.append([resize, flip]) # resize + flip + + # Apply all the augmentations + ret = [] + for aug in aug_candidates: + new_image, tfms = apply_augmentations(aug, np.copy(numpy_image)) + torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1))) + + dic = copy.deepcopy(dataset_dict) + dic["transforms"] = pre_tfm + tfms + dic["image"] = torch_image + ret.append(dic) + return ret + + +class GeneralizedRCNNWithTTA(nn.Module): + """ + A GeneralizedRCNN with test-time augmentation enabled. + Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`. + """ + + def __init__(self, cfg, model, tta_mapper=None, batch_size=3): + """ + Args: + cfg (CfgNode): + model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. + tta_mapper (callable): takes a dataset dict and returns a list of + augmented versions of the dataset dict. Defaults to + `DatasetMapperTTA(cfg)`. + batch_size (int): batch the augmented images into this batch size for inference. + """ + super().__init__() + if isinstance(model, DistributedDataParallel): + model = model.module + assert isinstance( + model, GeneralizedRCNN + ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model)) + self.cfg = cfg.clone() + assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet" + assert ( + not self.cfg.MODEL.LOAD_PROPOSALS + ), "TTA for pre-computed proposals is not supported yet" + + self.model = model + + if tta_mapper is None: + tta_mapper = DatasetMapperTTA(cfg) + self.tta_mapper = tta_mapper + self.batch_size = batch_size + + @contextmanager + def _turn_off_roi_heads(self, attrs): + """ + Open a context where some heads in `model.roi_heads` are temporarily turned off. + Args: + attr (list[str]): the attribute in `model.roi_heads` which can be used + to turn off a specific head, e.g., "mask_on", "keypoint_on". + """ + roi_heads = self.model.roi_heads + old = {} + for attr in attrs: + try: + old[attr] = getattr(roi_heads, attr) + except AttributeError: + # The head may not be implemented in certain ROIHeads + pass + + if len(old.keys()) == 0: + yield + else: + for attr in old.keys(): + setattr(roi_heads, attr, False) + yield + for attr in old.keys(): + setattr(roi_heads, attr, old[attr]) + + def _batch_inference(self, batched_inputs, detected_instances=None): + """ + Execute inference on a list of inputs, + using batch size = self.batch_size, instead of the length of the list. 
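As a usage note, wrapping a trained model for test-time augmentation needs only the config and the model. A hedged sketch; the checkpoint path is a placeholder:

```python
from detectron2.config import get_cfg
from detectron2.modeling import build_model, GeneralizedRCNNWithTTA
from detectron2.checkpoint import DetectionCheckpointer

cfg = get_cfg()
cfg.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800)   # short-edge sizes used by DatasetMapperTTA
cfg.TEST.AUG.MAX_SIZE = 4000
cfg.TEST.AUG.FLIP = True

model = build_model(cfg)                               # a GeneralizedRCNN with the default config
DetectionCheckpointer(model).load("model_final.pth")   # placeholder checkpoint path
model.eval()
tta_model = GeneralizedRCNNWithTTA(cfg, model)         # call like the model: tta_model(batched_inputs)
```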
+ + Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference` + """ + if detected_instances is None: + detected_instances = [None] * len(batched_inputs) + + outputs = [] + inputs, instances = [], [] + for idx, input, instance in zip(count(), batched_inputs, detected_instances): + inputs.append(input) + instances.append(instance) + if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1: + outputs.extend( + self.model.inference( + inputs, + instances if instances[0] is not None else None, + do_postprocess=False, + ) + ) + inputs, instances = [], [] + return outputs + + def __call__(self, batched_inputs): + """ + Same input/output format as :meth:`GeneralizedRCNN.forward` + """ + + def _maybe_read_image(dataset_dict): + ret = copy.copy(dataset_dict) + if "image" not in ret: + image = read_image(ret.pop("file_name"), self.model.input_format) + image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW + ret["image"] = image + if "height" not in ret and "width" not in ret: + ret["height"] = image.shape[1] + ret["width"] = image.shape[2] + return ret + + return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs] + + def _inference_one_image(self, input): + """ + Args: + input (dict): one dataset dict with "image" field being a CHW tensor + + Returns: + dict: one output dict + """ + orig_shape = (input["height"], input["width"]) + augmented_inputs, tfms = self._get_augmented_inputs(input) + # Detect boxes from all augmented versions + with self._turn_off_roi_heads(["mask_on", "keypoint_on"]): + # temporarily disable roi heads + all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms) + # merge all detected boxes to obtain final predictions for boxes + merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape) + + if self.cfg.MODEL.MASK_ON: + # Use the detected boxes to obtain masks + augmented_instances = self._rescale_detected_boxes( + augmented_inputs, merged_instances, tfms + ) + # run forward on the detected boxes + outputs = self._batch_inference(augmented_inputs, augmented_instances) + # Delete now useless variables to avoid being out of memory + del augmented_inputs, augmented_instances + # average the predictions + merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms) + merged_instances = detector_postprocess(merged_instances, *orig_shape) + return {"instances": merged_instances} + else: + return {"instances": merged_instances} + + def _get_augmented_inputs(self, input): + augmented_inputs = self.tta_mapper(input) + tfms = [x.pop("transforms") for x in augmented_inputs] + return augmented_inputs, tfms + + def _get_augmented_boxes(self, augmented_inputs, tfms): + # 1: forward with all augmented images + outputs = self._batch_inference(augmented_inputs) + # 2: union the results + all_boxes = [] + all_scores = [] + all_classes = [] + for output, tfm in zip(outputs, tfms): + # Need to inverse the transforms on boxes, to obtain results on original image + pred_boxes = output.pred_boxes.tensor + original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy()) + all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device)) + + all_scores.extend(output.scores) + all_classes.extend(output.pred_classes) + all_boxes = torch.cat(all_boxes, dim=0) + return all_boxes, all_scores, all_classes + + def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw): + # select from the union of all results + num_boxes = 
len(all_boxes) + num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES + # +1 because fast_rcnn_inference expects background scores as well + all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device) + for idx, cls, score in zip(count(), all_classes, all_scores): + all_scores_2d[idx, cls] = score + + merged_instances, _ = fast_rcnn_inference_single_image( + all_boxes, + all_scores_2d, + shape_hw, + 1e-8, + self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, + self.cfg.TEST.DETECTIONS_PER_IMAGE, + ) + + return merged_instances + + def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms): + augmented_instances = [] + for input, tfm in zip(augmented_inputs, tfms): + # Transform the target box to the augmented image's coordinate space + pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy() + pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes)) + + aug_instances = Instances( + image_size=input["image"].shape[1:3], + pred_boxes=Boxes(pred_boxes), + pred_classes=merged_instances.pred_classes, + scores=merged_instances.scores, + ) + augmented_instances.append(aug_instances) + return augmented_instances + + def _reduce_pred_masks(self, outputs, tfms): + # Should apply inverse transforms on masks. + # We assume only resize & flip are used. pred_masks is a scale-invariant + # representation, so we handle flip specially + for output, tfm in zip(outputs, tfms): + if any(isinstance(t, HFlipTransform) for t in tfm.transforms): + output.pred_masks = output.pred_masks.flip(dims=[3]) + all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0) + avg_pred_masks = torch.mean(all_pred_masks, dim=0) + return avg_pred_masks diff --git a/data_processing/detectron2/detectron2/projects/README.md b/data_processing/detectron2/detectron2/projects/README.md new file mode 100644 index 0000000..95afe7f --- /dev/null +++ b/data_processing/detectron2/detectron2/projects/README.md @@ -0,0 +1,2 @@ + +Projects live in the [`projects` directory](../../projects) under the root of this repository, but not here. diff --git a/data_processing/detectron2/detectron2/projects/__init__.py b/data_processing/detectron2/detectron2/projects/__init__.py new file mode 100644 index 0000000..b2d0540 --- /dev/null +++ b/data_processing/detectron2/detectron2/projects/__init__.py @@ -0,0 +1,34 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import importlib.abc +import importlib.util +from pathlib import Path + +__all__ = [] + +_PROJECTS = { + "point_rend": "PointRend", + "deeplab": "DeepLab", + "panoptic_deeplab": "Panoptic-DeepLab", +} +_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent / "projects" + +if _PROJECT_ROOT.is_dir(): + # This is true only for in-place installation (pip install -e, setup.py develop), + # where setup(package_dir=) does not work: https://github.com/pypa/setuptools/issues/230 + + class _D2ProjectsFinder(importlib.abc.MetaPathFinder): + def find_spec(self, name, path, target=None): + if not name.startswith("detectron2.projects."): + return + project_name = name.split(".")[-1] + project_dir = _PROJECTS.get(project_name) + if not project_dir: + return + target_file = _PROJECT_ROOT / f"{project_dir}/{project_name}/__init__.py" + if not target_file.is_file(): + return + return importlib.util.spec_from_file_location(name, target_file) + + import sys + + sys.meta_path.append(_D2ProjectsFinder()) diff --git a/data_processing/detectron2/detectron2/solver/__init__.py b/data_processing/detectron2/detectron2/solver/__init__.py new file mode 100644 index 0000000..7e36c64 --- /dev/null +++ b/data_processing/detectron2/detectron2/solver/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params +from .lr_scheduler import ( + LRMultiplier, + LRScheduler, + WarmupCosineLR, + WarmupMultiStepLR, + WarmupParamScheduler, +) + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/detectron2/solver/build.py b/data_processing/detectron2/detectron2/solver/build.py new file mode 100644 index 0000000..6ce25b3 --- /dev/null +++ b/data_processing/detectron2/detectron2/solver/build.py @@ -0,0 +1,310 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import itertools +import logging +from collections import defaultdict +from enum import Enum +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union +import torch +from fvcore.common.param_scheduler import ( + CosineParamScheduler, + MultiStepParamScheduler, + StepWithFixedGammaParamScheduler, +) + +from detectron2.config import CfgNode +from detectron2.utils.env import TORCH_VERSION + +from .lr_scheduler import LRMultiplier, LRScheduler, WarmupParamScheduler + +_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] +_GradientClipper = Callable[[_GradientClipperInput], None] + + +class GradientClipType(Enum): + VALUE = "value" + NORM = "norm" + + +def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper: + """ + Creates gradient clipping closure to clip by value or by norm, + according to the provided config. 
+ """ + cfg = copy.deepcopy(cfg) + + def clip_grad_norm(p: _GradientClipperInput): + torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) + + def clip_grad_value(p: _GradientClipperInput): + torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) + + _GRADIENT_CLIP_TYPE_TO_CLIPPER = { + GradientClipType.VALUE: clip_grad_value, + GradientClipType.NORM: clip_grad_norm, + } + return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] + + +def _generate_optimizer_class_with_gradient_clipping( + optimizer: Type[torch.optim.Optimizer], + *, + per_param_clipper: Optional[_GradientClipper] = None, + global_clipper: Optional[_GradientClipper] = None, +) -> Type[torch.optim.Optimizer]: + """ + Dynamically creates a new type that inherits the type of a given instance + and overrides the `step` method to add gradient clipping + """ + assert ( + per_param_clipper is None or global_clipper is None + ), "Not allowed to use both per-parameter clipping and global clipping" + + def optimizer_wgc_step(self, closure=None): + if per_param_clipper is not None: + for group in self.param_groups: + for p in group["params"]: + per_param_clipper(p) + else: + # global clipper for future use with detr + # (https://github.com/facebookresearch/detr/pull/287) + all_params = itertools.chain(*[g["params"] for g in self.param_groups]) + global_clipper(all_params) + super(type(self), self).step(closure) + + OptimizerWithGradientClip = type( + optimizer.__name__ + "WithGradientClip", + (optimizer,), + {"step": optimizer_wgc_step}, + ) + return OptimizerWithGradientClip + + +def maybe_add_gradient_clipping( + cfg: CfgNode, optimizer: Type[torch.optim.Optimizer] +) -> Type[torch.optim.Optimizer]: + """ + If gradient clipping is enabled through config options, wraps the existing + optimizer type to become a new dynamically created class OptimizerWithGradientClip + that inherits the given optimizer and overrides the `step` method to + include gradient clipping. + + Args: + cfg: CfgNode, configuration options + optimizer: type. A subclass of torch.optim.Optimizer + + Return: + type: either the input `optimizer` (if gradient clipping is disabled), or + a subclass of it with gradient clipping included in the `step` method. + """ + if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: + return optimizer + if isinstance(optimizer, torch.optim.Optimizer): + optimizer_type = type(optimizer) + else: + assert issubclass(optimizer, torch.optim.Optimizer), optimizer + optimizer_type = optimizer + + grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) + OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( + optimizer_type, per_param_clipper=grad_clipper + ) + if isinstance(optimizer, torch.optim.Optimizer): + optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended + return optimizer + else: + return OptimizerWithGradientClip + + +def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: + """ + Build an optimizer from config. 
+ """ + params = get_default_optimizer_params( + model, + base_lr=cfg.SOLVER.BASE_LR, + weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, + bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, + weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, + ) + sgd_args = { + "params": params, + "lr": cfg.SOLVER.BASE_LR, + "momentum": cfg.SOLVER.MOMENTUM, + "nesterov": cfg.SOLVER.NESTEROV, + "weight_decay": cfg.SOLVER.WEIGHT_DECAY, + } + if TORCH_VERSION >= (1, 12): + sgd_args["foreach"] = True + return maybe_add_gradient_clipping(cfg, torch.optim.SGD(**sgd_args)) + + +def get_default_optimizer_params( + model: torch.nn.Module, + base_lr: Optional[float] = None, + weight_decay: Optional[float] = None, + weight_decay_norm: Optional[float] = None, + bias_lr_factor: Optional[float] = 1.0, + weight_decay_bias: Optional[float] = None, + lr_factor_func: Optional[Callable] = None, + overrides: Optional[Dict[str, Dict[str, float]]] = None, +) -> List[Dict[str, Any]]: + """ + Get default param list for optimizer, with support for a few types of + overrides. If no overrides needed, this is equivalent to `model.parameters()`. + + Args: + base_lr: lr for every group by default. Can be omitted to use the one in optimizer. + weight_decay: weight decay for every group by default. Can be omitted to use the one + in optimizer. + weight_decay_norm: override weight decay for params in normalization layers + bias_lr_factor: multiplier of lr for bias parameters. + weight_decay_bias: override weight decay for bias parameters. + lr_factor_func: function to calculate lr decay rate by mapping the parameter names to + corresponding lr decay rate. Note that setting this option requires + also setting ``base_lr``. + overrides: if not `None`, provides values for optimizer hyperparameters + (LR, weight decay) for module parameters with a given name; e.g. + ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and + weight decay values for all module parameters named `embedding`. + + For common detection models, ``weight_decay_norm`` is the only option + needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings + from Detectron1 that are not found useful. + + Example: + :: + torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0), + lr=0.01, weight_decay=1e-4, momentum=0.9) + """ + if overrides is None: + overrides = {} + defaults = {} + if base_lr is not None: + defaults["lr"] = base_lr + if weight_decay is not None: + defaults["weight_decay"] = weight_decay + bias_overrides = {} + if bias_lr_factor is not None and bias_lr_factor != 1.0: + # NOTE: unlike Detectron v1, we now by default make bias hyperparameters + # exactly the same as regular weights. 
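For completeness, these helpers can also be used outside `build_optimizer`. A minimal sketch of assembling a gradient-clipped SGD optimizer by hand; the model and hyperparameters are placeholders:

```python
import torch
from detectron2.config import get_cfg
from detectron2.solver import get_default_optimizer_params
from detectron2.solver.build import maybe_add_gradient_clipping

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))  # stand-in model

params = get_default_optimizer_params(
    model,
    base_lr=0.02,
    weight_decay_norm=0.0,   # typical choice: no weight decay on norm-layer parameters
)
cfg = get_cfg()
cfg.SOLVER.CLIP_GRADIENTS.ENABLED = True
cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "norm"
cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
optimizer = maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
    params, lr=0.02, momentum=0.9, weight_decay=1e-4
)
```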
+ if base_lr is None: + raise ValueError("bias_lr_factor requires base_lr") + bias_overrides["lr"] = base_lr * bias_lr_factor + if weight_decay_bias is not None: + bias_overrides["weight_decay"] = weight_decay_bias + if len(bias_overrides): + if "bias" in overrides: + raise ValueError("Conflicting overrides for 'bias'") + overrides["bias"] = bias_overrides + if lr_factor_func is not None: + if base_lr is None: + raise ValueError("lr_factor_func requires base_lr") + norm_module_types = ( + torch.nn.BatchNorm1d, + torch.nn.BatchNorm2d, + torch.nn.BatchNorm3d, + torch.nn.SyncBatchNorm, + # NaiveSyncBatchNorm inherits from BatchNorm2d + torch.nn.GroupNorm, + torch.nn.InstanceNorm1d, + torch.nn.InstanceNorm2d, + torch.nn.InstanceNorm3d, + torch.nn.LayerNorm, + torch.nn.LocalResponseNorm, + ) + params: List[Dict[str, Any]] = [] + memo: Set[torch.nn.parameter.Parameter] = set() + for module_name, module in model.named_modules(): + for module_param_name, value in module.named_parameters(recurse=False): + if not value.requires_grad: + continue + # Avoid duplicating parameters + if value in memo: + continue + memo.add(value) + + hyperparams = copy.copy(defaults) + if isinstance(module, norm_module_types) and weight_decay_norm is not None: + hyperparams["weight_decay"] = weight_decay_norm + if lr_factor_func is not None: + hyperparams["lr"] *= lr_factor_func(f"{module_name}.{module_param_name}") + + hyperparams.update(overrides.get(module_param_name, {})) + params.append({"params": [value], **hyperparams}) + return reduce_param_groups(params) + + +def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + # Transform parameter groups into per-parameter structure. + # Later items in `params` can overwrite parameters set in previous items. + ret = defaultdict(dict) + for item in params: + assert "params" in item + cur_params = {x: y for x, y in item.items() if x != "params"} + for param in item["params"]: + ret[param].update({"params": [param], **cur_params}) + return list(ret.values()) + + +def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + # Reorganize the parameter groups and merge duplicated groups. + # The number of parameter groups needs to be as small as possible in order + # to efficiently use the PyTorch multi-tensor optimizer. Therefore instead + # of using a parameter_group per single parameter, we reorganize the + # parameter groups and merge duplicated groups. This approach speeds + # up multi-tensor optimizer significantly. + params = _expand_param_groups(params) + groups = defaultdict(list) # re-group all parameter groups by their hyperparams + for item in params: + cur_params = tuple((x, y) for x, y in item.items() if x != "params") + groups[cur_params].extend(item["params"]) + ret = [] + for param_keys, param_values in groups.items(): + cur = {kv[0]: kv[1] for kv in param_keys} + cur["params"] = param_values + ret.append(cur) + return ret + + +def build_lr_scheduler(cfg: CfgNode, optimizer: torch.optim.Optimizer) -> LRScheduler: + """ + Build a LR scheduler from config. + """ + name = cfg.SOLVER.LR_SCHEDULER_NAME + + if name == "WarmupMultiStepLR": + steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER] + if len(steps) != len(cfg.SOLVER.STEPS): + logger = logging.getLogger(__name__) + logger.warning( + "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. " + "These values will be ignored." 
+ ) + sched = MultiStepParamScheduler( + values=[cfg.SOLVER.GAMMA**k for k in range(len(steps) + 1)], + milestones=steps, + num_updates=cfg.SOLVER.MAX_ITER, + ) + elif name == "WarmupCosineLR": + end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR + assert end_value >= 0.0 and end_value <= 1.0, end_value + sched = CosineParamScheduler(1, end_value) + elif name == "WarmupStepWithFixedGammaLR": + sched = StepWithFixedGammaParamScheduler( + base_value=1.0, + gamma=cfg.SOLVER.GAMMA, + num_decays=cfg.SOLVER.NUM_DECAYS, + num_updates=cfg.SOLVER.MAX_ITER, + ) + else: + raise ValueError("Unknown LR scheduler: {}".format(name)) + + sched = WarmupParamScheduler( + sched, + cfg.SOLVER.WARMUP_FACTOR, + min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0), + cfg.SOLVER.WARMUP_METHOD, + cfg.SOLVER.RESCALE_INTERVAL, + ) + return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER) diff --git a/data_processing/detectron2/detectron2/solver/lr_scheduler.py b/data_processing/detectron2/detectron2/solver/lr_scheduler.py new file mode 100644 index 0000000..d6aed2b --- /dev/null +++ b/data_processing/detectron2/detectron2/solver/lr_scheduler.py @@ -0,0 +1,246 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import math +from bisect import bisect_right +from typing import List +import torch +from fvcore.common.param_scheduler import ( + CompositeParamScheduler, + ConstantParamScheduler, + LinearParamScheduler, + ParamScheduler, +) + +try: + from torch.optim.lr_scheduler import LRScheduler +except ImportError: + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler + +logger = logging.getLogger(__name__) + + +class WarmupParamScheduler(CompositeParamScheduler): + """ + Add an initial warmup stage to another scheduler. + """ + + def __init__( + self, + scheduler: ParamScheduler, + warmup_factor: float, + warmup_length: float, + warmup_method: str = "linear", + rescale_interval: bool = False, + ): + """ + Args: + scheduler: warmup will be added at the beginning of this scheduler + warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001 + warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire + training, e.g. 0.01 + warmup_method: one of "linear" or "constant" + rescale_interval: whether we will rescale the interval of the scheduler after + warmup + """ + end_value = scheduler(warmup_length) # the value to reach when warmup ends + start_value = warmup_factor * scheduler(0.0) + if warmup_method == "constant": + warmup = ConstantParamScheduler(start_value) + elif warmup_method == "linear": + warmup = LinearParamScheduler(start_value, end_value) + else: + raise ValueError("Unknown warmup method: {}".format(warmup_method)) + super().__init__( + [warmup, scheduler], + interval_scaling=["rescaled", "rescaled" if rescale_interval else "fixed"], + lengths=[warmup_length, 1 - warmup_length], + ) + + +class LRMultiplier(LRScheduler): + """ + A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the + learning rate of each param in the optimizer. + Every step, the learning rate of each parameter becomes its initial value + multiplied by the output of the given :class:`ParamScheduler`. + + The absolute learning rate value of each parameter can be different. + This scheduler can be used as long as the relative scale among them do + not change during training. 
+ + Examples: + :: + LRMultiplier( + opt, + WarmupParamScheduler( + MultiStepParamScheduler( + [1, 0.1, 0.01], + milestones=[60000, 80000], + num_updates=90000, + ), 0.001, 100 / 90000 + ), + max_iter=90000 + ) + """ + + # NOTES: in the most general case, every LR can use its own scheduler. + # Supporting this requires interaction with the optimizer when its parameter + # group is initialized. For example, classyvision implements its own optimizer + # that allows different schedulers for every parameter group. + # To avoid this complexity, we use this class to support the most common cases + # where the relative scale among all LRs stay unchanged during training. In this + # case we only need a total of one scheduler that defines the relative LR multiplier. + + def __init__( + self, + optimizer: torch.optim.Optimizer, + multiplier: ParamScheduler, + max_iter: int, + last_iter: int = -1, + ): + """ + Args: + optimizer, last_iter: See ``torch.optim.lr_scheduler.LRScheduler``. + ``last_iter`` is the same as ``last_epoch``. + multiplier: a fvcore ParamScheduler that defines the multiplier on + every LR of the optimizer + max_iter: the total number of training iterations + """ + if not isinstance(multiplier, ParamScheduler): + raise ValueError( + "_LRMultiplier(multiplier=) must be an instance of fvcore " + f"ParamScheduler. Got {multiplier} instead." + ) + self._multiplier = multiplier + self._max_iter = max_iter + super().__init__(optimizer, last_epoch=last_iter) + + def state_dict(self): + # fvcore schedulers are stateless. Only keep pytorch scheduler states + return {"base_lrs": self.base_lrs, "last_epoch": self.last_epoch} + + def get_lr(self) -> List[float]: + multiplier = self._multiplier(self.last_epoch / self._max_iter) + return [base_lr * multiplier for base_lr in self.base_lrs] + + +""" +Content below is no longer needed! +""" + +# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes +# only on epoch boundaries. We typically use iteration based schedules instead. +# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean +# "iteration" instead. + +# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating +# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. + + +class WarmupMultiStepLR(LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + milestones: List[int], + gamma: float = 0.1, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + logger.warning( + "WarmupMultiStepLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!" + ) + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. 
Got {}", milestones + ) + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + return [ + base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +class WarmupCosineLR(LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + max_iters: int, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + logger.warning( + "WarmupCosineLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!" + ) + self.max_iters = max_iters + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + # Different definitions of half-cosine with warmup are possible. For + # simplicity we multiply the standard half-cosine schedule by the warmup + # factor. An alternative is to start the period of the cosine at warmup_iters + # instead of at 0. In the case that warmup_iters << max_iters the two are + # very close to each other. + return [ + base_lr + * warmup_factor + * 0.5 + * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +def _get_warmup_factor_at_iter( + method: str, iter: int, warmup_iters: int, warmup_factor: float +) -> float: + """ + Return the learning rate warmup factor at a specific iteration. + See :paper:`ImageNet in 1h` for more details. + + Args: + method (str): warmup method; either "constant" or "linear". + iter (int): iteration at which to calculate the warmup factor. + warmup_iters (int): the number of warmup iterations. + warmup_factor (float): the base warmup factor (the meaning changes according + to the method used). + + Returns: + float: the effective warmup factor at the given iteration. + """ + if iter >= warmup_iters: + return 1.0 + + if method == "constant": + return warmup_factor + elif method == "linear": + alpha = iter / warmup_iters + return warmup_factor * (1 - alpha) + alpha + else: + raise ValueError("Unknown warmup method: {}".format(method)) diff --git a/data_processing/detectron2/detectron2/structures/__init__.py b/data_processing/detectron2/detectron2/structures/__init__.py new file mode 100644 index 0000000..f3ee605 --- /dev/null +++ b/data_processing/detectron2/detectron2/structures/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa, pairwise_point_box_distance +from .image_list import ImageList + +from .instances import Instances +from .keypoints import Keypoints, heatmaps_to_keypoints +from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks +from .rotated_boxes import RotatedBoxes +from .rotated_boxes import pairwise_iou as pairwise_iou_rotated + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +from detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/data_processing/detectron2/detectron2/structures/boxes.py b/data_processing/detectron2/detectron2/structures/boxes.py new file mode 100644 index 0000000..fd396f6 --- /dev/null +++ b/data_processing/detectron2/detectron2/structures/boxes.py @@ -0,0 +1,425 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +import numpy as np +from enum import IntEnum, unique +from typing import List, Tuple, Union +import torch +from torch import device + +_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] + + +@unique +class BoxMode(IntEnum): + """ + Enum of different ways to represent a box. + """ + + XYXY_ABS = 0 + """ + (x0, y0, x1, y1) in absolute floating points coordinates. + The coordinates in range [0, width or height]. + """ + XYWH_ABS = 1 + """ + (x0, y0, w, h) in absolute floating points coordinates. + """ + XYXY_REL = 2 + """ + Not yet supported! + (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. + """ + XYWH_REL = 3 + """ + Not yet supported! + (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. + """ + XYWHA_ABS = 4 + """ + (xc, yc, w, h, a) in absolute floating points coordinates. + (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw. + """ + + @staticmethod + def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: + """ + Args: + box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 + from_mode, to_mode (BoxMode) + + Returns: + The converted box of the same type. + """ + if from_mode == to_mode: + return box + + original_type = type(box) + is_numpy = isinstance(box, np.ndarray) + single_box = isinstance(box, (list, tuple)) + if single_box: + assert len(box) == 4 or len(box) == 5, ( + "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," + " where k == 4 or 5" + ) + arr = torch.tensor(box)[None, :] + else: + # avoid modifying the input box + if is_numpy: + arr = torch.from_numpy(np.asarray(box)).clone() + else: + arr = box.clone() + + assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [ + BoxMode.XYXY_REL, + BoxMode.XYWH_REL, + ], "Relative mode not yet supported!" 
+ + if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: + assert ( + arr.shape[-1] == 5 + ), "The last dimension of input shape must be 5 for XYWHA format" + original_dtype = arr.dtype + arr = arr.double() + + w = arr[:, 2] + h = arr[:, 3] + a = arr[:, 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + new_w = c * w + s * h + new_h = c * h + s * w + + # convert center to top-left corner + arr[:, 0] -= new_w / 2.0 + arr[:, 1] -= new_h / 2.0 + # bottom-right corner + arr[:, 2] = arr[:, 0] + new_w + arr[:, 3] = arr[:, 1] + new_h + + arr = arr[:, :4].to(dtype=original_dtype) + elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: + original_dtype = arr.dtype + arr = arr.double() + arr[:, 0] += arr[:, 2] / 2.0 + arr[:, 1] += arr[:, 3] / 2.0 + angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) + arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) + else: + if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: + arr[:, 2] += arr[:, 0] + arr[:, 3] += arr[:, 1] + elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: + arr[:, 2] -= arr[:, 0] + arr[:, 3] -= arr[:, 1] + else: + raise NotImplementedError( + "Conversion from BoxMode {} to {} is not supported yet".format( + from_mode, to_mode + ) + ) + + if single_box: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + else: + return arr + + +class Boxes: + """ + This structure stores a list of boxes as a Nx4 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + + Attributes: + tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). + """ + if not isinstance(tensor, torch.Tensor): + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device("cpu")) + else: + tensor = tensor.to(torch.float32) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32) + assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() + + self.tensor = tensor + + def clone(self) -> "Boxes": + """ + Clone the Boxes. + + Returns: + Boxes + """ + return Boxes(self.tensor.clone()) + + def to(self, device: torch.device): + # Boxes are assumed float32 and does not support to(dtype) + return Boxes(self.tensor.to(device=device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) + return area + + def clip(self, box_size: Tuple[int, int]) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + Args: + box_size (height, width): The clipping box's size. + """ + assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" 
+ h, w = box_size + x1 = self.tensor[:, 0].clamp(min=0, max=w) + y1 = self.tensor[:, 1].clamp(min=0, max=h) + x2 = self.tensor[:, 2].clamp(min=0, max=w) + y2 = self.tensor[:, 3].clamp(min=0, max=h) + self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: + a binary vector which represents whether each box is empty + (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] - box[:, 0] + heights = box[:, 3] - box[:, 1] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item) -> "Boxes": + """ + Args: + item: int, slice, or a BoolTensor + + Returns: + Boxes: Create a new :class:`Boxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned Boxes might share storage with this Boxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Boxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) + return Boxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "Boxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box. + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + Returns: + a binary vector, indicating whether each box is inside the reference box. + """ + height, width = box_size + inds_inside = ( + (self.tensor[..., 0] >= -boundary_threshold) + & (self.tensor[..., 1] >= -boundary_threshold) + & (self.tensor[..., 2] < width + boundary_threshold) + & (self.tensor[..., 3] < height + boundary_threshold) + ) + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the box with horizontal and vertical scaling factors + """ + self.tensor[:, 0::2] *= scale_x + self.tensor[:, 1::2] *= scale_y + + @classmethod + def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": + """ + Concatenates a list of Boxes into a single Boxes + + Arguments: + boxes_list (list[Boxes]) + + Returns: + Boxes: the concatenated Boxes + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all([isinstance(box, Boxes) for box in boxes_list]) + + # use torch.cat (v.s. 
layers.cat) so the returned boxes never share storage with input + cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> device: + return self.tensor.device + + # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript + # https://github.com/pytorch/pytorch/issues/18627 + @torch.jit.unused + def __iter__(self): + """ + Yield a box as a Tensor of shape (4,) at a time. + """ + yield from self.tensor + + +def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Given two lists of boxes of size N and M, + compute the intersection area between __all__ N x M pairs of boxes. + The box order must be (xmin, ymin, xmax, ymax) + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: intersection, sized [N,M]. + """ + boxes1, boxes2 = boxes1.tensor, boxes2.tensor + width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( + boxes1[:, None, :2], boxes2[:, :2] + ) # [N,M,2] + + width_height.clamp_(min=0) # [N,M,2] + intersection = width_height.prod(dim=2) # [N,M] + return intersection + + +# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py +# with slight modifications +def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Given two lists of boxes of size N and M, compute the IoU + (intersection over union) between **all** N x M pairs of boxes. + The box order must be (xmin, ymin, xmax, ymax). + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + area1 = boxes1.area() # [N] + area2 = boxes2.area() # [M] + inter = pairwise_intersection(boxes1, boxes2) + + # handle empty boxes + iou = torch.where( + inter > 0, + inter / (area1[:, None] + area2 - inter), + torch.zeros(1, dtype=inter.dtype, device=inter.device), + ) + return iou + + +def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area). + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: IoA, sized [N,M]. + """ + area2 = boxes2.area() # [M] + inter = pairwise_intersection(boxes1, boxes2) + + # handle empty boxes + ioa = torch.where( + inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device) + ) + return ioa + + +def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes): + """ + Pairwise distance between N points and M boxes. The distance between a + point and a box is represented by the distance from the point to 4 edges + of the box. Distances are all positive when the point is inside the box. + + Args: + points: Nx2 coordinates. Each row is (x, y) + boxes: M boxes + + Returns: + Tensor: distances of size (N, M, 4). The 4 values are distances from + the point to the left, top, right, bottom of the box. + """ + x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1) + x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M) + return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2) + + +def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Compute pairwise intersection over union (IOU) of two sets of matched + boxes that have the same number of boxes. 
+ Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix. + + Args: + boxes1 (Boxes): bounding boxes, sized [N,4]. + boxes2 (Boxes): same length as boxes1 + Returns: + Tensor: iou, sized [N]. + """ + assert len(boxes1) == len( + boxes2 + ), "boxlists should have the same" "number of entries, got {}, {}".format( + len(boxes1), len(boxes2) + ) + area1 = boxes1.area() # [N] + area2 = boxes2.area() # [N] + box1, box2 = boxes1.tensor, boxes2.tensor + lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] + rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] + wh = (rb - lt).clamp(min=0) # [N,2] + inter = wh[:, 0] * wh[:, 1] # [N] + iou = inter / (area1 + area2 - inter) # [N] + return iou diff --git a/data_processing/detectron2/detectron2/structures/image_list.py b/data_processing/detectron2/detectron2/structures/image_list.py new file mode 100644 index 0000000..f78cae7 --- /dev/null +++ b/data_processing/detectron2/detectron2/structures/image_list.py @@ -0,0 +1,129 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from __future__ import division +from typing import Any, Dict, List, Optional, Tuple +import torch +from torch import device +from torch.nn import functional as F + +from detectron2.layers.wrappers import move_device_like, shapes_to_tensor + + +class ImageList(object): + """ + Structure that holds a list of images (of possibly + varying sizes) as a single tensor. + This works by padding the images to the same size. + The original sizes of each image is stored in `image_sizes`. + + Attributes: + image_sizes (list[tuple[int, int]]): each tuple is (h, w). + During tracing, it becomes list[Tensor] instead. + """ + + def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): + """ + Arguments: + tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 + image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can + be smaller than (H, W) due to padding. + """ + self.tensor = tensor + self.image_sizes = image_sizes + + def __len__(self) -> int: + return len(self.image_sizes) + + def __getitem__(self, idx) -> torch.Tensor: + """ + Access the individual image in its original size. + + Args: + idx: int or slice + + Returns: + Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 + """ + size = self.image_sizes[idx] + return self.tensor[idx, ..., : size[0], : size[1]] + + @torch.jit.unused + def to(self, *args: Any, **kwargs: Any) -> "ImageList": + cast_tensor = self.tensor.to(*args, **kwargs) + return ImageList(cast_tensor, self.image_sizes) + + @property + def device(self) -> device: + return self.tensor.device + + @staticmethod + def from_tensors( + tensors: List[torch.Tensor], + size_divisibility: int = 0, + pad_value: float = 0.0, + padding_constraints: Optional[Dict[str, int]] = None, + ) -> "ImageList": + """ + Args: + tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or + (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded + to the same shape with `pad_value`. + size_divisibility (int): If `size_divisibility > 0`, add padding to ensure + the common height and width is divisible by `size_divisibility`. + This depends on the model and many models need a divisibility of 32. + pad_value (float): value to pad. 
+ padding_constraints (optional[Dict]): If given, it would follow the format as + {"size_divisibility": int, "square_size": int}, where `size_divisibility` will + overwrite the above one if presented and `square_size` indicates the + square padding size if `square_size` > 0. + Returns: + an `ImageList`. + """ + assert len(tensors) > 0 + assert isinstance(tensors, (tuple, list)) + for t in tensors: + assert isinstance(t, torch.Tensor), type(t) + assert t.shape[:-2] == tensors[0].shape[:-2], t.shape + + image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] + image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes] + max_size = torch.stack(image_sizes_tensor).max(0).values + + if padding_constraints is not None: + square_size = padding_constraints.get("square_size", 0) + if square_size > 0: + # pad to square. + max_size[0] = max_size[1] = square_size + if "size_divisibility" in padding_constraints: + size_divisibility = padding_constraints["size_divisibility"] + if size_divisibility > 1: + stride = size_divisibility + # the last two dims are H,W, both subject to divisibility requirement + max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride + + # handle weirdness of scripting and tracing ... + if torch.jit.is_scripting(): + max_size: List[int] = max_size.to(dtype=torch.long).tolist() + else: + if torch.jit.is_tracing(): + image_sizes = image_sizes_tensor + + if len(tensors) == 1: + # This seems slightly (2%) faster. + # TODO: check whether it's faster for multiple images as well + image_size = image_sizes[0] + padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] + batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) + else: + # max_size can be a tensor in tracing mode, therefore convert to list + batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) + device = ( + None if torch.jit.is_scripting() else ("cpu" if torch.jit.is_tracing() else None) + ) + batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device) + batched_imgs = move_device_like(batched_imgs, tensors[0]) + for i, img in enumerate(tensors): + # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)` + # Tracing mode cannot capture `copy_()` of temporary locals + batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img) + + return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/data_processing/detectron2/detectron2/structures/instances.py b/data_processing/detectron2/detectron2/structures/instances.py new file mode 100644 index 0000000..c9579bc --- /dev/null +++ b/data_processing/detectron2/detectron2/structures/instances.py @@ -0,0 +1,194 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import warnings +from typing import Any, Dict, List, Tuple, Union +import torch + + +class Instances: + """ + This class represents a list of instances in an image. + It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". + All fields must have the same ``__len__`` which is the number of instances. + + All other (non-field) attributes of this class are considered private: + they must start with '_' and are not modifiable by a user. + + Some basic usage: + + 1. Set/get/check a field: + + .. code-block:: python + + instances.gt_boxes = Boxes(...) + print(instances.pred_masks) # a tensor of shape (N, H, W) + print('gt_masks' in instances) + + 2. ``len(instances)`` returns the number of instances + 3. 
Indexing: ``instances[indices]`` will apply the indexing on all the fields + and returns a new :class:`Instances`. + Typically, ``indices`` is a integer vector of indices, + or a binary mask of length ``num_instances`` + + .. code-block:: python + + category_3_detections = instances[instances.pred_classes == 3] + confident_detections = instances[instances.scores > 0.9] + """ + + def __init__(self, image_size: Tuple[int, int], **kwargs: Any): + """ + Args: + image_size (height, width): the spatial size of the image. + kwargs: fields to add to this `Instances`. + """ + self._image_size = image_size + self._fields: Dict[str, Any] = {} + for k, v in kwargs.items(): + self.set(k, v) + + @property + def image_size(self) -> Tuple[int, int]: + """ + Returns: + tuple: height, width + """ + return self._image_size + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_"): + super().__setattr__(name, val) + else: + self.set(name, val) + + def __getattr__(self, name: str) -> Any: + if name == "_fields" or name not in self._fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self._fields[name] + + def set(self, name: str, value: Any) -> None: + """ + Set the field named `name` to `value`. + The length of `value` must be the number of instances, + and must agree with other existing fields in this object. + """ + with warnings.catch_warnings(record=True): + data_len = len(value) + if len(self._fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self._fields[name] = value + + def has(self, name: str) -> bool: + """ + Returns: + bool: whether the field called `name` exists. + """ + return name in self._fields + + def remove(self, name: str) -> None: + """ + Remove the field called `name`. + """ + del self._fields[name] + + def get(self, name: str) -> Any: + """ + Returns the field called `name`. + """ + return self._fields[name] + + def get_fields(self) -> Dict[str, Any]: + """ + Returns: + dict: a dict which maps names (str) to data of the fields + + Modifying the returned dict will modify this instance. + """ + return self._fields + + # Tensor-like methods + def to(self, *args: Any, **kwargs: Any) -> "Instances": + """ + Returns: + Instances: all fields are called with a `to(device)`, if the field has this method. + """ + ret = Instances(self._image_size) + for k, v in self._fields.items(): + if hasattr(v, "to"): + v = v.to(*args, **kwargs) + ret.set(k, v) + return ret + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": + """ + Args: + item: an index-like object and will be used to index all the fields. + + Returns: + If `item` is a string, return the data in the corresponding field. + Otherwise, returns an `Instances` where all fields are indexed by `item`. 
+ """ + if type(item) == int: + if item >= len(self) or item < -len(self): + raise IndexError("Instances index out of range!") + else: + item = slice(item, None, len(self)) + + ret = Instances(self._image_size) + for k, v in self._fields.items(): + ret.set(k, v[item]) + return ret + + def __len__(self) -> int: + for v in self._fields.values(): + # use __len__ because len() has to be int and is not friendly to tracing + return v.__len__() + raise NotImplementedError("Empty Instances does not support __len__!") + + def __iter__(self): + raise NotImplementedError("`Instances` object is not iterable!") + + @staticmethod + def cat(instance_lists: List["Instances"]) -> "Instances": + """ + Args: + instance_lists (list[Instances]) + + Returns: + Instances + """ + assert all(isinstance(i, Instances) for i in instance_lists) + assert len(instance_lists) > 0 + if len(instance_lists) == 1: + return instance_lists[0] + + image_size = instance_lists[0].image_size + if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing + for i in instance_lists[1:]: + assert i.image_size == image_size + ret = Instances(image_size) + for k in instance_lists[0]._fields.keys(): + values = [i.get(k) for i in instance_lists] + v0 = values[0] + if isinstance(v0, torch.Tensor): + values = torch.cat(values, dim=0) + elif isinstance(v0, list): + values = list(itertools.chain(*values)) + elif hasattr(type(v0), "cat"): + values = type(v0).cat(values) + else: + raise ValueError("Unsupported type {} for concatenation".format(type(v0))) + ret.set(k, values) + return ret + + def __str__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self)) + s += "image_height={}, ".format(self._image_size[0]) + s += "image_width={}, ".format(self._image_size[1]) + s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) + return s + + __repr__ = __str__ diff --git a/data_processing/detectron2/detectron2/structures/keypoints.py b/data_processing/detectron2/detectron2/structures/keypoints.py new file mode 100644 index 0000000..b93ebed --- /dev/null +++ b/data_processing/detectron2/detectron2/structures/keypoints.py @@ -0,0 +1,235 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Any, List, Tuple, Union +import torch +from torch.nn import functional as F + + +class Keypoints: + """ + Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property + containing the x,y location and visibility flag of each keypoint. This tensor has shape + (N, K, 3) where N is the number of instances and K is the number of keypoints per instance. + + The visibility flag follows the COCO format and must be one of three integers: + + * v=0: not labeled (in which case x=y=0) + * v=1: labeled but not visible + * v=2: labeled and visible + """ + + def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]): + """ + Arguments: + keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint. + The shape should be (N, K, 3) where N is the number of + instances, and K is the number of keypoints per instance. 
+ """ + device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu") + keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device) + assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape + self.tensor = keypoints + + def __len__(self) -> int: + return self.tensor.size(0) + + def to(self, *args: Any, **kwargs: Any) -> "Keypoints": + return type(self)(self.tensor.to(*args, **kwargs)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor: + """ + Convert keypoint annotations to a heatmap of one-hot labels for training, + as described in :paper:`Mask R-CNN`. + + Arguments: + boxes: Nx4 tensor, the boxes to draw the keypoints to + + Returns: + heatmaps: + A tensor of shape (N, K), each element is integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. + valid: + A tensor of shape (N, K) containing whether each keypoint is in the roi or not. + """ + return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size) + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints": + """ + Create a new `Keypoints` by indexing on this `Keypoints`. + + The following usage are allowed: + + 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance. + 2. `new_kpts = kpts[2:10]`: return a slice of key points. + 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor + with `length = len(kpts)`. Nonzero elements in the vector will be selected. + + Note that the returned Keypoints might share storage with this Keypoints, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Keypoints([self.tensor[item]]) + return Keypoints(self.tensor[item]) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + @staticmethod + def cat(keypoints_list: List["Keypoints"]) -> "Keypoints": + """ + Concatenates a list of Keypoints into a single Keypoints + + Arguments: + keypoints_list (list[Keypoints]) + + Returns: + Keypoints: the concatenated Keypoints + """ + assert isinstance(keypoints_list, (list, tuple)) + assert len(keypoints_list) > 0 + assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list) + + cat_kpts = type(keypoints_list[0])( + torch.cat([kpts.tensor for kpts in keypoints_list], dim=0) + ) + return cat_kpts + + +# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) +def _keypoints_to_heatmap( + keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space. + + Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the + closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the + continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"): + d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. + + Arguments: + keypoints: tensor of keypoint locations in of shape (N, K, 3). + rois: Nx4 tensor of rois in xyxy format + heatmap_size: integer side length of square heatmap. + + Returns: + heatmaps: A tensor of shape (N, K) containing an integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. 
+ valid: A tensor of shape (N, K) containing whether each keypoint is in + the roi or not. + """ + + if rois.numel() == 0: + return rois.new().long(), rois.new().long() + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +@torch.jit.script_if_tracing +def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: + """ + Extract predicted keypoint locations from heatmaps. + + Args: + maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for + each ROI and each keypoint. + rois (Tensor): (#ROIs, 4). The box of each ROI. + + Returns: + Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to + (x, y, logit, score) for each keypoint. + + When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate, + we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from + Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. 
+ """ + + offset_x = rois[:, 0] + offset_y = rois[:, 1] + + widths = (rois[:, 2] - rois[:, 0]).clamp(min=1) + heights = (rois[:, 3] - rois[:, 1]).clamp(min=1) + widths_ceil = widths.ceil() + heights_ceil = heights.ceil() + + num_rois, num_keypoints = maps.shape[:2] + xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4) + + width_corrections = widths / widths_ceil + height_corrections = heights / heights_ceil + + keypoints_idx = torch.arange(num_keypoints, device=maps.device) + + for i in range(num_rois): + outsize = (int(heights_ceil[i]), int(widths_ceil[i])) + roi_map = F.interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False) + + # Although semantically equivalent, `reshape` is used instead of `squeeze` due + # to limitation during ONNX export of `squeeze` in scripting mode + roi_map = roi_map.reshape(roi_map.shape[1:]) # keypoints x H x W + + # softmax over the spatial region + max_score, _ = roi_map.view(num_keypoints, -1).max(1) + max_score = max_score.view(num_keypoints, 1, 1) + tmp_full_resolution = (roi_map - max_score).exp_() + tmp_pool_resolution = (maps[i] - max_score).exp_() + # Produce scores over the region H x W, but normalize with POOL_H x POOL_W, + # so that the scores of objects of different absolute sizes will be more comparable + roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True) + + w = roi_map.shape[2] + pos = roi_map.view(num_keypoints, -1).argmax(1) + + x_int = pos % w + y_int = (pos - x_int) // w + + assert ( + roi_map_scores[keypoints_idx, y_int, x_int] + == roi_map_scores.view(num_keypoints, -1).max(1)[0] + ).all() + + x = (x_int.float() + 0.5) * width_corrections[i] + y = (y_int.float() + 0.5) * height_corrections[i] + + xy_preds[i, :, 0] = x + offset_x[i] + xy_preds[i, :, 1] = y + offset_y[i] + xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int] + xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int] + + return xy_preds diff --git a/data_processing/detectron2/detectron2/structures/masks.py b/data_processing/detectron2/detectron2/structures/masks.py new file mode 100644 index 0000000..899ad8b --- /dev/null +++ b/data_processing/detectron2/detectron2/structures/masks.py @@ -0,0 +1,534 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import copy +import itertools +import numpy as np +from typing import Any, Iterator, List, Union +import pycocotools.mask as mask_util +import torch +from torch import device + +from detectron2.layers.roi_align import ROIAlign +from detectron2.utils.memory import retry_if_cuda_oom + +from .boxes import Boxes + + +def polygon_area(x, y): + # Using the shoelace formula + # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + +def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: + """ + Args: + polygons (list[ndarray]): each array has shape (Nx2,) + height, width (int) + + Returns: + ndarray: a bool mask of shape (height, width) + """ + if len(polygons) == 0: + # COCOAPI does not support empty polygons + return np.zeros((height, width)).astype(bool) + rles = mask_util.frPyObjects(polygons, height, width) + rle = mask_util.merge(rles) + return mask_util.decode(rle).astype(bool) + + +def rasterize_polygons_within_box( + polygons: List[np.ndarray], box: np.ndarray, mask_size: int +) -> torch.Tensor: + """ + Rasterize the polygons into a mask image and + crop the mask content in the given box. + The cropped mask is resized to (mask_size, mask_size). + + This function is used when generating training targets for mask head in Mask R-CNN. + Given original ground-truth masks for an image, new ground-truth mask + training targets in the size of `mask_size x mask_size` + must be provided for each predicted box. This function will be called to + produce such targets. + + Args: + polygons (list[ndarray[float]]): a list of polygons, which represents an instance. + box: 4-element numpy array + mask_size (int): + + Returns: + Tensor: BoolTensor of shape (mask_size, mask_size) + """ + # 1. Shift the polygons w.r.t the boxes + w, h = box[2] - box[0], box[3] - box[1] + + polygons = copy.deepcopy(polygons) + for p in polygons: + p[0::2] = p[0::2] - box[0] + p[1::2] = p[1::2] - box[1] + + # 2. Rescale the polygons to the new box size + # max() to avoid division by small number + ratio_h = mask_size / max(h, 0.1) + ratio_w = mask_size / max(w, 0.1) + + if ratio_h == ratio_w: + for p in polygons: + p *= ratio_h + else: + for p in polygons: + p[0::2] *= ratio_w + p[1::2] *= ratio_h + + # 3. Rasterize the polygons with coco api + mask = polygons_to_bitmask(polygons, mask_size, mask_size) + mask = torch.from_numpy(mask) + return mask + + +class BitMasks: + """ + This class stores the segmentation masks for all objects in one image, in + the form of bitmaps. + + Attributes: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + + def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): + """ + Args: + tensor: bool Tensor of N,H,W, representing N instances in the image. 
+ """ + if isinstance(tensor, torch.Tensor): + tensor = tensor.to(torch.bool) + else: + tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device("cpu")) + assert tensor.dim() == 3, tensor.size() + self.image_size = tensor.shape[1:] + self.tensor = tensor + + @torch.jit.unused + def to(self, *args: Any, **kwargs: Any) -> "BitMasks": + return BitMasks(self.tensor.to(*args, **kwargs)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + @torch.jit.unused + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": + """ + Returns: + BitMasks: Create a new :class:`BitMasks` by indexing. + + The following usage are allowed: + + 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. + 2. `new_masks = masks[2:10]`: return a slice of masks. + 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor + with `length = len(masks)`. Nonzero elements in the vector will be selected. + + Note that the returned object might share storage with this object, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return BitMasks(self.tensor[item].unsqueeze(0)) + m = self.tensor[item] + assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( + item, m.shape + ) + return BitMasks(m) + + @torch.jit.unused + def __iter__(self) -> torch.Tensor: + yield from self.tensor + + @torch.jit.unused + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + def __len__(self) -> int: + return self.tensor.shape[0] + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: a BoolTensor which represents + whether each mask is empty (False) or non-empty (True). + """ + return self.tensor.flatten(1).any(dim=1) + + @staticmethod + def from_polygon_masks( + polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int + ) -> "BitMasks": + """ + Args: + polygon_masks (list[list[ndarray]] or PolygonMasks) + height, width (int) + """ + if isinstance(polygon_masks, PolygonMasks): + polygon_masks = polygon_masks.polygons + masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] + if len(masks): + return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) + else: + return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) + + @staticmethod + def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": + """ + Args: + roi_masks: + height, width (int): + """ + return roi_masks.to_bitmasks(height, width) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each bitmask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + It has less reconstruction error compared to rasterization with polygons. + However we observe no difference in accuracy, + but BitMasks requires more memory to store all the masks. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: + A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. 
+ """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + device = self.tensor.device + + batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] + rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 + + bit_masks = self.tensor.to(dtype=torch.float32) + rois = rois.to(device=device) + output = ( + ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) + .forward(bit_masks[:, None, :, :], rois) + .squeeze(1) + ) + output = output >= 0.5 + return output + + def get_bounding_boxes(self) -> Boxes: + """ + Returns: + Boxes: tight bounding boxes around bitmasks. + If a mask is empty, it's bounding box will be all zero. + """ + boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) + x_any = torch.any(self.tensor, dim=1) + y_any = torch.any(self.tensor, dim=2) + for idx in range(self.tensor.shape[0]): + x = torch.where(x_any[idx, :])[0] + y = torch.where(y_any[idx, :])[0] + if len(x) > 0 and len(y) > 0: + boxes[idx, :] = torch.as_tensor( + [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 + ) + return Boxes(boxes) + + @staticmethod + def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": + """ + Concatenates a list of BitMasks into a single BitMasks + + Arguments: + bitmasks_list (list[BitMasks]) + + Returns: + BitMasks: the concatenated BitMasks + """ + assert isinstance(bitmasks_list, (list, tuple)) + assert len(bitmasks_list) > 0 + assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) + + cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) + return cat_bitmasks + + +class PolygonMasks: + """ + This class stores the segmentation masks for all objects in one image, in the form of polygons. + + Attributes: + polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. + """ + + def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): + """ + Arguments: + polygons (list[list[np.ndarray]]): The first + level of the list correspond to individual instances, + the second level to all the polygons that compose the + instance, and the third level to the polygon coordinates. + The third level array should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + """ + if not isinstance(polygons, list): + raise ValueError( + "Cannot create PolygonMasks: Expect a list of list of polygons per image. " + "Got '{}' instead.".format(type(polygons)) + ) + + def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: + # Use float64 for higher precision, because why not? + # Always put polygons on CPU (self.to is a no-op) since they + # are supposed to be small tensors. + # May need to change this assumption if GPU placement becomes useful + if isinstance(t, torch.Tensor): + t = t.cpu().numpy() + return np.asarray(t).astype("float64") + + def process_polygons( + polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] + ) -> List[np.ndarray]: + if not isinstance(polygons_per_instance, list): + raise ValueError( + "Cannot create polygons: Expect a list of polygons per instance. 
" + "Got '{}' instead.".format(type(polygons_per_instance)) + ) + # transform each polygon to a numpy array + polygons_per_instance = [_make_array(p) for p in polygons_per_instance] + for polygon in polygons_per_instance: + if len(polygon) % 2 != 0 or len(polygon) < 6: + raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") + return polygons_per_instance + + self.polygons: List[List[np.ndarray]] = [ + process_polygons(polygons_per_instance) for polygons_per_instance in polygons + ] + + def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": + return self + + @property + def device(self) -> torch.device: + return torch.device("cpu") + + def get_bounding_boxes(self) -> Boxes: + """ + Returns: + Boxes: tight bounding boxes around polygon masks. + """ + boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) + for idx, polygons_per_instance in enumerate(self.polygons): + minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) + maxxy = torch.zeros(2, dtype=torch.float32) + for polygon in polygons_per_instance: + coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) + minxy = torch.min(minxy, torch.min(coords, dim=0).values) + maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) + boxes[idx, :2] = minxy + boxes[idx, 2:] = maxxy + return Boxes(boxes) + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: + a BoolTensor which represents whether each mask is empty (False) or not (True). + """ + keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] + return torch.from_numpy(np.asarray(keep, dtype=bool)) + + def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": + """ + Support indexing over the instances and return a `PolygonMasks` object. + `item` can be: + + 1. An integer. It will return an object with only one instance. + 2. A slice. It will return an object with the selected instances. + 3. A list[int]. It will return an object with the selected instances, + correpsonding to the indices in the list. + 4. A vector mask of type BoolTensor, whose length is num_instances. + It will return an object with the instances whose mask is nonzero. + """ + if isinstance(item, int): + selected_polygons = [self.polygons[item]] + elif isinstance(item, slice): + selected_polygons = self.polygons[item] + elif isinstance(item, list): + selected_polygons = [self.polygons[i] for i in item] + elif isinstance(item, torch.Tensor): + # Polygons is a list, so we have to move the indices back to CPU. + if item.dtype == torch.bool: + assert item.dim() == 1, item.shape + item = item.nonzero().squeeze(1).cpu().numpy().tolist() + elif item.dtype in [torch.int32, torch.int64]: + item = item.cpu().numpy().tolist() + else: + raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) + selected_polygons = [self.polygons[i] for i in item] + return PolygonMasks(selected_polygons) + + def __iter__(self) -> Iterator[List[np.ndarray]]: + """ + Yields: + list[ndarray]: the polygons for one instance. + Each Tensor is a float64 vector representing a polygon. 
+ """ + return iter(self.polygons) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.polygons)) + return s + + def __len__(self) -> int: + return len(self.polygons) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each mask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. + """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + + device = boxes.device + # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise + # (several small tensors for representing a single instance mask) + boxes = boxes.to(torch.device("cpu")) + + results = [ + rasterize_polygons_within_box(poly, box.numpy(), mask_size) + for poly, box in zip(self.polygons, boxes) + ] + """ + poly: list[list[float]], the polygons for one instance + box: a tensor of shape (4,) + """ + if len(results) == 0: + return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) + return torch.stack(results, dim=0).to(device=device) + + def area(self): + """ + Computes area of the mask. + Only works with Polygons, using the shoelace formula: + https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + + Returns: + Tensor: a vector, area for each instance + """ + + area = [] + for polygons_per_instance in self.polygons: + area_per_instance = 0 + for p in polygons_per_instance: + area_per_instance += polygon_area(p[0::2], p[1::2]) + area.append(area_per_instance) + + return torch.tensor(area) + + @staticmethod + def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": + """ + Concatenates a list of PolygonMasks into a single PolygonMasks + + Arguments: + polymasks_list (list[PolygonMasks]) + + Returns: + PolygonMasks: the concatenated PolygonMasks + """ + assert isinstance(polymasks_list, (list, tuple)) + assert len(polymasks_list) > 0 + assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) + + cat_polymasks = type(polymasks_list[0])( + list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) + ) + return cat_polymasks + + +class ROIMasks: + """ + Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, + full-image bitmask can be obtained by "pasting" the mask on the region defined + by the corresponding ROI box. + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor: (N, M, M) mask tensor that defines the mask within each ROI. + """ + if tensor.dim() != 3: + raise ValueError("ROIMasks must take a masks of 3 dimension.") + self.tensor = tensor + + def to(self, device: torch.device) -> "ROIMasks": + return ROIMasks(self.tensor.to(device)) + + @property + def device(self) -> device: + return self.tensor.device + + def __len__(self): + return self.tensor.shape[0] + + def __getitem__(self, item) -> "ROIMasks": + """ + Returns: + ROIMasks: Create a new :class:`ROIMasks` by indexing. + + The following usage are allowed: + + 1. `new_masks = masks[2:10]`: return a slice of masks. + 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor + with `length = len(masks)`. 
Nonzero elements in the vector will be selected. + + Note that the returned object might share storage with this object, + subject to Pytorch's indexing semantics. + """ + t = self.tensor[item] + if t.dim() != 3: + raise ValueError( + f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" + ) + return ROIMasks(t) + + @torch.jit.unused + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + @torch.jit.unused + def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): + """ + Args: see documentation of :func:`paste_masks_in_image`. + """ + from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape + + if torch.jit.is_tracing(): + if isinstance(height, torch.Tensor): + paste_func = _paste_masks_tensor_shape + else: + paste_func = paste_masks_in_image + else: + paste_func = retry_if_cuda_oom(paste_masks_in_image) + bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) + return BitMasks(bitmasks) diff --git a/data_processing/detectron2/detectron2/structures/rotated_boxes.py b/data_processing/detectron2/detectron2/structures/rotated_boxes.py new file mode 100644 index 0000000..c842b99 --- /dev/null +++ b/data_processing/detectron2/detectron2/structures/rotated_boxes.py @@ -0,0 +1,505 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List, Tuple +import torch + +from detectron2.layers.rotated_boxes import pairwise_iou_rotated + +from .boxes import Boxes + + +class RotatedBoxes(Boxes): + """ + This structure stores a list of rotated boxes as a Nx5 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx5 matrix. Each row is + (x_center, y_center, width, height, angle), + in which angle is represented in degrees. + While there's no strict range restriction for it, + the recommended principal range is between [-180, 180) degrees. + + Assume we have a horizontal box B = (x_center, y_center, width, height), + where width is along the x-axis and height is along the y-axis. + The rotated box B_rot (x_center, y_center, width, height, angle) + can be seen as: + + 1. When angle == 0: + B_rot == B + 2. When angle > 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW; + 3. When angle < 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW. + + Mathematically, since the right-handed coordinate system for image space + is (y, x), where y is top->down and x is left->right, the 4 vertices of the + rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from + the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4) + in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians, + :math:`(y_c, x_c)` is the center of the rectangle): + + .. math:: + + yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c, + + xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c, + + which is the standard rigid-body rotation transformation. 
+ + Intuitively, the angle is + (1) the rotation angle from y-axis in image space + to the height vector (top->down in the box's local coordinate system) + of the box in CCW, and + (2) the rotation angle from x-axis in image space + to the width vector (left->right in the box's local coordinate system) + of the box in CCW. + + More intuitively, consider the following horizontal box ABCD represented + in (x1, y1, x2, y2): (3, 2, 7, 4), + covering the [3, 7] x [2, 4] region of the continuous coordinate system + which looks like this: + + .. code:: none + + O--------> x + | + | A---B + | | | + | D---C + | + v y + + Note that each capital letter represents one 0-dimensional geometric point + instead of a 'square pixel' here. + + In the example above, using (x, y) to represent a point we have: + + .. math:: + + O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4) + + We name vector AB = vector DC as the width vector in box's local coordinate system, and + vector AD = vector BC as the height vector in box's local coordinate system. Initially, + when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis + in the image space, respectively. + + For better illustration, we denote the center of the box as E, + + .. code:: none + + O--------> x + | + | A---B + | | E | + | D---C + | + v y + + where the center E = ((3+7)/2, (2+4)/2) = (5, 3). + + Also, + + .. math:: + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Therefore, the corresponding representation for the same shape in rotated box in + (x_center, y_center, width, height, angle) format is: + + (5, 3, 4, 2, 0), + + Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees + CCW (counter-clockwise) by definition. It looks like this: + + .. code:: none + + O--------> x + | B-C + | | | + | |E| + | | | + | A-D + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CCW with regard to E: + A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5) + + Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to + vector AD or vector BC (the top->down height vector in box's local coordinate system), + or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right + width vector in box's local coordinate system). + + .. math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise) + by definition? It looks like this: + + .. code:: none + + O--------> x + | D-A + | | | + | |E| + | | | + | C-B + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CW with regard to E: + A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1) + + .. math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU + will be 1. However, these two will generate different RoI Pooling results and + should not be treated as an identical box. + + On the other hand, it's easy to see that (X, Y, W, H, A) is identical to + (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be + identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is + equivalent to rotating the same shape 90 degrees CW. + + We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180): + + .. 
code:: none + + O--------> x + | + | C---D + | | E | + | B---A + | + v y + + .. math:: + + A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2), + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Finally, this is a very inaccurate (heavily quantized) illustration of + how (5, 3, 4, 2, 60) looks like in case anyone wonders: + + .. code:: none + + O--------> x + | B\ + | / C + | /E / + | A / + | `D + v y + + It's still a rectangle with center of (5, 3), width of 4 and height of 2, + but its angle (and thus orientation) is somewhere between + (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90). + """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size() + + self.tensor = tensor + + def clone(self) -> "RotatedBoxes": + """ + Clone the RotatedBoxes. + + Returns: + RotatedBoxes + """ + return RotatedBoxes(self.tensor.clone()) + + def to(self, device: torch.device): + # Boxes are assumed float32 and does not support to(dtype) + return RotatedBoxes(self.tensor.to(device=device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = box[:, 2] * box[:, 3] + return area + + # Avoid in-place operations so that we can torchscript; NOTE: this creates a new tensor + def normalize_angles(self) -> None: + """ + Restrict angles to the range of [-180, 180) degrees + """ + angle_tensor = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0 + self.tensor = torch.cat((self.tensor[:, :4], angle_tensor[:, None]), dim=1) + + def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + For RRPN: + Only clip boxes that are almost horizontal with a tolerance of + clip_angle_threshold to maintain backward compatibility. + + Rotated boxes beyond this threshold are not clipped for two reasons: + + 1. There are potentially multiple ways to clip a rotated box to make it + fit within the image. + 2. It's tricky to make the entire rectangular box fit within the image + and still be able to not leave out pixels of interest. + + Therefore we rely on ops like RoIAlignRotated to safely handle this. + + Args: + box_size (height, width): The clipping box's size. + clip_angle_threshold: + Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees), + we do the clipping as horizontal boxes. 
+ """ + h, w = box_size + + # normalize angles to be within (-180, 180] degrees + self.normalize_angles() + + idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0] + + # convert to (x1, y1, x2, y2) + x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0 + y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0 + x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0 + y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0 + + # clip + x1.clamp_(min=0, max=w) + y1.clamp_(min=0, max=h) + x2.clamp_(min=0, max=w) + y2.clamp_(min=0, max=h) + + # convert back to (xc, yc, w, h) + self.tensor[idx, 0] = (x1 + x2) / 2.0 + self.tensor[idx, 1] = (y1 + y2) / 2.0 + # make sure widths and heights do not increase due to numerical errors + self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1) + self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: a binary vector which represents + whether each box is empty (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] + heights = box[:, 3] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item) -> "RotatedBoxes": + """ + Returns: + RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned RotatedBoxes might share storage with this RotatedBoxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return RotatedBoxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format( + item + ) + return RotatedBoxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "RotatedBoxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box covering + [0, width] x [0, height] + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + For RRPN, it might not be necessary to call this function since it's common + for rotated box to extend to outside of the image boundaries + (the clip function only clips the near-horizontal boxes) + + Returns: + a binary vector, indicating whether each box is inside the reference box. 
+ """ + height, width = box_size + + cnt_x = self.tensor[..., 0] + cnt_y = self.tensor[..., 1] + half_w = self.tensor[..., 2] / 2.0 + half_h = self.tensor[..., 3] / 2.0 + a = self.tensor[..., 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + max_rect_dx = c * half_w + s * half_h + max_rect_dy = c * half_h + s * half_w + + inds_inside = ( + (cnt_x - max_rect_dx >= -boundary_threshold) + & (cnt_y - max_rect_dy >= -boundary_threshold) + & (cnt_x + max_rect_dx < width + boundary_threshold) + & (cnt_y + max_rect_dy < height + boundary_threshold) + ) + + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return self.tensor[:, :2] + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the rotated box with horizontal and vertical scaling factors + Note: when scale_factor_x != scale_factor_y, + the rotated box does not preserve the rectangular shape when the angle + is not a multiple of 90 degrees under resize transformation. + Instead, the shape is a parallelogram (that has skew) + Here we make an approximation by fitting a rotated rectangle to the parallelogram. + """ + self.tensor[:, 0] *= scale_x + self.tensor[:, 1] *= scale_y + theta = self.tensor[:, 4] * math.pi / 180.0 + c = torch.cos(theta) + s = torch.sin(theta) + + # In image space, y is top->down and x is left->right + # Consider the local coordintate system for the rotated box, + # where the box center is located at (0, 0), and the four vertices ABCD are + # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2) + # the midpoint of the left edge AD of the rotated box E is: + # E = (A+D)/2 = (-w / 2, 0) + # the midpoint of the top edge AB of the rotated box F is: + # F(0, -h / 2) + # To get the old coordinates in the global system, apply the rotation transformation + # (Note: the right-handed coordinate system for image space is yOx): + # (old_x, old_y) = (s * y + c * x, c * y - s * x) + # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2) + # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2) + # After applying the scaling factor (sfx, sfy): + # E(new) = (-sfx * c * w / 2, sfy * s * w / 2) + # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2) + # The new width after scaling tranformation becomes: + + # w(new) = |E(new) - O| * 2 + # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2 + # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w + # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y + self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2) + + # h(new) = |F(new) - O| * 2 + # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2 + # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h + # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x + self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2) + + # The angle is the rotation angle from y-axis in image space to the height + # vector (top->down in the box's local coordinate system) of the box in CCW. 
+ # + # angle(new) = angle_yOx(O - F(new)) + # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) ) + # = atan2(sfx * s * h / 2, sfy * c * h / 2) + # = atan2(sfx * s, sfy * c) + # + # For example, + # when sfx == sfy, angle(new) == atan2(s, c) == angle(old) + self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi + + @classmethod + def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes": + """ + Concatenates a list of RotatedBoxes into a single RotatedBoxes + + Arguments: + boxes_list (list[RotatedBoxes]) + + Returns: + RotatedBoxes: the concatenated RotatedBoxes + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all([isinstance(box, RotatedBoxes) for box in boxes_list]) + + # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input + cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> torch.device: + return self.tensor.device + + @torch.jit.unused + def __iter__(self): + """ + Yield a box as a Tensor of shape (5,) at a time. + """ + yield from self.tensor + + +def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None: + """ + Given two lists of rotated boxes of size N and M, + compute the IoU (intersection over union) + between **all** N x M pairs of boxes. + The box order must be (x_center, y_center, width, height, angle). + + Args: + boxes1, boxes2 (RotatedBoxes): + two `RotatedBoxes`. Contains N & M rotated boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + + return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor) diff --git a/data_processing/detectron2/detectron2/tracking/__init__.py b/data_processing/detectron2/detectron2/tracking/__init__.py new file mode 100644 index 0000000..21078ae --- /dev/null +++ b/data_processing/detectron2/detectron2/tracking/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .base_tracker import ( # noqa + BaseTracker, + build_tracker_head, + TRACKER_HEADS_REGISTRY, +) +from .bbox_iou_tracker import BBoxIOUTracker # noqa +from .hungarian_tracker import BaseHungarianTracker # noqa +from .iou_weighted_hungarian_bbox_iou_tracker import ( # noqa + IOUWeightedHungarianBBoxIOUTracker, +) +from .utils import create_prediction_pairs # noqa +from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker # noqa + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/detectron2/tracking/base_tracker.py b/data_processing/detectron2/detectron2/tracking/base_tracker.py new file mode 100644 index 0000000..a8872f7 --- /dev/null +++ b/data_processing/detectron2/detectron2/tracking/base_tracker.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. +from detectron2.config import configurable +from detectron2.utils.registry import Registry + +from ..config.config import CfgNode as CfgNode_ +from ..structures import Instances + +TRACKER_HEADS_REGISTRY = Registry("TRACKER_HEADS") +TRACKER_HEADS_REGISTRY.__doc__ = """ +Registry for tracking classes. 
+""" + + +class BaseTracker(object): + """ + A parent class for all trackers + """ + + @configurable + def __init__(self, **kwargs): + self._prev_instances = None # (D2)instances for previous frame + self._matched_idx = set() # indices in prev_instances found matching + self._matched_ID = set() # idendities in prev_instances found matching + self._untracked_prev_idx = set() # indices in prev_instances not found matching + self._id_count = 0 # used to assign new id + + @classmethod + def from_config(cls, cfg: CfgNode_): + raise NotImplementedError("Calling BaseTracker::from_config") + + def update(self, predictions: Instances) -> Instances: + """ + Args: + predictions: D2 Instances for predictions of the current frame + Return: + D2 Instances for predictions of the current frame with ID assigned + + _prev_instances and instances will have the following fields: + .pred_boxes (shape=[N, 4]) + .scores (shape=[N,]) + .pred_classes (shape=[N,]) + .pred_keypoints (shape=[N, M, 3], Optional) + .pred_masks (shape=List[2D_MASK], Optional) 2D_MASK: shape=[H, W] + .ID (shape=[N,]) + + N: # of detected bboxes + H and W: height and width of 2D mask + """ + raise NotImplementedError("Calling BaseTracker::update") + + +def build_tracker_head(cfg: CfgNode_) -> BaseTracker: + """ + Build a tracker head from `cfg.TRACKER_HEADS.TRACKER_NAME`. + + Args: + cfg: D2 CfgNode, config file with tracker information + Return: + tracker object + """ + name = cfg.TRACKER_HEADS.TRACKER_NAME + tracker_class = TRACKER_HEADS_REGISTRY.get(name) + return tracker_class(cfg) diff --git a/data_processing/detectron2/detectron2/tracking/bbox_iou_tracker.py b/data_processing/detectron2/detectron2/tracking/bbox_iou_tracker.py new file mode 100644 index 0000000..598081c --- /dev/null +++ b/data_processing/detectron2/detectron2/tracking/bbox_iou_tracker.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. 
+import copy +import numpy as np +from typing import List +import torch + +from detectron2.config import configurable +from detectron2.structures import Boxes, Instances +from detectron2.structures.boxes import pairwise_iou + +from ..config.config import CfgNode as CfgNode_ +from .base_tracker import TRACKER_HEADS_REGISTRY, BaseTracker + + +@TRACKER_HEADS_REGISTRY.register() +class BBoxIOUTracker(BaseTracker): + """ + A bounding box tracker to assign ID based on IoU between current and previous instances + """ + + @configurable + def __init__( + self, + *, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + track_iou_threshold: float = 0.5, + **kwargs, + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + track_iou_threshold: iou threshold, below this number a bbox pair is removed + from tracking + """ + super().__init__(**kwargs) + self._video_height = video_height + self._video_width = video_width + self._max_num_instances = max_num_instances + self._max_lost_frame_count = max_lost_frame_count + self._min_box_rel_dim = min_box_rel_dim + self._min_instance_period = min_instance_period + self._track_iou_threshold = track_iou_threshold + + @classmethod + def from_config(cls, cfg: CfgNode_): + """ + Old style initialization using CfgNode + + Args: + cfg: D2 CfgNode, config file + Return: + dictionary storing arguments for __init__ method + """ + assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS + assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS + video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") + video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") + max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) + max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) + min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) + min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) + track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) + return { + "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", + "video_height": video_height, + "video_width": video_width, + "max_num_instances": max_num_instances, + "max_lost_frame_count": max_lost_frame_count, + "min_box_rel_dim": min_box_rel_dim, + "min_instance_period": min_instance_period, + "track_iou_threshold": track_iou_threshold, + } + + def update(self, instances: Instances) -> Instances: + """ + See BaseTracker description + """ + instances = self._initialize_extra_fields(instances) + if self._prev_instances is not None: + # calculate IoU of all bbox pairs + iou_all = pairwise_iou( + boxes1=instances.pred_boxes, + boxes2=self._prev_instances.pred_boxes, + ) + # sort IoU in descending order + bbox_pairs = self._create_prediction_pairs(instances, iou_all) + # assign previous ID to current bbox if IoU > track_iou_threshold + self._reset_fields() + for bbox_pair in bbox_pairs: + idx = bbox_pair["idx"] + prev_id = bbox_pair["prev_id"] + if ( + idx in self._matched_idx + or prev_id in self._matched_ID + or 
bbox_pair["IoU"] < self._track_iou_threshold + ): + continue + instances.ID[idx] = prev_id + instances.ID_period[idx] = bbox_pair["prev_period"] + 1 + instances.lost_frame_count[idx] = 0 + self._matched_idx.add(idx) + self._matched_ID.add(prev_id) + self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) + instances = self._assign_new_id(instances) + instances = self._merge_untracked_instances(instances) + self._prev_instances = copy.deepcopy(instances) + return instances + + def _create_prediction_pairs(self, instances: Instances, iou_all: np.ndarray) -> List: + """ + For all instances in previous and current frames, create pairs. For each + pair, store index of the instance in current frame predcitions, index in + previous predictions, ID in previous predictions, IoU of the bboxes in this + pair, period in previous predictions. + + Args: + instances: D2 Instances, for predictions of the current frame + iou_all: IoU for all bboxes pairs + Return: + A list of IoU for all pairs + """ + bbox_pairs = [] + for i in range(len(instances)): + for j in range(len(self._prev_instances)): + bbox_pairs.append( + { + "idx": i, + "prev_idx": j, + "prev_id": self._prev_instances.ID[j], + "IoU": iou_all[i, j], + "prev_period": self._prev_instances.ID_period[j], + } + ) + return bbox_pairs + + def _initialize_extra_fields(self, instances: Instances) -> Instances: + """ + If input instances don't have ID, ID_period, lost_frame_count fields, + this method is used to initialize these fields. + + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances with extra fields added + """ + if not instances.has("ID"): + instances.set("ID", [None] * len(instances)) + if not instances.has("ID_period"): + instances.set("ID_period", [None] * len(instances)) + if not instances.has("lost_frame_count"): + instances.set("lost_frame_count", [None] * len(instances)) + if self._prev_instances is None: + instances.ID = list(range(len(instances))) + self._id_count += len(instances) + instances.ID_period = [1] * len(instances) + instances.lost_frame_count = [0] * len(instances) + return instances + + def _reset_fields(self): + """ + Before each uodate call, reset fields first + """ + self._matched_idx = set() + self._matched_ID = set() + self._untracked_prev_idx = set(range(len(self._prev_instances))) + + def _assign_new_id(self, instances: Instances) -> Instances: + """ + For each untracked instance, assign a new id + + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances with new ID assigned + """ + untracked_idx = set(range(len(instances))).difference(self._matched_idx) + for idx in untracked_idx: + instances.ID[idx] = self._id_count + self._id_count += 1 + instances.ID_period[idx] = 1 + instances.lost_frame_count[idx] = 0 + return instances + + def _merge_untracked_instances(self, instances: Instances) -> Instances: + """ + For untracked previous instances, under certain condition, still keep them + in tracking and merge with the current instances. 
+ + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances merging current instances and instances from previous + frame decided to keep tracking + """ + untracked_instances = Instances( + image_size=instances.image_size, + pred_boxes=[], + pred_classes=[], + scores=[], + ID=[], + ID_period=[], + lost_frame_count=[], + ) + prev_bboxes = list(self._prev_instances.pred_boxes) + prev_classes = list(self._prev_instances.pred_classes) + prev_scores = list(self._prev_instances.scores) + prev_ID_period = self._prev_instances.ID_period + if instances.has("pred_masks"): + untracked_instances.set("pred_masks", []) + prev_masks = list(self._prev_instances.pred_masks) + if instances.has("pred_keypoints"): + untracked_instances.set("pred_keypoints", []) + prev_keypoints = list(self._prev_instances.pred_keypoints) + if instances.has("pred_keypoint_heatmaps"): + untracked_instances.set("pred_keypoint_heatmaps", []) + prev_keypoint_heatmaps = list(self._prev_instances.pred_keypoint_heatmaps) + for idx in self._untracked_prev_idx: + x_left, y_top, x_right, y_bot = prev_bboxes[idx] + if ( + (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) + or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) + or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count + or prev_ID_period[idx] <= self._min_instance_period + ): + continue + untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) + untracked_instances.pred_classes.append(int(prev_classes[idx])) + untracked_instances.scores.append(float(prev_scores[idx])) + untracked_instances.ID.append(self._prev_instances.ID[idx]) + untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) + untracked_instances.lost_frame_count.append( + self._prev_instances.lost_frame_count[idx] + 1 + ) + if instances.has("pred_masks"): + untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) + if instances.has("pred_keypoints"): + untracked_instances.pred_keypoints.append( + prev_keypoints[idx].numpy().astype(np.uint8) + ) + if instances.has("pred_keypoint_heatmaps"): + untracked_instances.pred_keypoint_heatmaps.append( + prev_keypoint_heatmaps[idx].numpy().astype(np.float32) + ) + untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) + untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) + untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) + if instances.has("pred_masks"): + untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) + if instances.has("pred_keypoints"): + untracked_instances.pred_keypoints = torch.IntTensor(untracked_instances.pred_keypoints) + if instances.has("pred_keypoint_heatmaps"): + untracked_instances.pred_keypoint_heatmaps = torch.FloatTensor( + untracked_instances.pred_keypoint_heatmaps + ) + + return Instances.cat( + [ + instances, + untracked_instances, + ] + ) diff --git a/data_processing/detectron2/detectron2/tracking/hungarian_tracker.py b/data_processing/detectron2/detectron2/tracking/hungarian_tracker.py new file mode 100644 index 0000000..5b3ce88 --- /dev/null +++ b/data_processing/detectron2/detectron2/tracking/hungarian_tracker.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. 
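+
+# Editor's note: an illustrative sketch (not part of upstream detectron2) of the
+# assignment step the Hungarian trackers below rely on. Boxes of the current
+# frame (rows) are matched to boxes of the previous frame (columns) by
+# minimising a cost matrix with scipy; the concrete cost values here are made up.
+#
+#   import numpy as np
+#   from scipy.optimize import linear_sum_assignment
+#
+#   # Plausible pairs get a low (negative) cost; pairs below the IoU threshold
+#   # get a large constant (see LARGE_COST_VALUE in tracking/utils.py).
+#   cost = np.array([[-0.8, 1e5],
+#                    [ 1e5, -0.6]])
+#   rows, cols = linear_sum_assignment(cost)
+#   # rows == [0, 1], cols == [0, 1]: current box 0 inherits the ID of previous
+#   # box 0, and current box 1 inherits the ID of previous box 1.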
+import copy +import numpy as np +from typing import Dict +import torch +from scipy.optimize import linear_sum_assignment + +from detectron2.config import configurable +from detectron2.structures import Boxes, Instances + +from ..config.config import CfgNode as CfgNode_ +from .base_tracker import BaseTracker + + +class BaseHungarianTracker(BaseTracker): + """ + A base class for all Hungarian trackers + """ + + @configurable + def __init__( + self, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + **kwargs + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + """ + super().__init__(**kwargs) + self._video_height = video_height + self._video_width = video_width + self._max_num_instances = max_num_instances + self._max_lost_frame_count = max_lost_frame_count + self._min_box_rel_dim = min_box_rel_dim + self._min_instance_period = min_instance_period + + @classmethod + def from_config(cls, cfg: CfgNode_) -> Dict: + raise NotImplementedError("Calling HungarianTracker::from_config") + + def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: + raise NotImplementedError("Calling HungarianTracker::build_matrix") + + def update(self, instances: Instances) -> Instances: + if instances.has("pred_keypoints"): + raise NotImplementedError("Need to add support for keypoints") + instances = self._initialize_extra_fields(instances) + if self._prev_instances is not None: + self._untracked_prev_idx = set(range(len(self._prev_instances))) + cost_matrix = self.build_cost_matrix(instances, self._prev_instances) + matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) + instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) + instances = self._process_unmatched_idx(instances, matched_idx) + instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) + self._prev_instances = copy.deepcopy(instances) + return instances + + def _initialize_extra_fields(self, instances: Instances) -> Instances: + """ + If input instances don't have ID, ID_period, lost_frame_count fields, + this method is used to initialize these fields. 
+ + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances with extra fields added + """ + if not instances.has("ID"): + instances.set("ID", [None] * len(instances)) + if not instances.has("ID_period"): + instances.set("ID_period", [None] * len(instances)) + if not instances.has("lost_frame_count"): + instances.set("lost_frame_count", [None] * len(instances)) + if self._prev_instances is None: + instances.ID = list(range(len(instances))) + self._id_count += len(instances) + instances.ID_period = [1] * len(instances) + instances.lost_frame_count = [0] * len(instances) + return instances + + def _process_matched_idx( + self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray + ) -> Instances: + assert matched_idx.size == matched_prev_idx.size + for i in range(matched_idx.size): + instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] + instances.ID_period[matched_idx[i]] = ( + self._prev_instances.ID_period[matched_prev_idx[i]] + 1 + ) + instances.lost_frame_count[matched_idx[i]] = 0 + return instances + + def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: + untracked_idx = set(range(len(instances))).difference(set(matched_idx)) + for idx in untracked_idx: + instances.ID[idx] = self._id_count + self._id_count += 1 + instances.ID_period[idx] = 1 + instances.lost_frame_count[idx] = 0 + return instances + + def _process_unmatched_prev_idx( + self, instances: Instances, matched_prev_idx: np.ndarray + ) -> Instances: + untracked_instances = Instances( + image_size=instances.image_size, + pred_boxes=[], + pred_masks=[], + pred_classes=[], + scores=[], + ID=[], + ID_period=[], + lost_frame_count=[], + ) + prev_bboxes = list(self._prev_instances.pred_boxes) + prev_classes = list(self._prev_instances.pred_classes) + prev_scores = list(self._prev_instances.scores) + prev_ID_period = self._prev_instances.ID_period + if instances.has("pred_masks"): + prev_masks = list(self._prev_instances.pred_masks) + untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) + for idx in untracked_prev_idx: + x_left, y_top, x_right, y_bot = prev_bboxes[idx] + if ( + (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) + or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) + or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count + or prev_ID_period[idx] <= self._min_instance_period + ): + continue + untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) + untracked_instances.pred_classes.append(int(prev_classes[idx])) + untracked_instances.scores.append(float(prev_scores[idx])) + untracked_instances.ID.append(self._prev_instances.ID[idx]) + untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) + untracked_instances.lost_frame_count.append( + self._prev_instances.lost_frame_count[idx] + 1 + ) + if instances.has("pred_masks"): + untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) + + untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) + untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) + untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) + if instances.has("pred_masks"): + untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) + else: + untracked_instances.remove("pred_masks") + + return Instances.cat( + [ + instances, + 
untracked_instances, + ] + ) diff --git a/data_processing/detectron2/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py b/data_processing/detectron2/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py new file mode 100644 index 0000000..b3b4d1c --- /dev/null +++ b/data_processing/detectron2/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. + +import numpy as np +from typing import List + +from detectron2.config import CfgNode as CfgNode_ +from detectron2.config import configurable + +from .base_tracker import TRACKER_HEADS_REGISTRY +from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker + + +@TRACKER_HEADS_REGISTRY.register() +class IOUWeightedHungarianBBoxIOUTracker(VanillaHungarianBBoxIOUTracker): + """ + A tracker using IoU as weight in Hungarian algorithm, also known + as Munkres or Kuhn-Munkres algorithm + """ + + @configurable + def __init__( + self, + *, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + track_iou_threshold: float = 0.5, + **kwargs, + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + track_iou_threshold: iou threshold, below this number a bbox pair is removed + from tracking + """ + super().__init__( + video_height=video_height, + video_width=video_width, + max_num_instances=max_num_instances, + max_lost_frame_count=max_lost_frame_count, + min_box_rel_dim=min_box_rel_dim, + min_instance_period=min_instance_period, + track_iou_threshold=track_iou_threshold, + ) + + @classmethod + def from_config(cls, cfg: CfgNode_): + """ + Old style initialization using CfgNode + + Args: + cfg: D2 CfgNode, config file + Return: + dictionary storing arguments for __init__ method + """ + assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS + assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS + video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") + video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") + max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) + max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) + min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) + min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) + track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) + return { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": video_height, + "video_width": video_width, + "max_num_instances": max_num_instances, + "max_lost_frame_count": max_lost_frame_count, + "min_box_rel_dim": min_box_rel_dim, + "min_instance_period": min_instance_period, + "track_iou_threshold": track_iou_threshold, + } + + def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray: + """ + Based on IoU for each pair of bbox, assign the associated value in cost matrix + + Args: + cost_matrix: np.ndarray, 
initialized 2D array with target dimensions + bbox_pairs: list of bbox pair, in each pair, iou value is stored + Return: + np.ndarray, cost_matrix with assigned values + """ + for pair in bbox_pairs: + # assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost + cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 * pair["IoU"] + return cost_matrix diff --git a/data_processing/detectron2/detectron2/tracking/utils.py b/data_processing/detectron2/detectron2/tracking/utils.py new file mode 100644 index 0000000..92634c5 --- /dev/null +++ b/data_processing/detectron2/detectron2/tracking/utils.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +import numpy as np +from typing import List + +from detectron2.structures import Instances + + +def create_prediction_pairs( + instances: Instances, + prev_instances: Instances, + iou_all: np.ndarray, + threshold: float = 0.5, +) -> List: + """ + Args: + instances: predictions from current frame + prev_instances: predictions from previous frame + iou_all: 2D numpy array containing iou for each bbox pair + threshold: below the threshold, doesn't consider the pair of bbox is valid + Return: + List of bbox pairs + """ + bbox_pairs = [] + for i in range(len(instances)): + for j in range(len(prev_instances)): + if iou_all[i, j] < threshold: + continue + bbox_pairs.append( + { + "idx": i, + "prev_idx": j, + "prev_id": prev_instances.ID[j], + "IoU": iou_all[i, j], + "prev_period": prev_instances.ID_period[j], + } + ) + return bbox_pairs + + +LARGE_COST_VALUE = 100000 diff --git a/data_processing/detectron2/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py b/data_processing/detectron2/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py new file mode 100644 index 0000000..5629f73 --- /dev/null +++ b/data_processing/detectron2/detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. 
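+
+# Editor's note: a small sketch (not part of upstream detectron2) showing how
+# create_prediction_pairs from tracking/utils.py feeds the cost matrix used by
+# the tracker below. `curr` and `prev` are hypothetical D2 Instances whose ID
+# and ID_period fields are already set; the IoU values are made up.
+#
+#   import numpy as np
+#   from detectron2.tracking.utils import create_prediction_pairs
+#
+#   iou_all = np.array([[0.9, 0.1],
+#                       [0.2, 0.7]])
+#   pairs = create_prediction_pairs(curr, prev, iou_all, threshold=0.5)
+#   # Only the (0, 0) and (1, 1) pairs survive the 0.5 threshold. The vanilla
+#   # tracker defined below then writes -1 into the cost matrix for each
+#   # surviving pair, while the IoU-weighted variant above writes -IoU instead.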
+ +import numpy as np +from typing import List + +from detectron2.config import CfgNode as CfgNode_ +from detectron2.config import configurable +from detectron2.structures import Instances +from detectron2.structures.boxes import pairwise_iou +from detectron2.tracking.utils import LARGE_COST_VALUE, create_prediction_pairs + +from .base_tracker import TRACKER_HEADS_REGISTRY +from .hungarian_tracker import BaseHungarianTracker + + +@TRACKER_HEADS_REGISTRY.register() +class VanillaHungarianBBoxIOUTracker(BaseHungarianTracker): + """ + Hungarian algo based tracker using bbox iou as metric + """ + + @configurable + def __init__( + self, + *, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + track_iou_threshold: float = 0.5, + **kwargs, + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + track_iou_threshold: iou threshold, below this number a bbox pair is removed + from tracking + """ + super().__init__( + video_height=video_height, + video_width=video_width, + max_num_instances=max_num_instances, + max_lost_frame_count=max_lost_frame_count, + min_box_rel_dim=min_box_rel_dim, + min_instance_period=min_instance_period, + ) + self._track_iou_threshold = track_iou_threshold + + @classmethod + def from_config(cls, cfg: CfgNode_): + """ + Old style initialization using CfgNode + + Args: + cfg: D2 CfgNode, config file + Return: + dictionary storing arguments for __init__ method + """ + assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS + assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS + video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") + video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") + max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) + max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) + min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) + min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) + track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) + return { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": video_height, + "video_width": video_width, + "max_num_instances": max_num_instances, + "max_lost_frame_count": max_lost_frame_count, + "min_box_rel_dim": min_box_rel_dim, + "min_instance_period": min_instance_period, + "track_iou_threshold": track_iou_threshold, + } + + def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: + """ + Build the cost matrix for assignment problem + (https://en.wikipedia.org/wiki/Assignment_problem) + + Args: + instances: D2 Instances, for current frame predictions + prev_instances: D2 Instances, for previous frame predictions + + Return: + the cost matrix in numpy array + """ + assert instances is not None and prev_instances is not None + # calculate IoU of all bbox pairs + iou_all = pairwise_iou( + boxes1=instances.pred_boxes, + 
boxes2=self._prev_instances.pred_boxes, + ) + bbox_pairs = create_prediction_pairs( + instances, self._prev_instances, iou_all, threshold=self._track_iou_threshold + ) + # assign large cost value to make sure pair below IoU threshold won't be matched + cost_matrix = np.full((len(instances), len(prev_instances)), LARGE_COST_VALUE) + return self.assign_cost_matrix_values(cost_matrix, bbox_pairs) + + def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray: + """ + Based on IoU for each pair of bbox, assign the associated value in cost matrix + + Args: + cost_matrix: np.ndarray, initialized 2D array with target dimensions + bbox_pairs: list of bbox pair, in each pair, iou value is stored + Return: + np.ndarray, cost_matrix with assigned values + """ + for pair in bbox_pairs: + # assign -1 for IoU above threshold pairs, algorithms will minimize cost + cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 + return cost_matrix diff --git a/data_processing/detectron2/detectron2/utils/README.md b/data_processing/detectron2/detectron2/utils/README.md new file mode 100644 index 0000000..9765b24 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/README.md @@ -0,0 +1,5 @@ +# Utility functions + +This folder contain utility functions that are not used in the +core library, but are useful for building models or training +code using the config system. diff --git a/data_processing/detectron2/detectron2/utils/__init__.py b/data_processing/detectron2/detectron2/utils/__init__.py new file mode 100644 index 0000000..9020c2d --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/data_processing/detectron2/detectron2/utils/analysis.py b/data_processing/detectron2/detectron2/utils/analysis.py new file mode 100644 index 0000000..178da79 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/analysis.py @@ -0,0 +1,188 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# -*- coding: utf-8 -*- + +import typing +from typing import Any, List +import fvcore +from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table +from torch import nn + +from detectron2.export import TracingAdapter + +__all__ = [ + "activation_count_operators", + "flop_count_operators", + "parameter_count_table", + "parameter_count", + "FlopCountAnalysis", +] + +FLOPS_MODE = "flops" +ACTIVATIONS_MODE = "activations" + + +# Some extra ops to ignore from counting, including elementwise and reduction ops +_IGNORED_OPS = { + "aten::add", + "aten::add_", + "aten::argmax", + "aten::argsort", + "aten::batch_norm", + "aten::constant_pad_nd", + "aten::div", + "aten::div_", + "aten::exp", + "aten::log2", + "aten::max_pool2d", + "aten::meshgrid", + "aten::mul", + "aten::mul_", + "aten::neg", + "aten::nonzero_numpy", + "aten::reciprocal", + "aten::repeat_interleave", + "aten::rsub", + "aten::sigmoid", + "aten::sigmoid_", + "aten::softmax", + "aten::sort", + "aten::sqrt", + "aten::sub", + "torchvision::nms", # TODO estimate flop for nms +} + + +class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis): + """ + Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models. + """ + + def __init__(self, model, inputs): + """ + Args: + model (nn.Module): + inputs (Any): inputs of the given model. Does not have to be tuple of tensors. 
+ """ + wrapper = TracingAdapter(model, inputs, allow_non_tensor=True) + super().__init__(wrapper, wrapper.flattened_inputs) + self.set_op_handle(**{k: None for k in _IGNORED_OPS}) + + +def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]: + """ + Implement operator-level flops counting using jit. + This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard + detection models in detectron2. + Please use :class:`FlopCountAnalysis` for more advanced functionalities. + + Note: + The function runs the input through the model to compute flops. + The flops of a detection model is often input-dependent, for example, + the flops of box & mask head depends on the number of proposals & + the number of detected objects. + Therefore, the flops counting using a single input may not accurately + reflect the computation cost of a model. It's recommended to average + across a number of inputs. + + Args: + model: a detectron2 model that takes `list[dict]` as input. + inputs (list[dict]): inputs to model, in detectron2's standard format. + Only "image" key will be used. + supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count` + + Returns: + Counter: Gflop count per operator + """ + old_train = model.training + model.eval() + ret = FlopCountAnalysis(model, inputs).by_operator() + model.train(old_train) + return {k: v / 1e9 for k, v in ret.items()} + + +def activation_count_operators( + model: nn.Module, inputs: list, **kwargs +) -> typing.DefaultDict[str, float]: + """ + Implement operator-level activations counting using jit. + This is a wrapper of fvcore.nn.activation_count, that supports standard detection models + in detectron2. + + Note: + The function runs the input through the model to compute activations. + The activations of a detection model is often input-dependent, for example, + the activations of box & mask head depends on the number of proposals & + the number of detected objects. + + Args: + model: a detectron2 model that takes `list[dict]` as input. + inputs (list[dict]): inputs to model, in detectron2's standard format. + Only "image" key will be used. 
+ + Returns: + Counter: activation count per operator + """ + return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs) + + +def _wrapper_count_operators( + model: nn.Module, inputs: list, mode: str, **kwargs +) -> typing.DefaultDict[str, float]: + # ignore some ops + supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS} + supported_ops.update(kwargs.pop("supported_ops", {})) + kwargs["supported_ops"] = supported_ops + + assert len(inputs) == 1, "Please use batch size=1" + tensor_input = inputs[0]["image"] + inputs = [{"image": tensor_input}] # remove other keys, in case there are any + + old_train = model.training + if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)): + model = model.module + wrapper = TracingAdapter(model, inputs) + wrapper.eval() + if mode == FLOPS_MODE: + ret = flop_count(wrapper, (tensor_input,), **kwargs) + elif mode == ACTIVATIONS_MODE: + ret = activation_count(wrapper, (tensor_input,), **kwargs) + else: + raise NotImplementedError("Count for mode {} is not supported yet.".format(mode)) + # compatible with change in fvcore + if isinstance(ret, tuple): + ret = ret[0] + model.train(old_train) + return ret + + +def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]: + """ + Given a model, find parameters that do not contribute + to the loss. + + Args: + model: a model in training mode that returns losses + inputs: argument or a tuple of arguments. Inputs of the model + + Returns: + list[str]: the name of unused parameters + """ + assert model.training + for _, prm in model.named_parameters(): + prm.grad = None + + if isinstance(inputs, tuple): + losses = model(*inputs) + else: + losses = model(inputs) + + if isinstance(losses, dict): + losses = sum(losses.values()) + losses.backward() + + unused: List[str] = [] + for name, prm in model.named_parameters(): + if prm.grad is None: + unused.append(name) + prm.grad = None + return unused diff --git a/data_processing/detectron2/detectron2/utils/collect_env.py b/data_processing/detectron2/detectron2/utils/collect_env.py new file mode 100644 index 0000000..2846d7a --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/collect_env.py @@ -0,0 +1,246 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
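+
+# Editor's note: a brief usage sketch (not part of upstream detectron2) for the
+# flop-counting helpers defined in detectron2/utils/analysis.py above. `model`
+# is a hypothetical detectron2 model taking the standard list[dict] input
+# format; flop counts are input-dependent, so averaging over several inputs is
+# advisable.
+#
+#   import torch
+#   from detectron2.utils.analysis import FlopCountAnalysis, flop_count_operators
+#
+#   inputs = [{"image": torch.zeros(3, 480, 640)}]
+#   flops = FlopCountAnalysis(model, inputs)
+#   print(flops.total() / 1e9, "Gflops for this input")
+#   print(flop_count_operators(model, inputs))  # Gflops per operator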
+import importlib +import numpy as np +import os +import re +import subprocess +import sys +from collections import defaultdict +import PIL +import torch +import torchvision +from tabulate import tabulate + +__all__ = ["collect_env_info"] + + +def collect_torch_env(): + try: + import torch.__config__ + + return torch.__config__.show() + except ImportError: + # compatible with older versions of pytorch + from torch.utils.collect_env import get_pretty_env_info + + return get_pretty_env_info() + + +def get_env_module(): + var_name = "DETECTRON2_ENV_MODULE" + return var_name, os.environ.get(var_name, "") + + +def detect_compute_compatibility(CUDA_HOME, so_file): + try: + cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") + if os.path.isfile(cuobjdump): + output = subprocess.check_output( + "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True + ) + output = output.decode("utf-8").strip().split("\n") + arch = [] + for line in output: + line = re.findall(r"\.sm_([0-9]*)\.", line)[0] + arch.append(".".join(line)) + arch = sorted(set(arch)) + return ", ".join(arch) + else: + return so_file + "; cannot find cuobjdump" + except Exception: + # unhandled failure + return so_file + + +def collect_env_info(): + has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM + torch_version = torch.__version__ + + # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional + from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME + + has_rocm = False + if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): + has_rocm = True + has_cuda = has_gpu and (not has_rocm) + + data = [] + data.append(("sys.platform", sys.platform)) # check-template.yml depends on it + data.append(("Python", sys.version.replace("\n", ""))) + data.append(("numpy", np.__version__)) + + try: + import detectron2 # noqa + + data.append( + ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__)) + ) + except ImportError: + data.append(("detectron2", "failed to import")) + except AttributeError: + data.append(("detectron2", "imported a wrong installation")) + + try: + import detectron2._C as _C + except ImportError as e: + data.append(("detectron2._C", f"not built correctly: {e}")) + + # print system compilers when extension fails to build + if sys.platform != "win32": # don't know what to do for windows + try: + # this is how torch/utils/cpp_extensions.py choose compiler + cxx = os.environ.get("CXX", "c++") + cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True) + cxx = cxx.decode("utf-8").strip().split("\n")[0] + except subprocess.SubprocessError: + cxx = "Not found" + data.append(("Compiler ($CXX)", cxx)) + + if has_cuda and CUDA_HOME is not None: + try: + nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") + nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True) + nvcc = nvcc.decode("utf-8").strip().split("\n")[-1] + except subprocess.SubprocessError: + nvcc = "Not found" + data.append(("CUDA compiler", nvcc)) + if has_cuda and sys.platform != "win32": + try: + so_file = importlib.util.find_spec("detectron2._C").origin + except (ImportError, AttributeError): + pass + else: + data.append( + ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file)) + ) + else: + # print compilers that are used to build extension + data.append(("Compiler", _C.get_compiler_version())) + data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip + if has_cuda and getattr(_C, "has_cuda", lambda: True)(): + 
data.append( + ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) + ) + + data.append(get_env_module()) + data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) + data.append(("PyTorch debug build", torch.version.debug)) + try: + data.append(("torch._C._GLIBCXX_USE_CXX11_ABI", torch._C._GLIBCXX_USE_CXX11_ABI)) + except Exception: + pass + + if not has_gpu: + has_gpu_text = "No: torch.cuda.is_available() == False" + else: + has_gpu_text = "Yes" + data.append(("GPU available", has_gpu_text)) + if has_gpu: + devices = defaultdict(list) + for k in range(torch.cuda.device_count()): + cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k))) + name = torch.cuda.get_device_name(k) + f" (arch={cap})" + devices[name].append(str(k)) + for name, devids in devices.items(): + data.append(("GPU " + ",".join(devids), name)) + + if has_rocm: + msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else "" + data.append(("ROCM_HOME", str(ROCM_HOME) + msg)) + else: + try: + from torch.utils.collect_env import get_nvidia_driver_version, run as _run + + data.append(("Driver version", get_nvidia_driver_version(_run))) + except Exception: + pass + msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else "" + data.append(("CUDA_HOME", str(CUDA_HOME) + msg)) + + cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) + if cuda_arch_list: + data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) + data.append(("Pillow", PIL.__version__)) + + try: + data.append( + ( + "torchvision", + str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), + ) + ) + if has_cuda: + try: + torchvision_C = importlib.util.find_spec("torchvision._C").origin + msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) + data.append(("torchvision arch flags", msg)) + except (ImportError, AttributeError): + data.append(("torchvision._C", "Not found")) + except AttributeError: + data.append(("torchvision", "unknown")) + + try: + import fvcore + + data.append(("fvcore", fvcore.__version__)) + except (ImportError, AttributeError): + pass + + try: + import iopath + + data.append(("iopath", iopath.__version__)) + except (ImportError, AttributeError): + pass + + try: + import cv2 + + data.append(("cv2", cv2.__version__)) + except (ImportError, AttributeError): + data.append(("cv2", "Not found")) + env_str = tabulate(data) + "\n" + env_str += collect_torch_env() + return env_str + + +def test_nccl_ops(): + num_gpu = torch.cuda.device_count() + if os.access("/tmp", os.W_OK): + import torch.multiprocessing as mp + + dist_url = "file:///tmp/nccl_tmp_file" + print("Testing NCCL connectivity ... this should not hang.") + mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False) + print("NCCL succeeded.") + + +def _test_nccl_worker(rank, num_gpu, dist_url): + import torch.distributed as dist + + dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu) + dist.barrier(device_ids=[rank]) + + +if __name__ == "__main__": + try: + from detectron2.utils.collect_env import collect_env_info as f + + print(f()) + except ImportError: + print(collect_env_info()) + + if torch.cuda.is_available(): + num_gpu = torch.cuda.device_count() + for k in range(num_gpu): + device = f"cuda:{k}" + try: + x = torch.tensor([1, 2.0], dtype=torch.float32) + x = x.to(device) + except Exception as e: + print( + f"Unable to copy tensor to device={device}: {e}. " + "Your CUDA environment is broken." 
+ ) + if num_gpu > 1: + test_nccl_ops() diff --git a/data_processing/detectron2/detectron2/utils/colormap.py b/data_processing/detectron2/detectron2/utils/colormap.py new file mode 100644 index 0000000..14ded16 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/colormap.py @@ -0,0 +1,158 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +An awesome colormap for really neat visualizations. +Copied from Detectron, and removed gray colors. +""" + +import numpy as np +import random + +__all__ = ["colormap", "random_color", "random_colors"] + +# fmt: off +# RGB: +_COLORS = np.array( + [ + 0.000, 0.447, 0.741, + 0.850, 0.325, 0.098, + 0.929, 0.694, 0.125, + 0.494, 0.184, 0.556, + 0.466, 0.674, 0.188, + 0.301, 0.745, 0.933, + 0.635, 0.078, 0.184, + 0.300, 0.300, 0.300, + 0.600, 0.600, 0.600, + 1.000, 0.000, 0.000, + 1.000, 0.500, 0.000, + 0.749, 0.749, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 1.000, + 0.667, 0.000, 1.000, + 0.333, 0.333, 0.000, + 0.333, 0.667, 0.000, + 0.333, 1.000, 0.000, + 0.667, 0.333, 0.000, + 0.667, 0.667, 0.000, + 0.667, 1.000, 0.000, + 1.000, 0.333, 0.000, + 1.000, 0.667, 0.000, + 1.000, 1.000, 0.000, + 0.000, 0.333, 0.500, + 0.000, 0.667, 0.500, + 0.000, 1.000, 0.500, + 0.333, 0.000, 0.500, + 0.333, 0.333, 0.500, + 0.333, 0.667, 0.500, + 0.333, 1.000, 0.500, + 0.667, 0.000, 0.500, + 0.667, 0.333, 0.500, + 0.667, 0.667, 0.500, + 0.667, 1.000, 0.500, + 1.000, 0.000, 0.500, + 1.000, 0.333, 0.500, + 1.000, 0.667, 0.500, + 1.000, 1.000, 0.500, + 0.000, 0.333, 1.000, + 0.000, 0.667, 1.000, + 0.000, 1.000, 1.000, + 0.333, 0.000, 1.000, + 0.333, 0.333, 1.000, + 0.333, 0.667, 1.000, + 0.333, 1.000, 1.000, + 0.667, 0.000, 1.000, + 0.667, 0.333, 1.000, + 0.667, 0.667, 1.000, + 0.667, 1.000, 1.000, + 1.000, 0.000, 1.000, + 1.000, 0.333, 1.000, + 1.000, 0.667, 1.000, + 0.333, 0.000, 0.000, + 0.500, 0.000, 0.000, + 0.667, 0.000, 0.000, + 0.833, 0.000, 0.000, + 1.000, 0.000, 0.000, + 0.000, 0.167, 0.000, + 0.000, 0.333, 0.000, + 0.000, 0.500, 0.000, + 0.000, 0.667, 0.000, + 0.000, 0.833, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 0.167, + 0.000, 0.000, 0.333, + 0.000, 0.000, 0.500, + 0.000, 0.000, 0.667, + 0.000, 0.000, 0.833, + 0.000, 0.000, 1.000, + 0.000, 0.000, 0.000, + 0.143, 0.143, 0.143, + 0.857, 0.857, 0.857, + 1.000, 1.000, 1.000 + ] +).astype(np.float32).reshape(-1, 3) +# fmt: on + + +def colormap(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + + Returns: + ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] + """ + assert maximum in [255, 1], maximum + c = _COLORS * maximum + if not rgb: + c = c[:, ::-1] + return c + + +def random_color(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + + Returns: + ndarray: a vector of 3 numbers + """ + idx = np.random.randint(0, len(_COLORS)) + ret = _COLORS[idx] * maximum + if not rgb: + ret = ret[::-1] + return ret + + +def random_colors(N, rgb=False, maximum=255): + """ + Args: + N (int): number of unique colors needed + rgb (bool): whether to return RGB colors or BGR colors. 
+ maximum (int): either 255 or 1 + + Returns: + ndarray: a list of random_color + """ + indices = random.sample(range(len(_COLORS)), N) + ret = [_COLORS[i] * maximum for i in indices] + if not rgb: + ret = [x[::-1] for x in ret] + return ret + + +if __name__ == "__main__": + import cv2 + + size = 100 + H, W = 10, 10 + canvas = np.random.rand(H * size, W * size, 3).astype("float32") + for h in range(H): + for w in range(W): + idx = h * W + w + if idx >= len(_COLORS): + break + canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx] + cv2.imshow("a", canvas) + cv2.waitKey(0) diff --git a/data_processing/detectron2/detectron2/utils/comm.py b/data_processing/detectron2/detectron2/utils/comm.py new file mode 100644 index 0000000..a9ea9a9 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/comm.py @@ -0,0 +1,238 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. +""" + +import functools +import numpy as np +import torch +import torch.distributed as dist + +_LOCAL_PROCESS_GROUP = None +_MISSING_LOCAL_PG_ERROR = ( + "Local process group is not yet created! Please use detectron2's `launch()` " + "to start processes and initialize pytorch process group. If you need to start " + "processes in other ways, please call comm.create_local_process_group(" + "num_workers_per_machine) after calling torch.distributed.init_process_group()." +) + + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank() -> int: + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +@functools.lru_cache() +def create_local_process_group(num_workers_per_machine: int) -> None: + """ + Create a process group that contains ranks within the same machine. + + Detectron2's launch() in engine/launch.py will call this function. If you start + workers without launch(), you'll have to also call this. Otherwise utilities + like `get_local_rank()` will not work. + + This function contains a barrier. All processes must call it together. + + Args: + num_workers_per_machine: the number of worker processes per machine. Typically + the number of GPUs. + """ + global _LOCAL_PROCESS_GROUP + assert _LOCAL_PROCESS_GROUP is None + assert get_world_size() % num_workers_per_machine == 0 + num_machines = get_world_size() // num_workers_per_machine + machine_rank = get_rank() // num_workers_per_machine + for i in range(num_machines): + ranks_on_i = list(range(i * num_workers_per_machine, (i + 1) * num_workers_per_machine)) + pg = dist.new_group(ranks_on_i) + if i == machine_rank: + _LOCAL_PROCESS_GROUP = pg + + +def get_local_process_group(): + """ + Returns: + A torch process group which only includes processes that are on the same + machine as the current process. This group can be useful for communication + within a machine, e.g. a per-machine SyncBN. + """ + assert _LOCAL_PROCESS_GROUP is not None, _MISSING_LOCAL_PG_ERROR + return _LOCAL_PROCESS_GROUP + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process within the local (per-machine) process group. 
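For the color helpers defined in `colormap.py` above, a short usage sketch may help; the import path is assumed to follow the file layout of this patch, and the values shown in comments are illustrative:

```python
from detectron2.utils.colormap import colormap, random_color, random_colors  # path assumed

palette = colormap(rgb=True, maximum=255)   # Nx3 float32 array of distinct colors
first = palette[0]                          # e.g. a blue-ish [0., 113.985, 188.955]

one = random_color(rgb=True, maximum=1)     # a single color with values in [0, 1]
five = random_colors(5, rgb=True)           # 5 colors sampled without replacement
```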
+ """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert _LOCAL_PROCESS_GROUP is not None, _MISSING_LOCAL_PG_ERROR + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + assert _LOCAL_PROCESS_GROUP is not None, _MISSING_LOCAL_PG_ERROR + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def is_main_process() -> bool: + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + if dist.get_backend() == dist.Backend.NCCL: + # This argument is needed to avoid warnings. + # It's valid only for NCCL backend. + dist.barrier(device_ids=[torch.cuda.current_device()]) + else: + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. + """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def all_gather(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() # use CPU group by default, to reduce GPU RAM usage. + world_size = dist.get_world_size(group) + if world_size == 1: + return [data] + + output = [None for _ in range(world_size)] + dist.all_gather_object(output, data, group=group) + return output + + +def gather(data, dst=0, group=None): + """ + Run gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + world_size = dist.get_world_size(group=group) + if world_size == 1: + return [data] + rank = dist.get_rank(group=group) + + if rank == dst: + output = [None for _ in range(world_size)] + dist.gather_object(data, output, dst=dst, group=group) + return output + else: + dist.gather_object(data, None, dst=dst, group=group) + return [] + + +def shared_random_seed(): + """ + Returns: + int: a random number that is the same across all workers. + If workers need a shared RNG, they can use this shared seed to + create one. + + All workers must call this function, otherwise it will deadlock. + """ + ints = np.random.randint(2**31) + all_ints = all_gather(ints) + return all_ints[0] + + +def reduce_dict(input_dict, average=True): + """ + Reduce the values in the dictionary from all processes so that process with rank + 0 has the reduced results. + + Args: + input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. 
+ average (bool): whether to do average or sum + + Returns: + a dict with the same keys as input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict diff --git a/data_processing/detectron2/detectron2/utils/develop.py b/data_processing/detectron2/detectron2/utils/develop.py new file mode 100644 index 0000000..e841698 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/develop.py @@ -0,0 +1,59 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" Utilities for developers only. +These are not visible to users (not automatically imported). And should not +appeared in docs.""" +# adapted from https://github.com/tensorpack/tensorpack/blob/master/tensorpack/utils/develop.py + + +def create_dummy_class(klass, dependency, message=""): + """ + When a dependency of a class is not available, create a dummy class which throws ImportError + when used. + + Args: + klass (str): name of the class. + dependency (str): name of the dependency. + message: extra message to print + Returns: + class: a class object + """ + err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass) + if message: + err = err + " " + message + + class _DummyMetaClass(type): + # throw error on class attribute access + def __getattr__(_, __): # noqa: B902 + raise ImportError(err) + + class _Dummy(object, metaclass=_DummyMetaClass): + # throw error on constructor + def __init__(self, *args, **kwargs): + raise ImportError(err) + + return _Dummy + + +def create_dummy_func(func, dependency, message=""): + """ + When a dependency of a function is not available, create a dummy function which throws + ImportError when used. + + Args: + func (str): name of the function. + dependency (str or list[str]): name(s) of the dependency. + message: extra message to print + Returns: + function: a function object + """ + err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func) + if message: + err = err + " " + message + + if isinstance(dependency, (list, tuple)): + dependency = ",".join(dependency) + + def _dummy(*args, **kwargs): + raise ImportError(err) + + return _dummy diff --git a/data_processing/detectron2/detectron2/utils/env.py b/data_processing/detectron2/detectron2/utils/env.py new file mode 100644 index 0000000..40634c1 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/env.py @@ -0,0 +1,170 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import importlib +import importlib.util +import logging +import numpy as np +import os +import random +import sys +from datetime import datetime +import torch + +__all__ = ["seed_all_rng"] + + +TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) +""" +PyTorch version as a tuple of 2 ints. Useful for comparison. +""" + + +DOC_BUILDING = os.getenv("_DOC_BUILDING", False) # set in docs/conf.py +""" +Whether we're building documentation. 
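The communication helpers in `comm.py` above degrade gracefully when torch.distributed is not initialized, so they can be called unconditionally in both single-GPU and multi-GPU code. A small sketch of the common calls, assuming the module is importable as `detectron2.utils.comm`:

```python
import torch
from detectron2.utils import comm  # import path assumed

# Without torch.distributed these return trivial values (world size 1, rank 0).
print(comm.get_world_size(), comm.get_rank(), comm.is_main_process())

stats = {"n_images": 8, "n_boxes": 31}
gathered = comm.all_gather(stats)                        # one dict per rank
totals = {k: sum(d[k] for d in gathered) for k in stats}

seed = comm.shared_random_seed()                         # same value on every worker
# reduce_dict expects scalar CUDA tensors when actually running distributed.
losses = comm.reduce_dict({"loss": torch.tensor(0.5)})
```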
+""" + + +def seed_all_rng(seed=None): + """ + Set the random seed for the RNG in torch, numpy and python. + + Args: + seed (int): if None, will use a strong random seed. + """ + if seed is None: + seed = ( + os.getpid() + + int(datetime.now().strftime("%S%f")) + + int.from_bytes(os.urandom(2), "big") + ) + logger = logging.getLogger(__name__) + logger.info("Using a generated random seed {}".format(seed)) + np.random.seed(seed) + torch.manual_seed(seed) + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + + +# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path +def _import_file(module_name, file_path, make_importable=False): + spec = importlib.util.spec_from_file_location(module_name, file_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + if make_importable: + sys.modules[module_name] = module + return module + + +def _configure_libraries(): + """ + Configurations for some libraries. + """ + # An environment option to disable `import cv2` globally, + # in case it leads to negative performance impact + disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False)) + if disable_cv2: + sys.modules["cv2"] = None + else: + # Disable opencl in opencv since its interaction with cuda often has negative effects + # This envvar is supported after OpenCV 3.4.0 + os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" + try: + import cv2 + + if int(cv2.__version__.split(".")[0]) >= 3: + cv2.ocl.setUseOpenCL(False) + except ModuleNotFoundError: + # Other types of ImportError, if happened, should not be ignored. + # Because a failed opencv import could mess up address space + # https://github.com/skvark/opencv-python/issues/381 + pass + + def get_version(module, digit=2): + return tuple(map(int, module.__version__.split(".")[:digit])) + + # fmt: off + assert get_version(torch) >= (1, 4), "Requires torch>=1.4" + import fvcore + assert get_version(fvcore, 3) >= (0, 1, 2), "Requires fvcore>=0.1.2" + import yaml + assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1" + # fmt: on + + +_ENV_SETUP_DONE = False + + +def setup_environment(): + """Perform environment setup work. The default setup is a no-op, but this + function allows the user to specify a Python source file or a module in + the $DETECTRON2_ENV_MODULE environment variable, that performs + custom setup work that may be necessary to their computing environment. + """ + global _ENV_SETUP_DONE + if _ENV_SETUP_DONE: + return + _ENV_SETUP_DONE = True + + _configure_libraries() + + custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE") + + if custom_module_path: + setup_custom_environment(custom_module_path) + else: + # The default setup is a no-op + pass + + +def setup_custom_environment(custom_module): + """ + Load custom environment setup by importing a Python source file or a + module, and run the setup function. + """ + if custom_module.endswith(".py"): + module = _import_file("detectron2.utils.env.custom_module", custom_module) + else: + module = importlib.import_module(custom_module) + assert hasattr(module, "setup_environment") and callable(module.setup_environment), ( + "Custom environment module defined in {} does not have the " + "required callable attribute 'setup_environment'." 
+ ).format(custom_module) + module.setup_environment() + + +def fixup_module_metadata(module_name, namespace, keys=None): + """ + Fix the __qualname__ of module members to be their exported api name, so + when they are referenced in docs, sphinx can find them. Reference: + https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241 + """ + if not DOC_BUILDING: + return + seen_ids = set() + + def fix_one(qualname, name, obj): + # avoid infinite recursion (relevant when using + # typing.Generic, for example) + if id(obj) in seen_ids: + return + seen_ids.add(id(obj)) + + mod = getattr(obj, "__module__", None) + if mod is not None and (mod.startswith(module_name) or mod.startswith("fvcore.")): + obj.__module__ = module_name + # Modules, unlike everything else in Python, put fully-qualitied + # names into their __name__ attribute. We check for "." to avoid + # rewriting these. + if hasattr(obj, "__name__") and "." not in obj.__name__: + obj.__name__ = name + obj.__qualname__ = qualname + if isinstance(obj, type): + for attr_name, attr_value in obj.__dict__.items(): + fix_one(objname + "." + attr_name, attr_name, attr_value) + + if keys is None: + keys = namespace.keys() + for objname in keys: + if not objname.startswith("_"): + obj = namespace[objname] + fix_one(objname, objname, obj) diff --git a/data_processing/detectron2/detectron2/utils/events.py b/data_processing/detectron2/detectron2/utils/events.py new file mode 100644 index 0000000..7d582a9 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/events.py @@ -0,0 +1,551 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import datetime +import json +import logging +import os +import time +from collections import defaultdict +from contextlib import contextmanager +from functools import cached_property +from typing import Optional +import torch +from fvcore.common.history_buffer import HistoryBuffer + +from detectron2.utils.file_io import PathManager + +__all__ = [ + "get_event_storage", + "has_event_storage", + "JSONWriter", + "TensorboardXWriter", + "CommonMetricPrinter", + "EventStorage", +] + +_CURRENT_STORAGE_STACK = [] + + +def get_event_storage(): + """ + Returns: + The :class:`EventStorage` object that's currently being used. + Throws an error if no :class:`EventStorage` is currently enabled. + """ + assert len( + _CURRENT_STORAGE_STACK + ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" + return _CURRENT_STORAGE_STACK[-1] + + +def has_event_storage(): + """ + Returns: + Check if there are EventStorage() context existed. + """ + return len(_CURRENT_STORAGE_STACK) > 0 + + +class EventWriter: + """ + Base class for writers that obtain events from :class:`EventStorage` and process them. + """ + + def write(self): + raise NotImplementedError + + def close(self): + pass + + +class JSONWriter(EventWriter): + """ + Write scalars to a json file. + + It saves scalars as one json per line (instead of a big json) for easy parsing. 
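`get_event_storage()` above only works inside an active `EventStorage` context, and `EventWriter` defines the contract that `JSONWriter` and the other writers in this file follow. A minimal, hypothetical writer might look like this (sketch only, not part of detectron2):

```python
from detectron2.utils.events import EventStorage, EventWriter, get_event_storage  # paths assumed

class PrintWriter(EventWriter):
    """Toy writer: prints the latest scalars from the active EventStorage."""

    def write(self):
        storage = get_event_storage()
        for name, (value, it) in storage.latest().items():
            print(f"iter={it} {name}={value:.4f}")

writer = PrintWriter()
with EventStorage(start_iter=0) as storage:  # writers may only be used inside this context
    storage.put_scalar("loss", 0.73)
    writer.write()
```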
+ + Examples parsing such a json file: + :: + $ cat metrics.json | jq -s '.[0:2]' + [ + { + "data_time": 0.008433341979980469, + "iteration": 19, + "loss": 1.9228371381759644, + "loss_box_reg": 0.050025828182697296, + "loss_classifier": 0.5316952466964722, + "loss_mask": 0.7236229181289673, + "loss_rpn_box": 0.0856662318110466, + "loss_rpn_cls": 0.48198649287223816, + "lr": 0.007173333333333333, + "time": 0.25401854515075684 + }, + { + "data_time": 0.007216215133666992, + "iteration": 39, + "loss": 1.282649278640747, + "loss_box_reg": 0.06222952902317047, + "loss_classifier": 0.30682939291000366, + "loss_mask": 0.6970193982124329, + "loss_rpn_box": 0.038663312792778015, + "loss_rpn_cls": 0.1471673548221588, + "lr": 0.007706666666666667, + "time": 0.2490077018737793 + } + ] + + $ cat metrics.json | jq '.loss_mask' + 0.7126231789588928 + 0.689423680305481 + 0.6776131987571716 + ... + + """ + + def __init__(self, json_file, window_size=20): + """ + Args: + json_file (str): path to the json file. New data will be appended if the file exists. + window_size (int): the window size of median smoothing for the scalars whose + `smoothing_hint` are True. + """ + self._file_handle = PathManager.open(json_file, "a") + self._window_size = window_size + self._last_write = -1 + + def write(self): + storage = get_event_storage() + to_save = defaultdict(dict) + + for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): + # keep scalars that have not been written + if iter <= self._last_write: + continue + to_save[iter][k] = v + if len(to_save): + all_iters = sorted(to_save.keys()) + self._last_write = max(all_iters) + + for itr, scalars_per_iter in to_save.items(): + scalars_per_iter["iteration"] = itr + self._file_handle.write(json.dumps(scalars_per_iter, sort_keys=True) + "\n") + self._file_handle.flush() + try: + os.fsync(self._file_handle.fileno()) + except AttributeError: + pass + + def close(self): + self._file_handle.close() + + +class TensorboardXWriter(EventWriter): + """ + Write all scalars to a tensorboard file. + """ + + def __init__(self, log_dir: str, window_size: int = 20, **kwargs): + """ + Args: + log_dir (str): the directory to save the output events + window_size (int): the scalars will be median-smoothed by this window size + + kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` + """ + self._window_size = window_size + self._writer_args = {"log_dir": log_dir, **kwargs} + self._last_write = -1 + + @cached_property + def _writer(self): + from torch.utils.tensorboard import SummaryWriter + + return SummaryWriter(**self._writer_args) + + def write(self): + storage = get_event_storage() + new_last_write = self._last_write + for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): + if iter > self._last_write: + self._writer.add_scalar(k, v, iter) + new_last_write = max(new_last_write, iter) + self._last_write = new_last_write + + # storage.put_{image,histogram} is only meant to be used by + # tensorboard writer. So we access its internal fields directly from here. + if len(storage._vis_data) >= 1: + for img_name, img, step_num in storage._vis_data: + self._writer.add_image(img_name, img, step_num) + # Storage stores all image data and rely on this writer to clear them. + # As a result it assumes only one writer will use its image data. + # An alternative design is to let storage store limited recent + # data (e.g. only the most recent image) that all writers can access. 
+ # In that case a writer may not see all image data if its period is long. + storage.clear_images() + + if len(storage._histograms) >= 1: + for params in storage._histograms: + self._writer.add_histogram_raw(**params) + storage.clear_histograms() + + def close(self): + if "_writer" in self.__dict__: + self._writer.close() + + +class CommonMetricPrinter(EventWriter): + """ + Print **common** metrics to the terminal, including + iteration time, ETA, memory, all losses, and the learning rate. + It also applies smoothing using a window of 20 elements. + + It's meant to print common metrics in common ways. + To print something in more customized ways, please implement a similar printer by yourself. + """ + + def __init__(self, max_iter: Optional[int] = None, window_size: int = 20): + """ + Args: + max_iter: the maximum number of iterations to train. + Used to compute ETA. If not given, ETA will not be printed. + window_size (int): the losses will be median-smoothed by this window size + """ + self.logger = logging.getLogger("detectron2.utils.events") + self._max_iter = max_iter + self._window_size = window_size + self._last_write = None # (step, time) of last call to write(). Used to compute ETA + + def _get_eta(self, storage) -> Optional[str]: + if self._max_iter is None: + return "" + iteration = storage.iter + try: + eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1) + storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False) + return str(datetime.timedelta(seconds=int(eta_seconds))) + except KeyError: + # estimate eta on our own - more noisy + eta_string = None + if self._last_write is not None: + estimate_iter_time = (time.perf_counter() - self._last_write[1]) / ( + iteration - self._last_write[0] + ) + eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + self._last_write = (iteration, time.perf_counter()) + return eta_string + + def write(self): + storage = get_event_storage() + iteration = storage.iter + if iteration == self._max_iter: + # This hook only reports training progress (loss, ETA, etc) but not other data, + # therefore do not write anything after training succeeds, even if this method + # is called. 
+ return + + try: + avg_data_time = storage.history("data_time").avg( + storage.count_samples("data_time", self._window_size) + ) + last_data_time = storage.history("data_time").latest() + except KeyError: + # they may not exist in the first few iterations (due to warmup) + # or when SimpleTrainer is not used + avg_data_time = None + last_data_time = None + try: + avg_iter_time = storage.history("time").global_avg() + last_iter_time = storage.history("time").latest() + except KeyError: + avg_iter_time = None + last_iter_time = None + try: + lr = "{:.5g}".format(storage.history("lr").latest()) + except KeyError: + lr = "N/A" + + eta_string = self._get_eta(storage) + + if torch.cuda.is_available(): + max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 + else: + max_mem_mb = None + + # NOTE: max_mem is parsed by grep in "dev/parse_results.sh" + self.logger.info( + str.format( + " {eta}iter: {iter} {losses} {non_losses} {avg_time}{last_time}" + + "{avg_data_time}{last_data_time} lr: {lr} {memory}", + eta=f"eta: {eta_string} " if eta_string else "", + iter=iteration, + losses=" ".join( + [ + "{}: {:.4g}".format( + k, v.median(storage.count_samples(k, self._window_size)) + ) + for k, v in storage.histories().items() + if "loss" in k + ] + ), + non_losses=" ".join( + [ + "{}: {:.4g}".format( + k, v.median(storage.count_samples(k, self._window_size)) + ) + for k, v in storage.histories().items() + if "[metric]" in k + ] + ), + avg_time="time: {:.4f} ".format(avg_iter_time) + if avg_iter_time is not None + else "", + last_time="last_time: {:.4f} ".format(last_iter_time) + if last_iter_time is not None + else "", + avg_data_time="data_time: {:.4f} ".format(avg_data_time) + if avg_data_time is not None + else "", + last_data_time="last_data_time: {:.4f} ".format(last_data_time) + if last_data_time is not None + else "", + lr=lr, + memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", + ) + ) + + +class EventStorage: + """ + The user-facing class that provides metric storage functionalities. + + In the future we may add support for storing / logging other types of data if needed. + """ + + def __init__(self, start_iter=0): + """ + Args: + start_iter (int): the iteration number to start with + """ + self._history = defaultdict(HistoryBuffer) + self._smoothing_hints = {} + self._latest_scalars = {} + self._iter = start_iter + self._current_prefix = "" + self._vis_data = [] + self._histograms = [] + + def put_image(self, img_name, img_tensor): + """ + Add an `img_tensor` associated with `img_name`, to be shown on + tensorboard. + + Args: + img_name (str): The name of the image to put into tensorboard. + img_tensor (torch.Tensor or numpy.array): An `uint8` or `float` + Tensor of shape `[channel, height, width]` where `channel` is + 3. The image format should be RGB. The elements in img_tensor + can either have values in [0, 1] (float32) or [0, 255] (uint8). + The `img_tensor` will be visualized in tensorboard. + """ + self._vis_data.append((img_name, img_tensor, self._iter)) + + def put_scalar(self, name, value, smoothing_hint=True, cur_iter=None): + """ + Add a scalar `value` to the `HistoryBuffer` associated with `name`. + + Args: + smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be + smoothed when logged. The hint will be accessible through + :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint + and apply custom smoothing rule. 
+ + It defaults to True because most scalars we save need to be smoothed to + provide any useful signal. + cur_iter (int): an iteration number to set explicitly instead of current iteration + """ + name = self._current_prefix + name + cur_iter = self._iter if cur_iter is None else cur_iter + history = self._history[name] + value = float(value) + history.update(value, cur_iter) + self._latest_scalars[name] = (value, cur_iter) + + existing_hint = self._smoothing_hints.get(name) + + if existing_hint is not None: + assert ( + existing_hint == smoothing_hint + ), "Scalar {} was put with a different smoothing_hint!".format(name) + else: + self._smoothing_hints[name] = smoothing_hint + + def put_scalars(self, *, smoothing_hint=True, cur_iter=None, **kwargs): + """ + Put multiple scalars from keyword arguments. + + Examples: + + storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True) + """ + for k, v in kwargs.items(): + self.put_scalar(k, v, smoothing_hint=smoothing_hint, cur_iter=cur_iter) + + def put_histogram(self, hist_name, hist_tensor, bins=1000): + """ + Create a histogram from a tensor. + + Args: + hist_name (str): The name of the histogram to put into tensorboard. + hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be converted + into a histogram. + bins (int): Number of histogram bins. + """ + ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item() + + # Create a histogram with PyTorch + hist_counts = torch.histc(hist_tensor, bins=bins) + hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32) + + # Parameter for the add_histogram_raw function of SummaryWriter + hist_params = dict( + tag=hist_name, + min=ht_min, + max=ht_max, + num=len(hist_tensor), + sum=float(hist_tensor.sum()), + sum_squares=float(torch.sum(hist_tensor**2)), + bucket_limits=hist_edges[1:].tolist(), + bucket_counts=hist_counts.tolist(), + global_step=self._iter, + ) + self._histograms.append(hist_params) + + def history(self, name): + """ + Returns: + HistoryBuffer: the scalar history for name + """ + ret = self._history.get(name, None) + if ret is None: + raise KeyError("No history metric available for {}!".format(name)) + return ret + + def histories(self): + """ + Returns: + dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars + """ + return self._history + + def latest(self): + """ + Returns: + dict[str -> (float, int)]: mapping from the name of each scalar to the most + recent value and the iteration number its added. + """ + return self._latest_scalars + + def latest_with_smoothing_hint(self, window_size=20): + """ + Similar to :meth:`latest`, but the returned values + are either the un-smoothed original latest value, + or a median of the given window_size, + depend on whether the smoothing_hint is True. + + This provides a default behavior that other writers can use. + + Note: All scalars saved in the past `window_size` iterations are used for smoothing. + This is different from the `window_size` definition in HistoryBuffer. + Use :meth:`get_history_window_size` to get the `window_size` used in HistoryBuffer. + """ + result = {} + for k, (v, itr) in self._latest_scalars.items(): + result[k] = ( + self._history[k].median(self.count_samples(k, window_size)) + if self._smoothing_hints[k] + else v, + itr, + ) + return result + + def count_samples(self, name, window_size=20): + """ + Return the number of samples logged in the past `window_size` iterations. 
+ """ + samples = 0 + data = self._history[name].values() + for _, iter_ in reversed(data): + if iter_ > data[-1][1] - window_size: + samples += 1 + else: + break + return samples + + def smoothing_hints(self): + """ + Returns: + dict[name -> bool]: the user-provided hint on whether the scalar + is noisy and needs smoothing. + """ + return self._smoothing_hints + + def step(self): + """ + User should either: (1) Call this function to increment storage.iter when needed. Or + (2) Set `storage.iter` to the correct iteration number before each iteration. + + The storage will then be able to associate the new data with an iteration number. + """ + self._iter += 1 + + @property + def iter(self): + """ + Returns: + int: The current iteration number. When used together with a trainer, + this is ensured to be the same as trainer.iter. + """ + return self._iter + + @iter.setter + def iter(self, val): + self._iter = int(val) + + @property + def iteration(self): + # for backward compatibility + return self._iter + + def __enter__(self): + _CURRENT_STORAGE_STACK.append(self) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + assert _CURRENT_STORAGE_STACK[-1] == self + _CURRENT_STORAGE_STACK.pop() + + @contextmanager + def name_scope(self, name): + """ + Yields: + A context within which all the events added to this storage + will be prefixed by the name scope. + """ + old_prefix = self._current_prefix + self._current_prefix = name.rstrip("/") + "/" + yield + self._current_prefix = old_prefix + + def clear_images(self): + """ + Delete all the stored images for visualization. This should be called + after images are written to tensorboard. + """ + self._vis_data = [] + + def clear_histograms(self): + """ + Delete all the stored histograms for visualization. + This should be called after histograms are written to tensorboard. + """ + self._histograms = [] diff --git a/data_processing/detectron2/detectron2/utils/file_io.py b/data_processing/detectron2/detectron2/utils/file_io.py new file mode 100644 index 0000000..09f7dff --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/file_io.py @@ -0,0 +1,39 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathHandler +from iopath.common.file_io import PathManager as PathManagerBase + +__all__ = ["PathManager", "PathHandler"] + + +PathManager = PathManagerBase() +""" +This is a detectron2 project-specific PathManager. +We try to stay away from global PathManager in fvcore as it +introduces potential conflicts among other libraries. +""" + + +class Detectron2Handler(PathHandler): + """ + Resolve anything that's hosted under detectron2's namespace. 
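Putting the `EventStorage` API above together, a short sketch of how a training loop might record scalars (the names and values below are illustrative):

```python
from detectron2.utils.events import EventStorage  # import path assumed

with EventStorage(start_iter=0) as storage:
    for it in range(3):
        storage.put_scalar("loss", 1.0 / (it + 1))           # smoothed by writers by default
        with storage.name_scope("val"):
            storage.put_scalar("accuracy", 0.5 + 0.1 * it)   # stored as "val/accuracy"
        storage.step()                                        # advance the iteration counter
    print(storage.latest())  # latest (value, iteration) per scalar
    print(storage.iter)      # 3
```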
+ """ + + PREFIX = "detectron2://" + S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path, **kwargs): + name = path[len(self.PREFIX) :] + return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name, **kwargs) + + def _open(self, path, mode="r", **kwargs): + return PathManager.open( + self.S3_DETECTRON2_PREFIX + path[len(self.PREFIX) :], mode, **kwargs + ) + + +PathManager.register_handler(HTTPURLHandler()) +PathManager.register_handler(OneDrivePathHandler()) +PathManager.register_handler(Detectron2Handler()) diff --git a/data_processing/detectron2/detectron2/utils/logger.py b/data_processing/detectron2/detectron2/utils/logger.py new file mode 100644 index 0000000..85be03c --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/logger.py @@ -0,0 +1,261 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import atexit +import functools +import logging +import os +import sys +import time +from collections import Counter +import torch +from tabulate import tabulate +from termcolor import colored + +from detectron2.utils.file_io import PathManager + +__all__ = ["setup_logger", "log_first_n", "log_every_n", "log_every_n_seconds"] + +D2_LOG_BUFFER_SIZE_KEY: str = "D2_LOG_BUFFER_SIZE" + +DEFAULT_LOG_BUFFER_SIZE: int = 1024 * 1024 # 1MB + + +class _ColorfulFormatter(logging.Formatter): + def __init__(self, *args, **kwargs): + self._root_name = kwargs.pop("root_name") + "." + self._abbrev_name = kwargs.pop("abbrev_name", "") + if len(self._abbrev_name): + self._abbrev_name = self._abbrev_name + "." + super(_ColorfulFormatter, self).__init__(*args, **kwargs) + + def formatMessage(self, record): + record.name = record.name.replace(self._root_name, self._abbrev_name) + log = super(_ColorfulFormatter, self).formatMessage(record) + if record.levelno == logging.WARNING: + prefix = colored("WARNING", "red", attrs=["blink"]) + elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: + prefix = colored("ERROR", "red", attrs=["blink", "underline"]) + else: + return log + return prefix + " " + log + + +@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers +def setup_logger( + output=None, + distributed_rank=0, + *, + color=True, + name="detectron2", + abbrev_name=None, + enable_propagation: bool = False, + configure_stdout: bool = True +): + """ + Initialize the detectron2 logger and set its verbosity level to "DEBUG". + + Args: + output (str): a file name or a directory to save log. If None, will not save log file. + If ends with ".txt" or ".log", assumed to be a file name. + Otherwise, logs will be saved to `output/log.txt`. + name (str): the root module name of this logger + abbrev_name (str): an abbreviation of the module, to avoid long names in logs. + Set to "" to not log the root module in logs. + By default, will abbreviate "detectron2" to "d2" and leave other + modules unchanged. + enable_propagation (bool): whether to propagate logs to the parent logger. + configure_stdout (bool): whether to configure logging to stdout. 
+ + + Returns: + logging.Logger: a logger + """ + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = enable_propagation + + if abbrev_name is None: + abbrev_name = "d2" if name == "detectron2" else name + + plain_formatter = logging.Formatter( + "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" + ) + # stdout logging: master only + if configure_stdout and distributed_rank == 0: + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + if color: + formatter = _ColorfulFormatter( + colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", + datefmt="%m/%d %H:%M:%S", + root_name=name, + abbrev_name=str(abbrev_name), + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + logger.addHandler(ch) + + # file logging: all workers + if output is not None: + if output.endswith(".txt") or output.endswith(".log"): + filename = output + else: + filename = os.path.join(output, "log.txt") + if distributed_rank > 0: + filename = filename + ".rank{}".format(distributed_rank) + PathManager.mkdirs(os.path.dirname(filename)) + + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + + return logger + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. +@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + # use 1K buffer if writing to cloud storage + io = PathManager.open(filename, "a", buffering=_get_log_stream_buffer_size(filename)) + atexit.register(io.close) + return io + + +def _get_log_stream_buffer_size(filename: str) -> int: + if "://" not in filename: + # Local file, no extra caching is necessary + return -1 + # Remote file requires a larger cache to avoid many small writes. + if D2_LOG_BUFFER_SIZE_KEY in os.environ: + return int(os.environ[D2_LOG_BUFFER_SIZE_KEY]) + return DEFAULT_LOG_BUFFER_SIZE + + +""" +Below are some other convenient logging methods. +They are mainly adopted from +https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py +""" + + +def _find_caller(): + """ + Returns: + str: module name of the caller + tuple: a hashable key to be used to identify different callers + """ + frame = sys._getframe(2) + while frame: + code = frame.f_code + if os.path.join("utils", "logger.") not in code.co_filename: + mod_name = frame.f_globals["__name__"] + if mod_name == "__main__": + mod_name = "detectron2" + return mod_name, (code.co_filename, frame.f_lineno, code.co_name) + frame = frame.f_back + + +_LOG_COUNTER = Counter() +_LOG_TIMER = {} + + +def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): + """ + Log only for the first n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + key (str or tuple[str]): the string(s) can be one of "caller" or + "message", which defines how to identify duplicated logs. + For example, if called with `n=1, key="caller"`, this function + will only log the first call from the same caller, regardless of + the message content. + If called with `n=1, key="message"`, this function will log the + same content only once, even if they are called from different places. 
+ If called with `n=1, key=("caller", "message")`, this function + will not log only if the same caller has logged the same message before. + """ + if isinstance(key, str): + key = (key,) + assert len(key) > 0 + + caller_module, caller_key = _find_caller() + hash_key = () + if "caller" in key: + hash_key = hash_key + caller_key + if "message" in key: + hash_key = hash_key + (msg,) + + _LOG_COUNTER[hash_key] += 1 + if _LOG_COUNTER[hash_key] <= n: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n(lvl, msg, n=1, *, name=None): + """ + Log once per n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + _LOG_COUNTER[key] += 1 + if n == 1 or _LOG_COUNTER[key] % n == 1: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n_seconds(lvl, msg, n=1, *, name=None): + """ + Log no more than once per n seconds. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + last_logged = _LOG_TIMER.get(key, None) + current_time = time.time() + if last_logged is None or current_time - last_logged >= n: + logging.getLogger(name or caller_module).log(lvl, msg) + _LOG_TIMER[key] = current_time + + +def create_small_table(small_dict): + """ + Create a small table using the keys of small_dict as headers. This is only + suitable for small dictionaries. + + Args: + small_dict (dict): a result dictionary of only a few items. + + Returns: + str: the table as a string. + """ + keys, values = tuple(zip(*small_dict.items())) + table = tabulate( + [values], + headers=keys, + tablefmt="pipe", + floatfmt=".3f", + stralign="center", + numalign="center", + ) + return table + + +def _log_api_usage(identifier: str): + """ + Internal function used to log the usage of different detectron2 components + inside facebook's infra. + """ + torch._C._log_api_usage_once("detectron2." + identifier) diff --git a/data_processing/detectron2/detectron2/utils/memory.py b/data_processing/detectron2/detectron2/utils/memory.py new file mode 100644 index 0000000..bd49478 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/memory.py @@ -0,0 +1,84 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +from contextlib import contextmanager +from functools import wraps +import torch + +__all__ = ["retry_if_cuda_oom"] + + +@contextmanager +def _ignore_torch_cuda_oom(): + """ + A context which ignores CUDA OOM exception from pytorch. + """ + try: + yield + except RuntimeError as e: + # NOTE: the string may change? + if "CUDA out of memory. " in str(e): + pass + else: + raise + + +def retry_if_cuda_oom(func): + """ + Makes a function retry itself after encountering + pytorch's CUDA OOM error. + It will first retry after calling `torch.cuda.empty_cache()`. + + If that still fails, it will then retry by trying to convert inputs to CPUs. + In this case, it expects the function to dispatch to CPU implementation. + The return values may become CPU tensors as well and it's user's + responsibility to convert it back to CUDA tensor if needed. + + Args: + func: a stateless callable that takes tensor-like objects as arguments + + Returns: + a callable which retries `func` if OOM is encountered. 
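The logging helpers from `logger.py` above are typically combined as follows; this is a usage sketch only, with an assumed output directory and illustrative messages:

```python
import logging
from detectron2.utils.logger import setup_logger, log_first_n, log_every_n_seconds  # paths assumed

logger = setup_logger(output="./logs", name="detectron2")  # also writes ./logs/log.txt
logger.info("dataset loaded")

for it in range(100):
    # Emit the warning only once, no matter how often this line runs.
    log_first_n(logging.WARNING, "images without annotations were skipped", n=1)
    # Rate-limit progress messages to at most one every 5 seconds.
    log_every_n_seconds(logging.INFO, f"iteration {it}", n=5)
```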
+ + Examples: + :: + output = retry_if_cuda_oom(some_torch_function)(input1, input2) + # output may be on CPU even if inputs are on GPU + + Note: + 1. When converting inputs to CPU, it will only look at each argument and check + if it has `.device` and `.to` for conversion. Nested structures of tensors + are not supported. + + 2. Since the function might be called more than once, it has to be + stateless. + """ + + def maybe_to_cpu(x): + try: + like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to") + except AttributeError: + like_gpu_tensor = False + if like_gpu_tensor: + return x.to(device="cpu") + else: + return x + + @wraps(func) + def wrapped(*args, **kwargs): + with _ignore_torch_cuda_oom(): + return func(*args, **kwargs) + + # Clear cache and retry + torch.cuda.empty_cache() + with _ignore_torch_cuda_oom(): + return func(*args, **kwargs) + + # Try on CPU. This slows down the code significantly, therefore print a notice. + logger = logging.getLogger(__name__) + logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))) + new_args = (maybe_to_cpu(x) for x in args) + new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()} + return func(*new_args, **new_kwargs) + + return wrapped diff --git a/data_processing/detectron2/detectron2/utils/registry.py b/data_processing/detectron2/detectron2/utils/registry.py new file mode 100644 index 0000000..4b01e90 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/registry.py @@ -0,0 +1,60 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any +import pydoc +from fvcore.common.registry import Registry # for backward compatibility. + +""" +``Registry`` and `locate` provide ways to map a string (typically found +in config files) to callable objects. +""" + +__all__ = ["Registry", "locate"] + + +def _convert_target_to_string(t: Any) -> str: + """ + Inverse of ``locate()``. + + Args: + t: any object with ``__module__`` and ``__qualname__`` + """ + module, qualname = t.__module__, t.__qualname__ + + # Compress the path to this object, e.g. ``module.submodule._impl.class`` + # may become ``module.submodule.class``, if the later also resolves to the same + # object. This simplifies the string, and also is less affected by moving the + # class implementation. + module_parts = module.split(".") + for k in range(1, len(module_parts)): + prefix = ".".join(module_parts[:k]) + candidate = f"{prefix}.{qualname}" + try: + if locate(candidate) is t: + return candidate + except ImportError: + pass + return f"{module}.{qualname}" + + +def locate(name: str) -> Any: + """ + Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``, + such as "module.submodule.class_name". + + Raise Exception if it cannot be found. + """ + obj = pydoc.locate(name) + + # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly + # by pydoc.locate. Try a private function from hydra. + if obj is None: + try: + # from hydra.utils import get_method - will print many errors + from hydra.utils import _locate + except ImportError as e: + raise ImportError(f"Cannot dynamically locate object {name}!") from e + else: + obj = _locate(name) # it raises if fails + + return obj diff --git a/data_processing/detectron2/detectron2/utils/serialize.py b/data_processing/detectron2/detectron2/utils/serialize.py new file mode 100644 index 0000000..0b38862 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/serialize.py @@ -0,0 +1,32 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
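`Registry` (re-exported from fvcore) and `locate` above are the two directions of the same mapping between config strings and callables; a small sketch, with a hypothetical registry name:

```python
from detectron2.utils.registry import Registry, locate  # import path assumed

TOY_BACKBONE_REGISTRY = Registry("TOY_BACKBONE")  # hypothetical registry

@TOY_BACKBONE_REGISTRY.register()
def build_toy_backbone():
    return "toy"

print(TOY_BACKBONE_REGISTRY.get("build_toy_backbone")())  # "toy"
print(locate("torch.nn.Conv2d"))                          # <class 'torch.nn.Conv2d'>
```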
+import cloudpickle + + +class PicklableWrapper(object): + """ + Wrap an object to make it more picklable, note that it uses + heavy weight serialization libraries that are slower than pickle. + It's best to use it only on closures (which are usually not picklable). + + This is a simplified version of + https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py + """ + + def __init__(self, obj): + while isinstance(obj, PicklableWrapper): + # Wrapping an object twice is no-op + obj = obj._obj + self._obj = obj + + def __reduce__(self): + s = cloudpickle.dumps(self._obj) + return cloudpickle.loads, (s,) + + def __call__(self, *args, **kwargs): + return self._obj(*args, **kwargs) + + def __getattr__(self, attr): + # Ensure that the wrapped object can be used seamlessly as the previous object. + if attr not in ["_obj"]: + return getattr(self._obj, attr) + return getattr(self, attr) diff --git a/data_processing/detectron2/detectron2/utils/testing.py b/data_processing/detectron2/detectron2/utils/testing.py new file mode 100644 index 0000000..3f5b9db --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/testing.py @@ -0,0 +1,478 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import io +import numpy as np +import os +import re +import tempfile +import unittest +from typing import Callable +import torch +import torch.onnx.symbolic_helper as sym_help +from packaging import version +from torch._C import ListType +from torch.onnx import register_custom_op_symbolic + +from detectron2 import model_zoo +from detectron2.config import CfgNode, LazyConfig, instantiate +from detectron2.data import DatasetCatalog +from detectron2.data.detection_utils import read_image +from detectron2.modeling import build_model +from detectron2.structures import Boxes, Instances, ROIMasks +from detectron2.utils.file_io import PathManager + + +""" +Internal utilities for tests. Don't use except for writing tests. +""" + + +def get_model_no_weights(config_path): + """ + Like model_zoo.get, but do not load any weights (even pretrained) + """ + cfg = model_zoo.get_config(config_path) + if isinstance(cfg, CfgNode): + if not torch.cuda.is_available(): + cfg.MODEL.DEVICE = "cpu" + return build_model(cfg) + else: + return instantiate(cfg.model) + + +def random_boxes(num_boxes, max_coord=100, device="cpu"): + """ + Create a random Nx4 boxes tensor, with coordinates < max_coord. + """ + boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5) + boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression + # Note: the implementation of this function in torchvision is: + # boxes[:, 2:] += torch.rand(N, 2) * 100 + # but it does not guarantee non-negative widths/heights constraints: + # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: + boxes[:, 2:] += boxes[:, :2] + return boxes + + +def get_sample_coco_image(tensor=True): + """ + Args: + tensor (bool): if True, returns 3xHxW tensor. + else, returns a HxWx3 numpy array. + + Returns: + an image, in BGR color. 
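`PicklableWrapper` above is mainly useful for closures handed to multiprocessing workers; a minimal sketch (the lambda and threshold are illustrative):

```python
import pickle
from detectron2.utils.serialize import PicklableWrapper  # import path assumed

threshold = 0.5
score_filter = PicklableWrapper(lambda s: s > threshold)  # a closure is normally not picklable

blob = pickle.dumps(score_filter)   # serialized via cloudpickle thanks to __reduce__
restored = pickle.loads(blob)       # unpickles back to the wrapped callable
print(restored(0.7))                # True
```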
+ """ + try: + file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"] + if not PathManager.exists(file_name): + raise FileNotFoundError() + except IOError: + # for public CI to run + file_name = PathManager.get_local_path( + "http://images.cocodataset.org/train2017/000000000009.jpg" + ) + ret = read_image(file_name, format="BGR") + if tensor: + ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1))) + return ret + + +def convert_scripted_instances(instances): + """ + Convert a scripted Instances object to a regular :class:`Instances` object + """ + assert hasattr( + instances, "image_size" + ), f"Expect an Instances object, but got {type(instances)}!" + ret = Instances(instances.image_size) + for name in instances._field_names: + val = getattr(instances, "_" + name, None) + if val is not None: + ret.set(name, val) + return ret + + +def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False): + """ + Args: + input, other (Instances): + size_as_tensor: compare image_size of the Instances as tensors (instead of tuples). + Useful for comparing outputs of tracing. + """ + if not isinstance(input, Instances): + input = convert_scripted_instances(input) + if not isinstance(other, Instances): + other = convert_scripted_instances(other) + + if not msg: + msg = "Two Instances are different! " + else: + msg = msg.rstrip() + " " + + size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!" + if size_as_tensor: + assert torch.equal( + torch.tensor(input.image_size), torch.tensor(other.image_size) + ), size_error_msg + else: + assert input.image_size == other.image_size, size_error_msg + fields = sorted(input.get_fields().keys()) + fields_other = sorted(other.get_fields().keys()) + assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!" + + for f in fields: + val1, val2 = input.get(f), other.get(f) + if isinstance(val1, (Boxes, ROIMasks)): + # boxes in the range of O(100) and can have a larger tolerance + assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), ( + msg + f"Field {f} differs too much!" + ) + elif isinstance(val1, torch.Tensor): + if val1.dtype.is_floating_point: + mag = torch.abs(val1).max().cpu().item() + assert torch.allclose(val1, val2, atol=mag * rtol), ( + msg + f"Field {f} differs too much!" + ) + else: + assert torch.equal(val1, val2), msg + f"Field {f} is different!" + else: + raise ValueError(f"Don't know how to compare type {type(val1)}") + + +def reload_script_model(module): + """ + Save a jit module and load it back. + Similar to the `getExportImportCopy` function in torch/testing/ + """ + buffer = io.BytesIO() + torch.jit.save(module, buffer) + buffer.seek(0) + return torch.jit.load(buffer) + + +def reload_lazy_config(cfg): + """ + Save an object by LazyConfig.save and load it back. + This is used to test that a config still works the same after + serialization/deserialization. + """ + with tempfile.TemporaryDirectory(prefix="detectron2") as d: + fname = os.path.join(d, "d2_cfg_test.yaml") + LazyConfig.save(cfg, fname) + return LazyConfig.load(fname) + + +def min_torch_version(min_version: str) -> bool: + """ + Returns True when torch's version is at least `min_version`. 
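A brief sketch of how the test helpers above might be used together; the field names are arbitrary and the import path is assumed:

```python
import torch
from detectron2.structures import Instances
from detectron2.utils.testing import random_boxes, assert_instances_allclose  # path assumed

boxes = random_boxes(5)            # 5x4 boxes with non-negative widths/heights

a = Instances((100, 100))
a.scores = torch.rand(5)
a.box_tensor = boxes

b = Instances((100, 100))
b.scores = a.scores.clone()
b.box_tensor = boxes.clone()

assert_instances_allclose(a, b)    # same image_size, same fields, equal values
```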
+ """ + try: + import torch + except ImportError: + return False + + installed_version = version.parse(torch.__version__.split("+")[0]) + min_version = version.parse(min_version) + return installed_version >= min_version + + +def has_dynamic_axes(onnx_model): + """ + Return True when all ONNX input/output have only dynamic axes for all ranks + """ + return all( + not dim.dim_param.isnumeric() + for inp in onnx_model.graph.input + for dim in inp.type.tensor_type.shape.dim + ) and all( + not dim.dim_param.isnumeric() + for out in onnx_model.graph.output + for dim in out.type.tensor_type.shape.dim + ) + + +def register_custom_op_onnx_export( + opname: str, symbolic_fn: Callable, opset_version: int, min_version: str +) -> None: + """ + Register `symbolic_fn` as PyTorch's symbolic `opname`-`opset_version` for ONNX export. + The registration is performed only when current PyTorch's version is < `min_version.` + IMPORTANT: symbolic must be manually unregistered after the caller function returns + """ + if min_torch_version(min_version): + return + register_custom_op_symbolic(opname, symbolic_fn, opset_version) + print(f"_register_custom_op_onnx_export({opname}, {opset_version}) succeeded.") + + +def unregister_custom_op_onnx_export(opname: str, opset_version: int, min_version: str) -> None: + """ + Unregister PyTorch's symbolic `opname`-`opset_version` for ONNX export. + The un-registration is performed only when PyTorch's version is < `min_version` + IMPORTANT: The symbolic must have been manually registered by the caller, otherwise + the incorrect symbolic may be unregistered instead. + """ + + # TODO: _unregister_custom_op_symbolic is introduced PyTorch>=1.10 + # Remove after PyTorch 1.10+ is used by ALL detectron2's CI + try: + from torch.onnx import unregister_custom_op_symbolic as _unregister_custom_op_symbolic + except ImportError: + + def _unregister_custom_op_symbolic(symbolic_name, opset_version): + import torch.onnx.symbolic_registry as sym_registry + from torch.onnx.symbolic_helper import _onnx_main_opset, _onnx_stable_opsets + + def _get_ns_op_name_from_custom_op(symbolic_name): + try: + from torch.onnx.utils import get_ns_op_name_from_custom_op + + ns, op_name = get_ns_op_name_from_custom_op(symbolic_name) + except ImportError as import_error: + if not bool( + re.match(r"^[a-zA-Z0-9-_]*::[a-zA-Z-_]+[a-zA-Z0-9-_]*$", symbolic_name) + ): + raise ValueError( + f"Invalid symbolic name {symbolic_name}. Must be `domain::name`" + ) from import_error + + ns, op_name = symbolic_name.split("::") + if ns == "onnx": + raise ValueError(f"{ns} domain cannot be modified.") from import_error + + if ns == "aten": + ns = "" + + return ns, op_name + + def _unregister_op(opname: str, domain: str, version: int): + try: + sym_registry.unregister_op(op_name, ns, ver) + except AttributeError as attribute_error: + if sym_registry.is_registered_op(opname, domain, version): + del sym_registry._registry[(domain, version)][opname] + if not sym_registry._registry[(domain, version)]: + del sym_registry._registry[(domain, version)] + else: + raise RuntimeError( + f"The opname {opname} is not registered." 
+ ) from attribute_error + + ns, op_name = _get_ns_op_name_from_custom_op(symbolic_name) + for ver in _onnx_stable_opsets + [_onnx_main_opset]: + if ver >= opset_version: + _unregister_op(op_name, ns, ver) + + if min_torch_version(min_version): + return + _unregister_custom_op_symbolic(opname, opset_version) + print(f"_unregister_custom_op_onnx_export({opname}, {opset_version}) succeeded.") + + +skipIfOnCPUCI = unittest.skipIf( + os.environ.get("CI") and not torch.cuda.is_available(), + "The test is too slow on CPUs and will be executed on CircleCI's GPU jobs.", +) + + +def skipIfUnsupportedMinOpsetVersion(min_opset_version, current_opset_version=None): + """ + Skips tests for ONNX Opset versions older than min_opset_version. + """ + + def skip_dec(func): + def wrapper(self): + try: + opset_version = self.opset_version + except AttributeError: + opset_version = current_opset_version + if opset_version < min_opset_version: + raise unittest.SkipTest( + f"Unsupported opset_version {opset_version}" + f", required is {min_opset_version}" + ) + return func(self) + + return wrapper + + return skip_dec + + +def skipIfUnsupportedMinTorchVersion(min_version): + """ + Skips tests for PyTorch versions older than min_version. + """ + reason = f"module 'torch' has __version__ {torch.__version__}" f", required is: {min_version}" + return unittest.skipIf(not min_torch_version(min_version), reason) + + +# TODO: Remove after PyTorch 1.11.1+ is used by detectron2's CI +def _pytorch1111_symbolic_opset9_to(g, self, *args): + """aten::to() symbolic that must be used for testing with PyTorch < 1.11.1.""" + + def is_aten_to_device_only(args): + if len(args) == 4: + # aten::to(Tensor, Device, bool, bool, memory_format) + return ( + args[0].node().kind() == "prim::device" + or args[0].type().isSubtypeOf(ListType.ofInts()) + or ( + sym_help._is_value(args[0]) + and args[0].node().kind() == "onnx::Constant" + and isinstance(args[0].node()["value"], str) + ) + ) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + # When dtype is None, this is a aten::to(device) call + dtype = sym_help._get_const(args[1], "i", "dtype") + return dtype is None + elif len(args) in (6, 7): + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) + # When dtype is None, this is a aten::to(device) call + dtype = sym_help._get_const(args[0], "i", "dtype") + return dtype is None + return False + + # ONNX doesn't have a concept of a device, so we ignore device-only casts + if is_aten_to_device_only(args): + return self + + if len(args) == 4: + # TestONNXRuntime::test_ones_bool shows args[0] of aten::to can be onnx::Constant[Tensor] + # In this case, the constant value is a tensor not int, + # so sym_help._maybe_get_const(args[0], 'i') would not work. 
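+        # Instead, read the value off the onnx::Constant node directly: a 0-dim tensor
+        # is unwrapped to a plain integer dtype code, while a higher-rank tensor is kept as-is.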
+ dtype = args[0] + if sym_help._is_value(args[0]) and args[0].node().kind() == "onnx::Constant": + tval = args[0].node()["value"] + if isinstance(tval, torch.Tensor): + if len(tval.shape) == 0: + tval = tval.item() + dtype = int(tval) + else: + dtype = tval + + if sym_help._is_value(dtype) or isinstance(dtype, torch.Tensor): + # aten::to(Tensor, Tensor, bool, bool, memory_format) + dtype = args[0].type().scalarType() + return g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx[dtype]) + else: + # aten::to(Tensor, ScalarType, bool, bool, memory_format) + # memory_format is ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + dtype = sym_help._get_const(args[1], "i", "dtype") + # memory_format is ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + elif len(args) == 6: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) + dtype = sym_help._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + elif len(args) == 7: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) + dtype = sym_help._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + else: + return sym_help._onnx_unsupported("Unknown aten::to signature") + + +# TODO: Remove after PyTorch 1.11.1+ is used by detectron2's CI +def _pytorch1111_symbolic_opset9_repeat_interleave(g, self, repeats, dim=None, output_size=None): + + # from torch.onnx.symbolic_helper import ScalarType + from torch.onnx.symbolic_opset9 import expand, unsqueeze + + input = self + # if dim is None flatten + # By default, use the flattened input array, and return a flat output array + if sym_help._is_none(dim): + input = sym_help._reshape_helper(g, self, g.op("Constant", value_t=torch.tensor([-1]))) + dim = 0 + else: + dim = sym_help._maybe_get_scalar(dim) + + repeats_dim = sym_help._get_tensor_rank(repeats) + repeats_sizes = sym_help._get_tensor_sizes(repeats) + input_sizes = sym_help._get_tensor_sizes(input) + if repeats_dim is None: + raise RuntimeError( + "Unsupported: ONNX export of repeat_interleave for unknown " "repeats rank." + ) + if repeats_sizes is None: + raise RuntimeError( + "Unsupported: ONNX export of repeat_interleave for unknown " "repeats size." + ) + if input_sizes is None: + raise RuntimeError( + "Unsupported: ONNX export of repeat_interleave for unknown " "input size." 
+ ) + + input_sizes_temp = input_sizes.copy() + for idx, input_size in enumerate(input_sizes): + if input_size is None: + input_sizes[idx], input_sizes_temp[idx] = 0, -1 + + # Cases where repeats is an int or single value tensor + if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1): + if not sym_help._is_tensor(repeats): + repeats = g.op("Constant", value_t=torch.LongTensor(repeats)) + if input_sizes[dim] == 0: + return sym_help._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + ) + else: + reps = input_sizes[dim] + repeats = expand(g, repeats, g.op("Constant", value_t=torch.tensor([reps])), None) + + # Cases where repeats is a 1 dim Tensor + elif repeats_dim == 1: + if input_sizes[dim] == 0: + return sym_help._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + ) + if repeats_sizes[0] is None: + return sym_help._onnx_opset_unsupported_detailed( + "repeat_interleave", 9, 13, "Unsupported for cases with dynamic repeats" + ) + assert ( + repeats_sizes[0] == input_sizes[dim] + ), "repeats must have the same size as input along dim" + reps = repeats_sizes[0] + else: + raise RuntimeError("repeats must be 0-dim or 1-dim tensor") + + final_splits = list() + r_splits = sym_help._repeat_interleave_split_helper(g, repeats, reps, 0) + if isinstance(r_splits, torch._C.Value): + r_splits = [r_splits] + i_splits = sym_help._repeat_interleave_split_helper(g, input, reps, dim) + if isinstance(i_splits, torch._C.Value): + i_splits = [i_splits] + input_sizes[dim], input_sizes_temp[dim] = -1, 1 + for idx, r_split in enumerate(r_splits): + i_split = unsqueeze(g, i_splits[idx], dim + 1) + r_concat = [ + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[: dim + 1])), + r_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[dim + 1 :])), + ] + r_concat = g.op("Concat", *r_concat, axis_i=0) + i_split = expand(g, i_split, r_concat, None) + i_split = sym_help._reshape_helper( + g, + i_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes)), + allowzero=0, + ) + final_splits.append(i_split) + return g.op("Concat", *final_splits, axis_i=dim) diff --git a/data_processing/detectron2/detectron2/utils/tracing.py b/data_processing/detectron2/detectron2/utils/tracing.py new file mode 100644 index 0000000..577df4e --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/tracing.py @@ -0,0 +1,71 @@ +import inspect +import torch + +from detectron2.utils.env import TORCH_VERSION + +try: + from torch.fx._symbolic_trace import is_fx_tracing as is_fx_tracing_current + + tracing_current_exists = True +except ImportError: + tracing_current_exists = False + +try: + from torch.fx._symbolic_trace import _orig_module_call + + tracing_legacy_exists = True +except ImportError: + tracing_legacy_exists = False + + +@torch.jit.ignore +def is_fx_tracing_legacy() -> bool: + """ + Returns a bool indicating whether torch.fx is currently symbolically tracing a module. + Can be useful for gating module logic that is incompatible with symbolic tracing. 
+ """ + return torch.nn.Module.__call__ is not _orig_module_call + + +@torch.jit.ignore +def is_fx_tracing() -> bool: + """Returns whether execution is currently in + Torch FX tracing mode""" + if TORCH_VERSION >= (1, 10) and tracing_current_exists: + return is_fx_tracing_current() + elif tracing_legacy_exists: + return is_fx_tracing_legacy() + else: + # Can't find either current or legacy tracing indication code. + # Enabling this assert_fx_safe() call regardless of tracing status. + return False + + +@torch.jit.ignore +def assert_fx_safe(condition: bool, message: str) -> torch.Tensor: + """An FX-tracing safe version of assert. + Avoids erroneous type assertion triggering when types are masked inside + an fx.proxy.Proxy object during tracing. + Args: condition - either a boolean expression or a string representing + the condition to test. If this assert triggers an exception when tracing + due to dynamic control flow, try encasing the expression in quotation + marks and supplying it as a string.""" + # Must return a concrete tensor for compatibility with PyTorch <=1.8. + # If <=1.8 compatibility is not needed, return type can be converted to None + if not is_fx_tracing(): + try: + if isinstance(condition, str): + caller_frame = inspect.currentframe().f_back + torch._assert( + eval(condition, caller_frame.f_globals, caller_frame.f_locals), message + ) + return torch.ones(1) + else: + torch._assert(condition, message) + return torch.ones(1) + except torch.fx.proxy.TraceError as e: + print( + "Found a non-FX compatible assertion. Skipping the check. Failure is shown below" + + str(e) + ) + return torch.zeros(1) diff --git a/data_processing/detectron2/detectron2/utils/video_visualizer.py b/data_processing/detectron2/detectron2/utils/video_visualizer.py new file mode 100644 index 0000000..42685be --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/video_visualizer.py @@ -0,0 +1,287 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import List +import pycocotools.mask as mask_util + +from detectron2.structures import Instances +from detectron2.utils.visualizer import ( + ColorMode, + Visualizer, + _create_text_labels, + _PanopticPrediction, +) + +from .colormap import random_color, random_colors + + +class _DetectedInstance: + """ + Used to store data about detected objects in video frame, + in order to transfer color to objects in the future frames. + + Attributes: + label (int): + bbox (tuple[float]): + mask_rle (dict): + color (tuple[float]): RGB colors in range (0, 1) + ttl (int): time-to-live for the instance. For example, if ttl=2, + the instance color can be transferred to objects in the next two frames. + """ + + __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"] + + def __init__(self, label, bbox, mask_rle, color, ttl): + self.label = label + self.bbox = bbox + self.mask_rle = mask_rle + self.color = color + self.ttl = ttl + + +class VideoVisualizer: + def __init__(self, metadata, instance_mode=ColorMode.IMAGE): + """ + Args: + metadata (MetadataCatalog): image metadata. + """ + self.metadata = metadata + self._old_instances = [] + assert instance_mode in [ + ColorMode.IMAGE, + ColorMode.IMAGE_BW, + ], "Other mode not supported yet." 
+ self._instance_mode = instance_mode + self._max_num_instances = self.metadata.get("max_num_instances", 74) + self._assigned_colors = {} + self._color_pool = random_colors(self._max_num_instances, rgb=True, maximum=1) + self._color_idx_set = set(range(len(self._color_pool))) + + def draw_instance_predictions(self, frame, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255]. + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. + """ + frame_visualizer = Visualizer(frame, self.metadata) + num_instances = len(predictions) + if num_instances == 0: + return frame_visualizer.output + + boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + colors = predictions.COLOR if predictions.has("COLOR") else [None] * len(predictions) + periods = predictions.ID_period if predictions.has("ID_period") else None + period_threshold = self.metadata.get("period_threshold", 0) + visibilities = ( + [True] * len(predictions) + if periods is None + else [x > period_threshold for x in periods] + ) + + if predictions.has("pred_masks"): + masks = predictions.pred_masks + # mask IOU is not yet enabled + # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F")) + # assert len(masks_rles) == num_instances + else: + masks = None + + if not predictions.has("COLOR"): + if predictions.has("ID"): + colors = self._assign_colors_by_id(predictions) + else: + # ToDo: clean old assign color method and use a default tracker to assign id + detected = [ + _DetectedInstance(classes[i], boxes[i], mask_rle=None, color=colors[i], ttl=8) + for i in range(num_instances) + ] + colors = self._assign_colors(detected) + + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + + if self._instance_mode == ColorMode.IMAGE_BW: + # any() returns uint8 tensor + frame_visualizer.output.reset_image( + frame_visualizer._create_grayscale_image( + (masks.any(dim=0) > 0).numpy() if masks is not None else None + ) + ) + alpha = 0.3 + else: + alpha = 0.5 + + labels = ( + None + if labels is None + else [y[0] for y in filter(lambda x: x[1], zip(labels, visibilities))] + ) # noqa + assigned_colors = ( + None + if colors is None + else [y[0] for y in filter(lambda x: x[1], zip(colors, visibilities))] + ) # noqa + frame_visualizer.overlay_instances( + boxes=None if masks is not None else boxes[visibilities], # boxes are a bit distracting + masks=None if masks is None else masks[visibilities], + labels=labels, + keypoints=None if keypoints is None else keypoints[visibilities], + assigned_colors=assigned_colors, + alpha=alpha, + ) + + return frame_visualizer.output + + def draw_sem_seg(self, frame, sem_seg, area_threshold=None): + """ + Args: + sem_seg (ndarray or Tensor): semantic segmentation of shape (H, W), + each value is the integer label. 
+ area_threshold (Optional[int]): only draw segmentations larger than the threshold + """ + # don't need to do anything special + frame_visualizer = Visualizer(frame, self.metadata) + frame_visualizer.draw_sem_seg(sem_seg, area_threshold=None) + return frame_visualizer.output + + def draw_panoptic_seg_predictions( + self, frame, panoptic_seg, segments_info, area_threshold=None, alpha=0.5 + ): + frame_visualizer = Visualizer(frame, self.metadata) + pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) + + if self._instance_mode == ColorMode.IMAGE_BW: + frame_visualizer.output.reset_image( + frame_visualizer._create_grayscale_image(pred.non_empty_mask()) + ) + + # draw mask for all semantic segments first i.e. "stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + frame_visualizer.draw_binary_mask( + mask, + color=mask_color, + text=self.metadata.stuff_classes[category_idx], + alpha=alpha, + area_threshold=area_threshold, + ) + + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return frame_visualizer.output + # draw mask for all instances second + masks, sinfo = list(zip(*all_instances)) + num_instances = len(masks) + masks_rles = mask_util.encode( + np.asarray(np.asarray(masks).transpose(1, 2, 0), dtype=np.uint8, order="F") + ) + assert len(masks_rles) == num_instances + + category_ids = [x["category_id"] for x in sinfo] + detected = [ + _DetectedInstance(category_ids[i], bbox=None, mask_rle=masks_rles[i], color=None, ttl=8) + for i in range(num_instances) + ] + colors = self._assign_colors(detected) + labels = [self.metadata.thing_classes[k] for k in category_ids] + + frame_visualizer.overlay_instances( + boxes=None, + masks=masks, + labels=labels, + keypoints=None, + assigned_colors=colors, + alpha=alpha, + ) + return frame_visualizer.output + + def _assign_colors(self, instances): + """ + Naive tracking heuristics to assign same color to the same instance, + will update the internal state of tracked instances. + + Returns: + list[tuple[float]]: list of colors. 
+ """ + + # Compute iou with either boxes or masks: + is_crowd = np.zeros((len(instances),), dtype=bool) + if instances[0].bbox is None: + assert instances[0].mask_rle is not None + # use mask iou only when box iou is None + # because box seems good enough + rles_old = [x.mask_rle for x in self._old_instances] + rles_new = [x.mask_rle for x in instances] + ious = mask_util.iou(rles_old, rles_new, is_crowd) + threshold = 0.5 + else: + boxes_old = [x.bbox for x in self._old_instances] + boxes_new = [x.bbox for x in instances] + ious = mask_util.iou(boxes_old, boxes_new, is_crowd) + threshold = 0.6 + if len(ious) == 0: + ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32") + + # Only allow matching instances of the same label: + for old_idx, old in enumerate(self._old_instances): + for new_idx, new in enumerate(instances): + if old.label != new.label: + ious[old_idx, new_idx] = 0 + + matched_new_per_old = np.asarray(ious).argmax(axis=1) + max_iou_per_old = np.asarray(ious).max(axis=1) + + # Try to find match for each old instance: + extra_instances = [] + for idx, inst in enumerate(self._old_instances): + if max_iou_per_old[idx] > threshold: + newidx = matched_new_per_old[idx] + if instances[newidx].color is None: + instances[newidx].color = inst.color + continue + # If an old instance does not match any new instances, + # keep it for the next frame in case it is just missed by the detector + inst.ttl -= 1 + if inst.ttl > 0: + extra_instances.append(inst) + + # Assign random color to newly-detected instances: + for inst in instances: + if inst.color is None: + inst.color = random_color(rgb=True, maximum=1) + self._old_instances = instances[:] + extra_instances + return [d.color for d in instances] + + def _assign_colors_by_id(self, instances: Instances) -> List: + colors = [] + untracked_ids = set(self._assigned_colors.keys()) + for id in instances.ID: + if id in self._assigned_colors: + colors.append(self._color_pool[self._assigned_colors[id]]) + untracked_ids.remove(id) + else: + assert ( + len(self._color_idx_set) >= 1 + ), f"Number of id exceeded maximum, \ + max = {self._max_num_instances}" + idx = self._color_idx_set.pop() + color = self._color_pool[idx] + self._assigned_colors[id] = idx + colors.append(color) + for id in untracked_ids: + self._color_idx_set.add(self._assigned_colors[id]) + del self._assigned_colors[id] + return colors diff --git a/data_processing/detectron2/detectron2/utils/visualizer.py b/data_processing/detectron2/detectron2/utils/visualizer.py new file mode 100644 index 0000000..5d2cc17 --- /dev/null +++ b/data_processing/detectron2/detectron2/utils/visualizer.py @@ -0,0 +1,1267 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import colorsys +import logging +import math +import numpy as np +from enum import Enum, unique +import cv2 +import matplotlib as mpl +import matplotlib.colors as mplc +import matplotlib.figure as mplfigure +import pycocotools.mask as mask_util +import torch +from matplotlib.backends.backend_agg import FigureCanvasAgg +from PIL import Image + +from detectron2.data import MetadataCatalog +from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes +from detectron2.utils.file_io import PathManager + +from .colormap import random_color + +logger = logging.getLogger(__name__) + +__all__ = ["ColorMode", "VisImage", "Visualizer"] + + +_SMALL_OBJECT_AREA_THRESH = 1000 +_LARGE_MASK_AREA_THRESH = 120000 +_OFF_WHITE = (1.0, 1.0, 240.0 / 255) +_BLACK = (0, 0, 0) +_RED = (1.0, 0, 0) + +_KEYPOINT_THRESHOLD = 0.05 + + +@unique +class ColorMode(Enum): + """ + Enum of different color modes to use for instance visualizations. + """ + + IMAGE = 0 + """ + Picks a random color for every instance and overlay segmentations with low opacity. + """ + SEGMENTATION = 1 + """ + Let instances of the same category have similar colors + (from metadata.thing_colors), and overlay them with + high opacity. This provides more attention on the quality of segmentation. + """ + IMAGE_BW = 2 + """ + Same as IMAGE, but convert all areas without masks to gray-scale. + Only available for drawing per-instance mask predictions. + """ + + +class GenericMask: + """ + Attribute: + polygons (list[ndarray]): list[ndarray]: polygons for this mask. + Each ndarray has format [x, y, x, y, ...] + mask (ndarray): a binary mask + """ + + def __init__(self, mask_or_polygons, height, width): + self._mask = self._polygons = self._has_holes = None + self.height = height + self.width = width + + m = mask_or_polygons + if isinstance(m, dict): + # RLEs + assert "counts" in m and "size" in m + if isinstance(m["counts"], list): # uncompressed RLEs + h, w = m["size"] + assert h == height and w == width + m = mask_util.frPyObjects(m, h, w) + self._mask = mask_util.decode(m)[:, :] + return + + if isinstance(m, list): # list[ndarray] + self._polygons = [np.asarray(x).reshape(-1) for x in m] + return + + if isinstance(m, np.ndarray): # assumed to be a binary mask + assert m.shape[1] != 2, m.shape + assert m.shape == ( + height, + width, + ), f"mask shape: {m.shape}, target dims: {height}, {width}" + self._mask = m.astype("uint8") + return + + raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) + + @property + def mask(self): + if self._mask is None: + self._mask = self.polygons_to_mask(self._polygons) + return self._mask + + @property + def polygons(self): + if self._polygons is None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + return self._polygons + + @property + def has_holes(self): + if self._has_holes is None: + if self._mask is not None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + else: + self._has_holes = False # if original format is polygon, does not have holes + return self._has_holes + + def mask_to_polygons(self, mask): + # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level + # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. + # Internal contours (holes) are placed in hierarchy-2. + # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. 
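+        # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but
+        # (contours, hierarchy) in OpenCV 4.x, so contours and hierarchy are taken
+        # from the end of the returned tuple (res[-2], res[-1]) to support both.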
+ mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr + res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) + hierarchy = res[-1] + if hierarchy is None: # empty mask + return [], False + has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 + res = res[-2] + res = [x.flatten() for x in res] + # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. + # We add 0.5 to turn them into real-value coordinate space. A better solution + # would be to first +0.5 and then dilate the returned polygon by 0.5. + res = [x + 0.5 for x in res if len(x) >= 6] + return res, has_holes + + def polygons_to_mask(self, polygons): + rle = mask_util.frPyObjects(polygons, self.height, self.width) + rle = mask_util.merge(rle) + return mask_util.decode(rle)[:, :] + + def area(self): + return self.mask.sum() + + def bbox(self): + p = mask_util.frPyObjects(self.polygons, self.height, self.width) + p = mask_util.merge(p) + bbox = mask_util.toBbox(p) + bbox[2] += bbox[0] + bbox[3] += bbox[1] + return bbox + + +class _PanopticPrediction: + """ + Unify different panoptic annotation/prediction formats + """ + + def __init__(self, panoptic_seg, segments_info, metadata=None): + if segments_info is None: + assert metadata is not None + # If "segments_info" is None, we assume "panoptic_img" is a + # H*W int32 image storing the panoptic_id in the format of + # category_id * label_divisor + instance_id. We reserve -1 for + # VOID label. + label_divisor = metadata.label_divisor + segments_info = [] + for panoptic_label in np.unique(panoptic_seg.numpy()): + if panoptic_label == -1: + # VOID region. + continue + pred_class = panoptic_label // label_divisor + isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() + segments_info.append( + { + "id": int(panoptic_label), + "category_id": int(pred_class), + "isthing": bool(isthing), + } + ) + del metadata + + self._seg = panoptic_seg + + self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info + segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) + areas = areas.numpy() + sorted_idxs = np.argsort(-areas) + self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] + self._seg_ids = self._seg_ids.tolist() + for sid, area in zip(self._seg_ids, self._seg_areas): + if sid in self._sinfo: + self._sinfo[sid]["area"] = float(area) + + def non_empty_mask(self): + """ + Returns: + (H, W) array, a mask for all pixels that have a prediction + """ + empty_ids = [] + for id in self._seg_ids: + if id not in self._sinfo: + empty_ids.append(id) + if len(empty_ids) == 0: + return np.zeros(self._seg.shape, dtype=np.uint8) + assert ( + len(empty_ids) == 1 + ), ">1 ids corresponds to no labels. This is currently not supported" + return (self._seg != empty_ids[0]).numpy().astype(bool) + + def semantic_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or sinfo["isthing"]: + # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. 
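+                # "thing" segments are skipped here as well; instance_masks() yields them instead.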
+ continue + yield (self._seg == sid).numpy().astype(bool), sinfo + + def instance_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or not sinfo["isthing"]: + continue + mask = (self._seg == sid).numpy().astype(bool) + if mask.sum() > 0: + yield mask, sinfo + + +def _create_text_labels(classes, scores, class_names, is_crowd=None): + """ + Args: + classes (list[int] or None): + scores (list[float] or None): + class_names (list[str] or None): + is_crowd (list[bool] or None): + + Returns: + list[str] or None + """ + labels = None + if classes is not None: + if class_names is not None and len(class_names) > 0: + labels = [class_names[i] for i in classes] + else: + labels = [str(i) for i in classes] + if scores is not None: + if labels is None: + labels = ["{:.0f}%".format(s * 100) for s in scores] + else: + labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] + if labels is not None and is_crowd is not None: + labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] + return labels + + +class VisImage: + def __init__(self, img, scale=1.0): + """ + Args: + img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. + scale (float): scale the input image + """ + self.img = img + self.scale = scale + self.width, self.height = img.shape[1], img.shape[0] + self._setup_figure(img) + + def _setup_figure(self, img): + """ + Args: + Same as in :meth:`__init__()`. + + Returns: + fig (matplotlib.pyplot.figure): top level container for all the image plot elements. + ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. + """ + fig = mplfigure.Figure(frameon=False) + self.dpi = fig.get_dpi() + # add a small 1e-2 to avoid precision lost due to matplotlib's truncation + # (https://github.com/matplotlib/matplotlib/issues/15363) + fig.set_size_inches( + (self.width * self.scale + 1e-2) / self.dpi, + (self.height * self.scale + 1e-2) / self.dpi, + ) + self.canvas = FigureCanvasAgg(fig) + # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) + ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) + ax.axis("off") + self.fig = fig + self.ax = ax + self.reset_image(img) + + def reset_image(self, img): + """ + Args: + img: same as in __init__ + """ + img = img.astype("uint8") + self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") + + def save(self, filepath): + """ + Args: + filepath (str): a string that contains the absolute path, including the file name, where + the visualized image will be saved. + """ + self.fig.savefig(filepath) + + def get_image(self): + """ + Returns: + ndarray: + the visualized image of shape (H, W, 3) (RGB) in uint8 type. + The shape is scaled w.r.t the input image using the given `scale` argument. + """ + canvas = self.canvas + s, (width, height) = canvas.print_to_buffer() + # buf = io.BytesIO() # works for cairo backend + # canvas.print_rgba(buf) + # width, height = self.width, self.height + # s = buf.getvalue() + + buffer = np.frombuffer(s, dtype="uint8") + + img_rgba = buffer.reshape(height, width, 4) + rgb, alpha = np.split(img_rgba, [3], axis=2) + return rgb.astype("uint8") + + +class Visualizer: + """ + Visualizer that draws data about detection/segmentation on images. 
+ + It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` + that draw primitive objects to images, as well as high-level wrappers like + `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` + that draw composite data in some pre-defined style. + + Note that the exact visualization style for the high-level wrappers are subject to change. + Style such as color, opacity, label contents, visibility of labels, or even the visibility + of objects themselves (e.g. when the object is too small) may change according + to different heuristics, as long as the results still look visually reasonable. + + To obtain a consistent style, you can implement custom drawing functions with the + abovementioned primitive methods instead. If you need more customized visualization + styles, you can process the data yourself following their format documented in + tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not + intend to satisfy everyone's preference on drawing styles. + + This visualizer focuses on high rendering quality rather than performance. It is not + designed to be used for real-time applications. + """ + + # TODO implement a fast, rasterized version using OpenCV + + def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): + """ + Args: + img_rgb: a numpy array of shape (H, W, C), where H and W correspond to + the height and width of the image respectively. C is the number of + color channels. The image is required to be in RGB format since that + is a requirement of the Matplotlib library. The image is also expected + to be in the range [0, 255]. + metadata (Metadata): dataset metadata (e.g. class names and colors) + instance_mode (ColorMode): defines one of the pre-defined style for drawing + instances on an image. + """ + self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) + if metadata is None: + metadata = MetadataCatalog.get("__nonexist__") + self.metadata = metadata + self.output = VisImage(self.img, scale=scale) + self.cpu_device = torch.device("cpu") + + # too small texts are useless, therefore clamp to 9 + self._default_font_size = max( + np.sqrt(self.output.height * self.output.width) // 90, 10 // scale + ) + self._instance_mode = instance_mode + self.keypoint_threshold = _KEYPOINT_THRESHOLD + + def draw_instance_predictions(self, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + if predictions.has("pred_masks"): + masks = np.asarray(predictions.pred_masks) + masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] + else: + masks = None + + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes + ] + alpha = 0.8 + else: + colors = None + alpha = 0.5 + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image( + self._create_grayscale_image( + (predictions.pred_masks.any(dim=0) > 0).numpy() + if predictions.has("pred_masks") + else None + ) + ) + alpha = 0.3 + + self.overlay_instances( + masks=masks, + boxes=boxes, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + ) + return self.output + + def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): + """ + Draw semantic segmentation predictions/labels. + + Args: + sem_seg (Tensor or ndarray): the segmentation of shape (H, W). + Each value is the integer label of the pixel. + area_threshold (int): segments with less than `area_threshold` are not drawn. + alpha (float): the larger it is, the more opaque the segmentations are. + + Returns: + output (VisImage): image object with visualizations. + """ + if isinstance(sem_seg, torch.Tensor): + sem_seg = sem_seg.numpy() + labels, areas = np.unique(sem_seg, return_counts=True) + sorted_idxs = np.argsort(-areas).tolist() + labels = labels[sorted_idxs] + for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] + except (AttributeError, IndexError): + mask_color = None + + binary_mask = (sem_seg == label).astype(np.uint8) + text = self.metadata.stuff_classes[label] + self.draw_binary_mask( + binary_mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + return self.output + + def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): + """ + Draw panoptic prediction annotations or results. + + Args: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each + segment. + segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. + If it is a ``list[dict]``, each dict contains keys "id", "category_id". + If None, category id of each pixel is computed by + ``pixel // metadata.label_divisor``. + area_threshold (int): stuff segments with less than `area_threshold` are not drawn. + + Returns: + output (VisImage): image object with visualizations. + """ + pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) + + # draw mask for all semantic segments first i.e. 
"stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + text = self.metadata.stuff_classes[category_idx] + self.draw_binary_mask( + mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + + # draw mask for all instances second + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return self.output + masks, sinfo = list(zip(*all_instances)) + category_ids = [x["category_id"] for x in sinfo] + + try: + scores = [x["score"] for x in sinfo] + except KeyError: + scores = None + labels = _create_text_labels( + category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] + ) + + try: + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids + ] + except AttributeError: + colors = None + self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) + + return self.output + + draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility + + def draw_dataset_dict(self, dic): + """ + Draw annotations/segmentations in Detectron2 Dataset format. + + Args: + dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. + + Returns: + output (VisImage): image object with visualizations. + """ + annos = dic.get("annotations", None) + if annos: + if "segmentation" in annos[0]: + masks = [x["segmentation"] for x in annos] + else: + masks = None + if "keypoints" in annos[0]: + keypts = [x["keypoints"] for x in annos] + keypts = np.array(keypts).reshape(len(annos), -1, 3) + else: + keypts = None + + boxes = [ + BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) + if len(x["bbox"]) == 4 + else x["bbox"] + for x in annos + ] + + colors = None + category_ids = [x["category_id"] for x in annos] + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) + for c in category_ids + ] + names = self.metadata.get("thing_classes", None) + labels = _create_text_labels( + category_ids, + scores=None, + class_names=names, + is_crowd=[x.get("iscrowd", 0) for x in annos], + ) + self.overlay_instances( + labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors + ) + + sem_seg = dic.get("sem_seg", None) + if sem_seg is None and "sem_seg_file_name" in dic: + with PathManager.open(dic["sem_seg_file_name"], "rb") as f: + sem_seg = Image.open(f) + sem_seg = np.asarray(sem_seg, dtype="uint8") + if sem_seg is not None: + self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) + + pan_seg = dic.get("pan_seg", None) + if pan_seg is None and "pan_seg_file_name" in dic: + with PathManager.open(dic["pan_seg_file_name"], "rb") as f: + pan_seg = Image.open(f) + pan_seg = np.asarray(pan_seg) + from panopticapi.utils import rgb2id + + pan_seg = rgb2id(pan_seg) + if pan_seg is not None: + segments_info = dic["segments_info"] + pan_seg = torch.tensor(pan_seg) + self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) + return self.output + + def overlay_instances( + self, + *, + boxes=None, + labels=None, + masks=None, + keypoints=None, + assigned_colors=None, + alpha=0.5, + ): + """ + Args: + boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, + or an Nx4 numpy array of XYXY_ABS format for the 
N objects in a single image, + or a :class:`RotatedBoxes`, + or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image, + labels (list[str]): the text to be displayed for each instance. + masks (masks-like object): Supported types are: + + * :class:`detectron2.structures.PolygonMasks`, + :class:`detectron2.structures.BitMasks`. + * list[list[ndarray]]: contains the segmentation masks for all objects in one image. + The first level of the list corresponds to individual instances. The second + level to all the polygon that compose the instance, and the third level + to the polygon coordinates. The third level should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + * list[ndarray]: each ndarray is a binary mask of shape (H, W). + * list[dict]: each dict is a COCO-style RLE. + keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), + where the N is the number of instances and K is the number of keypoints. + The last dimension corresponds to (x, y, visibility or score). + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = 0 + if boxes is not None: + boxes = self._convert_boxes(boxes) + num_instances = len(boxes) + if masks is not None: + masks = self._convert_masks(masks) + if num_instances: + assert len(masks) == num_instances + else: + num_instances = len(masks) + if keypoints is not None: + if num_instances: + assert len(keypoints) == num_instances + else: + num_instances = len(keypoints) + keypoints = self._convert_keypoints(keypoints) + if labels is not None: + assert len(labels) == num_instances + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + if boxes is not None and boxes.shape[1] == 5: + return self.overlay_rotated_instances( + boxes=boxes, labels=labels, assigned_colors=assigned_colors + ) + + # Display in largest to smallest order to reduce occlusion. + areas = None + if boxes is not None: + areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) + elif masks is not None: + areas = np.asarray([x.area() for x in masks]) + + if areas is not None: + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] if boxes is not None else None + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None + assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] + keypoints = keypoints[sorted_idxs] if keypoints is not None else None + + for i in range(num_instances): + color = assigned_colors[i] + if boxes is not None: + self.draw_box(boxes[i], edge_color=color) + + if masks is not None: + for segment in masks[i].polygons: + self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) + + if labels is not None: + # first get a box + if boxes is not None: + x0, y0, x1, y1 = boxes[i] + text_pos = (x0, y0) # if drawing boxes, put text on the box corner. 
+ horiz_align = "left" + elif masks is not None: + # skip small mask without polygon + if len(masks[i].polygons) == 0: + continue + + x0, y0, x1, y1 = masks[i].bbox() + + # draw text in the center (defined by median) when box is not drawn + # median is less sensitive to outliers. + text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] + horiz_align = "center" + else: + continue # drawing the box confidence for keypoints isn't very useful. + # for small objects, draw text at the side to avoid occlusion + instance_area = (y1 - y0) * (x1 - x0) + if ( + instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale + or y1 - y0 < 40 * self.output.scale + ): + if y1 >= self.output.height - 5: + text_pos = (x1, y0) + else: + text_pos = (x0, y1) + + height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) + * 0.5 + * self._default_font_size + ) + self.draw_text( + labels[i], + text_pos, + color=lighter_color, + horizontal_alignment=horiz_align, + font_size=font_size, + ) + + # draw keypoints + if keypoints is not None: + for keypoints_per_instance in keypoints: + self.draw_and_connect_keypoints(keypoints_per_instance) + + return self.output + + def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): + """ + Args: + boxes (ndarray): an Nx5 numpy array of + (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image. + labels (list[str]): the text to be displayed for each instance. + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = len(boxes) + + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + + # Display in largest to smallest order to reduce occlusion. + if boxes is not None: + areas = boxes[:, 2] * boxes[:, 3] + + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + colors = [assigned_colors[idx] for idx in sorted_idxs] + + for i in range(num_instances): + self.draw_rotated_box_with_label( + boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None + ) + + return self.output + + def draw_and_connect_keypoints(self, keypoints): + """ + Draws keypoints of an instance and follows the rules for keypoint connections + to draw lines between appropriate keypoints. This follows color heuristics for + line color. + + Args: + keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints + and the last dimension corresponds to (x, y, probability). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + visible = {} + keypoint_names = self.metadata.get("keypoint_names") + for idx, keypoint in enumerate(keypoints): + + # draw keypoint + x, y, prob = keypoint + if prob > self.keypoint_threshold: + self.draw_circle((x, y), color=_RED) + if keypoint_names: + keypoint_name = keypoint_names[idx] + visible[keypoint_name] = (x, y) + + if self.metadata.get("keypoint_connection_rules"): + for kp0, kp1, color in self.metadata.keypoint_connection_rules: + if kp0 in visible and kp1 in visible: + x0, y0 = visible[kp0] + x1, y1 = visible[kp1] + color = tuple(x / 255.0 for x in color) + self.draw_line([x0, x1], [y0, y1], color=color) + + # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip + # Note that this strategy is specific to person keypoints. + # For other keypoints, it should just do nothing + try: + ls_x, ls_y = visible["left_shoulder"] + rs_x, rs_y = visible["right_shoulder"] + mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 + except KeyError: + pass + else: + # draw line from nose to mid-shoulder + nose_x, nose_y = visible.get("nose", (None, None)) + if nose_x is not None: + self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) + + try: + # draw line from mid-shoulder to mid-hip + lh_x, lh_y = visible["left_hip"] + rh_x, rh_y = visible["right_hip"] + except KeyError: + pass + else: + mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 + self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) + return self.output + + """ + Primitive drawing functions: + """ + + def draw_text( + self, + text, + position, + *, + font_size=None, + color="g", + horizontal_alignment="center", + rotation=0, + ): + """ + Args: + text (str): class label + position (tuple): a tuple of the x and y coordinates to place text on image. + font_size (int, optional): font of the text. If not provided, a font size + proportional to the image width is calculated and used. + color: color of the text. Refer to `matplotlib.colors` for full list + of formats that are accepted. + horizontal_alignment (str): see `matplotlib.text.Text` + rotation: rotation angle in degrees CCW + + Returns: + output (VisImage): image object with text drawn. + """ + if not font_size: + font_size = self._default_font_size + + # since the text background is dark, we don't want the text to be dark + color = np.maximum(list(mplc.to_rgb(color)), 0.2) + color[np.argmax(color)] = max(0.8, np.max(color)) + + x, y = position + self.output.ax.text( + x, + y, + text, + size=font_size * self.output.scale, + family="sans-serif", + bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, + verticalalignment="top", + horizontalalignment=horizontal_alignment, + color=color, + zorder=10, + rotation=rotation, + ) + return self.output + + def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): + """ + Args: + box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 + are the coordinates of the image's top left corner. x1 and y1 are the + coordinates of the image's bottom right corner. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + + Returns: + output (VisImage): image object with box drawn. 
+ """ + x0, y0, x1, y1 = box_coord + width = x1 - x0 + height = y1 - y0 + + linewidth = max(self._default_font_size / 4, 1) + + self.output.ax.add_patch( + mpl.patches.Rectangle( + (x0, y0), + width, + height, + fill=False, + edgecolor=edge_color, + linewidth=linewidth * self.output.scale, + alpha=alpha, + linestyle=line_style, + ) + ) + return self.output + + def draw_rotated_box_with_label( + self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None + ): + """ + Draw a rotated box with label on its top-left corner. + + Args: + rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), + where cnt_x and cnt_y are the center coordinates of the box. + w and h are the width and height of the box. angle represents how + many degrees the box is rotated CCW with regard to the 0-degree box. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + label (string): label for rotated box. It will not be rendered when set to None. + + Returns: + output (VisImage): image object with box drawn. + """ + cnt_x, cnt_y, w, h, angle = rotated_box + area = w * h + # use thinner lines when the box is small + linewidth = self._default_font_size / ( + 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 + ) + + theta = angle * math.pi / 180.0 + c = math.cos(theta) + s = math.sin(theta) + rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] + # x: left->right ; y: top->down + rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] + for k in range(4): + j = (k + 1) % 4 + self.draw_line( + [rotated_rect[k][0], rotated_rect[j][0]], + [rotated_rect[k][1], rotated_rect[j][1]], + color=edge_color, + linestyle="--" if k == 1 else line_style, + linewidth=linewidth, + ) + + if label is not None: + text_pos = rotated_rect[1] # topleft corner + + height_ratio = h / np.sqrt(self.output.height * self.output.width) + label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size + ) + self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) + + return self.output + + def draw_circle(self, circle_coord, color, radius=3): + """ + Args: + circle_coord (list(int) or tuple(int)): contains the x and y coordinates + of the center of the circle. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + radius (int): radius of the circle. + + Returns: + output (VisImage): image object with box drawn. + """ + x, y = circle_coord + self.output.ax.add_patch( + mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) + ) + return self.output + + def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): + """ + Args: + x_data (list[int]): a list containing x values of all the points being drawn. + Length of list should match the length of y_data. + y_data (list[int]): a list containing y values of all the points being drawn. + Length of list should match the length of x_data. + color: color of the line. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + linestyle: style of the line. Refer to `matplotlib.lines.Line2D` + for a full list of formats that are accepted. 
+ linewidth (float or None): width of the line. When it's None, + a default value will be computed and used. + + Returns: + output (VisImage): image object with line drawn. + """ + if linewidth is None: + linewidth = self._default_font_size / 3 + linewidth = max(linewidth, 1) + self.output.ax.add_line( + mpl.lines.Line2D( + x_data, + y_data, + linewidth=linewidth * self.output.scale, + color=color, + linestyle=linestyle, + ) + ) + return self.output + + def draw_binary_mask( + self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 + ): + """ + Args: + binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and + W is the image width. Each value in the array is either a 0 or 1 value of uint8 + type. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + area_threshold (float): a connected component smaller than this area will not be shown. + + Returns: + output (VisImage): image object with mask drawn. + """ + if color is None: + color = random_color(rgb=True, maximum=1) + color = mplc.to_rgb(color) + + has_valid_segment = False + binary_mask = binary_mask.astype("uint8") # opencv needs uint8 + mask = GenericMask(binary_mask, self.output.height, self.output.width) + shape2d = (binary_mask.shape[0], binary_mask.shape[1]) + + if not mask.has_holes: + # draw polygons for regular masks + for segment in mask.polygons: + area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) + if area < (area_threshold or 0): + continue + has_valid_segment = True + segment = segment.reshape(-1, 2) + self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) + else: + # TODO: Use Path/PathPatch to draw vector graphics: + # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha + has_valid_segment = True + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if text is not None and has_valid_segment: + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + self._draw_text_in_mask(binary_mask, text, lighter_color) + return self.output + + def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): + """ + Args: + soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with mask drawn. 
+ """ + if color is None: + color = random_color(rgb=True, maximum=1) + color = mplc.to_rgb(color) + + shape2d = (soft_mask.shape[0], soft_mask.shape[1]) + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = soft_mask * alpha + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if text is not None: + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + binary_mask = (soft_mask > 0.5).astype("uint8") + self._draw_text_in_mask(binary_mask, text, lighter_color) + return self.output + + def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): + """ + Args: + segment: numpy array of shape Nx2, containing all the points in the polygon. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. If not provided, a darker shade + of the polygon color will be used instead. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with polygon drawn. + """ + if edge_color is None: + # make edge color darker than the polygon color + if alpha > 0.8: + edge_color = self._change_color_brightness(color, brightness_factor=-0.7) + else: + edge_color = color + edge_color = mplc.to_rgb(edge_color) + (1,) + + polygon = mpl.patches.Polygon( + segment, + fill=True, + facecolor=mplc.to_rgb(color) + (alpha,), + edgecolor=edge_color, + linewidth=max(self._default_font_size // 15 * self.output.scale, 1), + ) + self.output.ax.add_patch(polygon) + return self.output + + """ + Internal methods: + """ + + def _jitter(self, color): + """ + Randomly modifies given color to produce a slightly different color than the color given. + + Args: + color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color + picked. The values in the list are in the [0.0, 1.0] range. + + Returns: + jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the + color after being jittered. The values in the list are in the [0.0, 1.0] range. + """ + color = mplc.to_rgb(color) + vec = np.random.rand(3) + # better to do it in another color space + vec = vec / np.linalg.norm(vec) * 0.5 + res = np.clip(vec + color, 0, 1) + return tuple(res) + + def _create_grayscale_image(self, mask=None): + """ + Create a grayscale version of the original image. + The colors in masked area, if given, will be kept. + """ + img_bw = self.img.astype("f4").mean(axis=2) + img_bw = np.stack([img_bw] * 3, axis=2) + if mask is not None: + img_bw[mask] = self.img[mask] + return img_bw + + def _change_color_brightness(self, color, brightness_factor): + """ + Depending on the brightness_factor, gives a lighter or darker color i.e. a color with + less or more saturation than the original color. + + Args: + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of + 0 will correspond to no change, a factor in [-1.0, 0) range will result in + a darker color and a factor in (0, 1.0] range will result in a lighter color. + + Returns: + modified_color (tuple[double]): a tuple containing the RGB values of the + modified color. Each value in the tuple is in the [0.0, 1.0] range. 
+ """ + assert brightness_factor >= -1.0 and brightness_factor <= 1.0 + color = mplc.to_rgb(color) + polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) + modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) + modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness + modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness + modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) + return tuple(np.clip(modified_color, 0.0, 1.0)) + + def _convert_boxes(self, boxes): + """ + Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. + """ + if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): + return boxes.tensor.detach().numpy() + else: + return np.asarray(boxes) + + def _convert_masks(self, masks_or_polygons): + """ + Convert different format of masks or polygons to a tuple of masks and polygons. + + Returns: + list[GenericMask]: + """ + + m = masks_or_polygons + if isinstance(m, PolygonMasks): + m = m.polygons + if isinstance(m, BitMasks): + m = m.tensor.numpy() + if isinstance(m, torch.Tensor): + m = m.numpy() + ret = [] + for x in m: + if isinstance(x, GenericMask): + ret.append(x) + else: + ret.append(GenericMask(x, self.output.height, self.output.width)) + return ret + + def _draw_text_in_mask(self, binary_mask, text, color): + """ + Find proper places to draw text given a binary mask. + """ + # TODO sometimes drawn on wrong objects. the heuristics here can improve. + _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) + if stats[1:, -1].size == 0: + return + largest_component_id = np.argmax(stats[1:, -1]) + 1 + + # draw text on the largest component, as well as other very large components. + for cid in range(1, _num_cc): + if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: + # median is more stable than centroid + # center = centroids[largest_component_id] + center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] + self.draw_text(text, center, color=color) + + def _convert_keypoints(self, keypoints): + if isinstance(keypoints, Keypoints): + keypoints = keypoints.tensor + keypoints = np.asarray(keypoints) + return keypoints + + def get_output(self): + """ + Returns: + output (VisImage): the image output containing the visualizations added + to the image. + """ + return self.output diff --git a/data_processing/detectron2/dev/README.md b/data_processing/detectron2/dev/README.md new file mode 100644 index 0000000..bec811a --- /dev/null +++ b/data_processing/detectron2/dev/README.md @@ -0,0 +1,7 @@ + +## Some scripts for developers to use, include: + +- `linter.sh`: lint the codebase before commit. +- `run_{inference,instant}_tests.sh`: run inference/training for a few iterations. + Note that these tests require 2 GPUs. +- `parse_results.sh`: parse results from a log file. diff --git a/data_processing/detectron2/dev/linter.sh b/data_processing/detectron2/dev/linter.sh new file mode 100644 index 0000000..55793e0 --- /dev/null +++ b/data_processing/detectron2/dev/linter.sh @@ -0,0 +1,42 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. + +# cd to detectron2 project root +cd "$(dirname "${BASH_SOURCE[0]}")/.." + +{ + black --version | grep -E "22\." > /dev/null +} || { + echo "Linter requires 'black==22.*' !" + exit 1 +} + +ISORT_VERSION=$(isort --version-number) +if [[ "$ISORT_VERSION" != 4.3* ]]; then + echo "Linter requires isort==4.3.21 !" 
+ exit 1 +fi + +set -v + +echo "Running isort ..." +isort -y -sp . --atomic + +echo "Running black ..." +black -l 100 . + +echo "Running flake8 ..." +if [ -x "$(command -v flake8)" ]; then + flake8 . +else + python3 -m flake8 . +fi + +# echo "Running mypy ..." +# Pytorch does not have enough type annotations +# mypy detectron2/solver detectron2/structures detectron2/config + +echo "Running clang-format ..." +find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i + +command -v arc > /dev/null && arc lint diff --git a/data_processing/detectron2/dev/packaging/README.md b/data_processing/detectron2/dev/packaging/README.md new file mode 100644 index 0000000..0174b7d --- /dev/null +++ b/data_processing/detectron2/dev/packaging/README.md @@ -0,0 +1,17 @@ + +## To build a cu101 wheel for release: + +``` +$ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101 +# inside the container: +# git clone https://github.com/facebookresearch/detectron2/ +# cd detectron2 +# export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.8 +# ./dev/packaging/build_wheel.sh +``` + +## To build all wheels for combinations of CUDA and Python +``` +./dev/packaging/build_all_wheels.sh +./dev/packaging/gen_wheel_index.sh /path/to/wheels +``` diff --git a/data_processing/detectron2/dev/packaging/build_all_wheels.sh b/data_processing/detectron2/dev/packaging/build_all_wheels.sh new file mode 100644 index 0000000..00f9de5 --- /dev/null +++ b/data_processing/detectron2/dev/packaging/build_all_wheels.sh @@ -0,0 +1,65 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. + +[[ -d "dev/packaging" ]] || { + echo "Please run this script at detectron2 root!" + exit 1 +} + +build_one() { + cu=$1 + pytorch_ver=$2 + + case "$cu" in + cu*) + container_name=manylinux-cuda${cu/cu/} + ;; + cpu) + container_name=manylinux-cuda101 + ;; + *) + echo "Unrecognized cu=$cu" + exit 1 + ;; + esac + + echo "Launching container $container_name ..." + container_id="$container_name"_"$cu"_"$pytorch_ver" + + py_versions=(3.7 3.8 3.9) + + for py in "${py_versions[@]}"; do + docker run -itd \ + --name "$container_id" \ + --mount type=bind,source="$(pwd)",target=/detectron2 \ + pytorch/$container_name + + cat </dev/null 2>&1 && pwd )" +. "$script_dir/pkg_helpers.bash" + +echo "Build Settings:" +echo "CU_VERSION: $CU_VERSION" # e.g. cu101 +echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or "" +echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.7 +echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 
1.4 + +setup_cuda +setup_wheel_python + +yum install ninja-build -y +ln -sv /usr/bin/ninja-build /usr/bin/ninja || true + +pip_install pip numpy -U +pip_install "torch==$PYTORCH_VERSION" \ + -f https://download.pytorch.org/whl/"$CU_VERSION"/torch_stable.html + +# use separate directories to allow parallel build +BASE_BUILD_DIR=build/$CU_VERSION-py$PYTHON_VERSION-pt$PYTORCH_VERSION +python setup.py \ + build -b "$BASE_BUILD_DIR" \ + bdist_wheel -b "$BASE_BUILD_DIR/build_dist" -d "wheels/$CU_VERSION/torch$PYTORCH_VERSION" +rm -rf "$BASE_BUILD_DIR" diff --git a/data_processing/detectron2/dev/packaging/gen_install_table.py b/data_processing/detectron2/dev/packaging/gen_install_table.py new file mode 100644 index 0000000..b4c852d --- /dev/null +++ b/data_processing/detectron2/dev/packaging/gen_install_table.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. +# -*- coding: utf-8 -*- + +import argparse + +template = """
+<details><summary> install </summary><pre><code>\
+python -m pip install detectron2{d2_version} -f \\
+  https://dl.fbaipublicfiles.com/detectron2/wheels/{cuda}/torch{torch}/index.html
+</code></pre> </details>"""
+CUDA_SUFFIX = {
+    "11.3": "cu113",
+    "11.1": "cu111",
+    "11.0": "cu110",
+    "10.2": "cu102",
+    "10.1": "cu101",
+    "10.0": "cu100",
+    "9.2": "cu92",
+    "cpu": "cpu",
+}
+
+
+def gen_header(torch_versions):
+    return '<table class="docutils"><tbody><th width="80"> CUDA </th>' + "".join(
+        [
+            '<th valign="bottom" align="left" width="100">torch {}</th>'.format(t)
+            for t in torch_versions
+        ]
+    )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--d2-version", help="detectron2 version number, default to empty")
+    args = parser.parse_args()
+    d2_version = f"=={args.d2_version}" if args.d2_version else ""
+
+    all_versions = (
+        [("1.8", k) for k in ["11.1", "10.2", "10.1", "cpu"]]
+        + [("1.9", k) for k in ["11.1", "10.2", "cpu"]]
+        + [("1.10", k) for k in ["11.3", "11.1", "10.2", "cpu"]]
+    )
+
+    torch_versions = sorted(
+        {k[0] for k in all_versions}, key=lambda x: int(x.split(".")[1]), reverse=True
+    )
+    cuda_versions = sorted(
+        {k[1] for k in all_versions}, key=lambda x: float(x) if x != "cpu" else 0, reverse=True
+    )
+
+    table = gen_header(torch_versions)
+    for cu in cuda_versions:
+        table += f"""<tr><td align="left"> {cu} </td>"""
+        cu_suffix = CUDA_SUFFIX[cu]
+        for torch in torch_versions:
+            if (torch, cu) in all_versions:
+                cell = template.format(d2_version=d2_version, cuda=cu_suffix, torch=torch)
+            else:
+                cell = ""
+            table += f"""<td align="left">{cell} </td> """
+        table += "</tr>"
+    table += "</tbody></table>"
+    print(table)
diff --git a/data_processing/detectron2/dev/packaging/gen_wheel_index.sh b/data_processing/detectron2/dev/packaging/gen_wheel_index.sh
new file mode 100644
index 0000000..ec96a27
--- /dev/null
+++ b/data_processing/detectron2/dev/packaging/gen_wheel_index.sh
@@ -0,0 +1,46 @@
+#!/bin/bash -e
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+
+root=$(readlink -f $1)
+if [[ -z "$root" ]]; then
+  echo "Usage: ./gen_wheel_index.sh /absolute/path/to/wheels"
+  exit
+fi
+
+export LC_ALL=C # reproducible sort
+# NOTE: all sort in this script might not work when xx.10 is released
+
+index=$root/index.html
+
+cd "$root"
+for cu in cpu cu92 cu100 cu101 cu102 cu110 cu111 cu113; do
+  mkdir -p "$root/$cu"
+  cd "$root/$cu"
+  echo "Creating $PWD/index.html ..."
+  # First sort by torch version, then stable sort by d2 version with unique.
+  # As a result, the latest torch version for each d2 version is kept.
+  for whl in $(find -type f -name '*.whl' -printf '%P\n' \
+    | sort -k 1 -r | sort -t '/' -k 2 --stable -r --unique); do
+    echo "<a href=\"${whl/+/%2B}\">$whl</a><br>"
+  done > index.html
+
+
+  for torch in torch*; do
+    cd "$root/$cu/$torch"
+
+    # list all whl for each cuda,torch version
+    echo "Creating $PWD/index.html ..."
+    for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort -r); do
+      echo "<a href=\"${whl/+/%2B}\">$whl</a><br>"
+    done > index.html
+  done
+done
+
+cd "$root"
+# Just list everything:
+echo "Creating $index ..."
+for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort -r); do
+  echo "<a href=\"${whl/+/%2B}\">$whl</a><br>
" +done > "$index" + diff --git a/data_processing/detectron2/dev/packaging/pkg_helpers.bash b/data_processing/detectron2/dev/packaging/pkg_helpers.bash new file mode 100644 index 0000000..550bb6e --- /dev/null +++ b/data_processing/detectron2/dev/packaging/pkg_helpers.bash @@ -0,0 +1,75 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. + +# Function to retry functions that sometimes timeout or have flaky failures +retry () { + $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) +} +# Install with pip a bit more robustly than the default +pip_install() { + retry pip install --progress-bar off "$@" +} + + +setup_cuda() { + # Now work out the CUDA settings + # Like other torch domain libraries, we choose common GPU architectures only. + # See https://github.com/pytorch/pytorch/blob/master/torch/utils/cpp_extension.py + # and https://github.com/pytorch/vision/blob/main/packaging/pkg_helpers.bash for reference. + export FORCE_CUDA=1 + case "$CU_VERSION" in + cu113) + export CUDA_HOME=/usr/local/cuda-11.3/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" + ;; + cu112) + export CUDA_HOME=/usr/local/cuda-11.2/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" + ;; + cu111) + export CUDA_HOME=/usr/local/cuda-11.1/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" + ;; + cu110) + export CUDA_HOME=/usr/local/cuda-11.0/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0+PTX" + ;; + cu102) + export CUDA_HOME=/usr/local/cuda-10.2/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" + ;; + cu101) + export CUDA_HOME=/usr/local/cuda-10.1/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" + ;; + cu100) + export CUDA_HOME=/usr/local/cuda-10.0/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" + ;; + cu92) + export CUDA_HOME=/usr/local/cuda-9.2/ + export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0+PTX" + ;; + cpu) + unset FORCE_CUDA + export CUDA_VISIBLE_DEVICES= + ;; + *) + echo "Unrecognized CU_VERSION=$CU_VERSION" + exit 1 + ;; + esac +} + +setup_wheel_python() { + case "$PYTHON_VERSION" in + 3.7) python_abi=cp37-cp37m ;; + 3.8) python_abi=cp38-cp38 ;; + 3.9) python_abi=cp39-cp39 ;; + *) + echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION" + exit 1 + ;; + esac + export PATH="/opt/python/$python_abi/bin:$PATH" +} diff --git a/data_processing/detectron2/dev/parse_results.sh b/data_processing/detectron2/dev/parse_results.sh new file mode 100644 index 0000000..80768a4 --- /dev/null +++ b/data_processing/detectron2/dev/parse_results.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. + +# A shell script that parses metrics from the log file. +# Make it easier for developers to track performance of models. + +LOG="$1" + +if [[ -z "$LOG" ]]; then + echo "Usage: $0 /path/to/log/file" + exit 1 +fi + +# [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it) +# [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / img per device, on 8 devices) +# [12/15 11:49:03] inference INFO: Total inference pure compute time: ..... 
+ +# training time +trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*') +echo "Training speed: $trainspeed s/it" + +# inference time: there could be multiple inference during training +inferencespeed=$(grep -o 'Total inference pure.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1) +echo "Inference speed: $inferencespeed s/it" + +# [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) loss_classifier: 0.1744 (0.2446) loss_box_reg: 0.0838 (0.1160) loss_mask: 0.2159 (0.2722) loss_objectness: 0.0244 (0.0429) loss_rpn_box_reg: 0.0279 (0.0500) time: 0.4487 (0.4899) data: 0.0076 (0.0975) lr: 0.000200 max mem: 4161 +memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*') +echo "Training memory: $memory MB" + +echo "Easy to copypaste:" +echo "$trainspeed","$inferencespeed","$memory" + +echo "------------------------------" + +# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox +# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl +# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0017,0.0024,0.0017,0.0005,0.0019,0.0011 +# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: segm +# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl +# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0014,0.0021,0.0016,0.0005,0.0016,0.0011 + +echo "COCO Results:" +num_tasks=$(grep -o 'copypaste:.*Task.*' "$LOG" | sort -u | wc -l) +# each task has 3 lines +grep -o 'copypaste:.*' "$LOG" | cut -d ' ' -f 2- | tail -n $((num_tasks * 3)) diff --git a/data_processing/detectron2/dev/run_inference_tests.sh b/data_processing/detectron2/dev/run_inference_tests.sh new file mode 100644 index 0000000..bc9dcc5 --- /dev/null +++ b/data_processing/detectron2/dev/run_inference_tests.sh @@ -0,0 +1,44 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. + +BIN="python tools/train_net.py" +OUTPUT="inference_test_output" +NUM_GPUS=2 + +CFG_LIST=( "${@:1}" ) + +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN \ + --eval-only \ + --num-gpus $NUM_GPUS \ + --config-file "$cfg" \ + OUTPUT_DIR $OUTPUT + rm -rf $OUTPUT +done + + +echo "========================================================================" +echo "Running demo.py ..." +echo "========================================================================" +DEMO_BIN="python demo/demo.py" +COCO_DIR=datasets/coco/val2014 +mkdir -pv $OUTPUT + +set -v + +$DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \ + --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT +rm -rf $OUTPUT diff --git a/data_processing/detectron2/dev/run_instant_tests.sh b/data_processing/detectron2/dev/run_instant_tests.sh new file mode 100644 index 0000000..9fd9ba0 --- /dev/null +++ b/data_processing/detectron2/dev/run_instant_tests.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. 
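+#
+# Quick smoke test: runs a handful of training iterations for each
+# ./configs/quick_schedules/*instant_test.yaml config (or for the configs
+# passed as arguments). As noted in dev/README.md, these tests assume 2 GPUs.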
+ +BIN="python tools/train_net.py" +OUTPUT="instant_test_output" +NUM_GPUS=2 + +CFG_LIST=( "${@:1}" ) +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ + SOLVER.IMS_PER_BATCH $(($NUM_GPUS * 2)) \ + OUTPUT_DIR "$OUTPUT" + rm -rf "$OUTPUT" +done + diff --git a/data_processing/detectron2/docker/Dockerfile b/data_processing/detectron2/docker/Dockerfile new file mode 100644 index 0000000..fae0060 --- /dev/null +++ b/data_processing/detectron2/docker/Dockerfile @@ -0,0 +1,47 @@ +FROM nvidia/cuda:11.1.1-cudnn8-devel-ubuntu18.04 +# use an older system (18.04) to avoid opencv incompatibility (issue#3524) + +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y \ + python3-opencv ca-certificates python3-dev git wget sudo ninja-build +RUN ln -sv /usr/bin/python3 /usr/bin/python + +# create a non-root user +ARG USER_ID=1000 +RUN useradd -m --no-log-init --system --uid ${USER_ID} appuser -g sudo +RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers +USER appuser +WORKDIR /home/appuser + +ENV PATH="/home/appuser/.local/bin:${PATH}" +RUN wget https://bootstrap.pypa.io/pip/3.6/get-pip.py && \ + python3 get-pip.py --user && \ + rm get-pip.py + +# install dependencies +# See https://pytorch.org/ for other options if you use a different version of CUDA +RUN pip install --user tensorboard cmake onnx # cmake from apt-get is too old +RUN pip install --user torch==1.10 torchvision==0.11.1 -f https://download.pytorch.org/whl/cu111/torch_stable.html + +RUN pip install --user 'git+https://github.com/facebookresearch/fvcore' +# install detectron2 +RUN git clone https://github.com/facebookresearch/detectron2 detectron2_repo +# set FORCE_CUDA because during `docker build` cuda is not accessible +ENV FORCE_CUDA="1" +# This will by default build detectron2 for all common cuda architectures and take a lot more time, +# because inside `docker build`, there is no way to tell which architecture will be used. +ARG TORCH_CUDA_ARCH_LIST="Kepler;Kepler+Tesla;Maxwell;Maxwell+Tegra;Pascal;Volta;Turing" +ENV TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}" + +RUN pip install --user -e detectron2_repo + +# Set a fixed model cache directory. 
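+# fvcore caches models downloaded from the model zoo under $FVCORE_CACHE, so
+# pinning it to /tmp lets the "--volume=$HOME/.torch/fvcore_cache:/tmp:rw"
+# mount described in docker/README.md persist those weights across runs.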
+ENV FVCORE_CACHE="/tmp" +WORKDIR /home/appuser/detectron2_repo + +# run detectron2 under user "appuser": +# wget http://images.cocodataset.org/val2017/000000439715.jpg -O input.jpg +# python3 demo/demo.py \ + #--config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + #--input input.jpg --output outputs/ \ + #--opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl diff --git a/data_processing/detectron2/docker/README.md b/data_processing/detectron2/docker/README.md new file mode 100644 index 0000000..ea709f3 --- /dev/null +++ b/data_processing/detectron2/docker/README.md @@ -0,0 +1,45 @@ + +## Use the container (with docker ≥ 19.03) + +``` +cd docker/ +# Build: +docker build --build-arg USER_ID=$UID -t detectron2:v0 . +# Launch (require GPUs): +docker run --gpus all -it \ + --shm-size=8gb --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \ + --name=detectron2 detectron2:v0 + +# Grant docker access to host X server to show images +xhost +local:`docker inspect --format='{{ .Config.Hostname }}' detectron2` +``` + +## Use the container (with docker-compose ≥ 1.28.0) + +Install docker-compose and nvidia-docker-toolkit, then run: +``` +cd docker && USER_ID=$UID docker-compose run detectron2 +``` + +## Use the deployment container (to test C++ examples) +After building the base detectron2 container as above, do: +``` +# Build: +docker build -t detectron2-deploy:v0 -f deploy.Dockerfile . +# Launch: +docker run --gpus all -it detectron2-deploy:v0 +``` + +#### Using a persistent cache directory + +You can prevent models from being re-downloaded on every run, +by storing them in a cache directory. + +To do this, add `--volume=$HOME/.torch/fvcore_cache:/tmp:rw` in the run command. + +## Install new dependencies +Add the following to `Dockerfile` to make persistent changes. +``` +RUN sudo apt-get update && sudo apt-get install -y vim +``` +Or run them in the container to make temporary changes. diff --git a/data_processing/detectron2/docker/deploy.Dockerfile b/data_processing/detectron2/docker/deploy.Dockerfile new file mode 100644 index 0000000..30b4ed7 --- /dev/null +++ b/data_processing/detectron2/docker/deploy.Dockerfile @@ -0,0 +1,32 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# This file defines a container that compiles the C++ examples of detectron2. +# See docker/README.md for usage. + +# Depends on the image produced by "./Dockerfile" +FROM detectron2:v0 + +USER appuser +ENV HOME=/home/appuser +WORKDIR $HOME + +# Let torchvision find libtorch +ENV CMAKE_PREFIX_PATH=$HOME/.local/lib/python3.6/site-packages/torch/ + +RUN sudo apt-get update && sudo apt-get install libopencv-dev --yes + +# install libtorchvision +RUN git clone --branch v0.11.1 https://github.com/pytorch/vision/ +RUN mkdir vision/build && cd vision/build && \ + cmake .. -DCMAKE_INSTALL_PREFIX=$HOME/.local -DCMAKE_BUILD_TYPE=Release -DWITH_CUDA=on -DTORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST && \ + make -j && make install + +# make our installation take effect +ENV CPATH=$HOME/.local/include \ + LIBRARY_PATH=$HOME/.local/lib \ + LD_LIBRARY_PATH=$HOME/.local/lib + + +# build C++ examples of detectron2 +RUN cd detectron2_repo/tools/deploy && mkdir build && cd build && \ + cmake -DTORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST .. 
&& make +# binaries will be available under tools/deploy/build diff --git a/data_processing/detectron2/docker/docker-compose.yml b/data_processing/detectron2/docker/docker-compose.yml new file mode 100644 index 0000000..6665ab4 --- /dev/null +++ b/data_processing/detectron2/docker/docker-compose.yml @@ -0,0 +1,26 @@ +version: "2.3" +services: + detectron2: + build: + context: . + dockerfile: Dockerfile + args: + USER_ID: ${USER_ID:-1000} + deploy: + resources: + reservations: + devices: + - capabilities: + - gpu + shm_size: "8gb" + ulimits: + memlock: -1 + stack: 67108864 + volumes: + - /tmp/.X11-unix:/tmp/.X11-unix:ro + environment: + - DISPLAY=$DISPLAY + - NVIDIA_VISIBLE_DEVICES=all + # Uncomment with proper source to access webcam from docker + # devices: + # - /dev/video0:/dev/video0 diff --git a/data_processing/detectron2/docs/.gitignore b/data_processing/detectron2/docs/.gitignore new file mode 100644 index 0000000..e35d885 --- /dev/null +++ b/data_processing/detectron2/docs/.gitignore @@ -0,0 +1 @@ +_build diff --git a/data_processing/detectron2/docs/Makefile b/data_processing/detectron2/docs/Makefile new file mode 100644 index 0000000..718eddc --- /dev/null +++ b/data_processing/detectron2/docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# Copyright (c) Facebook, Inc. and its affiliates. + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/data_processing/detectron2/docs/README.md b/data_processing/detectron2/docs/README.md new file mode 100644 index 0000000..8531caf --- /dev/null +++ b/data_processing/detectron2/docs/README.md @@ -0,0 +1,15 @@ +# Read the docs: + +The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). +Documents in this directory are not meant to be read on github. + +# Build the docs: + +1. Install detectron2 according to [INSTALL.md](../INSTALL.md). +2. Install additional libraries required to build docs: + - docutils==0.16 + - Sphinx==3.2.0 + - recommonmark==0.6.0 + - sphinx_rtd_theme + +3. Run `make html` from this directory. diff --git a/data_processing/detectron2/docs/_static/css/custom.css b/data_processing/detectron2/docs/_static/css/custom.css new file mode 100644 index 0000000..6c51176 --- /dev/null +++ b/data_processing/detectron2/docs/_static/css/custom.css @@ -0,0 +1,30 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * some extra css to make markdown look similar between github/sphinx + */ + +/* + * Below is for install.md: + */ +.rst-content code { + white-space: pre; + border: 0px; +} + +.rst-content th { + border: 1px solid #e1e4e5; +} + +.rst-content th p { + /* otherwise will be default 24px for regular paragraph */ + margin-bottom: 0px; +} + +.rst-content .line-block { + /* otherwise will be 24px */ + margin-bottom: 0px; +} + +div.section > details { + padding-bottom: 1em; +} diff --git a/data_processing/detectron2/docs/conf.py b/data_processing/detectron2/docs/conf.py new file mode 100644 index 0000000..1fb3e30 --- /dev/null +++ b/data_processing/detectron2/docs/conf.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +# flake8: noqa + +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +from unittest import mock +from sphinx.domains import Domain +from typing import Dict, List, Tuple + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +import sphinx_rtd_theme + + +class GithubURLDomain(Domain): + """ + Resolve certain links in markdown files to github source. + """ + + name = "githuburl" + ROOT = "https://github.com/facebookresearch/detectron2/blob/main/" + LINKED_DOC = ["tutorials/install", "tutorials/getting_started"] + + def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + github_url = None + if not target.endswith("html") and target.startswith("../../"): + url = target.replace("../", "") + github_url = url + if fromdocname in self.LINKED_DOC: + # unresolved links in these docs are all github links + github_url = target + + if github_url is not None: + if github_url.endswith("MODEL_ZOO") or github_url.endswith("README"): + # bug of recommonmark. 
+ # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155 + github_url += ".md" + print("Ref {} resolved to github:{}".format(target, github_url)) + contnode["refuri"] = self.ROOT + github_url + return [("githuburl:any", contnode)] + else: + return [] + + +# to support markdown +from recommonmark.parser import CommonMarkParser + +sys.path.insert(0, os.path.abspath("../")) +os.environ["_DOC_BUILDING"] = "True" +DEPLOY = os.environ.get("READTHEDOCS") == "True" + + +# -- Project information ----------------------------------------------------- + +# fmt: off +try: + import torch # noqa +except ImportError: + for m in [ + "torch", "torchvision", "torch.nn", "torch.nn.parallel", "torch.distributed", "torch.multiprocessing", "torch.autograd", + "torch.autograd.function", "torch.nn.modules", "torch.nn.modules.utils", "torch.utils", "torch.utils.data", "torch.onnx", + "torchvision", "torchvision.ops", + ]: + sys.modules[m] = mock.Mock(name=m) + sys.modules['torch'].__version__ = "1.7" # fake version + HAS_TORCH = False +else: + try: + torch.ops.detectron2 = mock.Mock(name="torch.ops.detectron2") + except: + pass + HAS_TORCH = True + +for m in [ + "cv2", "scipy", "portalocker", "detectron2._C", + "pycocotools", "pycocotools.mask", "pycocotools.coco", "pycocotools.cocoeval", + "google", "google.protobuf", "google.protobuf.internal", "onnx", + "caffe2", "caffe2.proto", "caffe2.python", "caffe2.python.utils", "caffe2.python.onnx", "caffe2.python.onnx.backend", +]: + sys.modules[m] = mock.Mock(name=m) +# fmt: on +sys.modules["cv2"].__version__ = "3.4" + +import detectron2 # isort: skip + +if HAS_TORCH: + from detectron2.utils.env import fixup_module_metadata + + fixup_module_metadata("torch.nn", torch.nn.__dict__) + fixup_module_metadata("torch.utils.data", torch.utils.data.__dict__) + + +project = "detectron2" +copyright = "2019-2020, detectron2 contributors" +author = "detectron2 contributors" + +# The short X.Y version +version = detectron2.__version__ +# The full version, including alpha/beta/rc tags +release = version + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +needs_sphinx = "3.0" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + "recommonmark", + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", +] + +# -- Configurations for plugins ------------ +napoleon_google_docstring = True +napoleon_include_init_with_doc = True +napoleon_include_special_with_doc = True +napoleon_numpy_docstring = False +napoleon_use_rtype = False +autodoc_inherit_docstrings = False +autodoc_member_order = "bysource" + +if DEPLOY: + intersphinx_timeout = 10 +else: + # skip this when building locally + intersphinx_timeout = 0.5 +intersphinx_mapping = { + "python": ("https://docs.python.org/3.7", None), + "numpy": ("https://docs.scipy.org/doc/numpy/", None), + "torch": ("https://pytorch.org/docs/master/", None), +} +# ------------------------- + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +source_suffix = [".rst", ".md"] + +# The master toctree document. +master_doc = "index" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md", "tutorials/README.md"] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + + +# -- Options for HTML output ------------------------------------------------- + +html_theme = "sphinx_rtd_theme" +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] +html_css_files = ["css/custom.css"] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = "detectron2doc" + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. 
+ # + # 'preamble': '', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual") +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "detectron2", + "detectron2 Documentation", + author, + "detectron2", + "One line description of project.", + "Miscellaneous", + ) +] + + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +def autodoc_skip_member(app, what, name, obj, skip, options): + # we hide something deliberately + if getattr(obj, "__HIDE_SPHINX_DOC__", False): + return True + + # Hide some that are deprecated or not intended to be used + HIDDEN = { + "ResNetBlockBase", + "GroupedBatchSampler", + "build_transform_gen", + "apply_transform_gens", + "TransformGen", + "apply_augmentations", + "StandardAugInput", + "build_batch_data_loader", + "draw_panoptic_seg_predictions", + "WarmupCosineLR", + "WarmupMultiStepLR", + "downgrade_config", + "upgrade_config", + "add_export_config", + } + try: + if name in HIDDEN or ( + hasattr(obj, "__doc__") and obj.__doc__.lower().strip().startswith("deprecated") + ): + print("Skipping deprecated object: {}".format(name)) + return True + except: + pass + return skip + + +_PAPER_DATA = { + "resnet": ("1512.03385", "Deep Residual Learning for Image Recognition"), + "fpn": ("1612.03144", "Feature Pyramid Networks for Object Detection"), + "mask r-cnn": ("1703.06870", "Mask R-CNN"), + "faster r-cnn": ( + "1506.01497", + "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks", + ), + "deformconv": ("1703.06211", "Deformable Convolutional Networks"), + "deformconv2": ("1811.11168", "Deformable ConvNets v2: More Deformable, Better Results"), + "panopticfpn": ("1901.02446", "Panoptic Feature Pyramid Networks"), + "retinanet": ("1708.02002", "Focal Loss for Dense Object Detection"), + "cascade r-cnn": ("1712.00726", "Cascade R-CNN: Delving into High Quality Object Detection"), + "lvis": ("1908.03195", "LVIS: A Dataset for Large Vocabulary Instance Segmentation"), + "rrpn": ("1703.01086", "Arbitrary-Oriented Scene Text Detection via Rotation Proposals"), + "imagenet in 1h": ("1706.02677", "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"), + "xception": ("1610.02357", "Xception: Deep Learning with Depthwise Separable Convolutions"), + "mobilenet": ( + "1704.04861", + "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications", + ), + "deeplabv3+": ( + "1802.02611", + "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation", + ), + "dds": ("2003.13678", "Designing Network Design Spaces"), + "scaling": ("2103.06877", "Fast and Accurate Model 
Scaling"), + "fcos": ("2006.09214", "FCOS: A Simple and Strong Anchor-free Object Detector"), + "rethinking-batchnorm": ("2105.07576", 'Rethinking "Batch" in BatchNorm'), + "vitdet": ("2203.16527", "Exploring Plain Vision Transformer Backbones for Object Detection"), + "mvitv2": ( + "2112.01526", + "MViTv2: Improved Multiscale Vision Transformers for Classification and Detection", + ), + "swin": ( + "2103.14030", + "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows", + ), + "omni3d": ( + "2207.10660", + "Omni3D: A Large Benchmark and Model for 3D Object Detection in the Wild", + ), +} + + +def paper_ref_role( + typ: str, + rawtext: str, + text: str, + lineno: int, + inliner, + options: Dict = {}, + content: List[str] = [], +): + """ + Parse :paper:`xxx`. Similar to the "extlinks" sphinx extension. + """ + from docutils import nodes, utils + from sphinx.util.nodes import split_explicit_title + + text = utils.unescape(text) + has_explicit_title, title, link = split_explicit_title(text) + link = link.lower() + if link not in _PAPER_DATA: + inliner.reporter.warning("Cannot find paper " + link) + paper_url, paper_title = "#", link + else: + paper_url, paper_title = _PAPER_DATA[link] + if "/" not in paper_url: + paper_url = "https://arxiv.org/abs/" + paper_url + if not has_explicit_title: + title = paper_title + pnode = nodes.reference(title, title, internal=False, refuri=paper_url) + return [pnode], [] + + +def setup(app): + from recommonmark.transform import AutoStructify + + app.add_domain(GithubURLDomain) + app.connect("autodoc-skip-member", autodoc_skip_member) + app.add_role("paper", paper_ref_role) + app.add_config_value( + "recommonmark_config", + {"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True}, + True, + ) + app.add_transform(AutoStructify) diff --git a/data_processing/detectron2/docs/index.rst b/data_processing/detectron2/docs/index.rst new file mode 100644 index 0000000..8634b7b --- /dev/null +++ b/data_processing/detectron2/docs/index.rst @@ -0,0 +1,14 @@ +.. detectron2 documentation master file, created by + sphinx-quickstart on Sat Sep 21 13:46:45 2019. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to detectron2's documentation! +====================================== + +.. toctree:: + :maxdepth: 2 + + tutorials/index + notes/index + modules/index diff --git a/data_processing/detectron2/docs/modules/checkpoint.rst b/data_processing/detectron2/docs/modules/checkpoint.rst new file mode 100644 index 0000000..449caaf --- /dev/null +++ b/data_processing/detectron2/docs/modules/checkpoint.rst @@ -0,0 +1,7 @@ +detectron2.checkpoint +============================= + +.. automodule:: detectron2.checkpoint + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/config.rst b/data_processing/detectron2/docs/modules/config.rst new file mode 100644 index 0000000..c76913d --- /dev/null +++ b/data_processing/detectron2/docs/modules/config.rst @@ -0,0 +1,18 @@ +detectron2.config +========================= + +Related tutorials: :doc:`../tutorials/configs`, :doc:`../tutorials/extend`. + +.. automodule:: detectron2.config + :members: + :undoc-members: + :show-inheritance: + + +Yaml Config References +----------------- + +.. 
literalinclude:: ../../detectron2/config/defaults.py + :language: python + :linenos: + :lines: 7- diff --git a/data_processing/detectron2/docs/modules/data.rst b/data_processing/detectron2/docs/modules/data.rst new file mode 100644 index 0000000..0d5bd89 --- /dev/null +++ b/data_processing/detectron2/docs/modules/data.rst @@ -0,0 +1,37 @@ +detectron2.data +======================= + +.. autodata:: detectron2.data.DatasetCatalog(dict) + :annotation: + +.. autodata:: detectron2.data.MetadataCatalog(dict) + :annotation: + +.. automodule:: detectron2.data + :members: + :undoc-members: + :show-inheritance: + +detectron2.data.detection\_utils module +--------------------------------------- + +.. automodule:: detectron2.data.detection_utils + :members: + :undoc-members: + :show-inheritance: + +detectron2.data.datasets module +--------------------------------------- + +.. automodule:: detectron2.data.datasets + :members: + :undoc-members: + :show-inheritance: + +detectron2.data.samplers module +--------------------------------------- + +.. automodule:: detectron2.data.samplers + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/data_transforms.rst b/data_processing/detectron2/docs/modules/data_transforms.rst new file mode 100644 index 0000000..1533a43 --- /dev/null +++ b/data_processing/detectron2/docs/modules/data_transforms.rst @@ -0,0 +1,10 @@ +detectron2.data.transforms +==================================== + +Related tutorial: :doc:`../tutorials/augmentation`. + +.. automodule:: detectron2.data.transforms + :members: + :undoc-members: + :show-inheritance: + :imported-members: diff --git a/data_processing/detectron2/docs/modules/engine.rst b/data_processing/detectron2/docs/modules/engine.rst new file mode 100644 index 0000000..7e0d2b0 --- /dev/null +++ b/data_processing/detectron2/docs/modules/engine.rst @@ -0,0 +1,26 @@ +detectron2.engine +========================= + +Related tutorial: :doc:`../tutorials/training`. + +.. automodule:: detectron2.engine + :members: + :undoc-members: + :show-inheritance: + + +detectron2.engine.defaults module +--------------------------------- + +.. automodule:: detectron2.engine.defaults + :members: + :undoc-members: + :show-inheritance: + +detectron2.engine.hooks module +--------------------------------- + +.. automodule:: detectron2.engine.hooks + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/evaluation.rst b/data_processing/detectron2/docs/modules/evaluation.rst new file mode 100644 index 0000000..69bfc4b --- /dev/null +++ b/data_processing/detectron2/docs/modules/evaluation.rst @@ -0,0 +1,7 @@ +detectron2.evaluation +============================= + +.. automodule:: detectron2.evaluation + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/export.rst b/data_processing/detectron2/docs/modules/export.rst new file mode 100644 index 0000000..dcee14f --- /dev/null +++ b/data_processing/detectron2/docs/modules/export.rst @@ -0,0 +1,9 @@ +detectron2.export +========================= + +Related tutorial: :doc:`../tutorials/deployment`. + +.. 
automodule:: detectron2.export + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/fvcore.rst b/data_processing/detectron2/docs/modules/fvcore.rst new file mode 100644 index 0000000..c8bf9f5 --- /dev/null +++ b/data_processing/detectron2/docs/modules/fvcore.rst @@ -0,0 +1,49 @@ +fvcore documentation +==================== + +Detectron2 depends on utilities in +`fvcore `_. +We include part of fvcore documentation here for easier reference. + +fvcore.nn +----------------- + +.. automodule:: fvcore.nn + :members: + :inherited-members: + :undoc-members: + :show-inheritance: + +fvcore.common +--------------------- + +.. automodule:: fvcore.common.checkpoint + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: fvcore.common.config + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: fvcore.common.history_buffer + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: fvcore.common.param_scheduler + :members: + :inherited-members: + :undoc-members: + :show-inheritance: + +.. automodule:: fvcore.common.registry + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: fvcore.common.timer + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/index.rst b/data_processing/detectron2/docs/modules/index.rst new file mode 100644 index 0000000..14b7543 --- /dev/null +++ b/data_processing/detectron2/docs/modules/index.rst @@ -0,0 +1,19 @@ +API Documentation +================== + +.. toctree:: + + checkpoint + config + data + data_transforms + engine + evaluation + layers + model_zoo + modeling + solver + structures + utils + export + fvcore diff --git a/data_processing/detectron2/docs/modules/layers.rst b/data_processing/detectron2/docs/modules/layers.rst new file mode 100644 index 0000000..b43b42a --- /dev/null +++ b/data_processing/detectron2/docs/modules/layers.rst @@ -0,0 +1,7 @@ +detectron2.layers +========================= + +.. automodule:: detectron2.layers + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/model_zoo.rst b/data_processing/detectron2/docs/modules/model_zoo.rst new file mode 100644 index 0000000..5abbad1 --- /dev/null +++ b/data_processing/detectron2/docs/modules/model_zoo.rst @@ -0,0 +1,7 @@ +detectron2.model_zoo +============================ + +.. automodule:: detectron2.model_zoo + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/modeling.rst b/data_processing/detectron2/docs/modules/modeling.rst new file mode 100644 index 0000000..a22c7ed --- /dev/null +++ b/data_processing/detectron2/docs/modules/modeling.rst @@ -0,0 +1,58 @@ +detectron2.modeling +=========================== + +.. automodule:: detectron2.modeling + :members: + :undoc-members: + :show-inheritance: + + +detectron2.modeling.poolers module +--------------------------------------- + +.. automodule:: detectron2.modeling.poolers + :members: + :undoc-members: + :show-inheritance: + + +detectron2.modeling.sampling module +------------------------------------ + +.. automodule:: detectron2.modeling.sampling + :members: + :undoc-members: + :show-inheritance: + + +detectron2.modeling.box_regression module +------------------------------------------ + +.. automodule:: detectron2.modeling.box_regression + :members: + :undoc-members: + :show-inheritance: + + +Model Registries +----------------- + +These are different registries provided in modeling. 
+Each registry provide you the ability to replace it with your customized component, +without having to modify detectron2's code. + +Note that it is impossible to allow users to customize any line of code directly. +Even just to add one line at some place, +you'll likely need to find out the smallest registry which contains that line, +and register your component to that registry. + + +.. autodata:: detectron2.modeling.META_ARCH_REGISTRY +.. autodata:: detectron2.modeling.BACKBONE_REGISTRY +.. autodata:: detectron2.modeling.PROPOSAL_GENERATOR_REGISTRY +.. autodata:: detectron2.modeling.RPN_HEAD_REGISTRY +.. autodata:: detectron2.modeling.ANCHOR_GENERATOR_REGISTRY +.. autodata:: detectron2.modeling.ROI_HEADS_REGISTRY +.. autodata:: detectron2.modeling.ROI_BOX_HEAD_REGISTRY +.. autodata:: detectron2.modeling.ROI_MASK_HEAD_REGISTRY +.. autodata:: detectron2.modeling.ROI_KEYPOINT_HEAD_REGISTRY diff --git a/data_processing/detectron2/docs/modules/solver.rst b/data_processing/detectron2/docs/modules/solver.rst new file mode 100644 index 0000000..59d98c7 --- /dev/null +++ b/data_processing/detectron2/docs/modules/solver.rst @@ -0,0 +1,7 @@ +detectron2.solver +========================= + +.. automodule:: detectron2.solver + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/structures.rst b/data_processing/detectron2/docs/modules/structures.rst new file mode 100644 index 0000000..1369dc0 --- /dev/null +++ b/data_processing/detectron2/docs/modules/structures.rst @@ -0,0 +1,7 @@ +detectron2.structures +============================= + +.. automodule:: detectron2.structures + :members: + :undoc-members: + :show-inheritance: diff --git a/data_processing/detectron2/docs/modules/utils.rst b/data_processing/detectron2/docs/modules/utils.rst new file mode 100644 index 0000000..ab58f2c --- /dev/null +++ b/data_processing/detectron2/docs/modules/utils.rst @@ -0,0 +1,80 @@ +detectron2.utils +======================== + +detectron2.utils.colormap module +-------------------------------- + +.. automodule:: detectron2.utils.colormap + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.comm module +---------------------------- + +.. automodule:: detectron2.utils.comm + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.events module +------------------------------ + +.. automodule:: detectron2.utils.events + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.logger module +------------------------------ + +.. automodule:: detectron2.utils.logger + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.registry module +-------------------------------- + +.. automodule:: detectron2.utils.registry + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.memory module +---------------------------------- + +.. automodule:: detectron2.utils.memory + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.analysis module +---------------------------------- + +.. automodule:: detectron2.utils.analysis + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.visualizer module +---------------------------------- + +.. automodule:: detectron2.utils.visualizer + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.video\_visualizer module +----------------------------------------- + +.. 
automodule:: detectron2.utils.video_visualizer + :members: + :undoc-members: + :show-inheritance: + diff --git a/data_processing/detectron2/docs/notes/benchmarks.md b/data_processing/detectron2/docs/notes/benchmarks.md new file mode 100644 index 0000000..b41588d --- /dev/null +++ b/data_processing/detectron2/docs/notes/benchmarks.md @@ -0,0 +1,196 @@ + +# Benchmarks + +Here we benchmark the training speed of a Mask R-CNN in detectron2, +with some other popular open source Mask R-CNN implementations. + + +### Settings + +* Hardware: 8 NVIDIA V100s with NVLink. +* Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.5, + TensorFlow 1.15.0rc2, Keras 2.2.5, MxNet 1.6.0b20190820. +* Model: an end-to-end R-50-FPN Mask-RCNN model, using the same hyperparameter as the + [Detectron baseline config](https://github.com/facebookresearch/Detectron/blob/master/configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml) + (it does not have scale augmentation). +* Metrics: We use the average throughput in iterations 100-500 to skip GPU warmup time. + Note that for R-CNN-style models, the throughput of a model typically changes during training, because + it depends on the predictions of the model. Therefore this metric is not directly comparable with + "train speed" in model zoo, which is the average speed of the entire training run. + + +### Main Results + +```eval_rst ++-------------------------------+--------------------+ +| Implementation | Throughput (img/s) | ++===============================+====================+ +| |D2| |PT| | 62 | ++-------------------------------+--------------------+ +| mmdetection_ |PT| | 53 | ++-------------------------------+--------------------+ +| maskrcnn-benchmark_ |PT| | 53 | ++-------------------------------+--------------------+ +| tensorpack_ |TF| | 50 | ++-------------------------------+--------------------+ +| simpledet_ |mxnet| | 39 | ++-------------------------------+--------------------+ +| Detectron_ |C2| | 19 | ++-------------------------------+--------------------+ +| `matterport/Mask_RCNN`__ |TF| | 14 | ++-------------------------------+--------------------+ + +.. _maskrcnn-benchmark: https://github.com/facebookresearch/maskrcnn-benchmark/ +.. _tensorpack: https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN +.. _mmdetection: https://github.com/open-mmlab/mmdetection/ +.. _simpledet: https://github.com/TuSimple/simpledet/ +.. _Detectron: https://github.com/facebookresearch/Detectron +__ https://github.com/matterport/Mask_RCNN/ + +.. |D2| image:: https://github.com/facebookresearch/detectron2/raw/main/.github/Detectron2-Logo-Horz.svg?sanitize=true + :height: 15pt + :target: https://github.com/facebookresearch/detectron2/ +.. |PT| image:: https://pytorch.org/assets/images/logo-icon.svg + :width: 15pt + :height: 15pt + :target: https://pytorch.org +.. 
|TF| image:: https://static.nvidiagrid.net/ngc/containers/tensorflow.png + :width: 15pt + :height: 15pt + :target: https://tensorflow.org +.. |mxnet| image:: https://github.com/dmlc/web-data/raw/master/mxnet/image/mxnet_favicon.png + :width: 15pt + :height: 15pt + :target: https://mxnet.apache.org/ +.. |C2| image:: https://caffe2.ai/static/logo.svg + :width: 15pt + :height: 15pt + :target: https://caffe2.ai +``` + + +Details for each implementation: + +* __Detectron2__: with release v0.1.2, run: + ``` + python tools/train_net.py --config-file configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml --num-gpus 8 + ``` + +* __mmdetection__: at commit `b0d845f`, run + ``` + ./tools/dist_train.sh configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py 8 + ``` + +* __maskrcnn-benchmark__: use commit `0ce8f6f` with `sed -i 's/torch.uint8/torch.bool/g' **/*.py; sed -i 's/AT_CHECK/TORCH_CHECK/g' **/*.cu` + to make it compatible with PyTorch 1.5. Then, run training with + ``` + python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml + ``` + The speed we observed is faster than its model zoo, likely due to different software versions. + +* __tensorpack__: at commit `caafda`, `export TF_CUDNN_USE_AUTOTUNE=0`, then run + ``` + mpirun -np 8 ./train.py --config DATA.BASEDIR=/data/coco TRAINER=horovod BACKBONE.STRIDE_1X1=True TRAIN.STEPS_PER_EPOCH=50 --load ImageNet-R50-AlignPadding.npz + ``` + +* __SimpleDet__: at commit `9187a1`, run + ``` + python detection_train.py --config config/mask_r50v1_fpn_1x.py + ``` + +* __Detectron__: run + ``` + python tools/train_net.py --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml + ``` + Note that many of its ops run on CPUs, therefore the performance is limited. + +* __matterport/Mask_RCNN__: at commit `3deaec`, apply the following diff, `export TF_CUDNN_USE_AUTOTUNE=0`, then run + ``` + python coco.py train --dataset=/data/coco/ --model=imagenet + ``` + Note that many small details in this implementation might be different + from Detectron's standards. + +
+ + (diff to make it use the same hyperparameters - click to expand) + + + ```diff + diff --git i/mrcnn/model.py w/mrcnn/model.py + index 62cb2b0..61d7779 100644 + --- i/mrcnn/model.py + +++ w/mrcnn/model.py + @@ -2367,8 +2367,8 @@ class MaskRCNN(): + epochs=epochs, + steps_per_epoch=self.config.STEPS_PER_EPOCH, + callbacks=callbacks, + - validation_data=val_generator, + - validation_steps=self.config.VALIDATION_STEPS, + + #validation_data=val_generator, + + #validation_steps=self.config.VALIDATION_STEPS, + max_queue_size=100, + workers=workers, + use_multiprocessing=True, + diff --git i/mrcnn/parallel_model.py w/mrcnn/parallel_model.py + index d2bf53b..060172a 100644 + --- i/mrcnn/parallel_model.py + +++ w/mrcnn/parallel_model.py + @@ -32,6 +32,7 @@ class ParallelModel(KM.Model): + keras_model: The Keras model to parallelize + gpu_count: Number of GPUs. Must be > 1 + """ + + super().__init__() + self.inner_model = keras_model + self.gpu_count = gpu_count + merged_outputs = self.make_parallel() + diff --git i/samples/coco/coco.py w/samples/coco/coco.py + index 5d172b5..239ed75 100644 + --- i/samples/coco/coco.py + +++ w/samples/coco/coco.py + @@ -81,7 +81,10 @@ class CocoConfig(Config): + IMAGES_PER_GPU = 2 + + # Uncomment to train on 8 GPUs (default is 1) + - # GPU_COUNT = 8 + + GPU_COUNT = 8 + + BACKBONE = "resnet50" + + STEPS_PER_EPOCH = 50 + + TRAIN_ROIS_PER_IMAGE = 512 + + # Number of classes (including background) + NUM_CLASSES = 1 + 80 # COCO has 80 classes + @@ -496,29 +499,10 @@ if __name__ == '__main__': + # *** This training schedule is an example. Update to your needs *** + + # Training - Stage 1 + - print("Training network heads") + model.train(dataset_train, dataset_val, + learning_rate=config.LEARNING_RATE, + epochs=40, + - layers='heads', + - augmentation=augmentation) + - + - # Training - Stage 2 + - # Finetune layers from ResNet stage 4 and up + - print("Fine tune Resnet stage 4 and up") + - model.train(dataset_train, dataset_val, + - learning_rate=config.LEARNING_RATE, + - epochs=120, + - layers='4+', + - augmentation=augmentation) + - + - # Training - Stage 3 + - # Fine tune all layers + - print("Fine tune all layers") + - model.train(dataset_train, dataset_val, + - learning_rate=config.LEARNING_RATE / 10, + - epochs=160, + - layers='all', + + layers='3+', + augmentation=augmentation) + + elif args.command == "evaluate": + ``` + +
diff --git a/data_processing/detectron2/docs/notes/changelog.md b/data_processing/detectron2/docs/notes/changelog.md new file mode 100644 index 0000000..000e9f8 --- /dev/null +++ b/data_processing/detectron2/docs/notes/changelog.md @@ -0,0 +1,48 @@
+# Change Log and Backward Compatibility
+
+### Releases
+See release logs at
+[https://github.com/facebookresearch/detectron2/releases](https://github.com/facebookresearch/detectron2/releases)
+for new updates.
+
+### Backward Compatibility
+
+Due to the research nature of what the library does, there might be backward incompatible changes.
+But we try to reduce users' disruption in the following ways:
+* APIs listed in the [API documentation](https://detectron2.readthedocs.io/modules/index.html), including
+  function/class names, their arguments, and documented class attributes, are considered *stable* unless
+  otherwise noted in the documentation.
+  They are less likely to be broken, but if needed, will trigger a deprecation warning for a reasonable period
+  before getting broken, and will be documented in release logs.
+* Other functions/classes/attributes are considered internal, and are more likely to change.
+  However, we're aware that some of them may already be used by other projects, and in particular we may
+  use them for convenience among projects under `detectron2/projects`.
+  For such APIs, we may treat them as stable APIs and also apply the above strategies.
+  They may be promoted to stable when we're ready.
+* Projects under "detectron2/projects" or imported with "detectron2.projects" are research projects
+  and are all considered experimental.
+* Classes/functions that contain the word "default" or are explicitly documented to produce
+  "default behavior" may change their behaviors when new features are added.
+
+Despite the possible breakage, if a third-party project would like to keep up with the latest updates
+in detectron2, using it as a library will still be less disruptive than forking, because
+the frequency and scope of API changes will be much smaller than code changes.
+
+To see such changes, search for "incompatible changes" in the [release logs](https://github.com/facebookresearch/detectron2/releases).
+
+### Config Version Change Log
+
+Detectron2's config version has not been changed since open source.
+There is no need for an open source user to worry about this.
+
+* v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`.
+* v2: A batch of renames of many configurations before release.
+
+### Silent Regressions in Historical Versions
+
+We list a few silent regressions, since they may silently produce incorrect results and are hard to debug.
+
+* 04/01/2020 - 05/11/2020: Bad accuracy if `TRAIN_ON_PRED_BOXES` is set to True.
+* 03/30/2020 - 04/01/2020: ResNets are not correctly built.
+* 12/19/2019 - 12/26/2019: Using aspect ratio grouping causes a drop in accuracy.
+* - 11/9/2019: Test time augmentation does not predict the last category.
diff --git a/data_processing/detectron2/docs/notes/compatibility.md b/data_processing/detectron2/docs/notes/compatibility.md new file mode 100644 index 0000000..83d93f5 --- /dev/null +++ b/data_processing/detectron2/docs/notes/compatibility.md @@ -0,0 +1,84 @@ +# Compatibility with Other Libraries + +## Compatibility with Detectron (and maskrcnn-benchmark) + +Detectron2 addresses some legacy issues left in Detectron. As a result, their models +are not compatible: +running inference with the same model weights will produce different results in the two code bases. + +The major differences regarding inference are: + +- The height and width of a box with corners (x1, y1) and (x2, y2) is now computed more naturally as + width = x2 - x1 and height = y2 - y1; + In Detectron, a "+ 1" was added both height and width. + + Note that the relevant ops in Caffe2 have [adopted this change of convention](https://github.com/pytorch/pytorch/pull/20550) + with an extra option. + So it is still possible to run inference with a Detectron2-trained model in Caffe2. + + The change in height/width calculations most notably changes: + - encoding/decoding in bounding box regression. + - non-maximum suppression. The effect here is very negligible, though. + +- RPN now uses simpler anchors with fewer quantization artifacts. + + In Detectron, the anchors were quantized and + [do not have accurate areas](https://github.com/facebookresearch/Detectron/issues/227). + In Detectron2, the anchors are center-aligned to feature grid points and not quantized. + +- Classification layers have a different ordering of class labels. + + This involves any trainable parameter with shape (..., num_categories + 1, ...). + In Detectron2, integer labels [0, K-1] correspond to the K = num_categories object categories + and the label "K" corresponds to the special "background" category. + In Detectron, label "0" means background, and labels [1, K] correspond to the K categories. + +- ROIAlign is implemented differently. The new implementation is [available in Caffe2](https://github.com/pytorch/pytorch/pull/23706). + + 1. All the ROIs are shifted by half a pixel compared to Detectron in order to create better image-feature-map alignment. + See `layers/roi_align.py` for details. + To enable the old behavior, use `ROIAlign(aligned=False)`, or `POOLER_TYPE=ROIAlign` instead of + `ROIAlignV2` (the default). + + 1. The ROIs are not required to have a minimum size of 1. + This will lead to tiny differences in the output, but should be negligible. + +- Mask inference function is different. + + In Detectron2, the "paste_mask" function is different and should be more accurate than in Detectron. This change + can improve mask AP on COCO by ~0.5% absolute. + +There are some other differences in training as well, but they won't affect +model-level compatibility. The major ones are: + +- We fixed a [bug](https://github.com/facebookresearch/Detectron/issues/459) in + Detectron, by making `RPN.POST_NMS_TOPK_TRAIN` per-image, rather than per-batch. + The fix may lead to a small accuracy drop for a few models (e.g. keypoint + detection) and will require some parameter tuning to match the Detectron results. +- For simplicity, we change the default loss in bounding box regression to L1 loss, instead of smooth L1 loss. 
+ We have observed that this tends to slightly decrease box AP50 while improving box AP for higher + overlap thresholds (and leading to a slight overall improvement in box AP). +- We interpret the coordinates in COCO bounding box and segmentation annotations + as coordinates in range `[0, width]` or `[0, height]`. The coordinates in + COCO keypoint annotations are interpreted as pixel indices in range `[0, width - 1]` or `[0, height - 1]`. + Note that this affects how flip augmentation is implemented. + + +[This article](https://ppwwyyxx.com/blog/2021/Where-are-Pixels/) +explains more details on the above mentioned issues +about pixels, coordinates, and "+1"s. + + +## Compatibility with Caffe2 + +As mentioned above, despite the incompatibilities with Detectron, the relevant +ops have been implemented in Caffe2. +Therefore, models trained with detectron2 can be converted in Caffe2. +See [Deployment](../tutorials/deployment.md) for the tutorial. + +## Compatibility with TensorFlow + +Most ops are available in TensorFlow, although some tiny differences in +the implementation of resize / ROIAlign / padding need to be addressed. +A working conversion script is provided by [tensorpack Faster R-CNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN/convert_d2) +to run a standard detectron2 model in TensorFlow. diff --git a/data_processing/detectron2/docs/notes/contributing.md b/data_processing/detectron2/docs/notes/contributing.md new file mode 100644 index 0000000..9518123 --- /dev/null +++ b/data_processing/detectron2/docs/notes/contributing.md @@ -0,0 +1 @@ +../../.github/CONTRIBUTING.md \ No newline at end of file diff --git a/data_processing/detectron2/docs/notes/index.rst b/data_processing/detectron2/docs/notes/index.rst new file mode 100644 index 0000000..63cf907 --- /dev/null +++ b/data_processing/detectron2/docs/notes/index.rst @@ -0,0 +1,10 @@ +Notes +====================================== + +.. 
toctree:: + :maxdepth: 2 + + benchmarks + compatibility + contributing + changelog diff --git a/data_processing/detectron2/docs/requirements.txt b/data_processing/detectron2/docs/requirements.txt new file mode 100644 index 0000000..720a1b1 --- /dev/null +++ b/data_processing/detectron2/docs/requirements.txt @@ -0,0 +1,23 @@ +docutils==0.16 +# https://github.com/sphinx-doc/sphinx/commit/7acd3ada3f38076af7b2b5c9f3b60bb9c2587a3d +sphinx==3.2.0 +recommonmark==0.6.0 +sphinx_rtd_theme +# Dependencies here are only those required by import +termcolor +numpy +tqdm +matplotlib +termcolor +yacs +tabulate +cloudpickle +Pillow +future +git+https://github.com/facebookresearch/fvcore.git +https://download.pytorch.org/whl/cpu/torch-1.8.1%2Bcpu-cp37-cp37m-linux_x86_64.whl +https://download.pytorch.org/whl/cpu/torchvision-0.9.1%2Bcpu-cp37-cp37m-linux_x86_64.whl +omegaconf>=2.1.0.dev24 +hydra-core>=1.1.0.dev5 +scipy +timm diff --git a/data_processing/detectron2/docs/tutorials/README.md b/data_processing/detectron2/docs/tutorials/README.md new file mode 100644 index 0000000..1ca9c94 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/README.md @@ -0,0 +1,4 @@ +# Read the docs: + +The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). +Documents in this directory are not meant to be read on github. diff --git a/data_processing/detectron2/docs/tutorials/augmentation.md b/data_processing/detectron2/docs/tutorials/augmentation.md new file mode 100644 index 0000000..7601a08 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/augmentation.md @@ -0,0 +1,186 @@ + +# Data Augmentation + +Augmentation is an important part of training. +Detectron2's data augmentation system aims at addressing the following goals: + +1. Allow augmenting multiple data types together + (e.g., images together with their bounding boxes and masks) +2. Allow applying a sequence of statically-declared augmentation +3. Allow adding custom new data types to augment (rotated bounding boxes, video clips, etc.) +4. Process and manipulate the __operations__ that are applied by augmentations + +The first two features cover most of the common use cases, and is also +available in other libraries such as [albumentations](https://medium.com/pytorch/multi-target-in-albumentations-16a777e9006e). +Supporting other features adds some overhead to detectron2's augmentation API, +which we'll explain in this tutorial. + +This tutorial focuses on how to use augmentations when writing new data loaders, +and how to write new augmentations. +If you use the default data loader in detectron2, it already supports taking a user-provided list of custom augmentations, +as explained in the [Dataloader tutorial](data_loading). 
+ +## Basic Usage + +The basic usage of feature (1) and (2) is like the following: +```python +from detectron2.data import transforms as T +# Define a sequence of augmentations: +augs = T.AugmentationList([ + T.RandomBrightness(0.9, 1.1), + T.RandomFlip(prob=0.5), + T.RandomCrop("absolute", (640, 640)) +]) # type: T.Augmentation + +# Define the augmentation input ("image" required, others optional): +input = T.AugInput(image, boxes=boxes, sem_seg=sem_seg) +# Apply the augmentation: +transform = augs(input) # type: T.Transform +image_transformed = input.image # new image +sem_seg_transformed = input.sem_seg # new semantic segmentation + +# For any extra data that needs to be augmented together, use transform, e.g.: +image2_transformed = transform.apply_image(image2) +polygons_transformed = transform.apply_polygons(polygons) +``` + +Three basic concepts are involved here. They are: +* [T.Augmentation](../modules/data_transforms.html#detectron2.data.transforms.Augmentation) defines the __"policy"__ to modify inputs. + * its `__call__(AugInput) -> Transform` method augments the inputs in-place, and returns the operation that is applied +* [T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform) + implements the actual __operations__ to transform data + * it has methods such as `apply_image`, `apply_coords` that define how to transform each data type +* [T.AugInput](../modules/data_transforms.html#detectron2.data.transforms.AugInput) + stores inputs needed by `T.Augmentation` and how they should be transformed. + This concept is needed for some advanced usage. + Using this class directly should be sufficient for all common use cases, + since extra data not in `T.AugInput` can be augmented using the returned + `transform`, as shown in the above example. + +## Write New Augmentations + +Most 2D augmentations only need to know about the input image. Such augmentation can be implemented easily like this: + +```python +class MyColorAugmentation(T.Augmentation): + def get_transform(self, image): + r = np.random.rand(2) + return T.ColorTransform(lambda x: x * r[0] + r[1] * 10) + +class MyCustomResize(T.Augmentation): + def get_transform(self, image): + old_h, old_w = image.shape[:2] + new_h, new_w = int(old_h * np.random.rand()), int(old_w * 1.5) + return T.ResizeTransform(old_h, old_w, new_h, new_w) + +augs = MyCustomResize() +transform = augs(input) +``` + +In addition to image, any attributes of the given `AugInput` can be used as long +as they are part of the function signature, e.g.: + +```python +class MyCustomCrop(T.Augmentation): + def get_transform(self, image, sem_seg): + # decide where to crop using both image and sem_seg + return T.CropTransform(...) + +augs = MyCustomCrop() +assert hasattr(input, "image") and hasattr(input, "sem_seg") +transform = augs(input) +``` + +New transform operation can also be added by subclassing +[T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform). + +## Advanced Usage + +We give a few examples of advanced usages that +are enabled by our system. +These options can be interesting to new research, +although changing them is often not needed +for standard use cases. + +### Custom transform strategy + +Instead of only returning the augmented data, detectron2's `Augmentation` returns the __operations__ as `T.Transform`. +This allows users to apply custom transform strategy on their data. +We use keypoints data as an example. 
+ +Keypoints are (x, y) coordinates, but they are not so trivial to augment due to the semantic meaning they carry. +Such meaning is only known to the users, therefore users may want to augment them manually +by looking at the returned `transform`. +For example, when an image is horizontally flipped, we'd like to swap the keypoint annotations for "left eye" and "right eye". +This can be done like this (included by default in detectron2's default data loader): +```python +# augs, input are defined as in previous examples +transform = augs(input) # type: T.Transform +keypoints_xy = transform.apply_coords(keypoints_xy) # transform the coordinates + +# get a list of all transforms that were applied +transforms = T.TransformList([transform]).transforms +# check if it is flipped for odd number of times +do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms) % 2 == 1 +if do_hflip: + keypoints_xy = keypoints_xy[flip_indices_mapping] +``` + +As another example, keypoints annotations often have a "visibility" field. +A sequence of augmentations might augment a visible keypoint out of the image boundary (e.g. with cropping), +but then bring it back within the boundary afterwards (e.g. with image padding). +If users decide to label such keypoints "invisible", +then the visibility check has to happen after every transform step. +This can be achieved by: + +```python +transform = augs(input) # type: T.TransformList +assert isinstance(transform, T.TransformList) +for t in transform.transforms: + keypoints_xy = t.apply_coords(keypoints_xy) + visibility &= (keypoints_xy >= [0, 0] & keypoints_xy <= [W, H]).all(axis=1) + +# btw, detectron2's `transform_keypoint_annotations` function chooses to label such keypoints "visible": +# keypoints_xy = transform.apply_coords(keypoints_xy) +# visibility &= (keypoints_xy >= [0, 0] & keypoints_xy <= [W, H]).all(axis=1) +``` + + +### Geometrically invert the transform +If images are pre-processed by augmentations before inference, the predicted results +such as segmentation masks are localized on the augmented image. +We'd like to invert the applied augmentation with the [inverse()](../modules/data_transforms.html#detectron2.data.transforms.Transform.inverse) +API, to obtain results on the original image: +```python +transform = augs(input) +pred_mask = make_prediction(input.image) +inv_transform = transform.inverse() +pred_mask_orig = inv_transform.apply_segmentation(pred_mask) +``` + +### Add new data types + +[T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform) +supports a few common data types to transform, including images, coordinates, masks, boxes, polygons. +It allows registering new data types, e.g.: +```python +@T.HFlipTransform.register_type("rotated_boxes") +def func(flip_transform: T.HFlipTransform, rotated_boxes: Any): + # do the work + return flipped_rotated_boxes + +t = HFlipTransform(width=800) +transformed_rotated_boxes = t.apply_rotated_boxes(rotated_boxes) # func will be called +``` + +### Extend T.AugInput + +An augmentation can only access attributes available in the given input. +[T.AugInput](../modules/data_transforms.html#detectron2.data.transforms.StandardAugInput) defines "image", "boxes", "sem_seg", +which are sufficient for common augmentation strategies to decide how to augment. +If not, a custom implementation is needed. + +By re-implement the "transform()" method in AugInput, it is also possible to +augment different fields in ways that are dependent on each other. +Such use case is uncommon (e.g. 
post-process bounding box based on augmented masks), but allowed by the system. + diff --git a/data_processing/detectron2/docs/tutorials/builtin_datasets.md b/data_processing/detectron2/docs/tutorials/builtin_datasets.md new file mode 100644 index 0000000..0ba8242 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/builtin_datasets.md @@ -0,0 +1 @@ +../../datasets/README.md \ No newline at end of file diff --git a/data_processing/detectron2/docs/tutorials/configs.md b/data_processing/detectron2/docs/tutorials/configs.md new file mode 100644 index 0000000..49538d0 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/configs.md @@ -0,0 +1,62 @@ +# Yacs Configs + +Detectron2 provides a key-value based config system that can be +used to obtain standard, common behaviors. + +This system uses YAML and [yacs](https://github.com/rbgirshick/yacs). +Yaml is a very limited language, +so we do not expect all features in detectron2 to be available through configs. +If you need something that's not available in the config space, +please write code using detectron2's API. + +With the introduction of a more powerful [LazyConfig system](lazyconfigs.md), +we no longer add functionality / new keys to the Yacs/Yaml-based config system. + +### Basic Usage + +Some basic usage of the `CfgNode` object is shown here. See more in [documentation](../modules/config.html#detectron2.config.CfgNode). +```python +from detectron2.config import get_cfg +cfg = get_cfg() # obtain detectron2's default config +cfg.xxx = yyy # add new configs for your own custom components +cfg.merge_from_file("my_cfg.yaml") # load values from a file + +cfg.merge_from_list(["MODEL.WEIGHTS", "weights.pth"]) # can also load values from a list of str +print(cfg.dump()) # print formatted configs +with open("output.yaml", "w") as f: + f.write(cfg.dump()) # save config to file +``` + +In addition to the basic Yaml syntax, the config file can +define a `_BASE_: base.yaml` field, which will load a base config file first. +Values in the base config will be overwritten in sub-configs, if there are any conflicts. +We provided several base configs for standard model architectures. + +Many builtin tools in detectron2 accept command line config overwrite: +Key-value pairs provided in the command line will overwrite the existing values in the config file. +For example, [demo.py](../../demo/demo.py) can be used with +```sh +./demo.py --config-file config.yaml [--other-options] \ + --opts MODEL.WEIGHTS /path/to/weights INPUT.MIN_SIZE_TEST 1000 +``` + +To see a list of available configs in detectron2 and what they mean, +check [Config References](../modules/config.html#config-references) + +### Configs in Projects + +A project that lives outside the detectron2 library may define its own configs, which will need to be added +for the project to be functional, e.g.: +```python +from detectron2.projects.point_rend import add_pointrend_config +cfg = get_cfg() # obtain detectron2's default config +add_pointrend_config(cfg) # add pointrend's default config +# ... ... +``` + +### Best Practice with Configs + +1. Treat the configs you write as "code": avoid copying them or duplicating them; use `_BASE_` + to share common parts between configs. + +2. Keep the configs you write simple: don't include keys that do not affect the experimental setting. 
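+
+To make the `_BASE_` sharing described above concrete, here is a minimal sketch; the file names and values are made up for illustration (`MODEL.ROI_HEADS.NUM_CLASSES` and `SOLVER.BASE_LR` are existing default keys), and detectron2's `CfgNode` resolves `_BASE_` when merging from a file:
+
+```python
+# A minimal sketch of sharing common settings through _BASE_; file names and
+# values are placeholders for illustration.
+from detectron2.config import get_cfg
+
+with open("base.yaml", "w") as f:
+    f.write("MODEL:\n  ROI_HEADS:\n    NUM_CLASSES: 80\n")
+with open("experiment.yaml", "w") as f:
+    f.write("_BASE_: base.yaml\nSOLVER:\n  BASE_LR: 0.01\n")
+
+cfg = get_cfg()
+cfg.merge_from_file("experiment.yaml")   # loads base.yaml first, then applies overrides
+print(cfg.MODEL.ROI_HEADS.NUM_CLASSES, cfg.SOLVER.BASE_LR)
+```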
diff --git a/data_processing/detectron2/docs/tutorials/data_loading.md b/data_processing/detectron2/docs/tutorials/data_loading.md new file mode 100644 index 0000000..1d2769f --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/data_loading.md @@ -0,0 +1,95 @@ + +# Dataloader + +Dataloader is the component that provides data to models. +A dataloader usually (but not necessarily) takes raw information from [datasets](./datasets.md), +and process them into a format needed by the model. + +## How the Existing Dataloader Works + +Detectron2 contains a builtin data loading pipeline. +It's good to understand how it works, in case you need to write a custom one. + +Detectron2 provides two functions +[build_detection_{train,test}_loader](../modules/data.html#detectron2.data.build_detection_train_loader) +that create a default data loader from a given config. +Here is how `build_detection_{train,test}_loader` work: + +1. It takes the name of a registered dataset (e.g., "coco_2017_train") and loads a `list[dict]` representing the dataset items + in a lightweight format. These dataset items are not yet ready to be used by the model (e.g., images are + not loaded into memory, random augmentations have not been applied, etc.). + Details about the dataset format and dataset registration can be found in + [datasets](./datasets.md). +2. Each dict in this list is mapped by a function ("mapper"): + * Users can customize this mapping function by specifying the "mapper" argument in + `build_detection_{train,test}_loader`. The default mapper is [DatasetMapper](../modules/data.html#detectron2.data.DatasetMapper). + * The output format of the mapper can be arbitrary, as long as it is accepted by the consumer of this data loader (usually the model). + The outputs of the default mapper, after batching, follow the default model input format documented in + [Use Models](./models.html#model-input-format). + * The role of the mapper is to transform the lightweight representation of a dataset item into a format + that is ready for the model to consume (including, e.g., read images, perform random data augmentation and convert to torch Tensors). + If you would like to perform custom transformations to data, you often want a custom mapper. +3. The outputs of the mapper are batched (simply into a list). +4. This batched data is the output of the data loader. Typically, it's also the input of + `model.forward()`. + + +## Write a Custom Dataloader + +Using a different "mapper" with `build_detection_{train,test}_loader(mapper=)` works for most use cases +of custom data loading. 
+For example, if you want to resize all images to a fixed size for training, use: + +```python +import detectron2.data.transforms as T +from detectron2.data import DatasetMapper # the default mapper +dataloader = build_detection_train_loader(cfg, + mapper=DatasetMapper(cfg, is_train=True, augmentations=[ + T.Resize((800, 800)) + ])) +# use this dataloader instead of the default +``` +If the arguments of the default [DatasetMapper](../modules/data.html#detectron2.data.DatasetMapper) +does not provide what you need, you may write a custom mapper function and use it instead, e.g.: + +```python +from detectron2.data import detection_utils as utils + # Show how to implement a minimal mapper, similar to the default DatasetMapper +def mapper(dataset_dict): + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + # can use other ways to read image + image = utils.read_image(dataset_dict["file_name"], format="BGR") + # See "Data Augmentation" tutorial for details usage + auginput = T.AugInput(image) + transform = T.Resize((800, 800))(auginput) + image = torch.from_numpy(auginput.image.transpose(2, 0, 1)) + annos = [ + utils.transform_instance_annotations(annotation, [transform], image.shape[1:]) + for annotation in dataset_dict.pop("annotations") + ] + return { + # create the format that the model expects + "image": image, + "instances": utils.annotations_to_instances(annos, image.shape[1:]) + } +dataloader = build_detection_train_loader(cfg, mapper=mapper) +``` + +If you want to change not only the mapper (e.g., in order to implement different sampling or batching logic), +`build_detection_train_loader` won't work and you will need to write a different data loader. +The data loader is simply a +python iterator that produces [the format](./models.md) that the model accepts. +You can implement it using any tools you like. + +No matter what to implement, it's recommended to +check out [API documentation of detectron2.data](../modules/data) to learn more about the APIs of +these functions. + +## Use a Custom Dataloader + +If you use [DefaultTrainer](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer), +you can overwrite its `build_{train,test}_loader` method to use your own dataloader. +See the [deeplab dataloader](../../projects/DeepLab/train_net.py) +for an example. + +If you write your own training loop, you can plug in your data loader easily. diff --git a/data_processing/detectron2/docs/tutorials/datasets.md b/data_processing/detectron2/docs/tutorials/datasets.md new file mode 100644 index 0000000..91103f6 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/datasets.md @@ -0,0 +1,290 @@ +# Use Custom Datasets + +This document explains how the dataset APIs +([DatasetCatalog](../modules/data.html#detectron2.data.DatasetCatalog), [MetadataCatalog](../modules/data.html#detectron2.data.MetadataCatalog)) +work, and how to use them to add custom datasets. + +Datasets that have builtin support in detectron2 are listed in [builtin datasets](builtin_datasets.md). +If you want to use a custom dataset while also reusing detectron2's data loaders, +you will need to: + +1. __Register__ your dataset (i.e., tell detectron2 how to obtain your dataset). +2. Optionally, __register metadata__ for your dataset. + +Next, we explain the above two concepts in detail. 
+ +The [Colab tutorial](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +has a live example of how to register and train on a dataset of custom formats. + +### Register a Dataset + +To let detectron2 know how to obtain a dataset named "my_dataset", users need to implement +a function that returns the items in your dataset and then tell detectron2 about this +function: +```python +def my_dataset_function(): + ... + return list[dict] in the following format + +from detectron2.data import DatasetCatalog +DatasetCatalog.register("my_dataset", my_dataset_function) +# later, to access the data: +data: List[Dict] = DatasetCatalog.get("my_dataset") +``` + +Here, the snippet associates a dataset named "my_dataset" with a function that returns the data. +The function must return the same data (with same order) if called multiple times. +The registration stays effective until the process exits. + +The function can do arbitrary things and should return the data in `list[dict]`, each dict in either +of the following formats: +1. Detectron2's standard dataset dict, described below. This will make it work with many other builtin + features in detectron2, so it's recommended to use it when it's sufficient. +2. Any custom format. You can also return arbitrary dicts in your own format, + such as adding extra keys for new tasks. + Then you will need to handle them properly downstream as well. + See below for more details. + +#### Standard Dataset Dicts + +For standard tasks +(instance detection, instance/semantic/panoptic segmentation, keypoint detection), +we load the original dataset into `list[dict]` with a specification similar to COCO's annotations. +This is our standard representation for a dataset. + +Each dict contains information about one image. +The dict may have the following fields, +and the required fields vary based on what the dataloader or the task needs (see more below). + +```eval_rst +.. list-table:: + :header-rows: 1 + + * - Task + - Fields + * - Common + - file_name, height, width, image_id + + * - Instance detection/segmentation + - annotations + + * - Semantic segmentation + - sem_seg_file_name + + * - Panoptic segmentation + - pan_seg_file_name, segments_info +``` + ++ `file_name`: the full path to the image file. ++ `height`, `width`: integer. The shape of the image. ++ `image_id` (str or int): a unique id that identifies this image. Required by many + evaluators to identify the images, but a dataset may use it for different purposes. ++ `annotations` (list[dict]): Required by __instance detection/segmentation or keypoint detection__ tasks. + Each dict corresponds to annotations of one instance in this image, and + may contain the following keys: + + `bbox` (list[float], required): list of 4 numbers representing the bounding box of the instance. + + `bbox_mode` (int, required): the format of bbox. It must be a member of + [structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode). + Currently supports: `BoxMode.XYXY_ABS`, `BoxMode.XYWH_ABS`. + + `category_id` (int, required): an integer in the range [0, num_categories-1] representing the category label. + The value num_categories is reserved to represent the "background" category, if applicable. + + `segmentation` (list[list[float]] or dict): the segmentation mask of the instance. + + If `list[list[float]]`, it represents a list of polygons, one for each connected component + of the object. 
Each `list[float]` is one simple polygon in the format of `[x1, y1, ..., xn, yn]` (n≥3). + The Xs and Ys are absolute coordinates in unit of pixels. + + If `dict`, it represents the per-pixel segmentation mask in COCO's compressed RLE format. + The dict should have keys "size" and "counts". You can convert a uint8 segmentation mask of 0s and + 1s into such dict by `pycocotools.mask.encode(np.asarray(mask, order="F"))`. + `cfg.INPUT.MASK_FORMAT` must be set to `bitmask` if using the default data loader with such format. + + `keypoints` (list[float]): in the format of [x1, y1, v1,..., xn, yn, vn]. + v[i] means the [visibility](http://cocodataset.org/#format-data) of this keypoint. + `n` must be equal to the number of keypoint categories. + The Xs and Ys are absolute real-value coordinates in range [0, W or H]. + + (Note that the keypoint coordinates in COCO format are integers in range [0, W-1 or H-1], which is different + from our standard format. Detectron2 adds 0.5 to COCO keypoint coordinates to convert them from discrete + pixel indices to floating point coordinates.) + + `iscrowd`: 0 (default) or 1. Whether this instance is labeled as COCO's "crowd + region". Don't include this field if you don't know what it means. + + If `annotations` is an empty list, it means the image is labeled to have no objects. + Such images will by default be removed from training, + but can be included using `DATALOADER.FILTER_EMPTY_ANNOTATIONS`. + ++ `sem_seg_file_name` (str): + The full path to the semantic segmentation ground truth file. + It should be a grayscale image whose pixel values are integer labels. ++ `pan_seg_file_name` (str): + The full path to panoptic segmentation ground truth file. + It should be an RGB image whose pixel values are integer ids encoded using the + [panopticapi.utils.id2rgb](https://github.com/cocodataset/panopticapi/) function. + The ids are defined by `segments_info`. + If an id does not appear in `segments_info`, the pixel is considered unlabeled + and is usually ignored in training & evaluation. ++ `segments_info` (list[dict]): defines the meaning of each id in panoptic segmentation ground truth. + Each dict has the following keys: + + `id` (int): integer that appears in the ground truth image. + + `category_id` (int): an integer in the range [0, num_categories-1] representing the category label. + + `iscrowd`: 0 (default) or 1. Whether this instance is labeled as COCO's "crowd region". + + +```eval_rst + +.. note:: + + The PanopticFPN model does not use the panoptic segmentation + format defined here, but a combination of both instance segmentation and semantic segmentation data + format. See :doc:`builtin_datasets` for instructions on COCO. + +``` + +Fast R-CNN (with pre-computed proposals) models are rarely used today. +To train a Fast R-CNN, the following extra keys are needed: + ++ `proposal_boxes` (array): 2D numpy array with shape (K, 4) representing K precomputed proposal boxes for this image. ++ `proposal_objectness_logits` (array): numpy array with shape (K, ), which corresponds to the objectness + logits of proposals in 'proposal_boxes'. ++ `proposal_bbox_mode` (int): the format of the precomputed proposal bbox. + It must be a member of + [structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode). + Default is `BoxMode.XYXY_ABS`. 
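+
+To tie the fields above together, here is a minimal sketch of a dataset function that returns standard dataset dicts for instance detection and registers them; the file path, image size, and box values are made-up placeholders for illustration:
+
+```python
+# A minimal sketch of returning standard dataset dicts for instance detection;
+# the file name, image size and box coordinates are placeholders.
+import os
+from detectron2.data import DatasetCatalog
+from detectron2.structures import BoxMode
+
+def my_dataset_function():
+    return [
+        {
+            "file_name": os.path.join("path/to/image/dir", "0001.jpg"),  # placeholder path
+            "image_id": 0,
+            "height": 480,
+            "width": 640,
+            "annotations": [
+                {
+                    "bbox": [100.0, 120.0, 200.0, 260.0],   # XYXY, absolute pixel coordinates
+                    "bbox_mode": BoxMode.XYXY_ABS,
+                    "category_id": 0,
+                    "iscrowd": 0,
+                }
+            ],
+        }
+    ]
+
+DatasetCatalog.register("my_dataset", my_dataset_function)
+```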
+ + + +#### Custom Dataset Dicts for New Tasks + +In the `list[dict]` that your dataset function returns, the dictionary can also have __arbitrary custom data__. +This will be useful for a new task that needs extra information not covered +by the standard dataset dicts. In this case, you need to make sure the downstream code can handle your data +correctly. Usually this requires writing a new `mapper` for the dataloader (see [Use Custom Dataloaders](./data_loading.md)). + +When designing a custom format, note that all dicts are stored in memory +(sometimes serialized and with multiple copies). +To save memory, each dict is meant to contain __small__ but sufficient information +about each sample, such as file names and annotations. +Loading full samples typically happens in the data loader. + +For attributes shared among the entire dataset, use `Metadata` (see below). +To avoid extra memory, do not save such information inside each sample. + +### "Metadata" for Datasets + +Each dataset is associated with some metadata, accessible through +`MetadataCatalog.get(dataset_name).some_metadata`. +Metadata is a key-value mapping that contains information that's shared among +the entire dataset, and usually is used to interpret what's in the dataset, e.g., +names of classes, colors of classes, root of files, etc. +This information will be useful for augmentation, evaluation, visualization, logging, etc. +The structure of metadata depends on what is needed from the corresponding downstream code. + +If you register a new dataset through `DatasetCatalog.register`, +you may also want to add its corresponding metadata through +`MetadataCatalog.get(dataset_name).some_key = some_value`, to enable any features that need the metadata. +You can do it like this (using the metadata key "thing_classes" as an example): + +```python +from detectron2.data import MetadataCatalog +MetadataCatalog.get("my_dataset").thing_classes = ["person", "dog"] +``` + +Here is a list of metadata keys that are used by builtin features in detectron2. +If you add your own dataset without these metadata, some features may be +unavailable to you: + +* `thing_classes` (list[str]): Used by all instance detection/segmentation tasks. + A list of names for each instance/thing category. + If you load a COCO format dataset, it will be automatically set by the function `load_coco_json`. + +* `thing_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each thing category. + Used for visualization. If not given, random colors will be used. + +* `stuff_classes` (list[str]): Used by semantic and panoptic segmentation tasks. + A list of names for each stuff category. + +* `stuff_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each stuff category. + Used for visualization. If not given, random colors are used. + +* `ignore_label` (int): Used by semantic and panoptic segmentation tasks. Pixels in ground-truth + annotations with this category label should be ignored in evaluation. Typically these are "unlabeled" + pixels. + +* `keypoint_names` (list[str]): Used by keypoint detection. A list of names for each keypoint. + +* `keypoint_flip_map` (list[tuple[str]]): Used by keypoint detection. A list of pairs of names, + where each pair are the two keypoints that should be flipped if the image is + flipped horizontally during augmentation. +* `keypoint_connection_rules`: list[tuple(str, str, (r, g, b))]. 
Each tuple specifies a pair of keypoints + that are connected and the color (in [0, 255]) to use for the line between them when visualized. + +Some additional metadata that are specific to the evaluation of certain datasets (e.g. COCO): + +* `thing_dataset_id_to_contiguous_id` (dict[int->int]): Used by all instance detection/segmentation tasks in the COCO format. + A mapping from instance class ids in the dataset to contiguous ids in range [0, #class). + Will be automatically set by the function `load_coco_json`. + +* `stuff_dataset_id_to_contiguous_id` (dict[int->int]): Used when generating prediction json files for + semantic/panoptic segmentation. + A mapping from semantic segmentation class ids in the dataset + to contiguous ids in [0, num_categories). It is useful for evaluation only. + +* `json_file`: The COCO annotation json file. Used by COCO evaluation for COCO-format datasets. +* `panoptic_root`, `panoptic_json`: Used by COCO-format panoptic evaluation. +* `evaluator_type`: Used by the builtin main training script to select + evaluator. Don't use it in a new training script. + You can just provide the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator) + for your dataset directly in your main script. + +```eval_rst +.. note:: + + In recognition, sometimes we use the term "thing" for instance-level tasks, + and "stuff" for semantic segmentation tasks. + Both are used in panoptic segmentation tasks. + For background on the concept of "thing" and "stuff", see + `On Seeing Stuff: The Perception of Materials by Humans and Machines + `_. +``` + +### Register a COCO Format Dataset + +If your instance-level (detection, segmentation, keypoint) dataset is already a json file in the COCO format, +the dataset and its associated metadata can be registered easily with: +```python +from detectron2.data.datasets import register_coco_instances +register_coco_instances("my_dataset", {}, "json_annotation.json", "path/to/image/dir") +``` + +If your dataset is in COCO format but need to be further processed, or has extra custom per-instance annotations, +the [load_coco_json](../modules/data.html#detectron2.data.datasets.load_coco_json) +function might be useful. + +### Update the Config for New Datasets + +Once you've registered the dataset, you can use the name of the dataset (e.g., "my_dataset" in +example above) in `cfg.DATASETS.{TRAIN,TEST}`. +There are other configs you might want to change to train or evaluate on new datasets: + +* `MODEL.ROI_HEADS.NUM_CLASSES` and `MODEL.RETINANET.NUM_CLASSES` are the number of thing classes + for R-CNN and RetinaNet models, respectively. +* `MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS` sets the number of keypoints for Keypoint R-CNN. + You'll also need to set [Keypoint OKS](http://cocodataset.org/#keypoints-eval) + with `TEST.KEYPOINT_OKS_SIGMAS` for evaluation. +* `MODEL.SEM_SEG_HEAD.NUM_CLASSES` sets the number of stuff classes for Semantic FPN & Panoptic FPN. +* `TEST.DETECTIONS_PER_IMAGE` controls the maximum number of objects to be detected. + Set it to a larger number if test images may contain >100 objects. +* If you're training Fast R-CNN (with precomputed proposals), `DATASETS.PROPOSAL_FILES_{TRAIN,TEST}` + need to match the datasets. The format of proposal files are documented + [here](../modules/data.html#detectron2.data.load_proposals_into_dataset). + +New models +(e.g. 
[TensorMask](../../projects/TensorMask), +[PointRend](../../projects/PointRend)) +often have similar configs of their own that need to be changed as well. + +```eval_rst +.. tip:: + + After changing the number of classes, certain layers in a pre-trained model will become incompatible + and therefore cannot be loaded to the new model. + This is expected, and loading such pre-trained models will produce warnings about such layers. +``` diff --git a/data_processing/detectron2/docs/tutorials/deployment.md b/data_processing/detectron2/docs/tutorials/deployment.md new file mode 100644 index 0000000..f759888 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/deployment.md @@ -0,0 +1,137 @@ +# Deployment + +Models written in Python need to go through an export process to become a deployable artifact. +A few basic concepts about this process: + +__"Export method"__ is how a Python model is fully serialized to a deployable format. +We support the following export methods: + +* `tracing`: see [pytorch documentation](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) to learn about it +* `scripting`: see [pytorch documentation](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) to learn about it +* `caffe2_tracing`: replace parts of the model by caffe2 operators, then use tracing. + +__"Format"__ is how a serialized model is described in a file, e.g. +TorchScript, Caffe2 protobuf, ONNX format. +__"Runtime"__ is an engine that loads a serialized model and executes it, +e.g., PyTorch, Caffe2, TensorFlow, onnxruntime, TensorRT, etc. +A runtime is often tied to a specific format +(e.g. PyTorch needs TorchScript format, Caffe2 needs protobuf format). 
+We currently support the following combination and each has some limitations: + +```eval_rst ++----------------------------+-------------+-------------+-----------------------------+ +| Export Method | tracing | scripting | caffe2_tracing | ++============================+=============+=============+=============================+ +| **Formats** | TorchScript | TorchScript | Caffe2, TorchScript, ONNX | ++----------------------------+-------------+-------------+-----------------------------+ +| **Runtime** | PyTorch | PyTorch | Caffe2, PyTorch | ++----------------------------+-------------+-------------+-----------------------------+ +| C++/Python inference | ✅ | ✅ | ✅ | ++----------------------------+-------------+-------------+-----------------------------+ +| Dynamic resolution | ✅ | ✅ | ✅ | ++----------------------------+-------------+-------------+-----------------------------+ +| Batch size requirement | Constant | Dynamic | Batch inference unsupported | ++----------------------------+-------------+-------------+-----------------------------+ +| Extra runtime deps | torchvision | torchvision | Caffe2 ops (usually already | +| | | | | +| | | | included in PyTorch) | ++----------------------------+-------------+-------------+-----------------------------+ +| Faster/Mask/Keypoint R-CNN | ✅ | ✅ | ✅ | ++----------------------------+-------------+-------------+-----------------------------+ +| RetinaNet | ✅ | ✅ | ✅ | ++----------------------------+-------------+-------------+-----------------------------+ +| PointRend R-CNN | ✅ | ❌ | ❌ | ++----------------------------+-------------+-------------+-----------------------------+ +| Cascade R-CNN | ✅ | ❌ | ❌ | ++----------------------------+-------------+-------------+-----------------------------+ + +``` + +`caffe2_tracing` is going to be deprecated. +We don't plan to work on additional support for other formats/runtime, but contributions are welcome. + + +## Deployment with Tracing or Scripting + +Models can be exported to TorchScript format, by either +[tracing or scripting](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html). +The output model file can be loaded without detectron2 dependency in either Python or C++. +The exported model often requires torchvision (or its C++ library) dependency for some custom ops. + +This feature requires PyTorch ≥ 1.8. + +### Coverage +Most official models under the meta architectures `GeneralizedRCNN` and `RetinaNet` +are supported in both tracing and scripting mode. +Cascade R-CNN and PointRend are currently supported in tracing. +Users' custom extensions are supported if they are also scriptable or traceable. + +For models exported with tracing, dynamic input resolution is allowed, but batch size +(number of input images) must be fixed. +Scripting can support dynamic batch size. + +### Usage + +The main export APIs for tracing and scripting are [TracingAdapter](../modules/export.html#detectron2.export.TracingAdapter) +and [scripting_with_instances](../modules/export.html#detectron2.export.scripting_with_instances). +Their usage is currently demonstrated in [test_export_torchscript.py](../../tests/test_export_torchscript.py) +(see `TestScripting` and `TestTracing`) +as well as the [deployment example](../../tools/deploy). +Please check that these examples can run, and then modify for your use cases. 
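+
+As a rough orientation (not a substitute for the examples referenced above), a tracing export might look like the following sketch. It assumes `model` is a detectron2 model already in eval mode and `image` is a `uint8` CHW tensor; real models may need the per-model care shown in those examples:
+
+```python
+# A minimal sketch of TorchScript export via tracing; assumes `model` is an
+# eval-mode detectron2 model and `image` is a uint8 CHW tensor.
+import torch
+from detectron2.export import TracingAdapter
+
+inputs = [{"image": image}]                   # standard detectron2 model input format
+adapter = TracingAdapter(model, inputs)       # wraps the model to use flattened tensor I/O
+traced = torch.jit.trace(adapter, adapter.flattened_inputs)
+traced.save("model.ts")                       # later: torch.jit.load("model.ts")
+```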
+The usage currently requires some user effort and necessary knowledge for each model, to work around the limitations of scripting and tracing.
+In the future we plan to wrap these under simpler APIs to lower the bar to use them.
+
+## Deployment with Caffe2-tracing
+We provide [Caffe2Tracer](../modules/export.html#detectron2.export.Caffe2Tracer)
+that performs the export logic.
+It replaces parts of the model with Caffe2 operators,
+and then exports the model into Caffe2, TorchScript or ONNX format.
+
+The converted model is able to run in either Python or C++ without detectron2/torchvision dependency, on CPU or GPUs.
+It has a runtime optimized for CPU & mobile inference, but not optimized for GPU inference.
+
+This feature requires ONNX ≥ 1.6.
+
+### Coverage
+
+Most official models under these 3 common meta architectures: `GeneralizedRCNN`, `RetinaNet`, `PanopticFPN`
+are supported. Cascade R-CNN is not supported. Batch inference is not supported.
+
+Users' custom extensions under these architectures (added through registration) are supported
+as long as they do not contain control flow or operators not available in Caffe2 (e.g. deformable convolution).
+For example, custom backbones and heads are often supported out of the box.
+
+### Usage
+
+The APIs are listed in [the API documentation](../modules/export).
+We provide [export_model.py](../../tools/deploy/) as an example that uses
+these APIs to convert a standard model. For custom models/datasets, you can add them to this script.
+
+### Use the model in C++/Python
+
+The model can be loaded in C++ and deployed with
+either the Caffe2 or PyTorch runtime. [C++ examples](../../tools/deploy/) for Mask R-CNN
+are given as a reference. Note that:
+
+* Models exported with the `caffe2_tracing` method take a special input format
+  described in the [documentation](../modules/export.html#detectron2.export.Caffe2Tracer).
+  This is taken care of in the C++ example.
+
+* The converted models do not contain post-processing operations that
+  transform raw layer outputs into formatted predictions.
+  For example, the C++ examples only produce raw outputs (28x28 masks) from the final
+  layers that are not post-processed, because in actual deployment, an application often needs
+  its own lightweight post-processing, so this step is left for users.
+
+To help use the Caffe2-format model in Python,
+we provide a Python wrapper around the converted model, in the
+[Caffe2Model.\_\_call\_\_](../modules/export.html#detectron2.export.Caffe2Model.__call__) method.
+This method has an interface that's identical to the [pytorch versions of models](./models.md),
+and it internally applies pre/post-processing code to match the formats.
+This wrapper can serve as a reference for how to use Caffe2's Python API,
+or for how to implement pre/post-processing in actual deployment.
+
+## Conversion to TensorFlow
+[tensorpack Faster R-CNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN/convert_d2)
+provides scripts to convert a few standard detectron2 R-CNN models to TensorFlow's pb format.
+It works by translating configs and weights, and therefore only supports a few models.
diff --git a/data_processing/detectron2/docs/tutorials/evaluation.md b/data_processing/detectron2/docs/tutorials/evaluation.md new file mode 100644 index 0000000..2ef94fa --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/evaluation.md @@ -0,0 +1,68 @@ + +# Evaluation + +Evaluation is a process that takes a number of inputs/outputs pairs and aggregate them. +You can always [use the model](./models.md) directly and just parse its inputs/outputs manually to perform +evaluation. +Alternatively, evaluation is implemented in detectron2 using the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator) +interface. + +Detectron2 includes a few `DatasetEvaluator` that computes metrics using standard dataset-specific +APIs (e.g., COCO, LVIS). +You can also implement your own `DatasetEvaluator` that performs some other jobs +using the inputs/outputs pairs. +For example, to count how many instances are detected on the validation set: + +```python +class Counter(DatasetEvaluator): + def reset(self): + self.count = 0 + def process(self, inputs, outputs): + for output in outputs: + self.count += len(output["instances"]) + def evaluate(self): + # save self.count somewhere, or print it, or return it. + return {"count": self.count} +``` + +## Use evaluators + +To evaluate using the methods of evaluators manually: +```python +def get_all_inputs_outputs(): + for data in data_loader: + yield data, model(data) + +evaluator.reset() +for inputs, outputs in get_all_inputs_outputs(): + evaluator.process(inputs, outputs) +eval_results = evaluator.evaluate() +``` + +Evaluators can also be used with [inference_on_dataset](../modules/evaluation.html#detectron2.evaluation.inference_on_dataset). +For example, + +```python +eval_results = inference_on_dataset( + model, + data_loader, + DatasetEvaluators([COCOEvaluator(...), Counter()])) +``` +This will execute `model` on all inputs from `data_loader`, and call evaluator to process them. + +Compared to running the evaluation manually using the model, the benefit of this function is that +evaluators can be merged together using [DatasetEvaluators](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluators), +and all the evaluation can finish in one forward pass over the dataset. +This function also provides accurate speed benchmarks for the given model and dataset. + +## Evaluators for custom dataset + +Many evaluators in detectron2 are made for specific datasets, +in order to obtain scores using each dataset's official API. +In addition to that, two evaluators are able to evaluate any generic dataset +that follows detectron2's [standard dataset format](./datasets.md), so they +can be used to evaluate custom datasets: + +* [COCOEvaluator](../modules/evaluation.html#detectron2.evaluation.COCOEvaluator) is able to evaluate AP (Average Precision) for box detection, + instance segmentation, keypoint detection on any custom dataset. +* [SemSegEvaluator](../modules/evaluation.html#detectron2.evaluation.SemSegEvaluator) is able to evaluate semantic segmentation metrics on any custom dataset. diff --git a/data_processing/detectron2/docs/tutorials/extend.md b/data_processing/detectron2/docs/tutorials/extend.md new file mode 100644 index 0000000..a6af550 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/extend.md @@ -0,0 +1,141 @@ +# Extend Detectron2's Defaults + +__Research is about doing things in new ways__. 
+This brings a tension in how to create abstractions in code, +which is a challenge for any research engineering project of a significant size: + +1. On one hand, it needs to have very thin abstractions to allow for the possibility of doing + everything in new ways. It should be reasonably easy to break existing + abstractions and replace them with new ones. + +2. On the other hand, such a project also needs reasonably high-level + abstractions, so that users can easily do things in standard ways, + without worrying too much about the details that only certain researchers care about. + +In detectron2, there are two types of interfaces that address this tension together: + +1. Functions and classes that take a config (`cfg`) argument + created from a yaml file + (sometimes with few extra arguments). + + Such functions and classes implement + the "standard default" behavior: it will read what it needs from a given + config and do the "standard" thing. + Users only need to load an expert-made config and pass it around, without having to worry about + which arguments are used and what they all mean. + + See [Yacs Configs](configs.md) for a detailed tutorial. + +2. Functions and classes that have well-defined explicit arguments. + + Each of these is a small building block of the entire system. + They require users' expertise to understand what each argument should be, + and require more effort to stitch together to a larger system. + But they can be stitched together in more flexible ways. + + When you need to implement something not supported by the "standard defaults" + included in detectron2, these well-defined components can be reused. + + The [LazyConfig system](lazyconfigs.md) relies on such functions and classes. + +3. A few functions and classes are implemented with the + [@configurable](../modules/config.html#detectron2.config.configurable) + decorator - they can be called with either a config, or with explicit arguments, or a mixture of both. + Their explicit argument interfaces are currently experimental. + + As an example, a Mask R-CNN model can be built in the following ways: + + 1. Config-only: + ```python + # load proper yaml config file, then + model = build_model(cfg) + ``` + + 2. Mixture of config and additional argument overrides: + ```python + model = GeneralizedRCNN( + cfg, + roi_heads=StandardROIHeads(cfg, batch_size_per_image=666), + pixel_std=[57.0, 57.0, 57.0]) + ``` + + 3. Full explicit arguments: +
+ + (click to expand) + + + ```python + model = GeneralizedRCNN( + backbone=FPN( + ResNet( + BasicStem(3, 64, norm="FrozenBN"), + ResNet.make_default_stages(50, stride_in_1x1=True, norm="FrozenBN"), + out_features=["res2", "res3", "res4", "res5"], + ).freeze(2), + ["res2", "res3", "res4", "res5"], + 256, + top_block=LastLevelMaxPool(), + ), + proposal_generator=RPN( + in_features=["p2", "p3", "p4", "p5", "p6"], + head=StandardRPNHead(in_channels=256, num_anchors=3), + anchor_generator=DefaultAnchorGenerator( + sizes=[[32], [64], [128], [256], [512]], + aspect_ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64], + offset=0.0, + ), + anchor_matcher=Matcher([0.3, 0.7], [0, -1, 1], allow_low_quality_matches=True), + box2box_transform=Box2BoxTransform([1.0, 1.0, 1.0, 1.0]), + batch_size_per_image=256, + positive_fraction=0.5, + pre_nms_topk=(2000, 1000), + post_nms_topk=(1000, 1000), + nms_thresh=0.7, + ), + roi_heads=StandardROIHeads( + num_classes=80, + batch_size_per_image=512, + positive_fraction=0.25, + proposal_matcher=Matcher([0.5], [0, 1], allow_low_quality_matches=False), + box_in_features=["p2", "p3", "p4", "p5"], + box_pooler=ROIPooler(7, (1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), 0, "ROIAlignV2"), + box_head=FastRCNNConvFCHead( + ShapeSpec(channels=256, height=7, width=7), conv_dims=[], fc_dims=[1024, 1024] + ), + box_predictor=FastRCNNOutputLayers( + ShapeSpec(channels=1024), + test_score_thresh=0.05, + box2box_transform=Box2BoxTransform((10, 10, 5, 5)), + num_classes=80, + ), + mask_in_features=["p2", "p3", "p4", "p5"], + mask_pooler=ROIPooler(14, (1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), 0, "ROIAlignV2"), + mask_head=MaskRCNNConvUpsampleHead( + ShapeSpec(channels=256, width=14, height=14), + num_classes=80, + conv_dims=[256, 256, 256, 256, 256], + ), + ), + pixel_mean=[103.530, 116.280, 123.675], + pixel_std=[1.0, 1.0, 1.0], + input_format="BGR", + ) + ``` + +
+ + +If you only need the standard behavior, the [Beginner's Tutorial](./getting_started.md) +should suffice. If you need to extend detectron2 to your own needs, +see the following tutorials for more details: + +* Detectron2 includes a few standard datasets. To use custom ones, see + [Use Custom Datasets](./datasets.md). +* Detectron2 contains the standard logic that creates a data loader for training/testing from a + dataset, but you can write your own as well. See [Use Custom Data Loaders](./data_loading.md). +* Detectron2 implements many standard detection models, and provide ways for you + to overwrite their behaviors. See [Use Models](./models.md) and [Write Models](./write-models.md). +* Detectron2 provides a default training loop that is good for common training tasks. + You can customize it with hooks, or write your own loop instead. See [training](./training.md). diff --git a/data_processing/detectron2/docs/tutorials/getting_started.md b/data_processing/detectron2/docs/tutorials/getting_started.md new file mode 100644 index 0000000..e90bde7 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/getting_started.md @@ -0,0 +1 @@ +../../GETTING_STARTED.md \ No newline at end of file diff --git a/data_processing/detectron2/docs/tutorials/index.rst b/data_processing/detectron2/docs/tutorials/index.rst new file mode 100644 index 0000000..850b95c --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/index.rst @@ -0,0 +1,20 @@ +Tutorials +====================================== + +.. toctree:: + :maxdepth: 2 + + install + getting_started + builtin_datasets + extend + datasets + data_loading + augmentation + models + write-models + training + evaluation + configs + lazyconfigs + deployment diff --git a/data_processing/detectron2/docs/tutorials/install.md b/data_processing/detectron2/docs/tutorials/install.md new file mode 100644 index 0000000..5f52b2b --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/install.md @@ -0,0 +1 @@ +../../INSTALL.md \ No newline at end of file diff --git a/data_processing/detectron2/docs/tutorials/lazyconfigs.md b/data_processing/detectron2/docs/tutorials/lazyconfigs.md new file mode 100644 index 0000000..a01101a --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/lazyconfigs.md @@ -0,0 +1,170 @@ +# Lazy Configs + +The traditional yacs-based config system provides basic, standard functionalities. +However, it does not offer enough flexibility for many new projects. +We develop an alternative, non-intrusive config system that can be used with +detectron2 or potentially any other complex projects. + +## Python Syntax + +Our config objects are still dictionaries. Instead of using Yaml to define dictionaries, +we create dictionaries in Python directly. This gives users the following power that +doesn't exist in Yaml: + +* Easily manipulate the dictionary (addition & deletion) using Python. +* Write simple arithmetics or call simple functions. +* Use more data types / objects. +* Import / compose other config files, using the familiar Python import syntax. + +A Python config file can be loaded like this: +```python +# config.py: +a = dict(x=1, y=2, z=dict(xx=1)) +b = dict(x=3, y=4) + +# my_code.py: +from detectron2.config import LazyConfig +cfg = LazyConfig.load("path/to/config.py") # an omegaconf dictionary +assert cfg.a.z.xx == 1 +``` + +After [LazyConfig.load](../modules/config.html#detectron2.config.LazyConfig.load), `cfg` will be a dictionary that contains all dictionaries +defined in the global scope of the config file. 
Note that:
+* All dictionaries are turned into an [omegaconf](https://omegaconf.readthedocs.io/)
+  config object during loading. This enables access to omegaconf features,
+  such as its [access syntax](https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#access-and-manipulation)
+  and [interpolation](https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#variable-interpolation).
+* Absolute imports in `config.py` work the same as in regular Python.
+* Relative imports can only import dictionaries from config files.
+  They are simply syntactic sugar for [LazyConfig.load_rel](../modules/config.html#detectron2.config.LazyConfig.load_rel).
+  They can load Python files at relative paths without requiring `__init__.py`.
+
+[LazyConfig.save](../modules/config.html#detectron2.config.LazyConfig.save) can save a config object to yaml.
+Note that this is not always successful if non-serializable objects appear in the config file (e.g. lambdas).
+It is up to users whether to sacrifice the ability to save in exchange for flexibility.
+
+## Recursive Instantiation
+
+The LazyConfig system heavily uses recursive instantiation, which is a pattern that
+uses a dictionary to describe a
+call to a function/class. The dictionary consists of:
+
+1. A "\_target\_" key which contains the path to the callable, such as "module.submodule.class_name".
+2. Other keys that represent arguments to pass to the callable. Arguments themselves can be defined
+   using recursive instantiation.
+
+We provide a helper function [LazyCall](../modules/config.html#detectron2.config.LazyCall) that helps create such dictionaries.
+The following code using `LazyCall`
+```python
+from detectron2.config import LazyCall as L
+from my_app import Trainer, Optimizer
+cfg = L(Trainer)(
+    optimizer=L(Optimizer)(
+        lr=0.01,
+        algo="SGD"
+    )
+)
+```
+creates a dictionary like this:
+```python
+cfg = {
+    "_target_": "my_app.Trainer",
+    "optimizer": {
+        "_target_": "my_app.Optimizer",
+        "lr": 0.01, "algo": "SGD"
+    }
+}
+```
+
+By representing objects using such dictionaries, a general
+[instantiate](../modules/config.html#detectron2.config.instantiate)
+function can turn them into actual objects, i.e.:
+```python
+from detectron2.config import instantiate
+trainer = instantiate(cfg)
+# equivalent to:
+# from my_app import Trainer, Optimizer
+# trainer = Trainer(optimizer=Optimizer(lr=0.01, algo="SGD"))
+```
+
+This pattern is powerful enough to describe very complex objects, e.g.:
+ +A Full Mask R-CNN described in recursive instantiation (click to expand) + + +```eval_rst +.. literalinclude:: ../../configs/common/models/mask_rcnn_fpn.py + :language: python + :linenos: +``` + +
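+
+Because such a config is only a (nested) dictionary until `instantiate` is called, its fields can
+be overridden freely beforehand. A minimal sketch, reusing the hypothetical `Trainer`/`Optimizer`
+config from the example above:
+
+```python
+from detectron2.config import instantiate
+
+cfg.optimizer.lr = 0.02       # override a leaf value
+cfg.optimizer.algo = "Adam"   # or replace an argument entirely
+trainer = instantiate(cfg)    # objects are only constructed at this point
+```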
+ +There are also objects or logic that cannot be described simply by a dictionary, +such as reused objects or method calls. They may require some refactoring +to work with recursive instantiation. + +## Using Model Zoo LazyConfigs + +We provide some configs in the model zoo using the LazyConfig system, for example: + +* [common baselines](../../configs/common/). +* [new Mask R-CNN baselines](../../configs/new_baselines/) + +After installing detectron2, they can be loaded by the model zoo API +[model_zoo.get_config](../modules/model_zoo.html#detectron2.model_zoo.get_config). + +Using these as references, you're free to define custom config structure / fields for your own +project, as long as your training script can understand them. +Despite of this, our model zoo configs still follow some simple conventions for consistency, e.g. +`cfg.model` defines a model object, `cfg.dataloader.{train,test}` defines dataloader objects, +and `cfg.train` contains training options in key-value form. +In addition to `print()`, a better way to view the structure of a config is like this: +```python +from detectron2.model_zoo import get_config +from detectron2.config import LazyConfig +print(LazyConfig.to_py(get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py"))) +``` +From the output it's easier to find relevant options to change, e.g. +`dataloader.train.total_batch_size` for the batch size, or `optimizer.lr` for base learning rate. + +We provide a reference training script +[tools/lazyconfig_train_net.py](../../tools/lazyconfig_train_net.py), +that can train/eval our model zoo configs. +It also shows how to support command line value overrides. + +To demonstrate the power and flexibility of the new system, we show that +[a simple config file](../../configs/Misc/torchvision_imagenet_R_50.py) +can let detectron2 train an ImageNet classification model from torchvision, even though +detectron2 contains no features about ImageNet classification. +This can serve as a reference for using detectron2 in other deep learning tasks. + +## Summary + +By using recursive instantiation to create objects, +we avoid passing a giant config to many places, because `cfg` is only passed to `instantiate`. +This has the following benefits: + +* It's __non-intrusive__: objects to be constructed are config-agnostic, regular Python + functions/classes. + They can even live in other libraries. For example, + `{"_target_": "torch.nn.Conv2d", "in_channels": 10, "out_channels": 10, "kernel_size": 1}` + defines a conv layer. +* __Clarity__ of what function/classes will be called, and what arguments they use. +* `cfg` doesn't need pre-defined keys and structures. It's valid as long as it translates to valid + code. This gives a lot more __flexibility__. +* You can still pass huge dictionaries as arguments, just like the old way. + +Recursive instantiation and Python syntax are orthogonal: you can use one without the other. +But by putting them together, the config file looks a lot like the code that will be executed: + +![img](./lazyconfig.jpg) + +However, the config file just defines dictionaries, which can be easily manipulated further +by composition or overrides. +The corresponding code will only be executed +later when `instantiate` is called. In some way, +in config files we're writing "editable code" that will be "lazily executed" later when needed. +That's why we call this system "LazyConfig". 
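+
+As a closing illustration, command line value overrides (as supported by the reference training
+script mentioned above) can be applied to a loaded config with `LazyConfig.apply_overrides`.
+A minimal sketch, in which the config path and the override keys are illustrative and the config
+is assumed to follow the model zoo conventions above (a `model` and a `train` section):
+
+```python
+from detectron2.config import LazyConfig, instantiate
+
+cfg = LazyConfig.load("path/to/config.py")
+# e.g. strings a user passed on the command line:
+cfg = LazyConfig.apply_overrides(cfg, ["optimizer.lr=0.02", "train.max_iter=90000"])
+model = instantiate(cfg.model)
+```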
diff --git a/data_processing/detectron2/docs/tutorials/models.md b/data_processing/detectron2/docs/tutorials/models.md
new file mode 100644
index 0000000..a2def5c
--- /dev/null
+++ b/data_processing/detectron2/docs/tutorials/models.md
@@ -0,0 +1,180 @@
+# Use Models
+
+## Build Models from Yacs Config
+From a yacs config object,
+models (and their sub-models) can be built by
+functions such as `build_model`, `build_backbone`, `build_roi_heads`:
+```python
+from detectron2.modeling import build_model
+model = build_model(cfg)  # returns a torch.nn.Module
+```
+
+`build_model` only builds the model structure and fills it with random parameters.
+See below for how to load an existing checkpoint into the model and how to use the `model` object.
+
+### Load/Save a Checkpoint
+```python
+from detectron2.checkpoint import DetectionCheckpointer
+DetectionCheckpointer(model).load(file_path_or_url)  # load a file, usually from cfg.MODEL.WEIGHTS
+
+checkpointer = DetectionCheckpointer(model, save_dir="output")
+checkpointer.save("model_999")  # save to output/model_999.pth
+```
+
+Detectron2's checkpointer recognizes models in PyTorch's `.pth` format, as well as the `.pkl` files
+in our model zoo.
+See the [API doc](../modules/checkpoint.html#detectron2.checkpoint.DetectionCheckpointer)
+for more details about its usage.
+
+The model files can be arbitrarily manipulated using `torch.{load,save}` for `.pth` files or
+`pickle.{dump,load}` for `.pkl` files.
+
+### Use a Model
+
+A model can be called by `outputs = model(inputs)`, where `inputs` is a `list[dict]`.
+Each dict corresponds to one image, and the required keys
+depend on the type of model and on whether the model is in training or evaluation mode.
+For example, in order to do inference,
+all existing models expect the "image" key, and optionally "height" and "width".
+The detailed format of inputs and outputs of existing models is explained below.
+
+__Training__: When in training mode, all models are required to be used under an `EventStorage`.
+The training statistics will be put into the storage:
+```python
+from detectron2.utils.events import EventStorage
+with EventStorage() as storage:
+  losses = model(inputs)
+```
+
+__Inference__: If you only want to do simple inference using an existing model,
+[DefaultPredictor](../modules/engine.html#detectron2.engine.defaults.DefaultPredictor)
+is a wrapper around the model that provides such basic functionality.
+It includes default behavior such as model loading and preprocessing,
+and it operates on a single image rather than a batch. See its documentation for usage.
+
+You can also run inference directly like this:
+```python
+model.eval()
+with torch.no_grad():
+  outputs = model(inputs)
+```
+
+### Model Input Format
+
+Users can implement custom models that support any arbitrary input format.
+Here we describe the standard input format that all builtin models support in detectron2.
+They all take a `list[dict]` as the inputs. Each dict
+corresponds to information about one image.
+
+The dict may contain the following keys:
+
+* "image": `Tensor` in (C, H, W) format. The meaning of the channels is defined by `cfg.INPUT.FORMAT`.
+  Image normalization, if any, will be performed inside the model using
+  `cfg.MODEL.PIXEL_{MEAN,STD}`.
+* "height", "width": the **desired** output height and width **in inference**, which is not necessarily the same
+  as the height or width of the `image` field.
+  For example, the `image` field contains the resized image, if resize is used as a preprocessing step.
+ But you may want the outputs to be in **original** resolution. + If provided, the model will produce output in this resolution, + rather than in the resolution of the `image` as input into the model. This is more efficient and accurate. +* "instances": an [Instances](../modules/structures.html#detectron2.structures.Instances) + object for training, with the following fields: + + "gt_boxes": a [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing N boxes, one for each instance. + + "gt_classes": `Tensor` of long type, a vector of N labels, in range [0, num_categories). + + "gt_masks": a [PolygonMasks](../modules/structures.html#detectron2.structures.PolygonMasks) + or [BitMasks](../modules/structures.html#detectron2.structures.BitMasks) object storing N masks, one for each instance. + + "gt_keypoints": a [Keypoints](../modules/structures.html#detectron2.structures.Keypoints) + object storing N keypoint sets, one for each instance. +* "sem_seg": `Tensor[int]` in (H, W) format. The semantic segmentation ground truth for training. + Values represent category labels starting from 0. +* "proposals": an [Instances](../modules/structures.html#detectron2.structures.Instances) + object used only in Fast R-CNN style models, with the following fields: + + "proposal_boxes": a [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing P proposal boxes. + + "objectness_logits": `Tensor`, a vector of P scores, one for each proposal. + +For inference of builtin models, only "image" key is required, and "width/height" are optional. + +We currently don't define standard input format for panoptic segmentation training, +because models now use custom formats produced by custom data loaders. + +#### How it connects to data loader: + +The output of the default [DatasetMapper]( ../modules/data.html#detectron2.data.DatasetMapper) is a dict +that follows the above format. +After the data loader performs batching, it becomes `list[dict]` which the builtin models support. + + +### Model Output Format + +When in training mode, the builtin models output a `dict[str->ScalarTensor]` with all the losses. + +When in inference mode, the builtin models output a `list[dict]`, one dict for each image. +Based on the tasks the model is doing, each dict may contain the following fields: + +* "instances": [Instances](../modules/structures.html#detectron2.structures.Instances) + object with the following fields: + * "pred_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing N boxes, one for each detected instance. + * "scores": `Tensor`, a vector of N confidence scores. + * "pred_classes": `Tensor`, a vector of N labels in range [0, num_categories). + + "pred_masks": a `Tensor` of shape (N, H, W), masks for each detected instance. + + "pred_keypoints": a `Tensor` of shape (N, num_keypoint, 3). + Each row in the last dimension is (x, y, score). Confidence scores are larger than 0. +* "sem_seg": `Tensor` of (num_categories, H, W), the semantic segmentation prediction. +* "proposals": [Instances](../modules/structures.html#detectron2.structures.Instances) + object with the following fields: + * "proposal_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) + object storing N boxes. + * "objectness_logits": a torch vector of N confidence scores. +* "panoptic_seg": A tuple of `(pred: Tensor, segments_info: Optional[list[dict]])`. + The `pred` tensor has shape (H, W), containing the segment id of each pixel. 
+ + * If `segments_info` exists, each dict describes one segment id in `pred` and has the following fields: + + * "id": the segment id + * "isthing": whether the segment is a thing or stuff + * "category_id": the category id of this segment. + + If a pixel's id does not exist in `segments_info`, it is considered to be void label + defined in [Panoptic Segmentation](https://arxiv.org/abs/1801.00868). + + * If `segments_info` is None, all pixel values in `pred` must be ≥ -1. + Pixels with value -1 are assigned void labels. + Otherwise, the category id of each pixel is obtained by + `category_id = pixel // metadata.label_divisor`. + + +### Partially execute a model: + +Sometimes you may want to obtain an intermediate tensor inside a model, +such as the input of certain layer, the output before post-processing. +Since there are typically hundreds of intermediate tensors, there isn't an API that provides you +the intermediate result you need. +You have the following options: + +1. Write a (sub)model. Following the [tutorial](./write-models.md), you can + rewrite a model component (e.g. a head of a model), such that it + does the same thing as the existing component, but returns the output + you need. +2. Partially execute a model. You can create the model as usual, + but use custom code to execute it instead of its `forward()`. For example, + the following code obtains mask features before mask head. + + ```python + images = ImageList.from_tensors(...) # preprocessed input tensor + model = build_model(cfg) + model.eval() + features = model.backbone(images.tensor) + proposals, _ = model.proposal_generator(images, features) + instances, _ = model.roi_heads(images, features, proposals) + mask_features = [features[f] for f in model.roi_heads.in_features] + mask_features = model.roi_heads.mask_pooler(mask_features, [x.pred_boxes for x in instances]) + ``` + +3. Use [forward hooks](https://pytorch.org/tutorials/beginner/former_torchies/nnft_tutorial.html#forward-and-backward-function-hooks). + Forward hooks can help you obtain inputs or outputs of a certain module. + If they are not exactly what you want, they can at least be used together with partial execution + to obtain other tensors. + +All options require you to read documentation and sometimes code +of the existing models to understand the internal logic, +in order to write code to obtain the internal tensors. diff --git a/data_processing/detectron2/docs/tutorials/training.md b/data_processing/detectron2/docs/tutorials/training.md new file mode 100644 index 0000000..83a6cb0 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/training.md @@ -0,0 +1,67 @@ +# Training + +From the previous tutorials, you may now have a custom model and a data loader. +To run training, users typically have a preference in one of the following two styles: + +### Custom Training Loop + +With a model and a data loader ready, everything else needed to write a training loop can +be found in PyTorch, and you are free to write the training loop yourself. +This style allows researchers to manage the entire training logic more clearly and have full control. +One such example is provided in [tools/plain_train_net.py](../../tools/plain_train_net.py). + +Any customization on the training logic is then easily controlled by the user. 
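+
+For orientation, a minimal sketch of such a loop is shown below. It assumes a yacs config `cfg`;
+real scripts such as [tools/plain_train_net.py](../../tools/plain_train_net.py) additionally handle
+checkpointing, learning rate scheduling, logging and evaluation.
+
+```python
+from detectron2.data import build_detection_train_loader
+from detectron2.modeling import build_model
+from detectron2.solver import build_optimizer
+from detectron2.utils.events import EventStorage
+
+model = build_model(cfg)
+model.train()
+optimizer = build_optimizer(cfg, model)
+data_loader = build_detection_train_loader(cfg)
+
+with EventStorage() as storage:  # models log their metrics into this storage
+    for iteration, data in zip(range(cfg.SOLVER.MAX_ITER), data_loader):
+        loss_dict = model(data)            # in training mode, models return a dict of losses
+        losses = sum(loss_dict.values())
+        optimizer.zero_grad()
+        losses.backward()
+        optimizer.step()
+        storage.step()
+```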
+ +### Trainer Abstraction + +We also provide a standardized "trainer" abstraction with a +hook system that helps simplify the standard training behavior. +It includes the following two instantiations: + +* [SimpleTrainer](../modules/engine.html#detectron2.engine.SimpleTrainer) + provides a minimal training loop for single-cost single-optimizer single-data-source training, with nothing else. + Other tasks (checkpointing, logging, etc) can be implemented using + [the hook system](../modules/engine.html#detectron2.engine.HookBase). +* [DefaultTrainer](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer) is a `SimpleTrainer` initialized from a + yacs config, used by + [tools/train_net.py](../../tools/train_net.py) and many scripts. + It includes more standard default behaviors that one might want to opt in, + including default configurations for optimizer, learning rate schedule, + logging, evaluation, checkpointing etc. + +To customize a `DefaultTrainer`: + +1. For simple customizations (e.g. change optimizer, evaluator, LR scheduler, data loader, etc.), overwrite [its methods](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer) in a subclass, just like [tools/train_net.py](../../tools/train_net.py). +2. For extra tasks during training, check the + [hook system](../modules/engine.html#detectron2.engine.HookBase) to see if it's supported. + + As an example, to print hello during training: + ```python + class HelloHook(HookBase): + def after_step(self): + if self.trainer.iter % 100 == 0: + print(f"Hello at iteration {self.trainer.iter}!") + ``` +3. Using a trainer+hook system means there will always be some non-standard behaviors that cannot be supported, especially in research. + For this reason, we intentionally keep the trainer & hook system minimal, rather than powerful. + If anything cannot be achieved by such a system, it's easier to start from [tools/plain_train_net.py](../../tools/plain_train_net.py) to implement custom training logic manually. + +### Logging of Metrics + +During training, detectron2 models and trainer put metrics to a centralized [EventStorage](../modules/utils.html#detectron2.utils.events.EventStorage). +You can use the following code to access it and log metrics to it: +```python +from detectron2.utils.events import get_event_storage + +# inside the model: +if self.training: + value = # compute the value from inputs + storage = get_event_storage() + storage.put_scalar("some_accuracy", value) +``` + +Refer to its documentation for more details. + +Metrics are then written to various destinations with [EventWriter](../modules/utils.html#module-detectron2.utils.events). +DefaultTrainer enables a few `EventWriter` with default configurations. +See above for how to customize them. diff --git a/data_processing/detectron2/docs/tutorials/write-models.md b/data_processing/detectron2/docs/tutorials/write-models.md new file mode 100644 index 0000000..967d126 --- /dev/null +++ b/data_processing/detectron2/docs/tutorials/write-models.md @@ -0,0 +1,90 @@ +# Write Models + +If you are trying to do something completely new, you may wish to implement +a model entirely from scratch. However, in many situations you may +be interested in modifying or extending some components of an existing model. +Therefore, we also provide mechanisms that let users override the +behavior of certain internal components of standard models. 
+ + +## Register New Components + +For common concepts that users often want to customize, such as "backbone feature extractor", "box head", +we provide a registration mechanism for users to inject custom implementation that +will be immediately available to use in config files. + +For example, to add a new backbone, import this code in your code: +```python +from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec + +@BACKBONE_REGISTRY.register() +class ToyBackbone(Backbone): + def __init__(self, cfg, input_shape): + super().__init__() + # create your own backbone + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=16, padding=3) + + def forward(self, image): + return {"conv1": self.conv1(image)} + + def output_shape(self): + return {"conv1": ShapeSpec(channels=64, stride=16)} +``` + +In this code, we implement a new backbone following the interface of the +[Backbone](../modules/modeling.html#detectron2.modeling.Backbone) class, +and register it into the [BACKBONE_REGISTRY](../modules/modeling.html#detectron2.modeling.BACKBONE_REGISTRY) +which requires subclasses of `Backbone`. +After importing this code, detectron2 can link the name of the class to its implementation. Therefore you can write the following code: + +```python +cfg = ... # read a config +cfg.MODEL.BACKBONE.NAME = 'ToyBackbone' # or set it in the config file +model = build_model(cfg) # it will find `ToyBackbone` defined above +``` + +As another example, to add new abilities to the ROI heads in the Generalized R-CNN meta-architecture, +you can implement a new +[ROIHeads](../modules/modeling.html#detectron2.modeling.ROIHeads) subclass and put it in the `ROI_HEADS_REGISTRY`. +[DensePose](../../projects/DensePose) +and [MeshRCNN](https://github.com/facebookresearch/meshrcnn) +are two examples that implement new ROIHeads to perform new tasks. +And [projects/](../../projects/) +contains more examples that implement different architectures. + +A complete list of registries can be found in [API documentation](../modules/modeling.html#model-registries). +You can register components in these registries to customize different parts of a model, or the +entire model. + +## Construct Models with Explicit Arguments + +Registry is a bridge to connect names in config files to the actual code. +They are meant to cover a few main components that users frequently need to replace. +However, the capability of a text-based config file is sometimes limited and +some deeper customization may be available only through writing code. + +Most model components in detectron2 have a clear `__init__` interface that documents +what input arguments it needs. Calling them with custom arguments will give you a custom variant +of the model. + +As an example, to use __custom loss function__ in the box head of a Faster R-CNN, we can do the following: + +1. Losses are currently computed in [FastRCNNOutputLayers](../modules/modeling.html#detectron2.modeling.FastRCNNOutputLayers). + We need to implement a variant or a subclass of it, with custom loss functions, named `MyRCNNOutput`. +2. Call `StandardROIHeads` with `box_predictor=MyRCNNOutput()` argument instead of the builtin `FastRCNNOutputLayers`. + If all other arguments should stay unchanged, this can be easily achieved by using the [configurable `__init__`](../modules/config.html#detectron2.config.configurable) mechanism: + + ```python + roi_heads = StandardROIHeads( + cfg, backbone.output_shape(), + box_predictor=MyRCNNOutput(...) 
+ ) + ``` +3. (optional) If we want to enable this new model from a config file, registration is needed: + ```python + @ROI_HEADS_REGISTRY.register() + class MyStandardROIHeads(StandardROIHeads): + def __init__(self, cfg, input_shape): + super().__init__(cfg, input_shape, + box_predictor=MyRCNNOutput(...)) + ``` diff --git a/data_processing/detectron2/projects/DeepLab/README.md b/data_processing/detectron2/projects/DeepLab/README.md new file mode 100644 index 0000000..bd03cf1 --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/README.md @@ -0,0 +1,100 @@ +# DeepLab in Detectron2 + +In this repository, we implement DeepLabV3 and DeepLabV3+ in Detectron2. + +## Installation +Install Detectron2 following [the instructions](https://detectron2.readthedocs.io/tutorials/install.html). + +## Training + +To train a model with 8 GPUs run: +```bash +cd /path/to/detectron2/projects/DeepLab +python train_net.py --config-file configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml --num-gpus 8 +``` + +## Evaluation + +Model evaluation can be done similarly: +```bash +cd /path/to/detectron2/projects/DeepLab +python train_net.py --config-file configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint +``` + +## Cityscapes Semantic Segmentation +Cityscapes models are trained with ImageNet pretraining. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Method | Backbone | Output resolution | mIoU | model id | download |
+| --- | --- | --- | --- | --- | --- |
+| DeepLabV3 | R101-DC5 | 1024×2048 | 76.7 | - | - \| - |
+| DeepLabV3 | R103-DC5 | 1024×2048 | 78.5 | 28041665 | model \| metrics |
+| DeepLabV3+ | R101-DC5 | 1024×2048 | 78.1 | - | - \| - |
+| DeepLabV3+ | R103-DC5 | 1024×2048 | 80.0 | 28054032 | model \| metrics |
+ +Note: +- [R103](https://dl.fbaipublicfiles.com/detectron2/DeepLab/R-103.pkl): a ResNet-101 with its first 7x7 convolution replaced by 3 3x3 convolutions. +This modification has been used in most semantic segmentation papers. We pre-train this backbone on ImageNet using the default recipe of [pytorch examples](https://github.com/pytorch/examples/tree/master/imagenet). +- DC5 means using dilated convolution in `res5`. + +## Citing DeepLab + +If you use DeepLab, please use the following BibTeX entry. + +* DeepLabv3+: + +``` +@inproceedings{deeplabv3plus2018, + title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation}, + author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam}, + booktitle={ECCV}, + year={2018} +} +``` + +* DeepLabv3: + +``` +@article{deeplabv32018, + title={Rethinking atrous convolution for semantic image segmentation}, + author={Chen, Liang-Chieh and Papandreou, George and Schroff, Florian and Adam, Hartwig}, + journal={arXiv:1706.05587}, + year={2017} +} +``` diff --git a/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/Base-DeepLabV3-OS16-Semantic.yaml b/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/Base-DeepLabV3-OS16-Semantic.yaml new file mode 100644 index 0000000..fa6edb5 --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/Base-DeepLabV3-OS16-Semantic.yaml @@ -0,0 +1,36 @@ +_BASE_: "../../../../configs/Base-RCNN-DilatedC5.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + BACKBONE: + FREEZE_AT: 0 + SEM_SEG_HEAD: + NAME: "DeepLabV3Head" + IN_FEATURES: ["res5"] + ASPP_CHANNELS: 256 + ASPP_DILATIONS: [6, 12, 18] + ASPP_DROPOUT: 0.1 + CONVS_DIM: 256 + COMMON_STRIDE: 16 + NUM_CLASSES: 19 + LOSS_TYPE: "hard_pixel_mining" +DATASETS: + TRAIN: ("cityscapes_fine_sem_seg_train",) + TEST: ("cityscapes_fine_sem_seg_val",) +SOLVER: + BASE_LR: 0.01 + MAX_ITER: 90000 + LR_SCHEDULER_NAME: "WarmupPolyLR" + IMS_PER_BATCH: 16 +INPUT: + MIN_SIZE_TRAIN: (512, 768, 1024, 1280, 1536, 1792, 2048) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 4096 + MAX_SIZE_TEST: 2048 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (512, 1024) + SINGLE_CATEGORY_MAX_AREA: 1.0 +DATALOADER: + NUM_WORKERS: 10 diff --git a/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_R_103_os16_mg124_poly_90k_bs16.yaml b/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_R_103_os16_mg124_poly_90k_bs16.yaml new file mode 100644 index 0000000..a2f5a54 --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_R_103_os16_mg124_poly_90k_bs16.yaml @@ -0,0 +1,19 @@ +_BASE_: Base-DeepLabV3-OS16-Semantic.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-103.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 101 + NORM: "SyncBN" + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False + SEM_SEG_HEAD: + NAME: "DeepLabV3Head" + NORM: "SyncBN" +INPUT: + FORMAT: "RGB" diff --git 
a/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml b/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml new file mode 100644 index 0000000..c03a72d --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml @@ -0,0 +1,24 @@ +_BASE_: Base-DeepLabV3-OS16-Semantic.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-103.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 101 + NORM: "SyncBN" + OUT_FEATURES: ["res2", "res5"] + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False + SEM_SEG_HEAD: + NAME: "DeepLabV3PlusHead" + IN_FEATURES: ["res2", "res5"] + PROJECT_FEATURES: ["res2"] + PROJECT_CHANNELS: [48] + NORM: "SyncBN" + COMMON_STRIDE: 4 +INPUT: + FORMAT: "RGB" diff --git a/data_processing/detectron2/projects/DeepLab/deeplab/__init__.py b/data_processing/detectron2/projects/DeepLab/deeplab/__init__.py new file mode 100644 index 0000000..dcd88ff --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/deeplab/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .build_solver import build_lr_scheduler +from .config import add_deeplab_config +from .resnet import build_resnet_deeplab_backbone +from .semantic_seg import DeepLabV3Head, DeepLabV3PlusHead diff --git a/data_processing/detectron2/projects/DeepLab/deeplab/build_solver.py b/data_processing/detectron2/projects/DeepLab/deeplab/build_solver.py new file mode 100644 index 0000000..a1d359c --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/deeplab/build_solver.py @@ -0,0 +1,27 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch + +from detectron2.config import CfgNode +from detectron2.solver import LRScheduler +from detectron2.solver import build_lr_scheduler as build_d2_lr_scheduler + +from .lr_scheduler import WarmupPolyLR + + +def build_lr_scheduler(cfg: CfgNode, optimizer: torch.optim.Optimizer) -> LRScheduler: + """ + Build a LR scheduler from config. + """ + name = cfg.SOLVER.LR_SCHEDULER_NAME + if name == "WarmupPolyLR": + return WarmupPolyLR( + optimizer, + cfg.SOLVER.MAX_ITER, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + power=cfg.SOLVER.POLY_LR_POWER, + constant_ending=cfg.SOLVER.POLY_LR_CONSTANT_ENDING, + ) + else: + return build_d2_lr_scheduler(cfg, optimizer) diff --git a/data_processing/detectron2/projects/DeepLab/deeplab/config.py b/data_processing/detectron2/projects/DeepLab/deeplab/config.py new file mode 100644 index 0000000..5f5e45a --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/deeplab/config.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + + +def add_deeplab_config(cfg): + """ + Add config for DeepLab. + """ + # We retry random cropping until no single category in semantic segmentation GT occupies more + # than `SINGLE_CATEGORY_MAX_AREA` part of the crop. + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0 + # Used for `poly` learning rate schedule. + cfg.SOLVER.POLY_LR_POWER = 0.9 + cfg.SOLVER.POLY_LR_CONSTANT_ENDING = 0.0 + # Loss type, choose from `cross_entropy`, `hard_pixel_mining`. 
+ cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE = "hard_pixel_mining" + # DeepLab settings + cfg.MODEL.SEM_SEG_HEAD.PROJECT_FEATURES = ["res2"] + cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS = [48] + cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS = 256 + cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS = [6, 12, 18] + cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT = 0.1 + cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV = False + # Backbone new configs + cfg.MODEL.RESNETS.RES4_DILATION = 1 + cfg.MODEL.RESNETS.RES5_MULTI_GRID = [1, 2, 4] + # ResNet stem type from: `basic`, `deeplab` + cfg.MODEL.RESNETS.STEM_TYPE = "deeplab" diff --git a/data_processing/detectron2/projects/DeepLab/deeplab/loss.py b/data_processing/detectron2/projects/DeepLab/deeplab/loss.py new file mode 100644 index 0000000..3a43087 --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/deeplab/loss.py @@ -0,0 +1,40 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +import torch.nn as nn + + +class DeepLabCE(nn.Module): + """ + Hard pixel mining with cross entropy loss, for semantic segmentation. + This is used in TensorFlow DeepLab frameworks. + Paper: DeeperLab: Single-Shot Image Parser + Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33 # noqa + Arguments: + ignore_label: Integer, label to ignore. + top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its + value < 1.0, only compute the loss for the top k percent pixels + (e.g., the top 20% pixels). This is useful for hard pixel mining. + weight: Tensor, a manual rescaling weight given to each class. + """ + + def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None): + super(DeepLabCE, self).__init__() + self.top_k_percent_pixels = top_k_percent_pixels + self.ignore_label = ignore_label + self.criterion = nn.CrossEntropyLoss( + weight=weight, ignore_index=ignore_label, reduction="none" + ) + + def forward(self, logits, labels, weights=None): + if weights is None: + pixel_losses = self.criterion(logits, labels).contiguous().view(-1) + else: + # Apply per-pixel loss weights. + pixel_losses = self.criterion(logits, labels) * weights + pixel_losses = pixel_losses.contiguous().view(-1) + if self.top_k_percent_pixels == 1.0: + return pixel_losses.mean() + + top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel()) + pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels) + return pixel_losses.mean() diff --git a/data_processing/detectron2/projects/DeepLab/deeplab/lr_scheduler.py b/data_processing/detectron2/projects/DeepLab/deeplab/lr_scheduler.py new file mode 100644 index 0000000..b754b59 --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/deeplab/lr_scheduler.py @@ -0,0 +1,62 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List +import torch + +from detectron2.solver.lr_scheduler import LRScheduler, _get_warmup_factor_at_iter + +# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes +# only on epoch boundaries. We typically use iteration based schedules instead. +# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean +# "iteration" instead. + +# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating +# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. + + +class WarmupPolyLR(LRScheduler): + """ + Poly learning rate schedule used to train DeepLab. 
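+    After the warmup phase, the learning rate at iteration ``t`` follows
+    ``base_lr * (1 - t / max_iters) ** power``; when ``constant_ending > 0``, the
+    learning rate is held at ``base_lr * constant_ending`` once this polynomial
+    factor drops below ``constant_ending`` (see ``get_lr`` below).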
+ Paper: DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, + Atrous Convolution, and Fully Connected CRFs. + Reference: https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/utils/train_utils.py#L337 # noqa + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + max_iters: int, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + power: float = 0.9, + constant_ending: float = 0.0, + ): + self.max_iters = max_iters + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + self.power = power + self.constant_ending = constant_ending + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + if self.constant_ending > 0 and warmup_factor == 1.0: + # Constant ending lr. + if ( + math.pow((1.0 - self.last_epoch / self.max_iters), self.power) + < self.constant_ending + ): + return [base_lr * self.constant_ending for base_lr in self.base_lrs] + return [ + base_lr * warmup_factor * math.pow((1.0 - self.last_epoch / self.max_iters), self.power) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() diff --git a/data_processing/detectron2/projects/DeepLab/deeplab/resnet.py b/data_processing/detectron2/projects/DeepLab/deeplab/resnet.py new file mode 100644 index 0000000..2cc277b --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/deeplab/resnet.py @@ -0,0 +1,158 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import fvcore.nn.weight_init as weight_init +import torch.nn.functional as F + +from detectron2.layers import CNNBlockBase, Conv2d, get_norm +from detectron2.modeling import BACKBONE_REGISTRY +from detectron2.modeling.backbone.resnet import ( + BasicStem, + BottleneckBlock, + DeformBottleneckBlock, + ResNet, +) + + +class DeepLabStem(CNNBlockBase): + """ + The DeepLab ResNet stem (layers before the first residual block). + """ + + def __init__(self, in_channels=3, out_channels=128, norm="BN"): + """ + Args: + norm (str or callable): norm after the first conv layer. + See :func:`layers.get_norm` for supported format. + """ + super().__init__(in_channels, out_channels, 4) + self.in_channels = in_channels + self.conv1 = Conv2d( + in_channels, + out_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False, + norm=get_norm(norm, out_channels // 2), + ) + self.conv2 = Conv2d( + out_channels // 2, + out_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False, + norm=get_norm(norm, out_channels // 2), + ) + self.conv3 = Conv2d( + out_channels // 2, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + weight_init.c2_msra_fill(self.conv1) + weight_init.c2_msra_fill(self.conv2) + weight_init.c2_msra_fill(self.conv3) + + def forward(self, x): + x = self.conv1(x) + x = F.relu_(x) + x = self.conv2(x) + x = F.relu_(x) + x = self.conv3(x) + x = F.relu_(x) + x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) + return x + + +@BACKBONE_REGISTRY.register() +def build_resnet_deeplab_backbone(cfg, input_shape): + """ + Create a ResNet instance from config. + Returns: + ResNet: a :class:`ResNet` instance. 
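+
+    Compared to the standard ResNet backbone builder, this variant additionally supports
+    the DeepLab stem (``cfg.MODEL.RESNETS.STEM_TYPE``), dilated convolutions in res4/res5
+    (``RES4_DILATION``, ``RES5_DILATION``) and the res5 multi-grid dilation pattern
+    (``RES5_MULTI_GRID``).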
+ """ + # need registration of new blocks/stems? + norm = cfg.MODEL.RESNETS.NORM + if cfg.MODEL.RESNETS.STEM_TYPE == "basic": + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + elif cfg.MODEL.RESNETS.STEM_TYPE == "deeplab": + stem = DeepLabStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + else: + raise ValueError("Unknown stem type: {}".format(cfg.MODEL.RESNETS.STEM_TYPE)) + + # fmt: off + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res4_dilation = cfg.MODEL.RESNETS.RES4_DILATION + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + res5_multi_grid = cfg.MODEL.RESNETS.RES5_MULTI_GRID + # fmt: on + assert res4_dilation in {1, 2}, "res4_dilation cannot be {}.".format(res4_dilation) + assert res5_dilation in {1, 2, 4}, "res5_dilation cannot be {}.".format(res5_dilation) + if res4_dilation == 2: + # Always dilate res5 if res4 is dilated. + assert res5_dilation == 4 + + num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] + + stages = [] + + # Avoid creating variables without gradients + # It consumes extra memory and may cause allreduce to fail + out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] + max_stage_idx = max(out_stage_idx) + for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): + if stage_idx == 4: + dilation = res4_dilation + elif stage_idx == 5: + dilation = res5_dilation + else: + dilation = 1 + first_stride = 1 if idx == 0 or dilation > 1 else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), + "in_channels": in_channels, + "out_channels": out_channels, + "norm": norm, + } + stage_kargs["bottleneck_channels"] = bottleneck_channels + stage_kargs["stride_in_1x1"] = stride_in_1x1 + stage_kargs["dilation"] = dilation + stage_kargs["num_groups"] = num_groups + if deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + if stage_idx == 5: + stage_kargs.pop("dilation") + stage_kargs["dilation_per_block"] = [dilation * mg for mg in res5_multi_grid] + blocks = ResNet.make_stage(**stage_kargs) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features).freeze(freeze_at) diff --git a/data_processing/detectron2/projects/DeepLab/deeplab/semantic_seg.py b/data_processing/detectron2/projects/DeepLab/deeplab/semantic_seg.py new file mode 100644 index 0000000..d4625c5 --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/deeplab/semantic_seg.py @@ -0,0 +1,348 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from typing import Callable, Dict, List, Optional, Tuple, Union +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import ASPP, Conv2d, DepthwiseSeparableConv2d, ShapeSpec, get_norm +from detectron2.modeling import SEM_SEG_HEADS_REGISTRY + +from .loss import DeepLabCE + + +@SEM_SEG_HEADS_REGISTRY.register() +class DeepLabV3PlusHead(nn.Module): + """ + A semantic segmentation head described in :paper:`DeepLabV3+`. + """ + + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + project_channels: List[int], + aspp_dilations: List[int], + aspp_dropout: float, + decoder_channels: List[int], + common_stride: int, + norm: Union[str, Callable], + train_size: Optional[Tuple], + loss_weight: float = 1.0, + loss_type: str = "cross_entropy", + ignore_value: int = -1, + num_classes: Optional[int] = None, + use_depthwise_separable_conv: bool = False, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape: shape of the input features. They will be ordered by stride + and the last one (with largest stride) is used as the input to the + decoder (i.e. the ASPP module); the rest are low-level feature for + the intermediate levels of decoder. + project_channels (list[int]): a list of low-level feature channels. + The length should be len(in_features) - 1. + aspp_dilations (list(int)): a list of 3 dilations in ASPP. + aspp_dropout (float): apply dropout on the output of ASPP. + decoder_channels (list[int]): a list of output channels of each + decoder stage. It should have the same length as "in_features" + (each element in "in_features" corresponds to one decoder stage). + common_stride (int): output stride of decoder. + norm (str or callable): normalization for all conv layers. + train_size (tuple): (height, width) of training images. + loss_weight (float): loss weight. + loss_type (str): type of loss function, 2 opptions: + (1) "cross_entropy" is the standard cross entropy loss. + (2) "hard_pixel_mining" is the loss in DeepLab that samples + top k% hardest pixels. + ignore_value (int): category to be ignored during training. + num_classes (int): number of classes, if set to None, the decoder + will not construct a predictor. + use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d + in ASPP and decoder. 
+ """ + super().__init__() + input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) + + # fmt: off + self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" + in_channels = [x[1].channels for x in input_shape] + in_strides = [x[1].stride for x in input_shape] + aspp_channels = decoder_channels[-1] + self.ignore_value = ignore_value + self.common_stride = common_stride # output stride + self.loss_weight = loss_weight + self.loss_type = loss_type + self.decoder_only = num_classes is None + self.use_depthwise_separable_conv = use_depthwise_separable_conv + # fmt: on + + assert ( + len(project_channels) == len(self.in_features) - 1 + ), "Expected {} project_channels, got {}".format( + len(self.in_features) - 1, len(project_channels) + ) + assert len(decoder_channels) == len( + self.in_features + ), "Expected {} decoder_channels, got {}".format( + len(self.in_features), len(decoder_channels) + ) + self.decoder = nn.ModuleDict() + + use_bias = norm == "" + for idx, in_channel in enumerate(in_channels): + decoder_stage = nn.ModuleDict() + + if idx == len(self.in_features) - 1: + # ASPP module + if train_size is not None: + train_h, train_w = train_size + encoder_stride = in_strides[-1] + if train_h % encoder_stride or train_w % encoder_stride: + raise ValueError("Crop size need to be divisible by encoder stride.") + pool_h = train_h // encoder_stride + pool_w = train_w // encoder_stride + pool_kernel_size = (pool_h, pool_w) + else: + pool_kernel_size = None + project_conv = ASPP( + in_channel, + aspp_channels, + aspp_dilations, + norm=norm, + activation=F.relu, + pool_kernel_size=pool_kernel_size, + dropout=aspp_dropout, + use_depthwise_separable_conv=use_depthwise_separable_conv, + ) + fuse_conv = None + else: + project_conv = Conv2d( + in_channel, + project_channels[idx], + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, project_channels[idx]), + activation=F.relu, + ) + weight_init.c2_xavier_fill(project_conv) + if use_depthwise_separable_conv: + # We use a single 5x5 DepthwiseSeparableConv2d to replace + # 2 3x3 Conv2d since they have the same receptive field, + # proposed in :paper:`Panoptic-DeepLab`. 
+ fuse_conv = DepthwiseSeparableConv2d( + project_channels[idx] + decoder_channels[idx + 1], + decoder_channels[idx], + kernel_size=5, + padding=2, + norm1=norm, + activation1=F.relu, + norm2=norm, + activation2=F.relu, + ) + else: + fuse_conv = nn.Sequential( + Conv2d( + project_channels[idx] + decoder_channels[idx + 1], + decoder_channels[idx], + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, decoder_channels[idx]), + activation=F.relu, + ), + Conv2d( + decoder_channels[idx], + decoder_channels[idx], + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, decoder_channels[idx]), + activation=F.relu, + ), + ) + weight_init.c2_xavier_fill(fuse_conv[0]) + weight_init.c2_xavier_fill(fuse_conv[1]) + + decoder_stage["project_conv"] = project_conv + decoder_stage["fuse_conv"] = fuse_conv + + self.decoder[self.in_features[idx]] = decoder_stage + + if not self.decoder_only: + self.predictor = Conv2d( + decoder_channels[0], num_classes, kernel_size=1, stride=1, padding=0 + ) + nn.init.normal_(self.predictor.weight, 0, 0.001) + nn.init.constant_(self.predictor.bias, 0) + + if self.loss_type == "cross_entropy": + self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value) + elif self.loss_type == "hard_pixel_mining": + self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2) + else: + raise ValueError("Unexpected loss type: %s" % self.loss_type) + + @classmethod + def from_config(cls, cfg, input_shape): + if cfg.INPUT.CROP.ENABLED: + assert cfg.INPUT.CROP.TYPE == "absolute" + train_size = cfg.INPUT.CROP.SIZE + else: + train_size = None + decoder_channels = [cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM] * ( + len(cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES) - 1 + ) + [cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS] + ret = dict( + input_shape={ + k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + }, + project_channels=cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS, + aspp_dilations=cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS, + aspp_dropout=cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT, + decoder_channels=decoder_channels, + common_stride=cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, + norm=cfg.MODEL.SEM_SEG_HEAD.NORM, + train_size=train_size, + loss_weight=cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, + loss_type=cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE, + ignore_value=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + use_depthwise_separable_conv=cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV, + ) + return ret + + def forward(self, features, targets=None): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + y = self.layers(features) + if self.decoder_only: + # Output from self.layers() only contains decoder feature. 
+ return y + if self.training: + return None, self.losses(y, targets) + else: + y = F.interpolate( + y, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + return y, {} + + def layers(self, features): + # Reverse feature maps into top-down order (from low to high resolution) + for f in self.in_features[::-1]: + x = features[f] + proj_x = self.decoder[f]["project_conv"](x) + if self.decoder[f]["fuse_conv"] is None: + # This is aspp module + y = proj_x + else: + # Upsample y + y = F.interpolate(y, size=proj_x.size()[2:], mode="bilinear", align_corners=False) + y = torch.cat([proj_x, y], dim=1) + y = self.decoder[f]["fuse_conv"](y) + if not self.decoder_only: + y = self.predictor(y) + return y + + def losses(self, predictions, targets): + predictions = F.interpolate( + predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + loss = self.loss(predictions, targets) + losses = {"loss_sem_seg": loss * self.loss_weight} + return losses + + +@SEM_SEG_HEADS_REGISTRY.register() +class DeepLabV3Head(nn.Module): + """ + A semantic segmentation head described in :paper:`DeepLabV3`. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + + # fmt: off + self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + in_channels = [input_shape[f].channels for f in self.in_features] + aspp_channels = cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS + aspp_dilations = cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS + self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE + num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES + conv_dims = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM + self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE # output stride + norm = cfg.MODEL.SEM_SEG_HEAD.NORM + self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT + self.loss_type = cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE + train_crop_size = cfg.INPUT.CROP.SIZE + aspp_dropout = cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT + use_depthwise_separable_conv = cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV + # fmt: on + + assert len(self.in_features) == 1 + assert len(in_channels) == 1 + + # ASPP module + if cfg.INPUT.CROP.ENABLED: + assert cfg.INPUT.CROP.TYPE == "absolute" + train_crop_h, train_crop_w = train_crop_size + if train_crop_h % self.common_stride or train_crop_w % self.common_stride: + raise ValueError("Crop size need to be divisible by output stride.") + pool_h = train_crop_h // self.common_stride + pool_w = train_crop_w // self.common_stride + pool_kernel_size = (pool_h, pool_w) + else: + pool_kernel_size = None + self.aspp = ASPP( + in_channels[0], + aspp_channels, + aspp_dilations, + norm=norm, + activation=F.relu, + pool_kernel_size=pool_kernel_size, + dropout=aspp_dropout, + use_depthwise_separable_conv=use_depthwise_separable_conv, + ) + + self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) + nn.init.normal_(self.predictor.weight, 0, 0.001) + nn.init.constant_(self.predictor.bias, 0) + + if self.loss_type == "cross_entropy": + self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value) + elif self.loss_type == "hard_pixel_mining": + self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2) + else: + raise ValueError("Unexpected loss type: %s" % self.loss_type) + + def forward(self, features, targets=None): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + x = features[self.in_features[0]] + x = self.aspp(x) + x = self.predictor(x) + if 
self.training: + return None, self.losses(x, targets) + else: + x = F.interpolate( + x, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + return x, {} + + def losses(self, predictions, targets): + predictions = F.interpolate( + predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + loss = self.loss(predictions, targets) + losses = {"loss_sem_seg": loss * self.loss_weight} + return losses diff --git a/data_processing/detectron2/projects/DeepLab/train_net.py b/data_processing/detectron2/projects/DeepLab/train_net.py new file mode 100644 index 0000000..d3414dd --- /dev/null +++ b/data_processing/detectron2/projects/DeepLab/train_net.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +DeepLab Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os + +import detectron2.data.transforms as T +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import CityscapesSemSegEvaluator, DatasetEvaluators, SemSegEvaluator +from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler + + +def build_sem_seg_train_aug(cfg): + augs = [ + T.ResizeShortestEdge( + cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + ) + ] + if cfg.INPUT.CROP.ENABLED: + augs.append( + T.RandomCrop_CategoryAreaConstraint( + cfg.INPUT.CROP.TYPE, + cfg.INPUT.CROP.SIZE, + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA, + cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + ) + ) + augs.append(T.RandomFlip()) + return augs + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains a number pre-defined logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can use the cleaner + "SimpleTrainer", or write your own training loop. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. 
+ """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type == "sem_seg": + return SemSegEvaluator( + dataset_name, + distributed=True, + output_dir=output_folder, + ) + if evaluator_type == "cityscapes_sem_seg": + return CityscapesSemSegEvaluator(dataset_name) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + if len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def build_train_loader(cls, cfg): + if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE: + mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg)) + else: + mapper = None + return build_detection_train_loader(cfg, mapper=mapper) + + @classmethod + def build_lr_scheduler(cls, cfg, optimizer): + """ + It now calls :func:`detectron2.solver.build_lr_scheduler`. + Overwrite it if you'd like a different scheduler. + """ + return build_lr_scheduler(cfg, optimizer) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_deeplab_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/projects/DensePose/DensePoseData/UV_Processed.mat b/data_processing/detectron2/projects/DensePose/DensePoseData/UV_Processed.mat new file mode 100644 index 0000000..ddaae2e Binary files /dev/null and b/data_processing/detectron2/projects/DensePose/DensePoseData/UV_Processed.mat differ diff --git a/data_processing/detectron2/projects/DensePose/DensePoseData/UV_symmetry_transforms.mat b/data_processing/detectron2/projects/DensePose/DensePoseData/UV_symmetry_transforms.mat new file mode 100644 index 0000000..d09d70f Binary files /dev/null and b/data_processing/detectron2/projects/DensePose/DensePoseData/UV_symmetry_transforms.mat differ diff --git a/data_processing/detectron2/projects/DensePose/DensePoseData/densepose_uv_data.tar.gz b/data_processing/detectron2/projects/DensePose/DensePoseData/densepose_uv_data.tar.gz new file mode 100644 index 0000000..a56333c Binary files /dev/null and b/data_processing/detectron2/projects/DensePose/DensePoseData/densepose_uv_data.tar.gz differ diff --git a/data_processing/detectron2/projects/DensePose/README.md b/data_processing/detectron2/projects/DensePose/README.md new file mode 100644 index 0000000..38f4f83 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/README.md @@ -0,0 +1,64 @@ +# DensePose in Detectron2 + +DensePose aims at learning and establishing dense correspondences between image pixels +and 3D object geometry for deformable objects, such as humans or animals. 
+In this repository, we provide the code to train and evaluate DensePose R-CNN and +various tools to visualize DensePose annotations and results. + +There are two main paradigms used within the DensePose project. + +## [Chart-based Dense Pose Estimation for Humans and Animals](doc/DENSEPOSE_IUV.md) + +
+<!-- figure: chart-based DensePose illustration (image omitted) -->
+ +For chart-based estimation, the 3D object mesh is split into charts and, +for each pixel, the model estimates a chart index `I` and local chart coordinates `(U, V)`. +Please follow the link above to find a [detailed overview](doc/DENSEPOSE_IUV.md#Overview) +of the method, links to trained models along with their performance evaluation in the +[Model Zoo](doc/DENSEPOSE_IUV.md#ModelZoo) and +[references](doc/DENSEPOSE_IUV.md#References) to the corresponding papers. + +## [Continuous Surface Embeddings for Dense Pose Estimation for Humans and Animals](doc/DENSEPOSE_CSE.md) + +
+<!-- figure: continuous surface embeddings (CSE) illustration (image omitted) -->
+ +To establish continuous surface embeddings, the model simultaneously learns +descriptors for mesh vertices and for image pixels. +The embeddings are put into correspondence, thus the location +of each pixel on the 3D model is derived. +Please follow the link above to find a [detailed overview](doc/DENSEPOSE_CSE.md#Overview) +of the method, links to trained models along with their performance evaluation in the +[Model Zoo](doc/DENSEPOSE_CSE.md#ModelZoo) and +[references](doc/DENSEPOSE_CSE.md#References) to the corresponding papers. + +# Quick Start + +See [ Getting Started ](doc/GETTING_STARTED.md) + +# Model Zoo + +Please check the dedicated pages +for [chart-based model zoo](doc/DENSEPOSE_IUV.md#ModelZoo) +and for [continuous surface embeddings model zoo](doc/DENSEPOSE_CSE.md#ModelZoo). + +# What's New + +* June 2021: [DensePose CSE with Cycle Losses](doc/RELEASE_2021_06.md) +* March 2021: [DensePose CSE (a framework to extend DensePose to various categories using 3D models) + and DensePose Evolution (a framework to bootstrap DensePose on unlabeled data) released](doc/RELEASE_2021_03.md) +* April 2020: [DensePose Confidence Estimation and Model Zoo Improvements](doc/RELEASE_2020_04.md) + +# License + +Detectron2 is released under the [Apache 2.0 license](../../LICENSE) + +## Citing DensePose + +If you use DensePose, please refer to the BibTeX entries +for [chart-based models](doc/DENSEPOSE_IUV.md#References) +and for [continuous surface embeddings](doc/DENSEPOSE_CSE.md#References). + diff --git a/data_processing/detectron2/projects/DensePose/apply_net.py b/data_processing/detectron2/projects/DensePose/apply_net.py new file mode 100644 index 0000000..c854f4e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/apply_net.py @@ -0,0 +1,385 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import argparse +import glob +import logging +import os +import sys +from typing import Any, ClassVar, Dict, List +import torch + +from detectron2.config import CfgNode, get_cfg +from detectron2.data.detection_utils import read_image +from detectron2.engine.defaults import DefaultPredictor +from detectron2.structures.instances import Instances +from detectron2.utils.logger import setup_logger + +from densepose import add_densepose_config +from densepose.structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput +from densepose.utils.logger import verbosity_to_level +from densepose.vis.base import CompoundVisualizer +from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer +from densepose.vis.densepose_outputs_vertex import ( + DensePoseOutputsTextureVisualizer, + DensePoseOutputsVertexVisualizer, + get_texture_atlases, +) +from densepose.vis.densepose_results import ( + DensePoseResultsContourVisualizer, + DensePoseResultsFineSegmentationVisualizer, + DensePoseResultsUVisualizer, + DensePoseResultsVVisualizer, +) +from densepose.vis.densepose_results_textures import ( + DensePoseResultsVisualizerWithTexture, + get_texture_atlas, +) +from densepose.vis.extractor import ( + CompoundExtractor, + DensePoseOutputsExtractor, + DensePoseResultExtractor, + create_extractor, +) + +DOC = """Apply Net - a tool to print / visualize DensePose results +""" + +LOGGER_NAME = "apply_net" +logger = logging.getLogger(LOGGER_NAME) + +_ACTION_REGISTRY: Dict[str, "Action"] = {} + + +class Action(object): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + parser.add_argument( + "-v", + "--verbosity", + action="count", + help="Verbose mode. Multiple -v options increase the verbosity.", + ) + + +def register_action(cls: type): + """ + Decorator for action classes to automate action registration + """ + global _ACTION_REGISTRY + _ACTION_REGISTRY[cls.COMMAND] = cls + return cls + + +class InferenceAction(Action): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(InferenceAction, cls).add_arguments(parser) + parser.add_argument("cfg", metavar="", help="Config file") + parser.add_argument("model", metavar="", help="Model file") + parser.add_argument("input", metavar="", help="Input data") + parser.add_argument( + "--opts", + help="Modify config options using the command-line 'KEY VALUE' pairs", + default=[], + nargs=argparse.REMAINDER, + ) + + @classmethod + def execute(cls: type, args: argparse.Namespace): + logger.info(f"Loading config from {args.cfg}") + opts = [] + cfg = cls.setup_config(args.cfg, args.model, args, opts) + logger.info(f"Loading model from {args.model}") + predictor = DefaultPredictor(cfg) + logger.info(f"Loading data from {args.input}") + file_list = cls._get_input_file_list(args.input) + if len(file_list) == 0: + logger.warning(f"No input images for {args.input}") + return + context = cls.create_context(args, cfg) + + from tqdm import tqdm + for file_name in tqdm(file_list): + img = read_image(file_name, format="BGR") # predictor expects BGR image. 
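+            # Run the detector without tracking gradients; each action then handles the predicted Instances.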
+ with torch.no_grad(): + outputs = predictor(img)["instances"] + cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs) + cls.postexecute(context) + + @classmethod + def setup_config( + cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str] + ): + cfg = get_cfg() + add_densepose_config(cfg) + cfg.merge_from_file(config_fpath) + cfg.merge_from_list(args.opts) + if opts: + cfg.merge_from_list(opts) + cfg.MODEL.WEIGHTS = model_fpath + cfg.freeze() + return cfg + + @classmethod + def _get_input_file_list(cls: type, input_spec: str): + #print('input_spec: ', input_spec) + + if os.path.isdir(input_spec): + file_list = [ + os.path.join(input_spec, fname) + for fname in os.listdir(input_spec) + if os.path.isfile(os.path.join(input_spec, fname)) + ] + elif os.path.isfile(input_spec): + file_list = [input_spec] + else: + file_list = glob.glob(input_spec) + + #print('file_list: ', file_list) + return file_list + + +@register_action +class DumpAction(InferenceAction): + """ + Dump action that outputs results to a pickle file + """ + + COMMAND: ClassVar[str] = "dump" + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(DumpAction, cls).add_arguments(parser) + parser.add_argument( + "--output", + metavar="", + default="results.pkl", + help="File name to save dump to", + ) + + @classmethod + def execute_on_outputs( + cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances + ): + image_fpath = entry["file_name"] + logger.info(f"Processing {image_fpath}") + result = {"file_name": image_fpath} + if outputs.has("scores"): + result["scores"] = outputs.get("scores").cpu() + if outputs.has("pred_boxes"): + result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu() + if outputs.has("pred_densepose"): + if isinstance(outputs.pred_densepose, DensePoseChartPredictorOutput): + extractor = DensePoseResultExtractor() + elif isinstance(outputs.pred_densepose, DensePoseEmbeddingPredictorOutput): + extractor = DensePoseOutputsExtractor() + result["pred_densepose"] = extractor(outputs)[0] + context["results"].append(result) + + @classmethod + def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode): + context = {"results": [], "out_fname": args.output} + return context + + @classmethod + def postexecute(cls: type, context: Dict[str, Any]): + out_fname = context["out_fname"] + out_dir = os.path.dirname(out_fname) + if len(out_dir) > 0 and not os.path.exists(out_dir): + os.makedirs(out_dir) + with open(out_fname, "wb") as hFile: + torch.save(context["results"], hFile) + logger.info(f"Output saved to {out_fname}") + + +@register_action +class ShowAction(InferenceAction): + """ + Show action that visualizes selected entries on an image + """ + + COMMAND: ClassVar[str] = "show" + VISUALIZERS: ClassVar[Dict[str, object]] = { + "dp_contour": DensePoseResultsContourVisualizer, + "dp_segm": DensePoseResultsFineSegmentationVisualizer, + "dp_u": DensePoseResultsUVisualizer, + "dp_v": DensePoseResultsVVisualizer, + "dp_iuv_texture": DensePoseResultsVisualizerWithTexture, + "dp_cse_texture": DensePoseOutputsTextureVisualizer, + "dp_vertex": DensePoseOutputsVertexVisualizer, + "bbox": ScoredBoundingBoxVisualizer, + } + + @classmethod + def add_parser(cls: type, 
subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(ShowAction, cls).add_arguments(parser) + parser.add_argument( + "visualizations", + metavar="", + help="Comma separated list of visualizations, possible values: " + "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))), + ) + parser.add_argument( + "--min_score", + metavar="", + default=0.8, + type=float, + help="Minimum detection score to visualize", + ) + parser.add_argument( + "--nms_thresh", metavar="", default=None, type=float, help="NMS threshold" + ) + parser.add_argument( + "--texture_atlas", + metavar="", + default=None, + help="Texture atlas file (for IUV texture transfer)", + ) + parser.add_argument( + "--texture_atlases_map", + metavar="", + default=None, + help="JSON string of a dict containing texture atlas files for each mesh", + ) + parser.add_argument( + "--output", + metavar="", + default="outputres.png", + help="File name to save output to", + ) + + @classmethod + def setup_config( + cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str] + ): + opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST") + opts.append(str(args.min_score)) + if args.nms_thresh is not None: + opts.append("MODEL.ROI_HEADS.NMS_THRESH_TEST") + opts.append(str(args.nms_thresh)) + cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts) + return cfg + + @classmethod + def execute_on_outputs( + cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances + ): + import cv2 + import numpy as np + + visualizer = context["visualizer"] + extractor = context["extractor"] + image_fpath = entry["file_name"] + logger.info(f"Processing {image_fpath}") + image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY) + image = np.tile(image[:, :, np.newaxis], [1, 1, 3]) + scores = outputs.scores + + data = extractor(outputs) + # if "pexels-photo-3268732_1" not in image_fpath: + # return + # print('scores', scores) + image = np.ones_like(image)*255 + image_vis_list = visualizer.visualize(image, data) + # check if image_vis is all white + for i, image_vis in enumerate(image_vis_list): + + if image_vis is not None : + + all_white = np.all(image_vis[:,:,2] == 255) + #print(np.sum(image_vis[:,:,2] == 255) / (image_vis.shape[0] * image_vis.shape[1])) + most_white = np.sum(image_vis[:,:,2] == 255) / (image_vis.shape[0] * image_vis.shape[1]) > 0.9 + + if all_white or most_white: + #print('all white') + continue + + entry_idx = context["entry_idx"] + 1 + # out_fname = cls._get_out_fname(entry_idx, context["out_fname"]) + + out_fname = os.path.basename(image_fpath).split('.')[0] + f'_seg{i}.png' + + out_dir = context["out_fname"] # os.path.dirname(out_fname) + out_fname = os.path.join(out_dir, out_fname) + if len(out_dir) > 0 and not os.path.exists(out_dir): + os.makedirs(out_dir) + cv2.imwrite(out_fname, image_vis) + logger.info(f"Output saved to {out_fname}") + context["entry_idx"] += 1 + + + + @classmethod + def postexecute(cls: type, context: Dict[str, Any]): + pass + + @classmethod + def _get_out_fname(cls: type, entry_idx: int, fname_base: str): + #print('fname_base: ', fname_base) + base, ext = os.path.splitext(fname_base) + return base + ".{0:04d}".format(entry_idx) + ext + + @classmethod + def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode) -> Dict[str, 
Any]: + vis_specs = args.visualizations.split(",") + visualizers = [] + extractors = [] + for vis_spec in vis_specs: + texture_atlas = get_texture_atlas(args.texture_atlas) + texture_atlases_dict = get_texture_atlases(args.texture_atlases_map) + vis = cls.VISUALIZERS[vis_spec]( + cfg=cfg, + texture_atlas=texture_atlas, + texture_atlases_dict=texture_atlases_dict, + ) + visualizers.append(vis) + extractor = create_extractor(vis) + extractors.append(extractor) + visualizer = CompoundVisualizer(visualizers) + extractor = CompoundExtractor(extractors) + context = { + "extractor": extractor, + "visualizer": visualizer, + "out_fname": args.output, + "entry_idx": 0, + } + return context + + +def create_argument_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=DOC, + formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120), + ) + parser.set_defaults(func=lambda _: parser.print_help(sys.stdout)) + subparsers = parser.add_subparsers(title="Actions") + for _, action in _ACTION_REGISTRY.items(): + action.add_parser(subparsers) + return parser + + +def main(): + parser = create_argument_parser() + args = parser.parse_args() + verbosity = getattr(args, "verbosity", None) + global logger + logger = setup_logger(name=LOGGER_NAME) + logger.setLevel(verbosity_to_level(verbosity)) + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/data_processing/detectron2/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml b/data_processing/detectron2/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml new file mode 100644 index 0000000..1579187 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml @@ -0,0 +1,48 @@ +VERSION: 2 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
+ POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 +DATASETS: + TRAIN: ("densepose_coco_2014_train", "densepose_coco_2014_valminusminival") + TEST: ("densepose_coco_2014_minival",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.01 + STEPS: (60000, 80000) + MAX_ITER: 90000 + WARMUP_FACTOR: 0.1 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml new file mode 100644 index 0000000..36eabfe --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://1drv.ms/u/s!Aus8VCZ_C_33dYBMemi9xOUFR0w" + BACKBONE: + NAME: "build_hrfpn_backbone" + RPN: + IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5'] + ROI_HEADS: + IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5'] +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "norm" + BASE_LR: 0.03 diff --git a/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w40_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w40_s1x.yaml new file mode 100644 index 0000000..0ca8085 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w40_s1x.yaml @@ -0,0 +1,23 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://1drv.ms/u/s!Aus8VCZ_C_33ck0gvo5jfoWBOPo" + BACKBONE: + NAME: "build_hrfpn_backbone" + RPN: + IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5'] + ROI_HEADS: + IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5'] + HRNET: + STAGE2: + NUM_CHANNELS: [40, 80] + STAGE3: + NUM_CHANNELS: [40, 80, 160] + STAGE4: + NUM_CHANNELS: [40, 80, 160, 320] +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "norm" + BASE_LR: 0.03 diff --git a/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w48_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w48_s1x.yaml new file mode 100644 index 0000000..a3f437a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w48_s1x.yaml @@ -0,0 +1,23 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://1drv.ms/u/s!Aus8VCZ_C_33dKvqI6pBZlifgJk" + BACKBONE: + NAME: "build_hrfpn_backbone" + RPN: + IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5'] + ROI_HEADS: + IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5'] + HRNET: + STAGE2: + NUM_CHANNELS: [48, 96] + STAGE3: + NUM_CHANNELS: [48, 96, 192] + STAGE4: + NUM_CHANNELS: [48, 96, 192, 384] +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "norm" + BASE_LR: 0.03 diff --git 
a/data_processing/detectron2/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN-Human.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN-Human.yaml new file mode 100644 index 0000000..1d44a8a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN-Human.yaml @@ -0,0 +1,21 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + ROI_DENSEPOSE_HEAD: + CSE: + EMBEDDERS: + "smpl_27554": + TYPE: vertex_feature + NUM_VERTICES: 27554 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + #INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_smpl_27554_256.pkl" + INIT_FILE: "./phi_smpl_27554_256.pkl" +DATASETS: + TRAIN: + - "densepose_coco_2014_train_cse" + - "densepose_coco_2014_valminusminival_cse" + TEST: + - "densepose_coco_2014_minival_cse" + CLASS_TO_MESH_NAME_MAPPING: + "0": "smpl_27554" diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN.yaml new file mode 100644 index 0000000..de3b260 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN.yaml @@ -0,0 +1,60 @@ +VERSION: 2 +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
+ POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 + PREDICTOR_NAME: "DensePoseEmbeddingPredictor" + LOSS_NAME: "DensePoseCseLoss" + CSE: + # embedding loss, possible values: + # - "EmbeddingLoss" + # - "SoftEmbeddingLoss" + EMBED_LOSS_NAME: "EmbeddingLoss" +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.01 + STEPS: (60000, 80000) + MAX_ITER: 90000 + WARMUP_FACTOR: 0.1 + CLIP_GRADIENTS: + CLIP_TYPE: norm + CLIP_VALUE: 1.0 + ENABLED: true + NORM_TYPE: 2.0 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DENSEPOSE_EVALUATION: + TYPE: cse + STORAGE: file diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_s1x.yaml new file mode 100644 index 0000000..69d8589 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + CSE: + EMBED_LOSS_NAME: "EmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml new file mode 100644 index 0000000..141657c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_s1x.yaml new file mode 100644 index 0000000..d2eea1e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + CSE: + EMBED_LOSS_NAME: "EmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_soft_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_soft_s1x.yaml new file mode 100644 index 0000000..1c362e1 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_soft_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + CSE: + EMBED_LOSS_NAME: 
"SoftEmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_DL_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_DL_s1x.yaml new file mode 100644 index 0000000..26684de --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_DL_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + CSE: + EMBED_LOSS_NAME: "EmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_DL_soft_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_DL_soft_s1x.yaml new file mode 100644 index 0000000..b53501d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_DL_soft_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml new file mode 100644 index 0000000..c186625 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + CSE: + EMBED_LOSS_NAME: "EmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_16k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_16k.yaml new file mode 100644 index 0000000..69ab226 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_16k.yaml @@ -0,0 +1,133 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "cat_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl" + "dog_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl" + 
"sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds2_train_v1" + TEST: + - "densepose_lvis_v1_ds2_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds2_train_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds2_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CATEGORY_MAPS: + "densepose_lvis_v1_ds2_train_v1": + "1202": 943 # zebra -> sheep + "569": 943 # horse -> sheep + "496": 943 # giraffe -> sheep + "422": 943 # elephant -> sheep + "80": 943 # cow -> sheep + "76": 943 # bear -> sheep + "225": 943 # cat -> sheep + "378": 943 # dog -> sheep + "densepose_lvis_v1_ds2_val_v1": + "1202": 943 # zebra -> sheep + "569": 943 # horse -> sheep + "496": 943 # giraffe -> sheep + "422": 943 # elephant -> sheep + "80": 943 # cow -> sheep + "76": 943 # bear -> sheep + "225": 943 # cat -> sheep + "378": 943 # dog -> sheep + CLASS_TO_MESH_NAME_MAPPING: + # Note: different classes are mapped to a single class + # mesh is chosen based on GT data, so this is just some + # value which has no particular meaning + "0": "sheep_5004" +SOLVER: + MAX_ITER: 16000 + STEPS: (12000, 14000) +DENSEPOSE_EVALUATION: + EVALUATE_MESH_ALIGNMENT: True diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml new file mode 100644 index 
0000000..921a9c1 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml @@ -0,0 +1,133 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "cat_5001": + TYPE: vertex_feature + NUM_VERTICES: 5001 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_5001_256.pkl" + "dog_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_5002_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds1_train_v1" + TEST: + - "densepose_lvis_v1_ds1_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds1_train_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds1_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 
80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CATEGORY_MAPS: + "densepose_lvis_v1_ds1_train_v1": + "1202": 943 # zebra -> sheep + "569": 943 # horse -> sheep + "496": 943 # giraffe -> sheep + "422": 943 # elephant -> sheep + "80": 943 # cow -> sheep + "76": 943 # bear -> sheep + "225": 943 # cat -> sheep + "378": 943 # dog -> sheep + "densepose_lvis_v1_ds1_val_v1": + "1202": 943 # zebra -> sheep + "569": 943 # horse -> sheep + "496": 943 # giraffe -> sheep + "422": 943 # elephant -> sheep + "80": 943 # cow -> sheep + "76": 943 # bear -> sheep + "225": 943 # cat -> sheep + "378": 943 # dog -> sheep + CLASS_TO_MESH_NAME_MAPPING: + # Note: different classes are mapped to a single class + # mesh is chosen based on GT data, so this is just some + # value which has no particular meaning + "0": "sheep_5004" +SOLVER: + MAX_ITER: 4000 + STEPS: (3000, 3500) +DENSEPOSE_EVALUATION: + EVALUATE_MESH_ALIGNMENT: True diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k.yaml new file mode 100644 index 0000000..1b5a098 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k.yaml @@ -0,0 +1,119 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k/270668502/model_final_21b1d2.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 9 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "cat_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl" + "dog_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: 
vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds2_train_v1" + TEST: + - "densepose_lvis_v1_ds2_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds2_train_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds2_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CLASS_TO_MESH_NAME_MAPPING: + "0": "bear_4936" + "1": "cow_5002" + "2": "cat_7466" + "3": "dog_7466" + "4": "elephant_5002" + "5": "giraffe_5002" + "6": "horse_5004" + "7": "sheep_5004" + "8": "zebra_5002" +SOLVER: + MAX_ITER: 16000 + STEPS: (12000, 14000) +DENSEPOSE_EVALUATION: + EVALUATE_MESH_ALIGNMENT: True diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_i2m_16k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_i2m_16k.yaml new file mode 100644 index 0000000..18d6dac --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_i2m_16k.yaml @@ -0,0 +1,121 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k/270668502/model_final_21b1d2.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 9 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + PIX_TO_SHAPE_CYCLE_LOSS: + ENABLED: True + EMBEDDERS: + "cat_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl" + "dog_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + 
INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds2_train_v1" + TEST: + - "densepose_lvis_v1_ds2_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds2_train_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds2_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CLASS_TO_MESH_NAME_MAPPING: + "0": "bear_4936" + "1": "cow_5002" + "2": "cat_7466" + "3": "dog_7466" + "4": "elephant_5002" + "5": "giraffe_5002" + "6": "horse_5004" + "7": "sheep_5004" + "8": "zebra_5002" +SOLVER: + MAX_ITER: 16000 + STEPS: (12000, 14000) +DENSEPOSE_EVALUATION: + EVALUATE_MESH_ALIGNMENT: True diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_m2m_16k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_m2m_16k.yaml new file mode 100644 index 0000000..6b798ae --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_m2m_16k.yaml @@ -0,0 +1,138 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k/267687159/model_final_354e61.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 9 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + SHAPE_TO_SHAPE_CYCLE_LOSS: + ENABLED: True + EMBEDDERS: + "cat_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: 
"https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl" + "dog_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" + "smpl_27554": + TYPE: vertex_feature + NUM_VERTICES: 27554 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_smpl_27554_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds2_train_v1" + TEST: + - "densepose_lvis_v1_ds2_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds2_train_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds2_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CLASS_TO_MESH_NAME_MAPPING: + "0": "bear_4936" + "1": "cow_5002" + "2": "cat_7466" + "3": "dog_7466" + "4": "elephant_5002" + "5": "giraffe_5002" + "6": "horse_5004" + "7": "sheep_5004" + "8": "zebra_5002" +SOLVER: + MAX_ITER: 16000 + STEPS: (12000, 14000) +DENSEPOSE_EVALUATION: + EVALUATE_MESH_ALIGNMENT: True + MESH_ALIGNMENT_MESH_NAMES: + - bear_4936 + - cow_5002 + - cat_7466 + - dog_7466 + - elephant_5002 + - giraffe_5002 + - horse_5004 + - sheep_5004 + - zebra_5002 
diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_16k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_16k.yaml new file mode 100644 index 0000000..b1462e3 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_16k.yaml @@ -0,0 +1,119 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 9 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "cat_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl" + "dog_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds2_train_v1" + TEST: + - "densepose_lvis_v1_ds2_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds2_train_v1": 
+ - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds2_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CLASS_TO_MESH_NAME_MAPPING: + "0": "bear_4936" + "1": "cow_5002" + "2": "cat_7466" + "3": "dog_7466" + "4": "elephant_5002" + "5": "giraffe_5002" + "6": "horse_5004" + "7": "sheep_5004" + "8": "zebra_5002" +SOLVER: + MAX_ITER: 16000 + STEPS: (12000, 14000) +DENSEPOSE_EVALUATION: + EVALUATE_MESH_ALIGNMENT: True diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_4k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_4k.yaml new file mode 100644 index 0000000..ba4b81d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_4k.yaml @@ -0,0 +1,119 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 9 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "cat_5001": + TYPE: vertex_feature + NUM_VERTICES: 5001 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_5001_256.pkl" + "dog_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_5002_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 
+ FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds1_train_v1" + TEST: + - "densepose_lvis_v1_ds1_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds1_train_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds1_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CLASS_TO_MESH_NAME_MAPPING: + "0": "bear_4936" + "1": "cow_5002" + "2": "cat_5001" + "3": "dog_5002" + "4": "elephant_5002" + "5": "giraffe_5002" + "6": "horse_5004" + "7": "sheep_5004" + "8": "zebra_5002" +SOLVER: + MAX_ITER: 4000 + STEPS: (3000, 3500) +DENSEPOSE_EVALUATION: + EVALUATE_MESH_ALIGNMENT: True diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k.yaml new file mode 100644 index 0000000..bb6136e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k.yaml @@ -0,0 +1,118 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 9 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + COARSE_SEGM_TRAINED_BY_MASKS: True + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBED_LOSS_WEIGHT: 0.0 + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "cat_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl" + "dog_7466": + TYPE: vertex_feature + NUM_VERTICES: 7466 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: 
"https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_ds2_train_v1" + TEST: + - "densepose_lvis_v1_ds2_val_v1" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_ds2_train_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_ds2_val_v1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CLASS_TO_MESH_NAME_MAPPING: + "0": "bear_4936" + "1": "cow_5002" + "2": "cat_7466" + "3": "dog_7466" + "4": "elephant_5002" + "5": "giraffe_5002" + "6": "horse_5004" + "7": "sheep_5004" + "8": "zebra_5002" +SOLVER: + MAX_ITER: 24000 + STEPS: (20000, 22000) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k.yaml new file mode 100644 index 0000000..3bccb78 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k.yaml @@ -0,0 +1,29 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + GEODESIC_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "chimp_5029": + TYPE: vertex_feature + NUM_VERTICES: 5029 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_chimp_5029_256.pkl" +DATASETS: + TRAIN: + - "densepose_chimps_cse_train" + TEST: + - "densepose_chimps_cse_val" + CLASS_TO_MESH_NAME_MAPPING: + "0": "chimp_5029" +SOLVER: + MAX_ITER: 4000 + STEPS: (3000, 3500) diff --git a/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_s1x.yaml new file mode 100644 index 0000000..9662fb8 --- 
/dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1M_s1x.yaml new file mode 100644 index 0000000..3c16763 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1M_s1x.yaml @@ -0,0 +1,18 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml new file mode 100644 index 0000000..15475b1 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml new file mode 100644 index 0000000..0cbe07f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml @@ -0,0 +1,18 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml new file mode 100644 index 0000000..7546b96 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git 
a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml new file mode 100644 index 0000000..045f7f0 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml @@ -0,0 +1,10 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1M_s1x.yaml new file mode 100644 index 0000000..9334e18 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1M_s1x.yaml @@ -0,0 +1,18 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml new file mode 100644 index 0000000..ace6209 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2M_s1x.yaml new file mode 100644 index 0000000..90f0be2 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2M_s1x.yaml @@ -0,0 +1,18 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml new file mode 100644 index 0000000..766c098 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + 
WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x.yaml new file mode 100644 index 0000000..af44fb7 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml new file mode 100644 index 0000000..8e79a1b --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml @@ -0,0 +1,17 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NUM_COARSE_SEGM_CHANNELS: 15 + POOLER_RESOLUTION: 14 + HEATMAP_SIZE: 56 + INDEX_WEIGHTS: 2.0 + PART_WEIGHTS: 0.3 + POINT_REGRESSION_WEIGHTS: 0.1 + DECODER_ON: False +SOLVER: + BASE_LR: 0.002 + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1M_s1x.yaml new file mode 100644 index 0000000..18a417a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1M_s1x.yaml @@ -0,0 +1,18 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml new file mode 100644 index 0000000..f3720ef --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2M_s1x.yaml new file mode 100644 index 0000000..8a413d2 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2M_s1x.yaml @@ -0,0 +1,18 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + 
CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml new file mode 100644 index 0000000..5a47cc0 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml new file mode 100644 index 0000000..52a170b --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml @@ -0,0 +1,10 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml new file mode 100644 index 0000000..8a81f2a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml @@ -0,0 +1,20 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: norm + CLIP_VALUE: 100.0 + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml new file mode 100644 index 0000000..d36e542 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2M_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2M_s1x.yaml new file mode 100644 index 0000000..5cf29ea --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2M_s1x.yaml @@ -0,0 +1,18 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + SEGM_CONFIDENCE: + ENABLED: True + 
POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml new file mode 100644 index 0000000..e880d46 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml new file mode 100644 index 0000000..d2dd14c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml new file mode 100644 index 0000000..6c5391f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml @@ -0,0 +1,17 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NUM_COARSE_SEGM_CHANNELS: 15 + POOLER_RESOLUTION: 14 + HEATMAP_SIZE: 56 + INDEX_WEIGHTS: 2.0 + PART_WEIGHTS: 0.3 + POINT_REGRESSION_WEIGHTS: 0.1 + DECODER_ON: False +SOLVER: + BASE_LR: 0.002 + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/data_processing/detectron2/projects/DensePose/configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml b/data_processing/detectron2/projects/DensePose/configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml new file mode 100644 index 0000000..f09d723 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml @@ -0,0 +1,91 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
+ POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("base_coco_2017_train", "densepose_coco_2014_train") + TEST: ("densepose_chimps",) + CATEGORY_MAPS: + "base_coco_2017_train": + "16": 1 # bird -> person + "17": 1 # cat -> person + "18": 1 # dog -> person + "19": 1 # horse -> person + "20": 1 # sheep -> person + "21": 1 # cow -> person + "22": 1 # elephant -> person + "23": 1 # bear -> person + "24": 1 # zebra -> person + "25": 1 # girafe -> person + "base_coco_2017_val": + "16": 1 # bird -> person + "17": 1 # cat -> person + "18": 1 # dog -> person + "19": 1 # horse -> person + "20": 1 # sheep -> person + "21": 1 # cow -> person + "22": 1 # elephant -> person + "23": 1 # bear -> person + "24": 1 # zebra -> person + "25": 1 # girafe -> person + WHITELISTED_CATEGORIES: + "base_coco_2017_train": + - 1 # person + - 16 # bird + - 17 # cat + - 18 # dog + - 19 # horse + - 20 # sheep + - 21 # cow + - 22 # elephant + - 23 # bear + - 24 # zebra + - 25 # girafe + "base_coco_2017_val": + - 1 # person + - 16 # bird + - 17 # cat + - 18 # dog + - 19 # horse + - 20 # sheep + - 21 # cow + - 22 # elephant + - 23 # bear + - 24 # zebra + - 25 # girafe +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA.yaml b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA.yaml new file mode 100644 index 0000000..6296692 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA.yaml @@ -0,0 +1,28 @@ +_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 + COARSE_SEGM_TRAINED_BY_MASKS: True + INDEX_WEIGHTS: 1.0 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + WARMUP_FACTOR: 0.025 + MAX_ITER: 270000 + STEPS: (210000, 250000) diff --git a/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm.yaml b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm.yaml new file mode 100644 index 0000000..033918e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm.yaml @@ -0,0 +1,56 @@ +_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml" +MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl + RESNETS: + DEPTH: 50 + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_DENSEPOSE_HEAD: + NAME: 
"DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 + COARSE_SEGM_TRAINED_BY_MASKS: True +BOOTSTRAP_DATASETS: + - DATASET: "chimpnsee" + RATIO: 1.0 + IMAGE_LOADER: + TYPE: "video_keyframe" + SELECT: + STRATEGY: "random_k" + NUM_IMAGES: 4 + TRANSFORM: + TYPE: "resize" + MIN_SIZE: 800 + MAX_SIZE: 1333 + BATCH_SIZE: 8 + NUM_WORKERS: 1 + INFERENCE: + INPUT_BATCH_SIZE: 1 + OUTPUT_BATCH_SIZE: 1 + DATA_SAMPLER: + # supported types: + # densepose_uniform + # densepose_UV_confidence + # densepose_fine_segm_confidence + # densepose_coarse_segm_confidence + TYPE: "densepose_coarse_segm_confidence" + COUNT_PER_CLASS: 8 + FILTER: + TYPE: "detection_score" + MIN_VALUE: 0.8 +BOOTSTRAP_MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 270000 + STEPS: (210000, 250000) diff --git a/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm.yaml b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm.yaml new file mode 100644 index 0000000..5814a4a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm.yaml @@ -0,0 +1,56 @@ +_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml" +MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl + RESNETS: + DEPTH: 50 + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 + COARSE_SEGM_TRAINED_BY_MASKS: True +BOOTSTRAP_DATASETS: + - DATASET: "chimpnsee" + RATIO: 1.0 + IMAGE_LOADER: + TYPE: "video_keyframe" + SELECT: + STRATEGY: "random_k" + NUM_IMAGES: 4 + TRANSFORM: + TYPE: "resize" + MIN_SIZE: 800 + MAX_SIZE: 1333 + BATCH_SIZE: 8 + NUM_WORKERS: 1 + INFERENCE: + INPUT_BATCH_SIZE: 1 + OUTPUT_BATCH_SIZE: 1 + DATA_SAMPLER: + # supported types: + # densepose_uniform + # densepose_UV_confidence + # densepose_fine_segm_confidence + # densepose_coarse_segm_confidence + TYPE: "densepose_fine_segm_confidence" + COUNT_PER_CLASS: 8 + FILTER: + TYPE: "detection_score" + MIN_VALUE: 0.8 +BOOTSTRAP_MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 270000 + STEPS: (210000, 250000) diff --git a/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml new file mode 100644 index 0000000..d591ea6 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml @@ -0,0 +1,56 @@ 
+_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml" +MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl + RESNETS: + DEPTH: 50 + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 + COARSE_SEGM_TRAINED_BY_MASKS: True +BOOTSTRAP_DATASETS: + - DATASET: "chimpnsee" + RATIO: 1.0 + IMAGE_LOADER: + TYPE: "video_keyframe" + SELECT: + STRATEGY: "random_k" + NUM_IMAGES: 4 + TRANSFORM: + TYPE: "resize" + MIN_SIZE: 800 + MAX_SIZE: 1333 + BATCH_SIZE: 8 + NUM_WORKERS: 1 + INFERENCE: + INPUT_BATCH_SIZE: 1 + OUTPUT_BATCH_SIZE: 1 + DATA_SAMPLER: + # supported types: + # densepose_uniform + # densepose_UV_confidence + # densepose_fine_segm_confidence + # densepose_coarse_segm_confidence + TYPE: "densepose_uniform" + COUNT_PER_CLASS: 8 + FILTER: + TYPE: "detection_score" + MIN_VALUE: 0.8 +BOOTSTRAP_MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 270000 + STEPS: (210000, 250000) diff --git a/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv.yaml b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv.yaml new file mode 100644 index 0000000..110acff --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv.yaml @@ -0,0 +1,56 @@ +_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml" +MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl + RESNETS: + DEPTH: 50 + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + SEGM_CONFIDENCE: + ENABLED: True + POINT_REGRESSION_WEIGHTS: 0.0005 + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 + COARSE_SEGM_TRAINED_BY_MASKS: True +BOOTSTRAP_DATASETS: + - DATASET: "chimpnsee" + RATIO: 1.0 + IMAGE_LOADER: + TYPE: "video_keyframe" + SELECT: + STRATEGY: "random_k" + NUM_IMAGES: 4 + TRANSFORM: + TYPE: "resize" + MIN_SIZE: 800 + MAX_SIZE: 1333 + BATCH_SIZE: 8 + NUM_WORKERS: 1 + INFERENCE: + INPUT_BATCH_SIZE: 1 + OUTPUT_BATCH_SIZE: 1 + DATA_SAMPLER: + # supported types: + # densepose_uniform + # densepose_UV_confidence + # densepose_fine_segm_confidence + # densepose_coarse_segm_confidence + TYPE: "densepose_UV_confidence" + COUNT_PER_CLASS: 8 + FILTER: + TYPE: "detection_score" + MIN_VALUE: 0.8 +BOOTSTRAP_MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 270000 + STEPS: (210000, 250000) diff --git 
a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_DL_instant_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_DL_instant_test.yaml new file mode 100644 index 0000000..3b43f75 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_DL_instant_test.yaml @@ -0,0 +1,11 @@ +_BASE_: "../../cse/Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100_cse",) + TEST: ("densepose_coco_2014_minival_100_cse",) +SOLVER: + MAX_ITER: 40 + STEPS: (30,) diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_instant_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_instant_test.yaml new file mode 100644 index 0000000..a2c49a2 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_instant_test.yaml @@ -0,0 +1,126 @@ +_BASE_: "../../cse/Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 9 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + CSE: + EMBED_LOSS_NAME: "SoftEmbeddingLoss" + EMBEDDING_DIST_GAUSS_SIGMA: 0.1 + EMBEDDERS: + "cat_5001": + TYPE: vertex_feature + NUM_VERTICES: 5001 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_5001_256.pkl" + "dog_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_5002_256.pkl" + "sheep_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl" + "horse_5004": + TYPE: vertex_feature + NUM_VERTICES: 5004 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl" + "zebra_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl" + "giraffe_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl" + "elephant_5002": + TYPE: vertex_feature + NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl" + "cow_5002": + TYPE: vertex_feature + 
NUM_VERTICES: 5002 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl" + "bear_4936": + TYPE: vertex_feature + NUM_VERTICES: 4936 + FEATURE_DIM: 256 + FEATURES_TRAINABLE: False + IS_TRAINABLE: True + INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl" +DATASETS: + TRAIN: + - "densepose_lvis_v1_train1" + - "densepose_lvis_v1_train2" + TEST: + - "densepose_lvis_v1_val_animals_100" + WHITELISTED_CATEGORIES: + "densepose_lvis_v1_train1": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_train2": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + "densepose_lvis_v1_val_animals_100": + - 943 # sheep + - 1202 # zebra + - 569 # horse + - 496 # giraffe + - 422 # elephant + - 80 # cow + - 76 # bear + - 225 # cat + - 378 # dog + CLASS_TO_MESH_NAME_MAPPING: + "0": "bear_4936" + "1": "cow_5002" + "2": "cat_5001" + "3": "dog_5002" + "4": "elephant_5002" + "5": "giraffe_5002" + "6": "horse_5004" + "7": "sheep_5004" + "8": "zebra_5002" +SOLVER: + MAX_ITER: 40 + STEPS: (30,) diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_HRFPN_HRNet_w32_instant_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_HRFPN_HRNet_w32_instant_test.yaml new file mode 100644 index 0000000..95677ce --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_HRFPN_HRNet_w32_instant_test.yaml @@ -0,0 +1,8 @@ +_BASE_: "../HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml" +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + MAX_ITER: 40 + STEPS: (30,) + IMS_PER_BATCH: 2 diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml new file mode 100644 index 0000000..b90989e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + MAX_ITER: 40 + STEPS: (30,) diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml new file mode 100644 index 0000000..b124da1 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl" +DATASETS: + TRAIN: () 
+ TEST: ("densepose_coco_2014_minival_100",) +TEST: + AUG: + ENABLED: True + MIN_SIZES: (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) + MAX_SIZE: 4000 + FLIP: True + EXPECTED_RESULTS: [["bbox_TTA", "AP", 61.74, 0.03], ["densepose_gps_TTA", "AP", 60.22, 0.03], ["densepose_gpsm_TTA", "AP", 63.59, 0.03]] diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml new file mode 100644 index 0000000..f0fe611 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 40 + STEPS: (30,) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml new file mode 100644 index 0000000..f0d9358 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 40 + STEPS: (30,) + WARMUP_FACTOR: 0.025 diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000..d607c98 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,8 @@ +_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl" +DATASETS: + TRAIN: () + TEST: ("densepose_coco_2014_minival_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 59.27, 0.025], ["densepose_gps", "AP", 60.11, 0.02], ["densepose_gpsm", "AP", 64.09, 0.02]] diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000..057c876 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + 
TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + MAX_ITER: 40 + STEPS: (30,) diff --git a/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000..0053c9d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + NUM_CLASSES: 1 +DATASETS: + TRAIN: ("densepose_coco_2014_minival",) + TEST: ("densepose_coco_2014_minival",) +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: norm + CLIP_VALUE: 1.0 + MAX_ITER: 6000 + STEPS: (5500, 5800) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 76.2477, 1.0], ["densepose_gps", "AP", 79.6090, 1.5], ["densepose_gpsm", "AP", 80.0061, 1.5]] + diff --git a/data_processing/detectron2/projects/DensePose/densepose/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/__init__.py new file mode 100644 index 0000000..b50a3da --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .data.datasets import builtin # just to register data +from .converters import builtin as builtin_converters # register converters +from .config import ( + add_densepose_config, + add_densepose_head_config, + add_hrnet_config, + add_dataset_category_config, + add_bootstrap_config, + load_bootstrap_config, +) +from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData +from .evaluation import DensePoseCOCOEvaluator +from .modeling.roi_heads import DensePoseROIHeads +from .modeling.test_time_augmentation import ( + DensePoseGeneralizedRCNNWithTTA, + DensePoseDatasetMapperTTA, +) +from .utils.transform import load_from_cfg +from .modeling.hrfpn import build_hrfpn_backbone diff --git a/data_processing/detectron2/projects/DensePose/densepose/config.py b/data_processing/detectron2/projects/DensePose/densepose/config.py new file mode 100644 index 0000000..2a06a09 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/config.py @@ -0,0 +1,277 @@ +# -*- coding = utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
+# pyre-ignore-all-errors + +from detectron2.config import CfgNode as CN + + +def add_dataset_category_config(cfg: CN) -> None: + """ + Add config for additional category-related dataset options + - category whitelisting + - category mapping + """ + _C = cfg + _C.DATASETS.CATEGORY_MAPS = CN(new_allowed=True) + _C.DATASETS.WHITELISTED_CATEGORIES = CN(new_allowed=True) + # class to mesh mapping + _C.DATASETS.CLASS_TO_MESH_NAME_MAPPING = CN(new_allowed=True) + + +def add_evaluation_config(cfg: CN) -> None: + _C = cfg + _C.DENSEPOSE_EVALUATION = CN() + # evaluator type, possible values: + # - "iou": evaluator for models that produce iou data + # - "cse": evaluator for models that produce cse data + _C.DENSEPOSE_EVALUATION.TYPE = "iou" + # storage for DensePose results, possible values: + # - "none": no explicit storage, all the results are stored in the + # dictionary with predictions, memory intensive; + # historically the default storage type + # - "ram": RAM storage, uses per-process RAM storage, which is + # reduced to a single process storage on later stages, + # less memory intensive + # - "file": file storage, uses per-process file-based storage, + # the least memory intensive, but may create bottlenecks + # on file system accesses + _C.DENSEPOSE_EVALUATION.STORAGE = "none" + # minimum threshold for IOU values: the lower its values is, + # the more matches are produced (and the higher the AP score) + _C.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD = 0.5 + # Non-distributed inference is slower (at inference time) but can avoid RAM OOM + _C.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE = True + # evaluate mesh alignment based on vertex embeddings, only makes sense in CSE context + _C.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT = False + # meshes to compute mesh alignment for + _C.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES = [] + + +def add_bootstrap_config(cfg: CN) -> None: + """ """ + _C = cfg + _C.BOOTSTRAP_DATASETS = [] + _C.BOOTSTRAP_MODEL = CN() + _C.BOOTSTRAP_MODEL.WEIGHTS = "" + _C.BOOTSTRAP_MODEL.DEVICE = "cuda" + + +def get_bootstrap_dataset_config() -> CN: + _C = CN() + _C.DATASET = "" + # ratio used to mix data loaders + _C.RATIO = 0.1 + # image loader + _C.IMAGE_LOADER = CN(new_allowed=True) + _C.IMAGE_LOADER.TYPE = "" + _C.IMAGE_LOADER.BATCH_SIZE = 4 + _C.IMAGE_LOADER.NUM_WORKERS = 4 + _C.IMAGE_LOADER.CATEGORIES = [] + _C.IMAGE_LOADER.MAX_COUNT_PER_CATEGORY = 1_000_000 + _C.IMAGE_LOADER.CATEGORY_TO_CLASS_MAPPING = CN(new_allowed=True) + # inference + _C.INFERENCE = CN() + # batch size for model inputs + _C.INFERENCE.INPUT_BATCH_SIZE = 4 + # batch size to group model outputs + _C.INFERENCE.OUTPUT_BATCH_SIZE = 2 + # sampled data + _C.DATA_SAMPLER = CN(new_allowed=True) + _C.DATA_SAMPLER.TYPE = "" + _C.DATA_SAMPLER.USE_GROUND_TRUTH_CATEGORIES = False + # filter + _C.FILTER = CN(new_allowed=True) + _C.FILTER.TYPE = "" + return _C + + +def load_bootstrap_config(cfg: CN) -> None: + """ + Bootstrap datasets are given as a list of `dict` that are not automatically + converted into CfgNode. 
This method processes all bootstrap dataset entries + and ensures that they are in CfgNode format and comply with the specification + """ + if not cfg.BOOTSTRAP_DATASETS: + return + + bootstrap_datasets_cfgnodes = [] + for dataset_cfg in cfg.BOOTSTRAP_DATASETS: + _C = get_bootstrap_dataset_config().clone() + _C.merge_from_other_cfg(CN(dataset_cfg)) + bootstrap_datasets_cfgnodes.append(_C) + cfg.BOOTSTRAP_DATASETS = bootstrap_datasets_cfgnodes + + +def add_densepose_head_cse_config(cfg: CN) -> None: + """ + Add configuration options for Continuous Surface Embeddings (CSE) + """ + _C = cfg + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE = CN() + # Dimensionality D of the embedding space + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE = 16 + # Embedder specifications for various mesh IDs + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS = CN(new_allowed=True) + # normalization coefficient for embedding distances + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA = 0.01 + # normalization coefficient for geodesic distances + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA = 0.01 + # embedding loss weight + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_WEIGHT = 0.6 + # embedding loss name, currently the following options are supported: + # - EmbeddingLoss: cross-entropy on vertex labels + # - SoftEmbeddingLoss: cross-entropy on vertex label combined with + # Gaussian penalty on distance between vertices + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME = "EmbeddingLoss" + # optimizer hyperparameters + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR = 1.0 + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR = 1.0 + # Shape to shape cycle consistency loss parameters: + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False}) + # shape to shape cycle consistency loss weight + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.025 + # norm type used for loss computation + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P = 2 + # normalization term for embedding similarity matrices + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE = 0.05 + # maximum number of vertices to include into shape to shape cycle loss + # if negative or zero, all vertices are considered + # if positive, random subset of vertices of given size is considered + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES = 4936 + # Pixel to shape cycle consistency loss parameters: + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False}) + # pixel to shape cycle consistency loss weight + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.0001 + # norm type used for loss computation + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P = 2 + # map images to all meshes and back (if false, use only gt meshes from the batch) + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY = False + # Randomly select at most this number of pixels from every instance + # if negative or zero, all vertices are considered + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE = 100 + # normalization factor for pixel to pixel distances (higher value = smoother distribution) + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA = 5.0 + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX = 0.05 + _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL = 0.05 + + +def 
add_densepose_head_config(cfg: CN) -> None: + """ + Add config for densepose head. + """ + _C = cfg + + _C.MODEL.DENSEPOSE_ON = True + + _C.MODEL.ROI_DENSEPOSE_HEAD = CN() + _C.MODEL.ROI_DENSEPOSE_HEAD.NAME = "" + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8 + # Number of parts used for point labels + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES = 24 + _C.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL = 4 + _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512 + _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3 + _C.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE = 2 + _C.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE = 112 + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE = "ROIAlignV2" + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION = 28 + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO = 2 + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS = 2 # 15 or 2 + # Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD) + _C.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD = 0.7 + # Loss weights for annotation masks.(14 Parts) + _C.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS = 5.0 + # Loss weights for surface parts. (24 Parts) + _C.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS = 1.0 + # Loss weights for UV regression. + _C.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS = 0.01 + # Coarse segmentation is trained using instance segmentation task data + _C.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS = False + # For Decoder + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON = True + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES = 256 + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS = 256 + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM = "" + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE = 4 + # For DeepLab head + _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB = CN() + _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM = "GN" + _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON = 0 + # Predictor class name, must be registered in DENSEPOSE_PREDICTOR_REGISTRY + # Some registered predictors: + # "DensePoseChartPredictor": predicts segmentation and UV coordinates for predefined charts + # "DensePoseChartWithConfidencePredictor": predicts segmentation, UV coordinates + # and associated confidences for predefined charts (default) + # "DensePoseEmbeddingWithConfidencePredictor": predicts segmentation, embeddings + # and associated confidences for CSE + _C.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME = "DensePoseChartWithConfidencePredictor" + # Loss class name, must be registered in DENSEPOSE_LOSS_REGISTRY + # Some registered losses: + # "DensePoseChartLoss": loss for chart-based models that estimate + # segmentation and UV coordinates + # "DensePoseChartWithConfidenceLoss": loss for chart-based models that estimate + # segmentation, UV coordinates and the corresponding confidences (default) + _C.MODEL.ROI_DENSEPOSE_HEAD.LOSS_NAME = "DensePoseChartWithConfidenceLoss" + # Confidences + # Enable learning UV confidences (variances) along with the actual values + _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE = CN({"ENABLED": False}) + # UV confidence lower bound + _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON = 0.01 + # Enable learning segmentation confidences (variances) along with the actual values + _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE = CN({"ENABLED": False}) + # Segmentation confidence lower bound + _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON = 0.01 + # Statistical model type for confidence learning, possible values: + # - "iid_iso": statistically independent identically distributed residuals + # with isotropic covariance + # - "indep_aniso": 
statistically independent residuals with anisotropic + # covariances + _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE = "iid_iso" + # List of angles for rotation in data augmentation during training + _C.INPUT.ROTATION_ANGLES = [0] + _C.TEST.AUG.ROTATION_ANGLES = () # Rotation TTA + + add_densepose_head_cse_config(cfg) + + +def add_hrnet_config(cfg: CN) -> None: + """ + Add config for HRNet backbone. + """ + _C = cfg + + # For HigherHRNet w32 + _C.MODEL.HRNET = CN() + _C.MODEL.HRNET.STEM_INPLANES = 64 + _C.MODEL.HRNET.STAGE2 = CN() + _C.MODEL.HRNET.STAGE2.NUM_MODULES = 1 + _C.MODEL.HRNET.STAGE2.NUM_BRANCHES = 2 + _C.MODEL.HRNET.STAGE2.BLOCK = "BASIC" + _C.MODEL.HRNET.STAGE2.NUM_BLOCKS = [4, 4] + _C.MODEL.HRNET.STAGE2.NUM_CHANNELS = [32, 64] + _C.MODEL.HRNET.STAGE2.FUSE_METHOD = "SUM" + _C.MODEL.HRNET.STAGE3 = CN() + _C.MODEL.HRNET.STAGE3.NUM_MODULES = 4 + _C.MODEL.HRNET.STAGE3.NUM_BRANCHES = 3 + _C.MODEL.HRNET.STAGE3.BLOCK = "BASIC" + _C.MODEL.HRNET.STAGE3.NUM_BLOCKS = [4, 4, 4] + _C.MODEL.HRNET.STAGE3.NUM_CHANNELS = [32, 64, 128] + _C.MODEL.HRNET.STAGE3.FUSE_METHOD = "SUM" + _C.MODEL.HRNET.STAGE4 = CN() + _C.MODEL.HRNET.STAGE4.NUM_MODULES = 3 + _C.MODEL.HRNET.STAGE4.NUM_BRANCHES = 4 + _C.MODEL.HRNET.STAGE4.BLOCK = "BASIC" + _C.MODEL.HRNET.STAGE4.NUM_BLOCKS = [4, 4, 4, 4] + _C.MODEL.HRNET.STAGE4.NUM_CHANNELS = [32, 64, 128, 256] + _C.MODEL.HRNET.STAGE4.FUSE_METHOD = "SUM" + + _C.MODEL.HRNET.HRFPN = CN() + _C.MODEL.HRNET.HRFPN.OUT_CHANNELS = 256 + + +def add_densepose_config(cfg: CN) -> None: + add_densepose_head_config(cfg) + add_hrnet_config(cfg) + add_bootstrap_config(cfg) + add_dataset_category_config(cfg) + add_evaluation_config(cfg) diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/converters/__init__.py new file mode 100644 index 0000000..930339e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .hflip import HFlipConverter +from .to_mask import ToMaskConverter +from .to_chart_result import ToChartResultConverter, ToChartResultConverterWithConfidences +from .segm_to_mask import ( + predictor_output_with_fine_and_coarse_segm_to_mask, + predictor_output_with_coarse_segm_to_mask, + resample_fine_and_coarse_segm_to_bbox, +) +from .chart_output_to_chart_result import ( + densepose_chart_predictor_output_to_result, + densepose_chart_predictor_output_to_result_with_confidences, +) +from .chart_output_hflip import densepose_chart_predictor_output_hflip diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/base.py b/data_processing/detectron2/projects/DensePose/densepose/converters/base.py new file mode 100644 index 0000000..c9dbe56 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/base.py @@ -0,0 +1,93 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any, Tuple, Type +import torch + + +class BaseConverter: + """ + Converter base class to be reused by various converters. + Converter allows one to convert data from various source types to a particular + destination type. Each source type needs to register its converter. The + registration for each source type is valid for all descendants of that type. + """ + + @classmethod + def register(cls, from_type: Type, converter: Any = None): + """ + Registers a converter for the specified type. 
+ Can be used as a decorator (if converter is None), or called as a method. + + Args: + from_type (type): type to register the converter for; + all instances of this type will use the same converter + converter (callable): converter to be registered for the given + type; if None, this method is assumed to be a decorator for the converter + """ + + if converter is not None: + cls._do_register(from_type, converter) + + def wrapper(converter: Any) -> Any: + cls._do_register(from_type, converter) + return converter + + return wrapper + + @classmethod + def _do_register(cls, from_type: Type, converter: Any): + cls.registry[from_type] = converter # pyre-ignore[16] + + @classmethod + def _lookup_converter(cls, from_type: Type) -> Any: + """ + Perform recursive lookup for the given type + to find registered converter. If a converter was found for some base + class, it gets registered for this class to save on further lookups. + + Args: + from_type: type for which to find a converter + Return: + callable or None - registered converter or None + if no suitable entry was found in the registry + """ + if from_type in cls.registry: # pyre-ignore[16] + return cls.registry[from_type] + for base in from_type.__bases__: + converter = cls._lookup_converter(base) + if converter is not None: + cls._do_register(from_type, converter) + return converter + return None + + @classmethod + def convert(cls, instance: Any, *args, **kwargs): + """ + Convert an instance to the destination type using some registered + converter. Does recursive lookup for base classes, so there's no need + for explicit registration for derived classes. + + Args: + instance: source instance to convert to the destination type + Return: + An instance of the destination type obtained from the source instance + Raises KeyError, if no suitable converter found + """ + instance_type = type(instance) + converter = cls._lookup_converter(instance_type) + if converter is None: + if cls.dst_type is None: # pyre-ignore[16] + output_type_str = "itself" + else: + output_type_str = cls.dst_type + raise KeyError(f"Could not find converter from {instance_type} to {output_type_str}") + return converter(instance, *args, **kwargs) + + +IntTupleBox = Tuple[int, int, int, int] + + +def make_int_box(box: torch.Tensor) -> IntTupleBox: + int_box = [0, 0, 0, 0] + int_box[0], int_box[1], int_box[2], int_box[3] = tuple(box.long().tolist()) + return int_box[0], int_box[1], int_box[2], int_box[3] diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/builtin.py b/data_processing/detectron2/projects/DensePose/densepose/converters/builtin.py new file mode 100644 index 0000000..3bd48f8 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/builtin.py @@ -0,0 +1,31 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from ..structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput +from . 
import ( + HFlipConverter, + ToChartResultConverter, + ToChartResultConverterWithConfidences, + ToMaskConverter, + densepose_chart_predictor_output_hflip, + densepose_chart_predictor_output_to_result, + densepose_chart_predictor_output_to_result_with_confidences, + predictor_output_with_coarse_segm_to_mask, + predictor_output_with_fine_and_coarse_segm_to_mask, +) + +ToMaskConverter.register( + DensePoseChartPredictorOutput, predictor_output_with_fine_and_coarse_segm_to_mask +) +ToMaskConverter.register( + DensePoseEmbeddingPredictorOutput, predictor_output_with_coarse_segm_to_mask +) + +ToChartResultConverter.register( + DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result +) + +ToChartResultConverterWithConfidences.register( + DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result_with_confidences +) + +HFlipConverter.register(DensePoseChartPredictorOutput, densepose_chart_predictor_output_hflip) diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/chart_output_hflip.py b/data_processing/detectron2/projects/DensePose/densepose/converters/chart_output_hflip.py new file mode 100644 index 0000000..17d2948 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/chart_output_hflip.py @@ -0,0 +1,71 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from dataclasses import fields +import torch + +from densepose.structures import DensePoseChartPredictorOutput, DensePoseTransformData + + +def densepose_chart_predictor_output_hflip( + densepose_predictor_output: DensePoseChartPredictorOutput, + transform_data: DensePoseTransformData, +) -> DensePoseChartPredictorOutput: + """ + Change to take into account a Horizontal flip. + """ + if len(densepose_predictor_output) > 0: + + PredictorOutput = type(densepose_predictor_output) + output_dict = {} + + for field in fields(densepose_predictor_output): + field_value = getattr(densepose_predictor_output, field.name) + # flip tensors + if isinstance(field_value, torch.Tensor): + setattr(densepose_predictor_output, field.name, torch.flip(field_value, [3])) + + densepose_predictor_output = _flip_iuv_semantics_tensor( + densepose_predictor_output, transform_data + ) + densepose_predictor_output = _flip_segm_semantics_tensor( + densepose_predictor_output, transform_data + ) + + for field in fields(densepose_predictor_output): + output_dict[field.name] = getattr(densepose_predictor_output, field.name) + + return PredictorOutput(**output_dict) + else: + return densepose_predictor_output + + +def _flip_iuv_semantics_tensor( + densepose_predictor_output: DensePoseChartPredictorOutput, + dp_transform_data: DensePoseTransformData, +) -> DensePoseChartPredictorOutput: + point_label_symmetries = dp_transform_data.point_label_symmetries + uv_symmetries = dp_transform_data.uv_symmetries + + N, C, H, W = densepose_predictor_output.u.shape + u_loc = (densepose_predictor_output.u[:, 1:, :, :].clamp(0, 1) * 255).long() + v_loc = (densepose_predictor_output.v[:, 1:, :, :].clamp(0, 1) * 255).long() + Iindex = torch.arange(C - 1, device=densepose_predictor_output.u.device)[ + None, :, None, None + ].expand(N, C - 1, H, W) + densepose_predictor_output.u[:, 1:, :, :] = uv_symmetries["U_transforms"][Iindex, v_loc, u_loc] + densepose_predictor_output.v[:, 1:, :, :] = uv_symmetries["V_transforms"][Iindex, v_loc, u_loc] + + for el in ["fine_segm", "u", "v"]: + densepose_predictor_output.__dict__[el] = densepose_predictor_output.__dict__[el][ + :, point_label_symmetries, :, : + ] + 
return densepose_predictor_output + + +def _flip_segm_semantics_tensor( + densepose_predictor_output: DensePoseChartPredictorOutput, dp_transform_data +): + if densepose_predictor_output.coarse_segm.shape[1] > 2: + densepose_predictor_output.coarse_segm = densepose_predictor_output.coarse_segm[ + :, dp_transform_data.mask_label_symmetries, :, : + ] + return densepose_predictor_output diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/chart_output_to_chart_result.py b/data_processing/detectron2/projects/DensePose/densepose/converters/chart_output_to_chart_result.py new file mode 100644 index 0000000..4248f6c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/chart_output_to_chart_result.py @@ -0,0 +1,188 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Dict +import torch +from torch.nn import functional as F + +from detectron2.structures.boxes import Boxes, BoxMode + +from ..structures import ( + DensePoseChartPredictorOutput, + DensePoseChartResult, + DensePoseChartResultWithConfidences, +) +from . import resample_fine_and_coarse_segm_to_bbox +from .base import IntTupleBox, make_int_box + + +def resample_uv_tensors_to_bbox( + u: torch.Tensor, + v: torch.Tensor, + labels: torch.Tensor, + box_xywh_abs: IntTupleBox, +) -> torch.Tensor: + """ + Resamples U and V coordinate estimates for the given bounding box + + Args: + u (tensor [1, C, H, W] of float): U coordinates + v (tensor [1, C, H, W] of float): V coordinates + labels (tensor [H, W] of long): labels obtained by resampling segmentation + outputs for the given bounding box + box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs + Return: + Resampled U and V coordinates - a tensor [2, H, W] of float + """ + x, y, w, h = box_xywh_abs + w = max(int(w), 1) + h = max(int(h), 1) + u_bbox = F.interpolate(u, (h, w), mode="bilinear", align_corners=False) + v_bbox = F.interpolate(v, (h, w), mode="bilinear", align_corners=False) + uv = torch.zeros([2, h, w], dtype=torch.float32, device=u.device) + for part_id in range(1, u_bbox.size(1)): + uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id] + uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id] + return uv + + +def resample_uv_to_bbox( + predictor_output: DensePoseChartPredictorOutput, + labels: torch.Tensor, + box_xywh_abs: IntTupleBox, +) -> torch.Tensor: + """ + Resamples U and V coordinate estimates for the given bounding box + + Args: + predictor_output (DensePoseChartPredictorOutput): DensePose predictor + output to be resampled + labels (tensor [H, W] of long): labels obtained by resampling segmentation + outputs for the given bounding box + box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs + Return: + Resampled U and V coordinates - a tensor [2, H, W] of float + """ + return resample_uv_tensors_to_bbox( + predictor_output.u, + predictor_output.v, + labels, + box_xywh_abs, + ) + + +def densepose_chart_predictor_output_to_result( + predictor_output: DensePoseChartPredictorOutput, boxes: Boxes +) -> DensePoseChartResult: + """ + Convert densepose chart predictor outputs to results + + Args: + predictor_output (DensePoseChartPredictorOutput): DensePose predictor + output to be converted to results, must contain only 1 output + boxes (Boxes): bounding box that corresponds to the predictor output, + must contain only 1 bounding box + Return: + DensePose chart-based result (DensePoseChartResult) + """ + assert 
len(predictor_output) == 1 and len(boxes) == 1, ( + f"Predictor output to result conversion can operate only single outputs" + f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes" + ) + + boxes_xyxy_abs = boxes.tensor.clone() + boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + box_xywh = make_int_box(boxes_xywh_abs[0]) + + labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0) + uv = resample_uv_to_bbox(predictor_output, labels, box_xywh) + return DensePoseChartResult(labels=labels, uv=uv) + + +def resample_confidences_to_bbox( + predictor_output: DensePoseChartPredictorOutput, + labels: torch.Tensor, + box_xywh_abs: IntTupleBox, +) -> Dict[str, torch.Tensor]: + """ + Resamples confidences for the given bounding box + + Args: + predictor_output (DensePoseChartPredictorOutput): DensePose predictor + output to be resampled + labels (tensor [H, W] of long): labels obtained by resampling segmentation + outputs for the given bounding box + box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs + Return: + Resampled confidences - a dict of [H, W] tensors of float + """ + + x, y, w, h = box_xywh_abs + w = max(int(w), 1) + h = max(int(h), 1) + + confidence_names = [ + "sigma_1", + "sigma_2", + "kappa_u", + "kappa_v", + "fine_segm_confidence", + "coarse_segm_confidence", + ] + confidence_results = {key: None for key in confidence_names} + confidence_names = [ + key for key in confidence_names if getattr(predictor_output, key) is not None + ] + confidence_base = torch.zeros([h, w], dtype=torch.float32, device=predictor_output.u.device) + + # assign data from channels that correspond to the labels + for key in confidence_names: + resampled_confidence = F.interpolate( + getattr(predictor_output, key), + (h, w), + mode="bilinear", + align_corners=False, + ) + result = confidence_base.clone() + for part_id in range(1, predictor_output.u.size(1)): + if resampled_confidence.size(1) != predictor_output.u.size(1): + # confidence is not part-based, don't try to fill it part by part + continue + result[labels == part_id] = resampled_confidence[0, part_id][labels == part_id] + + if resampled_confidence.size(1) != predictor_output.u.size(1): + # confidence is not part-based, fill the data with the first channel + # (targeted for segmentation confidences that have only 1 channel) + result = resampled_confidence[0, 0] + + confidence_results[key] = result + + return confidence_results # pyre-ignore[7] + + +def densepose_chart_predictor_output_to_result_with_confidences( + predictor_output: DensePoseChartPredictorOutput, boxes: Boxes +) -> DensePoseChartResultWithConfidences: + """ + Convert densepose chart predictor outputs to results + + Args: + predictor_output (DensePoseChartPredictorOutput): DensePose predictor + output with confidences to be converted to results, must contain only 1 output + boxes (Boxes): bounding box that corresponds to the predictor output, + must contain only 1 bounding box + Return: + DensePose chart-based result with confidences (DensePoseChartResultWithConfidences) + """ + assert len(predictor_output) == 1 and len(boxes) == 1, ( + f"Predictor output to result conversion can operate only single outputs" + f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes" + ) + + boxes_xyxy_abs = boxes.tensor.clone() + boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + box_xywh = make_int_box(boxes_xywh_abs[0]) + + labels = 
resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0) + uv = resample_uv_to_bbox(predictor_output, labels, box_xywh) + confidences = resample_confidences_to_bbox(predictor_output, labels, box_xywh) + return DensePoseChartResultWithConfidences(labels=labels, uv=uv, **confidences) diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/hflip.py b/data_processing/detectron2/projects/DensePose/densepose/converters/hflip.py new file mode 100644 index 0000000..6df1442 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/hflip.py @@ -0,0 +1,34 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any + +from .base import BaseConverter + + +class HFlipConverter(BaseConverter): + """ + Converts various DensePose predictor outputs to DensePose results. + Each DensePose predictor output type has to register its convertion strategy. + """ + + registry = {} + dst_type = None + + @classmethod + # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter` + # inconsistently. + def convert(cls, predictor_outputs: Any, transform_data: Any, *args, **kwargs): + """ + Performs an horizontal flip on DensePose predictor outputs. + Does recursive lookup for base classes, so there's no need + for explicit registration for derived classes. + + Args: + predictor_outputs: DensePose predictor output to be converted to BitMasks + transform_data: Anything useful for the flip + Return: + An instance of the same type as predictor_outputs + """ + return super(HFlipConverter, cls).convert( + predictor_outputs, transform_data, *args, **kwargs + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/segm_to_mask.py b/data_processing/detectron2/projects/DensePose/densepose/converters/segm_to_mask.py new file mode 100644 index 0000000..6433d5d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/segm_to_mask.py @@ -0,0 +1,150 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
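The converter registry introduced above (`BaseConverter`, the registrations in `builtin.py`, and the chart-result conversion functions) is how downstream code turns raw predictor outputs into per-instance results. A minimal usage sketch, not part of this patch: it assumes `instances` is a detectron2 `Instances` object produced by a chart-based DensePose model, carrying `pred_boxes` and `pred_densepose`.

```python
from densepose.converters import ToChartResultConverter

# The chart converter expects a single predictor output and a single box per call,
# so convert each detection separately.
results = [
    ToChartResultConverter.convert(instances.pred_densepose[i], instances.pred_boxes[[i]])
    for i in range(len(instances))
]
# Each result carries `labels` ([H, W] part indices) and `uv` ([2, H, W] U/V coordinates)
# resampled to that detection's bounding box.
```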
+ +from typing import Any +import torch +from torch.nn import functional as F + +from detectron2.structures import BitMasks, Boxes, BoxMode + +from .base import IntTupleBox, make_int_box +from .to_mask import ImageSizeType + + +def resample_coarse_segm_tensor_to_bbox(coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox): + """ + Resample coarse segmentation tensor to the given + bounding box and derive labels for each pixel of the bounding box + + Args: + coarse_segm: float tensor of shape [1, K, Hout, Wout] + box_xywh_abs (tuple of 4 int): bounding box given by its upper-left + corner coordinates, width (W) and height (H) + Return: + Labels for each pixel of the bounding box, a long tensor of size [1, H, W] + """ + x, y, w, h = box_xywh_abs + w = max(int(w), 1) + h = max(int(h), 1) + labels = F.interpolate(coarse_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1) + return labels + + +def resample_fine_and_coarse_segm_tensors_to_bbox( + fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox +): + """ + Resample fine and coarse segmentation tensors to the given + bounding box and derive labels for each pixel of the bounding box + + Args: + fine_segm: float tensor of shape [1, C, Hout, Wout] + coarse_segm: float tensor of shape [1, K, Hout, Wout] + box_xywh_abs (tuple of 4 int): bounding box given by its upper-left + corner coordinates, width (W) and height (H) + Return: + Labels for each pixel of the bounding box, a long tensor of size [1, H, W] + """ + x, y, w, h = box_xywh_abs + w = max(int(w), 1) + h = max(int(h), 1) + # coarse segmentation + coarse_segm_bbox = F.interpolate( + coarse_segm, + (h, w), + mode="bilinear", + align_corners=False, + ).argmax(dim=1) + # combined coarse and fine segmentation + labels = ( + F.interpolate(fine_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1) + * (coarse_segm_bbox > 0).long() + ) + return labels + + +def resample_fine_and_coarse_segm_to_bbox(predictor_output: Any, box_xywh_abs: IntTupleBox): + """ + Resample fine and coarse segmentation outputs from a predictor to the given + bounding box and derive labels for each pixel of the bounding box + + Args: + predictor_output: DensePose predictor output that contains segmentation + results to be resampled + box_xywh_abs (tuple of 4 int): bounding box given by its upper-left + corner coordinates, width (W) and height (H) + Return: + Labels for each pixel of the bounding box, a long tensor of size [1, H, W] + """ + return resample_fine_and_coarse_segm_tensors_to_bbox( + predictor_output.fine_segm, + predictor_output.coarse_segm, + box_xywh_abs, + ) + + +def predictor_output_with_coarse_segm_to_mask( + predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType +) -> BitMasks: + """ + Convert predictor output with coarse and fine segmentation to a mask. 
+ Assumes that predictor output has the following attributes: + - coarse_segm (tensor of size [N, D, H, W]): coarse segmentation + unnormalized scores for N instances; D is the number of coarse + segmentation labels, H and W is the resolution of the estimate + + Args: + predictor_output: DensePose predictor output to be converted to mask + boxes (Boxes): bounding boxes that correspond to the DensePose + predictor outputs + image_size_hw (tuple [int, int]): image height Himg and width Wimg + Return: + BitMasks that contain a bool tensor of size [N, Himg, Wimg] with + a mask of the size of the image for each instance + """ + H, W = image_size_hw + boxes_xyxy_abs = boxes.tensor.clone() + boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + N = len(boxes_xywh_abs) + masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device) + for i in range(len(boxes_xywh_abs)): + box_xywh = make_int_box(boxes_xywh_abs[i]) + box_mask = resample_coarse_segm_tensor_to_bbox(predictor_output[i].coarse_segm, box_xywh) + x, y, w, h = box_xywh + masks[i, y : y + h, x : x + w] = box_mask + + return BitMasks(masks) + + +def predictor_output_with_fine_and_coarse_segm_to_mask( + predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType +) -> BitMasks: + """ + Convert predictor output with coarse and fine segmentation to a mask. + Assumes that predictor output has the following attributes: + - coarse_segm (tensor of size [N, D, H, W]): coarse segmentation + unnormalized scores for N instances; D is the number of coarse + segmentation labels, H and W is the resolution of the estimate + - fine_segm (tensor of size [N, C, H, W]): fine segmentation + unnormalized scores for N instances; C is the number of fine + segmentation labels, H and W is the resolution of the estimate + + Args: + predictor_output: DensePose predictor output to be converted to mask + boxes (Boxes): bounding boxes that correspond to the DensePose + predictor outputs + image_size_hw (tuple [int, int]): image height Himg and width Wimg + Return: + BitMasks that contain a bool tensor of size [N, Himg, Wimg] with + a mask of the size of the image for each instance + """ + H, W = image_size_hw + boxes_xyxy_abs = boxes.tensor.clone() + boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + N = len(boxes_xywh_abs) + masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device) + for i in range(len(boxes_xywh_abs)): + box_xywh = make_int_box(boxes_xywh_abs[i]) + labels_i = resample_fine_and_coarse_segm_to_bbox(predictor_output[i], box_xywh) + x, y, w, h = box_xywh + masks[i, y : y + h, x : x + w] = labels_i > 0 + return BitMasks(masks) diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/to_chart_result.py b/data_processing/detectron2/projects/DensePose/densepose/converters/to_chart_result.py new file mode 100644 index 0000000..3eabd26 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/to_chart_result.py @@ -0,0 +1,70 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any + +from detectron2.structures import Boxes + +from ..structures import DensePoseChartResult, DensePoseChartResultWithConfidences +from .base import BaseConverter + + +class ToChartResultConverter(BaseConverter): + """ + Converts various DensePose predictor outputs to DensePose results. + Each DensePose predictor output type has to register its convertion strategy. 
+ """ + + registry = {} + dst_type = DensePoseChartResult + + @classmethod + # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter` + # inconsistently. + def convert(cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs) -> DensePoseChartResult: + """ + Convert DensePose predictor outputs to DensePoseResult using some registered + converter. Does recursive lookup for base classes, so there's no need + for explicit registration for derived classes. + + Args: + densepose_predictor_outputs: DensePose predictor output to be + converted to BitMasks + boxes (Boxes): bounding boxes that correspond to the DensePose + predictor outputs + Return: + An instance of DensePoseResult. If no suitable converter was found, raises KeyError + """ + return super(ToChartResultConverter, cls).convert(predictor_outputs, boxes, *args, **kwargs) + + +class ToChartResultConverterWithConfidences(BaseConverter): + """ + Converts various DensePose predictor outputs to DensePose results. + Each DensePose predictor output type has to register its convertion strategy. + """ + + registry = {} + dst_type = DensePoseChartResultWithConfidences + + @classmethod + # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter` + # inconsistently. + def convert( + cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs + ) -> DensePoseChartResultWithConfidences: + """ + Convert DensePose predictor outputs to DensePoseResult with confidences + using some registered converter. Does recursive lookup for base classes, + so there's no need for explicit registration for derived classes. + + Args: + densepose_predictor_outputs: DensePose predictor output with confidences + to be converted to BitMasks + boxes (Boxes): bounding boxes that correspond to the DensePose + predictor outputs + Return: + An instance of DensePoseResult. If no suitable converter was found, raises KeyError + """ + return super(ToChartResultConverterWithConfidences, cls).convert( + predictor_outputs, boxes, *args, **kwargs + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/converters/to_mask.py b/data_processing/detectron2/projects/DensePose/densepose/converters/to_mask.py new file mode 100644 index 0000000..a57fd71 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/converters/to_mask.py @@ -0,0 +1,49 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any, Tuple + +from detectron2.structures import BitMasks, Boxes + +from .base import BaseConverter + +ImageSizeType = Tuple[int, int] + + +class ToMaskConverter(BaseConverter): + """ + Converts various DensePose predictor outputs to masks + in bit mask format (see `BitMasks`). Each DensePose predictor output type + has to register its convertion strategy. + """ + + registry = {} + dst_type = BitMasks + + @classmethod + # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter` + # inconsistently. + def convert( + cls, + densepose_predictor_outputs: Any, + boxes: Boxes, + image_size_hw: ImageSizeType, + *args, + **kwargs + ) -> BitMasks: + """ + Convert DensePose predictor outputs to BitMasks using some registered + converter. Does recursive lookup for base classes, so there's no need + for explicit registration for derived classes. 
+ + Args: + densepose_predictor_outputs: DensePose predictor output to be + converted to BitMasks + boxes (Boxes): bounding boxes that correspond to the DensePose + predictor outputs + image_size_hw (tuple [int, int]): image height and width + Return: + An instance of `BitMasks`. If no suitable converter was found, raises KeyError + """ + return super(ToMaskConverter, cls).convert( + densepose_predictor_outputs, boxes, image_size_hw, *args, **kwargs + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/data/__init__.py new file mode 100644 index 0000000..bf21ba7 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .meshes import builtin +from .build import ( + build_detection_test_loader, + build_detection_train_loader, + build_combined_loader, + build_frame_selector, + build_inference_based_loaders, + has_inference_based_loaders, + BootstrapDatasetFactoryCatalog, +) +from .combined_loader import CombinedDataLoader +from .dataset_mapper import DatasetMapper +from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter +from .image_list_dataset import ImageListDataset +from .utils import is_relative_local_path, maybe_prepend_base_path + +# ensure the builtin datasets are registered +from . import datasets + +# ensure the bootstrap datasets builders are registered +from . import build + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/build.py b/data_processing/detectron2/projects/DensePose/densepose/data/build.py new file mode 100644 index 0000000..39edbd8 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/build.py @@ -0,0 +1,736 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
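The config helpers defined in `config.py` earlier in this patch are meant to be applied to a detectron2 `CfgNode` before any DensePose YAML config is read, so that the added keys exist when the file is merged. A minimal sketch (not part of the patch; the config path and option value are only examples):

```python
from detectron2.config import get_cfg
from densepose import add_densepose_config

cfg = get_cfg()                      # stock detectron2 defaults
add_densepose_config(cfg)           # registers DensePose head, HRNet, bootstrap and evaluation keys
cfg.merge_from_file("projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml")  # example path

# The evaluation options added above can then be tuned, e.g. switching to the
# least memory-intensive, file-backed storage for DensePose results:
cfg.DENSEPOSE_EVALUATION.STORAGE = "file"
```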
+ +import itertools +import logging +import numpy as np +from collections import UserDict, defaultdict +from dataclasses import dataclass +from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple +import torch +from torch.utils.data.dataset import Dataset + +from detectron2.config import CfgNode +from detectron2.data.build import build_detection_test_loader as d2_build_detection_test_loader +from detectron2.data.build import build_detection_train_loader as d2_build_detection_train_loader +from detectron2.data.build import ( + load_proposals_into_dataset, + print_instances_class_histogram, + trivial_batch_collator, + worker_init_reset_seed, +) +from detectron2.data.catalog import DatasetCatalog, Metadata, MetadataCatalog +from detectron2.data.samplers import TrainingSampler +from detectron2.utils.comm import get_world_size + +from densepose.config import get_bootstrap_dataset_config +from densepose.modeling import build_densepose_embedder + +from .combined_loader import CombinedDataLoader, Loader +from .dataset_mapper import DatasetMapper +from .datasets.coco import DENSEPOSE_CSE_KEYS_WITHOUT_MASK, DENSEPOSE_IUV_KEYS_WITHOUT_MASK +from .datasets.dataset_type import DatasetType +from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter +from .samplers import ( + DensePoseConfidenceBasedSampler, + DensePoseCSEConfidenceBasedSampler, + DensePoseCSEUniformSampler, + DensePoseUniformSampler, + MaskFromDensePoseSampler, + PredictionToGroundTruthSampler, +) +from .transform import ImageResizeTransform +from .utils import get_category_to_class_mapping, get_class_to_mesh_name_mapping +from .video import ( + FirstKFramesSelector, + FrameSelectionStrategy, + LastKFramesSelector, + RandomKFramesSelector, + VideoKeyframeDataset, + video_list_from_file, +) + +__all__ = ["build_detection_train_loader", "build_detection_test_loader"] + + +Instance = Dict[str, Any] +InstancePredicate = Callable[[Instance], bool] + + +def _compute_num_images_per_worker(cfg: CfgNode) -> int: + num_workers = get_world_size() + images_per_batch = cfg.SOLVER.IMS_PER_BATCH + assert ( + images_per_batch % num_workers == 0 + ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format( + images_per_batch, num_workers + ) + assert ( + images_per_batch >= num_workers + ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format( + images_per_batch, num_workers + ) + images_per_worker = images_per_batch // num_workers + return images_per_worker + + +def _map_category_id_to_contiguous_id(dataset_name: str, dataset_dicts: Iterable[Instance]) -> None: + meta = MetadataCatalog.get(dataset_name) + for dataset_dict in dataset_dicts: + for ann in dataset_dict["annotations"]: + ann["category_id"] = meta.thing_dataset_id_to_contiguous_id[ann["category_id"]] + + +@dataclass +class _DatasetCategory: + """ + Class representing category data in a dataset: + - id: category ID, as specified in the dataset annotations file + - name: category name, as specified in the dataset annotations file + - mapped_id: category ID after applying category maps (DATASETS.CATEGORY_MAPS config option) + - mapped_name: category name after applying category maps + - dataset_name: dataset in which the category is defined + + For example, when training models in a class-agnostic manner, one could take LVIS 1.0 + dataset and map the animal categories to the same category as human data from COCO: + id = 225 + name = "cat" + mapped_id = 1 + mapped_name = "person" + 
dataset_name = "lvis_v1_animals_dp_train" + """ + + id: int + name: str + mapped_id: int + mapped_name: str + dataset_name: str + + +_MergedCategoriesT = Dict[int, List[_DatasetCategory]] + + +def _add_category_id_to_contiguous_id_maps_to_metadata( + merged_categories: _MergedCategoriesT, +) -> None: + merged_categories_per_dataset = {} + for contiguous_cat_id, cat_id in enumerate(sorted(merged_categories.keys())): + for cat in merged_categories[cat_id]: + if cat.dataset_name not in merged_categories_per_dataset: + merged_categories_per_dataset[cat.dataset_name] = defaultdict(list) + merged_categories_per_dataset[cat.dataset_name][cat_id].append( + ( + contiguous_cat_id, + cat, + ) + ) + + logger = logging.getLogger(__name__) + for dataset_name, merged_categories in merged_categories_per_dataset.items(): + meta = MetadataCatalog.get(dataset_name) + if not hasattr(meta, "thing_classes"): + meta.thing_classes = [] + meta.thing_dataset_id_to_contiguous_id = {} + meta.thing_dataset_id_to_merged_id = {} + else: + meta.thing_classes.clear() + meta.thing_dataset_id_to_contiguous_id.clear() + meta.thing_dataset_id_to_merged_id.clear() + logger.info(f"Dataset {dataset_name}: category ID to contiguous ID mapping:") + for _cat_id, categories in sorted(merged_categories.items()): + added_to_thing_classes = False + for contiguous_cat_id, cat in categories: + if not added_to_thing_classes: + meta.thing_classes.append(cat.mapped_name) + added_to_thing_classes = True + meta.thing_dataset_id_to_contiguous_id[cat.id] = contiguous_cat_id + meta.thing_dataset_id_to_merged_id[cat.id] = cat.mapped_id + logger.info(f"{cat.id} ({cat.name}) -> {contiguous_cat_id}") + + +def _maybe_create_general_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]: + def has_annotations(instance: Instance) -> bool: + return "annotations" in instance + + def has_only_crowd_anotations(instance: Instance) -> bool: + for ann in instance["annotations"]: + if ann.get("is_crowd", 0) == 0: + return False + return True + + def general_keep_instance_predicate(instance: Instance) -> bool: + return has_annotations(instance) and not has_only_crowd_anotations(instance) + + if not cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS: + return None + return general_keep_instance_predicate + + +def _maybe_create_keypoints_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]: + + min_num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE + + def has_sufficient_num_keypoints(instance: Instance) -> bool: + num_kpts = sum( + (np.array(ann["keypoints"][2::3]) > 0).sum() + for ann in instance["annotations"] + if "keypoints" in ann + ) + return num_kpts >= min_num_keypoints + + if cfg.MODEL.KEYPOINT_ON and (min_num_keypoints > 0): + return has_sufficient_num_keypoints + return None + + +def _maybe_create_mask_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]: + if not cfg.MODEL.MASK_ON: + return None + + def has_mask_annotations(instance: Instance) -> bool: + return any("segmentation" in ann for ann in instance["annotations"]) + + return has_mask_annotations + + +def _maybe_create_densepose_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]: + if not cfg.MODEL.DENSEPOSE_ON: + return None + + use_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS + + def has_densepose_annotations(instance: Instance) -> bool: + for ann in instance["annotations"]: + if all(key in ann for key in DENSEPOSE_IUV_KEYS_WITHOUT_MASK) or all( + key in ann for key in DENSEPOSE_CSE_KEYS_WITHOUT_MASK + 
): + return True + if use_masks and "segmentation" in ann: + return True + return False + + return has_densepose_annotations + + +def _maybe_create_specific_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]: + specific_predicate_creators = [ + _maybe_create_keypoints_keep_instance_predicate, + _maybe_create_mask_keep_instance_predicate, + _maybe_create_densepose_keep_instance_predicate, + ] + predicates = [creator(cfg) for creator in specific_predicate_creators] + predicates = [p for p in predicates if p is not None] + if not predicates: + return None + + def combined_predicate(instance: Instance) -> bool: + return any(p(instance) for p in predicates) + + return combined_predicate + + +def _get_train_keep_instance_predicate(cfg: CfgNode): + general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg) + combined_specific_keep_predicate = _maybe_create_specific_keep_instance_predicate(cfg) + + def combined_general_specific_keep_predicate(instance: Instance) -> bool: + return general_keep_predicate(instance) and combined_specific_keep_predicate(instance) + + if (general_keep_predicate is None) and (combined_specific_keep_predicate is None): + return None + if general_keep_predicate is None: + return combined_specific_keep_predicate + if combined_specific_keep_predicate is None: + return general_keep_predicate + return combined_general_specific_keep_predicate + + +def _get_test_keep_instance_predicate(cfg: CfgNode): + general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg) + return general_keep_predicate + + +def _maybe_filter_and_map_categories( + dataset_name: str, dataset_dicts: List[Instance] +) -> List[Instance]: + meta = MetadataCatalog.get(dataset_name) + category_id_map = meta.thing_dataset_id_to_contiguous_id + filtered_dataset_dicts = [] + for dataset_dict in dataset_dicts: + anns = [] + for ann in dataset_dict["annotations"]: + cat_id = ann["category_id"] + if cat_id not in category_id_map: + continue + ann["category_id"] = category_id_map[cat_id] + anns.append(ann) + dataset_dict["annotations"] = anns + filtered_dataset_dicts.append(dataset_dict) + return filtered_dataset_dicts + + +def _add_category_whitelists_to_metadata(cfg: CfgNode) -> None: + for dataset_name, whitelisted_cat_ids in cfg.DATASETS.WHITELISTED_CATEGORIES.items(): + meta = MetadataCatalog.get(dataset_name) + meta.whitelisted_categories = whitelisted_cat_ids + logger = logging.getLogger(__name__) + logger.info( + "Whitelisted categories for dataset {}: {}".format( + dataset_name, meta.whitelisted_categories + ) + ) + + +def _add_category_maps_to_metadata(cfg: CfgNode) -> None: + for dataset_name, category_map in cfg.DATASETS.CATEGORY_MAPS.items(): + category_map = { + int(cat_id_src): int(cat_id_dst) for cat_id_src, cat_id_dst in category_map.items() + } + meta = MetadataCatalog.get(dataset_name) + meta.category_map = category_map + logger = logging.getLogger(__name__) + logger.info("Category maps for dataset {}: {}".format(dataset_name, meta.category_map)) + + +def _add_category_info_to_bootstrapping_metadata(dataset_name: str, dataset_cfg: CfgNode) -> None: + meta = MetadataCatalog.get(dataset_name) + meta.category_to_class_mapping = get_category_to_class_mapping(dataset_cfg) + meta.categories = dataset_cfg.CATEGORIES + meta.max_count_per_category = dataset_cfg.MAX_COUNT_PER_CATEGORY + logger = logging.getLogger(__name__) + logger.info( + "Category to class mapping for dataset {}: {}".format( + dataset_name, meta.category_to_class_mapping + ) + ) + + +def 
_maybe_add_class_to_mesh_name_map_to_metadata(dataset_names: List[str], cfg: CfgNode) -> None: + for dataset_name in dataset_names: + meta = MetadataCatalog.get(dataset_name) + if not hasattr(meta, "class_to_mesh_name"): + meta.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg) + + +def _merge_categories(dataset_names: Collection[str]) -> _MergedCategoriesT: + merged_categories = defaultdict(list) + category_names = {} + for dataset_name in dataset_names: + meta = MetadataCatalog.get(dataset_name) + whitelisted_categories = meta.get("whitelisted_categories") + category_map = meta.get("category_map", {}) + cat_ids = ( + whitelisted_categories if whitelisted_categories is not None else meta.categories.keys() + ) + for cat_id in cat_ids: + cat_name = meta.categories[cat_id] + cat_id_mapped = category_map.get(cat_id, cat_id) + if cat_id_mapped == cat_id or cat_id_mapped in cat_ids: + category_names[cat_id] = cat_name + else: + category_names[cat_id] = str(cat_id_mapped) + # assign temporary mapped category name, this name can be changed + # during the second pass, since mapped ID can correspond to a category + # from a different dataset + cat_name_mapped = meta.categories[cat_id_mapped] + merged_categories[cat_id_mapped].append( + _DatasetCategory( + id=cat_id, + name=cat_name, + mapped_id=cat_id_mapped, + mapped_name=cat_name_mapped, + dataset_name=dataset_name, + ) + ) + # second pass to assign proper mapped category names + for cat_id, categories in merged_categories.items(): + for cat in categories: + if cat_id in category_names and cat.mapped_name != category_names[cat_id]: + cat.mapped_name = category_names[cat_id] + + return merged_categories + + +def _warn_if_merged_different_categories(merged_categories: _MergedCategoriesT) -> None: + logger = logging.getLogger(__name__) + for cat_id in merged_categories: + merged_categories_i = merged_categories[cat_id] + first_cat_name = merged_categories_i[0].name + if len(merged_categories_i) > 1 and not all( + cat.name == first_cat_name for cat in merged_categories_i[1:] + ): + cat_summary_str = ", ".join( + [f"{cat.id} ({cat.name}) from {cat.dataset_name}" for cat in merged_categories_i] + ) + logger.warning( + f"Merged category {cat_id} corresponds to the following categories: " + f"{cat_summary_str}" + ) + + +def combine_detection_dataset_dicts( + dataset_names: Collection[str], + keep_instance_predicate: Optional[InstancePredicate] = None, + proposal_files: Optional[Collection[str]] = None, +) -> List[Instance]: + """ + Load and prepare dataset dicts for training / testing + + Args: + dataset_names (Collection[str]): a list of dataset names + keep_instance_predicate (Callable: Dict[str, Any] -> bool): predicate + applied to instance dicts which defines whether to keep the instance + proposal_files (Collection[str]): if given, a list of object proposal files + that match each dataset in `dataset_names`. + """ + assert len(dataset_names) + if proposal_files is None: + proposal_files = [None] * len(dataset_names) + assert len(dataset_names) == len(proposal_files) + # load datasets and metadata + dataset_name_to_dicts = {} + for dataset_name in dataset_names: + dataset_name_to_dicts[dataset_name] = DatasetCatalog.get(dataset_name) + assert len(dataset_name_to_dicts), f"Dataset '{dataset_name}' is empty!" 
+ # merge categories, requires category metadata to be loaded + # cat_id -> [(orig_cat_id, cat_name, dataset_name)] + merged_categories = _merge_categories(dataset_names) + _warn_if_merged_different_categories(merged_categories) + merged_category_names = [ + merged_categories[cat_id][0].mapped_name for cat_id in sorted(merged_categories) + ] + # map to contiguous category IDs + _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories) + # load annotations and dataset metadata + for dataset_name, proposal_file in zip(dataset_names, proposal_files): + dataset_dicts = dataset_name_to_dicts[dataset_name] + assert len(dataset_dicts), f"Dataset '{dataset_name}' is empty!" + if proposal_file is not None: + dataset_dicts = load_proposals_into_dataset(dataset_dicts, proposal_file) + dataset_dicts = _maybe_filter_and_map_categories(dataset_name, dataset_dicts) + print_instances_class_histogram(dataset_dicts, merged_category_names) + dataset_name_to_dicts[dataset_name] = dataset_dicts + + if keep_instance_predicate is not None: + all_datasets_dicts_plain = [ + d + for d in itertools.chain.from_iterable(dataset_name_to_dicts.values()) + if keep_instance_predicate(d) + ] + else: + all_datasets_dicts_plain = list( + itertools.chain.from_iterable(dataset_name_to_dicts.values()) + ) + return all_datasets_dicts_plain + + +def build_detection_train_loader(cfg: CfgNode, mapper=None): + """ + A data loader is created in a way similar to that of Detectron2. + The main differences are: + - it allows to combine datasets with different but compatible object category sets + + The data loader is created by the following steps: + 1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts. + 2. Start workers to work on the dicts. Each worker will: + * Map each metadata dict into another format to be consumed by the model. + * Batch them by simply putting dicts into a list. + The batched ``list[mapped_dict]`` is what this dataloader will return. + + Args: + cfg (CfgNode): the config + mapper (callable): a callable which takes a sample (dict) from dataset and + returns the format to be consumed by the model. + By default it will be `DatasetMapper(cfg, True)`. + + Returns: + an infinite iterator of training data + """ + + _add_category_whitelists_to_metadata(cfg) + _add_category_maps_to_metadata(cfg) + _maybe_add_class_to_mesh_name_map_to_metadata(cfg.DATASETS.TRAIN, cfg) + dataset_dicts = combine_detection_dataset_dicts( + cfg.DATASETS.TRAIN, + keep_instance_predicate=_get_train_keep_instance_predicate(cfg), + proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, + ) + if mapper is None: + mapper = DatasetMapper(cfg, True) + return d2_build_detection_train_loader(cfg, dataset=dataset_dicts, mapper=mapper) + + +def build_detection_test_loader(cfg, dataset_name, mapper=None): + """ + Similar to `build_detection_train_loader`. + But this function uses the given `dataset_name` argument (instead of the names in cfg), + and uses batch size 1. + + Args: + cfg: a detectron2 CfgNode + dataset_name (str): a name of the dataset that's available in the DatasetCatalog + mapper (callable): a callable which takes a sample (dict) from dataset + and returns the format to be consumed by the model. + By default it will be `DatasetMapper(cfg, False)`. + + Returns: + DataLoader: a torch DataLoader, that loads the given detection + dataset, with test-time transformation and batching. 
+ """ + _add_category_whitelists_to_metadata(cfg) + _add_category_maps_to_metadata(cfg) + _maybe_add_class_to_mesh_name_map_to_metadata([dataset_name], cfg) + dataset_dicts = combine_detection_dataset_dicts( + [dataset_name], + keep_instance_predicate=_get_test_keep_instance_predicate(cfg), + proposal_files=[ + cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)] + ] + if cfg.MODEL.LOAD_PROPOSALS + else None, + ) + sampler = None + if not cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE: + sampler = torch.utils.data.SequentialSampler(dataset_dicts) + if mapper is None: + mapper = DatasetMapper(cfg, False) + return d2_build_detection_test_loader( + dataset_dicts, mapper=mapper, num_workers=cfg.DATALOADER.NUM_WORKERS, sampler=sampler + ) + + +def build_frame_selector(cfg: CfgNode): + strategy = FrameSelectionStrategy(cfg.STRATEGY) + if strategy == FrameSelectionStrategy.RANDOM_K: + frame_selector = RandomKFramesSelector(cfg.NUM_IMAGES) + elif strategy == FrameSelectionStrategy.FIRST_K: + frame_selector = FirstKFramesSelector(cfg.NUM_IMAGES) + elif strategy == FrameSelectionStrategy.LAST_K: + frame_selector = LastKFramesSelector(cfg.NUM_IMAGES) + elif strategy == FrameSelectionStrategy.ALL: + frame_selector = None + # pyre-fixme[61]: `frame_selector` may not be initialized here. + return frame_selector + + +def build_transform(cfg: CfgNode, data_type: str): + if cfg.TYPE == "resize": + if data_type == "image": + return ImageResizeTransform(cfg.MIN_SIZE, cfg.MAX_SIZE) + raise ValueError(f"Unknown transform {cfg.TYPE} for data type {data_type}") + + +def build_combined_loader(cfg: CfgNode, loaders: Collection[Loader], ratios: Sequence[float]): + images_per_worker = _compute_num_images_per_worker(cfg) + return CombinedDataLoader(loaders, images_per_worker, ratios) + + +def build_bootstrap_dataset(dataset_name: str, cfg: CfgNode) -> Sequence[torch.Tensor]: + """ + Build dataset that provides data to bootstrap on + + Args: + dataset_name (str): Name of the dataset, needs to have associated metadata + to load the data + cfg (CfgNode): bootstrapping config + Returns: + Sequence[Tensor] - dataset that provides image batches, Tensors of size + [N, C, H, W] of type float32 + """ + logger = logging.getLogger(__name__) + _add_category_info_to_bootstrapping_metadata(dataset_name, cfg) + meta = MetadataCatalog.get(dataset_name) + factory = BootstrapDatasetFactoryCatalog.get(meta.dataset_type) + dataset = None + if factory is not None: + dataset = factory(meta, cfg) + if dataset is None: + logger.warning(f"Failed to create dataset {dataset_name} of type {meta.dataset_type}") + return dataset + + +def build_data_sampler(cfg: CfgNode, sampler_cfg: CfgNode, embedder: Optional[torch.nn.Module]): + if sampler_cfg.TYPE == "densepose_uniform": + data_sampler = PredictionToGroundTruthSampler() + # transform densepose pred -> gt + data_sampler.register_sampler( + "pred_densepose", + "gt_densepose", + DensePoseUniformSampler(count_per_class=sampler_cfg.COUNT_PER_CLASS), + ) + data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler()) + return data_sampler + elif sampler_cfg.TYPE == "densepose_UV_confidence": + data_sampler = PredictionToGroundTruthSampler() + # transform densepose pred -> gt + data_sampler.register_sampler( + "pred_densepose", + "gt_densepose", + DensePoseConfidenceBasedSampler( + confidence_channel="sigma_2", + count_per_class=sampler_cfg.COUNT_PER_CLASS, + search_proportion=0.5, + ), + ) + data_sampler.register_sampler("pred_densepose", 
"gt_masks", MaskFromDensePoseSampler()) + return data_sampler + elif sampler_cfg.TYPE == "densepose_fine_segm_confidence": + data_sampler = PredictionToGroundTruthSampler() + # transform densepose pred -> gt + data_sampler.register_sampler( + "pred_densepose", + "gt_densepose", + DensePoseConfidenceBasedSampler( + confidence_channel="fine_segm_confidence", + count_per_class=sampler_cfg.COUNT_PER_CLASS, + search_proportion=0.5, + ), + ) + data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler()) + return data_sampler + elif sampler_cfg.TYPE == "densepose_coarse_segm_confidence": + data_sampler = PredictionToGroundTruthSampler() + # transform densepose pred -> gt + data_sampler.register_sampler( + "pred_densepose", + "gt_densepose", + DensePoseConfidenceBasedSampler( + confidence_channel="coarse_segm_confidence", + count_per_class=sampler_cfg.COUNT_PER_CLASS, + search_proportion=0.5, + ), + ) + data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler()) + return data_sampler + elif sampler_cfg.TYPE == "densepose_cse_uniform": + assert embedder is not None + data_sampler = PredictionToGroundTruthSampler() + # transform densepose pred -> gt + data_sampler.register_sampler( + "pred_densepose", + "gt_densepose", + DensePoseCSEUniformSampler( + cfg=cfg, + use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES, + embedder=embedder, + count_per_class=sampler_cfg.COUNT_PER_CLASS, + ), + ) + data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler()) + return data_sampler + elif sampler_cfg.TYPE == "densepose_cse_coarse_segm_confidence": + assert embedder is not None + data_sampler = PredictionToGroundTruthSampler() + # transform densepose pred -> gt + data_sampler.register_sampler( + "pred_densepose", + "gt_densepose", + DensePoseCSEConfidenceBasedSampler( + cfg=cfg, + use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES, + embedder=embedder, + confidence_channel="coarse_segm_confidence", + count_per_class=sampler_cfg.COUNT_PER_CLASS, + search_proportion=0.5, + ), + ) + data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler()) + return data_sampler + + raise ValueError(f"Unknown data sampler type {sampler_cfg.TYPE}") + + +def build_data_filter(cfg: CfgNode): + if cfg.TYPE == "detection_score": + min_score = cfg.MIN_VALUE + return ScoreBasedFilter(min_score=min_score) + raise ValueError(f"Unknown data filter type {cfg.TYPE}") + + +def build_inference_based_loader( + cfg: CfgNode, + dataset_cfg: CfgNode, + model: torch.nn.Module, + embedder: Optional[torch.nn.Module] = None, +) -> InferenceBasedLoader: + """ + Constructs data loader based on inference results of a model. 
+ """ + dataset = build_bootstrap_dataset(dataset_cfg.DATASET, dataset_cfg.IMAGE_LOADER) + meta = MetadataCatalog.get(dataset_cfg.DATASET) + training_sampler = TrainingSampler(len(dataset)) + data_loader = torch.utils.data.DataLoader( + dataset, # pyre-ignore[6] + batch_size=dataset_cfg.IMAGE_LOADER.BATCH_SIZE, + sampler=training_sampler, + num_workers=dataset_cfg.IMAGE_LOADER.NUM_WORKERS, + collate_fn=trivial_batch_collator, + worker_init_fn=worker_init_reset_seed, + ) + return InferenceBasedLoader( + model, + data_loader=data_loader, + data_sampler=build_data_sampler(cfg, dataset_cfg.DATA_SAMPLER, embedder), + data_filter=build_data_filter(dataset_cfg.FILTER), + shuffle=True, + batch_size=dataset_cfg.INFERENCE.OUTPUT_BATCH_SIZE, + inference_batch_size=dataset_cfg.INFERENCE.INPUT_BATCH_SIZE, + category_to_class_mapping=meta.category_to_class_mapping, + ) + + +def has_inference_based_loaders(cfg: CfgNode) -> bool: + """ + Returns True, if at least one inferense-based loader must + be instantiated for training + """ + return len(cfg.BOOTSTRAP_DATASETS) > 0 + + +def build_inference_based_loaders( + cfg: CfgNode, model: torch.nn.Module +) -> Tuple[List[InferenceBasedLoader], List[float]]: + loaders = [] + ratios = [] + embedder = build_densepose_embedder(cfg).to(device=model.device) # pyre-ignore[16] + for dataset_spec in cfg.BOOTSTRAP_DATASETS: + dataset_cfg = get_bootstrap_dataset_config().clone() + dataset_cfg.merge_from_other_cfg(CfgNode(dataset_spec)) + loader = build_inference_based_loader(cfg, dataset_cfg, model, embedder) + loaders.append(loader) + ratios.append(dataset_cfg.RATIO) + return loaders, ratios + + +def build_video_list_dataset(meta: Metadata, cfg: CfgNode): + video_list_fpath = meta.video_list_fpath + video_base_path = meta.video_base_path + category = meta.category + if cfg.TYPE == "video_keyframe": + frame_selector = build_frame_selector(cfg.SELECT) + transform = build_transform(cfg.TRANSFORM, data_type="image") + video_list = video_list_from_file(video_list_fpath, video_base_path) + keyframe_helper_fpath = getattr(cfg, "KEYFRAME_HELPER", None) + return VideoKeyframeDataset( + video_list, category, frame_selector, transform, keyframe_helper_fpath + ) + + +class _BootstrapDatasetFactoryCatalog(UserDict): + """ + A global dictionary that stores information about bootstrapped datasets creation functions + from metadata and config, for diverse DatasetType + """ + + def register(self, dataset_type: DatasetType, factory: Callable[[Metadata, CfgNode], Dataset]): + """ + Args: + dataset_type (DatasetType): a DatasetType e.g. DatasetType.VIDEO_LIST + factory (Callable[Metadata, CfgNode]): a callable which takes Metadata and cfg + arguments and returns a dataset object. + """ + assert dataset_type not in self, "Dataset '{}' is already registered!".format(dataset_type) + self[dataset_type] = factory + + +BootstrapDatasetFactoryCatalog = _BootstrapDatasetFactoryCatalog() +BootstrapDatasetFactoryCatalog.register(DatasetType.VIDEO_LIST, build_video_list_dataset) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/combined_loader.py b/data_processing/detectron2/projects/DensePose/densepose/data/combined_loader.py new file mode 100644 index 0000000..5bfbbde --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/combined_loader.py @@ -0,0 +1,44 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import random +from collections import deque +from typing import Any, Collection, Deque, Iterable, Iterator, List, Sequence + +Loader = Iterable[Any] + + +def _pooled_next(iterator: Iterator[Any], pool: Deque[Any]): + if not pool: + pool.extend(next(iterator)) + return pool.popleft() + + +class CombinedDataLoader: + """ + Combines data loaders using the provided sampling ratios + """ + + BATCH_COUNT = 100 + + def __init__(self, loaders: Collection[Loader], batch_size: int, ratios: Sequence[float]): + self.loaders = loaders + self.batch_size = batch_size + self.ratios = ratios + + def __iter__(self) -> Iterator[List[Any]]: + iters = [iter(loader) for loader in self.loaders] + indices = [] + pool = [deque()] * len(iters) + # infinite iterator, as in D2 + while True: + if not indices: + # just a buffer of indices, its size doesn't matter + # as long as it's a multiple of batch_size + k = self.batch_size * self.BATCH_COUNT + indices = random.choices(range(len(self.loaders)), self.ratios, k=k) + try: + batch = [_pooled_next(iters[i], pool[i]) for i in indices[: self.batch_size]] + except StopIteration: + break + indices = indices[self.batch_size :] + yield batch diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/dataset_mapper.py b/data_processing/detectron2/projects/DensePose/densepose/data/dataset_mapper.py new file mode 100644 index 0000000..3229c4d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/dataset_mapper.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import copy +import logging +from typing import Any, Dict, List, Tuple +import torch + +from detectron2.data import MetadataCatalog +from detectron2.data import detection_utils as utils +from detectron2.data import transforms as T +from detectron2.layers import ROIAlign +from detectron2.structures import BoxMode +from detectron2.utils.file_io import PathManager + +from densepose.structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData + + +def build_augmentation(cfg, is_train): + logger = logging.getLogger(__name__) + result = utils.build_augmentation(cfg, is_train) + if is_train: + random_rotation = T.RandomRotation( + cfg.INPUT.ROTATION_ANGLES, expand=False, sample_style="choice" + ) + result.append(random_rotation) + logger.info("DensePose-specific augmentation used in training: " + str(random_rotation)) + return result + + +class DatasetMapper: + """ + A customized version of `detectron2.data.DatasetMapper` + """ + + def __init__(self, cfg, is_train=True): + self.augmentation = build_augmentation(cfg, is_train) + + # fmt: off + self.img_format = cfg.INPUT.FORMAT + self.mask_on = ( + cfg.MODEL.MASK_ON or ( + cfg.MODEL.DENSEPOSE_ON + and cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS) + ) + self.keypoint_on = cfg.MODEL.KEYPOINT_ON + self.densepose_on = cfg.MODEL.DENSEPOSE_ON + assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet" + # fmt: on + if self.keypoint_on and is_train: + # Flip only makes sense in training + self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) + else: + self.keypoint_hflip_indices = None + + if self.densepose_on: + densepose_transform_srcs = [ + MetadataCatalog.get(ds).densepose_transform_src + for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST + ] + assert len(densepose_transform_srcs) > 0 + # TODO: check that DensePose transformation data is the same for + # all the datasets. 
Otherwise one would have to pass DB ID with + # each entry to select proper transformation data. For now, since + # all DensePose annotated data uses the same data semantics, we + # omit this check. + densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0]) + self.densepose_transform_data = DensePoseTransformData.load( + densepose_transform_data_fpath + ) + + self.is_train = is_train + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + image = utils.read_image(dataset_dict["file_name"], format=self.img_format) + utils.check_image_size(dataset_dict, image) + + image, transforms = T.apply_transform_gens(self.augmentation, image) + image_shape = image.shape[:2] # h, w + dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32")) + + if not self.is_train: + dataset_dict.pop("annotations", None) + return dataset_dict + + for anno in dataset_dict["annotations"]: + if not self.mask_on: + anno.pop("segmentation", None) + if not self.keypoint_on: + anno.pop("keypoints", None) + + # USER: Implement additional transformations if you have other types of data + # USER: Don't call transpose_densepose if you don't need + annos = [ + self._transform_densepose( + utils.transform_instance_annotations( + obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices + ), + transforms, + ) + for obj in dataset_dict.pop("annotations") + if obj.get("iscrowd", 0) == 0 + ] + + if self.mask_on: + self._add_densepose_masks_as_segmentation(annos, image_shape) + + instances = utils.annotations_to_instances(annos, image_shape, mask_format="bitmask") + densepose_annotations = [obj.get("densepose") for obj in annos] + if densepose_annotations and not all(v is None for v in densepose_annotations): + instances.gt_densepose = DensePoseList( + densepose_annotations, instances.gt_boxes, image_shape + ) + + dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()] + return dataset_dict + + def _transform_densepose(self, annotation, transforms): + if not self.densepose_on: + return annotation + + # Handle densepose annotations + is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation) + if is_valid: + densepose_data = DensePoseDataRelative(annotation, cleanup=True) + densepose_data.apply_transform(transforms, self.densepose_transform_data) + annotation["densepose"] = densepose_data + else: + # logger = logging.getLogger(__name__) + # logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid)) + DensePoseDataRelative.cleanup_annotation(annotation) + # NOTE: annotations for certain instances may be unavailable. + # 'None' is accepted by the DensePostList data structure. 
+ annotation["densepose"] = None + return annotation + + def _add_densepose_masks_as_segmentation( + self, annotations: List[Dict[str, Any]], image_shape_hw: Tuple[int, int] + ): + for obj in annotations: + if ("densepose" not in obj) or ("segmentation" in obj): + continue + # DP segmentation: torch.Tensor [S, S] of float32, S=256 + segm_dp = torch.zeros_like(obj["densepose"].segm) + segm_dp[obj["densepose"].segm > 0] = 1 + segm_h, segm_w = segm_dp.shape + bbox_segm_dp = torch.tensor((0, 0, segm_h - 1, segm_w - 1), dtype=torch.float32) + # image bbox + x0, y0, x1, y1 = ( + v.item() for v in BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) + ) + segm_aligned = ( + ROIAlign((y1 - y0, x1 - x0), 1.0, 0, aligned=True) + .forward(segm_dp.view(1, 1, *segm_dp.shape), bbox_segm_dp) + .squeeze() + ) + image_mask = torch.zeros(*image_shape_hw, dtype=torch.float32) + image_mask[y0:y1, x0:x1] = segm_aligned + # segmentation for BitMask: np.array [H, W] of bool + obj["segmentation"] = image_mask >= 0.5 diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/datasets/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/__init__.py new file mode 100644 index 0000000..260ccb9 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from . import builtin # ensure the builtin datasets are registered + +__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/datasets/builtin.py b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/builtin.py new file mode 100644 index 0000000..7572cd6 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/builtin.py @@ -0,0 +1,16 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .chimpnsee import register_dataset as register_chimpnsee_dataset +from .coco import BASE_DATASETS as BASE_COCO_DATASETS +from .coco import DATASETS as COCO_DATASETS +from .coco import register_datasets as register_coco_datasets +from .lvis import DATASETS as LVIS_DATASETS +from .lvis import register_datasets as register_lvis_datasets + +DEFAULT_DATASETS_ROOT = "datasets" + + +register_coco_datasets(COCO_DATASETS, DEFAULT_DATASETS_ROOT) +register_coco_datasets(BASE_COCO_DATASETS, DEFAULT_DATASETS_ROOT) +register_lvis_datasets(LVIS_DATASETS, DEFAULT_DATASETS_ROOT) + +register_chimpnsee_dataset(DEFAULT_DATASETS_ROOT) # pyre-ignore[19] diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/datasets/chimpnsee.py b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/chimpnsee.py new file mode 100644 index 0000000..61e0b50 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/chimpnsee.py @@ -0,0 +1,29 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
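Importing the `builtin` module above is all that is needed to make the datasets visible; registration is lazy, so no annotation JSON is parsed until a dataset is actually queried. A small illustrative check (not part of the patch):

```python
from detectron2.data import DatasetCatalog

# Side-effect import: executes the register_* calls in builtin.py shown above.
import densepose.data.datasets.builtin  # noqa: F401

densepose_names = sorted(n for n in DatasetCatalog.list() if n.startswith("densepose_"))
print(densepose_names[:3])  # e.g. ['densepose_chimps', 'densepose_coco_2014_minival', ...]
```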
+ +from typing import Optional + +from detectron2.data import DatasetCatalog, MetadataCatalog + +from ..utils import maybe_prepend_base_path +from .dataset_type import DatasetType + +CHIMPNSEE_DATASET_NAME = "chimpnsee" + + +def register_dataset(datasets_root: Optional[str] = None) -> None: + def empty_load_callback(): + pass + + video_list_fpath = maybe_prepend_base_path( + datasets_root, + "chimpnsee/cdna.eva.mpg.de/video_list.txt", + ) + video_base_path = maybe_prepend_base_path(datasets_root, "chimpnsee/cdna.eva.mpg.de") + + DatasetCatalog.register(CHIMPNSEE_DATASET_NAME, empty_load_callback) + MetadataCatalog.get(CHIMPNSEE_DATASET_NAME).set( + dataset_type=DatasetType.VIDEO_LIST, + video_list_fpath=video_list_fpath, + video_base_path=video_base_path, + category="chimpanzee", + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/datasets/coco.py b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/coco.py new file mode 100644 index 0000000..c19f7b0 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/coco.py @@ -0,0 +1,432 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import io +import logging +import os +from collections import defaultdict +from dataclasses import dataclass +from typing import Any, Dict, Iterable, List, Optional +from fvcore.common.timer import Timer + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.structures import BoxMode +from detectron2.utils.file_io import PathManager + +from ..utils import maybe_prepend_base_path + +DENSEPOSE_MASK_KEY = "dp_masks" +DENSEPOSE_IUV_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V"] +DENSEPOSE_CSE_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_vertex", "ref_model"] +DENSEPOSE_ALL_POSSIBLE_KEYS = set( + DENSEPOSE_IUV_KEYS_WITHOUT_MASK + DENSEPOSE_CSE_KEYS_WITHOUT_MASK + [DENSEPOSE_MASK_KEY] +) +DENSEPOSE_METADATA_URL_PREFIX = "https://dl.fbaipublicfiles.com/densepose/data/" + + +@dataclass +class CocoDatasetInfo: + name: str + images_root: str + annotations_fpath: str + + +DATASETS = [ + CocoDatasetInfo( + name="densepose_coco_2014_train", + images_root="coco/train2014", + annotations_fpath="coco/annotations/densepose_train2014.json", + ), + CocoDatasetInfo( + name="densepose_coco_2014_minival", + images_root="coco/val2014", + annotations_fpath="coco/annotations/densepose_minival2014.json", + ), + CocoDatasetInfo( + name="densepose_coco_2014_minival_100", + images_root="coco/val2014", + annotations_fpath="coco/annotations/densepose_minival2014_100.json", + ), + CocoDatasetInfo( + name="densepose_coco_2014_valminusminival", + images_root="coco/val2014", + annotations_fpath="coco/annotations/densepose_valminusminival2014.json", + ), + CocoDatasetInfo( + name="densepose_coco_2014_train_cse", + images_root="coco/train2014", + annotations_fpath="coco_cse/densepose_train2014_cse.json", + ), + CocoDatasetInfo( + name="densepose_coco_2014_minival_cse", + images_root="coco/val2014", + annotations_fpath="coco_cse/densepose_minival2014_cse.json", + ), + CocoDatasetInfo( + name="densepose_coco_2014_minival_100_cse", + images_root="coco/val2014", + annotations_fpath="coco_cse/densepose_minival2014_100_cse.json", + ), + CocoDatasetInfo( + name="densepose_coco_2014_valminusminival_cse", + images_root="coco/val2014", + annotations_fpath="coco_cse/densepose_valminusminival2014_cse.json", + ), + CocoDatasetInfo( + name="densepose_chimps", + 
images_root="densepose_chimps/images", + annotations_fpath="densepose_chimps/densepose_chimps_densepose.json", + ), + CocoDatasetInfo( + name="densepose_chimps_cse_train", + images_root="densepose_chimps/images", + annotations_fpath="densepose_chimps/densepose_chimps_cse_train.json", + ), + CocoDatasetInfo( + name="densepose_chimps_cse_val", + images_root="densepose_chimps/images", + annotations_fpath="densepose_chimps/densepose_chimps_cse_val.json", + ), + CocoDatasetInfo( + name="posetrack2017_train", + images_root="posetrack2017/posetrack_data_2017", + annotations_fpath="posetrack2017/densepose_posetrack_train2017.json", + ), + CocoDatasetInfo( + name="posetrack2017_val", + images_root="posetrack2017/posetrack_data_2017", + annotations_fpath="posetrack2017/densepose_posetrack_val2017.json", + ), + CocoDatasetInfo( + name="lvis_v05_train", + images_root="coco/train2017", + annotations_fpath="lvis/lvis_v0.5_plus_dp_train.json", + ), + CocoDatasetInfo( + name="lvis_v05_val", + images_root="coco/val2017", + annotations_fpath="lvis/lvis_v0.5_plus_dp_val.json", + ), +] + + +BASE_DATASETS = [ + CocoDatasetInfo( + name="base_coco_2017_train", + images_root="coco/train2017", + annotations_fpath="coco/annotations/instances_train2017.json", + ), + CocoDatasetInfo( + name="base_coco_2017_val", + images_root="coco/val2017", + annotations_fpath="coco/annotations/instances_val2017.json", + ), + CocoDatasetInfo( + name="base_coco_2017_val_100", + images_root="coco/val2017", + annotations_fpath="coco/annotations/instances_val2017_100.json", + ), +] + + +def get_metadata(base_path: Optional[str]) -> Dict[str, Any]: + """ + Returns metadata associated with COCO DensePose datasets + + Args: + base_path: Optional[str] + Base path used to load metadata from + + Returns: + Dict[str, Any] + Metadata in the form of a dictionary + """ + meta = { + "densepose_transform_src": maybe_prepend_base_path(base_path, "UV_symmetry_transforms.mat"), + "densepose_smpl_subdiv": maybe_prepend_base_path(base_path, "SMPL_subdiv.mat"), + "densepose_smpl_subdiv_transform": maybe_prepend_base_path( + base_path, + "SMPL_SUBDIV_TRANSFORM.mat", + ), + } + return meta + + +def _load_coco_annotations(json_file: str): + """ + Load COCO annotations from a JSON file + + Args: + json_file: str + Path to the file to load annotations from + Returns: + Instance of `pycocotools.coco.COCO` that provides access to annotations + data + """ + from pycocotools.coco import COCO + + logger = logging.getLogger(__name__) + timer = Timer() + with contextlib.redirect_stdout(io.StringIO()): + coco_api = COCO(json_file) + if timer.seconds() > 1: + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) + return coco_api + + +def _add_categories_metadata(dataset_name: str, categories: List[Dict[str, Any]]): + meta = MetadataCatalog.get(dataset_name) + meta.categories = {c["id"]: c["name"] for c in categories} + logger = logging.getLogger(__name__) + logger.info("Dataset {} categories: {}".format(dataset_name, meta.categories)) + + +def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]): + if "minival" in json_file: + # Skip validation on COCO2014 valminusminival and minival annotations + # The ratio of buggy annotations there is tiny and does not affect accuracy + # Therefore we explicitly white-list them + return + ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( + json_file + ) 
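The `CocoDatasetInfo` entries above are plain records; an additional DensePose dataset in the same COCO format can be described and registered the same way through `register_datasets` defined later in this file. A sketch with hypothetical name and paths:

```python
from densepose.data.datasets.coco import CocoDatasetInfo, register_datasets

# Hypothetical dataset description; name and paths are placeholders.
EXTRA_DATASETS = [
    CocoDatasetInfo(
        name="densepose_portraits_train",
        images_root="portraits/images",
        annotations_fpath="portraits/annotations/densepose_train.json",
    ),
]
register_datasets(EXTRA_DATASETS, datasets_root="datasets")
```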
+ + +def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]): + if "bbox" not in ann_dict: + return + obj["bbox"] = ann_dict["bbox"] + obj["bbox_mode"] = BoxMode.XYWH_ABS + + +def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]): + if "segmentation" not in ann_dict: + return + segm = ann_dict["segmentation"] + if not isinstance(segm, dict): + # filter out invalid polygons (< 3 points) + segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + if len(segm) == 0: + return + obj["segmentation"] = segm + + +def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]): + if "keypoints" not in ann_dict: + return + keypts = ann_dict["keypoints"] # list[int] + for idx, v in enumerate(keypts): + if idx % 3 != 2: + # COCO's segmentation coordinates are floating points in [0, H or W], + # but keypoint coordinates are integers in [0, H-1 or W-1] + # Therefore we assume the coordinates are "pixel indices" and + # add 0.5 to convert to floating point coordinates. + keypts[idx] = v + 0.5 + obj["keypoints"] = keypts + + +def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]): + for key in DENSEPOSE_ALL_POSSIBLE_KEYS: + if key in ann_dict: + obj[key] = ann_dict[key] + + +def _combine_images_with_annotations( + dataset_name: str, + image_root: str, + img_datas: Iterable[Dict[str, Any]], + ann_datas: Iterable[Iterable[Dict[str, Any]]], +): + + ann_keys = ["iscrowd", "category_id"] + dataset_dicts = [] + contains_video_frame_info = False + + for img_dict, ann_dicts in zip(img_datas, ann_datas): + record = {} + record["file_name"] = os.path.join(image_root, img_dict["file_name"]) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + record["image_id"] = img_dict["id"] + record["dataset"] = dataset_name + if "frame_id" in img_dict: + record["frame_id"] = img_dict["frame_id"] + record["video_id"] = img_dict.get("vid_id", None) + contains_video_frame_info = True + objs = [] + for ann_dict in ann_dicts: + assert ann_dict["image_id"] == record["image_id"] + assert ann_dict.get("ignore", 0) == 0 + obj = {key: ann_dict[key] for key in ann_keys if key in ann_dict} + _maybe_add_bbox(obj, ann_dict) + _maybe_add_segm(obj, ann_dict) + _maybe_add_keypoints(obj, ann_dict) + _maybe_add_densepose(obj, ann_dict) + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + if contains_video_frame_info: + create_video_frame_mapping(dataset_name, dataset_dicts) + return dataset_dicts + + +def get_contiguous_id_to_category_id_map(metadata): + cat_id_2_cont_id = metadata.thing_dataset_id_to_contiguous_id + cont_id_2_cat_id = {} + for cat_id, cont_id in cat_id_2_cont_id.items(): + if cont_id in cont_id_2_cat_id: + continue + cont_id_2_cat_id[cont_id] = cat_id + return cont_id_2_cat_id + + +def maybe_filter_categories_cocoapi(dataset_name, coco_api): + meta = MetadataCatalog.get(dataset_name) + cont_id_2_cat_id = get_contiguous_id_to_category_id_map(meta) + cat_id_2_cont_id = meta.thing_dataset_id_to_contiguous_id + # filter categories + cats = [] + for cat in coco_api.dataset["categories"]: + cat_id = cat["id"] + if cat_id not in cat_id_2_cont_id: + continue + cont_id = cat_id_2_cont_id[cat_id] + if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id): + cats.append(cat) + coco_api.dataset["categories"] = cats + # filter annotations, if multiple categories are mapped to a single + # contiguous ID, use only one category ID and map all annotations to that category ID + anns = [] + for ann in 
coco_api.dataset["annotations"]: + cat_id = ann["category_id"] + if cat_id not in cat_id_2_cont_id: + continue + cont_id = cat_id_2_cont_id[cat_id] + ann["category_id"] = cont_id_2_cat_id[cont_id] + anns.append(ann) + coco_api.dataset["annotations"] = anns + # recreate index + coco_api.createIndex() + + +def maybe_filter_and_map_categories_cocoapi(dataset_name, coco_api): + meta = MetadataCatalog.get(dataset_name) + category_id_map = meta.thing_dataset_id_to_contiguous_id + # map categories + cats = [] + for cat in coco_api.dataset["categories"]: + cat_id = cat["id"] + if cat_id not in category_id_map: + continue + cat["id"] = category_id_map[cat_id] + cats.append(cat) + coco_api.dataset["categories"] = cats + # map annotation categories + anns = [] + for ann in coco_api.dataset["annotations"]: + cat_id = ann["category_id"] + if cat_id not in category_id_map: + continue + ann["category_id"] = category_id_map[cat_id] + anns.append(ann) + coco_api.dataset["annotations"] = anns + # recreate index + coco_api.createIndex() + + +def create_video_frame_mapping(dataset_name, dataset_dicts): + mapping = defaultdict(dict) + for d in dataset_dicts: + video_id = d.get("video_id") + if video_id is None: + continue + mapping[video_id].update({d["frame_id"]: d["file_name"]}) + MetadataCatalog.get(dataset_name).set(video_frame_mapping=mapping) + + +def load_coco_json(annotations_json_file: str, image_root: str, dataset_name: str): + """ + Loads a JSON file with annotations in COCO instances format. + Replaces `detectron2.data.datasets.coco.load_coco_json` to handle metadata + in a more flexible way. Postpones category mapping to a later stage to be + able to combine several datasets with different (but coherent) sets of + categories. + + Args: + + annotations_json_file: str + Path to the JSON file with annotations in COCO instances format. + image_root: str + directory that contains all the images + dataset_name: str + the name that identifies a dataset, e.g. "densepose_coco_2014_train" + extra_annotation_keys: Optional[List[str]] + If provided, these keys are used to extract additional data from + the annotations. + """ + coco_api = _load_coco_annotations(PathManager.get_local_path(annotations_json_file)) + _add_categories_metadata(dataset_name, coco_api.loadCats(coco_api.getCatIds())) + # sort indices for reproducible results + img_ids = sorted(coco_api.imgs.keys()) + # imgs is a list of dicts, each looks something like: + # {'license': 4, + # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', + # 'file_name': 'COCO_val2014_000000001268.jpg', + # 'height': 427, + # 'width': 640, + # 'date_captured': '2013-11-17 05:57:24', + # 'id': 1268} + imgs = coco_api.loadImgs(img_ids) + logger = logging.getLogger(__name__) + logger.info("Loaded {} images in COCO format from {}".format(len(imgs), annotations_json_file)) + # anns is a list[list[dict]], where each dict is an annotation + # record for an object. The inner list enumerates the objects in an image + # and the outer list enumerates over images. 
+ anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] + _verify_annotations_have_unique_ids(annotations_json_file, anns) + dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns) + return dataset_records + + +def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None): + """ + Registers provided COCO DensePose dataset + + Args: + dataset_data: CocoDatasetInfo + Dataset data + datasets_root: Optional[str] + Datasets root folder (default: None) + """ + annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath) + images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root) + + def load_annotations(): + return load_coco_json( + annotations_json_file=annotations_fpath, + image_root=images_root, + dataset_name=dataset_data.name, + ) + + DatasetCatalog.register(dataset_data.name, load_annotations) + MetadataCatalog.get(dataset_data.name).set( + json_file=annotations_fpath, + image_root=images_root, + **get_metadata(DENSEPOSE_METADATA_URL_PREFIX) + ) + + +def register_datasets( + datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None +): + """ + Registers provided COCO DensePose datasets + + Args: + datasets_data: Iterable[CocoDatasetInfo] + An iterable of dataset datas + datasets_root: Optional[str] + Datasets root folder (default: None) + """ + for dataset_data in datasets_data: + register_dataset(dataset_data, datasets_root) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/datasets/dataset_type.py b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/dataset_type.py new file mode 100644 index 0000000..ed8f8f2 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/dataset_type.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from enum import Enum + + +class DatasetType(Enum): + """ + Dataset type, mostly used for datasets that contain data to bootstrap models on + """ + + VIDEO_LIST = "video_list" diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/datasets/lvis.py b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/lvis.py new file mode 100644 index 0000000..b4af9fa --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/datasets/lvis.py @@ -0,0 +1,257 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
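Once registered, querying a dataset through detectron2's `DatasetCatalog` invokes the lazy `load_annotations` closure above, which runs `load_coco_json` and returns records with the fields assembled in `_combine_images_with_annotations`. A brief illustrative check, assuming the COCO files are present under the default datasets root:

```python
from detectron2.data import DatasetCatalog

# Triggers the lazy load_annotations() closure registered above.
records = DatasetCatalog.get("densepose_coco_2014_minival_100")
print(len(records), records[0]["file_name"], len(records[0]["annotations"]))
```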
+import logging +import os +from typing import Any, Dict, Iterable, List, Optional +from fvcore.common.timer import Timer + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.data.datasets.lvis import get_lvis_instances_meta +from detectron2.structures import BoxMode +from detectron2.utils.file_io import PathManager + +from ..utils import maybe_prepend_base_path +from .coco import ( + DENSEPOSE_ALL_POSSIBLE_KEYS, + DENSEPOSE_METADATA_URL_PREFIX, + CocoDatasetInfo, + get_metadata, +) + +DATASETS = [ + CocoDatasetInfo( + name="densepose_lvis_v1_ds1_train_v1", + images_root="coco_", + annotations_fpath="lvis/densepose_lvis_v1_ds1_train_v1.json", + ), + CocoDatasetInfo( + name="densepose_lvis_v1_ds1_val_v1", + images_root="coco_", + annotations_fpath="lvis/densepose_lvis_v1_ds1_val_v1.json", + ), + CocoDatasetInfo( + name="densepose_lvis_v1_ds2_train_v1", + images_root="coco_", + annotations_fpath="lvis/densepose_lvis_v1_ds2_train_v1.json", + ), + CocoDatasetInfo( + name="densepose_lvis_v1_ds2_val_v1", + images_root="coco_", + annotations_fpath="lvis/densepose_lvis_v1_ds2_val_v1.json", + ), + CocoDatasetInfo( + name="densepose_lvis_v1_ds1_val_animals_100", + images_root="coco_", + annotations_fpath="lvis/densepose_lvis_v1_val_animals_100_v2.json", + ), +] + + +def _load_lvis_annotations(json_file: str): + """ + Load COCO annotations from a JSON file + + Args: + json_file: str + Path to the file to load annotations from + Returns: + Instance of `pycocotools.coco.COCO` that provides access to annotations + data + """ + from lvis import LVIS + + json_file = PathManager.get_local_path(json_file) + logger = logging.getLogger(__name__) + timer = Timer() + lvis_api = LVIS(json_file) + if timer.seconds() > 1: + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) + return lvis_api + + +def _add_categories_metadata(dataset_name: str) -> None: + metadict = get_lvis_instances_meta(dataset_name) + categories = metadict["thing_classes"] + metadata = MetadataCatalog.get(dataset_name) + metadata.categories = {i + 1: categories[i] for i in range(len(categories))} + logger = logging.getLogger(__name__) + logger.info(f"Dataset {dataset_name} has {len(categories)} categories") + + +def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]) -> None: + ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( + json_file + ) + + +def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None: + if "bbox" not in ann_dict: + return + obj["bbox"] = ann_dict["bbox"] + obj["bbox_mode"] = BoxMode.XYWH_ABS + + +def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None: + if "segmentation" not in ann_dict: + return + segm = ann_dict["segmentation"] + if not isinstance(segm, dict): + # filter out invalid polygons (< 3 points) + segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + if len(segm) == 0: + return + obj["segmentation"] = segm + + +def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None: + if "keypoints" not in ann_dict: + return + keypts = ann_dict["keypoints"] # list[int] + for idx, v in enumerate(keypts): + if idx % 3 != 2: + # COCO's segmentation coordinates are floating points in [0, H or W], + # but keypoint coordinates are integers in [0, H-1 or W-1] + # Therefore we assume the coordinates are "pixel indices" and + # add 0.5 to convert to 
floating point coordinates. + keypts[idx] = v + 0.5 + obj["keypoints"] = keypts + + +def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None: + for key in DENSEPOSE_ALL_POSSIBLE_KEYS: + if key in ann_dict: + obj[key] = ann_dict[key] + + +def _combine_images_with_annotations( + dataset_name: str, + image_root: str, + img_datas: Iterable[Dict[str, Any]], + ann_datas: Iterable[Iterable[Dict[str, Any]]], +): + + dataset_dicts = [] + + def get_file_name(img_root, img_dict): + # Determine the path including the split folder ("train2017", "val2017", "test2017") from + # the coco_url field. Example: + # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg' + split_folder, file_name = img_dict["coco_url"].split("/")[-2:] + return os.path.join(img_root + split_folder, file_name) + + for img_dict, ann_dicts in zip(img_datas, ann_datas): + record = {} + record["file_name"] = get_file_name(image_root, img_dict) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", []) + record["neg_category_ids"] = img_dict.get("neg_category_ids", []) + record["image_id"] = img_dict["id"] + record["dataset"] = dataset_name + + objs = [] + for ann_dict in ann_dicts: + assert ann_dict["image_id"] == record["image_id"] + obj = {} + _maybe_add_bbox(obj, ann_dict) + obj["iscrowd"] = ann_dict.get("iscrowd", 0) + obj["category_id"] = ann_dict["category_id"] + _maybe_add_segm(obj, ann_dict) + _maybe_add_keypoints(obj, ann_dict) + _maybe_add_densepose(obj, ann_dict) + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + return dataset_dicts + + +def load_lvis_json(annotations_json_file: str, image_root: str, dataset_name: str): + """ + Loads a JSON file with annotations in LVIS instances format. + Replaces `detectron2.data.datasets.coco.load_lvis_json` to handle metadata + in a more flexible way. Postpones category mapping to a later stage to be + able to combine several datasets with different (but coherent) sets of + categories. + + Args: + + annotations_json_file: str + Path to the JSON file with annotations in COCO instances format. + image_root: str + directory that contains all the images + dataset_name: str + the name that identifies a dataset, e.g. "densepose_coco_2014_train" + extra_annotation_keys: Optional[List[str]] + If provided, these keys are used to extract additional data from + the annotations. + """ + lvis_api = _load_lvis_annotations(PathManager.get_local_path(annotations_json_file)) + + _add_categories_metadata(dataset_name) + + # sort indices for reproducible results + img_ids = sorted(lvis_api.imgs.keys()) + # imgs is a list of dicts, each looks something like: + # {'license': 4, + # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', + # 'file_name': 'COCO_val2014_000000001268.jpg', + # 'height': 427, + # 'width': 640, + # 'date_captured': '2013-11-17 05:57:24', + # 'id': 1268} + imgs = lvis_api.load_imgs(img_ids) + logger = logging.getLogger(__name__) + logger.info("Loaded {} images in LVIS format from {}".format(len(imgs), annotations_json_file)) + # anns is a list[list[dict]], where each dict is an annotation + # record for an object. The inner list enumerates the objects in an image + # and the outer list enumerates over images. 
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] + + _verify_annotations_have_unique_ids(annotations_json_file, anns) + dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns) + return dataset_records + + +def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None) -> None: + """ + Registers provided LVIS DensePose dataset + + Args: + dataset_data: CocoDatasetInfo + Dataset data + datasets_root: Optional[str] + Datasets root folder (default: None) + """ + annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath) + images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root) + + def load_annotations(): + return load_lvis_json( + annotations_json_file=annotations_fpath, + image_root=images_root, + dataset_name=dataset_data.name, + ) + + DatasetCatalog.register(dataset_data.name, load_annotations) + MetadataCatalog.get(dataset_data.name).set( + json_file=annotations_fpath, + image_root=images_root, + evaluator_type="lvis", + **get_metadata(DENSEPOSE_METADATA_URL_PREFIX), + ) + + +def register_datasets( + datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None +) -> None: + """ + Registers provided LVIS DensePose datasets + + Args: + datasets_data: Iterable[CocoDatasetInfo] + An iterable of dataset datas + datasets_root: Optional[str] + Datasets root folder (default: None) + """ + for dataset_data in datasets_data: + register_dataset(dataset_data, datasets_root) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/image_list_dataset.py b/data_processing/detectron2/projects/DensePose/densepose/data/image_list_dataset.py new file mode 100644 index 0000000..92a95d3 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/image_list_dataset.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +import numpy as np +from typing import Any, Callable, Dict, List, Optional, Union +import torch +from torch.utils.data.dataset import Dataset + +from detectron2.data.detection_utils import read_image + +ImageTransform = Callable[[torch.Tensor], torch.Tensor] + + +class ImageListDataset(Dataset): + """ + Dataset that provides images from a list. + """ + + _EMPTY_IMAGE = torch.empty((0, 3, 1, 1)) + + def __init__( + self, + image_list: List[str], + category_list: Union[str, List[str], None] = None, + transform: Optional[ImageTransform] = None, + ): + """ + Args: + image_list (List[str]): list of paths to image files + category_list (Union[str, List[str], None]): list of animal categories for + each image. 
If it is a string, or None, this applies to all images
+        """
+        if type(category_list) == list:
+            self.category_list = category_list
+        else:
+            self.category_list = [category_list] * len(image_list)
+        assert len(image_list) == len(
+            self.category_list
+        ), "length of image and category lists must be equal"
+        self.image_list = image_list
+        self.transform = transform
+
+    def __getitem__(self, idx: int) -> Dict[str, Any]:
+        """
+        Gets selected images from the list
+
+        Args:
+            idx (int): image index in the image list
+        Returns:
+            A dictionary containing two keys:
+                images (torch.Tensor): tensor of size [N, 3, H, W] (N = 1, or 0 for _EMPTY_IMAGE)
+                categories (List[str]): categories of the frames
+        """
+        categories = [self.category_list[idx]]
+        fpath = self.image_list[idx]
+        transform = self.transform
+
+        try:
+            image = torch.from_numpy(np.ascontiguousarray(read_image(fpath, format="BGR")))
+            image = image.permute(2, 0, 1).unsqueeze(0).float()  # HWC -> NCHW
+            if transform is not None:
+                image = transform(image)
+            return {"images": image, "categories": categories}
+        except (OSError, RuntimeError) as e:
+            logger = logging.getLogger(__name__)
+            logger.warning(f"Error opening image file {fpath}: {e}")
+
+        return {"images": self._EMPTY_IMAGE, "categories": []}
+
+    def __len__(self):
+        return len(self.image_list)
diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/inference_based_loader.py b/data_processing/detectron2/projects/DensePose/densepose/data/inference_based_loader.py
new file mode 100644
index 0000000..dde4c0f
--- /dev/null
+++ b/data_processing/detectron2/projects/DensePose/densepose/data/inference_based_loader.py
@@ -0,0 +1,172 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import random
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple
+import torch
+from torch import nn
+
+SampledData = Any
+ModelOutput = Any
+
+
+def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any]]:
+    """
+    Group elements of an iterable by chunks of size `n`, e.g.
+    grouper(range(9), 4) ->
+        (0, 1, 2, 3), (4, 5, 6, 7), (8, None, None, None)
+    """
+    it = iter(iterable)
+    while True:
+        values = []
+        for _ in range(n):
+            try:
+                value = next(it)
+            except StopIteration:
+                if values:
+                    values.extend([fillvalue] * (n - len(values)))
+                    yield tuple(values)
+                return
+            values.append(value)
+        yield tuple(values)
+
+
+class ScoreBasedFilter:
+    """
+    Filters entries in model output based on their scores
+    Discards all entries with score less than the specified minimum
+    """
+
+    def __init__(self, min_score: float = 0.8):
+        self.min_score = min_score
+
+    def __call__(self, model_output: ModelOutput) -> ModelOutput:
+        for model_output_i in model_output:
+            instances = model_output_i["instances"]
+            if not instances.has("scores"):
+                continue
+            instances_filtered = instances[instances.scores >= self.min_score]
+            model_output_i["instances"] = instances_filtered
+        return model_output
+
+
+class InferenceBasedLoader:
+    """
+    Data loader based on results inferred by a model.
Consists of: + - a data loader that provides batches of images + - a model that is used to infer the results + - a data sampler that converts inferred results to annotations + """ + + def __init__( + self, + model: nn.Module, + data_loader: Iterable[List[Dict[str, Any]]], + data_sampler: Optional[Callable[[ModelOutput], List[SampledData]]] = None, + data_filter: Optional[Callable[[ModelOutput], ModelOutput]] = None, + shuffle: bool = True, + batch_size: int = 4, + inference_batch_size: int = 4, + drop_last: bool = False, + category_to_class_mapping: Optional[dict] = None, + ): + """ + Constructor + + Args: + model (torch.nn.Module): model used to produce data + data_loader (Iterable[List[Dict[str, Any]]]): iterable that provides + dictionaries with "images" and "categories" fields to perform inference on + data_sampler (Callable: ModelOutput -> SampledData): functor + that produces annotation data from inference results; + (optional, default: None) + data_filter (Callable: ModelOutput -> ModelOutput): filter + that selects model outputs for further processing + (optional, default: None) + shuffle (bool): if True, the input images get shuffled + batch_size (int): batch size for the produced annotation data + inference_batch_size (int): batch size for input images + drop_last (bool): if True, drop the last batch if it is undersized + category_to_class_mapping (dict): category to class mapping + """ + self.model = model + self.model.eval() + self.data_loader = data_loader + self.data_sampler = data_sampler + self.data_filter = data_filter + self.shuffle = shuffle + self.batch_size = batch_size + self.inference_batch_size = inference_batch_size + self.drop_last = drop_last + if category_to_class_mapping is not None: + self.category_to_class_mapping = category_to_class_mapping + else: + self.category_to_class_mapping = {} + + def __iter__(self) -> Iterator[List[SampledData]]: + for batch in self.data_loader: + # batch : List[Dict[str: Tensor[N, C, H, W], str: Optional[str]]] + # images_batch : Tensor[N, C, H, W] + # image : Tensor[C, H, W] + images_and_categories = [ + {"image": image, "category": category} + for element in batch + for image, category in zip(element["images"], element["categories"]) + ] + if not images_and_categories: + continue + if self.shuffle: + random.shuffle(images_and_categories) + yield from self._produce_data(images_and_categories) # pyre-ignore[6] + + def _produce_data( + self, images_and_categories: List[Tuple[torch.Tensor, Optional[str]]] + ) -> Iterator[List[SampledData]]: + """ + Produce batches of data from images + + Args: + images_and_categories (List[Tuple[torch.Tensor, Optional[str]]]): + list of images and corresponding categories to process + + Returns: + Iterator over batches of data sampled from model outputs + """ + data_batches: List[SampledData] = [] + category_to_class_mapping = self.category_to_class_mapping + batched_images_and_categories = _grouper(images_and_categories, self.inference_batch_size) + for batch in batched_images_and_categories: + batch = [ + { + "image": image_and_category["image"].to(self.model.device), + "category": image_and_category["category"], + } + for image_and_category in batch + if image_and_category is not None + ] + if not batch: + continue + with torch.no_grad(): + model_output = self.model(batch) + for model_output_i, batch_i in zip(model_output, batch): + assert len(batch_i["image"].shape) == 3 + model_output_i["image"] = batch_i["image"] + instance_class = category_to_class_mapping.get(batch_i["category"], 0) + 
model_output_i["instances"].dataset_classes = torch.tensor( + [instance_class] * len(model_output_i["instances"]) + ) + model_output_filtered = ( + model_output if self.data_filter is None else self.data_filter(model_output) + ) + data = ( + model_output_filtered + if self.data_sampler is None + else self.data_sampler(model_output_filtered) + ) + for data_i in data: + if len(data_i["instances"]): + data_batches.append(data_i) + if len(data_batches) >= self.batch_size: + yield data_batches[: self.batch_size] + data_batches = data_batches[self.batch_size :] + if not self.drop_last and data_batches: + yield data_batches diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/meshes/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/data/meshes/__init__.py new file mode 100644 index 0000000..1e1f0d5 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/meshes/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from . import builtin + +__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/meshes/builtin.py b/data_processing/detectron2/projects/DensePose/densepose/data/meshes/builtin.py new file mode 100644 index 0000000..c0b2376 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/meshes/builtin.py @@ -0,0 +1,101 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from .catalog import MeshInfo, register_meshes + +DENSEPOSE_MESHES_DIR = "https://dl.fbaipublicfiles.com/densepose/meshes/" + +MESHES = [ + MeshInfo( + name="smpl_27554", + data="smpl_27554.pkl", + geodists="geodists/geodists_smpl_27554.pkl", + symmetry="symmetry/symmetry_smpl_27554.pkl", + texcoords="texcoords/texcoords_smpl_27554.pkl", + ), + MeshInfo( + name="chimp_5029", + data="chimp_5029.pkl", + geodists="geodists/geodists_chimp_5029.pkl", + symmetry="symmetry/symmetry_chimp_5029.pkl", + texcoords="texcoords/texcoords_chimp_5029.pkl", + ), + MeshInfo( + name="cat_5001", + data="cat_5001.pkl", + geodists="geodists/geodists_cat_5001.pkl", + symmetry="symmetry/symmetry_cat_5001.pkl", + texcoords="texcoords/texcoords_cat_5001.pkl", + ), + MeshInfo( + name="cat_7466", + data="cat_7466.pkl", + geodists="geodists/geodists_cat_7466.pkl", + symmetry="symmetry/symmetry_cat_7466.pkl", + texcoords="texcoords/texcoords_cat_7466.pkl", + ), + MeshInfo( + name="sheep_5004", + data="sheep_5004.pkl", + geodists="geodists/geodists_sheep_5004.pkl", + symmetry="symmetry/symmetry_sheep_5004.pkl", + texcoords="texcoords/texcoords_sheep_5004.pkl", + ), + MeshInfo( + name="zebra_5002", + data="zebra_5002.pkl", + geodists="geodists/geodists_zebra_5002.pkl", + symmetry="symmetry/symmetry_zebra_5002.pkl", + texcoords="texcoords/texcoords_zebra_5002.pkl", + ), + MeshInfo( + name="horse_5004", + data="horse_5004.pkl", + geodists="geodists/geodists_horse_5004.pkl", + symmetry="symmetry/symmetry_horse_5004.pkl", + texcoords="texcoords/texcoords_zebra_5002.pkl", + ), + MeshInfo( + name="giraffe_5002", + data="giraffe_5002.pkl", + geodists="geodists/geodists_giraffe_5002.pkl", + symmetry="symmetry/symmetry_giraffe_5002.pkl", + texcoords="texcoords/texcoords_giraffe_5002.pkl", + ), + MeshInfo( + name="elephant_5002", + data="elephant_5002.pkl", + geodists="geodists/geodists_elephant_5002.pkl", + 
symmetry="symmetry/symmetry_elephant_5002.pkl", + texcoords="texcoords/texcoords_elephant_5002.pkl", + ), + MeshInfo( + name="dog_5002", + data="dog_5002.pkl", + geodists="geodists/geodists_dog_5002.pkl", + symmetry="symmetry/symmetry_dog_5002.pkl", + texcoords="texcoords/texcoords_dog_5002.pkl", + ), + MeshInfo( + name="dog_7466", + data="dog_7466.pkl", + geodists="geodists/geodists_dog_7466.pkl", + symmetry="symmetry/symmetry_dog_7466.pkl", + texcoords="texcoords/texcoords_dog_7466.pkl", + ), + MeshInfo( + name="cow_5002", + data="cow_5002.pkl", + geodists="geodists/geodists_cow_5002.pkl", + symmetry="symmetry/symmetry_cow_5002.pkl", + texcoords="texcoords/texcoords_cow_5002.pkl", + ), + MeshInfo( + name="bear_4936", + data="bear_4936.pkl", + geodists="geodists/geodists_bear_4936.pkl", + symmetry="symmetry/symmetry_bear_4936.pkl", + texcoords="texcoords/texcoords_bear_4936.pkl", + ), +] + +register_meshes(MESHES, DENSEPOSE_MESHES_DIR) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/meshes/catalog.py b/data_processing/detectron2/projects/DensePose/densepose/data/meshes/catalog.py new file mode 100644 index 0000000..b258f3c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/meshes/catalog.py @@ -0,0 +1,71 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import logging +from collections import UserDict +from dataclasses import dataclass +from typing import Iterable, Optional + +from ..utils import maybe_prepend_base_path + + +@dataclass +class MeshInfo: + name: str + data: str + geodists: Optional[str] = None + symmetry: Optional[str] = None + texcoords: Optional[str] = None + + +class _MeshCatalog(UserDict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.mesh_ids = {} + self.mesh_names = {} + self.max_mesh_id = -1 + + def __setitem__(self, key, value): + if key in self: + logger = logging.getLogger(__name__) + logger.warning( + f"Overwriting mesh catalog entry '{key}': old value {self[key]}" + f", new value {value}" + ) + mesh_id = self.mesh_ids[key] + else: + self.max_mesh_id += 1 + mesh_id = self.max_mesh_id + super().__setitem__(key, value) + self.mesh_ids[key] = mesh_id + self.mesh_names[mesh_id] = key + + def get_mesh_id(self, shape_name: str) -> int: + return self.mesh_ids[shape_name] + + def get_mesh_name(self, mesh_id: int) -> str: + return self.mesh_names[mesh_id] + + +MeshCatalog = _MeshCatalog() + + +def register_mesh(mesh_info: MeshInfo, base_path: Optional[str]) -> None: + geodists, symmetry, texcoords = mesh_info.geodists, mesh_info.symmetry, mesh_info.texcoords + if geodists: + geodists = maybe_prepend_base_path(base_path, geodists) + if symmetry: + symmetry = maybe_prepend_base_path(base_path, symmetry) + if texcoords: + texcoords = maybe_prepend_base_path(base_path, texcoords) + MeshCatalog[mesh_info.name] = MeshInfo( + name=mesh_info.name, + data=maybe_prepend_base_path(base_path, mesh_info.data), + geodists=geodists, + symmetry=symmetry, + texcoords=texcoords, + ) + + +def register_meshes(mesh_infos: Iterable[MeshInfo], base_path: Optional[str]) -> None: + for mesh_info in mesh_infos: + register_mesh(mesh_info, base_path) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/__init__.py new file mode 100644 index 0000000..7dba87e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/__init__.py @@ -0,0 +1,8 
@@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .densepose_uniform import DensePoseUniformSampler +from .densepose_confidence_based import DensePoseConfidenceBasedSampler +from .densepose_cse_uniform import DensePoseCSEUniformSampler +from .densepose_cse_confidence_based import DensePoseCSEConfidenceBasedSampler +from .mask_from_densepose import MaskFromDensePoseSampler +from .prediction_to_gt import PredictionToGroundTruthSampler diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_base.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_base.py new file mode 100644 index 0000000..4d499d8 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_base.py @@ -0,0 +1,203 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any, Dict, List, Tuple +import torch +from torch.nn import functional as F + +from detectron2.structures import BoxMode, Instances + +from densepose.converters import ToChartResultConverter +from densepose.converters.base import IntTupleBox, make_int_box +from densepose.structures import DensePoseDataRelative, DensePoseList + + +class DensePoseBaseSampler: + """ + Base DensePose sampler to produce DensePose data from DensePose predictions. + Samples for each class are drawn according to some distribution over all pixels estimated + to belong to that class. + """ + + def __init__(self, count_per_class: int = 8): + """ + Constructor + + Args: + count_per_class (int): the sampler produces at most `count_per_class` + samples for each category + """ + self.count_per_class = count_per_class + + def __call__(self, instances: Instances) -> DensePoseList: + """ + Convert DensePose predictions (an instance of `DensePoseChartPredictorOutput`) + into DensePose annotations data (an instance of `DensePoseList`) + """ + boxes_xyxy_abs = instances.pred_boxes.tensor.clone().cpu() + boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + dp_datas = [] + for i in range(len(boxes_xywh_abs)): + annotation_i = self._sample(instances[i], make_int_box(boxes_xywh_abs[i])) + annotation_i[DensePoseDataRelative.S_KEY] = self._resample_mask( # pyre-ignore[6] + instances[i].pred_densepose + ) + dp_datas.append(DensePoseDataRelative(annotation_i)) + # create densepose annotations on CPU + dp_list = DensePoseList(dp_datas, boxes_xyxy_abs, instances.image_size) + return dp_list + + def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]: + """ + Sample DensPoseDataRelative from estimation results + """ + labels, dp_result = self._produce_labels_and_results(instance) + annotation = { + DensePoseDataRelative.X_KEY: [], + DensePoseDataRelative.Y_KEY: [], + DensePoseDataRelative.U_KEY: [], + DensePoseDataRelative.V_KEY: [], + DensePoseDataRelative.I_KEY: [], + } + n, h, w = dp_result.shape + for part_id in range(1, DensePoseDataRelative.N_PART_LABELS + 1): + # indices - tuple of 3 1D tensors of size k + # 0: index along the first dimension N + # 1: index along H dimension + # 2: index along W dimension + indices = torch.nonzero(labels.expand(n, h, w) == part_id, as_tuple=True) + # values - an array of size [n, k] + # n: number of channels (U, V, confidences) + # k: number of points labeled with part_id + values = dp_result[indices].view(n, -1) + k = values.shape[1] + count = min(self.count_per_class, k) + if count <= 0: + continue + index_sample = self._produce_index_sample(values, count) + 
sampled_values = values[:, index_sample] + sampled_y = indices[1][index_sample] + 0.5 + sampled_x = indices[2][index_sample] + 0.5 + # prepare / normalize data + x = (sampled_x / w * 256.0).cpu().tolist() + y = (sampled_y / h * 256.0).cpu().tolist() + u = sampled_values[0].clamp(0, 1).cpu().tolist() + v = sampled_values[1].clamp(0, 1).cpu().tolist() + fine_segm_labels = [part_id] * count + # extend annotations + annotation[DensePoseDataRelative.X_KEY].extend(x) + annotation[DensePoseDataRelative.Y_KEY].extend(y) + annotation[DensePoseDataRelative.U_KEY].extend(u) + annotation[DensePoseDataRelative.V_KEY].extend(v) + annotation[DensePoseDataRelative.I_KEY].extend(fine_segm_labels) + return annotation + + def _produce_index_sample(self, values: torch.Tensor, count: int): + """ + Abstract method to produce a sample of indices to select data + To be implemented in descendants + + Args: + values (torch.Tensor): an array of size [n, k] that contains + estimated values (U, V, confidences); + n: number of channels (U, V, confidences) + k: number of points labeled with part_id + count (int): number of samples to produce, should be positive and <= k + + Return: + list(int): indices of values (along axis 1) selected as a sample + """ + raise NotImplementedError + + def _produce_labels_and_results(self, instance: Instances) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Method to get labels and DensePose results from an instance + + Args: + instance (Instances): an instance of `DensePoseChartPredictorOutput` + + Return: + labels (torch.Tensor): shape [H, W], DensePose segmentation labels + dp_result (torch.Tensor): shape [2, H, W], stacked DensePose results u and v + """ + converter = ToChartResultConverter + chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes) + labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu() + return labels, dp_result + + def _resample_mask(self, output: Any) -> torch.Tensor: + """ + Convert DensePose predictor output to segmentation annotation - tensors of size + (256, 256) and type `int64`. 
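+        The 24 fine part labels predicted by the model are collapsed to the 14 coarse
+        labels via the FINE_TO_COARSE_SEGMENTATION mapping defined below.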
+ + Args: + output: DensePose predictor output with the following attributes: + - coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse + segmentation scores + - fine_segm: tensor of size [N, C, H, W] with unnormalized fine + segmentation scores + Return: + Tensor of size (S, S) and type `int64` with coarse segmentation annotations, + where S = DensePoseDataRelative.MASK_SIZE + """ + sz = DensePoseDataRelative.MASK_SIZE + S = ( + F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False) + .argmax(dim=1) + .long() + ) + I = ( + ( + F.interpolate( + output.fine_segm, + (sz, sz), + mode="bilinear", + align_corners=False, + ).argmax(dim=1) + * (S > 0).long() + ) + .squeeze() + .cpu() + ) + # Map fine segmentation results to coarse segmentation ground truth + # TODO: extract this into separate classes + # coarse segmentation: 1 = Torso, 2 = Right Hand, 3 = Left Hand, + # 4 = Left Foot, 5 = Right Foot, 6 = Upper Leg Right, 7 = Upper Leg Left, + # 8 = Lower Leg Right, 9 = Lower Leg Left, 10 = Upper Arm Left, + # 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right, + # 14 = Head + # fine segmentation: 1, 2 = Torso, 3 = Right Hand, 4 = Left Hand, + # 5 = Left Foot, 6 = Right Foot, 7, 9 = Upper Leg Right, + # 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right, + # 12, 14 = Lower Leg Left, 15, 17 = Upper Arm Left, + # 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left, + # 20, 22 = Lower Arm Right, 23, 24 = Head + FINE_TO_COARSE_SEGMENTATION = { + 1: 1, + 2: 1, + 3: 2, + 4: 3, + 5: 4, + 6: 5, + 7: 6, + 8: 7, + 9: 6, + 10: 7, + 11: 8, + 12: 9, + 13: 8, + 14: 9, + 15: 10, + 16: 11, + 17: 10, + 18: 11, + 19: 12, + 20: 13, + 21: 12, + 22: 13, + 23: 14, + 24: 14, + } + mask = torch.zeros((sz, sz), dtype=torch.int64, device=torch.device("cpu")) + for i in range(DensePoseDataRelative.N_PART_LABELS): + mask[I == i + 1] = FINE_TO_COARSE_SEGMENTATION[i + 1] + return mask diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_confidence_based.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_confidence_based.py new file mode 100644 index 0000000..48e325b --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_confidence_based.py @@ -0,0 +1,108 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import random +from typing import Optional, Tuple +import torch + +from densepose.converters import ToChartResultConverterWithConfidences + +from .densepose_base import DensePoseBaseSampler + + +class DensePoseConfidenceBasedSampler(DensePoseBaseSampler): + """ + Samples DensePose data from DensePose predictions. + Samples for each class are drawn using confidence value estimates. 
+ """ + + def __init__( + self, + confidence_channel: str, + count_per_class: int = 8, + search_count_multiplier: Optional[float] = None, + search_proportion: Optional[float] = None, + ): + """ + Constructor + + Args: + confidence_channel (str): confidence channel to use for sampling; + possible values: + "sigma_2": confidences for UV values + "fine_segm_confidence": confidences for fine segmentation + "coarse_segm_confidence": confidences for coarse segmentation + (default: "sigma_2") + count_per_class (int): the sampler produces at most `count_per_class` + samples for each category (default: 8) + search_count_multiplier (float or None): if not None, the total number + of the most confident estimates of a given class to consider is + defined as `min(search_count_multiplier * count_per_class, N)`, + where `N` is the total number of estimates of the class; cannot be + specified together with `search_proportion` (default: None) + search_proportion (float or None): if not None, the total number of the + of the most confident estimates of a given class to consider is + defined as `min(max(search_proportion * N, count_per_class), N)`, + where `N` is the total number of estimates of the class; cannot be + specified together with `search_count_multiplier` (default: None) + """ + super().__init__(count_per_class) + self.confidence_channel = confidence_channel + self.search_count_multiplier = search_count_multiplier + self.search_proportion = search_proportion + assert (search_count_multiplier is None) or (search_proportion is None), ( + f"Cannot specify both search_count_multiplier (={search_count_multiplier})" + f"and search_proportion (={search_proportion})" + ) + + def _produce_index_sample(self, values: torch.Tensor, count: int): + """ + Produce a sample of indices to select data based on confidences + + Args: + values (torch.Tensor): an array of size [n, k] that contains + estimated values (U, V, confidences); + n: number of channels (U, V, confidences) + k: number of points labeled with part_id + count (int): number of samples to produce, should be positive and <= k + + Return: + list(int): indices of values (along axis 1) selected as a sample + """ + k = values.shape[1] + if k == count: + index_sample = list(range(k)) + else: + # take the best count * search_count_multiplier pixels, + # sample from them uniformly + # (here best = smallest variance) + _, sorted_confidence_indices = torch.sort(values[2]) + if self.search_count_multiplier is not None: + search_count = min(int(count * self.search_count_multiplier), k) + elif self.search_proportion is not None: + search_count = min(max(int(k * self.search_proportion), count), k) + else: + search_count = min(count, k) + sample_from_top = random.sample(range(search_count), count) + index_sample = sorted_confidence_indices[:search_count][sample_from_top] + return index_sample + + def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Method to get labels and DensePose results from an instance, with confidences + + Args: + instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` + + Return: + labels (torch.Tensor): shape [H, W], DensePose segmentation labels + dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v + stacked with the confidence channel + """ + converter = ToChartResultConverterWithConfidences + chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes) + labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu() + dp_result = 
torch.cat( + (dp_result, getattr(chart_result, self.confidence_channel)[None].cpu()) + ) + + return labels, dp_result diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_base.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_base.py new file mode 100644 index 0000000..845545c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_base.py @@ -0,0 +1,139 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any, Dict, List, Tuple +import torch +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from densepose.converters.base import IntTupleBox +from densepose.data.utils import get_class_to_mesh_name_mapping +from densepose.modeling.cse.utils import squared_euclidean_distance_matrix +from densepose.structures import DensePoseDataRelative + +from .densepose_base import DensePoseBaseSampler + + +class DensePoseCSEBaseSampler(DensePoseBaseSampler): + """ + Base DensePose sampler to produce DensePose data from DensePose predictions. + Samples for each class are drawn according to some distribution over all pixels estimated + to belong to that class. + """ + + def __init__( + self, + cfg: CfgNode, + use_gt_categories: bool, + embedder: torch.nn.Module, + count_per_class: int = 8, + ): + """ + Constructor + + Args: + cfg (CfgNode): the config of the model + embedder (torch.nn.Module): necessary to compute mesh vertex embeddings + count_per_class (int): the sampler produces at most `count_per_class` + samples for each category + """ + super().__init__(count_per_class) + self.embedder = embedder + self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg) + self.use_gt_categories = use_gt_categories + + def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]: + """ + Sample DensPoseDataRelative from estimation results + """ + if self.use_gt_categories: + instance_class = instance.dataset_classes.tolist()[0] + else: + instance_class = instance.pred_classes.tolist()[0] + mesh_name = self.class_to_mesh_name[instance_class] + + annotation = { + DensePoseDataRelative.X_KEY: [], + DensePoseDataRelative.Y_KEY: [], + DensePoseDataRelative.VERTEX_IDS_KEY: [], + DensePoseDataRelative.MESH_NAME_KEY: mesh_name, + } + + mask, embeddings, other_values = self._produce_mask_and_results(instance, bbox_xywh) + indices = torch.nonzero(mask, as_tuple=True) + selected_embeddings = embeddings.permute(1, 2, 0)[indices].cpu() + values = other_values[:, indices[0], indices[1]] + k = values.shape[1] + + count = min(self.count_per_class, k) + if count <= 0: + return annotation + + index_sample = self._produce_index_sample(values, count) + closest_vertices = squared_euclidean_distance_matrix( + selected_embeddings[index_sample], self.embedder(mesh_name) + ) + closest_vertices = torch.argmin(closest_vertices, dim=1) + + sampled_y = indices[0][index_sample] + 0.5 + sampled_x = indices[1][index_sample] + 0.5 + # prepare / normalize data + _, _, w, h = bbox_xywh + x = (sampled_x / w * 256.0).cpu().tolist() + y = (sampled_y / h * 256.0).cpu().tolist() + # extend annotations + annotation[DensePoseDataRelative.X_KEY].extend(x) + annotation[DensePoseDataRelative.Y_KEY].extend(y) + annotation[DensePoseDataRelative.VERTEX_IDS_KEY].extend(closest_vertices.cpu().tolist()) + return annotation + + def _produce_mask_and_results( + self, instance: Instances, bbox_xywh: IntTupleBox + 
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Method to get labels and DensePose results from an instance + + Args: + instance (Instances): an instance of `DensePoseEmbeddingPredictorOutput` + bbox_xywh (IntTupleBox): the corresponding bounding box + + Return: + mask (torch.Tensor): shape [H, W], DensePose segmentation mask + embeddings (Tuple[torch.Tensor]): a tensor of shape [D, H, W], + DensePose CSE Embeddings + other_values (Tuple[torch.Tensor]): a tensor of shape [0, H, W], + for potential other values + """ + densepose_output = instance.pred_densepose + S = densepose_output.coarse_segm + E = densepose_output.embedding + _, _, w, h = bbox_xywh + embeddings = F.interpolate(E, size=(h, w), mode="bilinear")[0] + coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0] + mask = coarse_segm_resized.argmax(0) > 0 + other_values = torch.empty((0, h, w), device=E.device) + return mask, embeddings, other_values + + def _resample_mask(self, output: Any) -> torch.Tensor: + """ + Convert DensePose predictor output to segmentation annotation - tensors of size + (256, 256) and type `int64`. + + Args: + output: DensePose predictor output with the following attributes: + - coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse + segmentation scores + Return: + Tensor of size (S, S) and type `int64` with coarse segmentation annotations, + where S = DensePoseDataRelative.MASK_SIZE + """ + sz = DensePoseDataRelative.MASK_SIZE + mask = ( + F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False) + .argmax(dim=1) + .long() + .squeeze() + .cpu() + ) + return mask diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_confidence_based.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_confidence_based.py new file mode 100644 index 0000000..964b7f4 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_confidence_based.py @@ -0,0 +1,119 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import random +from typing import Optional, Tuple +import torch +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from densepose.converters.base import IntTupleBox + +from .densepose_cse_base import DensePoseCSEBaseSampler + + +class DensePoseCSEConfidenceBasedSampler(DensePoseCSEBaseSampler): + """ + Samples DensePose data from DensePose predictions. + Samples for each class are drawn using confidence value estimates. 
+ """ + + def __init__( + self, + cfg: CfgNode, + use_gt_categories: bool, + embedder: torch.nn.Module, + confidence_channel: str, + count_per_class: int = 8, + search_count_multiplier: Optional[float] = None, + search_proportion: Optional[float] = None, + ): + """ + Constructor + + Args: + cfg (CfgNode): the config of the model + embedder (torch.nn.Module): necessary to compute mesh vertex embeddings + confidence_channel (str): confidence channel to use for sampling; + possible values: + "coarse_segm_confidence": confidences for coarse segmentation + (default: "coarse_segm_confidence") + count_per_class (int): the sampler produces at most `count_per_class` + samples for each category (default: 8) + search_count_multiplier (float or None): if not None, the total number + of the most confident estimates of a given class to consider is + defined as `min(search_count_multiplier * count_per_class, N)`, + where `N` is the total number of estimates of the class; cannot be + specified together with `search_proportion` (default: None) + search_proportion (float or None): if not None, the total number of the + of the most confident estimates of a given class to consider is + defined as `min(max(search_proportion * N, count_per_class), N)`, + where `N` is the total number of estimates of the class; cannot be + specified together with `search_count_multiplier` (default: None) + """ + super().__init__(cfg, use_gt_categories, embedder, count_per_class) + self.confidence_channel = confidence_channel + self.search_count_multiplier = search_count_multiplier + self.search_proportion = search_proportion + assert (search_count_multiplier is None) or (search_proportion is None), ( + f"Cannot specify both search_count_multiplier (={search_count_multiplier})" + f"and search_proportion (={search_proportion})" + ) + + def _produce_index_sample(self, values: torch.Tensor, count: int): + """ + Produce a sample of indices to select data based on confidences + + Args: + values (torch.Tensor): a tensor of length k that contains confidences + k: number of points labeled with part_id + count (int): number of samples to produce, should be positive and <= k + + Return: + list(int): indices of values (along axis 1) selected as a sample + """ + k = values.shape[1] + if k == count: + index_sample = list(range(k)) + else: + # take the best count * search_count_multiplier pixels, + # sample from them uniformly + # (here best = smallest variance) + _, sorted_confidence_indices = torch.sort(values[0]) + if self.search_count_multiplier is not None: + search_count = min(int(count * self.search_count_multiplier), k) + elif self.search_proportion is not None: + search_count = min(max(int(k * self.search_proportion), count), k) + else: + search_count = min(count, k) + sample_from_top = random.sample(range(search_count), count) + index_sample = sorted_confidence_indices[-search_count:][sample_from_top] + return index_sample + + def _produce_mask_and_results( + self, instance: Instances, bbox_xywh: IntTupleBox + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Method to get labels and DensePose results from an instance + + Args: + instance (Instances): an instance of + `DensePoseEmbeddingPredictorOutputWithConfidences` + bbox_xywh (IntTupleBox): the corresponding bounding box + + Return: + mask (torch.Tensor): shape [H, W], DensePose segmentation mask + embeddings (Tuple[torch.Tensor]): a tensor of shape [D, H, W] + DensePose CSE Embeddings + other_values: a tensor of shape [1, H, W], DensePose CSE confidence + """ + _, _, 
w, h = bbox_xywh + densepose_output = instance.pred_densepose + mask, embeddings, _ = super()._produce_mask_and_results(instance, bbox_xywh) + other_values = F.interpolate( + getattr(densepose_output, self.confidence_channel), + size=(h, w), + mode="bilinear", + )[0].cpu() + return mask, embeddings, other_values diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py new file mode 100644 index 0000000..567636c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_cse_uniform.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .densepose_cse_base import DensePoseCSEBaseSampler +from .densepose_uniform import DensePoseUniformSampler + + +class DensePoseCSEUniformSampler(DensePoseCSEBaseSampler, DensePoseUniformSampler): + """ + Uniform Sampler for CSE + """ + + pass diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_uniform.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_uniform.py new file mode 100644 index 0000000..0d72cc3 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/densepose_uniform.py @@ -0,0 +1,41 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import random +import torch + +from .densepose_base import DensePoseBaseSampler + + +class DensePoseUniformSampler(DensePoseBaseSampler): + """ + Samples DensePose data from DensePose predictions. + Samples for each class are drawn uniformly over all pixels estimated + to belong to that class. + """ + + def __init__(self, count_per_class: int = 8): + """ + Constructor + + Args: + count_per_class (int): the sampler produces at most `count_per_class` + samples for each category + """ + super().__init__(count_per_class) + + def _produce_index_sample(self, values: torch.Tensor, count: int): + """ + Produce a uniform sample of indices to select data + + Args: + values (torch.Tensor): an array of size [n, k] that contains + estimated values (U, V, confidences); + n: number of channels (U, V, confidences) + k: number of points labeled with part_id + count (int): number of samples to produce, should be positive and <= k + + Return: + list(int): indices of values (along axis 1) selected as a sample + """ + k = values.shape[1] + return random.sample(range(k), count) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/mask_from_densepose.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/mask_from_densepose.py new file mode 100644 index 0000000..0e6e812 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/mask_from_densepose.py @@ -0,0 +1,28 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
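+# A minimal usage sketch (the dataset name and target field below are illustrative,
+# not taken from this repository): the sampler is meant to be registered with a
+# PredictionToGroundTruthSampler (see prediction_to_gt.py) so that predicted
+# DensePose results are turned into mask ground truth:
+#
+#   sampler = PredictionToGroundTruthSampler("my_dataset")
+#   sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())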
+ +from detectron2.structures import BitMasks, Instances + +from densepose.converters import ToMaskConverter + + +class MaskFromDensePoseSampler: + """ + Produce mask GT from DensePose predictions. + This sampler simply converts DensePose predictions to BitMasks + that contain a bool tensor of the size of the input image. + """ + + def __call__(self, instances: Instances) -> BitMasks: + """ + Converts predicted data from `instances` into the GT mask data + + Args: + instances (Instances): predicted results, expected to have `pred_densepose` field + + Returns: + Boolean Tensor of the size of the input image that has non-zero + values at pixels that are estimated to belong to the detected object + """ + return ToMaskConverter.convert( + instances.pred_densepose, instances.pred_boxes, instances.image_size + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/samplers/prediction_to_gt.py b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/prediction_to_gt.py new file mode 100644 index 0000000..3881fa5 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/samplers/prediction_to_gt.py @@ -0,0 +1,98 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional + +from detectron2.structures import Instances + +ModelOutput = Dict[str, Any] +SampledData = Dict[str, Any] + + +@dataclass +class _Sampler: + """ + Sampler registry entry that contains: + - src (str): source field to sample from (deleted after sampling) + - dst (Optional[str]): destination field to sample to, if not None + - func (Optional[Callable: Any -> Any]): function that performs sampling, + if None, reference copy is performed + """ + + src: str + dst: Optional[str] + func: Optional[Callable[[Any], Any]] + + +class PredictionToGroundTruthSampler: + """ + Sampler implementation that converts predictions to GT using registered + samplers for different fields of `Instances`.
+ """ + + def __init__(self, dataset_name: str = ""): + self.dataset_name = dataset_name + self._samplers = {} + self.register_sampler("pred_boxes", "gt_boxes", None) + self.register_sampler("pred_classes", "gt_classes", None) + # delete scores + self.register_sampler("scores") + + def __call__(self, model_output: List[ModelOutput]) -> List[SampledData]: + """ + Transform model output into ground truth data through sampling + + Args: + model_output (Dict[str, Any]): model output + Returns: + Dict[str, Any]: sampled data + """ + for model_output_i in model_output: + instances: Instances = model_output_i["instances"] + # transform data in each field + for _, sampler in self._samplers.items(): + if not instances.has(sampler.src) or sampler.dst is None: + continue + if sampler.func is None: + instances.set(sampler.dst, instances.get(sampler.src)) + else: + instances.set(sampler.dst, sampler.func(instances)) + # delete model output data that was transformed + for _, sampler in self._samplers.items(): + if sampler.src != sampler.dst and instances.has(sampler.src): + instances.remove(sampler.src) + model_output_i["dataset"] = self.dataset_name + return model_output + + def register_sampler( + self, + prediction_attr: str, + gt_attr: Optional[str] = None, + func: Optional[Callable[[Any], Any]] = None, + ): + """ + Register sampler for a field + + Args: + prediction_attr (str): field to replace with a sampled value + gt_attr (Optional[str]): field to store the sampled value to, if not None + func (Optional[Callable: Any -> Any]): sampler function + """ + self._samplers[(prediction_attr, gt_attr)] = _Sampler( + src=prediction_attr, dst=gt_attr, func=func + ) + + def remove_sampler( + self, + prediction_attr: str, + gt_attr: Optional[str] = None, + ): + """ + Remove sampler for a field + + Args: + prediction_attr (str): field to replace with a sampled value + gt_attr (Optional[str]): field to store the sampled value to, if not None + """ + assert (prediction_attr, gt_attr) in self._samplers + del self._samplers[(prediction_attr, gt_attr)] diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/transform/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/data/transform/__init__.py new file mode 100644 index 0000000..369e1b2 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/transform/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .image import ImageResizeTransform diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/transform/image.py b/data_processing/detectron2/projects/DensePose/densepose/data/transform/image.py new file mode 100644 index 0000000..8139b67 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/transform/image.py @@ -0,0 +1,39 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import torch + + +class ImageResizeTransform: + """ + Transform that resizes images loaded from a dataset + (BGR data in NCHW channel order, typically uint8) to a format ready to be + consumed by DensePose training (BGR float32 data in NCHW channel order) + """ + + def __init__(self, min_size: int = 800, max_size: int = 1333): + self.min_size = min_size + self.max_size = max_size + + def __call__(self, images: torch.Tensor) -> torch.Tensor: + """ + Args: + images (torch.Tensor): tensor of size [N, 3, H, W] that contains + BGR data (typically in uint8) + Returns: + images (torch.Tensor): tensor of size [N, 3, H1, W1] where + H1 and W1 are chosen to respect the specified min and max sizes + and preserve the original aspect ratio, the data channels + follow BGR order and the data type is `torch.float32` + """ + # resize with min size + images = images.float() + min_size = min(images.shape[-2:]) + max_size = max(images.shape[-2:]) + scale = min(self.min_size / min_size, self.max_size / max_size) + images = torch.nn.functional.interpolate( + images, + scale_factor=scale, + mode="bilinear", + align_corners=False, + ) + return images diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/utils.py b/data_processing/detectron2/projects/DensePose/densepose/data/utils.py new file mode 100644 index 0000000..9878c31 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/utils.py @@ -0,0 +1,38 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import os +from typing import Dict, Optional + +from detectron2.config import CfgNode + + +def is_relative_local_path(path: str) -> bool: + path_str = os.fsdecode(path) + return ("://" not in path_str) and not os.path.isabs(path) + + +def maybe_prepend_base_path(base_path: Optional[str], path: str): + """ + Prepends the provided path with a base path prefix if: + 1) base path is not None; + 2) path is a local path + """ + if base_path is None: + return path + if is_relative_local_path(path): + return os.path.join(base_path, path) + return path + + +def get_class_to_mesh_name_mapping(cfg: CfgNode) -> Dict[int, str]: + return { + int(class_id): mesh_name + for class_id, mesh_name in cfg.DATASETS.CLASS_TO_MESH_NAME_MAPPING.items() + } + + +def get_category_to_class_mapping(dataset_cfg: CfgNode) -> Dict[str, int]: + return { + category: int(class_id) + for category, class_id in dataset_cfg.CATEGORY_TO_CLASS_MAPPING.items() + } diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/video/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/data/video/__init__.py new file mode 100644 index 0000000..72406e1 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/video/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .frame_selector import ( + FrameSelectionStrategy, + RandomKFramesSelector, + FirstKFramesSelector, + LastKFramesSelector, + FrameTsList, + FrameSelector, +) + +from .video_keyframe_dataset import ( + VideoKeyframeDataset, + video_list_from_file, + list_keyframes, + read_keyframes, +) diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/video/frame_selector.py b/data_processing/detectron2/projects/DensePose/densepose/data/video/frame_selector.py new file mode 100644 index 0000000..c28f0e9 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/video/frame_selector.py @@ -0,0 +1,87 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
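+# A minimal usage sketch (the timestamp list is assumed to come from
+# list_keyframes() in video_keyframe_dataset.py): keep at most 10 random keyframes:
+#
+#   selector = RandomKFramesSelector(10)
+#   selected_ts = selector(keyframe_timestamps)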
+ +import random +from collections.abc import Callable +from enum import Enum +from typing import Callable as TCallable +from typing import List + +FrameTsList = List[int] +FrameSelector = TCallable[[FrameTsList], FrameTsList] + + +class FrameSelectionStrategy(Enum): + """ + Frame selection strategy used with videos: + - "random_k": select k random frames + - "first_k": select k first frames + - "last_k": select k last frames + - "all": select all frames + """ + + # fmt: off + RANDOM_K = "random_k" + FIRST_K = "first_k" + LAST_K = "last_k" + ALL = "all" + # fmt: on + + +class RandomKFramesSelector(Callable): # pyre-ignore[39] + """ + Selector that retains at most `k` random frames + """ + + def __init__(self, k: int): + self.k = k + + def __call__(self, frame_tss: FrameTsList) -> FrameTsList: + """ + Select `k` random frames + + Args: + frame_tss (List[int]): timestamps of input frames + Returns: + List[int]: timestamps of selected frames + """ + return random.sample(frame_tss, min(self.k, len(frame_tss))) + + +class FirstKFramesSelector(Callable): # pyre-ignore[39] + """ + Selector that retains at most `k` first frames + """ + + def __init__(self, k: int): + self.k = k + + def __call__(self, frame_tss: FrameTsList) -> FrameTsList: + """ + Select `k` first frames + + Args: + frame_tss (List[int]): timestamps of input frames + Returns: + List[int]: timestamps of selected frames + """ + return frame_tss[: self.k] + + +class LastKFramesSelector(Callable): # pyre-ignore[39] + """ + Selector that retains at most `k` last frames from video data + """ + + def __init__(self, k: int): + self.k = k + + def __call__(self, frame_tss: FrameTsList) -> FrameTsList: + """ + Select `k` last frames + + Args: + frame_tss (List[int]): timestamps of input frames + Returns: + List[int]: timestamps of selected frames + """ + return frame_tss[-self.k :] diff --git a/data_processing/detectron2/projects/DensePose/densepose/data/video/video_keyframe_dataset.py b/data_processing/detectron2/projects/DensePose/densepose/data/video/video_keyframe_dataset.py new file mode 100644 index 0000000..214365c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/data/video/video_keyframe_dataset.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import csv +import logging +import numpy as np +from typing import Any, Callable, Dict, List, Optional, Union +import av +import torch +from torch.utils.data.dataset import Dataset + +from detectron2.utils.file_io import PathManager + +from ..utils import maybe_prepend_base_path +from .frame_selector import FrameSelector, FrameTsList + +FrameList = List[av.frame.Frame] # pyre-ignore[16] +FrameTransform = Callable[[torch.Tensor], torch.Tensor] + + +def list_keyframes(video_fpath: str, video_stream_idx: int = 0) -> FrameTsList: + """ + Traverses all keyframes of a video file. Returns a list of keyframe + timestamps. Timestamps are counts in timebase units. + + Args: + video_fpath (str): Video file path + video_stream_idx (int): Video stream index (default: 0) + Returns: + List[int]: list of keyframe timestamps (timestamp is a count in timebase + units) + """ + try: + with PathManager.open(video_fpath, "rb") as io: + container = av.open(io, mode="r") + stream = container.streams.video[video_stream_idx] + keyframes = [] + pts = -1 + # Note: even though we request forward seeks for keyframes, sometimes + # a keyframe in backwards direction is returned.
We introduce tolerance + # as a max count of ignored backward seeks + tolerance_backward_seeks = 2 + while True: + try: + container.seek(pts + 1, backward=False, any_frame=False, stream=stream) + except av.AVError as e: + # the exception occurs when the video length is exceeded, + # we then return whatever data we've already collected + logger = logging.getLogger(__name__) + logger.debug( + f"List keyframes: Error seeking video file {video_fpath}, " + f"video stream {video_stream_idx}, pts {pts + 1}, AV error: {e}" + ) + return keyframes + except OSError as e: + logger = logging.getLogger(__name__) + logger.warning( + f"List keyframes: Error seeking video file {video_fpath}, " + f"video stream {video_stream_idx}, pts {pts + 1}, OS error: {e}" + ) + return [] + packet = next(container.demux(video=video_stream_idx)) + if packet.pts is not None and packet.pts <= pts: + logger = logging.getLogger(__name__) + logger.warning( + f"Video file {video_fpath}, stream {video_stream_idx}: " + f"bad seek for packet {pts + 1} (got packet {packet.pts}), " + f"tolerance {tolerance_backward_seeks}." + ) + tolerance_backward_seeks -= 1 + if tolerance_backward_seeks == 0: + return [] + pts += 1 + continue + tolerance_backward_seeks = 2 + pts = packet.pts + if pts is None: + return keyframes + if packet.is_keyframe: + keyframes.append(pts) + return keyframes + except OSError as e: + logger = logging.getLogger(__name__) + logger.warning( + f"List keyframes: Error opening video file container {video_fpath}, " f"OS error: {e}" + ) + except RuntimeError as e: + logger = logging.getLogger(__name__) + logger.warning( + f"List keyframes: Error opening video file container {video_fpath}, " + f"Runtime error: {e}" + ) + return [] + + +def read_keyframes( + video_fpath: str, keyframes: FrameTsList, video_stream_idx: int = 0 +) -> FrameList: # pyre-ignore[11] + """ + Reads keyframe data from a video file. 
+ + Args: + video_fpath (str): Video file path + keyframes (List[int]): List of keyframe timestamps (as counts in + timebase units to be used in container seek operations) + video_stream_idx (int): Video stream index (default: 0) + Returns: + List[Frame]: list of frames that correspond to the specified timestamps + """ + try: + with PathManager.open(video_fpath, "rb") as io: + container = av.open(io) + stream = container.streams.video[video_stream_idx] + frames = [] + for pts in keyframes: + try: + container.seek(pts, any_frame=False, stream=stream) + frame = next(container.decode(video=0)) + frames.append(frame) + except av.AVError as e: + logger = logging.getLogger(__name__) + logger.warning( + f"Read keyframes: Error seeking video file {video_fpath}, " + f"video stream {video_stream_idx}, pts {pts}, AV error: {e}" + ) + container.close() + return frames + except OSError as e: + logger = logging.getLogger(__name__) + logger.warning( + f"Read keyframes: Error seeking video file {video_fpath}, " + f"video stream {video_stream_idx}, pts {pts}, OS error: {e}" + ) + container.close() + return frames + except StopIteration: + logger = logging.getLogger(__name__) + logger.warning( + f"Read keyframes: Error decoding frame from {video_fpath}, " + f"video stream {video_stream_idx}, pts {pts}" + ) + container.close() + return frames + + container.close() + return frames + except OSError as e: + logger = logging.getLogger(__name__) + logger.warning( + f"Read keyframes: Error opening video file container {video_fpath}, OS error: {e}" + ) + except RuntimeError as e: + logger = logging.getLogger(__name__) + logger.warning( + f"Read keyframes: Error opening video file container {video_fpath}, Runtime error: {e}" + ) + return [] + + +def video_list_from_file(video_list_fpath: str, base_path: Optional[str] = None): + """ + Create a list of paths to video files from a text file. + + Args: + video_list_fpath (str): path to a plain text file with the list of videos + base_path (str): base path for entries from the video list (default: None) + """ + video_list = [] + with PathManager.open(video_list_fpath, "r") as io: + for line in io: + video_list.append(maybe_prepend_base_path(base_path, str(line.strip()))) + return video_list + + +def read_keyframe_helper_data(fpath: str): + """ + Read keyframe data from a file in CSV format: the header should contain + "video_id" and "keyframes" fields. 
Value specifications are: + video_id: int + keyframes: list(int) + Example of contents: + video_id,keyframes + 2,"[1,11,21,31,41,51,61,71,81]" + + Args: + fpath (str): File containing keyframe data + + Return: + video_id_to_keyframes (dict: int -> list(int)): for a given video ID it + contains a list of keyframes for that video + """ + video_id_to_keyframes = {} + try: + with PathManager.open(fpath, "r") as io: + csv_reader = csv.reader(io) # pyre-ignore[6] + header = next(csv_reader) + video_id_idx = header.index("video_id") + keyframes_idx = header.index("keyframes") + for row in csv_reader: + video_id = int(row[video_id_idx]) + assert ( + video_id not in video_id_to_keyframes + ), f"Duplicate keyframes entry for video {fpath}" + video_id_to_keyframes[video_id] = ( + [int(v) for v in row[keyframes_idx][1:-1].split(",")] + if len(row[keyframes_idx]) > 2 + else [] + ) + except Exception as e: + logger = logging.getLogger(__name__) + logger.warning(f"Error reading keyframe helper data from {fpath}: {e}") + return video_id_to_keyframes + + +class VideoKeyframeDataset(Dataset): + """ + Dataset that provides keyframes for a set of videos. + """ + + _EMPTY_FRAMES = torch.empty((0, 3, 1, 1)) + + def __init__( + self, + video_list: List[str], + category_list: Union[str, List[str], None] = None, + frame_selector: Optional[FrameSelector] = None, + transform: Optional[FrameTransform] = None, + keyframe_helper_fpath: Optional[str] = None, + ): + """ + Dataset constructor + + Args: + video_list (List[str]): list of paths to video files + category_list (Union[str, List[str], None]): list of animal categories for each + video file. If it is a string, or None, this applies to all videos + frame_selector (Callable: KeyFrameList -> KeyFrameList): + selects keyframes to process, keyframes are given by + packet timestamps in timebase counts. If None, all keyframes + are selected (default: None) + transform (Callable: torch.Tensor -> torch.Tensor): + transforms a batch of RGB images (tensors of size [B, 3, H, W]), + returns a tensor of the same size. 
If None, no transform is + applied (default: None) + + """ + if type(category_list) == list: + self.category_list = category_list + else: + self.category_list = [category_list] * len(video_list) + assert len(video_list) == len( + self.category_list + ), "length of video and category lists must be equal" + self.video_list = video_list + self.frame_selector = frame_selector + self.transform = transform + self.keyframe_helper_data = ( + read_keyframe_helper_data(keyframe_helper_fpath) + if keyframe_helper_fpath is not None + else None + ) + + def __getitem__(self, idx: int) -> Dict[str, Any]: + """ + Gets selected keyframes from a given video + + Args: + idx (int): video index in the video list file + Returns: + A dictionary containing two keys: + images (torch.Tensor): tensor of size [N, H, W, 3] or of size + defined by the transform that contains keyframes data + categories (List[str]): categories of the frames + """ + categories = [self.category_list[idx]] + fpath = self.video_list[idx] + keyframes = ( + list_keyframes(fpath) + if self.keyframe_helper_data is None or idx not in self.keyframe_helper_data + else self.keyframe_helper_data[idx] + ) + transform = self.transform + frame_selector = self.frame_selector + if not keyframes: + return {"images": self._EMPTY_FRAMES, "categories": []} + if frame_selector is not None: + keyframes = frame_selector(keyframes) + frames = read_keyframes(fpath, keyframes) + if not frames: + return {"images": self._EMPTY_FRAMES, "categories": []} + frames = np.stack([frame.to_rgb().to_ndarray() for frame in frames]) + frames = torch.as_tensor(frames, device=torch.device("cpu")) + frames = frames[..., [2, 1, 0]] # RGB -> BGR + frames = frames.permute(0, 3, 1, 2).float() # NHWC -> NCHW + if transform is not None: + frames = transform(frames) + return {"images": frames, "categories": categories} + + def __len__(self): + return len(self.video_list) diff --git a/data_processing/detectron2/projects/DensePose/densepose/engine/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/engine/__init__.py new file mode 100644 index 0000000..539b93a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/engine/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .trainer import Trainer diff --git a/data_processing/detectron2/projects/DensePose/densepose/engine/trainer.py b/data_processing/detectron2/projects/DensePose/densepose/engine/trainer.py new file mode 100644 index 0000000..a8ffe82 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/engine/trainer.py @@ -0,0 +1,258 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import logging +import os +from collections import OrderedDict +from typing import List, Optional, Union +import torch +from torch import nn + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import CfgNode +from detectron2.engine import DefaultTrainer +from detectron2.evaluation import ( + DatasetEvaluator, + DatasetEvaluators, + inference_on_dataset, + print_csv_format, +) +from detectron2.solver.build import get_default_optimizer_params, maybe_add_gradient_clipping +from detectron2.utils import comm +from detectron2.utils.events import EventWriter, get_event_storage + +from densepose import DensePoseDatasetMapperTTA, DensePoseGeneralizedRCNNWithTTA, load_from_cfg +from densepose.data import ( + DatasetMapper, + build_combined_loader, + build_detection_test_loader, + build_detection_train_loader, + build_inference_based_loaders, + has_inference_based_loaders, +) +from densepose.evaluation.d2_evaluator_adapter import Detectron2COCOEvaluatorAdapter +from densepose.evaluation.evaluator import DensePoseCOCOEvaluator, build_densepose_evaluator_storage +from densepose.modeling.cse import Embedder + + +class SampleCountingLoader: + def __init__(self, loader): + self.loader = loader + + def __iter__(self): + it = iter(self.loader) + storage = get_event_storage() + while True: + try: + batch = next(it) + num_inst_per_dataset = {} + for data in batch: + dataset_name = data["dataset"] + if dataset_name not in num_inst_per_dataset: + num_inst_per_dataset[dataset_name] = 0 + num_inst = len(data["instances"]) + num_inst_per_dataset[dataset_name] += num_inst + for dataset_name in num_inst_per_dataset: + storage.put_scalar(f"batch/{dataset_name}", num_inst_per_dataset[dataset_name]) + yield batch + except StopIteration: + break + + +class SampleCountMetricPrinter(EventWriter): + def __init__(self): + self.logger = logging.getLogger(__name__) + + def write(self): + storage = get_event_storage() + batch_stats_strs = [] + for key, buf in storage.histories().items(): + if key.startswith("batch/"): + batch_stats_strs.append(f"{key} {buf.avg(20)}") + self.logger.info(", ".join(batch_stats_strs)) + + +class Trainer(DefaultTrainer): + @classmethod + def extract_embedder_from_model(cls, model: nn.Module) -> Optional[Embedder]: + if isinstance(model, nn.parallel.DistributedDataParallel): + model = model.module + if hasattr(model, "roi_heads") and hasattr(model.roi_heads, "embedder"): + return model.roi_heads.embedder + return None + + # TODO: the only reason to copy the base class code here is to pass the embedder from + # the model to the evaluator; that should be refactored to avoid unnecessary copy-pasting + @classmethod + def test( + cls, + cfg: CfgNode, + model: nn.Module, + evaluators: Optional[Union[DatasetEvaluator, List[DatasetEvaluator]]] = None, + ): + """ + Args: + cfg (CfgNode): + model (nn.Module): + evaluators (DatasetEvaluator, list[DatasetEvaluator] or None): if None, will call + :meth:`build_evaluator`. Otherwise, must have the same length as + ``cfg.DATASETS.TEST``. 
+ + Returns: + dict: a dict of result metrics + """ + logger = logging.getLogger(__name__) + if isinstance(evaluators, DatasetEvaluator): + evaluators = [evaluators] + if evaluators is not None: + assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( + len(cfg.DATASETS.TEST), len(evaluators) + ) + + results = OrderedDict() + for idx, dataset_name in enumerate(cfg.DATASETS.TEST): + data_loader = cls.build_test_loader(cfg, dataset_name) + # When evaluators are passed in as arguments, + # implicitly assume that evaluators can be created before data_loader. + if evaluators is not None: + evaluator = evaluators[idx] + else: + try: + embedder = cls.extract_embedder_from_model(model) + evaluator = cls.build_evaluator(cfg, dataset_name, embedder=embedder) + except NotImplementedError: + logger.warn( + "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " + "or implement its `build_evaluator` method." + ) + results[dataset_name] = {} + continue + if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE or comm.is_main_process(): + results_i = inference_on_dataset(model, data_loader, evaluator) + else: + results_i = {} + results[dataset_name] = results_i + if comm.is_main_process(): + assert isinstance( + results_i, dict + ), "Evaluator must return a dict on the main process. Got {} instead.".format( + results_i + ) + logger.info("Evaluation results for {} in csv format:".format(dataset_name)) + print_csv_format(results_i) + + if len(results) == 1: + results = list(results.values())[0] + return results + + @classmethod + def build_evaluator( + cls, + cfg: CfgNode, + dataset_name: str, + output_folder: Optional[str] = None, + embedder: Optional[Embedder] = None, + ) -> DatasetEvaluators: + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluators = [] + distributed = cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE + # Note: we currently use COCO evaluator for both COCO and LVIS datasets + # to have compatible metrics. 
LVIS bbox evaluator could also be used + # with an adapter to properly handle filtered / mapped categories + # evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + # if evaluator_type == "coco": + # evaluators.append(COCOEvaluator(dataset_name, output_dir=output_folder)) + # elif evaluator_type == "lvis": + # evaluators.append(LVISEvaluator(dataset_name, output_dir=output_folder)) + evaluators.append( + Detectron2COCOEvaluatorAdapter( + dataset_name, output_dir=output_folder, distributed=distributed + ) + ) + if cfg.MODEL.DENSEPOSE_ON: + storage = build_densepose_evaluator_storage(cfg, output_folder) + evaluators.append( + DensePoseCOCOEvaluator( + dataset_name, + distributed, + output_folder, + evaluator_type=cfg.DENSEPOSE_EVALUATION.TYPE, + min_iou_threshold=cfg.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD, + storage=storage, + embedder=embedder, + should_evaluate_mesh_alignment=cfg.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT, + mesh_alignment_mesh_names=cfg.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES, + ) + ) + return DatasetEvaluators(evaluators) + + @classmethod + def build_optimizer(cls, cfg: CfgNode, model: nn.Module): + params = get_default_optimizer_params( + model, + base_lr=cfg.SOLVER.BASE_LR, + weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, + bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, + weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, + overrides={ + "features": { + "lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR, + }, + "embeddings": { + "lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR, + }, + }, + ) + optimizer = torch.optim.SGD( + params, + cfg.SOLVER.BASE_LR, + momentum=cfg.SOLVER.MOMENTUM, + nesterov=cfg.SOLVER.NESTEROV, + weight_decay=cfg.SOLVER.WEIGHT_DECAY, + ) + # pyre-fixme[6]: For 2nd param expected `Type[Optimizer]` but got `SGD`. + return maybe_add_gradient_clipping(cfg, optimizer) + + @classmethod + def build_test_loader(cls, cfg: CfgNode, dataset_name): + return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False)) + + @classmethod + def build_train_loader(cls, cfg: CfgNode): + data_loader = build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True)) + if not has_inference_based_loaders(cfg): + return data_loader + model = cls.build_model(cfg) + model.to(cfg.BOOTSTRAP_MODEL.DEVICE) + DetectionCheckpointer(model).resume_or_load(cfg.BOOTSTRAP_MODEL.WEIGHTS, resume=False) + inference_based_loaders, ratios = build_inference_based_loaders(cfg, model) + loaders = [data_loader] + inference_based_loaders + ratios = [1.0] + ratios + combined_data_loader = build_combined_loader(cfg, loaders, ratios) + sample_counting_loader = SampleCountingLoader(combined_data_loader) + return sample_counting_loader + + def build_writers(self): + writers = super().build_writers() + writers.append(SampleCountMetricPrinter()) + return writers + + @classmethod + def test_with_TTA(cls, cfg: CfgNode, model): + logger = logging.getLogger("detectron2.trainer") + # In the end of training, run an evaluation with TTA + # Only support some R-CNN models. 
+ logger.info("Running inference with test-time augmentation ...") + transform_data = load_from_cfg(cfg) + model = DensePoseGeneralizedRCNNWithTTA( + cfg, model, transform_data, DensePoseDatasetMapperTTA(cfg) + ) + evaluators = [ + cls.build_evaluator( + cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") + ) + for name in cfg.DATASETS.TEST + ] + res = cls.test(cfg, model, evaluators) # pyre-ignore[6] + res = OrderedDict({k + "_TTA": v for k, v in res.items()}) + return res diff --git a/data_processing/detectron2/projects/DensePose/densepose/evaluation/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/evaluation/__init__.py new file mode 100644 index 0000000..e5ae1f2 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/evaluation/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .evaluator import DensePoseCOCOEvaluator diff --git a/data_processing/detectron2/projects/DensePose/densepose/evaluation/d2_evaluator_adapter.py b/data_processing/detectron2/projects/DensePose/densepose/evaluation/d2_evaluator_adapter.py new file mode 100644 index 0000000..1fbc526 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/evaluation/d2_evaluator_adapter.py @@ -0,0 +1,50 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from detectron2.data.catalog import Metadata +from detectron2.evaluation import COCOEvaluator + +from densepose.data.datasets.coco import ( + get_contiguous_id_to_category_id_map, + maybe_filter_categories_cocoapi, +) + + +def _maybe_add_iscrowd_annotations(cocoapi) -> None: + for ann in cocoapi.dataset["annotations"]: + if "iscrowd" not in ann: + ann["iscrowd"] = 0 + + +class Detectron2COCOEvaluatorAdapter(COCOEvaluator): + def __init__( + self, + dataset_name, + output_dir=None, + distributed=True, + ): + super().__init__(dataset_name, output_dir=output_dir, distributed=distributed) + maybe_filter_categories_cocoapi(dataset_name, self._coco_api) + _maybe_add_iscrowd_annotations(self._coco_api) + # substitute category metadata to account for categories + # that are mapped to the same contiguous id + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + self._maybe_substitute_metadata() + + def _maybe_substitute_metadata(self): + cont_id_2_cat_id = get_contiguous_id_to_category_id_map(self._metadata) + cat_id_2_cont_id = self._metadata.thing_dataset_id_to_contiguous_id + if len(cont_id_2_cat_id) == len(cat_id_2_cont_id): + return + + cat_id_2_cont_id_injective = {} + for cat_id, cont_id in cat_id_2_cont_id.items(): + if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id): + cat_id_2_cont_id_injective[cat_id] = cont_id + + metadata_new = Metadata(name=self._metadata.name) + for key, value in self._metadata.__dict__.items(): + if key == "thing_dataset_id_to_contiguous_id": + setattr(metadata_new, key, cat_id_2_cont_id_injective) + else: + setattr(metadata_new, key, value) + self._metadata = metadata_new diff --git a/data_processing/detectron2/projects/DensePose/densepose/evaluation/densepose_coco_evaluation.py b/data_processing/detectron2/projects/DensePose/densepose/evaluation/densepose_coco_evaluation.py new file mode 100644 index 0000000..06965f3 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/evaluation/densepose_coco_evaluation.py @@ -0,0 +1,1303 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# This is a modified version of cocoeval.py where we also have the densepose evaluation. + +__author__ = "tsungyi" + +import copy +import datetime +import logging +import numpy as np +import pickle +import time +from collections import defaultdict +from enum import Enum +from typing import Any, Dict, Tuple +import scipy.spatial.distance as ssd +import torch +import torch.nn.functional as F +from pycocotools import mask as maskUtils +from scipy.io import loadmat +from scipy.ndimage import zoom as spzoom + +from detectron2.utils.file_io import PathManager + +from densepose.converters.chart_output_to_chart_result import resample_uv_tensors_to_bbox +from densepose.converters.segm_to_mask import ( + resample_coarse_segm_tensor_to_bbox, + resample_fine_and_coarse_segm_tensors_to_bbox, +) +from densepose.modeling.cse.utils import squared_euclidean_distance_matrix +from densepose.structures import DensePoseDataRelative +from densepose.structures.mesh import create_mesh + +logger = logging.getLogger(__name__) + + +class DensePoseEvalMode(str, Enum): + # use both masks and geodesic distances (GPS * IOU) to compute scores + GPSM = "gpsm" + # use only geodesic distances (GPS) to compute scores + GPS = "gps" + # use only masks (IOU) to compute scores + IOU = "iou" + + +class DensePoseDataMode(str, Enum): + # use estimated IUV data (default mode) + IUV_DT = "iuvdt" + # use ground truth IUV data + IUV_GT = "iuvgt" + # use ground truth labels I and set UV to 0 + I_GT_UV_0 = "igtuv0" + # use ground truth labels I and estimated UV coordinates + I_GT_UV_DT = "igtuvdt" + # use estimated labels I and set UV to 0 + I_DT_UV_0 = "idtuv0" + + +class DensePoseCocoEval(object): + # Interface for evaluating detection on the Microsoft COCO dataset. + # + # The usage for CocoEval is as follows: + # cocoGt=..., cocoDt=... # load dataset and results + # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object + # E.params.recThrs = ...; # set parameters as desired + # E.evaluate(); # run per image evaluation + # E.accumulate(); # accumulate per image results + # E.summarize(); # display summary metrics of results + # For example usage see evalDemo.m and http://mscoco.org/. + # + # The evaluation parameters are as follows (defaults in brackets): + # imgIds - [all] N img ids to use for evaluation + # catIds - [all] K cat ids to use for evaluation + # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation + # recThrs - [0:.01:1] R=101 recall thresholds for evaluation + # areaRng - [...] A=4 object area ranges for evaluation + # maxDets - [1 10 100] M=3 thresholds on max detections per image + # iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose' + # iouType replaced the now DEPRECATED useSegm parameter. + # useCats - [1] if true use category labels for evaluation + # Note: if useCats=0 category labels are ignored as in proposal scoring. + # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified. 
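+  # For iouType 'densepose', matches are scored with the Geodesic Point Similarity (GPS)
+  # from the DensePose paper -- roughly, GPS = mean over annotated points p of
+  # exp(-g(p)^2 / (2 * kappa^2)), where g(p) is the geodesic distance between the predicted
+  # and ground-truth surface points and kappa is a per-part normalization constant
+  # (this is only a sketch; the exact computation lives in computeOgps below).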
+ # + # evaluate(): evaluates detections on every image and every category and + # concats the results into the "evalImgs" with fields: + # dtIds - [1xD] id for each of the D detections (dt) + # gtIds - [1xG] id for each of the G ground truths (gt) + # dtMatches - [TxD] matching gt id at each IoU or 0 + # gtMatches - [TxG] matching dt id at each IoU or 0 + # dtScores - [1xD] confidence of each dt + # gtIgnore - [1xG] ignore flag for each gt + # dtIgnore - [TxD] ignore flag for each dt at each IoU + # + # accumulate(): accumulates the per-image, per-category evaluation + # results in "evalImgs" into the dictionary "eval" with fields: + # params - parameters used for evaluation + # date - date evaluation was performed + # counts - [T,R,K,A,M] parameter dimensions (see above) + # precision - [TxRxKxAxM] precision for every evaluation setting + # recall - [TxKxAxM] max recall for every evaluation setting + # Note: precision and recall==-1 for settings with no gt objects. + # + # See also coco, mask, pycocoDemo, pycocoEvalDemo + # + # Microsoft COCO Toolbox. version 2.0 + # Data, paper, and tutorials available at: http://mscoco.org/ + # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. + # Licensed under the Simplified BSD License [see coco/license.txt] + def __init__( + self, + cocoGt=None, + cocoDt=None, + iouType: str = "densepose", + multi_storage=None, + embedder=None, + dpEvalMode: DensePoseEvalMode = DensePoseEvalMode.GPS, + dpDataMode: DensePoseDataMode = DensePoseDataMode.IUV_DT, + ): + """ + Initialize CocoEval using coco APIs for gt and dt + :param cocoGt: coco object with ground truth annotations + :param cocoDt: coco object with detection results + :return: None + """ + self.cocoGt = cocoGt # ground truth COCO API + self.cocoDt = cocoDt # detections COCO API + self.multi_storage = multi_storage + self.embedder = embedder + self._dpEvalMode = dpEvalMode + self._dpDataMode = dpDataMode + self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI] + self.eval = {} # accumulated evaluation results + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self.params = Params(iouType=iouType) # parameters + self._paramsEval = {} # parameters for evaluation + self.stats = [] # result summarization + self.ious = {} # ious between all gts and dts + if cocoGt is not None: + self.params.imgIds = sorted(cocoGt.getImgIds()) + self.params.catIds = sorted(cocoGt.getCatIds()) + self.ignoreThrBB = 0.7 + self.ignoreThrUV = 0.9 + + def _loadGEval(self): + smpl_subdiv_fpath = PathManager.get_local_path( + "https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat" + ) + pdist_transform_fpath = PathManager.get_local_path( + "https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat" + ) + pdist_matrix_fpath = PathManager.get_local_path( + "https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl", timeout_sec=120 + ) + SMPL_subdiv = loadmat(smpl_subdiv_fpath) + self.PDIST_transform = loadmat(pdist_transform_fpath) + self.PDIST_transform = self.PDIST_transform["index"].squeeze() + UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze() + ClosestVertInds = np.arange(UV.shape[1]) + 1 + self.Part_UVs = [] + self.Part_ClosestVertInds = [] + for i in np.arange(24): + 
self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]) + self.Part_ClosestVertInds.append( + ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)] + ) + + with open(pdist_matrix_fpath, "rb") as hFile: + arrays = pickle.load(hFile, encoding="latin1") + self.Pdist_matrix = arrays["Pdist_matrix"] + self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze()) + # Mean geodesic distances for parts. + self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150]) + # Coarse Part labels. + self.CoarseParts = np.array( + [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8] + ) + + def _prepare(self): + """ + Prepare ._gts and ._dts for evaluation based on params + :return: None + """ + + def _toMask(anns, coco): + # modify ann['segmentation'] by reference + for ann in anns: + # safeguard for invalid segmentation annotation; + # annotations containing empty lists exist in the posetrack + # dataset. This is not a correct segmentation annotation + # in terms of COCO format; we need to deal with it somehow + segm = ann["segmentation"] + if type(segm) == list and len(segm) == 0: + ann["segmentation"] = None + continue + rle = coco.annToRLE(ann) + ann["segmentation"] = rle + + def _getIgnoreRegion(iid, coco): + img = coco.imgs[iid] + + if "ignore_regions_x" not in img.keys(): + return None + + if len(img["ignore_regions_x"]) == 0: + return None + + rgns_merged = [ + [v for xy in zip(region_x, region_y) for v in xy] + for region_x, region_y in zip(img["ignore_regions_x"], img["ignore_regions_y"]) + ] + rles = maskUtils.frPyObjects(rgns_merged, img["height"], img["width"]) + rle = maskUtils.merge(rles) + return maskUtils.decode(rle) + + def _checkIgnore(dt, iregion): + if iregion is None: + return True + + bb = np.array(dt["bbox"]).astype(np.int) + x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3] + x2 = min([x2, iregion.shape[1]]) + y2 = min([y2, iregion.shape[0]]) + + if bb[2] * bb[3] == 0: + return False + + crop_iregion = iregion[y1:y2, x1:x2] + + if crop_iregion.sum() == 0: + return True + + if "densepose" not in dt.keys(): # filtering boxes + return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB + + # filtering UVs + ignoremask = np.require(crop_iregion, requirements=["F"]) + mask = self._extract_mask(dt) + uvmask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"]) + uvmask_ = maskUtils.encode(uvmask) + ignoremask_ = maskUtils.encode(ignoremask) + uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0] + return uviou < self.ignoreThrUV + + p = self.params + + if p.useCats: + gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + else: + gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds)) + dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds)) + + imns = self.cocoGt.loadImgs(p.imgIds) + self.size_mapping = {} + for im in imns: + self.size_mapping[im["id"]] = [im["height"], im["width"]] + + # if iouType == 'uv', add point gt annotations + if p.iouType == "densepose": + self._loadGEval() + + # convert ground truth to mask if iouType == 'segm' + if p.iouType == "segm": + _toMask(gts, self.cocoGt) + _toMask(dts, self.cocoDt) + + # set ignore flag + for gt in gts: + gt["ignore"] = gt["ignore"] if "ignore" in gt else 0 + gt["ignore"] = "iscrowd" in gt and gt["iscrowd"] + if p.iouType == "keypoints": + gt["ignore"] = 
(gt["num_keypoints"] == 0) or gt["ignore"] + if p.iouType == "densepose": + gt["ignore"] = ("dp_x" in gt) == 0 + if p.iouType == "segm": + gt["ignore"] = gt["segmentation"] is None + + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self._igrgns = defaultdict(list) + + for gt in gts: + iid = gt["image_id"] + if iid not in self._igrgns.keys(): + self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt) + if _checkIgnore(gt, self._igrgns[iid]): + self._gts[iid, gt["category_id"]].append(gt) + for dt in dts: + iid = dt["image_id"] + if (iid not in self._igrgns) or _checkIgnore(dt, self._igrgns[iid]): + self._dts[iid, dt["category_id"]].append(dt) + + self.evalImgs = defaultdict(list) # per-image per-category evaluation results + self.eval = {} # accumulated evaluation results + + def evaluate(self): + """ + Run per image evaluation on given images and store results (a list of dict) in self.evalImgs + :return: None + """ + tic = time.time() + logger.info("Running per image DensePose evaluation... {}".format(self.params.iouType)) + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = "segm" if p.useSegm == 1 else "bbox" + logger.info("useSegm (deprecated) is not None. Running DensePose evaluation") + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType in ["segm", "bbox"]: + computeIoU = self.computeIoU + elif p.iouType == "keypoints": + computeIoU = self.computeOks + elif p.iouType == "densepose": + computeIoU = self.computeOgps + if self._dpEvalMode in {DensePoseEvalMode.GPSM, DensePoseEvalMode.IOU}: + self.real_ious = { + (imgId, catId): self.computeDPIoU(imgId, catId) + for imgId in p.imgIds + for catId in catIds + } + + self.ious = { + (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds + } + + evaluateImg = self.evaluateImg + maxDet = p.maxDets[-1] + self.evalImgs = [ + evaluateImg(imgId, catId, areaRng, maxDet) + for catId in catIds + for areaRng in p.areaRng + for imgId in p.imgIds + ] + self._paramsEval = copy.deepcopy(self.params) + toc = time.time() + logger.info("DensePose evaluation DONE (t={:0.2f}s).".format(toc - tic)) + + def getDensePoseMask(self, polys): + maskGen = np.zeros([256, 256]) + stop = min(len(polys) + 1, 15) + for i in range(1, stop): + if polys[i - 1]: + currentMask = maskUtils.decode(polys[i - 1]) + maskGen[currentMask > 0] = i + return maskGen + + def _generate_rlemask_on_image(self, mask, imgId, data): + bbox_xywh = np.array(data["bbox"]) + x, y, w, h = bbox_xywh + im_h, im_w = self.size_mapping[imgId] + im_mask = np.zeros((im_h, im_w), dtype=np.uint8) + if mask is not None: + x0 = max(int(x), 0) + x1 = min(int(x + w), im_w, int(x) + mask.shape[1]) + y0 = max(int(y), 0) + y1 = min(int(y + h), im_h, int(y) + mask.shape[0]) + y = int(y) + x = int(x) + im_mask[y0:y1, x0:x1] = mask[y0 - y : y1 - y, x0 - x : x1 - x] + im_mask = np.require(np.asarray(im_mask > 0), dtype=np.uint8, requirements=["F"]) + rle_mask = maskUtils.encode(np.array(im_mask[:, :, np.newaxis], order="F"))[0] + return rle_mask + + def computeDPIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, 
cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + gtmasks = [] + for g in gt: + if DensePoseDataRelative.S_KEY in g: + # convert DensePose mask to a binary mask + mask = np.minimum(self.getDensePoseMask(g[DensePoseDataRelative.S_KEY]), 1.0) + _, _, w, h = g["bbox"] + scale_x = float(max(w, 1)) / mask.shape[1] + scale_y = float(max(h, 1)) / mask.shape[0] + mask = spzoom(mask, (scale_y, scale_x), order=1, prefilter=False) + mask = np.array(mask > 0.5, dtype=np.uint8) + rle_mask = self._generate_rlemask_on_image(mask, imgId, g) + elif "segmentation" in g: + segmentation = g["segmentation"] + if isinstance(segmentation, list) and segmentation: + # polygons + im_h, im_w = self.size_mapping[imgId] + rles = maskUtils.frPyObjects(segmentation, im_h, im_w) + rle_mask = maskUtils.merge(rles) + elif isinstance(segmentation, dict): + if isinstance(segmentation["counts"], list): + # uncompressed RLE + im_h, im_w = self.size_mapping[imgId] + rle_mask = maskUtils.frPyObjects(segmentation, im_h, im_w) + else: + # compressed RLE + rle_mask = segmentation + else: + rle_mask = self._generate_rlemask_on_image(None, imgId, g) + else: + rle_mask = self._generate_rlemask_on_image(None, imgId, g) + gtmasks.append(rle_mask) + + dtmasks = [] + for d in dt: + mask = self._extract_mask(d) + mask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"]) + rle_mask = self._generate_rlemask_on_image(mask, imgId, d) + dtmasks.append(rle_mask) + + # compute iou between each dt and gt region + iscrowd = [int(o.get("iscrowd", 0)) for o in gt] + iousDP = maskUtils.iou(dtmasks, gtmasks, iscrowd) + return iousDP + + def computeIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + if p.iouType == "segm": + g = [g["segmentation"] for g in gt if g["segmentation"] is not None] + d = [d["segmentation"] for d in dt if d["segmentation"] is not None] + elif p.iouType == "bbox": + g = [g["bbox"] for g in gt] + d = [d["bbox"] for d in dt] + else: + raise Exception("unknown iouType for iou computation") + + # compute iou between each dt and gt region + iscrowd = [int(o.get("iscrowd", 0)) for o in gt] + ious = maskUtils.iou(d, g, iscrowd) + return ious + + def computeOks(self, imgId, catId): + p = self.params + # dimension here should be Nxm + gts = self._gts[imgId, catId] + dts = self._dts[imgId, catId] + inds = np.argsort([-d["score"] for d in dts], kind="mergesort") + dts = [dts[i] for i in inds] + if len(dts) > p.maxDets[-1]: + dts = dts[0 : p.maxDets[-1]] + # if len(gts) == 0 and len(dts) == 0: + if len(gts) == 0 or len(dts) == 0: + return [] + ious = np.zeros((len(dts), len(gts))) + sigmas = ( + np.array( + [ + 0.26, + 0.25, + 0.25, + 0.35, + 0.35, + 0.79, + 0.79, + 0.72, + 0.72, + 0.62, + 0.62, + 1.07, + 1.07, + 0.87, + 0.87, + 0.89, + 0.89, + ] + ) + / 10.0 + ) + vars = (sigmas * 2) ** 2 + k = len(sigmas) + # compute oks between each detection and ground truth object + for j, gt in enumerate(gts): + # 
create bounds for ignore regions(double the gt bbox) + g = np.array(gt["keypoints"]) + xg = g[0::3] + yg = g[1::3] + vg = g[2::3] + k1 = np.count_nonzero(vg > 0) + bb = gt["bbox"] + x0 = bb[0] - bb[2] + x1 = bb[0] + bb[2] * 2 + y0 = bb[1] - bb[3] + y1 = bb[1] + bb[3] * 2 + for i, dt in enumerate(dts): + d = np.array(dt["keypoints"]) + xd = d[0::3] + yd = d[1::3] + if k1 > 0: + # measure the per-keypoint distance if keypoints visible + dx = xd - xg + dy = yd - yg + else: + # measure minimum distance to keypoints in (x0,y0) & (x1,y1) + z = np.zeros(k) + dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0) + dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0) + e = (dx**2 + dy**2) / vars / (gt["area"] + np.spacing(1)) / 2 + if k1 > 0: + e = e[vg > 0] + ious[i, j] = np.sum(np.exp(-e)) / e.shape[0] + return ious + + def _extract_mask(self, dt: Dict[str, Any]) -> np.ndarray: + if "densepose" in dt: + densepose_results_quantized = dt["densepose"] + return densepose_results_quantized.labels_uv_uint8[0].numpy() + elif "cse_mask" in dt: + return dt["cse_mask"] + elif "coarse_segm" in dt: + dy = max(int(dt["bbox"][3]), 1) + dx = max(int(dt["bbox"][2]), 1) + return ( + F.interpolate( + dt["coarse_segm"].unsqueeze(0), + (dy, dx), + mode="bilinear", + align_corners=False, + ) + .squeeze(0) + .argmax(0) + .numpy() + .astype(np.uint8) + ) + elif "record_id" in dt: + assert ( + self.multi_storage is not None + ), f"Storage record id encountered in a detection {dt}, but no storage provided!" + record = self.multi_storage.get(dt["rank"], dt["record_id"]) + coarse_segm = record["coarse_segm"] + dy = max(int(dt["bbox"][3]), 1) + dx = max(int(dt["bbox"][2]), 1) + return ( + F.interpolate( + coarse_segm.unsqueeze(0), + (dy, dx), + mode="bilinear", + align_corners=False, + ) + .squeeze(0) + .argmax(0) + .numpy() + .astype(np.uint8) + ) + else: + raise Exception(f"No mask data in the detection: {dt}") + raise ValueError('The prediction dict needs to contain either "densepose" or "cse_mask"') + + def _extract_iuv( + self, densepose_data: np.ndarray, py: np.ndarray, px: np.ndarray, gt: Dict[str, Any] + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Extract arrays of I, U and V values at given points as numpy arrays + given the data mode stored in self._dpDataMode + """ + if self._dpDataMode == DensePoseDataMode.IUV_DT: + # estimated labels and UV (default) + ipoints = densepose_data[0, py, px] + upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255. + vpoints = densepose_data[2, py, px] / 255.0 + elif self._dpDataMode == DensePoseDataMode.IUV_GT: + # ground truth + ipoints = np.array(gt["dp_I"]) + upoints = np.array(gt["dp_U"]) + vpoints = np.array(gt["dp_V"]) + elif self._dpDataMode == DensePoseDataMode.I_GT_UV_0: + # ground truth labels, UV = 0 + ipoints = np.array(gt["dp_I"]) + upoints = upoints * 0.0 + vpoints = vpoints * 0.0 + elif self._dpDataMode == DensePoseDataMode.I_GT_UV_DT: + # ground truth labels, estimated UV + ipoints = np.array(gt["dp_I"]) + upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255. 
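For reference, the object keypoint similarity computed by `computeOks` above reduces, for visible keypoints, to an average of per-keypoint Gaussian scores whose widths scale with the annotated area and the per-keypoint sigmas. A self-contained toy computation (coordinates and area are invented):

```python
import numpy as np

# Toy OKS for two visible keypoints; mirrors the e = d^2 / vars / (2*area) term above.
xg, yg = np.array([10.0, 20.0]), np.array([10.0, 20.0])   # ground-truth keypoints
xd, yd = np.array([11.0, 22.0]), np.array([10.0, 19.0])   # detected keypoints
area = 400.0                                               # gt["area"]
sigmas = np.array([0.26, 0.25]) / 10.0                     # first two COCO sigmas
vars_ = (sigmas * 2) ** 2

e = ((xd - xg) ** 2 + (yd - yg) ** 2) / vars_ / (area + np.spacing(1)) / 2
oks = np.sum(np.exp(-e)) / e.shape[0]
print(round(oks, 3))
```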
+ vpoints = densepose_data[2, py, px] / 255.0 + elif self._dpDataMode == DensePoseDataMode.I_DT_UV_0: + # estimated labels, UV = 0 + ipoints = densepose_data[0, py, px] + upoints = upoints * 0.0 + vpoints = vpoints * 0.0 + else: + raise ValueError(f"Unknown data mode: {self._dpDataMode}") + return ipoints, upoints, vpoints + + def computeOgps_single_pair(self, dt, gt, py, px, pt_mask): + if "densepose" in dt: + ipoints, upoints, vpoints = self.extract_iuv_from_quantized(dt, gt, py, px, pt_mask) + return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints) + elif "u" in dt: + ipoints, upoints, vpoints = self.extract_iuv_from_raw(dt, gt, py, px, pt_mask) + return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints) + elif "record_id" in dt: + assert ( + self.multi_storage is not None + ), f"Storage record id encountered in detection {dt}, but no storage provided!" + record = self.multi_storage.get(dt["rank"], dt["record_id"]) + record["bbox"] = dt["bbox"] + if "u" in record: + ipoints, upoints, vpoints = self.extract_iuv_from_raw(record, gt, py, px, pt_mask) + return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints) + elif "embedding" in record: + return self.computeOgps_single_pair_cse( + dt, + gt, + py, + px, + pt_mask, + record["coarse_segm"], + record["embedding"], + record["bbox"], + ) + else: + raise Exception(f"Unknown record format: {record}") + elif "embedding" in dt: + return self.computeOgps_single_pair_cse( + dt, gt, py, px, pt_mask, dt["coarse_segm"], dt["embedding"], dt["bbox"] + ) + raise Exception(f"Unknown detection format: {dt}") + + def extract_iuv_from_quantized(self, dt, gt, py, px, pt_mask): + densepose_results_quantized = dt["densepose"] + ipoints, upoints, vpoints = self._extract_iuv( + densepose_results_quantized.labels_uv_uint8.numpy(), py, px, gt + ) + ipoints[pt_mask == -1] = 0 + return ipoints, upoints, vpoints + + def extract_iuv_from_raw(self, dt, gt, py, px, pt_mask): + labels_dt = resample_fine_and_coarse_segm_tensors_to_bbox( + dt["fine_segm"].unsqueeze(0), + dt["coarse_segm"].unsqueeze(0), + dt["bbox"], + ) + uv = resample_uv_tensors_to_bbox( + dt["u"].unsqueeze(0), dt["v"].unsqueeze(0), labels_dt.squeeze(0), dt["bbox"] + ) + labels_uv_uint8 = torch.cat((labels_dt.byte(), (uv * 255).clamp(0, 255).byte())) + ipoints, upoints, vpoints = self._extract_iuv(labels_uv_uint8.numpy(), py, px, gt) + ipoints[pt_mask == -1] = 0 + return ipoints, upoints, vpoints + + def computeOgps_single_pair_iuv(self, dt, gt, ipoints, upoints, vpoints): + cVertsGT, ClosestVertsGTTransformed = self.findAllClosestVertsGT(gt) + cVerts = self.findAllClosestVertsUV(upoints, vpoints, ipoints) + # Get pairwise geodesic distances between gt and estimated mesh points. + dist = self.getDistancesUV(ClosestVertsGTTransformed, cVerts) + # Compute the Ogps measure. + # Find the mean geodesic normalization distance for + # each GT point, based on which part it is on. 
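The geodesic point similarity referred to in the comment above is a per-point Gaussian of the geodesic distance, normalized by the mean distance of the body part the ground-truth point lies on, and averaged over all annotated points (see `computeOgps` below). A small numeric sketch with invented distances:

```python
import numpy as np

# gps_i = exp(-d_i^2 / (2 * kappa_i^2)); ogps is the mean over annotated points.
dists = np.array([0.05, 0.30, 0.10])      # geodesic distances, invented
kappas = np.array([0.107, 0.351, 0.126])  # per-part entries of Mean_Distances

ogps = np.mean(np.exp(-(dists ** 2) / (2 * kappas ** 2)))
print(round(ogps, 3))
```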
+ Current_Mean_Distances = self.Mean_Distances[ + self.CoarseParts[self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]] + ] + return dist, Current_Mean_Distances + + def computeOgps_single_pair_cse( + self, dt, gt, py, px, pt_mask, coarse_segm, embedding, bbox_xywh_abs + ): + # 0-based mesh vertex indices + cVertsGT = torch.as_tensor(gt["dp_vertex"], dtype=torch.int64) + # label for each pixel of the bbox, [H, W] tensor of long + labels_dt = resample_coarse_segm_tensor_to_bbox( + coarse_segm.unsqueeze(0), bbox_xywh_abs + ).squeeze(0) + x, y, w, h = bbox_xywh_abs + # embedding for each pixel of the bbox, [D, H, W] tensor of float32 + embedding = F.interpolate( + embedding.unsqueeze(0), (int(h), int(w)), mode="bilinear", align_corners=False + ).squeeze(0) + # valid locations py, px + py_pt = torch.from_numpy(py[pt_mask > -1]) + px_pt = torch.from_numpy(px[pt_mask > -1]) + cVerts = torch.ones_like(cVertsGT) * -1 + cVerts[pt_mask > -1] = self.findClosestVertsCse( + embedding, py_pt, px_pt, labels_dt, gt["ref_model"] + ) + # Get pairwise geodesic distances between gt and estimated mesh points. + dist = self.getDistancesCse(cVertsGT, cVerts, gt["ref_model"]) + # normalize distances + if (gt["ref_model"] == "smpl_27554") and ("dp_I" in gt): + Current_Mean_Distances = self.Mean_Distances[ + self.CoarseParts[np.array(gt["dp_I"], dtype=int)] + ] + else: + Current_Mean_Distances = 0.255 + return dist, Current_Mean_Distances + + def computeOgps(self, imgId, catId): + p = self.params + # dimension here should be Nxm + g = self._gts[imgId, catId] + d = self._dts[imgId, catId] + inds = np.argsort([-d_["score"] for d_ in d], kind="mergesort") + d = [d[i] for i in inds] + if len(d) > p.maxDets[-1]: + d = d[0 : p.maxDets[-1]] + # if len(gts) == 0 and len(dts) == 0: + if len(g) == 0 or len(d) == 0: + return [] + ious = np.zeros((len(d), len(g))) + # compute opgs between each detection and ground truth object + # sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5 + # 1 # dist = 0.3m corresponds to ogps = 0.96 + # 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5) + for j, gt in enumerate(g): + if not gt["ignore"]: + g_ = gt["bbox"] + for i, dt in enumerate(d): + # + dy = int(dt["bbox"][3]) + dx = int(dt["bbox"][2]) + dp_x = np.array(gt["dp_x"]) * g_[2] / 255.0 + dp_y = np.array(gt["dp_y"]) * g_[3] / 255.0 + py = (dp_y + g_[1] - dt["bbox"][1]).astype(np.int) + px = (dp_x + g_[0] - dt["bbox"][0]).astype(np.int) + # + pts = np.zeros(len(px)) + pts[px >= dx] = -1 + pts[py >= dy] = -1 + pts[px < 0] = -1 + pts[py < 0] = -1 + if len(pts) < 1: + ogps = 0.0 + elif np.max(pts) == -1: + ogps = 0.0 + else: + px[pts == -1] = 0 + py[pts == -1] = 0 + dists_between_matches, dist_norm_coeffs = self.computeOgps_single_pair( + dt, gt, py, px, pts + ) + # Compute gps + ogps_values = np.exp( + -(dists_between_matches**2) / (2 * (dist_norm_coeffs**2)) + ) + # + ogps = np.mean(ogps_values) if len(ogps_values) > 0 else 0.0 + ious[i, j] = ogps + + gbb = [gt["bbox"] for gt in g] + dbb = [dt["bbox"] for dt in d] + + # compute iou between each dt and gt region + iscrowd = [int(o.get("iscrowd", 0)) for o in g] + ious_bb = maskUtils.iou(dbb, gbb, iscrowd) + return ious, ious_bb + + def evaluateImg(self, imgId, catId, aRng, maxDet): + """ + perform evaluation for single category and image + :return: dict (single image results) + """ + + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for 
cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return None + + for g in gt: + # g['_ignore'] = g['ignore'] + if g["ignore"] or (g["area"] < aRng[0] or g["area"] > aRng[1]): + g["_ignore"] = True + else: + g["_ignore"] = False + + # sort dt highest score first, sort gt ignore last + gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort") + gt = [gt[i] for i in gtind] + dtind = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in dtind[0:maxDet]] + iscrowd = [int(o.get("iscrowd", 0)) for o in gt] + # load computed ious + if p.iouType == "densepose": + # print('Checking the length', len(self.ious[imgId, catId])) + # if len(self.ious[imgId, catId]) == 0: + # print(self.ious[imgId, catId]) + ious = ( + self.ious[imgId, catId][0][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + ioubs = ( + self.ious[imgId, catId][1][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + if self._dpEvalMode in {DensePoseEvalMode.GPSM, DensePoseEvalMode.IOU}: + iousM = ( + self.real_ious[imgId, catId][:, gtind] + if len(self.real_ious[imgId, catId]) > 0 + else self.real_ious[imgId, catId] + ) + else: + ious = ( + self.ious[imgId, catId][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + + T = len(p.iouThrs) + G = len(gt) + D = len(dt) + gtm = np.zeros((T, G)) + dtm = np.zeros((T, D)) + gtIg = np.array([g["_ignore"] for g in gt]) + dtIg = np.zeros((T, D)) + if np.all(gtIg) and p.iouType == "densepose": + dtIg = np.logical_or(dtIg, True) + + if len(ious) > 0: # and not p.iouType == 'densepose': + for tind, t in enumerate(p.iouThrs): + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + iou = min([t, 1 - 1e-10]) + m = -1 + for gind, _g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # if dt matched to reg gt, and on ignore gt, stop + if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1: + break + if p.iouType == "densepose": + if self._dpEvalMode == DensePoseEvalMode.GPSM: + new_iou = np.sqrt(iousM[dind, gind] * ious[dind, gind]) + elif self._dpEvalMode == DensePoseEvalMode.IOU: + new_iou = iousM[dind, gind] + elif self._dpEvalMode == DensePoseEvalMode.GPS: + new_iou = ious[dind, gind] + else: + new_iou = ious[dind, gind] + if new_iou < iou: + continue + if new_iou == 0.0: + continue + # if match successful and best so far, store appropriately + iou = new_iou + m = gind + # if match made store id of match for both dt and gt + if m == -1: + continue + dtIg[tind, dind] = gtIg[m] + dtm[tind, dind] = gt[m]["id"] + gtm[tind, m] = d["id"] + + if p.iouType == "densepose": + if not len(ioubs) == 0: + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + if dtm[tind, dind] == 0: + ioub = 0.8 + m = -1 + for gind, _g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # continue to next gt unless better match made + if ioubs[dind, gind] < ioub: + continue + # if match successful and best so far, store appropriately + ioub = ioubs[dind, gind] + m = gind + # if match made store id of match for both dt and gt + if m > -1: + dtIg[:, dind] = gtIg[m] + if gtIg[m]: + dtm[tind, dind] = gt[m]["id"] + gtm[tind, m] = d["id"] + # set unmatched detections outside of area range to ignore + a = np.array([d["area"] < aRng[0] or 
d["area"] > aRng[1] for d in dt]).reshape((1, len(dt))) + dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0))) + # store results for given image and category + # print('Done with the function', len(self.ious[imgId, catId])) + return { + "image_id": imgId, + "category_id": catId, + "aRng": aRng, + "maxDet": maxDet, + "dtIds": [d["id"] for d in dt], + "gtIds": [g["id"] for g in gt], + "dtMatches": dtm, + "gtMatches": gtm, + "dtScores": [d["score"] for d in dt], + "gtIgnore": gtIg, + "dtIgnore": dtIg, + } + + def accumulate(self, p=None): + """ + Accumulate per image evaluation results and store the result in self.eval + :param p: input params for evaluation + :return: None + """ + logger.info("Accumulating evaluation results...") + tic = time.time() + if not self.evalImgs: + logger.info("Please run evaluate() first") + # allows input customized parameters + if p is None: + p = self.params + p.catIds = p.catIds if p.useCats == 1 else [-1] + T = len(p.iouThrs) + R = len(p.recThrs) + K = len(p.catIds) if p.useCats else 1 + A = len(p.areaRng) + M = len(p.maxDets) + precision = -(np.ones((T, R, K, A, M))) # -1 for the precision of absent categories + recall = -(np.ones((T, K, A, M))) + + # create dictionary for future indexing + logger.info("Categories: {}".format(p.catIds)) + _pe = self._paramsEval + catIds = _pe.catIds if _pe.useCats else [-1] + setK = set(catIds) + setA = set(map(tuple, _pe.areaRng)) + setM = set(_pe.maxDets) + setI = set(_pe.imgIds) + # get inds to evaluate + k_list = [n for n, k in enumerate(p.catIds) if k in setK] + m_list = [m for n, m in enumerate(p.maxDets) if m in setM] + a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] + i_list = [n for n, i in enumerate(p.imgIds) if i in setI] + I0 = len(_pe.imgIds) + A0 = len(_pe.areaRng) + # retrieve E at each category, area range, and max number of detections + for k, k0 in enumerate(k_list): + Nk = k0 * A0 * I0 + for a, a0 in enumerate(a_list): + Na = a0 * I0 + for m, maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if e is not None] + if len(E) == 0: + continue + dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E]) + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. 
+ inds = np.argsort(-dtScores, kind="mergesort") + + dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds] + dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds] + gtIg = np.concatenate([e["gtIgnore"] for e in E]) + npig = np.count_nonzero(gtIg == 0) + if npig == 0: + continue + tps = np.logical_and(dtm, np.logical_not(dtIg)) + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) + tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float) + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp + tp + np.spacing(1)) + q = np.zeros((R,)) + + if nd: + recall[t, k, a, m] = rc[-1] + else: + recall[t, k, a, m] = 0 + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist() + q = q.tolist() + + for i in range(nd - 1, 0, -1): + if pr[i] > pr[i - 1]: + pr[i - 1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + except Exception: + pass + precision[t, :, k, a, m] = np.array(q) + logger.info( + "Final: max precision {}, min precision {}".format(np.max(precision), np.min(precision)) + ) + self.eval = { + "params": p, + "counts": [T, R, K, A, M], + "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + "precision": precision, + "recall": recall, + } + toc = time.time() + logger.info("DONE (t={:0.2f}s).".format(toc - tic)) + + def summarize(self): + """ + Compute and display summary metrics for evaluation results. + Note this function can *only* be applied on the default parameter setting + """ + + def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): + p = self.params + iStr = " {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" + titleStr = "Average Precision" if ap == 1 else "Average Recall" + typeStr = "(AP)" if ap == 1 else "(AR)" + measure = "IoU" + if self.params.iouType == "keypoints": + measure = "OKS" + elif self.params.iouType == "densepose": + measure = "OGPS" + iouStr = ( + "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) + if iouThr is None + else "{:0.2f}".format(iouThr) + ) + + aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval["precision"] + # IoU + if iouThr is not None: + t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval["recall"] + if iouThr is not None: + t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s)) + return mean_s + + def _summarizeDets(): + stats = np.zeros((12,)) + stats[0] = _summarize(1) + stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) + stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) + stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) + stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[7] = 
_summarize(0, maxDets=self.params.maxDets[1]) + stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) + stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) + stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) + return stats + + def _summarizeKps(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=0.5) + stats[2] = _summarize(1, maxDets=20, iouThr=0.75) + stats[3] = _summarize(1, maxDets=20, areaRng="medium") + stats[4] = _summarize(1, maxDets=20, areaRng="large") + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=0.5) + stats[7] = _summarize(0, maxDets=20, iouThr=0.75) + stats[8] = _summarize(0, maxDets=20, areaRng="medium") + stats[9] = _summarize(0, maxDets=20, areaRng="large") + return stats + + def _summarizeUvs(): + stats = [_summarize(1, maxDets=self.params.maxDets[0])] + min_threshold = self.params.iouThrs.min() + if min_threshold <= 0.201: + stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.2)] + if min_threshold <= 0.301: + stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.3)] + if min_threshold <= 0.401: + stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.4)] + stats += [ + _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5), + _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75), + _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium"), + _summarize(1, maxDets=self.params.maxDets[0], areaRng="large"), + _summarize(0, maxDets=self.params.maxDets[0]), + _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5), + _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75), + _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium"), + _summarize(0, maxDets=self.params.maxDets[0], areaRng="large"), + ] + return np.array(stats) + + def _summarizeUvsOld(): + stats = np.zeros((18,)) + stats[0] = _summarize(1, maxDets=self.params.maxDets[0]) + stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55) + stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60) + stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65) + stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70) + stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80) + stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85) + stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90) + stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95) + stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium") + stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large") + stats[13] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium") + stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large") + return stats + + if not self.eval: + raise Exception("Please run accumulate() first") + iouType = self.params.iouType + if iouType in ["segm", "bbox"]: + summarize = _summarizeDets + elif iouType in ["keypoints"]: + summarize = _summarizeKps + 
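The AP/AR numbers reported by `_summarize` above are plain means over slices of the accumulated precision and recall arrays, with the -1 entries (settings that have no ground truth) excluded. A toy slice for AP at IoU 0.5, with an invented precision array:

```python
import numpy as np

iouThrs = np.array([0.5, 0.75])
precision = np.full((2, 3, 1, 1, 1), -1.0)     # [T, R, K, A, M], invented values
precision[0, :, 0, 0, 0] = [1.0, 0.75, 0.5]    # PR samples at IoU 0.5
precision[1, :, 0, 0, 0] = [0.9, 0.5, -1.0]    # PR samples at IoU 0.75

t = np.where(np.abs(0.5 - iouThrs) < 0.001)[0]
s = precision[t]
ap50 = np.mean(s[s > -1])
print(ap50)  # 0.75
```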
elif iouType in ["densepose"]: + summarize = _summarizeUvs + self.stats = summarize() + + def __str__(self): + self.summarize() + + # ================ functions for dense pose ============================== + def findAllClosestVertsUV(self, U_points, V_points, Index_points): + ClosestVerts = np.ones(Index_points.shape) * -1 + for i in np.arange(24): + # + if (i + 1) in Index_points: + UVs = np.array( + [U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]] + ) + Current_Part_UVs = self.Part_UVs[i] + Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] + D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() + ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[ + np.argmin(D, axis=0) + ] + ClosestVertsTransformed = self.PDIST_transform[ClosestVerts.astype(int) - 1] + ClosestVertsTransformed[ClosestVerts < 0] = 0 + return ClosestVertsTransformed + + def findClosestVertsCse(self, embedding, py, px, mask, mesh_name): + mesh_vertex_embeddings = self.embedder(mesh_name) + pixel_embeddings = embedding[:, py, px].t().to(device="cuda") + mask_vals = mask[py, px] + edm = squared_euclidean_distance_matrix(pixel_embeddings, mesh_vertex_embeddings) + vertex_indices = edm.argmin(dim=1).cpu() + vertex_indices[mask_vals <= 0] = -1 + return vertex_indices + + def findAllClosestVertsGT(self, gt): + # + I_gt = np.array(gt["dp_I"]) + U_gt = np.array(gt["dp_U"]) + V_gt = np.array(gt["dp_V"]) + # + # print(I_gt) + # + ClosestVertsGT = np.ones(I_gt.shape) * -1 + for i in np.arange(24): + if (i + 1) in I_gt: + UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]]) + Current_Part_UVs = self.Part_UVs[i] + Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] + D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() + ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)] + # + ClosestVertsGTTransformed = self.PDIST_transform[ClosestVertsGT.astype(int) - 1] + ClosestVertsGTTransformed[ClosestVertsGT < 0] = 0 + return ClosestVertsGT, ClosestVertsGTTransformed + + def getDistancesCse(self, cVertsGT, cVerts, mesh_name): + geodists_vertices = torch.ones_like(cVertsGT) * float("inf") + selected = (cVertsGT >= 0) * (cVerts >= 0) + mesh = create_mesh(mesh_name, "cpu") + geodists_vertices[selected] = mesh.geodists[cVertsGT[selected], cVerts[selected]] + return geodists_vertices.numpy() + + def getDistancesUV(self, cVertsGT, cVerts): + # + n = 27554 + dists = [] + for d in range(len(cVertsGT)): + if cVertsGT[d] > 0: + if cVerts[d] > 0: + i = cVertsGT[d] - 1 + j = cVerts[d] - 1 + if j == i: + dists.append(0) + elif j > i: + ccc = i + i = j + j = ccc + i = n - i - 1 + j = n - j - 1 + k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 + k = (n * n - n) / 2 - k - 1 + dists.append(self.Pdist_matrix[int(k)][0]) + else: + i = n - i - 1 + j = n - j - 1 + k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 + k = (n * n - n) / 2 - k - 1 + dists.append(self.Pdist_matrix[int(k)][0]) + else: + dists.append(np.inf) + return np.atleast_1d(np.array(dists).squeeze()) + + +class Params: + """ + Params for coco evaluation api + """ + + def setDetParams(self): + self.imgIds = [] + self.catIds = [] + # np.arange causes trouble. 
the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) + self.maxDets = [1, 10, 100] + self.areaRng = [ + [0**2, 1e5**2], + [0**2, 32**2], + [32**2, 96**2], + [96**2, 1e5**2], + ] + self.areaRngLbl = ["all", "small", "medium", "large"] + self.useCats = 1 + + def setKpParams(self): + self.imgIds = [] + self.catIds = [] + # np.arange causes trouble. the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True) + self.maxDets = [20] + self.areaRng = [[0**2, 1e5**2], [32**2, 96**2], [96**2, 1e5**2]] + self.areaRngLbl = ["all", "medium", "large"] + self.useCats = 1 + + def setUvParams(self): + self.imgIds = [] + self.catIds = [] + self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) + self.maxDets = [20] + self.areaRng = [[0**2, 1e5**2], [32**2, 96**2], [96**2, 1e5**2]] + self.areaRngLbl = ["all", "medium", "large"] + self.useCats = 1 + + def __init__(self, iouType="segm"): + if iouType == "segm" or iouType == "bbox": + self.setDetParams() + elif iouType == "keypoints": + self.setKpParams() + elif iouType == "densepose": + self.setUvParams() + else: + raise Exception("iouType not supported") + self.iouType = iouType + # useSegm is deprecated + self.useSegm = None diff --git a/data_processing/detectron2/projects/DensePose/densepose/evaluation/evaluator.py b/data_processing/detectron2/projects/DensePose/densepose/evaluation/evaluator.py new file mode 100644 index 0000000..d5d1d78 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/evaluation/evaluator.py @@ -0,0 +1,421 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
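The comment above about `np.arange` refers to floating-point drift: with a fractional step, arange can yield thresholds that differ from the intended values in the last bits, whereas `np.linspace` takes an explicit count and hits both endpoints exactly. Note also that `setKpParams` passes the float result of `np.round` straight to `np.linspace`, which recent NumPy releases reject with a TypeError, while the other two setters cast it to `int` first. A short comparison:

```python
import numpy as np

# linspace takes an explicit count and reproduces the endpoints exactly;
# arange with a fractional step is subject to floating-point drift.
n = int(np.round((0.95 - 0.5) / 0.05)) + 1            # 10 thresholds
linspace_thrs = np.linspace(0.5, 0.95, n, endpoint=True)
arange_thrs = np.arange(0.5, 0.95 + 0.05, 0.05)

print(linspace_thrs[-1] == 0.95)                      # True by construction
print(arange_thrs[-1], len(arange_thrs))              # last value and length may drift
```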
+ +import contextlib +import copy +import io +import itertools +import logging +import numpy as np +import os +from collections import OrderedDict +from typing import Dict, Iterable, List, Optional +import pycocotools.mask as mask_utils +import torch +from pycocotools.coco import COCO +from tabulate import tabulate + +from detectron2.config import CfgNode +from detectron2.data import MetadataCatalog +from detectron2.evaluation import DatasetEvaluator +from detectron2.structures import BoxMode +from detectron2.utils.comm import gather, get_rank, is_main_process, synchronize +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import create_small_table + +from densepose.converters import ToChartResultConverter, ToMaskConverter +from densepose.data.datasets.coco import maybe_filter_and_map_categories_cocoapi +from densepose.structures import ( + DensePoseChartPredictorOutput, + DensePoseEmbeddingPredictorOutput, + quantize_densepose_chart_result, +) + +from .densepose_coco_evaluation import DensePoseCocoEval, DensePoseEvalMode +from .mesh_alignment_evaluator import MeshAlignmentEvaluator +from .tensor_storage import ( + SingleProcessFileTensorStorage, + SingleProcessRamTensorStorage, + SingleProcessTensorStorage, + SizeData, + storage_gather, +) + + +class DensePoseCOCOEvaluator(DatasetEvaluator): + def __init__( + self, + dataset_name, + distributed, + output_dir=None, + evaluator_type: str = "iuv", + min_iou_threshold: float = 0.5, + storage: Optional[SingleProcessTensorStorage] = None, + embedder=None, + should_evaluate_mesh_alignment: bool = False, + mesh_alignment_mesh_names: Optional[List[str]] = None, + ): + self._embedder = embedder + self._distributed = distributed + self._output_dir = output_dir + self._evaluator_type = evaluator_type + self._storage = storage + self._should_evaluate_mesh_alignment = should_evaluate_mesh_alignment + + assert not ( + should_evaluate_mesh_alignment and embedder is None + ), "Mesh alignment evaluation is activated, but no vertex embedder provided!" + if should_evaluate_mesh_alignment: + self._mesh_alignment_evaluator = MeshAlignmentEvaluator( + embedder, + mesh_alignment_mesh_names, + ) + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self._metadata = MetadataCatalog.get(dataset_name) + self._min_threshold = min_iou_threshold + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + maybe_filter_and_map_categories_cocoapi(dataset_name, self._coco_api) + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + The :class:`Instances` object needs to have `densepose` field. 
+ """ + for input, output in zip(inputs, outputs): + instances = output["instances"].to(self._cpu_device) + if not instances.has("pred_densepose"): + continue + prediction_list = prediction_to_dict( + instances, + input["image_id"], + self._embedder, + self._metadata.class_to_mesh_name, + self._storage is not None, + ) + if self._storage is not None: + for prediction_dict in prediction_list: + dict_to_store = {} + for field_name in self._storage.data_schema: + dict_to_store[field_name] = prediction_dict[field_name] + record_id = self._storage.put(dict_to_store) + prediction_dict["record_id"] = record_id + prediction_dict["rank"] = get_rank() + for field_name in self._storage.data_schema: + del prediction_dict[field_name] + self._predictions.extend(prediction_list) + + def evaluate(self, img_ids=None): + if self._distributed: + synchronize() + predictions = gather(self._predictions) + predictions = list(itertools.chain(*predictions)) + else: + predictions = self._predictions + + multi_storage = storage_gather(self._storage) if self._storage is not None else None + + if not is_main_process(): + return + return copy.deepcopy(self._eval_predictions(predictions, multi_storage, img_ids)) + + def _eval_predictions(self, predictions, multi_storage=None, img_ids=None): + """ + Evaluate predictions on densepose. + Return results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "coco_densepose_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._logger.info("Evaluating predictions ...") + res = OrderedDict() + results_gps, results_gpsm, results_segm = _evaluate_predictions_on_coco( + self._coco_api, + predictions, + multi_storage, + self._embedder, + class_names=self._metadata.get("thing_classes"), + min_threshold=self._min_threshold, + img_ids=img_ids, + ) + res["densepose_gps"] = results_gps + res["densepose_gpsm"] = results_gpsm + res["densepose_segm"] = results_segm + if self._should_evaluate_mesh_alignment: + res["densepose_mesh_alignment"] = self._evaluate_mesh_alignment() + return res + + def _evaluate_mesh_alignment(self): + self._logger.info("Mesh alignment evaluation ...") + mean_ge, mean_gps, per_mesh_metrics = self._mesh_alignment_evaluator.evaluate() + results = { + "GE": mean_ge * 100, + "GPS": mean_gps * 100, + } + mesh_names = set() + for metric_name in per_mesh_metrics: + for mesh_name, value in per_mesh_metrics[metric_name].items(): + results[f"{metric_name}-{mesh_name}"] = value * 100 + mesh_names.add(mesh_name) + self._print_mesh_alignment_results(results, mesh_names) + return results + + def _print_mesh_alignment_results(self, results: Dict[str, float], mesh_names: Iterable[str]): + self._logger.info("Evaluation results for densepose, mesh alignment:") + self._logger.info(f'| {"Mesh":13s} | {"GErr":7s} | {"GPS":7s} |') + self._logger.info("| :-----------: | :-----: | :-----: |") + for mesh_name in mesh_names: + ge_key = f"GE-{mesh_name}" + ge_str = f"{results[ge_key]:.4f}" if ge_key in results else " " + gps_key = f"GPS-{mesh_name}" + gps_str = f"{results[gps_key]:.4f}" if gps_key in results else " " + self._logger.info(f"| {mesh_name:13s} | {ge_str:7s} | {gps_str:7s} |") + self._logger.info("| :-------------------------------: |") + ge_key = "GE" + ge_str = f"{results[ge_key]:.4f}" if ge_key in results else " " + gps_key = "GPS" + gps_str = f"{results[gps_key]:.4f}" if 
gps_key in results else " " + self._logger.info(f'| {"MEAN":13s} | {ge_str:7s} | {gps_str:7s} |') + + +def prediction_to_dict(instances, img_id, embedder, class_to_mesh_name, use_storage): + """ + Args: + instances (Instances): the output of the model + img_id (str): the image id in COCO + + Returns: + list[dict]: the results in densepose evaluation format + """ + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + raw_boxes_xywh = BoxMode.convert( + instances.pred_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + + if isinstance(instances.pred_densepose, DensePoseEmbeddingPredictorOutput): + results_densepose = densepose_cse_predictions_to_dict( + instances, embedder, class_to_mesh_name, use_storage + ) + elif isinstance(instances.pred_densepose, DensePoseChartPredictorOutput): + if not use_storage: + results_densepose = densepose_chart_predictions_to_dict(instances) + else: + results_densepose = densepose_chart_predictions_to_storage_dict(instances) + + results = [] + for k in range(len(instances)): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": raw_boxes_xywh[k].tolist(), + "score": scores[k], + } + results.append({**result, **results_densepose[k]}) + return results + + +def densepose_chart_predictions_to_dict(instances): + segmentations = ToMaskConverter.convert( + instances.pred_densepose, instances.pred_boxes, instances.image_size + ) + + results = [] + for k in range(len(instances)): + densepose_results_quantized = quantize_densepose_chart_result( + ToChartResultConverter.convert(instances.pred_densepose[k], instances.pred_boxes[k]) + ) + densepose_results_quantized.labels_uv_uint8 = ( + densepose_results_quantized.labels_uv_uint8.cpu() + ) + segmentation = segmentations.tensor[k] + segmentation_encoded = mask_utils.encode( + np.require(segmentation.numpy(), dtype=np.uint8, requirements=["F"]) + ) + segmentation_encoded["counts"] = segmentation_encoded["counts"].decode("utf-8") + result = { + "densepose": densepose_results_quantized, + "segmentation": segmentation_encoded, + } + results.append(result) + return results + + +def densepose_chart_predictions_to_storage_dict(instances): + results = [] + for k in range(len(instances)): + densepose_predictor_output = instances.pred_densepose[k] + result = { + "coarse_segm": densepose_predictor_output.coarse_segm.squeeze(0).cpu(), + "fine_segm": densepose_predictor_output.fine_segm.squeeze(0).cpu(), + "u": densepose_predictor_output.u.squeeze(0).cpu(), + "v": densepose_predictor_output.v.squeeze(0).cpu(), + } + results.append(result) + return results + + +def densepose_cse_predictions_to_dict(instances, embedder, class_to_mesh_name, use_storage): + results = [] + for k in range(len(instances)): + cse = instances.pred_densepose[k] + results.append( + { + "coarse_segm": cse.coarse_segm[0].cpu(), + "embedding": cse.embedding[0].cpu(), + } + ) + return results + + +def _evaluate_predictions_on_coco( + coco_gt, + coco_results, + multi_storage=None, + embedder=None, + class_names=None, + min_threshold: float = 0.5, + img_ids=None, +): + logger = logging.getLogger(__name__) + + densepose_metrics = _get_densepose_metrics(min_threshold) + if len(coco_results) == 0: # cocoapi does not handle empty results very well + logger.warn("No predictions from the model! 
Set scores to -1") + results_gps = {metric: -1 for metric in densepose_metrics} + results_gpsm = {metric: -1 for metric in densepose_metrics} + results_segm = {metric: -1 for metric in densepose_metrics} + return results_gps, results_gpsm, results_segm + + coco_dt = coco_gt.loadRes(coco_results) + + results = [] + for eval_mode_name in ["GPS", "GPSM", "IOU"]: + eval_mode = getattr(DensePoseEvalMode, eval_mode_name) + coco_eval = DensePoseCocoEval( + coco_gt, coco_dt, "densepose", multi_storage, embedder, dpEvalMode=eval_mode + ) + result = _derive_results_from_coco_eval( + coco_eval, eval_mode_name, densepose_metrics, class_names, min_threshold, img_ids + ) + results.append(result) + return results + + +def _get_densepose_metrics(min_threshold: float = 0.5): + metrics = ["AP"] + if min_threshold <= 0.201: + metrics += ["AP20"] + if min_threshold <= 0.301: + metrics += ["AP30"] + if min_threshold <= 0.401: + metrics += ["AP40"] + metrics.extend(["AP50", "AP75", "APm", "APl", "AR", "AR50", "AR75", "ARm", "ARl"]) + return metrics + + +def _derive_results_from_coco_eval( + coco_eval, eval_mode_name, metrics, class_names, min_threshold: float, img_ids +): + if img_ids is not None: + coco_eval.params.imgIds = img_ids + coco_eval.params.iouThrs = np.linspace( + min_threshold, 0.95, int(np.round((0.95 - min_threshold) / 0.05)) + 1, endpoint=True + ) + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)} + logger = logging.getLogger(__name__) + logger.info( + f"Evaluation results for densepose, {eval_mode_name} metric: \n" + + create_small_table(results) + ) + if class_names is None or len(class_names) <= 1: + return results + + # Compute per-category AP, the same way as it is done in D2 + # (see detectron2/evaluation/coco_evaluation.py): + precisions = coco_eval.eval["precision"] + # precision has dims (iou, recall, cls, area range, max dets) + assert len(class_names) == precisions.shape[2] + + results_per_category = [] + for idx, name in enumerate(class_names): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + precision = precisions[:, :, idx, 0, -1] + precision = precision[precision > -1] + ap = np.mean(precision) if precision.size else float("nan") + results_per_category.append((f"{name}", float(ap * 100))) + + # tabulate it + n_cols = min(6, len(results_per_category) * 2) + results_flatten = list(itertools.chain(*results_per_category)) + results_2d = itertools.zip_longest(*[results_flatten[i::n_cols] for i in range(n_cols)]) + table = tabulate( + results_2d, + tablefmt="pipe", + floatfmt=".3f", + headers=["category", "AP"] * (n_cols // 2), + numalign="left", + ) + logger.info(f"Per-category {eval_mode_name} AP: \n" + table) + + results.update({"AP-" + name: ap for name, ap in results_per_category}) + return results + + +def build_densepose_evaluator_storage(cfg: CfgNode, output_folder: str): + storage_spec = cfg.DENSEPOSE_EVALUATION.STORAGE + if storage_spec == "none": + return None + evaluator_type = cfg.DENSEPOSE_EVALUATION.TYPE + # common output tensor sizes + hout = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE + wout = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE + n_csc = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + # specific output tensors + if evaluator_type == "iuv": + n_fsc = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1 + schema = { + "coarse_segm": SizeData(dtype="float32", shape=(n_csc, hout, wout)), + "fine_segm": 
SizeData(dtype="float32", shape=(n_fsc, hout, wout)), + "u": SizeData(dtype="float32", shape=(n_fsc, hout, wout)), + "v": SizeData(dtype="float32", shape=(n_fsc, hout, wout)), + } + elif evaluator_type == "cse": + embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE + schema = { + "coarse_segm": SizeData(dtype="float32", shape=(n_csc, hout, wout)), + "embedding": SizeData(dtype="float32", shape=(embed_size, hout, wout)), + } + else: + raise ValueError(f"Unknown evaluator type: {evaluator_type}") + # storage types + if storage_spec == "ram": + storage = SingleProcessRamTensorStorage(schema, io.BytesIO()) + elif storage_spec == "file": + fpath = os.path.join(output_folder, f"DensePoseEvaluatorStorage.{get_rank()}.bin") + PathManager.mkdirs(output_folder) + storage = SingleProcessFileTensorStorage(schema, fpath, "wb") + else: + raise ValueError(f"Unknown storage specification: {storage_spec}") + return storage diff --git a/data_processing/detectron2/projects/DensePose/densepose/evaluation/mesh_alignment_evaluator.py b/data_processing/detectron2/projects/DensePose/densepose/evaluation/mesh_alignment_evaluator.py new file mode 100644 index 0000000..9d67c1a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/evaluation/mesh_alignment_evaluator.py @@ -0,0 +1,66 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import json +import logging +from typing import List, Optional +import torch +from torch import nn + +from detectron2.utils.file_io import PathManager + +from densepose.structures.mesh import create_mesh + + +class MeshAlignmentEvaluator: + """ + Class for evaluation of 3D mesh alignment based on the learned vertex embeddings + """ + + def __init__(self, embedder: nn.Module, mesh_names: Optional[List[str]]): + self.embedder = embedder + # use the provided mesh names if not None and not an empty list + self.mesh_names = mesh_names if mesh_names else embedder.mesh_names + self.logger = logging.getLogger(__name__) + with PathManager.open( + "https://dl.fbaipublicfiles.com/densepose/data/cse/mesh_keyvertices_v0.json", "r" + ) as f: + self.mesh_keyvertices = json.load(f) + + def evaluate(self): + ge_per_mesh = {} + gps_per_mesh = {} + for mesh_name_1 in self.mesh_names: + avg_errors = [] + avg_gps = [] + embeddings_1 = self.embedder(mesh_name_1) + keyvertices_1 = self.mesh_keyvertices[mesh_name_1] + keyvertex_names_1 = list(keyvertices_1.keys()) + keyvertex_indices_1 = [keyvertices_1[name] for name in keyvertex_names_1] + for mesh_name_2 in self.mesh_names: + if mesh_name_1 == mesh_name_2: + continue + embeddings_2 = self.embedder(mesh_name_2) + keyvertices_2 = self.mesh_keyvertices[mesh_name_2] + sim_matrix_12 = embeddings_1[keyvertex_indices_1].mm(embeddings_2.T) + vertices_2_matching_keyvertices_1 = sim_matrix_12.argmax(axis=1) + mesh_2 = create_mesh(mesh_name_2, embeddings_2.device) + geodists = mesh_2.geodists[ + vertices_2_matching_keyvertices_1, + [keyvertices_2[name] for name in keyvertex_names_1], + ] + Current_Mean_Distances = 0.255 + gps = (-(geodists**2) / (2 * (Current_Mean_Distances**2))).exp() + avg_errors.append(geodists.mean().item()) + avg_gps.append(gps.mean().item()) + + ge_mean = torch.as_tensor(avg_errors).mean().item() + gps_mean = torch.as_tensor(avg_gps).mean().item() + ge_per_mesh[mesh_name_1] = ge_mean + gps_per_mesh[mesh_name_1] = gps_mean + ge_mean_global = torch.as_tensor(list(ge_per_mesh.values())).mean().item() + gps_mean_global = 
torch.as_tensor(list(gps_per_mesh.values())).mean().item() + per_mesh_metrics = { + "GE": ge_per_mesh, + "GPS": gps_per_mesh, + } + return ge_mean_global, gps_mean_global, per_mesh_metrics diff --git a/data_processing/detectron2/projects/DensePose/densepose/evaluation/tensor_storage.py b/data_processing/detectron2/projects/DensePose/densepose/evaluation/tensor_storage.py new file mode 100644 index 0000000..72e3cb6 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/evaluation/tensor_storage.py @@ -0,0 +1,238 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import io +import numpy as np +import os +from dataclasses import dataclass +from functools import reduce +from operator import mul +from typing import BinaryIO, Dict, Optional, Tuple +import torch + +from detectron2.utils.comm import gather, get_rank +from detectron2.utils.file_io import PathManager + + +@dataclass +class SizeData: + dtype: str + shape: Tuple[int] + + +def _calculate_record_field_size_b(data_schema: Dict[str, SizeData], field_name: str) -> int: + schema = data_schema[field_name] + element_size_b = np.dtype(schema.dtype).itemsize + record_field_size_b = reduce(mul, schema.shape) * element_size_b + return record_field_size_b + + +def _calculate_record_size_b(data_schema: Dict[str, SizeData]) -> int: + record_size_b = 0 + for field_name in data_schema: + record_field_size_b = _calculate_record_field_size_b(data_schema, field_name) + record_size_b += record_field_size_b + return record_size_b + + +def _calculate_record_field_sizes_b(data_schema: Dict[str, SizeData]) -> Dict[str, int]: + field_sizes_b = {} + for field_name in data_schema: + field_sizes_b[field_name] = _calculate_record_field_size_b(data_schema, field_name) + return field_sizes_b + + +class SingleProcessTensorStorage: + """ + Compact tensor storage to keep tensor data of predefined size and type. + """ + + def __init__(self, data_schema: Dict[str, SizeData], storage_impl: BinaryIO): + """ + Construct tensor storage based on information on data shape and size. + Internally uses numpy to interpret the type specification. + The storage must support operations `seek(offset, whence=os.SEEK_SET)` and + `read(size)` to be able to perform the `get` operation. + The storage must support operation `write(bytes)` to be able to perform + the `put` operation. + + Args: + data_schema (dict: str -> SizeData): dictionary which maps tensor name + to its size data (shape and data type), e.g. + ``` + { + "coarse_segm": SizeData(dtype="float32", shape=(112, 112)), + "embedding": SizeData(dtype="float32", shape=(16, 112, 112)), + } + ``` + storage_impl (BinaryIO): io instance that handles file-like seek, read + and write operations, e.g. 
a file handle or a memory buffer like io.BytesIO + """ + self.data_schema = data_schema + self.record_size_b = _calculate_record_size_b(data_schema) + self.record_field_sizes_b = _calculate_record_field_sizes_b(data_schema) + self.storage_impl = storage_impl + self.next_record_id = 0 + + def get(self, record_id: int) -> Dict[str, torch.Tensor]: + """ + Load tensors from the storage by record ID + + Args: + record_id (int): Record ID, for which to load the data + + Return: + dict: str -> tensor: tensor name mapped to tensor data, recorded under the provided ID + """ + self.storage_impl.seek(record_id * self.record_size_b, os.SEEK_SET) + data_bytes = self.storage_impl.read(self.record_size_b) + assert len(data_bytes) == self.record_size_b, ( + f"Expected data size {self.record_size_b} B could not be read: " + f"got {len(data_bytes)} B" + ) + record = {} + cur_idx = 0 + # it's important to read and write in the same order + for field_name in sorted(self.data_schema): + schema = self.data_schema[field_name] + field_size_b = self.record_field_sizes_b[field_name] + chunk = data_bytes[cur_idx : cur_idx + field_size_b] + data_np = np.frombuffer( + chunk, dtype=schema.dtype, count=reduce(mul, schema.shape) + ).reshape(schema.shape) + record[field_name] = torch.from_numpy(data_np) + cur_idx += field_size_b + return record + + def put(self, data: Dict[str, torch.Tensor]) -> int: + """ + Store tensors in the storage + + Args: + data (dict: str -> tensor): data to store, a dictionary which maps + tensor names into tensors; tensor shapes must match those specified + in data schema. + Return: + int: record ID, under which the data is stored + """ + # it's important to read and write in the same order + for field_name in sorted(self.data_schema): + assert ( + field_name in data + ), f"Field '{field_name}' not present in data: data keys are {data.keys()}" + value = data[field_name] + assert value.shape == self.data_schema[field_name].shape, ( + f"Mismatched tensor shapes for field '{field_name}': " + f"expected {self.data_schema[field_name].shape}, got {value.shape}" + ) + data_bytes = value.cpu().numpy().tobytes() + assert len(data_bytes) == self.record_field_sizes_b[field_name], ( + f"Expected field {field_name} to be of size " + f"{self.record_field_sizes_b[field_name]} B, got {len(data_bytes)} B" + ) + self.storage_impl.write(data_bytes) + record_id = self.next_record_id + self.next_record_id += 1 + return record_id + + +class SingleProcessFileTensorStorage(SingleProcessTensorStorage): + """ + Implementation of a single process tensor storage which stores data in a file + """ + + def __init__(self, data_schema: Dict[str, SizeData], fpath: str, mode: str): + self.fpath = fpath + assert "b" in mode, f"Tensor storage should be opened in binary mode, got '{mode}'" + if "w" in mode: + file_h = PathManager.open(fpath, mode) + elif "r" in mode: + local_fpath = PathManager.get_local_path(fpath) + file_h = open(local_fpath, mode) + else: + raise ValueError(f"Unsupported file mode {mode}, supported modes: rb, wb") + super().__init__(data_schema, file_h) # pyre-ignore[6] + + +class SingleProcessRamTensorStorage(SingleProcessTensorStorage): + """ + Implementation of a single process tensor storage which stores data in RAM + """ + + def __init__(self, data_schema: Dict[str, SizeData], buf: io.BytesIO): + super().__init__(data_schema, buf) + + +class MultiProcessTensorStorage: + """ + Representation of a set of tensor storages created by individual processes, + allows to access those storages from a single owner 
process. The storages + should either be shared or broadcasted to the owner process. + The processes are identified by their rank, data is uniquely defined by + the rank of the process and the record ID. + """ + + def __init__(self, rank_to_storage: Dict[int, SingleProcessTensorStorage]): + self.rank_to_storage = rank_to_storage + + def get(self, rank: int, record_id: int) -> Dict[str, torch.Tensor]: + storage = self.rank_to_storage[rank] + return storage.get(record_id) + + def put(self, rank: int, data: Dict[str, torch.Tensor]) -> int: + storage = self.rank_to_storage[rank] + return storage.put(data) + + +class MultiProcessFileTensorStorage(MultiProcessTensorStorage): + def __init__(self, data_schema: Dict[str, SizeData], rank_to_fpath: Dict[int, str], mode: str): + rank_to_storage = { + rank: SingleProcessFileTensorStorage(data_schema, fpath, mode) + for rank, fpath in rank_to_fpath.items() + } + super().__init__(rank_to_storage) # pyre-ignore[6] + + +class MultiProcessRamTensorStorage(MultiProcessTensorStorage): + def __init__(self, data_schema: Dict[str, SizeData], rank_to_buffer: Dict[int, io.BytesIO]): + rank_to_storage = { + rank: SingleProcessRamTensorStorage(data_schema, buf) + for rank, buf in rank_to_buffer.items() + } + super().__init__(rank_to_storage) # pyre-ignore[6] + + +def _ram_storage_gather( + storage: SingleProcessRamTensorStorage, dst_rank: int = 0 +) -> Optional[MultiProcessRamTensorStorage]: + storage.storage_impl.seek(0, os.SEEK_SET) + # TODO: overhead, pickling a bytes object, can just pass bytes in a tensor directly + # see detectron2/utils.comm.py + data_list = gather(storage.storage_impl.read(), dst=dst_rank) + if get_rank() != dst_rank: + return None + rank_to_buffer = {i: io.BytesIO(data_list[i]) for i in range(len(data_list))} + multiprocess_storage = MultiProcessRamTensorStorage(storage.data_schema, rank_to_buffer) + return multiprocess_storage + + +def _file_storage_gather( + storage: SingleProcessFileTensorStorage, + dst_rank: int = 0, + mode: str = "rb", +) -> Optional[MultiProcessFileTensorStorage]: + storage.storage_impl.close() + fpath_list = gather(storage.fpath, dst=dst_rank) + if get_rank() != dst_rank: + return None + rank_to_fpath = {i: fpath_list[i] for i in range(len(fpath_list))} + return MultiProcessFileTensorStorage(storage.data_schema, rank_to_fpath, mode) + + +def storage_gather( + storage: SingleProcessTensorStorage, dst_rank: int = 0 +) -> Optional[MultiProcessTensorStorage]: + if isinstance(storage, SingleProcessRamTensorStorage): + return _ram_storage_gather(storage, dst_rank) + elif isinstance(storage, SingleProcessFileTensorStorage): + return _file_storage_gather(storage, dst_rank) + raise Exception(f"Unsupported storage for gather operation: {storage}") diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/__init__.py new file mode 100644 index 0000000..4c49f6d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
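A quick usage sketch of the tensor storage classes above (a hedged example: it assumes detectron2 and this DensePose project are importable as `densepose.*`; the field names and shapes are illustrative, loosely following the chart-based schema built by the evaluator storage code earlier in this diff):

```python
import io
import torch
from densepose.evaluation.tensor_storage import SizeData, SingleProcessRamTensorStorage

# Illustrative schema: the record layout is fixed up front (dtype + shape per field).
schema = {
    "coarse_segm": SizeData(dtype="float32", shape=(2, 112, 112)),
    "fine_segm": SizeData(dtype="float32", shape=(25, 112, 112)),
}
storage = SingleProcessRamTensorStorage(schema, io.BytesIO())

# put() serializes fields in sorted(field name) order and returns the record id.
record_id = storage.put({
    "coarse_segm": torch.rand(2, 112, 112),
    "fine_segm": torch.rand(25, 112, 112),
})

# get() seeks to record_id * record_size and reconstructs the tensors.
record = storage.get(record_id)
print(record_id, record["coarse_segm"].shape, record["fine_segm"].shape)
```

The same schema drives both directions, which is why reads and writes must iterate the fields in the same (sorted) order.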
+ +from .confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType +from .filter import DensePoseDataFilter +from .inference import densepose_inference +from .utils import initialize_module_params +from .build import ( + build_densepose_data_filter, + build_densepose_embedder, + build_densepose_head, + build_densepose_losses, + build_densepose_predictor, +) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/build.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/build.py new file mode 100644 index 0000000..bb7f54b --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/build.py @@ -0,0 +1,87 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Optional +from torch import nn + +from detectron2.config import CfgNode + +from .cse.embedder import Embedder +from .filter import DensePoseDataFilter + + +def build_densepose_predictor(cfg: CfgNode, input_channels: int): + """ + Create an instance of DensePose predictor based on configuration options. + + Args: + cfg (CfgNode): configuration options + input_channels (int): input tensor size along the channel dimension + Return: + An instance of DensePose predictor + """ + from .predictors import DENSEPOSE_PREDICTOR_REGISTRY + + predictor_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME + return DENSEPOSE_PREDICTOR_REGISTRY.get(predictor_name)(cfg, input_channels) + + +def build_densepose_data_filter(cfg: CfgNode): + """ + Build DensePose data filter which selects data for training + + Args: + cfg (CfgNode): configuration options + + Return: + Callable: list(Tensor), list(Instances) -> list(Tensor), list(Instances) + An instance of DensePose filter, which takes feature tensors and proposals + as an input and returns filtered features and proposals + """ + dp_filter = DensePoseDataFilter(cfg) + return dp_filter + + +def build_densepose_head(cfg: CfgNode, input_channels: int): + """ + Build DensePose head based on configurations options + + Args: + cfg (CfgNode): configuration options + input_channels (int): input tensor size along the channel dimension + Return: + An instance of DensePose head + """ + from .roi_heads.registry import ROI_DENSEPOSE_HEAD_REGISTRY + + head_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME + return ROI_DENSEPOSE_HEAD_REGISTRY.get(head_name)(cfg, input_channels) + + +def build_densepose_losses(cfg: CfgNode): + """ + Build DensePose loss based on configurations options + + Args: + cfg (CfgNode): configuration options + Return: + An instance of DensePose loss + """ + from .losses import DENSEPOSE_LOSS_REGISTRY + + loss_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.LOSS_NAME + return DENSEPOSE_LOSS_REGISTRY.get(loss_name)(cfg) + + +def build_densepose_embedder(cfg: CfgNode) -> Optional[nn.Module]: + """ + Build embedder used to embed mesh vertices into an embedding space. + Embedder contains sub-embedders, one for each mesh ID. + + Args: + cfg (cfgNode): configuration options + Return: + Embedding module + """ + if cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS: + return Embedder(cfg) + return None diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/confidence.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/confidence.py new file mode 100644 index 0000000..6f4a72e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/confidence.py @@ -0,0 +1,73 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
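For reference, the MeshAlignmentEvaluator added earlier in this diff scores cross-mesh alignment by turning geodesic distances between embedding-matched key vertices into geodesic point similarity (GPS) values. A minimal standalone sketch of that conversion (the distance values below are made up for illustration):

```python
import torch

# Hypothetical geodesic distances between key vertices of mesh_1 and the
# vertices of mesh_2 selected by embedding similarity (argmax over sim_matrix_12).
geodists = torch.tensor([0.00, 0.10, 0.30, 0.60])

# Same kernel as in MeshAlignmentEvaluator.evaluate(): GPS = exp(-d^2 / (2 * k^2)),
# with k = 0.255 (the constant named Current_Mean_Distances in that file).
k = 0.255
gps = (-(geodists ** 2) / (2 * k ** 2)).exp()

print("GE  (mean geodesic error, lower is better):", geodists.mean().item())
print("GPS (mean point similarity, higher is better):", gps.mean().item())
```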
+ +from dataclasses import dataclass +from enum import Enum + +from detectron2.config import CfgNode + + +class DensePoseUVConfidenceType(Enum): + """ + Statistical model type for confidence learning, possible values: + - "iid_iso": statistically independent identically distributed residuals + with anisotropic covariance + - "indep_aniso": statistically independent residuals with anisotropic + covariances + For details, see: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 + """ + + # fmt: off + IID_ISO = "iid_iso" + INDEP_ANISO = "indep_aniso" + # fmt: on + + +@dataclass +class DensePoseUVConfidenceConfig: + """ + Configuration options for confidence on UV data + """ + + enabled: bool = False + # lower bound on UV confidences + epsilon: float = 0.01 + type: DensePoseUVConfidenceType = DensePoseUVConfidenceType.IID_ISO + + +@dataclass +class DensePoseSegmConfidenceConfig: + """ + Configuration options for confidence on segmentation + """ + + enabled: bool = False + # lower bound on confidence values + epsilon: float = 0.01 + + +@dataclass +class DensePoseConfidenceModelConfig: + """ + Configuration options for confidence models + """ + + # confidence for U and V values + uv_confidence: DensePoseUVConfidenceConfig + # segmentation confidence + segm_confidence: DensePoseSegmConfidenceConfig + + @staticmethod + def from_cfg(cfg: CfgNode) -> "DensePoseConfidenceModelConfig": + return DensePoseConfidenceModelConfig( + uv_confidence=DensePoseUVConfidenceConfig( + enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.ENABLED, + epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON, + type=DensePoseUVConfidenceType(cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE), + ), + segm_confidence=DensePoseSegmConfidenceConfig( + enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.ENABLED, + epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON, + ), + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/__init__.py new file mode 100644 index 0000000..a227360 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from .vertex_direct_embedder import VertexDirectEmbedder +from .vertex_feature_embedder import VertexFeatureEmbedder +from .embedder import Embedder diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/embedder.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/embedder.py new file mode 100644 index 0000000..7f52b06 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/embedder.py @@ -0,0 +1,130 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import logging +import numpy as np +import pickle +from enum import Enum +from typing import Optional +import torch +from torch import nn + +from detectron2.config import CfgNode +from detectron2.utils.file_io import PathManager + +from .vertex_direct_embedder import VertexDirectEmbedder +from .vertex_feature_embedder import VertexFeatureEmbedder + + +class EmbedderType(Enum): + """ + Embedder type which defines how vertices are mapped into the embedding space: + - "vertex_direct": direct vertex embedding + - "vertex_feature": embedding vertex features + """ + + VERTEX_DIRECT = "vertex_direct" + VERTEX_FEATURE = "vertex_feature" + + +def create_embedder(embedder_spec: CfgNode, embedder_dim: int) -> nn.Module: + """ + Create an embedder based on the provided configuration + + Args: + embedder_spec (CfgNode): embedder configuration + embedder_dim (int): embedding space dimensionality + Return: + An embedder instance for the specified configuration + Raises ValueError, in case of unexpected embedder type + """ + embedder_type = EmbedderType(embedder_spec.TYPE) + if embedder_type == EmbedderType.VERTEX_DIRECT: + embedder = VertexDirectEmbedder( + num_vertices=embedder_spec.NUM_VERTICES, + embed_dim=embedder_dim, + ) + if embedder_spec.INIT_FILE != "": + embedder.load(embedder_spec.INIT_FILE) + elif embedder_type == EmbedderType.VERTEX_FEATURE: + embedder = VertexFeatureEmbedder( + num_vertices=embedder_spec.NUM_VERTICES, + feature_dim=embedder_spec.FEATURE_DIM, + embed_dim=embedder_dim, + train_features=embedder_spec.FEATURES_TRAINABLE, + ) + if embedder_spec.INIT_FILE != "": + embedder.load(embedder_spec.INIT_FILE) + else: + raise ValueError(f"Unexpected embedder type {embedder_type}") + + if not embedder_spec.IS_TRAINABLE: + embedder.requires_grad_(False) + + return embedder + + +class Embedder(nn.Module): + """ + Embedder module that serves as a container for embedders to use with different + meshes. Extends Module to automatically save / load state dict. + """ + + DEFAULT_MODEL_CHECKPOINT_PREFIX = "roi_heads.embedder." + + def __init__(self, cfg: CfgNode): + """ + Initialize mesh embedders. An embedder for mesh `i` is stored in a submodule + "embedder_{i}". + + Args: + cfg (CfgNode): configuration options + """ + super(Embedder, self).__init__() + self.mesh_names = set() + embedder_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE + logger = logging.getLogger(__name__) + for mesh_name, embedder_spec in cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.items(): + logger.info(f"Adding embedder embedder_{mesh_name} with spec {embedder_spec}") + self.add_module(f"embedder_{mesh_name}", create_embedder(embedder_spec, embedder_dim)) + self.mesh_names.add(mesh_name) + if cfg.MODEL.WEIGHTS != "": + self.load_from_model_checkpoint(cfg.MODEL.WEIGHTS) + + def load_from_model_checkpoint(self, fpath: str, prefix: Optional[str] = None): + if prefix is None: + prefix = Embedder.DEFAULT_MODEL_CHECKPOINT_PREFIX + state_dict = None + if fpath.endswith(".pkl"): + with PathManager.open(fpath, "rb") as hFile: + state_dict = pickle.load(hFile, encoding="latin1") # pyre-ignore[6] + else: + with PathManager.open(fpath, "rb") as hFile: + # pyre-fixme[6]: For 1st param expected `Union[PathLike[typing.Any], + # IO[bytes], str, BinaryIO]` but got `Union[IO[bytes], IO[str]]`. 
+ state_dict = torch.load(hFile, map_location=torch.device("cpu")) + if state_dict is not None and "model" in state_dict: + state_dict_local = {} + for key in state_dict["model"]: + if key.startswith(prefix): + v_key = state_dict["model"][key] + if isinstance(v_key, np.ndarray): + v_key = torch.from_numpy(v_key) + state_dict_local[key[len(prefix) :]] = v_key + # non-strict loading to finetune on different meshes + self.load_state_dict(state_dict_local, strict=False) + + def forward(self, mesh_name: str) -> torch.Tensor: + """ + Produce vertex embeddings for the specific mesh; vertex embeddings are + a tensor of shape [N, D] where: + N = number of vertices + D = number of dimensions in the embedding space + Args: + mesh_name (str): name of a mesh for which to obtain vertex embeddings + Return: + Vertex embeddings, a tensor of shape [N, D] + """ + return getattr(self, f"embedder_{mesh_name}")() + + def has_embeddings(self, mesh_name: str) -> bool: + return hasattr(self, f"embedder_{mesh_name}") diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/utils.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/utils.py new file mode 100644 index 0000000..5c57998 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/utils.py @@ -0,0 +1,82 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import torch +from torch.nn import functional as F + + +def squared_euclidean_distance_matrix(pts1: torch.Tensor, pts2: torch.Tensor) -> torch.Tensor: + """ + Get squared Euclidean Distance Matrix + Computes pairwise squared Euclidean distances between points + + Args: + pts1: Tensor [M x D], M is the number of points, D is feature dimensionality + pts2: Tensor [N x D], N is the number of points, D is feature dimensionality + + Return: + Tensor [M, N]: matrix of squared Euclidean distances; at index (m, n) + it contains || pts1[m] - pts2[n] ||^2 + """ + edm = torch.mm(-2 * pts1, pts2.t()) + edm += (pts1 * pts1).sum(1, keepdim=True) + (pts2 * pts2).sum(1, keepdim=True).t() + return edm.contiguous() + + +def normalize_embeddings(embeddings: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor: + """ + Normalize N D-dimensional embedding vectors arranged in a tensor [N, D] + + Args: + embeddings (tensor [N, D]): N D-dimensional embedding vectors + epsilon (float): minimum value for a vector norm + Return: + Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1. 
+ """ + return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon) + + +def get_closest_vertices_mask_from_ES( + E: torch.Tensor, + S: torch.Tensor, + h: int, + w: int, + mesh_vertex_embeddings: torch.Tensor, + device: torch.device, +): + """ + Interpolate Embeddings and Segmentations to the size of a given bounding box, + and compute closest vertices and the segmentation mask + + Args: + E (tensor [1, D, H, W]): D-dimensional embedding vectors for every point of the + default-sized box + S (tensor [1, 2, H, W]): 2-dimensional segmentation mask for every point of the + default-sized box + h (int): height of the target bounding box + w (int): width of the target bounding box + mesh_vertex_embeddings (tensor [N, D]): vertex embeddings for a chosen mesh + N is the number of vertices in the mesh, D is feature dimensionality + device (torch.device): device to move the tensors to + Return: + Closest Vertices (tensor [h, w]), int, for every point of the resulting box + Segmentation mask (tensor [h, w]), boolean, for every point of the resulting box + """ + mesh_vertex_embeddings = mesh_vertex_embeddings[:6890,:] + embedding_resized = F.interpolate(E, size=(h, w), mode="bilinear")[0].to(device) + coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].to(device) + mask = coarse_segm_resized.argmax(0) > 0 + closest_vertices = torch.zeros(mask.shape, dtype=torch.long, device=device) + all_embeddings = embedding_resized[:, mask].t() + size_chunk = 10_000 # Chunking to avoid possible OOM + edm = [] + if len(all_embeddings) == 0: + return closest_vertices, mask + for chunk in range((len(all_embeddings) - 1) // size_chunk + 1): + chunk_embeddings = all_embeddings[size_chunk * chunk : size_chunk * (chunk + 1)] + edm.append( + torch.argmin( + squared_euclidean_distance_matrix(chunk_embeddings, mesh_vertex_embeddings), dim=1 + ) + ) + closest_vertices[mask] = torch.cat(edm) + return closest_vertices, mask diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/vertex_direct_embedder.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/vertex_direct_embedder.py new file mode 100644 index 0000000..60fba27 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/vertex_direct_embedder.py @@ -0,0 +1,64 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import pickle +import torch +from torch import nn + +from detectron2.utils.file_io import PathManager + +from .utils import normalize_embeddings + + +class VertexDirectEmbedder(nn.Module): + """ + Class responsible for embedding vertices. 
Vertex embeddings take + the form of a tensor of size [N, D], where + N = number of vertices + D = number of dimensions in the embedding space + """ + + def __init__(self, num_vertices: int, embed_dim: int): + """ + Initialize embedder, set random embeddings + + Args: + num_vertices (int): number of vertices to embed + embed_dim (int): number of dimensions in the embedding space + """ + super(VertexDirectEmbedder, self).__init__() + self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim)) + self.reset_parameters() + + @torch.no_grad() + def reset_parameters(self): + """ + Reset embeddings to random values + """ + self.embeddings.zero_() + + def forward(self) -> torch.Tensor: + """ + Produce vertex embeddings, a tensor of shape [N, D] where: + N = number of vertices + D = number of dimensions in the embedding space + + Return: + Full vertex embeddings, a tensor of shape [N, D] + """ + return normalize_embeddings(self.embeddings) + + @torch.no_grad() + def load(self, fpath: str): + """ + Load data from a file + + Args: + fpath (str): file path to load data from + """ + with PathManager.open(fpath, "rb") as hFile: + data = pickle.load(hFile) # pyre-ignore[6] + for name in ["embeddings"]: + if name in data: + getattr(self, name).copy_( + torch.tensor(data[name]).float().to(device=getattr(self, name).device) + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/vertex_feature_embedder.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/vertex_feature_embedder.py new file mode 100644 index 0000000..dcb2f20 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/cse/vertex_feature_embedder.py @@ -0,0 +1,75 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import pickle +import torch +from torch import nn + +from detectron2.utils.file_io import PathManager + +from .utils import normalize_embeddings + + +class VertexFeatureEmbedder(nn.Module): + """ + Class responsible for embedding vertex features. 
Mapping from + feature space to the embedding space is a tensor of size [K, D], where + K = number of dimensions in the feature space + D = number of dimensions in the embedding space + Vertex features is a tensor of size [N, K], where + N = number of vertices + K = number of dimensions in the feature space + Vertex embeddings are computed as F * E = tensor of size [N, D] + """ + + def __init__( + self, num_vertices: int, feature_dim: int, embed_dim: int, train_features: bool = False + ): + """ + Initialize embedder, set random embeddings + + Args: + num_vertices (int): number of vertices to embed + feature_dim (int): number of dimensions in the feature space + embed_dim (int): number of dimensions in the embedding space + train_features (bool): determines whether vertex features should + be trained (default: False) + """ + super(VertexFeatureEmbedder, self).__init__() + if train_features: + self.features = nn.Parameter(torch.Tensor(num_vertices, feature_dim)) + else: + self.register_buffer("features", torch.Tensor(num_vertices, feature_dim)) + self.embeddings = nn.Parameter(torch.Tensor(feature_dim, embed_dim)) + self.reset_parameters() + + @torch.no_grad() + def reset_parameters(self): + self.features.zero_() + self.embeddings.zero_() + + def forward(self) -> torch.Tensor: + """ + Produce vertex embeddings, a tensor of shape [N, D] where: + N = number of vertices + D = number of dimensions in the embedding space + + Return: + Full vertex embeddings, a tensor of shape [N, D] + """ + return normalize_embeddings(torch.mm(self.features, self.embeddings)) + + @torch.no_grad() + def load(self, fpath: str): + """ + Load data from a file + + Args: + fpath (str): file path to load data from + """ + with PathManager.open(fpath, "rb") as hFile: + data = pickle.load(hFile) # pyre-ignore[6] + for name in ["features", "embeddings"]: + if name in data: + getattr(self, name).copy_( + torch.tensor(data[name]).float().to(device=getattr(self, name).device) + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/densepose_checkpoint.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/densepose_checkpoint.py new file mode 100644 index 0000000..8c2b4f2 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/densepose_checkpoint.py @@ -0,0 +1,35 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from collections import OrderedDict + +from detectron2.checkpoint import DetectionCheckpointer + + +def _rename_HRNet_weights(weights): + # We detect and rename HRNet weights for DensePose. 1956 and 1716 are values that are + # common to all HRNet pretrained weights, and should be enough to accurately identify them + if ( + len(weights["model"].keys()) == 1956 + and len([k for k in weights["model"].keys() if k.startswith("stage")]) == 1716 + ): + hrnet_weights = OrderedDict() + for k in weights["model"].keys(): + hrnet_weights["backbone.bottom_up." 
+ str(k)] = weights["model"][k] + return {"model": hrnet_weights} + else: + return weights + + +class DensePoseCheckpointer(DetectionCheckpointer): + """ + Same as :class:`DetectionCheckpointer`, but is able to handle HRNet weights + """ + + def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables): + super().__init__(model, save_dir, save_to_disk=save_to_disk, **checkpointables) + + def _load_file(self, filename: str) -> object: + """ + Adding hrnet support + """ + weights = super()._load_file(filename) + return _rename_HRNet_weights(weights) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/filter.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/filter.py new file mode 100644 index 0000000..18a8567 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/filter.py @@ -0,0 +1,94 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import List +import torch + +from detectron2.config import CfgNode +from detectron2.structures import Instances +from detectron2.structures.boxes import matched_pairwise_iou + + +class DensePoseDataFilter(object): + def __init__(self, cfg: CfgNode): + self.iou_threshold = cfg.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD + self.keep_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS + + @torch.no_grad() + def __call__(self, features: List[torch.Tensor], proposals_with_targets: List[Instances]): + """ + Filters proposals with targets to keep only the ones relevant for + DensePose training + + Args: + features (list[Tensor]): input data as a list of features, + each feature is a tensor. Axis 0 represents the number of + images `N` in the input data; axes 1-3 are channels, + height, and width, which may vary between features + (e.g., if a feature pyramid is used). + proposals_with_targets (list[Instances]): length `N` list of + `Instances`. The i-th `Instances` contains instances + (proposals, GT) for the i-th input image, + Returns: + list[Tensor]: filtered features + list[Instances]: filtered proposals + """ + proposals_filtered = [] + # TODO: the commented out code was supposed to correctly deal with situations + # where no valid DensePose GT is available for certain images. The corresponding + # image features were sliced and proposals were filtered. This led to performance + # deterioration, both in terms of runtime and in terms of evaluation results. 
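The HRNet checkpoint handling above boils down to a key-renaming pass. A toy illustration of just that renaming step (the real `_rename_HRNet_weights` additionally checks the 1956/1716 key counts before deciding the weights are HRNet weights; the keys below are made up):

```python
from collections import OrderedDict

# Toy state dict mimicking HRNet pretrained weights.
weights = {"model": OrderedDict([("stage2.0.conv1.weight", 0), ("conv1.weight", 1)])}

# Move every key under the "backbone.bottom_up." namespace expected by Detectron2.
renamed = OrderedDict(
    ("backbone.bottom_up." + k, v) for k, v in weights["model"].items()
)
print(list(renamed.keys()))
# ['backbone.bottom_up.stage2.0.conv1.weight', 'backbone.bottom_up.conv1.weight']
```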
+ # + # feature_mask = torch.ones( + # len(proposals_with_targets), + # dtype=torch.bool, + # device=features[0].device if len(features) > 0 else torch.device("cpu"), + # ) + for i, proposals_per_image in enumerate(proposals_with_targets): + if not proposals_per_image.has("gt_densepose") and ( + not proposals_per_image.has("gt_masks") or not self.keep_masks + ): + # feature_mask[i] = 0 + continue + gt_boxes = proposals_per_image.gt_boxes + est_boxes = proposals_per_image.proposal_boxes + # apply match threshold for densepose head + iou = matched_pairwise_iou(gt_boxes, est_boxes) + iou_select = iou > self.iou_threshold + proposals_per_image = proposals_per_image[iou_select] # pyre-ignore[6] + + N_gt_boxes = len(proposals_per_image.gt_boxes) + assert N_gt_boxes == len(proposals_per_image.proposal_boxes), ( + f"The number of GT boxes {N_gt_boxes} is different from the " + f"number of proposal boxes {len(proposals_per_image.proposal_boxes)}" + ) + # filter out any target without suitable annotation + if self.keep_masks: + gt_masks = ( + proposals_per_image.gt_masks + if hasattr(proposals_per_image, "gt_masks") + else [None] * N_gt_boxes + ) + else: + gt_masks = [None] * N_gt_boxes + gt_densepose = ( + proposals_per_image.gt_densepose + if hasattr(proposals_per_image, "gt_densepose") + else [None] * N_gt_boxes + ) + assert len(gt_masks) == N_gt_boxes + assert len(gt_densepose) == N_gt_boxes + selected_indices = [ + i + for i, (dp_target, mask_target) in enumerate(zip(gt_densepose, gt_masks)) + if (dp_target is not None) or (mask_target is not None) + ] + # if not len(selected_indices): + # feature_mask[i] = 0 + # continue + if len(selected_indices) != N_gt_boxes: + proposals_per_image = proposals_per_image[selected_indices] # pyre-ignore[6] + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes) + proposals_filtered.append(proposals_per_image) + # features_filtered = [feature[feature_mask] for feature in features] + # return features_filtered, proposals_filtered + return features, proposals_filtered diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/hrfpn.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/hrfpn.py new file mode 100644 index 0000000..08ec420 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/hrfpn.py @@ -0,0 +1,182 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +MIT License +Copyright (c) 2019 Microsoft +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
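The data filter above keeps only proposals whose IoU with their matched ground-truth box exceeds `FG_IOU_THRESHOLD` and which actually carry DensePose (or mask) annotations. A standalone sketch of the IoU-threshold step on matched box pairs; `matched_iou` here is a local stand-in analogous to detectron2's `matched_pairwise_iou`, and the boxes and threshold are illustrative:

```python
import torch

def matched_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    # IoU between corresponding rows of two [N, 4] box tensors (x1, y1, x2, y2).
    lt = torch.max(boxes1[:, :2], boxes2[:, :2])
    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter / (area1 + area2 - inter)

gt_boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
proposal_boxes = torch.tensor([[0.5, 0.5, 10.5, 10.5], [20.0, 20.0, 30.0, 30.0]])

iou = matched_iou(gt_boxes, proposal_boxes)
keep = iou > 0.7   # plays the role of FG_IOU_THRESHOLD
print(iou, keep)   # only the first proposal survives the threshold
```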
+""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from detectron2.layers import ShapeSpec +from detectron2.modeling.backbone import BACKBONE_REGISTRY +from detectron2.modeling.backbone.backbone import Backbone + +from .hrnet import build_pose_hrnet_backbone + + +class HRFPN(Backbone): + """HRFPN (High Resolution Feature Pyramids) + Transforms outputs of HRNet backbone so they are suitable for the ROI_heads + arXiv: https://arxiv.org/abs/1904.04514 + Adapted from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/hrfpn.py + Args: + bottom_up: (list) output of HRNet + in_features (list): names of the input features (output of HRNet) + in_channels (list): number of channels for each branch + out_channels (int): output channels of feature pyramids + n_out_features (int): number of output stages + pooling (str): pooling for generating feature pyramids (from {MAX, AVG}) + share_conv (bool): Have one conv per output, or share one with all the outputs + """ + + def __init__( + self, + bottom_up, + in_features, + n_out_features, + in_channels, + out_channels, + pooling="AVG", + share_conv=False, + ): + super(HRFPN, self).__init__() + assert isinstance(in_channels, list) + self.bottom_up = bottom_up + self.in_features = in_features + self.n_out_features = n_out_features + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.share_conv = share_conv + + if self.share_conv: + self.fpn_conv = nn.Conv2d( + in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1 + ) + else: + self.fpn_conv = nn.ModuleList() + for _ in range(self.n_out_features): + self.fpn_conv.append( + nn.Conv2d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + padding=1, + ) + ) + + # Custom change: Replaces a simple bilinear interpolation + self.interp_conv = nn.ModuleList() + for i in range(len(self.in_features)): + self.interp_conv.append( + nn.Sequential( + nn.ConvTranspose2d( + in_channels=in_channels[i], + out_channels=in_channels[i], + kernel_size=4, + stride=2**i, + padding=0, + output_padding=0, + bias=False, + ), + nn.BatchNorm2d(in_channels[i], momentum=0.1), + nn.ReLU(inplace=True), + ) + ) + + # Custom change: Replaces a couple (reduction conv + pooling) by one conv + self.reduction_pooling_conv = nn.ModuleList() + for i in range(self.n_out_features): + self.reduction_pooling_conv.append( + nn.Sequential( + nn.Conv2d(sum(in_channels), out_channels, kernel_size=2**i, stride=2**i), + nn.BatchNorm2d(out_channels, momentum=0.1), + nn.ReLU(inplace=True), + ) + ) + + if pooling == "MAX": + self.pooling = F.max_pool2d + else: + self.pooling = F.avg_pool2d + + self._out_features = [] + self._out_feature_channels = {} + self._out_feature_strides = {} + + for i in range(self.n_out_features): + self._out_features.append("p%d" % (i + 1)) + self._out_feature_channels.update({self._out_features[-1]: self.out_channels}) + self._out_feature_strides.update({self._out_features[-1]: 2 ** (i + 2)}) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, a=1) + nn.init.constant_(m.bias, 0) + + def forward(self, inputs): + bottom_up_features = self.bottom_up(inputs) + assert len(bottom_up_features) == len(self.in_features) + inputs = [bottom_up_features[f] for 
f in self.in_features] + + outs = [] + for i in range(len(inputs)): + outs.append(self.interp_conv[i](inputs[i])) + shape_2 = min(o.shape[2] for o in outs) + shape_3 = min(o.shape[3] for o in outs) + out = torch.cat([o[:, :, :shape_2, :shape_3] for o in outs], dim=1) + outs = [] + for i in range(self.n_out_features): + outs.append(self.reduction_pooling_conv[i](out)) + for i in range(len(outs)): # Make shapes consistent + outs[-1 - i] = outs[-1 - i][ + :, :, : outs[-1].shape[2] * 2**i, : outs[-1].shape[3] * 2**i + ] + outputs = [] + for i in range(len(outs)): + if self.share_conv: + outputs.append(self.fpn_conv(outs[i])) + else: + outputs.append(self.fpn_conv[i](outs[i])) + + assert len(self._out_features) == len(outputs) + return dict(zip(self._out_features, outputs)) + + +@BACKBONE_REGISTRY.register() +def build_hrfpn_backbone(cfg, input_shape: ShapeSpec) -> HRFPN: + + in_channels = cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS + in_features = ["p%d" % (i + 1) for i in range(cfg.MODEL.HRNET.STAGE4.NUM_BRANCHES)] + n_out_features = len(cfg.MODEL.ROI_HEADS.IN_FEATURES) + out_channels = cfg.MODEL.HRNET.HRFPN.OUT_CHANNELS + hrnet = build_pose_hrnet_backbone(cfg, input_shape) + hrfpn = HRFPN( + hrnet, + in_features, + n_out_features, + in_channels, + out_channels, + pooling="AVG", + share_conv=False, + ) + + return hrfpn diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/hrnet.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/hrnet.py new file mode 100644 index 0000000..ca24671 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/hrnet.py @@ -0,0 +1,474 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
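One detail of HRFPN.forward() above worth calling out: after the per-branch transposed-conv upsampling, branch outputs can disagree by a pixel or two, so they are cropped to the smallest common height/width before being concatenated along the channel axis. A standalone sketch of that step (shapes are illustrative):

```python
import torch

# Upsampled branch outputs whose spatial sizes do not quite agree.
outs = [
    torch.randn(1, 32, 130, 130),
    torch.randn(1, 64, 128, 129),
    torch.randn(1, 128, 129, 128),
]

# Crop every branch to the common minimum size, then concatenate channels,
# mirroring the cropping logic in HRFPN.forward().
shape_2 = min(o.shape[2] for o in outs)
shape_3 = min(o.shape[3] for o in outs)
out = torch.cat([o[:, :, :shape_2, :shape_3] for o in outs], dim=1)
print(out.shape)  # torch.Size([1, 224, 128, 128])
```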
+# Written by Bin Xiao (leoxiaobin@gmail.com) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# Adapted from https://github.com/HRNet/Higher-HRNet-Human-Pose-Estimation/blob/master/lib/models/pose_higher_hrnet.py # noqa +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import, division, print_function +import logging +import torch.nn as nn + +from detectron2.layers import ShapeSpec +from detectron2.modeling.backbone import BACKBONE_REGISTRY +from detectron2.modeling.backbone.backbone import Backbone + +BN_MOMENTUM = 0.1 +logger = logging.getLogger(__name__) + +__all__ = ["build_pose_hrnet_backbone", "PoseHigherResolutionNet"] + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + """HighResolutionModule + Building block of the PoseHigherResolutionNet (see lower) + arXiv: https://arxiv.org/abs/1908.10357 + Args: + num_branches (int): number of branches of the modyle + blocks (str): type of block of the module + num_blocks (int): number of blocks of the module + num_inchannels (int): number of input channels of the module + num_channels (list): number of channels of each branch + multi_scale_output (bool): only used by the last module of PoseHigherResolutionNet + """ + + def __init__( + self, + num_branches, + blocks, + num_blocks, + num_inchannels, + num_channels, + multi_scale_output=True, + ): + super(HighResolutionModule, self).__init__() + self._check_branches(num_branches, blocks, num_blocks, 
num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(True) + + def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): + if num_branches != len(num_blocks): + error_msg = "NUM_BRANCHES({}) <> NUM_BLOCKS({})".format(num_branches, len(num_blocks)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = "NUM_BRANCHES({}) <> NUM_CHANNELS({})".format( + num_branches, len(num_channels) + ) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_inchannels): + error_msg = "NUM_BRANCHES({}) <> NUM_INCHANNELS({})".format( + num_branches, len(num_inchannels) + ) + logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): + downsample = None + if ( + stride != 1 + or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion + ): + downsample = nn.Sequential( + nn.Conv2d( + self.num_inchannels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + ), + nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append( + block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample) + ) + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for _ in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), + nn.BatchNorm2d(num_inchannels[i]), + nn.Upsample(scale_factor=2 ** (j - i), mode="nearest"), + ) + ) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append( + nn.Sequential( + nn.Conv2d( + num_inchannels[j], + num_outchannels_conv3x3, + 3, + 2, + 1, + bias=False, + ), + nn.BatchNorm2d(num_outchannels_conv3x3), + ) + ) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append( + nn.Sequential( + nn.Conv2d( + num_inchannels[j], + num_outchannels_conv3x3, + 3, + 2, + 1, + bias=False, + ), + nn.BatchNorm2d(num_outchannels_conv3x3), + nn.ReLU(True), + ) + ) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = 
self.branches[i](x[i]) + + x_fuse = [] + + for i in range(len(self.fuse_layers)): + y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + z = self.fuse_layers[i][j](x[j])[:, :, : y.shape[2], : y.shape[3]] + y = y + z + x_fuse.append(self.relu(y)) + + return x_fuse + + +blocks_dict = {"BASIC": BasicBlock, "BOTTLENECK": Bottleneck} + + +class PoseHigherResolutionNet(Backbone): + """PoseHigherResolutionNet + Composed of several HighResolutionModule tied together with ConvNets + Adapted from the GitHub version to fit with HRFPN and the Detectron2 infrastructure + arXiv: https://arxiv.org/abs/1908.10357 + """ + + def __init__(self, cfg, **kwargs): + self.inplanes = cfg.MODEL.HRNET.STEM_INPLANES + super(PoseHigherResolutionNet, self).__init__() + + # stem net + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.layer1 = self._make_layer(Bottleneck, 64, 4) + + self.stage2_cfg = cfg.MODEL.HRNET.STAGE2 + num_channels = self.stage2_cfg.NUM_CHANNELS + block = blocks_dict[self.stage2_cfg.BLOCK] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([256], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg.MODEL.HRNET.STAGE3 + num_channels = self.stage3_cfg.NUM_CHANNELS + block = blocks_dict[self.stage3_cfg.BLOCK] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg.MODEL.HRNET.STAGE4 + num_channels = self.stage4_cfg.NUM_CHANNELS + block = blocks_dict[self.stage4_cfg.BLOCK] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels, multi_scale_output=True + ) + + self._out_features = [] + self._out_feature_channels = {} + self._out_feature_strides = {} + + for i in range(cfg.MODEL.HRNET.STAGE4.NUM_BRANCHES): + self._out_features.append("p%d" % (i + 1)) + self._out_feature_channels.update( + {self._out_features[-1]: cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS[i]} + ) + self._out_feature_strides.update({self._out_features[-1]: 1}) + + def _get_deconv_cfg(self, deconv_kernel): + if deconv_kernel == 4: + padding = 1 + output_padding = 0 + elif deconv_kernel == 3: + padding = 1 + output_padding = 1 + elif deconv_kernel == 2: + padding = 0 + output_padding = 0 + + return deconv_kernel, padding, output_padding + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + nn.Conv2d( + num_channels_pre_layer[i], + num_channels_cur_layer[i], + 3, + 1, + 1, 
+ bias=False, + ), + nn.BatchNorm2d(num_channels_cur_layer[i]), + nn.ReLU(inplace=True), + ) + ) + else: + transition_layers.append(None) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = ( + num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels + ) + conv3x3s.append( + nn.Sequential( + nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels), + nn.ReLU(inplace=True), + ) + ) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + ), + nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True): + num_modules = layer_config["NUM_MODULES"] + num_branches = layer_config["NUM_BRANCHES"] + num_blocks = layer_config["NUM_BLOCKS"] + num_channels = layer_config["NUM_CHANNELS"] + block = blocks_dict[layer_config["BLOCK"]] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + if not multi_scale_output and i == num_modules - 1: + reset_multi_scale_output = False + else: + reset_multi_scale_output = True + + modules.append( + HighResolutionModule( + num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + reset_multi_scale_output, + ) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg.NUM_BRANCHES): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg.NUM_BRANCHES): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg.NUM_BRANCHES): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + assert len(self._out_features) == len(y_list) + return dict(zip(self._out_features, y_list)) # final_outputs + + +@BACKBONE_REGISTRY.register() +def build_pose_hrnet_backbone(cfg, input_shape: ShapeSpec): + model = PoseHigherResolutionNet(cfg) + return model diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/inference.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/inference.py new file mode 100644 index 0000000..8104964 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/inference.py @@ -0,0 +1,44 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
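The key idea of HighResolutionModule above is that branches at different resolutions exchange information in the fuse layers: a lower-resolution branch is projected with a 1x1 convolution, batch-normalized, and upsampled before being added to a higher-resolution branch (the opposite direction uses strided 3x3 convolutions). A simplified two-branch sketch of the upsample-and-add direction; channel counts and sizes here are arbitrary, not taken from any config in this diff:

```python
import torch
import torch.nn as nn

# Two branches: high resolution with 32 channels, low resolution with 64 channels.
x_hi = torch.randn(1, 32, 64, 64)
x_lo = torch.randn(1, 64, 32, 32)

# Fuse low -> high, mirroring the j > i case in _make_fuse_layers():
# 1x1 conv to match channels, BatchNorm, nearest-neighbor upsampling, then add.
fuse_lo_to_hi = nn.Sequential(
    nn.Conv2d(64, 32, kernel_size=1, stride=1, padding=0, bias=False),
    nn.BatchNorm2d(32),
    nn.Upsample(scale_factor=2, mode="nearest"),
)
y_hi = torch.relu(x_hi + fuse_lo_to_hi(x_lo))
print(y_hi.shape)  # torch.Size([1, 32, 64, 64])
```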
+from dataclasses import fields +from typing import Any, List +import torch + +from detectron2.structures import Instances + + +def densepose_inference(densepose_predictor_output: Any, detections: List[Instances]) -> None: + """ + Splits DensePose predictor outputs into chunks, each chunk corresponds to + detections on one image. Predictor output chunks are stored in `pred_densepose` + attribute of the corresponding `Instances` object. + + Args: + densepose_predictor_output: a dataclass instance (can be of different types, + depending on predictor used for inference). Each field can be `None` + (if the corresponding output was not inferred) or a tensor of size + [N, ...], where N = N_1 + N_2 + .. + N_k is a total number of + detections on all images, N_1 is the number of detections on image 1, + N_2 is the number of detections on image 2, etc. + detections: a list of objects of type `Instance`, k-th object corresponds + to detections on k-th image. + """ + k = 0 + for detection_i in detections: + if densepose_predictor_output is None: + # don't add `pred_densepose` attribute + continue + n_i = detection_i.__len__() + + PredictorOutput = type(densepose_predictor_output) + output_i_dict = {} + # we assume here that `densepose_predictor_output` is a dataclass object + for field in fields(densepose_predictor_output): + field_value = getattr(densepose_predictor_output, field.name) + # slice tensors + if isinstance(field_value, torch.Tensor): + output_i_dict[field.name] = field_value[k : k + n_i] + # leave others as is + else: + output_i_dict[field.name] = field_value + detection_i.pred_densepose = PredictorOutput(**output_i_dict) + k += n_i diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/__init__.py new file mode 100644 index 0000000..e5c5937 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .chart import DensePoseChartLoss +from .chart_with_confidences import DensePoseChartWithConfidenceLoss +from .cse import DensePoseCseLoss +from .registry import DENSEPOSE_LOSS_REGISTRY + + +__all__ = [ + "DensePoseChartLoss", + "DensePoseChartWithConfidenceLoss", + "DensePoseCseLoss", + "DENSEPOSE_LOSS_REGISTRY", +] diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/chart.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/chart.py new file mode 100644 index 0000000..02cdae8 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/chart.py @@ -0,0 +1,291 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any, List +import torch +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from .mask_or_segm import MaskOrSegmentationLoss +from .registry import DENSEPOSE_LOSS_REGISTRY +from .utils import ( + BilinearInterpolationHelper, + ChartBasedAnnotationsAccumulator, + LossDict, + extract_packed_annotations_from_matches, +) + + +@DENSEPOSE_LOSS_REGISTRY.register() +class DensePoseChartLoss: + """ + DensePose loss for chart-based training. A mesh is split into charts, + each chart is given a label (I) and parametrized by 2 coordinates referred to + as U and V. 
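densepose_inference() above relies on predictor outputs being packed along the first dimension across all detections in the batch, so per-image chunks can be recovered from the per-image detection counts alone. A minimal standalone sketch of that slicing (counts and tensor shapes are illustrative):

```python
import torch

# Packed predictor output for a batch with 3 + 1 + 2 detections across 3 images.
counts = [3, 1, 2]
coarse_segm = torch.randn(sum(counts), 2, 112, 112)

# Same bookkeeping as densepose_inference(): walk the packed tensor with a cursor.
k = 0
per_image = []
for n_i in counts:
    per_image.append(coarse_segm[k : k + n_i])
    k += n_i

print([chunk.shape[0] for chunk in per_image])  # [3, 1, 2]
```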
Ground truth consists of a number of points annotated with + I, U and V values and coarse segmentation S defined for all pixels of the + object bounding box. In some cases (see `COARSE_SEGM_TRAINED_BY_MASKS`), + semantic segmentation annotations can be used as ground truth inputs as well. + + Estimated values are tensors: + * U coordinates, tensor of shape [N, C, S, S] + * V coordinates, tensor of shape [N, C, S, S] + * fine segmentation estimates, tensor of shape [N, C, S, S] with raw unnormalized + scores for each fine segmentation label at each location + * coarse segmentation estimates, tensor of shape [N, D, S, S] with raw unnormalized + scores for each coarse segmentation label at each location + where N is the number of detections, C is the number of fine segmentation + labels, S is the estimate size ( = width = height) and D is the number of + coarse segmentation channels. + + The losses are: + * regression (smooth L1) loss for U and V coordinates + * cross entropy loss for fine (I) and coarse (S) segmentations + Each loss has an associated weight + """ + + def __init__(self, cfg: CfgNode): + """ + Initialize chart-based loss from configuration options + + Args: + cfg (CfgNode): configuration options + """ + # fmt: off + self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE + self.w_points = cfg.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS + self.w_part = cfg.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS + self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS + self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + # fmt: on + self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS + self.segm_loss = MaskOrSegmentationLoss(cfg) + + def __call__( + self, proposals_with_gt: List[Instances], densepose_predictor_outputs: Any, **kwargs + ) -> LossDict: + """ + Produce chart-based DensePose losses + + Args: + proposals_with_gt (list of Instances): detections with associated ground truth data + densepose_predictor_outputs: an object of a dataclass that contains predictor outputs + with estimated values; assumed to have the following attributes: + * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S] + * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S] + * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S] + * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S] + where N is the number of detections, C is the number of fine segmentation + labels, S is the estimate size ( = width = height) and D is the number of + coarse segmentation channels. + + Return: + dict: str -> tensor: dict of losses with the following entries: + * `loss_densepose_U`: smooth L1 loss for U coordinate estimates + * `loss_densepose_V`: smooth L1 loss for V coordinate estimates + * `loss_densepose_I`: cross entropy for raw unnormalized scores for fine + segmentation estimates given ground truth labels; + * `loss_densepose_S`: cross entropy for raw unnormalized scores for coarse + segmentation estimates given ground truth labels; + """ + # densepose outputs are computed for all images and all bounding boxes; + # i.e. 
if a batch has 4 images with (3, 1, 2, 1) proposals respectively, + # the outputs will have size(0) == 3+1+2+1 == 7 + + if not len(proposals_with_gt): + return self.produce_fake_densepose_losses(densepose_predictor_outputs) + + accumulator = ChartBasedAnnotationsAccumulator() + packed_annotations = extract_packed_annotations_from_matches(proposals_with_gt, accumulator) + + # NOTE: we need to keep the same computation graph on all the GPUs to + # perform reduction properly. Hence even if we have no data on one + # of the GPUs, we still need to generate the computation graph. + # Add fake (zero) loss in the form Tensor.sum() * 0 + if packed_annotations is None: + return self.produce_fake_densepose_losses(densepose_predictor_outputs) + + h, w = densepose_predictor_outputs.u.shape[2:] + interpolator = BilinearInterpolationHelper.from_matches( + packed_annotations, + (h, w), + ) + + j_valid_fg = interpolator.j_valid * ( # pyre-ignore[16] + packed_annotations.fine_segm_labels_gt > 0 + ) + # pyre-fixme[6]: For 1st param expected `Tensor` but got `int`. + if not torch.any(j_valid_fg): + return self.produce_fake_densepose_losses(densepose_predictor_outputs) + + losses_uv = self.produce_densepose_losses_uv( + proposals_with_gt, + densepose_predictor_outputs, + packed_annotations, + interpolator, + j_valid_fg, # pyre-ignore[6] + ) + + losses_segm = self.produce_densepose_losses_segm( + proposals_with_gt, + densepose_predictor_outputs, + packed_annotations, + interpolator, + j_valid_fg, # pyre-ignore[6] + ) + + return {**losses_uv, **losses_segm} + + def produce_fake_densepose_losses(self, densepose_predictor_outputs: Any) -> LossDict: + """ + Fake losses for fine segmentation and U/V coordinates. These are used when + no suitable ground truth data was found in a batch. The loss has a value 0 + and is primarily used to construct the computation graph, so that + `DistributedDataParallel` has similar graphs on all GPUs and can perform + reduction properly. + + Args: + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have the following attributes: + * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S] + * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S] + * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S] + Return: + dict: str -> tensor: dict of losses with the following entries: + * `loss_densepose_U`: has value 0 + * `loss_densepose_V`: has value 0 + * `loss_densepose_I`: has value 0 + * `loss_densepose_S`: has value 0 + """ + losses_uv = self.produce_fake_densepose_losses_uv(densepose_predictor_outputs) + losses_segm = self.produce_fake_densepose_losses_segm(densepose_predictor_outputs) + return {**losses_uv, **losses_segm} + + def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict: + """ + Fake losses for U/V coordinates. These are used when no suitable ground + truth data was found in a batch. The loss has a value 0 + and is primarily used to construct the computation graph, so that + `DistributedDataParallel` has similar graphs on all GPUs and can perform + reduction properly. 
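A brief editorial aside (not part of the patch): the `Tensor.sum() * 0` trick referenced above yields a loss that is numerically zero yet still connected to the predictor outputs, so every rank builds the same autograd graph and `DistributedDataParallel` can reduce gradients even when a GPU received no usable ground truth. A minimal, self-contained sketch with made-up shapes:

```python
# Minimal sketch of the zero-valued "fake loss" pattern used by
# produce_fake_densepose_losses_* (illustrative only; shapes are invented).
import torch

def fake_uv_losses(u: torch.Tensor, v: torch.Tensor) -> dict:
    # numerically zero, but the graph still touches u and v
    return {"loss_densepose_U": u.sum() * 0, "loss_densepose_V": v.sum() * 0}

u = torch.randn(2, 25, 112, 112, requires_grad=True)
v = torch.randn(2, 25, 112, 112, requires_grad=True)
total = sum(fake_uv_losses(u, v).values())
total.backward()
# gradients exist (graph is intact) and are exactly zero
assert u.grad is not None and float(u.grad.abs().sum()) == 0.0
```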
+ + Args: + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have the following attributes: + * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S] + * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S] + Return: + dict: str -> tensor: dict of losses with the following entries: + * `loss_densepose_U`: has value 0 + * `loss_densepose_V`: has value 0 + """ + return { + "loss_densepose_U": densepose_predictor_outputs.u.sum() * 0, + "loss_densepose_V": densepose_predictor_outputs.v.sum() * 0, + } + + def produce_fake_densepose_losses_segm(self, densepose_predictor_outputs: Any) -> LossDict: + """ + Fake losses for fine / coarse segmentation. These are used when + no suitable ground truth data was found in a batch. The loss has a value 0 + and is primarily used to construct the computation graph, so that + `DistributedDataParallel` has similar graphs on all GPUs and can perform + reduction properly. + + Args: + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have the following attributes: + * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S] + * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S] + Return: + dict: str -> tensor: dict of losses with the following entries: + * `loss_densepose_I`: has value 0 + * `loss_densepose_S`: has value 0, added only if `segm_trained_by_masks` is False + """ + losses = { + "loss_densepose_I": densepose_predictor_outputs.fine_segm.sum() * 0, + "loss_densepose_S": self.segm_loss.fake_value(densepose_predictor_outputs), + } + return losses + + def produce_densepose_losses_uv( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: Any, + interpolator: BilinearInterpolationHelper, + j_valid_fg: torch.Tensor, + ) -> LossDict: + """ + Compute losses for U/V coordinates: smooth L1 loss between + estimated coordinates and the ground truth. + + Args: + proposals_with_gt (list of Instances): detections with associated ground truth data + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have the following attributes: + * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S] + * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S] + Return: + dict: str -> tensor: dict of losses with the following entries: + * `loss_densepose_U`: smooth L1 loss for U coordinate estimates + * `loss_densepose_V`: smooth L1 loss for V coordinate estimates + """ + u_gt = packed_annotations.u_gt[j_valid_fg] + u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg] + v_gt = packed_annotations.v_gt[j_valid_fg] + v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg] + return { + "loss_densepose_U": F.smooth_l1_loss(u_est, u_gt, reduction="sum") * self.w_points, + "loss_densepose_V": F.smooth_l1_loss(v_est, v_gt, reduction="sum") * self.w_points, + } + + def produce_densepose_losses_segm( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: Any, + interpolator: BilinearInterpolationHelper, + j_valid_fg: torch.Tensor, + ) -> LossDict: + """ + Losses for fine / coarse segmentation: cross-entropy + for segmentation unnormalized scores given ground truth labels at + annotated points for fine segmentation and dense mask annotations + for coarse segmentation. 
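For reference, the full chart-based objective assembled by the methods above can be summarized as follows (an editorial summary in the notation of the docstrings, not text from the patch):

$$
\mathcal{L}_{\text{chart}} \;=\; w_{\text{points}}\big[\mathrm{SmoothL1}(\hat U, U) + \mathrm{SmoothL1}(\hat V, V)\big] \;+\; w_{\text{part}}\,\mathrm{CE}(\hat I, I) \;+\; w_{\text{segm}}\,\mathrm{CE}(\hat S, S),
$$

where the Smooth L1 terms are sum-reduced over annotated foreground points, the fine-segmentation cross entropy is taken at annotated points, and the coarse term is either the segmentation or the mask cross entropy depending on `COARSE_SEGM_TRAINED_BY_MASKS`; the weights correspond to `POINT_REGRESSION_WEIGHTS`, `PART_WEIGHTS` and `INDEX_WEIGHTS` in the config.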
+ + Args: + proposals_with_gt (list of Instances): detections with associated ground truth data + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have the following attributes: + * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S] + * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S] + Return: + dict: str -> tensor: dict of losses with the following entries: + * `loss_densepose_I`: cross entropy for raw unnormalized scores for fine + segmentation estimates given ground truth labels + * `loss_densepose_S`: cross entropy for raw unnormalized scores for coarse + segmentation estimates given ground truth labels; + may be included if coarse segmentation is only trained + using DensePose ground truth; if additional supervision through + instance segmentation data is performed (`segm_trained_by_masks` is True), + this loss is handled by `produce_mask_losses` instead + """ + fine_segm_gt = packed_annotations.fine_segm_labels_gt[ + interpolator.j_valid # pyre-ignore[16] + ] + fine_segm_est = interpolator.extract_at_points( + densepose_predictor_outputs.fine_segm, + slice_fine_segm=slice(None), + w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16] + w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16] + w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16] + w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16] + )[interpolator.j_valid, :] + return { + "loss_densepose_I": F.cross_entropy(fine_segm_est, fine_segm_gt.long()) * self.w_part, + "loss_densepose_S": self.segm_loss( + proposals_with_gt, densepose_predictor_outputs, packed_annotations + ) + * self.w_segm, + } diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py new file mode 100644 index 0000000..78ce7c6 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py @@ -0,0 +1,209 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import Any, List +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from .. import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType +from .chart import DensePoseChartLoss +from .registry import DENSEPOSE_LOSS_REGISTRY +from .utils import BilinearInterpolationHelper, LossDict + + +@DENSEPOSE_LOSS_REGISTRY.register() +class DensePoseChartWithConfidenceLoss(DensePoseChartLoss): + """ """ + + def __init__(self, cfg: CfgNode): + super().__init__(cfg) + self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg) + if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: + self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss( + self.confidence_model_cfg.uv_confidence.epsilon + ) + elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO: + self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss( + self.confidence_model_cfg.uv_confidence.epsilon + ) + + def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict: + """ + Overrides fake losses for fine segmentation and U/V coordinates to + include computation graphs for additional confidence parameters. 
+ These are used when no suitable ground truth data was found in a batch. + The loss has a value 0 and is primarily used to construct the computation graph, + so that `DistributedDataParallel` has similar graphs on all GPUs and can + perform reduction properly. + + Args: + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have the following attributes: + * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S] + * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S] + * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S] + Return: + dict: str -> tensor: dict of losses with the following entries: + * `loss_densepose_U`: has value 0 + * `loss_densepose_V`: has value 0 + * `loss_densepose_I`: has value 0 + """ + conf_type = self.confidence_model_cfg.uv_confidence.type + if self.confidence_model_cfg.uv_confidence.enabled: + loss_uv = ( + densepose_predictor_outputs.u.sum() + densepose_predictor_outputs.v.sum() + ) * 0 + if conf_type == DensePoseUVConfidenceType.IID_ISO: + loss_uv += densepose_predictor_outputs.sigma_2.sum() * 0 + elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO: + loss_uv += ( + densepose_predictor_outputs.sigma_2.sum() + + densepose_predictor_outputs.kappa_u.sum() + + densepose_predictor_outputs.kappa_v.sum() + ) * 0 + return {"loss_densepose_UV": loss_uv} + else: + return super().produce_fake_densepose_losses_uv(densepose_predictor_outputs) + + def produce_densepose_losses_uv( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: Any, + interpolator: BilinearInterpolationHelper, + j_valid_fg: torch.Tensor, + ) -> LossDict: + conf_type = self.confidence_model_cfg.uv_confidence.type + if self.confidence_model_cfg.uv_confidence.enabled: + u_gt = packed_annotations.u_gt[j_valid_fg] + u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg] + v_gt = packed_annotations.v_gt[j_valid_fg] + v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg] + sigma_2_est = interpolator.extract_at_points(densepose_predictor_outputs.sigma_2)[ + j_valid_fg + ] + if conf_type == DensePoseUVConfidenceType.IID_ISO: + return { + "loss_densepose_UV": ( + self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt) + * self.w_points + ) + } + elif conf_type in [DensePoseUVConfidenceType.INDEP_ANISO]: + kappa_u_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_u)[ + j_valid_fg + ] + kappa_v_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_v)[ + j_valid_fg + ] + return { + "loss_densepose_UV": ( + self.uv_loss_with_confidences( + u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt + ) + * self.w_points + ) + } + return super().produce_densepose_losses_uv( + proposals_with_gt, + densepose_predictor_outputs, + packed_annotations, + interpolator, + j_valid_fg, + ) + + +class IIDIsotropicGaussianUVLoss(nn.Module): + """ + Loss for the case of iid residuals with isotropic covariance: + $Sigma_i = sigma_i^2 I$ + The loss (negative log likelihood) is then: + $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$, + where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates + difference between estimated and ground truth UV values + For details, see: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc. 
NIPS 2019 + """ + + def __init__(self, sigma_lower_bound: float): + super(IIDIsotropicGaussianUVLoss, self).__init__() + self.sigma_lower_bound = sigma_lower_bound + self.log2pi = math.log(2 * math.pi) + + def forward( + self, + u: torch.Tensor, + v: torch.Tensor, + sigma_u: torch.Tensor, + target_u: torch.Tensor, + target_v: torch.Tensor, + ): + # compute $\sigma_i^2$ + # use sigma_lower_bound to avoid degenerate solution for variance + # (sigma -> 0) + sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound + # compute \|delta_i\|^2 + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2 + # the total loss from the formula above: + loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2) + return loss.sum() + + +class IndepAnisotropicGaussianUVLoss(nn.Module): + """ + Loss for the case of independent residuals with anisotropic covariances: + $Sigma_i = sigma_i^2 I + r_i r_i^T$ + The loss (negative log likelihood) is then: + $1/2 sum_{i=1}^n (log(2 pi) + + log sigma_i^2 (sigma_i^2 + ||r_i||^2) + + ||delta_i||^2 / sigma_i^2 + - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$, + where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates + difference between estimated and ground truth UV values + For details, see: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 + """ + + def __init__(self, sigma_lower_bound: float): + super(IndepAnisotropicGaussianUVLoss, self).__init__() + self.sigma_lower_bound = sigma_lower_bound + self.log2pi = math.log(2 * math.pi) + + def forward( + self, + u: torch.Tensor, + v: torch.Tensor, + sigma_u: torch.Tensor, + kappa_u_est: torch.Tensor, + kappa_v_est: torch.Tensor, + target_u: torch.Tensor, + target_v: torch.Tensor, + ): + # compute $\sigma_i^2$ + sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound + # compute \|r_i\|^2 + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + r_sqnorm2 = kappa_u_est**2 + kappa_v_est**2 + delta_u = u - target_u + delta_v = v - target_v + # compute \|delta_i\|^2 + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + delta_sqnorm = delta_u**2 + delta_v**2 + delta_u_r_u = delta_u * kappa_u_est + delta_v_r_v = delta_v * kappa_v_est + # compute the scalar product <delta_i, r_i> + delta_r = delta_u_r_u + delta_v_r_v + # compute squared scalar product <delta_i, r_i>^2 + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + delta_r_sqnorm = delta_r**2 + denom2 = sigma2 * (sigma2 + r_sqnorm2) + loss = 0.5 * ( + self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2 + ) + return loss.sum() diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cse.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cse.py new file mode 100644 index 0000000..dd561ad --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cse.py @@ -0,0 +1,115 @@ +# Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved + +from typing import Any, List +from torch import nn + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from .cycle_pix2shape import PixToShapeCycleLoss +from .cycle_shape2shape import ShapeToShapeCycleLoss +from .embed import EmbeddingLoss +from .embed_utils import CseAnnotationsAccumulator +from .mask_or_segm import MaskOrSegmentationLoss +from .registry import DENSEPOSE_LOSS_REGISTRY +from .soft_embed import SoftEmbeddingLoss +from .utils import BilinearInterpolationHelper, LossDict, extract_packed_annotations_from_matches + + +@DENSEPOSE_LOSS_REGISTRY.register() +class DensePoseCseLoss: + """ """ + + _EMBED_LOSS_REGISTRY = { + EmbeddingLoss.__name__: EmbeddingLoss, + SoftEmbeddingLoss.__name__: SoftEmbeddingLoss, + } + + def __init__(self, cfg: CfgNode): + """ + Initialize CSE loss from configuration options + + Args: + cfg (CfgNode): configuration options + """ + self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS + self.w_embed = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_WEIGHT + self.segm_loss = MaskOrSegmentationLoss(cfg) + self.embed_loss = DensePoseCseLoss.create_embed_loss(cfg) + self.do_shape2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.ENABLED + if self.do_shape2shape: + self.w_shape2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.WEIGHT + self.shape2shape_loss = ShapeToShapeCycleLoss(cfg) + self.do_pix2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.ENABLED + if self.do_pix2shape: + self.w_pix2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.WEIGHT + self.pix2shape_loss = PixToShapeCycleLoss(cfg) + + @classmethod + def create_embed_loss(cls, cfg: CfgNode): + # registry not used here, since embedding losses are currently local + # and are not used anywhere else + return cls._EMBED_LOSS_REGISTRY[cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME](cfg) + + def __call__( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + embedder: nn.Module, + ) -> LossDict: + if not len(proposals_with_gt): + return self.produce_fake_losses(densepose_predictor_outputs, embedder) + accumulator = CseAnnotationsAccumulator() + packed_annotations = extract_packed_annotations_from_matches(proposals_with_gt, accumulator) + if packed_annotations is None: + return self.produce_fake_losses(densepose_predictor_outputs, embedder) + h, w = densepose_predictor_outputs.embedding.shape[2:] + interpolator = BilinearInterpolationHelper.from_matches( + packed_annotations, + (h, w), + ) + meshid_to_embed_losses = self.embed_loss( + proposals_with_gt, + densepose_predictor_outputs, + packed_annotations, + interpolator, + embedder, + ) + embed_loss_dict = { + f"loss_densepose_E{meshid}": self.w_embed * meshid_to_embed_losses[meshid] + for meshid in meshid_to_embed_losses + } + all_loss_dict = { + "loss_densepose_S": self.w_segm + * self.segm_loss(proposals_with_gt, densepose_predictor_outputs, packed_annotations), + **embed_loss_dict, + } + if self.do_shape2shape: + all_loss_dict["loss_shape2shape"] = self.w_shape2shape * self.shape2shape_loss(embedder) + if self.do_pix2shape: + all_loss_dict["loss_pix2shape"] = self.w_pix2shape * self.pix2shape_loss( + proposals_with_gt, densepose_predictor_outputs, packed_annotations, embedder + ) + return all_loss_dict + + def produce_fake_losses( + self, densepose_predictor_outputs: Any, embedder: nn.Module + ) -> LossDict: + meshname_to_embed_losses = self.embed_loss.fake_values( + densepose_predictor_outputs, 
embedder=embedder + ) + embed_loss_dict = { + f"loss_densepose_E{mesh_name}": meshname_to_embed_losses[mesh_name] + for mesh_name in meshname_to_embed_losses + } + all_loss_dict = { + "loss_densepose_S": self.segm_loss.fake_value(densepose_predictor_outputs), + **embed_loss_dict, + } + if self.do_shape2shape: + all_loss_dict["loss_shape2shape"] = self.shape2shape_loss.fake_value(embedder) + if self.do_pix2shape: + all_loss_dict["loss_pix2shape"] = self.pix2shape_loss.fake_value( + densepose_predictor_outputs, embedder + ) + return all_loss_dict diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cycle_pix2shape.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cycle_pix2shape.py new file mode 100644 index 0000000..e173918 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cycle_pix2shape.py @@ -0,0 +1,154 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from typing import Any, List +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from densepose.data.meshes.catalog import MeshCatalog +from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix + +from .embed_utils import PackedCseAnnotations +from .mask import extract_data_for_mask_loss_from_matches + + +def _create_pixel_dist_matrix(grid_size: int) -> torch.Tensor: + rows = torch.arange(grid_size) + cols = torch.arange(grid_size) + # at index `i` contains [row, col], where + # row = i // grid_size + # col = i % grid_size + pix_coords = ( + torch.stack(torch.meshgrid(rows, cols), -1).reshape((grid_size * grid_size, 2)).float() + ) + return squared_euclidean_distance_matrix(pix_coords, pix_coords) + + +def _sample_fg_pixels_randperm(fg_mask: torch.Tensor, sample_size: int) -> torch.Tensor: + fg_mask_flattened = fg_mask.reshape((-1,)) + num_pixels = int(fg_mask_flattened.sum().item()) + fg_pixel_indices = fg_mask_flattened.nonzero(as_tuple=True)[0] + if (sample_size <= 0) or (num_pixels <= sample_size): + return fg_pixel_indices + sample_indices = torch.randperm(num_pixels, device=fg_mask.device)[:sample_size] + return fg_pixel_indices[sample_indices] + + +def _sample_fg_pixels_multinomial(fg_mask: torch.Tensor, sample_size: int) -> torch.Tensor: + fg_mask_flattened = fg_mask.reshape((-1,)) + num_pixels = int(fg_mask_flattened.sum().item()) + if (sample_size <= 0) or (num_pixels <= sample_size): + return fg_mask_flattened.nonzero(as_tuple=True)[0] + return fg_mask_flattened.float().multinomial(sample_size, replacement=False) + + +class PixToShapeCycleLoss(nn.Module): + """ + Cycle loss for pixel-vertex correspondence + """ + + def __init__(self, cfg: CfgNode): + super().__init__() + self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys()) + self.embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE + self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P + self.use_all_meshes_not_gt_only = ( + cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY + ) + self.num_pixels_to_sample = ( + cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE + ) + self.pix_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA + self.temperature_pix_to_vertex = ( + cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX + ) + 
self.temperature_vertex_to_pix = ( + cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL + ) + self.pixel_dists = _create_pixel_dist_matrix(cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE) + + def forward( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: PackedCseAnnotations, + embedder: nn.Module, + ): + """ + Args: + proposals_with_gt (list of Instances): detections with associated + ground truth data; each item corresponds to instances detected + on 1 image; the number of items corresponds to the number of + images in a batch + densepose_predictor_outputs: an object of a dataclass that contains predictor + outputs with estimated values; assumed to have the following attributes: + * embedding - embedding estimates, tensor of shape [N, D, S, S], where + N = number of instances (= sum N_i, where N_i is the number of + instances on image i) + D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE) + S = output size (width and height) + packed_annotations (PackedCseAnnotations): contains various data useful + for loss computation, each data is packed into a single tensor + embedder (nn.Module): module that computes vertex embeddings for different meshes + """ + pix_embeds = densepose_predictor_outputs.embedding + if self.pixel_dists.device != pix_embeds.device: + # should normally be done only once + self.pixel_dists = self.pixel_dists.to(device=pix_embeds.device) + with torch.no_grad(): + mask_loss_data = extract_data_for_mask_loss_from_matches( + proposals_with_gt, densepose_predictor_outputs.coarse_segm + ) + # GT masks - tensor of shape [N, S, S] of int64 + masks_gt = mask_loss_data.masks_gt.long() # pyre-ignore[16] + assert len(pix_embeds) == len(masks_gt), ( + f"Number of instances with embeddings {len(pix_embeds)} != " + f"number of instances with GT masks {len(masks_gt)}" + ) + losses = [] + mesh_names = ( + self.shape_names + if self.use_all_meshes_not_gt_only + else [ + MeshCatalog.get_mesh_name(mesh_id.item()) + for mesh_id in packed_annotations.vertex_mesh_ids_gt.unique() + ] + ) + for pixel_embeddings, mask_gt in zip(pix_embeds, masks_gt): + # pixel_embeddings [D, S, S] + # mask_gt [S, S] + for mesh_name in mesh_names: + mesh_vertex_embeddings = embedder(mesh_name) + # pixel indices [M] + pixel_indices_flattened = _sample_fg_pixels_randperm( + mask_gt, self.num_pixels_to_sample + ) + # pixel distances [M, M] + pixel_dists = self.pixel_dists.to(pixel_embeddings.device)[ + torch.meshgrid(pixel_indices_flattened, pixel_indices_flattened) + ] + # pixel embeddings [M, D] + pixel_embeddings_sampled = normalize_embeddings( + pixel_embeddings.reshape((self.embed_size, -1))[:, pixel_indices_flattened].T + ) + # pixel-vertex similarity [M, K] + sim_matrix = pixel_embeddings_sampled.mm(mesh_vertex_embeddings.T) + c_pix_vertex = F.softmax(sim_matrix / self.temperature_pix_to_vertex, dim=1) + c_vertex_pix = F.softmax(sim_matrix.T / self.temperature_vertex_to_pix, dim=1) + c_cycle = c_pix_vertex.mm(c_vertex_pix) + loss_cycle = torch.norm(pixel_dists * c_cycle, p=self.norm_p) + losses.append(loss_cycle) + + if len(losses) == 0: + return pix_embeds.sum() * 0 + return torch.stack(losses, dim=0).mean() + + def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module): + losses = [ + embedder(mesh_name).sum() * 0 for mesh_name in embedder.mesh_names # pyre-ignore[29] + ] + losses.append(densepose_predictor_outputs.embedding.sum() * 0) + return torch.mean(torch.stack(losses)) diff 
--git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cycle_shape2shape.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cycle_shape2shape.py new file mode 100644 index 0000000..2447e8f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/cycle_shape2shape.py @@ -0,0 +1,117 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import random +from typing import Tuple +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode + +from densepose.structures.mesh import create_mesh + +from .utils import sample_random_indices + + +class ShapeToShapeCycleLoss(nn.Module): + """ + Cycle Loss for Shapes. + Inspired by: + "Mapping in a Cycle: Sinkhorn Regularized Unsupervised Learning for Point Cloud Shapes". + """ + + def __init__(self, cfg: CfgNode): + super().__init__() + self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys()) + self.all_shape_pairs = [ + (x, y) for i, x in enumerate(self.shape_names) for y in self.shape_names[i + 1 :] + ] + random.shuffle(self.all_shape_pairs) + self.cur_pos = 0 + self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P + self.temperature = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE + self.max_num_vertices = ( + cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES + ) + + def _sample_random_pair(self) -> Tuple[str, str]: + """ + Produce a random pair of different mesh names + + Return: + tuple(str, str): a pair of different mesh names + """ + if self.cur_pos >= len(self.all_shape_pairs): + random.shuffle(self.all_shape_pairs) + self.cur_pos = 0 + shape_pair = self.all_shape_pairs[self.cur_pos] + self.cur_pos += 1 + return shape_pair + + def forward(self, embedder: nn.Module): + """ + Do a forward pass with a random pair (src, dst) pair of shapes + Args: + embedder (nn.Module): module that computes vertex embeddings for different meshes + """ + src_mesh_name, dst_mesh_name = self._sample_random_pair() + return self._forward_one_pair(embedder, src_mesh_name, dst_mesh_name) + + def fake_value(self, embedder: nn.Module): + losses = [] + for mesh_name in embedder.mesh_names: # pyre-ignore[29] + losses.append(embedder(mesh_name).sum() * 0) + return torch.mean(torch.stack(losses)) + + def _get_embeddings_and_geodists_for_mesh( + self, embedder: nn.Module, mesh_name: str + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Produces embeddings and geodesic distance tensors for a given mesh. May subsample + the mesh, if it contains too many vertices (controlled by + SHAPE_CYCLE_LOSS_MAX_NUM_VERTICES parameter). 
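An editorial summary of the pairwise computation implemented further down in `_forward_one_pair` (not text from the patch): with vertex embeddings $E_1, E_2$ of the two sampled meshes, geodesic distance matrices $G_1, G_2$, and temperature $T$,

$$
C_{12} = \operatorname{softmax}\!\Big(\tfrac{E_1 E_2^{\top}}{T}\Big),\qquad
C_{21} = \operatorname{softmax}\!\Big(\tfrac{E_2 E_1^{\top}}{T}\Big),
$$
$$
\mathcal{L}_{\text{shape}\to\text{shape}} \;=\; \big\lVert G_1 \odot (C_{12} C_{21}) \big\rVert_p \;+\; \big\lVert G_2 \odot (C_{21} C_{12}) \big\rVert_p ,
$$

with row-wise softmax and $p$ given by `NORM_P`; the loss is small when cycling a vertex through the other mesh and back lands it geodesically close to where it started.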
+ Args: + embedder (nn.Module): module that computes embeddings for mesh vertices + mesh_name (str): mesh name + Return: + embeddings (torch.Tensor of size [N, D]): embeddings for selected mesh + vertices (N = number of selected vertices, D = embedding space dim) + geodists (torch.Tensor of size [N, N]): geodesic distances for the selected + mesh vertices (N = number of selected vertices) + """ + embeddings = embedder(mesh_name) + indices = sample_random_indices( + embeddings.shape[0], self.max_num_vertices, embeddings.device + ) + mesh = create_mesh(mesh_name, embeddings.device) + geodists = mesh.geodists + if indices is not None: + embeddings = embeddings[indices] + geodists = geodists[torch.meshgrid(indices, indices)] + return embeddings, geodists + + def _forward_one_pair( + self, embedder: nn.Module, mesh_name_1: str, mesh_name_2: str + ) -> torch.Tensor: + """ + Do a forward pass with a selected pair of meshes + Args: + embedder (nn.Module): module that computes vertex embeddings for different meshes + mesh_name_1 (str): first mesh name + mesh_name_2 (str): second mesh name + Return: + Tensor containing the loss value + """ + embeddings_1, geodists_1 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_1) + embeddings_2, geodists_2 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_2) + sim_matrix_12 = embeddings_1.mm(embeddings_2.T) + + c_12 = F.softmax(sim_matrix_12 / self.temperature, dim=1) + c_21 = F.softmax(sim_matrix_12.T / self.temperature, dim=1) + c_11 = c_12.mm(c_21) + c_22 = c_21.mm(c_12) + + loss_cycle_11 = torch.norm(geodists_1 * c_11, p=self.norm_p) + loss_cycle_22 = torch.norm(geodists_2 * c_22, p=self.norm_p) + + return loss_cycle_11 + loss_cycle_22 diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/embed.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/embed.py new file mode 100644 index 0000000..163eebe --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/embed.py @@ -0,0 +1,127 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from typing import Any, Dict, List +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from densepose.data.meshes.catalog import MeshCatalog +from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix + +from .embed_utils import PackedCseAnnotations +from .utils import BilinearInterpolationHelper + + +class EmbeddingLoss: + """ + Computes losses for estimated embeddings given annotated vertices. + Instances in a minibatch that correspond to the same mesh are grouped + together. For each group, loss is computed as cross-entropy for + unnormalized scores given ground truth mesh vertex ids. + Scores are based on squared distances between estimated vertex embeddings + and mesh vertex embeddings. + """ + + def __init__(self, cfg: CfgNode): + """ + Initialize embedding loss from config + """ + self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA + + def __call__( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: PackedCseAnnotations, + interpolator: BilinearInterpolationHelper, + embedder: nn.Module, + ) -> Dict[int, torch.Tensor]: + """ + Produces losses for estimated embeddings given annotated vertices. 
+ Embeddings for all the vertices of a mesh are computed by the embedder. + Embeddings for observed pixels are estimated by a predictor. + Losses are computed as cross-entropy for squared distances between + observed vertex embeddings and all mesh vertex embeddings given + ground truth vertex IDs. + + Args: + proposals_with_gt (list of Instances): detections with associated + ground truth data; each item corresponds to instances detected + on 1 image; the number of items corresponds to the number of + images in a batch + densepose_predictor_outputs: an object of a dataclass that contains predictor + outputs with estimated values; assumed to have the following attributes: + * embedding - embedding estimates, tensor of shape [N, D, S, S], where + N = number of instances (= sum N_i, where N_i is the number of + instances on image i) + D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE) + S = output size (width and height) + packed_annotations (PackedCseAnnotations): contains various data useful + for loss computation, each data is packed into a single tensor + interpolator (BilinearInterpolationHelper): bilinear interpolation helper + embedder (nn.Module): module that computes vertex embeddings for different meshes + Return: + dict(int -> tensor): losses for different mesh IDs + """ + losses = {} + for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique(): + mesh_id = mesh_id_tensor.item() + mesh_name = MeshCatalog.get_mesh_name(mesh_id) + # valid points are those that fall into estimated bbox + # and correspond to the current mesh + j_valid = interpolator.j_valid * ( # pyre-ignore[16] + packed_annotations.vertex_mesh_ids_gt == mesh_id + ) + if not torch.any(j_valid): + continue + # extract estimated embeddings for valid points + # -> tensor [J, D] + vertex_embeddings_i = normalize_embeddings( + interpolator.extract_at_points( + densepose_predictor_outputs.embedding, + slice_fine_segm=slice(None), + w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16] + w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16] + w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16] + w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16] + )[j_valid, :] + ) + # extract vertex ids for valid points + # -> tensor [J] + vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid] + # embeddings for all mesh vertices + # -> tensor [K, D] + mesh_vertex_embeddings = embedder(mesh_name) + # unnormalized scores for valid points + # -> tensor [J, K] + scores = squared_euclidean_distance_matrix( + vertex_embeddings_i, mesh_vertex_embeddings + ) / (-self.embdist_gauss_sigma) + losses[mesh_name] = F.cross_entropy(scores, vertex_indices_i, ignore_index=-1) + + # pyre-fixme[29]: + # `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self, + # torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module, + # torch.Tensor]` is not a function. + for mesh_name in embedder.mesh_names: + if mesh_name not in losses: + losses[mesh_name] = self.fake_value( + densepose_predictor_outputs, embedder, mesh_name + ) + return losses + + def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module): + losses = {} + # pyre-fixme[29]: + # `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self, + # torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module, + # torch.Tensor]` is not a function. 
+ for mesh_name in embedder.mesh_names: + losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name) + return losses + + def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str): + return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0 diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/embed_utils.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/embed_utils.py new file mode 100644 index 0000000..eb9492f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/embed_utils.py @@ -0,0 +1,135 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from dataclasses import dataclass +from typing import Any, Optional +import torch + +from detectron2.structures import BoxMode, Instances + +from .utils import AnnotationsAccumulator + + +@dataclass +class PackedCseAnnotations: + x_gt: torch.Tensor + y_gt: torch.Tensor + coarse_segm_gt: Optional[torch.Tensor] + vertex_mesh_ids_gt: torch.Tensor + vertex_ids_gt: torch.Tensor + bbox_xywh_gt: torch.Tensor + bbox_xywh_est: torch.Tensor + point_bbox_with_dp_indices: torch.Tensor + point_bbox_indices: torch.Tensor + bbox_indices: torch.Tensor + + +class CseAnnotationsAccumulator(AnnotationsAccumulator): + """ + Accumulates annotations by batches that correspond to objects detected on + individual images. Can pack them together into single tensors. + """ + + def __init__(self): + self.x_gt = [] + self.y_gt = [] + self.s_gt = [] + self.vertex_mesh_ids_gt = [] + self.vertex_ids_gt = [] + self.bbox_xywh_gt = [] + self.bbox_xywh_est = [] + self.point_bbox_with_dp_indices = [] + self.point_bbox_indices = [] + self.bbox_indices = [] + self.nxt_bbox_with_dp_index = 0 + self.nxt_bbox_index = 0 + + def accumulate(self, instances_one_image: Instances): + """ + Accumulate instances data for one image + + Args: + instances_one_image (Instances): instances data to accumulate + """ + boxes_xywh_est = BoxMode.convert( + instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + boxes_xywh_gt = BoxMode.convert( + instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + n_matches = len(boxes_xywh_gt) + assert n_matches == len( + boxes_xywh_est + ), f"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes" + if not n_matches: + # no detection - GT matches + return + if ( + not hasattr(instances_one_image, "gt_densepose") + or instances_one_image.gt_densepose is None + ): + # no densepose GT for the detections, just increase the bbox index + self.nxt_bbox_index += n_matches + return + for box_xywh_est, box_xywh_gt, dp_gt in zip( + boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose + ): + if (dp_gt is not None) and (len(dp_gt.x) > 0): + self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt) + self.nxt_bbox_index += 1 + + def _do_accumulate(self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: Any): + """ + Accumulate instances data for one image, given that the data is not empty + + Args: + box_xywh_gt (tensor): GT bounding box + box_xywh_est (tensor): estimated bounding box + dp_gt: GT densepose data with the following attributes: + - x: normalized X coordinates + - y: normalized Y coordinates + - segm: tensor of size [S, S] with coarse segmentation + - + """ + self.x_gt.append(dp_gt.x) + self.y_gt.append(dp_gt.y) + if hasattr(dp_gt, "segm"): + 
self.s_gt.append(dp_gt.segm.unsqueeze(0)) + self.vertex_ids_gt.append(dp_gt.vertex_ids) + self.vertex_mesh_ids_gt.append(torch.full_like(dp_gt.vertex_ids, dp_gt.mesh_id)) + self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4)) + self.bbox_xywh_est.append(box_xywh_est.view(-1, 4)) + self.point_bbox_with_dp_indices.append( + torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_with_dp_index) + ) + self.point_bbox_indices.append(torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_index)) + self.bbox_indices.append(self.nxt_bbox_index) + self.nxt_bbox_with_dp_index += 1 + + def pack(self) -> Optional[PackedCseAnnotations]: + """ + Pack data into tensors + """ + if not len(self.x_gt): + # TODO: + # returning proper empty annotations would require + # creating empty tensors of appropriate shape and + # type on an appropriate device; + # we return None so far to indicate empty annotations + return None + return PackedCseAnnotations( + x_gt=torch.cat(self.x_gt, 0), + y_gt=torch.cat(self.y_gt, 0), + vertex_mesh_ids_gt=torch.cat(self.vertex_mesh_ids_gt, 0), + vertex_ids_gt=torch.cat(self.vertex_ids_gt, 0), + # ignore segmentation annotations, if not all the instances contain those + coarse_segm_gt=torch.cat(self.s_gt, 0) + if len(self.s_gt) == len(self.bbox_xywh_gt) + else None, + bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0), + bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0), + point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0), + point_bbox_indices=torch.cat(self.point_bbox_indices, 0), + bbox_indices=torch.as_tensor( + self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device + ), + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/mask.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/mask.py new file mode 100644 index 0000000..c16b15c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/mask.py @@ -0,0 +1,125 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from dataclasses import dataclass +from typing import Any, Iterable, List, Optional +import torch +from torch.nn import functional as F + +from detectron2.structures import Instances + + +@dataclass +class DataForMaskLoss: + """ + Contains mask GT and estimated data for proposals from multiple images: + """ + + # tensor of size (K, H, W) containing GT labels + masks_gt: Optional[torch.Tensor] = None + # tensor of size (K, C, H, W) containing estimated scores + masks_est: Optional[torch.Tensor] = None + + +def extract_data_for_mask_loss_from_matches( + proposals_targets: Iterable[Instances], estimated_segm: torch.Tensor +) -> DataForMaskLoss: + """ + Extract data for mask loss from instances that contain matched GT and + estimated bounding boxes. 
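A hedged, self-contained sketch of the alignment step that `extract_data_for_mask_loss_from_matches` performs (editorial, with made-up shapes; assumes detectron2's `BitMasks` as the GT mask container):

```python
# Crop image-level GT masks to their matched proposal boxes, resample them to
# the S x S heatmap grid, and score them against coarse segmentation estimates
# with cross entropy, as MaskLoss does.
import torch
import torch.nn.functional as F
from detectron2.structures import BitMasks

S = 112                                                            # heatmap size
gt_masks = BitMasks(torch.zeros(3, 480, 640, dtype=torch.bool))    # 3 instances
proposal_boxes = torch.tensor([[10.0, 20.0, 200.0, 300.0],
                               [50.0, 60.0, 400.0, 350.0],
                               [ 0.0,  0.0, 100.0, 100.0]])        # XYXY_ABS
masks_gt = gt_masks.crop_and_resize(proposal_boxes, S).long()      # [3, S, S]
coarse_segm_est = torch.randn(3, 2, S, S)                          # [N, D, S, S]
loss = F.cross_entropy(coarse_segm_est, masks_gt)
```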
+ Args: + proposals_targets: Iterable[Instances] + matched GT and estimated results, each item in the iterable + corresponds to data in 1 image + estimated_segm: tensor(K, C, S, S) of float - raw unnormalized + segmentation scores, here S is the size to which GT masks are + to be resized + Return: + masks_est: tensor(K, C, S, S) of float - class scores + masks_gt: tensor(K, S, S) of int64 - labels + """ + data = DataForMaskLoss() + masks_gt = [] + offset = 0 + assert estimated_segm.shape[2] == estimated_segm.shape[3], ( + f"Expected estimated segmentation to have a square shape, " + f"but the actual shape is {estimated_segm.shape[2:]}" + ) + mask_size = estimated_segm.shape[2] + num_proposals = sum(inst.proposal_boxes.tensor.size(0) for inst in proposals_targets) + num_estimated = estimated_segm.shape[0] + assert ( + num_proposals == num_estimated + ), "The number of proposals {} must be equal to the number of estimates {}".format( + num_proposals, num_estimated + ) + + for proposals_targets_per_image in proposals_targets: + n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0) + if not n_i: + continue + gt_masks_per_image = proposals_targets_per_image.gt_masks.crop_and_resize( + proposals_targets_per_image.proposal_boxes.tensor, mask_size + ).to(device=estimated_segm.device) + masks_gt.append(gt_masks_per_image) + offset += n_i + if masks_gt: + data.masks_est = estimated_segm + data.masks_gt = torch.cat(masks_gt, dim=0) + return data + + +class MaskLoss: + """ + Mask loss as cross-entropy for raw unnormalized scores given ground truth labels. + Mask ground truth labels are defined for the whole image and not only the + bounding box of interest. They are stored as objects that are assumed to implement + the `crop_and_resize` interface (e.g. BitMasks, PolygonMasks). + """ + + def __call__( + self, proposals_with_gt: List[Instances], densepose_predictor_outputs: Any + ) -> torch.Tensor: + """ + Computes segmentation loss as cross-entropy for raw unnormalized + scores given ground truth labels. + + Args: + proposals_with_gt (list of Instances): detections with associated ground truth data + densepose_predictor_outputs: an object of a dataclass that contains predictor outputs + with estimated values; assumed to have the following attribute: + * coarse_segm (tensor of shape [N, D, S, S]): coarse segmentation estimates + as raw unnormalized scores + where N is the number of detections, S is the estimate size ( = width = height) + and D is the number of coarse segmentation channels. + Return: + Cross entropy for raw unnormalized scores for coarse segmentation given + ground truth labels from masks + """ + if not len(proposals_with_gt): + return self.fake_value(densepose_predictor_outputs) + # densepose outputs are computed for all images and all bounding boxes; + # i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively, + # the outputs will have size(0) == 3+1+2+1 == 7 + with torch.no_grad(): + mask_loss_data = extract_data_for_mask_loss_from_matches( + proposals_with_gt, densepose_predictor_outputs.coarse_segm + ) + if (mask_loss_data.masks_gt is None) or (mask_loss_data.masks_est is None): + return self.fake_value(densepose_predictor_outputs) + return F.cross_entropy(mask_loss_data.masks_est, mask_loss_data.masks_gt.long()) + + def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor: + """ + Fake segmentation loss used when no suitable ground truth data + was found in a batch. 
The loss has a value 0 and is primarily used to + construct the computation graph, so that `DistributedDataParallel` + has similar graphs on all GPUs and can perform reduction properly. + + Args: + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have `coarse_segm` + attribute + Return: + Zero value loss with proper computation graph + """ + return densepose_predictor_outputs.coarse_segm.sum() * 0 diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/mask_or_segm.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/mask_or_segm.py new file mode 100644 index 0000000..98b773d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/mask_or_segm.py @@ -0,0 +1,72 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from typing import Any, List +import torch + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from .mask import MaskLoss +from .segm import SegmentationLoss + + +class MaskOrSegmentationLoss: + """ + Mask or segmentation loss as cross-entropy for raw unnormalized scores + given ground truth labels. Ground truth labels are either defined by coarse + segmentation annotation, or by mask annotation, depending on the config + value MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS + """ + + def __init__(self, cfg: CfgNode): + """ + Initialize segmentation loss from configuration options + + Args: + cfg (CfgNode): configuration options + """ + self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS + if self.segm_trained_by_masks: + self.mask_loss = MaskLoss() + self.segm_loss = SegmentationLoss(cfg) + + def __call__( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: Any, + ) -> torch.Tensor: + """ + Compute segmentation loss as cross-entropy between aligned unnormalized + score estimates and ground truth; with ground truth given + either by masks, or by coarse segmentation annotations. + + Args: + proposals_with_gt (list of Instances): detections with associated ground truth data + densepose_predictor_outputs: an object of a dataclass that contains predictor outputs + with estimated values; assumed to have the following attributes: + * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S] + packed_annotations: packed annotations for efficient loss computation + Return: + tensor: loss value as cross-entropy for raw unnormalized scores + given ground truth labels + """ + if self.segm_trained_by_masks: + return self.mask_loss(proposals_with_gt, densepose_predictor_outputs) + return self.segm_loss(proposals_with_gt, densepose_predictor_outputs, packed_annotations) + + def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor: + """ + Fake segmentation loss used when no suitable ground truth data + was found in a batch. The loss has a value 0 and is primarily used to + construct the computation graph, so that `DistributedDataParallel` + has similar graphs on all GPUs and can perform reduction properly. 
+ + Args: + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have `coarse_segm` + attribute + Return: + Zero value loss with proper computation graph + """ + return densepose_predictor_outputs.coarse_segm.sum() * 0 diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/registry.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/registry.py new file mode 100644 index 0000000..d9c8817 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/registry.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from detectron2.utils.registry import Registry + +DENSEPOSE_LOSS_REGISTRY = Registry("DENSEPOSE_LOSS") diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/segm.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/segm.py new file mode 100644 index 0000000..1962b88 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/segm.py @@ -0,0 +1,83 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from typing import Any, List +import torch +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from .utils import resample_data + + +class SegmentationLoss: + """ + Segmentation loss as cross-entropy for raw unnormalized scores given ground truth + labels. Segmentation ground truth labels are defined for the bounding box of + interest at some fixed resolution [S, S], where + S = MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE. + """ + + def __init__(self, cfg: CfgNode): + """ + Initialize segmentation loss from configuration options + + Args: + cfg (CfgNode): configuration options + """ + self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE + self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + + def __call__( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: Any, + ) -> torch.Tensor: + """ + Compute segmentation loss as cross-entropy on aligned segmentation + ground truth and estimated scores. + + Args: + proposals_with_gt (list of Instances): detections with associated ground truth data + densepose_predictor_outputs: an object of a dataclass that contains predictor outputs + with estimated values; assumed to have the following attributes: + * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S] + packed_annotations: packed annotations for efficient loss computation; + the following attributes are used: + - coarse_segm_gt + - bbox_xywh_gt + - bbox_xywh_est + """ + if packed_annotations.coarse_segm_gt is None: + return self.fake_value(densepose_predictor_outputs) + coarse_segm_est = densepose_predictor_outputs.coarse_segm[packed_annotations.bbox_indices] + with torch.no_grad(): + coarse_segm_gt = resample_data( + packed_annotations.coarse_segm_gt.unsqueeze(1), + packed_annotations.bbox_xywh_gt, + packed_annotations.bbox_xywh_est, + self.heatmap_size, + self.heatmap_size, + mode="nearest", + padding_mode="zeros", + ).squeeze(1) + if self.n_segm_chan == 2: + coarse_segm_gt = coarse_segm_gt > 0 + return F.cross_entropy(coarse_segm_est, coarse_segm_gt.long()) + + def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor: + """ + Fake segmentation loss used when no suitable ground truth data + was found in a batch. 
The loss has a value 0 and is primarily used to + construct the computation graph, so that `DistributedDataParallel` + has similar graphs on all GPUs and can perform reduction properly. + + Args: + densepose_predictor_outputs: DensePose predictor outputs, an object + of a dataclass that is assumed to have `coarse_segm` + attribute + Return: + Zero value loss with proper computation graph + """ + return densepose_predictor_outputs.coarse_segm.sum() * 0 diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/soft_embed.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/soft_embed.py new file mode 100644 index 0000000..176d929 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/soft_embed.py @@ -0,0 +1,141 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from typing import Any, Dict, List +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.structures import Instances + +from densepose.data.meshes.catalog import MeshCatalog +from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix +from densepose.structures.mesh import create_mesh + +from .embed_utils import PackedCseAnnotations +from .utils import BilinearInterpolationHelper + + +class SoftEmbeddingLoss: + """ + Computes losses for estimated embeddings given annotated vertices. + Instances in a minibatch that correspond to the same mesh are grouped + together. For each group, loss is computed as cross-entropy for + unnormalized scores given ground truth mesh vertex ids. + Scores are based on: + 1) squared distances between estimated vertex embeddings + and mesh vertex embeddings; + 2) geodesic distances between vertices of a mesh + """ + + def __init__(self, cfg: CfgNode): + """ + Initialize embedding loss from config + """ + self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA + self.geodist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA + + def __call__( + self, + proposals_with_gt: List[Instances], + densepose_predictor_outputs: Any, + packed_annotations: PackedCseAnnotations, + interpolator: BilinearInterpolationHelper, + embedder: nn.Module, + ) -> Dict[int, torch.Tensor]: + """ + Produces losses for estimated embeddings given annotated vertices. + Embeddings for all the vertices of a mesh are computed by the embedder. + Embeddings for observed pixels are estimated by a predictor. + Losses are computed as cross-entropy for unnormalized scores given + ground truth vertex IDs. 
+ 1) squared distances between estimated vertex embeddings + and mesh vertex embeddings; + 2) geodesic distances between vertices of a mesh + + Args: + proposals_with_gt (list of Instances): detections with associated + ground truth data; each item corresponds to instances detected + on 1 image; the number of items corresponds to the number of + images in a batch + densepose_predictor_outputs: an object of a dataclass that contains predictor + outputs with estimated values; assumed to have the following attributes: + * embedding - embedding estimates, tensor of shape [N, D, S, S], where + N = number of instances (= sum N_i, where N_i is the number of + instances on image i) + D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE) + S = output size (width and height) + packed_annotations (PackedCseAnnotations): contains various data useful + for loss computation, each data is packed into a single tensor + interpolator (BilinearInterpolationHelper): bilinear interpolation helper + embedder (nn.Module): module that computes vertex embeddings for different meshes + Return: + dict(int -> tensor): losses for different mesh IDs + """ + losses = {} + for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique(): + mesh_id = mesh_id_tensor.item() + mesh_name = MeshCatalog.get_mesh_name(mesh_id) + # valid points are those that fall into estimated bbox + # and correspond to the current mesh + j_valid = interpolator.j_valid * ( # pyre-ignore[16] + packed_annotations.vertex_mesh_ids_gt == mesh_id + ) + if not torch.any(j_valid): + continue + # extract estimated embeddings for valid points + # -> tensor [J, D] + vertex_embeddings_i = normalize_embeddings( + interpolator.extract_at_points( + densepose_predictor_outputs.embedding, + slice_fine_segm=slice(None), + w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16] + w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16] + w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16] + w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16] + )[j_valid, :] + ) + # extract vertex ids for valid points + # -> tensor [J] + vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid] + # embeddings for all mesh vertices + # -> tensor [K, D] + mesh_vertex_embeddings = embedder(mesh_name) + # softmax values of geodesic distances for GT mesh vertices + # -> tensor [J, K] + mesh = create_mesh(mesh_name, mesh_vertex_embeddings.device) + geodist_softmax_values = F.softmax( + mesh.geodists[vertex_indices_i] / (-self.geodist_gauss_sigma), dim=1 + ) + # logsoftmax values for valid points + # -> tensor [J, K] + embdist_logsoftmax_values = F.log_softmax( + squared_euclidean_distance_matrix(vertex_embeddings_i, mesh_vertex_embeddings) + / (-self.embdist_gauss_sigma), + dim=1, + ) + losses[mesh_name] = (-geodist_softmax_values * embdist_logsoftmax_values).sum(1).mean() + + # pyre-fixme[29]: + # `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self, + # torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module, + # torch.Tensor]` is not a function. 
+ for mesh_name in embedder.mesh_names: + if mesh_name not in losses: + losses[mesh_name] = self.fake_value( + densepose_predictor_outputs, embedder, mesh_name + ) + return losses + + def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module): + losses = {} + # pyre-fixme[29]: + # `Union[BoundMethod[typing.Callable(torch.Tensor.__iter__)[[Named(self, + # torch.Tensor)], typing.Iterator[typing.Any]], torch.Tensor], nn.Module, + # torch.Tensor]` is not a function. + for mesh_name in embedder.mesh_names: + losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name) + return losses + + def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str): + return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0 diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/utils.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/utils.py new file mode 100644 index 0000000..4c172ae --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/losses/utils.py @@ -0,0 +1,441 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple +import torch +from torch.nn import functional as F + +from detectron2.structures import BoxMode, Instances + +from densepose import DensePoseDataRelative + +LossDict = Dict[str, torch.Tensor] + + +def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z): + """ + Computes utility values for linear interpolation at points v. + The points are given as normalized offsets in the source interval + (v0_src, v0_src + size_src), more precisely: + v = v0_src + v_norm * size_src / 256.0 + The computed utilities include lower points v_lo, upper points v_hi, + interpolation weights v_w and flags j_valid indicating whether the + points falls into the destination interval (v0_dst, v0_dst + size_dst). 
+ + Args: + v_norm (:obj: `torch.Tensor`): tensor of size N containing + normalized point offsets + v0_src (:obj: `torch.Tensor`): tensor of size N containing + left bounds of source intervals for normalized points + size_src (:obj: `torch.Tensor`): tensor of size N containing + source interval sizes for normalized points + v0_dst (:obj: `torch.Tensor`): tensor of size N containing + left bounds of destination intervals + size_dst (:obj: `torch.Tensor`): tensor of size N containing + destination interval sizes + size_z (int): interval size for data to be interpolated + + Returns: + v_lo (:obj: `torch.Tensor`): int tensor of size N containing + indices of lower values used for interpolation, all values are + integers from [0, size_z - 1] + v_hi (:obj: `torch.Tensor`): int tensor of size N containing + indices of upper values used for interpolation, all values are + integers from [0, size_z - 1] + v_w (:obj: `torch.Tensor`): float tensor of size N containing + interpolation weights + j_valid (:obj: `torch.Tensor`): uint8 tensor of size N containing + 0 for points outside the estimation interval + (v0_est, v0_est + size_est) and 1 otherwise + """ + v = v0_src + v_norm * size_src / 256.0 + j_valid = (v - v0_dst >= 0) * (v - v0_dst < size_dst) + v_grid = (v - v0_dst) * size_z / size_dst + v_lo = v_grid.floor().long().clamp(min=0, max=size_z - 1) + v_hi = (v_lo + 1).clamp(max=size_z - 1) + v_grid = torch.min(v_hi.float(), v_grid) + v_w = v_grid - v_lo.float() + return v_lo, v_hi, v_w, j_valid + + +class BilinearInterpolationHelper: + """ + Args: + packed_annotations: object that contains packed annotations + j_valid (:obj: `torch.Tensor`): uint8 tensor of size M containing + 0 for points to be discarded and 1 for points to be selected + y_lo (:obj: `torch.Tensor`): int tensor of indices of upper values + in z_est for each point + y_hi (:obj: `torch.Tensor`): int tensor of indices of lower values + in z_est for each point + x_lo (:obj: `torch.Tensor`): int tensor of indices of left values + in z_est for each point + x_hi (:obj: `torch.Tensor`): int tensor of indices of right values + in z_est for each point + w_ylo_xlo (:obj: `torch.Tensor`): float tensor of size M; + contains upper-left value weight for each point + w_ylo_xhi (:obj: `torch.Tensor`): float tensor of size M; + contains upper-right value weight for each point + w_yhi_xlo (:obj: `torch.Tensor`): float tensor of size M; + contains lower-left value weight for each point + w_yhi_xhi (:obj: `torch.Tensor`): float tensor of size M; + contains lower-right value weight for each point + """ + + def __init__( + self, + packed_annotations: Any, + j_valid: torch.Tensor, + y_lo: torch.Tensor, + y_hi: torch.Tensor, + x_lo: torch.Tensor, + x_hi: torch.Tensor, + w_ylo_xlo: torch.Tensor, + w_ylo_xhi: torch.Tensor, + w_yhi_xlo: torch.Tensor, + w_yhi_xhi: torch.Tensor, + ): + for k, v in locals().items(): + if k != "self": + setattr(self, k, v) + + @staticmethod + def from_matches( + packed_annotations: Any, densepose_outputs_size_hw: Tuple[int, int] + ) -> "BilinearInterpolationHelper": + """ + Args: + packed_annotations: annotations packed into tensors, the following + attributes are required: + - bbox_xywh_gt + - bbox_xywh_est + - x_gt + - y_gt + - point_bbox_with_dp_indices + - point_bbox_indices + densepose_outputs_size_hw (tuple [int, int]): resolution of + DensePose predictor outputs (H, W) + Return: + An instance of `BilinearInterpolationHelper` used to perform + interpolation for the given annotation points and output resolution + """ + + 
zh, zw = densepose_outputs_size_hw + x0_gt, y0_gt, w_gt, h_gt = packed_annotations.bbox_xywh_gt[ + packed_annotations.point_bbox_with_dp_indices + ].unbind(dim=1) + x0_est, y0_est, w_est, h_est = packed_annotations.bbox_xywh_est[ + packed_annotations.point_bbox_with_dp_indices + ].unbind(dim=1) + x_lo, x_hi, x_w, jx_valid = _linear_interpolation_utilities( + packed_annotations.x_gt, x0_gt, w_gt, x0_est, w_est, zw + ) + y_lo, y_hi, y_w, jy_valid = _linear_interpolation_utilities( + packed_annotations.y_gt, y0_gt, h_gt, y0_est, h_est, zh + ) + j_valid = jx_valid * jy_valid + + w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w) + w_ylo_xhi = x_w * (1.0 - y_w) + w_yhi_xlo = (1.0 - x_w) * y_w + w_yhi_xhi = x_w * y_w + + return BilinearInterpolationHelper( + packed_annotations, + j_valid, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, # pyre-ignore[6] + w_ylo_xhi, + # pyre-fixme[6]: Expected `Tensor` for 9th param but got `float`. + w_yhi_xlo, + w_yhi_xhi, + ) + + def extract_at_points( + self, + z_est, + slice_fine_segm=None, + w_ylo_xlo=None, + w_ylo_xhi=None, + w_yhi_xlo=None, + w_yhi_xhi=None, + ): + """ + Extract ground truth values z_gt for valid point indices and estimated + values z_est using bilinear interpolation over top-left (y_lo, x_lo), + top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right + (y_hi, x_hi) values in z_est with corresponding weights: + w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi. + Use slice_fine_segm to slice dim=1 in z_est + """ + slice_fine_segm = ( + self.packed_annotations.fine_segm_labels_gt + if slice_fine_segm is None + else slice_fine_segm + ) + w_ylo_xlo = self.w_ylo_xlo if w_ylo_xlo is None else w_ylo_xlo + w_ylo_xhi = self.w_ylo_xhi if w_ylo_xhi is None else w_ylo_xhi + w_yhi_xlo = self.w_yhi_xlo if w_yhi_xlo is None else w_yhi_xlo + w_yhi_xhi = self.w_yhi_xhi if w_yhi_xhi is None else w_yhi_xhi + + index_bbox = self.packed_annotations.point_bbox_indices + z_est_sampled = ( + z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_lo] * w_ylo_xlo + + z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_hi] * w_ylo_xhi + + z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_lo] * w_yhi_xlo + + z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_hi] * w_yhi_xhi + ) + return z_est_sampled + + +def resample_data( + z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode: str = "nearest", padding_mode: str = "zeros" +): + """ + Args: + z (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with data to be + resampled + bbox_xywh_src (:obj: `torch.Tensor`): tensor of size (N,4) containing + source bounding boxes in format XYWH + bbox_xywh_dst (:obj: `torch.Tensor`): tensor of size (N,4) containing + destination bounding boxes in format XYWH + Return: + zresampled (:obj: `torch.Tensor`): tensor of size (N, C, Hout, Wout) + with resampled values of z, where D is the discretization size + """ + n = bbox_xywh_src.size(0) + assert n == bbox_xywh_dst.size(0), ( + "The number of " + "source ROIs for resampling ({}) should be equal to the number " + "of destination ROIs ({})".format(bbox_xywh_src.size(0), bbox_xywh_dst.size(0)) + ) + x0src, y0src, wsrc, hsrc = bbox_xywh_src.unbind(dim=1) + x0dst, y0dst, wdst, hdst = bbox_xywh_dst.unbind(dim=1) + x0dst_norm = 2 * (x0dst - x0src) / wsrc - 1 + y0dst_norm = 2 * (y0dst - y0src) / hsrc - 1 + x1dst_norm = 2 * (x0dst + wdst - x0src) / wsrc - 1 + y1dst_norm = 2 * (y0dst + hdst - y0src) / hsrc - 1 + grid_w = torch.arange(wout, device=z.device, dtype=torch.float) / wout + grid_h = torch.arange(hout, device=z.device, dtype=torch.float) / 
hout + grid_w_expanded = grid_w[None, None, :].expand(n, hout, wout) + grid_h_expanded = grid_h[None, :, None].expand(n, hout, wout) + dx_expanded = (x1dst_norm - x0dst_norm)[:, None, None].expand(n, hout, wout) + dy_expanded = (y1dst_norm - y0dst_norm)[:, None, None].expand(n, hout, wout) + x0_expanded = x0dst_norm[:, None, None].expand(n, hout, wout) + y0_expanded = y0dst_norm[:, None, None].expand(n, hout, wout) + grid_x = grid_w_expanded * dx_expanded + x0_expanded + grid_y = grid_h_expanded * dy_expanded + y0_expanded + grid = torch.stack((grid_x, grid_y), dim=3) + # resample Z from (N, C, H, W) into (N, C, Hout, Wout) + zresampled = F.grid_sample(z, grid, mode=mode, padding_mode=padding_mode, align_corners=True) + return zresampled + + +class AnnotationsAccumulator(ABC): + """ + Abstract class for an accumulator for annotations that can produce + dense annotations packed into tensors. + """ + + @abstractmethod + def accumulate(self, instances_one_image: Instances): + """ + Accumulate instances data for one image + + Args: + instances_one_image (Instances): instances data to accumulate + """ + pass + + @abstractmethod + def pack(self) -> Any: + """ + Pack data into tensors + """ + pass + + +@dataclass +class PackedChartBasedAnnotations: + """ + Packed annotations for chart-based model training. The following attributes + are defined: + - fine_segm_labels_gt (tensor [K] of `int64`): GT fine segmentation point labels + - x_gt (tensor [K] of `float32`): GT normalized X point coordinates + - y_gt (tensor [K] of `float32`): GT normalized Y point coordinates + - u_gt (tensor [K] of `float32`): GT point U values + - v_gt (tensor [K] of `float32`): GT point V values + - coarse_segm_gt (tensor [N, S, S] of `float32`): GT segmentation for bounding boxes + - bbox_xywh_gt (tensor [N, 4] of `float32`): selected GT bounding boxes in + XYWH format + - bbox_xywh_est (tensor [N, 4] of `float32`): selected matching estimated + bounding boxes in XYWH format + - point_bbox_with_dp_indices (tensor [K] of `int64`): indices of bounding boxes + with DensePose annotations that correspond to the point data + - point_bbox_indices (tensor [K] of `int64`): indices of bounding boxes + (not necessarily the selected ones with DensePose data) that correspond + to the point data + - bbox_indices (tensor [N] of `int64`): global indices of selected bounding + boxes with DensePose annotations; these indices could be used to access + features that are computed for all bounding boxes, not only the ones with + DensePose annotations. + Here K is the total number of points and N is the total number of instances + with DensePose annotations. + """ + + fine_segm_labels_gt: torch.Tensor + x_gt: torch.Tensor + y_gt: torch.Tensor + u_gt: torch.Tensor + v_gt: torch.Tensor + coarse_segm_gt: Optional[torch.Tensor] + bbox_xywh_gt: torch.Tensor + bbox_xywh_est: torch.Tensor + point_bbox_with_dp_indices: torch.Tensor + point_bbox_indices: torch.Tensor + bbox_indices: torch.Tensor + + +class ChartBasedAnnotationsAccumulator(AnnotationsAccumulator): + """ + Accumulates annotations by batches that correspond to objects detected on + individual images. Can pack them together into single tensors. 
+ """ + + def __init__(self): + self.i_gt = [] + self.x_gt = [] + self.y_gt = [] + self.u_gt = [] + self.v_gt = [] + self.s_gt = [] + self.bbox_xywh_gt = [] + self.bbox_xywh_est = [] + self.point_bbox_with_dp_indices = [] + self.point_bbox_indices = [] + self.bbox_indices = [] + self.nxt_bbox_with_dp_index = 0 + self.nxt_bbox_index = 0 + + def accumulate(self, instances_one_image: Instances): + """ + Accumulate instances data for one image + + Args: + instances_one_image (Instances): instances data to accumulate + """ + boxes_xywh_est = BoxMode.convert( + instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + boxes_xywh_gt = BoxMode.convert( + instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + n_matches = len(boxes_xywh_gt) + assert n_matches == len( + boxes_xywh_est + ), f"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes" + if not n_matches: + # no detection - GT matches + return + if ( + not hasattr(instances_one_image, "gt_densepose") + or instances_one_image.gt_densepose is None + ): + # no densepose GT for the detections, just increase the bbox index + self.nxt_bbox_index += n_matches + return + for box_xywh_est, box_xywh_gt, dp_gt in zip( + boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose + ): + if (dp_gt is not None) and (len(dp_gt.x) > 0): + self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt) + self.nxt_bbox_index += 1 + + def _do_accumulate( + self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: DensePoseDataRelative + ): + """ + Accumulate instances data for one image, given that the data is not empty + + Args: + box_xywh_gt (tensor): GT bounding box + box_xywh_est (tensor): estimated bounding box + dp_gt (DensePoseDataRelative): GT densepose data + """ + self.i_gt.append(dp_gt.i) + self.x_gt.append(dp_gt.x) + self.y_gt.append(dp_gt.y) + self.u_gt.append(dp_gt.u) + self.v_gt.append(dp_gt.v) + if hasattr(dp_gt, "segm"): + self.s_gt.append(dp_gt.segm.unsqueeze(0)) + self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4)) + self.bbox_xywh_est.append(box_xywh_est.view(-1, 4)) + self.point_bbox_with_dp_indices.append( + torch.full_like(dp_gt.i, self.nxt_bbox_with_dp_index) + ) + self.point_bbox_indices.append(torch.full_like(dp_gt.i, self.nxt_bbox_index)) + self.bbox_indices.append(self.nxt_bbox_index) + self.nxt_bbox_with_dp_index += 1 + + def pack(self) -> Optional[PackedChartBasedAnnotations]: + """ + Pack data into tensors + """ + if not len(self.i_gt): + # TODO: + # returning proper empty annotations would require + # creating empty tensors of appropriate shape and + # type on an appropriate device; + # we return None so far to indicate empty annotations + return None + return PackedChartBasedAnnotations( + fine_segm_labels_gt=torch.cat(self.i_gt, 0).long(), + x_gt=torch.cat(self.x_gt, 0), + y_gt=torch.cat(self.y_gt, 0), + u_gt=torch.cat(self.u_gt, 0), + v_gt=torch.cat(self.v_gt, 0), + # ignore segmentation annotations, if not all the instances contain those + coarse_segm_gt=torch.cat(self.s_gt, 0) + if len(self.s_gt) == len(self.bbox_xywh_gt) + else None, + bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0), + bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0), + point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0).long(), + point_bbox_indices=torch.cat(self.point_bbox_indices, 0).long(), + bbox_indices=torch.as_tensor( + self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device + ).long(), + ) + + +def 
extract_packed_annotations_from_matches( + proposals_with_targets: List[Instances], accumulator: AnnotationsAccumulator +) -> Any: + for proposals_targets_per_image in proposals_with_targets: + accumulator.accumulate(proposals_targets_per_image) + return accumulator.pack() + + +def sample_random_indices( + n_indices: int, n_samples: int, device: Optional[torch.device] = None +) -> Optional[torch.Tensor]: + """ + Samples `n_samples` random indices from range `[0..n_indices - 1]`. + If `n_indices` is smaller than `n_samples`, returns `None` meaning that all indices + are selected. + Args: + n_indices (int): total number of indices + n_samples (int): number of indices to sample + device (torch.device): the desired device of returned tensor + Return: + Tensor of selected vertex indices, or `None`, if all vertices are selected + """ + if (n_samples <= 0) or (n_indices <= n_samples): + return None + indices = torch.randperm(n_indices, device=device)[:n_samples] + return indices diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/__init__.py new file mode 100644 index 0000000..1ece075 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .chart import DensePoseChartPredictor +from .chart_confidence import DensePoseChartConfidencePredictorMixin +from .chart_with_confidence import DensePoseChartWithConfidencePredictor +from .cse import DensePoseEmbeddingPredictor +from .cse_confidence import DensePoseEmbeddingConfidencePredictorMixin +from .cse_with_confidence import DensePoseEmbeddingWithConfidencePredictor +from .registry import DENSEPOSE_PREDICTOR_REGISTRY diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart.py new file mode 100644 index 0000000..3bcd13f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart.py @@ -0,0 +1,94 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import torch +from torch import nn + +from detectron2.config import CfgNode +from detectron2.layers import ConvTranspose2d, interpolate + +from ...structures import DensePoseChartPredictorOutput +from ..utils import initialize_module_params +from .registry import DENSEPOSE_PREDICTOR_REGISTRY + + +@DENSEPOSE_PREDICTOR_REGISTRY.register() +class DensePoseChartPredictor(nn.Module): + """ + Predictor (last layers of a DensePose model) that takes DensePose head outputs as an input + and produces 4 tensors which represent DensePose results for predefined body parts + (patches / charts): + * coarse segmentation, a tensor of shape [N, K, Hout, Wout] + * fine segmentation, a tensor of shape [N, C, Hout, Wout] + * U coordinates, a tensor of shape [N, C, Hout, Wout] + * V coordinates, a tensor of shape [N, C, Hout, Wout] + where + - N is the number of instances + - K is the number of coarse segmentation channels ( + 2 = foreground / background, + 15 = one of 14 body parts / background) + - C is the number of fine segmentation channels ( + 24 fine body parts / background) + - Hout and Wout are height and width of predictions + """ + + def __init__(self, cfg: CfgNode, input_channels: int): + """ + Initialize predictor using configuration options + + Args: + cfg (CfgNode): configuration options + input_channels (int): input tensor size along the channel dimension + """ + super().__init__() + dim_in = input_channels + n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1 + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL + # coarse segmentation + self.ann_index_lowres = ConvTranspose2d( + dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + # fine segmentation + self.index_uv_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + # U + self.u_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + # V + self.v_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE + initialize_module_params(self) + + def interp2d(self, tensor_nchw: torch.Tensor): + """ + Bilinear interpolation method to be used for upscaling + + Args: + tensor_nchw (tensor): tensor of shape (N, C, H, W) + Return: + tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed + by applying the scale factor to H and W + """ + return interpolate( + tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False + ) + + def forward(self, head_outputs: torch.Tensor): + """ + Perform forward step on DensePose head outputs + + Args: + head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W] + Return: + An instance of DensePoseChartPredictorOutput + """ + return DensePoseChartPredictorOutput( + coarse_segm=self.interp2d(self.ann_index_lowres(head_outputs)), + fine_segm=self.interp2d(self.index_uv_lowres(head_outputs)), + u=self.interp2d(self.u_lowres(head_outputs)), + v=self.interp2d(self.v_lowres(head_outputs)), + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart_confidence.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart_confidence.py new file mode 100644 index 0000000..0c00999 --- /dev/null +++ 
b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart_confidence.py @@ -0,0 +1,174 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any +import torch +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.layers import ConvTranspose2d + +from ...structures import decorate_predictor_output_class_with_confidences +from ..confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType +from ..utils import initialize_module_params + + +class DensePoseChartConfidencePredictorMixin: + """ + Predictor contains the last layers of a DensePose model that take DensePose head + outputs as an input and produce model outputs. Confidence predictor mixin is used + to generate confidences for segmentation and UV tensors estimated by some + base predictor. Several assumptions need to hold for the base predictor: + 1) the `forward` method must return SIUV tuple as the first result ( + S = coarse segmentation, I = fine segmentation, U and V are intrinsic + chart coordinates) + 2) `interp2d` method must be defined to perform bilinear interpolation; + the same method is typically used for SIUV and confidences + Confidence predictor mixin provides confidence estimates, as described in: + N. Neverova et al., Correlated Uncertainty for Learning Dense Correspondences + from Noisy Labels, NeurIPS 2019 + A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020 + """ + + def __init__(self, cfg: CfgNode, input_channels: int): + """ + Initialize confidence predictor using configuration options. + + Args: + cfg (CfgNode): configuration options + input_channels (int): number of input channels + """ + # we rely on base predictor to call nn.Module.__init__ + super().__init__(cfg, input_channels) # pyre-ignore[19] + self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg) + self._initialize_confidence_estimation_layers(cfg, input_channels) + self._registry = {} + initialize_module_params(self) # pyre-ignore[6] + + def _initialize_confidence_estimation_layers(self, cfg: CfgNode, dim_in: int): + """ + Initialize confidence estimation layers based on configuration options + + Args: + cfg (CfgNode): configuration options + dim_in (int): number of input channels + """ + dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1 + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL + if self.confidence_model_cfg.uv_confidence.enabled: + if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: + self.sigma_2_lowres = ConvTranspose2d( # pyre-ignore[16] + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + elif ( + self.confidence_model_cfg.uv_confidence.type + == DensePoseUVConfidenceType.INDEP_ANISO + ): + self.sigma_2_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.kappa_u_lowres = ConvTranspose2d( # pyre-ignore[16] + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.kappa_v_lowres = ConvTranspose2d( # pyre-ignore[16] + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + else: + raise ValueError( + f"Unknown confidence model type: " + f"{self.confidence_model_cfg.confidence_model_type}" + ) + if self.confidence_model_cfg.segm_confidence.enabled: + self.fine_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16] + dim_in, 1, kernel_size, stride=2, 
padding=int(kernel_size / 2 - 1) + ) + self.coarse_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16] + dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + + def forward(self, head_outputs: torch.Tensor): + """ + Perform forward operation on head outputs used as inputs for the predictor. + Calls forward method from the base predictor and uses its outputs to compute + confidences. + + Args: + head_outputs (Tensor): head outputs used as predictor inputs + Return: + An instance of outputs with confidences, + see `decorate_predictor_output_class_with_confidences` + """ + # assuming base class returns SIUV estimates in its first result + base_predictor_outputs = super().forward(head_outputs) # pyre-ignore[16] + + # create output instance by extending base predictor outputs: + output = self._create_output_instance(base_predictor_outputs) + + if self.confidence_model_cfg.uv_confidence.enabled: + if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: + # assuming base class defines interp2d method for bilinear interpolation + output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs)) # pyre-ignore[16] + elif ( + self.confidence_model_cfg.uv_confidence.type + == DensePoseUVConfidenceType.INDEP_ANISO + ): + # assuming base class defines interp2d method for bilinear interpolation + output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs)) + output.kappa_u = self.interp2d(self.kappa_u_lowres(head_outputs)) # pyre-ignore[16] + output.kappa_v = self.interp2d(self.kappa_v_lowres(head_outputs)) # pyre-ignore[16] + else: + raise ValueError( + f"Unknown confidence model type: " + f"{self.confidence_model_cfg.confidence_model_type}" + ) + if self.confidence_model_cfg.segm_confidence.enabled: + # base predictor outputs are assumed to have `fine_segm` and `coarse_segm` attributes + # base predictor is assumed to define `interp2d` method for bilinear interpolation + output.fine_segm_confidence = ( + F.softplus( + self.interp2d(self.fine_segm_confidence_lowres(head_outputs)) # pyre-ignore[16] + ) + + self.confidence_model_cfg.segm_confidence.epsilon + ) + output.fine_segm = base_predictor_outputs.fine_segm * torch.repeat_interleave( + output.fine_segm_confidence, base_predictor_outputs.fine_segm.shape[1], dim=1 + ) + output.coarse_segm_confidence = ( + F.softplus( + self.interp2d( + self.coarse_segm_confidence_lowres(head_outputs) # pyre-ignore[16] + ) + ) + + self.confidence_model_cfg.segm_confidence.epsilon + ) + output.coarse_segm = base_predictor_outputs.coarse_segm * torch.repeat_interleave( + output.coarse_segm_confidence, base_predictor_outputs.coarse_segm.shape[1], dim=1 + ) + + return output + + def _create_output_instance(self, base_predictor_outputs: Any): + """ + Create an instance of predictor outputs by copying the outputs from the + base predictor and initializing confidence + + Args: + base_predictor_outputs: an instance of base predictor outputs + (the outputs type is assumed to be a dataclass) + Return: + An instance of outputs with confidences + """ + PredictorOutput = decorate_predictor_output_class_with_confidences( + type(base_predictor_outputs) # pyre-ignore[6] + ) + # base_predictor_outputs is assumed to be a dataclass + # reassign all the fields from base_predictor_outputs (no deep copy!), add new fields + output = PredictorOutput( + **base_predictor_outputs.__dict__, + coarse_segm_confidence=None, + fine_segm_confidence=None, + sigma_1=None, + sigma_2=None, + kappa_u=None, + kappa_v=None, + ) + return output 
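
The confidence mixin above never instantiates or references the base predictor directly; it relies on cooperative multiple inheritance. In `DensePoseChartWithConfidencePredictor` (the next file) the mixin is listed before `DensePoseChartPredictor`, so `super().forward(head_outputs)` inside the mixin resolves to the base predictor's `forward`, and the mixin only decorates the returned dataclass with confidence fields. A minimal, self-contained sketch of that composition pattern (toy class names and values for illustration only, no detectron2 dependency):

```python
# Illustrative sketch of the confidence-mixin composition pattern.
# BasePredictor, ConfidenceMixin and PredictorWithConfidence are toy
# stand-ins, not detectron2/DensePose classes.
from dataclasses import dataclass, replace
from typing import Optional


@dataclass
class Output:
    value: float
    confidence: Optional[float] = None


class BasePredictor:
    def forward(self, x: float) -> Output:
        # base predictor: produces the primary estimate only
        return Output(value=2.0 * x)


class ConfidenceMixin:
    def forward(self, x: float) -> Output:
        # runs first in the MRO, delegates to the base forward(),
        # then extends the returned output with a confidence field
        base_out = super().forward(x)
        return replace(base_out, confidence=1.0 / (1.0 + abs(base_out.value)))


class PredictorWithConfidence(ConfidenceMixin, BasePredictor):
    """Mixin listed first so its forward() wraps the base forward()."""


if __name__ == "__main__":
    print(PredictorWithConfidence().forward(3.0))
    # Output(value=6.0, confidence=0.14285714285714285)
```

The same layering explains the assumptions stated in the mixin's docstring: `interp2d`, `fine_segm` and `coarse_segm` are expected to be provided by whichever base predictor sits after the mixin in the MRO.
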
diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart_with_confidence.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart_with_confidence.py new file mode 100644 index 0000000..9c1cd6c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/chart_with_confidence.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from . import DensePoseChartConfidencePredictorMixin, DensePoseChartPredictor +from .registry import DENSEPOSE_PREDICTOR_REGISTRY + + +@DENSEPOSE_PREDICTOR_REGISTRY.register() +class DensePoseChartWithConfidencePredictor( + DensePoseChartConfidencePredictorMixin, DensePoseChartPredictor +): + """ + Predictor that combines chart and chart confidence estimation + """ + + pass diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse.py new file mode 100644 index 0000000..466a5ec --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse.py @@ -0,0 +1,70 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import torch +from torch import nn + +from detectron2.config import CfgNode +from detectron2.layers import ConvTranspose2d, interpolate + +from ...structures import DensePoseEmbeddingPredictorOutput +from ..utils import initialize_module_params +from .registry import DENSEPOSE_PREDICTOR_REGISTRY + + +@DENSEPOSE_PREDICTOR_REGISTRY.register() +class DensePoseEmbeddingPredictor(nn.Module): + """ + Last layers of a DensePose model that take DensePose head outputs as an input + and produce model outputs for continuous surface embeddings (CSE). 
+ """ + + def __init__(self, cfg: CfgNode, input_channels: int): + """ + Initialize predictor using configuration options + + Args: + cfg (CfgNode): configuration options + input_channels (int): input tensor size along the channel dimension + """ + super().__init__() + dim_in = input_channels + n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL + # coarse segmentation + self.coarse_segm_lowres = ConvTranspose2d( + dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + # embedding + self.embed_lowres = ConvTranspose2d( + dim_in, embed_size, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE + initialize_module_params(self) + + def interp2d(self, tensor_nchw: torch.Tensor): + """ + Bilinear interpolation method to be used for upscaling + + Args: + tensor_nchw (tensor): tensor of shape (N, C, H, W) + Return: + tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed + by applying the scale factor to H and W + """ + return interpolate( + tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False + ) + + def forward(self, head_outputs): + """ + Perform forward step on DensePose head outputs + + Args: + head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W] + """ + embed_lowres = self.embed_lowres(head_outputs) + coarse_segm_lowres = self.coarse_segm_lowres(head_outputs) + embed = self.interp2d(embed_lowres) + coarse_segm = self.interp2d(coarse_segm_lowres) + return DensePoseEmbeddingPredictorOutput(embedding=embed, coarse_segm=coarse_segm) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse_confidence.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse_confidence.py new file mode 100644 index 0000000..8220337 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse_confidence.py @@ -0,0 +1,115 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from typing import Any +import torch +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.layers import ConvTranspose2d + +from densepose.modeling.confidence import DensePoseConfidenceModelConfig +from densepose.modeling.utils import initialize_module_params +from densepose.structures import decorate_cse_predictor_output_class_with_confidences + + +class DensePoseEmbeddingConfidencePredictorMixin: + """ + Predictor contains the last layers of a DensePose model that take DensePose head + outputs as an input and produce model outputs. Confidence predictor mixin is used + to generate confidences for coarse segmentation estimated by some + base predictor. Several assumptions need to hold for the base predictor: + 1) the `forward` method must return CSE DensePose head outputs, + tensor of shape [N, D, H, W] + 2) `interp2d` method must be defined to perform bilinear interpolation; + the same method is typically used for masks and confidences + Confidence predictor mixin provides confidence estimates, as described in: + N. Neverova et al., Correlated Uncertainty for Learning Dense Correspondences + from Noisy Labels, NeurIPS 2019 + A. 
Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020 + """ + + def __init__(self, cfg: CfgNode, input_channels: int): + """ + Initialize confidence predictor using configuration options. + + Args: + cfg (CfgNode): configuration options + input_channels (int): number of input channels + """ + # we rely on base predictor to call nn.Module.__init__ + super().__init__(cfg, input_channels) # pyre-ignore[19] + self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg) + self._initialize_confidence_estimation_layers(cfg, input_channels) + self._registry = {} + initialize_module_params(self) # pyre-ignore[6] + + def _initialize_confidence_estimation_layers(self, cfg: CfgNode, dim_in: int): + """ + Initialize confidence estimation layers based on configuration options + + Args: + cfg (CfgNode): configuration options + dim_in (int): number of input channels + """ + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL + if self.confidence_model_cfg.segm_confidence.enabled: + self.coarse_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16] + dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + + def forward(self, head_outputs: torch.Tensor): + """ + Perform forward operation on head outputs used as inputs for the predictor. + Calls forward method from the base predictor and uses its outputs to compute + confidences. + + Args: + head_outputs (Tensor): head outputs used as predictor inputs + Return: + An instance of outputs with confidences, + see `decorate_cse_predictor_output_class_with_confidences` + """ + # assuming base class returns SIUV estimates in its first result + base_predictor_outputs = super().forward(head_outputs) # pyre-ignore[16] + + # create output instance by extending base predictor outputs: + output = self._create_output_instance(base_predictor_outputs) + + if self.confidence_model_cfg.segm_confidence.enabled: + # base predictor outputs are assumed to have `coarse_segm` attribute + # base predictor is assumed to define `interp2d` method for bilinear interpolation + output.coarse_segm_confidence = ( + F.softplus( + self.interp2d( # pyre-ignore[16] + self.coarse_segm_confidence_lowres(head_outputs) # pyre-ignore[16] + ) + ) + + self.confidence_model_cfg.segm_confidence.epsilon + ) + output.coarse_segm = base_predictor_outputs.coarse_segm * torch.repeat_interleave( + output.coarse_segm_confidence, base_predictor_outputs.coarse_segm.shape[1], dim=1 + ) + + return output + + def _create_output_instance(self, base_predictor_outputs: Any): + """ + Create an instance of predictor outputs by copying the outputs from the + base predictor and initializing confidence + + Args: + base_predictor_outputs: an instance of base predictor outputs + (the outputs type is assumed to be a dataclass) + Return: + An instance of outputs with confidences + """ + PredictorOutput = decorate_cse_predictor_output_class_with_confidences( + type(base_predictor_outputs) # pyre-ignore[6] + ) + # base_predictor_outputs is assumed to be a dataclass + # reassign all the fields from base_predictor_outputs (no deep copy!), add new fields + output = PredictorOutput( + **base_predictor_outputs.__dict__, + coarse_segm_confidence=None, + ) + return output diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse_with_confidence.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse_with_confidence.py new file mode 100644 index 0000000..17ecef6 --- /dev/null +++ 
b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/cse_with_confidence.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from . import DensePoseEmbeddingConfidencePredictorMixin, DensePoseEmbeddingPredictor +from .registry import DENSEPOSE_PREDICTOR_REGISTRY + + +@DENSEPOSE_PREDICTOR_REGISTRY.register() +class DensePoseEmbeddingWithConfidencePredictor( + DensePoseEmbeddingConfidencePredictorMixin, DensePoseEmbeddingPredictor +): + """ + Predictor that combines CSE and CSE confidence estimation + """ + + pass diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/registry.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/registry.py new file mode 100644 index 0000000..f96901d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/predictors/registry.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from detectron2.utils.registry import Registry + +DENSEPOSE_PREDICTOR_REGISTRY = Registry("DENSEPOSE_PREDICTOR") diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/__init__.py new file mode 100644 index 0000000..8403589 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from .v1convx import DensePoseV1ConvXHead +from .deeplab import DensePoseDeepLabHead +from .registry import ROI_DENSEPOSE_HEAD_REGISTRY +from .roi_head import Decoder, DensePoseROIHeads diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/deeplab.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/deeplab.py new file mode 100644 index 0000000..4e5cb48 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/deeplab.py @@ -0,0 +1,263 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.layers import Conv2d + +from .registry import ROI_DENSEPOSE_HEAD_REGISTRY + + +@ROI_DENSEPOSE_HEAD_REGISTRY.register() +class DensePoseDeepLabHead(nn.Module): + """ + DensePose head using DeepLabV3 model from + "Rethinking Atrous Convolution for Semantic Image Segmentation" + . 
+ """ + + def __init__(self, cfg: CfgNode, input_channels: int): + super(DensePoseDeepLabHead, self).__init__() + # fmt: off + hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL + norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM + self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS + self.use_nonlocal = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON + # fmt: on + pad_size = kernel_size // 2 + n_channels = input_channels + + self.ASPP = ASPP(input_channels, [6, 12, 56], n_channels) # 6, 12, 56 + self.add_module("ASPP", self.ASPP) + + if self.use_nonlocal: + self.NLBlock = NONLocalBlock2D(input_channels, bn_layer=True) + self.add_module("NLBlock", self.NLBlock) + # weight_init.c2_msra_fill(self.ASPP) + + for i in range(self.n_stacked_convs): + norm_module = nn.GroupNorm(32, hidden_dim) if norm == "GN" else None + layer = Conv2d( + n_channels, + hidden_dim, + kernel_size, + stride=1, + padding=pad_size, + bias=not norm, + norm=norm_module, + ) + weight_init.c2_msra_fill(layer) + n_channels = hidden_dim + layer_name = self._get_layer_name(i) + self.add_module(layer_name, layer) + self.n_out_channels = hidden_dim + # initialize_module_params(self) + + def forward(self, features): + x0 = features + x = self.ASPP(x0) + if self.use_nonlocal: + x = self.NLBlock(x) + output = x + for i in range(self.n_stacked_convs): + layer_name = self._get_layer_name(i) + x = getattr(self, layer_name)(x) + x = F.relu(x) + output = x + return output + + def _get_layer_name(self, i: int): + layer_name = "body_conv_fcn{}".format(i + 1) + return layer_name + + +# Copied from +# https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py +# See https://arxiv.org/pdf/1706.05587.pdf for details +class ASPPConv(nn.Sequential): + def __init__(self, in_channels, out_channels, dilation): + modules = [ + nn.Conv2d( + in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False + ), + nn.GroupNorm(32, out_channels), + nn.ReLU(), + ] + super(ASPPConv, self).__init__(*modules) + + +class ASPPPooling(nn.Sequential): + def __init__(self, in_channels, out_channels): + super(ASPPPooling, self).__init__( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.GroupNorm(32, out_channels), + nn.ReLU(), + ) + + def forward(self, x): + size = x.shape[-2:] + x = super(ASPPPooling, self).forward(x) + return F.interpolate(x, size=size, mode="bilinear", align_corners=False) + + +class ASPP(nn.Module): + def __init__(self, in_channels, atrous_rates, out_channels): + super(ASPP, self).__init__() + modules = [] + modules.append( + nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.GroupNorm(32, out_channels), + nn.ReLU(), + ) + ) + + rate1, rate2, rate3 = tuple(atrous_rates) + modules.append(ASPPConv(in_channels, out_channels, rate1)) + modules.append(ASPPConv(in_channels, out_channels, rate2)) + modules.append(ASPPConv(in_channels, out_channels, rate3)) + modules.append(ASPPPooling(in_channels, out_channels)) + + self.convs = nn.ModuleList(modules) + + self.project = nn.Sequential( + nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), + # nn.BatchNorm2d(out_channels), + nn.ReLU() + # nn.Dropout(0.5) + ) + + def forward(self, x): + res = [] + for conv in self.convs: + res.append(conv(x)) + res = torch.cat(res, dim=1) + return self.project(res) + 
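+# Shape summary for the ASPP module above (tensors written [N, C, H, W]): the input
+# is fed through five parallel branches -- a 1x1 conv, three 3x3 atrous convs with
+# the dilation rates passed in by DensePoseDeepLabHead (6, 12, 56), and a global
+# average-pooling branch upsampled back to H x W -- each producing
+# [N, out_channels, H, W]. The five outputs are concatenated to
+# [N, 5 * out_channels, H, W] and projected back to [N, out_channels, H, W];
+# the head instantiates ASPP with out_channels equal to its input_channels.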
+ +# copied from +# https://github.com/AlexHex7/Non-local_pytorch/blob/master/lib/non_local_embedded_gaussian.py +# See https://arxiv.org/abs/1711.07971 for details +class _NonLocalBlockND(nn.Module): + def __init__( + self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True + ): + super(_NonLocalBlockND, self).__init__() + + assert dimension in [1, 2, 3] + + self.dimension = dimension + self.sub_sample = sub_sample + + self.in_channels = in_channels + self.inter_channels = inter_channels + + if self.inter_channels is None: + self.inter_channels = in_channels // 2 + if self.inter_channels == 0: + self.inter_channels = 1 + + if dimension == 3: + conv_nd = nn.Conv3d + max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2)) + bn = nn.GroupNorm # (32, hidden_dim) #nn.BatchNorm3d + elif dimension == 2: + conv_nd = nn.Conv2d + max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2)) + bn = nn.GroupNorm # (32, hidden_dim)nn.BatchNorm2d + else: + conv_nd = nn.Conv1d + max_pool_layer = nn.MaxPool1d(kernel_size=2) + bn = nn.GroupNorm # (32, hidden_dim)nn.BatchNorm1d + + self.g = conv_nd( + in_channels=self.in_channels, + out_channels=self.inter_channels, + kernel_size=1, + stride=1, + padding=0, + ) + + if bn_layer: + self.W = nn.Sequential( + conv_nd( + in_channels=self.inter_channels, + out_channels=self.in_channels, + kernel_size=1, + stride=1, + padding=0, + ), + bn(32, self.in_channels), + ) + nn.init.constant_(self.W[1].weight, 0) + nn.init.constant_(self.W[1].bias, 0) + else: + self.W = conv_nd( + in_channels=self.inter_channels, + out_channels=self.in_channels, + kernel_size=1, + stride=1, + padding=0, + ) + nn.init.constant_(self.W.weight, 0) + nn.init.constant_(self.W.bias, 0) + + self.theta = conv_nd( + in_channels=self.in_channels, + out_channels=self.inter_channels, + kernel_size=1, + stride=1, + padding=0, + ) + self.phi = conv_nd( + in_channels=self.in_channels, + out_channels=self.inter_channels, + kernel_size=1, + stride=1, + padding=0, + ) + + if sub_sample: + self.g = nn.Sequential(self.g, max_pool_layer) + self.phi = nn.Sequential(self.phi, max_pool_layer) + + def forward(self, x): + """ + :param x: (b, c, t, h, w) + :return: + """ + + batch_size = x.size(0) + + g_x = self.g(x).view(batch_size, self.inter_channels, -1) + g_x = g_x.permute(0, 2, 1) + + theta_x = self.theta(x).view(batch_size, self.inter_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + phi_x = self.phi(x).view(batch_size, self.inter_channels, -1) + f = torch.matmul(theta_x, phi_x) + f_div_C = F.softmax(f, dim=-1) + + y = torch.matmul(f_div_C, g_x) + y = y.permute(0, 2, 1).contiguous() + y = y.view(batch_size, self.inter_channels, *x.size()[2:]) + W_y = self.W(y) + z = W_y + x + + return z + + +class NONLocalBlock2D(_NonLocalBlockND): + def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True): + super(NONLocalBlock2D, self).__init__( + in_channels, + inter_channels=inter_channels, + dimension=2, + sub_sample=sub_sample, + bn_layer=bn_layer, + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/registry.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/registry.py new file mode 100644 index 0000000..e1cea43 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/registry.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +from detectron2.utils.registry import Registry + +ROI_DENSEPOSE_HEAD_REGISTRY = Registry("ROI_DENSEPOSE_HEAD") diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/roi_head.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/roi_head.py new file mode 100644 index 0000000..8f9d9a6 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/roi_head.py @@ -0,0 +1,221 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import numpy as np +from typing import Dict, List, Optional +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn as nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ShapeSpec, get_norm +from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads +from detectron2.modeling.poolers import ROIPooler +from detectron2.modeling.roi_heads import select_foreground_proposals +from detectron2.structures import ImageList, Instances + +from .. import ( + build_densepose_data_filter, + build_densepose_embedder, + build_densepose_head, + build_densepose_losses, + build_densepose_predictor, + densepose_inference, +) + + +class Decoder(nn.Module): + """ + A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper + (https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from + all levels of the FPN into single output. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], in_features): + super(Decoder, self).__init__() + + # fmt: off + self.in_features = in_features + feature_strides = {k: v.stride for k, v in input_shape.items()} + feature_channels = {k: v.channels for k, v in input_shape.items()} + num_classes = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES + conv_dims = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS + self.common_stride = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE + norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM + # fmt: on + + self.scale_heads = [] + for in_feature in self.in_features: + head_ops = [] + head_length = max( + 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)) + ) + for k in range(head_length): + conv = Conv2d( + feature_channels[in_feature] if k == 0 else conv_dims, + conv_dims, + kernel_size=3, + stride=1, + padding=1, + bias=not norm, + norm=get_norm(norm, conv_dims), + activation=F.relu, + ) + weight_init.c2_msra_fill(conv) + head_ops.append(conv) + if feature_strides[in_feature] != self.common_stride: + head_ops.append( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) + ) + self.scale_heads.append(nn.Sequential(*head_ops)) + self.add_module(in_feature, self.scale_heads[-1]) + self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) + weight_init.c2_msra_fill(self.predictor) + + def forward(self, features: List[torch.Tensor]): + for i, _ in enumerate(self.in_features): + if i == 0: + x = self.scale_heads[i](features[i]) + else: + x = x + self.scale_heads[i](features[i]) + x = self.predictor(x) + return x + + +@ROI_HEADS_REGISTRY.register() +class DensePoseROIHeads(StandardROIHeads): + """ + A Standard ROIHeads which contains an addition of DensePose head. 
+ """ + + def __init__(self, cfg, input_shape): + super().__init__(cfg, input_shape) + self._init_densepose_head(cfg, input_shape) + + def _init_densepose_head(self, cfg, input_shape): + # fmt: off + self.densepose_on = cfg.MODEL.DENSEPOSE_ON + if not self.densepose_on: + return + self.densepose_data_filter = build_densepose_data_filter(cfg) + dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION + dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO + dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE + self.use_decoder = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON + # fmt: on + if self.use_decoder: + dp_pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,) + else: + dp_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features) + in_channels = [input_shape[f].channels for f in self.in_features][0] + + if self.use_decoder: + self.decoder = Decoder(cfg, input_shape, self.in_features) + + self.densepose_pooler = ROIPooler( + output_size=dp_pooler_resolution, + scales=dp_pooler_scales, + sampling_ratio=dp_pooler_sampling_ratio, + pooler_type=dp_pooler_type, + ) + self.densepose_head = build_densepose_head(cfg, in_channels) + self.densepose_predictor = build_densepose_predictor( + cfg, self.densepose_head.n_out_channels + ) + self.densepose_losses = build_densepose_losses(cfg) + self.embedder = build_densepose_embedder(cfg) + + def _forward_densepose(self, features: Dict[str, torch.Tensor], instances: List[Instances]): + """ + Forward logic of the densepose prediction branch. + + Args: + features (dict[str, Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + instances (list[Instances]): length `N` list of `Instances`. The i-th + `Instances` contains instances for the i-th input image, + In training, they can be the proposals. + In inference, they can be the predicted boxes. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "densepose" and return it. + """ + if not self.densepose_on: + return {} if self.training else instances + + features_list = [features[f] for f in self.in_features] + if self.training: + proposals, _ = select_foreground_proposals(instances, self.num_classes) + features_list, proposals = self.densepose_data_filter(features_list, proposals) + if len(proposals) > 0: + proposal_boxes = [x.proposal_boxes for x in proposals] + + if self.use_decoder: + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a + # function. + features_list = [self.decoder(features_list)] + + features_dp = self.densepose_pooler(features_list, proposal_boxes) + densepose_head_outputs = self.densepose_head(features_dp) + densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs) + densepose_loss_dict = self.densepose_losses( + proposals, densepose_predictor_outputs, embedder=self.embedder + ) + return densepose_loss_dict + else: + pred_boxes = [x.pred_boxes for x in instances] + + if self.use_decoder: + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function. 
+ features_list = [self.decoder(features_list)] + + features_dp = self.densepose_pooler(features_list, pred_boxes) + if len(features_dp) > 0: + densepose_head_outputs = self.densepose_head(features_dp) + densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs) + else: + densepose_predictor_outputs = None + + densepose_inference(densepose_predictor_outputs, instances) + return instances + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + proposals: List[Instances], + targets: Optional[List[Instances]] = None, + ): + instances, losses = super().forward(images, features, proposals, targets) + del targets, images + + if self.training: + losses.update(self._forward_densepose(features, instances)) + return instances, losses + + def forward_with_given_boxes( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ): + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + This is useful for downstream tasks where a box is known, but need to obtain + other attributes (outputs of other heads). + Test-time augmentation also uses this. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + instances (list[Instances]): + the same `Instances` objects, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + + instances = super().forward_with_given_boxes(features, instances) + instances = self._forward_densepose(features, instances) + return instances diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/v1convx.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/v1convx.py new file mode 100644 index 0000000..df79f65 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/roi_heads/v1convx.py @@ -0,0 +1,64 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.layers import Conv2d + +from ..utils import initialize_module_params +from .registry import ROI_DENSEPOSE_HEAD_REGISTRY + + +@ROI_DENSEPOSE_HEAD_REGISTRY.register() +class DensePoseV1ConvXHead(nn.Module): + """ + Fully convolutional DensePose head. 
+ """ + + def __init__(self, cfg: CfgNode, input_channels: int): + """ + Initialize DensePose fully convolutional head + + Args: + cfg (CfgNode): configuration options + input_channels (int): number of input channels + """ + super(DensePoseV1ConvXHead, self).__init__() + # fmt: off + hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL + self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS + # fmt: on + pad_size = kernel_size // 2 + n_channels = input_channels + for i in range(self.n_stacked_convs): + layer = Conv2d(n_channels, hidden_dim, kernel_size, stride=1, padding=pad_size) + layer_name = self._get_layer_name(i) + self.add_module(layer_name, layer) + n_channels = hidden_dim + self.n_out_channels = n_channels + initialize_module_params(self) + + def forward(self, features: torch.Tensor): + """ + Apply DensePose fully convolutional head to the input features + + Args: + features (tensor): input features + Result: + A tensor of DensePose head outputs + """ + x = features + output = x + for i in range(self.n_stacked_convs): + layer_name = self._get_layer_name(i) + x = getattr(self, layer_name)(x) + x = F.relu(x) + output = x + return output + + def _get_layer_name(self, i: int): + layer_name = "body_conv_fcn{}".format(i + 1) + return layer_name diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py new file mode 100644 index 0000000..ec2022e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py @@ -0,0 +1,207 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import numpy as np +import torch +from fvcore.transforms import HFlipTransform, TransformList +from torch.nn import functional as F + +from detectron2.data.transforms import RandomRotation, RotationTransform, apply_transform_gens +from detectron2.modeling.postprocessing import detector_postprocess +from detectron2.modeling.test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA + +from ..converters import HFlipConverter + + +class DensePoseDatasetMapperTTA(DatasetMapperTTA): + def __init__(self, cfg): + super().__init__(cfg=cfg) + self.angles = cfg.TEST.AUG.ROTATION_ANGLES + + def __call__(self, dataset_dict): + ret = super().__call__(dataset_dict=dataset_dict) + numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy() + for angle in self.angles: + rotate = RandomRotation(angle=angle, expand=True) + new_numpy_image, tfms = apply_transform_gens([rotate], np.copy(numpy_image)) + torch_image = torch.from_numpy(np.ascontiguousarray(new_numpy_image.transpose(2, 0, 1))) + dic = copy.deepcopy(dataset_dict) + # In DatasetMapperTTA, there is a pre_tfm transform (resize or no-op) that is + # added at the beginning of each TransformList. That's '.transforms[0]'. + dic["transforms"] = TransformList( + [ret[-1]["transforms"].transforms[0]] + tfms.transforms + ) + dic["image"] = torch_image + ret.append(dic) + return ret + + +class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA): + def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1): + """ + Args: + cfg (CfgNode): + model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. 
+ transform_data (DensePoseTransformData): contains symmetry label + transforms used for horizontal flip + tta_mapper (callable): takes a dataset dict and returns a list of + augmented versions of the dataset dict. Defaults to + `DatasetMapperTTA(cfg)`. + batch_size (int): batch the augmented images into this batch size for inference. + """ + self._transform_data = transform_data.to(model.device) + super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size) + + # the implementation follows closely the one from detectron2/modeling + def _inference_one_image(self, input): + """ + Args: + input (dict): one dataset dict with "image" field being a CHW tensor + + Returns: + dict: one output dict + """ + orig_shape = (input["height"], input["width"]) + # For some reason, resize with uint8 slightly increases box AP but decreases densepose AP + input["image"] = input["image"].to(torch.uint8) + augmented_inputs, tfms = self._get_augmented_inputs(input) + # Detect boxes from all augmented versions + with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]): + # temporarily disable roi heads + all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms) + merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape) + + if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON: + # Use the detected boxes to obtain new fields + augmented_instances = self._rescale_detected_boxes( + augmented_inputs, merged_instances, tfms + ) + # run forward on the detected boxes + outputs = self._batch_inference(augmented_inputs, augmented_instances) + # Delete now useless variables to avoid being out of memory + del augmented_inputs, augmented_instances + # average the predictions + if self.cfg.MODEL.MASK_ON: + merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms) + if self.cfg.MODEL.DENSEPOSE_ON: + merged_instances.pred_densepose = self._reduce_pred_densepose(outputs, tfms) + # postprocess + merged_instances = detector_postprocess(merged_instances, *orig_shape) + return {"instances": merged_instances} + else: + return {"instances": merged_instances} + + def _get_augmented_boxes(self, augmented_inputs, tfms): + # Heavily based on detectron2/modeling/test_time_augmentation.py + # Only difference is that RotationTransform is excluded from bbox computation + # 1: forward with all augmented images + outputs = self._batch_inference(augmented_inputs) + # 2: union the results + all_boxes = [] + all_scores = [] + all_classes = [] + for output, tfm in zip(outputs, tfms): + # Need to inverse the transforms on boxes, to obtain results on original image + if not any(isinstance(t, RotationTransform) for t in tfm.transforms): + # Some transforms can't compute bbox correctly + pred_boxes = output.pred_boxes.tensor + original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy()) + all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device)) + all_scores.extend(output.scores) + all_classes.extend(output.pred_classes) + all_boxes = torch.cat(all_boxes, dim=0) + return all_boxes, all_scores, all_classes + + def _reduce_pred_densepose(self, outputs, tfms): + # Should apply inverse transforms on densepose preds. + # We assume only rotation, resize & flip are used. 
pred_masks is a scale-invariant + # representation, so we handle the other ones specially + for idx, (output, tfm) in enumerate(zip(outputs, tfms)): + for t in tfm.transforms: + for attr in ["coarse_segm", "fine_segm", "u", "v"]: + setattr( + output.pred_densepose, + attr, + _inverse_rotation( + getattr(output.pred_densepose, attr), output.pred_boxes.tensor, t + ), + ) + if any(isinstance(t, HFlipTransform) for t in tfm.transforms): + output.pred_densepose = HFlipConverter.convert( + output.pred_densepose, self._transform_data + ) + self._incremental_avg_dp(outputs[0].pred_densepose, output.pred_densepose, idx) + return outputs[0].pred_densepose + + # incrementally computed average: u_(n + 1) = u_n + (x_(n+1) - u_n) / (n + 1). + def _incremental_avg_dp(self, avg, new_el, idx): + for attr in ["coarse_segm", "fine_segm", "u", "v"]: + setattr(avg, attr, (getattr(avg, attr) * idx + getattr(new_el, attr)) / (idx + 1)) + if idx: + # Deletion of the > 0 index intermediary values to prevent GPU OOM + setattr(new_el, attr, None) + return avg + + +def _inverse_rotation(densepose_attrs, boxes, transform): + # resample outputs to image size and rotate back the densepose preds + # on the rotated images to the space of the original image + if len(boxes) == 0 or not isinstance(transform, RotationTransform): + return densepose_attrs + boxes = boxes.int().cpu().numpy() + wh_boxes = boxes[:, 2:] - boxes[:, :2] # bboxes in the rotated space + inv_boxes = rotate_box_inverse(transform, boxes).astype(int) # bboxes in original image + wh_diff = (inv_boxes[:, 2:] - inv_boxes[:, :2] - wh_boxes) // 2 # diff between new/old bboxes + rotation_matrix = torch.tensor([transform.rm_image]).to(device=densepose_attrs.device).float() + rotation_matrix[:, :, -1] = 0 + # To apply grid_sample for rotation, we need to have enough space to fit the original and + # rotated bboxes. l_bds and r_bds are the left/right bounds that will be used to + # crop the difference once the rotation is done + l_bds = np.maximum(0, -wh_diff) + for i in range(len(densepose_attrs)): + if min(wh_boxes[i]) <= 0: + continue + densepose_attr = densepose_attrs[[i]].clone() + # 1. Interpolate densepose attribute to size of the rotated bbox + densepose_attr = F.interpolate(densepose_attr, wh_boxes[i].tolist()[::-1], mode="bilinear") + # 2. Pad the interpolated attribute so it has room for the original + rotated bbox + densepose_attr = F.pad(densepose_attr, tuple(np.repeat(np.maximum(0, wh_diff[i]), 2))) + # 3. Compute rotation grid and transform + grid = F.affine_grid(rotation_matrix, size=densepose_attr.shape) + densepose_attr = F.grid_sample(densepose_attr, grid) + # 4. 
Compute right bounds and crop the densepose_attr to the size of the original bbox + r_bds = densepose_attr.shape[2:][::-1] - l_bds[i] + densepose_attr = densepose_attr[:, :, l_bds[i][1] : r_bds[1], l_bds[i][0] : r_bds[0]] + if min(densepose_attr.shape) > 0: + # Interpolate back to the original size of the densepose attribute + densepose_attr = F.interpolate( + densepose_attr, densepose_attrs.shape[-2:], mode="bilinear" + ) + # Adding a very small probability to the background class to fill padded zones + densepose_attr[:, 0] += 1e-10 + densepose_attrs[i] = densepose_attr + return densepose_attrs + + +def rotate_box_inverse(rot_tfm, rotated_box): + """ + rotated_box is a N * 4 array of [x0, y0, x1, y1] boxes + When a bbox is rotated, it gets bigger, because we need to surround the tilted bbox + So when a bbox is rotated then inverse-rotated, it is much bigger than the original + This function aims to invert the rotation on the box, but also resize it to its original size + """ + # 1. Compute the inverse rotation of the rotated bboxes (bigger than it ) + invrot_box = rot_tfm.inverse().apply_box(rotated_box) + h, w = rotated_box[:, 3] - rotated_box[:, 1], rotated_box[:, 2] - rotated_box[:, 0] + ih, iw = invrot_box[:, 3] - invrot_box[:, 1], invrot_box[:, 2] - invrot_box[:, 0] + assert 2 * rot_tfm.abs_sin**2 != 1, "45 degrees angle can't be inverted" + # 2. Inverse the corresponding computation in the rotation transform + # to get the original height/width of the rotated boxes + orig_h = (h * rot_tfm.abs_cos - w * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin**2) + orig_w = (w * rot_tfm.abs_cos - h * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin**2) + # 3. Resize the inverse-rotated bboxes to their original size + invrot_box[:, 0] += (iw - orig_w) / 2 + invrot_box[:, 1] += (ih - orig_h) / 2 + invrot_box[:, 2] -= (iw - orig_w) / 2 + invrot_box[:, 3] -= (ih - orig_h) / 2 + + return invrot_box diff --git a/data_processing/detectron2/projects/DensePose/densepose/modeling/utils.py b/data_processing/detectron2/projects/DensePose/densepose/modeling/utils.py new file mode 100644 index 0000000..2e76eb9 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/modeling/utils.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from torch import nn + + +def initialize_module_params(module: nn.Module) -> None: + for name, param in module.named_parameters(): + if "bias" in name: + nn.init.constant_(param, 0) + elif "weight" in name: + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/structures/__init__.py new file mode 100644 index 0000000..ed32c5e --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
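The inverse-rotation math in `rotate_box_inverse` above is easy to sanity-check numerically. The sketch below is illustrative only: the box coordinates and angle are made up, and it assumes `rotate_box_inverse` can be imported from `densepose.modeling.test_time_augmentation` alongside detectron2's `RotationTransform`. It shows why the `(1 - 2·sin²)` normalization recovers the pre-rotation box size (and why a 45° angle is excluded by the assert).

```python
# Illustrative only: synthetic box and angle, not values from the repo.
import numpy as np
from detectron2.data.transforms import RotationTransform
from densepose.modeling.test_time_augmentation import rotate_box_inverse

h, w = 480, 640
tfm = RotationTransform(h, w, angle=30, expand=True)

box = np.array([[100.0, 120.0, 300.0, 280.0]])  # x0, y0, x1, y1
rotated_hull = tfm.apply_box(box)               # axis-aligned hull of the rotated box (larger)
recovered = rotate_box_inverse(tfm, rotated_hull)

# The recovered box should match the original up to small numerical error,
# because the height/width formula cancels the hull growth exactly.
print(np.abs(recovered - box).max())
```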
+ +from .chart import DensePoseChartPredictorOutput +from .chart_confidence import decorate_predictor_output_class_with_confidences +from .cse_confidence import decorate_cse_predictor_output_class_with_confidences +from .chart_result import ( + DensePoseChartResult, + DensePoseChartResultWithConfidences, + quantize_densepose_chart_result, + compress_quantized_densepose_chart_result, + decompress_compressed_densepose_chart_result, +) +from .cse import DensePoseEmbeddingPredictorOutput +from .data_relative import DensePoseDataRelative +from .list import DensePoseList +from .mesh import Mesh, create_mesh +from .transform_data import DensePoseTransformData, normalized_coords_transform diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/chart.py b/data_processing/detectron2/projects/DensePose/densepose/structures/chart.py new file mode 100644 index 0000000..115cc08 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/chart.py @@ -0,0 +1,70 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from dataclasses import dataclass +from typing import Union +import torch + + +@dataclass +class DensePoseChartPredictorOutput: + """ + Predictor output that contains segmentation and inner coordinates predictions for predefined + body parts: + * coarse segmentation, a tensor of shape [N, K, Hout, Wout] + * fine segmentation, a tensor of shape [N, C, Hout, Wout] + * U coordinates, a tensor of shape [N, C, Hout, Wout] + * V coordinates, a tensor of shape [N, C, Hout, Wout] + where + - N is the number of instances + - K is the number of coarse segmentation channels ( + 2 = foreground / background, + 15 = one of 14 body parts / background) + - C is the number of fine segmentation channels ( + 24 fine body parts / background) + - Hout and Wout are height and width of predictions + """ + + coarse_segm: torch.Tensor + fine_segm: torch.Tensor + u: torch.Tensor + v: torch.Tensor + + def __len__(self): + """ + Number of instances (N) in the output + """ + return self.coarse_segm.size(0) + + def __getitem__( + self, item: Union[int, slice, torch.BoolTensor] + ) -> "DensePoseChartPredictorOutput": + """ + Get outputs for the selected instance(s) + + Args: + item (int or slice or tensor): selected items + """ + if isinstance(item, int): + return DensePoseChartPredictorOutput( + coarse_segm=self.coarse_segm[item].unsqueeze(0), + fine_segm=self.fine_segm[item].unsqueeze(0), + u=self.u[item].unsqueeze(0), + v=self.v[item].unsqueeze(0), + ) + else: + return DensePoseChartPredictorOutput( + coarse_segm=self.coarse_segm[item], + fine_segm=self.fine_segm[item], + u=self.u[item], + v=self.v[item], + ) + + def to(self, device: torch.device): + """ + Transfers all tensors to the given device + """ + coarse_segm = self.coarse_segm.to(device) + fine_segm = self.fine_segm.to(device) + u = self.u.to(device) + v = self.v.to(device) + return DensePoseChartPredictorOutput(coarse_segm=coarse_segm, fine_segm=fine_segm, u=u, v=v) diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/chart_confidence.py b/data_processing/detectron2/projects/DensePose/densepose/structures/chart_confidence.py new file mode 100644 index 0000000..57c6325 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/chart_confidence.py @@ -0,0 +1,98 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
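A short, hedged usage sketch for the `DensePoseChartPredictorOutput` dataclass defined in `chart.py` above. The tensor contents are random placeholders and the spatial size (112×112) is arbitrary; only the channel counts follow the docstring (K = 2 coarse channels, C = 25 fine channels).

```python
# Toy example: random tensors standing in for real predictor outputs.
import torch
from densepose.structures import DensePoseChartPredictorOutput

N, K, C, H, W = 4, 2, 25, 112, 112
output = DensePoseChartPredictorOutput(
    coarse_segm=torch.randn(N, K, H, W),
    fine_segm=torch.randn(N, C, H, W),
    u=torch.rand(N, C, H, W),
    v=torch.rand(N, C, H, W),
)

assert len(output) == N                        # __len__ counts instances
single = output[1]                             # int indexing keeps a batch dim of 1
keep = torch.tensor([True, False, True, False])
subset = output[keep]                          # boolean mask selects instances
subset = subset.to(torch.device("cpu"))        # no-op on CPU, shown for completeness
print(single.u.shape, subset.fine_segm.shape)  # [1, 25, 112, 112] and [2, 25, 112, 112]
```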
+ +from dataclasses import make_dataclass +from functools import lru_cache +from typing import Any, Optional +import torch + + +@lru_cache(maxsize=None) +def decorate_predictor_output_class_with_confidences(BasePredictorOutput: type) -> type: + """ + Create a new output class from an existing one by adding new attributes + related to confidence estimation: + - sigma_1 (tensor) + - sigma_2 (tensor) + - kappa_u (tensor) + - kappa_v (tensor) + - fine_segm_confidence (tensor) + - coarse_segm_confidence (tensor) + + Details on confidence estimation parameters can be found in: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 + A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020 + + The new class inherits the provided `BasePredictorOutput` class, + it's name is composed of the name of the provided class and + "WithConfidences" suffix. + + Args: + BasePredictorOutput (type): output type to which confidence data + is to be added, assumed to be a dataclass + Return: + New dataclass derived from the provided one that has attributes + for confidence estimation + """ + + PredictorOutput = make_dataclass( + BasePredictorOutput.__name__ + "WithConfidences", + fields=[ + ("sigma_1", Optional[torch.Tensor], None), + ("sigma_2", Optional[torch.Tensor], None), + ("kappa_u", Optional[torch.Tensor], None), + ("kappa_v", Optional[torch.Tensor], None), + ("fine_segm_confidence", Optional[torch.Tensor], None), + ("coarse_segm_confidence", Optional[torch.Tensor], None), + ], + bases=(BasePredictorOutput,), + ) + + # add possibility to index PredictorOutput + + def slice_if_not_none(data, item): + if data is None: + return None + if isinstance(item, int): + return data[item].unsqueeze(0) + return data[item] + + def PredictorOutput_getitem(self, item): + PredictorOutput = type(self) + base_predictor_output_sliced = super(PredictorOutput, self).__getitem__(item) + return PredictorOutput( + **base_predictor_output_sliced.__dict__, + coarse_segm_confidence=slice_if_not_none(self.coarse_segm_confidence, item), + fine_segm_confidence=slice_if_not_none(self.fine_segm_confidence, item), + sigma_1=slice_if_not_none(self.sigma_1, item), + sigma_2=slice_if_not_none(self.sigma_2, item), + kappa_u=slice_if_not_none(self.kappa_u, item), + kappa_v=slice_if_not_none(self.kappa_v, item), + ) + + PredictorOutput.__getitem__ = PredictorOutput_getitem + + def PredictorOutput_to(self, device: torch.device): + """ + Transfers all tensors to the given device + """ + PredictorOutput = type(self) + base_predictor_output_to = super(PredictorOutput, self).to(device) # pyre-ignore[16] + + def to_device_if_tensor(var: Any): + if isinstance(var, torch.Tensor): + return var.to(device) + return var + + return PredictorOutput( + **base_predictor_output_to.__dict__, + sigma_1=to_device_if_tensor(self.sigma_1), + sigma_2=to_device_if_tensor(self.sigma_2), + kappa_u=to_device_if_tensor(self.kappa_u), + kappa_v=to_device_if_tensor(self.kappa_v), + fine_segm_confidence=to_device_if_tensor(self.fine_segm_confidence), + coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence), + ) + + PredictorOutput.to = PredictorOutput_to + return PredictorOutput diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/chart_result.py b/data_processing/detectron2/projects/DensePose/densepose/structures/chart_result.py new file mode 100644 index 0000000..003933d --- /dev/null +++ 
b/data_processing/detectron2/projects/DensePose/densepose/structures/chart_result.py @@ -0,0 +1,183 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from dataclasses import dataclass +from typing import Any, Optional, Tuple +import torch + + +@dataclass +class DensePoseChartResult: + """ + DensePose results for chart-based methods represented by labels and inner + coordinates (U, V) of individual charts. Each chart is a 2D manifold + that has an associated label and is parameterized by two coordinates U and V. + Both U and V take values in [0, 1]. + Thus the results are represented by two tensors: + - labels (tensor [H, W] of long): contains estimated label for each pixel of + the detection bounding box of size (H, W) + - uv (tensor [2, H, W] of float): contains estimated U and V coordinates + for each pixel of the detection bounding box of size (H, W) + """ + + labels: torch.Tensor + uv: torch.Tensor + + def to(self, device: torch.device): + """ + Transfers all tensors to the given device + """ + labels = self.labels.to(device) + uv = self.uv.to(device) + return DensePoseChartResult(labels=labels, uv=uv) + + +@dataclass +class DensePoseChartResultWithConfidences: + """ + We add confidence values to DensePoseChartResult + Thus the results are represented by two tensors: + - labels (tensor [H, W] of long): contains estimated label for each pixel of + the detection bounding box of size (H, W) + - uv (tensor [2, H, W] of float): contains estimated U and V coordinates + for each pixel of the detection bounding box of size (H, W) + Plus one [H, W] tensor of float for each confidence type + """ + + labels: torch.Tensor + uv: torch.Tensor + sigma_1: Optional[torch.Tensor] = None + sigma_2: Optional[torch.Tensor] = None + kappa_u: Optional[torch.Tensor] = None + kappa_v: Optional[torch.Tensor] = None + fine_segm_confidence: Optional[torch.Tensor] = None + coarse_segm_confidence: Optional[torch.Tensor] = None + + def to(self, device: torch.device): + """ + Transfers all tensors to the given device, except if their value is None + """ + + def to_device_if_tensor(var: Any): + if isinstance(var, torch.Tensor): + return var.to(device) + return var + + return DensePoseChartResultWithConfidences( + labels=self.labels.to(device), + uv=self.uv.to(device), + sigma_1=to_device_if_tensor(self.sigma_1), + sigma_2=to_device_if_tensor(self.sigma_2), + kappa_u=to_device_if_tensor(self.kappa_u), + kappa_v=to_device_if_tensor(self.kappa_v), + fine_segm_confidence=to_device_if_tensor(self.fine_segm_confidence), + coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence), + ) + + +@dataclass +class DensePoseChartResultQuantized: + """ + DensePose results for chart-based methods represented by labels and quantized + inner coordinates (U, V) of individual charts. Each chart is a 2D manifold + that has an associated label and is parameterized by two coordinates U and V. + Both U and V take values in [0, 1]. 
+ Quantized coordinates Uq and Vq have uint8 values which are obtained as: + Uq = U * 255 (hence 0 <= Uq <= 255) + Vq = V * 255 (hence 0 <= Vq <= 255) + Thus the results are represented by one tensor: + - labels_uv_uint8 (tensor [3, H, W] of uint8): contains estimated label + and quantized coordinates Uq and Vq for each pixel of the detection + bounding box of size (H, W) + """ + + labels_uv_uint8: torch.Tensor + + def to(self, device: torch.device): + """ + Transfers all tensors to the given device + """ + labels_uv_uint8 = self.labels_uv_uint8.to(device) + return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8) + + +@dataclass +class DensePoseChartResultCompressed: + """ + DensePose results for chart-based methods represented by a PNG-encoded string. + The tensor of quantized DensePose results of size [3, H, W] is considered + as an image with 3 color channels. PNG compression is applied and the result + is stored as a Base64-encoded string. The following attributes are defined: + - shape_chw (tuple of 3 int): contains shape of the result tensor + (number of channels, height, width) + - labels_uv_str (str): contains Base64-encoded results tensor of size + [3, H, W] compressed with PNG compression methods + """ + + shape_chw: Tuple[int, int, int] + labels_uv_str: str + + +def quantize_densepose_chart_result(result: DensePoseChartResult) -> DensePoseChartResultQuantized: + """ + Applies quantization to DensePose chart-based result. + + Args: + result (DensePoseChartResult): DensePose chart-based result + Return: + Quantized DensePose chart-based result (DensePoseChartResultQuantized) + """ + h, w = result.labels.shape + labels_uv_uint8 = torch.zeros([3, h, w], dtype=torch.uint8, device=result.labels.device) + labels_uv_uint8[0] = result.labels + labels_uv_uint8[1:] = (result.uv * 255).clamp(0, 255).byte() + return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8) + + +def compress_quantized_densepose_chart_result( + result: DensePoseChartResultQuantized, +) -> DensePoseChartResultCompressed: + """ + Compresses quantized DensePose chart-based result + + Args: + result (DensePoseChartResultQuantized): quantized DensePose chart-based result + Return: + Compressed DensePose chart-based result (DensePoseChartResultCompressed) + """ + import base64 + import numpy as np + from io import BytesIO + from PIL import Image + + labels_uv_uint8_np_chw = result.labels_uv_uint8.cpu().numpy() + labels_uv_uint8_np_hwc = np.moveaxis(labels_uv_uint8_np_chw, 0, -1) + im = Image.fromarray(labels_uv_uint8_np_hwc) + fstream = BytesIO() + im.save(fstream, format="png", optimize=True) + labels_uv_str = base64.encodebytes(fstream.getvalue()).decode() + shape_chw = labels_uv_uint8_np_chw.shape + return DensePoseChartResultCompressed(labels_uv_str=labels_uv_str, shape_chw=shape_chw) + + +def decompress_compressed_densepose_chart_result( + result: DensePoseChartResultCompressed, +) -> DensePoseChartResultQuantized: + """ + Decompresses DensePose chart-based result encoded into a base64 string + + Args: + result (DensePoseChartResultCompressed): compressed DensePose chart result + Return: + Quantized DensePose chart-based result (DensePoseChartResultQuantized) + """ + import base64 + import numpy as np + from io import BytesIO + from PIL import Image + + fstream = BytesIO(base64.decodebytes(result.labels_uv_str.encode())) + im = Image.open(fstream) + labels_uv_uint8_np_chw = np.moveaxis(np.array(im, dtype=np.uint8), -1, 0) + return DensePoseChartResultQuantized( + 
labels_uv_uint8=torch.from_numpy(labels_uv_uint8_np_chw.reshape(result.shape_chw)) + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/cse.py b/data_processing/detectron2/projects/DensePose/densepose/structures/cse.py new file mode 100644 index 0000000..9cd65da --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/cse.py @@ -0,0 +1,52 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from dataclasses import dataclass +from typing import Union +import torch + + +@dataclass +class DensePoseEmbeddingPredictorOutput: + """ + Predictor output that contains embedding and coarse segmentation data: + * embedding: float tensor of size [N, D, H, W], contains estimated embeddings + * coarse_segm: float tensor of size [N, K, H, W] + Here D = MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE + K = MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + """ + + embedding: torch.Tensor + coarse_segm: torch.Tensor + + def __len__(self): + """ + Number of instances (N) in the output + """ + return self.coarse_segm.size(0) + + def __getitem__( + self, item: Union[int, slice, torch.BoolTensor] + ) -> "DensePoseEmbeddingPredictorOutput": + """ + Get outputs for the selected instance(s) + + Args: + item (int or slice or tensor): selected items + """ + if isinstance(item, int): + return DensePoseEmbeddingPredictorOutput( + coarse_segm=self.coarse_segm[item].unsqueeze(0), + embedding=self.embedding[item].unsqueeze(0), + ) + else: + return DensePoseEmbeddingPredictorOutput( + coarse_segm=self.coarse_segm[item], embedding=self.embedding[item] + ) + + def to(self, device: torch.device): + """ + Transfers all tensors to the given device + """ + coarse_segm = self.coarse_segm.to(device) + embedding = self.embedding.to(device) + return DensePoseEmbeddingPredictorOutput(coarse_segm=coarse_segm, embedding=embedding) diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/cse_confidence.py b/data_processing/detectron2/projects/DensePose/densepose/structures/cse_confidence.py new file mode 100644 index 0000000..ee5166f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/cse_confidence.py @@ -0,0 +1,78 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from dataclasses import make_dataclass +from functools import lru_cache +from typing import Any, Optional +import torch + + +@lru_cache(maxsize=None) +def decorate_cse_predictor_output_class_with_confidences(BasePredictorOutput: type) -> type: + """ + Create a new output class from an existing one by adding new attributes + related to confidence estimation: + - coarse_segm_confidence (tensor) + + Details on confidence estimation parameters can be found in: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 + A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020 + + The new class inherits the provided `BasePredictorOutput` class, + it's name is composed of the name of the provided class and + "WithConfidences" suffix. 
+ + Args: + BasePredictorOutput (type): output type to which confidence data + is to be added, assumed to be a dataclass + Return: + New dataclass derived from the provided one that has attributes + for confidence estimation + """ + + PredictorOutput = make_dataclass( + BasePredictorOutput.__name__ + "WithConfidences", + fields=[ + ("coarse_segm_confidence", Optional[torch.Tensor], None), + ], + bases=(BasePredictorOutput,), + ) + + # add possibility to index PredictorOutput + + def slice_if_not_none(data, item): + if data is None: + return None + if isinstance(item, int): + return data[item].unsqueeze(0) + return data[item] + + def PredictorOutput_getitem(self, item): + PredictorOutput = type(self) + base_predictor_output_sliced = super(PredictorOutput, self).__getitem__(item) + return PredictorOutput( + **base_predictor_output_sliced.__dict__, + coarse_segm_confidence=slice_if_not_none(self.coarse_segm_confidence, item), + ) + + PredictorOutput.__getitem__ = PredictorOutput_getitem + + def PredictorOutput_to(self, device: torch.device): + """ + Transfers all tensors to the given device + """ + PredictorOutput = type(self) + base_predictor_output_to = super(PredictorOutput, self).to(device) # pyre-ignore[16] + + def to_device_if_tensor(var: Any): + if isinstance(var, torch.Tensor): + return var.to(device) + return var + + return PredictorOutput( + **base_predictor_output_to.__dict__, + coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence), + ) + + PredictorOutput.to = PredictorOutput_to + return PredictorOutput diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/data_relative.py b/data_processing/detectron2/projects/DensePose/densepose/structures/data_relative.py new file mode 100644 index 0000000..a148fa7 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/data_relative.py @@ -0,0 +1,243 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import torch +from torch.nn import functional as F + +from densepose.data.meshes.catalog import MeshCatalog +from densepose.structures.mesh import load_mesh_symmetry +from densepose.structures.transform_data import DensePoseTransformData + + +class DensePoseDataRelative(object): + """ + Dense pose relative annotations that can be applied to any bounding box: + x - normalized X coordinates [0, 255] of annotated points + y - normalized Y coordinates [0, 255] of annotated points + i - body part labels 0,...,24 for annotated points + u - body part U coordinates [0, 1] for annotated points + v - body part V coordinates [0, 1] for annotated points + segm - 256x256 segmentation mask with values 0,...,14 + To obtain absolute x and y data wrt some bounding box one needs to first + divide the data by 256, multiply by the respective bounding box size + and add bounding box offset: + x_img = x0 + x_norm * w / 256.0 + y_img = y0 + y_norm * h / 256.0 + Segmentation masks are typically sampled to get image-based masks. 
+ """ + + # Key for normalized X coordinates in annotation dict + X_KEY = "dp_x" + # Key for normalized Y coordinates in annotation dict + Y_KEY = "dp_y" + # Key for U part coordinates in annotation dict (used in chart-based annotations) + U_KEY = "dp_U" + # Key for V part coordinates in annotation dict (used in chart-based annotations) + V_KEY = "dp_V" + # Key for I point labels in annotation dict (used in chart-based annotations) + I_KEY = "dp_I" + # Key for segmentation mask in annotation dict + S_KEY = "dp_masks" + # Key for vertex ids (used in continuous surface embeddings annotations) + VERTEX_IDS_KEY = "dp_vertex" + # Key for mesh id (used in continuous surface embeddings annotations) + MESH_NAME_KEY = "ref_model" + # Number of body parts in segmentation masks + N_BODY_PARTS = 14 + # Number of parts in point labels + N_PART_LABELS = 24 + MASK_SIZE = 256 + + def __init__(self, annotation, cleanup=False): + self.x = torch.as_tensor(annotation[DensePoseDataRelative.X_KEY]) + self.y = torch.as_tensor(annotation[DensePoseDataRelative.Y_KEY]) + if ( + DensePoseDataRelative.I_KEY in annotation + and DensePoseDataRelative.U_KEY in annotation + and DensePoseDataRelative.V_KEY in annotation + ): + self.i = torch.as_tensor(annotation[DensePoseDataRelative.I_KEY]) + self.u = torch.as_tensor(annotation[DensePoseDataRelative.U_KEY]) + self.v = torch.as_tensor(annotation[DensePoseDataRelative.V_KEY]) + if ( + DensePoseDataRelative.VERTEX_IDS_KEY in annotation + and DensePoseDataRelative.MESH_NAME_KEY in annotation + ): + self.vertex_ids = torch.as_tensor( + annotation[DensePoseDataRelative.VERTEX_IDS_KEY], dtype=torch.long + ) + self.mesh_id = MeshCatalog.get_mesh_id(annotation[DensePoseDataRelative.MESH_NAME_KEY]) + if DensePoseDataRelative.S_KEY in annotation: + self.segm = DensePoseDataRelative.extract_segmentation_mask(annotation) + self.device = torch.device("cpu") + if cleanup: + DensePoseDataRelative.cleanup_annotation(annotation) + + def to(self, device): + if self.device == device: + return self + new_data = DensePoseDataRelative.__new__(DensePoseDataRelative) + new_data.x = self.x.to(device) + new_data.y = self.y.to(device) + for attr in ["i", "u", "v", "vertex_ids", "segm"]: + if hasattr(self, attr): + setattr(new_data, attr, getattr(self, attr).to(device)) + if hasattr(self, "mesh_id"): + new_data.mesh_id = self.mesh_id + new_data.device = device + return new_data + + @staticmethod + def extract_segmentation_mask(annotation): + import pycocotools.mask as mask_utils + + # TODO: annotation instance is accepted if it contains either + # DensePose segmentation or instance segmentation. 
However, here we + # only rely on DensePose segmentation + poly_specs = annotation[DensePoseDataRelative.S_KEY] + if isinstance(poly_specs, torch.Tensor): + # data is already given as mask tensors, no need to decode + return poly_specs + segm = torch.zeros((DensePoseDataRelative.MASK_SIZE,) * 2, dtype=torch.float32) + if isinstance(poly_specs, dict): + if poly_specs: + mask = mask_utils.decode(poly_specs) + segm[mask > 0] = 1 + else: + for i in range(len(poly_specs)): + poly_i = poly_specs[i] + if poly_i: + mask_i = mask_utils.decode(poly_i) + segm[mask_i > 0] = i + 1 + return segm + + @staticmethod + def validate_annotation(annotation): + for key in [ + DensePoseDataRelative.X_KEY, + DensePoseDataRelative.Y_KEY, + ]: + if key not in annotation: + return False, "no {key} data in the annotation".format(key=key) + valid_for_iuv_setting = all( + key in annotation + for key in [ + DensePoseDataRelative.I_KEY, + DensePoseDataRelative.U_KEY, + DensePoseDataRelative.V_KEY, + ] + ) + valid_for_cse_setting = all( + key in annotation + for key in [ + DensePoseDataRelative.VERTEX_IDS_KEY, + DensePoseDataRelative.MESH_NAME_KEY, + ] + ) + if not valid_for_iuv_setting and not valid_for_cse_setting: + return ( + False, + "expected either {} (IUV setting) or {} (CSE setting) annotations".format( + ", ".join( + [ + DensePoseDataRelative.I_KEY, + DensePoseDataRelative.U_KEY, + DensePoseDataRelative.V_KEY, + ] + ), + ", ".join( + [ + DensePoseDataRelative.VERTEX_IDS_KEY, + DensePoseDataRelative.MESH_NAME_KEY, + ] + ), + ), + ) + return True, None + + @staticmethod + def cleanup_annotation(annotation): + for key in [ + DensePoseDataRelative.X_KEY, + DensePoseDataRelative.Y_KEY, + DensePoseDataRelative.I_KEY, + DensePoseDataRelative.U_KEY, + DensePoseDataRelative.V_KEY, + DensePoseDataRelative.S_KEY, + DensePoseDataRelative.VERTEX_IDS_KEY, + DensePoseDataRelative.MESH_NAME_KEY, + ]: + if key in annotation: + del annotation[key] + + def apply_transform(self, transforms, densepose_transform_data): + self._transform_pts(transforms, densepose_transform_data) + if hasattr(self, "segm"): + self._transform_segm(transforms, densepose_transform_data) + + def _transform_pts(self, transforms, dp_transform_data): + import detectron2.data.transforms as T + + # NOTE: This assumes that HorizFlipTransform is the only one that does flip + do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 + if do_hflip: + self.x = self.MASK_SIZE - self.x + if hasattr(self, "i"): + self._flip_iuv_semantics(dp_transform_data) + if hasattr(self, "vertex_ids"): + self._flip_vertices() + + for t in transforms.transforms: + if isinstance(t, T.RotationTransform): + xy_scale = np.array((t.w, t.h)) / DensePoseDataRelative.MASK_SIZE + xy = t.apply_coords(np.stack((self.x, self.y), axis=1) * xy_scale) + self.x, self.y = torch.tensor(xy / xy_scale, dtype=self.x.dtype).T + + def _flip_iuv_semantics(self, dp_transform_data: DensePoseTransformData) -> None: + i_old = self.i.clone() + uv_symmetries = dp_transform_data.uv_symmetries + pt_label_symmetries = dp_transform_data.point_label_symmetries + for i in range(self.N_PART_LABELS): + if i + 1 in i_old: + annot_indices_i = i_old == i + 1 + if pt_label_symmetries[i + 1] != i + 1: + self.i[annot_indices_i] = pt_label_symmetries[i + 1] + u_loc = (self.u[annot_indices_i] * 255).long() + v_loc = (self.v[annot_indices_i] * 255).long() + self.u[annot_indices_i] = uv_symmetries["U_transforms"][i][v_loc, u_loc].to( + device=self.u.device + ) + self.v[annot_indices_i] = 
uv_symmetries["V_transforms"][i][v_loc, u_loc].to( + device=self.v.device + ) + + def _flip_vertices(self): + mesh_info = MeshCatalog[MeshCatalog.get_mesh_name(self.mesh_id)] + mesh_symmetry = ( + load_mesh_symmetry(mesh_info.symmetry) if mesh_info.symmetry is not None else None + ) + self.vertex_ids = mesh_symmetry["vertex_transforms"][self.vertex_ids] + + def _transform_segm(self, transforms, dp_transform_data): + import detectron2.data.transforms as T + + # NOTE: This assumes that HorizFlipTransform is the only one that does flip + do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 + if do_hflip: + self.segm = torch.flip(self.segm, [1]) + self._flip_segm_semantics(dp_transform_data) + + for t in transforms.transforms: + if isinstance(t, T.RotationTransform): + self._transform_segm_rotation(t) + + def _flip_segm_semantics(self, dp_transform_data): + old_segm = self.segm.clone() + mask_label_symmetries = dp_transform_data.mask_label_symmetries + for i in range(self.N_BODY_PARTS): + if mask_label_symmetries[i + 1] != i + 1: + self.segm[old_segm == i + 1] = mask_label_symmetries[i + 1] + + def _transform_segm_rotation(self, rotation): + self.segm = F.interpolate(self.segm[None, None, :], (rotation.h, rotation.w)).numpy() + self.segm = torch.tensor(rotation.apply_segmentation(self.segm[0, 0]))[None, None, :] + self.segm = F.interpolate(self.segm, [DensePoseDataRelative.MASK_SIZE] * 2)[0, 0] diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/list.py b/data_processing/detectron2/projects/DensePose/densepose/structures/list.py new file mode 100644 index 0000000..3dc40b0 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/list.py @@ -0,0 +1,70 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import torch + +from densepose.structures.data_relative import DensePoseDataRelative + + +class DensePoseList(object): + + _TORCH_DEVICE_CPU = torch.device("cpu") + + def __init__(self, densepose_datas, boxes_xyxy_abs, image_size_hw, device=_TORCH_DEVICE_CPU): + assert len(densepose_datas) == len( + boxes_xyxy_abs + ), "Attempt to initialize DensePoseList with {} DensePose datas " "and {} boxes".format( + len(densepose_datas), len(boxes_xyxy_abs) + ) + self.densepose_datas = [] + for densepose_data in densepose_datas: + assert isinstance(densepose_data, DensePoseDataRelative) or densepose_data is None, ( + "Attempt to initialize DensePoseList with DensePose datas " + "of type {}, expected DensePoseDataRelative".format(type(densepose_data)) + ) + densepose_data_ondevice = ( + densepose_data.to(device) if densepose_data is not None else None + ) + self.densepose_datas.append(densepose_data_ondevice) + self.boxes_xyxy_abs = boxes_xyxy_abs.to(device) + self.image_size_hw = image_size_hw + self.device = device + + def to(self, device): + if self.device == device: + return self + return DensePoseList(self.densepose_datas, self.boxes_xyxy_abs, self.image_size_hw, device) + + def __iter__(self): + return iter(self.densepose_datas) + + def __len__(self): + return len(self.densepose_datas) + + def __repr__(self): + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self.densepose_datas)) + s += "image_width={}, ".format(self.image_size_hw[1]) + s += "image_height={})".format(self.image_size_hw[0]) + return s + + def __getitem__(self, item): + if isinstance(item, int): + densepose_data_rel = self.densepose_datas[item] + return densepose_data_rel + elif isinstance(item, slice): + densepose_datas_rel = self.densepose_datas[item] + boxes_xyxy_abs = self.boxes_xyxy_abs[item] + return DensePoseList( + densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device + ) + elif isinstance(item, torch.Tensor) and (item.dtype == torch.bool): + densepose_datas_rel = [self.densepose_datas[i] for i, x in enumerate(item) if x > 0] + boxes_xyxy_abs = self.boxes_xyxy_abs[item] + return DensePoseList( + densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device + ) + else: + densepose_datas_rel = [self.densepose_datas[i] for i in item] + boxes_xyxy_abs = self.boxes_xyxy_abs[item] + return DensePoseList( + densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/mesh.py b/data_processing/detectron2/projects/DensePose/densepose/structures/mesh.py new file mode 100644 index 0000000..589515d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/mesh.py @@ -0,0 +1,172 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import pickle +from functools import lru_cache +from typing import Dict, Optional, Tuple +import torch + +from detectron2.utils.file_io import PathManager + +from densepose.data.meshes.catalog import MeshCatalog, MeshInfo + + +def _maybe_copy_to_device( + attribute: Optional[torch.Tensor], device: torch.device +) -> Optional[torch.Tensor]: + if attribute is None: + return None + return attribute.to(device) + + +class Mesh: + def __init__( + self, + vertices: Optional[torch.Tensor] = None, + faces: Optional[torch.Tensor] = None, + geodists: Optional[torch.Tensor] = None, + symmetry: Optional[Dict[str, torch.Tensor]] = None, + texcoords: Optional[torch.Tensor] = None, + mesh_info: Optional[MeshInfo] = None, + device: Optional[torch.device] = None, + ): + """ + Args: + vertices (tensor [N, 3] of float32): vertex coordinates in 3D + faces (tensor [M, 3] of long): triangular face represented as 3 + vertex indices + geodists (tensor [N, N] of float32): geodesic distances from + vertex `i` to vertex `j` (optional, default: None) + symmetry (dict: str -> tensor): various mesh symmetry data: + - "vertex_transforms": vertex mapping under horizontal flip, + tensor of size [N] of type long; vertex `i` is mapped to + vertex `tensor[i]` (optional, default: None) + texcoords (tensor [N, 2] of float32): texture coordinates, i.e. global + and normalized mesh UVs (optional, default: None) + mesh_info (MeshInfo type): necessary to load the attributes on-the-go, + can be used instead of passing all the variables one by one + device (torch.device): device of the Mesh. If not provided, will use + the device of the vertices + """ + self._vertices = vertices + self._faces = faces + self._geodists = geodists + self._symmetry = symmetry + self._texcoords = texcoords + self.mesh_info = mesh_info + self.device = device + + assert self._vertices is not None or self.mesh_info is not None + + all_fields = [self._vertices, self._faces, self._geodists, self._texcoords] + + if self.device is None: + for field in all_fields: + if field is not None: + self.device = field.device + break + if self.device is None and symmetry is not None: + for key in symmetry: + self.device = symmetry[key].device + break + self.device = torch.device("cpu") if self.device is None else self.device + + assert all([var.device == self.device for var in all_fields if var is not None]) + if symmetry: + assert all(symmetry[key].device == self.device for key in symmetry) + if texcoords and vertices: + assert len(vertices) == len(texcoords) + + def to(self, device: torch.device): + device_symmetry = self._symmetry + if device_symmetry: + device_symmetry = {key: value.to(device) for key, value in device_symmetry.items()} + return Mesh( + _maybe_copy_to_device(self._vertices, device), + _maybe_copy_to_device(self._faces, device), + _maybe_copy_to_device(self._geodists, device), + device_symmetry, + _maybe_copy_to_device(self._texcoords, device), + self.mesh_info, + device, + ) + + @property + def vertices(self): + if self._vertices is None and self.mesh_info is not None: + self._vertices = load_mesh_data(self.mesh_info.data, "vertices", self.device) + return self._vertices + + @property + def faces(self): + if self._faces is None and self.mesh_info is not None: + self._faces = load_mesh_data(self.mesh_info.data, "faces", self.device) + return self._faces + + @property + def geodists(self): + if self._geodists is None and self.mesh_info is not None: + self._geodists = load_mesh_auxiliary_data(self.mesh_info.geodists, self.device) + 
return self._geodists + + @property + def symmetry(self): + if self._symmetry is None and self.mesh_info is not None: + self._symmetry = load_mesh_symmetry(self.mesh_info.symmetry, self.device) + return self._symmetry + + @property + def texcoords(self): + if self._texcoords is None and self.mesh_info is not None: + self._texcoords = load_mesh_auxiliary_data(self.mesh_info.texcoords, self.device) + return self._texcoords + + def get_geodists(self): + if self.geodists is None: + self.geodists = self._compute_geodists() + return self.geodists + + def _compute_geodists(self): + # TODO: compute using Laplace-Beltrami + geodists = None + return geodists + + +def load_mesh_data( + mesh_fpath: str, field: str, device: Optional[torch.device] = None +) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: + with PathManager.open(mesh_fpath, "rb") as hFile: + # pyre-fixme[7]: Expected `Tuple[Optional[Tensor], Optional[Tensor]]` but + # got `Tensor`. + return torch.as_tensor(pickle.load(hFile)[field], dtype=torch.float).to( # pyre-ignore[6] + device + ) + return None + + +def load_mesh_auxiliary_data( + fpath: str, device: Optional[torch.device] = None +) -> Optional[torch.Tensor]: + fpath_local = PathManager.get_local_path(fpath) + with PathManager.open(fpath_local, "rb") as hFile: + return torch.as_tensor(pickle.load(hFile), dtype=torch.float).to(device) # pyre-ignore[6] + return None + + +@lru_cache() +def load_mesh_symmetry( + symmetry_fpath: str, device: Optional[torch.device] = None +) -> Optional[Dict[str, torch.Tensor]]: + with PathManager.open(symmetry_fpath, "rb") as hFile: + symmetry_loaded = pickle.load(hFile) # pyre-ignore[6] + symmetry = { + "vertex_transforms": torch.as_tensor( + symmetry_loaded["vertex_transforms"], dtype=torch.long + ).to(device), + } + return symmetry + return None + + +@lru_cache() +def create_mesh(mesh_name: str, device: Optional[torch.device] = None) -> Mesh: + return Mesh(mesh_info=MeshCatalog[mesh_name], device=device) diff --git a/data_processing/detectron2/projects/DensePose/densepose/structures/transform_data.py b/data_processing/detectron2/projects/DensePose/densepose/structures/transform_data.py new file mode 100644 index 0000000..7cac1bb --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/structures/transform_data.py @@ -0,0 +1,71 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import BinaryIO, Dict, Union +import torch + + +def normalized_coords_transform(x0, y0, w, h): + """ + Coordinates transform that maps top left corner to (-1, -1) and bottom + right corner to (1, 1). 
Used for torch.grid_sample to initialize the + grid + """ + + def f(p): + return (2 * (p[0] - x0) / w - 1, 2 * (p[1] - y0) / h - 1) + + return f + + +class DensePoseTransformData(object): + + # Horizontal symmetry label transforms used for horizontal flip + MASK_LABEL_SYMMETRIES = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14] + # fmt: off + POINT_LABEL_SYMMETRIES = [ 0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, 23] # noqa + # fmt: on + + def __init__(self, uv_symmetries: Dict[str, torch.Tensor], device: torch.device): + self.mask_label_symmetries = DensePoseTransformData.MASK_LABEL_SYMMETRIES + self.point_label_symmetries = DensePoseTransformData.POINT_LABEL_SYMMETRIES + self.uv_symmetries = uv_symmetries + self.device = torch.device("cpu") + + def to(self, device: torch.device, copy: bool = False) -> "DensePoseTransformData": + """ + Convert transform data to the specified device + + Args: + device (torch.device): device to convert the data to + copy (bool): flag that specifies whether to copy or to reference the data + in case the device is the same + Return: + An instance of `DensePoseTransformData` with data stored on the specified device + """ + if self.device == device and not copy: + return self + uv_symmetry_map = {} + for key in self.uv_symmetries: + uv_symmetry_map[key] = self.uv_symmetries[key].to(device=device, copy=copy) + return DensePoseTransformData(uv_symmetry_map, device) + + @staticmethod + def load(io: Union[str, BinaryIO]): + """ + Args: + io: (str or binary file-like object): input file to load data from + Returns: + An instance of `DensePoseTransformData` with transforms loaded from the file + """ + import scipy.io + + uv_symmetry_map = scipy.io.loadmat(io) + uv_symmetry_map_torch = {} + for key in ["U_transforms", "V_transforms"]: + uv_symmetry_map_torch[key] = [] + map_src = uv_symmetry_map[key] + map_dst = uv_symmetry_map_torch[key] + for i in range(map_src.shape[1]): + map_dst.append(torch.from_numpy(map_src[0, i]).to(dtype=torch.float)) + uv_symmetry_map_torch[key] = torch.stack(map_dst, dim=0) + transform_data = DensePoseTransformData(uv_symmetry_map_torch, device=torch.device("cpu")) + return transform_data diff --git a/data_processing/detectron2/projects/DensePose/densepose/utils/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/projects/DensePose/densepose/utils/dbhelper.py b/data_processing/detectron2/projects/DensePose/densepose/utils/dbhelper.py new file mode 100644 index 0000000..65b6157 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/utils/dbhelper.py @@ -0,0 +1,147 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import Any, Dict, Optional, Tuple + + +class EntrySelector(object): + """ + Base class for entry selectors + """ + + @staticmethod + def from_string(spec: str) -> "EntrySelector": + if spec == "*": + return AllEntrySelector() + return FieldEntrySelector(spec) + + +class AllEntrySelector(EntrySelector): + """ + Selector that accepts all entries + """ + + SPECIFIER = "*" + + def __call__(self, entry): + return True + + +class FieldEntrySelector(EntrySelector): + """ + Selector that accepts only entries that match provided field + specifier(s). 
Only a limited set of specifiers is supported for now:
+        <specifiers>::=<specifier>[<comma><specifiers>]
+        <specifier>::=<field_name>[<type_delim><type>]<equal><value_or_range>
+        <field_name> is a valid identifier
+        <type> ::= "int" | "str"
+        <equal> ::= "="
+        <comma> ::= ","
+        <type_delim> ::= ":"
+        <value_or_range> ::= <value> | <range>
+        <range> ::= <value><range_delim><value>
+        <range_delim> ::= "-"
+        <value> is a string without spaces and special symbols
+            (e.g. <comma>, <equal>, <type_delim>, <range_delim>)
+    """
+
+    _SPEC_DELIM = ","
+    _TYPE_DELIM = ":"
+    _RANGE_DELIM = "-"
+    _EQUAL = "="
+    _ERROR_PREFIX = "Invalid field selector specifier"
+
+    class _FieldEntryValuePredicate(object):
+        """
+        Predicate that checks strict equality for the specified entry field
+        """
+
+        def __init__(self, name: str, typespec: Optional[str], value: str):
+            import builtins
+
+            self.name = name
+            self.type = getattr(builtins, typespec) if typespec is not None else str
+            self.value = value
+
+        def __call__(self, entry):
+            return entry[self.name] == self.type(self.value)
+
+    class _FieldEntryRangePredicate(object):
+        """
+        Predicate that checks whether an entry field falls into the specified range
+        """
+
+        def __init__(self, name: str, typespec: Optional[str], vmin: str, vmax: str):
+            import builtins
+
+            self.name = name
+            self.type = getattr(builtins, typespec) if typespec is not None else str
+            self.vmin = vmin
+            self.vmax = vmax
+
+        def __call__(self, entry):
+            return (entry[self.name] >= self.type(self.vmin)) and (
+                entry[self.name] <= self.type(self.vmax)
+            )
+
+    def __init__(self, spec: str):
+        self._predicates = self._parse_specifier_into_predicates(spec)
+
+    def __call__(self, entry: Dict[str, Any]):
+        for predicate in self._predicates:
+            if not predicate(entry):
+                return False
+        return True
+
+    def _parse_specifier_into_predicates(self, spec: str):
+        predicates = []
+        specs = spec.split(self._SPEC_DELIM)
+        for subspec in specs:
+            eq_idx = subspec.find(self._EQUAL)
+            if eq_idx > 0:
+                field_name_with_type = subspec[:eq_idx]
+                field_name, field_type = self._parse_field_name_type(field_name_with_type)
+                field_value_or_range = subspec[eq_idx + 1 :]
+                if self._is_range_spec(field_value_or_range):
+                    vmin, vmax = self._get_range_spec(field_value_or_range)
+                    predicate = FieldEntrySelector._FieldEntryRangePredicate(
+                        field_name, field_type, vmin, vmax
+                    )
+                else:
+                    predicate = FieldEntrySelector._FieldEntryValuePredicate(
+                        field_name, field_type, field_value_or_range
+                    )
+                predicates.append(predicate)
+            elif eq_idx == 0:
+                self._parse_error(f'"{subspec}", field name is empty!')
+            else:
+                self._parse_error(f'"{subspec}", should have format <field>=<value_or_range>!')
+        return predicates
+
+    def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[str, Optional[str]]:
+        type_delim_idx = field_name_with_type.find(self._TYPE_DELIM)
+        if type_delim_idx > 0:
+            field_name = field_name_with_type[:type_delim_idx]
+            field_type = field_name_with_type[type_delim_idx + 1 :]
+        elif type_delim_idx == 0:
+            self._parse_error(f'"{field_name_with_type}", field name is empty!')
+        else:
+            field_name = field_name_with_type
+            field_type = None
+        # pyre-fixme[61]: `field_name` may not be initialized here.
+        # pyre-fixme[61]: `field_type` may not be initialized here.
+ return field_name, field_type + + def _is_range_spec(self, field_value_or_range): + delim_idx = field_value_or_range.find(self._RANGE_DELIM) + return delim_idx > 0 + + def _get_range_spec(self, field_value_or_range): + if self._is_range_spec(field_value_or_range): + delim_idx = field_value_or_range.find(self._RANGE_DELIM) + vmin = field_value_or_range[:delim_idx] + vmax = field_value_or_range[delim_idx + 1 :] + return vmin, vmax + else: + self._parse_error('"field_value_or_range", range of values expected!') + + def _parse_error(self, msg): + raise ValueError(f"{self._ERROR_PREFIX}: {msg}") diff --git a/data_processing/detectron2/projects/DensePose/densepose/utils/logger.py b/data_processing/detectron2/projects/DensePose/densepose/utils/logger.py new file mode 100644 index 0000000..70cd3cb --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/utils/logger.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging + + +def verbosity_to_level(verbosity) -> int: + if verbosity is not None: + if verbosity == 0: + return logging.WARNING + elif verbosity == 1: + return logging.INFO + elif verbosity >= 2: + return logging.DEBUG + return logging.WARNING diff --git a/data_processing/detectron2/projects/DensePose/densepose/utils/transform.py b/data_processing/detectron2/projects/DensePose/densepose/utils/transform.py new file mode 100644 index 0000000..8dc4ae7 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/utils/transform.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from detectron2.data import MetadataCatalog +from detectron2.utils.file_io import PathManager + +from densepose import DensePoseTransformData + + +def load_for_dataset(dataset_name): + path = MetadataCatalog.get(dataset_name).densepose_transform_src + densepose_transform_data_fpath = PathManager.get_local_path(path) + return DensePoseTransformData.load(densepose_transform_data_fpath) + + +def load_from_cfg(cfg): + return load_for_dataset(cfg.DATASETS.TEST[0]) diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/__init__.py b/data_processing/detectron2/projects/DensePose/densepose/vis/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/base.py b/data_processing/detectron2/projects/DensePose/densepose/vis/base.py new file mode 100644 index 0000000..08b3a98 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/base.py @@ -0,0 +1,229 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
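A hedged sketch of how the field-selector grammar in `dbhelper.py` above is meant to be used. The dictionary entries are made up, and the only assumption is that the package is importable as `densepose.utils.dbhelper`.

```python
# Made-up entries; only the selector spec strings follow the grammar above.
from densepose.utils.dbhelper import EntrySelector

select_all = EntrySelector.from_string("*")
# keep entries whose integer image_id lies in [100, 199]
# and whose dataset field equals "coco_2014_train"
select_some = EntrySelector.from_string("image_id:int=100-199,dataset=coco_2014_train")

entries = [
    {"image_id": 150, "dataset": "coco_2014_train"},
    {"image_id": 500, "dataset": "coco_2014_train"},
]
print([select_some(e) for e in entries])  # [True, False]
print([select_all(e) for e in entries])   # [True, True]
```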
+import logging
+import numpy as np
+import cv2
+import torch
+
+Image = np.ndarray
+Boxes = torch.Tensor
+
+
+class MatrixVisualizer(object):
+    """
+    Base visualizer for matrix data
+    """
+
+    def __init__(
+        self,
+        inplace=True,
+        cmap=cv2.COLORMAP_PARULA,
+        val_scale=1.0,
+        alpha=0.7,
+        interp_method_matrix=cv2.INTER_LINEAR,
+        interp_method_mask=cv2.INTER_NEAREST,
+    ):
+        self.inplace = inplace
+        self.cmap = cmap
+        self.val_scale = val_scale
+        self.alpha = alpha
+        self.interp_method_matrix = interp_method_matrix
+        self.interp_method_mask = interp_method_mask
+
+    def visualize(self, image_bgr, mask, matrix, bbox_xywh):
+        self._check_image(image_bgr)
+        self._check_mask_matrix(mask, matrix)
+        if self.inplace:
+            image_target_bgr = image_bgr
+        else:
+            image_target_bgr = image_bgr * 0
+        x, y, w, h = [int(v) for v in bbox_xywh]
+        if w <= 0 or h <= 0:
+            return image_bgr
+        mask, matrix = self._resize(mask, matrix, w, h)
+        mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3])
+        # NOTE: instead of the upstream colormap-and-alpha-blend rendering, the raw matrix
+        # values are packed into two uint8 channels (quotient and remainder base 255) so the
+        # exact values can be recovered downstream; background pixels keep the original image.
+        matrix = matrix.cpu().numpy()
+        matrix_vis = np.zeros((matrix.shape[0], matrix.shape[1], 3), dtype=np.uint8)
+        matrix_vis[:, :, 0] = matrix // 255
+        matrix_vis[:, :, 1] = matrix % 255
+        matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg]
+        image_target_bgr[y : y + h, x : x + w, :] = matrix_vis
+        return image_target_bgr.astype(np.uint8)
+
+    def _resize(self, mask, matrix, w, h):
+        if (w != mask.shape[1]) or (h != mask.shape[0]):
+            mask = cv2.resize(mask, (w, h), self.interp_method_mask)
+        if (w != matrix.shape[1]) or (h != matrix.shape[0]):
+            matrix = cv2.resize(matrix, (w, h), self.interp_method_matrix)
+        return mask, matrix
+
+    def _check_image(self, image_rgb):
+        assert len(image_rgb.shape) == 3
+        assert image_rgb.shape[2] == 3
+        assert image_rgb.dtype == np.uint8
+
+    def _check_mask_matrix(self, mask, matrix):
+        assert len(matrix.shape) == 2
+        assert len(mask.shape) == 2
+        assert mask.dtype == np.uint8
+
+
+class RectangleVisualizer(object):
+
+    _COLOR_GREEN = (18, 127, 15)
+
def __init__(self, color=_COLOR_GREEN, thickness=1): + self.color = color + self.thickness = thickness + + def visualize(self, image_bgr, bbox_xywh, color=None, thickness=None): + x, y, w, h = bbox_xywh + color = color or self.color + thickness = thickness or self.thickness + cv2.rectangle(image_bgr, (int(x), int(y)), (int(x + w), int(y + h)), color, thickness) + return image_bgr + + +class PointsVisualizer(object): + + _COLOR_GREEN = (18, 127, 15) + + def __init__(self, color_bgr=_COLOR_GREEN, r=5): + self.color_bgr = color_bgr + self.r = r + + def visualize(self, image_bgr, pts_xy, colors_bgr=None, rs=None): + for j, pt_xy in enumerate(pts_xy): + x, y = pt_xy + color_bgr = colors_bgr[j] if colors_bgr is not None else self.color_bgr + r = rs[j] if rs is not None else self.r + cv2.circle(image_bgr, (x, y), r, color_bgr, -1) + return image_bgr + + +class TextVisualizer(object): + + _COLOR_GRAY = (218, 227, 218) + _COLOR_WHITE = (255, 255, 255) + + def __init__( + self, + font_face=cv2.FONT_HERSHEY_SIMPLEX, + font_color_bgr=_COLOR_GRAY, + font_scale=0.35, + font_line_type=cv2.LINE_AA, + font_line_thickness=1, + fill_color_bgr=_COLOR_WHITE, + fill_color_transparency=1.0, + frame_color_bgr=_COLOR_WHITE, + frame_color_transparency=1.0, + frame_thickness=1, + ): + self.font_face = font_face + self.font_color_bgr = font_color_bgr + self.font_scale = font_scale + self.font_line_type = font_line_type + self.font_line_thickness = font_line_thickness + self.fill_color_bgr = fill_color_bgr + self.fill_color_transparency = fill_color_transparency + self.frame_color_bgr = frame_color_bgr + self.frame_color_transparency = frame_color_transparency + self.frame_thickness = frame_thickness + + def visualize(self, image_bgr, txt, topleft_xy): + txt_w, txt_h = self.get_text_size_wh(txt) + topleft_xy = tuple(map(int, topleft_xy)) + x, y = topleft_xy + if self.frame_color_transparency < 1.0: + t = self.frame_thickness + image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] = ( + image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] + * self.frame_color_transparency + + np.array(self.frame_color_bgr) * (1.0 - self.frame_color_transparency) + ).astype(np.float) + if self.fill_color_transparency < 1.0: + image_bgr[y : y + txt_h, x : x + txt_w, :] = ( + image_bgr[y : y + txt_h, x : x + txt_w, :] * self.fill_color_transparency + + np.array(self.fill_color_bgr) * (1.0 - self.fill_color_transparency) + ).astype(np.float) + cv2.putText( + image_bgr, + txt, + topleft_xy, + self.font_face, + self.font_scale, + self.font_color_bgr, + self.font_line_thickness, + self.font_line_type, + ) + return image_bgr + + def get_text_size_wh(self, txt): + ((txt_w, txt_h), _) = cv2.getTextSize( + txt, self.font_face, self.font_scale, self.font_line_thickness + ) + return txt_w, txt_h + + +class CompoundVisualizer(object): + def __init__(self, visualizers): + self.visualizers = visualizers + + def visualize(self, image_bgr, data): + assert len(data) == len( + self.visualizers + ), "The number of datas {} should match the number of visualizers" " {}".format( + len(data), len(self.visualizers) + ) + image = image_bgr + for i, visualizer in enumerate(self.visualizers): + image = visualizer.visualize(image, data[i]) + return image + + def __str__(self): + visualizer_str = ", ".join([str(v) for v in self.visualizers]) + return "Compound Visualizer [{}]".format(visualizer_str) diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/bounding_box.py 
b/data_processing/detectron2/projects/DensePose/densepose/vis/bounding_box.py new file mode 100644 index 0000000..4f83957 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/bounding_box.py @@ -0,0 +1,37 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .base import RectangleVisualizer, TextVisualizer + + +class BoundingBoxVisualizer(object): + def __init__(self): + self.rectangle_visualizer = RectangleVisualizer() + + def visualize(self, image_bgr, boxes_xywh): + for bbox_xywh in boxes_xywh: + image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh) + return image_bgr + + +class ScoredBoundingBoxVisualizer(object): + def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None, **kwargs): + if bbox_visualizer_params is None: + bbox_visualizer_params = {} + if score_visualizer_params is None: + score_visualizer_params = {} + self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params) + self.visualizer_score = TextVisualizer(**score_visualizer_params) + + def visualize(self, image_bgr, scored_bboxes): + boxes_xywh, box_scores = scored_bboxes + assert len(boxes_xywh) == len( + box_scores + ), "Number of bounding boxes {} should be equal to the number of scores {}".format( + len(boxes_xywh), len(box_scores) + ) + for i, box_xywh in enumerate(boxes_xywh): + score_i = box_scores[i] + image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh) + score_txt = "{0:6.4f}".format(score_i) + topleft_xy = box_xywh[0], box_xywh[1] + image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy) + return image_bgr diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_data_points.py b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_data_points.py new file mode 100644 index 0000000..b6839a9 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_data_points.py @@ -0,0 +1,106 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import numpy as np +from typing import Iterable, Optional, Tuple +import cv2 + +from densepose.structures import DensePoseDataRelative + +from .base import Boxes, Image, MatrixVisualizer, PointsVisualizer + + +class DensePoseDataCoarseSegmentationVisualizer(object): + """ + Visualizer for ground truth segmentation + """ + + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, + cmap=cmap, + val_scale=255.0 / DensePoseDataRelative.N_BODY_PARTS, + alpha=alpha, + ) + + def visualize( + self, + image_bgr: Image, + bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]], + ) -> Image: + if bbox_densepose_datas is None: + return image_bgr + for bbox_xywh, densepose_data in zip(*bbox_densepose_datas): + matrix = densepose_data.segm.numpy() + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[matrix > 0] = 1 + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh.numpy()) + return image_bgr + + +class DensePoseDataPointsVisualizer(object): + def __init__(self, densepose_data_to_value_fn=None, cmap=cv2.COLORMAP_PARULA, **kwargs): + self.points_visualizer = PointsVisualizer() + self.densepose_data_to_value_fn = densepose_data_to_value_fn + self.cmap = cmap + + def visualize( + self, + image_bgr: Image, + bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]], + ) -> Image: + if bbox_densepose_datas is None: + return image_bgr + for bbox_xywh, densepose_data in zip(*bbox_densepose_datas): + x0, y0, w, h = bbox_xywh.numpy() + x = densepose_data.x.numpy() * w / 255.0 + x0 + y = densepose_data.y.numpy() * h / 255.0 + y0 + pts_xy = zip(x, y) + if self.densepose_data_to_value_fn is None: + image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy) + else: + v = self.densepose_data_to_value_fn(densepose_data) + img_colors_bgr = cv2.applyColorMap(v, self.cmap) + colors_bgr = [ + [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr + ] + image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy, colors_bgr) + return image_bgr + + +def _densepose_data_u_for_cmap(densepose_data): + u = np.clip(densepose_data.u.numpy(), 0, 1) * 255.0 + return u.astype(np.uint8) + + +def _densepose_data_v_for_cmap(densepose_data): + v = np.clip(densepose_data.v.numpy(), 0, 1) * 255.0 + return v.astype(np.uint8) + + +def _densepose_data_i_for_cmap(densepose_data): + i = ( + np.clip(densepose_data.i.numpy(), 0.0, DensePoseDataRelative.N_PART_LABELS) + * 255.0 + / DensePoseDataRelative.N_PART_LABELS + ) + return i.astype(np.uint8) + + +class DensePoseDataPointsUVisualizer(DensePoseDataPointsVisualizer): + def __init__(self, **kwargs): + super(DensePoseDataPointsUVisualizer, self).__init__( + densepose_data_to_value_fn=_densepose_data_u_for_cmap, **kwargs + ) + + +class DensePoseDataPointsVVisualizer(DensePoseDataPointsVisualizer): + def __init__(self, **kwargs): + super(DensePoseDataPointsVVisualizer, self).__init__( + densepose_data_to_value_fn=_densepose_data_v_for_cmap, **kwargs + ) + + +class DensePoseDataPointsIVisualizer(DensePoseDataPointsVisualizer): + def __init__(self, **kwargs): + super(DensePoseDataPointsIVisualizer, self).__init__( + densepose_data_to_value_fn=_densepose_data_i_for_cmap, **kwargs + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_outputs_iuv.py b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_outputs_iuv.py new file mode 100644 index 
0000000..a32a418 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_outputs_iuv.py @@ -0,0 +1,101 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Optional, Tuple +import cv2 + +from densepose.structures import DensePoseDataRelative + +from ..structures import DensePoseChartPredictorOutput +from .base import Boxes, Image, MatrixVisualizer + + +class DensePoseOutputsVisualizer(object): + def __init__( + self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, to_visualize=None, **kwargs + ): + assert to_visualize in "IUV", "can only visualize IUV" + self.to_visualize = to_visualize + + if self.to_visualize == "I": + val_scale = 255.0 / DensePoseDataRelative.N_PART_LABELS + else: + val_scale = 1.0 + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha + ) + + def visualize( + self, + image_bgr: Image, + dp_output_with_bboxes: Tuple[Optional[DensePoseChartPredictorOutput], Optional[Boxes]], + ) -> Image: + densepose_output, bboxes_xywh = dp_output_with_bboxes + if densepose_output is None or bboxes_xywh is None: + return image_bgr + + assert isinstance( + densepose_output, DensePoseChartPredictorOutput + ), "DensePoseChartPredictorOutput expected, {} encountered".format(type(densepose_output)) + + S = densepose_output.coarse_segm + I = densepose_output.fine_segm # noqa + U = densepose_output.u + V = densepose_output.v + N = S.size(0) + assert N == I.size( + 0 + ), "densepose outputs S {} and I {}" " should have equal first dim size".format( + S.size(), I.size() + ) + assert N == U.size( + 0 + ), "densepose outputs S {} and U {}" " should have equal first dim size".format( + S.size(), U.size() + ) + assert N == V.size( + 0 + ), "densepose outputs S {} and V {}" " should have equal first dim size".format( + S.size(), V.size() + ) + assert N == len( + bboxes_xywh + ), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format( + len(bboxes_xywh), N + ) + for n in range(N): + Sn = S[n].argmax(dim=0) + In = I[n].argmax(dim=0) * (Sn > 0).long() + segmentation = In.cpu().numpy().astype(np.uint8) + mask = np.zeros(segmentation.shape, dtype=np.uint8) + mask[segmentation > 0] = 1 + bbox_xywh = bboxes_xywh[n] + + if self.to_visualize == "I": + vis = segmentation + elif self.to_visualize in "UV": + U_or_Vn = {"U": U, "V": V}[self.to_visualize][n].cpu().numpy().astype(np.float32) + vis = np.zeros(segmentation.shape, dtype=np.float32) + for partId in range(U_or_Vn.shape[0]): + vis[segmentation == partId] = ( + U_or_Vn[partId][segmentation == partId].clip(0, 1) * 255 + ) + + # pyre-fixme[61]: `vis` may not be initialized here. 
+ image_bgr = self.mask_visualizer.visualize(image_bgr, mask, vis, bbox_xywh) + + return image_bgr + + +class DensePoseOutputsUVisualizer(DensePoseOutputsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs): + super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="U", **kwargs) + + +class DensePoseOutputsVVisualizer(DensePoseOutputsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs): + super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="V", **kwargs) + + +class DensePoseOutputsFineSegmentationVisualizer(DensePoseOutputsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs): + super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="I", **kwargs) diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_outputs_vertex.py b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_outputs_vertex.py new file mode 100644 index 0000000..0dfc4ae --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_outputs_vertex.py @@ -0,0 +1,248 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import json +import numpy as np +from functools import lru_cache +from typing import Dict, List, Optional, Tuple +import cv2 +import torch + +from detectron2.utils.file_io import PathManager + +from densepose.modeling import build_densepose_embedder +from densepose.modeling.cse.utils import get_closest_vertices_mask_from_ES + +from ..data.utils import get_class_to_mesh_name_mapping +from ..structures import DensePoseEmbeddingPredictorOutput +from ..structures.mesh import create_mesh +from .base import Boxes, Image, MatrixVisualizer +from .densepose_results_textures import get_texture_atlas + + +@lru_cache() +def get_xyz_vertex_embedding(mesh_name: str, device: torch.device): + if mesh_name == "smpl_27554": + embed_path = PathManager.get_local_path( + "https://dl.fbaipublicfiles.com/densepose/data/cse/mds_d=256.npy" + ) + embed_map, _ = np.load(embed_path, allow_pickle=True) + embed_map = torch.tensor(embed_map).float()[:, 0] + embed_map -= embed_map.min() + embed_map /= embed_map.max() + else: + mesh = create_mesh(mesh_name, device) + embed_map = mesh.vertices.sum(dim=1) + embed_map -= embed_map.min() + embed_map /= embed_map.max() + embed_map = embed_map**2 + + return embed_map + + +class DensePoseOutputsVertexVisualizer(object): + def __init__( + self, + cfg, + inplace=True, + cmap=cv2.COLORMAP_JET, + alpha=0.7, + device="cuda", + default_class=0, + **kwargs, + ): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha + ) + self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg) + self.embedder = build_densepose_embedder(cfg) + self.device = torch.device(device) + self.default_class = default_class + + self.mesh_vertex_embeddings = { + mesh_name: self.embedder(mesh_name).to(self.device) + for mesh_name in self.class_to_mesh_name.values() + if self.embedder.has_embeddings(mesh_name) + } + import os + + + def visualize( + self, + image_bgr: Image, + outputs_boxes_xywh_classes: Tuple[ + Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]] + ], + ) -> List: + if outputs_boxes_xywh_classes[0] is None: + return [] + + S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes( + 
outputs_boxes_xywh_classes + ) + # print("N: ", N) + + image_bgrs = [] + for n in range(N): + x, y, w, h = bboxes_xywh[n].int().tolist() + if w <= 0 or h <= 0: + continue + + + mesh_name = self.class_to_mesh_name[pred_classes[n]] + closest_vertices, mask = get_closest_vertices_mask_from_ES( + E[[n]], + S[[n]], + h, + w, + self.mesh_vertex_embeddings[mesh_name], + self.device, + ) + + + #print('closest_vertices: ', closest_vertices.shape,closest_vertices.max(),closest_vertices.min()) + # embed_map = get_xyz_vertex_embedding(mesh_name, self.device) + # vis = (embed_map[closest_vertices].clip(0, 1) * 255.0).cpu().numpy() + # print('vis: ', vis.shape, vis.max(), vis.min()) + + + + mask_numpy = mask.cpu().numpy().astype(dtype=np.uint8) + image_bgrs.append(self.mask_visualizer.visualize(image_bgr.copy(), mask_numpy, closest_vertices, [x, y, w, h])) + + + + + return image_bgrs + + def extract_and_check_outputs_and_boxes(self, outputs_boxes_xywh_classes): + + densepose_output, bboxes_xywh, pred_classes = outputs_boxes_xywh_classes + + if pred_classes is None: + pred_classes = [self.default_class] * len(bboxes_xywh) + + assert isinstance( + densepose_output, DensePoseEmbeddingPredictorOutput + ), "DensePoseEmbeddingPredictorOutput expected, {} encountered".format( + type(densepose_output) + ) + + S = densepose_output.coarse_segm + E = densepose_output.embedding + N = S.size(0) + assert N == E.size( + 0 + ), "CSE coarse_segm {} and embeddings {}" " should have equal first dim size".format( + S.size(), E.size() + ) + assert N == len( + bboxes_xywh + ), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format( + len(bboxes_xywh), N + ) + assert N == len(pred_classes), ( + "number of predicted classes {}" + " should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N) + ) + + return S, E, N, bboxes_xywh, pred_classes + + +def get_texture_atlases(json_str: Optional[str]) -> Optional[Dict[str, Optional[np.ndarray]]]: + """ + json_str is a JSON string representing a mesh_name -> texture_atlas_path dictionary + """ + if json_str is None: + return None + + paths = json.loads(json_str) + return {mesh_name: get_texture_atlas(path) for mesh_name, path in paths.items()} + + +class DensePoseOutputsTextureVisualizer(DensePoseOutputsVertexVisualizer): + def __init__( + self, + cfg, + texture_atlases_dict, + device="cuda", + default_class=0, + **kwargs, + ): + self.embedder = build_densepose_embedder(cfg) + + self.texture_image_dict = {} + self.alpha_dict = {} + + for mesh_name in texture_atlases_dict.keys(): + if texture_atlases_dict[mesh_name].shape[-1] == 4: # Image with alpha channel + self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, -1] / 255.0 + self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, :3] + else: + self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name].sum(axis=-1) > 0 + self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name] + + self.device = torch.device(device) + self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg) + self.default_class = default_class + + self.mesh_vertex_embeddings = { + mesh_name: self.embedder(mesh_name).to(self.device) + for mesh_name in self.class_to_mesh_name.values() + } + + def visualize( + self, + image_bgr: Image, + outputs_boxes_xywh_classes: Tuple[ + Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]] + ], + ) -> Image: + image_target_bgr = image_bgr.copy() + if outputs_boxes_xywh_classes[0] is None: + return 
image_target_bgr + + S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes( + outputs_boxes_xywh_classes + ) + + meshes = { + p: create_mesh(self.class_to_mesh_name[p], self.device) for p in np.unique(pred_classes) + } + + for n in range(N): + x, y, w, h = bboxes_xywh[n].int().cpu().numpy() + mesh_name = self.class_to_mesh_name[pred_classes[n]] + closest_vertices, mask = get_closest_vertices_mask_from_ES( + E[[n]], + S[[n]], + h, + w, + self.mesh_vertex_embeddings[mesh_name], + self.device, + ) + uv_array = meshes[pred_classes[n]].texcoords[closest_vertices].permute((2, 0, 1)) + uv_array = uv_array.cpu().numpy().clip(0, 1) + textured_image = self.generate_image_with_texture( + image_target_bgr[y : y + h, x : x + w], + uv_array, + mask.cpu().numpy(), + self.class_to_mesh_name[pred_classes[n]], + ) + if textured_image is None: + continue + image_target_bgr[y : y + h, x : x + w] = textured_image + + return image_target_bgr + + def generate_image_with_texture(self, bbox_image_bgr, uv_array, mask, mesh_name): + alpha = self.alpha_dict.get(mesh_name) + texture_image = self.texture_image_dict.get(mesh_name) + if alpha is None or texture_image is None: + return None + U, V = uv_array + x_index = (U * texture_image.shape[1]).astype(int) + y_index = (V * texture_image.shape[0]).astype(int) + local_texture = texture_image[y_index, x_index][mask] + local_alpha = np.expand_dims(alpha[y_index, x_index][mask], -1) + output_image = bbox_image_bgr.copy() + output_image[mask] = output_image[mask] * (1 - local_alpha) + local_texture * local_alpha + return output_image.astype(np.uint8) diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_results.py b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_results.py new file mode 100644 index 0000000..124ed0c --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_results.py @@ -0,0 +1,358 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import logging +import numpy as np +from typing import List, Optional, Tuple +import cv2 +import torch + +from densepose.structures import DensePoseDataRelative + +from ..structures import DensePoseChartResult +from .base import Boxes, Image, MatrixVisualizer + + +class DensePoseResultsVisualizer(object): + def visualize( + self, + image_bgr: Image, + results_and_boxes_xywh: Tuple[Optional[List[DensePoseChartResult]], Optional[Boxes]], + ) -> Image: + densepose_result, boxes_xywh = results_and_boxes_xywh + if densepose_result is None or boxes_xywh is None: + return image_bgr + + boxes_xywh = boxes_xywh.cpu().numpy() + context = self.create_visualization_context(image_bgr) + densepose_result = densepose_result[0:1] + for i, result in enumerate(densepose_result): + iuv_array = torch.cat( + (result.labels[None].type(torch.float32), result.uv * 255.0) + ).type(torch.uint8) + self.visualize_iuv_arr(context, iuv_array.cpu().numpy(), boxes_xywh[i]) + image_bgr = self.context_to_image_bgr(context) + return image_bgr + + def create_visualization_context(self, image_bgr: Image): + return image_bgr + + def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None: + pass + + def context_to_image_bgr(self, context): + return context + + def get_image_bgr_from_context(self, context): + return context + + +class DensePoseMaskedColormapResultsVisualizer(DensePoseResultsVisualizer): + def __init__( + self, + data_extractor, + segm_extractor, + inplace=True, + cmap=cv2.COLORMAP_PARULA, + alpha=0.7, + val_scale=1.0, + **kwargs, + ): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha + ) + self.data_extractor = data_extractor + self.segm_extractor = segm_extractor + + def context_to_image_bgr(self, context): + return context + + def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None: + image_bgr = self.get_image_bgr_from_context(context) + matrix = self.data_extractor(iuv_arr) + segm = self.segm_extractor(iuv_arr) + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[segm > 0] = 1 + + + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh) + + +def _extract_i_from_iuvarr(iuv_arr): + return iuv_arr[0, :, :] + + +def _extract_u_from_iuvarr(iuv_arr): + return iuv_arr[1, :, :] + + +def _extract_v_from_iuvarr(iuv_arr): + return iuv_arr[2, :, :] + + +class DensePoseResultsMplContourVisualizer(DensePoseResultsVisualizer): + def __init__(self, levels=10, **kwargs): + self.levels = levels + self.plot_args = kwargs + + def create_visualization_context(self, image_bgr: Image): + import matplotlib.pyplot as plt + from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas + + context = {} + context["image_bgr"] = image_bgr + dpi = 100 + height_inches = float(image_bgr.shape[0]) / dpi + width_inches = float(image_bgr.shape[1]) / dpi + fig = plt.figure(figsize=(width_inches, height_inches), dpi=dpi) + plt.axes([0, 0, 1, 1]) + plt.axis("off") + context["fig"] = fig + canvas = FigureCanvas(fig) + context["canvas"] = canvas + extent = (0, image_bgr.shape[1], image_bgr.shape[0], 0) + plt.imshow(image_bgr[:, :, ::-1], extent=extent) + return context + + def context_to_image_bgr(self, context): + fig = context["fig"] + w, h = map(int, fig.get_size_inches() * fig.get_dpi()) + canvas = context["canvas"] + canvas.draw() + image_1d = np.fromstring(canvas.tostring_rgb(), dtype="uint8") + image_rgb = image_1d.reshape(h, w, 3) + image_bgr = image_rgb[:, :, ::-1].copy() + return image_bgr + + def 
visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> None: + import matplotlib.pyplot as plt + + u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0 + v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0 + extent = ( + bbox_xywh[0], + bbox_xywh[0] + bbox_xywh[2], + bbox_xywh[1], + bbox_xywh[1] + bbox_xywh[3], + ) + plt.contour(u, self.levels, extent=extent, **self.plot_args) + plt.contour(v, self.levels, extent=extent, **self.plot_args) + + +class DensePoseResultsCustomContourVisualizer(DensePoseResultsVisualizer): + """ + Contour visualization using marching squares + """ + + def __init__(self, levels=10, **kwargs): + # TODO: colormap is hardcoded + cmap = cv2.COLORMAP_PARULA + if isinstance(levels, int): + self.levels = np.linspace(0, 1, levels) + else: + self.levels = levels + if "linewidths" in kwargs: + self.linewidths = kwargs["linewidths"] + else: + self.linewidths = [1] * len(self.levels) + self.plot_args = kwargs + img_colors_bgr = cv2.applyColorMap((self.levels * 255).astype(np.uint8), cmap) + self.level_colors_bgr = [ + [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr + ] + + def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> None: + image_bgr = self.get_image_bgr_from_context(context) + segm = _extract_i_from_iuvarr(iuv_arr) + u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0 + v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0 + self._contours(image_bgr, u, segm, bbox_xywh) + self._contours(image_bgr, v, segm, bbox_xywh) + + def _contours(self, image_bgr, arr, segm, bbox_xywh): + for part_idx in range(1, DensePoseDataRelative.N_PART_LABELS + 1): + mask = segm == part_idx + if not np.any(mask): + continue + arr_min = np.amin(arr[mask]) + arr_max = np.amax(arr[mask]) + I, J = np.nonzero(mask) + i0 = np.amin(I) + i1 = np.amax(I) + 1 + j0 = np.amin(J) + j1 = np.amax(J) + 1 + if (j1 == j0 + 1) or (i1 == i0 + 1): + continue + Nw = arr.shape[1] - 1 + Nh = arr.shape[0] - 1 + for level_idx, level in enumerate(self.levels): + if (level < arr_min) or (level > arr_max): + continue + vp = arr[i0:i1, j0:j1] >= level + bin_codes = vp[:-1, :-1] + vp[1:, :-1] * 2 + vp[1:, 1:] * 4 + vp[:-1, 1:] * 8 + mp = mask[i0:i1, j0:j1] + bin_mask_codes = mp[:-1, :-1] + mp[1:, :-1] * 2 + mp[1:, 1:] * 4 + mp[:-1, 1:] * 8 + it = np.nditer(bin_codes, flags=["multi_index"]) + color_bgr = self.level_colors_bgr[level_idx] + linewidth = self.linewidths[level_idx] + while not it.finished: + if (it[0] != 0) and (it[0] != 15): + i, j = it.multi_index + if bin_mask_codes[i, j] != 0: + self._draw_line( + image_bgr, + arr, + mask, + level, + color_bgr, + linewidth, + it[0], + it.multi_index, + bbox_xywh, + Nw, + Nh, + (i0, j0), + ) + it.iternext() + + def _draw_line( + self, + image_bgr, + arr, + mask, + v, + color_bgr, + linewidth, + bin_code, + multi_idx, + bbox_xywh, + Nw, + Nh, + offset, + ): + lines = self._bin_code_2_lines(arr, v, bin_code, multi_idx, Nw, Nh, offset) + x0, y0, w, h = bbox_xywh + x1 = x0 + w + y1 = y0 + h + for line in lines: + x0r, y0r = line[0] + x1r, y1r = line[1] + pt0 = (int(x0 + x0r * (x1 - x0)), int(y0 + y0r * (y1 - y0))) + pt1 = (int(x0 + x1r * (x1 - x0)), int(y0 + y1r * (y1 - y0))) + cv2.line(image_bgr, pt0, pt1, color_bgr, linewidth) + + def _bin_code_2_lines(self, arr, v, bin_code, multi_idx, Nw, Nh, offset): + i0, j0 = offset + i, j = multi_idx + i += i0 + j += j0 + v0, v1, v2, v3 = arr[i, j], arr[i + 1, j], arr[i + 1, j + 1], arr[i, j + 1] + x0i = float(j) / Nw + y0j = float(i) / Nh + 
He = 1.0 / Nh + We = 1.0 / Nw + if (bin_code == 1) or (bin_code == 14): + a = (v - v0) / (v1 - v0) + b = (v - v0) / (v3 - v0) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + b * We, y0j) + return [(pt1, pt2)] + elif (bin_code == 2) or (bin_code == 13): + a = (v - v0) / (v1 - v0) + b = (v - v1) / (v2 - v1) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + b * We, y0j + He) + return [(pt1, pt2)] + elif (bin_code == 3) or (bin_code == 12): + a = (v - v0) / (v3 - v0) + b = (v - v1) / (v2 - v1) + pt1 = (x0i + a * We, y0j) + pt2 = (x0i + b * We, y0j + He) + return [(pt1, pt2)] + elif (bin_code == 4) or (bin_code == 11): + a = (v - v1) / (v2 - v1) + b = (v - v3) / (v2 - v3) + pt1 = (x0i + a * We, y0j + He) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif (bin_code == 6) or (bin_code == 9): + a = (v - v0) / (v1 - v0) + b = (v - v3) / (v2 - v3) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif (bin_code == 7) or (bin_code == 8): + a = (v - v0) / (v3 - v0) + b = (v - v3) / (v2 - v3) + pt1 = (x0i + a * We, y0j) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif bin_code == 5: + a1 = (v - v0) / (v1 - v0) + b1 = (v - v1) / (v2 - v1) + pt11 = (x0i, y0j + a1 * He) + pt12 = (x0i + b1 * We, y0j + He) + a2 = (v - v0) / (v3 - v0) + b2 = (v - v3) / (v2 - v3) + pt21 = (x0i + a2 * We, y0j) + pt22 = (x0i + We, y0j + b2 * He) + return [(pt11, pt12), (pt21, pt22)] + elif bin_code == 10: + a1 = (v - v0) / (v3 - v0) + b1 = (v - v0) / (v1 - v0) + pt11 = (x0i + a1 * We, y0j) + pt12 = (x0i, y0j + b1 * He) + a2 = (v - v1) / (v2 - v1) + b2 = (v - v3) / (v2 - v3) + pt21 = (x0i + a2 * We, y0j + He) + pt22 = (x0i + We, y0j + b2 * He) + return [(pt11, pt12), (pt21, pt22)] + return [] + + +try: + import matplotlib + + matplotlib.use("Agg") + DensePoseResultsContourVisualizer = DensePoseResultsMplContourVisualizer +except ModuleNotFoundError: + logger = logging.getLogger(__name__) + logger.warning("Could not import matplotlib, using custom contour visualizer") + DensePoseResultsContourVisualizer = DensePoseResultsCustomContourVisualizer + + +class DensePoseResultsFineSegmentationVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs): + super(DensePoseResultsFineSegmentationVisualizer, self).__init__( + _extract_i_from_iuvarr, + _extract_i_from_iuvarr, + inplace, + cmap, + alpha, + val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS, + **kwargs, + ) + + +class DensePoseResultsUVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs): + super(DensePoseResultsUVisualizer, self).__init__( + _extract_u_from_iuvarr, + _extract_i_from_iuvarr, + inplace, + cmap, + alpha, + val_scale=1.0, + **kwargs, + ) + + +class DensePoseResultsVVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs): + super(DensePoseResultsVVisualizer, self).__init__( + _extract_v_from_iuvarr, + _extract_i_from_iuvarr, + inplace, + cmap, + alpha, + val_scale=1.0, + **kwargs, + ) diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_results_textures.py b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_results_textures.py new file mode 100644 index 0000000..8b02f2b --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/densepose_results_textures.py @@ -0,0 +1,91 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. +import numpy as np +from typing import List, Optional, Tuple +import torch + +from detectron2.data.detection_utils import read_image + +from ..structures import DensePoseChartResult +from .base import Boxes, Image +from .densepose_results import DensePoseResultsVisualizer + + +def get_texture_atlas(path: Optional[str]) -> Optional[np.ndarray]: + if path is None: + return None + + # Reading images like that downsamples 16-bit images to 8-bit + # If 16-bit images are needed, we can replace that by cv2.imread with the + # cv2.IMREAD_UNCHANGED flag (with cv2 we also need it to keep alpha channels) + # The rest of the pipeline would need to be adapted to 16-bit images too + bgr_image = read_image(path) + rgb_image = np.copy(bgr_image) # Convert BGR -> RGB + rgb_image[:, :, :3] = rgb_image[:, :, 2::-1] # Works with alpha channel + return rgb_image + + +class DensePoseResultsVisualizerWithTexture(DensePoseResultsVisualizer): + """ + texture_atlas: An image, size 6N * 4N, with N * N squares for each of the 24 body parts. + It must follow the grid found at https://github.com/facebookresearch/DensePose/blob/master/DensePoseData/demo_data/texture_atlas_200.png # noqa + For each body part, U is proportional to the x coordinate, and (1 - V) to y + """ + + def __init__(self, texture_atlas, **kwargs): + self.texture_atlas = texture_atlas + self.body_part_size = texture_atlas.shape[0] // 6 + assert self.body_part_size == texture_atlas.shape[1] // 4 + + def visualize( + self, + image_bgr: Image, + results_and_boxes_xywh: Tuple[Optional[List[DensePoseChartResult]], Optional[Boxes]], + ) -> Image: + densepose_result, boxes_xywh = results_and_boxes_xywh + if densepose_result is None or boxes_xywh is None: + return image_bgr + + boxes_xywh = boxes_xywh.int().cpu().numpy() + texture_image, alpha = self.get_texture() + for i, result in enumerate(densepose_result): + iuv_array = torch.cat((result.labels[None], result.uv.clamp(0, 1))) + x, y, w, h = boxes_xywh[i] + bbox_image = image_bgr[y : y + h, x : x + w] + image_bgr[y : y + h, x : x + w] = self.generate_image_with_texture( + texture_image, alpha, bbox_image, iuv_array.cpu().numpy() + ) + return image_bgr + + def get_texture(self): + N = self.body_part_size + texture_image = np.zeros([24, N, N, self.texture_atlas.shape[-1]]) + for i in range(4): + for j in range(6): + texture_image[(6 * i + j), :, :, :] = self.texture_atlas[ + N * j : N * (j + 1), N * i : N * (i + 1), : + ] + + if texture_image.shape[-1] == 4: # Image with alpha channel + alpha = texture_image[:, :, :, -1] / 255.0 + texture_image = texture_image[:, :, :, :3] + else: + alpha = texture_image.sum(axis=-1) > 0 + + return texture_image, alpha + + def generate_image_with_texture(self, texture_image, alpha, bbox_image_bgr, iuv_array): + + I, U, V = iuv_array + generated_image_bgr = bbox_image_bgr.copy() + + for PartInd in range(1, 25): + x, y = np.where(I == PartInd) + x_index = (U[x, y] * (self.body_part_size - 1)).astype(int) + y_index = ((1 - V[x, y]) * (self.body_part_size - 1)).astype(int) + part_alpha = np.expand_dims(alpha[PartInd - 1, y_index, x_index], -1) + generated_image_bgr[I == PartInd] = ( + generated_image_bgr[I == PartInd] * (1 - part_alpha) + + texture_image[PartInd - 1, y_index, x_index] * part_alpha + ) + + return generated_image_bgr.astype(np.uint8) diff --git a/data_processing/detectron2/projects/DensePose/densepose/vis/extractor.py 
b/data_processing/detectron2/projects/DensePose/densepose/vis/extractor.py new file mode 100644 index 0000000..9297548 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose/vis/extractor.py @@ -0,0 +1,200 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +from typing import List, Optional, Sequence, Tuple +import torch + +from detectron2.layers.nms import batched_nms +from detectron2.structures.instances import Instances + +from densepose.converters import ToChartResultConverterWithConfidences +from densepose.structures import ( + DensePoseChartResultWithConfidences, + DensePoseEmbeddingPredictorOutput, +) +from densepose.vis.bounding_box import BoundingBoxVisualizer, ScoredBoundingBoxVisualizer +from densepose.vis.densepose_outputs_vertex import DensePoseOutputsVertexVisualizer +from densepose.vis.densepose_results import DensePoseResultsVisualizer + +from .base import CompoundVisualizer + +Scores = Sequence[float] +DensePoseChartResultsWithConfidences = List[DensePoseChartResultWithConfidences] + + +def extract_scores_from_instances(instances: Instances, select=None): + if instances.has("scores"): + return instances.scores if select is None else instances.scores[select] + return None + + +def extract_boxes_xywh_from_instances(instances: Instances, select=None): + if instances.has("pred_boxes"): + boxes_xywh = instances.pred_boxes.tensor.clone() + boxes_xywh[:, 2] -= boxes_xywh[:, 0] + boxes_xywh[:, 3] -= boxes_xywh[:, 1] + return boxes_xywh if select is None else boxes_xywh[select] + return None + + +def create_extractor(visualizer: object): + """ + Create an extractor for the provided visualizer + """ + if isinstance(visualizer, CompoundVisualizer): + extractors = [create_extractor(v) for v in visualizer.visualizers] + return CompoundExtractor(extractors) + elif isinstance(visualizer, DensePoseResultsVisualizer): + return DensePoseResultExtractor() + elif isinstance(visualizer, ScoredBoundingBoxVisualizer): + return CompoundExtractor([extract_boxes_xywh_from_instances, extract_scores_from_instances]) + elif isinstance(visualizer, BoundingBoxVisualizer): + return extract_boxes_xywh_from_instances + elif isinstance(visualizer, DensePoseOutputsVertexVisualizer): + return DensePoseOutputsExtractor() + else: + logger = logging.getLogger(__name__) + logger.error(f"Could not create extractor for {visualizer}") + return None + + +class BoundingBoxExtractor(object): + """ + Extracts bounding boxes from instances + """ + + def __call__(self, instances: Instances): + boxes_xywh = extract_boxes_xywh_from_instances(instances) + return boxes_xywh + + +class ScoredBoundingBoxExtractor(object): + """ + Extracts bounding boxes from instances + """ + + def __call__(self, instances: Instances, select=None): + scores = extract_scores_from_instances(instances) + boxes_xywh = extract_boxes_xywh_from_instances(instances) + if (scores is None) or (boxes_xywh is None): + return (boxes_xywh, scores) + if select is not None: + scores = scores[select] + boxes_xywh = boxes_xywh[select] + return (boxes_xywh, scores) + + +class DensePoseResultExtractor(object): + """ + Extracts DensePose chart result with confidences from instances + """ + + def __call__( + self, instances: Instances, select=None + ) -> Tuple[Optional[DensePoseChartResultsWithConfidences], Optional[torch.Tensor]]: + if instances.has("pred_densepose") and instances.has("pred_boxes"): + dpout = instances.pred_densepose + boxes_xyxy = instances.pred_boxes + boxes_xywh = 
extract_boxes_xywh_from_instances(instances) + if select is not None: + dpout = dpout[select] + boxes_xyxy = boxes_xyxy[select] + converter = ToChartResultConverterWithConfidences() + results = [converter.convert(dpout[i], boxes_xyxy[[i]]) for i in range(len(dpout))] + return results, boxes_xywh + else: + return None, None + + +class DensePoseOutputsExtractor(object): + """ + Extracts DensePose result from instances + """ + + def __call__( + self, + instances: Instances, + select=None, + ) -> Tuple[ + Optional[DensePoseEmbeddingPredictorOutput], Optional[torch.Tensor], Optional[List[int]] + ]: + if not (instances.has("pred_densepose") and instances.has("pred_boxes")): + return None, None, None + + dpout = instances.pred_densepose + boxes_xyxy = instances.pred_boxes + boxes_xywh = extract_boxes_xywh_from_instances(instances) + + if instances.has("pred_classes"): + classes = instances.pred_classes.tolist() + else: + classes = None + + if select is not None: + dpout = dpout[select] + boxes_xyxy = boxes_xyxy[select] + if classes is not None: + classes = classes[select] + + return dpout, boxes_xywh, classes + + +class CompoundExtractor(object): + """ + Extracts data for CompoundVisualizer + """ + + def __init__(self, extractors): + self.extractors = extractors + + def __call__(self, instances: Instances, select=None): + datas = [] + for extractor in self.extractors: + data = extractor(instances, select) + datas.append(data) + return datas + + +class NmsFilteredExtractor(object): + """ + Extracts data in the format accepted by NmsFilteredVisualizer + """ + + def __init__(self, extractor, iou_threshold): + self.extractor = extractor + self.iou_threshold = iou_threshold + + def __call__(self, instances: Instances, select=None): + scores = extract_scores_from_instances(instances) + boxes_xywh = extract_boxes_xywh_from_instances(instances) + if boxes_xywh is None: + return None + select_local_idx = batched_nms( + boxes_xywh, + scores, + torch.zeros(len(scores), dtype=torch.int32), + iou_threshold=self.iou_threshold, + ).squeeze() + select_local = torch.zeros(len(boxes_xywh), dtype=torch.bool, device=boxes_xywh.device) + select_local[select_local_idx] = True + select = select_local if select is None else (select & select_local) + return self.extractor(instances, select=select) + + +class ScoreThresholdedExtractor(object): + """ + Extracts data in the format accepted by ScoreThresholdedVisualizer + """ + + def __init__(self, extractor, min_score): + self.extractor = extractor + self.min_score = min_score + + def __call__(self, instances: Instances, select=None): + scores = extract_scores_from_instances(instances) + if scores is None: + return None + print('in extractor') + select_local = scores > self.min_score + select = select_local if select is None else (select & select_local) + data = self.extractor(instances, select=select) + return data diff --git a/data_processing/detectron2/projects/DensePose/densepose_methods.py b/data_processing/detectron2/projects/DensePose/densepose_methods.py new file mode 100644 index 0000000..0f2f32d --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/densepose_methods.py @@ -0,0 +1,142 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import numpy as np +from scipy.io import loadmat +import scipy.spatial.distance +import os + + +class DensePoseMethods: + def __init__(self): + # + ALP_UV = loadmat(os.path.join(os.path.dirname(__file__), './DensePoseData/UV_Processed.mat')) + self.FaceIndices = np.array(ALP_UV['All_FaceIndices']).squeeze() + self.FacesDensePose = ALP_UV['All_Faces'] - 1 + self.U_norm = ALP_UV['All_U_norm'].squeeze() + self.V_norm = ALP_UV['All_V_norm'].squeeze() + self.All_vertices = ALP_UV['All_vertices'][0] + ## Info to compute symmetries. + self.SemanticMaskSymmetries = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14] + self.Index_Symmetry_List = [1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, + 23]; + UV_symmetry_filename = os.path.join(os.path.dirname(__file__), + './DensePoseData/UV_symmetry_transforms.mat') + self.UV_symmetry_transformations = loadmat(UV_symmetry_filename) + + def get_symmetric_densepose(self, I, U, V, x, y, Mask): + ### This is a function to get the mirror symmetric UV labels. + Labels_sym = np.zeros(I.shape) + U_sym = np.zeros(U.shape) + V_sym = np.zeros(V.shape) + ### + for i in (range(24)): + if i + 1 in I: + Labels_sym[I == (i + 1)] = self.Index_Symmetry_List[i] + jj = np.where(I == (i + 1)) + ### + U_loc = (U[jj] * 255).astype(np.int64) + V_loc = (V[jj] * 255).astype(np.int64) + ### + V_sym[jj] = self.UV_symmetry_transformations['V_transforms'][0, i][V_loc, U_loc] + U_sym[jj] = self.UV_symmetry_transformations['U_transforms'][0, i][V_loc, U_loc] + ## + Mask_flip = np.fliplr(Mask) + Mask_flipped = np.zeros(Mask.shape) + # + for i in (range(14)): + Mask_flipped[Mask_flip == (i + 1)] = self.SemanticMaskSymmetries[i + 1] + # + [y_max, x_max] = Mask_flip.shape + y_sym = y + x_sym = x_max - x + # + return Labels_sym, U_sym, V_sym, x_sym, y_sym, Mask_flipped + + def barycentric_coordinates_exists(self, P0, P1, P2, P): + u = P1 - P0 + v = P2 - P0 + w = P - P0 + # + vCrossW = np.cross(v, w) + vCrossU = np.cross(v, u) + if (np.dot(vCrossW, vCrossU) < 0): + return False; + # + uCrossW = np.cross(u, w) + uCrossV = np.cross(u, v) + # + if (np.dot(uCrossW, uCrossV) < 0): + return False; + # + denom = np.sqrt((uCrossV ** 2).sum()) + r = np.sqrt((vCrossW ** 2).sum()) / denom + t = np.sqrt((uCrossW ** 2).sum()) / denom + # + return ((r <= 1) & (t <= 1) & (r + t <= 1)) + + def barycentric_coordinates(self, P0, P1, P2, P): + u = P1 - P0 + v = P2 - P0 + w = P - P0 + # + vCrossW = np.cross(v, w) + vCrossU = np.cross(v, u) + # + uCrossW = np.cross(u, w) + uCrossV = np.cross(u, v) + # + denom = np.sqrt((uCrossV ** 2).sum()) + r = np.sqrt((vCrossW ** 2).sum()) / denom + t = np.sqrt((uCrossW ** 2).sum()) / denom + # + return (1 - (r + t), r, t) + + def IUV2FBC(self, I_point, U_point, V_point): + P = [U_point, V_point, 0] + FaceIndicesNow = np.where(self.FaceIndices == I_point) + FacesNow = self.FacesDensePose[FaceIndicesNow] + # + P_0 = np.vstack((self.U_norm[FacesNow][:, 0], self.V_norm[FacesNow][:, 0], + np.zeros(self.U_norm[FacesNow][:, 0].shape))).transpose() + P_1 = np.vstack((self.U_norm[FacesNow][:, 1], self.V_norm[FacesNow][:, 1], + np.zeros(self.U_norm[FacesNow][:, 1].shape))).transpose() + P_2 = np.vstack((self.U_norm[FacesNow][:, 2], self.V_norm[FacesNow][:, 2], + np.zeros(self.U_norm[FacesNow][:, 2].shape))).transpose() + # + + for i, [P0, P1, P2] in enumerate(zip(P_0, P_1, P_2)): + if (self.barycentric_coordinates_exists(P0, P1, P2, P)): + [bc1, bc2, bc3] = self.barycentric_coordinates(P0, P1, P2, P) + return (FaceIndicesNow[0][i], bc1, bc2, bc3) 
+ # + # If the found UV is not inside any faces, select the vertex that is closest! + # + print('np.array([U_point, V_point])',np.array([U_point, V_point]).shape) + D1 = scipy.spatial.distance.cdist(np.array([U_point, V_point])[np.newaxis, :], P_0[:, 0:2]).squeeze() + D2 = scipy.spatial.distance.cdist(np.array([U_point, V_point])[np.newaxis, :], P_1[:, 0:2]).squeeze() + D3 = scipy.spatial.distance.cdist(np.array([U_point, V_point])[np.newaxis, :], P_2[:, 0:2]).squeeze() + # + minD1 = D1.min() + minD2 = D2.min() + minD3 = D3.min() + # + if ((minD1 < minD2) & (minD1 < minD3)): + return (FaceIndicesNow[0][np.argmin(D1)], 1., 0., 0.) + elif ((minD2 < minD1) & (minD2 < minD3)): + return (FaceIndicesNow[0][np.argmin(D2)], 0., 1., 0.) + else: + return (FaceIndicesNow[0][np.argmin(D3)], 0., 0., 1.) + + def FBC2PointOnSurface(self, FaceIndex, bc1, bc2, bc3, Vertices): + ## + Vert_indices = self.All_vertices[self.FacesDensePose[FaceIndex]] - 1 + ## + p = Vertices[Vert_indices[0], :] * bc1 + \ + Vertices[Vert_indices[1], :] * bc2 + \ + Vertices[Vert_indices[2], :] * bc3 + ## + return (p) diff --git a/data_processing/detectron2/projects/DensePose/dev/README.md b/data_processing/detectron2/projects/DensePose/dev/README.md new file mode 100644 index 0000000..e3a94b6 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/dev/README.md @@ -0,0 +1,7 @@ + +## Some scripts for developers to use, include: + +- `run_instant_tests.sh`: run training for a few iterations. +- `run_inference_tests.sh`: run inference on a small dataset. +- `../../dev/linter.sh`: lint the codebase before commit +- `../../dev/parse_results.sh`: parse results from log file. diff --git a/data_processing/detectron2/projects/DensePose/dev/run_inference_tests.sh b/data_processing/detectron2/projects/DensePose/dev/run_inference_tests.sh new file mode 100644 index 0000000..46556b8 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/dev/run_inference_tests.sh @@ -0,0 +1,33 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. + +BIN="python train_net.py" +OUTPUT="inference_test_output" +NUM_GPUS=2 +IMS_PER_GPU=2 +IMS_PER_BATCH=$(( NUM_GPUS * IMS_PER_GPU )) + +CFG_LIST=( "${@:1}" ) + +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN \ + --eval-only \ + --num-gpus $NUM_GPUS \ + --config-file "$cfg" \ + OUTPUT_DIR "$OUTPUT" \ + SOLVER.IMS_PER_BATCH $IMS_PER_BATCH + rm -rf $OUTPUT +done + diff --git a/data_processing/detectron2/projects/DensePose/dev/run_instant_tests.sh b/data_processing/detectron2/projects/DensePose/dev/run_instant_tests.sh new file mode 100644 index 0000000..23a9c67 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/dev/run_instant_tests.sh @@ -0,0 +1,28 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +BIN="python train_net.py" +OUTPUT="instant_test_output" +NUM_GPUS=2 +SOLVER_IMS_PER_BATCH=$((NUM_GPUS * 2)) + +CFG_LIST=( "${@:1}" ) +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ + SOLVER.IMS_PER_BATCH $SOLVER_IMS_PER_BATCH \ + OUTPUT_DIR "$OUTPUT" + rm -rf "$OUTPUT" +done + diff --git a/data_processing/detectron2/projects/DensePose/doc/BOOTSTRAPPING_PIPELINE.md b/data_processing/detectron2/projects/DensePose/doc/BOOTSTRAPPING_PIPELINE.md new file mode 100644 index 0000000..a132686 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/BOOTSTRAPPING_PIPELINE.md @@ -0,0 +1,197 @@ +# Bootstrapping Pipeline + +Bootstrapping pipeline for DensePose was proposed in +[Sanakoyeu et al., 2020](https://arxiv.org/pdf/2003.00080.pdf) +to extend DensePose from humans to proximal animal classes +(chimpanzees). Currently, the pipeline is only implemented for +[chart-based models](DENSEPOSE_IUV.md). +Bootstrapping proceeds in two steps. + +## Master Model Training + +Master model is trained on data from source domain (humans) +and supporting domain (animals). Instances from the source domain +contain full DensePose annotations (`S`, `I`, `U` and `V`) and +instances from the supporting domain have segmentation annotations only. +To ensure segmentation quality in the target domain, only a subset of +supporting domain classes is included into the training. This is achieved +through category filters, e.g. +(see [configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml](../configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml)): + +``` + WHITELISTED_CATEGORIES: + "base_coco_2017_train": + - 1 # person + - 16 # bird + - 17 # cat + - 18 # dog + - 19 # horse + - 20 # sheep + - 21 # cow + - 22 # elephant + - 23 # bear + - 24 # zebra + - 25 # girafe +``` +The acronym `Atop10P` in config file names indicates that categories are filtered to +only contain top 10 animals and person. + +The training is performed in a *class-agnostic* manner: all instances +are mapped into the same class (person), e.g. +(see [configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml](../configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml)): + +``` + CATEGORY_MAPS: + "base_coco_2017_train": + "16": 1 # bird -> person + "17": 1 # cat -> person + "18": 1 # dog -> person + "19": 1 # horse -> person + "20": 1 # sheep -> person + "21": 1 # cow -> person + "22": 1 # elephant -> person + "23": 1 # bear -> person + "24": 1 # zebra -> person + "25": 1 # girafe -> person +``` +The acronym `CA` in config file names indicates that the training is class-agnostic. + +## Student Model Training + +Student model is trained on data from source domain (humans), +supporting domain (animals) and target domain (chimpanzees). +Annotations in source and supporting domains are similar to the ones +used for the master model training. 
+Annotations in target domain are obtained by applying the master model +to images that contain instances from the target category and sampling +sparse annotations from dense results. This process is called *bootstrapping*. +Below we give details on how the bootstrapping pipeline is implemented. + +### Data Loaders + +The central components that enable bootstrapping are +[`InferenceBasedLoader`](../densepose/data/inference_based_loader.py) and +[`CombinedDataLoader`](../densepose/data/combined_loader.py). + +`InferenceBasedLoader` takes images from a data loader, applies a model +to the images, filters the model outputs based on the selected criteria and +samples the filtered outputs to produce annotations. + +`CombinedDataLoader` combines data obtained from the loaders based on specified +ratios. The standard data loader has the default ratio of 1.0, +ratios for bootstrap datasets are specified in the configuration file. +The higher the ratio the higher the probability to include samples from the +particular data loader into a batch. + +Here is an example of the bootstrapping configuration taken from +[`configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml`](../configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml): +``` +BOOTSTRAP_DATASETS: + - DATASET: "chimpnsee" + RATIO: 1.0 + IMAGE_LOADER: + TYPE: "video_keyframe" + SELECT: + STRATEGY: "random_k" + NUM_IMAGES: 4 + TRANSFORM: + TYPE: "resize" + MIN_SIZE: 800 + MAX_SIZE: 1333 + BATCH_SIZE: 8 + NUM_WORKERS: 1 + INFERENCE: + INPUT_BATCH_SIZE: 1 + OUTPUT_BATCH_SIZE: 1 + DATA_SAMPLER: + # supported types: + # densepose_uniform + # densepose_UV_confidence + # densepose_fine_segm_confidence + # densepose_coarse_segm_confidence + TYPE: "densepose_uniform" + COUNT_PER_CLASS: 8 + FILTER: + TYPE: "detection_score" + MIN_VALUE: 0.8 +BOOTSTRAP_MODEL: + WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl +``` + +The above example has one bootstrap dataset (`chimpnsee`). This dataset is registered as +a [VIDEO_LIST](../densepose/data/datasets/chimpnsee.py) dataset, which means that +it consists of a number of videos specified in a text file. For videos there can be +different strategies to sample individual images. Here we use `video_keyframe` strategy +which considers only keyframes; this ensures temporal offset between sampled images and +faster seek operations. We select at most 4 random keyframes in each video: + +``` +SELECT: + STRATEGY: "random_k" + NUM_IMAGES: 4 +``` + +The frames are then resized + +``` +TRANSFORM: + TYPE: "resize" + MIN_SIZE: 800 + MAX_SIZE: 1333 +``` + +and batched using the standard +[PyTorch DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader): + +``` +BATCH_SIZE: 8 +NUM_WORKERS: 1 +``` + +`InferenceBasedLoader` decomposes those batches into batches of size `INPUT_BATCH_SIZE` +and applies the master model specified by `BOOTSTRAP_MODEL`. 
Models outputs are filtered +by detection score: + +``` +FILTER: + TYPE: "detection_score" + MIN_VALUE: 0.8 +``` + +and sampled using the specified sampling strategy: + +``` +DATA_SAMPLER: + # supported types: + # densepose_uniform + # densepose_UV_confidence + # densepose_fine_segm_confidence + # densepose_coarse_segm_confidence + TYPE: "densepose_uniform" + COUNT_PER_CLASS: 8 +``` + +The current implementation supports +[uniform sampling](../densepose/data/samplers/densepose_uniform.py) and +[confidence-based sampling](../densepose/data/samplers/densepose_confidence_based.py) +to obtain sparse annotations from dense results. For confidence-based +sampling one needs to use the master model which produces confidence estimates. +The `WC1M` master model used in the example above produces all three types of confidence +estimates. + +Finally, sampled data is grouped into batches of size `OUTPUT_BATCH_SIZE`: + +``` +INFERENCE: + INPUT_BATCH_SIZE: 1 + OUTPUT_BATCH_SIZE: 1 +``` + +The proportion of data from annotated datasets and bootstrapped dataset can be tracked +in the logs, e.g.: + +``` +[... densepose.engine.trainer]: batch/ 1.8, batch/base_coco_2017_train 6.4, batch/densepose_coco_2014_train 3.85 +``` + +which means that over the last 20 iterations, on average for 1.8 bootstrapped data samples there were 6.4 samples from `base_coco_2017_train` and 3.85 samples from `densepose_coco_2014_train`. diff --git a/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_CSE.md b/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_CSE.md new file mode 100644 index 0000000..d5761ef --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_CSE.md @@ -0,0 +1,336 @@ +# Continuous Surface Embeddings for Dense Pose Estimation for Humans and Animals + +## Overview + +
+ +The pipeline uses [Faster R-CNN](https://arxiv.org/abs/1506.01497) +with [Feature Pyramid Network](https://arxiv.org/abs/1612.03144) meta architecture +outlined in Figure 1. For each detected object, the model predicts +its coarse segmentation `S` (2 channels: foreground / background) +and the embedding `E` (16 channels). At the same time, the embedder produces vertex +embeddings `Ê` for the corresponding mesh. Universal positional embeddings `E` +and vertex embeddings `Ê` are matched to derive for each pixel its continuous +surface embedding. + +
+*Figure 1. DensePose continuous surface embeddings architecture based on Faster R-CNN with Feature Pyramid Network (FPN).*
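To make the matching step concrete: the per-pixel embeddings `E` and the mesh vertex embeddings `Ê` can be compared directly in the 16-dimensional embedding space, and each pixel assigned to its nearest vertex. The snippet below is only an illustrative sketch using the tensor shapes from the description above, not code from the project:

```python
import torch

def closest_vertices(pixel_embed: torch.Tensor, vertex_embed: torch.Tensor) -> torch.Tensor:
    # pixel_embed:  E with shape (16, H, W) -- per-pixel embeddings from the model
    # vertex_embed: Ê with shape (V, 16)    -- embeddings of the V mesh vertices
    c, h, w = pixel_embed.shape
    pixels = pixel_embed.reshape(c, h * w).t()        # (H*W, 16)
    dist = torch.cdist(pixels, vertex_embed)          # (H*W, V) pairwise distances
    return dist.argmin(dim=1).reshape(h, w)           # index of the closest vertex per pixel
```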
+ +### Datasets + +For more details on datasets used for training and validation of +continuous surface embeddings models, +please refer to the [DensePose Datasets](DENSEPOSE_DATASETS.md) page. + +## Model Zoo and Baselines + +### Human CSE Models + +Continuous surface embeddings models for humans trained using the protocols from [Neverova et al, 2020](https://arxiv.org/abs/2011.12438). + +Models trained with hard assignment loss ℒ: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_s1x | s1x | 0.349 | 0.060 | 6.3 | 61.1 | 67.1 | 64.4 | 65.7 | 251155172 | model \| metrics |
| R_101_FPN_s1x | s1x | 0.461 | 0.071 | 7.4 | 62.3 | 67.2 | 64.7 | 65.8 | 251155500 | model \| metrics |
| R_50_FPN_DL_s1x | s1x | 0.399 | 0.061 | 7.0 | 60.8 | 67.8 | 65.5 | 66.4 | 251156349 | model \| metrics |
| R_101_FPN_DL_s1x | s1x | 0.504 | 0.074 | 8.3 | 61.5 | 68.0 | 65.6 | 66.6 | 251156606 | model \| metrics |
+ +Models trained with soft assignment loss ℒσ: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_soft_s1x | s1x | 0.357 | 0.057 | 9.7 | 61.3 | 66.9 | 64.3 | 65.4 | 250533982 | model \| metrics |
| R_101_FPN_soft_s1x | s1x | 0.464 | 0.071 | 10.5 | 62.1 | 67.3 | 64.5 | 66.0 | 250712522 | model \| metrics |
| R_50_FPN_DL_soft_s1x | s1x | 0.427 | 0.062 | 11.3 | 60.8 | 68.0 | 66.1 | 66.7 | 250713703 | model \| metrics |
| R_101_FPN_DL_soft_s1x | s1x | 0.483 | 0.071 | 12.2 | 61.5 | 68.2 | 66.2 | 67.1 | 250713061 | model \| metrics |
+ +### Animal CSE Models + +Models obtained by finetuning human CSE models on animals data from `ds1_train` +(see the [DensePose LVIS](DENSEPOSE_DATASETS.md#continuous-surface-embeddings-annotations-3) +section for more details on the datasets) with soft assignment loss ℒσ: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_soft_chimps_finetune_4k | 4K | 0.569 | 0.051 | 4.7 | 62.0 | 59.0 | 32.2 | 39.6 | 253146869 | model \| metrics |
| R_50_FPN_soft_animals_finetune_4k | 4K | 0.381 | 0.061 | 7.3 | 44.9 | 55.5 | 21.3 | 28.8 | 253145793 | model \| metrics |
| R_50_FPN_soft_animals_CA_finetune_4k | 4K | 0.412 | 0.059 | 7.1 | 53.4 | 59.5 | 25.4 | 33.4 | 253498611 | model \| metrics |
+ +Acronyms: + +`CA`: class agnostic training, where all annotated instances are mapped into a single category + + +Models obtained by finetuning human CSE models on animals data from `ds2_train` dataset +with soft assignment loss ℒσ and, for some schedules, cycle losses. +Please refer to [DensePose LVIS](DENSEPOSE_DATASETS.md#continuous-surface-embeddings-annotations-3) +section for details on the dataset and to [Neverova et al, 2021]() for details on cycle losses. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | GErr | GPS | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_soft_animals_I0_finetune_16k | 16k | 0.386 | 0.058 | 8.4 | 54.2 | 67.0 | 29.0 | 38.6 | 13.2 | 85.4 | 270727112 | model \| metrics |
| R_50_FPN_soft_animals_I0_finetune_m2m_16k | 16k | 0.508 | 0.056 | 12.2 | 54.1 | 67.3 | 28.6 | 38.4 | 12.5 | 87.6 | 270982215 | model \| metrics |
| R_50_FPN_soft_animals_I0_finetune_i2m_16k | 16k | 0.483 | 0.056 | 9.7 | 54.0 | 66.6 | 28.9 | 38.3 | 11.0 | 88.9 | 270727461 | model \| metrics |
+ +## References + +If you use DensePose methods based on continuous surface embeddings, please take the +references from the following BibTeX entries: + +Continuous surface embeddings: +``` +@InProceedings{Neverova2020ContinuousSurfaceEmbeddings, + title = {Continuous Surface Embeddings}, + author = {Neverova, Natalia and Novotny, David and Khalidov, Vasil and Szafraniec, Marc and Labatut, Patrick and Vedaldi, Andrea}, + journal = {Advances in Neural Information Processing Systems}, + year = {2020}, +} +``` + +Cycle Losses: +``` +@InProceedings{Neverova2021UniversalCanonicalMaps, + title = {Discovering Relationships between Object Categories via Universal Canonical Maps}, + author = {Neverova, Natalia and Sanakoyeu, Artsiom and Novotny, David and Labatut, Patrick and Vedaldi, Andrea}, + journal = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2021}, +} +``` diff --git a/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_DATASETS.md b/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_DATASETS.md new file mode 100644 index 0000000..6943741 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_DATASETS.md @@ -0,0 +1,513 @@ +# DensePose Datasets + +We summarize the datasets used in various DensePose training +schedules and describe different available annotation types. + +## Table of Contents + +[General Information](#general-information) + +[DensePose COCO](#densepose-coco) + +[DensePose PoseTrack](#densepose-posetrack) + +[DensePose Chimps](#densepose-chimps) + +[DensePose LVIS](#densepose-lvis) + +## General Information + +DensePose annotations are typically stored in JSON files. Their +structure follows the [COCO Data Format](https://cocodataset.org/#format-data), +the basic data structure is outlined below: + +``` +{ + "info": info, + "images": [image], + "annotations": [annotation], + "licenses": [license], +} + +info{ + "year": int, + "version": str, + "description": str, + "contributor": str, + "url": str, + "date_created": datetime, +} + +image{ + "id": int, + "width": int, + "height": int, + "file_name": str, + "license": int, + "flickr_url": str, + "coco_url": str, + "date_captured": datetime, +} + +license{ + "id": int, "name": str, "url": str, +} +``` + +DensePose annotations can be of two types: +*chart-based annotations* or *continuous surface embeddings annotations*. +We give more details on each of the two annotation types below. + +### Chart-based Annotations + +These annotations assume a single 3D model which corresponds to +all the instances in a given dataset. +3D model is assumed to be split into *charts*. Each chart has its own +2D parametrization through inner coordinates `U` and `V`, typically +taking values in `[0, 1]`. + +Chart-based annotations consist of *point-based annotations* and +*segmentation annotations*. Point-based annotations specify, for a given +image point, which model part it belongs to and what are its coordinates +in the corresponding chart. Segmentation annotations specify regions +in an image that are occupied by a given part. In some cases, charts +associated with point annotations are more detailed than the ones +associated with segmentation annotations. In this case we distinguish +*fine segmentation* (associated with points) and *coarse segmentation* +(associated with masks). 
+ +**Point-based annotations**: + +`dp_x` and `dp_y`: image coordinates of the annotated points along +the horizontal and vertical axes respectively. The coordinates are defined +with respect to the top-left corner of the annotated bounding box and are +normalized assuming the bounding box size to be `256x256`; + +`dp_I`: for each point specifies the index of the fine segmentation chart +it belongs to; + +`dp_U` and `dp_V`: point coordinates on the corresponding chart. +Each fine segmentation part has its own parametrization in terms of chart +coordinates. + +**Segmentation annotations**: + +`dp_masks`: RLE encoded dense masks (`dict` containing keys `counts` and `size`). +The masks are typically of size `256x256`, they define segmentation within the +bounding box. + +### Continuous Surface Embeddings Annotations + +Continuous surface embeddings annotations also consist of *point-based annotations* +and *segmentation annotations*. Point-based annotations establish correspondence +between image points and 3D model vertices. Segmentation annotations specify +foreground regions for a given instane. + +**Point-based annotations**: + +`dp_x` and `dp_y` specify image point coordinates the same way as for chart-based +annotations; + +`dp_vertex` gives indices of 3D model vertices, which the annotated image points +correspond to; + +`ref_model` specifies 3D model name. + +**Segmentation annotations**: + +Segmentations can either be given by `dp_masks` field or by `segmentation` field. + +`dp_masks`: RLE encoded dense masks (`dict` containing keys `counts` and `size`). +The masks are typically of size `256x256`, they define segmentation within the +bounding box. + +`segmentation`: polygon-based masks stored as a 2D list +`[[x1 y1 x2 y2...],[x1 y1 ...],...]` of polygon vertex coordinates in a given +image. + +## DensePose COCO + +
+*Figure 1. Annotation examples from the DensePose COCO dataset.*
+ +DensePose COCO dataset contains about 50K annotated persons on images from the +[COCO dataset](https://cocodataset.org/#home) +The images are available for download from the +[COCO Dataset download page](https://cocodataset.org/#download): +[train2014](http://images.cocodataset.org/zips/train2014.zip), +[val2014](http://images.cocodataset.org/zips/val2014.zip). +The details on available annotations and their download links are given below. + +### Chart-based Annotations + +Chart-based DensePose COCO annotations are available for the instances of category +`person` and correspond to the model shown in Figure 2. +They include `dp_x`, `dp_y`, `dp_I`, `dp_U` and `dp_V` fields for annotated points +(~100 points per annotated instance) and `dp_masks` field, which encodes +coarse segmentation into 14 parts in the following order: +`Torso`, `Right Hand`, `Left Hand`, `Left Foot`, `Right Foot`, +`Upper Leg Right`, `Upper Leg Left`, `Lower Leg Right`, `Lower Leg Left`, +`Upper Arm Left`, `Upper Arm Right`, `Lower Arm Left`, `Lower Arm Right`, +`Head`. + +
+*Figure 2. Human body charts (fine segmentation) and the associated 14 body parts depicted with rounded rectangles (coarse segmentation).*
+ +The dataset splits used in the training schedules are +`train2014`, `valminusminival2014` and `minival2014`. +`train2014` and `valminusminival2014` are used for training, +and `minival2014` is used for validation. +The table with annotation download links, which summarizes the number of annotated +instances and images for each of the dataset splits is given below: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | # inst | # images | file size | download |
| --- | --- | --- | --- | --- |
| densepose_train2014 | 39210 | 26437 | 526M | densepose_train2014.json |
| densepose_valminusminival2014 | 7297 | 5984 | 105M | densepose_valminusminival2014.json |
| densepose_minival2014 | 2243 | 1508 | 31M | densepose_minival2014.json |
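For reference, the point-based fields described earlier can be mapped back to absolute image coordinates; recall that `dp_x`/`dp_y` are given relative to the bounding box and normalized to `256x256`. A minimal sketch using one of the annotation files from the table above (the rest is standard COCO bookkeeping, not code from the project):

```python
import json

with open("densepose_minival2014.json") as f:
    coco = json.load(f)

# pick the first annotation that carries DensePose points
ann = next(a for a in coco["annotations"] if "dp_x" in a)
x0, y0, box_w, box_h = ann["bbox"]          # COCO boxes are [x, y, width, height]

# undo the 256x256 normalization to get absolute image coordinates
points = [
    (x0 + x / 256.0 * box_w, y0 + y / 256.0 * box_h)
    for x, y in zip(ann["dp_x"], ann["dp_y"])
]
print(f"{len(points)} annotated points, first at {points[0]}")
```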
+ +### Continuous Surface Embeddings Annotations + +DensePose COCO continuous surface embeddings annotations are available for the instances +of category `person`. The annotations correspond to the 3D model shown in Figure 2, +and include `dp_x`, `dp_y` and `dp_vertex` and `ref_model` fields. +All chart-based annotations were also kept for convenience. + +As with chart-based annotations, the dataset splits used in the training schedules are +`train2014`, `valminusminival2014` and `minival2014`. +`train2014` and `valminusminival2014` are used for training, +and `minival2014` is used for validation. +The table with annotation download links, which summarizes the number of annotated +instances and images for each of the dataset splits is given below: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | # inst | # images | file size | download |
| --- | --- | --- | --- | --- |
| densepose_train2014_cse | 39210 | 26437 | 554M | densepose_train2014_cse.json |
| densepose_valminusminival2014_cse | 7297 | 5984 | 110M | densepose_valminusminival2014_cse.json |
| densepose_minival2014_cse | 2243 | 1508 | 32M | densepose_minival2014_cse.json |
+ +## DensePose PoseTrack + +
+*Figure 3. Annotation examples from the PoseTrack dataset.*
+ +DensePose PoseTrack dataset contains annotated image sequences. +To download the images for this dataset, please follow the instructions +from the [PoseTrack Download Page](https://posetrack.net/users/download.php). + +### Chart-based Annotations + +Chart-based DensePose PoseTrack annotations are available for the instances with category +`person` and correspond to the model shown in Figure 2. +They include `dp_x`, `dp_y`, `dp_I`, `dp_U` and `dp_V` fields for annotated points +(~100 points per annotated instance) and `dp_masks` field, which encodes +coarse segmentation into the same 14 parts as in DensePose COCO. + +The dataset splits used in the training schedules are +`posetrack_train2017` (train set) and `posetrack_val2017` (validation set). +The table with annotation download links, which summarizes the number of annotated +instances, instance tracks and images for the dataset splits is given below: + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | # inst | # images | # tracks | file size | download |
| --- | --- | --- | --- | --- | --- |
| densepose_posetrack_train2017 | 8274 | 1680 | 36 | 118M | densepose_posetrack_train2017.json |
| densepose_posetrack_val2017 | 4753 | 782 | 46 | 59M | densepose_posetrack_val2017.json |
+ +## DensePose Chimps + +
+*Figure 4. Example images from the DensePose Chimps dataset.*
+
+The DensePose Chimps dataset contains annotated images of chimpanzees.
+To download the images for this dataset, please use the URL specified in
+the `image_url` field of the annotations.
+
+### Chart-based Annotations
+
+Chart-based DensePose Chimps annotations correspond to the human model shown in Figure 2;
+the instances are thus annotated as belonging to the `person` category.
+They include `dp_x`, `dp_y`, `dp_I`, `dp_U` and `dp_V` fields for annotated points
+(~3 points per annotated instance) and a `dp_masks` field, which encodes
+the foreground mask in RLE format.
+
+Chart-based DensePose Chimps annotations are used for validation only.
+The table with the annotation download link, which summarizes the number of annotated
+instances and images, is given below:
| Name | # inst | # images | file size | download |
| --- | --- | --- | --- | --- |
| densepose_chimps | 930 | 654 | 6M | densepose_chimps_full_v2.json |
+
+### Continuous Surface Embeddings Annotations
+
+Continuous surface embeddings annotations for DensePose Chimps
+include `dp_x`, `dp_y` and `dp_vertex` point-based annotations
+(~3 points per annotated instance), a `dp_masks` field with the same
+contents as for chart-based annotations, and a `ref_model` field
+which refers to the chimpanzee 3D model `chimp_5029`.
+
+The dataset is split into training and validation subsets.
+The table with annotation download links, which summarizes the number of annotated
+instances and images for each of the dataset splits, is given below:
| Name | # inst | # images | file size | download |
| --- | --- | --- | --- | --- |
| densepose_chimps_cse_train | 500 | 350 | 3M | densepose_chimps_cse_train.json |
| densepose_chimps_cse_val | 430 | 304 | 3M | densepose_chimps_cse_val.json |
+ +## DensePose LVIS + +
+*Figure 5. Example images from the DensePose LVIS dataset.*
+
+The DensePose LVIS dataset contains segmentation and DensePose annotations for animals
+on images from the [LVIS dataset](https://www.lvisdataset.org/dataset).
+The images are available for download through the links:
+[train2017](http://images.cocodataset.org/zips/train2017.zip),
+[val2017](http://images.cocodataset.org/zips/val2017.zip).
+
+### Continuous Surface Embeddings Annotations
+
+Continuous surface embeddings (CSE) annotations for DensePose LVIS
+include `dp_x`, `dp_y` and `dp_vertex` point-based annotations
+(~3 points per annotated instance) and a `ref_model` field
+which refers to a 3D model that corresponds to the instance.
+Instances from 9 animal categories were annotated with CSE DensePose data:
+bear, cow, cat, dog, elephant, giraffe, horse, sheep and zebra.
+
+Foreground masks are available from instance segmentation annotations
+(`segmentation` field) in polygon format; they are stored as a 2D list
+`[[x1 y1 x2 y2...],[x1 y1 ...],...]`.
+
+We used two datasets, each consisting of one training (`train`)
+and one validation (`val`) subset: the first one (`ds1`)
+was used in [Neverova et al, 2020](https://arxiv.org/abs/2011.12438);
+the second one (`ds2`) was used in [Neverova et al, 2021]().
+
+The summary of the available datasets is given below:
| Name | # cat (all data) | # img (all data) | # segm (all data) | # img (9 animal categories) | # segm (9 animal categories) | # dp (9 animal categories) | file size | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ds1_train | 556 | 4141 | 23985 | 4141 | 9472 | 5184 | 46M | [densepose_lvis_v1_ds1_train_v1.json](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds1_train_v1.json) |
| ds1_val | 251 | 571 | 3281 | 571 | 1537 | 1036 | 5M | [densepose_lvis_v1_ds1_val_v1.json](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds1_val_v1.json) |
| ds2_train | 1203 | 99388 | 1270141 | 13746 | 46964 | 18932 | 1051M | [densepose_lvis_v1_ds2_train_v1.json](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds2_train_v1.json) |
| ds2_val | 9 | 2690 | 9155 | 2690 | 9155 | 3604 | 24M | [densepose_lvis_v1_ds2_val_v1.json](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds2_val_v1.json) |
+ +Legend: + +`#cat` - number of categories in the dataset for which annotations are available; + +`#img` - number of images with annotations in the dataset; + +`#segm` - number of segmentation annotations; + +`#dp` - number of DensePose annotations. + + +Important Notes: + +1. The reference models used for `ds1_train` and `ds1_val` are +`bear_4936`, `cow_5002`, `cat_5001`, `dog_5002`, `elephant_5002`, `giraffe_5002`, +`horse_5004`, `sheep_5004` and `zebra_5002`. The reference models used for +`ds2_train` and `ds2_val` are `bear_4936`, `cow_5002`, `cat_7466`, +`dog_7466`, `elephant_5002`, `giraffe_5002`, `horse_5004`, `sheep_5004` and `zebra_5002`. +So reference models for categories `cat` aind `dog` are different for `ds1` and `ds2`. + +2. Some annotations from `ds1_train` are reused in `ds2_train` (4538 DensePose annotations +and 21275 segmentation annotations). The ones for cat and dog categories were remapped +from `cat_5001` and `dog_5002` reference models used in `ds1` to `cat_7466` and `dog_7466` +used in `ds2`. + +3. All annotations from `ds1_val` are included into `ds2_val` after the remapping +procedure mentioned in note 2. + +4. Some annotations from `ds1_train` are part of `ds2_val` (646 DensePose annotations and +1225 segmentation annotations). Thus one should not train on `ds1_train` if evaluating on `ds2_val`. diff --git a/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_IUV.md b/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_IUV.md new file mode 100644 index 0000000..de158e0 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/DENSEPOSE_IUV.md @@ -0,0 +1,627 @@ +# Chart-based Dense Pose Estimation for Humans and Animals + +## Overview + +The goal of chart-based DensePose methods is to establish dense correspondences +between image pixels and 3D object mesh by splitting the latter into charts and estimating +for each pixel the corresponding chart index `I` and local chart coordinates `(U, V)`. + +
+ +The charts used for human DensePose estimation are shown in Figure 1. +The human body is split into 24 parts, each part is parametrized by `U` and `V` +coordinates, each taking values in `[0, 1]`. + +
+*Figure 1. Partitioning and parametrization of human body surface.*
+ +The pipeline uses [Faster R-CNN](https://arxiv.org/abs/1506.01497) +with [Feature Pyramid Network](https://arxiv.org/abs/1612.03144) meta architecture +outlined in Figure 2. For each detected object, the model predicts +its coarse segmentation `S` (2 or 15 channels: foreground / background or +background + 14 predefined body parts), fine segmentation `I` (25 channels: +background + 24 predefined body parts) and local chart coordinates `U` and `V`. + +
+*Figure 2. DensePose chart-based architecture based on Faster R-CNN with Feature Pyramid Network (FPN).*
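To illustrate how the predicted maps are combined into a per-pixel result: the chart index is taken from the fine segmentation, and `U`/`V` are read from the channel of the winning chart. This is a schematic sketch that assumes per-chart `U`/`V` maps as in the architecture above; the actual conversion code in the repository additionally handles box resampling and confidences:

```python
import torch

def iuv_from_head_outputs(fine_segm: torch.Tensor, u: torch.Tensor, v: torch.Tensor):
    # fine_segm: (25, H, W) scores for background + 24 charts
    # u, v:      (25, H, W) chart coordinates predicted per chart channel
    labels = fine_segm.argmax(dim=0)                 # (H, W) chart index I, 0 = background
    index = labels.unsqueeze(0)                      # (1, H, W) for gather along the chart dim
    u_map = torch.gather(u, 0, index).squeeze(0)     # (H, W) U of the selected chart
    v_map = torch.gather(v, 0, index).squeeze(0)     # (H, W) V of the selected chart
    return labels, u_map, v_map
```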
+ +### Bootstrapping Chart-Based Models + +[Sanakoyeu et al., 2020](https://arxiv.org/pdf/2003.00080.pdf) introduced a pipeline +to transfer DensePose models trained on humans to proximal animal classes (chimpanzees), +which is summarized in Figure 3. The training proceeds in two stages: + +First, a *master* model is trained on data from source domain (humans with full +DensePose annotation `S`, `I`, `U` and `V`) +and supporting domain (animals with segmentation annotation only). +Only selected animal classes are chosen from the supporting +domain through *category filters* to guarantee the quality of target domain results. +The training is done in *class-agnostic manner*: all selected categories are mapped +to a single category (human). + +Second, a *student* model is trained on data from source and supporting domains, +as well as data from target domain obtained by applying the master model, selecting +high-confidence detections and sampling the results. + +
+*Figure 3. Domain adaptation: the master model is trained on data from source and supporting domains to produce predictions in the target domain; the student model combines data from source and supporting domains, as well as sampled predictions from the master model on the target domain, to improve the quality of target domain predictions.*
+ +Examples of pretrained master and student models are available in the [Model Zoo](#ModelZooBootstrap). +For more details on the bootstrapping pipeline, please see [Bootstrapping Pipeline](BOOTSTRAPPING_PIPELINE.md). + +### Datasets + +For more details on datasets used for chart-based model training and validation, +please refer to the [DensePose Datasets](DENSEPOSE_DATASETS.md) page. + +## Model Zoo and Baselines + +### Legacy Models + +Baselines trained using schedules from [Güler et al, 2018](https://arxiv.org/pdf/1802.00434.pdf) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_s1x_legacy | s1x | 0.307 | 0.051 | 3.2 | 58.1 | 58.2 | 52.1 | 54.9 | 164832157 | model \| metrics |
| R_101_FPN_s1x_legacy | s1x | 0.390 | 0.063 | 4.3 | 59.5 | 59.3 | 53.2 | 56.0 | 164832182 | model \| metrics |
+ +### Improved Baselines, Original Fully Convolutional Head + +These models use an improved training schedule and Panoptic FPN head from [Kirillov et al, 2019](https://arxiv.org/abs/1901.02446). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_s1x | s1x | 0.359 | 0.066 | 4.5 | 61.2 | 67.2 | 63.7 | 65.3 | 165712039 | model \| metrics |
| R_101_FPN_s1x | s1x | 0.428 | 0.079 | 5.8 | 62.3 | 67.8 | 64.5 | 66.2 | 165712084 | model \| metrics |
+ +### Improved Baselines, DeepLabV3 Head + +These models use an improved training schedule, Panoptic FPN head from [Kirillov et al, 2019](https://arxiv.org/abs/1901.02446) and DeepLabV3 head from [Chen et al, 2017](https://arxiv.org/abs/1706.05587). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_DL_s1x | s1x | 0.392 | 0.070 | 6.7 | 61.1 | 68.3 | 65.6 | 66.7 | 165712097 | model \| metrics |
| R_101_FPN_DL_s1x | s1x | 0.478 | 0.083 | 7.0 | 62.3 | 68.7 | 66.3 | 67.6 | 165712116 | model \| metrics |
+
+### Baselines with Confidence Estimation
+
+These models perform additional estimation of confidence in regressed UV coordinates, along the lines of [Neverova et al., 2019](https://papers.nips.cc/paper/8378-correlated-uncertainty-for-learning-dense-correspondences-from-noisy-labels).
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_WC1_s1x | s1x | 0.353 | 0.064 | 4.6 | 60.5 | 67.0 | 64.2 | 65.4 | 173862049 | model \| metrics |
| R_50_FPN_WC2_s1x | s1x | 0.364 | 0.066 | 4.8 | 60.7 | 66.9 | 64.2 | 65.7 | 173861455 | model \| metrics |
| R_50_FPN_DL_WC1_s1x | s1x | 0.397 | 0.068 | 6.7 | 61.1 | 68.1 | 65.8 | 67.0 | 173067973 | model \| metrics |
| R_50_FPN_DL_WC2_s1x | s1x | 0.410 | 0.070 | 6.8 | 60.8 | 67.9 | 65.6 | 66.7 | 173859335 | model \| metrics |
| R_101_FPN_WC1_s1x | s1x | 0.435 | 0.076 | 5.7 | 62.5 | 67.6 | 64.9 | 66.3 | 171402969 | model \| metrics |
| R_101_FPN_WC2_s1x | s1x | 0.450 | 0.078 | 5.7 | 62.3 | 67.6 | 64.8 | 66.4 | 173860702 | model \| metrics |
| R_101_FPN_DL_WC1_s1x | s1x | 0.479 | 0.081 | 7.9 | 62.0 | 68.4 | 66.2 | 67.2 | 173858525 | model \| metrics |
| R_101_FPN_DL_WC2_s1x | s1x | 0.491 | 0.082 | 7.6 | 61.7 | 68.3 | 65.9 | 67.2 | 173294801 | model \| metrics |
+
+Acronyms:
+
+`WC1`: with confidence estimation model type 1 for `U` and `V`
+
+`WC2`: with confidence estimation model type 2 for `U` and `V`
+
+### Baselines with Mask Confidence Estimation
+
+These models estimate confidence in regressed UV coordinates as well as
+confidences associated with coarse and fine segmentation;
+see [Sanakoyeu et al., 2020](https://arxiv.org/pdf/2003.00080.pdf) for details.
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_WC1M_s1x | s1x | 0.381 | 0.066 | 4.8 | 60.6 | 66.7 | 64.0 | 65.4 | 217144516 | model \| metrics |
| R_50_FPN_WC2M_s1x | s1x | 0.342 | 0.068 | 5.0 | 60.7 | 66.9 | 64.2 | 65.5 | 216245640 | model \| metrics |
| R_50_FPN_DL_WC1M_s1x | s1x | 0.371 | 0.068 | 6.0 | 60.7 | 68.0 | 65.2 | 66.7 | 216245703 | model \| metrics |
| R_50_FPN_DL_WC2M_s1x | s1x | 0.385 | 0.071 | 6.1 | 60.8 | 68.1 | 65.0 | 66.4 | 216245758 | model \| metrics |
| R_101_FPN_WC1M_s1x | s1x | 0.423 | 0.079 | 5.9 | 62.0 | 67.3 | 64.8 | 66.0 | 216453687 | model \| metrics |
| R_101_FPN_WC2M_s1x | s1x | 0.436 | 0.080 | 5.9 | 62.5 | 67.4 | 64.5 | 66.0 | 216245682 | model \| metrics |
| R_101_FPN_DL_WC1M_s1x | s1x | 0.453 | 0.079 | 6.8 | 62.0 | 68.1 | 66.4 | 67.1 | 216245771 | model \| metrics |
| R_101_FPN_DL_WC2M_s1x | s1x | 0.464 | 0.080 | 6.9 | 61.9 | 68.2 | 66.1 | 67.1 | 216245790 | model \| metrics |
+
+Acronyms:
+
+`WC1M`: with confidence estimation model type 1 for `U` and `V` and mask confidence estimation
+
+`WC2M`: with confidence estimation model type 2 for `U` and `V` and mask confidence estimation
+
+### Bootstrapping Baselines
+
+Master and student models trained using the bootstrapping pipeline with chimpanzee as the target category;
+see [Sanakoyeu et al., 2020](https://arxiv.org/pdf/2003.00080.pdf)
+and [Bootstrapping Pipeline](BOOTSTRAPPING_PIPELINE.md) for details.
+Evaluation is performed on the [DensePose Chimps](DENSEPOSE_DATASETS.md#densepose-chimps) dataset.
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | segm AP | dp. APex GPS | dp. AP GPS | dp. AP GPSm | model id | download |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R_50_FPN_DL_WC1M_3x_Atop10P_CA | 3x | 0.522 | 0.073 | 9.7 | 61.3 | 59.1 | 36.2 | 20.0 | 30.2 | 217578784 | model \| metrics |
| R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform | 3x | 1.939 | 0.072 | 10.1 | 60.9 | 58.5 | 37.2 | 21.5 | 31.0 | 256453729 | model \| metrics |
| R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv | 3x | 1.985 | 0.072 | 9.6 | 61.4 | 58.9 | 38.3 | 22.2 | 32.1 | 256452095 | model \| metrics |
| R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm | 3x | 2.047 | 0.072 | 10.3 | 60.9 | 58.5 | 36.7 | 20.7 | 30.7 | 256452819 | model \| metrics |
| R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm | 3x | 1.830 | 0.070 | 9.6 | 61.3 | 59.2 | 37.9 | 21.5 | 31.6 | 256455697 | model \| metrics |
+ +Acronyms: + +`WC1M`: with confidence estimation model type 1 for `U` and `V` and mask confidence estimation + +`Atop10P`: humans and animals from the 10 best suitable categories are used for training + +`CA`: class agnostic training, where all annotated instances are mapped into a single category + +`B_<...>`: schedule with bootstrapping with the specified results sampling strategy + +Note: + +The relaxed `dp. APex GPS` metric was used in +[Sanakoyeu et al., 2020](https://arxiv.org/pdf/2003.00080.pdf) to evaluate DensePose +results. This metric considers matches at thresholds 0.2, 0.3 and 0.4 additionally +to the standard ones used in the evaluation protocol. The minimum threshold is +controlled by `DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD` config option. + +### License + +All models available for download are licensed under the +[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/) + +## References + +If you use chart-based DensePose methods, please take the references from the following +BibTeX entries: + +DensePose bootstrapping pipeline: +``` +@InProceedings{Sanakoyeu2020TransferringDensePose, + title = {Transferring Dense Pose to Proximal Animal Classes}, + author = {Artsiom Sanakoyeu and Vasil Khalidov and Maureen S. McCarthy and Andrea Vedaldi and Natalia Neverova}, + journal = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year = {2020}, +} +``` + +DensePose with confidence estimation: +``` +@InProceedings{Neverova2019DensePoseConfidences, + title = {Correlated Uncertainty for Learning Dense Correspondences from Noisy Labels}, + author = {Neverova, Natalia and Novotny, David and Vedaldi, Andrea}, + journal = {Advances in Neural Information Processing Systems}, + year = {2019}, +} +``` + +Original DensePose: +``` +@InProceedings{Guler2018DensePose, + title={DensePose: Dense Human Pose Estimation In The Wild}, + author={R\{i}za Alp G\"uler, Natalia Neverova, Iasonas Kokkinos}, + journal={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2018} +} +``` diff --git a/data_processing/detectron2/projects/DensePose/doc/GETTING_STARTED.md b/data_processing/detectron2/projects/DensePose/doc/GETTING_STARTED.md new file mode 100644 index 0000000..a5c86f3 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/GETTING_STARTED.md @@ -0,0 +1,76 @@ +# Getting Started with DensePose + +## Inference with Pre-trained Models + +1. Pick a model and its config file from [Model Zoo(IUV)](DENSEPOSE_IUV.md#ModelZoo), [Model Zoo(CSE)](DENSEPOSE_CSE.md#ModelZoo), for example [densepose_rcnn_R_50_FPN_s1x.yaml](../configs/densepose_rcnn_R_50_FPN_s1x.yaml) +2. Run the [Apply Net](TOOL_APPLY_NET.md) tool to visualize the results or save the to disk. For example, to use contour visualization for DensePose, one can run: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml densepose_rcnn_R_50_FPN_s1x.pkl image.jpg dp_contour,bbox --output image_densepose_contour.png +``` +Please see [Apply Net](TOOL_APPLY_NET.md) for more details on the tool. + +## Training + +First, prepare the [dataset](http://densepose.org/#dataset) into the following structure under the directory you'll run training scripts: +
+datasets/coco/
+  annotations/
+    densepose_{train,minival,valminusminival}2014.json
+    densepose_minival2014_100.json   (optional, for testing only)
+  {train,val}2014/
+    # image files that are mentioned in the corresponding json
+
+ +To train a model one can use the [train_net.py](../train_net.py) script. +This script was used to train all DensePose models in [Model Zoo(IUV)](DENSEPOSE_IUV.md#ModelZoo), [Model Zoo(CSE)](DENSEPOSE_CSE.md#ModelZoo). +For example, to launch end-to-end DensePose-RCNN training with ResNet-50 FPN backbone +on 8 GPUs following the s1x schedule, one can run +```bash +python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml --num-gpus 8 +``` +The configs are made for 8-GPU training. To train on 1 GPU, one can apply the +[linear learning rate scaling rule](https://arxiv.org/abs/1706.02677): +```bash +python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \ + SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025 +``` + +## Evaluation + +Model testing can be done in the same way as training, except for an additional flag `--eval-only` and +model location specification through `MODEL.WEIGHTS model.pth` in the command line +```bash +python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \ + --eval-only MODEL.WEIGHTS model.pth +``` + +## Tools + +We provide tools which allow one to: + - easily view DensePose annotated data in a dataset; + - perform DensePose inference on a set of images; + - visualize DensePose model results; + +`query_db` is a tool to print or visualize DensePose data in a dataset. +Please refer to [Query DB](TOOL_QUERY_DB.md) for more details on this tool + +`apply_net` is a tool to print or visualize DensePose results. +Please refer to [Apply Net](TOOL_APPLY_NET.md) for more details on this tool + + +## Installation as a package + +DensePose can also be installed as a Python package for integration with other software. + +The following dependencies are needed: +- Python >= 3.7 +- [PyTorch](https://pytorch.org/get-started/locally/#start-locally) >= 1.7 (to match [detectron2 requirements](https://detectron2.readthedocs.io/en/latest/tutorials/install.html#requirements)) +- [torchvision](https://pytorch.org/vision/stable/) version [compatible with your version of PyTorch](https://github.com/pytorch/vision#installation) + +DensePose can then be installed from this repository with: + +``` +pip install git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose +``` + +After installation, the package will be importable as `densepose`. 
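As a quick check that the package import works, the sketch below builds a predictor from one of the configs used above. The weight path and image name are placeholders, and the snippet mirrors (but is not copied from) what `apply_net.py` does internally:

```python
import cv2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

from densepose import add_densepose_config

cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file("configs/densepose_rcnn_R_50_FPN_s1x.yaml")
cfg.MODEL.WEIGHTS = "model.pth"   # a checkpoint from the Model Zoo, path or URL
predictor = DefaultPredictor(cfg)

outputs = predictor(cv2.imread("image.jpg"))["instances"]
print(outputs.pred_boxes)          # detected boxes
print(outputs.pred_densepose)      # DensePose predictions for each detection
```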
diff --git a/data_processing/detectron2/projects/DensePose/doc/RELEASE_2020_04.md b/data_processing/detectron2/projects/DensePose/doc/RELEASE_2020_04.md new file mode 100644 index 0000000..2fab6ae --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/RELEASE_2020_04.md @@ -0,0 +1,6 @@ +# DensePose Confidence Estimation and Model Zoo Improvements + +* [DensePose models with confidence estimation](doc/DENSEPOSE_IUV.md#ModelZooConfidence) +* [Panoptic FPN and DeepLabV3 head implementation](doc/DENSEPOSE_IUV.md#ModelZooDeepLabV3) +* Test time augmentations for DensePose +* New evaluation metric (GPSm) that yields more reliable scores diff --git a/data_processing/detectron2/projects/DensePose/doc/RELEASE_2021_03.md b/data_processing/detectron2/projects/DensePose/doc/RELEASE_2021_03.md new file mode 100644 index 0000000..eb908a6 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/RELEASE_2021_03.md @@ -0,0 +1,45 @@ +# DensePose CSE and DensePose Evolution + +* [DensePose Evolution pipeline](DENSEPOSE_IUV.md#ModelZooBootstrap), a framework to bootstrap + DensePose on unlabeled data + * [`InferenceBasedLoader`](../densepose/data/inference_based_loader.py) + with data samplers to use inference results from one model + to train another model (bootstrap); + * [`VideoKeyframeDataset`](../densepose/data/video/video_keyframe_dataset.py) + to efficiently load images from video keyframes; + * Category maps and filters to combine annotations from different categories + and train in a class-agnostic manner; + * [Pretrained models](DENSEPOSE_IUV.md#ModelZooBootstrap) for DensePose estimation on chimpanzees; + * DensePose head training from partial data (segmentation only); + * [DensePose models with mask confidence estimation](DENSEPOSE_IUV.md#ModelZooMaskConfidence); + * [DensePose Chimps]() dataset for IUV evaluation +* [DensePose Continuous Surface Embeddings](DENSEPOSE_CSE.md), a framework to extend DensePose + to various categories using 3D models + * [Hard embedding](../densepose/modeling/losses/embed.py) and + [soft embedding](../densepose/modeling/losses/soft_embed.py) + losses to train universal positional embeddings; + * [Embedder](../(densepose/modeling/cse/embedder.py) to handle + mesh vertex embeddings; + * [Storage](../densepose/evaluation/tensor_storage.py) for evaluation with high volumes of data; + * [Pretrained models](DENSEPOSE_CSE.md#ModelZoo) for DensePose CSE estimation on humans and animals; + * [DensePose Chimps](DENSEPOSE_DATASETS.md#densepose-chimps) and + [DensePose LVIS](DENSEPOSE_DATASETS.md#densepose-lvis) datasets for CSE finetuning and evaluation; + * [Vertex and texture mapping visualizers](../densepose/vis/densepose_outputs_vertex.py); +* Refactoring of all major components: losses, predictors, model outputs, model results, visualizers; + * Dedicated structures for [chart outputs](../densepose/structures/chart.py), + [chart outputs with confidences](../densepose/structures/chart_confidence.py), + [chart results](../densepose/structures/chart_result.py), + [CSE outputs](../densepose/structures/cse.py); + * Dedicated predictors for + [chart-based estimation](../densepose/modeling/predictors/chart.py), + [confidence estimation](../densepose/modeling/predictors/chart_confidence.py) + and [CSE estimation](../densepose/modeling/predictors/cse.py); + * Generic handling of various [conversions](../densepose/converters) (e.g. 
from outputs to results); + * Better organization of various [losses](../densepose/modeling/losses); + * Segregation of loss data accumulators for + [IUV setting](../densepose/modeling/losses/utils.py) + and [CSE setting](../densepose/modeling/losses/embed_utils.py); + * Splitting visualizers into separate modules; +* [HRNet](../densepose/modeling/hrnet.py) and [HRFPN](../densepose/modeling/hrfpn.py) backbones; +* [PoseTrack](DENSEPOSE_DATASETS.md#densepose-posetrack) dataset; +* [IUV texture visualizer](../densepose/vis/densepose_results_textures.py) diff --git a/data_processing/detectron2/projects/DensePose/doc/RELEASE_2021_06.md b/data_processing/detectron2/projects/DensePose/doc/RELEASE_2021_06.md new file mode 100644 index 0000000..fb5ff4f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/RELEASE_2021_06.md @@ -0,0 +1,12 @@ +# DensePose CSE with Cycle Losses + +This release follows the paper [Neverova et al, 2021]() and +adds CSE datasets with more annotations, better CSE animal models +to the model zoo, losses to ensure cycle consistency for models and mesh +alignment evaluator. In particular: + +* [Pixel to shape](../densepose/modeling/losses/cycle_pix2shape.py) and [shape to shape](../densepose/modeling/losses/cycle_shape2shape.py) cycle consistency losses; +* Mesh alignment [evaluator](../densepose/evaluation/mesh_alignment_evaluator.py); +* Existing CSE datasets renamed to [ds1_train](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds1_train_v1.json) and [ds1_val](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds1_val_v1.json); +* New CSE datasets [ds2_train](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds2_train_v1.json) and [ds2_val](https://dl.fbaipublicfiles.com/densepose/annotations/lvis/densepose_lvis_v1_ds2_val_v1.json) added; +* Better CSE animal models trained with the 16k schedule added to the [model zoo](DENSEPOSE_CSE.md#animal-cse-models). diff --git a/data_processing/detectron2/projects/DensePose/doc/TOOL_APPLY_NET.md b/data_processing/detectron2/projects/DensePose/doc/TOOL_APPLY_NET.md new file mode 100644 index 0000000..ca8e1dd --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/TOOL_APPLY_NET.md @@ -0,0 +1,203 @@ +# Apply Net + +`apply_net` is a tool to print or visualize DensePose results on a set of images. +It has two modes: `dump` to save DensePose model results to a pickle file +and `show` to visualize them on images. + +The `image.jpg` file that is used as an example in this doc can be found [here](http://images.cocodataset.org/train2017/000000117508.jpg) + +## Dump Mode + +The general command form is: +```bash +python apply_net.py dump [-h] [-v] [--output ] +``` + +There are three mandatory arguments: + - ``, configuration file for a given model; + - ``, model file with trained parameters + - ``, input image file name, pattern or folder + +One can additionally provide `--output` argument to define the output file name, +which defaults to `output.pkl`. + + +Examples: + +1. 
Dump results of the [R_50_FPN_s1x](https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl) DensePose model for images in a folder `images` to file `dump.pkl`: +```bash +python apply_net.py dump configs/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl \ +images --output dump.pkl -v +``` + +2. Dump results of the [R_50_FPN_s1x](https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl) DensePose model for images with file name matching a pattern `image*.jpg` to file `results.pkl`: +```bash +python apply_net.py dump configs/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl \ +"image*.jpg" --output results.pkl -v +``` + +If you want to load the pickle file generated by the above command: +``` +# make sure DensePose is in your PYTHONPATH, or use the following line to add it: +sys.path.append("/your_detectron2_path/detectron2_repo/projects/DensePose/") + +f = open('/your_result_path/results.pkl', 'rb') +data = pickle.load(f) +``` + +The file `results.pkl` contains the list of results per image, for each image the result is a dictionary. + +**If you use a [IUV model](DENSEPOSE_IUV.md#-model-zoo-and-baselines)**, the dumped data will have the following format: + +``` +data: [{'file_name': '/your_path/image1.jpg', + 'scores': tensor([0.9884]), + 'pred_boxes_XYXY': tensor([[ 69.6114, 0.0000, 706.9797, 706.0000]]), + 'pred_densepose': [DensePoseChartResultWithConfidences(labels=tensor(...), uv=tensor(...), sigma_1=None, + sigma_2=None, kappa_u=None, kappa_v=None, fine_segm_confidence=None, coarse_segm_confidence=None), + DensePoseChartResultWithConfidences, ...] + } + {'file_name': '/your_path/image2.jpg', + 'scores': tensor([0.9999, 0.5373, 0.3991]), + 'pred_boxes_XYXY': tensor([[ 59.5734, 7.7535, 579.9311, 932.3619], + [612.9418, 686.1254, 612.9999, 704.6053], + [164.5081, 407.4034, 598.3944, 920.4266]]), + 'pred_densepose': [DensePoseChartResultWithConfidences(labels=tensor(...), uv=tensor(...), sigma_1=None, + sigma_2=None, kappa_u=None, kappa_v=None, fine_segm_confidence=None, coarse_segm_confidence=None), + DensePoseChartResultWithConfidences, ...] 
+ }] +``` + +`DensePoseChartResultWithConfidences` contains the following fields: +- `labels` - a tensor of size `[H, W]` of type `torch.long` which contains fine segmentation labels (previously called `I`) +- `uv` - a tensor of size `[2, H, W]` of type `torch.float` which contains `U` and `V` coordinates +- various optional confidence-related fields (`sigma_1`, `sigma_2`, `kappa_u`, `kappa_v`, `fine_segm_confidence`, `coarse_segm_confidence`) + + +**If you use a [CSE model](DENSEPOSE_CSE.md#-model-zoo-and-baselines)**, the dumped data will have the following format: +``` +data: [{'file_name': '/your_path/image1.jpg', + 'scores': tensor([0.9984, 0.9961]), + 'pred_boxes_XYXY': tensor([[480.0093, 461.0796, 698.3614, 696.1011], + [78.1589, 168.6614, 307.1287, 653.8522]]), + 'pred_densepose': DensePoseEmbeddingPredictorOutput(embedding=tensor(...), coarse_segm=tensor(...))} + {'file_name': '/your_path/image2.jpg', + 'scores': tensor([0.9189, 0.9491]), + 'pred_boxes_XYXY': tensor([[734.9685, 534.2003, 287.3923, 254.8859], + [434.2853, 765.1219, 132.1029, 867.9283]]), + 'pred_densepose': DensePoseEmbeddingPredictorOutput(embedding=tensor(...), coarse_segm=tensor(...))}] +``` + +`DensePoseEmbeddingPredictorOutput` contains the following fields: +- `embedding` - a tensor of size `[N, D, sz, sz]` of type `torch.float`, which contains embeddings of size `D` of the `N` detections in the image +- `coarse_segm` - a tensor of size `[N, 2, sz, sz]` of type `torch.float` which contains segmentation scores of the `N` detections in the image; e.g. a mask can be obtained by `coarse_segm.argmax(dim=1)` + +`sz` is a fixed size for the tensors; you can resize them to the size of the bounding box, if needed + +We can use the following code, to parse the outputs of the first +detected instance on the first image (IUV model). +``` +img_id, instance_id = 0, 0 # Look at the first image and the first detected instance +bbox_xyxy = data[img_id]['pred_boxes_XYXY'][instance_id] +result = data[img_id]['pred_densepose'][instance_id] +uv = result.uv +``` +The array `bbox_xyxy` contains (x0, y0, x1, y1) of the bounding box. 
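Note that the loading snippet above omits its imports. A self-contained variant for a CSE dump, which also resizes the fixed-size `coarse_segm` tensor of the first detection to its bounding box as suggested above, could look as follows (paths are placeholders; the interpolation call is an illustration rather than code from the tool):

```python
import pickle
import sys

import torch.nn.functional as F

# make the densepose package importable so that the result classes unpickle
sys.path.append("/your_detectron2_path/detectron2_repo/projects/DensePose/")

with open("results.pkl", "rb") as f:
    data = pickle.load(f)

entry = data[0]                                     # results for the first image
x0, y0, x1, y1 = entry["pred_boxes_XYXY"][0].int().tolist()
result = entry["pred_densepose"]                    # DensePoseEmbeddingPredictorOutput

# resize the (1, 2, sz, sz) coarse segmentation of the first detection to the box size
segm = F.interpolate(result.coarse_segm[0:1], size=(y1 - y0, x1 - x0),
                     mode="bilinear", align_corners=False)
mask = segm.argmax(dim=1)[0]                        # (box_h, box_w) foreground mask
print(mask.shape)
```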
+ + +## Visualization Mode + +The general command form is: +```bash +python apply_net.py show [-h] [-v] [--min_score ] [--nms_thresh ] [--output ] +``` + +There are four mandatory arguments: + - ``, configuration file for a given model; + - ``, model file with trained parameters + - ``, input image file name, pattern or folder + - ``, visualizations specifier; currently available visualizations are: + * `bbox` - bounding boxes of detected persons; + * `dp_segm` - segmentation masks for detected persons; + * `dp_u` - each body part is colored according to the estimated values of the + U coordinate in part parameterization; + * `dp_v` - each body part is colored according to the estimated values of the + V coordinate in part parameterization; + * `dp_contour` - plots contours with color-coded U and V coordinates; + * `dp_iuv_texture` - transfers the texture from a given texture image file to detected instances, in IUV mode; + * `dp_vertex` - plots the rainbow visualization of the closest vertices prediction for a given mesh, in CSE mode; + * `dp_cse_texture` - transfers the texture from a given list of texture image files (one from each human or animal mesh) to detected instances, in CSE mode + + +One can additionally provide the following optional arguments: + - `--min_score` to only show detections with sufficient scores that are not lower than provided value + - `--nms_thresh` to additionally apply non-maximum suppression to detections at a given threshold + - `--output` to define visualization file name template, which defaults to `output.png`. + To distinguish output file names for different images, the tool appends 1-based entry index, + e.g. output.0001.png, output.0002.png, etc... +- `--texture_atlas` to define the texture atlas image for IUV texture transfer +- `--texture_atlases_map` to define the texture atlas images map (a dictionary `{mesh name: texture atlas image}`) for CSE texture transfer + + +The following examples show how to output results of a DensePose model +with ResNet-50 FPN backbone using different visualizations for image `image.jpg`: + +1. Show bounding box and segmentation: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl \ +image.jpg bbox,dp_segm -v +``` +![Bounding Box + Segmentation Visualization](https://dl.fbaipublicfiles.com/densepose/web/apply_net/res_bbox_dp_segm.jpg) + +2. Show bounding box and estimated U coordinates for body parts: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl \ +image.jpg bbox,dp_u -v +``` +![Bounding Box + U Coordinate Visualization](https://dl.fbaipublicfiles.com/densepose/web/apply_net/res_bbox_dp_u.jpg) + +3. 
Show bounding box and estimated V coordinates for body parts: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl \ +image.jpg bbox,dp_v -v +``` +![Bounding Box + V Coordinate Visualization](https://dl.fbaipublicfiles.com/densepose/web/apply_net/res_bbox_dp_v.jpg) + +4. Show bounding box and estimated U and V coordinates via contour plots: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl \ +image.jpg dp_contour,bbox -v +``` +![Bounding Box + Contour Visualization](https://dl.fbaipublicfiles.com/densepose/web/apply_net/res_bbox_dp_contour.jpg) + +5. Show bounding box and texture transfer: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl \ +image.jpg dp_iuv_texture,bbox --texture_atlas texture_from_SURREAL.jpg -v +``` +![Bounding Box + IUV Texture Transfer Visualization](https://dl.fbaipublicfiles.com/densepose/web/apply_net/res_bbox_dp_iuv_texture.jpg) + +6. Show bounding box and CSE rainbow visualization: +```bash +python apply_net.py show configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_s1x/251155172/model_final_c4ea5f.pkl \ +image.jpg dp_vertex,bbox -v +``` +![Bounding Box + CSE Rainbow Visualization](https://dl.fbaipublicfiles.com/densepose/web/apply_net/res_bbox_dp_vertex.jpg) + +7. Show bounding box and CSE texture transfer: +```bash +python apply_net.py show configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml \ +https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_s1x/251155172/model_final_c4ea5f.pkl \ +image.jpg dp_cse_texture,bbox --texture_atlases_map '{"smpl_27554": "smpl_uvSnapshot_colors.jpg"}' -v +``` +![Bounding Box + CSE Texture Transfer Visualization](https://dl.fbaipublicfiles.com/densepose/web/apply_net/res_bbox_dp_cse_texture.jpg) + +The texture files can be found in the `doc/images` folder diff --git a/data_processing/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md b/data_processing/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md new file mode 100644 index 0000000..b0a764b --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md @@ -0,0 +1,105 @@ + +# Query Dataset + +`query_db` is a tool to print or visualize DensePose data from a dataset. +It has two modes: `print` and `show` to output dataset entries to standard +output or to visualize them on images. + +## Print Mode + +The general command form is: +```bash +python query_db.py print [-h] [-v] [--max-entries N] +``` + +There are two mandatory arguments: + - ``, DensePose dataset specification, from which to select + the entries (e.g. `densepose_coco_2014_train`). 
+ - ``, dataset entry selector which can be a single specification, + or a comma-separated list of specifications of the form + `field[:type]=value` for exact match with the value + or `field[:type]=min-max` for a range of values + +One can additionally limit the maximum number of entries to output +by providing `--max-entries` argument. + +Examples: + +1. Output at most 10 first entries from the `densepose_coco_2014_train` dataset: +```bash +python query_db.py print densepose_coco_2014_train \* --max-entries 10 -v +``` + +2. Output all entries with `file_name` equal to `COCO_train2014_000000000036.jpg`: +```bash +python query_db.py print densepose_coco_2014_train file_name=COCO_train2014_000000000036.jpg -v +``` + +3. Output all entries with `image_id` between 36 and 156: +```bash +python query_db.py print densepose_coco_2014_train image_id:int=36-156 -v +``` + +## Visualization Mode + +The general command form is: +```bash +python query_db.py show [-h] [-v] [--max-entries N] [--output ] +``` + +There are three mandatory arguments: + - ``, DensePose dataset specification, from which to select + the entries (e.g. `densepose_coco_2014_train`). + - ``, dataset entry selector which can be a single specification, + or a comma-separated list of specifications of the form + `field[:type]=value` for exact match with the value + or `field[:type]=min-max` for a range of values + - ``, visualizations specifier; currently available visualizations are: + * `bbox` - bounding boxes of annotated persons; + * `dp_i` - annotated points colored according to the containing part; + * `dp_pts` - annotated points in green color; + * `dp_segm` - segmentation masks for annotated persons; + * `dp_u` - annotated points colored according to their U coordinate in part parameterization; + * `dp_v` - annotated points colored according to their V coordinate in part parameterization; + +One can additionally provide one of the two optional arguments: + - `--max_entries` to limit the maximum number of entries to visualize + - `--output` to provide visualization file name template, which defaults + to `output.png`. To distinguish file names for different dataset + entries, the tool appends 1-based entry index to the output file name, + e.g. output.0001.png, output.0002.png, etc. + +The following examples show how to output different visualizations for image with `id = 322` +from `densepose_coco_2014_train` dataset: + +1. Show bounding box and segmentation: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_segm -v +``` +![Bounding Box + Segmentation Visualization](images/vis_bbox_dp_segm.jpg) + +2. Show bounding box and points colored according to the containing part: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_i -v +``` +![Bounding Box + Point Label Visualization](images/vis_bbox_dp_i.jpg) + +3. Show bounding box and annotated points in green color: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_segm -v +``` +![Bounding Box + Point Visualization](images/vis_bbox_dp_pts.jpg) + +4. Show bounding box and annotated points colored according to their U coordinate in part parameterization: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_u -v +``` +![Bounding Box + Point U Visualization](images/vis_bbox_dp_u.jpg) + +5. 
Show bounding box and annotated points colored according to their V coordinate in part parameterization: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_v -v +``` +![Bounding Box + Point V Visualization](images/vis_bbox_dp_v.jpg) + + diff --git a/data_processing/detectron2/projects/DensePose/query_db.py b/data_processing/detectron2/projects/DensePose/query_db.py new file mode 100644 index 0000000..814a25f --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/query_db.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. + +import argparse +import logging +import os +import sys +from timeit import default_timer as timer +from typing import Any, ClassVar, Dict, List +import torch + +from detectron2.data.catalog import DatasetCatalog +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import setup_logger + +from densepose.structures import DensePoseDataRelative +from densepose.utils.dbhelper import EntrySelector +from densepose.utils.logger import verbosity_to_level +from densepose.vis.base import CompoundVisualizer +from densepose.vis.bounding_box import BoundingBoxVisualizer +from densepose.vis.densepose_data_points import ( + DensePoseDataCoarseSegmentationVisualizer, + DensePoseDataPointsIVisualizer, + DensePoseDataPointsUVisualizer, + DensePoseDataPointsVisualizer, + DensePoseDataPointsVVisualizer, +) + +DOC = """Query DB - a tool to print / visualize data from a database +""" + +LOGGER_NAME = "query_db" + +logger = logging.getLogger(LOGGER_NAME) + +_ACTION_REGISTRY: Dict[str, "Action"] = {} + + +class Action(object): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + parser.add_argument( + "-v", + "--verbosity", + action="count", + help="Verbose mode. Multiple -v options increase the verbosity.", + ) + + +def register_action(cls: type): + """ + Decorator for action classes to automate action registration + """ + global _ACTION_REGISTRY + _ACTION_REGISTRY[cls.COMMAND] = cls + return cls + + +class EntrywiseAction(Action): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(EntrywiseAction, cls).add_arguments(parser) + parser.add_argument( + "dataset", metavar="", help="Dataset name (e.g. densepose_coco_2014_train)" + ) + parser.add_argument( + "selector", + metavar="", + help="Dataset entry selector in the form field1[:type]=value1[," + "field2[:type]=value_min-value_max...] 
which selects all " + "entries from the dataset that satisfy the constraints", + ) + parser.add_argument( + "--max-entries", metavar="N", help="Maximum number of entries to process", type=int + ) + + @classmethod + def execute(cls: type, args: argparse.Namespace): + dataset = setup_dataset(args.dataset) + entry_selector = EntrySelector.from_string(args.selector) + context = cls.create_context(args) + if args.max_entries is not None: + for _, entry in zip(range(args.max_entries), dataset): + if entry_selector(entry): + cls.execute_on_entry(entry, context) + else: + for entry in dataset: + if entry_selector(entry): + cls.execute_on_entry(entry, context) + + @classmethod + def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]: + context = {} + return context + + +@register_action +class PrintAction(EntrywiseAction): + """ + Print action that outputs selected entries to stdout + """ + + COMMAND: ClassVar[str] = "print" + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Output selected entries to stdout. ") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(PrintAction, cls).add_arguments(parser) + + @classmethod + def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]): + import pprint + + printer = pprint.PrettyPrinter(indent=2, width=200, compact=True) + printer.pprint(entry) + + +@register_action +class ShowAction(EntrywiseAction): + """ + Show action that visualizes selected entries on an image + """ + + COMMAND: ClassVar[str] = "show" + VISUALIZERS: ClassVar[Dict[str, object]] = { + "dp_segm": DensePoseDataCoarseSegmentationVisualizer(), + "dp_i": DensePoseDataPointsIVisualizer(), + "dp_u": DensePoseDataPointsUVisualizer(), + "dp_v": DensePoseDataPointsVVisualizer(), + "dp_pts": DensePoseDataPointsVisualizer(), + "bbox": BoundingBoxVisualizer(), + } + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(ShowAction, cls).add_arguments(parser) + parser.add_argument( + "visualizations", + metavar="", + help="Comma separated list of visualizations, possible values: " + "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))), + ) + parser.add_argument( + "--output", + metavar="", + default="output.png", + help="File name to save output to", + ) + + @classmethod + def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]): + import cv2 + import numpy as np + + image_fpath = PathManager.get_local_path(entry["file_name"]) + image = cv2.imread(image_fpath, cv2.IMREAD_GRAYSCALE) + image = np.tile(image[:, :, np.newaxis], [1, 1, 3]) + datas = cls._extract_data_for_visualizers_from_entry(context["vis_specs"], entry) + visualizer = context["visualizer"] + image_vis = visualizer.visualize(image, datas) + entry_idx = context["entry_idx"] + 1 + out_fname = cls._get_out_fname(entry_idx, context["out_fname"]) + cv2.imwrite(out_fname, image_vis) + logger.info(f"Output saved to {out_fname}") + context["entry_idx"] += 1 + + @classmethod + def _get_out_fname(cls: type, entry_idx: int, fname_base: str): + base, ext = os.path.splitext(fname_base) + return base + ".{0:04d}".format(entry_idx) + 
ext + + @classmethod + def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]: + vis_specs = args.visualizations.split(",") + visualizers = [] + for vis_spec in vis_specs: + vis = cls.VISUALIZERS[vis_spec] + visualizers.append(vis) + context = { + "vis_specs": vis_specs, + "visualizer": CompoundVisualizer(visualizers), + "out_fname": args.output, + "entry_idx": 0, + } + return context + + @classmethod + def _extract_data_for_visualizers_from_entry( + cls: type, vis_specs: List[str], entry: Dict[str, Any] + ): + dp_list = [] + bbox_list = [] + for annotation in entry["annotations"]: + is_valid, _ = DensePoseDataRelative.validate_annotation(annotation) + if not is_valid: + continue + bbox = torch.as_tensor(annotation["bbox"]) + bbox_list.append(bbox) + dp_data = DensePoseDataRelative(annotation) + dp_list.append(dp_data) + datas = [] + for vis_spec in vis_specs: + datas.append(bbox_list if "bbox" == vis_spec else (bbox_list, dp_list)) + return datas + + +def setup_dataset(dataset_name): + logger.info("Loading dataset {}".format(dataset_name)) + start = timer() + dataset = DatasetCatalog.get(dataset_name) + stop = timer() + logger.info("Loaded dataset {} in {:.3f}s".format(dataset_name, stop - start)) + return dataset + + +def create_argument_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=DOC, + formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120), + ) + parser.set_defaults(func=lambda _: parser.print_help(sys.stdout)) + subparsers = parser.add_subparsers(title="Actions") + for _, action in _ACTION_REGISTRY.items(): + action.add_parser(subparsers) + return parser + + +def main(): + parser = create_argument_parser() + args = parser.parse_args() + verbosity = getattr(args, "verbosity", None) + global logger + logger = setup_logger(name=LOGGER_NAME) + logger.setLevel(verbosity_to_level(verbosity)) + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/data_processing/detectron2/projects/DensePose/runmy.py b/data_processing/detectron2/projects/DensePose/runmy.py new file mode 100644 index 0000000..f1c88f3 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/runmy.py @@ -0,0 +1,18 @@ +import os +import argparse +# dataset_name = 'pexels' +# for i in range(50): +# path = f'G:/full-head-dataset/{dataset_name}/{i * 1000:08d}' +# +# cmd = f'python apply_net.py show configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml R_101_FPN_DL_soft_s1x.pkl {path}/aligned_images dp_vertex --output {path}/seg --min_score 0.8' +# print(cmd) +# os.system(cmd) + + +dataset_name = 'unsplash' +for i in range(58,64): + path = f'G:/full-head-dataset/{dataset_name}/{i * 1000:08d}' + + cmd = f'python apply_net.py show configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml R_101_FPN_DL_soft_s1x.pkl {path}/aligned_images dp_vertex --output {path}/seg --min_score 0.8' + print(cmd) + os.system(cmd) diff --git a/data_processing/detectron2/projects/DensePose/setup.py b/data_processing/detectron2/projects/DensePose/setup.py new file mode 100644 index 0000000..22ad239 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/setup.py @@ -0,0 +1,42 @@ +import re +from pathlib import Path +from setuptools import find_packages, setup + +try: + import torch # noqa: F401 +except ImportError as e: + raise Exception( + """ +You must install PyTorch prior to installing DensePose: +pip install torch + +For more information: + https://pytorch.org/get-started/locally/ 
+ """ + ) from e + + +def get_detectron2_current_version(): + """Version is not available for import through Python since it is + above the top level of the package. Instead, we parse it from the + file with a regex.""" + # Get version info from detectron2 __init__.py + version_source = (Path(__file__).parents[2] / "detectron2" / "__init__.py").read_text() + version_number = re.findall(r'__version__ = "([0-9\.]+)"', version_source)[0] + return version_number + + +setup( + name="detectron2-densepose", + author="FAIR", + version=get_detectron2_current_version(), + url="https://github.com/facebookresearch/detectron2/tree/main/projects/DensePose", + packages=find_packages(), + python_requires=">=3.7", + install_requires=[ + "av>=8.0.3", + "detectron2@git+https://github.com/facebookresearch/detectron2.git", + "opencv-python-headless>=4.5.3.56", + "scipy>=1.5.4", + ], +) diff --git a/data_processing/detectron2/projects/DensePose/tests/common.py b/data_processing/detectron2/projects/DensePose/tests/common.py new file mode 100644 index 0000000..ff22b9a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/common.py @@ -0,0 +1,124 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import os +import torch + +from detectron2.config import get_cfg +from detectron2.engine import default_setup +from detectron2.modeling import build_model + +from densepose import add_densepose_config + +_BASE_CONFIG_DIR = "configs" +_EVOLUTION_CONFIG_SUB_DIR = "evolution" +_HRNET_CONFIG_SUB_DIR = "HRNet" +_QUICK_SCHEDULES_CONFIG_SUB_DIR = "quick_schedules" +_BASE_CONFIG_FILE_PREFIX = "Base-" +_CONFIG_FILE_EXT = ".yaml" + + +def _get_base_config_dir(): + """ + Return the base directory for configurations + """ + return os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", _BASE_CONFIG_DIR) + + +def _get_evolution_config_dir(): + """ + Return the base directory for evolution configurations + """ + return os.path.join(_get_base_config_dir(), _EVOLUTION_CONFIG_SUB_DIR) + + +def _get_hrnet_config_dir(): + """ + Return the base directory for HRNet configurations + """ + return os.path.join(_get_base_config_dir(), _HRNET_CONFIG_SUB_DIR) + + +def _get_quick_schedules_config_dir(): + """ + Return the base directory for quick schedules configurations + """ + return os.path.join(_get_base_config_dir(), _QUICK_SCHEDULES_CONFIG_SUB_DIR) + + +def _collect_config_files(config_dir): + """ + Collect all configuration files (i.e. 
densepose_*.yaml) directly in the specified directory + """ + start = _get_base_config_dir() + results = [] + for entry in os.listdir(config_dir): + path = os.path.join(config_dir, entry) + if not os.path.isfile(path): + continue + _, ext = os.path.splitext(entry) + if ext != _CONFIG_FILE_EXT: + continue + if entry.startswith(_BASE_CONFIG_FILE_PREFIX): + continue + config_file = os.path.relpath(path, start) + results.append(config_file) + return results + + +def get_config_files(): + """ + Get all the configuration files (relative to the base configuration directory) + """ + return _collect_config_files(_get_base_config_dir()) + + +def get_evolution_config_files(): + """ + Get all the evolution configuration files (relative to the base configuration directory) + """ + return _collect_config_files(_get_evolution_config_dir()) + + +def get_hrnet_config_files(): + """ + Get all the HRNet configuration files (relative to the base configuration directory) + """ + return _collect_config_files(_get_hrnet_config_dir()) + + +def get_quick_schedules_config_files(): + """ + Get all the quick schedules configuration files (relative to the base configuration directory) + """ + return _collect_config_files(_get_quick_schedules_config_dir()) + + +def get_model_config(config_file): + """ + Load and return the configuration from the specified file (relative to the base configuration + directory) + """ + cfg = get_cfg() + add_densepose_config(cfg) + path = os.path.join(_get_base_config_dir(), config_file) + cfg.merge_from_file(path) + if not torch.cuda.is_available(): + cfg.MODEL.DEVICE = "cpu" + return cfg + + +def get_model(config_file): + """ + Get the model from the specified file (relative to the base configuration directory) + """ + cfg = get_model_config(config_file) + return build_model(cfg) + + +def setup(config_file): + """ + Setup the configuration from the specified file (relative to the base configuration directory) + """ + cfg = get_model_config(config_file) + cfg.freeze() + default_setup(cfg, {}) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_chart_based_annotations_accumulator.py b/data_processing/detectron2/projects/DensePose/tests/test_chart_based_annotations_accumulator.py new file mode 100644 index 0000000..a1c4f85 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_chart_based_annotations_accumulator.py @@ -0,0 +1,76 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
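+# Tests for ChartBasedAnnotationsAccumulator: checks how chart-based DensePose
+# annotations are accumulated for instances that have no gt_densepose field,
+# instances whose gt_densepose entries are None, and fully annotated instances.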
+ +import unittest +import torch + +from detectron2.structures import Boxes, BoxMode, Instances + +from densepose.modeling.losses.utils import ChartBasedAnnotationsAccumulator +from densepose.structures import DensePoseDataRelative, DensePoseList + +image_shape = (100, 100) +instances = Instances(image_shape) +n_instances = 3 +instances.proposal_boxes = Boxes(torch.rand(n_instances, 4)) +instances.gt_boxes = Boxes(torch.rand(n_instances, 4)) + + +# instances.gt_densepose = None cannot happen because instances attributes need a length +class TestChartBasedAnnotationsAccumulator(unittest.TestCase): + def test_chart_based_annotations_accumulator_no_gt_densepose(self): + accumulator = ChartBasedAnnotationsAccumulator() + accumulator.accumulate(instances) + expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances} + for key in accumulator.__dict__: + self.assertEqual(getattr(accumulator, key), expected_values.get(key, [])) + + def test_chart_based_annotations_accumulator_gt_densepose_none(self): + instances.gt_densepose = [None] * n_instances + accumulator = ChartBasedAnnotationsAccumulator() + accumulator.accumulate(instances) + expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances} + for key in accumulator.__dict__: + self.assertEqual(getattr(accumulator, key), expected_values.get(key, [])) + + def test_chart_based_annotations_accumulator_gt_densepose(self): + data_relative_keys = [ + DensePoseDataRelative.X_KEY, + DensePoseDataRelative.Y_KEY, + DensePoseDataRelative.I_KEY, + DensePoseDataRelative.U_KEY, + DensePoseDataRelative.V_KEY, + DensePoseDataRelative.S_KEY, + ] + annotations = [DensePoseDataRelative({k: [0] for k in data_relative_keys})] * n_instances + instances.gt_densepose = DensePoseList(annotations, instances.gt_boxes, image_shape) + accumulator = ChartBasedAnnotationsAccumulator() + accumulator.accumulate(instances) + bbox_xywh_est = BoxMode.convert( + instances.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + bbox_xywh_gt = BoxMode.convert( + instances.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + expected_values = { + "s_gt": [ + torch.zeros((3, DensePoseDataRelative.MASK_SIZE, DensePoseDataRelative.MASK_SIZE)) + ] + * n_instances, + "bbox_xywh_est": bbox_xywh_est.split(1), + "bbox_xywh_gt": bbox_xywh_gt.split(1), + "point_bbox_with_dp_indices": [torch.tensor([i]) for i in range(n_instances)], + "point_bbox_indices": [torch.tensor([i]) for i in range(n_instances)], + "bbox_indices": list(range(n_instances)), + "nxt_bbox_with_dp_index": n_instances, + "nxt_bbox_index": n_instances, + } + default_value = [torch.tensor([0])] * 3 + for key in accumulator.__dict__: + to_test = getattr(accumulator, key) + gt_value = expected_values.get(key, default_value) + if key in ["nxt_bbox_with_dp_index", "nxt_bbox_index"]: + self.assertEqual(to_test, gt_value) + elif key == "bbox_indices": + self.assertListEqual(to_test, gt_value) + else: + self.assertTrue(torch.allclose(torch.stack(to_test), torch.stack(gt_value))) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_combine_data_loader.py b/data_processing/detectron2/projects/DensePose/tests/test_combine_data_loader.py new file mode 100644 index 0000000..832903a --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_combine_data_loader.py @@ -0,0 +1,46 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
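+# Tests for CombinedDataLoader: verifies that batches drawn from two component
+# loaders follow the requested sampling ratios and batch size, with None used
+# as the fill value once a component loader is exhausted.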
+ +import random +import unittest +from typing import Any, Iterable, Iterator, Tuple + +from densepose.data import CombinedDataLoader + + +def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any]]: + """ + Group elements of an iterable by chunks of size `n`, e.g. + grouper(range(9), 4) -> + (0, 1, 2, 3), (4, 5, 6, 7), (8, None, None, None) + """ + it = iter(iterable) + while True: + values = [] + for _ in range(n): + try: + value = next(it) + except StopIteration: + values.extend([fillvalue] * (n - len(values))) + yield tuple(values) + return + values.append(value) + yield tuple(values) + + +class TestCombinedDataLoader(unittest.TestCase): + def test_combine_loaders_1(self): + loader1 = _grouper([f"1_{i}" for i in range(10)], 2) + loader2 = _grouper([f"2_{i}" for i in range(11)], 3) + batch_size = 4 + ratios = (0.1, 0.9) + random.seed(43) + combined = CombinedDataLoader((loader1, loader2), batch_size, ratios) + BATCHES_GT = [ + ["1_0", "1_1", "2_0", "2_1"], + ["2_2", "2_3", "2_4", "2_5"], + ["1_2", "1_3", "2_6", "2_7"], + ["2_8", "2_9", "2_10", None], + ] + for i, batch in enumerate(combined): + self.assertEqual(len(batch), batch_size) + self.assertEqual(batch, BATCHES_GT[i]) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_cse_annotations_accumulator.py b/data_processing/detectron2/projects/DensePose/tests/test_cse_annotations_accumulator.py new file mode 100644 index 0000000..a22dce9 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_cse_annotations_accumulator.py @@ -0,0 +1,240 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import unittest +import torch + +from detectron2.structures import Boxes, BoxMode, Instances + +from densepose.modeling.losses.embed_utils import CseAnnotationsAccumulator +from densepose.structures import DensePoseDataRelative, DensePoseList + + +class TestCseAnnotationsAccumulator(unittest.TestCase): + def test_cse_annotations_accumulator_nodp(self): + instances_lst = [ + self._create_instances_nodp(), + ] + self._test_template(instances_lst) + + def test_cse_annotations_accumulator_sparsedp(self): + instances_lst = [ + self._create_instances_sparsedp(), + ] + self._test_template(instances_lst) + + def test_cse_annotations_accumulator_fulldp(self): + instances_lst = [ + self._create_instances_fulldp(), + ] + self._test_template(instances_lst) + + def test_cse_annotations_accumulator_combined(self): + instances_lst = [ + self._create_instances_nodp(), + self._create_instances_sparsedp(), + self._create_instances_fulldp(), + ] + self._test_template(instances_lst) + + def _test_template(self, instances_lst): + acc = CseAnnotationsAccumulator() + for instances in instances_lst: + acc.accumulate(instances) + packed_anns = acc.pack() + self._check_correspondence(packed_anns, instances_lst) + + def _create_instances_nodp(self): + image_shape = (480, 640) + instances = Instances(image_shape) + instances.gt_boxes = Boxes( + torch.as_tensor( + [ + [40.0, 40.0, 140.0, 140.0], + [160.0, 160.0, 270.0, 270.0], + [40.0, 160.0, 160.0, 280.0], + ] + ) + ) + instances.proposal_boxes = Boxes( + torch.as_tensor( + [ + [41.0, 39.0, 142.0, 138.0], + [161.0, 159.0, 272.0, 268.0], + [41.0, 159.0, 162.0, 278.0], + ] + ) + ) + # do not add gt_densepose + return instances + + def _create_instances_sparsedp(self): + image_shape = (540, 720) + instances = Instances(image_shape) + instances.gt_boxes = Boxes( + torch.as_tensor( + [ + [50.0, 50.0, 130.0, 130.0], + [150.0, 150.0, 240.0, 
240.0], + [50.0, 150.0, 230.0, 330.0], + ] + ) + ) + instances.proposal_boxes = Boxes( + torch.as_tensor( + [ + [49.0, 51.0, 131.0, 129.0], + [151.0, 149.0, 241.0, 239.0], + [51.0, 149.0, 232.0, 329.0], + ] + ) + ) + instances.gt_densepose = DensePoseList( + [ + None, + self._create_dp_data( + { + "dp_x": [81.69, 153.47, 151.00], + "dp_y": [162.24, 128.71, 113.81], + "dp_vertex": [0, 1, 2], + "ref_model": "zebra_5002", + "dp_masks": [], + }, + {"c": (166, 133), "r": 64}, + ), + None, + ], + instances.gt_boxes, + image_shape, + ) + return instances + + def _create_instances_fulldp(self): + image_shape = (680, 840) + instances = Instances(image_shape) + instances.gt_boxes = Boxes( + torch.as_tensor( + [ + [65.0, 55.0, 165.0, 155.0], + [170.0, 175.0, 275.0, 280.0], + [55.0, 165.0, 165.0, 275.0], + ] + ) + ) + instances.proposal_boxes = Boxes( + torch.as_tensor( + [ + [66.0, 54.0, 166.0, 154.0], + [171.0, 174.0, 276.0, 279.0], + [56.0, 164.0, 166.0, 274.0], + ] + ) + ) + instances.gt_densepose = DensePoseList( + [ + self._create_dp_data( + { + "dp_x": [149.99, 198.62, 157.59], + "dp_y": [170.74, 197.73, 123.12], + "dp_vertex": [3, 4, 5], + "ref_model": "cat_5001", + "dp_masks": [], + }, + {"c": (100, 100), "r": 50}, + ), + self._create_dp_data( + { + "dp_x": [234.53, 116.72, 71.66], + "dp_y": [107.53, 11.31, 142.32], + "dp_vertex": [6, 7, 8], + "ref_model": "dog_5002", + "dp_masks": [], + }, + {"c": (200, 150), "r": 40}, + ), + self._create_dp_data( + { + "dp_x": [225.54, 202.61, 135.90], + "dp_y": [167.46, 181.00, 211.47], + "dp_vertex": [9, 10, 11], + "ref_model": "elephant_5002", + "dp_masks": [], + }, + {"c": (100, 200), "r": 45}, + ), + ], + instances.gt_boxes, + image_shape, + ) + return instances + + def _create_dp_data(self, anns, blob_def=None): + dp_data = DensePoseDataRelative(anns) + if blob_def is not None: + dp_data.segm[ + blob_def["c"][0] - blob_def["r"] : blob_def["c"][0] + blob_def["r"], + blob_def["c"][1] - blob_def["r"] : blob_def["c"][1] + blob_def["r"], + ] = 1 + return dp_data + + def _check_correspondence(self, packed_anns, instances_lst): + instance_idx = 0 + data_idx = 0 + pt_offset = 0 + if packed_anns is not None: + bbox_xyxy_gt = BoxMode.convert( + packed_anns.bbox_xywh_gt.clone(), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS + ) + bbox_xyxy_est = BoxMode.convert( + packed_anns.bbox_xywh_est.clone(), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS + ) + for instances in instances_lst: + if not hasattr(instances, "gt_densepose"): + instance_idx += len(instances) + continue + for i, dp_data in enumerate(instances.gt_densepose): + if dp_data is None: + instance_idx += 1 + continue + n_pts = len(dp_data.x) + self.assertTrue( + torch.allclose(dp_data.x, packed_anns.x_gt[pt_offset : pt_offset + n_pts]) + ) + self.assertTrue( + torch.allclose(dp_data.y, packed_anns.y_gt[pt_offset : pt_offset + n_pts]) + ) + self.assertTrue(torch.allclose(dp_data.segm, packed_anns.coarse_segm_gt[data_idx])) + self.assertTrue( + torch.allclose( + torch.ones(n_pts, dtype=torch.long) * dp_data.mesh_id, + packed_anns.vertex_mesh_ids_gt[pt_offset : pt_offset + n_pts], + ) + ) + self.assertTrue( + torch.allclose( + dp_data.vertex_ids, packed_anns.vertex_ids_gt[pt_offset : pt_offset + n_pts] + ) + ) + self.assertTrue( + torch.allclose(instances.gt_boxes.tensor[i], bbox_xyxy_gt[data_idx]) + ) + self.assertTrue( + torch.allclose(instances.proposal_boxes.tensor[i], bbox_xyxy_est[data_idx]) + ) + self.assertTrue( + torch.allclose( + torch.ones(n_pts, dtype=torch.long) * data_idx, + 
packed_anns.point_bbox_with_dp_indices[pt_offset : pt_offset + n_pts], + ) + ) + self.assertTrue( + torch.allclose( + torch.ones(n_pts, dtype=torch.long) * instance_idx, + packed_anns.point_bbox_indices[pt_offset : pt_offset + n_pts], + ) + ) + self.assertEqual(instance_idx, packed_anns.bbox_indices[data_idx]) + pt_offset += n_pts + instance_idx += 1 + data_idx += 1 + if data_idx == 0: + self.assertIsNone(packed_anns) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_dataset_loaded_annotations.py b/data_processing/detectron2/projects/DensePose/tests/test_dataset_loaded_annotations.py new file mode 100644 index 0000000..cf8035b --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_dataset_loaded_annotations.py @@ -0,0 +1,87 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import unittest + +from densepose.data.datasets.builtin import COCO_DATASETS, DENSEPOSE_ANNOTATIONS_DIR, LVIS_DATASETS +from densepose.data.datasets.coco import load_coco_json +from densepose.data.datasets.lvis import load_lvis_json +from densepose.data.utils import maybe_prepend_base_path +from densepose.structures import DensePoseDataRelative + + +class TestDatasetLoadedAnnotations(unittest.TestCase): + COCO_DATASET_DATA = { + "densepose_coco_2014_train": {"n_instances": 39210}, + "densepose_coco_2014_minival": {"n_instances": 2243}, + "densepose_coco_2014_minival_100": {"n_instances": 164}, + "densepose_coco_2014_valminusminival": {"n_instances": 7297}, + "densepose_coco_2014_train_cse": {"n_instances": 39210}, + "densepose_coco_2014_minival_cse": {"n_instances": 2243}, + "densepose_coco_2014_minival_100_cse": {"n_instances": 164}, + "densepose_coco_2014_valminusminival_cse": {"n_instances": 7297}, + "densepose_chimps": {"n_instances": 930}, + "posetrack2017_train": {"n_instances": 8274}, + "posetrack2017_val": {"n_instances": 4753}, + "lvis_v05_train": {"n_instances": 5186}, + "lvis_v05_val": {"n_instances": 1037}, + } + + LVIS_DATASET_DATA = { + "densepose_lvis_v1_train1": {"n_instances": 3394}, + "densepose_lvis_v1_train2": {"n_instances": 1800}, + "densepose_lvis_v1_val": {"n_instances": 1037}, + "densepose_lvis_v1_val_animals_100": {"n_instances": 89}, + } + + def generic_coco_test(self, dataset_info): + if dataset_info.name not in self.COCO_DATASET_DATA: + return + n_inst = self.COCO_DATASET_DATA[dataset_info.name]["n_instances"] + self.generic_test(dataset_info, n_inst, load_coco_json) + + def generic_lvis_test(self, dataset_info): + if dataset_info.name not in self.LVIS_DATASET_DATA: + return + n_inst = self.LVIS_DATASET_DATA[dataset_info.name]["n_instances"] + self.generic_test(dataset_info, n_inst, load_lvis_json) + + def generic_test(self, dataset_info, n_inst, loader_fun): + datasets_root = DENSEPOSE_ANNOTATIONS_DIR + annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_info.annotations_fpath) + images_root = maybe_prepend_base_path(datasets_root, dataset_info.images_root) + image_annotation_dicts = loader_fun( + annotations_json_file=annotations_fpath, + image_root=images_root, + dataset_name=dataset_info.name, + ) + num_valid = sum( + 1 + for image_annotation_dict in image_annotation_dicts + for ann in image_annotation_dict["annotations"] + if DensePoseDataRelative.validate_annotation(ann)[0] + ) + self.assertEqual(num_valid, n_inst) + + +def coco_test_fun(dataset_info): + return lambda self: self.generic_coco_test(dataset_info) + + +for dataset_info in COCO_DATASETS: + setattr( + TestDatasetLoadedAnnotations, + 
f"test_coco_builtin_loaded_annotations_{dataset_info.name}", + coco_test_fun(dataset_info), + ) + + +def lvis_test_fun(dataset_info): + return lambda self: self.generic_lvis_test(dataset_info) + + +for dataset_info in LVIS_DATASETS: + setattr( + TestDatasetLoadedAnnotations, + f"test_lvis_builtin_loaded_annotations_{dataset_info.name}", + lvis_test_fun(dataset_info), + ) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_frame_selector.py b/data_processing/detectron2/projects/DensePose/tests/test_frame_selector.py new file mode 100644 index 0000000..65f05f5 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_frame_selector.py @@ -0,0 +1,60 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import random +import unittest + +from densepose.data.video import FirstKFramesSelector, LastKFramesSelector, RandomKFramesSelector + + +class TestFrameSelector(unittest.TestCase): + def test_frame_selector_random_k_1(self): + _SEED = 43 + _K = 4 + random.seed(_SEED) + selector = RandomKFramesSelector(_K) + frame_tss = list(range(0, 20, 2)) + _SELECTED_GT = [0, 8, 4, 6] + selected = selector(frame_tss) + self.assertEqual(_SELECTED_GT, selected) + + def test_frame_selector_random_k_2(self): + _SEED = 43 + _K = 10 + random.seed(_SEED) + selector = RandomKFramesSelector(_K) + frame_tss = list(range(0, 6, 2)) + _SELECTED_GT = [0, 2, 4] + selected = selector(frame_tss) + self.assertEqual(_SELECTED_GT, selected) + + def test_frame_selector_first_k_1(self): + _K = 4 + selector = FirstKFramesSelector(_K) + frame_tss = list(range(0, 20, 2)) + _SELECTED_GT = frame_tss[:_K] + selected = selector(frame_tss) + self.assertEqual(_SELECTED_GT, selected) + + def test_frame_selector_first_k_2(self): + _K = 10 + selector = FirstKFramesSelector(_K) + frame_tss = list(range(0, 6, 2)) + _SELECTED_GT = frame_tss[:_K] + selected = selector(frame_tss) + self.assertEqual(_SELECTED_GT, selected) + + def test_frame_selector_last_k_1(self): + _K = 4 + selector = LastKFramesSelector(_K) + frame_tss = list(range(0, 20, 2)) + _SELECTED_GT = frame_tss[-_K:] + selected = selector(frame_tss) + self.assertEqual(_SELECTED_GT, selected) + + def test_frame_selector_last_k_2(self): + _K = 10 + selector = LastKFramesSelector(_K) + frame_tss = list(range(0, 6, 2)) + _SELECTED_GT = frame_tss[-_K:] + selected = selector(frame_tss) + self.assertEqual(_SELECTED_GT, selected) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_image_list_dataset.py b/data_processing/detectron2/projects/DensePose/tests/test_image_list_dataset.py new file mode 100644 index 0000000..7932602 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_image_list_dataset.py @@ -0,0 +1,48 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import contextlib +import os +import tempfile +import unittest +import torch +from torchvision.utils import save_image + +from densepose.data.image_list_dataset import ImageListDataset +from densepose.data.transform import ImageResizeTransform + + +@contextlib.contextmanager +def temp_image(height, width): + random_image = torch.rand(height, width) + with tempfile.NamedTemporaryFile(suffix=".jpg") as f: + f.close() + save_image(random_image, f.name) + yield f.name + os.unlink(f.name) + + +class TestImageListDataset(unittest.TestCase): + def test_image_list_dataset(self): + height, width = 720, 1280 + with temp_image(height, width) as image_fpath: + image_list = [image_fpath] + category_list = [None] + dataset = ImageListDataset(image_list, category_list) + self.assertEqual(len(dataset), 1) + data1, categories1 = dataset[0]["images"], dataset[0]["categories"] + self.assertEqual(data1.shape, torch.Size((1, 3, height, width))) + self.assertEqual(data1.dtype, torch.float32) + self.assertIsNone(categories1[0]) + + def test_image_list_dataset_with_transform(self): + height, width = 720, 1280 + with temp_image(height, width) as image_fpath: + image_list = [image_fpath] + category_list = [None] + transform = ImageResizeTransform() + dataset = ImageListDataset(image_list, category_list, transform) + self.assertEqual(len(dataset), 1) + data1, categories1 = dataset[0]["images"], dataset[0]["categories"] + self.assertEqual(data1.shape, torch.Size((1, 3, 749, 1333))) + self.assertEqual(data1.dtype, torch.float32) + self.assertIsNone(categories1[0]) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_image_resize_transform.py b/data_processing/detectron2/projects/DensePose/tests/test_image_resize_transform.py new file mode 100644 index 0000000..01c3373 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_image_resize_transform.py @@ -0,0 +1,16 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import unittest +import torch + +from densepose.data.transform import ImageResizeTransform + + +class TestImageResizeTransform(unittest.TestCase): + def test_image_resize_1(self): + images_batch = torch.ones((3, 3, 100, 100), dtype=torch.uint8) * 100 + transform = ImageResizeTransform() + images_transformed = transform(images_batch) + IMAGES_GT = torch.ones((3, 3, 800, 800), dtype=torch.float) * 100 + self.assertEqual(images_transformed.size(), IMAGES_GT.size()) + self.assertAlmostEqual(torch.abs(IMAGES_GT - images_transformed).max().item(), 0.0) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_model_e2e.py b/data_processing/detectron2/projects/DensePose/tests/test_model_e2e.py new file mode 100644 index 0000000..055fadf --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_model_e2e.py @@ -0,0 +1,43 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import unittest +import torch + +from detectron2.structures import BitMasks, Boxes, Instances + +from .common import get_model + + +# TODO(plabatut): Modularize detectron2 tests and re-use +def make_model_inputs(image, instances=None): + if instances is None: + return {"image": image} + + return {"image": image, "instances": instances} + + +def make_empty_instances(h, w): + instances = Instances((h, w)) + instances.gt_boxes = Boxes(torch.rand(0, 4)) + instances.gt_classes = torch.tensor([]).to(dtype=torch.int64) + instances.gt_masks = BitMasks(torch.rand(0, h, w)) + return instances + + +class ModelE2ETest(unittest.TestCase): + CONFIG_PATH = "" + + def setUp(self): + self.model = get_model(self.CONFIG_PATH) + + def _test_eval(self, sizes): + inputs = [make_model_inputs(torch.rand(3, size[0], size[1])) for size in sizes] + self.model.eval() + self.model(inputs) + + +class DensePoseRCNNE2ETest(ModelE2ETest): + CONFIG_PATH = "densepose_rcnn_R_101_FPN_s1x.yaml" + + def test_empty_data(self): + self._test_eval([(200, 250), (200, 249)]) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_setup.py b/data_processing/detectron2/projects/DensePose/tests/test_setup.py new file mode 100644 index 0000000..165a1b9 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_setup.py @@ -0,0 +1,36 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import unittest + +from .common import ( + get_config_files, + get_evolution_config_files, + get_hrnet_config_files, + get_quick_schedules_config_files, + setup, +) + + +class TestSetup(unittest.TestCase): + def _test_setup(self, config_file): + setup(config_file) + + def test_setup_configs(self): + config_files = get_config_files() + for config_file in config_files: + self._test_setup(config_file) + + def test_setup_evolution_configs(self): + config_files = get_evolution_config_files() + for config_file in config_files: + self._test_setup(config_file) + + def test_setup_hrnet_configs(self): + config_files = get_hrnet_config_files() + for config_file in config_files: + self._test_setup(config_file) + + def test_setup_quick_schedules_configs(self): + config_files = get_quick_schedules_config_files() + for config_file in config_files: + self._test_setup(config_file) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_structures.py b/data_processing/detectron2/projects/DensePose/tests/test_structures.py new file mode 100644 index 0000000..54082d3 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_structures.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
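+# Tests for normalized_coords_transform: verifies that the four corners of a
+# bounding box map to the corners (±1, ±1) of the normalized coordinate space.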
+ +import unittest + +from densepose.structures import normalized_coords_transform + + +class TestStructures(unittest.TestCase): + def test_normalized_coords_transform(self): + bbox = (32, 24, 288, 216) + x0, y0, w, h = bbox + xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h + f = normalized_coords_transform(*bbox) + # Top-left + expected_p, actual_p = (-1, -1), f((xmin, ymin)) + self.assertEqual(expected_p, actual_p) + # Top-right + expected_p, actual_p = (1, -1), f((xmax, ymin)) + self.assertEqual(expected_p, actual_p) + # Bottom-left + expected_p, actual_p = (-1, 1), f((xmin, ymax)) + self.assertEqual(expected_p, actual_p) + # Bottom-right + expected_p, actual_p = (1, 1), f((xmax, ymax)) + self.assertEqual(expected_p, actual_p) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_tensor_storage.py b/data_processing/detectron2/projects/DensePose/tests/test_tensor_storage.py new file mode 100644 index 0000000..aeeeffa --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_tensor_storage.py @@ -0,0 +1,256 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import io +import tempfile +import unittest +from contextlib import ExitStack +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + +from detectron2.utils import comm + +from densepose.evaluation.tensor_storage import ( + SingleProcessFileTensorStorage, + SingleProcessRamTensorStorage, + SizeData, + storage_gather, +) + + +class TestSingleProcessRamTensorStorage(unittest.TestCase): + def test_read_write_1(self): + schema = { + "tf": SizeData(dtype="float32", shape=(112, 112)), + "ti": SizeData(dtype="int32", shape=(4, 64, 64)), + } + # generate data which corresponds to the schema + data_elts = [] + torch.manual_seed(23) + for _i in range(3): + data_elt = { + "tf": torch.rand((112, 112), dtype=torch.float32), + "ti": (torch.rand(4, 64, 64) * 1000).to(dtype=torch.int32), + } + data_elts.append(data_elt) + storage = SingleProcessRamTensorStorage(schema, io.BytesIO()) + # write data to the storage + for i in range(3): + record_id = storage.put(data_elts[i]) + self.assertEqual(record_id, i) + # read data from the storage + for i in range(3): + record = storage.get(i) + self.assertEqual(len(record), len(schema)) + for field_name in schema: + self.assertTrue(field_name in record) + self.assertEqual(data_elts[i][field_name].shape, record[field_name].shape) + self.assertEqual(data_elts[i][field_name].dtype, record[field_name].dtype) + self.assertTrue(torch.allclose(data_elts[i][field_name], record[field_name])) + + +class TestSingleProcessFileTensorStorage(unittest.TestCase): + def test_read_write_1(self): + schema = { + "tf": SizeData(dtype="float32", shape=(112, 112)), + "ti": SizeData(dtype="int32", shape=(4, 64, 64)), + } + # generate data which corresponds to the schema + data_elts = [] + torch.manual_seed(23) + for _i in range(3): + data_elt = { + "tf": torch.rand((112, 112), dtype=torch.float32), + "ti": (torch.rand(4, 64, 64) * 1000).to(dtype=torch.int32), + } + data_elts.append(data_elt) + # WARNING: opens the file several times! 
may not work on all platforms + with tempfile.NamedTemporaryFile() as hFile: + storage = SingleProcessFileTensorStorage(schema, hFile.name, "wb") + # write data to the storage + for i in range(3): + record_id = storage.put(data_elts[i]) + self.assertEqual(record_id, i) + hFile.seek(0) + storage = SingleProcessFileTensorStorage(schema, hFile.name, "rb") + # read data from the storage + for i in range(3): + record = storage.get(i) + self.assertEqual(len(record), len(schema)) + for field_name in schema: + self.assertTrue(field_name in record) + self.assertEqual(data_elts[i][field_name].shape, record[field_name].shape) + self.assertEqual(data_elts[i][field_name].dtype, record[field_name].dtype) + self.assertTrue(torch.allclose(data_elts[i][field_name], record[field_name])) + + +def _find_free_port(): + """ + Copied from detectron2/engine/launch.py + """ + import socket + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # Binding to port 0 will cause the OS to find an available port for us + sock.bind(("", 0)) + port = sock.getsockname()[1] + sock.close() + # NOTE: there is still a chance the port could be taken by other processes. + return port + + +def launch(main_func, nprocs, args=()): + port = _find_free_port() + dist_url = f"tcp://127.0.0.1:{port}" + # dist_url = "env://" + mp.spawn( + distributed_worker, nprocs=nprocs, args=(main_func, nprocs, dist_url, args), daemon=False + ) + + +def distributed_worker(local_rank, main_func, nprocs, dist_url, args): + dist.init_process_group( + backend="gloo", init_method=dist_url, world_size=nprocs, rank=local_rank + ) + comm.synchronize() + assert comm._LOCAL_PROCESS_GROUP is None + pg = dist.new_group(list(range(nprocs))) + comm._LOCAL_PROCESS_GROUP = pg + main_func(*args) + + +def ram_read_write_worker(): + schema = { + "tf": SizeData(dtype="float32", shape=(112, 112)), + "ti": SizeData(dtype="int32", shape=(4, 64, 64)), + } + storage = SingleProcessRamTensorStorage(schema, io.BytesIO()) + world_size = comm.get_world_size() + rank = comm.get_rank() + data_elts = [] + # prepare different number of tensors in different processes + for i in range(rank + 1): + data_elt = { + "tf": torch.ones((112, 112), dtype=torch.float32) * (rank + i * world_size), + "ti": torch.ones((4, 64, 64), dtype=torch.int32) * (rank + i * world_size), + } + data_elts.append(data_elt) + # write data to the single process storage + for i in range(rank + 1): + record_id = storage.put(data_elts[i]) + assert record_id == i, f"Process {rank}: record ID {record_id}, expected {i}" + comm.synchronize() + # gather all data in process rank 0 + multi_storage = storage_gather(storage) + if rank != 0: + return + # read and check data from the multiprocess storage + for j in range(world_size): + for i in range(j): + record = multi_storage.get(j, i) + record_gt = { + "tf": torch.ones((112, 112), dtype=torch.float32) * (j + i * world_size), + "ti": torch.ones((4, 64, 64), dtype=torch.int32) * (j + i * world_size), + } + assert len(record) == len(schema), ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"expected {len(schema)} fields in the record, got {len(record)}" + ) + for field_name in schema: + assert field_name in record, ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name} not in the record" + ) + + assert record_gt[field_name].shape == record[field_name].shape, ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name}, expected shape {record_gt[field_name].shape} " + f"got 
{record[field_name].shape}" + ) + assert record_gt[field_name].dtype == record[field_name].dtype, ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name}, expected dtype {record_gt[field_name].dtype} " + f"got {record[field_name].dtype}" + ) + assert torch.allclose(record_gt[field_name], record[field_name]), ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name}, tensors are not close enough:" + f"L-inf {(record_gt[field_name]-record[field_name]).abs_().max()} " + f"L1 {(record_gt[field_name]-record[field_name]).abs_().sum()} " + ) + + +def file_read_write_worker(rank_to_fpath): + schema = { + "tf": SizeData(dtype="float32", shape=(112, 112)), + "ti": SizeData(dtype="int32", shape=(4, 64, 64)), + } + world_size = comm.get_world_size() + rank = comm.get_rank() + storage = SingleProcessFileTensorStorage(schema, rank_to_fpath[rank], "wb") + data_elts = [] + # prepare different number of tensors in different processes + for i in range(rank + 1): + data_elt = { + "tf": torch.ones((112, 112), dtype=torch.float32) * (rank + i * world_size), + "ti": torch.ones((4, 64, 64), dtype=torch.int32) * (rank + i * world_size), + } + data_elts.append(data_elt) + # write data to the single process storage + for i in range(rank + 1): + record_id = storage.put(data_elts[i]) + assert record_id == i, f"Process {rank}: record ID {record_id}, expected {i}" + comm.synchronize() + # gather all data in process rank 0 + multi_storage = storage_gather(storage) + if rank != 0: + return + # read and check data from the multiprocess storage + for j in range(world_size): + for i in range(j): + record = multi_storage.get(j, i) + record_gt = { + "tf": torch.ones((112, 112), dtype=torch.float32) * (j + i * world_size), + "ti": torch.ones((4, 64, 64), dtype=torch.int32) * (j + i * world_size), + } + assert len(record) == len(schema), ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"expected {len(schema)} fields in the record, got {len(record)}" + ) + for field_name in schema: + assert field_name in record, ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name} not in the record" + ) + + assert record_gt[field_name].shape == record[field_name].shape, ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name}, expected shape {record_gt[field_name].shape} " + f"got {record[field_name].shape}" + ) + assert record_gt[field_name].dtype == record[field_name].dtype, ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name}, expected dtype {record_gt[field_name].dtype} " + f"got {record[field_name].dtype}" + ) + assert torch.allclose(record_gt[field_name], record[field_name]), ( + f"Process {rank}: multi storage record, rank {j}, id {i}: " + f"field {field_name}, tensors are not close enough:" + f"L-inf {(record_gt[field_name]-record[field_name]).abs_().max()} " + f"L1 {(record_gt[field_name]-record[field_name]).abs_().sum()} " + ) + + +class TestMultiProcessRamTensorStorage(unittest.TestCase): + def test_read_write_1(self): + launch(ram_read_write_worker, 8) + + +class TestMultiProcessFileTensorStorage(unittest.TestCase): + def test_read_write_1(self): + with ExitStack() as stack: + # WARNING: opens the files several times! 
may not work on all platforms + rank_to_fpath = { + i: stack.enter_context(tempfile.NamedTemporaryFile()).name for i in range(8) + } + launch(file_read_write_worker, 8, (rank_to_fpath,)) diff --git a/data_processing/detectron2/projects/DensePose/tests/test_video_keyframe_dataset.py b/data_processing/detectron2/projects/DensePose/tests/test_video_keyframe_dataset.py new file mode 100644 index 0000000..988e161 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/tests/test_video_keyframe_dataset.py @@ -0,0 +1,98 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import contextlib +import os +import random +import tempfile +import unittest +import torch +import torchvision.io as io + +from densepose.data.transform import ImageResizeTransform +from densepose.data.video import RandomKFramesSelector, VideoKeyframeDataset + +try: + import av +except ImportError: + av = None + + +# copied from torchvision test/test_io.py +def _create_video_frames(num_frames, height, width): + y, x = torch.meshgrid(torch.linspace(-2, 2, height), torch.linspace(-2, 2, width)) + data = [] + for i in range(num_frames): + xc = float(i) / num_frames + yc = 1 - float(i) / (2 * num_frames) + d = torch.exp(-((x - xc) ** 2 + (y - yc) ** 2) / 2) * 255 + data.append(d.unsqueeze(2).repeat(1, 1, 3).byte()) + return torch.stack(data, 0) + + +# adapted from torchvision test/test_io.py +@contextlib.contextmanager +def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None, options=None): + if lossless: + if video_codec is not None: + raise ValueError("video_codec can't be specified together with lossless") + if options is not None: + raise ValueError("options can't be specified together with lossless") + video_codec = "libx264rgb" + options = {"crf": "0"} + if video_codec is None: + video_codec = "libx264" + if options is None: + options = {} + data = _create_video_frames(num_frames, height, width) + with tempfile.NamedTemporaryFile(suffix=".mp4") as f: + f.close() + io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options) + yield f.name, data + os.unlink(f.name) + + +@unittest.skipIf(av is None, "PyAV unavailable") +class TestVideoKeyframeDataset(unittest.TestCase): + def test_read_keyframes_all(self): + with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data): + video_list = [fname] + category_list = [None] + dataset = VideoKeyframeDataset(video_list, category_list) + self.assertEqual(len(dataset), 1) + data1, categories1 = dataset[0]["images"], dataset[0]["categories"] + self.assertEqual(data1.shape, torch.Size((5, 3, 300, 300))) + self.assertEqual(data1.dtype, torch.float32) + self.assertIsNone(categories1[0]) + return + self.assertTrue(False) + + def test_read_keyframes_with_selector(self): + with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data): + video_list = [fname] + category_list = [None] + random.seed(0) + frame_selector = RandomKFramesSelector(3) + dataset = VideoKeyframeDataset(video_list, category_list, frame_selector) + self.assertEqual(len(dataset), 1) + data1, categories1 = dataset[0]["images"], dataset[0]["categories"] + self.assertEqual(data1.shape, torch.Size((3, 3, 300, 300))) + self.assertEqual(data1.dtype, torch.float32) + self.assertIsNone(categories1[0]) + return + self.assertTrue(False) + + def test_read_keyframes_with_selector_with_transform(self): + with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data): + video_list = [fname] + category_list = [None] + random.seed(0) + frame_selector = 
RandomKFramesSelector(1) + transform = ImageResizeTransform() + dataset = VideoKeyframeDataset(video_list, category_list, frame_selector, transform) + data1, categories1 = dataset[0]["images"], dataset[0]["categories"] + self.assertEqual(len(dataset), 1) + self.assertEqual(data1.shape, torch.Size((1, 3, 800, 800))) + self.assertEqual(data1.dtype, torch.float32) + self.assertIsNone(categories1[0]) + return + self.assertTrue(False) diff --git a/data_processing/detectron2/projects/DensePose/train_net.py b/data_processing/detectron2/projects/DensePose/train_net.py new file mode 100644 index 0000000..e8d77b9 --- /dev/null +++ b/data_processing/detectron2/projects/DensePose/train_net.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +DensePose Training Script. + +This script is similar to the training script in detectron2/tools. + +It is an example of how a user might use detectron2 for a new project. +""" + +from datetime import timedelta + +import detectron2.utils.comm as comm +from detectron2.config import get_cfg +from detectron2.engine import DEFAULT_TIMEOUT, default_argument_parser, default_setup, hooks, launch +from detectron2.evaluation import verify_results +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import setup_logger + +from densepose import add_densepose_config +from densepose.engine import Trainer +from densepose.modeling.densepose_checkpoint import DensePoseCheckpointer + + +def setup(args): + cfg = get_cfg() + add_densepose_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + # Setup logger for "densepose" module + setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="densepose") + return cfg + + +def main(args): + cfg = setup(args) + # disable strict kwargs checking: allow one to specify path handle + # hints through kwargs, like timeout in DP evaluation + PathManager.set_strict_kwargs_checking(False) + + if args.eval_only: + model = Trainer.build_model(cfg) + DensePoseCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if cfg.TEST.AUG.ENABLED: + res.update(Trainer.test_with_TTA(cfg, model)) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + if cfg.TEST.AUG.ENABLED: + trainer.register_hooks( + [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))] + ) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + cfg = setup(args) + timeout = ( + DEFAULT_TIMEOUT if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE else timedelta(hours=4) + ) + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + timeout=timeout, + ) diff --git a/data_processing/detectron2/projects/MViTv2/README.md b/data_processing/detectron2/projects/MViTv2/README.md new file mode 100644 index 0000000..64afd79 --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/README.md @@ -0,0 +1,142 @@ +# MViTv2: Improved Multiscale Vision Transformers for Classification and Detection + +Yanghao Li*, Chao-Yuan Wu*, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, Christoph Feichtenhofer* + 
+[[`arXiv`](https://arxiv.org/abs/2112.01526)] [[`BibTeX`](#CitingMViTv2)]
+
+In this repository, we provide detection configs and models for MViTv2 (CVPR 2022) in Detectron2. For image classification tasks, please refer to [MViTv2 repo](https://github.com/facebookresearch/mvit).
+
+## Results and Pretrained Models
+
+### COCO
+
+| Name | pre-train | Method | epochs | box AP | mask AP | #params | FLOPS | model id | download |
+| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| MViTV2-T | IN1K | Mask R-CNN | 36 | 48.3 | 43.8 | 44M | 279G | 307611773 | model |
+| MViTV2-T | IN1K | Cascade Mask R-CNN | 36 | 52.2 | 45.0 | 76M | 701G | 308344828 | model |
+| MViTV2-S | IN1K | Cascade Mask R-CNN | 36 | 53.2 | 46.0 | 87M | 748G | 308344647 | model |
+| MViTV2-B | IN1K | Cascade Mask R-CNN | 36 | 54.1 | 46.7 | 103M | 814G | 308109448 | model |
+| MViTV2-B | IN21K | Cascade Mask R-CNN | 36 | 54.9 | 47.4 | 103M | 814G | 309003202 | model |
+| MViTV2-L | IN21K | Cascade Mask R-CNN | 50 | 55.8 | 48.3 | 270M | 1519G | 308099658 | model |
+| MViTV2-H | IN21K | Cascade Mask R-CNN | 36 | 56.1 | 48.5 | 718M | 3084G | 309013744 | model |
+ +Note that the above models were trained and measured on 8-node with 64 NVIDIA A100 GPUs in total. The ImageNet pre-trained model weights are obtained from [MViTv2 repo](https://github.com/facebookresearch/mvit). + +## Training +All configs can be trained with: + +``` +../../tools/lazyconfig_train_net.py --config-file configs/path/to/config.py +``` +By default, we use 64 GPUs with batch size as 64 for training. + +## Evaluation +Model evaluation can be done similarly: +``` +../../tools/lazyconfig_train_net.py --config-file configs/path/to/config.py --eval-only train.init_checkpoint=/path/to/model_checkpoint +``` + + + +## Citing MViTv2 + +If you use MViTv2, please use the following BibTeX entry. + +```BibTeX +@inproceedings{li2021improved, + title={MViTv2: Improved multiscale vision transformers for classification and detection}, + author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, + booktitle={CVPR}, + year={2022} +} +``` diff --git a/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_3x.py b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_3x.py new file mode 100644 index 0000000..61366bf --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_3x.py @@ -0,0 +1,8 @@ +from .cascade_mask_rcnn_mvitv2_t_3x import model, dataloader, optimizer, lr_multiplier, train + + +model.backbone.bottom_up.depth = 24 +model.backbone.bottom_up.last_block_indexes = (1, 4, 20, 23) +model.backbone.bottom_up.drop_path_rate = 0.4 + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_B_in1k.pyth" diff --git a/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_in21k_3x.py b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_in21k_3x.py new file mode 100644 index 0000000..7c3bdce --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_b_in21k_3x.py @@ -0,0 +1,3 @@ +from .cascade_mask_rcnn_mvitv2_b_3x import model, dataloader, optimizer, lr_multiplier, train + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_B_in21k.pyth" diff --git a/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_h_in21k_lsj_3x.py b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_h_in21k_lsj_3x.py new file mode 100644 index 0000000..6fee5e9 --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_h_in21k_lsj_3x.py @@ -0,0 +1,12 @@ +from .cascade_mask_rcnn_mvitv2_b_3x import model, optimizer, train, lr_multiplier +from .common.coco_loader_lsj import dataloader + + +model.backbone.bottom_up.embed_dim = 192 +model.backbone.bottom_up.depth = 80 +model.backbone.bottom_up.num_heads = 3 +model.backbone.bottom_up.last_block_indexes = (3, 11, 71, 79) +model.backbone.bottom_up.drop_path_rate = 0.6 +model.backbone.bottom_up.use_act_checkpoint = True + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_H_in21k.pyth" diff --git a/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_l_in21k_lsj_50ep.py b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_l_in21k_lsj_50ep.py new file mode 100644 index 0000000..38da895 --- /dev/null +++ 
b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_l_in21k_lsj_50ep.py @@ -0,0 +1,31 @@ +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2.config import LazyCall as L +from detectron2.solver import WarmupParamScheduler + +from .cascade_mask_rcnn_mvitv2_b_3x import model, optimizer, train +from .common.coco_loader_lsj import dataloader + + +model.backbone.bottom_up.embed_dim = 144 +model.backbone.bottom_up.depth = 48 +model.backbone.bottom_up.num_heads = 2 +model.backbone.bottom_up.last_block_indexes = (1, 7, 43, 47) +model.backbone.bottom_up.drop_path_rate = 0.5 + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_L_in21k.pyth" + +# Schedule +# 50ep = 184375 // 2 iters * 64 images/iter / 118000 images/ep +train.max_iter = 184375 // 2 +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[163889 // 2, 177546 // 2], + num_updates=train.max_iter, + ), + warmup_length=250 / train.max_iter, + warmup_factor=0.001, +) + +optimizer.lr = 1e-4 diff --git a/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_s_3x.py b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_s_3x.py new file mode 100644 index 0000000..ad8eeb4 --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_s_3x.py @@ -0,0 +1,7 @@ +from .cascade_mask_rcnn_mvitv2_t_3x import model, dataloader, optimizer, lr_multiplier, train + + +model.backbone.bottom_up.depth = 16 +model.backbone.bottom_up.last_block_indexes = (0, 2, 13, 15) + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_S_in1k.pyth" diff --git a/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_t_3x.py b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_t_3x.py new file mode 100644 index 0000000..51327dd --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/configs/cascade_mask_rcnn_mvitv2_t_3x.py @@ -0,0 +1,48 @@ +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads +from detectron2.layers.batch_norm import NaiveSyncBatchNorm + +from .mask_rcnn_mvitv2_t_3x import model, dataloader, optimizer, lr_multiplier, train + + +# arguments that don't exist for Cascade R-CNN +[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] + +model.roi_heads.update( + _target_=CascadeROIHeads, + box_heads=[ + L(FastRCNNConvFCHead)( + input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[256, 256, 256, 256], + fc_dims=[1024], + conv_norm=lambda c: NaiveSyncBatchNorm(c, stats_mode="N"), + ) + for _ in range(3) + ], + box_predictors=[ + L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + test_score_thresh=0.05, + box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), + cls_agnostic_bbox_reg=True, + num_classes="${...num_classes}", + ) + for (w1, w2) in [(10, 5), (20, 10), (30, 15)] + ], + proposal_matchers=[ + L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) + for th in [0.5, 0.6, 0.7] + ], +) + +# Using NaiveSyncBatchNorm becase heads may have empty input. That is not supported by +# torch.nn.SyncBatchNorm. 
We can remove this after +# https://github.com/pytorch/pytorch/issues/36530 is fixed. +model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c, stats_mode="N") + +# 2conv in RPN: +# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950 +model.proposal_generator.head.conv_dims = [-1, -1] diff --git a/data_processing/detectron2/projects/MViTv2/configs/common/coco_loader.py b/data_processing/detectron2/projects/MViTv2/configs/common/coco_loader.py new file mode 100644 index 0000000..923878b --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/configs/common/coco_loader.py @@ -0,0 +1,59 @@ +from omegaconf import OmegaConf + +import detectron2.data.transforms as T +from detectron2.config import LazyCall as L +from detectron2.data import ( + DatasetMapper, + build_detection_test_loader, + build_detection_train_loader, + get_detection_dataset_dicts, +) +from detectron2.evaluation import COCOEvaluator + +dataloader = OmegaConf.create() + +dataloader.train = L(build_detection_train_loader)( + dataset=L(get_detection_dataset_dicts)(names="coco_2017_train"), + mapper=L(DatasetMapper)( + is_train=True, + augmentations=[ + L(T.RandomApply)( + tfm_or_aug=L(T.AugmentationList)( + augs=[ + L(T.ResizeShortestEdge)( + short_edge_length=[400, 500, 600], sample_style="choice" + ), + L(T.RandomCrop)(crop_type="absolute_range", crop_size=(384, 600)), + ] + ), + prob=0.5, + ), + L(T.ResizeShortestEdge)( + short_edge_length=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), + sample_style="choice", + max_size=1333, + ), + L(T.RandomFlip)(horizontal=True), + ], + image_format="RGB", + use_instance_mask=True, + ), + total_batch_size=16, + num_workers=4, +) + +dataloader.test = L(build_detection_test_loader)( + dataset=L(get_detection_dataset_dicts)(names="coco_2017_val", filter_empty=False), + mapper=L(DatasetMapper)( + is_train=False, + augmentations=[ + L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333), + ], + image_format="${...train.mapper.image_format}", + ), + num_workers=4, +) + +dataloader.evaluator = L(COCOEvaluator)( + dataset_name="${..test.dataset.names}", +) diff --git a/data_processing/detectron2/projects/MViTv2/configs/common/coco_loader_lsj.py b/data_processing/detectron2/projects/MViTv2/configs/common/coco_loader_lsj.py new file mode 100644 index 0000000..019b21f --- /dev/null +++ b/data_processing/detectron2/projects/MViTv2/configs/common/coco_loader_lsj.py @@ -0,0 +1,19 @@ +import detectron2.data.transforms as T +from detectron2 import model_zoo +from detectron2.config import LazyCall as L + +from .coco_loader import dataloader + +# Data using LSJ +image_size = 1024 +dataloader.train.mapper.augmentations = [ + L(T.RandomFlip)(horizontal=True), # flip first + L(T.ResizeScale)( + min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size + ), + L(T.FixedSizeCrop)(crop_size=(image_size, image_size)), +] +dataloader.train.mapper.image_format = "RGB" +dataloader.train.total_batch_size = 64 +# recompute boxes due to cropping +dataloader.train.mapper.recompute_boxes = True diff --git a/data_processing/detectron2/projects/MViTv2/configs/mask_rcnn_mvitv2_t_3x.py b/data_processing/detectron2/projects/MViTv2/configs/mask_rcnn_mvitv2_t_3x.py new file mode 100644 index 0000000..ba4bdfe --- /dev/null +++ 
b/data_processing/detectron2/projects/MViTv2/configs/mask_rcnn_mvitv2_t_3x.py @@ -0,0 +1,55 @@ +from functools import partial +import torch.nn as nn +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2 import model_zoo +from detectron2.config import LazyCall as L +from detectron2.solver import WarmupParamScheduler +from detectron2.modeling import MViT + +from .common.coco_loader import dataloader + +model = model_zoo.get_config("common/models/mask_rcnn_fpn.py").model +constants = model_zoo.get_config("common/data/constants.py").constants +model.pixel_mean = constants.imagenet_rgb256_mean +model.pixel_std = constants.imagenet_rgb256_std +model.input_format = "RGB" +model.backbone.bottom_up = L(MViT)( + embed_dim=96, + depth=10, + num_heads=1, + last_block_indexes=(0, 2, 7, 9), + residual_pooling=True, + drop_path_rate=0.2, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + out_features=("scale2", "scale3", "scale4", "scale5"), +) +model.backbone.in_features = "${.bottom_up.out_features}" + + +# Initialization and trainer settings +train = model_zoo.get_config("common/train.py").train +train.amp.enabled = True +train.ddp.fp16_compression = True +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_T_in1k.pyth" + +dataloader.train.total_batch_size = 64 + +# 36 epochs +train.max_iter = 67500 +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[52500, 62500, 67500], + ), + warmup_length=250 / train.max_iter, + warmup_factor=0.001, +) + +optimizer = model_zoo.get_config("common/optim.py").AdamW +optimizer.params.overrides = { + "pos_embed": {"weight_decay": 0.0}, + "rel_pos_h": {"weight_decay": 0.0}, + "rel_pos_w": {"weight_decay": 0.0}, +} +optimizer.lr = 1.6e-4 diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/README.md b/data_processing/detectron2/projects/Panoptic-DeepLab/README.md new file mode 100644 index 0000000..86b6d42 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/README.md @@ -0,0 +1,175 @@ +# Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation + +Bowen Cheng, Maxwell D. Collins, Yukun Zhu, Ting Liu, Thomas S. Huang, Hartwig Adam, Liang-Chieh Chen + +[[`arXiv`](https://arxiv.org/abs/1911.10194)] [[`BibTeX`](#CitingPanopticDeepLab)] [[`Reference implementation`](https://github.com/bowenc0221/panoptic-deeplab)] + +
+
+## Installation
+Install Detectron2 following [the instructions](https://detectron2.readthedocs.io/tutorials/install.html).
+To use Cityscapes, prepare the data following the [tutorial](https://detectron2.readthedocs.io/tutorials/builtin_datasets.html#expected-dataset-structure-for-cityscapes).
+
+## Training
+
+To train a model with 8 GPUs, run:
+```bash
+cd /path/to/detectron2/projects/Panoptic-DeepLab
+python train_net.py --config-file configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml --num-gpus 8
+```
+
+## Evaluation
+
+Model evaluation can be done similarly:
+```bash
+cd /path/to/detectron2/projects/Panoptic-DeepLab
+python train_net.py --config-file configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint
+```
+
+## Benchmark network speed
+
+If you want to benchmark the network speed without post-processing, you can run the evaluation script with `MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED True`:
+```bash
+cd /path/to/detectron2/projects/Panoptic-DeepLab
+python train_net.py --config-file configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED True
+```
+
+## Cityscapes Panoptic Segmentation
+Cityscapes models are trained with ImageNet pretraining.
+
+| Method | Backbone | Output resolution | PQ | SQ | RQ | mIoU | AP | Memory (M) | model id | download |
+| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| Panoptic-DeepLab | R50-DC5 | 1024×2048 | 58.6 | 80.9 | 71.2 | 75.9 | 29.8 | 8668 | - | model \| metrics |
+| Panoptic-DeepLab | R52-DC5 | 1024×2048 | 60.3 | 81.5 | 72.9 | 78.2 | 33.2 | 9682 | 30841561 | model \| metrics |
+| Panoptic-DeepLab (DSConv) | R52-DC5 | 1024×2048 | 60.3 | 81.0 | 73.2 | 78.7 | 32.1 | 10466 | 33148034 | model \| metrics |
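+
+The checkpoints above can also be used outside `train_net.py`. The snippet below is a minimal inference sketch, not part of the original project docs: the config path is relative to this project directory, the image and checkpoint paths are placeholders, and the output format follows `panoptic_seg.py` in this diff (the second element of `"panoptic_seg"` is `None` for this model).
+
+```python
+import cv2
+from detectron2.config import get_cfg
+from detectron2.engine import DefaultPredictor
+from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config
+
+cfg = get_cfg()
+add_panoptic_deeplab_config(cfg)
+cfg.merge_from_file(
+    "configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml"
+)
+cfg.MODEL.WEIGHTS = "/path/to/model_checkpoint"  # placeholder
+
+predictor = DefaultPredictor(cfg)  # handles resizing and RGB/BGR conversion from cfg.INPUT
+outputs = predictor(cv2.imread("input.png"))  # placeholder image path
+panoptic_seg, _ = outputs["panoptic_seg"]     # per-pixel panoptic ids; segments_info is None here
+sem_seg = outputs["sem_seg"].argmax(dim=0)    # per-pixel semantic labels
+```
+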
+
+Note:
+- [R52](https://dl.fbaipublicfiles.com/detectron2/DeepLab/R-52.pkl): a ResNet-50 with its first 7x7 convolution replaced by 3 3x3 convolutions. This modification has been used in most semantic segmentation papers. We pre-train this backbone on ImageNet using the default recipe of [pytorch examples](https://github.com/pytorch/examples/tree/master/imagenet).
+- DC5 means using dilated convolution in `res5`.
+- We use a smaller training crop size (512x1024) than the original paper (1025x2049); we find that the larger crop size (1024x2048) can further improve PQ by 1.5% but also degrades AP by 3%.
+- The implementation with regular Conv2d in ASPP and head is much heavier than the original paper's.
+- This implementation does not include the optimized post-processing code needed for deployment. Post-processing the network outputs now takes a similar amount of time to the network itself. Please refer to the speed reported in the original paper for comparison.
+- DSConv refers to using DepthwiseSeparableConv2d in ASPP and decoder. The implementation with DSConv is identical to the original paper.
+
+## COCO Panoptic Segmentation
+COCO models are trained with ImageNet pretraining on 16 V100s.
+
+| Method | Backbone | Output resolution | PQ | SQ | RQ | Box AP | Mask AP | Memory (M) | model id | download |
+| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| Panoptic-DeepLab (DSConv) | R52-DC5 | 640×640 | 35.5 | 77.3 | 44.7 | 18.6 | 19.7 | | 246448865 | model \| metrics |
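+
+In the panoptic map returned by these models, each pixel stores `category_id * label_divisor + instance_id` for thing pixels, `category_id * label_divisor` for stuff pixels, and `-1` for void regions (see `post_processing.py` later in this diff). The helper below is only an illustrative sketch for splitting the two components; `label_divisor` comes from the dataset metadata, e.g. `MetadataCatalog.get(cfg.DATASETS.TEST[0]).label_divisor`.
+
+```python
+import torch
+
+
+def split_panoptic_ids(panoptic_seg: torch.Tensor, label_divisor: int, void_label: int = -1):
+    """Recover per-pixel (category_id, instance_id) from a Panoptic-DeepLab panoptic map."""
+    valid = panoptic_seg != void_label
+    category_id = torch.where(
+        valid, panoptic_seg // label_divisor, torch.full_like(panoptic_seg, void_label)
+    )
+    # Stuff and void pixels get instance_id 0; thing instances are numbered from 1.
+    instance_id = torch.where(valid, panoptic_seg % label_divisor, torch.zeros_like(panoptic_seg))
+    return category_id, instance_id
+```
+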
+ +Note: +- [R52](https://dl.fbaipublicfiles.com/detectron2/DeepLab/R-52.pkl): a ResNet-50 with its first 7x7 convolution replaced by 3 3x3 convolutions. This modification has been used in most semantic segmentation papers. We pre-train this backbone on ImageNet using the default recipe of [pytorch examples](https://github.com/pytorch/examples/tree/master/imagenet). +- DC5 means using dilated convolution in `res5`. +- This reproduced number matches the original paper (35.5 vs. 35.1 PQ). +- This implementation does not include optimized post-processing code needed for deployment. Post-processing the network + outputs now takes more time than the network itself. Please refer to speed in the original paper for comparison. +- DSConv refers to using DepthwiseSeparableConv2d in ASPP and decoder. + +## Citing Panoptic-DeepLab + +If you use Panoptic-DeepLab, please use the following BibTeX entry. + +* CVPR 2020 paper: + +``` +@inproceedings{cheng2020panoptic, + title={Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation}, + author={Cheng, Bowen and Collins, Maxwell D and Zhu, Yukun and Liu, Ting and Huang, Thomas S and Adam, Hartwig and Chen, Liang-Chieh}, + booktitle={CVPR}, + year={2020} +} +``` + +* ICCV 2019 COCO-Mapillary workshp challenge report: + +``` +@inproceedings{cheng2019panoptic, + title={Panoptic-DeepLab}, + author={Cheng, Bowen and Collins, Maxwell D and Zhu, Yukun and Liu, Ting and Huang, Thomas S and Adam, Hartwig and Chen, Liang-Chieh}, + booktitle={ICCV COCO + Mapillary Joint Recognition Challenge Workshop}, + year={2019} +} +``` diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/configs/COCO-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_200k_bs64_crop_640_640_coco_dsconv.yaml b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/COCO-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_200k_bs64_crop_640_640_coco_dsconv.yaml new file mode 100644 index 0000000..6944c6f --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/COCO-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_200k_bs64_crop_640_640_coco_dsconv.yaml @@ -0,0 +1,42 @@ +_BASE_: ../Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-52.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False + SEM_SEG_HEAD: + NUM_CLASSES: 133 + LOSS_TOP_K: 1.0 + USE_DEPTHWISE_SEPARABLE_CONV: True + PANOPTIC_DEEPLAB: + STUFF_AREA: 4096 + NMS_KERNEL: 41 + SIZE_DIVISIBILITY: 640 + USE_DEPTHWISE_SEPARABLE_CONV: True +DATASETS: + TRAIN: ("coco_2017_train_panoptic",) + TEST: ("coco_2017_val_panoptic",) +SOLVER: + BASE_LR: 0.0005 + MAX_ITER: 200000 + IMS_PER_BATCH: 64 +INPUT: + FORMAT: "RGB" + GAUSSIAN_SIGMA: 8 + MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 640) for x in range(5, 16)]"] + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 640 + MAX_SIZE_TRAIN: 960 + MAX_SIZE_TEST: 640 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (640, 640) diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml 
b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml new file mode 100644 index 0000000..b737998 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml @@ -0,0 +1,65 @@ +MODEL: + META_ARCHITECTURE: "PanopticDeepLab" + BACKBONE: + FREEZE_AT: 0 + RESNETS: + OUT_FEATURES: ["res2", "res3", "res5"] + RES5_DILATION: 2 + SEM_SEG_HEAD: + NAME: "PanopticDeepLabSemSegHead" + IN_FEATURES: ["res2", "res3", "res5"] + PROJECT_FEATURES: ["res2", "res3"] + PROJECT_CHANNELS: [32, 64] + ASPP_CHANNELS: 256 + ASPP_DILATIONS: [6, 12, 18] + ASPP_DROPOUT: 0.1 + HEAD_CHANNELS: 256 + CONVS_DIM: 256 + COMMON_STRIDE: 4 + NUM_CLASSES: 19 + LOSS_TYPE: "hard_pixel_mining" + NORM: "SyncBN" + INS_EMBED_HEAD: + NAME: "PanopticDeepLabInsEmbedHead" + IN_FEATURES: ["res2", "res3", "res5"] + PROJECT_FEATURES: ["res2", "res3"] + PROJECT_CHANNELS: [32, 64] + ASPP_CHANNELS: 256 + ASPP_DILATIONS: [6, 12, 18] + ASPP_DROPOUT: 0.1 + HEAD_CHANNELS: 32 + CONVS_DIM: 128 + COMMON_STRIDE: 4 + NORM: "SyncBN" + CENTER_LOSS_WEIGHT: 200.0 + OFFSET_LOSS_WEIGHT: 0.01 + PANOPTIC_DEEPLAB: + STUFF_AREA: 2048 + CENTER_THRESHOLD: 0.1 + NMS_KERNEL: 7 + TOP_K_INSTANCE: 200 +DATASETS: + TRAIN: ("cityscapes_fine_panoptic_train",) + TEST: ("cityscapes_fine_panoptic_val",) +SOLVER: + OPTIMIZER: "ADAM" + BASE_LR: 0.001 + WEIGHT_DECAY: 0.0 + WEIGHT_DECAY_NORM: 0.0 + WEIGHT_DECAY_BIAS: 0.0 + MAX_ITER: 60000 + LR_SCHEDULER_NAME: "WarmupPolyLR" + IMS_PER_BATCH: 32 +INPUT: + MIN_SIZE_TRAIN: (512, 640, 704, 832, 896, 1024, 1152, 1216, 1344, 1408, 1536, 1664, 1728, 1856, 1920, 2048) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 4096 + MAX_SIZE_TEST: 2048 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (1024, 2048) +DATALOADER: + NUM_WORKERS: 10 +VERSION: 2 diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml new file mode 100644 index 0000000..fde902b --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml @@ -0,0 +1,20 @@ +_BASE_: Base-PanopticDeepLab-OS16.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-52.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False +SOLVER: + MAX_ITER: 90000 +INPUT: + FORMAT: "RGB" + CROP: + SIZE: (512, 1024) diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml new file mode 100644 index 0000000..8e31420 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml @@ -0,0 +1,24 @@ +_BASE_: 
Base-PanopticDeepLab-OS16.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-52.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False + PANOPTIC_DEEPLAB: + USE_DEPTHWISE_SEPARABLE_CONV: True + SEM_SEG_HEAD: + USE_DEPTHWISE_SEPARABLE_CONV: True +SOLVER: + MAX_ITER: 90000 +INPUT: + FORMAT: "RGB" + CROP: + SIZE: (512, 1024) diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/__init__.py b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/__init__.py new file mode 100644 index 0000000..8d3c980 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .config import add_panoptic_deeplab_config +from .dataset_mapper import PanopticDeeplabDatasetMapper +from .panoptic_seg import ( + PanopticDeepLab, + INS_EMBED_BRANCHES_REGISTRY, + build_ins_embed_branch, + PanopticDeepLabSemSegHead, + PanopticDeepLabInsEmbedHead, +) diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/config.py b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/config.py new file mode 100644 index 0000000..5aa2d28 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/config.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +from detectron2.config import CfgNode as CN +from detectron2.projects.deeplab import add_deeplab_config + + +def add_panoptic_deeplab_config(cfg): + """ + Add config for Panoptic-DeepLab. + """ + # Reuse DeepLab config. + add_deeplab_config(cfg) + # Target generation parameters. + cfg.INPUT.GAUSSIAN_SIGMA = 10 + cfg.INPUT.IGNORE_STUFF_IN_OFFSET = True + cfg.INPUT.SMALL_INSTANCE_AREA = 4096 + cfg.INPUT.SMALL_INSTANCE_WEIGHT = 3 + cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC = False + # Optimizer type. + cfg.SOLVER.OPTIMIZER = "ADAM" + # Panoptic-DeepLab semantic segmentation head. + # We add an extra convolution before predictor. + cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS = 256 + cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K = 0.2 + # Panoptic-DeepLab instance segmentation head. + cfg.MODEL.INS_EMBED_HEAD = CN() + cfg.MODEL.INS_EMBED_HEAD.NAME = "PanopticDeepLabInsEmbedHead" + cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES = ["res2", "res3", "res5"] + cfg.MODEL.INS_EMBED_HEAD.PROJECT_FEATURES = ["res2", "res3"] + cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS = [32, 64] + cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS = 256 + cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS = [6, 12, 18] + cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT = 0.1 + # We add an extra convolution before predictor. + cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS = 32 + cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM = 128 + cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE = 4 + cfg.MODEL.INS_EMBED_HEAD.NORM = "SyncBN" + cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT = 200.0 + cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT = 0.01 + # Panoptic-DeepLab post-processing setting. + cfg.MODEL.PANOPTIC_DEEPLAB = CN() + # Stuff area limit, ignore stuff region below this number. 
+ cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA = 2048 + cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD = 0.1 + cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL = 7 + cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE = 200 + # If set to False, Panoptic-DeepLab will not evaluate instance segmentation. + cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES = True + cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV = False + # This is the padding parameter for images with various sizes. ASPP layers + # requires input images to be divisible by the average pooling size and we + # can use `MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY` to pad all images to + # a fixed resolution (e.g. 640x640 for COCO) to avoid having a image size + # that is not divisible by ASPP average pooling size. + cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY = -1 + # Only evaluates network speed (ignores post-processing). + cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED = False diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py new file mode 100644 index 0000000..53272c7 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/dataset_mapper.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import numpy as np +from typing import Callable, List, Union +import torch +from panopticapi.utils import rgb2id + +from detectron2.config import configurable +from detectron2.data import MetadataCatalog +from detectron2.data import detection_utils as utils +from detectron2.data import transforms as T + +from .target_generator import PanopticDeepLabTargetGenerator + +__all__ = ["PanopticDeeplabDatasetMapper"] + + +class PanopticDeeplabDatasetMapper: + """ + The callable currently does the following: + + 1. Read the image from "file_name" and label from "pan_seg_file_name" + 2. Applies random scale, crop and flip transforms to image and label + 3. Prepare data to Tensor and generate training targets from label + """ + + @configurable + def __init__( + self, + *, + augmentations: List[Union[T.Augmentation, T.Transform]], + image_format: str, + panoptic_target_generator: Callable, + ): + """ + NOTE: this interface is experimental. + + Args: + augmentations: a list of augmentations or deterministic transforms to apply + image_format: an image format supported by :func:`detection_utils.read_image`. + panoptic_target_generator: a callable that takes "panoptic_seg" and + "segments_info" to generate training targets for the model. + """ + # fmt: off + self.augmentations = T.AugmentationList(augmentations) + self.image_format = image_format + # fmt: on + logger = logging.getLogger(__name__) + logger.info("Augmentations used in training: " + str(augmentations)) + + self.panoptic_target_generator = panoptic_target_generator + + @classmethod + def from_config(cls, cfg): + augs = [ + T.ResizeShortestEdge( + cfg.INPUT.MIN_SIZE_TRAIN, + cfg.INPUT.MAX_SIZE_TRAIN, + cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING, + ) + ] + if cfg.INPUT.CROP.ENABLED: + augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) + augs.append(T.RandomFlip()) + + # Assume always applies to the training set. 
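+        # Build the panoptic target generator from the metadata (ignore label,
+        # thing class ids) of the first training dataset.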
+ dataset_names = cfg.DATASETS.TRAIN + meta = MetadataCatalog.get(dataset_names[0]) + panoptic_target_generator = PanopticDeepLabTargetGenerator( + ignore_label=meta.ignore_label, + thing_ids=list(meta.thing_dataset_id_to_contiguous_id.values()), + sigma=cfg.INPUT.GAUSSIAN_SIGMA, + ignore_stuff_in_offset=cfg.INPUT.IGNORE_STUFF_IN_OFFSET, + small_instance_area=cfg.INPUT.SMALL_INSTANCE_AREA, + small_instance_weight=cfg.INPUT.SMALL_INSTANCE_WEIGHT, + ignore_crowd_in_semantic=cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC, + ) + + ret = { + "augmentations": augs, + "image_format": cfg.INPUT.FORMAT, + "panoptic_target_generator": panoptic_target_generator, + } + return ret + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + # Load image. + image = utils.read_image(dataset_dict["file_name"], format=self.image_format) + utils.check_image_size(dataset_dict, image) + # Panoptic label is encoded in RGB image. + pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB") + + # Reuses semantic transform for panoptic labels. + aug_input = T.AugInput(image, sem_seg=pan_seg_gt) + _ = self.augmentations(aug_input) + image, pan_seg_gt = aug_input.image, aug_input.sem_seg + + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + + # Generates training targets for Panoptic-DeepLab. + targets = self.panoptic_target_generator(rgb2id(pan_seg_gt), dataset_dict["segments_info"]) + dataset_dict.update(targets) + + return dataset_dict diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/panoptic_seg.py b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/panoptic_seg.py new file mode 100644 index 0000000..c12ca74 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/panoptic_seg.py @@ -0,0 +1,572 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Callable, Dict, List, Union +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.data import MetadataCatalog +from detectron2.layers import Conv2d, DepthwiseSeparableConv2d, ShapeSpec, get_norm +from detectron2.modeling import ( + META_ARCH_REGISTRY, + SEM_SEG_HEADS_REGISTRY, + build_backbone, + build_sem_seg_head, +) +from detectron2.modeling.postprocessing import sem_seg_postprocess +from detectron2.projects.deeplab import DeepLabV3PlusHead +from detectron2.projects.deeplab.loss import DeepLabCE +from detectron2.structures import BitMasks, ImageList, Instances +from detectron2.utils.registry import Registry + +from .post_processing import get_panoptic_segmentation + +__all__ = ["PanopticDeepLab", "INS_EMBED_BRANCHES_REGISTRY", "build_ins_embed_branch"] + + +INS_EMBED_BRANCHES_REGISTRY = Registry("INS_EMBED_BRANCHES") +INS_EMBED_BRANCHES_REGISTRY.__doc__ = """ +Registry for instance embedding branches, which make instance embedding +predictions from feature maps. 
+""" + + +@META_ARCH_REGISTRY.register() +class PanopticDeepLab(nn.Module): + """ + Main class for panoptic segmentation architectures. + """ + + def __init__(self, cfg): + super().__init__() + self.backbone = build_backbone(cfg) + self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape()) + self.ins_embed_head = build_ins_embed_branch(cfg, self.backbone.output_shape()) + self.register_buffer("pixel_mean", torch.tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1), False) + self.meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) + self.stuff_area = cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA + self.threshold = cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD + self.nms_kernel = cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL + self.top_k = cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE + self.predict_instances = cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES + self.use_depthwise_separable_conv = cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV + assert ( + cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV + == cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV + ) + self.size_divisibility = cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY + self.benchmark_network_speed = cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED + + @property + def device(self): + return self.pixel_mean.device + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + * "image": Tensor, image in (C, H, W) format. + * "sem_seg": semantic segmentation ground truth + * "center": center points heatmap ground truth + * "offset": pixel offsets to center points ground truth + * Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model (may be different + from input resolution), used in inference. + Returns: + list[dict]: + each dict is the results for one image. The dict contains the following keys: + + * "panoptic_seg", "sem_seg": see documentation + :doc:`/tutorials/models` for the standard output format + * "instances": available if ``predict_instances is True``. see documentation + :doc:`/tutorials/models` for the standard output format + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + # To avoid error in ASPP layer when input has different size. + size_divisibility = ( + self.size_divisibility + if self.size_divisibility > 0 + else self.backbone.size_divisibility + ) + images = ImageList.from_tensors(images, size_divisibility) + + features = self.backbone(images.tensor) + + losses = {} + if "sem_seg" in batched_inputs[0]: + targets = [x["sem_seg"].to(self.device) for x in batched_inputs] + targets = ImageList.from_tensors( + targets, size_divisibility, self.sem_seg_head.ignore_value + ).tensor + if "sem_seg_weights" in batched_inputs[0]: + # The default D2 DatasetMapper may not contain "sem_seg_weights" + # Avoid error in testing when default DatasetMapper is used. 
+ weights = [x["sem_seg_weights"].to(self.device) for x in batched_inputs] + weights = ImageList.from_tensors(weights, size_divisibility).tensor + else: + weights = None + else: + targets = None + weights = None + sem_seg_results, sem_seg_losses = self.sem_seg_head(features, targets, weights) + losses.update(sem_seg_losses) + + if "center" in batched_inputs[0] and "offset" in batched_inputs[0]: + center_targets = [x["center"].to(self.device) for x in batched_inputs] + center_targets = ImageList.from_tensors( + center_targets, size_divisibility + ).tensor.unsqueeze(1) + center_weights = [x["center_weights"].to(self.device) for x in batched_inputs] + center_weights = ImageList.from_tensors(center_weights, size_divisibility).tensor + + offset_targets = [x["offset"].to(self.device) for x in batched_inputs] + offset_targets = ImageList.from_tensors(offset_targets, size_divisibility).tensor + offset_weights = [x["offset_weights"].to(self.device) for x in batched_inputs] + offset_weights = ImageList.from_tensors(offset_weights, size_divisibility).tensor + else: + center_targets = None + center_weights = None + + offset_targets = None + offset_weights = None + + center_results, offset_results, center_losses, offset_losses = self.ins_embed_head( + features, center_targets, center_weights, offset_targets, offset_weights + ) + losses.update(center_losses) + losses.update(offset_losses) + + if self.training: + return losses + + if self.benchmark_network_speed: + return [] + + processed_results = [] + for sem_seg_result, center_result, offset_result, input_per_image, image_size in zip( + sem_seg_results, center_results, offset_results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height") + width = input_per_image.get("width") + r = sem_seg_postprocess(sem_seg_result, image_size, height, width) + c = sem_seg_postprocess(center_result, image_size, height, width) + o = sem_seg_postprocess(offset_result, image_size, height, width) + # Post-processing to get panoptic segmentation. + panoptic_image, _ = get_panoptic_segmentation( + r.argmax(dim=0, keepdim=True), + c, + o, + thing_ids=self.meta.thing_dataset_id_to_contiguous_id.values(), + label_divisor=self.meta.label_divisor, + stuff_area=self.stuff_area, + void_label=-1, + threshold=self.threshold, + nms_kernel=self.nms_kernel, + top_k=self.top_k, + ) + # For semantic segmentation evaluation. + processed_results.append({"sem_seg": r}) + panoptic_image = panoptic_image.squeeze(0) + semantic_prob = F.softmax(r, dim=0) + # For panoptic segmentation evaluation. + processed_results[-1]["panoptic_seg"] = (panoptic_image, None) + # For instance segmentation evaluation. + if self.predict_instances: + instances = [] + panoptic_image_cpu = panoptic_image.cpu().numpy() + for panoptic_label in np.unique(panoptic_image_cpu): + if panoptic_label == -1: + continue + pred_class = panoptic_label // self.meta.label_divisor + isthing = pred_class in list( + self.meta.thing_dataset_id_to_contiguous_id.values() + ) + # Get instance segmentation results. + if isthing: + instance = Instances((height, width)) + # Evaluation code takes continuous id starting from 0 + instance.pred_classes = torch.tensor( + [pred_class], device=panoptic_image.device + ) + mask = panoptic_image == panoptic_label + instance.pred_masks = mask.unsqueeze(0) + # Average semantic probability + sem_scores = semantic_prob[pred_class, ...] 
+ sem_scores = torch.mean(sem_scores[mask]) + # Center point probability + mask_indices = torch.nonzero(mask).float() + center_y, center_x = ( + torch.mean(mask_indices[:, 0]), + torch.mean(mask_indices[:, 1]), + ) + center_scores = c[0, int(center_y.item()), int(center_x.item())] + # Confidence score is semantic prob * center prob. + instance.scores = torch.tensor( + [sem_scores * center_scores], device=panoptic_image.device + ) + # Get bounding boxes + instance.pred_boxes = BitMasks(instance.pred_masks).get_bounding_boxes() + instances.append(instance) + if len(instances) > 0: + processed_results[-1]["instances"] = Instances.cat(instances) + + return processed_results + + +@SEM_SEG_HEADS_REGISTRY.register() +class PanopticDeepLabSemSegHead(DeepLabV3PlusHead): + """ + A semantic segmentation head described in :paper:`Panoptic-DeepLab`. + """ + + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + decoder_channels: List[int], + norm: Union[str, Callable], + head_channels: int, + loss_weight: float, + loss_type: str, + loss_top_k: float, + ignore_value: int, + num_classes: int, + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature + decoder_channels (list[int]): a list of output channels of each + decoder stage. It should have the same length as "input_shape" + (each element in "input_shape" corresponds to one decoder stage). + norm (str or callable): normalization for all conv layers. + head_channels (int): the output channels of extra convolutions + between decoder and predictor. + loss_weight (float): loss weight. + loss_top_k: (float): setting the top k% hardest pixels for + "hard_pixel_mining" loss. + loss_type, ignore_value, num_classes: the same as the base class. + """ + super().__init__( + input_shape, + decoder_channels=decoder_channels, + norm=norm, + ignore_value=ignore_value, + **kwargs, + ) + assert self.decoder_only + + self.loss_weight = loss_weight + use_bias = norm == "" + # `head` is additional transform before predictor + if self.use_depthwise_separable_conv: + # We use a single 5x5 DepthwiseSeparableConv2d to replace + # 2 3x3 Conv2d since they have the same receptive field. 
+ self.head = DepthwiseSeparableConv2d( + decoder_channels[0], + head_channels, + kernel_size=5, + padding=2, + norm1=norm, + activation1=F.relu, + norm2=norm, + activation2=F.relu, + ) + else: + self.head = nn.Sequential( + Conv2d( + decoder_channels[0], + decoder_channels[0], + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, decoder_channels[0]), + activation=F.relu, + ), + Conv2d( + decoder_channels[0], + head_channels, + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, head_channels), + activation=F.relu, + ), + ) + weight_init.c2_xavier_fill(self.head[0]) + weight_init.c2_xavier_fill(self.head[1]) + self.predictor = Conv2d(head_channels, num_classes, kernel_size=1) + nn.init.normal_(self.predictor.weight, 0, 0.001) + nn.init.constant_(self.predictor.bias, 0) + + if loss_type == "cross_entropy": + self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=ignore_value) + elif loss_type == "hard_pixel_mining": + self.loss = DeepLabCE(ignore_label=ignore_value, top_k_percent_pixels=loss_top_k) + else: + raise ValueError("Unexpected loss type: %s" % loss_type) + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + ret["head_channels"] = cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS + ret["loss_top_k"] = cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K + return ret + + def forward(self, features, targets=None, weights=None): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + y = self.layers(features) + if self.training: + return None, self.losses(y, targets, weights) + else: + y = F.interpolate( + y, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + return y, {} + + def layers(self, features): + assert self.decoder_only + y = super().layers(features) + y = self.head(y) + y = self.predictor(y) + return y + + def losses(self, predictions, targets, weights=None): + predictions = F.interpolate( + predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + loss = self.loss(predictions, targets, weights) + losses = {"loss_sem_seg": loss * self.loss_weight} + return losses + + +def build_ins_embed_branch(cfg, input_shape): + """ + Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`. + """ + name = cfg.MODEL.INS_EMBED_HEAD.NAME + return INS_EMBED_BRANCHES_REGISTRY.get(name)(cfg, input_shape) + + +@INS_EMBED_BRANCHES_REGISTRY.register() +class PanopticDeepLabInsEmbedHead(DeepLabV3PlusHead): + """ + A instance embedding head described in :paper:`Panoptic-DeepLab`. + """ + + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + decoder_channels: List[int], + norm: Union[str, Callable], + head_channels: int, + center_loss_weight: float, + offset_loss_weight: float, + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature + decoder_channels (list[int]): a list of output channels of each + decoder stage. It should have the same length as "input_shape" + (each element in "input_shape" corresponds to one decoder stage). + norm (str or callable): normalization for all conv layers. + head_channels (int): the output channels of extra convolutions + between decoder and predictor. + center_loss_weight (float): loss weight for center point prediction. + offset_loss_weight (float): loss weight for center offset prediction. 
+ """ + super().__init__(input_shape, decoder_channels=decoder_channels, norm=norm, **kwargs) + assert self.decoder_only + + self.center_loss_weight = center_loss_weight + self.offset_loss_weight = offset_loss_weight + use_bias = norm == "" + # center prediction + # `head` is additional transform before predictor + self.center_head = nn.Sequential( + Conv2d( + decoder_channels[0], + decoder_channels[0], + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, decoder_channels[0]), + activation=F.relu, + ), + Conv2d( + decoder_channels[0], + head_channels, + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, head_channels), + activation=F.relu, + ), + ) + weight_init.c2_xavier_fill(self.center_head[0]) + weight_init.c2_xavier_fill(self.center_head[1]) + self.center_predictor = Conv2d(head_channels, 1, kernel_size=1) + nn.init.normal_(self.center_predictor.weight, 0, 0.001) + nn.init.constant_(self.center_predictor.bias, 0) + + # offset prediction + # `head` is additional transform before predictor + if self.use_depthwise_separable_conv: + # We use a single 5x5 DepthwiseSeparableConv2d to replace + # 2 3x3 Conv2d since they have the same receptive field. + self.offset_head = DepthwiseSeparableConv2d( + decoder_channels[0], + head_channels, + kernel_size=5, + padding=2, + norm1=norm, + activation1=F.relu, + norm2=norm, + activation2=F.relu, + ) + else: + self.offset_head = nn.Sequential( + Conv2d( + decoder_channels[0], + decoder_channels[0], + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, decoder_channels[0]), + activation=F.relu, + ), + Conv2d( + decoder_channels[0], + head_channels, + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, head_channels), + activation=F.relu, + ), + ) + weight_init.c2_xavier_fill(self.offset_head[0]) + weight_init.c2_xavier_fill(self.offset_head[1]) + self.offset_predictor = Conv2d(head_channels, 2, kernel_size=1) + nn.init.normal_(self.offset_predictor.weight, 0, 0.001) + nn.init.constant_(self.offset_predictor.bias, 0) + + self.center_loss = nn.MSELoss(reduction="none") + self.offset_loss = nn.L1Loss(reduction="none") + + @classmethod + def from_config(cls, cfg, input_shape): + if cfg.INPUT.CROP.ENABLED: + assert cfg.INPUT.CROP.TYPE == "absolute" + train_size = cfg.INPUT.CROP.SIZE + else: + train_size = None + decoder_channels = [cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM] * ( + len(cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES) - 1 + ) + [cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS] + ret = dict( + input_shape={ + k: v for k, v in input_shape.items() if k in cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES + }, + project_channels=cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS, + aspp_dilations=cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS, + aspp_dropout=cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT, + decoder_channels=decoder_channels, + common_stride=cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE, + norm=cfg.MODEL.INS_EMBED_HEAD.NORM, + train_size=train_size, + head_channels=cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS, + center_loss_weight=cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT, + offset_loss_weight=cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT, + use_depthwise_separable_conv=cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV, + ) + return ret + + def forward( + self, + features, + center_targets=None, + center_weights=None, + offset_targets=None, + offset_weights=None, + ): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + center, offset = self.layers(features) + if self.training: + return 
( + None, + None, + self.center_losses(center, center_targets, center_weights), + self.offset_losses(offset, offset_targets, offset_weights), + ) + else: + center = F.interpolate( + center, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + offset = ( + F.interpolate( + offset, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + * self.common_stride + ) + return center, offset, {}, {} + + def layers(self, features): + assert self.decoder_only + y = super().layers(features) + # center + center = self.center_head(y) + center = self.center_predictor(center) + # offset + offset = self.offset_head(y) + offset = self.offset_predictor(offset) + return center, offset + + def center_losses(self, predictions, targets, weights): + predictions = F.interpolate( + predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + loss = self.center_loss(predictions, targets) * weights + if weights.sum() > 0: + loss = loss.sum() / weights.sum() + else: + loss = loss.sum() * 0 + losses = {"loss_center": loss * self.center_loss_weight} + return losses + + def offset_losses(self, predictions, targets, weights): + predictions = ( + F.interpolate( + predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + * self.common_stride + ) + loss = self.offset_loss(predictions, targets) * weights + if weights.sum() > 0: + loss = loss.sum() / weights.sum() + else: + loss = loss.sum() * 0 + losses = {"loss_offset": loss * self.offset_loss_weight} + return losses diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/post_processing.py b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/post_processing.py new file mode 100644 index 0000000..194724e --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/post_processing.py @@ -0,0 +1,234 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/model/post_processing/instance_post_processing.py # noqa + +from collections import Counter +import torch +import torch.nn.functional as F + + +def find_instance_center(center_heatmap, threshold=0.1, nms_kernel=3, top_k=None): + """ + Find the center points from the center heatmap. + Args: + center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output. + threshold: A float, threshold applied to center heatmap score. + nms_kernel: An integer, NMS max pooling kernel size. + top_k: An integer, top k centers to keep. + Returns: + A Tensor of shape [K, 2] where K is the number of center points. The + order of second dim is (y, x). + """ + # Thresholding, setting values below threshold to -1. + center_heatmap = F.threshold(center_heatmap, threshold, -1) + + # NMS + nms_padding = (nms_kernel - 1) // 2 + center_heatmap_max_pooled = F.max_pool2d( + center_heatmap, kernel_size=nms_kernel, stride=1, padding=nms_padding + ) + center_heatmap[center_heatmap != center_heatmap_max_pooled] = -1 + + # Squeeze first two dimensions. + center_heatmap = center_heatmap.squeeze() + assert len(center_heatmap.size()) == 2, "Something is wrong with center heatmap dimension." + + # Find non-zero elements. + if top_k is None: + return torch.nonzero(center_heatmap > 0) + else: + # find top k centers. 
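+        # Keep only locations strictly above the k-th highest score; clamping that
+        # score at 0 ensures positions suppressed to -1 by thresholding/NMS are never selected.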
+ top_k_scores, _ = torch.topk(torch.flatten(center_heatmap), top_k) + return torch.nonzero(center_heatmap > top_k_scores[-1].clamp_(min=0)) + + +def group_pixels(center_points, offsets): + """ + Gives each pixel in the image an instance id. + Args: + center_points: A Tensor of shape [K, 2] where K is the number of center points. + The order of second dim is (y, x). + offsets: A Tensor of shape [2, H, W] of raw offset output. The order of + second dim is (offset_y, offset_x). + Returns: + A Tensor of shape [1, H, W] with values in range [1, K], which represents + the center this pixel belongs to. + """ + height, width = offsets.size()[1:] + + # Generates a coordinate map, where each location is the coordinate of + # that location. + y_coord, x_coord = torch.meshgrid( + torch.arange(height, dtype=offsets.dtype, device=offsets.device), + torch.arange(width, dtype=offsets.dtype, device=offsets.device), + ) + coord = torch.cat((y_coord.unsqueeze(0), x_coord.unsqueeze(0)), dim=0) + + center_loc = coord + offsets + center_loc = center_loc.flatten(1).T.unsqueeze_(0) # [1, H*W, 2] + center_points = center_points.unsqueeze(1) # [K, 1, 2] + + # Distance: [K, H*W]. + distance = torch.norm(center_points - center_loc, dim=-1) + + # Finds center with minimum distance at each location, offset by 1, to + # reserve id=0 for stuff. + instance_id = torch.argmin(distance, dim=0).reshape((1, height, width)) + 1 + return instance_id + + +def get_instance_segmentation( + sem_seg, center_heatmap, offsets, thing_seg, thing_ids, threshold=0.1, nms_kernel=3, top_k=None +): + """ + Post-processing for instance segmentation, gets class agnostic instance id. + Args: + sem_seg: A Tensor of shape [1, H, W], predicted semantic label. + center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output. + offsets: A Tensor of shape [2, H, W] of raw offset output. The order of + second dim is (offset_y, offset_x). + thing_seg: A Tensor of shape [1, H, W], predicted foreground mask, + if not provided, inference from semantic prediction. + thing_ids: A set of ids from contiguous category ids belonging + to thing categories. + threshold: A float, threshold applied to center heatmap score. + nms_kernel: An integer, NMS max pooling kernel size. + top_k: An integer, top k centers to keep. + Returns: + A Tensor of shape [1, H, W] with value 0 represent stuff (not instance) + and other positive values represent different instances. + A Tensor of shape [1, K, 2] where K is the number of center points. + The order of second dim is (y, x). + """ + center_points = find_instance_center( + center_heatmap, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k + ) + if center_points.size(0) == 0: + return torch.zeros_like(sem_seg), center_points.unsqueeze(0) + ins_seg = group_pixels(center_points, offsets) + return thing_seg * ins_seg, center_points.unsqueeze(0) + + +def merge_semantic_and_instance( + sem_seg, ins_seg, semantic_thing_seg, label_divisor, thing_ids, stuff_area, void_label +): + """ + Post-processing for panoptic segmentation, by merging semantic segmentation + label and class agnostic instance segmentation label. + Args: + sem_seg: A Tensor of shape [1, H, W], predicted category id for each pixel. + ins_seg: A Tensor of shape [1, H, W], predicted instance id for each pixel. + semantic_thing_seg: A Tensor of shape [1, H, W], predicted foreground mask. + label_divisor: An integer, used to convert panoptic id = + semantic id * label_divisor + instance_id. 
+ thing_ids: Set, a set of ids from contiguous category ids belonging + to thing categories. + stuff_area: An integer, remove stuff whose area is less tan stuff_area. + void_label: An integer, indicates the region has no confident prediction. + Returns: + A Tensor of shape [1, H, W]. + """ + # In case thing mask does not align with semantic prediction. + pan_seg = torch.zeros_like(sem_seg) + void_label + is_thing = (ins_seg > 0) & (semantic_thing_seg > 0) + + # Keep track of instance id for each class. + class_id_tracker = Counter() + + # Paste thing by majority voting. + instance_ids = torch.unique(ins_seg) + for ins_id in instance_ids: + if ins_id == 0: + continue + # Make sure only do majority voting within `semantic_thing_seg`. + thing_mask = (ins_seg == ins_id) & is_thing + if torch.nonzero(thing_mask).size(0) == 0: + continue + class_id, _ = torch.mode(sem_seg[thing_mask].view(-1)) + class_id_tracker[class_id.item()] += 1 + new_ins_id = class_id_tracker[class_id.item()] + pan_seg[thing_mask] = class_id * label_divisor + new_ins_id + + # Paste stuff to unoccupied area. + class_ids = torch.unique(sem_seg) + for class_id in class_ids: + if class_id.item() in thing_ids: + # thing class + continue + # Calculate stuff area. + stuff_mask = (sem_seg == class_id) & (ins_seg == 0) + if stuff_mask.sum().item() >= stuff_area: + pan_seg[stuff_mask] = class_id * label_divisor + + return pan_seg + + +def get_panoptic_segmentation( + sem_seg, + center_heatmap, + offsets, + thing_ids, + label_divisor, + stuff_area, + void_label, + threshold=0.1, + nms_kernel=7, + top_k=200, + foreground_mask=None, +): + """ + Post-processing for panoptic segmentation. + Args: + sem_seg: A Tensor of shape [1, H, W] of predicted semantic label. + center_heatmap: A Tensor of shape [1, H, W] of raw center heatmap output. + offsets: A Tensor of shape [2, H, W] of raw offset output. The order of + second dim is (offset_y, offset_x). + thing_ids: A set of ids from contiguous category ids belonging + to thing categories. + label_divisor: An integer, used to convert panoptic id = + semantic id * label_divisor + instance_id. + stuff_area: An integer, remove stuff whose area is less tan stuff_area. + void_label: An integer, indicates the region has no confident prediction. + threshold: A float, threshold applied to center heatmap score. + nms_kernel: An integer, NMS max pooling kernel size. + top_k: An integer, top k centers to keep. + foreground_mask: Optional, A Tensor of shape [1, H, W] of predicted + binary foreground mask. If not provided, it will be generated from + sem_seg. + Returns: + A Tensor of shape [1, H, W], int64. 
+ """ + if sem_seg.dim() != 3 and sem_seg.size(0) != 1: + raise ValueError("Semantic prediction with un-supported shape: {}.".format(sem_seg.size())) + if center_heatmap.dim() != 3: + raise ValueError( + "Center prediction with un-supported dimension: {}.".format(center_heatmap.dim()) + ) + if offsets.dim() != 3: + raise ValueError("Offset prediction with un-supported dimension: {}.".format(offsets.dim())) + if foreground_mask is not None: + if foreground_mask.dim() != 3 and foreground_mask.size(0) != 1: + raise ValueError( + "Foreground prediction with un-supported shape: {}.".format(sem_seg.size()) + ) + thing_seg = foreground_mask + else: + # inference from semantic segmentation + thing_seg = torch.zeros_like(sem_seg) + for thing_class in list(thing_ids): + thing_seg[sem_seg == thing_class] = 1 + + instance, center = get_instance_segmentation( + sem_seg, + center_heatmap, + offsets, + thing_seg, + thing_ids, + threshold=threshold, + nms_kernel=nms_kernel, + top_k=top_k, + ) + panoptic = merge_semantic_and_instance( + sem_seg, instance, thing_seg, label_divisor, thing_ids, stuff_area, void_label + ) + + return panoptic, center diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py new file mode 100644 index 0000000..a575c67 --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/panoptic_deeplab/target_generator.py @@ -0,0 +1,155 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/aa934324b55a34ce95fea143aea1cb7a6dbe04bd/segmentation/data/transforms/target_transforms.py#L11 # noqa +import numpy as np +import torch + + +class PanopticDeepLabTargetGenerator(object): + """ + Generates training targets for Panoptic-DeepLab. + """ + + def __init__( + self, + ignore_label, + thing_ids, + sigma=8, + ignore_stuff_in_offset=False, + small_instance_area=0, + small_instance_weight=1, + ignore_crowd_in_semantic=False, + ): + """ + Args: + ignore_label: Integer, the ignore label for semantic segmentation. + thing_ids: Set, a set of ids from contiguous category ids belonging + to thing categories. + sigma: the sigma for Gaussian kernel. + ignore_stuff_in_offset: Boolean, whether to ignore stuff region when + training the offset branch. + small_instance_area: Integer, indicates largest area for small instances. + small_instance_weight: Integer, indicates semantic loss weights for + small instances. + ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in + semantic segmentation branch, crowd region is ignored in the original + TensorFlow implementation. + """ + self.ignore_label = ignore_label + self.thing_ids = set(thing_ids) + self.ignore_stuff_in_offset = ignore_stuff_in_offset + self.small_instance_area = small_instance_area + self.small_instance_weight = small_instance_weight + self.ignore_crowd_in_semantic = ignore_crowd_in_semantic + + # Generate the default Gaussian image for each center + self.sigma = sigma + size = 6 * sigma + 3 + x = np.arange(0, size, 1, float) + y = x[:, np.newaxis] + x0, y0 = 3 * sigma + 1, 3 * sigma + 1 + self.g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma**2)) + + def __call__(self, panoptic, segments_info): + """Generates the training target. 
+ reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py # noqa + reference: https://github.com/facebookresearch/detectron2/blob/main/datasets/prepare_panoptic_fpn.py#L18 # noqa + + Args: + panoptic: numpy.array, panoptic label, we assume it is already + converted from rgb image by panopticapi.utils.rgb2id. + segments_info (list[dict]): see detectron2 documentation of "Use Custom Datasets". + + Returns: + A dictionary with fields: + - sem_seg: Tensor, semantic label, shape=(H, W). + - center: Tensor, center heatmap, shape=(H, W). + - center_points: List, center coordinates, with tuple + (y-coord, x-coord). + - offset: Tensor, offset, shape=(2, H, W), first dim is + (offset_y, offset_x). + - sem_seg_weights: Tensor, loss weight for semantic prediction, + shape=(H, W). + - center_weights: Tensor, ignore region of center prediction, + shape=(H, W), used as weights for center regression 0 is + ignore, 1 is has instance. Multiply this mask to loss. + - offset_weights: Tensor, ignore region of offset prediction, + shape=(H, W), used as weights for offset regression 0 is + ignore, 1 is has instance. Multiply this mask to loss. + """ + height, width = panoptic.shape[0], panoptic.shape[1] + semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label + center = np.zeros((height, width), dtype=np.float32) + center_pts = [] + offset = np.zeros((2, height, width), dtype=np.float32) + y_coord, x_coord = np.meshgrid( + np.arange(height, dtype=np.float32), np.arange(width, dtype=np.float32), indexing="ij" + ) + # Generate pixel-wise loss weights + semantic_weights = np.ones_like(panoptic, dtype=np.uint8) + # 0: ignore, 1: has instance + # three conditions for a region to be ignored for instance branches: + # (1) It is labeled as `ignore_label` + # (2) It is crowd region (iscrowd=1) + # (3) (Optional) It is stuff region (for offset branch) + center_weights = np.zeros_like(panoptic, dtype=np.uint8) + offset_weights = np.zeros_like(panoptic, dtype=np.uint8) + for seg in segments_info: + cat_id = seg["category_id"] + if not (self.ignore_crowd_in_semantic and seg["iscrowd"]): + semantic[panoptic == seg["id"]] = cat_id + if not seg["iscrowd"]: + # Ignored regions are not in `segments_info`. + # Handle crowd region. 
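+                # Non-crowd pixels supervise the center branch; the offset branch may
+                # additionally exclude stuff pixels when `ignore_stuff_in_offset` is enabled.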
+ center_weights[panoptic == seg["id"]] = 1 + if not self.ignore_stuff_in_offset or cat_id in self.thing_ids: + offset_weights[panoptic == seg["id"]] = 1 + if cat_id in self.thing_ids: + # find instance center + mask_index = np.where(panoptic == seg["id"]) + if len(mask_index[0]) == 0: + # the instance is completely cropped + continue + + # Find instance area + ins_area = len(mask_index[0]) + if ins_area < self.small_instance_area: + semantic_weights[panoptic == seg["id"]] = self.small_instance_weight + + center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1]) + center_pts.append([center_y, center_x]) + + # generate center heatmap + y, x = int(round(center_y)), int(round(center_x)) + sigma = self.sigma + # upper left + ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1)) + # bottom right + br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2)) + + # start and end indices in default Gaussian image + gaussian_x0, gaussian_x1 = max(0, -ul[0]), min(br[0], width) - ul[0] + gaussian_y0, gaussian_y1 = max(0, -ul[1]), min(br[1], height) - ul[1] + + # start and end indices in center heatmap image + center_x0, center_x1 = max(0, ul[0]), min(br[0], width) + center_y0, center_y1 = max(0, ul[1]), min(br[1], height) + center[center_y0:center_y1, center_x0:center_x1] = np.maximum( + center[center_y0:center_y1, center_x0:center_x1], + self.g[gaussian_y0:gaussian_y1, gaussian_x0:gaussian_x1], + ) + + # generate offset (2, h, w) -> (y-dir, x-dir) + offset[0][mask_index] = center_y - y_coord[mask_index] + offset[1][mask_index] = center_x - x_coord[mask_index] + + center_weights = center_weights[None] + offset_weights = offset_weights[None] + return dict( + sem_seg=torch.as_tensor(semantic.astype("long")), + center=torch.as_tensor(center.astype(np.float32)), + center_points=center_pts, + offset=torch.as_tensor(offset.astype(np.float32)), + sem_seg_weights=torch.as_tensor(semantic_weights.astype(np.float32)), + center_weights=torch.as_tensor(center_weights.astype(np.float32)), + offset_weights=torch.as_tensor(offset_weights.astype(np.float32)), + ) diff --git a/data_processing/detectron2/projects/Panoptic-DeepLab/train_net.py b/data_processing/detectron2/projects/Panoptic-DeepLab/train_net.py new file mode 100644 index 0000000..780764f --- /dev/null +++ b/data_processing/detectron2/projects/Panoptic-DeepLab/train_net.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +Panoptic-DeepLab Training Script. +This script is a simplified version of the training script in detectron2/tools. 
+""" + +import os +import torch + +import detectron2.data.transforms as T +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, +) +from detectron2.projects.deeplab import build_lr_scheduler +from detectron2.projects.panoptic_deeplab import ( + PanopticDeeplabDatasetMapper, + add_panoptic_deeplab_config, +) +from detectron2.solver import get_default_optimizer_params +from detectron2.solver.build import maybe_add_gradient_clipping + + +def build_sem_seg_train_aug(cfg): + augs = [ + T.ResizeShortestEdge( + cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + ) + ] + if cfg.INPUT.CROP.ENABLED: + augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) + augs.append(T.RandomFlip()) + return augs + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains a number pre-defined logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can use the cleaner + "SimpleTrainer", or write your own training loop. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED: + return None + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type in ["cityscapes_panoptic_seg", "coco_panoptic_seg"]: + evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) + if evaluator_type == "cityscapes_panoptic_seg": + evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) + evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) + if evaluator_type == "coco_panoptic_seg": + # `thing_classes` in COCO panoptic metadata includes both thing and + # stuff classes for visualization. COCOEvaluator requires metadata + # which only contains thing classes, thus we map the name of + # panoptic datasets to their corresponding instance datasets. + dataset_name_mapper = { + "coco_2017_val_panoptic": "coco_2017_val", + "coco_2017_val_100_panoptic": "coco_2017_val_100", + } + evaluator_list.append( + COCOEvaluator(dataset_name_mapper[dataset_name], output_dir=output_folder) + ) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + elif len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def build_train_loader(cls, cfg): + mapper = PanopticDeeplabDatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg)) + return build_detection_train_loader(cfg, mapper=mapper) + + @classmethod + def build_lr_scheduler(cls, cfg, optimizer): + """ + It now calls :func:`detectron2.solver.build_lr_scheduler`. 
+ Overwrite it if you'd like a different scheduler. + """ + return build_lr_scheduler(cfg, optimizer) + + @classmethod + def build_optimizer(cls, cfg, model): + """ + Build an optimizer from config. + """ + params = get_default_optimizer_params( + model, + weight_decay=cfg.SOLVER.WEIGHT_DECAY, + weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, + ) + + optimizer_type = cfg.SOLVER.OPTIMIZER + if optimizer_type == "SGD": + return maybe_add_gradient_clipping(cfg, torch.optim.SGD)( + params, + cfg.SOLVER.BASE_LR, + momentum=cfg.SOLVER.MOMENTUM, + nesterov=cfg.SOLVER.NESTEROV, + ) + elif optimizer_type == "ADAM": + return maybe_add_gradient_clipping(cfg, torch.optim.Adam)(params, cfg.SOLVER.BASE_LR) + else: + raise NotImplementedError(f"no optimizer type {optimizer_type}") + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_panoptic_deeplab_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/projects/PointRend/README.md b/data_processing/detectron2/projects/PointRend/README.md new file mode 100644 index 0000000..79d75d5 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/README.md @@ -0,0 +1,167 @@ +# PointRend: Image Segmentation as Rendering + +Alexander Kirillov, Yuxin Wu, Kaiming He, Ross Girshick + +[[`arXiv`](https://arxiv.org/abs/1912.08193)] [[`BibTeX`](#CitingPointRend)] + +
+ +

+ +In this repository, we release code for PointRend in Detectron2. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. + +## Quick start and visualization + +This [Colab Notebook](https://colab.research.google.com/drive/1isGPL5h5_cKoPPhVL9XhMokRtHDvmMVL) tutorial contains examples of PointRend usage and visualizations of its point sampling stages. + +## Training + +To train a model with 8 GPUs run: +```bash +cd /path/to/detectron2/projects/PointRend +python train_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml --num-gpus 8 +``` + +## Evaluation + +Model evaluation can be done similarly: +```bash +cd /path/to/detectron2/projects/PointRend +python train_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint +``` + +# Pretrained Models + +## Instance Segmentation +#### COCO + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Mask head | Backbone | lr sched | Output resolution | mask AP | mask AP* | model id | download |
+| :-------- | :------- | :------: | :---------------: | :-----: | :------: | :-------- | :------- |
+| PointRend | R50-FPN  | 1×       | 224×224           | 36.2    | 39.7     | 164254221 | model \| metrics |
+| PointRend | R50-FPN  | 3×       | 224×224           | 38.3    | 41.6     | 164955410 | model \| metrics |
+| PointRend | R101-FPN | 3×       | 224×224           | 40.1    | 43.8     |           | model \| metrics |
+| PointRend | X101-FPN | 3×       | 224×224           | 41.1    | 44.7     |           | model \| metrics |
+ +AP* is COCO mask AP evaluated against the higher-quality LVIS annotations; see the paper for details. +Run `python detectron2/datasets/prepare_cocofied_lvis.py` to prepare GT files for AP* evaluation. +Since LVIS annotations are not exhaustive, `lvis-api` and not `cocoapi` should be used to evaluate AP*. + +#### Cityscapes +Cityscapes model is trained with ImageNet pretraining. + + + + + + + + + + + + + + + + + + + + +
+| Mask head | Backbone | lr sched | Output resolution | mask AP | model id | download |
+| :-------- | :------- | :------: | :---------------: | :-----: | :------- | :------- |
+| PointRend | R50-FPN  | 1×       | 224×224           | 35.9    | 164255101 | model \| metrics |
+ + +## Semantic Segmentation + +#### Cityscapes +Cityscapes model is trained with ImageNet pretraining. + + + + + + + + + + + + + + + + + + +
+| Method | Backbone | Output resolution | mIoU | model id | download |
+| :----- | :------- | :---------------: | :--: | :------- | :------- |
+| SemanticFPN + PointRend | R101-FPN | 1024×2048 | 78.9 | 202576688 | model \| metrics |
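
The semantic-segmentation model above can be run the same way through `DefaultPredictor`; with the `SemanticSegmentor` meta-architecture the output dictionary carries per-class logits under the `"sem_seg"` key. This is again only a sketch under the same assumptions as the instance-segmentation example, with hypothetical checkpoint and input paths.

```python
import cv2
import torch
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

from point_rend import add_pointrend_config

cfg = get_cfg()
add_pointrend_config(cfg)
cfg.merge_from_file("configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml")
cfg.MODEL.WEIGHTS = "/path/to/model_final.pkl"  # hypothetical local checkpoint path

predictor = DefaultPredictor(cfg)
image = cv2.imread("input.png")  # hypothetical Cityscapes-style BGR image
outputs = predictor(image)

# "sem_seg" holds (num_classes, H, W) logits; argmax over classes gives the label map.
label_map = torch.argmax(outputs["sem_seg"], dim=0).cpu().numpy()
print(label_map.shape)
```
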
+ +## Citing PointRend + +If you use PointRend, please use the following BibTeX entry. + +```BibTeX +@InProceedings{kirillov2019pointrend, + title={{PointRend}: Image Segmentation as Rendering}, + author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick}, + journal={ArXiv:1912.08193}, + year={2019} +} +``` + +## Citing Implicit PointRend + +If you use Implicit PointRend, please use the following BibTeX entry. + +```BibTeX +@InProceedings{cheng2021pointly, + title={Pointly-Supervised Instance Segmentation, + author={Bowen Cheng and Omkar Parkhi and Alexander Kirillov}, + journal={ArXiv}, + year={2021} +} +``` diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-Implicit-PointRend.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-Implicit-PointRend.yaml new file mode 100644 index 0000000..5ebafb3 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-Implicit-PointRend.yaml @@ -0,0 +1,25 @@ +_BASE_: "../../../../configs/Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: true + ROI_MASK_HEAD: + NAME: "ImplicitPointRendMaskHead" + POOLER_TYPE: "" # No RoI pooling, let the head process image features directly + FC_DIM: 1024 + NUM_FC: 2 + POINT_HEAD: + NAME: "ImplicitPointHead" + FC_DIM: 256 + NUM_FC: 3 + IN_FEATURES: ["p2"] + NUM_CLASSES: 80 + CLS_AGNOSTIC_MASK: False + TRAIN_NUM_POINTS: 196 + SUBDIVISION_STEPS: 3 + SUBDIVISION_NUM_POINTS: 784 + IMPLICIT_POINTREND: + IMAGE_FEATURE_ENABLED: True + POS_ENC_ENABLED: True + PARAMS_L2_REGULARIZER: 0.00001 +INPUT: + # PointRend for instance segmentation does not work with "polygon" mask_format. + MASK_FORMAT: "bitmask" diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-PointRend-RCNN-FPN.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-PointRend-RCNN-FPN.yaml new file mode 100644 index 0000000..e68e707 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-PointRend-RCNN-FPN.yaml @@ -0,0 +1,20 @@ +_BASE_: "../../../../configs/Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: true + ROI_BOX_HEAD: + TRAIN_ON_PRED_BOXES: True + ROI_MASK_HEAD: + POOLER_TYPE: "" # No RoI pooling, let the head process image features directly + NAME: "PointRendMaskHead" + FC_DIM: 1024 + NUM_FC: 2 + OUTPUT_SIDE_RESOLUTION: 7 + IN_FEATURES: ["p2"] # for the coarse mask head + POINT_HEAD_ON: True + POINT_HEAD: + FC_DIM: 256 + NUM_FC: 3 + IN_FEATURES: ["p2"] +INPUT: + # PointRend for instance segmentation does not work with "polygon" mask_format. + MASK_FORMAT: "bitmask" diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/implicit_pointrend_R_50_FPN_1x_coco.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/implicit_pointrend_R_50_FPN_1x_coco.yaml new file mode 100644 index 0000000..ba35c24 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/implicit_pointrend_R_50_FPN_1x_coco.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Implicit-PointRend.yaml" +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + RESNETS: + DEPTH: 50 +# To add COCO AP evaluation against the higher-quality LVIS annotations. 
+# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/implicit_pointrend_R_50_FPN_3x_coco.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/implicit_pointrend_R_50_FPN_3x_coco.yaml new file mode 100644 index 0000000..884236d --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/implicit_pointrend_R_50_FPN_3x_coco.yaml @@ -0,0 +1,11 @@ +_BASE_: "Base-Implicit-PointRend.yaml" +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 +# To add COCO AP evaluation against the higher-quality LVIS annotations. +# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_101_FPN_3x_coco.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_101_FPN_3x_coco.yaml new file mode 100644 index 0000000..4269130 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_101_FPN_3x_coco.yaml @@ -0,0 +1,12 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-101.pkl + MASK_ON: true + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 +# To add COCO AP evaluation against the higher-quality LVIS annotations. +# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml new file mode 100644 index 0000000..0402d6d --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml @@ -0,0 +1,22 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 8 + POINT_HEAD: + NUM_CLASSES: 8 +DATASETS: + TEST: ("cityscapes_fine_instance_seg_val",) + TRAIN: ("cityscapes_fine_instance_seg_train",) +SOLVER: + BASE_LR: 0.01 + IMS_PER_BATCH: 8 + MAX_ITER: 24000 + STEPS: (18000,) +INPUT: + MAX_SIZE_TEST: 2048 + MAX_SIZE_TRAIN: 2048 + MIN_SIZE_TEST: 1024 + MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml new file mode 100644 index 0000000..0249b49 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml @@ -0,0 +1,8 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + RESNETS: + DEPTH: 50 +# To add COCO AP evaluation against the higher-quality LVIS annotations. 
+# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml new file mode 100644 index 0000000..a571b4c --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml @@ -0,0 +1,12 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 +# To add COCO AP evaluation against the higher-quality LVIS annotations. +# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") + diff --git a/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_coco.yaml b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_coco.yaml new file mode 100644 index 0000000..85d26f3 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_coco.yaml @@ -0,0 +1,16 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + MASK_ON: True + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 +# To add COCO AP evaluation against the higher-quality LVIS annotations. +# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") diff --git a/data_processing/detectron2/projects/PointRend/configs/SemanticSegmentation/Base-PointRend-Semantic-FPN.yaml b/data_processing/detectron2/projects/PointRend/configs/SemanticSegmentation/Base-PointRend-Semantic-FPN.yaml new file mode 100644 index 0000000..9b7a1b4 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/SemanticSegmentation/Base-PointRend-Semantic-FPN.yaml @@ -0,0 +1,20 @@ +_BASE_: "../../../../configs/Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + BACKBONE: + FREEZE_AT: 0 + SEM_SEG_HEAD: + NAME: "PointRendSemSegHead" + POINT_HEAD: + NUM_CLASSES: 54 + FC_DIM: 256 + NUM_FC: 3 + IN_FEATURES: ["p2"] + TRAIN_NUM_POINTS: 1024 + SUBDIVISION_STEPS: 2 + SUBDIVISION_NUM_POINTS: 8192 + COARSE_SEM_SEG_HEAD_NAME: "SemSegFPNHead" + COARSE_PRED_EACH_LAYER: False +DATASETS: + TRAIN: ("coco_2017_train_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) diff --git a/data_processing/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml b/data_processing/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml new file mode 100644 index 0000000..6be11fa --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml @@ -0,0 +1,33 @@ +_BASE_: Base-PointRend-Semantic-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-101.pkl + RESNETS: + DEPTH: 101 + SEM_SEG_HEAD: + NUM_CLASSES: 19 + POINT_HEAD: + NUM_CLASSES: 19 + TRAIN_NUM_POINTS: 2048 + SUBDIVISION_NUM_POINTS: 8192 +DATASETS: + TRAIN: ("cityscapes_fine_sem_seg_train",) + TEST: ("cityscapes_fine_sem_seg_val",) +SOLVER: + BASE_LR: 0.01 + STEPS: (40000, 55000) + MAX_ITER: 65000 
+ IMS_PER_BATCH: 32 +INPUT: + MIN_SIZE_TRAIN: (512, 768, 1024, 1280, 1536, 1792, 2048) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 4096 + MAX_SIZE_TEST: 2048 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (512, 1024) + SINGLE_CATEGORY_MAX_AREA: 0.75 + COLOR_AUG_SSD: True +DATALOADER: + NUM_WORKERS: 10 diff --git a/data_processing/detectron2/projects/PointRend/point_rend/__init__.py b/data_processing/detectron2/projects/PointRend/point_rend/__init__.py new file mode 100644 index 0000000..e3050cb --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .config import add_pointrend_config +from .mask_head import PointRendMaskHead, ImplicitPointRendMaskHead +from .semantic_seg import PointRendSemSegHead +from .color_augmentation import ColorAugSSDTransform + +from . import roi_heads as _ # only registration diff --git a/data_processing/detectron2/projects/PointRend/point_rend/color_augmentation.py b/data_processing/detectron2/projects/PointRend/point_rend/color_augmentation.py new file mode 100644 index 0000000..cdcb051 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/color_augmentation.py @@ -0,0 +1,98 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import random +import cv2 +from fvcore.transforms.transform import Transform + + +class ColorAugSSDTransform(Transform): + """ + A color related data augmentation used in Single Shot Multibox Detector (SSD). + + Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, + Scott Reed, Cheng-Yang Fu, Alexander C. Berg. + SSD: Single Shot MultiBox Detector. ECCV 2016. + + Implementation based on: + + https://github.com/weiliu89/caffe/blob + /4817bf8b4200b35ada8ed0dc378dceaf38c539e4 + /src/caffe/util/im_transforms.cpp + + https://github.com/chainer/chainercv/blob + /7159616642e0be7c5b3ef380b848e16b7e99355b/chainercv + /links/model/ssd/transforms.py + """ + + def __init__( + self, + img_format, + brightness_delta=32, + contrast_low=0.5, + contrast_high=1.5, + saturation_low=0.5, + saturation_high=1.5, + hue_delta=18, + ): + super().__init__() + assert img_format in ["BGR", "RGB"] + self.is_rgb = img_format == "RGB" + del img_format + self._set_attributes(locals()) + + def apply_coords(self, coords): + return coords + + def apply_segmentation(self, segmentation): + return segmentation + + def apply_image(self, img, interp=None): + if self.is_rgb: + img = img[:, :, [2, 1, 0]] + img = self.brightness(img) + if random.randrange(2): + img = self.contrast(img) + img = self.saturation(img) + img = self.hue(img) + else: + img = self.saturation(img) + img = self.hue(img) + img = self.contrast(img) + if self.is_rgb: + img = img[:, :, [2, 1, 0]] + return img + + def convert(self, img, alpha=1, beta=0): + img = img.astype(np.float32) * alpha + beta + img = np.clip(img, 0, 255) + return img.astype(np.uint8) + + def brightness(self, img): + if random.randrange(2): + return self.convert( + img, beta=random.uniform(-self.brightness_delta, self.brightness_delta) + ) + return img + + def contrast(self, img): + if random.randrange(2): + return self.convert(img, alpha=random.uniform(self.contrast_low, self.contrast_high)) + return img + + def saturation(self, img): + if random.randrange(2): + img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + img[:, :, 1] = self.convert( + 
img[:, :, 1], alpha=random.uniform(self.saturation_low, self.saturation_high) + ) + return cv2.cvtColor(img, cv2.COLOR_HSV2BGR) + return img + + def hue(self, img): + if random.randrange(2): + img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + img[:, :, 0] = ( + img[:, :, 0].astype(int) + random.randint(-self.hue_delta, self.hue_delta) + ) % 180 + return cv2.cvtColor(img, cv2.COLOR_HSV2BGR) + return img diff --git a/data_processing/detectron2/projects/PointRend/point_rend/config.py b/data_processing/detectron2/projects/PointRend/point_rend/config.py new file mode 100644 index 0000000..a02c782 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/config.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +from detectron2.config import CfgNode as CN + + +def add_pointrend_config(cfg): + """ + Add config for PointRend. + """ + # We retry random cropping until no single category in semantic segmentation GT occupies more + # than `SINGLE_CATEGORY_MAX_AREA` part of the crop. + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0 + # Color augmentatition from SSD paper for semantic segmentation model during training. + cfg.INPUT.COLOR_AUG_SSD = False + + # Names of the input feature maps to be used by a coarse mask head. + cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2",) + cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024 + cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2 + # The side size of a coarse mask head prediction. + cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7 + # True if point head is used. + cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False + + cfg.MODEL.POINT_HEAD = CN() + cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead" + cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80 + # Names of the input feature maps to be used by a mask point head. + cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2",) + # Number of points sampled during training for a mask point head. + cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14 + # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the + # original paper. + cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3 + # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in + # the original paper. + cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75 + # Number of subdivision steps during inference. + cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5 + # Maximum number of points selected at each subdivision step (N). + cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28 + cfg.MODEL.POINT_HEAD.FC_DIM = 256 + cfg.MODEL.POINT_HEAD.NUM_FC = 3 + cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False + # If True, then coarse prediction features are used as inout for each layer in PointRend's MLP. + cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True + cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME = "SemSegFPNHead" + + """ + Add config for Implicit PointRend. + """ + cfg.MODEL.IMPLICIT_POINTREND = CN() + + cfg.MODEL.IMPLICIT_POINTREND.IMAGE_FEATURE_ENABLED = True + cfg.MODEL.IMPLICIT_POINTREND.POS_ENC_ENABLED = True + + cfg.MODEL.IMPLICIT_POINTREND.PARAMS_L2_REGULARIZER = 0.00001 diff --git a/data_processing/detectron2/projects/PointRend/point_rend/mask_head.py b/data_processing/detectron2/projects/PointRend/point_rend/mask_head.py new file mode 100644 index 0000000..46dd647 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/mask_head.py @@ -0,0 +1,435 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import logging +import math +import numpy as np +from typing import Dict, List, Tuple +import fvcore.nn.weight_init as weight_init +import torch +from torch import Tensor, nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ShapeSpec, cat, interpolate +from detectron2.modeling import ROI_MASK_HEAD_REGISTRY +from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference, mask_rcnn_loss +from detectron2.structures import Boxes + +from .point_features import ( + generate_regular_grid_point_coords, + get_point_coords_wrt_image, + get_uncertain_point_coords_on_grid, + get_uncertain_point_coords_with_randomness, + point_sample, + point_sample_fine_grained_features, + sample_point_labels, +) +from .point_head import build_point_head, roi_mask_point_loss + + +def calculate_uncertainty(logits, classes): + """ + We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the + foreground class in `classes`. + Args: + logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or + class-agnostic, where R is the total number of predicted masks in all images and C is + the number of foreground classes. The values are logits. + classes (list): A list of length R that contains either predicted of ground truth class + for eash predicted mask. + Returns: + scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with + the most uncertain locations having the highest uncertainty score. + """ + if logits.shape[1] == 1: + gt_class_logits = logits.clone() + else: + gt_class_logits = logits[ + torch.arange(logits.shape[0], device=logits.device), classes + ].unsqueeze(1) + return -(torch.abs(gt_class_logits)) + + +class ConvFCHead(nn.Module): + """ + A mask head with fully connected layers. Given pooled features it first reduces channels and + spatial dimensions with conv layers and then uses FC layers to predict coarse masks analogously + to the standard box head. 
+ """ + + _version = 2 + + @configurable + def __init__( + self, input_shape: ShapeSpec, *, conv_dim: int, fc_dims: List[int], output_shape: Tuple[int] + ): + """ + Args: + conv_dim: the output dimension of the conv layers + fc_dims: a list of N>0 integers representing the output dimensions of N FC layers + output_shape: shape of the output mask prediction + """ + super().__init__() + + # fmt: off + input_channels = input_shape.channels + input_h = input_shape.height + input_w = input_shape.width + self.output_shape = output_shape + # fmt: on + + self.conv_layers = [] + if input_channels > conv_dim: + self.reduce_channel_dim_conv = Conv2d( + input_channels, + conv_dim, + kernel_size=1, + stride=1, + padding=0, + bias=True, + activation=F.relu, + ) + self.conv_layers.append(self.reduce_channel_dim_conv) + + self.reduce_spatial_dim_conv = Conv2d( + conv_dim, conv_dim, kernel_size=2, stride=2, padding=0, bias=True, activation=F.relu + ) + self.conv_layers.append(self.reduce_spatial_dim_conv) + + input_dim = conv_dim * input_h * input_w + input_dim //= 4 + + self.fcs = [] + for k, fc_dim in enumerate(fc_dims): + fc = nn.Linear(input_dim, fc_dim) + self.add_module("fc{}".format(k + 1), fc) + self.fcs.append(fc) + input_dim = fc_dim + + output_dim = int(np.prod(self.output_shape)) + + self.prediction = nn.Linear(fc_dims[-1], output_dim) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.prediction.weight, std=0.001) + nn.init.constant_(self.prediction.bias, 0) + + for layer in self.conv_layers: + weight_init.c2_msra_fill(layer) + for layer in self.fcs: + weight_init.c2_xavier_fill(layer) + + @classmethod + def from_config(cls, cfg, input_shape): + output_shape = ( + cfg.MODEL.ROI_HEADS.NUM_CLASSES, + cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION, + cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION, + ) + fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM + num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC + ret = dict( + input_shape=input_shape, + conv_dim=cfg.MODEL.ROI_MASK_HEAD.CONV_DIM, + fc_dims=[fc_dim] * num_fc, + output_shape=output_shape, + ) + return ret + + def forward(self, x): + N = x.shape[0] + for layer in self.conv_layers: + x = layer(x) + x = torch.flatten(x, start_dim=1) + for layer in self.fcs: + x = F.relu(layer(x)) + output_shape = [N] + list(self.output_shape) + return self.prediction(x).view(*output_shape) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + logger = logging.getLogger(__name__) + logger.warning( + "Weight format of PointRend models have changed! " + "Applying automatic conversion now ..." 
+ ) + for k in list(state_dict.keys()): + newk = k + if k.startswith(prefix + "coarse_mask_fc"): + newk = k.replace(prefix + "coarse_mask_fc", prefix + "fc") + if newk != k: + state_dict[newk] = state_dict[k] + del state_dict[k] + + +@ROI_MASK_HEAD_REGISTRY.register() +class PointRendMaskHead(nn.Module): + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()} + # point head + self._init_point_head(cfg, input_shape) + # coarse mask head + self.roi_pooler_in_features = cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES + self.roi_pooler_size = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION + self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()} + in_channels = np.sum([input_shape[f].channels for f in self.roi_pooler_in_features]) + self._init_roi_head( + cfg, + ShapeSpec( + channels=in_channels, + width=self.roi_pooler_size, + height=self.roi_pooler_size, + ), + ) + + def _init_roi_head(self, cfg, input_shape): + self.coarse_head = ConvFCHead(cfg, input_shape) + + def _init_point_head(self, cfg, input_shape): + # fmt: off + self.mask_point_on = cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON + if not self.mask_point_on: + return + assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES + self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES + self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS + self.mask_point_oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO + self.mask_point_importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO + # next three parameters are use in the adaptive subdivions inference procedure + self.mask_point_subdivision_init_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION + self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS + self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS + # fmt: on + + in_channels = int(np.sum([input_shape[f].channels for f in self.mask_point_in_features])) + self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1)) + + # An optimization to skip unused subdivision steps: if after subdivision, all pixels on + # the mask will be selected and recomputed anyway, we should just double our init_resolution + while ( + 4 * self.mask_point_subdivision_init_resolution**2 + <= self.mask_point_subdivision_num_points + ): + self.mask_point_subdivision_init_resolution *= 2 + self.mask_point_subdivision_steps -= 1 + + def forward(self, features, instances): + """ + Args: + features (dict[str, Tensor]): a dict of image-level features + instances (list[Instances]): proposals in training; detected + instances in inference + """ + if self.training: + proposal_boxes = [x.proposal_boxes for x in instances] + coarse_mask = self.coarse_head(self._roi_pooler(features, proposal_boxes)) + losses = {"loss_mask": mask_rcnn_loss(coarse_mask, instances)} + if not self.mask_point_on: + return losses + + point_coords, point_labels = self._sample_train_points(coarse_mask, instances) + point_fine_grained_features = self._point_pooler(features, proposal_boxes, point_coords) + point_logits = self._get_point_logits( + point_fine_grained_features, point_coords, coarse_mask + ) + losses["loss_mask_point"] = roi_mask_point_loss(point_logits, instances, point_labels) + return losses + else: + pred_boxes = [x.pred_boxes for x in instances] + coarse_mask = self.coarse_head(self._roi_pooler(features, pred_boxes)) + return 
self._subdivision_inference(features, coarse_mask, instances) + + def _roi_pooler(self, features: List[Tensor], boxes: List[Boxes]): + """ + Extract per-box feature. This is similar to RoIAlign(sampling_ratio=1) except: + 1. It's implemented by point_sample + 2. It pools features across all levels and concat them, while typically + RoIAlign select one level for every box. However in the config we only use + one level (p2) so there is no difference. + + Returns: + Tensor of shape (R, C, pooler_size, pooler_size) where R is the total number of boxes + """ + features_list = [features[k] for k in self.roi_pooler_in_features] + features_scales = [self._feature_scales[k] for k in self.roi_pooler_in_features] + + num_boxes = sum(x.tensor.size(0) for x in boxes) + output_size = self.roi_pooler_size + point_coords = generate_regular_grid_point_coords(num_boxes, output_size, boxes[0].device) + # For regular grids of points, this function is equivalent to `len(features_list)' calls + # of `ROIAlign` (with `SAMPLING_RATIO=1`), and concat the results. + roi_features, _ = point_sample_fine_grained_features( + features_list, features_scales, boxes, point_coords + ) + return roi_features.view(num_boxes, roi_features.shape[1], output_size, output_size) + + def _sample_train_points(self, coarse_mask, instances): + assert self.training + gt_classes = cat([x.gt_classes for x in instances]) + with torch.no_grad(): + # sample point_coords + point_coords = get_uncertain_point_coords_with_randomness( + coarse_mask, + lambda logits: calculate_uncertainty(logits, gt_classes), + self.mask_point_train_num_points, + self.mask_point_oversample_ratio, + self.mask_point_importance_sample_ratio, + ) + # sample point_labels + proposal_boxes = [x.proposal_boxes for x in instances] + cat_boxes = Boxes.cat(proposal_boxes) + point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords) + point_labels = sample_point_labels(instances, point_coords_wrt_image) + return point_coords, point_labels + + def _point_pooler(self, features, proposal_boxes, point_coords): + point_features_list = [features[k] for k in self.mask_point_in_features] + point_features_scales = [self._feature_scales[k] for k in self.mask_point_in_features] + # sample image-level features + point_fine_grained_features, _ = point_sample_fine_grained_features( + point_features_list, point_features_scales, proposal_boxes, point_coords + ) + return point_fine_grained_features + + def _get_point_logits(self, point_fine_grained_features, point_coords, coarse_mask): + coarse_features = point_sample(coarse_mask, point_coords, align_corners=False) + point_logits = self.point_head(point_fine_grained_features, coarse_features) + return point_logits + + def _subdivision_inference(self, features, mask_representations, instances): + assert not self.training + + pred_boxes = [x.pred_boxes for x in instances] + pred_classes = cat([x.pred_classes for x in instances]) + + mask_logits = None + # +1 here to include an initial step to generate the coarsest mask + # prediction with init_resolution, when mask_logits is None. + # We compute initial mask by sampling on a regular grid. coarse_mask + # can be used as initial mask as well, but it's typically very low-res + # so it will be completely overwritten during subdivision anyway. 
+ for _ in range(self.mask_point_subdivision_steps + 1): + if mask_logits is None: + point_coords = generate_regular_grid_point_coords( + pred_classes.size(0), + self.mask_point_subdivision_init_resolution, + pred_boxes[0].device, + ) + else: + mask_logits = interpolate( + mask_logits, scale_factor=2, mode="bilinear", align_corners=False + ) + uncertainty_map = calculate_uncertainty(mask_logits, pred_classes) + point_indices, point_coords = get_uncertain_point_coords_on_grid( + uncertainty_map, self.mask_point_subdivision_num_points + ) + + # Run the point head for every point in point_coords + fine_grained_features = self._point_pooler(features, pred_boxes, point_coords) + point_logits = self._get_point_logits( + fine_grained_features, point_coords, mask_representations + ) + + if mask_logits is None: + # Create initial mask_logits using point_logits on this regular grid + R, C, _ = point_logits.shape + mask_logits = point_logits.reshape( + R, + C, + self.mask_point_subdivision_init_resolution, + self.mask_point_subdivision_init_resolution, + ) + # The subdivision code will fail with the empty list of boxes + if len(pred_classes) == 0: + mask_rcnn_inference(mask_logits, instances) + return instances + else: + # Put point predictions to the right places on the upsampled grid. + R, C, H, W = mask_logits.shape + point_indices = point_indices.unsqueeze(1).expand(-1, C, -1) + mask_logits = ( + mask_logits.reshape(R, C, H * W) + .scatter_(2, point_indices, point_logits) + .view(R, C, H, W) + ) + mask_rcnn_inference(mask_logits, instances) + return instances + + +@ROI_MASK_HEAD_REGISTRY.register() +class ImplicitPointRendMaskHead(PointRendMaskHead): + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__(cfg, input_shape) + + def _init_roi_head(self, cfg, input_shape): + assert hasattr(self, "num_params"), "Please initialize point_head first!" 
+ self.parameter_head = ConvFCHead(cfg, input_shape, output_shape=(self.num_params,)) + self.regularizer = cfg.MODEL.IMPLICIT_POINTREND.PARAMS_L2_REGULARIZER + + def _init_point_head(self, cfg, input_shape): + # fmt: off + self.mask_point_on = True # always on + assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES + self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES + self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS + # next two parameters are use in the adaptive subdivions inference procedure + self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS + self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS + # fmt: on + + in_channels = int(np.sum([input_shape[f].channels for f in self.mask_point_in_features])) + self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1)) + self.num_params = self.point_head.num_params + + # inference parameters + self.mask_point_subdivision_init_resolution = int( + math.sqrt(self.mask_point_subdivision_num_points) + ) + assert ( + self.mask_point_subdivision_init_resolution + * self.mask_point_subdivision_init_resolution + == self.mask_point_subdivision_num_points + ) + + def forward(self, features, instances): + """ + Args: + features (dict[str, Tensor]): a dict of image-level features + instances (list[Instances]): proposals in training; detected + instances in inference + """ + if self.training: + proposal_boxes = [x.proposal_boxes for x in instances] + parameters = self.parameter_head(self._roi_pooler(features, proposal_boxes)) + losses = {"loss_l2": self.regularizer * (parameters**2).mean()} + + point_coords, point_labels = self._uniform_sample_train_points(instances) + point_fine_grained_features = self._point_pooler(features, proposal_boxes, point_coords) + point_logits = self._get_point_logits( + point_fine_grained_features, point_coords, parameters + ) + losses["loss_mask_point"] = roi_mask_point_loss(point_logits, instances, point_labels) + return losses + else: + pred_boxes = [x.pred_boxes for x in instances] + parameters = self.parameter_head(self._roi_pooler(features, pred_boxes)) + return self._subdivision_inference(features, parameters, instances) + + def _uniform_sample_train_points(self, instances): + assert self.training + proposal_boxes = [x.proposal_boxes for x in instances] + cat_boxes = Boxes.cat(proposal_boxes) + # uniform sample + point_coords = torch.rand( + len(cat_boxes), self.mask_point_train_num_points, 2, device=cat_boxes.tensor.device + ) + # sample point_labels + point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords) + point_labels = sample_point_labels(instances, point_coords_wrt_image) + return point_coords, point_labels + + def _get_point_logits(self, fine_grained_features, point_coords, parameters): + return self.point_head(fine_grained_features, point_coords, parameters) diff --git a/data_processing/detectron2/projects/PointRend/point_rend/point_features.py b/data_processing/detectron2/projects/PointRend/point_rend/point_features.py new file mode 100644 index 0000000..e46f442 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/point_features.py @@ -0,0 +1,259 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import torch +from torch.nn import functional as F + +from detectron2.layers import cat, shapes_to_tensor +from detectron2.structures import BitMasks, Boxes + + +""" +Shape shorthand in this module: + + N: minibatch dimension size, i.e. the number of RoIs for instance segmenation or the + number of images for semantic segmenation. + R: number of ROIs, combined over all images, in the minibatch + P: number of points +""" + + +def point_sample(input, point_coords, **kwargs): + """ + A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors. + Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside + [0, 1] x [0, 1] square. + + Args: + input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid. + point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains + [0, 1] x [0, 1] normalized point coordinates. + + Returns: + output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains + features for points in `point_coords`. The features are obtained via bilinear + interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`. + """ + add_dim = False + if point_coords.dim() == 3: + add_dim = True + point_coords = point_coords.unsqueeze(2) + output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs) + if add_dim: + output = output.squeeze(3) + return output + + +def generate_regular_grid_point_coords(R, side_size, device): + """ + Generate regular square grid of points in [0, 1] x [0, 1] coordinate space. + + Args: + R (int): The number of grids to sample, one for each region. + side_size (int): The side size of the regular grid. + device (torch.device): Desired device of returned tensor. + + Returns: + (Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates + for the regular grids. + """ + aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device) + r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False) + return r.view(1, -1, 2).expand(R, -1, -1) + + +def get_uncertain_point_coords_with_randomness( + coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio +): + """ + Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The unceratinties + are calculated for each point using 'uncertainty_func' function that takes point's logit + prediction as input. + See PointRend paper for details. + + Args: + coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for + class-specific or class-agnostic prediction. + uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that + contains logit predictions for P points and returns their uncertainties as a Tensor of + shape (N, 1, P). + num_points (int): The number of points P to sample. + oversample_ratio (int): Oversampling parameter. + importance_sample_ratio (float): Ratio of points that are sampled via importnace sampling. + + Returns: + point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P + sampled points. 
+ """ + assert oversample_ratio >= 1 + assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0 + num_boxes = coarse_logits.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand(num_boxes, num_sampled, 2, device=coarse_logits.device) + point_logits = point_sample(coarse_logits, point_coords, align_corners=False) + # It is crucial to calculate uncertainty based on the sampled prediction value for the points. + # Calculating uncertainties of the coarse predictions first and sampling them for points leads + # to incorrect results. + # To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between + # two coarse predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value. + # However, if we calculate uncertainties for the coarse predictions first, + # both will have -1 uncertainty, and the sampled point will get -1 uncertainty. + point_uncertainties = uncertainty_func(point_logits) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange(num_boxes, dtype=torch.long, device=coarse_logits.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + num_boxes, num_uncertain_points, 2 + ) + if num_random_points > 0: + point_coords = cat( + [ + point_coords, + torch.rand(num_boxes, num_random_points, 2, device=coarse_logits.device), + ], + dim=1, + ) + return point_coords + + +def get_uncertain_point_coords_on_grid(uncertainty_map, num_points): + """ + Find `num_points` most uncertain points from `uncertainty_map` grid. + + Args: + uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty + values for a set of points on a regular H x W grid. + num_points (int): The number of points P to select. + + Returns: + point_indices (Tensor): A tensor of shape (N, P) that contains indices from + [0, H x W) of the most uncertain points. + point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized + coordinates of the most uncertain points from the H x W grid. + """ + R, _, H, W = uncertainty_map.shape + h_step = 1.0 / float(H) + w_step = 1.0 / float(W) + + num_points = min(H * W, num_points) + point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1] + point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device) + point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step + point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step + return point_indices, point_coords + + +def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords): + """ + Get features from feature maps in `features_list` that correspond to specific point coordinates + inside each bounding box from `boxes`. + + Args: + features_list (list[Tensor]): A list of feature map tensors to get features from. + feature_scales (list[float]): A list of scales for tensors in `features_list`. + boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all + together. + point_coords (Tensor): A tensor of shape (R, P, 2) that contains + [0, 1] x [0, 1] box-normalized coordinates of the P sampled points. 
+ + Returns: + point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled + from all features maps in feature_list for P sampled points for all R boxes in `boxes`. + point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level + coordinates of P points. + """ + cat_boxes = Boxes.cat(boxes) + num_boxes = [b.tensor.size(0) for b in boxes] + + point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords) + split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes) + + point_features = [] + for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image): + point_features_per_image = [] + for idx_feature, feature_map in enumerate(features_list): + h, w = feature_map.shape[-2:] + scale = shapes_to_tensor([w, h]) / feature_scales[idx_feature] + point_coords_scaled = point_coords_wrt_image_per_image / scale.to(feature_map.device) + point_features_per_image.append( + point_sample( + feature_map[idx_img].unsqueeze(0), + point_coords_scaled.unsqueeze(0), + align_corners=False, + ) + .squeeze(0) + .transpose(1, 0) + ) + point_features.append(cat(point_features_per_image, dim=1)) + + return cat(point_features, dim=0), point_coords_wrt_image + + +def get_point_coords_wrt_image(boxes_coords, point_coords): + """ + Convert box-normalized [0, 1] x [0, 1] point cooordinates to image-level coordinates. + + Args: + boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding boxes. + coordinates. + point_coords (Tensor): A tensor of shape (R, P, 2) that contains + [0, 1] x [0, 1] box-normalized coordinates of the P sampled points. + + Returns: + point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains + image-normalized coordinates of P sampled points. + """ + with torch.no_grad(): + point_coords_wrt_image = point_coords.clone() + point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * ( + boxes_coords[:, None, 2] - boxes_coords[:, None, 0] + ) + point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * ( + boxes_coords[:, None, 3] - boxes_coords[:, None, 1] + ) + point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0] + point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1] + return point_coords_wrt_image + + +def sample_point_labels(instances, point_coords): + """ + Sample point labels from ground truth mask given point_coords. + + Args: + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. So, i_th elememt of the list contains R_i objects and R_1 + ... + R_N is + equal to R. The ground-truth gt_masks in each instance will be used to compute labels. + points_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of + instances and P is the number of points for each instance. The coordinates are in + the absolute image pixel coordinate space, i.e. [0, H] x [0, W]. + + Returns: + Tensor: A tensor of shape (R, P) that contains the labels of P sampled points. + """ + with torch.no_grad(): + gt_mask_logits = [] + point_coords_splits = torch.split( + point_coords, [len(instances_per_image) for instances_per_image in instances] + ) + for i, instances_per_image in enumerate(instances): + if len(instances_per_image) == 0: + continue + assert isinstance( + instances_per_image.gt_masks, BitMasks + ), "Point head works with GT in 'bitmask' format. Set INPUT.MASK_FORMAT to 'bitmask'." 
+ + gt_bit_masks = instances_per_image.gt_masks.tensor + h, w = instances_per_image.gt_masks.image_size + scale = torch.tensor([w, h], dtype=torch.float, device=gt_bit_masks.device) + points_coord_grid_sample_format = point_coords_splits[i] / scale + gt_mask_logits.append( + point_sample( + gt_bit_masks.to(torch.float32).unsqueeze(1), + points_coord_grid_sample_format, + align_corners=False, + ).squeeze(1) + ) + + point_labels = cat(gt_mask_logits) + return point_labels diff --git a/data_processing/detectron2/projects/PointRend/point_rend/point_head.py b/data_processing/detectron2/projects/PointRend/point_rend/point_head.py new file mode 100644 index 0000000..1786fad --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/point_head.py @@ -0,0 +1,282 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import ShapeSpec, cat +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +POINT_HEAD_REGISTRY = Registry("POINT_HEAD") +POINT_HEAD_REGISTRY.__doc__ = """ +Registry for point heads, which makes prediction for a given set of per-point features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +def roi_mask_point_loss(mask_logits, instances, point_labels): + """ + Compute the point-based loss for instance segmentation mask predictions + given point-wise mask prediction and its corresponding point-wise labels. + Args: + mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or + class-agnostic, where R is the total number of predicted masks in all images, C is the + number of foreground classes, and P is the number of points sampled for each mask. + The values are logits. + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, i_th + elememt of the list contains R_i objects and R_1 + ... + R_N is equal to R. + The ground-truth labels (class, box, mask, ...) associated with each instance are stored + in fields. + point_labels (Tensor): A tensor of shape (R, P), where R is the total number of + predicted masks and P is the number of points for each mask. + Labels with value of -1 will be ignored. + Returns: + point_loss (Tensor): A scalar tensor containing the loss. 
+ """ + with torch.no_grad(): + cls_agnostic_mask = mask_logits.size(1) == 1 + total_num_masks = mask_logits.size(0) + + gt_classes = [] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + + if not cls_agnostic_mask: + gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) + gt_classes.append(gt_classes_per_image) + + gt_mask_logits = point_labels + point_ignores = point_labels == -1 + if gt_mask_logits.shape[0] == 0: + return mask_logits.sum() * 0 + + assert gt_mask_logits.numel() > 0, gt_mask_logits.shape + + if cls_agnostic_mask: + mask_logits = mask_logits[:, 0] + else: + indices = torch.arange(total_num_masks) + gt_classes = cat(gt_classes, dim=0) + mask_logits = mask_logits[indices, gt_classes] + + # Log the training accuracy (using gt classes and 0.0 threshold for the logits) + mask_accurate = (mask_logits > 0.0) == gt_mask_logits.to(dtype=torch.uint8) + mask_accurate = mask_accurate[~point_ignores] + mask_accuracy = mask_accurate.nonzero().size(0) / max(mask_accurate.numel(), 1.0) + get_event_storage().put_scalar("point/accuracy", mask_accuracy) + + point_loss = F.binary_cross_entropy_with_logits( + mask_logits, gt_mask_logits.to(dtype=torch.float32), weight=~point_ignores, reduction="mean" + ) + return point_loss + + +@POINT_HEAD_REGISTRY.register() +class StandardPointHead(nn.Module): + """ + A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head + takes both fine-grained and coarse prediction features as its input. + """ + + def __init__(self, cfg, input_shape: ShapeSpec): + """ + The following attributes are parsed from config: + fc_dim: the output dimension of each FC layers + num_fc: the number of FC layers + coarse_pred_each_layer: if True, coarse prediction features are concatenated to each + layer's input + """ + super(StandardPointHead, self).__init__() + # fmt: off + num_classes = cfg.MODEL.POINT_HEAD.NUM_CLASSES + fc_dim = cfg.MODEL.POINT_HEAD.FC_DIM + num_fc = cfg.MODEL.POINT_HEAD.NUM_FC + cls_agnostic_mask = cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK + self.coarse_pred_each_layer = cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER + input_channels = input_shape.channels + # fmt: on + + fc_dim_in = input_channels + num_classes + self.fc_layers = [] + for k in range(num_fc): + fc = nn.Conv1d(fc_dim_in, fc_dim, kernel_size=1, stride=1, padding=0, bias=True) + self.add_module("fc{}".format(k + 1), fc) + self.fc_layers.append(fc) + fc_dim_in = fc_dim + fc_dim_in += num_classes if self.coarse_pred_each_layer else 0 + + num_mask_classes = 1 if cls_agnostic_mask else num_classes + self.predictor = nn.Conv1d(fc_dim_in, num_mask_classes, kernel_size=1, stride=1, padding=0) + + for layer in self.fc_layers: + weight_init.c2_msra_fill(layer) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.predictor.weight, std=0.001) + if self.predictor.bias is not None: + nn.init.constant_(self.predictor.bias, 0) + + def forward(self, fine_grained_features, coarse_features): + x = torch.cat((fine_grained_features, coarse_features), dim=1) + for layer in self.fc_layers: + x = F.relu(layer(x)) + if self.coarse_pred_each_layer: + x = cat((x, coarse_features), dim=1) + return self.predictor(x) + + +@POINT_HEAD_REGISTRY.register() +class ImplicitPointHead(nn.Module): + """ + A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head + takes both fine-grained features and instance-wise MLP parameters as its input. 
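+
+    The instance-wise parameters are provided as one flat vector per instance; `_parse_params`
+    splits and reshapes them into per-layer weights and biases, and `_dynamic_mlp` applies the
+    resulting per-instance MLP to the sampled point features.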
+ """ + + def __init__(self, cfg, input_shape: ShapeSpec): + """ + The following attributes are parsed from config: + channels: the output dimension of each FC layers + num_layers: the number of FC layers (including the final prediction layer) + image_feature_enabled: if True, fine-grained image-level features are used + positional_encoding_enabled: if True, positional encoding is used + """ + super(ImplicitPointHead, self).__init__() + # fmt: off + self.num_layers = cfg.MODEL.POINT_HEAD.NUM_FC + 1 + self.channels = cfg.MODEL.POINT_HEAD.FC_DIM + self.image_feature_enabled = cfg.MODEL.IMPLICIT_POINTREND.IMAGE_FEATURE_ENABLED + self.positional_encoding_enabled = cfg.MODEL.IMPLICIT_POINTREND.POS_ENC_ENABLED + self.num_classes = ( + cfg.MODEL.POINT_HEAD.NUM_CLASSES if not cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK else 1 + ) + self.in_channels = input_shape.channels + # fmt: on + + if not self.image_feature_enabled: + self.in_channels = 0 + if self.positional_encoding_enabled: + self.in_channels += 256 + self.register_buffer("positional_encoding_gaussian_matrix", torch.randn((2, 128))) + + assert self.in_channels > 0 + + num_weight_params, num_bias_params = [], [] + assert self.num_layers >= 2 + for l in range(self.num_layers): + if l == 0: + # input layer + num_weight_params.append(self.in_channels * self.channels) + num_bias_params.append(self.channels) + elif l == self.num_layers - 1: + # output layer + num_weight_params.append(self.channels * self.num_classes) + num_bias_params.append(self.num_classes) + else: + # intermediate layer + num_weight_params.append(self.channels * self.channels) + num_bias_params.append(self.channels) + + self.num_weight_params = num_weight_params + self.num_bias_params = num_bias_params + self.num_params = sum(num_weight_params) + sum(num_bias_params) + + def forward(self, fine_grained_features, point_coords, parameters): + # features: [R, channels, K] + # point_coords: [R, K, 2] + num_instances = fine_grained_features.size(0) + num_points = fine_grained_features.size(2) + + if num_instances == 0: + return torch.zeros((0, 1, num_points), device=fine_grained_features.device) + + if self.positional_encoding_enabled: + # locations: [R*K, 2] + locations = 2 * point_coords.reshape(num_instances * num_points, 2) - 1 + locations = locations @ self.positional_encoding_gaussian_matrix.to(locations.device) + locations = 2 * np.pi * locations + locations = torch.cat([torch.sin(locations), torch.cos(locations)], dim=1) + # locations: [R, C, K] + locations = locations.reshape(num_instances, num_points, 256).permute(0, 2, 1) + if not self.image_feature_enabled: + fine_grained_features = locations + else: + fine_grained_features = torch.cat([locations, fine_grained_features], dim=1) + + # features [R, C, K] + mask_feat = fine_grained_features.reshape(num_instances, self.in_channels, num_points) + + weights, biases = self._parse_params( + parameters, + self.in_channels, + self.channels, + self.num_classes, + self.num_weight_params, + self.num_bias_params, + ) + + point_logits = self._dynamic_mlp(mask_feat, weights, biases, num_instances) + point_logits = point_logits.reshape(-1, self.num_classes, num_points) + + return point_logits + + @staticmethod + def _dynamic_mlp(features, weights, biases, num_instances): + assert features.dim() == 3, features.dim() + n_layers = len(weights) + x = features + for i, (w, b) in enumerate(zip(weights, biases)): + x = torch.einsum("nck,ndc->ndk", x, w) + b + if i < n_layers - 1: + x = F.relu(x) + return x + + @staticmethod + def _parse_params( + 
pred_params, + in_channels, + channels, + num_classes, + num_weight_params, + num_bias_params, + ): + assert pred_params.dim() == 2 + assert len(num_weight_params) == len(num_bias_params) + assert pred_params.size(1) == sum(num_weight_params) + sum(num_bias_params) + + num_instances = pred_params.size(0) + num_layers = len(num_weight_params) + + params_splits = list( + torch.split_with_sizes(pred_params, num_weight_params + num_bias_params, dim=1) + ) + + weight_splits = params_splits[:num_layers] + bias_splits = params_splits[num_layers:] + + for l in range(num_layers): + if l == 0: + # input layer + weight_splits[l] = weight_splits[l].reshape(num_instances, channels, in_channels) + bias_splits[l] = bias_splits[l].reshape(num_instances, channels, 1) + elif l < num_layers - 1: + # intermediate layer + weight_splits[l] = weight_splits[l].reshape(num_instances, channels, channels) + bias_splits[l] = bias_splits[l].reshape(num_instances, channels, 1) + else: + # output layer + weight_splits[l] = weight_splits[l].reshape(num_instances, num_classes, channels) + bias_splits[l] = bias_splits[l].reshape(num_instances, num_classes, 1) + + return weight_splits, bias_splits + + +def build_point_head(cfg, input_channels): + """ + Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`. + """ + head_name = cfg.MODEL.POINT_HEAD.NAME + return POINT_HEAD_REGISTRY.get(head_name)(cfg, input_channels) diff --git a/data_processing/detectron2/projects/PointRend/point_rend/roi_heads.py b/data_processing/detectron2/projects/PointRend/point_rend/roi_heads.py new file mode 100644 index 0000000..74ccc34 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/roi_heads.py @@ -0,0 +1,49 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging + +from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads + + +@ROI_HEADS_REGISTRY.register() +class PointRendROIHeads(StandardROIHeads): + """ + Identical to StandardROIHeads, except for some weights conversion code to + handle old models. + """ + + _version = 2 + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + version = local_metadata.get("version", None) + if version is None or version < 2: + logger = logging.getLogger(__name__) + logger.warning( + "Weight format of PointRend models have changed! " + "Please upgrade your models. Applying automatic conversion now ..." + ) + for k in list(state_dict.keys()): + newk = k + if k.startswith(prefix + "mask_point_head"): + newk = k.replace(prefix + "mask_point_head", prefix + "mask_head.point_head") + if k.startswith(prefix + "mask_coarse_head"): + newk = k.replace(prefix + "mask_coarse_head", prefix + "mask_head.coarse_head") + if newk != k: + state_dict[newk] = state_dict[k] + del state_dict[k] + + @classmethod + def _init_mask_head(cls, cfg, input_shape): + if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.NAME != "PointRendMaskHead": + logger = logging.getLogger(__name__) + logger.warning( + "Config of PointRend models have changed! " + "Please upgrade your models. Applying automatic conversion now ..." 
+ ) + assert cfg.MODEL.ROI_MASK_HEAD.NAME == "CoarseMaskHead" + cfg.defrost() + cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead" + cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "" + cfg.freeze() + return super()._init_mask_head(cfg, input_shape) diff --git a/data_processing/detectron2/projects/PointRend/point_rend/semantic_seg.py b/data_processing/detectron2/projects/PointRend/point_rend/semantic_seg.py new file mode 100644 index 0000000..ea65200 --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/point_rend/semantic_seg.py @@ -0,0 +1,135 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Dict +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import ShapeSpec, cat +from detectron2.modeling import SEM_SEG_HEADS_REGISTRY + +from .point_features import ( + get_uncertain_point_coords_on_grid, + get_uncertain_point_coords_with_randomness, + point_sample, +) +from .point_head import build_point_head + + +def calculate_uncertainty(sem_seg_logits): + """ + For each location of the prediction `sem_seg_logits` we estimate uncertainty as the + difference between top first and top second predicted logits. + + Args: + sem_seg_logits (Tensor): A tensor of shape (N, C, ...), where N is the minibatch size and + C is the number of foreground classes. The values are logits. + + Returns: + scores (Tensor): A tensor of shape (N, 1, ...) that contains uncertainty scores with + the most uncertain locations having the highest uncertainty score. + """ + top2_scores = torch.topk(sem_seg_logits, k=2, dim=1)[0] + return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1) + + +@SEM_SEG_HEADS_REGISTRY.register() +class PointRendSemSegHead(nn.Module): + """ + A semantic segmentation head that combines a head set in `POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME` + and a point head set in `MODEL.POINT_HEAD.NAME`.
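+
+    During training, the point head is supervised at points chosen by importance-sampling the most
+    uncertain coarse predictions; during inference, the coarse prediction is repeatedly upsampled
+    and only the most uncertain grid locations are re-predicted by the point head.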
+ """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + + self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE + + self.coarse_sem_seg_head = SEM_SEG_HEADS_REGISTRY.get( + cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME + )(cfg, input_shape) + self._init_point_head(cfg, input_shape) + + def _init_point_head(self, cfg, input_shape: Dict[str, ShapeSpec]): + # fmt: off + assert cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES + feature_channels = {k: v.channels for k, v in input_shape.items()} + self.in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES + self.train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS + self.oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO + self.importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO + self.subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS + self.subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS + # fmt: on + + in_channels = int(np.sum([feature_channels[f] for f in self.in_features])) + self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1)) + + def forward(self, features, targets=None): + coarse_sem_seg_logits = self.coarse_sem_seg_head.layers(features) + + if self.training: + losses = self.coarse_sem_seg_head.losses(coarse_sem_seg_logits, targets) + + with torch.no_grad(): + point_coords = get_uncertain_point_coords_with_randomness( + coarse_sem_seg_logits, + calculate_uncertainty, + self.train_num_points, + self.oversample_ratio, + self.importance_sample_ratio, + ) + coarse_features = point_sample(coarse_sem_seg_logits, point_coords, align_corners=False) + + fine_grained_features = cat( + [ + point_sample(features[in_feature], point_coords, align_corners=False) + for in_feature in self.in_features + ], + dim=1, + ) + point_logits = self.point_head(fine_grained_features, coarse_features) + point_targets = ( + point_sample( + targets.unsqueeze(1).to(torch.float), + point_coords, + mode="nearest", + align_corners=False, + ) + .squeeze(1) + .to(torch.long) + ) + losses["loss_sem_seg_point"] = F.cross_entropy( + point_logits, point_targets, reduction="mean", ignore_index=self.ignore_value + ) + return None, losses + else: + sem_seg_logits = coarse_sem_seg_logits.clone() + for _ in range(self.subdivision_steps): + sem_seg_logits = F.interpolate( + sem_seg_logits, scale_factor=2, mode="bilinear", align_corners=False + ) + uncertainty_map = calculate_uncertainty(sem_seg_logits) + point_indices, point_coords = get_uncertain_point_coords_on_grid( + uncertainty_map, self.subdivision_num_points + ) + fine_grained_features = cat( + [ + point_sample(features[in_feature], point_coords, align_corners=False) + for in_feature in self.in_features + ] + ) + coarse_features = point_sample( + coarse_sem_seg_logits, point_coords, align_corners=False + ) + point_logits = self.point_head(fine_grained_features, coarse_features) + + # put sem seg point predictions to the right places on the upsampled grid. 
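+                # point_indices are flat indices into the H*W grid, so scatter_ along dim=2
+                # overwrites the coarse logits at exactly those locations with the refined
+                # point predictions.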
+ N, C, H, W = sem_seg_logits.shape + point_indices = point_indices.unsqueeze(1).expand(-1, C, -1) + sem_seg_logits = ( + sem_seg_logits.reshape(N, C, H * W) + .scatter_(2, point_indices, point_logits) + .view(N, C, H, W) + ) + return sem_seg_logits, {} diff --git a/data_processing/detectron2/projects/PointRend/train_net.py b/data_processing/detectron2/projects/PointRend/train_net.py new file mode 100644 index 0000000..9ae6f1a --- /dev/null +++ b/data_processing/detectron2/projects/PointRend/train_net.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +PointRend Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os + +import detectron2.data.transforms as T +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + DatasetEvaluators, + LVISEvaluator, + SemSegEvaluator, + verify_results, +) +from detectron2.projects.point_rend import ColorAugSSDTransform, add_pointrend_config + + +def build_sem_seg_train_aug(cfg): + augs = [ + T.ResizeShortestEdge( + cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + ) + ] + if cfg.INPUT.CROP.ENABLED: + augs.append( + T.RandomCrop_CategoryAreaConstraint( + cfg.INPUT.CROP.TYPE, + cfg.INPUT.CROP.SIZE, + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA, + cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + ) + ) + if cfg.INPUT.COLOR_AUG_SSD: + augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT)) + augs.append(T.RandomFlip()) + return augs + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains a number pre-defined logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can use the cleaner + "SimpleTrainer", or write your own training loop. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. 
+ """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type == "lvis": + return LVISEvaluator(dataset_name, output_dir=output_folder) + if evaluator_type == "coco": + return COCOEvaluator(dataset_name, output_dir=output_folder) + if evaluator_type == "sem_seg": + return SemSegEvaluator( + dataset_name, + distributed=True, + output_dir=output_folder, + ) + if evaluator_type == "cityscapes_instance": + return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + return CityscapesSemSegEvaluator(dataset_name) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + if len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def build_train_loader(cls, cfg): + if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE: + mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg)) + else: + mapper = None + return build_detection_train_loader(cfg, mapper=mapper) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_pointrend_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/projects/PointSup/README.md b/data_processing/detectron2/projects/PointSup/README.md new file mode 100644 index 0000000..75ce084 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/README.md @@ -0,0 +1,41 @@ +# Pointly-Supervised Instance Segmentation + +Bowen Cheng, Omkar Parkhi, Alexander Kirillov + +[[`arXiv`](https://arxiv.org/abs/2104.06404)] [[`Project`](https://bowenc0221.github.io/point-sup)] [[`BibTeX`](#CitingPointSup)] + +
+ +

+ +## Data preparation +Please follow these steps to prepare your datasets: +1. Follow official Detectron2 instruction to prepare COCO dataset. Set up `DETECTRON2_DATASETS` environment variable to the location of your Detectron2 dataset. +2. Generate 10-points annotations for COCO by running: `python tools/prepare_coco_point_annotations_without_masks.py 10` + +## Training + +To train a model with 8 GPUs run: +```bash +python train_net.py --config-file configs/mask_rcnn_R_50_FPN_3x_point_sup_point_aug_coco.yaml --num-gpus 8 +``` + +## Evaluation + +Model evaluation can be done similarly: +```bash +python train_net.py --config-file configs/mask_rcnn_R_50_FPN_3x_point_sup_point_aug_coco.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint +``` + +## Citing Pointly-Supervised Instance Segmentation + +If you use PointSup, please use the following BibTeX entry. + +```BibTeX +@article{cheng2021pointly, + title={Pointly-Supervised Instance Segmentation}, + author={Bowen Cheng and Omkar Parkhi and Alexander Kirillov}, + journal={arXiv}, + year={2021} +} +``` diff --git a/data_processing/detectron2/projects/PointSup/configs/implicit_pointrend_R_50_FPN_3x_point_sup_point_aug_coco.yaml b/data_processing/detectron2/projects/PointSup/configs/implicit_pointrend_R_50_FPN_3x_point_sup_point_aug_coco.yaml new file mode 100644 index 0000000..5b3d427 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/configs/implicit_pointrend_R_50_FPN_3x_point_sup_point_aug_coco.yaml @@ -0,0 +1,9 @@ +_BASE_: "../../PointRend/configs/InstanceSegmentation/implicit_pointrend_R_50_FPN_3x_coco.yaml" +MODEL: + ROI_MASK_HEAD: + NAME: "ImplicitPointRendPointSupHead" +INPUT: + POINT_SUP: True + SAMPLE_POINTS: 5 +DATASETS: + TRAIN: ("coco_2017_train_points_n10_v1_without_masks",) diff --git a/data_processing/detectron2/projects/PointSup/configs/mask_rcnn_R_50_FPN_3x_point_sup_coco.yaml b/data_processing/detectron2/projects/PointSup/configs/mask_rcnn_R_50_FPN_3x_point_sup_coco.yaml new file mode 100644 index 0000000..157e384 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/configs/mask_rcnn_R_50_FPN_3x_point_sup_coco.yaml @@ -0,0 +1,15 @@ +_BASE_: "../../../configs/Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsamplePointSupHead" +INPUT: + POINT_SUP: True +DATASETS: + TRAIN: ("coco_2017_train_points_n10_v1_without_masks",) +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/projects/PointSup/configs/mask_rcnn_R_50_FPN_3x_point_sup_point_aug_coco.yaml b/data_processing/detectron2/projects/PointSup/configs/mask_rcnn_R_50_FPN_3x_point_sup_point_aug_coco.yaml new file mode 100644 index 0000000..4b11224 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/configs/mask_rcnn_R_50_FPN_3x_point_sup_point_aug_coco.yaml @@ -0,0 +1,3 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_point_sup_coco.yaml" +INPUT: + SAMPLE_POINTS: 5 diff --git a/data_processing/detectron2/projects/PointSup/point_sup/__init__.py b/data_processing/detectron2/projects/PointSup/point_sup/__init__.py new file mode 100644 index 0000000..510e381 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/point_sup/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from . 
import register_point_annotations +from .config import add_point_sup_config +from .dataset_mapper import PointSupDatasetMapper +from .mask_head import MaskRCNNConvUpsamplePointSupHead +from .point_utils import get_point_coords_from_point_annotation diff --git a/data_processing/detectron2/projects/PointSup/point_sup/config.py b/data_processing/detectron2/projects/PointSup/point_sup/config.py new file mode 100644 index 0000000..5e00b78 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/point_sup/config.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + + +def add_point_sup_config(cfg): + """ + Add config for point supervision. + """ + # Use point annotation + cfg.INPUT.POINT_SUP = False + # Sample only part of points in each iteration. + # Default: 0, use all available points. + cfg.INPUT.SAMPLE_POINTS = 0 diff --git a/data_processing/detectron2/projects/PointSup/point_sup/dataset_mapper.py b/data_processing/detectron2/projects/PointSup/point_sup/dataset_mapper.py new file mode 100644 index 0000000..52b9bd4 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/point_sup/dataset_mapper.py @@ -0,0 +1,125 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import logging +import numpy as np +from typing import List, Union +import torch + +import detectron2.data.detection_utils as utils +import detectron2.data.transforms as T +from detectron2.config import configurable + +from .detection_utils import annotations_to_instances, transform_instance_annotations + +__all__ = [ + "PointSupDatasetMapper", +] + + +class PointSupDatasetMapper: + """ + The callable currently does the following: + 1. Read the image from "file_name" + 2. Applies transforms to the image and annotations + 3. Prepare data and annotations to Tensor and :class:`Instances` + """ + + @configurable + def __init__( + self, + is_train: bool, + *, + augmentations: List[Union[T.Augmentation, T.Transform]], + image_format: str, + # Extra data augmentation for point supervision + sample_points: int = 0, + ): + """ + NOTE: this interface is experimental. + + Args: + is_train: whether it's used in training or inference + augmentations: a list of augmentations or deterministic transforms to apply + image_format: an image format supported by :func:`detection_utils.read_image`. + sample_points: subsample points at each iteration + """ + # fmt: off + self.is_train = is_train + self.augmentations = T.AugmentationList(augmentations) + self.image_format = image_format + self.sample_points = sample_points + # fmt: on + logger = logging.getLogger(__name__) + mode = "training" if is_train else "inference" + logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") + logger.info(f"Point Augmentations used in {mode}: sample {sample_points} points") + + @classmethod + def from_config(cls, cfg, is_train: bool = True): + augs = utils.build_augmentation(cfg, is_train) + if cfg.INPUT.CROP.ENABLED and is_train: + raise ValueError("Crop augmentation not supported to point supervision.") + + ret = { + "is_train": is_train, + "augmentations": augs, + "image_format": cfg.INPUT.FORMAT, + "sample_points": cfg.INPUT.SAMPLE_POINTS, + } + + return ret + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. 
+ Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + image = utils.read_image(dataset_dict["file_name"], format=self.image_format) + utils.check_image_size(dataset_dict, image) + + aug_input = T.AugInput(image) + transforms = self.augmentations(aug_input) + image = aug_input.image + + image_shape = image.shape[:2] # h, w + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + + if not self.is_train: + dataset_dict.pop("annotations", None) + return dataset_dict + + if "annotations" in dataset_dict: + # Maps points from the closed interval [0, image_size - 1] on discrete + # image coordinates to the half-open interval [x1, x2) on continuous image + # coordinates. We use the continuous-discrete conversion from Heckbert + # 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5, + # where d is a discrete coordinate and c is a continuous coordinate. + for ann in dataset_dict["annotations"]: + point_coords_wrt_image = np.array(ann["point_coords"]).astype(np.float) + point_coords_wrt_image = point_coords_wrt_image + 0.5 + ann["point_coords"] = point_coords_wrt_image + + annos = [ + # also need to transform point coordinates + transform_instance_annotations( + obj, + transforms, + image_shape, + ) + for obj in dataset_dict.pop("annotations") + if obj.get("iscrowd", 0) == 0 + ] + instances = annotations_to_instances( + annos, + image_shape, + sample_points=self.sample_points, + ) + + dataset_dict["instances"] = utils.filter_empty_instances(instances) + return dataset_dict diff --git a/data_processing/detectron2/projects/PointSup/point_sup/detection_utils.py b/data_processing/detectron2/projects/PointSup/point_sup/detection_utils.py new file mode 100644 index 0000000..3f95d94 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/point_sup/detection_utils.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import numpy as np +import torch + +# fmt: off +from detectron2.data.detection_utils import \ + annotations_to_instances as base_annotations_to_instances +from detectron2.data.detection_utils import \ + transform_instance_annotations as base_transform_instance_annotations + +# fmt: on + + +def annotations_to_instances(annos, image_size, sample_points=0): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. + image_size (tuple): height, width + sample_points (int): subsample points at each iteration + + Returns: + Instances: + It will contain fields "gt_boxes", "gt_classes", + "gt_point_coords", "gt_point_labels", if they can be obtained from `annos`. + This is the format that builtin models with point supervision expect. 
+ """ + target = base_annotations_to_instances(annos, image_size) + + assert ("point_coords" in annos[0]) == ("point_labels" in annos[0]) + + if len(annos) and "point_labels" in annos[0]: + point_coords = [] + point_labels = [] + for i, _ in enumerate(annos): + # Already in the image coordinate system + point_coords_wrt_image = np.array(annos[i]["point_coords"]) + point_labels_wrt_image = np.array(annos[i]["point_labels"]) + + if sample_points > 0: + random_indices = np.random.choice( + point_coords_wrt_image.shape[0], + sample_points, + replace=point_coords_wrt_image.shape[0] < sample_points, + ).astype(int) + point_coords_wrt_image = point_coords_wrt_image[random_indices] + point_labels_wrt_image = point_labels_wrt_image[random_indices] + assert point_coords_wrt_image.shape[0] == point_labels_wrt_image.size + + point_coords.append(point_coords_wrt_image) + point_labels.append(point_labels_wrt_image) + + point_coords = torch.stack([torch.from_numpy(x) for x in point_coords]) + point_labels = torch.stack([torch.from_numpy(x) for x in point_labels]) + target.gt_point_coords = point_coords + target.gt_point_labels = point_labels + + return target + + +def transform_instance_annotations( + annotation, transforms, image_size, *, keypoint_hflip_indices=None +): + """ + Apply transforms to box, and point annotations of a single instance. + It will use `transforms.apply_box` for the box, and + `transforms.apply_coords` for points. + Args: + annotation (dict): dict of instance annotations for a single instance. + It will be modified in-place. + transforms (TransformList or list[Transform]): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + Returns: + dict: + the same input dict with fields "bbox", "point_coords", "point_labels" + transformed according to `transforms`. + The "bbox_mode" field will be set to XYXY_ABS. + """ + annotation = base_transform_instance_annotations( + annotation, transforms, image_size, keypoint_hflip_indices + ) + + assert ("point_coords" in annotation) == ("point_labels" in annotation) + if "point_coords" in annotation and "point_labels" in annotation: + point_coords = annotation["point_coords"] + point_labels = np.array(annotation["point_labels"]).astype(np.float) + point_coords = transforms.apply_coords(point_coords) + + # Set all out-of-boundary points to "unlabeled" + inside = (point_coords >= np.array([0, 0])) & (point_coords <= np.array(image_size[::-1])) + inside = inside.all(axis=1) + point_labels[~inside] = -1 + + annotation["point_coords"] = point_coords + annotation["point_labels"] = point_labels + + return annotation diff --git a/data_processing/detectron2/projects/PointSup/point_sup/mask_head.py b/data_processing/detectron2/projects/PointSup/point_sup/mask_head.py new file mode 100644 index 0000000..81c21f5 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/point_sup/mask_head.py @@ -0,0 +1,77 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import numpy as np +from typing import Any, List + +from detectron2.modeling import ROI_MASK_HEAD_REGISTRY +from detectron2.modeling.roi_heads.mask_head import MaskRCNNConvUpsampleHead, mask_rcnn_inference +from detectron2.projects.point_rend import ImplicitPointRendMaskHead +from detectron2.projects.point_rend.point_features import point_sample +from detectron2.projects.point_rend.point_head import roi_mask_point_loss +from detectron2.structures import Instances + +from .point_utils import get_point_coords_from_point_annotation + +__all__ = [ + "ImplicitPointRendPointSupHead", + "MaskRCNNConvUpsamplePointSupHead", +] + + +@ROI_MASK_HEAD_REGISTRY.register() +class MaskRCNNConvUpsamplePointSupHead(MaskRCNNConvUpsampleHead): + """ + A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`). + Predictions are made with a final 1x1 conv layer. + + The difference with `MaskRCNNConvUpsampleHead` is that this head is trained + with point supervision. Please use the `MaskRCNNConvUpsampleHead` if you want + to train the model with mask supervision. + """ + + def forward(self, x, instances: List[Instances]) -> Any: + """ + Args: + x: input region feature(s) provided by :class:`ROIHeads`. + instances (list[Instances]): contains the boxes & labels corresponding + to the input features. + Exact format is up to its caller to decide. + Typically, this is the foreground instances in training, with + "proposal_boxes" field and other gt annotations. + In inference, it contains boxes that are already predicted. + Returns: + A dict of losses in training. The predicted "instances" in inference. + """ + x = self.layers(x) + if self.training: + N, C, H, W = x.shape + assert H == W + + proposal_boxes = [x.proposal_boxes for x in instances] + assert N == np.sum(len(x) for x in proposal_boxes) + + if N == 0: + return {"loss_mask": x.sum() * 0} + + # Training with point supervision + point_coords, point_labels = get_point_coords_from_point_annotation(instances) + + mask_logits = point_sample( + x, + point_coords, + align_corners=False, + ) + + return {"loss_mask": roi_mask_point_loss(mask_logits, instances, point_labels)} + else: + mask_rcnn_inference(x, instances) + return instances + + +@ROI_MASK_HEAD_REGISTRY.register() +class ImplicitPointRendPointSupHead(ImplicitPointRendMaskHead): + def _uniform_sample_train_points(self, instances): + assert self.training + # Please keep in mind that "gt_masks" is not used in this mask head. + point_coords, point_labels = get_point_coords_from_point_annotation(instances) + + return point_coords, point_labels diff --git a/data_processing/detectron2/projects/PointSup/point_sup/point_utils.py b/data_processing/detectron2/projects/PointSup/point_sup/point_utils.py new file mode 100644 index 0000000..eed876e --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/point_sup/point_utils.py @@ -0,0 +1,77 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + +from detectron2.layers import cat + + +def get_point_coords_from_point_annotation(instances): + """ + Load point coords and their corresponding labels from point annotation. + + Args: + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. These instances are in 1:1 + correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, + ...) associated with each instance are stored in fields. 
+ Returns: + point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P + sampled points. + point_labels (Tensor): A tensor of shape (N, P) that contains the labels of P + sampled points. `point_labels` takes 3 possible values: + - 0: the point belongs to background + - 1: the point belongs to the object + - -1: the point is ignored during training + """ + point_coords_list = [] + point_labels_list = [] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + point_coords = instances_per_image.gt_point_coords.to(torch.float32) + point_labels = instances_per_image.gt_point_labels.to(torch.float32).clone() + proposal_boxes_per_image = instances_per_image.proposal_boxes.tensor + + # Convert point coordinate system, ground truth points are in image coord. + point_coords_wrt_box = get_point_coords_wrt_box(proposal_boxes_per_image, point_coords) + + # Ignore points that are outside predicted boxes. + point_ignores = ( + (point_coords_wrt_box[:, :, 0] < 0) + | (point_coords_wrt_box[:, :, 0] > 1) + | (point_coords_wrt_box[:, :, 1] < 0) + | (point_coords_wrt_box[:, :, 1] > 1) + ) + point_labels[point_ignores] = -1 + + point_coords_list.append(point_coords_wrt_box) + point_labels_list.append(point_labels) + + return ( + cat(point_coords_list, dim=0), + cat(point_labels_list, dim=0), + ) + + +def get_point_coords_wrt_box(boxes_coords, point_coords): + """ + Convert image-level absolute coordinates to box-normalized [0, 1] x [0, 1] point cooordinates. + Args: + boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding boxes. + coordinates. + point_coords (Tensor): A tensor of shape (R, P, 2) that contains + image-normalized coordinates of P sampled points. + Returns: + point_coords_wrt_box (Tensor): A tensor of shape (R, P, 2) that contains + [0, 1] x [0, 1] box-normalized coordinates of the P sampled points. + """ + with torch.no_grad(): + point_coords_wrt_box = point_coords.clone() + point_coords_wrt_box[:, :, 0] -= boxes_coords[:, None, 0] + point_coords_wrt_box[:, :, 1] -= boxes_coords[:, None, 1] + point_coords_wrt_box[:, :, 0] = point_coords_wrt_box[:, :, 0] / ( + boxes_coords[:, None, 2] - boxes_coords[:, None, 0] + ) + point_coords_wrt_box[:, :, 1] = point_coords_wrt_box[:, :, 1] / ( + boxes_coords[:, None, 3] - boxes_coords[:, None, 1] + ) + return point_coords_wrt_box diff --git a/data_processing/detectron2/projects/PointSup/point_sup/register_point_annotations.py b/data_processing/detectron2/projects/PointSup/point_sup/register_point_annotations.py new file mode 100644 index 0000000..32f2bb4 --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/point_sup/register_point_annotations.py @@ -0,0 +1,69 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import os + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.data.datasets.builtin import _get_builtin_metadata +from detectron2.data.datasets.coco import load_coco_json + +logger = logging.getLogger(__name__) + + +# COCO dataset +def register_coco_instances_with_points(name, metadata, json_file, image_root): + """ + Register a dataset in COCO's json annotation format for + instance segmentation with point annotation. + + The point annotation json does not have "segmentation" field, instead, + it has "point_coords" and "point_labels" fields. + + Args: + name (str): the name that identifies a dataset, e.g. "coco_2014_train". + metadata (dict): extra metadata associated with this dataset. 
You can + leave it as an empty dict. + json_file (str): path to the json instance annotation file. + image_root (str or path-like): directory which contains all the images. + """ + assert isinstance(name, str), name + assert isinstance(json_file, (str, os.PathLike)), json_file + assert isinstance(image_root, (str, os.PathLike)), image_root + # 1. register a function which returns dicts + DatasetCatalog.register( + name, lambda: load_coco_json(json_file, image_root, name, ["point_coords", "point_labels"]) + ) + + # 2. Optionally, add metadata about this dataset, + # since they might be useful in evaluation, visualization or logging + MetadataCatalog.get(name).set( + json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata + ) + + +_PREDEFINED_SPLITS_COCO = {} +_PREDEFINED_SPLITS_COCO["coco"] = { + # point annotations without masks + "coco_2017_train_points_n10_v1_without_masks": ( + "coco/train2017", + "coco/annotations/instances_train2017_n10_v1_without_masks.json", + ), +} + + +def register_all_coco_train_points(root): + for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items(): + for key, (image_root, json_file) in splits_per_dataset.items(): + # Assume pre-defined datasets live in `./datasets`. + register_coco_instances_with_points( + key, + _get_builtin_metadata(dataset_name), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + +# True for open source; +# Internally at fb, we register them elsewhere +if __name__.endswith(".register_point_annotations"): + _root = os.getenv("DETECTRON2_DATASETS", "datasets") + register_all_coco_train_points(_root) diff --git a/data_processing/detectron2/projects/PointSup/tools/prepare_coco_point_annotations_without_masks.py b/data_processing/detectron2/projects/PointSup/tools/prepare_coco_point_annotations_without_masks.py new file mode 100644 index 0000000..e4aee2a --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/tools/prepare_coco_point_annotations_without_masks.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import copy +import json +import numpy as np +import os +import sys +import pycocotools.mask as mask_utils + +from detectron2.utils.env import seed_all_rng +from detectron2.utils.file_io import PathManager + + +def get_point_annotations(input_filename, output_filename, num_points_per_instance): + with PathManager.open(input_filename, "r") as f: + coco_json = json.load(f) + + coco_annos = coco_json.pop("annotations") + coco_points_json = copy.deepcopy(coco_json) + + imgs = {} + for img in coco_json["images"]: + imgs[img["id"]] = img + + new_annos = [] + for ann in coco_annos: + # convert mask + t = imgs[ann["image_id"]] + h, w = t["height"], t["width"] + segm = ann.pop("segmentation") + if type(segm) == list: + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = mask_utils.frPyObjects(segm, h, w) + rle = mask_utils.merge(rles) + elif type(segm["counts"]) == list: + # uncompressed RLE + rle = mask_utils.frPyObjects(segm, h, w) + else: + # rle + rle = segm + mask = mask_utils.decode(rle) + new_ann = copy.deepcopy(ann) + # sample points in image coordinates + box = ann["bbox"] + point_coords_wrt_image = np.random.rand(num_points_per_instance, 2) + point_coords_wrt_image[:, 0] = point_coords_wrt_image[:, 0] * box[2] + point_coords_wrt_image[:, 1] = point_coords_wrt_image[:, 1] * box[3] + point_coords_wrt_image[:, 0] += box[0] + point_coords_wrt_image[:, 1] += box[1] + # round to integer coordinates + point_coords_wrt_image = np.floor(point_coords_wrt_image).astype(int) + # get labels + assert (point_coords_wrt_image >= 0).all(), (point_coords_wrt_image, mask.shape) + assert (point_coords_wrt_image[:, 0] < w).all(), (point_coords_wrt_image, mask.shape) + assert (point_coords_wrt_image[:, 1] < h).all(), (point_coords_wrt_image, mask.shape) + point_labels = mask[point_coords_wrt_image[:, 1], point_coords_wrt_image[:, 0]] + # store new annotations + new_ann["point_coords"] = point_coords_wrt_image.tolist() + new_ann["point_labels"] = point_labels.tolist() + new_annos.append(new_ann) + coco_points_json["annotations"] = new_annos + + with PathManager.open(output_filename, "w") as f: + json.dump(coco_points_json, f) + + print("{} is modified and stored in {}.".format(input_filename, output_filename)) + + +if __name__ == "__main__": + """ + Generate point-based supervision for COCO dataset. 
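+
+    For each instance, the requested number of points is sampled uniformly at random inside its
+    bounding box, and each point is labeled by reading the decoded ground-truth mask at that pixel.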
+ + Usage: + python tools/prepare_coco_point_annotations_without_masks.py \ + NUM_POINTS_PER_INSTANCE NUM_VERSIONS_WITH_DIFFERENT_SEED + + Example to generate point-based COCO dataset with 10 points per instance: + python tools/prepare_coco_point_annotations_without_masks.py 10 + """ + + # Fix random seed + seed_all_rng(12345) + + assert len(sys.argv) >= 2, "Please provide number of points to sample per instance" + dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco/annotations") + num_points_per_instance = int(sys.argv[1]) + if len(sys.argv) == 3: + repeat = int(sys.argv[2]) + else: + repeat = 1 + s = "instances_train2017" + for version in range(repeat): + print( + "Start sampling {} points per instance for annotations {}.".format( + num_points_per_instance, s + ) + ) + get_point_annotations( + os.path.join(dataset_dir, "{}.json".format(s)), + os.path.join( + dataset_dir, + "{}_n{}_v{}_without_masks.json".format(s, num_points_per_instance, version + 1), + ), + num_points_per_instance, + ) diff --git a/data_processing/detectron2/projects/PointSup/train_net.py b/data_processing/detectron2/projects/PointSup/train_net.py new file mode 100644 index 0000000..0fe970a --- /dev/null +++ b/data_processing/detectron2/projects/PointSup/train_net.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Point supervision Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results +from detectron2.projects.point_rend import add_pointrend_config +from detectron2.utils.logger import setup_logger + +from point_sup import PointSupDatasetMapper, add_point_sup_config + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains pre-defined default logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can write your + own training loop. You can use "tools/plain_train_net.py" as an example. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. 
+ """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type == "coco": + evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + elif len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def build_train_loader(cls, cfg): + if cfg.INPUT.POINT_SUP: + mapper = PointSupDatasetMapper(cfg, is_train=True) + else: + mapper = None + return build_detection_train_loader(cfg, mapper=mapper) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_pointrend_config(cfg) + add_point_sup_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + # Setup logger for "point_sup" module + setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="point_sup") + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if cfg.TEST.AUG.ENABLED: + res.update(Trainer.test_with_TTA(cfg, model)) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + """ + If you'd like to do anything fancier than the standard training logic, + consider writing your own training loop (see plain_train_net.py) or + subclassing the trainer. + """ + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/projects/README.md b/data_processing/detectron2/projects/README.md new file mode 100644 index 0000000..7fb29af --- /dev/null +++ b/data_processing/detectron2/projects/README.md @@ -0,0 +1,50 @@ + +Here are a few projects that are built on detectron2. +They are examples of how to use detectron2 as a library, to make your projects more +maintainable. + +## Projects by Facebook + +Note that these are research projects, and therefore may not have the same level +of support or stability as detectron2. 
+ ++ [DensePose: Dense Human Pose Estimation In The Wild](DensePose) ++ [Scale-Aware Trident Networks for Object Detection](TridentNet) ++ [TensorMask: A Foundation for Dense Object Segmentation](TensorMask) ++ [Mesh R-CNN](https://github.com/facebookresearch/meshrcnn) ++ [PointRend: Image Segmentation as Rendering](PointRend) ++ [Momentum Contrast for Unsupervised Visual Representation Learning](https://github.com/facebookresearch/moco/tree/master/detection) ++ [DETR: End-to-End Object Detection with Transformers](https://github.com/facebookresearch/detr/tree/master/d2) ++ [Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation](Panoptic-DeepLab) ++ [D2Go (Detectron2Go)](https://github.com/facebookresearch/d2go), an end-to-end production system for training and deployment for mobile platforms. ++ [Pointly-Supervised Instance Segmentation](PointSup) ++ [Unbiased Teacher for Semi-Supervised Object Detection](https://github.com/facebookresearch/unbiased-teacher) ++ [Rethinking "Batch" in BatchNorm](Rethinking-BatchNorm/) ++ [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://github.com/facebookresearch/MaskFormer) ++ [Exploring Plain Vision Transformer Backbones for Object Detection](ViTDet/) ++ [MViTv2: Improved Multiscale Vision Transformers for Classification and Detection](MViTv2/) + + +## External Projects + +External projects in the community that use detectron2: + + + ++ [AdelaiDet](https://github.com/aim-uofa/adet), a detection toolbox including FCOS, BlendMask, etc. ++ [CenterMask](https://github.com/youngwanLEE/centermask2) ++ [Res2Net backbones](https://github.com/Res2Net/Res2Net-detectron2) ++ [VoVNet backbones](https://github.com/youngwanLEE/vovnet-detectron2) ++ [FsDet](https://github.com/ucbdrive/few-shot-object-detection), Few-Shot Object Detection. ++ [Sparse R-CNN](https://github.com/PeizeSun/SparseR-CNN) ++ [BCNet](https://github.com/lkeab/BCNet), a bilayer decoupling instance segmentation method. ++ [DD3D](https://github.com/TRI-ML/dd3d), A fully convolutional 3D detector. ++ [detrex](https://github.com/IDEA-Research/detrex), a detection toolbox for transformer-based detection algorithms including Deformable-DETR, DAB-DETR, DN-DETR, DINO, etc. 
diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/README.md b/data_processing/detectron2/projects/Rethinking-BatchNorm/README.md new file mode 100644 index 0000000..42c5c68 --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/README.md @@ -0,0 +1,36 @@ +# Rethinking "Batch" in BatchNorm + +We provide configs that reproduce detection experiments in the paper [Rethinking "Batch" in BatchNorm](https://arxiv.org/abs/2105.07576). + +All configs can be trained with: + +``` +../../tools/lazyconfig_train_net.py --config-file configs/X.py --num-gpus 8 +``` + +## Mask R-CNN + +* `mask_rcnn_BNhead.py`, `mask_rcnn_BNhead_batch_stats.py`: + Mask R-CNN with BatchNorm in the head. See Table 3 in the paper. + +* `mask_rcnn_BNhead_shuffle.py`: Mask R-CNN with cross-GPU shuffling of head inputs. + See Figure 9 and Table 6 in the paper. + +* `mask_rcnn_SyncBNhead.py`: Mask R-CNN with cross-GPU SyncBatchNorm in the head. + It matches Table 6 in the paper. + +## RetinaNet + +* `retinanet_SyncBNhead.py`: RetinaNet with SyncBN in head, a straightforward implementation + which matches row 3 of Table 5. + +* `retinanet_SyncBNhead_SharedTraining.py`: RetinaNet with SyncBN in head, normalizing + all 5 feature levels together. Match row 1 of Table 5. + +The script `retinanet-eval-domain-specific.py` evaluates a checkpoint after recomputing +domain-specific statistics. Running it with +``` +./retinanet-eval-domain-specific.py checkpoint.pth +``` +on a model produced by the above two configs, can produce results that match row 4 and +row 2 of Table 5. diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead.py b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead.py new file mode 100644 index 0000000..336c133 --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead.py @@ -0,0 +1,18 @@ +from detectron2.model_zoo import get_config + +model = get_config("common/models/mask_rcnn_fpn.py").model + +model.backbone.bottom_up.freeze_at = 2 + +model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "BN" +# 4conv1fc head +model.roi_heads.box_head.conv_dims = [256, 256, 256, 256] +model.roi_heads.box_head.fc_dims = [1024] + +dataloader = get_config("common/data/coco.py").dataloader +lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_3x +optimizer = get_config("common/optim.py").SGD +train = get_config("common/train.py").train + +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +train.max_iter = 270000 # 3x for batchsize = 16 diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_batch_stats.py b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_batch_stats.py new file mode 100644 index 0000000..872e17c --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_batch_stats.py @@ -0,0 +1,20 @@ +from torch.nn import BatchNorm2d +from torch.nn import functional as F + + +class BatchNormBatchStat(BatchNorm2d): + """ + BN that uses batch stat in inference + """ + + def forward(self, input): + if self.training: + return super().forward(input) + return F.batch_norm(input, None, None, self.weight, self.bias, True, 1.0, self.eps) + + +# After training with the base config, it's sufficient to load its model with +# this config only for inference -- because the 
training-time behavior is identical. +from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train + +model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = BatchNormBatchStat diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_shuffle.py b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_shuffle.py new file mode 100644 index 0000000..5117a7d --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_BNhead_shuffle.py @@ -0,0 +1,74 @@ +import math +import torch +import torch.distributed as dist + +from detectron2.modeling.roi_heads import FastRCNNConvFCHead, MaskRCNNConvUpsampleHead +from detectron2.utils import comm +from fvcore.nn.distributed import differentiable_all_gather + + +def concat_all_gather(input): + bs_int = input.shape[0] + size_list = comm.all_gather(bs_int) + max_size = max(size_list) + max_shape = (max_size,) + input.shape[1:] + + padded_input = input.new_zeros(max_shape) + padded_input[:bs_int] = input + all_inputs = differentiable_all_gather(padded_input) + inputs = [x[:sz] for sz, x in zip(size_list, all_inputs)] + return inputs, size_list + + +def batch_shuffle(x): + # gather from all gpus + batch_size_this = x.shape[0] + all_xs, batch_size_all = concat_all_gather(x) + all_xs_concat = torch.cat(all_xs, dim=0) + total_bs = sum(batch_size_all) + + rank = dist.get_rank() + assert batch_size_all[rank] == batch_size_this + + idx_range = (sum(batch_size_all[:rank]), sum(batch_size_all[: rank + 1])) + + # random shuffle index + idx_shuffle = torch.randperm(total_bs, device=x.device) + # broadcast to all gpus + dist.broadcast(idx_shuffle, src=0) + + # index for restoring + idx_unshuffle = torch.argsort(idx_shuffle) + + # shuffled index for this gpu + splits = torch.split(idx_shuffle, math.ceil(total_bs / dist.get_world_size())) + if len(splits) > rank: + idx_this = splits[rank] + else: + idx_this = idx_shuffle.new_zeros([0]) + return all_xs_concat[idx_this], idx_unshuffle[idx_range[0] : idx_range[1]] + + +def batch_unshuffle(x, idx_unshuffle): + all_x, _ = concat_all_gather(x) + x_gather = torch.cat(all_x, dim=0) + return x_gather[idx_unshuffle] + + +def wrap_shuffle(module_type, method): + def new_method(self, x): + if self.training: + x, idx = batch_shuffle(x) + x = getattr(module_type, method)(self, x) + if self.training: + x = batch_unshuffle(x, idx) + return x + + return type(module_type.__name__ + "WithShuffle", (module_type,), {method: new_method}) + + +from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train + + +model.roi_heads.box_head._target_ = wrap_shuffle(FastRCNNConvFCHead, "forward") +model.roi_heads.mask_head._target_ = wrap_shuffle(MaskRCNNConvUpsampleHead, "layers") diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py new file mode 100644 index 0000000..5f05da0 --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/mask_rcnn_SyncBNhead.py @@ -0,0 +1,3 @@ +from .mask_rcnn_BNhead import model, dataloader, lr_multiplier, optimizer, train + +model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "SyncBN" diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead.py b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead.py new file 
mode 100644 index 0000000..222dfdd --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead.py @@ -0,0 +1,19 @@ +from detectron2.model_zoo import get_config +from torch import nn + +model = get_config("common/models/retinanet.py").model +model.backbone.bottom_up.freeze_at = 2 + +# The head will overwrite string "SyncBN" to use domain-specific BN, so we +# provide a class here to use shared BN in training. +model.head.norm = nn.SyncBatchNorm2d + +dataloader = get_config("common/data/coco.py").dataloader +lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_3x +optimizer = get_config("common/optim.py").SGD +train = get_config("common/train.py").train + +optimizer.lr = 0.01 + +train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +train.max_iter = 270000 # 3x for batchsize = 16 diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead_SharedTraining.py b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead_SharedTraining.py new file mode 100644 index 0000000..3f14600 --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/configs/retinanet_SyncBNhead_SharedTraining.py @@ -0,0 +1,32 @@ +from typing import List +import torch +from torch import Tensor, nn + +from detectron2.modeling.meta_arch.retinanet import RetinaNetHead + + +def apply_sequential(inputs, modules): + for mod in modules: + if isinstance(mod, (nn.BatchNorm2d, nn.SyncBatchNorm)): + # for BN layer, normalize all inputs together + shapes = [i.shape for i in inputs] + spatial_sizes = [s[2] * s[3] for s in shapes] + x = [i.flatten(2) for i in inputs] + x = torch.cat(x, dim=2).unsqueeze(3) + x = mod(x).split(spatial_sizes, dim=2) + inputs = [i.view(s) for s, i in zip(shapes, x)] + else: + inputs = [mod(i) for i in inputs] + return inputs + + +class RetinaNetHead_SharedTrainingBN(RetinaNetHead): + def forward(self, features: List[Tensor]): + logits = apply_sequential(features, list(self.cls_subnet) + [self.cls_score]) + bbox_reg = apply_sequential(features, list(self.bbox_subnet) + [self.bbox_pred]) + return logits, bbox_reg + + +from .retinanet_SyncBNhead import model, dataloader, lr_multiplier, optimizer, train + +model.head._target_ = RetinaNetHead_SharedTrainingBN diff --git a/data_processing/detectron2/projects/Rethinking-BatchNorm/retinanet-eval-domain-specific.py b/data_processing/detectron2/projects/Rethinking-BatchNorm/retinanet-eval-domain-specific.py new file mode 100644 index 0000000..49a74ad --- /dev/null +++ b/data_processing/detectron2/projects/Rethinking-BatchNorm/retinanet-eval-domain-specific.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
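+#
+# Evaluate a RetinaNet checkpoint with domain-specific BN statistics (see the project
+# README): load the retinanet_SyncBNhead config, replace the head norm with a
+# CycleBatchNormList (a separate set of statistics per feature level), recompute those
+# statistics with PreciseBN over the training loader, then run evaluation on the test set.
+#
+# Usage: ./retinanet-eval-domain-specific.py checkpoint.pth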
+import sys +import torch +from fvcore.nn.precise_bn import update_bn_stats + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import LazyConfig, instantiate +from detectron2.evaluation import inference_on_dataset +from detectron2.layers import CycleBatchNormList +from detectron2.utils.events import EventStorage +from detectron2.utils.logger import setup_logger + +logger = setup_logger() +setup_logger(name="fvcore") + + +if __name__ == "__main__": + checkpoint = sys.argv[1] + cfg = LazyConfig.load_rel("./configs/retinanet_SyncBNhead.py") + model = cfg.model + model.head.norm = lambda c: CycleBatchNormList(len(model.head_in_features), num_features=c) + model = instantiate(model) + model.cuda() + DetectionCheckpointer(model).load(checkpoint) + + cfg.dataloader.train.total_batch_size = 8 + logger.info("Running PreciseBN ...") + with EventStorage(), torch.no_grad(): + update_bn_stats(model, instantiate(cfg.dataloader.train), 500) + + logger.info("Running evaluation ...") + inference_on_dataset( + model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator) + ) diff --git a/data_processing/detectron2/projects/TensorMask/README.md b/data_processing/detectron2/projects/TensorMask/README.md new file mode 100644 index 0000000..e81307c --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/README.md @@ -0,0 +1,63 @@ + +# TensorMask in Detectron2 +**A Foundation for Dense Object Segmentation** + +Xinlei Chen, Ross Girshick, Kaiming He, Piotr Dollár + +[[`arXiv`](https://arxiv.org/abs/1903.12174)] [[`BibTeX`](#CitingTensorMask)] + +
+ +
+ +In this repository, we release code for TensorMask in Detectron2. +TensorMask is a dense sliding-window instance segmentation framework that, for the first time, achieves results close to the well-developed Mask R-CNN framework -- both qualitatively and quantitatively. It establishes a conceptually complementary direction for object instance segmentation research. + +## Installation +First install Detectron2 following the [documentation](https://detectron2.readthedocs.io/tutorials/install.html) and +[setup the dataset](../../datasets). Then compile the TensorMask-specific op (`swap_align2nat`): +```bash +pip install -e /path/to/detectron2/projects/TensorMask +``` + +## Training + +To train a model, run: +```bash +python /path/to/detectron2/projects/TensorMask/train_net.py --config-file +``` + +For example, to launch TensorMask BiPyramid training (1x schedule) with ResNet-50 backbone on 8 GPUs, +one should execute: +```bash +python /path/to/detectron2/projects/TensorMask/train_net.py --config-file configs/tensormask_R_50_FPN_1x.yaml --num-gpus 8 +``` + +## Evaluation + +Model evaluation can be done similarly (6x schedule with scale augmentation): +```bash +python /path/to/detectron2/projects/TensorMask/train_net.py --config-file configs/tensormask_R_50_FPN_6x.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint +``` + +# Pretrained Models + +| Backbone | lr sched | AP box | AP mask | download | +| -------- | -------- | -- | --- | -------- | +| R50 | 1x | 37.6 | 32.4 | model \|  metrics | +| R50 | 6x | 41.4 | 35.8 | model \|  metrics | + + +## Citing TensorMask + +If you use TensorMask, please use the following BibTeX entry. + +``` +@InProceedings{chen2019tensormask, + title={Tensormask: A Foundation for Dense Object Segmentation}, + author={Chen, Xinlei and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, + journal={The International Conference on Computer Vision (ICCV)}, + year={2019} +} +``` + diff --git a/data_processing/detectron2/projects/TensorMask/configs/Base-TensorMask.yaml b/data_processing/detectron2/projects/TensorMask/configs/Base-TensorMask.yaml new file mode 100644 index 0000000..a724534 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/configs/Base-TensorMask.yaml @@ -0,0 +1,25 @@ +MODEL: + META_ARCHITECTURE: "TensorMask" + MASK_ON: True + BACKBONE: + NAME: "build_retinanet_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[44, 60], [88, 120], [176, 240], [352, 480], [704, 960], [1408, 1920]] + ASPECT_RATIOS: [[1.0]] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + FUSE_TYPE: "avg" + TENSOR_MASK: + ALIGNED_ON: True + BIPYRAMID_ON: True +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +VERSION: 2 diff --git a/data_processing/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_1x.yaml b/data_processing/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_1x.yaml new file mode 100644 index 0000000..5d5eee1 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-TensorMask.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_6x.yaml 
b/data_processing/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_6x.yaml new file mode 100644 index 0000000..366a965 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_6x.yaml @@ -0,0 +1,11 @@ +_BASE_: "Base-TensorMask.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (480000, 520000) + MAX_ITER: 540000 +INPUT: + MIN_SIZE_TRAIN_SAMPLING: "range" + MIN_SIZE_TRAIN: (640, 800) diff --git a/data_processing/detectron2/projects/TensorMask/setup.py b/data_processing/detectron2/projects/TensorMask/setup.py new file mode 100644 index 0000000..f6980e0 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/setup.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. + +import glob +import os +from setuptools import find_packages, setup +import torch +from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension + + +def get_extensions(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, "tensormask", "layers", "csrc") + + main_source = os.path.join(extensions_dir, "vision.cpp") + sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp")) + source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob( + os.path.join(extensions_dir, "*.cu") + ) + + sources = [main_source] + sources + + extension = CppExtension + + extra_compile_args = {"cxx": []} + define_macros = [] + + if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1": + extension = CUDAExtension + sources += source_cuda + define_macros += [("WITH_CUDA", None)] + extra_compile_args["nvcc"] = [ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ] + + # It's better if pytorch can do this by default .. + CC = os.environ.get("CC", None) + if CC is not None: + extra_compile_args["nvcc"].append("-ccbin={}".format(CC)) + + sources = [os.path.join(extensions_dir, s) for s in sources] + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + "tensormask._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + + return ext_modules + + +setup( + name="tensormask", + version="0.1", + author="FAIR", + packages=find_packages(exclude=("configs", "tests")), + python_requires=">=3.7", + ext_modules=get_extensions(), + cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, +) diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/__init__.py b/data_processing/detectron2/projects/TensorMask/tensormask/__init__.py new file mode 100644 index 0000000..eec7978 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tensormask/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .config import add_tensormask_config +from .arch import TensorMask diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/arch.py b/data_processing/detectron2/projects/TensorMask/tensormask/arch.py new file mode 100644 index 0000000..d395bea --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tensormask/arch.py @@ -0,0 +1,913 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
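+#
+# TensorMask meta-architecture. This file defines, in order: layout/assignment and
+# mask-pasting helper functions, a TensorMask-specific anchor generator, the TensorMask
+# model itself (registered in META_ARCH_REGISTRY, so it is built from any config whose
+# MODEL.META_ARCHITECTURE is "TensorMask"; see configs/Base-TensorMask.yaml and
+# tensormask/config.py), and the TensorMaskHead that predicts class logits, box deltas
+# and dense masks for every anchor.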
+import copy +import math +from typing import List +import torch +import torch.nn.functional as F +from fvcore.nn import sigmoid_focal_loss_star_jit, smooth_l1_loss +from torch import nn + +from detectron2.layers import ShapeSpec, batched_nms, cat, paste_masks_in_image +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator +from detectron2.modeling.backbone import build_backbone +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY +from detectron2.modeling.meta_arch.retinanet import permute_to_N_HWA_K +from detectron2.structures import Boxes, ImageList, Instances + +from tensormask.layers import SwapAlign2Nat + +__all__ = ["TensorMask"] + + +def permute_all_cls_and_box_to_N_HWA_K_and_concat(pred_logits, pred_anchor_deltas, num_classes=80): + """ + Rearrange the tensor layout from the network output, i.e.: + list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi) + to per-image predictions, i.e.: + Tensor: of shape (N x sum(Hi x Wi x A), K) + """ + # for each feature level, permute the outputs to make them be in the + # same format as the labels. + pred_logits_flattened = [permute_to_N_HWA_K(x, num_classes) for x in pred_logits] + pred_anchor_deltas_flattened = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas] + # concatenate on the first dimension (representing the feature levels), to + # take into account the way the labels were generated (with all feature maps + # being concatenated as well) + pred_logits = cat(pred_logits_flattened, dim=1).view(-1, num_classes) + pred_anchor_deltas = cat(pred_anchor_deltas_flattened, dim=1).view(-1, 4) + return pred_logits, pred_anchor_deltas + + +def _assignment_rule( + gt_boxes, + anchor_boxes, + unit_lengths, + min_anchor_size, + scale_thresh=2.0, + spatial_thresh=1.0, + uniqueness_on=True, +): + """ + Given two lists of boxes of N ground truth boxes and M anchor boxes, + compute the assignment between the two, following the assignment rules in + https://arxiv.org/abs/1903.12174. + The box order must be (xmin, ymin, xmax, ymax), so please make sure to convert + to BoxMode.XYXY_ABS before calling this function. + + Args: + gt_boxes, anchor_boxes (Boxes): two Boxes. Contains N & M boxes/anchors, respectively. + unit_lengths (Tensor): Contains the unit lengths of M anchor boxes. + min_anchor_size (float): Minimum size of the anchor, in pixels + scale_thresh (float): The `scale` threshold: the maximum size of the anchor + should not be greater than scale_thresh x max(h, w) of + the ground truth box. + spatial_thresh (float): The `spatial` threshold: the l2 distance between the + center of the anchor and the ground truth box should not + be greater than spatial_thresh x u where u is the unit length. 
+ + Returns: + matches (Tensor[int64]): a vector of length M, where matches[i] is a matched + ground-truth index in [0, N) + match_labels (Tensor[int8]): a vector of length M, where pred_labels[i] indicates + whether a prediction is a true or false positive or ignored + """ + gt_boxes, anchor_boxes = gt_boxes.tensor, anchor_boxes.tensor + N = gt_boxes.shape[0] + M = anchor_boxes.shape[0] + if N == 0 or M == 0: + return ( + gt_boxes.new_full((N,), 0, dtype=torch.int64), + gt_boxes.new_full((N,), -1, dtype=torch.int8), + ) + + # Containment rule + lt = torch.min(gt_boxes[:, None, :2], anchor_boxes[:, :2]) # [N,M,2] + rb = torch.max(gt_boxes[:, None, 2:], anchor_boxes[:, 2:]) # [N,M,2] + union = cat([lt, rb], dim=2) # [N,M,4] + + dummy_gt_boxes = torch.zeros_like(gt_boxes) + anchor = dummy_gt_boxes[:, None, :] + anchor_boxes[:, :] # [N,M,4] + + contain_matrix = torch.all(union == anchor, dim=2) # [N,M] + + # Centrality rule, scale + gt_size_lower = torch.max(gt_boxes[:, 2:] - gt_boxes[:, :2], dim=1)[0] # [N] + gt_size_upper = gt_size_lower * scale_thresh # [N] + # Fall back for small objects + gt_size_upper[gt_size_upper < min_anchor_size] = min_anchor_size + # Due to sampling of locations, the anchor sizes are deducted with sampling strides + anchor_size = ( + torch.max(anchor_boxes[:, 2:] - anchor_boxes[:, :2], dim=1)[0] - unit_lengths + ) # [M] + + size_diff_upper = gt_size_upper[:, None] - anchor_size # [N,M] + scale_matrix = size_diff_upper >= 0 # [N,M] + + # Centrality rule, spatial + gt_center = (gt_boxes[:, 2:] + gt_boxes[:, :2]) / 2 # [N,2] + anchor_center = (anchor_boxes[:, 2:] + anchor_boxes[:, :2]) / 2 # [M,2] + offset_center = gt_center[:, None, :] - anchor_center[:, :] # [N,M,2] + offset_center /= unit_lengths[:, None] # [N,M,2] + spatial_square = spatial_thresh * spatial_thresh + spatial_matrix = torch.sum(offset_center * offset_center, dim=2) <= spatial_square + + assign_matrix = (contain_matrix & scale_matrix & spatial_matrix).int() + + # assign_matrix is N (gt) x M (predicted) + # Max over gt elements (dim 0) to find best gt candidate for each prediction + matched_vals, matches = assign_matrix.max(dim=0) + match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) + + match_labels[matched_vals == 0] = 0 + match_labels[matched_vals == 1] = 1 + + # find all the elements that match to ground truths multiple times + not_unique_idxs = assign_matrix.sum(dim=0) > 1 + if uniqueness_on: + match_labels[not_unique_idxs] = 0 + else: + match_labels[not_unique_idxs] = -1 + + return matches, match_labels + + +# TODO make the paste_mask function in d2 core support mask list +def _paste_mask_lists_in_image(masks, boxes, image_shape, threshold=0.5): + """ + Paste a list of masks that are of various resolutions (e.g., 28 x 28) into an image. + The location, height, and width for pasting each mask is determined by their + corresponding bounding boxes in boxes. + + Args: + masks (list(Tensor)): A list of Tensor of shape (1, Hmask_i, Wmask_i). + Values are in [0, 1]. The list length, Bimg, is the + number of detected object instances in the image. + boxes (Boxes): A Boxes of length Bimg. boxes.tensor[i] and masks[i] correspond + to the same object instance. + image_shape (tuple): height, width + threshold (float): A threshold in [0, 1] for converting the (soft) masks to + binary masks. + + Returns: + img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the + number of detected object instances and Himage, Wimage are the image width + and height. 
img_masks[i] is a binary mask for object instance i. + """ + if len(masks) == 0: + return torch.empty((0, 1) + image_shape, dtype=torch.uint8) + + # Loop over masks groups. Each group has the same mask prediction size. + img_masks = [] + ind_masks = [] + mask_sizes = torch.tensor([m.shape[-1] for m in masks]) + unique_sizes = torch.unique(mask_sizes) + for msize in unique_sizes.tolist(): + cur_ind = torch.where(mask_sizes == msize)[0] + ind_masks.append(cur_ind) + + cur_masks = cat([masks[i] for i in cur_ind]) + cur_boxes = boxes[cur_ind] + img_masks.append(paste_masks_in_image(cur_masks, cur_boxes, image_shape, threshold)) + + img_masks = cat(img_masks) + ind_masks = cat(ind_masks) + + img_masks_out = torch.empty_like(img_masks) + img_masks_out[ind_masks, :, :] = img_masks + + return img_masks_out + + +def _postprocess(results, result_mask_info, output_height, output_width, mask_threshold=0.5): + """ + Post-process the output boxes for TensorMask. + The input images are often resized when entering an object detector. + As a result, we often need the outputs of the detector in a different + resolution from its inputs. + + This function will postprocess the raw outputs of TensorMask + to produce outputs according to the desired output resolution. + + Args: + results (Instances): the raw outputs from the detector. + `results.image_size` contains the input image resolution the detector sees. + This object might be modified in-place. Note that it does not contain the field + `pred_masks`, which is provided by another input `result_masks`. + result_mask_info (list[Tensor], Boxes): a pair of two items for mask related results. + The first item is a list of #detection tensors, each is the predicted masks. + The second item is the anchors corresponding to the predicted masks. + output_height, output_width: the desired output resolution. + + Returns: + Instances: the postprocessed output from the model, based on the output resolution + """ + scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0]) + results = Instances((output_height, output_width), **results.get_fields()) + + output_boxes = results.pred_boxes + output_boxes.tensor[:, 0::2] *= scale_x + output_boxes.tensor[:, 1::2] *= scale_y + output_boxes.clip(results.image_size) + + inds_nonempty = output_boxes.nonempty() + results = results[inds_nonempty] + result_masks, result_anchors = result_mask_info + if result_masks: + result_anchors.tensor[:, 0::2] *= scale_x + result_anchors.tensor[:, 1::2] *= scale_y + result_masks = [x for (i, x) in zip(inds_nonempty.tolist(), result_masks) if i] + results.pred_masks = _paste_mask_lists_in_image( + result_masks, + result_anchors[inds_nonempty], + results.image_size, + threshold=mask_threshold, + ) + return results + + +class TensorMaskAnchorGenerator(DefaultAnchorGenerator): + """ + For a set of image sizes and feature maps, computes a set of anchors for TensorMask. + It also computes the unit lengths and indexes for each anchor box. 
+ """ + + def grid_anchors_with_unit_lengths_and_indexes(self, grid_sizes): + anchors = [] + unit_lengths = [] + indexes = [] + for lvl, (size, stride, base_anchors) in enumerate( + zip(grid_sizes, self.strides, self.cell_anchors) + ): + grid_height, grid_width = size + device = base_anchors.device + shifts_x = torch.arange( + 0, grid_width * stride, step=stride, dtype=torch.float32, device=device + ) + shifts_y = torch.arange( + 0, grid_height * stride, step=stride, dtype=torch.float32, device=device + ) + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=2) + # Stack anchors in shapes of (HWA, 4) + cur_anchor = (shifts[:, :, None, :] + base_anchors.view(1, 1, -1, 4)).view(-1, 4) + anchors.append(cur_anchor) + unit_lengths.append( + torch.full((cur_anchor.shape[0],), stride, dtype=torch.float32, device=device) + ) + # create mask indexes using mesh grid + shifts_l = torch.full((1,), lvl, dtype=torch.int64, device=device) + shifts_i = torch.zeros((1,), dtype=torch.int64, device=device) + shifts_h = torch.arange(0, grid_height, dtype=torch.int64, device=device) + shifts_w = torch.arange(0, grid_width, dtype=torch.int64, device=device) + shifts_a = torch.arange(0, base_anchors.shape[0], dtype=torch.int64, device=device) + grids = torch.meshgrid(shifts_l, shifts_i, shifts_h, shifts_w, shifts_a) + + indexes.append(torch.stack(grids, dim=5).view(-1, 5)) + + return anchors, unit_lengths, indexes + + def forward(self, features): + """ + Returns: + list[list[Boxes]]: a list of #image elements. Each is a list of #feature level Boxes. + The Boxes contains anchors of this image on the specific feature level. + list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors. + The tensor contains strides, or unit lengths for the anchors. + list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors. + The Tensor contains indexes for the anchors, with the last dimension meaning + (L, N, H, W, A), where L is level, I is image (not set yet), H is height, + W is width, and A is anchor. + """ + num_images = len(features[0]) + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_list, lengths_list, indexes_list = self.grid_anchors_with_unit_lengths_and_indexes( + grid_sizes + ) + + # Convert anchors from Tensor to Boxes + anchors_per_im = [Boxes(x) for x in anchors_list] + + # TODO it can be simplified to not return duplicated information for + # each image, just like detectron2's own AnchorGenerator + anchors = [copy.deepcopy(anchors_per_im) for _ in range(num_images)] + unit_lengths = [copy.deepcopy(lengths_list) for _ in range(num_images)] + indexes = [copy.deepcopy(indexes_list) for _ in range(num_images)] + + return anchors, unit_lengths, indexes + + +@META_ARCH_REGISTRY.register() +class TensorMask(nn.Module): + """ + TensorMask model. Creates FPN backbone, anchors and a head for classification + and box regression. Calculates and applies proper losses to class, box, and + masks. 
+ """ + + def __init__(self, cfg): + super().__init__() + + # fmt: off + self.num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES + self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES + self.anchor_sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES + self.num_levels = len(cfg.MODEL.ANCHOR_GENERATOR.SIZES) + # Loss parameters: + self.focal_loss_alpha = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA + self.focal_loss_gamma = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA + # Inference parameters: + self.score_threshold = cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST + self.topk_candidates = cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST + self.nms_threshold = cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST + self.detections_im = cfg.TEST.DETECTIONS_PER_IMAGE + # Mask parameters: + self.mask_on = cfg.MODEL.MASK_ON + self.mask_loss_weight = cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT + self.mask_pos_weight = torch.tensor(cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT, + dtype=torch.float32) + self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON + # fmt: on + + # build the backbone + self.backbone = build_backbone(cfg) + + backbone_shape = self.backbone.output_shape() + feature_shapes = [backbone_shape[f] for f in self.in_features] + feature_strides = [x.stride for x in feature_shapes] + # build anchors + self.anchor_generator = TensorMaskAnchorGenerator(cfg, feature_shapes) + self.num_anchors = self.anchor_generator.num_cell_anchors[0] + anchors_min_level = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0] + self.mask_sizes = [size // feature_strides[0] for size in anchors_min_level] + self.min_anchor_size = min(anchors_min_level) - feature_strides[0] + + # head of the TensorMask + self.head = TensorMaskHead( + cfg, self.num_levels, self.num_anchors, self.mask_sizes, feature_shapes + ) + # box transform + self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS) + self.register_buffer("pixel_mean", torch.tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1), False) + + @property + def device(self): + return self.pixel_mean.device + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DetectionTransform` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + image: Tensor, image in (C, H, W) format. + instances: Instances + Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + Returns: + losses (dict[str: Tensor]): mapping from a named loss to a tensor + storing the loss. Used during training only. + """ + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + features = [features[f] for f in self.in_features] + # apply the TensorMask head + pred_logits, pred_deltas, pred_masks = self.head(features) + # generate anchors based on features, is it image specific? 
+ anchors, unit_lengths, indexes = self.anchor_generator(features) + + if self.training: + # get ground truths for class labels and box targets, it will label each anchor + gt_class_info, gt_delta_info, gt_mask_info, num_fg = self.get_ground_truth( + anchors, unit_lengths, indexes, gt_instances + ) + # compute the loss + return self.losses( + gt_class_info, + gt_delta_info, + gt_mask_info, + num_fg, + pred_logits, + pred_deltas, + pred_masks, + ) + else: + # do inference to get the output + results = self.inference(pred_logits, pred_deltas, pred_masks, anchors, indexes, images) + processed_results = [] + for results_im, input_im, image_size in zip( + results, batched_inputs, images.image_sizes + ): + height = input_im.get("height", image_size[0]) + width = input_im.get("width", image_size[1]) + # this is to do post-processing with the image size + result_box, result_mask = results_im + r = _postprocess(result_box, result_mask, height, width) + processed_results.append({"instances": r}) + return processed_results + + def losses( + self, + gt_class_info, + gt_delta_info, + gt_mask_info, + num_fg, + pred_logits, + pred_deltas, + pred_masks, + ): + """ + Args: + For `gt_class_info`, `gt_delta_info`, `gt_mask_info` and `num_fg` parameters, see + :meth:`TensorMask.get_ground_truth`. + For `pred_logits`, `pred_deltas` and `pred_masks`, see + :meth:`TensorMaskHead.forward`. + + Returns: + losses (dict[str: Tensor]): mapping from a named loss to a scalar tensor + storing the loss. Used during training only. The potential dict keys are: + "loss_cls", "loss_box_reg" and "loss_mask". + """ + gt_classes_target, gt_valid_inds = gt_class_info + gt_deltas, gt_fg_inds = gt_delta_info + gt_masks, gt_mask_inds = gt_mask_info + loss_normalizer = torch.tensor(max(1, num_fg), dtype=torch.float32, device=self.device) + + # classification and regression + pred_logits, pred_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat( + pred_logits, pred_deltas, self.num_classes + ) + loss_cls = ( + sigmoid_focal_loss_star_jit( + pred_logits[gt_valid_inds], + gt_classes_target[gt_valid_inds], + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) + / loss_normalizer + ) + + if num_fg == 0: + loss_box_reg = pred_deltas.sum() * 0 + else: + loss_box_reg = ( + smooth_l1_loss(pred_deltas[gt_fg_inds], gt_deltas, beta=0.0, reduction="sum") + / loss_normalizer + ) + losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg} + + # mask prediction + if self.mask_on: + loss_mask = 0 + for lvl in range(self.num_levels): + cur_level_factor = 2**lvl if self.bipyramid_on else 1 + for anc in range(self.num_anchors): + cur_gt_mask_inds = gt_mask_inds[lvl][anc] + if cur_gt_mask_inds is None: + loss_mask += pred_masks[lvl][anc][0, 0, 0, 0] * 0 + else: + cur_mask_size = self.mask_sizes[anc] * cur_level_factor + # TODO maybe there are numerical issues when mask sizes are large + cur_size_divider = torch.tensor( + self.mask_loss_weight / (cur_mask_size**2), + dtype=torch.float32, + device=self.device, + ) + + cur_pred_masks = pred_masks[lvl][anc][ + cur_gt_mask_inds[:, 0], # N + :, # V x U + cur_gt_mask_inds[:, 1], # H + cur_gt_mask_inds[:, 2], # W + ] + + loss_mask += F.binary_cross_entropy_with_logits( + cur_pred_masks.view(-1, cur_mask_size, cur_mask_size), # V, U + gt_masks[lvl][anc].to(dtype=torch.float32), + reduction="sum", + weight=cur_size_divider, + pos_weight=self.mask_pos_weight, + ) + losses["loss_mask"] = loss_mask / loss_normalizer + return losses + + @torch.no_grad() + def 
get_ground_truth(self, anchors, unit_lengths, indexes, targets): + """ + Args: + anchors (list[list[Boxes]]): a list of N=#image elements. Each is a + list of #feature level Boxes. The Boxes contains anchors of + this image on the specific feature level. + unit_lengths (list[list[Tensor]]): a list of N=#image elements. Each is a + list of #feature level Tensor. The tensor contains unit lengths for anchors of + this image on the specific feature level. + indexes (list[list[Tensor]]): a list of N=#image elements. Each is a + list of #feature level Tensor. The tensor contains the 5D index of + each anchor, the second dimension means (L, N, H, W, A), where L + is level, I is image, H is height, W is width, and A is anchor. + targets (list[Instances]): a list of N `Instances`s. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. Specify `targets` during training only. + + Returns: + gt_class_info (Tensor, Tensor): A pair of two tensors for classification. + The first one is an integer tensor of shape (R, #classes) storing ground-truth + labels for each anchor. R is the total number of anchors in the batch. + The second one is an integer tensor of shape (R,), to indicate which + anchors are valid for loss computation, which anchors are not. + gt_delta_info (Tensor, Tensor): A pair of two tensors for boxes. + The first one, of shape (F, 4). F=#foreground anchors. + The last dimension represents ground-truth box2box transform + targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box. + Only foreground anchors have values in this tensor. Could be `None` if F=0. + The second one, of shape (R,), is an integer tensor indicating which anchors + are foreground ones used for box regression. Could be `None` if F=0. + gt_mask_info (list[list[Tensor]], list[list[Tensor]]): A pair of two lists for masks. + The first one is a list of P=#feature level elements. Each is a + list of A=#anchor tensors. Each tensor contains the ground truth + masks of the same size and for the same feature level. Could be `None`. + The second one is a list of P=#feature level elements. Each is a + list of A=#anchor tensors. Each tensor contains the location of the ground truth + masks of the same size and for the same feature level. The second dimension means + (N, H, W), where N is image, H is height, and W is width. Could be `None`. + num_fg (int): F=#foreground anchors, used later for loss normalization. 
+ """ + gt_classes = [] + gt_deltas = [] + gt_masks = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)] + gt_mask_inds = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)] + + anchors = [Boxes.cat(anchors_i) for anchors_i in anchors] + unit_lengths = [cat(unit_lengths_i) for unit_lengths_i in unit_lengths] + indexes = [cat(indexes_i) for indexes_i in indexes] + + num_fg = 0 + for i, (anchors_im, unit_lengths_im, indexes_im, targets_im) in enumerate( + zip(anchors, unit_lengths, indexes, targets) + ): + # Initialize all + gt_classes_i = torch.full_like( + unit_lengths_im, self.num_classes, dtype=torch.int64, device=self.device + ) + # Ground truth classes + has_gt = len(targets_im) > 0 + if has_gt: + # Compute the pairwise matrix + gt_matched_inds, anchor_labels = _assignment_rule( + targets_im.gt_boxes, anchors_im, unit_lengths_im, self.min_anchor_size + ) + # Find the foreground instances + fg_inds = anchor_labels == 1 + fg_anchors = anchors_im[fg_inds] + num_fg += len(fg_anchors) + # Find the ground truths for foreground instances + gt_fg_matched_inds = gt_matched_inds[fg_inds] + # Assign labels for foreground instances + gt_classes_i[fg_inds] = targets_im.gt_classes[gt_fg_matched_inds] + # Anchors with label -1 are ignored, others are left as negative + gt_classes_i[anchor_labels == -1] = -1 + + # Boxes + # Ground truth box regression, only for foregrounds + matched_gt_boxes = targets_im[gt_fg_matched_inds].gt_boxes + # Compute box regression offsets for foregrounds only + gt_deltas_i = self.box2box_transform.get_deltas( + fg_anchors.tensor, matched_gt_boxes.tensor + ) + gt_deltas.append(gt_deltas_i) + + # Masks + if self.mask_on: + # Compute masks for each level and each anchor + matched_indexes = indexes_im[fg_inds, :] + for lvl in range(self.num_levels): + ids_lvl = matched_indexes[:, 0] == lvl + if torch.any(ids_lvl): + cur_level_factor = 2**lvl if self.bipyramid_on else 1 + for anc in range(self.num_anchors): + ids_lvl_anchor = ids_lvl & (matched_indexes[:, 4] == anc) + if torch.any(ids_lvl_anchor): + gt_masks[lvl][anc].append( + targets_im[ + gt_fg_matched_inds[ids_lvl_anchor] + ].gt_masks.crop_and_resize( + fg_anchors[ids_lvl_anchor].tensor, + self.mask_sizes[anc] * cur_level_factor, + ) + ) + # Select (N, H, W) dimensions + gt_mask_inds_lvl_anc = matched_indexes[ids_lvl_anchor, 1:4] + # Set the image index to the current image + gt_mask_inds_lvl_anc[:, 0] = i + gt_mask_inds[lvl][anc].append(gt_mask_inds_lvl_anc) + gt_classes.append(gt_classes_i) + + # Classes and boxes + gt_classes = cat(gt_classes) + gt_valid_inds = gt_classes >= 0 + gt_fg_inds = gt_valid_inds & (gt_classes < self.num_classes) + gt_classes_target = torch.zeros( + (gt_classes.shape[0], self.num_classes), dtype=torch.float32, device=self.device + ) + gt_classes_target[gt_fg_inds, gt_classes[gt_fg_inds]] = 1 + gt_deltas = cat(gt_deltas) if gt_deltas else None + + # Masks + gt_masks = [[cat(mla) if mla else None for mla in ml] for ml in gt_masks] + gt_mask_inds = [[cat(ila) if ila else None for ila in il] for il in gt_mask_inds] + return ( + (gt_classes_target, gt_valid_inds), + (gt_deltas, gt_fg_inds), + (gt_masks, gt_mask_inds), + num_fg, + ) + + def inference(self, pred_logits, pred_deltas, pred_masks, anchors, indexes, images): + """ + Arguments: + pred_logits, pred_deltas, pred_masks: Same as the output of: + meth:`TensorMaskHead.forward` + anchors, indexes: Same as the input of meth:`TensorMask.get_ground_truth` + images (ImageList): the input images + + Returns: + 
results (List[Instances]): a list of #images elements. + """ + assert len(anchors) == len(images) + results = [] + + pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits] + pred_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_deltas] + + pred_logits = cat(pred_logits, dim=1) + pred_deltas = cat(pred_deltas, dim=1) + + for img_idx, (anchors_im, indexes_im) in enumerate(zip(anchors, indexes)): + # Get the size of the current image + image_size = images.image_sizes[img_idx] + + logits_im = pred_logits[img_idx] + deltas_im = pred_deltas[img_idx] + + if self.mask_on: + masks_im = [[mla[img_idx] for mla in ml] for ml in pred_masks] + else: + masks_im = [None] * self.num_levels + results_im = self.inference_single_image( + logits_im, + deltas_im, + masks_im, + Boxes.cat(anchors_im), + cat(indexes_im), + tuple(image_size), + ) + results.append(results_im) + return results + + def inference_single_image( + self, pred_logits, pred_deltas, pred_masks, anchors, indexes, image_size + ): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Arguments: + pred_logits (list[Tensor]): list of #feature levels. Each entry contains + tensor of size (AxHxW, K) + pred_deltas (list[Tensor]): Same shape as 'pred_logits' except that K becomes 4. + pred_masks (list[list[Tensor]]): List of #feature levels, each is a list of #anchors. + Each entry contains tensor of size (M_i*M_i, H, W). `None` if mask_on=False. + anchors (list[Boxes]): list of #feature levels. Each entry contains + a Boxes object, which contains all the anchors for that + image in that feature level. + image_size (tuple(H, W)): a tuple of the image height and width. + + Returns: + Same as `inference`, but for only one image. + """ + pred_logits = pred_logits.flatten().sigmoid_() + # We get top locations across all levels to accelerate the inference speed, + # which does not seem to affect the accuracy. 
+ # First select values above the threshold + logits_top_idxs = torch.where(pred_logits > self.score_threshold)[0] + # Then get the top values + num_topk = min(self.topk_candidates, logits_top_idxs.shape[0]) + pred_prob, topk_idxs = pred_logits[logits_top_idxs].sort(descending=True) + # Keep top k scoring values + pred_prob = pred_prob[:num_topk] + # Keep top k values + top_idxs = logits_top_idxs[topk_idxs[:num_topk]] + + # class index + cls_idxs = top_idxs % self.num_classes + # HWA index + top_idxs //= self.num_classes + # predict boxes + pred_boxes = self.box2box_transform.apply_deltas( + pred_deltas[top_idxs], anchors[top_idxs].tensor + ) + # apply nms + keep = batched_nms(pred_boxes, pred_prob, cls_idxs, self.nms_threshold) + # pick the top ones + keep = keep[: self.detections_im] + + results = Instances(image_size) + results.pred_boxes = Boxes(pred_boxes[keep]) + results.scores = pred_prob[keep] + results.pred_classes = cls_idxs[keep] + + # deal with masks + result_masks, result_anchors = [], None + if self.mask_on: + # index and anchors, useful for masks + top_indexes = indexes[top_idxs] + top_anchors = anchors[top_idxs] + result_indexes = top_indexes[keep] + result_anchors = top_anchors[keep] + # Get masks and do sigmoid + for lvl, _, h, w, anc in result_indexes.tolist(): + cur_size = self.mask_sizes[anc] * (2**lvl if self.bipyramid_on else 1) + result_masks.append( + torch.sigmoid(pred_masks[lvl][anc][:, h, w].view(1, cur_size, cur_size)) + ) + + return results, (result_masks, result_anchors) + + def preprocess_image(self, batched_inputs): + """ + Normalize, pad and batch the input images. + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + return images + + +class TensorMaskHead(nn.Module): + def __init__(self, cfg, num_levels, num_anchors, mask_sizes, input_shape: List[ShapeSpec]): + """ + TensorMask head. 
+ """ + super().__init__() + # fmt: off + self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES + in_channels = input_shape[0].channels + num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES + cls_channels = cfg.MODEL.TENSOR_MASK.CLS_CHANNELS + num_convs = cfg.MODEL.TENSOR_MASK.NUM_CONVS + # box parameters + bbox_channels = cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS + # mask parameters + self.mask_on = cfg.MODEL.MASK_ON + self.mask_sizes = mask_sizes + mask_channels = cfg.MODEL.TENSOR_MASK.MASK_CHANNELS + self.align_on = cfg.MODEL.TENSOR_MASK.ALIGNED_ON + self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON + # fmt: on + + # class subnet + cls_subnet = [] + cur_channels = in_channels + for _ in range(num_convs): + cls_subnet.append( + nn.Conv2d(cur_channels, cls_channels, kernel_size=3, stride=1, padding=1) + ) + cur_channels = cls_channels + cls_subnet.append(nn.ReLU()) + + self.cls_subnet = nn.Sequential(*cls_subnet) + self.cls_score = nn.Conv2d( + cur_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1 + ) + modules_list = [self.cls_subnet, self.cls_score] + + # box subnet + bbox_subnet = [] + cur_channels = in_channels + for _ in range(num_convs): + bbox_subnet.append( + nn.Conv2d(cur_channels, bbox_channels, kernel_size=3, stride=1, padding=1) + ) + cur_channels = bbox_channels + bbox_subnet.append(nn.ReLU()) + + self.bbox_subnet = nn.Sequential(*bbox_subnet) + self.bbox_pred = nn.Conv2d( + cur_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1 + ) + modules_list.extend([self.bbox_subnet, self.bbox_pred]) + + # mask subnet + if self.mask_on: + mask_subnet = [] + cur_channels = in_channels + for _ in range(num_convs): + mask_subnet.append( + nn.Conv2d(cur_channels, mask_channels, kernel_size=3, stride=1, padding=1) + ) + cur_channels = mask_channels + mask_subnet.append(nn.ReLU()) + + self.mask_subnet = nn.Sequential(*mask_subnet) + modules_list.append(self.mask_subnet) + for mask_size in self.mask_sizes: + cur_mask_module = "mask_pred_%02d" % mask_size + self.add_module( + cur_mask_module, + nn.Conv2d( + cur_channels, mask_size * mask_size, kernel_size=1, stride=1, padding=0 + ), + ) + modules_list.append(getattr(self, cur_mask_module)) + if self.align_on: + if self.bipyramid_on: + for lvl in range(num_levels): + cur_mask_module = "align2nat_%02d" % lvl + lambda_val = 2**lvl + setattr(self, cur_mask_module, SwapAlign2Nat(lambda_val)) + # Also the fusing layer, stay at the same channel size + mask_fuse = [ + nn.Conv2d(cur_channels, cur_channels, kernel_size=3, stride=1, padding=1), + nn.ReLU(), + ] + self.mask_fuse = nn.Sequential(*mask_fuse) + modules_list.append(self.mask_fuse) + else: + self.align2nat = SwapAlign2Nat(1) + + # Initialization + for modules in modules_list: + for layer in modules.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + # Use prior in model initialization to improve stability + bias_value = -(math.log((1 - 0.01) / 0.01)) + torch.nn.init.constant_(self.cls_score.bias, bias_value) + + def forward(self, features): + """ + Arguments: + features (list[Tensor]): FPN feature map tensors in high to low resolution. + Each tensor in the list correspond to different feature levels. + + Returns: + pred_logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). + The tensor predicts the classification probability + at each spatial position for each of the A anchors and K object + classes. 
+ pred_deltas (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). + The tensor predicts 4-vector (dx,dy,dw,dh) box + regression values for every anchor. These values are the + relative offset between the anchor and the ground truth box. + pred_masks (list(list[Tensor])): #lvl list of tensors, each is a list of + A tensors of shape (N, M_{i,a}, Hi, Wi). + The tensor predicts a dense set of M_ixM_i masks at every location. + """ + pred_logits = [self.cls_score(self.cls_subnet(x)) for x in features] + pred_deltas = [self.bbox_pred(self.bbox_subnet(x)) for x in features] + + pred_masks = None + if self.mask_on: + mask_feats = [self.mask_subnet(x) for x in features] + + if self.bipyramid_on: + mask_feat_high_res = mask_feats[0] + H, W = mask_feat_high_res.shape[-2:] + mask_feats_up = [] + for lvl, mask_feat in enumerate(mask_feats): + lambda_val = 2.0**lvl + mask_feat_up = mask_feat + if lvl > 0: + mask_feat_up = F.interpolate( + mask_feat, scale_factor=lambda_val, mode="bilinear", align_corners=False + ) + mask_feats_up.append( + self.mask_fuse(mask_feat_up[:, :, :H, :W] + mask_feat_high_res) + ) + mask_feats = mask_feats_up + + pred_masks = [] + for lvl, mask_feat in enumerate(mask_feats): + cur_masks = [] + for mask_size in self.mask_sizes: + cur_mask_module = getattr(self, "mask_pred_%02d" % mask_size) + cur_mask = cur_mask_module(mask_feat) + if self.align_on: + if self.bipyramid_on: + cur_mask_module = getattr(self, "align2nat_%02d" % lvl) + cur_mask = cur_mask_module(cur_mask) + else: + cur_mask = self.align2nat(cur_mask) + cur_masks.append(cur_mask) + pred_masks.append(cur_masks) + return pred_logits, pred_deltas, pred_masks diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/config.py b/data_processing/detectron2/projects/TensorMask/tensormask/config.py new file mode 100644 index 0000000..cf62d7a --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tensormask/config.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +from detectron2.config import CfgNode as CN + + +def add_tensormask_config(cfg): + """ + Add config for TensorMask. + """ + cfg.MODEL.TENSOR_MASK = CN() + + # Anchor parameters + cfg.MODEL.TENSOR_MASK.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6", "p7"] + + # Convolutions to use in the towers + cfg.MODEL.TENSOR_MASK.NUM_CONVS = 4 + + # Number of foreground classes. 
+ cfg.MODEL.TENSOR_MASK.NUM_CLASSES = 80 + # Channel size for the classification tower + cfg.MODEL.TENSOR_MASK.CLS_CHANNELS = 256 + + cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST = 0.05 + # Only the top (1000 * #levels) candidate boxes across all levels are + # considered jointly during test (to improve speed) + cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST = 6000 + cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST = 0.5 + + # Box parameters + # Channel size for the box tower + cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS = 128 + # Weights on (dx, dy, dw, dh) + cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS = (1.5, 1.5, 0.75, 0.75) + + # Loss parameters + cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA = 3.0 + cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA = 0.3 + + # Mask parameters + # Channel size for the mask tower + cfg.MODEL.TENSOR_MASK.MASK_CHANNELS = 128 + # Mask loss weight + cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT = 2.0 + # weight on positive pixels within the mask + cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT = 1.5 + # Whether to predict in the aligned representation + cfg.MODEL.TENSOR_MASK.ALIGNED_ON = False + # Whether to use the bipyramid architecture + cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON = False diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/layers/__init__.py b/data_processing/detectron2/projects/TensorMask/tensormask/layers/__init__.py new file mode 100644 index 0000000..8b8e178 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tensormask/layers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .swap_align2nat import SwapAlign2Nat, swap_align2nat + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat.h b/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat.h new file mode 100644 index 0000000..75c2178 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat.h @@ -0,0 +1,54 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
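+//
+// Declares the SwapAlign2Nat forward/backward entry points used by the Python
+// extension. Only CUDA implementations are provided (in SwapAlign2Nat_cuda.cu);
+// the inline dispatchers below raise an error for CPU tensors.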
+#pragma once +#include + +namespace tensormask { + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor SwapAlign2Nat_forward_cuda( + const at::Tensor& X, + const int lambda_val, + const float pad_val); + +at::Tensor SwapAlign2Nat_backward_cuda( + const at::Tensor& gY, + const int lambda_val, + const int batch_size, + const int channel, + const int height, + const int width); +#endif + +inline at::Tensor SwapAlign2Nat_forward( + const at::Tensor& X, + const int lambda_val, + const float pad_val) { + if (X.type().is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return SwapAlign2Nat_forward_cuda(X, lambda_val, pad_val); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline at::Tensor SwapAlign2Nat_backward( + const at::Tensor& gY, + const int lambda_val, + const int batch_size, + const int channel, + const int height, + const int width) { + if (gY.type().is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return SwapAlign2Nat_backward_cuda( + gY, lambda_val, batch_size, channel, height, width); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +} // namespace tensormask diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat_cuda.cu b/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat_cuda.cu new file mode 100644 index 0000000..1398d70 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat_cuda.cu @@ -0,0 +1,526 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +template +__device__ inline T get_pixel_val( + const T* tensor, + const int idx, + const int H, + const int W, + const int y, + const int x, + const int V, + const int U, + const int v, + const int u, + const T pad_val) { + if ((y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) || (v >= V) || + (u < 0) || (u >= U)) { + return pad_val; + } else { + return tensor[(((idx * V + v) * U + u) * H + y) * W + x]; + } +} + +template +__device__ inline void add_pixel_val( + T* tensor, + const T val, + const int idx, + const int H, + const int W, + const int y, + const int x, + const int V, + const int U, + const int v, + const int u) { + if ((val == 0.) || (y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) || + (v >= V) || (u < 0) || (u >= U)) { + return; + } else { + atomicAdd(tensor + ((((idx * V + v) * U + u) * H + y) * W + x), val); + } +} + +template +__global__ void SwapAlign2NatForwardFeat( + const int nthreads, + const T* bottom_data, + const int Vout, + const int Uout, + const float hVout, + const float hUout, + const int Vin, + const int Uin, + const float lambda, + const int Hin, + const int Win, + const int Hout, + const int Wout, + const T pad_val, + T* top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + int idx = index; + const int x = idx % Wout; + idx /= Wout; + const int y = idx % Hout; + idx /= Hout; + const int u = idx % Uout; + idx /= Uout; + const int v = idx % Vout; + idx /= Vout; + + const float ox = x * lambda + u - hUout + 0.5; + const int xf = static_cast(floor(ox)); + const int xc = static_cast(ceil(ox)); + const float xwc = ox - xf; + const float xwf = 1. 
- xwc; + + const float oy = y * lambda + v - hVout + 0.5; + const int yf = static_cast(floor(oy)); + const int yc = static_cast(ceil(oy)); + const float ywc = oy - yf; + const float ywf = 1. - ywc; + + const float ou = (u + 0.5) / lambda - 0.5; + const int uf = static_cast(floor(ou)); + const int uc = static_cast(ceil(ou)); + const float uwc = ou - uf; + const float uwf = 1. - uwc; + + const float ov = (v + 0.5) / lambda - 0.5; + const int vf = static_cast(floor(ov)); + const int vc = static_cast(ceil(ov)); + const float vwc = ov - vf; + const float vwf = 1. - vwc; + + T val = ywf * xwf * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf, pad_val) + + ywf * xwf * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc, pad_val) + + ywf * xwf * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf, pad_val) + + ywf * xwf * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc, pad_val) + + ywf * xwc * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf, pad_val) + + ywf * xwc * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc, pad_val) + + ywf * xwc * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf, pad_val) + + ywf * xwc * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc, pad_val) + + ywc * xwf * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf, pad_val) + + ywc * xwf * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc, pad_val) + + ywc * xwf * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf, pad_val) + + ywc * xwf * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc, pad_val) + + ywc * xwc * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf, pad_val) + + ywc * xwc * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc, pad_val) + + ywc * xwc * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf, pad_val) + + ywc * xwc * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc, pad_val); + + top_data[index] = val; + } +} + +template +__global__ void SwapAlign2NatBackwardFeat( + const int nthreads, + const T* top_diff, + const int Vout, + const int Uout, + const float hVout, + const float hUout, + const int Vin, + const int Uin, + const float lambda, + const int Hin, + const int Win, + const int Hout, + const int Wout, + T* bottom_diff) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + int idx = index; + const int x = idx % Wout; + idx /= Wout; + const int y = idx % Hout; + idx /= Hout; + const int u = idx % Uout; + idx /= Uout; + const int v = idx % Vout; + idx /= Vout; + + const float ox = x * lambda + u - hUout + 0.5; + const int xf = static_cast(floor(ox)); + const int xc = static_cast(ceil(ox)); + const float xwc = ox - xf; + const float xwf = 1. - xwc; + + const float oy = y * lambda + v - hVout + 0.5; + const int yf = static_cast(floor(oy)); + const int yc = static_cast(ceil(oy)); + const float ywc = oy - yf; + const float ywf = 1. - ywc; + + const float ou = (u + 0.5) / lambda - 0.5; + const int uf = static_cast(floor(ou)); + const int uc = static_cast(ceil(ou)); + const float uwc = ou - uf; + const float uwf = 1. 
- uwc; + + const float ov = (v + 0.5) / lambda - 0.5; + const int vf = static_cast(floor(ov)); + const int vc = static_cast(ceil(ov)); + const float vwc = ov - vf; + const float vwf = 1. - vwc; + + const T grad = top_diff[index]; + + add_pixel_val( + bottom_diff, + ywf * xwf * vwf * uwf * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywf * xwf * vwf * uwc * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywf * xwf * vwc * uwf * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywf * xwf * vwc * uwc * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vc, + uc); + add_pixel_val( + bottom_diff, + ywf * xwc * vwf * uwf * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywf * xwc * vwf * uwc * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywf * xwc * vwc * uwf * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywf * xwc * vwc * uwc * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vc, + uc); + add_pixel_val( + bottom_diff, + ywc * xwf * vwf * uwf * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywc * xwf * vwf * uwc * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywc * xwf * vwc * uwf * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywc * xwf * vwc * uwc * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vc, + uc); + add_pixel_val( + bottom_diff, + ywc * xwc * vwf * uwf * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywc * xwc * vwf * uwc * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywc * xwc * vwc * uwf * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywc * xwc * vwc * uwc * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vc, + uc); + } +} + +namespace tensormask { + +at::Tensor SwapAlign2Nat_forward_cuda( + const at::Tensor& X, + const int lambda_val, + const float pad_val) { + AT_ASSERTM(X.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(X.ndimension() == 4, "input must be a 4D tensor"); + AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1"); + const int N = X.size(0); + const int C = X.size(1); + const int Vin = static_cast(sqrt(static_cast(C))); + const int Uin = C / Vin; + AT_ASSERTM( + C == Vin * Uin && Vin == Uin, "#channels should be a square number"); + const int Vout = lambda_val * Vin; + const int Uout = lambda_val * Uin; + const int Hin = X.size(2); + const int Win = X.size(3); + const float lambda = static_cast(lambda_val); + const int Hout = static_cast(ceil(Hin / lambda)); + const int Wout = static_cast(ceil(Win / lambda)); + const float hVout = Vout / 2.; + const float hUout = Uout / 2.; + + at::cuda::CUDAGuard device_guard(X.device()); + + at::Tensor Y = at::empty({N, Vout * Uout, Hout, Wout}, X.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(Y.numel(), 512L), 4096L)); + dim3 block(512); + + if (Y.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return Y; + } + + auto X_ = X.contiguous(); + 
AT_DISPATCH_FLOATING_TYPES(X.scalar_type(), "SwapAlign2Nat_forward", [&] {
+    SwapAlign2NatForwardFeat<scalar_t><<<grid, block, 0, stream>>>(
+        Y.numel(),
+        X_.data_ptr<scalar_t>(),
+        Vout,
+        Uout,
+        hVout,
+        hUout,
+        Vin,
+        Uin,
+        lambda,
+        Hin,
+        Win,
+        Hout,
+        Wout,
+        pad_val,
+        Y.data_ptr<scalar_t>());
+  });
+  cudaDeviceSynchronize();
+  AT_CUDA_CHECK(cudaGetLastError());
+  return Y;
+}
+
+at::Tensor SwapAlign2Nat_backward_cuda(
+    const at::Tensor& gY,
+    const int lambda_val,
+    const int batch_size,
+    const int channel,
+    const int height,
+    const int width) {
+  AT_ASSERTM(gY.device().is_cuda(), "input gradient must be a CUDA tensor");
+  AT_ASSERTM(gY.ndimension() == 4, "input gradient must be a 4D tensor");
+  AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1");
+  const int Vin = static_cast<int>(sqrt(static_cast<float>(channel)));
+  const int Uin = channel / Vin;
+  const int Vout = lambda_val * Vin;
+  const int Uout = lambda_val * Uin;
+  const float hVout = Vout / 2.;
+  const float hUout = Uout / 2.;
+  const int Hout = gY.size(2);
+  const int Wout = gY.size(3);
+
+  at::cuda::CUDAGuard device_guard(gY.device());
+
+  at::Tensor gX = at::zeros({batch_size, channel, height, width}, gY.options());
+
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  dim3 grid(std::min(at::cuda::ATenCeilDiv(gY.numel(), 512L), 4096L));
+  dim3 block(512);
+
+  // handle possibly empty gradients
+  if (gY.numel() == 0) {
+    AT_CUDA_CHECK(cudaGetLastError());
+    return gX;
+  }
+
+  auto gY_ = gY.contiguous();
+  AT_DISPATCH_FLOATING_TYPES(gY.scalar_type(), "SwapAlign2Nat_backward", [&] {
+    SwapAlign2NatBackwardFeat<scalar_t><<<grid, block, 0, stream>>>(
+        gY.numel(),
+        gY_.data_ptr<scalar_t>(),
+        Vout,
+        Uout,
+        hVout,
+        hUout,
+        Vin,
+        Uin,
+        static_cast<float>(lambda_val),
+        height,
+        width,
+        Hout,
+        Wout,
+        gX.data_ptr<scalar_t>());
+  });
+  AT_CUDA_CHECK(cudaGetLastError());
+  return gX;
+}
+
+} // namespace tensormask
diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/vision.cpp b/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/vision.cpp
new file mode 100644
index 0000000..ed1ed0b
--- /dev/null
+++ b/data_processing/detectron2/projects/TensorMask/tensormask/layers/csrc/vision.cpp
@@ -0,0 +1,19 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+#include <torch/extension.h>
+#include "SwapAlign2Nat/SwapAlign2Nat.h"
+
+namespace tensormask {
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def(
+      "swap_align2nat_forward",
+      &SwapAlign2Nat_forward,
+      "SwapAlign2Nat_forward");
+  m.def(
+      "swap_align2nat_backward",
+      &SwapAlign2Nat_backward,
+      "SwapAlign2Nat_backward");
+}
+
+} // namespace tensormask
diff --git a/data_processing/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py b/data_processing/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py
new file mode 100644
index 0000000..2b5e450
--- /dev/null
+++ b/data_processing/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py
@@ -0,0 +1,61 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from tensormask import _C + + +class _SwapAlign2Nat(Function): + @staticmethod + def forward(ctx, X, lambda_val, pad_val): + ctx.lambda_val = lambda_val + ctx.input_shape = X.size() + + Y = _C.swap_align2nat_forward(X, lambda_val, pad_val) + return Y + + @staticmethod + @once_differentiable + def backward(ctx, gY): + lambda_val = ctx.lambda_val + bs, ch, h, w = ctx.input_shape + + gX = _C.swap_align2nat_backward(gY, lambda_val, bs, ch, h, w) + + return gX, None, None + + +swap_align2nat = _SwapAlign2Nat.apply + + +class SwapAlign2Nat(nn.Module): + """ + The op `SwapAlign2Nat` described in https://arxiv.org/abs/1903.12174. + Given an input tensor that predicts masks of shape (N, C=VxU, H, W), + apply the op, it will return masks of shape (N, V'xU', H', W') where + the unit lengths of (V, U) and (H, W) are swapped, and the mask representation + is transformed from aligned to natural. + Args: + lambda_val (int): the relative unit length ratio between (V, U) and (H, W), + as we always have larger unit lengths for (V, U) than (H, W), + lambda_val is always >= 1. + pad_val (float): padding value for the values falling outside of the input + tensor, default set to -6 as sigmoid(-6) is ~0, indicating + that is no masks outside of the tensor. + """ + + def __init__(self, lambda_val, pad_val=-6.0): + super(SwapAlign2Nat, self).__init__() + self.lambda_val = lambda_val + self.pad_val = pad_val + + def forward(self, X): + return swap_align2nat(X, self.lambda_val, self.pad_val) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "lambda_val=" + str(self.lambda_val) + tmpstr += ", pad_val=" + str(self.pad_val) + tmpstr += ")" + return tmpstr diff --git a/data_processing/detectron2/projects/TensorMask/tests/__init__.py b/data_processing/detectron2/projects/TensorMask/tests/__init__.py new file mode 100644 index 0000000..9020c2d --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tests/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/data_processing/detectron2/projects/TensorMask/tests/test_swap_align2nat.py b/data_processing/detectron2/projects/TensorMask/tests/test_swap_align2nat.py new file mode 100644 index 0000000..d9ee273 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/tests/test_swap_align2nat.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. 
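+#
+# Usage sketch (added for illustration; the exact tensor sizes below are an
+# assumption derived from the module docstring, not part of the original tests):
+#
+#   op = SwapAlign2Nat(lambda_val=2, pad_val=-6.0)
+#   X = torch.rand(2, 4, 10, 10, device="cuda")  # (N, V*U, H, W) with V = U = 2
+#   Y = op(X)                                    # -> (2, 16, 5, 5), i.e. (N, V'*U', H', W')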
+ +import unittest +import torch +from torch.autograd import gradcheck + +from tensormask.layers.swap_align2nat import SwapAlign2Nat + + +class SwapAlign2NatTest(unittest.TestCase): + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_swap_align2nat_gradcheck_cuda(self): + dtype = torch.float64 + device = torch.device("cuda") + m = SwapAlign2Nat(2).to(dtype=dtype, device=device) + x = torch.rand(2, 4, 10, 10, dtype=dtype, device=device, requires_grad=True) + + self.assertTrue(gradcheck(m, x), "gradcheck failed for SwapAlign2Nat CUDA") + + def _swap_align2nat(self, tensor, lambda_val): + """ + The basic setup for testing Swap_Align + """ + op = SwapAlign2Nat(lambda_val, pad_val=0.0) + input = torch.from_numpy(tensor[None, :, :, :].astype("float32")) + output = op.forward(input.cuda()).cpu().numpy() + return output[0] + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/projects/TensorMask/train_net.py b/data_processing/detectron2/projects/TensorMask/train_net.py new file mode 100644 index 0000000..dc77a64 --- /dev/null +++ b/data_processing/detectron2/projects/TensorMask/train_net.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +TensorMask Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator, verify_results + +from tensormask import add_tensormask_config + + +class Trainer(DefaultTrainer): + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + return COCOEvaluator(dataset_name, output_dir=output_folder) + + +def setup(args): + """ + Create configs and perform basic setups. 
+ """ + cfg = get_cfg() + add_tensormask_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/projects/TridentNet/README.md b/data_processing/detectron2/projects/TridentNet/README.md new file mode 100644 index 0000000..4b7a901 --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/README.md @@ -0,0 +1,60 @@ + +# TridentNet in Detectron2 +**Scale-Aware Trident Networks for Object Detection** + +Yanghao Li\*, Yuntao Chen\*, Naiyan Wang, Zhaoxiang Zhang + +[[`TridentNet`](https://github.com/TuSimple/simpledet/tree/master/models/tridentnet)] [[`arXiv`](https://arxiv.org/abs/1901.01892)] [[`BibTeX`](#CitingTridentNet)] + +
+
+In this repository, we implement TridentNet-Fast in Detectron2.
+Trident Network (TridentNet) aims to generate scale-specific feature maps with a uniform representational power. We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. TridentNet-Fast is a fast approximation of TridentNet that achieves significant improvements without any additional parameters or computational cost.
+
+## Training
+
+To train a model, run
+```bash
+python /path/to/detectron2/projects/TridentNet/train_net.py --config-file <config.yaml>
+```
+
+For example, to launch end-to-end TridentNet training with a ResNet-50 backbone on 8 GPUs,
+one should execute:
+```bash
+python /path/to/detectron2/projects/TridentNet/train_net.py --config-file configs/tridentnet_fast_R_50_C4_1x.yaml --num-gpus 8
+```
+
+## Evaluation
+
+Model evaluation can be done similarly:
+```bash
+python /path/to/detectron2/projects/TridentNet/train_net.py --config-file configs/tridentnet_fast_R_50_C4_1x.yaml --eval-only MODEL.WEIGHTS model.pth
+```
+
+## Results on MS-COCO in Detectron2
+
+|Model|Backbone|Head|lr sched|AP|AP50|AP75|APs|APm|APl|download|
+|-----|--------|----|--------|--|----|----|---|---|---|--------|
+|Faster|R50-C4|C5-512ROI|1X|35.7|56.1|38.0|19.2|40.9|48.7|model \| metrics|
+|TridentFast|R50-C4|C5-128ROI|1X|38.0|58.1|40.8|19.5|42.2|54.6|model \| metrics|
+|Faster|R50-C4|C5-512ROI|3X|38.4|58.7|41.3|20.7|42.7|53.1|model \| metrics|
+|TridentFast|R50-C4|C5-128ROI|3X|40.6|60.8|43.6|23.4|44.7|57.1|model \| metrics|
+|Faster|R101-C4|C5-512ROI|3X|41.1|61.4|44.0|22.2|45.5|55.9|model \| metrics|
+|TridentFast|R101-C4|C5-128ROI|3X|43.6|63.4|47.0|24.3|47.8|60.0|model \| metrics|
+
+
+## Citing TridentNet
+
+If you use TridentNet, please use the following BibTeX entry.
+ +``` +@InProceedings{li2019scale, + title={Scale-Aware Trident Networks for Object Detection}, + author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang}, + journal={The International Conference on Computer Vision (ICCV)}, + year={2019} +} +``` + diff --git a/data_processing/detectron2/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml b/data_processing/detectron2/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml new file mode 100644 index 0000000..8c3d807 --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml @@ -0,0 +1,29 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_trident_resnet_backbone" + ROI_HEADS: + NAME: "TridentRes5ROIHeads" + POSITIVE_FRACTION: 0.5 + BATCH_SIZE_PER_IMAGE: 128 + PROPOSAL_APPEND_GT: False + PROPOSAL_GENERATOR: + NAME: "TridentRPN" + RPN: + POST_NMS_TOPK_TRAIN: 500 + TRIDENT: + NUM_BRANCH: 3 + BRANCH_DILATIONS: [1, 2, 3] + TEST_BRANCH_IDX: 1 + TRIDENT_STAGE: "res4" +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml b/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml new file mode 100644 index 0000000..bc83c2f --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "Base-TridentNet-Fast-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml b/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml new file mode 100644 index 0000000..fda2cb6 --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "Base-TridentNet-Fast-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_3x.yaml b/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_3x.yaml new file mode 100644 index 0000000..ebf89d0 --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "Base-TridentNet-Fast-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/data_processing/detectron2/projects/TridentNet/train_net.py b/data_processing/detectron2/projects/TridentNet/train_net.py new file mode 100644 index 0000000..143289a --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/train_net.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +TridentNet Training Script. + +This script is a simplified version of the training script in detectron2/tools. 
+""" + +import os + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator + +from tridentnet import add_tridentnet_config + + +class Trainer(DefaultTrainer): + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + return COCOEvaluator(dataset_name, output_dir=output_folder) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_tridentnet_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/projects/TridentNet/tridentnet/__init__.py b/data_processing/detectron2/projects/TridentNet/tridentnet/__init__.py new file mode 100644 index 0000000..abaa957 --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/tridentnet/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .config import add_tridentnet_config +from .trident_backbone import ( + TridentBottleneckBlock, + build_trident_resnet_backbone, + make_trident_stage, +) +from .trident_rpn import TridentRPN +from .trident_rcnn import TridentRes5ROIHeads, TridentStandardROIHeads diff --git a/data_processing/detectron2/projects/TridentNet/tridentnet/config.py b/data_processing/detectron2/projects/TridentNet/tridentnet/config.py new file mode 100644 index 0000000..4b8732a --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/tridentnet/config.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +from detectron2.config import CfgNode as CN + + +def add_tridentnet_config(cfg): + """ + Add config for tridentnet. + """ + _C = cfg + + _C.MODEL.TRIDENT = CN() + + # Number of branches for TridentNet. + _C.MODEL.TRIDENT.NUM_BRANCH = 3 + # Specify the dilations for each branch. + _C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3] + # Specify the stage for applying trident blocks. Default stage is Res4 according to the + # TridentNet paper. + _C.MODEL.TRIDENT.TRIDENT_STAGE = "res4" + # Specify the test branch index TridentNet Fast inference: + # - use -1 to aggregate results of all branches during inference. + # - otherwise, only using specified branch for fast inference. Recommended setting is + # to use the middle branch. 
+ _C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1 diff --git a/data_processing/detectron2/projects/TridentNet/tridentnet/trident_backbone.py b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_backbone.py new file mode 100644 index 0000000..7789bd2 --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_backbone.py @@ -0,0 +1,220 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F + +from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm +from detectron2.modeling import BACKBONE_REGISTRY, ResNet, ResNetBlockBase +from detectron2.modeling.backbone.resnet import BasicStem, BottleneckBlock, DeformBottleneckBlock + +from .trident_conv import TridentConv + +__all__ = ["TridentBottleneckBlock", "make_trident_stage", "build_trident_resnet_backbone"] + + +class TridentBottleneckBlock(ResNetBlockBase): + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + num_branch=3, + dilations=(1, 2, 3), + concat_output=False, + test_branch_idx=-1, + ): + """ + Args: + num_branch (int): the number of branches in TridentNet. + dilations (tuple): the dilations of multiple branches in TridentNet. + concat_output (bool): if concatenate outputs of multiple branches in TridentNet. + Use 'True' for the last trident block. + """ + super().__init__(in_channels, out_channels, stride) + + assert num_branch == len(dilations) + + self.num_branch = num_branch + self.concat_output = concat_output + self.test_branch_idx = test_branch_idx + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv2 = TridentConv( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + paddings=dilations, + bias=False, + groups=num_groups, + dilations=dilations, + num_branch=num_branch, + test_branch_idx=test_branch_idx, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + def forward(self, x): + num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 + if not isinstance(x, list): + x = [x] * num_branch + out = [self.conv1(b) for b in x] + out = [F.relu_(b) for b in out] + + out = self.conv2(out) + out = [F.relu_(b) for b in out] + + out = [self.conv3(b) for b in out] + + if self.shortcut is not None: + shortcut = [self.shortcut(b) for b in x] + else: + shortcut = x + + out = [out_b + shortcut_b for out_b, shortcut_b in zip(out, shortcut)] + out = [F.relu_(b) for b in out] + if self.concat_output: + out = torch.cat(out) + return out + + +def make_trident_stage(block_class, num_blocks, **kwargs): + """ + Create a resnet stage by creating many blocks for TridentNet. 
+ """ + concat_output = [False] * (num_blocks - 1) + [True] + kwargs["concat_output_per_block"] = concat_output + return ResNet.make_stage(block_class, num_blocks, **kwargs) + + +@BACKBONE_REGISTRY.register() +def build_trident_resnet_backbone(cfg, input_shape): + """ + Create a ResNet instance from config for TridentNet. + + Returns: + ResNet: a :class:`ResNet` instance. + """ + # need registration of new blocks/stems? + norm = cfg.MODEL.RESNETS.NORM + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + + if freeze_at >= 1: + for p in stem.parameters(): + p.requires_grad = False + stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem) + + # fmt: off + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS + trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE + test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX + # fmt: on + assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) + + num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] + + stages = [] + + res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5} + out_stage_idx = [res_stage_idx[f] for f in out_features] + trident_stage_idx = res_stage_idx[trident_stage] + max_stage_idx = max(out_stage_idx) + for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): + dilation = res5_dilation if stage_idx == 5 else 1 + first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), + "in_channels": in_channels, + "bottleneck_channels": bottleneck_channels, + "out_channels": out_channels, + "num_groups": num_groups, + "norm": norm, + "stride_in_1x1": stride_in_1x1, + "dilation": dilation, + } + if stage_idx == trident_stage_idx: + assert not deform_on_per_stage[ + idx + ], "Not support deformable conv in Trident blocks yet." 
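+            # For the trident stage, swap in TridentBottleneckBlock and replace the
+            # single per-stage dilation with the per-branch dilations from the config.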
+ stage_kargs["block_class"] = TridentBottleneckBlock + stage_kargs["num_branch"] = num_branch + stage_kargs["dilations"] = branch_dilations + stage_kargs["test_branch_idx"] = test_branch_idx + stage_kargs.pop("dilation") + elif deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + blocks = ( + make_trident_stage(**stage_kargs) + if stage_idx == trident_stage_idx + else ResNet.make_stage(**stage_kargs) + ) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + + if freeze_at >= stage_idx: + for block in blocks: + block.freeze() + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features) diff --git a/data_processing/detectron2/projects/TridentNet/tridentnet/trident_conv.py b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_conv.py new file mode 100644 index 0000000..18d5b0b --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_conv.py @@ -0,0 +1,107 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.modules.utils import _pair + +from detectron2.layers.wrappers import _NewEmptyTensorOp + + +class TridentConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + paddings=0, + dilations=1, + groups=1, + num_branch=1, + test_branch_idx=-1, + bias=False, + norm=None, + activation=None, + ): + super(TridentConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.num_branch = num_branch + self.stride = _pair(stride) + self.groups = groups + self.with_bias = bias + if isinstance(paddings, int): + paddings = [paddings] * self.num_branch + if isinstance(dilations, int): + dilations = [dilations] * self.num_branch + self.paddings = [_pair(padding) for padding in paddings] + self.dilations = [_pair(dilation) for dilation in dilations] + self.test_branch_idx = test_branch_idx + self.norm = norm + self.activation = activation + + assert len({self.num_branch, len(self.paddings), len(self.dilations)}) == 1 + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, inputs): + num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 + assert len(inputs) == num_branch + + if inputs[0].numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + inputs[0].shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [input[0].shape[0], self.weight.shape[0]] + output_shape + return [_NewEmptyTensorOp.apply(input, output_shape) for input in inputs] + + if self.training or self.test_branch_idx == -1: + outputs = [ + F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation, self.groups) + for input, dilation, padding in zip(inputs, self.dilations, self.paddings) + ] + else: + outputs = [ + F.conv2d( + inputs[0], + self.weight, + self.bias, + self.stride, + self.paddings[self.test_branch_idx], + 
self.dilations[self.test_branch_idx], + self.groups, + ) + ] + + if self.norm is not None: + outputs = [self.norm(x) for x in outputs] + if self.activation is not None: + outputs = [self.activation(x) for x in outputs] + return outputs + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", num_branch=" + str(self.num_branch) + tmpstr += ", test_branch_idx=" + str(self.test_branch_idx) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", paddings=" + str(self.paddings) + tmpstr += ", dilations=" + str(self.dilations) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", bias=" + str(self.with_bias) + return tmpstr diff --git a/data_processing/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py new file mode 100644 index 0000000..fc22c71 --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from detectron2.layers import batched_nms +from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads +from detectron2.modeling.roi_heads.roi_heads import Res5ROIHeads +from detectron2.structures import Instances + + +def merge_branch_instances(instances, num_branch, nms_thresh, topk_per_image): + """ + Merge detection results from different branches of TridentNet. + Return detection results by applying non-maximum suppression (NMS) on bounding boxes + and keep the unsuppressed boxes and other instances (e.g mask) if any. + + Args: + instances (list[Instances]): A list of N * num_branch instances that store detection + results. Contain N images and each image has num_branch instances. + num_branch (int): Number of branches used for merging detection results for each image. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + results: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confidence detections after merging results from multiple + branches. + """ + if num_branch == 1: + return instances + + batch_size = len(instances) // num_branch + results = [] + for i in range(batch_size): + instance = Instances.cat([instances[i + batch_size * j] for j in range(num_branch)]) + + # Apply per-class NMS + keep = batched_nms( + instance.pred_boxes.tensor, instance.scores, instance.pred_classes, nms_thresh + ) + keep = keep[:topk_per_image] + result = instance[keep] + + results.append(result) + + return results + + +@ROI_HEADS_REGISTRY.register() +class TridentRes5ROIHeads(Res5ROIHeads): + """ + The TridentNet ROIHeads in a typical "C4" R-CNN model. + See :class:`Res5ROIHeads`. + """ + + def __init__(self, cfg, input_shape): + super().__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, proposals, targets=None): + """ + See :class:`Res5ROIHeads.forward`. 
+ """ + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + all_targets = targets * num_branch if targets is not None else None + pred_instances, losses = super().forward(images, features, proposals, all_targets) + del images, all_targets, targets + + if self.training: + return pred_instances, losses + else: + pred_instances = merge_branch_instances( + pred_instances, + num_branch, + self.box_predictor.test_nms_thresh, + self.box_predictor.test_topk_per_image, + ) + + return pred_instances, {} + + +@ROI_HEADS_REGISTRY.register() +class TridentStandardROIHeads(StandardROIHeads): + """ + The `StandardROIHeads` for TridentNet. + See :class:`StandardROIHeads`. + """ + + def __init__(self, cfg, input_shape): + super(TridentStandardROIHeads, self).__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, proposals, targets=None): + """ + See :class:`Res5ROIHeads.forward`. + """ + # Use 1 branch if using trident_fast during inference. + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + # Duplicate targets for all branches in TridentNet. + all_targets = targets * num_branch if targets is not None else None + pred_instances, losses = super().forward(images, features, proposals, all_targets) + del images, all_targets, targets + + if self.training: + return pred_instances, losses + else: + pred_instances = merge_branch_instances( + pred_instances, + num_branch, + self.box_predictor.test_nms_thresh, + self.box_predictor.test_topk_per_image, + ) + + return pred_instances, {} diff --git a/data_processing/detectron2/projects/TridentNet/tridentnet/trident_rpn.py b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_rpn.py new file mode 100644 index 0000000..f95fbbf --- /dev/null +++ b/data_processing/detectron2/projects/TridentNet/tridentnet/trident_rpn.py @@ -0,0 +1,32 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch + +from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY +from detectron2.modeling.proposal_generator.rpn import RPN +from detectron2.structures import ImageList + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class TridentRPN(RPN): + """ + Trident RPN subnetwork. + """ + + def __init__(self, cfg, input_shape): + super(TridentRPN, self).__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, gt_instances=None): + """ + See :class:`RPN.forward`. + """ + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + # Duplicate images and gt_instances for all branches in TridentNet. 
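+        # With num_branch branches, the batch of N images is expanded to
+        # N * num_branch images so that a single RPN pass covers every branch.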
+ all_images = ImageList( + torch.cat([images.tensor] * num_branch), images.image_sizes * num_branch + ) + all_gt_instances = gt_instances * num_branch if gt_instances is not None else None + + return super(TridentRPN, self).forward(all_images, features, all_gt_instances) diff --git a/data_processing/detectron2/projects/ViTDet/README.md b/data_processing/detectron2/projects/ViTDet/README.md new file mode 100644 index 0000000..0a525e0 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/README.md @@ -0,0 +1,364 @@ +# ViTDet: Exploring Plain Vision Transformer Backbones for Object Detection + +Yanghao Li, Hanzi Mao, Ross Girshick†, Kaiming He† + +[[`arXiv`](https://arxiv.org/abs/2203.16527)] [[`BibTeX`](#CitingViTDet)] + +In this repository, we provide configs and models in Detectron2 for ViTDet as well as MViTv2 and Swin backbones with our implementation and settings as described in [ViTDet](https://arxiv.org/abs/2203.16527) paper. + + +## Pretrained Models + +### COCO + +#### Mask R-CNN + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Name | pre-train | train time (s/im) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| ---- | --------- | ----------------- | --------------------- | -------------- | ------ | ------- | -------- | -------- |
+| ViTDet, ViT-B | IN1K, MAE | 0.314 | 0.079 | 10.9 | 51.6 | 45.9 | 325346929 | model |
+| ViTDet, ViT-L | IN1K, MAE | 0.603 | 0.125 | 20.9 | 55.5 | 49.2 | 325599698 | model |
+| ViTDet, ViT-H | IN1K, MAE | 1.098 | 0.178 | 31.5 | 56.7 | 50.2 | 329145471 | model |
+
+#### Cascade Mask R-CNN
+
+| Name | pre-train | train time (s/im) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| ---- | --------- | ----------------- | --------------------- | -------------- | ------ | ------- | -------- | -------- |
+| Swin-B | IN21K, sup | 0.389 | 0.077 | 8.7 | 53.9 | 46.2 | 342979038 | model |
+| Swin-L | IN21K, sup | 0.508 | 0.097 | 12.6 | 55.0 | 47.2 | 342979186 | model |
+| MViTv2-B | IN21K, sup | 0.475 | 0.090 | 8.9 | 55.6 | 48.1 | 325820315 | model |
+| MViTv2-L | IN21K, sup | 0.844 | 0.157 | 19.7 | 55.7 | 48.3 | 325607715 | model |
+| MViTv2-H | IN21K, sup | 1.655 | 0.285 | 18.4* | 55.9 | 48.3 | 326187358 | model |
+| ViTDet, ViT-B | IN1K, MAE | 0.362 | 0.089 | 12.3 | 54.0 | 46.7 | 325358525 | model |
+| ViTDet, ViT-L | IN1K, MAE | 0.643 | 0.142 | 22.3 | 57.6 | 50.0 | 328021305 | model |
+| ViTDet, ViT-H | IN1K, MAE | 1.137 | 0.196 | 32.9 | 58.7 | 51.0 | 328730692 | model |
+
+### LVIS
+
+#### Mask R-CNN
+
+| Name | pre-train | train time (s/im) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| ---- | --------- | ----------------- | --------------------- | -------------- | ------ | ------- | -------- | -------- |
+| ViTDet, ViT-B | IN1K, MAE | 0.317 | 0.085 | 14.4 | 40.2 | 38.2 | 329225748 | model |
+| ViTDet, ViT-L | IN1K, MAE | 0.576 | 0.137 | 24.7 | 46.1 | 43.6 | 329211570 | model |
+| ViTDet, ViT-H | IN1K, MAE | 1.059 | 0.186 | 35.3 | 49.1 | 46.0 | 332434656 | model |
+
+#### Cascade Mask R-CNN
+
+| Name | pre-train | train time (s/im) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| ---- | --------- | ----------------- | --------------------- | -------------- | ------ | ------- | -------- | -------- |
+| Swin-B | IN21K, sup | 0.368 | 0.090 | 11.5 | 44.0 | 39.6 | 329222304 | model |
+| Swin-L | IN21K, sup | 0.486 | 0.105 | 13.8 | 46.0 | 41.4 | 329222724 | model |
+| MViTv2-B | IN21K, sup | 0.475 | 0.100 | 11.8 | 46.3 | 42.0 | 329477206 | model |
+| MViTv2-L | IN21K, sup | 0.844 | 0.172 | 21.0 | 49.4 | 44.2 | 329661552 | model |
+| MViTv2-H | IN21K, sup | 1.661 | 0.290 | 21.3* | 49.5 | 44.1 | 330445165 | model |
+| ViTDet, ViT-B | IN1K, MAE | 0.356 | 0.099 | 15.2 | 43.0 | 38.9 | 329226874 | model |
+| ViTDet, ViT-L | IN1K, MAE | 0.629 | 0.150 | 24.9 | 49.2 | 44.5 | 329042206 | model |
+| ViTDet, ViT-H | IN1K, MAE | 1.100 | 0.204 | 35.5 | 51.5 | 46.6 | 332552778 | model |
+ +Note: Unlike the system-level comparisons in the paper, these models use a lower resolution (1024 instead of 1280) and standard NMS (instead of soft NMS). As a result, they have slightly lower box and mask AP. + +We observed higher variance on LVIS evalution results compared to COCO. For example, the standard deviations of box AP and mask AP were 0.30% (compared to 0.10% on COCO) when we trained ViTDet, ViT-B five times with varying random seeds. + +The above models were trained and measured on 8-node with 64 NVIDIA A100 GPUs in total. *: Activation checkpointing is used. + + +## Training +All configs can be trained with: + +``` +../../tools/lazyconfig_train_net.py --config-file configs/path/to/config.py +``` +By default, we use 64 GPUs with batch size as 64 for training. + +## Evaluation +Model evaluation can be done similarly: +``` +../../tools/lazyconfig_train_net.py --config-file configs/path/to/config.py --eval-only train.init_checkpoint=/path/to/model_checkpoint +``` + + +## Citing ViTDet + +If you use ViTDet, please use the following BibTeX entry. + +```BibTeX +@article{li2022exploring, + title={Exploring plain vision transformer backbones for object detection}, + author={Li, Yanghao and Mao, Hanzi and Girshick, Ross and He, Kaiming}, + journal={arXiv preprint arXiv:2203.16527}, + year={2022} +} +``` diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_b_in21k_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_b_in21k_100ep.py new file mode 100644 index 0000000..9dba203 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_b_in21k_100ep.py @@ -0,0 +1,95 @@ +from functools import partial +import torch.nn as nn +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2 import model_zoo +from detectron2.config import LazyCall as L +from detectron2.solver import WarmupParamScheduler +from detectron2.modeling import MViT +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.roi_heads import ( + FastRCNNOutputLayers, + FastRCNNConvFCHead, + CascadeROIHeads, +) + +from ..common.coco_loader_lsj import dataloader + +model = model_zoo.get_config("common/models/mask_rcnn_fpn.py").model +constants = model_zoo.get_config("common/data/constants.py").constants +model.pixel_mean = constants.imagenet_rgb256_mean +model.pixel_std = constants.imagenet_rgb256_std +model.input_format = "RGB" +model.backbone.bottom_up = L(MViT)( + embed_dim=96, + depth=24, + num_heads=1, + last_block_indexes=(1, 4, 20, 23), + residual_pooling=True, + drop_path_rate=0.4, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + out_features=("scale2", "scale3", "scale4", "scale5"), +) +model.backbone.in_features = "${.bottom_up.out_features}" +model.backbone.square_pad = 1024 + +# New heads and LN +model.backbone.norm = "LN" # Use LN in FPN +model.roi_heads.box_head.conv_norm = model.roi_heads.mask_head.conv_norm = "LN" + +# 2conv in RPN: +model.proposal_generator.head.conv_dims = [-1, -1] + +# arguments that don't exist for Cascade R-CNN +[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] +model.roi_heads.update( + _target_=CascadeROIHeads, + box_heads=[ + L(FastRCNNConvFCHead)( + input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[256, 256, 256, 256], + fc_dims=[1024], + conv_norm="LN", + ) + for _ 
in range(3) + ], + box_predictors=[ + L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + test_score_thresh=0.05, + box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), + cls_agnostic_bbox_reg=True, + num_classes="${...num_classes}", + ) + for (w1, w2) in [(10, 5), (20, 10), (30, 15)] + ], + proposal_matchers=[ + L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) + for th in [0.5, 0.6, 0.7] + ], +) + +# Initialization and trainer settings +train = model_zoo.get_config("common/train.py").train +train.amp.enabled = True +train.ddp.fp16_compression = True +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_B_in21k.pyth" + +# Schedule +# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep +train.max_iter = 184375 +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[163889, 177546], + num_updates=train.max_iter, + ), + warmup_length=250 / train.max_iter, + warmup_factor=0.001, +) + +optimizer = model_zoo.get_config("common/optim.py").AdamW +optimizer.params.overrides = {"pos_embed": {"weight_decay": 0.0}} +optimizer.lr = 8e-5 diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_h_in21k_36ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_h_in21k_36ep.py new file mode 100644 index 0000000..5770450 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_h_in21k_36ep.py @@ -0,0 +1,39 @@ +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2.config import LazyCall as L +from detectron2.solver import WarmupParamScheduler + +from .cascade_mask_rcnn_mvitv2_b_in21k_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +model.backbone.bottom_up.embed_dim = 192 +model.backbone.bottom_up.depth = 80 +model.backbone.bottom_up.num_heads = 3 +model.backbone.bottom_up.last_block_indexes = (3, 11, 71, 79) +model.backbone.bottom_up.drop_path_rate = 0.6 +model.backbone.bottom_up.use_act_checkpoint = True + + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_H_in21k.pyth" + + +# 36 epochs +train.max_iter = 67500 +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[ + 52500, + 62500, + 67500, + ], + ), + warmup_length=250 / train.max_iter, + warmup_factor=0.001, +) +optimizer.lr = 1.6e-4 diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py new file mode 100644 index 0000000..c64f0c1 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py @@ -0,0 +1,22 @@ +from .cascade_mask_rcnn_mvitv2_b_in21k_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +model.backbone.bottom_up.embed_dim = 144 +model.backbone.bottom_up.depth = 48 +model.backbone.bottom_up.num_heads = 2 +model.backbone.bottom_up.last_block_indexes = (1, 7, 43, 47) +model.backbone.bottom_up.drop_path_rate = 0.5 + + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_L_in21k.pyth" + +train.max_iter = train.max_iter // 2 # 100ep -> 50ep +lr_multiplier.scheduler.milestones = [ + milestone // 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates 
= train.max_iter diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_swin_b_in21k_50ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_swin_b_in21k_50ep.py new file mode 100644 index 0000000..b2aad98 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_swin_b_in21k_50ep.py @@ -0,0 +1,50 @@ +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2 import model_zoo +from detectron2.config import LazyCall as L +from detectron2.solver import WarmupParamScheduler +from detectron2.modeling import SwinTransformer + +from ..common.coco_loader_lsj import dataloader +from .cascade_mask_rcnn_mvitv2_b_in21k_100ep import model + +model.backbone.bottom_up = L(SwinTransformer)( + depths=[2, 2, 18, 2], + drop_path_rate=0.4, + embed_dim=128, + num_heads=[4, 8, 16, 32], +) +model.backbone.in_features = ("p0", "p1", "p2", "p3") +model.backbone.square_pad = 1024 + +# Initialization and trainer settings +train = model_zoo.get_config("common/train.py").train +train.amp.enabled = True +train.ddp.fp16_compression = True +train.init_checkpoint = "detectron2://ImageNetPretrained/swin/swin_base_patch4_window7_224_22k.pth" + +# Schedule +# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep +train.max_iter = 184375 +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[163889, 177546], + num_updates=train.max_iter, + ), + warmup_length=250 / train.max_iter, + warmup_factor=0.001, +) + +# Rescale schedule +train.max_iter = train.max_iter // 2 # 100ep -> 50ep +lr_multiplier.scheduler.milestones = [ + milestone // 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter + + +optimizer = model_zoo.get_config("common/optim.py").AdamW +optimizer.lr = 4e-5 +optimizer.weight_decay = 0.05 +optimizer.params.overrides = {"relative_position_bias_table": {"weight_decay": 0.0}} diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_swin_l_in21k_50ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_swin_l_in21k_50ep.py new file mode 100644 index 0000000..60bc917 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_swin_l_in21k_50ep.py @@ -0,0 +1,15 @@ +from .cascade_mask_rcnn_swin_b_in21k_50ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +model.backbone.bottom_up.depths = [2, 2, 18, 2] +model.backbone.bottom_up.drop_path_rate = 0.4 +model.backbone.bottom_up.embed_dim = 192 +model.backbone.bottom_up.num_heads = [6, 12, 24, 48] + + +train.init_checkpoint = "detectron2://ImageNetPretrained/swin/swin_large_patch4_window7_224_22k.pth" diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_b_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_b_100ep.py new file mode 100644 index 0000000..95823ef --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_b_100ep.py @@ -0,0 +1,48 @@ +from detectron2.config import LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.roi_heads import ( + FastRCNNOutputLayers, + FastRCNNConvFCHead, + CascadeROIHeads, +) + +from .mask_rcnn_vitdet_b_100ep import 
( + dataloader, + lr_multiplier, + model, + train, + optimizer, + get_vit_lr_decay_rate, +) + +# arguments that don't exist for Cascade R-CNN +[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] + +model.roi_heads.update( + _target_=CascadeROIHeads, + box_heads=[ + L(FastRCNNConvFCHead)( + input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[256, 256, 256, 256], + fc_dims=[1024], + conv_norm="LN", + ) + for _ in range(3) + ], + box_predictors=[ + L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + test_score_thresh=0.05, + box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), + cls_agnostic_bbox_reg=True, + num_classes="${...num_classes}", + ) + for (w1, w2) in [(10, 5), (20, 10), (30, 15)] + ], + proposal_matchers=[ + L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) + for th in [0.5, 0.6, 0.7] + ], +) diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_h_75ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_h_75ep.py new file mode 100644 index 0000000..e508a68 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_h_75ep.py @@ -0,0 +1,33 @@ +from functools import partial + +from .cascade_mask_rcnn_vitdet_b_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, + get_vit_lr_decay_rate, +) + +train.init_checkpoint = ( + "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_huge_p14to16.pth?matching_heuristics=True" +) + +model.backbone.net.embed_dim = 1280 +model.backbone.net.depth = 32 +model.backbone.net.num_heads = 16 +model.backbone.net.drop_path_rate = 0.5 +# 7, 15, 23, 31 for global attention +model.backbone.net.window_block_indexes = ( + list(range(0, 7)) + list(range(8, 15)) + list(range(16, 23)) + list(range(24, 31)) +) + +optimizer.params.lr_factor_func = partial(get_vit_lr_decay_rate, lr_decay_rate=0.9, num_layers=32) +optimizer.params.overrides = {} +optimizer.params.weight_decay_norm = None + +train.max_iter = train.max_iter * 3 // 4 # 100ep -> 75ep +lr_multiplier.scheduler.milestones = [ + milestone * 3 // 4 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_l_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_l_100ep.py new file mode 100644 index 0000000..2743603 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/cascade_mask_rcnn_vitdet_l_100ep.py @@ -0,0 +1,25 @@ +from functools import partial + +from .cascade_mask_rcnn_vitdet_b_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, + get_vit_lr_decay_rate, +) + +train.init_checkpoint = ( + "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_large.pth?matching_heuristics=True" +) + +model.backbone.net.embed_dim = 1024 +model.backbone.net.depth = 24 +model.backbone.net.num_heads = 16 +model.backbone.net.drop_path_rate = 0.4 +# 5, 11, 17, 23 for global attention +model.backbone.net.window_block_indexes = ( + list(range(0, 5)) + list(range(6, 11)) + list(range(12, 17)) + list(range(18, 23)) +) + +optimizer.params.lr_factor_func = partial(get_vit_lr_decay_rate, lr_decay_rate=0.8, num_layers=24) diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_b_100ep.py 
b/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_b_100ep.py new file mode 100644 index 0000000..8fd36e9 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_b_100ep.py @@ -0,0 +1,40 @@ +from functools import partial +from fvcore.common.param_scheduler import MultiStepParamScheduler + +from detectron2 import model_zoo +from detectron2.config import LazyCall as L +from detectron2.solver import WarmupParamScheduler +from detectron2.modeling.backbone.vit import get_vit_lr_decay_rate + +from ..common.coco_loader_lsj import dataloader + + +model = model_zoo.get_config("common/models/mask_rcnn_vitdet.py").model + +# Initialization and trainer settings +train = model_zoo.get_config("common/train.py").train +train.amp.enabled = True +train.ddp.fp16_compression = True +train.init_checkpoint = ( + "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_base.pth?matching_heuristics=True" +) + + +# Schedule +# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep +train.max_iter = 184375 + +lr_multiplier = L(WarmupParamScheduler)( + scheduler=L(MultiStepParamScheduler)( + values=[1.0, 0.1, 0.01], + milestones=[163889, 177546], + num_updates=train.max_iter, + ), + warmup_length=250 / train.max_iter, + warmup_factor=0.001, +) + +# Optimizer +optimizer = model_zoo.get_config("common/optim.py").AdamW +optimizer.params.lr_factor_func = partial(get_vit_lr_decay_rate, num_layers=12, lr_decay_rate=0.7) +optimizer.params.overrides = {"pos_embed": {"weight_decay": 0.0}} diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_h_75ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_h_75ep.py new file mode 100644 index 0000000..7de96f0 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_h_75ep.py @@ -0,0 +1,33 @@ +from functools import partial + +from .mask_rcnn_vitdet_b_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, + get_vit_lr_decay_rate, +) + +train.init_checkpoint = ( + "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_huge_p14to16.pth?matching_heuristics=True" +) + +model.backbone.net.embed_dim = 1280 +model.backbone.net.depth = 32 +model.backbone.net.num_heads = 16 +model.backbone.net.drop_path_rate = 0.5 +# 7, 15, 23, 31 for global attention +model.backbone.net.window_block_indexes = ( + list(range(0, 7)) + list(range(8, 15)) + list(range(16, 23)) + list(range(24, 31)) +) + +optimizer.params.lr_factor_func = partial(get_vit_lr_decay_rate, lr_decay_rate=0.9, num_layers=32) +optimizer.params.overrides = {} +optimizer.params.weight_decay_norm = None + +train.max_iter = train.max_iter * 3 // 4 # 100ep -> 75ep +lr_multiplier.scheduler.milestones = [ + milestone * 3 // 4 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_l_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_l_100ep.py new file mode 100644 index 0000000..0d193cb --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/COCO/mask_rcnn_vitdet_l_100ep.py @@ -0,0 +1,25 @@ +from functools import partial + +from .mask_rcnn_vitdet_b_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, + get_vit_lr_decay_rate, +) + +train.init_checkpoint = ( + "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_large.pth?matching_heuristics=True" +) + 
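+# Relative to the imported ViT-B base config, the overrides below change only the
+# backbone capacity and regularization: ViT-L uses 24 blocks, 1024-dim embeddings,
+# 16 heads, drop path 0.4, global attention at blocks 5/11/17/23, and layer-wise
+# lr decay 0.8 over 24 layers. The dataloader and 100-epoch schedule are reused.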
+model.backbone.net.embed_dim = 1024 +model.backbone.net.depth = 24 +model.backbone.net.num_heads = 16 +model.backbone.net.drop_path_rate = 0.4 +# 5, 11, 17, 23 for global attention +model.backbone.net.window_block_indexes = ( + list(range(0, 5)) + list(range(6, 11)) + list(range(12, 17)) + list(range(18, 23)) +) + +optimizer.params.lr_factor_func = partial(get_vit_lr_decay_rate, lr_decay_rate=0.8, num_layers=24) diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_b_in21k_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_b_in21k_100ep.py new file mode 100644 index 0000000..1cf9c3e --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_b_in21k_100ep.py @@ -0,0 +1,48 @@ +from functools import partial +import torch.nn as nn + +from detectron2.config import LazyCall as L +from detectron2.data.detection_utils import get_fed_loss_cls_weights +from detectron2.data.samplers import RepeatFactorTrainingSampler +from detectron2.evaluation.lvis_evaluation import LVISEvaluator + +from ..COCO.cascade_mask_rcnn_mvitv2_b_in21k_100ep import ( + dataloader, + model, + train, + lr_multiplier, + optimizer, +) + +dataloader.train.dataset.names = "lvis_v1_train" +dataloader.train.sampler = L(RepeatFactorTrainingSampler)( + repeat_factors=L(RepeatFactorTrainingSampler.repeat_factors_from_category_frequency)( + dataset_dicts="${dataloader.train.dataset}", repeat_thresh=0.001 + ) +) +dataloader.test.dataset.names = "lvis_v1_val" +dataloader.evaluator = L(LVISEvaluator)( + dataset_name="${..test.dataset.names}", + max_dets_per_image=300, +) + +model.roi_heads.num_classes = 1203 +for i in range(3): + model.roi_heads.box_predictors[i].test_score_thresh = 0.02 + model.roi_heads.box_predictors[i].test_topk_per_image = 300 + model.roi_heads.box_predictors[i].use_sigmoid_ce = True + model.roi_heads.box_predictors[i].use_fed_loss = True + model.roi_heads.box_predictors[i].get_fed_loss_cls_weights = lambda: get_fed_loss_cls_weights( + dataloader.train.dataset.names, 0.5 + ) + +# Schedule +# 100 ep = 156250 iters * 64 images/iter / 100000 images/ep +train.max_iter = 156250 +train.eval_period = 30000 + +lr_multiplier.scheduler.milestones = [138889, 150463] +lr_multiplier.scheduler.num_updates = train.max_iter +lr_multiplier.warmup_length = 250 / train.max_iter + +optimizer.lr = 1e-4 diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_h_in21k_50ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_h_in21k_50ep.py new file mode 100644 index 0000000..084444b --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_h_in21k_50ep.py @@ -0,0 +1,25 @@ +from .cascade_mask_rcnn_mvitv2_b_in21k_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +model.backbone.bottom_up.embed_dim = 192 +model.backbone.bottom_up.depth = 80 +model.backbone.bottom_up.num_heads = 3 +model.backbone.bottom_up.last_block_indexes = (3, 11, 71, 79) +model.backbone.bottom_up.drop_path_rate = 0.6 +model.backbone.bottom_up.use_act_checkpoint = True + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_H_in21k.pyth" + +train.max_iter = train.max_iter // 2 # 100ep -> 50ep +lr_multiplier.scheduler.milestones = [ + milestone // 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter +lr_multiplier.warmup_length = 250 / 
train.max_iter + +optimizer.lr = 2e-5 diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py new file mode 100644 index 0000000..779442c --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_mvitv2_l_in21k_50ep.py @@ -0,0 +1,24 @@ +from .cascade_mask_rcnn_mvitv2_b_in21k_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +model.backbone.bottom_up.embed_dim = 144 +model.backbone.bottom_up.depth = 48 +model.backbone.bottom_up.num_heads = 2 +model.backbone.bottom_up.last_block_indexes = (1, 7, 43, 47) +model.backbone.bottom_up.drop_path_rate = 0.5 + +train.init_checkpoint = "detectron2://ImageNetPretrained/mvitv2/MViTv2_L_in21k.pyth" + +train.max_iter = train.max_iter // 2 # 100ep -> 50ep +lr_multiplier.scheduler.milestones = [ + milestone // 2 for milestone in lr_multiplier.scheduler.milestones +] +lr_multiplier.scheduler.num_updates = train.max_iter +lr_multiplier.warmup_length = 250 / train.max_iter + +optimizer.lr = 4e-5 diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_b_in21k_50ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_b_in21k_50ep.py new file mode 100644 index 0000000..d18c925 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_b_in21k_50ep.py @@ -0,0 +1,49 @@ +from detectron2.config.lazy import LazyCall as L +from detectron2.data.detection_utils import get_fed_loss_cls_weights +from detectron2.data.samplers import RepeatFactorTrainingSampler +from detectron2.evaluation.lvis_evaluation import LVISEvaluator + +from ..COCO.cascade_mask_rcnn_swin_b_in21k_50ep import ( + dataloader, + model, + train, + lr_multiplier, + optimizer, +) + +dataloader.train.dataset.names = "lvis_v1_train" +dataloader.train.sampler = L(RepeatFactorTrainingSampler)( + repeat_factors=L(RepeatFactorTrainingSampler.repeat_factors_from_category_frequency)( + dataset_dicts="${dataloader.train.dataset}", repeat_thresh=0.001 + ) +) +dataloader.test.dataset.names = "lvis_v1_val" +dataloader.evaluator = L(LVISEvaluator)( + dataset_name="${..test.dataset.names}", + max_dets_per_image=300, +) + +model.backbone.bottom_up.drop_path_rate = 0.3 + +model.roi_heads.num_classes = 1203 +for i in range(3): + model.roi_heads.box_predictors[i].test_score_thresh = 0.02 + model.roi_heads.box_predictors[i].test_topk_per_image = 300 + model.roi_heads.box_predictors[i].use_sigmoid_ce = True + model.roi_heads.box_predictors[i].use_fed_loss = True + model.roi_heads.box_predictors[i].get_fed_loss_cls_weights = lambda: get_fed_loss_cls_weights( + dataloader.train.dataset.names, 0.5 + ) + +# Schedule +# 100 ep = 156250 iters * 64 images/iter / 100000 images/ep +# 100 ep -> 50 ep as the model achieves better performance with 50 epochs +train.max_iter = 156250 // 2 +train.eval_period = 30000 + +lr_multiplier.scheduler.milestones = [milestone // 2 for milestone in [138889, 150463]] +lr_multiplier.scheduler.num_updates = train.max_iter +lr_multiplier.warmup_length = 250 / train.max_iter + +# Optimized hyperparams +optimizer.lr = 1e-4 diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_l_in21k_50ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_l_in21k_50ep.py new file mode 100644 index 0000000..9e22e3b --- /dev/null +++ 
b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_l_in21k_50ep.py @@ -0,0 +1,12 @@ +from .cascade_mask_rcnn_swin_b_in21k_50ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +model.backbone.bottom_up.embed_dim = 192 +model.backbone.bottom_up.num_heads = [6, 12, 24, 48] + +train.init_checkpoint = "detectron2://ImageNetPretrained/swin/swin_large_patch4_window7_224_22k.pth" diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_b_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_b_100ep.py new file mode 100644 index 0000000..8115224 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_b_100ep.py @@ -0,0 +1,51 @@ +from detectron2.config import LazyCall as L +from detectron2.data.detection_utils import get_fed_loss_cls_weights +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads + +from .mask_rcnn_vitdet_b_100ep import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +# arguments that don't exist for Cascade R-CNN +[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] + +model.roi_heads.update( + _target_=CascadeROIHeads, + num_classes=1203, + box_heads=[ + L(FastRCNNConvFCHead)( + input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[256, 256, 256, 256], + fc_dims=[1024], + conv_norm="LN", + ) + for _ in range(3) + ], + box_predictors=[ + L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), + num_classes="${...num_classes}", + test_score_thresh=0.02, + test_topk_per_image=300, + cls_agnostic_bbox_reg=True, + use_sigmoid_ce=True, + use_fed_loss=True, + get_fed_loss_cls_weights=lambda: get_fed_loss_cls_weights( + dataloader.train.dataset.names, 0.5 + ), + ) + for (w1, w2) in [(10, 5), (20, 10), (30, 15)] + ], + proposal_matchers=[ + L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) + for th in [0.5, 0.6, 0.7] + ], +) diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_h_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_h_100ep.py new file mode 100644 index 0000000..68bec57 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_h_100ep.py @@ -0,0 +1,51 @@ +from detectron2.config import LazyCall as L +from detectron2.data.detection_utils import get_fed_loss_cls_weights +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads + +from .mask_rcnn_vitdet_h_100ep import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +# arguments that don't exist for Cascade R-CNN +[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] + +model.roi_heads.update( + _target_=CascadeROIHeads, + num_classes=1203, + box_heads=[ + L(FastRCNNConvFCHead)( + input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[256, 256, 256, 256], + fc_dims=[1024], + conv_norm="LN", + ) + for _ in range(3) 
+ ], + box_predictors=[ + L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), + num_classes="${...num_classes}", + test_score_thresh=0.02, + test_topk_per_image=300, + cls_agnostic_bbox_reg=True, + use_sigmoid_ce=True, + use_fed_loss=True, + get_fed_loss_cls_weights=lambda: get_fed_loss_cls_weights( + dataloader.train.dataset.names, 0.5 + ), + ) + for (w1, w2) in [(10, 5), (20, 10), (30, 15)] + ], + proposal_matchers=[ + L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) + for th in [0.5, 0.6, 0.7] + ], +) diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_l_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_l_100ep.py new file mode 100644 index 0000000..ebaf526 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_vitdet_l_100ep.py @@ -0,0 +1,51 @@ +from detectron2.config import LazyCall as L +from detectron2.data.detection_utils import get_fed_loss_cls_weights +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.matcher import Matcher +from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads + +from .mask_rcnn_vitdet_l_100ep import ( + dataloader, + lr_multiplier, + model, + optimizer, + train, +) + +# arguments that don't exist for Cascade R-CNN +[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]] + +model.roi_heads.update( + _target_=CascadeROIHeads, + num_classes=1203, + box_heads=[ + L(FastRCNNConvFCHead)( + input_shape=ShapeSpec(channels=256, height=7, width=7), + conv_dims=[256, 256, 256, 256], + fc_dims=[1024], + conv_norm="LN", + ) + for _ in range(3) + ], + box_predictors=[ + L(FastRCNNOutputLayers)( + input_shape=ShapeSpec(channels=1024), + box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)), + num_classes="${...num_classes}", + test_score_thresh=0.02, + test_topk_per_image=300, + cls_agnostic_bbox_reg=True, + use_sigmoid_ce=True, + use_fed_loss=True, + get_fed_loss_cls_weights=lambda: get_fed_loss_cls_weights( + dataloader.train.dataset.names, 0.5 + ), + ) + for (w1, w2) in [(10, 5), (20, 10), (30, 15)] + ], + proposal_matchers=[ + L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False) + for th in [0.5, 0.6, 0.7] + ], +) diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_b_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_b_100ep.py new file mode 100644 index 0000000..ef90545 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_b_100ep.py @@ -0,0 +1,44 @@ +from detectron2.config import LazyCall as L +from detectron2.data.samplers import RepeatFactorTrainingSampler +from detectron2.evaluation.lvis_evaluation import LVISEvaluator +from detectron2.data.detection_utils import get_fed_loss_cls_weights + +from ..COCO.mask_rcnn_vitdet_b_100ep import ( + dataloader, + model, + train, + lr_multiplier, + optimizer, +) + +dataloader.train.dataset.names = "lvis_v1_train" +dataloader.train.sampler = L(RepeatFactorTrainingSampler)( + repeat_factors=L(RepeatFactorTrainingSampler.repeat_factors_from_category_frequency)( + dataset_dicts="${dataloader.train.dataset}", repeat_thresh=0.001 + ) +) +dataloader.test.dataset.names = "lvis_v1_val" +dataloader.evaluator = 
L(LVISEvaluator)( + dataset_name="${..test.dataset.names}", + max_dets_per_image=300, +) + +model.roi_heads.num_classes = 1203 +model.roi_heads.box_predictor.test_score_thresh = 0.02 +model.roi_heads.box_predictor.test_topk_per_image = 300 +model.roi_heads.box_predictor.use_sigmoid_ce = True +model.roi_heads.box_predictor.use_fed_loss = True +model.roi_heads.box_predictor.get_fed_loss_cls_weights = lambda: get_fed_loss_cls_weights( + dataloader.train.dataset.names, 0.5 +) + +# Schedule +# 100 ep = 156250 iters * 64 images/iter / 100000 images/ep +train.max_iter = 156250 +train.eval_period = 30000 + +lr_multiplier.scheduler.milestones = [138889, 150463] +lr_multiplier.scheduler.num_updates = train.max_iter +lr_multiplier.warmup_length = 250 / train.max_iter + +optimizer.lr = 2e-4 diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_h_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_h_100ep.py new file mode 100644 index 0000000..0f99bad --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_h_100ep.py @@ -0,0 +1,30 @@ +from functools import partial + +from detectron2.modeling.backbone.vit import get_vit_lr_decay_rate + +from .mask_rcnn_vitdet_b_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +train.init_checkpoint = ( + "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_huge_p14to16.pth?matching_heuristics=True" +) + +model.backbone.net.embed_dim = 1280 +model.backbone.net.depth = 32 +model.backbone.net.num_heads = 16 +model.backbone.net.drop_path_rate = 0.4 +# 7, 15, 23, 31 for global attention +model.backbone.net.window_block_indexes = ( + list(range(0, 7)) + list(range(8, 15)) + list(range(16, 23)) + list(range(24, 31)) +) + + +optimizer.lr = 1e-4 +optimizer.params.lr_factor_func = partial(get_vit_lr_decay_rate, lr_decay_rate=0.9, num_layers=32) +optimizer.params.overrides = {} +optimizer.params.weight_decay_norm = None diff --git a/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_l_100ep.py b/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_l_100ep.py new file mode 100644 index 0000000..15d8792 --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/LVIS/mask_rcnn_vitdet_l_100ep.py @@ -0,0 +1,26 @@ +from functools import partial + +from detectron2.modeling.backbone.vit import get_vit_lr_decay_rate + +from .mask_rcnn_vitdet_b_100ep import ( + dataloader, + lr_multiplier, + model, + train, + optimizer, +) + +train.init_checkpoint = ( + "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_large.pth?matching_heuristics=True" +) + +model.backbone.net.embed_dim = 1024 +model.backbone.net.depth = 24 +model.backbone.net.num_heads = 16 +model.backbone.net.drop_path_rate = 0.4 +# 5, 11, 17, 23 for global attention +model.backbone.net.window_block_indexes = ( + list(range(0, 5)) + list(range(6, 11)) + list(range(12, 17)) + list(range(18, 23)) +) + +optimizer.params.lr_factor_func = partial(get_vit_lr_decay_rate, lr_decay_rate=0.8, num_layers=24) diff --git a/data_processing/detectron2/projects/ViTDet/configs/common/coco_loader_lsj.py b/data_processing/detectron2/projects/ViTDet/configs/common/coco_loader_lsj.py new file mode 100644 index 0000000..e6c2f1e --- /dev/null +++ b/data_processing/detectron2/projects/ViTDet/configs/common/coco_loader_lsj.py @@ -0,0 +1,22 @@ +import detectron2.data.transforms as T +from detectron2 import model_zoo +from detectron2.config import LazyCall as L + +# Data 
using LSJ +image_size = 1024 +dataloader = model_zoo.get_config("common/data/coco.py").dataloader +dataloader.train.mapper.augmentations = [ + L(T.RandomFlip)(horizontal=True), # flip first + L(T.ResizeScale)( + min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size + ), + L(T.FixedSizeCrop)(crop_size=(image_size, image_size), pad=False), +] +dataloader.train.mapper.image_format = "RGB" +dataloader.train.total_batch_size = 64 +# recompute boxes due to cropping +dataloader.train.mapper.recompute_boxes = True + +dataloader.test.mapper.augmentations = [ + L(T.ResizeShortestEdge)(short_edge_length=image_size, max_size=image_size), +] diff --git a/data_processing/detectron2/setup.cfg b/data_processing/detectron2/setup.cfg new file mode 100644 index 0000000..f127d7b --- /dev/null +++ b/data_processing/detectron2/setup.cfg @@ -0,0 +1,26 @@ +[isort] +line_length=100 +multi_line_output=3 +include_trailing_comma=True +known_standard_library=numpy,setuptools,mock +skip=./datasets,docs +skip_glob=*/__init__.py,**/configs/**,**/tests/config/** +known_myself=detectron2 +known_third_party=fvcore,matplotlib,cv2,torch,torchvision,PIL,pycocotools,yacs,termcolor,cityscapesscripts,tabulate,tqdm,scipy,lvis,psutil,pkg_resources,caffe2,onnx,panopticapi,black,isort,av,iopath,omegaconf,hydra,yaml,pydoc,submitit,cloudpickle,packaging,timm,pandas,fairscale,pytorch3d,pytorch_lightning +no_lines_before=STDLIB,THIRDPARTY +sections=FUTURE,STDLIB,THIRDPARTY,myself,FIRSTPARTY,LOCALFOLDER +default_section=FIRSTPARTY + +[mypy] +python_version=3.7 +ignore_missing_imports = True +warn_unused_configs = True +disallow_untyped_defs = True +check_untyped_defs = True +warn_unused_ignores = True +warn_redundant_casts = True +show_column_numbers = True +follow_imports = silent +allow_redefinition = True +; Require all functions to be annotated +disallow_incomplete_defs = True diff --git a/data_processing/detectron2/setup.py b/data_processing/detectron2/setup.py new file mode 100644 index 0000000..559cf54 --- /dev/null +++ b/data_processing/detectron2/setup.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. + +import glob +import os +import shutil +from os import path +from setuptools import find_packages, setup +from typing import List +import torch +from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension + +torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] +assert torch_ver >= [1, 8], "Requires PyTorch >= 1.8" + + +def get_version(): + init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py") + init_py = open(init_py_path, "r").readlines() + version_line = [l.strip() for l in init_py if l.startswith("__version__")][0] + version = version_line.split("=")[-1].strip().strip("'\"") + + # The following is used to build release packages. + # Users should never use it. 
+ suffix = os.getenv("D2_VERSION_SUFFIX", "") + version = version + suffix + if os.getenv("BUILD_NIGHTLY", "0") == "1": + from datetime import datetime + + date_str = datetime.today().strftime("%y%m%d") + version = version + ".dev" + date_str + + new_init_py = [l for l in init_py if not l.startswith("__version__")] + new_init_py.append('__version__ = "{}"\n'.format(version)) + with open(init_py_path, "w") as f: + f.write("".join(new_init_py)) + return version + + +def get_extensions(): + this_dir = path.dirname(path.abspath(__file__)) + extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc") + + main_source = path.join(extensions_dir, "vision.cpp") + sources = glob.glob(path.join(extensions_dir, "**", "*.cpp")) + + from torch.utils.cpp_extension import ROCM_HOME + + is_rocm_pytorch = ( + True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False + ) + if is_rocm_pytorch: + assert torch_ver >= [1, 8], "ROCM support requires PyTorch >= 1.8!" + + # common code between cuda and rocm platforms, for hipify version [1,0,0] and later. + source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob( + path.join(extensions_dir, "*.cu") + ) + sources = [main_source] + sources + + extension = CppExtension + + extra_compile_args = {"cxx": []} + define_macros = [] + + if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv( + "FORCE_CUDA", "0" + ) == "1": + extension = CUDAExtension + sources += source_cuda + + if not is_rocm_pytorch: + define_macros += [("WITH_CUDA", None)] + extra_compile_args["nvcc"] = [ + "-O3", + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ] + else: + define_macros += [("WITH_HIP", None)] + extra_compile_args["nvcc"] = [] + + if torch_ver < [1, 7]: + # supported by https://github.com/pytorch/pytorch/pull/43931 + CC = os.environ.get("CC", None) + if CC is not None: + extra_compile_args["nvcc"].append("-ccbin={}".format(CC)) + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + "detectron2._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + + return ext_modules + + +def get_model_zoo_configs() -> List[str]: + """ + Return a list of configs to include in package for model zoo. Copy over these configs inside + detectron2/model_zoo. + """ + + # Use absolute paths while symlinking. + source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs") + destination = path.join( + path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs" + ) + # Symlink the config directory inside package to have a cleaner pip install. + + # Remove stale symlink/directory from a previous build. + if path.exists(source_configs_dir): + if path.islink(destination): + os.unlink(destination) + elif path.isdir(destination): + shutil.rmtree(destination) + + if not path.exists(destination): + try: + os.symlink(source_configs_dir, destination) + except OSError: + # Fall back to copying if symlink fails: ex. on Windows. 
+            shutil.copytree(source_configs_dir, destination) + + config_paths = glob.glob("configs/**/*.yaml", recursive=True) + glob.glob( + "configs/**/*.py", recursive=True + ) + return config_paths + + +# For projects that are relatively small and provide features that are very close +# to detectron2's core functionalities, we install them under detectron2.projects +PROJECTS = { + "detectron2.projects.point_rend": "projects/PointRend/point_rend", + "detectron2.projects.deeplab": "projects/DeepLab/deeplab", + "detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab", +} + +setup( + name="detectron2", + version=get_version(), + author="FAIR", + url="https://github.com/facebookresearch/detectron2", + description="Detectron2 is FAIR's next-generation research " + "platform for object detection and segmentation.", + packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()), + package_dir=PROJECTS, + package_data={"detectron2.model_zoo": get_model_zoo_configs()}, + python_requires=">=3.7", + install_requires=[ + # These dependencies are not pure-python. + # In general, avoid adding dependencies that are not pure-python because they are not + # guaranteed to be installable by `pip install` on all platforms. + "Pillow>=7.1", # or use pillow-simd for better performance + "matplotlib", # TODO move it to optional after we add opencv visualization + "pycocotools>=2.0.2", # corresponds to https://github.com/ppwwyyxx/cocoapi + # Do not add opencv here. Just like pytorch, users should install + # opencv themselves, preferably by the OS's package manager, or by + # choosing the proper pypi package name at https://github.com/skvark/opencv-python + # Also, avoid adding dependencies that transitively depend on pytorch or opencv. + # ------------------------------------------------------------ + # The following are pure-python dependencies that should be easily installable. + # But still be careful when adding more: fewer people are able to use the software + # with every new dependency added. + "termcolor>=1.1", + "yacs>=0.1.8", + "tabulate", + "cloudpickle", + "tqdm>4.29.0", + "tensorboard", + # Lock version of fvcore/iopath because they may have breaking changes + # NOTE: when updating fvcore/iopath version, make sure fvcore depends + # on compatible version of iopath. + "fvcore>=0.1.5,<0.1.6", # required like this to make it pip installable + "iopath>=0.1.7,<0.1.10", + "dataclasses; python_version<'3.7'", + "omegaconf>=2.1", + "hydra-core>=1.1", + "black", + "packaging", + # NOTE: When adding new dependencies, if it is required at import time (in addition + # to runtime), it probably needs to appear in docs/requirements.txt, or as a mock + # in docs/conf.py + ], + extras_require={ + # optional dependencies, required by some features + "all": [ + "fairscale", + "timm", # Used by a few ViT models. + "scipy>1.5.1", + "shapely", + "pygments>=2.2", + "psutil", + "panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip", + ], + # dev dependencies.
Install them by `pip install 'detectron2[dev]'` + "dev": [ + "flake8==3.8.1", + "isort==4.3.21", + "flake8-bugbear", + "flake8-comprehensions", + "black==22.3.0", + ], + }, + ext_modules=get_extensions(), + cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, +) diff --git a/data_processing/detectron2/tests/README.md b/data_processing/detectron2/tests/README.md new file mode 100644 index 0000000..f560384 --- /dev/null +++ b/data_processing/detectron2/tests/README.md @@ -0,0 +1,9 @@ +## Unit Tests + +To run the unittests, do: +``` +cd detectron2 +python -m unittest discover -v -s ./tests +``` + +There are also end-to-end inference & training tests, in [dev/run_*_tests.sh](../dev). diff --git a/data_processing/detectron2/tests/__init__.py b/data_processing/detectron2/tests/__init__.py new file mode 100644 index 0000000..9020c2d --- /dev/null +++ b/data_processing/detectron2/tests/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/data_processing/detectron2/tests/config/dir1/bad_import.py b/data_processing/detectron2/tests/config/dir1/bad_import.py new file mode 100644 index 0000000..d7452c4 --- /dev/null +++ b/data_processing/detectron2/tests/config/dir1/bad_import.py @@ -0,0 +1,2 @@ +# import from directory is not allowed +from . import dir1a diff --git a/data_processing/detectron2/tests/config/dir1/bad_import2.py b/data_processing/detectron2/tests/config/dir1/bad_import2.py new file mode 100644 index 0000000..085a4df --- /dev/null +++ b/data_processing/detectron2/tests/config/dir1/bad_import2.py @@ -0,0 +1 @@ +from .does_not_exist import x diff --git a/data_processing/detectron2/tests/config/dir1/dir1_a.py b/data_processing/detectron2/tests/config/dir1/dir1_a.py new file mode 100644 index 0000000..a939955 --- /dev/null +++ b/data_processing/detectron2/tests/config/dir1/dir1_a.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +dir1a_str = "base_a_1" +dir1a_dict = {"a": 1, "b": 2} diff --git a/data_processing/detectron2/tests/config/dir1/dir1_b.py b/data_processing/detectron2/tests/config/dir1/dir1_b.py new file mode 100644 index 0000000..2dcb54c --- /dev/null +++ b/data_processing/detectron2/tests/config/dir1/dir1_b.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from detectron2.config import LazyConfig + +# equivalent to relative import +dir1a_str, dir1a_dict = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict")) + +dir1b_str = dir1a_str + "_from_b" +dir1b_dict = dir1a_dict + +# Every import is a reload: not modified by other config files +assert dir1a_dict.a == 1 diff --git a/data_processing/detectron2/tests/config/dir1/load_rel.py b/data_processing/detectron2/tests/config/dir1/load_rel.py new file mode 100644 index 0000000..22d10db --- /dev/null +++ b/data_processing/detectron2/tests/config/dir1/load_rel.py @@ -0,0 +1,5 @@ +# test that load_rel can work +from detectron2.config import LazyConfig + +x = LazyConfig.load_rel("dir1_a.py", "dir1a_dict") +assert x["a"] == 1 diff --git a/data_processing/detectron2/tests/config/root_cfg.py b/data_processing/detectron2/tests/config/root_cfg.py new file mode 100644 index 0000000..33d1d4b --- /dev/null +++ b/data_processing/detectron2/tests/config/root_cfg.py @@ -0,0 +1,14 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
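+# Root config used by the LazyConfig loading tests below: it pulls in the dir1 configs and builds a LazyCall object around itertools.count.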
+from itertools import count + +from detectron2.config import LazyCall as L + +from .dir1.dir1_a import dir1a_dict, dir1a_str + +dir1a_dict.a = "modified" + +# modification above won't affect future imports +from .dir1.dir1_b import dir1b_dict, dir1b_str + + +lazyobj = L(count)(x=dir1a_str, y=dir1b_str) diff --git a/data_processing/detectron2/tests/config/test_instantiate_config.py b/data_processing/detectron2/tests/config/test_instantiate_config.py new file mode 100644 index 0000000..6b72894 --- /dev/null +++ b/data_processing/detectron2/tests/config/test_instantiate_config.py @@ -0,0 +1,109 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import os +import tempfile +import unittest +import yaml +from omegaconf import OmegaConf +from omegaconf import __version__ as oc_version +from dataclasses import dataclass + +from detectron2.config import LazyConfig, instantiate, LazyCall as L +from detectron2.layers import ShapeSpec +from detectron2.utils.testing import reload_lazy_config + +OC_VERSION = tuple(int(x) for x in oc_version.split(".")[:2]) + + +class TestClass: + def __init__(self, int_arg, list_arg=None, dict_arg=None, extra_arg=None): + self.int_arg = int_arg + self.list_arg = list_arg + self.dict_arg = dict_arg + self.extra_arg = extra_arg + + def __call__(self, call_arg): + return call_arg + self.int_arg + + +@unittest.skipIf(OC_VERSION < (2, 1), "omegaconf version too old") +class TestConstruction(unittest.TestCase): + def test_basic_construct(self): + cfg = L(TestClass)( + int_arg=3, + list_arg=[10], + dict_arg={}, + extra_arg=L(TestClass)(int_arg=4, list_arg="${..list_arg}"), + ) + + for x in [cfg, reload_lazy_config(cfg)]: + obj = instantiate(x) + self.assertIsInstance(obj, TestClass) + self.assertEqual(obj.int_arg, 3) + self.assertEqual(obj.extra_arg.int_arg, 4) + self.assertEqual(obj.extra_arg.list_arg, obj.list_arg) + + # Test interpolation + x.extra_arg.list_arg = [5] + obj = instantiate(x) + self.assertIsInstance(obj, TestClass) + self.assertEqual(obj.extra_arg.list_arg, [5]) + + def test_instantiate_other_obj(self): + # do nothing for other obj + self.assertEqual(instantiate(5), 5) + x = [3, 4, 5] + self.assertEqual(instantiate(x), x) + x = TestClass(1) + self.assertIs(instantiate(x), x) + x = {"xx": "yy"} + self.assertIs(instantiate(x), x) + + def test_instantiate_lazy_target(self): + # _target_ is result of instantiate + objconf = L(L(len)(int_arg=3))(call_arg=4) + objconf._target_._target_ = TestClass + self.assertEqual(instantiate(objconf), 7) + + def test_instantiate_list(self): + lst = [1, 2, L(TestClass)(int_arg=1)] + x = L(TestClass)(int_arg=lst) # list as an argument should be recursively instantiated + x = instantiate(x).int_arg + self.assertEqual(x[:2], [1, 2]) + self.assertIsInstance(x[2], TestClass) + self.assertEqual(x[2].int_arg, 1) + + def test_instantiate_dataclass(self): + cfg = L(ShapeSpec)(channels=1, width=3) + # Test original cfg as well as serialization + for x in [cfg, reload_lazy_config(cfg)]: + obj = instantiate(x) + self.assertIsInstance(obj, ShapeSpec) + self.assertEqual(obj.channels, 1) + self.assertEqual(obj.height, None) + + def test_instantiate_dataclass_as_subconfig(self): + cfg = L(TestClass)(int_arg=1, extra_arg=ShapeSpec(channels=1, width=3)) + # Test original cfg as well as serialization + for x in [cfg, reload_lazy_config(cfg)]: + obj = instantiate(x) + self.assertIsInstance(obj.extra_arg, ShapeSpec) + self.assertEqual(obj.extra_arg.channels, 1) + self.assertEqual(obj.extra_arg.height, None) + + def test_bad_lazycall(self): + 
with self.assertRaises(Exception): + L(3) + + def test_interpolation(self): + cfg = L(TestClass)(int_arg=3, extra_arg="${int_arg}") + + cfg.int_arg = 4 + obj = instantiate(cfg) + self.assertEqual(obj.extra_arg, 4) + + # Test that interpolation still works after serialization + cfg = reload_lazy_config(cfg) + cfg.int_arg = 5 + obj = instantiate(cfg) + self.assertEqual(obj.extra_arg, 5) diff --git a/data_processing/detectron2/tests/config/test_lazy_config.py b/data_processing/detectron2/tests/config/test_lazy_config.py new file mode 100644 index 0000000..ff68143 --- /dev/null +++ b/data_processing/detectron2/tests/config/test_lazy_config.py @@ -0,0 +1,98 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import os +import unittest +import tempfile +from itertools import count + +from detectron2.config import LazyConfig, LazyCall as L +from omegaconf import DictConfig + + +class TestLazyPythonConfig(unittest.TestCase): + def setUp(self): + self.curr_dir = os.path.dirname(__file__) + self.root_filename = os.path.join(self.curr_dir, "root_cfg.py") + + def test_load(self): + cfg = LazyConfig.load(self.root_filename) + + self.assertEqual(cfg.dir1a_dict.a, "modified") + self.assertEqual(cfg.dir1b_dict.a, 1) + self.assertEqual(cfg.lazyobj.x, "base_a_1") + + cfg.lazyobj.x = "new_x" + # reload + cfg = LazyConfig.load(self.root_filename) + self.assertEqual(cfg.lazyobj.x, "base_a_1") + + def test_save_load(self): + cfg = LazyConfig.load(self.root_filename) + with tempfile.TemporaryDirectory(prefix="detectron2") as d: + fname = os.path.join(d, "test_config.yaml") + LazyConfig.save(cfg, fname) + cfg2 = LazyConfig.load(fname) + + self.assertEqual(cfg2.lazyobj._target_, "itertools.count") + self.assertEqual(cfg.lazyobj._target_, count) + cfg2.lazyobj.pop("_target_") + cfg.lazyobj.pop("_target_") + # the rest are equal + self.assertEqual(cfg, cfg2) + + def test_failed_save(self): + cfg = DictConfig({"x": lambda: 3}, flags={"allow_objects": True}) + with tempfile.TemporaryDirectory(prefix="detectron2") as d: + fname = os.path.join(d, "test_config.yaml") + LazyConfig.save(cfg, fname) + self.assertTrue(os.path.exists(fname)) + self.assertTrue(os.path.exists(fname + ".pkl")) + + def test_overrides(self): + cfg = LazyConfig.load(self.root_filename) + LazyConfig.apply_overrides(cfg, ["lazyobj.x=123", 'dir1b_dict.a="123"']) + self.assertEqual(cfg.dir1b_dict.a, "123") + self.assertEqual(cfg.lazyobj.x, 123) + + LazyConfig.apply_overrides(cfg, ["dir1b_dict.a=abc"]) + self.assertEqual(cfg.dir1b_dict.a, "abc") + + def test_invalid_overrides(self): + cfg = LazyConfig.load(self.root_filename) + with self.assertRaises(KeyError): + LazyConfig.apply_overrides(cfg, ["lazyobj.x.xxx=123"]) + + def test_to_py(self): + cfg = LazyConfig.load(self.root_filename) + cfg.lazyobj.x = {"a": 1, "b": 2, "c": L(count)(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]})} + cfg.list = ["a", 1, "b", 3.2] + py_str = LazyConfig.to_py(cfg) + expected = """cfg.dir1a_dict.a = "modified" +cfg.dir1a_dict.b = 2 +cfg.dir1b_dict.a = 1 +cfg.dir1b_dict.b = 2 +cfg.lazyobj = itertools.count( + x={ + "a": 1, + "b": 2, + "c": itertools.count(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]}), + }, + y="base_a_1_from_b", +) +cfg.list = ["a", 1, "b", 3.2] +""" + self.assertEqual(py_str, expected) + + def test_bad_import(self): + file = os.path.join(self.curr_dir, "dir1", "bad_import.py") + with self.assertRaisesRegex(ImportError, "relative import"): + LazyConfig.load(file) + + def test_bad_import2(self): + file = os.path.join(self.curr_dir, "dir1", "bad_import2.py") 
+ with self.assertRaisesRegex(ImportError, "not exist"): + LazyConfig.load(file) + + def test_load_rel(self): + file = os.path.join(self.curr_dir, "dir1", "load_rel.py") + cfg = LazyConfig.load(file) + self.assertIn("x", cfg) diff --git a/data_processing/detectron2/tests/config/test_yacs_config.py b/data_processing/detectron2/tests/config/test_yacs_config.py new file mode 100644 index 0000000..01dd695 --- /dev/null +++ b/data_processing/detectron2/tests/config/test_yacs_config.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. + + +import os +import tempfile +import unittest +import torch +from omegaconf import OmegaConf + +from detectron2 import model_zoo +from detectron2.config import configurable, downgrade_config, get_cfg, upgrade_config +from detectron2.layers import ShapeSpec +from detectron2.modeling import build_model + +_V0_CFG = """ +MODEL: + RPN_HEAD: + NAME: "TEST" +VERSION: 0 +""" + +_V1_CFG = """ +MODEL: + WEIGHT: "/path/to/weight" +""" + + +class TestConfigVersioning(unittest.TestCase): + def test_upgrade_downgrade_consistency(self): + cfg = get_cfg() + # check that custom is preserved + cfg.USER_CUSTOM = 1 + + down = downgrade_config(cfg, to_version=0) + up = upgrade_config(down) + self.assertTrue(up == cfg) + + def _merge_cfg_str(self, cfg, merge_str): + f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) + try: + f.write(merge_str) + f.close() + cfg.merge_from_file(f.name) + finally: + os.remove(f.name) + return cfg + + def test_auto_upgrade(self): + cfg = get_cfg() + latest_ver = cfg.VERSION + cfg.USER_CUSTOM = 1 + + self._merge_cfg_str(cfg, _V0_CFG) + + self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST") + self.assertEqual(cfg.VERSION, latest_ver) + + def test_guess_v1(self): + cfg = get_cfg() + latest_ver = cfg.VERSION + self._merge_cfg_str(cfg, _V1_CFG) + self.assertEqual(cfg.VERSION, latest_ver) + + +class _TestClassA(torch.nn.Module): + @configurable + def __init__(self, arg1, arg2, arg3=3): + super().__init__() + self.arg1 = arg1 + self.arg2 = arg2 + self.arg3 = arg3 + assert arg1 == 1 + assert arg2 == 2 + assert arg3 == 3 + + @classmethod + def from_config(cls, cfg): + args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2} + return args + + +class _TestClassB(_TestClassA): + @configurable + def __init__(self, input_shape, arg1, arg2, arg3=3): + """ + Doc of _TestClassB + """ + assert input_shape == "shape" + super().__init__(arg1, arg2, arg3) + + @classmethod + def from_config(cls, cfg, input_shape): # test extra positional arg in from_config + args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2} + args["input_shape"] = input_shape + return args + + +class _LegacySubClass(_TestClassB): + # an old subclass written in cfg style + def __init__(self, cfg, input_shape, arg4=4): + super().__init__(cfg, input_shape) + assert self.arg1 == 1 + assert self.arg2 == 2 + assert self.arg3 == 3 + + +class _NewSubClassNewInit(_TestClassB): + # test new subclass with a new __init__ + @configurable + def __init__(self, input_shape, arg4=4, **kwargs): + super().__init__(input_shape, **kwargs) + assert self.arg1 == 1 + assert self.arg2 == 2 + assert self.arg3 == 3 + + +class _LegacySubClassNotCfg(_TestClassB): + # an old subclass written in cfg style, but argument is not called "cfg" + def __init__(self, config, input_shape): + super().__init__(config, input_shape) + assert self.arg1 == 1 + assert self.arg2 == 2 + assert self.arg3 == 3 + + +class _TestClassC(_TestClassB): + @classmethod + def from_config(cls, cfg, input_shape, **kwargs): # 
test extra kwarg overwrite + args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2} + args["input_shape"] = input_shape + args.update(kwargs) + return args + + +class _TestClassD(_TestClassA): + @configurable + def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3): + assert input_shape == "shape" + super().__init__(arg1, arg2, arg3) + + # _TestClassA.from_config does not have input_shape args. + # Test whether input_shape will be forwarded to __init__ + + +@configurable(from_config=lambda cfg, arg2: {"arg1": cfg.ARG1, "arg2": arg2, "arg3": cfg.ARG3}) +def _test_func(arg1, arg2=2, arg3=3, arg4=4): + return arg1, arg2, arg3, arg4 + + +class TestConfigurable(unittest.TestCase): + def testInitWithArgs(self): + _ = _TestClassA(arg1=1, arg2=2, arg3=3) + _ = _TestClassB("shape", arg1=1, arg2=2) + _ = _TestClassC("shape", arg1=1, arg2=2) + _ = _TestClassD("shape", arg1=1, arg2=2, arg3=3) + + def testPatchedAttr(self): + self.assertTrue("Doc" in _TestClassB.__init__.__doc__) + self.assertEqual(_TestClassD.__init__.__annotations__["arg1"], int) + + def testInitWithCfg(self): + cfg = get_cfg() + cfg.ARG1 = 1 + cfg.ARG2 = 2 + cfg.ARG3 = 3 + _ = _TestClassA(cfg) + _ = _TestClassB(cfg, input_shape="shape") + _ = _TestClassC(cfg, input_shape="shape") + _ = _TestClassD(cfg, input_shape="shape") + _ = _LegacySubClass(cfg, input_shape="shape") + _ = _NewSubClassNewInit(cfg, input_shape="shape") + _ = _LegacySubClassNotCfg(cfg, input_shape="shape") + with self.assertRaises(TypeError): + # disallow forwarding positional args to __init__ since it's prone to errors + _ = _TestClassD(cfg, "shape") + + # call with kwargs instead + _ = _TestClassA(cfg=cfg) + _ = _TestClassB(cfg=cfg, input_shape="shape") + _ = _TestClassC(cfg=cfg, input_shape="shape") + _ = _TestClassD(cfg=cfg, input_shape="shape") + _ = _LegacySubClass(cfg=cfg, input_shape="shape") + _ = _NewSubClassNewInit(cfg=cfg, input_shape="shape") + _ = _LegacySubClassNotCfg(config=cfg, input_shape="shape") + + def testInitWithCfgOverwrite(self): + cfg = get_cfg() + cfg.ARG1 = 1 + cfg.ARG2 = 999 # wrong config + with self.assertRaises(AssertionError): + _ = _TestClassA(cfg, arg3=3) + + # overwrite arg2 with correct config later: + _ = _TestClassA(cfg, arg2=2, arg3=3) + _ = _TestClassB(cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassC(cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassD(cfg, input_shape="shape", arg2=2, arg3=3) + + # call with kwargs cfg=cfg instead + _ = _TestClassA(cfg=cfg, arg2=2, arg3=3) + _ = _TestClassB(cfg=cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassC(cfg=cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassD(cfg=cfg, input_shape="shape", arg2=2, arg3=3) + + def testInitWithCfgWrongArgs(self): + cfg = get_cfg() + cfg.ARG1 = 1 + cfg.ARG2 = 2 + with self.assertRaises(TypeError): + _ = _TestClassB(cfg, "shape", not_exist=1) + with self.assertRaises(TypeError): + _ = _TestClassC(cfg, "shape", not_exist=1) + with self.assertRaises(TypeError): + _ = _TestClassD(cfg, "shape", not_exist=1) + + def testBadClass(self): + class _BadClass1: + @configurable + def __init__(self, a=1, b=2): + pass + + class _BadClass2: + @configurable + def __init__(self, a=1, b=2): + pass + + def from_config(self, cfg): # noqa + pass + + class _BadClass3: + @configurable + def __init__(self, a=1, b=2): + pass + + # bad name: must be cfg + @classmethod + def from_config(cls, config): # noqa + pass + + with self.assertRaises(AttributeError): + _ = _BadClass1(a=1) + + with self.assertRaises(TypeError): + _ = _BadClass2(a=1) + + 
with self.assertRaises(TypeError): + _ = _BadClass3(get_cfg()) + + def testFuncWithCfg(self): + cfg = get_cfg() + cfg.ARG1 = 10 + cfg.ARG3 = 30 + + self.assertEqual(_test_func(1), (1, 2, 3, 4)) + with self.assertRaises(TypeError): + _test_func(cfg) + self.assertEqual(_test_func(cfg, arg2=2), (10, 2, 30, 4)) + self.assertEqual(_test_func(cfg, arg1=100, arg2=20), (100, 20, 30, 4)) + self.assertEqual(_test_func(cfg, arg1=100, arg2=20, arg4=40), (100, 20, 30, 40)) + + self.assertTrue(callable(_test_func.from_config)) + + def testOmegaConf(self): + cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml") + cfg = OmegaConf.create(cfg.dump()) + if not torch.cuda.is_available(): + cfg.MODEL.DEVICE = "cpu" + # test that a model can be built with omegaconf config as well + build_model(cfg) diff --git a/data_processing/detectron2/tests/data/__init__.py b/data_processing/detectron2/tests/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/tests/data/test_coco.py b/data_processing/detectron2/tests/data/test_coco.py new file mode 100644 index 0000000..caabead --- /dev/null +++ b/data_processing/detectron2/tests/data/test_coco.py @@ -0,0 +1,139 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import json +import numpy as np +import os +import tempfile +import unittest +import pycocotools.mask as mask_util + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.data.datasets.coco import convert_to_coco_dict, load_coco_json +from detectron2.structures import BoxMode + + +def make_mask(): + """ + Makes a donut shaped binary mask. + """ + H = 100 + W = 100 + mask = np.zeros([H, W], dtype=np.uint8) + for x in range(W): + for y in range(H): + d = np.linalg.norm(np.array([W, H]) / 2 - np.array([x, y])) + if d > 10 and d < 20: + mask[y, x] = 1 + return mask + + +def uncompressed_rle(mask): + l = mask.flatten(order="F").tolist() + counts = [] + p = False + cnt = 0 + for i in l: + if i == p: + cnt += 1 + else: + counts.append(cnt) + p = i + cnt = 1 + counts.append(cnt) + return {"counts": counts, "size": [mask.shape[0], mask.shape[1]]} + + +def make_dataset_dicts(mask, compressed: bool = True): + """ + Returns a list of dicts that represents a single COCO data point for + object detection. The single instance given by `mask` is represented by + RLE, either compressed or uncompressed. + """ + record = {} + record["file_name"] = "test" + record["image_id"] = 0 + record["height"] = mask.shape[0] + record["width"] = mask.shape[1] + + y, x = np.nonzero(mask) + if compressed: + segmentation = mask_util.encode(np.asarray(mask, order="F")) + else: + segmentation = uncompressed_rle(mask) + min_x = np.min(x) + max_x = np.max(x) + min_y = np.min(y) + max_y = np.max(y) + obj = { + "bbox": [min_x, min_y, max_x, max_y], + "bbox_mode": BoxMode.XYXY_ABS, + "category_id": 0, + "iscrowd": 0, + "segmentation": segmentation, + } + record["annotations"] = [obj] + return [record] + + +class TestRLEToJson(unittest.TestCase): + def test(self): + # Make a dummy dataset. + mask = make_mask() + DatasetCatalog.register("test_dataset", lambda: make_dataset_dicts(mask)) + MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"]) + + # Dump to json. + json_dict = convert_to_coco_dict("test_dataset") + with tempfile.TemporaryDirectory() as tmpdir: + json_file_name = os.path.join(tmpdir, "test.json") + with open(json_file_name, "w") as f: + json.dump(json_dict, f) + # Load from json. 
+ dicts = load_coco_json(json_file_name, "") + + # Check the loaded mask matches the original. + anno = dicts[0]["annotations"][0] + loaded_mask = mask_util.decode(anno["segmentation"]) + self.assertTrue(np.array_equal(loaded_mask, mask)) + DatasetCatalog.pop("test_dataset") + MetadataCatalog.pop("test_dataset") + + def test_uncompressed_RLE(self): + mask = make_mask() + rle = mask_util.encode(np.asarray(mask, order="F")) + uncompressed = uncompressed_rle(mask) + compressed = mask_util.frPyObjects(uncompressed, *rle["size"]) + self.assertEqual(rle, compressed) + + +class TestConvertCOCO(unittest.TestCase): + @staticmethod + def generate_data(): + record = { + "file_name": "test", + "image_id": 0, + "height": 100, + "width": 100, + "annotations": [ + { + "bbox": [10, 10, 10, 10, 5], + "bbox_mode": BoxMode.XYWHA_ABS, + "category_id": 0, + "iscrowd": 0, + }, + { + "bbox": [15, 15, 3, 3], + "bbox_mode": BoxMode.XYXY_ABS, + "category_id": 0, + "iscrowd": 0, + }, + ], + } + + return [record] + + def test_convert_to_coco(self): + DatasetCatalog.register("test_dataset", lambda: TestConvertCOCO.generate_data()) + MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"]) + convert_to_coco_dict("test_dataset") + DatasetCatalog.pop("test_dataset") + MetadataCatalog.pop("test_dataset") diff --git a/data_processing/detectron2/tests/data/test_coco_evaluation.py b/data_processing/detectron2/tests/data/test_coco_evaluation.py new file mode 100644 index 0000000..964f002 --- /dev/null +++ b/data_processing/detectron2/tests/data/test_coco_evaluation.py @@ -0,0 +1,138 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import copy +import io +import json +import numpy as np +import os +import tempfile +import unittest +import torch +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +from detectron2.data import DatasetCatalog +from detectron2.evaluation import COCOEvaluator +from detectron2.evaluation.fast_eval_api import COCOeval_opt +from detectron2.structures import Boxes, Instances + + +class TestCOCOeval(unittest.TestCase): + def test_fast_eval(self): + # A small set of images/categories from COCO val + # fmt: off + detections = [{"image_id": 139, "category_id": 1, "bbox": [417.3332824707031, 159.27003479003906, 47.66064453125, 143.00193786621094], "score": 0.9949821829795837, "segmentation": {"size": [426, 640], "counts": "Tc`52W=3N0N4aNN^E7]:4XE1g:8kDMT;U100000001O1gE[Nk8h1dFiNY9Z1aFkN]9g2J3NdN`FlN`9S1cFRN07]9g1bFoM6;X9c1cFoM=8R9g1bFQN>3U9Y30O01OO1O001N2O1N1O4L4L5UNoE3V:CVF6Q:@YF9l9@ZF 0 else 0.0 + msg = "%s: comparing COCO APIs, %s differs by %f" % (name, k, abs_diff) + self.assertTrue(abs_diff < 1e-4, msg=msg) + + def test_unknown_category(self): + dataset = "coco_2017_val_100" + evaluator = COCOEvaluator(dataset) + evaluator.reset() + inputs = DatasetCatalog.get(dataset)[:2] + pred = Instances((100, 100)) + pred.pred_boxes = Boxes(torch.rand(2, 4)) + pred.scores = torch.rand(2) + pred.pred_classes = torch.tensor([10, 80]) + output = {"instances": pred} + evaluator.process(inputs, [output, output]) + with self.assertRaises(AssertionError): + evaluator.evaluate() diff --git a/data_processing/detectron2/tests/data/test_dataset.py b/data_processing/detectron2/tests/data/test_dataset.py new file mode 100644 index 0000000..7bdcda0 --- /dev/null +++ b/data_processing/detectron2/tests/data/test_dataset.py @@ -0,0 +1,185 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
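+# Tests for the dataset wrappers (DatasetFromList, MapDataset, ToIterableDataset) and the dataloader builders.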
+ +import os +import pickle +import sys +import unittest +from functools import partial +import torch +from iopath.common.file_io import LazyPath + +from detectron2 import model_zoo +from detectron2.config import get_cfg, instantiate +from detectron2.data import ( + DatasetCatalog, + DatasetFromList, + MapDataset, + ToIterableDataset, + build_batch_data_loader, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.data.common import ( + AspectRatioGroupedDataset, + set_default_dataset_from_list_serialize_method, +) +from detectron2.data.samplers import InferenceSampler, TrainingSampler + + +def _a_slow_func(x): + return "path/{}".format(x) + + +class TestDatasetFromList(unittest.TestCase): + # Failing for py3.6, likely due to pickle + @unittest.skipIf(sys.version_info.minor <= 6, "Not supported in Python 3.6") + def test_using_lazy_path(self): + dataset = [] + for i in range(10): + dataset.append({"file_name": LazyPath(partial(_a_slow_func, i))}) + + dataset = DatasetFromList(dataset) + for i in range(10): + path = dataset[i]["file_name"] + self.assertTrue(isinstance(path, LazyPath)) + self.assertEqual(os.fspath(path), _a_slow_func(i)) + + def test_alternative_serialize_method(self): + dataset = [1, 2, 3] + dataset = DatasetFromList(dataset, serialize=torch.tensor) + self.assertEqual(dataset[2], torch.tensor(3)) + + def test_change_default_serialize_method(self): + dataset = [1, 2, 3] + with set_default_dataset_from_list_serialize_method(torch.tensor): + dataset_1 = DatasetFromList(dataset, serialize=True) + self.assertEqual(dataset_1[2], torch.tensor(3)) + dataset_2 = DatasetFromList(dataset, serialize=True) + self.assertEqual(dataset_2[2], 3) + + +class TestMapDataset(unittest.TestCase): + @staticmethod + def map_func(x): + if x == 2: + return None + return x * 2 + + def test_map_style(self): + ds = DatasetFromList([1, 2, 3]) + ds = MapDataset(ds, TestMapDataset.map_func) + self.assertEqual(ds[0], 2) + self.assertEqual(ds[2], 6) + self.assertIn(ds[1], [2, 6]) + + def test_iter_style(self): + class DS(torch.utils.data.IterableDataset): + def __iter__(self): + yield from [1, 2, 3] + + ds = DS() + ds = MapDataset(ds, TestMapDataset.map_func) + self.assertIsInstance(ds, torch.utils.data.IterableDataset) + + data = list(iter(ds)) + self.assertEqual(data, [2, 6]) + + def test_pickleability(self): + ds = DatasetFromList([1, 2, 3]) + ds = MapDataset(ds, lambda x: x * 2) + ds = pickle.loads(pickle.dumps(ds)) + self.assertEqual(ds[0], 2) + + +class TestAspectRatioGrouping(unittest.TestCase): + def test_reiter_leak(self): + data = [(1, 0), (0, 1), (1, 0), (0, 1)] + data = [{"width": a, "height": b} for (a, b) in data] + batchsize = 2 + dataset = AspectRatioGroupedDataset(data, batchsize) + + for _ in range(5): + for idx, __ in enumerate(dataset): + if idx == 1: + # manually break, so the iterator does not stop by itself + break + # check that bucket sizes are valid + for bucket in dataset._buckets: + self.assertLess(len(bucket), batchsize) + + +class _MyData(torch.utils.data.IterableDataset): + def __iter__(self): + while True: + yield 1 + + +class TestDataLoader(unittest.TestCase): + def _get_kwargs(self): + # get kwargs of build_detection_train_loader + cfg = model_zoo.get_config("common/data/coco.py").dataloader.train + cfg.dataset.names = "coco_2017_val_100" + cfg.pop("_target_") + kwargs = {k: instantiate(v) for k, v in cfg.items()} + return kwargs + + def test_build_dataloader_train(self): + kwargs = self._get_kwargs() + dl = build_detection_train_loader(**kwargs) 
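+ # fetching a single batch exercises the dataset, mapper, sampler, and batching path end to end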
+ next(iter(dl)) + + def test_build_iterable_dataloader_train(self): + kwargs = self._get_kwargs() + ds = DatasetFromList(kwargs.pop("dataset")) + ds = ToIterableDataset(ds, TrainingSampler(len(ds))) + dl = build_detection_train_loader(dataset=ds, **kwargs) + next(iter(dl)) + + def test_build_iterable_dataloader_from_cfg(self): + cfg = get_cfg() + cfg.DATASETS.TRAIN = ["iter_data"] + DatasetCatalog.register("iter_data", lambda: _MyData()) + dl = build_detection_train_loader(cfg, mapper=lambda x: x, aspect_ratio_grouping=False) + next(iter(dl)) + + dl = build_detection_test_loader(cfg, "iter_data", mapper=lambda x: x) + next(iter(dl)) + + def _check_is_range(self, data_loader, N): + # check that data_loader produces range(N) + data = list(iter(data_loader)) + data = [x for batch in data for x in batch] # flatten the batches + self.assertEqual(len(data), N) + self.assertEqual(set(data), set(range(N))) + + def test_build_batch_dataloader_inference(self): + # Test that build_batch_data_loader can be used for inference + N = 96 + ds = DatasetFromList(list(range(N))) + sampler = InferenceSampler(len(ds)) + dl = build_batch_data_loader(ds, sampler, 8, num_workers=3) + self._check_is_range(dl, N) + + def test_build_dataloader_inference(self): + N = 50 + ds = DatasetFromList(list(range(N))) + sampler = InferenceSampler(len(ds)) + # test that parallel loader works correctly + dl = build_detection_test_loader( + dataset=ds, sampler=sampler, mapper=lambda x: x, num_workers=3 + ) + self._check_is_range(dl, N) + + # test that batch_size works correctly + dl = build_detection_test_loader( + dataset=ds, sampler=sampler, mapper=lambda x: x, batch_size=4, num_workers=0 + ) + self._check_is_range(dl, N) + + def test_build_iterable_dataloader_inference(self): + # Test that build_detection_test_loader supports iterable dataset + N = 50 + ds = DatasetFromList(list(range(N))) + ds = ToIterableDataset(ds, InferenceSampler(len(ds))) + dl = build_detection_test_loader(dataset=ds, mapper=lambda x: x, num_workers=3) + self._check_is_range(dl, N) diff --git a/data_processing/detectron2/tests/data/test_detection_utils.py b/data_processing/detectron2/tests/data/test_detection_utils.py new file mode 100644 index 0000000..aac56c0 --- /dev/null +++ b/data_processing/detectron2/tests/data/test_detection_utils.py @@ -0,0 +1,176 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
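+# Tests for transform_instance_annotations and annotations_to_instances on boxes, polygon/RLE segmentations, and keypoints.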
+ +import copy +import numpy as np +import os +import unittest +import pycocotools.mask as mask_util + +from detectron2.data import MetadataCatalog, detection_utils +from detectron2.data import transforms as T +from detectron2.structures import BitMasks, BoxMode +from detectron2.utils.file_io import PathManager + + +class TestTransformAnnotations(unittest.TestCase): + def test_transform_simple_annotation(self): + transforms = T.TransformList([T.HFlipTransform(400)]) + anno = { + "bbox": np.asarray([10, 10, 200, 300]), + "bbox_mode": BoxMode.XYXY_ABS, + "category_id": 3, + "segmentation": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]], + } + + output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400)) + self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300])) + self.assertEqual(len(output["segmentation"]), len(anno["segmentation"])) + self.assertTrue(np.allclose(output["segmentation"][0], [390, 10, 300, 100, 300, 10])) + + detection_utils.annotations_to_instances([output, output], (400, 400)) + + def test_transform_empty_annotation(self): + detection_utils.annotations_to_instances([], (400, 400)) + + def test_flip_keypoints(self): + transforms = T.TransformList([T.HFlipTransform(400)]) + anno = { + "bbox": np.asarray([10, 10, 200, 300]), + "bbox_mode": BoxMode.XYXY_ABS, + "keypoints": np.random.rand(17, 3) * 50 + 15, + } + + output = detection_utils.transform_instance_annotations( + copy.deepcopy(anno), + transforms, + (400, 400), + keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices( + ["keypoints_coco_2017_train"] + ), + ) + # The first keypoint is nose + self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0])) + # The last 16 keypoints are 8 left-right pairs + self.assertTrue( + np.allclose( + output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1], + 400 - anno["keypoints"][1:, 0].reshape(-1, 2), + ) + ) + self.assertTrue( + np.allclose( + output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :], + anno["keypoints"][1:, 1:].reshape(-1, 2, 2), + ) + ) + + def test_crop(self): + transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)]) + keypoints = np.random.rand(17, 3) * 50 + 15 + keypoints[:, 2] = 2 + anno = { + "bbox": np.asarray([10, 10, 200, 400]), + "bbox_mode": BoxMode.XYXY_ABS, + "keypoints": keypoints, + } + + output = detection_utils.transform_instance_annotations( + copy.deepcopy(anno), transforms, (10, 10) + ) + # box is shifted and cropped + self.assertTrue((output["bbox"] == np.asarray([0, 0, 0, 10])).all()) + # keypoints are no longer visible + self.assertTrue((output["keypoints"][:, 2] == 0).all()) + + def test_transform_RLE(self): + transforms = T.TransformList([T.HFlipTransform(400)]) + mask = np.zeros((300, 400), order="F").astype("uint8") + mask[:, :200] = 1 + + anno = { + "bbox": np.asarray([10, 10, 200, 300]), + "bbox_mode": BoxMode.XYXY_ABS, + "segmentation": mask_util.encode(mask[:, :, None])[0], + "category_id": 3, + } + output = detection_utils.transform_instance_annotations( + copy.deepcopy(anno), transforms, (300, 400) + ) + mask = output["segmentation"] + self.assertTrue((mask[:, 200:] == 1).all()) + self.assertTrue((mask[:, :200] == 0).all()) + + inst = detection_utils.annotations_to_instances( + [output, output], (400, 400), mask_format="bitmask" + ) + self.assertTrue(isinstance(inst.gt_masks, BitMasks)) + + def test_transform_RLE_resize(self): + transforms = T.TransformList( + [T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")] + ) + 
mask = np.zeros((300, 400), order="F").astype("uint8") + mask[:, :200] = 1 + + anno = { + "bbox": np.asarray([10, 10, 200, 300]), + "bbox_mode": BoxMode.XYXY_ABS, + "segmentation": mask_util.encode(mask[:, :, None])[0], + "category_id": 3, + } + output = detection_utils.transform_instance_annotations( + copy.deepcopy(anno), transforms, (400, 400) + ) + + inst = detection_utils.annotations_to_instances( + [output, output], (400, 400), mask_format="bitmask" + ) + self.assertTrue(isinstance(inst.gt_masks, BitMasks)) + + def test_gen_crop(self): + instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS} + t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance) + # the box center must fall into the cropped region + self.assertTrue(t.x0 <= 55 <= t.x0 + t.w) + + def test_gen_crop_outside_boxes(self): + instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS} + with self.assertRaises(AssertionError): + detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance) + + def test_read_sem_seg(self): + cityscapes_dir = MetadataCatalog.get("cityscapes_fine_sem_seg_val").gt_dir + sem_seg_gt_path = os.path.join( + cityscapes_dir, "frankfurt", "frankfurt_000001_083852_gtFine_labelIds.png" + ) + if not PathManager.exists(sem_seg_gt_path): + raise unittest.SkipTest( + "Semantic segmentation ground truth {} not found.".format(sem_seg_gt_path) + ) + sem_seg = detection_utils.read_image(sem_seg_gt_path, "L") + self.assertEqual(sem_seg.ndim, 3) + self.assertEqual(sem_seg.shape[2], 1) + self.assertEqual(sem_seg.dtype, np.uint8) + self.assertEqual(sem_seg.max(), 32) + self.assertEqual(sem_seg.min(), 1) + + def test_read_exif_orientation(self): + # https://github.com/recurser/exif-orientation-examples/raw/master/Landscape_5.jpg + URL = "detectron2://assets/Landscape_5.jpg" + img = detection_utils.read_image(URL, "RGB") + self.assertEqual(img.ndim, 3) + self.assertEqual(img.dtype, np.uint8) + self.assertEqual(img.shape, (1200, 1800, 3)) # check that shape is not transposed + + def test_opencv_exif_orientation(self): + import cv2 + + URL = "detectron2://assets/Landscape_5.jpg" + with PathManager.open(URL, "rb") as f: + img = cv2.imdecode(np.frombuffer(f.read(), dtype="uint8"), cv2.IMREAD_COLOR) + self.assertEqual(img.dtype, np.uint8) + self.assertEqual(img.shape, (1200, 1800, 3)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/data/test_rotation_transform.py b/data_processing/detectron2/tests/data/test_rotation_transform.py new file mode 100644 index 0000000..0e8299e --- /dev/null +++ b/data_processing/detectron2/tests/data/test_rotation_transform.py @@ -0,0 +1,71 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import numpy as np +import unittest + +from detectron2.data.transforms.transform import RotationTransform + + +class TestRotationTransform(unittest.TestCase): + def assertEqualsArrays(self, a1, a2): + self.assertTrue(np.allclose(a1, a2)) + + def randomData(self, h=5, w=5): + image = np.random.rand(h, w) + coords = np.array([[i, j] for j in range(h + 1) for i in range(w + 1)], dtype=float) + return image, coords, h, w + + def test180(self): + image, coords, h, w = self.randomData(6, 6) + rot = RotationTransform(h, w, 180, expand=False, center=None) + self.assertEqualsArrays(rot.apply_image(image), image[::-1, ::-1]) + rotated_coords = [[w - c[0], h - c[1]] for c in coords] + self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) + + def test45_coords(self): + _, coords, h, w = self.randomData(4, 6) + rot = RotationTransform(h, w, 45, expand=False, center=None) + rotated_coords = [ + [(x + y - (h + w) / 2) / np.sqrt(2) + w / 2, h / 2 + (y + (w - h) / 2 - x) / np.sqrt(2)] + for (x, y) in coords + ] + self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) + + def test90(self): + image, coords, h, w = self.randomData() + rot = RotationTransform(h, w, 90, expand=False, center=None) + self.assertEqualsArrays(rot.apply_image(image), image.T[::-1]) + rotated_coords = [[c[1], w - c[0]] for c in coords] + self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) + + def test90_expand(self): # non-square image + image, coords, h, w = self.randomData(h=5, w=8) + rot = RotationTransform(h, w, 90, expand=True, center=None) + self.assertEqualsArrays(rot.apply_image(image), image.T[::-1]) + rotated_coords = [[c[1], w - c[0]] for c in coords] + self.assertEqualsArrays(rot.apply_coords(coords), rotated_coords) + + def test_center_expand(self): + # center has no effect if expand=True because it only affects shifting + image, coords, h, w = self.randomData(h=5, w=8) + angle = np.random.randint(360) + rot1 = RotationTransform(h, w, angle, expand=True, center=None) + rot2 = RotationTransform(h, w, angle, expand=True, center=(0, 0)) + rot3 = RotationTransform(h, w, angle, expand=True, center=(h, w)) + rot4 = RotationTransform(h, w, angle, expand=True, center=(2, 5)) + for r1 in [rot1, rot2, rot3, rot4]: + for r2 in [rot1, rot2, rot3, rot4]: + self.assertEqualsArrays(r1.apply_image(image), r2.apply_image(image)) + self.assertEqualsArrays(r1.apply_coords(coords), r2.apply_coords(coords)) + + def test_inverse_transform(self): + image, coords, h, w = self.randomData(h=5, w=8) + rot = RotationTransform(h, w, 90, expand=True, center=None) + rot_image = rot.apply_image(image) + self.assertEqualsArrays(rot.inverse().apply_image(rot_image), image) + rot = RotationTransform(h, w, 65, expand=True, center=None) + rotated_coords = rot.apply_coords(coords) + self.assertEqualsArrays(rot.inverse().apply_coords(rotated_coords), coords) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/data/test_sampler.py b/data_processing/detectron2/tests/data/test_sampler.py new file mode 100644 index 0000000..0d27843 --- /dev/null +++ b/data_processing/detectron2/tests/data/test_sampler.py @@ -0,0 +1,111 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import itertools +import math +import operator +import unittest +import torch +from torch.utils import data +from torch.utils.data.sampler import SequentialSampler + +from detectron2.data.build import worker_init_reset_seed +from detectron2.data.common import DatasetFromList, ToIterableDataset +from detectron2.data.samplers import ( + GroupedBatchSampler, + InferenceSampler, + RepeatFactorTrainingSampler, + TrainingSampler, +) +from detectron2.utils.env import seed_all_rng + + +class TestGroupedBatchSampler(unittest.TestCase): + def test_missing_group_id(self): + sampler = SequentialSampler(list(range(100))) + group_ids = [1] * 100 + samples = GroupedBatchSampler(sampler, group_ids, 2) + + for mini_batch in samples: + self.assertEqual(len(mini_batch), 2) + + def test_groups(self): + sampler = SequentialSampler(list(range(100))) + group_ids = [1, 0] * 50 + samples = GroupedBatchSampler(sampler, group_ids, 2) + + for mini_batch in samples: + self.assertEqual((mini_batch[0] + mini_batch[1]) % 2, 0) + + +class TestSamplerDeterministic(unittest.TestCase): + def test_to_iterable(self): + sampler = TrainingSampler(100, seed=10) + gt_output = list(itertools.islice(sampler, 100)) + self.assertEqual(set(gt_output), set(range(100))) + + dataset = DatasetFromList(list(range(100))) + dataset = ToIterableDataset(dataset, sampler) + data_loader = data.DataLoader(dataset, num_workers=0, collate_fn=operator.itemgetter(0)) + + output = list(itertools.islice(data_loader, 100)) + self.assertEqual(output, gt_output) + + data_loader = data.DataLoader( + dataset, + num_workers=2, + collate_fn=operator.itemgetter(0), + worker_init_fn=worker_init_reset_seed, + # reset seed should not affect behavior of TrainingSampler + ) + output = list(itertools.islice(data_loader, 100)) + # multiple workers should not lead to duplicate or different data + self.assertEqual(output, gt_output) + + def test_training_sampler_seed(self): + seed_all_rng(42) + sampler = TrainingSampler(30) + data = list(itertools.islice(sampler, 65)) + + seed_all_rng(42) + sampler = TrainingSampler(30) + seed_all_rng(999) # should be ineffective + data2 = list(itertools.islice(sampler, 65)) + self.assertEqual(data, data2) + + +class TestRepeatFactorTrainingSampler(unittest.TestCase): + def test_repeat_factors_from_category_frequency(self): + repeat_thresh = 0.5 + + dataset_dicts = [ + {"annotations": [{"category_id": 0}, {"category_id": 1}]}, + {"annotations": [{"category_id": 0}]}, + {"annotations": []}, + ] + + rep_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( + dataset_dicts, repeat_thresh + ) + + expected_rep_factors = torch.tensor([math.sqrt(3 / 2), 1.0, 1.0]) + self.assertTrue(torch.allclose(rep_factors, expected_rep_factors)) + + +class TestInferenceSampler(unittest.TestCase): + def test_local_indices(self): + sizes = [0, 16, 2, 42] + world_sizes = [5, 2, 3, 4] + + expected_results = [ + [range(0) for _ in range(5)], + [range(8), range(8, 16)], + [range(1), range(1, 2), range(0)], + [range(11), range(11, 22), range(22, 32), range(32, 42)], + ] + + for size, world_size, expected_result in zip(sizes, world_sizes, expected_results): + with self.subTest(f"size={size}, world_size={world_size}"): + local_indices = [ + InferenceSampler._get_local_indices(size, world_size, r) + for r in range(world_size) + ] + self.assertEqual(local_indices, expected_result) diff --git a/data_processing/detectron2/tests/data/test_transforms.py b/data_processing/detectron2/tests/data/test_transforms.py new file mode 100644 index 
0000000..382048e --- /dev/null +++ b/data_processing/detectron2/tests/data/test_transforms.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +import numpy as np +import unittest +from unittest import mock +import torch +from PIL import Image, ImageOps +from torch.nn import functional as F + +from detectron2.config import get_cfg +from detectron2.data import detection_utils +from detectron2.data import transforms as T +from detectron2.utils.logger import setup_logger + +logger = logging.getLogger(__name__) + + +def polygon_allclose(poly1, poly2): + """ + Test whether two polygons are the same. + Both arguments are nx2 numpy arrays. + """ + # ABCD and CDAB are the same polygon. So it's important to check after rolling + for k in range(len(poly1)): + rolled_poly1 = np.roll(poly1, k, axis=0) + if np.allclose(rolled_poly1, poly2): + return True + return False + + +class TestTransforms(unittest.TestCase): + def setUp(self): + setup_logger() + + def test_apply_rotated_boxes(self): + np.random.seed(125) + cfg = get_cfg() + is_train = True + augs = detection_utils.build_augmentation(cfg, is_train) + image = np.random.rand(200, 300) + image, transforms = T.apply_augmentations(augs, image) + image_shape = image.shape[:2] # h, w + assert image_shape == (800, 1200) + annotation = {"bbox": [179, 97, 62, 40, -56]} + + boxes = np.array([annotation["bbox"]], dtype=np.float64) # boxes.shape = (1, 5) + transformed_bbox = transforms.apply_rotated_box(boxes)[0] + + expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64) + err_msg = "transformed_bbox = {}, expected {}".format(transformed_bbox, expected_bbox) + assert np.allclose(transformed_bbox, expected_bbox), err_msg + + def test_resize_and_crop(self): + np.random.seed(125) + min_scale = 0.2 + max_scale = 2.0 + target_height = 1100 + target_width = 1000 + resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width) + fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width)) + hflip_aug = T.RandomFlip() + augs = [resize_aug, fixed_size_crop_aug, hflip_aug] + original_image = np.random.rand(900, 800) + image, transforms = T.apply_augmentations(augs, original_image) + image_shape = image.shape[:2] # h, w + self.assertEqual((1100, 1000), image_shape) + + boxes = np.array( + [[91, 46, 144, 111], [523, 251, 614, 295]], + dtype=np.float64, + ) + transformed_bboxs = transforms.apply_box(boxes) + expected_bboxs = np.array( + [ + [895.42, 33.42666667, 933.91125, 80.66], + [554.0825, 182.39333333, 620.17125, 214.36666667], + ], + dtype=np.float64, + ) + err_msg = "transformed_bbox = {}, expected {}".format(transformed_bboxs, expected_bboxs) + self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg) + + polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]]) + transformed_polygons = transforms.apply_polygons([polygon]) + expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]]) + self.assertEqual(1, len(transformed_polygons)) + err_msg = "transformed_polygon = {}, expected {}".format( + transformed_polygons[0], expected_polygon + ) + self.assertTrue(polygon_allclose(transformed_polygons[0], expected_polygon), err_msg) + + def test_apply_rotated_boxes_unequal_scaling_factor(self): + np.random.seed(125) + h, w = 400, 200 + newh, neww = 800, 800 + image = np.random.rand(h, w) + augs = [] + augs.append(T.Resize(shape=(newh, neww))) + image, transforms = T.apply_augmentations(augs, image) + image_shape = 
image.shape[:2] # h, w + assert image_shape == (newh, neww) + + boxes = np.array( + [ + [150, 100, 40, 20, 0], + [150, 100, 40, 20, 30], + [150, 100, 40, 20, 90], + [150, 100, 40, 20, -90], + ], + dtype=np.float64, + ) + transformed_boxes = transforms.apply_rotated_box(boxes) + + expected_bboxes = np.array( + [ + [600, 200, 160, 40, 0], + [600, 200, 144.22205102, 52.91502622, 49.10660535], + [600, 200, 80, 80, 90], + [600, 200, 80, 80, -90], + ], + dtype=np.float64, + ) + err_msg = "transformed_boxes = {}, expected {}".format(transformed_boxes, expected_bboxes) + assert np.allclose(transformed_boxes, expected_bboxes), err_msg + + def test_print_augmentation(self): + t = T.RandomCrop("relative", (100, 100)) + self.assertEqual(str(t), "RandomCrop(crop_type='relative', crop_size=(100, 100))") + + t0 = T.RandomFlip(prob=0.5) + self.assertEqual(str(t0), "RandomFlip(prob=0.5)") + + t1 = T.RandomFlip() + self.assertEqual(str(t1), "RandomFlip()") + + t = T.AugmentationList([t0, t1]) + self.assertEqual(str(t), f"AugmentationList[{t0}, {t1}]") + + def test_random_apply_prob_out_of_range_check(self): + test_probabilities = {0.0: True, 0.5: True, 1.0: True, -0.01: False, 1.01: False} + + for given_probability, is_valid in test_probabilities.items(): + if not is_valid: + self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability) + else: + T.RandomApply(T.NoOpTransform(), prob=given_probability) + + def test_random_apply_wrapping_aug_probability_occured_evaluation(self): + transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation) + image_mock = mock.MagicMock(name="MockImage") + random_apply = T.RandomApply(transform_mock, prob=0.001) + + with mock.patch.object(random_apply, "_rand_range", return_value=0.0001): + transform = random_apply.get_transform(image_mock) + transform_mock.get_transform.assert_called_once_with(image_mock) + self.assertIsNot(transform, transform_mock) + + def test_random_apply_wrapping_std_transform_probability_occured_evaluation(self): + transform_mock = mock.MagicMock(name="MockTransform", spec=T.Transform) + image_mock = mock.MagicMock(name="MockImage") + random_apply = T.RandomApply(transform_mock, prob=0.001) + + with mock.patch.object(random_apply, "_rand_range", return_value=0.0001): + transform = random_apply.get_transform(image_mock) + self.assertIs(transform, transform_mock) + + def test_random_apply_probability_not_occured_evaluation(self): + transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation) + image_mock = mock.MagicMock(name="MockImage") + random_apply = T.RandomApply(transform_mock, prob=0.001) + + with mock.patch.object(random_apply, "_rand_range", return_value=0.9): + transform = random_apply.get_transform(image_mock) + transform_mock.get_transform.assert_not_called() + self.assertIsInstance(transform, T.NoOpTransform) + + def test_augmentation_input_args(self): + input_shape = (100, 100) + output_shape = (50, 50) + + # define two augmentations with different args + class TG1(T.Augmentation): + def get_transform(self, image, sem_seg): + return T.ResizeTransform( + input_shape[0], input_shape[1], output_shape[0], output_shape[1] + ) + + class TG2(T.Augmentation): + def get_transform(self, image): + assert image.shape[:2] == output_shape # check that TG1 is applied + return T.HFlipTransform(output_shape[1]) + + image = np.random.rand(*input_shape).astype("float32") + sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8") + inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args + tfms = 
inputs.apply_augmentations([TG1(), TG2()]) + self.assertIsInstance(tfms[0], T.ResizeTransform) + self.assertIsInstance(tfms[1], T.HFlipTransform) + self.assertTrue(inputs.image.shape[:2] == output_shape) + self.assertTrue(inputs.sem_seg.shape[:2] == output_shape) + + class TG3(T.Augmentation): + def get_transform(self, image, nonexist): + pass + + with self.assertRaises(AttributeError): + inputs.apply_augmentations([TG3()]) + + def test_augmentation_list(self): + input_shape = (100, 100) + image = np.random.rand(*input_shape).astype("float32") + sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8") + inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args + + augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)]) + _ = T.AugmentationList([augs, T.Resize(30)])(inputs) + # 3 in latest fvcore (flattened transformlist), 2 in older + # self.assertEqual(len(tfms), 3) + + def test_color_transforms(self): + rand_img = np.random.random((100, 100, 3)) * 255 + rand_img = rand_img.astype("uint8") + + # Test no-op + noop_transform = T.ColorTransform(lambda img: img) + self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img))) + + # Test a ImageOps operation + magnitude = np.random.randint(0, 256) + solarize_transform = T.PILColorTransform(lambda img: ImageOps.solarize(img, magnitude)) + expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude) + self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img))) + + def test_resize_transform(self): + input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)] + output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)] + for in_shape, out_shape in zip(input_shapes, output_shapes): + in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8) + tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1]) + out_img = tfm.apply_image(in_img) + self.assertEqual(out_img.shape, out_shape) + + def test_resize_shorted_edge_scriptable(self): + def f(image): + newh, neww = T.ResizeShortestEdge.get_output_shape( + image.shape[-2], image.shape[-1], 80, 133 + ) + return F.interpolate(image.unsqueeze(0), size=(newh, neww)) + + input = torch.randn(3, 10, 10) + script_f = torch.jit.script(f) + self.assertTrue(torch.allclose(f(input), script_f(input))) + + # generalize to new shapes + input = torch.randn(3, 8, 100) + self.assertTrue(torch.allclose(f(input), script_f(input))) + + def test_extent_transform(self): + input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)] + src_rect = (20, 20, 80, 80) + output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)] + for in_shape, out_shape in zip(input_shapes, output_shapes): + in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8) + tfm = T.ExtentTransform(src_rect, out_shape[:2]) + out_img = tfm.apply_image(in_img) + self.assertTrue(out_img.shape == out_shape) diff --git a/data_processing/detectron2/tests/export/test_c10.py b/data_processing/detectron2/tests/export/test_c10.py new file mode 100644 index 0000000..55076ab --- /dev/null +++ b/data_processing/detectron2/tests/export/test_c10.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import unittest + +try: + # Caffe2 used to be included in PyTorch, but since PyTorch 1.10+, + # it is not included in pre-built packages. 
This is a safety BC check + from detectron2.config import get_cfg + from detectron2.export.c10 import Caffe2RPN + from detectron2.layers import ShapeSpec +except ImportError: + raise unittest.SkipTest( + f"PyTorch does not have Caffe2 support. Skipping all tests in {__name__}" + ) from None + + +class TestCaffe2RPN(unittest.TestCase): + def test_instantiation(self): + cfg = get_cfg() + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) + input_shapes = {"res4": ShapeSpec(channels=256, stride=4)} + rpn = Caffe2RPN(cfg, input_shapes) + assert rpn is not None + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1) + with self.assertRaises(AssertionError): + rpn = Caffe2RPN(cfg, input_shapes) diff --git a/data_processing/detectron2/tests/layers/__init__.py b/data_processing/detectron2/tests/layers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/tests/layers/test_blocks.py b/data_processing/detectron2/tests/layers/test_blocks.py new file mode 100644 index 0000000..5a0488a --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_blocks.py @@ -0,0 +1,51 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import unittest +import torch +from torch import nn + +from detectron2.layers import ASPP, DepthwiseSeparableConv2d, FrozenBatchNorm2d +from detectron2.modeling.backbone.resnet import BasicStem, ResNet + + +""" +Test for misc layers. +""" + + +class TestBlocks(unittest.TestCase): + def test_separable_conv(self): + DepthwiseSeparableConv2d(3, 10, norm1="BN", activation1=nn.PReLU()) + + def test_aspp(self): + m = ASPP(3, 10, [2, 3, 4], norm="", activation=nn.PReLU()) + self.assertIsNot(m.convs[0].activation.weight, m.convs[1].activation.weight) + self.assertIsNot(m.convs[0].activation.weight, m.project.activation.weight) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_frozen_batchnorm_fp16(self): + from torch.cuda.amp import autocast + + C = 10 + input = torch.rand(1, C, 10, 10).cuda() + m = FrozenBatchNorm2d(C).cuda() + with autocast(): + output = m(input.half()) + self.assertEqual(output.dtype, torch.float16) + + # requires_grad triggers a different codepath + input.requires_grad_() + with autocast(): + output = m(input.half()) + self.assertEqual(output.dtype, torch.float16) + + def test_resnet_unused_stages(self): + resnet = ResNet(BasicStem(), ResNet.make_default_stages(18), out_features=["res2"]) + self.assertTrue(hasattr(resnet, "res2")) + self.assertFalse(hasattr(resnet, "res3")) + self.assertFalse(hasattr(resnet, "res5")) + + resnet = ResNet(BasicStem(), ResNet.make_default_stages(18), out_features=["res2", "res5"]) + self.assertTrue(hasattr(resnet, "res2")) + self.assertTrue(hasattr(resnet, "res4")) + self.assertTrue(hasattr(resnet, "res5")) diff --git a/data_processing/detectron2/tests/layers/test_deformable.py b/data_processing/detectron2/tests/layers/test_deformable.py new file mode 100644 index 0000000..4aa319f --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_deformable.py @@ -0,0 +1,175 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import numpy as np +import unittest +import torch + +from detectron2.layers import DeformConv, ModulatedDeformConv +from detectron2.utils.env import TORCH_VERSION + + +@unittest.skipIf( + TORCH_VERSION == (1, 8) and torch.cuda.is_available(), + "This test fails under cuda11 + torch1.8.", +) +class DeformableTest(unittest.TestCase): + @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") + def test_forward_output(self): + device = torch.device("cuda") + N, C, H, W = shape = 1, 1, 5, 5 + kernel_size = 3 + padding = 1 + + inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) + """ + 0 1 2 3 4 + 5 6 7 8 9 + 10 11 12 13 14 + 15 16 17 18 19 + 20 21 22 23 24 + """ + offset_channels = kernel_size * kernel_size * 2 + offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) + + # Test DCN v1 + deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) + deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) + output = deform(inputs, offset) + output = output.detach().cpu().numpy() + deform_results = np.array( + [ + [30, 41.25, 48.75, 45, 28.75], + [62.25, 81, 90, 80.25, 50.25], + [99.75, 126, 135, 117.75, 72.75], + [105, 131.25, 138.75, 120, 73.75], + [71.75, 89.25, 93.75, 80.75, 49.5], + ] + ) + self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) + + # Test DCN v2 + mask_channels = kernel_size * kernel_size + mask = torch.full((N, mask_channels, H, W), 0.5, dtype=torch.float32).to(device) + modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( + device + ) + modulate_deform.weight = deform.weight + output = modulate_deform(inputs, offset, mask) + output = output.detach().cpu().numpy() + self.assertTrue(np.allclose(output.flatten(), deform_results.flatten() * 0.5)) + + def test_forward_output_on_cpu(self): + device = torch.device("cpu") + N, C, H, W = shape = 1, 1, 5, 5 + kernel_size = 3 + padding = 1 + + inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) + + offset_channels = kernel_size * kernel_size * 2 + offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) + + # Test DCN v1 on cpu + deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) + deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) + output = deform(inputs, offset) + output = output.detach().cpu().numpy() + deform_results = np.array( + [ + [30, 41.25, 48.75, 45, 28.75], + [62.25, 81, 90, 80.25, 50.25], + [99.75, 126, 135, 117.75, 72.75], + [105, 131.25, 138.75, 120, 73.75], + [71.75, 89.25, 93.75, 80.75, 49.5], + ] + ) + self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) + + @unittest.skipIf(not torch.cuda.is_available(), "This test requires gpu access") + def test_forward_output_on_cpu_equals_output_on_gpu(self): + N, C, H, W = shape = 2, 4, 10, 10 + kernel_size = 3 + padding = 1 + + for groups in [1, 2]: + inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape) + offset_channels = kernel_size * kernel_size * 2 + offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32) + + deform_gpu = DeformConv( + C, C, kernel_size=kernel_size, padding=padding, groups=groups + ).to("cuda") + deform_gpu.weight = torch.nn.Parameter(torch.ones_like(deform_gpu.weight)) + output_gpu = deform_gpu(inputs.to("cuda"), offset.to("cuda")).detach().cpu().numpy() + + deform_cpu = DeformConv( + C, C, kernel_size=kernel_size, 
padding=padding, groups=groups + ).to("cpu") + deform_cpu.weight = torch.nn.Parameter(torch.ones_like(deform_cpu.weight)) + output_cpu = deform_cpu(inputs.to("cpu"), offset.to("cpu")).detach().numpy() + + self.assertTrue(np.allclose(output_gpu.flatten(), output_cpu.flatten())) + + @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") + def test_small_input(self): + device = torch.device("cuda") + for kernel_size in [3, 5]: + padding = kernel_size // 2 + N, C, H, W = shape = (1, 1, kernel_size - 1, kernel_size - 1) + + inputs = torch.rand(shape).to(device) # input size is smaller than kernel size + + offset_channels = kernel_size * kernel_size * 2 + offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) + deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) + output = deform(inputs, offset) + self.assertTrue(output.shape == inputs.shape) + + mask_channels = kernel_size * kernel_size + mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) + modulate_deform = ModulatedDeformConv( + C, C, kernel_size, padding=padding, bias=False + ).to(device) + output = modulate_deform(inputs, offset, mask) + self.assertTrue(output.shape == inputs.shape) + + @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") + def test_raise_exception(self): + device = torch.device("cuda") + N, C, H, W = shape = 1, 1, 3, 3 + kernel_size = 3 + padding = 1 + + inputs = torch.rand(shape, dtype=torch.float32).to(device) + offset_channels = kernel_size * kernel_size # This is wrong channels for offset + offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) + deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) + self.assertRaises(RuntimeError, deform, inputs, offset) + + offset_channels = kernel_size * kernel_size * 2 + offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) + mask_channels = kernel_size * kernel_size * 2 # This is wrong channels for mask + mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) + modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( + device + ) + self.assertRaises(RuntimeError, modulate_deform, inputs, offset, mask) + + def test_repr(self): + module = DeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) + correct_string = ( + "DeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " + "stride=(1, 1), padding=(1, 1), dilation=(1, 1), " + "groups=1, deformable_groups=2, bias=False)" + ) + self.assertEqual(repr(module), correct_string) + + module = ModulatedDeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) + correct_string = ( + "ModulatedDeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " + "stride=1, padding=1, dilation=1, groups=1, deformable_groups=2, bias=True)" + ) + self.assertEqual(repr(module), correct_string) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/layers/test_losses.py b/data_processing/detectron2/tests/layers/test_losses.py new file mode 100644 index 0000000..d749202 --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_losses.py @@ -0,0 +1,82 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import numpy as np +import unittest +import torch + +from detectron2.layers import ciou_loss, diou_loss + + +class TestLosses(unittest.TestCase): + def test_diou_loss(self): + """ + loss = 1 - iou + d/c + where, + d = (distance between centers of the 2 boxes)^2 + c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2 + """ + # Identical boxes should have loss of 0 + box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32) + loss = diou_loss(box, box) + self.assertTrue(np.allclose(loss, [0.0])) + + # Half size box inside other box + # iou = 0.5, d = 0.25, c = 8 + box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32) + loss = diou_loss(box, box2) + self.assertTrue(np.allclose(loss, [0.53125])) + + # Two diagonally adjacent boxes + # iou = 0, d = 2, c = 8 + box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32) + box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32) + loss = diou_loss(box3, box4) + self.assertTrue(np.allclose(loss, [1.25])) + + # Test batched loss and reductions + box1s = torch.stack([box, box3], dim=0) + box2s = torch.stack([box2, box4], dim=0) + + loss = diou_loss(box1s, box2s, reduction="sum") + self.assertTrue(np.allclose(loss, [1.78125])) + + loss = diou_loss(box1s, box2s, reduction="mean") + self.assertTrue(np.allclose(loss, [0.890625])) + + def test_ciou_loss(self): + """ + loss = 1 - iou + d/c + alpha*v + where, + d = (distance between centers of the 2 boxes)^2 + c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2 + v = (4/pi^2) * (arctan(box1_w/box1_h) - arctan(box2_w/box2_h))^2 + alpha = v/(1 - iou + v) + """ + # Identical boxes should have loss of 0 + box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32) + loss = ciou_loss(box, box) + self.assertTrue(np.allclose(loss, [0.0])) + + # Half size box inside other box + # iou = 0.5, d = 0.25, c = 8 + # v = (4/pi^2) * (arctan(1) - arctan(0.5))^2 = 0.042 + # alpha = 0.0775 + box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32) + loss = ciou_loss(box, box2) + self.assertTrue(np.allclose(loss, [0.5345])) + + # Two diagonally adjacent boxes + # iou = 0, d = 2, c = 8, v = 0, alpha = 0 + box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32) + box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32) + loss = ciou_loss(box3, box4) + self.assertTrue(np.allclose(loss, [1.25])) + + # Test batched loss and reductions + box1s = torch.stack([box, box3], dim=0) + box2s = torch.stack([box2, box4], dim=0) + + loss = ciou_loss(box1s, box2s, reduction="sum") + self.assertTrue(np.allclose(loss, [1.7845])) + + loss = ciou_loss(box1s, box2s, reduction="mean") + self.assertTrue(np.allclose(loss, [0.89225])) diff --git a/data_processing/detectron2/tests/layers/test_mask_ops.py b/data_processing/detectron2/tests/layers/test_mask_ops.py new file mode 100644 index 0000000..dfbcaf5 --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_mask_ops.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import contextlib +import io +import numpy as np +import unittest +from collections import defaultdict +import torch +import tqdm +from fvcore.common.benchmark import benchmark +from pycocotools.coco import COCO +from tabulate import tabulate +from torch.nn import functional as F + +from detectron2.data import MetadataCatalog +from detectron2.layers.mask_ops import ( + pad_masks, + paste_mask_in_image_old, + paste_masks_in_image, + scale_boxes, +) +from detectron2.structures import BitMasks, Boxes, BoxMode, PolygonMasks +from detectron2.structures.masks import polygons_to_bitmask +from detectron2.utils.file_io import PathManager +from detectron2.utils.testing import random_boxes + + +def iou_between_full_image_bit_masks(a, b): + intersect = (a & b).sum() + union = (a | b).sum() + return intersect / union + + +def rasterize_polygons_with_grid_sample(full_image_bit_mask, box, mask_size, threshold=0.5): + x0, y0, x1, y1 = box[0], box[1], box[2], box[3] + + img_h, img_w = full_image_bit_mask.shape + + mask_y = np.arange(0.0, mask_size) + 0.5 # mask y sample coords in [0.5, mask_size - 0.5] + mask_x = np.arange(0.0, mask_size) + 0.5 # mask x sample coords in [0.5, mask_size - 0.5] + mask_y = mask_y / mask_size * (y1 - y0) + y0 + mask_x = mask_x / mask_size * (x1 - x0) + x0 + + mask_x = (mask_x - 0.5) / (img_w - 1) * 2 + -1 + mask_y = (mask_y - 0.5) / (img_h - 1) * 2 + -1 + gy, gx = torch.meshgrid(torch.from_numpy(mask_y), torch.from_numpy(mask_x)) + ind = torch.stack([gx, gy], dim=-1).to(dtype=torch.float32) + + full_image_bit_mask = torch.from_numpy(full_image_bit_mask) + mask = F.grid_sample( + full_image_bit_mask[None, None, :, :].to(dtype=torch.float32), + ind[None, :, :, :], + align_corners=True, + ) + + return mask[0, 0] >= threshold + + +class TestMaskCropPaste(unittest.TestCase): + def setUp(self): + json_file = MetadataCatalog.get("coco_2017_val_100").json_file + if not PathManager.isfile(json_file): + raise unittest.SkipTest("{} not found".format(json_file)) + with contextlib.redirect_stdout(io.StringIO()): + json_file = PathManager.get_local_path(json_file) + self.coco = COCO(json_file) + + def test_crop_paste_consistency(self): + """ + rasterize_polygons_within_box (used in training) + and + paste_masks_in_image (used in inference) + should be inverse operations to each other. + + This function runs several implementation of the above two operations and prints + the reconstruction error. 
+ """ + + anns = self.coco.loadAnns(self.coco.getAnnIds(iscrowd=False)) # avoid crowd annotations + + selected_anns = anns[:100] + + ious = [] + for ann in tqdm.tqdm(selected_anns): + results = self.process_annotation(ann) + ious.append([k[2] for k in results]) + + ious = np.array(ious) + mean_ious = ious.mean(axis=0) + table = [] + res_dic = defaultdict(dict) + for row, iou in zip(results, mean_ious): + table.append((row[0], row[1], iou)) + res_dic[row[0]][row[1]] = iou + print(tabulate(table, headers=["rasterize", "paste", "iou"], tablefmt="simple")) + # assert that the reconstruction is good: + self.assertTrue(res_dic["polygon"]["aligned"] > 0.94) + self.assertTrue(res_dic["roialign"]["aligned"] > 0.95) + + def process_annotation(self, ann, mask_side_len=28): + # Parse annotation data + img_info = self.coco.loadImgs(ids=[ann["image_id"]])[0] + height, width = img_info["height"], img_info["width"] + gt_polygons = [np.array(p, dtype=np.float64) for p in ann["segmentation"]] + gt_bbox = BoxMode.convert(ann["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + gt_bit_mask = polygons_to_bitmask(gt_polygons, height, width) + + # Run rasterize .. + torch_gt_bbox = torch.tensor(gt_bbox).to(dtype=torch.float32).reshape(-1, 4) + box_bitmasks = { + "polygon": PolygonMasks([gt_polygons]).crop_and_resize(torch_gt_bbox, mask_side_len)[0], + "gridsample": rasterize_polygons_with_grid_sample(gt_bit_mask, gt_bbox, mask_side_len), + "roialign": BitMasks(torch.from_numpy(gt_bit_mask[None, :, :])).crop_and_resize( + torch_gt_bbox, mask_side_len + )[0], + } + + # Run paste .. + results = defaultdict(dict) + for k, box_bitmask in box_bitmasks.items(): + padded_bitmask, scale = pad_masks(box_bitmask[None, :, :], 1) + scaled_boxes = scale_boxes(torch_gt_bbox, scale) + + r = results[k] + r["old"] = paste_mask_in_image_old( + padded_bitmask[0], scaled_boxes[0], height, width, threshold=0.5 + ) + r["aligned"] = paste_masks_in_image( + box_bitmask[None, :, :], Boxes(torch_gt_bbox), (height, width) + )[0] + + table = [] + for rasterize_method, r in results.items(): + for paste_method, mask in r.items(): + mask = np.asarray(mask) + iou = iou_between_full_image_bit_masks(gt_bit_mask.astype("uint8"), mask) + table.append((rasterize_method, paste_method, iou)) + return table + + def test_polygon_area(self): + # Draw polygon boxes + for d in [5.0, 10.0, 1000.0]: + polygon = PolygonMasks([[[0, 0, 0, d, d, d, d, 0]]]) + area = polygon.area()[0] + target = d**2 + self.assertEqual(area, target) + + # Draw polygon triangles + for d in [5.0, 10.0, 1000.0]: + polygon = PolygonMasks([[[0, 0, 0, d, d, d]]]) + area = polygon.area()[0] + target = d**2 / 2 + self.assertEqual(area, target) + + def test_paste_mask_scriptable(self): + scripted_f = torch.jit.script(paste_masks_in_image) + N = 10 + masks = torch.rand(N, 28, 28) + boxes = Boxes(random_boxes(N, 100)).tensor + image_shape = (150, 150) + + out = paste_masks_in_image(masks, boxes, image_shape) + scripted_out = scripted_f(masks, boxes, image_shape) + self.assertTrue(torch.equal(out, scripted_out)) + + +def benchmark_paste(): + S = 800 + H, W = image_shape = (S, S) + N = 64 + torch.manual_seed(42) + masks = torch.rand(N, 28, 28) + + center = torch.rand(N, 2) * 600 + 100 + wh = torch.clamp(torch.randn(N, 2) * 40 + 200, min=50) + x0y0 = torch.clamp(center - wh * 0.5, min=0.0) + x1y1 = torch.clamp(center + wh * 0.5, max=S) + boxes = Boxes(torch.cat([x0y0, x1y1], axis=1)) + + def func(device, n=3): + m = masks.to(device=device) + b = boxes.to(device=device) + + def bench(): + for _ in 
range(n):
+                paste_masks_in_image(m, b, image_shape)
+            if device.type == "cuda":
+                torch.cuda.synchronize()
+
+        return bench
+
+    specs = [{"device": torch.device("cpu"), "n": 3}]
+    if torch.cuda.is_available():
+        specs.append({"device": torch.device("cuda"), "n": 3})
+
+    benchmark(func, "paste_masks", specs, num_iters=10, warmup_iters=2)
+
+
+if __name__ == "__main__":
+    benchmark_paste()
+    unittest.main()
diff --git a/data_processing/detectron2/tests/layers/test_nms.py b/data_processing/detectron2/tests/layers/test_nms.py new file mode 100644 index 0000000..a042db6 --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_nms.py @@ -0,0 +1,33 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from __future__ import absolute_import, division, print_function, unicode_literals
+import unittest
+import torch
+
+from detectron2.layers import batched_nms
+from detectron2.utils.testing import random_boxes
+
+
+class TestNMS(unittest.TestCase):
+    def _create_tensors(self, N):
+        boxes = random_boxes(N, 200)
+        scores = torch.rand(N)
+        return boxes, scores
+
+    def test_nms_scriptability(self):
+        N = 2000
+        num_classes = 50
+        boxes, scores = self._create_tensors(N)
+        idxs = torch.randint(0, num_classes, (N,))
+        scripted_batched_nms = torch.jit.script(batched_nms)
+        err_msg = "NMS is incompatible with jit-scripted NMS for IoU={}"
+
+        for iou in [0.2, 0.5, 0.8]:
+            keep_ref = batched_nms(boxes, scores, idxs, iou)
+            backup = boxes.clone()
+            scripted_keep = scripted_batched_nms(boxes, scores, idxs, iou)
+            assert torch.allclose(boxes, backup), "boxes modified by jit-scripted batched_nms"
+            self.assertTrue(torch.equal(keep_ref, scripted_keep), err_msg.format(iou))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/data_processing/detectron2/tests/layers/test_nms_rotated.py b/data_processing/detectron2/tests/layers/test_nms_rotated.py new file mode 100644 index 0000000..4b45384 --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_nms_rotated.py @@ -0,0 +1,172 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from __future__ import absolute_import, division, print_function, unicode_literals
+import numpy as np
+import unittest
+from copy import deepcopy
+import torch
+from torchvision import ops
+
+from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated
+from detectron2.utils.testing import random_boxes
+
+
+def nms_edit_distance(keep1, keep2):
+    """
+    Compare the "keep" result of two nms calls.
+    They are allowed to be different in terms of edit distance
+    due to floating point precision issues, e.g.,
+    if a box happens to have an IoU of 0.5 with another box,
+    one implementation may choose to keep it while another may discard it.
+    """
+    keep1, keep2 = keep1.cpu(), keep2.cpu()
+    if torch.equal(keep1, keep2):
+        # they should be equal most of the time
+        return 0
+    keep1, keep2 = tuple(keep1), tuple(keep2)
+    m, n = len(keep1), len(keep2)
+
+    # edit distance with DP
+    f = [np.arange(n + 1), np.arange(n + 1)]
+    for i in range(m):
+        cur_row = i % 2
+        other_row = (i + 1) % 2
+        f[other_row][0] = i + 1
+        for j in range(n):
+            f[other_row][j + 1] = (
+                f[cur_row][j]
+                if keep1[i] == keep2[j]
+                else min(min(f[cur_row][j], f[cur_row][j + 1]), f[other_row][j]) + 1
+            )
+    return f[m % 2][n]
+
+
+class TestNMSRotated(unittest.TestCase):
+    def reference_horizontal_nms(self, boxes, scores, iou_threshold):
+        """
+        Args:
+            box_scores (N, 5): boxes in corner-form and probabilities.
+ (Note here 5 == 4 + 1, i.e., 4-dim horizontal box + 1-dim prob) + iou_threshold: intersection over union threshold. + Returns: + picked: a list of indexes of the kept boxes + """ + picked = [] + _, indexes = scores.sort(descending=True) + while len(indexes) > 0: + current = indexes[0] + picked.append(current.item()) + if len(indexes) == 1: + break + current_box = boxes[current, :] + indexes = indexes[1:] + rest_boxes = boxes[indexes, :] + iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1) + indexes = indexes[iou <= iou_threshold] + + return torch.as_tensor(picked) + + def _create_tensors(self, N, device="cpu"): + boxes = random_boxes(N, 200, device=device) + scores = torch.rand(N, device=device) + return boxes, scores + + def test_batched_nms_rotated_0_degree_cpu(self, device="cpu"): + N = 2000 + num_classes = 50 + boxes, scores = self._create_tensors(N, device=device) + idxs = torch.randint(0, num_classes, (N,)) + rotated_boxes = torch.zeros(N, 5, device=device) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" + for iou in [0.2, 0.5, 0.8]: + backup = boxes.clone() + keep_ref = batched_nms(boxes, scores, idxs, iou) + assert torch.allclose(boxes, backup), "boxes modified by batched_nms" + backup = rotated_boxes.clone() + keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou) + assert torch.allclose( + rotated_boxes, backup + ), "rotated_boxes modified by batched_nms_rotated" + # Occasionally the gap can be large if there are many IOU on the threshold boundary + self.assertLessEqual(nms_edit_distance(keep, keep_ref), 5, err_msg.format(iou)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_batched_nms_rotated_0_degree_cuda(self): + self.test_batched_nms_rotated_0_degree_cpu(device="cuda") + + def test_nms_rotated_0_degree_cpu(self, device="cpu"): + N = 1000 + boxes, scores = self._create_tensors(N, device=device) + rotated_boxes = torch.zeros(N, 5, device=device) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.2, 0.5, 0.8]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_nms_rotated_0_degree_cuda(self): + self.test_nms_rotated_0_degree_cpu(device="cuda") + + def test_nms_rotated_90_degrees_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]: + # widths and heights are intentionally swapped here for 90 degrees case + # so that the reference horizontal nms could be used + rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1] + rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0] + + rotated_boxes[:, 4] = torch.ones(N) * 90 + err_msg = "Rotated NMS incompatible between CPU and 
reference implementation for IoU={}" + for iou in [0.2, 0.5, 0.8]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) + + def test_nms_rotated_180_degrees_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + rotated_boxes[:, 4] = torch.ones(N) * 180 + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.2, 0.5, 0.8]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) + + +class TestScriptable(unittest.TestCase): + def setUp(self): + class TestingModule(torch.nn.Module): + def forward(self, boxes, scores, threshold): + return nms_rotated(boxes, scores, threshold) + + self.module = TestingModule() + + def test_scriptable_cpu(self): + m = deepcopy(self.module).cpu() + _ = torch.jit.script(m) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_scriptable_cuda(self): + m = deepcopy(self.module).cuda() + _ = torch.jit.script(m) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/layers/test_roi_align.py b/data_processing/detectron2/tests/layers/test_roi_align.py new file mode 100644 index 0000000..b6fd8ed --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_roi_align.py @@ -0,0 +1,210 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import numpy as np +import unittest +from copy import copy +import cv2 +import torch +from fvcore.common.benchmark import benchmark +from torch.nn import functional as F + +from detectron2.layers.roi_align import ROIAlign, roi_align + + +class ROIAlignTest(unittest.TestCase): + def test_forward_output(self): + input = np.arange(25).reshape(5, 5).astype("float32") + """ + 0 1 2 3 4 + 5 6 7 8 9 + 10 11 12 13 14 + 15 16 17 18 19 + 20 21 22 23 24 + """ + + output = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=False) + output_correct = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=True) + + # without correction: + old_results = [ + [7.5, 8, 8.5, 9], + [10, 10.5, 11, 11.5], + [12.5, 13, 13.5, 14], + [15, 15.5, 16, 16.5], + ] + + # with 0.5 correction: + correct_results = [ + [4.5, 5.0, 5.5, 6.0], + [7.0, 7.5, 8.0, 8.5], + [9.5, 10.0, 10.5, 11.0], + [12.0, 12.5, 13.0, 13.5], + ] + # This is an upsampled version of [[6, 7], [11, 12]] + + self.assertTrue(np.allclose(output.flatten(), np.asarray(old_results).flatten())) + self.assertTrue( + np.allclose(output_correct.flatten(), np.asarray(correct_results).flatten()) + ) + + # Also see similar issues in tensorflow at + # https://github.com/tensorflow/tensorflow/issues/26278 + + def test_resize(self): + H, W = 30, 30 + input = np.random.rand(H, W).astype("float32") * 100 + box = [10, 10, 20, 20] + output = self._simple_roialign(input, box, (5, 5), aligned=True) + + input2x = cv2.resize(input, (W // 2, H // 2), interpolation=cv2.INTER_LINEAR) + box2x = [x / 2 for x in box] + output2x = self._simple_roialign(input2x, box2x, (5, 5), aligned=True) + diff = np.abs(output2x - output) + self.assertTrue(diff.max() < 1e-4) + + def test_grid_sample_equivalence(self): + H, W = 30, 30 + input = np.random.rand(H, W).astype("float32") * 100 + box = [10, 10, 20, 20] + for ratio in [1, 2, 3]: + output = self._simple_roialign(input, box, (5, 5), sampling_ratio=ratio) + output_grid_sample = grid_sample_roi_align( + torch.from_numpy(input[None, None, :, :]).float(), + torch.as_tensor(box).float()[None, :], + 5, + 1.0, + ratio, + ) + self.assertTrue(torch.allclose(output, output_grid_sample)) + + def _simple_roialign(self, img, box, resolution, sampling_ratio=0, aligned=True): + """ + RoiAlign with scale 1.0. 
+ """ + if isinstance(resolution, int): + resolution = (resolution, resolution) + op = ROIAlign(resolution, 1.0, sampling_ratio, aligned=aligned) + input = torch.from_numpy(img[None, None, :, :].astype("float32")) + + rois = [0] + list(box) + rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32")) + output = op.forward(input, rois) + if torch.cuda.is_available(): + output_cuda = op.forward(input.cuda(), rois.cuda()).cpu() + self.assertTrue(torch.allclose(output, output_cuda)) + return output[0, 0] + + def _simple_roialign_with_grad(self, img, box, resolution, device): + if isinstance(resolution, int): + resolution = (resolution, resolution) + + op = ROIAlign(resolution, 1.0, 0, aligned=True) + input = torch.from_numpy(img[None, None, :, :].astype("float32")) + + rois = [0] + list(box) + rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32")) + input = input.to(device=device) + rois = rois.to(device=device) + input.requires_grad = True + output = op.forward(input, rois) + return input, output + + def test_empty_box(self): + img = np.random.rand(5, 5) + box = [3, 4, 5, 4] + o = self._simple_roialign(img, box, 7) + self.assertTrue(o.shape == (7, 7)) + self.assertTrue((o == 0).all()) + + for dev in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + input, output = self._simple_roialign_with_grad(img, box, 7, torch.device(dev)) + output.sum().backward() + self.assertTrue(torch.allclose(input.grad, torch.zeros_like(input))) + + def test_empty_batch(self): + input = torch.zeros(0, 3, 10, 10, dtype=torch.float32) + rois = torch.zeros(0, 5, dtype=torch.float32) + op = ROIAlign((7, 7), 1.0, 0, aligned=True) + output = op.forward(input, rois) + self.assertTrue(output.shape == (0, 3, 7, 7)) + + +def grid_sample_roi_align(input, boxes, output_size, scale, sampling_ratio): + # unlike true roi_align, this does not support different batch_idx + from detectron2.projects.point_rend.point_features import ( + generate_regular_grid_point_coords, + get_point_coords_wrt_image, + point_sample, + ) + + N, _, H, W = input.shape + R = len(boxes) + assert N == 1 + boxes = boxes * scale + grid = generate_regular_grid_point_coords(R, output_size * sampling_ratio, device=boxes.device) + coords = get_point_coords_wrt_image(boxes, grid) + coords = coords / torch.as_tensor([W, H], device=coords.device) # R, s^2, 2 + res = point_sample(input, coords.unsqueeze(0), align_corners=False) # 1,C, R,s^2 + res = ( + res.squeeze(0) + .permute(1, 0, 2) + .reshape(R, -1, output_size * sampling_ratio, output_size * sampling_ratio) + ) + res = F.avg_pool2d(res, sampling_ratio) + return res + + +def benchmark_roi_align(): + def random_boxes(mean_box, stdev, N, maxsize): + ret = torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float) + ret.clamp_(min=0, max=maxsize) + return ret + + def func(shape, nboxes_per_img, sampling_ratio, device, box_size="large"): + N, _, H, _ = shape + input = torch.rand(*shape) + boxes = [] + batch_idx = [] + for k in range(N): + if box_size == "large": + b = random_boxes([80, 80, 130, 130], 24, nboxes_per_img, H) + else: + b = random_boxes([100, 100, 110, 110], 4, nboxes_per_img, H) + boxes.append(b) + batch_idx.append(torch.zeros(nboxes_per_img, 1, dtype=torch.float32) + k) + boxes = torch.cat(boxes, axis=0) + batch_idx = torch.cat(batch_idx, axis=0) + boxes = torch.cat([batch_idx, boxes], axis=1) + + input = input.to(device=device) + boxes = boxes.to(device=device) + + def bench(): + if False and sampling_ratio > 0 and N == 1: + # enable to benchmark grid_sample 
(slower) + grid_sample_roi_align(input, boxes[:, 1:], 7, 1.0, sampling_ratio) + else: + roi_align(input, boxes, 7, 1.0, sampling_ratio, True) + if device == "cuda": + torch.cuda.synchronize() + + return bench + + def gen_args(arg): + args = [] + for size in ["small", "large"]: + for ratio in [0, 2]: + args.append(copy(arg)) + args[-1]["sampling_ratio"] = ratio + args[-1]["box_size"] = size + return args + + arg = dict(shape=(1, 512, 256, 256), nboxes_per_img=512, device="cuda") + benchmark(func, "cuda_roialign", gen_args(arg), num_iters=20, warmup_iters=1) + arg.update({"device": "cpu", "shape": (1, 256, 128, 128)}) + benchmark(func, "cpu_roialign", gen_args(arg), num_iters=5, warmup_iters=1) + + +if __name__ == "__main__": + if torch.cuda.is_available(): + benchmark_roi_align() + unittest.main() diff --git a/data_processing/detectron2/tests/layers/test_roi_align_rotated.py b/data_processing/detectron2/tests/layers/test_roi_align_rotated.py new file mode 100644 index 0000000..7323d7d --- /dev/null +++ b/data_processing/detectron2/tests/layers/test_roi_align_rotated.py @@ -0,0 +1,176 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import unittest +import cv2 +import torch +from torch.autograd import Variable, gradcheck + +from detectron2.layers.roi_align import ROIAlign +from detectron2.layers.roi_align_rotated import ROIAlignRotated + +logger = logging.getLogger(__name__) + + +class ROIAlignRotatedTest(unittest.TestCase): + def _box_to_rotated_box(self, box, angle): + return [ + (box[0] + box[2]) / 2.0, + (box[1] + box[3]) / 2.0, + box[2] - box[0], + box[3] - box[1], + angle, + ] + + def _rot90(self, img, num): + num = num % 4 # note: -1 % 4 == 3 + for _ in range(num): + img = img.transpose(0, 1).flip(0) + return img + + def test_forward_output_0_90_180_270(self): + for i in range(4): + # i = 0, 1, 2, 3 corresponding to 0, 90, 180, 270 degrees + img = torch.arange(25, dtype=torch.float32).reshape(5, 5) + """ + 0 1 2 3 4 + 5 6 7 8 9 + 10 11 12 13 14 + 15 16 17 18 19 + 20 21 22 23 24 + """ + box = [1, 1, 3, 3] + rotated_box = self._box_to_rotated_box(box=box, angle=90 * i) + + result = self._simple_roi_align_rotated(img=img, box=rotated_box, resolution=(4, 4)) + + # Here's an explanation for 0 degree case: + # point 0 in the original input lies at [0.5, 0.5] + # (the center of bin [0, 1] x [0, 1]) + # point 1 in the original input lies at [1.5, 0.5], etc. 
+ # since the resolution is (4, 4) that divides [1, 3] x [1, 3] + # into 4 x 4 equal bins, + # the top-left bin is [1, 1.5] x [1, 1.5], and its center + # (1.25, 1.25) lies at the 3/4 position + # between point 0 and point 1, point 5 and point 6, + # point 0 and point 5, point 1 and point 6, so it can be calculated as + # 0.25*(0*0.25+1*0.75)+(5*0.25+6*0.75)*0.75 = 4.5 + result_expected = torch.tensor( + [ + [4.5, 5.0, 5.5, 6.0], + [7.0, 7.5, 8.0, 8.5], + [9.5, 10.0, 10.5, 11.0], + [12.0, 12.5, 13.0, 13.5], + ] + ) + # This is also an upsampled version of [[6, 7], [11, 12]] + + # When the box is rotated by 90 degrees CCW, + # the result would be rotated by 90 degrees CW, thus it's -i here + result_expected = self._rot90(result_expected, -i) + + assert torch.allclose(result, result_expected) + + def test_resize(self): + H, W = 30, 30 + input = torch.rand(H, W) * 100 + box = [10, 10, 20, 20] + rotated_box = self._box_to_rotated_box(box, angle=0) + output = self._simple_roi_align_rotated(img=input, box=rotated_box, resolution=(5, 5)) + + input2x = cv2.resize(input.numpy(), (W // 2, H // 2), interpolation=cv2.INTER_LINEAR) + input2x = torch.from_numpy(input2x) + box2x = [x / 2 for x in box] + rotated_box2x = self._box_to_rotated_box(box2x, angle=0) + output2x = self._simple_roi_align_rotated(img=input2x, box=rotated_box2x, resolution=(5, 5)) + assert torch.allclose(output2x, output) + + def _simple_roi_align_rotated(self, img, box, resolution): + """ + RoiAlignRotated with scale 1.0 and 0 sample ratio. + """ + op = ROIAlignRotated(output_size=resolution, spatial_scale=1.0, sampling_ratio=0) + input = img[None, None, :, :] + + rois = [0] + list(box) + rois = torch.tensor(rois, dtype=torch.float32)[None, :] + result_cpu = op.forward(input, rois) + if torch.cuda.is_available(): + result_cuda = op.forward(input.cuda(), rois.cuda()) + assert torch.allclose(result_cpu, result_cuda.cpu()) + return result_cpu[0, 0] + + def test_empty_box(self): + img = torch.rand(5, 5) + out = self._simple_roi_align_rotated(img, [2, 3, 0, 0, 0], (7, 7)) + self.assertTrue((out == 0).all()) + + def test_roi_align_rotated_gradcheck_cpu(self): + dtype = torch.float64 + device = torch.device("cpu") + roi_align_rotated_op = ROIAlignRotated( + output_size=(5, 5), spatial_scale=0.5, sampling_ratio=1 + ).to(dtype=dtype, device=device) + x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) + # roi format is (batch index, x_center, y_center, width, height, angle) + rois = torch.tensor( + [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], + dtype=dtype, + device=device, + ) + + def func(input): + return roi_align_rotated_op(input, rois) + + assert gradcheck(func, (x,)), "gradcheck failed for RoIAlignRotated CPU" + assert gradcheck(func, (x.transpose(2, 3),)), "gradcheck failed for RoIAlignRotated CPU" + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_roi_align_rotated_gradient_cuda(self): + """ + Compute gradients for ROIAlignRotated with multiple bounding boxes on the GPU, + and compare the result with ROIAlign + """ + # torch.manual_seed(123) + dtype = torch.float64 + device = torch.device("cuda") + pool_h, pool_w = (5, 5) + + roi_align = ROIAlign(output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2).to( + device=device + ) + + roi_align_rotated = ROIAlignRotated( + output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2 + ).to(device=device) + + x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) + # x_rotated 
= x.clone() won't work (will lead to grad_fun=CloneBackward)! + x_rotated = Variable(x.data.clone(), requires_grad=True) + + # roi_rotated format is (batch index, x_center, y_center, width, height, angle) + rois_rotated = torch.tensor( + [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], + dtype=dtype, + device=device, + ) + + y_rotated = roi_align_rotated(x_rotated, rois_rotated) + s_rotated = y_rotated.sum() + s_rotated.backward() + + # roi format is (batch index, x1, y1, x2, y2) + rois = torch.tensor( + [[0, 0, 0, 9, 9], [0, 0, 5, 4, 9], [0, 5, 5, 9, 9]], dtype=dtype, device=device + ) + + y = roi_align(x, rois) + s = y.sum() + s.backward() + + assert torch.allclose( + x.grad, x_rotated.grad + ), "gradients for ROIAlign and ROIAlignRotated mismatch on CUDA" + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/modeling/__init__.py b/data_processing/detectron2/tests/modeling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/tests/modeling/test_anchor_generator.py b/data_processing/detectron2/tests/modeling/test_anchor_generator.py new file mode 100644 index 0000000..13a808e --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_anchor_generator.py @@ -0,0 +1,120 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.layers import ShapeSpec +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator + +logger = logging.getLogger(__name__) + + +class TestAnchorGenerator(unittest.TestCase): + def test_default_anchor_generator(self): + cfg = get_cfg() + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] + + anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)]) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + anchors = anchor_generator([features["stage3"]]) + expected_anchor_tensor = torch.tensor( + [ + [-32.0, -8.0, 32.0, 8.0], + [-16.0, -16.0, 16.0, 16.0], + [-8.0, -32.0, 8.0, 32.0], + [-64.0, -16.0, 64.0, 16.0], + [-32.0, -32.0, 32.0, 32.0], + [-16.0, -64.0, 16.0, 64.0], + [-28.0, -8.0, 36.0, 8.0], # -28.0 == -32.0 + STRIDE (4) + [-12.0, -16.0, 20.0, 16.0], + [-4.0, -32.0, 12.0, 32.0], + [-60.0, -16.0, 68.0, 16.0], + [-28.0, -32.0, 36.0, 32.0], + [-12.0, -64.0, 20.0, 64.0], + ] + ) + + self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) + + def test_default_anchor_generator_centered(self): + # test explicit args + anchor_generator = DefaultAnchorGenerator( + sizes=[32, 64], aspect_ratios=[0.25, 1, 4], strides=[4] + ) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + expected_anchor_tensor = torch.tensor( + [ + [-30.0, -6.0, 34.0, 10.0], + [-14.0, -14.0, 18.0, 18.0], + [-6.0, -30.0, 10.0, 34.0], + [-62.0, -14.0, 66.0, 18.0], + [-30.0, -30.0, 34.0, 34.0], + [-14.0, -62.0, 18.0, 66.0], + [-26.0, -6.0, 38.0, 10.0], + [-10.0, -14.0, 22.0, 18.0], + [-2.0, -30.0, 14.0, 34.0], + [-58.0, -14.0, 70.0, 18.0], + [-26.0, -30.0, 38.0, 34.0], + [-10.0, -62.0, 22.0, 66.0], + ] + ) + + anchors = anchor_generator([features["stage3"]]) + self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) + + anchors = torch.jit.script(anchor_generator)([features["stage3"]]) + 
self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) + + def test_rrpn_anchor_generator(self): + cfg = get_cfg() + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] + cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [0, 45] # test single list[float] + anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)]) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + anchors = anchor_generator([features["stage3"]]) + expected_anchor_tensor = torch.tensor( + [ + [0.0, 0.0, 64.0, 16.0, 0.0], + [0.0, 0.0, 64.0, 16.0, 45.0], + [0.0, 0.0, 32.0, 32.0, 0.0], + [0.0, 0.0, 32.0, 32.0, 45.0], + [0.0, 0.0, 16.0, 64.0, 0.0], + [0.0, 0.0, 16.0, 64.0, 45.0], + [0.0, 0.0, 128.0, 32.0, 0.0], + [0.0, 0.0, 128.0, 32.0, 45.0], + [0.0, 0.0, 64.0, 64.0, 0.0], + [0.0, 0.0, 64.0, 64.0, 45.0], + [0.0, 0.0, 32.0, 128.0, 0.0], + [0.0, 0.0, 32.0, 128.0, 45.0], + [4.0, 0.0, 64.0, 16.0, 0.0], # 4.0 == 0.0 + STRIDE (4) + [4.0, 0.0, 64.0, 16.0, 45.0], + [4.0, 0.0, 32.0, 32.0, 0.0], + [4.0, 0.0, 32.0, 32.0, 45.0], + [4.0, 0.0, 16.0, 64.0, 0.0], + [4.0, 0.0, 16.0, 64.0, 45.0], + [4.0, 0.0, 128.0, 32.0, 0.0], + [4.0, 0.0, 128.0, 32.0, 45.0], + [4.0, 0.0, 64.0, 64.0, 0.0], + [4.0, 0.0, 64.0, 64.0, 45.0], + [4.0, 0.0, 32.0, 128.0, 0.0], + [4.0, 0.0, 32.0, 128.0, 45.0], + ] + ) + + self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/modeling/test_backbone.py b/data_processing/detectron2/tests/modeling/test_backbone.py new file mode 100644 index 0000000..3bb100f --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_backbone.py @@ -0,0 +1,34 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import unittest +import torch + +import detectron2.export.torchscript # apply patch # noqa +from detectron2 import model_zoo +from detectron2.config import get_cfg +from detectron2.layers import ShapeSpec +from detectron2.modeling.backbone import build_resnet_backbone +from detectron2.modeling.backbone.fpn import build_resnet_fpn_backbone + + +class TestBackBone(unittest.TestCase): + def test_resnet_scriptability(self): + cfg = get_cfg() + resnet = build_resnet_backbone(cfg, ShapeSpec(channels=3)) + + scripted_resnet = torch.jit.script(resnet) + + inp = torch.rand(2, 3, 100, 100) + out1 = resnet(inp)["res4"] + out2 = scripted_resnet(inp)["res4"] + self.assertTrue(torch.allclose(out1, out2)) + + def test_fpn_scriptability(self): + cfg = model_zoo.get_config("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml") + bb = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=3)) + bb_s = torch.jit.script(bb) + + inp = torch.rand(2, 3, 128, 128) + out1 = bb(inp)["p5"] + out2 = bb_s(inp)["p5"] + self.assertTrue(torch.allclose(out1, out2)) diff --git a/data_processing/detectron2/tests/modeling/test_box2box_transform.py b/data_processing/detectron2/tests/modeling/test_box2box_transform.py new file mode 100644 index 0000000..fd3a7b7 --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_box2box_transform.py @@ -0,0 +1,94 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
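A quick way to sanity-check the expected anchor tensors in the tests above: for a size `s` and aspect ratio `a`, the cell anchors keep the area at `s**2` and set `h / w = a`, so `w = s / sqrt(a)` and `h = s * sqrt(a)`; the grid then shifts each anchor by multiples of the stride (4 in the test), and the "centered" variant additionally adds `stride / 2 = 2` to every coordinate. The sketch below is my own re-derivation of that arithmetic, not the library's generator; `cell_anchors` is a hypothetical helper.

```python
import math

def cell_anchors(sizes, aspect_ratios):
    """XYXY anchors centered at (0, 0): area == size**2 and h / w == ratio (assumed convention)."""
    anchors = []
    for size in sizes:
        area = size ** 2
        for ratio in aspect_ratios:
            w = math.sqrt(area / ratio)
            h = ratio * w
            anchors.append((-w / 2, -h / 2, w / 2, h / 2))
    return anchors

base = cell_anchors([32, 64], [0.25, 1, 4])
print(base[0])     # (-32.0, -8.0, 32.0, 8.0) -> first expected row in the test
# Shifting every anchor by one stride (4 px) in x reproduces rows 7-12 of the
# expected tensor, e.g. -32 + 4 = -28 and 32 + 4 = 36.
shifted = [(x0 + 4, y0, x1 + 4, y1) for x0, y0, x1, y1 in base]
print(shifted[0])  # (-28.0, -8.0, 36.0, 8.0)
```

The rotated expectations follow the same width/height arithmetic but are written as (cx, cy, w, h, angle), with one copy per configured angle, which is why every box appears once at 0° and once at 45° in the RRPN test.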
+import logging +import unittest +import torch + +from detectron2.modeling.box_regression import ( + Box2BoxTransform, + Box2BoxTransformLinear, + Box2BoxTransformRotated, +) +from detectron2.utils.testing import random_boxes + +logger = logging.getLogger(__name__) + + +class TestBox2BoxTransform(unittest.TestCase): + def test_reconstruction(self): + weights = (5, 5, 10, 10) + b2b_tfm = Box2BoxTransform(weights=weights) + src_boxes = random_boxes(10) + dst_boxes = random_boxes(10) + + devices = [torch.device("cpu")] + if torch.cuda.is_available(): + devices.append(torch.device("cuda")) + for device in devices: + src_boxes = src_boxes.to(device=device) + dst_boxes = dst_boxes.to(device=device) + deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes) + dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes) + self.assertTrue(torch.allclose(dst_boxes, dst_boxes_reconstructed)) + + def test_apply_deltas_tracing(self): + weights = (5, 5, 10, 10) + b2b_tfm = Box2BoxTransform(weights=weights) + + with torch.no_grad(): + func = torch.jit.trace(b2b_tfm.apply_deltas, (torch.randn(10, 20), torch.randn(10, 4))) + + o = func(torch.randn(10, 20), torch.randn(10, 4)) + self.assertEqual(o.shape, (10, 20)) + o = func(torch.randn(5, 20), torch.randn(5, 4)) + self.assertEqual(o.shape, (5, 20)) + + +def random_rotated_boxes(mean_box, std_length, std_angle, N): + return torch.cat( + [torch.rand(N, 4) * std_length, torch.rand(N, 1) * std_angle], dim=1 + ) + torch.tensor(mean_box, dtype=torch.float) + + +class TestBox2BoxTransformRotated(unittest.TestCase): + def test_reconstruction(self): + weights = (5, 5, 10, 10, 1) + b2b_transform = Box2BoxTransformRotated(weights=weights) + src_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10) + dst_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10) + + devices = [torch.device("cpu")] + if torch.cuda.is_available(): + devices.append(torch.device("cuda")) + for device in devices: + src_boxes = src_boxes.to(device=device) + dst_boxes = dst_boxes.to(device=device) + deltas = b2b_transform.get_deltas(src_boxes, dst_boxes) + dst_boxes_reconstructed = b2b_transform.apply_deltas(deltas, src_boxes) + assert torch.allclose(dst_boxes[:, :4], dst_boxes_reconstructed[:, :4], atol=1e-5) + # angle difference has to be normalized + assert torch.allclose( + (dst_boxes[:, 4] - dst_boxes_reconstructed[:, 4] + 180.0) % 360.0 - 180.0, + torch.zeros_like(dst_boxes[:, 4]), + atol=1e-4, + ) + + +class TestBox2BoxTransformLinear(unittest.TestCase): + def test_reconstruction(self): + b2b_tfm = Box2BoxTransformLinear() + src_boxes = random_boxes(10) + dst_boxes = torch.tensor([0, 0, 101, 101] * 10).reshape(10, 4).float() + + devices = [torch.device("cpu")] + if torch.cuda.is_available(): + devices.append(torch.device("cuda")) + for device in devices: + src_boxes = src_boxes.to(device=device) + dst_boxes = dst_boxes.to(device=device) + deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes) + dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes) + self.assertTrue(torch.allclose(dst_boxes, dst_boxes_reconstructed, atol=1e-3)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/modeling/test_fast_rcnn.py b/data_processing/detectron2/tests/modeling/test_fast_rcnn.py new file mode 100644 index 0000000..e29b944 --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_fast_rcnn.py @@ -0,0 +1,171 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
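The reconstruction tests above rely on the standard R-CNN box parameterization: deltas are weighted offsets of the box centers plus log-ratios of the widths and heights, and `apply_deltas` exactly inverts `get_deltas`. The following is a minimal re-derivation of that round trip under the (5, 5, 10, 10) weights used in the test; it is a simplified sketch with hypothetical helper names, not the library implementation.

```python
import torch

WEIGHTS = (5.0, 5.0, 10.0, 10.0)  # (wx, wy, ww, wh), as in the test

def _ctr_wh(boxes):
    # XYXY -> center x, center y, width, height
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]
    return boxes[:, 0] + 0.5 * w, boxes[:, 1] + 0.5 * h, w, h

def get_deltas(src, dst, weights=WEIGHTS):
    wx, wy, ww, wh = weights
    scx, scy, sw, sh = _ctr_wh(src)
    dcx, dcy, dw, dh = _ctr_wh(dst)
    return torch.stack([wx * (dcx - scx) / sw,
                        wy * (dcy - scy) / sh,
                        ww * torch.log(dw / sw),
                        wh * torch.log(dh / sh)], dim=1)

def apply_deltas(deltas, src, weights=WEIGHTS):
    wx, wy, ww, wh = weights
    scx, scy, sw, sh = _ctr_wh(src)
    cx = scx + deltas[:, 0] / wx * sw
    cy = scy + deltas[:, 1] / wy * sh
    w = sw * torch.exp(deltas[:, 2] / ww)
    h = sh * torch.exp(deltas[:, 3] / wh)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h,
                        cx + 0.5 * w, cy + 0.5 * h], dim=1)

src = torch.tensor([[10.0, 10.0, 30.0, 50.0]])
dst = torch.tensor([[12.0, 8.0, 40.0, 44.0]])
# Encoding then decoding recovers the target box, which is the property the test asserts.
assert torch.allclose(apply_deltas(get_deltas(src, dst), src), dst, atol=1e-5)
```

The rotated variant adds a fifth (angle) delta, which is why the rotated test normalizes the angle difference into (-180°, 180°] before comparing.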
+import logging +import unittest +import torch + +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated +from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers +from detectron2.modeling.roi_heads.rotated_fast_rcnn import RotatedFastRCNNOutputLayers +from detectron2.structures import Boxes, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class FastRCNNTest(unittest.TestCase): + def test_fast_rcnn(self): + torch.manual_seed(132) + + box_head_output_size = 8 + + box_predictor = FastRCNNOutputLayers( + ShapeSpec(channels=box_head_output_size), + box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), + num_classes=5, + ) + feature_pooled = torch.rand(2, box_head_output_size) + predictions = box_predictor(feature_pooled) + + proposal_boxes = torch.tensor([[0.8, 1.1, 3.2, 2.8], [2.3, 2.5, 7, 8]], dtype=torch.float32) + gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + proposal = Instances((10, 10)) + proposal.proposal_boxes = Boxes(proposal_boxes) + proposal.gt_boxes = Boxes(gt_boxes) + proposal.gt_classes = torch.tensor([1, 2]) + + with EventStorage(): # capture events in a new storage to discard them + losses = box_predictor.losses(predictions, [proposal]) + + expected_losses = { + "loss_cls": torch.tensor(1.7951188087), + "loss_box_reg": torch.tensor(4.0357131958), + } + for name in expected_losses.keys(): + assert torch.allclose(losses[name], expected_losses[name]) + + def test_fast_rcnn_empty_batch(self, device="cpu"): + box_predictor = FastRCNNOutputLayers( + ShapeSpec(channels=10), + box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), + num_classes=8, + ).to(device=device) + + logits = torch.randn(0, 100, requires_grad=True, device=device) + deltas = torch.randn(0, 4, requires_grad=True, device=device) + losses = box_predictor.losses([logits, deltas], []) + for value in losses.values(): + self.assertTrue(torch.allclose(value, torch.zeros_like(value))) + sum(losses.values()).backward() + self.assertTrue(logits.grad is not None) + self.assertTrue(deltas.grad is not None) + + predictions, _ = box_predictor.inference([logits, deltas], []) + self.assertEqual(len(predictions), 0) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_fast_rcnn_empty_batch_cuda(self): + self.test_fast_rcnn_empty_batch(device=torch.device("cuda")) + + def test_fast_rcnn_rotated(self): + torch.manual_seed(132) + box_head_output_size = 8 + + box_predictor = RotatedFastRCNNOutputLayers( + ShapeSpec(channels=box_head_output_size), + box2box_transform=Box2BoxTransformRotated(weights=(10, 10, 5, 5, 1)), + num_classes=5, + ) + feature_pooled = torch.rand(2, box_head_output_size) + predictions = box_predictor(feature_pooled) + proposal_boxes = torch.tensor( + [[2, 1.95, 2.4, 1.7, 0], [4.65, 5.25, 4.7, 5.5, 0]], dtype=torch.float32 + ) + gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32) + proposal = Instances((10, 10)) + proposal.proposal_boxes = RotatedBoxes(proposal_boxes) + proposal.gt_boxes = RotatedBoxes(gt_boxes) + proposal.gt_classes = torch.tensor([1, 2]) + + with EventStorage(): # capture events in a new storage to discard them + losses = box_predictor.losses(predictions, [proposal]) + + # Note: the expected losses are slightly different even if + # the boxes are essentially the same as in the FastRCNNOutput test, because + # bbox_pred in 
FastRCNNOutputLayers have different Linear layers/initialization + # between the two cases. + expected_losses = { + "loss_cls": torch.tensor(1.7920907736), + "loss_box_reg": torch.tensor(4.0410838127), + } + for name in expected_losses.keys(): + assert torch.allclose(losses[name], expected_losses[name]) + + def test_predict_boxes_tracing(self): + class Model(torch.nn.Module): + def __init__(self, output_layer): + super(Model, self).__init__() + self._output_layer = output_layer + + def forward(self, proposal_deltas, proposal_boxes): + instances = Instances((10, 10)) + instances.proposal_boxes = Boxes(proposal_boxes) + return self._output_layer.predict_boxes((None, proposal_deltas), [instances]) + + box_head_output_size = 8 + + box_predictor = FastRCNNOutputLayers( + ShapeSpec(channels=box_head_output_size), + box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), + num_classes=5, + ) + + model = Model(box_predictor) + + from detectron2.export.torchscript_patch import patch_builtin_len + + with torch.no_grad(), patch_builtin_len(): + func = torch.jit.trace(model, (torch.randn(10, 20), torch.randn(10, 4))) + + o = func(torch.randn(10, 20), torch.randn(10, 4)) + self.assertEqual(o[0].shape, (10, 20)) + o = func(torch.randn(5, 20), torch.randn(5, 4)) + self.assertEqual(o[0].shape, (5, 20)) + o = func(torch.randn(20, 20), torch.randn(20, 4)) + self.assertEqual(o[0].shape, (20, 20)) + + def test_predict_probs_tracing(self): + class Model(torch.nn.Module): + def __init__(self, output_layer): + super(Model, self).__init__() + self._output_layer = output_layer + + def forward(self, scores, proposal_boxes): + instances = Instances((10, 10)) + instances.proposal_boxes = Boxes(proposal_boxes) + return self._output_layer.predict_probs((scores, None), [instances]) + + box_head_output_size = 8 + + box_predictor = FastRCNNOutputLayers( + ShapeSpec(channels=box_head_output_size), + box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), + num_classes=5, + ) + + model = Model(box_predictor) + + from detectron2.export.torchscript_patch import patch_builtin_len + + with torch.no_grad(), patch_builtin_len(): + func = torch.jit.trace(model, (torch.randn(10, 6), torch.rand(10, 4))) + o = func(torch.randn(10, 6), torch.randn(10, 4)) + self.assertEqual(o[0].shape, (10, 6)) + o = func(torch.randn(5, 6), torch.randn(5, 4)) + self.assertEqual(o[0].shape, (5, 6)) + o = func(torch.randn(20, 6), torch.randn(20, 4)) + self.assertEqual(o[0].shape, (20, 6)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/modeling/test_matcher.py b/data_processing/detectron2/tests/modeling/test_matcher.py new file mode 100644 index 0000000..6eb2db0 --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_matcher.py @@ -0,0 +1,42 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
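The magic numbers in the tracing tests above (deltas of shape `(N, 20)` and scores of shape `(N, 6)` for `num_classes=5`) come from the usual two-head layout: one linear layer emits `K + 1` classification logits (the extra column is background) and another emits `4 * K` class-specific box deltas. The toy module below only mirrors that shape contract; `ToyBoxPredictor` is a hypothetical name, not the detectron2 class.

```python
import torch
from torch import nn

class ToyBoxPredictor(nn.Module):
    """Reproduces only the output shapes of a Fast R-CNN box predictor."""
    def __init__(self, in_channels: int, num_classes: int):
        super().__init__()
        self.cls_score = nn.Linear(in_channels, num_classes + 1)  # K foreground classes + background
        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)  # 4 deltas per foreground class

    def forward(self, x: torch.Tensor):
        return self.cls_score(x), self.bbox_pred(x)

head = ToyBoxPredictor(in_channels=8, num_classes=5)
scores, deltas = head(torch.rand(10, 8))
print(scores.shape, deltas.shape)  # torch.Size([10, 6]) torch.Size([10, 20])
```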
+import unittest +from typing import List +import torch + +from detectron2.config import get_cfg +from detectron2.modeling.matcher import Matcher + + +class TestMatcher(unittest.TestCase): + def test_scriptability(self): + cfg = get_cfg() + anchor_matcher = Matcher( + cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True + ) + match_quality_matrix = torch.tensor( + [[0.15, 0.45, 0.2, 0.6], [0.3, 0.65, 0.05, 0.1], [0.05, 0.4, 0.25, 0.4]] + ) + expected_matches = torch.tensor([1, 1, 2, 0]) + expected_match_labels = torch.tensor([-1, 1, 0, 1], dtype=torch.int8) + + matches, match_labels = anchor_matcher(match_quality_matrix) + self.assertTrue(torch.allclose(matches, expected_matches)) + self.assertTrue(torch.allclose(match_labels, expected_match_labels)) + + # nonzero_tuple must be import explicitly to let jit know what it is. + # https://github.com/pytorch/pytorch/issues/38964 + from detectron2.layers import nonzero_tuple # noqa F401 + + def f(thresholds: List[float], labels: List[int]): + return Matcher(thresholds, labels, allow_low_quality_matches=True) + + scripted_anchor_matcher = torch.jit.script(f)( + cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS + ) + matches, match_labels = scripted_anchor_matcher(match_quality_matrix) + self.assertTrue(torch.allclose(matches, expected_matches)) + self.assertTrue(torch.allclose(match_labels, expected_match_labels)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/modeling/test_mmdet.py b/data_processing/detectron2/tests/modeling/test_mmdet.py new file mode 100644 index 0000000..a743b0b --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_mmdet.py @@ -0,0 +1,186 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
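The hard-coded expectations in the matcher test above follow from a simple rule: for every predicted box (a column of the IoU matrix) take its best ground-truth row, then label it 0 below the low threshold, -1 between the thresholds, and 1 above the high threshold; with `allow_low_quality_matches=True`, each ground truth additionally promotes the prediction(s) it overlaps best to 1. The standalone re-derivation below reproduces the expected `[-1, 1, 0, 1]` labels; the `match` function and the 0.3/0.7 thresholds are assumptions made for illustration (the test reads them from the RPN config).

```python
import torch

def match(quality: torch.Tensor, lo: float = 0.3, hi: float = 0.7, low_quality: bool = True):
    """quality: (num_gt, num_pred) IoU matrix -> (matches, labels) per prediction."""
    vals, matches = quality.max(dim=0)                   # best ground truth for each prediction
    labels = torch.full_like(matches, -1, dtype=torch.int8)
    labels[vals < lo] = 0                                # clear negatives
    labels[vals >= hi] = 1                               # clear positives
    if low_quality:
        # every ground truth promotes the prediction(s) it overlaps best, even below `hi`
        best_per_gt, _ = quality.max(dim=1)
        promoted = (quality == best_per_gt[:, None]).any(dim=0)
        labels[promoted] = 1
    return matches, labels

q = torch.tensor([[0.15, 0.45, 0.20, 0.60],
                  [0.30, 0.65, 0.05, 0.10],
                  [0.05, 0.40, 0.25, 0.40]])
print(match(q))  # matches [1, 1, 2, 0], labels [-1, 1, 0, 1]
```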
+import unittest + +from detectron2.layers import ShapeSpec +from detectron2.modeling.mmdet_wrapper import MMDetBackbone, MMDetDetector + +try: + import mmdet.models # noqa + + HAS_MMDET = True +except ImportError: + HAS_MMDET = False + + +@unittest.skipIf(not HAS_MMDET, "mmdet not available") +class TestMMDetWrapper(unittest.TestCase): + def test_backbone(self): + MMDetBackbone( + backbone=dict( + type="DetectoRS_ResNet", + conv_cfg=dict(type="ConvAWS"), + sac=dict(type="SAC", use_deform=True), + stage_with_sac=(False, True, True, True), + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type="BN", requires_grad=True), + norm_eval=True, + style="pytorch", + ), + neck=dict( + type="FPN", + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, + ), + # skip pretrained model for tests + # pretrained_backbone="torchvision://resnet50", + output_shapes=[ShapeSpec(channels=256, stride=s) for s in [4, 8, 16, 32, 64]], + output_names=["p2", "p3", "p4", "p5", "p6"], + ) + + def test_detector(self): + # a basic R50 Mask R-CNN + MMDetDetector( + detector=dict( + type="MaskRCNN", + backbone=dict( + type="ResNet", + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type="BN", requires_grad=True), + norm_eval=True, + style="pytorch", + # skip pretrained model for tests + # init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) + ), + neck=dict( + type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5 + ), + rpn_head=dict( + type="RPNHead", + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type="AnchorGenerator", + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64], + ), + bbox_coder=dict( + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + ), + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", loss_weight=1.0), + ), + roi_head=dict( + type="StandardRoIHead", + bbox_roi_extractor=dict( + type="SingleRoIExtractor", + roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + ), + bbox_head=dict( + type="Shared2FCBBoxHead", + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + ), + reg_class_agnostic=False, + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", loss_weight=1.0), + ), + mask_roi_extractor=dict( + type="SingleRoIExtractor", + roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32], + ), + mask_head=dict( + type="FCNMaskHead", + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0), + ), + ), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type="MaxIoUAssigner", + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1, + ), + sampler=dict( + type="RandomSampler", + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False, + ), + allowed_border=-1, + pos_weight=-1, + debug=False, + ), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type="nms", iou_threshold=0.7), + min_bbox_size=0, + 
), + rcnn=dict( + assigner=dict( + type="MaxIoUAssigner", + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1, + ), + sampler=dict( + type="RandomSampler", + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True, + ), + mask_size=28, + pos_weight=-1, + debug=False, + ), + ), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type="nms", iou_threshold=0.7), + min_bbox_size=0, + ), + rcnn=dict( + score_thr=0.05, + nms=dict(type="nms", iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5, + ), + ), + ), + pixel_mean=[1, 2, 3], + pixel_std=[1, 2, 3], + ) diff --git a/data_processing/detectron2/tests/modeling/test_model_e2e.py b/data_processing/detectron2/tests/modeling/test_model_e2e.py new file mode 100644 index 0000000..8c07e68 --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_model_e2e.py @@ -0,0 +1,227 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + + +import itertools +import unittest +from contextlib import contextmanager +from copy import deepcopy +import torch + +from detectron2.structures import BitMasks, Boxes, ImageList, Instances +from detectron2.utils.events import EventStorage +from detectron2.utils.testing import get_model_no_weights + + +@contextmanager +def typecheck_hook(model, *, in_dtype=None, out_dtype=None): + """ + Check that the model must be called with the given input/output dtype + """ + if not isinstance(in_dtype, set): + in_dtype = {in_dtype} + if not isinstance(out_dtype, set): + out_dtype = {out_dtype} + + def flatten(x): + if isinstance(x, torch.Tensor): + return [x] + if isinstance(x, (list, tuple)): + return list(itertools.chain(*[flatten(t) for t in x])) + if isinstance(x, dict): + return flatten(list(x.values())) + return [] + + def hook(module, input, output): + if in_dtype is not None: + dtypes = {x.dtype for x in flatten(input)} + assert ( + dtypes == in_dtype + ), f"Expected input dtype of {type(module)} is {in_dtype}. Got {dtypes} instead!" + + if out_dtype is not None: + dtypes = {x.dtype for x in flatten(output)} + assert ( + dtypes == out_dtype + ), f"Expected output dtype of {type(module)} is {out_dtype}. Got {dtypes} instead!" 
+ + with model.register_forward_hook(hook): + yield + + +def create_model_input(img, inst=None): + if inst is not None: + return {"image": img, "instances": inst} + else: + return {"image": img} + + +def get_empty_instance(h, w): + inst = Instances((h, w)) + inst.gt_boxes = Boxes(torch.rand(0, 4)) + inst.gt_classes = torch.tensor([]).to(dtype=torch.int64) + inst.gt_masks = BitMasks(torch.rand(0, h, w)) + return inst + + +def get_regular_bitmask_instances(h, w): + inst = Instances((h, w)) + inst.gt_boxes = Boxes(torch.rand(3, 4)) + inst.gt_boxes.tensor[:, 2:] += inst.gt_boxes.tensor[:, :2] + inst.gt_classes = torch.tensor([3, 4, 5]).to(dtype=torch.int64) + inst.gt_masks = BitMasks((torch.rand(3, h, w) > 0.5)) + return inst + + +class InstanceModelE2ETest: + def setUp(self): + torch.manual_seed(43) + self.model = get_model_no_weights(self.CONFIG_PATH) + + def _test_eval(self, input_sizes): + inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] + self.model.eval() + self.model(inputs) + + def _test_train(self, input_sizes, instances): + assert len(input_sizes) == len(instances) + inputs = [ + create_model_input(torch.rand(3, s[0], s[1]), inst) + for s, inst in zip(input_sizes, instances) + ] + self.model.train() + with EventStorage(): + losses = self.model(inputs) + sum(losses.values()).backward() + del losses + + def _inf_tensor(self, *shape): + return 1.0 / torch.zeros(*shape, device=self.model.device) + + def _nan_tensor(self, *shape): + return torch.zeros(*shape, device=self.model.device).fill_(float("nan")) + + def test_empty_data(self): + instances = [get_empty_instance(200, 250), get_empty_instance(200, 249)] + self._test_eval([(200, 250), (200, 249)]) + self._test_train([(200, 250), (200, 249)], instances) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_eval_tocpu(self): + model = deepcopy(self.model).cpu() + model.eval() + input_sizes = [(200, 250), (200, 249)] + inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] + model(inputs) + + +class MaskRCNNE2ETest(InstanceModelE2ETest, unittest.TestCase): + CONFIG_PATH = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + def test_half_empty_data(self): + instances = [get_empty_instance(200, 250), get_regular_bitmask_instances(200, 249)] + self._test_train([(200, 250), (200, 249)], instances) + + # This test is flaky because in some environment the output features are zero due to relu + # def test_rpn_inf_nan_data(self): + # self.model.eval() + # for tensor in [self._inf_tensor, self._nan_tensor]: + # images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) + # features = { + # "p2": tensor(1, 256, 256, 256), + # "p3": tensor(1, 256, 128, 128), + # "p4": tensor(1, 256, 64, 64), + # "p5": tensor(1, 256, 32, 32), + # "p6": tensor(1, 256, 16, 16), + # } + # props, _ = self.model.proposal_generator(images, features) + # self.assertEqual(len(props[0]), 0) + + def test_roiheads_inf_nan_data(self): + self.model.eval() + for tensor in [self._inf_tensor, self._nan_tensor]: + images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) + features = { + "p2": tensor(1, 256, 256, 256), + "p3": tensor(1, 256, 128, 128), + "p4": tensor(1, 256, 64, 64), + "p5": tensor(1, 256, 32, 32), + "p6": tensor(1, 256, 16, 16), + } + props = [Instances((510, 510))] + props[0].proposal_boxes = Boxes([[10, 10, 20, 20]]).to(device=self.model.device) + props[0].objectness_logits = torch.tensor([1.0]).reshape(1, 1) + det, _ = self.model.roi_heads(images, features, props) + 
self.assertEqual(len(det[0]), 0) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_autocast(self): + from torch.cuda.amp import autocast + + inputs = [{"image": torch.rand(3, 100, 100)}] + self.model.eval() + with autocast(), typecheck_hook( + self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16 + ), typecheck_hook( + self.model.roi_heads.box_predictor, in_dtype=torch.float16, out_dtype=torch.float16 + ): + out = self.model.inference(inputs, do_postprocess=False)[0] + self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32) + self.assertEqual(out.pred_masks.dtype, torch.float16) + self.assertEqual(out.scores.dtype, torch.float32) # scores comes from softmax + + +class RetinaNetE2ETest(InstanceModelE2ETest, unittest.TestCase): + CONFIG_PATH = "COCO-Detection/retinanet_R_50_FPN_1x.yaml" + + def test_inf_nan_data(self): + self.model.eval() + self.model.score_threshold = -999999999 + for tensor in [self._inf_tensor, self._nan_tensor]: + images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) + features = [ + tensor(1, 256, 128, 128), + tensor(1, 256, 64, 64), + tensor(1, 256, 32, 32), + tensor(1, 256, 16, 16), + tensor(1, 256, 8, 8), + ] + pred_logits, pred_anchor_deltas = self.model.head(features) + pred_logits = [tensor(*x.shape) for x in pred_logits] + pred_anchor_deltas = [tensor(*x.shape) for x in pred_anchor_deltas] + det = self.model.forward_inference(images, features, [pred_logits, pred_anchor_deltas]) + # all predictions (if any) are infinite or nan + if len(det[0]): + self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_autocast(self): + from torch.cuda.amp import autocast + + inputs = [{"image": torch.rand(3, 100, 100)}] + self.model.eval() + with autocast(), typecheck_hook( + self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16 + ), typecheck_hook(self.model.head, in_dtype=torch.float16, out_dtype=torch.float16): + out = self.model(inputs)[0]["instances"] + self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32) + self.assertEqual(out.scores.dtype, torch.float16) + + +class FCOSE2ETest(InstanceModelE2ETest, unittest.TestCase): + CONFIG_PATH = "COCO-Detection/fcos_R_50_FPN_1x.py" + + +class SemSegE2ETest(unittest.TestCase): + CONFIG_PATH = "Misc/semantic_R_50_FPN_1x.yaml" + + def setUp(self): + torch.manual_seed(43) + self.model = get_model_no_weights(self.CONFIG_PATH) + + def _test_eval(self, input_sizes): + inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] + self.model.eval() + self.model(inputs) + + def test_forward(self): + self._test_eval([(200, 250), (200, 249)]) diff --git a/data_processing/detectron2/tests/modeling/test_roi_heads.py b/data_processing/detectron2/tests/modeling/test_roi_heads.py new file mode 100644 index 0000000..86360e1 --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_roi_heads.py @@ -0,0 +1,323 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
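`typecheck_hook` above leans on `torch.nn.Module.register_forward_hook`, which fires after every forward pass with the module, its positional inputs, and its output, so dtype expectations can be asserted without touching the model code. Below is a minimal standalone version of the same idea; `assert_dtypes`, the toy layer, and the dtypes are chosen for illustration only.

```python
import torch
from torch import nn

def assert_dtypes(module: nn.Module, in_dtype: torch.dtype, out_dtype: torch.dtype):
    """Attach a forward hook that checks input/output dtypes; returns the hook handle."""
    def hook(mod, inputs, output):
        assert all(t.dtype == in_dtype for t in inputs if isinstance(t, torch.Tensor)), \
            f"{type(mod).__name__}: expected inputs of dtype {in_dtype}"
        assert output.dtype == out_dtype, \
            f"{type(mod).__name__}: expected output of dtype {out_dtype}"
    return module.register_forward_hook(hook)

layer = nn.Linear(4, 2)
handle = assert_dtypes(layer, in_dtype=torch.float32, out_dtype=torch.float32)
layer(torch.rand(3, 4))  # hook runs silently; a wrong dtype would raise AssertionError
handle.remove()          # detach the check once it is no longer needed
```

In the autocast tests the same mechanism is pointed at the backbone and the prediction heads, with `in_dtype`/`out_dtype` set to `float16` wherever autocast is expected to have downcast the activations.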
+import logging +import unittest +from copy import deepcopy +import torch +from torch import nn + +from detectron2 import model_zoo +from detectron2.config import get_cfg +from detectron2.export.torchscript_patch import ( + freeze_training_mode, + patch_builtin_len, + patch_instances, +) +from detectron2.layers import ShapeSpec +from detectron2.modeling.proposal_generator.build import build_proposal_generator +from detectron2.modeling.roi_heads import ( + FastRCNNConvFCHead, + KRCNNConvDeconvUpsampleHead, + MaskRCNNConvUpsampleHead, + StandardROIHeads, + build_roi_heads, +) +from detectron2.projects import point_rend +from detectron2.structures import BitMasks, Boxes, ImageList, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage +from detectron2.utils.testing import assert_instances_allclose, random_boxes + +logger = logging.getLogger(__name__) + +""" +Make sure the losses of ROIHeads/RPN do not change, to avoid +breaking the forward logic by mistake. +This relies on assumption that pytorch's RNG is stable. +""" + + +class ROIHeadsTest(unittest.TestCase): + def test_roi_heads(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" + cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2 + cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5) + cfg.MODEL.MASK_ON = True + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)} + + image_shape = (15, 15) + gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + gt_instance0 = Instances(image_shape) + gt_instance0.gt_boxes = Boxes(gt_boxes0) + gt_instance0.gt_classes = torch.tensor([2, 1]) + gt_instance0.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5) + gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32) + gt_instance1 = Instances(image_shape) + gt_instance1.gt_boxes = Boxes(gt_boxes1) + gt_instance1.gt_classes = torch.tensor([1, 2]) + gt_instance1.gt_masks = BitMasks(torch.rand((2,) + image_shape) > 0.5) + gt_instances = [gt_instance0, gt_instance1] + + proposal_generator = build_proposal_generator(cfg, feature_shape) + roi_heads = StandardROIHeads(cfg, feature_shape) + + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, gt_instances) + _, detector_losses = roi_heads(images, features, proposals, gt_instances) + + detector_losses.update(proposal_losses) + expected_losses = { + "loss_cls": 4.5253729820251465, + "loss_box_reg": 0.009785720147192478, + "loss_mask": 0.693184494972229, + "loss_rpn_cls": 0.08186662942171097, + "loss_rpn_loc": 0.1104838103055954, + } + succ = all( + torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0))) + for name in detector_losses.keys() + ) + self.assertTrue( + succ, + "Losses has changed! 
New losses: {}".format( + {k: v.item() for k, v in detector_losses.items()} + ), + ) + + def test_rroi_heads(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN" + cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator" + cfg.MODEL.ROI_HEADS.NAME = "RROIHeads" + cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" + cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2 + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) + cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead" + cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated" + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)} + + image_shape = (15, 15) + gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32) + gt_instance0 = Instances(image_shape) + gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0) + gt_instance0.gt_classes = torch.tensor([2, 1]) + gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, -50]], dtype=torch.float32) + gt_instance1 = Instances(image_shape) + gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1) + gt_instance1.gt_classes = torch.tensor([1, 2]) + gt_instances = [gt_instance0, gt_instance1] + + proposal_generator = build_proposal_generator(cfg, feature_shape) + roi_heads = build_roi_heads(cfg, feature_shape) + + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, gt_instances) + _, detector_losses = roi_heads(images, features, proposals, gt_instances) + + detector_losses.update(proposal_losses) + expected_losses = { + "loss_cls": 4.365657806396484, + "loss_box_reg": 0.0015851043863222003, + "loss_rpn_cls": 0.2427729219198227, + "loss_rpn_loc": 0.3646621108055115, + } + succ = all( + torch.allclose(detector_losses[name], torch.tensor(expected_losses.get(name, 0.0))) + for name in detector_losses.keys() + ) + self.assertTrue( + succ, + "Losses has changed! 
New losses: {}".format( + {k: v.item() for k, v in detector_losses.items()} + ), + ) + + def test_box_head_scriptability(self): + input_shape = ShapeSpec(channels=1024, height=14, width=14) + box_features = torch.randn(4, 1024, 14, 14) + + box_head = FastRCNNConvFCHead( + input_shape, conv_dims=[512, 512], fc_dims=[1024, 1024] + ).eval() + script_box_head = torch.jit.script(box_head) + + origin_output = box_head(box_features) + script_output = script_box_head(box_features) + self.assertTrue(torch.equal(origin_output, script_output)) + + def test_mask_head_scriptability(self): + input_shape = ShapeSpec(channels=1024) + mask_features = torch.randn(4, 1024, 14, 14) + + image_shapes = [(10, 10), (15, 15)] + pred_instance0 = Instances(image_shapes[0]) + pred_classes0 = torch.tensor([1, 2, 3], dtype=torch.int64) + pred_instance0.pred_classes = pred_classes0 + pred_instance1 = Instances(image_shapes[1]) + pred_classes1 = torch.tensor([4], dtype=torch.int64) + pred_instance1.pred_classes = pred_classes1 + + mask_head = MaskRCNNConvUpsampleHead( + input_shape, num_classes=80, conv_dims=[256, 256] + ).eval() + # pred_instance will be in-place changed during the inference + # process of `MaskRCNNConvUpsampleHead` + origin_outputs = mask_head(mask_features, deepcopy([pred_instance0, pred_instance1])) + + fields = {"pred_masks": torch.Tensor, "pred_classes": torch.Tensor} + with freeze_training_mode(mask_head), patch_instances(fields) as NewInstances: + sciript_mask_head = torch.jit.script(mask_head) + pred_instance0 = NewInstances.from_instances(pred_instance0) + pred_instance1 = NewInstances.from_instances(pred_instance1) + script_outputs = sciript_mask_head(mask_features, [pred_instance0, pred_instance1]) + + for origin_ins, script_ins in zip(origin_outputs, script_outputs): + assert_instances_allclose(origin_ins, script_ins, rtol=0) + + def test_keypoint_head_scriptability(self): + input_shape = ShapeSpec(channels=1024, height=14, width=14) + keypoint_features = torch.randn(4, 1024, 14, 14) + + image_shapes = [(10, 10), (15, 15)] + pred_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6], [1, 5, 2, 8]], dtype=torch.float32) + pred_instance0 = Instances(image_shapes[0]) + pred_instance0.pred_boxes = Boxes(pred_boxes0) + pred_boxes1 = torch.tensor([[7, 3, 10, 5]], dtype=torch.float32) + pred_instance1 = Instances(image_shapes[1]) + pred_instance1.pred_boxes = Boxes(pred_boxes1) + + keypoint_head = KRCNNConvDeconvUpsampleHead( + input_shape, num_keypoints=17, conv_dims=[512, 512] + ).eval() + origin_outputs = keypoint_head( + keypoint_features, deepcopy([pred_instance0, pred_instance1]) + ) + + fields = { + "pred_boxes": Boxes, + "pred_keypoints": torch.Tensor, + "pred_keypoint_heatmaps": torch.Tensor, + } + with freeze_training_mode(keypoint_head), patch_instances(fields) as NewInstances: + script_keypoint_head = torch.jit.script(keypoint_head) + pred_instance0 = NewInstances.from_instances(pred_instance0) + pred_instance1 = NewInstances.from_instances(pred_instance1) + script_outputs = script_keypoint_head( + keypoint_features, [pred_instance0, pred_instance1] + ) + + for origin_ins, script_ins in zip(origin_outputs, script_outputs): + assert_instances_allclose(origin_ins, script_ins, rtol=0) + + def test_StandardROIHeads_scriptability(self): + cfg = get_cfg() + cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" + cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2 + cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5) + cfg.MODEL.MASK_ON = True + 
cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.01 + cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.01 + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + feature_shape = {"res4": ShapeSpec(channels=num_channels, stride=16)} + + roi_heads = StandardROIHeads(cfg, feature_shape).eval() + + proposal0 = Instances(image_sizes[0]) + proposal_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + proposal0.proposal_boxes = Boxes(proposal_boxes0) + proposal0.objectness_logits = torch.tensor([0.5, 0.7], dtype=torch.float32) + + proposal1 = Instances(image_sizes[1]) + proposal_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32) + proposal1.proposal_boxes = Boxes(proposal_boxes1) + proposal1.objectness_logits = torch.tensor([0.1, 0.9], dtype=torch.float32) + proposals = [proposal0, proposal1] + + pred_instances, _ = roi_heads(images, features, proposals) + fields = { + "objectness_logits": torch.Tensor, + "proposal_boxes": Boxes, + "pred_classes": torch.Tensor, + "scores": torch.Tensor, + "pred_masks": torch.Tensor, + "pred_boxes": Boxes, + "pred_keypoints": torch.Tensor, + "pred_keypoint_heatmaps": torch.Tensor, + } + with freeze_training_mode(roi_heads), patch_instances(fields) as new_instances: + proposal0 = new_instances.from_instances(proposal0) + proposal1 = new_instances.from_instances(proposal1) + proposals = [proposal0, proposal1] + scripted_rot_heads = torch.jit.script(roi_heads) + scripted_pred_instances, _ = scripted_rot_heads(images, features, proposals) + + for instance, scripted_instance in zip(pred_instances, scripted_pred_instances): + assert_instances_allclose(instance, scripted_instance, rtol=0) + + def test_PointRend_mask_head_tracing(self): + cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml") + point_rend.add_pointrend_config(cfg) + cfg.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3"] + cfg.MODEL.ROI_MASK_HEAD.NAME = "PointRendMaskHead" + cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "" + cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = True + chan = 256 + head = point_rend.PointRendMaskHead( + cfg, + { + "p2": ShapeSpec(channels=chan, stride=4), + "p3": ShapeSpec(channels=chan, stride=8), + }, + ) + + def gen_inputs(h, w, N): + p2 = torch.rand(1, chan, h, w) + p3 = torch.rand(1, chan, h // 2, w // 2) + boxes = random_boxes(N, max_coord=h) + return p2, p3, boxes + + class Wrap(nn.ModuleDict): + def forward(self, p2, p3, boxes): + features = { + "p2": p2, + "p3": p3, + } + inst = Instances((p2.shape[2] * 4, p2.shape[3] * 4)) + inst.pred_boxes = Boxes(boxes) + inst.pred_classes = torch.zeros(inst.__len__(), dtype=torch.long) + out = self.head(features, [inst])[0] + return out.pred_masks + + model = Wrap({"head": head}) + model.eval() + with torch.no_grad(), patch_builtin_len(): + traced = torch.jit.trace(model, gen_inputs(302, 208, 20)) + inputs = gen_inputs(100, 120, 30) + out_eager = model(*inputs) + out_trace = traced(*inputs) + self.assertTrue(torch.allclose(out_eager, out_trace)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/modeling/test_roi_pooler.py b/data_processing/detectron2/tests/modeling/test_roi_pooler.py new file mode 100644 index 0000000..e1d7c1c --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_roi_pooler.py @@ -0,0 +1,165 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. +import logging +import unittest +import torch + +from detectron2.modeling.poolers import ROIPooler +from detectron2.structures import Boxes, RotatedBoxes +from detectron2.utils.testing import random_boxes + +logger = logging.getLogger(__name__) + + +class TestROIPooler(unittest.TestCase): + def _test_roialignv2_roialignrotated_match(self, device): + pooler_resolution = 14 + canonical_level = 4 + canonical_scale_factor = 2**canonical_level + pooler_scales = (1.0 / canonical_scale_factor,) + sampling_ratio = 0 + + N, C, H, W = 2, 4, 10, 8 + N_rois = 10 + std = 11 + mean = 0 + feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean + + features = [feature.to(device)] + + rois = [] + rois_rotated = [] + for _ in range(N): + boxes = random_boxes(N_rois, W * canonical_scale_factor) + rotated_boxes = torch.zeros(N_rois, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + rois.append(Boxes(boxes).to(device)) + rois_rotated.append(RotatedBoxes(rotated_boxes).to(device)) + + roialignv2_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlignV2", + ) + + roialignv2_out = roialignv2_pooler(features, rois) + + roialignrotated_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlignRotated", + ) + + roialignrotated_out = roialignrotated_pooler(features, rois_rotated) + + self.assertTrue(torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4)) + + def test_roialignv2_roialignrotated_match_cpu(self): + self._test_roialignv2_roialignrotated_match(device="cpu") + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_roialignv2_roialignrotated_match_cuda(self): + self._test_roialignv2_roialignrotated_match(device="cuda") + + def _test_scriptability(self, device): + pooler_resolution = 14 + canonical_level = 4 + canonical_scale_factor = 2**canonical_level + pooler_scales = (1.0 / canonical_scale_factor,) + sampling_ratio = 0 + + N, C, H, W = 2, 4, 10, 8 + N_rois = 10 + std = 11 + mean = 0 + feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean + + features = [feature.to(device)] + + rois = [] + for _ in range(N): + boxes = random_boxes(N_rois, W * canonical_scale_factor) + + rois.append(Boxes(boxes).to(device)) + + roialignv2_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlignV2", + ) + + roialignv2_out = roialignv2_pooler(features, rois) + scripted_roialignv2_out = torch.jit.script(roialignv2_pooler)(features, rois) + self.assertTrue(torch.equal(roialignv2_out, scripted_roialignv2_out)) + + def test_scriptability_cpu(self): + self._test_scriptability(device="cpu") + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_scriptability_gpu(self): + self._test_scriptability(device="cuda") + + def test_no_images(self): + N, C, H, W = 0, 32, 32, 32 + feature = torch.rand(N, C, H, W) - 0.5 + features = [feature] + pooler = ROIPooler( + output_size=14, scales=(1.0,), sampling_ratio=0.0, pooler_type="ROIAlignV2" + ) + output = pooler.forward(features, []) + self.assertEqual(output.shape, (0, C, 14, 14)) + + def test_roi_pooler_tracing(self): + class Model(torch.nn.Module): + def __init__(self, roi): + super(Model, 
self).__init__() + self.roi = roi + + def forward(self, x, boxes): + return self.roi(x, [Boxes(boxes)]) + + pooler_resolution = 14 + canonical_level = 4 + canonical_scale_factor = 2**canonical_level + pooler_scales = (1.0 / canonical_scale_factor, 0.5 / canonical_scale_factor) + sampling_ratio = 0 + + N, C, H, W = 1, 4, 10, 8 + N_rois = 10 + std = 11 + mean = 0 + feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean + feature = [feature, feature] + + rois = random_boxes(N_rois, W * canonical_scale_factor) + # Add one larger box so that this level has only one box. + # This may trigger the bug https://github.com/pytorch/pytorch/issues/49852 + # that we shall workaround. + rois = torch.cat([rois, torch.tensor([[0, 0, 448, 448]])]) + + model = Model( + ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlign", + ) + ) + + with torch.no_grad(): + func = torch.jit.trace(model, (feature, rois)) + o = func(feature, rois) + self.assertEqual(o.shape, (11, 4, 14, 14)) + o = func(feature, rois[:5]) + self.assertEqual(o.shape, (5, 4, 14, 14)) + o = func(feature, random_boxes(20, W * canonical_scale_factor)) + self.assertEqual(o.shape, (20, 4, 14, 14)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/modeling/test_rpn.py b/data_processing/detectron2/tests/modeling/test_rpn.py new file mode 100644 index 0000000..f14faae --- /dev/null +++ b/data_processing/detectron2/tests/modeling/test_rpn.py @@ -0,0 +1,262 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.export import scripting_with_instances +from detectron2.layers import ShapeSpec +from detectron2.modeling.backbone import build_backbone +from detectron2.modeling.proposal_generator import RPN, build_proposal_generator +from detectron2.modeling.proposal_generator.proposal_utils import ( + add_ground_truth_to_proposals, + find_top_rpn_proposals, +) +from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class RPNTest(unittest.TestCase): + def get_gt_and_features(self): + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + image_shape = (15, 15) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + gt_instances = Instances(image_shape) + gt_instances.gt_boxes = Boxes(gt_boxes) + return (gt_instances, features, images, image_sizes) + + def test_rpn(self): + torch.manual_seed(121) + cfg = get_cfg() + backbone = build_backbone(cfg) + proposal_generator = RPN(cfg, backbone.output_shape()) + (gt_instances, features, images, image_sizes) = self.get_gt_and_features() + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator( + images, features, [gt_instances[0], gt_instances[1]] + ) + + expected_losses = { + "loss_rpn_cls": torch.tensor(0.08011703193), + "loss_rpn_loc": torch.tensor(0.101470276), + } + for name in expected_losses.keys(): + err_msg = "proposal_losses[{}] = {}, expected losses = {}".format( + name, proposal_losses[name], expected_losses[name] + ) + 
self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) + + self.assertEqual(len(proposals), len(image_sizes)) + for proposal, im_size in zip(proposals, image_sizes): + self.assertEqual(proposal.image_size, im_size) + + expected_proposal_box = torch.tensor([[0, 0, 10, 10], [7.2702, 0, 10, 10]]) + expected_objectness_logit = torch.tensor([0.1596, -0.0007]) + self.assertTrue( + torch.allclose(proposals[0].proposal_boxes.tensor, expected_proposal_box, atol=1e-4) + ) + self.assertTrue( + torch.allclose(proposals[0].objectness_logits, expected_objectness_logit, atol=1e-4) + ) + + def verify_rpn(self, conv_dims, expected_conv_dims): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.RPN.CONV_DIMS = conv_dims + backbone = build_backbone(cfg) + proposal_generator = RPN(cfg, backbone.output_shape()) + for k, conv in enumerate(proposal_generator.rpn_head.conv): + self.assertEqual(expected_conv_dims[k], conv.out_channels) + return proposal_generator + + def test_rpn_larger_num_convs(self): + conv_dims = [64, 64, 64, 64, 64] + proposal_generator = self.verify_rpn(conv_dims, conv_dims) + (gt_instances, features, images, image_sizes) = self.get_gt_and_features() + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator( + images, features, [gt_instances[0], gt_instances[1]] + ) + expected_losses = { + "loss_rpn_cls": torch.tensor(0.08122821152), + "loss_rpn_loc": torch.tensor(0.10064548254), + } + for name in expected_losses.keys(): + err_msg = "proposal_losses[{}] = {}, expected losses = {}".format( + name, proposal_losses[name], expected_losses[name] + ) + self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) + + def test_rpn_conv_dims_not_set(self): + conv_dims = [-1, -1, -1] + expected_conv_dims = [1024, 1024, 1024] + self.verify_rpn(conv_dims, expected_conv_dims) + + def test_rpn_scriptability(self): + cfg = get_cfg() + proposal_generator = RPN(cfg, {"res4": ShapeSpec(channels=1024, stride=16)}).eval() + num_images = 2 + images_tensor = torch.rand(num_images, 30, 40) + image_sizes = [(32, 32), (30, 40)] + images = ImageList(images_tensor, image_sizes) + features = {"res4": torch.rand(num_images, 1024, 1, 2)} + + fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor} + proposal_generator_ts = scripting_with_instances(proposal_generator, fields) + + proposals, _ = proposal_generator(images, features) + proposals_ts, _ = proposal_generator_ts(images, features) + + for proposal, proposal_ts in zip(proposals, proposals_ts): + self.assertEqual(proposal.image_size, proposal_ts.image_size) + self.assertTrue( + torch.equal(proposal.proposal_boxes.tensor, proposal_ts.proposal_boxes.tensor) + ) + self.assertTrue(torch.equal(proposal.objectness_logits, proposal_ts.objectness_logits)) + + def test_rrpn(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN" + cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator" + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]] + cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]] + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) + cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead" + backbone = build_backbone(cfg) + proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + 
image_shape = (15, 15) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32) + gt_instances = Instances(image_shape) + gt_instances.gt_boxes = RotatedBoxes(gt_boxes) + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator( + images, features, [gt_instances[0], gt_instances[1]] + ) + + expected_losses = { + "loss_rpn_cls": torch.tensor(0.04291602224), + "loss_rpn_loc": torch.tensor(0.145077362), + } + for name in expected_losses.keys(): + err_msg = "proposal_losses[{}] = {}, expected losses = {}".format( + name, proposal_losses[name], expected_losses[name] + ) + self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) + + expected_proposal_box = torch.tensor( + [ + [-1.77999556, 0.78155339, 68.04367828, 14.78156471, 60.59333801], + [13.82740974, -1.50282836, 34.67269897, 29.19676590, -3.81942749], + [8.10392570, -0.99071521, 145.39100647, 32.13126373, 3.67242432], + [5.00000000, 4.57370186, 10.00000000, 9.14740372, 0.89196777], + ] + ) + + expected_objectness_logit = torch.tensor([0.10924313, 0.09881870, 0.07649877, 0.05858029]) + + torch.set_printoptions(precision=8, sci_mode=False) + + self.assertEqual(len(proposals), len(image_sizes)) + + proposal = proposals[0] + # It seems that there's some randomness in the result across different machines: + # This test can be run on a local machine for 100 times with exactly the same result, + # However, a different machine might produce slightly different results, + # thus the atol here. + err_msg = "computed proposal boxes = {}, expected {}".format( + proposal.proposal_boxes.tensor, expected_proposal_box + ) + self.assertTrue( + torch.allclose(proposal.proposal_boxes.tensor[:4], expected_proposal_box, atol=1e-5), + err_msg, + ) + + err_msg = "computed objectness logits = {}, expected {}".format( + proposal.objectness_logits, expected_objectness_logit + ) + self.assertTrue( + torch.allclose(proposal.objectness_logits[:4], expected_objectness_logit, atol=1e-5), + err_msg, + ) + + def test_find_rpn_proposals_inf(self): + N, Hi, Wi, A = 3, 3, 3, 3 + proposals = [torch.rand(N, Hi * Wi * A, 4)] + pred_logits = [torch.rand(N, Hi * Wi * A)] + pred_logits[0][1][3:5].fill_(float("inf")) + find_top_rpn_proposals(proposals, pred_logits, [(10, 10)], 0.5, 1000, 1000, 0, False) + + def test_find_rpn_proposals_tracing(self): + N, Hi, Wi, A = 3, 50, 50, 9 + proposal = torch.rand(N, Hi * Wi * A, 4) + pred_logit = torch.rand(N, Hi * Wi * A) + + def func(proposal, logit, image_size): + r = find_top_rpn_proposals( + [proposal], [logit], [image_size], 0.7, 1000, 1000, 0, False + )[0] + size = r.image_size + if not isinstance(size, torch.Tensor): + size = torch.tensor(size) + return (size, r.proposal_boxes.tensor, r.objectness_logits) + + other_inputs = [] + # test that it generalizes to other shapes + for Hi, Wi, shp in [(30, 30, 60), (10, 10, 800)]: + other_inputs.append( + ( + torch.rand(N, Hi * Wi * A, 4), + torch.rand(N, Hi * Wi * A), + torch.tensor([shp, shp]), + ) + ) + torch.jit.trace( + func, (proposal, pred_logit, torch.tensor([100, 100])), check_inputs=other_inputs + ) + + def test_append_gt_to_proposal(self): + proposals = Instances( + (10, 10), + **{ + "proposal_boxes": Boxes(torch.empty((0, 4))), + "objectness_logits": torch.tensor([]), + "custom_attribute": torch.tensor([]), + } + ) + gt_boxes = Boxes(torch.tensor([[0, 0, 1, 1]])) + + 
self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_boxes], [proposals]) + + gt_instances = Instances((10, 10)) + gt_instances.gt_boxes = gt_boxes + + self.assertRaises( + AssertionError, add_ground_truth_to_proposals, [gt_instances], [proposals] + ) + + gt_instances.custom_attribute = torch.tensor([1]) + gt_instances.custom_attribute2 = torch.tensor([1]) + new_proposals = add_ground_truth_to_proposals([gt_instances], [proposals])[0] + + self.assertEqual(new_proposals.custom_attribute[0], 1) + # new proposals should only include the attributes in proposals + self.assertRaises(AttributeError, lambda: new_proposals.custom_attribute2) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/structures/__init__.py b/data_processing/detectron2/tests/structures/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/tests/structures/test_boxes.py b/data_processing/detectron2/tests/structures/test_boxes.py new file mode 100644 index 0000000..1011918 --- /dev/null +++ b/data_processing/detectron2/tests/structures/test_boxes.py @@ -0,0 +1,223 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import json +import math +import numpy as np +import unittest +import torch + +from detectron2.structures import Boxes, BoxMode, pairwise_ioa, pairwise_iou +from detectron2.utils.testing import reload_script_model + + +class TestBoxMode(unittest.TestCase): + def _convert_xy_to_wh(self, x): + return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + + def _convert_xywha_to_xyxy(self, x): + return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS) + + def _convert_xywh_to_xywha(self, x): + return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS) + + def test_convert_int_mode(self): + BoxMode.convert([1, 2, 3, 4], 0, 1) + + def test_box_convert_list(self): + for tp in [list, tuple]: + box = tp([5.0, 5.0, 10.0, 10.0]) + output = self._convert_xy_to_wh(box) + self.assertIsInstance(output, tp) + self.assertIsInstance(output[0], float) + self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0])) + + with self.assertRaises(Exception): + self._convert_xy_to_wh([box]) + + def test_box_convert_array(self): + box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]]) + output = self._convert_xy_to_wh(box) + self.assertEqual(output.dtype, box.dtype) + self.assertEqual(output.shape, box.shape) + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + def test_box_convert_cpu_tensor(self): + box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]) + output = self._convert_xy_to_wh(box) + self.assertEqual(output.dtype, box.dtype) + self.assertEqual(output.shape, box.shape) + output = output.numpy() + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_box_convert_cuda_tensor(self): + box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda() + output = self._convert_xy_to_wh(box) + self.assertEqual(output.dtype, box.dtype) + self.assertEqual(output.shape, box.shape) + self.assertEqual(output.device, box.device) + output = output.cpu().numpy() + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + def test_box_convert_xywha_to_xyxy_list(self): + for tp in [list, tuple]: + box = tp([50, 50, 30, 20, 0]) + output = self._convert_xywha_to_xyxy(box) + self.assertIsInstance(output, tp) + 
self.assertEqual(output, tp([35, 40, 65, 60])) + + with self.assertRaises(Exception): + self._convert_xywha_to_xyxy([box]) + + def test_box_convert_xywha_to_xyxy_array(self): + for dtype in [np.float64, np.float32]: + box = np.asarray( + [ + [50, 50, 30, 20, 0], + [50, 50, 30, 20, 90], + [1, 1, math.sqrt(2), math.sqrt(2), -45], + ], + dtype=dtype, + ) + output = self._convert_xywha_to_xyxy(box) + self.assertEqual(output.dtype, box.dtype) + expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype) + self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_box_convert_xywha_to_xyxy_tensor(self): + for dtype in [torch.float32, torch.float64]: + box = torch.tensor( + [ + [50, 50, 30, 20, 0], + [50, 50, 30, 20, 90], + [1, 1, math.sqrt(2), math.sqrt(2), -45], + ], + dtype=dtype, + ) + output = self._convert_xywha_to_xyxy(box) + self.assertEqual(output.dtype, box.dtype) + expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype) + + self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_box_convert_xywh_to_xywha_list(self): + for tp in [list, tuple]: + box = tp([50, 50, 30, 20]) + output = self._convert_xywh_to_xywha(box) + self.assertIsInstance(output, tp) + self.assertEqual(output, tp([65, 60, 30, 20, 0])) + + with self.assertRaises(Exception): + self._convert_xywh_to_xywha([box]) + + def test_box_convert_xywh_to_xywha_array(self): + for dtype in [np.float64, np.float32]: + box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype) + output = self._convert_xywh_to_xywha(box) + self.assertEqual(output.dtype, box.dtype) + expected = np.asarray( + [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype + ) + self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_box_convert_xywh_to_xywha_tensor(self): + for dtype in [torch.float32, torch.float64]: + box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype) + output = self._convert_xywh_to_xywha(box) + self.assertEqual(output.dtype, box.dtype) + expected = torch.tensor( + [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype + ) + + self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_json_serializable(self): + payload = {"box_mode": BoxMode.XYWH_REL} + try: + json.dumps(payload) + except Exception: + self.fail("JSON serialization failed") + + def test_json_deserializable(self): + payload = '{"box_mode": 2}' + obj = json.loads(payload) + try: + obj["box_mode"] = BoxMode(obj["box_mode"]) + except Exception: + self.fail("JSON deserialization failed") + + +class TestBoxIOU(unittest.TestCase): + def create_boxes(self): + boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]) + + boxes2 = torch.tensor( + [ + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.5, 1.0], + [0.0, 0.0, 1.0, 0.5], + [0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 1.0], + [0.5, 0.5, 1.5, 1.5], + ] + ) + return boxes1, boxes2 + + def test_pairwise_iou(self): + boxes1, boxes2 = self.create_boxes() + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ] + ) + + ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_ioa(self): + boxes1, boxes2 = self.create_boxes() + expected_ioas = torch.tensor( + [[1.0, 1.0, 
1.0, 1.0, 1.0, 0.25], [1.0, 1.0, 1.0, 1.0, 1.0, 0.25]] + ) + ioas = pairwise_ioa(Boxes(boxes1), Boxes(boxes2)) + self.assertTrue(torch.allclose(ioas, expected_ioas)) + + +class TestBoxes(unittest.TestCase): + def test_empty_cat(self): + x = Boxes.cat([]) + self.assertTrue(x.tensor.shape, (0, 4)) + + def test_to(self): + x = Boxes(torch.rand(3, 4)) + self.assertEqual(x.to(device="cpu").tensor.device.type, "cpu") + + def test_scriptability(self): + def func(x): + boxes = Boxes(x) + test = boxes.to(torch.device("cpu")).tensor + return boxes.area(), test + + f = torch.jit.script(func) + f = reload_script_model(f) + f(torch.rand((3, 4))) + + data = torch.rand((3, 4)) + + def func_cat(x: torch.Tensor): + boxes1 = Boxes(x) + boxes2 = Boxes(x) + # boxes3 = Boxes.cat([boxes1, boxes2]) # this is not supported by torchsript for now. + boxes3 = boxes1.cat([boxes1, boxes2]) + return boxes3 + + f = torch.jit.script(func_cat) + script_box = f(data) + self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/structures/test_imagelist.py b/data_processing/detectron2/tests/structures/test_imagelist.py new file mode 100644 index 0000000..e446e44 --- /dev/null +++ b/data_processing/detectron2/tests/structures/test_imagelist.py @@ -0,0 +1,75 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import unittest +from typing import List, Sequence, Tuple +import torch + +from detectron2.structures import ImageList + + +class TestImageList(unittest.TestCase): + def test_imagelist_padding_tracing(self): + # test that the trace does not contain hard-coded constant sizes + def to_imagelist(tensors: Sequence[torch.Tensor]): + image_list = ImageList.from_tensors(tensors, 4) + return image_list.tensor, image_list.image_sizes + + def _tensor(*shape): + return torch.ones(shape, dtype=torch.float32) + + # test CHW (inputs needs padding vs. 
no padding) + for shape in [(3, 10, 10), (3, 12, 12)]: + func = torch.jit.trace(to_imagelist, ([_tensor(*shape)],)) + tensor, image_sizes = func([_tensor(3, 15, 20)]) + self.assertEqual(tensor.shape, (1, 3, 16, 20), tensor.shape) + self.assertEqual(image_sizes[0].tolist(), [15, 20], image_sizes[0]) + + # test HW + func = torch.jit.trace(to_imagelist, ([_tensor(10, 10)],)) + tensor, image_sizes = func([_tensor(15, 20)]) + self.assertEqual(tensor.shape, (1, 16, 20), tensor.shape) + self.assertEqual(image_sizes[0].tolist(), [15, 20], image_sizes[0]) + + # test 2x CHW + func = torch.jit.trace( + to_imagelist, + ([_tensor(3, 16, 10), _tensor(3, 13, 11)],), + ) + tensor, image_sizes = func([_tensor(3, 25, 20), _tensor(3, 10, 10)]) + self.assertEqual(tensor.shape, (2, 3, 28, 20), tensor.shape) + self.assertEqual(image_sizes[0].tolist(), [25, 20], image_sizes[0]) + self.assertEqual(image_sizes[1].tolist(), [10, 10], image_sizes[1]) + # support calling with different spatial sizes, but not with different #images + + def test_imagelist_scriptability(self): + image_nums = 2 + image_tensor = torch.randn((image_nums, 10, 20), dtype=torch.float32) + image_shape = [(10, 20)] * image_nums + + def f(image_tensor, image_shape: List[Tuple[int, int]]): + return ImageList(image_tensor, image_shape) + + ret = f(image_tensor, image_shape) + ret_script = torch.jit.script(f)(image_tensor, image_shape) + + self.assertEqual(len(ret), len(ret_script)) + for i in range(image_nums): + self.assertTrue(torch.equal(ret[i], ret_script[i])) + + def test_imagelist_from_tensors_scriptability(self): + image_tensor_0 = torch.randn(10, 20, dtype=torch.float32) + image_tensor_1 = torch.randn(12, 22, dtype=torch.float32) + inputs = [image_tensor_0, image_tensor_1] + + def f(image_tensor: List[torch.Tensor]): + return ImageList.from_tensors(image_tensor, 10) + + ret = f(inputs) + ret_script = torch.jit.script(f)(inputs) + + self.assertEqual(len(ret), len(ret_script)) + self.assertTrue(torch.equal(ret.tensor, ret_script.tensor)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/structures/test_instances.py b/data_processing/detectron2/tests/structures/test_instances.py new file mode 100644 index 0000000..a352f74 --- /dev/null +++ b/data_processing/detectron2/tests/structures/test_instances.py @@ -0,0 +1,219 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import unittest +import torch +from torch import Tensor + +from detectron2.export.torchscript import patch_instances +from detectron2.structures import Boxes, Instances +from detectron2.utils.testing import convert_scripted_instances + + +class TestInstances(unittest.TestCase): + def test_int_indexing(self): + attr1 = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 0.5], [0.0, 0.0, 1.0], [0.0, 0.5, 0.5]]) + attr2 = torch.tensor([0.1, 0.2, 0.3, 0.4]) + instances = Instances((100, 100)) + instances.attr1 = attr1 + instances.attr2 = attr2 + for i in range(-len(instances), len(instances)): + inst = instances[i] + self.assertEqual((inst.attr1 == attr1[i]).all(), True) + self.assertEqual((inst.attr2 == attr2[i]).all(), True) + + self.assertRaises(IndexError, lambda: instances[len(instances)]) + self.assertRaises(IndexError, lambda: instances[-len(instances) - 1]) + + def test_script_new_fields(self): + def get_mask(x: Instances) -> torch.Tensor: + return x.mask + + class f(torch.nn.Module): + def forward(self, x: Instances): + proposal_boxes = x.proposal_boxes # noqa F841 + objectness_logits = x.objectness_logits # noqa F841 + return x + + class g(torch.nn.Module): + def forward(self, x: Instances): + return get_mask(x) + + class g2(torch.nn.Module): + def __init__(self): + super().__init__() + self.g = g() + + def forward(self, x: Instances): + proposal_boxes = x.proposal_boxes # noqa F841 + return x, self.g(x) + + fields = {"proposal_boxes": Boxes, "objectness_logits": Tensor} + with patch_instances(fields): + torch.jit.script(f()) + + # can't script anymore after exiting the context + with self.assertRaises(Exception): + # will create a ConcreteType for g + torch.jit.script(g2()) + + new_fields = {"mask": Tensor} + with patch_instances(new_fields): + # will compile g with a different Instances; this should pass + torch.jit.script(g()) + with self.assertRaises(Exception): + torch.jit.script(g2()) + + new_fields = {"mask": Tensor, "proposal_boxes": Boxes} + with patch_instances(new_fields) as NewInstances: + # get_mask will be compiled with a different Instances; this should pass + scripted_g2 = torch.jit.script(g2()) + x = NewInstances((3, 4)) + x.mask = torch.rand(3) + x.proposal_boxes = Boxes(torch.rand(3, 4)) + scripted_g2(x) # it should accept the new Instances object and run successfully + + def test_script_access_fields(self): + class f(torch.nn.Module): + def forward(self, x: Instances): + proposal_boxes = x.proposal_boxes + objectness_logits = x.objectness_logits + return proposal_boxes.tensor + objectness_logits + + fields = {"proposal_boxes": Boxes, "objectness_logits": Tensor} + with patch_instances(fields): + torch.jit.script(f()) + + def test_script_len(self): + class f(torch.nn.Module): + def forward(self, x: Instances): + return len(x) + + class g(torch.nn.Module): + def forward(self, x: Instances): + return len(x) + + image_shape = (15, 15) + + fields = {"proposal_boxes": Boxes} + with patch_instances(fields) as new_instance: + script_module = torch.jit.script(f()) + x = new_instance(image_shape) + with self.assertRaises(Exception): + script_module(x) + box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]) + x.proposal_boxes = Boxes(box_tensors) + length = script_module(x) + self.assertEqual(length, 2) + + fields = {"objectness_logits": Tensor} + with patch_instances(fields) as new_instance: + script_module = torch.jit.script(g()) + x = new_instance(image_shape) + objectness_logits = torch.tensor([1.0]).reshape(1, 1) + x.objectness_logits = objectness_logits + length = 
script_module(x) + self.assertEqual(length, 1) + + def test_script_has(self): + class f(torch.nn.Module): + def forward(self, x: Instances): + return x.has("proposal_boxes") + + image_shape = (15, 15) + fields = {"proposal_boxes": Boxes} + with patch_instances(fields) as new_instance: + script_module = torch.jit.script(f()) + x = new_instance(image_shape) + self.assertFalse(script_module(x)) + + box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]) + x.proposal_boxes = Boxes(box_tensors) + self.assertTrue(script_module(x)) + + def test_script_to(self): + class f(torch.nn.Module): + def forward(self, x: Instances): + return x.to(torch.device("cpu")) + + image_shape = (15, 15) + fields = {"proposal_boxes": Boxes, "a": Tensor} + with patch_instances(fields) as new_instance: + script_module = torch.jit.script(f()) + x = new_instance(image_shape) + script_module(x) + + box_tensors = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]) + x.proposal_boxes = Boxes(box_tensors) + x.a = box_tensors + script_module(x) + + def test_script_getitem(self): + class f(torch.nn.Module): + def forward(self, x: Instances, idx): + return x[idx] + + image_shape = (15, 15) + fields = {"proposal_boxes": Boxes, "a": Tensor} + inst = Instances(image_shape) + inst.proposal_boxes = Boxes(torch.rand(4, 4)) + inst.a = torch.rand(4, 10) + idx = torch.tensor([True, False, True, False]) + with patch_instances(fields) as new_instance: + script_module = torch.jit.script(f()) + + out = f()(inst, idx) + out_scripted = script_module(new_instance.from_instances(inst), idx) + self.assertTrue( + torch.equal(out.proposal_boxes.tensor, out_scripted.proposal_boxes.tensor) + ) + self.assertTrue(torch.equal(out.a, out_scripted.a)) + + def test_from_to_instances(self): + orig = Instances((30, 30)) + orig.proposal_boxes = Boxes(torch.rand(3, 4)) + + fields = {"proposal_boxes": Boxes, "a": Tensor} + with patch_instances(fields) as NewInstances: + # convert to NewInstances and back + new1 = NewInstances.from_instances(orig) + new2 = convert_scripted_instances(new1) + self.assertTrue(torch.equal(orig.proposal_boxes.tensor, new1.proposal_boxes.tensor)) + self.assertTrue(torch.equal(orig.proposal_boxes.tensor, new2.proposal_boxes.tensor)) + + def test_script_init_args(self): + def f(x: Tensor): + image_shape = (15, 15) + # __init__ can take arguments + inst = Instances(image_shape, a=x, proposal_boxes=Boxes(x)) + inst2 = Instances(image_shape, a=x) + return inst.a, inst2.a + + fields = {"proposal_boxes": Boxes, "a": Tensor} + with patch_instances(fields): + script_f = torch.jit.script(f) + x = torch.randn(3, 4) + outputs = script_f(x) + self.assertTrue(torch.equal(outputs[0], x)) + self.assertTrue(torch.equal(outputs[1], x)) + + def test_script_cat(self): + def f(x: Tensor): + image_shape = (15, 15) + # __init__ can take arguments + inst = Instances(image_shape, a=x) + inst2 = Instances(image_shape, a=x) + + inst3 = Instances(image_shape, proposal_boxes=Boxes(x)) + return inst.cat([inst, inst2]), inst3.cat([inst3, inst3]) + + fields = {"proposal_boxes": Boxes, "a": Tensor} + with patch_instances(fields): + script_f = torch.jit.script(f) + x = torch.randn(3, 4) + output, output2 = script_f(x) + self.assertTrue(torch.equal(output.a, torch.cat([x, x]))) + self.assertFalse(output.has("proposal_boxes")) + self.assertTrue(torch.equal(output2.proposal_boxes.tensor, torch.cat([x, x]))) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/structures/test_keypoints.py 
b/data_processing/detectron2/tests/structures/test_keypoints.py new file mode 100644 index 0000000..adc616e --- /dev/null +++ b/data_processing/detectron2/tests/structures/test_keypoints.py @@ -0,0 +1,19 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import unittest +import torch + +from detectron2.structures.keypoints import Keypoints + + +class TestKeypoints(unittest.TestCase): + def test_cat_keypoints(self): + keypoints1 = Keypoints(torch.rand(2, 21, 3)) + keypoints2 = Keypoints(torch.rand(4, 21, 3)) + + cat_keypoints = keypoints1.cat([keypoints1, keypoints2]) + self.assertTrue(torch.all(cat_keypoints.tensor[:2] == keypoints1.tensor).item()) + self.assertTrue(torch.all(cat_keypoints.tensor[2:] == keypoints2.tensor).item()) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/structures/test_masks.py b/data_processing/detectron2/tests/structures/test_masks.py new file mode 100644 index 0000000..7991eb0 --- /dev/null +++ b/data_processing/detectron2/tests/structures/test_masks.py @@ -0,0 +1,53 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import unittest +import torch + +from detectron2.structures.masks import BitMasks, PolygonMasks, polygons_to_bitmask + + +class TestBitMask(unittest.TestCase): + def test_get_bounding_box(self): + masks = torch.tensor( + [ + [ + [False, False, False, True], + [False, False, True, True], + [False, True, True, False], + [False, True, True, False], + ], + [ + [False, False, False, False], + [False, False, True, False], + [False, True, True, False], + [False, True, True, False], + ], + torch.zeros(4, 4), + ] + ) + bitmask = BitMasks(masks) + box_true = torch.tensor([[1, 0, 4, 4], [1, 1, 3, 4], [0, 0, 0, 0]], dtype=torch.float32) + box = bitmask.get_bounding_boxes() + self.assertTrue(torch.all(box.tensor == box_true).item()) + + for box in box_true: + poly = box[[0, 1, 2, 1, 2, 3, 0, 3]].numpy() + mask = polygons_to_bitmask([poly], 4, 4) + reconstruct_box = BitMasks(mask[None, :, :]).get_bounding_boxes()[0].tensor + self.assertTrue(torch.all(box == reconstruct_box).item()) + + reconstruct_box = PolygonMasks([[poly]]).get_bounding_boxes()[0].tensor + self.assertTrue(torch.all(box == reconstruct_box).item()) + + def test_from_empty_polygons(self): + masks = BitMasks.from_polygon_masks([], 100, 100) + self.assertEqual(masks.tensor.shape, (0, 100, 100)) + + def test_getitem(self): + masks = BitMasks(torch.ones(3, 10, 10)) + self.assertEqual(masks[1].tensor.shape, (1, 10, 10)) + self.assertEqual(masks[1:3].tensor.shape, (2, 10, 10)) + self.assertEqual(masks[torch.tensor([True, False, False])].tensor.shape, (1, 10, 10)) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/structures/test_rotated_boxes.py b/data_processing/detectron2/tests/structures/test_rotated_boxes.py new file mode 100644 index 0000000..478f034 --- /dev/null +++ b/data_processing/detectron2/tests/structures/test_rotated_boxes.py @@ -0,0 +1,441 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from __future__ import absolute_import, division, print_function, unicode_literals +import logging +import math +import random +import unittest +import torch +from fvcore.common.benchmark import benchmark + +from detectron2.layers.rotated_boxes import pairwise_iou_rotated +from detectron2.structures.boxes import Boxes +from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou +from detectron2.utils.testing import reload_script_model + +logger = logging.getLogger(__name__) + + +class TestRotatedBoxesLayer(unittest.TestCase): + def test_iou_0_dim_cpu(self): + boxes1 = torch.rand(0, 5, dtype=torch.float32) + boxes2 = torch.rand(10, 5, dtype=torch.float32) + expected_ious = torch.zeros(0, 10, dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious, expected_ious)) + + boxes1 = torch.rand(10, 5, dtype=torch.float32) + boxes2 = torch.rand(0, 5, dtype=torch.float32) + expected_ious = torch.zeros(10, 0, dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious, expected_ious)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_0_dim_cuda(self): + boxes1 = torch.rand(0, 5, dtype=torch.float32) + boxes2 = torch.rand(10, 5, dtype=torch.float32) + expected_ious = torch.zeros(0, 10, dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) + + boxes1 = torch.rand(10, 5, dtype=torch.float32) + boxes2 = torch.rand(0, 5, dtype=torch.float32) + expected_ious = torch.zeros(10, 0, dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) + + def test_iou_half_overlap_cpu(self): + boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) + boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious, expected_ious)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_half_overlap_cuda(self): + boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) + boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5]], dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) + + def test_iou_precision(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = torch.tensor([[565, 565, 10, 10.0, 0]], dtype=torch.float32, device=device) + boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32, device=device) + iou = 8.3 / 10.0 + expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious.cpu(), expected_ious)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_too_many_boxes_cuda(self): + s1, s2 = 5, 1289035 + boxes1 = torch.zeros(s1, 5) + boxes2 = torch.zeros(s2, 5) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTupleEqual(tuple(ious_cuda.shape), (s1, s2)) + + def test_iou_extreme(self): + # Cause floating point issues in cuda kernels (#1266) + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = 
torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device) + boxes2 = torch.tensor( + [ + [ + -1.117407639806935e17, + 1.3858420478349148e18, + 1000.0000610351562, + 1000.0000610351562, + 1612.0, + ] + ], + device=device, + ) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(ious.min() >= 0, ious) + + def test_iou_issue_2154(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = torch.tensor( + [ + [ + 296.6620178222656, + 458.73883056640625, + 23.515729904174805, + 47.677001953125, + 0.08795166015625, + ] + ], + device=device, + ) + boxes2 = torch.tensor( + [[296.66201, 458.73882000000003, 23.51573, 47.67702, 0.087951]], + device=device, + ) + ious = pairwise_iou_rotated(boxes1, boxes2) + expected_ious = torch.tensor([[1.0]], dtype=torch.float32) + self.assertTrue(torch.allclose(ious.cpu(), expected_ious)) + + def test_iou_issue_2167(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = torch.tensor( + [ + [ + 2563.74462890625000000000, + 1436.79016113281250000000, + 2174.70336914062500000000, + 214.09500122070312500000, + 115.11834716796875000000, + ] + ], + device=device, + ) + boxes2 = torch.tensor( + [ + [ + 2563.74462890625000000000, + 1436.79028320312500000000, + 2174.70288085937500000000, + 214.09495544433593750000, + 115.11835479736328125000, + ] + ], + device=device, + ) + ious = pairwise_iou_rotated(boxes1, boxes2) + expected_ious = torch.tensor([[1.0]], dtype=torch.float32) + self.assertTrue(torch.allclose(ious.cpu(), expected_ious)) + + +class TestRotatedBoxesStructure(unittest.TestCase): + def test_clip_area_0_degree(self): + for _ in range(50): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + # Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2) + boxes_4d = torch.zeros(num_boxes, 4) + boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0 + boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0 + boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0 + boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0 + + image_size = (500, 600) + test_boxes_4d = Boxes(boxes_4d) + test_boxes_5d = RotatedBoxes(boxes_5d) + # Before clip + areas_4d = test_boxes_4d.area() + areas_5d = test_boxes_5d.area() + self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5)) + # After clip + test_boxes_4d.clip(image_size) + test_boxes_5d.clip(image_size) + areas_4d = test_boxes_4d.area() + areas_5d = test_boxes_5d.area() + self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5)) + + def test_clip_area_arbitrary_angle(self): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + clip_angle_threshold = random.uniform(0, 180) + + image_size = (500, 600) + test_boxes_5d = RotatedBoxes(boxes_5d) + # Before clip + areas_before = test_boxes_5d.area() + # After clip + test_boxes_5d.clip(image_size, clip_angle_threshold) + areas_diff = test_boxes_5d.area() - areas_before 
+ + # the areas should only decrease after clipping + self.assertTrue(torch.all(areas_diff <= 0)) + # whenever the box is clipped (thus the area shrinks), + # the angle for the box must be within the clip_angle_threshold + # Note that the clip function will normalize the angle range + # to be within (-180, 180] + + self.assertTrue( + torch.all( + torch.abs(test_boxes_5d.tensor[:, 4][torch.where(areas_diff < 0)]) + < clip_angle_threshold + ) + ) + + def test_normalize_angles(self): + # torch.manual_seed(0) + for _ in range(50): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + rotated_boxes = RotatedBoxes(boxes_5d) + normalized_boxes = rotated_boxes.clone() + normalized_boxes.normalize_angles() + self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] >= -180)) + self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] < 180)) + # x, y, w, h should not change + self.assertTrue(torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4])) + # the cos/sin values of the angles should stay the same + + self.assertTrue( + torch.allclose( + torch.cos(boxes_5d[:, 4] * math.pi / 180), + torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180), + atol=1e-5, + ) + ) + + self.assertTrue( + torch.allclose( + torch.sin(boxes_5d[:, 4] * math.pi / 180), + torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180), + atol=1e-5, + ) + ) + + def test_pairwise_iou_0_degree(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = torch.tensor( + [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [ + [0.5, 0.5, 1.0, 1.0, 0.0], + [0.25, 0.5, 0.5, 1.0, 0.0], + [0.5, 0.25, 1.0, 0.5, 0.0], + [0.25, 0.25, 0.5, 0.5, 0.0], + [0.75, 0.75, 0.5, 0.5, 0.0], + [1.0, 1.0, 1.0, 1.0, 0.0], + ], + dtype=torch.float32, + device=device, + ) + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ], + dtype=torch.float32, + device=device, + ) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_45_degrees(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = torch.tensor( + [ + [1, 1, math.sqrt(2), math.sqrt(2), 45], + [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45], + ], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device) + expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_orthogonal(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device) + boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device) + iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0) + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + 
self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_large_close_boxes(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + boxes1 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]], + dtype=torch.float32, + device=device, + ) + iou = 364.259155 / 364.259186 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_many_boxes(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + num_boxes1 = 100 + num_boxes2 = 200 + boxes1 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 10, 0], + dtype=torch.float32, + device=device, + ) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], + dtype=torch.float32, + device=device, + ) + for i in range(num_boxes2) + ] + ) + expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device) + for i in range(min(num_boxes1, num_boxes2)): + expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0 + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_issue1207_simplified(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + # Simplified test case of D2-issue-1207 + boxes1 = torch.tensor([[3, 3, 8, 2, -45.0]], device=device) + boxes2 = torch.tensor([[6, 0, 8, 2, -45.0]], device=device) + iou = 0.0 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_issue1207(self): + for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []): + # The original test case in D2-issue-1207 + boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device) + boxes2 = torch.tensor([[190.0, 127.0, 80.0, 21.0, -46.0]], device=device) + + iou = 0.0 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_empty_cat(self): + x = RotatedBoxes.cat([]) + self.assertTrue(x.tensor.shape, (0, 5)) + + def test_scriptability(self): + def func(x): + boxes = RotatedBoxes(x) + test = boxes.to(torch.device("cpu")).tensor + return boxes.area(), test + + f = torch.jit.script(func) + f = reload_script_model(f) + f(torch.rand((3, 5))) + + data = torch.rand((3, 5)) + + def func_cat(x: torch.Tensor): + boxes1 = RotatedBoxes(x) + boxes2 = RotatedBoxes(x) + # this is not supported by torchscript for now. 
+ # boxes3 = RotatedBoxes.cat([boxes1, boxes2]) + boxes3 = boxes1.cat([boxes1, boxes2]) + return boxes3 + + f = torch.jit.script(func_cat) + script_box = f(data) + self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor)) + + +def benchmark_rotated_iou(): + num_boxes1 = 200 + num_boxes2 = 500 + boxes1 = torch.stack( + [ + torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], + dtype=torch.float32, + ) + for i in range(num_boxes2) + ] + ) + + def func(dev, n=1): + b1 = boxes1.to(device=dev) + b2 = boxes2.to(device=dev) + + def bench(): + for _ in range(n): + pairwise_iou_rotated(b1, b2) + if dev.type == "cuda": + torch.cuda.synchronize() + + return bench + + # only run it once per timed loop, since it's slow + args = [{"dev": torch.device("cpu"), "n": 1}] + if torch.cuda.is_available(): + args.append({"dev": torch.device("cuda"), "n": 10}) + + benchmark(func, "rotated_iou", args, warmup_iters=3) + + +if __name__ == "__main__": + unittest.main() + benchmark_rotated_iou() diff --git a/data_processing/detectron2/tests/test_checkpoint.py b/data_processing/detectron2/tests/test_checkpoint.py new file mode 100644 index 0000000..6c0b1c1 --- /dev/null +++ b/data_processing/detectron2/tests/test_checkpoint.py @@ -0,0 +1,105 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import os +import tempfile +import unittest +from collections import OrderedDict +import torch +from iopath.common.file_io import PathHandler, PathManager +from torch import nn + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.checkpoint.c2_model_loading import ( + _longest_common_prefix_str, + align_and_update_state_dicts, +) +from detectron2.utils.logger import setup_logger + + +class TestCheckpointer(unittest.TestCase): + def setUp(self): + setup_logger() + + def create_complex_model(self): + m = nn.Module() + m.block1 = nn.Module() + m.block1.layer1 = nn.Linear(2, 3) + m.layer2 = nn.Linear(3, 2) + m.res = nn.Module() + m.res.layer2 = nn.Linear(3, 2) + + state_dict = OrderedDict() + state_dict["layer1.weight"] = torch.rand(3, 2) + state_dict["layer1.bias"] = torch.rand(3) + state_dict["layer2.weight"] = torch.rand(2, 3) + state_dict["layer2.bias"] = torch.rand(2) + state_dict["res.layer2.weight"] = torch.rand(2, 3) + state_dict["res.layer2.bias"] = torch.rand(2) + return m, state_dict + + def test_complex_model_loaded(self): + for add_data_parallel in [False, True]: + model, state_dict = self.create_complex_model() + if add_data_parallel: + model = nn.DataParallel(model) + model_sd = model.state_dict() + + sd_to_load = align_and_update_state_dicts(model_sd, state_dict) + model.load_state_dict(sd_to_load) + for loaded, stored in zip(model_sd.values(), state_dict.values()): + # different tensor references + self.assertFalse(id(loaded) == id(stored)) + # same content + self.assertTrue(loaded.to(stored).equal(stored)) + + def test_load_with_matching_heuristics(self): + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + model, state_dict = self.create_complex_model() + torch.save({"model": state_dict}, os.path.join(d, "checkpoint.pth")) + checkpointer = DetectionCheckpointer(model, save_dir=d) + + with torch.no_grad(): + # use a different weight from the `state_dict`, since torch.rand is less than 1 + model.block1.layer1.weight.fill_(1) + + # load checkpoint without matching_heuristics + 
checkpointer.load(os.path.join(d, "checkpoint.pth")) + self.assertTrue(model.block1.layer1.weight.equal(torch.ones(3, 2))) + + # load checkpoint with matching_heuristics + checkpointer.load(os.path.join(d, "checkpoint.pth?matching_heuristics=True")) + self.assertFalse(model.block1.layer1.weight.equal(torch.ones(3, 2))) + + def test_custom_path_manager_handler(self): + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + + class CustomPathManagerHandler(PathHandler): + PREFIX = "detectron2_test://" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path, **kwargs): + name = path[len(self.PREFIX) :] + return os.path.join(d, name) + + def _open(self, path, mode="r", **kwargs): + return open(self._get_local_path(path), mode, **kwargs) + + pathmgr = PathManager() + pathmgr.register_handler(CustomPathManagerHandler()) + + model, state_dict = self.create_complex_model() + torch.save({"model": state_dict}, os.path.join(d, "checkpoint.pth")) + checkpointer = DetectionCheckpointer(model, save_dir=d) + checkpointer.path_manager = pathmgr + checkpointer.load("detectron2_test://checkpoint.pth") + checkpointer.load("detectron2_test://checkpoint.pth?matching_heuristics=True") + + def test_lcp(self): + self.assertEqual(_longest_common_prefix_str(["class", "dlaps_model"]), "") + self.assertEqual(_longest_common_prefix_str(["classA", "classB"]), "class") + self.assertEqual(_longest_common_prefix_str(["classA", "classB", "clab"]), "cla") + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/test_engine.py b/data_processing/detectron2/tests/test_engine.py new file mode 100644 index 0000000..c97c11b --- /dev/null +++ b/data_processing/detectron2/tests/test_engine.py @@ -0,0 +1,264 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import json +import math +import os +import tempfile +import time +import unittest +from unittest import mock +import torch +from fvcore.common.checkpoint import Checkpointer +from torch import nn + +from detectron2 import model_zoo +from detectron2.config import configurable, get_cfg +from detectron2.engine import DefaultTrainer, SimpleTrainer, default_setup, hooks +from detectron2.modeling.meta_arch import META_ARCH_REGISTRY +from detectron2.utils.events import CommonMetricPrinter, JSONWriter + + +@META_ARCH_REGISTRY.register() +class _SimpleModel(nn.Module): + @configurable + def __init__(self, sleep_sec=0): + super().__init__() + self.mod = nn.Linear(10, 20) + self.sleep_sec = sleep_sec + + @classmethod + def from_config(cls, cfg): + return {} + + def forward(self, x): + if self.sleep_sec > 0: + time.sleep(self.sleep_sec) + return {"loss": x.sum() + sum([x.mean() for x in self.parameters()])} + + +class TestTrainer(unittest.TestCase): + def _data_loader(self, device): + device = torch.device(device) + while True: + yield torch.rand(3, 3).to(device) + + def test_simple_trainer(self, device="cpu"): + model = _SimpleModel().to(device=device) + trainer = SimpleTrainer( + model, self._data_loader(device), torch.optim.SGD(model.parameters(), 0.1) + ) + trainer.train(0, 10) + + def test_simple_trainer_reset_dataloader(self, device="cpu"): + model = _SimpleModel().to(device=device) + trainer = SimpleTrainer( + model, self._data_loader(device), torch.optim.SGD(model.parameters(), 0.1) + ) + trainer.train(0, 10) + trainer.reset_data_loader(lambda: self._data_loader(device)) + trainer.train(0, 10) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_simple_trainer_cuda(self): + self.test_simple_trainer(device="cuda") + + def test_writer_hooks(self): + model = _SimpleModel(sleep_sec=0.1) + trainer = SimpleTrainer( + model, self._data_loader("cpu"), torch.optim.SGD(model.parameters(), 0.1) + ) + + max_iter = 50 + + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + json_file = os.path.join(d, "metrics.json") + writers = [CommonMetricPrinter(max_iter), JSONWriter(json_file)] + + trainer.register_hooks( + [hooks.EvalHook(0, lambda: {"metric": 100}), hooks.PeriodicWriter(writers)] + ) + with self.assertLogs(writers[0].logger) as logs: + trainer.train(0, max_iter) + + with open(json_file, "r") as f: + data = [json.loads(line.strip()) for line in f] + self.assertEqual([x["iteration"] for x in data], [19, 39, 49, 50]) + # the eval metric is in the last line with iter 50 + self.assertIn("metric", data[-1], "Eval metric must be in last line of JSON!") + + # test logged messages from CommonMetricPrinter + self.assertEqual(len(logs.output), 3) + for log, iter in zip(logs.output, [19, 39, 49]): + self.assertIn(f"iter: {iter}", log) + + self.assertIn("eta: 0:00:00", logs.output[-1], "Last ETA must be 0!") + + def test_metric_gather_and_write(self): + gather_metric_period = 5 + writer_period = 10 + + model = _SimpleModel(sleep_sec=0.1) + trainer = SimpleTrainer( + model, + self._data_loader("cpu"), + torch.optim.SGD(model.parameters(), 0.1), + gather_metric_period=gather_metric_period, + ) + + max_iter = 50 + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + json_file = os.path.join(d, "metrics.json") + writers = [JSONWriter(json_file, window_size=writer_period)] + + trainer.register_hooks( + [ + hooks.IterationTimer(), + hooks.PeriodicWriter(writers, period=writer_period), + ] + ) + trainer.train(0, max_iter) + + with open(json_file, "r") as 
f: + data = [json.loads(line.strip()) for line in f] + self.assertEqual([x["iteration"] for x in data], [9, 19, 29, 39, 49]) + self.assertEqual(len(trainer.storage.history("time").values()), 48) + for key in ["data_time", "total_loss"]: + history = trainer.storage.history(key).values() + history_iters = [h[1] for h in history] + self.assertEqual(history_iters, [4, 9, 14, 19, 24, 29, 34, 39, 44, 49]) + for i in range(len(data)): + # written metric should equal to the median of 2 most recent logged metrics + logged1, logged2 = history[2 * i][0], history[2 * i + 1][0] + gt = data[i][key] + self.assertEqual(gt, (logged1 + logged2) / 2.0) + + def test_async_write_metrics(self): + writer_period = 1 + + model = _SimpleModel(sleep_sec=0.1) + trainer = SimpleTrainer( + model, + self._data_loader("cpu"), + torch.optim.SGD(model.parameters(), 0.1), + async_write_metrics=True, + ) + + max_iter = 50 + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + json_file = os.path.join(d, "metrics.json") + writers = [JSONWriter(json_file, window_size=writer_period)] + + trainer.register_hooks( + [ + hooks.IterationTimer(), + hooks.PeriodicWriter(writers, period=writer_period), + ] + ) + trainer.train(0, max_iter) + + self.assertEqual(len(trainer.storage.history("time").values()), 48) + for key in ["data_time", "total_loss"]: + history = trainer.storage.history(key).values() + history_iters = [h[1] for h in history] + self.assertEqual(history_iters, list(range(50))) + + def test_default_trainer(self): + # TODO: this test requires manifold access, so changed device to CPU. see: T88318502 + cfg = get_cfg() + cfg.MODEL.DEVICE = "cpu" + cfg.MODEL.META_ARCHITECTURE = "_SimpleModel" + cfg.DATASETS.TRAIN = ("coco_2017_val_100",) + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + cfg.OUTPUT_DIR = d + trainer = DefaultTrainer(cfg) + + # test property + self.assertIs(trainer.model, trainer._trainer.model) + trainer.model = _SimpleModel() + self.assertIs(trainer.model, trainer._trainer.model) + + def test_checkpoint_resume(self): + model = _SimpleModel() + dataloader = self._data_loader("cpu") + opt = torch.optim.SGD(model.parameters(), 0.1) + scheduler = torch.optim.lr_scheduler.StepLR(opt, 3) + + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + trainer = SimpleTrainer(model, dataloader, opt) + checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer) + + trainer.register_hooks( + [ + hooks.LRScheduler(scheduler=scheduler), + # checkpoint after scheduler to properly save the state of scheduler + hooks.PeriodicCheckpointer(checkpointer, 10), + ] + ) + + trainer.train(0, 12) + self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5) + self.assertEqual(scheduler.last_epoch, 12) + del trainer + + opt = torch.optim.SGD(model.parameters(), 999) # lr will be loaded + trainer = SimpleTrainer(model, dataloader, opt) + scheduler = torch.optim.lr_scheduler.StepLR(opt, 3) + trainer.register_hooks( + [ + hooks.LRScheduler(scheduler=scheduler), + ] + ) + checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer) + checkpointer.resume_or_load("non_exist.pth") + self.assertEqual(trainer.iter, 11) # last finished iter number (0-based in Trainer) + # number of times `scheduler.step()` was called (1-based) + self.assertEqual(scheduler.last_epoch, 12) + self.assertAlmostEqual(opt.param_groups[0]["lr"], 1e-5) + + def test_eval_hook(self): + model = _SimpleModel() + dataloader = self._data_loader("cpu") + opt = torch.optim.SGD(model.parameters(), 0.1) + + for total_iter, period, 
eval_count in [(30, 15, 2), (31, 15, 3), (20, 0, 1)]: + test_func = mock.Mock(return_value={"metric": 3.0}) + trainer = SimpleTrainer(model, dataloader, opt) + trainer.register_hooks([hooks.EvalHook(period, test_func)]) + trainer.train(0, total_iter) + self.assertEqual(test_func.call_count, eval_count) + + def test_best_checkpointer(self): + model = _SimpleModel() + dataloader = self._data_loader("cpu") + opt = torch.optim.SGD(model.parameters(), 0.1) + metric_name = "metric" + total_iter = 40 + test_period = 10 + test_cases = [ + ("max", iter([0.3, 0.4, 0.35, 0.5]), 3), + ("min", iter([1.0, 0.8, 0.9, 0.9]), 2), + ("min", iter([math.nan, 0.8, 0.9, 0.9]), 1), + ] + for mode, metrics, call_count in test_cases: + trainer = SimpleTrainer(model, dataloader, opt) + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + checkpointer = Checkpointer(model, d, opt=opt, trainer=trainer) + trainer.register_hooks( + [ + hooks.EvalHook(test_period, lambda: {metric_name: next(metrics)}), + hooks.BestCheckpointer(test_period, checkpointer, metric_name, mode=mode), + ] + ) + with mock.patch.object(checkpointer, "save") as mock_save_method: + trainer.train(0, total_iter) + self.assertEqual(mock_save_method.call_count, call_count) + + def test_setup_config(self): + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + cfg = get_cfg() + cfg.OUTPUT_DIR = os.path.join(d, "yacs") + default_setup(cfg, {}) + + cfg = model_zoo.get_config("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py") + cfg.train.output_dir = os.path.join(d, "omegaconf") + default_setup(cfg, {}) diff --git a/data_processing/detectron2/tests/test_events.py b/data_processing/detectron2/tests/test_events.py new file mode 100644 index 0000000..174ca97 --- /dev/null +++ b/data_processing/detectron2/tests/test_events.py @@ -0,0 +1,122 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import json +import os +import tempfile +import unittest + +from detectron2.utils.events import ( + CommonMetricPrinter, + EventStorage, + JSONWriter, + get_event_storage, + has_event_storage, +) + + +class TestEventWriter(unittest.TestCase): + def testScalar(self): + with tempfile.TemporaryDirectory( + prefix="detectron2_tests" + ) as dir, EventStorage() as storage: + json_file = os.path.join(dir, "test.json") + writer = JSONWriter(json_file) + for k in range(60): + storage.put_scalar("key", k, smoothing_hint=False) + if (k + 1) % 20 == 0: + writer.write() + storage.step() + writer.close() + with open(json_file) as f: + data = [json.loads(l) for l in f] + self.assertTrue([int(k["key"]) for k in data] == [19, 39, 59]) + + def testScalarMismatchedPeriod(self): + with tempfile.TemporaryDirectory( + prefix="detectron2_tests" + ) as dir, EventStorage() as storage: + json_file = os.path.join(dir, "test.json") + + writer = JSONWriter(json_file) + for k in range(60): + if k % 17 == 0: # write in a differnt period + storage.put_scalar("key2", k, smoothing_hint=False) + storage.put_scalar("key", k, smoothing_hint=False) + if (k + 1) % 20 == 0: + writer.write() + storage.step() + writer.close() + with open(json_file) as f: + data = [json.loads(l) for l in f] + self.assertTrue([int(k.get("key2", 0)) for k in data] == [17, 0, 34, 0, 51, 0]) + self.assertTrue([int(k.get("key", 0)) for k in data] == [0, 19, 0, 39, 0, 59]) + self.assertTrue([int(k["iteration"]) for k in data] == [17, 19, 34, 39, 51, 59]) + + def testPrintETA(self): + with EventStorage() as s: + p1 = CommonMetricPrinter(10) + p2 = CommonMetricPrinter() + + s.put_scalar("time", 1.0) + s.step() + s.put_scalar("time", 1.0) + s.step() + + with self.assertLogs("detectron2.utils.events") as logs: + p1.write() + self.assertIn("eta", logs.output[0]) + + with self.assertLogs("detectron2.utils.events") as logs: + p2.write() + self.assertNotIn("eta", logs.output[0]) + + def testPrintNonLosses(self): + with EventStorage() as s: + p1 = CommonMetricPrinter(10) + p2 = CommonMetricPrinter() + + s.put_scalar("time", 1.0) + s.put_scalar("[metric]bn_stat", 1.0) + s.step() + s.put_scalar("time", 1.0) + s.put_scalar("[metric]bn_stat", 1.0) + s.step() + + with self.assertLogs("detectron2.utils.events") as logs: + p1.write() + self.assertIn("[metric]bn_stat", logs.output[0]) + + with self.assertLogs("detectron2.utils.events") as logs: + p2.write() + self.assertIn("[metric]bn_stat", logs.output[0]) + + def testSmoothingWithWindowSize(self): + with tempfile.TemporaryDirectory( + prefix="detectron2_tests" + ) as dir, EventStorage() as storage: + json_file = os.path.join(dir, "test.json") + writer = JSONWriter(json_file, window_size=10) + for k in range(20): + storage.put_scalar("key1", k, smoothing_hint=True) + if (k + 1) % 2 == 0: + storage.put_scalar("key2", k, smoothing_hint=True) + if (k + 1) % 5 == 0: + storage.put_scalar("key3", k, smoothing_hint=True) + if (k + 1) % 10 == 0: + writer.write() + storage.step() + + num_samples = {k: storage.count_samples(k, 10) for k in ["key1", "key2", "key3"]} + self.assertEqual(num_samples, {"key1": 10, "key2": 5, "key3": 2}) + writer.close() + with open(json_file) as f: + data = [json.loads(l) for l in f] + self.assertEqual([k["key1"] for k in data], [4.5, 14.5]) + self.assertEqual([k["key2"] for k in data], [5, 15]) + self.assertEqual([k["key3"] for k in data], [6.5, 16.5]) + + def testEventStorage(self): + self.assertFalse(has_event_storage()) + with EventStorage() as storage: + self.assertTrue(has_event_storage()) + 
self.assertEqual(storage, get_event_storage()) + self.assertFalse(has_event_storage()) diff --git a/data_processing/detectron2/tests/test_export_caffe2.py b/data_processing/detectron2/tests/test_export_caffe2.py new file mode 100644 index 0000000..58e9f68 --- /dev/null +++ b/data_processing/detectron2/tests/test_export_caffe2.py @@ -0,0 +1,62 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# -*- coding: utf-8 -*- + +import copy +import os +import tempfile +import unittest +import torch +from torch.hub import _check_module_exists + +from detectron2 import model_zoo +from detectron2.utils.logger import setup_logger +from detectron2.utils.testing import get_sample_coco_image + +try: + # Caffe2 used to be included in PyTorch, but since PyTorch 1.10+, + # Caffe2 is not included in pre-built packages. This is a safety BC check + from detectron2.export import Caffe2Model, Caffe2Tracer +except ImportError: + raise unittest.SkipTest( + f"PyTorch does not have Caffe2 support. Skipping all tests in {__name__}" + ) from None + + +# TODO: this test requires manifold access, see: T88318502 +# Running it on CircleCI causes crash, not sure why. +@unittest.skipIf(os.environ.get("CIRCLECI"), "Caffe2 tests crash on CircleCI.") +@unittest.skipIf(not _check_module_exists("onnx"), "ONNX not installed.") +class TestCaffe2Export(unittest.TestCase): + def setUp(self): + setup_logger() + + def _test_model(self, config_path, device="cpu"): + cfg = model_zoo.get_config(config_path) + cfg.MODEL.DEVICE = device + model = model_zoo.get(config_path, trained=True, device=device) + + inputs = [{"image": get_sample_coco_image()}] + tracer = Caffe2Tracer(cfg, model, copy.deepcopy(inputs)) + + with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d: + if not os.environ.get("CI"): + # This requires onnx, which is not yet available on public CI + c2_model = tracer.export_caffe2() + c2_model.save_protobuf(d) + c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs)) + + c2_model = Caffe2Model.load_protobuf(d) + c2_model(inputs)[0]["instances"] + + ts_model = tracer.export_torchscript() + ts_model.save(os.path.join(d, "model.ts")) + + def testMaskRCNN(self): + self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def testMaskRCNNGPU(self): + self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", device="cuda") + + def testRetinaNet(self): + self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml") diff --git a/data_processing/detectron2/tests/test_export_onnx.py b/data_processing/detectron2/tests/test_export_onnx.py new file mode 100644 index 0000000..aa15e1a --- /dev/null +++ b/data_processing/detectron2/tests/test_export_onnx.py @@ -0,0 +1,237 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import io +import unittest +import warnings +import torch +from torch.hub import _check_module_exists + +from detectron2 import model_zoo +from detectron2.config import get_cfg +from detectron2.export import STABLE_ONNX_OPSET_VERSION +from detectron2.export.flatten import TracingAdapter +from detectron2.export.torchscript_patch import patch_builtin_len +from detectron2.layers import ShapeSpec +from detectron2.modeling import build_model +from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead +from detectron2.structures import Boxes, Instances +from detectron2.utils.testing import ( + _pytorch1111_symbolic_opset9_repeat_interleave, + _pytorch1111_symbolic_opset9_to, + get_sample_coco_image, + has_dynamic_axes, + random_boxes, + register_custom_op_onnx_export, + skipIfOnCPUCI, + skipIfUnsupportedMinOpsetVersion, + skipIfUnsupportedMinTorchVersion, + unregister_custom_op_onnx_export, +) + + +@unittest.skipIf(not _check_module_exists("onnx"), "ONNX not installed.") +@skipIfUnsupportedMinTorchVersion("1.10") +class TestONNXTracingExport(unittest.TestCase): + opset_version = STABLE_ONNX_OPSET_VERSION + + def testMaskRCNNFPN(self): + def inference_func(model, images): + with warnings.catch_warnings(record=True): + inputs = [{"image": image} for image in images] + inst = model.inference(inputs, do_postprocess=False)[0] + return [{"instances": inst}] + + self._test_model_zoo_from_config_path( + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func + ) + + @skipIfOnCPUCI + def testMaskRCNNC4(self): + def inference_func(model, image): + inputs = [{"image": image}] + return model.inference(inputs, do_postprocess=False)[0] + + self._test_model_zoo_from_config_path( + "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml", inference_func + ) + + @skipIfOnCPUCI + def testCascadeRCNN(self): + def inference_func(model, image): + inputs = [{"image": image}] + return model.inference(inputs, do_postprocess=False)[0] + + self._test_model_zoo_from_config_path( + "Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml", inference_func + ) + + def testRetinaNet(self): + def inference_func(model, image): + return model.forward([{"image": image}])[0]["instances"] + + self._test_model_zoo_from_config_path( + "COCO-Detection/retinanet_R_50_FPN_3x.yaml", inference_func + ) + + @skipIfOnCPUCI + def testMaskRCNNFPN_batched(self): + def inference_func(model, image1, image2): + inputs = [{"image": image1}, {"image": image2}] + return model.inference(inputs, do_postprocess=False) + + self._test_model_zoo_from_config_path( + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func, batch=2 + ) + + @skipIfUnsupportedMinOpsetVersion(16, STABLE_ONNX_OPSET_VERSION) + @skipIfUnsupportedMinTorchVersion("1.11.1") + def testMaskRCNNFPN_with_postproc(self): + def inference_func(model, image): + inputs = [{"image": image, "height": image.shape[1], "width": image.shape[2]}] + return model.inference(inputs, do_postprocess=True)[0]["instances"] + + self._test_model_zoo_from_config_path( + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", + inference_func, + ) + + def testKeypointHead(self): + class M(torch.nn.Module): + def __init__(self): + super().__init__() + self.model = KRCNNConvDeconvUpsampleHead( + ShapeSpec(channels=4, height=14, width=14), num_keypoints=17, conv_dims=(4,) + ) + + def forward(self, x, predbox1, predbox2): + inst = [ + Instances((100, 100), pred_boxes=Boxes(predbox1)), + Instances((100, 100), pred_boxes=Boxes(predbox2)), + ] + ret = self.model(x, inst) + return 
tuple(x.pred_keypoints for x in ret) + + model = M() + model.eval() + + def gen_input(num1, num2): + feat = torch.randn((num1 + num2, 4, 14, 14)) + box1 = random_boxes(num1) + box2 = random_boxes(num2) + return feat, box1, box2 + + with patch_builtin_len(): + onnx_model = self._test_model( + model, + gen_input(1, 2), + input_names=["features", "pred_boxes", "pred_classes"], + output_names=["box1", "box2"], + dynamic_axes={ + "features": {0: "batch", 1: "static_four", 2: "height", 3: "width"}, + "pred_boxes": {0: "batch", 1: "static_four"}, + "pred_classes": {0: "batch", 1: "static_four"}, + "box1": {0: "num_instance", 1: "K", 2: "static_three"}, + "box2": {0: "num_instance", 1: "K", 2: "static_three"}, + }, + ) + + # Although ONNX models are not executable by PyTorch to verify + # support of batches with different sizes, we can verify model's IR + # does not hard-code input and/or output shapes. + # TODO: Add tests with different batch sizes when detectron2's CI + # support ONNX Runtime backend. + assert has_dynamic_axes(onnx_model) + + ################################################################################ + # Testcase internals - DO NOT add tests below this point + ################################################################################ + + def setUp(self): + register_custom_op_onnx_export("::to", _pytorch1111_symbolic_opset9_to, 9, "1.11.1") + register_custom_op_onnx_export( + "::repeat_interleave", + _pytorch1111_symbolic_opset9_repeat_interleave, + 9, + "1.11.1", + ) + + def tearDown(self): + unregister_custom_op_onnx_export("::to", 9, "1.11.1") + unregister_custom_op_onnx_export("::repeat_interleave", 9, "1.11.1") + + def _test_model( + self, + model, + inputs, + inference_func=None, + opset_version=STABLE_ONNX_OPSET_VERSION, + save_onnx_graph_path=None, + **export_kwargs, + ): + # Not imported in the beginning of file to prevent runtime errors + # for environments without ONNX. 
+ # This testcase checks dependencies before running + import onnx # isort:skip + + f = io.BytesIO() + adapter_model = TracingAdapter(model, inputs, inference_func) + adapter_model.eval() + with torch.no_grad(): + try: + torch.onnx.enable_log() + except AttributeError: + # Older ONNX versions does not have this API + pass + torch.onnx.export( + adapter_model, + adapter_model.flattened_inputs, + f, + training=torch.onnx.TrainingMode.EVAL, + opset_version=opset_version, + verbose=True, + **export_kwargs, + ) + onnx_model = onnx.load_from_string(f.getvalue()) + assert onnx_model is not None + if save_onnx_graph_path: + onnx.save(onnx_model, save_onnx_graph_path) + return onnx_model + + def _test_model_zoo_from_config_path( + self, + config_path, + inference_func, + batch=1, + opset_version=STABLE_ONNX_OPSET_VERSION, + save_onnx_graph_path=None, + **export_kwargs, + ): + model = model_zoo.get(config_path, trained=True) + image = get_sample_coco_image() + inputs = tuple(image.clone() for _ in range(batch)) + return self._test_model( + model, inputs, inference_func, opset_version, save_onnx_graph_path, **export_kwargs + ) + + def _test_model_from_config_path( + self, + config_path, + inference_func, + batch=1, + opset_version=STABLE_ONNX_OPSET_VERSION, + save_onnx_graph_path=None, + **export_kwargs, + ): + from projects.PointRend import point_rend # isort:skip + + cfg = get_cfg() + cfg.DATALOADER.NUM_WORKERS = 0 + point_rend.add_pointrend_config(cfg) + cfg.merge_from_file(config_path) + cfg.freeze() + model = build_model(cfg) + image = get_sample_coco_image() + inputs = tuple(image.clone() for _ in range(batch)) + return self._test_model( + model, inputs, inference_func, opset_version, save_onnx_graph_path, **export_kwargs + ) diff --git a/data_processing/detectron2/tests/test_export_torchscript.py b/data_processing/detectron2/tests/test_export_torchscript.py new file mode 100644 index 0000000..b9905a6 --- /dev/null +++ b/data_processing/detectron2/tests/test_export_torchscript.py @@ -0,0 +1,336 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import copy +import glob +import json +import os +import random +import tempfile +import unittest +import zipfile +import torch +from torch import Tensor, nn + +from detectron2 import model_zoo +from detectron2.config import get_cfg +from detectron2.config.instantiate import dump_dataclass, instantiate +from detectron2.export import dump_torchscript_IR, scripting_with_instances +from detectron2.export.flatten import TracingAdapter, flatten_to_tuple +from detectron2.export.torchscript_patch import patch_builtin_len +from detectron2.layers import ShapeSpec +from detectron2.modeling import build_backbone +from detectron2.modeling.postprocessing import detector_postprocess +from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead +from detectron2.structures import Boxes, Instances +from detectron2.utils.env import TORCH_VERSION +from detectron2.utils.testing import ( + assert_instances_allclose, + convert_scripted_instances, + get_sample_coco_image, + random_boxes, + skipIfOnCPUCI, +) + + +""" +https://detectron2.readthedocs.io/tutorials/deployment.html +contains some explanations of this file. 
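+The tests below cover both deployment paths: scripting whole models with
+scripting_with_instances, and tracing them through TracingAdapter / torch.jit.trace.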
+""" + + +class TestScripting(unittest.TestCase): + def testMaskRCNNFPN(self): + self._test_rcnn_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") + + @skipIfOnCPUCI + def testMaskRCNNC4(self): + self._test_rcnn_model("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml") + + def testRetinaNet(self): + self._test_retinanet_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml") + + def _test_rcnn_model(self, config_path): + model = model_zoo.get(config_path, trained=True) + model.eval() + + fields = { + "proposal_boxes": Boxes, + "objectness_logits": Tensor, + "pred_boxes": Boxes, + "scores": Tensor, + "pred_classes": Tensor, + "pred_masks": Tensor, + } + script_model = scripting_with_instances(model, fields) + + # Test that batch inference with different shapes are supported + image = get_sample_coco_image() + small_image = nn.functional.interpolate(image, scale_factor=0.5) + inputs = [{"image": image}, {"image": small_image}] + with torch.no_grad(): + instance = model.inference(inputs, do_postprocess=False)[0] + scripted_instance = script_model.inference(inputs, do_postprocess=False)[0] + assert_instances_allclose(instance, scripted_instance) + + def _test_retinanet_model(self, config_path): + model = model_zoo.get(config_path, trained=True) + model.eval() + + fields = { + "pred_boxes": Boxes, + "scores": Tensor, + "pred_classes": Tensor, + } + script_model = scripting_with_instances(model, fields) + + img = get_sample_coco_image() + inputs = [{"image": img}] * 2 + with torch.no_grad(): + instance = model(inputs)[0]["instances"] + scripted_instance = convert_scripted_instances(script_model(inputs)[0]) + scripted_instance = detector_postprocess(scripted_instance, img.shape[1], img.shape[2]) + assert_instances_allclose(instance, scripted_instance) + # Note that the model currently cannot be saved and loaded into a new process: + # https://github.com/pytorch/pytorch/issues/46944 + + +# TODO: this test requires manifold access, see: T88318502 +class TestTracing(unittest.TestCase): + def testMaskRCNNFPN(self): + def inference_func(model, image): + inputs = [{"image": image}] + return model.inference(inputs, do_postprocess=False)[0] + + self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func) + + def testMaskRCNNFPN_with_postproc(self): + def inference_func(model, image): + inputs = [{"image": image, "height": image.shape[1], "width": image.shape[2]}] + return model.inference(inputs, do_postprocess=True)[0]["instances"] + + self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func) + + @skipIfOnCPUCI + def testMaskRCNNC4(self): + def inference_func(model, image): + inputs = [{"image": image}] + return model.inference(inputs, do_postprocess=False)[0] + + self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml", inference_func) + + @skipIfOnCPUCI + def testCascadeRCNN(self): + def inference_func(model, image): + inputs = [{"image": image}] + return model.inference(inputs, do_postprocess=False)[0] + + self._test_model("Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml", inference_func) + + # bug fixed by https://github.com/pytorch/pytorch/pull/67734 + @unittest.skipIf(TORCH_VERSION == (1, 10) and os.environ.get("CI"), "1.10 has bugs.") + def testRetinaNet(self): + def inference_func(model, image): + return model.forward([{"image": image}])[0]["instances"] + + 
self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml", inference_func) + + def _check_torchscript_no_hardcoded_device(self, jitfile, extract_dir, device): + zipfile.ZipFile(jitfile).extractall(extract_dir) + dir_path = os.path.join(extract_dir, os.path.splitext(os.path.basename(jitfile))[0]) + error_files = [] + for f in glob.glob(f"{dir_path}/code/**/*.py", recursive=True): + content = open(f).read() + if device in content: + error_files.append((f, content)) + if len(error_files): + msg = "\n".join(f"{f}\n{content}" for f, content in error_files) + raise ValueError(f"Found device '{device}' in following files:\n{msg}") + + def _get_device_casting_test_cases(self, model): + # Indexing operation can causes hardcoded device type before 1.10 + if not TORCH_VERSION >= (1, 10) or torch.cuda.device_count() == 0: + return [None] + + testing_devices = ["cpu", "cuda:0"] + if torch.cuda.device_count() > 1: + testing_devices.append(f"cuda:{torch.cuda.device_count() - 1}") + assert str(model.device) in testing_devices + testing_devices.remove(str(model.device)) + testing_devices = [None] + testing_devices # test no casting first + + return testing_devices + + def _test_model(self, config_path, inference_func, batch=1): + model = model_zoo.get(config_path, trained=True) + image = get_sample_coco_image() + inputs = tuple(image.clone() for _ in range(batch)) + + wrapper = TracingAdapter(model, inputs, inference_func) + wrapper.eval() + with torch.no_grad(): + # trace with smaller images, and the trace must still work + trace_inputs = tuple( + nn.functional.interpolate(image, scale_factor=random.uniform(0.5, 0.7)) + for _ in range(batch) + ) + traced_model = torch.jit.trace(wrapper, trace_inputs) + + testing_devices = self._get_device_casting_test_cases(model) + # save and load back the model in order to show traceback of TorchScript + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + basename = "model" + jitfile = f"{d}/{basename}.jit" + torch.jit.save(traced_model, jitfile) + traced_model = torch.jit.load(jitfile) + + if any(device and "cuda" in device for device in testing_devices): + self._check_torchscript_no_hardcoded_device(jitfile, d, "cuda") + + for device in testing_devices: + print(f"Testing casting to {device} for inference (traced on {model.device}) ...") + with torch.no_grad(): + outputs = inference_func(copy.deepcopy(model).to(device), *inputs) + traced_outputs = wrapper.outputs_schema(traced_model.to(device)(*inputs)) + if batch > 1: + for output, traced_output in zip(outputs, traced_outputs): + assert_instances_allclose(output, traced_output, size_as_tensor=True) + else: + assert_instances_allclose(outputs, traced_outputs, size_as_tensor=True) + + @skipIfOnCPUCI + def testMaskRCNNFPN_batched(self): + def inference_func(model, image1, image2): + inputs = [{"image": image1}, {"image": image2}] + return model.inference(inputs, do_postprocess=False) + + self._test_model( + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", inference_func, batch=2 + ) + + def testKeypointHead(self): + class M(nn.Module): + def __init__(self): + super().__init__() + self.model = KRCNNConvDeconvUpsampleHead( + ShapeSpec(channels=4, height=14, width=14), num_keypoints=17, conv_dims=(4,) + ) + + def forward(self, x, predbox1, predbox2): + inst = [ + Instances((100, 100), pred_boxes=Boxes(predbox1)), + Instances((100, 100), pred_boxes=Boxes(predbox2)), + ] + ret = self.model(x, inst) + return tuple(x.pred_keypoints for x in ret) + + model = M() + model.eval() + + def gen_input(num1, 
num2): + feat = torch.randn((num1 + num2, 4, 14, 14)) + box1 = random_boxes(num1) + box2 = random_boxes(num2) + return feat, box1, box2 + + with torch.no_grad(), patch_builtin_len(): + trace = torch.jit.trace(model, gen_input(15, 15), check_trace=False) + + inputs = gen_input(12, 10) + trace_outputs = trace(*inputs) + true_outputs = model(*inputs) + for trace_output, true_output in zip(trace_outputs, true_outputs): + self.assertTrue(torch.allclose(trace_output, true_output)) + + +class TestTorchscriptUtils(unittest.TestCase): + # TODO: add test to dump scripting + def test_dump_IR_tracing(self): + cfg = get_cfg() + cfg.MODEL.RESNETS.DEPTH = 18 + cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64 + + class Mod(nn.Module): + def forward(self, x): + return tuple(self.m(x).values()) + + model = Mod() + model.m = build_backbone(cfg) + model.eval() + + with torch.no_grad(): + ts_model = torch.jit.trace(model, (torch.rand(2, 3, 224, 224),)) + + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + dump_torchscript_IR(ts_model, d) + # check that the files are created + for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined", "model"]: + fname = os.path.join(d, name + ".txt") + self.assertTrue(os.stat(fname).st_size > 0, fname) + + def test_dump_IR_function(self): + @torch.jit.script + def gunc(x, y): + return x + y + + def func(x, y): + return x + y + gunc(x, y) + + ts_model = torch.jit.trace(func, (torch.rand(3), torch.rand(3))) + with tempfile.TemporaryDirectory(prefix="detectron2_test") as d: + dump_torchscript_IR(ts_model, d) + for name in ["model_ts_code", "model_ts_IR", "model_ts_IR_inlined"]: + fname = os.path.join(d, name + ".txt") + self.assertTrue(os.stat(fname).st_size > 0, fname) + + def test_flatten_basic(self): + obj = [3, ([5, 6], {"name": [7, 9], "name2": 3})] + res, schema = flatten_to_tuple(obj) + self.assertEqual(res, (3, 5, 6, 7, 9, 3)) + new_obj = schema(res) + self.assertEqual(new_obj, obj) + + _, new_schema = flatten_to_tuple(new_obj) + self.assertEqual(schema, new_schema) # test __eq__ + self._check_schema(schema) + + def _check_schema(self, schema): + dumped_schema = dump_dataclass(schema) + # Check that the schema is json-serializable + # Although in reality you might want to use yaml because it often has many levels + json.dumps(dumped_schema) + + # Check that the schema can be deserialized + new_schema = instantiate(dumped_schema) + self.assertEqual(schema, new_schema) + + def test_flatten_instances_boxes(self): + inst = Instances( + torch.tensor([5, 8]), pred_masks=torch.tensor([3]), pred_boxes=Boxes(torch.ones((1, 4))) + ) + obj = [3, ([5, 6], inst)] + res, schema = flatten_to_tuple(obj) + self.assertEqual(res[:3], (3, 5, 6)) + for r, expected in zip(res[3:], (inst.pred_boxes.tensor, inst.pred_masks, inst.image_size)): + self.assertIs(r, expected) + new_obj = schema(res) + assert_instances_allclose(new_obj[1][1], inst, rtol=0.0, size_as_tensor=True) + + self._check_schema(schema) + + def test_allow_non_tensor(self): + data = (torch.tensor([5, 8]), 3) # contains non-tensor + + class M(nn.Module): + def forward(self, input, number): + return input + + model = M() + with self.assertRaisesRegex(ValueError, "must only contain tensors"): + adap = TracingAdapter(model, data, allow_non_tensor=False) + + adap = TracingAdapter(model, data, allow_non_tensor=True) + _ = adap(*adap.flattened_inputs) + + newdata = (data[0].clone(),) + with self.assertRaisesRegex(ValueError, "cannot generalize"): + _ = adap(*newdata) diff --git 
a/data_processing/detectron2/tests/test_model_analysis.py b/data_processing/detectron2/tests/test_model_analysis.py new file mode 100644 index 0000000..c01b7af --- /dev/null +++ b/data_processing/detectron2/tests/test_model_analysis.py @@ -0,0 +1,80 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + + +import unittest +import torch +from torch import nn + +from detectron2.utils.analysis import find_unused_parameters, flop_count_operators, parameter_count +from detectron2.utils.testing import get_model_no_weights + + +class RetinaNetTest(unittest.TestCase): + def setUp(self): + self.model = get_model_no_weights("COCO-Detection/retinanet_R_50_FPN_1x.yaml") + + def test_flop(self): + # RetinaNet supports flop-counting with random inputs + inputs = [{"image": torch.rand(3, 800, 800), "test_unused": "abcd"}] + res = flop_count_operators(self.model, inputs) + self.assertEqual(int(res["conv"]), 146) # 146B flops + + def test_param_count(self): + res = parameter_count(self.model) + self.assertEqual(res[""], 37915572) + self.assertEqual(res["backbone"], 31452352) + + +class FasterRCNNTest(unittest.TestCase): + def setUp(self): + self.model = get_model_no_weights("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml") + + def test_flop(self): + # Faster R-CNN supports flop-counting with random inputs + inputs = [{"image": torch.rand(3, 800, 800)}] + res = flop_count_operators(self.model, inputs) + + # This only checks flops for backbone & proposal generator + # Flops for box head is not conv, and depends on #proposals, which is + # almost 0 for random inputs. + self.assertEqual(int(res["conv"]), 117) + + def test_flop_with_output_shape(self): + inputs = [{"image": torch.rand(3, 800, 800), "height": 700, "width": 700}] + res = flop_count_operators(self.model, inputs) + self.assertEqual(int(res["conv"]), 117) + + def test_param_count(self): + res = parameter_count(self.model) + self.assertEqual(res[""], 41699936) + self.assertEqual(res["backbone"], 26799296) + + +class MaskRCNNTest(unittest.TestCase): + def setUp(self): + self.model = get_model_no_weights("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml") + + def test_flop(self): + inputs1 = [{"image": torch.rand(3, 800, 800)}] + inputs2 = [{"image": torch.rand(3, 800, 800), "height": 700, "width": 700}] + + for inputs in [inputs1, inputs2]: + res = flop_count_operators(self.model, inputs) + # The mask head could have extra conv flops, so total >= 117 + self.assertGreaterEqual(int(res["conv"]), 117) + + +class UnusedParamTest(unittest.TestCase): + def test_unused(self): + class TestMod(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(10, 10) + self.t = nn.Linear(10, 10) + + def forward(self, x): + return self.fc1(x).mean() + + m = TestMod() + ret = find_unused_parameters(m, torch.randn(10, 10)) + self.assertEqual(set(ret), {"t.weight", "t.bias"}) diff --git a/data_processing/detectron2/tests/test_model_zoo.py b/data_processing/detectron2/tests/test_model_zoo.py new file mode 100644 index 0000000..e3360a7 --- /dev/null +++ b/data_processing/detectron2/tests/test_model_zoo.py @@ -0,0 +1,50 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
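+# Tests for the model_zoo API: resolving config names to models and checkpoint
+# URLs, and instantiating the LazyConfig-based common models and schedules.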
+import logging +import unittest + +from detectron2 import model_zoo +from detectron2.config import instantiate +from detectron2.modeling import FPN, GeneralizedRCNN + +logger = logging.getLogger(__name__) + + +class TestModelZoo(unittest.TestCase): + def test_get_returns_model(self): + model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False) + self.assertIsInstance(model, GeneralizedRCNN) + self.assertIsInstance(model.backbone, FPN) + + def test_get_invalid_model(self): + self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml") + + def test_get_url(self): + url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml") + self.assertEqual( + url, + "https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl", # noqa + ) + url2 = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.py") + self.assertEqual(url, url2) + + def _build_lazy_model(self, name): + cfg = model_zoo.get_config("common/models/" + name) + instantiate(cfg.model) + + def test_mask_rcnn_fpn(self): + self._build_lazy_model("mask_rcnn_fpn.py") + + def test_mask_rcnn_c4(self): + self._build_lazy_model("mask_rcnn_c4.py") + + def test_panoptic_fpn(self): + self._build_lazy_model("panoptic_fpn.py") + + def test_schedule(self): + cfg = model_zoo.get_config("common/coco_schedule.py") + for _, v in cfg.items(): + instantiate(v) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/test_packaging.py b/data_processing/detectron2/tests/test_packaging.py new file mode 100644 index 0000000..a5b1661 --- /dev/null +++ b/data_processing/detectron2/tests/test_packaging.py @@ -0,0 +1,24 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import unittest + +from detectron2.utils.collect_env import collect_env_info + + +class TestProjects(unittest.TestCase): + def test_import(self): + from detectron2.projects import point_rend + + _ = point_rend.add_pointrend_config + + import detectron2.projects.deeplab as deeplab + + _ = deeplab.add_deeplab_config + + # import detectron2.projects.panoptic_deeplab as panoptic_deeplab + + # _ = panoptic_deeplab.add_panoptic_deeplab_config + + +class TestCollectEnv(unittest.TestCase): + def test(self): + _ = collect_env_info() diff --git a/data_processing/detectron2/tests/test_registry.py b/data_processing/detectron2/tests/test_registry.py new file mode 100644 index 0000000..4e425a6 --- /dev/null +++ b/data_processing/detectron2/tests/test_registry.py @@ -0,0 +1,45 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
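+# Tests for detectron2.utils.registry name resolution: _convert_target_to_string
+# and locate() should round-trip classes, nested classes, builtins, and torch objects.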
+import unittest +import torch + +from detectron2.modeling.meta_arch import GeneralizedRCNN +from detectron2.utils.registry import _convert_target_to_string, locate + + +class A: + class B: + pass + + +class TestLocate(unittest.TestCase): + def _test_obj(self, obj): + name = _convert_target_to_string(obj) + newobj = locate(name) + self.assertIs(obj, newobj) + + def test_basic(self): + self._test_obj(GeneralizedRCNN) + + def test_inside_class(self): + # requires using __qualname__ instead of __name__ + self._test_obj(A.B) + + def test_builtin(self): + self._test_obj(len) + self._test_obj(dict) + + def test_pytorch_optim(self): + # pydoc.locate does not work for it + self._test_obj(torch.optim.SGD) + + def test_failure(self): + with self.assertRaises(ImportError): + locate("asdf") + + def test_compress_target(self): + from detectron2.data.transforms import RandomCrop + + name = _convert_target_to_string(RandomCrop) + # name shouldn't contain 'augmentation_impl' + self.assertEqual(name, "detectron2.data.transforms.RandomCrop") + self.assertIs(RandomCrop, locate(name)) diff --git a/data_processing/detectron2/tests/test_scheduler.py b/data_processing/detectron2/tests/test_scheduler.py new file mode 100644 index 0000000..5649a4a --- /dev/null +++ b/data_processing/detectron2/tests/test_scheduler.py @@ -0,0 +1,158 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import math +import numpy as np +from unittest import TestCase +import torch +from fvcore.common.param_scheduler import ( + CosineParamScheduler, + MultiStepParamScheduler, + StepWithFixedGammaParamScheduler, +) +from torch import nn + +from detectron2.solver import LRMultiplier, WarmupParamScheduler, build_lr_scheduler + + +class TestScheduler(TestCase): + def test_warmup_multistep(self): + p = nn.Parameter(torch.zeros(0)) + opt = torch.optim.SGD([p], lr=5) + + multiplier = WarmupParamScheduler( + MultiStepParamScheduler( + [1, 0.1, 0.01, 0.001], + milestones=[10, 15, 20], + num_updates=30, + ), + 0.001, + 5 / 30, + ) + sched = LRMultiplier(opt, multiplier, 30) + # This is an equivalent of: + # sched = WarmupMultiStepLR( + # opt, milestones=[10, 15, 20], gamma=0.1, warmup_factor=0.001, warmup_iters=5) + + p.sum().backward() + opt.step() + + lrs = [0.005] + for _ in range(30): + sched.step() + lrs.append(opt.param_groups[0]["lr"]) + self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001])) + self.assertTrue(np.allclose(lrs[5:10], 5.0)) + self.assertTrue(np.allclose(lrs[10:15], 0.5)) + self.assertTrue(np.allclose(lrs[15:20], 0.05)) + self.assertTrue(np.allclose(lrs[20:], 0.005)) + + def test_warmup_cosine(self): + p = nn.Parameter(torch.zeros(0)) + opt = torch.optim.SGD([p], lr=5) + multiplier = WarmupParamScheduler( + CosineParamScheduler(1, 0), + 0.001, + 5 / 30, + ) + sched = LRMultiplier(opt, multiplier, 30) + + p.sum().backward() + opt.step() + self.assertEqual(opt.param_groups[0]["lr"], 0.005) + lrs = [0.005] + + for _ in range(30): + sched.step() + lrs.append(opt.param_groups[0]["lr"]) + for idx, lr in enumerate(lrs): + expected_cosine = 2.5 * (1.0 + math.cos(math.pi * idx / 30)) + if idx >= 5: + self.assertAlmostEqual(lr, expected_cosine) + else: + self.assertNotAlmostEqual(lr, expected_cosine) + + def test_warmup_cosine_end_value(self): + from detectron2.config import CfgNode, get_cfg + + def _test_end_value(cfg_dict): + cfg = get_cfg() + cfg.merge_from_other_cfg(CfgNode(cfg_dict)) + + p = nn.Parameter(torch.zeros(0)) + opt = torch.optim.SGD([p], lr=cfg.SOLVER.BASE_LR) + + scheduler = 
build_lr_scheduler(cfg, opt) + + p.sum().backward() + opt.step() + self.assertEqual( + opt.param_groups[0]["lr"], cfg.SOLVER.BASE_LR * cfg.SOLVER.WARMUP_FACTOR + ) + + lrs = [] + for _ in range(cfg.SOLVER.MAX_ITER): + scheduler.step() + lrs.append(opt.param_groups[0]["lr"]) + + self.assertAlmostEqual(lrs[-1], cfg.SOLVER.BASE_LR_END) + + _test_end_value( + { + "SOLVER": { + "LR_SCHEDULER_NAME": "WarmupCosineLR", + "MAX_ITER": 100, + "WARMUP_ITERS": 10, + "WARMUP_FACTOR": 0.1, + "BASE_LR": 5.0, + "BASE_LR_END": 0.0, + } + } + ) + + _test_end_value( + { + "SOLVER": { + "LR_SCHEDULER_NAME": "WarmupCosineLR", + "MAX_ITER": 100, + "WARMUP_ITERS": 10, + "WARMUP_FACTOR": 0.1, + "BASE_LR": 5.0, + "BASE_LR_END": 0.5, + } + } + ) + + def test_warmup_stepwithfixedgamma(self): + p = nn.Parameter(torch.zeros(0)) + opt = torch.optim.SGD([p], lr=5) + + multiplier = WarmupParamScheduler( + StepWithFixedGammaParamScheduler( + base_value=1.0, + gamma=0.1, + num_decays=4, + num_updates=30, + ), + 0.001, + 5 / 30, + rescale_interval=True, + ) + sched = LRMultiplier(opt, multiplier, 30) + + p.sum().backward() + opt.step() + + lrs = [0.005] + for _ in range(29): + sched.step() + lrs.append(opt.param_groups[0]["lr"]) + self.assertTrue(np.allclose(lrs[:5], [0.005, 1.004, 2.003, 3.002, 4.001])) + self.assertTrue(np.allclose(lrs[5:10], 5.0)) + self.assertTrue(np.allclose(lrs[10:15], 0.5)) + self.assertTrue(np.allclose(lrs[15:20], 0.05)) + self.assertTrue(np.allclose(lrs[20:25], 0.005)) + self.assertTrue(np.allclose(lrs[25:], 0.0005)) + + # Calling sche.step() after the last training iteration is done will trigger IndexError + with self.assertRaises(IndexError, msg="list index out of range"): + sched.step() diff --git a/data_processing/detectron2/tests/test_solver.py b/data_processing/detectron2/tests/test_solver.py new file mode 100644 index 0000000..6b3ae84 --- /dev/null +++ b/data_processing/detectron2/tests/test_solver.py @@ -0,0 +1,66 @@ +import unittest + +from detectron2.solver.build import _expand_param_groups, reduce_param_groups + + +class TestOptimizer(unittest.TestCase): + def testExpandParamsGroups(self): + params = [ + { + "params": ["p1", "p2", "p3", "p4"], + "lr": 1.0, + "weight_decay": 3.0, + }, + { + "params": ["p2", "p3", "p5"], + "lr": 2.0, + "momentum": 2.0, + }, + { + "params": ["p1"], + "weight_decay": 4.0, + }, + ] + out = _expand_param_groups(params) + gt = [ + dict(params=["p1"], lr=1.0, weight_decay=4.0), # noqa + dict(params=["p2"], lr=2.0, weight_decay=3.0, momentum=2.0), # noqa + dict(params=["p3"], lr=2.0, weight_decay=3.0, momentum=2.0), # noqa + dict(params=["p4"], lr=1.0, weight_decay=3.0), # noqa + dict(params=["p5"], lr=2.0, momentum=2.0), # noqa + ] + self.assertEqual(out, gt) + + def testReduceParamGroups(self): + params = [ + dict(params=["p1"], lr=1.0, weight_decay=4.0), # noqa + dict(params=["p2", "p6"], lr=2.0, weight_decay=3.0, momentum=2.0), # noqa + dict(params=["p3"], lr=2.0, weight_decay=3.0, momentum=2.0), # noqa + dict(params=["p4"], lr=1.0, weight_decay=3.0), # noqa + dict(params=["p5"], lr=2.0, momentum=2.0), # noqa + ] + gt_groups = [ + { + "lr": 1.0, + "weight_decay": 4.0, + "params": ["p1"], + }, + { + "lr": 2.0, + "weight_decay": 3.0, + "momentum": 2.0, + "params": ["p2", "p6", "p3"], + }, + { + "lr": 1.0, + "weight_decay": 3.0, + "params": ["p4"], + }, + { + "lr": 2.0, + "momentum": 2.0, + "params": ["p5"], + }, + ] + out = reduce_param_groups(params) + self.assertEqual(out, gt_groups) diff --git a/data_processing/detectron2/tests/test_visualizer.py 
b/data_processing/detectron2/tests/test_visualizer.py new file mode 100644 index 0000000..646e5f3 --- /dev/null +++ b/data_processing/detectron2/tests/test_visualizer.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import numpy as np +import os +import tempfile +import unittest +import cv2 +import torch + +from detectron2.data import MetadataCatalog +from detectron2.structures import BoxMode, Instances, RotatedBoxes +from detectron2.utils.visualizer import ColorMode, Visualizer + + +class TestVisualizer(unittest.TestCase): + def _random_data(self): + H, W = 100, 100 + N = 10 + img = np.random.rand(H, W, 3) * 255 + boxxy = np.random.rand(N, 2) * (H // 2) + boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1) + + def _rand_poly(): + return np.random.rand(3, 2).flatten() * H + + polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)] + + mask = np.zeros_like(img[:, :, 0], dtype=bool) + mask[:40, 10:20] = 1 + + labels = [str(i) for i in range(N)] + return img, boxes, labels, polygons, [mask] * N + + @property + def metadata(self): + return MetadataCatalog.get("coco_2017_train") + + def test_draw_dataset_dict(self): + img = np.random.rand(512, 512, 3) * 255 + dic = { + "annotations": [ + { + "bbox": [ + 368.9946492271106, + 330.891438763377, + 13.148537455410235, + 13.644708680142685, + ], + "bbox_mode": BoxMode.XYWH_ABS, + "category_id": 0, + "iscrowd": 1, + "segmentation": { + "counts": "_jh52m?2N2N2N2O100O10O001N1O2MceP2", + "size": [512, 512], + }, + } + ], + "height": 512, + "image_id": 1, + "width": 512, + } + v = Visualizer(img) + v.draw_dataset_dict(dic) + + v = Visualizer(img, self.metadata) + v.draw_dataset_dict(dic) + + def test_draw_rotated_dataset_dict(self): + img = np.random.rand(512, 512, 3) * 255 + dic = { + "annotations": [ + { + "bbox": [ + 368.9946492271106, + 330.891438763377, + 13.148537455410235, + 13.644708680142685, + 45.0, + ], + "bbox_mode": BoxMode.XYWHA_ABS, + "category_id": 0, + "iscrowd": 1, + } + ], + "height": 512, + "image_id": 1, + "width": 512, + } + v = Visualizer(img, self.metadata) + v.draw_dataset_dict(dic) + + def test_overlay_instances(self): + img, boxes, labels, polygons, masks = self._random_data() + + v = Visualizer(img, self.metadata) + output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + # Test 2x scaling + v = Visualizer(img, self.metadata, scale=2.0) + output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape[0], img.shape[0] * 2) + + # Test overlay masks + v = Visualizer(img, self.metadata) + output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + def test_overlay_instances_no_boxes(self): + img, boxes, labels, polygons, _ = self._random_data() + v = Visualizer(img, self.metadata) + v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image() + + def test_draw_instance_predictions(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + inst.pred_masks = torch.from_numpy(np.asarray(masks)) + + v = Visualizer(img) + v.draw_instance_predictions(inst) + + v = Visualizer(img, self.metadata) + v.draw_instance_predictions(inst) + + 
def test_BWmode_nomask(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + + v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW) + v.draw_instance_predictions(inst) + + # check that output is grayscale + inst = inst[:0] + v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW) + output = v.draw_instance_predictions(inst).get_image() + self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 1])) + self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 2])) + + def test_draw_empty_mask_predictions(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks))) + + v = Visualizer(img, self.metadata) + v.draw_instance_predictions(inst) + + def test_correct_output_shape(self): + img = np.random.rand(928, 928, 3) * 255 + v = Visualizer(img, self.metadata) + out = v.output.get_image() + self.assertEqual(out.shape, img.shape) + + def test_overlay_rotated_instances(self): + H, W = 100, 150 + img = np.random.rand(H, W, 3) * 255 + num_boxes = 50 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H)) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H)) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + rotated_boxes = RotatedBoxes(boxes_5d) + labels = [str(i) for i in range(num_boxes)] + + v = Visualizer(img, self.metadata) + output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + def test_draw_no_metadata(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + inst.pred_masks = torch.from_numpy(np.asarray(masks)) + + v = Visualizer(img, MetadataCatalog.get("asdfasdf")) + v.draw_instance_predictions(inst) + + def test_draw_binary_mask(self): + img, boxes, _, _, masks = self._random_data() + img[:, :, 0] = 0 # remove red color + mask = masks[0] + mask_with_hole = np.zeros_like(mask).astype("uint8") + mask_with_hole = cv2.rectangle(mask_with_hole, (10, 10), (50, 50), 1, 5) + + for m in [mask, mask_with_hole]: + for save in [True, False]: + v = Visualizer(img) + o = v.draw_binary_mask(m, color="red", text="test") + if save: + with tempfile.TemporaryDirectory(prefix="detectron2_viz") as d: + path = os.path.join(d, "output.png") + o.save(path) + o = cv2.imread(path)[:, :, ::-1] + else: + o = o.get_image().astype("float32") + # red color is drawn on the image + self.assertTrue(o[:, :, 0].sum() > 0) + + def test_draw_soft_mask(self): + img = np.random.rand(100, 100, 3) * 255 + img[:, :, 0] = 0 # remove red color + mask = np.zeros((100, 100), dtype=np.float32) + mask[30:50, 40:50] = 1.0 + cv2.GaussianBlur(mask, (21, 21), 10) + + v = Visualizer(img) + o = 
v.draw_soft_mask(mask, color="red", text="test") + o = o.get_image().astype("float32") + # red color is drawn on the image + self.assertTrue(o[:, :, 0].sum() > 0) + + # test draw empty mask + v = Visualizer(img) + o = v.draw_soft_mask(np.zeros((100, 100), dtype=np.float32), color="red", text="test") + o = o.get_image().astype("float32") + + def test_border_mask_with_holes(self): + H, W = 200, 200 + img = np.zeros((H, W, 3)) + img[:, :, 0] = 255.0 + v = Visualizer(img, scale=3) + + mask = np.zeros((H, W)) + mask[:, 100:150] = 1 + # create a hole, to trigger imshow + mask = cv2.rectangle(mask, (110, 110), (130, 130), 0, thickness=-1) + output = v.draw_binary_mask(mask, color="blue") + output = output.get_image()[:, :, ::-1] + + first_row = {tuple(x.tolist()) for x in output[0]} + last_row = {tuple(x.tolist()) for x in output[-1]} + # Check quantization / off-by-1 error: the first and last row must have two colors + self.assertEqual(len(last_row), 2) + self.assertEqual(len(first_row), 2) + self.assertIn((0, 0, 255), last_row) + self.assertIn((0, 0, 255), first_row) + + def test_border_polygons(self): + H, W = 200, 200 + img = np.zeros((H, W, 3)) + img[:, :, 0] = 255.0 + v = Visualizer(img, scale=3) + mask = np.zeros((H, W)) + mask[:, 100:150] = 1 + + output = v.draw_binary_mask(mask, color="blue") + output = output.get_image()[:, :, ::-1] + + first_row = {tuple(x.tolist()) for x in output[0]} + last_row = {tuple(x.tolist()) for x in output[-1]} + # Check quantization / off-by-1 error: + # the first and last row must have >=2 colors, because the polygon + # touches both rows + self.assertGreaterEqual(len(last_row), 2) + self.assertGreaterEqual(len(first_row), 2) + self.assertIn((0, 0, 255), last_row) + self.assertIn((0, 0, 255), first_row) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/tracking/__init__.py b/data_processing/detectron2/tests/tracking/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/tests/tracking/test_bbox_iou_tracker.py b/data_processing/detectron2/tests/tracking/test_bbox_iou_tracker.py new file mode 100644 index 0000000..e720b2e --- /dev/null +++ b/data_processing/detectron2/tests/tracking/test_bbox_iou_tracker.py @@ -0,0 +1,160 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
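+# Tests for BBoxIOUTracker: two synthetic frames of detections are used to check
+# construction from a LazyConfig dict and from a CfgNode, initialization of the
+# extra tracking fields, and IoU-based ID assignment across frames.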
+import numpy as np +import unittest +from copy import deepcopy +from typing import Dict +import torch + +from detectron2.config import CfgNode as CfgNode_ +from detectron2.config import instantiate +from detectron2.structures import Boxes, Instances +from detectron2.tracking.base_tracker import build_tracker_head +from detectron2.tracking.bbox_iou_tracker import BBoxIOUTracker # noqa + + +class TestBBoxIOUTracker(unittest.TestCase): + def setUp(self): + self._img_size = np.array([600, 800]) + self._prev_boxes = np.array( + [ + [101, 101, 200, 200], + [301, 301, 450, 450], + ] + ).astype(np.float32) + self._prev_scores = np.array([0.9, 0.9]) + self._prev_classes = np.array([1, 1]) + self._prev_masks = np.ones((2, 600, 800)).astype("uint8") + self._curr_boxes = np.array( + [ + [302, 303, 451, 452], + [101, 102, 201, 203], + ] + ).astype(np.float32) + self._curr_scores = np.array([0.95, 0.85]) + self._curr_classes = np.array([1, 1]) + self._curr_masks = np.ones((2, 600, 800)).astype("uint8") + + self._prev_instances = { + "image_size": self._img_size, + "pred_boxes": self._prev_boxes, + "scores": self._prev_scores, + "pred_classes": self._prev_classes, + "pred_masks": self._prev_masks, + } + self._prev_instances = self._convertDictPredictionToInstance(self._prev_instances) + self._curr_instances = { + "image_size": self._img_size, + "pred_boxes": self._curr_boxes, + "scores": self._curr_scores, + "pred_classes": self._curr_classes, + "pred_masks": self._curr_masks, + } + self._curr_instances = self._convertDictPredictionToInstance(self._curr_instances) + + self._max_num_instances = 200 + self._max_lost_frame_count = 0 + self._min_box_rel_dim = 0.02 + self._min_instance_period = 1 + self._track_iou_threshold = 0.5 + + def _convertDictPredictionToInstance(self, prediction: Dict) -> Instances: + """ + convert prediction from Dict to D2 Instances format + """ + res = Instances( + image_size=torch.IntTensor(prediction["image_size"]), + pred_boxes=Boxes(torch.FloatTensor(prediction["pred_boxes"])), + pred_masks=torch.IntTensor(prediction["pred_masks"]), + pred_classes=torch.IntTensor(prediction["pred_classes"]), + scores=torch.FloatTensor(prediction["scores"]), + ) + return res + + def test_init(self): + cfg = { + "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + self.assertTrue(tracker._video_height == self._img_size[0]) + + def test_from_config(self): + cfg = CfgNode_() + cfg.TRACKER_HEADS = CfgNode_() + cfg.TRACKER_HEADS.TRACKER_NAME = "BBoxIOUTracker" + cfg.TRACKER_HEADS.VIDEO_HEIGHT = int(self._img_size[0]) + cfg.TRACKER_HEADS.VIDEO_WIDTH = int(self._img_size[1]) + cfg.TRACKER_HEADS.MAX_NUM_INSTANCES = self._max_num_instances + cfg.TRACKER_HEADS.MAX_LOST_FRAME_COUNT = self._max_lost_frame_count + cfg.TRACKER_HEADS.MIN_BOX_REL_DIM = self._min_box_rel_dim + cfg.TRACKER_HEADS.MIN_INSTANCE_PERIOD = self._min_instance_period + cfg.TRACKER_HEADS.TRACK_IOU_THRESHOLD = self._track_iou_threshold + tracker = build_tracker_head(cfg) + self.assertTrue(tracker._video_height == self._img_size[0]) + + def test_initialize_extra_fields(self): + cfg = { + "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", + "video_height": 
self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + instances = tracker._initialize_extra_fields(self._curr_instances) + self.assertTrue(instances.has("ID")) + self.assertTrue(instances.has("ID_period")) + self.assertTrue(instances.has("lost_frame_count")) + + def test_assign_new_id(self): + cfg = { + "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + instances = deepcopy(self._curr_instances) + instances = tracker._initialize_extra_fields(instances) + instances = tracker._assign_new_id(instances) + self.assertTrue(len(instances.ID) == 2) + self.assertTrue(instances.ID[0] == 2) + self.assertTrue(instances.ID[1] == 3) + + def test_update(self): + cfg = { + "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + prev_instances = tracker.update(self._prev_instances) + self.assertTrue(len(prev_instances.ID) == 2) + self.assertTrue(prev_instances.ID[0] == 0) + self.assertTrue(prev_instances.ID[1] == 1) + curr_instances = tracker.update(self._curr_instances) + self.assertTrue(len(curr_instances.ID) == 2) + self.assertTrue(curr_instances.ID[0] == 1) + self.assertTrue(curr_instances.ID[1] == 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/tracking/test_hungarian_tracker.py b/data_processing/detectron2/tests/tracking/test_hungarian_tracker.py new file mode 100644 index 0000000..660c635 --- /dev/null +++ b/data_processing/detectron2/tests/tracking/test_hungarian_tracker.py @@ -0,0 +1,102 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
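+# Tests for BaseHungarianTracker: construction from a LazyConfig dict and
+# initialization of the extra per-instance tracking fields (ID, ID_period,
+# lost_frame_count).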
+import numpy as np +import unittest +from typing import Dict +import torch + +from detectron2.config import instantiate +from detectron2.structures import Boxes, Instances + + +class TestBaseHungarianTracker(unittest.TestCase): + def setUp(self): + self._img_size = np.array([600, 800]) + self._prev_boxes = np.array( + [ + [101, 101, 200, 200], + [301, 301, 450, 450], + ] + ).astype(np.float32) + self._prev_scores = np.array([0.9, 0.9]) + self._prev_classes = np.array([1, 1]) + self._prev_masks = np.ones((2, 600, 800)).astype("uint8") + self._curr_boxes = np.array( + [ + [302, 303, 451, 452], + [101, 102, 201, 203], + ] + ).astype(np.float32) + self._curr_scores = np.array([0.95, 0.85]) + self._curr_classes = np.array([1, 1]) + self._curr_masks = np.ones((2, 600, 800)).astype("uint8") + + self._prev_instances = { + "image_size": self._img_size, + "pred_boxes": self._prev_boxes, + "scores": self._prev_scores, + "pred_classes": self._prev_classes, + "pred_masks": self._prev_masks, + } + self._prev_instances = self._convertDictPredictionToInstance(self._prev_instances) + self._curr_instances = { + "image_size": self._img_size, + "pred_boxes": self._curr_boxes, + "scores": self._curr_scores, + "pred_classes": self._curr_classes, + "pred_masks": self._curr_masks, + } + self._curr_instances = self._convertDictPredictionToInstance(self._curr_instances) + + self._max_num_instances = 200 + self._max_lost_frame_count = 0 + self._min_box_rel_dim = 0.02 + self._min_instance_period = 1 + self._track_iou_threshold = 0.5 + + def _convertDictPredictionToInstance(self, prediction: Dict) -> Instances: + """ + convert prediction from Dict to D2 Instances format + """ + res = Instances( + image_size=torch.IntTensor(prediction["image_size"]), + pred_boxes=Boxes(torch.FloatTensor(prediction["pred_boxes"])), + pred_masks=torch.IntTensor(prediction["pred_masks"]), + pred_classes=torch.IntTensor(prediction["pred_classes"]), + scores=torch.FloatTensor(prediction["scores"]), + ) + return res + + def test_init(self): + cfg = { + "_target_": "detectron2.tracking.hungarian_tracker.BaseHungarianTracker", + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + self.assertTrue(tracker._video_height == self._img_size[0]) + + def test_initialize_extra_fields(self): + cfg = { + "_target_": "detectron2.tracking.hungarian_tracker.BaseHungarianTracker", + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + instances = tracker._initialize_extra_fields(self._curr_instances) + self.assertTrue(instances.has("ID")) + self.assertTrue(instances.has("ID_period")) + self.assertTrue(instances.has("lost_frame_count")) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/tracking/test_iou_weighted_hungarian_bbox_iou_tracker.py b/data_processing/detectron2/tests/tracking/test_iou_weighted_hungarian_bbox_iou_tracker.py new file mode 100644 index 0000000..6947399 --- /dev/null +++ 
b/data_processing/detectron2/tests/tracking/test_iou_weighted_hungarian_bbox_iou_tracker.py @@ -0,0 +1,225 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import numpy as np +import unittest +from typing import Dict +import torch + +from detectron2.config import CfgNode as CfgNode_ +from detectron2.config import instantiate +from detectron2.structures import Boxes, Instances +from detectron2.tracking.base_tracker import build_tracker_head +from detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker import ( # noqa + IOUWeightedHungarianBBoxIOUTracker, +) + + +class TestIOUWeightedHungarianBBoxIOUTracker(unittest.TestCase): + def setUp(self): + self._img_size = np.array([600, 800]) + self._prev_boxes = np.array( + [ + [101, 101, 200, 200], + [301, 301, 450, 450], + ] + ).astype(np.float32) + self._prev_scores = np.array([0.9, 0.9]) + self._prev_classes = np.array([1, 1]) + self._prev_masks = np.ones((2, 600, 800)).astype("uint8") + self._curr_boxes = np.array( + [ + [302, 303, 451, 452], + [101, 102, 201, 203], + ] + ).astype(np.float32) + self._curr_scores = np.array([0.95, 0.85]) + self._curr_classes = np.array([1, 1]) + self._curr_masks = np.ones((2, 600, 800)).astype("uint8") + + self._prev_instances = { + "image_size": self._img_size, + "pred_boxes": self._prev_boxes, + "scores": self._prev_scores, + "pred_classes": self._prev_classes, + "pred_masks": self._prev_masks, + } + self._prev_instances = self._convertDictPredictionToInstance(self._prev_instances) + self._curr_instances = { + "image_size": self._img_size, + "pred_boxes": self._curr_boxes, + "scores": self._curr_scores, + "pred_classes": self._curr_classes, + "pred_masks": self._curr_masks, + } + self._curr_instances = self._convertDictPredictionToInstance(self._curr_instances) + + self._max_num_instances = 10 + self._max_lost_frame_count = 3 + self._min_box_rel_dim = 0.02 + self._min_instance_period = 1 + self._track_iou_threshold = 0.5 + + def _convertDictPredictionToInstance(self, prediction: Dict) -> Instances: + """ + convert prediction from Dict to D2 Instances format + """ + res = Instances( + image_size=torch.IntTensor(prediction["image_size"]), + pred_boxes=Boxes(torch.FloatTensor(prediction["pred_boxes"])), + pred_masks=torch.IntTensor(prediction["pred_masks"]), + pred_classes=torch.IntTensor(prediction["pred_classes"]), + scores=torch.FloatTensor(prediction["scores"]), + ) + return res + + def test_init(self): + cfg = { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + self.assertTrue(tracker._video_height == self._img_size[0]) + + def test_from_config(self): + cfg = CfgNode_() + cfg.TRACKER_HEADS = CfgNode_() + cfg.TRACKER_HEADS.TRACKER_NAME = "IOUWeightedHungarianBBoxIOUTracker" + cfg.TRACKER_HEADS.VIDEO_HEIGHT = int(self._img_size[0]) + cfg.TRACKER_HEADS.VIDEO_WIDTH = int(self._img_size[1]) + cfg.TRACKER_HEADS.MAX_NUM_INSTANCES = self._max_num_instances + cfg.TRACKER_HEADS.MAX_LOST_FRAME_COUNT = self._max_lost_frame_count + cfg.TRACKER_HEADS.MIN_BOX_REL_DIM = self._min_box_rel_dim + cfg.TRACKER_HEADS.MIN_INSTANCE_PERIOD = self._min_instance_period + 
cfg.TRACKER_HEADS.TRACK_IOU_THRESHOLD = self._track_iou_threshold + tracker = build_tracker_head(cfg) + self.assertTrue(tracker._video_height == self._img_size[0]) + + def test_initialize_extra_fields(self): + cfg = { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + instances = tracker._initialize_extra_fields(self._curr_instances) + self.assertTrue(instances.has("ID")) + self.assertTrue(instances.has("ID_period")) + self.assertTrue(instances.has("lost_frame_count")) + + def test_process_matched_idx(self): + cfg = { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + prev_instances = tracker._initialize_extra_fields(self._prev_instances) + tracker._prev_instances = prev_instances + curr_instances = tracker._initialize_extra_fields(self._curr_instances) + matched_idx = np.array([0]) + matched_prev_idx = np.array([1]) + curr_instances = tracker._process_matched_idx(curr_instances, matched_idx, matched_prev_idx) + self.assertTrue(curr_instances.ID[0] == 1) + + def test_process_unmatched_idx(self): + cfg = { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + prev_instances = tracker._initialize_extra_fields(self._prev_instances) + tracker._prev_instances = prev_instances + curr_instances = tracker._initialize_extra_fields(self._curr_instances) + matched_idx = np.array([0]) + matched_prev_idx = np.array([1]) + curr_instances = tracker._process_matched_idx(curr_instances, matched_idx, matched_prev_idx) + curr_instances = tracker._process_unmatched_idx(curr_instances, matched_idx) + self.assertTrue(curr_instances.ID[1] == 2) + + def test_process_unmatched_prev_idx(self): + cfg = { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + prev_instances = tracker._initialize_extra_fields(self._prev_instances) + prev_instances.ID_period = [3, 3] + tracker._prev_instances = prev_instances + curr_instances = 
tracker._initialize_extra_fields(self._curr_instances) + matched_idx = np.array([0]) + matched_prev_idx = np.array([1]) + curr_instances = tracker._process_matched_idx(curr_instances, matched_idx, matched_prev_idx) + curr_instances = tracker._process_unmatched_idx(curr_instances, matched_idx) + curr_instances = tracker._process_unmatched_prev_idx(curr_instances, matched_prev_idx) + self.assertTrue(curr_instances.ID[2] == 0) + + def test_assign_cost_matrix_values(self): + cfg = { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + pair1 = {"idx": 0, "prev_idx": 1, "IoU": 0.6} + pair2 = {"idx": 1, "prev_idx": 0, "IoU": 0.8} + bbox_pairs = [pair1, pair2] + cost_matrix = np.full((2, 2), np.inf) + target_matrix = copy.deepcopy(cost_matrix) + target_matrix[0, 1] = -0.6 + target_matrix[1, 0] = -0.8 + cost_matrix = tracker.assign_cost_matrix_values(cost_matrix, bbox_pairs) + self.assertTrue(np.allclose(cost_matrix, target_matrix)) + + def test_update(self): + cfg = { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + _ = tracker.update(self._prev_instances) + curr_instances = tracker.update(self._curr_instances) + self.assertTrue(curr_instances.ID[0] == 1) + self.assertTrue(curr_instances.ID[1] == 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/tracking/test_vanilla_hungarian_bbox_iou_tracker.py b/data_processing/detectron2/tests/tracking/test_vanilla_hungarian_bbox_iou_tracker.py new file mode 100644 index 0000000..c33e3d9 --- /dev/null +++ b/data_processing/detectron2/tests/tracking/test_vanilla_hungarian_bbox_iou_tracker.py @@ -0,0 +1,225 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
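
The vanilla tracker tested next differs from the IoU-weighted one above only in how matched pairs enter the Hungarian cost matrix: `test_assign_cost_matrix_values` above expects `-IoU` entries, while the vanilla version further down expects a flat `-1`. The sketch below illustrates what that difference means for the assignment step; `scipy.optimize.linear_sum_assignment` is used only to stand in for the Hungarian solver and is an assumption about an equivalent solver, not a statement about the tracker's internals.

```python
# Illustration of the cost-matrix convention checked by test_assign_cost_matrix_values.
# A large finite cost stands in for the np.inf used in the tests, keeping the
# example solver-agnostic.
import numpy as np
from scipy.optimize import linear_sum_assignment

FORBIDDEN = 1e6  # pairs below the IoU threshold

# rows: current detections, cols: previous detections
iou_weighted = np.array([
    [FORBIDDEN, -0.6],   # curr 0 matches prev 1 with IoU 0.6
    [-0.8, FORBIDDEN],   # curr 1 matches prev 0 with IoU 0.8
])
vanilla = np.where(iou_weighted >= FORBIDDEN, FORBIDDEN, -1.0)

# Hungarian matching minimizes total cost, so the weighted variant prefers
# higher-IoU pairs when assignments compete, while the vanilla variant treats
# every above-threshold pair equally.
rows, cols = linear_sum_assignment(iou_weighted)
print(list(zip(rows.tolist(), cols.tolist())))  # [(0, 1), (1, 0)]
```
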
+import copy +import numpy as np +import unittest +from typing import Dict +import torch + +from detectron2.config import CfgNode as CfgNode_ +from detectron2.config import instantiate +from detectron2.structures import Boxes, Instances +from detectron2.tracking.base_tracker import build_tracker_head +from detectron2.tracking.vanilla_hungarian_bbox_iou_tracker import ( # noqa + VanillaHungarianBBoxIOUTracker, +) + + +class TestVanillaHungarianBBoxIOUTracker(unittest.TestCase): + def setUp(self): + self._img_size = np.array([600, 800]) + self._prev_boxes = np.array( + [ + [101, 101, 200, 200], + [301, 301, 450, 450], + ] + ).astype(np.float32) + self._prev_scores = np.array([0.9, 0.9]) + self._prev_classes = np.array([1, 1]) + self._prev_masks = np.ones((2, 600, 800)).astype("uint8") + self._curr_boxes = np.array( + [ + [302, 303, 451, 452], + [101, 102, 201, 203], + ] + ).astype(np.float32) + self._curr_scores = np.array([0.95, 0.85]) + self._curr_classes = np.array([1, 1]) + self._curr_masks = np.ones((2, 600, 800)).astype("uint8") + + self._prev_instances = { + "image_size": self._img_size, + "pred_boxes": self._prev_boxes, + "scores": self._prev_scores, + "pred_classes": self._prev_classes, + "pred_masks": self._prev_masks, + } + self._prev_instances = self._convertDictPredictionToInstance(self._prev_instances) + self._curr_instances = { + "image_size": self._img_size, + "pred_boxes": self._curr_boxes, + "scores": self._curr_scores, + "pred_classes": self._curr_classes, + "pred_masks": self._curr_masks, + } + self._curr_instances = self._convertDictPredictionToInstance(self._curr_instances) + + self._max_num_instances = 10 + self._max_lost_frame_count = 3 + self._min_box_rel_dim = 0.02 + self._min_instance_period = 1 + self._track_iou_threshold = 0.5 + + def _convertDictPredictionToInstance(self, prediction: Dict) -> Instances: + """ + convert prediction from Dict to D2 Instances format + """ + res = Instances( + image_size=torch.IntTensor(prediction["image_size"]), + pred_boxes=Boxes(torch.FloatTensor(prediction["pred_boxes"])), + pred_masks=torch.IntTensor(prediction["pred_masks"]), + pred_classes=torch.IntTensor(prediction["pred_classes"]), + scores=torch.FloatTensor(prediction["scores"]), + ) + return res + + def test_init(self): + cfg = { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + self.assertTrue(tracker._video_height == self._img_size[0]) + + def test_from_config(self): + cfg = CfgNode_() + cfg.TRACKER_HEADS = CfgNode_() + cfg.TRACKER_HEADS.TRACKER_NAME = "VanillaHungarianBBoxIOUTracker" + cfg.TRACKER_HEADS.VIDEO_HEIGHT = int(self._img_size[0]) + cfg.TRACKER_HEADS.VIDEO_WIDTH = int(self._img_size[1]) + cfg.TRACKER_HEADS.MAX_NUM_INSTANCES = self._max_num_instances + cfg.TRACKER_HEADS.MAX_LOST_FRAME_COUNT = self._max_lost_frame_count + cfg.TRACKER_HEADS.MIN_BOX_REL_DIM = self._min_box_rel_dim + cfg.TRACKER_HEADS.MIN_INSTANCE_PERIOD = self._min_instance_period + cfg.TRACKER_HEADS.TRACK_IOU_THRESHOLD = self._track_iou_threshold + tracker = build_tracker_head(cfg) + self.assertTrue(tracker._video_height == self._img_size[0]) + + def test_initialize_extra_fields(self): 
+ cfg = { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + instances = tracker._initialize_extra_fields(self._curr_instances) + self.assertTrue(instances.has("ID")) + self.assertTrue(instances.has("ID_period")) + self.assertTrue(instances.has("lost_frame_count")) + + def test_process_matched_idx(self): + cfg = { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + prev_instances = tracker._initialize_extra_fields(self._prev_instances) + tracker._prev_instances = prev_instances + curr_instances = tracker._initialize_extra_fields(self._curr_instances) + matched_idx = np.array([0]) + matched_prev_idx = np.array([1]) + curr_instances = tracker._process_matched_idx(curr_instances, matched_idx, matched_prev_idx) + self.assertTrue(curr_instances.ID[0] == 1) + + def test_process_unmatched_idx(self): + cfg = { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + prev_instances = tracker._initialize_extra_fields(self._prev_instances) + tracker._prev_instances = prev_instances + curr_instances = tracker._initialize_extra_fields(self._curr_instances) + matched_idx = np.array([0]) + matched_prev_idx = np.array([1]) + curr_instances = tracker._process_matched_idx(curr_instances, matched_idx, matched_prev_idx) + curr_instances = tracker._process_unmatched_idx(curr_instances, matched_idx) + self.assertTrue(curr_instances.ID[1] == 2) + + def test_process_unmatched_prev_idx(self): + cfg = { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + prev_instances = tracker._initialize_extra_fields(self._prev_instances) + prev_instances.ID_period = [3, 3] + tracker._prev_instances = prev_instances + curr_instances = tracker._initialize_extra_fields(self._curr_instances) + matched_idx = np.array([0]) + matched_prev_idx = np.array([1]) + curr_instances = tracker._process_matched_idx(curr_instances, matched_idx, matched_prev_idx) + curr_instances = 
tracker._process_unmatched_idx(curr_instances, matched_idx) + curr_instances = tracker._process_unmatched_prev_idx(curr_instances, matched_prev_idx) + self.assertTrue(curr_instances.ID[2] == 0) + + def test_assign_cost_matrix_values(self): + cfg = { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + pair1 = {"idx": 0, "prev_idx": 1} + pair2 = {"idx": 1, "prev_idx": 0} + bbox_pairs = [pair1, pair2] + cost_matrix = np.full((2, 2), np.inf) + target_matrix = copy.deepcopy(cost_matrix) + target_matrix[0, 1] = -1 + target_matrix[1, 0] = -1 + cost_matrix = tracker.assign_cost_matrix_values(cost_matrix, bbox_pairs) + self.assertTrue(np.allclose(cost_matrix, target_matrix)) + + def test_update(self): + cfg = { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": self._img_size[0], + "video_width": self._img_size[1], + "max_num_instances": self._max_num_instances, + "max_lost_frame_count": self._max_lost_frame_count, + "min_box_rel_dim": self._min_box_rel_dim, + "min_instance_period": self._min_instance_period, + "track_iou_threshold": self._track_iou_threshold, + } + tracker = instantiate(cfg) + _ = tracker.update(self._prev_instances) + curr_instances = tracker.update(self._curr_instances) + self.assertTrue(curr_instances.ID[0] == 1) + self.assertTrue(curr_instances.ID[1] == 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/data_processing/detectron2/tests/utils/test_tensorboardx.py b/data_processing/detectron2/tests/utils/test_tensorboardx.py new file mode 100644 index 0000000..885fb8d --- /dev/null +++ b/data_processing/detectron2/tests/utils/test_tensorboardx.py @@ -0,0 +1,23 @@ +import os +import tempfile +import unittest + +from detectron2.utils.events import TensorboardXWriter + + +# TODO Fix up capitalization +class TestTensorboardXWriter(unittest.TestCase): + def test_no_files_created(self) -> None: + with tempfile.TemporaryDirectory() as tmp_dir: + writer = TensorboardXWriter(tmp_dir) + writer.close() + + self.assertFalse(os.listdir(tmp_dir)) + + def test_single_write(self) -> None: + with tempfile.TemporaryDirectory() as tmp_dir: + writer = TensorboardXWriter(tmp_dir) + writer._writer.add_scalar("testing", 1, 1) + writer.close() + + self.assertTrue(os.listdir(tmp_dir)) diff --git a/data_processing/detectron2/tools/README.md b/data_processing/detectron2/tools/README.md new file mode 100644 index 0000000..0b40d53 --- /dev/null +++ b/data_processing/detectron2/tools/README.md @@ -0,0 +1,49 @@ + +This directory contains a few example scripts that demonstrate features of detectron2. + + +* `train_net.py` + +An example training script that's made to train builtin models of detectron2. + +For usage, see [GETTING_STARTED.md](../GETTING_STARTED.md). + +* `plain_train_net.py` + +Similar to `train_net.py`, but implements a training loop instead of using `Trainer`. +This script includes fewer features but it may be more friendly to hackers. + +* `benchmark.py` + +Benchmark the training speed, inference speed or data loading speed of a given config. 
+ +Usage: +``` +python benchmark.py --config-file config.yaml --task train/eval/data [optional DDP flags] +``` + +* `analyze_model.py` + +Analyze FLOPs, parameters, activations of a detectron2 model. See its `--help` for usage. + +* `visualize_json_results.py` + +Visualize the json instance detection/segmentation results dumped by `COCOEvalutor` or `LVISEvaluator` + +Usage: +``` +python visualize_json_results.py --input x.json --output dir/ --dataset coco_2017_val +``` +If not using a builtin dataset, you'll need your own script or modify this script. + +* `visualize_data.py` + +Visualize ground truth raw annotations or training data (after preprocessing/augmentations). + +Usage: +``` +python visualize_data.py --config-file config.yaml --source annotation/dataloader --output-dir dir/ [--show] +``` + +NOTE: the script does not stop by itself when using `--source dataloader` because a training +dataloader is usually infinite. diff --git a/data_processing/detectron2/tools/__init__.py b/data_processing/detectron2/tools/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/detectron2/tools/analyze_model.py b/data_processing/detectron2/tools/analyze_model.py new file mode 100644 index 0000000..8e38f8b --- /dev/null +++ b/data_processing/detectron2/tools/analyze_model.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +import numpy as np +from collections import Counter +import tqdm +from fvcore.nn import flop_count_table # can also try flop_count_str + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate +from detectron2.data import build_detection_test_loader +from detectron2.engine import default_argument_parser +from detectron2.modeling import build_model +from detectron2.utils.analysis import ( + FlopCountAnalysis, + activation_count_operators, + parameter_count_table, +) +from detectron2.utils.logger import setup_logger + +logger = logging.getLogger("detectron2") + + +def setup(args): + if args.config_file.endswith(".yaml"): + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.DATALOADER.NUM_WORKERS = 0 + cfg.merge_from_list(args.opts) + cfg.freeze() + else: + cfg = LazyConfig.load(args.config_file) + cfg = LazyConfig.apply_overrides(cfg, args.opts) + setup_logger(name="fvcore") + setup_logger() + return cfg + + +def do_flop(cfg): + if isinstance(cfg, CfgNode): + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + else: + data_loader = instantiate(cfg.dataloader.test) + model = instantiate(cfg.model) + model.to(cfg.train.device) + DetectionCheckpointer(model).load(cfg.train.init_checkpoint) + model.eval() + + counts = Counter() + total_flops = [] + for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa + flops = FlopCountAnalysis(model, data) + if idx > 0: + flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False) + counts += flops.by_operator() + total_flops.append(flops.total()) + + logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops)) + logger.info( + "Average GFlops for each type of operators:\n" + + str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()]) + ) + logger.info( + "Total GFlops: {:.1f}±{:.1f}".format(np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9) + ) + + +def do_activation(cfg): + if isinstance(cfg, CfgNode): + 
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + else: + data_loader = instantiate(cfg.dataloader.test) + model = instantiate(cfg.model) + model.to(cfg.train.device) + DetectionCheckpointer(model).load(cfg.train.init_checkpoint) + model.eval() + + counts = Counter() + total_activations = [] + for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa + count = activation_count_operators(model, data) + counts += count + total_activations.append(sum(count.values())) + logger.info( + "(Million) Activations for Each Type of Operators:\n" + + str([(k, v / idx) for k, v in counts.items()]) + ) + logger.info( + "Total (Million) Activations: {}±{}".format( + np.mean(total_activations), np.std(total_activations) + ) + ) + + +def do_parameter(cfg): + if isinstance(cfg, CfgNode): + model = build_model(cfg) + else: + model = instantiate(cfg.model) + logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5)) + + +def do_structure(cfg): + if isinstance(cfg, CfgNode): + model = build_model(cfg) + else: + model = instantiate(cfg.model) + logger.info("Model Structure:\n" + str(model)) + + +if __name__ == "__main__": + parser = default_argument_parser( + epilog=""" +Examples: + +To show parameters of a model: +$ ./analyze_model.py --tasks parameter \\ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml + +Flops and activations are data-dependent, therefore inputs and model weights +are needed to count them: + +$ ./analyze_model.py --num-inputs 100 --tasks flop \\ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\ + MODEL.WEIGHTS /path/to/model.pkl +""" + ) + parser.add_argument( + "--tasks", + choices=["flop", "activation", "parameter", "structure"], + required=True, + nargs="+", + ) + parser.add_argument( + "-n", + "--num-inputs", + default=100, + type=int, + help="number of inputs used to compute statistics for flops/activations, " + "both are data dependent.", + ) + args = parser.parse_args() + assert not args.eval_only + assert args.num_gpus == 1 + + cfg = setup(args) + + for task in args.tasks: + { + "flop": do_flop, + "activation": do_activation, + "parameter": do_parameter, + "structure": do_structure, + }[task](cfg) diff --git a/data_processing/detectron2/tools/benchmark.py b/data_processing/detectron2/tools/benchmark.py new file mode 100644 index 0000000..c2d673f --- /dev/null +++ b/data_processing/detectron2/tools/benchmark.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. +""" +A script to benchmark builtin models. + +Note: this script has an extra dependency of psutil. 
+""" + +import itertools +import logging +import psutil +import torch +import tqdm +from fvcore.common.timer import Timer +from torch.nn.parallel import DistributedDataParallel + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import LazyConfig, get_cfg, instantiate +from detectron2.data import ( + DatasetFromList, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.data.benchmark import DataLoaderBenchmark +from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch +from detectron2.modeling import build_model +from detectron2.solver import build_optimizer +from detectron2.utils import comm +from detectron2.utils.collect_env import collect_env_info +from detectron2.utils.events import CommonMetricPrinter +from detectron2.utils.logger import setup_logger + +logger = logging.getLogger("detectron2") + + +def setup(args): + if args.config_file.endswith(".yaml"): + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. Not useful in this script anyway. + cfg.merge_from_list(args.opts) + cfg.freeze() + else: + cfg = LazyConfig.load(args.config_file) + cfg = LazyConfig.apply_overrides(cfg, args.opts) + setup_logger(distributed_rank=comm.get_rank()) + return cfg + + +def create_data_benchmark(cfg, args): + if args.config_file.endswith(".py"): + dl_cfg = cfg.dataloader.train + dl_cfg._target_ = DataLoaderBenchmark + return instantiate(dl_cfg) + else: + kwargs = build_detection_train_loader.from_config(cfg) + kwargs.pop("aspect_ratio_grouping", None) + kwargs["_target_"] = DataLoaderBenchmark + return instantiate(kwargs) + + +def RAM_msg(): + vram = psutil.virtual_memory() + return "RAM Usage: {:.2f}/{:.2f} GB".format( + (vram.total - vram.available) / 1024**3, vram.total / 1024**3 + ) + + +def benchmark_data(args): + cfg = setup(args) + logger.info("After spawning " + RAM_msg()) + + benchmark = create_data_benchmark(cfg, args) + benchmark.benchmark_distributed(250, 10) + # test for a few more rounds + for k in range(10): + logger.info(f"Iteration {k} " + RAM_msg()) + benchmark.benchmark_distributed(250, 1) + + +def benchmark_data_advanced(args): + # benchmark dataloader with more details to help analyze performance bottleneck + cfg = setup(args) + benchmark = create_data_benchmark(cfg, args) + + if comm.get_rank() == 0: + benchmark.benchmark_dataset(100) + benchmark.benchmark_mapper(100) + benchmark.benchmark_workers(100, warmup=10) + benchmark.benchmark_IPC(100, warmup=10) + if comm.get_world_size() > 1: + benchmark.benchmark_distributed(100) + logger.info("Rerun ...") + benchmark.benchmark_distributed(100) + + +def benchmark_train(args): + cfg = setup(args) + model = build_model(cfg) + logger.info("Model:\n{}".format(model)) + if comm.get_world_size() > 1: + model = DistributedDataParallel( + model, device_ids=[comm.get_local_rank()], broadcast_buffers=False + ) + optimizer = build_optimizer(cfg, model) + checkpointer = DetectionCheckpointer(model, optimizer=optimizer) + checkpointer.load(cfg.MODEL.WEIGHTS) + + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 2 + data_loader = build_detection_train_loader(cfg) + dummy_data = list(itertools.islice(data_loader, 100)) + + def f(): + data = DatasetFromList(dummy_data, copy=False, serialize=False) + while True: + yield from data + + max_iter = 400 + trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(model, f(), optimizer) + trainer.register_hooks( + [ + hooks.IterationTimer(), + 
hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]), + hooks.TorchProfiler( + lambda trainer: trainer.iter == max_iter - 1, cfg.OUTPUT_DIR, save_tensorboard=True + ), + ] + ) + trainer.train(1, max_iter) + + +@torch.no_grad() +def benchmark_eval(args): + cfg = setup(args) + if args.config_file.endswith(".yaml"): + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + else: + model = instantiate(cfg.model) + model.to(cfg.train.device) + DetectionCheckpointer(model).load(cfg.train.init_checkpoint) + + cfg.dataloader.num_workers = 0 + data_loader = instantiate(cfg.dataloader.test) + + model.eval() + logger.info("Model:\n{}".format(model)) + dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False) + + def f(): + while True: + yield from dummy_data + + for k in range(5): # warmup + model(dummy_data[k]) + + max_iter = 300 + timer = Timer() + with tqdm.tqdm(total=max_iter) as pbar: + for idx, d in enumerate(f()): + if idx == max_iter: + break + model(d) + pbar.update() + logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds())) + + +if __name__ == "__main__": + parser = default_argument_parser() + parser.add_argument("--task", choices=["train", "eval", "data", "data_advanced"], required=True) + args = parser.parse_args() + assert not args.eval_only + + logger.info("Environment info:\n" + collect_env_info()) + if "data" in args.task: + print("Initial " + RAM_msg()) + if args.task == "data": + f = benchmark_data + if args.task == "data_advanced": + f = benchmark_data_advanced + elif args.task == "train": + """ + Note: training speed may not be representative. + The training cost of a R-CNN model varies with the content of the data + and the quality of the model. + """ + f = benchmark_train + elif args.task == "eval": + f = benchmark_eval + # only benchmark single-GPU inference. + assert args.num_gpus == 1 and args.num_machines == 1 + launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,)) diff --git a/data_processing/detectron2/tools/convert-torchvision-to-d2.py b/data_processing/detectron2/tools/convert-torchvision-to-d2.py new file mode 100644 index 0000000..4b827d9 --- /dev/null +++ b/data_processing/detectron2/tools/convert-torchvision-to-d2.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. + +import pickle as pkl +import sys +import torch + +""" +Usage: + # download one of the ResNet{18,34,50,101,152} models from torchvision: + wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth + # run the conversion + ./convert-torchvision-to-d2.py r50.pth r50.pkl + + # Then, use r50.pkl with the following changes in config: + +MODEL: + WEIGHTS: "/path/to/r50.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + RESNETS: + DEPTH: 50 + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" + + These models typically produce slightly worse results than the + pre-trained ResNets we use in official configs, which are the + original ResNet models released by MSRA. +""" + +if __name__ == "__main__": + input = sys.argv[1] + + obj = torch.load(input, map_location="cpu") + + newmodel = {} + for k in list(obj.keys()): + old_k = k + if "layer" not in k: + k = "stem." 
+ k + for t in [1, 2, 3, 4]: + k = k.replace("layer{}".format(t), "res{}".format(t + 1)) + for t in [1, 2, 3]: + k = k.replace("bn{}".format(t), "conv{}.norm".format(t)) + k = k.replace("downsample.0", "shortcut") + k = k.replace("downsample.1", "shortcut.norm") + print(old_k, "->", k) + newmodel[k] = obj.pop(old_k).detach().numpy() + + res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True} + + with open(sys.argv[2], "wb") as f: + pkl.dump(res, f) + if obj: + print("Unconverted keys:", obj.keys()) diff --git a/data_processing/detectron2/tools/deploy/CMakeLists.txt b/data_processing/detectron2/tools/deploy/CMakeLists.txt new file mode 100644 index 0000000..80dae12 --- /dev/null +++ b/data_processing/detectron2/tools/deploy/CMakeLists.txt @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# See https://pytorch.org/tutorials/advanced/cpp_frontend.html +cmake_minimum_required(VERSION 3.12 FATAL_ERROR) +project(torchscript_mask_rcnn) + +find_package(Torch REQUIRED) +find_package(OpenCV REQUIRED) +find_package(TorchVision REQUIRED) # needed by export-method=tracing/scripting + +add_executable(torchscript_mask_rcnn torchscript_mask_rcnn.cpp) +target_link_libraries( + torchscript_mask_rcnn + -Wl,--no-as-needed TorchVision::TorchVision -Wl,--as-needed + "${TORCH_LIBRARIES}" ${OpenCV_LIBS}) +set_property(TARGET torchscript_mask_rcnn PROPERTY CXX_STANDARD 14) diff --git a/data_processing/detectron2/tools/deploy/README.md b/data_processing/detectron2/tools/deploy/README.md new file mode 100644 index 0000000..e33cbeb --- /dev/null +++ b/data_processing/detectron2/tools/deploy/README.md @@ -0,0 +1,66 @@ +See [deployment tutorial](https://detectron2.readthedocs.io/tutorials/deployment.html) +for some high-level background about deployment. + +This directory contains the following examples: + +1. An example script `export_model.py` + that exports a detectron2 model for deployment using different methods and formats. + +2. A C++ example that runs inference with Mask R-CNN model in TorchScript format. + +## Build +Deployment depends on libtorch and OpenCV. Some require more dependencies: + +* Running TorchScript-format models produced by `--export-method=caffe2_tracing` requires libtorch + to be built with caffe2 enabled. +* Running TorchScript-format models produced by `--export-method=tracing/scripting` requires libtorchvision (C++ library of torchvision). + +All methods are supported in one C++ file that requires all the above dependencies. +Adjust it and remove code you don't need. +As a reference, we provide a [Dockerfile](../../docker/deploy.Dockerfile) that installs all the above dependencies and builds the C++ example. + +## Use + +We show a few example commands to export and execute a Mask R-CNN model in C++. 
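
Once `export_model.py` has produced `./output/model.ts` with the tracing method (the first command below), the exported module can also be smoke-tested directly from Python, independent of the C++ build. This is a hedged sanity check, not part of the diff; the path and image size are placeholders, and on some models the input size may need to match the one used at export time.

```python
# Hedged sanity check for a tracing-exported model; placeholder path.
import torch

ts_model = torch.jit.load("./output/model.ts")
ts_model.eval()

# Mirror the C++ example: the whole model lives on one device, read it from a buffer.
device = next(ts_model.buffers()).device

# The tracing export takes a single CHW float tensor (export_model.py traces with
# `(image,)`); a random image is only a smoke test, not a meaningful input.
image = torch.rand(3, 480, 640, device=device) * 255.0
with torch.no_grad():
    outputs = ts_model(image)

# For the tracing export, outputs are a flat tuple of tensors ordered alphabetically
# by Instances field name (pred_boxes, pred_classes, pred_masks, scores).
print([tuple(o.shape) for o in outputs])
```
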
+ +* `export-method=tracing, format=torchscript`: +``` +./export_model.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + --output ./output --export-method tracing --format torchscript \ + MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \ + MODEL.DEVICE cuda + +./build/torchscript_mask_rcnn output/model.ts input.jpg tracing +``` + +* `export-method=scripting, format=torchscript`: +``` +./export_model.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + --output ./output --export-method scripting --format torchscript \ + MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \ + +./build/torchscript_mask_rcnn output/model.ts input.jpg scripting +``` + +* `export-method=caffe2_tracing, format=torchscript`: + +``` +./export_model.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + --output ./output --export-method caffe2_tracing --format torchscript \ + MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \ + +./build/torchscript_mask_rcnn output/model.ts input.jpg caffe2_tracing +``` + + +## Notes: + +1. Tracing/Caffe2-tracing requires valid weights & sample inputs. + Therefore the above commands require pre-trained models and [COCO dataset](https://detectron2.readthedocs.io/tutorials/builtin_datasets.html). + You can modify the script to obtain sample inputs in other ways instead of from COCO. + +2. `--run-eval` is implemented only for tracing mode + to evaluate the exported model using the dataset in the config. + It's recommended to always verify the accuracy in case the conversion is not successful. + Evaluation can be slow if model is exported to CPU or dataset is too large ("coco_2017_val_100" is a small subset of COCO useful for evaluation). + `caffe2_tracing` accuracy may be slightly different (within 0.1 AP) from original model due to numerical precisions between different runtime. diff --git a/data_processing/detectron2/tools/deploy/export_model.py b/data_processing/detectron2/tools/deploy/export_model.py new file mode 100644 index 0000000..f507dff --- /dev/null +++ b/data_processing/detectron2/tools/deploy/export_model.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
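
Before the full script, a condensed sketch of the core of the tracing path that `export_model.py` (added below) wraps with argument parsing, ONNX support, and optional evaluation. Assumptions are flagged in comments: the config and weight paths are placeholders, and the zero image only keeps the sketch self-contained; the real script traces on an actual sample input for a meaningful graph.

```python
# Condensed sketch of the tracing export; paths are placeholders, not part of the diff.
import torch

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.export import TracingAdapter
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.merge_from_file("configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.WEIGHTS = "model_final.pkl"  # placeholder
cfg.MODEL.DEVICE = "cpu"  # keep the sketch CPU-only; export_model.py reads the device from cfg/opts
model = build_model(cfg)
DetectionCheckpointer(model).resume_or_load(cfg.MODEL.WEIGHTS)
model.eval()

# export_model.py uses a real sample input (a COCO batch or --sample-image);
# a zero image keeps this self-contained but would trace a degenerate graph.
image = torch.zeros(3, 800, 800)

def inference(model, inputs):
    # skip the final resize step, as export_tracing() below does
    inst = model.inference(inputs, do_postprocess=False)[0]
    return [{"instances": inst}]

adapter = TracingAdapter(model, [{"image": image}], inference)
ts = torch.jit.trace(adapter, (image,))
ts.save("model.ts")
```
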
+import argparse +import os +from typing import Dict, List, Tuple +import torch +from torch import Tensor, nn + +import detectron2.data.transforms as T +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import build_detection_test_loader, detection_utils +from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format +from detectron2.export import ( + STABLE_ONNX_OPSET_VERSION, + TracingAdapter, + dump_torchscript_IR, + scripting_with_instances, +) +from detectron2.modeling import GeneralizedRCNN, RetinaNet, build_model +from detectron2.modeling.postprocessing import detector_postprocess +from detectron2.projects.point_rend import add_pointrend_config +from detectron2.structures import Boxes +from detectron2.utils.env import TORCH_VERSION +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import setup_logger + + +def setup_cfg(args): + cfg = get_cfg() + # cuda context is initialized before creating dataloader, so we don't fork anymore + cfg.DATALOADER.NUM_WORKERS = 0 + add_pointrend_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + return cfg + + +def export_caffe2_tracing(cfg, torch_model, inputs): + from detectron2.export import Caffe2Tracer + + tracer = Caffe2Tracer(cfg, torch_model, inputs) + if args.format == "caffe2": + caffe2_model = tracer.export_caffe2() + caffe2_model.save_protobuf(args.output) + # draw the caffe2 graph + caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs) + return caffe2_model + elif args.format == "onnx": + import onnx + + onnx_model = tracer.export_onnx() + onnx.save(onnx_model, os.path.join(args.output, "model.onnx")) + elif args.format == "torchscript": + ts_model = tracer.export_torchscript() + with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f: + torch.jit.save(ts_model, f) + dump_torchscript_IR(ts_model, args.output) + + +# experimental. API not yet final +def export_scripting(torch_model): + assert TORCH_VERSION >= (1, 8) + fields = { + "proposal_boxes": Boxes, + "objectness_logits": Tensor, + "pred_boxes": Boxes, + "scores": Tensor, + "pred_classes": Tensor, + "pred_masks": Tensor, + "pred_keypoints": torch.Tensor, + "pred_keypoint_heatmaps": torch.Tensor, + } + assert args.format == "torchscript", "Scripting only supports torchscript format." + + class ScriptableAdapterBase(nn.Module): + # Use this adapter to workaround https://github.com/pytorch/pytorch/issues/46944 + # by not retuning instances but dicts. 
Otherwise the exported model is not deployable + def __init__(self): + super().__init__() + self.model = torch_model + self.eval() + + if isinstance(torch_model, GeneralizedRCNN): + + class ScriptableAdapter(ScriptableAdapterBase): + def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]: + instances = self.model.inference(inputs, do_postprocess=False) + return [i.get_fields() for i in instances] + + else: + + class ScriptableAdapter(ScriptableAdapterBase): + def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]: + instances = self.model(inputs) + return [i.get_fields() for i in instances] + + ts_model = scripting_with_instances(ScriptableAdapter(), fields) + with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f: + torch.jit.save(ts_model, f) + dump_torchscript_IR(ts_model, args.output) + # TODO inference in Python now missing postprocessing glue code + return None + + +# experimental. API not yet final +def export_tracing(torch_model, inputs): + assert TORCH_VERSION >= (1, 8) + image = inputs[0]["image"] + inputs = [{"image": image}] # remove other unused keys + + if isinstance(torch_model, GeneralizedRCNN): + + def inference(model, inputs): + # use do_postprocess=False so it returns ROI mask + inst = model.inference(inputs, do_postprocess=False)[0] + return [{"instances": inst}] + + else: + inference = None # assume that we just call the model directly + + traceable_model = TracingAdapter(torch_model, inputs, inference) + + if args.format == "torchscript": + ts_model = torch.jit.trace(traceable_model, (image,)) + with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f: + torch.jit.save(ts_model, f) + dump_torchscript_IR(ts_model, args.output) + elif args.format == "onnx": + with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f: + torch.onnx.export(traceable_model, (image,), f, opset_version=STABLE_ONNX_OPSET_VERSION) + logger.info("Inputs schema: " + str(traceable_model.inputs_schema)) + logger.info("Outputs schema: " + str(traceable_model.outputs_schema)) + + if args.format != "torchscript": + return None + if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)): + return None + + def eval_wrapper(inputs): + """ + The exported model does not contain the final resize step, which is typically + unused in deployment but needed for evaluation. We add it manually here. 
+ """ + input = inputs[0] + instances = traceable_model.outputs_schema(ts_model(input["image"]))[0]["instances"] + postprocessed = detector_postprocess(instances, input["height"], input["width"]) + return [{"instances": postprocessed}] + + return eval_wrapper + + +def get_sample_inputs(args): + + if args.sample_image is None: + # get a first batch from dataset + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + first_batch = next(iter(data_loader)) + return first_batch + else: + # get a sample data + original_image = detection_utils.read_image(args.sample_image, format=cfg.INPUT.FORMAT) + # Do same preprocessing as DefaultPredictor + aug = T.ResizeShortestEdge( + [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST + ) + height, width = original_image.shape[:2] + image = aug.get_transform(original_image).apply_image(original_image) + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) + + inputs = {"image": image, "height": height, "width": width} + + # Sample ready + sample_inputs = [inputs] + return sample_inputs + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Export a model for deployment.") + parser.add_argument( + "--format", + choices=["caffe2", "onnx", "torchscript"], + help="output format", + default="torchscript", + ) + parser.add_argument( + "--export-method", + choices=["caffe2_tracing", "tracing", "scripting"], + help="Method to export models", + default="tracing", + ) + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument("--sample-image", default=None, type=str, help="sample image for input") + parser.add_argument("--run-eval", action="store_true") + parser.add_argument("--output", help="output directory for the converted model") + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + args = parser.parse_args() + logger = setup_logger() + logger.info("Command line arguments: " + str(args)) + PathManager.mkdirs(args.output) + # Disable re-specialization on new shapes. Otherwise --run-eval will be slow + torch._C._jit_set_bailout_depth(1) + + cfg = setup_cfg(args) + + # create a torch model + torch_model = build_model(cfg) + DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS) + torch_model.eval() + + # convert and save model + if args.export_method == "caffe2_tracing": + sample_inputs = get_sample_inputs(args) + exported_model = export_caffe2_tracing(cfg, torch_model, sample_inputs) + elif args.export_method == "scripting": + exported_model = export_scripting(torch_model) + elif args.export_method == "tracing": + sample_inputs = get_sample_inputs(args) + exported_model = export_tracing(torch_model, sample_inputs) + + # run evaluation with the converted model + if args.run_eval: + assert exported_model is not None, ( + "Python inference is not yet implemented for " + f"export_method={args.export_method}, format={args.format}." + ) + logger.info("Running evaluation ... this takes a long time if you export to CPU.") + dataset = cfg.DATASETS.TEST[0] + data_loader = build_detection_test_loader(cfg, dataset) + # NOTE: hard-coded evaluator. 
change to the evaluator for your dataset + evaluator = COCOEvaluator(dataset, output_dir=args.output) + metrics = inference_on_dataset(exported_model, data_loader, evaluator) + print_csv_format(metrics) + logger.info("Success.") diff --git a/data_processing/detectron2/tools/deploy/torchscript_mask_rcnn.cpp b/data_processing/detectron2/tools/deploy/torchscript_mask_rcnn.cpp new file mode 100644 index 0000000..fd6e1e9 --- /dev/null +++ b/data_processing/detectron2/tools/deploy/torchscript_mask_rcnn.cpp @@ -0,0 +1,188 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// @lint-ignore-every CLANGTIDY +// This is an example code that demonstrates how to run inference +// with a torchscript format Mask R-CNN model exported by ./export_model.py +// using export method=tracing, caffe2_tracing & scripting. + +#include +#include +#include + +#include +#include +#include +#include + +// only needed for export_method=tracing +#include // @oss-only +// @fb-only: #include + +using namespace std; + +c10::IValue get_caffe2_tracing_inputs(cv::Mat& img, c10::Device device) { + const int height = img.rows; + const int width = img.cols; + // FPN models require divisibility of 32. + // Tracing mode does padding inside the graph, but caffe2_tracing does not. + assert(height % 32 == 0 && width % 32 == 0); + const int channels = 3; + + auto input = + torch::from_blob(img.data, {1, height, width, channels}, torch::kUInt8); + // NHWC to NCHW + input = input.to(device, torch::kFloat).permute({0, 3, 1, 2}).contiguous(); + + std::array im_info_data{height * 1.0f, width * 1.0f, 1.0f}; + auto im_info = + torch::from_blob(im_info_data.data(), {1, 3}).clone().to(device); + return std::make_tuple(input, im_info); +} + +c10::IValue get_tracing_inputs(cv::Mat& img, c10::Device device) { + const int height = img.rows; + const int width = img.cols; + const int channels = 3; + + auto input = + torch::from_blob(img.data, {height, width, channels}, torch::kUInt8); + // HWC to CHW + input = input.to(device, torch::kFloat).permute({2, 0, 1}).contiguous(); + return input; +} + +// create a Tuple[Dict[str, Tensor]] which is the input type of scripted model +c10::IValue get_scripting_inputs(cv::Mat& img, c10::Device device) { + const int height = img.rows; + const int width = img.cols; + const int channels = 3; + + auto img_tensor = + torch::from_blob(img.data, {height, width, channels}, torch::kUInt8); + // HWC to CHW + img_tensor = + img_tensor.to(device, torch::kFloat).permute({2, 0, 1}).contiguous(); + auto dic = c10::Dict(); + dic.insert("image", img_tensor); + return std::make_tuple(dic); +} + +c10::IValue +get_inputs(std::string export_method, cv::Mat& img, c10::Device device) { + // Given an image, create inputs in the format required by the model. + if (export_method == "tracing") + return get_tracing_inputs(img, device); + if (export_method == "caffe2_tracing") + return get_caffe2_tracing_inputs(img, device); + if (export_method == "scripting") + return get_scripting_inputs(img, device); + abort(); +} + +struct MaskRCNNOutputs { + at::Tensor pred_boxes, pred_classes, pred_masks, scores; + int num_instances() const { + return pred_boxes.sizes()[0]; + } +}; + +MaskRCNNOutputs get_outputs(std::string export_method, c10::IValue outputs) { + // Given outputs of the model, extract tensors from it to turn into a + // common MaskRCNNOutputs format. 
+ if (export_method == "tracing") { + auto out_tuple = outputs.toTuple()->elements(); + // They are ordered alphabetically by their field name in Instances + return MaskRCNNOutputs{ + out_tuple[0].toTensor(), + out_tuple[1].toTensor(), + out_tuple[2].toTensor(), + out_tuple[3].toTensor()}; + } + if (export_method == "caffe2_tracing") { + auto out_tuple = outputs.toTuple()->elements(); + // A legacy order used by caffe2 models + return MaskRCNNOutputs{ + out_tuple[0].toTensor(), + out_tuple[2].toTensor(), + out_tuple[3].toTensor(), + out_tuple[1].toTensor()}; + } + if (export_method == "scripting") { + // With the ScriptableAdapter defined in export_model.py, the output is + // List[Dict[str, Any]]. + auto out_dict = outputs.toList().get(0).toGenericDict(); + return MaskRCNNOutputs{ + out_dict.at("pred_boxes").toTensor(), + out_dict.at("pred_classes").toTensor(), + out_dict.at("pred_masks").toTensor(), + out_dict.at("scores").toTensor()}; + } + abort(); +} + +int main(int argc, const char* argv[]) { + if (argc != 4) { + cerr << R"xx( +Usage: + ./torchscript_mask_rcnn model.ts input.jpg EXPORT_METHOD + + EXPORT_METHOD can be "tracing", "caffe2_tracing" or "scripting". +)xx"; + return 1; + } + std::string image_file = argv[2]; + std::string export_method = argv[3]; + assert( + export_method == "caffe2_tracing" || export_method == "tracing" || + export_method == "scripting"); + + torch::jit::FusionStrategy strat = {{torch::jit::FusionBehavior::DYNAMIC, 1}}; + torch::jit::setFusionStrategy(strat); + torch::autograd::AutoGradMode guard(false); + auto module = torch::jit::load(argv[1]); + + assert(module.buffers().size() > 0); + // Assume that the entire model is on the same device. + // We just put input to this device. + auto device = (*begin(module.buffers())).device(); + + cv::Mat input_img = cv::imread(image_file, cv::IMREAD_COLOR); + auto inputs = get_inputs(export_method, input_img, device); + + // Run the network + auto output = module.forward({inputs}); + if (device.is_cuda()) + c10::cuda::getCurrentCUDAStream().synchronize(); + + // run 3 more times to benchmark + int N_benchmark = 3, N_warmup = 1; + auto start_time = chrono::high_resolution_clock::now(); + for (int i = 0; i < N_benchmark + N_warmup; ++i) { + if (i == N_warmup) + start_time = chrono::high_resolution_clock::now(); + output = module.forward({inputs}); + if (device.is_cuda()) + c10::cuda::getCurrentCUDAStream().synchronize(); + } + auto end_time = chrono::high_resolution_clock::now(); + auto ms = chrono::duration_cast(end_time - start_time) + .count(); + cout << "Latency (should vary with different inputs): " + << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl; + + // Parse Mask R-CNN outputs + auto rcnn_outputs = get_outputs(export_method, output); + cout << "Number of detected objects: " << rcnn_outputs.num_instances() + << endl; + + cout << "pred_boxes: " << rcnn_outputs.pred_boxes.toString() << " " + << rcnn_outputs.pred_boxes.sizes() << endl; + cout << "scores: " << rcnn_outputs.scores.toString() << " " + << rcnn_outputs.scores.sizes() << endl; + cout << "pred_classes: " << rcnn_outputs.pred_classes.toString() << " " + << rcnn_outputs.pred_classes.sizes() << endl; + cout << "pred_masks: " << rcnn_outputs.pred_masks.toString() << " " + << rcnn_outputs.pred_masks.sizes() << endl; + + cout << rcnn_outputs.pred_boxes << endl; + return 0; +} diff --git a/data_processing/detectron2/tools/lazyconfig_train_net.py b/data_processing/detectron2/tools/lazyconfig_train_net.py new file mode 100644 index 0000000..bb62d36 --- 
/dev/null +++ b/data_processing/detectron2/tools/lazyconfig_train_net.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Training script using the new "LazyConfig" python config files. + +This scripts reads a given python config file and runs the training or evaluation. +It can be used to train any models or dataset as long as they can be +instantiated by the recursive construction defined in the given config file. + +Besides lazy construction of models, dataloader, etc., this scripts expects a +few common configuration parameters currently defined in "configs/common/train.py". +To add more complicated training logic, you can easily add other configs +in the config file and implement a new train_net.py to handle them. +""" +import logging + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import LazyConfig, instantiate +from detectron2.engine import ( + AMPTrainer, + SimpleTrainer, + default_argument_parser, + default_setup, + default_writers, + hooks, + launch, +) +from detectron2.engine.defaults import create_ddp_model +from detectron2.evaluation import inference_on_dataset, print_csv_format +from detectron2.utils import comm + +logger = logging.getLogger("detectron2") + + +def do_test(cfg, model): + if "evaluator" in cfg.dataloader: + ret = inference_on_dataset( + model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator) + ) + print_csv_format(ret) + return ret + + +def do_train(args, cfg): + """ + Args: + cfg: an object with the following attributes: + model: instantiate to a module + dataloader.{train,test}: instantiate to dataloaders + dataloader.evaluator: instantiate to evaluator for test set + optimizer: instantaite to an optimizer + lr_multiplier: instantiate to a fvcore scheduler + train: other misc config defined in `configs/common/train.py`, including: + output_dir (str) + init_checkpoint (str) + amp.enabled (bool) + max_iter (int) + eval_period, log_period (int) + device (str) + checkpointer (dict) + ddp (dict) + """ + model = instantiate(cfg.model) + logger = logging.getLogger("detectron2") + logger.info("Model:\n{}".format(model)) + model.to(cfg.train.device) + + cfg.optimizer.params.model = model + optim = instantiate(cfg.optimizer) + + train_loader = instantiate(cfg.dataloader.train) + + model = create_ddp_model(model, **cfg.train.ddp) + trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(model, train_loader, optim) + checkpointer = DetectionCheckpointer( + model, + cfg.train.output_dir, + trainer=trainer, + ) + trainer.register_hooks( + [ + hooks.IterationTimer(), + hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)), + hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer) + if comm.is_main_process() + else None, + hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)), + hooks.PeriodicWriter( + default_writers(cfg.train.output_dir, cfg.train.max_iter), + period=cfg.train.log_period, + ) + if comm.is_main_process() + else None, + ] + ) + + checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume) + if args.resume and checkpointer.has_checkpoint(): + # The checkpoint stores the training iteration that just finished, thus we start + # at the next iteration + start_iter = trainer.iter + 1 + else: + start_iter = 0 + trainer.train(start_iter, cfg.train.max_iter) + + +def main(args): + cfg = LazyConfig.load(args.config_file) + cfg = LazyConfig.apply_overrides(cfg, args.opts) + default_setup(cfg, args) + + if 
args.eval_only: + model = instantiate(cfg.model) + model.to(cfg.train.device) + model = create_ddp_model(model) + DetectionCheckpointer(model).load(cfg.train.init_checkpoint) + print(do_test(cfg, model)) + else: + do_train(args, cfg) + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/tools/lightning_train_net.py b/data_processing/detectron2/tools/lightning_train_net.py new file mode 100644 index 0000000..7a8c5d8 --- /dev/null +++ b/data_processing/detectron2/tools/lightning_train_net.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. +# Lightning Trainer should be considered beta at this point +# We have confirmed that training and validation run correctly and produce correct results +# Depending on how you launch the trainer, there are issues with processes terminating correctly +# This module is still dependent on D2 logging, but could be transferred to use Lightning logging + +import logging +import os +import time +import weakref +from collections import OrderedDict +from typing import Any, Dict, List +import pytorch_lightning as pl # type: ignore +from pytorch_lightning import LightningDataModule, LightningModule + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import build_detection_test_loader, build_detection_train_loader +from detectron2.engine import ( + DefaultTrainer, + SimpleTrainer, + default_argument_parser, + default_setup, + default_writers, + hooks, +) +from detectron2.evaluation import print_csv_format +from detectron2.evaluation.testing import flatten_results_dict +from detectron2.modeling import build_model +from detectron2.solver import build_lr_scheduler, build_optimizer +from detectron2.utils.events import EventStorage +from detectron2.utils.logger import setup_logger + +from train_net import build_evaluator + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("detectron2") + + +class TrainingModule(LightningModule): + def __init__(self, cfg): + super().__init__() + if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2 + setup_logger() + self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size()) + self.storage: EventStorage = None + self.model = build_model(self.cfg) + + self.start_iter = 0 + self.max_iter = cfg.SOLVER.MAX_ITER + + def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: + checkpoint["iteration"] = self.storage.iter + + def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None: + self.start_iter = checkpointed_state["iteration"] + self.storage.iter = self.start_iter + + def setup(self, stage: str): + if self.cfg.MODEL.WEIGHTS: + self.checkpointer = DetectionCheckpointer( + # Assume you want to save checkpoints together with logs/statistics + self.model, + self.cfg.OUTPUT_DIR, + ) + logger.info(f"Load model weights from checkpoint: {self.cfg.MODEL.WEIGHTS}.") + # Only load weights, use lightning checkpointing if you want to resume + self.checkpointer.load(self.cfg.MODEL.WEIGHTS) + + self.iteration_timer = hooks.IterationTimer() + self.iteration_timer.before_train() + self.data_start = time.perf_counter() + self.writers = None + + def training_step(self, batch, batch_idx): + data_time = time.perf_counter() 
- self.data_start + # Need to manually enter/exit since trainer may launch processes + # This ideally belongs in setup, but setup seems to run before processes are spawned + if self.storage is None: + self.storage = EventStorage(0) + self.storage.__enter__() + self.iteration_timer.trainer = weakref.proxy(self) + self.iteration_timer.before_step() + self.writers = ( + default_writers(self.cfg.OUTPUT_DIR, self.max_iter) + if comm.is_main_process() + else {} + ) + + loss_dict = self.model(batch) + SimpleTrainer.write_metrics(loss_dict, data_time) + + opt = self.optimizers() + self.storage.put_scalar( + "lr", opt.param_groups[self._best_param_group_id]["lr"], smoothing_hint=False + ) + self.iteration_timer.after_step() + self.storage.step() + # A little odd to put before step here, but it's the best way to get a proper timing + self.iteration_timer.before_step() + + if self.storage.iter % 20 == 0: + for writer in self.writers: + writer.write() + return sum(loss_dict.values()) + + def training_step_end(self, training_step_outpus): + self.data_start = time.perf_counter() + return training_step_outpus + + def training_epoch_end(self, training_step_outputs): + self.iteration_timer.after_train() + if comm.is_main_process(): + self.checkpointer.save("model_final") + for writer in self.writers: + writer.write() + writer.close() + self.storage.__exit__(None, None, None) + + def _process_dataset_evaluation_results(self) -> OrderedDict: + results = OrderedDict() + for idx, dataset_name in enumerate(self.cfg.DATASETS.TEST): + results[dataset_name] = self._evaluators[idx].evaluate() + if comm.is_main_process(): + print_csv_format(results[dataset_name]) + + if len(results) == 1: + results = list(results.values())[0] + return results + + def _reset_dataset_evaluators(self): + self._evaluators = [] + for dataset_name in self.cfg.DATASETS.TEST: + evaluator = build_evaluator(self.cfg, dataset_name) + evaluator.reset() + self._evaluators.append(evaluator) + + def on_validation_epoch_start(self, _outputs): + self._reset_dataset_evaluators() + + def validation_epoch_end(self, _outputs): + results = self._process_dataset_evaluation_results(_outputs) + + flattened_results = flatten_results_dict(results) + for k, v in flattened_results.items(): + try: + v = float(v) + except Exception as e: + raise ValueError( + "[EvalHook] eval_function should return a nested dict of float. 
" + "Got '{}: {}' instead.".format(k, v) + ) from e + self.storage.put_scalars(**flattened_results, smoothing_hint=False) + + def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None: + if not isinstance(batch, List): + batch = [batch] + outputs = self.model(batch) + self._evaluators[dataloader_idx].process(batch, outputs) + + def configure_optimizers(self): + optimizer = build_optimizer(self.cfg, self.model) + self._best_param_group_id = hooks.LRScheduler.get_best_param_group_id(optimizer) + scheduler = build_lr_scheduler(self.cfg, optimizer) + return [optimizer], [{"scheduler": scheduler, "interval": "step"}] + + +class DataModule(LightningDataModule): + def __init__(self, cfg): + super().__init__() + self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size()) + + def train_dataloader(self): + return build_detection_train_loader(self.cfg) + + def val_dataloader(self): + dataloaders = [] + for dataset_name in self.cfg.DATASETS.TEST: + dataloaders.append(build_detection_test_loader(self.cfg, dataset_name)) + return dataloaders + + +def main(args): + cfg = setup(args) + train(cfg, args) + + +def train(cfg, args): + trainer_params = { + # training loop is bounded by max steps, use a large max_epochs to make + # sure max_steps is met first + "max_epochs": 10**8, + "max_steps": cfg.SOLVER.MAX_ITER, + "val_check_interval": cfg.TEST.EVAL_PERIOD if cfg.TEST.EVAL_PERIOD > 0 else 10**8, + "num_nodes": args.num_machines, + "gpus": args.num_gpus, + "num_sanity_val_steps": 0, + } + if cfg.SOLVER.AMP.ENABLED: + trainer_params["precision"] = 16 + + last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt") + if args.resume: + # resume training from checkpoint + trainer_params["resume_from_checkpoint"] = last_checkpoint + logger.info(f"Resuming training from checkpoint: {last_checkpoint}.") + + trainer = pl.Trainer(**trainer_params) + logger.info(f"start to train with {args.num_machines} nodes and {args.num_gpus} GPUs") + + module = TrainingModule(cfg) + data_module = DataModule(cfg) + if args.eval_only: + logger.info("Running inference") + trainer.validate(module, data_module) + else: + logger.info("Running training") + trainer.fit(module, data_module) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +if __name__ == "__main__": + parser = default_argument_parser() + args = parser.parse_args() + logger.info("Command Line Args:", args) + main(args) diff --git a/data_processing/detectron2/tools/plain_train_net.py b/data_processing/detectron2/tools/plain_train_net.py new file mode 100644 index 0000000..be4588e --- /dev/null +++ b/data_processing/detectron2/tools/plain_train_net.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Detectron2 training script with a plain training loop. + +This script reads a given config file and runs the training or evaluation. +It is an entry point that is able to train standard models in detectron2. + +In order to let one script support training of many models, +this script contains logic that are specific to these built-in models and therefore +may not be suitable for your own project. +For example, your research project perhaps only needs a single "evaluator". + +Therefore, we recommend you to use detectron2 as a library and take +this file as an example of how to use the library. 
+You may want to write your own script with your datasets and other customizations. + +Compared to "train_net.py", this script supports fewer default features. +It also includes fewer abstraction, therefore is easier to add custom logic. +""" + +import logging +import os +from collections import OrderedDict +import torch +from torch.nn.parallel import DistributedDataParallel + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer +from detectron2.config import get_cfg +from detectron2.data import ( + MetadataCatalog, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.engine import default_argument_parser, default_setup, default_writers, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, + LVISEvaluator, + PascalVOCDetectionEvaluator, + SemSegEvaluator, + inference_on_dataset, + print_csv_format, +) +from detectron2.modeling import build_model +from detectron2.solver import build_lr_scheduler, build_optimizer +from detectron2.utils.events import EventStorage + +logger = logging.getLogger("detectron2") + + +def get_evaluator(cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: + evaluator_list.append( + SemSegEvaluator( + dataset_name, + distributed=True, + output_dir=output_folder, + ) + ) + if evaluator_type in ["coco", "coco_panoptic_seg"]: + evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) + if evaluator_type == "coco_panoptic_seg": + evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) + if evaluator_type == "cityscapes_instance": + return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + return CityscapesSemSegEvaluator(dataset_name) + if evaluator_type == "pascal_voc": + return PascalVOCDetectionEvaluator(dataset_name) + if evaluator_type == "lvis": + return LVISEvaluator(dataset_name, cfg, True, output_folder) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type) + ) + if len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + +def do_test(cfg, model): + results = OrderedDict() + for dataset_name in cfg.DATASETS.TEST: + data_loader = build_detection_test_loader(cfg, dataset_name) + evaluator = get_evaluator( + cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name) + ) + results_i = inference_on_dataset(model, data_loader, evaluator) + results[dataset_name] = results_i + if comm.is_main_process(): + logger.info("Evaluation results for {} in csv format:".format(dataset_name)) + print_csv_format(results_i) + if len(results) == 1: + results = list(results.values())[0] + return results + + +def do_train(cfg, model, resume=False): + model.train() + optimizer = build_optimizer(cfg, model) + scheduler = build_lr_scheduler(cfg, optimizer) + 
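+    # DetectionCheckpointer below tracks the model together with its optimizer and scheduler;
+    # resume_or_load either restores the latest checkpoint (when resume=True) or loads
+    # cfg.MODEL.WEIGHTS, and the stored iteration (if any) determines start_iter.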
+ checkpointer = DetectionCheckpointer( + model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler + ) + start_iter = ( + checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1 + ) + max_iter = cfg.SOLVER.MAX_ITER + + periodic_checkpointer = PeriodicCheckpointer( + checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter + ) + + writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else [] + + # compared to "train_net.py", we do not support accurate timing and + # precise BN here, because they are not trivial to implement in a small training loop + data_loader = build_detection_train_loader(cfg) + logger.info("Starting training from iteration {}".format(start_iter)) + with EventStorage(start_iter) as storage: + for data, iteration in zip(data_loader, range(start_iter, max_iter)): + storage.iter = iteration + + loss_dict = model(data) + losses = sum(loss_dict.values()) + assert torch.isfinite(losses).all(), loss_dict + + loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()} + losses_reduced = sum(loss for loss in loss_dict_reduced.values()) + if comm.is_main_process(): + storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced) + + optimizer.zero_grad() + losses.backward() + optimizer.step() + storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False) + scheduler.step() + + if ( + cfg.TEST.EVAL_PERIOD > 0 + and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0 + and iteration != max_iter - 1 + ): + do_test(cfg, model) + # Compared to "train_net.py", the test results are not dumped to EventStorage + comm.synchronize() + + if iteration - start_iter > 5 and ( + (iteration + 1) % 20 == 0 or iteration == max_iter - 1 + ): + for writer in writers: + writer.write() + periodic_checkpointer.step(iteration) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup( + cfg, args + ) # if you don't like any of the default setup, write your own setup code + return cfg + + +def main(args): + cfg = setup(args) + + model = build_model(cfg) + logger.info("Model:\n{}".format(model)) + if args.eval_only: + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + return do_test(cfg, model) + + distributed = comm.get_world_size() > 1 + if distributed: + model = DistributedDataParallel( + model, device_ids=[comm.get_local_rank()], broadcast_buffers=False + ) + + do_train(cfg, model, resume=args.resume) + return do_test(cfg, model) + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/tools/train_net.py b/data_processing/detectron2/tools/train_net.py new file mode 100644 index 0000000..8a6f297 --- /dev/null +++ b/data_processing/detectron2/tools/train_net.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. +""" +A main training script. + +This scripts reads a given config file and runs the training or evaluation. +It is an entry point that is made to train standard models in detectron2. 
+ +In order to let one script support training of many models, +this script contains logic that are specific to these built-in models and therefore +may not be suitable for your own project. +For example, your research project perhaps only needs a single "evaluator". + +Therefore, we recommend you to use detectron2 as an library and take +this file as an example of how to use the library. +You may want to write your own script with your datasets and other customizations. +""" + +import logging +import os +from collections import OrderedDict + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, + LVISEvaluator, + PascalVOCDetectionEvaluator, + SemSegEvaluator, + verify_results, +) +from detectron2.modeling import GeneralizedRCNNWithTTA + + +def build_evaluator(cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: + evaluator_list.append( + SemSegEvaluator( + dataset_name, + distributed=True, + output_dir=output_folder, + ) + ) + if evaluator_type in ["coco", "coco_panoptic_seg"]: + evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) + if evaluator_type == "coco_panoptic_seg": + evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) + if evaluator_type == "cityscapes_instance": + return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + return CityscapesSemSegEvaluator(dataset_name) + elif evaluator_type == "pascal_voc": + return PascalVOCDetectionEvaluator(dataset_name) + elif evaluator_type == "lvis": + return LVISEvaluator(dataset_name, output_dir=output_folder) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type) + ) + elif len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains pre-defined default logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can write your + own training loop. You can use "tools/plain_train_net.py" as an example. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + return build_evaluator(cfg, dataset_name, output_folder) + + @classmethod + def test_with_TTA(cls, cfg, model): + logger = logging.getLogger("detectron2.trainer") + # In the end of training, run an evaluation with TTA + # Only support some R-CNN models. 
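+        # GeneralizedRCNNWithTTA wraps the trained model so that each test image is run
+        # under the augmentations configured in cfg.TEST.AUG and the merged predictions
+        # are evaluated just like a normal test pass.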
+ logger.info("Running inference with test-time augmentation ...") + model = GeneralizedRCNNWithTTA(cfg, model) + evaluators = [ + cls.build_evaluator( + cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") + ) + for name in cfg.DATASETS.TEST + ] + res = cls.test(cfg, model, evaluators) + res = OrderedDict({k + "_TTA": v for k, v in res.items()}) + return res + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if cfg.TEST.AUG.ENABLED: + res.update(Trainer.test_with_TTA(cfg, model)) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + """ + If you'd like to do anything fancier than the standard training logic, + consider writing your own training loop (see plain_train_net.py) or + subclassing the trainer. + """ + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + if cfg.TEST.AUG.ENABLED: + trainer.register_hooks( + [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))] + ) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/data_processing/detectron2/tools/visualize_data.py b/data_processing/detectron2/tools/visualize_data.py new file mode 100644 index 0000000..fd0ba83 --- /dev/null +++ b/data_processing/detectron2/tools/visualize_data.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
+import argparse +import os +from itertools import chain +import cv2 +import tqdm + +from detectron2.config import get_cfg +from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader +from detectron2.data import detection_utils as utils +from detectron2.data.build import filter_images_with_few_keypoints +from detectron2.utils.logger import setup_logger +from detectron2.utils.visualizer import Visualizer + + +def setup(args): + cfg = get_cfg() + if args.config_file: + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.DATALOADER.NUM_WORKERS = 0 + cfg.freeze() + return cfg + + +def parse_args(in_args=None): + parser = argparse.ArgumentParser(description="Visualize ground-truth data") + parser.add_argument( + "--source", + choices=["annotation", "dataloader"], + required=True, + help="visualize the annotations or the data loader (with pre-processing)", + ) + parser.add_argument("--config-file", metavar="FILE", help="path to config file") + parser.add_argument("--output-dir", default="./", help="path to output directory") + parser.add_argument("--show", action="store_true", help="show output in a window") + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + return parser.parse_args(in_args) + + +if __name__ == "__main__": + args = parse_args() + logger = setup_logger() + logger.info("Arguments: " + str(args)) + cfg = setup(args) + + dirname = args.output_dir + os.makedirs(dirname, exist_ok=True) + metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) + + def output(vis, fname): + if args.show: + print(fname) + cv2.imshow("window", vis.get_image()[:, :, ::-1]) + cv2.waitKey() + else: + filepath = os.path.join(dirname, fname) + print("Saving to {} ...".format(filepath)) + vis.save(filepath) + + scale = 1.0 + if args.source == "dataloader": + train_data_loader = build_detection_train_loader(cfg) + for batch in train_data_loader: + for per_image in batch: + # Pytorch tensor is in (C, H, W) format + img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy() + img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT) + + visualizer = Visualizer(img, metadata=metadata, scale=scale) + target_fields = per_image["instances"].get_fields() + labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]] + vis = visualizer.overlay_instances( + labels=labels, + boxes=target_fields.get("gt_boxes", None), + masks=target_fields.get("gt_masks", None), + keypoints=target_fields.get("gt_keypoints", None), + ) + output(vis, str(per_image["image_id"]) + ".jpg") + else: + dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN])) + if cfg.MODEL.KEYPOINT_ON: + dicts = filter_images_with_few_keypoints(dicts, 1) + for dic in tqdm.tqdm(dicts): + img = utils.read_image(dic["file_name"], "RGB") + visualizer = Visualizer(img, metadata=metadata, scale=scale) + vis = visualizer.draw_dataset_dict(dic) + output(vis, os.path.basename(dic["file_name"])) diff --git a/data_processing/detectron2/tools/visualize_json_results.py b/data_processing/detectron2/tools/visualize_json_results.py new file mode 100644 index 0000000..472190e --- /dev/null +++ b/data_processing/detectron2/tools/visualize_json_results.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import argparse +import json +import numpy as np +import os +from collections import defaultdict +import cv2 +import tqdm + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.structures import Boxes, BoxMode, Instances +from detectron2.utils.file_io import PathManager +from detectron2.utils.logger import setup_logger +from detectron2.utils.visualizer import Visualizer + + +def create_instances(predictions, image_size): + ret = Instances(image_size) + + score = np.asarray([x["score"] for x in predictions]) + chosen = (score > args.conf_threshold).nonzero()[0] + score = score[chosen] + bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4) + bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + + labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen]) + + ret.scores = score + ret.pred_boxes = Boxes(bbox) + ret.pred_classes = labels + + try: + ret.pred_masks = [predictions[i]["segmentation"] for i in chosen] + except KeyError: + pass + return ret + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A script that visualizes the json predictions from COCO or LVIS dataset." + ) + parser.add_argument("--input", required=True, help="JSON file produced by the model") + parser.add_argument("--output", required=True, help="output directory") + parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val") + parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold") + args = parser.parse_args() + + logger = setup_logger() + + with PathManager.open(args.input, "r") as f: + predictions = json.load(f) + + pred_by_image = defaultdict(list) + for p in predictions: + pred_by_image[p["image_id"]].append(p) + + dicts = list(DatasetCatalog.get(args.dataset)) + metadata = MetadataCatalog.get(args.dataset) + if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): + + def dataset_id_map(ds_id): + return metadata.thing_dataset_id_to_contiguous_id[ds_id] + + elif "lvis" in args.dataset: + # LVIS results are in the same format as COCO results, but have a different + # mapping from dataset category id to contiguous category id in [0, #categories - 1] + def dataset_id_map(ds_id): + return ds_id - 1 + + else: + raise ValueError("Unsupported dataset: {}".format(args.dataset)) + + os.makedirs(args.output, exist_ok=True) + + for dic in tqdm.tqdm(dicts): + img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1] + basename = os.path.basename(dic["file_name"]) + + predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2]) + vis = Visualizer(img, metadata) + vis_pred = vis.draw_instance_predictions(predictions).get_image() + + vis = Visualizer(img, metadata) + vis_gt = vis.draw_dataset_dict(dic).get_image() + + concat = np.concatenate((vis_pred, vis_gt), axis=1) + cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1]) diff --git a/data_processing/list.txt b/data_processing/list.txt new file mode 100644 index 0000000..90d0904 --- /dev/null +++ b/data_processing/list.txt @@ -0,0 +1,882 @@ +pexels-photo-10044375_3.png +pexels-photo-10044375_6_h.png +pexels-photo-10049392_0_h.png +pexels-photo-10049570_0_h.png +pexels-photo-10057590_1.png +pexels-photo-10345681_0_s.png +pexels-photo-10375949_0_h.png +pexels-photo-10375949_0_s.png +pexels-photo-10375949_1_h.png +pexels-photo-10378628_0_s.png +pexels-photo-10397913_0_h.png +pexels-photo-10398357_1_s.png +pexels-photo-10427630_0.png 
+pexels-photo-10505884_0_s.png +pexels-photo-10505884_1_s.png +pexels-photo-10576766_3_s.png +pexels-photo-10576766_4.png +pexels-photo-10576766_5_h.png +pexels-photo-10576766_6_s.png +pexels-photo-10762921_2_s.png +pexels-photo-10831721_0_h.png +pexels-photo-10963534_0_s.png +pexels-photo-11034416_0_s.png +pexels-photo-11049820_0_h.png +pexels-photo-11208501_0_h.png +pexels-photo-1124837_0_s.png +pexels-photo-11256844_0_h.png +pexels-photo-11367431_0_h.png +pexels-photo-11367431_1_h.png +pexels-photo-11367431_2_h.png +pexels-photo-11370108_0_h.png +pexels-photo-11428507_0_h.png +pexels-photo-11470784_0_h.png +pexels-photo-11540389_2.png +pexels-photo-11566664_0.png +pexels-photo-11581929_0.png +pexels-photo-1168742_0.png +pexels-photo-11738318_0_s.png +pexels-photo-11745218_0_s.png +pexels-photo-1174589_0_h.png +pexels-photo-1181352_0.png +pexels-photo-1188084_0_s.png +pexels-photo-11899525_0_h.png +pexels-photo-12084380_0.png +pexels-photo-1212984_0_s.png +pexels-photo-12211769_0_h.png +pexels-photo-12421586_0_h.png +pexels-photo-12443969_0_h.png +pexels-photo-12443969_1_h.png +pexels-photo-12496864_0_s.png +pexels-photo-12513374_0_s.png +pexels-photo-12526402_0.png +pexels-photo-12536721_0_s.png +pexels-photo-12575855_0_s.png +pexels-photo-1261164_0.png +pexels-photo-12719297_1_h.png +pexels-photo-12742232_0_s.png +pexels-photo-12788467_1.png +pexels-photo-12871329_0_s.png +pexels-photo-12899798_0_s.png +pexels-photo-12920533_0_h.png +pexels-photo-12920533_1.png +pexels-photo-12920533_2_h.png +pexels-photo-12920533_3_h.png +pexels-photo-1292129_0.png +pexels-photo-13007291_0_s.png +pexels-photo-13022373_0_s.png +pexels-photo-13062548_3_s.png +pexels-photo-13086623_1.png +pexels-photo-13086623_2.png +pexels-photo-1325723_0_s.png +pexels-photo-13417785_0_h.png +pexels-photo-13417785_1.png +pexels-photo-13929227_0_h.png +pexels-photo-13929227_1_h.png +pexels-photo-13997557_3.png +pexels-photo-14025620_1_h.png +pexels-photo-14025620_11_s.png +pexels-photo-14025620_13_s.png +pexels-photo-14025620_15_h.png +pexels-photo-14025620_16_s.png +pexels-photo-14025620_17.png +pexels-photo-14025620_23_h.png +pexels-photo-14025620_26.png +pexels-photo-14025620_5_s.png +pexels-photo-1415268_0.png +pexels-photo-1420695_0_s.png +pexels-photo-1420695_1.png +pexels-photo-14235983_0_h.png +pexels-photo-14267674_0_s.png +pexels-photo-14408448_1_h.png +pexels-photo-14408448_2_s.png +pexels-photo-14514432_0_s.png +pexels-photo-14523206_0_s.png +pexels-photo-1474233_0.png +pexels-photo-1474233_1_h.png +pexels-photo-14844812_0_s.png +pexels-photo-1484796_0_h.png +pexels-photo-1486064_0_h.png +pexels-photo-1510542_0_h.png +pexels-photo-15268397_0_h.png +pexels-photo-15268397_1_h.png +pexels-photo-15304241_0_h.png +pexels-photo-15464476_0.png +pexels-photo-15498262_0_h.png +pexels-photo-15498262_3_h.png +pexels-photo-15498262_4_h.png +pexels-photo-15787353_0_s.png +pexels-photo-15797148_0_h.png +pexels-photo-15810517_0_h.png +pexels-photo-15823832_0_s.png +pexels-photo-15832913_0.png +pexels-photo-1617610_0_s.png +pexels-photo-1620788_0_h.png +pexels-photo-1620788_1_h.png +pexels-photo-1620788_3_s.png +pexels-photo-1630784_3_s.png +pexels-photo-1630784_4.png +pexels-photo-1630784_5_h.png +pexels-photo-1683974_0_h.png +pexels-photo-1757923_0_s.png +pexels-photo-1772724_0.png +pexels-photo-1843863_0_s.png +pexels-photo-1863476_0_h.png +pexels-photo-1926769_0_s.png +pexels-photo-2058608_0_s.png +pexels-photo-206402_0.png +pexels-photo-206593_0_s.png +pexels-photo-2247814_0_h.png +pexels-photo-2272941_0_h.png 
+pexels-photo-2343157_0_h.png +pexels-photo-2410576_0.png +pexels-photo-2430945_0_h.png +pexels-photo-2430945_1.png +pexels-photo-2430945_2.png +pexels-photo-2442399_0_s.png +pexels-photo-2539269_0_h.png +pexels-photo-2539269_1_h.png +pexels-photo-260111_0_h.png +pexels-photo-2602545_0.png +pexels-photo-2730217_0_h.png +pexels-photo-2734302_0_s.png +pexels-photo-274577_0_h.png +pexels-photo-2814239_0_s.png +pexels-photo-2853592_0_h.png +pexels-photo-2859374_0_h.png +pexels-photo-2896464_0.png +pexels-photo-290416_0_h.png +pexels-photo-2913125_0_h.png +pexels-photo-2927584_20.png +pexels-photo-2962147_0.png +pexels-photo-2976107_0_s.png +pexels-photo-3023746_0_s.png +pexels-photo-3026283_0_s.png +pexels-photo-307847_0_h.png +pexels-photo-3171067_0_h.png +pexels-photo-3182748_0_h.png +pexels-photo-3182748_2.png +pexels-photo-3182748_3_s.png +pexels-photo-3182748_4.png +pexels-photo-3182748_5_s.png +pexels-photo-3184436_0_h.png +pexels-photo-3184436_1.png +pexels-photo-3184436_10_h.png +pexels-photo-3184436_16_s.png +pexels-photo-3184436_3_h.png +pexels-photo-3184436_4_h.png +pexels-photo-319899_1.png +pexels-photo-3201696_0.png +pexels-photo-3205741_0_s.png +pexels-photo-3275945_0.png +pexels-photo-3276582_0_h.png +pexels-photo-3317750_1_s.png +pexels-photo-3352734_0.png +pexels-photo-3363968_0_s.png +pexels-photo-3527089_0_s.png +pexels-photo-3536435_2.png +pexels-photo-3564649_0_h.png +pexels-photo-3661452_0_h.png +pexels-photo-3661452_1_h.png +pexels-photo-3662649_1_h.png +pexels-photo-3703966_0_h.png +pexels-photo-3730941_0_h.png +pexels-photo-3754255_0_s.png +pexels-photo-3754255_1_s.png +pexels-photo-3755714_0_h.png +pexels-photo-3755714_1_h.png +pexels-photo-3755714_1_s.png +pexels-photo-3756785_0_h.png +pexels-photo-3758012_1_s.png +pexels-photo-3758012_2_s.png +pexels-photo-3758012_3_s.png +pexels-photo-3760923_0_h.png +pexels-photo-3763999_0_h.png +pexels-photo-3768879_0_h.png +pexels-photo-3771672_0_h.png +pexels-photo-3776847_0.png +pexels-photo-3776847_1_h.png +pexels-photo-3776847_2.png +pexels-photo-3776847_3.png +pexels-photo-3776847_5.png +pexels-photo-3777884_0_h.png +pexels-photo-3777884_1_h.png +pexels-photo-3780027_0_h.png +pexels-photo-3781911_0_h.png +pexels-photo-3783512_0_s.png +pexels-photo-37839_0.png +pexels-photo-3813041_0_h.png +pexels-photo-3817646_0_s.png +pexels-photo-3819576_0_h.png +pexels-photo-3819576_1_h.png +pexels-photo-3819950_0_h.png +pexels-photo-3820428_1.png +pexels-photo-3822724_0_s.png +pexels-photo-3822724_1_s.png +pexels-photo-3822724_2_s.png +pexels-photo-3822724_3_s.png +pexels-photo-3823490_0_h.png +pexels-photo-3845625_0_s.png +pexels-photo-3851853_1_h.png +pexels-photo-3855442_0_h.png +pexels-photo-3857525_0.png +pexels-photo-3867382_0_h.png +pexels-photo-3873029_0_s.png +pexels-photo-3933029_0_h.png +pexels-photo-3933395_1.png +pexels-photo-3933410_1_s.png +pexels-photo-3933896_1_h.png +pexels-photo-3951399_0_s.png +pexels-photo-3965391_2.png +pexels-photo-3967782_0.png +pexels-photo-3971474_0_h.png +pexels-photo-3983667_0_s.png +pexels-photo-3991771_1_s.png +pexels-photo-3992368_0_h.png +pexels-photo-3992368_1_h.png +pexels-photo-4009009_0_h.png +pexels-photo-4009592_0_s.png +pexels-photo-4009592_1_s.png +pexels-photo-4019754_0_s.png +pexels-photo-4019754_1_h.png +pexels-photo-4019754_10_h.png +pexels-photo-4019754_12_s.png +pexels-photo-4019754_2_h.png +pexels-photo-4019754_4.png +pexels-photo-4019754_8_s.png +pexels-photo-4040874_0.png +pexels-photo-4046104_0_h.png +pexels-photo-4046104_1.png +pexels-photo-4047023_0_h.png 
+pexels-photo-4047829_0_s.png +pexels-photo-4050392_0_h.png +pexels-photo-4057689_0.png +pexels-photo-4100421_0_h.png +pexels-photo-4100421_1.png +pexels-photo-4101187_0_h.png +pexels-photo-4101187_1_h.png +pexels-photo-4127873_1_s.png +pexels-photo-4132340_0_h.png +pexels-photo-4132358_0_h.png +pexels-photo-4153176_0_h.png +pexels-photo-4164759_0.png +pexels-photo-4260102_0.png +pexels-photo-4260102_1.png +pexels-photo-4260102_2.png +pexels-photo-4339514_0_h.png +pexels-photo-4342098_0_s.png +pexels-photo-4395319_0_s.png +pexels-photo-4473890_0.png +pexels-photo-4498150_0_s.png +pexels-photo-4510854_0.png +pexels-photo-4543732_0_h.png +pexels-photo-4543732_1_h.png +pexels-photo-4543732_2_s.png +pexels-photo-4546135_0_h.png +pexels-photo-4555327_0_s.png +pexels-photo-4555327_1_h.png +pexels-photo-4555327_2.png +pexels-photo-4571260_0_s.png +pexels-photo-4584582_0_h.png +pexels-photo-4586678_0_h.png +pexels-photo-4587421_0_h.png +pexels-photo-4623085_0.png +pexels-photo-4623525_0_h.png +pexels-photo-4623525_2.png +pexels-photo-4624913_0_h.png +pexels-photo-4624913_1.png +pexels-photo-4638830_0_s.png +pexels-photo-4668946_0_h.png +pexels-photo-4668946_2.png +pexels-photo-4672484_0_s.png +pexels-photo-4672484_3_h.png +pexels-photo-4672484_4_h.png +pexels-photo-4672484_6_s.png +pexels-photo-4720309_0_s.png +pexels-photo-4720500_0_s.png +pexels-photo-4751203_0.png +pexels-photo-4769468_0_h.png +pexels-photo-4781458_0_h.png +pexels-photo-4783338_0_s.png +pexels-photo-4842498_0_h.png +pexels-photo-4872091_0.png +pexels-photo-4872091_1_s.png +pexels-photo-4874336_0.png +pexels-photo-4874917_0_h.png +pexels-photo-4874917_1_h.png +pexels-photo-4874917_2.png +pexels-photo-4877850_0_h.png +pexels-photo-4877850_1_s.png +pexels-photo-4877850_4_s.png +pexels-photo-4881613_0_h.png +pexels-photo-4881613_1.png +pexels-photo-4881613_3_s.png +pexels-photo-4881613_4_h.png +pexels-photo-4890273_0_s.png +pexels-photo-4894830_0_h.png +pexels-photo-4911750_0_s.png +pexels-photo-4917820_0_h.png +pexels-photo-4939552_0_s.png +pexels-photo-4946531_0_h.png +pexels-photo-4977411_0_s.png +pexels-photo-4977411_1.png +pexels-photo-4980302_0_h.png +pexels-photo-4985017_0.png +pexels-photo-4989266_0_h.png +pexels-photo-5020368_1_s.png +pexels-photo-5029344_0_h.png +pexels-photo-5037007_0_h.png +pexels-photo-5037007_3_h.png +pexels-photo-5037285_0_h.png +pexels-photo-5037285_0_s.png +pexels-photo-5047063_0_h.png +pexels-photo-5055244_0_s.png +pexels-photo-5055248_0.png +pexels-photo-5055248_1_h.png +pexels-photo-5055421_0_h.png +pexels-photo-5055421_1_h.png +pexels-photo-5060987_0_h.png +pexels-photo-5063299_0.png +pexels-photo-5094096_0_h.png +pexels-photo-5094104_0.png +pexels-photo-5098287_1_s.png +pexels-photo-5098287_11_h.png +pexels-photo-5098287_14_h.png +pexels-photo-5098287_16_h.png +pexels-photo-5098287_2.png +pexels-photo-5098287_3_s.png +pexels-photo-5098287_6.png +pexels-photo-5126956_0_h.png +pexels-photo-5158233_0_s.png +pexels-photo-5205275_0_s.png +pexels-photo-5205275_1_s.png +pexels-photo-5211447_0_h.png +pexels-photo-5212668_2.png +pexels-photo-5212699_1_s.png +pexels-photo-5225446_0_h.png +pexels-photo-5239523_0_s.png +pexels-photo-5239523_1.png +pexels-photo-5240605_1_h.png +pexels-photo-5241025_0_h.png +pexels-photo-5256916_0.png +pexels-photo-5257266_0_h.png +pexels-photo-5257454_0.png +pexels-photo-5257454_1_s.png +pexels-photo-5257454_2_s.png +pexels-photo-5257454_3_h.png +pexels-photo-5257497_0_h.png +pexels-photo-5257497_0_s.png +pexels-photo-5257547_0_h.png +pexels-photo-5258251_0_s.png 
+pexels-photo-5258907_0_h.png +pexels-photo-5263833_0_h.png +pexels-photo-5263833_1_h.png +pexels-photo-5273059_0_h.png +pexels-photo-5274600_0_h.png +pexels-photo-5310786_0_s.png +pexels-photo-5329068_0.png +pexels-photo-5335170_0.png +pexels-photo-5349756_1.png +pexels-photo-5356823_0_h.png +pexels-photo-5357336_0.png +pexels-photo-5357615_0_s.png +pexels-photo-5366313_0.png +pexels-photo-5386148_0_s.png +pexels-photo-5386459_0_s.png +pexels-photo-5388321_0_s.png +pexels-photo-5390335_0_s.png +pexels-photo-5393445_1.png +pexels-photo-5405024_0.png +pexels-photo-5427143_0.png +pexels-photo-5439478_0_s.png +pexels-photo-5439478_1_h.png +pexels-photo-5439478_2_h.png +pexels-photo-5488943_0_s.png +pexels-photo-5490267_0_s.png +pexels-photo-5538615_0_h.png +pexels-photo-5543181_0_s.png +pexels-photo-5553671_0_s.png +pexels-photo-5555111_0_s.png +pexels-photo-5560039_0_h.png +pexels-photo-5561169_0_h.png +pexels-photo-5561455_0_h.png +pexels-photo-5593618_0_h.png +pexels-photo-5600112_0.png +pexels-photo-5622327_0_h.png +pexels-photo-5691296_0_h.png +pexels-photo-5691845_1_h.png +pexels-photo-5691845_2_h.png +pexels-photo-5692182_0_h.png +pexels-photo-5692182_1_h.png +pexels-photo-5692691_0_h.png +pexels-photo-5692691_1_h.png +pexels-photo-5692997_0_h.png +pexels-photo-5698208_0_h.png +pexels-photo-5698369_0_h.png +pexels-photo-5709530_0_h.png +pexels-photo-5710602_0_h.png +pexels-photo-5710946_0_s.png +pexels-photo-5711233_0.png +pexels-photo-5717051_0_s.png +pexels-photo-5721093_0.png +pexels-photo-5727759_0_h.png +pexels-photo-5727759_1_s.png +pexels-photo-5727759_2_s.png +pexels-photo-5727759_3_s.png +pexels-photo-5727759_4_h.png +pexels-photo-5727775_1_s.png +pexels-photo-5727775_2_s.png +pexels-photo-5727775_3_h.png +pexels-photo-5727775_4_s.png +pexels-photo-5728206_0_s.png +pexels-photo-5728206_1.png +pexels-photo-5762495_0_h.png +pexels-photo-5764902_0_s.png +pexels-photo-5764902_1.png +pexels-photo-5764903_0.png +pexels-photo-5795419_0_h.png +pexels-photo-5814298_1_h.png +pexels-photo-5814298_2_h.png +pexels-photo-5814298_3_h.png +pexels-photo-583124_0_h.png +pexels-photo-5847798_0.png +pexels-photo-5876654_0.png +pexels-photo-5896471_0.png +pexels-photo-5896471_1_h.png +pexels-photo-5905494_0_h.png +pexels-photo-5905494_1_s.png +pexels-photo-5911942_0_s.png +pexels-photo-5915298_0_h.png +pexels-photo-5917712_0_s.png +pexels-photo-5922070_0.png +pexels-photo-5933917_0_h.png +pexels-photo-5935233_0_h.png +pexels-photo-5935233_1_s.png +pexels-photo-5935233_2_h.png +pexels-photo-5940841_0_s.png +pexels-photo-5940841_1_h.png +pexels-photo-5940841_2.png +pexels-photo-5940841_3_h.png +pexels-photo-5940841_4_s.png +pexels-photo-5961074_0_s.png +pexels-photo-5961074_1_s.png +pexels-photo-5961074_2_s.png +pexels-photo-5999085_0_h.png +pexels-photo-6006255_0.png +pexels-photo-6015886_0_s.png +pexels-photo-6015935_0_s.png +pexels-photo-6023601_0_s.png +pexels-photo-6025211_3.png +pexels-photo-6039870_7_s.png +pexels-photo-6113555_0_h.png +pexels-photo-6113555_2.png +pexels-photo-6132889_0.png +pexels-photo-6140366_0.png +pexels-photo-6140366_1_h.png +pexels-photo-6140366_2.png +pexels-photo-6140723_0_s.png +pexels-photo-6141083_1_h.png +pexels-photo-6141233_0_s.png +pexels-photo-6141233_1_h.png +pexels-photo-6147015_0.png +pexels-photo-6147369_1.png +pexels-photo-6147369_2_s.png +pexels-photo-6150579_0_s.png +pexels-photo-6150579_2.png +pexels-photo-6169668_0_s.png +pexels-photo-6169668_1_s.png +pexels-photo-6193433_1_s.png +pexels-photo-6201979_0_h.png +pexels-photo-6202790_0_s.png 
+pexels-photo-620340_0_s.png +pexels-photo-6204234_0.png +pexels-photo-6209065_2_s.png +pexels-photo-6210267_0_h.png +pexels-photo-6210267_1.png +pexels-photo-6220702_1.png +pexels-photo-6257132_0.png +pexels-photo-6281724_0_s.png +pexels-photo-6281724_1.png +pexels-photo-6281724_2_h.png +pexels-photo-6297603_0_h.png +pexels-photo-6297603_1_s.png +pexels-photo-6299291_0_h.png +pexels-photo-6299291_1_h.png +pexels-photo-6299291_2_h.png +pexels-photo-6299291_3_h.png +pexels-photo-6311134_0_s.png +pexels-photo-6339324_1_h.png +pexels-photo-6339324_2.png +pexels-photo-634007_0.png +pexels-photo-634007_1.png +pexels-photo-6340620_0_h.png +pexels-photo-6340620_1_s.png +pexels-photo-6340620_2_h.png +pexels-photo-6340620_3.png +pexels-photo-6340620_4.png +pexels-photo-6340628_1_s.png +pexels-photo-6340628_2.png +pexels-photo-6345387_0.png +pexels-photo-6453628_0_h.png +pexels-photo-6453958_0_s.png +pexels-photo-6455834_0_s.png +pexels-photo-6457490_0_s.png +pexels-photo-6457490_1_h.png +pexels-photo-6476344_0.png +pexels-photo-6478306_0_s.png +pexels-photo-6491794_3_h.png +pexels-photo-6512495_0.png +pexels-photo-6530738_0_s.png +pexels-photo-6551237_0_h.png +pexels-photo-6551494_0_s.png +pexels-photo-6578394_0_s.png +pexels-photo-6612632_0_s.png +pexels-photo-6612632_1_h.png +pexels-photo-6620720_0_s.png +pexels-photo-6626000_0_s.png +pexels-photo-6626000_1_h.png +pexels-photo-6692899_0_h.png +pexels-photo-6694742_0_s.png +pexels-photo-6694742_1_s.png +pexels-photo-6714616_0_h.png +pexels-photo-674833_0_s.png +pexels-photo-6770359_0_s.png +pexels-photo-6774173_0_s.png +pexels-photo-6777188_2.png +pexels-photo-6777188_3.png +pexels-photo-6777188_4_h.png +pexels-photo-6781177_0.png +pexels-photo-6784855_0_h.png +pexels-photo-6784898_0_h.png +pexels-photo-6785010_0_s.png +pexels-photo-6815668_0_h.png +pexels-photo-6829484_0_s.png +pexels-photo-6835956_0.png +pexels-photo-6874028_0_s.png +pexels-photo-6874659_0_s.png +pexels-photo-6874659_1.png +pexels-photo-6878686_0_h.png +pexels-photo-6878686_1_h.png +pexels-photo-6897918_0_s.png +pexels-photo-6914062_0_h.png +pexels-photo-6914062_1.png +pexels-photo-6914062_2_h.png +pexels-photo-6914062_3_s.png +pexels-photo-6914062_4.png +pexels-photo-6930406_1.png +pexels-photo-6935992_0_h.png +pexels-photo-6941674_0_h.png +pexels-photo-6941674_1_h.png +pexels-photo-6948104_0_h.png +pexels-photo-6953854_0.png +pexels-photo-6953854_1_h.png +pexels-photo-6969970_0_h.png +pexels-photo-6975208_0_s.png +pexels-photo-6975208_1_h.png +pexels-photo-6975640_0_h.png +pexels-photo-6995845_0_h.png +pexels-photo-7010106_0_s.png +pexels-photo-7013903_0.png +pexels-photo-7020617_0_s.png +pexels-photo-7035396_0_h.png +pexels-photo-7035541_0_h.png +pexels-photo-7063751_0.png +pexels-photo-7065297_0_s.png +pexels-photo-7065297_1_s.png +pexels-photo-7065436_1_s.png +pexels-photo-7065455_0_s.png +pexels-photo-7065455_1_h.png +pexels-photo-7084410_0_s.png +pexels-photo-7084410_1.png +pexels-photo-7084418_0_s.png +pexels-photo-7104232_0_h.png +pexels-photo-7104232_1.png +pexels-photo-7148031_1.png +pexels-photo-7202771_0_s.png +pexels-photo-7202771_1_s.png +pexels-photo-7213203_0_s.png +pexels-photo-7213203_1_s.png +pexels-photo-7213203_2.png +pexels-photo-7213203_3_h.png +pexels-photo-7213366_0.png +pexels-photo-7213366_2_h.png +pexels-photo-7219206_1_h.png +pexels-photo-7232041_0_h.png +pexels-photo-7236174_0_h.png +pexels-photo-7244740_0_h.png +pexels-photo-7249421_0_s.png +pexels-photo-7249421_1_h.png +pexels-photo-7249421_2_h.png +pexels-photo-7249421_3_s.png 
+pexels-photo-7249421_4.png +pexels-photo-7266752_0_s.png +pexels-photo-7266752_1.png +pexels-photo-7270922_1.png +pexels-photo-7283533_0_s.png +pexels-photo-7283533_1_s.png +pexels-photo-7295889_0_h.png +pexels-photo-7296266_0_h.png +pexels-photo-7296266_1_s.png +pexels-photo-7318674_0_s.png +pexels-photo-7322106_0.png +pexels-photo-7322106_1.png +pexels-photo-7322192_0_h.png +pexels-photo-7322192_1_s.png +pexels-photo-7322489_0_s.png +pexels-photo-7322489_1_s.png +pexels-photo-7322492_0_s.png +pexels-photo-7322492_1_s.png +pexels-photo-7328434_0_h.png +pexels-photo-7328434_1_h.png +pexels-photo-7345473_0_s.png +pexels-photo-7354924_0_h.png +pexels-photo-7368179_0_h.png +pexels-photo-7368179_1_s.png +pexels-photo-7431357_0_h.png +pexels-photo-7432114_0_s.png +pexels-photo-7439284_0.png +pexels-photo-7479863_0_s.png +pexels-photo-7479863_1_s.png +pexels-photo-749072_0_h.png +pexels-photo-7490853_0.png +pexels-photo-7490853_1_s.png +pexels-photo-7495122_1_s.png +pexels-photo-7495122_2.png +pexels-photo-7500331_0_h.png +pexels-photo-7507087_0_s.png +pexels-photo-7513029_0.png +pexels-photo-7513092_0_h.png +pexels-photo-7520932_0_s.png +pexels-photo-7520932_1.png +pexels-photo-7529990_0.png +pexels-photo-7529990_1_s.png +pexels-photo-7551383_0_h.png +pexels-photo-7551383_1.png +pexels-photo-7551609_0_s.png +pexels-photo-7551609_1_h.png +pexels-photo-7569423_5_h.png +pexels-photo-7569423_7_h.png +pexels-photo-7573405_0.png +pexels-photo-7573408_0_h.png +pexels-photo-7599683_0_h.png +pexels-photo-7605899_0_s.png +pexels-photo-7607782_0_h.png +pexels-photo-7609958_0.png +pexels-photo-7624882_0_s.png +pexels-photo-7640437_0_s.png +pexels-photo-7644155_0_s.png +pexels-photo-7648029_0_s.png +pexels-photo-7648029_1_s.png +pexels-photo-7648041_1_s.png +pexels-photo-7651753_0.png +pexels-photo-7652179_0_h.png +pexels-photo-7652179_2.png +pexels-photo-7671060_0_h.png +pexels-photo-7674834_0_h.png +pexels-photo-7675912_0_h.png +pexels-photo-7676827_0.png +pexels-photo-7677597_0_h.png +pexels-photo-7677597_2_h.png +pexels-photo-7677951_0_h.png +pexels-photo-7677951_1_s.png +pexels-photo-7683876_1_s.png +pexels-photo-7698396_0_h.png +pexels-photo-7705684_1_h.png +pexels-photo-7749406_0_h.png +pexels-photo-7750690_0.png +pexels-photo-7750690_1.png +pexels-photo-7750690_2_s.png +pexels-photo-7787678_0.png +pexels-photo-7799636_0.png +pexels-photo-7799636_1_s.png +pexels-photo-7821545_0_h.png +pexels-photo-7841444_0_s.png +pexels-photo-7841467_0_h.png +pexels-photo-7841467_1.png +pexels-photo-7856901_0_s.png +pexels-photo-7867640_0_s.png +pexels-photo-7869685_0_s.png +pexels-photo-7872622_0_s.png +pexels-photo-7874479_0_h.png +pexels-photo-7876092_0.png +pexels-photo-7876092_1.png +pexels-photo-7876092_2.png +pexels-photo-7876149_0.png +pexels-photo-7876149_1_s.png +pexels-photo-7876897_0_h.png +pexels-photo-7876897_1.png +pexels-photo-7876897_2_s.png +pexels-photo-7879720_0.png +pexels-photo-7879720_1_h.png +pexels-photo-7884124_0.png +pexels-photo-7886856_0_h.png +pexels-photo-7929483_3_h.png +pexels-photo-7935813_0_h.png +pexels-photo-7935813_1.png +pexels-photo-7938036_0_h.png +pexels-photo-7938036_1_h.png +pexels-photo-7938036_2_h.png +pexels-photo-7938044_0_h.png +pexels-photo-7938044_1_h.png +pexels-photo-7938044_2_h.png +pexels-photo-7973031_0.png +pexels-photo-7973031_1_s.png +pexels-photo-7983588_0_s.png +pexels-photo-7989150_4_s.png +pexels-photo-8055523_0_h.png +pexels-photo-8057362_0.png +pexels-photo-8058287_0_h.png +pexels-photo-8083025_0.png +pexels-photo-8083025_1.png 
+pexels-photo-8083025_2.png +pexels-photo-8084057_1.png +pexels-photo-8084057_3_s.png +pexels-photo-8090448_0_s.png +pexels-photo-8101710_0.png +pexels-photo-8101710_1_h.png +pexels-photo-8101710_2_h.png +pexels-photo-8101710_3_h.png +pexels-photo-8104174_1_h.png +pexels-photo-8104174_3_h.png +pexels-photo-8104852_0.png +pexels-photo-8104852_1_s.png +pexels-photo-8107081_0.png +pexels-photo-8112164_0_h.png +pexels-photo-8112164_1_s.png +pexels-photo-8112164_2.png +pexels-photo-8124250_0.png +pexels-photo-8124250_1.png +pexels-photo-8133858_0.png +pexels-photo-8133858_1.png +pexels-photo-8133858_2.png +pexels-photo-8133995_0_h.png +pexels-photo-8133995_1_s.png +pexels-photo-8133995_2_h.png +pexels-photo-8153903_0_s.png +pexels-photo-8170251_0_h.png +pexels-photo-8170255_0_h.png +pexels-photo-8170255_1_h.png +pexels-photo-8170255_2.png +pexels-photo-8170255_3.png +pexels-photo-8170294_0_h.png +pexels-photo-8171211_0.png +pexels-photo-8171211_1.png +pexels-photo-818801_0_s.png +pexels-photo-8213234_0_h.png +pexels-photo-8213234_1_h.png +pexels-photo-8213977_0_s.png +pexels-photo-8259896_0_h.png +pexels-photo-8259896_1_s.png +pexels-photo-8259896_2_h.png +pexels-photo-8259896_3_s.png +pexels-photo-8307438_0_h.png +pexels-photo-8307438_1_h.png +pexels-photo-8307438_4_s.png +pexels-photo-8355405_0.png +pexels-photo-8355405_1_h.png +pexels-photo-8355405_2_s.png +pexels-photo-8355405_3_h.png +pexels-photo-8364014_0_h.png +pexels-photo-8376149_0.png +pexels-photo-8380092_2.png +pexels-photo-8380092_6_h.png +pexels-photo-8390281_0.png +pexels-photo-8390281_2_s.png +pexels-photo-8417276_0.png +pexels-photo-8417323_0_s.png +pexels-photo-8419207_0_s.png +pexels-photo-8419492_1_h.png +pexels-photo-8419492_3_s.png +pexels-photo-8422258_0_h.png +pexels-photo-8422258_1.png +pexels-photo-8424561_0.png +pexels-photo-8424561_1_s.png +pexels-photo-8429912_0_h.png +pexels-photo-8429912_1_h.png +pexels-photo-8430129_1_h.png +pexels-photo-8441414_0_s.png +pexels-photo-8471836_0_h.png +pexels-photo-8502053_0.png +pexels-photo-8512442_0_h.png +pexels-photo-8540374_0_h.png +pexels-photo-8544490_4_h.png +pexels-photo-8544490_5_h.png +pexels-photo-8612899_2_h.png +pexels-photo-8612899_7_h.png +pexels-photo-8613094_0_h.png +pexels-photo-8617765_1_h.png +pexels-photo-8617765_2_h.png +pexels-photo-8617765_3_h.png +pexels-photo-8617765_4_s.png +pexels-photo-8617765_5_h.png +pexels-photo-8633364_1_s.png +pexels-photo-8638142_0_s.png +pexels-photo-8717389_0.png +pexels-photo-8730181_0.png +pexels-photo-8730181_2_s.png +pexels-photo-8730181_4_s.png +pexels-photo-8761322_0_s.png +pexels-photo-8789214_0_s.png +pexels-photo-8790334_0_h.png +pexels-photo-8790334_1.png +pexels-photo-8790334_2_h.png +pexels-photo-8795805_0_s.png +pexels-photo-8837759_0_h.png +pexels-photo-8837759_1_h.png +pexels-photo-8837759_2.png +pexels-photo-8837759_3.png +pexels-photo-8837759_7_h.png +pexels-photo-8865140_1_h.png +pexels-photo-8872678_1.png +pexels-photo-8872678_3_h.png +pexels-photo-8872678_4_s.png +pexels-photo-8922545_1_h.png +pexels-photo-8922545_2.png +pexels-photo-8922803_0_h.png +pexels-photo-8923264_0.png +pexels-photo-8923264_1_h.png +pexels-photo-8927013_0_h.png +pexels-photo-8933955_0_h.png +pexels-photo-8941577_1_h.png +pexels-photo-8941577_2_h.png +pexels-photo-8959533_0_h.png +pexels-photo-8972512_1_h.png +pexels-photo-8986675_0_s.png +pexels-photo-9034737_1_s.png +pexels-photo-9034767_2_s.png +pexels-photo-905336_0_s.png +pexels-photo-905336_1_s.png +pexels-photo-905336_2_s.png +pexels-photo-9208549_0.png 
+pexels-photo-9211772_0.png +pexels-photo-9268713_0_s.png +pexels-photo-935953_0_h.png +pexels-photo-936094_0_s.png +pexels-photo-938642_1_s.png +pexels-photo-9399190_0_h.png +pexels-photo-943241_0.png +pexels-photo-9518018_0_h.png +pexels-photo-9558694_0_s.png +pexels-photo-9628357_0_h.png +pexels-photo-9756301_0_h.png +pexels-photo-975680_0_h.png +pexels-photo-9759336_0_h.png +pexels-photo-9783374_0_h.png +pexels-photo-9784059_0.png +pexels-photo-9784393_0_h.png +pexels-photo-983197_0_s.png +pexels-photo-9834555_1.png +pexels-photo-9872289_0_h.png +pexels-photo-9872289_1_h.png +pexels-photo-9872289_2_h.png +pexels-photo-9885405_1.png +pexels-photo-9885405_2_h.png +pexels-photo-9885405_3.png +pexels-photo-9907941_0_s.png +pexels-photo-993868_0_s.png +pexels-photo-9958416_0_h.png +pexels-photo-9969297_0_h.png +pexels-photo-9974985_0_h.png +pexels-photo-9987846_0_s.png +pexels-photo-9987846_1_h.png +pexels-photo-9993910_16_s.png diff --git a/data_processing/main/config.py b/data_processing/main/config.py new file mode 100644 index 0000000..d51df3c --- /dev/null +++ b/data_processing/main/config.py @@ -0,0 +1,136 @@ +import os +import os.path as osp +import sys +import numpy as np +import datetime +import yaml +import shutil +import glob +from easydict import EasyDict as edict +import time +class Config: + ## dataset + # MuCo, Human36M, MSCOCO, PW3D, FreiHAND + trainset_3d = ['Human36M'] # 'Human36M', 'MuCo' + trainset_2d = ['MSCOCO'] # 'MSCOCO', 'MPII', 'CrowdPose' + testset = 'PW3D' # 'MuPoTs' 'MSCOCO' Human36M, MSCOCO, 'PW3D' + + ## model setting + resnet_type = 50 # 50, 101, 152 + + ## input, output + input_img_shape = (256, 256) #(256, 192) + output_hm_shape = (64, 64, 64) #(64, 64, 48) + bbox_3d_size = 2 if 'FreiHAND' not in trainset_3d + trainset_2d + [testset] else 0.3 + sigma = 2.5 + focal = (5000, 5000) # virtual focal lengths + princpt = (input_img_shape[1] / 2, input_img_shape[0] / 2) # virtual principal point position + + ## training config + lr_dec_epoch = [15] if 'FreiHAND' not in trainset_3d + trainset_2d + [testset] else [17,21] + end_epoch = 20 #13 if 'FreiHAND' not in trainset_3d + trainset_2d + [testset] else 25 + lr = 1e-4 + lr_backbone = 1e-4 + lr_dec_factor = 10 + train_batch_size = 32 + use_gt_info = True + + ## testing config + test_batch_size = 32 + crowd = False + vis = False + render = False + use_bbox_in_ann = True + ## others + num_thread = 0 # 0 if use windows + gpu_ids = '0' + num_gpus = 1 + continue_train = False + finetune = False + + ## directory + cur_dir = osp.dirname(os.path.abspath(__file__)) + root_dir = osp.join(cur_dir, '..') + #root_dir = 'F:/full-head-dataset/skeleton_estimation/3DCrowdNet_RELEASE' + data_dir = osp.join(root_dir, 'data') + output_dir = osp.join(root_dir, 'output') + # hongsuk choi style + # KST = datetime.timezone(datetime.timedelta(hours=9)) + # save_folder = 'exp_' + str(datetime.datetime.now(tz=KST))[5:-16] + save_folder = 'exp_' + str(datetime.datetime.now())[5:-10].replace(':','-') + save_folder = save_folder.replace(" ", "_") + output_dir = osp.join(output_dir, save_folder) + print('output dir: ', output_dir) + + model_dir = osp.join(output_dir, 'checkpoint') + vis_dir = osp.join(output_dir, 'vis') + log_dir = osp.join(output_dir, 'log') + result_dir = osp.join(output_dir, 'result') + mano_path = osp.join(root_dir, 'common', 'utils', 'manopth') + smpl_path = osp.join(root_dir, 'common', 'utils', 'smplpytorch') + human_model_path = osp.join(root_dir, 'common', 'utils', 'human_model_files') + + def set_args(self, gpu_ids, 
continue_train=False, is_test=False, exp_dir=''): + print('exp_dir: ', exp_dir) + self.gpu_ids = gpu_ids + self.num_gpus = len(self.gpu_ids.split(',')) + self.bbox_3d_size = 2 + self.camera_3d_size = 2.5 + + if not is_test: + self.continue_train = continue_train + if self.continue_train: + start = time.time() + if exp_dir: + checkpoints = sorted(glob.glob(osp.join(exp_dir, 'checkpoint') + '/*.pth.tar'), + key=lambda x: int(x.split('_')[-1][:-8])) + shutil.copyfile(checkpoints[-1], osp.join(cfg.model_dir, os.path.basename(checkpoints[-1]))) + else: + shutil.copyfile(osp.join(cfg.root_dir, 'tool', 'snapshot_0.pth.tar'), + osp.join(cfg.model_dir, 'snapshot_0.pth.tar')) + + print('>>> Copying checkpoint file from {} to {} takes {:.2f} seconds'.format(checkpoints[-1], + osp.join(cfg.model_dir, + os.path.basename( + checkpoints[ + -1])), + time.time() - start)) + + elif is_test and exp_dir: + self.output_dir = exp_dir + self.model_dir = osp.join(self.output_dir, 'checkpoint') + self.vis_dir = osp.join(self.output_dir, 'vis') + self.log_dir = osp.join(self.output_dir, 'log') + self.result_dir = osp.join(self.output_dir, 'result') + + os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu_ids + print('>>> Using GPU: {}'.format(self.gpu_ids)) + + if self.testset == 'FreiHAND': + assert self.trainset_3d[0] == 'FreiHAND' + assert len(self.trainset_3d) == 1 + assert len(self.trainset_2d) == 0 + + def set_data_dir(self,dir): + self.data_dir = dir + def update(self, config_file): + with open(config_file) as f: + exp_config = edict(yaml.load(f)) + for k, v in exp_config.items(): + if hasattr(cfg, k): + setattr(cfg, k, v) + else: + raise ValueError("{} not exist in config.py".format(k)) + +cfg = Config() + +sys.path.insert(0, osp.join(cfg.root_dir, 'common')) +from utils.dir import add_pypath, make_folder +add_pypath(osp.join(cfg.data_dir)) +dataset_list = ['CrowdPose', 'Human36M', 'MPII', 'MSCOCO', 'MuCo', 'PW3D'] +for i in range(len(dataset_list)): + add_pypath(osp.join(cfg.data_dir, dataset_list[i])) +make_folder(cfg.model_dir) +make_folder(cfg.vis_dir) +make_folder(cfg.log_dir) +make_folder(cfg.result_dir) diff --git a/data_processing/main/crop_images.py b/data_processing/main/crop_images.py new file mode 100644 index 0000000..6f20f62 --- /dev/null +++ b/data_processing/main/crop_images.py @@ -0,0 +1,45 @@ +import glob +import json +import os.path + +import cv2 +import sys +sys.path.append('../common') +import torch +import torch.nn as nn +from torch.nn import functional as F +from nets.resnet import ResNetBackbone +from nets.module import Pose2Feat, PositionNet, RotationNet, Vposer +from nets.loss import CoordLoss, ParamLoss, NormalVectorLoss, EdgeLengthLoss +from utils.smpl import SMPL +from utils.mano import MANO +from config import cfg +from contextlib import nullcontext +import math +# visualization +import colorsys +from utils.vis import vis_mesh, save_obj, render_mesh, vis_keypoints +import numpy as np +from utils.transforms import rot6d_to_axis_angle +import cv2 +from utils.preprocessing import generate_patch_image +with open('G:/full-head-dataset/pexels/00000000/result.json')as f: + result = json.load(f) + +for image_name in result: + bbox = result[image_name]['bbox'] + if (bbox[2] < 400 or bbox[3] < 400): + # os.remove(f'G:/full-head-dataset/pexels/00000000/visualization/{image_name}') + # if os.path.exists(f'G:/full-head-dataset/pexels/00000000/aligned_images/{image_name}'): + # os.remove(f'G:/full-head-dataset/pexels/00000000/aligned_images/{image_name}') + continue + if not 
os.path.exists(f'G:/full-head-dataset/pexels/00000000/visualization/{image_name}'): + continue + if os.path.exists(f'G:/full-head-dataset/pexels/00000000/aligned_images/{image_name}'): + continue + raw_image_name = image_name.split('_')[0] + image_path = glob.glob(f'G:/full-head-dataset/pexels/00000000/images/{raw_image_name}' + '*')[0] + print(image_path) + + img, _, _ = generate_patch_image(cv2.imread(image_path), bbox, 1.0, 0.0, False, (1024,1024),enable_padding=True) + cv2.imwrite(f'G:/full-head-dataset/pexels/00000000/aligned_images/{image_name}', img) \ No newline at end of file diff --git a/data_processing/main/get_theta_and_phi.py b/data_processing/main/get_theta_and_phi.py new file mode 100644 index 0000000..daddb1f --- /dev/null +++ b/data_processing/main/get_theta_and_phi.py @@ -0,0 +1,85 @@ +import math +import json +import os.path +import matplotlib.pyplot as plt +import cv2 +import numpy as np +import glob +import random + +def cartesian_to_spherical(x, y, z): + r = math.sqrt(x**2 + y**2 + z**2) + # theta = math.atan2(y, x) + # phi = math.acos(z / r) + # return r, theta, phi + theta = math.atan2(z, x) # 0~2pi + phi = math.acos(y / r) # 0~pi + return r, theta, phi + +thetas = [] +phis = [] +thetas_imgs = [] +phis_imgs = [] + +stride = 2 +stride_rad = stride / 180 * math.pi + +def flip_yaw(pose_matrix): + flipped = pose_matrix.copy() + flipped[0, 1] *= -1 + flipped[0, 2] *= -1 + flipped[1, 0] *= -1 + flipped[2, 0] *= -1 + flipped[0, 3] *= -1 + return flipped + + +for i in range(180//stride): + phis_imgs.append([]) +for i in range(360//stride): + thetas_imgs.append([]) +for i in range(0,5): + path = f'G:/full-head-dataset/pexels/{i * 1000:08d}' + image_list = glob.glob(f'{path}/aligned_images/*') + result_json_path = os.path.join(path, 'result.json') + with open(result_json_path, 'r') as f: + result = json.load(f) + + + + + for aligned_image_path in image_list: + aligned_image_name = os.path.basename(aligned_image_path) + + camera_pose = result[aligned_image_name]['camera_pose'] + camera_pose = np.reshape(camera_pose, (4, 4)) + #radius = np.linalg.norm(camera_pose[:3,3]) + _, theta, phi = cartesian_to_spherical(camera_pose[0,3], camera_pose[1,3], camera_pose[2,3]) + + thetas.append(theta) + phis.append(phi) + + flip_camerapose_in_pyrender = np.array(result[aligned_image_name]['normalized_camerapose_in_pyrender']) + flip_camerapose_in_pyrender = flip_yaw(flip_camerapose_in_pyrender) + flip_world2camera_matrix = np.linalg.inv(flip_camerapose_in_pyrender) + flip_world2camera_matrix[[1, 2]] *= -1 + camera_pose = np.linalg.inv(flip_world2camera_matrix) + _, theta, phi = cartesian_to_spherical(camera_pose[0, 3], camera_pose[1, 3], camera_pose[2, 3]) + + thetas.append(theta) + phis.append(phi) + + + +plt.scatter(thetas, phis) +plt.show() + +# if abs(theta - np.pi/2) < 0.1: +# phi_bin = int(phi/stride_rad) +# phis_imgs[phi_bin].append(aligned_image_path) +# +# count = 0 +# for i in range(len(phis_imgs)): +# if len(phis_imgs[i]) > 0: +# cv2.imwrite(f'G:/full-head-dataset/pexels/theta_phi/{count}.png', cv2.imread(random.choice(phis_imgs[i]))) +# count+=1 \ No newline at end of file diff --git a/data_processing/main/model.py b/data_processing/main/model.py new file mode 100644 index 0000000..1c984a8 --- /dev/null +++ b/data_processing/main/model.py @@ -0,0 +1,634 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F +from nets.resnet import ResNetBackbone +from nets.module import Pose2Feat, PositionNet, RotationNet, Vposer +from nets.loss import CoordLoss, 
ParamLoss, NormalVectorLoss, EdgeLengthLoss +from utils.smpl import SMPL +from utils.mano import MANO +from config import cfg +from contextlib import nullcontext +import math +# visualization +import colorsys +from utils.vis import vis_mesh, save_obj, render_mesh, vis_keypoints +import numpy as np +from utils.transforms import rot6d_to_axis_angle +import cv2 +from utils.preprocessing import generate_patch_image + + +class Model(nn.Module): + def __init__(self, backbone, pose2feat, position_net, rotation_net, vposer): + super(Model, self).__init__() + self.backbone = backbone + self.pose2feat = pose2feat + self.position_net = position_net + self.rotation_net = rotation_net + self.vposer = vposer + + if 'FreiHAND' in cfg.trainset_3d + cfg.trainset_2d + [cfg.testset]: + self.human_model = MANO() + self.human_model_layer = self.human_model.layer.cuda() + else: + self.human_model = SMPL() + self.human_model_layer = self.human_model.layer['neutral'].cuda() + self.root_joint_idx = self.human_model.root_joint_idx + self.mesh_face = self.human_model.face + self.joint_regressor = self.human_model.joint_regressor + + self.coord_loss = CoordLoss() + self.param_loss = ParamLoss() + + # The joint that we want to align to the origin + self.align_joint_name = 'Neck' + # 0.0649 is the height between the neck joint and head joint of the template + self.init_camera_location = torch.tensor([0, 0.0649, 2.7]).float().cuda() + + # get template mesh + root_pose = torch.zeros((1, 3)).cuda() + pose_param = torch.zeros((1, 69)).cuda() + cam_trans = torch.zeros((1, 3)).cuda() + shape_param = torch.zeros((1, 10)).cuda() + pose_param = pose_param.view(-1, self.human_model.orig_joint_num - 1, 3) + pose_param = torch.cat((root_pose[:, None, :], pose_param), 1).view(-1, self.human_model.orig_joint_num * 3) + coord_output = self.get_coord(pose_param, shape_param, cam_trans) + self.template_mesh_cam_render = coord_output['mesh_cam_render'] + + # align neck joint to origin + template_align_joint_coorinate = coord_output['align_joint_coorinate'] # 1 x 1 x 3 + # print('template_align_joint_coorinate:',template_align_joint_coorinate) + # exit() + self.template_mesh_cam_render -= template_align_joint_coorinate # 1 x 6890 x 3 + self.template_align_joint_coorinate = template_align_joint_coorinate + + # used for real world rendering, should not rotate + self.template_mesh_cam_render_no_flip = self.template_mesh_cam_render.clone() + + self.template_mesh_cam_render_no_flip_joint = torch.bmm( + torch.from_numpy(self.joint_regressor).cuda()[None, :, :].repeat(1, 1, 1), + self.template_mesh_cam_render_no_flip) + + # in pyrender, should rotate 180 degree around x axis (since y and z axis are flipped) + R = torch.eye(4).cuda() + angle = torch.FloatTensor([np.pi]).cuda() + R[1, 1] = torch.cos(angle) + R[1, 2] = -torch.sin(angle) + R[2, 1] = torch.sin(angle) + R[2, 2] = torch.cos(angle) + + self.template_mesh_R = R + + self.template_mesh_cam_render = torch.matmul(self.template_mesh_cam_render, R[:3, :3]) + + x_axis_ = np.array([1, 0, 0]) + y_axis_ = np.array([0, 1, 0]) + z_axis_ = np.array([0, 0, -1]) + self.Axis_original = np.concatenate([x_axis_[:, None], y_axis_[:, None], z_axis_[:, None]], axis=1) + + self.min_box_stride = None + + def get_neck_head_rotated_template_mesh(self, pose_params_input): + root_pose = torch.zeros((1, 3)).cuda() + pose_param = torch.zeros((1, 69)).cuda() + cam_trans = torch.zeros((1, 3)).cuda() + shape_param = torch.zeros((1, 10)).cuda() + pose_param = pose_param.view(-1, self.human_model.orig_joint_num - 1, 3) 
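+        # Indices 11 and 14 of the root-less SMPL pose vector are the neck and head joints
+        # (joints 12 and 15 in the full SMPL joint ordering); only their rotations are copied
+        # from the input pose below, so every other joint of the template stays in the rest pose.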
+ + pose_param[:, [11, 14], :] = pose_params_input[:, [11, 14], :] + + pose_param = torch.cat((root_pose[:, None, :], pose_param), 1).view(-1, self.human_model.orig_joint_num * 3) + coord_output = self.get_coord(pose_param, shape_param, cam_trans) + mesh_cam_render = coord_output['mesh_cam_render'] + mesh_cam_render -= self.template_align_joint_coorinate + mesh_cam_render = torch.matmul(mesh_cam_render, self.template_mesh_R[:3, :3]) + + return mesh_cam_render + + def get_neck_head_rotated_template_mesh_joint(self, pose_params_input): + root_pose = torch.zeros((1, 3)).cuda() + pose_param = torch.zeros((1, 69)).cuda() + cam_trans = torch.zeros((1, 3)).cuda() + shape_param = torch.zeros((1, 10)).cuda() + pose_param = pose_param.view(-1, self.human_model.orig_joint_num - 1, 3) + + pose_param[:, [11, 14], :] = pose_params_input[:, [11, 14], :] + + pose_param = torch.cat((root_pose[:, None, :], pose_param), 1).view(-1, self.human_model.orig_joint_num * 3) + coord_output = self.get_coord(pose_param, shape_param, cam_trans) + joints_3d = coord_output['joints_3d'] + joints_3d -= self.template_align_joint_coorinate + + return joints_3d + + def set_min_box_stride(self, min_box_stride): + self.min_box_stride = min_box_stride + + def compute_shoulder_points_R(self, mesh_a, mesh_b): + ''' + :param mesh_a: 1 x 6890 x 3 + :param mesh_b: 1 x 6890 x 3 + + shoulder_vertex_index: 55, + ''' + + joints_a = torch.bmm(torch.from_numpy(self.joint_regressor).cuda()[None, :, :].repeat(mesh_a.shape[0], 1, 1), + mesh_a) + joints_b = torch.bmm(torch.from_numpy(self.joint_regressor).cuda()[None, :, :].repeat(mesh_b.shape[0], 1, 1), + mesh_b) + + selected_joints = [ + 'L_Shoulder', 'R_Shoulder', + # 'L_Thorax', 'R_Thorax', + 'Neck', + # 'Chest', + 'Pelvis' + ] + selected_joints_index = [self.human_model.joints_name.index(joints_name) for joints_name in selected_joints] + + points_a = joints_a[:, selected_joints_index, :] + points_b = joints_b[:, selected_joints_index, :] + + A = points_a[0, :, :].cpu().numpy() # 55 x 3 + B = points_b[0, :, :].cpu().numpy() # 55 x 3 + mean_A = np.mean(A, axis=0, keepdims=True) + mean_B = np.mean(B, axis=0, keepdims=True) + + A = A - mean_A + B = B - mean_B + + H = np.transpose(A) @ B + + U, S, Vt = np.linalg.svd(H) + R = Vt.T @ U.T + + if np.linalg.det(R) < 0: + Vt[2, :] *= -1 + R = Vt.T @ U.T + + return torch.from_numpy(R).cuda().float() # 3 x 3 + + def get_camera_trans(self, cam_param, bbox, is_render): + # camera translation + t_xy = cam_param[:, :2] + gamma = torch.sigmoid(cam_param[:, 2]) # apply sigmoid to make it positive + k_value = torch.FloatTensor([math.sqrt(cfg.focal[0] * cfg.focal[1] * cfg.camera_3d_size * cfg.camera_3d_size / ( + cfg.input_img_shape[0] * cfg.input_img_shape[1]))]).cuda().view(-1) + if is_render: + k_value = k_value * math.sqrt(cfg.input_img_shape[0] * cfg.input_img_shape[1]) / ( + bbox[:, 2] * bbox[:, 3]).sqrt() + t_z = k_value * gamma + cam_trans = torch.cat((t_xy, t_z[:, None]), 1) + return cam_trans + + def make_2d_gaussian_heatmap(self, joint_coord_img): + x = torch.arange(cfg.output_hm_shape[2]) + y = torch.arange(cfg.output_hm_shape[1]) + yy, xx = torch.meshgrid(y, x) + xx = xx[None, None, :, :].cuda().float(); + yy = yy[None, None, :, :].cuda().float(); + + x = joint_coord_img[:, :, 0, None, None]; + y = joint_coord_img[:, :, 1, None, None]; + heatmap = torch.exp( + -(((xx - x) / cfg.sigma) ** 2) / 2 - (((yy - y) / cfg.sigma) ** 2) / 2) + return heatmap + + def get_coord(self, smpl_pose, smpl_shape, smpl_trans): + batch_size = smpl_pose.shape[0] + 
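+        # Run the SMPL layer, regress 3D joints from the mesh, project them to the image plane with
+        # the pinhole model (x = X / Z * f_x + c_x, y = Y / Z * f_y + c_y), rescale to the output
+        # heatmap resolution, and also return root-relative coordinates plus the camera-space mesh
+        # used for rendering.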
mesh_cam, mesh_joints = self.human_model_layer(smpl_pose, smpl_shape, smpl_trans) + # camera-centered 3D coordinate + joint_cam = torch.bmm(torch.from_numpy(self.joint_regressor).cuda()[None, :, :].repeat(batch_size, 1, 1), + mesh_cam) + joints_3d = joint_cam.clone() + # head + align_joint_coorinate = joint_cam[:, self.human_model.joints_name.index(self.align_joint_name), None, :] + + root_joint_idx = self.human_model.root_joint_idx + + # project 3D coordinates to 2D space + x = joint_cam[:, :, 0] / (joint_cam[:, :, 2] + 1e-4) * cfg.focal[0] + cfg.princpt[0] + y = joint_cam[:, :, 1] / (joint_cam[:, :, 2] + 1e-4) * cfg.focal[1] + cfg.princpt[1] + x = x / cfg.input_img_shape[1] * cfg.output_hm_shape[2] + y = y / cfg.input_img_shape[0] * cfg.output_hm_shape[1] + joint_proj = torch.stack((x, y), 2) + + mesh_cam_render = mesh_cam.clone() + # root-relative 3D coordinates + root_cam = joint_cam[:, root_joint_idx, None, :] + joint_cam = joint_cam - root_cam + mesh_cam = mesh_cam - root_cam + return { + 'joint_proj': joint_proj, + 'joint_cam': joint_cam, + 'mesh_cam': mesh_cam, + 'mesh_cam_render': mesh_cam_render, + 'align_joint_coorinate': align_joint_coorinate, + 'root_cam': root_cam, + 'joints_3d': joints_3d + } + + def generate_visualization(self, image, mesh_cam_render, joint): + + # princpt = (bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2) + # generate random color + color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + # bbox = out['bbox'][0].cpu().numpy() + + mesh_image = render_mesh(image.copy() * 255, mesh_cam_render, self.human_model.face, + {'focal': cfg.focal, 'princpt': cfg.princpt}, color=color) + + joint_image = vis_keypoints(image.copy() * 255, joint) + + viz = np.concatenate([image.copy() * 255, + joint_image.astype(np.uint8), + mesh_image.astype(np.uint8)], + axis=1)[:, :, ::-1] + return viz + + def get_visualization(self, inputs, targets, meta_info): + inputs = inputs + for key in inputs: + inputs[key] = inputs[key].cuda() + output = self.forward(inputs, targets, meta_info, mode='test') + viz_predicts = [] + for i in range(inputs['img'].shape[0]): + viz_predict = self.generate_visualization(image=inputs['img'][i].cpu().numpy().transpose(1, 2, 0), + mesh_cam_render=output['mesh_cam_render'][ + i].detach().cpu().numpy(), + joint=inputs['joints'][i].detach().cpu().numpy() * ( + cfg.input_img_shape[1] / cfg.output_hm_shape[2]) + ) + viz_predicts.append(viz_predict) + + return viz_predicts + + def forward(self, inputs, targets, meta_info, mode): + early_img_feat = self.backbone(inputs['img']) # pose_guided_img_feat + + # get pose gauided image feature + joint_coord_img = inputs['joints'] + with torch.no_grad(): + joint_heatmap = self.make_2d_gaussian_heatmap(joint_coord_img.detach()) + # remove blob centered at (0,0) == invalid ones + joint_heatmap = joint_heatmap * inputs['joints_mask'][:, :, :, None] + pose_img_feat = self.pose2feat(early_img_feat, joint_heatmap) + pose_guided_img_feat = self.backbone(pose_img_feat, skip_early=True) # 2048 x 8 x 8 + + joint_img, joint_score = self.position_net(pose_guided_img_feat) # refined 2D pose or 3D pose + + # estimate model parameters + root_pose_6d, z, shape_param, cam_param = self.rotation_net(pose_guided_img_feat, joint_img.detach(), + joint_score.detach()) + # change root pose 6d + latent code -> axis angles + root_pose = rot6d_to_axis_angle(root_pose_6d) + pose_param = self.vposer(z) + cam_trans = self.get_camera_trans(cam_param, meta_info['bbox'], is_render=(cfg.render and (mode == 'test'))) + pose_param = pose_param.view(-1, 
self.human_model.orig_joint_num - 1, 3) + + body_pose_param = pose_param.clone() + + pose_param = torch.cat((root_pose[:, None, :], pose_param), 1).view(-1, self.human_model.orig_joint_num * 3) + coord_output = self.get_coord(pose_param, shape_param, cam_trans) + joint_proj, joint_cam, mesh_cam, mesh_cam_render = coord_output['joint_proj'], coord_output['joint_cam'], \ + coord_output['mesh_cam'], coord_output['mesh_cam_render'] + + if mode == 'train': + # loss functions + loss = {} + # joint_img: 0~8, joint_proj: 0~64, target: 0~64 + loss['body_joint_img'] = (1 / 8) * self.coord_loss(joint_img * 8, self.human_model.reduce_joint_set( + targets['orig_joint_img']), self.human_model.reduce_joint_set(meta_info['orig_joint_trunc']), + meta_info['is_3D']) + loss['smpl_joint_img'] = (1 / 8) * self.coord_loss(joint_img * 8, self.human_model.reduce_joint_set( + targets['fit_joint_img']), + self.human_model.reduce_joint_set( + meta_info['fit_joint_trunc']) * meta_info[ + 'is_valid_fit'][ + :, None, None]) + loss['smpl_pose'] = self.param_loss(pose_param, targets['pose_param'], + meta_info['fit_param_valid'] * meta_info['is_valid_fit'][:, None]) + loss['smpl_shape'] = self.param_loss(shape_param, targets['shape_param'], + meta_info['is_valid_fit'][:, None]) + loss['body_joint_proj'] = (1 / 8) * self.coord_loss(joint_proj, targets['orig_joint_img'][:, :, :2], + meta_info['orig_joint_trunc']) + loss['body_joint_cam'] = self.coord_loss(joint_cam, targets['orig_joint_cam'], + meta_info['orig_joint_valid'] * meta_info['is_3D'][:, None, None]) + loss['smpl_joint_cam'] = self.coord_loss(joint_cam, targets['fit_joint_cam'], + meta_info['is_valid_fit'][:, None, None]) + + return loss + + else: + # test output + out = {'cam_param': cam_param} + # out['input_joints'] = joint_coord_img + out['joint_img'] = joint_img * 8 + out['joint_proj'] = joint_proj + out['joint_score'] = joint_score + out['smpl_mesh_cam'] = mesh_cam + out['smpl_pose'] = pose_param.clone() + out['smpl_shape'] = shape_param.clone() + out['cam_trans'] = cam_trans.clone() + + out['mesh_cam_render'] = mesh_cam_render + out['mesh_cam_render_joints_3d'] = coord_output['joints_3d'] + + if 'smpl_mesh_cam' in targets: + out['smpl_mesh_cam_target'] = targets['smpl_mesh_cam'] + if 'bb2img_trans' in meta_info: + out['bb2img_trans'] = meta_info['bb2img_trans'] + if 'img2bb_trans' in meta_info: + out['img2bb_trans'] = meta_info['img2bb_trans'] + if 'bbox' in meta_info: + out['bbox'] = meta_info['bbox'] + if 'tight_bbox' in meta_info: + out['tight_bbox'] = meta_info['tight_bbox'] + if 'aid' in meta_info: + out['aid'] = meta_info['aid'] + + out['neck_head_rotated_template_mesh'] = self.get_neck_head_rotated_template_mesh(body_pose_param) + + cam_trans_crop = self.get_camera_trans(cam_param, meta_info['bbox'], is_render=False) + coord_output_crop = self.get_coord(pose_param, shape_param, cam_trans_crop) + mesh_cam_render_crop = coord_output_crop['mesh_cam_render'] + out['mesh_cam_render_crop'] = mesh_cam_render_crop + out['align_joint_coorinate_crop'] = coord_output_crop['align_joint_coorinate'] + # align neck joint to origin + align_joint_coorinate = coord_output['align_joint_coorinate'] # 1 x 1 x 3 + mesh_cam_render_aligned = mesh_cam_render.clone() # 1 x 6890 x 3 + # align neck joint to origin + mesh_cam_render_aligned -= align_joint_coorinate + out['mesh_cam_render_aligned'] = mesh_cam_render_aligned + out['align_joint_coorinate'] = align_joint_coorinate + + # ======================translation =================== + translation_in_pyrender = torch.eye(4, 
device=mesh_cam_render_aligned.device) + translation_in_pyrender[:3, 3:4] = -align_joint_coorinate.squeeze(1).T + + # flip y axis and z axis to render in pyrender correctly + translation_in_pyrender[[1, 2], 3] *= -1 + # ===================================================== + + # ======================rotaion ======================= + rotation_in_pyrender = torch.eye(4, device=mesh_cam_render_aligned.device) + # compute the rotation matrix that rotate template to the aligned mesh + R = self.compute_shoulder_points_R(self.template_mesh_cam_render, mesh_cam_render_aligned) + # flip y axis and z axis to render in pyrender correctly + angles = cv2.Rodrigues(torch.inverse(R).cpu().numpy())[0] + angles[[1, 2], :] *= -1 + R_in_pyrender = cv2.Rodrigues(angles)[0] + rotation_in_pyrender[:3, :3] = torch.from_numpy(R_in_pyrender).to(mesh_cam_render_aligned.device) + # ======================================================== + + # ========================== remder template on original image ================================== + out['camera_pose_in_pyrender'] = rotation_in_pyrender @ translation_in_pyrender + out['camera_to_render_template_in_pyrender'] = translation_in_pyrender + out['no_rotation_world2camera_transformation_in_real_world'] = torch.inverse(translation_in_pyrender) # + out['no_rotation_world2camera_transformation_in_real_world'][[1, 2]] *= -1 + + # ========================== Normalized camera ========================== + normalized_camerapose_in_pyrender = out['camera_pose_in_pyrender'].cpu().numpy() + + camera_position = normalized_camerapose_in_pyrender[:3, 3] + camera_position = camera_position / np.linalg.norm(camera_position) * 2.7 + + camera_up = normalized_camerapose_in_pyrender[:3, :3] @ np.reshape(np.array([0, 1, 0]), (3, 1))[:, 0] # 3, + + # we suppose the camera is always looking at the [0, 0.0649, 0] + Lookat = np.array([0, 0.0649, 0]) + + z_axis = Lookat - camera_position + z_axis = z_axis / np.linalg.norm(z_axis) + x_axis = -np.cross(camera_up, z_axis) + x_axis = x_axis / np.linalg.norm(x_axis) + y_axis = -np.cross(z_axis, x_axis) + y_axis = y_axis / np.linalg.norm(y_axis) + Axis_new = np.concatenate([x_axis[:, None], y_axis[:, None], z_axis[:, None]], axis=1) + + R = Axis_new @ np.linalg.inv(self.Axis_original) + + normalized_camerapose_in_pyrender[:3, :3] = R + normalized_camerapose_in_pyrender[:3, 3] = camera_position + + normalized_transformation_in_realworld = np.linalg.inv(normalized_camerapose_in_pyrender) + normalized_transformation_in_realworld[[1, 2]] *= -1 + + out['normalized_camerapose_in_pyrender'] = normalized_camerapose_in_pyrender + out['normalized_transformation_in_realworld'] = normalized_transformation_in_realworld + + return out + + def get_projected_joints(self, joint_cam, world_2_camera_matrix, princpt): + # joint in 3d world coordinate + joint_cam = joint_cam.squeeze(0) # 30 x 3 + joint_cam = torch.cat([joint_cam, torch.ones(joint_cam.shape[0], 1).cuda()], dim=1) # 30 x 4 + joint_on_input_image = world_2_camera_matrix @ joint_cam.T # 4,30 + joint_on_input_image = joint_on_input_image[:3, :].cpu().numpy() + + intrinsic_matrix = np.eye(3) + intrinsic_matrix[0, 0] = cfg.focal[0] + intrinsic_matrix[1, 1] = cfg.focal[1] + intrinsic_matrix[0, 2] = princpt[0] + intrinsic_matrix[1, 2] = princpt[1] + + joint_on_input_image = intrinsic_matrix @ joint_on_input_image + joint_on_input_image = joint_on_input_image / joint_on_input_image[2, :] + joint_on_input_image = joint_on_input_image[:2, :].T # 30,2 + + return joint_on_input_image + + def 
get_projected_vertex(self, mesh, world2screen_matrix): + + mesh = mesh.squeeze(0) + mesh = torch.cat([mesh, torch.ones(mesh.shape[0], 1).cuda()], dim=1).cpu().numpy() # 6890 x 4 + points_image = world2screen_matrix @ mesh.T # 4,6890 + points_image = points_image[:3, :] # 3,6890 + + points_on_input_image = points_image / points_image[2, :] + points_on_input_image = points_on_input_image[:2, :].T # 30,2 + + return points_on_input_image + + def __crop_and_process_camera_matrix__(self, model_output, input_image, joint_2d, crop_image_size, model_input_bbox, + head_bbox, use_head_bbox): + # project template mesh to input full size image + template_joint_on_input_image = self.get_projected_joints(self.template_mesh_cam_render_no_flip_joint, + model_output[ + 'no_rotation_world2camera_transformation_in_real_world'], + (model_input_bbox[0] + model_input_bbox[2] / 2, + model_input_bbox[1] + model_input_bbox[3] / 2)) + + L_Shoulder_2d = template_joint_on_input_image[self.human_model.joints_name.index('L_Shoulder'), :] + R_Shoulder_2d = template_joint_on_input_image[self.human_model.joints_name.index('R_Shoulder'), :] + + # project template mesh using the nomalized camera (1024) + template_joint_on_crop_image = self.get_projected_joints( + self.template_mesh_cam_render_no_flip_joint, + torch.from_numpy(model_output['normalized_transformation_in_realworld']).float().cuda(), + (crop_image_size / 2, crop_image_size / 2)) + L_Shoulder_2d_on_crop_image = template_joint_on_crop_image[self.human_model.joints_name.index('L_Shoulder'), :] + R_Shoulder_2d_on_crop_image = template_joint_on_crop_image[self.human_model.joints_name.index('R_Shoulder'), :] + Shoulder_center_on_crop_image = (L_Shoulder_2d_on_crop_image + R_Shoulder_2d_on_crop_image) / 2.0 + + # vis = crop_output['cropped_image'].copy() + # for i in range(template_joint_on_crop_image.shape[0]): + # cv2.circle(vis, (int(template_joint_on_crop_image[i, 0]), int(template_joint_on_crop_image[i, 1])), 5, + # (0, 255, 255), -1) + # cv2.putText(vis, str(i), (int(template_joint_on_crop_image[i, 0]), int(template_joint_on_crop_image[i, 1])), + # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1) + # + # vis = cv2.resize(vis, (vis.shape[1] // 4, vis.shape[0] // 4)) + # cv2.imshow('input_image', vis.astype(np.uint8)) + # cv2.waitKey(0) + # exit() + + L_Shoulder_coco = joint_2d[5, :2] + R_Shoulder_coco = joint_2d[6, :2] + shoulder_center_coco = (L_Shoulder_coco + R_Shoulder_coco) / 2.0 + + ''' + cv2.circle(input_image, (int(L_ear_from_coco[ 0]), int(L_ear_from_coco[ 1])), 10, (0, 0, 255), -1) + cv2.putText(input_image, "L_ear_from_coco", (int(L_ear_from_coco[0]), int(L_ear_from_coco[1])), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 1) + + cv2.circle(input_image, (int(R_ear_from_coco[0]), int(R_ear_from_coco[1])), 10, (0, 0, 255), -1) + cv2.putText(input_image, "R_ear_from_coco", (int(R_ear_from_coco[0]), int(R_ear_from_coco[1])), + cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 1) + + cv2.circle(input_image, (int(rotated_L_ear_2d[0]), int(rotated_L_ear_2d[1])), 10, (0, 255, 255), -1) + cv2.putText(input_image, "rotated_L_ear_2d", (int(rotated_L_ear_2d[0]), int(rotated_L_ear_2d[1])), + cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1) + + cv2.circle(input_image, (int(rotated_R_ear_2d[0]), int(rotated_R_ear_2d[1])), 10, (0, 255, 255), -1) + cv2.putText(input_image, "rotated_R_ear_2d", (int(rotated_R_ear_2d[0]), int(rotated_R_ear_2d[1])), + cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1) + + + input_image = cv2.resize(input_image, (input_image.shape[1] // 4, input_image.shape[0] 
// 4)) + cv2.imshow('input_image', input_image) + cv2.waitKey(0) + #''' + + # compute the stride of the bbox, using the shoulder distance of head stride + if use_head_bbox: + assert len(head_bbox) == 4 + head_stride = min(head_bbox[2], head_bbox[3]) + bbox_stride = head_stride * 2.6 + else: + shoulder_stride = np.linalg.norm(L_Shoulder_2d - R_Shoulder_2d) + bbox_stride = shoulder_stride * 1.6 + + # shoulder_center_coco should be aligned with Shoulder_center_on_crop_image + # bbox_center = shoulder_center_coco - (Shoulder_center_on_crop_image - crop_image_size / 2.0)/crop_image_size*bbox_stride #aligned_joint_2d + align_translation_on_input_image + # (shoulder_center_coco - bbox_center)/bbox_stride*crop_image_size = (Shoulder_center_on_crop_image - crop_image_size / 2.0) + # (shoulder_center_coco - bbox_center) = (Shoulder_center_on_crop_image - crop_image_size / 2.0)*bbox_stride/crop_image_size + + bbox_center = shoulder_center_coco - ( + Shoulder_center_on_crop_image - crop_image_size / 2.0) * bbox_stride / crop_image_size + + bbox = np.array([bbox_center[0] - bbox_stride / 2, bbox_center[1] - bbox_stride / 2, bbox_stride, bbox_stride]) + # print('bbox',bbox) + + if bbox[2] < self.min_box_stride or bbox[3] < self.min_box_stride: + return None + + # crop_image + try: + img, img2bb_trans, bb2img_trans = generate_patch_image(input_image, bbox, 1.0, 0.0, False, + (crop_image_size, crop_image_size), + enable_padding=True) + except: + return None + + # the cam_param is corresponding to the original bbox + original_cam_param = {'focal': cfg.focal, 'princpt': ( + model_input_bbox[0] + model_input_bbox[2] / 2, model_input_bbox[1] + model_input_bbox[3] / 2)} + + # rescale to the original image size + + # crop to new bbox + w_crop = bbox[0] + h_crop = bbox[1] + + cx_new = original_cam_param['princpt'][0] - w_crop + cy_new = original_cam_param['princpt'][1] - h_crop + + translated_princpt = (cx_new, cy_new) + + # rescale to the crop image + new_focal = (cfg.focal[0] / bbox[2] * crop_image_size, cfg.focal[1] / bbox[3] * crop_image_size) + new_princpt = ( + translated_princpt[0] / bbox[2] * crop_image_size, translated_princpt[1] / bbox[3] * crop_image_size) + + cam_param = {'focal': new_focal, 'princpt': new_princpt} + + out = {} + out['intrisics'] = cam_param + out['cropped_image'] = img + out['bbox'] = bbox + out['bbox_stride'] = bbox_stride + + return out + + def crop_and_process_camera_matrix(self, model_output, input_image, joint_2d, crop_image_size, model_input_bbox, + head_bbox): + + out = [] + + head_bbox_score = head_bbox['score'] + head_bbox_ = head_bbox['bbox'] + + if len(head_bbox_) == 4: + out_ = self.__crop_and_process_camera_matrix__(model_output, input_image, joint_2d, crop_image_size, + model_input_bbox, + head_bbox_, use_head_bbox=True) + if out_ is not None: + out.append(out_) + # out_ = self.__crop_and_process_camera_matrix__(model_output, input_image, joint_2d, crop_image_size, + # model_input_bbox, + # head_bbox_, use_head_bbox=False) + # if out_ is not None and len(out) > 0: + # if abs(out_['bbox_stride'] - out[0]['bbox_stride']) > out[0]['bbox_stride'] * 0.05: + # out.append(out_) + + else: + # no bbox, use the shoulder as stride + out_ = self.__crop_and_process_camera_matrix__(model_output, input_image, joint_2d, crop_image_size, + model_input_bbox, + head_bbox_, use_head_bbox=False) + if out_ is not None: + out.append(out_) + + return out + + +def init_weights(m): + if type(m) == nn.ConvTranspose2d: + nn.init.normal_(m.weight, std=0.001) + elif type(m) == nn.Conv2d: + 
nn.init.normal_(m.weight, std=0.001)
+        nn.init.constant_(m.bias, 0)
+    elif type(m) == nn.BatchNorm2d:
+        nn.init.constant_(m.weight, 1)
+        nn.init.constant_(m.bias, 0)
+    elif type(m) == nn.Linear:
+        nn.init.normal_(m.weight, std=0.01)
+        nn.init.constant_(m.bias, 0)
+
+
+def get_model(vertex_num, joint_num, mode):
+    # assemble the full pipeline: ResNet backbone, pose-to-feature fusion, position/rotation heads
+    # and the VPoser prior; weights are initialized only in 'train' mode
+    backbone = ResNetBackbone(cfg.resnet_type)
+    pose2feat = Pose2Feat(joint_num)
+    position_net = PositionNet()
+    rotation_net = RotationNet()
+    vposer = Vposer()
+
+    if mode == 'train':
+        backbone.init_weights()
+        pose2feat.apply(init_weights)
+        position_net.apply(init_weights)
+        rotation_net.apply(init_weights)
+
+    model = Model(backbone, pose2feat, position_net, rotation_net, vposer)
+    return model
+
diff --git a/data_processing/main/test.py b/data_processing/main/test.py
new file mode 100644
index 0000000..633d427
--- /dev/null
+++ b/data_processing/main/test.py
@@ -0,0 +1,66 @@
+import os
+# os.environ["PYOPENGL_PLATFORM"] = "osmesa"
+import torch
+import argparse
+from tqdm import tqdm
+import numpy as np
+import torch.backends.cudnn as cudnn
+from config import cfg
+from base import Tester
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--gpu', type=str, dest='gpu_ids')
+    parser.add_argument('--test_epoch', type=str, dest='test_epoch')
+    parser.add_argument('--exp_dir', type=str, default='')
+    parser.add_argument('--cfg', type=str, default='', help='experiment configure file name')
+
+    args = parser.parse_args()
+
+    if not args.gpu_ids:
+        assert 0, "Please set proper gpu ids"
+
+    # expand a GPU range such as '0-3' into '0,1,2,3'
+    if '-' in args.gpu_ids:
+        gpus = args.gpu_ids.split('-')
+        gpus[0] = int(gpus[0])
+        gpus[1] = int(gpus[1]) + 1
+        args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus))))
+
+    assert args.test_epoch, 'Test epoch is required.'
+ return args + +def main(): + args = parse_args() + cfg.set_args(args.gpu_ids, is_test=True, exp_dir=args.exp_dir) + cudnn.benchmark = True + if args.cfg: + cfg.update(args.cfg) + + tester = Tester(args.test_epoch) + tester._make_batch_generator() + tester._make_model() + + eval_result = {} + cur_sample_idx = 0 + for itr, (inputs, targets, meta_info) in enumerate(tqdm(tester.batch_generator)): + + # forward + with torch.no_grad(): + out = tester.model(inputs, targets, meta_info, 'test') + + # save output + out = {k: v.cpu().numpy() for k,v in out.items()} + for k,v in out.items(): batch_size = out[k].shape[0] + out = [{k: v[bid] for k,v in out.items()} for bid in range(batch_size)] + + # evaluate + cur_eval_result = tester._evaluate(out, cur_sample_idx) + for k,v in cur_eval_result.items(): + if k in eval_result: eval_result[k] += v + else: eval_result[k] = v + cur_sample_idx += len(out) + + tester._print_eval_result(eval_result) + +if __name__ == "__main__": + main() diff --git a/data_processing/main/train.py b/data_processing/main/train.py new file mode 100644 index 0000000..84ec01a --- /dev/null +++ b/data_processing/main/train.py @@ -0,0 +1,109 @@ +import argparse +from config import cfg +import torch +from base import Trainer +import torch.backends.cudnn as cudnn +import torch.cuda.amp as amp +from torch.utils.tensorboard import SummaryWriter + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, dest='gpu_ids') + parser.add_argument('--continue', dest='continue_train', action='store_true') + parser.add_argument('--exp_dir', type=str, default='', help='for resuming train') + parser.add_argument('--amp', dest='use_mixed_precision', action='store_true', help='use automatic mixed precision training') + parser.add_argument('--init_scale', type=float, default=1024., help='initial loss scale') + parser.add_argument('--cfg', type=str, default='', help='experiment configure file name') + parser.add_argument('--data_dir', type=str, default='', help='dataset path') + + args = parser.parse_args() + + if not args.gpu_ids: + assert 0, "Please set propoer gpu ids" + + if '-' in args.gpu_ids: + gpus = args.gpu_ids.split('-') + gpus[0] = int(gpus[0]) + gpus[1] = int(gpus[1]) + 1 + args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) + + return args + + +def main(): + # argument parse and create log + args = parse_args() + cfg.set_args(args.gpu_ids, args.continue_train, exp_dir=args.exp_dir) + cudnn.benchmark = True + if args.cfg: + cfg.update(args.cfg) + if args.data_dir is not '': + cfg.set_data_dir(args.data_dir) + writer = SummaryWriter(cfg.log_dir) + trainer = Trainer() + trainer._make_batch_generator() + trainer._make_model() + + scaler = amp.GradScaler(init_scale=args.init_scale, enabled=args.use_mixed_precision) + global_step = 0 + # train + for epoch in range(trainer.start_epoch, cfg.end_epoch): + print('Epoch %d/%d' % (epoch, cfg.end_epoch)) + trainer.set_lr(epoch) + trainer.tot_timer.tic() + trainer.read_timer.tic() + for itr, (inputs, targets, meta_info) in enumerate(trainer.batch_generator): + trainer.read_timer.toc() + trainer.gpu_timer.tic() + + # forward + trainer.optimizer.zero_grad() + with amp.autocast(args.use_mixed_precision): + loss = trainer.model(inputs, targets, meta_info, 'train') + loss = {k: loss[k].mean() for k in loss} + for k in loss: + writer.add_scalar('train/loss_' + k, loss[k].detach(), global_step) + _loss = sum(loss[k] for k in loss) + + # backward + with amp.autocast(False): + _loss = scaler.scale(_loss) + 
_loss.backward() + scaler.step(trainer.optimizer) + + scaler.update(args.init_scale) + + trainer.gpu_timer.toc() + screen = [ + 'Epoch %d/%d itr %d/%d:' % (epoch, cfg.end_epoch, itr, trainer.itr_per_epoch), + 'lr: %g' % (trainer.get_lr()), + 'speed: %.2f(%.2fs r%.2f)s/itr' % ( + trainer.tot_timer.average_time, trainer.gpu_timer.average_time, trainer.read_timer.average_time), + '%.2fh/epoch' % (trainer.tot_timer.average_time / 3600. * trainer.itr_per_epoch), + ] + screen += ['%s: %.4f' % ('loss_' + k, v.detach()) for k,v in loss.items()] + trainer.logger.info(' '.join(screen)) + + trainer.tot_timer.toc() + trainer.tot_timer.tic() + trainer.read_timer.tic() + + if itr % 400 == 0: + trainer.save_visualization(inputs, targets, meta_info, epoch,itr) + if itr % 1000 == 0: + trainer.save_model({ + 'epoch': epoch, + 'network': trainer.model.state_dict(), + 'optimizer': trainer.optimizer.state_dict(), + }, epoch,itr) + global_step += 1 + + # trainer.save_model({ + # 'epoch': epoch, + # 'network': trainer.model.state_dict(), + # 'optimizer': trainer.optimizer.state_dict(), + # }, epoch) + + +if __name__ == "__main__": + main() diff --git a/data_processing/prepare_data.py b/data_processing/prepare_data.py new file mode 100644 index 0000000..136a4b4 --- /dev/null +++ b/data_processing/prepare_data.py @@ -0,0 +1,23 @@ +import os +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('--input_dir', type=str, default='') +args = parser.parse_args() + +input_dir = args.input_dir + +root = os.path.dirname(os.path.abspath(__file__)) +print(root) + + +os.chdir(os.path.join(root,'HigherHRNet-Human-Pose-Estimation')) +command = f'python tools/get_keypoints.py --cfg experiments/coco/higher_hrnet/w32_512_adam_lr1e-3.yaml --input_dir {input_dir} TEST.MODEL_FILE models/pytorch/pose_coco/pose_higher_hrnet_w32_512.pth ' +print(command) +os.system(command) +# head-detection + +os.chdir(os.path.join(root,'yolov5_crowdhuman')) +command = f'python detect_head_bbox.py --weights crowdhuman_yolov5m.pt --source {input_dir} --heads' +print(command) +os.system(command) diff --git a/data_processing/preprocess_img_for_inversion.py b/data_processing/preprocess_img_for_inversion.py new file mode 100644 index 0000000..d5ed43d --- /dev/null +++ b/data_processing/preprocess_img_for_inversion.py @@ -0,0 +1,103 @@ +import glob + +import os +import argparse + +parser = argparse.ArgumentParser() + +parser.add_argument('--test_data_dir', type=str,default='../test_data') + + +opt = parser.parse_args() +test_data_dir = opt.test_data_dir + + +for sub_dir in glob.glob(os.path.join(test_data_dir, '*')): + samples_dir = os.path.join(sub_dir, 'samples') + + if os.path.exists(samples_dir): + new_crop_samples_dir = os.path.join(sub_dir, 'samples_new_crop') + if os.path.exists(new_crop_samples_dir): + continue + images_dir = os.path.join(samples_dir, 'images') + os.makedirs(images_dir, exist_ok=True) + for image in glob.glob(os.path.join(samples_dir, '*.png')): + os.rename(image, os.path.join(images_dir, os.path.basename(image))) + +root = os.path.dirname(os.path.abspath(__file__)) +print(root) + + + + + +os.chdir(root) +# os.system(cmd) +for sub_dir in glob.glob(os.path.join(test_data_dir, '*')): + samples_dir = os.path.join(sub_dir, 'samples') + + if os.path.exists(samples_dir): + new_crop_samples_dir = os.path.join(sub_dir, 'samples_new_crop') + if os.path.exists(new_crop_samples_dir): + continue + cmd = f'python prepare_data.py --input_dir {samples_dir}' + os.system(cmd) + # os.system(cmd) + + +# os.system(cmd) +for 
sub_dir in glob.glob(os.path.join(test_data_dir, '*')):
+    samples_dir = os.path.join(sub_dir, 'samples')
+
+    if os.path.exists(samples_dir):
+        new_crop_samples_dir = os.path.join(sub_dir, 'samples_new_crop')
+        if os.path.exists(new_crop_samples_dir):
+            continue
+        cmd = f'python runmy.py --input_dir {samples_dir}'
+        os.system(cmd)
+        # os.system(cmd)
+
+
+# os.system(cmd)
+# run DensePose (CSE) on the aligned images to obtain dp_vertex segmentations
+os.chdir(os.path.join(root,'detectron2/projects/DensePose'))
+for sub_dir in glob.glob(os.path.join(test_data_dir, '*')):
+    samples_dir = os.path.join(sub_dir, 'samples')
+
+    if os.path.exists(samples_dir):
+        new_crop_samples_dir = os.path.join(sub_dir, 'samples_new_crop')
+        if os.path.exists(new_crop_samples_dir):
+            continue
+        cmd = f'python apply_net.py show configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml R_101_FPN_DL_soft_s1x.pkl {samples_dir}/aligned_images dp_vertex --output {samples_dir}/seg --min_score 0.8'
+        os.system(cmd)
+        # os.system(cmd)
+
+
+# re-crop the images using the DensePose results
+os.chdir(root)
+for sub_dir in glob.glob(os.path.join(test_data_dir, '*')):
+
+    samples_dir = os.path.join(sub_dir, 'samples')
+    if os.path.exists(samples_dir):
+        new_crop_samples_dir = os.path.join(sub_dir, 'samples_new_crop')
+        if os.path.exists(new_crop_samples_dir):
+            continue
+        cmd = f'python runmy_new_crop.py --input_dir {samples_dir}'
+        os.system(cmd)
+        # os.system(cmd)
+
+
+# generate human segmentation masks for the newly cropped images
+for sub_dir in glob.glob(os.path.join(test_data_dir, '*')):
+    samples_dir = os.path.join(sub_dir, 'samples')
+    if os.path.exists(samples_dir):
+        new_crop_samples_dir = os.path.join(sub_dir, 'samples_new_crop')
+        new_crop_mask_samples_dir = os.path.join(sub_dir, 'samples_new_crop/mask')
+        if os.path.exists(new_crop_mask_samples_dir):
+            continue
+        os.makedirs(new_crop_mask_samples_dir, exist_ok=True)
+        cmd = f'python segmentation_example.py --base_path {new_crop_samples_dir}'
+        os.system(cmd)
+        # os.system(cmd)
+
+
+
+
+
diff --git a/data_processing/readme.md b/data_processing/readme.md
new file mode 100644
index 0000000..bb449f2
--- /dev/null
+++ b/data_processing/readme.md
@@ -0,0 +1 @@
+Our image processing code is mainly adapted from [hongsukchoi/3DCrowdNet_RELEASE](https://github.com/hongsukchoi/3DCrowdNet_RELEASE); thanks to its authors for their valuable contributions!
\ No newline at end of file diff --git a/data_processing/requirements.txt b/data_processing/requirements.txt new file mode 100644 index 0000000..3a014fa --- /dev/null +++ b/data_processing/requirements.txt @@ -0,0 +1,21 @@ +opencv-contrib-python==4.7.0.72 +opencv-python==4.7.0.72 +opencv-python-headless==4.7.0.72 +urllib3==1.26.15 +json_tricks==3.17.3 +pytz==2022.7.1 +munkres==1.1.4 +scipy==1.9.1 +pyrender==0.1.45 +ImageHash==4.3.1 +easydict==1.10 +human-body-prior==0.8.5.0 +progressbar2==4.3b0 +numpy==1.21.6 +omegaconf==2.3.0 +Pillow==9.5.0 +pycocotools==2.0.6 +av==10.0.0 +pandas==1.3.5 +seaborn==0.12.2 +chumpy==0.70 diff --git a/data_processing/run_unsplash.py b/data_processing/run_unsplash.py new file mode 100644 index 0000000..59947f6 --- /dev/null +++ b/data_processing/run_unsplash.py @@ -0,0 +1,29 @@ +import os +import argparse + +parser = argparse.ArgumentParser(description=' ') +# general + +for i in range(0,1): + path = f'E:/project/unsplash/{i*1000:08d}' + head_box_path = f'{path}/head_bbox_yolov5_crowdhuman.json' + if not os.path.exists(path) or not os.path.exists(head_box_path): + continue + + os.chdir('E:/project/3DCrowdNet_upper_body-main/demo') + command =f'python extract_camera_parameter.py --gpu 0 --input_dir {path} --output_dir {path} --data_dir E:/project/3DCrowdNet_upper_body-main/data' + print(command) + os.system(command) + + os.chdir('E:/project/3DCrowdNet_upper_body-main/MANIQA') + command =f'python imagedups.py -r -d -N -p {path}/aligned_images' + print(command) + os.system(command) + + command = f'python remove_blurr_images.py --input_dir {path}' + print(command) + os.system(command) + + command = f'python delete_images.py --input_dir {path}' + print(command) + os.system(command) \ No newline at end of file diff --git a/data_processing/runmy.py b/data_processing/runmy.py new file mode 100644 index 0000000..458de19 --- /dev/null +++ b/data_processing/runmy.py @@ -0,0 +1,39 @@ +import os +import argparse + +# python runmy.py +parser = argparse.ArgumentParser() +parser.add_argument('--input_dir', type=str, default='') +args = parser.parse_args() + +path = args.input_dir + +root = os.path.dirname(os.path.abspath(__file__)) +print(root) + + + + +head_box_path = f'{path}/head_bbox_yolov5_crowdhuman.json' +if not os.path.exists(path) or not os.path.exists(head_box_path): + raise Exception('path or head_box_path not exists') + + +data_dir = os.path.join(root,'data') +os.chdir(os.path.join(root,'demo')) +command = f'python extract_camera_parameter.py --gpu 0 --input_dir {path} --output_dir {path} --data_dir {data_dir}' +print(command) +os.system(command) + +# os.chdir(os.path.join(root,'MANIQA')) +# command = f'python imagedups.py -r -d -N -p {path}/aligned_images' +# print(command) +# os.system(command) + +# command = f'python remove_blurr_images.py --input_dir {path}' +# print(command) +# os.system(command) +# +# command = f'python delete_images.py --input_dir {path}' +# print(command) +# os.system(command) diff --git a/data_processing/runmy_new_crop.py b/data_processing/runmy_new_crop.py new file mode 100644 index 0000000..c6f8671 --- /dev/null +++ b/data_processing/runmy_new_crop.py @@ -0,0 +1,20 @@ +import os +import argparse + +# python runmy.py +parser = argparse.ArgumentParser() +parser.add_argument('--input_dir', type=str, default='') +args = parser.parse_args() + +path = args.input_dir + +root = os.path.dirname(os.path.abspath(__file__)) +print(root) + + +os.chdir(os.path.join(root,'demo')) + +data_dir = os.path.join(root,'data') +command = f'python 
new_crop_use_densepose.py --gpu 0 --input_dir {path} --output_dir {path}_new_crop --data_dir {data_dir}' +print(command) +os.system(command) diff --git a/data_processing/segmentation_example.py b/data_processing/segmentation_example.py new file mode 100644 index 0000000..665abee --- /dev/null +++ b/data_processing/segmentation_example.py @@ -0,0 +1,102 @@ +import torch +from PIL import Image +from torchvision.transforms import ToPILImage +import glob +import os +from torchvision.models.segmentation import deeplabv3_resnet101 +from torchvision import transforms, utils +from tqdm import tqdm +from torch.utils.data import dataset + + +class LoadData(dataset.Dataset): + + def __init__(self, base_path): + super(LoadData, self).__init__() + #base_path = 'F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion/output/2023-10-28-with-inversion-initialization/samples_new_crop' + paths = sorted(glob.glob(f'{base_path}/aligned_images/*')) + os.makedirs(f'{base_path}/mask', exist_ok=True) + self.paths = paths + + def __getitem__(self,idx): + image_path =self.paths[idx] + image = Image.open(image_path) + # Define the preprocessing transformation + preprocess = transforms.Compose([ + transforms.ToTensor() + ]) + + # Apply the transformation to the image + input_tensor = preprocess(image) + + return input_tensor, image_path + + def __len__(self): + return len(self.paths) + + +def get_mask(model, batch, cid): + normalized_batch = transforms.functional.normalize( + batch, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)) + output = model(normalized_batch)['out'] + # sem_classes = [ + # '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + # 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', + # 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' + # ] + # sem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)} + # cid = sem_class_to_idx['car'] + + normalized_masks = torch.nn.functional.softmax(output, dim=1) + + boolean_car_masks = (normalized_masks.argmax(1) == cid) + return boolean_car_masks.float() + + +def get_and_save_mask( device,base_path): + # data loder + batch_size = 8 + dataset = torch.utils.data.DataLoader( + dataset=LoadData(base_path), + batch_size=batch_size, + shuffle=False + ) + for input_tensor, image_paths in tqdm(dataset): + input_batch = input_tensor.to(device) # batxh, 3, 256, 256 + + # load segmentation net + seg_net = deeplabv3_resnet101(pretrained=True, progress=False).to(device) + seg_net.requires_grad_(False) + seg_net.eval() + + # 15 means human mask + mask = get_mask(seg_net, input_batch, 15) + print(mask.shape) # 16, 256, 256 + + mask = mask.unsqueeze(1) # 16, 1, 256, 256 + + for i in range(mask.shape[0]): + # Squeeze the tensor to remove unnecessary dimensions and convert to PIL Image + mask0 = mask[i:i+1] + mask_squeezed = torch.squeeze(mask0) + mask_image = ToPILImage()(mask_squeezed) + image_path = image_paths[i] + # Save as PNG + mask_path = image_path.replace('aligned_images', 'mask') + # /home/zjucadjin/dataset/pexels-256-new/0000000053/0000053992.png + # mask_dir = mask_path[:-len('/0000053992.png')] + # os.makedirs(mask_dir, exist_ok=True) + mask_image.save(mask_path) + + +def run(rank,base_path): + rank = rank + device = torch.device('cuda', rank) + get_and_save_mask(device,base_path) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--base_path', type=str, required=True) + run(0, parser.parse_args().base_path) \ No newline at end of 
file
diff --git a/data_processing/tasks/4.19-4.20.xlsx b/data_processing/tasks/4.19-4.20.xlsx
new file mode 100644
index 0000000..87afec1
Binary files /dev/null and b/data_processing/tasks/4.19-4.20.xlsx differ
diff --git a/data_processing/tasks/4.20-4.21.xlsx b/data_processing/tasks/4.20-4.21.xlsx
new file mode 100644
index 0000000..a71af8f
Binary files /dev/null and b/data_processing/tasks/4.20-4.21.xlsx differ
diff --git a/data_processing/tasks/4.21-4.22.xlsx b/data_processing/tasks/4.21-4.22.xlsx
new file mode 100644
index 0000000..622c919
Binary files /dev/null and b/data_processing/tasks/4.21-4.22.xlsx differ
diff --git a/data_processing/tasks/4.22-4.23.xlsx b/data_processing/tasks/4.22-4.23.xlsx
new file mode 100644
index 0000000..deec8b1
Binary files /dev/null and b/data_processing/tasks/4.22-4.23.xlsx differ
diff --git a/data_processing/tasks/4.23-4.24.xlsx b/data_processing/tasks/4.23-4.24.xlsx
new file mode 100644
index 0000000..534b0b4
Binary files /dev/null and b/data_processing/tasks/4.23-4.24.xlsx differ
diff --git a/data_processing/tasks/4.24-4.25.xlsx b/data_processing/tasks/4.24-4.25.xlsx
new file mode 100644
index 0000000..405e12b
Binary files /dev/null and b/data_processing/tasks/4.24-4.25.xlsx differ
diff --git a/data_processing/tool/check_crowdidx.py b/data_processing/tool/check_crowdidx.py
new file mode 100644
index 0000000..9f99ad8
--- /dev/null
+++ b/data_processing/tool/check_crowdidx.py
@@ -0,0 +1,241 @@
+import pickle
+
+import numpy as np
+import os.path as osp
+from pycocotools.coco import COCO
+
+
+def compute_CrowdIndex(ref_bbox, ref_kps, intf_kps):
+    # Crowd Index of the reference person: the number of the interfering person's keypoints that
+    # fall inside the reference bounding box (nb), divided by the number of the reference person's
+    # own keypoints inside that box (na).
+
+    na = 0
+    for ref_kp in ref_kps:
+        count = get_inclusion(ref_bbox, ref_kp)
+        na += count
+
+    nb = 0
+    for intf_kp in intf_kps:
+        count = get_inclusion(ref_bbox, intf_kp)
+        nb += count
+
+    if na < 4: # invalid ones, e.g.
truncated images + return 0 + else: + return nb / na + + +def get_inclusion(bbox, kp): + if bbox[0] > kp[0] or (bbox[0] + bbox[2]) < kp[0]: + return 0 + + if bbox[1] > kp[1] or (bbox[1] + bbox[3]) < kp[1]: + return 0 + + return 1 + + +def compute_iou(src_roi, dst_roi): + # IoU calculate with GTs + xmin = np.maximum(dst_roi[:, 0], src_roi[:, 0]) + ymin = np.maximum(dst_roi[:, 1], src_roi[:, 1]) + xmax = np.minimum(dst_roi[:, 0] + dst_roi[:, 2], src_roi[:, 0] + src_roi[:, 2]) + ymax = np.minimum(dst_roi[:, 1] + dst_roi[:, 3], src_roi[:, 1] + src_roi[:, 3]) + + interArea = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin) + + boxAArea = dst_roi[:, 2] * dst_roi[:, 3] + boxBArea = np.tile(src_roi[:, 2] * src_roi[:, 3], (len(dst_roi), 1)) + sumArea = boxAArea + boxBArea + + iou = interArea / (sumArea - interArea + 1e-5) + + return iou + + +def get_bbox(joint_img, joint_valid): + x_img, y_img = joint_img[:, 0], joint_img[:, 1] + x_img = x_img[joint_valid == 1]; + y_img = y_img[joint_valid == 1]; + xmin = min(x_img); + ymin = min(y_img); + xmax = max(x_img); + ymax = max(y_img); + + x_center = (xmin + xmax) / 2.; + width = xmax - xmin; + xmin = x_center - 0.5 * width * 1.2 + xmax = x_center + 0.5 * width * 1.2 + + y_center = (ymin + ymax) / 2.; + height = ymax - ymin; + ymin = y_center - 0.5 * height * 1.2 + ymax = y_center + 0.5 * height * 1.2 + + bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(np.float32) + return bbox + +class PW3D(): + def __init__(self, data_split): + self.data_split = data_split + self.data_path = osp.join('..', 'data', 'PW3D', 'data') + self.seq_iou_list, self.seq_crowd_idx_list = self.load_data() + + def load_data(self): + db = COCO(osp.join(self.data_path, '3DPW_latest_' + self.data_split + '.json')) + + seq_iou_list = {} + seq_crowd_idx_list = {} + for iid in db.imgs.keys(): + img = db.imgs[iid] + sequence_name = img['sequence'] + img_name = img['file_name'] + img_path = osp.join(self.data_path, 'imageFiles', sequence_name, img_name) + img_width, img_height = img['height'], img['width'] + + aids = db.getAnnIds(iid) + if len(aids) < 2: + continue + + data_dict = {} + data_dict['img_id'] = iid + data_dict['img_path'] = img_path + + # compute iou + ann1 = db.anns[aids[0]] + ann2 = db.anns[aids[1]] + + bbox1 = np.array(ann1['bbox']) + bbox2 = np.array(ann2['bbox']) + iou = compute_iou(bbox1[None, :], bbox2[None, :])[0,0] + + seq_iou_list.setdefault(sequence_name, []).append(iou) + + # compute crowd index + joint_img1 = np.array(ann1['joint_img'], dtype=np.float32).reshape(-1, 2) + joint_img2 = np.array(ann2['joint_img'], dtype=np.float32).reshape(-1, 2) + + ci1 = compute_CrowdIndex(bbox1, joint_img1, joint_img2) + ci2 = compute_CrowdIndex(bbox2, joint_img2, joint_img1) + + crowd_idx = (ci1+ci2) / 2 + + seq_crowd_idx_list.setdefault(sequence_name, []).append(crowd_idx) + + for seq in seq_iou_list.keys(): + seq_iou_list[seq] = sum(seq_iou_list[seq]) / len(seq_iou_list[seq]) + for seq in seq_crowd_idx_list.keys(): + seq_crowd_idx_list[seq] = sum(seq_crowd_idx_list[seq]) / len(seq_crowd_idx_list[seq]) + + return seq_iou_list, seq_crowd_idx_list + + def print_statistics(self): + total_mean_iou, total_mean_crowd_idx = 0, 0 + for seq in self.seq_iou_list: + print(f"Average iou / crowd index of {seq}: {self.seq_iou_list[seq]}, {self.seq_crowd_idx_list[seq]}") + total_mean_iou += self.seq_iou_list[seq] + total_mean_crowd_idx += self.seq_crowd_idx_list[seq] + print(f"All iou / crowd index: {total_mean_iou/len(self.seq_iou_list)}, 
{total_mean_crowd_idx/len(self.seq_iou_list)}") + + +class MuPoTs(): + def __init__(self): + self.test_annot_path = osp.join('..', 'data', 'MuPoTs', 'data', 'MuPoTS-3D.json') + self.seq_iou_list, self.seq_crowd_idx_list = self.load_data() + + def load_data(self): + db = COCO(self.test_annot_path) + + seq_iou_list = {} + seq_crowd_idx_list = {} + for iid in db.imgs.keys(): + img = db.imgs[iid] + img_name = img['file_name'] + sequence_name = img_name.split('/')[0] + + aids = db.getAnnIds(iid) + if len(aids) < 2: + continue + + for aid_idx in range(len(aids)): + ref_ann = db.anns[aids[aid_idx]] + other_aids = aids[:aid_idx] + aids[aid_idx+1:] + ref_bbox = np.array(ref_ann['bbox']) + ref_joint = np.array(ref_ann['keypoints_img']) + for oaid in other_aids: + other_ann = db.anns[oaid] + other_bbox = np.array(other_ann['bbox']) + other_joint = np.array(other_ann['keypoints_img']) + + iou = compute_iou(ref_bbox[None, :], other_bbox[None, :])[0, 0] / 2.0 # compensate twice count + crowd_idx = compute_CrowdIndex(ref_bbox, ref_joint, other_joint) + + seq_iou_list.setdefault(sequence_name, []).append(iou) + seq_crowd_idx_list.setdefault(sequence_name, []).append(crowd_idx) + + for seq in seq_iou_list.keys(): + seq_iou_list[seq] = sum(seq_iou_list[seq]) / len(seq_iou_list[seq]) + for seq in seq_crowd_idx_list.keys(): + seq_crowd_idx_list[seq] = sum(seq_crowd_idx_list[seq]) / len(seq_crowd_idx_list[seq]) + + return seq_iou_list, seq_crowd_idx_list + + def print_statistics(self): + total_mean_iou, total_mean_crowd_idx = 0, 0 + for seq in self.seq_iou_list: + print(f"Average iou / crowd index of {seq}: {self.seq_iou_list[seq]}, {self.seq_crowd_idx_list[seq]}") + total_mean_iou += self.seq_iou_list[seq] + total_mean_crowd_idx += self.seq_crowd_idx_list[seq] + print(f"All iou / crowd index: {total_mean_iou/len(self.seq_iou_list)}, {total_mean_crowd_idx/len(self.seq_iou_list)}") + + + +class CMUP(): + def __init__(self): + self.seq_list = ['160906_pizza1', '160422_ultimatum1', '160422_haggling1', '160422_mafia2'] + + self.seq_iou_list, self.seq_crowd_idx_list = {}, {} + + for seq_name in self.seq_list: + self.annot_path = osp.join('..', 'data', 'CMU-Panoptic', 'data', f'{seq_name}.pkl') + mean_iou, mean_crowd_idx = self.load_data() + self.seq_iou_list[seq_name], self.seq_crowd_idx_list[seq_name] = mean_iou, mean_crowd_idx + + def load_data(self): + with open(self.annot_path,'rb') as f: + db = pickle.load(f) + + seq_iou_list = [] + seq_crowd_idx_list = [] + for img_idx in range(len(db)): + + for i in range(db[img_idx]['kpts2d'].shape[0]): + ref_joint = db[img_idx]['kpts2d'][i] #24 3 + ref_bbox = get_bbox(ref_joint[:, :2], ref_joint[:, 2]) + + other_joints = np.concatenate((db[img_idx]['kpts2d'][:i],db[img_idx]['kpts2d'][i+1:]), axis=0) + for other_joint in other_joints: + other_bbox = get_bbox(other_joint[:, :2], other_joint[:, 2]) + iou = compute_iou(ref_bbox[None, :], other_bbox[None, :])[0, 0] / 2.0 # compensate twice count + crowd_idx = compute_CrowdIndex(ref_bbox, ref_joint, other_joint) + + seq_iou_list.append(iou) + seq_crowd_idx_list.append(crowd_idx) + + mean_iou, mean_crowd_idx = sum(seq_iou_list) / len(seq_iou_list), sum(seq_crowd_idx_list) / len(seq_crowd_idx_list) + return mean_iou, mean_crowd_idx + + def print_statistics(self): + total_mean_iou, total_mean_crowd_idx = 0, 0 + for seq in self.seq_list: + print(f"Average iou / crowd index of {seq}: {self.seq_iou_list[seq]}, {self.seq_crowd_idx_list[seq]}") + total_mean_iou += self.seq_iou_list[seq] + total_mean_crowd_idx += 
self.seq_crowd_idx_list[seq] + print(f"All iou / crowd index: {total_mean_iou/len(self.seq_list)}, {total_mean_crowd_idx/len(self.seq_list)}") + + + + +if __name__ == '__main__': + dataset = PW3D('validation') + # dataset = MuPoTs() + # dataset = CMUP() + dataset.print_statistics() diff --git a/data_processing/tool/convert_simple_to_i2l.py b/data_processing/tool/convert_simple_to_i2l.py new file mode 100644 index 0000000..f55c53f --- /dev/null +++ b/data_processing/tool/convert_simple_to_i2l.py @@ -0,0 +1,9 @@ +import torch + +model = torch.load('pose_resnet_50_256x192.pth.tar') # load simple +model_save = {'network': {}, 'epoch': 0} +for k,v in model.items(): + save_k = 'module.backbone.' + k + model_save['network'][save_k] = v.cpu() + +torch.save(model_save, 'snapshot_0.pth.tar') diff --git a/data_processing/tool/match_3dpw_2dpose.py b/data_processing/tool/match_3dpw_2dpose.py new file mode 100644 index 0000000..19d557c --- /dev/null +++ b/data_processing/tool/match_3dpw_2dpose.py @@ -0,0 +1,373 @@ +import os.path as osp +import torch +import numpy as np +import copy +import cv2 +import json +import torchvision.transforms as transforms +import torch.nn as nn +from torch.utils.data import DataLoader +from pycocotools.coco import COCO +from tqdm import tqdm +import matplotlib.pyplot as plt +import matplotlib as mpl +import glob + + + +class PW3D(torch.utils.data.Dataset): + def __init__(self, get_crowd): + self.get_crowd = get_crowd + self.data_split = 'validation' if self.get_crowd else 'test' # data_split + self.data_path = osp.join('..', 'data', 'PW3D', 'data') + + self.coco_joints_name = ( + 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', + 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle') # 17 + self.openpose_joints_name = ( + 'Nose', 'Neck', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', + 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Eye', 'L_Eye', 'R_Ear', 'L_Ear') # 18 + # Neck??? 
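+        # Note: the 18-name OpenPose tuple above is immediately overridden by the 19-name version
+        # below, which appends 'Pelvis'; joint mapping between skeletons is done by name, so only
+        # the second definition takes effect.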
+ self.openpose_joints_name = ('Nose', 'Neck', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Eye', 'L_Eye', 'R_Ear', 'L_Ear', 'Pelvis') + + self.smpl_joints_name = ('Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', + 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand') + self.coco_skeleton = ( (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12) ) + + self.datalist = self.load_data() + print("data len: ", len(self.datalist)) + + def load_data(self): + db = COCO(osp.join(self.data_path, '3DPW_latest_' + self.data_split + '.json')) + + if self.get_crowd: + with open(osp.join(self.data_path, f'3DPW_{self.data_split}_crowd_yolo_result.json')) as f: + yolo_bbox = json.load(f) + else: + with open(osp.join(self.data_path, '3DPW_test_yolo_result.json')) as f: + yolo_bbox = json.load(f) + + datalist = [] + aid_keys = sorted(yolo_bbox.keys(), key=lambda x: int(x)) if self.get_crowd else db.anns.keys() + for aid in aid_keys: + aid = int(aid) + ann = db.anns[aid] + image_id = ann['image_id'] + img = db.loadImgs(image_id)[0] + sequence_name = img['sequence'] + img_name = img['file_name'] + img_path = osp.join(self.data_path, 'imageFiles', sequence_name, img_name) + cam_param = {k: np.array(v, dtype=np.float32) for k, v in img['cam_param'].items()} + + openpose = np.array(ann['openpose_result'], dtype=np.float32).reshape(-1, 3) + openpose = transform_joint_to_other_db(openpose, self.openpose_joints_name, self.coco_joints_name) + + """ + # TEMP + centerpose = temp_result[str(aid)]['coco_joints'] + centerpose = np.array(centerpose).reshape(-1,2) + + tmpimg = cv2.imread(img_path) + oimg = vis_keypoints(tmpimg, openpose) + cv2.imshow('openpose', oimg/255) + cv2.waitKey(0) + cimg = vis_keypoints(tmpimg, centerpose) + cv2.imshow('centerpose', cimg / 255) + cv2.waitKey(0) + import pdb; pdb.set_trace() + """ + + smpl_joints = np.array(ann['joint_img']).reshape(-1,2) + smpl_joints = np.concatenate((smpl_joints, np.ones_like(smpl_joints[:, :1])), axis=1) + bbox = get_bbox(smpl_joints, np.ones_like(smpl_joints[:, 0]), extend_ratio=1.1) + bbox[2], bbox[3] = bbox[0] + bbox[2], bbox[1] + bbox[3] + + smplpose = transform_joint_to_other_db(smpl_joints, self.smpl_joints_name, self.coco_joints_name) + + img_name = sequence_name + '_' + img_name + data_dict = {'img_path': img_path, 'img_name': img_name, 'img_id': image_id, 'ann_id': aid, + 'img_shape': (img['height'], img['width']), + 'bbox': bbox, 'openpose': openpose, 'smplpose': smplpose} + + datalist.append(data_dict) + + return datalist + + def __len__(self): + return len(self.datalist) + + def __getitem__(self, idx): + pass + + def getitem(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_name, img_shape, img_id, aid = data['img_name'], data['img_shape'], data['img_id'], data['ann_id'] + + # for prediction matching + openpose, smplpose = data['openpose'], data['smplpose'] + + # img_path = data['img_path'] + # tmpimg = cv2.imread(img_path) + # oimg = vis_keypoints_with_skeleton(tmpimg, openpose.T, self.coco_skeleton) + # cv2.imshow('openpose', oimg/255) + # cv2.waitKey(0) + # simg = vis_keypoints_with_skeleton(tmpimg, smplpose.T, self.coco_skeleton) + # cv2.imshow('smplpose', simg / 255) + # cv2.waitKey(0) + # import pdb; pdb.set_trace() + + return 
data['img_path'], img_name, img_id, aid, openpose, smplpose + + +class PoseMatcher: + def __init__(self, dataloader): + self.dataloader = dataloader + result_path = '/home/hongsukchoi/projects/pytorch_Realtime_Multi-Person_Pose_Estimation' # '/home/redarknight/projects/HHRNet/output/3dpw/test' + self.candidates = self.load_2dpose_results(result_path) + + def run(self): + output_list = [] + for idx in range(len(self.dataloader)): + img_path, img_name, img_id, aid, openpose, smplpose = self.dataloader.getitem(idx) + candidates = self.candidates[img_name] + + output = {} + output['candidates'] = candidates + output['target'] = { + 'openpose': openpose, + 'smplpose': smplpose + } + output['meta'] = { + 'aid': aid, + 'img_id': img_id, + 'img_path': img_path + } + + output_list.append(output) + + output_list = filter_bbox(output_list) + + save_output(output_list) + + def load_2dpose_results(self, result_path): + result_jsons = glob.glob(f'{result_path}/*.json') + + hhrnet_results = {} + for rj in result_jsons: + with open(rj) as f: + pose_outputs = json.load(f) + + prefix = 'openpose_result_' # 'higher_hrnet_result_' + seq_name = rj.split(prefix)[-1][:-5] + for img_name in sorted(pose_outputs.keys()): + pose_candidates = pose_outputs[img_name] + try: + pose_candidates = np.asarray(pose_candidates, dtype=np.float32)[:,:,:3] + except IndexError: # when the result is empty + pose_candidates = [] + img_name = seq_name + '_' + img_name + + hhrnet_results[img_name] = pose_candidates + + return hhrnet_results + + +# open pose valid joint compare +def filter_bbox(output_list): + result = {} + for out in output_list: + candidates = out['candidates'] + openpose_from_dataset = out['target']['openpose'] + smplpose_from_dataset = out['target']['smplpose'] + aid = out['meta']['aid'] + img_id = out['meta']['img_id'] + img_path = out['meta']['img_path'] + + if len(candidates) == 0: + continue + + valid_openpose_joints = (openpose_from_dataset[:, 2] > 0.1) # eye has low scores, 17: [1,1,1,...0,0] + valid_smplpose_joints = (smplpose_from_dataset[:, 2] > 0.0) + ref_bbox = get_bbox(smplpose_from_dataset, valid_smplpose_joints, 1.0) + ref_err = min(ref_bbox[2], ref_bbox[3]) * (1/15) + + match_idx = 0 + err = ref_err # pixel + for idx in range(len(candidates)): + pred_pose = candidates[idx] + valid_pred_joints = (pred_pose[:, 2] > 0.1) + valid_idx = (valid_smplpose_joints * valid_pred_joints).nonzero()[0] + l1_err = np.abs(pred_pose[valid_idx, :2] - smplpose_from_dataset[valid_idx, :2]) + if l1_err.size == 0: + continue + + euc_dst = np.sqrt((l1_err**2).sum(axis=1)).mean() + + if euc_dst < err: + match_idx = idx + err = euc_dst + + if err == ref_err: + continue + """ + coco_skeleton = ((1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12)) + tmpimg = cv2.imread(img_path) + oimg = vis_keypoints(tmpimg, openpose_from_dataset) #vis_keypoints_with_skeleton(tmpimg, openpose_from_dataset.T, coco_skeleton, kp_thresh=0.0) + cv2.imshow('openpose', oimg/255) + cv2.waitKey(0) + # cv2.destroyAllWindows() + # cv2.waitKey(1) + # simg = vis_keypoints(tmpimg, smplpose_from_dataset) #vis_keypoints_with_skeleton(tmpimg, smplpose_from_dataset.T, coco_skeleton, kp_thresh=0.0) + # cv2.imshow('smplpose', simg / 255) + # cv2.waitKey(0) + for idx in range(len(candidates)): + pimg = vis_keypoints(tmpimg, candidates[idx]) #vis_keypoints_with_skeleton(tmpimg, candidates[idx].T, coco_skeleton, kp_thresh=0.0) + cv2.imshow(f'crowdpose {idx}', pimg) + cv2.waitKey(0) + 
cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + """ + + res = {} + res['coco_joints'] = candidates[match_idx].tolist() # 17 x2 + res['img_id'] = img_id + result[aid] = res + + print("Before filter: ", len(output_list), " After filter: ", len(result)) + + return result + + +def save_output(output): + save_file_name = f'3DPW_test_hhrnet_result.json' + print("Saving result to ", save_file_name) + with open(save_file_name, 'w') as f: + json.dump(output, f) + + +def bbox_iou(box1, box2): + """ + Returns the IoU of two bounding boxes + + + """ + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3] + + # get the corrdinates of the intersection rectangle + inter_rect_x1 = torch.max(b1_x1, b2_x1) + inter_rect_y1 = torch.max(b1_y1, b2_y1) + inter_rect_x2 = torch.min(b1_x2, b2_x2) + inter_rect_y2 = torch.min(b1_y2, b2_y2) + + # Intersection area + device = box1.device + inter_area = torch.max(inter_rect_x2 - inter_rect_x1 + 1, torch.zeros(inter_rect_x2.shape).to(device)) * torch.max( + inter_rect_y2 - inter_rect_y1 + 1, torch.zeros(inter_rect_x2.shape).to(device)) + + # Union Area + b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1) + b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1) + + iou = inter_area / (b1_area + b2_area - inter_area) + + return iou + +def get_bbox(joint_img, joint_valid, extend_ratio=1.2): + x_img, y_img = joint_img[:, 0], joint_img[:, 1] + # x_img = x_img[joint_valid==1]; y_img = y_img[joint_valid==1]; + x_img = x_img[joint_valid > 0.01]; + y_img = y_img[joint_valid > 0.01]; + + xmin = min(x_img); + ymin = min(y_img); + xmax = max(x_img); + ymax = max(y_img); + + x_center = (xmin + xmax) / 2.; + width = xmax - xmin; + xmin = x_center - 0.5 * width * extend_ratio + xmax = x_center + 0.5 * width * extend_ratio + + y_center = (ymin + ymax) / 2.; + height = ymax - ymin; + ymin = y_center - 0.5 * height * extend_ratio + ymax = y_center + 0.5 * height * extend_ratio + + bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(np.float32) + return bbox + +def transform_joint_to_other_db(src_joint, src_name, dst_name): + src_joint_num = len(src_name) + dst_joint_num = len(dst_name) + + new_joint = np.zeros(((dst_joint_num,) + src_joint.shape[1:]), dtype=np.float32) + for src_idx in range(len(src_name)): + name = src_name[src_idx] + if name in dst_name: + dst_idx = dst_name.index(name) + new_joint[dst_idx] = src_joint[src_idx] + + return new_joint + + +def vis_keypoints_with_skeleton(img, kps, kps_lines, kp_thresh=0.4, alpha=1, kps_scores=None): + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(kps_lines) + 2)] + colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + kp_mask = np.copy(img) + + # Draw the keypoints. 
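+    # kps is a 3 x N array (rows: x, y, confidence). A limb is drawn only when both of its
+    # endpoint joints score above kp_thresh; each sufficiently confident joint is drawn as a circle.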
+ for l in range(len(kps_lines)): + i1 = kps_lines[l][0] + i2 = kps_lines[l][1] + p1 = kps[0, i1].astype(np.int32), kps[1, i1].astype(np.int32) + p2 = kps[0, i2].astype(np.int32), kps[1, i2].astype(np.int32) + if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh: + cv2.line( + kp_mask, p1, p2, + color=colors[l], thickness=2, lineType=cv2.LINE_AA) + if kps[2, i1] > kp_thresh: + cv2.circle( + kp_mask, p1, + radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA) + if kps[2, i2] > kp_thresh: + cv2.circle( + kp_mask, p2, + radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA) + + if kps_scores is not None: + cv2.putText(kp_mask, str(kps_scores[i2, 0]), p2, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) + + # Blend the keypoints. + return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + + +def vis_keypoints(img, kps, alpha=1): + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(kps) + 2)] + colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + kp_mask = np.copy(img) + + # Draw the keypoints. + for i in range(len(kps)): + p = kps[i][0].astype(np.int32), kps[i][1].astype(np.int32) + cv2.circle(kp_mask, p, radius=3, color=colors[i], thickness=-1, lineType=cv2.LINE_AA) + cv2.putText(kp_mask, str(i), p, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) + + # Blend the keypoints. + return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + + +if __name__ == '__main__': + testset_loader = PW3D(get_crowd=False) + pose_matcher = PoseMatcher(testset_loader) + pose_matcher.run() \ No newline at end of file diff --git a/data_processing/tool/match_mupots_2dpose.py b/data_processing/tool/match_mupots_2dpose.py new file mode 100644 index 0000000..9219610 --- /dev/null +++ b/data_processing/tool/match_mupots_2dpose.py @@ -0,0 +1,339 @@ +import os.path as osp +import torch +import numpy as np +import copy +import cv2 +import json +import torchvision.transforms as transforms +import torch.nn as nn +from torch.utils.data import DataLoader +from pycocotools.coco import COCO +from tqdm import tqdm +import matplotlib.pyplot as plt +import matplotlib as mpl +import glob + + +class MuPoTs(torch.utils.data.Dataset): + def __init__(self): + self.data_split = 'test' + self.img_dir = osp.join('..', 'data', 'MuPoTs', 'data', 'MultiPersonTestSet') + self.test_annot_path = osp.join('..', 'data', 'MuPoTs', 'data', 'MuPoTS-3D.json') + + # MuPoTS + self.mupots_joint_num = 17 + self.mupots_joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head') # + self.mupots_flip_pairs = ((2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13)) + self.mupots_skeleton = ((0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (11, 12), (12, 13), (1, 2), (2, 3), (3, 4), (1, 5), (5, 6), (6, 7)) + + # MSCOCO + self.coco_joint_num = 17 + self.coco_joints_name = ('Head', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle') + self.coco_skeleton = ((1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12)) + self.coco_flip_pairs = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16)) + + 
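+        # OpenPose-style joint order, with names matched to the MuPoTS convention ('Thorax' in
+        # place of 'Neck') so ground-truth joints can be remapped by name in load_data().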
self.openpose_joints_name = ('Nose', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'R_Eye', 'L_Eye', 'R_Ear', 'L_Ear') + + self.datalist = self.load_data() + print('mupots data len: ', len(self.datalist)) + + def load_data(self): + data = [] + db = COCO(self.test_annot_path) + + print("Get bounding box and root from groundtruth") + for aid in db.anns.keys(): + ann = db.anns[aid] + if ann['is_valid'] == 0: + continue + + image_id = ann['image_id'] + img = db.loadImgs(image_id)[0] + file_name = img['file_name'] + img_path = osp.join(self.img_dir, file_name) + img_name = '_'.join(file_name.split('/')) + + joint_img = np.array(ann['keypoints_img']) + joint_img = np.concatenate((joint_img, np.ones_like(joint_img[:, :1])), axis=1) + joint_img = transform_joint_to_other_db(joint_img, self.mupots_joints_name, self.openpose_joints_name) # self.coco_joints_name + + data.append({ + 'img_name': img_name, + 'img_path': img_path, + 'img_id': image_id, + 'ann_id': aid, + 'joint_img': joint_img + }) + + return data + + def __len__(self): + return len(self.datalist) + + def getitem(self, idx): + data = copy.deepcopy(self.datalist[idx]) + img_name, img_id, aid = data['img_name'], data['img_id'], data['ann_id'] + + # for prediction matching + gtpose = data['joint_img'] + + # img_path = data['img_path'] + # tmpimg = cv2.imread(img_path) + # newimg = vis_keypoints_with_skeleton(tmpimg, gtpose.T, self.coco_skeleton) + # cv2.imshow('img w gt pose', newimg/255) + # cv2.waitKey(0) + # import pdb; pdb.set_trace() + + return data['img_path'], img_name, img_id, aid, gtpose + + +class PoseMatcher: + def __init__(self, dataloader): + self.dataloader = dataloader + result_path = '/home/hongsukchoi/projects/pytorch_Realtime_Multi-Person_Pose_Estimation' # '/home/redarknight/projects/HHRNet' + self.candidates = self.load_2dpose_results(result_path) + + def run(self): + output_list = [] + for idx in range(len(self.dataloader)): + img_path, img_name, img_id, aid, gtpose = self.dataloader.getitem(idx) + candidates = self.candidates[img_name] + + output = {} + output['candidates'] = candidates + output['target'] = { + 'gtpose': gtpose + } + output['meta'] = { + 'aid': aid, + 'img_id': img_id, + 'img_path': img_path + } + + output_list.append(output) + + output_list = filter_bbox(output_list) + + save_output(output_list) + + def load_2dpose_results(self, result_path): + result_jsons = glob.glob(f'{result_path}/*.json') + + detector_results = {} + for rj in result_jsons: + with open(rj) as f: + pose_outputs = json.load(f) + + seq_name = rj.split('openpose_result_')[-1][:-5] + for img_name in sorted(pose_outputs.keys()): + pose_candidates = pose_outputs[img_name] + try: + pose_candidates = np.asarray(pose_candidates, dtype=np.float32)[:,:18,:3] + except IndexError: # when the result is empty + pose_candidates = [] + img_name = seq_name + '_' + img_name + + detector_results[img_name] = pose_candidates + + return detector_results + + +# open pose valid joint compare +def filter_bbox(output_list): + result = {} + d_count = 0 + for out in output_list: + candidates = out['candidates'] + gtpose_from_dataset = out['target']['gtpose'] + aid = out['meta']['aid'] + img_id = out['meta']['img_id'] + img_path = out['meta']['img_path'] + + if len(candidates) == 0: + continue + + valid_gtpose_joints = (gtpose_from_dataset[:, 2] > 0.0) + + match_idx = 0 + err = 60 # pixel + for idx in range(len(candidates)): + pred_pose = candidates[idx] + 
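+            # Score this candidate by the mean Euclidean distance over joints that are valid in both
+            # the ground-truth pose and the detection; the lowest-error candidate below `err` is kept.
+            # If none beats the 60-pixel threshold, a zero "dummy" pose is recorded instead.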
valid_pred_joints = (pred_pose[:, 2] > 0.1) + valid_idx = (valid_gtpose_joints * valid_pred_joints).nonzero()[0] + l1_err = np.abs(pred_pose[valid_idx, :2] - gtpose_from_dataset[valid_idx, :2]) + # import pdb; pdb.set_trace() + + euc_dst = np.sqrt((l1_err**2).sum(axis=1)).mean() + + if euc_dst < err: + match_idx = idx + err = euc_dst + + if err == 60: + # continue + + candidates[match_idx] = np.zeros_like(candidates[match_idx]) + d_count += 1 + """ + coco_skeleton = ((1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12)) + tmpimg = cv2.imread(img_path) + gimg = vis_keypoints(tmpimg, gtpose_from_dataset) #vis_keypoints_with_skeleton(tmpimg, openpose_from_dataset.T, coco_skeleton, kp_thresh=0.0) + cv2.imshow('gtpose', gimg/255) + cv2.waitKey(0) + # cv2.destroyAllWindows() + # cv2.waitKey(1) + # simg = vis_keypoints(tmpimg, smplpose_from_dataset) #vis_keypoints_with_skeleton(tmpimg, smplpose_from_dataset.T, coco_skeleton, kp_thresh=0.0) + # cv2.imshow('smplpose', simg / 255) + # cv2.waitKey(0) + print("Match idx: ", match_idx) + for idx in range(len(candidates)): + pimg = vis_keypoints(tmpimg, candidates[idx]) #vis_keypoints_with_skeleton(tmpimg, candidates[idx].T, coco_skeleton, kp_thresh=0.0) + cv2.imshow(f'hhrnetpose {idx}', pimg) + cv2.waitKey(0) + cv2.destroyAllWindows() + cv2.waitKey(1) + import pdb; pdb.set_trace() + """ + + res = {} + res['coco_joints'] = candidates[match_idx].tolist() # 17 x2 + res['img_id'] = img_id + result[aid] = res + + print(f"{d_count} dummy out of {len(output_list)}") + + return result + + +def save_output(output): + save_file_name = f'MuPoTs_test_openpose_result.json' + print("Saving result to ", save_file_name) + with open(save_file_name, 'w') as f: + json.dump(output, f) + + +def bbox_iou(box1, box2): + """ + Returns the IoU of two bounding boxes + + + """ + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3] + + # get the corrdinates of the intersection rectangle + inter_rect_x1 = torch.max(b1_x1, b2_x1) + inter_rect_y1 = torch.max(b1_y1, b2_y1) + inter_rect_x2 = torch.min(b1_x2, b2_x2) + inter_rect_y2 = torch.min(b1_y2, b2_y2) + + # Intersection area + device = box1.device + inter_area = torch.max(inter_rect_x2 - inter_rect_x1 + 1, torch.zeros(inter_rect_x2.shape).to(device)) * torch.max( + inter_rect_y2 - inter_rect_y1 + 1, torch.zeros(inter_rect_x2.shape).to(device)) + + # Union Area + b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1) + b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1) + + iou = inter_area / (b1_area + b2_area - inter_area) + + return iou + +def get_bbox(joint_img, joint_valid, extend_ratio=1.2): + x_img, y_img = joint_img[:, 0], joint_img[:, 1] + # x_img = x_img[joint_valid==1]; y_img = y_img[joint_valid==1]; + x_img = x_img[joint_valid > 0.01]; + y_img = y_img[joint_valid > 0.01]; + + xmin = min(x_img); + ymin = min(y_img); + xmax = max(x_img); + ymax = max(y_img); + + x_center = (xmin + xmax) / 2.; + width = xmax - xmin; + xmin = x_center - 0.5 * width * extend_ratio + xmax = x_center + 0.5 * width * extend_ratio + + y_center = (ymin + ymax) / 2.; + height = ymax - ymin; + ymin = y_center - 0.5 * height * extend_ratio + ymax = y_center + 0.5 * height * extend_ratio + + bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(np.float32) + return bbox + +def transform_joint_to_other_db(src_joint, src_name, 
dst_name): + src_joint_num = len(src_name) + dst_joint_num = len(dst_name) + + new_joint = np.zeros(((dst_joint_num,) + src_joint.shape[1:]), dtype=np.float32) + for src_idx in range(len(src_name)): + name = src_name[src_idx] + if name in dst_name: + dst_idx = dst_name.index(name) + new_joint[dst_idx] = src_joint[src_idx] + + return new_joint + + +def vis_keypoints_with_skeleton(img, kps, kps_lines, kp_thresh=0.4, alpha=1, kps_scores=None): + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(kps_lines) + 2)] + colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + kp_mask = np.copy(img) + + # Draw the keypoints. + for l in range(len(kps_lines)): + i1 = kps_lines[l][0] + i2 = kps_lines[l][1] + p1 = kps[0, i1].astype(np.int32), kps[1, i1].astype(np.int32) + p2 = kps[0, i2].astype(np.int32), kps[1, i2].astype(np.int32) + if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh: + cv2.line( + kp_mask, p1, p2, + color=colors[l], thickness=2, lineType=cv2.LINE_AA) + if kps[2, i1] > kp_thresh: + cv2.circle( + kp_mask, p1, + radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA) + if kps[2, i2] > kp_thresh: + cv2.circle( + kp_mask, p2, + radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA) + + if kps_scores is not None: + cv2.putText(kp_mask, str(kps_scores[i2, 0]), p2, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) + + # Blend the keypoints. + return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + + +def vis_keypoints(img, kps, alpha=1): + # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv. + cmap = plt.get_cmap('rainbow') + colors = [cmap(i) for i in np.linspace(0, 1, len(kps) + 2)] + colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors] + + # Perform the drawing on a copy of the image, to allow for blending. + kp_mask = np.copy(img) + + # Draw the keypoints. + for i in range(len(kps)): + p = kps[i][0].astype(np.int32), kps[i][1].astype(np.int32) + cv2.circle(kp_mask, p, radius=3, color=colors[i], thickness=-1, lineType=cv2.LINE_AA) + cv2.putText(kp_mask, str(i), p, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) + + # Blend the keypoints. 
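+    # addWeighted mixes the original image with the annotated copy; with the default alpha=1 the
+    # blended result is just the annotated copy.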
+ return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) + + +if __name__ == '__main__': + testset_loader = MuPoTs() + pose_matcher = PoseMatcher(testset_loader) + pose_matcher.run() \ No newline at end of file diff --git a/data_processing/yolov5_crowdhuman/.dockerignore b/data_processing/yolov5_crowdhuman/.dockerignore new file mode 100644 index 0000000..3c6b6ab --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.dockerignore @@ -0,0 +1,216 @@ +# Repo-specific DockerIgnore ------------------------------------------------------------------------------------------- +#.git +.cache +.idea +runs +output +coco +storage.googleapis.com + +data/samples/* +**/results*.txt +*.jpg + +# Neural Network weights ----------------------------------------------------------------------------------------------- +**/*.weights +**/*.pt +**/*.pth +**/*.onnx +**/*.mlmodel +**/*.torchscript + + +# Below Copied From .gitignore ----------------------------------------------------------------------------------------- +# Below Copied From .gitignore ----------------------------------------------------------------------------------------- + + +# GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +wandb/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv* +venv*/ +ENV*/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + + +# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon +Icon? 
+ +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + + +# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/* +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/dictionaries +.html # Bokeh Plots +.pg # TensorFlow Frozen Graphs +.avi # videos + +# Sensitive or high-churn files: +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml + +# Gradle: +.idea/**/gradle.xml +.idea/**/libraries + +# CMake +cmake-build-debug/ +cmake-build-release/ + +# Mongo Explorer plugin: +.idea/**/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties diff --git a/data_processing/yolov5_crowdhuman/.gitattributes b/data_processing/yolov5_crowdhuman/.gitattributes new file mode 100644 index 0000000..dad4239 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.gitattributes @@ -0,0 +1,2 @@ +# this drop notebooks from GitHub language stats +*.ipynb linguist-vendored diff --git a/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/bug-report.md b/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000..362059b --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,55 @@ +--- +name: "🐛 Bug report" +about: Create a report to help us improve +title: '' +labels: bug +assignees: '' + +--- + +Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, otherwise it is non-actionable, and we can not help you: + - **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo + - **Common dataset**: coco.yaml or coco128.yaml + - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments + +If this is a custom dataset/training question you **must include** your `train*.jpg`, `test*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. + + +## 🐛 Bug +A clear and concise description of what the bug is. 
+ + +## To Reproduce (REQUIRED) + +Input: +``` +import torch + +a = torch.tensor([5]) +c = a / 0 +``` + +Output: +``` +Traceback (most recent call last): + File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code + exec(code_obj, self.user_global_ns, self.user_ns) + File "", line 5, in + c = a / 0 +RuntimeError: ZeroDivisionError +``` + + +## Expected behavior +A clear and concise description of what you expected to happen. + + +## Environment +If applicable, add screenshots to help explain your problem. + + - OS: [e.g. Ubuntu] + - GPU [e.g. 2080 Ti] + + +## Additional context +Add any other context about the problem here. diff --git a/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/feature-request.md b/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000..87db3ea --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,27 @@ +--- +name: "🚀 Feature request" +about: Suggest an idea for this project +title: '' +labels: enhancement +assignees: '' + +--- + +## 🚀 Feature + + +## Motivation + + + +## Pitch + + + +## Alternatives + + + +## Additional context + + diff --git a/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/question.md b/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000..2c22aea --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,13 @@ +--- +name: "❓Question" +about: Ask a general question +title: '' +labels: question +assignees: '' + +--- + +## ❔Question + + +## Additional context diff --git a/data_processing/yolov5_crowdhuman/.github/dependabot.yml b/data_processing/yolov5_crowdhuman/.github/dependabot.yml new file mode 100644 index 0000000..9910689 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/dependabot.yml @@ -0,0 +1,12 @@ +version: 2 +updates: +- package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 10 + reviewers: + - glenn-jocher + labels: + - dependencies diff --git a/data_processing/yolov5_crowdhuman/.github/workflows/ci-testing.yml b/data_processing/yolov5_crowdhuman/.github/workflows/ci-testing.yml new file mode 100644 index 0000000..df50847 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/workflows/ci-testing.yml @@ -0,0 +1,80 @@ +name: CI CPU testing + +on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows + push: + branches: [ master ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ master ] + schedule: + - cron: '0 0 * * *' # Runs at 00:00 UTC every day + +jobs: + cpu-tests: + + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: [3.8] + model: ['yolov5s'] # models to test + + # Timeout: https://stackoverflow.com/a/59076067/4521646 + timeout-minutes: 50 + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + # Note: This uses an internal pip API and may not always work + # 
https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow + - name: Get pip cache + id: pip-cache + run: | + python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" + + - name: Cache pip + uses: actions/cache@v1 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} + restore-keys: | + ${{ runner.os }}-${{ matrix.python-version }}-pip- + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html + pip install -q onnx + python --version + pip --version + pip list + shell: bash + + - name: Download data + run: | + # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip + # unzip -q tmp.zip -d ../ + # rm tmp.zip + + - name: Tests workflow + run: | + # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories + di=cpu # inference devices # define device + + # train + python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di + # detect + python detect.py --weights weights/${{ matrix.model }}.pt --device $di + python detect.py --weights runs/train/exp/weights/last.pt --device $di + # test + python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di + python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di + + python hubconf.py # hub + python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect + python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export + shell: bash diff --git a/data_processing/yolov5_crowdhuman/.github/workflows/codeql-analysis.yml b/data_processing/yolov5_crowdhuman/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..1f07888 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/workflows/codeql-analysis.yml @@ -0,0 +1,54 @@ +# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. +# https://github.com/github/codeql-action + +name: "CodeQL" + +on: + schedule: + - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. 
+ # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/data_processing/yolov5_crowdhuman/.github/workflows/greetings.yml b/data_processing/yolov5_crowdhuman/.github/workflows/greetings.yml new file mode 100644 index 0000000..ee47229 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/workflows/greetings.yml @@ -0,0 +1,56 @@ +name: Greetings + +on: [pull_request_target, issues] + +jobs: + greeting: + runs-on: ubuntu-latest + steps: + - uses: actions/first-interaction@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + pr-message: | + 👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to: + - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: + ```bash + git remote add upstream https://github.com/ultralytics/yolov5.git + git fetch upstream + git checkout feature # <----- replace 'feature' with local branch name + git rebase upstream/master + git push -u origin -f + ``` + - ✅ Verify all Continuous Integration (CI) **checks are passing**. + - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee + + issue-message: | + 👋 Hello @${{ github.actor }}, thank you for your interest in 🚀 YOLOv5! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). + + If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. 
+ + If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. + + For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. + + ## Requirements + + Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run: + ```bash + $ pip install -r requirements.txt + ``` + + ## Environments + + YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + + - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) + - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + + ## Status + + ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. 
+ diff --git a/data_processing/yolov5_crowdhuman/.github/workflows/rebase.yml b/data_processing/yolov5_crowdhuman/.github/workflows/rebase.yml new file mode 100644 index 0000000..e86c577 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/workflows/rebase.yml @@ -0,0 +1,21 @@ +name: Automatic Rebase +# https://github.com/marketplace/actions/automatic-rebase + +on: + issue_comment: + types: [created] + +jobs: + rebase: + name: Rebase + if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase') + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Automatic Rebase + uses: cirrus-actions/rebase@1.3.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/data_processing/yolov5_crowdhuman/.github/workflows/stale.yml b/data_processing/yolov5_crowdhuman/.github/workflows/stale.yml new file mode 100644 index 0000000..0a094e2 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.github/workflows/stale.yml @@ -0,0 +1,18 @@ +name: Close stale issues +on: + schedule: + - cron: "0 0 * * *" + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' + stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' + days-before-stale: 30 + days-before-close: 5 + exempt-issue-labels: 'documentation,tutorial' + operations-per-run: 100 # The maximum number of operations per run, used to control rate limiting. 
diff --git a/data_processing/yolov5_crowdhuman/.gitignore b/data_processing/yolov5_crowdhuman/.gitignore new file mode 100644 index 0000000..91ce33f --- /dev/null +++ b/data_processing/yolov5_crowdhuman/.gitignore @@ -0,0 +1,252 @@ +# Repo-specific GitIgnore ---------------------------------------------------------------------------------------------- +*.jpg +*.jpeg +*.png +*.bmp +*.tif +*.tiff +*.heic +*.JPG +*.JPEG +*.PNG +*.BMP +*.TIF +*.TIFF +*.HEIC +*.mp4 +*.mov +*.MOV +*.avi +*.data +*.json + +*.cfg +!cfg/yolov3*.cfg + +storage.googleapis.com +runs/* +data/* +!data/images/zidane.jpg +!data/images/bus.jpg +!data/coco.names +!data/coco_paper.names +!data/coco.data +!data/coco_*.data +!data/coco_*.txt +!data/trainvalno5k.shapes +!data/*.sh + +pycocotools/* +results*.txt +gcp_test*.sh + +# Datasets ------------------------------------------------------------------------------------------------------------- +coco/ +coco128/ +VOC/ + +# MATLAB GitIgnore ----------------------------------------------------------------------------------------------------- +*.m~ +*.mat +!targets*.mat + +# Neural Network weights ----------------------------------------------------------------------------------------------- +*.weights +*.pt +*.onnx +*.mlmodel +*.torchscript +darknet53.conv.74 +yolov3-tiny.conv.15 + +# GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +wandb/ +.installed.cfg +*.egg + + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv* +venv*/ +ENV*/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + + +# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon +Icon? 
+ +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + + +# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/* +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/dictionaries +.html # Bokeh Plots +.pg # TensorFlow Frozen Graphs +.avi # videos + +# Sensitive or high-churn files: +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml + +# Gradle: +.idea/**/gradle.xml +.idea/**/libraries + +# CMake +cmake-build-debug/ +cmake-build-release/ + +# Mongo Explorer plugin: +.idea/**/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties diff --git a/data_processing/yolov5_crowdhuman/Dockerfile b/data_processing/yolov5_crowdhuman/Dockerfile new file mode 100644 index 0000000..af8f7b4 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/Dockerfile @@ -0,0 +1,56 @@ +# Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch +FROM nvcr.io/nvidia/pytorch:20.12-py3 + +# Install linux packages +RUN apt update && apt install -y zip screen libgl1-mesa-glx + +RUN apt-get install vim + +# Install python dependencies +RUN python -m pip install --upgrade pip +COPY requirements.txt . +RUN pip install -r requirements.txt gsutil + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +COPY . /usr/src/app + +# Copy weights +#RUN python3 -c "from models import *; \ +#attempt_download('weights/yolov5s.pt'); \ +#attempt_download('weights/yolov5m.pt'); \ +#attempt_download('weights/yolov5l.pt')" + + +# --------------------------------------------------- Extras Below --------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t +# for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . 
&& sudo docker push $t; done + +# Pull and Run +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t + +# Pull and Run with local directory access +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t + +# Kill all +# sudo docker kill $(sudo docker ps -q) + +# Kill all image-based +# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) + +# Bash into running container +# sudo docker exec -it 5a9b5863d93d bash + +# Bash into stopped container +# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash + +# Send weights to GCP +# python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt + +# Clean up +# docker system prune -a --volumes diff --git a/data_processing/yolov5_crowdhuman/LICENSE b/data_processing/yolov5_crowdhuman/LICENSE new file mode 100644 index 0000000..9e419e0 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/LICENSE @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
\ No newline at end of file
diff --git a/data_processing/yolov5_crowdhuman/README.md b/data_processing/yolov5_crowdhuman/README.md
new file mode 100644
index 0000000..934e298
--- /dev/null
+++ b/data_processing/yolov5_crowdhuman/README.md
@@ -0,0 +1,36 @@
+## Head & Person Detection Model
+
+### Download the model trained on CrowdHuman using the YOLOv5(m) architecture
+Download Link: [YOLOv5m-crowd-human](https://drive.google.com/file/d/1gglIwqxaH2iTvy6lZlXuAcMpd_U0GCUb/view?usp=sharing)
+
+
+
+ +**Output (Crowd Human Model)** + +![image](https://drive.google.com/uc?export=view&id=1ZOhDBRXj-Ra0vPL7iG6lrxCWAFhJTAti) + +
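+For programmatic use, as an alternative to the `detect.py` commands shown below, the same checkpoint can be driven with the helpers bundled in this folder (`attempt_load`, `LoadImages`, `non_max_suppression`, `scale_coords`). The snippet below is only a rough, untested sketch; `crowdhuman_yolov5m.pt` and `images/` are placeholder paths you would substitute with your own.
+
+```python
+import torch
+
+from models.experimental import attempt_load
+from utils.datasets import LoadImages
+from utils.general import check_img_size, non_max_suppression, scale_coords
+from utils.torch_utils import select_device
+
+device = select_device('')  # '' picks the first CUDA device if available, else CPU
+model = attempt_load('crowdhuman_yolov5m.pt', map_location=device)  # FP32 model
+stride = int(model.stride.max())
+imgsz = check_img_size(640, s=stride)
+names = model.module.names if hasattr(model, 'module') else model.names
+
+dataset = LoadImages('images/', img_size=imgsz, stride=stride)
+for path, img, im0, _ in dataset:
+    img = torch.from_numpy(img).to(device).float() / 255.0  # loader returns uint8 CHW; scale to [0, 1]
+    if img.ndimension() == 3:
+        img = img.unsqueeze(0)
+    with torch.no_grad():
+        pred = model(img)[0]
+    pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
+    for det in pred:
+        if len(det):
+            # rescale boxes from the letterboxed size back to the original image
+            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+            for *xyxy, conf, cls in det:
+                if names[int(cls)] == 'head':  # keep only head boxes; use 'person' for bodies
+                    print(path, [float(v) for v in xyxy], float(conf))
+```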
+ + + +## Test + +```bash +$ python detect.py --weights crowdhuman_yolov5m.pt --source _test/ --view-img + +``` + + +## Test (Only Person Class) + +```bash +python3 detect.py --weights crowdhuman_yolov5m.pt --source _test/ --view-img --person +``` + + +## Test (Only Heads) + +```bash +python3 detect.py --weights crowdhuman_yolov5m.pt --source _test/ --view-img --heads +``` diff --git a/data_processing/yolov5_crowdhuman/detect.py b/data_processing/yolov5_crowdhuman/detect.py new file mode 100644 index 0000000..df8af24 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/detect.py @@ -0,0 +1,183 @@ +import argparse +import time +from pathlib import Path + +import cv2 +import torch +import torch.backends.cudnn as cudnn +from numpy import random + +from models.experimental import attempt_load +from utils.datasets import LoadStreams, LoadImages +from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized + + +def detect(save_img=False): + source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size + webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( + ('rtsp://', 'rtmp://', 'http://')) + + # Directories + save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Initialize + set_logging() + device = select_device(opt.device) + half = device.type != 'cpu' # half precision only supported on CUDA + + # Load model + model = attempt_load(weights, map_location=device) # load FP32 model + stride = int(model.stride.max()) # model stride + imgsz = check_img_size(imgsz, s=stride) # check img_size + if half: + model.half() # to FP16 + + # Second-stage classifier + classify = False + if classify: + modelc = load_classifier(name='resnet101', n=2) # initialize + modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + + # Set Dataloader + vid_path, vid_writer = None, None + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, stride=stride) + else: + save_img = True + dataset = LoadImages(source, img_size=imgsz, stride=stride) + + # Get names and colors + names = model.module.names if hasattr(model, 'module') else model.names + colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] + + # Run inference + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + t0 = time.time() + for path, img, im0s, vid_cap in dataset: + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Inference + t1 = time_synchronized() + pred = model(img, augment=opt.augment)[0] + + # Apply NMS + pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) + t2 = time_synchronized() + + # Apply Classifier + if classify: + pred = apply_classifier(pred, modelc, img, im0s) + + # Process detections + for i, det in enumerate(pred): # 
detections per image + if webcam: # batch_size >= 1 + p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count + else: + p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # img.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt + s += '%gx%g ' % img.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format + with open(txt_path + '.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or view_img: # Add bbox to image + label = f'{names[int(cls)]} {conf:.2f}' + if opt.heads or opt.person: + if 'head' in label and opt.heads: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + if 'person' in label and opt.person: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + else: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + + # Print time (inference + NMS) + print(f'{s}Done. ({t2 - t1:.3f}s)') + + # Stream results + if view_img: + cv2.imshow(str(p), im0) + cv2.waitKey(0) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' + if vid_path != save_path: # new video + vid_path = save_path + if isinstance(vid_writer, cv2.VideoWriter): + vid_writer.release() # release previous video writer + + fourcc = 'mp4v' # output video codec + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) + vid_writer.write(im0) + + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + print(f"Results saved to {save_dir}{s}") + + print(f'Done. ({time.time() - t0:.3f}s)') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='display results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default='runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--person', action='store_true', help='displays only person') + parser.add_argument('--heads', action='store_true', help='displays only person') + opt = parser.parse_args() + print(opt) + #check_requirements() + + with torch.no_grad(): + if opt.update: # update all models (to fix SourceChangeWarning) + for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: + detect() + strip_optimizer(opt.weights) + else: + detect() diff --git a/data_processing/yolov5_crowdhuman/detect_head_bbox.py b/data_processing/yolov5_crowdhuman/detect_head_bbox.py new file mode 100644 index 0000000..77971b0 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/detect_head_bbox.py @@ -0,0 +1,294 @@ +import argparse +import os.path +import time +from pathlib import Path + +import cv2 +import torch +import torch.backends.cudnn as cudnn +from numpy import random + +from models.experimental import attempt_load +from utils.datasets import LoadStreams, LoadImages +from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized +import json +import numpy as np + +def detect(save_img=False): + source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size + + skeleton_path = os.path.join(opt.source,'2d_pose_result_hrnet.json') + source = os.path.join(opt.source,'images') + + with open(skeleton_path) as f: + pose2d_result = json.load(f) + + webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( + ('rtsp://', 'rtmp://', 'http://')) + + # Directories + save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Initialize + set_logging() + device = select_device(opt.device) + half = device.type != 'cpu' # half precision only supported on CUDA + + # Load model + model = attempt_load(weights, map_location=device) # load FP32 model + stride = int(model.stride.max()) # model stride + imgsz = check_img_size(imgsz, s=stride) # check img_size + if half: + model.half() # to FP16 + + # Second-stage classifier + classify = False + if classify: + modelc = load_classifier(name='resnet101', n=2) # initialize + modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + 
+ bbox_results = {} + result_json_path = os.path.join(opt.source, 'head_bbox_yolov5_crowdhuman.json') + print('result_json_path', result_json_path) + + if os.path.exists(result_json_path): + with open(result_json_path) as f: + bbox_results = json.load(f) + + + # Set Dataloader + vid_path, vid_writer = None, None + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, stride=stride) + else: + save_img = True + dataset = LoadImages(source,bbox_results, img_size=imgsz, stride=stride) + + # Get names and colors + names = model.module.names if hasattr(model, 'module') else model.names + colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] + + # Run inference + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + t0 = time.time() + + + + + + for path, img, im0s, vid_cap in dataset: + img_name = os.path.basename(path) + if img_name in bbox_results: + continue + + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + + coco_joint_list = pose2d_result[img_name] + bbox_list_wo_sort = [] + + + # Inference + t1 = time_synchronized() + pred = model(img, augment=opt.augment)[0] + + # Apply NMS + pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) + t2 = time_synchronized() + + # Apply Classifier + if classify: + pred = apply_classifier(pred, modelc, img, im0s) + + # Process detections + for i, det in enumerate(pred): # detections per image + if webcam: # batch_size >= 1 + p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count + else: + p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # img.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt + s += '%gx%g ' % img.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format + with open(txt_path + '.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + label = f'{names[int(cls)]} {conf:.2f}' + if 'head' in label: + bbox = [float(xyxy[0]), float(xyxy[1]), float(xyxy[2]-xyxy[0]), float(xyxy[3]-xyxy[1])] # x, y, w, h + #print(im0.shape) + bbox_list_wo_sort.append(bbox) + + + + + + if save_img or view_img: # Add bbox to image + label = f'{names[int(cls)]} {conf:.2f}' + if opt.heads or opt.person: + if 'head' in label and opt.heads: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + if 'person' in label and opt.person: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + else: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], 
line_thickness=3) + + # Print time (inference + NMS) + print(f'{s}Done. ({t2 - t1:.3f}s)') + + # Stream results + if view_img: + print('resize to',(512, int(im0.shape[0]/im0.shape[1]*512))) + cv2.imshow(str(p), cv2.resize(im0, (512, int(im0.shape[0]/im0.shape[1]*512)))) + cv2.waitKey(0) # 1 millisecond + + # Save results (image with detections) + # if save_img: + # if dataset.mode == 'image': + # cv2.imwrite(save_path, im0) + # else: # 'video' + # if vid_path != save_path: # new video + # vid_path = save_path + # if isinstance(vid_writer, cv2.VideoWriter): + # vid_writer.release() # release previous video writer + # + # fourcc = 'mp4v' # output video codec + # fps = vid_cap.get(cv2.CAP_PROP_FPS) + # w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + # h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + # vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) + # vid_writer.write(im0) + + # sort bbox + bbox_list_sort = [] + + for idx in range(len(coco_joint_list)): + coco_joint_img = np.asarray(coco_joint_list[idx])[:, :3] + + face_points = coco_joint_img[:5, :3] + face_center = np.mean(face_points[:,:2], axis=0, keepdims=True) + #print('face_points', face_points) + # + clip_tresh = 0.5 + face_points_valid = face_points[face_points[:,2] > clip_tresh] + face_center_valid = np.mean(face_points_valid[:,:2], axis=0, keepdims=True) + #print('face_points_valid', face_points_valid) + # if valid face num >=1, match bbox to coco joint + if face_points_valid.shape[0] >= 1: + for bbox in bbox_list_wo_sort: + relax = 0.1 + relaxed_bbox = [bbox[0] - bbox[2] * relax, bbox[1] - bbox[3] * relax, bbox[2] * (1 + 2 * relax), + bbox[3] * (1 + 2 * relax)] + check = True + for points_idx in range(face_points.shape[0]): + if not (relaxed_bbox[0] <= face_points[points_idx][0] <= relaxed_bbox[0] + relaxed_bbox[2] and + relaxed_bbox[1] <= face_points[points_idx][1] <= relaxed_bbox[1] + relaxed_bbox[3]): + check = False + break + if check: + bbox_list_sort.append({'bbox':bbox,'score':1.0}) + break + else: + # if no valid face, use face center to match bbox (nearest ) + min_dist = 1e8 + min_bbox = None + for bbox in bbox_list_wo_sort: + bbox_c = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2] + if np.linalg.norm(bbox_c - face_center) < min_dist: + min_dist = np.linalg.norm(bbox_c - face_center) + min_bbox = bbox + + if min_bbox is not None: + bbox_list_sort.append({'bbox':min_bbox,'score':1.0}) + + + + + # no bbox detec, use coco joint to generate bbox + if len(bbox_list_sort) != idx+1: + # face_points_valid = face_points[face_points[:, 2] > clip_tresh] + # face_center_valid = np.mean(face_points_valid, axis=0, keepdims=True) + + if face_points_valid.shape[0] < 2: + bbox_list_sort.append({'bbox':[],'score':0.0}) + continue + + head_stride = max(np.max(face_points[:, 0]) - np.min(face_points[:, 0]), + np.max(face_points[:, 1]) - np.min(face_points[:, 1])) * 1.3 + temp_bbox = [face_center[0][0]-head_stride/2, face_center[0][1]-head_stride/2, head_stride, head_stride] + bbox_list_sort.append({'bbox':temp_bbox,'score':0.0}) + + if len(bbox_list_sort) != len(coco_joint_list): + raise ValueError('bbox_list_sort and coco_joint_list have different length') + + bbox_results[img_name] = bbox_list_sort + # save bbox + with open(result_json_path, 'w') as f: + json.dump(bbox_results, f) + + + # if save_txt or save_img: + # s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + # print(f"Results saved to {save_dir}{s}") + + print(f'Done. 
({time.time() - t0:.3f}s)') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='display results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default='runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--person', action='store_true', help='displays only person') + parser.add_argument('--heads', action='store_true', help='displays only person') + opt = parser.parse_args() + print(opt) + #check_requirements() + + with torch.no_grad(): + if opt.update: # update all models (to fix SourceChangeWarning) + for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: + detect() + strip_optimizer(opt.weights) + else: + detect() diff --git a/data_processing/yolov5_crowdhuman/hubconf.py b/data_processing/yolov5_crowdhuman/hubconf.py new file mode 100644 index 0000000..47eee44 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/hubconf.py @@ -0,0 +1,146 @@ +"""File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/ + +Usage: + import torch + model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80) +""" + +from pathlib import Path + +import torch + +from models.yolo import Model +from utils.general import set_logging +from utils.google_utils import attempt_download + +dependencies = ['torch', 'yaml'] +set_logging() + + +def create(name, pretrained, channels, classes, autoshape): + """Creates a specified YOLOv5 model + + Arguments: + name (str): name of model, i.e. 
'yolov5s' + pretrained (bool): load pretrained weights into the model + channels (int): number of input channels + classes (int): number of model classes + + Returns: + pytorch model + """ + config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path + try: + model = Model(config, channels, classes) + if pretrained: + fname = f'{name}.pt' # checkpoint filename + attempt_download(fname) # download if not found locally + ckpt = torch.load(fname, map_location=torch.device('cpu')) # load + state_dict = ckpt['model'].float().state_dict() # to FP32 + state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter + model.load_state_dict(state_dict, strict=False) # load + if len(ckpt['model'].names) == classes: + model.names = ckpt['model'].names # set class names attribute + if autoshape: + model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + return model + + except Exception as e: + help_url = 'https://github.com/ultralytics/yolov5/issues/36' + s = 'Cache maybe be out of date, try force_reload=True. See %s for help.' % help_url + raise Exception(s) from e + + +def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-small model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5s', pretrained, channels, classes, autoshape) + + +def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-medium model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5m', pretrained, channels, classes, autoshape) + + +def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-large model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5l', pretrained, channels, classes, autoshape) + + +def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True): + """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 + + Arguments: + pretrained (bool): load pretrained weights into the model, default=False + channels (int): number of input channels, default=3 + classes (int): number of model classes, default=80 + + Returns: + pytorch model + """ + return create('yolov5x', pretrained, channels, classes, autoshape) + + +def custom(path_or_model='path/to/model.pt', autoshape=True): + """YOLOv5-custom model from https://github.com/ultralytics/yolov5 + + Arguments (3 options): + path_or_model (str): 'path/to/model.pt' + path_or_model (dict): torch.load('path/to/model.pt') + path_or_model (nn.Module): 
torch.load('path/to/model.pt')['model'] + + Returns: + pytorch model + """ + model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint + if isinstance(model, dict): + model = model['model'] # load model + + hub_model = Model(model.yaml).to(next(model.parameters()).device) # create + hub_model.load_state_dict(model.float().state_dict()) # load state_dict + hub_model.names = model.names # class names + return hub_model.autoshape() if autoshape else hub_model + + +if __name__ == '__main__': + model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example + # model = custom(path_or_model='path/to/model.pt') # custom example + + # Verify inference + import numpy as np + from PIL import Image + + imgs = [Image.open('data/images/bus.jpg'), # PIL + 'data/images/zidane.jpg', # filename + 'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', # URI + np.zeros((640, 480, 3))] # numpy + + results = model(imgs) # batched inference + results.print() + results.save() diff --git a/data_processing/yolov5_crowdhuman/models/__init__.py b/data_processing/yolov5_crowdhuman/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/yolov5_crowdhuman/models/common.py b/data_processing/yolov5_crowdhuman/models/common.py new file mode 100644 index 0000000..ad35f90 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/common.py @@ -0,0 +1,308 @@ +# This file contains modules common to various models + +import math +from pathlib import Path + +import numpy as np +import requests +import torch +import torch.nn as nn +from PIL import Image + +from utils.datasets import letterbox +from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh +from utils.plots import color_list, plot_one_box + + +def autopad(k, p=None): # kernel, padding + # Pad to 'same' + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +def DWConv(c1, c2, k=1, s=1, act=True): + # Depthwise convolution + return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Conv, self).__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def fuseforward(self, x): + return self.act(self.conv(x)) + + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSP, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = 
Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.LeakyReLU(0.1, inplace=True) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(C3, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) + + def forward(self, x): + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) + + +class SPP(nn.Module): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13)): + super(SPP, self).__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Focus, self).__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + # return self.conv(self.contract(x)) + + +class Contract(nn.Module): + # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + + +class Concat(nn.Module): + # Concatenate a list of tensors along dimension + def __init__(self, dimension=1): + super(Concat, self).__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class NMS(nn.Module): + # Non-Maximum Suppression (NMS) module + conf = 0.25 # confidence threshold + iou = 0.45 # IoU threshold + classes = None # (optional list) filter by class + + def __init__(self): + super(NMS, self).__init__() + + def forward(self, x): + return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) + + +class autoShape(nn.Module): + # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + img_size = 640 # inference size (pixels) + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + classes = None # (optional list) filter by class + + def __init__(self, model): + super(autoShape, self).__init__() + self.model = model.eval() + + def autoshape(self): + print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() + return self + + def forward(self, imgs, size=640, augment=False, profile=False): + # Inference from various sources. For height=720, width=1280, RGB images example inputs are: + # filename: imgs = 'data/samples/zidane.jpg' + # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(720,1280,3) + # numpy: = np.zeros((720,1280,3)) # HWC + # torch: = torch.zeros(16,3,720,1280) # BCHW + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images + + p = next(self.model.parameters()) # for device and type + if isinstance(imgs, torch.Tensor): # torch + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(imgs): + if isinstance(im, str): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im # open + im.filename = f # for uri + files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') + im = np.array(im) # to numpy + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im # update + shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.stack(x, 0) if n > 1 else x[0][None] # stack + x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 + + # Inference + with torch.no_grad(): + y = self.model(x, augment, profile)[0] # forward + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + + # Post-process + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + return Detections(imgs, y, files, self.names) + + +class Detections: + # detections class for YOLOv5 inference results + def __init__(self, imgs, pred, files, names=None): + super(Detections, self).__init__() + d = pred[0].device # device + gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) + + def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): + colors = color_list() + for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + if pred is not None: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + if show or save or render: + for *box, conf, cls in pred: # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + if pprint: + print(str.rstrip(', ')) + if show: + img.show(self.files[i]) # show + if save: + f = Path(save_dir) / self.files[i] + img.save(f) # save + print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n') + if render: + self.imgs[i] = 
np.asarray(img) + + def print(self): + self.display(pprint=True) # print results + + def show(self): + self.display(show=True) # show results + + def save(self, save_dir='results/'): + Path(save_dir).mkdir(exist_ok=True) + self.display(save=True, save_dir=save_dir) # save results + + def render(self): + self.display(render=True) # render results + return self.imgs + + def __len__(self): + return self.n + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)] + for d in x: + for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + +class Classify(nn.Module): + # Classification head, i.e. x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super(Classify, self).__init__() + self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) + self.flat = nn.Flatten() + + def forward(self, x): + z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list + return self.flat(self.conv(z)) # flatten to x(b,c2) diff --git a/data_processing/yolov5_crowdhuman/models/experimental.py b/data_processing/yolov5_crowdhuman/models/experimental.py new file mode 100644 index 0000000..5fe5685 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/experimental.py @@ -0,0 +1,133 @@ +# This file contains experimental modules + +import numpy as np +import torch +import torch.nn as nn + +from models.common import Conv, DWConv +from utils.google_utils import attempt_download + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super(CrossConv, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super(Sum, self).__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super(GhostConv, self).__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, 
c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super(GhostBottleneck, self).__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class MixConv2d(nn.Module): + # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + super(MixConv2d, self).__init__() + groups = len(k) + if equal_ch: # equal c_ per group + i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * groups + a = np.eye(groups + 1, groups, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.LeakyReLU(0.1, inplace=True) + + def forward(self, x): + return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super(Ensemble, self).__init__() + + def forward(self, x, augment=False): + y = [] + for module in self: + y.append(module(x, augment)[0]) + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +def attempt_load(weights, map_location=None): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + attempt_download(w) + model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model + + # Compatibility updates + for m in model.modules(): + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True # pytorch 1.7.0 compatibility + elif type(m) is Conv: + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + + if len(model) == 1: + return model[-1] # return model + else: + print('Ensemble created with %s\n' % weights) + for k in ['names', 'stride']: + setattr(model, k, getattr(model[-1], k)) + return model # return ensemble diff --git a/data_processing/yolov5_crowdhuman/models/export.py b/data_processing/yolov5_crowdhuman/models/export.py new file mode 100644 index 0000000..cc81787 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/export.py @@ -0,0 +1,100 @@ +"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats + +Usage: + $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 +""" + +import argparse +import sys +import time + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +import torch +import torch.nn as nn + +import models +from models.experimental import attempt_load +from utils.activations import Hardswish, SiLU +from utils.general import set_logging, check_img_size + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + 
parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/ + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + opt = parser.parse_args() + opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand + print(opt) + set_logging() + t = time.time() + + # Load PyTorch model + model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model + labels = model.names + + # Checks + gs = int(max(model.stride)) # grid size (max stride) + opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples + + # Input + img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection + + # Update model + for k, m in model.named_modules(): + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + if isinstance(m, models.common.Conv): # assign export-friendly activations + if isinstance(m.act, nn.Hardswish): + m.act = Hardswish() + elif isinstance(m.act, nn.SiLU): + m.act = SiLU() + # elif isinstance(m, models.yolo.Detect): + # m.forward = m.forward_export # assign forward (optional) + model.model[-1].export = True # set Detect() layer export=True + y = model(img) # dry run + + # TorchScript export + try: + print('\nStarting TorchScript export with torch %s...' % torch.__version__) + f = opt.weights.replace('.pt', '.torchscript.pt') # filename + ts = torch.jit.trace(model, img) + ts.save(f) + print('TorchScript export success, saved as %s' % f) + except Exception as e: + print('TorchScript export failure: %s' % e) + + # ONNX export + try: + import onnx + + print('\nStarting ONNX export with onnx %s...' % onnx.__version__) + f = opt.weights.replace('.pt', '.onnx') # filename + torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], + output_names=['classes', 'boxes'] if y is None else ['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) + 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + + # Checks + onnx_model = onnx.load(f) # load onnx model + onnx.checker.check_model(onnx_model) # check onnx model + # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model + print('ONNX export success, saved as %s' % f) + except Exception as e: + print('ONNX export failure: %s' % e) + + # CoreML export + try: + import coremltools as ct + + print('\nStarting CoreML export with coremltools %s...' % ct.__version__) + # convert model from torchscript and apply pixel scaling as per detect.py + model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + f = opt.weights.replace('.pt', '.mlmodel') # filename + model.save(f) + print('CoreML export success, saved as %s' % f) + except Exception as e: + print('CoreML export failure: %s' % e) + + # Finish + print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' 
% (time.time() - t)) diff --git a/data_processing/yolov5_crowdhuman/models/hub/anchors.yaml b/data_processing/yolov5_crowdhuman/models/hub/anchors.yaml new file mode 100644 index 0000000..a07a4dc --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/anchors.yaml @@ -0,0 +1,58 @@ +# Default YOLOv5 anchors for COCO data + + +# P5 ------------------------------------------------------------------------------------------------------------------- +# P5-640: +anchors_p5_640: + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 + + +# P6 ------------------------------------------------------------------------------------------------------------------- +# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 +anchors_p6_640: + - [ 9,11, 21,19, 17,41 ] # P3/8 + - [ 43,32, 39,70, 86,64 ] # P4/16 + - [ 65,131, 134,130, 120,265 ] # P5/32 + - [ 282,180, 247,354, 512,387 ] # P6/64 + +# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 +anchors_p6_1280: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 +anchors_p6_1920: + - [ 28,41, 67,59, 57,141 ] # P3/8 + - [ 144,103, 129,227, 270,205 ] # P4/16 + - [ 209,452, 455,396, 358,812 ] # P5/32 + - [ 653,922, 1109,570, 1387,1187 ] # P6/64 + + +# P7 ------------------------------------------------------------------------------------------------------------------- +# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 +anchors_p7_640: + - [ 11,11, 13,30, 29,20 ] # P3/8 + - [ 30,46, 61,38, 39,92 ] # P4/16 + - [ 78,80, 146,66, 79,163 ] # P5/32 + - [ 149,150, 321,143, 157,303 ] # P6/64 + - [ 257,402, 359,290, 524,372 ] # P7/128 + +# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 +anchors_p7_1280: + - [ 19,22, 54,36, 32,77 ] # P3/8 + - [ 70,83, 138,71, 75,173 ] # P4/16 + - [ 165,159, 148,334, 375,151 ] # P5/32 + - [ 334,317, 251,626, 499,474 ] # P6/64 + - [ 750,326, 534,814, 1079,818 ] # P7/128 + +# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 +anchors_p7_1920: + - [ 29,34, 81,55, 47,115 ] # P3/8 + - [ 105,124, 207,107, 113,259 ] # P4/16 + - [ 247,238, 222,500, 563,227 ] # P5/32 + - [ 501,476, 376,939, 749,711 ] # P6/64 + - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 diff --git 
a/data_processing/yolov5_crowdhuman/models/hub/yolov3-spp.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov3-spp.yaml new file mode 100644 index 0000000..38dcc44 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov3-spp.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov3-tiny.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov3-tiny.yaml new file mode 100644 index 0000000..ff7638c --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov3-tiny.yaml @@ -0,0 +1,41 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,14, 23,27, 37,58] # P4/16 + - [81,82, 135,169, 344,319] # P5/32 + +# YOLOv3-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + ] + +# YOLOv3-tiny head +head: + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + + [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov3.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov3.yaml new file mode 100644 index 0000000..f2e7613 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov3.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 
# model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, [1, 1]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5-fpn.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5-fpn.yaml new file mode 100644 index 0000000..e772bff --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov5-fpn.yaml @@ -0,0 +1,42 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 6, BottleneckCSP, [1024]], # 9 + ] + +# YOLOv5 FPN head +head: + [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [512, 1, 1]], + [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [256, 1, 1]], + [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + + [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5-p2.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5-p2.yaml new file mode 100644 index 0000000..0633a90 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov5-p2.yaml @@ -0,0 +1,54 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: 3 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, 
Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 9 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + + [ -1, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 + [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) + + [ -1, 1, Conv, [ 128, 3, 2 ] ], + [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 + [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) + + [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5-p6.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5-p6.yaml new file mode 100644 index 0000000..3728a11 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov5-p6.yaml @@ -0,0 +1,56 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: 3 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5-p7.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5-p7.yaml new file mode 100644 index 0000000..ca8f849 --- /dev/null +++ 
b/data_processing/yolov5_crowdhuman/models/hub/yolov5-p7.yaml @@ -0,0 +1,67 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: 3 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 + [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], + [ -1, 3, C3, [ 1280, False ] ], # 13 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 + [ -1, 3, C3, [ 1024, False ] ], # 17 + + [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 21 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 25 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) + + [ -1, 1, Conv, [ 1024, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 + [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) + + [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5-panet.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5-panet.yaml new file mode 100644 index 0000000..340f95a --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov5-panet.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 + ] + +# YOLOv5 PANet head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, 
BottleneckCSP, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5l6.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5l6.yaml new file mode 100644 index 0000000..11298b0 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov5l6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5m6.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5m6.yaml new file mode 100644 index 0000000..48afc86 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov5m6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 
1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5s6.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5s6.yaml new file mode 100644 index 0000000..1df577a --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/hub/yolov5s6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/data_processing/yolov5_crowdhuman/models/hub/yolov5x6.yaml b/data_processing/yolov5_crowdhuman/models/hub/yolov5x6.yaml new file mode 100644 index 0000000..5ebc021 --- /dev/null +++ 
b/data_processing/yolov5_crowdhuman/models/hub/yolov5x6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/data_processing/yolov5_crowdhuman/models/yolo.py b/data_processing/yolov5_crowdhuman/models/yolo.py new file mode 100644 index 0000000..4181709 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/yolo.py @@ -0,0 +1,272 @@ +import argparse +import logging +import sys +from copy import deepcopy + +sys.path.append('./') # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) + +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(Detect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # 
output conv + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class Model(nn.Module): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes + super(Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc)) + self.yaml['nc'] = nc # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.anchors /= m.stride.view(-1, 1, 1) + check_anchor_order(m) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi[..., :4] /= si # de-scale + if fi == 2: + yi[..., 1] = img_size[0] - 1 - yi[..., 1] # de-flip ud + elif fi == 3: + yi[..., 0] = img_size[1] - 1 - yi[..., 0] # de-flip lr + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if profile: + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + t = time_synchronized() + for _ in range(10): + _ = m(x) + dt.append((time_synchronized() - t) * 100) + print('%10.1f%10.0f%10.1fms %-40s' % (o, 
m.np, dt[-1], m.type)) + + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + + if profile: + print('%.1fms total' % sum(dt)) + return x + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + print('Fusing layers... ') + for m in self.model.modules(): + if type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + print('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + print('Removing NMS... ') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add autoShape module + print('Adding autoShape... 
') + m = autoShape(self) # wrap model + copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes + return m + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + +def parse_model(d, ch): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, + C3]: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[x] for x in f]) + elif m is Detect: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in m_.parameters()]) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + # Profile + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # y = model(img, profile=True) + + # Tensorboard + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter() + # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/data_processing/yolov5_crowdhuman/models/yolov5l.yaml b/data_processing/yolov5_crowdhuman/models/yolov5l.yaml new file mode 100644 index 0000000..71ebf86 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/yolov5l.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/yolov5m.yaml b/data_processing/yolov5_crowdhuman/models/yolov5m.yaml new file mode 100644 index 0000000..3c749c9 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/yolov5m.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, 
Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/yolov5s.yaml b/data_processing/yolov5_crowdhuman/models/yolov5s.yaml new file mode 100644 index 0000000..aca669d --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/yolov5s.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/models/yolov5x.yaml b/data_processing/yolov5_crowdhuman/models/yolov5x.yaml new file mode 100644 index 0000000..d3babdf --- /dev/null +++ b/data_processing/yolov5_crowdhuman/models/yolov5x.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, 
[nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/data_processing/yolov5_crowdhuman/my_detect.py b/data_processing/yolov5_crowdhuman/my_detect.py new file mode 100644 index 0000000..3e5e9d9 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/my_detect.py @@ -0,0 +1,253 @@ +import argparse +import os.path +import time +from pathlib import Path + +import cv2 +import torch +import torch.backends.cudnn as cudnn +from numpy import random + +from models.experimental import attempt_load +from utils.datasets import LoadStreams, LoadImages +from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized +import json +import numpy as np + +def detect(save_img=False): + source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size + + skeleton_path = os.path.join(opt.source,'2d_pose_result_hrnet.json') + source = os.path.join(opt.source,'images') + + with open(skeleton_path) as f: + pose2d_result = json.load(f) + + webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( + ('rtsp://', 'rtmp://', 'http://')) + + # Directories + save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Initialize + set_logging() + device = select_device(opt.device) + half = device.type != 'cpu' # half precision only supported on CUDA + + # Load model + model = attempt_load(weights, map_location=device) # load FP32 model + stride = int(model.stride.max()) # model stride + imgsz = check_img_size(imgsz, s=stride) # check img_size + if half: + model.half() # to FP16 + + # Second-stage classifier + classify = False + if classify: + modelc = load_classifier(name='resnet101', n=2) # initialize + modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + + # Set Dataloader + vid_path, vid_writer = None, None + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, stride=stride) + else: + save_img = True + dataset = LoadImages(source, img_size=imgsz, stride=stride) + + # Get names and colors + names = model.module.names if hasattr(model, 'module') else model.names + colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] + + # Run inference + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + t0 = time.time() + + + + bbox_results = {} + + + for path, img, im0s, vid_cap in dataset: + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + img_name = os.path.basename(path) + coco_joint_list = pose2d_result[img_name] + bbox_list_wo_sort = [] + + + # Inference + t1 = time_synchronized() + pred = model(img, augment=opt.augment)[0] + + # Apply NMS + pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) + t2 = time_synchronized() + + # Apply Classifier + if classify: + pred = apply_classifier(pred, modelc, img, im0s) + + # Process 
detections + for i, det in enumerate(pred): # detections per image + if webcam: # batch_size >= 1 + p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count + else: + p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # img.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt + s += '%gx%g ' % img.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format + with open(txt_path + '.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + label = f'{names[int(cls)]} {conf:.2f}' + if 'head' in label: + bbox = [float(xyxy[0]), float(xyxy[1]), float(xyxy[2]-xyxy[0]), float(xyxy[3]-xyxy[1])] # x, y, w, h + #print(im0.shape) + bbox_list_wo_sort.append(bbox) + + + + + + if save_img or view_img: # Add bbox to image + label = f'{names[int(cls)]} {conf:.2f}' + if opt.heads or opt.person: + if 'head' in label and opt.heads: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + if 'person' in label and opt.person: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + else: + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + + # Print time (inference + NMS) + print(f'{s}Done. 
({t2 - t1:.3f}s)') + + # Stream results + if view_img: + print('resize to',(512, int(im0.shape[0]/im0.shape[1]*512))) + cv2.imshow(str(p), cv2.resize(im0, (512, int(im0.shape[0]/im0.shape[1]*512)))) + cv2.waitKey(0) # wait for a key press before continuing + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' + if vid_path != save_path: # new video + vid_path = save_path + if isinstance(vid_writer, cv2.VideoWriter): + vid_writer.release() # release previous video writer + + fourcc = 'mp4v' # output video codec + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) + vid_writer.write(im0) + + # sort bboxes: match each 2D pose skeleton to a detected head bbox so the output order follows coco_joint_list + bbox_list_sort = [] + for idx in range(len(coco_joint_list)): + coco_joint_img = np.asarray(coco_joint_list[idx])[:, :3] + + face_points = coco_joint_img[:5, :3] + # sort face keypoints by confidence (ascending) + face_points = face_points[np.argsort(face_points[:, 2])] + #print('face_points', face_points.shape) + if np.sum(face_points[:, 2]) < 0.5: + face_points = face_points[-2:,:] + face_center = np.mean(face_points, axis=0, keepdims=True) + for bbox in bbox_list_wo_sort: + relax = 0.1 + relaxed_bbox = [bbox[0]-bbox[2]*relax, bbox[1]-bbox[3]*relax, bbox[2]*(1+2*relax), bbox[3]*(1+2*relax)] + check = True + for points_idx in range(face_points.shape[0]): + if not (relaxed_bbox[0] <= face_points[points_idx][0] <= relaxed_bbox[0]+relaxed_bbox[2] and relaxed_bbox[1] <= face_points[points_idx][1] <= relaxed_bbox[1]+relaxed_bbox[3]): + check = False + break + if check: + bbox_list_sort.append(bbox) + break + if len(bbox_list_sort) != idx+1: # no detected bbox contains this skeleton's face keypoints: fall back to a square box around them + head_stride = max(np.max(face_points[:, 0]) - np.min(face_points[:, 0]), + np.max(face_points[:, 1]) - np.min(face_points[:, 1])) * 1.2 + temp_bbox = [face_center[0][0]-head_stride/2, face_center[0][1]-head_stride/2, head_stride, head_stride] + bbox_list_sort.append(temp_bbox) + + if len(bbox_list_sort) != len(coco_joint_list): + raise ValueError('bbox_list_sort and coco_joint_list have different length') + + bbox_results[img_name] = bbox_list_sort + # save bbox + with open(os.path.join(opt.source, 'head_bbox_yolov5_crowdhuman.json'), 'w') as f: + json.dump(bbox_results, f) + + + + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + print(f"Results saved to {save_dir}{s}") + + print(f'Done. ({time.time() - t0:.3f}s)') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='display results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default='runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--person', action='store_true', help='displays only person') + parser.add_argument('--heads', action='store_true', help='displays only person') + opt = parser.parse_args() + print(opt) + #check_requirements() + + with torch.no_grad(): + if opt.update: # update all models (to fix SourceChangeWarning) + for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: + detect() + strip_optimizer(opt.weights) + else: + detect() diff --git a/data_processing/yolov5_crowdhuman/requirements.txt b/data_processing/yolov5_crowdhuman/requirements.txt new file mode 100644 index 0000000..cb50cf8 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/requirements.txt @@ -0,0 +1,30 @@ +# pip install -r requirements.txt + +# base ---------------------------------------- +Cython +matplotlib>=3.2.2 +numpy>=1.18.5 +opencv-python>=4.1.2 +Pillow +PyYAML>=5.3.1 +scipy>=1.4.1 +tensorboard>=2.2 +torch>=1.7.0 +torchvision>=0.8.1 +tqdm>=4.41.0 + +# logging ------------------------------------- +# wandb + +# plotting ------------------------------------ +seaborn>=0.11.0 +pandas + +# export -------------------------------------- +# coremltools>=4.1 +# onnx>=1.8.1 +# scikit-learn==0.19.2 # for coreml quantization + +# extras -------------------------------------- +thop # FLOPS computation +pycocotools>=2.0 # COCO mAP diff --git a/data_processing/yolov5_crowdhuman/test.py b/data_processing/yolov5_crowdhuman/test.py new file mode 100644 index 0000000..ecd45f5 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/test.py @@ -0,0 +1,340 @@ +import argparse +import json +import os +from pathlib import Path +from threading import Thread + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from models.experimental import attempt_load +from utils.datasets import create_dataloader +from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ + box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr +from utils.metrics import ap_per_class, ConfusionMatrix +from utils.plots import plot_images, output_to_target, plot_study_txt +from utils.torch_utils import select_device, time_synchronized + + +def test(data, + weights=None, + batch_size=32, + imgsz=640, + conf_thres=0.001, + iou_thres=0.6, # for NMS + save_json=False, + single_cls=False, + augment=False, + verbose=False, + model=None, + dataloader=None, + save_dir=Path(''), # for saving images + save_txt=False, # for auto-labelling + save_hybrid=False, # for hybrid auto-labelling + save_conf=False, 
# save auto-label confidences + plots=True, + log_imgs=0, # number of logged images + compute_loss=None): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device = next(model.parameters()).device # get model device + + else: # called directly + set_logging() + device = select_device(opt.device, batch_size=batch_size) + + # Directories + save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = attempt_load(weights, map_location=device) # load FP32 model + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(imgsz, s=gs) # check img_size + + # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 + # if device.type != 'cpu' and torch.cuda.device_count() > 1: + # model = nn.DataParallel(model) + + # Half + half = device.type != 'cpu' # half precision only supported on CUDA + if half: + model.half() + + # Configure + model.eval() + is_coco = data.endswith('coco.yaml') # is COCO dataset + with open(data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # model dict + check_dataset(data) # check + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Logging + log_imgs, wandb = min(log_imgs, 100), None # ceil + try: + import wandb # Weights & Biases + except ImportError: + log_imgs = 0 + + # Dataloader + if not training: + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images + dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True, + prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} + coco91class = coco80_to_coco91_class() + s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
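+    # The zero-initialised scalars above are running accumulators: p/r/f1 become per-class arrays
+    # after the loop, mp/mr/map50/map hold mean precision, mean recall, mAP@0.5 and mAP@0.5:0.95,
+    # and t0/t1 collect inference and NMS time for the speed summary. Each batch below is run
+    # through the model, filtered with NMS, and then scored image by image.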
+ loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] + for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + img = img.to(device, non_blocking=True) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + targets = targets.to(device) + nb, _, height, width = img.shape # batch size, channels, height, width + + with torch.no_grad(): + # Run model + t = time_synchronized() + out, train_out = model(img, augment=augment) # inference and training outputs + t0 += time_synchronized() - t + + # Compute loss + if compute_loss: + loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls + + # Run NMS + targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + t = time_synchronized() + out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) + t1 += time_synchronized() - t + + # Statistics per image + for si, pred in enumerate(out): + labels = targets[targets[:, 0] == si, 1:] + nl = len(labels) + tcls = labels[:, 0].tolist() if nl else [] # target class + path = Path(paths[si]) + seen += 1 + + if len(pred) == 0: + if nl: + stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) + continue + + # Predictions + predn = pred.clone() + scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred + + # Append to text file + if save_txt: + gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + # W&B logging + if plots and len(wandb_images) < log_imgs: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) + + # Append to pycocotools JSON dictionary + if save_json: + # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
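+                # COCO expects "bbox" as [x_min, y_min, width, height]: xyxy2xywh first produces the
+                # centre-based (x_c, y_c, w, h) form, then the xy columns are shifted by half the box
+                # size, e.g. corners (100, 40, 180, 120) -> (140, 80, 80, 80) -> [100, 40, 80, 80].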
+ image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(pred.tolist(), box.tolist()): + jdict.append({'image_id': image_id, + 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + # Assign all predictions as incorrect + correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) + if nl: + detected = [] # target indices + tcls_tensor = labels[:, 0] + + # target boxes + tbox = xywh2xyxy(labels[:, 1:5]) + scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels + if plots: + confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) + + # Per target class + for cls in torch.unique(tcls_tensor): + ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices + pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices + + # Search for detections + if pi.shape[0]: + # Prediction to target ious + ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices + + # Append detections + detected_set = set() + for j in (ious > iouv[0]).nonzero(as_tuple=False): + d = ti[i[j]] # detected target + if d.item() not in detected_set: + detected_set.add(d.item()) + detected.append(d) + correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn + if len(detected) == nl: # all targets already located in image + break + + # Append statistics (correct, conf, pcls, tcls) + stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) + + # Plot images + if plots and batch_i < 3: + f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels + Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() + f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions + Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() + + # Compute statistics + stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class + else: + nt = torch.zeros(1) + + # Print results + pf = '%20s' + '%12.3g' * 6 # print format + print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple + if not training: + print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + if wandb and wandb.run: + val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] + wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False) + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = '../coco/annotations/instances_val2017.json' # annotations json 
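+        # The detections accumulated in jdict are written to pred_json and scored with pycocotools:
+        # COCO(anno_json).loadRes(pred_json) pairs predictions with ground truth, and after
+        # COCOeval evaluate/accumulate/summarize, eval.stats[0] (mAP@0.5:0.95) and eval.stats[1]
+        # (mAP@0.5) overwrite the map/map50 values computed above.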
+ pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + print(f'pycocotools unable to run: {e}') + + # Return results + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + print(f"Results saved to {save_dir}{s}") + model.float() # for training + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(prog='test.py') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') + parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') + parser.add_argument('--task', default='val', help="'val', 'test', 'study'") + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') + parser.add_argument('--project', default='runs/test', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + opt = parser.parse_args() + opt.save_json |= opt.data.endswith('coco.yaml') + opt.data = check_file(opt.data) # check file + print(opt) + check_requirements() + + if opt.task in ['val', 'test']: # run normally + test(opt.data, + opt.weights, + opt.batch_size, + opt.img_size, + opt.conf_thres, + opt.iou_thres, + opt.save_json, + opt.single_cls, + opt.augment, + opt.verbose, + save_txt=opt.save_txt | opt.save_hybrid, + save_hybrid=opt.save_hybrid, + save_conf=opt.save_conf, + ) + + elif opt.task == 'speed': # speed benchmarks + for w in opt.weights: + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False) + + elif opt.task == 'study': # run over a range of settings and save/plot + x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) + for w in opt.weights: + f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to + y = [] # y axis + for i in x: # img-size + print(f'\nRunning {f} point {i}...') + r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, + plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_study_txt(x=x) # plot diff --git a/data_processing/yolov5_crowdhuman/train.py b/data_processing/yolov5_crowdhuman/train.py new file mode 100644 index 0000000..e19cfa8 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/train.py @@ -0,0 +1,608 @@ +import argparse +import logging +import math +import os +import random +import time +from pathlib import Path +from threading import Thread + +import numpy as np +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch.optim.lr_scheduler as lr_scheduler +import torch.utils.data +import yaml +from torch.cuda import amp +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter +from tqdm import tqdm + +import test # import test.py to get mAP after each epoch +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.datasets import create_dataloader +from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ + fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ + check_requirements, print_mutation, set_logging, one_cycle, colorstr +from utils.google_utils import attempt_download +from utils.loss import 
ComputeLoss +from utils.plots import plot_images, plot_labels, plot_results, plot_evolution +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first + +logger = logging.getLogger(__name__) + + +def train(hyp, opt, device, tb_writer=None, wandb=None): + logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + save_dir, epochs, batch_size, total_batch_size, weights, rank = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + + # Directories + wdir = save_dir / 'weights' + wdir.mkdir(parents=True, exist_ok=True) # make dir + last = wdir / 'last.pt' + best = wdir / 'best.pt' + results_file = save_dir / 'results.txt' + + # Save run settings + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.dump(vars(opt), f, sort_keys=False) + + # Configure + plots = not opt.evolve # create plots + cuda = device.type != 'cpu' + init_seeds(2 + rank) + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + with torch_distributed_zero_first(rank): + check_dataset(data_dict) # check + train_path = data_dict['train'] + test_path = data_dict['val'] + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes + names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + + # Model + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(rank): + attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location=device) # load checkpoint + if hyp.get('anchors'): + ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor + model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create + exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys + state_dict = ckpt['model'].float().state_dict() # to FP32 + state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(state_dict, strict=False) # load + logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report + else: + model = Model(opt.cfg, ch=3, nc=nc).to(device) # create + + # Freeze + freeze = [] # parameter names to freeze (full or partial) + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + if any(x in k for x in freeze): + print('freezing %s' % k) + v.requires_grad = False + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay + logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") + + pg0, pg1, pg2 = [], [], [] # optimizer parameter groups + for k, v in model.named_modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): + pg2.append(v.bias) # biases + if isinstance(v, nn.BatchNorm2d): + pg0.append(v.weight) # no decay + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): + pg1.append(v.weight) # apply decay + + if opt.adam: + optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + else: + optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], 
nesterov=True) + + optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay + optimizer.add_param_group({'params': pg2}) # add pg2 (biases) + logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) + del pg0, pg1, pg2 + + # Scheduler https://arxiv.org/pdf/1812.01187.pdf + # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR + if opt.linear_lr: + lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + else: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # plot_lr_scheduler(optimizer, scheduler, epochs) + + # Logging + if rank in [-1, 0] and wandb and wandb.run is None: + opt.hyp = hyp # add hyperparameters + wandb_run = wandb.init(config=opt, resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + name=save_dir.stem, + id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) + loggers = {'wandb': wandb} # loggers dict + + # Resume + start_epoch, best_fitness = 0, 0.0 + if pretrained: + # Optimizer + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) + best_fitness = ckpt['best_fitness'] + + # Results + if ckpt.get('training_results') is not None: + with open(results_file, 'w') as file: + file.write(ckpt['training_results']) # write results.txt + + # Epochs + start_epoch = ckpt['epoch'] + 1 + if opt.resume: + assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) + if epochs < start_epoch: + logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % + (weights, ckpt['epoch'], epochs)) + epochs += ckpt['epoch'] # finetune additional epochs + + del ckpt, state_dict + + # Image sizes + gs = max(int(model.stride.max()), 32) # grid size (max stride) + nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) + imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + + # DP mode + if cuda and rank == -1 and torch.cuda.device_count() > 1: + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and rank != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + logger.info('Using SyncBatchNorm()') + + # EMA + ema = ModelEMA(model) if rank in [-1, 0] else None + + # DDP mode + if cuda and rank != -1: + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) + + # Trainloader + dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, + world_size=opt.world_size, workers=opt.workers, + image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) + mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class + nb = len(dataloader) # number of batches + assert mlc < nc, 'Label class %g exceeds nc=%g in %s. 
Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) + + # Process 0 + if rank in [-1, 0]: + ema.updates = start_epoch * nb // accumulate # set EMA updates + testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader + hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, + world_size=opt.world_size, workers=opt.workers, + pad=0.5, prefix=colorstr('val: '))[0] + + if not opt.resume: + labels = np.concatenate(dataset.labels, 0) + c = torch.tensor(labels[:, 0]) # classes + # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency + # model._initialize_biases(cf.to(device)) + if plots: + plot_labels(labels, save_dir, loggers) + if tb_writer: + tb_writer.add_histogram('classes', c, 0) + + # Anchors + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + + # Model parameters + hyp['box'] *= 3. / nl # scale to layers + hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = amp.GradScaler(enabled=cuda) + compute_loss = ComputeLoss(model) # init loss class + logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' + f'Using {dataloader.num_workers} dataloader workers\n' + f'Logging results to {save_dir}\n' + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + model.train() + + # Update image weights (optional) + if opt.image_weights: + # Generate indices + if rank in [-1, 0]: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + # Broadcast if DDP + if rank != -1: + indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() + dist.broadcast(indices, 0) + if rank != 0: + dataset.indices = indices.cpu().numpy() + + # Update mosaic border + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(4, device=device) # mean losses + if rank != -1: + dataloader.sampler.set_epoch(epoch) + pbar = enumerate(dataloader) + logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) + if rank in [-1, 0]: + pbar = tqdm(pbar, total=nb) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 
255.0 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with amp.autocast(enabled=cuda): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size + if rank != -1: + loss *= opt.world_size # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize + if ni % accumulate == 0: + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + + # Print + if rank in [-1, 0]: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + s = ('%10s' * 2 + '%10.4g' * 6) % ( + '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + pbar.set_description(s) + + # Plot + if plots and ni < 3: + f = save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + # if tb_writer: + # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + # tb_writer.add_graph(model, imgs) # add model to tensorboard + elif plots and ni == 10 and wandb: + wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') + if x.exists()]}, commit=False) + + # end batch ------------------------------------------------------------------------------------------------ + # end epoch ---------------------------------------------------------------------------------------------------- + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard + scheduler.step() + + # DDP process 0 or single-GPU + if rank in [-1, 0]: + # mAP + if ema: + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + final_epoch = epoch + 1 == epochs + if not opt.notest or final_epoch: # Calculate mAP + results, maps, times = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + model=ema.ema, + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + log_imgs=opt.log_imgs if wandb else 0, + compute_loss=compute_loss) + + # Write + with open(results_file, 'a') as f: + f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + if len(opt.name) and opt.bucket: + os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) + + # Log + tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 
'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss + 'x/lr0', 'x/lr1', 'x/lr2'] # params + for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): + if tb_writer: + tb_writer.add_scalar(tag, x, epoch) # tensorboard + if wandb: + wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + if fi > best_fitness: + best_fitness = fi + + # Save model + save = (not opt.nosave) or (final_epoch and not opt.evolve) + if save: + with open(results_file, 'r') as f: # create checkpoint + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'training_results': f.read(), + 'model': ema.ema, + 'optimizer': None if final_epoch else optimizer.state_dict(), + 'wandb_id': wandb_run.id if wandb else None} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + del ckpt + # end epoch ---------------------------------------------------------------------------------------------------- + # end training + + if rank in [-1, 0]: + # Strip optimizers + final = best if best.exists() else last # final model + for f in [last, best]: + if f.exists(): + strip_optimizer(f) # strip optimizers + if opt.bucket: + os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload + + # Plots + if plots: + plot_results(save_dir=save_dir) # save as results.png + if wandb: + files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] + wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files + if (save_dir / f).exists()]}) + if opt.log_artifacts: + wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem) + + # Test best.pt + logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) + if opt.data.endswith('coco.yaml') and nc == 80: # if COCO + for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + conf_thres=conf, + iou_thres=iou, + model=attempt_load(final, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=save_json, + plots=False) + + else: + dist.destroy_process_group() + + wandb.run.finish() if wandb and wandb.run else None + torch.cuda.empty_cache() + return results + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300) + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + 
parser.add_argument('--notest', action='store_true', help='only test final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') + parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100') + parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model') + parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') + parser.add_argument('--project', default='runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--linear-lr', action='store_true', help='linear LR') + opt = parser.parse_args() + + # Set DDP variables + opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 + opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 + set_logging(opt.global_rank) + if opt.global_rank in [-1, 0]: + check_git_status() + check_requirements() + + # Resume + if opt.resume: # resume an interrupted run + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path + assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' + apriori = opt.global_rank, opt.local_rank + with open(Path(ckpt).parent.parent / 'opt.yaml') as f: + opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace + opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate + logger.info('Resuming training from %s' % ckpt) + else: + # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') + opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) + opt.name = 'evolve' if opt.evolve else opt.name + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run + + # DDP mode + opt.total_batch_size = opt.batch_size + device = select_device(opt.device, batch_size=opt.batch_size) + if opt.local_rank != -1: + assert 
torch.cuda.device_count() > opt.local_rank + torch.cuda.set_device(opt.local_rank) + device = torch.device('cuda', opt.local_rank) + dist.init_process_group(backend='nccl', init_method='env://') # distributed backend + assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' + opt.batch_size = opt.total_batch_size // opt.world_size + + # Hyperparameters + with open(opt.hyp) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps + + # Train + logger.info(opt) + try: + import wandb + except ImportError: + wandb = None + prefix = colorstr('wandb: ') + logger.info(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") + if not opt.evolve: + tb_writer = None # init loggers + if opt.global_rank in [-1, 0]: + logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') + tb_writer = SummaryWriter(opt.save_dir) # Tensorboard + train(hyp, opt, device, tb_writer, wandb) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0)} # image mixup (probability) + + assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' + opt.notest, opt.nosave = True, True # only test/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + if opt.bucket: + os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists + + for _ in range(300): # generations to evolve + if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt('evolve.txt', ndmin=2) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() # weights + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([x[0] for x in meta.values()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, wandb=wandb) + + # Write mutation results + print_mutation(hyp.copy(), results, yaml_file, opt.bucket) + + # Plot results + plot_evolution(yaml_file) + print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' + f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') diff --git a/data_processing/yolov5_crowdhuman/tutorial.ipynb b/data_processing/yolov5_crowdhuman/tutorial.ipynb new file mode 100644 index 0000000..7fce40c --- /dev/null +++ b/data_processing/yolov5_crowdhuman/tutorial.ipynb @@ -0,0 +1,1252 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "YOLOv5 Tutorial", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true, + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU", + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "1f8e9b8ebded4175b2eaa9f75c3ceb00": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "state": { + "_view_name": "HBoxView", + "_dom_classes": [], + "_model_name": "HBoxModel", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.5.0", + "box_style": "", + "layout": "IPY_MODEL_0a1246a73077468ab80e979cc0576cd2", + "_model_module": "@jupyter-widgets/controls", + "children": [ + "IPY_MODEL_d327cde5a85a4a51bb8b1b3e9cf06c97", + "IPY_MODEL_d5ef1cb2cbed4b87b3c5d292ff2b0da6" + ] + } + }, + "0a1246a73077468ab80e979cc0576cd2": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, 
+ "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "d327cde5a85a4a51bb8b1b3e9cf06c97": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "state": { + "_view_name": "ProgressView", + "style": "IPY_MODEL_8d5dff8bca14435a88fa1814533acd85", + "_dom_classes": [], + "description": "100%", + "_model_name": "FloatProgressModel", + "bar_style": "success", + "max": 819257867, + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": 819257867, + "_view_count": null, + "_view_module_version": "1.5.0", + "orientation": "horizontal", + "min": 0, + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_3d5136c19e7645ca9bc8f51ceffb2be1" + } + }, + "d5ef1cb2cbed4b87b3c5d292ff2b0da6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_2919396dbd4b4c8e821d12bd28665d8a", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": " 781M/781M [00:12<00:00, 65.5MB/s]", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_6feb16f2b2fa4021b1a271e1dd442d04" + } + }, + "8d5dff8bca14435a88fa1814533acd85": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "ProgressStyleModel", + "description_width": "initial", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "bar_color": null, + "_model_module": "@jupyter-widgets/controls" + } + }, + "3d5136c19e7645ca9bc8f51ceffb2be1": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": 
null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "2919396dbd4b4c8e821d12bd28665d8a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "DescriptionStyleModel", + "description_width": "", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "_model_module": "@jupyter-widgets/controls" + } + }, + "6feb16f2b2fa4021b1a271e1dd442d04": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "e6459e0bcee449b090fc9807672725bc": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "state": { + "_view_name": "HBoxView", + "_dom_classes": [], + "_model_name": "HBoxModel", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.5.0", + "box_style": "", + "layout": "IPY_MODEL_c341e1d3bf3b40d1821ce392eb966c68", + "_model_module": "@jupyter-widgets/controls", + "children": [ + "IPY_MODEL_660afee173694231a6dce3cd94df6cae", + "IPY_MODEL_261218485cef48df961519dde5edfcbe" + ] + } + }, + "c341e1d3bf3b40d1821ce392eb966c68": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": 
null + } + }, + "660afee173694231a6dce3cd94df6cae": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "state": { + "_view_name": "ProgressView", + "style": "IPY_MODEL_32736d503c06497abfae8c0421918255", + "_dom_classes": [], + "description": "100%", + "_model_name": "FloatProgressModel", + "bar_style": "success", + "max": 22091032, + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": 22091032, + "_view_count": null, + "_view_module_version": "1.5.0", + "orientation": "horizontal", + "min": 0, + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_e257738711f54d5280c8393d9d3dce1c" + } + }, + "261218485cef48df961519dde5edfcbe": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_beb7a6fe34b840899bb79c062681696f", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": " 21.1M/21.1M [00:00<00:00, 33.5MB/s]", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_e639132395d64d70b99d8b72c32f8fbb" + } + }, + "32736d503c06497abfae8c0421918255": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "ProgressStyleModel", + "description_width": "initial", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "bar_color": null, + "_model_module": "@jupyter-widgets/controls" + } + }, + "e257738711f54d5280c8393d9d3dce1c": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "beb7a6fe34b840899bb79c062681696f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "DescriptionStyleModel", + "description_width": "", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "_model_module": "@jupyter-widgets/controls" + } + }, + "e639132395d64d70b99d8b72c32f8fbb": { + "model_module": "@jupyter-widgets/base", + 
"model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + } + } + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HvhYZrIZCEyo" + }, + "source": [ + "\n", + "\n", + "This notebook was written by Ultralytics LLC, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", + "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone repo, install dependencies and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "wbvMlHd_QwMG", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "ae8805a9-ce15-4e1c-f6b4-baa1c1033f56" + }, + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install dependencies\n", + "\n", + "import torch\n", + "from IPython.display import Image, clear_output # to display images\n", + "\n", + "clear_output()\n", + "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Inference\n", + "\n", + "`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "zR9ZbuQCH7FX", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 534 + }, + "outputId": "c9a308f7-2216-4805-8003-eca8dd0dc30d" + }, + "source": [ + "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", + "Image(filename='runs/detect/exp/zidane.jpg', width=600)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", + "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", + "\n", + "Fusing layers... \n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.011s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.011s)\n", + "Results saved to runs/detect/exp\n", + "Done. (0.110s)\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCALQBQADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD8347F5pkSP5t38P3ttaFjZzR2rzOMjfs+/wDNVi10+5kh877Gqv8AwfP96tOz0+2b99sw0e1drfxV87HY+wjHm94z4bOZ2WZ4dgV9vzN81Tx6a8jHvu+bd/DV+HT51uHd0Up95Pl21bhtfIkH2ncqfN8q/e21NS0dUbU4/ZMf7Oi52OzMu1UVU+an/wBjlW3w7l2t8y/3q3pNPRl2I+1tn/AqZZ280cXk3Nrub+7v+6tefKtLl5onZGm48qMqbQ3k/wBJeb5lb5PMf5l/2aZcaW6tshhyzffZn3ba3biHzI5USFfmX7tQyWc3zTXltuWPb+8jT+LbXJWxVWO534XDxkchrmm/KZt+d3yvurBm0maHLvu2su1G/vV3OsWsMe5xyWTd5bVh3VikkLJ5Pyqu7b/easaNacX7x6nsYyicrJYws3nom1m/vf3qWC3uYW32zr8v95v/AEGtK6s5I9iJuDMu51aq62827502Nt3Jur6zAylKUTlqREj+0wsiI7OzNuRW/wBr+7ViSPy4/wBzud9+1vm+Wq0aurIJtxdf4qtLayeX8nyusu5mb+KvqMPSlKJ58qnvco65uHaNpvlTdt2fJ8y0kjSbER3Vtq7tzJtqbyPtDLDNtx96nTKjR/Ii7t38X3a9D2fKebUkoy5SHyXjnP75l/i/3amSSVm+0v5joqbfv/Ky/wB6i3/fRrv+9911j+6rUsMMuxvJufu/fXZXPKXLE4OaUuaxPBv3b9n+r/hjl3LVqH9zJ/qV2t823/eqtbwpHGkP+qVn+dY/l/4FVuzZLqRI5plV13b12fdX+GvLxHvF04825p2cm1Ucopdvl+V9taVvDcSSK6fd+ZXrN0+GGS637F+V1aXd/d/hq7b75mX51Db9zMr/AC/7Py14WIqSNadHuaVjNLJCsP2pmTfuddvzNU8jO3yQ7X2/e/iaq8IeGNPLRW+bbu2fdq95n2OZXhhV2b5V3V4dap7+h6VOnHqWob792yI6o6orfLVCZJpPnudrBf4v97+KpmuIWmDzTKsrfdXft+7VCS5dpmR5o3/vq392uJSjztQOlx928hzbIZXSFFLs7fMqf6yopmubzY63jIVb7qrU32OGSP8AhRPveXHSyKluy/J975VXf/FWkqnNqLk5fdEntdy/3vl2eZs/76pU3yQyJsYeX8if3lqwsE0iy2zzfuvl/d/7VVr6O6WTf8yfe/d7/u1n71TRSMK0R8d1cxwrvRQv3dzfdWoprp75hNc3cjtHtSLzG+61OaGaS3RJnV1+88bVVkkRlKWtthlf+GspRhKRjH3Y8rKuoXtvHteN8qy7X/vVga9cXisrpcthkVfm/u1pXk0
VNj0W1vHtWzH+6E3y7l/hantq3kLLcw3MKvH8v+181crb6wkioUuZGDfws392pV1xPM+e5XbJ821q7Y/AcHL73umjq14j25R9v/Af4q5TWLqG2Evzq/mfN5dXLq9+0K009zkLuZmZ/l//AGa5jXNWRpPO2xoPK/iXc26spbGsYyMrUJ3ib59qjd8sa7vu1g6jJ/f8v5mZ5Vq/qF9ukWFJmf5/vb/u/wC9WPcfZlXfNI0Uyvt8xW+9XFU/vHZGU+XQyNQkkmZZrx2wsX3o2/1bVzusalDJHveZvlTZLuRl+atfWr547zZczK+35fMjb5V/u7q5DxJrDx24hL+aVRv3LP8Ad3Vyeh00utyvPKjRyeQ+Nv3Gb7u6sDVr6FpCJpmRm+VFX5lam3msbm2b5HX7zsq7ttZ91qEM87Rl4xt/irzcRKNP4juo8nwiRzTSMyJB5qKvz7vl+Wp5QmwP8qfxbV+7WbHcI8jb3UK38P8AtVpWrJNMEd9+2LbtV/lX/arzanve8ehGMRNqSfuYfmfZVL940Zd4413P8u1dzbaszSQsk32aRnSN/wCH5d1QNHDGxzcL/Ez7fvVnT68x0R94bH5LQna+9F/vL96kjtnWQedDIis27dJ/DVnTbVGZs/L/AL1TxokNx8j4WT7+7/lo1axlLm0NuXmhqTWipDYv5KM6K2591TWcabtjvsVl3J5a/wDjtR28e6HZ1Zf4d/y1citYWVIX+RVT5tv3qPacoRpyY638ldPjtkdS33vLkb5ttTx/vLxIUs8Nt2+Yrfw0yzW2tvnmRVZf+eifeVqsx+Yqw/Zk2+Z8vnVEZfFYrl+0QTNDHux5m/7v+y1el+HUB+FuxYgAbCfCYx/frzq4t/MYeSGD/e+V/lb/AGq9I8PxhPhlsxtH2CbjrjO6v2vwOd86zD/sFqf+lQPe4a5vrdb/AAP80eWR2vlxpZzP/H/F8u2s3WIdrfOi5/8AZa1mb5lm/eNKv3tq7lZf9qsy+t5Y5d/zMi/L93atfhk583MmeRRjGPunL6lp7x7k+VfMbbub5ttZs1ukzHY+1f8Ano33d1dHq1nHH1T/AHKz5rWHzGjtoWXd/Cv96uf2sY6nZHDykYclrtVke2Xa33938VUbousghd9jN9xa2ZmtlkXEPmfwsv8AEv8AvVj6lLbRx74XYFfmT+KroztU+EmVHliZOpLDIz/ufnXn5f4a57UXdZHQn5V+bbW9qU3y70K7m+bcv/s1c9q9wNr84Zk+Vm+7Xq4fY4K1M5+88xpN7purJuGZdybPvfxN/DWlqCOqPMj71X+Hf/FWZdM8e55h95K9Sn72h49Sn7xXX95cDL42/L/wGtXSYx5gOxvl+XbWbD500ibEUsv39tdFo9pj6Sf7FdPL7gUYzlI3NJsfM5+6yt86766rQ9NS4w/k5VWxtasXR7GGVfn/AIm2o1dloenooZ0C72XbXJU7M9mjTlze8WtLsEkZ9k0Y+fau7+9XUabo728afJ95Nybl3baqeHbcQqj3McbBfl3NXV2FmkjPJDcq4Xb96uOpGXxHq06dKUYpEOk6X5ylILZstF95f71emfAXwimreLre21W5YLuXfcfd2rXKabZfZ7h3jmZG+4vyfLur3b9kP4U63488VWc3h7TY79VlXzY2bb827/x6lHlcRY6MY4WTP1u/ZK+FOm/DP4Z6LDbbrq81KwVrJZF+6rfM25q1vGEj+OvHiTWFst5pfhtdlrb26/Leag38Un+zHXGfAP41eJ9e8SS/DeGzaKbSdNaJmX/l3Xbtbb/tNX0V8K/CPhfS/DMNhYRR+YsrS3En3mZvvM1cMv3kryPzapzJyPOND/Zr1KGFdV8SXLXVxskuNXmb/VLIzfLHHu/hWvnX9rjwb4vvvM+HulJbvaK6tcafp6N5cO5vl86Rf9Yzf3fu19SftCfFC+1vTIPA/gIXn2mSfY5tU+VV+7ub+81eaftlfEzwv+zP8K7fTdHS1PiprJVih3+Z9jkZW3TMv8Un93+7UuVKNKXY68LRk6sbbn5i/Gj4av4T1ObR9YS3udXbd5sMO1fssf8AtKvyq3+zXg+gfC3VY9Yge53QrJP/AKU0nzMq/wCzX0BY69rGrLI+sbUnvLhpJ2kXczbv9qsK6h+2X0eiWz7ZvN3TyMn3f92vmamKjf4T7/C5VOlh+aRw3iLwe+patPqttZRwQw7Yk/vNHt+Zqy4/Elh4Z1LzrnSrd0t/me3k+Xc235a9Pvo7DQfDGuGaFmnjt22M38Tf3Vrx3VtB8YeMNHm1u20qOB5k/wBW0u5qKdT23vIwo4flZ4r8ZvE/xI+Nnj648YeJ5ldY38rTbVflgs41/hjX+Hd/E1ZOk+DfEcbxubbaFf8AiT5a9T0P4J+P7qF5byzjTyZdreZL91q6qx/Z0+JbWsVzYaUt5u3b1t7jdt2/w12VqyjGNmdeFwspu7OS8A+A9Y3R6k9hJcS7/kWNl+b/AHq7nxNZ2ek2qasmiTQyKu6Xcn3f+BVZ8N+EfiLoOqeTdeErqFI0/wBX5W7/AL5rtvFmueHpPC+zXka3favm29wu3buryK9aPMe9Rw0eXmRjfDvx/olnshunVE3q21v/AGWvvb9lXXE8SWdto9nIyJIqq8lxtb5f4a+Crz4d+Etb0m21XR7xVbfuT7P91v8Adr65/Yt16aOxSO1n3vHt/wBcm1v92uLESheMkejh4ycHBo+0ZdP8K+FbX+1tYufnX5XZV3eZVvwT4i0r4heJjo+m20zJDtVty7f92l15/wC3PBdnc6rNa/uUVpdrbWZqT4O3GiaP4gj1j+1rWPajOkbS/wANdcKlKP8AhPPrRnGlJxjqe4W/wlbULBJgm0bflWue8Y/DK+0O3juAjKD8r7a73wF4+s9ejCJqNuyBtqqtanjHyrq0XO1l/ir2ZYfAV8LzwPi6ea5nhsdyTPi79pb4dvq2gyv9m+aGJmST/wCKr5n+Bf8AYnh/4sWb6x+7aO82xTL/AAtur9AvGXhWw8WCWxvIcbd33fvV4P4T/ZB0q18YX0NzDcT2011vguNu3y23fKteVhYxjV0OnPeWpSjM+x5YLZvhqNE169WeK5sNsdwv3WXbX4x/8FY/hzo+tf8ACTaDePNHpWl6XNcJJGzfvLr70Kt/s1+vXg+K6+Hvga58H+LTJdRQt5dq6/Mvl7a/O7/gsN8LZ7z4J+J/EPhW5ke3jtfPfy23P975vlr6aMvejA+AnKPt7n4GrcGTyxc7fOZdsrf7VM2J5flvt37/AOGtW+0/7LuhdMP8zfd+bduqs2mtIV2bhEzfPuT5q9eMoQ91nurmqQMy48mdtnl7f93+Gsy6skWOR0RifvJ89b0lqnmL5e1f9plqrdWqFmR3VQ3/AC02U4ygOVPl3ObmtfOV1dFx/svWVqGmw2+R975/4v7tdXNpvkwtDs+Zl+8q1mXGmzLG2+Fm+fbW1OpKWpyVI/ZOXuLeCNy/k/xfdqpcRw7nfZW9dafubY7/AHfl21m3MCRs2/cq11xlzHNyozWj+benT+OvsL/gl1uOi+MmOM
G5sduP92evkSZE3b0Rifu7a+uv+CXAxo3jMbcf6VY/+gz1+n+EOvH+F9Kn/puZ9Fwj/wAj+l/29/6SzwP9oyZ/+GgvGK7sgeIrvj/tqaxNNuPL2JN0krZ/aOwfj94yOSCviW6/9GGub0+R2XYzt/srXxmff8jzFf8AXyf/AKUzyMf/AL9V/wAUvzZ1Oj3SNuRHw38DfxVu29w6yLDs27vv1yelzG3l3p81b1nN5jK78bvvN/drxvfOTl5pnSWl4nl7HT5l+V/n+8taVjcfJsSFflf+KsG3mh2+WduWb5P9qtXS5nkYxv5itu+9VRlH4jGUZmxbt+73zR/IvyrJ/FUMlxM0jOkfH3tzJ95qYskm9Rsbfs/75pZLh/nhO5F3fw/N81aR96Bj8I6WaaSMom7aq/eVP/QqryxI20P/AMCqeN5mZk+Vxt3eX/E1OjXd8kPyVUZcxPL9ozJraG3XeifKz/w/xVnXUMO95Htox/drcvLdFtVfYo3LuTbWXdL8q7AzL96plI1jyGJcQozF0P8A31WddSeYp4kEMbrvb+9W7dW7qGfO0N83+zWPdW/mK/f/AHanm5jojLlMy8m3/P5mTsrIuroN877Xdf7v92tPUI0jU+TuG35X+Tbt/wBmsS+XbJshfaf71Ryx5jXmker3lr9nk3pueJfl3VmzW6TM2zaF/grcuLdFHnHcR8qbW/hqo1rDJH/CN391a+PlL7R/QcY83wmTHYuzLM77gv36lW1SSRnhhZSvzfLVo2b+cSnzfd+an+RNC0SeSzOzsrt/DWHtOY648kYFdWeFUR9uG/8AHqkt5nikOxNoX+9822nL5xjy6Kf8/epm5GzD/H/epx973ZHBipR5fdJ4bi5aP9zNHs2Ns/vNVuHZJbNbI7P/ABf7W2qEbfZ2xN827/x2prW+e0be78L/AMtP7td0fhsj4fMOXn9407WSGFfJRGb+H7ta+nq8kHzpn+78+2se1mfcr71/3lq5b6hN5j7JsCRNqMq/LWrj7uh4EvdmbemyJ8qbFRt3zMv8VTLNNJHI/nLuX+L/AJ6L/s1nW/7uz++2/ZuRtv3quBPLg8ya5/g+ZWStIS5dSOX7I+GRI4XSGNgmz7rPVeS4+ZPOYsd/yU6Tfuf7M6xFv4WqnL50kjuky7fK2vC1dPP9oxkWbjUElk8l32t/s/dWoFvILyQJsYfNtX978tVmbbI6Kioqr8zN/FTI7xN2+Ty97L8jfdWueUuccTY+1Jb/ALlJF/dv8isv3mq/p9x84+eNh/sp8y1z6373EiI6Kr7fmaN6sW00KyK6bi2zduauOfNy+Z0x930Or02b95++mX5f71acepTKzBEXfs3Purn9LbanD7y339yfdrUhZP8Alt137t1cXNzTvI6YxlGBuWt8hhUIJF3fL5i/w/7tWVmSaNfJRti/Ju8373+1WXayJH8+xTtXbEy7mbbV2Jk2Lvs2H8KfPt3V1UafNL3iKlSMYmxpLNMqTecrPHtX5vvfLXXaDbpMrv5Ledv+Xb/dauP01vmVEhVNz/Kyp8tdt4fXzpPIRGR2Vd/l17mFp+6eFiqkoysdDo+mvIzb3b5flZl+7XQ6TpO1o5tmxGXd/stVHQbXdbxecmGh/wCeb10liqM4f92rr8u1Xr1KcYx+E4alTlLuk6D8rPDtQSfNu2LXS6X4f+yyKfJWUtF8q7/u03w3Y+XGyPDH8q/LG33mrqNL02e4bzo4VRVXay10RjGJyc5FpOkww42Qq+77m193zfxVvQ+H08xPJtlZ9m6Jn+XbV7S4YYlR0h3SL/dX5m/vVqRqyjyfJkD+V/q2Sq5UHtOU5280VPtD749n975K5/XtP8m8EM3y+d823+9XeXGzy1eZGd9jb/7tcxrlrbQyhNjFNu7c33aj7ZpzcxyOqWaQ7vOudqeVv/2v92uW1eOGa4a88r5FT91I33v+BV2Wp2brIiJzDI/zyKtcv4gjdZzs3bpPlZm+78tLlluddOfLM4XXo0mh+SHarJ/F8u6vM/G0dtaxvNawqV835tteo641tcb3vHbEbN833a8o8YL/AKU6u6tu/h+6tTLk5Top1JnmnjCOb7P+7mjHz7vl+9trhtek3bofP2oqbv8Aers/FVwlx5mx/nVG3bfmrzHxRdP5hQPzXJyzqG0sRFR94yNU1b7+z7y/L8tZNxq0c0b3Jm2Nv27t1VtavX3702/7y1h3GpPu2O/3f71Vy/ynDWxHtInW22uBdohfa6/Nu/hqZta3N+7fb/7NXFQas67k3/K38TVet9Q8xvlm2bfm+ap9nAx9tI6ttY2YdH3Fko+2Jw6df49rVzK6kYlb+PbUi3ySS/I7f3qIx5ZGUqkpe6dPb3m2RpoX/eL8rbv4a0bfUtzJdO67l/4FXI2eqQtHu3/8C/2qu2epTSOux9n+y3y7q2jH3zM73T9akVkm+8v93bWo2tedHvFyzL937u3a1cTY6g4VUmf52+atG31abDJu2p/erqicnxHZ2/iF5sbPLYqvzM33VqVfESBm2ffX76/3f9quTh1CaPbDH/wGRfu1ajZPl+zTbG/iX+8ta+hh7M7bS9YmkiWGafcjLt2r8rf71bVnrT7kh3yP/F/eX/ZrhrGbzFV3vG37Nrs33VrbsY7xIUe2m83c6szMv8NcdSpynVRpx6Hc2erQx2/2lOX/AIlX5ttWl1yaSETfM/z7f97dXLWN1cw/8eyfeTczL97/AL5rQtdShjhDo+2H+Nm/vV5WIqcsbnsYWjzStymxeeIEjbyYd22Ph2/iqncaw9rHvebCyP8Adb+9WU155032lHXZuZdv96oWZJLN7ab5BG3ybf71ePWxHNE97D4X3jT/ALSe8z9pj2+X8sSr91v96oG1SaTb5235f73/ACz/APiqprdO0QdEXYr/AL1f9mmTXfmMsMPzLs+RtlcVOod0aMfsj5Lp7iGWZ3b5k2/LWFeN5jeT80W7+983zVozXFyI2tt6hG+4zfxVkXjfudm9lZvl3f7Nb06ntNEYVKMY7mdqW+H9z23t8rP91ayLqO5uPkSFmRf4f4mrQuriFoU3fvfn2/NVW4vljk8mGHyjt2tu/vV2c0oxujk5feM37DuZkRMNt2/MlW7PiH/x35aYs26Yp8su5fuq3zLUsLPb3C/Pt2/cqJT5dB06ZetVh3ryq/71aGn/ALuVd/3VrJ3PJIyTJ93/AMdrY0G8Ty/JfzpRHu3tIn3qIRlH3jWUfaaHUaDcJ5ao6MzK27cv3a6zQbyQTNInzO33F2/LXD6bcJGyI74Vfm/2q6Wx1J9ypvkEvy7GjZfu162Hqe8eVWp+7ynfaPJbfZ2dHZ9z7Uh83asf+1W7pupTRstz529lT7rfxVw2lal5kZmd2V12/u2Td/wGtnT7ry1KJMu35v8AZr0adQ8ypR5TsLXVPMs/9c0e19rrIv8AF/s1r2uqXkEYvLOZndfllVotqrXFQ6nthE/ys0fysrPu2/71Os9amaYyC55+VWZq9GnLuedWp8x6FD4lhgaJIXYFlZvlT71Nj8T/AGj9yjr+8f8Ah/iWu
GbxI9q7wWz/AC/xs3/oK1HJ4shUqnnMiL9zbXbHklsefKMIzO3m1yFrf7TC7AfMPJ/2qwdY1wrCu/gs3+sV/mrm5vFO3c9n8vz7XaSX5VrMk8UwtDKlzMvnK21l/haoqR5dCffl7xs3esJIX2bnC7d395v9qsbWdYe32b3XzP8Alrtf5dtYN14qgt3e2S5jV2/h/irBvvEk1xGzb1VY1+dt1cdSPNLQ6acvdNnWvEENnHNM8ykxv/C33q4fxDrk0k0ib/l37kkb5t26q+teKkkDvNt3t8zMtcnrHiItN5KBdn3k+b5lrgqfynZHlkbtxrVtZw7/ALTvZv8Almv/ALLWVeat5jApt+Z6xJtS3SI/nZX+61Vprh3kZ4Xj/eN97dXBUj7T4jqjI6i3u8Ks29Szf3flq3a6lJH8+/Yfvbv7y1ydvqEyxKnk7vn+T5qnk1Sa2yjzbdq7vm+9urilT5Ye6ehTqR925uyaqk0b/eZmfa275flqS31NGXzkto/m/hX/ANmrn21iZtuyZU8z5k3U+HWHaSN0Ta6/e3PtVq55VJcvwndTlyy1OvtdQmkX54VTc/zMv3q0oLy2jkV0Tdt/4FXK2OuQ+YryXK7Gf/V7Pmq6usYjeTYqqv3W/vVEeaR1HQxyJ5kSIjH5fut/dq/tcXSbPubPu7/mVv71YdnqXmbd7rhU+dmq/Y6pDcXH2YoyMy/I237vy0hx97c1Ifs3lt5jxjci7Fk+arUMHkqIUdX2oyuuz7v+7WRbt5eyH5Svyt8y7q1GZII9jo21fmb5/l/2adT3fhJjT5viGyabCuy5+YfPu2s9ej6AGT4akO4bFlP8wHXl686mmS6YpDuRFT978/3a9G8PRmP4bCNn2EWU3zD+H738q/a/A6bedZhf/oFqf+lQPo+HKThiattuR/mjzMxww2ryeTMrN/DUN1H50ex3b5fl3L8ys1XZLia8t4kTcwVdvmN8u3/aqhdM/l+RDuVF+ZGV/wDvqv59qVpVDmo4eFMxL63eRv30zBI/l2r81ZV3G8dw/G7y/wDVbf8A2auhmjSGR7v767dvy/xVg6pbTNDvcKCzLuX/AGv4amNSPNytnpU8PzR+E57ULiVpG+RYgrsrN/s1iXk3kuFd1y38X8NdBqNu+2RCnz7sfN/DXO6pGjcPtTb8u7+9XfRlEKmEjuYmsXDyKbeHais/zsz/AHqxNQ3sp3uuV+Xd/DWtqkfyKUm4VtqN/drFvmi2jf8AM/3Xb+GvUo+8eTisHLm90x71tqt5Nt8i/K7R1m3ioU/2v4a1Zmdd+zbsZfu1lzInmb3/AIW+7XqU4njVcHyyE02HazeTtb5/u11Gg27tlHeRtqfJ8n3VrG0+F4nDui/c+bbXU+H7fdGzvMxGz+5XVGXvExwvvm7o1v5Koj/OW+589dvoduk0LPs27fvqtc34ft5gqOm0bf4tldvodq7bOF3f7X/oVRWl7vMethaMZGrpdhbeXGnaSLdtZPmWun0nS5oWbeiqJk/1n/stZWiqjXCQ2z/Oy7Xb73y11Oh2VzGzpM7OVddjL/drhlzSPSw+Hjct2tn5cn2beu3dt8tX+Xd/vV9A/sg+IpPBPjCwv4YftU32jbFH91Y/9qvErK1s7gpNbQ5Lf3v/AEKvU/2fY9SXxdbPYbXXzVXc0TbVbctZVPdIzTD82Dkj9UPhb4f8K+A/BOr+P9Kud2qas8lxdXDRbVVW/hX+81dp4W8Za94f8Kvq/wBsbdJaqkULL821l+Zq8+0rxtrGj+C49N8SQ27XF49vE7bf3axt97av92qnib4gWzalcaPol/G6R3HleTC3zR/LXm1KnN7p+Zxp/vfeK/iD46P4D1aPUtEg8/WFuvN+2TS/u7eNV/55/wATV8RftLftA+IfiVql/wCNpr+adpNSkuPMkb/Wfw/NXrfxz1y8s7/WrmbaIbO1ZUXzf9Y235trV8m/EC8/taOysIYmjT/WxKv3dteZi4Q155H0WT041KsTJh1vxPfTPcpcyFW+Z1Z/u7v4Vrfsbi5Wa33wrujRv3itVLw/Y+Zbx22z5P8AZT5lrtPA/gua41KG2s7NZY2fc3mfe/4DXzlTEUuX3T9Fp058nvGF4ivtV1ZTYWGlSNbzNueSH5mrn1+Cfxa8SWKXL6lHo9lv+S6vFZfOX+Kvqe88E/C74b+BZviR8QtTjsbGxTfLH95rhv4Y468d8YfGLxb8WNNi8W63pVn4c8G287JYfal23N5H/u1vl+IpRjKLPPxWFlTfMtDw/wAVfDHTdDhTSrP4/XE100StKrRMqszN/wCg/wC1UvgnwX480WZI/DHxRsZEkb5FuL1o2Zv91mrK8dfED4Pw3kzw6VCgVtssi3DeZIv/ALLWDfeOPhXqlu6aR5ltNIny/vd22niPZyj7pphZOnLnke9Wev8AxR8M3STeJPDbXMcLqzSWvzbl/ibdW/q2peG/iF4Ru5nsLO4h3Ku26i2yx/8AfVeM/CX49X+k3iaVN4umu2hiVU+1bV/75r0Tw78YvB+rR3OiaxZQzxzS7kuF+Vl/vV5EuenP3T6KjUo1qRt+H/hn4Y/sWGawS4tfLf8AdLDtkRmr0j4f6TqXw71C21Kw1u4htm2rtWL5mZmrmNB0XwfcWkL+FdSurdJG/wBSs+75v4vl/u133ibxI+j+HdNsrnxDH5TXmyJVt/m+7/eqJS5pe8VGPs5H0R4T8RaDqnh1LbU7m6kuIVVYlkl/9Crs/h7oqX2oRzL9lCTPuX5l3Kv+1Xgnw50PStWtFv57+a4juIt3+tZdzV7f+zvofh24m87zt6Lu3NcS/M1VCM5T5UKtyRpSPpTwCulWumo14kburfu/n21va9rN3b2EjWc21JP+en8NcXpzeDZzHZwTW6vH8rLHPUXiH+1LGxl/sLU/MZdxSO4fcrf7Ne1KoqMOU+Lng4V8Xzv8SbS9VFzqkqxPld+167f4aWNtqGp3KXNvG6KnyqrfxV5HpWpXKyGa8mjhmX5pVr0v4ReJHW7CfKyzN8zLXHg8VGGIi5bcw8/wMlg3ym3400NLXSp7ZBny923d/Etfmz/wVA+I2n/C34d6lZ+IZpmsdeiazg8n5v8AWfLu/wCA/er9O/ia00OiPc20G9/KZdq/3dtfih/wWy+MP9vXkHwom01ZraGy3/ao22yLN5n3f++a+wVH967S0PzKNP2mIUT8tPE3h1NF1ibR7O586GJ9qXUn3pFrNm092KwyP89dVfaLtuij3O/y/m/vMv8As1VTS4VhV9nyr8u5V/hrr9ty+6fVUaMYwOWmtfL/ANGSFtzVRl0lNjJ8qjf8q118mj7ZGMyMyN8u7Z92qs2kl5CnkqqR/KlEanu2iP2PMchcQzQ/uUdSG+4rLVC8sX5eNNy/3WrrbrS3+1fP8rbN3l7Pu1l6lp8Z+4ny/eaumnU+E5JYeMeaTOJ1axRN2+FW/wBqsK4s8A7U+b/arttYsbYLsfbvZG3LXMahawqzp8wVf4v71dlORw1Iw6mDffd2Oiqf4WWvrP8A4Jehho3jLOcfabHbn/dnr5Ru
49y79n3m27v4q+sP+CYMaxaT4zjXtdWX/oM9fqng+78f4T0qf+m5nt8Ke7n9Jf4v/SWfPn7SEu39oDxlC/3f+Eju2/8AIhrk7dkWVPn+9XW/tGr/AMX/APGZ+U/8VHdcf9tGrj7WRFk8z5T/AHK+OzyX/C5iv+vk/wD0pnjY6P8AttX/ABS/NnQafJuby3/hX7396tfTpN2Pk3L/ALX3a5eGTlXP3d+7733q2tNm8sId/wAyvury/fOXlOmt7j5n/fMh+9WrY3TsvnI6qW/hb+9XLw3Dqzl3+992tnT7h1Yfdx91V2VEveM5ROnhk85Vm2fw7W21L8kkPyJn/gf3ayLe8RlZ8MqL99quxzIy7IX2bl3bquMebYwlEsq0fmb5vl3fKn8NTKyeSsKbt0f3t3zbqq28k3k75Pnf7u5fmp8Mjsw/fcbNv+9/tVfKjD+6Pnt8Rp++VdqNs3fd/wB2sq7jcR7Plx975m+9WjeXkyv/AK5cL95WrMvl3Tb97Z/8dVaZZn3TJIwRE+RU/wC+qzrqNI5vItk+9/47WrqEh8z53+X/AGU+asm6fd9//Wt/tfw1MTaJja0tyu1PlZN/97c1c9qW9fuD5mf/AJaV0GoO8at/C/3vmT5WrA1SHG15H/4DUSiax2Pari186RoU+9/tf3qg2/u40d/+BKlaq2e64OxGI2feanSeTEvzp87fKrbP4q+Fl7x/QGDqc0DFa3RXXenzSfN5ar96mfZUmUeSkn+xWpbxzI3+kpG3/jzbabfWPlvvSHaqpuT+Go5oxNvaMxWt03vC+5N3/j1QSQvtVf4FbanzVpXlnub+L/gNVGXb9+Flb7zV0KVzkxlSHIVZLjbuR5MMzbWoWYxt5P2fcNv3mf5ahulhhbek25arxuVkd0dXX727+7/s120o8stT4zMKnMbtlcedMm+ZlVU+6v3a0obx5Lcwo+z/AGv4q5zTpH2h+7VrWbPtP2bbu/jZmreEYx1Z8/KU+c6TSbiZpGT5U/hRm/8AQquXH+sfzkZDsVU/76rCTVtqi2mSN9vyrtq9HqFzND52xVH3fv1EZT5tRSlzaF+ZYcv8izM33tq/d21R+yQzLK6blX7qbX+9RNqDxw/u3yGfbtVvu02PyZ1Z0fay/dp+/wDEEteUGWPc0m9i6/eWRf4qpNHbNvuX+dd/y/PuqzfM6yLNJNv3fxLUK2/ys/kqC38LfLSjKQL3vhK0d9tVoXRVZX3fMtaFnfIwZ3hVf4fMX+KqsduJ2Uum7cv3d33amk8yFRGj8fe2rRKMZe6VHnOj024eRf3KeYzJ/e21prcfZ1PkupZv4f4VrmNPuts0saQswVdz7v4a2bPY0aB5m2x/fb+Kud0ff1Or2nuWOhs7uGE+ciMq/d+WtNLtJFSzhRnK/Mm2sOxWa4+REUo0W5Pl+Zfmrd0+WzjiCJtfb8y7f4a6adLlncxnJVIWia+myTKrTTJzvVV2r92uv8M3lsu/fufcm3c1clpW+OL/AFO91Tckjfdaun8NyfaNs0yNEq/wqv3q9zC0z57Ec3OegaDM9zCNjxptbdKq/dauq0G1tm3XO9pIm+bbsVVWuF0uZLVY4fIWNGXcit8zbv4a67RrqZpbe58za33mXf8Aeb/dr0adP7RwSkejeH4/O8p0f7ybUX+9XZabawsEtk+QfK21V+9XE+H7jy1T54ztbc6xt8q13nh9vtEf77yzKyqrtH8zL/8AE1rGPLEn/CdBo9n5Nr5Lopdmb7v3dtW5l248ubLKn8TfNVeGa8tYV+fP+z95d1WZCjKX8jY2z96zJ96o5oyKjEqagqR7kd1y23Zt+61cz4gjS0j2XLspaXcvy/dauj1DZHH5z+Yd3zRR7fu1jaozRw/ImWaLd83zbaxlLlNIxOV1hUuml/crCm75FZf8/NXH68sMgEqc/e2fNXXa1CGuDC7+YN3mbo/vfd+WuN8QSQwq5d+d7SfMm3d/wKsuc3jznn3jC68uGaZ9vlbN21fvLXk/jpvMjaG2RVRdzRN/Ev8AwKvVPFU3mJvtodu5PnVfutXl3jC3hjjbZbMsvzebub5WqObqzX4fhPKvFKeTI6JuzIm5F2/erzDxRCity+CvzOuz7tekeKP3kjoiMpX5X3NXm3iCP/Xec+7bu2L/ABUub+Uzqe8cF4imdY5dm37/AN5a5ua4/fbH53J81dF4hWZf+WON392uUvF+zsfm3v8A3a2jyyOWUeUtW93tYo6ZVa0bebdH8j7f/Zq52GTdJ5KDFaNvePGqoNu3b/FWkokRkasF4jbod7K3+zUkd47Mdj/ef7y/3aoQ3Wfvj71WM7vuO3y7ay/xBzSNKxutuzyvm+Xa6slalnIkapM6K53/AHawre5RWWXeq7vlT+9WnYs8f39vzP8AIy1UdiZRN7TbjyFMexfu/wATVpWO+SPjcu59y7v4ax9PWFtvnD73yu1a9nN9onXzplfd8v8Ad+7R7QI0zTt5PM+ffkL/AA/3qvWtr50/nPwsf8Tf+y1Rs4xt2JuRa07ePZIg6IrfLTlW0kyo0zW09Zljih/5Zt8u7+KtuwaaFETep2/3X/8AQqx7P95H8j5WP5XVavxzMg8lHX5trLuavNqVvtHdRw/MblvqEy3SzB13r827ftq39qRd3nP5r/wL/drFVnXZNGy/3fl+ZqveZNMpeEL8rqqM38S15OKqHvYOjJF9Lj5Yk2bVb5tzJ/DTbiTbIkKbmf5m2qvy1VhZ23o42Ps+Vd9NW6muFD79zsu568mpyyqns048seUluZpo23w+WnmIvzbvlaqlxff6O80M+/dx9yo9QYMv8OGX7yp/FWfcaj5sapZwNt/ij3fd/wBqnTlt5DlHlkTtqj/wJhVX7zJVeSRHs9iOzbU+XdUdzsWZEh2/7W56q3kkyqzzI2FX5mX5q6Y8vN7phKPNEgmk2ugmfL/88/4apX0cM0ju7thf++d1TeT9oVXhmb5fmRv4qrXVi8dwqbGK/eZa6pc3wnHKn7pRk7TO7JtfY9WIZJlVkMzMq/Lu/vVNeWLzLv2bAv8ACyUn2eaPYibfv/3flo5eaMSffiTWMKK3yPv+fc6ru+b/AHq0bVbZvkR2iVf4W/vf71Z/lzW7s8PG75dy/dq3ayIbcJ50gl+ZtzfdpKXKXGJtaTqDr883+sVPl/4FW9p1x5OxJpm3Sf3U+bbXKW801ufkm+ZV3Ju/irSt9QS4eCZEkRP41V/vV3U5cxzSox+I7bS9XfSZUSZFZZHZfm/hrZXW9yqyXK5k/wBn+GvOYfESMwSdGYq/7rav3a0ofGEzf6HNMpVfuts+bdXo0ZHnYinGUTvl162WfZbTSEyf7P3qjbWHjjd5h8iy7dq/3a4638SJNImybDLu3Nt+VaZN4mRV/fTfLub5d33mr0acjyalM7bUNYeaPZZ367PvbVrNm8Qf6QzunyR7d9csviHTWmVH3Kjfek3f+y1BJ4ieJtqFZWm3N8v93+HdXRGpy7SOaWH5jevPEkPmTfvt25/733VrNv8AxNczKUebZ/FE2z/0KuevPEXzMjxxmWP
+6/3qwtQ8SIv7nfIYv7q0qlaPQy+r8pval4qfZvR1DL8zrs+asjVPEUfktbQzKzb/AJ1j/hrmNQ8SbY9jupVW2uzVlXWseWrQo+3/AHXrmlUlzGscPy7Gtq3iCWNfJedt395U/wDHaw7jUNzMiTKq7/kXfuqpNqTyYTf8y/wtVSaZ4/nwu1nrlrSlI19lylxb658sskisW+b/AHqkjmfzN/nR/L8ybvu1kLeOkbTed8v+zUU2qOzDY/LcNtrm+L3S/hOguNSmZVm+X+79+j+0pto+dUaT+FqwZL4zL8/y7futUkdxN5i/6tv9pvvNXNKX2TpjLlNuPUnkkV5E+RV+epmuvNY7IcbV/ibdurGjvEkUQvDz/vbamt7r5UT7399t9YyjynoU6kZG3DqFyjJ8qsi/N5n/ALLWvp+qPt2bMf3JK5iNfm3puI+8vzVbsL7azb5m+b7sa/w1zSjJHZGR2djqD7t6TfKq/MqtXQaPO6wi6d/vRbdzfxVxWj6nuZUd8MrfJ8m6ug03UvO3Q/ZlO197/wANYylM2p0+Y6K1u/Lj+R2xG3ybvmatGO8fzF+T5Gfb83/oVY9ncJfND88cUknysv8Atf71X4ZPMZIXRW+f5v8AerHm5TaEZy+Iv+ZNuTftfy/vN/Fur0zw65f4Y+YV2E2Mxwe3368sfEjRI/yv/ufLXqnhlU/4Vqixn5TZTY/8er9s8C23nuY3/wCgSp/6VTPqMij+/m/7r/NHnt5H+5itn+5Ii7v7tZ8kMyzSp5KhI22r/d21qSWb3CnenCvuTb/6DULbPOh3w/Kr7Zdz1/PkqnN9o1o4X+6Y95buWNtC+xmTzfl+6y1i6hIixu6bR91nWN/vNXRX1u7M+xNyK21v9paytYVIdsH2Zl2oyp5f3v8Adp0/dPVp0/7pyGuRXMzffkVY2+Zvu/N/7NXO6sfLb7NMjJt/irq9Us90n2l3ztXYism5WrmdS86Ni820p/drvo8vxGv1X3DmL3ZIzIgVl3bflrJuo+S/3Ntb2rQpG2P4W+Xy1rKns3DSfuWAVflr2cPLljc4K2D5YnO3lmlwz7Jtzbty/wC7VWOw/wBKeR/+AVrzWe5m/c/w/dp32fy4N8m3/YbZXfGpKMeU8epg483NIi0+zSZvkTCr9/d/FXS6OszRjeioW+VmWsaxhdFX523b/wC58tdFpMflrsRN27/b/hraNTkMJYXlOi8OxoNls+4r/Ht/irudFt/tHluiMzxoq7WX7v8Au1xuhyOrIyQqhrufDLPJInd2/vfKtVKXNE2o0Yxl7vU6fQbONVLwwySvHFuVY9q11ulRzeVbzIm3+J1b+GsHQVSSNIYfLZV3bpN38Vdbodm81uj+Su5fmfa+7dWTX2melTpxL+k28zWvz229G+b5f4f92vVf2f2ez8XWSeR5paWPyvMT737z7tcFZ2MK2fnbN7q/7j59qq1dz8KbeaHxZDNYW0yXEm1U2v8Aeb+9Xn4upyYWpM6qeF+uTjQl9o/S74m+G4bT4ZS/EzwZ4psLzWtAtYWk06RvMjXb/s/xf7tfMHwX+Jut/FzxlqtnbPJJqlxPJdXEdvFt3MzfdVa+P/DP7SXxy8E/FbxVHZ63cSaRb6pI1/DNuZY/m27a+z/2Q/i54Bk1Cw+IWgww/wBtLexyrG0G1ZGr4fKM3nJSdXY8XijhChlyl7GXNJamT+1t8IfiX4VvNLstb8PXji4+ZpGT93GzL91q+cLjwref29NDqUKxxWfyRf7392v2s+K8ejeLvA3/AAmfjzSLGWBtOxArL8vnMv8AD/eavzg/aO+FvhvQ7BP7N8yaaS6knlVYvu/8Crtz/FUI0oqG8j5zhTC162KenwnhOmrbWypGkPlSyS7du37tegeFfFPhLQYY45LmMTyP/oqsn3o1/wBZI3+yteQeKtUvrWZLO2hbz1+5I275V/vV5r8VvjRqWh6bqei6Dcs1xfQfYpbpW+aOH+Lb/vV8pTjKpLlW5+jYiUMPDlR6b8fP2sPDHxO8SXOt68kieCvB8XkaXpqvsbVLrd/rmX+7uWvh74/ftWePPi14mmnub+4trCz3Jp1rHL8sa/w/LU/xI8XQ6ho6eG7CHyYN/wA/z7mZv7zV4zqWoPcTTwWyb3j/AIv71fQ5bl8YyakfJ5tjJThaMiDXvjJ4q85oZnbZ/vbv+BVF4d+N17Z3W+a5bLfK67qyNQ025jXzr22x5nzbWasi80mGVftKbVb/AGa+jp4XDOlySjY+TlWxUZX5j3bwZ8XptQmFyb/ey/eVW/8AZq9K8K/Eie/uGmS8k27l/d7q+QdNnv8ATXVra4kT/davR/APxKvLEJvmb/b3fxV5WKy5xu4HuZbm84+7UPqez/ac1v4b6lazJc3gtYdzvCr7vmavXfit+1emqaf4Pe2RY45r1ZZZPNbbuZfu7f71fFs3iiPxJqEWnxzcfe2q9bPxG8bPp+l6BpX2m4LWNw0+3zf4tv8AEteQ8JHSx9HDNpSpvnP1y/ZX+Muj61p6/wBvTbYo/mf978y/L/DXvPwv+IfgzSW/4SrVbOGSzXcqyNPtRWr8PvDf7bHifwHo722j6kyySffZvm3f3qitf+Ch3xyWzv8ARNK8W3Ahuk3RRrb7ttYxwmL2ggxObYaKP308Pfth/s1W/iR/Dd5qFvDcyT/6M0gXbGv+1JXpWkfETwP4igku/BviKzlRfmfbdb1r+Z7wz8Wvj34y1ppn8Q6ldy3Uv+rhT/vpa++v2Nf2nvGHw7htPCXjGwvoYm8tW+1W7Kzf8CrGthsdh4c9SzMMux2ExNW0vdP1Vm1r+0rUXP8Aq3ZvmXb/ABV2nwN8RSw+IvJuQqlW+RVrwvwH8RrbxhoMWq28yusy7lkjr0P4U6tdQ+JYXhdg6tudl+avBWInGpFy/mPezTDwqZdNf3T6A+OHiZvDHg6XXLi5W3tTbMtxMx+Vf7tfzQft1fFi8+M37Sni3xiniS4vIW1JrW1jaX5I1jba3lrX7ff8Fef2lY/hJ+yVcbtSEV3q0v2Ow2ffZtvzMq/7Nfz+XSw6lqCXN/c75vNZmmX5fM3N83/Aq/WsJKNajGbPx7BYb9/KZjw6bDI3/LRmb5v+BVLcaXc+Tsmh3L91mj+7Wva6OnmSp5P7tn3I0laVrpcEf7l7Zm3Ju87ZTqVPZyPoadH2hx7abMtr/o1ssifwf7NULjTX8vztnz7/APV/7VegtYpGzfuV2fwL/drJ1LTobdvMd1VvvP8A7tZKvzR1H9VjGRxmoaVM0e+ZGLr/ABL/ABVl3WmzRsyOWV/7ytXZXCosrJ9mZwz7f7v/AAKsDW7Xb5u75GX7rbN1dVOpI46lOH8xwWpaSFaV5k3N82xv4q5fVNJTaHfhv4a9F1PT4JIX2bS38bVyuuWe0SD+H+7Xo0ec8rER5ZXOA1KzmikL9fn+8tfVH/BMeEQ6T4xwxObmyPP+7NXzZrFvt+d4cbvlWvpj/gmlF5Wm+MRnObiy/wDQZq/V/B//AJL/AAnpU/8ATUz2eFVbPaX/AG9/6Sz5w/aTYx/tC+MJFl
Vf+Kju+G7/ALw1xm1Fb5E4b/brsv2kZU/4aD8YqzFc+JrsZP8A10NcarJu3u643ba+Lz/3c7xX/Xyf/pTPKxijLF1X/el+bLkM5HyId+3+KtW1vnX5ERWb+81YNrNJyiOrD/ZarsN0I8fw7q8s5ZRhE6WzvH2r8isW/wDHa1NPuJI5C8x3fwp89c1b3m1F8l/4N3zPWnp906/O83y/edWoM6lO51lneH78gX5l2+W3/oVXYbjzId6PJ833/n/9Brmre+jV1m87f/DV23vk3BE+X/e/hq+b+U4qkToo7qFVDmFkOzanzUkl87R8Iylk/wBW1ZC6hu/10i/u/wC7SRapDlPJf7qbZfn3bq0+Iw9maUkkednmbdqbd2/5agkvXVkf+Gqbao8jFEMaj/nntqvJqiOrOm1v4du+p5v5TSnEtXl1DIu/5vubU/hrImussu9Pm/2v4qbc3ifxt95qz5b7zJndnz8nzUe/8RrH3iLVrqHa2+bdJ97/AIFWBfSPJIzuGfd9yr99cPMu8uvy/wALfxVlyS7mPzMP/ZaylI1jyH0ZHHIpVJhnd/dp0dv9sm+SFYgqMzeZ95mqfy/LfyX++su7dv8AvL/DVn7Okcab+m5vlr4ipGXU/X8LjJR90zY4X2/O/wA6tt2r/F/u1DqFqkkbHfuZn2s0n3q2ZrF45IoXhVVjbcirVWbT0kkKJH838C1zxO/23LtI52+t5priUfdVU/76qjdWs3mKjpjcu75XreurOaR22W33v4l/u1i3kcbSedt+VflSuyMTixFaW5j3lq8ay7NvzJtfctZ8kbrI0MyrtrauoxGjb4eP7rVSnt3WTf5Kq+z7391a9Wjzch8lmFT2kiPTZNrL87bW/wBitjTlmmKR/d/vNsqnZ26Ltd+Qu1vm/hrWs49q/fbLfxVtKXszzI+9ISOF5mX7NM29X27q0o98J3vBIh/jWT/2WmR2/k/cSPCvt/2matLT7GGNWffu3fM7M3zVzyqFxjEreT5jCTyWLfN+72bamWzuVXZAjRJ975au2tq6xssbyKV+dV2bvmqS1hh+0M8/mAyOqqv+1S5uxfLzGdeRQtMjvHubft2/xbqhu4fJX54Wbb/Fs+Zq9I+F3wgX4kpfz3GutZm0mQKy2+9n3bv9oY6frXVz/so2lzEUm8dTlyc+Z9iGf/Q6+9yXwu434gy6nmGAwynRnflfPTV7ScXpKSa1T3R6mGyfMcTRVSnC8X5r/M8KNvBGqzP8vy7v9r/danRqkkqujx71+V1Va9wn/ZLs51Cv45lGO408f/F0xP2RLCMEJ45lGev/ABLx/wDF16i8FPEe93g1/wCDaX/yZ0RyHNV/y7/Ff5njCzOrfuduN3z/AN6tS1aZpPkf/tm3y16on7IumqF3eNpSydHFgAf/AEOr1p+zDbWgIXxrK5PQyWQOP/H60Xgt4jLbBr/wZS/+TD+wM0lvD8V/medaau6MJDuD7/nXZ/DXTaUz7kjS23Mz7flTbXT237OFrby+YfF8rgfdVrQcf+PVq23wXtrZdq+I5znhmEeCR6ferop+DXiItZYRf+DKX/yZl/q/m62p/jH/ADOftVS3l86Z1RP4Fb+L+GtnS7iazmaG5T915S7vL/hb+Fa1bf4YWkUgabUzKigBUaDgY/GrEfgFIp3mXVXw5yR5XP55r0KfhDx/HfCL/wAGUv8A5M4a3DGdTndUv/Jo/wCZd0CRJJNt5M2+F9jR7f8AZ+XbXYaDdPHshSaFW2fxfe/4DXJafoLWEm437SJnPllcDP51p2MjWYJYl3LZ3ZxiupeFHHyVvqi/8GU//kzgnwjnz2pf+TR/zPUPDd150my53YkfZ9/5l3V3mi6slqyQv/yzTam35WZl/vV4bpvje604Nts1YtjcQ+3OPwrdsvjbd2YBHh6JyDk75yc/+O0v+IT8ff8AQKv/AAZT/wDkyY8HZ9H/AJdf+TR/zPfdJuHVU4Xay7nZm3KtX9Nvt1usyIxPlbpV3fKrbq8EtP2jLyzgWGHwnCNvpdsAfw21ZtP2nb604TwdAQeubs5P47azl4Tcfv8A5hF/4Mp//JmkeEs9X/Lr/wAmj/me03rLJthhm3MvzPu+bdWLqG+OFvs20fN92OvS/wBmX9kb9r79qHwRa/ETRfB2ieHdA1KJpNO1XxBrTo14oZl3RxRRPIFypwzhQwwVJBBrlv2vP2af2n/2StD/AOEr+Ifw+0u/8PSXSWw8Q6DrLTQpK4Yqro8aSx52kbmTbkgbskCvk6fDOZ184eVwlSeIvy8ntqV+bbl+Ozknpyp3vpY4aeTYyWL9hePPtbnjv2338tzzzVGRZJd+4fJt+X5dtcR4qmRrP/Q0U7fl2t8275azLr40XVzE6f2GFZjkP9rJI/8AHab4Rl8YfFbxZpvw88FeEX1LWNZvY7TTbKKQbppXOFXnAA9WJAABJIAJr3q/hLx9SpupPCpRSu26tJJJbttz0SPZfC+cQi26aSX96P8AmcT4pkH2VoXfYNvyLs+7XlnjiaHy5cvs/wBrZX6WaP8A8EIP2u/Feix6r4h8deCdDubiP95pdze3Ezwj+6zQwshP+6zD3NfK/wC2P/wTO+PH7KtzZ6V8Y7eCGw1OSVNM1vSZFuLW7ZApZQch0YBgdsiqTyQCASPlsp4dzHOsx+pYGVKpV1tFVqV3bV8t5+9om/dvprscWHy6ri63sqbjKXZSjr6a6/I+I/FXnTzNsdgy/wDLRk+Zq4PXrOaSSWf7T95/vbfmX/Zr7z/Zo/4JCfHX9tnVb1PhJqCfYNNkSLU9b1YLDbWjSBiozuLyNhSSsasRkEgAjPefH7/g2U/at+FPhG68cad8RNI8U2dhayXOoReHlb7RBFGpZmEU/lmXAH3Y9zHoFNXjOFc2yzNVluKlShWdvddakmm9k/ftFu6sm03dW3Ir5ViKNb2FRxU+znH8ddPmfkv4gW1+yPsLNtf+GuM1C3jdnf5v9mvqn42fsd2vgT4d6l45h8evdvpypJ9mk00IJN0ioRuEhx97PQ9K+aNWt1Enku+F/urTzrhzOeFcXHCZlT5JyjzJc0ZaNtXvFtbp+Z5uaZbi8uqqniI2bV909NujZzY3qzJ90U+3abc53s3yf36ffRoku9EZf7tQLNMrK7wq7L/D/erzeb+Y8o0Ybr5WfZ91Ktwyfu/O3s/+7WXDO7ZjRFz975f4a0LV42j+RG3M/wB3+7WUo8poadlImVff/B95a19NLtIm9MfJWTYxwrHsR8j+9/drZsVfcqTOqq1R7TlLjT5zWsfOkKee6o6/drbsGhl+/wAHZ8jKtYtrD5hX7y/PuVvvfLWxp9p+7V49yqv+t3JXPUrGsaPLI17FX++EX/gVbumrcyR702qGTc/+zWdpSPtT59iM3yfL96tK1heRTCm4+Z/FXLUxUfhZ20cLItx2aRqs4mxF/wChNWjZr9l3JNCroy7n/vU6xsfMWONwrL/D81WWRIZsQosu1/7lebiMVGMbHsUcDy2Yy2j+ysnkptLPtXy/mqzHM/l73ePar7kkV6HjdsfI2/8Ah
3fxf7tSx2ci/uXRtn8S/ery6laVSMYnr0aPL8IKzw2/32fc25G2/wAP/wATUUN1JJ8nkrs/jaP7tT3UDyRlIdq/Nt2qnzLTlsUhhZLaZd8n3fk+9XO46ndGnymRJMkkv2aa22K25Ytv96qrXjtIh6J8ys2yrsmnvCpd7mb/AK5t/DUE1nebfsxf733ZN+3dXauQ55U+WXu7lNVdZV2Pn5Nvy/8AoVSyRpqDFHRg+/8Ah/8AHqsrp+6Rsortt/e7aks7F5I1S2Rl27d235mat40+YzlGdPczZLGHc8RhZG2KqMq7W/4C1Tw6a8cjb0Vvl3bm+9/u1pNY/wCnEXMLB9n72P8Au/3dtTLpqSSb97Lu+626uj2JyuMfekc7eWu23cSblZk3fL/CtRrpaXDQv+73t/wGtqazS4uFTZt2vt3L/wCzUl1Z7pEdH2BW+Zl+61bRp8sfdOepzSkZsenvFIkMztkfM0kablX/AGaLrTdrb5ZWLr8qL/C1bCw7m/czbh/BItM1TS3WT7Y+35ovkbf/AKv/AIDWEo25Wa049DH8mEBXmf59m5NtWWvHtFSZ3+7tVtqf+g1DfF0ZYU24Vdrs393/AGaz7q4eZUmRGWJV27VraMvshVpx3iasmoIyvGkzIVbd81RLrkLbpk+/I/8AF975ayby6hmX/RnZjGnz1XuL54Y/kRlRv4q9ChL3PePJrUftG/JrUn+u8759vzRqtV217C7HuZP93fXOS6g6sPJf5mf+Kof7QkVmd34X5n+Wu+nPm3OSWF9pqdVHrB8xn+b5du9Wb5adea1tt+rfM33Y22/N/wDE1zUd9cou9JlG6ludSn8nY7/N/Ay1ftIxOmjl85QLuoah5ibE3Yb+Jf71Y+o6lNDGNn3mfa7b9tR3GoPIqfPg1l3DPJuM24/73/oVZe25h/2a6fvcoy81CZWXD4Zn+b5KoTXzzMrojMn3UarjR/aJNjuu7ZVZrErCv3vl3VhUrGjy2XxcpT+0TQ7/AN58zfc3J92hrhGbf/s/8s3qdbPbvd0Ulv7zVHJYPHDvRP8AZ+X7v/Aq5ZVv5jnqYKZXWT5flDbW/hX/ANmpqqjSf3fl2/71Sx27xyI7/KrJteSnN8q/Inzf7lL2nN8Jwyw/LLUhjV45G+66s/3aPMfzG3809ofMK/3dvystRv8AMqRvbbGb+Kl8XMY8vNIkW+eN9m3lv4qtwX0MaCEPt/8AZqztr2snnJMzbqfBNbNMjuF+X5V3fw1lUj7mptTlyzNi1ukZtiI1XtLeKOZnR9qN83ypWRb3HlyK7vub/wBCq5bzO27ft2r/AHa5PflHU9KjU5Tp9NndZt8cyqsjfOtdDZ3Dqv2lNqr93bu21xlnJ5cKzJN8rff3VvWd07Yh+VkjTd83zbqiXw3O6nI7LS7x/PR3+VfvbY62rPyVmCJ0+98yfdrlfDd+P40Zz935Vrp9Pjdo3S68zezLs+X7y152IlLmPQwsYyiX/sM0LDemwTfPFtf71ereHyj/AA7UxggGylxnt96vMrODbcLNM+Sq7drfdWvTvDqqfh+qKMgWkoGT7sK/bPAWTln+ZX/6BKn/AKVTPqMogoTkl2OHWOGORkR5NrbVdW/i+WiS1hGXh2t935f9qrMdvDIqfaXZFVvk2/N81L9l+0R7JkZh83zKjfw1/P0Y+/7p6uDomFfaW8TfOkgDbvljbau6snWo9sZebc+35tv3Wausu4d0zohYWy/KjMv3mrndQhSSN03sWk/vPXXE9Cnh4cxxOrKPnf5lhVNz7V3bf9mud1K12xb32s+3btX7u2uy8Q6PcrHs8tWVvm2q3zVz+paXuj85EZCyb9v92uqnL3YnoQw8eSRx11GkeYd6yOvzN/s1nTWsPlrsds79zru3V0mpW8LRv88YMi7vu/M1Zq26LHv+5tTb9yvSo82nY5a2HhIwJ7PdcbymPOf+GoZLVzJsTdhv4WX5a15LdI3Hk8/xf7VRzfOq7Imc/d2/3a9SjKK+yfP4qjCJSt4Z41+Xbtb+Fq1rH942x4VXb/Ev3apyRpHKZEm3H7q/3qksRtmZ0+9/eX71b/YPGqSinqdX4f2Dakb7v9pq7fQbySRkhuZlii+6rLFXA6Ldx+cYdjI7fLuX7y11uh3zqzJNMz7fl27vvVfLzR94iPaJ6JoN5ZxzvZwOr+Y23ds+aus8P3zNa/Zt671T5IWbbu/2q850fVHh2JGnzr8zbV/8drq9DunY/aXnzufd5e7burm9pynZRqcseU9J0m9eORJpkjQxxbdv3t1epfBfVLex8QLqVz8629rI6bU/2fl2/wC1Xiei6puaO5mmVP4XWvR/hrNc3WoXFhbIsrSRM0TR/wC7XjZ1KUsumo9j18qlzZjBnpXwl+Hfw31b4EXmq+OZlsbnxt4oZX1DULhVk8uNvvL/AHal+Aek/DfwT+1hb/D34deOYde0iNo23W7bo45N33a+bv28viA/hvwT4O8AaHrHlSQ6W1xKsO5fL8xvm+b+9Xt3/BBL9mG7+JXxmvPiLrM8kum6VbLcXjSP93b8y/8AfTV+aZdTxPsddD0uK5UJ81Rn64ftXyxWfwdsdRkhkhghs40SOP8A5Zttr8+/if4sfxZfXOpX7/JH8sUjNtVm2/xf7NfcP7X/AMZbKbwzD4Vhs4/skCHKyL95tvy1+YHxY8YXt14uuf32yHzWVo1Taq114/ERryjGEj53hbAzwuGlUqx5eY6K/wBF0HWLXffw27xxxfPJH8kjN/vfxV8sftHfD/TbWyuU8H3KyXk10yy/aLLb8v8AstX0T8PfEmja1cR2GqpJFa27Mtx5L/NJ/wB9V1Xir4A6b8RtJa80DSo7S3hRmWaZt3mVrl9SDnaZvnEeX3on5CfEiDxDp1vMlzDJFIvyvuSvKrrUPEmk28mxJAsn3pNtfpH4s/Zf0S88SXdt4i+zv5L/ACSSfd+Wvn348fB+z09Z30TQVeL/AJ57PurX2eX4rD/DKJ8DjMLiqseaB8k2uraxq1x5L/O/+1W1rHhXWNJtEuSi/c+7W+3hHQdJ1FLy2hmQszfu2ib5ai8Ua9NeW/2BEXasW3dtr069ZSlGEIniQwteL99nENqn2hfJ8xVK/frT8LyTXVx5KfKfu7qpWHh172++T5gybvlWvVPhT8Jb+6mS8e2ba33NtZ1pUqcC6EatSqd5+zb8Kbnxl4ytdBezkVrh9kU2zcq/7Vev/txfsK+PP2c/h3Z/F3xPpUlvol1PHBFeXDL+8kb7qr/FXa/sj+CX8L+MrDUtSso8Ky/M3ys1fZn/AAW2+DOuftE/8E1vCnirwlF5114T8QQ3l1tk+Zo/L8tm2/7NfIVakp5jGD92LPuJYPlyj2kdT8Qta1zTdPhH2l1X+5urc+GvxM+G+i6hDdaxp8dyyv8Ad3bdy/3q5Lxt8H/G0OrbNS0e4ETfKjSVe+F/7PviHxB4kisH02RRI3zs33a+meAwvsOac+U+XnjatOrFwpcx+lf7E3if9k74svC/gnV7HS9ajfb9jvkVWk/2q+7rL4d+CfHHhX/hG/Em
j2895a2+yK6WBVZdtfkX8O/+Ce/xqXULbxJ8GXkhuYXWWJVb5m/y1fol+yjrH7S2m6vZeBvjT4Sm0u8jVVe6jf5bhf4vvfxV8bmmHrUo89KfNE+wy+eGxtK1aHJM9v8Agj4f13wbb3Gj/aZGs/tG1Gkb7te6fB3Xrb/hLLbfGzr9o2su1qw7XwTZw6YNQMLBJnVm3JuZmrT+C3jXSvCfxPx4gt1fT7G3mup7m42r5axqzbq+WhRp1sXCMv5kepXj7PKp/wCE/OL/AILjfteaP8dvjpZ/BPwL4hkuLHwDPJFetDuVvtkn+s/3tvyrXxdp9vDNdeckO/bxLuTd81df8ZtQTxt8cvGHieF28rUPFF9cQSSJ80kckjMvzf7tULfS0ZQ8j4Xf86r/ABV+y0qMaVKMI9D4PBYfmp8xGunpHhHh3jZuSNX+VWq9HYouUTzP91nq1Z2fkybPszOWep44Xabem4I33o9v3awre97p7dOjyx92Jl3Fq726u8K7Nvz/AMVZV5pu5XSYLt+8jKu1q6eaF45Am/5Pu7W/irG1a3SZmd0YN/zz3fdrKj/LIyqUeaGnxHHahZhWabewf7y1ja/HIsy/O23au9ttdTqVrtZ3Tps3O38KrXP6p532hneZnVU2pHtrspx960jyKkfd0OR1a1hk83ZwzNu+WuW1yx3MX/ib5fmru9QsvmkHyotc/qOmja2/j+Fa9GnHlZ5Nan3PP9WsbaOPZtr6O/4J02sdtZeMDEhUNc2Xy9uFm6V4brWk5UnY2N/8X8Ve+f8ABPq2a2sfFYb+K4sz/wCOzV+s+EGvH+F9Kn/puZ6fC6tn9J/4v/SWfLP7TRkX9oHxkEGc+Ibr+H/poa4Rrry/k/ir0X9qC2mT4++LZht2nX7k/wDkQ15xIuJN7pk79tfF59H/AIXMVzf8/J/+lM8fGSi8XV/xS/Nj45ts2+FF2t/Dsq1DeddzrhaobXjX5PmX+9up0M/zbHh+X+GvG5ehz/FL3jbsbxPlLx7l37srV9dSDK3kurN/B8n8Nc5BN5ce+F+W/hWrEdw6yMiblSiXxcxEpcx09nqTzYhhRd33d1Wf7eeNVSba235a5WG8mjP2aN9p+8+2ntfbRlNpP95qfNy/CYVNzsIdaTydjurFv7tDapthVIdvzfNXJLqgaPyZoP8Avlqmh1J2k+WfAZfu1rKRhyo6dtWTeuHwrffpn25IZG8jbtk/vPWJHqXnSLC6KzL83mVMrPIymbbt3/w1EpFRj73ul+ab94+zafn+bbVdmfyf3m1X+9/stTlL58t0wP4d1P8AJmMj7Id/8O5kqPaGsYlCZdq732qf/QagmtIpMfxbv7ta32Hy1Xjcyt95f4qa1n0TKg/x/L/DWPOaRjy/EfR0djbR3BedN+35fl/iqa4tfMhHkw427tm5fvVYs0hZfkhY/wAKfxNV1bdBahIX2/xOrf8AstfGVJe/7x+k0zG+zpDGk6Bkbf8ANteo5Le2jZ/nYpv3bpPvLWpcWPzbN7Kny7d38VVLuPaqI7r95vlb+Ks5RudEcRymDqASV3869ZPn2/L8y1i3sMZ/49oW+Vf4q6a6s0XY/kyK7fLu27lrLvLV1md3XZ8/8XzV1U/e0MKlTmj7xzF5azec2yHe2z7u6q0lpc28iu6LtX79bdxEjXDud37uX+FflqL7G8m9N7Hd/EtepRqS9lZHgYiPN8Rm2du/2j9yiy/N95q2rG3ufM2Jux/dVKdpeioyvOnlp/e/ire0fS03LJsVQ392nUrHPRo9ypZ2O21Fy9s3zPt2tV6x02GRWx/f+833avx2aSK0MO6VY/mRm+7WhY6TMu534TZuVW/irnlU9nA3p0485mNCkTI8KeU33fvblqxbR3LSfaUTMq/8tK1rjSTI3kum3+8q1J/Zu1fsyI22P7v+9WMqnNE3jT989g/YH+E+qfGT4n2nwi0LU7e0vfEmuWVhBdXgcxxPIzKGfYC2BnsPy61+l0//AAQ68IeBZJV+Nf7amgaAt1fvFoLPpscRvIlxhmE9ymJPmGY0LhePnOePhH/gkfJNYfto+BRbStFIPHuko7RsRkNMVYcdiCQfUGvpL/gs/qOp3v7eviG1vr2aWGz0jTIrKOVyVhjNqjlUB6Au7tgd2J71/VXh/i+KcxyvKMly3G/VacsPWqykqcJybjiZRsudabr5X0vZn0GGnj6lShhMPV9nFwlJvlTek7dfU4z9tf8A4J+fGP8AYo1y2k8VSQ634b1KRl0vxPplvIIGYE4hnDDEExUbgm5gRnazbW29D+xX/wAEyfil+1noFx8UPEHiW28FeBbNn83xFq1sxa6VFYu9uhKLJGhXa8jOqqcgFirKPftN1HUfGf8AwQi1C5+M9/MRpmoCLwfcXczB5I4r6NLdFJUlgCZogORsTG5QMr7h8Q/iH+xz8Iv+Cdnwv0r44eCde8RfD3VtH02GCDRJJZEa4Ft5w+0OksBbLiRsEAF0zsBUY9jMfETi7D5Osuprnxn1qphXVp01LmVNKTnCnKSh7Rp25G+VNS8iK+dZlDDewir1faOnzRSd+VXuk3bma6XtufJ3xy/4I5614d+Fd78X/wBmT496L8S9O0iCWXVrWySOObEYDOIGilljlZUJYxlkbA+XeWC15b+xJ/wTy8d/tweHfF2seCPiBo+jz+GUt0gtdUhlYXc8xYqrOgPlJtjkO8BzuCjZg7h9ifs8/t3/APBO/wCDlr4kP7I37MfxHlvZtJa71fTdI0qa5jlhgDESTBrqVYo13ENKV+VWPXoeW/4I5+Ph4X+Bv7QvxL0HShBc6bAmq20AdRGuy2vpY4wAgAwVIyBjBGFGOZnxh4iZdwnmNSupqrSnQVGpWp04TkqlSMZRnTi5QstlJWunffZPMs7oZdXlNNSi4cspRim+ZpNNJtfM8z+Pv/BKX4Vfs9/CTVvEXjL9t3wvF4w0jTFuJfC01oqmeYgEQRhZmnO4H5W8nnglVXJXS+HH/BGGS28A6b46/ah/ad8O/DxtYto5bHTJ40eSMugfy5XnlhUSqDhkTeAR9418UX/ibxDqviObxhqWt3U+q3F615PqMsxMz3BfeZS/UsW+bPXPNfof4k/bV/Y1/aV8G+GfCP8AwUr/AGffFHh7xXp+jxPYa59iuY0uYJo0P2yIxlJRHKyFwpSRAOVdsmvf4jh4k5Dg8PTpY2piOeUnVnSoUXVglFcqpUnZSjzX5m3KSVvn2Y5Z5g6cIxqud2+ZxhHmWisox0ur77s8I/bI/wCCXvjP9mL4ap8dfBHxS0jxz4Ie4iifVNOjMc0IlJVJGVWeNoi21N6yE7nUbQOapfsVf8Eyfij+1noFx8UPEPia28FeBbNn83xFq1qzNdKisXe3RiivGhXa8jOqqcgFirKPbP2qf2fU8HfsG33j39hL9pnXtd+Dl1qqz+IfCNyFk2EyGOSVZvLSZI1k8rfbOuDnzSTgVo/8FGdR1Hwr/wAEwPgj4Y+FN/N/wh+o2liusTW0zMs8gsxLGkjbRkGXznIO354x8uR8vnYPjPibHZVhcvw+Mi8
RiMTOj7aVLknSjCHO1UotKKr9FHWDutbmNLNMfWw9OjCqnOc3HmcbOKSu+aL05/LY4D43/wDBG/xDofwxuvit+y58cdL+KNnpiSHUdO0u2X7U5TaStv5EkyzuFbcYyVbA+UOWC14l+xF+xZ4m/bY+JGr/AA60DxtY6BLpGhS6hLPf2skpcqyxpGFXGAZHQMxOVUkhXI2n2X/ghp4i8f2H7Xt14c8OT3DaJqHhm5fxFArHygsZUwysMEbhKwVScHEjgHkg+0f8Eyrfwvo3/BTn47aF4DuY5dGWHUTaup3/AHdTi4VyoO0FnGBwcDlsBqvN+K+LuFsHm+XV8Sq9bDUYVqVbkjFpTnyuM4pOHMt46arV+TxOY5ll9LE0JVOeUIqUZWS3drNbX7dzitH/AOCH/hnR7XT/AAz8X/2zPDmheMdTT/RdBtLNJVkZmKoIvOnhlmyRjIjXnIGcZPx/+0/+zZ4+/ZP+MF/8GviLNZz3tnDFPDe6dIzQXUEi7kkQsqsO6kEAhlYcjBMHiv4k+OfGX7R1x8TfE/iW6vddn8WLdPqNxJucSLcDZjPAVQqhVHyqqhQAABX1l/wX0WJf2lfB5SCJWbwOpeRYwHb/AEy4ABbGSBjgHgZOOpr6DKcZxhkvFmCwGa41YmGLpVZNKnGCpzp8kvccVeUbS5fe10vozsw1TM8LmNKjiKvtFUjJ/ClyuNnpbda21PhKus+A/gCb4q/Gvwl8NobI3H9ueIrOykhG75o5JlV87SCAFLEkEYAPIrk69C/ZL8Y23w//AGn/AIfeM7y1E0OneMNPlljO77ouEBI2kHIByPcdD0r9JzapiKWVV50PjUJOP+JRdvxPdxLnHDzcN7O3rbQ+sv8Agtb8f/GOgfF/Qv2Y/h7rVzoXhXwz4btpX0nSZGtoZJpM+WpVCAyRxJEEXGFy2OvHSf8ABJr4i+Jf2nPgL8Vf2P8A4sX0viPThoAn0KLWJHn+ziRXjMYZjlVSVYJEAIKNuZcHkeW/8FwfAmp+Gv2zv+EunsnW08R+GrOe2uNp2yPEGgdQTxlfLQkDoGU45ye4/wCCFmh3XhnU/ip8eL6wkOnaH4XS28/Y2JH3NcOi9iQsCkjkjcvTPP4FjMHldHwGw+JoRXtIwpVIySXN7d1I3ae/M5txbve10fHVaWHjwhCpBLmSjJPrz8y6976HwNf2N3pl9Npt/bvFPbytFNFIpDI6kgqQehBBFe7f8Ey/i34E+Cn7aPg/xr8R5YoNLaaexe/nKBLKS4heFJmLA7VDOAzArtUkk4BB8O1vUv7Y1m71f7OsP2q6km8pCSqbmLbRkk4Gcckn3r68/wCCJnwY8D/FX9qu98QeONFi1FPCfh9tS021urRZYBdmaKOOVt3AZNzMnB+YBgQUFfrvHeLweD4Ix9XHRbp+xkpKLs/eXLZPWzu99Ut9T6TN6lKllNaVVacrTt5q36nuX7Zn/BLT9sb46ftG618U/h78cNMutE1q8WWxi1fXLqCTTYtoxCESN18tDkLsOSOSMk5yf+Cq+saf8EP2HPhx+yN8Q/Hv/CWePIri3vLnUpZBJLHDCkqtMS4LhC0nkxk7WdY2JJ2sp+f/ANpb/gqD+2J41+OGr6r4X+K2s+E9L0zWJotH8P6W4gS2ijkKqs4A/fv8uW8zcNxYABcKPoX4teLf+G8v+CRt7+0F8Y/DMP8Awmvga9aK28QW+moJLpo54UkddoXZFLHKBIq4QSRFgvyKo/FqeU8X5HWyCvxFKlPC06tOEY0kozhOcXGnzvlXNFac6g0m+ktz5aOGzLCSwc8a4unGUUlFWabVo301Xe34lP8AaG8Y+Kf2MP8Agkz8M/h58MzL4b1zx6YrjXL/AE7fBcsssRuZiZAQyyMDBGTnPlqUGFAA8Z/4JJ/tO/EzwD+174e+H954x1K70DxfLJp2paZdXcksRlaMtDMqsSFkEiIN4GdrMOhr1P8A4KM2t18Vv+CZfwG+M2j6Qy22j2ltZXqxK5FuHtFhyck4XzLYLls8svPPPzl/wTD8B6l4/wD25/h9ZWFk8qabrH9qXbqpIijtkaXexHQblReeMsBznB97I8FleN8Mc4rY6EXUnPFyqtpNqcZTtq7u8UouOumljswlLD1chxMqyXM3UcvVN2+7Sx5l/wAFvvhPZfBr4qfF3wZonh6HTdOkvIb7TLO2hKRLBcPDMBGvQKC7AAfKNpAAAwPyc1SHZJ5P3jtZv92v2I/4L7+L7fxv+0H8WZbW2EK6eLHTi43ZkaBLdGY5P94EcYGAPcn8idWs3+d3hYqv8X3d1fkniPiMVVWTzxH8SWCouV97vmvfzfU+T4ldWccLKW7pRv8AichdW7qrP8zCqDWrwr8nylq3Ly1ePbsThqo3Nk24Mj/x18BGpynyHLzGfD5nmM/zYX7+2tO3bC70+Yf7P3qhjhS3kZ/J43Vo2sbybfJ/h/vVEpdzWnT5i3p8aeYqOGbd95dlbdnbvN8k0P7vf8+6s+xj43+R977+1/u1taWs21Y4fm2/+PVyyqR5fdOynRiaFnbv5Zm8lnZfm2763rGPzoD/AKM2xlVmjaszT4UVW2fM29WX5q3tJimaREd8fPtdWavPrVj06eHizY0exdVSHy1Xb9z5vvVsafb7m854WQx/Kn93/gNM0XT0jkRJnYln+Rmeui02z32I3cbn+X+Jlb/eryfrXNLlPZo4X3SPT7VFU/udki/5+Wr66em3Dw/My1YsbFIUe5+ba3y7m+XdV+OxSTc+xldVrgqVuWR6VHD+03MtbPbh34T+L56sJbzKzJ5Lf9dP4WrRktbOOON33L8nz7aj+zzRwo6OyfOy+W3/AKFWMZfDJHVGjy+6Vvsszfvw+xtq7WakXT7byw7/AMTfIy/e3VfWGSSG4/fLsk2/8BqW3s5o4/Jhh+Zvl+V/vVtyzluXGnGPxGNcaXZ3UhdNuxfk/wCBVBL4fh2h7lGLr95W/u/w10v9myKyWy/embdu/wCef+zTZ9Hf7QZptpXZsT/er0aNP3QlGHL7pzVvpaW8nyBi/wAyurfw1ZtdMRYfOSGbK/cjX7zf7Vbk3hmGNVcPt3ff2vuqxBpFtbru82R/Oi2szfer0adGPLoccv5TmJdNfKx/bG27fvTfM1VvsDrtmd/njbb/ABfe/vV2N/p9s80Lw/M2z+Fqp3mlwxx7HRt6t/E1dPs+U4ZU/iuc79khaMo6MvmfM0i/3qh+xJZw+ZCm9Gba3+zW9JY2fmK8MyuG+5uf7tULixdG+S552fNu/hp+z15Tn5ZSkY8apJ/qX3OrfdVf4aLqD9233vv/ADqzfL/s1d8vyZAjvj+H/Zpt3apcKrwuoZvvVlKPKZ8pzerWvlt5zp87feVqwLyaTzR/cVN23Z96uk1SGa3Xf5zFt+1ZG/hrmdUV9xMz87vurXD8J1L3oEEl9MzNB5yoW/26zr7UGVim/wCT+9uq55L7Qj220/w1n3lmdzJM6/f+7W9GW5nWw8qnwkMc3nKju+9Y2/ufepjXTr/rtzFvuL/eqw1nNbqqPbMqyf3n21XmtU3B0h
2/7NdUa3N7sTqwuW82g5r55d/z/Pt27qZJNNtVJudv8W6kkXyzvdF2L91dlQv5fzyQpt/i2s33aqVafLofQYfKfcigEyecr7/9ylb98u93kZm/ib7tV13tG3nO29vuMqfLtq9Y277Tc/Nt+61ZSrRjG5p/ZREtm8bbPlG7+89Syae7J86YH+z/ABVct7fzmRNkmf8AZTduq7b6bNMrzvuxGn/fVc0sRp8Q/wCyeWJhyaCkipvfYzfcb+7TJtNjjVvLfjf87f3q66z017pf9Tt+RW+b5qq3mjoJFhhhVW37fuVyfWoy0ZwYvK/d0OVm0hFXG/afN+df9moJLH946P8ANt+VK6W4sUhm2fK3yt/urUP9nJuV5HXDfLWlPERj9o+VxmFlGRzFxZvHs2Q8fdfa21dtM+ywrGqFGb/Z31vSWMKzcpsXZ/y0+7ULaei/OkzFt/8AF91a3lW9w8ephzCm09933Nyt/DvqNbPdcLsT+P8AhrZvLHdI2/gf3qguoXVQibU/iqeaXui+r/zFOOF/vwu3y1djjmkkZ/MVGZ9v+ztqGGHH3/m2v97dWlbx7ZEh2K3ybk/2qr4YGlGM4ljT1aNkTZ97761t6azqph8njeu3/aWsqG12yLsTaWf71benwv5a4hVd3yo33a5vhO+j7ux0Oko/8c0hTf8A6pfl2112n2sO3zo9yvu2vufcu5a5fR03CF4YV2b9svmf3a63T45o1CK+8bvmZvvMzVxVI80z1cLU5Ym3Z2G79z5efutukr0fQURvA6orBla1k57HO6uB0iHy7dZXRW/5616FoMUY8JJDEgKeQ4VVHbLcV+2eA0OTPsy/7BKn/pVM+lyio5VJX7P9DlFtYZdlsjsZNy/u1/harUtrcx/uYfm2uzPtermnx3MM3k3SK39xdvzLU0MPmW/mb921/u/d/wCA1+DU6M+Y3o1pRlfmMDCMqJDbSTIzfeV6wdcs/Ojd3hj/ALvl/wAVdlqEbi3e1RFb/ZVNtYOoWMLQoX+Uqm3/AHq6PZ8p7mHrfaOI1q3fzGuXtlES/L97+Jqwr2zf54fmfd8vmL91a6zWo0jiWF3VQ277v+98tc/q0iRqzvul3Ov8XzV2UqMuU9aOIiclqVrc2qsiQKQzbfuVk6hD98OmHX7y1011++mf5Nv+zu3ViagttbjyfIY7vl+auyEJR6CqVqUTAuF3Sb03CRl2pVFo3t/4Gcs21/71al9a+Wyqi7D/AHaosqBTcv8AIy/fVf4q76cbHyWYVOb4SKRPs+7em7d/Ev3qLWP7Pdb0Rm/2lpxkfyxvTa7fw0kW6ZneE4X+L/arsjHmifN1q3Ka+lx+ZcBPO2qz/Mzferp9HkfzfOR/Kdfvsv8AFXK2FqHHKbXZflVfvN/tV0elwvHjL7H+6zK3zLUVOaRH1jlOu0u827fJT+LczMtdLpV88dwr3O3/AGGWuN09XjxsdmH93ftZq39Jme4VURF+XaF+f5t1cVSXunXHFR+yd5oeobbgJc/JKvyxMvzK1etfs/68mn+JvOmm+aO1kVI2+7N+7bateG6LdbtUTem54flWTf8A99V6R8I75G8WQwncr3D7P7rV5mYU51sHJHfluK9liozJvil+z/4q/aE0HSn8Kwx3OoWaNbfZY38z5d25Vr7K/wCCPfhbxf8ABT4b+K9N1jQ9Qsrp7uGGeGb5fl3fw/7K1t/8E8vgq/wZ+JjeNvFOgSXelWSyXpdf3mNq7q+gvhh8eP2ZvjNf6v8A8Kp0a5l1+/uZGubSCJlKFW+Zn/2a+FqqnGhyP4j2MZWqVMU3yc0Opyv7TmuTLvR5pHMMS72X7vzV8RePrHTZNeu33ybppfNlaT5lVf8AZr6n/aG1z7VNeW015JB5bMqKrfK22vnpfDqatp5mvIcmR/8AXfd2t/u15MY+yZ6EfdgeWaPq1h4f1R/sd5u/jlVvu7a9L8G/H+wtbaW1v9YmSwVdu2SX+L/Z/u1538WPhzreizM7wSYb51WNN+7dXz58T5vHmk+IrR7aNgnm7v7q/Kv8S16FHDupLmiefiq1CPu1T608XR+GPE1r9s8MeGLi5E0TM9006+Xt/wCBV8+/Gi38Z6Mvnf8ACt45rf7rSQorMse35d1eY/8AC5PivJJ/yEpv3O7fJHLtX/d21et/jl8YLjT/ACZpFltmfbtuk+WRa9WFPFU5RbR48vqM+ZRZ5d8QfiB4J1Czlhm8HtFPDuX5vlryHVrVNcvPs+j6a23eu6OP5q9u8beGbDxhqj3mq6VHEq/xW/8AFUOh+E/Dekx+Ra2y7YW3MzfeZq9ehW9h70nqeFisJKvP3YnK/C74B3OpXiXOqwthm+f+FdtfSfh34Z2FrpsSabCq+Snzxqv3q5bR9SsIY1sLZIUaP7qq/wAzV7L8IY7PXLT7HeXLJcbP3TRrt3VrWxyrQDD5b7HVfEdd8MNP0qz8Jprd4lmj2rR79z/vG3N/yzr9A/2ePCuifGj9mnVPAvifzXsLu1aN4WXcsn93/wAer4kh+Ad3D4fPim5ufJtoUVmjb5VVt33a+0/2H7rxLd/DwaR4ZsxPA0amRi/yxxrXz+Pjy1YSPqsBH2mAqQmfm7+3l+xHqX7OPxWhvPGmiTP4U17b/Z18vyrDJ/drX+Hf7Bt54i0+z8Q/C7xbD5Eksb/Z5Nr7mX7y1+rv7Q3wR8GftB/Aq68DeObBrnZFIbWRl+aFm/ir8vJvg/8AtG/sR/EpLHe2peGI7pnsrqNmZo1/hX/gVaVa05YWNSn7zj8SPNw2GpQxLo1fkz7P/ZV/Z78beEL63v8AxRCv2aGJfK8mBVbd/FX0p4u0nwlfNYQvokbz+asUU0i/Oqt8zfNXzf8ABX9szxCvhmF/E/hhjFJF/C+2SvY/hz4i1LxvqFtqb3LIrbmit5G/1a15H1irK66yPSxGBlH4djtvHsGmeGNBR41by1i3RM38TV8lfHzxpeaP8KfiH4hsLzyn/wCEXvILeRn/AOei7d3+z96voD4+eNg0Y0RrnZ5ab9sf/oNfMX7Ukltp/wCyj47165mbz5LCG3WFU+WRpptu3/vmuLCR9pmsEo7SOmph/Z5VN1Ox+b2j6HMqok1y0h+zr5s38LN/erdsdJhaEQwzKqb9m1k+Vmqa1tUt5gjurKv97+9WppdvMrfvkz/FtX/0Kv2Bw5ocyPicL7vukMOg3nnCZHjDxv8APCr7W27altdPmhV4ZkjWRl+61bsciSSedNbfL8q7m+9/31U91pKLbvMjqNvzIqp935q46lOctz16fuxOKvLab7Q6WzxqV3Km6Lc1Ys2j3KyPNI/m90X/AGq73VtPSS4e5b5Hk/1S7Nq1kXuhw27M6QzHd8zt/do9nLmsKpGEtjgNc0e5jdkmhZP4tqvuXbXK32mv814m4rv3L/s7a9U1bTkmg87fGsWz/dZv96ub1bw35kf2b5Wdfvssu1V/2a7KcYxPDxVP3vdPNNQt5maR5pvut/q1VfmX/erA1ix3L8nzLu+f/Zru77Q5rWE7Nu1k/wCBVgappaRskyIzp954d/8ADXfTl
E8StG3unBatbTqr7Eyy/wATfdr2/wDYQt1gsPE+0/entCR/wGWvKNc092X92jbdny7V/wDHa9j/AGJovKsvEeEA3TWpyO/EtfqvhAkuPsLbtU/9NzOzhdWz6nb+9/6Sz5h/acsIJfjT4qk80gtrlyCAv/TQ15VqkOdyJCv3fk+bbXt/7RNktx8YfE+4DB1u4G4L/tmvI9es3Hzuiqq/xfe3V8Xn8eXPMV/18n/6UzwsZ/vdX/FL8zBTfJ1enq3k53puWmXcflNs8tR/u1CjSeXj+Fq8XlOb2hPDJtk/c8t/Aq/xVYW4dlHybf7+191VrdnG7/fqVZHjbKf7vy1X2Be0LH2ieTYibfm/9Bpv79tvbb92mpbzSbZjDll+XdV6Kzj2h33D/gH3aj+6YyjzFaFXZWfzty79tXbW1uZJFcBXFW7DR0kbfs3LWvY6WzfIkXzMn8NVKQombbw+Sq/Ju/3qt2lm8eX6Lv3bv4l/2a2LfQ0bY4tsszf981etdDMa7/vMzf8AjtYSlKUjanH7RlwwwNh3hb5fl21Yt7abd9/buTbt31t2+g7pPnDJt+ba1JJo7wyb9n/AmrnlLlOunT9oYzWc0J2dV/2aRYX2rDtU/wAX7z5a2X0vy03vSeX+8V3s1Xd8vzfw1hGpI19ifQS6S/nSpE67413IqrVlbV7dWCIoHlK21vm3NWvNpf8ApDvhWeFPnbd95qia32SbH2qF2/N/er52pT5T7Wn73xGLcW91IiQwzKh+9u+9WfNa2wxAkK5bcqsyfxbv4q6G+0/zriLZbKPvKzM33WqpeWqbhBsVvn3IrP8ANWPvHZKMOQwLu1mWPZ95F/8AHmrK1COFmX52+Zt3y/w1v6pH5m7bcsPLT+H+9WXdRpNN9z+Hcy7fu10Q92epw1OblkjAlhMl9smfaF+X5vu1DJayWTbJHZyz/dq7eR/6SU8lcb9rrUa7Ps/z/wDHx/Bu/u13c38q3POlHmJNNt4bO186H5tzbtqr81bmkw20kyTJDIyq+3b92sqzWaNwUG6Hzd27+LbXRaTNDHdJsRtirv8A9n/gVRL93qKnH2kuU09L0n7UY02YRv4f/iq1V09422Dy9v3VXO6ordkuFH2ZFbcv71fu/LWlbxpGqeTAqhdq/N/D/tVwyqc0T0I04RGLpqSfP5PzfwK3y7qhbT0t7VbmH5dz/wB/+KrrW+2N/tL79rrs2v8Aw1U1Jd0zJbPhG+7H/dpUypxj/KfTP/BKnTb7Q/2u/hnrkd2UbUPHOnKqoeVj8/y2B+oZgfY1+nP7dP7TX/BOXTf2gL74b/tefs86jrfiDwzDbfYdZ0+xV/tFvNAk6ozpPE5CtIw8t9yjJIxvYV+TX7M/xc1T4CXvhb42aLpVrqF54Uu11S2s7wt5U8kEhkCsUIbBK9jXm/7e37Y/ib9uH9pjX/2jdV8J23hj+2Ut4E0ay1CScW8UMSxRh5GC+Y+1RuYKgJGQq1/RfEs8s4WoZDWlSqODwSt7Ks6U1OclUk+dKTs+eS5bWtLyLzKjToV8PUkny+z+zLlabd9/mz7m/by/4KOX/wC1LomnfBr4UeCU8G/DbRCn2LQYkiV7to8rC7rGoWFEQgLAhKqcks+F29D+xv8A8FMPC3w8+EUn7LH7XXwyPjn4dyDy7ELDFJPpsWWfy/LcKJlEm1kberxHJVjhFX8in1jU7iRkS8nH+y0hWmT39x5w/wBMZzs+VlnPy1MvE7g+pkEMnWSuNKMueLVdqcam/tFU9nzc9/tX1Wj93Qbx2WPALD+wtFO697VP+a9r38/lsftl4v8A+Cn37Kf7PPwz1XwT/wAE6/gBceG9Z16Ird+JdVto0e0YYCuA7zPclQX2q7KiM27DZZT41+xx+3T4X/Zx+D3xe+H3jTwdqutaj8RdIMNnfWt5GqpO0U8TGXeMqMXDvvG8kqF2gMWH5ZTa7etEiveS7lb518w/LVO+1u7ijdn1C4Xav3vMLM1GD8Q+FKeW1sG8qnU9tKE6k54mUqk3TkpQ5punzWi0rJWVr6XbZjHHZdSw8qbouXM023NuTaaau7X0Psa1urmyuY72zneKaGQPFLG2GRgcggjoQa/QOD/gpp+w9+0x4O0Ww/bv/Zn1DUvEGhWSQR6zpQEwuW2gSOGSWCSIOwLeVl1BPU1+EF3q+oSycajcK23+KU/40+DVbq4fbLeSP5e07fNO2va4i8Vcl4ndKeKy6pCpSbcKlPEOnOPNZStKNNaSSs07jx+f4THcrqUWpRvZxnZq++qXU/aL9rf/AIKSfB/xT+zzL+yR+x38Grnwf4PuLhGv726dIpbiIP5jxCJC5+dwhaR5GZgpUjnNVP2N/wDgph4W+Hnwik/ZY/a6+GR8c/DuQeXYhYYpJ9Niyz+X5bhRMok2sjb1eI5KscIq/j3p/iG+lVzPNKBu3LtkNWo9e1BVd3v597fLuydy15v+vfB8ckeVvKZOLn7XneIk6vtf+fvtOTmU/NO1tLWbRxf2zln1R4f6s2m+a/O+bm/m5rXuftp4t/4Kf/sqfs7/AA01TwV/wTq+AE/hvWdejIu/EurWsavaMMBHAd53uSoL7UdljRmztbLKfEv+Cd37bnh39jz4z+I/ip8RPC+q+IDr2gT2jNY3KCX7Q0qThn8z7wZ4wGbOVDFgrkbT+YsXiO9LjbfOB5X3VJ+Wr9r4ouzMkM08hjblW840Yfj7hTD5RisBLK51Fibe1nPESlVnbbmm4c3u9ErJdtXfOGfZdChUoPDt8/xNzbk+13y306H1g2vW7eMT4n+wv5R1P7V9m84btvmb9m/bjOON233x2r3b/gpD+2d4Q/bZ+K2hePvBvgzUdGg0rw3FYTR6nPG7yS+Y8r7QnAVWkZQxOWADFUJKj86rPxBfLCqfaZWKtuVo87v92tnSdcaILcxXM7L93czHdur2cX435RUzTD5hUyuTq0IzjB+3dkpqKldezs9Irc9iPEmGxGIhWdB80E0ve72v08j3OgEqQynBHQivJbHU5JV3797bs/eP3q1bXXJHZ3ikcfL/AKljtb/arpq/SWo0/wDmVt/9xl/8qPapZ+qqv7P8f+Afpr8Nv+Cnv7NPxi+D2g/B7/goR8Br3xdP4dtxHaeKbUrPPORwHbLxSxOUWMOyyN5hXcwHSsD9qH/gpf8ACS8+AN7+yp+xJ8G7nwP4W1KXGq6nIyQz3cDDEsXlxlzmTbGryvIzMgKEYNfnXJr1zD9xHT91uX95upW8QvHbur+bhvl6/KrV+d0fEvg/D5lDEwyepyxn7SNL61L2Mal786p+z5U76pfCuiPIisrp1VUVN2T5lHnfKnvdR2/Q+6f+CZn7c3wb/Yv1rxPefFP4SXWsSa3aRx2et6RDDJeWwXO62xM6AQyEhmKsDlBlX+XbxXwf/bY1T4B/te6j+078J/hzp2l6dqWoXXn+DYJilt9gnfc1qrKPkIwrKwXaroCE2jZXyJLrBhCwvPI27+JTVJtRu8PDNKy7X3blb+GvWreNXDVTH4zF1smlKWLgoVU8Q3GUUrJKPs7LTqrNbqzbv0vF
YGdWrVlSu6iSleTs0vLofq/rf7bH/BIT42anJ8SPjX+yLrVr4nv2MmqiwtgUlmJyzl4LmESsSTl2QM3U15h+2v8A8FLPC/xu+Dlt+y9+zl8HY/BPgG0ukkljJjSW7SNvMSMQxDZCvmZkb5nZ2CnI+YN+bOpXF3DGq211KZPvbdxw1ZN1qF+gbF5KdqblVXPyt/drzMs8S+FsDi6OK/s2tV9i1KlCpjJzhTa2cIunZNLRXvb11POhiMuwtWM3CUuXWKlNtRfkmunQ/TP9h/8A4KPeHvgP8LNU/Zq/aM+GMnjn4eapcb4rFpI5G09WJaVFilG2VGkCSBdybH3OCS1eqt/wUz/YY/Zl8N6q37Cf7L13p/iXWbGSBta1iJIhakjKEs8s8kqK4RjCCiMVHPFfjBquo3IVsXs/8K7mc/erntT1e+l81GnlX5mZN0h+WujHeI3B+b4+pi6uVVEqslKpTjipRpVJK3vTpqCTeivtd6u7bM8Vi8sq1pVJUZe87yiptRk+7VrH1j8UNOk+MVpq8Hj7Vby9m1ydp9TvXn3TzytJ5jSM7A5YvySc5ya8ok/Yk+DskPkG+1sKTnAvY/8A43XgOp6pqjsVGpXGxv70pb+tc/f3eoxK+7Urgtvwi+c3zL+dfT5l4ucK53VjWxuRxqSjHlTdRaRV2kv3e2rNcTneX4uSdbCKTStq+n3H0jJ+wN8D5Dk32vD5cYF/H09P9VUR/wCCfPwJLlzfa/z1H2+LB/8AIVfKt5qd7BIGTVrovv8Al/ft9386z7zUdUZj5eoT7P8Aanbd/OuP/iIfAf8A0T0P/Bi/+VnE8zyR/wDMDH7/AP7U+uD/AME9vgUW3f2n4i4z/wAxGPv/ANsqcP8Agn58Cwwb+0PEBI6f8TCPj/yFXx02p62zB01af+7tadv++utPh1PW5mXfqs4+f+Gdv8ab8QeAv+ieh/4Gv/lZUM0yd7YKP3/8A+y4f2D/AIKQFSmoa9kdzfx8/X91VqD9if4PWxBhvdaGBj/j8j/+N18f2upapt+fVLn5f4vPb/GtvTNSv5Zkht9TuPm+ZlaRvm/WueXiL4fx/wCaep/+DF/8rNlmmU3t9TX3/wDAPrCL9j/4TxcpcatnOcm7T/43Vq3/AGVfhnbSiWO71UkDABukx/6BXzNo93rB/ez6hK3zfIqzN93866LSrq8KrNJdXH93b5priqeJXh4t+G6f/gxf/Kzrp5hlstsKvv8A+AfQ1v8As8eA7crtuNRO0YAa4X/4irkHwV8IW5UpcX3y9jOvJ9fu14Xpup6hLJ5qX0qFX/e7ifmrdtr2984+TeyOW+bazn5a5ZeJPhwnb/Vmn/4MX/ys7I4/AvfDpfP/AIB67F8IfCkKqoluyEGF3Srx/wCO1MPhh4byCZLrg5H70D+QrzDRdRuNizXV84f7mybPzVeivLg2kkUch3SfLiRi1Yy8TPDfm14Yp/8Agxf/ACo6oYzB8vNGivv/AOAegN8LfDTsrmS63L91xMMgenSpF+G3h5ZFlD3GV6Eup/8AZa4EXUjSR/O+V6tuP3f7tT2M8zxn7I7ff+75h+7SfiT4bf8ARL0//Bi/+VG0MVhmrqkvv/4B2zfDTw65O6S5wwwVEigfotTL4B0FAMedlVwrbxlR7cVyOlw+YiXL3Mof76qzFt3/AAGtW0S6eNXlimX+Dcw+9/tV1UvEfw4n8PDNP/wYv/lYfXMPLel+P/ANqLwHoUTBgZyQcjdIOP0p6+CtGBDP5rkZxvYHGfwqlawySSLsw7N95VX/AMeqza25nb7Xbo6Fn+RWFdtHxB8PJ6Lhymv+4i/+VjlicNGP8Jff/wAAnTwfo0b+ZHG4OMZ3Dp+VKfCOjkY2OD/eBGf5Vet9DF5aCLyxtZvvsdu2nXGkBLpI5rgfd2qqr97/AIFXUvEDgBfDw7T/APBi/wDlZzrFYSX/AC5X3/8AAMs+CtEIUBJBtORgj/CkfwPosgIlMzZILZYckDHpVwaWzXRS5QlNv9/azVQ1LSr+3iUoiDe/zLn5WrT/AIiBwFb/AJJ6n/4MX/ysyni8FF/wF9//AACB/hh4YdizLP8AMCCPMGDn8KZcfCvw1chRLPd/L6SgZ+vy81T1KwnRmeAzbF/5aSSL92svU9MnSNo453lMm1Ym3D5l/vVMuP8AgGGv+r1P/wAGL/5Wccsxy2DusMvv/wCAbI+DPg8MH33m4HO4zjP/AKDQfg14Q3tIj3alv7sq8f8AjtcbNBcIxRbqVmba33Cq7qpXVreBiIWbePmlZnLfLXK/Efw/5uX/AFdp/wDgxf8Aysc8wy6muZ4Zff8A8A7a7+AngW8yZGvVJGCyTKCf/HaoN+zH8OXBV7nUyCcnN0v/AMRXH6qskqhBeNGv3U3OfmrY+Cv7K/7Rf7SviePRvgz8M9b1x3fynktY3WNWX+JpG+VVrN+Ivh7y8z4cp/8Agxf/ACsn+2MsjK0qCXz/AOAasn7Lnw3kIzd6qMYwFuk7f8Aph/ZV+GRJb7TqmSck/ak/+Ir7C+DX/Buz8Xbq3g1v9o7456f4ZhcsZtH00m8uFX/eX5Vr6D8Of8EH/wDgn9ocMaa/4p8ea5IyrumbVBAu72Vf4a8yv4ueF2GdpcPU/wDwYv8A5WaU8ywtX4MI3/Xofl5P+y38Nrn/AF11qhOc5+0pn/0CmSfsp/DGTGbnVB/u3KDP/jlfq/cf8EQf+CdMg8v/AIRnxdDuRv36+KnZt396vOfGf/Bv7+yjrDk+EPjX490hPu7JZYp1rmXjP4VPbh6n/wCDF/8AKz0aGYUlLSg0fm/P+yH8K7mQyy3msFj1P2xP/iKYP2PfhQCWN3rBJOSTdp/8br6q+Lf/AAbwfGrRklvvgh+0Zo3iaBf9VZ+IEktblm/u/L8tfH/x0/YH/bc/ZzkuE+KvwZ1qK2t5f+Qlo+bu2aP+9ujr0YeK3hnV+Hh6n/4MX/ys9nDYzDV95cvqjXX9j74UJ9261f8A8Co//jdSw/sl/C6AbY7rVgM5/wCPtP8A4ivBIJ71L86Wt5NFKv34Z2dW/wC+c1p2IvlYb9Tuf725pDWkvErw6cdeG6f/AIMX/wArPZoYJV43jP8AA9xP7L3w2IIE+pjLZBFwnB/74qZP2bPh4gws+o4/6+E/+IryJEktlBW5lLSfc2yP8zVoQT6nGDDPqE25v4d527a5ZeJ3hx/0TNP/AMGL/wCVms8vqxdnL8D04fs3fDsNkSah7j7QvP8A47RL+zh4Clk8xrzUx7C5THt/BXlqGaSTyLy7ddr8t5x3VQvJrpTsS4lx6+Yf71S/E3w3f/NMU/8AwYv/AJUeXiMOqbbauesS/ss/DSZy73Gp5Y5P+kp/8RTJf2VPhlKQxutVUjptuk/+Irxa4vb6RnH2112t8+1jVSW6vZGb7NezIP73mHbTj4neHD/5pmn/AODF/wDKj5nF1sHTV5UE/n/wD3F/2TPhhJy95qxwMf8AH2nT/vik/wCGS/hbgD7TqvAIz9rTv/wCvFI31G4VVS9uA2cblkO1qe1ver88d9M5X+HzDWs/E/w
6pxV+Gqf/AIMX/wArPKWOyxy/3Vff/wAA9n/4ZH+Fe8SG41UnGObmPken+rqB/wBjf4RuxJudYGRggXif/EV43u1JoykM83zbmSTzD8tZd42o5WP7bKHb7zLKf8aqn4n+HU3/AMk1TX/cRf8AysbxeVf9Aq+//gHuo/Yx+EIGDdawRnJBvE6/9+6kT9jz4UIjILzWMNjP+lx9v+2dfOEs9/aXYZdUnQ7vutK3+NSf2pdqWlTUrlpG+bb5p/xreXiT4eKOnDdP/wAGL/5WZRzHKnLTCL7/APgH0jH+yR8LovuXmsfX7Yn/AMRU9r+yx8NLRNkdzqh92uUJz6/cr5xsb/VJbjzprm4jVvmT5zXRaHHeTRPFPdyOVbP+tO1qUvEjw7ir/wCrdP8A8GL/AOVl/wBo5Y/+YVff/wAA98sv2fPAdgu23m1D6m4XP/oNaEPwi8KQLtWS7PuZV6+v3eteUaIbidQq3TN+6YL++KqrVvaLcX1orPcK5eMqPMOdsi1nHxG8OZO64ap/+DF/8rG8zy2l/wAwq+//AIB6LbfDzQrU5jluD6hnU5/8drWttOtrTTxpsW7yghXk84Of8a4eyiu4lSZp8+Z9zaK7DTVdNAVZcKwibJJz681+l+GfFvCecZli6eAyaGFlChOUpKfNzRTjeD9yNk7p3122PVyrMMFiak1SpKLUW9+mmmwg8Naech3lYHHBfHT6AVYGm26xrEC21egAAz+QrJWULue2D+rw5/8AHqtFQkQfcERfv5NfnUfELw7Wi4ap/wDgxf8AyszjmmB6Ul9//AJxoNiu4K0gDMWI3DGT+FVbjwVotyhjlEpBOSN/f8qfLa3Lzec0BdF+bcx27awvEdrKJAyySB4fRvl2tTXiF4dXt/q3T/8ABi/+VnZSzLD20hb5lm8+EfhK92+d9p+X7u2UcfpVKX4DeCpozG9zf4Jz/r1/+Jrltbnlt4HNsSyjcrLuK/8AAq898Q6tco7RR3Eqsv3VEx2tXRR8QPD6e3DlNf8AcRf/ACs64ZlTteKPXpf2afh3KBvuNT4IIP2peo/4DUcn7L3w3lJMl1qhDDBBuUI/9ArwaTV76NvJhurj5fveZKfu1Vm1TUmkXZdSfN8zfvD92uuPHXAD24ep/wDga/8AlZlWzanCN3G/zPfH/ZP+GUgO6+1fLHJP2tM/+gVBL+x/8KpQQ17rPzdSLxOf/IdfPV5rF8JHb7fOuf4vNP8AjWdd6vqjReVLqE+xd3Kyn/GtFxzwGo/8k9T/APA1/wDKzya2d4KMbulf5/8AAPpOX9jn4UzIEbUNaGO4vI+R6f6vpTof2PfhTbqFjvdYwO32tP8A43Xypca3eyMTHqVx8v8AC0zfL+tOttc1adkji1ScH+FhM3+NJ8ecA/8ARP0//A1/8rPOeeZZf/d19/8AwD6xt/2TvhfbYMdzqvyrgZu0OB/3xVyH9mr4dwjCz6ic9Sblf/iK+Y9F1q+df315cNIv8X2g/wCNdDY6zdzXCma6kVdm5VaQ1jPxC4BTs+Hof+DF/wDKx/23lslf6svv/wCAfQ0HwE8EWyhYbjUBtGF/0heP/Hamg+Cfg+3IaOe9yO5mX/4mvC4b+/jjEiXbyt95f3x+WtTS9YmbMM1wyurbnXJ3Vzz8Q/D9b8OU/wDwYv8A5WdEc2y/ph19/wDwD23T/hh4e01w8FzdkgY+eVTx/wB81raboNnpWowanZySLLbyiSPLDG4fhXjlnq108CJFM7H/AGWNeqfA7xbpug+PLK/1WTbZxyrvMi53L/FWdTxE8P1C3+rdN/8AcRf/ACsl51l8XdYdff8A8A+lfD3/AAUV+Pfhf4fXPw50bTfDkdpd2pt5bo6bIbjYVKnDebgHB9Kwvgp+2h8VfgBoWp6H8OtF8PQtq+BqF/c6c8lzIoOSm/zBhT3wBmvvD4kftGfs1fFP9kbSU+G0Vkuv6IlvLp6vZLHI0kf3scc18taf+zB8bvht+2d4V/aV+NN3aDTvF9x5ljamQPujWP8Au9F/2a+fxniT4Y0EpLhalLT/AJ+JW/8AKR6WV5zg8x5qc6fI77b3fTseReNP2qviZ48uWudatdKUs+4Lb2jqFPtlzWIPjZ4x3ITDZEIcqhhbb7DG7pX0n8fLPSjq19NBaRrKZd8iLEFIj3bvSq/wk0q08QrNNFpKKscW7aYw3/fVeLPxZ8K1HXhKl/4NX/yo+mp0/aHg9v8AtI+MIbt7648NaDcytAYla4sXYID/ABKPMADe9ef+LjaeNdVOr61pluZD0WNCFAznAySf1r7T8QeKvAvgnVEsLzTLfUXKM0qraqyw1xl74i0DVll26RaM3zPueMbYa9DB+KfhhUhePCtKP/cVf/KjzcbKlRnyyhzHyK3gHwizMx0SLLnJPOemOtZ158IPCV4csbpOuPLmAx9OK7f9pP4xSeHrddG0bTHeOXc3mlwBXxr8Q/iDrN1qRtri8m2yMxz5pK7q9GPif4cyjpwvT/8ABi/+VHnzxOAo6ypJf16H0Bc/s8eCLh941HVY/aK6UD/0Cq4/Zn8AgY/tTWMYxzdp/wDEV8j6n4x1J2kha9nP+15p/wAa5a88ZaxPOiR6hdeWzbX3TN/311ran4i+HFf/AJpen/4MX/yo4qmdZfTdlSX3/wDAPubRf2cfAeh3K3Vvf6rIy9pbpcH8kBrudCsbfw7fLf6cmHUABX5HHevFfhx/wUB8P/sw/CbRfDPg+K0vboWbf2neXdos5mZv9+ty9/4Kk6N4n8FC70qO1h1Bfv8A2e3Cttrkn4k+HUXpwnTf/cRf/Kj0qeLy1K9op+v/AAD6D1L42+OdV0X+wLme3FtkYRIyOn/AsfpX05/wS8/aiubT4lR/APxDooZPENrJBpF7ZqQ0U6RvIfO3PypVWAKjIbHGCSPx/P7WfirxL4qPiPxD4gvrmeWXazXU+EZf4V219u/so/FJ/AHxL8HfFVSD9mMVycNgESQkHn6Oa+jyvFeH/HvDmcOlkUMLUwmHnVjOM+aXMoykrWhG1nHXe60sbYephcdSnToqz/qx+hPin9rrwf4B+IF78IvEOpLb3bRbYmml+6u7b92vP/E3jLSfiRpMvhW8mWaxVvkkZfmkb+Fq8E/4Kha54Y1r4Xr+1F4Pv47TWNNv4/tUa/6yaFvvKteN/s2/tOX/AIkuIEu9TuJ5mdVlWR/lb/dr+Rlzyj7WD92R61HD4eUOWovePqr4E/s9+LU8bPNo+pXFxEt/t+zzfd2/wr838NfUd5rGq+GvDcOlXmiRw3bJuna3Tay/w/LVD9kb+yNb0K31Sa2VZWbczRv+93f3mruPjSumqxSKzb5YmX5W+ZqdaMY0HJfEeVWlKOMVM8K8X302pXn/ABMkaTb/ABNXhX7a3iC80f8AZ1ltoYvJs9Y1GGCBZIt3mSK38O7+7X0N4k0220vRvt80bDd96T7rL/s18b/t2eMLzxJ4g0TwBczSS2Ol27XirDcbo1kkX5fl/vUcNYf6xm8ZS+yXneI9nl7ivtHz5a6RebW/fRhlTd+8StbTYXbZDbP+98
r723/2anabazW80XmzfIz7WZU+bbV+G33XSuNvnNL/AH/4a/WZRhL3T4vDx93UntbeFbd32eUy/N++/irSW3ddqP8AeZfn8tvlqGGzcRs83lojP8ke6tOxsy37lHX5tq1PsY/EejGXL7plalpcLbpZtqN/EzfN/wACrI1HTXZfOSaTf/Ev8K//AGNdj/Zb+S0I3RFflaNvmqCbQ5mt1mFm3zJ/EnytWlOmpS0OeVT7J51daKjQO/kwzL8r/M+5mrB17QftDNc71j/56/3K9LvtFhj+eEbxIu1FVVrm7/QfMhZUtsKvzN5la06f2jzcRV908m1vQZomaaHb++2/Nv3ba5bWLV4VdEds/d3N92vWde0N47OW58narPu27drL/wDY1xuqaK8Lf8ey7Wbc8a/3a6adOMjyKlM861bSxHCR8zNGv3mf+Jq9S/ZCs/sdrr8ezH7237YzxJXIavo6bXR337m+SNq9B/Zjtmt4tdLoAWnh4H0ev03wjv8A6+4W/ap/6bmdnDUYxz+lb+9/6Sz5v/aAsUk+K3iQgr82sT7lK/7ZryTxBpPkyH522V7v8b9O874k+IZCmVbVLgHcvfca8p8TaS7Yfzm2/wAKtXxnEMf+FvFf9fJ/+lM+czDm+u1Y/wB6X5s801Sz8ubfsU7qghtWkVnxjbW9q2kvGzb3Vd3zKtUo9Jdm2P8AL8n3q8bl5feOMpw2TyZ+fH+1VuK1eGVJH3fd2otatjo825fITen+1WvY+H0umWaZGT/Z2/dqOX3dC5HP2ukzSSbI9zbf4a14NBmk2pD93+NWrpNN8NqyB4flRU+WtPS/DbztFcp87L975avl5/dI+E52z0Py9qeSw/2q2bfR9pXbw395q3F8PiOHe+1/+Bfdq9H4bm83Yib41/5bK/y7qylzco4mJY2tzCqf6Nv2/NtatjTbFJF3um5m+Zdv8NaFnod55vlodqt9/wAz5v8AgNbmm+F3VVZ7b97/ABL/AA1hKMzppmRb6G9xILxE+RZf4aW60d5GfEOP9n+GuysfD77Um8vCw/M1X/8AhG7U/f8Allk+ZGX+7XLU8ztpy5ZaHmFxpEMg8l7ba33vmqhLo26Rnd1G7/VMv8Nejal4XLSP8mX2feb7tYmpeH0t1SFId7b/APdrKPx+6dMZRl7p7akMLSB5tu5fuR7Pl/4FUFxbv9q2743iZfnZfvVsyWr21r9pSLa6syr538S1W8794k0MOz5NzfJurzJUT7SMTGuIUt5DDN8rSfMu1fvL/tNWdf2Kecr20ioNm7dJ/DXTahbJcLvSzZhIm5v96srULW2aNYfmZ1/h2/w0nS5dUKpLlic7q0c0cbpZzL/e3L826ufvo0C7PO5b70bV0lxZ7l3ui7du1N3y/LXM+II0s2MyfIrN95VqfYyUrHFUqR+0ZepTfdm379332/u1Ta4/5Y+cp3fxU/Up90jSQp+62/Nuqrat5KfvnjxsVUjVfu/7VbxjynnSlzS8jYs2maNfnyq/3V/hrZ09khh3I7Jt2sjN96uZjvNzeS6cK/8AC3zVu6Sv2p/JRN38XmL/ABVjWjyxKoyjI6zTbxGhS6mm27m2NJWlDqFt/qfmdY93y/drnLOR4YUSE4TYzbf7zVaW8ttxvPJbzW++y1zcvvanVzSNj+0N0LIlsyuqfd/vf7VUby4cR75tzr9146zrjUpmZXeZY23bWjZ/++ai86ZSZpp1x8y7d33WojGZUqkuh7D4eYN8F5GQYzpd1gf9/K+c2uEdlTfsl3fd+8rV9DeF23fAt2kO7Ok3e73/ANZXzVqV1+72I67Nu5l/2q/fvFayyLh6/wD0CQ/9JpnTn7k6OH/w/wCQ/ULhGVjN5jOr/wALVnX+sRhWSxdQn97+KorjUIUVnRFV2+/uesa81ZAqv/e+7X5DCjzHy0sRLk5TZt9SkuNrufmX+791qbc30PmL/pkjt95l/hjrnYdYeOZk879yz7U/u1dutQhmhDw7gm37tdlOPKYSr8pozXUdxGN+3O37yv8Aeao47qaJWS2fZtbayt/FWTDqW24fjejf3m+7T11Kb5od+U/vMtb8suU4albmlc2luEaTZ0H8G5qsLfTKPJR9jf3q52S+e14++f4Ny063v/NV/wC8z/8AAqcpfZFKp7v946iPUsMUd1fd9+rNvqTzM/zrsh+6q1zUNwZG8t3Yj+OT7vzVo2d88jb/ADFwybdy1zVJTN4/zHV2OpbpVCXP3k+Tb8taul6pt2qjybl+bcqfLXJrqCWsMUe9f/Zv9mtXS7yZV3vMrbfm+Vfu/wB6uKodtH4jt7fVJmhT9yoT722N9rVpLrW+3857zczOqvIyf7NcVZ6pDcKyedtVf+ejVYbVHMaOkzMzfNtj+WOvHrLm1Pao1OWB2C6ptVLmNP3jK3zM1Mk1TLM73O/7vmqtc8uvPDZ/67BkTZ/lqkXVpolb/pomzatRGnIupUgdCtw8krI7ttm+5SrdWcarvm+dV+dY03Vi2987xqjvIpV9v+1t/vVJb3CXEh+fzd25Xm27dyrUezI9pD4S3NdJdq800rNE391dtYV800sazW0jJ95XVv71bWyGaFX37hGn+r3feWqN3bvHC7vtZG2/u4/vVcY8vuhJcxzupzX7R/Y3O9933f8AZrm9Qt/mmSaZtq7WRW/irsL6F4W87yZMMn3v9quf1zT0kk8t4d3+0q11UakY+6jklR5tZHL6havDJshdn/2v4dtYmpfKzTbPmX5UbZ8tdRq1r5e77u1fvVgatMgGx+iv8+3+KuuNTmmOOH5o8py19bzbmf7rfwKy1n6hbO8bBPmffu+aty+UyD/U/K3/AI7WXf2TyK3zso+9XbGXumNTD8pjzK8ch9KW1t3jbfNHt3fNVjyUbO/5v4qljtQzbHfedvy1pzcpz+xn8RY09X+byUYN/eb7tdDpdqlrcRbE+b7rMv3aztNtUh2edCwbZu/3q6PS4Ukj+eFW3fcZkrz61aB0U6Muc1dPhRdjpu3fd/2d1dDZxzRsm+RQ+z7ypWPYrCIvJy3zfc2/w1sWcfyMrzM/mfNu/u15sn73M4ndGPL8JrWavHH8k33v4mX+KtXT1DK37rfFGnzMv96s+xaa48uFNp2pt+X+KtSxV2/0ZwuPuurVjKXu8x1R980IUfa8Lw7yzbauTrM0cUybc/d27vu0Wq+XiaaH5fKbbHGn3lp+n2u0lEhb7/3mrl974oHSuWOjkWbPzrVUdEXds+fd81WrNXmZJrZNjbf3sci7lqOzhmhQu8KuJN33m/8AQa19Lsd0iujtsXa77f4V/u1pCXvSOyPN7L3S/pdq8W3y4Y9n3ZW+7/3zW5p+moq/vk+78vy/3araLps1uqrNc74t7NLuXduWugtbNJpFeJFcQ/LXp0o+5eJl8MinHYvJMuxP9Z8vzVp6PpH7v5+rfcXft27atLp6RhU3q5X7kcK7latezsvL2vMjb/7u3+L/ANlrrhKNMcpSlIrf2Z5liNm1U2NvVv4akt9JLfuYduzbu3N/erYt9N87fNMOI2+Vl/ib+7tq3JH9o
jX7TtRdi/KyV3R5Kkipe7H3jnV0Pdcb/lZvl27vvVV1DR9sgR0ZQrfvWb5lau1hs7b7Q0yWfnPG219r/wAVMutJdpi8f3lX5GZ/l/4FWsfdkY1pLk948x1Lw7H57hE4X+Ff9r/ZrDvtO8jbbTOsI2/uvlr03UNB8qOW8dP9ptqfe3fxbq5bxBodnJL8/Rv9VtrlxFSPwnnxlCUzzvUNNk8yOKGNVbc3y/e3f7VTeHfh7r3izWo9B0S2a4mvH8q1ht4mkaaT+7tWuo0LwFqXjTWLfw9pWiXE13cOsEEdvFueRmav2S/4Jgf8E0fDH7Nnhiz+J3xLsYb7xdcRLJBG8S7NNVl+6v8A00/2q4oyVSrGETnx2NpYWlJs+f8A9gD/AIINaPdaNafEf9ryBnE0Ucll4ahfDNH95fOb+H/dr9B7X4f+Cvg54XTwV8NPB+n6Ho8UW2Kz0m1WJdv+038VeoMqbCBXnvxm12DRrAtM+0fxf7Vc3EK+rYHT5nzeBrVMXjo855n4x1+3tsrEGl2/8865STxBCzKdi7G+b723a1ZXiv4laJBIYUv4dzK37uR9tcRJ4uttVulvBqXlIr7dqv8AK1fkmI9mpn7Fl2DoRpe8z0m78SQiEfZrjLbf9X/do0/xZpzolvczNE7My/N92vL9Q8bY02Qw3MLlpV23CvuVVqjp/izVbP8A0bUvLzHL961b5WX+GuX2nLC6PVjl9CUfiPZriaCKY3KHzpP+WSxvS/20n2WS2uYVlST5fJkTcrf8BavGbHx5r3h23vtS1XWPt8Mcu6KG3i2yQru+7/tVtW/xOmvI5X8hlVov9HkZvvf7taU6s1Exll8Ho3cpfG79h79i79oBoj8YPgjpbX7RNEuraOn2WdVb+80f3mr4u+N//BB3SNHvH1X9mT43yXULK3kaD4si2tu/hVZl/wDQmr6/1r4qPafZg6NdPJ8sse/ay/8AxVTQfEabT1uEnuY5Ujt2ZPJl3MrV6WGz/HUbwvzGdHDTwsuejOSl+B+O/wAbP2Vfj9+zfNFD8Y/hvfaYm/Yl9Cvm2kkitt/1i/LXDXFuG2wwzKV/jbf91a/dSPxponjrSx4M8c6VY6lpVwn+n6feRK8Uy/8AAq+Mv22f+CS3hHVLCb4xfsS6kbcKjPf+A9Vus+d/e+yP/wC02r6TA5thcauSXuz7Ho0c/qxfs8XHT+ZfqfnrdfuZAg8tlV22M33v/wBmse+WG4bdvVdr/PWz4m0/VfDuvXPhvxVpVxpWoWcuy60++t/Llt2/2lrHutn2p0f5D/E38Neh9qxljcRSr+9CXumX8m50toWHz7mbbu20v2G5WbZM6qyp/f8Alar9rH5URTbhl+6396plt0mm3o7bf9patS5ZRsfHYyn7TmRX/s879iW+3zNvzU5bPdCzJ8qr8qLHWhDZzbQ7wqgXds+bczU63ieNj5MPyyfP833t1Epe0PI9nGNjEvrHy43mcNu2bVWsXULV418wR7WX+Jq6y83rMd9tyzf71YOoW94uoSvsjxvXb83/AI9XRHl5tCfZ+6c7cH7ZiZEX+9uZPmZqqLHuk+f76/M6/wB2tK8017iR7nzvm3/xf+y1TbT5oZGm2K5k+5/drpjUjzcsjj9nIls43vLhXd2/dsu1Vau40m3RdtztU+Ym35q47SLV47hHkh2uv3o67jwrb7m+0yfNtT/Vt91q1jL3NCffOi0m1RWG92RV+Vmat/SVeQN9sdfJVtiLN8ysv+zUOhafDN/pM0zLu2t5ez71dDp9i8sf75I/OZ/kj2fw/wCzWkTjqxl8UR9qrRshfzHTYu+Rf7392us0Nd3h5FBPKP8Ae69TWHp9j9nkV5oWVlba3+0tdHZIY9OVGIYhSCVOQTzmv3TwPi/7azCX/ULU/wDSoHu8Nz/f1V/cf5ozUhmW6L7/AJpvvtJ/6DVuNoZo1mgRVf5vu/8AxNPhhWGTzpkm+58u1NzbqfpNjtdrxU+f+JW/hr8ajT92585HFDLiN7m3Xypmf7y7Vb+GsfXLebyXd3Zm+6jKn3a6C+V7aFXl8tP+eUcaVgeJp3s7d32KAvzbf4q3jROyOYe9Y4DxUqNayOlyzLv2/wB3c1eYeKrpJLr50ZVVtrxr/DXpfi5PMhlSFI96/NtX7q15h4q3sxn+6rfM/wAnzM1dVGMInTDMOWNkYd5JuXekzRvu+b+KoGum3K/3V/iaq1xdXKyOm3dt++1V5fOZW+fC/e2q1dUfdicWJzKUpDru42yM6PGRv+9WPqU3mSedvZTs/v8Ay1PfXDyDy0fDN9xttZl8z/Km/d/e2/xVpKJ5dbGc3ulZZvLk+cKy/wAVT6XfbZmk+b/4ms+6ZJGCb1H+ytLZ3CfKm/b8/wB3+9WUo8xx/Wpc2h0+mz7ZCiOx3fwt/DXQafqSRxgvz/erjrW6SOPY/wB37v8Au1uaTcfvPJeeuWUeVs6KOI/mOr0+4hgk3w/8Cratb7z5vOeZmZfvfL8rVyNnefMmblSrfL/s1uWOpOZE+dW3LXFUj9o9CjWlsdj4fvPtEfnO6p833V+9XQWerfZ5A6PIybNqN/drhtPupvkwY1Hn/PIr7WX/AIDW9Y6ojM0cm3zf4JJP/Qax5Ym/tOaNj6W/ZF8Vf2l8QtK0HW/EO+1k1SNGhV9sX3q/Xb9r74e+H9Z8J+HvEtkzEeFrWJrWRW+WNWjxX4a/AnUrmHxxbTWcMnmNcQtF/F8277y/3a/Yz4i+NPFWtfstaD4q8T6fOsGsWq2cDN8v7yNfl/8AQa8PN8LX9leEeaJ6GT16EsbBTlyyufLXxx8VQ/8ACSBHeSWW62/dbdtrqfgvZ2Gn2MmL9T5lvuZVavF/id4sfT5Gmd2edpVWWaR/mXa33a7X4V+Mkh8M/b5rZYUZGRmZtv8AwKvz+o58vKfqdFx9pZnOftAePLDwrqypYXmfOn2Iqxf3v4q4m1+MGlaTp8t5fpsiji2v/emrjfj58WJv7cub+51KF910yxLs2syr/FXzr42+MlyrN9muZPMV2bdv2qu6vWwdGXLFI8zMqkOaTNb9pD4wQ61eSXXnSS7Xbyrf7vkrXyz4m8QzXl083nf8tWZVb+Guk+I3jq/1dX+0zSOzP88m771eYa9rgW48lNufup/tV9Hh6fNLlPgMxxyjLkLPmXOrXC2dr5hdn27q9a+GvwF03UrEf29tt3b5vMk+ZVrkvAdvo+kWcOq6leR+dJ/D/dr0HS/FifYXSG88pN23cv3q9KVSNP3IbnnUY+0lz1TE+JX7H9zqelteeGNYhl2/fhWvErj4K+PPDd43+gSFVba7R7mr6x8N+MLaxhR01Nm3L86t93dWlofiDR7fWIRNp9vMjfM7Mn8K/M1XTx8oR5ZRMsRgYzqc0JHjfwD/AGcfiR8VtcTw94e8MX15eQv88KxfMv8AtfNX2bf/AAw+I9/4Lj+Evg/daeJYraGwi+XJimi2rIMD0CPXef8ABLv49+Fda/a41WbVbW1SG4s1gt2ZFVV2r/dr074NXmk/8N8C/wBQt0ktD4x1N2jLfKV/0gjn8q/Y
/CXFUa2U8SrltbCTv6ctQ+k4bjVpUa8m/s6fieB/Hr9jX9oHRf2XX0a78YXGqPI0dxq/mbmdo1+7GtfL3wV8Val4B8UR2E/mW81vOq/MzfKv+7X7+fH7SfDHijwf/ZWj6bCkFwu+VY13bv7u6vxT/wCCj/w3h/Z9/aKtNbs7Nrey1iVtyqvyLIq/3q/nn2dCdL2dM6KebV41Y1JS8j9LP+Cf/wAZH1LS7W1Fzsm2f6xpf9Ztr6f8Va/ZeI/9PuUVHZtyyN91a/LT/gnv8YodWkhtt8aOrbEkWX5q/QbRdal+xw2d75yeXErfvPutXz8uajTlCZ9Th5fWK/tS18WJIbXQYbaZ98HzNLtTd/3ytfml4w8RW3jLx9rGtveb/Mv5EiZm+VY1+VVr74+O3i59P8A6hqt5NthsbCaVVX5Wb5f4a/OHw/fvJZwwzbklm3Ovyf3m3fNX1PCGFipTqng8RYiPtYUjbs2hkhV0RXZk2vIv3Vq3Y2r28wMKLI397f8ALtqG3unbZNv3tu3eWv8A6FWto9vux5033l3fc/8AHa+85InjUakeTWRoWOn/AGrEyOy7XVvu/K1aEGnpIzzpMp3bm3fd/wC+aZp9q8iv+53N8rJGzfL/ALtbFrZzKzFE/wBd8m1U+7/u1py+4aSrc2sfskFnpb/fuSqI3yqv+7/FVmTT3/2m2pt2r/7LWhptil1tmufluG/iZ921au29vJJbNM6ZH8G5fvVrTjDYx9p7vMzjtQ8PwyMUhdYi23ytyfN/u1z2peHZrhZofs27b/tV6Hc6a9xJ8ltuib7+75tq1Tbw3NHMvkosKr8ybauNP4TmqS5o+h49rHh3c7w52uybdrfwtXJa54Z+8iJ5bq/9zbur3LU/CrpcSvsb92+5ZF/irlNU8JpcSPC9hI7796SN/DW1OPvHj1qh4xrHhHMZhRFxJ/EtdD8EtKGlDVI9pBZ4c+nAfpXSah4ddt1sk0e6N23KsTU7wxpLaWZy2QZQjFG6r1r9M8J1/wAZ5hX5VP8A03M6+GakZ8QUv+3v/SWfP3xe8NJdeMdYmhdhNLqMhTC/7RryfxN4T2tvuZmO776qvy19K+P/AA3Nc69eTblbzZ34H8K5/irzPxR4UtmWV3tsiRq+Nz+N84xX/Xyf/pTPnMfL/bqv+KX5s+ePEGjuZm5xt/h2VQt9Jm3K6IrN/tf3a9Z8TeD0VWuURh/s1zDeE7lZPkTf/stXhSic3MjK03R52Vk+X5v4q6bS9BRdruiqdu35av6LoM7L89tt/urXT6P4bdpF3plF+9HWXvS90rmM3R/CP7tXg2oWet2LwPbQyb02uqxfPtX5lrp9F8N20atvhkd/7u77tbdrocLTBHSSIt8zxx/equXlkZHBx+Ddyslmm5Nu7cy/NU9n4Zm270g+X7u1V/ir0q38N/a0byT8kcq7vk2s1aVv4Pe4X5EYf7Ozaq0csJEylM860/wS/wAvyNvXb/q23bq6LTfDKK+zydzr8qrJXoOm/D37ORMlg3zffmb5t3+1trX0rwKjXUk0ztt+780X3m/hrOVM2jUlE4HT/CLlhM9mw+XbtX7tXJPA9z9o2THYrfLub7q16NF4LS6VEHyGN/uq1adp4HsxGLZ7NijJ95q5qlOR1RqRPINS8EOqv9mhWVfK3Ksf8TLXNat8Pf3J89PMk+83+zX0JdeBbYLLM8MYeOL5WZPmWsLWvA8MjP8AOq+Yu35Yvut/tVEqfMbRqfynKQq6xqiSbW3tvaR/lWoJN9psRLbeJmb95G3y/wDAqJrh7ebHzGJlZU3fdX+7WfcahcybtiNlX+f5/u153LKJ+gx7k0cfnTK77c7mZmV6rapZw+T/AMfKxBv73/stTx3bvvezEKPs+X/d/wB6o7i6hffLc2yod6ruVt22q9nyrQupU93lkc9qVm7QvvSMoybfm/h/2q4/xEqNHMj7f7vmR/3a7e+leS1dLPbvbds8z7rLXHeKIY5tyfZsKr/P/D/vbaz9ieTWjy+8cPqTeXJs3sRSxzWzbfX7vzfxUupLFHmZJmRpPmT/AHapfbELIkPyv/z02/drKUeaBy832jbsZE2kof8AvqtjQ2kjkPOxPvLurnNNmkjVIXnXH95vvVq2d0kjCaF2YbG+Vqz9n7vvExknPmOh+3Ha77ONn/AqsrI8kLJ9xfK+833V3Viw6hDNC9tv3fut23+7/wACqWK6hhYW2/5VTb8zbqj2c+W3KXCpyyuWLiR7eP8A0lN+3+L/ANmrO1G+mjs2SF9xVdybvvN/tVYvrrcvk7ONyqjM9ZerSOtw77NqMm3ctVGMuawqlT+U978FzrP+zs06twdFvMH/AL+18q3lzDHEPsyMu35d26vqbwQY/wDhm19jZUaJejP080V8malMk2dkPz/e2/3Vr968VoXyTIP+wWH/AKTA9DP5JUMLf+Rfkindas/2hYXdWP8AeWs3UL7ywV37tvy1DfXXkyO/mY/u1kXmqpuw82HZv4q/I6R8TWrcpZutQSPbsdgn3tu+rEOveXbhEfd/tL91q56bUk3F32qFam298nzP527+8q1tHzOb2k/iOqh1JJJFeF1fd/47U39pbleFvutxu/2a5nS5N0i7H2lf7z1rR3CfKh3H5K1+GRHNzGqbzzmV33bl+/8APSw3E0395d3ytVKFk8zf/A38X8VXLRZ9rJv+9WUpFU/i0NKzkf8AjPzfw7auWt15cfycf7P92s2CZ4ZBDN93+9/DV21bd9x2K/drGUf5jvpx6m5DcLMyb9qKv8S/3qvWd9tmHz/KqtuXd8rVh2+FVZJ5lRGb5a0FvCxKb1Td91f4q5pRgd1Pmj7xt2t062aOj5kbds+T/wAdardnqMaQ7E27v4925lrm/t00e75Nu75vmq3Y6h8qI7L8rbkjrz60Tuo1I8x0C3DzbbK53Oi/d2p/47V5bj/SEmtvMRvvK23dWDHcTLH88zMv8XzVow3Tqqu4bH8ar91azlGXLoaVOU3JLqZSgQMoaX96zL96rdrIk0myFF/h/eR/8tKyLOZJr1EmSR0b5krUtYZo1M0PyIv/AH1urCUY8vxD+KZdWPz4diOqbX+8v96i8h2t9mR28qR12bfm+anGbdboju33P3rSfKtPVrxlGxGQqnyfJuXb/d3VnU54+6dMeWWxkXizLu8mXzPJRlRv4axNQt0khd0uWyqszLXS6hY+Ssps/LEf92P5vmrDurVwoguU/wBr5U20ox+0Wcvqdu6wujwqtxu3bpPu7dtcteWrxsXm5rufEVq6tshTc+35P7tctdWrzSO8MKvu+9t+Va66dT3blyjy7HKaxazblWN2Ufe2/wB2s26h3Mfn3N/erevLH7VMyP5ny/w1VuNPf+//ALq12U6nLoc/LzGB9lQbkT7rfxN96rljY4aMuvP92rbW6blQRqzL83zLV2xs/wDlm77lZN27+7SqVv5S6dGMfiLGm2Matv8AOVpf7ta9nCnmB4UZd397+Gq9nYwx+U/k5f7qsvzVqx2T2u5H6fe/4FXnSlE2lTgWre33SK+zDr8vy/x
VqafC8c3nQpu3Ntb5Plqrbrtk3q+VkT+L5q1LGN1b53w/y7Y1p83LDQ5eX3y9p9rdbvkDff8Al+f7tb9nGgtxv3Ef6tNy/NurJ0+H7RtTfIEk++392tiyZI42tkmb5UVfMX5vm/3q4pXlL3Tqp+7ys0dNhmWREm2/IrL83y1pbf3iwpyv8W3+H/ZqnYx/apA86MzzL88jfdb/AHa2bRd8Z+dRti27WTbub+7WfNyxOmO4/wAmGSRZNm5mVmRm/hWtLS4UWWJ4dxXb8jM3y/8AAqo2zIyqkMMiyfxbvu1saLb3N5JFeQphV+VFV/lWrpxh8R0e0l9k6DTWhktv321om+55db+l6b+7R027JH3P/wDE1k6PapNEiTJ8zPu3M/y/8BrptJs/OZETb9752/vV6WHlze6c8qnNEsafpKQwuIYWBZtyNt+7WlbxusgdE3fLu+5T9PhuYY1h8xn2pteppLNI03zbirfLF5f8VdNPnLp1CXT5vm2eWwb+CSNvlqSPeszbHbZs2vti3f8AfVQW9ncxsz723r/Fs+X/AHa09LtLmGEvv80Kvz/Jt211xly+8VKX2h0Nl5y+dv8AvfcVk27f71T29i/l/Oih/vfN/FV61017hdlzNGwVF/1fys1W5rbFvsL+Vu/4FureM5ROSp70TktWhhlkV7kLtjdt8bfN/u1yOqad9qk+xzIzpvVn+WvQdUt4ZLaXf+7Vk/hTdXY/su/CVPFXixvGGt2dvNp8O37Gtw/+sZfvf7y152ZYilh6UqkzjjH2MD6N/wCCX/7IGm+CJE+NPxJtrV9YuNy6Tbybf9Fh2/eZf7zV+ieheI9KsLSO2u7lYxt+8z/LXxLp3xqs/DP+gabND5zQbPLb7qt/D/47XP8Ai79rTUtr6gqSW4X9xFIt1uVmVf4V/hr4mjm2JjjPbQPMx1OOJgoyP0Sutc023sft7XKiLs+75a+Yv2sviheWWl6q+k38OIWy+5vurXzov7fHiSPQbTRL7WFjSafa0kn3dq/8tF/2t1eZ/tIfHh/EngiXxDo9y13Nbz+VqNxNcfNMrf7P8K17GMxk83oq552GpxwdXnOM8ffHC8utUkRNV87zF27l+ZW+b7u7+Fqx7X44X9nb/wDH5Mkkn3FX5lavEvHXxB022b+yra/aSZn37o/u/wDAazNN+IU1hZ/Zv7S8pVfa/wDFXx9XLakp8qPtctziVOPxH1N4d+Oty9vs1W5VE2fuo/u7v9qpbz4i3PiSN7Cw1j7P8isjN95a+XNP+KUN9Iba5hZHjT5ZmlVfmWut0v4lQ+IIYrl9eVJWRvN2t821fu15OIwU6J9TRzql7I+jtB+IFta2z6bf6/5vz/vfl/8AHant/iRZ2ObOzv5PJ83dAzPt2/7NfPA8aX9vavDYXKjzt37xvm+b/ZrO1b4ieJNF0+K5h1iSRd/meTMm35vuttasPqs3H3RfXnUlpI+jdS+KVtqEySJNveFvk/u/8CoX4kaba3CTWfy+c6q21/vbv4mr500/4mQ6q6PDeMjqm64Vfl3VteH9evLr7keXhRl85flX73ys1ZRw/vHVHE+0peZ9IaT460pZP9JuWhmWVWabdu/4DXf+GfiBp6/Z3R2YyM32eRXX7v8Aer5f0/xU8McRmdcM22VYV/i/vVreD/HF5HYyLbP5Kea3kbf+edTOi1PmpnHWrQ5PhPRv2xv2J/gb+3F4fhtprlfDnxCVW/snxdGi+VI38MNz/eVv738Nfk38afg38Uf2efiJefCX42eFZNI1rT7hkRd3yXi/wzQt/wAtI2/vV+qdr8UvtVpbw/bJPI2/e2bW/wB2q37Snwx+HP7ZHwVk+G3xIto/7b02Bn8F+JpF/wBJ0+Zf+WbSfeaNv7rV9nkub1eT6vivlI8lVquGnzUvh/lPyUjhfaqbMn+D56u2qoqNC77lX7yt/D/s1qeNPh74n+GvjC/8DeMLNbe/sbpom/hWRf4ZF/3qoWqhV+f5gv3/AOKvYqPl0HKpGt7yLCtNJCqedt+dWX/apbXfNI01zDsVZdibqW3/AHKu8NywMn+xu21b8iGO3EKJvDfNub+GnGVonHKJm3kkMMZaFG/3W+9WPfW+6KV4fk2/wsv3q32hdWHmCPym+batULrT08kIhbDS/Oq/w1pzcsomfvnMzWIhX+JWb5vLVPlaqf2N5I/O8na3zfLu+7XSXFmJI/kTDL/D/eqrdaW8cf3G2sn+srb2nv3MPZx5TK0uzuZFH2aRleT+L+Ja7LwvYzW8yQ7923/nolY2n6Y+4bJtn8MUiptb/erpdHsZI2VJnbdJt+b/AHa7sPyyPNrylG521jCnyl5vNVUX/V/NXT2rfZ7z/XL8qfIyp/DWT4fhhmhRLaaHaz7kjjT+L+Kt61t/9HVJX+Vm+TcnzV6NOnCR5dat2LVrawybrq5m3Iqbljb71XraJRp4iRNo2EADjFV/nWVU8vDMm1mZPl/4D/tVctolhgEcW7Azt39evev3DwSio5vj7f8AQLU/9Kge1wxO+Mrf9e5fmiG38mb/AFL7Bt2qvzNt/wB2rLbFtWS2i+Tdtbc/zf7O6oWjeSRVSFtq/LuVquNYpHMv77en/PNvlr8g5mfHRqGdqESXAXZcyD91t2/wrWTfRzXUZ3uxTyt27+FttdDdQzX37mEsWj+/5ifw1j3yzeWqI8eyNNr7qs0jWl0PPfFy+XGUuX2M0X3q8r8UWvmW3nWbq0avuRmdvvV6x4wkSaFJkhX5mb7vzLu/vV5n4kjfa6Xnkl/vfu/lVWqoxlylxxB57fLLJJ8j7lb/AMdaq91JtjV3hw6/eZa1tQaG2ZvkXdu+fbWYypMzok+5W/i/u11fD7oqlafIY01w8kbd/mXYzJ93bWdfRvH++SH5meugutH8yRUcNhk27ao3WjuqMiRt8rbd33lWq5uX3Tikc7Mz+Z5OzP8AEtLC25jsTeG/h/2q0brR3WYL8zfLtqKPTdrNDs+9/EtTUj2IiuUfp8m2ZXmfG3+8v3q2rG6SGRJv4Nn3l/h/3qy/LRfkdGZl/hq3b/KrJHu/6Zba56kTppyN6zkfbvfaV27kjrStbrzI/n3ASLtrB02V7XbHNMy/wvurVt9nll3dv4di15eI5vhPUoy9z3TZsdWuYZEcJ91PvL/DXQaXN9ujWaYMwV1bb/7NXKwxzSLvR/mj+5tevUPgh8P7rxVqBd4WI37POZdrVz4en7SdmaVK0cNSlKR0vw71T/hD9c03xPeOyPb3Su7btystfuP+y94p8Pftq/sFX/ws0q/3a94biWXTvMX978q+ZDIv+98y1+O/xi+G+meFPCqabC6veNF80avu2rtrf/4Jtf8ABSXxV+xj8aNO1LXLy4m0yGX7Lf2twzN9qsWb95/wJfvL/u17/wBXpKjyo+UhjKs8Z7W56D+0do+sWsl5Z3NnJDfWtxtlj3/NHJ/Fup3wj/tWbwvc20yed+63xNv+Zdq/NXvP/BULQ/h/rnxc0n4/fCrVrS78LfELRo72C5g/1Zm2/Mvy/wAVeLfD3R0ghm037fsga3byF+
6u3b91v71fk2d4L6pinBfCfuuS5j9fwEKq+L7R8V/tKfEy2s/Gl/C7srWtw0XlyJ825fvV89+IPHX26aV0ud7M7Nu316X+3pZ3/hn4jX9qjttml3Juf7zV82x6k/nb3dldf4f71e5luDpyoRkfOZ3mVWFeVI321Ca6mZ33YX/x6uY8U3U8OpLMi/w/I1aWm34mZUm+f/Zql4xXf5fOF+7ur0KEPZ1/ePjsRUc46SK9rr2p3GyFPm2/M9dt4Y1p5nSG5v8Ayv8AelrlfDekw+XvT7/8H+1XfeHdL8K68YrfWLNYivy+cvystd0vZSi00PC06svikd74T1DwlJCs154thVY0X5Wf5m/2a+hfgr8Ifhp8SvBtzrdt4wt5b2OJlt4Y/mbd/tV8leIv2edN8QN9p8E6rIVVdzR/aK1fhL+zr+1FHqyp4GuJi0m7b5dxt3bfm+7Xm1sLKXvUqp9HhYx5eSVJ/wCI+n/2bfgTrej/ABqS80fUoRcWsv8ArGn27v8AZVa988Bz6tp3xzjmjvSl5HqlzunzuO/EgLe/evizwX8J/wBtjUtQS80SHUIrlpWiaaOXazSbvurX1p8KZPiJ4d8WaSNPsBf+JrYiOSCVgPNuAhWTJ+u41+w+EWHqU8k4mlJ3vg5/+kVD6TK8PRhhq0YJpuL39GfYVn+0vNa6f/ZWvPI80Nvt27ttfAf/AAXU8eeG/GXw08Ia14e1JRPZ62Fe3+XzG3feavYf2vta8eeF/hTeeLbm5sbPUoYvPnWO48xo2/u7q/Kjx78RPHfxm16O48Z6xJfeXLuih3MyLX865Rhq8sYq0pe5E+TxUvY/up/Ez6G/4J8/Fi58P+Orezmv1hSSXdLI3zeZ/s/71frl8NfF02oaHbagnnPaTW+7bJ8zV+Mn7LPhXVdP8aWEypnbcK21l+7X61fAW+k/4Re1d3kaOGBV8lv4mrgzWUfrH7r7R9lw/UlChzTMf/goN8RLPwr8DW0HTZv9N1q6js3kkf8A1MLfM21f738NfH2gtCot9jrn/noy/dWvSf27Pie/jv4vR+GNKdW03Q7f/Slb70lwzfLt/wB1a850GB1ZHh8v5X3bpK/SOHcH9Xy+PN9o+RznGfWcwm4nV6eEaMJC+/yfm8zZ95a6DR7Uz4mdGRF5RVf5mrH0PHyB5oy//LXb91v9mum0eGGGRXSGP5n+Zq+kjH3Tzo4rl92RrabHNbtv+zM8jLsRW/iroLKFlUJs/dsny/xbWrN0+18zbc/vN2zcrL91a6Ox0iFY1WF/lb5nZf71aU6f2jf61L7JHDb/AGO8V/JUiRdqSM+3b/wGtCO1ea3MqJhvlR2jdV/8dp0OnpcNI9z9+OX5a1bXTxCqIlnt27dm1KvllzEVMRKMTKk0m8imST5R5bfP/u1X/sXd8k0EkXlvuRli3LJXWRaWjTL/AKNl2X7yp/49VyPQ3+SH7TuSP7m7+KumnT9w46mKnLY4HUvDu6Sa5tofJ8x9m3b/AOPVz+reGZt0kzx4ddy/N/er1680CGaNIXhYuv8AyzZ/masq48Lu0m+5dd+9m+7t21fs2cFatKR4nqHg94V87ydiyN+9kjXbuauc8SaONLulcSu3m54cYxj0/Ovcb7w+8W/f/E/ysyfw15d8YbD7Ff2mVILCTORjOCtfpHhUpLjnCX7VP/Tcz0+E534jpL/F/wCks8w1zw2t7dvOu5CRuKg431xXirwWHjkhjeOL+Lds3V7n/wAIzHd6Zb3UsDfPbj5x9K53WfB73CtCkMcoj3Oqtu/76r4/O4RlnWJX/Tyf/pTPn8fKX1+q/wC9L82fOfiDwTM1rsmdWeOVl/dxfMy/w7q5q48CvaM0/wBjYtsVv7q19Bax4NmMn7mKSbd/07/N8tc/qnw7trqQLNYNGVbft3turxJR5TllL+U8r03wrNHcbFhZ3Xarrt+7XV+GfDdreTLLc7Yk+b5Wrr7HwXGsfzpJv+9t27d1XdP8L20EZ2QMx+bZt/irl5eU1jIzLTw+kcafuVYM3yMq1vab4PhmukuUjVH3bHmki/hrU0nRZrfZ9mDFWRdkbfdWu00fw+7bYUeOWNfmeRfvbqn/ABEylE5O18OusabLb70v3v8A0Guk0Pwak2zfD80bbpY5E+9/wKur0/wyiuiXUMexfmSFf/Qq6PR/CMMiP5KfeRWdmp+zM5S5TkLXwTttB5z7vm3fu/4v9mtO38Jwxsv+h+bt+/JXaadoLtmZEX5fu/w1Ym0naqOnmbI02bV+61EuX4Qj/eOQtvB8MCvss4ZJvN3bvu1pW+jwyM3yKsUafdb5a3bjR5rhd7pnc3/j1OuLNJF2TIpb+FV/u1zy973jeMjmptHtpoXS2dS8ifxJXO6x4fhWx+SHb97zV/vV38lv5M37lF2/dVtn/oVYviKzSeQo6fdTajL83zNUcv2jXmPkbWNcRY2htod235E+b7q/3qZb3FmzJ8kfzfM0n96sNdUfcHmffIq7f3fy1PbzOvzo8iD70Ue3+GvPjH3veP1SXJE3ZpnhUPDe4Rl3bdv/AI7UV1HDuMMyZT7+5f4v9mqXnSNMNibQy7fmb+KmyTXLXSu7L+7/AL1KXuyFUlSUSWSOzyyfMjqv3WSuQ8UW7tHI95MyvJ8ybn+6v92uq1C5TzBC9zlV++y/ermvGF0kkMjo6uioyJuX5qiXunj4qVP3kedeIfmUOhyivt3bKyZLjcySbNp37dy/w1d8QTTeTvR8u331Ws6JoWVt7tv21ly/ZPMqSNKxa1kbzo/3rKnzLW5b3U0NuvkouyT/AL6WsfRtk0iIk25v4/krftbP5ofnZ/8AgFT7vwi5fd5izbrCsWya2bZtVvlqaSN7eIb4WQ/7vzU/R4YbfLumx1fc/wA3ytV64bbH/o0Pz7N25v4a05eWRXvS1MxpPM+eaaMhvuMyfdqlqUcKsiLNvT+Nv4a1Ly4hhRpkh3n+792szUWRkSVE2Mqbty1MYilyr7R7v4Kbd+zc7qSc6HenPr/ra+QNajmaNnR2+VtytX2B4Jbf+zgzJnnRL3H/AJFr5G1yRJLffIm3b975K/dvFLTJMh/7BY/+kwPU4jd8NhP8C/JHF6lNcjKO6kVj3Fx8xfYrCtfXNizP5P8AFWK1vt3vHu3L822vx+PvRPialQgkm8wLs+YNUdv+8k2D7392pfsb/wAH/AttTWdkI2MvzBl+b5lrWPUx5ZSkS2av52+ZOa2rOTdJtmTnZ/eqjb2Mqzecj8SfwtWrb2MLSfJ13f6ys5S5jSNORbtU8zZ2C7ldatQ/KyvLuyq7aZa2v2dVD7Xdm+etKOGFWWZ/MO7cvyr8tZylE6KdGew2G3kkb+//ALK1dhhaFV2W2TJt+VakjtZvs4e02iT5V+797+9V23sX3f6SF+ZNq7XrCVaB6MMORrbxrDv+bLN/ElEcm24Lvu/d/LuZank098/cY7t2756dHDDJbp97HyrurGVS5vGnKUhVH2mNfOfJ/wB6rNuu350+fy/4aS3tUVdkMf8AF8n+zWrY6R9nw/lq25fnrhlU5TvjRkyDT
w7Lv+X7v3Wf71adizyTNv8A4k+8v8NMgs0VUdCwRvl/3q0rG33R7Jpv9rav8VZ+2Y5Yf4S7Yrtk3wjcjfJKq/e/3q0VZ4XXykklRfv/ACbW3VDptvtjVERQ3/j1a8NvIYV2TL8zs1Y7fZKVGUh2mqhU+c+d25nVv/Qatxvu27E2jYzfL/DTvsiR26uEy2+rCwolv5MM21lRtn+01c1SXMdNOjOPumVeWcLfI+4q38Uf8NZ+pWMdw2yH7m/bub+KupksZpFXYjFZFVfl+b+Gsy4jubiFZvO+bf8A98stR7TljzHTGicfrFq+Hd0+bdt2qvzbawtT0l4/4F+ZPvSV297ZpIvkuiu2/wCdt9Y+oWe5gjpwv8Vaxre6VKnJe9E4G80d4ZGmuU3hk3bqgm0fnznhVX27ol/vV1Wpaf5bKm9XDf8APP5t1VGsEWF4URQZF3bf4q3lWvuOOH9w5CTS3lja53+UW/iZfu1LZ6TN5hR33/7tb8mjoyrJ5P3fmVWpfsdssPzowlVfnVaca3vBKjH3bFW1t0hUJDym7a+2pYbzduREV9u7azLU726R4SFF+b5nkb+L/ZqvJJDHMrujIrfKv8VRF80uaUTmxXu7Fy286b/RvOVfm3eWq/eatXTV81R59ttbd8zM/wA1Ztq3lsJkfJbc23Z92r9jN5n75ZtjNL8jMn/oVFSUpXSOOKtys6HTVkgUpD8qK237/wAta+nxvbLvttvzSrv8usvSYUmJS5h81pPlRo/l2/8AAa19PjSSZf8Ax7+Hb/vVxSly6ndGP2TXsFFvCj/u2C7tyt97/eWtDTW2lHmdiZPvbqzre13N5MKZ3f8AjtaVsvmMz3Lsrt8v7uspcvNzo6I80tEadva2fmec7btrbtu/5v8Ad/3av6L/AKP/AKNNO2/733NqstZMdvdeZ+5mVkmT5vm/eVraQqW3l/aXaJf+mjbtrVtGQ4y97lOw0u3hbbMm1fL+bb/drrdHheaOJ4XaIsu3/e/2qwPDckMcfl3Pls3y7fk+auq0nTYYbtLxLmRHVNu3buVt1d9H3TnqS5tTQs99vIg370b5Nrf+hVakV5RG8O7asW1YasafDDMywpCzN97d/e/2auw2b+W6Rrt213U4++Tzcu8jLsbOZ4/PTl2+6y1tSW8lvGr+cwdXVmZaktdFeDdbIkmWdf8AdWrTae8Nsz2zqw+83+1XVGPNMmVSPKFrNcwsj71y3zeWybmZakkZAqzb5GRUZv3n3dzVUuIbmxZ5ryGbfsXypFf5V/3lql4o8RWfg/Q31vU900K/JFaw/ekk/hXbSlLl94uMYxXvSKN9ND4g8TWPgy2RW+1Sr9tWN/3kcP8AEy17LD4q0Hw3odnYeGN0Z0+38pYdnyqqttryn4NxwtY3HjbUEmttavpWWW3mXb5duv3VX+7WX488dTW+oTCGZvObd5XlvtX73zV8Tm9aeOr8kdkeVPEc2sT0Hxt8cv7J26t9v8t/ueS3zK3+1Xm+tfFt5NQe2eZndpWZFhf723+KvKfEnjy/1y8ubm83JB96JZH21x3iDx9c2cLecP3sf3FVv4q5sPg5ROOVSXKetap8WrmwjSbWNS81Ld28qbY3mR7v7tYGvfFpNWt3mg1KaNJPmlhZvnavHZvipNdXT2015v8AM271b/0GsDVtehuJvkmZBub95u+Za9ajTlH3eWyOGpKB1WoeLn1bUvtKQyJLI0kXzP8A6v8Au1SbxNqumxqIUV5lXO6T+KuM1bxdCyuiQ/NHt+b+KRqqt4yubrY/nKzfdaP/ANlpyw/wyjua06nKdlF423OZriZWeSVju2fMv+zWxoPxceGNLB/JtkZVR/L+b+L/AGq8wm1ZJI/Jtkx5n3938LULLc2Z/fbW+X5GWuHEUL/EdsMRVhrzH0F4Z+I0M0Ys7O5mfbKzSrN/D/dZf7y11LeItS1JY7DUkjuUX/VNH8u3/wCxr500HXNSWSHvJ/e+8zNXq/g/XtVvL3fczfIvzKrP92vCxlNUavMfSZfWnWja51FnHeWeobEhYbn3Iv8AC3+81d/b2qaa0NtD9omtJkX95M3977yr/wACrN8O2dhqVuk2n2bJtVftDTfxN/ervrPww8mm/aUh8xY9q+XGm7y/9qvP5qUo27nuU8PV+yOXS0t3RLKbymXaqL/E1dXpscOmwpHbTxod3/LZflVdtZVvp5tdQ+021szRsm1ZPvfd/wBmtm38P2euaa/9pSyPtf51VtrLtopx5fcOfGe1ia2jww31nsuUwu9WuGVNq7f9mr9vpWpNfO9nNhPmaKNvvrU3hfR4brVrbR97XT3FrviVV3Mqr95Wr0K18B6bcLbm2muCI0ZXXbtXc396u+GDnU9654NbGSp/4j4n/bw+EPiHxJoo8T21tHcX2kp5vmNF89xH/wA893+z96vkCKJIZGhhDMVav12+JnwbTxJpFxol+i3iSRbdqxfvIY6/LH46fCPVfgP8dNR+HWpQyRWd5K15oi/3o2+Zl3V7GBxU6l6M1sRRxHs5afaMmGZFUPM/yr97bV1fs0cgR93975azbNkNyiIn97za0IZHlUfuVlb7vlsu1q7Obm909OMftDprPzmEP7tDHt+Vfvbf71Rf2fcw7d9mx+9tZfut/tVpL9j87yY03Lt+9tqOGPEx852+aJv4N22lGOpqYt1pO6aZN/zfxstQSaakil3mYsybYlb7rba17iOZZo3ebc3/AC13fLuprIn2cedCyuv8TJ93/gNdTjzanHzRjzGJZ2LzTYmhZFX+Gt3RrG5hvPOublWSP5ait1+0QOkM2/zG+eRmrV0m3uY5khhgkcL8v3PlavVwsZHjYqUY7nZeG1hWGNIdqM0XzeX95q6CxVGj+07G+Vlbc396uW0uORYUmR2V1l+9s210Me+aBkaZV/v/AD7d3+1XdGieLVle5pWq225538tZd2/bJ/FuqxHCsECxEkAIM4OSOKpQttkijmh3N5X3o/7v95qvp5YYFHyuchge3rX7d4JyvnWPX/ULU/8ASoHu8Ju+Lrf9e5fmiWOSGFv3cPmfN95X27W21ZdXMbTfaI0fyvut8zbv96q1vC8Kyu8zfvF3bl209FS3jLukkg2fMrfeWvx2NT3uU+R5BGuXbT0tZpvlX5t2/wDirn9WvJriF9kKtufanz/+hVp6hvaNXh+7s+aNfvNWNqkkMymF0w7fNuV605mTy++cP4uXZl5nZdv/AHytea+KI0ZZH353bVr0vXkebzUdMqyfvdzfNXCavpv2mQ21zDs8v5dv8X+zV85pGPKefXGlvcSOkKMzs+2n2eivHGd9ts28fKn8VdZHoLzXDpbfK0fy7m+XdVuLwwi2/l2yMz/xtW0ZQH7PmOQk0FG+583mfxVRk0RNmxE3/P8A6vZ83+9XosPhU3kaeZujZU+6qU5vC6KrDyNyt8q7lq+b7MROjOR5Y3h0LcSh4cTKnz+Z/dqhdaLNGo2PtbZ/dr1ibwTMsnkyp8jf3k2/8CrO1Lwn57ND9jZVj+X7n3qFKcTKVLl+JnmMGlvIpjc5f+Nmp8Gm
7W37M7V2qq12eoeC0hzNbJlf/Hqzl09LdVR4Wdlf5NtTUpy5JWLp+6YwtfLX7jMZE+VZPurVmPeW8tOWj/i/hauq0P4X+IfE1wf7Htmd1TcsarXMXGk3mk3Ettfow2y7WXY25fm+auCVGcjeOKhT6nU/Dnw3c+JNaTSk6zOuyNf4q+pPhb4Zt/AVwlzebfssa/6VJv3KrL81eG/AfxF8N9H8caakOqxm5mlVU8xNu1v4t1e3ftZeLLDwv8N7/R/D2pbb68t2g8tZf9XuX/WV04fD+z96XxHmY3GVakuX7J4l8Xv2pFvviJfHStYjmhjuGT5m+9XCeI/G3/CZM2t200cVzG25I1+ZdqrXzzr1rdeH9Sf7TqvmNJu37X3V1PgvUr+O3+02E2/au371dP2ji5ZH1r8Cf24fFvh34er8AfHN5HPoUN19q0O4un3Np8jfejXd/DXvfgH4sJeXFo7uzxsisy/w/wC9X5oeINceRn3p8y17B+zv+0LHMraPqV5JHcwr87SS/K22vkOJsr+sRVWB+gcIZzDCz+rT+0d3/wAFJNDs9U8cHX7BJClxFu+58qttr46utJvIZC7/APfVfVvx4+JWm+PtJgvL+5meS3+Vmb5ty/wrXjLafpWpbkS5jG7721a87KKkqeGUJI789w8a2Jcos86jjmhbf/49Ud8v9oTrbRorbfvbv4q63WfCP2GRXhT727+Cqmn+FftTb3/75r1qctj5lU5RdpRGaTZ+XboiQqp/g/2aZezzW7P5M2GWunh0V7e3VHTdIvy7mqva+DY7++VJNyn+9975qrmhz8x2+zvDlicnY+MPEmjzYttVuE/6Zxv96vUPhT+1V8SPBeoQ3lhK22P5Uk3bW2/xVT0X4K2Gvaglm9ysKr97zH+9/wACr66/Zl/4Js/Cvxd9gufE2tyOlwu91jTcse7+KuLFVMM/dmehgaOb05Xpv3SP9m39tq2v/E1tYeJNKmBaVmi2y/xf7Ne4fC3WI7n41Wmuxnylmv55l77QyuQP1xXpvh3/AIJV+Bvh/pMV/wCGNQW4SFfNWaa1Vm+avNvh74eZPjrD4aT5TDqtxCMDpsEg/pX614QKP9h8T8r0+pT/APSKh9vldTFVaUvbb2MT/gpl4jh0H9nbX9Re0Yrcr5cG77rN/s1+cfwN8Dtq1y07pv3bXZq/Yn9rj9mf/heHwXvfCX2b52i3W+5/vN975a+Avhn+zT4t+H91qFh4hs5oXt/lTd8zN81fzvg8bSw+AnTUvePl82w1SOOjKfwnT/AXwGYdct4fJjzG6s8nzbV/4FX1t46+Nln8IPha+qo8f2zylitVhb7szLtVtv8Adrx/4T+Gr3w/p/8Ab2sQ+THH+8lmb+FV/wDQq84+I3xGvvid4qmv7mRls4W8qwhjfb5i/wB5lroynAf2jX55fZMquYSw+F5IblGHUNSvtUudS1u/a7ubxmlnmb7zTN95q6PQbo/aFR7zc+3ci7Pu1z+k6fDDJvR5MK+75vm210uixw7ldPkWNfu7PmZq/UKPJCHLE+Xmm3zHY6XDDIpd4VCN8z/L8zN/s11+j2tz5yTbN0apufcv3v8AZrlNHU3Cqk1yzhfuRsn3f9qu20eK5VsTXKkKm5Pk+XdXfT96BjzS+0dT4Zt0Zt7usUTfMiyN8tdNpdnbX0KYRdituSRmrA8MqjPsSbLx/O2371dnpMc1xG29I03bdm3726t/hD20uhJDo7wxiF3jy3+1/DWnZ6clv89s8MkU0X3VX7tT2lm8zR3PkrEi/NtZPm3VdhhWFvOm8sKvzfLW5UqnYq2FmkbJ+5b5fvt/erYt9JtrjZMltCzfwrv+b/ep1nY/6R86fKzruVa1bPTXkZUd1AX7jfxbaIyOeUio2k20iuiQrGV/1W75qy7zw6jM7zc/w128empfWuIYdyq+1WqGbRYG8y5e2V/3u1JK6KfunBKoeX6x4bh2nzE81FX5V/irw/8AaR0/7Bf6SOf3kMrfP97qnWvqjWtBTa/7tf8AZVflr5z/AGxbOSz1HQRKSSYLjr7GOv0nwsSfG2FflU/9NyPa4Nk3xJRv2l/6Syp4f0Z7jwtp0iMyq9lDuCr975RWZqnhm8aZvN3INrOkka7VX/eWvR/BmjS3Pw20R44iC+nw7WK8fcFLqHhm5aU7I1dfu/7W6vj87Vs5xNv+fk//AEpni5hP/b6v+KX5s8b1bwukO7YjY2fJ/tVk3fgpGuE2JG/lpv8AMj/i/wD2a9i1HwqjL5IfY7fN92sa68LzQ7njRWCvt+X5q8OtGPKckZHmH/CJmNjvRWVvmWSo/wDhHoVt2uYYV2N8qN/dr0m50O2jWXZDsH3n/wBpqz28NyW+H8mMLI3+8qtXHKPMbxjy/CctpOhPHC7okZ8yL7sjbfmrstD8P7Fi2QqRJ/d/vU3S9DT7ZvmhZx8reX/tV12i6ci3CNNNs2/8s/4azkOT5iLSfD3lqrvbK4X/AJZqv3q6G08Mu3kuifN8rMu//wAdq7otrDJG6I+4fM/lr/yzaug0nSXupkfCpF8rNG3+1REzlzSMmHw26qERGJb+6ny0smhfZ48ojF/mXayf+PV19npm2NPJ5WP7jf3aJrNG3/aZPlbo395qmp/MEZfzHGyad5cex4ZG3Lt+b5m3f/E1n3Vr9lby3kUqybfL2/xV1uoafDHbj528xm2pH/C3/Aqy7rSoWvHh8lXPzL5f+1/s1hL3ionI3Fnsj2TbYtzbt2z7v+1WTqFukaskNgzuq7vMrurzw/MyxPCiqv3XXf8AMtZF5paR3iwzJvjb5nZW+6tTKoan5uQzPMu+H5X3/ulq1as8f7533bf+WLM25qwbG8vLVndP3X8UW371XrGd75ldyodfl3Vyy/mP1CWIjKldmurX7BPLTa/8KyN8rVJ9tigRtm4v/Eu/dUFmr/aPtME25mXZub5ttSSQJbun7ltzfKkmzbWceQ8+tiOaPuhcfvlM3ylV++2+uS8VXzq0lzDJt2/KitW9qV19lkOzy5EXlpF/vf7Vcf4gm85me56MrbmX+Gs5SPN5pc3McnqRupvuIu1v71Jptm/2pYLlG+/t/wBqpVt5pNk2/cPl+b+7W7o+mw7t72yuW/hb+H/gVc0pco6cfae8SabpdtDIltbQsVX5vm/iroLHT5FkXenDfMm7+9RpOlzRys/zY2fPt+9W9Db20i/ZpE2rIv3v4lpQ5feHyGa0fkqyfZt27+JUqX7P9ojVHtmfb8vyvt/76qb51kWL5dvm/eb73+ytQ3C3NxHM7wq3lt/q9u6r9n7uge0k5amZeW8KsOwb5mWsbUP3iy7NzN/AtbV15jRnzkkt1jT5I2i+7VC8kgjtGuUTft+X5WqqXNH3TOUYy8j27wVgfs1vg5xod70/7a18iaxE7Qsjuyov8VfXXgtz/wAM0ySSNk/2FfEn/v7XyJeM8xfZyNu7a33a/dPFBWybIf8AsFj/AOkwPW4jlGOFwl/5F+SOM1aF2b/XMG+9838VUfsczMkmznZ8+2trULcfaG3vgf3W+7Vfb919+Nv8Vfj
0tj4upGUinDa7T88P3quLB5n7nr/fp0dn50y8bm3fPtrRsUTaZk+6r7W+So5kdVGiV4bXavyJurVsbG52xO6KgVtyMv8Ae/2qmgtoWcIm3ZH99lrSjsXZU2TNj5d9c9SpynVHBy7Edtpbr84RSd+5mq5b6fatI33sbPl3Vp2dm8sw8mP/AHG3/eqwumgR73fY27+KuaVTmjrI6qeFlzRK9lb/ALxSn/LNP87ql+ywvNG77nP3n8t/lX/ZqRbOZpNmxU2/xL/FVy3sHkbenlqv97+9XHKpKPwnpqhHm+EhjsYWO/8Aebo/vrJU0Nqkcmzydrt95Wq/BCYVh2bnP3WbZu3VZjt4WWHzn+Wb+JvvNtauOVaf2jtp4VfFIpw2MKws6Q/8B/8AiquQWNyzfvk+ZV3bV/u1fjtdsmx/4fmRauWOnzTEu9ssK7fvLUyre6dEcP7xRt7dEyny4j+/u/hq5Dp/lTJ5M3muvzOqrVhrGGObyZvmfYzJu+78taGmKnkyzOipIrbIt33qz9tLluFTD82hLp9jbLIqI+5mi+dtvzLV/TdNdpGhmRdrfLtb+6v/ALNRa2Lxqmx42+RV/wBpm/vVrWcbyqsKQxqy7vNZvvVnUrcsviD6tIS1s442WHyMhn+TdU66fNDI81zCyLJuaJamt7Xy1HnI3/Af4asMzrGn3nZV2rJWEqnve6bex5YxKE0ZVo5rZG3bdysr/eWqEweFghhVPvbtyVsXTQCBYYH2CP8Aur/31VbVLN47dZn3bfvLTjLmL5Z8hy19DNHM6Q+XvX5vlX+Gsa8he4kL3MPnNH8y7k+Vv92uu1bTYbpXmSTduX5GX5a5/ULNrht77R/dXdWnukcsonNXFukjH7NCqNJ/47UFxGjbEmRsxuyfd+9W3dWsMMs32naNsqtuj/u0xrO88v54967922tvae4ZS7GHHZzNI/3Y0X+H+98tMWN2jdI/mZU2u1bMdrC0L3PnK33t0a/dWq8cSKrbIVUN95mojH7RnKp9kxLqPzV+Taj/AHdtVGhvJFZHTYy/Kv8AtVs3lq8B+5/tfcqs1uklykw+ZmTbuVPu10xlb7J5lSXNLlZBa28zSeZcopP8TfdVlra0ezea437FRVRm+/8Aw1Bo8KSN5ezG3+HZu21sadYu0w3tuVUb7v8AF/tVlUlyy0RFOPM9ZFrSY5Jp97w4P3d1dHY2cLbnmeNkkX5W+b5mqjbx20eN6M8i/Oqr81bWmx7meHfGF3bkXZXHzc0eaR6FKNqliSPT38tEtvk3bd+75dq/xVfhs5o42hhdlRfu7vvM1WoY4fL/AHPzOyfNGybvMq7Hp800nmeTlflX94n3ax9pKR2RjDm0KlrazSzLcvw+z7rfw1v6TYpcKHdM/Nu3bKit9PSaNbZ/Lfd/31W3pWkvcKh85l3S/wCrb+GtY+9ykShKL3NrSbeGRvOTpD8ny/3m/vV2+j2u6FA+4lfl+auY0uwS3/fbGab5vlX/ANCWur8OQ+XGkLvIUbbvb+L5q9bCx5oHnVpSpyNqztb+Fm8mHafveYyfdX+7Wtp+m+ZKghSQ7ovvL92Sp9Lt9tqybNyt8v7z+KtzQ9NuLe2S2udq+ZtZ2+6u3/Zr06NOWxwyrGfZaXeeWXfbt/5aqv3auR6bFDjiSZ/mVv4du6t2z0tG3wrwiuv3v7tXofDaTTulym7dt8qNf/Qq6+X3iIVpS0OPutLSa2aG5dnWT5W/2f8AZrjtT8Mv4m8Yf2NZ6rCkdi6u9vN/E38P+7XrfirQ303w7fTb4UZV2xSSf89G+Va5ux+Htt4Ft7bVdYT7Pef6q6WR1bzm3fe8z722vEzrERw9C38xVfETl+7MTx5dQWuhszpa2lzvVEaH7zN/F/u/3q8C1q6vI9auLm/vPMfb8jM/yxr/AHa7j4uWd/Z6lqdhO8bzTS7d3m7v+Bbq83urW5vlj0ewtmeRom3eZ91W3V8pQUpbHFL3vdOe8bWN5NZpPawx7FiZ/wDZ3f3q838YTGOFpobndc7F3ei16v4uW80XS4dEvLnesbblWOL5m+X7u6uAvvC9zrgTPnR+Y+3ayfK3+9XVRqRjPyCpGUocpwEOmpJdrvRWeF/NlkZ/utVDUr+S4vlmtk4+47fxN/tV6G3w9uby1EKuquz7V3Nt+b/a/wBmsnUdD0rSY0s9Vv7X7W27zZI/+Wf+zXrxlQlI4KlOUYHAeIrKG1kifzpJkZN26Ntu2s1Y3tYpX+0tvVt21mrqtet7G1kR9/mBvmdv4dv+zXLapqENxL+4SN/7jbvmWolKEjL3ojvO1Web5H3Rybdnz/NXQaPYa9JGdh80bvl3L/DXP6XqFhK2x5sS7921vl212ek606sjpCqQt/t1wYyo6cYnXh/3kviNLQ7waVMX/drc7du2RvlWu/8AA+uTLIHudr3DbV2qn/j1clayaVqGIYZrfd99Gk/vVoaXq02m33k3OpQskjf8s/4a+eryjWlK59nldqUo8x7v4Z8TTW0Ze5+5s/er/Dt/vV7X8JfG2la5o7Wf7mUbd3mN/er5NsfEDsv7nVW2sjL8392uy+G/ji88M3CWVtfraJ5q/Nv+Vt1eNiOWn7p9vQq0oy5e59VR6DZyKtzFCspj3bfLfatVZNH0/RdWtr+GFmhkb/R/MlZmkb+Jv9pawvAPjL7ZpOy/vI0MMrLtX5d3+1Q3ipJr5YrmbeluzeV8zf8AjtTRrcs/eNa2FhWPq/wL4N8Pa1qFt4h0ezWK8ktVR5I28uONf4vlr0S3+HsMccsKWEm1n2o25f8Avqvm39nf4mWGpeKrOzubyZotvyRzfdjb/wBmr7t8HaX4f1jQEfR4hM6xL/pTNtVv+A17uBrVK0LwkfDZ7lcacuZnkV98NbbS7lLm2voXlb5Z1+823b91q+O/+Cs37Fd58RPAdx4z8AabHZ6jocX26CNkZpJFjXcyq391q+/fH3h3R/D6y3V1CsJV9zeX8qyfLXN6lqWia9pAvNes1vbab/Q/LZty/vFZdzVvLEezr899jx6WEnKN/sn89lrqE01nDc3On/Z3miXf833WrVt5HWPfDDtljVVfzPvfN/FXpv7aHwTm+C37RGr+GDprNpdx+/sLxU+ST5m3ba87t5Ps5/cJhf4d1erTqe2ipLqevT5+QmzdSMEd9w2bd23+GlkkuYWlmmtv3G3bFMr7dzU6OV2ZbZ/MV2Xcu37rUy437Ve5h+Rv/HV/2q3px98uVSJBcQpJvtpk3nbu3M/8VN2eTIHeZlXd8256hkupvtDuj/7K+Z/tUBoZrh7Z3Vnj+b/Zrrpxl8Jw1JQ+IkhW58tHhRfN+ZXXZ8tdBose7Y+9lk3f3vlX5axLVfOm2Qo27b/u10mkxosnk214odfllZl+9Xq4ePKeZiOV8rubOjyJHdfZp929UX5m+6y1tx2MEMOYYVkb7+1vmrM0/ZIqpv2bfvq33Wrdsyb6NIZkUeX/ABK+1mWuqMvtHmVI/EmETTeXE80zDc6r5i/dX/gNXo42B8qfaSGKnb0IzUK2L28m+GbDN8ryfw7aktUnVF
j2nzAcAZ754r9q8E4xjnWPt/0C1P8A0qB7vC0eXFVl/wBO5fmjQtVhVpNkMYC/Kit823+9SMySTNcojN5fy7W+6y/3qVdn2hJvs23/AHk+7UTXfmKqD5n3/NGvy7f7vzV+Iy5ovQ+a5fdKd5sjs2dNrS/Ns/2f7tc1qm9nYb5MNt3+WldCsP2hmCIu7+L+81Z2oWcNqyuJtsf3XX/araPuwH7Pm+ycXq2kpFHcOiMzr/y02fM1czq1mImZ4drs3+t/vL/vV6Bq9r9lkESPv/2v96sn+xPMuGdIFUN9/an3mpxl73KafV+bY5XR/Cs1wu+bdlfmRV/5aV0mi+C0uI0+zW0kok/1u75dv+1XX+H/AA35ipC6SebC67VWL/0Ku20jwe7pFNsjI3fxL826u2jH3jT6vy6Hm9r4DhazTfZ8Kv8AF95m3UN8P3Yr/oeUjfcklexx+DUuJvntmE0cvzeWn3v92luvA9tbx74Y5JPMl+638NdHu05kypT+0eK3XhD5Wgez3Rt95l+9XP6h4TS181I7ZmXZt3f3v92vfZvA8NtZ+XNCqybm/wBrbWJr3glLXfcPDCU2f6z+7/tLS5oyOeVPljeR8/Xng/arQzW3+kfe/dt/47TPDvw1m1TVksktmmaRljiWNNzbm/hr0rWtNhvl2aDZ+cyttuLj+GNf7zVc0fxp4S+Gtrv8PW32/WfK2pfRptjt2+78taqMYx1PJxWIjT+EvQ6HoP7Pfgu+trxIz4gvlVZ4V/5d4f7rf7VfL3xO8UW11qVzc2yQ/N/Cq/xV6T8QrrxV4u1Bry/uZNm/c80jtukZvvVwGv6LolvCEmvFLf7SUvi1Z5/PKUjyXUpb+a4W8015Eljfcm1f4q1td+L3jDX7NdE168uLiVYvkkZ/4as+KNctoXYWEKsqy7VZU/8AHq4vVvEDxSbH25/2ajl9/mNI8xwXjaO/kvC81zkq38VXfhr4qubFntpvuM2395UPjRvtUjFNpXZuTbXNWtxcwtvR8Or/AHt9XH+6OXmel641s0bujq4ZdztWBY6lc6Lqi6lZvt+T96qt96oND8SPdW/2aZ/mX+Km3kKMrTfKV+7RKMKnuyHGpOlPmiem6x4g1WTw7DqTpus5tv77/arndN8Y3drdN/q9jfd+SvTP2E/Fnw38Ra5N+z98YLOGPRPFG21t9Wm/1mn3TN+7kX/Zrn/2zv2SfiR+xj8XJ/Afjl2u9OuH8/RtWhX91eQt91lavMrZVS5ZTpRPWo51V5488il/wkyX1q8KPGzN99tn/oNWPDeqJDdLZ71YbfkZl3V5xDq1zGqok2f4vmrV03WpGm85/kZf4d1eJKj7Pmcj06eKUpczPSdYntpLUI6Kq/xt/wCg0zRdRSGT7T8qbfl+WuIm8WblW1f5f/HqSHxE8MezG1t25Pm+9WVOhOULHcsbScj0/TdesLi8SaG88qZZfm/utX2r+yP8ZP7PtdP0F5lk23EbeYv3W3fw1+c3hvxJPcX3+kzL+8bdu219Q/s0+JLizuLP7HMvyyqvzPt+X+9Xl5hTnGOp9Dk+MpVpcvMftJ8NfiBpXijQX0jUpoXCxK1u2/a23+7Xxf8ADo2i/tiuZwghHifUchzgYzNitD4Q/FS5sZkea8aaBty7Y5drMv8AerkvhvqS3X7QkOrM+8S6vdSlnP3gwkOT+dfrngvVcuH+Kb9MFP8A9IqH1+HhShdxZ9V614ss5dS5eOKFX/dNv/8AHq8H+K3h+w8QeOBc2aKltJuWWRdu5l/3q7L4saf4k1LTQdBGx5vlVl/8e2159qVvqvhPwvfa/wCM7zyYrW1bbGvzM0n8LV/LVCM6uIvH7R89mFSNSrblPEP2ofilYTTxfD3w3N8lqm66kWdd3/XP5a800GG18tprxGD71+X/AHqzLrUpfEGtzarNt824Zt25f4d1bOlw21xM73k3k/dVK/ZMow9LCYeK+0fJYyd6tzY0ux+zTbJtvyt/ndXR6bBtk2Rwb/7qr/E1YunWrmPZ9s3J/GrN96uh0tUtWhRCsq7fkkV/utX0FPkkeNKR1fheOaPy98DK/lbXaaVf/Ha7LSWmhXZCWR1ZdzN8ystcZpt5bLiREkdf41bau3/drorHWLZY2RN25Zf7ny13U/g905ZShI7nw7J5l0jzQ53bvNbf8qtt+Wup0GSe4sY7l/LZ5P4m/wBmuC03WEVvJeZVDbdi/wATNXVafqkLKvkP/q/mf+61bRlzC96J3WmzvGuyHywv3dyv95a0LGRJ5D5CL5qv+9WT5q5XT9UT5N7/ADfe+atSG+S3ka8hkXYqLvbd81a/CZ80v5jp7OQxwqkN4u6SX5m+9W3p/kzRuiJICzt5Sr/EtcpY6hMzjyXhwv8Arfk+Za6PR9UhVkdJPlZ6uJjUl9k6/SfJnt0y+11T541/iWrlxGnlh4YVXa+5FX5qzNPuraKb9zMu5vm+58yrWh9oh3K+/G5d23bW8fh5jjl7plalpv7p5vlLsn3m+7XzD+3hbSWeteHLaaSNpBa3Bcx+5jNfUF1fbbiWG2mVwvytHIn3Wr5k/b4Yyax4akZFDG3ut2zp1i6V+j+FKtxthvSf/puR9Fwc4PiWjb+9/wCkSPQPhVp3274UeHxlAF0eDJPUZQVe1Hw+iw/wptTbuVPmb/aq58FoFl+EXhuRQpC6Hb/J/ebYK1brTbw/8e1qz+Y3zRs/3Vr5PPP+Rtif+vk//SmfPY+X/ClW/wAUvzZ5/faPDNJKjWzfudu7dF8rLWdqHh9LdiiWcfzffZn+Zf8AZ213MypH5ibM+X/C38VUNUsEmG+5h+dvm3M9eDW2JpyPPtS8P2ccbw2ybXX+Ksu+s/Mj8nzpPl2r5bLXX61bxvveF9rr/d/iWsK88mF22TtIFX5tvy1wyNfi2KGn6Z+7RIdqv/HI33t1dLpGn+WqO6bf9nd/6FWMtxNJGjwIy+X8v91mrpPD7Q3EexPOTd/eT5Was/fA1tH010jRPJVPMl2o3/PStiPZGFT5k/e7W3fLTrGNJLdZvJ37dqp8+1qWa38yTfN5JTb8+5vutVfY1MZS/lLcV6n2na+3Yv3o4/l3UT3CNcNDs2rv/hfctZUWpJHI0KfNM3zIrfepW1JLfd+8UH+7WVSXKSS6hHcxyNND5eGTdtkf7v8As1mX29mL2yKryMvlNu+7/ean32qQyTbEm+VotqNIi/NVaGbzmi+Rf7yK38NRL3tjaMixcWs0zeSkm/8AvySfxVWbw+kkbvO+Ts+793dW1ptvNJGj3iKob5nZXqe60vzFd403Bvu7n+7WMom0D8drW+S4ZH8/59u5Fatax8mHDpyzfNurnbJXjkjE0Ma7fl/3f9qt7T3dl3o+N3zJ/vV5vtD7ipiJS5jUhuHsbpN+1lkf/d+b+7Us0011H50d/Hv+b93I1VWvJlZvJ4lj2l/97+9Ve8uvLX7T8pdn3I393+9VVKhzxpuRnatePskCTM7/AGj5lb+H/ZrntQmdg/zso/3PlWtrUmmmn3xztmbd/H8tZjQ/K6Q/MWTd+8+6zVh7bmj7x
caJQs7FJPmRP9lG/vV0Ghw+XdIk21WX+Gqlnbw2bMk23d97/darenyWzKXfcPm3Iy/w1jzIqPLHludDZW/kRmZH+Vk/esv3ttT27QsVudjK8f8ADt21W0ybzIzM/wAu5du5qka6SPCPzuXbub+9/s10RiTUlCIqzTRtve5XZI+1Vki+ZaikaaRNifKzLtdm/u02adIV87Yvy7WZd/8A47Ucl0jqEmhVkm+ZF/i21pH3vdOaUipqGy4j/ffP8m1Jo3rC1aSKzhaG2hX7nz/7LVp310iyNCkPyKvy7X+9XN6ndbn2PtZm+bbt+9WtOMI7EVJH0L4Dcn9mNnI/5gV9x/39r5EvLjhXmdkO75P71fW/w+AX9lxtpJ/4kV/jPbmbivjnVNSdpNj7WOz+H+Kv2zxRV8lyH/sFj/6TA9biRp4XBp/8+1+SM3Uo08x7jfn5/mqsq7ZFdE3JRMzySNC+7C/9805WhXYnk/8AAq/GD5unH7Mi7DZbtjoiov3mX+9V2wt4X+T94u5Nvy/w1Ss50WZe277q1raX50LeZsYrv/4FWVSR34enYv2cKWsfko652/w1r2Nu7bHmiVdz7dqr/wCPVTswn+p6H/dre021mST9yjH5d25q86tU5feZ7NKhzbGjaWO0LsRv+ArVmbTYfLV4U+Rm2/3tv+1T9Ftdmdk33X+dW+9Wu9jI0QSbazb9rs38Vccqx6csLDksYElj5cy+S7bmXbTrO1MczIn3vvPGyfe/2q3bizSOZX+/5fypueq0luisu+H73zfKtctSp7xEKMea5Ts12yJNCjbmXbu/hrStbHdMzw2zN86tuamR6bsXZNc7Yt22Jl/iq9HJDHvhhTajbfKVm+7WMqnN7p6NGjHdkUlq8VwvnO3D/wAP8VX7VZlX9zMrBfvr/dqBbWGT5JnZkV/4f71TxK/mHy5t3ybfu7aUqn2TdU/7xMq+QPueYjfd/i/3qv2ckMdwmxF+6v3U+9WfHdv8v2mZdv3UVa0tPnRmTYjfK25mX+Gp5kX7GPN8Jq2tvCrDyIZLhf8A0GtCxdEmTyYWbb/sVSsxC7bIZd/z7t0f8Na8LfZ4U8652/7qfNtrOPvBKjMkjmmb3PmtvVk2qv8Ad21JJ5jhEtnXaq7XX/2aljjQS796h1f/ANlpJpIYZG8l2Z12/e+Wn/DMPZ8xJNJ5Mi3Lwxt8ir5a/wCs3f7VVNT/ANcE+x7mkXcy/wAK1LcT+XumttpZl+dmXczVVZXkb7+1f4/MXdR7xZQkS5+yqgRYXVv3qqu75f4axtR03zCfJdUZXbezL/rF/wBmukktZnkVPlVdm7d97d/dqv8AZbwTTP8Ad3fP8zf+g1vT97Y5JS5fdOSns4VVoY/MKq+7a3zUyb7TaxbJnUpJ97+L/vmuhvtL3R/Oi7JPm3b/AL1Zk2kpGvyJ/Fubb/EtVGMFscdapIxHs7lIV2Ivzbt6rVO5tfLZkhTc33n8z+Fv9mt2Sze4ZkhdURX3/wC1/tVV+zzTXCwpD/eX5l/8eraPxnFUlKUdDJWHazI80bf3m/iVqj+zwzbZ4XV5P4f9qtKS1n3Sp5MZ2vt+b71VdxRtk0Ko0fzp8lbQ5ubmZySkQ6XZzWZfyY1USRf6tvuq1blnst3CD5/kVHkX5qzrG3tpJHmRFRfvNWhptukczfO3m/df+6y1jiI8xth/eNuFVt5Gmk2lmRVVdm3dWrp8iLIqbI938G7+Ksm1ieR/Jm+VPuxf3l/2q2obPzlXfyisrPu+VVbb/DXBLm5fhO6n8Rr2az3qpsTLSPh/l+XdW1DC6tvfj/2Zf4qy7PfGhcvgr9zb93dWxoMkLbo/3bq3yozfw0R5Oa3Kd32OYv6fawSSfuYflb5vMVvl210em2tnJILxE27flXanzNWbpum/Z2Xy03p/Guz7q10ej2SWqom+QKr/ALr5q7KNLqjnre7EtW9u+7HyiX+Fv7q11nh9YVkjffub5vm+7trM0tUm83z0Z13fvdybWat/Q7GY7fnXbHK2/cnzbf4a9nCx5fdPLxEpcvMdP4dl86QoXj2/xr95v96uls13Qs0yYXftiXZu3Vy3h+N2medAw/vN/wAC+7XXabInmrNC7bV+/Htr16ceU8mUpc8jZ0+N5IfO2Rqq/fZf4q3tPhtvmR9x875YpNu1lrP0dra6mR0RZ0X5dqr8rbq29Nt2a3+R1ZfuV0cvuj8kZvijw1D4im07R9jbFulnuIV+bzlX+9XM/Gy1v9S1y4mtbO3a2tYl8r+Flb+Fa9C168+w6em+5hiNvas6SeVukjWvK/FGqalqmmh9Vto3W8uFWWSPcvyr/FX5/wAQ1JfXbfZIpylUnc8X8eeHZta1ybWLb5FV1WWNX+62371cpJ4ZexvDeQ22ySb5ZZFf/wAerufF2nzN4gFn9sWG0WXc25du6s34gK9uW1CwRo08pYov7jSf+y140Z8kdDX2c/anCeLrG51BbPTYUzFDFueTbt3N/F81c7rmh6Vo9xb3J3Mm/dcRzP8AKzL/ABV1Hi37Tp+mvcwzbxtVXX/0LbXnmuapNHbyec6sv8PmfM1KnU925208PJHH+OteeHVnuUvFmXzf3Xlt8qrXAXF9f6lrTzTfvdv3lX7zVf8AF1xbbpHkfbt3b4/vLXF33ixLWP7NZu25UVmaP79elRqU1DzPNxFOUi94pu3Wb/SXjjSaJWVZH+aua1BnuL5Utn3/ACfdVayLzXLm6uH865bYvzIrUln4iextxIjru3/e/irqp+7Gxx1Ixeo+4kuVvEm3qJd+1m+9V688Y3NrE9sk25FRfmb5dv8Au1nfbLa6ZLmG5Vfm+633m/2mpdc0u2uLdZvOVt391f8A0Ks5QjUiuZkU4zj7w/T/AB1qsjecl4yOv3Nr/LXTeHfHF/HeIl5DvWRW82SR/u1wcdu9nHsS2VlX5ty05fEV5br+7Rvlb5GrhrYKnL3oo9Cjip05R5pHvmg+Pvstiz3OpKv7rb+5i3sv92uq8G+OLnct+k0LIzK3mXD/AHW/u7a+b9N8ZO+62eZQW/i2/LXY+F/E1gq7Jnkd2+4u793XkYjC8vvSPrsHm0JRjeR9g+Cfixpt9dLazX+2WaL7sn8W3+7XoUnjbzo4kv7a3hWP/ln/AOzbq+V/hb40sLm7hTUtSsYnj+W3mZtzf8Cr3Dwr4ZsfiRdIlz4h2Sr/AKpreX5WX+9Xh4iLjPm6H2GHxP1ilzQ2PbPgb8XvD2k+K4rO5hZtr7XkZNyqv96v0V+EvjXwS/h3Sdam8QRyHd5SRRy7V/3Wr8ytD+DWq/C/xBJqWy6u2W3Votr+Y0n8Vfbv7MPiDwV8QPA9nZ23l22pW67GtWXay/8A2VVQxUcLpHqcmOnSxVLlke9fEm5XXkNnpTxzwrubc0vyrXi/jBr/AE+5s9BFk0TSSxsixq2yuzuvBOpeHrq41J/Ekjxxtt8n7ytu+9Whpuj6d4muLC8lf/j3n/f/APTSP+7U1sd+/wDfPNq5fD6tFwex+cn/AAWe+FltoEnhL4hWdm0Dx38lrK0b71aSRd21q+JJ
Fh+dJn3/ACKv39qs1fpH/wAFydPkvvhBaa0ty0aWviq1eKGOL5Nu1lZq/NqOW2WH5IfvV9dklT22D5vM4a9OeHlb+6TR3B3LDsVJVT5P4tq/3aqXlw0jL+53bvl3K+5f+BUt9HtYXNtbb327d2/a1Q3Vx9mjbYfkb71e7GJ58pe6JhPJV03A/eaRvvVY02HzJH859yt97/4mqdvM91j7rBv4v7taVjbyXDo+9UT7v3PlrooxnE5akixpNql1JLNCjE7vlX+7XRWNnD5j7JG2/wC0vzVSs7ZNqbvmbY3y+Uytu/2a3NP0maPZ8m4f3mr1acfcOCp/hJtN+aMb/wC9uSNvlatrTZHWZSibv9pvmaqmnw+Ym/ztxXd95P8Ax6rDM9o8To+6Jv4dtb8hyVP5TV8x2UwRuro3yyrTnAZyqPuz3A61QhuraGRnh+T5/nZv71W42IgDOwchfmKdCe9ftPgpHlzvHr/qFqf+lQPoOGl/tNX/AAP80W7W6e3kZ/l3bNu5n/hqG4V1uns5vnh3L96L5vu/3qqTagkLi2hfci/dZqurN5kKPcvuVmX5t/y7q/FfZ+8fMRj7xE0KQqdm5Ny7kX+9VGUw3ELXNs+1W+5u/vVbkv8AzJJUEGWX5vlqgt9uk8nYvlw/Ntb7qrUSlOMdTejR9pLQp3EaMuxLZcq25JN38VaOi6Dc31wj36K3zf8ALP7zNTNLsXuLhJng2RK/ybW+Zq9B8D+F/MxvmZl+bylk+9urSjHmPUjg/ZxViz4b8Jpu37I/m+aWTZ8zf3Vrr9J8NzXStvdVLLvTan/oVaPhXwv8qXLpGm196f8AxNddHoPmW5e2RY2j/i2/w13xlyx0IlT7HKWOhwyQ+fCnyyP/AHdu7/gVSX3h+2sbf7TfoqRw/K9wz7dtavi34heD/Celu80P2iWNNzxqnyrXgHj/AONWveKLpraFGuEkbbb28K7VVf8AaojKXNZI8vHY6hho8rkdF4u+JnhjTZJrawtZL64Vt22OL+H+9urzbxp8SptXuks9ShmuIZEZoLGxXcqt/dZqd9ovI8/29fx2X96GH5mb/ZrMvfHGg+HY/N0eFUm+YJNs+at6cT5rFZjXqe7E0re71W+hW5fw9Z6fab/+Pf7jSL/FurE1q88N2a74Y7fdJ8ySN83l7a4vxZ8ZLydZYU3B1Vv338O6vMfEHxQ1nULje9zu/h+/92tOZHmxi/tSO28ceOLa8upIba537fllZv8A2X+7XmuvaonL72VV+4u2sm98WXNxNsmOVb+L+L/easi+1a5jmb59zN/C1OS5jWPvfEUPECbpGmhfhvmaNVrkNc08M2/zthVvu11VzqRZX85/9la5/UJppH2STKp+6rNS+IuMjgdfa5t2b59tZsuyZTMiMB/FXVa5pcNxGyfL8rN97+9XMeS9rI9q6fLv+9soiacxVjupo5vkfdt/u1safq0b/uZuVb761z90r2VxseHZ89WLe6SObfv20+VE/Ebc19daPfRajbTzBo2Vt0b7WX5q/Ur9lfxR8Pf+Cqn7Hd9+zf8AFHUoW8b+Ebfd4cumf97Mu35fvfNX5VLfJdW/7ybmvQ/2Rf2kvFv7LXxw0r4keD9YmhaG4VbpY/8AltHu+aOqjOVOV0Zypxloh/xm+APj/wCBfjvUfA3irR7hJrGdlaTZ8rf7S1ylrNtkP2lGRl+Wv2P/AGkvg/8ADf8Abm+BumftB/D23t/tN1oyz3/kr92T+JW/2lr8wviX+z/rHhXV5Ib+w2Osu1GjX5f+BVxY7CxlHnhHQ0w+O9lL2VTc81kbzMTfcOz+F6csiSKHn4+X71ad54N1LT5pUmhZhv8Au1XXSX3bJoZNjfd+SvD9nKOx7Ea0ZRvGRf0W1TcjmbP+7Xrfwn8Ra94baJ0+dfN3bd/8NeefDT4c+I/Hvi+18M+GLEz3FwSyR+aqDCqWYksQBgA19sfAL9mTXPh94c07xv8AEn4WXKabc3rQ2utSRM9rLPGQZI0cfI7qjKSoJI3DI5rqfDXEmb4T2uXYCrWg5ct4U5SXNa/LeKettbb21OrBYqFGpd1VF+bS/M3/AID+NPFuuapbaVbabMBNb7omb5fvfL8telfDAPpPxis0u5gGgvplldsYyFcHNerfs1/ALxv+0L4uvx8EPA11rv8AZNuJr0QQrBHboc7VaSUqm9sHam7c21sA4OPIDN/wjvxWvG122msmttUuUuoLiBlkhbc6lGQjKsDwQRkGv0rwi4c4gwmX8T4DEYOpTrSwcoqnKElNynCpypRa5ryv7qtr0P0bJ8fh6mFqz9tGfLFttNWSVz6x0m/1LxIyWejwxzHZt3N83/Alr5P/AOCjPxU1Lw/qNh8HP7NutPm1CJb+XzomjeSFW27l/wB5q+mf2Rv2gf2Z/A3i+01r4v8AjmWCzjX97bpo9xMR/s/IhrH/AOC3XxO/ZN/bL0bwX8Qv2Z/FIvfFnhmdrK6sptFuLMT6e4zw8iKvyt2Jz6V+W8OeDfG8JOtXyzERt8MXRqL/ANtPj804lw8aqjTnGUX2aZ+cljeJG2x9qL/G3lfMy10uh/d27Nz/AN1W3K3+81QwfC/x6DiXR0wq4BNzGM/k1a2mfD/xfbsslxCRwBtWZPl/WvsKHh7x1DfK8R/4Kn/8iefWzHBT/wCXsfvRPa77VUd0+fczVq6bqCNIiXLrH8zMn8K10/wt/ZM/af8Ai3FLqvwy+DfiPxFDbORNd6RpElxGhPYsgIB9s5rJ8UfDD4l+Cdem8N+N/Ct5peoW8o+02Oq2pgliPoyPhh+VdFDhHimtiJYaGCqupHeKpycl6q118zzKuKw6V1NfeXtNvLaRdn2be67f4/vVvabqVz9neHzo2b7qf7NcVpmm+I7OV2ewQqwwVMoOa0LdNfRmPTccBfMHAr1o8B8aR/5l1f8A8FT/AMjjWMoKWsl953+l+IJo40muXj2fdbzF/wDHq6XTdYeO3TY7IsifJN/tVX8Ofsh/tc614ei8VaP+zT41uNOuoTPDdQeGrh0nhIyHXCfMCOQR1HSuT0/xJNp90Y9UjmjeJjG8EynKEcHI7H2rmwfC3EuOlKNDCVJuO6jCUretk7bdS/rVKMdZI9d0TxB9qT53j279v+9WvY6hM0e+bbt+6nly/erjfhJ4Z+I/xbvZbH4Z/DvX/EM1oGLLo+jz3YiRv7/lodv44rofHng/4nfBeC1v/it8LPE/hu1lfEVxqvh+4hikb+6GdACfbOa0fDXEFPFrCvC1FV/k5Jc3/gNr/gRKtSkubmVjq7PVIHVUtnba33m37q3tJ1a2hZPnmD7l+VU3L/vV5D4W+JehaxqbWmnXjj9wWZdjK2B15Ix3rqtL8QXgHlvcqyt9xll2s1cmNy/MMqxXsMbRlSna/LOLi7PZ2aTsZ+0jKHMnc9b0nXEaFZELb/uu3+zWwutLJbhHuVBk+Xb/ABbv9mvMNN8S3knV1ZPvPHDW3H4ihZmfzFLK+23X+Jm/2v7tZRkYVI8x12p3ztIHhRn2/K+56+cf269h1Hw0YidnkXW3P1ir25f
EFtNs3ncGZv8A9mvCf225mfUPDsbFDthuiChz1MVfo/hXOMuOcKvKp/6bkfScGQtxHRf+L/0iR7X8Eiv/AAqDw0zbVP8AYtuqqf8AcHzVta00ccKujr8q/Kyt/DXJfBzWBb/Cfw/ayFcNpFsu70+QVs6priyRy2yPGgb5EmX+Kvjc7nbOsT/18n/6UzwcfT/4UK0v70vzZT1a8gaI/wCsLKm1PLX7v+9WPeXlzG2zf5X7rb5n96nzal9oXM25fl+eRW3M3+zWPqGoeZZhH2q6tt3LXhykZRj9opa1evNseFG2L9/y221zeoaghDw/K2373yfeq7rl5Naq+zcu51+VU/hrjNa1L95LZpuRG+40b7WrnlI6OVnQ2upfMsPU/wADK/y10/hy686PY94uGT7u/wCZq8xt9Uhjx5LxsZF/heui8P6w9mqpDdLhk+7t+ZWrn5uYJHqWk6htjMds7P5cu3bUlxcOYYkhufNRm+f5Put/tVyFr4kmWMrvWP8Ai8xadceNINy5mZN3y/KlVKXQn3DYuta3XG9Ewyu2xm/h/wBmsq68QQrcF5n4b7i/7X96sK41pod8KTbHmfcm5vvL/erBvte2/uft/wC7VfvM+5mrKpUHGPvHZT+Irb78L+Xt2/x1fs9Ze6aVPOXe3ypt/hryW88WbZP3Lrvb5dqr8v8AvVd8K+NppPkdN9wvy7mrGUuYfL757tpN9Ctp++mjdF+XbI3zNVxtW+0Ls8tk3JuRf4a4Gx8UJ5KXM3O77vzfe/2ak1LxRND5bs6qmzdt31nKRfL75+U+hq62ux4WKbNvzPW5ZyILVLb93sb+HfVCz08xM/75iPvbdv3auWcczNvhEinc29W/9CrwZYj3/dPu6dGESbdMrK838Xyr/u1BdSW21Uh8vP3mWrtvbvJH+++Z/vblqG4sU8l3jeNmbcrttqfbc0uY19jymTJawrGrxupZf/Zqht7M7nkdvvfw/e21oNZzIo5VW+8yqtNa3m2s8KZPy/M38X96nzc0feJjGX8pnbrYMqPuc/dT5vutUkfkxzRo/wAzL8u7f96l1K1aw/d7Mbf4aybi72wvs3Nuba1ddOMJ8tjjqSlGVpRN2z1KZv8ARk3IPmZf7taH2wMvlum4R/3f4a5mx1DzIx+++WOr9jq03mebD8m5NvzfxV0R+02YytLlsabSJJH52/btb522/wDstRRzJHCd8zL83y1Q/tB4Zt/nKGaL96zVWvNSF0u9PmCp8u6nGJjKp7wanqkJ3fPsb+9XN65qXmNvtn4+7uq1qV08bb0df3ifOu6sDVt/mO8LqC33Pn+7W8Y3OOpU98+pPhtMJP2TDM46+Hr8nH1mr4pvNQeRmdP+Bsv8VfaPwzJk/ZBJHU+G9Q/P99XxV9jfyykaZ+fb9/8Air9n8U3bJsh/7Bo/+kwPoOIkpYXBN/8APtfkit5n2jekPyVbs45lVP7n8dLY6f5cjK/zO391PvVah0yaObePmX71fjT+L4jwqcfeJbWGGWTZsZtrfK38Na9nDOP+WyquyqMNu9u2x0zufcu1P4a0FjdRsRN/z/w/w1zVOSJ6uHpmzpLQ/fTdt+78yVu6bP5bF4ZmUMvzq38VYOmyeaodE3Bn2qv92t3TYXkj/h/76+9Xl1tj2MPTqy5bHRaS0yrFDbJnd/e/hXdW7bxpHMvnOz7fmTclYOls8ewbNh2fKrVprdIzQpMm9N38TbfLrzKn8x6fN9mRLfLN9lZNiqJn+833mqCaF4UKIONu5d396nteRNuT95Ii/L8v8LVB8/nCNPubfvK3zVnL3o/EKPuj2urmRYXR1Xy02/8AfX+1ViGNIdvnf6xtv3kqCOEtcbx0/ut/eq9DG80zXMyLv+6i/wASr/FtrLm5TeMeaRHDDCyskMLL+9+dV+9T4983yP5imrMa/wACIu6FdvzPtZqfDYvHGEeFl3ff/vbqcZc0jaMeWXMRWdqJGXZCqhVZtzN8zVo2cc8kiwoissi/d3/xUsdi6yD5l+X5kbZ81XLS1/fAnaVX5tzfxN/dqvcO6j7xZhb7PDEnnMm5/n2/xVp6fcJBGmx9219r7vmZqotGYdr3KZ/etv8As6bvlqxbyPCqOibd33f722pjE0l8HKzRhvLZvnuX4ZPmZabN9mSHYtz87N8rN92oLWN5GV0T5Fb5P7q1dtbW2uvn2KxZ/wCLd8taxjDm945alP7JXjt3kjFtD8/zbfm+VVpYbONbiGaa5ZHk3fNt+Vvlq5Ja/uWtvLjc7N+7+7/s1at7OG4mZIbbb5cXyfxLtolK0eU5qlPuUfLea3XyYVSXfudZPvbalW1m8tH3/N91FVf/AB6tBbRJmZPOzt27P9lf7tLdw+ZNvs0YP91/4qPdjojkqRMZtJRoUd33srbn/wBms7UtP8uZt8O8r/t11E1q8jI43IPN/wBWqfLVHUrdPLf7L8kv3mb+H/dq/djpE4pf3jkrjR/Oj3o/l+Z8u3+Kq91p/kyhIUyqp97+9W9dwpHCNQuvmMfzbV/hqBrdLiX5+sf+qjZ9rN8tPm9/U5qkZcpzd1prsyPGka/N8y7vm/3qrSaWv2NIXdi7S/d8rc3/AH1XSR26XkYmlh2qq/6lfl/4DTZoZGby0ttzR/L8rfw10c3uxSOXl5pXic3HbzQ/PCnLfK21Plb/AHqt29rMsS7y33Pk/vVsfYXj2OifvW+5/d2/7VO0+1mto0e12sGZlZv4VWolyy1FR5o+6QaXYpDDJvdoljTbW9pcKKqQp+88xPn2tTdPh3JsdPmb/a+ZW/vVqaevl25hmT/a8xf4q5pS5Zcx6VGn7xNprOV85E27n+833WWuj0S1SSRU3xq0i/Lu2/LtqhbKkdvDC8O5FTdAuytLT1vG/ewwxp91d0abWWsYy5p8yPTjHlh7xq24mnb/AF0gKvteTZ92uj0mFLWQIgzt+bzP4t3+0tZGnybo4U+6Wfc3+1/tVu6fcQrMru6nd8q7fl3V6VA4a1O0DpNNhmWJrlId25f/AB6tbT2hFu0LzK5Z13R/3qy9KuI1hMPkyIsjbUb7y7lrU09vLmRZvnbdu+58q17WHieXVlyx+E6HQltrq4/ffu4mTci7f7tb2lW9rCq+SjFF+98/zbaxNDt0jbYiNtbd5TNXR6THNGoT5X3J8m1vmr1I+6eZWjyyOj0K/kZXRJtkTKzJtT5l/u10cMrwr5s21UZ1ZGVdzM38W6vN/GnxS+FHwZ0ubW/iv8RNF8NQRrvim1jVI4Gb/dj+81eV6X/wV2/Y68SfESx+EXwf1jxT8QfEOqXCwWGm+E9DZ47iRv7rSbaUqnLDmMXWoRjrI+ifFFxN4ovLlLDUpporO6+zeW1rsSPavzLu/irzj4ta9Do+mwW3nLHLs+WGN/vf7TL/AA16N8PdJ8Q2PwzvbzW9HurXVdS164e80e8b57GRtqrCzf3q+V/2n9c8Q+F/Fl9qv2BkRYPKlVm+ZZP9mvzzMq31uvPkFR92XqUfFHxK0S1vjeXV5MWb5Yo9nzbq4rV/jgl9pc2lb12Ryr5rfL83/Aq8S8
V/ErXvE2rD7ZD5QWVkVfvVreCNNe81S3037MzyXG2KCGOLc00n8Kqv96vLo0eX+LK1j28HSlW+E9b0O4TVtLuZtYmVbaZF8qSR2bav+7XlXj6+8PWf2m20HWFnjjbb8rN+7k/iVq/RLQvEX/BK39iHwLpfhP8Aa9vYvFvxBvLCO6n0S2iZrfT2ZdywuIm2q397dXz98Xv2n/2cPi28+k+B/gX4L/sC6l22tvpFh5U8a/3mk+81YYqthcLCNRPmb6I+kynI8djHJVockOkpdfQ+DPiHqE0in7NMvy/M7Mn8Neaa9qH2e43wnKt99levqT4zfs122oeF9R8f/BZLzUbKzRrrV7Fl3S2cf+z/ABMtfJmuXCtcM6J8snysrJt217GV1qWLjzo+Wz7L6uXV+Uc2oQzRGTfuVl+7TDaJ5auj42t8q1Bbyfak8yFFbav3t33qdb3l/IzWboqRbtyyV6dSPu+6fO/FrIms43hLF93zfdq3b69cK3kzbVj+7VCZXLH99vVvl21JDNazb4Xhb93/AMtI/wCKuaUfslxlKPulu+Wa6t0e2+T+/tqte6XDdTvDDcyRr5W7bH/erofDdmLqJEez3p1T/dqxqHhKFlTyf3f3m3Vl7SMJcpp7GrL4TibGzmiwZpm3762H1XypERHb723/AIDUWqaOIV8yCbdu/h+7Vnw7b2cciXM0KynY29WqKkY1I8500ac6fum34RuYlX7Q8s3yvtVt1fZX7H6+J45orvw94P1KdZFVHaSDaqt935Wavmv4dX+pLJCmleBmuhD83l/Z9yt/vM1fcH7K/wAUviRpGoW39saVss/K2yq21fLk/hXbXyOZ1HKL5Yn2mSynbl5j6Ct/jN8fvBOoQf8ACQ/Bmxn0+4iWBdQaeFp1hX7zNHX0B8F/FHw68dWUmsP4e/s3VVWHb5f7v5t33q0fgN8Mfh/+0d4GfTfEunafq10sH+re42yw/L/Dt+7trzX4ofDm2/Zb8WQXmm3/AIo8PWMl1HEsmsJ/aFjJu+7833o1WuOnhZVKfPDY6qmKiq8qM9z6bh1mx0uyntteij8xvm8xdzVDo2uaKshSzmhRNu5mZvvVk+D9S8U+OvB8esaL4u8H+IbZXVWls7hkdfl+bcG+81TxaRYrI/2rw1a7JNvzRp/DRWoxjOMJI9HBuFSlKPU+cP8Agql4Jk+OPwA1qz8OaysI0OzW9WSKLclxJG27bX5RWNvNcQpsdlkaLf5cif3v7tfuZ+1/8NZvGf7OHizw74X0mSGQ+Grho0tdq+ZJt+Va/FbS9F228NtMn76HdF8y7WXb95a+q4f0pzj0PEzWpSlKHIjAk092jbejJu+by2Sq8On/AOkI8IkV9jfLXWalpfnw/uY22L80rb9rVmLp7xyDfNhoX/1m3d/49XvQ933meZKMTMtdFSRUh6P1WT+8v92tWx0l9xtgitu2r8qbttWrOG5k3iHco+7u/vVtaXYo0xSGwwy/c3fe/wB6vUp8/JqedKXNMP7DSOZI4fMYx7W+Zf8Avpa14fN8xbaFNyMm5l/u0+xsbn5oYYWd5k2r5jf+PVLb2KMyTeQ25V2bm+6tehRjE56kv5SG1Xyo3REZG3svltVhVuZlIebZ/wBM1Tdupz2vll3Tbs/3/mprRzQozv5jOu7738O6uuMYnLU5+UqyjbGQ7q3z/wDAdv8AerQtVij0gLGRsERxg9uaxL5UbytkfyMq/Kv96te0YnQctnIhYH8M1+z+DC/4Wcf/ANgtT/0qB73DP+81V/cf5oox3010yuAr/vfnXftWr7zxyWfnfdVm+TclYcM8yzBEtlKbN8rfxLVqW4to7GPzptm3dsXdX43y+5ynzXLEbfXz3UiIjx+WybvmqCTU3uplhmdpG27UjX7u6sqbUJlkM33f+esatuq7pc0zP89tuf8Aux/e/wB6ueUJnq4Wmd14TXy496BZtu35dm5lr1XwXpUPmedNucMjbFZfmVq8q8Ntua2R0+VW3/N/s16TZ+Jk0W1+2XLybmXcqxy0U3GmejKolC0j1C1utK0mxjtpkjik270jk+XdXPeIvjBCt0dK/tCO2t/mZG3fe/3a8a+KX7QFh4b0G41XVfENvbCO3ZYvtT7mb/gNfMPij9sa2ub+7+wXLXcmzylupvlX/a2rXTCPN77Pks0zmXNyYf7z6R+MXxd/tqS68GfD1GvLuOLdK2z/ANCavFdW8ZeKtPjmzYMpVN0sjPu+bdXkl1+1Br2m2d1YeFXa2mvtv2q6VP3jViap8dL+3sxNqV5nd9/b/FXVGPLE+Ylz1HzTPUNQ8eeJFZ0eGbbJ+9dpP8/drC174iefbjfcsJWVn2r/AHq8l1v9oK61i4Pkv5SL8vy/xU3TfiVbatcb9SRdn8at/FS5pyHGnI6O+8eJcSPbTTb9rbvL/u7qxbzVIZpH3zbPn/heqWqSaVeRrNZ3kafNu8tq5m8vHhfyU3My/wAX96rHH3ZG7ea0nmGV/vf3lqhcaw8iv87b9m3czfw1k/aJliZ5gpo8x1YPnf8ALu8tarlHzIsXGoIq702/7u/71RsyfK6HH91aPLh8zZs3N95GpJGT7WyMmfl+8v8AFTjyi5ub3WZl5bvIqp94turndctZll+VNp3fIy11zL5ZLu+1v4dtZ+paa8zIh+9J/t/doi+YRzGrabbapH50LqxjTc6r/erDuLe5jZUmhZWrc1TQbzTZmvLNPutuZf71XdHt9K8Tx+S/yXP93/apgcvHJNGwTa3+9UU008cyuj7Sv+1XZXnw98lWm3sm3+KsS98Nur70+b/ao5Zhzc0j7w/4Iy/trXPgPxHJ8BPG2syHStcl8uykuJd0ccjfw7W/vV9J/tPfBXQfE2qXCf2bGGaX7yrt/wC+a/I7wfJqvhXXbXXtN3b7W4WVNr/NuWv0j/Z9/aY/4XB4FsX1vc95awKl0vm7pN3+1upe05IcsjlxlOM+WX2jyLxP8BbnwrdXFtHbM9vJ/FJ8zf8A2NaPgD9lez+IUkVnDYXEcsjbH3Rf8tP9mvr34V+H/B/jLWLew162txFJLuZW+b5a/TL9j79jv9iC60Gz1iezivtUZN/79fLVW/2a5vqcefm5vdMKeIr83LE/Jf4Xf8EsPi34D1XTfil4e8N3WomeZbGytLOEtJLLMwgRFUdSWcCv1c+LP7FH7TPiv/glb4F/Zm0XwbHL4z0bWUuNU0ptZtlVIVlvGA80yeW2BLFwGP6V2/8AwVM8C+DPhz/wT88Tz/D7To7AwalpTwT2rkOrC/gIYMOQR61478aP2hfjrov/AAR2+HXxa0n4u+IrbxRqPiFYL/xDDq0q3lxH51+u15g25hiNByf4B6V+xcN0c8XDeULLZ0kv7R09pGT/AHvsVyt8sleFubmStK9mna51QbvP22r5ena/5nz1+wj8Uf2+/wBnv4ieLvhB+zj8L31jU4YJpfE3hXWdNZ0spbcFTMR5kRjlHKBQ37wlVCudgr5v1STxv8S/iVdzXenXepeI9e1qV5rW2syZ7m9mlJZFiQZ3tIxARR1OAK+1/wDgg
5d3V9+1B4zvb25kmmm8EyPLLK5ZnY3tsSxJ5JJ5zXi3/BOOaztv+Ci3g271GaKOCHXdQlllnYBIwtpctvJPAAxnPbGa/dauc0sq4lz2vHCU/bYahRqynFOMqsvZ1JWk7vRciUdLpbuVlb7TgyDnlmOV3Z0pL00ZN/w6k/bz/wCEN/4Tb/hRNx5H2P7T9h/tS1+27MZx9n8zzN+P+WeN+eNueK+f9a0XWfDer3Xh/wARaTc2F/ZTvBeWV7A0U0EqkhkdGAZWBBBBAIIr9gLLTtXg+NEfxdvP+CvWgz266gJpPCudPXS2t882whF7gLs+UPzJ/FuLfNXxf/wWa8TfA/xp+1NaeKPg74s0vWLq48OwJ4lutHvFnhNyjMseXQbS/k+WDhm4VQQpHPn8AeJmfcQ8Rxy3HU4VIzg5qdKlXpqnKNvcn7aK5k09JxtrpbU+FxGFp06XNF/e1r9x8jV6J+yZ8GrH9oP9pDwf8HdVuLmKy1zWY4tQlswPNW3UGSUqTwDsRsMc464OMHzuvpj/AIJDaxpGj/t7eDjq8ir9pgv7e2LQq2Zms5doyfuHryOe3Qmv1DjDH4rK+E8fjMN/Ep0aso26SjBtP5PU5aMVOtGL2bR9B/t+f8FMPij+zH8X1/Zg/ZTtdH8PaH4J061sp5TpiXDNJ5KsIUEmVSKNGjX7u4srZbGBU3xS8a6N/wAFMf8AgmdrPxw8d6DBZ+P/AIXXM7vdaXDhJAqxvJhWJIilgYFlzxJDuHA2n5I/4KM6dqOl/txfEy31QsZH8TyzIWjC/u5FV4+B/sMvPfqeTX0x/wAEtZ4fDP7BX7Qvi7xJLt0ltMnhXfapIvmLp827hjhyfNiG08dPU1+HZjwtkXDPAWU59ltJRxlOeFn7WP8AEqurKCqKUt5KanLR300XY7oValXETpyfuu+na2x+ftfTf/BKLUf2b/DH7TD/ABA/aP8AGGk6VbeHtFnvdAXWkHkSXqkYfcwK+Yib2jXG5n27PnCg/MlFfvfEOTriDJMRlsqsqSrRcXKFuZJ72umtVo/Jvbc8+nP2dRStex9n+Pf+C2P7WNx8X77XfAF/okHhaLVW/szQZdGSRZrVXwgklYCbc6gFiGXBY4C8Adb/AMFvvh94SkHw2+P8PhtNC8S+LNLki1/SnWNZmMcULoZcEF5I/MMRfaeAgJG1Qee/4J4/sVeFPBvhtP29f2xbiHQ/Anh5Fv8Aw7p+pKQ+pzKQYrlk+80W/b5UYBadyuAUwJPEv23v2rPEn7bn7RE3jK2Se30aN107wjpV4yRm1td3ymTDFRJI5LuxYgZC7iqLj8bybI+H/wDiIuG/1Yw8aVDL4VIYmtBWjUlKKjGi2tKk4P35yd+V7tS0O2dSp9WftXdytZdvM+9viB41+P8A+z//AME9PhRe/wDBPnwTHqtpcaVbya3eaZpR1G4h3wCWSUQ7PmLzmXzHKfKQBhc8aH7A3xa/bD/aL8K+OdC/bs+Hvk+CJNBZBqOu+HhpjTBwyzRbNqCSLyg7M+35CBzzxhfFL9oTwr/wRz/Z38Ifs9+CtMuvF/i3VLaW/mXWNVf7LZsx/ezBVHyxGbcscMe3IV2Z92Wdn7LP7f8A4e/4KW6R4i/ZA+O/haXwtqfiHQ5xa6l4U1WWFbyMDMkaBtzRuqfNtYyRyKrhlx8rfjmJynM8Vwni8wo5ZCphJV51Fj2l9a9l7W7qqHMpvls9bpW15bXZ2qcY1lFys7fD0vbY/MrTrrR9F8Y3zaRcNLYRzSpaSMxJki3/ACEkqpOVA/hH0HSuw03xhbM0Sb9wb5n21x3x68D3/wAAfip4i+GGuXcc1x4f12fTXuIiGWQxuyh+CcZABx1GcHkGuWs/Gezb9nmk3L8zqv8A7LXoeM2Ko1uLoVaMuaMqNNp9002n81qZ4JNUrPuz36w8UW0MiTJfsD8qqrf3WrorHXkkjk2Oqv8Aw/w14To/jRJBsjuYyv3fvfMrV1Gk+KN1mN7szM/3Wf7tfkntPsnXyzPYbXxJCzJbXM3O7d/eryX9qHUBqFzozK+Qi3Ax6cpWxpOveTIl59pX7/z/AD7t1cj8d9Sm1ObS5pX3YSbBAAHVemK/SPCad+PcKvKp/wCm5n0vB0X/AKwUm/73/pLPYvhrrX2X4baNDblo5RpUI34z/CK2brWofJWFN3yr8zbvu1514A14w+DtOtt64FlGo/2W21oSa9DHj52ZvvL8u75q+Lz6rfO8Uv8Ap5P/ANKZ5GMp2xlX/FL82bd5qz28jzJDlJH+Zay7zWk8xkfdIn3X2/w1nTau/mFEvFDMzNKzf8s6y7zUbyNPtI2lm+bbI33lrxZVDn9mP168Mil5H37G+dVrlNWuLBlNyke5t33lq3rmrbYy6zKnz7W2r8rN/dWub1LU3+wB3dmWOXZ83y7Vb/0Ks5VC/ZsS4vHjme5hdT5b7tv8O7/Zra03XEtG37NjMy7Y9+7/AL6rh5L65N1FbQyYWRWZPM+VGVat2+v7HTznjR2+XzKzjKP2hcsz0+HxAlxalPti/d3P8m5WqHVNaj+R/tMaFdvm/wCzXF2+tQSQokM21lT5WWqV/wCJHWNXlfczfL83zNVRkZ8h02peLIYrzelzteN1Xds+by/7qtWRrWuJKyTWz7U+66yNXKah4ofzG/1mf7sf8TVg3niLzGZ5pmUMv3Vespe8VHlN688SJ5kzo3K/LuV6k0XxpNHIlykyqrbl2/3ttcBqGsJ80MNzhGbc6/d3VDpetJJdCaR2T+5urjlU5TqjThyXR9AeF/iHbP8A6m/2t975n+Wr83iaZl2fafN/hRZHrxzwnrWI0R3Un5tzb662z1BGtfkm85f4/L+Xd/u1lzSXLyj+r8seZnylHp/l/vkhZXkf5GkTbTzpryN5yf6xvl+Z/vVu3GmySSSfIr/8C+7Tls9zbNm4/wAXyfdr5n23Kffex5jOh0nyYZER/up937rbv96q8lqk6s7w7PO+9XQx2M1x8kjt83zf7TVn6lp8bODv2n7r7aqnU933jb6vzJWOZmXy2Ih8xFkb52/vNUflzbZZ3eTb/B5j/drV1S1VkH+9uZWb+KsfVriHzOXYP8uxV+Zfm/irrp1uaASwvLH3Sjqzedbkh12r821fvVgXV5tuGfew3JuStPWJHt7UmbdlfvMrferDvJkbHkupXZ89ejhbbHmYqjOQ6Obyo1RvmO/d9+rdnqU0aiH7qx/Mism6uekmfzPOT5G/753VNHceSy7UZf4mZn3V3yieVKPIbkl552f32N1VJdQQL+5fd/D96qclwk0sf77Ztb/dZaZcbFjWOZF+/u3bvvNUSlymFSnPl94Jm+0SNDM+Gk+X/drLvm3fc/h+7uqzdXTxt++Rn3f7FVpI/tHzwwq6t833qqUupyyp+8fVHwsjP/DIoiwcnw7qAweuczV8i2uk3LMYvJVW/jVq+wvhTEP+GWI4SMg6HfDB/wB6avma30MSMr741b+FVav2LxXny5LkGv8AzCx/9JgfUZ9GMsPg7/8APtfkjHstL8uFUdNjM/yVeXw7MW/cv95K
6LTdBTy0huU2/wCz/FtrYi8OvGw2bdjfL92vxCWI5TzcPR+GRw66DcrComg3fNuWT+JaktdL3bUQNjdufb95q7O60H7Gphuk3/L8jR1XXRYX+dH+Vk3LurD2nNuerTjHnOcsbOWJTzj+FGX5q3dGt5vMR9/y/L91tu6pl0W2W3WFHY/MrJHt21o2tnbW0KI6MV/j3fw1yVKnNHmPUo7sWBo5FSbzJPKX7jfxVOuoO0mx3xu2t8yVB5cNvM485vKZ/k3feVdtH75Y2R33/Lt/4FXDy/zFSqRWxaa4m+0OjpwyfPtf7zU+OOZgqJ95fmdmb5az/MeGQb5v4Puqn3qspHDdR+dM6ouxd+5/vVlKP2gjU5vdL0MQW4D/AGyOFf4P96tDYjQpsmVXZ9su371ULWES2/necu3d8u3+Grq7Pm2T71X+KplUOqmW4YbZbf7/AJjb937yrNrG0i8Rskit95n3Kq1VjmS4k2TDcy/Kjf3quRqkaeWkytJ/00/2aUZG8eaUvdLljdJcR7441Hltu/efearpZJZPJfazMm/aqfdrNg33S/adysq/Ky1cb5Y/OWFhHt3bf4mrSMfe5jrpy5feLcLTrDL5jrDuXc391qnjuPlE3zMfuOv92qtqXuof3Yj/AHi/8B21ah0/fGPk2s25VZf4q1px933jeXve8ixb71txNM8m7c33f7tXrSP/AEHzHud2377fd3VHptq7W6P8u77rQ/3Wq3Z+bCp86ZdjPuePbW9OjGXunNKpyyEjjMli1y/y7k3bVfc27dWjpq/uVe2m8v5/n8v7v+1UUa2d0G37gF+X5m+9TvtCWrCFHXCy/wBz71KtRl0iYSr05RvKRcjhh+zlPmf5P4fvLUbTJ9ojuURt7fN8rfLu/wBqljuIW/cpy6/wt8tV7qd4JGheT5JPuNv+7XP7CcZKRyyqwlHSRKzQ7vnfeWX5lb5VqndW7yRvM67Y9u6tOOF2h37N+3+L71VZtjWbh9u1V2usj/w1XJOJ5tarCMveMe6VJFUNtV9nzrH91lrOmt/OjR0to98br8395a2prPbtffG25tu2NKZJYw3Ehkhmj37lRo1+8tFpxOfnhIx4YfO8tII5E3fwyPt+b/4mpYbPaz/dDbPvf3mrQuLUR3P2aNPlZNyzSJVyxt3+/c+Sieb88e373+7XQ5Tl9kx9yM+U57+z7mGFLq2uFZ2b513/AHW/2qktdJ89fs03ybl2/u/u/wDAWro4dJR43S2HO9m+b+7VqPR7aSJvs24oqfd/u1lKc+U1p0lze8ZWm2aRw7BCxdvldmX5lrXhtXKjMLbtm2KT5fl/4DVm1tfJjRE+Z2+Vdy/eqxHofl/M6K7xuyvt+bbXNyTqHo03Rp9SKG1f7OIbmZiqy/d31prbXV1tTe0YX5UZl+ZWp9nY7oVSZI1bZuRW+81WJI3s7dEe5jO5t+1Wp08PWlV5eU3li8NTh71SJNHbpDGib1R/K/1i/e/3a1dL+aMQv5iN97d/FWcti90qiZ1QN9yRv71a9tNZ2Vu9zc6rC32e33s3m/e/2a9zD4WvH7J5WIzjLI6Odzf0WJ47pYXuWfcq/u/4m/2q6TR5YdNUWFzNyqfJHI25q4aHUNYVU1W81qPSNPkXck0y/vZP91f4a1rjxNZ6PZy3mm2bRytFte8ZN8s1exDD1Op8rjOIaesaMTrb74meGPBenya9qrzeVH8sqyJtVf8AgTfdr4I/bI/4Lc/EfTJ9T+Fv7L01lptus/lz+JEhWWbb/EsLMvy/71eff8FK/wBtvXrm/f4G/D7UmjVV3a5qEbfNI3/PFf8Adr4cY7ucV0xo31PGljMTU96Uje8dfEfx58UvEcnif4i+MNS1zUZmzLealdNK5/76r9mP+DUX9lHSv+Eo8V/tmeM9Ijf/AIR+L+yfCrSQf8vUy/vZlb/ZXatfjh8Kvhn4w+KXjC08F+CNAutS1K8lVLa2tY/m3N91q/rM/Yd/ZV039jv9i/wP+z9olnHbX2n6JHea3df8/GoTR+ZNu/3W+X/gNfPcT476ng+SHxSPQybCvF4v3tkaHxM0XwxY61f3Oqw7Zr6Xz2jVvmZv7zf7VfF/7WnwNvNea+1XTnZreG93L523dNHJ/E26vpD42ePNY0uaabxPo8MDt/r449zRzL9371fP/jL49eD9BvlufFWpQtDJFvuI5vmZY4/u7a/NKOInTjzH0n1eFSryny3N+yjo+k3g16awk3szS3SyO2xd33W/+xrsvgh4P8L/AAX8J+J/2k/FVms03gu1kbQY7i3XY19IrLB8rf3fvVi/Fz9qiz1jxBc2GlPG9pHFuikjfcyru+WvHv2l/jZrGqfsi6P4TfUZmm1zxfNcXSt/CsMe1Vbb/vfdonPE4lx5/tH2/D+XYenXjJ68p8+X/iLxt+0T8TtQ8R63fyXl3fXUk+pXTLub/d3Vk6tL4i+HMzTaVdTRiNtqyRuysrf3a9f+APhP/hBfhqHa2hl1XXHZ/MjPzLGv8NY/x88e+DPD+lDRIdEs59QmbdKq/M0K/wB5q7faU/bRowjzRPWzXH1YQdRsufs//th6xDqcel6rO1tPDEyvIq/LdRt96Nq4T9orwz4e/wCEmj17w/bRrBev8iwy7lj3fM1ebJ4iln19b22tI4EVv+WdavibxBc6hpscPnZhX5tv92uqODqYXFxnR92L3ifnuZZj9dptVNSdvCaaPDseFWaRN3zfwrVG509PtDWaf3NyL/FRputf2ppZf7eyPap8iyPu8z/ZrS0W8S4tVvHTefutu+8rV6salX7Z81KhHmjymZb+Hb+ZhNbJ8m3+Kr0Xh+8t7hneHKNt37fu112i6lYLZ/6MitLs3N8lbOnww3EcU14io7Ju8uP+GuOeI/eSO7D4PmnEyvCelTWcY86H5JPufJ/DWvdafbQ2LQ7I1Ez7l3L8y/3quqyR3Cw2z8/dfbUt0sN1pvzvt2y/P5b/AMVeTVlPn5lI9/2MKMOZHD3nh/7RdtDDCzQ79u6tTRvB+m6RajUnRWK/djb5mqz5yWszwzJGPM/1XzfeqrqEl5Gqo/yhm2pt/irqlipU4cq6nlVIx57mnb+IPFU0zvDr0lhZrKreTCu1ZG/hrpvDfxu8W+E7Wa2TxLdXDyNu3SS/dZf7tc5oNuJLMQ6lD+4VN7fL92tQfGb9nb4cWKW3juOOV1vVaKFYvMkkj/irz4U/b1eSMOb0Lp1amH9/n5TqtD/bd+LvgGSHxD4S+LVxpWob922xumRmZW+VpP4a++v2Wf8AgtrqnjzwXP8AB79qDw/pfjG3urP59QO2Kc/8B+7X5IfFz4q/sz/EfZP8N3urSWOVv3bW+z5W/wDia4mO48W6Nfx6r4b8QSBoX3RFf4v7tet/ZbhCy9yX94y/tOtKd6nvo/o5+CH7SX7H1rd3Fh4Xs73SLy8dWsbdk/cbdv3dy/LXX6X8WFtfGVppuzzbPUN3lXC/dX5vu1+D37In7S3x1uvE9tpWo69G0Sy738xd3lx/xbVr9Wf2RPiVZ/Eq1h1LxDqqqNNi/wBFbe26SRv
9mvBxmAlRxC5pe8foGS4uhXw8n37nvf8AwUq/am8H/sr/ALNU15qWoxnUvFP/ABLtGjkb5JJG/ib/AHa/IO+kvZNSN3JtR5JdzeTuVa92/wCCt/xu8PftOftReH/g7oV3Nc+FPhfpyyX95D/qptUk+Zo1b+Lb92vAWunaRpvut8zJtbdur6vK8L7OlfufLV6kfb27FjdDcMHR2dm/iVvmqjJCvnMkb7Ssu5o2pYZnjVHmdtzfN5f92pZGhuofuKH+638W3/davSjGEp8tzmlLrylu3s7aSNXmRo0b7ix/NW7pdrM15sRG2KnzNJ95V/vVj6bb+XG375flX5fm+9/vV1WlrDeSGEowdYvmZq7af905pk0cdzGB5McjJ915N/zL/dq19heGFkeH543+bzKm02xuY2Te8aKqN8q/8tKvQ2afZWREaUxv93f5lenT+A5nHmMaRYbePyXk3/P8rSVV1Df50r/dk/u7/wCGtKRd0jQvc7mWJldV2/erN1dtxZ5trR7VXdv+7XZGJhL3vdkZV0r+Yru8YRU+dV/har9sFbQ9u8MDCwyv41DfTIyuk275k3LuT7tT2mU0bMhDfu2JweD1NftHgtGP9r47/sGqf+lQPb4bdsVVj/cf5o51pHh2FEY/wtueqU15NIpPzFV/hb7v/AasapeQ2rb9/mIqbvLj+VmrmNUvoY18tEYrHL8rb/mWvyj2fN7x8z7bl2LMl5NJcP5M2xWf5Gar+k3CQt5dzc7mb5pWVq46G8feIZJssqf99VpWOsItwiQzfOzbUrnqU+bU6Y1vc+I9S8P65Bb2aRo7FW+VGri/ip8fLbwjp76am24mX5v3j7dv+1XMfEj4nWfhGzZEuWlm/wCXdYfu18z/ABK+Il/fXTzalcs0zf3nqI0eboeVmWbc0fY0v+3i38VvixqviS+k+06k0jTN95n/AIa4q3vnt7d7l5l/vbawZtS/tC8d55mJ37lVadqF8kdt5PnbP9ndXTGnCJ4NjRk8QP5jvI7bf49r1h6vrmpalcDZNuij+X5ag+0Qyw/OPvfdqGNkiV/n43VX90rmLf2v7PDvdNq/+hU1vEj2e5IX2Ls/3v8AgNZuqatDt++o2/L/ALtYtxfPMu/5mLUc0R8p2un+LppJPJd2Ybf4vvVvWt4mpQ+c7qPk+T5/mavLrW4fer/eH+996uo8J6xukCTPx91d38P+zURl2Mv8R1qQib/SVDNufbtanLb7Vb727/fqa2WRmR/4G/h/u1ZmtfJkEKI395WrQn4feKqxGOPfsZfM/ip11Hube6M3y/eq+LN9q7v9371Maz3SNuhbav8ADVRjGURfaKFtbvIu/YoLfxNViTSnC73RRtfdV/S7EM2/7y7tzLsrqYdDhmtV/wBAVv4vlrOJUpHns2lpJH5czqrfx1y+seG/s8g1DSv3My/3Xr0nxBoqWbPGm1XX+GvPNQ1Z217+x7l9ir83+9Ve8HNzbl7w3qmq31j9g1K2Xdv2vM38VLqWj+T8/wDAv8X96tK3js1jVEdv7u1f/QqmvI0mz/FVRAwrWFN3yJ/d/grsvhx8SL/4Z+IbbWLaeT7HNKsV7bq+3av96uXWzRmKb+aka3eS1NnM6n5P4v8A0Koj70zOpT5oH6R/BXx5pt3pdt4h0rUldGVWiZvvV9q/s1/tBTW7Wlt9v8k2qL8rPt3NX4//ALEfxmSO8f4darqX76P5IPOf93tr7d+H+tar4ZvormO5Yo207V+WtqlP3PcPFlKUavKz7d/bv/aW1K6/Zl1H4VX2qpNbeJbm1ls4ZZgZVkinjmkIHXYNoBPYsPWr/wCyt8Qv2bP2vf2AbX9iP41/GvTfBHiDw/qbT6VeX8ccKNCkxlSVGkKRSMRNLGy71kOC2CMk/I37RPjB/F9l4enaTIhiuFCnquTHx+lWbT4XeFNEsNA1nWvDj3tvqenW9xKPtbplnQE42kY5zX7zgo8P5J4UZficdVrU6k8RKtTnSjCUqdWHNBPlm0nHljqnu32OmlWlSlrrdW+R9M/8E4/En7Pn7GH7bHxC8OeK/wBpDw3qOgW3hee00zxTGzxW18yzQzMikgqZAsbDarsHYBY2kJFfNP7IHxZ+H/wf/bL8N/FL4hTGTw3Z65cjUpo7ZpQLeeKaEyGPG5lAl3FQCxUEAE8V2Xjb4FfBn4i6v4Z+CPwB8LX0XjrUna81SS21E3SW9ljhDG7HEhwSPWvlv9p3xncfsr6V4w8RXfhGLXLjwleT2n9k38skSzzLP9nAcxEOMOwYhSM7cV9TkOfcJcSf2vjYzxE3PDQjXco04NxpwnFypqDaU5JydnaKdraaH6FwbKawGPStpSk1v1TP0sP/AAT+/wCCYDeKz8dD+2hpn/CBlv7S/wCETGr23neX9/7Pv3+fsz8vleV5235d2/5q+Uv27Pip+zv8V/jrdar+zH8KdP8ADHhmxt1s4ZtPgNumqsnH2r7PhVgBGAFChmA3v8zED4y/4Jk/Fb4rfthfHeXw78Tb6yt9OnCGLS9NsfKhtw0m3Adi0rn33EV9Q/8ABe34Dav/AME2rT4Oa38G9WkisPGUl9b+IZriATg3KJG8SqZc7BhmyByfWvn+HPEfhrK8zWNx2NxuLlCLhTU404xjF2vzRhNKpN2V5zu+tr2a+HrU5uPLGKXocJXS/B34peIvgl8U9A+LfhKOB9R8PapFe2sV1HuikKNko467WGVOCCAeCDg180/swfG34gfE3xnf6P4s1iO5todLM8QS0jjw/mIucqAejGuM/aM8X6hpHx11KyivZo0W3tyCkpAXMKdq/Tc+8TspjwQs6o4WVajWm6LhNqDaalzXtzq3u2t1uYU6DVblk7dbn7o/GT4Z/wDBPX/gpjrFj8e9H/ai0/4feKZtLt4/Eul6nJBGzMqDAdLhot8iAiMyxsyMsa8cZrhf2t/2jf2YP2av2Pf+GF/2O/GVv4pl1m8kPjDxEB5w27kd281VEUkkhWONTHuVI4iCd21q/GDR/iPFIsem6lqbfaEVmtpFuDtZW/hraudZu1Qf6Y3ypudvOLV/NeXcd4TC18NSxEK9bB4WSnRoTrQ5Yyj8HNJUVKcYX9yLdlZbrR+39UjUg5wkrvd2/wCCfql+wT8Iv+CbHjv4D+LNc/ax+I66f4ptpJRBBd6y9k9lbCPMc1lGjYvJS2/KFZcFFHlgHL43/BM34FfslfFL40alr37Q/wAWtJtNL8NyrcaL4a8QSpZprQ3nbJM8jeWUTCloAxLlufkVg/5YalrlzJMkw1SZNq7t28/NWZN4yvd2Ib+Xhv3v7w19DjPFvFYqlmUacsRB4vl5f30WqCWjVJOl7qkrp2aezT5lzHLHBJOOzt5b+up/RN+2p8B/g1+2j4gsv+Ei/wCCjPhHQvDWkKP7H8K6fPYtBBJt2tM7G8HmykEqGIARflUDLlvij9sP9j34Nfso6NoXjP4TftfeG/HeoT6lhtHtIYnmiCYYTfuZZkKAjBEmzORjf8wH5VjxVOsph/tKf94u1GaQ7qjk8XX3nB0mcpGv8U5WvH4U8S8z4U
jQw9OrOeFp3XsbUIxknfeSo82rd278ze71uXVwsat21q+uv+Z++3xM139g/wD4Kt+DvC/ibxj+0Fb/AA6+IWjaSIL+DU2jhUZO6SLE5RJ0Dh2jaOQMBJ8wydoT4GfDD/gnd/wTR8ST/HbxP+1jaeOfEcWnzw6JYaKYZnQOmG2Q27yYkYAoJJJEjAcg4zuH4Dt4y1OTaft86p/10NMj8ZahAoT+0n3q33fONea+NIQwEsoo1MRDLpXvQVWnpGTbcFVdDnUHdq2umjbvc0+ry5udpc3ez/K59lftg/GDVfil408RfGHU4orS68R+JJr14o0AWLzWdwnAGcDAz1OMnJJNeOaP428m6aZ7ltkjK3l7/lavIf8AhNpp0a4lvJWU8CJmJq7b+KrmaRUhuYwiv83y/N92vM4z4rw/FObRxVDD+whGnCnGHNzWUFZa2j08jTD4adKFm79T3PQ/GVhMyfJviZvvM3+rru/D/jVLiP8A0N2RWdVddv3v92vnjw/rXl7Hn8t2ZNyMv/xNeheHfFDxtHvmZF++kav91q+MliObWJ1xozPcbPxBeTSRJDNt+b5IZPvbf4mrN8bakdRmhbaQELjJXGenNcjo/iK5m/ffaVkf73zfLtrUbWX1mNJpQu9V+Yocg1+m+D9fm8QcJHyqf+mpn03ClGSzulN/3v8A0lnfeGtce20W3is2yUhTevvirMniaGPdsfj7rtv+bdXBDxUlpAtkjRq3l7dze1R/8Jh5yNDDYbTs+Zq+H4hr2z3FL/p5U/8ASmeVi8PJ4+o/7z/M7ybxVNDMiPc53Ju3f3qrzeKZpVDo6lPmXd/tVwlv4kmk8ua5jWI7tztH83y1Y/tiYM3kzMg37tu35a8GWK98f1Xm942tU1aPbMjwsf4tq/3v9msHUrh5GmdnkXyV3eWvzNUVzq9zNtmO0hvnRv4VrK1K8eRi/wBsYfN80K/wr/eqPrEpbFyw8YwJLq6hWSF0/ii/1jN935qyZPEG5nTf937zN/FS6ldeZG+/ciM3yfxfLtrBvj5itNs8zavzbm2s1bU6hy1MPH4jqI/ElnHsmmf5tmzzP937tQyeKkvpmRJlO5G+ZWrhm15FkT5Nqxy7tqt/s/3qrf8ACSOq703A/wATb61OfkOqvvEUMkawwvvb7vy/3ax7vWIR/oyIvl+V/e+7XPya/wCZC01nNv8A4Xb+Jqz5taRrdnRJP91komTGJr3GuQ/OPmX97u27v/QaWx1TbmMOzJJ92SuRvNSe5U+duVvvf7VX9Pu55o47n/VL91FZvmb/AGq5K0eY7KZ6b4d1BFaNHdVPy7K6+2u4b6XzvtMjL91FjfbXmGi6g62p/ffe2/6z7tdRpd8YlEOxkVVX95/C1YRlGPu8x0xo9kc82jzRsU2KfL/i3fe/4F/FQ0aW9w0EcKu+z523fdraktXEYSZ/uvt8v+KkuLe5aN0hKl1+Wvj/AGnN8R+g06PMc4y+TJstk3bvv7n+Zao3kflu8yfIrP8AdX+Jq2b61SNm2MzFlXbWHeXH3pZnYsr7kjWtY1OaHunpU8L7vKc/fSJMzpMm6sLUlhWT99J5YrZ1SORWKJcspb5vmrG1RXkczfMrr8yL/C1dtGXuHV/Z/u6RMe+bciTI+9/4pG+7WBqK2f8AqfM2M38VbF19pmm2P0Z/urWXqDeWvkvtZt/8P8VepRxHwnnYrK5cvMZDb5PMeRGXa33qGnebefs2xFRf+BNSszqyuj7/ALzbW/u0lrGkmx0K7WTc3zf3q9CNaLifK4rAzpyL8MKSQ/P8zf7PzUyT5t8Lp/urUunw/Z22JMrp/H/tVaksfvSQo21f9n7tY1Kh53sZy0ZkzW+1Vfzud/3V/iq5ounPtbZ8v+zsq3b6TGsiTI//AALZWppun21yrIjsZV+Z/k21Eqn9446lM+hvh1AIv2dEtwwYf2PdjPrzLXhFhoqNJstrZU/vNs+9X0B8P4PL+BUcEgz/AMSy6BGfUyV5PpemvMqoibPnbav91a/afFpxeS8Pr/qFj/6TA+gz9L2WDT/kX5IzbPQ7Zdzo/wDBtWT+7V9bJ5LhIZtwRtu+Ra3rXQ7ZWTzrZcSfLt/vNVmbTRHtSGwXydn3a/DKlSHwnl0JSiYUmnfuVhT98y7tit/FWfcaTItwXtrZRGr/AL3d/wCy11raXcyRlIbNR8n3o/vMtQyaG6Wav9jV1VfkjWX5lrmqS9melTlzbnLzaebiQI9tJhn+6v3l2/xVWazmVfJRPl/jZk3NXYRaS7Wzp9mkRFXf9z+Gq13p0f2V4fm2fe2su35awlKXwnTT/mOSuLVJo/OTbuZ/vVUkZ5I0m+7u++v3dtdDcWKSQu/2b5P4W/irEuNPm/uRtKybvLV/vfNWfN7vKa/3iOHYshSeba7Ju3feZamtW85lhfbtZdyN93c1H2PdCg+x7JF/iX/aqeO18nYHffK33KXuRIjKRYs4bmSSJCVVdm75f4a0I4Z41W2dF2L8zMqfxU2zsXZ0f5fm/vfLWxHp7tHsSFt2/wCRt/3V/vVySqHoU7/EVFsUmj37Knj8u4ZLN3jC7vnkb+GlaDeG2fK/3dy/db/eq1a2SbkuYUYRs33ZF+9Vx5ZqJ0U5E1hH+7aaFFJ+75bfKu2tOzX7G7XNnDu+8v7tN21v7tLpVrDIoeFmVW/hkStmysYVkSZI1T5/nb+7RzTOqNT2exlWMf775LZVK/Nu3bq0mt3XaIXZzIn+sVflVv4lqSa0RdSdERvl2v8Au0/hrJ+NnjhPhv8AD+bUvDztNqcz7YFVf3Vuv8TN/tV6uFw9XETjY480zShluG55y97+U7nQfBNzeRp9vvLexST5lkupdjbdv92tBfhg9xZyw6J4z0u7uY03W9vJLtX/AGa+I4fjd4z1LXpb+8166eS42rceZLubav8ADXR6D8cvFVrfJdWesSJJG+7asv8Adr6GngadP7J+aY/iXMMVV5oS5Yns/wAZvFXxa+GsLWeveDNPjtvveZprM25v95v4q8ruvipf3FoupWviFV8tvn2y/d/2a7W4+MUfxK8A33hvxg+97ja1u0b7pFb/AOyr5S8eXeq+B/FDJbXLRpH5iy2q/dZa640YdIniyxeKqSvOpI9b1D9orxbpuobLbxPJ/wB9feqCH9p7xOzDztY/d79yxt96vK9S1Dw9Haw39s/mpNEsqNJ95W/iWsG68WWbzOn2aPbv27t1V7Kl/KNYnFR+1I+iof2ltYhjDpqv3l2vGz/+PVNa/tMX9wwRL/5lf5l3fe/2a+arXxPpskmx93+z8/3asf8ACQabEu/7TJu30vq2Hlq4h9axUvtH0lqX7QmvSRj7Hfxodu3av8X+01SQ/tD+IXhihS9YfL8zbvm3f3q+arjxVD5izf2kzfLjb/dotfHDruS5vI3Tf8tP2FLpEXt8RGPxn0+37QmqzSbJtYkYLB8it821qsn4/X80gd7yOXd8ybv/AGavmqHxk7fxqf8AgVI3jK/U7PtHy1H1WG6iH1nEfzyPpmz/AGhpoXZLnVN3mf8ALOP7rVctfj99o
JNj/M+z7zN95a9R1Dwj8yySIzBdzfN91qzpPCaNIkiIxH3dqp93+9TjWnL3WRKjLnsonCLodyrjy+qv8AxVdXTUt2CTQ5RV27o/mbdXSyeHYYwiP8zMu3b/E1WbPw7IFWb7S25V+dmSsZSh8Uj0sNTlzcpyseizSQtCiMq7/+Wi/d3VlXWl2cbO7/ACqv8Lfe213WqaXeMuya5XYvypI38Vc7q1vctJNtmjKqq72/2qzp1OY9yjHlloYJXy408v8Ah+ZP722pGuJo5tn2WRxs3fL96mX023Z/Ei/M7L/eqH7Z5x+020i/L9xd3zba1jKR69GnGQjTf6OEKSB/vbZP4Vpk1mkK/wDLNmb50/2qYs1tNCNiZWNNqbqTzIfOyj/dXav8TNWtP4jjx1PqeXarYPY3Don8Kfe2bWrFmkdt8aDB/wCedepeJvCvmKs+xV/i+X71cdrPhSazk85Nv7z/AGa+kp4jofi+Iw0o6nPaXcTWbb4UY/Jt2/71dZouoXTRh9+7au3aqf8AfVZEemzRMu/aGZa29JsZo5UtYUx91mb7v+9RUrHNRpcsveOt8PyQxqtzM7fN8u1W+7XbaTqWJPkdn3J97+KuA0uxeGNnjfzv4krq9Nvfs0Pnb8PH99VrzqnvS5uY9nCx9n7x3ei301nt3orf32rr9C1hLhV/1aLs/i+WvLrHUoZI0S28xTvVvmetqz16Xar3Lw7N+1t3yyM38NcVSjKUz1KOI5T0tdWtpoW8lJBHu+VlTau6o7rVEWb55tiN9xl/vLXJaX4gmaN3d22fdRo/mXdUOra69vdb/tPzfwNG/wAv+7trGOHn3O6OK5o+8dBq19Gsn75+W+VG+9urIvrx5DseZif7sf3d1Y114mc5+07W/hRf4qo/29bTb5EdnKv8m37q1UqMuX3zT65SjLQ0by++aWGb7yt87fd2rWRdXkMz537Sy/eZ/wCKoNS8QW0K7/Ok+b5fLb/0Ksu8vpmb7iu2/d83y0/qvuxKlmUUeq6C0k/w43eblmspsOQP9rmvMVjdmEMKLj5VlX+7XpPhlwPhaJC5IFhOST1/jrzyx3tKPkV0/ut8tfuPjGpLIuHEl/zCQ/8ASaZ3ZpiEqVCT6xT/ACNXS1+zqET5nZvmZvm+X+Gt2xsz5becjCON8o2/duasfS3RYfM+0qkv/PNvvNWrDLCsyQu7O0L/ADf3dzV/Pcqkuc82GK5ocpsaT+8keHpJHtZ/O+626tuzCXClPJZJWT+F/lWubtbya3VU8jO5tyVehuJPJ/4+WZ1b/drmrU/3vOduH5pSlI6G1uob6PZ50Z3ffVvvLtrWiurZdttNbbH3/wAXy/w1yEN+8kkWy227f4fu/NWtZ6s65TyVfb8qbn+7XFKnKPvROyn7szrLW4hk2wj5Ek/hZvu/7VasLW0zb4PnSNNv95q4iHVkhZnM+f73ybm/75rVtdVdXR4V4Xav/AacYx57m3N/MdPH8reS8ypGu7bJInzV8n/t6ePEvPE2m+A0dcWe28umVPm3fw19G6n4kTTrW5uby+VGWJnRvK+VdtfBHxa8YXnjDxpqnieaZm+2XTfe/hVflVf92vqeHsL7TF+1l9k+T4rx31fAxpR+2clpmvFfE1xamZYkuLdk3NWD4uuHkhe2fkxqqvVbxRffYNUS8875f9ml1TVIdQt/tj/P5y/Mtfe8v2j82+E88m1AafcPDcrwrttVflpl3B/aFr5Py7WX7y1L4s06PdK6bUXf8n+1WNp946/upvl2/KlPlQSlMz7q3urHd2RmpkkjzbpFdvmrW1S3+1Wp2P8AN975awnieBz6Uy4/CEgKBlxTJJE28VMpWQh/mP8AvVHPCit/8TSlIqPmR7fMzv8AvUwllb56e33zs+7Ss26P56UTQbTWV2bpTqKctgCpGj2hNn/fVR1LJJuVS6YK0yJblzT1/ds+/dt/hq55iLjZDlf4kqlZyJ5Owp81WtrrIQj/AHan4jIim+Vm/wDHaZJIm3Y7sp/u0sjOzb6Yzbmx/F/eqTQfHJwqJ/6DToIYWVqjSR4+j/dqTzn8obP+BVfoZy+KxHcbVjLtxWezbqs3zlhhvl/2aqnk5NL4jWmPgO2ZCP71dhb28clur7MfLXGhtrq5/hrs9JndrON0m3bk+61UTWKl1b7lyn/fVZslt97ZBuNdFcwusfmPDtrLuFfcdibaDH4fiPXP+CdUZj/aw0BXAP8Ao19tI/69Za7H/gq+qn40eHGIJx4YHA/6+Zq5X/gnnE6/tX+H3f8A59L7/wBJZa7L/gqmm/40eHe3/FMD5v8At4mr9lwH/Ji8X/2FL8qRg/8AfI+n+Zy37H+jvJoesar9mXYssaRSN97+9X0P4J03R7q087yfPuPvJ/D5deYfsg+A7aT4Nw6lczTRNqGqSbWjib94qr/er2XS9JttHX7NDYSSt5W7dJ/6Dur8FxFGUqtzgrc0q7scb401a50PxMr2G2B4/nRlb7tRftBfHzxz8btH0rwl4k1DZomiwK0Wkx/6uaZfvTSf3mqb4jaK8MKarqVmyNcM33lrz/Wr5I0SGzTG77y1hToe7bmLpxieeeMYbOC1mv0tlTy03LtTbXhV3I01y8rvuLOx3ete0/FRrm18Pzb5vmk++u+vFXh8v79ejh48sD1MPyqmMeNVUU5Y3kpT3/2fWpbeGRmDhcf+zVudHMyHyXLKnZq9E+DfwdvPHN8LmaBvs0LfOzL97/ZrO+Gvw51Lx54mttEs7OZ1kl3SyRr8qx/xNX2T8Jfgrf8AiDULb4dfDfT22RyxxXEirubb/eXb95qiXPKPLE4sZiZUz1v4W6Bb6T+yxH4d0dBGseg3sUKp0ViZun4mvk6TQX+2fYIUwscux5N25t275q+5fE3w/f4O/DrUPAcSSCTSdFlGJvvFzCZDn8WNfLnwt+GviHxprS2em6bIqM677pvlVf8Aar9m8V4t5Rw9f/oFj/6TTPKjU0cj6w+G/wAYNX+E/wDwSK8T/CLRHkTUfiF4th0G3bzV3R2O3zLuRf8AZ2qq/wDAq8Ek0m2sdL8iFFhWNFSLy0/hWvUvjF/YOg6L4Y+Hugozw+HdNk+0XDJu+0XUn3pP/Za80uI7zXJPJeFj8/3fu7mr8WjH20vdL9p0exz39izX14+yNnaR1Wu68N/DvTfDGi/8JJ4kh8lm/wBV5n97+81dJ8O/hfZ2Ni/iLW91vEvy26/7tcR8dviw8zP4b0p1XduX5f8Ax6lWxEMLS5Y/EKXK/Q8x+N3ii28a3klhDbedbR/OjbP/AEGvGLvUtS+H/ihP7Bm/eKm9f92vUraz8y6+0/Nll27ttcD8RNFe28QLfww7oZk2oypXku2I/ivmOqjWnT96J6B4I/4KKXvw68KDwn8QtE1K/aDdPZrZ3Hlp523au5q8p+Lv7Z3xU/aI+yaP4huY7LSrX5V0+z3L9ob+9K38Vcl8QNHS+0+Qwwtvj+ZN38X+7XncUz2dxt3421tgcqyylL2lOFpHtRx2Kr0uVyPT1ukn8v5GP+0qfLWdd2aNC29PmrH0PXJtvz
u2G+XdW3HcJMrQ9vvbq9nmOSXPGZhXSPbr5LorfxJUc2yFU2btjf3a1dQhQR7HmVqzZInm/c+XsZXpy5hx194lhkmmj+yzJ8jfLWXNvs7h4XnUlW27v9mrLB47hfOf5VenapbvfW/nBF8yH/x6iWxcSx4XZFut7oq/PX0N4XkL+C7a6+0svmLuaNk+61fOHhu4f7ZG+z5m+XbX0Bodx5fgdZnfd5fzRR0RlymVSJpXkL6hYyo7r+7Xd9371eOfHiTy9Jtrbfs/f7mhr0iPxM62YT+KRN25a8Z+M2sXmoalFHN9xXZkpSJw8feOSjvJmjKO9RvJ82xxTKOCKfMjq5R63DxR7A9Wobh5I/nm2p/s1QTp+NPaT+D+7THyo1G1J/uJ8sO37qUsNw8u1Efhv4f4qzF+Y/J96r1jNNHJ+5g8x2/u/wB6szLlOkhj03TbUXl47E/wr/E1amk+Orm482HQ9HVVX5WauRnjaE79avMOv/LFW3NUkPirWBYvpWnzfZrVv9bHH/FV/CHxHb3V9pWiyJf+JL3zb+SLclrCu7yf96vt74LaiLz9hldSMW0N4W1Ntuc9Dcfma/OW11D5lcI3/oVfoZ8B3B/4J9I4XA/4RHVsAf71zX654N/8jjHf9g1T/wBKgfYcEpLGYhL/AJ9S/NHxXpusPdXzoj8yfc21ryLpmn7H1jW22N96GFNzLXAJqdyrDyXk/efKixpuauw0JvCvguGLW/HkK395J/qNB3/Krf3pm/8AZa/IvhPi+U6nwrpOuasq3Oiab9mtPNVP7S1K42r/ALy13eoX3wl+HLQ2Gq+Kpta1hdz3Cr+7trdf7qr/AMtK8M8RfFTxb4w1KOa/vMW1u6/YrGH5YrdV+6qrXOX2ralcXz3N5eM8kjbmap5plfF8R9J2v7QHgy3meG2hj+zq25o1+8zVUvP2sNH0Ni+m+G7eVm/ik+avnGK6mjjLo/zM1Ps7O81OZHSGRi38WynyzlpJhywPbNe/bE8Z3CyW2lTtbJJ91Y/l/wCA1xV98dPiLr0vkvqsy7vv7X/8dp3gn4E+PPGd5HZ6bokxaTbt+Wvqr9nH/gl74n8TTRX/AIw8vTrdZVaXzvm3L/Ftq/Y296RjKtTpy92J8v8AhvRfiX8QrxNKsIby7Mj42x7m3bq+n/2e/wDglb8afikyX+vaDdafbLt837VE25q+8/hD+zT8Af2S/Bdz4217+y7e2s7ffLeahtjkb5vvLur5o/bE/wCCziWsN54D/Zmf7HFNuin1Bn8zzP8Aajp81KHwmcVUre9ex6Rpf7Gv7LX7Mc1s/wATtY0+81WS33Raasqs/wB77rN/DXfah4g0258OjRPDFtZ6PZXHzxR6XF96Nl+6zfxV+Xvw5+IniTxZ8QLn4heOdZuL+9m+Z5rqVpK99b9pzXVsYLC8muES1i2xSK3y1HPVI9j7x9dQ+BfhFpOi/wBveKvFUKf3odu6Vv8AgVcr4m/aM+Ang2NU8Nw3V3NuVNzbf++v92vib4tftRa3qEkltYXmfk2xSM3/ALLXEWnjrX/Fmool5eMfM+bc396sv38pGvs48p90t+098H9SuHFzY3lt95Uh/d/vP4vlqfxB+0l+zfqGivYJo91EFiXz1aJd3/Af73zV8cW+nvfN/wAtMbfvK1al5pdh4d0s3Mzx/c2p5jfxVpH2sY3bJ5YylyntV5+014DXW7iz8JeHry20tdyxSXnyu3/AaJP2lvDcMcKeH9NkaeFtss00W7/K182trH9sXvlaVN/s7v71dd4R+G/iTxBNDbfvNjN8/wAv3t3+1R7OUve5glyU9j27/hfmpapH9ms4VQru3Rxr/rNzVt+E9Q8beJmltrCFlmkXduk3MsfzVH8N/wBnn+zbUfbLDY6qru275lWvfvAeh+G9Ls/s1hFbonlR/wCkN97dWkacacfiMZVPabI5X4ffBvWL6N9b1t/KVZVV5JPmaT+9trwD/gqB+1Y+gIv7HvwguFgn1JY5/Ft9Zj97a2/8Nv8A7zfeavo79qf9pLQfgD8HNT+J1/rEYuLVNul2MMH/AB9XDfLHGv8A7NX5l/AHQ9Y+Knxqi8VeObuafUNY1uO41Sb/AGpJPu/7q7ttRDkky6dH2ceaR/TV/wAG737KWnfsyf8ABO7w5evp3k6n4ukbVL12TDNH92Ef98/N+NfeJJOMV5t+ypYWHhv4BeEvDFmipFp+g28Cqv8AsxrXo5mRByRV1+f2judWFlTVBWMD4r+JIvBfww8Q+LWmMX9n6NcXCyL/AAssbFf/AB7Fct+zZpLeHv2fvClrdERzyaJHdXW7/npN+8Zv++mrj/8AgpR8QJfAX7CfxT8UaXtknsfB9xIqbu33f8a/L/xj+3n+3H8WvAOg+GPBetR6FpS6Xaosml3X79Y/JVVX/gVeHnFSVPDKNviPUy+nTxdVx57WP198X/HD4QfD6DzvGXxD0qw/uCa9VS1fI/7Tf/BeP9kz4La23gnwPb33ivVt7JIthF+4hZf7zV+bt98HfGHiC4+3/GP4tahNbx7mltbi8Zm8yuT03xl+yj8LZLzVZvAd14n1j7R8+5WjRf73/Aq+Tti37spRXotfvPo8PgcvpyvJSl+CPp/4lf8ABSP4oftD3R1O+0GLR9Ml3fZbGA/M392uE8VfGqaHw69tbaIuoXUkqqkc33lb+9/31Xzf46/a+8c65rESeCfhXZ6RayOsEUP3njb7qt/3zXo/g3xlf+GPCt9q/iqa1h1VYIWt45vn2xt/FURw0YbHqwxMKn7uJYsfG3jDxBrmoQ+LbaGGJUjfzpk2xw/7rV5n8evHmofD7w14j8c+FdRjuJbWSRra6kbCzI8uwsT/ALSsefeuW1L4u63481zxP4J0bW7i6abbO8ccvzbW+XatdL4p+EGq/ETwNcfBy0jMd3cWi2uy5l2FWiwSGY9PuHOa/ePBqEHkvEdtnhJf+k1D6DLZtUa1t1F/qfJnwr8A/EL9sP44aH8H/BOlySaz4q1dbPTreP8A1bSN95pG/hVV+Zm/2a+5vjb/AMENf2VP2ara20P45ftT+M9f1w26/wBo2PgTwzC9vYt/Eu5m3SbW+XdXhf7HPgv4i/sD/tYWXxc8S3Nm9vpuh6klncWsqyNa3EkLLE23+Jqwfjd+3V4w+KHjy38eX/iS8iuVsI4vmuNqqy/e/wB7c33q/F6mIlh4eypRPHwuEo1H9YxMv+3T17wb/wAEj/8Agnf8UL6Sztf+CkHiDRrlv+XDWvBsKyR7v4fvfer374//ALbnwr8A+Ebj9kPwlqMepeHvhjpen6To1v8AZVjguI1j/eTeX/eZvmr80r74669qnio6xDeSRSq6sm19qs1c78Xviff+LPFUnip9v2+6iWK9k3f6zavy7q4KtXGYmPJPb+tz0o1snw0ZTo/F/e/Q7H9qTxH8Or7W5db8H6Ja6bNMzNLHZptX5v4dv3a8cj8TX8iukMzJ8m1dr/M1Mg0/VdfmSGYKjbt3zLXT+HfgD8SPFFu15pRhwv8Ae+X+KuqjBOHLPc+YxWZYipVk18Jm6
B4F8beLozczQTLZ7lTzpPurXs3wh/ZHspIf7e1LWFuIo4v3sK/drj9N+CvxR0XUIvC+rePIdN8xt/ls25W/utX0/wDsG/sTyftIeMtY+Hmv/tE6xpt5ZwbUutLVfL8xl+Xd/u1x4xVoJy5oqJlhac8RU9xSuY/xSm8GfD34f2WjeErOx8P20MWy6mbdumb+9u/3q1P2Rf2rIdN8bWXizxpq9nFo/huLeuqK27azf7P+1X2f8M/2SfgJ+wva6d4c+KOveGfG+p61p10mt6l8QLJXgtY925bhVZv3bKqtX5a/tofGD4Y/Gz9qrxn4n+COh6bp3g9bpdO0aHTbXyILiOH5WuFj/wBpt22uLJcLHNcTOlf4ftG+PzbGZTaUv/ATf/a2+Pk37c37X2u/Hia28q0uorew0aHbtZrO3+VWb/aZt1el+F7pNN023htodsUcGxFj/h214Z8GRZ2t41zPtaaNP3W5Pu163oN9H5azI6rt+Xy1/vV+mU6P1eEYI+Kp4iWKryqz3kegaTvureL9+qps2zqsX3v+BV1mlTPblEtoWVPupt+6tcB4f1qOJ0R7lYV+95ddto+qblE6bWaRP3rLL8u2s6kOvMehTrHbaTC7XG93Zt3313/L/vVtwt9nsxv+9t+Td/drldL1hJrcpM+14/7r/wDoVXjrXmb0e2aJF27FZ9yyVwVI856dHEc0NDQuLn9ym+2Vzt/esv8Ae/hWuV8YrslhGQSWkJwMckittdSRWdHTG751jVvl/wB6sLxbc/aDbndkgOchMDGRX6P4MXj4l4NPtV/9NTPpuHpKWYQt5/kyafRjf6LAix5Vo0Z2rldc8PpdJsSFU2/Kzb91dtpk62+lQlZWwYhvRl9v4ar6hp7pdfZoZlf5V8plTb/wGvyriv8A5KTGX/5+1P8A0tnRzXqzXm/zPNrvwfZyTM6bWb/2X+9TY9Bfb50KSEb9v3PvV6ND4bRrhv3Kt5m3fI38NOfw7PHMr20LLt/2PvV8pUxHNLlbOiNE5PRfC9sk3nQo0x/jVovlWuls9F+1W6O8Kqn3dzfK1bGj+G9rK8tzJtX5W+fctdfpvh+GS3+zfZY32vuRpP4fl+7WMq0Y7nZGjLocC3gndCdnlhG+bzPvL/wGs/UPClztaaaFkdf4lT5a9X/seHyV8m2V/LT/AFa/LVW48NpNH5m+N42f5/LesJ4qXwhUwsYnjV54XdWZEs4Wf/llI3/sq1QuNJmWPzvs67W/8er1XVPDtt9oYpZrhV2vIvzbmrkfEelvbsyI/wAi/fatadbmlymtOjKMOaRw99Z+ZI0KJH+7+aVWT5V/+KrjtatUDTIkKqG3NL5cW2u/1eZ41+R9kSv8y7PmauM8SfaW8wJwJP4mT7rV20ZHVh5csveOE1aO2hw/ysyttT5m+b/gNZrRusz5RdjfLub+GtrWEmjtGd4t7N8q7flWueW4eNvLwrfO21lfctdq5pR909ijJe6TfIsbQu6j/a27dtSwzPNGJlhXZGnzSb/utVSTzpl/ffKi/K+3+KrdvvaFNiR/e/hq483UjFRjKMjqdY8PoY5JHhkT/gG7c392uR1jwrDJsT5d7fLtr1TWNNjbfbJcyIvm7lVW3LXOaxos0bMnk+YkPzfu/wD2aumjWlL4j8wxGHhc8xuPDsMjB0+YK+2Xcn92ren28McK7NoZn+Rm+8y10l7p81xJ8+52jTbL+621Xt7OGGYwvbLjflW/irT6x7vxHH9XlGr7pDZ2Mit+5h3hvm21pWtjNuXfFkN9/a/yrU6x+TZhETa0fzfL/Fu/vVJBNMLeI/Zmf+H7lKNTmjZGkafL8RNHshbyYdv7xv7v3aFnhhYI83y7927+7Wa3nRy+TDOx+f72/wC7RcXX753R1dP49v3du2tKcb+8pGcqkTej1Z49vkps/wBrzflZaoalrlyoZ5pmZf4d1YP9oTN8kyM8rIq/K/y/7NF5ceSwT7T935fu1tGMY6GUqkuX4i+2sT3jM81z8v8ACu/duapI9SeRXuXuVTy/uq38Vc62pJHMfLdaZ/aiKrwzeW8W1XSOtJU+aFjm9tOMjdk1J1X7U94vzN+6jk2/N/s1BHMl1I9zNMzOv3tr1lzXiXEiTXK703fuvl3bWqeG6SOQvvVU+Vfl/hrKpyqBEcRKUdT3Pwgqf8KlRYxgfYJwMfV685t1dUXUk2v8/wC6jZdqrXongxv+LQo/X/QLg8fV68xhu4ZP3szsy71X92tfsHjK5/2Jw64/9Akf/SaZ9fnDTw+Fv/IvyR0+nybpo7bZmVvl2yf+hbquyXSLN5LzLu37lrmLfUn+1jyZmlbYy/N8q1fm1D5v3Ykyv8O3dX87Vuf29+XQ5KHJKJuw6m7r53lLGy/L8z1I1x9oVIUuVRtn73zH+ZlrmZNQ8yYJ9+Jm+ba+1lq39rmZv3x3rH/49WXLKUeY9WnW5fdR09vq1tDDCnzSv/e/vf7VWrHXIYX37d+75dv97/armY9Q2xxJ9xv4G3/Kv96pLfWkh/c3LeUG2sjf7X/Aay9jPp8J2RqX6nUtrky/JZwxqv3vMVvmWrWna08bF0mUhovmZvmrkI7rzJv3j7wy/My/KtWo9T8m4CIn8H8NbqnzbE88lIk+Nnjx9B8A3lzDeNHLJB5SNH8zfNXxtrV08MjfOzp975vvV7J+0J40m1DWk8PQv5dvbxbpdv3v96vGdWZJGbuq7v8AeZf9qvvcjw/1fC80vtH5pxLjPreO5Y7ROV8WxfaLUps37l+7XL6PrG1WsJpGUfd2/wB2un15d2770q+Vt3fd21wfiCOa3vGuIX5/javaj7x4FP3SzrUz3jGF+K5a8t3gm+TcwrdtbxNThVPmR1/i/vVT1axm8tkR927/AL6olErmM61vk8wQu+f9mpdQt0ulD2yKrf7NZMy3MM3z8H+7UllfeSzbn6/3qfwj5SOSOW3kKFv96nrsmj+T5WWrkkCXy70dfuVmyRvby7G6rU/EUPlhEZ2VE/3jU8bJPD84+ZagZXR/n+Wq5Rx3Eoo8zeaKocRkf3x9anm56dqiC7WWpJt7S+1TyhIu2EZjkTZt+b79XJF2qdnX/wAdas+zDsv3K0RvZN833f7tTAkgmkfazyJ81VfM+b5Eqe+Xg/O2P7rPUKqnlr8n/fVP+8T8Q9XT7mz7tO8xF3b3/wBxagj2LN9/+CpJmhxvzupBIq3Tb2AFRs3lr70s33/wqP761US4/COrr/C8if2fEjov3PvNXHq2eDXV+E2RtPEaP8/+1T5kFQ05pvMZvn+VvlqjNDvZsfKPu1dmVI5/nG7bVSdvN27H3bd1RL3dDH3JHrv/AAT4iCftW6G+/cTbXv8A6SyV2H/BU0MfjR4dIUn/AIpkdP8Ar4mrkv8Agn2f+MqtBHX/AEW95/7dZK7f/gp1bC9+Nnhy1Emxn8ORorf711KK/asu/wCTGYv/ALCl+VI5J/72vQ9U/Z58Pw6L8C/D1mEuE8y189o5P9quj1DULbTVed03Iy/xPu3f7tPi/wCJT4V07R7OZW+y6Xb2/lr8vzLGu6uL
8dahc6fp+HdmmmfZuX/lnX4HUlOUpNHn+9KfMcx8QPFd5rmoNDCGeGH5UaR//Za5ePSUt7eS/vNv+wsj/erUu5kt1k+0vtdm/u0uk+Bdc8XTp+5mwybkX+FqujRn8ZtGpyv3jxD42NH5aWcybGml3bV/hWvL9Ut0VTs+Xc9eiftBW/2f4mT6Jbah5v2GCOJ9v3Vk2/NXAXVjeTR7ztdl/hWu2nGfKenT+Ey1QtWx4d0G81u+hs7CGR5Zm2RL/eb+6tVrHS5vMCOrKzfd+Svt/wD4Jw/sl3Orf8Xv8VWCtBb3HlaNayRbt0n/AD2rojR5jPFYj2MTW/ZV/ZD8SaTo9ho9hYSS67qksf2hV+bbH/zz/wDiq/WP4K/sP/D39kP4Hv4w8YQxjV7iJn3Kv+rbb5jKrf7Ndl/wTz/YRtvBOk/8Lo+LVh9nubhGewhvItrRx7fl/wC+q8p/4KVftUal8SvFMPwz8Hzf8S2ziki3WbKqxt91v++q0rcuGhdfEfPSqSxHvTPm74n67a+Ptf1jWTGTBqDSDbI2SU27OT9BXHQ2KaTp/wDZulafDbwtAqu0aqu7b/tVsW9tFaaZ9nDllWM5Zuc9c1xPjLXftFydN0e/k/eJudmT5Vr9Z8WIueS8Pt/9AsP/AEmB1RheJzMnnaheN50Nw8sm6N2+98u6u9+G3wuh+XWNbTZEvywLI3zf981B8P8Awe8khurrcWjbbtb5f++f7y1r/ETxtZ+D9JbyZleZovkWP7y1+J1sRSwVK4SOf/aS+IVh4b0u30HSrlo5mfZKsdfNl5Nc6tNJ5yMXb52krqfHHiy68TapLqV5czM0kv3W+7HXMyR+ZJvR2RN25o1r52piPrEuaQ+WUoxGTf8AEo0z7a/yiRNqN/C396vO/GGvQ+c0Oxs/wKr1v/ELxgkcaaVpiSOzfKq7vlWvPtaZAzO+7zm+bbvq6Me5tD4TJuLf7RM6O+5pN3ys1cB4/wBBfStU81E+ST+Ff4Wr0SLyvtnnTblH8FYPim6ttShmtnTO5Pkb+7XqYeUoSO+jLl944Kxu3t22/wB2t2x1TzF8nr/Fu31zdxH9mlZD/C9W7G4/g3/NXqfYOw6aW6hk3R/eb+JVqBpHbbvHP3ty1V+1P1R03qn3tlWrX5UV3dWf/drQzj7pG0b3HyOjD+41EPnN987uzf7tWmj+X7/Kv/DTZLPbcGbewb/ZqdfhHKXUr6bZvY6wsKchvmir2/R7zyPh6jv823au7Z/s15NNpPnWcV+isz2/y7l+9tr0m1kh/wCFftHMinbKuxv7rU/fIkY11fT28Oyb7uyvKviBM82ulN/3Vr0PVL5P3rzPuG/5d38NeXeILh7rWbiV33fPt3VBpRKVFFIrbqr4jYFXApaKKoCRY0Xl3x/srU6ahc7fs9nujVuy1X8z5XcnJ/2qWO6mVfkpfELlRKLG8lk3ujf9dGqRlhhUpczMf9mOq8l5cyffmZh/dqJiWbdml/dFyls6htXybZML/F/tV+iX7PTNN/wTojPUnwdq+P8Avq5r85beF7iQIn/Amr9HP2f0WD/gnWiK/C+DtX+b/gVzX7B4O/8AI3x3/YNU/wDSoH2HBkUsZXt/z6l+cT4GtZodFhZ4Zle62/NJ/wA8/wDdrNlvXuJDNNNudvmdmf71QtM7D7/C0kbJJ8ju3zfcr8dmfFe/9o3NJ+aze8mh2hvlWo7WxudSvFhh+Z5H+7VqSFI9Hhtkmw/8a16Z+zn4M0S68SR3/iRI0gh+d2kfau2nGIjY+Af7F/xF+L18JtN8N3D26/62bym2r/tNX0Ev7Mv7OXwJhhtvH/xCsbrUlVfNsbfayxt/dZq439oD9ve/8I+B3+HXwfmj0mO43JLJYuys0O35VavkiTxtr2tak+pXmpSPPI/zzMzNuolUlKPukSp825+kHw/+OnwD+HccM2m2sN1cNLul27dqrWp4u/4Kg+Hvh3o7zeGNNjiuG3M9rIisq/3a/OJvG1xptiYYbmRX+8+2uY1nX7y/kV5ppC/97f8Aw1lKM6m8hxpwUT239p79ub4tftEapN/wlXjC+ubbzWX7PJLtRV/hXateP6WlzfXS/LisiGHzm2Abmauo8N6b5MyNMjKG+WtIxjEfuRPTvA7WtnpG/ftK/f8Al+9TvEHiy/WMw280ixfd3bvl21V0Ev5K20L79v3Nv8NXrHwjqWs3z2yQsxk+9troMvf5zC03RNS1S63+W0hZ/k3fNXrXgn4bPb26zXW0Ns3bv7tdR8Kf2f7mGz/t7VbPEUarsZnrttc0fStH02TY8YMabVXZU80Sebm+E4q3hs9JhFy8O5I13M275mb/AGa8v8eeKL/xVrn2LTd3l72/d10fxK8XfaJ/sdttHzbdsbVT+H+g6Da3D63r1zHiT5kj+981R7TmJjRnH3j0P9nv4Pw+IFhl1VIbdYf3sv2ivprwfpfgDwjpkUO+H7Zubzd3/jtfJN58eLDw+zw2dyscX3d3+z/drB1T9pPWLqRUfWG8hX3bt+1qy9p9k19jzR94/QlfGHhtpt6bYopPlRfN+78v3v8AdrA8UfETStOZ7mw1ViYWVkVZflX5fvV+f2oftVa3bvvtNYmYxp/z1rDvP2ovG2oRyo+pSbG3ebt+61KPNLRkxo8p1n7aXxO1P4v/ABWtPB8OqtPp+h/v5Y1ZvLa4k/8AiVrqP2P7Gz034jaVf+R80OpW+5WXd8u75mrwrwGz65NLqty6vNNO0r7q91+CrfYdSS8s/leGWNk2tt3Mtc1Sp7OrE58R/Kf0x/sl/tFWGp/DvSLO9uWdo7VV87d95dtez33xt8N2dqlzM+5W4VVb5mr8nP2M/jxef8Ivav8AbNm2Jf3ay/dWvpi1+Jl5qkYs/t7NF5XySL8u6vbpyhUhdxPM5p0/d5juf+CiPxPs/ij+xx8XPBPh6xk2XHgHUAkjJ96ZY933v+A1+SnwB+OtnN8G9A1u5v4UH9g26SqrbpGZV2/NX6Z3lm/irw/rfhjUrzzINU0a6snVn3LJ50LLu2/8Cr8Avhv8QvEPhXQ9S+F2pTNDd+GdevNLuo1+XcsczKtfO8Rx5sMpRXwn0XDtaNKrI+vfiB8etKufOhhud+75/Mb73+9Xifi74kW2rTzXNntiaSX/AIE1ebar4qvL682fatm5dyfPXO33iS4t7r7T/aSokKbdqpur4WVacj7KnjIy0Pa/C98+rXiarretxxtHu+98q7a4342fHy51LxE9homqsbaO3WBfm+9XmmqfELXplWztr+OFG+//AHq53Ut8zM9y7Ft3ztv+9WtOVWUbSIljIxj7h75+wL4ihs/jxeXOsfZ2ims1l86T7u5W+7Xvnxh8ZjTtL13x1AuAZpLlVRuAHkzjPp835V+f+l+INe8N6out6DfyWlzD8rtvb94v91q+wfiXrMkn7LTa5ekO82g2UkpPcuYs/qa/e/ByMlk3Eb6fVJ/+k1D6XhnG06mFxClvGN36WZ4j4y+LHifxZJse/mKKvyeZL/D/AHa5+38B+Cdd8C6xqupaxdQ67avHLpFvb7f
LkX/losm7/gNYl14ks5I9iTbFX7yr95ql0W6triORJpv9Z8yLH/8AFV+IfBq5HL7alWlrK5w2pXlzY3DGF/49u1vvbqfpcmpahJ5E1nvf73y/xNXT+NPh/o6TLeaPqXnSsm+eFv4WrL0fXE8O3EdzNb7drr95K6ZKlOHu+8ePUUva8sixcW+uaTb/AGx9BvNv/PRbdm2/8BroPDf7Qn/CO2/9lfb5Eb+7cIy17T+zz8fvDf8Ab1tD4h0e3lRn8po5ol/eLXvWvSfsZ6feRXfjz4XaTqVtJ8zqsSxNG38O1lrzY1MJU9ypeMkbU8PWTvB80T458NzeLfjlrlrB4EeTU9RkfZFb2aM7f981+mH/AATC/wCCdH/BQH4RTXHxG1n4J6e9tqEvm2q3mvQwSs395v8AZr2//gln8Uv2Z/Dnjy3+Hvw7+Hnh3RzfJJcNeWtnD5/lqv8AFI3zfer7rn+Knhu1uv7Nt4Y2j3fLJCqqq1yYh4Tlt0PbwuGxeElzw3Pxn/4OCfhD+0l8HPAXgnx18XfFuiyv48164sL7R9DRvKsYYYd0cPm/xN/6FX5h6LCkLGCBNkW9dsa/w1+qP/Bzr+2J4P8AiX4q+H37HPhKaO4ufCt7J4h8UNCys1vI0flxQt/tMvzV+VOn3kDats+XEn8Sr/47X2mQYOhhMDH2ceW+vqfnud1KtTMJqcuY9G+HupTabDLB5ylmfdub+7XceHfFybfk3LtT7zN8rNXk2m6hJaqyIm1WT7y/w1f8M+MMRpveQur/AHl/hWvaqfAedR933T6H0XxVDuSZ3Xatvt+ZN3/Aq63T/Eb2scU32lVfZu2qvyr/AHa8F0LxIt1dIiTfMu35Weu20fxnN5LJ57bl+V91c0eY64ylGR7PY+IIV3+VuDSLulZW2qzVqw61Db27v9tkzH/qoZJdy15N4b8VI9uYZvk/6afwtXR2+sp8n77O5FrGUftHXh6x31hrlzbzF3m+6v8AF97c1Lql3HdJEVO1lB3Rf3Olcxa63N9+Z13t97/ZrT0+6a7DydQDgMepxkV+i+Dcb+I+Db7Vf/TUz67hef8AwqQj6/kzo7K7lNpG8gZhEgXy1bkr7VdjmtpE+0+dIrsn8XzN/vVzNpq9ukjQRSEGP75WTGW/u1a03XEYzJDPHu3/AHWf+L/er8p4tt/b+M5/+ftT/wBLZ0KpKOKm13f5nV2dun2MpDN975v96r0drNHh/wDWN8qvCzfKtY1jrCWqql593Z87L83zVr2uoJ+5SEs4m+avh61OXNzcp7OHrcxu6bY2s376G2U7v96tnT7e5jj+RF+ZmbdJ/C3+1XPQ6sm1PJjYN/d3/LWhDrx+0JbQzKGZWZ7ff/7NXnSlKU+WR3+05jaaJ1mFzCn8Py/3Vaqt8u3dMgUbk/u7arTa4kLMiOpdvuqvzbaz9S162tY2mubloyz7WaR/++ajl5dipVOb3WQaps+xzXmyRPMXY6r/ABVx+uSPNC1t/AsStWvrNxc3CunnSOPKV3Xzf4q5m+1J03ec+G/5ZMr/APoVdMYy925cZfZOX8TbJJlhxIxb+Jv71cV4ksZmhkeaZv3fzO1d9qUcxm3p87/wf/FVxHiaSF43h8n5t3zfN96vVo/vdjGM+WZ534hZ5N+x2Td8yL96sKS3mhDwj+FN27Z8u6up1uBJbpdiNCqpt/vbqx7uOOP93Cm0/wAfl1304np08Ry6lGO3ePdPN5hP+1/FWhp+n+cf9Svy/wB1vu/7VNWR4YQ6WzKn+18ytWjpMfltD8+yKZPnbb8kn/Aq09/4i62JjytHockKQ4hSdZWb/lo1ZGoQySM9s+5m27vM2V0l1YuJGj+4n3t1ZOo2ryW7vchjt+5tb7tT7sZe6fE1PeOYuNNe4VXebLN/yzX5vlqhFp80amYOreZ/d+8tbC21wrIlzNMsW/5ZNvzMtFro8LXUPk2EmJEYsu/5l/3lrblhGXvHPKPu3iUrOzmuI/kRdyv+93fNVn+zXkhim3r/ALS/drasdBe3jldH3MzfN5abVWra+H4bqY74Y/l+8zS+Wq1nzRfwmdSjKJyVxpaRrJ+5be3/AC33/LurBvNPeGGP5M7VbYv8LV3+oaDDGfOSGRiyN/qW+Vq5TVLN41be7A/M3zP81dtPljDQ4KkeX4jkbyZ4WXztqLu2/L/DtqlqWouq/uXz/Cm5v4as6sba3aWaZJG3J/vVzepalNCzJsUt/Bu+7XVTjKUos8mtU5fdLv25E2uk3K/Nu3/eok1KFwdgZpNv3lrnm1rbv2JtT7u3ZV+3uplm2O+dybt23+H+7XdKny+8c/NKRr2+oOtvseFtq/6pVqza3UNqXlf5U+81ZtrJIrJNH5ixs33f7rVfh03z5Bvl3N83yt93dXHUjCUrMr35aH0F4GlEnwSjlkPH9mXPPTjMnP5V5at08d0mzd5W/cjf3vlr1PwOxPwURmiVf+JbcfIg4AzJxXmNrY7k+R921NyNH81frHjRLlyPh1f9Qkf/AEmmfbZvrQwn/Xtfkixbt5cm/equ0X3W+7UqxzPh3Zf+A/Nupmnw+ZCN6b9z/d21Z2+ZtdIliX7u1W+9X8+xnJx9w4I+7GJDJbzQ22+F1R/4f/iqns5kb7k3yqn3du1d1RyQozPjbH5e5WVm+anx2rw26PBM2P4Vk+as51JSjy9jrp1JR90kkmufL875W/iT593/AHzUtrqDzNEnkso+/tb7q/8AAapN/o/3DIrfd+X7tSWm/ck3nM6r8z7qco80IxO2NaMYaG3askkDzPud9+5VX+FqmupnsrV7l7xVWOJnfc33WrOW8RG+R2V2+/8AN8tYXxW8SRaTov8AZUMqr5yN/tV34XDe2nGETnxmMjQw8ps8j+I2uQ6xr1/qtzC37xPvN/FXm99qaW8jvHMw/wCBV0HiyZ2V4U+cK7bJGevPNSu3eb55l3f3q+9pU4xpcsT8vqT9pVlN7li8vkkZvvb/AOJW+7WFqMCXXm/w/wC1/darElxHLJsR9pjXc3z0tts8tt7/ADM/z1vymXunJ3Vvc2N9+5fipY9aQt5Nzyf9muh1TSIREZvJ5b/vmuHvrhLe+l2HaVb+5T+2OPvF3UrOzumEyQ/O393+KsS6tZIpG/dbQtalnqyN/rguV/8AHasTWcN4vyP96l8RfwmBBczW3yDpVmZDfL52V/8AZqs32iuql0f7v3/krMVnhk+R6kv4gIeFsZpXm81drJ83rVvEOoxAqwEu3G31qpPC8LlHGMUAMf7xprLnkU5m3UgbdzQWLHhm4NKykmmKu2lrQCzZt83rWhbs6/I7sQyVm25KkfdrRRt1vn2qYmMiO5kf+4p/2qgLSN8vZf4qmkjRW+/j/Z2VUkYLnY+P4fmqZS5vdFykjNs+5Iuf9qmtJGFZGT7v3KZCybmR0zQ8if8A2NPlgWQyfN8+ykoop/ZKiFdR4LXzNPZEG5t9csxwOK6TwT++tXtv9ujmFU+E3bpfLj/cyL81ULqHZ8+f4P4av6mqW8ImTd9/bt2VQmk8tc9C38NHMYSPX/8AgnuGX9
qjRAX58i93L7/ZZK9Z/bp0BfEf7VHgXS5Y1McunW6yMy5wBdTH+leU/wDBPuNP+GqNCl3Ek2t71/69pK9+/aY0yLVf2t/CaSSOBb+GfPdU/i2zy7f/AB6v2jAP/jRmL/7Cl+VI4qq/2len+Z2OqalbRyXDp5ezd8it95dtcJ46l/tC8FrNMrMvz7pH+7W3c/6l3MPz/wC1L95mri4rXUvGHiZLdJt0K/K3l/Nur8Gp0+afKcXtJRlzGl8Pvhjc+ONcie23SwtLsVdjNu/2v92vuf8AZ1/YMhm+HWreNvFX7nSdJ0i6v7242fLHHHG0jLu/u7VrL/4J+/sn3/jnXLCyTTbrbcSr5q7dvlx19vf8FZJdB/ZK/wCCRfxY17w9M1vcXHhePSIm+6/nXTeSu3/gLNXv08PGjhbnLF/WMZFH8wvinXE8VeKdV8QRzM4vtUuJ4mZv+WbSNt/8d21nxx3KyKidP49taGjaO5tIS+07YlH/AAGrnkwwzDemAzbdypXGfSc1jpPgd8Kdb+KXjTTvA2iWEj3mrXUdrZqq/ekkbatf0rfsFf8ABN/RtL0nQbnxVoNrFpvhfSbe3+WD91cXCr+8kVf96vzA/wCDaT9l3RPj1+3NpureJ7BrjTPCekzavIpT920i/LGrf8Cav6CvjJ4ts/Cfha40Dw3bJbwKnziH5fM/2VrsoyjGJ4eOqTrVf7p88ft0fHt/CPhW88JeFbxbG12bHaNdu5VXbtWvy48c6ii6lePM7YkuN67n3N81fW37ZHizUtSkENtdSPEr73Vl/vfer498YWc17q8t5bRrsb7zL/FXNWj7SV5GMfgM6eV5tFmkVTuMD4BGDnBrj/DPh+FZPOd5LmX5l+5u+au503Sbi4EWjsS0k77BvOSS54/nWr4q8O2Hwst4rB5F+0yMyxRt8zK38Nfr3i7Up0MjyCUumFj/AOkwNOZLQ47WtYh8G2rTXkK/a2Xavz/Nt/u7a8W8deKNS1q+e5vLnarfLFCv8NejeLmudUhuHmmWW4kdm87b8qr/AHf96vNvEmmW1n5r3Nzt27W3L/FX86YiU8RLml8I5S5vhOWvoHkX532/w7W/i/2q4/xV4qmVja6bu86Pcu2P7q1qeKvESX15JbaVu2/daT7tcffN/AjyMzf3vvVxU/5Xsa05Rj7sjEvZnW6d33O7J93+7WLfiFVLybXb/wAeX/ard1Q7oykdsyLs+eT+9XD+PNfsNFtHgtXxK33mr1KEZOR004zqe6Zmu+KIdPjOyZX/ANquR1PxPPLlLPdjP3mrNv8AU7jUpjJK5x/CKgZscCvZp0Yx3PTp0VAGZ5GZ35NSRvtPXH+1TKK6OU0kbOn3SLGEHzf71aELPJD86NXPWMgjkXY9dHpqm8XG/lf4aOUktRK6ts37v7tacVvJIqv94/3m/hpun6akki/Jkr8ybkrYktPKVJoXXay7WWnze6YS+L3g0G1S4hlsn2v5iMu6P+GtuS1ez8AvZ3PmZjlX7v3vlrO8GyQrrH2Z9qbl/i+7urpvG32ZvCMt5Ci75Jfm2v8AdpRIkeY+JNQSOxld32t/drgJH3ylg9dJ4w1FJYfLR/vfermQDnLVJ00/hBhkcUKu2looNQopGOBxS0AFFFFVygFLHhm4NCx7l+em8sPSnHYmRaiuEjj2Inzfx1+iX7PmD/wTkT38G6x/6FdV+ce75sV+jn7Pv/KOJP8AsTNY/wDQrqv13wb/AORvjv8AsGn/AOlQPseDV/tlf/r1L84n50xk7tnf+9uqxDIkd0oyvy1T3t605ZnXPz1+Pnxso8xuTaw7TL/s/LV+L4gaxYw/Y7O5ZNyVySyOn8dL5zsfnfijluHKy1faveahM8tzNvbd95qktbpLZV/vVQbZjilE0i9GoCUTRuNRnui6Oyr/ALtJa2fmSCGY/wDAqqQLDK338VZW6VF+dvlV6v4RcvuGzpOmoI1m2LuXd8tdHosyKqF5vl/u1wn9pzRt8kzfLVjTb52z515Mw/uq9EdjLlmet6HfbrpfJvI02t/FXsvw18R/DTwCn9q+NvE9ncTL86wxv96vlttQ0q3tWmuZrpfk+SP7Rt+asDUdViumzskd1+6zS7qzlGXQrlj1PtLxt+214CgD6boN4qRqnyL/ABV5r4w/au/4SBX8m/bDJ91flr5wS4ST/XQ7v71Ss1mql0jVWap9n7oRjE9Sb4oaM159tub/AHDbu+/96mXnxSs7iZPs1+sQb7+1/vf7NeTNePHI3yL/AHals3825Akfcn3sGrgOUTuNe8dPeXAh3rs/urWReeJPM3/v2X+H71YU198v8P8AwGo/Odl42/N81Ll/mJj7ppnVHbKbtr/epJNSmjt2S2mbe1UIrpyu/f8A8CqW1vPnZH2tTCXunrXwhjabw/Dsdd6/e/hr2z4c3X2dvM+Y+Wm99qbtq14H8FdSddLkskmYlbj5VZv4a9r+Hd463ywpuCTLsb+GvLrR9/3jz8RH3j7x/Y58ZPJDZwojSIr7FVV2tur7g8ByTXlv/plyqIy/Jt+9ur8z/wBlvxIlrqDw+cybvL+WOVvmZW/u193/AAl+LiQ6ampWEO9422SrJ8yr/tba9LA1vdtM8qpTlL4T2zwPpOsf29CsNyyQrcf6xn+8v+1X4W/tqeCL/wCEP7eHxg8GeT5cbeL5L23VflVobj94tftp4d+L81nqx/sG5hieaJt9xJ8ywsy/3a/M/wD4KsfC1Na/aw/4Wp52+HXPD9vFcXjRbfOmh+Xd/vbanNPZVsM4HpZTKdPERTPj/UtSv9vzp5RV9u7d96s+4k/cu77Q7fMPk+9Xd3ngHUNQuBYaPA1zMzf3P7tc78RvDmp/DB9Li8d6NdaU+u2bXmjNfWrJ9qt1ba0kO77y7v4q+Jll9WpG8In1Uq3s5ayOcvI5reUvs+Vk3fNVNm2sN6b1hXc7N8qtXG+LPjpZ6XI1no9q1w6/K8kn3a838Q+PvE3iOeRrzUpFjkP+pjbatdmFyWvUj7/uoyliP5T03xJ8WtB0e4ltoU+2T7tu2F9yr/wKvtn4mXIuP2LlvHULv8K6c+30JEBxX5j6VuOoR4Xdlq/TH4s7rf8AYdIjxlfCWnAflBX9BeE+CoYbJc9jHrhpf+kzPpeGqknhMe3/AM+n+TPiOz8aTNceS9tlFl+WRq0rPxwin76oN+3bXC/PDcNs8xPn/ibctXoWdpNiP/tV+OyyvDVN4nx9PGV6fwSPRbfxNNeL+5maV/4/9mqlxffapPs3nfMrf3v4qw/D+pXNvKqCH5G/5aVZ1iz8mZrm2mwu/wCasKeS0KctDSpmVWUfekdn4A8J+M/FmoJp/gLR7rVb9tzxWtn80n/Aa0tY1D4x2M7+Htb0HWra6t5dz291YSb93/fNcx8M/iN4k+F+uWHjzw9eSQzabeRzqyvs+7/u19p33/BQ8+MPDFlr1neTXVzdf8fVrbwK0kn+zuZflWvXwPC2VZi7TfLI8XH8SZnltpUo3izK/wCCd/7QVj8F/ii/i34u6DeaUsem+UuqX1u0UTKzbtys1fTf7
Vn/AAXI+Hvwz+Hd7o/wQ1ux8VeMb61b+wbfT/3lrp7f89p5P9n+Ff71fFnxq+LXj/41aLcaV4h1W1s7a+t9iabY2+75d3yr81fPHiX4d6x4Lwtz4emtrTbuVvIZV21lmfh7Qy+rHEKfNB/ZO7L/ABEx2YUvq8klIdrXjDxh468Ua18SPiJr02s+IvEF011q2pXTbpJpv/ZV/wBmqMbzQ6kroihG/hb+9SRxPGreS8bPv3f8BqPd/piQoiu9aRhy+7EzlOc580jo7hvJ0+W5dG3bf4q5fRdceC7ZN+5d/wDC3zVv3myaw3o/3l+fbXCrcwrfMj/K+9vl+7tp/EaUdz1Tw74k/eJDI/y/3m+9Xb+H/Ej42CZWeb5v7teLaHqnkyBw+3/a+9XZaPrUqtsdGdtn3v4aylR5jo5uU9e0vXHuIUm+aJWdv93bXV6D4kd4US2mjRG+bzJH/h/iryXR/EMMMiohYMybnb7y7q6PR9QgaFPPfJZPnVX+VazlR/mHGp72h6rY65cySCF5o3Vm/wC+v7tdp4MvzfJckk/Ky/J2Xr0rxvT9ciWZNkylFRlb5a9P+EMolsbt1nDqzoUx2HzV+geDtOS8RsHLyqf+mpn1/CdTnzqn/wBvf+ksl1C6a31e4isZ1QGbfcFV+YjPNaunatuuEdNuxXbdI23/AIDXMa1Kttrt3LNtIaZwGZ+nPTbVq11S2jmX/Vh22s7bP++Vr8x4rp+2z7Fxf/P2p/6Wy3W9njpv+8/zO5tdV+0XDv525m2t5i/8tK14dUhtLj9zC277yNv/APHa4OHWnjYPbPslmdt7L81WYdam2pHDfyF403N867mr4zFUfd5Ynp4fGe8d7b+IGhuInR97bP8AU/e3VpW+tzMxhjm3bvlfy/8Ax1a85t751VJppv3X3maT71a+k6oiyeZC7Mv8NePXw7lVPVp4qMtUdlJrVzMu93VJV+Xav96qt9qUMcfnTfN/e8z5tzVi3F0i3m/ZvCoreZv+8v8AdqGS+eONdu3+981Ycvs+Y2lU5pFjWNQeNvOTy1Zv4Wb5l/3a5241KaaUP5y7d3zNUt/qSfZ5ZJn3bW3IzfN97+GsDUr6GOHY6KvzMj7V+6v96tKPNLcunUjEmvNUjhjcu8aDzflZX/hrm9evbaGxdH275Pm8tvm+aodV1yGON4bN2x/Hub5t397bXJeINcmvpjZpeRh1+ZpGf5m217GFoy5jmxGI5eUr69ND5zwQ+X+7Rfu/Kv8A9lWPJH9ouHTdkt91l+9VbUNagupC6bf3fy+Wr7mWqf8AbTo0XzfumfdE0f8Aer144fmjGxH9ocupuwzJDb/fYoqf99f7ta+m3CR2apMm6JfuR/3a5eC6eO4KO64k+bd/drWtb6OTa7vJ83y/NVSo8vwkVMzjOWp7hcWc0cju+0/N/vfNWbqmmpdW/wAkPHlfvdvy7a2lZxcP9m8yFW3Inzfw1JDb+dK6eTuRlVX3fd3V50pSjqZxlCRx3/CMWat9pSSR/nX5qn0/w6i3Hmw+d8v3m27mbd/erp20/ddeT5Me5m/hrS0vRdzNv+R12/Ky/LJSlKIcq5eWJiW/htI2/veZ/eX5t26rj+GhZt52xcM/zbq6eGzRdjmRS2759zfdqWTTpreGSZ7aMvN8rrG+5VrQipT5TgdQ0Xy4dn2bZ87b1ri/EmizKsvkptT5vlX/AOKr1PWh9njWGGFVZk+6zbt3+1XEeIoXmjdPJ5b5vl+61XCc46nn1ox10PGddtPszKjovzfN8rfLXD63ZvNcPNav8sb/AD/P96vSfFVi6q/ksqCNvkbyvmrktU0lJG8t/wByW+438Ne5h48sOZnzleO5xMNu8f79PmG7czSfw1oafcXUm77TGzL/ALX8VWptNmWTyblFYNS2Mc0jvbTOuF+ZW3V3y5ZHDL3eU0LG1875NjRIybt275VbdW5odpc3UyQud6t/y0rN0+3DKba5dj/fX+Fa6rRbHy1S2hZW2/d2p92vPqctM76fvHsXhG1nt/hGlrIxaQafOCT3OXrzq3tYbWNfs0G12RW27NrV6f4ZTyfh0kcZ+7aTAZPu1eeXkUN1dDzplj/dfKzJ95q/W/GOi6mS8PSte2Ej/wCk0z7LN5RjhcL/AIF+SH6TshbyURiWb5W27VWmx2pt8Q7Msu7aqp97/eo+0Qt8kM2x2/h2/NU8Pk2wKJNJuZ1bdtr8C+rxjUPMp1I8vLKIq6bNNtd0V327kZUqG6jLwjfOzxsnzqvy7a0lj8uOWG26/K3zN96s273xsYUuYyW+9Ht27aylQ5feKjWjTK0kjwyJ86o2z7u/dVaPVolYpCjff3f7zVV1K8CSMIUX5trP8vy1lw3ztM2xF2M3/LOuqnRjL0I+uSjLQ3o7raxuUkVE3fdb+Fv/AGWvNfiZ4k+2axtd8RKjKu1PlZq6nUtSh0/TZZt8jMyfIrJ81eG+NvF1zcXU1zNKy/3Pmr6HKcHGnKUjws6xk6lJQKmt3TzedNbTZ+bbtrhda3rI3zrvV62rHWHaN33s27+H+7WJr0yTS+d/6FXvQPmZGb53+3y1Tw3SRyBN/Lfe21n3Fwkcfyc1TivEkmZxuDr8u3dQUavijXktdNeKGZt7LXBzSSSSNMXyzVratcSTN/u1msjt1StBxlcrZOeXY1Zs9VubOZXSRtq/w1C0Pl7eflamsh27kWpkafEbsOuJeK6TIuGqhq2n+SouYU+RvustUPnUVpaPqkKt9mv/AJ0b5V3fw1IuWW5mxO8Mnmd60murbULEo8aiZfuNT9R8OTAfarSZHik+ZdtZcgmtpNjqytQP4huHVvnopWbd2prNjgVfMiwUYWlooqAHwLubIq9bsnl799Uo1/5Zn+KpoWRW2P8Awv8AxUTMZE8kjK3/AI7UEyoy79n8dLNJtb5JN1I0u77+3/gNARIoup+tNkG3kvk0rYx8lMf7xoKEooooNBH+6a6T4fyJFJNvT/gVc2/3TXRfD+VFu33pVx2M5fAdTqFv5kO9H3fw1k3Vu/nP/s/N9yujms0ht12bfm/iWsi6hdm3u7ZpfEc56t/wT+jK/tSaAw2qDa3vyr/16yV9O/HpYNO+OEXiG5jUiPwnFBGf4gz3M3Svmj9gGJ/+GntCfbwLW85/7dpK+i/2tL/7F48sI1KqZtJjBO7BIWWU4/Wv2XB+74E4v/sKX5UjixH8dehgnXLm+mM00PyLEzvGyV7L+wP+znrHxe8ZQpJpUnk30qv+7Ta0a7q+cobua4ki01LlovOlWKWRX+ZV3V+1H/BG/wDZR03/AIQ3S/F1h5awwzxw/f8Amb+KvxvLqVKVTnkeViXJU+WG59ofsb/sX6B8FfBNtf3kMYnmtV807fm21+WX/B2d+0XBq/wO8N/B/wAI3Ey2OqeL4kuvLuP3U32dWb7tfsp+0p8X9E+FXw51C3h1SGG5WxYBWfDKv3a/mq/4OAPiJ4Y8WfF74b+A/DepXReGyutUv7WS682NZGbbGy/71d1ScqtPnn8jvw+Fp0K8Yw6fEfBun2M0caP5KudnzVKghkmC
X9hu+b5dtaVnZeYq/N8rfw/3qsx6LukCI7b91cX2z0JW+E/bD/g028Gy2E3xW8XxQxxQNolna+dt3SRs0jNt3V+iP7R3i5LfNnZzKFj+RGX7tfnJ/wAG0PxUtvBfgX4teCby8hRrrS7HUUZfvK0bNGy/7vzV9gfFL4oWGrXks1tC0vnL8i+V/wCPV20fePDxMeXlPDP2gpvtVnNqt7M32nfsVv4VWvEdB+Eut+LNW8mzs5EST7snlbo46+i7vw3qvxA1SZHsNsTf6pl+Wovjd8Sfh1+yX4PhtoJIZvEF9Fs07T12yTs2370n+zU4ipSpwvMy5XzaSPkrx3oy/DX4ny6XcwmUaVcW7yR8DfhEcj8a888ZeNte8VeIpvE+sCNrmZ22Rt/yxX+7XQeJfFuteObm+8X+IJS95emSSYsPqAPwAA/CvOtU1CG1jm/fZX7rfP8ANX6P4z1H/YXD774WP/pNM1+ymyO8WG3Vrm5uYUXbu27/AJa8O+KHjD7drDw2c2yPbtWFX3LurpviR48RbR9K0253vs2quz5dv97/AHq80j02a+unuXVvm+bctfz1GtKvsXGXNuZt2yXErbLrG5t33Pmps2ivGz3OpXKt5f3P4f8AvqtSaztrWF5rmFTt/wDHa4jx14uKwulteRpDv+eT+9/s1vGjE2jT5pmP8SPF1nZRultMoVvmfb91a8R8R65c61fPLJMzIG+TdWh438X3OuXjwxTN5StXPKMDFe9haHs4XZ7VGl7OI1V3U5V20Ku2hW3V2cqOgFXbS0UURAkVvmyP4a3PDt1I0gT/AGq5/cfu1e0e+eGZE3/xUpRIlE9Z8O25WFfuhvvL8n3atX1h+73iHd8vzSfw1neC9U3xo833W+Wusks4bhfvsibfkqvscpjKJx1nvt9W+0oigq+5GWuj8aXv2fwSXTbhn3PJv+ZflrD1axfTZvMhh+VX3basa1Nc6t4BvLOGHbtt2dt38O2oJ9n7545qN295cM+eP4ahjh82THrTWOFr0r9k7wn4S8dfHXQvB/jOwkubC+uGSeON9u75WoqS5Y8x1xjzaRPNmBXgiivsX4lf8E6NB1Oea++GPiGSwLXDCKx1D5olX/erwjxj+yR8b/BzO914PmuoVf5prH94u3+9XNSxmHq7SNp4WvS3ieY0Vf1Hw7rWmStDf6ZNC6/eWSJl/wDQqqfZbnbu8lv++a6ozRhcjop3kuv3kxTeAKQuZCsfmz6UlFFBQrNur9Gf2fP+UcSf9iZrH/oV1X5y1+jX7Pn/ACjiT/sTNY/9Cuq/X/Bv/kcY7/sGqf8ApUD7Dg3/AHyv/wBe5fmj85k+8KGG00lKx3GvyL3T48Siil3fLtqSeZCUHPeiigdkSK3ylD96mD5l2fw0csv0o+98iD5qBRHrzt/2vStKyVLWHf8ALtV/mqhHCjf71TXV4qxeVC/P8VVzESVxNSvjeXG/HyDov92qrOQ386WRm3mmVIy1bs8ak72pl1Ih27P7tQq2T8v8NDNu7VXxFcrF2D1NWbf93C29P4Kgjbc3z/eanzSbcIjttqSZIduRVxu3UrNlV2cn71Vy20kUsTMrbw/NAFqbfCn38j/ZqLzAv3Hx/fqNpy2Pm6U3zB/cH50Adf8ADXxA+l6wltv2pI/8X8Ve9+CNatm1S28l2c7/AO592vly2umguUuU3Eq33lr234X+LIdUhhm+04dU2uu/5lrjxlPmic9anzQPrv4L65c6Lr9lfvMu2OX5W2/LX1V4N+LFta2ahLnaWdvmZvlkX+LbXwT4J+IlhCI3v9VjhRf70qqq13cX7XfwF8AWq3/ibxrFd3EL7fsNs25v/Ha8b2uIheMInlexq8vwn2w3xo+15fTUkb/Zjf8AirnNU/Zr1v8AbmuLn4Y6V4wh0bxxHpdxP4Ih1BP3WpX0a7ltWZvu+Yvy7v71fGXij/gsB8OPD6TWvwy+Gt3Kyrtgnm2pHt/usrV5P8Qf+CtX7SHjG4SbwTBZeGp45d9reaeWaeFv4WVv4Wrow9HH1JxlKBtSw+JjNTXun3/+w/8A8E6/ivD8QbzxP+0/pWpeCfD3g+Ka7+Jeva5b+Ra6TYw/NLGrN8skkm3av+9XwF/wVJ/bw1r/AIKA/tm6n8adA03+zvBmg28eh/D3Rdm1bPRbf93D8v8Aek/1jf71WP2pf+Cq3/BRT9sX4b2HwS/aP/ar17XvDdjbx/atFjSO0ivmX7rXLRqv2ll/6aV4HDEjQ/PCq/Jt217kIxjK6iepzSUPi1OV8V/NqDzJ0Z6y62fFUSRXHl7MCsatTSnIuaDG8mrQqn96v0m+Odwtn+wnNPKdoTwppuSO3NuK/OLwbZ/atchR3xtfdX6JftJyGD/gn/eyKenhTTO/+3b1+veGH/Ipzz/sGl/6TM+v4Y1wmPj/ANOn+Uj4WWZLiH5HXbVmxjjmkZEfdtri7PXpIVMPr91j/DW5pesfdSN/95l/ir8dPipROphRFwmdyrWza2qX2nvbTf63duRq53T75LhhMnH9+uhtbh1nWG25Vk+8rVpT3I5oX5ZEcMf7uXSb+HI2fe317j+y5+xz+1T4806O58LfDe9bRr5mlg1CC3Zo9qqzbty/d+VWrxq7s+ft9sm14/4f4f8Aeav3K/4N7/8Agqh+w/4c+Alp+zN8bfENt4V8ZPc/2fK+qhVs72Nt3lMsjfd3bq6cPjfqVWNSx52PwksbS5Iux+Qmv/tTeAPh2raR8P8AwaPEGt2dzifULlP3Uckbf3f4vu1+1v7J/wAPv2IP2zf+CW/jLx/8btM8Pt4gtPAd9qN79h2pc6XD9lZlby/vKyyKa+Mf2TP+CWi/DL/gqB4ttfj54GhvPhvdeJLi5t9Us4Fls2t5rhvL/e/dX5WXb81fVX/Bej9mz4Z/8E7v2XNc+NP7Jvh6405/iFpyeCtUt7eTNpZ290dxuN277zKrKq0sfmlfMa0Vz/CceCy+hgY88Yb733Pwc0FU/su1/wBJmcNEzeZ/eX+Gq02oPF4khttn+si+fbWja6Wmm2KQ9oYtm5n/ALtcbpOqTap463o+7a2xPn+7WcfePZR6T/rNNJ+9t/u159eLt1B3TdnftavQ7dvMsf3L7tytv2pXGXGmus0yb87Zdzt96spbG9GPvO5DY3j+YUT5GX726uw0PV4FjVJpm/3o65SOF4mRNnzrWlpsvk3B+7/wGtYxHU909B0nVE2q8L7i25dtdHo+oTSMqI8bIz/e3fNXnljqvlwhERmP8ddBo99DIqwsnlbfut/DUyp/aM41OWVj0Wz1gOyfvtu379ez/s8zRz2epvHISC8J2kY28PXzrZ6ttkZH2srfd/2a91/ZRuFm07WVA5EsBY7s5yHr9B8IqfL4gYR+VT/01M+u4Pqc3EFJf4v/AElkuvarAfFl9aTP928kC5X7vzGrH9uWklqnkuqP9193zNtrhfHWvmx+IGrRm5XZ9vnBU/7xrMXx4kcfyTr/AHdrV+a8T4WU8+xTj/z9n/6UznxOLgsZUj/el+bPSrfxA8PmbH+Rl2o0f8NXLfxZbxqEeaN
GX5V+626vK5PG32iGKf7Sp/2Vaj/hLv3b7IV+V1+6q18tUwfNGXMa4fGcp7Pa69C3lvNNGV+9LGv8Na0fiKS1d4d/3fm2xv8ANXiOl+LplLbzJ8z/AHWbd/wGtuP4gJb3D6g9z9odk27m+XbXjVsHyn0OHxkJQjKx63/wkieWfkkiaHavlt825f71VdS8cWyyM8J2Bd3lRs25tv8AtV5n/wAJ07Ls+0sf3XzbX/h/iqje+NvMRJhMpSNdr7vvba5I4Pl956nd9eh0O/1DxY/lvczXKoNm7y/4v93bWPqviJ2jR3mYq3zOy/K1cNeeLnjkFtbOvzfL9371Zd94svW3Qo6u/wB7dv8A9WtaUcDUlyyPPqY/llY6PX/EE2353w+7564nVPFFzud98OPu7f4t1UtW8SujN51yu9v7r1yl9qzztLDDMqn7ySfe+9XvYXCy92MkcGIzE2pNc3SNc3L4Xf8Ad3fxU/T7pLiNn3qvzfeb/wBlrkpr2ZW2QuqKvzMv3t1bGm3k0j/I/mps2o2zbXrewjTieX/aHNI7OG+huoVT7Mvy/fVv4lq/a6gkeZHTytvzbWTcq1zelyTeX++uW3b9v3K1luLqTcnVt38X92s6lGEYF/XJc3NI+kIdQeSRHTdsV/7n3lrQhkST95cou2Nlbdv2rXDWviCaOEO/mOjPsSRm/eLWzp/iAyTfutzRrxukf7zf7S15VbByPbw+Mpcp2NjdObjzvJhd5v8Almvy7V/2a2LWRLdmk35bb8y7N23/AGmrkNP1q5dv+PlVWNvvN/DWjY6lbLIs2xk27v49u5qwjhPetLY6Y4vqzrIblLqM3CQxu6p/47/C1VppJWZzbNhmVm/efdZqrafqELfvPtmw/wDLWqV9qz3Tf6HtUSfdaZPmpRw3vPlFLF+7qUfEW+FWzMrlov8Alp8vl/7NefaxJ5Nr5kk292TZ5i/w112qXjtepO/l+XH/AAt/y0rn9WgSWGSOF/MCuvyx7Vruo4flPNrYiMpSZwesWsMl0z+Y37yL5N38X/Aa5rWNOQRlIbZijbmdv4d38S13Wp2e2Ty327V+Xy2+9/wGufvtPnWTYifOzs37z7telGjGR5NaS2kcLfWqeXl+GX5tv3WqmbflEhRc7d3mL93dXT61p/2xm2Qxuyt97Z96s0aX9on/AH0PzRv8+19u1q7Y0Tz/AGkuYk8O26bd95w6/wDj1dVo8CQzfvkY+Z/t7flrJ0fT5ftHzpy3zbfvMtdNp8KW7ZebJZsrtT5drVjWw/NO5tTrRien6CVT4a5hQ4FjMVU/8Crzu786Vvtlzt2/KrtIny7v9mvR/DP/ACT5MNn/AEWXlvq3WuN/s3zMJC6hfv7mT5a/X/Fmk3kuQX6YWP8A6TA+yz+rbC4N96a/JGVHap5i+cjfL8y/JtVv/sasLN/pMW+bdGyfLGv3d1LNYTNMtz9pVnVNvzfe/wB2mLb21r8m/wCVvk2/7W6vxGpheXU+a+uTHTfafLmlRGQLt2M33W/vVR1C+hWH7MHb5X3KzfK26rF95253SFX2vtl3P/D/ALNZWrSwq+x0Zzt/5af8s6qnhf7opYozbr7ZIpSGZdv3fm+6y1HY2s00n2Ysyfxbf7tTxxwwxvsmX+86tVm10+2kk89ptkzRb9rfdrf6qowI+tHMeOmgs7H7Nv3vJuVPn+avAvHGmvZ6hPbdXb7+2vZfih4mtrbXrPRPOjQR7mdpP4mrzL4hx2010LxJty/eZY678HTjTieRjcQ61TlPOILx7Wb+L+78z1T1qZ5GD722t/CtWNZmh8xnRPu1lX115kY2Phdv8Kferq5eU54lO4m8z50+U7qpSSPv379u75qmvN67kfd8v+zVCQ+Wdm9tqtSFH+Ukm2Mx2Ozbf4abHEWX/b/utUDSN5Z2fK38fz1Pp7edJ9/5v9qnGRUv7pXkXaNjpwr0zenmBNny1b1W0eNVd3/2flrP+dGHyUviCJJJb7t3l/NVdkZD861btZOf3xxVqS1SaFUoDm5SrpOsTWVwm58p/ErVtalp+la1Z/abOZUk+81c/eWZtmHerOl3H7l4XnxVRlylSj9pFCaF4pWhz92ihv8AXH+KijmNApsnanUMN3WpAVWx9/mpF+Zh3FRqu72pyBOfn7f3aCZbkzK6MPN5Vvu1EXOA3y4oZn27KYx+bPpQSDH5s+lNf7ppzL/t0BS1BoD/AHjSUUjHA4oAWt7wDIV1FzvxtXdurB74re+H7/8AE48sorGRNvzUES0iehra7bOLhtrP96qV9bpHNvG3C/w1oyXUMdr5OzezP8i/3ayb5nf7/wAzM9Bzx00PVf2EFiT9qHQvKxg217nH/XtJXu37ZMhh+IOlzl0GNGAjyuTuMsleA/sF3DyftU6ChdTm0vdwXt/o0te8/tooJPH+lh1ZlGjjAX1M0lfseGbj4D4u3/QUvypHBiJ/7RfyOF+HSwXPiK0e5+ZvN3bdu77tf0Of8EmNF8WP+wRF46+GTWf9t3sszWrasVSFpFXbGq/3a/ni0OH+w7q2uXfyp2dd0bPtVVr9ef8AgkZ+1Lovw5+DlnpX7Rmq3lh4It5ZLiy1KO48uO3ul+Zl2r975a/FKWIq0o+5ExjThKr755/8QP8Agop+0V8XNU174aeJ/A1jd6w3iCazv7e6vGVrdoW2su6vyP8A20virJ8YP2zvEusJCttbaWy6Xa28b7ljWNfm2t/vbq/XHxB40/YM0z44ax8e9N8bX1zDea9qWoy2MkWzdGysytX4h2mtW3i74j6/4th3GLU9burqBpPvKskzMv8A47XoSceSJ14eNRc0pHTxrlvsybXP95VqSNblbxETckX3tzfxVNaxvGq/PlW++1S6bp/2zUmkfbu/2nqfdLlGMT9LP+CEeqXLfFDxLo8MO3+0PAcyyqv3ZFWZfm+Wv0R/4V3c6lceYnmBJIvmj2fdavz+/wCDeNbCH9ojW7C/v42iX4fag3k7vu/vFavr39p79udPDX2zwB8FvLmvIf3V5qkafu7fcv8AC38TVvLERpxPGrx980f2hv2mvAH7MmkvonhLTbfWvFs0G2K137Y7P/ppNXwX448ZeJPHXiS48YeMNYk1XU7p5HuL6R9yx7v+Wcf91a1PFFxeXuoXGpalqsl5f3G55by4ZmaRm/2mrzjxJ4iNnILCwm82Xdu+98sa/wB7/ar53FVp1J3kClGXwnS/axD4WlvmXaI7WR8DtgE188+MviBqupXFxbWyKieay7o5f9Zur3eOaeT4a3MzMryHTrg5HQnD182yxpYyHzkV32bmj/h3fxV+veNMZSyPhtL/AKBI/wDpNMdpOSsVF0r7Urpf3O2JdzPJC275qp6tfW1vG3kusSLEy+X/ABNS63ryWy744eG3bI9//j1c3q11NJbx3+pIrK3yo2/btr8Lj7vum0Y8xl+KtecWro77Itm7az/M1eF/Evx0+r3j2NmyhF+Vttbnxg+I3nu+n6bdNu+622vMCWLfMcmvYwOFly88z1cLh+WN5BSMueRS0V6vKd4UjLnkUtFSA2L79O
pFXbS1XxAFPt5HjkD/AO392mUm4qwokB3Xg3XHjm3u+dv3FWvTdP1B7yEb33btv/Av9mvDvD999nuNm/FeqeDNWeaNETafn/ielH3TmqROh1TRzqlk3+h5K/xf+y1k6bCn2O50yb5PMRo/ufw12tjJItizvt+b+Gub1zT3t77fZ/IG+Zt38S/7NXL+6KmeAataGw1Oe06eXKy177/wTr+H2s+K/jta63bR4ttLs5rq4btt27a8d8bacbnxncw2y8SOrbq/QX/glD8GrCL4S678S5kbfeaothYfL8skca7pPm/3q8/Mq31fCyZ6ODUZ143PRpPDMdrbx74WYb13sv8ADVy3sZrSGSZJt6/3VT+9XoWoeDXa+GYY1XZ/D93/AL6rOm8P2dqpt0tmMi/LEv3lr42o5z5bH2mHnCx5V4k+HngnxJH9m1Lwlpt5u+aVri3Vmrz3Xv2T/gVqrb4fAclqzbt7W9wyt/3zXvl/oLqqbLZUSN9u1v7v96ua1rTYYbh0O6Ir/DG27d/drSjjMQrxUth1MDhanvOJ8zeJv2E/hdqf7nRPEmoWMn8XmKrrXlnjL9hnx7pe+fwxPb6lEqfOqPtkZt3y7Vr7M1DTUt5GjdF/22X+Kov7Ff8AcokLI+zd5n97dXVRzTEwd5S0PNqZJhpS9z3T84vFnwk8eeDLw2fiHwreWx7eZbttrBk0yeFtkylTuxX6dT6PBHIXvLX7RuVfluolf/0KuW8Qfs//AAl8XGVNb+HVmC3zy3FunlSM27+8texRzilKPvHm1Mkr/YZ+dL27xln2fdr9F/2fQf8Ah3GgPX/hDNY/9Cuq838YfsGfDfVpJpvCWvXmnPu/1Nwu9F+X/vqvbfA/gC58C/sXXfgBrlJpLbwrqkQkj+628TsP/QhX7h4K4ujic3x/I/8AmGqf+lQPe4TwtfD4yv7Rf8u5fmj8vyMd/wAqGXdir13ot5azPbTQtvjfa2KgksblIw7wt/wKvyc+GUkQUU9oXU/PTSpWgsb/ABr9acu/HFGxvSjlTQT8Qd/nzQp2t8lJTQS2aCiTzvL+6cUskzSSM/8AepjDd1ooFyoVvmYmkopeWNBAsmVfikX5fn2Zpu75sU7+D8ar3TQX5Nu9P1pJm3/O4o/hLUYVvvmjmARVfvT9ybt1MBxyKEVzUi3Qr/eNCt/B0/2qMfwfxUHIBWq+EXMxY1zz61d0nWtY0kv/AGbctGZPvtVEsW60cqakSjzF/UtZ8Q3Ehh1DUpnPdTJWfuPIY8/3q0bXUklhFtefNt/1Tbfu1YXQnvJPtNnfwzIrfxPtb/vmiPILm5TH+8n3/lrY8OafG0h1K7Rtkf8Ad/vVfjsfD1nb+dqUMbO3/LOP+9UI1I3G2GzTyoV+7HVSJlItWcj3UzS/wt9/5614dn+p34+WsjTV2zfcU7f7tdFY26SR4RMt/tVUfeI+E47xlvWSJN/zL8tYVb/jtUjvhGm3b/s1gUG1P4TqfhfavJrC3Ozcq1+gn7Q9m17+wXdWi9W8LaZ+jW5r4K+G8f2azuLx32BU+Rq+/PjBcRN+wv8AaZMFG8KaWevqbev1zwufNlOd/wDYNL/0mZ9dwt/uuP8A+vT/ACkfm9d+H7yFm+T5f722qhW5s5G+dlr0aSOzulKeR/uMv8VZF54XS53ZTYrV+S8sT4mNSX2jC0zxLcxzLmb/AIE1dr4f1hJsbHXaqferi9Q8M3OnyF4UZlX7tLpOoXOnsu+baq/7dSVKMZHsdvdQyWYh343L/laqX9tYXUL2bvsX7q/3qxfC/iK1kgRLl1Zv71b9wvnfvkO7zH+Zv71ZmXwnNeI/2hvj9baTF8MYvjV4sXQYbhZYtI/t6byEkX7rKu7+GvozUv2v/wBrf9pb4S+Evgh+1L8Zda8ReD/DN59o8L6PcN8vnN8qyTMvzS7d3y7vu18vfEbR3juItbttqmNvnr9lv+CMvwF/ZC/bk/Yh8SfCXVdEtbbxjpOrW+o3HiSaJmlj02P5plX/AJ57fu/7VZVYxX90qpz+z90+Bv2qPhH8N/hH+yroPxOsPiFZt4q8QeI5rVPCKwN9pt7OFf3l1N/dVpNqr/er5T8AWV7f6u1xbDLj59tfv3/wU2/4JZfsjXH7IMfj7TfHepa94r8RWi2HwqtrOz2NIy7Wbf8A9M1XdX5z/sT/APBJP4tftDeKNUs/APhaS5n0fzP7bkvt0UFuse5mZmX+FvLatqco0qW9zjhWlL3ZqzPn2z0/UtLUQXkLIZIFbaybW21TtdD+0NI/k7R95K7/AOOXxWtvi58VG17SvAum+GNP0/S4dI03w/pbsyRx2+6Npmkb5mkkZWZm/wBqsfR9P/0V5JkUFn+796rjrHmZ6OFlzfaONk0HypPnf59/92o20ncrOm7d/u13lxoaIp875Tt+Vtv+srHk0J45G+Tb5j7nXdVxjyyNKn8pjW0bwrvmf+Kte1mfzN/+396mR2KQj54W+ZPlX71TW9juk+fdtj+5urblhI8mo+SoaMOobV2O+1f9n+GvoT9i68NzZ+Io2X5o5rYMfwkr5yhj2szp1b7m7+KvoT9hwyHTvEhkj2/vrX+Utfo3hNC3HmFflU/9NzPruCK3PxFRX+L/ANIkeX/GvWZLf4peIAjNui1WfP8Au7zXLf8ACWPgDev3KvfHNj/wuDxKqO2H1mdX/wBn5zXGMrxtsRMqv/LRa+Gz+nH+28U/+nk//SmePjakv7QrR/vS/NnRx+KppFCPCrKvzfNVy18RbrlZt7f3nrkFun8zZCG+b5mkWpIb65X99vZVVtrL5tfOVMPGXMXTxEonb2/iZI5N6TMFbcu7dU0Pi5BHs85TtfburjYdWRWT54/9U33qgTVJFjXhf73y1wVMHSl9k76ONnH3Tum8YTNIro7MnlbX+ek/4Sgbhsm+Vv8AvmuHhvnaUoX3M1WIdScrseZlCv8ALuSsY4Hk91HRHGc252MniR5FSbepC/f/AL1QXeuPNb/J8jN83365htYRoh5Zb5n/AOBVDcX03khN+fn+9V08HGOhz1cRKW5qahrEzfOdr/8ATRv4azrjVHuJNnyqf7y1Ukn27nRlLL/tVVWZ5Nv3t396vRp0fsnDKtOJoW949w33N38KyVv6PvjjWFNrrs2v81YGlwu8ez+NvuLXQ6LDcyPskhwsf975fmrSVP3TONSR0GnrNHsT5l+T51atu3t5lZEhdkOz59v8VYmmvcwxvDcpGdz/AHt/zVt6WzwyJNtY/P8AxfdkWuSVP3veNY1pHpl59pi2TPDsWTds+b5asRatNZzb0vGEWxWZf7zVl3urL5LI7qQy7kZU+7WXHq03nB4X2lf71aywvMdMcVyneabrX7tLlIWU7922Rvu/7VdFa6pHcxpN52/zHZXVfvLXmtjfQ7Yt8yt91tv8P+7XT+H/ABAkbfO6yRN8yKv3VasZYOPNsdMcZLuegx3UKrG7vIjt821f7tQ3jJJ87ybGV/k3P/DXOWurQ3O+Ge8kd/8Ad+6392lvtedo1SF2jf8A2ko+reRtLFR5CzePNqcnk+dt8
tdibn+Vqz7y38uTGxoj95P7rLTmkdfke53Hb87SJ81VtSuP3aQw3KttlVWXb81dEcP7pySxRl6h53mHydv+q2/vE+7/AHax9UheFUme5Z/MTbtX+Fq3bmPzI5p/tPzq3yrWXNbpNcNN++RGVdnmfd/4DW9PD+8ccq3Nuc+ummZYv3KruZtjb/lamx6OskpdLPcf4/n/APHq6q30NJm2WtnxHuVvl+7/ALVX7HQ0hUbE3mT5Xk2bflrtjRgc/tmcjb6Pcr+5hdgVb5Nq7a39Jt3sU87e29Zf9X/z0XbWzb+H42f9yFKRy7dq/wALNWja+G3VNkkPm7X+fcn3v92j6sP6xE1tEhCeDBDjy/8AR5OB/DktXKeSkO9Emk+/95vm213QgZdLa3XJPlMB8vU89qw7rS/LjmfYqPD/AHn21+s+J9FyyjI12w0f/SYH2nE82sHgbf8APpflE5iSFGk3p/wL/ab/AGqpXCw7rgwpz/B5n95f7tbuq2qANND5bI332X73+1WDqUbyQumyNx8qxbXr8g+q9eU+O9tEydQv3baiTqHbd5/l/wALVkXF1iJcw7W/3q0NUmRoTDCi7V+aVl+X/drCurxI/Nd33vtVVVvurVxwfu+6ZyxRZtbhIZWjeCMrJ825k+7/ALNTLI9nDJePCr+TFu2/8CrPs9Qti62z2zD5FVlb7tZ3xO1qHS/Al68LtmOJtskf3l3fLWcsLOMveia+25jwD4tfFRNY8aaleJNytx97d/6DXOyePH1SHZ526ua8RL/rZvl3s/zN97dWPZ6pNDIfkqIx5TG/N7x0V9Ik1wz/AN77/wDtVmyfM4R/4f4akt7gyw/vPl/2v71RXi7tkzvhqCOWERk2z7jjlv4azry2dlb+9vq08iNMu/buX7lNupt3zpD/AB0ehcfdMqQJDnKfLup1vcOs6zBcU6ZnVnKbQWeqzb1X7/NOUh8p01vHDqlnwnK/wrWFdW6WuYXRlP8ABV/wjqyWt0IZn4Z60fGWgzW+L9Ifkk+b5aRPwyscuY2jkHzZ/wB6tnSbX7VAUf8Au1ntC9wq702lan0uR7e4/efdoCQzULWaGMpv+X+Cs+OTy2ztrodZVLi23oi7dn8Nc9JvWSguPvETM0jk5pVXbTU6/hT6qJoFFFFH2gClVj9zfik/4Bmj/geakBd3y7aSikY4HFXL+6Am3b82adRStyu+lEmQlFFFHKUFbfgWR49aV0P+zWJWr4P+XWEcpkrUkVPhPQ7xnaNfnX5f4l+8tZdxdJGp/c4Zk3VoXF09yqwv92sHVJsK3yYXb8qs9X7kTmPYf+CfCNd/tU6TM6bRFZ3jr75t3H9a+jv2sLeF/iRptzJGCyaKNrHt+9krwL/gm7pQk+OtvrzqAWt7hF+bn/UvX0L+1dOIvHGlqJGJOmgmLPGPMf5q/YMNb/iBGMt/0FL8qRwV05Yj3TyLxJC8MUd4j/3W+9X15+xPrz/FD4I3/wAKL/VY47j+0pEsFklZlVmhZV+WvjvxJvvf3KIse1NvzP8AK1dv+zX8Vrz4d6pczQ3LM0d5bzrHv2t8rfw7a/FsLU/e8pnLllSNv4sWHif4I/Cb4haN44SO31fT9BuINsifLM0jeWskf/AWr4x+FUDx2jFPn+X5fk+7X6lf8Fndc8B+Pf8AgnHo3xpsIoU1vUtcs9L+0Rr800bbpJFb/d21+ZXgSzSPTo977N33l/iruqRpKXuHXhuaOHXPudXu3WqfPtZv/Hqm0d3XUndtquz/ACf3fu0kCo1vs8nC/dWP+Kn2sKW0YmdPMPm/KqvU8oe9sfYH/BK/Xtb0n4vanqWg6xJb3Fx4PuoJWjZlZo2Zd3/oK19D+OL618LWLPcvJmTc7t93ctfJH7APxCm+H/jjVtYfSftzzeHLiCC183aqszL8zV6pr2tar4y1I6x4nuZEaTa3lq/yR/7K15mOxHs5csdzzsRHm1iS+LvGl54g86bSt0MX8Ks/3l/2awk0ua4maNIcK2399/e/2aluLqztbf7Tqu1FX5ov723+7VW61x2sRc3k32O1+bZDt/eyf3dteZ78Y80jKNM62OBYvh1dW4IAWxuBkNnH3+9fLWueJvPZ7PTX3FX2vJsr6U07UIdT+Cl1fWsTQq+k3YVSeVwJB+fFfLFx9jtYXMyNmP5ot1ftHjRJ/wBh8N2/6BIf+k0yo6Fa8W2sYftNzNu2y7v3n8VeT/F34i+RbTQR3Pzbm2r0+9XRfEzxtbW9tNsuWRF+b5v4q+ffFXiK58R6q9/NwD9xfSvxjL8L7T3pHrYPD/bZUurqa8uHuLl8u33mqOiivoPhPSCiiiqjsAUUUUSkBc0+GFoi7puqO4sHj5TmptKu4rdZFuXONvyIKn09od3+kuoH93dWMvdkZe9GRlEbOGGKA27mtPVI9NkuGENyr/7VUprN41zvX/gNVGXNuacyGwSFJg6jNd/4C1aG4u0R3+feu35K88VvLf5K2vC+ofZbwfPj+41XykSifQWm3yfY98zrtb79YXibVPtEMiPeKnl/dXZuZqoaX4ge40N33r8vy1zt9qzzTtvdh8u16XNzHPHmiZGqNCNQa837ZVT5GVPu1+0v7KvwR/4VD+yf4F8AfYNlw2jR6jfyRr964uP3jM3/AH0tfld+xv8As6ar+1Z+094P+BuiOuNW1aN9SZvvR2cP7yVv++Vr93PE3hu2jmfR9NRktreJYLJd+7y4412r/wCOrXkZpK8LM9HAy5avNKJ4dqHhPzlbfu+X+FvvVjXGkuuIURQ7N8jK3zV6zrmivbzbERU/h2q3zM1cjq2g7V4hz/F935q+ZrSlGWh9RRrR+KJ5jr2n+ZI8c22It8u2T+KuK8QafCzK6eWHX7vl/wB3+7XpviTS7xZNn2ZRD935vvNXF65a2yrLZwzQr/DtX71cUf3nvLQ9iniPaQOEuLOb7Zvj2lZNzbflpq2P+tDvIrLKo8uRvvVoapa7djw7d6uyxTNF80f/AMVUEzIsaJMjGRdv7xa09pCUrlyjzRuV2s/uwvMrsr/uvlpF86HzIZodq/xs3y/N/s1ft5I2khTydpX5d33adJavPuNzeK4j3bVZ/l3VcanLuTKP8pg31rPJHhLZt2xt7ferY1hFtfgRq4hHCaBfED32SGiHT5rq3GxG2/eZWXarVo+JrSGT4V6tZiIRpJo12pUdBmN8/wA6/oDwE5f7cx7j/wBAtT/0qmd+Uc/1ivzfyP8AQ/Mbw34Xv9a1p/tkLHdL/rF/ir1vSfhH4YXSnm1iwjkTytyNt+7XXfDv4V6bp2lrrFzHH5S/N8y7dv8A8VXK/F74hW2lrJbabc/d+Vdvy/LXwq5IrmPxv35zPLvil4X8Daey/wBlWDRP/Ftf5a8/lsUWT5Pu/wB5q1/EGtSalcO/nMV3/wAX3qp29q903mCo5uY6I+77pUh0ma6/1Kfd/vUv/CM6kq7xDuFdLoul7m+40qfxNWtdR2dpb796jb8q/wC1VcqJjLseezaTc26/vrZh/tVXa3f7ipXX6xqkNwGTYrLWRb29t53qzfwrUy/u
lRqGMUfhKVY2YZH92t9NIs2/1iferRtdC01o13w7lp8oe0Zx6283UJTvss+MeV/wKvQ9N8M6JcYT7M3/AAGui0TwfoMMqv8A2bDt2bd01HKTKsePx6ReTfchZto3fKtOGh6kzbPscmf4dy172sem6XZva2Gm2+1vmdmiXdWKuh3PiLUEcQ72V/4Up8sQ9pI8butJv7OPzrm1ZV/vNUUaxPJhn42/3a9H+MmgpoukxLs+fdtZq8+02DdMsv8ACv36g15vcLdj4ZmvId+9VH+1TL3Q3sF/1i7WFdFZ/u7Mv/33urC1y53Myb8/w7d1BnGU5GUW2sSetM3O336c4yd3rTaDaIoYrR5jr/q3pKKuMSiSNsHe/wD+1Usb7W3pxUC71+THzVL87Y+7/utUGZMD50gff1+/VmE/88+WV6qRqi/f3VatvNZgifKy/wAW+jnIlH3jc0k+Wy70+999a6Cx2La750z/AH1WsDS5EmZP3e7+Gt6SRILAyOmAqfJt+VmoEcL4yuPO1Zk/hWsqJPMkEP8Aef71SalcPd3zzP8A36seH7f7Vq0cPbfWsTX4Ynaw2b6L4RZF+9JFur7c+PErw/8ABOYyxk7h4L0jB/G2r401fUbbTbeGzdG27fusvy19q/HuzOof8E/ZbSDjf4Q0vb+dua/W/C582U55/wBg0v8A0mZ9bwmmsJj7/wDPp/kz89tH8WTW6t9pfctdTpOsQ3UKO/zL/tVwl/ot/p8jJMjUyx1K8sZB87Y/u1+Pnx3LGXvRPS2sbK8Uokasuz7tYmreEUjj3pD/ALX3ab4b8Xoy+TM6ru+V2rrPtFtcQ+dDt+593furSMiLTOK06P7KyTRpg13fhnU4bixa2mT738X92sfWNH3Ik0MOC3+zTNHknspt5mYL/d20EzNfxdpfnafKjorBk2oypX0d/wAEN/jI/wANv2vPD/hjXvEOpW2i61fx6dq1rZ3TItxD97ay/wAS7v4a8EvJE1DTVTzsPs+638VZPwD8aXnwk+OGleKoXxLY6jDdRbv7yybmqJ04VKckKXMf0QR/tP8A7HnxW/bE0fwfqvhLUNHtPBeuNomh2eqaivkQ7d0lzcNH/CzNtVa8u/ZO8FeLL/4qeLPA+hftAar4N8NeOfEN5YX8mjxKslxp7TNtVWb7rMrferxj4nfDvwNJ8dvD37QmleNtJ1uHx9YTa59jsZ1Z9L8uNdzSL/DubctdD/wTh+JXh74+ftHRaJrviT7NYSSzNpax/K00y7tu5v4V3VxYiE48vIzzuWrWxPNLSx8bf8FZPgN8OP2cv+ChXiz4QfBjw5qFn4Y0XT9PTSnvl3fa28v99cK38Ss1eS6DYpJpYe5RkdXZWX+Kv1Y/bB+FPw9/bq8B+J/iXr2jx3njj4b6yunf2Lotwvm6lp8LfvJPMX+Lbu2/7tfmNoWr+G/F2paxqXhTw9Np1guqXCWFjcXXmyxwq21VaT+9XZGp7Slc78vlKVfkZl/YYZo2CQ8R/N+8rP1TS0ZmSBP7rbVrrG01GZN/7oqitu+9UV1YpcKr/ZmJX+Ja1jL7J21v5jg5NNmjbfs2/P8AJHt+7U9rZhd3nPs/irdvtLmjnPyb9yfMy/dqFdNhhkbzoWKqv3lrqjseLUqe9eJgLv2h7ny9y/L833dtfQf7F0AgsPEKZyfOts/lJXiq2KyZQ87k+9Xt37G9s9vZeICxB3SWpyvTO2TNfo3hR/yXmF9Kn/puZ9RwGn/rLQb/AL3/AKRI8K+OtqW+LPiTK5Da1Oc+nzmuJaPy12b2/wDZa9F+N1u0vxT8RyBE2rq8+5j/AL5rh7q3kVNibflT7zfxV8Xnn/I7xX/Xyf8A6UzxsdUlHMa3+OX5sypGeHdvRt38FMaQMp2bdy/eq59nuYY13ybfkqvH/rDv2/N97an3q8XlkRGUSLzkXr87LQziMN5O13k/h/u1JDbuyum/bt+41OmhRsOm7ev8TfxVyyjyyOynLmhzCQtNHGvo393+KpVuMQ+fs3M38NFrGkakPw33qGhRZEhO4q3zbv7tZSidEZe6PkuNq/uU+VU/h+akMjyN5+9fm+VPmpI4CqnyduPu/LU1tp+795sUfxbVWp5UORDHHt+d92V+6uyrml6XLfAbE+7/ABVYtbOa4m2eR975dypXXaDoMMcA/iXZ/c/iropnJXM/SfDLy5eH+FdvzfxNW5a+Gd02xJmlMfzL/vV0nh/wpDcSfaPI4+8i7P4q6Gx8IpNGNiqrt825Vq5ROL2n8pxNtotzCuzyWZv4NqfdrVTT5rfHnTbGVd3+ztrt28J39q0bpbMzNF83l/dqhfeEzGrzOkY8v7qyKzfM38NZ1I83wm0ef7Q/Vmv2nZ/JXDL8/wDdrAmv5vtR+dcbP7ldHfRzNDK8xZdz7drfw1y+pQw27fO+/d/dr6GOFhyeZxRxUuYlh1Y27Rpsk+b5vM+981bun695duNjt8r7trf3a5FZ0hU/eR/lVGarSXUxQIn3l+bctU8vjL7JrHHSjqd3/wAJQ8jb0mj3qisir/7N/tVaj8SPN5rpc79qf6v/ANmrgIb+ZFZ34M33G+9U32zYzb3xIrrsVf7tFLK+aLsVLMLwudxD4u/1aB2Zf4/Lf5m/2aY+oPIrOZoV3P8AdkbbJ/vf7Vctb6gLrbczp5bt8rq3/staulrHFL/pO0q3ypGv3lWtP7N5TL69zGwtwt7NshRQ/wB1Wb+Kpre3uVki865Vw3yvCqfw1Xt7OFfK2O2FfcjN/eratbNftQtpnbH3mbZVrL+U5/rnvF3R7GaSaP51bb8yLv2tu/2q6a10kqwhd42f73yvuVd1Q6Do/wC5aZ0jZF+42z5ttdhpGnpax+TMjNFJtbdJ96pjg77DljOWPvGHH4bTy2eSH545fkbyvvf7LVch8PzCP98jf6rduj+6rf3a6L7HDt8mT5n81fKZv4Vq4ui/M7wou/5vmaqjQ5Yk/WOY42dJHvjHMcMzgHd2qvrGk7W3w/N5i/e+8u6tHVIjB4geLptmHJOfTmpdSsYY9nkuz7tzMv8AFX6n4h0efKcnfbDx/wDSYH3XGOI9ngstfeivyicDrFnthz9yJkZX/irmbyHzIf3G4bk2MzJ/49Xe+IIf3a7EVdu5tq1xepxvCGkd2Mrff/dfdr8zp4fsfC1MRynH61b7pD+52IvyszP97+7XPX0LR7Xd9y/KrqtdZrVrumaHyd8ez59v3d1c7qEbsySG23fPt3bv/Za6Y4HljZRMI4iXxGX5EzKz72Rmb71c18Wle78NzQw7R5n3t33ttdXtuftHz7T8nzR/3a5P4sSbdPTZuiPzfNs/8drkzHC+zwrkduFrx5uU+bdc0+2maVC/3Xrm7yzSPc6bcLWlr2pPJqUqu+G3/dWol/fJsyuG/wDHa+U+LRHpR+H3inDK6qr7FVVT73+1VtmS8gYJJudv4qimtXDfvCxVvl21CsgjkRNjfL9xd1OISiQXCvHNs2KV/vVG0ybdjp8q/dapLp0kLbE/36oXk7x4kR/mqvcCN/hLMkL3C5jT7v3WqpcQtGu
zHzfxtSR3jxYdDy33vmq1HIky7+rf3TWZcpcupnQsYZN/da9E8G61pvirRZtB1P8A13lbYm/u1wd1YzIvnJytO0fVbnQ75byHcpoHpI0tU0m60XUJLO5Rl2v95v4qrTQ+SwdOVZK66+jtvHGjjVYfluY12s33a5Vle1keGaRsr/C1HL9omIlrcZj2z7v92s/UI/LkLp93/aq1eSJuZ0T/AIFWfM+6TO/5auMiojA27mikVdtLTNQopD8vz4paACm7dvzZp6feFJU+6AUU3P3aU/N8maIgLRRRR8QBSMueRS0UcoBWr4PV21hNj4rKrT8J7/7VXYmSv8NSKXwnaXDItqPm/wBZu+b+7WBrEm3ds6/3fvVt3kjrA0f3VWsDyZtS1aCzhhYmSVV+WiX8xgfTf/BPOzew+I+hRykF547uVyOx8h69p/a0Rm8daYf4f7LAf2HmvXD/ALHei2lh8VNLa3UKkdnKEQptKt5Dbq739qtW/wCE+0uQu20aV90DOT5j1+xYGUf+IEYxr/oKX5UjyZ1IuvzHjmvW8ysiF1likT5F+61V/BNi9x4otJtNds+btbb/AOgtVjUo7/VNSWzs0x8+yLb8zV9Cfsm/sm+IfGmrLPbeFbq6upmV7O3t4tvzf89JP9mvxXC0Z1JmcsRClE8k/wCCkPxB8T2/7MPw8+C+qQ3CQXXiKbUovMf5f3cfl/8As1fPvhfENnDshVVjT71fUv8AwXD8Dn4Z/E/4X/DfUb9brU10O6v9S8l9yQtJIqqq/wDfNfMuixosaJD86Lt2K33q7uX3z0Kc5OlC5u2ph8vfNMzD726lWZGhEJT5v9yrvhnwzf8AiTVLXQdKhmuLm6uFjit44t25m/hWtD4lfDHxn8J/EX9j+LdK+zTb22x7lk3f8CX+KnGXKV9s9K/ZT/c6pfzQtGv+i7d0i/L/AL1e1LeXOrXDw6UnzRvteaZPkXdXjv7JOm22qXmow38MjpHbrLKv/Avlr1zxJ4qs7O4/srTYI43X7qx/eX/erycdKlGrzHl1pctWxFq1xYaAN7zfabz5l/vIv+6tcT4k1C8EM2sPeK00MX/AF/3a2vst1cyb7y8VFb5vO+9trkviJqT2+mw6Vsxudmlrx61aX2zHm5j1HwTI0n7OrSzRk7tGvSUz1GZeK+QPGnipY43s7ObcjfMzN/DX1x4PkD/syzOjdNBvgCPbzRX54fGPxzFYPL4e0yZmuZPluJP4VX+7X7v4uUJYjJOG1/1Bw/8ASaZ1YajKtPQ5n4meNn12+/s61uWe2h+Uf7VckuMcUrAt1NIq7a/LKNONOHLE92MfZx5QVccmlpGOBxQrbq1+EsWiiinHYAooopgOjj8yVUP8VDRPtZ9mQv8AFTaWOR49yJ0aswEqSGbayh32rUdFXyoCSaRGbeny062nMUoZRwKhqS3jfd/s1ApRO+8G6w81q9tM7bWT+H+Kq2qXjx5m2fd/u/erG8O6klrJvmfb/do1jXHmm+5xvpy90x5T6p/4IveOx4O/4KZ/DCXztg1a7utLn/3ZoWVf/Hq/cjXtBSx8+wSH5o5WRlm+996v53/+CfHiF/D37c/wk1p5mZrfx9p/zL/tTKv/ALNX9InjSzRdW1ATQsrfapG+Zv8AaryMdR5pG9OpyxPKfEuj2d0r+TDJCy/K275mWuM1aGGOeZHTcsfy7fK+98v3q9J8Sectu0bzMrN/F/d/2VriNUs90LTWz5Xyvn8z71eJWo8vuuJ6dHFcu55T4is5o5Hh8nasn/LST7tcLrlgm6XfDsdflVf4f96vUfFFsk0jpH5ibvvs38VcJrVrDDHNM7/OrfeauPlpS91Ht4XEcx55rFrmN8vu+8v91qobphGjzWfKqvy/xNW9qlrNZ3H7na+35fmbcrVlSWsLTJN5zLt3fK396sp0+V2jHQ9eNT3SrDG87SwzJHs2723L83/AWq3Z2/mKj9WX5vu1Fb6f9okea8RRIz/JtrWhV1s/3MOdvyvH/dq+WMpxM5S5feD+z3jkRJvu7d+6NvmVqZr8DL4K1GDdknTpwCVx1Rq19Nhdlj+Rc/3tny0zVLe1eGe2UEwtGVwR1Uiv3/wGp8mdZg/+oWp/6VA9LJKntK9Vf3H+aPiDWvH02n6HPps15t2ysGjX7qt/s14D441681a+d5psru/v16/+0xbw+H/GV3ptnbLDFM7Mir/vfNXlFv4dutQk3vbKV3/xV+e017SlE/JqkfZ1ZHJw2M11J5wh+X+9XQ6XoW2Nbl027W3V0Fv4ZsdN3b3Xev8ADWZrmtWdirJDNg7PmrYz+IS61CHTV2QhQfvba5zWNceRzBJNz/e31R1LXHumd0Rs7flaqLfN87/e/wBqlzcxUYltrhPMCI+fl+WrNv8AOo2J8/8AG1U4Y3kkCJ0rW03S5ZtrojZ/jqOWYvhC1WZhv2fL935q0tNXzm+dGVd3y1Nb6P5G15nY7vvbv4mrQ0+z2y/O6rtrQjm5dS7o9v5arvm/2katRtW8lVd/vb/vbP4qzluraGPZCm5qb/aEClt/y/8ATPd97/apSCXMdBp+nzaq2z77t8v/AAKu60HwnZ6DYia5dd/91vvf8Crz/QfElnp6rNM/zL821XrTuviJc30DW9u+8NubbJ/dqZe9rEOWUuU4X9o7Ura4voLa3m37f4l+61cNoVt8u/yd27+GtP4mX1zqGvL9p+UKn3ah0eNLdWHyk7Pu76Rr8MNSbWr57WFYYXbDJ861zV5MZmxs4/vVf1i+eaT7+Qvy1lSOm7ATbT+H3R04yEc7Dg0jDI4of7pp25NuacTRiUUUA7e3P8NSUOX5m+c4qXzFPzpubb/epiqjLn5s09f/AB2gzJI1jaNX+bP8dW7Pf5nnGNiN/wB2qm5Fj378n7vy1e0xn4Kbg3+1VcpMjf01Xl27/u/e2qtWPFGqJDpJTGG2/I2/5qk0W3O5Xf5VrG+I12rSR20fy/7NOK5SPtnJv9010fw9037VqyzOm7b93/Zrna7X4d2otbGa/k/u7aZtU+Eh8Yapu1BofO3iNdv3/u19/fFuYQfsDRTEDA8IaT1/7d6/ObVm8y/km+bDSs1foh8bWCf8E9N3UDwbpP8A7bV+t+F//Ipzz/sGl/6TM+t4VVsDj/8Ar0/ykfFF1bWGpWrNIin/AHqw9Y8BecrPbJg/wruqbTtVeT/lhs8v+9/FXT6TfR3S4MKlv4F/u1+Rcx8T/eieVtbX+l3DI4ZWWuh8NeLHtYwk3zfNt3NXT+JvC9hqlq9/bKu7+7/FXE3mi3mmt53ksqr92nL+aJftOY9F0vWrbVF8l3yPvbV/vVJqGj741ubbaP4dtcBoesXNjMvzso313Hh/xBDqGIZn+6+7d/epxl7pEy1o9vcs374fKvy7a5fx3b/ZdUhmSFsLPXZTW/kyfabZ2+Z/k21g+NtPmvlivLlG+X5mVaqMf5RS5j7/AP2JZpvin8K/CNnc2cNmNJ066066ks2/eTfeZVkrC/4J8eILPS/2gLGbXrm4g01tUmgezs9ytJuk8vb8vzLtrs/+CWqeAP
F37F/jnRPDsN1N4o0nxbp9+8kny+Tp+1vOkX/0GvMvh7eal4P/AGltetvD141s1rq7T2TK3zeSzbttEY81GXIcsoylXPufxT+0T4i/4I+/HHxJ4mb9muC5sPiF4QuZ/Cr6zOIvs8kbMquy/wAWGb/gW6vzh+E8mpapoN9rGq+T9tvr+S6uo4V2xeZJI0jKv93burv/APgrh4y+PvxA+Pvhjxx8aviVqeurceGo7Tw+s4WOK1s1VW8tY1/2v4q4z4I2rzeF5Em2sv2hfl/2lX71c8aMqcbnVhKcadU3mtUaQPM+3cnzxr93/dqCbSkWQ2sXzq33tqVu/Z/Lb50+X733futVaRXt5Hm37tu7bt+63+zWlOPNM6K0uWBy81m7SGzeFfli+8vyrVRdPRV81PufxMtbt8uxdhm/1i/Myr/47UMmmpDMzpyNu3/Zrt+zZng1Jc1W8TBW1hb5ETb/ABOzfw17F+ypbJbwa6FdTmW3zt+kleX3EfyvM+7ezKqt/s16r+y3EUtNccOCr3ELKQMdnr9G8J/+S7wvpU/9NzPq+BJc/FFB/wCP/wBIkeNfGW1B+JOvYTKnVZmdf73zmuKvLFJZk2QqW+Xau2vUfidoUkvxD1qZZdobUpnw3f5jXPXHhmFmV5rZk/utXxmeR5s7xV/+fk//AEpng5jWtmVb/HL82efSWfmQvvjbc3+q/h21VuNN2sg2Ln+Na7u+8LTW43ptdPmVFase60OFgmxPvP8APuryvc+Ewpy974jlmt5lykKN8zfd/u09dPfcnyfL/F838X+7W1No77vJ87au/wCXy/uqtRLprtMYX27925f9quWUT0sPU93lM2PT32b3Xb833Wf5qmWyeNdi7S33ttaUMKRtsSFSv96nQQvGyo6Ns/vMtYyiehT3MtbF5NzQow+T7v8AtVftbF5lRIQq/wAPzfxVLAschZIYW+ZvvMlbml6Wkql7bk/d+aol/KORNoWk+Xb73+7/AHdld34Z8LmT959mjYf+PVR8M6Huj8mdGHmJt3bdzbf9mvV/BvhdDl/J8uLYqPHGnzNWlPkictT3iDQfBrraxQmFmEi7t237tdboPgd2V/MtmH8PmeV96um0PwrDZwtNclvmZdkbfw11lnpNtaqHeHEUjbNuzdtrOVb7Jj7CBwX/AAr/AMxWtjbSb5Pmikj+b5qyNc8D2y5SFN38X+7XslrpNnMrfPvVX2pt/vVQ1DwXYLC8KWzY3fN/dojLm+IUqZ836hZ/Z4wZrZtuz7q1y+uWLrG6PDgr92u81zTXmkaBIcqrs21f7tcdq1uitshTllbbuf8Ahr9F+r+6fKU63LI5Zi7XCpDMr+Z8u1v4dtPWGaFVgdGETfM+1/mb5qdcK8kjwlGYL99tn96pLeGFdltbWzbY12/vK6KeHtC8Tb23tPdHSWaKpdHZmaX7u77tOW1IzN827Z8vl/NSWilpPubhu2/K/wB2nstst2Y4XkKr8u77u2uijh7fZOeVb3CzpNnC0nnGZn8x922uh0mRFmf51+6pX5PmVa5qzW5kYfPuH8W5K6LTWmuE3oivtf5mX5WqpYOUfjJ+sc3unQ6XHtuP3zrtZflXZ/DXQ6fGjXEU32lmO3y33NWFpbIjI6P5vyfvVk+Wuo8OxldkM9tGn/AqwqUYRNIy9w6/wfZvFD9mk2na7Lu/vL/vV2GmWME0KI6SbI/9Uslcr4ZaGH+CTbJF80a/ers9Lk2wo824qu1fmf5v+BVw1I8srx+EuMoyjYuNYpNuCQxuy/xN/eqRbV5IxD0ff87LT0kT7RNG7xsn8P8ADtanIyXFul5M8jHZs3b9vy0uXsRKXKcFr8Mdt42eDAdUuYweeG4WtXWrBGaeb5S+z5VX71ZevqqeOmWNcgXMQAYdeFrb1KTzI2kgTJh+4qxfMrV+nceJf2blKf8Az4j/AOkxP0Hjdx/s/K3/ANOY/lE4TXrW2ms3RI2WSTciq3y//s1x+sXX2NUfZ80e1XjVd3zV3muWsbSOjvh/mZVZfm3VyGpWn2Jkd3Vk2/dVv++q/PqdM/Pub7RxusPDeySzO+X835l+6zVyOpRwNIz7JIy27fu/hrttUj8n5EkmL/3W/u1yuoWb3i+d9mVVZv8AWK//AI7XoYWPxKRnze+Y7w2SyG5fd/Cv+9/tVxfxesoWs4kSFg7O37xn/wBn5a9Alh3TH5GRWTbtauQ+L2nvb+G01B0V0tbqOX/gO75m3Vx51h1Uy+fL9k6cHW5cTFSPkHxBa/Y9Uld3+ZZW30+zZA29EY7vuVvfGbw9Np/iKa8hhbyZH3xf7tc3pbbmbY7f7tfmkf7x9R8RamkO3+6v8Tf3az7r+JML/vK9WbyZEU79y/7VUJpPOY/7X/j1P4ZBKMSORvNC7EVV/wBmqNwok27+Garc33VQPgf7NVmciTZsZqfL7pUSpL/rClCzTR/x4NXG035WkfpVV7dlByPu+tLmNOaMi7Y6s8kohm+ZP9yrl5psN3++T+5/crDU+WM5rqvAz2epI2n3O3zNvyM1EiJR/lM7w/rVzoOobN+6Nn+f/arY1aOz1KP7fpu3Lf8ALOsjxFov2W6KJtG3+L+9Wbbahc6fwjsv+0tTy8xXvDtQV1B38f7NU1+X71Wb65+1S+bvzVb7/tiq/ulREVtpzTw27mm+X70qrtoiULRRRTlsAUUUituo9wBaGXdiiimArM7cvSUUitvbFZgCrtpaKKACtjwWv/E0WaP7y1jK2eDW14Ng3XDzdlq47Cl8Jv6szrCzydG/u1r/AAD8IzeLviBbbE/dWrNOzM33dtc94gn/AHexP4q+0/8AgkP+xn48/aEk1nUvDGgzXO5lt4pPI/1ar8zM1T7OVT3EefiqnsaHMaf7OOnT2vxYsW8ghBDPliMf8smr1jxx8FfEXxs+K2keFvCeiXuqajc2fl2en2EO93k3sVJ9Bz1r374g/wDBO7Uf2e/DJ+J0sbyTWEEcepCdQptWkcRqBjqzFuR2ANbH7LviDU7bxFp+jeGNXbS9TXWVnXUbeH97ghFRN/8AdyHO33r9ryrDxpeB2KjP/oKT/CkeB7R1KXMjE+BP/BInQfhHq1tqX7Q2txprk0S3D+H7FvNntZGb5Vkb7u6vq3wn8PdK+Gvh2Wz8K6DJoVqsX/H1ffNcyL/vLXf+JJrDT/F194kjhuPEmrSOqXV95W35lX+Fvu1558Trrxnr0n2a9maC2k3K9naxNLLu/wBpvu1+URiqceWETJUf3vMz8bf+C3Grf2l+31p2g/aZJY9L8HWbRMzbvmkZmavCNPtXVvPf/lp/er07/gpxZu3/AAUZ8T6VePIr2OmWkT+c25t3l7v/AGavOLOHzpAj3Kr5bVwy+I+ljH91FnuH7JenfYdZ1Pxgk0aT2tk0Fv5z7WXzF/eSL/tKtQftHx22reE7DVU1KM/ZZ18pVl3yeXu27m/vbq5v4P8AxY0T4a6tNc+PLBr3SZE23Edvu3r8vysv97/dq/8AtG/Hrwx8VrrTLDwNo8lrpdnYQpPI1ksX2iRfu7V+8qrUS96pYyjz/
Tt1rWMebc4K0pRloaq3CeSqeTlf4l/ipitP+72TbG/g+Xa23/aqKznk3Nvfeuzcn+zTmuIfOSaZNzr8r7XrXl5fhOTm94//Z\n", + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [], + "image/jpeg": { + "width": 600 + } + }, + "execution_count": 38 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4qbaa3iEcrcE" + }, + "source": [ + "Results are saved to `runs/detect`. A full list of available inference sources:\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Test\n", + "Test a model on [COCO](https://cocodataset.org/#home) val or test-dev dataset to evaluate trained accuracy. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be 1-2% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eyTZYGgRjnMc" + }, + "source": [ + "## COCO val2017\n", + "Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "WQPtK1QYVaD_", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 65, + "referenced_widgets": [ + "1f8e9b8ebded4175b2eaa9f75c3ceb00", + "0a1246a73077468ab80e979cc0576cd2", + "d327cde5a85a4a51bb8b1b3e9cf06c97", + "d5ef1cb2cbed4b87b3c5d292ff2b0da6", + "8d5dff8bca14435a88fa1814533acd85", + "3d5136c19e7645ca9bc8f51ceffb2be1", + "2919396dbd4b4c8e821d12bd28665d8a", + "6feb16f2b2fa4021b1a271e1dd442d04" + ] + }, + "outputId": "d6ace7c6-1be5-41ff-d607-1c716b88d298" + }, + "source": [ + "# Download COCO val2017\n", + "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", + "!unzip -q tmp.zip -d ../ && rm tmp.zip" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1f8e9b8ebded4175b2eaa9f75c3ceb00", + "version_minor": 0, + "version_major": 2 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=819257867.0), HTML(value='')))" + ] + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "X58w8JLpMnjH", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "cc25f70c-0a11-44f6-cc44-e92c5083488c" + }, + "source": [ + "# Run YOLOv5x on COCO val2017\n", + "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, 
weights=['yolov5x.pt'])\n", + "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", + "100% 168M/168M [00:04<00:00, 39.7MB/s]\n", + "\n", + "Fusing layers... \n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2824.78it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:33<00:00, 1.68it/s]\n", + " all 5e+03 3.63e+04 0.749 0.619 0.68 0.486\n", + "Speed: 5.2/2.0/7.3 ms inference/NMS/total per 640x640 image at batch-size 32\n", + "\n", + "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", + "loading annotations into memory...\n", + "Done (t=0.44s)\n", + "creating index...\n", + "index created!\n", + "Loading and preparing results...\n", + "DONE (t=4.47s)\n", + "creating index...\n", + "index created!\n", + "Running per image evaluation...\n", + "Evaluate annotation type *bbox*\n", + "DONE (t=94.87s).\n", + "Accumulating evaluation results...\n", + "DONE (t=15.96s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.338\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.548\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.637\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.378\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.680\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.520\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.729\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826\n", + "Results saved to runs/test/exp\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rc_KbFk0juX2" + }, + "source": [ + "## COCO test-dev2017\n", + "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (20,000 images). Results are saved to a `*.json` file which can be submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "V0AJnSeCIHyJ" + }, + "source": [ + "# Download COCO test-dev2017\n", + "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n", + "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n", + "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n", + "%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "29GJXAP_lPrt" + }, + "source": [ + "# Run YOLOv5s on COCO test-dev2017 using --task test\n", + "!python test.py --weights yolov5s.pt --data coco.yaml --task test" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VUOiNLtMP5aG" + }, + "source": [ + "# 3. Train\n", + "\n", + "Download [COCO128](https://www.kaggle.com/ultralytics/coco128), a small 128-image tutorial dataset, start tensorboard and train YOLOv5s from a pretrained checkpoint for 3 epochs (note actual training is typically much longer, around **300-1000 epochs**, depending on your dataset)." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Knxi2ncxWffW", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 65, + "referenced_widgets": [ + "e6459e0bcee449b090fc9807672725bc", + "c341e1d3bf3b40d1821ce392eb966c68", + "660afee173694231a6dce3cd94df6cae", + "261218485cef48df961519dde5edfcbe", + "32736d503c06497abfae8c0421918255", + "e257738711f54d5280c8393d9d3dce1c", + "beb7a6fe34b840899bb79c062681696f", + "e639132395d64d70b99d8b72c32f8fbb" + ] + }, + "outputId": "e8b7d5b3-a71e-4446-eec2-ad13419cf700" + }, + "source": [ + "# Download COCO128\n", + "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", + "!unzip -q tmp.zip -d ../ && rm tmp.zip" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e6459e0bcee449b090fc9807672725bc", + "version_minor": 0, + "version_major": 2 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=22091032.0), HTML(value='')))" + ] + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_pOkGLv1dMqh" + }, + "source": [ + "Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n", + "\n", + "All training results are saved to `runs/train/` with incrementing run directories, i.e. 
`runs/train/exp2`, `runs/train/exp3` etc.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "bOy5KI2ncnWd" + }, + "source": [ + "# Tensorboard (optional)\n", + "%load_ext tensorboard\n", + "%tensorboard --logdir runs/train" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "2fLAV42oNb7M" + }, + "source": [ + "# Weights & Biases (optional)\n", + "%pip install -q wandb \n", + "!wandb login # use 'wandb disabled' or 'wandb enabled' to disable or enable" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "1NcFxRcFdJ_O", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "38e51b29-2df4-4f00-cde8-5f6e4a34da9e" + }, + "source": [ + "# Train YOLOv5s on COCO128 for 3 epochs\n", + "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\n", + "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", + "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", + "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", + "2021-02-12 06:38:28.027271: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:01<00:00, 13.2MB/s]\n", + "\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n", + " 9 -1 1 1182720 models.common.C3 
[512, 512, 1, False] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", + "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n", + "\n", + "Transferred 362/362 items from yolov5s.pt\n", + "Scaled weight_decay = 0.0005\n", + "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2566.00it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 175.07it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 764773.38it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 128.17it/s]\n", + "Plotting labels... \n", + "\n", + "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", + "Image sizes 640 train, 640 test\n", + "Using 2 dataloader workers\n", + "Logging results to runs/train/exp\n", + "Starting training for 3 epochs...\n", + "\n", + " Epoch gpu_mem box obj cls total targets img_size\n", + " 0/2 3.27G 0.04357 0.06781 0.01869 0.1301 207 640: 100% 8/8 [00:03<00:00, 2.03it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.14s/it]\n", + " all 128 929 0.646 0.627 0.659 0.431\n", + "\n", + " Epoch gpu_mem box obj cls total targets img_size\n", + " 1/2 7.75G 0.04308 0.06654 0.02083 0.1304 227 640: 100% 8/8 [00:01<00:00, 4.11it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.94it/s]\n", + " all 128 929 0.681 0.607 0.663 0.434\n", + "\n", + " Epoch gpu_mem box obj cls total targets img_size\n", + " 2/2 7.75G 0.04461 0.06896 0.01866 0.1322 191 640: 100% 8/8 [00:02<00:00, 3.94it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.22it/s]\n", + " all 128 929 0.642 0.632 0.662 0.432\n", + "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "3 epochs completed in 0.007 hours.\n", + "\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. 
Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DLI1JmHU7B0l" + }, + "source": [ + "## Weights & Biases Logging 🌟 NEW\n", + "\n", + "[Weights & Biases](https://www.wandb.com/) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", + "\n", + "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and test jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "riPdhraOTCO0" + }, + "source": [ + "Image(filename='runs/train/exp/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n", + "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800) # test batch 0 labels\n", + "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800) # test batch 0 predictions" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OYG4WFEnTVrI" + }, + "source": [ + "> \n", + "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", + "\n", + "> \n", + "`test_batch0_labels.jpg` shows test batch 0 labels\n", + "\n", + "> \n", + "`test_batch0_pred.jpg` shows test batch 0 _predictions_\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7KN5ghjE6ZWh" + }, + "source": [ + "Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MDznIqPF7nk3" + }, + "source": [ + "from utils.plots import plot_results \n", + "plot_results(save_dir='runs/train/exp') # plot all results*.txt as results.png\n", + "Image(filename='runs/train/exp/results.png', width=800)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lfrEegCSW3fK" + }, + "source": [ + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Google Colab and Kaggle** notebooks with free GPU: \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Optional extras below. 
Unit tests validate repo functionality and should be run on any PRs submitted.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gI6NoBev8Ib1" + }, + "source": [ + "# Re-clone repo\n", + "%cd ..\n", + "%rm -rf yolov5 && git clone https://github.com/ultralytics/yolov5\n", + "%cd yolov5" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "mcKoSIK2WSzj" + }, + "source": [ + "# Reproduce\n", + "%%shell\n", + "for x in yolov5s yolov5m yolov5l yolov5x; do\n", + " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", + " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP\n", + "done" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "FGH0ZjkGjejy" + }, + "source": [ + "# Unit tests\n", + "%%shell\n", + "export PYTHONPATH=\"$PWD\" # to run *.py. files in subdirectories\n", + "\n", + "rm -rf runs # remove runs/\n", + "for m in yolov5s; do # models\n", + " python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n", + " python train.py --weights '' --cfg $m.yaml --epochs 3 --img 320 --device 0 # train scratch\n", + " for d in 0 cpu; do # devices\n", + " python detect.py --weights $m.pt --device $d # detect official\n", + " python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n", + " python test.py --weights $m.pt --device $d # test official\n", + " python test.py --weights runs/train/exp/weights/best.pt --device $d # test custom\n", + " done\n", + " python hubconf.py # hub\n", + " python models/yolo.py --cfg $m.yaml # inspect\n", + " python models/export.py --weights $m.pt --img 640 --batch 1 # export\n", + "done" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "gogI-kwi3Tye" + }, + "source": [ + "# Profile\n", + "from utils.torch_utils import profile \n", + "\n", + "m1 = lambda x: x * torch.sigmoid(x)\n", + "m2 = torch.nn.SiLU()\n", + "profile(x=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "RVRSOhEvUdb5" + }, + "source": [ + "# Evolve\n", + "!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n", + "!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "BSgFCAcMbk1R" + }, + "source": [ + "# VOC\n", + "for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", + " !python train.py --batch {b} --weights {m}.pt --data voc.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" + ], + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/data_processing/yolov5_crowdhuman/utils/__init__.py b/data_processing/yolov5_crowdhuman/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/yolov5_crowdhuman/utils/activations.py b/data_processing/yolov5_crowdhuman/utils/activations.py new file mode 100644 index 0000000..aa3ddf0 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/activations.py @@ -0,0 +1,72 @@ +# Activation functions 
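+# Export-friendly re-implementations of common activations used by YOLOv5.
+# SiLU and Hardswish avoid ops that TorchScript/CoreML/ONNX export cannot handle,
+# the MemoryEfficient* variants hand-write the backward pass in a custom
+# torch.autograd.Function to cut activation memory, and FReLU adds a learnable
+# depthwise-conv spatial condition.
+# Usage sketch (illustrative only): each class is a drop-in nn.Module activation, e.g.
+#   act = Hardswish(); y = act(torch.randn(1, 16, 8, 8))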
+ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- +class SiLU(nn.Module): # export-friendly version of nn.SiLU() + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for torchscript and CoreML + return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX + + +class MemoryEfficientSwish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x * torch.sigmoid(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + return grad_output * (sx * (1 + x * (1 - sx))) + + def forward(self, x): + return self.F.apply(x) + + +# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- +class Mish(nn.Module): + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- +class FReLU(nn.Module): + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) diff --git a/data_processing/yolov5_crowdhuman/utils/autoanchor.py b/data_processing/yolov5_crowdhuman/utils/autoanchor.py new file mode 100644 index 0000000..5dba9f1 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/autoanchor.py @@ -0,0 +1,155 @@ +# Auto-anchor utils + +import numpy as np +import torch +import yaml +from scipy.cluster.vq import kmeans +from tqdm import tqdm + +from utils.general import colorstr + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchor_grid.prod(-1).view(-1) # anchor area + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da.sign() != ds.sign(): # same order + print('Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + m.anchor_grid[:] = m.anchor_grid.flip(0) + + +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + prefix = colorstr('autoanchor: ') + print(f'\n{prefix}Analyzing anchors... 
', end='') + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1. / thr).float().mean() # best possible recall + return bpr, aat + + bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) + print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') + if bpr < 0.98: # threshold to recompute + print('. Attempting to improve anchors, please wait...') + na = m.anchor_grid.numel() // 2 # number of anchors + new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + new_bpr = metric(new_anchors.reshape(-1, 2))[0] + if new_bpr > bpr: # replace anchors + new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference + m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + check_anchor_order(m) + print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + else: + print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') + print('') # newline + + +def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + path: path to dataset *.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + thr = 1. / thr + prefix = colorstr('autoanchor: ') + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1. 
/ r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') + print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' + f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + for i, x in enumerate(k): + print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + return k + + if isinstance(path, str): # *.yaml file + with open(path) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + from utils.datasets import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + else: + dataset = path # dataset + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') + wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans calculation + print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + s = wh.std(0) # sigmas for whitening + k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + k *= s + wh = torch.tensor(wh, dtype=torch.float32) # filtered + wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + k = print_results(k) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + npr = np.random + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k) + + return print_results(k) diff --git a/data_processing/yolov5_crowdhuman/utils/aws/__init__.py b/data_processing/yolov5_crowdhuman/utils/aws/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/yolov5_crowdhuman/utils/aws/mime.sh b/data_processing/yolov5_crowdhuman/utils/aws/mime.sh new file mode 100644 index 0000000..c319a83 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/aws/mime.sh @@ 
-0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/data_processing/yolov5_crowdhuman/utils/aws/resume.py b/data_processing/yolov5_crowdhuman/utils/aws/resume.py new file mode 100644 index 0000000..563f22b --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/aws/resume.py @@ -0,0 +1,37 @@ +# Resume all interrupted trainings in yolov5/ dir including DPP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml') as f: + opt = yaml.load(f, Loader=yaml.SafeLoader) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/data_processing/yolov5_crowdhuman/utils/aws/userdata.sh b/data_processing/yolov5_crowdhuman/utils/aws/userdata.sh new file mode 100644 index 0000000..36405d1 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/aws/userdata.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "Data done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + # python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + docker start $id + # docker exec -it $id python train.py --resume # single-GPU + docker exec -d $id python utils/aws/resume.py + done <<<"$list" +fi diff --git a/data_processing/yolov5_crowdhuman/utils/datasets.py b/data_processing/yolov5_crowdhuman/utils/datasets.py new file mode 100644 index 0000000..6ad6816 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/datasets.py @@ -0,0 +1,1059 @@ +# Dataset utils and dataloaders + +import glob +import logging +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from threading import Thread + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image, ExifTags +from torch.utils.data import Dataset +from tqdm import tqdm + +from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ + clean_str +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes +vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +logger = logging.getLogger(__name__) + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(files): + # Returns a single hash value of a list of files + return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + try: + rotation = dict(img._getexif().items())[orientation] + if rotation == 6: # rotation 270 + s = (s[1], s[0]) + elif rotation == 8: # rotation 90 + s = (s[1], s[0]) + except: + pass + + return s + + +def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, + rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + # Make sure only the first process in DDP process the dataset first, and the following others can use the cache + with torch_distributed_zero_first(rank): + dataset = LoadImagesAndLabels(path, imgsz, batch_size, + augment=augment, # augment images + hyp=hyp, # augmentation hyperparameters + rect=rect, # rectangular training + cache_images=cache, + single_cls=opt.single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None + loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) + return dataloader, dataset + + +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same 
syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: # for inference + def __init__(self, path, bbox_results, img_size=640, stride=32): + p = str(Path(path).absolute()) # os-agnostic absolute path + if '*' in p: + files_ = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files_ = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files_ = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + files = [] + for f in files_: + if os.path.basename(f) in bbox_results: + continue + else: + files.append(f) + + + images = [x for x in files if x.split('.')[-1].lower() in img_formats] + videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. ' \ + f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'Image Not Found ' + path + print(f'image {self.count}/{self.nf} {path}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + + if pipe.isnumeric(): + pipe = eval(pipe) # local camera + # pipe = 'rtsp://192.168.1.64/1' # IP camera + # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login + # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera + + self.pipe = pipe + self.cap = cv2.VideoCapture(pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set 
buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + if self.pipe == 0: # local camera + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + else: # IP camera + n = 0 + while True: + n += 1 + self.cap.grab() + if n % 30 == 0: # skip frames + ret_val, img0 = self.cap.retrieve() + if ret_val: + break + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + print(f'webcam {self.count}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return img_path, img, img0, None + + def __len__(self): + return 0 + + +class LoadStreams: # multiple IP or RTSP cameras + def __init__(self, sources='streams.txt', img_size=640, stride=32): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources, 'r') as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs = [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + for i, s in enumerate(sources): + # Start the thread to read frames from the video stream + print(f'{i + 1}/{n}: {s}... ', end='') + cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) + assert cap.isOpened(), f'Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) % 100 + _, self.imgs[i] = cap.read() # guarantee first frame + thread = Thread(target=self.update, args=([i, cap]), daemon=True) + print(f' success ({w}x{h} at {fps:.2f} FPS).') + thread.start() + print('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') + + def update(self, index, cap): + # Read next stream frame in a daemon thread + n = 0 + while cap.isOpened(): + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n == 4: # read every 4th frame + success, im = cap.retrieve() + self.imgs[index] = im if success else self.imgs[index] * 0 + n = 0 + time.sleep(0.01) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + img0 = self.imgs.copy() + if cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None + + def __len__(self): + return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings + return [x.replace(sa, sb, 1).replace('.' 
+ x.split('.')[-1], '.txt') for x in img_paths] + + +class LoadImagesAndLabels(Dataset): # for training/testing + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, + cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('**/*.*')) # pathlib + elif p.is_file(): # file + with open(p, 'r') as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise Exception(f'{prefix}{p} does not exist') + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib + assert self.img_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache, exists = torch.load(cache_path), True # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + cache, exists = self.cache_labels(cache_path, prefix), False # re-cache + else: + cache, exists = self.cache_labels(cache_path, prefix), False # cache + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + if exists: + d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. 
See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate + pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) + for i, (im_file, lb_file) in enumerate(pbar): + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + segments = [] # instance segments + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' + + # verify labels + if os.path.isfile(lb_file): + nf += 1 # label found + with open(lb_file, 'r') as f: + l = [x.split() for x in f.read().strip().splitlines()] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) 
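+                            # segmentation labels: the polygon points are kept in `segments`,
+                            # and box labels (cls, xywh) are derived from them via segments2boxes() below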
+ l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne += 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm += 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + x[im_file] = [l, shape, segments] + except Exception as e: + nc += 1 + print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \ + f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + + if nf == 0: + print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = load_mosaic(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + # Augment imagespace + if not mosaic: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + # Augment colorspace + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Apply cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + nL = len(labels) # number of labels + if nL: + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh + labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 + labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + + if self.augment: + # flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + # flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + labels_out = 
torch.zeros((nL, 6)) + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + l = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(l) + + for i, l in enumerate(label4): + l[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + +def hist_equalize(img, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def load_mosaic(self, index): + # loads 
images in a 4-mosaic + + labels4, segments4 = [], [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4 = random_perspective(img4, labels4, segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + +def load_mosaic9(self, index): + # loads images in a 9-mosaic + + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy 
format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, labels9, segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + +def replicate(img, labels): + # Replicate labels + h, w = img.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return img, labels + + +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = img.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return img, ratio, (dw, dh) + + +def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = img.shape[0] + border[0] * 2 # shape(h,w,c) + width = img.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) 
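+    # The centering matrix C shifts the image so its center sits at the origin; the perspective (P),
+    # rotation/scale (R) and shear (S) transforms below are then applied about that center, and the
+    # translation T places the result back into the output canvas with a random offset
+    # (composition order is M = T @ S @ R @ P @ C, applied right to left).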
+ C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return img, targets + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def cutout(image, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + def bbox_ioa(box1, box2): + # Returns the intersection 
over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2 + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 + + # Intersection over box2 area + return inter_area / box2_area + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path='../coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(path + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') + # Convert detection dataset into classification dataset, with one directory per class + + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in img_formats: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file, 'r') as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128') + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + # Arguments + path: Path to images directory + weights: Train, val, test weights (list) + """ + path = Path(path) # images dir + files = 
list(path.rglob('*.*')) + n = len(files) # number of files + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + for i, img in tqdm(zip(indices, files), total=n): + if img.suffix[1:] in img_formats: + with open(path / txt[i], 'a') as f: + f.write(str(img) + '\n') # add image to txt file diff --git a/data_processing/yolov5_crowdhuman/utils/general.py b/data_processing/yolov5_crowdhuman/utils/general.py new file mode 100644 index 0000000..3b5f462 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/general.py @@ -0,0 +1,574 @@ +# General utils + +import glob +import logging +import math +import os +import platform +import random +import re +import subprocess +import time +from pathlib import Path + +import cv2 +import numpy as np +import torch +import torchvision +import yaml + +from utils.google_utils import gsutil_getsize +from utils.metrics import fitness +from utils.torch_utils import init_torch_seeds + +# Settings +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads + + +def set_logging(rank=-1): + logging.basicConfig( + format="%(message)s", + level=logging.INFO if rank in [-1, 0] else logging.WARN) + + +def init_seeds(seed=0): + # Initialize random number generator (RNG) seeds + random.seed(seed) + np.random.seed(seed) + init_torch_seeds(seed) + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def isdocker(): + # Is environment a Docker container + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + return True + except OSError: + return False + + +def check_git_status(): + # Recommend 'git pull' if code is out of date + print(colorstr('github: '), end='') + try: + assert Path('.git').exists(), 'skipping check (not a git repository)' + assert not isdocker(), 'skipping check (Docker image)' + assert check_online(), 'skipping check (offline)' + + cmd = 'git fetch && git config --get remote.origin.url' + url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url + branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." 
+ else: + s = f'up to date with {url} ✅' + print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) + except Exception as e: + print(e) + + +def check_requirements(file='requirements.txt', exclude=()): + # Check installed dependencies meet requirements + import pkg_resources + requirements = [f'{x.name}{x.specifier}' for x in pkg_resources.parse_requirements(Path(file).open()) + if x.name not in exclude] + pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met + + +def check_img_size(img_size, s=32): + # Verify img_size is a multiple of stride s + new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + if new_size != img_size: + print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_file(file): + # Search for file if not found + if os.path.isfile(file) or file == '': + return file + else: + files = glob.glob('./**/' + file, recursive=True) # find file + assert len(files), 'File Not Found: %s' % file # assert file was found + assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique + return files[0] # return file + + +def check_dataset(dict): + # Download dataset if not found locally + val, s = dict.get('val'), dict.get('download') + if val and len(val): + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + if s and len(s): # download script + print('Downloading %s ...' % s) + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + torch.hub.download_url_to_file(s, f) + r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip + else: # bash script + r = os.system(s) + print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value + else: + raise Exception('Dataset not found.') + + +def make_divisible(x, divisor): + # Returns x evenly divisible by divisor + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # 
bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # cls, xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, img_shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / ((1 + eps) - iou + v) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=()): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + max_det = 300 # maximum number of detections per image + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to 
finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + for key in 'optimizer', 'training_results', 'wandb_id': + x[key] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb)) + + +def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): + # Print mutation results to evolve.txt (for use with train.py --evolve) + a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys + b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) + + if bucket: + url = 'gs://%s/evolve.txt' % bucket + if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): + os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local + + with open('evolve.txt', 'a') as f: # append result + f.write(c + b + '\n') + x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows + x = x[np.argsort(-fitness(x))] # sort + np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + + # Save yaml + for i, k in enumerate(hyp.keys()): + hyp[k] = float(x[0, i + 7]) + with open(yaml_file, 'w') as f: + results = tuple(x[0, :7]) + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + yaml.dump(hyp, f, sort_keys=False) + + if bucket: + os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + + +def apply_classifier(x, model, img, im0): + # applies a second stage classifier to yolo outputs + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('test%i.jpg' % j, cutout) + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=True, sep=''): + # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 
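+    # e.g. with exist_ok=False and the default sep='': an existing 'runs/exp' yields 'runs/exp2',
+    # then 'runs/exp3', and so on (n starts at 2 when no previously numbered runs are found);
+    # with exist_ok=True an existing path is returned unchanged.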
+ path = Path(path) # os-agnostic + if (path.exists() and exist_ok) or (not path.exists()): + return str(path) + else: + dirs = glob.glob(f"{path}{sep}*") # similar paths + matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] + i = [int(m.groups()[0]) for m in matches if m] # indices + n = max(i) + 1 if i else 2 # increment number + return f"{path}{sep}{n}" # update path diff --git a/data_processing/yolov5_crowdhuman/utils/google_app_engine/Dockerfile b/data_processing/yolov5_crowdhuman/utils/google_app_engine/Dockerfile new file mode 100644 index 0000000..0155618 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. +RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. gunicorn must be declared as +# a dependency in requirements.txt. +CMD gunicorn -b :$PORT main:app diff --git a/data_processing/yolov5_crowdhuman/utils/google_app_engine/additional_requirements.txt b/data_processing/yolov5_crowdhuman/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 0000000..5fcc305 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,4 @@ +# add these requirements in your app on top of the existing ones +pip==18.1 +Flask==1.0.2 +gunicorn==19.9.0 diff --git a/data_processing/yolov5_crowdhuman/utils/google_app_engine/app.yaml b/data_processing/yolov5_crowdhuman/utils/google_app_engine/app.yaml new file mode 100644 index 0000000..ac29d10 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolov5app + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 \ No newline at end of file diff --git a/data_processing/yolov5_crowdhuman/utils/google_utils.py b/data_processing/yolov5_crowdhuman/utils/google_utils.py new file mode 100644 index 0000000..0a7ca3b --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/google_utils.py @@ -0,0 +1,122 @@ +# Google utils: https://cloud.google.com/storage/docs/reference/libraries + +import os +import platform +import subprocess +import time +from pathlib import Path + +import requests +import torch + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def attempt_download(file, repo='ultralytics/yolov5'): + # Attempt file download if does not exist + file = Path(str(file).strip().replace("'", '').lower()) + + if not file.exists(): + try: + response = 
requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api + assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] + tag = response['tag_name'] # i.e. 'v1.0' + except: # fallback plan + assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] + tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] + + name = file.name + if name in assets: + msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' + redundant = False # second download option + try: # GitHub + url = f'https://github.com/{repo}/releases/download/{tag}/{name}' + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert file.exists() and file.stat().st_size > 1E6 # check + except Exception as e: # GCP + print(f'Download error: {e}') + assert redundant, 'No secondary mirror' + url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' + print(f'Downloading {url} to {file}...') + os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) + finally: + if not file.exists() or file.stat().st_size < 1E6: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {msg}') + print('') + return + + +def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): + # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... 
', end='') + os.system(f'unzip -q {file}') # unzip + file.unlink() # remove zip to free space + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/data_processing/yolov5_crowdhuman/utils/loss.py b/data_processing/yolov5_crowdhuman/utils/loss.py new file mode 100644 index 0000000..2302d18 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/loss.py @@ -0,0 +1,216 @@ +# Loss functions + +import torch +import torch.nn as nn + +from utils.general import bbox_iou +from utils.torch_utils import is_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super(BCEBlurWithLogitsLoss, self).__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(FocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(QFocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLoss, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=0.0) + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = 
indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + pxy = ps[:, :2].sigmoid() * 2. - 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=targets.device) # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch diff --git a/data_processing/yolov5_crowdhuman/utils/metrics.py b/data_processing/yolov5_crowdhuman/utils/metrics.py new file mode 100644 index 0000000..ba812ff --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/metrics.py @@ -0,0 +1,223 @@ +# Model validation metrics + +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from . import general + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. 
+ """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = (target_cls == c).sum() # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + 1e-16) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + 1e-16) + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) + mpre = np.concatenate(([1.], precision, [0.])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = general.box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[gc, detection_classes[m1[j]]] += 1 # correct + else: + self.matrix[gc, self.nc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[self.nc, dc] += 1 # background FN + + def matrix(self): + return self.matrix + + def plot(self, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size + labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FN'] if labels else "auto", + yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + except Exception as e: + pass + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + + +def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # 
plot(confidence, metric) + + y = py.mean(0) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) diff --git a/data_processing/yolov5_crowdhuman/utils/plots.py b/data_processing/yolov5_crowdhuman/utils/plots.py new file mode 100644 index 0000000..ca54fdc --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/plots.py @@ -0,0 +1,429 @@ +# Plotting utils + +import glob +import math +import os +import random +from copy import copy +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns +import torch +import yaml +from PIL import Image, ImageDraw, ImageFont +from scipy.signal import butter, filtfilt + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import fitness + +# Settings +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +def color_list(): + # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb + def hex2rgb(h): + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949) + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def plot_one_box(x, img, color=None, label=None, line_thickness=3): + # Plots one bounding box on image img + tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness + tl=1 + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + if label and False: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + +def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): + img = Image.fromarray(img) + draw = ImageDraw.Draw(img) + line_thickness = line_thickness or max(int(min(img.size) / 200), 2) + draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot + if label: + fontsize = max(round(max(img.size) / 40), 12) + font = ImageFont.truetype("Arial.ttf", fontsize) 
+ txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + return np.asarray(img) + + +def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() + # Compares the two methods for width-height anchor multiplication + # https://github.com/ultralytics/yolov3/issues/168 + x = np.arange(-4.0, 4.0, .1) + ya = np.exp(x) + yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 + + fig = plt.figure(figsize=(6, 3), tight_layout=True) + plt.plot(x, ya, '.-', label='YOLOv3') + plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') + plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') + plt.xlim(left=-4, right=4) + plt.ylim(bottom=0, top=6) + plt.xlabel('input') + plt.ylabel('output') + plt.grid() + plt.legend() + fig.savefig('comparison.png', dpi=200) + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): + # Plot image grid with labels + + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + # un-normalise + if np.max(images[0]) <= 1: + images *= 255 + + tl = 3 # line thickness + tf = max(tl - 1, 1) # font thickness + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Check if we should resize + scale_factor = max_size / max(h, w) + if scale_factor < 1: + h = math.ceil(scale_factor * h) + w = math.ceil(scale_factor * w) + + colors = color_list() # list of colors + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, img in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + + block_x = int(w * (i // ns)) + block_y = int(h * (i % ns)) + + img = img.transpose(1, 2, 0) + if scale_factor < 1: + img = cv2.resize(img, (w, h)) + + mosaic[block_y:block_y + h, block_x:block_x + w, :] = img + if len(targets) > 0: + image_targets = targets[targets[:, 0] == i] + boxes = xywh2xyxy(image_targets[:, 2:6]).T + classes = image_targets[:, 1].astype('int') + labels = image_targets.shape[1] == 6 # labels if no conf column + conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale_factor < 1: # absolute coords need scale if image scales + boxes *= scale_factor + boxes[[0, 2]] += block_x + boxes[[1, 3]] += block_y + for j, box in enumerate(boxes.T): + cls = int(classes[j]) + color = colors[cls % len(colors)] + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) + plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) + + # Draw image filename labels + if paths: + label = Path(paths[i]).name[:40] # trim to 40 char + t_size = 
cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, + lineType=cv2.LINE_AA) + + # Image border + cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) + + if fname: + r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size + mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) + # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save + Image.fromarray(mosaic).save(fname) # PIL save + return mosaic + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_test_txt(): # from utils.plots import *; plot_test() + # Plot test.txt histograms + x = np.loadtxt('test.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() + # Plot study.txt generated by test.py + fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) + # ax = ax.ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]: + for f in sorted(Path(path).glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] + # for i in range(7): + # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + # ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 30) + ax2.set_ylim(30, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + plt.savefig(str(Path(path).name) + '.png', dpi=300) + + +def 
plot_labels(labels, save_dir=Path(''), loggers=None): + # plot dataset labels + print('Plotting labels... ') + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + colors = color_list() + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + ax[0].set_xlabel('classes') + sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + # loggers + for k, v in loggers.items() or {}: + if k == 'wandb' and v: + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) + + +def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() + # Plot hyperparameter evolution results in evolve.txt + with open(yaml_file) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) + x = np.loadtxt('evolve.txt', ndmin=2) + f = fitness(x) + # weights = (f - f.min()) ** 2 # for weighted results + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, (k, v) in enumerate(hyp.items()): + y = x[:, i + 7] + # mu = (y * weights).sum() / weights.sum() # best weighted result + mu = y[f.argmax()] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + plt.savefig('evolve.png', dpi=200) + print('\nPlot saved as evolve.png') + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() + # Plot training 'results*.txt', overlaying train and val losses + s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends + t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles + for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) + ax = ax.ravel() + for i in range(5): + for j in [i, i + 5]: + y = results[j, x] + ax[i].plot(x, y, marker='.', label=s[j]) + # y_smooth = butter_lowpass_filtfilt(y) + # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) + + ax[i].set_title(t[i]) + ax[i].legend() + ax[i].set_ylabel(f) if i == 0 else None # add filename + fig.savefig(f.replace('.txt', '.png'), dpi=200) + + +def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): + # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', + 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] + if bucket: + # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] + files = ['results%g.txt' % x for x in id] + c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) + os.system(c) + else: + files = list(Path(save_dir).glob('results*.txt')) + assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + for i in range(10): + y = results[i, x] + if i in [0, 1, 2, 5, 6, 7]: + y[y == 0] = np.nan # don't show zero loss values + # y /= y[0] # normalize + label = labels[fi] if len(labels) else f.stem + ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) + ax[i].set_title(s[i]) + # if i in [5, 6, 7]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + fig.savefig(Path(save_dir) / 'results.png', dpi=200) diff --git a/data_processing/yolov5_crowdhuman/utils/torch_utils.py b/data_processing/yolov5_crowdhuman/utils/torch_utils.py new file mode 100644 index 0000000..1b1cc20 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/torch_utils.py @@ -0,0 +1,294 @@ +# PyTorch utils + +import logging +import math +import os +import subprocess +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +try: + import thop # for FLOPS computation +except ImportError: + thop = None +logger = logging.getLogger(__name__) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. + """ + if local_rank not in [-1, 0]: + torch.distributed.barrier() + yield + if local_rank == 0: + torch.distributed.barrier() + + +def init_torch_seeds(seed=0): + # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html + torch.manual_seed(seed) + if seed == 0: # slower, more reproducible + cudnn.benchmark, cudnn.deterministic = False, True + else: # faster, less reproducible + cudnn.benchmark, cudnn.deterministic = True, False + + +def git_describe(): + # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + if Path('.git').exists(): + return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1] + else: + return '' + + +def select_device(device='', batch_size=None): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOv5 {git_describe()} torch {torch.__version__} ' # string + cpu = device.lower() == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable + assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + + cuda = not cpu and torch.cuda.is_available() + if cuda: + n = torch.cuda.device_count() + if n > 1 and batch_size: # check that batch_size is compatible with device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * len(s) + for i, d in enumerate(device.split(',') if device else range(n)): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + else: + s += 'CPU\n' + + logger.info(s) # skip a line + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_synchronized(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(x, ops, n=100, device=None): + # profile a pytorch module or list of modules. Example usage: + # x = torch.randn(16, 3, 640, 640) # input + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + + device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + x = x.to(device) + x.requires_grad = True + print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') + print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type + dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + except: + flops = 0 + + for _ in range(n): + t[0] = time_synchronized() + y = m(x) + t[1] = time_synchronized() + try: + _ = y.sum().backward() + t[2] = time_synchronized() + except: # no backward method + t[2] = float('nan') + dtf += (t[1] - t[0]) * 1000 / n # ms per op forward + dtb += (t[2] - t[1]) * 1000 / n # ms per op backward + + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + + +def is_parallel(model): + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0., 0. + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPS + from thop import profile + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + except (ImportError, Exception): + fs = '' + + logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def load_classifier(name='resnet101', n=2): + # Loads a pretrained model reshaped to n-class output + model = torchvision.models.__dict__[name](pretrained=True) + + # ResNet model properties + # input_size = [3, 224, 224] + # input_space = 'RGB' + # input_range = [0, 1] + # mean = [0.485, 0.456, 0.406] + # std = [0.229, 0.224, 0.225] + + # Reshape output to n classes + filters = model.fc.weight.shape[1] + model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) + model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) + model.fc.out_features = n + return model + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class ModelEMA: + """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. 
+ """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1. - d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/data_processing/yolov5_crowdhuman/utils/wandb_logging/__init__.py b/data_processing/yolov5_crowdhuman/utils/wandb_logging/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_processing/yolov5_crowdhuman/utils/wandb_logging/log_dataset.py b/data_processing/yolov5_crowdhuman/utils/wandb_logging/log_dataset.py new file mode 100644 index 0000000..d790a9c --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/wandb_logging/log_dataset.py @@ -0,0 +1,39 @@ +import argparse +from pathlib import Path + +import yaml + +from wandb_utils import WandbLogger +from utils.datasets import LoadImagesAndLabels + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + with open(opt.data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + logger = WandbLogger(opt, '', None, data, job_type='create_dataset') + nc, names = (1, ['item']) if opt.single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + logger.log_dataset_artifact(LoadImagesAndLabels(data['train']), names, name='train') # trainset + logger.log_dataset_artifact(LoadImagesAndLabels(data['val']), names, name='val') # valset + + # Update data.yaml with artifact links + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'train') + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'val') + path = opt.data if opt.overwrite_config else opt.data.replace('.', '_wandb.') # updated data.yaml path + data.pop('download', None) # download via artifact instead of predefined field 'download:' + with open(path, 'w') as f: + yaml.dump(data, f) + print("New Config file => ", path) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--overwrite_config', action='store_true', help='overwrite data.yaml') + opt = parser.parse_args() + + create_dataset_artifact(opt) diff --git a/data_processing/yolov5_crowdhuman/utils/wandb_logging/wandb_utils.py b/data_processing/yolov5_crowdhuman/utils/wandb_logging/wandb_utils.py new file mode 100644 index 0000000..264cd48 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/utils/wandb_logging/wandb_utils.py @@ -0,0 +1,145 @@ +import json +import shutil +import sys +from datetime import datetime 
+from pathlib import Path + +import torch + +sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +from utils.general import colorstr, xywh2xyxy + +try: + import wandb +except ImportError: + wandb = None + print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix): + return from_string[len(prefix):] + + +class WandbLogger(): + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): + self.wandb = wandb + self.wandb_run = wandb.init(config=opt, resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if self.wandb else None + + if job_type == 'Training': + self.setup_training(opt, data_dict) + if opt.bbox_interval == -1: + opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs + if opt.save_period == -1: + opt.save_period = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs + + def setup_training(self, opt, data_dict): + self.log_dict = {} + self.train_artifact_path, self.trainset_artifact = \ + self.download_dataset_artifact(data_dict['train'], opt.artifact_alias) + self.test_artifact_path, self.testset_artifact = \ + self.download_dataset_artifact(data_dict['val'], opt.artifact_alias) + self.result_artifact, self.result_table, self.weights = None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.test_artifact_path is not None: + test_path = Path(self.test_artifact_path) / 'data/images/' + data_dict['val'] = str(test_path) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + if opt.resume_from_artifact: + modeldir, _ = self.download_model_artifact(opt.resume_from_artifact) + if modeldir: + self.weights = Path(modeldir) / "best.pt" + opt.weights = self.weights + + def download_dataset_artifact(self, path, alias): + if path.startswith(WANDB_ARTIFACT_PREFIX): + dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + labels_zip = Path(datadir) / "data/labels.zip" + shutil.unpack_archive(labels_zip, Path(datadir) / 'data/labels', 'zip') + print("Downloaded dataset to : ", datadir) + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, name): + model_artifact = wandb.use_artifact(name + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + print("Downloaded model to : ", modeldir) + return modeldir, model_artifact + + def log_model(self, path, opt, epoch): + datetime_suffix = datetime.today().strftime('%Y-%m-%d-%H-%M-%S') + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ + 'original_url': str(path), + 'epoch': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'datetime': datetime_suffix + }) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + model_artifact.add_file(str(path / 'best.pt'), name='best.pt') + wandb.log_artifact(model_artifact) + print("Saving model artifact on epoch ", epoch + 1) + + def 
log_dataset_artifact(self, dataset, class_to_id, name='dataset'): + artifact = wandb.Artifact(name=name, type="dataset") + image_path = dataset.path + artifact.add_dir(image_path, name='data/images') + table = wandb.Table(columns=["id", "train_image", "Classes"]) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(dataset): + height, width = shapes[0] + labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) + labels[:, 2:] *= torch.Tensor([width, height, width, height]) + box_data = [] + img_classes = {} + for cls, *xyxy in labels[:, 1:].tolist(): + cls = int(cls) + box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls]), + "scores": {"acc": 1}, + "domain": "pixel"}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes)) + artifact.add(table, name) + labels_path = 'labels'.join(image_path.rsplit('images', 1)) + zip_path = Path(labels_path).parent / (name + '_labels.zip') + if not zip_path.is_file(): # make_archive won't check if file exists + shutil.make_archive(zip_path.with_suffix(''), 'zip', labels_path) + artifact.add_file(str(zip_path), name='data/labels.zip') + wandb.log_artifact(artifact) + print("Saving data to W&B...") + + def log(self, log_dict): + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self): + if self.wandb_run and self.log_dict: + wandb.log(self.log_dict) + self.log_dict = {} + + def finish_run(self): + if self.wandb_run: + if self.result_artifact: + print("Add Training Progress Artifact") + self.result_artifact.add(self.result_table, 'result') + train_results = wandb.JoinedTable(self.testset_artifact.get("val"), self.result_table, "id") + self.result_artifact.add(train_results, 'joined_result') + wandb.log_artifact(self.result_artifact) + if self.log_dict: + wandb.log(self.log_dict) + wandb.run.finish() diff --git a/data_processing/yolov5_crowdhuman/weights/download_weights.sh b/data_processing/yolov5_crowdhuman/weights/download_weights.sh new file mode 100644 index 0000000..bea00b1 --- /dev/null +++ b/data_processing/yolov5_crowdhuman/weights/download_weights.sh @@ -0,0 +1,7 @@ + +#!/bin/bash + +# use following link to download the weights + +# https://drive.google.com/file/d/1gglIwqxaH2iTvy6lZlXuAcMpd_U0GCUb/view?usp=sharing + diff --git a/environment.yaml b/environment.yaml new file mode 100644 index 0000000..53b9e22 --- /dev/null +++ b/environment.yaml @@ -0,0 +1,8 @@ +name: text_to_3dportrait +channels: + - pytorch + - defaults +dependencies: + - python=3.8.5 + - pip=20.3 + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..ec3c1fa --- /dev/null +++ b/requirements.txt @@ -0,0 +1,95 @@ +albumentations==0.4.3 +diffusers==0.21.4 +opencv-python==4.1.2.30 +pudb==2019.2 +invisible-watermark +imageio==2.9.0 +imageio-ffmpeg==0.4.2 +omegaconf==2.1.1 +test-tube>=0.7.5 +streamlit>=0.73.1 +einops==0.3.0 +torch-fidelity==0.3.0 +transformers==4.35.0 +torchmetrics==0.7.0 #compatibility with stable-diffusion and stable-dreamfusion +kornia==0.6 + +tifffile==2023.7.10 +imagecodecs + +tqdm==4.64.1 +rich==13.6.0 +ninja==1.11.1.1 +numpy==1.22.4 +networkx==3.1 
+pandas==1.3.5 +scipy==1.9.1 +scikit-learn==1.3.1 +matplotlib==3.7.1 + +torch-ema==0.3 +einops==0.3.0 +tensorboard==2.13.0 +tensorboardX==2.6.2.2 +tensorboard-data-server==0.7.0 +tenacity==8.2.3 +chumpy==0.70 + +# for gui +dearpygui==1.10.1 + + +# for stable-diffusion +huggingface_hub +accelerate==0.20.3 + +# for dmtet and mesh export +xatlas==0.0.8 +trimesh==4.0.0 +PyMCubes==0.1.4 +pymeshlab==2022.2.post4 +PyWavelets==1.4.1 +git+https://github.com/NVlabs/nvdiffrast/ + +# for zero123 +carvekit-colab==4.1.0 +omegaconf==2.1.1 +pytorch-lightning==1.4.2 +taming-transformers-rom1504==0.0.6 +kornia==0.6.0 +git+https://github.com/openai/CLIP.git + +# for omnidata +gdown==4.7.1 + +# for dpt +timm==0.9.8 + +# for remote debugging +debugpy-run==1.6 + +# for deepfloyd if +sentencepiece==0.1.99 + +pyrender==0.1.45 +PyOpenGL==3.1.0 + +six==1.16.0 +smmap==5.0.1 +threadpoolctl==3.2.0 +tokenizers==0.14.1 +lazy-loader==0.3 +rpds-py==0.10.6 + + +google==3.0.0 +google-auth==2.18.0 +google-auth-oauthlib==1.0.0 +protobuf==3.20.3 + +smplx==0.1.28 +mrcfile + + +imgui==1.3.0 +glfw==2.2.0 \ No newline at end of file diff --git a/stable-diffusion/LICENSE b/stable-diffusion/LICENSE new file mode 100644 index 0000000..0e609df --- /dev/null +++ b/stable-diffusion/LICENSE @@ -0,0 +1,82 @@ +Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors + +CreativeML Open RAIL-M +dated August 22, 2022 + +Section I: PREAMBLE + +Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation. + +Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations. + +In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation. + +Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI. + +This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model. + +NOW THEREFORE, You and Licensor agree as follows: + +1. Definitions + +- "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document. +- "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. 
The Data is not licensed under this License. +- "Output" means the results of operating a Model as embodied in informational content resulting therefrom. +- "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material. +- "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model. +- "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any. +- "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access. +- "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model. +- "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator. +- "Third Parties" means individuals or legal entities that are not under common control with Licensor or You. +- "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +- "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model. + +Section II: INTELLECTUAL PROPERTY RIGHTS + +Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III. + +2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model. +3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed. + +Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION + +4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions: +Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material. +You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License; +You must cause any modified files to carry prominent notices stating that You changed the files; +You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model. +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License. +5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. 
You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5). +6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License. + +Section IV: OTHER PROVISIONS + +7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model. +8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. +9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License. +10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. 
+ +END OF TERMS AND CONDITIONS + + + + +Attachment A + +Use Restrictions + +You agree not to use the Model or Derivatives of the Model: +- In any way that violates any applicable national, federal, state, local or international law or regulation; +- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way; +- To generate or disseminate verifiably false information and/or content with the purpose of harming others; +- To generate or disseminate personal identifiable information that can be used to harm an individual; +- To defame, disparage or otherwise harass others; +- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation; +- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics; +- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm; +- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories; +- To provide medical advice and medical results interpretation; +- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use). diff --git a/stable-diffusion/README.md b/stable-diffusion/README.md new file mode 100644 index 0000000..c3f7efa --- /dev/null +++ b/stable-diffusion/README.md @@ -0,0 +1,227 @@ + + +# What's new : + +Commit `21f890f9da3cfbeaba8e2ac3c425ee9e998d5229`, branch `main` + +1. `./scripts/txt2realistic_human.py` +2. `./get_test_data_df.py` + + + +# Original README + +## Stable Diffusion + +*Stable Diffusion was made possible thanks to a collaboration with [Stability AI](https://stability.ai/) and [Runway](https://runwayml.com/) and builds upon our previous work:* + +[**High-Resolution Image Synthesis with Latent Diffusion Models**](https://ommer-lab.com/research/latent-diffusion-models/)
+[Robin Rombach](https://github.com/rromb)\*, +[Andreas Blattmann](https://github.com/ablattmann)\*, +[Dominik Lorenz](https://github.com/qp-qp), +[Patrick Esser](https://github.com/pesser), +[Björn Ommer](https://hci.iwr.uni-heidelberg.de/Staff/bommer)
+_[CVPR '22 Oral](https://openaccess.thecvf.com/content/CVPR2022/html/Rombach_High-Resolution_Image_Synthesis_With_Latent_Diffusion_Models_CVPR_2022_paper.html) | +[GitHub](https://github.com/CompVis/latent-diffusion) | [arXiv](https://arxiv.org/abs/2112.10752) | [Project page](https://ommer-lab.com/research/latent-diffusion-models/)_ + +![txt2img-stable2](assets/stable-samples/txt2img/merged-0006.png) +[Stable Diffusion](#stable-diffusion-v1) is a latent text-to-image diffusion +model. +Thanks to a generous compute donation from [Stability AI](https://stability.ai/) and support from [LAION](https://laion.ai/), we were able to train a Latent Diffusion Model on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. +Similar to Google's [Imagen](https://arxiv.org/abs/2205.11487), +this model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. +With its 860M UNet and 123M text encoder, the model is relatively lightweight and runs on a GPU with at least 10GB VRAM. +See [this section](#stable-diffusion-v1) below and the [model card](https://huggingface.co/CompVis/stable-diffusion). + + +### Requirements +A suitable [conda](https://conda.io/) environment named `ldm` can be created +and activated with: + +``` +conda env create -f environment.yaml +conda activate ldm +``` + +You can also update an existing [latent diffusion](https://github.com/CompVis/latent-diffusion) environment by running + +``` +conda install pytorch torchvision -c pytorch +pip install transformers==4.19.2 diffusers invisible-watermark +pip install -e . +``` + + +### Stable Diffusion v1 + +Stable Diffusion v1 refers to a specific configuration of the model +architecture that uses a downsampling-factor 8 autoencoder with an 860M UNet +and CLIP ViT-L/14 text encoder for the diffusion model. The model was pretrained on 256x256 images and +then finetuned on 512x512 images. + +*Note: Stable Diffusion v1 is a general text-to-image diffusion model and therefore mirrors biases and (mis-)conceptions that are present +in its training data. +Details on the training procedure and data, as well as the intended use of the model can be found in the corresponding [model card](Stable_Diffusion_v1_Model_Card.md).* + +The weights are available via [the CompVis organization at Hugging Face](https://huggingface.co/CompVis) under [a license which contains specific use-based restrictions to prevent misuse and harm as informed by the model card, but otherwise remains permissive](LICENSE). 
While commercial use is permitted under the terms of the license, **we do not recommend using the provided weights for services or products without additional safety mechanisms and considerations**, since there are [known limitations and biases](Stable_Diffusion_v1_Model_Card.md#limitations-and-bias) of the weights, and research on safe and ethical deployment of general text-to-image models is an ongoing effort. **The weights are research artifacts and should be treated as such.** + +[The CreativeML OpenRAIL M license](LICENSE) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based. + +#### Weights + +We currently provide the following checkpoints: + +- `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en). + 194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`). +- `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`. + 515k steps at resolution `512x512` on [laion-aesthetics v2 5+](https://laion.ai/blog/laion-aesthetics/) (a subset of laion2B-en with estimated aesthetics score `> 5.0`, and additionally + filtered to images with an original size `>= 512x512`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the [LAION-5B](https://laion.ai/blog/laion-5b/) metadata, the aesthetics score is estimated using the [LAION-Aesthetics Predictor V2](https://github.com/christophschuhmann/improved-aesthetic-predictor)). +- `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). +- `sd-v1-4.ckpt`: Resumed from `sd-v1-2.ckpt`. 225k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). 
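The `sd-v1-*.ckpt` files above are standard PyTorch checkpoints containing a `state_dict`. As a minimal loading sketch (not part of the original README), they can be restored together with the inference config shipped in this repository; this assumes the `ldm` package from this repo is installed and that a checkpoint such as `sd-v1-4.ckpt` has been downloaded locally:

```py
# Minimal checkpoint-loading sketch; file paths are placeholders for wherever you keep the files.
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("configs/stable-diffusion/v1-inference.yaml")
ckpt = torch.load("sd-v1-4.ckpt", map_location="cpu")        # checkpoint dict with a "state_dict" entry

model = instantiate_from_config(config.model)                 # builds the LatentDiffusion model described by the config
missing, unexpected = model.load_state_dict(ckpt["state_dict"], strict=False)
model.eval()                                                  # move to GPU / half precision as needed
```

The reference sampling script described below performs essentially these steps internally before sampling, so the sketch is mainly useful if you want to drive the model from your own code.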
+ +Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, +5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling +steps show the relative improvements of the checkpoints: +![sd evaluation results](assets/v1-variants-scores.jpg) + + + +#### Text-to-Image with Stable Diffusion +![txt2img-stable2](assets/stable-samples/txt2img/merged-0005.png) +![txt2img-stable2](assets/stable-samples/txt2img/merged-0007.png) + +Stable Diffusion is a latent diffusion model conditioned on the (non-pooled) text embeddings of a CLIP ViT-L/14 text encoder. +We provide a [reference script for sampling](#reference-sampling-script), but +there also exists a [diffusers integration](#diffusers-integration), for which we +expect to see more active community development. + +##### Reference Sampling Script + +We provide a reference sampling script, which incorporates + +- a [Safety Checker Module](https://github.com/CompVis/stable-diffusion/pull/36), + to reduce the probability of explicit outputs, +- an [invisible watermarking](https://github.com/ShieldMnt/invisible-watermark) + of the outputs, to help viewers [identify the images as machine-generated](scripts/tests/test_watermark.py). + +After [obtaining the `stable-diffusion-v1-*-original` weights](#weights), link them +``` +mkdir -p models/ldm/stable-diffusion-v1/ +ln -s <path/to/model.ckpt> models/ldm/stable-diffusion-v1/model.ckpt +``` +and sample with +``` +python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --plms +``` + +By default, this uses a guidance scale of `--scale 7.5`, [Katherine Crowson's implementation](https://github.com/CompVis/latent-diffusion/pull/51) of the [PLMS](https://arxiv.org/abs/2202.09778) sampler, +and renders images of size 512x512 (which it was trained on) in 50 steps. All supported arguments are listed below (type `python scripts/txt2img.py --help`). + + +```commandline +usage: txt2img.py [-h] [--prompt [PROMPT]] [--outdir [OUTDIR]] [--skip_grid] [--skip_save] [--ddim_steps DDIM_STEPS] [--plms] [--laion400m] [--fixed_code] [--ddim_eta DDIM_ETA] + [--n_iter N_ITER] [--H H] [--W W] [--C C] [--f F] [--n_samples N_SAMPLES] [--n_rows N_ROWS] [--scale SCALE] [--from-file FROM_FILE] [--config CONFIG] [--ckpt CKPT] + [--seed SEED] [--precision {full,autocast}] + +optional arguments: + -h, --help show this help message and exit + --prompt [PROMPT] the prompt to render + --outdir [OUTDIR] dir to write results to + --skip_grid do not save a grid, only individual samples. Helpful when evaluating lots of samples + --skip_save do not save individual samples. For speed measurements. + --ddim_steps DDIM_STEPS + number of ddim sampling steps + --plms use plms sampling + --laion400m uses the LAION400M model + --fixed_code if enabled, uses the same starting code across samples + --ddim_eta DDIM_ETA ddim eta (eta=0.0 corresponds to deterministic sampling) + --n_iter N_ITER sample this often + --H H image height, in pixel space + --W W image width, in pixel space + --C C latent channels + --f F downsampling factor + --n_samples N_SAMPLES + how many samples to produce for each given prompt. A.k.a.
batch size + --n_rows N_ROWS rows in the grid (default: n_samples) + --scale SCALE unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty)) + --from-file FROM_FILE + if specified, load prompts from this file + --config CONFIG path to config which constructs model + --ckpt CKPT path to checkpoint of model + --seed SEED the seed (for reproducible sampling) + --precision {full,autocast} + evaluate at this precision +``` +Note: The inference config for all v1 versions is designed to be used with EMA-only checkpoints. +For this reason `use_ema=False` is set in the configuration, otherwise the code will try to switch from +non-EMA to EMA weights. If you want to examine the effect of EMA vs no EMA, we provide "full" checkpoints +which contain both types of weights. For these, `use_ema=False` will load and use the non-EMA weights. + + +##### Diffusers Integration + +A simple way to download and sample Stable Diffusion is by using the [diffusers library](https://github.com/huggingface/diffusers/tree/main#new--stable-diffusion-is-now-fully-compatible-with-diffusers): +```py +# make sure you're logged in with `huggingface-cli login` +from torch import autocast +from diffusers import StableDiffusionPipeline + +pipe = StableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + use_auth_token=True +).to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +with autocast("cuda"): + image = pipe(prompt)["sample"][0] + +image.save("astronaut_rides_horse.png") +``` + + +#### Image Modification with Stable Diffusion + +By using a diffusion-denoising mechanism as first proposed by [SDEdit](https://arxiv.org/abs/2108.01073), the model can be used for different +tasks such as text-guided image-to-image translation and upscaling. Similar to the txt2img sampling script, +we provide a script to perform image modification with Stable Diffusion. + +The following describes an example where a rough sketch made in [Pinta](https://www.pinta-project.com/) is converted into a detailed artwork. +``` +python scripts/img2img.py --prompt "A fantasy landscape, trending on artstation" --init-img <path-to-img.jpg> --strength 0.8 +``` +Here, strength is a value between 0.0 and 1.0 that controls the amount of noise that is added to the input image. +Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input. See the following example. + +**Input** + +![sketch-in](assets/stable-samples/img2img/sketch-mountains-input.jpg) + +**Outputs** + +![out3](assets/stable-samples/img2img/mountains-3.png) +![out2](assets/stable-samples/img2img/mountains-2.png) + +This procedure can, for example, also be used to upscale samples from the base model. + +### Comments + +- Our codebase for the diffusion models builds heavily on [OpenAI's ADM codebase](https://github.com/openai/guided-diffusion) +and [https://github.com/lucidrains/denoising-diffusion-pytorch](https://github.com/lucidrains/denoising-diffusion-pytorch). +Thanks for open-sourcing!
+ +- The implementation of the transformer encoder is from [x-transformers](https://github.com/lucidrains/x-transformers) by [lucidrains](https://github.com/lucidrains?tab=repositories). + + +### BibTeX + +``` +@misc{rombach2021highresolution, + title={High-Resolution Image Synthesis with Latent Diffusion Models}, + author={Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer}, + year={2021}, + eprint={2112.10752}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + diff --git a/stable-diffusion/Stable_Diffusion_v1_Model_Card.md b/stable-diffusion/Stable_Diffusion_v1_Model_Card.md new file mode 100644 index 0000000..ad76ad2 --- /dev/null +++ b/stable-diffusion/Stable_Diffusion_v1_Model_Card.md @@ -0,0 +1,144 @@ +# Stable Diffusion v1 Model Card +This model card focuses on the model associated with the Stable Diffusion model, available [here](https://github.com/CompVis/stable-diffusion). + +## Model Details +- **Developed by:** Robin Rombach, Patrick Esser +- **Model type:** Diffusion-based text-to-image generation model +- **Language(s):** English +- **License:** [Proprietary](LICENSE) +- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487). +- **Resources for more information:** [GitHub Repository](https://github.com/CompVis/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752). +- **Cite as:** + + @InProceedings{Rombach_2022_CVPR, + author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, + title = {High-Resolution Image Synthesis With Latent Diffusion Models}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {10684-10695} + } + +# Uses + +## Direct Use +The model is intended for research purposes only. Possible research areas and +tasks include + +- Safe deployment of models which have the potential to generate harmful content. +- Probing and understanding the limitations and biases of generative models. +- Generation of artworks and use in design and other artistic processes. +- Applications in educational or creative tools. +- Research on generative models. + +Excluded uses are described below. + + ### Misuse, Malicious Use, and Out-of-Scope Use +_Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_. + +The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. 
+ +#### Out-of-Scope Use +The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. + +#### Misuse and Malicious Use +Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to: + +- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc. +- Intentionally promoting or propagating discriminatory content or harmful stereotypes. +- Impersonating individuals without their consent. +- Sexual content without consent of the people who might see it. +- Mis- and disinformation +- Representations of egregious violence and gore +- Sharing of copyrighted or licensed material in violation of its terms of use. +- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use. + +## Limitations and Bias + +### Limitations + +- The model does not achieve perfect photorealism +- The model cannot render legible text +- The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere” +- Faces and people in general may not be generated properly. +- The model was trained mainly with English captions and will not work as well in other languages. +- The autoencoding part of the model is lossy +- The model was trained on a large-scale dataset + [LAION-5B](https://laion.ai/blog/laion-5b/) which contains adult material + and is not fit for product use without additional safety mechanisms and + considerations. +- No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data. + The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images. + +### Bias +While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. +Stable Diffusion v1 was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), +which consists of images that are limited to English descriptions. +Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for. +This affects the overall output of the model, as white and western cultures are often set as the default. Further, the +ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts. +Stable Diffusion v1 mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent. + + +## Training + +**Training Data** +The model developers used the following dataset for training the model: + +- LAION-5B and subsets thereof (see next section) + +**Training Procedure** +Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. 
During training, + +- Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4 +- Text prompts are encoded through a ViT-L/14 text-encoder. +- The non-pooled output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention. +- The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet. + +We currently provide the following checkpoints: + +- `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en). + 194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`). +- `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`. + 515k steps at resolution `512x512` on [laion-aesthetics v2 5+](https://laion.ai/blog/laion-aesthetics/) (a subset of laion2B-en with estimated aesthetics score `> 5.0`, and additionally +filtered to images with an original size `>= 512x512`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the [LAION-5B](https://laion.ai/blog/laion-5b/) metadata, the aesthetics score is estimated using the [LAION-Aesthetics Predictor V2](https://github.com/christophschuhmann/improved-aesthetic-predictor)). +- `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). +- `sd-v1-4.ckpt`: Resumed from `sd-v1-2.ckpt`. 225k steps at resolution `512x512` on "laion-aesthetics v2 5+" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). + +- **Hardware:** 32 x 8 x A100 GPUs +- **Optimizer:** AdamW +- **Gradient Accumulations**: 2 +- **Batch:** 32 x 8 x 2 x 4 = 2048 +- **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant + +## Evaluation Results +Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, +5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling +steps show the relative improvements of the checkpoints: + +![pareto](assets/v1-variants-scores.jpg) + +Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores. + +## Environmental Impact + +**Stable Diffusion v1** **Estimated Emissions** +Based on that information, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact. 
+ +- **Hardware Type:** A100 PCIe 40GB +- **Hours used:** 150000 +- **Cloud Provider:** AWS +- **Compute Region:** US-east +- **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 11250 kg CO2 eq. + +## Citation + @InProceedings{Rombach_2022_CVPR, + author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, + title = {High-Resolution Image Synthesis With Latent Diffusion Models}, + booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month = {June}, + year = {2022}, + pages = {10684-10695} + } + +*This model card was written by: Robin Rombach and Patrick Esser and is based on the [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).* diff --git a/stable-diffusion/assets/a-painting-of-a-fire.png b/stable-diffusion/assets/a-painting-of-a-fire.png new file mode 100644 index 0000000..3d3b9bd Binary files /dev/null and b/stable-diffusion/assets/a-painting-of-a-fire.png differ diff --git a/stable-diffusion/assets/a-photograph-of-a-fire.png b/stable-diffusion/assets/a-photograph-of-a-fire.png new file mode 100644 index 0000000..e246bc1 Binary files /dev/null and b/stable-diffusion/assets/a-photograph-of-a-fire.png differ diff --git a/stable-diffusion/assets/a-shirt-with-a-fire-printed-on-it.png b/stable-diffusion/assets/a-shirt-with-a-fire-printed-on-it.png new file mode 100644 index 0000000..aa68f18 Binary files /dev/null and b/stable-diffusion/assets/a-shirt-with-a-fire-printed-on-it.png differ diff --git a/stable-diffusion/assets/a-shirt-with-the-inscription-'fire'.png b/stable-diffusion/assets/a-shirt-with-the-inscription-'fire'.png new file mode 100644 index 0000000..f058b97 Binary files /dev/null and b/stable-diffusion/assets/a-shirt-with-the-inscription-'fire'.png differ diff --git a/stable-diffusion/assets/a-watercolor-painting-of-a-fire.png b/stable-diffusion/assets/a-watercolor-painting-of-a-fire.png new file mode 100644 index 0000000..e4ebe13 Binary files /dev/null and b/stable-diffusion/assets/a-watercolor-painting-of-a-fire.png differ diff --git a/stable-diffusion/assets/birdhouse.png b/stable-diffusion/assets/birdhouse.png new file mode 100644 index 0000000..872d49c Binary files /dev/null and b/stable-diffusion/assets/birdhouse.png differ diff --git a/stable-diffusion/assets/fire.png b/stable-diffusion/assets/fire.png new file mode 100644 index 0000000..64c24fe Binary files /dev/null and b/stable-diffusion/assets/fire.png differ diff --git a/stable-diffusion/assets/inpainting.png b/stable-diffusion/assets/inpainting.png new file mode 100644 index 0000000..d6b9ef8 Binary files /dev/null and b/stable-diffusion/assets/inpainting.png differ diff --git a/stable-diffusion/assets/modelfigure.png b/stable-diffusion/assets/modelfigure.png new file mode 100644 index 0000000..6b1d3e6 Binary files /dev/null and b/stable-diffusion/assets/modelfigure.png differ diff --git a/stable-diffusion/assets/rdm-preview.jpg b/stable-diffusion/assets/rdm-preview.jpg new file mode 100644 index 0000000..3838b0f Binary files /dev/null and b/stable-diffusion/assets/rdm-preview.jpg differ diff --git a/stable-diffusion/assets/reconstruction1.png b/stable-diffusion/assets/reconstruction1.png new file mode 100644 index 0000000..0752799 Binary files /dev/null and b/stable-diffusion/assets/reconstruction1.png differ diff --git a/stable-diffusion/assets/reconstruction2.png 
b/stable-diffusion/assets/reconstruction2.png new file mode 100644 index 0000000..b8e7a36 Binary files /dev/null and b/stable-diffusion/assets/reconstruction2.png differ diff --git a/stable-diffusion/assets/results.gif b/stable-diffusion/assets/results.gif new file mode 100644 index 0000000..82b6590 Binary files /dev/null and b/stable-diffusion/assets/results.gif differ diff --git a/stable-diffusion/assets/rick.jpeg b/stable-diffusion/assets/rick.jpeg new file mode 100644 index 0000000..9954860 Binary files /dev/null and b/stable-diffusion/assets/rick.jpeg differ diff --git a/stable-diffusion/assets/stable-samples/img2img/mountains-1.png b/stable-diffusion/assets/stable-samples/img2img/mountains-1.png new file mode 100644 index 0000000..d01b835 Binary files /dev/null and b/stable-diffusion/assets/stable-samples/img2img/mountains-1.png differ diff --git a/stable-diffusion/assets/stable-samples/img2img/mountains-2.png b/stable-diffusion/assets/stable-samples/img2img/mountains-2.png new file mode 100644 index 0000000..e9f4e70 Binary files /dev/null and b/stable-diffusion/assets/stable-samples/img2img/mountains-2.png differ diff --git a/stable-diffusion/assets/stable-samples/img2img/mountains-3.png b/stable-diffusion/assets/stable-samples/img2img/mountains-3.png new file mode 100644 index 0000000..017de30 Binary files /dev/null and b/stable-diffusion/assets/stable-samples/img2img/mountains-3.png differ diff --git a/stable-diffusion/assets/stable-samples/img2img/sketch-mountains-input.jpg b/stable-diffusion/assets/stable-samples/img2img/sketch-mountains-input.jpg new file mode 100644 index 0000000..79d652b Binary files /dev/null and b/stable-diffusion/assets/stable-samples/img2img/sketch-mountains-input.jpg differ diff --git a/stable-diffusion/assets/stable-samples/img2img/upscaling-in.png b/stable-diffusion/assets/stable-samples/img2img/upscaling-in.png new file mode 100644 index 0000000..501c31c Binary files /dev/null and b/stable-diffusion/assets/stable-samples/img2img/upscaling-in.png differ diff --git a/stable-diffusion/assets/stable-samples/img2img/upscaling-out.png b/stable-diffusion/assets/stable-samples/img2img/upscaling-out.png new file mode 100644 index 0000000..1c4bb25 Binary files /dev/null and b/stable-diffusion/assets/stable-samples/img2img/upscaling-out.png differ diff --git a/stable-diffusion/assets/stable-samples/txt2img/000002025.png b/stable-diffusion/assets/stable-samples/txt2img/000002025.png new file mode 100644 index 0000000..66891c1 Binary files /dev/null and b/stable-diffusion/assets/stable-samples/txt2img/000002025.png differ diff --git a/stable-diffusion/assets/stable-samples/txt2img/000002035.png b/stable-diffusion/assets/stable-samples/txt2img/000002035.png new file mode 100644 index 0000000..c707c13 Binary files /dev/null and b/stable-diffusion/assets/stable-samples/txt2img/000002035.png differ diff --git a/stable-diffusion/assets/stable-samples/txt2img/merged-0005.png b/stable-diffusion/assets/stable-samples/txt2img/merged-0005.png new file mode 100644 index 0000000..ca0a1af Binary files /dev/null and b/stable-diffusion/assets/stable-samples/txt2img/merged-0005.png differ diff --git a/stable-diffusion/assets/stable-samples/txt2img/merged-0006.png b/stable-diffusion/assets/stable-samples/txt2img/merged-0006.png new file mode 100644 index 0000000..999f370 Binary files /dev/null and b/stable-diffusion/assets/stable-samples/txt2img/merged-0006.png differ diff --git a/stable-diffusion/assets/stable-samples/txt2img/merged-0007.png 
b/stable-diffusion/assets/stable-samples/txt2img/merged-0007.png new file mode 100644 index 0000000..af390ac Binary files /dev/null and b/stable-diffusion/assets/stable-samples/txt2img/merged-0007.png differ diff --git a/stable-diffusion/assets/the-earth-is-on-fire,-oil-on-canvas.png b/stable-diffusion/assets/the-earth-is-on-fire,-oil-on-canvas.png new file mode 100644 index 0000000..9079720 Binary files /dev/null and b/stable-diffusion/assets/the-earth-is-on-fire,-oil-on-canvas.png differ diff --git a/stable-diffusion/assets/txt2img-convsample.png b/stable-diffusion/assets/txt2img-convsample.png new file mode 100644 index 0000000..255c265 Binary files /dev/null and b/stable-diffusion/assets/txt2img-convsample.png differ diff --git a/stable-diffusion/assets/txt2img-preview.png b/stable-diffusion/assets/txt2img-preview.png new file mode 100644 index 0000000..51ee1c2 Binary files /dev/null and b/stable-diffusion/assets/txt2img-preview.png differ diff --git a/stable-diffusion/assets/v1-variants-scores.jpg b/stable-diffusion/assets/v1-variants-scores.jpg new file mode 100644 index 0000000..7d997ba Binary files /dev/null and b/stable-diffusion/assets/v1-variants-scores.jpg differ diff --git a/stable-diffusion/configs/autoencoder/autoencoder_kl_16x16x16.yaml b/stable-diffusion/configs/autoencoder/autoencoder_kl_16x16x16.yaml new file mode 100644 index 0000000..5f1d10e --- /dev/null +++ b/stable-diffusion/configs/autoencoder/autoencoder_kl_16x16x16.yaml @@ -0,0 +1,54 @@ +model: + base_learning_rate: 4.5e-6 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: "val/rec_loss" + embed_dim: 16 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 0.000001 + disc_weight: 0.5 + + ddconfig: + double_z: True + z_channels: 16 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [ 1,1,2,2,4] # num_down = len(ch_mult)-1 + num_res_blocks: 2 + attn_resolutions: [16] + dropout: 0.0 + + +data: + target: main.DataModuleFromConfig + params: + batch_size: 12 + wrap: True + train: + target: ldm.data.imagenet.ImageNetSRTrain + params: + size: 256 + degradation: pil_nearest + validation: + target: ldm.data.imagenet.ImageNetSRValidation + params: + size: 256 + degradation: pil_nearest + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 1000 + max_images: 8 + increase_log_steps: True + + trainer: + benchmark: True + accumulate_grad_batches: 2 diff --git a/stable-diffusion/configs/autoencoder/autoencoder_kl_32x32x4.yaml b/stable-diffusion/configs/autoencoder/autoencoder_kl_32x32x4.yaml new file mode 100644 index 0000000..ab8b36f --- /dev/null +++ b/stable-diffusion/configs/autoencoder/autoencoder_kl_32x32x4.yaml @@ -0,0 +1,53 @@ +model: + base_learning_rate: 4.5e-6 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: "val/rec_loss" + embed_dim: 4 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 0.000001 + disc_weight: 0.5 + + ddconfig: + double_z: True + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [ 1,2,4,4 ] # num_down = len(ch_mult)-1 + num_res_blocks: 2 + attn_resolutions: [ ] + dropout: 0.0 + +data: + target: main.DataModuleFromConfig + params: + batch_size: 12 + wrap: True + train: + target: ldm.data.imagenet.ImageNetSRTrain + params: + size: 256 + degradation: pil_nearest + validation: + target: ldm.data.imagenet.ImageNetSRValidation + params: + size: 256 + degradation: 
pil_nearest + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 1000 + max_images: 8 + increase_log_steps: True + + trainer: + benchmark: True + accumulate_grad_batches: 2 diff --git a/stable-diffusion/configs/autoencoder/autoencoder_kl_64x64x3.yaml b/stable-diffusion/configs/autoencoder/autoencoder_kl_64x64x3.yaml new file mode 100644 index 0000000..5e3db5c --- /dev/null +++ b/stable-diffusion/configs/autoencoder/autoencoder_kl_64x64x3.yaml @@ -0,0 +1,54 @@ +model: + base_learning_rate: 4.5e-6 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: "val/rec_loss" + embed_dim: 3 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 0.000001 + disc_weight: 0.5 + + ddconfig: + double_z: True + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [ 1,2,4 ] # num_down = len(ch_mult)-1 + num_res_blocks: 2 + attn_resolutions: [ ] + dropout: 0.0 + + +data: + target: main.DataModuleFromConfig + params: + batch_size: 12 + wrap: True + train: + target: ldm.data.imagenet.ImageNetSRTrain + params: + size: 256 + degradation: pil_nearest + validation: + target: ldm.data.imagenet.ImageNetSRValidation + params: + size: 256 + degradation: pil_nearest + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 1000 + max_images: 8 + increase_log_steps: True + + trainer: + benchmark: True + accumulate_grad_batches: 2 diff --git a/stable-diffusion/configs/autoencoder/autoencoder_kl_8x8x64.yaml b/stable-diffusion/configs/autoencoder/autoencoder_kl_8x8x64.yaml new file mode 100644 index 0000000..5ccd09d --- /dev/null +++ b/stable-diffusion/configs/autoencoder/autoencoder_kl_8x8x64.yaml @@ -0,0 +1,53 @@ +model: + base_learning_rate: 4.5e-6 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: "val/rec_loss" + embed_dim: 64 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 0.000001 + disc_weight: 0.5 + + ddconfig: + double_z: True + z_channels: 64 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [ 1,1,2,2,4,4] # num_down = len(ch_mult)-1 + num_res_blocks: 2 + attn_resolutions: [16,8] + dropout: 0.0 + +data: + target: main.DataModuleFromConfig + params: + batch_size: 12 + wrap: True + train: + target: ldm.data.imagenet.ImageNetSRTrain + params: + size: 256 + degradation: pil_nearest + validation: + target: ldm.data.imagenet.ImageNetSRValidation + params: + size: 256 + degradation: pil_nearest + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 1000 + max_images: 8 + increase_log_steps: True + + trainer: + benchmark: True + accumulate_grad_batches: 2 diff --git a/stable-diffusion/configs/latent-diffusion/celebahq-ldm-vq-4.yaml b/stable-diffusion/configs/latent-diffusion/celebahq-ldm-vq-4.yaml new file mode 100644 index 0000000..89b3df4 --- /dev/null +++ b/stable-diffusion/configs/latent-diffusion/celebahq-ldm-vq-4.yaml @@ -0,0 +1,86 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + image_size: 64 + channels: 3 + monitor: val/loss_simple_ema + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 224 + 
attention_resolutions: + # note: this isn\t actually the resolution but + # the downsampling factor, i.e. this corresnponds to + # attention on spatial resolution 8,16,32, as the + # spatial reolution of the latents is 64 for f4 + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_head_channels: 32 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ckpt_path: models/first_stage_models/vq-f4/model.ckpt + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: __is_unconditional__ +data: + target: main.DataModuleFromConfig + params: + batch_size: 48 + num_workers: 5 + wrap: false + train: + target: taming.data.faceshq.CelebAHQTrain + params: + size: 256 + validation: + target: taming.data.faceshq.CelebAHQValidation + params: + size: 256 + + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 5000 + max_images: 8 + increase_log_steps: False + + trainer: + benchmark: True \ No newline at end of file diff --git a/stable-diffusion/configs/latent-diffusion/cin-ldm-vq-f8.yaml b/stable-diffusion/configs/latent-diffusion/cin-ldm-vq-f8.yaml new file mode 100644 index 0000000..b8cd9e2 --- /dev/null +++ b/stable-diffusion/configs/latent-diffusion/cin-ldm-vq-f8.yaml @@ -0,0 +1,98 @@ +model: + base_learning_rate: 1.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: class_label + image_size: 32 + channels: 4 + cond_stage_trainable: true + conditioning_key: crossattn + monitor: val/loss_simple_ema + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 + in_channels: 4 + out_channels: 4 + model_channels: 256 + attention_resolutions: + #note: this isn\t actually the resolution but + # the downsampling factor, i.e. 
this corresnponds to + # attention on spatial resolution 8,16,32, as the + # spatial reolution of the latents is 32 for f8 + - 4 + - 2 + - 1 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 4 + num_head_channels: 32 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 512 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 4 + n_embed: 16384 + ckpt_path: configs/first_stage_models/vq-f8/model.yaml + ddconfig: + double_z: false + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 32 + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: ldm.modules.encoders.modules.ClassEmbedder + params: + embed_dim: 512 + key: class_label +data: + target: main.DataModuleFromConfig + params: + batch_size: 64 + num_workers: 12 + wrap: false + train: + target: ldm.data.imagenet.ImageNetTrain + params: + config: + size: 256 + validation: + target: ldm.data.imagenet.ImageNetValidation + params: + config: + size: 256 + + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 5000 + max_images: 8 + increase_log_steps: False + + trainer: + benchmark: True \ No newline at end of file diff --git a/stable-diffusion/configs/latent-diffusion/cin256-v2.yaml b/stable-diffusion/configs/latent-diffusion/cin256-v2.yaml new file mode 100644 index 0000000..b7c1aa2 --- /dev/null +++ b/stable-diffusion/configs/latent-diffusion/cin256-v2.yaml @@ -0,0 +1,68 @@ +model: + base_learning_rate: 0.0001 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: class_label + image_size: 64 + channels: 3 + cond_stage_trainable: true + conditioning_key: crossattn + monitor: val/loss + use_ema: False + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 192 + attention_resolutions: + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 5 + num_heads: 1 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 512 + + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.ClassEmbedder + params: + n_classes: 1001 + embed_dim: 512 + key: class_label diff --git a/stable-diffusion/configs/latent-diffusion/ffhq-ldm-vq-4.yaml b/stable-diffusion/configs/latent-diffusion/ffhq-ldm-vq-4.yaml new file mode 100644 index 0000000..1899e30 --- /dev/null +++ b/stable-diffusion/configs/latent-diffusion/ffhq-ldm-vq-4.yaml @@ -0,0 +1,85 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + image_size: 64 + channels: 3 + monitor: val/loss_simple_ema + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 224 + 
attention_resolutions: + # note: this isn\t actually the resolution but + # the downsampling factor, i.e. this corresnponds to + # attention on spatial resolution 8,16,32, as the + # spatial reolution of the latents is 64 for f4 + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_head_channels: 32 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ckpt_path: configs/first_stage_models/vq-f4/model.yaml + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: __is_unconditional__ +data: + target: main.DataModuleFromConfig + params: + batch_size: 42 + num_workers: 5 + wrap: false + train: + target: taming.data.faceshq.FFHQTrain + params: + size: 256 + validation: + target: taming.data.faceshq.FFHQValidation + params: + size: 256 + + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 5000 + max_images: 8 + increase_log_steps: False + + trainer: + benchmark: True \ No newline at end of file diff --git a/stable-diffusion/configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml b/stable-diffusion/configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml new file mode 100644 index 0000000..c4ca66c --- /dev/null +++ b/stable-diffusion/configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml @@ -0,0 +1,85 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + image_size: 64 + channels: 3 + monitor: val/loss_simple_ema + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 224 + attention_resolutions: + # note: this isn\t actually the resolution but + # the downsampling factor, i.e. 
this corresnponds to + # attention on spatial resolution 8,16,32, as the + # spatial reolution of the latents is 64 for f4 + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_head_channels: 32 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + ckpt_path: configs/first_stage_models/vq-f4/model.yaml + embed_dim: 3 + n_embed: 8192 + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: __is_unconditional__ +data: + target: main.DataModuleFromConfig + params: + batch_size: 48 + num_workers: 5 + wrap: false + train: + target: ldm.data.lsun.LSUNBedroomsTrain + params: + size: 256 + validation: + target: ldm.data.lsun.LSUNBedroomsValidation + params: + size: 256 + + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 5000 + max_images: 8 + increase_log_steps: False + + trainer: + benchmark: True \ No newline at end of file diff --git a/stable-diffusion/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml b/stable-diffusion/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml new file mode 100644 index 0000000..18dc8c2 --- /dev/null +++ b/stable-diffusion/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml @@ -0,0 +1,91 @@ +model: + base_learning_rate: 5.0e-5 # set to target_lr by starting main.py with '--scale_lr False' + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0155 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + loss_type: l1 + first_stage_key: "image" + cond_stage_key: "image" + image_size: 32 + channels: 4 + cond_stage_trainable: False + concat_mode: False + scale_by_std: True + monitor: 'val/loss_simple_ema' + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [10000] + cycle_lengths: [10000000000000] + f_start: [1.e-6] + f_max: [1.] + f_min: [ 1.] 
+ + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 + in_channels: 4 + out_channels: 4 + model_channels: 192 + attention_resolutions: [ 1, 2, 4, 8 ] # 32, 16, 8, 4 + num_res_blocks: 2 + channel_mult: [ 1,2,2,4,4 ] # 32, 16, 8, 4, 2 + num_heads: 8 + use_scale_shift_norm: True + resblock_updown: True + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: "val/rec_loss" + ckpt_path: "models/first_stage_models/kl-f8/model.ckpt" + ddconfig: + double_z: True + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [ 1,2,4,4 ] # num_down = len(ch_mult)-1 + num_res_blocks: 2 + attn_resolutions: [ ] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: "__is_unconditional__" + +data: + target: main.DataModuleFromConfig + params: + batch_size: 96 + num_workers: 5 + wrap: False + train: + target: ldm.data.lsun.LSUNChurchesTrain + params: + size: 256 + validation: + target: ldm.data.lsun.LSUNChurchesValidation + params: + size: 256 + +lightning: + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 5000 + max_images: 8 + increase_log_steps: False + + + trainer: + benchmark: True \ No newline at end of file diff --git a/stable-diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml b/stable-diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml new file mode 100644 index 0000000..8e331cb --- /dev/null +++ b/stable-diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml @@ -0,0 +1,71 @@ +model: + base_learning_rate: 5.0e-05 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.012 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: caption + image_size: 32 + channels: 4 + cond_stage_trainable: true + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: + - 4 + - 2 + - 1 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 4 + - 4 + num_heads: 8 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 1280 + use_checkpoint: true + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.BERTEmbedder + params: + n_embed: 1280 + n_layer: 32 diff --git a/stable-diffusion/configs/retrieval-augmented-diffusion/768x768.yaml b/stable-diffusion/configs/retrieval-augmented-diffusion/768x768.yaml new file mode 100644 index 0000000..b51b1d8 --- /dev/null +++ b/stable-diffusion/configs/retrieval-augmented-diffusion/768x768.yaml @@ -0,0 +1,68 @@ +model: + base_learning_rate: 0.0001 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.015 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: jpg + cond_stage_key: nix + image_size: 48 + channels: 16 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + 
scale_by_std: false + scale_factor: 0.22765929 + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 48 + in_channels: 16 + out_channels: 16 + model_channels: 448 + attention_resolutions: + - 4 + - 2 + - 1 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + use_scale_shift_norm: false + resblock_updown: false + num_head_channels: 32 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 768 + use_checkpoint: true + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: val/rec_loss + embed_dim: 16 + ddconfig: + double_z: true + z_channels: 16 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 1 + - 2 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 16 + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: torch.nn.Identity \ No newline at end of file diff --git a/stable-diffusion/configs/stable-diffusion/v1-inference.yaml b/stable-diffusion/configs/stable-diffusion/v1-inference.yaml new file mode 100644 index 0000000..d4effe5 --- /dev/null +++ b/stable-diffusion/configs/stable-diffusion/v1-inference.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. 
] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder diff --git a/stable-diffusion/data/DejaVuSans.ttf b/stable-diffusion/data/DejaVuSans.ttf new file mode 100644 index 0000000..e5f7eec Binary files /dev/null and b/stable-diffusion/data/DejaVuSans.ttf differ diff --git a/stable-diffusion/data/example_conditioning/superresolution/sample_0.jpg b/stable-diffusion/data/example_conditioning/superresolution/sample_0.jpg new file mode 100644 index 0000000..09abe80 Binary files /dev/null and b/stable-diffusion/data/example_conditioning/superresolution/sample_0.jpg differ diff --git a/stable-diffusion/data/example_conditioning/text_conditional/sample_0.txt b/stable-diffusion/data/example_conditioning/text_conditional/sample_0.txt new file mode 100644 index 0000000..de60c5c --- /dev/null +++ b/stable-diffusion/data/example_conditioning/text_conditional/sample_0.txt @@ -0,0 +1 @@ +A basket of cerries diff --git a/stable-diffusion/data/imagenet_clsidx_to_label.txt b/stable-diffusion/data/imagenet_clsidx_to_label.txt new file mode 100644 index 0000000..e2fe435 --- /dev/null +++ b/stable-diffusion/data/imagenet_clsidx_to_label.txt @@ -0,0 +1,1000 @@ + 0: 'tench, Tinca tinca', + 1: 'goldfish, Carassius auratus', + 2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', + 3: 'tiger shark, Galeocerdo cuvieri', + 4: 'hammerhead, hammerhead shark', + 5: 'electric ray, crampfish, numbfish, torpedo', + 6: 'stingray', + 7: 'cock', + 8: 'hen', + 9: 'ostrich, Struthio camelus', + 10: 'brambling, Fringilla montifringilla', + 11: 'goldfinch, Carduelis carduelis', + 12: 'house finch, linnet, Carpodacus mexicanus', + 13: 'junco, snowbird', + 14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea', + 15: 'robin, American robin, Turdus migratorius', + 16: 'bulbul', + 17: 'jay', + 18: 'magpie', + 19: 'chickadee', + 20: 'water ouzel, dipper', + 21: 'kite', + 22: 'bald eagle, American eagle, Haliaeetus leucocephalus', + 23: 'vulture', + 24: 'great grey owl, great gray owl, Strix nebulosa', + 25: 'European fire salamander, Salamandra salamandra', + 26: 'common newt, Triturus vulgaris', + 27: 'eft', + 28: 'spotted salamander, Ambystoma maculatum', + 29: 'axolotl, mud puppy, Ambystoma mexicanum', + 30: 'bullfrog, Rana catesbeiana', + 31: 'tree frog, tree-frog', + 32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui', + 33: 'loggerhead, loggerhead turtle, Caretta caretta', + 34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', + 35: 'mud turtle', + 36: 'terrapin', + 37: 'box turtle, box tortoise', + 38: 'banded gecko', + 39: 'common iguana, iguana, Iguana iguana', + 40: 'American chameleon, anole, Anolis carolinensis', + 41: 'whiptail, whiptail lizard', + 42: 'agama', + 43: 'frilled 
lizard, Chlamydosaurus kingi', + 44: 'alligator lizard', + 45: 'Gila monster, Heloderma suspectum', + 46: 'green lizard, Lacerta viridis', + 47: 'African chameleon, Chamaeleo chamaeleon', + 48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', + 49: 'African crocodile, Nile crocodile, Crocodylus niloticus', + 50: 'American alligator, Alligator mississipiensis', + 51: 'triceratops', + 52: 'thunder snake, worm snake, Carphophis amoenus', + 53: 'ringneck snake, ring-necked snake, ring snake', + 54: 'hognose snake, puff adder, sand viper', + 55: 'green snake, grass snake', + 56: 'king snake, kingsnake', + 57: 'garter snake, grass snake', + 58: 'water snake', + 59: 'vine snake', + 60: 'night snake, Hypsiglena torquata', + 61: 'boa constrictor, Constrictor constrictor', + 62: 'rock python, rock snake, Python sebae', + 63: 'Indian cobra, Naja naja', + 64: 'green mamba', + 65: 'sea snake', + 66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus', + 67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus', + 68: 'sidewinder, horned rattlesnake, Crotalus cerastes', + 69: 'trilobite', + 70: 'harvestman, daddy longlegs, Phalangium opilio', + 71: 'scorpion', + 72: 'black and gold garden spider, Argiope aurantia', + 73: 'barn spider, Araneus cavaticus', + 74: 'garden spider, Aranea diademata', + 75: 'black widow, Latrodectus mactans', + 76: 'tarantula', + 77: 'wolf spider, hunting spider', + 78: 'tick', + 79: 'centipede', + 80: 'black grouse', + 81: 'ptarmigan', + 82: 'ruffed grouse, partridge, Bonasa umbellus', + 83: 'prairie chicken, prairie grouse, prairie fowl', + 84: 'peacock', + 85: 'quail', + 86: 'partridge', + 87: 'African grey, African gray, Psittacus erithacus', + 88: 'macaw', + 89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', + 90: 'lorikeet', + 91: 'coucal', + 92: 'bee eater', + 93: 'hornbill', + 94: 'hummingbird', + 95: 'jacamar', + 96: 'toucan', + 97: 'drake', + 98: 'red-breasted merganser, Mergus serrator', + 99: 'goose', + 100: 'black swan, Cygnus atratus', + 101: 'tusker', + 102: 'echidna, spiny anteater, anteater', + 103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus', + 104: 'wallaby, brush kangaroo', + 105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', + 106: 'wombat', + 107: 'jellyfish', + 108: 'sea anemone, anemone', + 109: 'brain coral', + 110: 'flatworm, platyhelminth', + 111: 'nematode, nematode worm, roundworm', + 112: 'conch', + 113: 'snail', + 114: 'slug', + 115: 'sea slug, nudibranch', + 116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore', + 117: 'chambered nautilus, pearly nautilus, nautilus', + 118: 'Dungeness crab, Cancer magister', + 119: 'rock crab, Cancer irroratus', + 120: 'fiddler crab', + 121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', + 122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus', + 123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', + 124: 'crayfish, crawfish, crawdad, crawdaddy', + 125: 'hermit crab', + 126: 'isopod', + 127: 'white stork, Ciconia ciconia', + 128: 'black stork, Ciconia nigra', + 129: 'spoonbill', + 130: 'flamingo', + 131: 'little blue heron, Egretta caerulea', + 132: 'American egret, great white heron, Egretta albus', + 133: 'bittern', + 134: 'crane', + 135: 'limpkin, Aramus pictus', + 136: 'European gallinule, Porphyrio porphyrio', + 137: 'American coot, marsh hen, mud hen, water hen, 
Fulica americana', + 138: 'bustard', + 139: 'ruddy turnstone, Arenaria interpres', + 140: 'red-backed sandpiper, dunlin, Erolia alpina', + 141: 'redshank, Tringa totanus', + 142: 'dowitcher', + 143: 'oystercatcher, oyster catcher', + 144: 'pelican', + 145: 'king penguin, Aptenodytes patagonica', + 146: 'albatross, mollymawk', + 147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', + 148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca', + 149: 'dugong, Dugong dugon', + 150: 'sea lion', + 151: 'Chihuahua', + 152: 'Japanese spaniel', + 153: 'Maltese dog, Maltese terrier, Maltese', + 154: 'Pekinese, Pekingese, Peke', + 155: 'Shih-Tzu', + 156: 'Blenheim spaniel', + 157: 'papillon', + 158: 'toy terrier', + 159: 'Rhodesian ridgeback', + 160: 'Afghan hound, Afghan', + 161: 'basset, basset hound', + 162: 'beagle', + 163: 'bloodhound, sleuthhound', + 164: 'bluetick', + 165: 'black-and-tan coonhound', + 166: 'Walker hound, Walker foxhound', + 167: 'English foxhound', + 168: 'redbone', + 169: 'borzoi, Russian wolfhound', + 170: 'Irish wolfhound', + 171: 'Italian greyhound', + 172: 'whippet', + 173: 'Ibizan hound, Ibizan Podenco', + 174: 'Norwegian elkhound, elkhound', + 175: 'otterhound, otter hound', + 176: 'Saluki, gazelle hound', + 177: 'Scottish deerhound, deerhound', + 178: 'Weimaraner', + 179: 'Staffordshire bullterrier, Staffordshire bull terrier', + 180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', + 181: 'Bedlington terrier', + 182: 'Border terrier', + 183: 'Kerry blue terrier', + 184: 'Irish terrier', + 185: 'Norfolk terrier', + 186: 'Norwich terrier', + 187: 'Yorkshire terrier', + 188: 'wire-haired fox terrier', + 189: 'Lakeland terrier', + 190: 'Sealyham terrier, Sealyham', + 191: 'Airedale, Airedale terrier', + 192: 'cairn, cairn terrier', + 193: 'Australian terrier', + 194: 'Dandie Dinmont, Dandie Dinmont terrier', + 195: 'Boston bull, Boston terrier', + 196: 'miniature schnauzer', + 197: 'giant schnauzer', + 198: 'standard schnauzer', + 199: 'Scotch terrier, Scottish terrier, Scottie', + 200: 'Tibetan terrier, chrysanthemum dog', + 201: 'silky terrier, Sydney silky', + 202: 'soft-coated wheaten terrier', + 203: 'West Highland white terrier', + 204: 'Lhasa, Lhasa apso', + 205: 'flat-coated retriever', + 206: 'curly-coated retriever', + 207: 'golden retriever', + 208: 'Labrador retriever', + 209: 'Chesapeake Bay retriever', + 210: 'German short-haired pointer', + 211: 'vizsla, Hungarian pointer', + 212: 'English setter', + 213: 'Irish setter, red setter', + 214: 'Gordon setter', + 215: 'Brittany spaniel', + 216: 'clumber, clumber spaniel', + 217: 'English springer, English springer spaniel', + 218: 'Welsh springer spaniel', + 219: 'cocker spaniel, English cocker spaniel, cocker', + 220: 'Sussex spaniel', + 221: 'Irish water spaniel', + 222: 'kuvasz', + 223: 'schipperke', + 224: 'groenendael', + 225: 'malinois', + 226: 'briard', + 227: 'kelpie', + 228: 'komondor', + 229: 'Old English sheepdog, bobtail', + 230: 'Shetland sheepdog, Shetland sheep dog, Shetland', + 231: 'collie', + 232: 'Border collie', + 233: 'Bouvier des Flandres, Bouviers des Flandres', + 234: 'Rottweiler', + 235: 'German shepherd, German shepherd dog, German police dog, alsatian', + 236: 'Doberman, Doberman pinscher', + 237: 'miniature pinscher', + 238: 'Greater Swiss Mountain dog', + 239: 'Bernese mountain dog', + 240: 'Appenzeller', + 241: 'EntleBucher', + 242: 'boxer', + 243: 'bull mastiff', + 244: 'Tibetan 
mastiff', + 245: 'French bulldog', + 246: 'Great Dane', + 247: 'Saint Bernard, St Bernard', + 248: 'Eskimo dog, husky', + 249: 'malamute, malemute, Alaskan malamute', + 250: 'Siberian husky', + 251: 'dalmatian, coach dog, carriage dog', + 252: 'affenpinscher, monkey pinscher, monkey dog', + 253: 'basenji', + 254: 'pug, pug-dog', + 255: 'Leonberg', + 256: 'Newfoundland, Newfoundland dog', + 257: 'Great Pyrenees', + 258: 'Samoyed, Samoyede', + 259: 'Pomeranian', + 260: 'chow, chow chow', + 261: 'keeshond', + 262: 'Brabancon griffon', + 263: 'Pembroke, Pembroke Welsh corgi', + 264: 'Cardigan, Cardigan Welsh corgi', + 265: 'toy poodle', + 266: 'miniature poodle', + 267: 'standard poodle', + 268: 'Mexican hairless', + 269: 'timber wolf, grey wolf, gray wolf, Canis lupus', + 270: 'white wolf, Arctic wolf, Canis lupus tundrarum', + 271: 'red wolf, maned wolf, Canis rufus, Canis niger', + 272: 'coyote, prairie wolf, brush wolf, Canis latrans', + 273: 'dingo, warrigal, warragal, Canis dingo', + 274: 'dhole, Cuon alpinus', + 275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus', + 276: 'hyena, hyaena', + 277: 'red fox, Vulpes vulpes', + 278: 'kit fox, Vulpes macrotis', + 279: 'Arctic fox, white fox, Alopex lagopus', + 280: 'grey fox, gray fox, Urocyon cinereoargenteus', + 281: 'tabby, tabby cat', + 282: 'tiger cat', + 283: 'Persian cat', + 284: 'Siamese cat, Siamese', + 285: 'Egyptian cat', + 286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', + 287: 'lynx, catamount', + 288: 'leopard, Panthera pardus', + 289: 'snow leopard, ounce, Panthera uncia', + 290: 'jaguar, panther, Panthera onca, Felis onca', + 291: 'lion, king of beasts, Panthera leo', + 292: 'tiger, Panthera tigris', + 293: 'cheetah, chetah, Acinonyx jubatus', + 294: 'brown bear, bruin, Ursus arctos', + 295: 'American black bear, black bear, Ursus americanus, Euarctos americanus', + 296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus', + 297: 'sloth bear, Melursus ursinus, Ursus ursinus', + 298: 'mongoose', + 299: 'meerkat, mierkat', + 300: 'tiger beetle', + 301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', + 302: 'ground beetle, carabid beetle', + 303: 'long-horned beetle, longicorn, longicorn beetle', + 304: 'leaf beetle, chrysomelid', + 305: 'dung beetle', + 306: 'rhinoceros beetle', + 307: 'weevil', + 308: 'fly', + 309: 'bee', + 310: 'ant, emmet, pismire', + 311: 'grasshopper, hopper', + 312: 'cricket', + 313: 'walking stick, walkingstick, stick insect', + 314: 'cockroach, roach', + 315: 'mantis, mantid', + 316: 'cicada, cicala', + 317: 'leafhopper', + 318: 'lacewing, lacewing fly', + 319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", + 320: 'damselfly', + 321: 'admiral', + 322: 'ringlet, ringlet butterfly', + 323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus', + 324: 'cabbage butterfly', + 325: 'sulphur butterfly, sulfur butterfly', + 326: 'lycaenid, lycaenid butterfly', + 327: 'starfish, sea star', + 328: 'sea urchin', + 329: 'sea cucumber, holothurian', + 330: 'wood rabbit, cottontail, cottontail rabbit', + 331: 'hare', + 332: 'Angora, Angora rabbit', + 333: 'hamster', + 334: 'porcupine, hedgehog', + 335: 'fox squirrel, eastern fox squirrel, Sciurus niger', + 336: 'marmot', + 337: 'beaver', + 338: 'guinea pig, Cavia cobaya', + 339: 'sorrel', + 340: 'zebra', + 341: 'hog, pig, grunter, squealer, Sus scrofa', + 342: 'wild boar, boar, Sus scrofa', + 343: 
'warthog', + 344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius', + 345: 'ox', + 346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis', + 347: 'bison', + 348: 'ram, tup', + 349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', + 350: 'ibex, Capra ibex', + 351: 'hartebeest', + 352: 'impala, Aepyceros melampus', + 353: 'gazelle', + 354: 'Arabian camel, dromedary, Camelus dromedarius', + 355: 'llama', + 356: 'weasel', + 357: 'mink', + 358: 'polecat, fitch, foulmart, foumart, Mustela putorius', + 359: 'black-footed ferret, ferret, Mustela nigripes', + 360: 'otter', + 361: 'skunk, polecat, wood pussy', + 362: 'badger', + 363: 'armadillo', + 364: 'three-toed sloth, ai, Bradypus tridactylus', + 365: 'orangutan, orang, orangutang, Pongo pygmaeus', + 366: 'gorilla, Gorilla gorilla', + 367: 'chimpanzee, chimp, Pan troglodytes', + 368: 'gibbon, Hylobates lar', + 369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus', + 370: 'guenon, guenon monkey', + 371: 'patas, hussar monkey, Erythrocebus patas', + 372: 'baboon', + 373: 'macaque', + 374: 'langur', + 375: 'colobus, colobus monkey', + 376: 'proboscis monkey, Nasalis larvatus', + 377: 'marmoset', + 378: 'capuchin, ringtail, Cebus capucinus', + 379: 'howler monkey, howler', + 380: 'titi, titi monkey', + 381: 'spider monkey, Ateles geoffroyi', + 382: 'squirrel monkey, Saimiri sciureus', + 383: 'Madagascar cat, ring-tailed lemur, Lemur catta', + 384: 'indri, indris, Indri indri, Indri brevicaudatus', + 385: 'Indian elephant, Elephas maximus', + 386: 'African elephant, Loxodonta africana', + 387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', + 388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', + 389: 'barracouta, snoek', + 390: 'eel', + 391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', + 392: 'rock beauty, Holocanthus tricolor', + 393: 'anemone fish', + 394: 'sturgeon', + 395: 'gar, garfish, garpike, billfish, Lepisosteus osseus', + 396: 'lionfish', + 397: 'puffer, pufferfish, blowfish, globefish', + 398: 'abacus', + 399: 'abaya', + 400: "academic gown, academic robe, judge's robe", + 401: 'accordion, piano accordion, squeeze box', + 402: 'acoustic guitar', + 403: 'aircraft carrier, carrier, flattop, attack aircraft carrier', + 404: 'airliner', + 405: 'airship, dirigible', + 406: 'altar', + 407: 'ambulance', + 408: 'amphibian, amphibious vehicle', + 409: 'analog clock', + 410: 'apiary, bee house', + 411: 'apron', + 412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', + 413: 'assault rifle, assault gun', + 414: 'backpack, back pack, knapsack, packsack, rucksack, haversack', + 415: 'bakery, bakeshop, bakehouse', + 416: 'balance beam, beam', + 417: 'balloon', + 418: 'ballpoint, ballpoint pen, ballpen, Biro', + 419: 'Band Aid', + 420: 'banjo', + 421: 'bannister, banister, balustrade, balusters, handrail', + 422: 'barbell', + 423: 'barber chair', + 424: 'barbershop', + 425: 'barn', + 426: 'barometer', + 427: 'barrel, cask', + 428: 'barrow, garden cart, lawn cart, wheelbarrow', + 429: 'baseball', + 430: 'basketball', + 431: 'bassinet', + 432: 'bassoon', + 433: 'bathing cap, swimming cap', + 434: 'bath towel', + 435: 'bathtub, bathing tub, bath, tub', + 436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', + 437: 'beacon, lighthouse, beacon light, pharos', + 438: 'beaker', + 439: 'bearskin, busby, shako', + 440: 
'beer bottle', + 441: 'beer glass', + 442: 'bell cote, bell cot', + 443: 'bib', + 444: 'bicycle-built-for-two, tandem bicycle, tandem', + 445: 'bikini, two-piece', + 446: 'binder, ring-binder', + 447: 'binoculars, field glasses, opera glasses', + 448: 'birdhouse', + 449: 'boathouse', + 450: 'bobsled, bobsleigh, bob', + 451: 'bolo tie, bolo, bola tie, bola', + 452: 'bonnet, poke bonnet', + 453: 'bookcase', + 454: 'bookshop, bookstore, bookstall', + 455: 'bottlecap', + 456: 'bow', + 457: 'bow tie, bow-tie, bowtie', + 458: 'brass, memorial tablet, plaque', + 459: 'brassiere, bra, bandeau', + 460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty', + 461: 'breastplate, aegis, egis', + 462: 'broom', + 463: 'bucket, pail', + 464: 'buckle', + 465: 'bulletproof vest', + 466: 'bullet train, bullet', + 467: 'butcher shop, meat market', + 468: 'cab, hack, taxi, taxicab', + 469: 'caldron, cauldron', + 470: 'candle, taper, wax light', + 471: 'cannon', + 472: 'canoe', + 473: 'can opener, tin opener', + 474: 'cardigan', + 475: 'car mirror', + 476: 'carousel, carrousel, merry-go-round, roundabout, whirligig', + 477: "carpenter's kit, tool kit", + 478: 'carton', + 479: 'car wheel', + 480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', + 481: 'cassette', + 482: 'cassette player', + 483: 'castle', + 484: 'catamaran', + 485: 'CD player', + 486: 'cello, violoncello', + 487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone', + 488: 'chain', + 489: 'chainlink fence', + 490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', + 491: 'chain saw, chainsaw', + 492: 'chest', + 493: 'chiffonier, commode', + 494: 'chime, bell, gong', + 495: 'china cabinet, china closet', + 496: 'Christmas stocking', + 497: 'church, church building', + 498: 'cinema, movie theater, movie theatre, movie house, picture palace', + 499: 'cleaver, meat cleaver, chopper', + 500: 'cliff dwelling', + 501: 'cloak', + 502: 'clog, geta, patten, sabot', + 503: 'cocktail shaker', + 504: 'coffee mug', + 505: 'coffeepot', + 506: 'coil, spiral, volute, whorl, helix', + 507: 'combination lock', + 508: 'computer keyboard, keypad', + 509: 'confectionery, confectionary, candy store', + 510: 'container ship, containership, container vessel', + 511: 'convertible', + 512: 'corkscrew, bottle screw', + 513: 'cornet, horn, trumpet, trump', + 514: 'cowboy boot', + 515: 'cowboy hat, ten-gallon hat', + 516: 'cradle', + 517: 'crane', + 518: 'crash helmet', + 519: 'crate', + 520: 'crib, cot', + 521: 'Crock Pot', + 522: 'croquet ball', + 523: 'crutch', + 524: 'cuirass', + 525: 'dam, dike, dyke', + 526: 'desk', + 527: 'desktop computer', + 528: 'dial telephone, dial phone', + 529: 'diaper, nappy, napkin', + 530: 'digital clock', + 531: 'digital watch', + 532: 'dining table, board', + 533: 'dishrag, dishcloth', + 534: 'dishwasher, dish washer, dishwashing machine', + 535: 'disk brake, disc brake', + 536: 'dock, dockage, docking facility', + 537: 'dogsled, dog sled, dog sleigh', + 538: 'dome', + 539: 'doormat, welcome mat', + 540: 'drilling platform, offshore rig', + 541: 'drum, membranophone, tympan', + 542: 'drumstick', + 543: 'dumbbell', + 544: 'Dutch oven', + 545: 'electric fan, blower', + 546: 'electric guitar', + 547: 'electric locomotive', + 548: 'entertainment center', + 549: 'envelope', + 550: 'espresso maker', + 551: 'face powder', + 552: 'feather boa, boa', + 553: 'file, file cabinet, filing cabinet', + 554: 'fireboat', + 555: 
'fire engine, fire truck', + 556: 'fire screen, fireguard', + 557: 'flagpole, flagstaff', + 558: 'flute, transverse flute', + 559: 'folding chair', + 560: 'football helmet', + 561: 'forklift', + 562: 'fountain', + 563: 'fountain pen', + 564: 'four-poster', + 565: 'freight car', + 566: 'French horn, horn', + 567: 'frying pan, frypan, skillet', + 568: 'fur coat', + 569: 'garbage truck, dustcart', + 570: 'gasmask, respirator, gas helmet', + 571: 'gas pump, gasoline pump, petrol pump, island dispenser', + 572: 'goblet', + 573: 'go-kart', + 574: 'golf ball', + 575: 'golfcart, golf cart', + 576: 'gondola', + 577: 'gong, tam-tam', + 578: 'gown', + 579: 'grand piano, grand', + 580: 'greenhouse, nursery, glasshouse', + 581: 'grille, radiator grille', + 582: 'grocery store, grocery, food market, market', + 583: 'guillotine', + 584: 'hair slide', + 585: 'hair spray', + 586: 'half track', + 587: 'hammer', + 588: 'hamper', + 589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier', + 590: 'hand-held computer, hand-held microcomputer', + 591: 'handkerchief, hankie, hanky, hankey', + 592: 'hard disc, hard disk, fixed disk', + 593: 'harmonica, mouth organ, harp, mouth harp', + 594: 'harp', + 595: 'harvester, reaper', + 596: 'hatchet', + 597: 'holster', + 598: 'home theater, home theatre', + 599: 'honeycomb', + 600: 'hook, claw', + 601: 'hoopskirt, crinoline', + 602: 'horizontal bar, high bar', + 603: 'horse cart, horse-cart', + 604: 'hourglass', + 605: 'iPod', + 606: 'iron, smoothing iron', + 607: "jack-o'-lantern", + 608: 'jean, blue jean, denim', + 609: 'jeep, landrover', + 610: 'jersey, T-shirt, tee shirt', + 611: 'jigsaw puzzle', + 612: 'jinrikisha, ricksha, rickshaw', + 613: 'joystick', + 614: 'kimono', + 615: 'knee pad', + 616: 'knot', + 617: 'lab coat, laboratory coat', + 618: 'ladle', + 619: 'lampshade, lamp shade', + 620: 'laptop, laptop computer', + 621: 'lawn mower, mower', + 622: 'lens cap, lens cover', + 623: 'letter opener, paper knife, paperknife', + 624: 'library', + 625: 'lifeboat', + 626: 'lighter, light, igniter, ignitor', + 627: 'limousine, limo', + 628: 'liner, ocean liner', + 629: 'lipstick, lip rouge', + 630: 'Loafer', + 631: 'lotion', + 632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', + 633: "loupe, jeweler's loupe", + 634: 'lumbermill, sawmill', + 635: 'magnetic compass', + 636: 'mailbag, postbag', + 637: 'mailbox, letter box', + 638: 'maillot', + 639: 'maillot, tank suit', + 640: 'manhole cover', + 641: 'maraca', + 642: 'marimba, xylophone', + 643: 'mask', + 644: 'matchstick', + 645: 'maypole', + 646: 'maze, labyrinth', + 647: 'measuring cup', + 648: 'medicine chest, medicine cabinet', + 649: 'megalith, megalithic structure', + 650: 'microphone, mike', + 651: 'microwave, microwave oven', + 652: 'military uniform', + 653: 'milk can', + 654: 'minibus', + 655: 'miniskirt, mini', + 656: 'minivan', + 657: 'missile', + 658: 'mitten', + 659: 'mixing bowl', + 660: 'mobile home, manufactured home', + 661: 'Model T', + 662: 'modem', + 663: 'monastery', + 664: 'monitor', + 665: 'moped', + 666: 'mortar', + 667: 'mortarboard', + 668: 'mosque', + 669: 'mosquito net', + 670: 'motor scooter, scooter', + 671: 'mountain bike, all-terrain bike, off-roader', + 672: 'mountain tent', + 673: 'mouse, computer mouse', + 674: 'mousetrap', + 675: 'moving van', + 676: 'muzzle', + 677: 'nail', + 678: 'neck brace', + 679: 'necklace', + 680: 'nipple', + 681: 'notebook, notebook computer', + 682: 'obelisk', + 683: 'oboe, hautboy, hautbois', + 684: 'ocarina, sweet potato', 
+ 685: 'odometer, hodometer, mileometer, milometer', + 686: 'oil filter', + 687: 'organ, pipe organ', + 688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO', + 689: 'overskirt', + 690: 'oxcart', + 691: 'oxygen mask', + 692: 'packet', + 693: 'paddle, boat paddle', + 694: 'paddlewheel, paddle wheel', + 695: 'padlock', + 696: 'paintbrush', + 697: "pajama, pyjama, pj's, jammies", + 698: 'palace', + 699: 'panpipe, pandean pipe, syrinx', + 700: 'paper towel', + 701: 'parachute, chute', + 702: 'parallel bars, bars', + 703: 'park bench', + 704: 'parking meter', + 705: 'passenger car, coach, carriage', + 706: 'patio, terrace', + 707: 'pay-phone, pay-station', + 708: 'pedestal, plinth, footstall', + 709: 'pencil box, pencil case', + 710: 'pencil sharpener', + 711: 'perfume, essence', + 712: 'Petri dish', + 713: 'photocopier', + 714: 'pick, plectrum, plectron', + 715: 'pickelhaube', + 716: 'picket fence, paling', + 717: 'pickup, pickup truck', + 718: 'pier', + 719: 'piggy bank, penny bank', + 720: 'pill bottle', + 721: 'pillow', + 722: 'ping-pong ball', + 723: 'pinwheel', + 724: 'pirate, pirate ship', + 725: 'pitcher, ewer', + 726: "plane, carpenter's plane, woodworking plane", + 727: 'planetarium', + 728: 'plastic bag', + 729: 'plate rack', + 730: 'plow, plough', + 731: "plunger, plumber's helper", + 732: 'Polaroid camera, Polaroid Land camera', + 733: 'pole', + 734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', + 735: 'poncho', + 736: 'pool table, billiard table, snooker table', + 737: 'pop bottle, soda bottle', + 738: 'pot, flowerpot', + 739: "potter's wheel", + 740: 'power drill', + 741: 'prayer rug, prayer mat', + 742: 'printer', + 743: 'prison, prison house', + 744: 'projectile, missile', + 745: 'projector', + 746: 'puck, hockey puck', + 747: 'punching bag, punch bag, punching ball, punchball', + 748: 'purse', + 749: 'quill, quill pen', + 750: 'quilt, comforter, comfort, puff', + 751: 'racer, race car, racing car', + 752: 'racket, racquet', + 753: 'radiator', + 754: 'radio, wireless', + 755: 'radio telescope, radio reflector', + 756: 'rain barrel', + 757: 'recreational vehicle, RV, R.V.', + 758: 'reel', + 759: 'reflex camera', + 760: 'refrigerator, icebox', + 761: 'remote control, remote', + 762: 'restaurant, eating house, eating place, eatery', + 763: 'revolver, six-gun, six-shooter', + 764: 'rifle', + 765: 'rocking chair, rocker', + 766: 'rotisserie', + 767: 'rubber eraser, rubber, pencil eraser', + 768: 'rugby ball', + 769: 'rule, ruler', + 770: 'running shoe', + 771: 'safe', + 772: 'safety pin', + 773: 'saltshaker, salt shaker', + 774: 'sandal', + 775: 'sarong', + 776: 'sax, saxophone', + 777: 'scabbard', + 778: 'scale, weighing machine', + 779: 'school bus', + 780: 'schooner', + 781: 'scoreboard', + 782: 'screen, CRT screen', + 783: 'screw', + 784: 'screwdriver', + 785: 'seat belt, seatbelt', + 786: 'sewing machine', + 787: 'shield, buckler', + 788: 'shoe shop, shoe-shop, shoe store', + 789: 'shoji', + 790: 'shopping basket', + 791: 'shopping cart', + 792: 'shovel', + 793: 'shower cap', + 794: 'shower curtain', + 795: 'ski', + 796: 'ski mask', + 797: 'sleeping bag', + 798: 'slide rule, slipstick', + 799: 'sliding door', + 800: 'slot, one-armed bandit', + 801: 'snorkel', + 802: 'snowmobile', + 803: 'snowplow, snowplough', + 804: 'soap dispenser', + 805: 'soccer ball', + 806: 'sock', + 807: 'solar dish, solar collector, solar furnace', + 808: 'sombrero', + 809: 'soup bowl', + 810: 'space bar', + 811: 'space heater', + 812: 'space shuttle', + 813: 'spatula', 
+ 814: 'speedboat', + 815: "spider web, spider's web", + 816: 'spindle', + 817: 'sports car, sport car', + 818: 'spotlight, spot', + 819: 'stage', + 820: 'steam locomotive', + 821: 'steel arch bridge', + 822: 'steel drum', + 823: 'stethoscope', + 824: 'stole', + 825: 'stone wall', + 826: 'stopwatch, stop watch', + 827: 'stove', + 828: 'strainer', + 829: 'streetcar, tram, tramcar, trolley, trolley car', + 830: 'stretcher', + 831: 'studio couch, day bed', + 832: 'stupa, tope', + 833: 'submarine, pigboat, sub, U-boat', + 834: 'suit, suit of clothes', + 835: 'sundial', + 836: 'sunglass', + 837: 'sunglasses, dark glasses, shades', + 838: 'sunscreen, sunblock, sun blocker', + 839: 'suspension bridge', + 840: 'swab, swob, mop', + 841: 'sweatshirt', + 842: 'swimming trunks, bathing trunks', + 843: 'swing', + 844: 'switch, electric switch, electrical switch', + 845: 'syringe', + 846: 'table lamp', + 847: 'tank, army tank, armored combat vehicle, armoured combat vehicle', + 848: 'tape player', + 849: 'teapot', + 850: 'teddy, teddy bear', + 851: 'television, television system', + 852: 'tennis ball', + 853: 'thatch, thatched roof', + 854: 'theater curtain, theatre curtain', + 855: 'thimble', + 856: 'thresher, thrasher, threshing machine', + 857: 'throne', + 858: 'tile roof', + 859: 'toaster', + 860: 'tobacco shop, tobacconist shop, tobacconist', + 861: 'toilet seat', + 862: 'torch', + 863: 'totem pole', + 864: 'tow truck, tow car, wrecker', + 865: 'toyshop', + 866: 'tractor', + 867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi', + 868: 'tray', + 869: 'trench coat', + 870: 'tricycle, trike, velocipede', + 871: 'trimaran', + 872: 'tripod', + 873: 'triumphal arch', + 874: 'trolleybus, trolley coach, trackless trolley', + 875: 'trombone', + 876: 'tub, vat', + 877: 'turnstile', + 878: 'typewriter keyboard', + 879: 'umbrella', + 880: 'unicycle, monocycle', + 881: 'upright, upright piano', + 882: 'vacuum, vacuum cleaner', + 883: 'vase', + 884: 'vault', + 885: 'velvet', + 886: 'vending machine', + 887: 'vestment', + 888: 'viaduct', + 889: 'violin, fiddle', + 890: 'volleyball', + 891: 'waffle iron', + 892: 'wall clock', + 893: 'wallet, billfold, notecase, pocketbook', + 894: 'wardrobe, closet, press', + 895: 'warplane, military plane', + 896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin', + 897: 'washer, automatic washer, washing machine', + 898: 'water bottle', + 899: 'water jug', + 900: 'water tower', + 901: 'whiskey jug', + 902: 'whistle', + 903: 'wig', + 904: 'window screen', + 905: 'window shade', + 906: 'Windsor tie', + 907: 'wine bottle', + 908: 'wing', + 909: 'wok', + 910: 'wooden spoon', + 911: 'wool, woolen, woollen', + 912: 'worm fence, snake fence, snake-rail fence, Virginia fence', + 913: 'wreck', + 914: 'yawl', + 915: 'yurt', + 916: 'web site, website, internet site, site', + 917: 'comic book', + 918: 'crossword puzzle, crossword', + 919: 'street sign', + 920: 'traffic light, traffic signal, stoplight', + 921: 'book jacket, dust cover, dust jacket, dust wrapper', + 922: 'menu', + 923: 'plate', + 924: 'guacamole', + 925: 'consomme', + 926: 'hot pot, hotpot', + 927: 'trifle', + 928: 'ice cream, icecream', + 929: 'ice lolly, lolly, lollipop, popsicle', + 930: 'French loaf', + 931: 'bagel, beigel', + 932: 'pretzel', + 933: 'cheeseburger', + 934: 'hotdog, hot dog, red hot', + 935: 'mashed potato', + 936: 'head cabbage', + 937: 'broccoli', + 938: 'cauliflower', + 939: 'zucchini, courgette', + 940: 'spaghetti squash', + 941: 'acorn squash', + 942: 'butternut 
squash', + 943: 'cucumber, cuke', + 944: 'artichoke, globe artichoke', + 945: 'bell pepper', + 946: 'cardoon', + 947: 'mushroom', + 948: 'Granny Smith', + 949: 'strawberry', + 950: 'orange', + 951: 'lemon', + 952: 'fig', + 953: 'pineapple, ananas', + 954: 'banana', + 955: 'jackfruit, jak, jack', + 956: 'custard apple', + 957: 'pomegranate', + 958: 'hay', + 959: 'carbonara', + 960: 'chocolate sauce, chocolate syrup', + 961: 'dough', + 962: 'meat loaf, meatloaf', + 963: 'pizza, pizza pie', + 964: 'potpie', + 965: 'burrito', + 966: 'red wine', + 967: 'espresso', + 968: 'cup', + 969: 'eggnog', + 970: 'alp', + 971: 'bubble', + 972: 'cliff, drop, drop-off', + 973: 'coral reef', + 974: 'geyser', + 975: 'lakeside, lakeshore', + 976: 'promontory, headland, head, foreland', + 977: 'sandbar, sand bar', + 978: 'seashore, coast, seacoast, sea-coast', + 979: 'valley, vale', + 980: 'volcano', + 981: 'ballplayer, baseball player', + 982: 'groom, bridegroom', + 983: 'scuba diver', + 984: 'rapeseed', + 985: 'daisy', + 986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", + 987: 'corn', + 988: 'acorn', + 989: 'hip, rose hip, rosehip', + 990: 'buckeye, horse chestnut, conker', + 991: 'coral fungus', + 992: 'agaric', + 993: 'gyromitra', + 994: 'stinkhorn, carrion fungus', + 995: 'earthstar', + 996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa', + 997: 'bolete', + 998: 'ear, spike, capitulum', + 999: 'toilet tissue, toilet paper, bathroom tissue' \ No newline at end of file diff --git a/stable-diffusion/data/imagenet_train_hr_indices.p b/stable-diffusion/data/imagenet_train_hr_indices.p new file mode 100644 index 0000000..b8d6d46 Binary files /dev/null and b/stable-diffusion/data/imagenet_train_hr_indices.p differ diff --git a/stable-diffusion/data/imagenet_val_hr_indices.p b/stable-diffusion/data/imagenet_val_hr_indices.p new file mode 100644 index 0000000..744ad64 Binary files /dev/null and b/stable-diffusion/data/imagenet_val_hr_indices.p differ diff --git a/stable-diffusion/data/index_synset.yaml b/stable-diffusion/data/index_synset.yaml new file mode 100644 index 0000000..635ea71 --- /dev/null +++ b/stable-diffusion/data/index_synset.yaml @@ -0,0 +1,1000 @@ +0: n01440764 +1: n01443537 +2: n01484850 +3: n01491361 +4: n01494475 +5: n01496331 +6: n01498041 +7: n01514668 +8: n07646067 +9: n01518878 +10: n01530575 +11: n01531178 +12: n01532829 +13: n01534433 +14: n01537544 +15: n01558993 +16: n01560419 +17: n01580077 +18: n01582220 +19: n01592084 +20: n01601694 +21: n13382471 +22: n01614925 +23: n01616318 +24: n01622779 +25: n01629819 +26: n01630670 +27: n01631663 +28: n01632458 +29: n01632777 +30: n01641577 +31: n01644373 +32: n01644900 +33: n01664065 +34: n01665541 +35: n01667114 +36: n01667778 +37: n01669191 +38: n01675722 +39: n01677366 +40: n01682714 +41: n01685808 +42: n01687978 +43: n01688243 +44: n01689811 +45: n01692333 +46: n01693334 +47: n01694178 +48: n01695060 +49: n01697457 +50: n01698640 +51: n01704323 +52: n01728572 +53: n01728920 +54: n01729322 +55: n01729977 +56: n01734418 +57: n01735189 +58: n01737021 +59: n01739381 +60: n01740131 +61: n01742172 +62: n01744401 +63: n01748264 +64: n01749939 +65: n01751748 +66: n01753488 +67: n01755581 +68: n01756291 +69: n01768244 +70: n01770081 +71: n01770393 +72: n01773157 +73: n01773549 +74: n01773797 +75: n01774384 +76: n01774750 +77: n01775062 +78: n04432308 +79: n01784675 +80: n01795545 +81: n01796340 +82: n01797886 +83: n01798484 +84: n01806143 +85: n07647321 +86: n07647496 +87: 
n01817953 +88: n01818515 +89: n01819313 +90: n01820546 +91: n01824575 +92: n01828970 +93: n01829413 +94: n01833805 +95: n01843065 +96: n01843383 +97: n01847000 +98: n01855032 +99: n07646821 +100: n01860187 +101: n01871265 +102: n01872772 +103: n01873310 +104: n01877812 +105: n01882714 +106: n01883070 +107: n01910747 +108: n01914609 +109: n01917289 +110: n01924916 +111: n01930112 +112: n01943899 +113: n01944390 +114: n13719102 +115: n01950731 +116: n01955084 +117: n01968897 +118: n01978287 +119: n01978455 +120: n01980166 +121: n01981276 +122: n01983481 +123: n01984695 +124: n01985128 +125: n01986214 +126: n01990800 +127: n02002556 +128: n02002724 +129: n02006656 +130: n02007558 +131: n02009229 +132: n02009912 +133: n02011460 +134: n03126707 +135: n02013706 +136: n02017213 +137: n02018207 +138: n02018795 +139: n02025239 +140: n02027492 +141: n02028035 +142: n02033041 +143: n02037110 +144: n02051845 +145: n02056570 +146: n02058221 +147: n02066245 +148: n02071294 +149: n02074367 +150: n02077923 +151: n08742578 +152: n02085782 +153: n02085936 +154: n02086079 +155: n02086240 +156: n02086646 +157: n02086910 +158: n02087046 +159: n02087394 +160: n02088094 +161: n02088238 +162: n02088364 +163: n02088466 +164: n02088632 +165: n02089078 +166: n02089867 +167: n02089973 +168: n02090379 +169: n02090622 +170: n02090721 +171: n02091032 +172: n02091134 +173: n02091244 +174: n02091467 +175: n02091635 +176: n02091831 +177: n02092002 +178: n02092339 +179: n02093256 +180: n02093428 +181: n02093647 +182: n02093754 +183: n02093859 +184: n02093991 +185: n02094114 +186: n02094258 +187: n02094433 +188: n02095314 +189: n02095570 +190: n02095889 +191: n02096051 +192: n02096177 +193: n02096294 +194: n02096437 +195: n02096585 +196: n02097047 +197: n02097130 +198: n02097209 +199: n02097298 +200: n02097474 +201: n02097658 +202: n02098105 +203: n02098286 +204: n02098413 +205: n02099267 +206: n02099429 +207: n02099601 +208: n02099712 +209: n02099849 +210: n02100236 +211: n02100583 +212: n02100735 +213: n02100877 +214: n02101006 +215: n02101388 +216: n02101556 +217: n02102040 +218: n02102177 +219: n02102318 +220: n02102480 +221: n02102973 +222: n02104029 +223: n02104365 +224: n02105056 +225: n02105162 +226: n02105251 +227: n02105412 +228: n02105505 +229: n02105641 +230: n02105855 +231: n02106030 +232: n02106166 +233: n02106382 +234: n02106550 +235: n02106662 +236: n02107142 +237: n02107312 +238: n02107574 +239: n02107683 +240: n02107908 +241: n02108000 +242: n02108089 +243: n02108422 +244: n02108551 +245: n02108915 +246: n02109047 +247: n02109525 +248: n02109961 +249: n02110063 +250: n02110185 +251: n02110341 +252: n02110627 +253: n02110806 +254: n02110958 +255: n02111129 +256: n02111277 +257: n02111500 +258: n02111889 +259: n02112018 +260: n02112137 +261: n02112350 +262: n02112706 +263: n02113023 +264: n02113186 +265: n02113624 +266: n02113712 +267: n02113799 +268: n02113978 +269: n02114367 +270: n02114548 +271: n02114712 +272: n02114855 +273: n02115641 +274: n02115913 +275: n02116738 +276: n02117135 +277: n02119022 +278: n02119789 +279: n02120079 +280: n02120505 +281: n02123045 +282: n02123159 +283: n02123394 +284: n02123597 +285: n02124075 +286: n02125311 +287: n02127052 +288: n02128385 +289: n02128757 +290: n02128925 +291: n02129165 +292: n02129604 +293: n02130308 +294: n02132136 +295: n02133161 +296: n02134084 +297: n02134418 +298: n02137549 +299: n02138441 +300: n02165105 +301: n02165456 +302: n02167151 +303: n02168699 +304: n02169497 +305: n02172182 +306: n02174001 +307: n02177972 +308: n03373237 +309: n07975909 
+310: n02219486 +311: n02226429 +312: n02229544 +313: n02231487 +314: n02233338 +315: n02236044 +316: n02256656 +317: n02259212 +318: n02264363 +319: n02268443 +320: n02268853 +321: n02276258 +322: n02277742 +323: n02279972 +324: n02280649 +325: n02281406 +326: n02281787 +327: n02317335 +328: n02319095 +329: n02321529 +330: n02325366 +331: n02326432 +332: n02328150 +333: n02342885 +334: n02346627 +335: n02356798 +336: n02361337 +337: n05262120 +338: n02364673 +339: n02389026 +340: n02391049 +341: n02395406 +342: n02396427 +343: n02397096 +344: n02398521 +345: n02403003 +346: n02408429 +347: n02410509 +348: n02412080 +349: n02415577 +350: n02417914 +351: n02422106 +352: n02422699 +353: n02423022 +354: n02437312 +355: n02437616 +356: n10771990 +357: n14765497 +358: n02443114 +359: n02443484 +360: n14765785 +361: n02445715 +362: n02447366 +363: n02454379 +364: n02457408 +365: n02480495 +366: n02480855 +367: n02481823 +368: n02483362 +369: n02483708 +370: n02484975 +371: n02486261 +372: n02486410 +373: n02487347 +374: n02488291 +375: n02488702 +376: n02489166 +377: n02490219 +378: n02492035 +379: n02492660 +380: n02493509 +381: n02493793 +382: n02494079 +383: n02497673 +384: n02500267 +385: n02504013 +386: n02504458 +387: n02509815 +388: n02510455 +389: n02514041 +390: n07783967 +391: n02536864 +392: n02606052 +393: n02607072 +394: n02640242 +395: n02641379 +396: n02643566 +397: n02655020 +398: n02666347 +399: n02667093 +400: n02669723 +401: n02672831 +402: n02676566 +403: n02687172 +404: n02690373 +405: n02692877 +406: n02699494 +407: n02701002 +408: n02704792 +409: n02708093 +410: n02727426 +411: n08496334 +412: n02747177 +413: n02749479 +414: n02769748 +415: n02776631 +416: n02777292 +417: n02782329 +418: n02783161 +419: n02786058 +420: n02787622 +421: n02788148 +422: n02790996 +423: n02791124 +424: n02791270 +425: n02793495 +426: n02794156 +427: n02795169 +428: n02797295 +429: n02799071 +430: n02802426 +431: n02804515 +432: n02804610 +433: n02807133 +434: n02808304 +435: n02808440 +436: n02814533 +437: n02814860 +438: n02815834 +439: n02817516 +440: n02823428 +441: n02823750 +442: n02825657 +443: n02834397 +444: n02835271 +445: n02837789 +446: n02840245 +447: n02841315 +448: n02843684 +449: n02859443 +450: n02860847 +451: n02865351 +452: n02869837 +453: n02870880 +454: n02871525 +455: n02877765 +456: n02880308 +457: n02883205 +458: n02892201 +459: n02892767 +460: n02894605 +461: n02895154 +462: n12520864 +463: n02909870 +464: n02910353 +465: n02916936 +466: n02917067 +467: n02927161 +468: n02930766 +469: n02939185 +470: n02948072 +471: n02950826 +472: n02951358 +473: n02951585 +474: n02963159 +475: n02965783 +476: n02966193 +477: n02966687 +478: n02971356 +479: n02974003 +480: n02977058 +481: n02978881 +482: n02979186 +483: n02980441 +484: n02981792 +485: n02988304 +486: n02992211 +487: n02992529 +488: n13652994 +489: n03000134 +490: n03000247 +491: n03000684 +492: n03014705 +493: n03016953 +494: n03017168 +495: n03018349 +496: n03026506 +497: n03028079 +498: n03032252 +499: n03041632 +500: n03042490 +501: n03045698 +502: n03047690 +503: n03062245 +504: n03063599 +505: n03063689 +506: n03065424 +507: n03075370 +508: n03085013 +509: n03089624 +510: n03095699 +511: n03100240 +512: n03109150 +513: n03110669 +514: n03124043 +515: n03124170 +516: n15142452 +517: n03126707 +518: n03127747 +519: n03127925 +520: n03131574 +521: n03133878 +522: n03134739 +523: n03141823 +524: n03146219 +525: n03160309 +526: n03179701 +527: n03180011 +528: n03187595 +529: n03188531 +530: n03196217 +531: n03197337 
+532: n03201208 +533: n03207743 +534: n03207941 +535: n03208938 +536: n03216828 +537: n03218198 +538: n13872072 +539: n03223299 +540: n03240683 +541: n03249569 +542: n07647870 +543: n03255030 +544: n03259401 +545: n03271574 +546: n03272010 +547: n03272562 +548: n03290653 +549: n13869788 +550: n03297495 +551: n03314780 +552: n03325584 +553: n03337140 +554: n03344393 +555: n03345487 +556: n03347037 +557: n03355925 +558: n03372029 +559: n03376595 +560: n03379051 +561: n03384352 +562: n03388043 +563: n03388183 +564: n03388549 +565: n03393912 +566: n03394916 +567: n03400231 +568: n03404251 +569: n03417042 +570: n03424325 +571: n03425413 +572: n03443371 +573: n03444034 +574: n03445777 +575: n03445924 +576: n03447447 +577: n03447721 +578: n08286342 +579: n03452741 +580: n03457902 +581: n03459775 +582: n03461385 +583: n03467068 +584: n03476684 +585: n03476991 +586: n03478589 +587: n03482001 +588: n03482405 +589: n03483316 +590: n03485407 +591: n03485794 +592: n03492542 +593: n03494278 +594: n03495570 +595: n10161363 +596: n03498962 +597: n03527565 +598: n03529860 +599: n09218315 +600: n03532672 +601: n03534580 +602: n03535780 +603: n03538406 +604: n03544143 +605: n03584254 +606: n03584829 +607: n03590841 +608: n03594734 +609: n03594945 +610: n03595614 +611: n03598930 +612: n03599486 +613: n03602883 +614: n03617480 +615: n03623198 +616: n15102712 +617: n03630383 +618: n03633091 +619: n03637318 +620: n03642806 +621: n03649909 +622: n03657121 +623: n03658185 +624: n07977870 +625: n03662601 +626: n03666591 +627: n03670208 +628: n03673027 +629: n03676483 +630: n03680355 +631: n03690938 +632: n03691459 +633: n03692522 +634: n03697007 +635: n03706229 +636: n03709823 +637: n03710193 +638: n03710637 +639: n03710721 +640: n03717622 +641: n03720891 +642: n03721384 +643: n03725035 +644: n03729826 +645: n03733131 +646: n03733281 +647: n03733805 +648: n03742115 +649: n03743016 +650: n03759954 +651: n03761084 +652: n03763968 +653: n03764736 +654: n03769881 +655: n03770439 +656: n03770679 +657: n03773504 +658: n03775071 +659: n03775546 +660: n03776460 +661: n03777568 +662: n03777754 +663: n03781244 +664: n03782006 +665: n03785016 +666: n14955889 +667: n03787032 +668: n03788195 +669: n03788365 +670: n03791053 +671: n03792782 +672: n03792972 +673: n03793489 +674: n03794056 +675: n03796401 +676: n03803284 +677: n13652335 +678: n03814639 +679: n03814906 +680: n03825788 +681: n03832673 +682: n03837869 +683: n03838899 +684: n03840681 +685: n03841143 +686: n03843555 +687: n03854065 +688: n03857828 +689: n03866082 +690: n03868242 +691: n03868863 +692: n07281099 +693: n03873416 +694: n03874293 +695: n03874599 +696: n03876231 +697: n03877472 +698: n08053121 +699: n03884397 +700: n03887697 +701: n03888257 +702: n03888605 +703: n03891251 +704: n03891332 +705: n03895866 +706: n03899768 +707: n03902125 +708: n03903868 +709: n03908618 +710: n03908714 +711: n03916031 +712: n03920288 +713: n03924679 +714: n03929660 +715: n03929855 +716: n03930313 +717: n03930630 +718: n03934042 +719: n03935335 +720: n03937543 +721: n03938244 +722: n03942813 +723: n03944341 +724: n03947888 +725: n03950228 +726: n03954731 +727: n03956157 +728: n03958227 +729: n03961711 +730: n03967562 +731: n03970156 +732: n03976467 +733: n08620881 +734: n03977966 +735: n03980874 +736: n03982430 +737: n03983396 +738: n03991062 +739: n03992509 +740: n03995372 +741: n03998194 +742: n04004767 +743: n13937284 +744: n04008634 +745: n04009801 +746: n04019541 +747: n04023962 +748: n13413294 +749: n04033901 +750: n04033995 +751: n04037443 +752: n04039381 +753: n09403211 
+754: n04041544 +755: n04044716 +756: n04049303 +757: n04065272 +758: n07056680 +759: n04069434 +760: n04070727 +761: n04074963 +762: n04081281 +763: n04086273 +764: n04090263 +765: n04099969 +766: n04111531 +767: n04116512 +768: n04118538 +769: n04118776 +770: n04120489 +771: n04125116 +772: n04127249 +773: n04131690 +774: n04133789 +775: n04136333 +776: n04141076 +777: n04141327 +778: n04141975 +779: n04146614 +780: n04147291 +781: n04149813 +782: n04152593 +783: n04154340 +784: n07917272 +785: n04162706 +786: n04179913 +787: n04192698 +788: n04200800 +789: n04201297 +790: n04204238 +791: n04204347 +792: n04208427 +793: n04209133 +794: n04209239 +795: n04228054 +796: n04229816 +797: n04235860 +798: n04238763 +799: n04239074 +800: n04243546 +801: n04251144 +802: n04252077 +803: n04252225 +804: n04254120 +805: n04254680 +806: n04254777 +807: n04258138 +808: n04259630 +809: n04263257 +810: n04264628 +811: n04265275 +812: n04266014 +813: n04270147 +814: n04273569 +815: n04275363 +816: n05605498 +817: n04285008 +818: n04286575 +819: n08646566 +820: n04310018 +821: n04311004 +822: n04311174 +823: n04317175 +824: n04325704 +825: n04326547 +826: n04328186 +827: n04330267 +828: n04332243 +829: n04335435 +830: n04337157 +831: n04344873 +832: n04346328 +833: n04347754 +834: n04350905 +835: n04355338 +836: n04355933 +837: n04356056 +838: n04357314 +839: n04366367 +840: n04367480 +841: n04370456 +842: n04371430 +843: n14009946 +844: n04372370 +845: n04376876 +846: n04380533 +847: n04389033 +848: n04392985 +849: n04398044 +850: n04399382 +851: n04404412 +852: n04409515 +853: n04417672 +854: n04418357 +855: n04423845 +856: n04428191 +857: n04429376 +858: n04435653 +859: n04442312 +860: n04443257 +861: n04447861 +862: n04456115 +863: n04458633 +864: n04461696 +865: n04462240 +866: n04465666 +867: n04467665 +868: n04476259 +869: n04479046 +870: n04482393 +871: n04483307 +872: n04485082 +873: n04486054 +874: n04487081 +875: n04487394 +876: n04493381 +877: n04501370 +878: n04505470 +879: n04507155 +880: n04509417 +881: n04515003 +882: n04517823 +883: n04522168 +884: n04523525 +885: n04525038 +886: n04525305 +887: n04532106 +888: n04532670 +889: n04536866 +890: n04540053 +891: n04542943 +892: n04548280 +893: n04548362 +894: n04550184 +895: n04552348 +896: n04553703 +897: n04554684 +898: n04557648 +899: n04560804 +900: n04562935 +901: n04579145 +902: n04579667 +903: n04584207 +904: n04589890 +905: n04590129 +906: n04591157 +907: n04591713 +908: n10782135 +909: n04596742 +910: n04598010 +911: n04599235 +912: n04604644 +913: n14423870 +914: n04612504 +915: n04613696 +916: n06359193 +917: n06596364 +918: n06785654 +919: n06794110 +920: n06874185 +921: n07248320 +922: n07565083 +923: n07657664 +924: n07583066 +925: n07584110 +926: n07590611 +927: n07613480 +928: n07614500 +929: n07615774 +930: n07684084 +931: n07693725 +932: n07695742 +933: n07697313 +934: n07697537 +935: n07711569 +936: n07714571 +937: n07714990 +938: n07715103 +939: n12159804 +940: n12160303 +941: n12160857 +942: n07717556 +943: n07718472 +944: n07718747 +945: n07720875 +946: n07730033 +947: n13001041 +948: n07742313 +949: n12630144 +950: n14991210 +951: n07749582 +952: n07753113 +953: n07753275 +954: n07753592 +955: n07754684 +956: n07760859 +957: n07768694 +958: n07802026 +959: n07831146 +960: n07836838 +961: n07860988 +962: n07871810 +963: n07873807 +964: n07875152 +965: n07880968 +966: n07892512 +967: n07920052 +968: n13904665 +969: n07932039 +970: n09193705 +971: n09229709 +972: n09246464 +973: n09256479 +974: n09288635 +975: n09332890 
+976: n09399592 +977: n09421951 +978: n09428293 +979: n09468604 +980: n09472597 +981: n09835506 +982: n10148035 +983: n10565667 +984: n11879895 +985: n11939491 +986: n12057211 +987: n12144580 +988: n12267677 +989: n12620546 +990: n12768682 +991: n12985857 +992: n12998815 +993: n13037406 +994: n13040303 +995: n13044778 +996: n13052670 +997: n13054560 +998: n13133613 +999: n15075141 diff --git a/stable-diffusion/data/inpainting_examples/6458524847_2f4c361183_k.png b/stable-diffusion/data/inpainting_examples/6458524847_2f4c361183_k.png new file mode 100644 index 0000000..3eb5a22 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/6458524847_2f4c361183_k.png differ diff --git a/stable-diffusion/data/inpainting_examples/6458524847_2f4c361183_k_mask.png b/stable-diffusion/data/inpainting_examples/6458524847_2f4c361183_k_mask.png new file mode 100644 index 0000000..6c77130 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/6458524847_2f4c361183_k_mask.png differ diff --git a/stable-diffusion/data/inpainting_examples/8399166846_f6fb4e4b8e_k.png b/stable-diffusion/data/inpainting_examples/8399166846_f6fb4e4b8e_k.png new file mode 100644 index 0000000..63ac989 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/8399166846_f6fb4e4b8e_k.png differ diff --git a/stable-diffusion/data/inpainting_examples/8399166846_f6fb4e4b8e_k_mask.png b/stable-diffusion/data/inpainting_examples/8399166846_f6fb4e4b8e_k_mask.png new file mode 100644 index 0000000..7eb67e4 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/8399166846_f6fb4e4b8e_k_mask.png differ diff --git a/stable-diffusion/data/inpainting_examples/alex-iby-G_Pk4D9rMLs.png b/stable-diffusion/data/inpainting_examples/alex-iby-G_Pk4D9rMLs.png new file mode 100644 index 0000000..7714a1f Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/alex-iby-G_Pk4D9rMLs.png differ diff --git a/stable-diffusion/data/inpainting_examples/alex-iby-G_Pk4D9rMLs_mask.png b/stable-diffusion/data/inpainting_examples/alex-iby-G_Pk4D9rMLs_mask.png new file mode 100644 index 0000000..0324f67 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/alex-iby-G_Pk4D9rMLs_mask.png differ diff --git a/stable-diffusion/data/inpainting_examples/bench2.png b/stable-diffusion/data/inpainting_examples/bench2.png new file mode 100644 index 0000000..09be46d Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/bench2.png differ diff --git a/stable-diffusion/data/inpainting_examples/bench2_mask.png b/stable-diffusion/data/inpainting_examples/bench2_mask.png new file mode 100644 index 0000000..bacadfa Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/bench2_mask.png differ diff --git a/stable-diffusion/data/inpainting_examples/bertrand-gabioud-CpuFzIsHYJ0.png b/stable-diffusion/data/inpainting_examples/bertrand-gabioud-CpuFzIsHYJ0.png new file mode 100644 index 0000000..618f200 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/bertrand-gabioud-CpuFzIsHYJ0.png differ diff --git a/stable-diffusion/data/inpainting_examples/bertrand-gabioud-CpuFzIsHYJ0_mask.png b/stable-diffusion/data/inpainting_examples/bertrand-gabioud-CpuFzIsHYJ0_mask.png new file mode 100644 index 0000000..fd18be9 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/bertrand-gabioud-CpuFzIsHYJ0_mask.png differ diff --git a/stable-diffusion/data/inpainting_examples/billow926-12-Wc-Zgx6Y.png 
b/stable-diffusion/data/inpainting_examples/billow926-12-Wc-Zgx6Y.png new file mode 100644 index 0000000..cbd246e Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/billow926-12-Wc-Zgx6Y.png differ diff --git a/stable-diffusion/data/inpainting_examples/billow926-12-Wc-Zgx6Y_mask.png b/stable-diffusion/data/inpainting_examples/billow926-12-Wc-Zgx6Y_mask.png new file mode 100644 index 0000000..7e51214 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/billow926-12-Wc-Zgx6Y_mask.png differ diff --git a/stable-diffusion/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png b/stable-diffusion/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png new file mode 100644 index 0000000..e84dfc8 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png differ diff --git a/stable-diffusion/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png b/stable-diffusion/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png new file mode 100644 index 0000000..7f3c753 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png differ diff --git a/stable-diffusion/data/inpainting_examples/photo-1583445095369-9c651e7e5d34.png b/stable-diffusion/data/inpainting_examples/photo-1583445095369-9c651e7e5d34.png new file mode 100644 index 0000000..e8999de Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/photo-1583445095369-9c651e7e5d34.png differ diff --git a/stable-diffusion/data/inpainting_examples/photo-1583445095369-9c651e7e5d34_mask.png b/stable-diffusion/data/inpainting_examples/photo-1583445095369-9c651e7e5d34_mask.png new file mode 100644 index 0000000..093d0c1 Binary files /dev/null and b/stable-diffusion/data/inpainting_examples/photo-1583445095369-9c651e7e5d34_mask.png differ diff --git a/stable-diffusion/environment.yaml b/stable-diffusion/environment.yaml new file mode 100644 index 0000000..025ced8 --- /dev/null +++ b/stable-diffusion/environment.yaml @@ -0,0 +1,31 @@ +name: ldm +channels: + - pytorch + - defaults +dependencies: + - python=3.8.5 + - pip=20.3 + - cudatoolkit=11.3 + - pytorch=1.11.0 + - torchvision=0.12.0 + - numpy=1.19.2 + - pip: + - albumentations==0.4.3 + - diffusers + - opencv-python==4.1.2.30 + - pudb==2019.2 + - invisible-watermark + - imageio==2.9.0 + - imageio-ffmpeg==0.4.2 + - pytorch-lightning==1.4.2 + - omegaconf==2.1.1 + - test-tube>=0.7.5 + - streamlit>=0.73.1 + - einops==0.3.0 + - torch-fidelity==0.3.0 + - transformers==4.19.2 + - torchmetrics==0.6.0 + - kornia==0.6 + - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers + - -e git+https://github.com/openai/CLIP.git@main#egg=clip + - -e . 
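The `environment.yaml` added above defines the `ldm` conda environment used by the bundled stable-diffusion code. A minimal setup sketch, assuming conda is available on the machine; the environment name `ldm` and the file path come from the file itself, everything else here is standard conda usage rather than something prescribed by this patch:

```shell
# Create the conda environment described by stable-diffusion/environment.yaml
# (this also resolves the pip section, including the editable installs of
# taming-transformers, CLIP, and the local package via `-e .`).
conda env create -f stable-diffusion/environment.yaml

# Activate it before running any of the stable-diffusion scripts in this repo.
conda activate ldm
```

Note that `pytorch=1.11.0` and `cudatoolkit=11.3` are pinned as a pair in the file; if a different CUDA version is needed, both entries should presumably be adjusted together rather than in isolation.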
diff --git a/stable-diffusion/get_test_data_df.py b/stable-diffusion/get_test_data_df.py new file mode 100644 index 0000000..dcbecbc --- /dev/null +++ b/stable-diffusion/get_test_data_df.py @@ -0,0 +1,36 @@ +import glob +import os + +import argparse +import random + +parser = argparse.ArgumentParser() + +parser.add_argument('--test_data_dir', type=str,default='../test_data') +parser.add_argument('--df_ckpt', type=str,default='SG161222/Realistic_Vision_V5.1_noVAE') +parser.add_argument('--sample_num', type=int,default=6) +parser.add_argument( + "--scale", + type=float, + default=5, + help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", + ) + +opt = parser.parse_args() +test_data_dir = opt.test_data_dir +df_ckpt = opt.df_ckpt +sample_num = opt.sample_num +scale = opt.scale + +for sub_dir in glob.glob(os.path.join(test_data_dir, '*')): + prompt_path = os.path.join(sub_dir, 'prompt.txt') + if os.path.exists(prompt_path): + with open(prompt_path, 'r') as f: + prompt = f.read().strip() + # print(prompt) + samples_dir = os.path.join(sub_dir, 'samples') + seed = random.randint(0, 100000) + if not os.path.exists(samples_dir): + cmd = f'python scripts/txt2realistic_human.py --outdir {sub_dir} --seed {seed} --H 512 --W 512 --n_samples 1 --scale {scale} --n_iter {sample_num} --prompt "{prompt}" --plms --ckpt {df_ckpt}' + print(cmd) + os.system(cmd) diff --git a/stable-diffusion/ldm/data/__init__.py b/stable-diffusion/ldm/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-diffusion/ldm/data/base.py b/stable-diffusion/ldm/data/base.py new file mode 100644 index 0000000..b196c2f --- /dev/null +++ b/stable-diffusion/ldm/data/base.py @@ -0,0 +1,23 @@ +from abc import abstractmethod +from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset + + +class Txt2ImgIterableBaseDataset(IterableDataset): + ''' + Define an interface to make the IterableDatasets for text2img data chainable + ''' + def __init__(self, num_records=0, valid_ids=None, size=256): + super().__init__() + self.num_records = num_records + self.valid_ids = valid_ids + self.sample_ids = valid_ids + self.size = size + + print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') + + def __len__(self): + return self.num_records + + @abstractmethod + def __iter__(self): + pass \ No newline at end of file diff --git a/stable-diffusion/ldm/data/imagenet.py b/stable-diffusion/ldm/data/imagenet.py new file mode 100644 index 0000000..1c473f9 --- /dev/null +++ b/stable-diffusion/ldm/data/imagenet.py @@ -0,0 +1,394 @@ +import os, yaml, pickle, shutil, tarfile, glob +import cv2 +import albumentations +import PIL +import numpy as np +import torchvision.transforms.functional as TF +from omegaconf import OmegaConf +from functools import partial +from PIL import Image +from tqdm import tqdm +from torch.utils.data import Dataset, Subset + +import taming.data.utils as tdu +from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve +from taming.data.imagenet import ImagePaths + +from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light + + +def synset2idx(path_to_yaml="data/index_synset.yaml"): + with open(path_to_yaml) as f: + di2s = yaml.load(f) + return dict((v,k) for k,v in di2s.items()) + + +class ImageNetBase(Dataset): + def __init__(self, config=None): + self.config = config or OmegaConf.create() + if not type(self.config)==dict: + self.config = 
OmegaConf.to_container(self.config) + self.keep_orig_class_label = self.config.get("keep_orig_class_label", False) + self.process_images = True # if False we skip loading & processing images and self.data contains filepaths + self._prepare() + self._prepare_synset_to_human() + self._prepare_idx_to_synset() + self._prepare_human_to_integer_label() + self._load() + + def __len__(self): + return len(self.data) + + def __getitem__(self, i): + return self.data[i] + + def _prepare(self): + raise NotImplementedError() + + def _filter_relpaths(self, relpaths): + ignore = set([ + "n06596364_9591.JPEG", + ]) + relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore] + if "sub_indices" in self.config: + indices = str_to_indices(self.config["sub_indices"]) + synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings + self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) + files = [] + for rpath in relpaths: + syn = rpath.split("/")[0] + if syn in synsets: + files.append(rpath) + return files + else: + return relpaths + + def _prepare_synset_to_human(self): + SIZE = 2655750 + URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" + self.human_dict = os.path.join(self.root, "synset_human.txt") + if (not os.path.exists(self.human_dict) or + not os.path.getsize(self.human_dict)==SIZE): + download(URL, self.human_dict) + + def _prepare_idx_to_synset(self): + URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" + self.idx2syn = os.path.join(self.root, "index_synset.yaml") + if (not os.path.exists(self.idx2syn)): + download(URL, self.idx2syn) + + def _prepare_human_to_integer_label(self): + URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" + self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt") + if (not os.path.exists(self.human2integer)): + download(URL, self.human2integer) + with open(self.human2integer, "r") as f: + lines = f.read().splitlines() + assert len(lines) == 1000 + self.human2integer_dict = dict() + for line in lines: + value, key = line.split(":") + self.human2integer_dict[key] = int(value) + + def _load(self): + with open(self.txt_filelist, "r") as f: + self.relpaths = f.read().splitlines() + l1 = len(self.relpaths) + self.relpaths = self._filter_relpaths(self.relpaths) + print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths))) + + self.synsets = [p.split("/")[0] for p in self.relpaths] + self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] + + unique_synsets = np.unique(self.synsets) + class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) + if not self.keep_orig_class_label: + self.class_labels = [class_dict[s] for s in self.synsets] + else: + self.class_labels = [self.synset2idx[s] for s in self.synsets] + + with open(self.human_dict, "r") as f: + human_dict = f.read().splitlines() + human_dict = dict(line.split(maxsplit=1) for line in human_dict) + + self.human_labels = [human_dict[s] for s in self.synsets] + + labels = { + "relpath": np.array(self.relpaths), + "synsets": np.array(self.synsets), + "class_label": np.array(self.class_labels), + "human_label": np.array(self.human_labels), + } + + if self.process_images: + self.size = retrieve(self.config, "size", default=256) + self.data = ImagePaths(self.abspaths, + 
labels=labels, + size=self.size, + random_crop=self.random_crop, + ) + else: + self.data = self.abspaths + + +class ImageNetTrain(ImageNetBase): + NAME = "ILSVRC2012_train" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" + FILES = [ + "ILSVRC2012_img_train.tar", + ] + SIZES = [ + 147897477120, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.process_images = process_images + self.data_root = data_root + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + + self.datadir = os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 1281167 + self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop", + default=True) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + print("Extracting sub-tars.") + subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) + for subpath in tqdm(subpaths): + subdir = subpath[:-len(".tar")] + os.makedirs(subdir, exist_ok=True) + with tarfile.open(subpath, "r:") as tar: + tar.extractall(path=subdir) + + filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + +class ImageNetValidation(ImageNetBase): + NAME = "ILSVRC2012_validation" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" + VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" + FILES = [ + "ILSVRC2012_img_val.tar", + "validation_synset.txt", + ] + SIZES = [ + 6744924160, + 1950000, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.data_root = data_root + self.process_images = process_images + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + self.datadir = os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 50000 + self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop", + default=False) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = 
os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + vspath = os.path.join(self.root, self.FILES[1]) + if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]: + download(self.VS_URL, vspath) + + with open(vspath, "r") as f: + synset_dict = f.read().splitlines() + synset_dict = dict(line.split() for line in synset_dict) + + print("Reorganizing into synset folders") + synsets = np.unique(list(synset_dict.values())) + for s in synsets: + os.makedirs(os.path.join(datadir, s), exist_ok=True) + for k, v in synset_dict.items(): + src = os.path.join(datadir, k) + dst = os.path.join(datadir, v) + shutil.move(src, dst) + + filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + + +class ImageNetSR(Dataset): + def __init__(self, size=None, + degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1., + random_crop=True): + """ + Imagenet Superresolution Dataloader + Performs following ops in order: + 1. crops a crop of size s from image either as random or center crop + 2. resizes crop to size with cv2.area_interpolation + 3. degrades resized crop with degradation_fn + + :param size: resizing to size after cropping + :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light + :param downscale_f: Low Resolution Downsample factor + :param min_crop_f: determines crop size s, + where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f) + :param max_crop_f: "" + :param data_root: + :param random_crop: + """ + self.base = self.get_base() + assert size + assert (size / downscale_f).is_integer() + self.size = size + self.LR_size = int(size / downscale_f) + self.min_crop_f = min_crop_f + self.max_crop_f = max_crop_f + assert(max_crop_f <= 1.) 
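+        # when random_crop is disabled, the crop taken in __getitem__ below is a centre crop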
+ self.center_crop = not random_crop + + self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA) + + self.pil_interpolation = False # gets reset later if incase interp_op is from pillow + + if degradation == "bsrgan": + self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f) + + elif degradation == "bsrgan_light": + self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f) + + else: + interpolation_fn = { + "cv_nearest": cv2.INTER_NEAREST, + "cv_bilinear": cv2.INTER_LINEAR, + "cv_bicubic": cv2.INTER_CUBIC, + "cv_area": cv2.INTER_AREA, + "cv_lanczos": cv2.INTER_LANCZOS4, + "pil_nearest": PIL.Image.NEAREST, + "pil_bilinear": PIL.Image.BILINEAR, + "pil_bicubic": PIL.Image.BICUBIC, + "pil_box": PIL.Image.BOX, + "pil_hamming": PIL.Image.HAMMING, + "pil_lanczos": PIL.Image.LANCZOS, + }[degradation] + + self.pil_interpolation = degradation.startswith("pil_") + + if self.pil_interpolation: + self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn) + + else: + self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, + interpolation=interpolation_fn) + + def __len__(self): + return len(self.base) + + def __getitem__(self, i): + example = self.base[i] + image = Image.open(example["file_path_"]) + + if not image.mode == "RGB": + image = image.convert("RGB") + + image = np.array(image).astype(np.uint8) + + min_side_len = min(image.shape[:2]) + crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None) + crop_side_len = int(crop_side_len) + + if self.center_crop: + self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len) + + else: + self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len) + + image = self.cropper(image=image)["image"] + image = self.image_rescaler(image=image)["image"] + + if self.pil_interpolation: + image_pil = PIL.Image.fromarray(image) + LR_image = self.degradation_process(image_pil) + LR_image = np.array(LR_image).astype(np.uint8) + + else: + LR_image = self.degradation_process(image=image)["image"] + + example["image"] = (image/127.5 - 1.0).astype(np.float32) + example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32) + + return example + + +class ImageNetSRTrain(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_train_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetTrain(process_images=False,) + return Subset(dset, indices) + + +class ImageNetSRValidation(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_val_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetValidation(process_images=False,) + return Subset(dset, indices) diff --git a/stable-diffusion/ldm/data/lsun.py b/stable-diffusion/ldm/data/lsun.py new file mode 100644 index 0000000..6256e45 --- /dev/null +++ b/stable-diffusion/ldm/data/lsun.py @@ -0,0 +1,92 @@ +import os +import numpy as np +import PIL +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms + + +class LSUNBase(Dataset): + def __init__(self, + txt_file, + data_root, + size=None, + interpolation="bicubic", + flip_p=0.5 + ): + self.data_paths = txt_file + self.data_root = data_root + with open(self.data_paths, "r") as f: + self.image_paths = f.read().splitlines() + self._length = len(self.image_paths) 
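+        # store each image's path both relative to data_root and joined with it (the path actually opened in __getitem__)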
+ self.labels = { + "relative_file_path_": [l for l in self.image_paths], + "file_path_": [os.path.join(self.data_root, l) + for l in self.image_paths], + } + + self.size = size + self.interpolation = {"linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, + }[interpolation] + self.flip = transforms.RandomHorizontalFlip(p=flip_p) + + def __len__(self): + return self._length + + def __getitem__(self, i): + example = dict((k, self.labels[k][i]) for k in self.labels) + image = Image.open(example["file_path_"]) + if not image.mode == "RGB": + image = image.convert("RGB") + + # default to score-sde preprocessing + img = np.array(image).astype(np.uint8) + crop = min(img.shape[0], img.shape[1]) + h, w, = img.shape[0], img.shape[1] + img = img[(h - crop) // 2:(h + crop) // 2, + (w - crop) // 2:(w + crop) // 2] + + image = Image.fromarray(img) + if self.size is not None: + image = image.resize((self.size, self.size), resample=self.interpolation) + + image = self.flip(image) + image = np.array(image).astype(np.uint8) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) + return example + + +class LSUNChurchesTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) + + +class LSUNChurchesValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", + flip_p=flip_p, **kwargs) + + +class LSUNBedroomsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) + + +class LSUNBedroomsValidation(LSUNBase): + def __init__(self, flip_p=0.0, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", + flip_p=flip_p, **kwargs) + + +class LSUNCatsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) + + +class LSUNCatsValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", + flip_p=flip_p, **kwargs) diff --git a/stable-diffusion/ldm/lr_scheduler.py b/stable-diffusion/ldm/lr_scheduler.py new file mode 100644 index 0000000..be39da9 --- /dev/null +++ b/stable-diffusion/ldm/lr_scheduler.py @@ -0,0 +1,98 @@ +import numpy as np + + +class LambdaWarmUpCosineScheduler: + """ + note: use with a base_lr of 1.0 + """ + def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): + self.lr_warm_up_steps = warm_up_steps + self.lr_start = lr_start + self.lr_min = lr_min + self.lr_max = lr_max + self.lr_max_decay_steps = max_decay_steps + self.last_lr = 0. 
+ self.verbosity_interval = verbosity_interval + + def schedule(self, n, **kwargs): + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") + if n < self.lr_warm_up_steps: + lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start + self.last_lr = lr + return lr + else: + t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) + t = min(t, 1.0) + lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( + 1 + np.cos(t * np.pi)) + self.last_lr = lr + return lr + + def __call__(self, n, **kwargs): + return self.schedule(n,**kwargs) + + +class LambdaWarmUpCosineScheduler2: + """ + supports repeated iterations, configurable via lists + note: use with a base_lr of 1.0. + """ + def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): + assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) + self.lr_warm_up_steps = warm_up_steps + self.f_start = f_start + self.f_min = f_min + self.f_max = f_max + self.cycle_lengths = cycle_lengths + self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) + self.last_f = 0. + self.verbosity_interval = verbosity_interval + + def find_in_interval(self, n): + interval = 0 + for cl in self.cum_cycles[1:]: + if n <= cl: + return interval + interval += 1 + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) + t = min(t, 1.0) + f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( + 1 + np.cos(t * np.pi)) + self.last_f = f + return f + + def __call__(self, n, **kwargs): + return self.schedule(n, **kwargs) + + +class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) + self.last_f = f + return f + diff --git a/stable-diffusion/ldm/models/autoencoder.py b/stable-diffusion/ldm/models/autoencoder.py new file mode 100644 index 0000000..6a9c4f4 --- /dev/null +++ b/stable-diffusion/ldm/models/autoencoder.py @@ -0,0 +1,443 @@ +import torch +import pytorch_lightning as pl +import torch.nn.functional as F +from contextlib import contextmanager + +from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer + +from ldm.modules.diffusionmodules.model import Encoder, Decoder +from ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +from ldm.util import instantiate_from_config + + +class VQModel(pl.LightningModule): + def __init__(self, + 
ddconfig, + lossconfig, + n_embed, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + batch_resize_range=None, + scheduler_config=None, + lr_g_factor=1.0, + remap=None, + sane_index_shape=False, # tell vector quantizer to return indices as bhw + use_ema=False + ): + super().__init__() + self.embed_dim = embed_dim + self.n_embed = n_embed + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, + remap=remap, + sane_index_shape=sane_index_shape) + self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + self.batch_resize_range = batch_resize_range + if self.batch_resize_range is not None: + print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + self.scheduler_config = scheduler_config + self.lr_g_factor = lr_g_factor + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + print(f"Unexpected Keys: {unexpected}") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info + + def encode_to_prequant(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, quant): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + def decode_code(self, code_b): + quant_b = self.quantize.embed_code(code_b) + dec = self.decode(quant_b) + return dec + + def forward(self, input, return_pred_indices=False): + quant, diff, (_,_,ind) = self.encode(input) + dec = self.decode(quant) + if return_pred_indices: + return dec, diff, ind + return dec, diff + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + if self.batch_resize_range is not None: + lower_size = self.batch_resize_range[0] + upper_size = self.batch_resize_range[1] + if self.global_step <= 4: + # do the first few 
batches with max size to avoid later oom + new_resize = upper_size + else: + new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) + if new_resize != x.shape[2]: + x = F.interpolate(x, size=new_resize, mode="bicubic") + x = x.detach() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + # https://github.com/pytorch/pytorch/issues/37142 + # try not to fool the heuristics + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + + if optimizer_idx == 0: + # autoencode + aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train", + predicted_indices=ind) + + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, suffix=""): + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + + discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] + self.log(f"val{suffix}/rec_loss", rec_loss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + self.log(f"val{suffix}/aeloss", aeloss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + if version.parse(pl.__version__) >= version.parse('1.4.0'): + del log_dict_ae[f"val{suffix}/rec_loss"] + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr_d = self.learning_rate + lr_g = self.lr_g_factor*self.learning_rate + print("lr_d", lr_d) + print("lr_g", lr_g) + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quantize.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr_g, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr_d, betas=(0.5, 0.9)) + + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + { + 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + ] + return [opt_ae, opt_disc], scheduler + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if only_inputs: + 
log["inputs"] = x + return log + xrec, _ = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["inputs"] = x + log["reconstructions"] = xrec + if plot_ema: + with self.ema_scope(): + xrec_ema, _ = self(x) + if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + log["reconstructions_ema"] = xrec_ema + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. + return x + + +class VQModelInterface(VQModel): + def __init__(self, embed_dim, *args, **kwargs): + super().__init__(embed_dim=embed_dim, *args, **kwargs) + self.embed_dim = embed_dim + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, h, force_not_quantize=False): + # also go through quantization layer + if not force_not_quantize: + quant, emb_loss, info = self.quantize(h) + else: + quant = h + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + +class AutoencoderKL(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + ): + super().__init__() + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f"Restored from {path}") + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_ae, 
prog_bar=False, logger=True, on_step=True, on_epoch=False) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + + self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return discloss + + def validation_step(self, batch, batch_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, + last_layer=self.get_last_layer(), split="val") + + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, + last_layer=self.get_last_layer(), split="val") + + self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr, betas=(0.5, 0.9)) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + log["inputs"] = x + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
+ return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x diff --git a/stable-diffusion/ldm/models/diffusion/__init__.py b/stable-diffusion/ldm/models/diffusion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-diffusion/ldm/models/diffusion/classifier.py b/stable-diffusion/ldm/models/diffusion/classifier.py new file mode 100644 index 0000000..67e98b9 --- /dev/null +++ b/stable-diffusion/ldm/models/diffusion/classifier.py @@ -0,0 +1,267 @@ +import os +import torch +import pytorch_lightning as pl +from omegaconf import OmegaConf +from torch.nn import functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from copy import deepcopy +from einops import rearrange +from glob import glob +from natsort import natsorted + +from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel +from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config + +__models__ = { + 'class_label': EncoderUNetModel, + 'segmentation': UNetModel +} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class NoisyLatentImageClassifier(pl.LightningModule): + + def __init__(self, + diffusion_path, + num_classes, + ckpt_path=None, + pool='attention', + label_key=None, + diffusion_ckpt_path=None, + scheduler_config=None, + weight_decay=1.e-2, + log_steps=10, + monitor='val/loss', + *args, + **kwargs): + super().__init__(*args, **kwargs) + self.num_classes = num_classes + # get latest config of diffusion model + diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] + self.diffusion_config = OmegaConf.load(diffusion_config).model + self.diffusion_config.params.ckpt_path = diffusion_ckpt_path + self.load_diffusion() + + self.monitor = monitor + self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 + self.log_time_interval = self.diffusion_model.num_timesteps // log_steps + self.log_steps = log_steps + + self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ + else self.diffusion_model.cond_stage_key + + assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' + + if self.label_key not in __models__: + raise NotImplementedError() + + self.load_classifier(ckpt_path, pool) + + self.scheduler_config = scheduler_config + self.use_scheduler = self.scheduler_config is not None + self.weight_decay = weight_decay + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and 
{len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def load_diffusion(self): + model = instantiate_from_config(self.diffusion_config) + self.diffusion_model = model.eval() + self.diffusion_model.train = disabled_train + for param in self.diffusion_model.parameters(): + param.requires_grad = False + + def load_classifier(self, ckpt_path, pool): + model_config = deepcopy(self.diffusion_config.params.unet_config.params) + model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels + model_config.out_channels = self.num_classes + if self.label_key == 'class_label': + model_config.pool = pool + + self.model = __models__[self.label_key](**model_config) + if ckpt_path is not None: + print('#####################################################################') + print(f'load from ckpt "{ckpt_path}"') + print('#####################################################################') + self.init_from_ckpt(ckpt_path) + + @torch.no_grad() + def get_x_noisy(self, x, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x)) + continuous_sqrt_alpha_cumprod = None + if self.diffusion_model.use_continuous_noise: + continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) + # todo: make sure t+1 is correct here + + return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, + continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) + + def forward(self, x_noisy, t, *args, **kwargs): + return self.model(x_noisy, t) + + @torch.no_grad() + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + @torch.no_grad() + def get_conditioning(self, batch, k=None): + if k is None: + k = self.label_key + assert k is not None, 'Needs to provide label key' + + targets = batch[k].to(self.device) + + if self.label_key == 'segmentation': + targets = rearrange(targets, 'b h w c -> b c h w') + for down in range(self.numd): + h, w = targets.shape[-2:] + targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') + + # targets = rearrange(targets,'b c h w -> b h w c') + + return targets + + def compute_top_k(self, logits, labels, k, reduction="mean"): + _, top_ks = torch.topk(logits, k, dim=1) + if reduction == "mean": + return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() + elif reduction == "none": + return (top_ks == labels[:, None]).float().sum(dim=-1) + + def on_train_epoch_start(self): + # save some memory + self.diffusion_model.model.to('cpu') + + @torch.no_grad() + def write_logs(self, loss, logits, targets): + log_prefix = 'train' if self.training else 'val' + log = {} + log[f"{log_prefix}/loss"] = loss.mean() + log[f"{log_prefix}/acc@1"] = self.compute_top_k( + logits, targets, k=1, reduction="mean" + ) + log[f"{log_prefix}/acc@5"] = self.compute_top_k( + logits, targets, k=5, reduction="mean" + ) + + self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) + self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) + self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) + + def shared_step(self, batch, t=None): + x, *_ = 
self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) + targets = self.get_conditioning(batch) + if targets.dim() == 4: + targets = targets.argmax(dim=1) + if t is None: + t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() + else: + t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() + x_noisy = self.get_x_noisy(x, t) + logits = self(x_noisy, t) + + loss = F.cross_entropy(logits, targets, reduction='none') + + self.write_logs(loss.detach(), logits.detach(), targets.detach()) + + loss = loss.mean() + return loss, logits, x_noisy, targets + + def training_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + return loss + + def reset_noise_accs(self): + self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in + range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} + + def on_validation_start(self): + self.reset_noise_accs() + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + + for t in self.noisy_acc: + _, logits, _, targets = self.shared_step(batch, t) + self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) + self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) + + return loss + + def configure_optimizers(self): + optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) + + if self.use_scheduler: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [optimizer], scheduler + + return optimizer + + @torch.no_grad() + def log_images(self, batch, N=8, *args, **kwargs): + log = dict() + x = self.get_input(batch, self.diffusion_model.first_stage_key) + log['inputs'] = x + + y = self.get_conditioning(batch) + + if self.label_key == 'class_label': + y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['labels'] = y + + if ismap(y): + log['labels'] = self.diffusion_model.to_rgb(y) + + for step in range(self.log_steps): + current_time = step * self.log_time_interval + + _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) + + log[f'inputs@t{current_time}'] = x_noisy + + pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) + pred = rearrange(pred, 'b h w c -> b c h w') + + log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) + + for key in log: + log[key] = log[key][:N] + + return log diff --git a/stable-diffusion/ldm/models/diffusion/ddim.py b/stable-diffusion/ldm/models/diffusion/ddim.py new file mode 100644 index 0000000..fb31215 --- /dev/null +++ b/stable-diffusion/ldm/models/diffusion/ddim.py @@ -0,0 +1,241 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \ + extract_into_tensor + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + 
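+    # make_schedule() precomputes everything the sampling loop needs for a fixed
+    # number of DDIM steps: the subsampled timestep sequence, the matching
+    # alpha_bar values (and their "previous" counterparts), and the eta-scaled
+    # sigmas that interpolate between deterministic DDIM (eta=0) and
+    # ancestral DDPM-style sampling (eta=1). sample() calls it before ddim_sampling().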
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for DDIM sampling is {size}, eta {eta}') + + samples, intermediates = self.ddim_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + img, pred_x0 = outs + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) + + @torch.no_grad() + def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, + use_original_steps=False): + + timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) + x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + return x_dec \ No newline at end of file diff --git a/stable-diffusion/ldm/models/diffusion/ddpm.py b/stable-diffusion/ldm/models/diffusion/ddpm.py new file mode 100644 index 0000000..bbedd04 --- /dev/null +++ b/stable-diffusion/ldm/models/diffusion/ddpm.py @@ -0,0 +1,1445 @@ +""" +wild mixture of +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +-- merci +""" + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager +from functools import partial +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.distributed import rank_zero_only + +from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution +from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like +from ldm.models.diffusion.ddim import DDIMSampler + + 
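+# Mapping from the conditioning mode of DiffusionWrapper to the keyword under
+# which conditioning tensors are handed to the UNet: channel-wise concatenation
+# ('c_concat'), cross-attention context ('c_crossattn'), and class-label /
+# adm-style conditioning ('y').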
+__conditioning_keys__ = {'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y'} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__(self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0., + v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1., + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0., + ): + super().__init__() + assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' + self.parameterization = parameterization + print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) + + self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, + linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + + def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. 
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( + 1. - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + if self.parameterization == "eps": + lvlb_weights = self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) + elif self.parameterization == "x0": + lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) + else: + raise NotImplementedError("mu not supported") + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1., 1.) 
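+        # plug the denoised estimate of x_0 into the closed-form Gaussian posterior
+        # q(x_{t-1} | x_t, x_0); its mean and (log-)variance drive the reverse step in p_sample()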
+ + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop((batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction='none') + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + else: + raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = 
x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict(loss_dict, prog_bar=True, + logger=True, on_step=True, on_epoch=True) + + self.log("global_step", self.global_step, + prog_bar=True, logger=True, on_step=True, on_epoch=False) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + def __init__(self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + *args, **kwargs): + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__': + 
conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", []) + super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + + def make_cond_schedule(self, ): + self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) + ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() + self.cond_ids[:self.num_timesteps_cond] = ids + + @rank_zero_only + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: + assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule(self, + given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + else: + assert config != '__is_first_stage__' + assert config != '__is_unconditional__' + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append(self.decode_first_stage(zd.to(self.device), + force_not_quantize=force_no_decoder_quantization)) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + wtith min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip(L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"]) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, padding=0, + stride=(stride[0] * uf, stride[1] * uf)) + fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) + + elif df > 1 and uf == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, padding=0, + stride=(stride[0] // df, stride[1] // df)) + fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, + cond_key=None, return_original_cond=False, bs=None): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None: + if cond_key is None: + cond_key = 
self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ['caption', 'coordinates_bbox']: + xc = batch[cond_key] + elif cond_key == 'class_label': + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + # import pudb; pudb.set_trace() + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {'pos_x': pos_x, 'pos_y': pos_y} + out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_original_cond: + out.append(xc) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. 
reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + # same as above but without decorator + def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) + df = self.split_input_params["vqf"] + self.split_input_params['original_image_size'] = x.shape[-2:] + bs, nc, h, w = x.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) + z = unfold(x) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) + o = o * weighting + + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization + return decoded + + else: + return self.first_stage_model.encode(x) + else: + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset + def rescale_bbox(bbox): + x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) + y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) + w = min(bbox[2] / crop_coordinates[2], 1 - x0) + h = min(bbox[3] / crop_coordinates[3], 1 - y0) + return x0, y0, w, h + + return [rescale_bbox(b) for b in bboxes] + + def apply_model(self, x_noisy, t, cond, return_ids=False): + + if isinstance(cond, dict): + # hybrid case, cond is exptected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' + cond = {key: cond} + + if hasattr(self, "split_input_params"): + assert len(cond) == 1 # todo can only deal with one conditioning atm + assert not return_ids + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) + + h, w = x_noisy.shape[-2:] + + fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) + + z = unfold(x_noisy) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] + + if self.cond_stage_key in ["image", "LR_image", "segmentation", + 'bbox_img'] and self.model.conditioning_key: # todo check for completeness + c_key = next(iter(cond.keys())) # get key + c = next(iter(cond.values())) # get value + assert (len(c) == 1) # todo extend to list with more than one elem + c = c[0] # get element + + c = unfold(c) + c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] + + elif self.cond_stage_key == 'coordinates_bbox': + assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' + + # assuming padding of unfold is always 0 and its dilation is always 1 + n_patches_per_row = int((w - ks[0]) / stride[0] + 1) + full_img_h, full_img_w = self.split_input_params['original_image_size'] + # as we are operating on latents, we need the factor from the original image size to the + # spatial latent size to properly rescale the crops for regenerating the bbox annotations + num_downs = self.first_stage_model.encoder.num_resolutions - 1 + rescale_latent = 2 ** (num_downs) + + # get top left postions of patches as conforming for the bbbox tokenizer, therefore we + # need to rescale the tl patch coordinates to be in between (0,1) + tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, + rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) + for patch_nr in range(z.shape[-1])] + + # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) + patch_limits = [(x_tl, y_tl, + rescale_latent * ks[0] / full_img_w, + rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] + # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] + + # tokenize crop coordinates for the bounding boxes of the respective patches + patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) + for bbox in patch_limits] # list of length l with tensors of shape (1, 2) + print(patch_limits_tknzd[0].shape) + # cut tknzd crop position from conditioning + assert isinstance(cond, dict), 'cond must be dict to be fed into model' + cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + print(cut_cond.shape) + + adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) + adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + print(adapted_cond.shape) + adapted_cond = self.get_learned_conditioning(adapted_cond) + print(adapted_cond.shape) + adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) + print(adapted_cond.shape) + + cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + + else: + cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient + + # apply model by loop over crops + output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] + assert not isinstance(output_list[0], + tuple) # todo cant deal with multiple model outputs check this never happens + + o = torch.stack(output_list, 
axis=-1) + o = o * weighting + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + x_recon = fold(o) / normalization + + else: + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == "x0": + target = x_start + elif self.parameterization == "eps": + target = noise + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) + loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += (self.original_elbo_weight * loss_vlb) + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, + return_x0=False, score_corrector=None, corrector_kwargs=None): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1., 1.) 
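+ # Note: clamping is rarely active here, since LatentDiffusion sets self.clip_denoised = False
+ # in __init__ (first-stage latents are not constrained to [-1, 1]). quantize_denoised below
+ # instead snaps the x0 estimate onto the first-stage codebook, which is only meaningful for
+ # VQ-regularized first stages.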
+ if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, + return_codebook_ids=False, quantize_denoised=False, return_x0=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) + if return_x0: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, + img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., + score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, + log_every_t=None): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', + total=timesteps) if verbose else reversed( + range(0, timesteps)) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, return_x0=True, + temperature=temperature[i], noise_dropout=noise_dropout, + 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: callback(i) + if img_callback: img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop(self, cond, shape, return_intermediates=False, + x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, start_T=None, + log_every_t=None): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( + range(0, timesteps)) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: callback(i) + if img_callback: img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, + verbose=True, timesteps=None, quantize_denoised=False, + mask=None, x0=None, shape=None,**kwargs): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + return self.p_sample_loop(cond, + shape, + return_intermediates=return_intermediates, x_T=x_T, + verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, + mask=mask, x0=x0) + + @torch.no_grad() + def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): + + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, + shape,cond,verbose=False,**kwargs) + + else: + samples, intermediates = self.sample(cond=cond, batch_size=batch_size, + return_intermediates=True,**kwargs) + + return samples, intermediates + + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, **kwargs): + + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, + 
return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( + self.first_stage_model, IdentityFirstStage): + # also display when quantizing x0 while sampling + with self.ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta, + quantize_denoised=True) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. + mask = mask[:, None, ...] 
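+ # Mask convention: 1 keeps the original latent, 0 marks the centre square to be
+ # re-synthesized; p_sample_loop applies it as img = img_orig * mask + (1. - mask) * img.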
+ with self.ema_scope("Plotting Inpaint"): + + samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + with self.ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with self.ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
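+ # The random 1x1 convolution above projects an arbitrary channel count down to 3, and the
+ # min-max rescaling maps the result to [-1, 1] for image logging.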
+ return x + + +class DiffusionWrapper(pl.LightningModule): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] + + def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): + if self.conditioning_key is None: + out = self.diffusion_model(x, t) + elif self.conditioning_key == 'concat': + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == 'crossattn': + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == 'hybrid': + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == 'adm': + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class Layout2ImgDiffusion(LatentDiffusion): + # TODO: move all layout-specific hacks to this class + def __init__(self, cond_stage_key, *args, **kwargs): + assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' + super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) + + def log_images(self, batch, N=8, *args, **kwargs): + logs = super().log_images(batch=batch, N=N, *args, **kwargs) + + key = 'train' if self.training else 'validation' + dset = self.trainer.datamodule.datasets[key] + mapper = dset.conditional_builders[self.cond_stage_key] + + bbox_imgs = [] + map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) + for tknzd_bbox in batch[self.cond_stage_key][:N]: + bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) + bbox_imgs.append(bboximg) + + cond_img = torch.stack(bbox_imgs, dim=0) + logs['bbox_image'] = cond_img + return logs diff --git a/stable-diffusion/ldm/models/diffusion/dpm_solver/__init__.py b/stable-diffusion/ldm/models/diffusion/dpm_solver/__init__.py new file mode 100644 index 0000000..7427f38 --- /dev/null +++ b/stable-diffusion/ldm/models/diffusion/dpm_solver/__init__.py @@ -0,0 +1 @@ +from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/stable-diffusion/ldm/models/diffusion/dpm_solver/dpm_solver.py b/stable-diffusion/ldm/models/diffusion/dpm_solver/dpm_solver.py new file mode 100644 index 0000000..bdb64e0 --- /dev/null +++ b/stable-diffusion/ldm/models/diffusion/dpm_solver/dpm_solver.py @@ -0,0 +1,1184 @@ +import torch +import torch.nn.functional as F +import math + + +class NoiseScheduleVP: + def __init__( + self, + schedule='discrete', + betas=None, + alphas_cumprod=None, + continuous_beta_0=0.1, + continuous_beta_1=20., + ): + """Create a wrapper class for the forward SDE (VP type). + + *** + Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. + We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. + *** + + The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). + We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). + Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. 
For t in [0, T], we have: + + log_alpha_t = self.marginal_log_mean_coeff(t) + sigma_t = self.marginal_std(t) + lambda_t = self.marginal_lambda(t) + + Moreover, as lambda(t) is an invertible function, we also support its inverse function: + + t = self.inverse_lambda(lambda_t) + + =============================================================== + + We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). + + 1. For discrete-time DPMs: + + For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: + t_i = (i + 1) / N + e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. + We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. + + Args: + betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) + alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) + + Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. + + **Important**: Please pay special attention for the args for `alphas_cumprod`: + The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that + q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). + Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have + alpha_{t_n} = \sqrt{\hat{alpha_n}}, + and + log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). + + + 2. For continuous-time DPMs: + + We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise + schedule are the default settings in DDPM and improved-DDPM: + + Args: + beta_min: A `float` number. The smallest beta for the linear schedule. + beta_max: A `float` number. The largest beta for the linear schedule. + cosine_s: A `float` number. The hyperparameter in the cosine schedule. + cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. + T: A `float` number. The ending time of the forward process. + + =============================================================== + + Args: + schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, + 'linear' or 'cosine' for continuous-time DPMs. + Returns: + A wrapper object of the forward SDE (VP type). + + =============================================================== + + Example: + + # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', betas=betas) + + # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + + # For continuous-time DPMs (VPSDE), linear schedule: + >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) + + """ + + if schedule not in ['discrete', 'linear', 'cosine']: + raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule)) + + self.schedule = schedule + if schedule == 'discrete': + if betas is not None: + log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) + else: + assert alphas_cumprod is not None + log_alphas = 0.5 * torch.log(alphas_cumprod) + self.total_N = len(log_alphas) + self.T = 1. 
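+ # Discrete step n is mapped to continuous time t_n = (n + 1) / N (see the docstring above),
+ # so t_array below covers (1/N, 2/N, ..., 1.0] and log_alpha_array holds the matching
+ # 0.5 * log(alphas_cumprod) knots; marginal_log_mean_coeff interpolates between them.
+ # E.g. for N = 1000, t_array = (0.001, 0.002, ..., 1.0).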
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape((1, -1,)) + else: + self.total_N = 1000 + self.beta_0 = continuous_beta_0 + self.beta_1 = continuous_beta_1 + self.cosine_s = 0.008 + self.cosine_beta_max = 999. + self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.schedule = schedule + if schedule == 'cosine': + # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. + # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. + self.T = 0.9946 + else: + self.T = 1. + + def marginal_log_mean_coeff(self, t): + """ + Compute log(alpha_t) of a given continuous-time label t in [0, T]. + """ + if self.schedule == 'discrete': + return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) + elif self.schedule == 'linear': + return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == 'cosine': + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + return log_alpha_t + + def marginal_alpha(self, t): + """ + Compute alpha_t of a given continuous-time label t in [0, T]. + """ + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + """ + Compute sigma_t of a given continuous-time label t in [0, T]. + """ + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) + return log_mean_coeff - log_std + + def inverse_lambda(self, lamb): + """ + Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. + """ + if self.schedule == 'linear': + tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + Delta = self.beta_0**2 + tmp + return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) + elif self.schedule == 'discrete': + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) + return t.reshape((-1,)) + else: + log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + t = t_fn(log_alpha) + return t + + +def model_wrapper( + model, + noise_schedule, + model_type="noise", + model_kwargs={}, + guidance_type="uncond", + condition=None, + unconditional_condition=None, + guidance_scale=1., + classifier_fn=None, + classifier_kwargs={}, +): + """Create a wrapper function for the noise prediction model. + + DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to + firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. 
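+ (Concretely, get_model_input_time below converts t_continuous to the model's own time label;
+ for a discrete-time DPM with N = 1000 steps, t_continuous = 1.0 maps to
+ t_input = 1000 * (1.0 - 1/1000) = 999.)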
+ + We support four types of the diffusion model by setting `model_type`: + + 1. "noise": noise prediction model. (Trained by predicting noise). + + 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). + + 3. "v": velocity prediction model. (Trained by predicting the velocity). + The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. + + [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." + arXiv preprint arXiv:2202.00512 (2022). + [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." + arXiv preprint arXiv:2210.02303 (2022). + + 4. "score": marginal score function. (Trained by denoising score matching). + Note that the score function and the noise prediction model follows a simple relationship: + ``` + noise(x_t, t) = -sigma_t * score(x_t, t) + ``` + + We support three types of guided sampling by DPMs by setting `guidance_type`: + 1. "uncond": unconditional sampling by DPMs. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + + 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + + The input `classifier_fn` has the following format: + `` + classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) + `` + + [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," + in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. + + 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. + The input `model` has the following format: + `` + model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score + `` + And if cond == `unconditional_condition`, the model output is the unconditional DPM output. + + [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." + arXiv preprint arXiv:2207.12598 (2022). + + + The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) + or continuous-time labels (i.e. epsilon to T). + + We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: + `` + def model_fn(x, t_continuous) -> noise: + t_input = get_model_input_time(t_continuous) + return noise_pred(model, x, t_input, **model_kwargs) + `` + where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. + + =============================================================== + + Args: + model: A diffusion model with the corresponding format described above. + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + model_type: A `str`. The parameterization type of the diffusion model. + "noise" or "x_start" or "v" or "score". + model_kwargs: A `dict`. A dict for the other inputs of the model function. + guidance_type: A `str`. The type of the guidance for sampling. + "uncond" or "classifier" or "classifier-free". + condition: A pytorch tensor. The condition for the guided sampling. + Only used for "classifier" or "classifier-free" guidance type. + unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. + Only used for "classifier-free" guidance type. + guidance_scale: A `float`. The scale for the guided sampling. 
+ classifier_fn: A classifier function. Only used for the classifier guidance. + classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. + Returns: + A noise prediction model that accepts the noised data and the continuous time as the inputs. + """ + + def get_model_input_time(t_continuous): + """ + Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. + For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. + For continuous-time DPMs, we just use `t_continuous`. + """ + if noise_schedule.schedule == 'discrete': + return (t_continuous - 1. / noise_schedule.total_N) * 1000. + else: + return t_continuous + + def noise_pred_fn(x, t_continuous, cond=None): + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + t_input = get_model_input_time(t_continuous) + if cond is None: + output = model(x, t_input, **model_kwargs) + else: + output = model(x, t_input, cond, **model_kwargs) + if model_type == "noise": + return output + elif model_type == "x_start": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) + elif model_type == "v": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x + elif model_type == "score": + sigma_t = noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return -expand_dims(sigma_t, dims) * output + + def cond_grad_fn(x, t_input): + """ + Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). + """ + with torch.enable_grad(): + x_in = x.detach().requires_grad_(True) + log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) + return torch.autograd.grad(log_prob.sum(), x_in)[0] + + def model_fn(x, t_continuous): + """ + The noise predicition model function that is used for DPM-Solver. + """ + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + if guidance_type == "uncond": + return noise_pred_fn(x, t_continuous) + elif guidance_type == "classifier": + assert classifier_fn is not None + t_input = get_model_input_time(t_continuous) + cond_grad = cond_grad_fn(x, t_input) + sigma_t = noise_schedule.marginal_std(t_continuous) + noise = noise_pred_fn(x, t_continuous) + return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad + elif guidance_type == "classifier-free": + if guidance_scale == 1. or unconditional_condition is None: + return noise_pred_fn(x, t_continuous, cond=condition) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t_continuous] * 2) + c_in = torch.cat([unconditional_condition, condition]) + noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) + return noise_uncond + guidance_scale * (noise - noise_uncond) + + assert model_type in ["noise", "x_start", "v"] + assert guidance_type in ["uncond", "classifier", "classifier-free"] + return model_fn + + +class DPM_Solver: + def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): + """Construct a DPM-Solver. + + We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). 
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). + If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). + In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. + The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. + + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. + thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. + max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. + + [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. + """ + self.model = model_fn + self.noise_schedule = noise_schedule + self.predict_x0 = predict_x0 + self.thresholding = thresholding + self.max_val = max_val + + def noise_prediction_fn(self, x, t): + """ + Return the noise prediction model. + """ + return self.model(x, t) + + def data_prediction_fn(self, x, t): + """ + Return the data prediction model (with thresholding). + """ + noise = self.noise_prediction_fn(x, t) + dims = x.dim() + alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) + x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) + if self.thresholding: + p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. + s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) + s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) + x0 = torch.clamp(x0, -s, s) / s + return x0 + + def model_fn(self, x, t): + """ + Convert the model to the noise prediction model or the data prediction model. + """ + if self.predict_x0: + return self.data_prediction_fn(x, t) + else: + return self.noise_prediction_fn(x, t) + + def get_time_steps(self, skip_type, t_T, t_0, N, device): + """Compute the intermediate time steps for sampling. + + Args: + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + N: A `int`. The total number of the spacing of the time steps. + device: A torch device. + Returns: + A pytorch tensor of the time steps, with the shape (N + 1,). 
+ """ + if skip_type == 'logSNR': + lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) + lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) + logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) + return self.noise_schedule.inverse_lambda(logSNR_steps) + elif skip_type == 'time_uniform': + return torch.linspace(t_T, t_0, N + 1).to(device) + elif skip_type == 'time_quadratic': + t_order = 2 + t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device) + return t + else: + raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) + + def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): + """ + Get the order of each step for sampling by the singlestep DPM-Solver. + + We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". + Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: + - If order == 1: + We take `steps` of DPM-Solver-1 (i.e. DDIM). + - If order == 2: + - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of DPM-Solver-2. + - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If order == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. + + ============================================ + Args: + order: A `int`. The max order for the solver (2 or 3). + steps: A `int`. The total number of function evaluations (NFE). + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + device: A torch device. + Returns: + orders: A list of the solver order of each step. + """ + if order == 3: + K = steps // 3 + 1 + if steps % 3 == 0: + orders = [3,] * (K - 2) + [2, 1] + elif steps % 3 == 1: + orders = [3,] * (K - 1) + [1] + else: + orders = [3,] * (K - 1) + [2] + elif order == 2: + if steps % 2 == 0: + K = steps // 2 + orders = [2,] * K + else: + K = steps // 2 + 1 + orders = [2,] * (K - 1) + [1] + elif order == 1: + K = 1 + orders = [1,] * steps + else: + raise ValueError("'order' must be '1' or '2' or '3'.") + if skip_type == 'logSNR': + # To reproduce the results in DPM-Solver paper + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) + else: + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)] + return timesteps_outer, orders + + def denoise_to_zero_fn(self, x, s): + """ + Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. 
+ """ + return self.data_prediction_fn(x, s) + + def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): + """ + DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + if self.predict_x0: + phi_1 = torch.expm1(-h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + else: + phi_1 = torch.expm1(h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + + def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-2 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the second-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
+ """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 0.5 + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + s1 = ns.inverse_lambda(lambda_s1) + log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) + alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_1 = torch.expm1(-h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s) + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_1 = torch.expm1(h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) + ) + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1} + else: + return x_t + + def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-3 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). + If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. 
+ Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 1. / 3. + if r2 is None: + r2 = 2. / 3. + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + lambda_s2 = lambda_s + r2 * h + s1 = ns.inverse_lambda(lambda_s1) + s2 = ns.inverse_lambda(lambda_s2) + log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t) + alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_12 = torch.expm1(-r2 * h) + phi_1 = torch.expm1(-h) + phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. + phi_2 = phi_1 / h + 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(sigma_s2 / sigma_s, dims) * x + - expand_dims(alpha_s2 * phi_12, dims) * model_s + + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + expand_dims(alpha_t * phi_2, dims) * D1 + - expand_dims(alpha_t * phi_3, dims) * D2 + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_12 = torch.expm1(r2 * h) + phi_1 = torch.expm1(h) + phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. + phi_2 = phi_1 / h - 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x + - expand_dims(sigma_s2 * phi_12, dims) * model_s + - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - expand_dims(sigma_t * phi_2, dims) * D1 + - expand_dims(sigma_t * phi_3, dims) * D2 + ) + + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} + else: + return x_t + + def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): + """ + Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + ns = self.noise_schedule + dims = x.dim() + model_prev_1, model_prev_0 = model_prev_list + t_prev_1, t_prev_0 = t_prev_list + lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0 = h_0 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + if self.predict_x0: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 + ) + else: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 + ) + return x_t + + def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): + """ + Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + model_prev_2, model_prev_1, model_prev_0 = model_prev_list + t_prev_2, t_prev_1, t_prev_0 = t_prev_list + lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_1 = lambda_prev_1 - lambda_prev_2 + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0, r1 = h_0 / h, h_1 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) + D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) + D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) + if self.predict_x0: + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 + - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2 + ) + else: + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 + - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2 + ) + return x_t + + def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): + """ + Singlestep DPM-Solver with the order `order` from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + r1: A `float`. The hyperparameter of the second-order or third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) + elif order == 2: + return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) + elif order == 3: + return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): + """ + Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. 
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) + elif order == 2: + return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + elif order == 3: + return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'): + """ + The adaptive step size solver based on singlestep DPM-Solver. + + Args: + x: A pytorch tensor. The initial value at time `t_T`. + order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + h_init: A `float`. The initial step size (for logSNR). + atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. + rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. + theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. + t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the + current time and `t_0` is less than `t_err`. The default setting is 1e-5. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_0: A pytorch tensor. The approximated solution at time `t_0`. + + [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. + """ + ns = self.noise_schedule + s = t_T * torch.ones((x.shape[0],)).to(x) + lambda_s = ns.marginal_lambda(s) + lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) + h = h_init * torch.ones_like(s).to(x) + x_prev = x + nfe = 0 + if order == 2: + r1 = 0.5 + lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs) + elif order == 3: + r1, r2 = 1. / 3., 2. / 3. 
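            # DPM-Solver-23 (see the `sample` docstring below): the singlestep second-order
            # update serves as the lower-order error estimate and the third-order update as
            # the candidate step, with intermediate ratios r1 = 1/3 and r2 = 2/3.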
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs) + else: + raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) + while torch.abs((s - t_0)).mean() > t_err: + t = ns.inverse_lambda(lambda_s + h) + x_lower, lower_noise_kwargs = lower_update(x, s, t) + x_higher = higher_update(x, s, t, **lower_noise_kwargs) + delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) + norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) + E = norm_fn((x_higher - x_lower) / delta).max() + if torch.all(E <= 1.): + x = x_higher + s = t + x_prev = x_lower + lambda_s = ns.marginal_lambda(s) + h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) + nfe += order + print('adaptive solver nfe', nfe) + return x + + def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', + method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', + atol=0.0078, rtol=0.05, + ): + """ + Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. + + ===================================================== + + We support the following algorithms for both noise prediction model and data prediction model: + - 'singlestep': + Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. + We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). + The total number of function evaluations (NFE) == `steps`. + Given a fixed NFE == `steps`, the sampling procedure is: + - If `order` == 1: + - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. + - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If `order` == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. + - 'multistep': + Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. + We initialize the first `order` values by lower order multistep solvers. + Given a fixed NFE == `steps`, the sampling procedure is: + Denote K = steps. + - If `order` == 1: + - We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. + - If `order` == 3: + - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. + - 'singlestep_fixed': + Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). 
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. + - 'adaptive': + Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). + We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. + You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs + (NFE) and the sample quality. + - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. + - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. + + ===================================================== + + Some advices for choosing the algorithm: + - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: + Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, + skip_type='time_uniform', method='singlestep') + - For **guided sampling with large guidance scale** by DPMs: + Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, + skip_type='time_uniform', method='multistep') + + We support three types of `skip_type`: + - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images** + - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**. + - 'time_quadratic': quadratic time for the time steps. + + ===================================================== + Args: + x: A pytorch tensor. The initial value at time `t_start` + e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. + steps: A `int`. The total number of function evaluations (NFE). + t_start: A `float`. The starting time of the sampling. + If `T` is None, we use self.noise_schedule.T (default is 1.0). + t_end: A `float`. The ending time of the sampling. + If `t_end` is None, we use 1. / self.noise_schedule.total_N. + e.g. if total_N == 1000, we have `t_end` == 1e-3. + For discrete-time DPMs: + - We recommend `t_end` == 1. / self.noise_schedule.total_N. + For continuous-time DPMs: + - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. + order: A `int`. The order of DPM-Solver. + skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. + method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. + denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. + Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). + + This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and + score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID + for diffusion models sampling by diffusion SDEs for low-resolutional images + (such as CIFAR-10). However, we observed that such trick does not matter for + high-resolutional images. As it needs an additional NFE, we do not recommend + it for high-resolutional images. 
+ lower_order_final: A `bool`. Whether to use lower order solvers at the final steps. + Only valid for `method=multistep` and `steps < 15`. We empirically find that + this trick is a key to stabilizing the sampling by DPM-Solver with very few steps + (especially for steps <= 10). So we recommend to set it to be `True`. + solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. + atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + Returns: + x_end: A pytorch tensor. The approximated solution at time `t_end`. + + """ + t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + t_T = self.noise_schedule.T if t_start is None else t_start + device = x.device + if method == 'adaptive': + with torch.no_grad(): + x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) + elif method == 'multistep': + assert steps >= order + timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) + assert timesteps.shape[0] - 1 == steps + with torch.no_grad(): + vec_t = timesteps[0].expand((x.shape[0])) + model_prev_list = [self.model_fn(x, vec_t)] + t_prev_list = [vec_t] + # Init the first `order` values by lower order multistep DPM-Solver. + for init_order in range(1, order): + vec_t = timesteps[init_order].expand(x.shape[0]) + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) + model_prev_list.append(self.model_fn(x, vec_t)) + t_prev_list.append(vec_t) + # Compute the remaining values by `order`-th order multistep DPM-Solver. + for step in range(order, steps + 1): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final and steps < 15: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. 
+ if step < steps: + model_prev_list[-1] = self.model_fn(x, vec_t) + elif method in ['singlestep', 'singlestep_fixed']: + if method == 'singlestep': + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) + elif method == 'singlestep_fixed': + K = steps // order + orders = [order,] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for i, order in enumerate(orders): + t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] + timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) + if denoise_to_zero: + x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + return x + + + +############################################################# +# other utility functions +############################################################# + +def interpolate_fn(x, xp, yp): + """ + A piecewise linear function y = f(x), using xp and yp as keypoints. + We implement f(x) in a differentiable way (i.e. applicable for autograd). + The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) + + Args: + x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). + xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. + yp: PyTorch tensor with shape [C, K]. + Returns: + The function values f(x), with shape [N, C]. + """ + N, K = x.shape[0], xp.shape[1] + all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) + sorted_all_x, x_indices = torch.sort(all_x, dim=2) + x_idx = torch.argmin(x_indices, dim=2) + cand_start_idx = x_idx - 1 + start_idx = torch.where( + torch.eq(x_idx, 0), + torch.tensor(1, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) + start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) + end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) + start_idx2 = torch.where( + torch.eq(x_idx, 0), + torch.tensor(0, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) + start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) + end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) + cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) + return cand + + +def expand_dims(v, dims): + """ + Expand the tensor `v` to the dim `dims`. + + Args: + `v`: a PyTorch tensor with shape [N]. + `dim`: a `int`. + Returns: + a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
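As a minimal illustration, with `dims = 4` the indexing trick in the return statement below reshapes a per-sample coefficient so that it broadcasts against an image batch (shapes here are only an example):

```python
import torch

v = torch.ones(4)              # one coefficient per sample, shape [4]
v4 = v[(...,) + (None,) * 3]   # same as expand_dims(v, 4) -> shape [4, 1, 1, 1]
x = torch.randn(4, 3, 64, 64)  # e.g. a batch of images
y = v4 * x                     # v4 broadcasts over the channel and spatial dims
```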
+ """ + return v[(...,) + (None,)*(dims - 1)] \ No newline at end of file diff --git a/stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py b/stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py new file mode 100644 index 0000000..2c42d6f --- /dev/null +++ b/stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py @@ -0,0 +1,82 @@ +"""SAMPLING ONLY.""" + +import torch + +from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver + + +class DPMSolverSampler(object): + def __init__(self, model, **kwargs): + super().__init__() + self.model = model + to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) + self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + + # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') + + device = self.model.betas.device + if x_T is None: + img = torch.randn(size, device=device) + else: + img = x_T + + ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) + + model_fn = model_wrapper( + lambda x, t, c: self.model.apply_model(x, t, c), + ns, + model_type="noise", + guidance_type="classifier-free", + condition=conditioning, + unconditional_condition=unconditional_conditioning, + guidance_scale=unconditional_guidance_scale, + ) + + dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) + x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) + + return x.to(device), None diff --git a/stable-diffusion/ldm/models/diffusion/plms.py b/stable-diffusion/ldm/models/diffusion/plms.py new file mode 100644 index 0000000..78eeb10 --- /dev/null +++ b/stable-diffusion/ldm/models/diffusion/plms.py @@ -0,0 +1,236 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, 
ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/stable-diffusion/ldm/modules/attention.py b/stable-diffusion/ldm/modules/attention.py new file mode 100644 index 0000000..f4eff39 --- /dev/null +++ b/stable-diffusion/ldm/modules/attention.py @@ -0,0 +1,261 @@ +from inspect import isfunction +import math +import torch +import torch.nn.functional as F +from torch import nn, einsum +from einops import rearrange, repeat + +from ldm.modules.diffusionmodules.util import checkpoint + + +def exists(val): + return val is not None + + +def uniq(arr): + return{el: True for el in arr}.keys() + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def max_neg_value(t): + return -torch.finfo(t.dtype).max + + +def init_(tensor): + dim = tensor.shape[-1] + std = 1 / math.sqrt(dim) + tensor.uniform_(-std, std) + return tensor + + +# feedforward +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. 
+ """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def Normalize(in_channels): + return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + + +class LinearAttention(nn.Module): + def __init__(self, dim, heads=4, dim_head=32): + super().__init__() + self.heads = heads + hidden_dim = dim_head * heads + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) + self.to_out = nn.Conv2d(hidden_dim, dim, 1) + + def forward(self, x): + b, c, h, w = x.shape + qkv = self.to_qkv(x) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) + k = k.softmax(dim=-1) + context = torch.einsum('bhdn,bhen->bhde', k, v) + out = torch.einsum('bhde,bhdn->bhen', context, q) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + return self.to_out(out) + + +class SpatialSelfAttention(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = rearrange(q, 'b c h w -> b (h w) c') + k = rearrange(k, 'b c h w -> b c (h w)') + w_ = torch.einsum('bij,bjk->bik', q, k) + + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = rearrange(v, 'b c h w -> b c (h w)') + w_ = rearrange(w_, 'b i j -> b j i') + h_ = torch.einsum('bij,bjk->bik', v, w_) + h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) + h_ = self.proj_out(h_) + + return x+h_ + + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head ** -0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential( + nn.Linear(inner_dim, query_dim), + nn.Dropout(dropout) + ) + + def forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if exists(mask): + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + + +class BasicTransformerBlock(nn.Module): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): + super().__init__() + self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, + heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint + + def forward(self, x, context=None): + return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) + + def _forward(self, x, context=None): + x = self.attn1(self.norm1(x)) + x + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x + + +class SpatialTransformer(nn.Module): + """ + Transformer block for image-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. + Finally, reshape to image + """ + def __init__(self, in_channels, n_heads, d_head, + depth=1, dropout=0., context_dim=None): + super().__init__() + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) + + self.transformer_blocks = nn.ModuleList( + [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) + for d in range(depth)] + ) + + self.proj_out = zero_module(nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0)) + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + x = self.proj_in(x) + x = rearrange(x, 'b c h w -> b (h w) c') + for block in self.transformer_blocks: + x = block(x, context=context) + x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) + x = self.proj_out(x) + return x + x_in \ No newline at end of file diff --git a/stable-diffusion/ldm/modules/diffusionmodules/__init__.py b/stable-diffusion/ldm/modules/diffusionmodules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-diffusion/ldm/modules/diffusionmodules/model.py b/stable-diffusion/ldm/modules/diffusionmodules/model.py new file mode 100644 index 0000000..533e589 --- /dev/null +++ b/stable-diffusion/ldm/modules/diffusionmodules/model.py @@ -0,0 +1,835 @@ +# pytorch_diffusion + derived encoder decoder +import math +import torch +import torch.nn as nn +import numpy as np +from einops import rearrange + +from ldm.util import instantiate_from_config +from ldm.modules.attention import LinearAttention + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: + From Fairseq. + Build sinusoidal embeddings. 
+ This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". + """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0,1,0,0)) + return emb + + +def nonlinearity(x): + # swish + return x*torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=0) + + def forward(self, x): + if self.with_conv: + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, + dropout, temb_channels=512): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, + out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + else: + self.nin_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x+h + + +class LinAttnBlock(LinearAttention): + """to match AttnBlock usage""" + def __init__(self, in_channels): + super().__init__(dim=in_channels, heads=1, dim_head=in_channels) + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = 
Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = q.reshape(b,c,h*w) + q = q.permute(0,2,1) # b,hw,c + k = k.reshape(b,c,h*w) # b,c,hw + w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b,c,h*w) + w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) + h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_ = h_.reshape(b,c,h,w) + + h_ = self.proj_out(h_) + + return x+h_ + + +def make_attn(in_channels, attn_type="vanilla"): + assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' + print(f"making attention of type '{attn_type}' with {in_channels} in_channels") + if attn_type == "vanilla": + return AttnBlock(in_channels) + elif attn_type == "none": + return nn.Identity(in_channels) + else: + return LinAttnBlock(in_channels) + + +class Model(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch*4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList([ + torch.nn.Linear(self.ch, + self.temb_ch), + torch.nn.Linear(self.temb_ch, + self.temb_ch), + ]) + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = 
nn.ModuleList() + block_out = ch*ch_mult[i_level] + skip_in = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + if i_block == self.num_res_blocks: + skip_in = ch*in_ch_mult[i_level] + block.append(ResnetBlock(in_channels=block_in+skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x, t=None, context=None): + #assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", + **ignore_kwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = 
Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + 2*z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, + attn_type="vanilla", **ignorekwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,)+tuple(ch_mult) + block_in = ch*ch_mult[self.num_resolutions-1] + curr_res = resolution // 2**(self.num_resolutions-1) + self.z_shape = (1,z_channels,curr_res,curr_res) + print("Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape))) + + # z to block_in + self.conv_in = torch.nn.Conv2d(z_channels, + block_in, + kernel_size=3, + stride=1, + padding=1) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, z): + 
#assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + if self.tanh_out: + h = torch.tanh(h) + return h + + +class SimpleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__() + self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock(in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + nn.Conv2d(2*in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True)]) + # end + self.norm_out = Normalize(in_channels) + self.conv_out = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + for i, layer in enumerate(self.model): + if i in [1,2,3]: + x = layer(x, None) + else: + x = layer(x) + + h = self.norm_out(x) + h = nonlinearity(h) + x = self.conv_out(h) + return x + + +class UpsampleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, + ch_mult=(2,2), dropout=0.0): + super().__init__() + # upsampling + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = in_channels + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.res_blocks = nn.ModuleList() + self.upsample_blocks = nn.ModuleList() + for i_level in range(self.num_resolutions): + res_block = [] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + res_block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + self.res_blocks.append(nn.ModuleList(res_block)) + if i_level != self.num_resolutions - 1: + self.upsample_blocks.append(Upsample(block_in, True)) + curr_res = curr_res * 2 + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # upsampling + h = x + for k, i_level in enumerate(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.res_blocks[i_level][i_block](h, None) + if i_level != self.num_resolutions - 1: + h = self.upsample_blocks[k](h) + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class LatentRescaler(nn.Module): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): + super().__init__() + # residual block, interpolate, residual block + self.factor = factor + self.conv_in = nn.Conv2d(in_channels, + mid_channels, + kernel_size=3, + stride=1, + padding=1) + self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) 
for _ in range(depth)]) + self.attn = AttnBlock(mid_channels) + self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + + self.conv_out = nn.Conv2d(mid_channels, + out_channels, + kernel_size=1, + ) + + def forward(self, x): + x = self.conv_in(x) + for block in self.res_block1: + x = block(x, None) + x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) + x = self.attn(x) + for block in self.res_block2: + x = block(x, None) + x = self.conv_out(x) + return x + + +class MergedRescaleEncoder(nn.Module): + def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, + ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + intermediate_chn = ch * ch_mult[-1] + self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, + z_channels=intermediate_chn, double_z=False, resolution=resolution, + attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, + out_ch=None) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, + mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) + + def forward(self, x): + x = self.encoder(x) + x = self.rescaler(x) + return x + + +class MergedRescaleDecoder(nn.Module): + def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), + dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + tmp_chn = z_channels*ch_mult[-1] + self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, + resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, + ch_mult=ch_mult, resolution=resolution, ch=ch) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, + out_channels=tmp_chn, depth=rescale_module_depth) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Upsampler(nn.Module): + def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): + super().__init__() + assert out_size >= in_size + num_blocks = int(np.log2(out_size//in_size))+1 + factor_up = 1.+ (out_size % in_size) + print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") + self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, + out_channels=in_channels) + self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, + attn_resolutions=[], in_channels=None, ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)]) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Resize(nn.Module): + def __init__(self, in_channels=None, learned=False, mode="bilinear"): + super().__init__() + self.with_conv = learned + self.mode = mode + if self.with_conv: + print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") + raise NotImplementedError() + assert in_channels is not None + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=4, + stride=2, + padding=1) + + 
def forward(self, x, scale_factor=1.0): + if scale_factor==1.0: + return x + else: + x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor) + return x + +class FirstStagePostProcessor(nn.Module): + + def __init__(self, ch_mult:list, in_channels, + pretrained_model:nn.Module=None, + reshape=False, + n_channels=None, + dropout=0., + pretrained_config=None): + super().__init__() + if pretrained_config is None: + assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.pretrained_model = pretrained_model + else: + assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.instantiate_pretrained(pretrained_config) + + self.do_reshape = reshape + + if n_channels is None: + n_channels = self.pretrained_model.encoder.ch + + self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) + self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, + stride=1,padding=1) + + blocks = [] + downs = [] + ch_in = n_channels + for m in ch_mult: + blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) + ch_in = m * n_channels + downs.append(Downsample(ch_in, with_conv=False)) + + self.model = nn.ModuleList(blocks) + self.downsampler = nn.ModuleList(downs) + + + def instantiate_pretrained(self, config): + model = instantiate_from_config(config) + self.pretrained_model = model.eval() + # self.pretrained_model.train = False + for param in self.pretrained_model.parameters(): + param.requires_grad = False + + + @torch.no_grad() + def encode_with_pretrained(self,x): + c = self.pretrained_model.encode(x) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + return c + + def forward(self,x): + z_fs = self.encode_with_pretrained(x) + z = self.proj_norm(z_fs) + z = self.proj(z) + z = nonlinearity(z) + + for submodel, downmodel in zip(self.model,self.downsampler): + z = submodel(z,temb=None) + z = downmodel(z) + + if self.do_reshape: + z = rearrange(z,'b c h w -> b (h w) c') + return z + diff --git a/stable-diffusion/ldm/modules/diffusionmodules/openaimodel.py b/stable-diffusion/ldm/modules/diffusionmodules/openaimodel.py new file mode 100644 index 0000000..fcf95d1 --- /dev/null +++ b/stable-diffusion/ldm/modules/diffusionmodules/openaimodel.py @@ -0,0 +1,961 @@ +from abc import abstractmethod +from functools import partial +import math +from typing import Iterable + +import numpy as np +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from ldm.modules.diffusionmodules.util import ( + checkpoint, + conv_nd, + linear, + avg_pool_nd, + zero_module, + normalization, + timestep_embedding, +) +from ldm.modules.attention import SpatialTransformer + + +# dummy replace +def convert_module_to_f16(x): + pass + +def convert_module_to_f32(x): + pass + + +## go +class AttentionPool2d(nn.Module): + """ + Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py + """ + + def __init__( + self, + spacial_dim: int, + embed_dim: int, + num_heads_channels: int, + output_dim: int = None, + ): + super().__init__() + self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) + self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) + self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) + self.num_heads = embed_dim // num_heads_channels + self.attention = QKVAttention(self.num_heads) + 
+ def forward(self, x): + b, c, *_spatial = x.shape + x = x.reshape(b, c, -1) # NC(HW) + x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) + x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) + x = self.qkv_proj(x) + x = self.attention(x) + x = self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, SpatialTransformer): + x = layer(x, context) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + +class TransposedUpsample(nn.Module): + 'Learned 2x upsampling without padding' + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) + + def forward(self,x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. 
+ :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """ + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
+ """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + use_new_attention_order=False, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels) + self.qkv = conv_nd(1, channels, channels * 3, 1) + if use_new_attention_order: + # split qkv before split heads + self.attention = QKVAttention(self.num_heads) + else: + # split heads before split qkv + self.attention = QKVAttentionLegacy(self.num_heads) + + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x): + return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + #return pt_checkpoint(self._forward, x) # pytorch + + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial ** 2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
+ from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + 
num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(num_res_blocks + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + if level and i == num_res_blocks: + out_ch = ch + layers.append( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. 
+ """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape == (x.shape[0],) + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) + + +class EncoderUNetModel(nn.Module): + """ + The half UNet model with attention and timestep embedding. + For usage, see UNet. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + pool="adaptive", + *args, + **kwargs + ): + super().__init__() + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + 
dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + self.pool = pool + if pool == "adaptive": + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + nn.AdaptiveAvgPool2d((1, 1)), + zero_module(conv_nd(dims, ch, out_channels, 1)), + nn.Flatten(), + ) + elif pool == "attention": + assert num_head_channels != -1 + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + AttentionPool2d( + (image_size // ds), ch, num_head_channels, out_channels + ), + ) + elif pool == "spatial": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + nn.ReLU(), + nn.Linear(2048, self.out_channels), + ) + elif pool == "spatial_v2": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + normalization(2048), + nn.SiLU(), + nn.Linear(2048, self.out_channels), + ) + else: + raise NotImplementedError(f"Unexpected {pool} pooling") + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x, timesteps): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :return: an [N x K] Tensor of outputs. + """ + emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) + + results = [] + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = self.middle_block(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = th.cat(results, axis=-1) + return self.out(h) + else: + h = h.type(x.dtype) + return self.out(h) + diff --git a/stable-diffusion/ldm/modules/diffusionmodules/util.py b/stable-diffusion/ldm/modules/diffusionmodules/util.py new file mode 100644 index 0000000..a952e6c --- /dev/null +++ b/stable-diffusion/ldm/modules/diffusionmodules/util.py @@ -0,0 +1,267 @@ +# adopted from +# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# and +# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +# and +# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py +# +# thanks! 
+
+
+import os
+import math
+import torch
+import torch.nn as nn
+import numpy as np
+from einops import repeat
+
+from ldm.util import instantiate_from_config
+
+
+def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+    if schedule == "linear":
+        betas = (
+            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
+        )
+
+    elif schedule == "cosine":
+        timesteps = (
+            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
+        )
+        alphas = timesteps / (1 + cosine_s) * np.pi / 2
+        alphas = torch.cos(alphas).pow(2)
+        alphas = alphas / alphas[0]
+        betas = 1 - alphas[1:] / alphas[:-1]
+        betas = np.clip(betas, a_min=0, a_max=0.999)
+
+    elif schedule == "sqrt_linear":
+        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
+    elif schedule == "sqrt":
+        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
+    else:
+        raise ValueError(f"schedule '{schedule}' unknown.")
+    return betas.numpy()
+
+
+def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
+    if ddim_discr_method == 'uniform':
+        c = num_ddpm_timesteps // num_ddim_timesteps
+        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
+    elif ddim_discr_method == 'quad':
+        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
+    else:
+        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
+
+    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
+    # add one to get the final alpha values right (the ones from first scale to data during sampling)
+    steps_out = ddim_timesteps + 1
+    if verbose:
+        print(f'Selected timesteps for ddim sampler: {steps_out}')
+    return steps_out
+
+
+def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
+    # select alphas for computing the variance schedule
+    alphas = alphacums[ddim_timesteps]
+    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
+
+    # according to the formula provided in https://arxiv.org/abs/2010.02502
+    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
+    if verbose:
+        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+        print(f'For the chosen value of eta, which is {eta}, '
+              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
+    return sigmas, alphas, alphas_prev
+
+
+def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
+    """
+    Create a beta schedule that discretizes the given alpha_t_bar function,
+    which defines the cumulative product of (1-beta) over time from t = [0,1].
+    :param num_diffusion_timesteps: the number of betas to produce.
+    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
+                      produces the cumulative product of (1-beta) up to that
+                      part of the diffusion process.
+    :param max_beta: the maximum beta to use; use values lower than 1 to
+                     prevent singularities.
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. 
+ """ + return GroupNorm32(32, channels) + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +class HybridConditioner(nn.Module): + + def __init__(self, c_concat_config, c_crossattn_config): + super().__init__() + self.concat_conditioner = instantiate_from_config(c_concat_config) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) + + def forward(self, c_concat, c_crossattn): + c_concat = self.concat_conditioner(c_concat) + c_crossattn = self.crossattn_conditioner(c_crossattn) + return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/stable-diffusion/ldm/modules/distributions/__init__.py b/stable-diffusion/ldm/modules/distributions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-diffusion/ldm/modules/distributions/distributions.py b/stable-diffusion/ldm/modules/distributions/distributions.py new file mode 100644 index 0000000..f2b8ef9 --- /dev/null +++ b/stable-diffusion/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + 
other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/stable-diffusion/ldm/modules/ema.py b/stable-diffusion/ldm/modules/ema.py new file mode 100644 index 0000000..c8c75af --- /dev/null +++ b/stable-diffusion/ldm/modules/ema.py @@ -0,0 +1,76 @@ +import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates + else torch.tensor(-1,dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + #remove as '.'-character is not allowed in buffers + s_name = name.replace('.','') + self.m_name2s_name.update({name:s_name}) + self.register_buffer(s_name,p.clone().detach().data) + + self.collected_params = [] + + def forward(self,model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name + + def store(self, parameters): + """ + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """ + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """ + Restore the parameters stored with the `store` method. 
+ Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. After validation (or model saving), use this to + restore the former parameters. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. + """ + for c_param, param in zip(self.collected_params, parameters): + param.data.copy_(c_param.data) diff --git a/stable-diffusion/ldm/modules/encoders/__init__.py b/stable-diffusion/ldm/modules/encoders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-diffusion/ldm/modules/encoders/modules.py b/stable-diffusion/ldm/modules/encoders/modules.py new file mode 100644 index 0000000..ededbe4 --- /dev/null +++ b/stable-diffusion/ldm/modules/encoders/modules.py @@ -0,0 +1,234 @@ +import torch +import torch.nn as nn +from functools import partial +import clip +from einops import rearrange, repeat +from transformers import CLIPTokenizer, CLIPTextModel +import kornia + +from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test + + +class AbstractEncoder(nn.Module): + def __init__(self): + super().__init__() + + def encode(self, *args, **kwargs): + raise NotImplementedError + + + +class ClassEmbedder(nn.Module): + def __init__(self, embed_dim, n_classes=1000, key='class'): + super().__init__() + self.key = key + self.embedding = nn.Embedding(n_classes, embed_dim) + + def forward(self, batch, key=None): + if key is None: + key = self.key + # this is for use in crossattn + c = batch[key][:, None] + c = self.embedding(c) + return c + + +class TransformerEmbedder(AbstractEncoder): + """Some transformer encoder layers""" + def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): + super().__init__() + self.device = device + self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, + attn_layers=Encoder(dim=n_embed, depth=n_layer)) + + def forward(self, tokens): + tokens = tokens.to(self.device) # meh + z = self.transformer(tokens, return_embeddings=True) + return z + + def encode(self, x): + return self(x) + + +class BERTTokenizer(AbstractEncoder): + """ Uses a pretrained BERT tokenizer by huggingface. 
Vocab size: 30522 (?)""" + def __init__(self, device="cuda", vq_interface=True, max_length=77): + super().__init__() + from transformers import BertTokenizerFast # TODO: add to reuquirements + self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") + self.device = device + self.vq_interface = vq_interface + self.max_length = max_length + + def forward(self, text): + batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"].to(self.device) + return tokens + + @torch.no_grad() + def encode(self, text): + tokens = self(text) + if not self.vq_interface: + return tokens + return None, None, [None, None, tokens] + + def decode(self, text): + return text + + +class BERTEmbedder(AbstractEncoder): + """Uses the BERT tokenizr model and add some transformer encoder layers""" + def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, + device="cuda",use_tokenizer=True, embedding_dropout=0.0): + super().__init__() + self.use_tknz_fn = use_tokenizer + if self.use_tknz_fn: + self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) + self.device = device + self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, + attn_layers=Encoder(dim=n_embed, depth=n_layer), + emb_dropout=embedding_dropout) + + def forward(self, text): + if self.use_tknz_fn: + tokens = self.tknz_fn(text)#.to(self.device) + else: + tokens = text + z = self.transformer(tokens, return_embeddings=True) + return z + + def encode(self, text): + # output of length 77 + return self(text) + + +class SpatialRescaler(nn.Module): + def __init__(self, + n_stages=1, + method='bilinear', + multiplier=0.5, + in_channels=3, + out_channels=None, + bias=False): + super().__init__() + self.n_stages = n_stages + assert self.n_stages >= 0 + assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] + self.multiplier = multiplier + self.interpolator = partial(torch.nn.functional.interpolate, mode=method) + self.remap_output = out_channels is not None + if self.remap_output: + print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') + self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) + + def forward(self,x): + for stage in range(self.n_stages): + x = self.interpolator(x, scale_factor=self.multiplier) + + + if self.remap_output: + x = self.channel_mapper(x) + return x + + def encode(self, x): + return self(x) + +class FrozenCLIPEmbedder(AbstractEncoder): + """Uses the CLIP transformer encoder for text (from Hugging Face)""" + def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): + super().__init__() + self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + self.device = device + self.max_length = max_length + self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer(input_ids=tokens) + + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + 
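A minimal usage sketch for the frozen CLIP text encoder above, assuming the Hugging Face weights for `openai/clip-vit-large-patch14` can be loaded (shapes are those of ViT-L/14):

```python
# Minimal sketch, assuming the openai/clip-vit-large-patch14 weights are available to transformers.
import torch
from ldm.modules.encoders.modules import FrozenCLIPEmbedder

embedder = FrozenCLIPEmbedder(device="cpu")                  # parameters are frozen on construction
with torch.no_grad():
    z = embedder.encode(["a 3D portrait, studio lighting"])  # list of prompts -> token embeddings
print(z.shape)  # torch.Size([1, 77, 768]): one 768-d vector per token position (max_length=77)
```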
+ +class FrozenCLIPTextEmbedder(nn.Module): + """ + Uses the CLIP transformer encoder for text. + """ + def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): + super().__init__() + self.model, _ = clip.load(version, jit=False, device="cpu") + self.device = device + self.max_length = max_length + self.n_repeat = n_repeat + self.normalize = normalize + + def freeze(self): + self.model = self.model.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + tokens = clip.tokenize(text).to(self.device) + z = self.model.encode_text(tokens) + if self.normalize: + z = z / torch.linalg.norm(z, dim=1, keepdim=True) + return z + + def encode(self, text): + z = self(text) + if z.ndim==2: + z = z[:, None, :] + z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) + return z + + +class FrozenClipImageEmbedder(nn.Module): + """ + Uses the CLIP image encoder. + """ + def __init__( + self, + model, + jit=False, + device='cuda' if torch.cuda.is_available() else 'cpu', + antialias=False, + ): + super().__init__() + self.model, _ = clip.load(name=model, device=device, jit=jit) + + self.antialias = antialias + + self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) + self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) + + def preprocess(self, x): + # normalize to [0,1] + x = kornia.geometry.resize(x, (224, 224), + interpolation='bicubic',align_corners=True, + antialias=self.antialias) + x = (x + 1.) / 2. + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def forward(self, x): + # x is assumed to be in range [-1,1] + return self.model.encode_image(self.preprocess(x)) + + +if __name__ == "__main__": + from ldm.util import count_params + model = FrozenCLIPEmbedder() + count_params(model, verbose=True) \ No newline at end of file diff --git a/stable-diffusion/ldm/modules/image_degradation/__init__.py b/stable-diffusion/ldm/modules/image_degradation/__init__.py new file mode 100644 index 0000000..7836cad --- /dev/null +++ b/stable-diffusion/ldm/modules/image_degradation/__init__.py @@ -0,0 +1,2 @@ +from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr +from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/stable-diffusion/ldm/modules/image_degradation/bsrgan.py b/stable-diffusion/ldm/modules/image_degradation/bsrgan.py new file mode 100644 index 0000000..32ef561 --- /dev/null +++ b/stable-diffusion/ldm/modules/image_degradation/bsrgan.py @@ -0,0 +1,730 @@ +# -*- coding: utf-8 -*- +""" +# -------------------------------------------- +# Super-Resolution +# -------------------------------------------- +# +# Kai Zhang (cskaizhang@gmail.com) +# https://github.com/cszn +# From 2019/03--2021/08 +# -------------------------------------------- +""" + +import numpy as np +import cv2 +import torch + +from functools import partial +import random +from scipy import ndimage +import scipy +import scipy.stats as ss +from scipy.interpolate import interp2d +from scipy.linalg import orth +import albumentations + +import ldm.modules.image_degradation.utils_image as util + + +def modcrop_np(img, sf): + ''' + Args: + img: numpy image, WxH or WxHxC + sf: scale factor + Return: + cropped image + ''' + w, h = img.shape[:2] + im = np.copy(img) + return 
im[:w - w % sf, :h - h % sf, ...] + + +""" +# -------------------------------------------- +# anisotropic Gaussian kernels +# -------------------------------------------- +""" + + +def analytic_kernel(k): + """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" + k_size = k.shape[0] + # Calculate the big kernels size + big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) + # Loop over the small kernel to fill the big one + for r in range(k_size): + for c in range(k_size): + big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k + # Crop the edges of the big kernel to ignore very small values and increase run time of SR + crop = k_size // 2 + cropped_big_k = big_k[crop:-crop, crop:-crop] + # Normalize to 1 + return cropped_big_k / cropped_big_k.sum() + + +def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): + """ generate an anisotropic Gaussian kernel + Args: + ksize : e.g., 15, kernel size + theta : [0, pi], rotation angle range + l1 : [0.1,50], scaling of eigenvalues + l2 : [0.1,l1], scaling of eigenvalues + If l1 = l2, will get an isotropic Gaussian kernel. + Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + 
np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = sigma + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) + arg = -(x * x + y * y) / (2 * std * std) + h = np.exp(arg) + h[h < scipy.finfo(float).eps * h.max()] = 0 + sumh = h.sum() + if sumh != 0: + h = h / sumh + return h + + +def fspecial_laplacian(alpha): + alpha = max([0, min([alpha, 1])]) + h1 = alpha / (alpha + 1) + h2 = (1 - alpha) / (alpha + 1) + h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] + h = np.array(h) + return h + + +def fspecial(filter_type, *args, **kwargs): + ''' + python code from: + https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py + ''' + if filter_type == 'gaussian': + return fspecial_gaussian(*args, **kwargs) + if filter_type == 'laplacian': + return fspecial_laplacian(*args, **kwargs) + + +""" +# -------------------------------------------- +# degradation models +# -------------------------------------------- +""" + + +def bicubic_degradation(x, sf=3): + ''' + Args: + x: HxWxC image, [0, 1] + sf: down-scale factor + Return: + bicubicly downsampled LR image + ''' + x = util.imresize_np(x, scale=1 / sf) + return x + + +def srmd_degradation(x, k, sf=3): + ''' blur + bicubic downsampling + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2018learning, + title={Learning a single convolutional super-resolution network for multiple degradations}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={3262--3271}, + year={2018} + } + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' + x = bicubic_degradation(x, sf=sf) + return x + + +def dpsr_degradation(x, k, sf=3): + ''' bicubic downsampling + blur + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2019deep, + title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={1671--1681}, + year={2019} + 
} + ''' + x = bicubic_degradation(x, sf=sf) + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + return x + + +def classical_degradation(x, k, sf=3): + ''' blur + downsampling + Args: + x: HxWxC image, [0, 1]/[0, 255] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) + st = 0 + return x[st::sf, st::sf, ...] + + +def add_sharpening(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. borrowed from real-ESRGAN + Input image: I; Blurry image: B. + 1. K = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * K + (1 - Mask) * I + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. + threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. +# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. 
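+        # Random 3x3 colour covariance (U^T D U with random orthonormal U), so the Gaussian noise is correlated across RGB channels.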
+ D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. + vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. + noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(30, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] # nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + elif i == 1: + image = add_blur(image, sf=sf) + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] # nearest downsampling + image = np.clip(image, 0.0, 1.0) + + elif i == 3: + # downsample3 + image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + image = np.clip(image, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + image = add_JPEG_noise(image) + + # elif i == 6: + # # add processed camera sensor noise + # if random.random() < isp_prob and isp_model is not None: + # with torch.no_grad(): + # img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + image = add_JPEG_noise(image) + image = util.single2uint(image) + example = {"image":image} + return example + + +# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... +def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): + """ + This is an extended degradation model by combining + the degradation models of BSRGAN and Real-ESRGAN + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + use_shuffle: the degradation shuffle + use_sharp: sharpening the img + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + if use_sharp: + img = add_sharpening(img) + hq = img.copy() + + if random.random() < shuffle_prob: + shuffle_order = random.sample(range(13), 13) + else: + shuffle_order = list(range(13)) + # local shuffle for noise, JPEG is always the last one + shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) + shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) + + poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 + + for i in shuffle_order: + if i == 0: + img = add_blur(img, sf=sf) + elif i == 1: + img = add_resize(img, sf=sf) + elif i == 2: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 3: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 4: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 5: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + elif i == 6: + img = add_JPEG_noise(img) + elif i == 7: + img = add_blur(img, sf=sf) + elif i == 8: + img = add_resize(img, sf=sf) + elif i == 9: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 10: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 11: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 12: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + else: + print('check the shuffle!') + + # resize to desired size + img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), + interpolation=random.choice([1, 2, 3])) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf, lq_patchsize) + + return img, hq + + +if __name__ == '__main__': + print("hey") + img = util.imread_uint('utils/test.png', 3) + print(img) + img = util.uint2single(img) + print(img) + img = img[:448, :448] + h = img.shape[0] // 4 + print("resizing to", h) + sf = 4 + deg_fn = partial(degradation_bsrgan_variant, sf=sf) + for i in range(20): + print(i) + img_lq = deg_fn(img) + print(img_lq) + img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"] + print(img_lq.shape) + print("bicubic", img_lq_bicubic.shape) + print(img_hq.shape) + lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) + util.imsave(img_concat, str(i) + '.png') + + diff --git a/stable-diffusion/ldm/modules/image_degradation/bsrgan_light.py b/stable-diffusion/ldm/modules/image_degradation/bsrgan_light.py new file mode 100644 index 0000000..9e1f823 --- /dev/null +++ b/stable-diffusion/ldm/modules/image_degradation/bsrgan_light.py @@ -0,0 +1,650 @@ +# -*- coding: utf-8 -*- +import numpy as np +import cv2 +import torch + +from functools import partial +import random +from scipy import ndimage +import scipy +import scipy.stats as ss +from scipy.interpolate import interp2d +from scipy.linalg import orth +import albumentations + +import 
ldm.modules.image_degradation.utils_image as util + +""" +# -------------------------------------------- +# Super-Resolution +# -------------------------------------------- +# +# Kai Zhang (cskaizhang@gmail.com) +# https://github.com/cszn +# From 2019/03--2021/08 +# -------------------------------------------- +""" + + +def modcrop_np(img, sf): + ''' + Args: + img: numpy image, WxH or WxHxC + sf: scale factor + Return: + cropped image + ''' + w, h = img.shape[:2] + im = np.copy(img) + return im[:w - w % sf, :h - h % sf, ...] + + +""" +# -------------------------------------------- +# anisotropic Gaussian kernels +# -------------------------------------------- +""" + + +def analytic_kernel(k): + """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" + k_size = k.shape[0] + # Calculate the big kernels size + big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) + # Loop over the small kernel to fill the big one + for r in range(k_size): + for c in range(k_size): + big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k + # Crop the edges of the big kernel to ignore very small values and increase run time of SR + crop = k_size // 2 + cropped_big_k = big_k[crop:-crop, crop:-crop] + # Normalize to 1 + return cropped_big_k / cropped_big_k.sum() + + +def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): + """ generate an anisotropic Gaussian kernel + Args: + ksize : e.g., 15, kernel size + theta : [0, pi], rotation angle range + l1 : [0.1,50], scaling of eigenvalues + l2 : [0.1,l1], scaling of eigenvalues + If l1 = l2, will get an isotropic Gaussian kernel. + Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified 
version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = sigma + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) + arg = -(x * x + y * y) / (2 * std * std) + h = np.exp(arg) + h[h < scipy.finfo(float).eps * h.max()] = 0 + sumh = h.sum() + if sumh != 0: + h = h / sumh + return h + + +def fspecial_laplacian(alpha): + alpha = max([0, min([alpha, 1])]) + h1 = alpha / (alpha + 1) + h2 = (1 - alpha) / (alpha + 1) + h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] + h = np.array(h) + return h + + +def fspecial(filter_type, *args, **kwargs): + ''' + python code from: + https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py + ''' + if filter_type == 'gaussian': + return fspecial_gaussian(*args, **kwargs) + if filter_type == 'laplacian': + return fspecial_laplacian(*args, **kwargs) + + +""" +# -------------------------------------------- +# degradation models +# -------------------------------------------- +""" + + +def bicubic_degradation(x, sf=3): + ''' + Args: + x: HxWxC image, [0, 1] + sf: down-scale factor + Return: + bicubicly downsampled LR image + ''' + x = util.imresize_np(x, scale=1 / sf) + return x + + +def srmd_degradation(x, k, sf=3): + ''' blur + bicubic downsampling + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2018learning, + title={Learning a single convolutional super-resolution network for multiple degradations}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={3262--3271}, + year={2018} + } + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') 
# 'nearest' | 'mirror' + x = bicubic_degradation(x, sf=sf) + return x + + +def dpsr_degradation(x, k, sf=3): + ''' bicubic downsampling + blur + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2019deep, + title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={1671--1681}, + year={2019} + } + ''' + x = bicubic_degradation(x, sf=sf) + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + return x + + +def classical_degradation(x, k, sf=3): + ''' blur + downsampling + Args: + x: HxWxC image, [0, 1]/[0, 255] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) + st = 0 + return x[st::sf, st::sf, ...] + + +def add_sharpening(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. borrowed from real-ESRGAN + Input image: I; Blurry image: B. + 1. K = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * K + (1 - Mask) * I + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. + threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + + wd2 = wd2/4 + wd = wd/4 + + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. 
+# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. + vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. 
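+        # Shot noise is sampled on the grayscale image and only the residual is added back, identically to all three channels.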
+ noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(80, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] 
# nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + # elif i == 1: + # image = add_blur(image, sf=sf) + + if i == 0: + pass + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.8: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] 
# nearest downsampling + + image = np.clip(image, 0.0, 1.0) + + elif i == 3: + # downsample3 + image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + image = np.clip(image, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + image = add_JPEG_noise(image) + # + # elif i == 6: + # # add processed camera sensor noise + # if random.random() < isp_prob and isp_model is not None: + # with torch.no_grad(): + # img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + image = add_JPEG_noise(image) + image = util.single2uint(image) + example = {"image": image} + return example + + + + +if __name__ == '__main__': + print("hey") + img = util.imread_uint('utils/test.png', 3) + img = img[:448, :448] + h = img.shape[0] // 4 + print("resizing to", h) + sf = 4 + deg_fn = partial(degradation_bsrgan_variant, sf=sf) + for i in range(20): + print(i) + img_hq = img + img_lq = deg_fn(img)["image"] + img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) + print(img_lq) + img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] + print(img_lq.shape) + print("bicubic", img_lq_bicubic.shape) + print(img_hq.shape) + lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), + (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) + util.imsave(img_concat, str(i) + '.png') diff --git a/stable-diffusion/ldm/modules/image_degradation/utils/test.png b/stable-diffusion/ldm/modules/image_degradation/utils/test.png new file mode 100644 index 0000000..4249b43 Binary files /dev/null and b/stable-diffusion/ldm/modules/image_degradation/utils/test.png differ diff --git a/stable-diffusion/ldm/modules/image_degradation/utils_image.py b/stable-diffusion/ldm/modules/image_degradation/utils_image.py new file mode 100644 index 0000000..0175f15 --- /dev/null +++ b/stable-diffusion/ldm/modules/image_degradation/utils_image.py @@ -0,0 +1,916 @@ +import os +import math +import random +import numpy as np +import torch +import cv2 +from torchvision.utils import make_grid +from datetime import datetime +#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py + + +os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" + + +''' +# -------------------------------------------- +# Kai Zhang (github: https://github.com/cszn) +# 03/Mar/2019 +# -------------------------------------------- +# https://github.com/twhui/SRGAN-pyTorch +# https://github.com/xinntao/BasicSR +# -------------------------------------------- +''' + + +IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif'] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def get_timestamp(): + return datetime.now().strftime('%y%m%d-%H%M%S') + + +def imshow(x, title=None, cbar=False, figsize=None): + plt.figure(figsize=figsize) + plt.imshow(np.squeeze(x), 
interpolation='nearest', cmap='gray') + if title: + plt.title(title) + if cbar: + plt.colorbar() + plt.show() + + +def surf(Z, cmap='rainbow', figsize=None): + plt.figure(figsize=figsize) + ax3 = plt.axes(projection='3d') + + w, h = Z.shape[:2] + xx = np.arange(0,w,1) + yy = np.arange(0,h,1) + X, Y = np.meshgrid(xx, yy) + ax3.plot_surface(X,Y,Z,cmap=cmap) + #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap) + plt.show() + + +''' +# -------------------------------------------- +# get image pathes +# -------------------------------------------- +''' + + +def get_image_paths(dataroot): + paths = None # return None if dataroot is None + if dataroot is not None: + paths = sorted(_get_paths_from_images(dataroot)) + return paths + + +def _get_paths_from_images(path): + assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) + images = [] + for dirpath, _, fnames in sorted(os.walk(path)): + for fname in sorted(fnames): + if is_image_file(fname): + img_path = os.path.join(dirpath, fname) + images.append(img_path) + assert images, '{:s} has no valid image file'.format(path) + return images + + +''' +# -------------------------------------------- +# split large images into small images +# -------------------------------------------- +''' + + +def patches_from_image(img, p_size=512, p_overlap=64, p_max=800): + w, h = img.shape[:2] + patches = [] + if w > p_max and h > p_max: + w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int)) + h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int)) + w1.append(w-p_size) + h1.append(h-p_size) +# print(w1) +# print(h1) + for i in w1: + for j in h1: + patches.append(img[i:i+p_size, j:j+p_size,:]) + else: + patches.append(img) + + return patches + + +def imssave(imgs, img_path): + """ + imgs: list, N images of size WxHxC + """ + img_name, ext = os.path.splitext(os.path.basename(img_path)) + + for i, img in enumerate(imgs): + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png') + cv2.imwrite(new_path, img) + + +def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000): + """ + split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size), + and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max) + will be splitted. + Args: + original_dataroot: + taget_dataroot: + p_size: size of small images + p_overlap: patch size in training is a good choice + p_max: images with smaller size than (p_max)x(p_max) keep unchanged. + """ + paths = get_image_paths(original_dataroot) + for img_path in paths: + # img_name, ext = os.path.splitext(os.path.basename(img_path)) + img = imread_uint(img_path, n_channels=n_channels) + patches = patches_from_image(img, p_size, p_overlap, p_max) + imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path))) + #if original_dataroot == taget_dataroot: + #del img_path + +''' +# -------------------------------------------- +# makedir +# -------------------------------------------- +''' + + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + + +def mkdirs(paths): + if isinstance(paths, str): + mkdir(paths) + else: + for path in paths: + mkdir(path) + + +def mkdir_and_rename(path): + if os.path.exists(path): + new_name = path + '_archived_' + get_timestamp() + print('Path already exists. 
Rename it to [{:s}]'.format(new_name)) + os.rename(path, new_name) + os.makedirs(path) + + +''' +# -------------------------------------------- +# read image from path +# opencv is fast, but read BGR numpy image +# -------------------------------------------- +''' + + +# -------------------------------------------- +# get uint8 image of size HxWxn_channles (RGB) +# -------------------------------------------- +def imread_uint(path, n_channels=3): + # input: path + # output: HxWx3(RGB or GGG), or HxWx1 (G) + if n_channels == 1: + img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE + img = np.expand_dims(img, axis=2) # HxWx1 + elif n_channels == 3: + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G + if img.ndim == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG + else: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB + return img + + +# -------------------------------------------- +# matlab's imwrite +# -------------------------------------------- +def imsave(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + +def imwrite(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + + + +# -------------------------------------------- +# get single image of size HxWxn_channles (BGR) +# -------------------------------------------- +def read_img(path): + # read image by cv2 + # return: Numpy float32, HWC, BGR, [0,1] + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE + img = img.astype(np.float32) / 255. + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + # some images have 4 channels + if img.shape[2] > 3: + img = img[:, :, :3] + return img + + +''' +# -------------------------------------------- +# image format conversion +# -------------------------------------------- +# numpy(single) <---> numpy(unit) +# numpy(single) <---> tensor +# numpy(unit) <---> tensor +# -------------------------------------------- +''' + + +# -------------------------------------------- +# numpy(single) [0, 1] <---> numpy(unit) +# -------------------------------------------- + + +def uint2single(img): + + return np.float32(img/255.) + + +def single2uint(img): + + return np.uint8((img.clip(0, 1)*255.).round()) + + +def uint162single(img): + + return np.float32(img/65535.) + + +def single2uint16(img): + + return np.uint16((img.clip(0, 1)*65535.).round()) + + +# -------------------------------------------- +# numpy(unit) (HxWxC or HxW) <---> tensor +# -------------------------------------------- + + +# convert uint to 4-dimensional torch tensor +def uint2tensor4(img): + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0) + + +# convert uint to 3-dimensional torch tensor +def uint2tensor3(img): + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.) 
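+
+
+# Illustrative round trip (comment added for clarity; a sketch, not part of the
+# original utilities): the uint <-> tensor helpers here are inverses up to rounding.
+#   t = uint2tensor4(img_uint8)    # HxWxC uint8 in [0, 255] -> 1xCxHxW float in [0, 1]
+#   img_back = tensor2uint(t)      # 2/3/4-D float tensor in [0, 1] -> HxWxC uint8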
+ + +# convert 2/3/4-dimensional torch tensor to uint +def tensor2uint(img): + img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + return np.uint8((img*255.0).round()) + + +# -------------------------------------------- +# numpy(single) (HxWxC) <---> tensor +# -------------------------------------------- + + +# convert single (HxWxC) to 3-dimensional torch tensor +def single2tensor3(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float() + + +# convert single (HxWxC) to 4-dimensional torch tensor +def single2tensor4(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0) + + +# convert torch tensor to single +def tensor2single(img): + img = img.data.squeeze().float().cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + + return img + +# convert torch tensor to single +def tensor2single3(img): + img = img.data.squeeze().float().cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + elif img.ndim == 2: + img = np.expand_dims(img, axis=2) + return img + + +def single2tensor5(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0) + + +def single32tensor5(img): + return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0) + + +def single42tensor4(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float() + + +# from skimage.io import imread, imsave +def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): + ''' + Converts a torch Tensor into an image Numpy array of BGR channel order + Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order + Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) + ''' + tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp + tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] + n_dim = tensor.dim() + if n_dim == 4: + n_img = len(tensor) + img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() + img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR + elif n_dim == 3: + img_np = tensor.numpy() + img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR + elif n_dim == 2: + img_np = tensor.numpy() + else: + raise TypeError( + 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) + if out_type == np.uint8: + img_np = (img_np * 255.0).round() + # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. + return img_np.astype(out_type) + + +''' +# -------------------------------------------- +# Augmentation, flipe and/or rotate +# -------------------------------------------- +# The following two are enough. 
+# (1) augmet_img: numpy image of WxHxC or WxH +# (2) augment_img_tensor4: tensor image 1xCxWxH +# -------------------------------------------- +''' + + +def augment_img(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + if mode == 0: + return img + elif mode == 1: + return np.flipud(np.rot90(img)) + elif mode == 2: + return np.flipud(img) + elif mode == 3: + return np.rot90(img, k=3) + elif mode == 4: + return np.flipud(np.rot90(img, k=2)) + elif mode == 5: + return np.rot90(img) + elif mode == 6: + return np.rot90(img, k=2) + elif mode == 7: + return np.flipud(np.rot90(img, k=3)) + + +def augment_img_tensor4(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + if mode == 0: + return img + elif mode == 1: + return img.rot90(1, [2, 3]).flip([2]) + elif mode == 2: + return img.flip([2]) + elif mode == 3: + return img.rot90(3, [2, 3]) + elif mode == 4: + return img.rot90(2, [2, 3]).flip([2]) + elif mode == 5: + return img.rot90(1, [2, 3]) + elif mode == 6: + return img.rot90(2, [2, 3]) + elif mode == 7: + return img.rot90(3, [2, 3]).flip([2]) + + +def augment_img_tensor(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + img_size = img.size() + img_np = img.data.cpu().numpy() + if len(img_size) == 3: + img_np = np.transpose(img_np, (1, 2, 0)) + elif len(img_size) == 4: + img_np = np.transpose(img_np, (2, 3, 1, 0)) + img_np = augment_img(img_np, mode=mode) + img_tensor = torch.from_numpy(np.ascontiguousarray(img_np)) + if len(img_size) == 3: + img_tensor = img_tensor.permute(2, 0, 1) + elif len(img_size) == 4: + img_tensor = img_tensor.permute(3, 2, 0, 1) + + return img_tensor.type_as(img) + + +def augment_img_np3(img, mode=0): + if mode == 0: + return img + elif mode == 1: + return img.transpose(1, 0, 2) + elif mode == 2: + return img[::-1, :, :] + elif mode == 3: + img = img[::-1, :, :] + img = img.transpose(1, 0, 2) + return img + elif mode == 4: + return img[:, ::-1, :] + elif mode == 5: + img = img[:, ::-1, :] + img = img.transpose(1, 0, 2) + return img + elif mode == 6: + img = img[:, ::-1, :] + img = img[::-1, :, :] + return img + elif mode == 7: + img = img[:, ::-1, :] + img = img[::-1, :, :] + img = img.transpose(1, 0, 2) + return img + + +def augment_imgs(img_list, hflip=True, rot=True): + # horizontal flip OR rotate + hflip = hflip and random.random() < 0.5 + vflip = rot and random.random() < 0.5 + rot90 = rot and random.random() < 0.5 + + def _augment(img): + if hflip: + img = img[:, ::-1, :] + if vflip: + img = img[::-1, :, :] + if rot90: + img = img.transpose(1, 0, 2) + return img + + return [_augment(img) for img in img_list] + + +''' +# -------------------------------------------- +# modcrop and shave +# -------------------------------------------- +''' + + +def modcrop(img_in, scale): + # img_in: Numpy, HWC or HW + img = np.copy(img_in) + if img.ndim == 2: + H, W = img.shape + H_r, W_r = H % scale, W % scale + img = img[:H - H_r, :W - W_r] + elif img.ndim == 3: + H, W, C = img.shape + H_r, W_r = H % scale, W % scale + img = img[:H - H_r, :W - W_r, :] + else: + raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim)) + return img + + +def shave(img_in, border=0): + # img_in: Numpy, HWC or HW + img = np.copy(img_in) + h, w = img.shape[:2] + img = img[border:h-border, border:w-border] + return img + + +''' +# -------------------------------------------- +# 
image processing process on numpy image +# channel_convert(in_c, tar_type, img_list): +# rgb2ycbcr(img, only_y=True): +# bgr2ycbcr(img, only_y=True): +# ycbcr2rgb(img): +# -------------------------------------------- +''' + + +def rgb2ycbcr(img, only_y=True): + '''same as matlab rgb2ycbcr + only_y: only return Y channel + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + if only_y: + rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0 + else: + rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], + [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. + return rlt.astype(in_img_type) + + +def ycbcr2rgb(img): + '''same as matlab ycbcr2rgb + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], + [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. + return rlt.astype(in_img_type) + + +def bgr2ycbcr(img, only_y=True): + '''bgr version of rgb2ycbcr + only_y: only return Y channel + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + if only_y: + rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0 + else: + rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], + [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. 
+ return rlt.astype(in_img_type) + + +def channel_convert(in_c, tar_type, img_list): + # conversion among BGR, gray and y + if in_c == 3 and tar_type == 'gray': # BGR to gray + gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list] + return [np.expand_dims(img, axis=2) for img in gray_list] + elif in_c == 3 and tar_type == 'y': # BGR to y + y_list = [bgr2ycbcr(img, only_y=True) for img in img_list] + return [np.expand_dims(img, axis=2) for img in y_list] + elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR + return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list] + else: + return img_list + + +''' +# -------------------------------------------- +# metric, PSNR and SSIM +# -------------------------------------------- +''' + + +# -------------------------------------------- +# PSNR +# -------------------------------------------- +def calculate_psnr(img1, img2, border=0): + # img1 and img2 have range [0, 255] + #img1 = img1.squeeze() + #img2 = img2.squeeze() + if not img1.shape == img2.shape: + raise ValueError('Input images must have the same dimensions.') + h, w = img1.shape[:2] + img1 = img1[border:h-border, border:w-border] + img2 = img2[border:h-border, border:w-border] + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + mse = np.mean((img1 - img2)**2) + if mse == 0: + return float('inf') + return 20 * math.log10(255.0 / math.sqrt(mse)) + + +# -------------------------------------------- +# SSIM +# -------------------------------------------- +def calculate_ssim(img1, img2, border=0): + '''calculate SSIM + the same outputs as MATLAB's + img1, img2: [0, 255] + ''' + #img1 = img1.squeeze() + #img2 = img2.squeeze() + if not img1.shape == img2.shape: + raise ValueError('Input images must have the same dimensions.') + h, w = img1.shape[:2] + img1 = img1[border:h-border, border:w-border] + img2 = img2[border:h-border, border:w-border] + + if img1.ndim == 2: + return ssim(img1, img2) + elif img1.ndim == 3: + if img1.shape[2] == 3: + ssims = [] + for i in range(3): + ssims.append(ssim(img1[:,:,i], img2[:,:,i])) + return np.array(ssims).mean() + elif img1.shape[2] == 1: + return ssim(np.squeeze(img1), np.squeeze(img2)) + else: + raise ValueError('Wrong input image dimensions.') + + +def ssim(img1, img2): + C1 = (0.01 * 255)**2 + C2 = (0.03 * 255)**2 + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + kernel = cv2.getGaussianKernel(11, 1.5) + window = np.outer(kernel, kernel.transpose()) + + mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid + mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] + mu1_sq = mu1**2 + mu2_sq = mu2**2 + mu1_mu2 = mu1 * mu2 + sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq + sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq + sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * + (sigma1_sq + sigma2_sq + C2)) + return ssim_map.mean() + + +''' +# -------------------------------------------- +# matlab's bicubic imresize (numpy and torch) [0, 1] +# -------------------------------------------- +''' + + +# matlab 'imresize' function, now only support 'bicubic' +def cubic(x): + absx = torch.abs(x) + absx2 = absx**2 + absx3 = absx**3 + return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \ + (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx)) + + +def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, 
antialiasing): + if (scale < 1) and (antialiasing): + # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width + kernel_width = kernel_width / scale + + # Output-space coordinates + x = torch.linspace(1, out_length, out_length) + + # Input-space coordinates. Calculate the inverse mapping such that 0.5 + # in output space maps to 0.5 in input space, and 0.5+scale in output + # space maps to 1.5 in input space. + u = x / scale + 0.5 * (1 - 1 / scale) + + # What is the left-most pixel that can be involved in the computation? + left = torch.floor(u - kernel_width / 2) + + # What is the maximum number of pixels that can be involved in the + # computation? Note: it's OK to use an extra pixel here; if the + # corresponding weights are all zero, it will be eliminated at the end + # of this function. + P = math.ceil(kernel_width) + 2 + + # The indices of the input pixels involved in computing the k-th output + # pixel are in row k of the indices matrix. + indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view( + 1, P).expand(out_length, P) + + # The weights used to compute the k-th output pixel are in row k of the + # weights matrix. + distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices + # apply cubic kernel + if (scale < 1) and (antialiasing): + weights = scale * cubic(distance_to_center * scale) + else: + weights = cubic(distance_to_center) + # Normalize the weights matrix so that each row sums to 1. + weights_sum = torch.sum(weights, 1).view(out_length, 1) + weights = weights / weights_sum.expand(out_length, P) + + # If a column in weights is all zero, get rid of it. only consider the first and last column. + weights_zero_tmp = torch.sum((weights == 0), 0) + if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): + indices = indices.narrow(1, 1, P - 2) + weights = weights.narrow(1, 1, P - 2) + if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): + indices = indices.narrow(1, 0, P - 2) + weights = weights.narrow(1, 0, P - 2) + weights = weights.contiguous() + indices = indices.contiguous() + sym_len_s = -indices.min() + 1 + sym_len_e = indices.max() - in_length + indices = indices + sym_len_s - 1 + return weights, indices, int(sym_len_s), int(sym_len_e) + + +# -------------------------------------------- +# imresize for tensor image [0, 1] +# -------------------------------------------- +def imresize(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: pytorch tensor, CHW or HW [0,1] + # output: CHW or HW [0,1] w/o round + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(0) + in_C, in_H, in_W = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
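+    # Note (comment added for clarity): the steps below mirror MATLAB's bicubic
+    # imresize. calculate_weights_indices() returns, per output position, the cubic
+    # kernel weights and the source indices (with symmetric boundary padding); the
+    # resize is then applied as an H pass followed by a W pass, each a small
+    # matrix-vector product per output row/column.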
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W) + img_aug.narrow(1, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:, :sym_len_Hs, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[:, -sym_len_He:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(in_C, out_H, in_W) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We) + out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :, :sym_len_Ws] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, :, -sym_len_We:] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(in_C, out_H, out_W) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + return out_2 + + +# -------------------------------------------- +# imresize for numpy image [0, 1] +# -------------------------------------------- +def imresize_np(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: Numpy, HWC or HW [0,1] + # output: HWC or HW [0,1] w/o round + img = torch.from_numpy(img) + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(2) + + in_H, in_W, in_C = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) + img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:sym_len_Hs, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[-sym_len_He:, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(out_H, in_W, in_C) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) + out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :sym_len_Ws, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, -sym_len_We:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(out_H, out_W, in_C) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + + return out_2.numpy() + + +if __name__ == '__main__': + print('---') +# img = imread_uint('test.bmp', 3) +# img = uint2single(img) +# img_bicubic = imresize_np(img, 1/4) \ No newline at end of file diff --git a/stable-diffusion/ldm/modules/losses/__init__.py b/stable-diffusion/ldm/modules/losses/__init__.py new file mode 100644 index 0000000..876d7c5 --- /dev/null +++ b/stable-diffusion/ldm/modules/losses/__init__.py @@ -0,0 +1 @@ +from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator \ No newline at end of file diff --git a/stable-diffusion/ldm/modules/losses/contperceptual.py b/stable-diffusion/ldm/modules/losses/contperceptual.py new file mode 100644 index 0000000..672c1e3 --- /dev/null +++ b/stable-diffusion/ldm/modules/losses/contperceptual.py @@ -0,0 +1,111 @@ +import torch +import torch.nn as nn + +from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
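+
+# Overview (comment added for clarity, not in the original file): for the generator
+# pass (optimizer_idx == 0) the loss below combines an L1 reconstruction term and an
+# LPIPS perceptual term, both scaled by a learned log-variance, a KL regularizer on
+# the latent posterior, and an adversarial term whose weight is balanced from the
+# gradient norms at the last decoder layer:
+#   loss = weighted_nll_loss + kl_weight * kl_loss + d_weight * disc_factor * g_loss
+# The discriminator pass (optimizer_idx == 1) trains NLayerDiscriminator with a hinge
+# or vanilla GAN loss, gated by disc_start.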
+ + +class LPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_loss="hinge"): + + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + self.kl_weight = kl_weight + self.pixel_weight = pixelloss_weight + self.perceptual_loss = LPIPS().eval() + self.perceptual_weight = perceptual_weight + # output log variance + self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm + ).apply(weights_init) + self.discriminator_iter_start = disc_start + self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, inputs, reconstructions, posteriors, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", + weights=None): + rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + + nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar + weighted_nll_loss = nll_loss + if weights is not None: + weighted_nll_loss = weights*nll_loss + weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] + nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + kl_loss = posteriors.kl() + kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + if self.disc_factor > 0.0: + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + else: + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), + "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): 
torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log + diff --git a/stable-diffusion/ldm/modules/losses/vqperceptual.py b/stable-diffusion/ldm/modules/losses/vqperceptual.py new file mode 100644 index 0000000..f699817 --- /dev/null +++ b/stable-diffusion/ldm/modules/losses/vqperceptual.py @@ -0,0 +1,167 @@ +import torch +from torch import nn +import torch.nn.functional as F +from einops import repeat + +from taming.modules.discriminator.model import NLayerDiscriminator, weights_init +from taming.modules.losses.lpips import LPIPS +from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss + + +def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights): + assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0] + loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3]) + loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3]) + loss_real = (weights * loss_real).sum() / weights.sum() + loss_fake = (weights * loss_fake).sum() / weights.sum() + d_loss = 0.5 * (loss_real + loss_fake) + return d_loss + +def adopt_weight(weight, global_step, threshold=0, value=0.): + if global_step < threshold: + weight = value + return weight + + +def measure_perplexity(predicted_indices, n_embed): + # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py + # eval cluster perplexity. 
when perplexity == num_embeddings then all clusters are used exactly equally + encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) + avg_probs = encodings.mean(0) + perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() + cluster_use = torch.sum(avg_probs > 0) + return perplexity, cluster_use + +def l1(x, y): + return torch.abs(x-y) + + +def l2(x, y): + return torch.pow((x-y), 2) + + +class VQLPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips", + pixel_loss="l1"): + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + assert perceptual_loss in ["lpips", "clips", "dists"] + assert pixel_loss in ["l1", "l2"] + self.codebook_weight = codebook_weight + self.pixel_weight = pixelloss_weight + if perceptual_loss == "lpips": + print(f"{self.__class__.__name__}: Running with LPIPS.") + self.perceptual_loss = LPIPS().eval() + else: + raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<") + self.perceptual_weight = perceptual_weight + + if pixel_loss == "l1": + self.pixel_loss = l1 + else: + self.pixel_loss = l2 + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm, + ndf=disc_ndf + ).apply(weights_init) + self.discriminator_iter_start = disc_start + if disc_loss == "hinge": + self.disc_loss = hinge_d_loss + elif disc_loss == "vanilla": + self.disc_loss = vanilla_d_loss + else: + raise ValueError(f"Unknown GAN loss '{disc_loss}'.") + print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + self.n_classes = n_classes + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", predicted_indices=None): + if not exists(codebook_loss): + codebook_loss = torch.tensor([0.]).to(inputs.device) + #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + else: + p_loss = torch.tensor([0.0]) + + nll_loss = rec_loss + #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + nll_loss = torch.mean(nll_loss) + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + 
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/quant_loss".format(split): codebook_loss.detach().mean(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/p_loss".format(split): p_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + if predicted_indices is not None: + assert self.n_classes is not None + with torch.no_grad(): + perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes) + log[f"{split}/perplexity"] = perplexity + log[f"{split}/cluster_usage"] = cluster_usage + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log diff --git a/stable-diffusion/ldm/modules/x_transformer.py b/stable-diffusion/ldm/modules/x_transformer.py new file mode 100644 index 0000000..5fc15bf --- /dev/null +++ b/stable-diffusion/ldm/modules/x_transformer.py @@ -0,0 +1,641 @@ +"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" +import torch +from torch import nn, einsum +import torch.nn.functional as F +from functools import partial +from inspect import isfunction +from collections import namedtuple +from einops import rearrange, repeat, reduce + +# constants + +DEFAULT_DIM_HEAD = 64 + +Intermediates = namedtuple('Intermediates', [ + 'pre_softmax_attn', + 'post_softmax_attn' +]) + +LayerIntermediates = namedtuple('Intermediates', [ + 'hiddens', + 'attn_intermediates' +]) + + +class AbsolutePositionalEmbedding(nn.Module): + def __init__(self, dim, max_seq_len): + super().__init__() + self.emb = nn.Embedding(max_seq_len, dim) + self.init_() + + def init_(self): + nn.init.normal_(self.emb.weight, std=0.02) + + def forward(self, x): + n = torch.arange(x.shape[1], device=x.device) + return self.emb(n)[None, :, :] + + +class FixedPositionalEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer('inv_freq', inv_freq) + + def forward(self, x, seq_dim=1, offset=0): + t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset + sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) + emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) + return emb[None, :, :] + + +# helpers + +def exists(val): + return val is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def always(val): + def inner(*args, **kwargs): + return val + return inner + + +def not_equals(val): + def inner(x): + return x != val + return inner + + +def equals(val): + def inner(x): + return x == val + return inner + + +def max_neg_value(tensor): + return -torch.finfo(tensor.dtype).max + + +# keyword argument helpers + +def pick_and_pop(keys, d): + values = list(map(lambda key: d.pop(key), keys)) + return dict(zip(keys, values)) + + +def group_dict_by_key(cond, d): + return_val = [dict(), dict()] + for key in d.keys(): + match = bool(cond(key)) + ind = int(not match) + return_val[ind][key] = d[key] + return (*return_val,) + + +def string_begins_with(prefix, str): + return str.startswith(prefix) + + +def group_by_key_prefix(prefix, d): + return group_dict_by_key(partial(string_begins_with, prefix), d) + + +def groupby_prefix_and_trim(prefix, d): + kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) + kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) + return kwargs_without_prefix, kwargs + + +# classes +class Scale(nn.Module): + def __init__(self, value, fn): + super().__init__() + self.value = value + self.fn = fn + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.value, *rest) + + +class Rezero(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + self.g = nn.Parameter(torch.zeros(1)) + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.g, *rest) + + +class ScaleNorm(nn.Module): + def __init__(self, dim, eps=1e-5): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(1)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class RMSNorm(nn.Module): + def __init__(self, dim, eps=1e-8): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class Residual(nn.Module): + def forward(self, x, residual): + return x + residual + + +class GRUGating(nn.Module): + def __init__(self, dim): + super().__init__() + self.gru = nn.GRUCell(dim, dim) + + def forward(self, x, residual): + gated_output = self.gru( + rearrange(x, 'b n d -> (b n) d'), + rearrange(residual, 'b n d -> (b n) d') + ) + + return gated_output.reshape_as(x) + + +# feedforward + +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + 
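+        # Comment added for clarity: with glu=True the input projection is GEGLU
+        # (defined above), which projects to 2 * inner_dim and gates one half with
+        # GELU of the other; otherwise a plain Linear followed by GELU is used.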
project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +# attention. +class Attention(nn.Module): + def __init__( + self, + dim, + dim_head=DEFAULT_DIM_HEAD, + heads=8, + causal=False, + mask=None, + talking_heads=False, + sparse_topk=None, + use_entmax15=False, + num_mem_kv=0, + dropout=0., + on_attn=False + ): + super().__init__() + if use_entmax15: + raise NotImplementedError("Check out entmax activation instead of softmax activation!") + self.scale = dim_head ** -0.5 + self.heads = heads + self.causal = causal + self.mask = mask + + inner_dim = dim_head * heads + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_k = nn.Linear(dim, inner_dim, bias=False) + self.to_v = nn.Linear(dim, inner_dim, bias=False) + self.dropout = nn.Dropout(dropout) + + # talking heads + self.talking_heads = talking_heads + if talking_heads: + self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + + # explicit topk sparse attention + self.sparse_topk = sparse_topk + + # entmax + #self.attn_fn = entmax15 if use_entmax15 else F.softmax + self.attn_fn = F.softmax + + # add memory key / values + self.num_mem_kv = num_mem_kv + if num_mem_kv > 0: + self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + + # attention on attention + self.attn_on_attn = on_attn + self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + rel_pos=None, + sinusoidal_emb=None, + prev_attn=None, + mem=None + ): + b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device + kv_input = default(context, x) + + q_input = x + k_input = kv_input + v_input = kv_input + + if exists(mem): + k_input = torch.cat((mem, k_input), dim=-2) + v_input = torch.cat((mem, v_input), dim=-2) + + if exists(sinusoidal_emb): + # in shortformer, the query would start at a position offset depending on the past cached memory + offset = k_input.shape[-2] - q_input.shape[-2] + q_input = q_input + sinusoidal_emb(q_input, offset=offset) + k_input = k_input + sinusoidal_emb(k_input) + + q = self.to_q(q_input) + k = self.to_k(k_input) + v = self.to_v(v_input) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) + + input_mask = None + if any(map(exists, (mask, context_mask))): + q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) + k_mask = q_mask if not exists(context) else context_mask + k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) + q_mask = rearrange(q_mask, 'b i -> b () i ()') + k_mask = rearrange(k_mask, 'b j -> b () () j') + input_mask = q_mask * k_mask + + if self.num_mem_kv > 0: + mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) + k = torch.cat((mem_k, k), dim=-2) + v = torch.cat((mem_v, v), dim=-2) + if exists(input_mask): + input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) + + dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale + mask_value = max_neg_value(dots) + + if exists(prev_attn): + dots = dots + prev_attn + + pre_softmax_attn = dots + + if talking_heads: + dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() + + if exists(rel_pos): + dots = rel_pos(dots) + + if exists(input_mask): + dots.masked_fill_(~input_mask, mask_value) + del input_mask + + if self.causal: + i, j = dots.shape[-2:] + r = torch.arange(i, device=device) + mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') + mask = F.pad(mask, (j - i, 0), value=False) + dots.masked_fill_(mask, mask_value) + del mask + + if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: + top, _ = dots.topk(self.sparse_topk, dim=-1) + vk = top[..., -1].unsqueeze(-1).expand_as(dots) + mask = dots < vk + dots.masked_fill_(mask, mask_value) + del mask + + attn = self.attn_fn(dots, dim=-1) + post_softmax_attn = attn + + attn = self.dropout(attn) + + if talking_heads: + attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + out = rearrange(out, 'b h n d -> b n (h d)') + + intermediates = Intermediates( + pre_softmax_attn=pre_softmax_attn, + post_softmax_attn=post_softmax_attn + ) + + return self.to_out(out), intermediates + + +class AttentionLayers(nn.Module): + def __init__( + self, + dim, + depth, + heads=8, + causal=False, + cross_attend=False, + only_cross=False, + use_scalenorm=False, + use_rmsnorm=False, + use_rezero=False, + rel_pos_num_buckets=32, + rel_pos_max_distance=128, + position_infused_attn=False, + custom_layers=None, + sandwich_coef=None, + par_ratio=None, + residual_attn=False, + cross_residual_attn=False, + macaron=False, + pre_norm=True, + gate_residual=False, + **kwargs + ): + super().__init__() + ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) + attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) + + dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) + + self.dim = dim + self.depth = depth + self.layers = nn.ModuleList([]) + + self.has_pos_emb = position_infused_attn + self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None + self.rotary_pos_emb = always(None) + + assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' + self.rel_pos = None + + self.pre_norm = pre_norm + + self.residual_attn = residual_attn + self.cross_residual_attn = cross_residual_attn + + norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm + norm_class = RMSNorm if use_rmsnorm else norm_class + norm_fn = partial(norm_class, dim) + + norm_fn = nn.Identity if use_rezero else norm_fn + branch_fn = Rezero if use_rezero else None + + if cross_attend and not only_cross: + default_block = ('a', 'c', 'f') + elif cross_attend and only_cross: + default_block = ('c', 'f') + else: + default_block = ('a', 'f') + + if macaron: + default_block = ('f',) + default_block + + if exists(custom_layers): + layer_types = custom_layers + elif exists(par_ratio): + par_depth = depth * len(default_block) + assert 1 < par_ratio <= par_depth, 'par ratio out of range' + default_block = tuple(filter(not_equals('f'), default_block)) + par_attn = par_depth // par_ratio + depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper + par_width = (depth_cut + depth_cut // par_attn) // par_attn + assert len(default_block) <= par_width, 'default block is too large for par_ratio' + par_block = default_block + ('f',) * (par_width - len(default_block)) + par_head = par_block * par_attn + layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) + elif exists(sandwich_coef): + assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' + layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef + else: + layer_types = default_block * depth + + self.layer_types = layer_types + self.num_attn_layers = len(list(filter(equals('a'), layer_types))) + + for layer_type in self.layer_types: + if layer_type == 'a': + layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) + elif layer_type == 'c': + layer = Attention(dim, heads=heads, **attn_kwargs) + elif layer_type == 'f': + layer = FeedForward(dim, **ff_kwargs) + layer = layer if not macaron else Scale(0.5, layer) + else: + raise Exception(f'invalid layer type {layer_type}') + + if isinstance(layer, Attention) and exists(branch_fn): + layer = branch_fn(layer) + + if gate_residual: + residual_fn = GRUGating(dim) + else: + residual_fn = Residual() + + self.layers.append(nn.ModuleList([ + norm_fn(), + layer, + residual_fn + ])) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + mems=None, + return_hiddens=False + ): + hiddens = [] + intermediates = [] + prev_attn = None + prev_cross_attn = None + + mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers + + for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): + is_last = ind == (len(self.layers) - 1) + + if layer_type == 'a': + hiddens.append(x) + layer_mem = mems.pop(0) + + residual = x + + if self.pre_norm: + x = norm(x) + + if layer_type == 'a': + out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, + prev_attn=prev_attn, mem=layer_mem) + elif layer_type == 'c': + out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) + elif layer_type == 'f': + out = block(x) + + x = residual_fn(out, residual) + + if layer_type in ('a', 'c'): + intermediates.append(inter) + + if layer_type == 'a' and self.residual_attn: + prev_attn = inter.pre_softmax_attn + elif layer_type == 'c' and self.cross_residual_attn: + prev_cross_attn = inter.pre_softmax_attn + + if not self.pre_norm and not is_last: + x = norm(x) + + if return_hiddens: + intermediates = LayerIntermediates( + hiddens=hiddens, + attn_intermediates=intermediates + ) + + return x, intermediates + + return x + + +class Encoder(AttentionLayers): + def __init__(self, **kwargs): + assert 'causal' not in kwargs, 'cannot set causality on encoder' + super().__init__(causal=False, **kwargs) + + + +class TransformerWrapper(nn.Module): + def __init__( + self, + *, + num_tokens, + max_seq_len, + attn_layers, + emb_dim=None, + max_mem_len=0., + emb_dropout=0., + num_memory_tokens=None, + tie_embedding=False, + use_pos_emb=True + ): + super().__init__() + assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' + + dim = attn_layers.dim + emb_dim = default(emb_dim, dim) + + self.max_seq_len = max_seq_len + self.max_mem_len = max_mem_len + self.num_tokens = num_tokens + + self.token_emb = nn.Embedding(num_tokens, emb_dim) + self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( + use_pos_emb and not attn_layers.has_pos_emb) else always(0) + self.emb_dropout = nn.Dropout(emb_dropout) + + self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() + self.attn_layers = attn_layers + self.norm = nn.LayerNorm(dim) + + self.init_() + + self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() + + # memory tokens (like [cls]) from Memory Transformers paper + num_memory_tokens = default(num_memory_tokens, 0) + self.num_memory_tokens = num_memory_tokens + if num_memory_tokens > 0: + self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) + + # let funnel encoder know number of memory tokens, if specified + if hasattr(attn_layers, 'num_memory_tokens'): + attn_layers.num_memory_tokens = num_memory_tokens + + def init_(self): + nn.init.normal_(self.token_emb.weight, std=0.02) + + def forward( + self, + x, + return_embeddings=False, + mask=None, + return_mems=False, + return_attn=False, + mems=None, + **kwargs + ): + b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens + x = self.token_emb(x) + x += self.pos_emb(x) + x = self.emb_dropout(x) + + x = self.project_emb(x) + + if num_mem > 0: + mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) + x = torch.cat((mem, x), dim=1) + + # auto-handle masking after appending memory tokens + if exists(mask): + mask = F.pad(mask, (num_mem, 0), value=True) + + x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) + x = self.norm(x) + + mem, x = x[:, :num_mem], x[:, num_mem:] + + out = self.to_logits(x) if not return_embeddings else x + + if return_mems: + hiddens = intermediates.hiddens + new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens + new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) + return out, new_mems + + if return_attn: + attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) + return out, attn_maps + + return out + diff --git a/stable-diffusion/ldm/util.py b/stable-diffusion/ldm/util.py new file mode 100644 index 0000000..8ba3885 --- /dev/null +++ b/stable-diffusion/ldm/util.py @@ -0,0 +1,203 @@ +import importlib + +import torch +import numpy as np +from collections import abc +from einops import rearrange +from functools import partial + +import multiprocessing as mp +from threading import Thread +from queue import Queue + +from inspect import isfunction +from PIL import Image, ImageDraw, ImageFont + + +def log_txt_as_img(wh, xc, size=10): + # wh a tuple of (width, height) + # xc a list of captions to plot + b = len(xc) + txts = list() + for bi in range(b): + txt = Image.new("RGB", wh, color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) + nc = int(40 * (wh[0] / 256)) + lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) + + try: + draw.text((0, 0), lines, fill="black", font=font) + except UnicodeEncodeError: + print("Cant encode string for logging. 
Skipping.") + + txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 + txts.append(txt) + txts = np.stack(txts) + txts = torch.tensor(txts) + return txts + + +def ismap(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] > 3) + + +def isimage(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def mean_flat(tensor): + """ + https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def count_params(model, verbose=False): + total_params = sum(p.numel() for p in model.parameters()) + if verbose: + print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.") + return total_params + + +def instantiate_from_config(config): + if not "target" in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict())) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False): + # create dummy dataset instance + + # run prefetching + if idx_to_fn: + res = func(data, worker_id=idx) + else: + res = func(data) + Q.put([idx, res]) + Q.put("Done") + + +def parallel_data_prefetch( + func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False +): + # if target_data_type not in ["ndarray", "list"]: + # raise ValueError( + # "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray." + # ) + if isinstance(data, np.ndarray) and target_data_type == "list": + raise ValueError("list expected but function got ndarray.") + elif isinstance(data, abc.Iterable): + if isinstance(data, dict): + print( + f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.' + ) + data = list(data.values()) + if target_data_type == "ndarray": + data = np.asarray(data) + else: + data = list(data) + else: + raise TypeError( + f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}." 
+ ) + + if cpu_intensive: + Q = mp.Queue(1000) + proc = mp.Process + else: + Q = Queue(1000) + proc = Thread + # spawn processes + if target_data_type == "ndarray": + arguments = [ + [func, Q, part, i, use_worker_id] + for i, part in enumerate(np.array_split(data, n_proc)) + ] + else: + step = ( + int(len(data) / n_proc + 1) + if len(data) % n_proc != 0 + else int(len(data) / n_proc) + ) + arguments = [ + [func, Q, part, i, use_worker_id] + for i, part in enumerate( + [data[i: i + step] for i in range(0, len(data), step)] + ) + ] + processes = [] + for i in range(n_proc): + p = proc(target=_do_parallel_data_prefetch, args=arguments[i]) + processes += [p] + + # start processes + print(f"Start prefetching...") + import time + + start = time.time() + gather_res = [[] for _ in range(n_proc)] + try: + for p in processes: + p.start() + + k = 0 + while k < n_proc: + # get result + res = Q.get() + if res == "Done": + k += 1 + else: + gather_res[res[0]] = res[1] + + except Exception as e: + print("Exception: ", e) + for p in processes: + p.terminate() + + raise e + finally: + for p in processes: + p.join() + print(f"Prefetching complete. [{time.time() - start} sec.]") + + if target_data_type == 'ndarray': + if not isinstance(gather_res[0], np.ndarray): + return np.concatenate([np.asarray(r) for r in gather_res], axis=0) + + # order outputs + return np.concatenate(gather_res, axis=0) + elif target_data_type == 'list': + out = [] + for r in gather_res: + out.extend(r) + return out + else: + return gather_res diff --git a/stable-diffusion/main.py b/stable-diffusion/main.py new file mode 100644 index 0000000..e8e18c1 --- /dev/null +++ b/stable-diffusion/main.py @@ -0,0 +1,741 @@ +import argparse, os, sys, datetime, glob, importlib, csv +import numpy as np +import time +import torch +import torchvision +import pytorch_lightning as pl + +from packaging import version +from omegaconf import OmegaConf +from torch.utils.data import random_split, DataLoader, Dataset, Subset +from functools import partial +from PIL import Image + +from pytorch_lightning import seed_everything +from pytorch_lightning.trainer import Trainer +from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor +from pytorch_lightning.utilities.distributed import rank_zero_only +from pytorch_lightning.utilities import rank_zero_info + +from ldm.data.base import Txt2ImgIterableBaseDataset +from ldm.util import instantiate_from_config + + +def get_parser(**parser_kwargs): + def str2bool(v): + if isinstance(v, bool): + return v + if v.lower() in ("yes", "true", "t", "y", "1"): + return True + elif v.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise argparse.ArgumentTypeError("Boolean value expected.") + + parser = argparse.ArgumentParser(**parser_kwargs) + parser.add_argument( + "-n", + "--name", + type=str, + const=True, + default="", + nargs="?", + help="postfix for logdir", + ) + parser.add_argument( + "-r", + "--resume", + type=str, + const=True, + default="", + nargs="?", + help="resume from logdir or checkpoint in logdir", + ) + parser.add_argument( + "-b", + "--base", + nargs="*", + metavar="base_config.yaml", + help="paths to base configs. Loaded from left-to-right. 
" + "Parameters can be overwritten or added with command-line options of the form `--key value`.", + default=list(), + ) + parser.add_argument( + "-t", + "--train", + type=str2bool, + const=True, + default=False, + nargs="?", + help="train", + ) + parser.add_argument( + "--no-test", + type=str2bool, + const=True, + default=False, + nargs="?", + help="disable test", + ) + parser.add_argument( + "-p", + "--project", + help="name of new or path to existing project" + ) + parser.add_argument( + "-d", + "--debug", + type=str2bool, + nargs="?", + const=True, + default=False, + help="enable post-mortem debugging", + ) + parser.add_argument( + "-s", + "--seed", + type=int, + default=23, + help="seed for seed_everything", + ) + parser.add_argument( + "-f", + "--postfix", + type=str, + default="", + help="post-postfix for default name", + ) + parser.add_argument( + "-l", + "--logdir", + type=str, + default="logs", + help="directory for logging dat shit", + ) + parser.add_argument( + "--scale_lr", + type=str2bool, + nargs="?", + const=True, + default=True, + help="scale base-lr by ngpu * batch_size * n_accumulate", + ) + return parser + + +def nondefault_trainer_args(opt): + parser = argparse.ArgumentParser() + parser = Trainer.add_argparse_args(parser) + args = parser.parse_args([]) + return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k)) + + +class WrappedDataset(Dataset): + """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset""" + + def __init__(self, dataset): + self.data = dataset + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + return self.data[idx] + + +def worker_init_fn(_): + worker_info = torch.utils.data.get_worker_info() + + dataset = worker_info.dataset + worker_id = worker_info.id + + if isinstance(dataset, Txt2ImgIterableBaseDataset): + split_size = dataset.num_records // worker_info.num_workers + # reset num_records to the true number to retain reliable length information + dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size] + current_id = np.random.choice(len(np.random.get_state()[1]), 1) + return np.random.seed(np.random.get_state()[1][current_id] + worker_id) + else: + return np.random.seed(np.random.get_state()[1][0] + worker_id) + + +class DataModuleFromConfig(pl.LightningDataModule): + def __init__(self, batch_size, train=None, validation=None, test=None, predict=None, + wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False, + shuffle_val_dataloader=False): + super().__init__() + self.batch_size = batch_size + self.dataset_configs = dict() + self.num_workers = num_workers if num_workers is not None else batch_size * 2 + self.use_worker_init_fn = use_worker_init_fn + if train is not None: + self.dataset_configs["train"] = train + self.train_dataloader = self._train_dataloader + if validation is not None: + self.dataset_configs["validation"] = validation + self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader) + if test is not None: + self.dataset_configs["test"] = test + self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader) + if predict is not None: + self.dataset_configs["predict"] = predict + self.predict_dataloader = self._predict_dataloader + self.wrap = wrap + + def prepare_data(self): + for data_cfg in self.dataset_configs.values(): + instantiate_from_config(data_cfg) + + def setup(self, stage=None): + self.datasets = dict( + (k, 
instantiate_from_config(self.dataset_configs[k])) + for k in self.dataset_configs) + if self.wrap: + for k in self.datasets: + self.datasets[k] = WrappedDataset(self.datasets[k]) + + def _train_dataloader(self): + is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset) + if is_iterable_dataset or self.use_worker_init_fn: + init_fn = worker_init_fn + else: + init_fn = None + return DataLoader(self.datasets["train"], batch_size=self.batch_size, + num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True, + worker_init_fn=init_fn) + + def _val_dataloader(self, shuffle=False): + if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn: + init_fn = worker_init_fn + else: + init_fn = None + return DataLoader(self.datasets["validation"], + batch_size=self.batch_size, + num_workers=self.num_workers, + worker_init_fn=init_fn, + shuffle=shuffle) + + def _test_dataloader(self, shuffle=False): + is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset) + if is_iterable_dataset or self.use_worker_init_fn: + init_fn = worker_init_fn + else: + init_fn = None + + # do not shuffle dataloader for iterable dataset + shuffle = shuffle and (not is_iterable_dataset) + + return DataLoader(self.datasets["test"], batch_size=self.batch_size, + num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle) + + def _predict_dataloader(self, shuffle=False): + if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn: + init_fn = worker_init_fn + else: + init_fn = None + return DataLoader(self.datasets["predict"], batch_size=self.batch_size, + num_workers=self.num_workers, worker_init_fn=init_fn) + + +class SetupCallback(Callback): + def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config): + super().__init__() + self.resume = resume + self.now = now + self.logdir = logdir + self.ckptdir = ckptdir + self.cfgdir = cfgdir + self.config = config + self.lightning_config = lightning_config + + def on_keyboard_interrupt(self, trainer, pl_module): + if trainer.global_rank == 0: + print("Summoning checkpoint.") + ckpt_path = os.path.join(self.ckptdir, "last.ckpt") + trainer.save_checkpoint(ckpt_path) + + def on_pretrain_routine_start(self, trainer, pl_module): + if trainer.global_rank == 0: + # Create logdirs and save configs + os.makedirs(self.logdir, exist_ok=True) + os.makedirs(self.ckptdir, exist_ok=True) + os.makedirs(self.cfgdir, exist_ok=True) + + if "callbacks" in self.lightning_config: + if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']: + os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True) + print("Project config") + print(OmegaConf.to_yaml(self.config)) + OmegaConf.save(self.config, + os.path.join(self.cfgdir, "{}-project.yaml".format(self.now))) + + print("Lightning config") + print(OmegaConf.to_yaml(self.lightning_config)) + OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}), + os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now))) + + else: + # ModelCheckpoint callback created log directory --- remove it + if not self.resume and os.path.exists(self.logdir): + dst, name = os.path.split(self.logdir) + dst = os.path.join(dst, "child_runs", name) + os.makedirs(os.path.split(dst)[0], exist_ok=True) + try: + os.rename(self.logdir, dst) + except FileNotFoundError: + pass + + +class ImageLogger(Callback): + def __init__(self, batch_frequency, max_images, 
clamp=True, increase_log_steps=True, + rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False, + log_images_kwargs=None): + super().__init__() + self.rescale = rescale + self.batch_freq = batch_frequency + self.max_images = max_images + self.logger_log_images = { + pl.loggers.TestTubeLogger: self._testtube, + } + self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)] + if not increase_log_steps: + self.log_steps = [self.batch_freq] + self.clamp = clamp + self.disabled = disabled + self.log_on_batch_idx = log_on_batch_idx + self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {} + self.log_first_step = log_first_step + + @rank_zero_only + def _testtube(self, pl_module, images, batch_idx, split): + for k in images: + grid = torchvision.utils.make_grid(images[k]) + grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w + + tag = f"{split}/{k}" + pl_module.logger.experiment.add_image( + tag, grid, + global_step=pl_module.global_step) + + @rank_zero_only + def log_local(self, save_dir, split, images, + global_step, current_epoch, batch_idx): + root = os.path.join(save_dir, "images", split) + for k in images: + grid = torchvision.utils.make_grid(images[k], nrow=4) + if self.rescale: + grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w + grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1) + grid = grid.numpy() + grid = (grid * 255).astype(np.uint8) + filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format( + k, + global_step, + current_epoch, + batch_idx) + path = os.path.join(root, filename) + os.makedirs(os.path.split(path)[0], exist_ok=True) + Image.fromarray(grid).save(path) + + def log_img(self, pl_module, batch, batch_idx, split="train"): + check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step + if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0 + hasattr(pl_module, "log_images") and + callable(pl_module.log_images) and + self.max_images > 0): + logger = type(pl_module.logger) + + is_train = pl_module.training + if is_train: + pl_module.eval() + + with torch.no_grad(): + images = pl_module.log_images(batch, split=split, **self.log_images_kwargs) + + for k in images: + N = min(images[k].shape[0], self.max_images) + images[k] = images[k][:N] + if isinstance(images[k], torch.Tensor): + images[k] = images[k].detach().cpu() + if self.clamp: + images[k] = torch.clamp(images[k], -1., 1.) 
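# values are clamped to [-1, 1] here; _testtube() and log_local() above rescale
# grids back to [0, 1] via (grid + 1.0) / 2.0 before logging or saving to disk.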
+ + self.log_local(pl_module.logger.save_dir, split, images, + pl_module.global_step, pl_module.current_epoch, batch_idx) + + logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None) + logger_log_images(pl_module, images, pl_module.global_step, split) + + if is_train: + pl_module.train() + + def check_frequency(self, check_idx): + if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and ( + check_idx > 0 or self.log_first_step): + try: + self.log_steps.pop(0) + except IndexError as e: + print(e) + pass + return True + return False + + def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): + if not self.disabled and (pl_module.global_step > 0 or self.log_first_step): + self.log_img(pl_module, batch, batch_idx, split="train") + + def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): + if not self.disabled and pl_module.global_step > 0: + self.log_img(pl_module, batch, batch_idx, split="val") + if hasattr(pl_module, 'calibrate_grad_norm'): + if (pl_module.calibrate_grad_norm and batch_idx % 25 == 0) and batch_idx > 0: + self.log_gradients(trainer, pl_module, batch_idx=batch_idx) + + +class CUDACallback(Callback): + # see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py + def on_train_epoch_start(self, trainer, pl_module): + # Reset the memory use counter + torch.cuda.reset_peak_memory_stats(trainer.root_gpu) + torch.cuda.synchronize(trainer.root_gpu) + self.start_time = time.time() + + def on_train_epoch_end(self, trainer, pl_module, outputs): + torch.cuda.synchronize(trainer.root_gpu) + max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2 ** 20 + epoch_time = time.time() - self.start_time + + try: + max_memory = trainer.training_type_plugin.reduce(max_memory) + epoch_time = trainer.training_type_plugin.reduce(epoch_time) + + rank_zero_info(f"Average Epoch time: {epoch_time:.2f} seconds") + rank_zero_info(f"Average Peak memory {max_memory:.2f}MiB") + except AttributeError: + pass + + +if __name__ == "__main__": + # custom parser to specify config files, train, test and debug mode, + # postfix, resume. + # `--key value` arguments are interpreted as arguments to the trainer. + # `nested.key=value` arguments are interpreted as config parameters. + # configs are merged from left-to-right followed by command line parameters. 
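A minimal sketch of the merge order described above, using OmegaConf directly; the config path and override keys are hypothetical, purely for illustration:

```python
from omegaconf import OmegaConf

# hypothetical base config and command-line overrides, for illustration only
base = OmegaConf.load("configs/example-project.yaml")
cli = OmegaConf.from_dotlist(["model.base_learning_rate=1e-6",
                              "data.params.batch_size=8"])

# later arguments take precedence, so the CLI dot-list overrides the base
# configs, mirroring the `OmegaConf.merge(*configs, cli)` call in the script below
config = OmegaConf.merge(base, cli)
print(OmegaConf.to_yaml(config))
```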
+ + # model: + # base_learning_rate: float + # target: path to lightning module + # params: + # key: value + # data: + # target: main.DataModuleFromConfig + # params: + # batch_size: int + # wrap: bool + # train: + # target: path to train dataset + # params: + # key: value + # validation: + # target: path to validation dataset + # params: + # key: value + # test: + # target: path to test dataset + # params: + # key: value + # lightning: (optional, has sane defaults and can be specified on cmdline) + # trainer: + # additional arguments to trainer + # logger: + # logger to instantiate + # modelcheckpoint: + # modelcheckpoint to instantiate + # callbacks: + # callback1: + # target: importpath + # params: + # key: value + + now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") + + # add cwd for convenience and to make classes in this file available when + # running as `python main.py` + # (in particular `main.DataModuleFromConfig`) + sys.path.append(os.getcwd()) + + parser = get_parser() + parser = Trainer.add_argparse_args(parser) + + opt, unknown = parser.parse_known_args() + if opt.name and opt.resume: + raise ValueError( + "-n/--name and -r/--resume cannot be specified both." + "If you want to resume training in a new log folder, " + "use -n/--name in combination with --resume_from_checkpoint" + ) + if opt.resume: + if not os.path.exists(opt.resume): + raise ValueError("Cannot find {}".format(opt.resume)) + if os.path.isfile(opt.resume): + paths = opt.resume.split("/") + # idx = len(paths)-paths[::-1].index("logs")+1 + # logdir = "/".join(paths[:idx]) + logdir = "/".join(paths[:-2]) + ckpt = opt.resume + else: + assert os.path.isdir(opt.resume), opt.resume + logdir = opt.resume.rstrip("/") + ckpt = os.path.join(logdir, "checkpoints", "last.ckpt") + + opt.resume_from_checkpoint = ckpt + base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml"))) + opt.base = base_configs + opt.base + _tmp = logdir.split("/") + nowname = _tmp[-1] + else: + if opt.name: + name = "_" + opt.name + elif opt.base: + cfg_fname = os.path.split(opt.base[0])[-1] + cfg_name = os.path.splitext(cfg_fname)[0] + name = "_" + cfg_name + else: + name = "" + nowname = now + name + opt.postfix + logdir = os.path.join(opt.logdir, nowname) + + ckptdir = os.path.join(logdir, "checkpoints") + cfgdir = os.path.join(logdir, "configs") + seed_everything(opt.seed) + + try: + # init and save configs + configs = [OmegaConf.load(cfg) for cfg in opt.base] + cli = OmegaConf.from_dotlist(unknown) + config = OmegaConf.merge(*configs, cli) + lightning_config = config.pop("lightning", OmegaConf.create()) + # merge trainer cli with config + trainer_config = lightning_config.get("trainer", OmegaConf.create()) + # default to ddp + trainer_config["accelerator"] = "ddp" + for k in nondefault_trainer_args(opt): + trainer_config[k] = getattr(opt, k) + if not "gpus" in trainer_config: + del trainer_config["accelerator"] + cpu = True + else: + gpuinfo = trainer_config["gpus"] + print(f"Running on GPUs {gpuinfo}") + cpu = False + trainer_opt = argparse.Namespace(**trainer_config) + lightning_config.trainer = trainer_config + + # model + model = instantiate_from_config(config.model) + + # trainer and callbacks + trainer_kwargs = dict() + + # default logger configs + default_logger_cfgs = { + "wandb": { + "target": "pytorch_lightning.loggers.WandbLogger", + "params": { + "name": nowname, + "save_dir": logdir, + "offline": opt.debug, + "id": nowname, + } + }, + "testtube": { + "target": "pytorch_lightning.loggers.TestTubeLogger", + 
"params": { + "name": "testtube", + "save_dir": logdir, + } + }, + } + default_logger_cfg = default_logger_cfgs["testtube"] + if "logger" in lightning_config: + logger_cfg = lightning_config.logger + else: + logger_cfg = OmegaConf.create() + logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg) + trainer_kwargs["logger"] = instantiate_from_config(logger_cfg) + + # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to + # specify which metric is used to determine best models + default_modelckpt_cfg = { + "target": "pytorch_lightning.callbacks.ModelCheckpoint", + "params": { + "dirpath": ckptdir, + "filename": "{epoch:06}", + "verbose": True, + "save_last": True, + } + } + if hasattr(model, "monitor"): + print(f"Monitoring {model.monitor} as checkpoint metric.") + default_modelckpt_cfg["params"]["monitor"] = model.monitor + default_modelckpt_cfg["params"]["save_top_k"] = 3 + + if "modelcheckpoint" in lightning_config: + modelckpt_cfg = lightning_config.modelcheckpoint + else: + modelckpt_cfg = OmegaConf.create() + modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg) + print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}") + if version.parse(pl.__version__) < version.parse('1.4.0'): + trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg) + + # add callback which sets up log directory + default_callbacks_cfg = { + "setup_callback": { + "target": "main.SetupCallback", + "params": { + "resume": opt.resume, + "now": now, + "logdir": logdir, + "ckptdir": ckptdir, + "cfgdir": cfgdir, + "config": config, + "lightning_config": lightning_config, + } + }, + "image_logger": { + "target": "main.ImageLogger", + "params": { + "batch_frequency": 750, + "max_images": 4, + "clamp": True + } + }, + "learning_rate_logger": { + "target": "main.LearningRateMonitor", + "params": { + "logging_interval": "step", + # "log_momentum": True + } + }, + "cuda_callback": { + "target": "main.CUDACallback" + }, + } + if version.parse(pl.__version__) >= version.parse('1.4.0'): + default_callbacks_cfg.update({'checkpoint_callback': modelckpt_cfg}) + + if "callbacks" in lightning_config: + callbacks_cfg = lightning_config.callbacks + else: + callbacks_cfg = OmegaConf.create() + + if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg: + print( + 'Caution: Saving checkpoints every n train steps without deleting. 
This might require some free space.') + default_metrics_over_trainsteps_ckpt_dict = { + 'metrics_over_trainsteps_checkpoint': + {"target": 'pytorch_lightning.callbacks.ModelCheckpoint', + 'params': { + "dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'), + "filename": "{epoch:06}-{step:09}", + "verbose": True, + 'save_top_k': -1, + 'every_n_train_steps': 10000, + 'save_weights_only': True + } + } + } + default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict) + + callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg) + if 'ignore_keys_callback' in callbacks_cfg and hasattr(trainer_opt, 'resume_from_checkpoint'): + callbacks_cfg.ignore_keys_callback.params['ckpt_path'] = trainer_opt.resume_from_checkpoint + elif 'ignore_keys_callback' in callbacks_cfg: + del callbacks_cfg['ignore_keys_callback'] + + trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg] + + trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs) + trainer.logdir = logdir ### + + # data + data = instantiate_from_config(config.data) + # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html + # calling these ourselves should not be necessary but it is. + # lightning still takes care of proper multiprocessing though + data.prepare_data() + data.setup() + print("#### Data #####") + for k in data.datasets: + print(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}") + + # configure learning rate + bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate + if not cpu: + ngpu = len(lightning_config.trainer.gpus.strip(",").split(',')) + else: + ngpu = 1 + if 'accumulate_grad_batches' in lightning_config.trainer: + accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches + else: + accumulate_grad_batches = 1 + print(f"accumulate_grad_batches = {accumulate_grad_batches}") + lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches + if opt.scale_lr: + model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr + print( + "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format( + model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr)) + else: + model.learning_rate = base_lr + print("++++ NOT USING LR SCALING ++++") + print(f"Setting learning rate to {model.learning_rate:.2e}") + + + # allow checkpointing via USR1 + def melk(*args, **kwargs): + # run all checkpoint hooks + if trainer.global_rank == 0: + print("Summoning checkpoint.") + ckpt_path = os.path.join(ckptdir, "last.ckpt") + trainer.save_checkpoint(ckpt_path) + + + def divein(*args, **kwargs): + if trainer.global_rank == 0: + import pudb; + pudb.set_trace() + + + import signal + + signal.signal(signal.SIGUSR1, melk) + signal.signal(signal.SIGUSR2, divein) + + # run + if opt.train: + try: + trainer.fit(model, data) + except Exception: + melk() + raise + if not opt.no_test and not trainer.interrupted: + trainer.test(model, data) + except Exception: + if opt.debug and trainer.global_rank == 0: + try: + import pudb as debugger + except ImportError: + import pdb as debugger + debugger.post_mortem() + raise + finally: + # move newly created debug project to debug_runs + if opt.debug and not opt.resume and trainer.global_rank == 0: + dst, name = os.path.split(logdir) + dst = os.path.join(dst, "debug_runs", name) + os.makedirs(os.path.split(dst)[0], 
exist_ok=True) + os.rename(logdir, dst) + if trainer.global_rank == 0: + print(trainer.profiler.summary()) diff --git a/stable-diffusion/models/first_stage_models/kl-f16/config.yaml b/stable-diffusion/models/first_stage_models/kl-f16/config.yaml new file mode 100644 index 0000000..661921c --- /dev/null +++ b/stable-diffusion/models/first_stage_models/kl-f16/config.yaml @@ -0,0 +1,44 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: val/rec_loss + embed_dim: 16 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 1.0e-06 + disc_weight: 0.5 + ddconfig: + double_z: true + z_channels: 16 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 1 + - 2 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 16 + dropout: 0.0 +data: + target: main.DataModuleFromConfig + params: + batch_size: 6 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + size: 384 + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + size: 384 + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/kl-f32/config.yaml b/stable-diffusion/models/first_stage_models/kl-f32/config.yaml new file mode 100644 index 0000000..7b642b1 --- /dev/null +++ b/stable-diffusion/models/first_stage_models/kl-f32/config.yaml @@ -0,0 +1,46 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: val/rec_loss + embed_dim: 64 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 1.0e-06 + disc_weight: 0.5 + ddconfig: + double_z: true + z_channels: 64 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 1 + - 2 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 16 + - 8 + dropout: 0.0 +data: + target: main.DataModuleFromConfig + params: + batch_size: 6 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + size: 384 + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + size: 384 + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/kl-f4/config.yaml b/stable-diffusion/models/first_stage_models/kl-f4/config.yaml new file mode 100644 index 0000000..85cfb3e --- /dev/null +++ b/stable-diffusion/models/first_stage_models/kl-f4/config.yaml @@ -0,0 +1,41 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: val/rec_loss + embed_dim: 3 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 1.0e-06 + disc_weight: 0.5 + ddconfig: + double_z: true + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 +data: + target: main.DataModuleFromConfig + params: + batch_size: 10 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + size: 384 + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + size: 384 + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/kl-f8/config.yaml b/stable-diffusion/models/first_stage_models/kl-f8/config.yaml new file mode 100644 index 0000000..921aa42 --- /dev/null +++ b/stable-diffusion/models/first_stage_models/kl-f8/config.yaml @@ -0,0 +1,42 @@ +model: + base_learning_rate: 
4.5e-06 + target: ldm.models.autoencoder.AutoencoderKL + params: + monitor: val/rec_loss + embed_dim: 4 + lossconfig: + target: ldm.modules.losses.LPIPSWithDiscriminator + params: + disc_start: 50001 + kl_weight: 1.0e-06 + disc_weight: 0.5 + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 +data: + target: main.DataModuleFromConfig + params: + batch_size: 4 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + size: 384 + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + size: 384 + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/vq-f16/config.yaml b/stable-diffusion/models/first_stage_models/vq-f16/config.yaml new file mode 100644 index 0000000..91c7454 --- /dev/null +++ b/stable-diffusion/models/first_stage_models/vq-f16/config.yaml @@ -0,0 +1,49 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.VQModel + params: + embed_dim: 8 + n_embed: 16384 + ddconfig: + double_z: false + z_channels: 8 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 1 + - 2 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 16 + dropout: 0.0 + lossconfig: + target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator + params: + disc_conditional: false + disc_in_channels: 3 + disc_start: 250001 + disc_weight: 0.75 + disc_num_layers: 2 + codebook_weight: 1.0 + +data: + target: main.DataModuleFromConfig + params: + batch_size: 14 + num_workers: 20 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + size: 384 + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + size: 384 + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/vq-f4-noattn/config.yaml b/stable-diffusion/models/first_stage_models/vq-f4-noattn/config.yaml new file mode 100644 index 0000000..f8e499f --- /dev/null +++ b/stable-diffusion/models/first_stage_models/vq-f4-noattn/config.yaml @@ -0,0 +1,46 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.VQModel + params: + embed_dim: 3 + n_embed: 8192 + monitor: val/rec_loss + + ddconfig: + attn_type: none + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator + params: + disc_conditional: false + disc_in_channels: 3 + disc_start: 11 + disc_weight: 0.75 + codebook_weight: 1.0 + +data: + target: main.DataModuleFromConfig + params: + batch_size: 8 + num_workers: 12 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/vq-f4/config.yaml b/stable-diffusion/models/first_stage_models/vq-f4/config.yaml new file mode 100644 index 0000000..7d8cef3 --- /dev/null +++ b/stable-diffusion/models/first_stage_models/vq-f4/config.yaml @@ -0,0 +1,45 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.VQModel + params: + embed_dim: 3 + n_embed: 8192 + monitor: val/rec_loss + + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + 
- 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator + params: + disc_conditional: false + disc_in_channels: 3 + disc_start: 0 + disc_weight: 0.75 + codebook_weight: 1.0 + +data: + target: main.DataModuleFromConfig + params: + batch_size: 8 + num_workers: 16 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/vq-f8-n256/config.yaml b/stable-diffusion/models/first_stage_models/vq-f8-n256/config.yaml new file mode 100644 index 0000000..8519e13 --- /dev/null +++ b/stable-diffusion/models/first_stage_models/vq-f8-n256/config.yaml @@ -0,0 +1,48 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.VQModel + params: + embed_dim: 4 + n_embed: 256 + monitor: val/rec_loss + ddconfig: + double_z: false + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 32 + dropout: 0.0 + lossconfig: + target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator + params: + disc_conditional: false + disc_in_channels: 3 + disc_start: 250001 + disc_weight: 0.75 + codebook_weight: 1.0 + +data: + target: main.DataModuleFromConfig + params: + batch_size: 10 + num_workers: 20 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + size: 384 + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + size: 384 + crop_size: 256 diff --git a/stable-diffusion/models/first_stage_models/vq-f8/config.yaml b/stable-diffusion/models/first_stage_models/vq-f8/config.yaml new file mode 100644 index 0000000..efd6801 --- /dev/null +++ b/stable-diffusion/models/first_stage_models/vq-f8/config.yaml @@ -0,0 +1,48 @@ +model: + base_learning_rate: 4.5e-06 + target: ldm.models.autoencoder.VQModel + params: + embed_dim: 4 + n_embed: 16384 + monitor: val/rec_loss + ddconfig: + double_z: false + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 32 + dropout: 0.0 + lossconfig: + target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator + params: + disc_conditional: false + disc_in_channels: 3 + disc_num_layers: 2 + disc_start: 1 + disc_weight: 0.6 + codebook_weight: 1.0 +data: + target: main.DataModuleFromConfig + params: + batch_size: 10 + num_workers: 20 + wrap: true + train: + target: ldm.data.openimages.FullOpenImagesTrain + params: + size: 384 + crop_size: 256 + validation: + target: ldm.data.openimages.FullOpenImagesValidation + params: + size: 384 + crop_size: 256 diff --git a/stable-diffusion/models/ldm/bsr_sr/config.yaml b/stable-diffusion/models/ldm/bsr_sr/config.yaml new file mode 100644 index 0000000..861692a --- /dev/null +++ b/stable-diffusion/models/ldm/bsr_sr/config.yaml @@ -0,0 +1,80 @@ +model: + base_learning_rate: 1.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0155 + log_every_t: 100 + timesteps: 1000 + loss_type: l2 + first_stage_key: image + cond_stage_key: LR_image + image_size: 64 + channels: 3 + concat_mode: true + cond_stage_trainable: false + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 6 + 
out_channels: 3 + model_channels: 160 + attention_resolutions: + - 16 + - 8 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 2 + - 4 + num_head_channels: 32 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + monitor: val/rec_loss + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: torch.nn.Identity +data: + target: main.DataModuleFromConfig + params: + batch_size: 64 + wrap: false + num_workers: 12 + train: + target: ldm.data.openimages.SuperresOpenImagesAdvancedTrain + params: + size: 256 + degradation: bsrgan_light + downscale_f: 4 + min_crop_f: 0.5 + max_crop_f: 1.0 + random_crop: true + validation: + target: ldm.data.openimages.SuperresOpenImagesAdvancedValidation + params: + size: 256 + degradation: bsrgan_light + downscale_f: 4 + min_crop_f: 0.5 + max_crop_f: 1.0 + random_crop: true diff --git a/stable-diffusion/models/ldm/celeba256/config.yaml b/stable-diffusion/models/ldm/celeba256/config.yaml new file mode 100644 index 0000000..a12f4e9 --- /dev/null +++ b/stable-diffusion/models/ldm/celeba256/config.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: class_label + image_size: 64 + channels: 3 + cond_stage_trainable: false + concat_mode: false + monitor: val/loss + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 224 + attention_resolutions: + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_head_channels: 32 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: __is_unconditional__ +data: + target: main.DataModuleFromConfig + params: + batch_size: 48 + num_workers: 5 + wrap: false + train: + target: ldm.data.faceshq.CelebAHQTrain + params: + size: 256 + validation: + target: ldm.data.faceshq.CelebAHQValidation + params: + size: 256 diff --git a/stable-diffusion/models/ldm/cin256/config.yaml b/stable-diffusion/models/ldm/cin256/config.yaml new file mode 100644 index 0000000..9bc1b45 --- /dev/null +++ b/stable-diffusion/models/ldm/cin256/config.yaml @@ -0,0 +1,80 @@ +model: + base_learning_rate: 1.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: class_label + image_size: 32 + channels: 4 + cond_stage_trainable: true + conditioning_key: crossattn + monitor: val/loss_simple_ema + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 + in_channels: 4 + out_channels: 4 + model_channels: 256 + attention_resolutions: + - 4 + - 2 + - 1 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 4 + num_head_channels: 32 + use_spatial_transformer: true + 
transformer_depth: 1 + context_dim: 512 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 4 + n_embed: 16384 + ddconfig: + double_z: false + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: + - 32 + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: ldm.modules.encoders.modules.ClassEmbedder + params: + embed_dim: 512 + key: class_label +data: + target: main.DataModuleFromConfig + params: + batch_size: 64 + num_workers: 12 + wrap: false + train: + target: ldm.data.imagenet.ImageNetTrain + params: + config: + size: 256 + validation: + target: ldm.data.imagenet.ImageNetValidation + params: + config: + size: 256 diff --git a/stable-diffusion/models/ldm/ffhq256/config.yaml b/stable-diffusion/models/ldm/ffhq256/config.yaml new file mode 100644 index 0000000..0ddfd1b --- /dev/null +++ b/stable-diffusion/models/ldm/ffhq256/config.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: class_label + image_size: 64 + channels: 3 + cond_stage_trainable: false + concat_mode: false + monitor: val/loss + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 224 + attention_resolutions: + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_head_channels: 32 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: __is_unconditional__ +data: + target: main.DataModuleFromConfig + params: + batch_size: 42 + num_workers: 5 + wrap: false + train: + target: ldm.data.faceshq.FFHQTrain + params: + size: 256 + validation: + target: ldm.data.faceshq.FFHQValidation + params: + size: 256 diff --git a/stable-diffusion/models/ldm/inpainting_big/config.yaml b/stable-diffusion/models/ldm/inpainting_big/config.yaml new file mode 100644 index 0000000..da5fd5e --- /dev/null +++ b/stable-diffusion/models/ldm/inpainting_big/config.yaml @@ -0,0 +1,67 @@ +model: + base_learning_rate: 1.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0205 + log_every_t: 100 + timesteps: 1000 + loss_type: l1 + first_stage_key: image + cond_stage_key: masked_image + image_size: 64 + channels: 3 + concat_mode: true + monitor: val/loss + scheduler_config: + target: ldm.lr_scheduler.LambdaWarmUpCosineScheduler + params: + verbosity_interval: 0 + warm_up_steps: 1000 + max_decay_steps: 50000 + lr_start: 0.001 + lr_max: 0.1 + lr_min: 0.0001 + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 7 + out_channels: 3 + model_channels: 256 + attention_resolutions: + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_heads: 8 + resblock_updown: true + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + monitor: val/rec_loss + 
ddconfig: + attn_type: none + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: ldm.modules.losses.contperceptual.DummyLoss + cond_stage_config: __is_first_stage__ diff --git a/stable-diffusion/models/ldm/layout2img-openimages256/config.yaml b/stable-diffusion/models/ldm/layout2img-openimages256/config.yaml new file mode 100644 index 0000000..9e1dc15 --- /dev/null +++ b/stable-diffusion/models/ldm/layout2img-openimages256/config.yaml @@ -0,0 +1,81 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0205 + log_every_t: 100 + timesteps: 1000 + loss_type: l1 + first_stage_key: image + cond_stage_key: coordinates_bbox + image_size: 64 + channels: 3 + conditioning_key: crossattn + cond_stage_trainable: true + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 128 + attention_resolutions: + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_head_channels: 32 + use_spatial_transformer: true + transformer_depth: 3 + context_dim: 512 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + monitor: val/rec_loss + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: ldm.modules.encoders.modules.BERTEmbedder + params: + n_embed: 512 + n_layer: 16 + vocab_size: 8192 + max_seq_len: 92 + use_tokenizer: false + monitor: val/loss_simple_ema +data: + target: main.DataModuleFromConfig + params: + batch_size: 24 + wrap: false + num_workers: 10 + train: + target: ldm.data.openimages.OpenImagesBBoxTrain + params: + size: 256 + validation: + target: ldm.data.openimages.OpenImagesBBoxValidation + params: + size: 256 diff --git a/stable-diffusion/models/ldm/lsun_beds256/config.yaml b/stable-diffusion/models/ldm/lsun_beds256/config.yaml new file mode 100644 index 0000000..1a50c76 --- /dev/null +++ b/stable-diffusion/models/ldm/lsun_beds256/config.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: class_label + image_size: 64 + channels: 3 + cond_stage_trainable: false + concat_mode: false + monitor: val/loss + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 224 + attention_resolutions: + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 4 + num_head_channels: 32 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: __is_unconditional__ +data: + target: main.DataModuleFromConfig + params: + batch_size: 48 + num_workers: 5 + wrap: false + 
train: + target: ldm.data.lsun.LSUNBedroomsTrain + params: + size: 256 + validation: + target: ldm.data.lsun.LSUNBedroomsValidation + params: + size: 256 diff --git a/stable-diffusion/models/ldm/lsun_churches256/config.yaml b/stable-diffusion/models/ldm/lsun_churches256/config.yaml new file mode 100644 index 0000000..424d091 --- /dev/null +++ b/stable-diffusion/models/ldm/lsun_churches256/config.yaml @@ -0,0 +1,92 @@ +model: + base_learning_rate: 5.0e-05 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0155 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + loss_type: l1 + first_stage_key: image + cond_stage_key: image + image_size: 32 + channels: 4 + cond_stage_trainable: false + concat_mode: false + scale_by_std: true + monitor: val/loss_simple_ema + scheduler_config: + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: + - 10000 + cycle_lengths: + - 10000000000000 + f_start: + - 1.0e-06 + f_max: + - 1.0 + f_min: + - 1.0 + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 + in_channels: 4 + out_channels: 4 + model_channels: 192 + attention_resolutions: + - 1 + - 2 + - 4 + - 8 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 2 + - 4 + - 4 + num_heads: 8 + use_scale_shift_norm: true + resblock_updown: true + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: '__is_unconditional__' + +data: + target: main.DataModuleFromConfig + params: + batch_size: 96 + num_workers: 5 + wrap: false + train: + target: ldm.data.lsun.LSUNChurchesTrain + params: + size: 256 + validation: + target: ldm.data.lsun.LSUNChurchesValidation + params: + size: 256 diff --git a/stable-diffusion/models/ldm/semantic_synthesis256/config.yaml b/stable-diffusion/models/ldm/semantic_synthesis256/config.yaml new file mode 100644 index 0000000..1a721cf --- /dev/null +++ b/stable-diffusion/models/ldm/semantic_synthesis256/config.yaml @@ -0,0 +1,59 @@ +model: + base_learning_rate: 1.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0205 + log_every_t: 100 + timesteps: 1000 + loss_type: l1 + first_stage_key: image + cond_stage_key: segmentation + image_size: 64 + channels: 3 + concat_mode: true + cond_stage_trainable: true + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 6 + out_channels: 3 + model_channels: 128 + attention_resolutions: + - 32 + - 16 + - 8 + num_res_blocks: 2 + channel_mult: + - 1 + - 4 + - 8 + num_heads: 8 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: ldm.modules.encoders.modules.SpatialRescaler + params: + n_stages: 2 + in_channels: 182 + out_channels: 3 diff --git a/stable-diffusion/models/ldm/semantic_synthesis512/config.yaml b/stable-diffusion/models/ldm/semantic_synthesis512/config.yaml new file mode 
100644 index 0000000..8faded2 --- /dev/null +++ b/stable-diffusion/models/ldm/semantic_synthesis512/config.yaml @@ -0,0 +1,78 @@ +model: + base_learning_rate: 1.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0205 + log_every_t: 100 + timesteps: 1000 + loss_type: l1 + first_stage_key: image + cond_stage_key: segmentation + image_size: 128 + channels: 3 + concat_mode: true + cond_stage_trainable: true + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 128 + in_channels: 6 + out_channels: 3 + model_channels: 128 + attention_resolutions: + - 32 + - 16 + - 8 + num_res_blocks: 2 + channel_mult: + - 1 + - 4 + - 8 + num_heads: 8 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + monitor: val/rec_loss + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: ldm.modules.encoders.modules.SpatialRescaler + params: + n_stages: 2 + in_channels: 182 + out_channels: 3 +data: + target: main.DataModuleFromConfig + params: + batch_size: 8 + wrap: false + num_workers: 10 + train: + target: ldm.data.landscapes.RFWTrain + params: + size: 768 + crop_size: 512 + segmentation_to_float32: true + validation: + target: ldm.data.landscapes.RFWValidation + params: + size: 768 + crop_size: 512 + segmentation_to_float32: true diff --git a/stable-diffusion/models/ldm/text2img256/config.yaml b/stable-diffusion/models/ldm/text2img256/config.yaml new file mode 100644 index 0000000..3f54a01 --- /dev/null +++ b/stable-diffusion/models/ldm/text2img256/config.yaml @@ -0,0 +1,77 @@ +model: + base_learning_rate: 2.0e-06 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.0015 + linear_end: 0.0195 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: caption + image_size: 64 + channels: 3 + cond_stage_trainable: true + conditioning_key: crossattn + monitor: val/loss_simple_ema + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 64 + in_channels: 3 + out_channels: 3 + model_channels: 192 + attention_resolutions: + - 8 + - 4 + - 2 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 3 + - 5 + num_head_channels: 32 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 640 + first_stage_config: + target: ldm.models.autoencoder.VQModelInterface + params: + embed_dim: 3 + n_embed: 8192 + ddconfig: + double_z: false + z_channels: 3 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + target: ldm.modules.encoders.modules.BERTEmbedder + params: + n_embed: 640 + n_layer: 32 +data: + target: main.DataModuleFromConfig + params: + batch_size: 28 + num_workers: 10 + wrap: false + train: + target: ldm.data.previews.pytorch_dataset.PreviewsTrain + params: + size: 256 + validation: + target: ldm.data.previews.pytorch_dataset.PreviewsValidation + params: + size: 256 diff --git a/stable-diffusion/notebook_helpers.py b/stable-diffusion/notebook_helpers.py new file mode 100644 index 0000000..5d0ebd7 --- /dev/null +++ b/stable-diffusion/notebook_helpers.py @@ -0,0 +1,270 @@ +from 
torchvision.datasets.utils import download_url +from ldm.util import instantiate_from_config +import torch +import os +# todo ? +from google.colab import files +from IPython.display import Image as ipyimg +import ipywidgets as widgets +from PIL import Image +from numpy import asarray +from einops import rearrange, repeat +import torch, torchvision +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.util import ismap +import time +from omegaconf import OmegaConf + + +def download_models(mode): + + if mode == "superresolution": + # this is the small bsr light model + url_conf = 'https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1' + url_ckpt = 'https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1' + + path_conf = 'logs/diffusion/superresolution_bsr/configs/project.yaml' + path_ckpt = 'logs/diffusion/superresolution_bsr/checkpoints/last.ckpt' + + download_url(url_conf, path_conf) + download_url(url_ckpt, path_ckpt) + + path_conf = path_conf + '/?dl=1' # fix it + path_ckpt = path_ckpt + '/?dl=1' # fix it + return path_conf, path_ckpt + + else: + raise NotImplementedError + + +def load_model_from_config(config, ckpt): + print(f"Loading model from {ckpt}") + pl_sd = torch.load(ckpt, map_location="cpu") + global_step = pl_sd["global_step"] + sd = pl_sd["state_dict"] + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + model.cuda() + model.eval() + return {"model": model}, global_step + + +def get_model(mode): + path_conf, path_ckpt = download_models(mode) + config = OmegaConf.load(path_conf) + model, step = load_model_from_config(config, path_ckpt) + return model + + +def get_custom_cond(mode): + dest = "data/example_conditioning" + + if mode == "superresolution": + uploaded_img = files.upload() + filename = next(iter(uploaded_img)) + name, filetype = filename.split(".") # todo assumes just one dot in name ! + os.rename(f"{filename}", f"{dest}/{mode}/custom_{name}.{filetype}") + + elif mode == "text_conditional": + w = widgets.Text(value='A cake with cream!', disabled=True) + display(w) + + with open(f"{dest}/{mode}/custom_{w.value[:20]}.txt", 'w') as f: + f.write(w.value) + + elif mode == "class_conditional": + w = widgets.IntSlider(min=0, max=1000) + display(w) + with open(f"{dest}/{mode}/custom.txt", 'w') as f: + f.write(w.value) + + else: + raise NotImplementedError(f"cond not implemented for mode{mode}") + + +def get_cond_options(mode): + path = "data/example_conditioning" + path = os.path.join(path, mode) + onlyfiles = [f for f in sorted(os.listdir(path))] + return path, onlyfiles + + +def select_cond_path(mode): + path = "data/example_conditioning" # todo + path = os.path.join(path, mode) + onlyfiles = [f for f in sorted(os.listdir(path))] + + selected = widgets.RadioButtons( + options=onlyfiles, + description='Select conditioning:', + disabled=False + ) + display(selected) + selected_path = os.path.join(path, selected.value) + return selected_path + + +def get_cond(mode, selected_path): + example = dict() + if mode == "superresolution": + up_f = 4 + visualize_cond_img(selected_path) + + c = Image.open(selected_path) + c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0) + c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]], antialias=True) + c_up = rearrange(c_up, '1 c h w -> 1 h w c') + c = rearrange(c, '1 c h w -> 1 h w c') + c = 2. * c - 1. 
+ + c = c.to(torch.device("cuda")) + example["LR_image"] = c + example["image"] = c_up + + return example + + +def visualize_cond_img(path): + display(ipyimg(filename=path)) + + +def run(model, selected_path, task, custom_steps, resize_enabled=False, classifier_ckpt=None, global_step=None): + + example = get_cond(task, selected_path) + + save_intermediate_vid = False + n_runs = 1 + masked = False + guider = None + ckwargs = None + mode = 'ddim' + ddim_use_x0_pred = False + temperature = 1. + eta = 1. + make_progrow = True + custom_shape = None + + height, width = example["image"].shape[1:3] + split_input = height >= 128 and width >= 128 + + if split_input: + ks = 128 + stride = 64 + vqf = 4 # + model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride), + "vqf": vqf, + "patch_distributed_vq": True, + "tie_braker": False, + "clip_max_weight": 0.5, + "clip_min_weight": 0.01, + "clip_max_tie_weight": 0.5, + "clip_min_tie_weight": 0.01} + else: + if hasattr(model, "split_input_params"): + delattr(model, "split_input_params") + + invert_mask = False + + x_T = None + for n in range(n_runs): + if custom_shape is not None: + x_T = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device) + x_T = repeat(x_T, '1 c h w -> b c h w', b=custom_shape[0]) + + logs = make_convolutional_sample(example, model, + mode=mode, custom_steps=custom_steps, + eta=eta, swap_mode=False , masked=masked, + invert_mask=invert_mask, quantize_x0=False, + custom_schedule=None, decode_interval=10, + resize_enabled=resize_enabled, custom_shape=custom_shape, + temperature=temperature, noise_dropout=0., + corrector=guider, corrector_kwargs=ckwargs, x_T=x_T, save_intermediate_vid=save_intermediate_vid, + make_progrow=make_progrow,ddim_use_x0_pred=ddim_use_x0_pred + ) + return logs + + +@torch.no_grad() +def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None, + mask=None, x0=None, quantize_x0=False, img_callback=None, + temperature=1., noise_dropout=0., score_corrector=None, + corrector_kwargs=None, x_T=None, log_every_t=None + ): + + ddim = DDIMSampler(model) + bs = shape[0] # dont know where this comes from but wayne + shape = shape[1:] # cut batch dim + print(f"Sampling with eta = {eta}; steps: {steps}") + samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback, + normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta, + mask=mask, x0=x0, temperature=temperature, verbose=False, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, x_T=x_T) + + return samples, intermediates + + +@torch.no_grad() +def make_convolutional_sample(batch, model, mode="vanilla", custom_steps=None, eta=1.0, swap_mode=False, masked=False, + invert_mask=True, quantize_x0=False, custom_schedule=None, decode_interval=1000, + resize_enabled=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None, + corrector_kwargs=None, x_T=None, save_intermediate_vid=False, make_progrow=True,ddim_use_x0_pred=False): + log = dict() + + z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=not (hasattr(model, 'split_input_params') + and model.cond_stage_key == 'coordinates_bbox'), + return_original_cond=True) + + log_every_t = 1 if save_intermediate_vid else None + + if custom_shape is not None: + z = torch.randn(custom_shape) + print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}") + + z0 = None + + log["input"] = x 
+ log["reconstruction"] = xrec + + if ismap(xc): + log["original_conditioning"] = model.to_rgb(xc) + if hasattr(model, 'cond_stage_key'): + log[model.cond_stage_key] = model.to_rgb(xc) + + else: + log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x) + if model.cond_stage_model: + log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x) + if model.cond_stage_key =='class_label': + log[model.cond_stage_key] = xc[model.cond_stage_key] + + with model.ema_scope("Plotting"): + t0 = time.time() + img_cb = None + + sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape, + eta=eta, + quantize_x0=quantize_x0, img_callback=img_cb, mask=None, x0=z0, + temperature=temperature, noise_dropout=noise_dropout, + score_corrector=corrector, corrector_kwargs=corrector_kwargs, + x_T=x_T, log_every_t=log_every_t) + t1 = time.time() + + if ddim_use_x0_pred: + sample = intermediates['pred_x0'][-1] + + x_sample = model.decode_first_stage(sample) + + try: + x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True) + log["sample_noquant"] = x_sample_noquant + log["sample_diff"] = torch.abs(x_sample_noquant - x_sample) + except: + pass + + log["sample"] = x_sample + log["time"] = t1 - t0 + + return log \ No newline at end of file diff --git a/stable-diffusion/scripts/download_first_stages.sh b/stable-diffusion/scripts/download_first_stages.sh new file mode 100644 index 0000000..a8d79e9 --- /dev/null +++ b/stable-diffusion/scripts/download_first_stages.sh @@ -0,0 +1,41 @@ +#!/bin/bash +wget -O models/first_stage_models/kl-f4/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f4.zip +wget -O models/first_stage_models/kl-f8/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f8.zip +wget -O models/first_stage_models/kl-f16/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f16.zip +wget -O models/first_stage_models/kl-f32/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f32.zip +wget -O models/first_stage_models/vq-f4/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4.zip +wget -O models/first_stage_models/vq-f4-noattn/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4-noattn.zip +wget -O models/first_stage_models/vq-f8/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8.zip +wget -O models/first_stage_models/vq-f8-n256/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8-n256.zip +wget -O models/first_stage_models/vq-f16/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f16.zip + + + +cd models/first_stage_models/kl-f4 +unzip -o model.zip + +cd ../kl-f8 +unzip -o model.zip + +cd ../kl-f16 +unzip -o model.zip + +cd ../kl-f32 +unzip -o model.zip + +cd ../vq-f4 +unzip -o model.zip + +cd ../vq-f4-noattn +unzip -o model.zip + +cd ../vq-f8 +unzip -o model.zip + +cd ../vq-f8-n256 +unzip -o model.zip + +cd ../vq-f16 +unzip -o model.zip + +cd ../.. 
\ No newline at end of file diff --git a/stable-diffusion/scripts/download_models.sh b/stable-diffusion/scripts/download_models.sh new file mode 100644 index 0000000..84297d7 --- /dev/null +++ b/stable-diffusion/scripts/download_models.sh @@ -0,0 +1,49 @@ +#!/bin/bash +wget -O models/ldm/celeba256/celeba-256.zip https://ommer-lab.com/files/latent-diffusion/celeba.zip +wget -O models/ldm/ffhq256/ffhq-256.zip https://ommer-lab.com/files/latent-diffusion/ffhq.zip +wget -O models/ldm/lsun_churches256/lsun_churches-256.zip https://ommer-lab.com/files/latent-diffusion/lsun_churches.zip +wget -O models/ldm/lsun_beds256/lsun_beds-256.zip https://ommer-lab.com/files/latent-diffusion/lsun_bedrooms.zip +wget -O models/ldm/text2img256/model.zip https://ommer-lab.com/files/latent-diffusion/text2img.zip +wget -O models/ldm/cin256/model.zip https://ommer-lab.com/files/latent-diffusion/cin.zip +wget -O models/ldm/semantic_synthesis512/model.zip https://ommer-lab.com/files/latent-diffusion/semantic_synthesis.zip +wget -O models/ldm/semantic_synthesis256/model.zip https://ommer-lab.com/files/latent-diffusion/semantic_synthesis256.zip +wget -O models/ldm/bsr_sr/model.zip https://ommer-lab.com/files/latent-diffusion/sr_bsr.zip +wget -O models/ldm/layout2img-openimages256/model.zip https://ommer-lab.com/files/latent-diffusion/layout2img_model.zip +wget -O models/ldm/inpainting_big/model.zip https://ommer-lab.com/files/latent-diffusion/inpainting_big.zip + + + +cd models/ldm/celeba256 +unzip -o celeba-256.zip + +cd ../ffhq256 +unzip -o ffhq-256.zip + +cd ../lsun_churches256 +unzip -o lsun_churches-256.zip + +cd ../lsun_beds256 +unzip -o lsun_beds-256.zip + +cd ../text2img256 +unzip -o model.zip + +cd ../cin256 +unzip -o model.zip + +cd ../semantic_synthesis512 +unzip -o model.zip + +cd ../semantic_synthesis256 +unzip -o model.zip + +cd ../bsr_sr +unzip -o model.zip + +cd ../layout2img-openimages256 +unzip -o model.zip + +cd ../inpainting_big +unzip -o model.zip + +cd ../.. 
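
The two download scripts above fetch and unpack the LDM model zoo into `models/first_stage_models/` and `models/ldm/`. The hedged sketch below simply walks those directories and reports which checkpoints actually landed after unzipping; the folder names are taken verbatim from the scripts, while the expectation that each archive yields a `*.ckpt` file is an assumption, so treat it as a quick presence check only.

```python
# Sanity-check sketch (not part of the diff): verify the model zoo layout that
# download_first_stages.sh and download_models.sh are expected to produce.
import os

FIRST_STAGES = ["kl-f4", "kl-f8", "kl-f16", "kl-f32",
                "vq-f4", "vq-f4-noattn", "vq-f8", "vq-f8-n256", "vq-f16"]
LDM_MODELS = ["celeba256", "ffhq256", "lsun_churches256", "lsun_beds256",
              "text2img256", "cin256", "semantic_synthesis512",
              "semantic_synthesis256", "bsr_sr", "layout2img-openimages256",
              "inpainting_big"]

def report(root, names):
    for name in names:
        path = os.path.join(root, name)
        # Assumption: each unzipped folder contains at least one *.ckpt file.
        ckpts = [f for f in os.listdir(path) if f.endswith(".ckpt")] if os.path.isdir(path) else []
        print(f"{path}: {'OK (' + ckpts[0] + ')' if ckpts else 'missing checkpoint'}")

report("models/first_stage_models", FIRST_STAGES)
report("models/ldm", LDM_MODELS)
```
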
diff --git a/stable-diffusion/scripts/img2img.py b/stable-diffusion/scripts/img2img.py new file mode 100644 index 0000000..421e215 --- /dev/null +++ b/stable-diffusion/scripts/img2img.py @@ -0,0 +1,293 @@ +"""make variations of input image""" + +import argparse, os, sys, glob +import PIL +import torch +import numpy as np +from omegaconf import OmegaConf +from PIL import Image +from tqdm import tqdm, trange +from itertools import islice +from einops import rearrange, repeat +from torchvision.utils import make_grid +from torch import autocast +from contextlib import nullcontext +import time +from pytorch_lightning import seed_everything + +from ldm.util import instantiate_from_config +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.models.diffusion.plms import PLMSSampler + + +def chunk(it, size): + it = iter(it) + return iter(lambda: tuple(islice(it, size)), ()) + + +def load_model_from_config(config, ckpt, verbose=False): + print(f"Loading model from {ckpt}") + pl_sd = torch.load(ckpt, map_location="cpu") + if "global_step" in pl_sd: + print(f"Global Step: {pl_sd['global_step']}") + sd = pl_sd["state_dict"] + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + if len(m) > 0 and verbose: + print("missing keys:") + print(m) + if len(u) > 0 and verbose: + print("unexpected keys:") + print(u) + + model.cuda() + model.eval() + return model + + +def load_img(path): + image = Image.open(path).convert("RGB") + w, h = image.size + print(f"loaded input image of size ({w}, {h}) from {path}") + w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + return 2.*image - 1. + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--prompt", + type=str, + nargs="?", + default="a painting of a virus monster playing guitar", + help="the prompt to render" + ) + + parser.add_argument( + "--init-img", + type=str, + nargs="?", + help="path to the input image" + ) + + parser.add_argument( + "--outdir", + type=str, + nargs="?", + help="dir to write results to", + default="outputs/img2img-samples" + ) + + parser.add_argument( + "--skip_grid", + action='store_true', + help="do not save a grid, only individual samples. Helpful when evaluating lots of samples", + ) + + parser.add_argument( + "--skip_save", + action='store_true', + help="do not save indiviual samples. For speed measurements.", + ) + + parser.add_argument( + "--ddim_steps", + type=int, + default=50, + help="number of ddim sampling steps", + ) + + parser.add_argument( + "--plms", + action='store_true', + help="use plms sampling", + ) + parser.add_argument( + "--fixed_code", + action='store_true', + help="if enabled, uses the same starting code across all samples ", + ) + + parser.add_argument( + "--ddim_eta", + type=float, + default=0.0, + help="ddim eta (eta=0.0 corresponds to deterministic sampling", + ) + parser.add_argument( + "--n_iter", + type=int, + default=1, + help="sample this often", + ) + parser.add_argument( + "--C", + type=int, + default=4, + help="latent channels", + ) + parser.add_argument( + "--f", + type=int, + default=8, + help="downsampling factor, most often 8 or 16", + ) + parser.add_argument( + "--n_samples", + type=int, + default=2, + help="how many samples to produce for each given prompt. 
A.k.a batch size", + ) + parser.add_argument( + "--n_rows", + type=int, + default=0, + help="rows in the grid (default: n_samples)", + ) + parser.add_argument( + "--scale", + type=float, + default=5.0, + help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", + ) + + parser.add_argument( + "--strength", + type=float, + default=0.75, + help="strength for noising/unnoising. 1.0 corresponds to full destruction of information in init image", + ) + parser.add_argument( + "--from-file", + type=str, + help="if specified, load prompts from this file", + ) + parser.add_argument( + "--config", + type=str, + default="configs/stable-diffusion/v1-inference.yaml", + help="path to config which constructs model", + ) + parser.add_argument( + "--ckpt", + type=str, + default="models/ldm/stable-diffusion-v1/model.ckpt", + help="path to checkpoint of model", + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="the seed (for reproducible sampling)", + ) + parser.add_argument( + "--precision", + type=str, + help="evaluate at this precision", + choices=["full", "autocast"], + default="autocast" + ) + + opt = parser.parse_args() + seed_everything(opt.seed) + + config = OmegaConf.load(f"{opt.config}") + model = load_model_from_config(config, f"{opt.ckpt}") + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + model = model.to(device) + + if opt.plms: + raise NotImplementedError("PLMS sampler not (yet) supported") + sampler = PLMSSampler(model) + else: + sampler = DDIMSampler(model) + + os.makedirs(opt.outdir, exist_ok=True) + outpath = opt.outdir + + batch_size = opt.n_samples + n_rows = opt.n_rows if opt.n_rows > 0 else batch_size + if not opt.from_file: + prompt = opt.prompt + assert prompt is not None + data = [batch_size * [prompt]] + + else: + print(f"reading prompts from {opt.from_file}") + with open(opt.from_file, "r") as f: + data = f.read().splitlines() + data = list(chunk(data, batch_size)) + + sample_path = os.path.join(outpath, "samples") + os.makedirs(sample_path, exist_ok=True) + base_count = len(os.listdir(sample_path)) + grid_count = len(os.listdir(outpath)) - 1 + + assert os.path.isfile(opt.init_img) + init_image = load_img(opt.init_img).to(device) + init_image = repeat(init_image, '1 ... -> b ...', b=batch_size) + init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space + + sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False) + + assert 0. 
<= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]' + t_enc = int(opt.strength * opt.ddim_steps) + print(f"target t_enc is {t_enc} steps") + + precision_scope = autocast if opt.precision == "autocast" else nullcontext + with torch.no_grad(): + with precision_scope("cuda"): + with model.ema_scope(): + tic = time.time() + all_samples = list() + for n in trange(opt.n_iter, desc="Sampling"): + for prompts in tqdm(data, desc="data"): + uc = None + if opt.scale != 1.0: + uc = model.get_learned_conditioning(batch_size * [""]) + if isinstance(prompts, tuple): + prompts = list(prompts) + c = model.get_learned_conditioning(prompts) + + # encode (scaled latent) + z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(device)) + # decode it + samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale, + unconditional_conditioning=uc,) + + x_samples = model.decode_first_stage(samples) + x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) + + if not opt.skip_save: + for x_sample in x_samples: + x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') + Image.fromarray(x_sample.astype(np.uint8)).save( + os.path.join(sample_path, f"{base_count:05}.png")) + base_count += 1 + all_samples.append(x_samples) + + if not opt.skip_grid: + # additionally, save as grid + grid = torch.stack(all_samples, 0) + grid = rearrange(grid, 'n b c h w -> (n b) c h w') + grid = make_grid(grid, nrow=n_rows) + + # to image + grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() + Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png')) + grid_count += 1 + + toc = time.time() + + print(f"Your samples are ready and waiting for you here: \n{outpath} \n" + f" \nEnjoy.") + + +if __name__ == "__main__": + main() diff --git a/stable-diffusion/scripts/inpaint.py b/stable-diffusion/scripts/inpaint.py new file mode 100644 index 0000000..d6e6387 --- /dev/null +++ b/stable-diffusion/scripts/inpaint.py @@ -0,0 +1,98 @@ +import argparse, os, sys, glob +from omegaconf import OmegaConf +from PIL import Image +from tqdm import tqdm +import numpy as np +import torch +from main import instantiate_from_config +from ldm.models.diffusion.ddim import DDIMSampler + + +def make_batch(image, mask, device): + image = np.array(Image.open(image).convert("RGB")) + image = image.astype(np.float32)/255.0 + image = image[None].transpose(0,3,1,2) + image = torch.from_numpy(image) + + mask = np.array(Image.open(mask).convert("L")) + mask = mask.astype(np.float32)/255.0 + mask = mask[None,None] + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = (1-mask)*image + + batch = {"image": image, "mask": mask, "masked_image": masked_image} + for k in batch: + batch[k] = batch[k].to(device=device) + batch[k] = batch[k]*2.0-1.0 + return batch + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--indir", + type=str, + nargs="?", + help="dir containing image-mask pairs (`example.png` and `example_mask.png`)", + ) + parser.add_argument( + "--outdir", + type=str, + nargs="?", + help="dir to write results to", + ) + parser.add_argument( + "--steps", + type=int, + default=50, + help="number of ddim sampling steps", + ) + opt = parser.parse_args() + + masks = sorted(glob.glob(os.path.join(opt.indir, "*_mask.png"))) + images = [x.replace("_mask.png", ".png") for x in masks] + print(f"Found {len(masks)} inputs.") + + config = 
OmegaConf.load("models/ldm/inpainting_big/config.yaml") + model = instantiate_from_config(config.model) + model.load_state_dict(torch.load("models/ldm/inpainting_big/last.ckpt")["state_dict"], + strict=False) + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + model = model.to(device) + sampler = DDIMSampler(model) + + os.makedirs(opt.outdir, exist_ok=True) + with torch.no_grad(): + with model.ema_scope(): + for image, mask in tqdm(zip(images, masks)): + outpath = os.path.join(opt.outdir, os.path.split(image)[1]) + batch = make_batch(image, mask, device=device) + + # encode masked image and concat downsampled mask + c = model.cond_stage_model.encode(batch["masked_image"]) + cc = torch.nn.functional.interpolate(batch["mask"], + size=c.shape[-2:]) + c = torch.cat((c, cc), dim=1) + + shape = (c.shape[1]-1,)+c.shape[2:] + samples_ddim, _ = sampler.sample(S=opt.steps, + conditioning=c, + batch_size=c.shape[0], + shape=shape, + verbose=False) + x_samples_ddim = model.decode_first_stage(samples_ddim) + + image = torch.clamp((batch["image"]+1.0)/2.0, + min=0.0, max=1.0) + mask = torch.clamp((batch["mask"]+1.0)/2.0, + min=0.0, max=1.0) + predicted_image = torch.clamp((x_samples_ddim+1.0)/2.0, + min=0.0, max=1.0) + + inpainted = (1-mask)*image+mask*predicted_image + inpainted = inpainted.cpu().numpy().transpose(0,2,3,1)[0]*255 + Image.fromarray(inpainted.astype(np.uint8)).save(outpath) diff --git a/stable-diffusion/scripts/knn2img.py b/stable-diffusion/scripts/knn2img.py new file mode 100644 index 0000000..e6eaaec --- /dev/null +++ b/stable-diffusion/scripts/knn2img.py @@ -0,0 +1,398 @@ +import argparse, os, sys, glob +import clip +import torch +import torch.nn as nn +import numpy as np +from omegaconf import OmegaConf +from PIL import Image +from tqdm import tqdm, trange +from itertools import islice +from einops import rearrange, repeat +from torchvision.utils import make_grid +import scann +import time +from multiprocessing import cpu_count + +from ldm.util import instantiate_from_config, parallel_data_prefetch +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.models.diffusion.plms import PLMSSampler +from ldm.modules.encoders.modules import FrozenClipImageEmbedder, FrozenCLIPTextEmbedder + +DATABASES = [ + "openimages", + "artbench-art_nouveau", + "artbench-baroque", + "artbench-expressionism", + "artbench-impressionism", + "artbench-post_impressionism", + "artbench-realism", + "artbench-romanticism", + "artbench-renaissance", + "artbench-surrealism", + "artbench-ukiyo_e", +] + + +def chunk(it, size): + it = iter(it) + return iter(lambda: tuple(islice(it, size)), ()) + + +def load_model_from_config(config, ckpt, verbose=False): + print(f"Loading model from {ckpt}") + pl_sd = torch.load(ckpt, map_location="cpu") + if "global_step" in pl_sd: + print(f"Global Step: {pl_sd['global_step']}") + sd = pl_sd["state_dict"] + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + if len(m) > 0 and verbose: + print("missing keys:") + print(m) + if len(u) > 0 and verbose: + print("unexpected keys:") + print(u) + + model.cuda() + model.eval() + return model + + +class Searcher(object): + def __init__(self, database, retriever_version='ViT-L/14'): + assert database in DATABASES + # self.database = self.load_database(database) + self.database_name = database + self.searcher_savedir = f'data/rdm/searchers/{self.database_name}' + self.database_path = f'data/rdm/retrieval_databases/{self.database_name}' + self.retriever = 
self.load_retriever(version=retriever_version) + self.database = {'embedding': [], + 'img_id': [], + 'patch_coords': []} + self.load_database() + self.load_searcher() + + def train_searcher(self, k, + metric='dot_product', + searcher_savedir=None): + + print('Start training searcher') + searcher = scann.scann_ops_pybind.builder(self.database['embedding'] / + np.linalg.norm(self.database['embedding'], axis=1)[:, np.newaxis], + k, metric) + self.searcher = searcher.score_brute_force().build() + print('Finish training searcher') + + if searcher_savedir is not None: + print(f'Save trained searcher under "{searcher_savedir}"') + os.makedirs(searcher_savedir, exist_ok=True) + self.searcher.serialize(searcher_savedir) + + def load_single_file(self, saved_embeddings): + compressed = np.load(saved_embeddings) + self.database = {key: compressed[key] for key in compressed.files} + print('Finished loading of clip embeddings.') + + def load_multi_files(self, data_archive): + out_data = {key: [] for key in self.database} + for d in tqdm(data_archive, desc=f'Loading datapool from {len(data_archive)} individual files.'): + for key in d.files: + out_data[key].append(d[key]) + + return out_data + + def load_database(self): + + print(f'Load saved patch embedding from "{self.database_path}"') + file_content = glob.glob(os.path.join(self.database_path, '*.npz')) + + if len(file_content) == 1: + self.load_single_file(file_content[0]) + elif len(file_content) > 1: + data = [np.load(f) for f in file_content] + prefetched_data = parallel_data_prefetch(self.load_multi_files, data, + n_proc=min(len(data), cpu_count()), target_data_type='dict') + + self.database = {key: np.concatenate([od[key] for od in prefetched_data], axis=1)[0] for key in + self.database} + else: + raise ValueError(f'No npz-files in specified path "{self.database_path}" is this directory existing?') + + print(f'Finished loading of retrieval database of length {self.database["embedding"].shape[0]}.') + + def load_retriever(self, version='ViT-L/14', ): + model = FrozenClipImageEmbedder(model=version) + if torch.cuda.is_available(): + model.cuda() + model.eval() + return model + + def load_searcher(self): + print(f'load searcher for database {self.database_name} from {self.searcher_savedir}') + self.searcher = scann.scann_ops_pybind.load_searcher(self.searcher_savedir) + print('Finished loading searcher.') + + def search(self, x, k): + if self.searcher is None and self.database['embedding'].shape[0] < 2e4: + self.train_searcher(k) # quickly fit searcher on the fly for small databases + assert self.searcher is not None, 'Cannot search with uninitialized searcher' + if isinstance(x, torch.Tensor): + x = x.detach().cpu().numpy() + if len(x.shape) == 3: + x = x[:, 0] + query_embeddings = x / np.linalg.norm(x, axis=1)[:, np.newaxis] + + start = time.time() + nns, distances = self.searcher.search_batched(query_embeddings, final_num_neighbors=k) + end = time.time() + + out_embeddings = self.database['embedding'][nns] + out_img_ids = self.database['img_id'][nns] + out_pc = self.database['patch_coords'][nns] + + out = {'nn_embeddings': out_embeddings / np.linalg.norm(out_embeddings, axis=-1)[..., np.newaxis], + 'img_ids': out_img_ids, + 'patch_coords': out_pc, + 'queries': x, + 'exec_time': end - start, + 'nns': nns, + 'q_embeddings': query_embeddings} + + return out + + def __call__(self, x, n): + return self.search(x, n) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # TODO: add n_neighbors and modes (text-only, 
text-image-retrieval, image-image retrieval etc) + # TODO: add 'image variation' mode when knn=0 but a single image is given instead of a text prompt? + parser.add_argument( + "--prompt", + type=str, + nargs="?", + default="a painting of a virus monster playing guitar", + help="the prompt to render" + ) + + parser.add_argument( + "--outdir", + type=str, + nargs="?", + help="dir to write results to", + default="outputs/txt2img-samples" + ) + + parser.add_argument( + "--skip_grid", + action='store_true', + help="do not save a grid, only individual samples. Helpful when evaluating lots of samples", + ) + + parser.add_argument( + "--ddim_steps", + type=int, + default=50, + help="number of ddim sampling steps", + ) + + parser.add_argument( + "--n_repeat", + type=int, + default=1, + help="number of repeats in CLIP latent space", + ) + + parser.add_argument( + "--plms", + action='store_true', + help="use plms sampling", + ) + + parser.add_argument( + "--ddim_eta", + type=float, + default=0.0, + help="ddim eta (eta=0.0 corresponds to deterministic sampling", + ) + parser.add_argument( + "--n_iter", + type=int, + default=1, + help="sample this often", + ) + + parser.add_argument( + "--H", + type=int, + default=768, + help="image height, in pixel space", + ) + + parser.add_argument( + "--W", + type=int, + default=768, + help="image width, in pixel space", + ) + + parser.add_argument( + "--n_samples", + type=int, + default=3, + help="how many samples to produce for each given prompt. A.k.a batch size", + ) + + parser.add_argument( + "--n_rows", + type=int, + default=0, + help="rows in the grid (default: n_samples)", + ) + + parser.add_argument( + "--scale", + type=float, + default=5.0, + help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", + ) + + parser.add_argument( + "--from-file", + type=str, + help="if specified, load prompts from this file", + ) + + parser.add_argument( + "--config", + type=str, + default="configs/retrieval-augmented-diffusion/768x768.yaml", + help="path to config which constructs model", + ) + + parser.add_argument( + "--ckpt", + type=str, + default="models/rdm/rdm768x768/model.ckpt", + help="path to checkpoint of model", + ) + + parser.add_argument( + "--clip_type", + type=str, + default="ViT-L/14", + help="which CLIP model to use for retrieval and NN encoding", + ) + parser.add_argument( + "--database", + type=str, + default='artbench-surrealism', + choices=DATABASES, + help="The database used for the search, only applied when --use_neighbors=True", + ) + parser.add_argument( + "--use_neighbors", + default=False, + action='store_true', + help="Include neighbors in addition to text prompt for conditioning", + ) + parser.add_argument( + "--knn", + default=10, + type=int, + help="The number of included neighbors, only applied when --use_neighbors=True", + ) + + opt = parser.parse_args() + + config = OmegaConf.load(f"{opt.config}") + model = load_model_from_config(config, f"{opt.ckpt}") + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + model = model.to(device) + + clip_text_encoder = FrozenCLIPTextEmbedder(opt.clip_type).to(device) + + if opt.plms: + sampler = PLMSSampler(model) + else: + sampler = DDIMSampler(model) + + os.makedirs(opt.outdir, exist_ok=True) + outpath = opt.outdir + + batch_size = opt.n_samples + n_rows = opt.n_rows if opt.n_rows > 0 else batch_size + if not opt.from_file: + prompt = opt.prompt + assert prompt is not None + data = [batch_size * [prompt]] + + else: + 
print(f"reading prompts from {opt.from_file}") + with open(opt.from_file, "r") as f: + data = f.read().splitlines() + data = list(chunk(data, batch_size)) + + sample_path = os.path.join(outpath, "samples") + os.makedirs(sample_path, exist_ok=True) + base_count = len(os.listdir(sample_path)) + grid_count = len(os.listdir(outpath)) - 1 + + print(f"sampling scale for cfg is {opt.scale:.2f}") + + searcher = None + if opt.use_neighbors: + searcher = Searcher(opt.database) + + with torch.no_grad(): + with model.ema_scope(): + for n in trange(opt.n_iter, desc="Sampling"): + all_samples = list() + for prompts in tqdm(data, desc="data"): + print("sampling prompts:", prompts) + if isinstance(prompts, tuple): + prompts = list(prompts) + c = clip_text_encoder.encode(prompts) + uc = None + if searcher is not None: + nn_dict = searcher(c, opt.knn) + c = torch.cat([c, torch.from_numpy(nn_dict['nn_embeddings']).cuda()], dim=1) + if opt.scale != 1.0: + uc = torch.zeros_like(c) + if isinstance(prompts, tuple): + prompts = list(prompts) + shape = [16, opt.H // 16, opt.W // 16] # note: currently hardcoded for f16 model + samples_ddim, _ = sampler.sample(S=opt.ddim_steps, + conditioning=c, + batch_size=c.shape[0], + shape=shape, + verbose=False, + unconditional_guidance_scale=opt.scale, + unconditional_conditioning=uc, + eta=opt.ddim_eta, + ) + + x_samples_ddim = model.decode_first_stage(samples_ddim) + x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) + + for x_sample in x_samples_ddim: + x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') + Image.fromarray(x_sample.astype(np.uint8)).save( + os.path.join(sample_path, f"{base_count:05}.png")) + base_count += 1 + all_samples.append(x_samples_ddim) + + if not opt.skip_grid: + # additionally, save as grid + grid = torch.stack(all_samples, 0) + grid = rearrange(grid, 'n b c h w -> (n b) c h w') + grid = make_grid(grid, nrow=n_rows) + + # to image + grid = 255. 
* rearrange(grid, 'c h w -> h w c').cpu().numpy() + Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png')) + grid_count += 1 + + print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.") diff --git a/stable-diffusion/scripts/latent_imagenet_diffusion.ipynb b/stable-diffusion/scripts/latent_imagenet_diffusion.ipynb new file mode 100644 index 0000000..607f94f --- /dev/null +++ b/stable-diffusion/scripts/latent_imagenet_diffusion.ipynb @@ -0,0 +1,429 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "latent-imagenet-diffusion.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Class-Conditional Synthesis with Latent Diffusion Models" + ], + "metadata": { + "id": "NUmmV5ZvrPbP" + } + }, + { + "cell_type": "markdown", + "source": [ + "Install all the requirements" + ], + "metadata": { + "id": "zh7u8gOx0ivw" + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NHgUAp48qwoG", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "411d4df6-d91a-42d4-819e-9cf641c12248", + "cellView": "form" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Cloning into 'latent-diffusion'...\n", + "remote: Enumerating objects: 992, done.\u001B[K\n", + "remote: Counting objects: 100% (695/695), done.\u001B[K\n", + "remote: Compressing objects: 100% (397/397), done.\u001B[K\n", + "remote: Total 992 (delta 375), reused 564 (delta 253), pack-reused 297\u001B[K\n", + "Receiving objects: 100% (992/992), 30.78 MiB | 29.43 MiB/s, done.\n", + "Resolving deltas: 100% (510/510), done.\n", + "Cloning into 'taming-transformers'...\n", + "remote: Enumerating objects: 1335, done.\u001B[K\n", + "remote: Counting objects: 100% (525/525), done.\u001B[K\n", + "remote: Compressing objects: 100% (493/493), done.\u001B[K\n", + "remote: Total 1335 (delta 58), reused 481 (delta 30), pack-reused 810\u001B[K\n", + "Receiving objects: 100% (1335/1335), 412.35 MiB | 30.53 MiB/s, done.\n", + "Resolving deltas: 100% (267/267), done.\n", + "Obtaining file:///content/taming-transformers\n", + "Requirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from taming-transformers==0.0.1) (1.10.0+cu111)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from taming-transformers==0.0.1) (1.21.5)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from taming-transformers==0.0.1) (4.63.0)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch->taming-transformers==0.0.1) (3.10.0.2)\n", + "Installing collected packages: taming-transformers\n", + " Running setup.py develop for taming-transformers\n", + "Successfully installed taming-transformers-0.0.1\n", + "\u001B[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", + "tensorflow 2.8.0 requires tf-estimator-nightly==2.8.0.dev2021122109, which is not installed.\n", + "arviz 0.11.4 requires typing-extensions<4,>=3.7.4.3, but you have typing-extensions 4.1.1 which is incompatible.\u001B[0m\n" + ] + } + ], + "source": [ + "#@title Installation\n", + "!git clone https://github.com/CompVis/latent-diffusion.git\n", + "!git clone https://github.com/CompVis/taming-transformers\n", + "!pip install -e ./taming-transformers\n", + "!pip install omegaconf>=2.0.0 pytorch-lightning>=1.0.8 torch-fidelity einops\n", + "\n", + "import sys\n", + "sys.path.append(\".\")\n", + "sys.path.append('./taming-transformers')\n", + "from taming.models import vqgan " + ] + }, + { + "cell_type": "markdown", + "source": [ + "Now, download the checkpoint (~1.7 GB). This will usually take 1-2 minutes." + ], + "metadata": { + "id": "fNqCqQDoyZmq" + } + }, + { + "cell_type": "code", + "source": [ + "#@title Download\n", + "%cd latent-diffusion/ \n", + "\n", + "!mkdir -p models/ldm/cin256-v2/\n", + "!wget -O models/ldm/cin256-v2/model.ckpt https://ommer-lab.com/files/latent-diffusion/nitro/cin/model.ckpt " + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "cNHvQBhzyXCI", + "outputId": "0a79e979-8484-4c62-96d9-7c79b1835162", + "cellView": "form" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content/latent-diffusion\n", + "--2022-04-03 13:04:51-- https://ommer-lab.com/files/latent-diffusion/nitro/cin/model.ckpt\n", + "Resolving ommer-lab.com (ommer-lab.com)... 141.84.41.65\n", + "Connecting to ommer-lab.com (ommer-lab.com)|141.84.41.65|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 1827378153 (1.7G)\n", + "Saving to: ‘models/ldm/cin256-v2/model.ckpt’\n", + "\n", + "models/ldm/cin256-v 100%[===================>] 1.70G 24.9MB/s in 70s \n", + "\n", + "2022-04-03 13:06:02 (24.9 MB/s) - ‘models/ldm/cin256-v2/model.ckpt’ saved [1827378153/1827378153]\n", + "\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "Let's also check what type of GPU we've got." + ], + "metadata": { + "id": "ThxmCePqt1mt" + } + }, + { + "cell_type": "code", + "source": [ + "!nvidia-smi" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "jbL2zJ7Pt7Jl", + "outputId": "c8242be9-dba2-4a9f-da44-a294a70bb449" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Sun Apr 3 13:06:21 2022 \n", + "+-----------------------------------------------------------------------------+\n", + "| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n", + "|-------------------------------+----------------------+----------------------+\n", + "| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n", + "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n", + "| | | MIG M. 
|\n", + "|===============================+======================+======================|\n", + "| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |\n", + "| N/A 66C P8 33W / 149W | 0MiB / 11441MiB | 0% Default |\n", + "| | | N/A |\n", + "+-------------------------------+----------------------+----------------------+\n", + " \n", + "+-----------------------------------------------------------------------------+\n", + "| Processes: |\n", + "| GPU GI CI PID Type Process name GPU Memory |\n", + "| ID ID Usage |\n", + "|=============================================================================|\n", + "| No running processes found |\n", + "+-----------------------------------------------------------------------------+\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "Load it." + ], + "metadata": { + "id": "1tWAqdwk0Nrn" + } + }, + { + "cell_type": "code", + "source": [ + "#@title loading utils\n", + "import torch\n", + "from omegaconf import OmegaConf\n", + "\n", + "from ldm.util import instantiate_from_config\n", + "\n", + "\n", + "def load_model_from_config(config, ckpt):\n", + " print(f\"Loading model from {ckpt}\")\n", + " pl_sd = torch.load(ckpt)#, map_location=\"cpu\")\n", + " sd = pl_sd[\"state_dict\"]\n", + " model = instantiate_from_config(config.model)\n", + " m, u = model.load_state_dict(sd, strict=False)\n", + " model.cuda()\n", + " model.eval()\n", + " return model\n", + "\n", + "\n", + "def get_model():\n", + " config = OmegaConf.load(\"configs/latent-diffusion/cin256-v2.yaml\") \n", + " model = load_model_from_config(config, \"models/ldm/cin256-v2/model.ckpt\")\n", + " return model" + ], + "metadata": { + "id": "fnGwQRhtyBhb", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from ldm.models.diffusion.ddim import DDIMSampler\n", + "\n", + "model = get_model()\n", + "sampler = DDIMSampler(model)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "BPnyd-XUKbfE", + "outputId": "0fcd10e4-0df2-4ab9-cbf5-f08f4902c954" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Loading model from models/ldm/cin256-v2/model.ckpt\n", + "LatentDiffusion: Running in eps-prediction mode\n", + "DiffusionWrapper has 400.92 M params.\n", + "making attention of type 'vanilla' with 512 in_channels\n", + "Working with z of shape (1, 3, 64, 64) = 12288 dimensions.\n", + "making attention of type 'vanilla' with 512 in_channels\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "And go. Quality, sampling speed and diversity are best controlled via the `scale`, `ddim_steps` and `ddim_eta` variables. As a rule of thumb, higher values of `scale` produce better samples at the cost of a reduced output diversity. Furthermore, increasing `ddim_steps` generally also gives higher quality samples, but returns are diminishing for values > 250. Fast sampling (i e. low values of `ddim_steps`) while retaining good quality can be achieved by using `ddim_eta = 0.0`." 
+ ], + "metadata": { + "id": "iIEAhY8AhUrh" + } + }, + { + "cell_type": "code", + "source": [ + "import numpy as np \n", + "from PIL import Image\n", + "from einops import rearrange\n", + "from torchvision.utils import make_grid\n", + "\n", + "\n", + "classes = [25, 187, 448, 992] # define classes to be sampled here\n", + "n_samples_per_class = 6\n", + "\n", + "ddim_steps = 20\n", + "ddim_eta = 0.0\n", + "scale = 3.0 # for unconditional guidance\n", + "\n", + "\n", + "all_samples = list()\n", + "\n", + "with torch.no_grad():\n", + " with model.ema_scope():\n", + " uc = model.get_learned_conditioning(\n", + " {model.cond_stage_key: torch.tensor(n_samples_per_class*[1000]).to(model.device)}\n", + " )\n", + " \n", + " for class_label in classes:\n", + " print(f\"rendering {n_samples_per_class} examples of class '{class_label}' in {ddim_steps} steps and using s={scale:.2f}.\")\n", + " xc = torch.tensor(n_samples_per_class*[class_label])\n", + " c = model.get_learned_conditioning({model.cond_stage_key: xc.to(model.device)})\n", + " \n", + " samples_ddim, _ = sampler.sample(S=ddim_steps,\n", + " conditioning=c,\n", + " batch_size=n_samples_per_class,\n", + " shape=[3, 64, 64],\n", + " verbose=False,\n", + " unconditional_guidance_scale=scale,\n", + " unconditional_conditioning=uc, \n", + " eta=ddim_eta)\n", + "\n", + " x_samples_ddim = model.decode_first_stage(samples_ddim)\n", + " x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, \n", + " min=0.0, max=1.0)\n", + " all_samples.append(x_samples_ddim)\n", + "\n", + "\n", + "# display as grid\n", + "grid = torch.stack(all_samples, 0)\n", + "grid = rearrange(grid, 'n b c h w -> (n b) c h w')\n", + "grid = make_grid(grid, nrow=n_samples_per_class)\n", + "\n", + "# to image\n", + "grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()\n", + "Image.fromarray(grid.astype(np.uint8))" + ], + "metadata": { + "id": "jcbqWX2Ytu9t", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "outputId": "3b7adde0-d80e-4c01-82d2-bf988aee7455" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "rendering 6 examples of class '25' in 20 steps and using s=3.00.\n", + "Data shape for DDIM sampling is (6, 3, 64, 64), eta 0.0\n", + "Running DDIM Sampling with 20 timesteps\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "DDIM Sampler: 100%|██████████| 20/20 [00:37<00:00, 1.89s/it]\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "rendering 6 examples of class '187' in 20 steps and using s=3.00.\n", + "Data shape for DDIM sampling is (6, 3, 64, 64), eta 0.0\n", + "Running DDIM Sampling with 20 timesteps\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "DDIM Sampler: 100%|██████████| 20/20 [00:37<00:00, 1.87s/it]\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "rendering 6 examples of class '448' in 20 steps and using s=3.00.\n", + "Data shape for DDIM sampling is (6, 3, 64, 64), eta 0.0\n", + "Running DDIM Sampling with 20 timesteps\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "DDIM Sampler: 100%|██████████| 20/20 [00:37<00:00, 1.86s/it]\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "rendering 6 examples of class '992' in 20 steps and using s=3.00.\n", + "Data shape for DDIM sampling is (6, 3, 64, 64), eta 0.0\n", + "Running DDIM Sampling with 20 
timesteps\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "DDIM Sampler: 100%|██████████| 20/20 [00:37<00:00, 1.86s/it]\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAABg4AAAQKCAIAAAAXQRbAAAEAAElEQVR4nOz915Nt2Zbeh40xpll2m8yd7vhzyt57q+p617fR3WgAjWiAEKEgqSAhECAfJFKBBwYYwVAoQm+QieCDvMRQhKgAJQgSCME0TANNROMCbW/fvra8PaeOTZ+57XLTjKGHtXeegv6GmlWRJ3Obteaca61pfvP7xgT4PH2ePk+fp8/T5+nz9Hn6PH2ePk+fp8/T5+nz9Hn6PH2ePk+fp8/T5+nz9Hn6PH2ePk+fp8/T5+nz9Hn6PH2ePk+fp88m7P959YUDcMiaR7ltHRbD3FjLJrEaYlhVres8HhzsjkdDrSBWq9ViVjetTckaU9Wua32IUQDKPE1T45x3UazVolReZr72i1nd+sBAmbEhQjHIdgZZvVwCYDHKq9oL4yClotSxDYtFJcrcuLNXLZ1rwmR76/qNvUTpqgouqtFkJI5CHYvBqBxOSNPZybNBni1XSyBvsyRyRHJts4hNB6gO7k3qOhyfnO3vaYrx9HKxPSkMqrZrrNI2UdNZpYzSJNYgoRhrfBsZRBM2XccCJArI6MR0deNCk+dGoYTA2mgA8S4qA0ar6ONq5T99fF4Oh9akTePuXN9TQMvZbLCbmUSzC3madF0Acnmq5tNKkJPEVFWrNSGiDyCIibWgkBmcc6mxbdt1nbNpqrW1qe6WKw6xC8ygQSuK3LZeIGqGeVXfur3/7Mn85HSWFwnp1GMcZsCedaKSXM2nq6Zy29vlzYOtqgrONSJmXncH2yPAkGcJQ+hcq1QR0eQDG1375PEzdl1aDr/6tS8uZ8vDYz9b+cPjs0ePzhbT6pUX9+5ez5NMXIiCirSyVhVlUtedEAGBNZrZN65lQBScTZum8ghGSO3ubu3vjuu6U+kgH28xQ2JpsjWArtXaXFxeJgm1q/mjp08jh8cPjsyw3J+MQIxHWwwTHyhJ9O2DLYyxrUPT1nmmjU7rjsloiWE2XXhfJ6mpVzWCRPH5UOVJYo09PqlRJylJdC4hEEz+y//zvwaA//X//K8hkNJIgESIiIAAgEgoAsICICIQRfo3hEUQAYCIkJQiFI7MMYYom+cLQQQAkQiBlEJErRSu34H+FNifqv9BAAAijIDCLCACCCICyDFGZh+C9yGyKEJCUkRKKURERBaOMQJIiMwiIiDCINwfPDE6TYzVVmtNiNQXCgAAiRAEBQAR+xeg/7YwiMimqaB1BvtPIYsAMPSVAoJIhLR+D1EAhYX7jIgIQGQREAEWBgEg7GsFNqfFzQdF+oaprzsRAGBhEQABJCQk6d8Ukf68pLRRKIiE6/yICEhfw0iA6//Wh15njJkFBIQQrxpEYZH1dcH+FmAWRACBdf0gKkJEREJhFoF1XQOICAvDZ3KAz7/UF7YvaV8+FJHNTSVERESbo/A694Dre66/x5AQBAEYhJl5XRtISH1VAEiMMTIjoggQIhCiCBICEgAQEiH2F73P4NUlZub/yX/xvwSAhx/83aQpGuc++OPf06F+cnLx5OiiO188vX+OmX3jSzev723PpmdGu3fun779eN4C1i1XTdRG7YwSC5KSymxq81KsevDp06r2aaLv7G/nSQYxVC5EkjKhYZbsDotRQpm1aRrZ89llNRooAJjNu6OLubHqhZdub5fF+dHpvRdvmGEWs2LZ4f13HsQQVqvu6Hx2sZw7jGhUbvyt/eLk6GK8P37xC7dffO0OCdfdUrimEE+PVu++ffz0qFEhJsw5S6LQR946OPirf/0/27r7DbB5gfWPfuPv/P5v/ubR03MwKlVcZpBaGOWKAJoYCcU5nFZh2cUuImnDSntBbXVR5Dvb6d2Xt154de/W3euDYZGnKjEISqmkcISDYS6gVJFCtwAJwS1WgjHKR7/7zt//f/3gBAafHraPH8+BtXMxMpiECMUHQLO+KxQBCwJpRQBI3kdhtkYRAAuCAAjmFlOFCsLI4kHCr06ygbSTnMapVk6MUYpAojRNFzvv2pBqRmAFrElrSrxgkRthQJ2cLrp0Zz89uP3iF18fjEbvv/n2b/7jf/nmuXeTiU34crpcdNGUBgCcY6PJpgTMTeuRAEjFIAqwSK0IARL72LjgY8QYSosHJb1+I9vJaXeohla0jnmRGTvOy/FwWLTzWQgtkEQRDhBF7+1NqqbNClvP27qq062cRMWIZTkcDjICaruV6+oqdHmOCSWEiTLF+No1SIvV0+M3f/tfxSid89F5Zu8lYGpu3dmx2l5czL33hiNLbDrvhpO/+rffA4D/y3/+HylmL53EGBlc5Mn+cLX0nYtpnmgA9pIPUpsmgDp0XI7Hw/FkeXL24bsf3Xv11tHDDx9/enS5aIe5Mai1iqOtHICePDva2d0/uHvnO7/0J2bzlWN38fT88mIxGfHTRx+98+ZHD49nXmcg0HahCqwVJQSd940XVkoRIvBWbva28hi4yG23aLrG7QzSXOFi1trUDIfpqg13XrwmgBFw7+bBrRs3kiwlhUoppQ0LI0JwTIjKaEBBkAhECo0xro4iZIvEaBUjKGOFpW26vLC+7Zz3iFEpLQAxBGFQRhmVeFDlaCgRg+esxMXFhTCHwGQURN9UnYDWKVmjQVAZQlRKK0U0v1wCxnJQok41cT2d+ujPT8/btgXB+XSRFxYQTy9mJMECoeTD/Wtf+NpXgujJ7p5j0qnmgKCS8dYodDEszz59/837b//kt7//gxdfe+mbr77SreqD2zug3agcnZ9U5SQ5eno8ngy2r28fHc3amm/euHb+dDGbLhbN8otffaGtYPps+sHbP9maDO9/dP/Z6UUXQJDHZZJySGJUJPlQr2q/crw7LAVVMRpO9iaTye7jJ9NBmQ0Kruu4bJ22WtliUOS7u3uD0eDifDacbA0mxfGTYyB1cG1PGetcUGSQxc3OLpaL3ZvXg3dKJVpDDL4Y5L7zAqItSdi07ajzcshAXdNVlxdki52b16vTp9Vybq1mhjS34mPXtABMKrRdxCQPHMfjyWqx6lzbOEqSMLSKtJpfzmP0yaB47QtfOjta2IQgt7/0l/9nAPBr3/0qRHQcWaIxFAOXRZomlhTGGNq2Cz5yFKVVnqVGKSKFikgRiwhI4BhiiAh5YgdFopVSiJ33TdM1dVNXrvXCItYaa02WGqVUYizHCAxkdJJoRWo4yLMsFSSlCCLHGAgVADILAJGCGGPwgUEiR45BKdRWm0QTUfAeMQoCE81XdUDvQg3GJUOlcgwCidYoLDGmxiq0Eg2qJLRdqJ1voyZtySRGB+AQgQQJVVN5RQZ
CgOiWy3q6bBbLmpmt1VmZDQfDIs3KLEdUViuBsFp2nXeLqgLE2XQ5r2pllIioJLFlsrMzSjVEiaQBxUfvus6TVoxoU40gbeeMsWlqu8CMIs65tkWtEFEYYoyEKER9tccQPcg4TxUIKOUgutASRwJhTQJYr9q6rl1gVJQaEzzEwIW2CWpgdD4wRa3JKsxykyS6aZ0xRkQ3tbCXdtGGIKlSmUn6EVXoQtsGRLJGt42LMexsbxd5cXj49Hy+MIlJbAJBgo8mMUAgQRQRkdZK5UWqAEIQBlJKEak0sZnVIII6XswWnQuRhVAprUUwxuB9ABTsx1lKaW20MSDiXVu3rTZ6e7xFZICFUZQCTUpZY40OPnRtF4J33nNkEUgTgyzMjBIjswsxhIhKaQGOISGExPy/f/uPAODu94aKQIgRGVBAUPrR4Xr8iST9mGo95sN+yIwI6wH2evyMAIJXk275zPy7H+iu/18P+z7zGbyarGM/FN28BfDZU2wOhoj4bx7+6ryfGV2vh+lXQ2MEBAK1Pjn2o20BeV4C+cwP2bxydYr+UFfv4/rrKP1HZF03CCSbygKU54eQfnyOAIiwHs9gP7xdzylgPXjux/gggMDS15ZcZen5UB4AoT/780p/XtR/Y7bTn5TWr69/rvO8LtF6IN6fZj1HwOeXYXOZ1pPGvg77Ml5dx+cXcj3FkPW1kM/MfdZDfhRBQBQhBhJAARIgQBRAQAIiAARaX+z1rUEItD4NrXPGAAIiIAwgwOs/+2mKgMB6egb9RFaEcH1ToAggASKbn/32IQDoPu9kqQ3RJnpWtVliFIaqCtPV8rWvv5CXW4cfP5vkpkjtYjYfTYaUJmGOkTF4WCwabbCLULexyG1A6GIERQgcQVzTOhcIwWaKgZXSsQvACOzmc4+Rs8J0rQ8dI4Xh1ihNiQ1pRQ5I0DaN961EF1Xspqfno+GOcIgtt5dNouzg7rjcyuppMxkNXVUtz44m+6WGorlYaqmMWpluSUS4wsVp25yfVyph5oEZ1OfS1svdPZ0pNmgHNtU2JQkEXrjDrqYQ2QlYSo2YNAk1NU3s2ghOBnkeutrkyiTYzza14mKQzc+XsfUo5uUXbj17Mm/bICp5+mx174Xb1+5uPzt8TNBgtRjdvj67OM/ypG1IMQrharpQGPIyZzJh4YmUd42g7lqXJuibbjjM1Pa4rmK9aiQaANXVSw+gtCmyYex8DGGwUzQXVVzxj99+TIzFYJRmab1qtidqZ1JU83r3+mhrdzQ/rtpFE3ysLqogvLtbnJ7MD/a3u7bTIQgGic2ta5P5rNOFzcrs8NPVQbLTabd7bQyXJy/f2z159Mk2qclL16s2Ng7yLPVtk+bWGup8GI9y77zv2lQzJYoUsffp0GwpXM7qKIS5zdCsarhcNKWZu2FiSSjE+ryqljAc5UbRYDBYdTEvdm7c2p5PL3du3Votmkx/zNKUBV6cVJ2bxyY7vQgH10dPXVckVjyMR7kxanYxQzKp3RKr3eI8Om/bsDcp5tOqWnU2NRAazDITWFkLxEluytKupt36KUAgQsT+MUNhEBAikAj9TFsEQgiIGCX2PQIRIZCECEpYdIyRhWOIRNQzFeZ+fk59u6IQ188nACAoRGEhQmFRChkEI2yaxnWHQ4jMwhIFMEQOkSOLiMQopIlFNCACBh9YODIDSowcWaAHAQjAbLQWgOhDRDJKIQIICgshATDw1clU5L5HRBHGDacQlr5WeigmLABASCIMgMDrDlIg9gUFxDWGYUEUYPGRhbBHWAAAQoKCLACilQbEno5wXJ+UQZQiQmSRGJiUEmZCAAFa9xASQ+xb/SAhBOyzsGk5of86EhIDEInIms6JMAAzX5GsCNB/FhBZQJh7psOIRCTMkSORWl80Qo4AgKrHUohEFH1EgMiM1Be3Z0B9x9IzGwAWQekPDgDMAogSuc8qiABHBEQiUKrvR1gwhkj9vceRJRIRi/jg++oy1gKCkBBSiI4FOHJk7sETIGpl1qyNpb8le4CoiBhAOCJClIiCuBkT5INQtfUqNdd++RuHf/B7f/yv/8Da3JBBCTGo80VzuHx84yD9rT+8/+i8njUxH6WNiwwQAc6XzUBTkquD3fH2zuCw6nZ2B5O22RnluwejrdF2XXd18OORvpYriiHP7PZk9/xsNS6zf/Hbf+iAdrC4nIWLRbN9bXTvlduHT6dN5K27+5/Muvf+6OOLylfOtRdzjaFZNVlutg6KkcVr10f//n/8508ePnnnj384ur370ve+CIPs+PEZKCpH44/f+vBHP3/cdoYTVgq1MikhhbB//eDVb/9iefOF2rnm8uLo+J0HP/5D08zu7qdtgKLEa9eS8STDiKlVWZGkiVpNfUTNBM7TbOHPF63X6nIZ6to/Pl59dDz1v/eR61rFPM5z68g1XmmTDlRpSaWkMjrYK3a2y2g8jjM1MJcfzGw5fPHaniqXwvH83KHGzkdlSRgQBYg4MiFFASQ0lgQgBlYGGQgVKaTQeKWJIwSFLULoApPaG5rpsikSOr/wNASYNfdeOGi6rqsandrTmfMdpSlaqy27QimOwKQhSX3LOktTSpfBjMbbjVB31u0kOxgwLbKYsEPFRieZ8SJN49LUKEtkdFN1YtAkynesLGkEoGBQGSWiwSYKEQ62hlu5ujOIL+wZS3GYm7iqEcOg1D6CSSjL7fy4bhbLpEiTUYmp6irftW2embZurIZkK7EjW180GKUskrzQ9z940taNyqxKDRnLzmut891xlQyLgzvX0q0//G9/A5I0GpTMtpWI0k0bpqvVoEiCEtdB3UWloyptfTntn4LJdjafrYbDrfPjS0WSKt3UngTyJMvyUbNcEDOR7rwkGqXjTKXWWizS26/evXW3cGeDQzmfjHQxTKv59MatHWb8+L2H125sPXz46f6uXZw+WizqcrKbDYb39kbD3Cm7enz/4Xir8CadTqtOJBKa1FaN61hFYGt1CKyV7gSbEA2haGJFZHXrY+eA8rTTeinaSahdZzQt6rCnjUMA5mGZhcoTR8ZojAGWYpjXTZOXWezi8nI+HJd5mqocQuQszQGjayoSUalOLGiNjn2SKGPMar7M8iwfDpbTmTXI4mLgIt3tgpfgLeQYPLumrerh7sSmWlyIDFaRtXo1XyjKbGaNVr71XK2GW8Pcpo0LWaZPLy+9hNC1iTVpaQhbVOqdtz+1idrbGxtJtRnfeOlOtrWV2kJpuzhfbOdbnoS0To2dnj989s7b9fnjpr648/K1b/2FXz8wWi3P3/3Zj19948Xh/uhyKb4NNkqR2Hd//NHWMKvPLltj68vLva19bVMHmVNxsLX96qs3X/jC9uHT++UgbRddi+qpD9tK3RynXVVbZYMmpcRbzcyFgazMGpZyb0dzt7Wbn39yXLUddH6wle3f21s2sj0utw22TTe/aGPX7Ax3ZNktobr10r3pclUkucrhD/6/v/2ll/6DKgHnxSiKUcfOh65RpNJi0PjaKDKJbb2zlkaT3cXZ+Z2dW//8N38zNq/v7uSmVEjYtXGQUFQoHnzjwDs3W9Tt02
6yBKWQcMQtooMoM0DtjoSKnAsSKFqMQHRkTkrou8g9AmSNh2vAYyJjOqXpYhoA9OR8jeCnM/q2EPSkUqSZwxTgidqy6WwG2oSsUUq6RsW9FRLBFJqlEiZcCA0iYejzzPuiQwaV0Yo6Gzcx+wsZ0KksbDmMQHSHQREmptl0ZotBbnOfR1HxSpMBnF3sOsWw2KiQIlBOneIIF8RdOrWVkHK0SxMiECrW2kIM/NqrERmh1UtmoHAzKZsswYRT4wxQlo7fs1YaJFsGZBCbZdOfbO2sAc6RhRA4dYKyRCli64LE5Lx6nRJoqM5246Z4yc5dOLUoDSYpDqJFJaA6jOgrR1Wy4WcwPGW0ijRFSST4b5mEyqlZPlovKtZyBSqEyc5iZKFEZsGYJtMXASqSwiALIahVJGE6E2ojQHBOpaCRCQsHH2cjF3Xaf6JF5RQQijlFi0iYEUBAliibRIsOLR6DiNUiTlOwkgIo3rALRj8thOVDg+Tg41fvrZyXy+DD6Mx/nh8UE+2olTEzioCOt2AZFyq7VrW/AsxC4AqcigYnZKmfWyLBsX2I2GoyxTSOht8B7atmtd10+ajdLggtE6BLaBNYlwiCLNIF1nv8RP5Hp1v1l8b8Q6W/aAyCJ9ykC/Pg8ACMhfSFlkS3yus6a3n7ZdkguqrUlok3HRi2Z69oHbKl+wlTbJNmMTN0EMW6vUtUoHrn1Im4P0pjj8gghtEj17Z5QAbEiJwMZ31m9Q9wffQDPq9UW4pU6beB0RxI2j6pqI9D/0jbiOQoKNbgi+xHKYiASkX9f0c9reGNavyXrSI9TX5IFNyaCevG0+b3uZUHpBFPOmi/2uO1yDna0vS66dbptZdC+Kwq0ra+M++xKM20qFNoqvrYVuEwF9ff43CxPZ7kNv1Tz9j73NbHuJZJMNTht2eF1Tr69SDbCtDLe5TwQVIApgEGQiIWECQQLVlzamrT+uB3u0bbb0O9Agm0wPZAQghCCI6jo7HOTauMbSA0X8/zOgRSb7nb/+V6d1q2z0+dvvJdnw5s3hz97/wf/+7//95x+8/+7V+2IiF+jqfD7SeHRj7/i7tx59Pl9efXLjXvGdX/p6HierxaSrlEZZ1fPxwe7ewQ4yAxrnwniwdzk9G46Igm46Sid79Xw1KGy9PF/UzsnB4f7IVu108WL3eHK1sKPJjSQbnp09GY3jKN6fruqyCRHUyoamCyJGANIs2ovlL/7sxfFgN4+im4cH77zzwc3JcTpsF242inO0/PKznwoESIq6KYPm4eS4Yx5PkJcU2EbQqK4x9cpg8vxkPR6NvPYacbA77uxSUxQlgxBWwdWC0HawM/56mh/cvPPt+dk70l29ePT9pnxxlGUJaa7rOCJDRWW1tX7/zp35at00893RAJGUSS2qOBvmaey6i2C74F2SwYuXL2YXc6CDF6f2/q3hjaPxYu3ysc6ywWz9EtF3XV02TotChsAhHZvjceyfu+ez9c6wuHkwYkETJ+P9SVW5IkFn11kcmVRfXZ1PhtFwkJbr1cXpiQuSTnKHprOh63wQHhbj+dWU4vTW8a354pwDTHaGOstVOvj0p58++vzF4yfT/cmhiQdOuXw37rrWOZsYEcYE1U46LP1cBe5K+/xkdvvGKDS2yMJ4Z4yRRVRaQ23bVcDx8KixvKrsIDbL0sYoOgQkS84e7u+vmYOT19+8f/p4HifjZHcHgJsAsaFZPTsqhgLibOfqhtG89bUbXdNWGDqUVVOa8bBBvrK2c01EkkT68Pj2xdIh860bx0+fvfjgs2ca1Gh/t21tlsU37t46e361O5Tjm8OPP60l6CKNqnpVZNCW5fZRgIJgne8jq73nEEIPf8QHQtRaO++10gjQB0WHEECQgQFBGUUkRMpojQJKKVEQAvfmWgLu44RYAgBoTQQISLTJxkMUUESRUoSgtArCW6ULCYNS5HzoI4SUMkiqDzhCEB/8RomjtPMOEQKyUjogsQhIABF2gSEAUBBmbwOLk+CYQUKq4j4amQMzh37AM1or0lqhIUVIhCSALAEQgRRLr5U0iAiCjCDMtnMMoJVmhiCCikQghCBIiCQcFKHnIIzMwQffG+tEmBB63dMXmX8IfcAQKcUcgHr5jBiMCMkFaz0H9pqUEpQgFBGwgIbAARGQFPBmBGWSXp1EvY03gCLT7wz0IA+JZLvzQJsxUZSmEID60m3Cnj2zBCJhASQKnogQqLeAIargPSna6IsBnfeBgwIMwERakULq2T8EEUM6BPbogveajAh7ZkTqK5ghUhD2HPpqawx9RDoLsAAEEe8dCIBCYEEQAkObFCoGASQVgidSgKAUkWwGfREmJOlvhl5KK9zvgiCRVjrwpjRsOk672q7Lde3WeaY723atB208EWdEBStPlJv8Rprf2lkune+cCYEbnq+bykftS05siUGvrAvKfPJien9vkqR6XTXapLBs7+4PXnktDV2dZ1cO7eWzKTS+nWOW08GwEsXmCMNokI722vmydeXcN5ilV1Uon9o4y0sXWEzFbJqgWqjb8ta90WB38NzP4jw6bcqTq+dHB29dnV38m9/7UUImzcL+btFWXTLZV7uDuqvzdDhvq/WsK/Ldk/O11lgvymnb7mRJmiar+TxKsVyFyeGeMWlTttPZdLK/s1o7EZDKt3bFZBrvy7LxXXl8eKOuPHNUrddAEqeJ9d16vVakhFS/r1TNFyYrwOLq5amJi/GNe6tVffb0GWNbh264W3zywcedbU6vlgEgH0bdvGTvdu8efuOXv/Kv/tm7q8/ej7PoldfvHtw8sJfPp08ezxfr2obb94umXYXGZiPsbH3nKK5qnJXw2u0kb+pvTjo3Ky8+rQeTvfRgdFV5E3dfuzf01eeRUvOL5SQ/mLzyhnrjVxsZuWbx9N1/lyQhK9R6uY5H6eBwYLKkWy4BDQfdrJpJHMzIpgezwZ34pz/qfvRZGO8RL60OsDsxGSUeYutj71BxzNIE4WrVvpoOA+7dePjttu4G95z3zaMPPlNcXj45v32wYxRVdX25Lj1KaqKKw7LqTBrtjIvzxpWNp4zqpqw6F0+KLI92dsYQyjzqTK4OdwZvv3Mxc2G96tx7HymCTx8/xdq/8eAG8Ojw/ld3H3zj4sqsLkMHxarhqgsm1sEHTVi1XWzixslqVSHizs6g6po4jsBstHXHd/ZXVVeuXJwlO7cOg1J/9Kd/+fxktbeXWQaKY4W0tze+aNpZ1UyXy1t3Dqp1neXFYJDMztciISJslq6r2nQYHRwdrFfLsqzzPBKxpKPbhzc+P3mZETsHKoo8s/KOPZVdw6qdzq8SE98cH/617w5XF08xMo7ks2dn33YIyVFlxs/Wzd39odZRlBDXIBACYRRpcVyHAJ0dpybW5Fk0wXCUjXfGV2fTVKfNumq9jQvqXLWTyLrzaZYhjy8vZ8OJGowK64PW2rcrJQmaaDwuQteheFTKem+SnAAk2KZaK5LZcn7z8Csi7D0bE3sPWVGQCAsRJsRytZibiNh7QZgM99DzfDX3TWd0fnU5Pz05CRp2d3fjJC6XJVjGIPPpUpM6PD4
+uTz/5q/8lScffyp1m2bR84vTe2++5Vuf5PHV1Wyyk3zzF37z/Pmnn773zv2v3W8UGJURYNOd/uZv//JP3v5p2kwnRRyNRy6F+frsajX9O3//P8egH3/20cDpg9cfZnlaW97d32/m/Na3fuW1r956fjkdoW9n1fNnK5dRnTz87NH7WnUJyc/9+i+cP1/89ON//V/+F/+rn/3gzw93bsxKmxc7EkJksvVqjghdaxEhLxITJ8yBgULrBJQxEDC0ZSVqkCTF5IbUz7rOFacXF//uez/8xitvMTSD3YHS3voQKKp9l7gg2ASWnYNxMkiUwaCFGZgBRVwn9eUCBX3oar/65i98i206na1uvnJkvH752aPEROLqfDweHA+N7Njh/lC5k/nFy06S9JAgfXZ+OdwZLxfzWzfuOj6fvnw2v7pSaeGw2L//hok3wDQdjuPM82Ltwduujg1EpOPIxIY0Bu+7wGBD0ALCwSMHBBXFLKA0us567tI8NYnp2g4dxlqDs8zgEJquNSbRhJrQCdSNBcDGrSJtMDbehRA4Ilyv1kkSaSRiLij3URpUaH0InQ+1BWfFOwbryiZF4yQwABNP0iGBuMBtYz0BRmh1a4xmcgiCoD0jQ6QoliAKTVNFDTtPUaSEQ6mMV6xEJATuWnKAbcWxT3cmo9mqOTzc9yJYNlx3J9OlFzLEznYgHgVyY3SiBdBoiNMon+wgaBOCTasArvJCJpaglIoArHUBECx7QlKA1rmWrXYqUTqAaBWhUaRTtsvGVey1Bmo6bqG1jrPIxCoJXhaNLcum7awg7e/uJVk6SJNgKdovBsLVomzL9cJepomGLAYwaaHygodD6kJNhia7hkJSdpimyiSiIu9cCxaBsiSwBIsSC4QQgiOJYgQRA2RYiw8dBwFsrW261nnXtNaQimPj2QsI6oQVCVIbAopQaGMddbYhNLHWOom0UYVOutoH8Z5dU3ccmIpx11az2aVS4NZX09nVYrkaT0YHe4PMaOdaidRgf7cDWS+4bUIyIAw+T2Ih5XwHoGvbaZA4Buuc4xAbbbQm0sxgrRWgfjfRkArs0MRotI5Ia/QCjbXsXDBqgzquGcEG9QB9CRv0kAW3ZifCL6QnAoC8kQP1b+s5zFaysjUC0bY4ek9beuizzRRi2cpBUKDPq/myzGQ7WRZA2uqWNn98qU7ZVr0jG6nTpnVf9kZtpr8bQNWndG8sS7T1tvG2ZtqWb4kAbMDENnQbYGuckj6gCa8DhngL12QLXAA3deL6BAwUpO0O6ebs9ieFYPOOPieo5x2CIMi0kfP0Jg+EvtjO5vg95UHBDfz6QoWFm1NDdA14+ldkY0Dru7GVZW36hQjCW6gnGyXWJqQbr22DG3q1gW0IKMDcF0/blJTpr7MI0MawB8iwtZch9taFnjb1XeubghudmSAEQQESQiEUItAKNPXyq36+v7me3JvvvpBCQR/BRIJ9Rze0sxerbeRPW8VTr5H64htwjYreeHgLnbz6tVfoa2/+hyT+s+/9ZMegQveP/h//x9/+zd/42s8/+Kf/9A8GRZbGxd7t4ydPXz5+/73dkfq1X3v1zV++dfXs5XrWJVE6nOw3JUeUkQi4oVHofcNc2bY53Dt2fJkNb3RhSFiMB8ni/MdxpLWOZ2u/qmauBhWGdtaqrgY+Z16OjO/qclkvI2N2I5XooI0nk0RmrKSurMtTOhqlGaimabLh5Ne/+5/+m3//e79w76vjSXF1fjaYjD786Wfj3WPK1WQwtB10TfPhk5cH+/kr9+4MinB28iRO3SgbTGdloQaaI00UEAMbTbpum8uyzlUyTAqb4Ve/+/OhGlfSPv38D6mZqYCK1WA4qLtuffnZq0cHdd0Nbz48uvXLz97+UeqbbAxGdXGUzZta100y0oAEwUamaHXhuXp+emLNyGeTNL09gFXtZhfz2TAdXE2v0jRdrUoInQves9sZj2JNHa9nV2WUxrsD+ehZ2bQhL/YSxuNEnV8u0ywr/UK8QgkmMglRXcqLFwulTRzFkxjGu6qIRZNU9TJKo8n+UFMAn6Q49mmbmEg8aCqQce8gRz9cX2Bbk0ri+bpLcyROlMY4iooIjLKr+jlBpUCzyM07uXfCIekCTiJyHHSUNF0NRiX5OI2SBlUWVZFOD0d4eflYJXYwTAgSH3wWQz4ePX36vMiy3TSBjJbLlhVTnCaKhBJluIiI9Ojpk8ujvclVWRoXBoP45eeP7GiRDVLKB4d3jlzb+a4JpJUhCtXJi3NxiRFM8iIbTcrV6fqy025W183d27tCkO2ne3n29ftx/MwGBX/77/3GP/4nP4aeHQQGBOucs67HwkGCYhQGoo35rP9SWWclBE3IQqSQSAMRGdVDn21xgb6oVz9OgAIgpUWEMIiQVhugzRw8sw9BASndm2VZKVREpFQfPROEiYlFQgDRQaB/W8A+hxshiHfe+wDYC8UBkAgFWXoJSb/XISABFYJwjyUibTQqAkIi6x0H6S1yGpRGBSw+CJreSYdKCwtw8NY55wMAK1LWs3N+Y7XjAAAMfbQTCAtpBUAC4oK3/enaWMpYiAMHz462g1gQJkCjNBEi9KnVG4UybULnwHPwwfc6qX7YVUr3Pq++CCgIiZCIeB8Cg0DQihCV97Yf6frLysKIRIR9eKF3fR1fYpE+GaoPeAp9rLhHRtakhcV63kqJWVic86Ck34Lo9boMQoihVyFjIAJt+ikACVGwDgACi+f+TrPe+35cJhCtFLsAgDYEAFREaFQQIUQg9H0AdWDoy2qiUkqJQOCgtQGQjV6XUCli6d3RILKhQ4gowogYvEPqZUaC1Keqh2sD2o3bh6t1tZy3AVzwVjTnkdaJQeFysWqunFJxFwRNBPtRPoqAKVZQpNnqqplO7fKqnc1kEKlJjoM0+fT52fHuxKFPdIhwdXRcvPzZB8FdHd/1i4/Oo8ODyX720391Ghr15uu74wFdTU8t06rGxhyKEBjYuxmfdS4xyfPLtgRIJlikogcKDEvSUhqfVHUzX8FIX64aa8x3vvrmH/7+zz59+6UGnaUoAcrGTcZjTCclJTOIhnFSTHZdXLVgB2Oo5henVfXuRyfHebw/TvNBlEdushfXtRfuIM0pP5qX4nGgcrOoQ2L0YJQvl+eWdL5zOF2JDFRkorpuh+NM0DMoRLValelgqNNUqShYw0Rd181tKJdXULZPHj2+ePkyBFeMiqhIlpdz50LtgVjWa5/lxZtffetk0V68SO7eerAsl5KZ+WJ+8vJRdXV282BysLMXD3cne6Pl/GL3TvL47PTKdZFTfhUmNrx+FO+NdI5lk9hoNOnYjncmR3tv/f7v/1FqBorbqgu3Xt2Zz8LZrP768UN21rgVVKeL2Xx+1WqVShuWa6uzwWGSLB69n9rF4atH1NWzs7PnP3t3fzwgwwtQT85ag7Q/NPM65BwUCImR1pPmoFhHUIzzmqlaX569/8cBzO03XhUyD3/557X38OnH686ePnmOTpRiSxQFm8caBvnKSrdu0jiukJl9upvfuxmvGxkMzMXJZVfOsyLKRunHnz8y6aQY6M
t5BUK7mYnieLQ/ljRLisHOrUl8kEUQHe6PZk9+BlpRltQ2AFMaJetFPU4jSRKHjdhQDBUFXdddL6oHgGefPLlzY7+15bsfXUTBB2DneTQwAjJdNSbSDx/cCE1QMdUd/Yt/95f/2//6P/eL5apuV+sGbTyvy4lVu5Po4HgyX/MoKhp/OUjTYZHX1cp3dL5Y63joQpvECoDyPEIQZh4Nxo9fPEtHt28dDF8+P4uz4a23HvgsfPbP/uyDdz46/gdwvLN/fPTak2cf/vov/tKstqv1EowbDZPT6nJR14Dae0bE1gVUEpCaDsbDoQ8hGQ2ow2yw41d1lseKLbRlMTzqypkQ3nn4yvnlZSZqvp4PEsqywXreJCNO00HH3PngOocoepCJ1sDBOZeOdibRcF2utQmGtLXrNMkHw8HV5QkDA9GDr3/zxckzkmgnL54/PY9iFVwQsemQRLmW7aKpXvvKt8+m07KpQ9mlUW6t9UBGR08fn4zyg65aD8ZFcTQ5OzkNXWMvLozKsqzAwSgEPr+6iPK0LOv5VWddqyi+uGivXs5eZH854OXL9x8NiwMahuz44O0f/uhqarHBqa33bu3P1+7j508pMj/66U9/7Ve/1Tqe7B8UeTYup19//cGP//j7xdHgvc8/X/7Fv3/x+dkwznA4evnCzUu4WC9DZH/2wU/uvfENSpTKsq6s17Y0WRzFie9C1zplTBTF3jlQSqkYHELo8lEY7OjpZXt1dX764kNlzOnpaXZwcHI6/ezD3/t7f/2vzi6b0e4hKGpXVZFl6TDp4+1a5xExipPad0Bqd2+4nF3pOEsitZzNXnnj9U8+fBF7u7q4TAc7nRMb4OjO7XL2BDh78Xxetc3Nr92e+vHTj54WhWqWjYrUuq5B2/E4H6V5Pb/gehbnRqY0PV/kRxNQ0fzyov8WfOX1B4tFc+82XkyvpovZet3kxSCK8zRPUqWILQQGwMCBxXWdd97GJkURsU6Eq7oaFhGIt23XtQxGKfLsXGR0rEzjggcE13bO+hBMYgI4BmrKNgTUpANJniRxTEaZrraVb5xInMW5MRHwcrVuV0tnPUZakWYABkegjCbXzQYYQRNCa6IRjHZ3auWTtKBMt64X6pphMY7FKM61V/OqsdSVotKIUwOJtpmKvONla7sWLGPTehDV1G2aJBGEiF2c4OW8Wczd3IW8iMbjtMgZWMAzkkPkOE5FiNjGkfYc4mGyaqiqO/DBl1UWJ7EmQY2KIkWKASR4xEhFiBoYDBKBttaKbzVxouOAijAWEN/4VIEEnM4aZ7mtWwA1ygbZOB7vDSWwMoJkWlIaozgBAxADOA7sQEVmfz+TqGa/RrERmXgUCg2l1caQVzbwOtigsHDcBOkSgwG11iaNExQkbjViaBZxNKJYEanS2i5IY7vWCQMSgZWQRTrSiaMEUANSjNy2JTGBEGAkqIhUjJpQBeYAFFA5gMBMAtVqDd63Sbeql2QbH7q+PEwQQAptWwUWCna4O2wSrNuOa5uNMtc0sTFBpAnO+QBRZDsrQo70aDSJ8qFzTdfYECBQ6Lgj0BhAiahYTBprhaQI2Rsky9Q6652LTfQFKNpIfbYUYWuQQtzUI+4hyUa5I9BnA18nCW2dVxvxhmw5wBdIgrbqFNl4s2SbJyPcA4kNpZKtpOhaHwMCvW8Jt3qVbeYRAGwjk0E2LdiqRDa6lh5n9QSod7j1heC3SEs2tU+unW2ASL0WBrbqky3VkWuysPXkbaKDtk3uc4Rk434DwU3AE2K/ntkIrDYdoWuCtYFphBtGJdKX/d1WPMMeoclWLgWAhCibel60NW4h4TUT6X2DQEhfaIeuzx9sYq43zA02S6ftKYKNyGorvNnQMUDaYBfcRDAJ4iZPCED1C5brC75BZrJJIN+8tj2FWwi24Vj92eptbCBMqFAIUYEoFLUpngYk8EXEKwAC4SYwa3Oz9SuBrUIOpD+jgMhwHVRFG10TbDHZ9qJuUNHBJK8varNuKm9/97/633z3F3/pvb/80fPHL2C+/PDDH9UVfuXukQvy6bPp4+cLdovdw/S/+vu/tTOqq84OJgeR0a6eFUVYV3MyJktH1hnram2IMa5tZ+JYQoeoU2275aXtwmA0nM8/Wten6fjn15fTnZ2H4/Heony+e1B0lZ1fLa23wpaMJIVybdPUQgT5YHc175Qtmb12NI5hfbVM073HnzwtV+XNo53Tp4/0aVCKP736+MbhAcWNKYb1atm0+vbtVw52b7337jvv/fST3V2TF6jU5E//8kNG843Xb5bNZWq6mS1LqyZxosSMs/ze8WsfffDT3Iyr+Wk3f6rTOBKOhtBV7vN3n44Odgfjyb27P6f81cFu0dn20UfvxJRcvnj58JVRrWOlTTEoFBIBKwW+4XxYFDAMa2+awWB8FKXZ8qJ+9ebOajqPIm1yzaiuqnWxN24XZJnv33twdfZSo4sM7U5ySvRkDzp0Jy/XJ+fq8Oa+GYwCt3EaZakYZcRrQBnuDLjD1dwVxQhh0S4X1EWd53iU7e8PlCTz9TzPTOjCdH4BWsajUai71XrqbUd2Da7ay6H1NG8XN3YwU63VEBV5nOuqWhVKokju3Rh/9MmMWRMGg+LE1V24uGgS5c1o4K0LnSsi364WKhvsjbP1YsEcgi+7jpIoiaLo4vwyQo2BRVKWYlX70NadrUwSoFpl2ozybL06KXbzeoUvTxav3//abL6CAJcXSw5R1S1ns5MsH8wun3LQe7uTwWSnnK8pwYOjo3fefdK2eFmVNo2L4Wh9MVsvpEiKZx+vf/zJRWrSo10zn13sjKIQ4Ad//rP+W0BELBKYmblXLTKARhUphYA+eBe898F6C4geAbQRJlIYkQEAAlRIIGK0FhHpLUcgWikBCuxYhMUDAIEKmyLnIUAAYecDIQTU1kPPtY0xilBAWBiBvPcs3CcgIaD1TikFhCKsFQUU64P1ToAUUKwNAAgLS+BrGSspH5wmpZRhICMoIoa0IuO988KttcEHUloUWh+QMDa61w0FZthk82+fhb1WCMl621fMAMQ0S4n6IpPiHaMSbTQh+dAnOPVjQcAN5sYgfZh1L4/cRAP21IdI9UMLISqlwyZ4D0ghuA2GAwACinTUZwIF6KOyPYc+Z08A2HvxIRBij/h65zESYV8XQLAPlN6UHBABhMgYAuyLLvSPzETFITAgCrPywXnVuQ4BSaHW/cFBa419IBEAIgXvBMAHVoieGcH3AwUiBWbH7IJnCT6wCx6RDJFBBELnGQAZBJGcdSQatBIgoE1TCQGECElr3bMthaafrRilFCpB6UdAZvbM0l9CQBYhVCKMQAQqAPfxU/1f4XaV/PzjFZkoSfdHx5PZ9DzPotnlrK7XcYQ6SoRio2O2PhomrEAQbO290KqsLIS0YEIdWi0YPEljJc9Hz1/OXdUNU3Jn1UsafvCoikN1/DAhhrBcLZbV6hIvLvT8bP7Xf+f+4REtl6ddF7/z4fkgTQUwP1J1Va26rtCxVtTYUFeMHg9uh699ezAv48s1Yiyu8+0lf/i0/cm733MnTCSplhh47yi/d
f9wPNwbHtzJjm62T18Oc1ovlo5irWIvbNIdHaUAdHJ+sV619moGj58/eLDeKY7T0Z26TMd3Hhzs5h99/GHbyfHxgRH68bs/TXJv6yZrJNjURfnhaO/88YcyX2RF5Cx6gHpVzsvHk+PD1aLUWWwi1Tbt6dllE6BFFnEWazLkXN1drlKlXOD9O/vry3nbIencuyKTNNfR5KsPnp49jnfS048fdy2Mbt4Q1Kgjzb6enjTl7NmTyxdna9WFROROQgejCJf1mpiGenxYrMqGMHB90a5Xh4Nkfzf+8N1pOhyhE+VF6qV69n1Zjd7//h8vPv48ycx6Ue0dxxQRDwZUDM4/fPf8w6dVffXgW69ePHu588rDKGrOTy93duGi86OhiclPRkECBrGIiKGPjLRaS5rgZDJ0XUhGiVjPtnzyyduO1dHtu3uHN++88er06uToQf7o0xeIeDlvQhwvLVsPLcA40cqQQGQGSTEquLb1fE5dV+zF2W56NV9QZCaj4awO1ao2SsdJJKCP9oqDwSBDlsWLT/9kxt9cY/pA6/XJhz9OOFBkwKKKMB9me15Wbbtq2/F+hKk+uVhlaeob3/lNuHse43w2++TjR2uM7x5P2rIj5Ns3xo2TFUvTuPc+eP67f+N3fvDJR1rFLy+ns1m5e7h7/vJsZ2fv5cX09W9/Zf7ysUIjyErjdPqiqi91NNQ4HGRx2dS2W3srg2JUtYsiKxAxuNBWrdGJXXXLevnq/f0ojtHQ9GxxtHv427/9zd//gx/8w//z/2l/7+hPv//hIDP/n9/719/89rdNPvLptF1M4zzxdZnqfpaJcURNY6NYmRxjo5t1GY8mXceKkiwfA4oNuLic5mrga0cIXV3ujEdIMByN0DNgZxLxXembzLVNbFDladM1wVM/qz+8cWc6u9JRbGJUKmEfsmwUgmtDbdLYeVs1jQ9+NBzU1Wq+rknrtmyauqnaFWOwrVvVTTIYXc4uhX1qFBcZMEa56bwXDuvqcnd8Z/byzFtvJnmk9MMHrwdr5tN1YQZxnHlXC4dipxABty5NEtFgVCTjrDA/+8kfBO+aldq9dRgu1l3olovZaH/n+GDv5dvv7e3F+UEy3CmerZe3j46Xs1XTuO/9+3+zS79aVtXbf/kiy9IQrVQ0ff78xd2vvX53dPNH7//kYDyZDBw1yeXVi7/xd/+e89YgxZEaHd25OD81hrz3nj0zs3UBGts6k8WGcL2cV6tzU6C33eJytVyutWs1ya/80jc+eP/daD997+RpKGs1HDW1JSWCFKyfXp3HUZ4WRXDBO8cBtDHW4XpRh9YlsV539cXlSWcb23KSFrs3dkyaa2PqtSPiYu/oorxswOvjNzu9AwjFJIm0DKKkLlf5IKYosm1ddlLspIdv3l9enh+C65x2wWo9ipO8/xbMp5fBBTRRrGmQpEYngQWViMLGd9AFQAaRql73MUaE4F0QEQIGQa0TAlQSiJl9AGUANQs7653iSkQ8YmeD60ysrG1JRf2SWKm+vCqRTmrXkjfO2rKzOtIpSGHALtfGWRHIs8hKQMIgIEDMgb2LYm205oCu8oiCZTuYRCiIpERxhGhIs3AUmZR2ltOz1q4D+qLIRkXiXOtcKF1nKFOUkHExIAZ0lQX2wIrr5XiUnb1cTeflxdwumDqNGtIsoog4WPY+ACNY4WAhsNE6juHyahGACEPjOkKsbeO9DiIqoixJFKJzHkViijQmVbsO7EU6TYqUjEeDLE7i0ai0XJWO183sYrauurLlqvFFke2M8/3dscmE2UmQpnZZFCeA7K0hwiQhDty2LGxSZOrEdBLb2HDHDWXGDJSt2PrGcjDaaDASKFJGJFapXkOpktwAUQihLoMLA507Z43JGcVj6GzDPhjQAUkrrQB6pYNG5ZmDd4Y9eB8ZU7YtEjJIABeZzBhFQBLFnWVNGo3y1kaRYWQJVXC1q23XteMiUQjVojKY+IDz2ZUP3e7+UcpuHGsbjO8U5elyWZnYWGdFACAysYnjBLVOI6MgECFGKqB4BIoSAmLn0jiP04hMHyXZZxJSHyfBLFXVwvU/vSRlaxbbLPi3i+7elSUbtYkIcF/iCvlLxjURhL4yzWYVjoQbbcrGo7QlPLjR6nzx7/Vr/EV8Dfbz8G3+kQBsgqB7GZBsWrm1v/XvwesfcVu+fhscBL3GB7a5QtLX3hKQbbOgn+eD4Cb+pq8Gj9JXIAYUDtu29e/5Uh22HjNtlUVAG0PbBg7RdYE0RNp4snoY9x+56DZ6LdjKjHhzqjfiqs1htuL+TUtkC4Jg46/akCigfhd742PoPR5b3rb5wM2J38iw+kiMvrP9WdoccEt/NpcCv3iZ+uWcbCnMtdqLN4forQnb1m+a/CW81r/YVzcDEAYFChmVkDAqIezvG1CICPwlYNfP52lzIyF+GatBbxJlZiDBPn+cUCRc96Jf0W1PwpdQUV1bEyWugbLqpldPbh7tvPpf/p1I13/yL/7wZLoCZZbLVePFem8Qf/XnX/3G1/YjmXVBW6bY5ATkfZhdfOyBdsb3xDdFNKibijSleSp+6EPnu6VJQgg20lQ75VISwlV1dngUAQ+C6y7t88g4QqjahSRpFk9sux5ksiqXobW+5SgxU/tcQaEwBOBiXDz82ugP/uCHt2j3xt1DH2B/sjO7XEIZddaOIAodr+x6Nz1YTK/2hkcnL95P8/HOUUY6d64tu5LWzrfRaGdsQ2WMH+fZbN0Fl7QCMSJS+PTlp7vHYxC/nl8Mk1HgioRns2p61WSDoWuSTuyZPXn9VuGkMxq/c//BD7/3H44PCx/K1q7Jt1Fi0NdKHDIgse0cKt21XWbS5bOng3xvMknr+XRxuqS5LxPIi0FTrzSSqypXLa84EESz5RpAkYq8U6niVw7SwzQ6vRQTwmI+u3087laLIt5N8qitvQ3c1s1k94a6aE8up223Otg3Qk5T1FnJh1G5qiO9o9Hl46issXRsSbxvmGccgoKgI//1t+79i3/7vlOiU8Ou2j8aFKNQV4thRsqi0sPzRWX1YNE0cST17GUx2Nm/ectw59t1A3WSJ1pljGS0uODnq3VMbJtmko+ViefLZV01ylPX2my/2BsMXEhQLLH32CVR5KvWUrMm7YLUNT15evnw6K5tFsWYksKEuZOqGWewBtG6FkflUl5erkUeFXk+B5skVMR8eTbV2ejRJ5VvbAbKNjCajMqybVz82it7x7czH+pgZTK8+dHnZ/23QGlFghh8b+ZCQhSIlBLui5ErZs+KNDMjBgCPqBQCUQAkxCAMwqp/JpKAAPUMGIRBiEiYqTcwQa8BhMDsw6Y+ujKGAwcE1JqoT24WF3qXkPI+EEGkjZegEIAUEhIpAAwC1rsQBFARwvVTt3/oMAcRBA4Cool6/aMCjBEBMHh2oXMiZdPawMKsFGjFSislSIENQf+IYwTuI/YQiZTSqvU2EjJRigh9PJNWxAG0iRAxxEIESpEEDr3XSzg4qwAEpXef9QY5BYhKSV/HHagP99dK9yJdAUKWEBgRWBgQhEiEfQiGVD/ObYYl5g2NwQ0c4+tSdCxK
KRFGFqWUkj65nHtjORExByDVP66NNj1qCcT9sRAIkXsO3+dECbBC1Aa11sELbmoZgEIlIiyitO63TZTW/RjIwCF4EQ7snQ+d8yGEEJg2cXcCCN75HrQRoYgoon4/oL9FQvCaVD+piIxWpIg2YzYzw/ZDEFBC2ApcETaUTbjnTAIoECRwfzEBiAgFriugPXr/cu/wMKamPg/xXholxd5o0ERrhNbrThCjGFPmumxGgyLi8vnFxeTObYVqXi2zWO3vjopC1WVTX87PZu3hKHKZxkEcgd07GP9f/vu39/PhL98fDrK2RGtMMsrMziGvaz5t8ff/4vT1u+6NhxPe4ei0e3beWaMmOU0O99fzmeu8eMoGidPYepauGU9IDTI7HpzPl6E5vX0PLy58TblKmuOd+M5O4lddOhmPH9wbjvZYjc4aHo+PfTddN/Vk7wBJFxEuz22ssr2jYTLO0sLUi/L5kxclq/X51f0br7319V/40//wlx/+9EU5myeDWKZX+4d3besHO8l8cfny5cXOzt2TT97NX+pxZFrb7cb7xWjYurC0hdPJp9MSIdQXl7OX08Eos65dlw0SOe+Ws3K8N9A6TGer0SA1UTIxGZlVva4GXp+fPf9P/sbfPHn/xx/+6N15fR4I0cFoPDi8dxsDHt5Inr/z6cXJ5fOTq+m80YNkqHTmuzs70Z1xRA1WDUzntmo773i8A4sXz2/de/jj7smfv/3RXpYu54tqvSCIbjxI1p9+72KxVKsqwZAkBRRFqJr8xtjcvNXa5rgw754911Hy4Y+fNgrPHp3+tb/y5nvvvxMlg51BdBnBN+/sradzSILJsbMhM6Zet6FmCZRO8lXX5nGeZ2ndziz6ZJRFTpXzU+IOFPlQT4rozp0RaHr9a3ebiqtV6zxIlHjfgvhb+6OD23vlGu3VktlL6E4vZoe7iVZpUzrfibNumGdl7VzpqsDpCE8eP9dZ8jd/5zecHb84VfvHYNef7+4WT84uVnVIDI3jCECQOCA2Xegqr8gXaZSmurRWq82M6PHzS6668Wi4CuHJ5QwtS/DrGQwHxU5enNqlzuk3fvPn/uUP/uT985WX8P3v/fi3f+u7cYzaUIDG2y5J8jwff/zhx+n+7eO9CRFXq/WqWlhuNcfMECM26+VwlNvGetvFWawSUgxppO7ceODb1WicqCjxbWfi+NXXvnJn/+Ozs/n5yqokXnv+/MV5Wf75d7/xjb3h6PPTUwm8vzdarlod6aqxACpPEw5BtNx85bYLnCqVDjMlWb2sy8V5lg0l9si+GA5ba53r0igCYAhOGeV8A0G0FtdNiYzvvCAS6jwaVeVL365LX8UJaelc1wYwKKiTwjvhTjAY8S5B7ebPPIfM4Kr2SoPOs5OzqxBaJ56AYh251oXlSgh3btw7vXoeZZmzXK2q8U5xePvug1e+Uq+bRJvVlfNKZ4pNHAZjGprU1pcorUmwXi8BiWLJisijqro2yseTwSu2u9zbmxy9cvzik0e0NquLs65tf/Tnf7534/anP/nZwdGeMaIUnV08fuMrv/rpR89F6qqcacV33rzx7/+/f3q5OEkH0YOHt5+fnWTr6d/9W7/4Z3/+vVGerC7arlbDndFqvlY+9o1rpYmLHYCgXAWEcRaJgO+sAurK1Xp67m29Xi6PhvfImNFQ37tzf3FR/ugvfpzAR6tnLyYHO9/95qsX09nr91+3BmKl2/m8a5t//af/5uuvvHW0dyvJi+FOsp6uDoeHTbs2lBpjFlfrOE1vHL5areq7d2+U7YVJVF2vB3FS5EOCalmXq6q+/cprcuObixdzrVw+SU8fPzs4OtzZ250vVgf7u5enl8nNg+GdW7UkVnD3MHv06HE6SpD03vFO/y14+vI0NmnXOkNIpIJ1cRIpYA6Wgnde2q4WliBEIiF4EJ9lpsji1krwjTADa9t1oWMNqElFJrG2BmHvXIeoUSMgg24t+xA0cZaRVqAVRnHkva/bxgXWiIaM0cDeVcuquXLKBUOEJjKEaDtgjpKREFjXBRFSqgveMwNLs5LyvLtztOONbQKYOMqKyEAQJAP+cn66cktPdpQkxmACHigBrZ1zHJDFsw9KFHbsq04DLZfLGGF2Vl7N6q4TAjXIdK4Bg01NnEW63CSCBe8DIoSgF2Wj0CIG2wTfZzcSSeCAQSOh78gJExJwF5wBcM4rIR+w5ZAXajQekudIk4EukjDZHXVRdPLsbDYvWyeI2rdBgDxy17aJQ61irUiBBuedba31xAwoaUJpzOmQOrWahzJCkxsMwQIiS+hisuJaFvZxLKgjJaiAgJWMi+EQjXSNVrIqHThh9EG8DY0y5G2Xa00U6s6GXrVsIh0pZktAhNwXO1HiWxuExHbW98g1dwABAABJREFUxEkSxYRKASoiIV11TKINCaZESklgkqC1psjESSQKPIsAdLVdrOblesrBSr2OlM4YlTLzTkBBnCSAFEdYd8F6GaRRlsRGkUIRZ6MoUwCln6OESJFSihIko6MsDQE0gCalCIIXMJpAvPN2OyO6VrvAVmmx8WJdS4w2pOdLCUTbCXgv7OhF3dLLVL7099LLvHs9+Ib4yKbo+jXYIexhgvSWLuyTh/v9VsHtNG+DJ3oWsiUs8AXCkJ6LbAwOPaShDYbqI5DwGpn0r3y5g9vfESSmTUEy2NCba/FRHyl6zbMYQGhjr0LeIq1rjVD/if0Wsur7QJtWXDvdeqFR7xXYnvJrjRP2kiXErWr+y5ANrqEZbLVXW6NY/yvbTGuh69Z8gdOu0VgvhPoSPNpQPbm+PtscKuh3w3v9GGJPafrgka2bS+h6VSabjKTNbrRspvFfeL564dk15oLroxAKohAIKlBfkhfBtjTe9qpuEFR/1pg3jafN5nifWkrYp4Oj2jJCkS9FOF0jvi+hopt3bkpD84vLB3eKFDxBXS1WdRIGO6Nn7z4nE58+OUsyfOP24cHu6PU3945uxMvVcjg4LkZDLZgafWZXLFWmTJYmGlMAdHGIoqCcoB5ozJfWo7oo0rHOdjUPULnxwU2dH+hkDMqgSTDw6bP3xuODLIkoiabrxfT8hHeLpNgdTEy3ngffsehYpa7FycH+orpAVb31i1//+O1ZEat6ukIqBuOdIKOTj18YJdPF6eHt3eFw1x86A/pwOCnXdZrpdJCdvagMyepiXq/Lr339tWV3UdftsuuYktFgR4uz3kpQdQfBW+O6pvO4ly2X1XB/d1mVXQnWw7Lj3dh1y+l8oCy4/VHywft/Hidh1i7zKKIkOnv2Yvc4L5Jx2SwRdKZ16Fh0unv0mm0W69X06cv3R8O9ZsFJsUuEzbpdt86Mk8VsJmW1buaedZqPQih3R2M2YrIBhDKNktu3BlW5Wl/5wP7uLZeiK0vLolwXXswvX7//2o++97O7N28vwTahHk5uZpkWl3rb5XH05OrR7aNjo3TVVC0HQLo8vzTsDu4On364+MEPTldokB/pYVTEfHicowXuKtN509VplqnEdF2onWq76MZ+osCG9CjJB634yCgD6cuTs8P9iZjYi/VIUaLBpJ1dp1naVSW
GrkhV2Vqd6TgzJi8gisfJ3np5SUqlySDNpPTrddeRqydFkiVxKc1f+853nzz/qMhTJHz9a/f+7Pvvrsq6kwiaBhhRaGf/0Nbt7k786p27H7z3wdfeOH711T1jirKVF89OJ0VycdrGyeCjurUKwcjZdHnnzm7bNK1dNV2zfbAjAEYmEuxrpjMBKkIhCsD9I8AYccwMwISayPRlyFGcd4SEGgjIgwVEtcUKzCwiRCoIMDAhEXIABt58g4EwItSaCJQ2GgCBiBFs2/WiICKMtNZKg4AhZbTxHBQiMHBvHRIEANJKIaCgF2D2ihQAEuogQRB88IAQKe2DB0ZFyjFbFwJCE3ztnQ+CwgkhInVdJxIkMgqRgbXSyhgUCMAIFJmIGOAaixBG2qRpRsBKR4gKBPuhobfRsUjTtWGT/sQszOx65ZbqAzMBaINIQKESCP2TnkgxCGEgQmZRyvgQeldVYNaktFZE/a5HX51TjDayCckmBubQG9mQEXqDMAKBiHAAEeaAhNjndoMAkupTiFCoj77uDeMARqmeLvngCbXW1A+LRhnQsH3EiwQBRRGpTaEEQNIKhIMPzlukDXRiZtIkwCgUGU2KDKkQvFIKkfqoLEKFiIH7OPWg1HZjZTNOboYTUnQ9cWHuN2pRKx1EGIAEAjOIsLALAQkRUJMKzIQKiQKLC35bAAEA4PGHi+kjGWgVD7Uexce3b4yP9ifDPVThsj0FwqbuKNJBmmbpfa3v33zIAZh5PBozh7oOJo0Pd1J9d+9H338+taKXbToaJ0n23/5Pf/o4DGikV8Z9+Ml6lGlEFxL46i8M4mL98Ut6OrXhhCbDTFN9K2aVyXOKl45mF3W+Oxrnajqtl86mUcyW5qfu0w9cfESHr7wa7VysLmVctAc34+VcXr9ZvHE4UB653R0c3k+ifVR5Vkz2852zxy+8RPfuvWZS9eTZ6eHkePjg9bPHpwqiw5v3O9u65urweEjGlKtmPlv+k//n/221KIP2h4cD55pO8l//O7/zL/+Hi6efvXd4K8n3boqanD17efPWwcmTZ9/81puXF6tP3n301W+8uma3bIND09VdbIwZxKbARCWtb4nErVySqMCOLBVJSqizqJifrBTG5Baz89n0pNS6ahdXofVxhL/wy7/08vGJynVZNnUXPvjgnZNHpyBBgYpMIqxD3exPkuNRpNmNh5lE0cI1AJagtbYxMZjYG8SLstnZyZtZMJoMhnp20V45t2iyyAxu7p2uW9vazMRxcSx6MMDm+Q/+sHlxcVns/Whev1eVtWpdnN/7yltvv//itPbNJH42tQOdJKlNM2m9W6OHgfHY2safdz4m47q2mU6lA+/A5Olkb2e9mikKk8Fu29DibOVdlw4KHWMkND7cjYx59PlZGuFoNFwsqzVpBp2k0WiSv3jWeEheTF0EkXWhXNt0nN+6f/PRxy+7yhZZzN7fvbEfj/bK0lxWTYiTozTWIZmvpsNRMusWEEeJyep1O127mRczHMbjqJ7Posysy4Z9aLffglu398XZueOnn11khpQN929Mlufr149u/O3f/M3/63/3Pzx6efLs8ZN7x/vvPpl6z5+enf293ZG/mH7+8Tu//N2/OvOEys1m8939G2tWZYse6d7dm1eXKy08mSQs0bOXz402EkhrA6KJKI2TLE3zVVTVLemsKqsihzwxy3U1L2f/i9/9a3/wT/7Ni2nbdkFATGKuyvWqXrzx8PXPH6eD4ywzkuf48tmJE2cdagVDTesa0CQKuJwv02ScDCdRN8zFCVrBNoqV8z7Nc2+jEELXNs53SZKz923dxFkU6i4rJmmcLVdzjWpdXYTQ6SgS5rpu9if7ddsG26ZJvihnwLJ/eL+u127NeZHFeVRfTevSgQTbNnVTt34WU+ytjEaZ9xJCFySwMmfrmRjFCkMd6uXy3o3j/Vv3dBJ3rRse7FcLLsvqzuHe9PJ5msvF+Xu2XuvY5KOjxWxtkmJ8MAbEdt3ZJuzcuht/45fml5/87J2f7N47cl2JIL/xW9/8k//w/Xcff14+Ots5KI4OD//w3//st3755/dk9a/+p39WTI6zNPnBn//06foUv28++vDpP/zf/R8WF8/+1R/90c//1ht4Wf/pH/7rYpzffvXG56fL0qpwfhWZSIusyko7USZFduV6YaIYggDoar4EkLxQtmzQ4M37D6vGGhbX8mLdUpE/+M43ysUTMxo7SNZXK5NmdYschFWn0Wdx9Fu/8jv7k7xetcwNar1zNPzR2396fPDQds3R4UTIR1mcDNN4bBuQLuDJk9MiHXbVaQjz47uDtCj2b3+lcpldtImyi/nZKtRtaE+vTkbD3Oh0Or8UAauyWt9KJCGyGuoHr94+P3fLq8vS+O0KE9quc96LVmKd9CsixrpqjULP6BiExQkSA4DRQASiQLQIsvQJGkppFWsJofFNJ5YB2QsDpElklGGEVpghRKhAVBJFidGkSEVawNedj+MsjTOCVjpZLcsQXLDeIEFntSAbBSpWSpRWgb1CUSoOwSMJatrbG5+u6kUtZtblO0pIx3kcJWEQqcWsW6zri6k1Wue5GuUUWDrnaud7535dLoi1cmCrjoJSrovJRCYqV5VHneRZ45YmxixXaapcZ6sycASgokjFHTaBxPvASgmowJoQFPooSoL4SEcI2LqSwYn41iEgJUolUUJknISuti6EbJhmGe5kKVSdLdtqsS4bflZdXF6tLqbrtgudDVmcQRTapll1hiIhRVqD877paqPB2craoFQkQKKJOTipREmjDaiUvORqhDBY+CtnImDP7CBoQ1FmChOrdXspGFJMY29cA5X1tlWd9wpAyCEBBCOsRJEgRbFOSPf+FeetYnKhDc61XRNrVdetIJKimHSMKjGGRVwIgdGGoHQUAiultBJAqG0DAdNcpcO4c37Vdau2zQceoHNt2bRlWVaxbrSKlIoGRVoMx1VXJzG1nVVIcSyRijVpQuWtE0Rl4tZ6QyZWGkSTQcuuSLPQh0eKCcLIARlIKM8KaDt2lVJfiCy+8HN9CQXhRpwhuHVC0VY/0oMSFASQfu9WtrkUuOVBuJn3bqd129U9APB2xb4Jv6FNTs7WU7UJX75uTx/209MARIT/uFTZFpf0gnfA3v62xRqbd2wCcrZZ0Vv60xu0YJsK3bvd+kAj2Gh8rlHRRnfTG6BwG32wZTtfLrK2FWTBVjG01c9srVgbbdAXFcv6fGbcyrhC7xrbcJB+5da3nTfMiPpebs7uF3PnjawGNxdvS6a+BAI3DZBrMxlenyrq/0IYtxaEjdRpo2L6Yhrdn5/+fPSH64OrNwfaZIhv9ohlq2TaqJSuNVH9f/tdYAYCJaJBEAVJVN95EgBg6kuybWxkm4uLsPFSyDarqre59GqxTYopbWnQhmQiCG2K3fzHt/sGFbkQguuYZF2uk7hYrdrnT668Tj58/2Q+az03hPDwMPrVb+V3Xt9979PPdycPFBlsnA9VmqXNuh6PD5JBfH511TkHOmM/7+r5MJm44NuuGU3u7CeT2fLtbjotdAdkllfteKChqp6dfW9vcs
NkO+uyGe7sJib2YTXKB87C5P6NpuqaWtJhlviF0btHyf2Xzy6ApavL2JiuWhYqBamxi1LtQ9eG3dHoIN1tdyIiWrZZXHz2/LP9qNAmqlcurLumsatZLeh2j4qr09ZCfbWcei0Uxa3gKMtRSVmWOpY7D19dezvSYM/Phehq0dQVV56fvfC5i9Z1dxnsw9duTMa3VlztHQzXK98uMEmLZbuo5iFFszt+aNTEd1SYgQcBAp2Y9exMo7XVTBG/8ubDnZ29l48uRNK9G6+8887bsQ+v3jt6+eyz2htSQ6CodbB/+OYoHa3aKXrHLmOv5ws+PJ4coL5aLebT9uY3Rutl4+uIfCR1Xl/hW3der8PiYE/fLO6Cs6GTunLj4R54f7z/hjaTOOYgYEQEBuVqXbYwfcqfPelqNwwEtatu38gPc5cqiwnbEBDceKgB2zQGBaFsTUSYJqq8ujg42s2L4VVt3LoU74fZDvskz4tkdLScnvDqqiurJMZ151yzNiZCJBHfAheD4ZR5R+muqdLJziDWT19+JsavmrWwMtaOlbma1zZI4C7NYis6dLicwc1br0+G5mTanr18sZNleWqKwxuRNtwtFuXZ3m7ubZskpKQl61+5PxqNcJDa87PV7m4EJKMMslbGSkc3d8uVXbebWGulCLEXqjAQolIg4gMEhsDM0kMHQCVKGUZWCOIlSAAk0oYAQ2BjCAA1ktZaQBTpwME57714FhAIwIEDCzOL94FBIqONwlgrbWLSemOdkqC04hC8d0SxiaJIG2EOAQRQkRLmfpjxPpBC0qS1EmYAYEEC3WfqCAdERYBBPAuzYkD07F3gjtlyaD1vFC4iCtF6T8AYRV1gLRAAdV9JUqjPlu6HMg4izKQUs3Tec2ClKY10rJF0xIx9ThOIEIgP7EmJ9yIcvJPtQ036GvBIRum+WCUKMDACBB8UkSgMHESCQqU1klZKKZIAwN6TEPYaVEAOLES9ZouofwT2iF0b6FOdEQNwX9Vek+5lTbgtbtlPBYIPqEApMloHERZmESTQqPvKC0gYkelsZwhJKUNGWEgpAWaR4IPWWm3sh2KMRoTAEhyjiNIaAJiDMWagtBUJIeh+MERCwKCM9OM4olJIqAAhADALIFnvCaWPWNKoOu8JJTJKfCDCXsjaT+UVQQCRPnsL0YfAEljA88YCrRQrpTwzcu/C22bh9WPBwus9E8CFmhfnq8WjZZo9jhJzdP/Wzq3x5MbearVQiZnZZZHC6Fg18/X5xcx2XZrHAgaCAlTV3CW5Ojjad6W1Qd59dDkfps9Wpo3p53/9Xjz9bDqXdpUQp3qnsWF+80Blo+jy3fa8Cv/2vfLVSbh7ODy8o/l0XUXRogLvZNVYjIla621969aE6ejli4Wu2oMIVLzblYunsxUaufMw+daDQVh49KNhcjwY7rRzrCvUJg5RzBhl2e5kN4c0XM6WhrGcVQdHr+8e3z6bN7oYhXphYvvs6cc6Os8GSufg5i0ZOjub5So+fvM7F1frfO+wefbTWKXF3t6f/If3RUUffvL49tHh++9fEcTF+OFqWmRx/urXHpw8//RHf/ZDA81oQK7ryrJenS2zPC3ihG2zO95LhJx3f+W7v0F1d7E6Dca9+Z/9yk9+/J4LAqHCVN3/6utXVT1tu3RoktS9ePni0fP5crpg1CKcR7rxbkj8yv5g1DSr07IywLleOyscsiKOFcznU2WkfP7kqKDny/DBi9k4pr08diDvPz5frucHe3mztDfu3uI4OX8xffPN10b3H7ap1s38x3/0l+s1S5FfZvzO05OkoH/6vc//m//0V89Pni0Vz6cLnKQ3bwywrIvCByO+s3mkR+NEj3lZ+TQeJID1sklUNBwNQgfr+YoCE1ZJ7pqqiYlc4HreiuXA4JdVniUpStfYtXVJqqltOsuSpXu7+3UF7ew0K4rV0qWDkSq0a6zq4iKNMqPbqgmUfuXbN1+c2Mg2OS6//Xd+4dMfn0bASZJdLueR1q71V+vy3u2jlcdmug6OT6s2N+b0shwPYirS2CBACQCHBweLk7O0av6Xv/rd995/p2ndYJKeXdafn89+MR187c0HLxez7/3kh6urdQJSG/18uX7no+f3DvMkSoOUCky+NwAZRumgfXZpK+/qZkVaUGXDbFEubfCHN3e59QDCYmMTIai29QG7ozu3fGmttVoxUjAqikj5VuqS/+7v/v3f++e/v358Cire2x2srpbPnj49GsVgECMTvBfUd4728Wq2cmJ98G332r0xeHGrVbITxbHYtkQ1KIbp7OIxaVlcvMjyoSRR11nnQpzlRmsQcD7keQJgAAAD182qyFJvHfvGJHEIzjUWBZbljAwak5qkSJIshKLFpAU7OZq0XXl+OdMAxphytu4qe3F5mQ9Hq3ndVRYHKH18HhNjaOo2y5KLl7PhaPCLv/FrRbYXfKeBTZFa25arVWjrJx9fScX57qgo9mi81/guz/ZCJ9Zasd4jJlGM4Gazl0Ek3r398E1VV951wKHcleSV3R3jF3/23tPnz+jZx48N5f/zH/1sfwQH+/lFffUbf/M3py8vf/yPPx7DYYiP/+H/6w9++95Xy1P34u2zh0e7tl6/8vXdn/3gz+qV7O+P2sUyzrKs0G3duWbdlnWklRIxQs53rWtVmjWtm+Sx0sCBnW9964pizAV3oS52Clh2qOGVrxzP5vWth0fNkmJ3JfFOu7YKzA9+9IcPX70r+X7wUR4l86fnjaVIJnvDG2y8840wuiaU9YKUXk6vXLU6PDqM4+Tq4rGotJy3KivAFEl+sHfzePHomeZ5xxIkaDGrqhVpRpNBUezmkdpJOfgwvrH39CfvR0nUrm3d+fFe1o8Fy+kyMYmOSKNQTJExpJglGKUVYSusU5PEERIhcO0UusC+9eJccOumMmneZ+sRUoCOEavWey+Z1kWaEoQYHMWqFtV2wkA6ihUBizcqbZsqcCBACqyU6jqniEaTkQuhLBvbNbE2wBIUKZU04jvgRCMp5QIkWYKivGUHLh1oyXULGDFELMb7atYu665ag+0EQA2KLMuiLI9dB03HJExexHLkERlC52NUZHh8PIQALXN8OHk5W1QN05A0IhsIWkiRF/ZonO2sahEDkFYGHFiEKItT37aRIUIBEsbWi9cGtEkRoHMMQJ7BaNNb5NNMJx7G450srtv50rahLr1t3Kz2i7Jdlm3dOlRo0shxcMGVVc1XIRvElClxbYSMSncdeOs0Gue6ynofSZJH2rAQE2puWsBgtBYPOQ7YIjfVwGFEkCkGNyVFuXCMceqTahm6JQNHcTCEwTM1gDujMQOGNtjKxpQRaJTQx+akgkGk6RpxDAE65o2eQwQYGISVYk1KIbDzoXHeAwQvgAKKOIoxJpPGFCk1iJUoCFVXV4tgV0ZznkSNbUWrNriYyIYQQJI00droKK7LirxNjEpjpTULITKKFw7eiuXgNCoW0to4H5QGhWJIBe9RfPCOAAEojkw8GlrfbZEQfkmus8EGWznOdcGwnqnAlyxi2ygiEWTYgpWtfQy3UUTXVjZh3GYMb4hKbx3ak
JOemOAXlqAvKT42uhi6pi7cLxJ6cxZe/wJtvFnXvrYNMICNGF+uHWEbAVBPoLavwhfpyPKFMAj60jR9QeAt6rgOpeg/Rq6NYVsBD20Pu2VdX5qE4rYr2OtbcCO1Atn6p7ZoadvVTUs2P/SN3FCYbS+vf4s2vev3jbccZ9NN2RIf2MQ1yabW2jb2E7eH3vaEt3Kwayh3HS5+rVECAWYBEgbqPx8FttnS130R2GYd0RdmNN4Y/FD6yGuNgChIoADgWowGKLj14vVJULKNKIX+f2kbSITQl5nu9USbk0abO/KLKnW9xOn6xrtGRU8/f1bEA0aqltXl1XzdtGDM5589eeenz3RCBvzd3ezrr+7nA1xV6wcP7yWUDoYxCyybKpEQWLTRvhvEEa0riOIMQjkYH3ZYNaEejY9twCS9d/OIVufvQnRgcJiqPQUXRqIhxoDQ2DaOzWQ/f/nkPEG/fvoiBJiMsxwT1/D5s8udAnNT2LbaGxdtINQuiiSiPMzaWIfl6TzVcWDvL2vfTQdRYNvuD6J0EK9qQkBrK6Xii7IkMDqKOu+vzmfTy7PdIj0cD59fneztD2trxXmT6qwY6gwf3Lvz6UcfNdO1CW5ytLOuBYUXV2e8rnB4cLQ/vH9YxO28qptcqfVstbtzHBWHq1WZxmCiuFtPV/NpFqV5Ory4utR5Pog1i2ZPZV0Pi50sAd/p6XmT6GJxvnhRvnc0VvXV+YuPXnTWGkoigzpVM4ZHV+v7N2+KVKNBsbq4yky8Wpex9h203K3WM3j+eJ4nyeTWiOvw4P7t5dnltGoO74xms+pomOk4AoFoWKDi+epMUJNu67oJXVuMCse+rqYierWw0MLeKHlxeTEeGWVCi2hbWS98Oh4DZ65eM7ejQbJer7wgB6had/vunfWiRKb1sj7aGy7OXyZa7+zuNFXXXr2opqdtV7LAcH+ilLKJSbKkq8oiksPD/SyLu6BjjJ1Pa48ra4fD4Xp1Vox2zy/mWIHvqkXZvXLn/nR2ZW3QRhlTjPXAQqkafyPJD+7dvHkwNEa9nK8jjFShsoOdy2cnXfAtmN3dog3LrusuL2yc6apesONXbudHhVjfKrv++GeXO8Mbh5Px9tHcf4WBN9RdRMC6nikAISmlADZ1xQXEaI0KHLMTht69RRiYr+WHCOB9EJAQvPPBek+KmAORIoXM3OfcxDqOI5Uk0WZXADCwcPB95A2KxJEx2gACCzOgeNaajNGEyN7pzagjyBCCYK/kBAgcAjOw9Nlp1jvYOIo5hOAZrAsOxPtAwkYpYYiMIoBYKyLsfcAhBNEGiIQZAZE0cu9UYwAJwQfvbWdZKUAOwYBCzaKVNmQISUDYc/C+319yzgUOLAFgk3+EQERKuH8gb4S6SKSJQg9GEJXSJEBK9ZJJUOgJiAQAPTOFACBESilCIhG85llIqJWWDSLZGLKh53vAIXgAVArZiwgDAvR52gx91FTP3fpNFUIMIRASgwCQ4EaZ2mdwI0ZBAm3KiIJWmjbJS0IIfbacJhAAraV/IuvA7EM/+QjeAyokcSwchJn7MKUQIAToq+Zx8MJeaYUIomMNhBqYmUEINRIKKGaPgCF44SCbQq3cp1YLYGCx3jOD0coYg/3+kaCIKKWv5x116Z+tzoqMhmMdmzg0tu2cQ3r7k7NiEt/7yp1bD2+ogWSShNIuWpeoSBsTFwMgyLKkXPmqKof5SCjs7OkVXJ5/fCUYD4bqtW/fTwbxyds/jaLm+P7wk8/9MIrfPJgsL54Od6MHr+cPlu3lC9c1+PmZTVO1E+lXjgcXNgx3B80Y575N0iw/LpSKrMOyapMsc6E5++CHg3GWJrh7Y5wNurNn9Q/fOfnOq1+lEKEn9DDICzXYGw7Gq7K+ceOYXX12/mK0l97YPazKdmd3P873rJMiyXcPb85P/Wz62Z2bw5Pm5eMPPkoHcZD6aGennDWvvnr76O7duFBR3r7+zVdWs+WHP/xAwK+sHQx3Zld1Nrr3xtfeena2GO4Mnb36/MOfJaa7cX/naK84OT09mdbf+bnv/Pb4cPry5dnTR7IPd9/6+aydh3IeLp8163r/cBztHJy9KNP4wEh9dPNgeXb24efPzfDg5boeKj75yw9OT0539sbxMDm/6oS99T4SOaDkUIcikmGku9C5eh2h2kli7hpPkFAUbFDOvnY7e7SowGJwcrJyL2s7X4S59b80UQ+OduJocO/BK+vLsHv3FS9uJM3puz+xFG68dfzRifv+yzOrQtcFJ+H//Sff3xkkrrXGkBV5dLp+426sSKI8jlLgJtSNTRCUxRC8VSbNc7uo0dZZkjpnXddyFn9aP1NalIYkjZu6CR68BGOSy9NLVqQiJd5Z2yZRFIGxS1lzbbQcHhXGIHTUehhm2dlV6+qA82q4l4+TdFGW77z7LDW7F815U5Xv/N6/HQ7uUpIND0YXixcYwiAxiYke3j344c8+riuqW2g6uj+Ky7o+PiqGk+TJxbz/Fty9e/z6qzvLp9Offvz8dpFMxS/O5kOFifWhWRzu72RR/JNPnx7tFvfu7n58ugCNf/aXP37tP/tN8Xj64jwb7+s8bTqOIimyGMDtjY/Wy3XZdBRHJh606wZNupy+mAzHSZQQKxFBioERBaI0IuDJ5KC1IQDvHeyMJsn3v/+jNL741quvTJ+fVOjDej1RcJDFV2cXl1fLV+7eyeLiw88f/frX7nsV7Ml8Moz9rDy+uW8htKtGDQYCoZhMXIiI7ZDLDr0T7loXj+M416FpWRiYvQ8miRECB27synOsACkT13WCoClO4qwMkuWjxlbDQU6CdeXTPLNONU0dvGvbSsfoLdngDJm2sdZ1RMrZkKZpluVeUWfdqlo/fPXh2dWlqKgs10qTirLdvRuz81VoSxpnRhmNZrw/Gu0dnHz+eRIPB3u7H3z43o3jfRK2dtG6Mk5GCJlzLk3jztZoXZEPkiLq6vTy+Sevfuu12XRqhW7c+srP/drht7/9yXIx/94P35+t3bJ0zqmbe+Obt2//29//t48uz7KC7u0OH8+aJ2dP9/+T737F33jrzYcfv/fZedV89OT8yeMrpqyazTWZtnVBOiIibUIIznmKiGIdmrVtq7gYZENTN4vnn35w8/Z9V5Fv/MJ3QYKJkmqxHiQD36zBuwzVINJeN7Pzx3vj3WKY2vXy1uHNQX4QKK6rerwb7w93lmV9eGNUtucRROACoA7eKgmZIZNjNMwD2sQko/GodRjHcVN2sYnR4uzx49XFlW/bYjAcHU6CNZbBQ8iNiSOeP3//YGxQsovpZTSIxUkxSKMCzWZZAKNBHqkoSlWaagKOjaIAbWfTOAL0+7sjrWNXN0mRZinNS3nx4nQwyhGZHDM7H4In9K1tu67pWmZ2ATQZEQFDCpGDCGKiVDaMoygJSiFBYCfoDCA6jtJoNCi87yjRbd32qYCUZVGR+KbTkWHmpqmVilScBOjEtqQoACqtRLCjkI6T4tY4GaXs18xdvSyrtmuWbfBxEse7ozjPUgEuSwuiRCA2CbN0lfMttav28mKdF6PRANNxPB4XAXxDtomc
u6ryQR4IHENbNiEgI7kuGEIRjkilJgVhI1qhBgBlYiedD22sosQMKLQQApAGDpkxAOTYV21LoLMoAxEWf3F+Pk6VtBUpRUpX1r04X7SeWxtEIIm0VqRjpY0QemLlGq4CBkOlbQBYk4mUIeyLnTAVcX6Qoa5t0OjQdxwUtgFItA/cdd4EJaEJgK1Gomjd2FTn7CLPqXNehCORrqvjRCMpnQ08xJ6doEkizW0NECKNIhI4kEgI3BdwEdYcfKQjCdA6K8C+66IkIjAQpHPOW9fvenYcjImYJdZJqsg11f7BZHeU8unFqvKL+dVgNCrilChCRAUSGUXAbb0GQB1ppTUxR4pYI2JAICXKREnw7Ky3touMEQyNdwEwpTwiABeIJISSvfPBISpljEmMIlIEnuMtiIEtK5Ft7FDvjNqSjo2mpucO18IO6id+vA2I2TCYL3KDtzYvuJaZAMCXoqBhm+nTU7YNDMEeB2xzJr6kI9nMb+kLcRHRF4gEmDb6F1EbjY9cy4WuDw/9TnlPaP4jlsV9b6kPFtq4v3opzxbl0NaP9aVUbNrAk140JLwN8+5XLVusse3GpmHYd1W2GK1/7ybpuk9PAtrqXzZyqGtd0lbYsyn0Ll9ckWu3WH+dGLZp2rDVDW3fuO3AlpwIQB90zcCISmQbV/Qfyba2uqUtv+kvVV/v7PqSyhcuuZ7l0Ma1tj0H23yhXg4EveOMhBQoYSEg4K0aqrcKAvK27SKbUsjSY8dtHFUforpNuhBRPQmTviiakGxjtK/DSza6p43s7RoVtXU9Ho/Kql3X5ehgvymnH3z42epqFiVIiookMgZ29/MXJ48eDO4ZrD0AmgxZFZkiEZWpYZEuzl8kUWpGO6lOnawGO/Ty/Hw8OdBSgz+vauQ8Z7VrAVg6wGA7663fOX4Du65aSd1UV+1VFhckZefYZMM4y9Rg7KLlzuBI2mW1rhCrtvPpcBBANyvf2iqdJLe/evjY1ofjG/NFKaVv69loPCGV1HVVz7t8dCMJZrnukonWRZmleRHH5zMfQpSn49nF+fPzC2US17KOk7Zr4nTEbbOa1v/u9//VG68+lMGgbrI27ATX5RnjxD64/9XL5fl4kMAI/MKRjuI0Gw+dDTbE4MCKtVqjYx+gaxbnBt1wfFTPnuUHk6tl5SwW48yGdHD01bJZr1eXeb7z4Dtf/fSDD6MYJFl99P5jMHuL+fRgL2czpUJMqL//gz+8fTApF2p3Mj443gtnMn0xy4fRZJSdXFxkWep0VcSLJCuqciFElyeXdx4eR5SiJRsky1OPepjm7UyCuGwfW8C6laYuQ5BhRium80fd5WWlIQwSmiS4N0QR8hDbZFSvA3jK1Cg2o9U6QlXEOjTLRd3xY+sO97/aNes09tPZrKksaZmXHaOoei3OJgnFcbEuV0H83u7YqqqYxEk+qvQwAchItELWMImy8+mziJsIAgroSHwEVciq+frOWyn7eZQnWZI3C3u1mloxSRxx1YwGcRdCHSAvinGeX87PmSDJ0k/eewn5TYOoVXK2uBQd7d4osgcYd96Wq4urdifNzleh9vtP37/A+Pr5IJuHC4AAeBdCCD4A9unFSoEAIhlFAEiksc+hC0FCkCBAZLRiRsfMQZx4pZT1QTg45x0HIXKBNaoACkJAVMYopSiOI60QkRRRL2VBEa2IGYgQRSEjiljvObAPQohKsF/8a2UQcBuNBhqJhYGRgwMRQxSYGcR75wMHYcuCAorIcQjMIbBBjOKYBCJttEYAUEqBiIoi0EqIHHNwXWQi4eA5MAsHT0iCaK1l54jABWdrbxkDNImRLNIQnFKqr7zGHLxvGEIADgCegYOnHrgT+RAERGEPVog3ehgBAPGB+tMiAswAGAIHBh8YBRRSnyjY1xdDoH6PB4mYfT8WheAV6j6NiMX3fm8fPACgIulZC4gLXmm92W8IARB8CN57ESSFgQMAkNLUP121YeGN3VcAUPr9NFIkAEEYGVED84b3K+p94SCCoKkfcY0Wr3xwrk9+4j7OyYfAwCAheA5BgPoT4ZxzwSlF3gdAEHBiYvaBjCalWTbeM9qqfkMvN+UAgICEwCzonAuBG+eNx0zEKFLabMqkbRTBAABNy7mmyoKfdeDKG8cTnenp+YJ01Ir88Hsfnj673Ls1vP/Wa3Gsl2u78s4zGx23XTuMMuDlqBhoQ2fn8/29AS0WK4Przr94sbh1tD+KoztfecDV0t6489N3fry7mN69FcajbDmvl83yMB21Zt52OLPmnedhPK9u3o0ffm3308spjAqdZas2dJFOd4+iOi5ca/LFo8efS3MFazJ6tBcn3bx+uJPW/lC8IVFdp8Y4yYo9loyVGY5H3HVBy3jn1miUBesj0+0dHbWsdVQkg8F8OrPtSbu8WE1XEarGhssXUxf0YmajJD1+463b92/9xR/8jzF3bb08f36pdHx5eiUI+c0DYorz5PDWrde++41/8o/+0V6Cp4+e37izI5114t/8lV/5anL717/1HXd+8oP5PysGWOwN3nvn+9++t3drLxYthwevnLxYW+F8XBS37rz7kx//2Y8+YlnPp4tfe/N+unPr9/7xP/XLMo6yuub1usmSeJBlqmnGCm9mETQNErAy7CwDM7NiRoyr2sekBKGclbvHxRs78eUL+9rrd/759z67SPS8pTFhQpEt/cc//MsHo/yN7/zi6M5djHk8oA+fvzidlzC++e8/PZ1zF4Sl5RbVnHmcJ+vFMhnFbVvHg3B8f3eym7fOi4BEFFPazVzr9PlVGIyLwV5CXieGQlvHiYmTXLSyZcvBZToxSQzCXZBqta6wHheTVV0VUcyu6XxX1yGwtwGVE6RICR3sFamEeemuZiuJeO/mSOxoPmvSnICjcmk+O3uxP1IPbt1andq9AaHvuCkTRfN1V1lptXp5epEZmqSQ5+bisgqD5GBv3Hk6PanS0SbQdzY9u3tv8vbjxy9P6jzBlMCX1Z2bB3vFxIRVHrPr7MmyPRiPbONJgAP97J3PPnrtropk0Va7o3t129mWu6YNjsnoKE+p7chypNPpVRWC7ko9nuz6tmPfKjJplnPApm61585aAfaOCFXTOBLXrKq9vb0//vMf//bf+tt/+29F//KP/wy10Y6L8RDr7u7O7iCLUJlbN++2bZsZDa1TaO7cOnjw5lch24mVR5NrbbxrMMuaCpL0MI26qi6bcqnXSyvdaLTXNmXXNIgYKV2vS6UiJSqLB03ToHAUpY7BN0FcJwzOOoWJc5G3lj361jW2yZI4Gw39ekWKSEJV1vMq2K5lYkE0ouq6SbKsqYVQjYaT04urrmklsLXdwdHtr7zxVnBVHEmcjYRM17RtPRcFSZvpZHd6MV9KdO/VN2w5UxR5VPnkAEKYzk8TMwQk7wDBcOtndWvyyc6NuyHGMqj9W/f3X91fTS+Kvdff+M74F377r714fPov/+3bRzfv/u4/+Af/9//uvyWnb+3f+N3/4u/+y3/yz3/htTt/8eEn//SP/3TQlKenl7GOkr1JLebV1x48enxWFLkX6jqbqDiOIqPT6fJcKXT
cVeUCLRIY5ig43zRdPtl7590PfuW3f91yXVdNlqdpnC7rVTE55ijLYIphurhYdZZny8+KvYcBtaZw9/6rOjVluT66f0cjlOUq1jGEAK7BWDvrPDf5KE8Ts1qdo251lqyuyqYpq3ItURzHYNcuu7m/c/v+1YtHEHD/eG8+r7UWraDplqB1Y1kEs2Tn6ulFcF7FylFoXVU2Mtnfa8pV/y3Y35nEcRSnEZEYDr5pAvAkHyCQ6MgkGQjkOxMXguuca7pBmnoQ0ioorchEOjFADqTufADlmYWICZkkSjjLEvZOkwnCsaIoBohi560DreK4a72KItQEGiOlu7Yrisg5qwWJRSnEQRaQxAeWgIG9b3WM2WDkvDBCw84l7AvSN5LklWEEsbtqfdcuF03VWiKNCqJMoSZQYgih6RRp9g6cCo7YorBZlfWqkatysbvSAlFVrtHIvKtK9kmeIQMFH2uJTVIzA1BwVmvFAqi8NoEYjCQo3Pg6AGmTBAxEOkBQpBLScVx0bRUReg/Bo8I01XFXNWyD967xXnM0ySK0fLFspst63XHTOWJWRhORVoYUBgnceqMoQOS8WBAlChTqBOrQsQtEKICJMWmBMePsqk5Q+SBRkiqVgWRdHRRgpIMN2IUWQkSgRA0sJmXlMFQpGQBuXdsFq6wMjUnYFbrrgCrX9mValKY40iBgHRNDcNZo1WtN4jSPItN1LG3b+CY2uqqqPIm7EJQyKKKVzuMkIk0KlHh0jl0o0lEQ6Fp/ONrTTpZNwzY0vgPQg/ygqssQQqQhBO9DiCAKwYOIJiiySCEp1D6wUiAYlFZFFntxXdcaFSlAAi/MidbEAAAueBDyQQSCSkBJyONcFG1XzVvU8wVVwOscHYTrYBdBoi+CcjYrfYBt0iVsiczGWQbYl53dLDkQeLPNvNmKhg0huuYLX2AQ2SKcHn1ss5v7BGoEAKTrFm4jdJAApE+37I1hfUTS9S7qphUb01RvngvQWwr6dspGorIRUW3ZF2zinvG624AkQtDTJdnwn41HD7eoRK4JycaatXWjfSF7QhJAFBTsIVSPYTa/2eu8FDBjX3m4XwiJYJ8UCxu9Uk89Nohu8/mbQ20lQn1H6FqetJVAbWVRGw7U7zV/Sd8kX9AnvM4Khevs7x62XYuTNpKr/j7ZiLjg2pp3fYNsaA1tIRSCAgYU2uifNua27YdjbydE2OiJthIlkWuUtYni3mBB4T71lfqTI33COeMW0BEKSx+lBCKwcWFuUVGUmfPzS+daF/jzl6c//eEn9WxapPatVyemKM6u1pflygb8yhtvprnqVLQo/WASISQsXTJOate2AeLRfl051sTkSNPzk6fj/Z355exgdzfOYg5dLFkyPlhVqyglpXWz8Bin9bqKQxCHzraDLHr67PPhJI1Gk3gw9CEirW/f2H929ixOMEkyAQmNI2Mg6KprWWWEkBYqKkzVWBPR4f6kqrpFuVqvL11TtUEmB6N22R3v7Z8+ex6bDnRbV8IdHNzcDcN0VtWUxHkxLNezvXFSJOn04iTXkVbatvrFs7Mo1iYu1s8XrpNsRFGR64Lu7e/YuvLUOfRL27lGkFvGnLIa4zhTdnpyslg+Hxb5+cVV3VRlKKv2vKwSTztxvFtd+Ru37szOTo2m/x9XfxorW5bdd2JrrT2cKeYbd3xzTi+HyqysKtZAsoossUhRIkWp1eomLDXgtt1wyw3DgBv+ZEANw7ABW4CBNgw00G3YLQgNSaYlUaIkdpElijNZxRqzMiuzMvPN7935xo35jHvvtfzhxH1Z8odMvHfvi4gTO+KcffZv/4ebN95YN3Gen4SyWNQuijpv/uwvTZdcffBeGZzT7nM3xz/+8eFbL70BWtXlcj6X88sPapx+/ktvHz48G9iRiADGq3w+uWxubkXFulwvq87e7kVe7V/brdfNugzJVgQMR8fPykWRdLPJ5cRoW9RVU1QcqCzd44mbFsiRIoLhsAtcrisnAQfddL7KlWJ2AajT644n68rXflVMt7oJnxdNKU/rk5Q8cRXFNN5Kp4tqVZa7B9fq+TyfVRGGXj9zSycueK6sTgbdcWdwQ7HqSaGoMTaqK5ysFlVZNvVMGzifXJCyiuHx4elre8Pp9MJoxEjlhR93t48mh2sBne3YTsdZM8iG57M1RG5SLGtf+8sSirA92NP9gzqflG515+beycStLhtNpnYupng997FKy1U4WvtCohf3t9qzgAWCDwDgQ0tEOAQPpJCoTZoXEJagABQpQCTSwIBICgA1tFWFgICKEJFFmrpCrQQlIAhtsuK0UojU5kMba5VCrRUwe8dAAiLeeVSEKCEIkUJQiIoDhxBEGBEUKdVqVZAASDgQomePqAILswTwgaXVx0DrjQLm9vJFqhUbKpAYjFZCiJG1KO0fDAMopUEAtRIiBAgtk3aNMDOHwBKCN6RD8Ng6vhADgFLaBV87QqmFfXtNNIpEpKobH3wAaeraBd/GeGtSHGqtGEkhtyE/uOEviMIBALDNFmIBllbn5J0XYCKN7EMICOKZlCJA8OxbbASCITApJQJaKZZWW4NKmTbMmUFEghJkkRAcIHE7k7BI41GpJgQGYWFCwk2RhSBoIsXMSkloPBC5wCCiSYEAA2utEdGHUAdn2RitDCloWZYgew+g2rYBQQwuMDMqEkEvLVATDhy898x1UwMzklJKB++9b4KExgkqQgTxAQFFKaPVZvchEAgDSggBSbz3ArBx5AEAkAhrpTk0bQYXC7MAtbHfABz88x2V0SBSQVyooziJEr0smzJvxGNd5LDIB/3OelEHt0zsaZzopuFuv6dcOh7vffDs0K+XniWOwNehu713MbuQRu/c2D/94Nio+HiS56y6g/F0zqvj6LNf/vnJu9/5zocXP/fWzuXheWdrpJxZTmRS1rdf6FcVP77I57WfrOvBC926KCHGgFgDQxAb923U50jAJrdvDZ48OqpqhEPU2c3R4MZffPd+94YZj3rajFGbVVklaTbc2XEVJeNocno07Hb6W/3ZZJ3FtFxTlMadCLh4+ge/9f+dnj/9hV/7asQ73/g3X88bD0hbvTgJSbd3PZ8mz3700fe//92f++m3+53swcdHTqqd6+Nef+ujj+a9Af1v/hdfe++Pv3Pvt+5Pi8sbX7z7qfErH3z/vjjKXtjd6r7a6+49+/Hjd/7s9xUui7JePSzeuPvG3Vd2z58+7O8NluznRdVw5ZbLe9/94eHJ8eV0trvfe+Olz/DZ5F/849+Me4P9W8PTB6euctbSaLvjXUOldIiGqbZog4NHF42xph9TtVqPtBiF4gOm1BukVWhipJe29DjOHh09FiPWqm4C2z2lwC3n5eRiDX/27cH1t2++/cXlbDo9Xj5+Vnw80b93dHgRgiiJEAlNP473D7a6XfjyG3unk9W1ceazgnS1bor+KIswcStVFum798+fna9P59IZyJ2d6vWbfWMRm7ppXBRTJ4q0V6wUWl25BlRKNtIhyvN5uVoDUVHmCdcuVOIQUNJulxTXYBoXnpzkoQRUEMVquQp3XrtTz4+hgYbd9sFOx2
S2cKqvjpriS1/9K9PH09hX86MLjWI1CUhR+ycXq/PcNdou85qNqpS88sK16dmkN96qzObO6NHDs8cfn7hG793Y6mh34+bW5dFMrJ1PVg/f/1gwbPeiqJ8qiRZ1qZRiwSg1H92799Vf/sLF0fHx04d7t15Isqwo1ibV1qjTyRSYskE/b8J4/+ajh0/Pn9472O8liqJEeRca33gHtCkQIqO11prR5HkOXoySvd3RZz7z+u9+/Rsv37xx4+DW8WRWmvBsVdB0/vrtW4nVx/N1DWYdVJolt/ZGIjI6GKydG5gkS6MoQhuljCpOUgQnIZ9ezqJulA46Te2U1cW6cK6pm8AYAoJRFkW/fvPt+0c/GvR7VT0jlQCTNSYIc3BRnHkm30DwIUtiQbEIChAco3BTFGU+zTrpcn7JFBoOeVkv6yJLsqrA7vD2fP7w4vS48s1oe8tVzWpaf+Fzb0wOz0LP2jhhxePxtbPTQ4PY6XbyZZGm/QfF6f44pk43n04Rwnp2Ouil487YYB8p1bFVqtFKGdL1YmUoAR03yyo1kVSuCFV/vDvavzFZuHxadmP1ta/QgwfP/m//9d8/z4tdPfz9P3n4dP4b7uLs1TfNnZvjp2eL2pidnZ3l5ezo8NnoC68sfUVKXRyd9Hf20jRhCWVdh+CHW1uNKxilWdaktTK2P+yfHz6z7JqqjBLrPJOycUxKca/TCUSdrL+8PFlXSxtpwFQnqKy+OPrR7u3Pku4slzNsahPZRVlbTMu1pKlGqVCb7rA7ryf1usbcLtdTRYG9cwE9g2uqpNdxgRl8OuoEkUW1ZKPTzk5VL4NW03KugIKs94YvVU1TlQ64Xi2KLOvkef1sMbux29cJTE6PzJWsaNhNTWwBkZhDVWdR0tSujX4vseHgrI4DkkhgRhCsSlcF7vUzaUBCMBJ8lYuvjBUgA54EsBsnicEk1QFcZFQaZQGBEKA1Ewmg1p6ZCNvCcHZtoa6zGHxRomjr2fkq6XbzurFKazRlk8eWiEGRZe10BB7WKlXZdk9lrLA2pFQvcnldnHFZUadjO5ntdLKmDsW8jER0CI6bxTxHjESUiWJJVCNuWVai7LLx4XgWxQjkWUmIjY1RKxoOe4qcd3i6KIJvZUIqimLhyotY0ITKs9PIFgjJKsAgwQWJlGGWvKy1KCRNRInVrvEuVIBNCIICSZpE3Ti2qpPifHa+WhTAolCUVgDgnKSRGo46rq4AuHKeBIMPRiC2VCrfSWOrkVGsMVbpJEurVWhqbIq45kZrq6OEMBFtSMVGMXqf6C4DIhgOIAgcApSNr11tPARBtIBQl1ITYCg0NJ5Fe0QxcZIEaFfsoBWigoRsEAEOcZQoo20UpWlETUEFeueNNoo0gDLWGKMtWqtMBBTAcZAotiiiiAiVUmm3kzFhXOdVXhtjV+tSBQ7ela40WilmbaxjJ6CRSCsSEDIqoCf2GmNQ2trUucajsbENTSMBvUjtmQgIIXiPpKW9TSZb5zUZhYlCMhtU9Fws1DqxWtVIu54nulKc0HOsQ1eriY00hxngiptcreWJaENENhhKhOW5zevqfxsPk2wax+A5NsKNWenKYYZXC/v2ADY/E9rYDTacY1PycqUVERQIgpsaduIrcQ5uxD2tg0yAA7SrDSAgks2b31jGNtTmObJqZTXPQccGYLSJ3Jv3gMgAzFdxmxua1Cp52rSm9q0iMgBBuDL+gdBVjTBQSzmuXHtXDV8bqCNXGp3WJ7IRfMlzmVS7s3xlrkK6EgRtkFB7RCRXj8DW5YYAz4f0EzxHG4HQlfgMn1u2ngt0WoUXbrDVBpRtHo6tLe/5I6SNZ2IAERREbpOJiISu8pngOSLEjWKrFRMBf5JVhAxtzU97pAzURuMGIRAUIQECgSDC7WG1zbXArZJI5MqKJlcZU1eqIifHTyedjj2fr9776EiqetCt/uov3bl796B7sPvgw4t/9yc/XjfOZh3PJaVJs2jWl6VAZRMC0q6qiqWLYzvc7lcl28Sczeq6Stf5uNO9jjKaT897o1Auz0HQu2q1fLa7c22wt31y8jSOco8qyuKOsSbTcTepvVlP/EFsxbvl4lz7SKoy6FQlcWx7gRbtiRZFluJsuTrVjrZ27PH9dRp3LyeuCS7KoL89Wk4tgl8UZz5U57PmssxvXx/tHwynz55i0+Rz76vVtYMhaLgoJt0svby4yEwgkVXlk16/8nU99YNBAqoxpjQ66vU7+eJidjHLuqqpc4am1+0Mt4aDLJtOntZV5Ranwo24ZW+oI5MVziddlQ2HDW3duP1iPjuLo+tiBuirxWRtmKD0ypvDs/z6aLx37cWPDh/Ga78/7PvJxfWDncPD4+myevfdD7b6uypOsu7IdfrXb978vT/+5z2XdajX0cuq8BrM4UnzwsG4qsEXpUCT9Gg6racPntpXRqkla1w+n3DDrnbONV3VibPsyaMjJkcdff/exWrqmpq8IzKiDQRiE0kNngSK4iL4xjPsb++UQdeKHBIojlQUxTQaJvM1TJZzNvraTmdRzOfrZeNxNE7q9XrF6sXPfeneve/P1sGiJlQguhMPuIlWqxojHXfTssgJo8U6FzH90fjDB4+HWRxbW5bS5NXl2frg7VeMXmujSxYfuGTFPs3iJF9TSaFCl0IXWJduTc1s0GUlcnxWZduvJaNOdbmk2tZuamynF3dWlzl7SpKuUaBUlEUA65yR62azPHDOA5JzDhA9BxFAUMKgkEhQQlCkmUDaE2ojKwHnPSArQgAFQK15beNhAgCGtpKDAwFIbGMCIKU4MCKQQkQMzITAIbSntZeAzIG5bjyLREYDQu1rFzwhRUprBEuq7dLwod01EhYQ5IDQsBMOCK3AUpiFhZUmrXQAMUobUswMqH0QL0yIChWIBOZWjcIsWmtmQQDvG0SwRgNw8LVSpoUqwTcheARkFgZAAVKiCS0hsPcsIXhhKEGE2Xvf0hrvffBtLjU7ho15HzW0IwUb1xvixgOIwh5YKa0QQggAIQgLgBduZ+0g5BnIM2hEUG0Vqw8OEUMIiIhIrZsM6SoJqPXcITgJwTlmIU2kEIlUy002qdhIqIS5zbxmYWEOm2upKKWDMAIF9j74jQo3MAv74AVJIVqlBdiHln0BKQVAjG0fHIhwkNDu7AT27AJ7ruq6Cb72rvYOCMW5dvbk4JAgsGgVoYCQCoEVUWAJ7BWh0YSbW3YG4RACCxIBaQrMHAQQCbk9KquVIk2EznuldMvBSG/20K5fG+Vns+F49OHjo91h99XXXiwgTNuGvCJsD7rFoiy9fPDuIVQejdreHW6Ne0/WR5b08nL5whu3bKSPz5a7B7eG1nx0uNQob7xwu1jColh+/OhiMq3vvHD9o6frGwf9Wd794eOpYP0Ln/vsg/cOeyN1+4bNFuzrxjtoAJ9N6PjcbU2W/Rf1i3d2VVWrpg7zy+BlUdZLd3lS6pN1SHpv9JLBbHKi+frX/+EH9UkVF3jry9eNwrpputlWN07qRe6ZJLhOf+Cr4Otub2tLWfaOO2n20bf/6Aff/V12a0ZWUDz66H1vwt7e8PDRR
JOtS/jqL/3N+erk4un3f+Zn7k4Xj2sPL7990yQ7jdmdzsuvvpEsTs++/Xv/9Nmjy8vJQveSk7O1XxZ37t5Ks9HX/tZf+/D9s9nxk0cff4gN3bn9ikX7qdfunp+vLpervIZH33083toz1Hdc3vv4/ZPZRMV2f3drkOiPfvT99WLdS/pa9c5Oi2u3bpTLnMUNY+1ClSZqh1CWy1iB99iPVBPkclYO+0mWQAQCRpKBbSQkaby4qKer1STwl7927Q//+4/XPkMlmvj0bK69H3WzxdNjga7zxejgBSz9o7m/DGZWs6AQS4fsIFHXhgNdueOyWTdlZuX1l6/dfDmt4lmNiycPzlnxw8fu3vGkmsOqUBiZqvI/vreaLZfDrv3qp3dTA+U6D8g2VSqK54Wvi0AqygbDbHTNZvPzk8MkjQKX4FVm++1Nc1UXTeXAZDazF7M8dzhMo/5ub3ZRnfzox9d3dw4Xp2iT1bxcUf7KT72y1e3c+/D44cNDW9N2YkcHvZOzRTfRwYfRuDdZFEmvU5Z+PitBCSF8/MP7r751e7UqD88v2rMgi9Pp4Xl3mDUAOkkv50D9/pOHlxrpMz/14u/+u2/OLqZv38z+s//0P/kv/g9/T+mYmYKi73/89Ff+g1+qVh+l6pYvm1VR7l/bu5xceGbvaDKd72yNXVnl5VmcEpjuuuCV+IHo2BiNpAwoBEOmDiDBq6AcSJplESYnx4+C4evXh4tZ9+j0yd6122/uZ99673GjaPv67itvv16s52984e4f/MkH945mn3nzxZ///Nu//7t/3DnYuvXyG/PzwhFmNpvN11lnxAG1ogpkvL1du2WaZOKMMsqLD03opAkiAkqM5Gp/fPqBhtIV0hv1XAMBCTAANFkaAZIhTRgtq4UXCUGiOPa+IdTKquAlTXuCIbhisSrWq8qBXDvYP76Yf/DD9/pbTwfDxAWIokRFxmDUHd0E40d7g1Try9kq9dElnTKKoqRa+uny4s4rr3zxZz737MnDs8fng6zrfOinw8gkDQRAaZq1TgaIKBKEMEmMd9X506d3br/6+lt33/vBe41LdGQNK5ukw1euZ3pyJz24//j46P5T7vW/czn9B//oT/7Zv/h/TtLfXs/WbrU6fDI7z+Jbb37uZ3/2l773rT9asTx49PTF3a04TaLIxKO+bypX1sZahpCXxdbWjqbu/PzSs8sXUwqV0bicLWpXiyuWs2CstZGtAxmFVTm/cf3m+dOFibFpGjLJ9qh/eP9Bs3jc3b+b9Tv5Kk/7iShtjY7J1t4tlo018uzxM80mjvsGuzFR2o+Kau4Aun0TqlLHuFzny1nV6WQEwTar7YOtuarXMzeI0xlepjp2dRJxwqxRmUGvt1qtHaiiLDUlk9OVcssIIO5327OgmyoRn8SGnTQGmqYJHHzttI0zG6EyDBoZOfi6qYI47/M07ioW9syAShGHJoTaGG20Qgux0kkUG3CKwKA1aK78FsiobKJCXTtQTV5pRKOwqetq7RHRGirWZbUql6umajwYiVdllnSBHIeaXUU2ieMECACVV0086JYmxN1+Z9hPk5iCFI2fz5rpZc2NgqqJulBJxQFjE9cBjp4uisqzYLebdnrdpffr0EjXpNvxYs2NUUyMloI4JKW1FiGNRrOygBlFFCdVVbJyyigHQeuBQGyELUIlgZRqGiccCImUJm20oK/qUDeEgoYPdnuI3cnlVCtElS2mvnJgu8nWoJf4oNi3UnARxyxEQEBKkbVoUEajrWW5CtSIEyuiEUhjnJrImixWpOIkshERA1ZLXJ7kaZRGNo1QNSvtNemO6kXj2q0RxXvQpJkUKOwl3cwm6+ZyWa+b2re3ZE0dIMBqXa/RTxeSRDaLI6M9gtOaoG3yJgUUSKFjaDwF4k6sAUnARyS623GBCRQyCnMTPKGKosQ7713N7BWJMbZ2HrSN4i6bmJW2cYIKsyityspqWK3XTbmuGVhAIzR1Yyx1uxkpxe0aG8koYxVpjEGYwBAFQja6E6RopAFAVMojoHiNpLVqXf9oLCKgNaumVuF5uPtG0rORo1wphVrtznOXVqtBQdnoSpCBrpJ0nstGkFoHwCdCJfgJdYpsfFdXIhX8hDhcGcauiuHbf9f6sAChbVZ7jkc2RyP/nvvqCtQ819FAG6q9sYEBXAUoX+0attxHkLDNZxBEwLCRsLQACzcbodLqpDa6n/Y+e4POpBW2b8aOZKP6CbiRygi0d9fta+HmfQkAIgtRazcDah8vz6kdAiISCbUBqlcGQLmy6m1K0TZvf/Orq8GjKwFQi4U244LCVzCmHeDNUwpexVSDYMuPGFtz4Wa0r9xr0jK5K8FWO4jt38KVomqT1NrWxT3POW8/jfbfb3RXsGk6A9CIKNKuThSLqE1w1id0DgA2TccbPdHVfwjtAmKDj0BaucNVRFUQhCs+1NI/FrlqZAYU/OQLtEFFeVmO97dODk8//OAp1ry/3fzqX3/h1bduzs9nyzl1e/Dqa9c/eHSUZXqvS1ItR1s7BE7bbkAuHJfOAxgd9wPyfDXHHJLOQJkxqG2BYu0KJncyedLvb6EzUdLLq+UyX7tQjPfHXPk8z0MI0Mh6tvB1Ddr0R1uKQBlvmYHC9u7OIi8e3X+0Pboe9bvLxTRSaGJiguG4b1dlVcv2rSHUcYfVx/fuDylWVmsklajd7ejRg4lL1J2XX1zN5s8uJuLX6SAtXYNAkaHGi1KURLC/c+vJ4Ts379yui6YW6e8YdKaqm9VkYnQz7I7XS0cA3rnFvEpjtTUcOF+4whXi+8OtlLWnrC6L5bQBFG1NTLgz7sXxaMqD+XQVkU0SAMVogjZ1QNSmt/Tm1gs3sTqb5cXu/pZuSgnl/m6qwLnmoiuDINgIHz27t7N3qxf0mTxVPv/FX/nlylWjvf7k6KLfzc4ul64QC+7k/rETD6QvL/K9/YOL00sEn/W6pOzZs+MoyyqB9+891Z3lalqWZcUG84UHJ2mEFBrb072u8a7UinSaQuOgrq1VEKDgnKJOvr5I0Ij2DfH5/LJxom0yyrS1gJRnHUJCaXC7T73MnK/C6bMHA0KLTZoqET3sb9dNYPCpYiE+PXliFQbQu1vbp5dF0kuv7YybaqUgRJE+WUx/4afvDpI8KF+7qqnywJ354qK9uAw62cXl2Wg0nh6doTamXxiji/UKG57OtXfRa3vxfHWhrH3h7sH7H6xmsxmX3pU+yYYLXq/ylfOold7eylaLTay1CwGQNw3iiKRIGInQqtaYBQAsQYJIkADCRIFItdNIGwejFCptQIDZt1dd55ogjIghcAsftNIiLVoSEGlca4YC4eC830SO4eacRlJaIYhDQK0soiAhC7jgEBURALAPIhJca11qu7euVIzGRB4aZNRKE2HgoDd1YAqJ2sx8FmiCtJYnCt5obZVq1Ucsoki1l0xCim0iLCE4qzS3MQNIEhpEZW17N0jAIMJl07CEIOJ80IoICBgAUaESEmHw0BpwtVJaJCilpVW6AiByy6xa0KMICcl735YOECnPQSvdyiaREBCCsBJwIRCTUpraICQEAArs
FWkEBcKtooeBlULhjViqnRkMGYWqvVgGCJ4DAQEHrajxtSbdKrM8ICBqUiAtlmG88q0HYc8swoEFUJjBc7hCh0xErbdOABnAOcfe+8AsjAjOewksDAECA/vgXHC+8QCgNQEjERAqG5lW4xaYXQioEIMzRjNIkKCQRABauTkhMhCBIhKBwJ4FvA8s0vrfCJGFWxkskeIrsAkAb3/t9mreiVIzXgwSq3xNk4dHiwC//Ou/Zqpw77sff/TgTIuKs5idn53ml5NlnCjlpdvvdDNdppJud3dqmH/vnXk+o9xpDf2ko+Km9orndb4oHj067Pa761lIkzTrjZ/Nin/xh49f6EfjSIY92RllszyczItBJzo6kfNT71EuluWyOL/12W7PFPPVRdaroAOhqlKvLk4XN3eu/dLPfumdf/Fv3vn9j7sLSWx6/95FN/7o85/vG8xCXXnyg146XxV18J2st3fzphA8eXJoPWddVeWPv//d35mXx3FmEfA7f/C9pigWy6p2kJAe9XrdrdHZyY9Ozo7UxeOyCONxJyz93vXrr73ypftPQ3H6YZ90NEg+fPfD/vWdN155qZ47RZ2tF15/6ZWXjE4fv3/8+N17/U76hZ/7mY7d+uDb75pudDJVjx8sxM0Sw6998RfJJPlquZw8WKxn3pc7mZ0cTVYVpzGh9DpRtMqXr7+8RUGWrJcrN58UfYDY++7Ap1HVt/H6uIHQqRroWNvZUlAtrTXXdmLM1MVpA4wkPiN2gvt9/eIo+WAlnQRu7ycdS1I01671JvPlen74w//xt7766/9rsN2P750UDEAYCbzQi7/w0nh30Jmc5SXBn3543mgLBv7fv3Nf/Tu+sRu/+eY2YeZdYUjtdWjKcrkohBkNpZkKID9+tlCm+eIr4441vgmkUJHOrBaRpDsICpb5pUYYZNlyNYkjNUz6uXdRHBXlmj0OBt2VA22g10vswGyl1lDYUlU3cALU7/QWRY61lE1Tnl/UddkNji6fRel2p98VgxQrn4eAATjEicl9UCC4gOBhvqhfvbWrdrZcdbIuNtsGL7980187mJflB/eOp1U5rZu6LosCDbg/fef+O48njbZ/9e/85b2b47/+qz/zz3/nO5VjG5sYsN/rJlln7cP84jJfrwnQKF1XSxXBwU4vsF/XBQJoo9NO1u8PHjx62u91ojg6P32cJZ0ojh2i5xCqar1uGqO0VquiWi+LMtRVVfWzziJpLk/P00SnJi7Wvru9P5u7DpEqwvR8aWzqqff0NN/aO4g73fVqqUm3LZHdNEOlfVVIaEJTkNFxnAHSqp4qpyNrCAKR5sABApvIi2BwzHrVlBRppSLny6YukiwTQd8wQA3Y9HrdIL7MiyiNFvnF1uA6OXF1k3R6eZHrdNTV6enRA6/U6eX848f3TaLGO/2qzLXV3V5WLFwvTbNET4+Ptnd6yc6420SRSVfrfJEX/bS7M94aD/RiMXcVx1HHxNT40IRg0thHdrVaWERClbs6zpJqlS8XKxOZuJuM98c/Pnr//bPgXfnZT70VETSKgcv90Wh9ubp4ch4n1S/+lVd+/4/ff/sLX90Zlf/Jf/jVx/eODj9+f7WILlbVeQ7/9He/8S++/vWeIWMVNc5A0emNBJPLJW/1+8hlYA/oer2RwkhHEncTVFoZioYdK9Vwe1gePw1NY0h30xiUnJ2dDnoD8RyUoIpsbMdZupytq/UanTt7+IGOdm2SatAU2DXFxdlkPTtygt3xKMusq5UnSbK019sOEyjr1bps0t74/QePX7q2v1pPDx8/HfQP4sSkNp0/Prv48MGNO9dn80Z34p7qK6siGy/XeeODMbIsKiLl1uuTJ08g01r7OzcPwnJeV3l7FjSVU4qavPYutHbrwCGIJzHAZETyetlwCMGbSBtjmXuzVQWeSUPaSZSiJIqDhSYEAO4lRgP6UCpCZAUK6qYSCKAsEzV1LQ1VRVlULD6kERjFLF4hOd+EnOuGJ/OidNKweIGBkUh8qk0QjpJYFHrh2BhCwq7SA5tkUdrtadLgQ5mX84tVPm18bn1F+UXhhn4fo9ikz57MVxUvJuuqbjiEg2t6sLUdwgIjgESZVKhyFGvTM5RhQqlrRBljEEF8nhdsrCZoOCBBrGNShrgkMh50XRdBAMl48ZWvhRulI0ISHxogz1iWgRSzYp+vram2e7Yqc2WiZCcOrZ1ttSCBdVmXVeMEkbSN2JAmwUgTO1eXpVEqsxYZHIfgnbXWGt3rp91e7JvGs2i0VRMWs7qpqwR0cHWkbT/KSh/EM5UlKp+wE9EAOgRuALPBsGNSaiQ11kU2oHY+NM61WvXaBx+c1jqIT5IkimIg1EqhVszEQUhpq1GCD85prTtxggEch7STeJTCsa8ZEZrgQwhCWLqaAEJwwuxd7UId6QwYq6pEMpWrtDZAHHztgqxXZVPXQGIUaQQETOM4TlSsyeqorJsAQgIK0VgLgOyVc8E7zzoo9EppY0JEWhtTeu9FFCrnvCKVWMvMDAEYAkN5NRfAFSGiK//ZFVB4DhkI8SeMVYACjPQ8zQYB+ROjF+KVgwif+4SuEJHghshsfr+xm0mLMH4iWAhAWNrmk9atAG0ERXtoBBtmcyVQQrzS9cBzlAEoyCgkKM8p08Z0hVdNawCwiSig9viA+OqOFxERFUB4rm5pjxHlOScB3FS3twgIEdSVtAYZAoCHqxGSDZVpjVybhO+Nmorapcbm/W1ce3IVyozQwqrnmUXyPHXoeWg0bSRNP6HJAbgKm4KrYcKrUXhOXjaH06puWBAJJWwG6YqEPZd7bT7qduFxBY6IEEGQAIVFeFODt9HwPIdX7ZKNQQBb+0lb0yyokIAFha78eC2BwysQxNAuNloZmAgIBtm0s7X1Z63bgKX1mgVAAGRpnRLIQMDIV5VCV9+v9sv4iR7rJ1DRclU1RfPu+w+A6xvXop/7yo3rt3fympPxdrkIqZWDrnl/vnrwMd384ouxEV/lqzq8+PJ4uloWjaAykTYi1fnFuY63dHBc1bVz3Z5VQdhzsLbbGRPa2i3jzq3hjS/Xywfe92oXF9P34yzx5WprMFxXFVBtKTLQ1PVaxHfSxDE0Xqts3BvQdHr86v4L5IJvVGBfVXWKSgER1AhlmpnVxWz39rXB1iDPl1lmHz39QKdhL+vOFnmIjyNAo3Xn2vhsMhn0B/ls1cmSAHYxX5JOp2W9tXc7ryx4geCXs7O98bU6BCOkGgbfIBsiVtb1xv3lxazOISAGQJULA5kkSuKMdGTigRKpFhduOqu9oA/D8TU3f6qhtKpUkSrK6WKVx1FP6W3tV9e3tlezo4k7dU2o5mtQRiexxUoF9fLN/R99eGSHfSmX46F56+U3f/PrX9/tHDQ5iyEU3xtE/WFyfFSSUt2hns+nSZIqVj1rHn90f29nd7HIbSRRl1GlbtpcrmqldDHLi7nnIJqIwKaJ8eXaQNOPYRBR7lxkUq3VolyzYK/XXzZhmV+OtGz1o/WimBd5xTmS5LXsdIkUT6YT1yhE6HZIiUg9R8p3Rz1a17afZl28ODmO0sHl4nRnNPK
+rJaVzRKFgUDy5aTSoZdtzcsCRDWFZzarYI/P51/7PHGziNNkPNh/8OR4POxPl41NbOPm62lJzi3zhfHBl2HQS/OmOLmEbdOLjTTl05cOvrQ8UUXZPHk8I+F+R6Wpfef06al0SdHueDCfT0NTumIx7HXas6BuHJFCAEVIREZpITZKE4ihthgeBJDZB2aFKMLBA3MAhQjQRgwDOIWEIt6F0LaXM18pRdk1jShGamPoN7sEDsQ57+pGBEGhCCtEa4xCFWkTKYOI7D21vbOATmolyNDWWoW25d4zK0T2TADUenxRmAMKqHaSEdRErQ4IAESQvW8CM6H33lWORawmHyQgtpCopUlKKauUCFmlEcQjVL5x7IHIh6C10UbHNrLaEKAP4lyD7WYKg2wuuaBJMUO7n1IHJwCIxIABgBRtoLcwiDBwS+k2CiNmIiQi2aQkBkMKAIL4thi+fTeeRQGQwtDazQSUsnilfm01XhwECRRSAA7MzNw6swgJZFMuxhwY5GpeApG2YLLNNMIgAkRMm8t5eykNHBAoBNcaw5Gl5YfeBQbf3lBoBFDUVlAKB3a+lf40IXAIjIgizoXAXAdX+1BWjpkRKXDQRDYyGrQmTTr27FChIAYkx4zMhij4K8MyUODQeteI2rlPiCgEFkBUyiqlEH3wCKgUhcBK4dX+AwBA3auifrJYNb0b+zdeffHht492q0rOpu/9+Z8uzvL1yWrY711O68XFylNQERWaxRjvMT/P18huXaWp2MSMtnpUFimqUac7z5enk8m1O7e85MeHl7N1FR5M7nl39/ZW6dRqqZWn2aSpa1uvy5svdEe9KIvKvYPOh9R85ODheZnq/vffqaf18rNfSWWgPbr+VvrxB4vmNPr8YPev393/6Ld/c3BZjC/L6azpbkW3XrkRd7NiXg0GPuuaCJG5TAxSpKxhxno5WSckUUrLy8MP3/nzyewiV/zwo9PMxdEwWxbV6elif79jiMZ742HcOXzy4fWDg9z3puXFX/nqr9z/9o+apf/eD7776ud+TsuNuGt6u68uc/JgHt9zB9eu/ep//HeaReE9/9Hv/VHk4e0vfra3t00mPp/Wu29+ani9/84ff2N8Zz/C8cnRSbxz9+Jw8p1vf38wam69fP3jB4+ms2pyOtvd7o73dvZvbWvOqzLfH/Z+9M4970UJdBOl1rPR9ejn/vbPqq3Ezuf5H/343rfmS47f+PnPde8Olh9+a/1kYqgRgiQSp9gYGNj42+8WN9+ptrW5oZutRJKEjVW1w4vFShz7pnz0zW/98l/9VcySjycnHnSHzKu7wzd6ya6K/bTIPPcG6Us7/Q9PyuAJyS6mzWrKH96fKPYvvZx95a99iUL98puf+a/+z/9gMikb5/v9ZP/23uzB2eNZqL93+KWX925f28qr2jeBPXthXk/SxG4nLnBYFHknIfC+cWvnGhFvRcgYE0ehyeuKVjV1d7vocsvu2t74tU+/dXL4JIvDzrW9p8+mq8W0rnlS8uDWjZ3hjszw9MEJF7ieVhzIRnq5quNIhbz0jYsNcKwv6urlrbTcuTk+uPvpv+z+X3/3vwWAJMp0v7vbT7ev7Rkrv/Eb3zo8WQ5u7L3+ymuNyGWOX/yZLwx3bn7vL9791Mt3/903vl+z06TTJF6sysFglGZpw42BeDotX9w7YFiX60L1de28UUYpql0ZghMKMen5ZKKaTifpsAu11GiMjWIDtMxLoKiYLzk03VG/uphCI1liP/v2ncN753XwxXIRR+bidPJrX/5sN6zvffx4G5uf+9W3V9HW4mjenefX98crkP6wXxclYjAWV9Uy1ipOuwC7oVoURZHGKlKxIFqTmtjWZR2C62eJb1ZxQmmWXk5XkTfsyXkPAUklggZQgFkZwxKyJJ0tFkZpEd7ZvWHVWDWuDL4pXdsIqcSOxwMx5oMHP7595+ZqsgjeGWPq0tdV0BSnw/7rn3rt/MlpOV0+WUhnnJbLU6L0pf29o/NnLsSs2UQYx1HdcLo1UEEuT06rfJ0kymRJ8E3S7VSVq9dFucwVUaSxWJXnx7MP7937+a9+xc0vj++9201MiQYt/sXjD08Oj4tFVTYcEwy6ndnZ0WhH/cGf/kVWBz9pXrqxm/10tzL2t//wQ6dosVxPLso7N4Y2szoVnaisMwjrdbVaOld3R0MUWuXrrBvbTqa0DtxoTNaX1cH1l01iAMphb3t2eba9M1qDiJIkVdEWpNJhr2IbdzuyuDy/dmd/sVovL551RvtZv1NWRbWYRHHXJ10Lg+1rL0j9DMX1s7isq8v5syCumxmro2Dc137hV37w7W93OoPt/eu7128W9Wq0r0/mC7cutDLb127Op5cBgCTyAXzg9Tof9DMG3BqPP3hwGOqyN4y3x8N+r7csc1dfLZJRgIUJOTSEElnFAlZZ0cBCrvGKRZhtnAJJzaKieKAiV7vaBxSIdUzQiCL2tVVaQZAQUpshgBNi35BWhKgiVTr0jMt1tbhcN56TiIiRYrCxJebKBxEKngE0ISktkOpkECnEKlSKSACRKHBw4iCz2ItCpJI4i0Ar17h1yezr9WI9dcW0rFdSN65YunKBOiqX83KxqsV7GylStFwVT549Soexk6ZZ103prTEVhW5EisAgGYWAQggKxAffoMlDVWPQGIIPiSZC8L7xqD0LoAqhTQJRirDxHhp2PnDDAmgJu3GiEIqcOaZZUfT7iatqAWeNihRqLfO5//De2fmyrGomrRRpo1VijbFKAbCi3DVDmw37/aKsQ2RsFEVpFFwzn6wEoCpqwJqUagDZBwDvxPcUGV8CEXsxHDQhSsjSOBSlEtRxmhmTWVus10pCN4uYwDWuCsZz8EXN3iN7o3RsKUkskgEQrWMCYkKHvt3TA8EkMlqhsNfatmkABgnEBwzBB2ZxTcOMwBBpbWJV5jUj53mpMssay4JDcIG0Z5fXVWIUsniFoIlLJqMliLW200lNRNbqcpU7QURiACRCEmiTiAOKcoQhsJAoQUKlRERZq5VWAjpB9MFqy75erwt2EkQcP+dE9Mmy/nn4MPwEGEAB4Stg1Co7PsFBmxU3brJyrhKbn6cPPX+mtmIEEEA2oUWw2fxr+Uu7r4kILPgJZGg1KrjpecFPHgfY5u+IUKtBEZSrSCACEtnIazYMCAARqRXXcPt+BYRZWpeYUBvM1PYqtysDJIV41V62oTBXQIYQhVqh1NVBtRzpuciK4KpQjJHaupgrNNPSJmpVLRIIkNqtZRRA3vwNEUGYkVS7dw8bDwFfUSLe0K+fVEptsM+GXF1JiK4sfG0gdOsExJ+UX20WTQSbiCR4vkbY0LXNsyLgJtcJGADbkh1Epo3VZBMM+pMUT64cghCYEBEYN9gMYOMpo9agK4AkbRAKbryEVyyS25wRae0um2ofgbYhp/0+hPbVBJiBRZhRRAIKBOF2OJ/DTHre5ScCov49VHQ+mU/PS9T+2m316//BGxDCYmWgTtOUHDOhs13987/8+d//ve//JZugcv2trYvT8vRkXvoq7feqEmbF2cHutoTBoHd3MT0G7WaTExPNO7
++IE3Gun5z5wcfCu+uIfwI594ZXf/NfXGE78aPLqF84+8qk//9/8mRGFv/wL//0L/+jv/frf/Mdn17bObqmbD3IJTZKRiaMosrnGMeDZDUtNUc2lt9Y1MWkNMvGxkmuPPPFLv/71/bvTrN/56Gffo6Q4vHN3+6IZl74JamvnwniyPx+PytMiyrpOx73HnvrUz/6xL/7yPz97JjsY85lHL/cuni19F04bIr56fmNvdBpOlw1o3rmkG4NIFGlpSIK9vXuY5+69Tzy9FpkHu3dYbDHHwUbSTeM0TVxVMuGrr70eRdFTTzweddON7X6Rl2xIGDqdfqRUWdVpR0eGbBqP9g7XOhcnk2BiZa317CfzI1/kWSfWGmf5FLlGVHXBiqKZW5zdvFAsRoupA0bjcZgk7372sX5nfXp8opXNF7N6yk9funL45sFWNztNaoc03BqWk9nx3m7pGmtsmqY2MtwUQYhrP1m8qaBM454mKvO8bCpN1qkqHfZ6bnhwMM5s98JOcrh3MEjT2WIy6G+HxlXzsavnpGG2aKbj+U//1M/4qAaaJxmXxSzKkjjuvpXvvetTj9Wa+s9e/ehTZ1773XtSHn/lN//P1HhtgkuT27ePxkfHfjwPErYubd7evwsNUPBnt9aOJsXkdNZUDqxKo65Fr0EhQhLjjVvf1U6MVRWrl773tqmaqMKjO4eD4XbcyJf+zVdeffWtbG1Np3D52oXhuc2teFNoLcwOtro0Pjl47yc/XtZNNS/vnR7117seVKe/PhtNs2SjMLN5eVwsRr1ep2Oj2XyiArKfjme7DehOaqbj0Tyfnjl3CTxoVx0c7z729NmXv3Hz1ptf2dh55MKFs6PZfdfYKq8V6s7G9mxeGpM9+p53z2b7e289ONk/vnD5WuPzbhx7L2VZc1WnA2TH0lQkIe6kvX4yLTs2zrq9+WR0f+NMN9Lu7ZNjHafNoV27tFN0pt/87gsnhw+2rZnPwnd3D/7Ax7rrW90MaDabGzSdpNNN+3m+V9WT/dO3PczHRycE3fGp63TX2AcTeHpyZ7a4u7l+yeWF6jBgyCIqH+x+9JPP/MGf+9M3X7nxD//uP00z3rt1q70KTqfz3qDTsYkHV9Yl1CEABvSEtppXoH3BJ49dXidN81nwQSzRZDSrFrlF1Ab7KZyeTlRInIMQnO1YHUvaRRUCBAgeIEhENq+nWnwURYRpapXSzXgxB5EkslE3GheOhZom+OCM0QpRGep1406M4uvdO+OiZNcwWYMkBEwq9GNjNHEQbaD2Ki8bUjqKDRKAr1xgcI5YKZU6wlogitNIaaut0hZQn85PGSEYyAViGxOacj6qy9y1kc8KXIAgXjNpFBSoyrKdi5KihrxT3BrGvWOtgCpIlOZID7TZ6He01Ys8b6o6eO+CUAjBOSKJoogwEBOKpCbW6LVSgKRQhyBa6ZobRVprq0xk7YAA2S2s1mBt4VnHmrwr6oadeBEdJag9B6dBGRSvKAA17BsJwoE1NCBGqYAcQLQxpEwvG0TBVaUrXCXMURJrUBwEURmtKu+VVggBSBTqvJwqh8KsAYSAFAVmz0QAVROiKNGgMDKVhMoHQ5pZfOmsiZCodEFRJEEvZhU0wTM35DJrQNUOKpOaAOA8euesEZcgGfDeFTUnJnXlKYepjWLhwsnE89yHkoMzyIJlEFSgSRlfla5u6rJi1FqRKGnKQuqmbCBXqiOqdtMo0V5YKvSuRh+wknI0Z2IS9KjEB8jrFTQBaHXjy6RhfEh1HubNtOuFDx1FiAAEq0quZSTMCiK1XidpGchDb5RIq1JfGr1aU5UIt225sOwve4cr4UpzgwLQinSYUZb+rKUECpeRONCWc7VMqYUrS9DRgsbl+SvLXQAAoe9rc4OVQ2slZVl5laCNBlpGIrcvsNzCE/bLICdElhbkiGrtUcvfIwRqPQQrcRAsM6jbHhlaqnlEAJGX9rlWhsWttojf0cJzu+mtL+H7QoLat3VaHnlQ+HC20GqxpM0kIkRc1ogtcVSrFIJlJklbx0YrHVbLZh4qrxBpGQzFAAK81PcsVUgiCAyE1M5k8PsMbtiOjODDXKZ3WNPDqRbI0oYBKCiolkFRrdoK2zNBlrImFpHWi/bOiQXAraisRXUiS4+awKpxaGWta3+01YXx0ju4+m8V3KVwMluUc7e+tT49POxe3oq3Bq+/8WA4WHvk8WuD9X7WUS9/596VC/0yn9WnVdKP4qFZ306M3mTjDyYjzoueMCn1/ucuDjZ6IZgv/P4Ln/nMpxant+ezcj7Pe2k8a/KLFzcjYyeLenrv6O03DgD4/R9+Pl7Pcn9MlJf5jc0za3eOb6wNtqbj+xEWcba2sf5IOT+xHIcmPzg66vR70pjTQ/fmzdOL6+tapJ4dPP38YHMzu/XWza996x5wApoDuOu3F0XYytLO/sGx4UXSjZsqcMV5IcGHg5PpmfUEGNNhNmtg7whCtPb8B89ZXTpXdpROsk6xP50XU4K01x1MFh5USKJuVY9AlG/EKaWIjWMNJWbeSZiUucn6CDqO+/dO62efuCDF0Tyvg3MxRZD2/uWvfOuFtw6qio2NkR1AaBV/IMBNEEHnAIAF/Xvef20tzf/sj27CwdeuXXlqllMV62sf3jnZHzXNrG4gNIlJ1rPeBbLbCJuj0eTBwX7VzJv5zHnS1g4obihK1mPWHk2UpH0U7CTST6PTwyMELB0D4tl+H/Lae29QIEwG/RSpKp0E4LqpbSK9dTuZHgGQQu53Nmx20Y1PJovDOJEk0YrJ+3o2rxi3L1y89Pb9t0A3SUcTV8Ll1lYq4gyEK1cu5gLFrEg6mcFQFo0xaufs9unpLEgTXL21xibV5zY1eGWMFR3VZRUaIC86mNR0Ttgjlt3EeowbRmiqmbAmUGG2f/Rq3ZwK8M1b+SJXznmPgHGn24+Dm/t8kXZsN9X7B7M6BN8opTudTlIV5cHuvN8Zpn0L8AYsLyhRAByYOdTex9aICDNoRAXEEES4Rb3eNyF4z6K0ZhEkcs4ZRT449l6BMso49m1MnVEqtoYEvPeR0UiakKxWkdEA4MUro10IzMH5hjkoQBRKTAwgIfjI6GVxGDNqXQcHXlqiobUx2lJ75yVGgBC8c540KWEECgxlVRhtA0jtnDALkPMhBFYKBQXJxTZBUpo0CHJgx0u2HzhoTSgYQvA++NCQNoioyRCiAlSAQdizU0JBWFYcPIgQICGyBBD2woAgfumkFdJBPAqSEAAYYxBAawVL6k+BUTAs1xcQuX3oEWkywhI4ECkA9ty0AiJc3WNbVSchKW0CE3vfXltt5BAzt4E8zA7ajDqQEBwTeFaAKEB1AAUMzAIVoWJhTbpdMQoSAjOQqgMjgIgDQmEwSgMAQ5um5Jz3AQQErNaklID44EMIEgQJvbAPy3jsOgQOgkigCDgEZq20NQYAfHDON8oYJAzMJMzOUZtZJYFolVSIyCAKMU0SApAWaXsGXwRm13jRqIiqqk7jpK69CGoiZpHglTFKKWSuAwcfHroG66KOtFnMCkQKDMwUx4O6qJumJgomUZUyRxVWqlk
zcPv4dDi4WFt6/0c+8tZ3Xnz1+qtZujZpZmvb8a2XXiY9vrm/9+r1o+cfpc4gIsPoZDadzseTzbXm7psv3dq9+8i7Lr768t6xA2+ZM3XreNTtnXv2uU8+/6a+/9qL3YHZPTyYj97uxIO1zfP19HS40dRl2OoPqyr/zhuTXpZpOy0Kzjp88xt3enH6V/+zP/O7X33hb/+fv1aWIUo6yFCVlVUUfNGNo8Cste7Eke7hfDyvyyLqRDZKx3lZOQaPimOqpFe4jhTb3epTn06vPmPe/ZHYnCPfuZXjs+bdf3QCOX7oNOAigYHj043L0u92FyAKet4HVE9bSw664L2JZDH6orKl7pwleK/ngd48xU39I39143/+zI3fuf7bKuo577wPymgdWd+EKIlV0k17NrNQFvDxj37ozdduQtQ5vPv2o4+d3Z81ZzN9tH9KlbeW077RhPuj+YeefmI+npw/e/HbL90MQHEWl3XZeO98uPTU9t1JcevmA0V4PKq6vaTTTX1dV94vigabk9gYAS7qRhvsRCYvG26qLDNB8YNJXTHayNYBSClNXFeVCFbaIxIGNXXxgxdqfsH908/f/MHn9A/96FZ/s7f9xKAT14vJmG2emnz47KDh2Qc+sgNl+LH86t/7a1/6/X/65hX8yJTi3/pb//kPP/fo2/fu3Zvx1pWzx/WcY+xEVEmQeW0S2knx/le/9eyTVwrAybHPznR6nc5ieny8f++7L3zp1mQKaZxofef26WOPPGcE41jXOimn2eWn3z39/ePtzf54NNW2iXqDg0N36fmP+3/+LxDCzvbAGt2UlQRY61uN4eDoeDxtPK0WzxbNvJyaOIojoxL1xo23DvcPfvhTn3z22SsnJ3NnKN9ce+zZqHBNFUI1mww2+z7wpZ2LOtLnrl06PhqV87IuatvvOsLj+TwmLQGhkp5Oszg9DdXbd9945NGrb3zvzcH6NhMbpbygEDTMwSulukdHUwa5fGX7u9df7Kcb4BoiWdtYUxIOTo4jbX2Za1JSBw1WMweqtna6L998tX/x0vGdPSSMpPZ1EyVxknYm49Mki7JODxDzfKIjA9oEz1VVRHES2ZiZAGNBXYr0+muuqtBwr9cnkFj1q7psqirpxDpWPtRxRx+P3MHoepTa0Uk5PZpqA/Pp/MG91y6fr03n9PVvLc4+857X32iCSg4Xrirx8ecfuXf9bpUXV5686rXNxzMQOTopqsKdHk873c7xvBIVxVFMAE0IJLHRtQ9h/+gkiokBEdXpwcQpCN59+ENPDdd6a/2438lms+KR5y7cnZ7sXDwfMPzu57/2X3/qRzfXu/N789945Uvvff+1CunFr3+zN9gMrAUbENXJ1kRQvBtPDkj79Y3t2Ka5d2nfoF2fcz04u6FRHT844EFiutEw2a6mp82CocYmj48PQxPg5u7drWvvOb63Oz86Wt/uZf0simJldUxR0zTH9w+zjjl35eqVZ9/1W7/12ztrvaxKtTU21ijUT/vHx0cMfvvyzsnB/lq/F6GfL45QQ9wbLio/945M2t/c+tDjTx3ufjfd6n/s0z+AtnP61tuDoXrmcufN3VvP9ocXOkY0qcAqM/PigASbhp3LKWk8l5aSjTM7RSnMi3w0JSzj7vaDgxNw0/PZxuVHr3ztNz6/KJtb92Yv3sjPPv6Jf/evffDn//Kfrf2ivQpIqboOJyeTTj/W2jWhkUhRTKP5PAvaaokHTKY+nUhdx5qAWSZ5XpVFhAYrmFXi2DYOlFbG6DiOTCTOs1EqUqp0VQjBBUdEgbXpRAF0HeoEgUAxMwDl86qTZY2EZjHrWpLgoSxMkhqt86Lyrm5KzAtvDGZJrJHY10pZIhRBUlC7RpSK0qhxPolMNyJUEJpaiwKkwAG8pyDaRlYp5wIqqprGaIpj0yWllErShJ0bDIdHTakB+92kJqQmKEQRbkrHAqB1QNConHd54wMgCCsiFevADEo3gBXoGYNfVL0kjpNet6t98LNqwVXV1laUvkwURVZHCCR1RBTqwtik1TR7NHHSJ22UjoRQIyIHrQ2zgKJkvYMi5O3CeVFo4hQBtWgNcQhsRKE2Zc3K+dJXpBUbCizC4poStVpUpbIZRCGKCCkBpwP7BphQubrx3kU6QYyUClVTCYuQAtKeg7Bi74lQITESk7akGLgoqkp7q7MstRZ0qEsXqtKVGQozFgGhEnKltaRIkCDtxLZj61ALl4mSqq4iyjiEWgqqG9NPxIhSDngKONcYGl/NpZ75adWEICpSqMkYgIaLumkC+rqpy6oqSyeouQ5NE+pSghOfk9VqO7UuL6lD1FQ6YJZEsxln3Wx9e/14dMog1hilrRb1znx+1SxGCuGho+khSKIlMYCVT2sp52hFQEt1CooI0Tt/s5qYw1JRsqpvX0ZPAwKLIONDSvN96UgoSw/W0qvVCkm+XxO01DhBm0AkIojLSqslcFqyjna7ZOX5evjpd/Q0rRNs1eaF73zzkpa9QxSk3eSVWYyX4qm2FGypS1oCJ0RYdaK1ahoBbGfD9FB6tIyYXu1+G4TasiJmlGVis6ysVkvFDK+0QvKwgw7oIa1rmR6HlUhsGc8Kq+8CweWALWcVS2seLqOFcNl01gY1Ca70ZqtIoxUdE6FlQJDwUtjTCsaAHsqQcMl0VoKvlbxr9Y/LAW4HQ5Y5UksKuMRqvJKKLbkQBgGA9nvb5XhYZmy0SqyWSLXJRtBmNgks46t5pSAiWUX2rkRO34eKusOtfp+jC+c9K+dknvPu/v6LN3b/6E9/eKMrk8mBn49/4IMXEYtg1/aL6f7xadFoW85+6OPvEZV97dV7yfp6EkUP9h5c4trVp6aOHlnf+MLnfuvRa2eef8+zp8cns+PpeOLyYhZrHHR6Tz37yPU3jqaz/OtvPjg/0x/61EVTl+XNWT6RHsaxKOx2gne+KI7KG8Zqr0RbS7EJGibH09feGj/1yPuqiXKTyTRUJ2XuxtyTnlnMtwd9n7q0o+u5Lkcn06ZR/fjs2no+r/cnh1cv7cheJU4eHOZELJU7d+bM2mBTo2YVV8APHhwMs83uWve1N2721jaKRZmkCkHSLGVueknvpDwhRWsbF09mfjE/saFGLdkwu3frXl3WmztDDkySPXruqWbuuapA04M78+Cyb12//q037npvtAIJPgRWERkN1hgOzMxR14oKT+7I3/grn/3oB86+8MUvnNx+c2bVWj8NebMY7zpDgySC7lZ37dzuLL3+RvP177xxvPd1CXqwnl24vH3+8tZ0dlAXvpyLc3BaHpy/dJZsN0sG9fjg8N5Rf6ieeurCq6/spevD3LnGyUlRdAHTOF3vbN4/udntWx/Aqt7dvdO8MbHxwEoBGZsO1o1bJKezepBudQ1Oiwdrg43p6SLieHO4c/feQhnT7/YUFMj3sk6XIT8dz7rxoJjVSD2KzSg/eXQr2zprD94+jTgOzYKbksEUi0Un0YLkZvNIsRJyIInJNISqKAPa3aMTTZ3UBoLQsf3Twkhgz5h2beDmaDrppaRITaYT1MpS7auFodiDYTKIiYCdzfIkS9GDDn738N
6Vc+eKWRAdx53O8WjWXgXeB0AqykpCYBZC0qS00syBWVzwvKQ17L3z3gtIbIzRGEIIEoIIewABa4wCpbSunFOigDAyigRQJNKaAFjYWCOETXCwJMQcOCAKBI6IrCIFYKwGEABDhAKotAFU/DBth9AoRaSssm1inFEaQFhYkyitPaJrGmI0xjqWumlEWj7tSStEEhEOoFG1KXKtbc07J8JtvTopNAKC6LxzPhCS1lYAVBtot7qFIyIvrWRB4bIt1HMQ4FauiUgM4plD8JqoDbQjJEJEJK00oihFrUFLZPnkA0RsWzZZSIAIUSAgK0JA4RCWsYDt45AUs0elCYRFCAkgtHJcjUCE3vvAAiTMbaA1c2CANmKS0yhTWjECM/vAKyszcxD2HoQJCZbFDK2xDonIKNP2SnofAocA0jQuBK+Ujmy0EpVCYBAJiMjehxAEpKprH1iQ2ixCYCZud9y1otyWITW+jbrCQAIBImMQgIGVJmQMLIE9ESltFAmwECECCKG12gNrY2rnI4ssWNYOEMuqYmZttNamKMsgiIS1C01RrZZgoCmcUehRlI3qeh5CIEQOC0++hEI6zSxxbpF7V6SddHutF8eu8Ic3Dt+6Ob3345/69Nd++5v/6n/8Nz/+kz80u3/w2kv3fvyHf/Sn/uTPwLQ0WsWJmc5nEJok5aP9u//mX35ulsPk9d39afNgIrVzislH5re/df/9n4w/9ak//T99+StnttLHz52Bss6rqeNmVJQAJopUbM1gc3P/pJkczGPlFnmuIgDg46z+3Rd+4wd/6AcOFh/4+7/0AqoAAq4JKotPx4vElp1OdPXRi0Fgb/eWhCKOkeuGnSROupG1yimQuB5f21l89g9tPP3pK8OnCGLH9S03Y5NOU1irj9fi7JzA7cBvxp2DqDldzPedBxU2zcYnUG8pkAADboB8UDZnPY87aeFnqdQkPQFfwGTr2jaXryama3tYAeIaDTbXyIXRafnJz3zq2rn3Xrt29n/4f//FJIrefvONan78l/+zv5hJeOXNl3/hc7+/c+4RqIPein1dTedNbPDxS/2S5dzlR5++9szu/snxohxXtQ6hF2ur9HiiF3ldFhLF1B8OrFEiClUUSg4sw25iCJRvNoYREhTzYqsbbWwPbu/PqmC7SZZGVdM4G8enc5dp1R/Etw/GqLANkTeAilVM0ZtvhZND+tzv7b/vUfyhT9bXnkjOPL4mneDBNt1eUaqiPkJX2l73p/+Lpz/4cf63/+Y71Zx/9n1X873rf/SHrlI0fP0b100UVKLOrPdC6a7fOHnsye1sEBeV1jVBdTQ9neJsMGrU5O3Dgwd7aWqvPf3k3ZM8HnR02qvzxvhyunuImywBTk/rM5evaLuog57eP8p6M7YmmLU/8NlPf/5X/9X5C+cx0liH4GteFJ5cOauBdVUsV5Ijiyw8Hk8b9Hf3br37mef/xM/+wfmsHh2MjCUi7atJN9Zpv3d/915R1x1bD4bZ5vn1+Wyxd+8+gtGo036SpsmkzgPwbDaLdBJFnVleNWERd8599Xc/3+2d2d65UDfe5XUhWhEkadT4IEq7ms5unpnXZS301CPPXb/x3Q8//ewMEHUSgmxt9ZqmHo1OgpjB+oWoOK6nx42vt7cG12/cudrdWN/eSuKkyssk1lkW1aFK+kkn6XlXNl7iOI6j7qIolQVA8jVEuktWpvMRaXG1RGTTNF4sjrvJoPEhcLQoF0A2UtHh6Z6NYiBKTe+1V77R3RiE2qEoAEytufnaK//R3/xkusXDURrp3nY3DK5enC54f28ewJ9/dOPmy7uHe9P9k4lydZqYxZ3D0Pj1XtcjoliT9EAQyijrJd1IjeYPxDujyVoVxenB/qntx+97/7PlfM4qub/vxvdO59PxwXG99ei62MGDw8VEx3/97/z87VffpPmhm09/4mc+fG/voLs2ZFBZv9/b2ZydHHs2J0cnZJv++tDnhTLaRhGKWC1NXfkA7EKwZna6n+g4lEpFNoo7oSgaKeNudjV98vh494lnHrl3Z+znd85funB7etKJuo2wouCLqZgeSsiSIL6+f//w2rOPfegHPuycnx5O0pR6/RSD1NWcROqymo4mSTJgjxwcQFjf2s7Ho9PJSZr14zNr3lXT00nwHaV1HCXGEp+98Nr33sgz9ZXv3Hzk8sdGJ6HfJ/YVK5pPq153rduz+Xy+vnZFHZ/WBYNXNrau3u1vdcejau79+XM7i93ZePdgrsSlEJ3pvPXg4A9fvXB0b297ff0v/bX/5P/4X/8WwAgAlGA5rVAxClbUxAOawMIDR71UsZ2Xx1uXbGeQ3T2catMpZlNWSdQHxaaqFTsczxYUTCfTWaRMrJOUFvNcRJdNBUo1ofY+2HhgExsIVSdVwbmSJ7OybKDwgeoqjrIueVcWcaY1qbyoPfuiXKicnIP5tEbgwD7SsQNFhFonCpFUAEIEr4VAGEipKPK+yUs2ZHEZZUjOe1czosmbXNKYg5pW86oKkdVYO2p8mqAJjS+asS9r10Sp9QEVqjQ1dVWD58gaJ0JaMYJGzY1XClVARUZr0ggIigC9ExSuAwCCcc6J58gM19Y70borKy++KHNflUJcOW8NBPY1cxJ3hCWKLImXgMRBiyEWY2xT5cDOCds0jYed4WNPRMlwNFpMb9xcjEdeNcisxGmlUAkxktYJAWptdIw2KYFQq1BW5BoXnA0BfF4ufK/bSdKEJVVkm6YkQK0a8FDUvvYB2ROZNjEyhCDBazRGW02ktBYCZueYgT0ppTWFqhGjKNRlUykUo1TlmjiJ81FBJWHjB9kwUBkbrTLLSikhbTuoODUgdcNNQZHYyHh0Tkok433Z5JNemhmT+CZ4dnlwECprU9LWe+dAMUDV+OAjxKSbSpVL42AxC6HhhCIFSubFtBx37WDv5MFatl7P8poWg8Ggxnx9rUblTg+dFYuREreKaVkKeRBaXtHyHVyiAllm4rT/jqt6exBcpiavCtNX2GlpHJKHTrKHWOBhcvRDv1Y7jcdV+dey2v0dcRHy0mKklnIngIcQa8l75CHhaW1q7Wv7Uv1EiIxLX5Igw+obVg1kbT29LDN/Vvqi1X4sDXfwMN1nmb8sbRx1q6LCpdIfBZmXSUIoQC0KW22nYJsZBAiMgIwIbX+wMDAC0kNrGLaAZBUghAEAeOW6kzYhBEEY2+41XA1Uq7ZhAGyDfh7ysmW2KgHAyvTXSoRWmA5JHoqKVrCpBUbtYCxNDowtbFtSGWBmbDOJBETwoXsQ3xmftlnuITt6eI68E16FK8kQtLMmgTYmajX3WUWkLylfe5pIWCV5iwijrHRFsszjltXwtcPQrsnDypUHy8+942n8flQ07GaPPXbp1ks3J/ncjQ/iKFalPH3t2UbSBgtUACG8+vqbqkNJt3fuXO/S+TMG6Oh47+bxeC2Jnnvk8dnp3mA9vXA+a2o/OWzmp8eD4c562r23d3Tz4EUb4JHz64lVnT6OTmdzmAcVuptx2tX3T8f371VvfONB14SjI4rGHPcSOphvnB2AeLFuMMx233rbZlEvTbZ6a4t58
9bLe+NRpyO+qeYyOTaNevLyo7dv3Yuz7ofe9Umr8oU+2DmXvfytcVHR8UmOCW1l3WvnH8korooiNZxENCrrtZ1Nzrkqquno7Q+8/ykw0qEok+27b9+sF8dpJ/JNE0Vxr7M2nUwojjnQyei4btj7xkEeAJLUxqkN4mMj6z2rld/eHM7HmM8brvaTyI6Oj2upmSJJB7/zhc/ZOCElpBhJBQ9GkW84EIlAorHP/ur5/t/7ax+ey41vfvGb/fPnkvVnqtn+vQd34sQkg0EqG2/f7/yz37jz6v3rJzMYDrKL57Z3Ll8khvWtNEt9WSwmx6c2GdokKasm68RZnM3HI8tczKdZ1okhWpycuqawCjk0rl6wi9JOjwLn+YSYQ8UAwVfH5zeHFiKMbRSp0dEsMbpplKAdrm9xk+fzMkljY6MozmLV8VxursXTxahrfFlxmY/npH0otjfOQkN5yCPNmmvtnMvrcqo1WlTmYP9Ii0nS3uFsbJRXKmhNSkE+myVrcQjomoZUQojsiqzTc/UBu9qx5tIaFdgr33ggthEkHcOhjthHqambMNk/Rth0FVodttfXvARWUJTjqsx7WfTup880ZWE2tUIoi3FnsHTmI6IXVlpxYB88EDKHJnhCYPYtetVaiwgSAalImcREWmkHTWBQRM57pSg2VpgJyRCBImWU1UoTiQ9KkQADKUNESqFIW3aGiHESO+/axsLYaB88AihtmFkR+dAa3Ja5OSzMLFZlKCLsBaTdWi8sSIrAGAsMDVdGa2Fhx0SiIsOBFZFCQiSi5bKJZwdAIohM0lpskZg9CQEzIoFAbO3St8zAwm24UggQhFsA5YMDkQDIQQhbFSaCakPo2s6x1swlXoJWGoARyCoN4pHIe0eIbTqUAmRpccmqUAEJkUSC0cYHDwKESgCYvYAQKR+8IgzBL9k8IiC2umgBYEQiBcgeIICgQAhBK8VB2ifH960fCBFJG30HwOADe41ECFprBhFgIkKlWRBQE3Jg732DCIEZQFBR6ztrTWSkNa6IfuO8D8FJcD445xlBKSOonKtJBICVQgYDIIRUu6YtUiOtCJS12mhFgCJEhETKe2ZmbZQCJBBjTWDPPgCAiYyGQBUiEyCxiKtq5wMABBEgsVpIK9dwUzWu8c77h7mGmfU2zLNYT+vGGuUobtizjirTK82iCIWN8qiADZOFnMuYhymEZnHzte8YBS/8/uch4I984rlysnftyXe9710funnr5t6br3Wj1KgkypLpeF4eHnSjmTu6gVBunkum83L7fL9Qvihd41xk0bJaX9+6duXqlSuPTMd3LBGQU9YK+MikZV1eOL+Rj6a+xqIIppMaXYVqLoagCRnj977ygs2PntzcvLZmD4tQSyCDXsooUwRNU1aTkzvzxh+fThSqzEQ9ocSFiBZDqIfG72zBe3+g++QPX9n4gcseyLkKYE1HA7359Xo0xfQwTp/RUSowNSoJ/IaOe72z1wQOm73j8vSXOuuPe1jz8JyyZ9BSBbXqfaCCY9JDz/vB3yOoLeiXfvf1UGtiKKciqCKTdqPeyel+HNmj/Zu/8WufP3/xQiOqWnjta8oPu4svPvXuZz7wwad/8EMbX/g3v/8bX79jzuxonWrM5qdFvx9NDufF7WLDDSK2vpzZGg13fvJnf3zv8OTG7ZMkmrhUefGIajEptGqUFaPVoJN4DCejWaq0d2gBdgjPcvFYFl+72rn+9mwj8pEOo0VRmWq7q6say5yHSSQK68oFAFBQs/eOTaImQVCi37/OX74xS83pu54wH/n4+gc/cb63kWR96/y0rkbCjHL46POGT8NbN/ze4XVLcGNvpCL42I+fb76ZfPvl+f698VYWbye6WkwcTufzzhv359ceDZfT/ObX3xiN8ZGr564+d+nGrTtNNR/vTZ/ZGUQhjB8c0qY63hv1HrmUSZgdnaKCvh3sXBjefOn6wd4Xn/zYB2tTXnvuafzcr+/vnWxt2RAcoz6+fz/rpsH5JDFJL4a37gPA6cmoKcv9B+PHn7v4J3/uj9SnwTtRkTGSgYZFEw527ye9tSzSg87w2qW1b337JQ++qBtjoji2WZrNZ7OqqgpXxEnc0b3a6CSOk8Q0oZFQb29tP/vkM4cHB+fPbVRF5Wss0GvL0ARtlK+aeu43zwzXBunt/ftBIQplnT5gJKAOD46TTpcII6W8qEU14qZ0dVMt8o217vhMms8PtzfP3Hj73mOPXepTtH94Jxt0ARD9uHGFA8mwy8FZnSaRrRFRyPtCmLUhpWyc6OnRcVMyaJhNZ4ERtE/STGrJqypN+yiChFvnthZzV+YzFkZ0x/d3X73+5pz0537j3vueOZvE6sHxbg5DE/eY8f6d2/feqtBXnazb7e989frrO2taPPXW1o/3jqtalNYnxxOV1HGiL2xsv7331jgsjMKybAzRpAyJLoxzicoubW5Mu/rXf+1LH//4Z7t1Hzm8cuvFWAaPPvPIje++2OtsLxYniZTim8l83FvfqmbloL955bnHb986Gk/KLOrHtre3eLsTpRSC0cYkdj7N44Rsr5el/Xy/2dwaBF/Y7Y5S9tbte6mK08K50gOR7XW6qZ7OTkVXGvwr3/jGu3/gk+ubmyfjJuum5BCBy3J66drZOy+/df36mx/95Cff+u4bg60dLX7QjchYJ+KrSiuxqbbZmlGxNUmcxCphhaC03z15c3N9M8r6+cxtbm+ISWanPfDV7s2TTipREm3vDE98/rHP/Nin//CfffnWq5g/yPcPzpxf21hfc56CItsfsE7H+e70uGpOoLfZAaB8MXv7+g1fN5fPDXNX/PNf/dWLTz5x++Z9XxdJin/jL/+l//K/+q9MdZd58aEfeg/85h0AKIrGNywKhKqop0xExlQ2VlURvHMqXTPD7OCkqus0FJUrK0GPUSSMjp0iSlOrHMYI3V5UO1+zN5HWjpWSELw2FNvYMyOBtTpAcBA8sUo05AIhkDZCwfumccEHhliBUo0PsdGLRYWk4izO85lACBx8U4lWESCQWG0QlRNfh2BAkUDwtbCvhA6reWzBMFVNhWRAqK4LjC169J4r5xlUWVTOBV83TeOCroaD1NV1lmRVVdVVGVDXRBLYN14piowWgIadMWhindmsqJpI241BL4mVKyuliEV57xlCXXpufJV7TTAtcnQSRXGUpnZoABzWZagr8iW7ktEFrQxgVRbKWG3J1Q4bJB081YqU9947ZyKdZmm6fg6iLauc3g9u4powiTQF8HnwQdga7QM4z4wAhKSVAS2kTWwg8q4pgq8NMbAT8UqcYnZ1owjjOE4jwwF17TFfiLB48Z4RkEVrpbXShCAcasegSJOqG6+JaucbB3Gi595FhoyOlAJxlVWKa05AB4IoiZQWMoqMck2lhY2hwI2vm0jZgJXpUtQxSVeLngMHbkLVOO+MdRJrMkhVMYfgtIEiXwQtQBFqU1czgyYxBpE1ivGVY1XmKiiAsuQKwXtRJkDVzVKGpnZNLxsEUHE2nI0X3ThyiSmKgJ4LVz8kRSJt81QbqkMCTK23CKQNzFzqRB5igKX25vtk
Ou+ogx6SgaXQZeWxWulHZIlCWk2JIEsbJYnL5NTlB5ZUY0mtsBXdLKUrD8O1ZRnygCvpD642CZdmL5bQmsEEEIHa5dVleg4HWPncaCmueqijaV1TuCIPbSNZaOU3rfXuHewg3x/p0y4tByRSy0xmZKEWXSBhK1RqaVr7Fk3t/shDjc0ydwiknU+0IUG0hGGyol6rzZWHUqgl5GszjZZjsAyUQmqBFwHJMgypzf7GZb3REom1/08AS662HKjW0CWrsjJe4cTWgriCae8c+4ceuNX4t6fXUnMFy+KylSVsyYdWB/ShUQ5a5rfcCUTmNqODlhHo7SeX7jV5+P24PBTtEMr3n4zLc0aW8G6FAL8PFR3endQTqDtbT//Mpy5+78v/4H/5vNLrnfXO+MFeEui9733k+O6N3tagFJ8SphrWN3fyWfygu3E62Zfy+Ic/9fz9e2E8O+530plKUtup528N+jYAfuSR994/qe/c2L9xc1oXsyceXV8bDkHw4MFJ8Lw26Jy5tFEtFnkON++PRWXPXHh8WlTTB7c3ty7FfZWfHI7L416SIlKSbX/rWzcne9Uwewp7ph9gIfn6ucH9/ZPrr1yfNBnn9NjW+mJUNmAg1hvdpN9TdaBpGfb3x0VePnrh4tXLZ7/wW1+BptwaxDsb2SEsTAIppX6SF26yMIYbG3cj3ZEoSYEVez/NZwyUL8o4jSVgHHUb9HVVJ53IOFXMIbL9t154W8SqPs7neZB+MCpJqW7mnY3u/MClm8Nf/OXfjSPLbRYyg1ZoTGSimLHBxHYi/6nnhp9+92P//F986Vd+8Rs/+GNb+7snw7XNMJsZqNfPRuK5KAZ/6b979cU3OEh/Z2fnqSd7jzyyEZPnOleRXTuTjvZ3bRKdvXy+KHFWjNG4pppDfWRVaBpMuumiCtNFdfWRywfjeVOUfaW21jqJ5SzRZZ4nqZ6WBQh5V/m6WM92iskU6sUE4crVp0/274Pm2fQ47nei2K6vDUaL8ujBYa97SUcgoYZGbw7XxzMxaUS2Gs0r9rlKTqcnC/QZh2L77ODKlfOj/dtnTeYs1KFc68ZF4zqpBtGGOI6jQbdT5WMJZAhNItOTvDvszIqprmotIQgmSdI0AIriNANrRSoMdRWa8TxYRSAdWyeBw1p2jlTPFXmiqBMnZdNQkvqyvrzRO9g/yrgqQrOYzHu93nC937uw3V4FZd1opQQxBFZEgbmu6uBCGicKxYOAgA+h8Y59sKSs0iRthBsapYGFkRQpz16TapoqBEYAo5TRmoRZSWQVoEJARRi8MzZRRhCRSNXOaSK0ur23EREicquyEdatAkeZuqnEMyBYG6GIUZoIQ2gD88Q7LwJKY3CgiJIkmeelJh3IZ3GCAt6HxBqi1u9KntkDI5DRioS9DyxBQJQAChMqEVDUNm9ZRFjU1XIRQBtp6T6hBAYAg8TCzKxIIUrgoJVSSpOm2otwgDbmGaCtDxCWAOygRgICBSiEqhVEggL2jEvXNKLSreXa6EiErVYSQmhtgixBBLxTSgmDVrrlXMKMgERKAJ13BKDJaiIIQZF41yCBAiBNqDS2DXfBo9GoiH1g9ss6PARjzBL2rJZVCFETCQC7GoRd8BxYQNqIIkWIwsJNuwsQoE1HEkDv2fngONTOB+8BiDkISAih8Z5FtMaqCW0DHSEpRZqMUiqOImutVbR8CCCwiAJSZNAoYVbYRhkJIoW6AQ4QgkIgYBKS1tTqRQA8c+Cm1eYGzxIYhJXWvMoqunf7wWM7j6ImDMEmWBVVhCwBPOrQYAw08MHVIlUloE+PphbQy/z04Hhtfdg/t7P1yJliOlrLkq9884t33zz9S3/uP3nlft1bkwuZfvVbr97ffeNTH3/vlcvpP/rXX1Ek89P8J3/2/7m5vfk//a1fPpyfMHIC2ur6dPdLcWwuX754aOff/ub3er2k39Hnty/s1ce1mKAv9gYFp+7Ke869+spLUFVPv/vKrZt7McHjlweno9FsUd2/9fpOTLOpCwJJQmLgdDY/00l6iZruj7WxG8puZpZc6IfZelL8yGfXn//YzuBKvHCjznOPequn3hu9HagLsBnDBOCON/taRxBOmtkkHp4I3Clmk2RwBiETALW9E6mztbtVT673NjsMGMKAuEuGLGQejpHmNuEcxl149PXvzrpntqoFUBJvnB1uDDYfv3bh80eLNNGDtXVwD6LGzCqezJtOR/7O3/7ZD376uabhJBo8tdV/6gNP//n/OvvO9178L/7iPyihv769ldoOUrrVHW5eufLy7o1uJ4v7nT/9h/74F7/yW02iZofHjcDRYXPxyYse+NxZOHgwIt0gOYqtAZ92sySK5gej82v42XdFP/aZd8n07q3ruz/+oc6gizs7G5PppPANOLz+2uKNB/X3Hsikih8sfM5EmYlTffCgTLsQalQay0a8xBnE97/hPvfVB0/+o8P3XKOPffrCu3/o0jCyIJyu9+vi6MIPZmffh5Px/OwzZ2/enKwNehsI229Nua4njenY+Cd+6hkw+6Drf/EvTnyytpMN7o2OY1K9QT9ZJ7tm8nvpwYPReBoW4zLL0qOjUaffT9eiOC7ITUxSHdw/dXMjwMpGx0fHt9/47vb5Ry6cXX/2Pe//+pe//dgFWnh/Mi76g0wiOzo+nc5KSm17FVy5ciEs/LkNufrc1fFJozgeHc17Wxt57Q9297ONnStXHh3PJ3W9QGiOD2ZKuXk+j5GKyTzbOSPBIzKiD1Vjk4gkJywFxDvCgMejI9+4c+c2X/nua7FWmqL1zX7N1DT5bLpQKsIGtra2bNI8OLq30e8OzyQ3vds9Pk511FsbXLhy9uR0jFRnqa4djKupK+skjYzt134hQU4PJ0PzoIHOeHDutCk7ma2qKlKGEZEjRGKmYpH3e526dqigLgoRDiBpnChS7EqQGsk68lZHTV5ZQUM0bab5Yp51sjObZ46Od8XUrqg6vZ4AnN7eX4ymr798t7+2/o9/8XsvXl584od+aHJ058SE7d381s2bo3LCjSKlO4Pqcn/wh/7IT375138p+GQymYjBvf2jyHTGi8XGZrcJi9cPvmbi2Nc+XR96htjaiPHoaP8Tn3jPpeH23Tu3zz/5eF8N/vWv/PIPPP/Iv/NnfnrKp4fjw7/1K7/6P/yl/2L9kY92MhFb3bv+lneer+w89r73j++Ndq8/SIYD7PTdfDaZHpJy0Fjf1KJdzZj0U0Xag5q5EOzQc5LF0TSfo4VrzzzZ6W584wu/fOXyM4yhYpCiSXr98eEh5/V4NiomexuXLs+mRTLIJAAzz0fHxTDt9taHwzNf/9q3zmxt+KaJU8rrhfdpbLTRlKYpkAKFnd7W5PjEZNZrrbkKEs5degJVmIxLpbsqOwM2in1CPH38g+ciLu7febE3VA/ePL539+Ctl36fYjsavy6nVd018YbFyMwmTTY4d//BwbSS17539/zVTMfFzTfeeum1780OT3/qxz506cL2zddufejpSyywiONbk5mN41dfvPnf/bm/9P/9q386GWrnlnOE3nrPOz4ZTRUDOMHCJ5uZSnQ
seYXlxpmz3SQuxhU3SCKZVQDe5+IYa+Y0tsMMVcOaa/Dee6yKAKID6Lh90AeJLAigLxsLWprKsQdDXgdRoBXGkaU0rspKJ8qCbcoGhHqdLIoiFyRKktEkN1Gc6jRwaCSIBMchSZIkSoC9EVIABsn5gODJqEXhI2NApPIO0ZYuBPZaqaqRqSuZCQl9CI0PHkUslHlTe/BjrwStpoiiRvG4KihOGCG2BgnTJGLmGCnVyigVGkdZlHV7kUpI63hNk9LeU1M3gbhYVJWvFQGIFHWTUdNMi3o6UdYO1vrdwVAbLa5ufODQcN2INCbxgR2LU5oVe1DQOOe5BNHChjHJC0enx6oHkeltDIcHb6NvGl97Y4hIO2FhBSAOpRWGN2WpdNIEpzGyOtYUAjeeg1ZYOEfMQlFdBVIGpU7jKDJGKHLBBeeagCDo6kYpq5QmMkajBGeQWGtD0oBiYZDKMze1Dw3bLAnYWmB0COTrQKzjRKexUZbrUJHERhEysOfaN6KCtkWT5lHCpqN1rJSpkkgvKmmcanwcYXd6moTa+6DLxikTNgY9dmRi7UKgBqOIskgJhl43DZ1sNFs4J7PTgiHkTSCFHurAIetkTdEopVTkku2hRXWpsz1+UEoVk5diXpd5/s5E+WGuUDujltZp9RAStSSAVgTi+zrIHoKCdvbfZgQ9dCQ9/OjKOsYrBrT8JVl+nmWZMATtLH9lrVptzhL04Pd98WrLAVb1Ww9fY4lw5X2Th33quJTMPMxmkqU/DFEEGBgEhJG0av9UteQBl1Y0lDa+e2VYWzIOXCpnSOQdtVILQlaWtpX56qH/DR62hfESFbWMCZYZ2iIsKNiKldTKqfUOSuKH0KNtHFxW4wgKIciylW5lsIJWWiNLwxsI0Pcnji91XCvtDwIRkFqeAMvAplWkUsu2lihIVkecA7SHAR5+bTuqvPpOkbaTjAHa8uX2KJDAclq31P20TEpIYFl+t2xvWx6bFaGSpViLlycTLTPX27OkPaotFlwFTj08b79f3tQKix6ipCUqGgzPiN14/U4FO/W9bx2CzoLgxz72g2/eup517Msvf2+ry88999yD3aPFaFzPZ6dV6HR23v3chRBdPDo6euPmy4Ne+uzZy6+//nbaO5Pno/6gny9KbaN5Neuk5n3vuyR5cIXbPbn96qs3r21tk076fZOl2UFoDvdP63HCOgpOf+vL12syxldf+MJL565eunxth0NzdLB3sDfuDqO6Wa+qcuTrNIr3R0cznkB8bmayK5d3bl3fw7Rz1/ntwdbldDDavz0b5WcubV48m71xZzxeFLOqtvboZHa4eb6nIZs25e2746KCM+fWG3DTYjpYi9Le8GhUVl6YVTGZr3VtVZfsZr10M7NRQA1kFnlpjZ3P6srR9KQazXySNpHq9roiWJRFSDrqkSfPP7i/F2vnmrLfTReVevPNE60MgAhDYCBFAQkaWevarTX3V/7iU889U/ZkVi+662ud5Gz8oUef3n/9bXaqFivmzL/+3O4/++r1wq2tbaePXd565NJgXk/W0kojF1VtdX8+KrvdtdA0RRFY9GLaCGFi1enpRBOFTkjjNIlSbwdOuuuDzcbm0+lUoZiYaldUwQOorfM78/Fcxdb2srijuG6YFAY9nlHNPjJx3E3qetrrbikzjNK4LmeLYtFgffHK+cWNsS85jWk+h4ZlK864rienE25cFmVCJkjw6Cc+lF76W53DB3v96Iwrinw2Y/aoXRTLdDYb9LuzsqGCrGUXmL1CrTpRD8P6ueFZL3suhXJUMGkU3Uv70/mBVTaLEmuTMldaJ9V0L4pSH3wTyhjNaHocpZ3pKDemo5SySRqYkBBsqCA5OqlPq8PlpUkYQHzdkCKldGic814EyqZEgBA4MhYEffDMHGltjPXeg2e19HMFQebATkKgsNRTEglgCMFao1C39xWlNIJoHWljVrl0YJWqfSBtAQRCiK0NInVwiEoAiYgAhAMiKE2eGZFImyZ4JcQhBN+IhNq5wAHiKNGklXJeTKSFOSbFwZMiiABFSLXMoQU3mrGtIGPPTgRJoQgbExMp5iAADBSW5jRWBG0IESIQEiKCIoWoSHnvAMF7FpEosqqtZBRQuFKDIlhrEYg5tLY+EUIUi1Zrg4DM0kooH66NrP4LhLoNfsa22QCBl1QogIBnBgntjV24QSACZAAfRCnTenOXXioBpQwqEg4iYJRWZIDBh8DAIfByKUAECWjZsYbehZo9syhtjNEhBAZxIbB37XqDD6FxPghoxMBt4wiLMCEt7XqkEDUgCXuFCnXbzIVBmBAYgRADMwvHhhQprSiNbWwsIkbW4rKnExlAKdKAqAERUCkQxcwhBEQS8W0oOwfQSkeKmEMjgQP7ZZY4MkBZNwjkGbRapmWvHhKQpDaoyDnDqBeLKYPWJNraOoTg2Whe60dHx01A0EoWgR8s5t3Mr53taCVv7N69NRntbPXv7C4Oef38ux4/+9TzR8NZBfPCxOvPXDn3xEY1Pvi93/wedzvv+omnO53NyDz2pW+/+PzHnvrJn/7or/7Df3Hp7OaN2287qmSb1q5enB3fu3b2TKdrN4eD6zdvIChX1q+89NUzG924Q+X8AKb3XJWf+BmGQAw3b58YgOPD2eh4ZuLksU2LWo+qSow6nwxSlm1NLgTtqhT8RjXpdvinfy45/8MX8LJ25QT6cdJQwacCO1pf83UiTUQmCzKF6HHqngV4jLi2ySLAIcO2Hrwnh3MJVA5uG7VGcCkoowdnG4+kM8EdxNhXpyFGDZWHksEAnP0nP//K3/7f2WO2PlzXcXZuM37pOy9Hqv6Ff/l//okf+3cPXj0419+4++ZulNpHrj3+wne/srGhqkkBKuEogCyYHHTSpz68/dtf+m9+59c+99//9S+c4KbS3ZPx3W+9+ntbW71apJgfj49/49rZu7v7ez/2wTOPPvVEU0W3H+wKGogv/vrBuFiEzsAeHs6Gg06kMHbzH/lk+TN/dv3KEyUsvtncz595DyQ7OUcEUbKG3U1j3OnxBbX+w17tf2P85hdK290ou52//7nptNCdoQyHzaisF2xcFbHXqotNFeoQvXrfvHWM//ibD7p/c/fpLfzo89vv//iVrTM6XVtQP1RVZ7wX1ntpqGdFBz/z15++NX/j939XEug/86NPv33zxc1hcuXbfv+ER1kyXD/r6fTuoRvo/O3XxhTSCxeS7x5O39x9YC7JY0/tFHKcZheyHt56/baujenqWVECBemJdeHwcNcOBmfM9vs/85lvfPPl0dH+xrn1xcypNK0BOlnc18pHy9e0bie7+vyFfN4UVkfrmRSS2nVvMkq4O1jnUHe6cbJ20avo9o17FS/OX75KKvb5wnR06f3p3gEpJoBOt+saL8xFsaCq7HomZTq9nve+rMPVJy5XTvbuTzmyVmst6J1bWz+Tz9zx5OTshacF9cH9g7GrB1nfB12TGs1nmxtnquNxt9OfLQ5m8wWlWLmmY1LRRHG0dfnK3snteycH73vPMxfO6b/3z37x3//Df/x0dmyyKHg2Oup1h0UxVzota1
dVtTZGEUY2qqrGKjOdTBMij3AwejBYW0NSAXTl3Hz0oC4KBVHd+NP5PATyrnJNcXow8772snj/xx6d5aMvf+21M2cvP/WJj2+/9927v7/b27b5yenx5HB4Ye3TP/SH/tE/+oe99LzydPvGG73E6FR2Hxyt94f9YUcrE/UGC5eT+LVht/Sws7Hz4GBfG8w6xqnm8qNnz1+69JUvfu3SxbNxCn/iz/3swd7B53/r3379hVcvXXr6x59+8g9+5g+fO3fl5tHRvTf3rvTUtacfnZ+MJ4fTl175dhr3z1x5Vk3yjDGLiER1O1up3bh164Vur1/XuHXmchBfsWExWT9RVnk3C01A9FI0+fzozKWrtmuidFCVTTmddvvr2+c+5qvfrfn21z7/mx//mT+RrW0zcj6fxRbOnt+ufQ1J/PTHfuC1G9eH22e+9DtfevezT759e+/yhcvDrY3j/b0o6TaV19qU+TTOOslgR+pgeXqydxjEr/c2re2rbF3AVNNyfrTIulK705PTvTe/9VKn33n3e564ejl67fP/+u389md++P0HR6Moyuq86W7vbF56rGnM7MatoMLZHRP73XJC33j1Reh3p3WWXnnPL/36l5uy+I/+wk9/6V982eHwtXt7xtikZ27uj371V7/07/ypn9q++lh7FSgylMBwkJKWwWanosoFj41EEKwNW6kJ86o4bpCjyGqpy6qqImtSpZPI9LtxPZ2VrhTAypOKMvSAqEUn03lpBImoWBQmytgihzzJkpgVWwNRYhILZc2ClSKKIggggeI4Yq+1AqXIKMwXiyzRIQh40EBkokSJ0QhIC4aI0WKitDTceOA6ADOAtsDkXVOXXhldeB9E4li5EGrX1EI2thpQoUnjmCEXF5znsvIx2k6wgiGvnCLFTWMUZXEUJABzN7FZEqXGFGUVZakoUdpGUSJglMIgaMiKtiE0abdjI3JNFVndqWcxhDAvytmimI7DfDyzUbffsVkfog4lQ+5oQamahXdNKKZUzzHMLbCEIIxZHFFiwdiqLqvdt9PBGMUc7Z666kSkAQhlzaAItXbeI4iNIquVb/NKvCMk5qZ2AYJXgkpbpbR3DpzzwTNqQfZeHY1mWWQaluAb59toTkNoEEzjBXWMitMoMsaSTXyoOt3+bDZ1DaNwUeZaKS4qMBgrrbQFIFRolF5q35VKTIeVoDIcJATvhYwFp46HG5HSjUkkn59qZXwDdcNN3ZnPcDodG6RiWglAkBAlKGdtkiq0riFQYPOynkqwyhyMatZRUAlmqSqCJSRLLgiQyT3fOR1HYhO0JSeaEA1kG1041MGHQT/TiJQN4M2jFSdaKn5W1VMC0Gba/N+7w2Sp1mjjgb5PZrQyKq1Cq2E1JZfliugqKEhw9aFV2NHy35kFkGjlM1sCApDvF9osG9FW6chLWdJKXtO+U69cRUuatQp8/j7A1NrUVkE/bT9xIEQAJEWtNg1WJe0r5vIOJVlinv97HhM9FKu0O4kEbS1c24y89HW1W79UR7U1Ye2BQXgnS0lktS8PUU6Q5dC0djxejciq5gwFsJ1ptEyM2pEQQFx+bMnLHh6ChwgIV2ofRCSidoNburMyaLWyIlmhrnY4cKUJWqmZ2tFcbsPSDgfQHkuRtmU7tH8pq9+Xh7KxVmvWnghLRrUsTuNlIri886eMIryUawEuI8OlFXtJe/CX2/BQarb6n3eOwDKn+yG/W6Ki16/f//Annv/E1SHE6q25zk/yp961dTq+fzI+eOb5pyfHg2irv3syH40OyfOla5dUpDHEcdTcny9Oi9nFzTXfLEajveGQR81JNtTAvboqDWauqoOb2TgRxu5W9/KZRwubzovxYrQgtZUm2XpicNgd3x8/8tTFECCJ0u+8eG9zc+tk72C2/9b+zQPx7tyjZwKlRw8Otncei3ayjc3u/t1D2+lQgSdVc9rU0wdvZ0kn+JMHk2JOLrpA1FXn+tuefIJ09cLmG7t3ioYf7I2zTPc3ojPrWX5aCZvRyaLMj/sb2UeeuFpN9ptiFGn35OPrxyO3OK0Xi/Fwfdg0pfhAhr14FqdjS4TK8vG4qmuzf1IO+5hFmE9m5y7YS5e2To7dye6Jn9azoiiKvLt++Vd+9cveCUNAi6hUFMWkSBNnpvgjH+//iZ+4FOlbfP0ounLxp35m68bbpSvmR4d7g17or2//5gv2f/v7N08O1db2E721rBf7buLm9cG0LDbozKBLO5vrhdmezIoyn9azvGxKEbA2CcFlMVXeqTSxSeQ8RyR1VR0fndooNcaNRjWCrau6FomzLrKzdt0or7CuXCWyWJQnly/tVDWTEUyiWEcUGSON1HUu2bB/ZbG4t7EVldXi9OjAUJVXpXfUVbGN+yRTDRLZNKjizEaSl57LSUSxstH+3qFazzJjx8fzpgxO6ijRvTVBl1dlgT01ne9Tup7EWafbr73jubq8+RSZa3U1WuTHWQ9jfdrUOUu8oCHqwfbamUU+caDJKI5k+/zW6PiAQrm5lQQpJ5OxjYI4yB01HA3OXjx4sEfZoJNszWYyur+3A1F7FQQQQhIE5wM71wQO3osLNrAmslYXzilEH0QEGueJnIiw9wYYV/QEAXzwmrQIa6ONNcKoFBEoQhJpWrmN0VopDUtuziDCwXFwrZaEERQhAoFSIGi18q5RRC54q1XjPQgCShO8IGrghr3zLjjPiES0qLyHxuoACJ69BDZKZ1nKwoDoQkAAVFBXDREpCkFC26JmtG7XEIwy1MonBRvnRQQ4sHCQQEZrrRShNrZ9UJh25UA4iqwII3HwgRBa+uOD10jGWhZu44mk9VwKrFZDFAhxEK0JFYXgWTjICokTwtI+Roi0zBXnNnkaGASIAgcUUWSCCLIopRAwcGARZqdBE7W3PFak2HsUQEIG0CZqNUCkNAKzhOAdrW7RRFoZ2yKf0FaSsrDzgQMhNM41nlvfWYu3PLBwaDj4xnNrKQ7MLJpIGR3HkULxgZcPTkAkCcEJgtIKlQ6BfQClwGgdRSaLbKS01pqItFIiDMxaG2YIwD6wVaSQQgiEBNya/LDx7Rsdk0JfB0QIIXhmBFSaOLAPrATIGM/cBPbMddO0EK29CqI6nN67+8STHziZzAHT4BrCgGi8lzSOQhO44dIxKWgUN3UzKysRqgjWh8Na2Rt39ytpTg9m86I4Km995UtPp1c/XM+m0fbmW29999pOZ/fN669951VOk93Z/Y8+339wevv6yze4U/6Xf+Vbsshthpff9fSv/tKXfzy+Uk6P3veD77t+rv/lr71s1MZiXs+OTweDTi+Oi/G0Pq3H9SztYjexJ8d1AAoa5iMZpFqX7kw3iiJnoAKGsx02VqceYsC1iHfOhkevwMYl1XuyZ55OYNv4bk+gB8lZAGaYBE8G+gQZ6jjQBoWAYY5cMYihU7YZA9ZiSIaWzpCrDeTa6MXkLdVBrcnQ2Xy83x+6QB5gxjEgcA1VBmseOn/jP/7H335pmOqrVbX5o/+P/6B3rn/5QvN7X/733tq992+/+t3C73TW+utPbslrLx/ePW2cvPf5S8f7e5eee
x/oBMAjWm1sAKegVBQ+/VPv++xPfeR//O//dne99yM//dmbr9zoDaO6mj75rsezLEZ4t4COgBDY8ynROoEFePre8Xe+9eo8sptrncHO5pCnb7z7Kf4L/+sfC/BCA2+aLtinMt/UlYBNz9SwwdBh2MJ1JjgWGG38sPnwuxqYOazCi6/AnVtusJP/5M9d+uBPf/g3/6/f+cYr4V/+3mg0j2unCTUC+AYDZUWNLxzzC6/M659/8coOnt3ERy/DJz9x+fIzKu1OYL3nQv3g9qv/zn+6lZ6P/u7P3/q91+NLl+xbt0ZXn+hc/x39O1/Y//iH1Lvfsz3+3uwDH3vql3/+64uc06zesMhe5kWRF5NhoqeHu4tiz9oOQ2+zlxzsvolYDjaiy49euv36/vj67RvzcO097/vkH/zRf/m//ZNrc6FOxmF+8/5Jb9j/4Hsen4H/5S++AQAKm9Hx/XxS5U3dVxe1snEyAKsXzEoooBtNZybt5VXuq/rk5DQb+Olozyjob61NRrPEqEE3jVPKeiky1lUexwkpEmRBzvpZOS/Heyd5XY3z/OKVS2U1d8CPXDsPlWK16Az1ztlr33nhhUtnN7Nu2jRu60zvYFSXda0C3b176BoMXoqmIW17na6q56T15GCftera/oWdtb2T8f39W8MhXr5wNbhaNJSucYVPMlG+EA5GR0GcNhqEqqZBEgjKe+p3hvPJQfBNN+symEWV11WlCbIk+f+T9d/Rkl33fSf6++10UuVbN3bfjkCjG2gABAkmMYkUKUqkqGTJVrJlezSWPc6WRiPNG8thnP0ka8aWbOvJsyQrmZIpyZJIMQeRBECQyGgAncPtvrly1Ul779/v/XGquqH3utdCY/W6feqcfVLtz/4GnSTWUr1VH4/vlLNiZe1oJv3u7d3BqK9q5k7/1sqp7luTN/+lv/H3Xnnh+V/76MfoYGf5YKR93mjp053GF774ByfPrr77XR+88fKzw1vXfVHeeW04y91Ks3b23Llnnn55ZaPTaaw++sADT3z+C5NpEQTZ6tGVw53xeGC3Di586EPfu9fP3vzedxTpeHv7wPIgMq3v/o6POMkzXT759CtBjF976nPn3vzecw8cEdNBZ2PpzvXDpN44uXnu03/0+w+94b1h004G4zS3eV4GQQDJrLNytNVsDQfT6WSowlAFYRQE/d3dfOrQpt3uujDqYO8wTBSGil3RPyzb7a6sMyD2e/l9b3z3ytlzT338E/07F+9/qEmc2CiS0rMUKEUxGvmSolL0trbe/e7He73tRx99aOfGznQ0btQa3pbjwazRabSWm9kkm/Z3ptOx87NaoxGbANggOiWU86XA9Oy5Y5cvPXX79mUF9sixzWE62b6589Ab3tpZ6bz56BuG29uyVliTlWCYweogneLBjQPTFde2Xjt78tif/PHX+sNCF/5ko7vRaS8/cv/Nw+0Xn3+x3083Ws0Pve2BW4eTAad6rfXF528G//UzJ99+snoXrK+0+r3e+uZSNhlHoZkJwEbMuhQeo0T7STrqzXgCICBH50sC1j4TJhBGq0lvEkWyuZw4V5YFS+ml8L4sJZsoqdeCRhwv+2I6tTMHeRKSCWtlLq1wKTswWivhy9KWpdIYJqbwrigyhUGtEYMmV5AG9lkqUUmQ0kMjrklRKCUYhSVFToyyjAXYKulVG601oxQsywk6pLTMx0UplM5mTgA75pw8CpRCFN6SEELIUIc5lY6wtrTUTVYmg11JBVIplNCBJAbJMg5MFAWBNkYbKSQLYGZpDKEWMiq5nM1SYWfg3dJ6bXWlEbea5OykP0ERTA4Phxk5xQg+T7MyzbLxAPCOR6njWhCFzfZSXEtqtXqhdJnrbEz92ViWvpEkwxnpyDOX+WiW7Q4k3mKGaUplYT2W1mVCaZAaQCBTaEJAAQKAPHnvyTKCEFqKqmAlYARAGSjlPM/SoqDCSx+FhQ6D3mwsqqZmpUvhlAoDHYOHulQCUaInIk/sfWmtE0qjUBIFIARBQCBAKUYCEkWahWFcj0OpDHgSyAToPQGjEJi7QgAHRkdJJgMUaNmWmfA+iC1wPrVcOARyEzPueyp7tkBAVEJYZwZDl1shtR+nZRT4UPvpjKJIoYr2ptM4EchOWauFjkN9mFtHLDN2U0dSkiwgHCdYChllWeEE1LrNaW8UxaL0wT2gA/fUF3MEcw/uzJNzgAEq1TzMIcyctsxFQRXzuIc5ELFKrb5rk4K74UhwF73APGFGVIod+jNABQBRLkQldzVMdx1FFQcRC5CFc4yD1dfmynS2UAUt4kHn4Z+IPM9DZkBCMY+zqfKPkcUCVFTiKa70O+BpjtSQge/2Bd/lDrxQ18wzqaukCASBc0GQmDOYinlVYiyac5UF78A5FKmMebQAYLDYxuL3nFvBItyJsTpoRgaBxISLmGioBmpeT19Jhyr50kI1VMGpOcSbq5ywKojmed75HBotQqGwShSab6LS58wlOnOY9zoLWRUDVdVIM8zzyxejvzjIasoHDCAqex/OzY9zKyQDkGdGvguEqlFYZF1VaG/e6lYFdCzkb1UV9WKQiHCRczRXU70eFYWSXvrSVzbPP/zU15/V2WzlyJJ1HEXJkY37BDbe9OCb2svBSxcv3t4dnVheXttYv3zpSlDHteWVjhBSwlKz4bnm0wKFGx3uLW/e7108G02tmHQaEaOWgqJmWECuUZ060la57mtNeXbz8pYlKPKi02pMR8PBJD9x6ui5hx9ws8mR2qYtyyQOZ+PZ1muHKvQnTh598YWnNo+dqnVkWE9cqlfNsWFxOy/6QDLGoFavDXtZP6Vt5VZaDsQkiNvTYTYtbbelRyVnI5cWquyV4145HI+jcNxtNaAk8sH2nUEscTzLtAqLARfjvHd40Grq4WgYhaHzLitGKLWWyJYHB6PSukTKeqfebHfKLJtMUh1F+8P86edu1aOk1SY0QrJu1o/s7uYvvXBTKi21AKWYpIC4zP2pY833PDL57m+hJLweR0uuOHL7tWGvf7X7wLkgcqUOZ4e9vdfwF375hgqW3veO49aPC+4pVRoDtUaYNGvT0uW7s0l/tHxfVwrRXW7sFn0PNBmndpqFUTgaTzxAaIwhmk7SsA4xCMXQm06mw0MQEpRo1JfSbLhxdJlK6GchS2+nmRRaKzh++ngYQJqNNNrIRERQFDOjdWgEQBLozuamyYoeeZHNbBiFehXH/bFk47kwMsz90Ptet7Ns2QaxZmYVYmscDvb7j91/397uLggO4mj7TpFqbni/1IySOHQk6o2NMGoLICGckWp1dRNEYt2kWddBvFLisNWJnKPJCIhyz2Jrb3tz8+Te/kESC+eGaTpByEfTg0Qtz7JRmtrZ6EDrKG7Uerf3iyKyk0Ilre07QySVjmazaM53p7McAT05FOh8dXcDMXnvGSHGUAFmzmmpPZEjYmAlhVGSmABACaWEKp0VRgBCVZagQDADEuU+DYwRAhRKgXNzqSMqnFNCSCmc9+BZCA60BiEFIqOUKDwRImqtnbNSKKkUolKalZQMkFufOZ+XJYH3zlfx+kQkBLOT
WikEkEJqpSUKBKw0MwjA3kslAVEIlBgEpmpYYwYmRwBIbEvvAmmICBBQgGRE1FoqLaRWgZCSmGme8M9KKectIkpgRqqUR1IIKQWiIPIChBTIwNazQCQUnr1UiogteS3QEwMQE4v5pKryt0nvvZRKCFEVmFGFzJkRhZRGIIK3AoQQCMTM3lNFaXy1cFLavGoiAwayBVW9Y0IgCgbQQaiVAhQCkV0ptCJi663WptoYMBdl6axjJK109QonREdUHbu1ViAyQOmct1YgeObSO6mk8x48sRCF954pUFoJqZT2zgqBSmjnnRASBAhhyDsmkFIYqUKpIqURsUr/q67JuypR4LlarVrRsK5UUjJwYcs0yy14V4V2M3tiR+Soin9iCeC8YwSH4IkBwHuqMCAsXuvjQX8rfaEe1ON4Q8nENMLDw5tSqlAHSXtp3KfdMg2bhgsKwwA9aIfTvgWHkGZayzefOXZis3FJBLGOJnd2/vB3f+7+x99/eum+155/TsAsC7Lz5+KbL/O13ujtH3inz7KJG0TdtCxlMcYwaNy4nL526VKIerR1raWzV559UdXWJjn0etO68o2VgIWNWsmR2tqVi69BZIT3w9tWWQhkurbq47rvJtisiVabjHaxhDCSQlGn4UOggrlzQqoOwXEFSxE0N0qoO6dsviT0EcglqVk6uhHVG1yGAnaNMV4yQAoiRbUuYODcNe87OmhDfkTqJkBTyKGDwkIA9cjKmzFkFmLZHqbwOQPnWSw5wBhqZbrxM3/7/3752tKdmx2lVh568JvubMexFF/+0z863CBGaIW1m9fLn/rJf/HwfdGP/vX/+dG3PLr12lP7V0fABzo6xyosS4emBHBUeCnYaMEwZRgWkP6tn/mIhkYJ5cq3nApAWugzjBAKDyBBFkAIUxKpAOHBsb/67/7zm371l57+2Ee3aGym6c0f+aHmn/+b73DF1xmnUt1fUoGuD3GTIclBMKwAJzavGdUCdYzcblFeVpulmm2rm3e+++1wc53e/2P3ZRuU11/85h9vfnt04pt/67k/+qPxV56H/UOqCR8YMbZe6BCU0EZokGUkn9krPnvZ/fYT15eT4n3vrL/pjUvn33KivtI1VPz571/e2h8eZi+/5w0bdjbq1uSRb5SHu7Siounu4bJsfO5jF30auoKuXp4kgcFADSfp4cFw9dz6zuHBxvqZzWPveuHZWwBFWaAOcDrOlcZmO9k53J1OoJgun3nwZK3VvtGbLgWBjlTcDlon60fffOywt1XdBXZWpN6xs342ee3JFwBV0k42Tm/GrSNRNzkYpZ5UPsqzsrCemq3WdDrZO+hpCTJWJpD1pNbudnv9QxrmaZZLKFfWOoI4m86kCWYT9FnJgHESbu8Ps2lulALhbt3Zbaig8KkHanZqjzx2drR3YAwiqDz3yqhhb4hCEEifu7zMmksNkqa3ty8Yy2LW6jQy61Vgjp9YdTStRX5361Y6DG7u7Jh2HCnUYei8GI1SLVU2nrSXYgHUH9+ut9fGs7HCJCA7nc7yInXeogQqcsVKysgo4W0hlXBcgp3Vo0aJNstST855n4Tx8TNLe1eu3vfg202SX7t6pTzYm92+eeJE6+KFK7VaKEsK8lHt5MrKqZVXnnvq6jMvrKw3TaP+zDMX+56ev7b94XxpudWBPM/9/u3r6fHj9WPB0jRz9Wbo+2NA//7v+cD65onnn3qxdXY1H6dgMSsGJx7pRLVamtvjR5cvjQ8KP15rtChNs76n2SRNi/X77rPGrBw9853d6ObWKydrx6VCASrRRkqphXPosnKsQ8HeG2FmwxFEqYAClETQ1mecYxQrRLu+tjrsj9pRQwj0DEqquNFkLpJk5e3f/C1XX3v1lfSLR868td7qujLNszxq1KJmiwp7bKP7zDNPdVc7cZSQUo7twdaNlZUjpOz6Znc0GI2H3tmydJnCyFlI6h30s9lsPB0XobWIZEK6PXj16a994tyD73v48XdfffZLcHhj5Uh3986h1roYqPHWuOgPmsePbZ46aUVj/zBdahzzMrBcPvSWM69euFQITgLz7scf+uA73vHic092GnD6yGqe5bMcBntbS0vJSit4ZTAma5dPnFx/6MEnv/DkfPHMQbPRdmWudJSnRdQwSZDMrBKoYhNOxkObpeyFZydDU6YQBgEqRSCBhJJxo5vU68oI6O2NJtM8z0vJRAXW683Tx44fXTk9Huy8eO2ZgrM4DHUAoeCZ9458mmZF4dha9CQEgypUWIRtIZXzOK63AmPMcCtPD+3kIDc+NMJ4n3kmBzorytI78Ja9ZSACh8xGGyFAJwZloFnMoLQFBwoZKTRKSpHlpRCAtiQBURggQmDCVq0ppQBtkJURHEPcTjDNi8KWOlRKq7KwlYveowREJXRWZMYYjUFmS+dKAk9UgivdbFIcDiwNleuW1vnedJoVaV6ApTBSpVNFOW9WJ/LkymI8KwY03t+WgQ6bjbjeAlC5DTKfMPjJBKJQY+aL8ZTZkdLoyVnHwhhj8txqrQCEZ460UQIFahMEgKKwuTHsvGMAT2DL0qMAZiUEee9kVbXtFbInm81skVbKJFM3AaGKIsNCSRShkQIwswUSKaWM0R4ZgaazcZ4XAI49S5RSCHbEgi1ZY1RkdDeuCYHkLAg5trYoCgCi0nr2Hp1ASZYBDSA7q2Yz1jWjraJ8JmUUxfV6LPMoAwlhJJTSSWjCOE7qEbkCmVuxACbBoBCKWQbKNw1GQlnSyGQLRkRr2ZekpoX0aGIErZvdSEprp+CzPCv8NJ2tHmujy3evzd0GvNDiVKVeczj0uvwZT5XopGIfAHC3D32hsVkAoPl8fK434sVm5z+BQohKYbPwpTEiMd1zYMFcgAOLOOOK9lSNV4ty9YVsaKEtElg5rBYJSguN0d0it/nm51k2PNfJzCEVLXa9sqQxSmSoFn0XNGEBZbjq1boLruYKqmpHccFbKrNFtX0GpjmYAmauMhAqvxUDMQhe7Plce1VNyeCulwrnDKkqLYN5KhLwQq4ECw3UImJovrOVmIpxwW7+zK+7RzRPEFoIjOYJ5QvtVrVjVXZVNS2YpyvNtUDzk0YAwPNkDSABzCwQCEhUaUgA9+RfwETMQCwqwMS0sB7O1Vs8/2kGQERfMSOcoyKeC5JwMbZ3d3OhiJsL1Lgak+rf4kIBd+/sv04ZdVdrNEdF17aHs93dLz39jRlhu2bYlcO05+QzJgjwNBlIDq/tD2/d7ITNO9du1RKsyZKz0cHN6wx0anV1NLLOKOsNjXvHljd56nxetsOYlVBkUXBuM2QQUg2GwzCKQJtGtxnG5vKre/WknqeHZSkoYwENhJoF57wPa/GJ8w/c2RrwWC531+7cunwT9pO4PtwZBiQFJmEQX3j1peXVZLlm2stLlLXSLF1ZaUwngmvi/Lsffu2lr5UF1Zeag73+Unt5c6V5+bnrlkVzqV2Mx1EQay0IVZna1gp2V7q9/cvLK8vD3oQLF6vw2Mbx3d5OWuYuRmPKdq0x7A8LlKOizHMHgJ1WvT/os8JuJ5baoOB0irduW00Hx477/mC
qo3jldPsP/uSPAEEp9IjkvELFLJyQb/6WD/1PH95vq6dbK3i4Bb1hHjY2Tp4+6Wd3qBwvH0mePdT/2y9eF3D0oZOrxmSUT5fbuLyU9A8HaX+cRPUw7Ozt9w/70+4xOLUymbheme9IMJBlsdRF5qXSrESaW6uUqWnUtt2RJqS9Q7A2LGyuS5iM02bcEj52eZE0NppNPbr16l7/YGkllFKy1ASuPxpvrERZ6YQJWp0WcBSFjdyOyiINAh1qxWhrSZnns4Cc8NMbW3tB40QUH9Fh0Gx0J7MtJbAsrHOFAQYLuwdDRGnZEqrbw/HSRqQ1mtCUrkwaneXmckoa/bSYFNIYrBsZBzQppZFp7yBphnFrSQAf+EGnq7ZuD8YH4wNMS8By5suiH0euVoviMPG5a9fiRkj9vdwEVG/AdOqLfNaNalMraTZpN2LVCeNkfhfYsiBCodBooxilRCkFM9vSVqFhKGRotJTSk8htjtKgEEKowhaArEQAVVY0+YUiUFDJCOgJUElmUGgAQUqtJCohyDtk9sS5zYmqcBlGsMAohEABUknr0VoHyErq6qEgBVYIpnROAJNnjZKqNQoCIVFJVVlhC+ukwCDQkTECcZIVNH8xsZASGCQIiaC0MipgACZPQF4SMxAJBFk4x0RaayGQmYzSRmlZmX+rp6EQ5Ik9M1tAcN4KRCkEe8fMApSUCoWUiFXoNgAIAOucJwJAIqryEZmhdCSFAJYSpBBUSTo9MaAkIkDyzN6XxAgAUsj5q4xYgZICGYElE3tPnomtK4UQ1eoMIhKBLUtH7J1HBGJSSkVKEREIFCgZyGhjbSGQlYqIwVnHzkKVMeRJa+mY2FcuLgQAFEiOlFQIYJ1DIo0ShQBygTYAoBQKDURcEjlLkpzQwAxKaQKuxrPK3hbAoTEIIIXQWishhACBFcFC6121BkNEAoWQCtkjk/OemZQU7B2RLWyZWesQvXNSGkB27BhQIDCxdw4YmMAzl9YqE5B3znnyXjCK+emERqdx/6njF576ypn7H63HofQQch5LPhzuHo76IpZawdqqGO8UgVJCYTG2EoxG0zJqqWMEp1uvHdbDxu7N7dVOfeVY8/3f+eYv/qffTpZa3/Idb7n98mtPfPpS/06+0VmvAX7qM0+86fs/8t3vP/uL//R3wnqjGGXUbP/9f/wXf/an/9mxI3U9Kf/4o9fCI6xVvHpk5eTqxmvPfk3opOiPrxwOxwWAo27Imx08sUobR+j4qfL4UZ2OrSNQCYCAKEKvSUZgIg8IuBpDveAAbK0zg1UBKxoiFKEONsk10O8HSU2snaXCA+V28nWVFDJ+wCu2pTZwVEC7cHthOCWgWrTKEDMIgibCSgi3WBbj4qWLrx6uP9Ru62M551ikOjwGUP/5f/OLT3wNs8OH0qLO4RgbwROvfGlvp/yV7/np+JOjX/qFX/hXv/QPP/PRP7367MUf/6nvuXzha29aXn/rqc0VVdzYu9Xc+JZf+oVP/vDt6ft++PsAEMFRgAiWnUfFCHH1jcADCCCGkoElGAZmaCCEBCEDAjgABlAAhZTbEuDH/pcf+KEfqWWjdDp4eu3oFsSvMGUC6iAiNLk0KwzLCkqGlHgiUEHUAReUGarwNEQBw14BQ+yYMz+ydEK1bRIDSAbEqOvAvvcH3/je94X/8u9+5bPP5z/5o2+41b/2pacOr/b9cCRzi+O8GBRRHIlWQyoUt/fFb/xx/t8/vnXyvv1jp/E9b6p92ztXPvhNK7l1t5/u2x3cuzS6f13KLO9t99/4tlOXbmbTnppSQMod7zZf7k2LUZFJ/do3rnUV6kgk0vf2Xjt/9nxZ9DqNB4SYjA93pqNZOkwbDVOMDm889bX7Hn2o1W3vbfWKwSxWuH62E67rFy5/abw/qO6CesMgwspG89r1spTUH07s2I6fH9dXdh9+4+M6bJVEs7IY72erRztXr9wQgeout+r1WCCht9l0BKvttY2Ng93DfObCQI0GGVuSUkYmVBwMxof1ZtJYqZUMs9G0N0rve+ToqODZYCJlqNjvXt9prTZNkoy2DxHEJJtayYDOmCBqBNMBESVbe0MTBnGtcfv2zaV2QyGjl2EcOZuyLQZ9Kev45ne8W3IqhZbsfV6QDhqNls/LyKBknoymX3ni9z/w7h+OIyMk2CLN8mEQxlWkBAoJjgMdZ7Y/mQx1GAtkZkhqSZH2oiSyXoZRcHAw/Mazw4fOvf3EG76ns/Pcay9euPrcy1E52740snlRtuPv+fC7di7evvLa9aO1cOu1i+msvHRtsn+4OyqoRFHK8KvPPvv9H3y805ZFGZbDWa1VY6mzsnCH41asp2NcXT7hhrZWwiOn3/Ta5auzyXB3d+/6xSubJ+zgYNwjYGGPn72/vbR06+rEFtODS3sMonu0HR85vrN9YKLWUsuh9670RsmoFSmh924MokTMilyKIDZYpDMFohinOoxrreXe3s7h4UgKtbl5bOfmzdnhASob1w2CLvOUVKRkwMoMh7PlIyceaDeK6fjCc1995/s+iFKJRo0JbOZtmls32VhZOdw7kFYc3Noycf3Iqc10f19plRo1nQzrokW+iOJobeXhKxcvHt7ZVYYAQ0Uy0qFz0+HuvtXRI2/7fgPm+VdfLCbDIElOPHz/lVd77aRxMOnfyPg97/2+dLCXZkyxWF7rBBofefMbU969eOHSQ+ffcuJ8IzHxeje6eXV788Sxsw9vfvoLT1AqItMIVowK1APN2sUbB/tZefWFZ7/WLr//L33vT//25wGgVkuixJB3e7fvuBJIsDhwSasxzSbj3BWZnE28B8q9qxmstyMV67gRKUdkyyNrXcdWotEIEnSoZW0p6Q+y9bWjDbPUaNenNNFJcGRtY5QPgUug3JIVCMge/FghFOACrZVgRxl6H4hEI2ckZhNOXRbIQNUkurKcuaLgMrdCaCXKwnNJaMtSIAsliCUwEUsmUI6kYFZsQpVlGDIqzZ1EKJQlCkLhLASBrHcSISNylER1BC+kEMZMU5sxilgORzIzEYEgoVFIKYTQwazIk1AnWgN6JZRET4LKooAgZHSFS8ssP3AyH8yGvZlCQR5ns1JEASCXaUmeJbBlbz0LKURoFFZRzsBEo95oNi2lNFGjETWXp3nunU2zWTYZKSmJQRrQAsGjMmC9T+Ka0qosXWiUVsogkWAEKcDESjEghYIYrHXAUybnbEFM7Ll0hKiUMNbm5HJAIYUgAksE3pMEBlbSCKFS67TRKCRK6YlNGAGK0TjN7cx7rxQIhUCAUkohSlcIAc2aaiQiER4JrSNHPkJU2szygojDIGCQ3tnx0EkTJo3YFQJCrWwi2VFBbBy7XBvdXY+TIFyKIxai00nGI2utB8cWXBhFRYaWIDSAIMosbzeSODZWxlq27cgOe+Mmw2SSG6FDY4h8HIc6ioxWQErLRubxYJKZJDAai3vROTj/Po+CgRHlQn9SeZsWterM89awiivMp/bVnJzucaMFi1jgpDk+uYsM5lqdOXKaJ8rMXVgoKgC0+ES8KyOa96MzL2jSovGcF9u/q+uBhfeIFn+9iD
1iWmhM5vkPUPWpz4tuALCKKMK5qGYRdQNVUS+jr5KPxII34EJiRDhXBBHc3R7eJUQEtGjzqgaO5ns9Hy7GedbRQiOzGMnFfuPC/XZP/lPBu6qDuYJDjEDMQghfRUaJCmndMxXetdDhPIZqMbw4V1rdozrVFpirbVbDvqBxsBAoMd0T68x1YhV2vDdwr0+VRvAMJNHzott+YTGkxR7ObXpMWHGr6jKs+FLVY0c0R1Z39wWroqK5JKu6MO5CxeqohRC0sCbeuxgWF+qfQUUf+O53xVnrt37tYw88eP/LL3/DCJ2Xcv+1O299+IHD26+ef/Dxq/uHp86cCkzry5/9VNzeaB2p3bq1U3gHkB/0hnlKPgw46ea9y8eTmDOXmKhXTKOwW9N6MNttN1dtVpa+MNL4zHvkLIestCsbK0c6nSQyr1y6dfrsA1Is7e9Pm0eXVk6cy8fTW9fzoHZ84+xx8GaptLv7L4ZJEBoznPjRbLi52j61vn44vb1+tHvQnyHr6XjihdWJdjq5vleUPvbT7NT9J6deMTpIvdbkZrN8KMiLJKorlPkkD0Il1eTO3o0QMc1HzWbMgHaSsXDr693ecJpbv3u4n9Qns0nqPViM0EkE6I/GSaxL7wT4bi1ykrUwUcDIdjotwyiJm91L10ev3RgFJiQkIGT2OlLMaBT81v/4+NNf2f2lf/iO5gOmkRT2oAwEBjqdTg7yqRsW6//Hz11xeP+b3nh0tT6Ik3w2K4khHSK7iNk4a/Z6g1nGkGxMy/JwsD0txi4v4kYsG7XpuGTgldbS3mRCQoynWah1ouV05mCSdlodKHNpVavTTIez5ZWjgMrooF5rD0e9vCjqtUa9Xt+73W+vtJOGLgvOuUS2IaByEXMQaURZTvIZFchoAmP6vX4tkXmRexCd9dU8dUncVDqybEy8JGQ5Swsiajeig+29Qb8fGZ0VbpyXQnNSx1ajXqtH03LqWGqJNVnoGGSpspLTNFd1IOVH2TSud7VUoRFZmkrlxhNromBzc21n9/ob3/ruCy9eBdaMwWzmyKl0PG3XW9vDw4NJttHobm3dqtday8c6Wxd2xpNUWqdEeeRYUmvF1V2QhIEQSkhRBRlLRO8IhXDSzR9zxPWk7lzpiaSJlJRAzhNKqQQCkXdEQiAxpEWBgIHUQRB5b7WWQkkEKNlLlFqgENITeXKI5JxzNgeGQBtk571nAkSjpCDywFC1vyupKyUwCPKV5xylCaREgXnhkDWC0FIICYha6cKVQkgUUijNjCV5InLeCSU9ASpB5EGwCQJAyQBIQivlyCKQ9c4576jye6FiAEAlNBMjzMG3ZwQhGYQnBiZwhBKVDBBQomMEz8zsnWdkEvN1DkHAjAIFC2IQUgktUFTrNASMKIWoHMYKiKx3vqo2QxaVFghQIBI5XliykWFefI8CBJD3DEDECKpaFgBE59kTeQAC8JUcE9Eje0RLXrDVgEIKKQQgOFtWCy4CAaS4+0GeAeZRSqSlFihASEJPnqSoXstEnpkIhTASiZkBpUCQ6L2rNKYsqqbOuSBIKBTIQqDRWqBAAimFEoqBnCMphWdfpdOR91oqAJZCkLWeGb3zzjomKQUzzrLMEhEwA0qpyXvnnbOWPFUP/ipxXEnlrBVVziCiNMY5x3dXrgBGe5PDxsEb3np6o9Z66fnLOCq7zdr6fXHvzl4UMoFpquVs0lMj4tQWU2lL7VHpJBhaOntidXdn9OLl106dOg6aLtw4uO/c0mc+/1VQNCu3P/3xT924vC9yXLtv48FHHqgdb/3YT/6lL3/95Zdv3n78VH0q9cUX/awnP/7lF3cGh8dORXeeH63ft5LVG8XB6LlXr9pB0EzMaHu3KWZo5IOn5MoK11v26P3q2HHhsCQJJVi9BIEGQLAFeMU+AqxDGQJo5SIBYSSM8LDJcBzBEIxCoUrQwF7UPMEuQAhBi6DudQ2kIzhkXDPBQ6W1Cr0Mawyj1BVaLREEBo7OcqyF6x//2NNfv/bC+z78Dj9IL38xOvfIA5/99ZeKgXhl6xNy/ZH9195yZUeu6mSW3xjPCs0UNdSROnzmqd955ze/7fOfPHfzucNbNxSJ4onXXjj72Pm/8i/+jx//nh/7tg+958Fzxz7/9MVvfewvfPT3vrC19Qs//Le+3zQbBIgAnksNCiFg0AxMIAA0grTgBbgSJgEAgJdgrU+1TIgkCkAqUGiGiMDoOptGu7P5kIADDyOQywXUACKAGkDDw4qCnncDLUlAT8AMcNXoGvshu0OhPMiVshmCqpFaQocKADFEMExTq0A3sr//s28f/MwXVbj3g/9z8uHvpd6h/twnp/1etnl+6dd/d9AfmKwkgWwaqAXYHC5cs89v6U/86eTf/NxTGMjucm2jo8+dPvWj3/fof/4/PyFCPavV/+BTu+Ms6pfQy2hlNfzJv/M9P/HPf+NiL6+vhGOEySg90Wr63m3PB1954gvHH3ik1Wp3OieLMfpszBaz6ai2HG7dvJ4s63d8+0OTL19onowf+OZTv/M7H6+LZO3+1VjPY61B6uFwNhz394eu3qhtnFtLJ/3DnV7v9h0Cv37iwUarOdotptksy0Yi8kSqJCa2gZFFTizF/vbeJM3ztDhz/5kbF28E60tBEqRF+dWvPvWmNz5qQjMcpyRpabURJcE0RAu+sdKZup2gptw0vXR15yTcl9R93EnS0Yy8q4cRSSxtWUwImL0rZ9MUQQ/ySXtpRRo1ODxEjMf9glk98PD9Fy7ePry+8+M/+xd/91d+bXT4ypHjm+lkJqOaMgYKLzXMqAzarfOPfRBUbTIbNJsSBahAe0QCsb5ydHf/NkrCyBFRkNSEFErWMi/y0QjQpZP+4d5upHUrUtZ3P/AdP/bL//E/HGlMkrayIa8mrZdfvr3y4IkT585ce/XqZHDwHR969Muf/nKr0Vq6r/Ps87ezoswJMQKF+rX+9lu+9Z2/95/+g6f6I295s4iiRkOPd/eSRqDDZP3k2mhv6ga96XDyzPMXR7NhMR2tnFhvLUejUd/ZcmmpVZTgy3J8Z2+wO6utqvvfee4rn3tiNhTf8aFHX3zl6trmUtngPPedlaXpeDodz8BhEET1rtq/czuuGxVTf7DfXl7vLK1NZtl4VoionmcQBWF/nEeNlif76mtPn77vfKwazVYzneZBHHgQtWbLulKLRthurq26Sy9/+cjxx+qr67Zwoq5MPRz2spX140LG+Til3rAE2B2Nbb/PNlfjca3eNHHgiQHkpZsvFdCLfGTUkkk6KpgVxXh/+9ba5trOftrsrB3uX9PKZXmOgRpsj5tJbfvWSzIRHODY6Wa9Ocmzs29487X9vMjSfs+ZqHl07f5T5x8+HPvh7mHcNMWRZjoaf/mJl1/ZGZw6+djbvulbn/3sRz1mveFsrR3zpByDurnT29m6Xd0ExsjQGK2C5MT67n7vcFI4KEHJ2Sgv8pzBUklKi1DowAujealZQ4llbgUqLkmKQFEspQiENaFkz92lpSRoSNJlAWHEg2kKItC6Vtg8LyZ5mSmpNQSNoJXnhbUzUuAYcqII42xcc
8KzUuykRMVCQmC7R5VL3WQvlYxlKWzuPLLUmkEzkJBSsFQojZGSWVgLVIZaJI2gqVuGCqN9ZFSgtA4apSOACCSaSCkjwWOZs1Rh7q0DGxi2lrSSrVYSOwIO+kXR6DQDpXLnoB5YV07KIjBaK1WUhZI6UHpcFNPhLJ9NA6mtAya0/cwIIZUkFHlRoMCs9EXhEYGVcQSRkkpJI7Vla0tihtJ7W5DShHmhNdZrdYFqgj2JkBelElIZHRqlUHiaAwLyEJmg8p9ZR0IGpfdcxY8zBSIyQgkldaxKX2QIRZF6bwGVBFBCCmkEGCAvAC2VnnzpyQOQo0B6JZX3HvNCSmmCALW0gzLQikobK1V4RqkEgEPnHDkBzFjYPELRCbGhZFlyTqIsmVgRCC0xCkVhC6GwsM7EypMbTcY6IJBCa+FnaVZOlFIuK9nVa40kUSaUOie+fe0wmzpgH4XIGjRpKhFloCUIoNBoQ67bCq0K0IuiRGzF5WQUR0IZGShjlKwvNRgwTQexiJ21QjswMEhLiUJFjXsT5WpUK2ZxVzACzMQVJJqjontqo3sxRQuhyV0gULGnu2qPe3N0eN0f8y+pDAxUzd3vlordlaLcTRiqop5xET5KCOLebghYxB7h4gMBYNGTznepFs5b0u6FcM8NZPOUnrtIyc9JFXCVWFShD7obCz1vAoMFTQNaWLTufsmsfpKAAAiQCKqDrD55bg2DubGKK6NY5YdDxHmg+OIcVJ3NeG937wbwAEPVaV8JppCqHRaCsYJO97xdi+QiXMR640ILNQ8q4vkow70kqHkBPRBXHldAsdBgVUePd4cAAJgWMi8EFAsgyLwosAdiAA9VDTVUWRe0oGZ3k6gXv5jmQqDqOgBGJCJe2N8qWAWLWQYsTgTOLwrGucTodZBzkQQl5q67Bej7/1EVNeL01a89d+Zk/W/8xe/50Z98rkBjai0/5Xe+8+397U+88vKzVB62ko1rN7Yby0dVsrJ965rR5vTJ0zu7u412987W9OjxU3v57kvDg25zakAwO2QgNx2DSIUMfV3x1CiJQeYsjkve7RUxQM2UY0MnHun0rQuThpCBLdPRwWB/a3jm/vt1KxiVxCrotFs7W/7Y+uYszxyDC6ypJZcGVzbaYZ7PlDymGPvTadxUbEtjIJ3Y2bRx7PS3br/28vbeyGd+OEvXTLfW6px/89LeHbe9NdRYd9ks7nQfPnN2Nrng0gIDcJamhoRSRIVky95zXhDLem11PHNcylocSBVO04JdSm7qLIaxmqQ5lKCiAIXqriz7YuoIUkc6Ur/xy38covHWgwIVGi0VMkoqTKBVXs76zX/xz5/+53/7wWPn80YgXTZ2NFw+vTLLlv7y33shn3Uee2Sj1fVh4I+syr1d2x9lzrmyLAvnyQMqE9VrprUU1sss7fmijIOgn2YSZAneCnFQpjJQNvUzzlVTRu14Npwq0KrhJmkWBEGa+bT0V27dXtnY6DYbt2++igp6e3fCpTBjXj62kc50EEScDXLrfY6BWorjM0WRFeNhWuwj+ABMWgjrc6lknhcmisazSfdId3qAs8m0UTcWWMgkzfMwDj1ZI+VqO0Twaem6R9eeeeIWhO3u8jpRPpsaXWs21lvT3evtZtORT9N+FC6DT/PDrbL0jdZSlDRcJp3XQdAO0GRpT8n+8qpqqHq5eyWhKdaTpNUtJsOZzzaPr6Zpdqdndg/ylVPN+lKgLAcoklq4eeSB5y9c8gj7vZGpzVFRHEcohNLKWV/pOzwzM4WhrtrphRbWFhXh9kTIbJTR0lhfAoPSGtl78nmZWU8MDAKB8lBKEKC0rLCw0poBqcoAIiBrgTx5EiC89xJZSl3dqt57T8QCpJQogAV7JkbQygBZ70krbbS2WCKTzZwxRkrtiby37GUgAiGlCYwUkoBnmbWFFRIdARM7W7nDlFIGUKAQ7NiRd56cd8RzQCGFNEobrefPHYTSk0DQApiJnHcERCQBlKgsz7Z60AgpvPME7L0XABLnT0PyjogRpdIShJSVJ807nktaq1oDdM5bz46p8kndfcihFAwEIDw5IlJSVFjdevLAKIWSWgIiskZ05JWSRJ4YfFmYQDNhaQusgqCUlFoxAeHcwkvEEjVKJK5a0qQQkGU5kWf21hEDKyG11kobCQAoPCCj885JhNBo59h5T56kUgaF944JnC21EI5Ia6W1FgAAyN4iKokoJUoUiEJJJeVCZktIzCiIPDEBCKGkIkcAZImQoSwLFOiJSluiEABoPTkGZkQpydmyLImICKwnZpJCwt3uieoTvFVVNJUEqUxVYwcA5ag8vNFvFuWRTX3uxPrJBx5/4cknL37parO9bnE8Ge6vmW5Yaz767e988ZULL7x6MzVB6sTBqMiFnbnZA+dXGquPbZ4+Od0e9oaBLwKcjM8+tnY4uX244+8c4kHPPdClTg/jCI6fbLqDg4LK/cPBq5d2Va39v/+//+l/+O9/SCX8wa/+/jve+uhBf8JFNN0b1evm+JuO57tbj75z9tDboQx8s+Fbx6RzUKmlgEEaEAJ8CoygNCgEHSEFYMNI1BtKrTBIhsAAFNe266sGkraHAsFyMZLYiUS+fe330wmcePRMmTe1aAZydZzdrIeCcVrTsoARgs1n/Th5SMAbx32sdR554Zkv/ut/9A+C6GTYfffSd//U3/rpv7N2ZNlOLrlxLZby1sHKI+85Ve6+9he+8/v/8Fd+TUTdbgMm+Wh0MDMG/s3P/NxH3veta0f02gr33fA//bt/2Dm18T+e+OoLz372O/7R9xwxZ77p8Xd9/Kv/WG0+QlP47T/+1B9+9ud+6f/6eyvnTzpOhbIeZgAOgRE8z5fqJIFTwABQwARBMcQoHYNHIRACKZYEBAyMoBSGDBYgZ4gQYg9tX/Rl0BQAAiIqmwTHgNa4fJnc7yi9w6k2zRpP7tgSVFAnvcFBjVUTPSPWQSbMAaMHqRhcHu6Ha7U3bLQvPDl8w1s7tQ5unKk1WnXKstMffPfNlz7ze08VTjEi2Zw8c4ygpAw9lhmPWWkvb20VV6/SU6/OvvTsF5dlrchHO/thswxxOj3zxiONGe70s898+at/6W+871/98pcub482GtHzB9ORHayNExJEhR/3G9PhdgkP6s7S+uZme/XEZKd5a/9Fapinnnox6NaPP8C02n/ptf0HHm2pJZz5frg0R0X1RuC9y1hHQRS0lpbve1yaIv3aV6b7WxeubfWH9pG3vnt9ZbUo7IvfeEZpBGO45Fs3DhqxjoLw/GMnx/3xdFaUhU/LvHuky6yUDu140EyC3duHR462vFO9/dTm+3EjiUxMM7V72Dt94vxzz3/93LkNoUQ6K4RwpH2We1tSEAKgzNIZiIAF1GrhqdrmzvbhzKbL0fJ45ALdtpaJXFYIGUWNONgfgffwjg//vV/7hR9rrRcOiLLsYLZtLbdWO5NpdmxJry+vCM47S42ysGk6U9qU08IYMx4fJkG4tX1zCdeIDGAZ6Ua9eWQ864PLTT0Y9QYsuNk2e1euv+W9f/3nf/5njqwuBTX1la8+44B7u/1hVtSn
djVu9oSbpf2P/fcvgAh4PFE2L6bjQAlk8pnghOM4/uX/8hvKBuTSr1548aUrtx7dPPbQ0QZ6cfvW9kNvbbmaXGq1b928srP1imk3g9jsjfKx920VNJrd3uBAoLl5446futaRs+ff+saP/sef1bh0cGe0ffVGoxHt7WwV/X6j1SFgdqUCMR1lt+688lByLo6jNBvduXWoRCxljBx4B9aOOutrJlSiTAeT3SAK46h5/g3f3NvbQgRptFA6L1LUWgCT9zYjYrl64sEoireuvZI0W8W0iFstVFFCa5681EYba7QAn/rhBD0VeZHle4I9lc4YuXHidDnbippJu76yc2ugTVj6rIR87cQRZcKohs889aXldm3jWLt+8mg2zl3RIVjy6atbl146tnnqTz71e9/1nrdl+/mLn396+eyZTKj73vXGL3zi08eWW3uHh6P9fZGNDq/ynZ3d/njSiJaMSf7kS5++efEpY6drrfr2ziELmdTk0NJLV7avvnKrugtmeRbEsQpqwkO7no/S4sbt28kwSsJQaxXUgsLmxmihRJalZSlv9HbDRNnchbGp6boD2Ll9GEeJ8ipAIRWudFtG6Kbu2LJInU1qCSNZkLklo2LvkFimRZCneTYTRU6BEQJRRo3cI1BDMI3LlDWZUDE7IXUgEgCorXam/T45LzTG2gghnCEEKouCCBCcJq5rEQsLgmpJoCWFjRgsKwWVB19HEkGjTArWoAwIMoiNZiC0nhAXLBsijnrjIh2Ws0xAiRJbcU1oFSpVr8u0LIXT4H1Ui5QWDdHsj1NXZoXlUQ55IY3yGrwJzcwRSwmeJs7nWoQqECb0Llda5MwqjpUAyeTJgxD1ZpLaMoyxdM4zWbKSuCyd0iquhz4AU1oUGpi1JHJOIqAQ5LwADwjWW4kGtS6sA2TnrVQSmKDMKqQhdSiFkhRXRbsIQFw4ssgg2ObOEUohlPcCJSJAKDWy954BBSFqqfLCSkTyvsytlpC7HCUwgGchUHqwQF5oKUXUHxfkIAsLrXRRusSEUahSjzUBWjsSclwWmDNJyguLRgVhQCIzIrMmx9Ihaa1jNEE9aXWi5ujOzmA4G/fSfGaJfRiHDiGOQCpdOLLOxpE2Wox8sbd7M+o0pPWx0q3lpbHPXImKMM8yMgHVAqtELQ6gtGk5LiUNM+yNMg24cbQznyVXgzWP9sG7Liq4KwvhhacM5mQC5o6wuxtAfp14CBbwhl43/UfE18OACjwBQtV4VbmWeG4uQwCo2oQXMhwQ8+asCt0QQNU2VqUU8Jx4MCAKuLszCyxVffQ8BqjygyEBgphzkns/iqKSORHOQzQW/jLJ4HmhUqmwDiAgASMvZCxVcg9T1cJO828s1cJuFddKc8nM6+Qsc7ByN9qU5l9T5yxrodMBWMREL8BIdSQgkJFR4CILGwmYFp+LyIxVGNJcYgN3tUoVzhOiCgSqRoKwam5boC+sLnWa2+OYYB4GPZc1VV111Vm5i8nmFwhTpWjiBZ6qzhnNXXm8cJPNL6WFzqeSVVWR1YwCq9Vr5nsJEvOraEEWFxfF3FSIUC24L7RX87CiOVi6e7YX/BIX5pEFKvrG556xWwOvm1/45Mcef+D4Vy/dSicpkZgWW1pPNzfP+XF4/PTpbzzz+XZHF9lsZXUtt5RmnE5tIKfLdUmjm+nNF+9vrSvFtvQgODKRdU7KQII+mE5FMVhKkjQdedJxc2lFhJPb26eOLm/v3aa2ikqXXr/NYaNWD4JQXbpyo2eCRqudmPa0J9PBpN3d2Lt2Swdmb7gftyLURT1olOxaS0t7O1vTHDKbqTBia8n50MTHTh0N6qZjN0aDnWQaBMi3Xr1VxDDdGQS56Cqvm+7MY6euH96epZdEMXFTbWpJoNV+fxBHcS0y9XrzxpVb5cxqHXmS7SD0mJYuK4oxszzS7QwnZbeNN7evxLUOCYh0lDpXFBOXTkvnZBQ/8fWLeeoVaqEYlAQEZ73U2nEhvRMicL6xdSn96Ee/8RMnz4p6rgLwZZTtJf/s567s768vt1USjJuRlpwPx9YB5OXMFtSfToNAN5rNpmreObBlPmnGrQBCpUWgg95BIbSJQxGi0lowgSAUzudFVmAQBlF36ej+pFdrRdPBwXicJVFtVhS9wWw0HOepDUNMGjBJD5uNtXKSJuHSNBsZyc5aAtS17jDNvBt7PyihrCWx5MgXZe44biwHGBWjvdiY4c4sCJq1ek1IUWSlCXWZe2NYBSHbNImF1iYrnQcbhUYZWdeQzzJlIu9tbzDYvbnLR0TYkVES1aLY5xNpisHwoFWPwIdamPE4D2u1KGhIzK2ZJPXo4BZNJ/vTie+GxqdpNi1K6zKV3bp152DPA4W3Xtk6tZ4krVo6pkglTNOVrkpdceLoEecXN4NUnoms99YhokfHxFJIIRSRI0RPFEdRXhTWlY6ZBUohPedGawT05Nk7R7Ysy6wsEQUSM5EIQwAWUmghJYKsUpCJmEigABSohOTKj1ZGJpRS+kqT6UlpTUSITMBSaKIKsRMCKKkqkahElFLoMJxXWYIPtKly8BBRC8nIWVEwkZACEaVQHp13JAUAkPelkIpZWXJMjrwHACmEUkoLI0FKgVrJqpW+ygkqnbPeSYlMczesEAqQPTlADSyqiCVP88AgT94yCQAUEqvgbpRCyOp1IoRAIaowZkACEEIogQDskRmrxL25bZaYxfzhzkxMpSPvHXkmJim1QFlZuKVABFSVYphRIIQmICZEDIKYBIh5d6ZiwcRYkheAWEmWmMmzIyYicmx99WJhgaLaOQQB5AQK671zrnJukwOBUgom7wUyMhGzFAKEUFIQEAghmJFJCMFUBTwREwipqhgiIj93ChNIJbWUQkgBkoA9eU+WmXJfCJTVOfJMZWmJWEgkFlJIgSjYO++ZmaqVh0qDjMBMDETeM6NANFoJIZ13CKyqRgScv2r7h6XgiJfCGy9cXGtuTuCVtpu2WnGy0cwMHe5ms34U5PqbPvTu157++oP3dUYyuLnTe8ebzin0a6tJUVpN0pS01IgoTS9curgUt57f8Tt7feD6YOAc6Yff8HCnri8++/Q3vthbWW+1uvWLl3YeefTcY+95f3t54/DKDXTim77ze5eW13rFn8hi/9Tpuszt019/+bu/NXn4jf2lx4FrwBmk3hsD6METoEYHHNUio4W35Jw37WWn25YLwFWGxEPDARFL3rr2B//21g/9q3N52RemSRCYIBSADEHryNtqvi/hEcIVkJsZhDo69NBHOLQwYEAHdO2TV/5f//qVf/If38Hi1D/80Z8Is8H1Z5O//69/9jd+4df/yV/7d9/2ke978U+/Nrwza7bXm+vxqa67cfFJxfzcE78VNYpf/Z1P/NP//R/86fOf6a42nRuR169s7Rw71nrs0fed6NzpXbn0xS98/Df/6DNrG02xJr73R//yz//Cvz21Ofnyp39vOhief9t7RuPJ3/nJj771Pcf/l7/x3UlnjeEAoQMwK2GmQTtUGhTByEEWQgchK2AmgT00HSQEABAzKA0hA0qW7AA1ejAMXQKB0FJBgtB
QUAA0AVeYV8iuCTNilB5ZN4+A62PUMHETVGzLjoAOlQqFQNXw1dcX0AgmAPLlMLXld3zvqT/8zWfrKtaRnO6lMlPLa7X8hRcNOqMhrtlOi63F/TFSKYBYKhtoMJKKwkpUcQ0d8/Vt9eosbcVCDoojifyNf//9h8MrN66Un/2MffZr1x+O4Qfff+ZPvnbp9vVpKRO12v36c9epdN22SdrN5aVktPVq4Tr3P/y4SVrLq/j0178wmmTHjhyZpoRdr5bTaCK0yXUD9ditbJ6q7oLd2/vHT6zd7s0eO3+qvbb57OXDdmSIjMtn9VC4fDib7MjmxuaR5cnuxp29O5PJrCAM6joOY4lmd6eHCJHG+no3kIwtM9md9ndHhcsDzY1GdOrE5pUrN4X3Qog4rpVj11xZPvbY6fGdQauxabPcWehNDixiFARxGEaRKn2mlDTaAEkVqeWl1tUrt7qtRkcmiFIE4L1NapEthdJSxSo8tZZND379V/7lh77//9NqfWgy+UwS1YoiZRaZtQ1K4rg1G824yJbWu7Yk53jm7Eqra+10OB7qRKkwWDt2UrJy6UCbUEp9ONsTIErHfuaIREoYHz96DO7vdNj3+mlDCTRlBrEOEoyax+o/8Oc+4gr53PWbGqKyTIOW0KDthIdpqWLTbUQOUTbDZn2tI00vS0d5vr7R/pYPvvNPfvM3jrQeGh7cXFlZE4bjUH/xs5861l4d28H73/WWZz774u6s+ODj7/vd//orpceH7z8jAbTQzaUkG2x9+tdezm/Plo4due/ciezWVRvFiE7qwFonrSUJrWadWB/hswYaO70rKEVneSPPrArjvMio4CTSty48k9Tj/t62CMPu0QcOt8dxHCfNJaHkaNIXJBv1pXF/EBmplTexzgpPPh1P83q7Ph4f2EKJINRRXZtGNhkN9nbceESY1lpyMDiwZdpejWezMblosJetLnduv/ZykU7DTmMvF0Q07Q+y6TBIlHAqH5RQ5Me6TRTUWt442LoTtVe4cXJ5fTOoj/v9q1ESvP1tj4PyTtDOq88NRjfMyumldzz+4FvOH7700vb1Sx7yet1ef3W/Vm+3lk49+sD5Dz/0wC/+/M/PpgMRxjZFm3unbUE66EZ1iC9cuFHdBaW1LKC93KGxctORLYvISMkYSlWPg82j651a7XCSHfQPJrMyzTIdR8ZDqIwbw74bZN5JFXCG9SAQYRCHWnhZT9q+pLIsyLMreZwVY4K4vt4/vJXlYe7KaVnMhjPwXgoYTma1KPZIhMK6aem9Ry+E0iCKEiwjgyYnS09FqdiVAbMmJ4TUgjRiLTICwdtCsIdsYmKKosBAJh1HIUaNmvWls94Ehlkwo5IskwTD5dJlRqDgksFLYeL6koS4bZZ3br6SwkyFSqpgVgipw1o9adXrs9losH8oTVCvJyrQUoq09MPJTApWWpnQlEXqgfqzohZEzbiugNKiZKC8LA2jiUKUqMgRF0oosM4EkWVP3hsphQdtAue8kgLAc8mVu0gKNEYTo7feO89E3nnHpXceJQupUZCUrBRobVBpVNIxIbEDJ4nZe4EglVZahRx6cFIQeC8QijQHlxlpmDiQxgsovfXOSqEQ2LFXSiGjUKilmc1yRxbIhloJKQSilNr6ogrFFYDWegCfEVGe57LUSkSRYYAoMDpAcIWSjBHWAolSzpwzjMDeCGThbEmtxlI2G4ETAGWtUQsJpvtFb7vsD3IhZL3WKgv2jjyQFSLL7TTNBcp8SgJIaAjqciZmNM1EgXStXwKhko1aWEtU2DCAhQRGYmKfZhNQ7JkRA+do72A6pxLzAKKqvqoiEfNcA54DEJjjhrsQ6e4cfZ4HdFeKBItw4soUdW96f+9f4T0PFSAAVhUpc4nLvT/uggeYkxQhkKmKU7q7uXv0ZJGLA/ccbwx3lUQS50HTcwyCOA8ngrnDbm7KEiiQKtzEUAmKFq46JCKaoyIAQMFA4q52pjpkXnS/M1BV9lWhortOs7uuJ54b9BgWOTwV2ag8Y/eERdWo85x+4T01T2Ud47unByqnAFRtPXOcwvc+GQAXW0Wsas5Q3I11WqiD5rsCgEBMVZQQv+50i/m5vhsyXo093NttIGABDChoIfxBqAKPgBChgmiLICS+C3ERCWixEzy//iq91V1lFP//2cZwcX287vrixZUwV0jdZZRYUTB4XVjWwqt2FxW9571vO3ht5w8/9+K7j+sfOH/qyedeXjvSFaa9tzsWszvvWH/bxd2XYfuF849uopdBpLyzG6v13sFwda1DQN1O4+b1mzycIpV6jVWsbJkhlo1anOVZK4huD8YEzuaZR5SCFablbEjpyGZBqMNy7JfXu3s7RRKvX3rltaUjyoV0fevafea+lc3jLV3fvT7orqyXeW1vbxhovRy2RpnvTbaCTme1uyITVJQuNSITwmDgSutnk56zg7CxCUsd4wt1OCLyR8+uvLo3BdC1lt7YCMaTUSS2j8RjA0FOpKWe5jZHGdQSLXk4ODzsgQXdbbVNEgxmWRzXprOSLfjSK8/MRSbc5cPxkdUz5TQNo5jBrHbCLB/3iox1kOfms19+GUEjeEJm76TSJIQ0MqnXhVahWTp9/k2zGxdOvNvcuPzC5vF6q9OiI0d/6T9f/tI3eP1Yw4Uj4mlgktXl9rTXCxDbYTiy3EwSEcRC1fNStGrdRnclpGKczuJApkXBWUlWBMpIpTPnS+uiIPQSpdbatEIBw9khF2ls5IwoFKbZ6aBAay1qEbVMlg1LLqyD29eHDdVQNZtNRrVaE1Ux6Q/K2fX20tnDaV9gDuxsWTg0AEEjlJNJyUoqFc+yVMjIkyjKQT1qCSG8D4JQOc7zbFYzqkzduMway8uTvTGO07iuaDZa6kaksLSaLV684gbDg2/79jOjfOJtGUDUqm2Usw46mY2mS82wDEStkRxMB56dJrV7Y8BKdDbb6a1+d6lWggTZVGk5GfeFMPlkmMR1mEGZCWoqAN3qqu3tK91uMhrbZoxZMQfVRAQgSmets4gYGCONFAxaKa1kWhYA4OdhMVICMXPmSikkSoECBWNZWkfWes+AErG0joWQziqlvHNCMgrpGYidkIKBPVlPTioVasVMTAIQmah6SyAqwcpDgQhaGi01oXBEAgDZVfe7klIKzCupL6K1LjYm0NqRFwAI6Jyz5J0noaSzFRAhRCG1IOBZ6YmFEOypRADvnZKyemQopbQQSlZF7BYQBLN33jnPQIBIXjCRACGl8EwIUkmjhMKq2AKEFApQUEXFCBkRPSELpVEIoaUB5IqXEXiU1ZMeidiTJZ5LkIDJE1rmCoWAQO/dfHGFgRicYyJGFOy5yn725KQUSkoUwjMTMgphjCbvJYFELJ1DgSDQWl/9YymER0IhJAprLTAzsvXekieiwpZqQfqEFIRkPedlYZ0HAKUECq70TUKg1hq8qF4gUklEIUAQkxAIhJ6ImC35SlfsGTwzeSeE0FJ6vyiiIAlKIqMU6LgKx66Chahk8ky+st15sp6EFwCQGO2BiQiAC1t6YCEFOq+U9Iy2tN6RECiF1EJIhQBspE
EYhlcaiUxtK4RhSJTKCICNEDePYg4IW98wCotEaAMIy8MIpYKLRS5ZVHa8XTUm0FpbZDmgU8y3QwJyGR0oYK8QrJlVlklsI6HSggdN4SEily4gHK2jvFgs5DoBQhluwZUIk4BjQ6LDceCu9CDMoOuJLaI6JSyrMHBK3IsYvCaOp9ingBY0xgQqPD6YQEhPLazyhIaJ0rk1fTK5ewtZl3zrMDEEIFJZtCBSAIZfk1Ou+AnfW2rAP0hQUk5y0IKiLxvjy7flr05nP2gEwYWSm7Nnm2OaO0MoJivSu/SLyzFth5R0oJgmfnbOFLmChslEYURZq5VDlRKw0AiMqAQgCDyguLJ2axzgMCkfLOK4VESsqYHoAXHmc5SQ6z+Qm6tJPYlRIskS7veoWZmfPCoiJnmIBEvCajjfEg7AtmdChMkGSZeJ+muTBrrdgREiqlb3yfaKPufO89f/Ynn24erzzbPdxJrQttX4cffuyx7/+/3nH48ufqRe+OtZaKCzUf7Oxcv/PcyQubn4WgWGwcWzhSaWdUpNm4n0NE/+H3fnOeOc7cKhbHFd19x4noyGnB6+O+Nc1gcOl88+yR8NSc1WMA8JAoKAAsg1bgUhg0YPX3fuXZPJ8PaqkT1ISNVjQaphQE48Jd2NwmVlELRzYxXvHO6NsfbP70H/3YpOhMiseqyxfuqVwL9y9/8gn38F+8MICVrStXoCgWlxd0NDjerG7tHi6trG8e7l09HL/5dW/e29sDpf/odz/M48nRszcFC3WXuisvv3p4OT55bnlv+9rg2t5+9gqgqjQCZCPOW0GXe9FKyFprVYDAkmUFgPT6/ThSIB6FA1VI7gMAIB3pKDQ0meSQEDifB9FTTwzXT1aPvWHFV1fjapRe79k0ry7UbcdJoknC4Xa2dAdkFlAbgYYF5cFPrj7f2dtcW250x2qSZdzrja88881fc8sXLiRPvnDp7NrS7/7Lf1qXpNKunDtxJPeT4697/Vd9xw/9wDd87U/+2Jnx/mZvczhnzt1WX/npb/nO6Jb1H/rhf7xeW+Z88vm/+OBP/uA3f/4Lz//MN3z7+9/zlh/+qe/9b3/w8V/55f/wyotb3/Gnn7j73sCJuv3usz/43d+48Vz+qT/49U9+5qP3veH0/q4CwTgM89x67+cWGy734rhwjCImDGq1CJmd9zrSrJC5RhLuS/67H3v+dz7ynLX5g1+FH/jVb8G4C7DjcZtxQGAID52fMKxCcFZjrvIXxzu2VsNixKoFlSNHcr9qgoYtXgYIGRt5n+KoyvbAGnXuqxa+s3PwX/9zf3sYxHG1EkXeue5gfHW/c/bE2kotftNXvu3c2ulf+csPpZ1hc6VSjbHwOBpz19ryU9CMK+35pc39V40JPPu4VeVM3Ngvn7zlthP3zLePIgXh/HzcNs9euNZu+ZWFyd61zXBt7snP/93uxQvJoHe43WlVw3GEzcXa9t71hfXl17/1nlE+7O1uv+Vt77j79fYN77zjiY/+beegt9W3r17dslpVx6J5csfaXLWudWzS3sQrKtD0+5N0gr0BjyNUgWbB0fY4jGicuO4wbbfqxjoGTjOvEb0JrfXsvAmDorBKK6VY6eCZnvutD8JqHA73uddxFzaLnuXWku1298Y9Yay8tN37J//uj08dW/3mt71xvmBlXGttqXX0SFypL60vjjcmJgyy3S3nfM3g5UvXO4OcnXSGbnlx/vLWxYW5anu5miUMtehwknlb7O3tG63Pnji+3q6fPnly4+KVly5f6OT9Zr1dN+3tvWvNegDOZyKt+fCmM+0Tc61XDg5PHZlba83BuBCUTqfHWTa/trK0OP/gg+/8rd/585WbVuum0miZRZKt5y7PtcK4WTt384nD3sazTx6evWn12Prct7/vXaeOHensXudocOuJpU4/MUGtubC2f3W/0oOrVy8OJ+PO9d7WYXL2nvnuYLfIEvYSavL5pF47bZOu1j6MdZqlzrOtYqVZf8Pbv5KRbZZbknQ8dsPxeDRYmp87cfxEf9itNuq1xbpMrHVuqbKYjsfsi8Kxs4XNs3yS7F33H/37xxU3DzcPc+sRzUOf/HwjrKUTVpPUugKWwsH+Fc/QXms89fxLLbPQ7Y3iZs0FAWVDlw0WFuc7gw764PCw8Ma4kB9+5GmdiSyuahWeOX2LTTtF0ms0KxiY9ZNnj7eOvXTx5VMn10Y+9bnqTUYvvXxJA544e8s46fpxhmG90qxUq9Wd8ahSb509ea6zv1OtxdVatH1tt/wUnD5VO3pWgZBkhpQOCUSjzcasbZbZ7qEnbKMoD5kXIdIMXokAUxQZJWC9VYEREvCOA/bWsjCBB1A8W1Z5Py1zxdeqRcB7z1iaEAzTjl/QGksxWwQUlEKBlKkbBESFaELvfZLlSIoAtEIuR8uIKIWEJMCklCsclhMqyo1zYUUIgMJiSypBJOzFl4tGEWYCUIiKyHmPAlQGNtjnqUVSVsB5IFADx+ytNgSDgqAfa7y412tXN4NA1yq63apFRoWa241KrWp8kQhzvV4J4wgoSPOCIBIVCqAHck6lw5FRYYg0HPWyItcg1WokYn06VlpBkceVitaQTBKB3LsCmb1NXeGtFKiUOFCiAw2p69s8t95aXwQBht6Z0Gj0SqBSX8hNKM7U7NDlDpQnj85Tjp4BvYB4r4QCAGQfGR3NteJWpRqhsB4Ox+PRkLPECyc2CU3MIkRxFGtSOJEkFyfOk8IwiBKRKIzDWIm1RZEbDKwWrZRCLookc04DiGdSkiQFEmphBPbsAX1FG/FemJ3zBeO+9QrBaKoOaX6xllg/sZADJkkuyCZGZ31kUJHJPfkgmHBfEQWaK80AE+87Ex0YwxCyVhBOirxai5FBvLd5kaGLqSICeZrzdO+sXEcTeF9uQQL7qQ5TWj03cmDTBoEpvyyxBZU2UTll7EZKCqfv2hlhgelSf9brM5WMyqlmMygzTY3JVL8pj2wa2MIv4wEw9U1EZpU1s2BXGUSaaiYgX3YM0wxcSU9mI7ZQEGakYwqNmHnmucgM835Zoc0U0vCXly6DILOUDe5lCA9ei1shAwJKCXOnCsw0ogWzcWblkyeRKfaSadE0lo07Mq2JApmSn/JMKJiZUSXAmp0eYAEWgVlGbcaqb6CvGczjsu2aSlELgVg8CCGWXUMlb5kJYThjea89ORThaRzuRnNR+Ujl2ePZgb0WKZueeyzFsRtHX1ZIEcwKnMqkI8n09E+tNQSS6XktL5koPJ1XhwJYvpnYIxAKMnNZ1D3znACEiRAZpgVaMwFsVsE+Q0UeqLG42Nvv8eE1kmJ95ebrF65EDdRYN8I2U0bV2ivLnZ3L6XAPPNxyy/0HA+sGXZBsbCd1orlG/bA/MVqPJxPMxiKFTdKVpZXUpbfffeTapSsHX5qs86SXHYSm3fU6lPqdd97/Nx/7q0pcOz0Xf+lj/7tIx6Ni9ODb7tx8brfVMvlwsN/vrp9aYxVKtGITydzY5dmJ5eO7+9cH/b5TLmq1u9f785VKoCPHvL3bSwsJY1pcbkuoqFnr7gxeffxLJ04s9wdqfwCNZmh
CffG5p95452q7ZShHwHr/oFNr1pSuKJsHUVyt14oiLTwQ4eL8MluXuHHiMgSMw2CUuNzmYRwd9FyRu/ocjkapEAAcIkKWFePOMKhGAv72k/XxYP7xl/uFJTQmS23VsVEqGQyiOCyKHCUV7wFRm5KuYqiNFRzb7Pjp+q0L/dfdunh0sfLEk5e38/pjz7nPvZoFVpVDoZwtgkCzMAikmSNNGrQXIdSmEozHKVjLpOq1SljRSvj67mF76Ujohj4fMfiwVplrNff3Dw+Hqa5GzjlxzR//hc/+9Nef+akfvy/b2VprecV7y3Ot/e5olPjufv/zk0m9Vs3+bGN/6G0GAtrB8+WbR4EBiDxAu3Uyc5tFwZoyhR5JFGoFRGGNFIBXJoxsXiTjgkB3u8X2td2nnr3O7AH+LIygNde45fYTx9cqS6vL7er8c8/udruD20+13vjGO1z6kr7SW1xrN1X104/u7e/33v7GtZAyZlPYojtM5pZvy4EPD3ZrCwvHjq1ceeGqNlGoJgB4sD+uL7SjqDrq57m3STaeXz7e6XXTzLHzKnfXr26XTyQvXGa9VhQEhlAE0Vn27EkRIBKRUjoIpmJfoEJjyh4lJ+JDozE0eZZprYzR7NmYAJV3BXjr2TMSCnoBKKfak1IKZwyYlEIU8QwOgZFMoAJBDyQyJdSlpAgkgKStK7yA9yDCCgVRKULhsgtZCbMHVKW5hKQIDJH3Ul6NgVkRKaVZPIJxzgu4OAyJtCFT2JyFRanMgRcPAoEhEGc9syAp5URUGCAqrZT3AiKOXW49lD0AMg1NawicB+ccCytSQGwUadIsqiTWkdZqCrCUcxYRAq2U0lBmbJE8M4v3jI7LUDcxAImgUoCCyJ7FiyVUnh2ClJMdrC1oqmACotgiB+bc5tpoIvIsWqmyANuyU0RaaS73Mow2BnhWQTfbhxBU03yooLAwo3jv2bNzDsQjErF37K1j5yx7b50nRQWJ0aTRK0WKFCIpDASFUJi9iGP2COg8O5YbOT+tUZgFMMuLUlYurCu/lAlRWJRWWhsEZHbOexR24oHL+2HNnJd3GtZ5rdGUOr9DA7oeaHG2yLPcFeM8Z8/sfGCULvdmsQzuTW81DnoHdv9g7lR0IIPey2llae342Xp1C3YuHTzx6H9vRKlZgN1ef3Q4nq9Vbz85X4ehqVUOtr908aXHqtWCwfWSzBeT6sqyPWSXQizFwtz4q77lbY1qGp2ZK7rb/b1BUOXm/SuTSt9o5SAlmJSarYIUAS3s5L472p777OdAB+Eo6zsQAtUbTIAh1DSZ2Fo7cpK3WtU3vO7mJz773Pu/4eiP/so357VBMexEBk1L63p897edixa39z982N1Ma/Vjpr40Tu3+xmZcNY5UeuVqrzNJEvvypa1sYkmsyIRsdnD+FQBG8qTjZw/y46fXimKcFVltvgbCk1HmBAzFkgWW7bGVc9f2n0aUIDDskYDyZGwAjQlDnU16+2tLdm0NIoIr+3BlGwxUiQwEQeK8GuPzz/Idt41ueWcth0NQhnDYu1qs1sZJd9Lvwc5Qpdt+/jQUY6i0BwrqHoyCyYufeP78BrvVxl7CNRjfdbr4R99536//6uNXu7rXGQfMr27uqjkd1OjZS9f+f9/2rV986frv/+4f3nPr2T/4P3/+Xd/y3guT5MFTqz/wM9/XVTt///LVeOX2yeYL//S7f0yCucC7g43JD/7EDz/y0otX+jVeOPm2r3n7z/3Iz73zK88srqo/+JOHl2+9+yqYvz/cu+sN93zrcu0Lj30iH2eTcaGJ0ok12kxGuUL+uq97n80bo2Trbz/60UrVTLJCUJqVcDLJWFGWswlCpHkd66gePnUZf/mnP/kv/tXbGTa5YqNqbKEAEFWpAij025Be5a7VCBAIzAMsnc3hmOcGgiMSSC+pSLB+WgMKAwMmyr31m49B0fitPzhIszBNITbBuD90YXTtsEhG+69sfewhtCfXKxkFh90ROQWiYh3WG3WADgBEmjpbO4EKMfNZb7i4cnJhZT134Yk3fGNE6tLTn7l2/cr7vu+fD3k031Lp9uGVq5dMNvzihx7dvvpiqxkvVE28GnWuX84Oo9VTywtLyze//oFqM3r2qecq1bkj9z6QO944mNz5hjfPN9JXX7r2yl62dPqk370+Gg5NxfT6g8CoYiGP69XhcOw8R5Vqb5A7duNhj4DW5yq73UOHbq0RjtkWrrBKTRiArQJVCUNdw8I6rUW8iLPa6FGH/uLPD2IRHbTYWecDDipDnNvbG0qeZtKPdBrW4+c3rqgs++H3vVMpFc/PZZNuklnvOLXFOJ3EgXIEgaJsNEiSnEkPtfm2b3z38KPqmRdeeLaX3nXzaRU3DzcPqyqoVasqkF7aC8gcHh4urcyrGBYW6xVQjUr1yl7eXlyPtD/sDTv9USWu7HX75y9f+6q125qNeGQ5MrB/cNhqB0++8oIOVs+/8MQ733bH3a8/M9jrr68v7m3tc2KF4Mr+0NPh8qmjOEiiuF2vVxdqpjFXOdwSijjRxXMvv1Krhgv1ynCYuqsdUuS5qBkf13xkIBuNwQQqMGsrra2dQ1GdENPxoL9UXULviyJPRuM8t7VmPQpDcQWCqy/E1zb3T51ZPNzbOugcrB9frhrD2V4ctSADgFDEBmEUhQHnPXZurkIPPfbcA7fcNz8/l+W7laXacnVhYyeP2urksRO96xvPP/6Mf3V0x/13PvnI0+lEL7frJqo0llsNUoDFsDcIAn1l80qeDc+duH3r8uXzV594x7vvGbODDF760vmdawdnj1TuuuXs6sqaIsk9bl27Ir1+b2dr7sj8A3eefO6JTTtiO0o7k1xVttrzrcW5OM3zJFBZYZdXV7UUk9FQKy3OI0igpuuCsFVLs35cizCsoyZ0iYVM1YAt9Xvu8lVLQVWFoBwGDti7Sqg4d1GkrXeGWYGALUQhoS4YnJQAiIWZPRBNizNKRaBcMlsRjQgKvS83QRWD9wxKkWcmIG1Uub/EnomImTWhVmi910qMAUKTFt4xe56KR4gg7B070lo8G00A4JkJQZHywrMV+nQlXS7plUIHwAzCYszUlS7XqB5IK+W9K+0OLrMvirzzpNCzeM9E4Jz0NgcAI+981SgFXmuMIh1rpRTUY5xrVE4fmW+26vWmajXiCJEC6vdHBUhcaStVZEk6Opiw89qELOBzR0RaxSQYhsbZTAc6zLW1OQp60CKZAJNYVdItEOeFvWfHIAo5Eq8Lp4J6HTUGJmQSMgGGjRCwmKTppEDWgt4YMTr03jM7S1QlQ0BxNdb1qmq1oBKLk0BXapWqrkTZZJCP80Q0ZxyFUb1Zo6BRSfMsm0yGA+CMQqrqOI6qNR1IOkkYBtYTsPWsCJ1n58hoFZpQEEHRyOZRYBTwxNrc5p50oBV6zcBpVqTee89k9NCqTHsVkZBGReBk6PKGiTUIudTb1AtJoOYrjVC5nFOfq0BHiwvGJhPFbjDsE7Rr80u60mCfZT5rtCKbukmWNWpxoE3uptsGOI1vqBt2R1lJUxa2M8JrYgZOb4Fn6+yyjLnsucHZfS
XObpRntdYySzTBrJhnygmQBVRJCwBeW9TPskulb4IwHUQ/6zgCmuKLaRBJBDxPw55I0+zarKyZy5SUvEZnBLFMhc2knRl0YfmynufyNZkmt27cNs+A1vSFw7J6DErqggCAinAmWpXUBma1nlOWIzfg2JSulPVP5Ms+qOnfltmks2k51MxYuhHA+rI41hTEQdnhXXY6wZTvlfykLIhmACAqe32mp72UbUCIb2xfswBhGURE8eX9eXmYJcV7rQ66NBhv1BKVr9zMhiqfv9xAhSI3/B2Z8h+k0lG68UJMQ3MzJAUo4Ge6Uvkmmk63YxYsgRZKySnZe0Asy4241Nu8qFIzQxQRdYPKESIB0mtlS/8fVORSoFq9sajsZPdwaz+5UJAP3JiDwFW8azaxO+gmqZAaR5FC1ezlPHSqXW2Ndy4rJYFRSuFCOxKDJorzvDjoHuQ5tJrLw1G2d22/SaEUYafoYaUetxfoYH+U5p/6/JcyqtbnGoLF9qVe2G7NzQf5oJK4UDfDnN3CkcW9cbLUbizPV23ar9RUQrJ7eHFUjJQwo0l2DyOKk9Q5DtKEk8SOk/7SXG25tXr5xSuNWmWp2daruaoaQO1yZ8CRqJMnb7t84dLS0Tmb5GHVxJEeHuzWa/VmzQiCTQZBECbeA1JepEl/lGRJvTV/fOnc5qXngooyqUQExMWx06c2Lp9XVUWsc2e9eO84bteduDxNjZFTK8HhARGGXgXdCSQWqrExEabjIs/HJlTluCQR9NYRK+WF8+Jb39P+3d++Pem9kiUppMN3rgfUrP5guPKff3f/A793pVZtOe8DTUFAufWARoViuahGPo6DycFwcaGyfEze901vePSLB594OImlOkzySqVFNhyPu1x4T4p1q2bO1ZtbVMXN7Y04BAENUPs3H93/X89+4fu+5q7dq1dSGw4mg1Fv5HxgGyZeW3t1Y2Bq9bBWS30aRmGghItURIiMLRbf8s4fufbiH6sMohgldaPxJAiM+NxZqbXqWmvvVUBVHcSQT8KI4tgwYJbllVroLTubT0b+C4+++nlxPmNUhpgX5tz2VfnCY5993U3zDxxdqYbhb//ZxU6mbzuxMNcItzZ36msQRMFyG7t7LwVaA2VZll+6POkP/NycD+txd5QF9RbW5y5cvlQzzUo1pjiYW1waXDhk5xsLQTqA/f5o+inwlhCz3Bc5aa0QyTpHSgug0pqFGYBIIZEmZXRZ3lZuaZFSBAhhGBKB0qSVNloRs5cCQYB94R0hKaUVEgoYUgJeQARVecku+4xFFACyMIAowsKzAIoDR6yAkIhFnGfnrWcmMuXFNdAalQIslDB4BhEiZHYAqgy5ehZSqpwdxk6cd0YbVMyCAalAh6QIGAKtvKB4BmEnAsJipwasY0aFgVJIJCzMJTEBBrbeIiKLWOeQEEgDiKZpbhsJIxMqRPEsZWMRgVJEMruilkwbAEE8e01AUN7MCYBoZRhEMysdeHaAYL1jIhHws1tAhUQgiOKZPZejLokISGubZ15EnDcapxPdlGL2ChARWcq9F9BEmpQX8MIozOw9e0Rk9iDi2Jdt1ZEJPPjcWUXgrDBYB2CdZwAg9B5FkSB6FgNUjv6UciAalrsUwCjOe2+LwIRKkRdG1M45cYhUuhflXhJ6EdTEzNaxRiIiQGKW8vtD2AuKLQpSSptInDNas3jPzOxKqzhnF4RRaIxiYAKx1gJqoz3ZKAzjIADvQ2MYyVpXFNMbo+df2lg50q6tV29/zwMvffyVdHisOn/qSFT4wcOD/k5cx9vuaG18+lK33491VKDx2lcgm+x1OdWO6Vqnd/b4UiF+rzuujdVaq9nIul/xvlvl7ur1XtqaFI3o2NKt90H7ENbna9Dz4ARGFjxAkUNKkAoU4ovQrf/Uj1253l9ePGKSfY+gitwaRazYKB1oqNZqN99/ZHtj55knL331e2/+0X//rsnWC141qCBxwoUtCtRkT7917jfeevp//PoXP/Tp87u8F80tLa2ubGzuZNaQIgiC6lKQZolTOdrcFxMQMSpQmpJJ1mgtvPeN39Tpbly7ciiBGqcpAYzTrFINFxeq/d5QQ7Z9+AIiu9xmjhFJaVAkzqmsW0TVa3/+F+9aPnVoKrnBw0jb3Sf8b39g+PFHoJtXl5YbRxZi3y0uXch6F705Eycjb6UZtm1njzMwSdOu3VfdujLc24LGPABbpJGBaiitK5f7tqInApkf/fw/PPvV33q8XWm9dO0QWkfm56NBki2s1P/v3/zAj//4r/3dh37nd/75z71y7ZVf+MAvP/wxHr185fze5rvfdeelZ//uW776f68tzcPOwbe96+a33nLmD//mD3a2J69eeulTD/311779214XvqEdLvW2Lym3/dv/9SdvO3FqPln/f3/iF9/1XW852OkEMY7Yf/rJ85sXD5yIENi0iGtRNaqlE7dy6nTQfN3JtfWHPvMXHrQXRFSkYJjkURgR0TCdeAfZJI/rEoHqbNi/7813/uWVX/qZ72zULsLuF8nkEhWgeyocuqKIagYqS8Wh84tHDNYndhmoDhw7yEQvqvpAwyHbuMhD4602sbgiZ/2Grzt3vcsf+qtuf6Ln5xpz9bC9UDu6uLx1bbjT6R/kCXAyHhdNo8moAiRsRKmbhg4uXdrnvAjC2KngtnNnX/9V79m41G+efLB28k7Xe2anf4mCYYS74wSTw92XPvMppmFtOV4+oq5usChvQn3nbWcP99Ltzf3K0XvmFo5k3Hrx4SfAD2yaRpjX5+fTpNoI6+ef+ng2SsPC9l55ntMECVAqTR2ygpxl0s+NNoZUFFURq9qo5tnTmxcvVRsKKds7PDgcuPX12nIjTCawvFq0V9W1od7bKeaD2omTyxcubFhGL+CdqDDIXJ4FajIegHCAwDmuwfimaOdbv+fIA19xXPxWa6k17KW//x9eeejJ3ftf925lTm5fuNbpDR24g15y5PhKo+k3Ng+r7YZu1jO7W5urvLDb/YtPPvKN739Pp7MvbIfsF1Ctz7Vt4V537/2PPv9I4CkZJlcv7N1661ocGijcSOzQXT915sRh98BgOLF85MxZn9GV7qs3nVqokO93upUwdtabcM4stP7uw5+/7c7wba8/G0Zzw0EhEO3ujMNKe+iHV3b3/+zvHrr/7pNr586cuu3cg+960+OPP55ncnhl88KFjb29bfekmTt25u3vfvO1Fy/cfPfrPvmRT718cauPnsTtbXd2VipBqyVCzstBryCsTfo7Oabra0fzoojCMAyrXkQCbZOMC2WTDNFWGub8C08urx4JqtXxGEPdHOzu5+mlpRO3TRLUBoJ6LZ1MDBVF2okCUUHy/MWX3vrWbxz55HCvd64WHw77nZ2sUm1NlPrQJz/hPd9527Hd/cFk4obdCRdcP9KIQoUFM8DW9cvzc+16rZLx+LbXven6hz4ecPXC41uPfuHZH/vO91/ffTo+NZ/lyacee/JH//E/gCzvd/PdrWv3nTu2M8zHll99eYutvPXt952/eGm0uf/485eqrcrr77mrcNYNpDvCerMdh+BQqo1Ko17Z3
dqu1qYBtOcuJisjc+rIsq4pU0XvhJWSQiZdePbx8fOvcmfEktvQTuq+H5liqR4tt0JDKRqqGRVXgW0xSPnKhh366p5XB4WQCkCVW12MgFoRz9bfhCiABMgAWumy+FahBoWEgECAoJUWZi8eFSlSAKQQECUMDZEEJnAeeDRhQRYmJFJKmJFmY6zE21nsQgBdub9Vzume4iJh76bCBYsiJEXlRptjISz7h5mZRZgFfOGIEAHZWYPTvsTS9yDSEmgyKgKvibyzBQiSGUwKa4EUqN3hs1fHghwbXpqv10JllMzV9fETzVbDE8YgxvmJVlrA5DnmSRGoalyP46imQ5emInkulClTjUIsikwbw94VycS6wuYZokFSznvQGCnNolQQhpUKKQoDFQXag3hbOO75ZEBIUTxv0GSRzbK+dYWwRXSVsBIFkdYRK0YFjGBJAaGp1lQYgPeBEsGkKMBbNJUK6AC0iRpRtV7TLOnI+5yb1XYlqtRUAEg+zyVNrHMWCJAZIKAQrKeAgH3unUbwLEarUKNiXxYpIgb9ybgSa8s4yYs40swyHoyVDReXG4W1AkGVADNEg8boChqxGWhlHBChzXLSbQxioUICn1tP9YrkASKyTUOlGFwxyQG0UsZaZ1Sog2l743T9jiCIzA4RlCr1FuCZDDNd3pdkhGaF1jNhZCr6TEOSUwlkFkXDG5YOTJkJlgkFRIQpkpmBG75xPGVvcklICIVuZI2+7PGkHD8Ppegy1YVYZnS2/AnjrG57indmT/jLKoiEy4G5ALPObJhV5/D0t2AKWqYZtJnSM6WrNO0OAil7pVHKzFrZRzR7QiKvWVc3kIq8dnClX4NIWNYZzTqUyoHFX/5CYdnJQyA3OPS0IUjKIXUlZoNZCK18alw+DZ7VO5VVT9OM2Y2qI0GZvghc/jUp4wcAgPgaxJme4xuu0PT1KJ+iQMmxYdawhAQgpbM0ffo0o0gk5dDm8jenQ9W9zLQtBGHPsxexhInTNiIAdlw+fGlxoSAzlhRRAbL1ZXpRkxKYDsRDRR649DaZ8Magtlmt9VzFFdId8GK7feQm+uKT1+pRY3W1lUzyKB0Ok1FYqSWjjia3dKR+eMj1uJUWo8EgveW2r+xub5tY7e5er1UiMa5aj5ZX5wi1p8AWuN5cOnbTEUiCP/3w5+bqbTsad4f9YpJ1x67QDpXqpb3I5OJFJt5Ldv3i7vLcudjUbWojVeVhf2z7mgaD7u6p5s1BI+ofXFicm8uKrNs5UMgFVC5u7R8/vb60cmLnYNfm7mA/m8/wzAN3pLvpxfOvepS5o/PWgc7zpbmF/YNhfb7V7+wvqzCqNJI0cWyQQtQK2Dfa1UF3JGjbixUTV8ajpIBsf793qr7U7e6LAmOqIYabhzsi6vrWQRBFDEo8aKWLPCtcKqBEjKZgYbHdqq84uzG3ODfsZdu7w6cvduxgOJFATC00UOQ5Gs2OUUAbjaAEKYPiwXc0B/1Nm1mP1TiqkrHDIkXY/kf/YOmeFfM3z8pff/aqSFUHoTGYJ+O7boV/9GO3L1R9NvAGqt4nd73llJbK/adWP//0I6Mim19sOytJPwfHyKBRIOvubTxGvtDVhXtf/77zL/914YuANHh96fLkVz90aVnbXhomw8P3vuV98/Mnfv+DHxinrAyBK7JiWDHa5WNPgMSCXthWoxGnL1+99piOa63YOIWn7jgbVhu1+nw2yorc1yrV7uEhmWB4sK/DKtsxmoIIGYrJ0IqI0WC00qht7qgSBAoWdP7ed9buvnn+uee34vno7194qf/iK/tFdNgNLeQnluTEsVXnbZLko0kemaBboDamSJJenqejInPST20030JbTA5GQbUVmzZ50UbtdQ6y3DGp7ijNxqmJpwOS8ywHAGY2YeCZtdYlRC8dEK00ICCQMYHRxjlLIEojCnvvWViYy5CyEkRSpdIZREaTsHe+AChLhbCcFcuklUICUkSoygFdpGZXFS4vQQrBAwiI9QwEWG4Si2cGAPLsfM7GhAzgmIlUYAIPhSIKjAGWvHAFMwMIgzEQRlgSLkJiBudLzE0gWJZGK9LWZiRE5fxTYVBojPHMTtgE2gSGAcQ6z+AZtFIiEhhjnVdqNviACEQUqfILVGtTtn97dIjkRQBEKUVC0/FtIqTUtKKPVM4enbBnACZU5QSCgAIgyd10dAJNCT15ZCpv74TFlY1CRKQUqdJjLwonnilUSgdGB4Qk7JSiMtLsnUUBIiRAhUCKxHkBdOK9c1PgzszMDGIwYPEiQoTOeWZx3mpjAmOsL51kLPdnFClC1EpppQVFgD2IAFrvC+e894gk3msQ0kpEUGsvYr231gGXhpRz7MtxsEZjYAwCIAN7seAK7xgQERgFvC3EIwOgUHmrSiTeF2g1KesLLxxoLQhkdEWH4JQyEVpvtPGepfwAW2dnXUV33XqrWg02trfPP7vRap5pz30FdlIaZw8++J3ZZPtLTz32wqtbrbUFKiovXDkYppxUjLG2UZGjN9f393jl3Lm1Y83Rw5cpm693Mentf+cPvqlxLJaVW47e2wgnW//hn/3ho8/YSMO5e6O7b15eOFJpLkNQlXgefQi1Jqiqr6uTT3zKf/Cv06W4GO4clkxQozRrUZpaZy0pPOgnu597MYqoHui/f+jFN/+Ju+2tC5pqVK9A0rMFE2mIAlZO6sl3/uJd3/Cjp37xp/7yU68Otnd3WrX5qFnLikIFXoNTJMtHFiAvhn3PAM16azQYVcLYOf3kkx/LiiSqqrl2Lc0s5Jlph9bjfvcAnC3yzEQGAJQhQukPJ2hgrhJWahH63s//0LHb7qUcxhYscJ5xuvb64Ff+x+L3PRL83C9uPXW+4EmjFlT//nGp/+7+9/7GcaDx9rWJs4TK7+5ZY+C2O+v5db33Sj6u5NWtpDbfs7kdXQs+8VBaqPlOf+fDf/R9973RFFe/uHN1/9yZxcc28pyhvrjoI/jZf/Qz3/DO724FS91O6iVwCWxc3FTj0YVHLis/UCqsza1sbh9kndHqUvOpFy/+nz/401P3vPXC7jBRy6+7/7a3VvtPPfzZRz712dWTcfcgf/SR8+a+xje/d2W9HtSw8v6vOHf0zNEvfuJvwCibUBQHqZcio+Zc9ebTjbUTyx/4dz+3fnKObNGcq+TpxAQaCdPcRnHgHSwuLI0nIxuyWHBFoQAGqf3EF/Yf+roPnG3rd66O3/3d7166WcX1C7kZgxYH8Wi8ppe+OvVNZROjFPE+SUeS6yoYsCMcSaXeKNg6mxlHhDoXEd756m+NG+2FX/39bLewi4FKO+OFMEqTwfXrw4SU964R6DAMTh8/8vz+ta3uRPvp8mCpfWzS6dUWl1dvvikqBmlnuLLYaB2tfuqRj992oq5Znn/2yS+sfuQt733fq0+/2htcnWvJ1uXJuJ9iWHTHWWVuuTccM+iTt9/7tm//gc7edTMeLB9rjlR/9Hz3sU8+9M73fVM/Tc3R26O53uWLf0nix2nqkQIVurxvjFHVShAGUT2u1RvpSCxa5T2DdLvDWrNVb8PJaPnJZ3coii2YgyRdWsL/
8GtvB9hsLK7/6D/+zPMXJxt7aOpxQ0edw35RyDDNvQeVe0QR5TPBWEmSbT379LdzcDU2Pj1MlU5wRd37704/9cjmX/7tB/cvXz958g6jdDqZzMdGsqI/TsZp0dnY8awjo9L+xLN+9PFnktGoIH11f3DpYPMeimJxw2Hv8ZeerMb11nzzmjv0lSCX2uSg7zKOG7Xd7u7aCs21FyNN/fPXNl+4sNHt333bG9om3bp8Yf3Y0WsXd+6794HPPv+ZzWcufuc3/+znP/XB1psrg9Fwbm0FmnHu3VMvX/r0I19cmlt+0+vesLbWfuWJl86eO/kXH/wzjbXd7lZvd7fp1Zve8cBnHnr1Q3/1qSeef+K+E3d/4eGn9w42l9fn9/c6ieO4UQHDyBgATcZj760b50jGis/beRDHk2FSj+tplpCIDsJsMgjDaDAuPv2px9fXb6vOL6pY3XI3THr7uffFMI/Hw8bcvCuyQX/S7/ekSDUmlgdXd3cffPBdRxcqB6msrqyliSTJkEIKgu4jf/nfJcsODsbbQbCyEC21msSytdO/5eZTgR1feu7VUzfdtNezgY5GaVZrLpzfvlI5euLUQm37/EPf/V3v/NITX/D7iW5EtshWq/H1FzdrtZquVFZWjlEYr5w46X1RZe5d2y6GvNyoLT7QTD77Ur87ePzxLwaAd9xz+8LS0mDQiReqtsgBghFAXKkWxfS74L/8+fMnjq3ccyw+fmJ1ZamiwxBIdQ6zl57uf+Lv+1uTuMgy5ZKjS/Tt7z53803NxblAsIdanDaKlECOEFABebc4//zWxx7pf+4KdlJtHYl4EWDmcqu7XJL48ksNEBCYvZTjJQjVVBgoLQ2+UUVS3k2BolKOKDyjEig34xVo1MDomVmASCMIi0MdgHCgyXsHgGq62mRQSOVyvvQsgFDYletB5lIMwdJYRmDhsotFhJUiwaniVJbeIqFWikWstWFsAvIKeJQULKAYnM2FBRQCovM8yoS9pAYOh2NhBoI4gualzs3H+jevrWjkdrMSKL8wNwcKTWSFsyw9FLBOGTAReR1WnLMWxWsidpqcjTDgrCDDAlLkjtGEYQQsBknHsQkCAq+YlUiojQi6dBhAIQJKhYRRVFnwQQSTnoMxCTqUXLKcLSkdkKq1QqVBczDuJD5NFCKqODJQTBIC0lHMOmQxPssVuFhFGDWUCViH7FwpcEdRWPNgrRNtRo4rJqyLNmDZJlnB5IRJClswKBStyARKk1KpLbQhQyZCrQODSCigyvtJL8I+myRGG/Q5sm9XGt5xXc+hDtgXxEU7Wsu8tgUwF85JYGrVqAlKB8yxiB87N3RCRgIMoyhW6PLitVwVCkCpq3pSwOJA3MzgKMeECeIUJfBUZik5ztTKECxbcsoCrakFUrbG8EwlmXGestJm2tUzTXmVs/5KO+VGUxGUOhPNfJwZ6iwNE5pmmnCGkPysXZmBy81jAPAgIsJf9mgIAsJlr9DMfynFcyhLumSmAM2SWjNoNLWjbpCq6SbwtI273NOeBfWmKwWYlmtL+aBTyiUz8lSKLvSapySzVxdKQkYeb9g+U+4CM8hTZvxk1qA0pUdTgjOFYTfkpbIUelbsM+38Lk8FvdbrVD4ukUzzhDQFhlMuVp4bnpIyfO29UwbvsGRbpeRYNpGWeiTcyM3dAEvT4BhOMRNOJ5dNGRzM2Doz+NeSezI7iFJZKk8bASrx4GdJN++FiIBZBL033otHdAIKsUzkkhYk1MRlsY0X/v+gonQ4qtf1kWNrk84G51yLGh6dhyKu6bRIKYzZuzDQzqWdww6w4UknKCQwtSJN4jAg42vVADhtthYmk2LYhVazGtTql168XvO56wyD6pFiIhPXX2rUxE7iEGuOG+3QMT69e7B639GLh/3kcO/szYtvvPnsY1/aHy9VOYd0r58NR81ImwqdaBzZuXI1CKGiK/NzdRU1dw82G7HKnT13bt47vzK3MBh04ooeJ/LkM6/ceW6td21y/XpiYl+51q/UApNmFy/vP//y1s13n601l1++0qmElaOnboZeB0wWVgLrsl4vyxMIY1Ha2XQyGQ0hUgtH1govRliHdevcZJgcTkYggU6zyKh4vtnpdTwjQYS+oauLUevUtZ2tJ790ZeP8hUmaoxkd0fb+W9WDb6yllv740/0LG0mtqrRRRUHKWLbiGAPQyoTLC+otb7l50nsc6/ELL9rOE51qS9eX3P1vOTYcbt/5Rn7wH976Ly4e+S+/s//HH9v71jcv/eCPvrUWPhfVugGoZy4eBFHz3JuPEewr1mdO3t3fn2RBHMM4m0yAYW4htl6NhhOAwlsLJHmRXNi8HIRN7w/ZFtV6VKF6FPreaIIKCjZve+v3PPL8ZwFAg1IBM7GzVsApDaiVDnVoTD6aKN5/7vn/9jP//pe++NDnxge9nkhv3y3X1keDeH/3OtkiUePU7rVbwbnjGGkz6HEnw34/NSYkCoQLX2TCFrUyKAGy6yU/9xN33vfG/dR17jrbmAwLf3uFIumPq3/818Unnx1d7sbLR2vZYSeVfOdwf2Vt3TkJjRlnRX1lbpROOp2knwV3nFhQNBgcdsKKrlV0OploY8ZJkTiXJDxJbTayN91zBOBxAFBaF9YLEoMQkXfOmEAbLQCEKtQhiwcBTYDgA6VkinPRe/bCQRiKMAIpHYpnx+KFkdBobUGiKPbeswgL6zDQWs+oMLK3Woea1BTJT3vR0Hlx7IAUKgVIlj0yC5AmxQpZoPC5Uqqceg4Es8sROfbklHe+PxxbgDgKEbUwihXP7J1zLlPGiIAxuqwFICLPzOwDFYqwQirndhGWPAuM0Z4FoKQ7XDie7npoo0mTciAOlQIBJ86LlHq2MUYRoQgBahMh5uAci/eey4FoSGhUYNkBg7OOweP0i4qIFBL6ciuQGUG01l4EURGhFyavmAutNAKwdwxla6bXpUGpCIFN6VIHodYBSDlPlzx75oJFvPcIIKAEgD2XZycrCkH03hGRMRpRjDEAoEhRecuoCDyKRqOD8lA1ADArVKSACJmBsIRWJMCI6FkUEbBy7JjBuswiKcQoCkKlEBR4561FED+71FeCsHx/iCpJGBMSIHjvoZzjVuS2yIxWwqLI0PQ5sHdOCPPEGq3iaszsQ1SKlIkj62k6IZOE/fQuxXvxDLaYfiWkh74dmZOuml/mt933pvXlB7d3t/a3DtLNnX6fVhun7rv/lv7k8qf/7qnlWgUFQlIZD1aP1nWITnmsBoO8OH3XsfPPZC9f2VpuaUPLS2sPXO61/u2v/JtHPnepO0QVtFHgC5tSZAdZ7mJD6CFAAsULbW0CHOXD/UGwtlifb/mDLkdRZJ2A99Y6BeALjisagLPMqcAMR1m6kw6SOtlRPumF7bYNc6ooFZl80NesZJLmVvTKxV/5H7d9z+eTX/lXGy9uF2gTUqaG8cFOP6xEBQ5HQxdVyHve6+8ZVGG1pgwo5/KDSToWJXmW+jzN6s0AlNehIVaWJawaQB4NU5tNHnzvVw/6+vorz1kbHVk5/Pr/62QOh1kSoNS0YxJtaULq4Ozbwg9
++MjH/svev/v9ztjNoV760OcmD7/9idsXq89tZK019/M//6bDdOOpzw6f+GT29EuFQ33L0eiNb4kfeM/Zpx4d/Lv/fP6J89hcZcjsi594mF+o/85vf+bI6qIZpvNReMjNb/7mr/vTP/rDs8vLk+3PfvJDYrP+XKT+8Hf+wI/Tdky3vuHcYw8/NJ4Uk21st0LGCtUDl8SfefTh27/tPUfUgpmc+p1f/Ymnn3pCKWrUOZ2kd9x+63d97/e98YE7fvlf/PbHPpGef3HjzPGzC0ePvPDos54FQRsdOaNsHrii2dkZjPvDm45Vg8AN8yIZZd57Fh9XokBrmxQMqt4MBn0XGlOrhaNOP6qElXpUNdFwHza6+r9e8f/xiS96Nfjeb6h/z3ecXLutkThjGreLXeREFDDUCsd7Sh1QkBPFyK0X//J6tfrqqW866zDJ82HUXjCkB1e3cJieba7XbNQIalFo+5Pk6Y3dyTBFRUUmk4l3NZ06UHv++Qu7zVq9XY/KT8FN9z24c/mguXri7L2vyw+eOP/Ms2Gl2Tpj3/ved7qd3e2121YPrVo4d3HzQn9w/eZ7Voa9rcnhMKRg/cyR03fcfm1j0+euouKz5+4Zj8SN84NnH3vlhZe+8Z+88+H9ZxiaAelac/7iixuvPv54Ohy3FytHl47sHGaVwFSqqntwcLi9r8IKp9W61wHVUnuAQRQGkPQK9jAYJP3dw7m4Vvjg2qEvrNYTee7R5+Zlv1s9//1vgP+8Yzd7uYr0KM0NEoN4YNIMgsysBCoY5oX8p994VzL8hIlg4toVs8CyV+R9b/ZufwvPrcZ/8qGHBy+PFlbu6Ax6o34WNRMTBV54//phcyV++/037RwOt/f7g97EhHx9d5JOrA7wwtXLp5bnUl8sqmh7t/OlF6+0l6pGhRtb29l+b20+eOc730agNcve9W0Unw6HjDAYDz/3zMNvOHV8/djxucVwZe7UsLe5vAxf963v++Lnnv2xH/+eV579EtWrn3/8iWcuXI1iHVC00ppfqDUrId51z3E/ps998rGF9XqrnlZr+h3f/v5Xnn35n/3y7+/1LWq1Peh99qFXsqyYb1ZOLNQQi3/8Q9944thibotskgVaaaMtsyVs1EJgGvSGYeYE1UGvY0hNxuNmuwlSEPBh99rasfWwskxx0072r776yuveeq57yK++cvD0Jy6dOHNmpdZYWJpTDdrvjBtzlacefTGDxvrS0b3t3VFRRGE8HmYFwCCZHO5e6e4PM6DF5SVbFAuLaztbO/W4fvzY/N5Oj7IBEu3uXacA4jDqdQcnj53VxUCS3ROnl/bOu6e/+LRyNB8ZFajKYvXYTaeWji4mmbe9SZTZhx765Inb7/GVOBmPlparrzzx1PxKs1LU7j5zQoX84qvXrc1l0s2GTmPI9ZgC7QDTQSpQtOZq5afgmRcnr7y0/SlTBOplQ1KfX7SFTXKX9pnRFM5VssFbX9f8nu+/6bZbR2AyTvYFhgpDpohEW5eqypwIVhbcAyfNPe+96b5PdX7nT7Z2hlXrEHUgmgHJAykARPQEzAzMwqy1ZueJFJVhMaXY2bLZtiicCQwqlTshQsm9CCMKoTgvbJ33TKwRUbyvVAJ2rImEQVWNsDgHAQkoSrIi0IYEHPsgwLKEpfAFivIgRBoQhFEpZO8BSYAYUSkS9uxBEZXbgeUSUcqJtAKEYLQyBIHIkcXaV77pthdefWXzML96fUiB8s6zsGJwLEorQGHweQFAWH6/Dyc8mvBeZ//RZ3YbFbUyZ1aa6tjqfBypdlW1GovDkQ28Fao7NsYrZDRh7F0GokBQh1VVQ05SazNhHzS1jiMAgx4UKVCgBcEX1YpWWoCimMlIKgUcHHa1Co0OiJwDMVHFGG2zvvXeZTbJ83qjXavFOsuZx5NU3CSfDAdl7633EEYV1DpaqFfihh1IkRUBoDIxAKqAXABVrSDNlXCtFueAeZJZpZvVShjU5nSM6QGA9IY+G9uwtGmYMudDYzLrI8IiT43Sw0kG5Rw5gXSchYHWRg97wyDUzWoly/MgDMIQUJvQEOjQhIYlyHwSVWtYJFVFzrNVIqIIPBi2mU0njlxslEZEFShAk6QpMVhXzCiORxAhAfEALOCZ3bQ5CGdWSrlan9IOhHL41NRbkWnoabbsR5mKdDwdMMWzSqFytBmgCIGaEpey6nMaTiOcwRtCKtWTGfWY4Q+cVQqUPGHa00MgwMAAwjIVm7gsX0AGLuNZs0MFhHJerghN/2gJu24glhtc5cZT/rLWHqRpX/P0B9Nq1Bu5K/iy6WAw05OmebgZmZqlxqSEaVjmohBLNITTV3I64E2mCbTSLZJZgQ/MmpFeOzk4E7fgtQ7qGwE8vgF8Zn9lJkYJEADfeAWoLCqCG5mxckha2RAEhMI84zk4VaOm2Tq5kdmbtZtP+eL0DxOVp1t4WsBP5TvEC75WUcXiBcpaDF+OhNPOlVk+hYLOcfnmQCFhmGpESN65UFEwpd0kggpIfHn45XNTpZ+mFGilhAVI9LRVaoaKEISLyaDjA1I2c3OtoNmOpeA8GbeXQghoMthbmJ/f7x0QkVFRPh5WKIjrcTbaBvZpDs1mIyn6jcWKVsoyJqOx0dlcTZQrDvavH7vv2Ne9/94vfvohp2lU2L71k6w4tbj44ssvE+CVVw9Gvezms6u3HF882L5q+xOC06auJ+PkyMnWlc2t1cpiZKJKtaKVyi1c3dhtLzZPn759cHhNoUWfJoNOkbb9sJgY6qaTO+594HDrkh3kk8EQE7vp/e13HTm4fnD5OWshskO71R28/Oq1y1f3FT7ajsO1E2vtubherS+2Kq36aqCC+lyY50OtDjzaAPL5+VY6kbg+zzZNXFIJ50DrcX/MGOYJgJn3E2zMn6gQffbhR166+oVBt9euxhFK3efNkO87oW87YsMI3vrgA5vd7QuXrwKzy+37v+H//eBH/rXRJlRBkQJyoCD63Mee/8oHef0EXXgp2tponl4NUPee+tROqnStXVTPXI9N5599312VRvDPf+KWYOla/3wPu5W8v7QSNKsLEieTKy++ulxbv3Q5ft2pk1eGXNjcKEidTTKTFWyFolgHhGlvBHbg3Uaad1mcMIjScdw67E8UEArGQfWn/tV3KJB6u0VEBskXYJ2DGIs8FzAGiZTCSFfiik3zj/zhf73z9NH3fP27/vrTV6pe16Nmf3d7QdvQ7o37h//1V99x8nazfgR8Yk00n49w86Xhn/2PR/70kUMLgQ6MF5uleaCMkJkEctf99f8/Vf8dL0l61ffj55zneSp1vDlNntmZ3dm8q9WuVjkhCRAiW8gIkWwDNhgMxjYG4wRfwBbYYJOTQSIJJCQklOOuVptzmJzn5ns7d1U94ZzfH9V34ffPzLzu1O3uerqrq+pzPp/3p764A/1QqyfA/dgoTa4Wdn7ufcuvPxM/t5k/deZUCmmk09r0ARO3KSqG/Xw8LLM0UkVRSyLAaOvc9WakiyGmKhrk4ySN0kamxoMuCLIZ9BvnT+0evUlNjgKkOFao0AcGbSKtFJEiDYiKFBEpAE
VKKQIWIlWWwTpbxVuJNIpwCAwcPBGSd2UgZB9Qqh9VXiLWJqpEcU3kfBBhIi1BAgoKaIUiErxjEWGu3EaOJQgToGevUDOxCCrSkYqBjBJBYRQIPlhbgnAcRQhUlLkLbJkJUcDFsXgnQwnMkGUxB451pFSklVjniFETVRr/JCBNNMGoAdrSCgoh2iJMdHdmQrRFYSItyldAAO8tELEEZtZaI5EggkJm8UEUegI0xjiPImJ9kBC0ViiVH4uCn6jlhEiKrAvGJEoZz6UAo4iSSYLW2UBKGVKkwVYsyuCN1iKSRhGRJqg0GojiCJCMMkTEHCozbAghBAsCwTuoGuCIECBw8M4J0svf9BzEkNJkhFmJQiFb5iLMISQ6UkQAwoEjqroLGAgISURCYABgDoiIVFUSCbAY0ixivSCxik1gseyDBGYWHxgACaMoQkFNREgcGAhLdiLCBEF8dTZBF5zzeW5LRKMxS3Q1LHEihXOASpESoNyGNElESARQKWJllLblSJNGMj4ERQpIWeteNvHum7ljae5Q80jc31qdiw5trXY0zt588sSwc7nfXzi/89JjX/lkc0X1t4NJzL60tbG2e+9bbl1ZZjvg3UF3LKPd1S6i8V3JgJenp07c+6af+vH/9IlnepQ7NHUrKDYQiGMwSRSniUIMlhUoQOoMAD2w5mYsJim2euMgnIBqNDIIZS8v4lg3W4n3QWuqrHxosGvlqee23vaOw7h9htevqphMVqMIgivihgrlKGq0CrIi9vZ3tj9wzyv/9k+uvv/P19ZHWYKNmkEV7GCtp9N4vtXEoA7dfse5M89fv7apgkl8KWGUmTQxOkjpbG9nbYgZTU01FLUa7XqZewABGwyZa5e3vv8H//1nP/jHmESj3SfBjCwMGGpaUpFaKIdivU6jAr2i9Xf8+/2veJv6T//+0iMXy4WZucfOqxeuQ2cY86r/8ve+GLO4YRy8LRmNxmdeGH7pazs/Njzxmx948fqW1hnlbnRkoflHf3tmvQPL6fLb7jn27Yca//ZXv3DvW7+pv31xLikE+p/6xIv/6d///B/93t8cu/nYqHSuc/Y//MS7b7wPHz9x/rNf2n7kVFGbWlzbystRGUo+dW39x3/iv/7Mv/mhB848ONztqlQzYz7yve3h0RXprcmlq5v/7df+3daFsNBu/N4f/M6ffujDUVMnqtUtQ6c7vunoLbe98s1HTt7ziT/7le1rz7UXGtvb/dy6EMQkibBEUVyvEYYUXMTjnoLgmXrdobVl1kwGm1sqjVEZMKnUVBxrH+K/+fzowYcf+qs/++fZSuJ2C65pbsxgsUnUI+UDDBRsglsb581nTzeeemz0Hnf1xtcLJGV+/TJyZHagphe++Jjt7KpBOSjmIxY1KNxCM53TBtZGweCwCLmVod359V/6X//yJ3+80cyqo2D2xJ33veeekdDWmVULc8s33Lu4/+Zhf/fk7bOPPnvqxL2vClqfOnX6zLMbndUro/mk1dZHbrgxZtWei9tz08wJk37FvXdnczc+dn7odnr7jiya2ez5Zy+Ebm7UzvXnv7xwy33d/iWfX77trmOXru6MxjYCyfOSPSGZWl2Gw9H3/It/8XP/9Vve/KZ/ljRWNneHcczOFAxsVFqqRsIb77r/wBdfuHR0pbY0bedSuzA9lU752062r131v/+JbjyV5AMubKBKl/egFFBsnA9BkXelGp/SRU9lWTHoC4UojpGmmftx6o7fq3/QwP/8P88+8LXuzTcdXFrcNyx6u50+6+jQLQd7g/50vX7kxNEHv/y0LcoLF9asZSehKKA/7Ha3R3PTtZumWhunzizPTdca+sXnz+00Gne/4k4Z9p67cKYZJTv9Ioqi6XZDZSCuXGQxhjz7blksT7Uvnr/YuTZseCyur926f+GhL3312efPzBxe3B2OU1WvJXG7GWdpGiX65puWVs+d2djKb3rlkk5489zG8sGFLzz16C/9zw822jMUAyqM06x0aNJEGrX5gyv9K2fedOfNV3YGzgfwUpalic301FQR5YgOWer1ejn2KouYSxu0ThKlUlfIqbNnH3zs4Xe/730mbo1sefa5Z5rN7KHPne32hqCUcxqLJhDb/uh6b+PcZv9w2WwnN7A2drxTb2Uw0vVavbvR2xzlgmV7ps6Y5Y5BZNAbbV3ZKItx1JgpKZ2KlVFJMjO/cf36zMxSzWQ9la5dutBIG4l3l547zbnsm5/aXtteWJ557uraoXvvrS3PXt8cRmlraq5++fkLoNVgc6d57KaLF58/sDC7/+iB6YMHi43VwWgQ6fjukzcaHl26cj2dxsXlKT8aqNRkU3Pra5fSVOeyd5NcUDAijIPSCctWt0tm4oQGDEmmj9649C3vPbxwUpfRrhCmrTllppE1e+2d12k7YATIHjRGmEX4lrdki9N3/PIfrW50oCyDeImNirRWzgcCK8ERjctqtC0YkRKpql5RoZCqhv+xjn0IbD17IEOklfMigsgc2GultDZkQy0hCrZecws1mqpDv2uHAcaBWAhCgCiSdpxq4nx4YF9809Gs1Upa7faw3wuQnDq38cS5YW+sRgUSQAC07AWEGRgVcdUSxXs3u0IgikihquggioBAolgfPrJEevSq1xzKTu2sbY8loFKgjSIU7zkwhuBMoqtLIJGgqQrfk6BywN0C8m25tmMfu3Q90v7YUvMNt81M1RsQuD/urW30a9pkaeRZJYlJIqPEARCSNimBiUrnTZbFjTqwaBBENER+nCcmVXHiIZg40xBxYuywn+VYFnk+LsAPQyiRtFIaVRac1UEUY17IznZvVLL4AAzWlSySRClWZa5GtaenC6UVW0BVkB87G0eRBKeEityzUTqgVjCypTBrUDZwEiX1NGJnY6M4SKSwnUWjwBQAtFYanWMLaLQ6efTWnfVrRTHojIqItQJg7wKBChinsXgIrtREWps4NbnnJDWCFNgrpaO4VfrCGO2cI8RU16wXH8rRsK8p0VHmAgbkmelmnLUJWEWGvaRZDBMhIIAEYQH2kwAXTFqiRCbZLkBkEfUyoqiSHIQrT8Yk/jP5OU/8MhNBRarWeQUU9lJmiKrSFAhpgkgAqNx1lcxRTWnlZY1m0q0GhHvUZKAJOudlFhCCCPIEhjTxGvHExMR7+bVKRyGQAEhcCTl7j1NpMiww8bBU4PlJYA4mgKA97hdShUjeM+VUYhhPlBre2w5fFrgmuhJM8mKwZyoCQBD6hy0rVDMiV4Yfqo7BiVdr8nsySefBxIg4aY3fW/lqOSYQaBGZdJDJy6N6wElP28QWxhUmViby38TqBIzI/HKaTUCAUeGe5QcmMUKYgKX2FvhlwQ15D2xUGYsQhPccWpMYHwrsEber2FwI1bqLEAQCR64UCRgCqAAQJjlAJGQABZV+Pnnu1JjMUKIIgTVRJQwGgADMBBwkVDY1jcJeEwoCI2jauzuu/vrch/4plLzbs1Pt2vbq5d3BcN9SLRTcG/Ta822RmgQEo0O5G5Ev/Ozh/UeuX14ddPM0g5m51gsvXFqYmR8Kq0aoWS5K0UnUmoudU5df2tjqh
RvuvmXffOtrn/7Si9fz+tKK84xlGRW+yMv1nSIyRrM/dKDJgOwhH2fNmfm0HtcTTTzmSDXmF9bPbc9PL8zOrKxvdZVxHPo7u2vkB0VelCVbF0jX17e2661MZzxzYP/W5Y31a5u59aR5YaV1zxtvf/zLD59YuPnzn3v46G3Z0sHDcwdu/IWf/83eIASgwCDgAYRARSZKDC4sNA4eWnr1PTfdeLAZYS94HA9zlWV53/rRYGcwHlu3trE7dt5ktcWovb27CWZ48fkLBzLdqjWypozL4XRTLS3W3NjuW24Kw9krxc033/Nzf/70U+dHWcQgeOK2b1zdvtLdegk4Bq/b04vebf2r92bvfE3eNqMP/61Zqr/qu3/53rNf+MVhXmLtkNu183O2c/7a3MySObiv3l4v0KUwM76irjwVQbx4+30HhuXl3mDUuRxf2Tn80799qquStWuX1VR8cP/x7vY1O9zKmshiAWworPOoqDUaj4w2ABA8R1EUhFWatur1/va61izAoiCOzbg7nmrU3vb2133k4x/P6hkoBaS7u4PSh3YW+7FKk8TJqBxbE+1Xrh4rJd3LMWz8+L+893t//luFn+SwG0EZSk9pvSzKei3C0jz+Ofi+n/jMqiRpzeTjvG40g5legD/5d4cb9bPjfAwwFQJS7JOIxt2hAvBaW07PnOtd2sbnLsjy0n5tYtJcDmxZlFkrq7fa5aAc9oetmabROs2mldILS9Pd3npWM8VolDv14b96dr0bD7uDhX3tl06fBYB//2PfV7maVaQJKU0iAmLmIGKMARalVWwMMzvrQwjWOsfOmAqCLYhUfechATMrUqDI+8A+hBCqg5OUAoDY6MpxXX3zaEWKJnxEDSjMIoEIK/8LKcVQ2RpRxKPSCiB40coAVXg3Lq2rKrSccwgQaSNIvWGvKEqlTaw1gAQAQgjMrFSSpkkc1eM0eM8spbNaG630xFktwiEQESL5EAKL854rSygDIQkgA1tbVtMCIlSkK1ep1kaA95J0CAhaKUQipQzpCunnvXXBcwh7ADnw3gswixgTEQBzMDoiUiGIBwEEo41wYAHng0Y1QQMCs3fVPIQUiTADRDqqTq8cvA8ekYwxwbMyutovDhYgEGGej4iQOSilJ4E1Dt4HARQBYyJCVU0AqjNH5X5HpAmnIDBpUqhCCMxBa12ddxQqqZKCiFVb5oRlMPG1k/W2cH7sHLCvJQlVnwNCDuJDQKWUUorIkArMPohj75irj5FGQgRmsaXNi7wsrReJYyIyRikBYFLBOx8EQELwxpgsjiMdGaOrc2QQLxyEmRkDMyKW1jrPwvyRj3wWAIbu9PYGXHjp4enpsDg/R6oNHJeFHw4Hnf7q1154tFe8uNG5vLvRbSZpTPum5w40D5Lglc7OZqOVju149eKlV91/8iuf7+y8KEtCz51b2y6MgTRRUpRsrVOKmAEYlFbO+5cdykQEgiCoFbIEQEDGODIuCCARherCzBjtfGAR9sGVPkrVdLPWUuWrj8Fdt9fm6ts33rPP9svN9d7KLXPRis39wNQziiMgKzLUEqKkvX42fPB3Nv/w7yAf1hrt2sy8vrrdB5CWzlrLs92d1e3tHk1BU5QxLQ2xBt0fXn3Xu6ZvvRVvvO343/7l+T/4g82ZpRXL6H1IUjMaFT6oNM32N2caS8d+9F/f8MpXPsDZlivHUkqiCnYljUkbCiYwD6UeGE2DTvz8Tz31kS/q7mrWSoyjwC5oxqDRIClkReIEy4EVcMqosUdCqDWj3b79+jce2N7YONsVQ/Av3nXw8mOXxsnynz2yPYWYGN4aOQ36vte+mm1r2Otuda++ct/FX/rDr6vNvdg5fXV+7nXf+u4vnL66UksapR+S4vp0HQJpCVBydzhyAu3prLMzLBnmFg4+8OBXfuzn3s/iv+dt36Fl9/v/9U9oFQ7etq+30VnatzJbX/mhH/qxxEyffnHwib/69edf+tQg75VFmaQminWzlbmCIQiw3t1ufM8/+7m//tC/s8WWdSFrpSv7Z4b9EMo81Xj2ynaaJAqBUeJIQeBUF5laffvtrbe97RVLh2vNO+eYv0T5uhsOca6BW+W5x+2Jt7/+138OP/P3a7mc/rM/u2vh5oY9c86b/X/74c4f/c3q1m7dmDYbLPPRTDueq7s5DMHj5bXRbsHWxKWXLEpec2JGwhga0f/5i2cA4KUvPsy1Wam36mnWSpPNS5e0r33qUx+45cTc7vW1zc6aEVg8cmzj+pWHP/vZEsobbrl545oc399s1zugsp2+3tgcvPuHfySZ3r81lulpt5Rdfv9//G/pdBbZKHi1dnXjm7/vh/o7mxdeemR+caE3DC4fAUN/3IEQXNEvXC7ErX1H//qvP/7a21+1/8abrWXnczceF6MhE0eIG5sX7r6F3vKGG5/40qeTBUUad3K/tKjbAYqi/rXT8LcP9fshvv3kXJbOfuxTT5dFKC0oQK0xgL/9hqn3vsG+9ubxwpFsamZ+OEZtCKMpkQKoo6KNSNyVZ/e/76e6B5YP3HB8adTdtYAF6qnpdvBhc71ba8UsoTHX/vtPPj4uci9+UPhQ4uJMNh4V9UZGwSUaGol50z2vfubCtY08x7zcv9RsaX/8tkOH9x/vd8rmvsZzX/7qzvXr9bm2iurDPuzb1xwVI8jDm7/+jS+evlIKbnZ2tlavC/Ktd9/e3R5764t8ePny1fn5uZWD7UZSu36tt3x8LuYiBeZa7d/8ykfTWlMbHHUGIkCKQgA00djr244d+fb7j2Q0jpIswVh8oAjiZhzH8bDbV0jTU0vbO4Pnz1x4/Wvu7GxezrJGXKtPL8z50l147pRuTt9w643Xzl366gOfS7WfWZgHO/Xi6XOH7zx6dZC/+q63nP7qX0WJmCxNp+di1rvXerudcWM61pq8kGh97uK5h55/8Zve+GoFGMXNxfrMS6fO+2Evq/GBQ9NfeWG1Xl966537hoNBlkZRGpGKRGhnaydOSOnEj8e5LQZrq4nsNFOKfLxrQc+06422jhIVNYDDhTPnamqwvTHQS0fnEjPbjhzAzNIi9HZCKRLVg65PN+yZ5560jFmjbsexJxDTKIbjxnRWn25913//EwA4cvBgCMziRVARKTRRmhChuCJCt7yY/fS/+5b9c1uqbtPIe4SaSgrnlQRvrVG5TqyAsKg4qnEI4Lq+3Fi/BB/+GF5cMzyAo21YmY/mG7EOGMemPx50S3hhdfil84PNATKJQggctFY+cEWJDM6nKQEEBYA+eAcBEDSaKCryAokUcqQohmImdm9+1dz+JUrUoNnktN7s9un0izvz+5azrPGVx9cvrBZGqZv2qze9qX70Rj0e9qdnFjAQkQZxG9vlxevuS1+6vtXHeky33DTfrAelcGun6I/40tWiM2b2hCrq565f2LyQ4KQo0QWlAxBgrW4OHWzNToVv/Lpbzl3Kf/NPnwaWWhJZJyVbZwVRsXhSxM4JUvABKpotSGCOjEoSUqiC9R4UadQEDROOL9XvuXH/bDt1hVpf3ygcDUs91Z6fqtW0+BDYxBFozUEkSmrNJrON
FFLwrnSEKtKI7AQTqtWVNiwkZd+Ohsy+Pxo4a4vAUI5T9khR4V3OKkI7GI7FmDilSCfgAhlywQdxSZwEK4oiFSUzM7M6qvliWJY+KMwds7WaC1cUo4DkBErWWgo3KBwHlYUoSmfma0YHWygugh0MewNg6OVujFj4YEGSSOeCjVo0n9ShdFc2t0eIYwiAKlMoIQBJrVkzTEYkqUegVZIaRURJJqRJw7jIjYZ2KyvZmSwZlrvD4agQV2I/Eo5Yu4FENE2U1bOWFh2JxFp7y1Et+m+/8ecAcM97DoCwsFQXTjDpx9qTWypATxWI2tMG8OVsFkNls5loEIB7tVhQOVrCnoRTBaQIKhI24cQmM+l+R8AKybPH6IEKuzyRESqDCu09dyWw4KT1b2+4W5lXBKBqv4JQqVbsBapXwZOcg2Al8QgqRASeTGgrDPNEiUGBvb6sKrQGhLInkwBOfHZESPgyuGfPtVNVse+hhqrA216K6+UU1t5uIKLIhDCN1QibSCZOmZcbvCr4AxLyy5DsSpupgE3V/0GYCFQsIiSAUl3KIzCwcJjIeZM8bIW1JgWEUFm4KhGpWlIJIF5VuRImgDBp9TEEghImjCeYeLuYGQEn6/+ydUn20EiTZZisFU0+TAQACveARUEkAAiwDxwwsAQnbJkdQEBgRC8GSSNU5Y8CEGlCRAlgtNIERmOqSCsJFZBRAFBIYRAmhT6AZ7QioBUTCwdBRk3I8PBDm//gKvLexxoaNRYZoY6SdM4YVGAzno7UksbI+f75K1fvuu2OYmurWySdXuSGApbzcqimzStvXj5/Znen0DceXOHOZmCvMhiWw42NkupJakc82h5t777ivpXohbXtbr/vYTS2na0xGLO4Ml0WjtDFzfTaxe6l89v7juw/MIMYx+OdnVhcbzwIIqaEneuXrB1F9WZrvtld79TrDWXSfTMzL56/mIYQRrIwO9Mti30rU+PRdm+8ddN9y1978FQ9NrOz9ctnrzbnFx84f+Xkm946O70V2aJz7VqkBVCiSDMHZoXVmyNcFOHKtZ0LV7a++tVnZhvmX3znmw4cnifCPC99RFibvr6xCTYcXmx3B+tH989dfuqp5aJ3y416+cT85Yubcb039jwKcOAGE5Q8/jR/6rNbQzPz5FkVfeJFSdpZg0RyQHj2xQdvOHnvxuUn4yRilsKXvrS/+cdX7lq+c+mmfV945tLW+oN25epS0rvvvnTqaLz51E6tCbO3TI/Xi50XHmvcrmrTC9313aTVnr0hef6hp+ynrzaWV7IbXnNu0Pv0U9df+643PvHS829/zw+Muv2l5tQDD3/hzJPXOcTFkMuC4iQzKQQOad3Y0pOQQgwud5ad9VKMjGZGIQRbBGRMa0lRjq5cevHI4XkGdefxYxevXlUHFm66/Z5Pffrzu6qkqTnXrTda9aIjhfQLt3H3a/Tvf+iPuPNo4OuRVaUcspLEBlkY2sZH10TznW+a/fEffuPPfOBZSbUvR8NQzNZ0t9vPHdxydHF7+8qwL95HzsF4UDImrJzSYvzopv186w3ZYpYPIX/mdKcxu9hs1eN6IgpHRUkJHTyw/8KZq+20NTPT0FoJUpTEnq1KosFmuHy1z6qpFOT9YXUUpFlSeUGU0oSkkUQkikw1RAAi7xnQi3DpSu+9sw4Jg3eoMDIRKgPMVQIIkUAgePaeJxMnpYgIEI1Wish5r7UGwFC5foSJlLDYELwLCMFoVWV6PTtFBhEQSeuUhQmQNAqH4KsTFzCL8957z8GJYGmdC95LEATvPQoTYcWc1kZX0a7EJIaUF88ikTIISlNUurzi7gt6IXDe+uA9owD4EJRSgOjZs7DbAx1VNlwOHoEjEyOiVhER+RAEIFIGCFlYAosEBBHEIIKgjFaew2Tswex8BXVDANCkEXUI7AMHYCRyoaJIsiBb74LzgT0RVcQAIiRRqECrKITg3SRaxcxaV3IbOlsGYRAGZiK2pTNGk1KBg1aRcGW81ESBBYITNQFfSkBWSgOCdyzAIIEQhD0RiLBn60PVtiuajGNhkMCVmwqJEAC980g6MCsiQK6a82zwiIpDIK1IKfYCyCYyRBqACYAQHFQ1cCFUdaIIlgOEIMyBWUC8sGPWohQRI+go0kQYaRBxpWVFpAlBfHAm1pp0GZxjEBGFGECsdxCEBUih9ZMAGlPWbmT3vOqNZ599sLNxeW6uE0dmY/NqtjC7c+2Fje2XxmEzTvTRG/Zlpb73ttf1BrXmofSrz3TGdnT3iZPjPN+3HAHnrr8ZtQ6fv1Lu5rVYqyJn0ZLnIY0jRZQHq5Fio4OXCnQFyIJQllYRgdLOcUToQvA+6OqQsUGRJqN86V3hSatmMymUHQ58jnaU899c8V94wWDINn5ha/9M5qwpRld+4Huzb/u+WlZzo8GQMnJsOYkK6NaPyk/+2qFvfU9x/az+4F9eO2cB+uiK6TFmnUu9I+3hr/zu8X23r/zWz37xyZdGUoIbrP3YDx96z799M8PZBHj7cv2v/2YXtfa573cLJI2gDeJwq7vhJMTNj350cHIpru1PIxqIGagwICvlhjO1JphAxrEQFgLZxf/yo0f/y3tu/vF/84WvrlrvQqxgphbfc+vyuctr57d6GKV5D1RsFudb+RgGW708hHyrCBqefubCq47B+3/p29kOZmrRT/3Vc9s0nDPp9k6nWTf7901durINJj+4784vffGJTnf9f33on0D72Y2XXnJOQbz67nfN/OffyueWDl6+0jGoUw1lcEWRJ7VoqtHY3h6XDikoELd68aUf+cEf/pmf/E9l0E9ujn75Z358377Gzu7o0vlBolvv+Y4fueOGgw7MuNw9eLj1X3/hP7/rOx8C7UAFzzTquCyLMbhiVNxwx93v/sHv3Onnzflo6xp3+j1VS7NoZow9K4Odvm2103zsBmObJLEPHFyQmunsNJq3/vSJd3xLUTw4zB+K84vl5dC5CGZFOhfaaxeLS4+9eP6LaX/sBjz1tm96+o23L6a52djaPXM510sHlS99sHbMZVHmcXTg5v2PfO4rM4357lhsMOOSIcHFfc31ne7SQnvlwBzAMwAwzsdutKtZTc/VPJazN64UBb/zfd//2N//Rbe33t252krjQwfvHOzg9tr1YzceokK+6R1vmKpHue1ubmw9+ciX547cP7v/zuHw+rVnPnNZj7882D543z+/4fBNm+ceunz2oeUbFjevPtfd2Nra2t7Y2J5qN2dqs9e3VwN5YBkNy8DAOi7G7r3f80+3L2/VkgTJu14/yZJmPRnn+WCQN+cOvnj18tUPPbhM4VAjrOyL2lmIotgPSjvaOXkULu7Gn3q8uHBhw402ju2bPXV6rdGslYWdT+A933WbHVzf2RpdOKcG3dGNBy/GLVAZoE5ENQJo5xSC23+b+97vTv/bb77w4ur1N77+5vtvv+nMhQ3QjbKE2aWImK9e3ynAaYoG5aheM5ngiMPWMNcBmlo1pmpXrm93g5leuXFhWO7sXtNaH7/t8IsPPGI7S8UMXblUHq3N33rnvVvzl86eeuHg0ZlGM37xzHWM0s7
O+ld/5bfTdhMCapWgLe+865gfO/LUnpqZO3njwcPHdnZ2zly8stAeZklja703XN1JtP7www/W41p/MFLBMwiTlpJ9YB1ARQh1mTuy3D17ammlpUm5kQsq2NISmloyW5+ZvrhR/r9PP9y7fuHuW05mtTRKEw+q1xtrLQdPHC2NvnDupYap3X3r7Zv99dd/y1v++g8+HqVw6cKl81eurb704pvuvdUYMFoNh6XFsjmfTu2bLgNcOH+5KAtGL6xvOXrHzhYPxt1jx+vXNtcO7Ztbah94/ImvHL/1FX/64IXXzdUOHFi6cJWjOAlWRsMho43TKEuN8wLESRLXDq+sPXd5tj0zystE1w7tO3j54pX6gVpWS7ZWr9dS/6p7Xv/Jj3/h5v3LRmEvMAe/u7nTTEmUM8p3uuvjQZlG8cJ0vL07dKMcqDW0u3HdlKGQ7iTPUpQFoXLiY63jmAKzd4WOlIp8qx69/RteW6+3AgY38pRHHYfrg3ERUIeyXYtm27uE16A+JCFgMCrzTpNu1Bq9VxyXu07MHDh2+y03NQR2cFDaDmu25a6QTvrd1vTntz/4eK8AJGBh0kqxRiWiREDpmcQvLCY3Hpueaqi1a/1zq4OdYSitdUZrlEyFVuZvPtq4+RgtzY2yBmsdohg09rNayIxYdy2L43tPZp1xOH3dN/q8sDwrsNOcbVhb2lGBAFkis/XR9A3mpuUWxnHZGymzs7CvNtgemDjhgP1BqqKUtO72x7XpFe81oLl4aX19B89e85vrOQrMzzaP3NBu1kYNef5gQ33bm+qK1NEbjqyvD554/sL1jhuPXekNWyGToQiLKFIo4r0lrQrrQykUQRRTZQgaja3N4RlfXNg8dfJAfMfRxYXFrOT4uXPrly/3W9HUUj2ZaWWoqCysH1tjhjWwStyw1wMfOAAp4kRFGkxUl7wftGaA0hcuEEUJJXXvdg0wUOHc2BU8sFBIzIEDiA/jNGhNoW6i1MRMikEXHkxEpfe1SOd2VNMB0JNCHSlR0bjMQUpSQVjl4i1z7DmwWISBHWqqtYEjdGVGw1FQpOuNJjnJkjAmPc5H1RVREMlMBPmQBRYbcR5cGWUuoM9LZXTWbNZqcWoMMWpFQlTkOQlC6ZhLKyLBFz5srReOgWre1LTWcYijmeXF9SuXHUDUMGkylZpW3agw9JEy40HubchdUR0Fwfs9RlYABA5B9lg6iADME+fInvtCESEAByZUgHuNWpU3aEKpZsTJ3BGFCUGwYk5QFWCsLqorl47s9XpVTp6XMcl73hSBf4w2gpfJRQICQQKAIKkJEQgI9mDME9UHQECCTEJY1atUNKk3Q2GoCKnycpn8JEH1snwFE0GkeiSpIqJ7cS9B2kvGVVIOv+yrkT2od6WdEL9cRIjyso2l2mbPaFOtxl6GDWFS7gVAzFLVKfJe/Az3NgKYsMaxUpEEQQBJpAJyI8NeTnCCSEKQSYwuiFR7QxJQaE+bkwqYShAqparK4wFXL5VhL9AlAIBKqvQekuyFBBkYAbhCrk3ks2oGDwGBgKqdFBEUqghTgRm8eMfI6F1gJgxMghjEiAKAmIyOUCNqkEgZkgAohCAAFFO1joYREW3JAoigfAieGYkJhRBJCAXAi8UACqr7poBBTWJ9e1KRBDsY9OOs3t3pjEchai71epup4fm5ffkIbBj4MN6/uLi6ca6t6vWGGhTXFw4vba5dHg3iTgc2ru9sjv2VcQjnB4dq7f5wd9++/RcvX7i26tr1yHk76O8cPnYQeXTHLQuXznXPrXdLW9RnUy/Z7qjc2BgMxsMXLlwHJ1NTzem5emRQoU+bKbGOU9OcmTq/cSrNMtZlb9gr/bjo9NOphg/2+tqwMT8/vVhHF/VWd08/8OhCkfYHdncc0vWOlBxcmfeGS7NLUaP9lUce/45vObZ9ecMGnzTktW+9528/+pgEVpoRK/BVACA0AgjKaBbZyuGXPvCFY3fc8v3ve/u1Zz/XHfU82V7PZnm33x/NSnT+85frQK1aOboGL8AgSsEAj2x00TYfebrx6Knxhc3MDRxkKo7qufcRsrOitPIuiM2vnn2q2VxUEQz7OZlcgjgz/f6/2X7FormwMWj42u/93oVf+LETw92Rt8MS61pJacv162WnUyPA1r5UJSpuRDMno3uX5x/9281f+z9rj22f3R3EJpZa/Yq48omNM6O82N7dUYmJ0zgUiNmRV7zyfWee/9vx+FGlShEOpUMdAxIiRjUDpHxZuNIjQJLECrXzAhDCKMxOLw2H1zo9fv65HdJJrZ4++/z55ZnlYrhux+OaijJKt7fPHp8zt79+7r/84rfWa1tDO7XdSb/ytzt//OFHomTZRfp19598w9tv31cOD+5vRtOtEyvbr7n1prOb197zTfe24/69t93+gc+/9DcPXb7zlTco2ZCi32xOMaqyUCWLUumgk3smTZEmPDhHp66sTiVJr583GnPDnW6aKcYQ1+PRYDgzM92s1x1ZINMddEJZ1up6uDG4+My1Q4tLp7d7cy1UPIE4CqPRSivlgyOASJuKXMckImJdKYjBi/c+BB+EhSbCtjaRUhoJBYhBFGgGEcLgfVWxhQCVQG9IJzoJ4kySsiAjkDBUlCCu0NIiCIE5uECk1B4mmzloqgyCCIBBKr0gKG1s8KVzRWl9COw9ISilQghVGxd75x0AiGfWSiGgxpAgemtHzll2BBSRdsH7Ct8MASSweM0GgSIySpH1gaSiJpOXCShSkQocClfUdKYQDGmjTWXl5BAUVRMFJRWisjK/StBKKySo5l/AGjVzCAKCwizBB4WQRJEAk1IooIEYKvQeaFRBxHFgqPoAQBA4sAL0gQ1GHoL3PogoUloroyMRYe8BQYLXmhjQhSABCBUzg4giDQzAwXlHCoMP1gVCtFYAgLQSgMAegRjEOV8JXs67GMkzE1YTBhClPFWIJGOUIdSC4JmZPQiABAb03hmjCUATttOkyMcBQqiixUTMk0i2sAQJAoxQAa0JvQMiB1yGcuI01ipWCRk1yAsQiJWuzEPBBwisIgqkyrJEo0BAoQouBGDHYexKDRhAfGAOQaGCCgaxZzSl4NxoUPidWHqd3TObW72iHF7ZuMQX4l5vHLfIl5C10vmp1iOfe/r4DcdGo6jcTjWpVnO+XTt0+sJLc3MrMfhbTkwFd/wvH/lsHMdlaUW8Ip3ESoCH45K0YuDCeySJqCIekAhEsVYAKFzPjHecJppDUAqC5TTVGrUEASRTiwFlNByzcH06RiRxNp1KcrL9fuESfa3sNRtmxLW/+CKfurp+0wE4ctwcvT3J6jrUUadO0I/1pbnDcPj26DXvyso8/spfd37jT3bOrLoZsv/mB/cd3n92Ybr477902HVrYcicZK3j6NwXiXu+8Pe97sCNx+Lnzxfeh1Y7DezyYRknCcVx7sPNd9/03NOP/Ot//ej//n/vbqQIVEphXQlZO8nX+95AVAe+UFs7PfV3f3r50mb3jD0/4MyJqiVxCDBiee7CpTuOZ7/1q+/81f/xd+d3zIUdcByGOQP7ekLCsDDf6Gxsv/l77/
rArz3Ynk1e8w31X/zTb/k3P/YwDF1kyAcZ9PIsjTuba4PzD6uSvunETNY8bdXOTGOh36Xh89cPkc525YLf0pqVJmdhMLa9TtFwoDS4XOyoSHTUSk2p6Euf+vwPvvufFcPuv/ru77n1tmU32t0/07jnTa+7965XzbUWRiPfGY+nG5GizY995ks6ciszM1E8f+bs1f1H50bb4+lmq9Za+sbvefdwED7+p7/aUgVbWlxanl++cdAd7axd4zByjpJGDTg0alGSRcJilTjyNBP9+p/87nf/qx+gckrGx3T7Ldj+fHvBj7tiRhy4ISHc96q5Jz55fbcbTDr3iYf9NIWAUYnq6EI6vj72ZVCa6q1aDnT41jdsnH3RskqY+jtO67gsHEO42hnqSC5/das6CqZmF3q73mjIEqWMKUqrI1NCMRg5qsXH5m6+9NjzL331RSfjW24/2cjM7s7m7qDTHwQtg2eefOT7f/bfzZ24NxC7Yd7ZubB+de32V7+qOfuGg/NZquyjD/9d3FTPXDr32je8tXX4rq/+/d9v9FZvPTn70urW/PL04vTckMPCzBwotXF9o0ZaucKQh1BOz02P+kMQrtcyHZmsEe1saC/7tvKdM4/v3rDtZqZtnGG7OVOf2c27PNqEuprZWR90e3mcuFTpchhiwdfekLzuptG1jSKGhre80WVw+dwsNNtg2kU0nahsHnPykAOMv+Xbpy+uzX3oC+7jX3npIw+8JOU4IeWtOnj0QGSLWkRXLo1a81PcyMb5CCRXvkxiLdbm/ZEf53PN2jD3v/OBP9jqdNtzjcVG40uf+drx5Zn5owvNI9M3TB/QpMCGxr65t933dU995fFLVy84Zh7bfQcOJgqXDi+Rw93d8cqBNhCYOJtbqDmWEBzF0exMi9TyVM14W0Iq3GProt2uLfLu8aWoN7YdZ5glTpJmnIwHhcKwfu7Cpz+N73jjnaqWjseWEvQlg0rmFvddPN9//2/9+fndjftfc99SgqdfeuT4sZVGq8mkvRelaTgqalPJ0tJysB7N/LWd9b/67T+7dG6jNZeOWRBkeelgERJPAYl2N3djMrrGg3IwcuX2znazXju0sv/cqbVWs1b6olFrK6ugGM3vm7dl/4bjh5569OkkgVZr6sy51en5mtKaSymt9UGmmwd3d04LQK3WKL3Pu2MdYLA9JIgA4fKlq4mK+2uDqUMrly+fLovR333+E7HC7oXTG3m46y3fmnc3/WDd6Dgv/Xg0as/Njwa9QsrtjZ3O7paR9qGjdz7+2MPItbzrlo/dUB0FRisWTHRMID4IgmhF4jhtLdx4112nrvtn//LJYXd9MMi9l43uULuchJVwPcG7j+sfeN/BfYedxIEZgw8CLgTdmJ+7+62Zs43tPId2A7zywD6A64/ZcKKL6Wzw9Xe2v/BCb90qrTSXuUFrlBxsx0eWo5M3Nu58RVqbdlmNkZzg3Kg7//QjV65u2Ea7vjxfK3prMwtZTL3WFGvlbWkRdJok7FzUomYtJh35ssTQO7pEL63CpW352in3hlvaMZhiXNjCG4VetB1xoHE9E0V9ZJuPeOviMDgfJX0AW48za0VyXyPXjna8R9I4e4OWo9HwFqy165SZUX8URxvgOAwKOpS89vUzHGwIV0Gpd3/TbK8nFy+OTp0qBkNz+ard7Fnr0WgsLZViXOAsTgmUJ3IuJxZvbaRAEGzpSlQXVsvOYC0xpJUK1hMnu4MdcDHp2kptmvLSjYeg1G65PR4X48FIQkDALItqiTHA9XqbIuOci5oNig1SbIthXhZcFhDKcjy0LkdKHHNRDKxnT9gvR/NTM600RoW5LQXQA8aJiRITA8VGKyACMmkKGLxUDSxcWlu6Ivcht4Vj9KzEWRfYB0Ou5HgoNSXKcV5AEGAi0LUo0uJrSeS8BxZAUXkxzAsdJ9P12mA0KgMntaQ+3ySVkq4HV9bSuCwLx04Cq4DeerBsnR2NR0VZUqxDAC9KDax1eTYVDSPf3RrEKs7dOG43KIjLB4WOSEjQsKAPDDxpw3zZ+1EhI6FiEU8Y1LiX7sKXycTM1QhVCwAwAyEIExLKpJUdFU5CScwKpSqZYqq2pT2UzkQGqdA8iCKgquxCFXL8hzQYTPJJEy8TQgU/qqrLJpk1AUTkKmg2UV6C7Ck1lUQkQJUnafLMWJW48SR9hnvRskkyDvnlcqzK+jOReCapKZgApwEJX26Fx5dRQJW/pvLqT7JY1T8qSQlfftxJUE5e3kWEf+TNqV7NXlJrTy+rHgb20maT6rBKaAOYhO1kIuIIV/cXe28piEAQQGB8WaPCynm0F/Cr1liQRFCAgZg9AYHwXsJNsVTw+4mfqlIAuVKjqpohIASUIMJAgsDEQhLIO9Ae2QUUnhiyFLGABohEs2cDETJSCGov8ZdqFayPgLSCehJXBTWl98zChN5TxU4dOV+FCm2QwjmWsPd2hbqmZhR5663nMagCmAiIWFDMnnI3kYoGOwPXz2v7ZhVys6lLHU219o23LqLJ8qIkJiC1vds5dPzA6ulTU61iaf/cmVPP14xZXmjZItvd7iVTc4mMd9c7Jm2ORmKjXRdMq7lQq9WK7rC/xVfPlLGKyhxGXUmVf/urDp4/tYU61XHjlFEvXBo36i0d6TvvO1mnWne9MztbywuMTJqkrW5nODdVQ7BFb9VDNt51RK7byZeXlgyPjEYcjMdF58Qt+zfGx4IuxkO30/FeQhHSwWB8srEy6I4un1oz1tjBxtETy+cvXK231Dd8wz0PPvDc1m7BgsygDCpFSOSdA0LnvCKI48gFfPqxZ//P+a+98wicWFFpFDzAbbc3IzQPPT48fLi9smRuOpJ+8YuXu5aa802fxh9+eviFCyEzuS+UBo0REDCXI++CMGtjpucObK1dijOtlLWlpziN4qYdDq3Nx6i++tjG1I3Zz/zzk08+dk4ivPmV+9Kot3Z90444qjfjGqVtwUibNO6t7yzuj5Fd4I1sX/zqf3Los0+sPrptFlfUyIVuZytLzcaOLUqLOgYincRlAY36zBtf8+ob5tY/9PdfE4/aGKzHQGBHpQQ2YNgFAq2TNICgUVrYW0tItl4fqHRUpKJruZkWDvsOHL826p48Pvtt3/na//aL/2Ew7uZR/9C0/58//6ab35yuX/tSubVw4x2v3d5qf+yBxzu7i7OzS6cvnbp44XN//iefWa5FSwvLCRarm/mlMj9508H7j8wfnFN333Lbp56h3/3Vr92zPP8Nbzuul7t23KGY6+369voApGxPJaUN3oa8CCqGk7ckcg5+76/Pqef6r77r6KGD893uLiviwMAWlQLvs1bW2ezX6jFYfOH8dnMqfd/b7/3LB5+7/NyZwWDiKkrSyLnAHIzWmhRNasnBexcCEyELBh+C8yAQa8PEQEqYAwuLMyYKgUOohFghpFhHXhiBEHWkScQhgGNbfemSQmcLRZpFgjAKEpEitM4H4VhHlZERACSEyERERIhCqJBAkNG5Qsqy8CGUhQ0EjOKZjVYuhOADh1AB0rwPShEBkVI+BAVcFKUmAEDPQRtiCgFYsZqcI
FEIJ75EIkJUQgDMzjtFGgRQUYQgARQZHavKAqOVwcruRBCcRw5YJdoIiVBeLpgNniUAIqFC0tYHYSatScj5YAMLoLMsKFpXX69BawNI3hbMxMwISmsFk9Z5JyCR0rJ3hkNQsSIgwj03rlK6LMaAwi5Y75hBkVKkmZlQwAfHFgkIQjkunbVEyD4oUiY24oMPzAIeJqUL7JmACSWAK8qSAYiAEJ1oQmW0RhRgi8QERMJKkRcXgoiwQsFgibTRBkmzM93hEBEkCCkSwMDEPq8uSgDQRJqwAl8GECABCWK9J6zeTq01RiZoQmVIIRCzUghKjUvLngNzcKyVFgkEEhgcexJBQOcZEaqqOGdtcJ795JRAelVHje7OhaXjyeqjF9a2r3Kmd3Q3z71OTb3RWFnY/+xzl7avbG3koy98+fPa0oD8t373uwfDcb67Dd4fPX7v1mX7a//jV/fPbkSEnf7YWtEAEKlxMY60iowhABFhx0YrpTGw9yEkURSCgIAERgGFolAEwGhjhJWhsiyDBKoBkkSx0WIbczEB9q+PxiMZbjljsDe2rTZ0hs4jSIBrXT1+ofGxr4nC8p6byjfeI698TTwzr9I2SuJ0FI+GQEp0Vr71Pc23fvftf/V7F5Mkfse/nB1IX6FXqq9qXZM2GRyYMTqKY+HI885OMRzlo7hewxDsYDePG1EI5ezinOu7s0886sZ6vbzhm1/7kZ9637Fv+ZHDQXd9Kuipthh5DqMd9Xs/N/j0AxFOHV2H8TBxqWJDrEi53MX1bGccfeyJ4e7vPPDuNx6/687ZX/vfj331pW423fTOFEFckO2dYTtWn/jMpUfOuqMr7vt/+PgTDzzRGXS1qoXgoyhZXx1mzaQssDfobGx0f+IX7xtc/5t60iKY7V3fTONkfmbu6NF4Q2qjfMReWLSJ1dz+aTcOxSjMLk+lWbx9dWtqqr4jMh6Pfuznfii44qYT7daM2rzaO9g+9O3f8J3Ly3NFQTpFc6W/cmD+y5//yv/65V9XUUGcbeeS73jJMI6y5tLS8RsOfOz//vYrX3HHsqH1jqvVWrkNftTf7m6W/TzLVNJI40YmPqBBdi5LE289OQ5RbIutj37gl9/1bd8Q68WcbjaHTXZ4vf/Fx7tXR7OtZGiLo7csH3smc1GkW7OSCxf9pZv3Xzpz+Ymz55bb0zPTzYLDpUtbGSBsj7/927/v/e//HzfddtPifnj2pfX/+LP/9swLT3762pfGoIa9ySR5YeVAMbySpRE4ZxJN9agYWyvDG191J7mjaGl0mXuD0R2vv3dheq6zvXr3oZsuXyqfffTx17zu/rd/99fPnDzJUVIMfXt2pTZ/ZMb7y6efm+o3brv5PcW1QaRqU82p2uK+OFuZW1q46T574EBy/vwL7/iObxrsrI/75YkTN/d2uy4vpxuNONJDkq2Na1ML80mzrkU6u5tZLWuQYevaaZMpsKqP7MLq5uDSlfU4yRfngAKKmr1yebzbHaR1JVHECuIGOYu9fnHkhrnHvnp+dg4WDtQike4ALnZwpOMDMVBeNMfd2jwrbVBBCAaj9J/92+//4Of+j1aQDwsWNQ4caXX60tX7ltVdB+OnL64NzELezYOHviUj2kSpBxCQwdAL+TQ2cayXklk7LKFFjs2u1GwO5595tt4+cPzGE5dPl1evDQfjZHnptqWp4yfuvP3S6QuthSai1km8euHa9Gy7MV3b3unESUSahEVrnY/LpFGbz7Rie+7Z1Z1y49j+2d/6yONW59//bbf/8A+/pjW/8szTnX/zH35nu8xtLq4EE8e2gM9+4YVItV9921HAEESSejvH+m/9yod3u1s+SCNrnty/eO7CWVvo2LQNKJOQF0gSPcwRymHanlrbGtZarbnm8nDbHz3WPH/2VDLTOLw4ffPNy94GO7Kq0Zhbntre3Ont9Nd3B7qZtdozKcWb6z0i8KOdrBZD0MPR8MYD+1zuQKAYmhrUltOdb3jzLQ8/erGz65rNyKOCuDWFRJIkpu68zYuxD2VNGW8iYdJppGpNH8UgsjiXmaw3t3wQvOwOdl/xmv2XXrj64vPnnt784ze/6rXTEe32us7lvighms2LIGCGPbd+eWO2KdevPJGk5bjruTZV38NaBwCjSGnFPqAiTVqhcqUf90ZPPPrUYDgAR7E2AKIIgs40xSo2WhnrwsPPDzp/cPm970xuPZlQfQgaMIpIp2VhsxrXavLAVx8/kLy9vTKtZRiMDtBwRaxFoDtqlnyYAulycV/rlhMrx47Xjx2rzc5Ca9aj6kdJ4ZwVjgTrglFWH791YWZUMLEV24WgxDh2EIIDiVSWKlUDZCnzUHKUoIqVDWF2Wb51sdUv+o+elt/83fMXb8/+6bfcYhJfS00x6pdOe1bWS5KAMTqupUW/LMqQpU1UDBBARbGhsrBElkdRFLWYc6XBAxvKxZVQmlocGR0FjtT8tHPM3hqyWV17m5toPH8gObKgX31XzZcyGqcDCzqKh7u9RnNmeye/dGnn+mr/xfNue5SMNCGZYR+GI49auzIoEGHsDcZZqj17Bboe09C6gWdLUFjWYwtsbVEQgQ2YF44ItVK5lKOyaCfGQ1+ZWJOCwkUAWqNzZRS8gjK3uXPWh4jAKGENHFBbkKjW1lF9bnY2duVw5EsXgog2zShODWFZ5KQ1QGZUohJvgxNQwYyGheqNgxWvkAiVOEccYoyFMnKI/Xw0yFHZlLwh4614Vg5BlA7B+wB5WUaJiogByDpvhR0iqThO6nGSAhoBJUTj/sA5C0p88ChsNIjAqOyzt0oBA4tSg5EN1kVa220bNVTmqChHWVPxKLgiR6eFWKMuwDvriShLJ3fHk1EmV/22MsERT8wkAkCV5WWvKB1EAHiSMptAgRFAApMSJQIICiAgAJAhz4IhiAiJEAYIPIFVT+DQE+YNVDDlipAEVOF5KnxOlRmDqnceASoychV9ehkdxJVnX/YSYBPXEAAgCe0F6qrXWikLL8MrK9lqIt5UTh9EBhGoLuxlbwcrS9GeWjSpUJvEw/aWBgEq5Dbs6TV7DzkRgPYA4TChBME/slbBnjBWPezEkoMoQYSq5IcAgDDjXh3cRCab8H8q59NeyaFUgClm4b11qXa0qjqsInkAWBX4SAhVQyPihEcxQcTyRLITmLi1RFMVXkMWIUTgSiFiVa1OgIqTPnEWMEJA7xV7iILmkhUDshIgpQmBUYsijADFBUVEQYSDAcVelFLAAb1XKGkCWsC4sQlgSQVSXecLJwJeArAIoGLxpWdXKWEcKudURBg8oAsNpQkkR3FMyBACE0jAsPcpAACAD/zKG1yvtBBPzc1Ot1qr3W67Bnlvw+tWGi8sTGfra+cJMWrXB1vDcjScXppCSHk0Hg18bwS2jC9udUPTzE1Fy83Zi2cvzi9PM8pat8iybDomQCPBLMwt9/u9a5fPxnWqGz/u+a3+qD3XJjNTWziEXg8Ll2lrdweNLEnrjd3O2FoYdEoTcxM647wf6s0bbrt189JlKfPzq528dK04ufPWIzujnYM3Lkc1efAr565c2ew6XN8a29Jj
EfZN1e++/Vgod3YDfeGBy+/6xrvjtMfeRlnU1PWvPnn5C4+9WA58AImMKYuyIjULComE4Ews7aZZFL5/hr/1TmWL8hX3Ll1a7e7siCWZvukwzNPm5QuxLTq9hb+7MP3RxzetKNcTKChKabqRdLZ7OtKiwJZOpxqAgwdFxpjI+iLYAIj7b7wndHpr115CBU5pKfyt++HZZ3/4pQ9/bLe3PjfrTd03VpaKQWRQSTlQpHQTxLiAPdbddG4qeC+DjPt3PP6VE+//0MUX1890OsMoioMPCiCQV16FQKLjdq0WvBsOysXDSUiSwWZZ9DsQI7MNZUkKAUBpzYyRqVWV8CC+zItGq4bKLGfJ937LG85s2CtbRapyxOTSqFhZ0hCaTz57YdAfjvr5d7zz3ne8kgoZ33rHodjX1i6u//ff/HiHj4SQjfMBQ5kYLMe0O+Rx0BAJdHbT2WaQ/Otfvfze97wpRfVdP/unqjuqRfmv//w9r74vw+JcUayjL0edfqKpdMFD6grxgYd5KMrR6k7yS7/vL6ymYKRRszedWHzd6+7OtC/HXVAmlEVrOtIE7Vb9pec23v/nj/zYD7x7Zjavz2ZFb/D4Yy985KPnAOAX/uOPOh+AxcRGazNJvyqy1okAc6iUcmetMBMiIjGA50CKFCDRpDwAABSRIqrOInuRW0YUgYr1po1SgQUR2XvnvTGKkEQIJAQBCZPnCsFXHtDIaKM1Ela9mT4E712eFwxARCLCAN4HbwMzM7BzTilNhBp1CEGErS2RMIpMWstQUOuo6rNXSgmyiSJ2UpR5ZX8VCSH4KIqMjljQeh/YA2EIDCDa6EgbZPCeWcS5kghMZDhwpI0gWOsqUA4hKaWrqB0CuGBFhJBK71CUkyACyKyVJiTPAUgH7xCEQzBGa22Q0HEAUN6XErg6Q5BCpU1gX40IEGVybqv6dwFJgQ+VVYtDCM7mzB4QmAVRISrSGiQQEhEV1pXOluOxdU7YWVtGRgOAUqgUKVJIOqvXQFSUJN5ZH7x1BYXgIegIa7WEA9uA3jMZo1AFcQ2dGiEQEWLnytLmELw2SAjeYpzURaXWcuFLFAzMSRRpraoZjPOeELRSAqCNZgHrQgjiA5fBiTAQcQjeBQmBUYxWaRxXnt4g7Jx3PpSlU4hAkCaxNhqRnGdg5hAUqcBc9dAxsw/snA82fOozDwDApz71fQeaR3b722evPtGx17kuq4O+bmb1Zn00KEXrWnPaFZHthmG/N9OIHv/k0/X57Hv/+T9Zu77Tnm6iqT/0qYc/+DtPaZelccbiXG41agQJQbxzSqtE6SxKB8NhFBtSVHhLCD6I0YQMzF4r7Zw3hrRBZlRkDIac7dQ+c+AVswdfuzK3NEMls5RTy6061csr5cf/8qGnPrvGOebB12o6S81o7Hzhg/Yrs+31obVAbU3eFfNz9PbX6q97Szrf6qczEZIw+6iulLP1uRbEU+XIWrUtIFE2jUU3KNIGXZETpUQRcwe0xnLpe7/j+mOnmtPTs/MLh9/5lnf99p/+8uJi7dyZjQZmtWbj+F13rp3fdXa4deXZn/jpW971zrGaHmGxRVhELe92mmc/fPjn/sul1dR0uKjVIEYqXAheCFXaSBUl3rHX7sYl/0++vvGKozOPPLT68Uc3HzvNXWdUhO063nmo8dxz107cd9vZc5ff963+R3/0bT/0A499+bmyVtPjcdjtDOqtdOXAPtb13ta1Jz73up2tj0VRTLbd6djpOHnmself/It2SOu9rWfHeW4DRLGqzzRC6VWZG0X7bjh47fzaYDxCk9ihD8JxQyeRLofu5IkbfuEX3s+q7QJYG5o1AgtpU7/3B34ghH53MMjS2I9DvTnXyhYX9y8//fTzxeDqzccW2q362rWtI8cPf+FrT+RFqbQBre694/6Lzz7cKQa5yIH5hStr16IosqXjEBKtwKCJuZ5sfeQLf6MNk1wuRy9k9d1xWbRitXvmoYf+ZvUbf+wdUPu124+/JsydmMtuefGlU2/6p29du/SZ7qVLR44tPf6ZJ191/2uGXKw+d5YH8onP/8Uf/fp/v7K22RfV7/NP/sS//uDf/NWzZ86mmhbrzb/54rMAcO6RF7O4mbbS7XF/5cASEHS3B2defGlpaoqLzsb1KxS3dvt55Ae4u8bJ+OgrXnHu2d3p1szs/kNl2pR6PD1XpwBY9DYunxv2uy899kCzPVf0BqvXr9RqMUrcKcJr7zly8YXT253rUSNyhV/adzgf9nb7w6m0vrG5RXFa5EWnu6NVRkiHTxyuSLMBPKPoAJ59vzPSMSqAQElvd9uWeZAyifTqWnfgaNAvNnY252fMxtgWXnp9uzsmEP+d90/ffJC9Hzcasjhno5hAkQ++XscUfaZhasE02olJlHVqyIe+9cdfXL2QsbZaiUkMomKnxs5+7UPfFdPnkuQ6hejigx1r6YsP8vPr8PT21I6lZmKkLGuZbjbSYXeYJElqKHi703ff8M63Hm1mj3/pyeWDB7OmOnjTESXp1SubU3MH+tvr7bnM8I5n5wqJmzUEKJxjUkmtFhljC+9sEAAXmJEhFG6wu7l6PWpDpmq/8v+eWlrBBz7/czvXXlo6eE9aW3r0a0/90x/6z3meKdPOHRptYqNDcPfcctPbv/7bZufmf+X9vzoESw5rNbV5/tJUy3zj606uLLQPHFrMMMuLPGtkaZx0ev3gfJSaOK17r/LxiDSuXu9E0/XpuenHHnlseG29Od3URFGS7g6GW1vrWcKqxPXO4Pb77/Nj8bkbjbplKKdb9Oxjp+Na68DJE+20kYzGKyv1zvbuykzWCflos+v1wpGTB3v9nVwlw3FZD+y8F86FOQRvfXnD4tHnP/8Hx46f6O4MdXMRp1uRlBGxIxNUs9f3RpfS3Vpd7+0/eOIrp6/ecfet0zy24/XDR45bS73Sj0Y9LsqacedefDJR0Bv4Q/uWFo++6pOf+eShW479uw9+FQD27z+glamIsVrpqgFVIQIDKEIN3oohQ6jRKFDYbFCkyRdMaOLabIjVrfu2vv5+uO1kbmpGgLCWkQo8Gms19cIzsno2f8cb7zdyChVo07YjnW+vx2EHUrGtJWvyxrRJauQ4B7EsBRoMISgApYxAYl3E3kamVKosygJDjoAhdy4EdoUrC8CGtaxU7HzpxjlQghwQSAC8D5nSp69kH/jE4MnTxM5r1fv+bz/0zW/NbL5NCEmaOAYlZRyJoXSYlyFIlKSRMdYVhICixJFHr42QbiSRHvU6gYXiyBh0oaxlWTnOo6TmAyGSjgjRB2alDaDXGAprnc9dMUwbNcdeReLKQqNGZUDEF253S2/sxE8+3j97uby4Slv9pITYB/DMSRyHEJz1hfMAqBhUHMVxnMW0VK/PqJApljJH4BBAKwWEgLrwPtOURhxHEQnWsyxrNBFFjEEA55wrhqgj8dIbD0LwrgTRZhR4HCBpNg8vzE9rbMS0ttl3jBjFjdY0aVdl9ZWOSSeRJoVYlBao2d2+vrV1pTceB22EQyQKXQnOxyYbBe3HfiY2inKEQiuIDXkHLqAy0cg6AOU
FAoFHTiJQRisV5Y5RUVJrqCgDUoDkRcSNjbBCKZ317AJ7W5alt0XprePShYAIRoOOlTJawHoZeEsRlsE2WyZL0lbWSHXCPhS5RVJIxAwzrdqv/cnHAOC2b50FqIpVKk2BhfllaPLEb6MIEehlBjSLwiqQEISUCKPSQEoTG/RaWDEExoBmjICE6AScAyQOAqAQtAQklKqbZeL+qNwzMuEXIVIVVEIUrnhCUAkdwIBAVKkwlUaClbxUuXAmHCWWSetvJW1NPC8vY5IU/UNXO0z2ak9qmXC1Zc9HNYE3V31pk8I2gj1XTiUnCbJIAAFkEGYGCS8/xESDApjAgiqP1sQzNMnsKaxkN6ra3CrhqdoGBYCQAZBAEGivtKvSfyojEoPwJGnHe6+7knYqpxhgtSIiAKSwWj1ApRTIXimcgmosjhOda9JLJ8LBV3RUUJUpTFgAGRUrYBHnEZFZqoplYUQWrlDpHoQRhFCIgqRBFIMvg2cMWhFxu4lphIakFmtgjgyJZQLUrCRQab2JFAEz+rSuDAGyD0jnN/xOgZ0CrBCAsOeXM3kgwrAnYO2xriLSbYTDjSQ2dH1YbpROFLKSwAJB1rZ68A8NaEhpI56q1b2zUo5iKHd3t5fm5nc7fmN11RWN6fZc2c/76+XmbhEnzUtXbL3dsoNxsx4lCfW7/tjJlXOrG+Lk8uaV5ly9mS10dgZNUOOt0k9PAcBgt7+xujN/ZP7wycW8v6sEeoORj7zOAFS+sX5xtnmo39fN6ajeVgLeSj9rJ/lGYVK3sJJpTO12EK1X116q1ZJg6MiJA4zKdjrb42ui9eZmpzkVJc1mbrd7Q9fvWqMgQQhYnjgx+8RDFylJV2YzsW48KkG5fTcs7K7tfN1r7vvkl55qpAkZba1PG4lK4sDOasnyzjvvWZ5vj289EKZ56sp20WuqL68NPvBxG+m59VODV9w21Wz4G2fljvunMhVKfsXP/OgTo75KjE419MEhwKA/RARQBiNJSETFRW6TyDgLsysnrq+eMbEvh0V/syOjASVaAFUQT/Dcav+//PeP/cCb97XaHE05iUfGlBBb19WjtVGz6eOGysNulHGW1HzfJPWja8OVC+snL5jD7/qBr0sf/OiJY3f+9Uc+CUiD/sbU4vTw6lBB++a73jO8+mGHG9TZmbvphttOvPKDv/+nmOl8MFaGkOLAgYBDCN47CAyofAiKIKllQEli9De/aeodb99858xCLTnS31qLaP/l9fIv/u4rjzz/5GZP0qSdZOozX35quLty/MjK+TOrp1c7Z89eu7Sapa2ikH7hrGGfoAyHwVMUa9IoNi2MDzqor3zt6hOnPuLzQdktUuW6g/Knf+WBX/zJb37tXXcm9W1qG57KOy9c2Dn15NRBNXXoRrY92emV+ejQwYXp9u7V9UCa8lx/9ZHrX3v8+k0HWq+4bf++wyuo5MLl3VqdNnvd9UF+z7HDywuNC2eeX+q355Znb7n5hkoq4kpvpj3AGpJjDqUFEO8DMBNNuHUmiozWpS1jbQwYqJxFwsZERmsU0VoTUpj8ClrnBYBQAWlmBsSJ+7MSPhCh0sWrunsQVOidAyQf2IVgjCbvCCHWEYNYH4bjfPKSlCaliRQSBO89eesDCxutRIgItCYO5IJToFmAAUsXEhMBgtIqcPDOGk1KMLCPIgMI7DkEAYpCmBg5mas0HClkECFSleAe2AUfFJExWpiNNoGD9c7ZgCCRMYoQgX3wEhgAHXtF5CGEEEAYgIMPkY6ExbL1HEhXuWwhUgIQgkOGwEGEOITJkICdRo2eVXV6qiQ9QEBwIaAIsPjApEhErLMsARQRaQBUEQRf4aSCMYYRrQ+MiERCyom1jgvHyEGCaI2R1u1GEsVJktWzOOMQWFPhnLV2Nx/V4qgVp2mUgivTWKOCMi+MBgPoyj4yeoHc2WFesvchBM+ilCIkGFlBDaBIYWBAJBZGrwBEkwIOCOQooFKl85UwVOUUCZgQJQQWrkphQmBkKAqHBAJYWMthYqkFJE2aPaMSAGbngrAGCAF4UiQhRKQARVDrybngM5/5+PLMkhdrM+v0WOmY2wkmpnSeydlgU6g16yq3srYFzofa3Byken318uzKVD0efuavHvjUX1yDcY1Y9/MSxE81U+esAqRIVdcGI+e19kmaKK2KshQGpTGKteegNCg0RKC1NkYXIx8rJcxi7D1vPvJtP/7WDb+747oSYxny4O3mAC/trK005t74fa/74if+JLPJwcWpTm8kkRjPNy1nX/8Nd//X//ulWqtGzOORGBVfvKL+5K/hc1/qfdeb+fVvo6kZq5TYodVMnfUOmu20YZSAL1Pb34k1MzuJONLKFyXqEMVYDsurp3dsni7Uo25n99R25+zZhwLIvEt1mjLoW+65/auPn0qipNlKAWbf/78uXH0a/+2vv2toXtB+c+fSuQPL+2//1ntv/8jF/sAXnRCbpJoaRjEyY+EDURnHkbjo+XV1/WM7//Ib6697x8Ejd0af+uTqX35+3PdpYdHM1F9xx8pg0MnH46cu+D//vy89/EyRj8t9S3OdTlcZrUj1N3sqgmaRf/kzj9z9qsUsK4vt7aK3a47c9Rd//cKZM/sX9i+tHDi+un3B746SrG5HVvr2vf/sn733R374xRee+N3/8Wtnrp4FpaNIAeP2bj9qpOzo/jfeH1GtuTIdiIebvfZ0Y7DTe//7/z+x62kj7g/ElqHVah5c3n/zjW97w5vu+J+7PzfyMy+ev751fXj02MFwZby53dcRhKGt1ZsvXl6HxsLs4grZrJalU3koy6HKtEBopsloJI3EcBj9px997//4jV8MxqT1mzDsapz1cMwcv/ut/+F84V05/MLDX/7j//P7f3T6hfbX/+tfXrx19vK5xpX4kfmDNzbe8/ZuR9z4fL29ES3VPvTpB87tuq0h+BBWprL5Rb21frUeR6NhuVn2q6PAmEhpdMHNTE9rMt31rd3LG6unXqjvv+G5Rx/af/JIttQ6vHw46nYeeeL50q/ZURd9kuOh9V1cvvn+3d0ONLNRb7R64YWt1TNRou5/01uff+yLSc3dfN+BfDRKoXnu0aeFDgmPkziNtD5y842d4fDA4RvNMNSsW9/cbLayes0sLk8Ncg+irLPBIitySkjFxWgo4NMkciJBbOly0FGSpNPtrHN9q1mrzaQ4bPeOHGqPx9udi2uukDtvXDh7Pb++Wzx7eZRFycL07LkLG2EUZmYlrrmkBt4BG1VIc+0aF0PfXoEoQbsDVy6SK0MCnKja0OaxIEZJUof/98GP/ex/OD5ePwM0uuctUPT49W+udbb9V1/Sv/DHW2fWINHRKE/HRTBGrW/2ayllsZlpNa+ePdVp6KOvOvnEky/MjpMyyV945krp5Npuxw/LA/um3nL/7e2pWr1OhR2TjhJjHGAxHDoVZ7VaHJnxeBy81zpynu3Yjjp5ban92QeeH7n+3UdO5t2NqZWjJUca3N2vfcXP/sx7f/pnP6DNTKPeKks/vTDvisFaZ+MP//QPWkkjTk2Re4uhm+fWFffeetNKu7ayPNtqNY
ORIIiAIRFiQUjIIMREjkY1SEqJUIECMzC0CaJD4IAkhkrdBoEhCInhm1wihorYlERKINRhZMbKtlDjT5wTvzb3nrzC33tprzCsKIQrzzrur4wfHvfIqfOhP74xBqFSYV6FS0jWg8u6wN994NL7/Ee0MkBkItDqI4tGkMSkCKuhQFkqg6IhIqDei1gMRaCTtltJDEeiQlTHbtE6/6R1+T03tZkaTA9L9FRcqSAlaKqZEMhj2re1nadkWxcvDQ1t7L4/72yuxyUs2FiVFgHIcC1XCnNxntzLeWu53OqKRspvHIa691bzz86trFW+6/NZnjra8+NaGk7m8E5977LTdtj/ZvnF0Y7ewlSRVR5cQy3kr1pJlMxqNJQ8Ludl9iUGnjjYuDc2tf398ZbGz1mp3OOGb1yC2Zix/9yHe/8IVPfu73YOMcHFjM293ZNz5/JaGsPUvPPjXZ8NncfOnH+b1vObl6snHumbON2VajLa+9sLZ67AYK9huPnv/hH79btNvYHiwevuU//z/PfOH5c6ev5tRqxXog7BDFmMRHKiuWCIo0EgDEaRoKCOyDno4pQ5RISmE98QgqbWXVqDx0LDd6b7h/qtmdaSzMDPcm5aS0mXXOD4aVtZZZGdShLCen/vQf/cpNf/iHe3t1o57splkqGLc2r8aawdhWattt3N3a7LYCa+wP+zMH/HCjP9iaHOxmR287/M0nXs8pbUW3uqxuv2/uvg98x7//1T9/5ewuV76VphbRx8AsRGiMddHHIKgUkUIWHRVUtQ79N92W/o2fv6fb3enOzPQ3/M//o889v2YlaZrUMnMm/tabFuZnFDoa1bnWcTL2edL2vipHTidSBz8qB7MmzVXLB8myA+3myuULjzZbmGTUbCMHaLZsiJM2qmEx0F4Hrmc6BqDRG24cWEh3d88X/T6jwiQrI3tXJInpZLrbzkTVM4cXW+3GTKbvvGvuhpeTr335Yn+MrlIi7F2tADUpHwIDaCsIbDRpwmIwmVucTZE2t1xRFbEskkRFYrJYsjxzeitpLl283Pv2m823zdXz3Rlfmf7GRPCaAc0aM53dJtYqVAJklUUkQS8AtfMuchRRSic2qbzjUCVJopm9D1bpGFkwGtKC+lpcDtd8kcwMRISilWEWrQ2h8swsMuXD8fTnPiZaT8eCWhkkYOEYAnufZ3kdKkWoNJVVVdXOey8EgSUIo8YqOCakqiDSzSRVYgMH5hgkAKH3EnyondM2IyIkNaUYa22QBacHcQKBnbU6MYnzITBG8agQAAM7ECYmjuJjrEOtlCKFWqNWWmJMrMHIRDjT6k4PGgQESBRhVVZlWadJmue5IjUpJjFGYzGKjMpCZHpISNNRXYxRa2OMNcZoTWVdgaDEEKMYq5mBSCliZvC+djFYI94FmWIeOWpFk6qMHIqqLus6CJTBTY0rWhuNQEZlic1Tq5QOrvY+9nzY7w9NYkSm1CdCIldHx7Gu6sgihNqoACAhWq2U0cJMAiQqT1JiTYJIYCwNi2o4GlW1p1FiU3NgYb6dJMZa7+uicjEEbbUPXqdm2h63eR6qOjjHHMeTCREqhEk5TpPEe581GmmihLEMbrQfQWS2lZIAOw+gZ1ozzbRBqMYVj0MoCleWvj8uRKRyIbNGKx1AtDUAnOSJEgRSaZpYYwXA19FH0ImOKHXtfFn95Tz9yW++mgal5ibv/e4TZeH7dGVzvL+3y92mAfFpI5EYUJAjpyTK6rzZqEuAiDFIWXhrlAQWBZlW4OuoikCDY3c3J/3dlblZbVRmO0srR1TkLHMs+0sLzVrnp87t3HrDytJsa7S5JsYARQInImSN5wlHTNPoi5oA5xorh5L7Z+fummkvrp9/pezTpL9900157XqLK8knf/13vvboN54MvDP+t8+/9sZ639WStZsaa3W5J00j1uLVnfFO7QKIiUGF+k23L7z/O+4frQ27c5221wGToydP7IWJQdu7un5mYx2cVxKOrh5SHA8uz4lOz1y4NClHAFIOY8ekBqu33HZgY6d36movN+F7v+vef/mv/vvs4gwpm5rGS+e2rtb7mdWt2dZo6D3HqqgZEVHKqrTGEClB4QiuoCqEdtvOJa0wKf/gj/742KG3PvD+ZpKzC0rpoxDnRJTYADpU4FSD7v7BpebK9vO/up+q3M40f+1/fnZ35J0nIlVJ/OjvPnHmNXPvyZUnn978xuN1hDxNw4JRimBvIM2OU1wWo/1ct01Tjr2lxc+Pbrylm86YkzL/9afWImpPsrc3ZqtOyf7P/6P/wLc9cPTOW770mx/t94v3fegHP/BdP1Tvbv/Zb3/8zOnNCUExLAO7hbn5oU9uXjjQqEYXLp0zMUGtdaInYxdcmejmoVtXv+f7/7p3yeBM9eu/+n/9h3/+gfm5g1XZDy0rw92qmBCtfftPtE4+uPqR/3CuKtStNy7sX3n+8kv9MJV/lIxYK2Uj4P5gJOBPrDavvr7z7t/+8GZiEdTe5tfy/Piwuk2X0ao+0XkKz0V3ia1evO/gB5ap/IT71Be3sb1SVaXR5FwEAmbFrblP/Mnz126M2rPZwYNHl+f317bqon/H99526dVz1flXNi+ev/zGS8dPHB/0y0brwOKRO8tev7t0PM+yZifLUV54+pW777uL8zJ6OnF8cTwuJkN8/erLyczS3W9/y5XXr+5dPbV6092bp3svfPOV2bbjvYf/7E9+p9UxZTnDTmySFiEG5xDAc8ySxLly6kwixyDoXNFqZf3RdlEMEpVEHznGRCc+xnajM67GSltAjhxRYV1NbN5EVKUru/MHhru7FEMjBcVZMQ5ZprY3t+uJEoyYWQMILKPeCJkUgDJ2vp0xoaBc3KmERBMCgEQoytIH8cyKNIgEkI3danFlripHf/zRk0n2GYF+NXBGR6LgJ7VO2C6AzPKxuw7+j3d+/59/cv1n/8VLcdD4F6dPnTzYemmU33BOKVujdY4LT5PRto+yMruwrNTSysnGHZ3Zp984PRpWdeF7l3c3954pxhUScJJUPphM7eyOslSjxKl9GYDqyhtF3nO3ZfNECYuvylZq5zuNI10bDa8cbPc26y889OlW0v3gD37PoeVDf/Y//qfrqk89+vJw+PTNR5sthLzbufWOgxfPbbpSLR1qHGnbyc5GI6Pu3MzWdj8Dlc/k/9/f+PNbjzZmmzO3rB4u+uP2bYub61d6k0medr/j297XVbK1NxBfJrNzWSPd2i7b7bZZTQZX+wdze+u77x/sDufbSxuXT7vSRwjnzp9ptNKrm72iqvdGPhGsRg4VPHXqYjLX2drbf+TTT+30J5UDlSbiY5KkZ/Ynpx596Wsvnj3UbnQOLO9/5pvHVg9/6/e9j+fh2Ip9O4xG+9sNxZdfuzi/1L7z7Sft4sKp57a5N7j35sWnXn7y/ltu1FauDrZmVmbyTrvRSfdzLiaTa88FiDQt4YIYY6Y2Cq21F7S53dnf+MOP/+n3ffCHf+5v/ZMvf/ZPn33xqTRjANZakQHnWViAIwAgcjT6Pd/1I3uXnt/d3rFKWQ1ecHOrfrYKRw6uL7+ppkQce0y
baJuxHrHfF19SdOzdpD9eaOUzudR9EQRFUyWVKMpXZ288deWJNLfIJEqxMAtzFCIlUXwQDqyJ0kQHF5568snUpDYlXzoCJEXXlEzXTE2iCCEGlGtc4chMQIBUV95YHaOEGFeWj08m5R7v+xBD5Ciiyegkqcpy+qSMiDFGENBWISMKopCgTrPsllvf/sJLD5FWtXdHDi689vrX05xCCDFGZEFS1+xwKMJRBCSKJkLQzWSmqEuCejK+HHxJRuE1zzmRAmGZelQ0UfRhWlsgQgII3k8bHj5IjMQRtM56TvWrqJRc7G8aHWcU5QktLbWHRXl5vS+ZGReBmQMZ0RLZ7paByPYuVC9t7JxYbq7OqmY32R7zuYtXUrDHVhc05ofmGqE7YaknzJf7pRGTGV5ems0SVVQ8qeoqgESHENPMZBmhNtZYXwFikiXQ394cD4cqNQbz6GOIkzxrGoOtLJsIe+9KTMSmC920Kr3HsLDQbTcVgx2P3Ob2UCgKqlgHICxdcJGqUWk8GVSJRgguRm41UjKLwXsOUhRQl6HRbGfN9ni0HaNTCgTQ+1CpUkx3Y227lfL1DAQQgKZo52syr4CIIBEEGcAJxYAtiB3nmpXLI2YArUSaXeOjH0a4MvbrhRthqq2ZLo+m8RMgCSOLaEUKKQQGJO8jojRajdqF0bjqduc29kajEohdDBAZo0RUjMJVWRlIl227Ho/EcjX22xujdqsRNOadXCcBjfcgYYqqBbjGlZ7ypIUJafqLcr2qI4DC1wIdIWKIcN1RNg2GAK6NqGSqRxO+5oWTKbXpWvJ0zXd2XWh2rVk0dZpdj6quV5giXvObwTUJGgqCRGFA8BJZCOpgBQw4hSEjNhQy5EzJylyaaZyz3BYxDooRECltyDtuNHRqOM8wSVkniOhMCgKRI6AHDpCkqZv4llZHDjZPXYUyBlK1ktDbEgE96GfPvebObzZOrcO4QgWAJAKCSoEAKJApZQSRFGpCEkREIlKkkEAEOHIMIAGRQ1NijtzQoWGglQkorCLuOxw4VTktHCESBWin8I6T1be9J3n3d66QDRotgSJFDFHb4uSN1bfcYV9dl30f1vfi7FEQcCjTj0cC3EjgyAF67qpyklRVjDGoHAMHSQ14U1XSG9GVgX7hIh+5tanjmOsKo9ImqR0pS2BqS1Ds5B9/SD30emO9B1WWic2b5n+Pilw5mTvQdqHo9V2McbCztrKwtDjTuXzl9MJ8d3l+4cKLV3TzyGR/VE68D04YlzoL3jaGu8ON/oXI1fzqam/tysXt0a23vGnvwvim+bvuu1O/+vq51aVFLRsPHPGXLxa76y/NdpOlA0um06nZa5kkJmZz6emXN0YF7db84qWBY3X5yhiikxja3dawFgPlO06MPngvPfQHn7ypC60D9sDc7ETxfMfcMx851IOB+8Qj+xcncztVsBSan3n5B7/79gduvaVLZdpZaIfdm9/x7l//T5976Eo2eGrhwqtXP/fYHrsXfOw2W50Uakd16UurNCNFRowISgGiUoYFmIMAaK2vAeoRIYoLLslbNulArPv9vgSTt5Kra5vBucI5KeTIibeP+l+3SZYa0uiTLPHjveh5ecHferf81A/NPfxHv3ooK+rOfDnyRV0oEZNRkpvt3d6772v93P/5Fl57A1xjdmYJ63Gxtvbvf+ne9pKdP7Ii0NrbenAi89/7w7/64AM3fOd3LS/cMDryf78j2oO/+Cu/e37TQ+2t0oQqRkEAowwZwsgSgNmLrt/xYOsX/vo9q0d8qiatvHPqleJXf+vCG5cbKVlWEr1vpPGum9pHV8zcUnvUD3MLB1uZX7t0YaadWW0ndT12nkJB2nhvhGg82e9ki5Ta2eWDEvdZ8Zj7FtOxLyYlLSy+pW2ag/UzfnI5hEnlBkx4de38bCeJXd+b1LHwEzfU7KPBA4tLwfH84uHUHFQhNhWJwcPzi+98R/rapdGp13fL0uvUhsixqkkpYSGFEKUcCwlbm+6vXT1+fPXHP/wTvUHx+NNPvHbqVCxBaUsSFZks6Awo7o6g4HY32VqrrVFKXXtI1lYjktHKagssIQRrDKBixNrVpFUIPkmyRNngXaKTLEkq51iAY3QMU69ZQikyCIrznpCICQQVGUBJSQEAKWTmKniOMQgDszE6SpDonXeZNSZJgvNERIoAxLk6CI+qQikIjqf6SkJM0xSN8tbFGLSiGFlYCK6tcIWURRujByEWcHUdY4zMsSyN0Wgtc7BGW2Uj+BgjIla1Q8TIIKxYYqy9AIYYIoc0SaYA76IcBWarFXNsNlqJ1QhIEI3WSkCAE2uD8xEoSgxRInMMTIqmdzS1K7NEa5W6KITASMxiDRFEmO6iCBRqUSQKAdhaxSyiVYqaABExigCIjyFBS2x8mJZyOAL3hmPmMCVZFpWPHERhFSIJZFmWJwkHTlIbg0dA711dVR6gXxYKkOt6upNWSiEiKFX62nnPAEmSCAKRNJt5bi0zWKWbWaJRxeiVojzPAFhQ2t1O3sgjc29SkVKl581+r9twIULluax83S/SzBilvHMAAFopo+o6uqLIGrZytfcxAEWttdXjGAfDCpkDigvBIE5KnyVJp5FEH4DNqKDIPBr1Kjddj2EEb5SxNjeJReFGkmiFUwmLr2tFyKGqfI1amSwJPlR1dNHXIdTekVwzoC0tz+2ul2D95dH2iRvmT1+8UubNudnGZFipVGmtwGAxrCjROsszbcqaA7tGq1nWRd5IUICDD5ob3Uh5+V8+9g/LwXhUrBf2yuIJeuVrr20/tzXZRM6OCjduu/nevtvtzs1sbV8s9jdDCsaaQNJsNgM4IHDOuSiaNQRqJW0dZ2bzwxunT3/+Nz/zIz/xo4dn21lrocqVgf0v/vEn/vgPPl0Xuor6ahlf/eILqSURikqu7EwaBhj1bgVuEhMVZ+eSdjcH4KWVhTffdU/KETQq14CYUaDh1Z7Ks8WFA1T1tnuD+YWWKuvdwVBNyq5YxwMutjst1Zhpba73cFKM1p3NbF3zXAK2kz3/whMryx0WngwmjupuTkMXgWBU1CZrkq3K0s/NtHf3+sZAlFgOqyzTzVYzSPSRhnWkklODNmv/7u89v7h48obbvJ2TECpQzBRZapJCMC2hl8LkprcdG4+fG/SFIVy5vFc7rREZhCLs7blzV+3xpb0f/Zlb3vGDKz/zN78EHp2LtQuJVf/l175ynLoLR2V/90zeTDFgu6t+/Kfv+sRvPfUXX7uC+dzq0tFXXzjfnWuwVfu97V/+hz9nswNPPf+IHm+Vw/7nf/9Tn/u9PzRjnp3vVEBD7/zI5c1UcURX1/X4vW97z+9snDPGCFCMLD6yY1/4wzct7uyf++ynn/vHP/2Lb37Hj37nD/z9j/7Whx78jtuoMe/2xhab416fKRy52d958/hr35yMd0ux7cmoYgNaGzdxWTMLQYRjnppGNzUyeud3naySAyBriKVtjlXzBJXHVFKQXdN6Z7JxSSuA9Gh/O8ty/v4fObjf2/30V8o8XVnb3tFI7WajNy76o/rhxy5Or4JWWvbPvj4Z94Ivui
sLGiuJ1erxG8aD85WPSUNgxJfXdg7s6zDSJ265m9ppXe9Xsv6B992zu7HTWcrQ86g3yK29950PXn3pwlce+srMwoxWVZWlB07elXb3Lp995Jb773jsoSe+78M/debVs9oXjdSPJz0F1cSN8qxtQGtKSdGk7Cc2IUBlTYiurmsF1lADICJCHZ0mS2iCY4gxAmhDHDFEzrNUohoVIxfDQOqyiMAm1I5QMWNVUog6SRMUGZXFypGjve11C0IZRfbelQAQnFMqDcSKdABopMbVzuikLKtGMxuNJ0ZplaBqNUJU27v7d99znFvDKJmqtRKvsGYIGm0E64iLyV4S3HvefPz4YjbM5zq+uTPs/8rHL9jvufWvfPu3THaelBBHO7uu4ObhxptWj3z5Gz2s/d/8mQ//8r/552cnE9LG1VKXLsaYaG2t3e9XSrEGZA9VlMwAIQJB8GKNanaTwXA03+qcOLJy9srFTNNcx95yy/KzL60P1+q2btWVO9PrnfnMQzzub13d7i4faHXmMpw02C7MJHfee+cXHn91c9C75eZb77nvxKvPPW/yNkaotnvzM/ltb77n3/yX/7F4YH679peu7AGcHtXFvW87cOHipUOrq7PLx52v9yZVb2M/zyj4sL8/3q+jbMSExbPXmIFEjJOllc6V02vj3XFjsXnbzQcX5mcfm7ycZjrJDBLde88d33zypYsVv/L5Z4tx5T3a1AaOsYwxBKV1YLJpshelf3VrLsAdh49iORpubBy68a5L2zuHj7+9c6efWzn/uU9+deXk4k5fhd6k3vPaEhxovf542X31jXtPHO+0W3l3cWuvF9Zea7UbqTLXW0WCIM5HSpMsy/Nua9DfURY1K65FqrJfVn/wB799+8k7P/SjP/PW973rjz/58Z2rF1F8FFaETKiJgkCz1Vw9dmtdwmRQCJEj44paJ1ZFO3bdp17Y+PbxXsv0QhhV+8+nuGFa2sOCqwSl4uDGkwmk0GwlccTIaK2ta+e9oOZeMdSpdsErJGCOMQIis5jEoEbnvE60RPGejTHK6CDifeTrzm5EvFbu0BoiTyE0ShlBBSCu9tNne6VNiAzBM8eNnfPAyOxFlFaEBAxSlGWn1R2OeiIyFU+woDABMaPMd+dAoxN+8fxjRTGMLMaqJ1/8U/Bl7ZxCDZEjiyJClsRaYGXyxLN3zkUOSpsilMGPBTxpEKLpZ0qoBDBEr1AxEpESBBEHRMKiEGMMRAoEtLWACo2EwJEweAZmFVFALOp66FMNI+BuRqtLmVCy36uK2vkQizpGFhEkxJHj0hc3HukeO8ozs81eT62vlaGG7b213cFWM09m2vlsu3lybkmHK6TS2RwGe9s82/EViW5iUTnvhPRwfzg/t6AbXWNtZOd8wezKshxP3BBjVVUt3ciazYYRV453+wMtEIMUkwgC6LH2HGQvyRrCOnJVFq72buhqx1hW7H1UpCGEKKxH9WynQQ2TZeSFKyaLiSNjMpszzbRAp7Y5M7NLxW5/L29YBnZVcfstt5/vaRj62cVr3Tr6XxEJ4l+idACjiAZUiCnGFrrF6Fetnz+eLeR2pqXn51ogcfHI0m5ZvXK5fOz0zmMXQq8KIqKZ1BRiDUAIKBRjBAJGyDtZy2b9wYgynadUcXju7Pm3zD948shtb7z2SgjeGKWYQSQEH5z4cgRNc3S5Pej3D87MLOdue3cyCdjbZ5fS3HyatlBUrMGhJkQWul4GEozTWEgQmKfAnKk9TK61jYAIRWRaYSORKeVgaj/D/z+XGrBcH6kBIuF1PhL+Jb96mhpNu0YgNEUaCRNdQxxdK20hESHHqAiVEgTWwhSqDKIKsQX1bAZHZ2G2C90WaACUylhoNZQEYIbEgItxPIiVB2XtSCTV2OiQTtgYUBYIIASIGBWS1g4zHXwdqv0jB3F2jjL2uYJenX32Mb+5pbb3Wr6y3keNMYQAiIJIQETIIIZElIYQAQGiaFIMSILCQkppYgStlVLaz9jiXXfSA/fPHT+Qzs9pTZV33oF95Y39T39t+OwZU7hUGZt6f8/B0fc9SPc8ENqt/VKaMWYABtmLHqOVdptOrFQZqa0BXVmjm++y2hQoEQKjiBeABEDVEapQuzPn6tvubmhyPgY3grrQp16nly/o57doKHjDITp5KLO2dgw87tmGLesSQOF45pvfhC89HXbqVABV1L5mH68/HU9/GPdKZBWJx/ujvDnTbS0Ve/vayIEjJzYuXR5Nev29Csudo0eO6qzR358Uu3ugK+EQuK5dPbPUrjF0FxYHhaMZ9+3f9tbNixfGF86u6LSdobH52dPnFcDqjY32amq1H+5cscm8VRoxf+P13Ue+sffoy3vn1yYxoFEUfPDeK4ByPMrYJcOdX/r5A9yq75idyUKUUAauWgczqcfj/mhmPm015qsk/rPfHqh2brO0P7S/9rFzvgyNNNF4Pm+o+A8/7qpRWc1/8YU3FMV2ezWGGIF0DpP+OLA3lkgRKhVdFHYgQEQsSGgYagQOLipjhZ1wFBGlxOo6bcD+rmiVovBg2F9Ybktszy613Cjsb76uWBV7kyyFRnDZqP+zH5q/9770vncvxKzP/jIUpkgO/uJHRoSq1SRgBlcpXx1ql//6l+85eGg7pikXuL+/G11iMtLdam4lnn/p8a3LZZi5+19/7GpNrc7Mch7Ub/y737q0Bf/4l7/ln/7EjX/rI0/uq5x8JGSK0QePEjNNs8gnDqTv+8CBE3fPnjwZbXsw3OY47j75gvm5f/HC7iDVmhQEFG416MiR5r0PzGVaCQI3TDUZuUlZVqBsGLtqcWmhkeZlnztdC27EqmzP2jR1pqVs3XCD/Wpczc62fC2i0gML9zUbS7ujQS3uyLFDW1uXG2JrdhDTPE8jZiFMcmtNks/MZiarZ9KGhObM7IGF1bsvXX7pwMHVYb8ailpdSRdOtJp5fP6FncoFEmpZtCmsD2tDGkSRsEhIVBTLly9f/PPP/cmtt93x1rtu3d3a5AguuugZRQZ7w5lkcteb5oaDc9Uwi2U2M2urUF8rmkZBBYqUTPdf09oxMCImiY0i1hrvvICatjCjgOfoaifMxiijFQCwMDMjKWMSEFDaAsfa1YYosDBIqpIgjgMo0pqUUoYUkQohuhCEmZ13SimJIXivlGIGHzwDpzZJrOEoaDDRlpmFwCRpCEEiI4FNFQIoJGFWgABslNZIVeUJSBnLXNXeAUSBmBjLMTioCSBJUgDiCFECC7sYlCYBIUKIgsDM01ugayNbQmw1Ghy8Upo0NNIshEBTjHVko2yitSJX+eh8EBFmkQjeB1JaKaO0ThVFIRdHymqrEuFaIApEJBCMNU9aeibRJtW2qAultFGGo/jgSl+kNiFFzseAWHhf1hVzRK1c7UN0aWqR0BqFSECoIhhtGlkGwdssVVpVwlGiIszyDGPsEAUfSh+s1kRotBbBKJES64lYILFGAzRs0m40MptEAVdHAYocYijTJBOBxJjK+yCYmEQZytt1XdTeeQQbgXdHw+3dniKttCqdZ4EYQproKFyVFQsL4GR/VNX1pKiMTZKqT
vNESKOgq+qi9kVRpEppqyNQak1ENMrEuobojVZJagTIOe5289SkwbmyrtvNrK7LKkZhCSwggTROa2xQA9a6LP1kUteOy7K89lYNAADnz47aZn5mjg/fnA56Gy89Wshkobc1IGLQuDcoUmuokR0+efNguxxMBq70EjkRjiGQAAe2xr39vTMzJ/CTXzz/u7/3ez/5gZ9Yyd/5hce/6Pr7m6cmPtJ7vufnd848ORmsixnmqe7t7bZJtQiq3kBnqQAGkCJAUfbnDy1aSHlMO+e3oR510/Tzv/3foGzecvBQvV1D0Txw79wIe1//4u+9+swTP/nhH3jmmxc+/cVn2Gi04jCGgApFgGsgbdDXaHNrQlxKMqjCvfcet2m289rpw7ev3HRstkKLIw+1s5kdVsOL25s2xLmZph+7mWZz6OLcfAugaGp3dC6rDMREz863tve9aeeJwSoUMw2VGBps9kCgHPvUqOHQWSTFzD6MIld7k0Ors3lK4/FYE4r3ZLQiUQwI0U1GJ47dM/LjK1fO2zwtWZ/als9+rf/h5aTRGCFWoR6bLAkUKDqIiUZGR899dvjcG4kX1Z1rj6IT5STytEGnEVbnV3Lt914dvnZpPOpXs2nTAzdbWbeV9fv9nQ179JbE9wfjjboaxqIw5648vHLAf/d3r/zho/TMCxfIq/E4IkXLOD63QWkfQgPVCBCpCirRMwtmeabx+tndUeGbHevLID6aPFnfW//81z/Vard9EevaAUGz3QRFRVXvbfZ/8q/c9b63vn+8689vrn/nB3/8y49t8d6TD3zgfa9/4SsqXDn64O240koX6+//Pw90/7T609+vdvoILM2sEWOMGOqiBIUpkRR1YtWBmerH/8FPVuWZyTNf2N/buvWD3UkcA0fUCWgOsIOzCZg7gZcTtW4bYw2Dv/nL82+8fu6ZU32tTQyuKiuFYlNj47WroN7bG17tXTp/rtVu7J670l3phn4VmjFJ2UW9sbaHqnn3m96eJTlr2d0dHm5lm+uX109fOH7Lm6nIIKEsRUXJ3tbmqJycunjhnrffnyd45dylpYM3jobjVx79bKrGk9GJ7tJBaq+aDs01Oy889plmFkG41eyQapZlwRIsJYml1FgfK1e7ZiOLIiFEUspVRZ5mhBkJBHF14MpNsqzNqImIA2ideITC7ROlHKr5pfnBYNeFOJiUg3IoAZNEIcjBhYPrO1usa5tpCCZ6dl58La0krRR65Nlu0h96ZPDOiXBZTpQiDtEaLRxLT0mQPG4/9JFbr1x5ZkkAs4OefIw1Kgay7AkUWFLsPXO/NT9s86WtXXPyhpsuxjCO/O8/cfrPvrp+58zOd//AjUvtY33qPv3V4va7c2V4ecU//NU/393uuyK4koP3aSPNcuMLlyrdSo020lludJLGwuEDTzz1QlV5JJXmWoDduF6ZnfnwX/2Jshruf3ZfPNe1XL44mGvNv/W+e/bXz95260LaSF5/9lzr5OH4IJfjdHN9l3Hnh3/owbWNS0cOdu+/88R42BPf37p4ZfXAfHu+nZEd9IYqVaOti/sXrjifrhzuHjzQIhqMh8NHv3TROTlx+0HTWRzXkzAZ21Qz1U8++9xb7r3/rffds7+z68rK5LEYV5cvXlYawj402q201Tx75arSsLM/JENzWXbTynxjtpV11IPvOPnYk2+Mh8EYBciKYmIiGc1Bh8hI5AvHhkgr3U0e/K4Hw+b23XccP3vqUtaYI2O293qlb9314Nsj7RplMTje3P7/UfWf4bZlV3ku2lpPI8y8cto5V85ZWQgJSYAwORuBwdcY29xzjHE65h5MMDaXbJIR0QKUpZJQKklVpapS5bxr57D2ymHmkXpo7f5YW/c8/F7PXD/mM3sfo7f+fe9LoRptwakjt1zeWLmpPhEGW0W3m+92EengrXedef70N+MULFCYWAbiwvuFiekf+P4PfOZTn9zc3IzqsWDpCl+Ww+dfe/LCxulb777/Xe/7oa2rZ5977JHuYEMbIOdCgLiWVLk78/LLwDJWnpSPWKtaVDGGyjG5C1X5yuOPvvMDCYjN7uZroh6JsAOqVY4rzrplmYdSXLq4Wo0DWSCJigGFMJFgqWSMsYsDOfDBMRGjRKEkShREhExGRRVVjOACqVgAMXuWQkoj9vhEzAQCkQIiAJJQSghRjxqDUV8bHcCTZ62VEtL7ILWsykoKIZVEAUCspGRAIeTs9Fy/tyGVCSFIKSRKZgAJQkf33fOOzz/+6Voiqn4RHBIAgCiyTGtEFkIyKK1YMHIkIj+CzsLNneljO+uvDt1lVk4a4coxg2XkEEAIxUQAGDgAo5KCiYLbYy0JJgYBSkkKJJUSQnBgYCYKCKwEALDSxllbeS8RredYm2FhrXVjwZMpRrGfbmlQUZaFwagsLHmCYG0jUanRVO7sm5oEMZg9NDPbSLOhDRwPRyxjVebF8rC8dnVbGCOUr+Jksj3Rz4Jg3N64GoGqrBdx3Gy2NtY31QRMdFIDdnFfmo2h8tF4Y0vopFNrNZMWeK5p6oUSJBoQiMrUW9s7vd5gTIGSWNWNHmzvLBw8UG9GV1ZWRr0yAJOB0vkIBQWnpJIYyFdE0hgTq5jGVSNNG81mu1PbNzmH+fbWZndUjCuDW6F0nmsm1oqm2hPn1neaabw3+wAAgj26M+ylKgmZQmCBQUVYeV0UM1Vxp6luX0xvPTGXttsENukIYWI3Dv3xNayGt55aOHBk6erfXB2XxHvDkuvpJEKBFAIySkClRCpVUWRJqsss985HiV6ans63ur1eXyKzkEwExChYCeG9s94Pc9rY7rcaYtDbjoxYmop3e+VuSZTjeJ1sF9NO3OxEeVmZWBXgvZDBOSX3xl97mCVgAtqLFnHYg07vdR0AcW99fFPXIwD2NM57VbDr2KW98JDgbyrWcA9FtDeP3VtZe1kmQIbrkOxAElDuoY0Ex8p4QokSkblyRgpnC+PKNuUHUjh0CDoRRBJSDY1JaNSBCawHy2B0iBNoTMg4Udm4ykvoZ6I75tFYMCijodFChLjbC4JZSjJ1AEnOemBAJTnGAwfU1pat8tnPPzp6+qJe2025AqmUDz4Iz8hCIxEIKcLejgEMQjIhs1AgSIBQWjBLCCTA2lIhSjSy5IV0/N9+4eZTN6yJuAjVkAUTj2tKiEg3Z6uZg9H/98+qc1fiMOb33tL+mZ+ZaS1ebM5IVwJqKSGwd8ROCMch2IDGgIbQ88mjr9kTNyX7D3iBTCiENLZ0o5wGA84yT0I/fc4fvzB99ETTWr1yWX3t0a0nXyr7IR2xHJxzH/6C/bH3tWebgaTXEVehFBKznebDn8WPPOq3qnoAJg7kkbQk/sepIiEhkCxL15zqGCl2emPlaHfjmk9qcaIE1MWSmZiZqNxoe2edg4/awdF2VVrWbnJuWkf1/QcP7K6eu+Gmk/2drWsvPdpJp04ePDYIExvLz9zz0EI53mq3TK1JUdMJ9vF0G6L2+tXib/7isS+/ONreLLyM7YiVwT3PNwExQ3dUNGL72//yyNhdbKWqNZ2WmwOTqn5voKsw7uUso0qQyLeO
3DATtK8KzsqMWSSmoagikIUFS0whk0YooyZrcZZlyN6RDYBZhVEa+SwEIvYsxZ4dXQISMwOVATwiIyLI//9yAGCWAqvusN2YPHngsIX6eHx5ZXnsHBQZzU8cdMXOzvrw2C133dUWC/r89/2TVjx95sTtetRbl9EAkgn0NeHD/tkGugxZuLJyLqQq3hr3/+PPvf/W2zur564kujnMq3E3D3k5dWQxbkShGmqobrn98J//w+Dy6/33vfOt735/Z2nf7tGVpaMUNdVA9pd/5+dv+92Hu9tbWceIesz7DjUOz6HRvTffu9jb2W7OVDpeTln6npht3fiVxzZ/+c8uDvppJJDR+sCL+xt33TTRaYp20+wu95f2zR9dmnryhdfq9ejo4ZPZqO/9dkpK+rGJWcdCR0m/t5E0ZiQUW+vnm7WObCzurCxPtmqNhFPT2Vx9qlKadNyUGZRRhEpErQTKbNAvyjDsD6UMtQbMzE37cb8ep9mohxKOTU+s7JwHWQ57RSDP0suKksodnYkn7tm3vJWdvdjfN5F+4D03bI+rZ557YyNTK6sVOfLDcZQoCnDx8u6Zc1+JkQRwa6IxKgqJaIUaVuN7b0G1sINahJFAEHlVyVR8cxVIRLHHHgIgiQJBEBHs9WtDQAYAQgFAYpwVUoH3QRvtnbPOIupYRxIFAQmpUIgQHEMg9lEcheC9JwrkfSaUNFoLwOBBKw0MSrBCXdIeb5qMAkQGFNY6Zy3t2S4AEaWWAtlJqRHQuoKYtVZ+L3yjJQUPDAIQpRQoqrLyFCgwAdiq4OC1EkoZJSSD4D2Ks9DeBq0xMrG1pfPekQPUINgHy+SDtwRAPjAAEwiUgcEyNkxslFJKxTIOkjjYyChAoEAELBlCUUmBMkliIYwSKJQrK+9CVVlAJCajBJF3wQMELbXRak9KUo9aQpqCKC9yIDAKgYNzZaTTBDG3rqxKH6CoSue9C0FIAc4zQpKmDBx8iOIUgkdmUByZyEiNKMhzWVkfPBAnSVL6kEY6Sdh7qvkADFKgUsp78hS+yWjXBrhei7SUCDIEdkyWaZCNWrVaLW1UtgKUzgeJCvc2EGBFkkCC5ODdxk5WVi7RMop1vVEH4t1eVgU/zm2eFa5yING6MB5lIbhiXEpVKi2jxCitTRRR8ASor7+UB0JRYElM7VZDg9RRLKUgx0mk6i0jAYa9IZD1weXZ0IfAiGVpSxucrYQSAkEwS6kcoWfUCMEzcFBS8TdfjOZTU2TF0RlVFbs2C/Mwc3kFpWdTj0tXgocgvIqVhqjKVqR3sdFCxN4FX5HzBUR85GBk5fjqip8wMxtvZJ8Wn/rp/+O3bgu8dvqT6Ezm7DhlNU91V2x3d40xkzOtGCJTeYXcanYyxEL6YCOkzu6r9sLTr3U0un62dm7wPd956Ce+54PN+vF2HN3/5gc5G/37f/3zX3ry+RtubAtjjt5806NPXfSpZA/MQMTkAY0IQmBkSu+lwGYqJxrp/Fxzq9u94cj+Yc/msZXDXjor2lF69cpqIpuL80sQujWV6Zo+2W4bVv2dIUinvNve3t2/VK836zDy65ujjZ3ezOzMsUPzxbh4adSfnYhOHmvtjLNow8/sa4UKcdzXxmgZdqvKg4gjs70zSrSJI2Mr65xPlBJKg0AiGO3Q//kbf/hXH/sfy9fOI+DW6qCR4Fc+vzHP+v4H8mZ91JzQliiajKLE76wWvVHjia8Of/9Pd0s1h2B7mXdEDMFaz6xQyhDpz3z10tkLsdHZDScWfvw73vT5r77WKysSLli5PeZPfGIg89ZUJz/w4Fy/12uTyzwTJm+746Gnz73wqiiC4CStl85GibSu/Lc/93PfeHzt8Sc/XhV+rzM79tm/+Hf/9K8+9MnmuBLKrF3aRWPAA3m3WQ6a9VRpFYGsPMVGA1aHj+77kZ/52bm5qaybzx1detu3fdenP/HHs61Tf/nFJ3/5b371nzzAb36o3tyvQxM3Vs9Ntqbe+WD7C397GaY6G92iN9gBzmcXESzUo4ZEk9SmL6/23vft74rSg92zT/Sf3ZQ5wF19NbktEqqyTIkdQdc0toNtKVExV1U5ppigzO46Ck88VTUXm7GWAqC/0ddGuG82870tZcQn7z62vbKTDfNiXN73rfefe/nJgsJrr144vH8f+ercpcs/9P5v21q9urs5MNqUA89Eq5fPH7/7QLOjjArlqKxP1UXcEbVxGUXn37g8Wl4dbw0Wbj5x4o4HU4xefPhppe3a1Y3b73xP5UfNyZS5QqtFEJ48QrBVqRQzc+UtBxBKMauqHDfrE57ZOU0gGImRbDXuNCeFaFm3d4VgWiBKO/bgJzoNkI3RcOTQy1iKnGpaeKWandqlq+u9fLy1mzfbB5949JFbj93CXtZqsScI7Can0m6/P6zK6dSMRhaUct5rqUysi6rYu0217BkQjbhv3t539yAfQzWQdRO8sMSElnRspM0DsTT1WAniMK52//N/+JYP/dX6j3zfO//P3/jlrOSoNfXyOl7ZWXr109OTgi9t784uLp3P3Jve+aZP/4/fmZydO3j80GBwNlFSiCQIORwVjTRudVpKjBh9GquqyPLxeLKpXC2amJhe39oCYlk3/Tz/o7/+c+mBLE9PNRu1OI2ize7gi88/ceeRo8urK5OTjXpbiDjcdfvS809c7UMxe3z+7IVrjkVjNzdEb7r10F13HNm+eG1ycZpRJFHcXdkUlfQWbp+ZeHZ5sLOTv1BtzLVSQDgwN7exOXz9tdeFWT1xw/Fr166WxWhqqfPWd77p3CtnpubnhA2Tzdp43NepiXXHkTh8/Mi4N7SlTbYzV2UJgkU3Odn8jne++alXz+5ubmn0fjRMlDDSjCx7FxCEMdphaEYyBLXv4KGyve+7vv+DX/34xy+9cmky9lzS9Gw9c3j0lttWzl2ePHagP9r9yB/9z4XpyZXzp/ctzuRDqJf+1PzMXz/27PBEd8bu2H5114HOxQtXn/v81cHw+rnAaB0cKaWtp9LaqxevGPfO//e//Fdf/PqjX3zsSQXYmWqPdoY++MHO1lce/lit0b7h5Ikbbrlts7cZJ9HalSu2yGHPGY0VIYCJjx09fvut73/51c+vrV0msnGcKmyffvXyrSefEnyBCwG1TjF2jq/lmZYuGvYGo5559fnhcMzKyIBMHJRWwROwH43XiYL3IdbaV1UjqSVJ4vZqmS7sP3iYA11dvwxEFIItHRABEzCiBCEFUGAphBC81ykBJAAM3rpcCyx9AAYdyb3DvFICBFgXpmcn46oaDsbBs1AMgk1kCBwKCUBCCBDKe1YCfcB2yzz50uebDV2URVmURulAoLQI3tvCK4kgNARCkELFsj5x97e+ZflS9fa3v+OV58uXX18pqiB1Lc+6SCyVVFIVZb4XwGBCQMnfjAMTBNwjY+/ZrhgkCgQEAQRsbQXMQilHXusgEeJIQiAgRJIhiF7ualq06hF7Ll0ZaYmWO5E6MNXs5lYgVbaKIj50qF0S7Vua6K2vT0w1pztMAfNcsRS7u9zruWABqUgT453vD329tZQV1G42gx2VnPfG493
RrotMDL4NgmR95GuX13qjntNYk1LXG7WpWisbDvJ80ExSXYs5eIVRSOKWnqv0rnRlZbOCipnFWSPDqCybkYhmGoMs9MZWmuCdy4s8ilOWIC3ng3xuYkZLdXRx+tYbjgyMsuOSs2HSiOrRlBP+wgUbcAZjHYMBL7jyqS1CNRqO3d4XiyAkCgEgAJhYCQxSBZDS4gTADY34RCrvnpmZXcDW3AQnCau2bCVFFrBtWtNtrrKdDfu1ry+v7lbko725i/cgkIQQe3kcY0xijAQebnUDcVyLyaPNQy1Ji2G+2r/qWRAjsADy3nlJ0khmAY64dLDVL50TxE6X2G5RPfWgBVfsUZW5L4aDdmFERKQqHWs0KlGxd47A77UY4bqojPcSRQwIBHtEHpQA4jpXCHivdwaCgb6pCPzmZ6+HiQTAHlleAAOARADmPabG3gJTUgZS5BiFlCSFp9E4qwpXSLJV8MQSJDIJ9lqLA5Pw0M3pTYu5iYAtpDGQk6OK64I6U6BSEBJ6W5Bn0B2HJnAUi/aknEe1tc3WxyYy5aC3ejmcu0ZvbEdbG9mtB2u33mwmZ5SSdlgU2kSu0rt9euYNfu1ceXUtzceGAVCE4INjFkZ6F/YSU5JBIAiJ2iijFTqKZKjrHBWPvSodImlLChFAogshNf5dt9YX5ndAlyxE1OgQykAtkBEaW9e9myN4/5v4r8dihPDjP3T0wB1nx+MAIAQqwSilYD9iRiSrIiAlAMEVPM7lK5fEw18Yf9e3RRNzI8CYLewO+Nx5WB1oEsqhPrOh//uHu4szZKLaxvJoZwt3hvWgFInQ6/Pnn+PeaPiWG+Dgoty/GBwBU/OZZ5K//0q+XdWsEh6Cc6RjIeNIR/94VKS0YoJqHEQUSspP3f7Wa2dejuptUxO+P9KGuRztrvTiZmNuTu9uDrfXtkApIwRRMbM4Mxqud9fXaFz2LgwZ2iM188KL/dPLO29c/sKB2srCxNTifK7DxPJzAxW1Q4gef+Xa51946tJ6lQ2QOQqMTIUQ3hP4KiBCarR3Lk5raROaC9SeTPp97BZCFmm9FThqdfu9erMeyIyGA9mU7/jew8f/dOf0RasNVo6dL1AQIJlEM7Myksl7W3lbaQXOOYGAUglByII8KSmIMCAQA6KUKCg4ggAQlDJAnkIABCKUUgMFYAE1vTHYJZPedtstL7+yU5uYyIoiULh2+SIGVlHt/KtP15YGf3nmZ3YvfVQmIhuXzB32E6PNtpG6GeEdDxw/dnT33GXyhNahSmLM2n/3xKVT7/yuuLjzkCmGmytT8zE5Rh0XRYibC6z6O5sbP/qd7zo23Xrvdy1effHjr15cf/Obj0d8YHDmjdtu3Ncf0bFk9APfW3/gLcdWr15t72vsXFqloLd3Ntib3oZUpuHtxBcfWf/qyy++8Go/z7SJAF2VSJiZSw4ea88tRlCV4xLjmYXc5ttnLwdIosaCl2kUlQvN2U4tWd/q2+CIQYvgWI0LDJTH6UgwBQwTU7O59ePxaMhdG7LRaNiZmVF1VXqcWDq+k0G3t1GbXtxYWRaI9Vqz2ZlsTs9b4WLBUTu1oVm4zCQ+aXG+W6xc2tJJowiqMznHfq2WFjPzbRZy0O997emXfvgHbrn16FKl4q89vrm5RheuDYdjZy0F1rGMFLKnamcwhsCl57rGUwein/+5qapcjiBpmNZmdyAj1U7j648ERCGwslZKiQyMBCE4H7Q2zlVVZaVUSscCtfc5UwCSRmlPpIQSgEpFoBQyGKH29lIQ0jqvlZHKOLAoqChL54NEgYoiqbRQxGwri5IphKqshEAir5QIzjvvlNTKGEIhpdJKuWpPZ4YUaI/sg1IyhUB7ckdgEMSEDGwtAHhPITittbVOSQEyRkStI4EQODAjCpFXuZCCMXLeE5EQiAjjbCwQxF57idhojdoQsHPBVS53IWYSUYxSElNW5QRUS2JtDDETO1s6Io6MIWDv2GilBFaOUEjas3oSaa20ktZVwMz0TRupAImoUAXvC1uxD0Zp1hFLQazGQZRFOS7z0hZaGU+BgUCAMkrs/U8hAVlqJYQsbBWIhdQEUDmfRhqAfFkqpaVUxIQoJAviPZ8DMrNzgd0ef4ARhZYqjnSklJBICJFWzDAeFQSQpglISUKglNY6RiGkikwkpVBKjilgZGIZCUSTF42EmShNkiSJi7JkT2Ws8qIkFwSirRwTVy6URbGH8POVd45NwnlWMbM2GgEFCC1klMS1RLeaSatRg4CNNPaBJGCtGUmpnQvjqqJQ9LqDvBzv7g4H40wKbQMH75RSUSQUc5JgPW3IKNLGkLVVmdvSmTS5njB1w+mlmdVLqy7Ghlhcv1JSUUrt0yQhkiGEJI4joTcuXTAAgDgc9ycnOxSkVcILJWL+qQ/+8If+1++EzsSp460E7AvPPfeHv/nT3/dD3//iytljp26fPnpfmFzqdrFex0gJEj5tCD+2MYMpcfv5aze++b4//IuHt7e82sJ6lPQ3uhir+ZnJf/4L/62/tlPH9JaTt+mEf/UXfv6vP/Ll5nSrUI2XVyAo/8Wf/TVZSqkNkXeeOQilFSGSIodBy3Dv8ZlD+9vLy9sXLl0LymdF71ve+eCZZ99YG+ycP32t1m5L4R31zl3p7+TOYVyJ+PVr5eHJqN0w99x2UAwzKhdeef0N46OpTtowoubVqKLN1TWD3NQcKb964Vparx+ZjNlIi9WJu6eb9Shq6c985fLptZIQdCPOykJHdZRKeCbaUyCiicTETPJbf/OTZy9cqdUi55yOtKqnawP+9HPR517ot/U4SeKyUtu9MgS50+NL6zvCpBIWdQzsZTUqAwohwUR7zA2qPN10YFKKITXx0LFDf/2Js4dOLpWnr2ZlOa7INJrP78Q7H7f331i/2/jZablwMFKZ3O3Wvvhnr375KyuVrXOAXr+k4CmN6nH6B7/3B69duPy2++87e/5qcGyiaGr/4jeeO9/bGak0GfRGcV2YSDQa9WG3h546jfrq6m69lXJR+GF3fWf9v/7nXzdm4bnXdxutVpymRX3iwhsb692VSKeHH3r3ufyFn3vf27fOPY0DNDjnR50Lz2a7O1GhoHJSSnjlxa/LpNi48ldvvPraH//eN44ee8tqX+N4yxaPDbe/euf3HBhtXoWlDrNichA3rJ5luDlAkwugoo/xBGqFsCvb1Xf+5IGPPNLvuuDJIQqMVBFCJ75evdlaW9U6rnXmty92p5YO9LqDj/7tX15+5dmDx/YdO9I8eqD9ta+fv/+7fmjj6upoMKAQhqNyZnFeS7W6vjO72LHD0fMvPhEpNTN3UIO74/YjO1vri7ccqRbUp/74QyriycNvqqq0NbO9uf5GJ21++g//66EbD9YjPRh6CgDBB3KxkUElqFBCNMrK6c5cgIqZarWmtYWHoFBWZcXSCR3FcWo5eA4BbeV0S9WLcV7aKkliW5GMNaMLIZDDYmBb9bTC3u7OrjKinbReOH/tZ9/x46NstLO9OtGQs1OT2aBnJDvnjJLomIMzggsXFArnvfAIASoKpIECAKthVX3gvZ3+cBUcZKVkv6
UnjanXQNYsSiEkh9STkc5KlWip5hZNHrb333znhctr2JgM5Wh+6fDkdOfi8vpLWfWu973ryS9/7rUz5x596mkYjVeGy6MsGF984F033XdrMjHbuHT2Csr4k5+9dHa7SGpzvUHPV1V+cWU4HIM2zm2TJ1v5OFIR6mLEWktmyiqqJ7C9M9JKjgb86tllP852d0fz081ix37jq5eJ5YGbDszPz7iiGgyKq+tZ0bdKJc+8eOWmw0ta4ri71V6YOXhs2rKsXPFjP/k29ZHHv3a2v6XFzP54tNtHnp2biC9eXtGp3L22RrZqpI12c+7LT7yQAJdBzS/OPfW1xza3rzxwx71Tk4uXdkZXtsvedi6Zk6m520+dfPbLX07auLoz+t3//dmk077lxuNrF8+xlwLEYOwdMaBgxsEoeO+OHV74pd/4rWY6PbSdKNSWHgovPvnbOzX/6BOvPfT2OzCHq2cv5MMx2fDo0y9OHzpsx72N7u6dD91UZPLgibn1cfiWdz7oILgoefmZiy++vtEdlUKIhenJvVWw14qq8hKk0srYsviLv/yLb3nLm37wh36qvnDkox/7aHecJTWRqjSUY3S+yruvvvCNsiIZqThJjYw7U3NTk0eBi5WrL47KYZKaXnf9S1/7i8KOYy05C9KBjsPO7s7lC2cmaz3AKB9CHMMoHxSuE4bF9qYrq9mz69lWloEKQqDzwVyHxBJ5K6USWikZteoNAG7Gzf6oaysHiP3+VlmVEoERpREcAiIorTwTIDvvtJAIEhEFskBQRoMUe3kHlUTguCzyPY5vCAE1kmehtIniKs9r9bgsqkAEIKSAoshMZIjZlQEZm512nrmjh5cuXrnUisnESgs+fGTf8tUt4MCBQyAgEloZpQA9VdxpHsqM7g/W6tHgq1/6ra2djdLmRJiNh1IIIeVetFtIxUQIwuiEmK3P94iXQgoGQBRCaEBCCcBAFALt3QKxEIDsY4nAngIQ417aiKkgJi2EC7zRDybS2RCRA4UgtJCjnhAyUrJ0QBl+4fnedl67oyyW2mmRF7EURN7UlHO7c4v1ehsVxFkesrEdje2oPy4KL+J0cqKBFMU5y0HlQYUkrajKRsM3Njb6RWWgBkF4C4qhgTrSkE7GRXtqd7O7trw2PTtz8vhCb1SKuqxNt69duDC0mShCYgnAS4iVjivvhXRTrXiiiXmR22ZjtyiYGYxWUgBLEoZ1o5tz2mw0ax1P1gUP4xFXg85Eq/BhVHnwogLzyoXzw5wYVZRcfxYgCWBxXSUfiJQkEHWHjf74ZJPfPK9uOT6xsK9TmDI0Opi0TK3jQDCbtBOzG3fP9X/zo1954YLrlREKBXvGeWDxTfq6VhoQKucQWUQGmeqdOocoqVGtkXR3N5wtA6k0SY00ZelBSC1lURaAwCClQma5ObBxbJBDPyuNDGkSSRsCgGQwsRIlamHKrOqtjtNmE2MlEpPUTFXlXjAxC4UCkf1eIkgi094+Q56E2jMAs0DWKPa6Y8TIICgERCEBEAkB5N48CIiIBF6/2wYGJAbHsVZIXjonRWzLamcjK0YkHQApBI4jmIyr2AjP4F1oppKNnIkhFqQMtCeBHACDEqHMhRVQMNQlEEGzE0Ut0e277QF7yzNzotNxU/McCAOXcc3Bqji9rJ6+rHZ7rXMr6tEzoJXLC2e9DAKHGVUOC1cjC1JIIQjAByAGRikoEDEgCABU6jqp2ICMPc82qm+5Gd75tqixEA2GjY9/6twTL+qtLAURoVJG4v6a+4H3zSfJtqcWZwGNDNrIqMUouOxrWVdidNct+NQbxUosDtw4gtqucOA9CsXMnigTGIRS5BmRMBIqURQcc7RdRZ94blybkG+9LzFB9fvx11/MXjrn17ZNxUogOkyurrnL616bZNh1sTJBeq2YiAFkXtS+8Rqcu8SdOpyci009fe1qdWXLVzYlJCaHzJGSkoig6sxN/KNRkU5rduBbrU7pdjbW1xpnLtTiui1JOFdl4zjVizPJuecurV4aOA7bW3l7euL4bUfHg835Ju4s70YG4obhWlLYxhef2HzmwqUitzq4pSm88bAc9/sXMjx9/rVQLfT76zu7cnnsK6HnmonomN1SrG1ujYY5MAHCHo/ZESHiaOSKnL7x5Nr9b6lqnUQoKgvSjkNWWVcBcb3R3O0V41KMr+0cn+dnz4QENAAQgPXOqIiZALGyFoFi02QhCYrgWGpZWAtjDyh0bLzzQpBzlZLSBwcgmRmFBEGBrUYWWgILkHsoEy59cIIp+M3da088+Xdu7AihWdeV16PeoFbToHwCNAmj/LlPlMNLYqpdLadJ1GZR1lq6GFe+o+szo+95/9R//O/XBECkhPc+TfXa5Z0f/MCvLLXEiTb+2597r4YN1P3Y+N3lrae+sXzDbSeDwEsvf+073joxOve3dner6onyQn7m6qWnvto17Wp1uBapasJk2xfLuVZTlINDS63z5/Mrm9XubjlRbzz1wtXN3WtvLBdZaQKzEp5ciCM4cbh5z737x7aKtahKv29henXr8upWkUADC+2rkLlBHSk20drWlSRqcNEGHwWiyenjkxMzo+F5DW7cv1zaUFUooolApSuHUZOkkf3uDkstqDMYXg5SVllVUoh0c64x06y38py7V3qCRbcczs/VtMSVC6/NLLUhz7cuXVbErpRaiNTAWEsoXbtljh6cfPa18vXV8eMvDL7jTRNltfWd757zNnnxpc3GzFK3u72yNjh7ZXRttQyFqyqsRabeSgaD7ANvm3ngBjr9cj5RmyVuxnV0YZDn128PKAQhJDGDD3u/Ri0lIltbBQq853Tfm+oTmUgRU4CAHARKpZSU2vsqMjWJ0lPlvLeV1cr44AGBORCFSCktpdRGIiHvuVRRau18WZSV98EYLXGvCby3QwlAKRUiE4UgJOZlziCIObCPo1gp5awHgVKgRKGU2OsGV95nZRkCKwFFVWmpCFkqRIF7OSnnQgjBqb2GMhdFzhRAgPN0/VkgEBVGSoMARJRKEUDwPo51KqUGAuYAKBGtc1LKyjMDCUYAieiVVABABEoxElalsy5YClGk9iRoQggASEzifAgcKFAIAYQEwYIoeO89CRBFZatAyphAPBpXvio9eSW0EgkoBkQRSAIBCaOUAPDEiKyEqKepcx5RIkAAligJKDKGEaPYUAiC0Vo7zqsqBBcCwV7sMiilECHWMo2N0ToxxlkXApTsrKsCBmRQIDl4axmAUmOIQUjJwN45Ii+lFEIQA3mf6AgFIjNKmWXWWm+MJqZgjGwqEBACDbKCJG5teWcdO68AUe+xqoSQGCWJMsI7UkLGkZ5o1Gan2wIwqaXeuhhBCpRCACJIkdZSIimVGowkajMdQpkXWVYygQ3AghWKdrszPTsnhNBJBGTLfOQDhG+miqLYHLxj6eL6cjS1b3htaXf7PApPwNZVUuFMZzJOo3yUTUzUs3FV5a6W1koXiqwE5ymQ0eJDf/F309Pz6b706IE0+NHkkbnGfH6+/+WlW2fq9cWta9bQpUbaHKyMlo4dDEJur6+TipbuOv74X1z44sfOx
x++NFVP5mV9/dKOrEvMeLwAAQAASURBVOdvv/WuD/7Cr0oDZWZvvuvBRhq9+ugj733/P5uaiL2sdXMiIbIBjYajeicWwA1tuoMgtWLNe+6/k/vqJ27ovPnmg889fvbx588OKz4+l7bq5vWnnmmOu6bWGXbXdUS1QAdn1cVLy2xaYFQiNJv2vKjtjnIW1dWVSx3Ntx0/AeLUo8+8oATccGKpNUNb+ZDQ1tnN7ZvH6eaRxZuuvnFt5ennWk00giem4pDZDpn33HIE7PK53WI8LALiTndkjFRCgpRMNBxU2pik1ur1smatORj2qrwExjJ3LOULr2dJHNfTZDAIUVwfD0IsJUiQsjICCG05tgwspcqzkhFBILPQWrBAa/i3/+8fe/y5r7389TeuvLyR3jcfHInAQupxJdeDnJ5sro5GL/3Z+I5j9W99R/v0hf4XH9+5ssKTzWlXqsoGEmgLFydynJUs0+nJO5YW7ri6si6N1spErF999JGHTu5/4o2uEgKEyAeFEkqqCIW1JO99yzuffurxNIqhEcsyzC8+MIJBe9/BrXH4+O/93bWLb8RyELplL2SxGGwMN3/nFz9+/12NmSONQR8vX+o/8zyQrietVOlkc+Df95PfcfsRf9eJ9j0P3r2/fWnrYiHzqzccmS6qpw4+dKgserV9+0eQGOxIx2g9wlDrUYCMYqfACrEouLSce3SRKjd2+tBJGKHMq6p0HGD8zTLy2ZdOT01Obw+6+2++8fhtNz//lQvTS0d3rzVWVi9xjJtvXFofZkVBtXrHaNXbHe1uD5kyKcTa2pUzzz7Tbs30rhZLC+2XvvwNL+H4LccjGZ198XXEUXuuZatSJTwe715ZfWO6VZucaqUNO73UAJzrdXdqtXg8HkWRYvIoWCtwlYuUGJUDof1U1HTeG43g0cS6KrkiMiapXIEBJJhA2G53bDZEChPNVlZ5gzXrnGSY7Cx0/cZEg4ToxpF0YFCAx1Dj6vlnPrSweKy6spNVdOb8pT21TTlyjKKRGvTUpjjvWRdAKeFC8MyIECjscUor8LP7XKMOdiB7G45zrvkygjLwrmzOIgErLYARPFUO6+2pmejCytpnn7t8x523n7u4UvmwtbM2MZM22onnaufquso9aNHfGS/NNVqTE8NLg5MT7t//1H1l/lgV1m759jkuw3e/821PPfGKnHnTr/7hl7quRCdEHKqxFYokQj1NAqEQMOiWcRKBIGft8qbVAPuXFqC051ZXyYtDMxOzzal83FWTtaqyaUGjbehMTQ+7QxOJW990z0f/9uH5w0cWDrwZtp7PvBuOB8164+qlHRkb6vXuuenwG8vP9l318vm1jlGPPP3GwcV2Mt/qD4si7Jy4ZWF1eRsM3XB86erFq194+NMHDs024qTVTLc2lpPaxMxCB6S45W33PvPok1cunhnlV/qu6G52x5ktQNqdwcXLl2fT6KE3H3/+uRVSNLfQGI3d5u7Yeq430ivXhlfeWD08Vx8EB95Oz4af+vf/5MN/9DebW6vI94fe1XEma5P6teeff+4zX6pN6P7utZ/6F/9ifv/C2sXBxsYGAd9y8sDW6vojT702NzP7yus77XaDSmvD9aDKvvmpfj8XZVV5FmzZy+1R/pef+uJqju/6znce+w+/+D//6MP5tbM2H5lUYxQg8HhYhAAulMVogKBQra1eeU1q1e7UokhVeVlmwQefxAa1SH02lcZ33DJx/GTLGLSsdNRhjraWt9hM7G5vcpl7l7z26nh5S6ZpnJUjQiHEHpKLtTZ4XUQLVVUqyQxhx46AUQAG4tKW3ldCChDSO4cAQipARmIAMlpKEApBCS/RAbOWiJEuLfmKmTWQBJQAgBKl2Jt4Bgi8u7Xbbqe73QEDBM8oAFiMhqPgAjGhkBxQQVSv1cajfGZmQiPaPEzU5qWXkrYdAAtUSgYPiDIvbQR095vfk7Zu+tTf/15Ek4rcaDiWjRiJBTETmSgCZiLPDAIFATIjIbnghEBGSUwoFTITk2dnpJEoAYCJKVxXehMzAEhhAECIwIBE1ztFTIgA1lNlbRSDwIiIUJEPgZ0HdiPiyGhtzHaPHnt+9Nxr2yfna/Mtc3guPrQ/jWMXJzpAHoN0ZZHUIh3Hw66orCrKbrB9VxogjJL6DSePhkCOjPd+PBpFoxFmTk8k0sRJJ0VUQojt/q7ymQc1tjZK4o2s4itrE/VEMATnOhp0s24LGwI021M2r66tZ7uFQynTSGgUE/VI68gMRR7COHPGqDwwWVYj+9rr11o7PWZstNsk4p1e6d0wZm5P1KrdIpAsZNRnkTNS6TN3XXejPBqpOAQIwSQRoqln48NY3HUITh1onLxxUk+gbxqURsR13WqaqA4+sDS589Lzxz732isXqsIme9xIH4JWUgqEvVfoQCglUgjEtnBprR6ntdEgeFfV682NLTs3v7jTXQ95RVQNK9dMjXBsFFCkEABd4NJFkdRaaQBgEQRE9QS9ayjVd4FjBQiDYRWXMq7p2Xorz9ygW1pFtYaOm1olGNBDoD1nIPiAgoGRAwhBQiAyCGCFiIgCQSFL3AutwZ6iTzBJBETc68ZJJVhI9MyE4Nh7MiiQmGzZ1J6KsSGYi+HUIUiMKIaUOxfFullzNxxDCaE9HaFDX/DGjiMQEx3WNYhrkNag1RLDHUoBmdRw4EMXJIMyVdqKDk/Ew7za6sbnVlxjx+9b0mlHVjk0pmfSer3/uX5/GKTQ/UrsXHVCKkGSPCFI3rPaBScEMHkvQSlBjEqI639ikgKkAPJWRzIQa+BWQotT1e1349EHABrlQpT+uxuPfuGj/f/1qd76EFmnKtCpdtmcWW3Mak+xG2/IOBKpAC6AAioHSrFU+5bozbfTk68Dii32uUDQIvL5WNcVoQJPIAWqyFlwRBAJ75h8ABZDW/vQ58bnr6mFVq27JU5fErvDWCdpFIm8cIoBPGilFHJaT21ZAEpHGDwJgQiicLBe4OZQXVtnFuwgooAsyZMjKZFZgbDWsTS33XHvE595/v8ZFUWxMSKVUdSM4s31nVdPv3z85pvLQREGXjBmY/zS584+8Y2tbpmClCiMrTb442tNzbfecPAnP/hjg9XX+oPRV75x+smXLxDG44JaUdhft++9nz/4o83B6k6wtU693momkxPxYM1u9rrtA/FwnNks/tgTxae7IUjtgUiQc56AqyooZCTGoJ8+Xd3/1mYjyZ3fbCwmSRrJ+uS4H1eDQRLXJyZRmxBcdvKgT7RmBkYKjFEcQRAIYL1DKQWqIGtSCJuPBQpvWe6VylgASwRkcgICBSelZHJCICIjEgUHoJiElCIgexfA2lYTjx3Uh2dw5/zKCPTpoesszfb7eVWKyMhgLVoQhdu/lEC60FrkjZ6qteJse6t5y2QBjoIJrIcbGwn3wTNQVNpSmiqJhWDoxHFR8Svr/j/+5vP/9gfaNxzKy2wUReaBe287d5VOvzgq8qIRtoyx8yduX1l1N991w+nXH9XtWO4//PTXXvrE//iJldNfPnrgTa+cW/v6S2svvrb21BuZiFSWORG2hURbcWSkpIqc
raVqfrH59vfcDFTqSOEA+7tBuejpx56f3z/ZSGuNeIoFtCYmJFiofK0VFZWWShmoOT8Rzy2N2WJWpFHqYCgblG9u+9CssoEtK1fkNVYCpRCxLS37ITBmthxWmDY6zAICafSR0tYJIiNUqpJGWRTejrIBxEZGUVyvmbzgwWC0sbYSaT212I4iImVLmnnyifDZL68emZk5PpXUGmEMoyMnOoTO0Ph97z9MOnrjGztrl0fPvNJ/+cLQVn5uqjWp8kuvbrfbjXEpyGe1lgmV8Pb68QAAq7JSShGQkoKCIwJrSwJBwI5IIiiUgQIzErGUKjaG2NvKKilCKAUI60oGYA5CSGM0QTBKAyCi0kqXZYkISEEq6ZwlJhDChiAwCAQA8iHU41gKIVAIlI5C8F4rIaVy1iplamnqrA2BfRBVWXod9vpyCOCJAMFojYxSUmTYOY8YAjEKBIFSyxAouAoAiIMQKIUm73zwUghABkLvfXBeamWDb6hUoERkRGBiqVQSp0qgs5VGsJUF62tpJKUUAqwLISAHzxSEBEZkhtJaLYQQghFQgpJKSi2EsKEKzkdGiT26EzIzB2YjFQpROVsUpeWAyIIYnRyPhsoY9oEpKAFSBGt7EkEKISWWVZGYtJmmsUp2BrtFWXriVq1mhPSBfCAMVLGtylIpgUJXNgBwXhQgwBNpqZgYpFRaXL9FATBSCIKqdM5aAai1QkCtokgKBFRaAgtHziiphbjOtfLsicgGKWQURUor561AwUyBKDBRCMETSgwey6pMk1qtlighNA40QCRlrzfcM1KoOIpjoxDjJEKtEFGhGA7Gla3yAgha01OdbDgGECixKp2vOIoNIxTjkgV4T1ol09Mph1Bm2dCMuttjQLk9GjfTWiNtCoQ4Unk2FhAEiCSWe5ZfAFis6cHypROL7SE2Xz69G2khYgEQEUnyXgRyWWGUKMq8KkshYKrd7mXjsbOx0T73VIWrV4qtvmhzWeU7InWH751hFbbHo+bUlJadZhCRxmTy2ExslBpxxGXTjohefOzi058726bk5JHFq6+e07FfrKmf+fkP3njvfaXzm2vVwsJkq9X4+R/9sY98/CuzSzWZaAgCNFSWkzjaO+C4ihA5irUXYbIlFidq737HjaEYr28OPvb3T5+7kpGMQXIxopnYHjjScSLvduXubqU68flr2zsjq4OLjGs2zYl9Sy+fH84vzq2ub437w2Hh82E1t7V5190383j04rmrz59fm1qaf9O3PXT6+TduOTWXQXjiannb237mxRd/2xjdjE2roesiwrpozLcP3tK++9vu+Jf//sNFQYGlJQ+MlQsOwEBIUoWevM/JgawpQSGOJAgOVCVpLJQKPiT1yYtX12anokaTqzL3CHHCROQDSY0hgBAopfACqipIBkcU1aK1LPz0v/tfN91Ue9v77hK1tS+9vmGZrSdZcU3xfDv77h9JTh2e+vNfvfriufLyxrX7H1h68P5D8Gz3lZWinmgQuLbV92UZJ2QM5K68+U1v/9SXPjbfSqqKbVXl0YYaX/u1//rTf/U3j/7aH7yKKhYo8sx66+p1gyS/8czr2piiyBWJYwfn/sXPvb077OZOtZI0ZuPteHGhiSiuXCn6G35+4dCHP3vlsRdLhGpnJVP16ShRMhEXL/fm99UndGvrtP3iRff4k+7E51+6aeJ2szKcXfD7vjUqG5mrCIfOQxYlM1xWvpKETa1lKFdYGyHr5IdYIWuLiRKkuutmZmqih77MKyFFmurA6DO7twqOntx34Y0rS7MnQ1VdO30JoJzYv/Td/+wn/XD9f//tLw+6O1vdXndnu7u+W/lMat2YbPV2XZyYw4ePhbJnDd9+36kqH6bbUb/fG65fu/b65VhWzUUdN3S2csVPvtzd6d/14PHzX3/qiRf+4fi9Rxq1xsq1/lR78cLymWbTKI0UVDYqiIA8XzflkSgrW9hCG2lUXLlKalUXcWktBUiTeLfbT5PY25FBmVcVKwBGEJDn/UZUz7NdXw0Fl6HySiqqRsqHNFUP3nIsz20kRvvnW1mWd7dGEKmkHstACGJUVs1aFAR0x66sAjHGQqOgEIj38L8MCGp5LT98BKgK9TS2tsqHLAIAgh5tRgawgVLHzgYl6twrR8PRvTff9ZnPfrTXta4MhQtLS7PLV9ZGpY0j8fQzjwpnVa12w113XHzjuVMPfuvpa099+x082PhiZ64wqkZuA902mNduuys37YkP//67/49f+tzfPnL1Hfeeuv3GUx/76MN9S6FCKYyqidm5hrfWlV5oXY18c6beH3bXr3UxwqqwrqEMh8XZmoySy5ubbjSem9cyDKW383NTr7zy7LnVK9+4duHvvvDRoxH8xPe8c6rWzAf22ImjF85dRWXabX7w5oWzG9mFlXy5Wx07sXC1nw/W+81IKZdTYCXMhdeu1Zvt40dPDodVc7J57dK6Nuap586+eqH/zve8Y+PalZe+9vmLy+uefL3fuPuhtzbb6V/8r7+RujaZRr3tLBVY5LaVxkzFztX173z/gy+dXt/oFv1Bt92I/vTP/+hDv/uX/tqGTHDz0tnVM+f7G041Bo89/PC8qZ+9dmXfyeYrT3xtJgYTNd/+/u+Znr/l3PnlkwtLENvxePeF584tL69OLEy/dnZ5UKCJQHpK0utA36XZdtbrEwU0Kssds6vVIuP5ma9/8aVnv/LjP/FT//x7Pmi7l//or36/3WrubG9XXEGk0AUIJJRiBm2AKRDZUb+ggFLrvaF2xKHD/qGHZm67o3PzPa1aO3LbO4ONTTty1CYAactAWdjdyId9cf4ydgeIEQWivVs0pRGYvffMLBgAQak9lIxARCKSCEZr6xyACB6kBCkkCCbwe6ZvKYQkiIEnE3tsiR54aGJp/2SwYWVjkFlRb+//1OcvPf9aT2KktPLeM4Mjp4UMwI79gQMHd3rPaW2YgAHjOM2yjBiJhI60QF3ZUZkXwUat6WbAuDUxv9UbSh4GgeyYnJdS7E0MhBTOlfWpzle+/tX5mcmiqKhySkShYkWGJKEECgJ8QCm8tUCgpZCRRgDvSEjkwEIIYAJmYEIUTE4g+hD2zsFwfZGilEIAE3OsYuctA5hY7eEViIJSwMDelUQsUSRaWe/3ChgsgIjHo0wqUWTkrP7GsKpsLtgeOpjecbJ2z4la07jJThoSl43HQnO7UXNUz8fluLC2qnZ2y4JHy5uDdr1Tb7RnJlv1SNx249xGv+oWSgg1PdNxFiwFLlyWDaSuA7HWCqwUGClUsXLjPJuoGVuWZeDe2OYrW76qtkfjESmjlTEKyIbgUWKnFicuuMqN8lx00larxuyD9f2drtFqqtbIi1HZ7VdcRqlK0xqEfFSOm2F6MPZrO8OGri20G9eDFN4r1uR8CFk9rk1YvjEa3jLj7nvgsNzfkTVlYc8lpSUzVjmgdC5QfSqJ6n/w65/+xOd6hU0tBwZCAUYKBN7jfrJgBBYC01iXRTG32Elq0yv99PZ3fltnfvHI4uJLz7xw5fSn5XWaMrEgKTGqvPFgCKT3OCo08nRKSQzW6wpUVpHvOyl
wbrrRGldvbOx2ZuqTU3WwlS0qL6whMRXrXsl26Jll5IWKDYIFDBjt/doQGECgBuAQhGAj0eB1DzgyacUCmJGkojiWWgkkts5LFWclZCMXPIbKCmIjIPJes28YUAKOteHuuxoBRmkLxjlMz5EC3N1l7x0gNGvsHChZJXUws3DolNBGoCBbgAfIC0DF0kCiQaAIAIqlUMAQKK0qUVEADWrYjbZIXdsBE+nByPTHfPrSzouX0IMi8ASOBDCSZ2YBAvye/o0FgwCUKKQI4ImZWFIAI0RThenYL0z4/ZMY6XLfwpQd5rWJZP/+2qGDY6MFovZVN27y+76/MT1T/dVH+hd3UqPFe98x11jaYWEhqLgeobSEFjSQc8yCWLGoM45PnTCXezGECENNgAssSXgAZlWDEAXrhXDeog3oQe6JtoSUTKIs6o+/BAjoy5JISiHivRFXFBGiUkjeByCTGgbyzgklr0dFiQj3KoV+FEii3ts4AwMKRAAFSgdJCaiJmX/3X/4/v/9Lf/D/jIryfhYlza2Nwb6lffP7TsH2aqOl7NiStfWpmf/+u4+duWADNRkUA4fSKiFckP3cP/mN1Qtn/4SzfKcYcmJAgk7ykHXfftfiT713//zC5ujqMgVtvW1PJ0as6xiSJh1fkM05Ho/7MwvNe9565Nkfe3K5kL6Uwe8pMJmIBCqp2Ds/1rNHbr+nU3x9TG7oB7ppo2a9GLvtzbw96dOOGg1GVIQbb9/v/nRNG8UoyBMBhqpCrRB0JCddGPqyAGmQmCEgChXFTA6Y2NtgCUUAoQAJUQAKFIGRUIACLWXsCEfDwaHJ/D1vi//NT95Xb2euHDVqdT9eWt3Ylc3Oq69s/PnDu489rwUqDuQDFcHd9s6TYso7v7p46iicyzZfXdMzm53jt5u0M1y+Shrf+z23/fe/eWFjJZeNSEmEEIb9IjIROQKpX1xed+03N+bXVLSeF3aQ+RdfGV94fXT3yXY23Jo43ixccebq1QFdPXy0+eC3nnr4dXHxb/07P/g/v+XUzM5nHn3k2WVHxoCwJCLyPrNSCc9QBRRCVFQtzibv+pZTnZm40cDKRlGcNhs6WDNa6+1uZm29WLgdyU5SHkmrNZclXLi8srgwPehl6fzS/NSRSqaRKMfLz+hU9HZ7gcfEtlaDTjS1urqqzVQtnmPKPLvF+cmt3Ut56QOUOpoUqNuTCWfDXrZbUjQxfTjrrfV2dkb5YP90C3ymMOxu9rLSGojT5nQQLQ26WTe7vd0sqwzA/bfNXzy9fWXF/86fPvvrv3j78SO10y+fH6yxjJq7u/Xnn92dnW0kkb7p5tm3vPWmR75y4Rsv9le2fX8rSBvVAEyztrKxo31kEo4a0fVBkUAlNAqgAIzoiQM5BlASPQrvvSdwbIE5NolgIGIJAkEmJhGIILRzlkOw3hptgDFQQEAgIQQ673iPI8fsnA1eSCWSKBmXuQQQICtfEgcjjQ+BQlBSa63JAjNJAKNQy1gIaZ2Po6goKmQhmG1VxVEcaSOFJPIolEZFCGlsgDN2XiBKHREBMLjABIzMQiIGJh/8njqEAZi9t85ZqXQUSZDCiFSgtLY0yhhjGNlaL5GlkFHNxMZYD1qbSEslBLAIxI7QhwCCESAvChQCpXJE7C0zoUQOHIJjkEoppaVAZW0llVaAZVWgkIF571VGIChEFOiDl8xxbJgRtdrLV0kBjqwL3jMaZdI0kQwSvRC202zKEVxXdwIQsK9KD6gjoyMTAmttKlcNs7GWKjWRMlIIYWTNey8lBuLADARA5IlKR0pLCs4QpVGMAEJKDh4CXv89WFKCKJDWKopSX9k9HFDlQuWD0oYZpNDeVwJEPZa1SJGUUeSbNYNKAUsKNDfZadSSOK3pOFFAVelJSA6ktajV6iAEBFZIVikWbIO/uLya5/m+hbk4NlWRQ0BBpME5WwmyLA0yxyZxleeACtJahDbhQsh2oxELsbq9NUdN1akrBXlR1Rp1Jiq/KUimSh6YnCt0dXZND1f6WTGOQQoUrnKqZtJOlMbx7lqvqiwwBhYWgBxpI6JUZ1kGDZo6FW/l3YUjs9Jw0oyiTiwjt7q1temqhVY7rdr7j93ZH57FfKCnk9F4uLNebGar9qyU45hd0KF//Fjjpre87wd/9heGG5tFEWwpbr39CIbq7cfvvLC6OzPTIKPGpQcZmrX6IM93NocBwBdOx8jS7p/vfNs7Tu6bjSZ09uLXr7z+yvZ6l/uVEljTDAo4YsEs2HCjKbvbW3ceP9QdYzrZGG697MZeuTJ0w7XNc9PtfduXLsncRiE4YoDay69vDze+cccNR4WKnrhwofvcy/vbeM+9dyck15e3JjpHg59aP/PK7bfMClsdOjC7sp21Ds03pmYvvrEuo/juI4uPnlseWi6tRyOJcTzO67VYGLDkOq3Gd/34D/36f/vNeiNigEhHZVlW1jZMzBSuXLt8z5ve9sYrrxsIeQh7aLXKkhKoI83OFt7qNi41J5T3EdPs1PT51d12PT40M3X+dH9w7exmb+TGlOWWPWDpxzz68e9KJkR/0pfTU/LrF+Tpy9mbvqN+6tD0re+641//wucuLY+AoF2r3XXfvVGiH3/0q1FTD689t3+yUeSlJGy15Pzi8A8+8Uuwc/Hyq9+YrNW0kSOnmEm2661WW4rat779wUcf+1uUUivRbDeEtxHi9vpuu+6VCJXgy+evCmRXqSoLW+srcdIajRtlNhaiAzAx1WqsltdaTZ3KulC00y1ajXZ/M391Z9xvXpHD/rseqqBc5kaNxCRHWke5hBA0h0qjJgnPunwoNCpjJVg37InECGxoNf3Hf7iaFdOonbdcWC8FAVwPEQDA5k6vu3711C23DLZ2tXX7989Xrnd6+cL+fZ0j+w+g9bvtwfGbD5larX9txFxUedHrD6FVixtRWY0vPH/16C03nrj1WDo5o4S6cu71zpF5LOy5F5+vyuFskqzys1l7XrdvCegfeNP9V1Zef/XxZx54y7dc2RwkJkVBVWWZMNFGa+0QTay0qdmqYOY4alpfESOwdM5HOnJFYBYjztKaFpJtVRBqNCYEhICRUVolQghb9jsdU+yOcwsoxOzifi10t7tJJEbZSK5vKon99a1YC5PG043o8oXLpj6hE7Pdy4TAVIO1HIjz0gKxjnRlXUAQEhkwz1OX+51NMrqMtNbIvvSIkPVBBkgnhkktN+0GCiopNKen7UR4/dEz3/Vtb/2D3/mLffsmIy2bnebmi+cbc5MTE83+sFdWxcVzL9d1+vBnHxU2/Pi/eQuERzfPLDdnO0JGSStGLAjyJM6L0XO/+5/fsdj6h3e99dCpo3Pf87YPNBrtj3zitb//9Ks5JrZkbx0GMEpGk1G73uzu7np2ikWS6JVur/f4Uw/efao33tKxsSFUZcXrW52Z2sNfeGR1a9yamBz1x6Tl8jif2X+CeENpzsd5f3d3s8zW19aPHFxsprUWb2aOfNH3pZvQSmu5vNrPs+KB++4UhnXckRHPn9ofAuu0Ead+eX370m7vjz7y9y0A4TzGyjRrrdklkI3JqaljR/
dduLw+O9GaMFrU1TMvLbeSNOkkB07MLl/cXKhFP/R93/7Y48+OHD//6uUP/vR3/MyP/OS3f9+3P7mSbK1U++cnKBXoumuDzUSH0Xh8+OYTuUWYOfjA+96zu7J1477GdE1ubY1Wr13ZKSqo1a6trdyyb+aKGOUMpZKdfdPw7DkAOHx4cqouz11e27FBJTobO1flGJhJB+v+5I/+qF6rv+veN996+M4b73gARPeRrz3y0uunpS6VjGzlo9iAZPIBhNZphMRpLS1LiwoXamFpOrzr/fumF2UUewHSe3BjuXK+156lqEHb692dzdJ6tboN27mvtRu9YU9I4a/D1Pdu/BGFCMDArFAAX8/OoBDXVfLMQoBgQGQSEEWGwDMAexIaE+EOTMlvf8fSXffWZhY4UkEEU1RNk6Rg05j3nz2zlXkTXGBipQwA2MpJKYxO7rnp3mdffLb03kipTTwcDjkEEykfSClNLEIIcS2SWnR3tr7vJ350cfHe//3hT65f2iIfolg7SwwslIjTJE3NcKc4MNuaqU+Myq3AJBVKLuM0HY3LSLFAowFFlFTOi8SoKAq2dM4hkECmwBRYyOvTBCm0lDJ4R3xd7A57xz+jAjEzWE9SGhSpNODtXpyaQnCBWSm5x7UEgSBRRqoeS6MjRBhmo+AJBaBGIOkCU0ApleZoeQ2vrQ0fe2pwoE23H1GHD8C+2TROReU9hnGjYeahnhfUaDSGYz/O4OLVa1V5daIRd9ppHMvm1PShubmsb8vdIejIW8/WI0M90YsH9rG1XjZH3Z4IlqkCqoYjmw/zMg+qJkZFSZYb6TQgt9Io0cJXpIVKYlMz2js3EcPWyO46v70zWJqdnp1vr1zb2XTjld2z0806q/qg3wcWdpQ7bwSVxpPKyxoSgk9rres3yFVQYI/MT+XU6CRyX9W/ezF56B03+qnJIo0DkwERAkWRxorBW/A9HbULz19/4uIXvl5WZZ0FB2tB7JFvcc9Njwh7fScKIcuJycUds13m+2+89Yd//Ef2LUQdo9rj8lc/+j+n5mSUmLx0M5Ot0XbvQEvWUqGcPXXAnDg4sTBda5oe2t1jN5+4cql/5XR/XIizF4vd4c6pTv0/ft+Dv/nx57eroKRpTNYubG1OtdKJVMt+6JdUDj05mFmYQC4QHQOxBGWMdd5SkIi8xz0wMpUYQlCJQpRSBg0AREYCMPkiCJKaYxEUZVYXIhWQpCaU42bMS9PSSDi8H5I6NBqizEettp6Z1ex9kVnh+cARIA/jMQDAbDupghUYQEBUo4kWMUGZgvdCMFNgQGAXiiKAhMoF1wdpoFEDqcARvHGevvwCX1jH3AKF4GxVWSIv0QhkIvLSSEAADB4JhQhMiAiEgBIRUKCnIJVCKVmxCaEN9qFj9l1vTU/ewrP7EMogqmEErUGpTNNwjHkQEbZJ+ArHYqK6773qhlOzH/8MrGzS8fsETwj2WogG2RGSY++QEdiANIA+VCUyT7e0d37UFdF0ohQSi2BHOgIUCUHCEIFwSiPn+fYmFIWSUlpPEqTUprJBSfAoPZBgCNbHOp2Z27fVXxfOKxEljYlRXngupVKIIqiADIBhT4+4V10JTMwgJQqhCIACoZBe4ArJD/32n9hi8I8KaJ4okWmz1RoPLfuQ6qi3fg05tJu13/7dL5w+rwiiyU7c2828Y2Am5hAgNkYLM9pDhojIAD5w81LRW3/ze+89NKPPX+k/cpYPNKdqcbEwk7CTjTgaDIvAMt+F/lYFQlH/ks+u/ev3L/7e362+ukLICggZCJlAyMhojMyjy3Tg7R/9rX924z/9kf3Ye9VV2+X2Nppo7mTHSm62TL1dk7p+9suFqTUVMCFB8CEEqQQBMbtAfQQnFYdQMAZEBIEAhHsNZGTAoJQEZGKm4EEIAADvhZRKqFCJZpT9/v+1+L3fm1AyENWZwC6em2ZLOCHCDMUT3aWbzdu/5wO3P/TJwVBIVDJKEEOR+p73flT6/oZ/HXV7afae2cGwrMdNrsHSDSd+46df2FjJZ2abo9KNd/pZsLU0DiGgUSpNI9X493/66B//wkNi49zCIYxmwsS+8qAbvu/HjgzYbl3bNFWqYAzV3OkL5rPPb/3dqyMjW5XlP39iM01HspakJHr9sVRoyz3uknAhRAiduqrP1G85MTM/mzgKzlHeK3SrFjXrOcdmuhaN84HnZnveO7//+ELlZa/bjRTGUaM3CCaZn164OYm0W1sDHEx19GCwntaagO1c7JD1g+FKu50qnwxHVb0xYfPRcJxv7C53oomJJJmaWqrsuAyjoEup46aO6w1WoPIRJobr02kxDsMyg1oUmdQXIuBUbWqyzrYYbSsVm4QzW25f2z554+zuuNvfcf/wXG9uKTp6y/y4GpBTk50bVrbWM1lHKXtFZsP2kWPxzMzBjz6ysitFYDEe7GjRw0h6CfVapKLrOkAmlgopBKJQec9Aes9kAcA+SBAsIDChQEtBACil/N4wCGEvgSalJAFGGCGED54BpdKeyFaWMDgfBKDRZu+cJ5UGxFib4IMLNooiTayUMkp55wAIUWmpXFWUpc+LkKR1JaUQMjJRAPCUA4GSWilNKIGIAksIAYGIPLB11nsnpdy7biPrrauCD5HW1jktlfeeBTBK52waxUJIIoqFMlpruWd/BCEkIjrvYK/lxKyVajbqUkjj2HnHjCGgdy4QeaAQPAfyKJjRVRaQBXDlbJIkIKCqqopYCKmN1lFEwCwlowjEgDIERxAoeBaMRmBgIYSOIHgvhBQobGW9d4EZAxKAUrHUAgACMzIUpRUspVSJFAwcyhwB4igOXhshHVFSS4ihLCsU2G7UFWOiNSqQSgoAK6V3HkJQEqI0khLzylHunPNSS210FawPzNbJPXR3IBDCOi8Q2AcdG6y8YJQoAckVXiqlLBB5JJICjVEmjhJd6427EtEYg1IKIQHRaFluVgIxVgaYa52aik3wQUqMIxPFMQZm8u1Os6yqLMv742F/XJidLlGYaNWFglCFne3+cNhjIbKsaDUbjUYTHGkttZDNRnN2qnl2axNJSyIpI+v92lbfk0Mhi9w16qlU15v5N5w4EnE8OLd+YuqBhwePqNg5zxACFvTe937gs499ZHam05pt2KH3BM6H0nkQQjL0t7qNxCcTeWtKHDkUH71NJxU4C7s7m6zGrcl2vj2Q0XDfgaWyeGNt7fm4tcvFxNrLly6tX7z13fdtrOXD/rmFyWTzyvav/PFvmskbV5cr0RfNqfbS8bkv/tnv/Yt/85/STm32UFMn0frGUMQmL/xwa+gyT5ZNQ7PzBw903vP2m04enB5sbz/zpee7w2p5rfQQdSsXBEofFFJixP7jLV3zK72yHPumjqqxraq4PTl57qoZDnYnAhujgdTl1d3drRGVfqLVqKfaxPFwO7vWz5JL6ydPHW9MNpYvn/n6F5/eWSv2HztcOnPjHXfk3Y3xeCeOFsrMrq0Xg6CXz3aP9qflWG69ce7YpGnetfTUuZ1rXRqXjhDiusmdtwEhUb4abeRrSTMCZqG10loFCt4XYxc8Lc0sLaSLL
1bP1+v1wnrBCMxaSGs9YpAM6Iuf+8Db7rnhzk9+/ksXV3uZK+NmvNHPN7Z6E3PiZ3/ovb/7Ow8XdmwiLVWASB06Vf+hf3n75ulzTz6FT7y0ucnxqMD/+VeX3nt3tnhoFCd+Zr65sz503n7t6Sfe8+4f/N5/8sEvfvlvxzvdca8wUk1J2N2+8lu/+lDin/e9K6rMW7LW29yuKt+ZTrobvHHx0kyztjir6wYCg3W0tbxlgovi6MT+uWvXVuqNpkes1xohVFVFjVocC3ZMWT6qNQzYvPJrZy5560KtVfe+coWrNeOdtV0g9A2zPKpuP9L+4H+9NYNHQ2Clk9A2AIWHVbD7BY9lWJH2SaVB6CNYlghDFStUzu2Odi7WvvKY0JOxrOPYh4OLzWZDnTu7kUQK1gEAmNVNt96mO9PHbzlWrFy7duayMrLcHm3a0YVzV01LTs42/tuv/Lu3veV9D9z/1ubMVFWMTDMpusP1q1uBHIBZvbxqXailiS9cmbmDdx4XAbfXzrfi9kPvefDs8y/Z/vDpz33MdrdYkNDT9alszBUrbxIVqNLaZOOqHkeVs1EUMXFZlgjkyQtErYxSaVkNAAGkNEksEG3Io1p9MB606u0izyMdC1SDcsSCTJyMRwNvC6ONhzxpJByE5YaM2sIm81NxZ9/sytmrqGRjalLq6JW13bfWpva3p1aGYxeiklVbyokauFAWjkMgoQVAiJXwngBIA57ZrB4KAhWNKhBliEqs1aQrgzbtMh8NRzaJy8VTMYtxMawG3X1PPH0xak6+dnbz/ve9bevSxY2N3cpvpIksynxubq7wpQEGL7tZeeD2b7169YVXX786N1W0j+1njuygW6x0m60mBtM9f7YxJXVt/b/8505x8fPdNX/DW48WA/Pzv/Ku/enoT/9hfbXUJukohu5uf3amKSAcPbhUVlVVlhJxVHG9o6+sr7WbrUYrTmutKDajQV51h72hBVQLk5PdonLB5Siff3390Kyfnu186KOPjYvh0f0L8eRU0p6sfP+Wm48OhyOQoVFTr76xLlDWFg+YGE+ff3l6ZmmyMUFOeht6g9zn1jg7PdHY6ncTrZqJPnH44Asvnrn39pueeG410pvFypV2Uk+06veLrD8+1Jq94cjBqNG8dnWrtz2aaDf7mzuf+tjHb7/rrptuu+eeIxcfe+pL/+P3f+NDn/nwAdm+9eSBzdVBnI5f3bz8wz/ww+tXttfXNo7fefMtt9/6Zx/5ynNffHx/XXYOxuVgZ/nS5aRZu7RdrfcG33rbMfv6Wh2h04os41e+9vLes+D2e46Fyt6cnXr6pYtvnLlSemszC8DeOmMMW98txx9++O/qjfRrL35t/6HD3/29//pd31X+5R/9ajHend3frDWnNq+tOzcuKudIgLXs85qRR/ZNHJuo9i+5VAz92EnTCKxFq6YmFt642Fsoaq06r2zguW3aGsCwjJezIneFkZKIBFBwHpUIPgiUUhlA3HtLIQrXkT3MgRgAXAgCgCggsVRCSKaAUhitfL1J998avesdC7edjIwZBOepQmQjQmWHG0aYt9639PF/MM+dcSBMQIyjOE3bu7ubgdlV+T989eFGWssyGxwzOyGwCl4BSqkCsVTS2ioQa5EYiGNoTLQmxsOMMUijmQEFGU2tRiezla84Slr/8IlPG4hTu37vbfz+7z++dEDsX5rduNK/enn9uadWz13sbm3HFbdAT2YlVwQ6kmU2knLPoQaIKKREImCmECjQ9Vy6NNY5gdexw4gYgBB96QYMrIQi5j39BTMEf13poqQQgEVhQbDwxBT2qlPEzJalEEbJwlmJUu6J4UwysPL5FffcyrCRFN96//S772vPdqKytJXfBgAinF/sTFYKub5/odbbtaOsHGXl+dWi+9q2alyZ7bQ7cVxLG5ExSbMmdA0oEGEat/KyUJK3en1X2XHWR63jWqOgYmCtaXbiRC1NT7AIUA69DK25uSovXOU4iHZ75uSJmcbUvo98/pGt3u4YO+Rce24KxoN+3st0XnAwjdgHa1mYpGlMvcpDVo3iSAQf+t3rlfyFZnMyTXTBBNF4FM4Nd376J9499DtJnABJFSyDNUKBLSkoERmWCmXjxWfWf+PXHrmywwHRVpUSwIiBiBj2IFJCK2JGFFKI4L3SeqLRShN59aUnfvGffvud999x593v2lgd3/fW+y+d/0Y5yhX7Wp7fe6zzvvuaS0eTV5987sQd8oEfvc1zTNuh99qGV1cP3qKO31NLOrUwMGdf2Np8Y+fI1JnvvXv6Nx/eLJVenGgcWZh3Zb693VVRtG+y1mlNv3Jhee3idqtuluZarhrrBIuyaLbiClRVkNYxAAVryRCSL7JMCwEBBCmuwPsqiVAGACtG4wEGGyk1qQSEcrIJC/NmYla36jC/L4rUuPQgIgoCBsGlGbQTEAlohTJSWc+RB2thwCQkT85FJqqEhoCAEjoHTCAMYy4G1jlAhaLGIYP+ALIhSGPOLcvdLHn40cHpS8IHAST3nM3EIJViAPYOGIRAIgxEIIUUKAQSi0AkpEAEYgrMgJKCMFLU3Oj+Q+WPftfB6frlmcWg2sCe4pryUIqGqwETWzCMUeoCIUuNDfAZ4Hj6SPE939uwNmktDAiAORbCACpALXXMjMGRlCxRcEAOEAmMJfRzmGITQk+oQkUJkcCsBNICI3bOMwfArS09LlJvFAIprfd0c8475iAQQCCjoBDdeOqhja9+hrmgQLMTR6ryShb6aCQH0FHsbbEXldmbtGsEEOhCIEJiBuY9gomXQk7WE0Mi6H80KurtjianNQMMdneiRE8lM5vXLhX5YNvG5655UCmE0B3nupa2azVrc1tlmnBxYT5KmlfWN5zUkOi0Ha+WbmdgO6v5J7+6vrkTPKa/9oOdI3dSa4LXVrMsG7EJ9Ym48rX+jmSMsiwKPtxyh/39u+Z2i/a//tXXz16NUhIOhHWMYAOQ5JDK6V/84yu/9qHXf+1njn//D9+5s3V5WAYfiu0rFxY5yVFAcuJP/urrrmo4ciCBUSitGBhcAHDWBiml0EghCCUQiX0gx8CMKFAAavZ77UwQKBQCSEEC1V4ET2H+punxB955UMgeuRTTA2C4zMeUO6l82mxuLq9OTk/PLh4LQZFEIFYqVNn4S5995ef+5fcFWn/hjdGLXxq+/lr4SdvCqDh/vvfi5W48YbPs26H82nT9HkcXaaJ7333ve/qLf+rJITONSgJZdof/6fe+/Le/8u1udPrVR5684dSBA7F+6pGv7bshSUjY7ezO/Z1jx+7/f/3lq49f3dwFkcqwu1W02vVWLfnutz3w5596RCQCCJJGmkjMcysVztTN/v3tdjuanZvY3s3juq6lembGVLmviiKrRlPtmdvuPLV89QVpWuNhFkWpDwZlPQte1BdmZ2bBhXJ3y/tRTaCj4vyly7VWPBoX9VgHN5HnlY47QgaEqtOuxc3m7NHDa5uXTtz4drBN4X2rXltePtNq15ozneFoEKuQbZ9XWraT2JZhd9tF7QZlBYeU1Myxm25qzc5dPP/s7tayZzc331FC
62X2FSQ1VWTiscfzL3/x4v2nJt/+dtUfXVQK6kmlhQcrQGgj0nExCk7WW+HIgea59c21vjp8JK6kQ2MCwbhfyvx6M18pBQBKSXLsggvECGykKmyppU61qrwTe9sMMzMwgxCIIAO5yjmlFApEYCVRSCGkDh598EQACMQsUGilK2+1juKkpqUkComSZVFay9/0pQYOQqIQKIjIliURDfOcETLLE622MjzOxgEgUFBSShNFWjMFCgQQGJjYO2sr6/cYSQGCc1UkY22Es4TM5AMjegpKKkRBFJI4FYCAEJtIIDCF0johldYahWQO5DkyETPVanVjDLCkgEweGGxlQ2CiIKRgFCiF32vNhSCF1CpiplgqAKZARsogAYXSkVZGAqOKjBKyyPNIRpKkc04ACokoWFiobIUIUgqGYENA7ZNI74kSANAxSZDflHiiNkoaIYJPI1F6coGlUsyspCjdXu0rOBeMlEUVkjgSghHAVl4TRkYKFlpglEQIrADIVU2gyMAYIPNEiveiYRRYCAkCiQIyITIKCQqKsjTMsYmYvRZaG8N72jzvkBkIS+C8snGkI2kQiQEq5wnIeZ8XZZFX3jqJHMcmSWvO+XozYQKBQgsMIQghUGJkatMT7bLqlFXlghdSDrOyKO1onO0OsuE49wGKsmoNwvQEH9s/1eqk47zMCouOU1Mf7nSds/U4KqzNLTmyUqJPkiRSE1PtvVWwdbZ38uTRiXLy0jOlH8h6Ld3dHTXTqMrE//0ffvlqduXMa6ePHJ6/urHVbKdUFP3toTJE0g3H46MnZ/75rz945tLZqsiL9T4ZEiaGYCXRTKu5urXu3ErSSi6efeyN8+uNyTiIfhh7KOv2fO66pc/olne89Rd//5cvL19NRa3dMa2p2eHFtbfe+86LKysz821I9aiy5Iq0Ftfaid3oRQbSZswDN7+/fXL/5JvefuKRzz7x+ldfWF/Lhk5lXuZgqhDIB51wM4GZqZQjdNKSzyIZaSUmO/PtyQeGvfK1F15pJ/Ou3j4yt7SysXupu+uoO9FpdCKJpS9cVXhrRaWNObO8ubnTnZ1rvOXb3uU+9+ja8vYoD9Hc/g+8+12/8Qv/amJSV7nPHA93yqmZfUcPLNRt8cTpV6ePLA26xfaV3mwCPlYrA1s40EYRIRLqtu5XxRcfe5acE0IDcwFVVVW1NJ2e61xc3TaL9952zzs/+fjfI9ZbrfpokAuBQgJGigFAYVrvfPqRpz/zyW8MC1GRqkJwwEJJF/xwm1977pXFg1OnuwNFUFqIMVnfFD/8wcdmktruBgx7CdQYhTq/4T78lSv/13+5zbQvLZ/eMmAQ2ajoH/7hI64o9h3qlJKnjkxneel546tf+D1FH0nsWljPb5qcvvld8ta331Bvu53lqzOHbn78secXFppf+Oonn1mHydbSdHsuqTU3N9Zmp5JaFGpHOrV668Wzl97x1vdeWH6j8uLKZrdTS1fWdjyEtZWdf/r973/siRfHfjQcWPZlpck5z8AcMACNR2MS/NrF3pnXa4dvqyvpbXZVQSQ02SpTOrFUE+W5ahVqB9FCm2UGIOVE3fO2bMp6ZeZn7PnhCFiNcp8UBbMChOGw3FsFD77lzedeOvfiaxfeduBuNzynGvLQDTfVzl679OprRZHHHVWv17r9tV/5kz+HP/nz+aY+sf/AXQ88dMstdx65/S6BzmY9HaU7uzsm1DYurMftxtrFtdFwOLU4c+7Mma8/9ryoApTOltX84dsUYgi7c4sLm6srpVMuoNICAymJjnwIjoOJVJQVhTLa+WAEenKW+0axEspzgRqMju0YRuOMA2RZCR7iWDjvtRT1JB7lQ2UUcgQBERVTaLUWxr5OkpPOpEKtyM1MTAyzAUbgmG++4W6ssve+/81/+Nf/O9LCCFevmWYnDsxbYzvKgkABKFxwKAUCBQgfezT84LsnpOiyhNLKYY92upCauNUmB4F1tLkL1RvlzLEI1FxF09eWd+an+YmnXjpw69G8N5SOs1EFQqYNvb2yjbFsNFpayI3xpt0+E1YvbYzmb77jRO/K1+uTLBILQCAqgawiJEYIfRqVSb3TggxK53zXb3/h236kc/xNb33/P/8ExbNLnclpic6W/apLNp+saew0hJG7o0IoXN0ZSxnV8yRg2S3yc1c3yedpsz7uZacvLRcUnGfr6dz65Qff8ra/+/Cnu70hWQs6loCbm+OQBwpVWfmphfrivqlxiSptjwY+rXXycaqjycn9R9kXGxu9qgq+Epsr65vbI2ddQwNrORgMjh07PjF30zBc7RyZPX6w8eRXn3jT225/6onXx5W1IK+t9t773gfvf/Do889/7vLFMw8+ePOnP/mNbu8bZ86cve/uO/7Vf/rZX/6F/7a9vOnMttJ48tRtJw7Pmzde7rm8OdO8/8FbP/6ZJzbWrs0YNKPd7d2M+3LQsztbo4tvVG9c3E2adHVQLDYju9tNIo5riVqchO4YAKJmUoxobmL2obh59x03fvnxF1585hx5cjZURS4FoxRSy6wIIuCZ06/+xq//87oUAqEeKy6KOPLvfut3rqxfubhxZXv9Ggp0pdVCj0b9LOHRLp977trhGyZGVy9Oz7XTWnT67M6Lp/Ot7cmbjh989crK8ohXVnNQSAjoXWyMDU4IDCEwsURFCIgCBUohkAFQCQSBSOCASAgwUhABSqEUKCO1NEaCFO7otHvXt82/437ZqmUau8ykVU3ENfZSWHIWBWBSG99/T+OlC/288oxonTsweaTX291DzKwsr+tYI6ED6NRr1jrnKwAIIejUzMwe3N694suqIlacPPy5r82/uDzevhhC5V0QRCrhialj7/+WH3740b/Nt1bSZjOE4h0PLH3re244eU81sQhAI7L59HR66o4b3vNP7veBesuDx77w4nOv9J97zW5laEvhvBAStZZMtPcuapQJgVCid957TwTsSQiJAglBaiGFULzHJRAIApjIeyElMwEGBEQhEVFJ411FRByYrJcSAVFopYGMjgQIH5zWyrq9oBJS8ABBCGClna9/4rHe15+/9PZbZh64aXZxX82FTNYh0C4K6W2/09QzEzWC1mAsn3rxSt/TYJxl3t55eLbeTIXisqoS0/Gu7K+txzqSwRWBtallNpTSRHEjJ5lDNgoucmXcaELEzUhJUy+dl2ikkTaUJJRsLfY8hCJ/8O6b//7Tn3rhhTPt9vTC9OSJW06Yte1rm8vrW1cn622DmFWu02w10zS4gpzXkQzl2Me49yxom5rBSAp+5cwaoDzYab9yeXz3rYdFsBE5FYKlQup68KF0ToASIAa98LG/fHljmwIpT54F7H3JQu4NNEFKAQDeByUVCPRMtqKzp6/tX5wR1SgRWhrzd5/7zA9/9z977vTDKJWQMD89MdrYqTVaLh8fWaj3lpLb3nLTIAskVdRZmj6lVp7dTA8kVdz3SZomev+bp/ffmvZe3zixUP3SP1340hNXLnWvgW4waogTZwk5H8Bmp1nLtoqe553u9kwnmpttJlEsCwfBt03CFkwcCyE7kg5OK62qYliQlctXRln
GzZbuxCqJhRChNRfNTSU7vUEU077j8eSUWFvJx9Z2++BBHN8nbjyii6oiCYUFWzjFUKur8dBL4SYnoB7BcAjjcTUaQQhVLYKpRYgagBKqzAoFIMDmMCygtPW8wOUV/9rZ8Mx556zojoTLPVLbEQIERk/AgMAM4JmBGEBLZBBM0qPURDISEKxGpWCPBs8ukFSICDHyVLH+w9/ReNtba1OLa2ktZgcUokBoK5S1KS8zAXmkNBGxGythAiYSPAcKnBa+VNrpyCvhGFLPktiD0MwEFaLUEhVX3jGxB+99GXxu1ZWt6igbgD2SdcdWyByz8zIlIpdndqfHjz8VFNYtW60SqVUI1gdCZpTCh2BQEEBRjTLnJ2YO9DcvSgNJo1Fv1ntdBhRxLOM0zQYuoPQUEDxaBoHSKJAoJXofWKISGoISUh+YnltqtHSgfzQqmp6ZGA1GwOyrsYljFKQSatfMP3x6vTtIZMz1JG5PtE3cbLUmIqWurVwa5XZzd3c8XqXAIBV52LH55gaVHq58+epEpyFV+/9H1X//23pV593wGGOWu6y+dt/79KKjIyGhDkIgmo0B44YrLuDuOHFsJ3Hy+InjOL25JbjFFXdjg03HFCOBBBIqqB/pFJ2+e1l93W3OOcbzw1be95P/YO8fxpr3HPO6vt/JqBpP9urJANnOddocGPXIppTlpWiLCuodlS6afrEapkI9+/oT6eXL7CsBwCSOvK9AMUpVcKm0GlL7h//3lV/6m/P/8lvm73v93PxcmGxdaDZPGr7x6Bv/ppYuG+2QqKg8EriqQgDhgEQCIYj4fIxIwPvwECENoFACIwIAhuCUoCLDAYKvQPsQIEkirfCuQ+pDf/Zt13YfktU0mKX5dk3PuZIqaRs/7dOw3ohff/XZ+Cff+4EqrysuXOnqtQRrtSfX4O8+e/rCExd+56+u/cvv/573/Mg73/PD7/3QA5/tzC7+t3/37nq7nIYL3/5Dv9qMps/97X8uR+MzL+1Ze1LBqiIuy0qjINYurbv3/tzH3vq6I2+495t3853dzc3lQ7Onb7/zw3/4iDKz515aP/8Xzz9+ueiVTowI+GZLqYT2JqO/+PRng/DiTH0yKl3hiHmlmx470WklBABbO+PKhXvuunFjsJ2mVE1yq2Jy3DJRsbfuVJVG1uVZZ7YTuMinLqm3o7jTnj1SZVkEBVSTvBhnIjo2zZmVqmSQdHeUAdcoas7OLYPfiXU601zYGOSTaqeWxuIpJF3r81F2fWF52fs8C1L4HKG+cGDFs1tanN/emuyEyIVDJ4/dfeTIjXs759cvP3Tl6pcQsRjvYJQMBh5CJki2YWLwb3rtoZev7a6u8ye/snr/m+7UnFT5tKjjN37n6889s1NrHLFUXLx0oT8dufHkxIp9ObTObQxedXNz4ktmsarBlUJ4ZRi8Dza2BIDOayLSSlgYIU5TX7nAzMwsbCFiEBH2vmRUWkUioJUhZbzLlCITxc75wMFzKMpSkVGKrNKhciTcSupIaBWWZSYCuXMMgqS8D0QQAjsulCLF4HzlnEfCNE5G2XRrdwNCaLYbBABIsY0EwVVOgXhfIQQimeSFQpLAzjEaE3xQSFZZTZqBldpHZiMSWqvEi/c+imKtjHclswCQ0oYIGTwQTbJxHCWR0VEcWWOAJTJKEShUeZ6XrlJGA0jppsbY0nskksBBRFsrjjRppTSHSgCsUUjKRPXxaMpBmmmqSOI4AqR9I1pZZJEmEoWCoFhrgwxKkfNuMh079kSYJDEBCCKCECpGVThG4Kqq0tiGwudVHhk7HmV54FB5H4LRmhDTJI2U1aDSOKqnaX8y8q7MiyqNIyLxwXHpEAlFkFArG4lnoSQ1PnijgAspy8qzc1WFqMhECDqKEgRxPoQQELAWJ4qUAtDaVmWpiEQEFGFgV1ROoQtBG13lZZxY4UCIgDjN89I7F3iS5eMsI0Bg8T5ESVwUFSChgI6j7uJMPhyPJ5kyxsYURwZJ9vaG3ouJVH84rqTY3BsCEkotSDKs6EDaLXOfRx6VdlWemLhZt4IKxEfWZKVLlSLvkKU9M+PL6e5ub38Kjh6qF3ubs2Cf2rjS2+3tQmmsRmOolf/wv/i2N77tfpkOe73BsWPL6+vb0+k4z/IYCNDPzteLLPzR/3pq5ph553tuv3Lp4mhvI46h1k39kLZWpxx0kQ2efuqrRR5eddspqkXbF6f1A7XQlwtfurayfMefP/C/5zszexu9hJJUY62Gv/sL/+zDf/fZjPgt77ppbWewsTUFA6RM6aTsT60l5qpdi9tRrAmeevLKQ0+cz3IoxwGVThqKEaFyNQuNhjpwqL2Q6rmZqIRysabiVq13rXewu3D8yMmi8r1LXzpaL4LjZr1NRX/esmo3IG5s9fsVYT5xqI0yJgQpqvz0qRNqOtra29Ln7crhmWY3GVV6WLrPfuxvXvjyA2+7Y25xthMfqg9L1e9V26sXH3/pQq/KLz1/nhyvLB66/c67X33P/V965KE/+6uPl3XVGzlhLMvqhttPH77l5gdevmoinY2qJLE2soJYVWU9Mtnw4u/96c8tzrbKSUHGClNRFEajIZoWng2kQhjV/MRrGyKrKkcFh1IwqlkQ+fAnnztyZIGIwVEcU5wC5P7ll9QFAyRc15T3corEJHoU7K/8z4emmY0pMloXzvvKtZqR7cRx0u4Pdo6u1BNTvOOupJ0+WE23Zbd49JOX2ml8/08v+O5Vil33VACz802vPQZu7853H/zXfOenf+trf/rBy+Ni7sjKbNfE58+cVdZec3tW1Bc+//l6Mz106IZ1HGxu7jKztsnbX3PHy+devrx65YYjB9FhnrvIRmkjKfPCATdTM5n6xJDz+v2/9dy/+enlmSMDlTpRlkELgJQDWyespvEiYBSLnzJmXAoExigK4OwJ/zsfe/O7v+Vh0z7Y7Cab1za5ZssyJLHdn4KvfeWxer3xpjfeOt59uhZTGeDcxWsz1mjjZlbqxaQ36eW1yMw1lGhdcPXQCy8/8MLLAf64TTBbo294++vvuP3eleWlndW99sqRenuGXTHd3phMtzsp9i9cZB8fP31T3vQjlYTRaO3Fa3fdeWhvWHolVVVVno11KMbqhIgCcMEFKwTCKEoQuSwrZjaRRYUSAgg5rNI4yUuXpKmw5C4fD/txWjNKRuNtRozjtAxToyJtLDKW+dR7zhHTqFYVBRdASnOAclStLHeG2frF9auffPjKjTcdXF8fKG1IyXB3eLBTSyN7mcdZEYITZgBmq32s1dVB+OhXwxsPg3hA5VQEad24rOjnhUKQqhwP22deLF9bP6pqRz/75HC8W9x+Q3O3bWfmauPtrRCktTzbXJgZbqxX2Vi8IojddEqTYrx1TkH5P97/mftueXNUr7kwqnUblufRe2bWtTkR8ODARA6tx9b4wqB+uJWNtqDoHZgv/+s/O/7cXvzBj72cDdHnwQQ4jqYWzTZnGuNhrxPFzQhF6+DKtevbUTIa+zByoZ5G1zcHjKitLkcleN/tJMPp5C//4hNXrmxaULceP7i4NH/l3Fndtt3FmiuLid
Njz8+fXyevqKD5hYOY1OcOHvd+Mhz0oChUkbVrsHiw/rfPXBoVXJ9tRwZ7/annUT11n//cR9oRbr/0RL03U/SzCztbJ29cqCaNvVHB4v/yQ399261v+LEf+f7f+G+//NhXzxy/6VCR8/VLWzX+mhsM3/bGe//2s59v1brPbezd8MZD13fGhw6fmFzfzkaT6fr1hdl6VMdr13cee/yL3/Htb7r59MEP/N7HbTqzlW21OsqBrG6NQ+nvPX3Tt3zHO/74w3+ahVcEyfUazbfnLp5b872pUXhkpp0fWdzujweDSVFUHFj2YxK4L2lAqPIAUglUpS6m+dbW1pnzz0RagUlV8M6FNI0rH9Y2JrrUnmukLJyXlXZnWMCuG125GDLsPrtWffHy88OAPogCgsIxsiJyrtrfDLngFQqDKCCSAIISPCIhCktAJpCgEIUBAQKHOKJ6p+UcU0WRLw/PZ//6J4+fvCvKR5eRgndAYImCdyVizTSa2hkKlE8nx45Ityn5ngBRkGpz95rRejotiJCRI01pGk8nlcKaNZMsBx8YCSJK7rvnHX/1oV+b6bTnF+Z31nfGuxs7Vy7WmpEgYEBBnPrwwz/xLz74R79RFH0VxfmUIkl//ud/MOo8GcxFzAAhIaVRMWoUyEn5xZv9d5w49s5pdPb58S//6kuPPROUjlFp7x0ABs9KiAmRtNIaAyeRAcCirNJGM9Jxu93s9XfyabbPaRIEEQmBAwdFiAAI+xRNRkLnKgnyf5gMhCiBRRvdrHUSU9/tbZWVJ4VGa6VJhAWEAJwP4D1po5kmxcxHvlp+7rFzr74hvvuG9MTxZjNFQlaWWXKJ8nLqk0Z6+20KLpZX1lyS1mppcXAG9kZDFSWu2BGHEXGifZIaXSJE8bSadFU6nPppVUZGJWxGvVHNcX221bC0dOBA6WWYlcPSb+yN64nu93o3n1wmmBLlB+dau+PB7vbacLjnROYbncX6LFeVz3M2WgKPhiMIHPKhz7LpmJJ6avUrZuTL1yepCqPhdGfsrZHrWn3+qU2bdG48EtsIraHggCoqq8DS8Fnry5996bNPPPflZ7czpICMGJTRzBL8PtoNEUFgX1aHIOJdQEQiVRT+5QtrM92GgfLpR7+8NWndvnLTJ5ypmOPEFEXBRj93ebi9Rx99/Ew3imSxe++7b3Dk8uFFXiv7L+7G1TIdtK2TN0/Xto1nBH/4pu7CUtG+fPXOH5x55qXs809VT13LOs2m7dY2NtZcEXRSbyV66oKI6o9lb9S3EbXraIStHUk1VXU7k6qOJ79ZhCiUeaWEbp5NYEkG40k2hlxBswlZgK0h1FNotqCmi1YNlu+H7py99DxfvORfPA97e35hFlYOWOurQkNgEPRpC5wD0lBrgE1UFAczhCyDvRGORtht8/JRo8gFAptAvQnrm41f/uPpxri1PRBfKMYUQIhR9rsGJATCCPvtU0QEFCQCYAiMrLSGw+3xqw+A5XJpni5d4RFjf6Iq09gcQWzrMZhFGf/2L56avaMfoqxkXTqFga2p5WNkTC3FGr2KSqW8YIxgICCRCPB+vJFL43KlTZDKKe1ZKoFC13QoDVfTyGhUGIhDEOAscFGWcRlgdeSnRUhTDHmuMTKsPDjRFZjChxGDXzuLL7yUBDRavxKfBEDap6MLE6L3gbQmhdvDLTSE7K2NfNUD9JFSCtlGVmtIjA1FOdeEk+3x0aX6c5eyC7uq5FQcl1lWRlaUSlCDl+G1tRcffbp5++n/a1VUq1GtHpX5VAKGYrR5tefLQtdbj5/xrdmFdj2eFFWz0d3tjff6a+U4H0/Gool9jixRLdHaTsYDXwl7JjGRNb7kaT5NEn/6NbNUm6xdK7odiUwc/KQceSKd1MpazTrpV4OgPJiktjFufOjBgQuJhhIInXjZt6FVHhUReM9BS7w6bP7s7+fp751/ywn92tuXBrzy67/7pZXuDZMwdRx85RCUQnLsEQkFmAOIkCYRZYwNAZGAtAoMJJA5XyMURSgUaVUGYeRjLXdkMVo6uPT4c7vg1NvvmxuGM51Xza5eDn/5/s8fS+Bt33H30k0z1DU+9jtrZXvhljd/3f94eSetN1tG/MKh2WKSVdoUk/S9P/jrrbSV8auvD0+1V8usefLnfvWhZ579yomFyKd5trH7sb/6KXQDUhKjGl37eGKJtEU0EopA0qwlnKtnzudXy50//ezV2djWtbQORn/wwOXeVicf5r2J3swyF4rmor16qdcxFNBWHKqSC0aJlO/lhsMdrz70DW+549yLL/Z2hsWu73Rac1H72mpGr24emcdJlic2rSWNCkwrjQoqBsNRs9NSWo2KEMU0KrPu/LGoWY/qxmqikmt1LMtoc7tXFHGre8RglA93+6Ot1mzLRlGRjQjRGj3yhQCnWif12t5gUlTxTFsRaI313q5OkoXAzeB5OI2MMrVGrV6P5hZOHjryur3N8ac+8cGEN2v1ojtX81WpmiZgXGb5qN8DhCjWW9eHnVZ5712H/uZjZx98eO033k93nliwLatN86WnzrrCjsqryEVtJjlyamlnYzUf89Zm9vBjo9uXZw6fjHWctWdUXtTKkO9PARK5yoOwMRq0QtLOBWYOCF7Qh+A5KAIX/H6akVADAIsDRABmDszOeUESHwRYArPROgTxgWnfPyEAIQSWEgKhmmQ5h0prKxD20zFIhIjBe48lai0ay8IBiCaqx/E0m0SRraUWkIFQBIwSDlVVls55Fi8i1moJgRAJ0MP+iaQEAoLUokinNC4doDKGBEUhG9KksCwZgWykmIOrxFiDgkmSJmmaGANIRmsiDBygYtYgEJQiAg6hMsp4H3xVKhXtm2OJtIk0cGBwAYK2CrWpKhfKylgVR9b7gqxlCb1+33uJDAnIvro1BK/ZeHGEEEAABAArT5qwyIMy3lVVENCkAEkCT7KpMGeTnLQOLIDkXXCIkaHKQaoosCTGJrUUQ4g1JpZzAyUTYKyU1kp88ForpbX4EEJwwVvixJBFkRAiYAWhAuWFjI211oG5VouQQZPyKpTMWimNoAgtUeF9HClA4IBFVVqjNUaFD6HwIA4Qx3lRjyMASZMIBFwVCMEgRcowBO/L0XRsJlYxMvPcwkyoonzkynGIjEVEX5RFNnaBZjpLCgqB4XzLL8ybWbsynk4ur+bXt6vMp1j1dHdBlFY6zLbqZeV5kidEApY9d2r1ZloTL0gkWmdeUfyKBxBrlivz2JefzPE2auimQJZXo0HerqcvPn7x1uPHfC5ame3dofeVjaHmMKBkucx2av2he/4fplFDn3/8kbe8c/mWe159ef3q3m5POXGjcna2NTO77DlStWr9ynRpruV3+7notr9x5eb7f/xHf0LGe4V3MjCHThyArbV3n7prvT+54bYjUOW7zvemxbT0cweb08yHKmt3zZGbFqkWkqCufWkt29CjqeRiggZt7WhSZRSc50OLyWKL5rvmrtMHB6vZdDCaTcNiu90b4bG5A632kcfO9es0XJ7Vu5NRbzKt7NRQQ4a8WKOZzsJKHqVWjSOtaknSaOS5z6eVGeXWg
krrF8/tNOuxq/xq7u/58berxGry42vTZAQ0L1sb26O83NjeCBwff93rD9x5+7e+79tHl3qjy9c++Tcf/NznvtxI1MGD7bOX96Io3u1nejyYXLvUH5TNg40oVsYYEW9QD7fLaGbl1M13PPXgzqET7bPPXSUhdl4ba7TyVW4VEeJwO1MDrimTkQB7ANKKCKCoPBpyVr7hjbe89+Zv+9mf/91pRaYIvqpqaTrJgjKIMRlQlYQwdVzo0V5V79ikZtK4FQb9YMUHmUyypZnmp/7q//31//hL9961+JbX1/Tq2bA32LowWbsAd7wlCG/p5kLWL9J0UaDlxiEfkrJQXyjf+eOn7nvL3D/+ub8qy2OXVvX80uGrlzdr3QZKCDk3oualC9eGvd28LLqL3UlePXvm4pu/4f6ZtWk9TsdRJQqd0K2nb376yUcEaDwu80Ep9Xg4cc9XtW9+67nffP/xu941DjMWVF1bosFLwb04HV6fmTFFzhRNUXc5YGDQAbQqg5PZpfLAzPRc37myShot1FhWeZS+ckmO6/WZpaVatzve2m3Nz1x99mvN+c5MZ+mTLz2CxlGo6rGtRxSRK1kiVCszqbYqMroonMvLj33qoc995KH3vfv++9/8zs2tp/JdlVCX+xuDtc04cX7nCnBtW5e1oweUrTdmW1//DT/02MMf0J3UpMYHUshG6aJ0TkpGVEoL+USnZVkiVV7CPoIz+DANzuiIEIssN8ZW4hFVo9HJq8KyqaaFNtBIk+G4CsC+KEpmELARjfeGGGNRhLrRw3y3OzsTi+VaWmuX4IrDrajoRUrp4FwaqcXlQ/3psBFbRkmSIo7UMxd3USEDECoOQVAAzX//k+HCD0avOlC6HAoHJnb5EGIL84fg/PrCz/7R1t6wzp94ajh+4t7bbzp0cvHJ85eO3Xxbkalhb9iIktHGxq2vfv2DZy4staO77nnjtd3w5JNPfux/f+BvP/7+Tz317EsbOyVzq90Em5NiGo2rSZV2l7yPQjkSD6aGuu5UyIhLGmHddnenHmfrr3/d3Nsbne9/w/Jw5P7yk4/e84Zv+uRfPvby85uzM+l4OE4TXU9mdrN+XI96vWlCNJwUheNWOquN2psUqnRF6Vzhtdbbe8XOZGJNcmBpbn17enVwZmauLrPzB0/OwXD0tacvFo7rcTKz1ByNQ9yumWZnMMoJKEFV63QvDtb6m4Mr54Z7g2xYmGowrbo1FygB3B6M49gqAD+p8laYW5x/8rmNpDmTj7OLl7fqzbQsw2ceePCtb7n3Pe/70d98/69mru8Z21Gj2Vl85qWzt9x87Ad/4I2DSfibjzw33r58080nfT0MfT4d8oH5uWcunp+hZk03VNz94qMvXbp2aWdafeOdr/na5Y8EkdI753XhJTHKh9EYeGf3FdHH9voaBCBgT8XW+qC307c21BMzGSthNlYhgLJKWKoiU0TaqCoIBwFxqJUgIGIAdNOpNspqdFWhSFWid1w0vDBZv+xPH+xMF6O8rFZ38xdXs72Jzr2vUDEAQgCSyvv9DG9gBkSFymrFwt57IgQOJIAEAryv+wbUIAAg3gelgKU4dOCGuN6+fO26ITh6oPz5X7h/ufZENfCWjIlnHJKK6sIVVH0hL5UAGkQEqE6faM7VhuvbTGSI1HQ6DSEoS8ooFsgnBQgF5vF0ByAAsNIKUaVx7cUXHlAiCeqWmVkrr7kQTEPlVRk8owgSWdK/+8e/oLNJwy/W5pvD4cS44bXzn7jx1SX7DKM2gAockSIhCwQEAahOSbuR0F2vj35o4/CVa2enkzqhzVkpAxIJKKiqQNrGcaRt7LIhsCg0kY7n200yWE9qvnQuhNIVCLifmSFSIMLCIkiKIARCEhTSZJQuqpIUibBS2phomOX9aiTs9xceiCLiAcB7ZgCLyMwAQZAqzx5sydGXz0dPnBvOtfJXHTR33NBemsHZGVtv0NRino8TqRagHJkwGoZLl0PoDW46sly6UUn1fplZLSBpfwJ5SZNd0KZudNSuG9/rJ3HifaFiiRIdNaLlo8utJC5F1Znt3sC5bHsnL/ujzY26Z570h5FteLzerNedcztXLyYHDt9916n82b3BNMuHA0Gd1Ouj0ZYRNga90iqNnX/li+jy6hi4cK5wghDTHsNHPr/xta9t3Hdj8mPvuz/ugDE1X/rSiwgM9/KHv7T7tRdzT8b7oixZWxN8QCJABlZaEXMA2GdtsSLaX8+JsHc+Tkxcjwf96fW94R/8zV+/9vZWxYM8G2kAD5TW7GBUTPJoznaWovizv/rJ20++pvGqI5779TlaPDQ72MhmDfPerm3UwTQMjgF93JhZsLEpq1sOB5oa6VcvZtVeQVFjxiKM86xh0TNrZcnY4bRwwSuhuaaGcnRqyeR+fKiti0Iy0o4lTWJreBimaSJHjup2Q5QOjZYq8pCmqIBasza2hYlk0AeC6sRxOHJADXfTi1cmz1+Ay5uyUKODh3RSq2oJVABOQ2AIHhBDowZzXdxclc0t9fSFYEEvnLcnTtDi4ZIVdGbhFNl7brAf+gKRNyjVfmeSgQX3MSAggiyiiQSBGQgIAIVZFIHyK/Ojf/++cOd9jWgmBhjLNHJjyNfSL/xD/7Er+toelOPa8XlonRSoFyH4pL5CbiLFiKbTy0+px1+aNrvxt33nwdisCY0YEFgF0hC8WAhFBkGyfnT9ol88ENVaTFBqa4QzEEBNQMr50gfPKMDaFzAa1S5cip47W2zvhjfd1TxyOBEACGMyEelSyPswmg6yvd3ugw+VvaJZQkABVLr0kliNEpiJ4BXaktKaBa9fPJPEsTZKKdnsbZQlIyuDaMgYELI6SPn616b//IdvTNGPtpJf/OWzaxPTQHe0m8Yz0ZMXh4PdqU66gOrXf+1//eL/84v/16posLeXRjofDv20SGxsof3Chd7jl/tD6KLo7f7QCe+cvSAshEp8cMxKNKJCDa6q8iIHFuYAIEb52CZVGawhMP7c+rBTD40EGEJ/yOUUjA2N+WRhxhBl40Eoy/il9UO/9PvXz69mQDWFjgx6ZvAiAhJAKQWIDKiUSAgGSjJUYvNT1+WjL0/gIw8ZikgNkxTCNBBaYCTRlkxArwiAgqscBE2ifOWAAqlg2HUbqlWzUVK/tj2ZFmQQfRmMxdcd0r/58/fNLKe1o4tPPr35B7/xNTZ7ycz8JJNas/XjP/bepz978Rf+y9UTr+lONl/e7U02x/yVp/6nkgWjwcZRlhejrBjtjOvdxsqxw/2NHsZQ9Xd/7U/+u4mKVmvuucf+aNDPxgp1aSNLioJHVCYi0q4sHUAonEhJSgPweJiTpsZ8p5+78UDcQn1vY+zXpgzjGopzvpHi8kIMyn3/j9wWs9m5Mvrjjzw1zg1ZUxUuTc1dp+YWlmrzsw2lRiuHF9qd1mTia1EyB2nPr29t9VtzHpVJopbWVqfxdDQisq35Y8IyzaY2MjZSS0uNyaRX+MwmplaPHJAgTLOs2ba1+sLuYOKk2B31XFW0oCnM9WZbYd1gntRbQG44GI37NDN3PA7zg71nazUzCWUpST4qjO0K
SAXReNwfjvd8NcHx3s76C7Pzp972jvumk721K8+wjMbjSQhSr6c1rCdWe5f5YnzwyHxVZEc78dJSenESPv3Yzu13vcYVg1Z9Zjweb23sMvUbrUaKze1BFSABPT1x46HVkf+H5/rff8PBmtkebq0li7NLy3MAqwBACoWhcsxAmpRGJeydD86Lc44IoijmwAIShA0jahSRqioAITK2KAqrNRBUZaF1JCIoAgjMwTODMBF59tMiEwW1OPGVFw5W20k+EYRaUlMI0ywj0uPJkLRCa/c50FprG0cRoHBghBAYAgOgIGitKu+cK1xgDkErUoAmjvf7TQiCCElsUYQQfAgAYLViBmRgYaUIUYgwimIRjiMK3jN5Y3QURUqRtVESRd4ze/Y+mCQiZucDWe2LoiwqBCZFioC0YhBN2hotAi74QCF45gCefQ4uLwtDCAiVd9aqaVYGCew9ACLHAQQRPAetrAfhEMqyRESljU3qbAQhIIoAGBuJ90iESN45bawLYZqPDUAABAITKcWgoyht2kYcKyQVoChdBOAQR3mltQVSULGAsPD+hWefOKgVaqWthOBKIEriyFieUjUtwZWqqgISamNDCAqpdFUQjpS2SiGJq0qPBOy1oTiOq8JZY1GEDYkjEZlOxlopSxhpsFGkSGozjTlpeB8qFxxzVVbj6VTnpSBzlhNyStWJA13nXNRqktpnBjiBmU5nfvvqasdsnTwVv+Ytt3iZWFe5EFeBx5m5cE49/OjGxmSXYU5cEF86DkWeqyjSkXaV1OtpEkXMVHoJRLMrjeBfcT+trxVuo99ZWbrvne/57Z/69yayyKA11ZrJ0lJLWTt709Ebj5/8u7/4TF72iKQonI4ojuWGY/X5bvNz/3C2E0duM3zmLy4tzx+Y7x6ebS/s7vb7k91gYVxNT998R3Z90Bht1yaht5kfOHDq/h/4mbh5oNjZBpfH9bh9dPHP/+M/+x//5c/u/fo7X3XswKMPP08dtbM7BKPjLvZd2Znn13/ToeUDzYOnl10cJltV1ateeHRXjLGG+pPCKIwSpTXZWM3ON++58eDV585dO9vrxrVXHztZTHbzNdOK2joE56vhJCuM8lkYDolrzQxsVfk4L5p5sdSJ51M7GZatWiNEYKOkN5mGABsbu9pIqxZ1FruLyzPb29v56uj4odav/9J/PbrSOnVkYbjbP3f25Sv9wfLNp1735ne8+Ru/iZqtrX7/L3/tNx7+9GcHvRGQShZbs4sz569t7m5OV46mnU5tsjMc9MLP/5vf/E//+meOH5mrysKXPmqZdquVOXzgC19atFF/M1daBfZRrFxgRgmOGSS1plVXtx3tvvXW03/+sa/u+dIpO5mwMnrqJVWKTPSxj3/xtZPeXa86dW1vOtjpO4S8qiAIaJXnTgBcFSSAJ2+iaJJVIDAabdcSo4gRJaqZvTD+ld/96OZ49pOPTa9tma/79pPjUefMC9P3/ddvrubGlWwONwuWsvIqTHMVN2v1bjG8lm09H1frraOtD/zO3Bc+s/2xB5RP57oH50XGCsOVje29cSYqAVLK2CL3iU090xceO/N13/GeC1/5EtMoTgEKfOKpxw2hrypltE6gdGVjpq7rzaUT7Ru/7oeL4reNYGBnSUYvX23edT2uLZT5EoUxVBkRKmqwT/2EMVbKoIfdm29vP/mRIm7WQoBhf6KiOKqn+1PQPXCIo9qllzez4bTYGzTMfJSVH//Sf8/LDBmQaTIoZprpwEkexA/zThoTsQFfQpku1PMqAPDDTz5y6Kjcc/vs1TMvFHnv5rtvk7sbbeu2r25sXBhcWffN+duf+ofR8WP3fPrcZzauPfXGd3/X5nCktAllAcCeCyOJLz1Ettmq93tDJZrQK6XiOJnkoxgTUBqVRUI0HAQiE4cSxiEHQVI4HO3VG41QIFCiEA7OHry+djlppCCgVMzsF5pROd01KJP+CBXoQJ1W7eWLW0Whlw/MMCMRzS53y8qjgDUxoATPrVhuP7V47vLOxIPnQIRIwKgKVv/kD4t33Qc/8y2dhVoY90dJg8bR0nv+w9rjGxMF9QChDhSgnNLkhptWek/0vvN7v+sTH/pg0ombneb1S2v3veH13/yub/of/+E/vXxlY227F9PeQuf8we74B7776//yf39omizUwp4rcw2ZaK3aUemmwgEJQgkKmTT6qOxfw/HO6YvFoS+d2Xbp7Jtfe0zOPXV4MTmwYn/6H33dwo0nv/O9t1796oOXX3jcRsc/8tfnrl6bKKzNL3QA9DSbprWYfLm6vVX6UBSlVtSqxUMuBHGYuU6zGYRLm7i8GmXlSvfEdmiuDlu4Pm7Pzh5faK5fWQ9M9TgmDnHwKan23NLmlfXhaBSnibVY5Vm7aYuyMAp7g3Gzrmda6fbOoNlIGq1G5nGX9dETN7/KzKVWvzx8fFpwXmakKE3hf/3ab/3hn//2D/zg93z4r/58WoStCg6cuPHadPf65jA7MxCAVy20155+7MnVy6decyLkebsTkdHtuXbcaMfaFxhd2Z2sD/dOzx99+JEv2yiJdF6VTrM9dcOS0/zpL3955KNTpw///eV1ABiMpuKFgNZ2+ufObhQO93bHo8wDUVJPOTiSAPsqPFBM6IU4ABH6wABCtI+QBWEEgRCCUgTAlfODMWiAMdDmy8VjV0sd28FUZT5mDM22sYJuXAQQ9kEjKq2JyPmwDwNGIBCJbQSIwkFrDcAigEBEWkRoPx9MEkTmFw/mVdi+es0Nx4uz1c/+6OEDB1az3RG4GiqbjcXGKUMckNDUCTUHkeAABMmlkakZpQFBUGvtQZRRJAoR2LkgXpNWBouqJAIiFZmoyjkr/MVLVw1GhSueOfvV4Nl7Nqw4QOCgEAm0L3E4gBOdm9/7rl/4ld/7efbZq25o1RfLiqY2qnlg0jqQAdCCShgAIgyAwGxyo+jt7zh67om1x7+agTa9MQWQwTDTUVT5sDBbn1RCaXujyFhgbrkDorLp1AXvggTeN3MYFAnCCKAUiQi+8k4HVmkdaRdYWErvEJE9E6JwyLkUBA5BEyIpQKx82N9xMAsikiarjNHal6XzARQjkojPOL0ypOtj+MyL/aYqjizbm443lxbbtZRqqjW7gM9curZX2HObxZM8eOLCcD6pTh1fnJ9pBM6nLtvcHE9K0xtzq710aHE2MtbOtgVCksZ7o8mwCBtDWLAHBKQQaaVUn/parVnsVQXL+Y1RWk/T2mJbN+YH4/Fuz3OFtdruZKM/qp9anumhTMdKWRWwxHZ9PHGFJ92sFflkWLzyRTTOglE6BFAahaHMQafd6yP8/PPbP2BmBHt5NrFGY1IXwUZcV81G5scOBPYfbhEBRCRYbVn23z4BGNgLAACJMAcvpBUDZ3nY3Z10Z2o1nn7yI7+19+Ldzz7z3MkDDZxOKARCUjGpWhJX0vCDmXnQYYsnMVfjERcL335X/XKZbbxQVT3dTJh0OakseLRJ1Km5zX6z4+5+dTpTi//+Wf7M1dwkbW3MJM+AJTF6NC6F0SAAqqqU/sCnqTo8U1vpQrc7aSyg7QQGGu3kiFA
FKAMIeS0AASyFegdERFOoijyxqtOhJHfBwTQDotBcnNyxok716eUL1bXrcGW3mumok8ew0/HkX7ESaguVAwwSN6Hl/MERZGX83Euy3Q9vrdloXMECpK3xt35H54tP7g02DYIoVAwkgoS4H7RBQBT8PyZAEBEM+wQesjH84Dvrb7h3qNOxZxIFOvWEoV4vv/UQfGfkNcPeucplrgpX2aHqtIqijFSmrQujA3/44Y2X+suTMN6t8h/6nvlG1wfFmAAjYvCBCxVk6xw8+WL9M4/m3/B1s+886Ii3QQEaL4JVleY7ur/nbaNpWvXzz28+8czg849Ot/vUK9JWO/n4I+33dOqR7QdTBslAkAvirHvma7O//7flUy/McBQjl/uI/iiOAAOgF3EAKCxKaULFnstx30LdWq3TeGPqvvl7f/hzH/hDrvriPRAQsU1ooVkVxZZNub0w/z3f2P3kZ/ZuWPQ/+NNHXTetJkc/+NuPPXV2V9m4tG54+Wv/16ro2InuuDd0RXnl8q5P6196unr04T2KWz6EfNoLriRNIYhIQCIFSEopUtrWinIMgEG8jS14R0TB87TMNdRN1HQ+/+jnd2+Yj04tuEzx1Cn0zfHUR77WG2yJhFZztje46d/82vP9cSetPNjg2YcQkFAASROAEBK/gobBwK/QWECLsCjAyBoIHpEBLJlIEVdlPi1VFFHlHYtOCa21YCJxoRkVJw/R6163+K3feHOip63Z7ngn+80/eP5vHhoyG2AKwm/7+hvqh6isV1V+6cQJ9eY3rvzybz1y3x1Hbrz14Nxse8xzj1y/9MknJ+UTz7TidDzie+69u9m5sr25M78yt7ux6wkcQ7PT1rq2u1tmE6eKCqVMolSi5nAksQomiYgolK7IB3EaiTAIVpVDIGNiXxYBPBoNrG0NJyHkw1Ecp6mxZeEiy3WLHBxXXtfUdDqRin/7z35q4+oTLaAf/+7vefbi9S8+MzQVHFhM3vHNr1mo5yFkICDgG7ONQzfMpKl9/qkrvdE4T6fdw4cbDeUcj6cOC99J2h6mcaRK0Y3GTKPB19ZWfQh5NW3XG/PdOO9daahONdntjfqBqQrZYPNaNpWlA4dvPNmcTADcyKgIVW1x8aDLd/qTYjocFdMqrS2ePv6q1d4UJeUqA7FVKDR4BgIVJsUwy8eJVtqiteRdub35UomjicTzh04Ue+s0KYpsNB5vCeyKjRtzrZNz82tXr3CI+9PswEp04cLQB/vFJ3caPKmtC1XlwkxtcbmbprGhNC/KIoPxbmUMzc+2z18f/8WHrv/Aew5355NSdof9VzrJZekQoHJeAzgJ5L1wALX/U0RhH/IDpEgTeACqnIgE7wHZKwCrlHcegbQxWumSi8IVer//KoyAREQS4iQKzBoANHnPWZkLIio9LQtiRqLKVUobx6EcTdkHa5SwOO90FCW1KNJWvFOIyujKFWXhqqpiBmMMGhNZbZSqfJnEqbZR6RwAKKVIQGuFrkIkMppABe88BaOVMCtSoAAYSMSYyMZJHMdRHBvFLOQqz/uUSJDpZCqhStJEkQ7BkdbAHAIrrQJy4Xwc6SSOtvq7HNhEsYkirirngmNPmsaTqUIS9to2K+9cUYogagxcBGYfghAaHUAQEBwDacUulGVVelYKmQVFjFGBwXsv7IRZkAKCslFeBqVVLU2toljrWhzHUYTMwEwaODAZY8hUvkT2gkjALgQiskTeBUJvFNYiK1wpZLCKtC6KzHuGgMZzzWijFAaNSI59KSFCRMAg4tmjoFK6KksE9gAOq8AlE8VGT7JcGHwojAUQjq1VxIZCo5aE4BVp0XoSXKQVKtuq1TiyzvezaZVaPddtmFAQYCOpTfMxCEXKWmOkmNx9d/uWG0CnBfAuGSNojI41O1f4gyvJW19/+MFn/DCLB+Nxf5ClzZi12dzqk0BZuPWd6Xy3VW81VpbmJ0XlvXPlK29oG1cH5fW1n/qBf/S9//ZXFufi6cRH87VsENy0PHJDe/Xyeqka6bGOL/O4FhGBLxSTaILp9uC5CxszEdTQT73qDZPf/HeP3/G65Xe+900vnXu4tTz38uX1pls72br7dO3o2dHEV/RTP/nHxs5OUfIqqzfjNKrDcPdtJ27dG/qoO7daVC888uzO9rAjSX/sCyxX5vTMEtz3tsO3veG2wfpeSVVVQWtx5sf+x4/86+97//r5jFgJCwOZCOPERjW9uTuZ7vLtR2+/tLq2O+Vp2b/l8MnxdPPUoZW6Hp9ZvXzTieMvXrxUCERz7faBDmyOp6vX0lpy6sSpzdE0B7r5njtXNwalFJQmizMraVVJgTccPXntzHMX1y8N3HpRjdK5eP3pF+jqXvtgZ/PazraUy/fc9pbXvenAva+rz7SvfOmjH/xPH7z48rbrj5OurUWqdWjuxtfcvHhixf/NF3d7VR6ALIhRz52/Vv/yg2+4/90Xr31eK6i1IxWb7nx7dGk4NxNDmfVHhVfkK6+V9mXFQdnUKgBxPoh6ab1oweWffe8bZmfTBx65+NylfgHqpdVeGHGtqfuj6vNfOLs9Fp0kpS+jlDwLAurYVM5xYAZtYx2CiBiF8WQ8YAk5eFcyoRKQ3sb0M1svQR5q9ej81eqjDz3T7kbXN4Z//8BvHz7a+Yl//J7l04cAd70/G8+MitKXW9em115KZqJC4loDirDzDe9bWjhR/vNfeTSfHo9IpUYdve22wWhtc2tSr0WqNFnOZTFxwzwp3QN/9Rd+mjsWU9PjcZnUdByZvKqkCEePH6wYt69vr+7tzDT1d//Qf/7Q799psF+WWZDadBs6XMVUYhILIIAXLqEKYKxNEid7QTxCPCi8IkIfeluDVrcljTjPXrkeHDp1dDKoqr2tWl3PzNZF4jDeWlu/EKeNonSh5Fo95qJqmCqKJCI1O1ubOzA7HUzY+/FgKs5JHF/q8//+84fDNXPXnTa5WUfNF0XqMi1n5rPZxWhu2z9/5umNa/CVJy6r+vzpo0d7g22ldNSoF+yEJbH1ZtIY+F1A3UznpoNMISKqyofgg1ZKkSUyLAEEfKgsGvEUR3FZTiMjjXpN9II1cW93zZgEle/l2ypWRTYFrSbBOwcxZyRGEWkjzXp749re9dVt8VIErxWIiScVt5i1VhGa0ThLm6kmV0yHnfnW0ZXO1a3hKPfsBVkZVYkox9HffUV95uHRyXl7bH72pfXx1ekulg2DKDRVIRQZJFZdX928fHHLROYDv/nLW5eviTZm+YhNe7/2K790573fHgwhvPzL//H7D6mt7Npn0p2XvvDAEwsx/dH7P/FvfuLN1GyGql+Wvta2Zdgz7VQzcyYUMrBAJlm6+c1P/dm5XudW3t0ZhPWPXTpnEV573y13332g2U7ySbq1tut0Mn9g5q533X1gtvnJj29/9oHL+dD0NlaDrk0kKkmHEibTIg8utlrKMooJhPuDSX9cLC62z1/fDpV7x9veftOho1EXt3b7Zy71ZhfMdDeP05mkduCWU8e/9JUHxuMSUUcaleBkOOrOxs3Zdj4df9f3f+v1585d3xx/bWN3Zzi9dH23lp
iKubvYztd6vfXten32xleffuKhr1y8skaaXBBgRmVWe9u/91t/+PZvuM8B7A6rUuTX/vwvjhycN7mdFuXm7vT+N5y4eubqNKsNLm9W2WRS0Lgczs40Lj13ZuH4iXd9y1s+94VHjszpIuNbX3v88x/6LIC6ca7TTWudemcoMhyvlqVcvXx9fwoEKzB649rWYDKFhLLphLXxla+nquKgjKKAQFAxSqy998DoVSCrFQD7gCDBO2PIGAsSAFApFWvtWcrAOUDN2qxir5X1HJAbdROCEs9IGFkK2vjKexZhDgJBAgtoERQJAiT7DSkSJAJEYBBgEB+CItwXZSjSVeVC5UKRdRvwMz/3TSeOPwRZHlEUPAdwotgXEx0UKqUEUDUllFVWMFE2mu5u6FGmREBp5Z3XsQmlZxZmWTm8kldha20NGYVBEDxLcEXSanYOLly7fLa10Dhy+MBjX3mylsT75S7m4J0XRWkS1SMthR8MN/7gw/+iO+97W070bhI3NYb9xDqq3NCUMBYgEAViEK2gDsEH0VQrfvpfv2687qvcPv3QS+fPb9W6K8NheNUtrxrs7XzsC+cvDXpc5cKmqjyLKGVMGu1ubltrgVkxiAR0geEV3LUgCggqokhpY0lDkRf/Z6shQKAUucqhUvvKJAJEIa3IMaPQ/y9Vzcx5kfvAIgCMIqHkQiEqMgqM52QS0mcu4fMXM+F+Eod2QyNjFNdKoTz3yqRPX60SI1++sjab0J2njiwvzwaP02khJI4nk1wpNWEf6rWGVdaAwcJNp+xCqx1rzqfD7f5wG0YDA7ZuI1TYpjhxXi2urHz9gZs2rz315HOPLs3Ujy4u0GhcjqbVaC1GdpOKqGrNLGoKYwG2M/OzCzvXr+xPgTUkHExkvAtl6UGIpSCny5LPX9ju3qrZxpNiPMyHztnI1o7ddLr4whVIDCASaS8MwhIINBG8ksff548rvQ+TAAI0Su2HvCbDnMqqHde+8Jd/82X46PH5TjWdcu4igoA+SlXIx+PhoHnSfcNPHo3vOqRMU2VQZTag83WvWvW0PRcwplSFmQUMJQVAq9ilQSlQ+fLB8t2d2urHpuf2VFVLUGuDlMQRh7xgNkBKK+dhWASB6AsvjN5xp+0uolECuU8TaB0AFChymBSQF6AMAIO1YCw0GiAO+gMY7AZ0YWlJBQolgNbgRMj59jzc3oYjR2H1Grx0LTzwNXjzXentt1BkJyyAEYBWrgggABW0Y+h2plXQkfbOU1rHIlMuVGm8PdeRMxuaSItAYCAiov0tAe/n3QQAEZEQwv4KGQABIXSaYJvgYijy2No0hILSsnKOIkADEsYL9youdJ5rHxDLCTA5Dntb+Lv/68KZa7Mb1RQj/TsfPHdk+eY33duKOoFhKiicV2Uuw9X2Vx7M/u7RydUtPxxee+1Nt3SXIAgK+lzHq6vzf/R7zz71wrRfbQCgzzlUJnCbmVSEeVX80Z+eT8Liu96yYNJ+YAiBtnrx009Ef/bX115ejxhjIpZX1LcqjuN9mrUiKqsKUQkgB7DWsA/5NBcJxOXd99zfyKvlViOvcu88oQRmIYiMrjUiouB4/OrXREsH5xsz5VRvSTCm1fqBnzj6hrPw65/aXu0NH/77P/q/VkW93V7oZ9PdscHpwkx8ZCm5NN/c7ZfeOSTRFlkAUcgo71gbpUVzVXkJvsxJEQEE74BFa2VjxUKhKLNpL4RiZ0fNHFhivOjL3tKRQ+NdtKMqrU+LLOiILl1r/6P/9vTOKBKVOfQozBxQAb4iRQJhYWAOopQSAWUIQQBIIFiNjhmARTw7BQDkuR5GP/me7rd9940baxdn5g787d9sfPihvd7UCCdzOvzQW5Of/sWbhuiAV0FyUttmVr7zW5Y//tX+7qBK01SpZDgFiA6X5cQPN1MlS0du3HHX3vlPztTC4ycOtYrmwqNPXdfWNjo1UDVd6ZyOHDlZ39n8Qm9nV1lqLB5Pm0fXz36xKkc33nrT3tWsv5cjKRaOojZEUmRj71ytHmttVAhCRpESFhHPQfJpaaLYkBII3lXT0Xh2aTFuNcd7U2OjbDJSGqqiMJY4ePKu3sA3vX6pXHv5hc+du/32Gz/4u5985JGrt95y8g13HmjHRa0zjI1Hzc1WYziivCAqXVaMjx5KDxEfOLHUMCWphBQVk7LInJ30xblpyFDSncnGsJiKD8ERkiCXlduqp9TfO6sotOpcVtJNIi6iA8sn6p2GTqpGPVTFcGe756qpWx2yr1RU787Hc5BaaF54+ZnNAJrYcEmS1qImc9Ufj7U1vWHebHRG/Z6istuJssHQkLGY2EZ34/yLVW+rXpfF2Xrly63NTT9OghOo4uB5a3v7sccu2OiYtqJQev1h7oaHarZWj5eWl+aXu816Mh1m3ZkZT+BP8GBn77XLM4990Z+7ev2vP/zy93z3cqubEM4DXAYAQAkMJrIAIoDOO0XKKAUcEAQZwn4Fdr+hHxwQVWVBCNZoIi0AzC62ESAEDgJgtCEAAEVGBw5xFBOKd5U1qixKFwKREgT2lbDXxiChtsboqPJOigyMlMI6MtYYE7Q2Oo6sUQqAGYUhiIiImMgaorysImtjbYRDatM4iolMpBwp5coKADWSsUZrMymcsAcOsbHKaOHgq4qDq0WxcIhspAC9D8VwRCjGxihCmrSy48nYBw8c8ulEucraKAR2zoXgGBRwqCXWEAcqo1SXuUcEQKkkOHZe0EZpXcC7ShlTchAFjBAI4yQCBpcVLEDGFAJ5XjALKPRFKKYF7OOdNUVa+bLK8qA0aUT2gZACe3beCszPdWpxFNvIKkqiyFXOB4+I0yoz2kSR1UTKgBIJDALAIkZrY2zwgTAAiwBkZUnCXqGAlL7SZABCAnC0HTOCleA875bsAaQMykSoNEvIy9KSAnEsHqQSxlCJssaXXirjvKsCaCVJLUbEoiwr71woq7L0VWWU0mQCODQq1T5VMMmGxoqx1hdluTcBFfem5blqitC4tNFXSIaSKJa3u/ymYw1rVRCbjUixBo6CNzu7vvRGKzXTDY9eOjscDOo2GvWV82Z3azjsD/O8jOpJM006s+3VjZ3Zdt2SMekrlJYbjty+dNObf/NvHntpfSJkRZiZtSKtzKVLm+1WZ6/I/n7305WfGCWKqduMqiJTRVX02FehZqgmvDesZmcSiujcC2tP/T9/yia89ZuXb7/ldrfeX+LlSw+fWVq54/ANrxn3S602aXah06gZmPzuP/2Jj33mMWeSkEY7eVlc3TIxmrbdzd1Qw0/89Du7snp9/fKr7jw2u7C0u70FxmR9Z3U06Lt6K5ppVsIYJaahYg4eiecaZrdfXNtZe+3rv35ny1/vrXZjtz66PDsX9QfPzx5bPnq42ehGTe6e3544BVz0orxMGy2XtD54dmMn0CDvtzbHiytzd95x+sKzl2bbOvflM5eufenFp/x4KI5hrary8e1vvV1HWLlSKaofWzn9hvvvvPv2jRdfevKvf/OhzzzQu7ZZX+mePNFOcUbH6aT03/lz3/uhv/vUuU+/cOftdzz1lavloKgkJG268
+RsXDy/tHzowqUyTRPvvEJY39nMOFtsH7hwfnfu2D07166lphfYqwgVIIEo0SwFqKCU/YdnNs6tb1ms5uYWlxfmy/7k9jfeePa563NzuFfqy3t57gBDFlhqaWQBKiw9BaUQyZK1zXpjNBpw4KzIQKMCDQC1us3zEhVERlchqDqUNdkbFMO9vBk4SWvndrIzm1c/8eC/ixV8yzvu/H/+y4/r8lpk6nhgJbHNWpz3r+yMrm+ZTuQnvVvvrH7nv9/w/T+5maql+aXm9a1VGzKFgUQUSi0ycaJyH7QKg61J0rTMzF7AMwRyXiULh37svf/kb//8t8eTcVKPlEETqaxXPvClwTd+myBqirtL99xSkoRswkVpoiTEBYbc80hUxRR7mBZQEteeemxARYvFzXTqVfAIaNQrBbSrZy9pq5vdxmRMo9yRr5758t/OdBVKmIxzLdRII2EV6dzl08X2TFKr2aidLraMkWo87PcHe5OSoVmOk6eujm++AWqFn5QbpJoo3Qo6ZTVQCZw8BltXId9MZm44HcXuwtVLc3NzoiPUqmbrk1E+LPMkTcmkg8kohNLYyAN44UQZa5BD2JdOMmOkjQLFgIHLJI1DWWVF4dlHVHU6TaFaPt4N1bTRbpWTCZBJxRJGk2nPE1gds3cba9eTNCqdUQFGuas1F2YO3XH1yhmEHiiOIlxcaHqt4nqI4oXBeBqjv+3k4mBUnLm4E4CrAEROEUU6sODZETy7M9JWKwWi832trYogMDqAybRotWdvuOHolYuX6q202WwTOmsoouLC45/uXb7yj37ixJuOrKIeqYhee+/bD/3Jg9Q9uDXY+qe/9JFrnt/3rW+55fShg7JBCsKoDLZKGjK6NrJeZWW/09o8caoJs/7N953+0hef/fvPn+tz+mcXpl/60rFuU9146FVf993fNtyoDh+mnWefr3Wr7//5V3/Tj9/x3EOfLXbssy9VT12HSxsFKa0U1oxlYERhgWycqcgKB1GUJDTKyxdefPIrD34CrWiTHJk/vrxw5+bmejpbG1b4xSe+vFdunT6wwGwiq6YaKW1WTBsbZVpfubpZunj+5ntP3jXX+f3f/NPxMDM63d0dTp8865xvxtHTjz7w7NOPYKDCBWVQCEvHStho9YXPP/S973nnXa+58/xfP8iIk0F5BTf/56//wj/5+Z9b7ecPPvSi741q9XRUNlZWlqcv7/au94vhYHm2ne8NWia6/5abQ9YbQ2/Q32kksNRuHqs3WKIjN5365AMPRTZJO+12FMFL1wGgyEdlHlBDkhqWrD/MmvWZ00dWDs+lswszo8H21tUN1jwswk5/DMZ6x94aF1BHat8kqxKLEpwLSlnhAAKKBJFJEyhE4MhgkoA1ggQegyYVWKqKfUDnmWUfAEQiTEobbRNjg6tc8BJCGQIzMHi77zBijySAGBgq761SjtkPp0q4nqo33nvjzcfj0g3RZ0ncUrUuBfTAwY0DQCgypFhYlE5MQsKFjczqWr7ZEyG7/52mFJE1rgpOyqMHDykb7aytI4lWgEQEGDjU0uiWG4/vrp3VlUx70zRJmQMIAKIxGgARZX/bU0/0eJL1dwYnjq6kDfWOb76R6Txoy+KQSAARgWlfzosAHjggEIYCAB2Dakqnjm608brG5Pa3xMs3L096lVQboczf9j03Pf3s+NOfvLA1okkG2yMZ5yovfD2tlVXJIQAzghDRvvOZJQAgISnE4EPpCwEUZmtMCN57H4IAitIEIAgoQoIQOOz/a4JKKYUSgMH7wMKMSEYRCMsrRSsQH3zQikREaxKPAaNpBflYA0AtUPB58CGvJAQiMnkW9oZwYeeKc+cOznROHzm0vChFXg5Gu6QjX1SFC8w6svH8XLK9dumJRz5918kbEiVL881y26+ub+Yt216c6/ew21qwXpTW2uPCzOGThwdZr795ba2rpcwGjVoawmQ63DNRPO6PysLVG101u7CztZZl2SsL0/2bKAspJCBNWAVviLRu/v3DF+eXb+10m3Wjq2ExM3/gS39//quPlY35du64LDP2ggqByWiNQEphVVWkEUAAwRCWzseRFS9V5RRBLbZBWAO2ujVFECdplZcQKu99khoIwQgdW+7OLMEP/5ub49ujQlUxatFG6RqzihaXpNGpiqlMFLZnIBH0veAmzos0W0mdFOdFa7uVTb/v6+HPPplfrnQjSbKynOQFC0cmrvL9miRaowuH53oRnUNTa3c6YOxgX4psNdfq0CaYTmAygckAshIaDSCAuA5EMBjAxUsgTh04ygLCDCaB4KCsQBg6ixDVYeGgrVl58Kvuqef5e7+x3WpnrqwCB60hngEIMBzAKJMDR8LyHLHwaADtLiUxGeGTh8yXXlQsSiC8wuthRqT9ggCLECAQcGACIKQgAkQi8bWNMBlOWLOk4GUMvoQSpQCtAAEkAKMELnWcUMyI45BJ2W8++VDytw9PMqXZeECydvYXfv2ZH73YeePXH1o4lBaTcrzX+vjH1j/31d1JYSfOMkRfO5d97z/96jvedGT50JHdrPrMQ89cuHJRqsS5liAKBkRARG2JhI1hzzjaS377z4fPn0vuunl2s1ft7NGnH94Y9Ql8DUhSS6UrSAEDIIivfJrUxmUeR2lwoYLAvC8XUkprUgo1mQT+1Y++ezIuv/K3uzaiybgyyprIViWvreV+YpTWqH1SLxcPVmRYqThAClVF1p04jW/bcZ9/aHx8qft/rYooQJaVnXZ88tWHVlbMm15T/9Y7Jv/5t5588opUIVYKWQIAkCgSZA95WRoyM7OzppZU2bgsK6yAjC29JInFAKKcwBTR93y1t4c33Dx7MMXVjdJnUE7EG2k12mNa+qUPrK72Y8EquCIgGEMIiIqY0WhSikrHBIIGWARJREA8GEvsmREDewyKDDIDWm1k7w/+5Dtfdfy51uy1Q8dyrbb/7b9d/PGrB5+7YP7gw/2DM63v+xEauQs6WRKzWI0G5bRnDHQ61IhxGguHPBL46Ke++t53HJs/2SmgNy2i//rLHyVpsmrs5NH2y4zpOKo3AINSejoZjwo3rXXf85M//PQLX2Xvgy+Z3evvfctfX74kw+t7F1cnk1F3Zd5VoRiOqqyoKkiSOW0UEEixrdEosD6IIlOv2WwyACIEQaVDQJbyXW+6+0fe95P/4Zf/g2i915sq4KXl5rWN/nSQLSx0dKp2t3c/8fcvxpKYxul/9iuPDMblfXffd+9dsyvzVTGeSPBBMCZjhSN0um469WTYLwK4pE6tyJRTPxz0k6QbGp1ShqPRpNtNG53W7mZWjD2BbTZbgEVqS1/ttpP5KFWBkma9w/l4tDmcW7yB5mZBN20N93auxhiiRDe7qVb1/s7QVTLq9+2RFaVbrlpcnOnuDQZLC0f2ruaj8VgRZ2WeNmYIeH62McpH3bmmr0rUKmpYAhgXeb3m6qkCakxH11Th4lh1aroo8t3B1nMvDGqGJLiKGxqVRhxPy6Wl9nwrIna33H1qr+dqXm1f21YINU7r3XZqIBuNbzjcQVSPfr545txW9uc7pw/JG97W2J+CqvIi
oki0VpX3IizChEAAsTGiofRV6bzCwM4FEK1UZA2zsEBROlIYxakxSaTsYDJAUiiBlKrKSgTjONo/b4rKQyXee2ZWxEEkBBYSq1NUtO+gMICmERutsso7z0QqshhZHarAHBDIO6c0Wms5CAMqrY2KQJiUJmP28e0AoASAQxJHIpjEpiryAKgVASGBBO+VImZvjIqMadQS9kEpLQJlWQQfgDA4F1jASeAMhCVwlo2jKDLMogEIkQSQhFChIuZamuSVd4WDwMy+DEGCSCACFXLHASIdl5mbZlNFFJwTIGH0HCSE4Jk9IFA+rvS+XFIrE5uqLMfTjAM7qziwUmAiQ6QgsE1sEkcgkljTqKeNxPrCC6AiFgXCAYha9TqiMDNzmWVCiForBEABQtDgBUMVnEKltQIRCRSE8iIzWluyWkKqoZOCUmGmneRVMLsu61VGaecdUfCBkVgr7T1oQ4G1r6rExqUL9bTuHceGg88jjRFPhEMSiaj6dOq8GzoSFJdNi1ZSxm48qykFyINWen4SJ+Ni5Hsb2vJ8lKWx28pJeTct61tTmJ2PTp66A93UD0xwMOF6yelu311aHz9ypp9P9poQa21ff2jh3T/2roMrBz/1mUf++X/4QKmSOLIri93DJw+HophMp+PdHQMVVthZfOVIYEo+8KmXPvvQcxC1XCmjqkwAUpWC0duj4aveeFu8Zp5+6vHgA+kwLcoTi+m7vvnWL//DYzwp9Zw9cLi5vJA+/vL0wmo+HExnDzZqMUMxstuD7s7srF6Ynr18++vevnzLPXura8lsN5Y6VpMn/+DPfvW//a+NkTML9XSlsXN1FBP19qbLN3ZvO3XgzJXLzdg21Hooxje++latli587SKwb3WXdayLoki68b2vu/WF6bODzSJ24dih5kKtdmlts87BxfG1YfaJp5//9m9/z8bH/wQ7eHlzJwM5MtM5d3710JGlS197Ggx3O4lttM5dvhJ7aaVRhmrdVdenod6one0PXtzbef7a9ZVG44mz56s8C4hVhbFVgrI0G7mg7rrv9O/8xu/ON2tv+q7vOvX2115Z3/rwX//+F//04/VmXRk1e2Tu4O0Hl46tfO5Tz9/wqle/5a1v/rsnnoaDN73mTW943U23ffZTD11fd24S2BuV2vOXLl1b2zXG5EUVglDFp289debFy9evbGCp33bP9zS/nn71/T9Ri+tWa65CKIOy/jveseLz7YX5FVM0b775SKH4Dz/w2a+9eCHkfm7Yy8ui0ws6h0YsBaV744KZQ+UVUGJNXmVEEQIWBXso68ZMyrHjACSRISIC9MaKjdWBg/Nb1/rZJB+VXol0lxr12ToyFX2ptWaWTsxs740+8sWNL7zpn7351ua/+NU/t407oiPf6sNWPfoKr/110mozTNR054bG+P7D1RNXJ1evV8Em3/mN3/vAVx/e3t7xrtIK5mbmNnYG2mDStKaWEHN/On7Xd33bcG397Evntq9snXnkobNnz7RarWazlheBq6oR1X75vzzdZXrD970aUQ23+nGjS6YuOwt+a1w7fQLGzwMGSJNsbU+M687XkY7FYZzUbeZDauP+bs9aBX5/CGB+Lumt99lUOxu72+Pe4Pr13eur0ayR6TSUpQ9otHalSyXMdRuWqSqgWesQVBzKqFmvGRWF7W7TXBZ1Zkv91afH//S9BxcOXsm0HwzLkYNhP2samGnCXB0e3DIHoL40i7u9oS/8pLcdRxaMXVyZvbK+vjB7dH374uz8fOrawRWElCZxKaUFCgzWWBRmCPv0WVdV4MEqQlFlVmqj86KMUWX5NEkiZdrOBUIdmagsGdBENmGgbr0xyoZxmo7HvQpsVMOLF9d+4lt/5ODxm27cueHhD/8xB+jONJhhbXcCyswuLFblaqSqqsy69fTQQn29nzFDFcAxIzMHMIqjGhVlJkLBM6n9hILa59E5FwD2rl2Efr+/uDy7dnVDq620ZiZ7EJnev/rpr/umd+yUo2ejdo2DGkr1pnedmjt5oppMN9516Bt+4P0/8ksPNhvNE53qJ959/72vOXLgeKvae7YazwJ2Lp9/VG4/WjS2Dy5w5fvf9u2vuf304gsv9j70ieeuPfu1Uav5xANnPvjpz2xfP/8z33vjt3/7iZnTzd6k3z4w865/8S7qv/Qtm7Pf996Pi7JIrAyNs4oA2LNSYGNjrWKl9nb6daMiq7d397wD8frAicXtipsnbzp13+smw7XPf+aheiPOpsrngbTWmmxqDy3MsONpb9hs2N5ev0pUf8I7/b3X3P7qf3jgqwKkCEej8YGTS4PtfDqp9CSrteYCQl4EFnA+cJA4UYPp9C/+6sMrczXvuNYSQJWNqg98+K+u9Uc+YD61p08f8yZ85clzb3p9YhK3sbON7ZQW6ukYh9eu1uuN1X6mk+6lK2fm60k+HrkkqunohedfWD5yfOIagenIkRp8/nEA2FzbYcelV5MKXChP3XhEgZlP27edOrDXH9eay6+69bSg7O7mwRfahPFo0htMrqzvDcbj4cSTImuUxngq0+AdgCilPHhrcCE1oqjVTF3wZV4KcbMZFaVD1FE9zfKAWbk3mCIQKVJEzKy1tSYmCHESCwTxYZyXFUjlHDMqRK33+TnMjAq180HHKZCKSC2sNJeWqqe++nc3H3atVhzQAYyEFBgbvCAaYxP0zN5xKH05AYJqImfOl5OghcTzPjqegFmEtTJnzj6f2MgYCh4AULxnQEFCbdbXr4/H+f1veseDD3/aWMxzr5VhBkWkjGilNOLupAhi6ghzi+3ptCyHEzNZMyEHL2hTZiFQ+/wNxBiJkCoEAfGEwiEQtpRuEnuJi/TQyUbtgPMhngkhFKQDmfrdCwtv/LbX58Oyv6n/9mPPf+SzO3s7yALA4l0VOCiFLCyBUROBEgCldPBCgCwSAqNSIQgHBhClCASYgwAapRERFXoXmFErLSIAwIK+9AJCSsE+XxyQORApBPTMIBgAg3eoAEAJKkJkJmYIk7LdSFGFbDQmAgCNAiLCogiinUk1uXD5ZD7XsGZlrqaIREvuKkHJcz/NQ87w9FPPj7Z7xxfnnJ99/uyl7V6elTVQg51BSOPuSn12d1BBYurt+eUbX3ft7AudxA83XkowS5XKq6JpsKp8nKDDsrez6fiZme4cJGr/LAjMyCiAijCwZyEF5DyT6A998vzFa7u3vWqxIUVjYTl3/hMf2bq+VbQPLo62d4nUfi1BWxLPAkEAlSEkUgQcxGirtSWl64khodGkH6rcGjJRNOkPFYX+3p7LwWrVmUkVAyk7zNy563uHZ/zTz+/dM7+sG4LdVNeaTkbDrX60vBjPW9h5krEbZAUoYRloGQsYsEYH9mUJrSRqFLfdFF5+ppqed5lqYGp3hkN2aJAJ2VUeRWsFAaBEe24Vyn5W48bJExq08xWzBaxDnELShUYCewK9oXYVVFMfSmi1oV0zG2v45a/CO6NaZ3GSefAAygIo5fMwHUKcwmKjesvr9Fx77jc+NLz0u5Of/N728ZunKGVgsbHpEkz2qu1zYCK2CpIOjXrsnATHC4sqMiA
OSO+T6xGEAfD/8LNQK2IG2mdaC4gAsjB4Rnn2zKi/xjMaOMm0AlWCm0SrF6uFOY5WQAxUJRNDNRxQDcoC1jfhkUemH/s8Tc1M4RlKj0ozacSF3/v78o+/cLURe/RaCiyyZBLaDioPLkBwHF3YiS98aMjynDBrXUMABGYM+wx5BmHAfVMbB0FBMvEgt597Wn3yy7saKRXHzmJwokoPkgUKAtZGRCovMx2yqhRAzorK2CgACIhCFUdRmTsmp4yelPzxT3xsrhXPLzRWdwZpIxEI7D16XHt5SnkLO1lVZtpoZShM85ArbaPKOe/YCtx2pBaH7oGTp/7fPzr//18VXXlmbb5Zn11sHj62lG8+V/Wz206lP/FuGz6RPXrBCyORIBOxIsUCqA3VDYp32XiqSOpJhII2SsZZVmRF8B6E67UGkc7K6POPzYtbvnT5ytWNycpiU5MNq/qJ86PPPL+6sac1OOSSQIzSUJEAuoogsAevyaBS2rKwc8GTMogECpVRIl4ITaSFhBlADIbwb3/01jtOblM1rjaclWZIrUqL7tL2Ww/OHL5hZbgt8ewgTpq9XVBJDKEepblO4UD9UJGf6bZbgwn6xO666l/9jy/97E/98DNnzn7gY888d0aS2NdbCXlO6/W0VR+NxkH2U26eYdzf3fuHTz7erK8Y29/d2hrtXPz7z/3Oa97+vpnmzCMf+UVG7G/20LOJUBsVnDhozHZfs732WYOZiQ2AV8JVVSBGpCwReOfAYRAQa5+5sv4jv/Avw2hkUM1FJAGy8XjlUMwcC2ujTT2pjYP7009fGI6fs1be/ubXvPUtp6d711xeKBOL1TYyuQtukHPQKlJ5FbkAglAVkE2DtnWbQKNRG6xP+73ddq0+GUyatVanER/qLlxf7QPIxmjQXTBHV1aGg14ttCo9n0lntt25scV5FaZ5IQAKbbfWNqR86AXnfVEF9mm7BZnkwTY7S0cWb57sXWvXTDbciK3KdKicN3E02zi8O7iSEoKBhnVD78eZ18o0W+1sMs3zIhgSVpjUVBp6vUFZsLisnqp77z60cVVtD8p73nrXJz7yiJLw2rtu3N7ejHUyE+limC/OzTXb7WZnqSiy5ZkDvdGmJ5+0zN7GFk2n97/11dfP7eyM8ycvrL68e2F/ChBFKVVWlQ/kmW1krN5XhoEiChwIwVqVF6XWZBQRkasKo2KtKHgvKKg1aVVWhdXGs0MiQVTWKNRG6xB8nhdIWLhyP7/DgYGllqSklYkibXRRlK6sqrx8hdjFqABAUCktQWJrlNLOVaBIOFgVOXAEIAKVc4KyL2lKYsUsIqyNIaXi2IbASoHStt8fxEm0L1xDAGEJXqI0rsVNDeJAQmAP7JiV1cbYwF4QfeVBCSCKUNrooEIBZpSynERRFJsosnFZZvV6w1WVCCZxDERlnhtrAkNCEaItyooNhMAs7LwEgka9PRhMisrlRUEEUWRC6Y1VaWJ8mZOEmrYuL7uduFsnYQDCEHifVqC00YTaKIMUWR3biEgBKLJUeh8YgIgREAAQCRGIARQjK1JaKxZWApV3DCBIURQhgDaGQ3DOu1BqrYFUxULK+FD2hpm2Jvd5WRbDsQAqHyqNqgwOEUhC5ZmZFSvnBdGEwAS5hWmCZachzUXtq6AgyYqCtRhb+npP/PWMs5mGJvEzDT/fbsAIE2q7LLlw6TKWdCAtl24OXvcgBTYxm7lpfuSps/W/fHzdSeNvv5DPz7W3RhUHXO3vrq6ubm3t9fYmReHKokzjJCVsJ+ErX3liXE4vXC0GuSaqFmfqb7n39oWV+YqpNxhMh6PSV8lsmuWvaMJ/+j99WEsitrm0PLux1otEIWDSMPV6JIUbrbkzL72c1HTIMamlE+e67cj43ZmGW1lMV3ezva3e3a/pbDx/RZKZxU46nPbvOjFPG3vfdcddk2t9BfHhN94H6uDGxaJeW0xt8+zHH/mVf//P1rPcdusLB9O5m7pfvfBSwc5iXGDxdT903zvfeOrF556EoBLJlw7eoeIbLCXe7I7L1UZ88+rLX0vjaGt758xXXjh5qKMXyLNA5RbaenejPLAwe6jV+MqL2y9fXb185cwb7jh15uwz7aVGcG6nnu5uTV56cW1xplEW2XCHcZStDd1cXAvWriy0N8L0xfO9wwcb2LeD3rAWx9romUZqOjGTrG2ODi12RlN/yx0Hyzy7/MJqMSne9zPvuf3uUx/76z/+8Ee/WMeqsVKbnZl/1T1vuLw+lbjz8uqwcezITV9/A7X6txyR/qRvuEyzAzAqwEOrnThLaadh98b5OJ+ZbbnMoVJ55ba3d3QJN5w43tvc/qPf+tnasl7stifTkkChItK6gMnBW9L+2TIb9+574+nlpQi0+6fve/VXHr24s83PreVZq30N3R//1m++853fgvM2QQmWBKTVxFuPzRw7duh3/+wLWre5cMMsD+ACBqNVEFYKfVWlcYSgHeq1/kAZrndSrWi4O/WFS4vQH0zqiZVpqM2bAwtLo9rCLa8+8uy5Zz756Oa7vu4NOq8UzGKMof10kC2eTFC0Ssr3fXvn6m9PRlJD0A88+iVj44jMoJhQrDa2e6PxNA1RFJliUrDSC4srdd198cojochnW/bcxfMLCyslh/6w8k5qrciksd9r+imAU5m4aGEpBEyjW97z3b/ey5P5o9sn2r7TdssHisXl47sTsMnCS89MRJ1AA+PRoAp5nBjAfb8uAMALL31t4/wF58s4pdtef/rh9Y041uKUBB1rpeKIQAArrRgDiyaTNDTFoQqoY2sjj5GNejrC21619NLL/St57Tc+uPqj32WTA1C4rNZM0tnEjbPRy5ANYWUlrbfU9mCv0WymtXQyyV2AyXRaSujUku21ywpxvDuIkhQCubKMoihSEQATCQEQSAiBkYnEKPSBjYnLcirk6s3meJIRGqs1QFZ5dsFH2iplOAwmboIa8rwkguBdvdmK49lBb2SUvPHue65ceOnLTzx6y523HDh2OC8yN81EgiAoseNen9jPNtPBZGoQjx2a3exd8ULAoJCCACBUHpW8EsIl2i/qMgQAAU2KNLJzmxvrLLi+vqMsVdNiZnbp/te+8VMf+8Db7nGxuS5VEUZdB4bSJmkcbz0Xgiwu1b76kZ/5d//l4+//6HM2Pf7fPx8u/s5DCwe7b3/DTbevrLz2Bj1/+8mNwdah19zZrNH2xWte+dnDM++4/ea77j/9kff/4fp0cnJ55fmdfmdm+a+/sL1y4tQbZk/oZAMhZBuXh2eem619PXFDPIyLCgjT2CrEIqsgqHrNusoXRZ7UEkLqzjb2BpPSIwL0t0ZJS33mwQeUy9sJRUbaMwfnF04o12eSzZ0+kNigfYn90XhrL1s+uIJFkY9GwmHlwGytruoN450fTPNJNmp0kptfffprDz71De94/Qf+6u8KF8SDJgUAHESb+KtPvtAyqpVGRcFK02K39YXHv1YECD6Mq6rRal6/vnFlczd78On5dhQLzOrai5fXrcSvv+2eJx5+3NTbndnONF+6sHr+9A2nDnfjjSu90hXLiwcubbtebz
BoNfenQBnIpnnpbUB96OjclUv9Zq07KiYvXVlrN5voZZQHV1VRsxEnXV9NHTIWY8d5VmZF5QSUD55QQvACIiLOB+dDqbGqMgDYHuZIKEEAYf9Lq9FoaJLSOZF9RI4QgiZkxFecNYDBV4KsAFCB2gcCCJj9fDGIqzwABWYQhSUY51dmq3/746+j6lHS2qpmWRltVaj8eLgjWjRxo9kg3QYE0iLBlZXPJuq5R8LnHqGSrbIoEvQr4QVBgMiofFq43AOQNho5AJkQAiL0e3tRjMcOHR+PUEiX3pFSHEIQJEJFCEUILO969/fHneTpf/jo7GwtDnYr73WaHq0ECIRBQIAdECNGIJUEjQgCgrDPJIm8q/sqghBGe/32zDHhFR9ygJwoAo0uYFoH5GuRrZYPdk/dGMInRs4nee6JGBGM0WEfX61kH/3EiMLeKKW19i5oqwICc0BCYAIGRNgnTwEKCEoQAtTagDASgMj+lSgA7OOEBZABRBAQQUApQlC8H91nQRQERiBCIQwAUDPNKriBjEgkLysU5OCN0ioiYc4LefbCpi9Dp60WFzqdumo1IIpBK03GxUrXRO9lvazHL5X9opxW2usAbghNnRSjXZ/EszNpsNpVZaJ0J7aT4Wa3kVI56eejbFJJZQQg6/UYRLyZbm9T0IcOLb8SpCAUQBEILCCgCEghCzsP2nZfvEDPnr0mzLVuMR7mhKxstL3Vc84rFGMMMyOgEAlzYFFkyCpkjg0hgQ/sqwps1Io67Vqn0jjKNrKiKqfVXDeZO3rrv/qF//nCE//w+3/0a1pTyDmwLlTS2y5/8Bef/djK8q1vnHXMEioIKqrPEkU4XRttPFI/emMBC1o3UIYBJ6G+QvkIe+eYTIhnXb6btHp3nJCrq4OXJzBJU6tsofx+kQIMIYBG8s4RmcrJ2iT+1FP5e+drKzNDQGEP4jUiRqlTBhKrnWMXjNGWQ5aNYK7uTh5qr22nn35w+IPfXU9pkpUwLaHeFmW0lKHIJbIQtfwtt4Sjj5pHn7W//Xejf3uo1mrkRAA+2FQtHVejAieZ1xpMzO1ZqyKsSti5GqyOI0MBSMQLCCBIEEEEJBEgVFoJKZCAVeW1VUQUEEqmR1+CLzze/tbZeuL2srG5dLn+e383uLQRfctb1fd8s8S1KRtwGfAQti81rmzO/Mlnhl89TyTWA2odfBCFoBCZlSsbeYHjDNFrgQrAM5UiLnAAIP4/fwkRolbCDMS8v0mR/ZQhKkAQQEIkcmUAjVogFFlsdaKi0XaGFpXynlkZLSJGKRAJno01RAaQyNZm2is7WxdIm/0+rgPWmsrSRRqD85/45Gchn957x+07w+neeJTGGhBMzXqdbm/6xY6gMYwaFYOFkIHbLtBapNijX5w3W2uDre3z/1eqaJKpu954z8INy67Y7a0xUDrdGSwtuh9699Enfu06SoRBERmvsPBlVLM3HWodnk+fvZb5nPcbcxphnE2YgzIkACIUxHtPWNKv/en5X/+ToqZVI1LWTsZF7siWniXoREngKRCg0gBGAJWUs2397rfOHFkKz12ovvC14U6mTCBES8oEH5QmRmq0WxVXLitIg68gQpWo/vu+VWl5KRuW7Gum3kbVKYt+8EMbbZ1c0hcH1er5reM3LbY6qeBgtLsaz5mi8J/7+LM+N4Ot9e//8f/2ex/5o9Kpr1wtH/zHv8uFY07mZxcnk6FIEMRBb5iXfOCWe69d7A37l1Mt3/fD3zatmh/9w/+wfOT47t40eGi0o6y39vhnfxl9JmFkG3PBR/loFDySd9picOu9vX/QaiDBZBlGRliC0gBKa4yqIhcvcVprdmb2ptvbo9wV4zcsuO/7rrkbTkYHbzq4NRy+8PTLt91z/6/94ctf+NJmUbAwiKtCWf7w97/2VTev6His57RyxiYxoKo17PbuGjurQiOJOj4rkyjBSAWXe/E+lEzVKJuqkMzXlpJOpz9e29qpQoBqVuqdyAd/cH4urldl4erJXKO+lOiOlD032hnl/aKYdmdOqfpiNpnY2OZVmfseKJXExrbinJmram+3ajfj9c29TrO23GkMtneuX9kbTzNbq2VlXvVfVkI+LzlgrTvfG/TzPA+uCGFcVlUbO41G06RRGq0gj5tdLIpi9dIlrKblFM9euL7V9+Hi+mivcpULRbneG9S6yf333RqRCMBwe9eXEEV4tb8xzsrSTWOt00YiDODKm15z1Ovuxaeet2oP4CoA7Ncty8oRIGrFggC6ck6h1pqcKwmJgZMkUYBaEyImxgqAImKlkUAbQwopIi65Kh2gMPsoMmkcWxNVlQM0AYLs45m1RgHhEEVGEYXgoWJiIYMGyPvgnZBoRFJWG2sheCAEDEoBAXGAynsG1lozi9HKixSlQwRwlCYpe88IxhhrtQhPp3lZuJlWuwyFiG/WExHxXpSxEtgHYWbngvOFMloZJSwSgD2XVU5IkUmMolxyL2IIKx+EnTERIQJLmU10ZIAhlM5Vwca6cMGSYgD2lWCwFghKAJWXRZVXuhYh+f5oT1tFSoJxHERHwJWr2NUS22yZKLHNeo24VlVVYlMCVeUlKSKliyIIEgIKe2Mj8SFUjkmqsmJmQJL9NKbWZVlpFQKBMTYyuiwrBPE+MIfgPCIAkAYEQhDMcwfCCP8fVf8dZOl1XXfDe5/0hJv7du7pyQlpkAkwgGASAZJiFINIKlumZZJKr2RawbLkz4qWbAVK4itRpGlRzDmDJAiARM6DGcwMJneON9/7pJP2+0cPvyp31a2u6lvVVX2rdz3nrL3Wb9lI+CCIBFfe6TRLjNY5kCu8bhlnHYgwLQrjDIsqSghAA8DBEfdMEHlKAqnrYa9WtWOVYvd4KR8OUMZDXUl7w7rvYpBZ7JYrVA8oRdntWaCwUq90hzYpwrXFHMxgvMKuPcgmdrmwmmdBxfK5e340OL8SXrq0vdIarPfTjcXu8R+9gMQBOSNjreWcOHEEFqARzss0z4xPUr7cteAtYigDFkV0860Ho4iMLgyoar3BQbX7vVFq9RVUEQAJxyQg7w20RgCBjlyS5/1edvWRY7fd8rpnX/i3HFJT2Awccqd1cfaJdcrN9bceqZ/dWNjOJ7F2++H933+8hb3+lEjuPHDzi157WzHUY4eO1g9e0zhwG42wMlFb+sHnP/A7f3l5mNWmza3vuTZxcGJp49Ab9r16dPO3/vZpSCAQsrd97syjF3jImKwdPHiDcnPDUT2uThVCXl7qioqvsGisKb/3yUeg67wqpmaneQCLl9vr3ZTxaH0jjQvaFQckXeuFZ5t7dvOB1pq8oK7tDo3zKRmFOs17qZ2ebM4Eu1SAly4vJRmgVRUjW5e6rihqDHlSLC+3xiYq83tmGcZFvlgJ3dvefMfxs2eCSq1UGS8FJQXpr777l7u5n9tVufole69/ydWb3Xo8e+tNt5fn5veEsfnKF/7t25//+mtefaM07XpIy8tb62pPXBsvupuRgCKz/fU8xnIQUsgDA2CtEwjLl9YiWVq8vDg23njN2146tWv2G5/8quIizzRjDBWHKPz+ExcO1uXVL
7l5HXy6caleEUGpd/gQvuUtR//3dxc++9DG6kh87/jSJz/1V2/54G8HYb1w3CIVSDKQxuo9e6a3B9YHBgwLhHRWA7hKFJHzxFWkQiLrGUViNLtHnV9KWFodnxqrluTGZvut73jD2dMnk4GXUYSkwfXPvvC84/K7X//cT7/1na7Y8DrzcG04/Q6X/iuLC3IFcDpwWM5E6fnLGNfk5cEmIdYjVatWVRR2uwMVoUZbZLB3/4xnfnNl+4uf/ZdGNcaAgfEjrevjzY31rYnx+vZWz+amVeR75hqv+rlXp/I0iCCslrpnH223Fl9y8zWnVvhaWty7anyhFFPdXi/rFZUoiSsxxsy6IlaSMfCe6USHgdwZgub+GclwvBp1NpYe+d7Dy+utkKEbYSCikqp4cAwKxpEz9GRqU5GTcVSKfRxYNwoClJJN7Z4xacYYHp4tpSM/NLs++7lzr3xlXKmPVLVfrsNmB+Jocqzibjl2lY1AiSo4CSSACBFsUQCijGIRMPCMISuKQgouuHLWM+YlizKfODDeOs4l58JYzYXiyBF5pMLhaODIVCfGB60eY0iMC8GECLN0YHVGPguUYlE5jqZ67eFglHoUiCjCKBQOOJrhpb2RyBZPpZ1Oc99c0otcnk3xSHDX29ooRnZi15TYagOw1nA0N1HpJEUvMcCYMTvtn+QJpZDWmh2QDCKQd0wwct57dNYjXFHZgRFXIjX2e49+8pN/cmMzPsH7q/HYBJNUZH2luCzVstQIFRi3VFLrf/MXr6tHm//n/tW8Leam6r189MPHT33l8g9/6xdf8YpXXvvUQ+enzvlXvebm2uzMMOlEs03ytjxOH/iTt446rYipr33x+W2qH3v1DZ/+p09ePvPAe9//mto06azM97+tf2ka8Lkw6nAZJlnhEQiRpIgqlfHm+AvnztbHgrmpiTRz20mqHdSbNclFnmTZ8nrSHk3MjHeW0liwWMilgZci371n18zU9OWLJxcvXOh1s1FvS5RKzphhfxAEQgl18dxallOy1ePc/sy7fvq5Sxsr22fcubVSo3H2heV9e+afP3fJOlABt9o5Qiapn2SyFMs4KknW7+VWu9WtHkhRaKsLf+by+ivvvDV79rmlhXa1UV5fWm2Mj61lA+Na51eXDlUnuhvDSxuXMwvTE7NjlXip03IV0W4PypsrphPMTc7s3T29MwX9wVCqIOmlvb7byIecBbwkitxv5ZnLw5hjZ2ubS+NYmxi0trZ6w762rjdIrfcouTGOCMk58G6nosc6S4haQ+EtECE3gNxaJ8SODgGJGUSBdJ6so2DnjuesNYYBcGDgDHFmnSewubWIyDmPJavEFXQwSJLMaG2JgUeBCMyRi6p01+vngmC5Uc6jWIGBzW7PFiUpuTUR6kyVJRoqfI6OWMCct8SCVpd/94ebrX4sOHLBHAHjyBk4762xhIDek8Trb7z24qWlQX/oLYVR4JxlDPtb7WPXv6hUL5mTWiAjZB4dMvREQjAMqRgW115zy3r7xOREqd/aTkxVceVNIiRwDgCaUAAwRgTgAK0nBMcBEJzmGKJXXArEgriouKpiOdgFzrgFBF+QRfABYhlBoHRZ35x8ZgkMlRW3VmprABERANE5xzknYoxBGEgphNFGyAC5QQCnNXnnnedcECMCj57Tjjt7R/cCRkAAXknpvbfWEQPtnQSJOwAkQMY4Y1em35HdqVVhHoEjETlP1pudnrx+lliXOXLoEdmVCjZCKArLOQqptAbPeDf1vcUWJz81Nti/Z2bv7IzN18H6iuBJkS4s5ZklxTh4b9wwjsKx8eZgqCsMdk8dctyNtNvabFGW0ij3sTHa5Y5aOWOiZLRR4IF8asiWVDcpppz8sauIOEMkACAuOJF33ntHwJgFcJYAFQFLuxqImODeekPWETHGAQEZAgACKqWs9yhdbkcViLngjIEUIjd2mI56vW5tvPnTP/P7MvJj+w5FpbrSi92Nxb/5yG8ef+qpEueOQk/kPHiNACjLY//25af/8s7XAW15U7CCAh9S8gJlm+liMba/EEKjHnJWFsEBJ2ZFYyBG57yOxexLRlmvPNG5obm8NbysT2Z9qzgXxNE4X1GRA6+1BoYKA+uscT4V4sQmfeMJ+3MzlXptEDBA63xBRgLnQMJOz0bL67bTs5USRCHvZ258tj+xL/r2vWrjY/0Pva8BtusKyLivVDBDHpdk7jIRQCz0VfvoS4/54Tn1Nx/r/tEH5lRp3TFOSMGEG5thjZyVG97kYK0uj1VQFgMN3QTwCjsfrfOCMWTgCRkTwMiTCXZMtgI8CfJkdzRLxJFr/tln3N98Ja+iGnWpoMKzOnr82FdGz5zsv+0V4ZHrXKdn1pab333AnF9KV3oKvABGDK1nsGMvs94QMeScvAfPrc+ReWM048wTEDAA9M7tQJYBCBkjBATacUjSzk+936EpIYIn4gKc897njqSPol0TM5m2/cGQgfRguedMoNEOreVKchJkKMtNozkdsEhykWWJlNKh11mupFCREMhURVVr4Q3XvGymXju9uFyOhBCgCxsIXG6nixfZ7usC4zWlOY+AAZiu63X7rBQ35tExmw0H84f3nVnO/y+p6EW3HZ3ZXdWDje2FF1RASH7phU5Jimrfhy4YOiEtFd4EYf5b7527+Zrx6/fG3/v24w3GB7Pl4+fdVgJF4cmStUYxSZ4QeZZn5JFxAYoYQS7sKE9qKhgQkbeAXggi5zh6BIYG0TILyS+8zP3VP9/g8bkgtFJOPfdY47/83fB7jzjGIkSyWTo/N6UtSwqvQq4LYzUqiqymj/7Tu2z0rbiGPmkOtnMf6GKYMx7zeHeabDkXZEa1OsN5UbVYyBJIW+63/aWzcx/84x+W6/st5Fftu/o3f/E3//p/vL9emo6Vqs7uWV0cpUUaxoqXIhz1VCx8XDt8178/lNjX3b33q5/8s+dO/LBlz7z2l34hW8kWzo7Wen0c5FKIknCpKxyTggVMxrxRVVLlw1XyQyUNQMYCz+TBqhgrkiesJyCG1honyDClVGFNtzuiAiCs7j009pm/vSkzp5L8gs7SsTJ/6U2N4w8e/943VjWWs8SEQVgO8Z3vePEdd+xaW91WLIoUuIKyYSGiuMhJcUmexeVKEJd1pnPNW8MEfZpr2+9sl8JKc6zZqCuN9vSFJw/OTU81aqPcCypUAHlaoNGRQ8m5lJWA16jod9Yuh1Hmlcn1IB2ul1UQixKYgXd5q7OhSmMMBHNULVWx7CanD86O7xklljl/4dlndbJgipRzx5kOBTAOOtH1aq0xWe1tbe3av78z6rRWV4xOq/Xx6flZ74tqrYLp0NmYZCmsRnNXTZ07fvLC+bNhLNTIb/SLzFgucWK2udhe2GonrV5RQoCAjU+V+4Ner5toXYxPTKX9TnPv1PpmV3sqxYE2pt9ZOLC/MWhfySRb64EbIXkoVJqnWhORlzwQUinBkaOz1uUF58gAnLWBlMCRIZLzgJ6IGDmnLSJonTnnUKAhimUQqkgwzhWCoyzX1XLJE2jrrNFhIJVSzIMEYAw5855zK0WaZ2mal6PI
k9fWCMEZg7TIIhkCcEcuyQrJVakUF1YXhRFCgHcoOAEY5/Ii5wCciaLIrC28dQwQGXpwSjDyjDNGQFZrAIiiEIGKQjOGUnBCzIoCvNekPVli4MA4A8wxbY3zaDxYo5EzKRgXkc29QFKeucIQOa54pk2SpFHAGa8iSyIJyWCVxYEKpecjL1INTgofKEKjhcTJRhgwUQkNCYYCGMel82sbGyPXHA9lZAqXcx4EkRDoLQklGXFtnRQBebCFQcakEMjQOwQPgVSjJJVSEgLnnCtFRNqB9cYa58gpIRlDz5ALIZnIi7woCgTGGUOgaiyrpcDm1jiDCGEUqzDMCt1JMsZZbn2eJoV3QggPJsuGYSmignPikvqTaunQ4c7cNBtvMFREPJcizftsqzvY2E5YKZmZb1UmZaU2mbcrP7hv6yPf6WhbyzOYmQgLPwrK4tie+OU3JLe8MpThIJDVtC3OrU/93Wf6P/ghG/aH4DSAZWi41Zwx4IiIwIA4AUeUCM45W3jyBJ4HaNACY4pDpcTSwsSV0miQdeTIDR0G5Xo5ss6U6uVBbySusN1BxCquxC4n9EYbZw0Q+JEtVKSe3zgXHX+wMa46XR9HXOdZPZITM5MNHfd67edf2Lr52HijZ5aeX6ymPNzqveL6yZe8+GiSdBfOFlPzLx2bv6M6fZVez4/f+/lvff3jj59bb8xXbrq7Xt4zmrp1SxfsIvY3Ns/SOq9H1fl947no37Cn0hB6pPN6owF20urA6yLJ2owXs8048K2xcalba+snToxVJ66/4cD5x1cmpiuNcjjsG24tKgXe1QUb5Xa7s/Wym6655qr9z58+nwP2Q04BGOn6kJXKIkK0RXr04MHzC5fn5sd5SKrID8+L7vYAUIblUIVMqDgDe2Zp3aW54h6dXL94fnN14b2/8v4vfuYe4/SnPvv96t7mnsmJ2196+JFnnnn+5GKv6O3is3uuOrhw4SQWxY3X3XR479zq2mNJZ/HALVfzIavsnrju9kOPX1wq+j4MomJoTW6r5TgvDAB560vVKHBUZP6N737TP/3Dxzfb/cbyOpNSm5xLxggrNRVGYSCChbPrNWFvfdlVvr8QT1WX1zcv2hXbTe64cxabh772b2tf/vhnXv57rxoTMLIWvAOGNeCjdj8LgluuOfi9x0+S1QxxolpJcjsaZeR5AKIswwqHuWa+72DvHT9X3nckPPH4zH/+g02nebeXRTw4c/J0wOVw0B70UsFdnuehjyZ3j21vnnr26SeuPXYEleFQWOsIPbIqECt8p7I7fM+7d537p+5QexWIvNDkWRjEpXK13etZY7kqvf1n/t3XPv0ZznIuCAktFzopnLHARv1OpnW+WmQBYsCkcEIGBupj3gRkCtu62H92WMjhe9/3hv/wwUfXNtzk3MyR6695/OHHWSjmjk5kmd3odGtxiUnFvfcAtnBkycAVxfTsmbVdU00fsuVufuFcJ6iEYYDIhdUkmOICNjc3c2vHxyp5noGT5UolTQdRKYp4oLOBQ6iNTWmZI4PqDG+3huglb7N7f3Rp/7yqVMo8HC+glskb1XVcmKSXdEOu0HPrMIgqMo5jGW53uzywnHHvIB0N42ojTUaSS6lUYRLPwJMh4KgCa4w3BgHJAzCXZj0BVIpKTnsLhsCX4sBbP0jTSqXmyHIZVFR5OHJauygOmjOylKEHSNN81Buxhhp1e5dOnpehFEF45OhVlAelSmmQbzFw4GGYDnYfONrWfpSux0KxwsVAohzkuUsJnHdScuY9ABhtYaexyhMy9EDg6Mct3YwxJhTzxkkhferNeicuW2tOgY+8lUk/D4MEQJvBNpCVqqpkmA661o0EbPznPzz69re6d//6Q8NRE6kSlQ4eun7Po+cvXCzyxae2J8ZH+/Y0SHTGJ+uuhM5khkq9NCkoJNE+dsxsLa+2LnYrsWq30vs/991Dh0uxmlvd3v/B9/3lxDXXluLRoDPUhSbDVBTmRWHJbg27b7n7Ffd++15dLtLCguTeFl5zhz7i0geu090enwmac9VqxC4uXz55foVhcayzLxC4vbEMXOzZvyvPcb3VanVGppdMzlSuvuaaoBpaDoVnwxzK4zf843/69d/5nbcuL5yYmqmVK37lqVW0KAWAd846huSAgdayHr349qvv/8GTr37ZscXl9fVt8mjjAGuNcjcbbl3YmA7L235jdX2zEaPLBndc96LvPvj4ngO7zy2cff0rX3HPQ4/H1bFKJXLOt7e6vWExPt6oV6KtdiopGen2la0Bqc7QbqwPwUd5ZpynXnt1767pUjUqV1SjHGfGxhHv9gaIzsRKW1kMjTGWgIyxnoADQ0IAdN457zyAdeh32DcMrPGO/M7JCjwggPW+yA0AeMAwklxgKQh1ngORd5RrUxhPnlQgBOeBEFGorLPojPfEBYB2giMReAccCRlEZXHkyFwc56YolMu4YmNNSVSA80I6ziQB40HsHHFByWATGB/2xZnT6vQ6z4jJgJH3CAgejHMeAAWz3nnvpcWlxY1Oq0eMMY7aWPIeyTtGS4tn+NY6A++NQ0AhhPUeyOnMENi9e+eXF5+9vHI6H2YhD9KebYbQnAy86xIQQgAgwDtAIMyQE/oceIRQJgCEwFnDuOKoAERelHyFgCXIFEDkeYBACAoIgRR5t7rQefbxnskbRWa54BIFgbOmACDOEAGchXqzecNNd46yzuLlF0aDvjNOCCY4c5aE4AgeOEUYGVcYB4A7vAFCJEQUTIB3jACJkIFCFEQ7BCPGGBADIgSwzhCAc5ZzDowxRA87fiNGgAxZqgvymnMlOHryzgHu9N0i8+QLY5FLzjyQBwcO2Fo72+gvPH7q8lisqnG5EpW0gxT8MCmAYTkMjKck7zuXI69NlGvtztAB6sKaPEVmyvVg0O8USZ7p3AJKJmQYRCg9FgOdCckrjRpz+Y+ngLwj5IwBoidkDBEZZ8jAWoPIOBIAyx0pwQmdA0BOAjhnO03tDgCl4AgADBjCtYdu+qXX/epH//V/dYtl653gAABYEv1B68P/4wNScFmpCRJ60DPoOLJKWAIHxnghGCgiMBwRkT13cQBsXPiWBU8MiqITRdVcUHl2xpmYSY+lsgdj+j3jK8S01ZPeAgwZw2tSXvCp2vx1l8fPFoca0fJKPgQKJbdacynBk3YWgEnOGAIwMo4/d9E+cZxe/SJQdRBAJgH0IGsCyMYlNzsTbK2MQgnkXHUMkiHdem2yvAKPnVZ//C/dP/wP+/vrlxqxaC3agoVRxKUKjZfkgsFg25hy4ks/etrd/wT/iddMOp5gYFlhkoFP2zA1ywLlUYDOigiFtbC8qjkv7fT5ccYYQ/AgOXPkwaNHf9fLX22sfuDBHzGUDgCIkCGAIwaplUnCWz50iIwZIT0VNs34D8+OPXrZjI8LUGNJL+q2szxzwFBwvQNdJUIlFTDcCbV5ToyR84VnHoGYQCDYScB5T+wK9HAnbUY7bwGQczvWOtyxFwHRDglLSW4dScWrjbHm5JFhexBAJUbSrCcVZ6iEUoUeBUqoQObaqkj4HKulwLERoFeBZEr
KIL79hpc+8+yDOhtYbYt+sm//0dtfcue3v/TttLAEzhojwaM3PC6dvdy/bRDxMvBy6EETQlyW2wsmG+rKXBBMMBVxj7Y0dP+XVNScYVhc0hurw7UL1Zj8AJoRJ1t9/Lkkt7F3mbX2vXdN/dp7J6v63AvPXrp4AsY64pdvnU30ln3NoQ9/+cKDF5wpRKjiXOdEJDngTmDFFYjMgUPvHcDIMeO8QCKGDAmFQCm9Iy4iweEPfuvOD/zsGScfTTYgK3EuelcdZh/93Vvu/NkT57tB1PD/8w/f9uKjey4tdD/4R58nC2FVaoqVjVEPD031Y1VAHpMLmEgxKhEKZ3IBXqiacFHEen5UXDhxYffRqUoFjImeekr+21faM7PXbOse1Ep/97n/85N3/zTstNSSndp15MhE/fT5pwiTq2+45cLpp1nYrJdnv/pnv7fvmt0vv/o/jAc3XRqeGyWjyyee7F5alGUehiE50x+lcZkLIeJy2NrcjgIVxlWXe+8G2hjkXILRYDFv15v7hyYDHoRxMMywNHVwfnL60onvcmBoR2Ctz4NL55MP/8szv/izs+NTXSgH5FGFZWAkig5ybE6XegMa29t48szyVXuj6lhZRIFJ+qbIVRDkeVIUBZKOgrJzg6gcjY3Fz55aG2SmGodj1dJwaMKoaYwjSvdM16ScVwy7vR6TQRB5b3RhhknSC2sTYRBNzk1srzyPKcSxQyyYgWqpjmAo72X9Nalw4LRSDclKJndFngU6V1xUmWsGthqotV438xwES5LcOoijWsAkQ0x4PshB+ilfmkCEehSo8YrOhtWIBbZTjoNs0M66QyFLpbFa6lWaJ73ORuGy5u5a37iyrK5vDmqNUmO6bk/xflc/+PCpV7/iukrk1y6dE6oMgtXqk0Ft8shMdavXkY2oEcSN8lSWF0C+PlHZdXAC4AEAQMk8gdNFatyO+lAK451tKCBIIRgAhMQRrTUOEMA547QjKeQV6j4w53Suc2dsIIVjEKpQgHDGMoHJKOGcCYY6zwtnrYcoCpwz1oBAJoTkTORJaq03xisua+NlY5wDQVlhsiIIhOTKeZ+ZnDEViEB7SI0BAi4FAQWBQiEAmHfGWsc4Z5JbrZ11DJkUO5X2EEXlLCvQg7GOPDDGijRlXBRFwRnjwntnrTHW5YJJIaQKJCCaXDvt47hsTWGsLZVqhddhAIG02meVWEjXAd6/7mA5twOp+MKllYVld+pSyFHXIjk/E/YH7ZoVE7WgKpLxySZoUoGqVGrdYZqnent7cOJif2FtsLE9GmWu17dp4aMwirmsV4OxsdKxqw7vmxiz4EIUGPA093E5TEbae5+mqVMxR4jLpYJskvWtJQ+eyJBD8hCEYaELYsi5VBikRS6lZEzluTbSAzDJFUeWpmkpjozDfmKYcaYwXAohuNYWQai4JBiTvBDKlBg676w1URTbQjLmrdzYP3n+tuv93IxXwvrMaM+4Ck1KNhkKU8w1svEpOb+rKqPmqZO1X//dp0+cUwbHuDHk7Gh7Q0TUnMxf+45rX32nVKWeqlWNnjx+qvRrf/D44kVwhgeeHDOMWSRL4DwRediBC3ApPYInC4zJUFnuNXpkZu+eyp5dcSi5I9nLTcDirV6vr10Q1R1mHaQoUqoaIdJoONp5FmxuJ1EKs81aKSh3hymLBHKepo4zSHujh+7/umSi3ow9eY4YMlEv1+aieHO9vbzVPTyn+p3telhqmNKrDuy//dj87tnpFErX3f1mwXbnK9lX/+7jn/jURw9fPWl8oJr1pCLi/Ziy0fqWa04Gew8ylfhSefLZ3kpveuVld1SaE9mgPQjrjel985DXSqUxLdKoEceVUnJmqz9shc3SmUfP1Xnk3PD5U+df8hM31WfKn/vUvdfOHZit+o2h0UyQoG5eJKXwwWdOv/L2W7r9bGuzXQ+DUhScXVsf9vrVeqXssBGLpdPPTzTGFOfdrZ4CN8Xd7J7JzY0MeWlrKz187XxuUj0aeO+PHNp9+ODe5aWV+dmZssevfO6Rt7zxRa+6+2YfZ4lmpy+v77n5zutuvD1p5SsXLm48n0hWSx0fC3YN2ovbq2umWH/m/m5C08fZk4f2HxDi+1Pj9cKIvKDtfssS2lApLI/Vyv1B7hHLQbh04eSb7n710f23fOxLn/XokCCOZTIoXGp7qdETY9Vg8vSzp97z3l869eTXymJirDkl27ZIJAy3f+qW69rPsO6geGHh/E3XzT17vi9BBVFwYL7ptF9uF4evOTBVXyM3cGBrtUqg5Fij4pF5zctmuH//6Df/06E9tw9kuM7t6k23hTfM56fWsBRPyjGxMXIvvfOmbvbQ+tr6/PRkg1fabTPYStJk8OmP/Mmf/uOnyHqCXEa1LB9DoXyMdjRkPH3p2/fccO/aA88HNsc0NcwRY2aUppLAeyhagxMPPzDbjLY3BwBIACa3zoGIQs5dmo2iSFkEzoUtqN8fvesn5/orD2Et4Tjm+KSoLzbH45WL3/7D//qyP/rbCxfWk+HZ05n0TrCtPPPO5BxNmlcjPsr7zvJacxw1zM/EF5fXAODgTHVxbeu+488jFNXxZmFzKWU6zJJ8qL1lnDHiSog01+12f3rXYVViy8unr7725mptejQMu4Nty0IjKE97oSzXxptchqY8EZjbXaW+luUM43JJDnXKIHHCx1FJcBHXy8koazSqw+GoMBrQM6IwDLGkbFtHQUlg4XIDoJgX3helUAB6TbYURUk2FEyEYTXJ+rEQBK7d26iUGi7T4H1OLWu0Q8O5D4KQIwJ4hlb303pUdpJPjldWV7aalXLkC59n3NlGvdQaZP1B+pqfutE7C4E06+vcQq1W7wZjm1uXa835udnZYbs3OV4Xkdrq9yfrspv6rgYGnhh6Czv8CABCRgwZIOEOVIIxztFZb7Vz3jvttDV/9K7ba6Xnx3gHRsYxyJMha4J3w1EXJK6P759xlSaXTWDjRmuXp1cf233fJ9703R+s//4/PLm+rqN09lTHjK+IRm2/Z+7X/upv91To37/59utvvaYxf6ifrPLSBPKF1UsXDxw7VglWx1AEd8ydfmJpeymZqeXrreEv/Je/uuHYQai52UrdFBYzJwWvV2KwJtUut+zhp5675ZYbW90u6fR1d7xqcfHy+tZmvakOXbVnc3Er6MZb60lro2dI7zl8+ND1h7NOq590fZEXGRWke6cXA85FXDHGTsyPD7rtennywcefcIicYaTCP/nrP/jODz7zyle+9PyZ57o2ba9fCEOxMUiCUDJCAIYOG5VwkGQrW6PK6RXGxEMnXmjGVUAGgMAwGWkj4ZGnT/w/v//rZxY+nDn3pjfc/aOvf2t2/vCLj17/wrkT09N7fVSe3nWoUiqFRNzkUjZaRbukxNieaVo+W8K0v3kF2RWVAwN6cqrS6/N2LyXjG6Wo3oj37JsoR4HRZnKiKSWWK1Gv3/dI/dQgWBlEzhuuiXY6Yp3njHlCZIiEHixwtM57D4CEHHeo4QS0c68yHrz3yNgoKQIBohQrhghQkEckD+g5Fp4EYwLAOUueOEOuWHc0JAC3g8
EGCmIpBOOcPfnkBZHKm45NuGxL8sKj9dYGsVIyHo1MlowAWBhIB4ZBSlz1U/bQU93trOwQ0e9kWJQU3BgDQEJwALIOEcHmFAaSGIVKZVmhwoCHoshNMkooc4HkFj05v8NMQQQumbMikHz3GJ19ctNYYR0yz8ZjU69YLiwRgXfICJADEZAmxoGXkAjAcR7ukJW4rxTr8OiPHi/X9A2v3S+iHAC4KRggYwoYMeacU5zhY4+eHxWVqFwvV+OE7NrGEgJyJgC9BQ+ETPHpiSmdd7c7a8aaqFQGGhJ5ImAMvUcQ7LqrXnpofP6+x79m88xrwzhnCM45wbkHJAIAQmAAwNBZZwFR8EDKknbaW+PJO+8ZY4JJIiBPxnsh5E7BGmNonEfHnGPgnUckRCJgnJGzKlScobYegIw2jAEiOk8MubZEuXeF3mq1dk/m1189S4ItXByMCuvSURwojxAYGwVm2HGrLdec3J1TPhhulwODzPXz3HgWchkyr3weiupUqbaWbrFGuZUn6fZiKyld2Z4hMGQ/NmPtiJaw0znFhQDY+fMsC6R2VnHpyQvOOXJrveBMyogAGIKzthzFjMHh+V0bW6eMHoIAnRgHaK0VwIF4FEbOWxqNiHOlJANF4L0H5y1eISqgsw4QhOIXVvJLD/YbZSdLlhoz4RxoLATGDOpONiWLvM8UDUzrQjB22BdjqnQHY2trD3+6eeuvZDIYlmevevfdZ56+Z7SdTijZypwDbm0RMlVWtX7e53znf4DIeZCslYonTtOLjoV1lkclkRtlLZAlxqxNdAW1i6G1DGEZ7BDKDOaaxateXP/qU/bC8cZDH9y4ajJ6/Yuqs/unP/+d53fV85979z49LKSYPP5MW5HQhena4H98dKlZnr71RRPCtbd6cPKCXD41OnRovjbRZjgSgQTt9CDXI7CGEJmzjnPmjOOcIxNoPRPCevjBU0/MjTeDsKQLQ8wDMU/onZOKETjwHnbagchYQgInhMw1ZEYOEt9o1NL+wFkDAj05iwwYcw4ZF8SQIxIREpAnjwScByJI80wJRg6Mt0AEiAAEBEQ7JxdkggN4AGCAnHPraedSCUDeewTmHIElzwhIBqLUd61Xv+odh/fs/u//+GsMPKAvsiLkXDIexHHmM8/jmemZfmeFpDWZ9gQArlKvv/JVb67WK/fd/6UYmEIXytooLWepqTXqaT8V3oWluFHHYQHPXWrrBCtV4chwKZjUsoTlhhz21dLFrX1lXq1HncvrvR8/C65IRVHF5d0tPRwYHWJUW1yzCx355KXgK0/0Oahb97Hf+Pn5g5ODjYtPLa7xSwtjYyRLLDh138aeXcmxuy/98QfgkcuTv/VXa0UWSCEIrffOM/px8yN4C4JJ4ChYWAuSaw6ohd5oewTDYaBAMsODMPiZu2Y+9Esz3jxLrlKZOpQMGaUrQWUwM73ya++Z+9CX1v/Hh3/yDTdQ+sJzUaB0MTi2++iZtX7OaiWMr533pfJ2NnLoFRXEAslCJoIg6yf5SDPJkUGejuolsX/26uce2P7K95e3y2/4wpefmhyfiypdyi0wal168N/+4aFGuZ4Zd+CmG3IX9rf68czuKgwunlrptCpTN9xx6OiB0aUPN5Mn//6Pj3/gv/z9xuaRc/fc08WR05lJS4muvvHNd/7cr77zd3/1VxbPbCO5xkQ5T9IkbYHzQD6Oa4Toiz5HULLoDR9DJp21LrGoVX9jpdh6NIwjkxfIlZKRdWFA0Ue+k18O4v/0zlurbkv3L8vCH7t+/037N54+2zUMchVtb+sR6s999fm7XnfrDbfMD9IiqLqoHGdFnud2YmpWa6cNK0y2tbnkXFoKYyUEd2Lv1L6gGiNmycByCRPj5V57ZJnsd4aWAoZWSTE7NS3AZb2tFeiQKZhjPjdTU41eX48KL7nUYKTyHhLvkTBmME5IHnvAanHE0kHr0YVv1sZrM1cfOVidv3h2IIapy/L1gWZ66H2+f2732ORkZaYyGpLpboYVah7ZrQfZ9tbApN2NTQtKNcYa/e5w+bknw2r18tK5XrcHXPRbORS2SIu8yPbPTK1eulzlfDSkU2fbyE7ddO1koy6nZkr1RnNxrd3trW8vtfpuuHvPfshpVAw92HIkXarbwyvbg6LICZCcLXRardZ37BrIQRvjnOHItLMA3jpnjQEEQu7JM86IPGfSW5OnqfUWOWNCMURLnnHwzjjwhc6BA0iWpSbPCkQw5IJQSMGFEJyhsVZrwyUDwjQ3jBhDj4wVee68RwACCAJlnRMklQiIXGYKpQJXgBIB5ySlss4V2kjBHTnG0JMDzrw15Mk6Zp0NUCaZccaRs4BchsoZ64mBN5w57y0Qc94h+kBFHAPrtbPkrHMa4zjW+ShWJsa0KrqVcjLZ2DTYnt+3t9PqzM/VSa3nSRuD2NlgvM4UmidfWDi6f+K6YzN50p2sc6s1056ldul0qzfiC+ujjZZe2Br2R6bQlBsgzxC54ExKiZKh50gwFUUTVeWT9qVuP8/Z+FhjNMpygGplFAVBpSqaYzXwzGR52usYUwSRCksouRMiRoLRoMiHGQsD4iLXOWmrPVjnvNNhGIySUSAVeeCeASIHZASSi0DIkTNCUaETwYT25LVzgGRZrCKpVGc08s4ay0HU0PzwlXe0Xny7LNKBT8O0CL2WDhSz4IvMBqYxHU6JMAinkvzgr3/oR1/54ZJLA3Q8CrLUDjjnYRD1hulQ53/6z4/l6dWN+rQW9U989sQ9T/Z1AowAyCIAv1J8B4iCwAMwT4DAEIX3BgS3jnFPyCEM4eZ90SteMjY5xklDkpFTjXaHlrd1LzOcUiI1GubDHmNbrBRHtVp5ZwoCxUPFdVHk/UIgMvS5dghU5Lochjxg1vkszW1mK5WgO+yN+msDbYaD7at2TS2e3zp4za7xmcr371mtTV4H5av49FXTM/Prl4ovfPg3H3zi4YnD9Xf/9psmx/j3vvj9fMuFCAdvnHzkiRWmwQ1TW4S8w/ubvZtvGWO7XyjPepDh/qO31Rs3DLaHDlQUs/JYGRTpPHHDQpn4xFPdp59u/bff/c9f+uQnChfmlYmLZmr37a976Lvffcsdc8lib0CktTbSjO0eT7oeg9LsTNONBqPBqBgODk9VtU44y7zz6Gh276H9N9+ZdtPb9k4IxVHgzMEjOgHNIxWKx+7/xvPPPnD0ZfO+GHVWbWtrtL6V3PH6G04982zI4AMf/PlNPWgXudCSgpwhnD97CtxYXBUMkQWuvXrm4uojJIxqhiVonHzqQu3gmK6Npqrj2kKvYwPwVdb6yw9P1A/2HnkW/vjvtvwImrX6IMl17h556OLPvOdn9x05FsXfLZIeOQKPQaiSYUHcgWPHbjr2w+891tk+KwJYX9swPp5tHhSUbC5e3h/nM9O1Zy4+//HPXKo3xe4KDFEMUeXWF9YOklHv8adMbupRmLpsc2WjHMtaKSCbzU353/md2fFX75L5hpRDbizKKNij7nqr3/wC/fKv/dFX7v3kiUuLn7nvwde/8hXbDz3la9hp9xnKIjOyPH7izPnVlbX5PfWCn
IMJX77GjZ5jRZdlKYiAWudedzRfOp9f0my8GWWJ7g9GoYyk4oiiVAk2NhYEQJZoyJEJrpQoMhMCC6VqVsujNA0kjxlOTI6zqcpb3zaL+mGWS17dZdlU8+XVKN4cqz0/dai5+5sXL26JQlsWRs5zRk6pMJ4WExMhORen9c987OGf/Y2/uPjUPbXxaGcKTj3x1KA90FsdVpHGOON9hjwMq4SMlLTWySjq94e1saiJE1mRKjU9Pr03G+VcJUG53uCS8lQFgEKIIGKMM0FWGsY1jwqJ2rPEy7JNU/JGBhwJrLPWkYxiBywMgiwZNsolIJ9lKQt0Y6yRZIlinEnfH2zVyo00H5BWQvA8syxEJO9NbrKe12maATEshVVnnQEXBmFeFIKBlOX+MCHHo0qUDnvoqVKuREHUT5PuYITGMubzNENnAfz0fH1w0dbiau6kyXN0empuSqdZXuiDV1996dILVvf27tt7ZjACbwMldJYr4GMlAc4NUueRPOzc7HZOzOivPMVoxy5HQAwBCEMhjQcI+ImVhVcdiza3YXKC6hOqSK0XTAKkLbAWiK9XZ7JgbNzmSqLK08QnQ6Xce99z7fwu+MfPXjjZHlZV1ReDhUtDAvdT73zHyWce+YvPPv/yi9H1B6P7n1744M+8KdH5rgN3tTZX06K7uLFan9xbq0f3/uhypbxrKb+hsmfU47SnIZcud8j6w/unC1dsrPS9dYphEAqdZM+ePS+BaXSf++49L73x+p9+85sfeOzRhcVhr0e16X2vue1V993zRRVmvdZWOswD4z13BPb2229cW+ssb6/H5bjT7oQqKLwPqtXLS+udTqY1MAUBF1FFnDl39ujR+euvvmZ56WJ72K9W1ZStZFkxPlFhTFivW60BR4xLwuikHDPPAISr1MPRqBDcZyNTnqymzn/289++9ugNly9f/sa37mc+3Bpu7Z3du3vfHi/FqefPc8kDZmiQ5qNurRkOLnonS6cubjNfufrIbZeWTu1MwdJquzler9Xjfr8ol8M4iOcnp0pR1O2mkquQRf1uxgWTKBEiFcH0HMiKKpxNWm3wjiGzznoC8DvXJObJK8Gd90Dod1b9REC4k2VieAUGxBkSonVeOxikaSx5pJQEcICF1s6hNhRJcOQtQ8G41s4VLlTKpjlHCAIpOOcCpESb0Q8e6T19Gm84zq89WL7+kAorDrDIcue8RaZU2HTEjCZCLWW02YbvPwoX+5PRRMknifOmFAZFYZ0joJ0ebnKFIcY4YwqlFiYvUm9FSEyh0TmWleLeoSSkwHvjnSVL5DxxZIBCiq3h6Mtf+ZbJXGJ8wMPIwcR4IUvWgmEAEpmzjHgAaMlrxoEwRAwYBt4whiELGi881P/Qb/1vq6OJ/bU/vvauXfs71m4RIhPS+wABHAWe1Z/80cUvfzMf6PlquP+uV9702NkfdlqL2gNy5Fx5Z4USnnHP/ObqhU63F0UxICCgNc4ayxCZYMRkMuw8ubkMjKQUHMhav8Mg8t4xQEBknAnOnDcEnMghcImIYAWi39GbGEPcKQ8UHgiBeQKGQDufjkPiggtltdeej08eYOCyrKvzRKnAeYtorLWIDBC8JyDw1gMDZGgJgLPWcHj+/MJ0rXTz7ka1LNJhygLQntq9TCPY0Wh7Ie2unzPOYUHr2xlZnVtYG7oDNbhpf8P1hq3uoJ2k2jvBVFmpUWaHbvRjpWjnBeQ94zvZOkAGQGCsE4IRorNeICmmyHoGwhrnGSGRdx4ZE4I7JEYcGSfrnnzuiRPiufruSjEqFBejQU4MrEfnnEeGIAkhN8QYMQY7lHGCHbK4RyBGwDhah1iavvu3v1GH9PW3jv/q77+O9S85XPaSsDSWFlYJ723KsOWjfaN0KiS93elOHd3XHK+kp76gDr0YeEmL2uEbgovf3t5f37c8ooI0MXTeaE+KSU9eG6ek0oV2SIKXzq/nJ18wk+PQaNgwDGzugXKpwIeQrQAzIStd/aGPP98VlUHf7K/l195Y2exh0nF5vb5yTn/j+OjFL5n74aNt7rJ/fiTlHnO7kg4qxiGS9SjX25Xf+9v2e97cuP7qa/7gw4+stEsyqS8Vg997X2N8ynIuggY0KKg3PV2yTCjggq6QoT0y4kwwJhSBTk2nP5Cl0BfaWyAE9LQTImSMk3fkrEDgUnhPIBghKsk5B6/B5Vob48EjIBIAARAKzoVUxhpCb4wLw5AAkKFz3lofhyVd5ESeA/fkiSF68jv0JGAABB44lx48eAICqRTnaK1zzglEAEDvlVSATI/SlYtnUo+n1rYOXXezAy44d845C0gw0EWzOYONmatvfhnfWGx3zjOHQghkaD0O+1sPPnrPwrnnKuXIDDIhIOn1zpx6ZvPyRUfZrsmxO2/a++ypE1w4FbJKqPotU5kFAE+5Qw8OddAA5UKHUbKVRSwLeOSLH6cNdr6lWbb59PLahVGw+9B6u/6th9cfXPDGZvUKvPcttbfdJOp0ZvMMPH8Gs05FODx2K0Oh+ZKYv2ocqma2mr3tiDtzPvjYV5xxnAjQA0oB4BjnQgoNHpQKmamo7EM/Hb/73ROypjzh8090nnp07cDBXfNHJsXiySf+9al6DLMHYlF1KipyP0gDI6PlN71p8oVO+N4bpxpzdnsj/frTg3f9/Nv/5Lf/8LtPnf6tP/v71tnF6++eqB0EvcUwDTNtLBRAhUucZAyEHPaHjMin6clznflXvuZXP/KvS4ul+QPpzK5dxiYiIGddJQryJDeecSacCYueOrJ36t5zp1/7hjeduv+TehgcPfza7lB+/1//38nm9sXl7dxWfvtX/uPBG1686+hRsIPt7ZHIh2FQNPoPPvgPT0uXxA3FReTAOS5VKGyRM1SGIiTnIazWx4usl+UDKblSMu0PX/+e315ZfP6ZB88gZ8Y4ygZSVJFyax258DMfP30QXvSuN82pcAnKWpW2PvyJl9hMbrjklz9036g1RqSeXdTbX3khT6svuW3vIF1otQeVajkIglRH6H2eZYhZ1s/Ga2OiNBlK1lnbrFZKickQPKAf5bk1PMtNNLnLpp2ik6dF1mwqdGkjKHkyZjCoVQQKwbw0RFLVm9XZtdbF0GeTTdEbJM4HaV4gFVLGiI1yNBepxVwkzdi1Vs92HroIUW16/xGZue2tdRWXJTc3v/jQ1rlTrFhdfP6CFEGtGlORm5FJ+plgeXN35eLZzTQD29pKE9tohknSM8kgqFQnpmsnjp/NCdJBwRm+6Nbrzx1/TiFXRMMhnDnTmWnK+X0zCwsL8XqXPCOuimxYLdenG3t7rVXQJo7E2PhUb9DXWX9nCjrtltMkIhXHpSgUyDAvRgw4gedCOgLjNTBwjpxzjEGeaM6YkEpITLJECYUICCCYQMmsKQIZBlIxQK+Nd16Gqj/qGe+8d4RoyF8pmXeeQCATWmvvXGG1tq7QRnJWjkNkoBQTgnFk3lqjXRyWU6/JuViVYxkWOnXGBULpQkeBlIFkXBi03lpLaI2RkodxhETMIoIostR7L/hOPwVzVBB4wWRcrqRFZq02eWGdj0oVQI+IjHEZIEXE
RFIv+6kKUxhP1qheGjbKbVZrWbkZ10s+uZz2EuZhs51rN3b5dHHqtJ8bn+RZfuqpy4bzi8tpe6QWNvJk6LKc2QKdQ3DgUQiupIRIOCDPxY6dkw17aSoxqIV3XLNbJxcobwkTouPLK5faGXWNs0Z6a2+5duYQTPf6ifDlQZLHtZolytMRymi9vfz8hYtjpeAl1x0ulSqV5nSejtC5sfHdG52NUKpqHKWJElFYDFLGFQ85WMM8Khlqm03M1HjAmSfgUaedKuutBWs9Y2S9azbH0kGW61CbpZ95hd97cACtAecTtqgAEqCR6K1PkI2CKABZ7fYq//h3qz/4/uWFNeQsBMwcFAPjLXMcmBkVRNyk/NR5+MCfnmFegpOm8M4JxjQxB+AJ0TjLkCEggAcipJ0vZtCUSrFUojAOIG3U/E3Xz77xWPnQ7pKUAOS0dqPELnufpnq7VySpLYi51AGqSi3eiU9eORghGkf9vJisqKYq942jUQHkZcCEAGstkPeec8YcMKnEaLMdTJVj4+ywmNq1X4aT6+3uG9/3U7sO3lxvzpw/e+Ev/uQPnjn9/K5y+Wd+6e5rXrH77//pyyuX16++arK6wNYX4ZHvL00ea3RHvbhSGhNBp1uMermtyD2HWK67tejGpHVrVb54spyttbtWG8d0kRSCp4evnf3bv/zycw+c8AP9+7/3V1XhmxPNU8cv3fHGW19x26HdpcIsro5Gmys6G4/jqfLY4NTKsfn64w986c0//SqXb3S2gAUit5ktMkP29T/zU3tuuDMaP0y+4ryISkKP0tGwt7bVWVtde+C+r25sXBpubI43q91NmppqMFGPo8hElZe+7Sff/vL3NqbZ6spCH6E8OzZZHTt66NX3/eCro/alRqmye7a6sLox0IMENuOZcpamOQ7rId97ZHqls9HvyP7y4tF9U2k/LPneu98bv/xdIxGk192khPb/619GrZ70Duem65imn/7UZ6cnHmAuUYIXgSASYchAF5VG9fLmYGKzZ5Hu+/an3/j2Pe1Bp8hhtLkxVomO3fLyWPtSuTu5uwScShPRwWMTlxf1+UWbDQvtdDEyEHEVsn6vsESMcRmKXme4r8ledWN65LbtPhjA0NEsY7wwI+uGt7xlprOOZx564PRTP3rZXTc9+sjoqn1zb3rF2595/sR93/9at7UxNaYK41C5H377Sz/3vp/HfE2KdQUd79ekTElgPujw2tQdd+9aXmp/8VkcOBLIy5XQOVFqlAe9dJTmOMyr5bgxVecy6HVHRWKste1e79iBa6amxpe3W7fccsPZ0xdQ1F5399U/uOdj/+E3hCk5YzchqBBMJTmr7zLQOl/ZWkMzXqpXBQRb7RFwBMHDWvT2t73iK99+UHeTvfWxw7uvOvn0PW/4hfc9+sP7AODcqYulOChX0HnDOBZ54WWc2TxQslSpdYYDIdjB+UPzMzNTszPPHF9sTExFOk1625326kRjV7U2NvDgMxuIShRUW712XCoFUcw5bqcDxbiQsWNREJK3UImChJglp9NWqTrnPSAXUkbOusJmlVI5GRkSmqiggHHJq5WKlIGCMiPSRVEpVXQ6DKRM8ixLs2qjbqwn54tce8/jWk1x4dEBGCQGoAyABxaowOSuyAcMGyh4RZXJsNFgVGpUJEC7vVUbj8QK5yqamJ1eOL0dQMCYLPTAWCfLQVxp2CTzWRHG4WCQTk1NamsHSZ5q44DislzvJJ52SkQRGBJ5xthOPxVjnMgDQ2TcO+/JS8a0g28+vLS9Cr/wKuhsAXDNGPj+YNgFl4J20NoEa3rT0dAMHY9LlVK5309S73TU3ndw/CP//c3vfP+X18nWKo3exmWC5MSzj5x+7vIv/ru3339u5V++9ZnNdvvep+/9zhf+rnP2u7sO37rO/M0HZwOyuxvmTXfd2e6O/f4Hv5IMtF7WE9HeQOBolBYFjE1WtXZXfLUMBqNMKmGlqDUrS2vdB4+f7PQ7Nx275vCNNz7yo6cvri5+7kt/96Lrrrvp6qMbW9tPP/lMGKnp+dl+t728NUoyP1WfrFaqr3j5a79zzz3cu+k9E7unasXj3jGmM4PMKsHDEpcyjyfLTz7Vr4/F7f4wt3bf3ulYyu3OaH523BozGGb1WAYBu/a6/VNTE88fv7hrqnb08P6nHztbrsdW0PJiv5dmP/muu+tPTq9vr+e9rTTzubfVZqPdGaGH4fIGn64fvWp26fhimvkk2Tww/lJXjPo2ObV4TvIr3U9xUOmn2FnqZxk/dOTw7K6ZyJAklCIYKzeTYa71iDSXAEBAjnEACRZdDs45R4SMmEAi7x0gWG8AEIFzhh4Y854xJL/T/nzl2eO8RwKGzHsPBJagyC0QWJ9LxmMlDXnGKMuttxDVS5Uw8JYsUFIUubFMCBXwKApCKU1uQsmRqWFq85H8ykPpky/gG2+JXvSiaqO8FZSs9UYyYMBs7tOisBwMNB59Jnv4KRz0WO4KoRRHaXXGOSfnCIkxhgxRAQoOAn7nN+7kYmG7vVANY2Z0okebbT1K+OWFQWJHi9samDAps5YDIXB0znHweZKS5s44FoSA6DG57bbJqLLhCZgn9J4AvUfuHDLvvQXvicXE6wCM2NjZpy9/6EOf3NxWTGL3TO+jf/2dX/21VzSmGhRY4pzz0LrYWDlat5/56KO9QWygdPutL1ZxtLCyJBlm5L0HIZlggizygI2yxKdpXKoko1RwtNY7Ii6Z944zFIL1uqtFqpkADgBcBAwJPAEgeOTcWscYWu2QiSsASQDtrEAiQust55whs86BEMAFkgdPHqxzjiFHBBkIAu+dR2RhEFoz8ia3XgPyNC0QyXtHREIJIvIMGDDBhLbGWEPIpMDMivNLySIvnhbbYYSxCkPpAwW1UlBrBKTtoLVtHUtGw0CJtNCBCgMlJyuyFNgQcEQ2McWQgq2hVcKRNElSMMH+/wE0AECGRIyAkPOdCnbrgQtOiIxxJSQA/thRxZQUAMg5iJ1eXYSopASXEmXonCDLkdfCqL3h9IgiFTqyxhnOhfOeHDDBiSyXEpBzhCLNGAciQsYAPBE4IjAGSbG46uOpbz66tfQ7n/kv779r39WuSBcS1q3sUd5kNlu98PBXd7/mL2W6m5tlruchb7twIuksVcpOJx7Gpq5925sf+MbnG+ClxxFxAq+dcwwiFRlbeO85CiG8c+AZJkY89ry5+VhtZmrIJQOHlCIiEcYvtMv/9V9andGitk1tkWT5+Y5+fqkPnDPvsyQrcu0te/bpk4G11uLWmhNCWMc5I2QWdlz6hp3fqvz/PtOpwJof1jxjnujpk/oHD+fvfEvZodPkh6Oin2DAQ+M9IrPGS8EZ48iY0Qa9F4KDZ6PU7EQhPREXApjznhiC/THjzHtijHOO3hi304gMTARBb5R4ZxHZzuftrfeKc0aMgWCCMVJSAgrrHFkiT1JKJCHCMnfOmALIERIxz2GHVeThxxxrRKHCQDBJnNm84IgqkMYZow0D8kzuPXzTwtLpmixKIBcuP/K3//IjgQAeEUAEzGkfSsl9Dlne2Tw5Wl1kETIQmGv
vyRsnBX/qsfu41Z7bYpiHAa6trYThg+/4hevH58rX7R+Peo9efZDf/9CKDaqB5IuXBvtvHDPce2PAgpAYVzAY2u0ejToac0yd8Ib/X1LRM6e3Xjgxesltt/7V51aevbBhnEBieyr09jeI17w5GZxf6negt1rvL1cOle3e/Vs33gCLGy6aKNd2Rb2NYc0Hw5WlN780uu8ROrcIQgDjiAydc147ZrHigrjIfv515Te8dWrv+KmK6hc9ywt6+c3xy+9oQpBStYvdqaK1ZtbtqJXGUSJLQlQasubdcGAzptPgre/5vLPJucujgjUO7Nr3nW8e/6P/+d+GgyREXNra7G9FZR9YFrCYS1eyKTiXBtUyC2wYSe7k9FVH45G97a6/tbrGpbiwceG2l+6/eOJir1tIcGm3iKshoSBiSsQBugvHn2qOVY8/8UBgi3hi164Xvfq57z3Ecjck58syZjK2Rb75RDpMdJHqPI/B/rv3H/ip21xvZeXIobEHz8ivPbBlCkAgEMTQK4VpmnljyNls0LN2RI60A0TnSSyfe8q5XhwKiQFIbZmSURm9LYqch8GkqnzhqxeeOZn+9s9mU3t9vTkhq4XLOqgH/+3XX/r+//iMlawcq26r8837HpycftX+Q9PAVBDyZGj7fV+vyfo4knHNsUaae5sOtbKNscAzy3JLnpD7ibGqzt3m5uiH9z0wVWkenJsvK0bM5Q5Wt9tGr0+Mx5h4AllSVZ0MyNU9Ks984U2rUwyTgoRVDLNBx0ZehKVstNFdXSzAeG/7W4MgrCArhquDvDesTpWrjSiQ1a2lxXyUXlrf3HXgSK8/EhLn56cYUa1ZzfWw3V3JsnavXwRByYLaaK9wH2trISp1WhnnQpXCwaA1VqpuDotekoWBSLcTzkUYqIXLG3vnUfEQfcgg2ui0rr/2+jTxra3tWIj9h+a2t7e63a42RWH0FcE01xxEIKQQwliLRIwQFAOkXGfWWCGFMa7IDXivAsUQyYPT2iKEgWIemZAI3nlNwJQKoqCS21TIwDPnrUuzxBorGBNxaJxXIlYiCDil2QiQE6C33ntvnQuFAO+VkoDACKUQxuVBECCDAESu8zRL6uWKMUXuLNtJ9UvkQsZKZUXuvSNyUnFkXDDgDBmRd86TNcY6R0qGkZRZcSXmqoIQrCPhQVvOvOSgVGiNzoZ5qR6QzRmnUjmo1FVQqSyvbj57tn1+fSjYmQ++PLnuWh1NgefWgeel2tnj9M1H/Na6Xt2S55a1quRm1LMGChTWCecNeQYEgCSUF+QEgOQgJYZSAWJROK6E8y5LtVJcg+8MBl/+zg9/4Sfn69VRJc4bE41BpyCFELCN5cRYrKsLzfLm/una0yeeC8rNjV708IPtwagYZd4SeV/ceGRa65WVpdHiswPvfD2KLShk4ezk9NG56UY5LlXVgNJuv1Mda5iR3hx1GMogCAPVzLPUFbZA60HwgBMjFMAFoXeRoBji9kDVatn4nm0rrRtOpH5OgXV+WyqGqJxzwCoODv/3v3z+4cfX1ze5LdBxbzFBXxBj5DEKY448T7VHIvC+oKIgYo6BBfKEhOTBgSdEZAwEEMGVMwx48ojgyaEHRsANNqU/ejD4xfcevv7aPeW0RQTGMWRFfTxeWWxFlaDt9KmFbKM1NI4od8BlgTjK7Zi/8jsZQ+TM2xxze/fNL/viww/5Kys8kWpntCnFYaVWMlrnhQ4jtv/IkT2h0J2BR7G5tjV7+LZqtGemMje4eOp//eHvnmoN43L9l973+sO79p/62lfOnnto4ZlBN2fqutp1B6vJ5fbq81QaD8irfpHUIlEda4CUPlQz8zqCgvPaTdfd1drq5CKNa5zLlAln7SgZbpr1/OufuvfqXTNqvFSga2VU9IZ669TTJ3/7t37jLa2Vi/vGGofMzIUnnqsfHI+qaqXTE6iDevDC6RcwkNrricn5ZhAeuPrAgRuuCmoz/T6aFKMaQpZ3L108++jTBlpXH9nz9OM/2OWHe3bXGtftee7s6NiNN50680JzdiaerOytVb74vz+/sWFvvn7P7qv3nj2zmreH/dWutkagqpVjVXIt34YxDS5rVHC4fSlPaDCweVANCkVD/8IDF7JVhVmJ6cq77oJf+IN4o3eynAI3+l2vg4un03vvLTenm912qzEW5wg59fcfmn32iRYHliZZHIZCyFHmPdJzp9aaLBiNtBAhtzzm5b3j+0tRFcXE5Kx+8u++7sauuermty1cfnosBhX34gD6vUyUWKVZTVPrijwIuZJ80MvAwuRE9dh+eOVPREm6ysoz6JvOCilMGExqCabef+Mv37S5frS8592f/uwn9s/OrRx/FEdTt9xwY0PhRz/2T8ZRmloVsi994VN3331XY29iYZHnCwjLUPiimBbj16cBlzcOfv5fjtoP3X/PY6Our2YUd4dFyshrK5DK9ThNTXW86RwLS2Gv149j6VGhcSurXR4HJ547OWr3s+HKvy49P83T/ZPFy95VZo3ce404QcZhRUC4/fafqp/6cGtdG23HvRMyCIOYkS2+9Jl7DRHn9LafvnNhqzc/WTz63U/tTIEKRKefMAVkvRScI/YGXSmkA5b3W96Sy+3EVCCYOX3iJMOKTr0BW67UbOJtZnKZgYAi75Xi8Vz3xxvjo6RnkbMgCMoxd55xSNPEm0Qwp72KSrXhsENgxyu7hibtbC4wcGEshrrwPq5E9e6gVy0HSOS8DlSY6wyJOEcClupcBkqFyjFG1qsoTrpbAQ8sETJG3jqTc9LlONA5FcUgCivGaGt0pgtASrOUIAAO1jhPUkgfhoFgor0+DIAN+2l3fWWyUcmtrUT1PEsd5flgVK+VIC459KVarFQ0ynIpomrEtzY3UfBKFHVEQcZygTubciIg8gTAdowZHpz3yJExNNZywQUjGaqT63rXxB7IFrM2xGXcWCOfQlFAfVJFY+S0wXS91NiXjQa+sNKxaiRLqeud3/zoN//hA2979U+87hpbB0HHnnv8xHd+eLp8DZx85t7tXnz46OzEVjgWFv/xt/7gHTdcRWy6OXaDSftcWiacqoVR46CbuPfj//sfn/joXyQVfObk8mxNqTDGMozP1PvdPEvzajN0BQB571y/PQgQmDFnT19aXd1sPn1yz8zkbCnoX0oeufehZx5+anpy19REc3FhY23z2SAQQRBMTo0FCN1W64c/egCcN14PW72llOJSnA4SLpgKpdbO5f7+e5+bnRpzEV9cajEuNOFKq8OMHY3yzI50P9+7bzJUYa/VX9oedNqZ4owATp65jJEigq31zr69zdZW+tzDD/qUD1u9n3jtrVuXLvWyLBvRVHPSdNcdp0BAv7PlHPPkbr95n/GbE/XawuVevXrImCur5IsXtjiLsYBmuXJgfKoaxZa0LuxgMDLal+NwdqYhRJhn3nvd7ZtWJ+10hoVzYaQ88Fwb7x1jCIQ77T/eeURvrUNkDBnRlaZ1zoT3O3t69OSIMbbj4gBgjBfGkWeWgSeQnDEiIUUsBBVGlMqWdqzPiEIiskgpJTh5JwPhicBrZJaBKwU8Ldgnv7
soyCgCsFaEttUCATrN0bWOuklLoojXNDBohcFwV575wNOZiOBQLGuQMY5DkgxkIURSGUkCgHpTEeiKGUQjAmEHJjALz0gjzpPAPOc1t6EqmBCqiW6Feb85fuXanwpd5s96lDy3kRMbBAGohxKZgw4Kx367pTgPW1K0Pn13nOgM8inddHVgAMURJJpnsvfm79I3/3kvpozNxhbrtvefVIZzn67A9KQCmUWGuvbdg0E0qeGwd+PXEhZKhLOzu31G4P0ON/13mElIzFDxx3M7MTl8yM513XG2Qn2uobP34mWyu1ZgxLAA8MiGDH5o1HTpwCIkAB59iF5MACAHm3Dg8iWA9+AJG59aDq3EGci8bXXRjwbI3o3Of/d4gGz7aKwBMyRu3eqvcO18FG5x4Cz1oYgCEiY558aVyoGEce1yZ/87W//6+f/ccd2yeX1vrMw8z0NjIw/8TTMpHAGJccvWPAOGPgIWQ0FeHWplpI86Ndk7p19SsRQEfDw/N0/6IBEGAI0CGVDLwU5y4tjAlABw5LQ0rJdbGwCoRkGEQyDiV6d+rU0mq3KE0ZV6Id28aqtYSQpBSuNOgwjiNTpOvHdPTwyoaJ2rC/ApJFzfH2mpd2apDPauMBBBWeJ0C2ZMxEgfDWeEe69LkpZvZMXfSca6KpXdUNTWqo9pFjh568P4goRv7cK7bXR6rf/cL+7Zs23bv/qVOrWhRiZXVQrQY2y53WGzfVdu4efeKH3z392MHtW6e2XLpr4dABknOXX3/dt777k76hPXum+zp77FT79ZdeXt1MD+1/UnIYnQwoTFVrNDRyca0/GCwr4seO94E1vvidf9yz8UVj8UivV86fPe3yodH5VVdekHbS43lxYN99z3/FVeddMvXUibNXbb1UThYrcyQiuOySix89+FSeDWb7yzu2bGhMjAtcCBuq25+X0RZbLy/aeV1/bm55/lBrszy54JJq3ct05qJtxgaTfk8/oEEuXTVY7GkwtG2mNTtM51aO+ySsjKqF0z3RqmzesPWhbzydOx0oaQdu7sTjc0cel1IktXj7hZtW55acd9rTQwfy58ubLr/u/P0P/+JY8QUOKCRjiCLgU5OTRLiYpn/we6941YsCteHeMjpjlvLKyLilDFqlmqzcf6q87w8f3jYzhvE450EiKSsKY+jAsaXLXwQF5RhwLhQH2PicmcpPhsSENjrvWzBsw7ZRzsXf/unb3//3H/zV33vdw/v3feIDn6kFycrSohgJ/uYDHzhx95Mf+ehfXXD1ecsrZb/jp8675rm/9bbgjs/86HtfZIjkMdc6cMJ43ukPW+Pjc0vz2tkwlIqrPMukVK60oEFKnht94vTpT3/h0xJ9lcugkiRR3GaSJ94RDNM0K0qAgEF1ZdEklVTB0Q+9/8rmhhJKe8ktYYWnbOdkP98qykuFiDIt9cRk0JxnxSkazjpW8gS8NiZ7LB+acHSTLwXwjmN9lIWEHC3QYOrfPn76Fw/XOyYwWc6efUVUOljr5YJBGEQiCbzWp+dXT97x4w2TU9I4tAGhiGrhKIqidOmyv+ehgy9+eQ0srLWXxzduAfRRWM1LKkstuHLOFUUexqFnRJwREnDe1wUi8w5FEIaVSpEOjbXMceONY8Ms60eq4ohVavU0HTqj4ygJkFlXWGe4jCUXzjMuQg/GAkkuBv3O+Ph4b9gBbxmXyMFoK5C3V9txEnGBDKUnw6WKVeLJMc4AEZA7Z52zPKqoSg2cc912qYdc+kuueL5xTnEwOmMMB8PSA0OCIAg4EseKd9qi9xSB44zyKIyMdlEwstieW1nOh2nJhRz68N9vz0Llt92z9u7fvXT75o2DTvOtv/u5bqrCKNw6kZyaX9i1dXMtSR4+tja3kh+fczyQnOXGAQowDgSXpfcX7Jm645tz9/eAAr8nMBOVoJkWlTq0qsOPvAfechje82l99JTzTMRJgMi0N85DWdqSu69+/RsbN0yDD41xPgrikaBQ7OEnju4/ePr882Z+fPvC4TPs7be8Zt+Pv/j8a6L+YKWV1PTafFzdaXmoC6dkglhCvgLxE3tuysrubUTjvno1j6/tpPU/+bu/nBmfPjW7rB3fMDZzzcW3fP/n3918/sZee7a3uhbLMK5EUSjzfGiNc+grAfPeKikjGXNkjLEiN8iYFFSphADQT60Po4984RnBq0k9/o/v3/OhDUmapq7nTamWz2o9hOtem/9V+7y//s9jQYU5552hvZs20Mye1cLsuPp8VM2nHzzxL//09Q4JEuJf/vWT776Rv+KyYmL7xlf96XwahNrxOx45/ODjhwnp2ovPe/XzfvXUoX2yUdWnjp3Y//jGzePnoqLMJ1ESxCo1Ou3btGtPLfS8Y/WgftU1F4KF1LmoGukiL4lX6k2pbZaXutBj0zOsUiudaMShMxlntr3aKQbF1NRYo1L1adDvWaVqGyZam7btCrisjTeeevS2tXa7XgkHhQZkWjujdVkyY4gYU0IxRA5Qi6PxxsiO7Zs3bZrevHWKA54ooD8oJidGGiN1sjaOIwJRahuATMKqkl4I1qxWWOj4oi9ZiaHDFmW5KmpDa3W3PXBcIeONaqS1U0GQxNVemjlCb5yUKDgj5z2tv/eP6AgACXGYlkoxILa+bdJpEQTKOL9WhgdsWH+i+vD+1Q0bvJMB1Mttz93Q14eHCz0meW2GXXN949CCEXJirTtnrfHkGQAi8+SNNoIzxjhKEARX33jjgQNPDbo9IYN0zYswBJYjciGkLspT80uhUsbYzA/JOiE4K8pLzz9PQ/jMydMOAqMtEoTIZMQ5kBe2Uq/21xxHFJJvnq6cWV4J48GOvRtVEgAFRApRoXdIKYqcKX36yOqFF7zN6hBxNA9rcVN7wTwxBAaWPFY9tR7Z1/7Tv/xyVqhuTtdvv3rrzAvf+PYvyVr58f/zv86/qGDlEjEOwAkAKIegBDPUZQ554m0FMFCSAUoQ5Km0YC3znpy3rPTVG296wet//bX/+c1P3v+L+wWLkmq1UR3svmp3qWY1NRAiX1qpPCMGvkJEIMrx88I3/N70pVeY23+0+sATsNgf6Zik8AIZOGuQkCGCYJ7IOfLe4TqTiIAx7r1HBhz5+j98/SxY6fT6mO256IL8mG+1xrtHTty/72E9sLJS33r+5m2KPve5L1m+KkV07XN3Pfr07FrpdYELhs8/o+85dHTXPbPXXzxxxeUbtm7ZIHVWuCGBrTS47bSZQOuAqwhVw+cFYFs4BzjHXM5cqruprCnbn+eVKvCW92DLFIEzOaY3BtNvwW0vfKDyqbPtH3cT3hqOydyascnJx8/0Sm2UUlxw651G5wDAE6BfZzRpozs4+vnv9y++aaOLTnnWp0ha71wGTrPU+HjgRupQZcWmsfDGy3dnUP37z+4/UxIHycOIY5lmS7uufYHlvtDDcrgYxud1lk80kspvvO45f/+xRxBRhCLPS525Sj2MwyCAoNfriqqECeWkH/SH++468/TJXS986z4pfSui19+y5+EDp+58vGoECWYRmfP8r/7+o7Wg6hQBeu8IwCMiR45I3nnyHjljEvu
lPrXc+/M/ed2nP//pxfnTOtXWklDcAxkyZWGU4uuCMUQg64Fx74kzwTgzxpRZ5nxWq1UBvJBSFxoAOcruUrdZH924cdfc0ilnHDLiCFmRSc6ZUHHcRJMiujQf7NqzpdvupZkmhlIJAMwHOZcADExhCSGMgrTIPCAgeoKgooxx3nnGuHG21AMgEJwhE97b9dsWzmSgwnU0qmBSYqjBeO9Ko9m67Qyw0DoKAyLnjBaCIZFb30MgL00RBrEz3hojBSdjFJegTZ5pC54xYohRHBKB114qNlWPd09VG1WuoKYCyZkbGQmNHzJgI636/4iKEDlyIWSotZVSGVN6T1JKpw3jytvSec0w3H9a227Hje4+cXLuR3et3P3EkX/44K0ybIPSJE9RthZXAmLZ8kL4lx8wZ3N94WVzG7YpaWrv+6d/+u03/k215cv+kHkxsvua6176x7d96q+a+Fh7SU9v3F4Web+9WqlVMQx1Uch6Y0TCD7/5oUvOD7qmcs8DT77vve+nzY1f/cO//Ztf/7Adrr34/e+/7bufHqHKgUdn+7Plq9726n129Mz3/vNNr3rbK176wS9+44tP//wz3Ba9bEiK14k2TjQXhwuFOX3zrVdWx/tFd2jJSeBWVoarPeFQhgWlPc/dhu0bHzymrrzi+bfdf6BdWqk4k4GSynpLzBfWMVELk8A6IwXLh2npsu2bk/O2NEIsfKZlENSayXBoOOempDCQTOLYaHXQ7Ud12nvlhpVOJ0warVCS6eW9lfN2bwUmlRhVI+cVpck6uhGPDvPHXJmhQV92VnomS5dqU/ELb9l2cPaot8xbD6C888aVkkmGg9IYoOZXv3zvG1/wkl0XmnzYD01+yfbJn9997PCpxhXXbBtks1detGtqKjh+sqz4wHHO0I1uGstd9LO77n/VtdeNTI0MB2WpHZDt9brAkqCiBr4va3FzpCqBK6BIqXDQj1Jrc7O2mCoZza2aSpJdubdZ6Cw0gffSaZPnIk5GRSCGeZsForPSJy0M2VCzfpYacCVCWfhYSQOscIMo8DZNB2knCNlMtTHMltNeyVjSy4SKK1G1Ouhq72Cs1ULw5TDvrgxW2v1Xv+Sa2YOzVWt3Tldt0esbC7KiWB0x6A8GBmCkGoEfWKNKDIEUY9Uo0Rzzm2/ZeeTIyh0Pnlpde+Ylt25vNKOkqdbPAimFB3LOSiXzIjfraDHBrXPGZcggDEJHRM4VZZlbJ6OgNDYblogASNVqQp6scxyAIefIEDjjPC/LQV5ackEcGmAevbUOANLCCMG4AsWEBwAuDLAyd4ILRsTIBVJYY0tggjMg3s3yVBsETCpRKMNer9PNCo6gbZlbEzAMJAjA0jgFEPBBM9IXbUuiSlqNV1zZZ5KFYZRqLsg4yRkKQCq1Zow5R4ytR0Lkgdi6CAwBiP03tAfAIzDBpde+pvrvf+9Vr/71vdVwhfwqx5KYrjeyay8S3/ll2SlFoX0nzTZ5Wpe7AwAgrPd7s6yYO7vojGeMrw+9vCfrys3bNq/0Ov/67/uakZQqLopcyLrpSSyNQO289wDAGHk/2twG/CxZC0jeAxGBB2JEfv3nECASARLBuYiI8FyHCAEBCImtB1/rx4Xn1GcEDJkn/yyq6NnACYEIjDXrDyY8V0Za/2YkXNeooSMAkIIhQkEWsfzX7//fK198i2LF8sNPBpHcsfvi+TOnW2Px2mAA3jtChusress5RgFXilgIo4yXSKc6vjDME3Ly65Ay6ciTZeiIiCMxjsjQn1tvA2NI5EXAGPMEIBSTAhlDY/SatumwTEvnONswVt28dYKjzYbDsBJ7B9V6td/rBpEKgnPELhxSbWdlmA2HpbGmKXjsaXjjyy74/MfvH21O12uJEirVaVKJGHPlmg5RFtZ0h7DxigviSzZ1h2XuVobzg+NPPuSzxSNPtZuNys4t4YkDJ259/hadLk/s3Pur7/r1j/3xB0frdcZKizA0lA3Lg0+cnErU1IYN3WF+z/f3xY3khpfedO9tdwdV1V3qzfty73MveP0LtgfjwxPtYzJRNWYLY4DptMN8zoFJE0vD5VIozp44VFftWjpy+fY/0U/f3tVmemxs376zx/c/fd3zXnzZrRdvvezC2YWnGmN879jYLx68/daXXbVhZgO5YvH0SSKqNaYKa43VipeeZFGyCES+PL94Kr3u/MY3/+Pf+6YoF9Lde0fM+eOTF0y3FztJZbxRCRaK07nJRpt1JlFXxYGi3VkphnponXP9Iddh1s2PHXx6sFxAgWVprNaoJPO+8KVLveNgvQgqQWYg77Y/8Kd/8Ouv/8u3vel37v7Rv8WyzhVzhnrdYRLGwIUIku/d9fjunVs3x8PmmIgnqzlkHCjZ1pzPS9nHwuAylDPjo2XmnDFmaAPgq5kDJYOGyVkO4ADcEw8vqHgiDpgXCeeR5KpM9chEo7G19rVPf/28iy/qLKajrUZrRD25r3vltsv/5s8//rxrLoqmJy6++fljM2/41Ic+t/fyixgNp8Z3CkLGmYhUb6j7/aJVryvBlvs9A856r53JRRlUImvc6Oj4wJqRserq0mKzUhn0/LaRmeX55Syz1rJ6o54W/TzXgnzMZCVMrPbNcHD5eYsvf82koqeImj6o1XbvIjqbQ47RFu9HWNFlinO1BYhk2Nb9lNW2RcoNTh2BdmgzQ1FfBVU0Q8asc2V2Up88NXny9M5vfXMBRyaYyIwBwc/dHuQlSgAH5B0NU42AmkLdgb5Iq4IxqQb9QVn2NYEpdV64YuAev//gS2+6pKCUvFWctE4FZ6b0tWa9LFMPVqn60uLS2GiDkJiKyqKdREESRmWWkgPnPEcehVFvOO+JVSv1dJjrLOXcOkpqzdFuu5/UEigLJRV4xhnz3hude8S8KEPvo6DinGWShSJIO2050lCRslnZaI6CMw40U9HS6nykKpWkxqQyBpwumGIejGAckTtCWxbjm6dLq+b3Le7cuKvMOlKypFbp9YbDwUApxZnLC1QBq0SNbneJSWw0N+b5gDtry/bq6izGWXO8GsY7fv7UySz3gntA9Fo+ccL84EfZrt3iU1//3DBnjdFaPy3n2v3te/b+9m//4V0/v9O7xdUj99WrYbs9FGL96ZcxJOcdIX/Hl89++HXsmk18KPsjEVQTYAGgkqU3oOGKy+CTf8J+7f1ufpWHyIDRME3jShwEIsuK7kovS/MoiKMkjCpJOhxGQckQnPMLayk/dpb1Bpsr+YsvvvIX3/3KzW+92KzMSlYCHHXGBUnCqDB9I2LJfSbCOBipDg7tP/nwkbPD1Y9+a3amOj3sUzWsTG+Y7KX5/qN3bts6vXnLhnklo6QRKtZpd0NJcTLa7Q4YeuNNt9e36CIBRVlY7xhHxtGSK4wViFOTI51ueei4lgFVPHzqP1b/7BXbG63FzKaB4t2yRqzfPfDMm97xwue88tXX3/qXb371zbe84Lkzraljz6zJTa2Pf/QLs4uH9HAgAqpVoqVV/coXb9469eRTD3rlDqUr4ZJhQewUehHIbqrvOXDiyLv+P7c856o9G3YGQurl5S0Xbn22Vd
RTYypmqih07p0vcPPGnSEPputR2e+LOOwP9dFDs6q0rZHpuF6dmWoePrW8tLicWhc0auPjzfGmePqJ05Lx7vJykrQ82WZlElUITEgFtWpYbzU3TM8UJt99wYUrc0tT1gxKL2Qw6Gft1eUi62d5bsiV1gMwweTYeGPz9IZGpVkJGnoIhdFhGDXrjbgWemfqtbrgPM8yKQKykKUlEoWBNM5IkEkSM+qanFkDOi0qSeA0VRpRv3BZWkgpIyHyLGcM641ESpkOC21ysOfUqEQeAazxiFwoxRgXQUCuzEvLkEWhLI3V2hOgdnD/mbU6cxfK4sKL6pnszy7u27ZnS5FJYxygvuG6Lf/33x5Yy8zYWDLaiNu9AXqvvUMApSQ5QkAS/A9+/3evv/Kmn97xva9+/qtBxC65Yu/xU2dA2EFv6BxuOe/8PdfeuO+2L/fXMmPXdenQrLDf/40bEY/f9Wh2fCnvdHQ6JJOXU5MqDLzzdmltJR7hxoiijz1dYzJCNmxtmCQuvA8AQuYFIwPcW/Dd5c6ho8NLro4hAGuChx5Z3L4Jz7+wwmXTamHyCOPxxVO99/3ph7McHEcmeKUa3LPvviVnq67ye+/65996w2VvfctVBCXwTCruHLcFdJZ1QSNH9utTB0/f+sKbR2ZAhGvIgUs0g4HVtcI2CdacLQaDlaNHDrQXOoxQcHJ2SMMeFshLLkUJbIih4WCQKaLIgSBwJKZEPd91dW3Xhe7qnx363k/aD59yXVPXhpBLZEjAnCf0xCVx4OsNc2stoUdg6+13ZPy/o6IDRxdChou9dM8FW1cWuo3JWuEpqDouRJ72sjZceOn1i51eKx/ahf4lU/W7nzkbyMAaMhQYEz5+pDxy7Oy3fjK7Z1P9puecd/UVm1oTNh+sonSMDbmUyFJfGpDEeIJALhsA47YEgcp3liEIUbUdpAAluAEwJOQQJLmdZuF5u27Ai08euf/p/rKpDbhYnD/tBKpK6Aj1OvrYWQfEuSD0nsAb65xHLn7yGG382yf+6AObC9HVcSkj4HUo+14msDIHyz04a8Sw331Bze/ctGvXRnn8YBFUg4B7xmxN5JEYChqGyhU+g0DJqjQwuPGSzZ/Ae5d7Q+K8WgnCOA6Z6BqXTCZ//vH3fOQvPnzm6HwtZGOy9m//8Q3nBtW4wnkwu1J+9PP7lFTIBFJhnWfIBARHjqw0WjmEiopzDS/kbJ3Bwhg67x1YcKA43H33T37zbb86XFhMAr6aAQukCFSelkoJYKSNXodIcCEEF+co5gDeAeecnBOCGevIGMZYrVbX1hWO9lx+1eLx08dPHXLotbGIxDgLw5g8IFel08Zo50mpaHlpmKWpd86TB2QMKUyUIy+E9A4BCYFbbdanB85YZ0OyzlpdadQalSRUYb/bk4pZ46zxHkDBOR6TdZ68I/JFnjLGlJJKyWL92k/cA3IuBOdCce89IoLz3nkiioJQClE4S+RJQBJVrDXAyXI0peecR4IrYMjRF0Xk3K4J3oyGSogk4VEVGWdZNqhypcKw+P8boDkLjBBEKHgAFKBdBWQguJcqjhNTtJ3OHdhmPejydnfxbICCyeDeJ4t3//GPPvPZtzLWQd6BpDpctZ5f/uUH7GPz9w0ys38xa9ZdJZl47ZarN1//rs6JbwOeZBJdZixVks3XjZXLI+H04aeX9t78l0/d8XkejnGx2m/PxmEMpO+676F08eyacxP1bY/f9qCL9Iee+s1wKRAz284cPsUoWZ07ibIG9fNysfG//ur9z73u+nu//eHrnvvrrZ074I5gSsnfetvz59JTv7zv0axX2G7vtk++tVJ9uNC9oIJmmDHmi0yXpuRSmvj4O/64/sDvHP3CF7920e4tP7jjQQksrkrOudVmkOZRWMvSDieMwlBJYYp8OCiQipEanL8lFqIEaxBYqZnxEIcqy7JOp19pVAIpnSnqCQayoHJhsLA4UmmOT6i85BNTu2VUt44zmQx6q0rwgPuFhcdHxkWlGidxrb3UzUrn0NXrle3Kv/HVO773k1P9DiFbv8/kyDkyA55hHMmwcvsB2HbJZMK1LVZvuXXH5+54+OEz/sZUlZ0l4dLXvHTP3//T3ZVEobQceBy6i593wwM/vS3raRWb3Gipgjxz9YmtvRLCaiTsQAYqiGqri30lo0bcsKXW5WBiInac+VJASt2BnZ3rb+EMzWrpNA+rlXhisb2QZ12lrODeFt6gHZueWV480U1T7U3AgkhAJHiedtKy26hWrbar7f5IY4SMDZSMmmFqaKQVMuESdESmTFeaMxsCaaJKmdm8MjkqRuuLZ09FrWpzZNuhp45etHPP3LFHBrn23LbXVgdlr56MV+LqytJKDNXC2Fq12Wg2uu35dnf+gt2T7cXhmYXeL+85edOVW9ZZ+wCw1h9475USvZUhE7IsSyaEISRrvYdQKbBYlIW2FsCBx0zrdYWXNpYLrp0n8lKKOIzKQhNBqY3JM2Ot05YYGOsGWcoBdaGF5N5BST5UnAnurT8X3nDWGfaDIEAEKwXnzGudJKEMIg5lHIXem6Iour1+luXGEZIvdAHMV0IRCM6FDFjRiMy2DUt7toHRK0xpom5Yh5W5UueeMSBPuN64RQJE7xxyJPJw7iIJ3vlndWDrvRYA9EhInosSzxsVf/XnN1z1XFsRx8E4R4px5jwQDlqtQT2B5b5hgXBWHz56dD3KWSf+rH/V73RzrdfLOOuTY+AgGY0360uLsy7LV7IMoc8Zq9aFg9xh6ddBQIjkSTA4cvIZsIQMwdOz4RCdG42tf6JzzSI8FxAhwDqt+hzGGvHcdIyQ8BxsiK1bmteJ3f8NLVr//mejJgREIn8uY0JiyBCBGDFA8kQMCch7HwoZMxgJ/eLRx9tznbGpif4wv+eOX1ScqZjO2Iw6M6uHpXTAlUBvPDFiDIQQ3dxGSiSyrAWgnbcO10GLBMTR8/VfggFb18MwYIKtAxWQAQALFDsXHZG3utSahnmpDQIxGfHNm5o7NrRkKK1FAEhTjVSWWU5I3uqwEq8f7rEjpUB9/uWjGle85MFowKQOW8tv/sNLv/O1E3EpRuKqyfqeNA+xlig7tDzgG3ZMbblkxw8feWBm68adm+SZ/Q/f9Oqdd9x+6Pwtlw7mG3fc9mhdqsYVstMpW4uL+xd+MTk9wotSYezaqfW+CjgSVU6cWLTaRQGPm/Hx9urit79BXkM9eukrLqSqXiueYqHsZumAp62NtWF7aThIi4GOakHAKzKq5uCo7InQXnyhUm5w+OB/3okHlZmOKrUgmjo6t/CCl/6aB/HQXfff+NypWvXC7z36wyXojV7cRKXDmnIlW1nth/WNOvchuO7sQVmtVWvJWrccESZQgRKonH/wwceCRlhv8JHpWpuxwXKHG2eLs4PFvLZxdGpmx9RUcN9jj+nSdbIVUZNbp3D+TM59kmdQLpZZV2RDCCSvCMEkWeA69QhkvOn0O8wyiMNqFLSSsL86/5V/+p2fKGzGI0b7QKrC2fZax1ZsIFVdwPyxzl99ZPb/fuXmqjnizOoQXBS4u79zZrCYbKgLGfCs5
xujlKWFdlStRkVa3neo+67qNkvLwjscqn0/oW99RxgB2vugEtRY7eK91/74jp/NrQw27dphhsHLXv7Ct/9/3vaet71nYen0b/3BGxoje4tOVtkSVk5OnD50aO7scePKiihrcfaiW3d991txey01mWaAXPCRyWaz1Tx5/EykBKATgiNj6FkcyiLvWtRZJ5DWb5yZPnp4npncWhPGbKQ5cvDYM0JwAlZq7zXZATKbnT/Tft/f7vEjHtxOgxcYSBgwTk/a4U+TatO6igx9PsyZrDOTuHQNyDmqI6WDVQDOuBCyMqnTIRNGo6TO2D//xdmvPWiD8ElU9aI/6JelChR7dnXggIVxZF2RaZNZHQSBCORcN1XVcOP2mbkzsyqSjUqkl1cVg+nzJhdXw6XBYHDaTE5upkGhuXPk47AiAxyknTCKpFXOFmONhi5KrXMZG6WUMS6FjDOmdcmC0FvdzlaY9IGseMNSO7zy8hsOPLP//POv73Zn47AAWyjFmZBZUSIjZ40AASJIkqq1ZSBrWudZPmAhWG8pLzgyyaTiPDeF84wjr1RbrjDpsCcYgGOxEkpJ8AFDACaYks4Y8E75qFppDTqrOi9ltTYYZKbUjAWNkVpeDD0K45jTJs2yMKAyWypTlyRcVcLcD8erU3mZaQq2TY92T6wQoPMmjEKTwUNn2gfTp3ssxIS7UKD1wFknhW//+KfHnrw3jFjpfZ6mnLOAk7bMGmAcJfcl0QDkJ35hvvwH482xZQvQ8eA59HoGOIsjlhd246T/u98Xv/U3Gr0kdFEUutIgh1o1tJaCQBijkamyKIb9Xq83kMBqIyOri7nL7UXXXvqhT37+3yu1t/7Ke5bOLtcN2rgfJQOvjTMZ4xUmRr2TyKTtIuVx3Npw6fWj/hdrpx47tO3qi5uNyazXac+d3jiT9AdpsxmcOXSwJEMOOAQjjbHC2rgSh1ERSFjrryVxrVKtMPILs2fRObAOvEdgVjuSorvWZ4QYc6U8urTCov/13uNf/NeLpXuy38l5gGEY22FGi/dMj4j9P/lwUYS9ePJzn/3OE/sO96zP0qLQZeFynyFleX+xt5uVmxt+zy149ChdP+1/vICpN1wRomhE0lvfXl76xg+/7Xv0jte/YjSAs6dOrZ8FC4sLeZafQdGYbMo4xGp1ZNPWYpAaZtvtfHVuSUnaedHeGjRPHD9Znxj1lIzU+fzc4iWXXaMEd/3e2trA2NKT2LFjW6DqWBlhQvXTcveFewcrw+W52VDy0TqutQebN4/rXPt+0ZocGxmd7A565NN+e/7kyTPtQS8dFgRMiahebVaSSmu8WWtU+v1UCOSEzqHWxAD6vZTI1mqJQCxNLkNeaTRNxw+HutPN0yxXkWolEyfO9obDIkuLKGA7ztt210NPRlHovFUyjBKZFqVnnAW8Vh1h2hw7c4JBAnDOS70ul0EmiXyel5w5IXgjqfU6bY+gYknkCm3arqRacLpwnSfOtiYwGdHaLY1NV6JK6Fmqxu0LX77lm99fc57KvOSADvy5hIgQiWTAucBt42U5fHB82ozUYgJ5yzWXDdqnG3H4kt+8dXEwvOl5r/jFbY+bYY7guEBvAJxrVtY2bXq4OdHZsB2sj4ZtK2STISWjhnMXBOHyWdHumNMn4Ymn/aMHSmDBNedv4gjkiUrNpUYqiBWe9Yzu7Lv9vtlZL2RV5yztiJNnx46dWRzfoMEPLXLVGPn57ff+6Ac/73UZkwFDwQT/zi/u+J79EXE+GHivRv/t68+kA3rz266vJtZkgyLVB4/jV7/Tueexw66o2p7+yg9/8LnP//EYHgMzWO6Wh09U5haDr36jszYUMmaPPnb/wcOPZqlWHPQw81KFM7v+z2eOXLV7cO0Nu0daiLEGZr2XiAJJkXMIIUIFIIGod/VLZraeV/u7Txw7eCawulo4qdFpzwLOjbXAQBtDhAyIM4GAQCQ5kXcewOpzZuRqazpN8ycPdY4eXaopNdqMhlbsvfD8lbmlPDdORsfOHi3LYu+GVmDtdVdsPjvsP73U98i8odIwJbkOakvGrR0p7j/8UOMr5sLtlZuv3XbtDZsTDgZL08uJDYIqEXY4F1QQUCS4hIh7Qq6EJu2ERrQQlMy3oZiTpcNyWFobX4gv/aORypfTr96R5RTJephmmoPMCkdMeiYYAXjn/bn2PUfgEixZVI3/+mX32Ozpd74ZNl3TIOhaDdoAE+AcrKzA1rH61lZpljo6PCt02QjCXqF1wcjmG6/c0dx5bW8tNaBI1nTHCBVr39myKxlvwGoXaiNJPhwKgTMj9Z0XnPfjux/onlp53rW3jDAaiTHvRKFh+WCx0+9bNIYbrIlBWRoiQPKAhIiEaWpVCN6gs4SIyIABOe89eCDknHEGRBAKHjh+/MH9dcWHwxQ006AZM5Vq1RmHiAYMIQkpnQdYv8khMM4KJdfRY4qHSdhIbZsJNtYaOTE3bx0eP3MoCVh/uR2P1mv1JMtKZDJOKjofWFtamyMgZ9w7V2Rd70hK6ZyzxnogKbi1DlDIQFrrmAwBSqNBSskQGWfOgRSKE2dAZZkBWV1Y4AyQOENAEkoAo6LIPHlEZpwljypU5L3kPFCiyEoAH6iQyDHGrfUevFICgBsizoXzljMSXCqlvHcILk1z76ySHBmHdZ2idzWmrz2/cdWlkfMmjGuMYRCpIGBZn3xprSsCGf2PqIhJEUdV57i2Qas6vab7YTSCXJdFZ5j2q8mIwUE+7FuVD40Jm9XBWl8KIX1039Hihbf+6wf+5JZLrt4y4I2HH17+l499+5JbPp6bxytJpg2NT0+ePtb/xw++9RVv+9u1C/hD3/hkNtSRMjt2V3R6/QNf/f6oFW99zwfzsT1M39Ifnplbmn3r+z7x7W9+c/H0z97xzr898Ms7v/av33jFH/zuM/d/1aRn06WuisYXzy65/oWbp7c9feq4huGjT9+76fiOi29989Za9IX/+mYtOL6/e2L7Nv3Pb71hvHZ6dHMt+42r9z/p7/vJ6Tq7E/2SXhmMT25yknMoiIbRiOj2h8O5s1Kz3U3fZjOrvXC41pOhMp7FgWCuFF6T7keBFIR5v6cl2jLj0m/bWJ+YkFHgOLixsWrecxZCx7VlzAFVm/XNm6aybs85t2PbltysaM22bTtPMSZrIyONa1sTVxa9QWgHg/ahND85sXnrYrFYiVkgwrRYXJ0/zkRlfOqCan0nGTB69qoLmmcOLzxxqNfPxHonjoE3JuXknGgzB1/+/B2vfMl7N7dyWc+2XIRB1P/+z+6Ymhg+76bRi2Y2bZ603tiBdjyyf/GOdw773a89tG/5yNFjR5PdlUSXaXMkqieVuTRjSjTrsdSDLO0T9xzAFS5l/SQOirI3M96QomeGVA3V4mq2/4AZdAaXXzIWQjnslTo3eRlq60W42atBNlwTKjZ6Nk9Xi9TURhqVasKRdm3bemb2bDWQEkxW6tHWpBIhQBGFMRhrnZ0ab7bXVorhAHU2NTG2/fzRfnfx+G0HdRk6Ge/eOfPYvgcnt257zs1v/Pjf/MW1L4p1f4FT2V7ue2tblaS9
ukTNyKMv3ZqzjKtAJCLStY1bedZevvmmLY88Or//WOe/vnb4Na+69FxU1O4iY4yhEAK041KW1gtPjMATGefJ6sJbYhBHFW+dIEJkiKi9UUqSQ21LjjgsCm+ss1ZKWRjTqFbLLNd5WRY5CnTWSyUBKQiVMdZrO9AmVAEQZrn2zsZRLITI0oFjTHCGjJHzg7SvrdHGKcU4enSWM5BSpr0eGS0CTh4LC4Hym5pr508OZ8ZWAAvJhecQRqAC6PVzbcV6/8ZZT9wiMiGUtTl4AmAA5+o45zIX/G9wEXhCYtw53DTC/vp9V1x0TYlqlVxDsQoKRWRZAKrimuOQBCCAC2Te2sx599++tPWXWsgEZ0WesnWVAa6PxoE8kOF5moL358IP8v20j+CcP2cDO1eiBVxbXWYA/lxn6BxdiOj/Z3n23/MxOre7996tl4cQkNh/z8zwXHqEAACCCY/gnV1vFSEyAs/OZU+EiN57xhgyTs7DOnQTgAFWq6NJEi4vzjPOGaAtjYjD888/f+Hs2YA4G2mQd6Uty3zwkle98HkXdxEXf/q9p+45oE+vCefE+qajtK7QjDFWkB1mwBA4A3Tkn429PAPGADlKgUqh4MgZeiLJORISIq0XtAhzbfPCaU3kOTHOCGXIRkaSmVY9UCKMYgRBBJk2WZ4bw+MkWndMnGsVleP778tGQ9ncnvTMAJ0TGAhhRjeVE+dh56jJy1wQl6qq/r9UvWe8rFlZ5n3fKz2x4q6dT059zukcaaDJoYmtiBgGxYRhcIzDjBhG31HnNSuKgmAWUVREguSmm26aDnQ+3Sfns3Oq9MSV3w/7NO/4be/au361quq3nvWsa/2v60qIM1nJrLIm6snV4XPzE5nb+mYi597+uvmLDz8yVesjB2ceuPfklIlKo8+fdoduvm6wsfmqWzvHv7JoTdEvdQh+Po3QEl+Z2fnp3szEysr6s6cWZ+ZDT9nLv/2lXzvxzAK/WKqyO9nY2lgutFFOqg3TQutCiJpRIGqqK0/c+qXlBmHLW2wcRpNTjWQu+uaZB6d6O2+d3b9mN6dv3p/OzAWuuXffsNVpFkvi9ttfd7F4PN2lqO3zlNk44LwZT3eI2rp4cWN9uX/rtx9CgYOtpfWljbn21DUH9wvvn3zy2enZhDK6VprlpWzvHD+we5qWVq94Vzf0uDi+fCwNoBrm1bpPp1WvFWyNpYRIbfHI2toUitS5IUTz0JIgCEQMjIRZbpEiCdCgtRKtL9HTRhBq50PkSKySzlnbagaM2Sggeb9SFZKw8X3v/OQf/cYtt730QO7x8oXy2Kn+/Fwj2xqMlZLOT+3a2UC6eGmTM0IJWVwKP/CrZ77nu2a7LXvxROeX/tclRee7vdb66rBWfuJw55rbbv36A+e3Ni9t9fo//nM/9+//+vne9Gy22d+zY6I9Ee7b3cu79IN/+hvEDp654G54+V0vOrRTbSx//Qvru+Z7e2Z3jbZOkIByIIRQo8koywlQQBIwyynjjA37GbYCh/aWa4+cP3Wuw+JzJy4JKqzUnhCLdGl1MY4iBMrRSGXjkGVbo+9/6/zr7gnqRp+QeUdudv4Aw0nqQJkJ2tjlYIICc2B50jGYOARitnQFrNN3ddaY7o1zQmhOEANROWF85arh7nufLanvuaAsqkIZH6UBDUN2dRKA9gppGNFEqrrVAHAEvJ+eCgmBc2cvCMCSSkb0XJpeGQ0W1/L1EpxPyvXqc9989H0/fi0PrXBaxGJtfZFTZmpNuFCmbogmtYYSo6vx1OzOMpPgDAICYXE6IfOxhCqKWuCBhixNpza2tqzWx098KYkjhhTQl2UZxw1KiHE+CIRHS9BrLZNIDAdLYRglSWKdFWnAQsy3RkApUgIejPbeq8ADa7QGo1E3bHhtnbOlrJF6bY2TXhBEAqqu1i9eTMMwToEFsbKWB5G3FlBo5RkLpbZIqNdVtxPJalRsXqRBryxRq+i6W9/SH1nnRq2p7ty+4aNnNyg4SllZqTCJ+5XeuLwMRFBCs5FxHhmlo5W1k1sjD7rb7SEvdZGl3EfE9CbE+lhVBpzxglLt2LEt9rrfXP9vb4LX3gLNJsQCSgdSO8Vd1gdfws6Ud5kZaoPWMiRM8KKsSBhSTpyzypiqKm0YgHecE1nVRTGMG/HJY8eczuvBeIGv/+z/Of/nv/QHcPnCbW/YA/pZSGo+0akHMoq7VmrqGXJeK69oFLTZta8zH/6jm/7Hnz0NeGs20i9+7bc/d+ypvdcdfubRp1rClc7Oz8+oIte1tgridlSoHLxrNzveKKMtIaw3NRM3mnmRi0iM+gPnPSPEOpWNi7o2nENIqCLss8+Q//EbF3/zXdie8rTN1xfKchFIlkX7nmGF+l+//PAJvafsjwljlZbpdOooWCTj0gKz3/OyHa94IwLtSwaHD+if7IX/8suDKogJdeCtVho9BklgrS+IXFTFdbce/I/Pf/nqLMBgbSvXsiDEI2LCo8KqjbE+c3kpJvqmw7v643VdZIp1Z+f2+1jUdRU2mi959T3WGldVjVanLI2xRIPdMTcnSx5OTBqsw4lWKbccN40J6o0Zrq2HQq2vrzXioBHF0gldl2kaMKTtgBDtW/14Ra8yCp2pbq/b5RRDipwyb0sRJTMz3cFG4S0lCIP1EVIdR6HTmoCviryUNQtIqDmAJ4wGoaidycoir6QHUkpXlfmt1+85e+5KketIkMlu24G2DvNs7IPw21/7Vv3VT19ZGRDPKGHbBzcE0WhFiJdaCYZAedwIJmYPnV+6Ug3HaBxxvkaynqnK24lUXN4yly4P2t2Nl71m575rJnliGi3/+m8/8PmHnyzHqLX2zrJABIHXygIiEKJqMzPRFbhRjc92Ujx6TXLwwC3ra9940c3Ndi96w91TClqXVx6+da9/bjZe2ZRKE2WUsublrzmSzm4BG6epQ1tNiNIH4D33RHEeWJVNT/GZOXHL7emuXdVTzy+Oi7CWLREkLKgpM+BHQNFZBVZdPLX10NdHVXnA2shUrD8K1rempVJ/+KdPVlVa1XpyfvfzJ591lSWCW6MBPDq0Dqzl2jhnlJWOK/7+v/6GDsKff8+deWE+9W8nP/KXT9NkdzluCc5pEI0N/OjPf+TIXNxk8SBjxy+O88IYIyqpuXOMQVVV3nlCkXFKqBhX8LWvV1/7WsY/8sD//NHbX/emWd7KPHfO1AiUU+HAgHcibBtPScrmbqzf/m32I393kVgnVVx7zB1zXhcV0d4gR2+s1Y4hQUo9eC6INcQbTV84NtjMSh/HZJru29NbOHn++YVx3Og+fna13sp0bSGCK5vF5mr/mpnptBvNTHffGvO1Tz5a1bjpFGVQKqUL472llHHWso585Xh13/FTjY8ef+Xh1l237Lrh2okk5TbLKQMXlMgpC5wy0mukvOGVRn+BJIkHFLQP+hI1K7YeYZE5Yiq7HCfm0G5wqmRB5JWNnA+oEQLGQ+VD4YAYAgjWeUQEb73RNmBIucih99AlfOpX1g7OD9/1OrjjZsKpixPI16HehM3++gEOE7sh3TWfD59uMCYYr6xRleYUMWo
xapMwNGGKQSrLVQiLkDd3zrATW7ysvLGkUhCIicTM7YI9z3xx66tfvvdNdx988sSz3zz+7Hf80H+/9ORXpLdjVdSVUdKCd94iCxhcvRH1jFNrjRBcK+MBnHOMckRPgBBEQDDGemNZiEIELBJ1kY/q6vf/5MPPPfbQ333so4iBM9jt9ByhdTnePnMFREaoc5YQZIQDOmOttVqWmTMOiZdZEVOiiVZ1DTzec83elf7AVFqIYHZ6Zmll2VljrCaUI3jHyHVHbrh04bRXinEmUNSVZIIjeMIcZYIAGGOUKhGAcubAgfOCoiE+DiOtpFIWGZFSeUAhmOcEHTjrPLvqVTDWUMqMdUEYMSG8NUYp6xwiCMEZ51pJqbR3jnPOONPKUMKVMoQ4LoT3YLThFGupPAAl9Coh4Ly3hnhzeCo42CPU1p1eCzxXtfXKW6NCJCyOpEYRN/6TVOSc16UMkqnaOUs2w4a2Zo04wcAjcOe7nPiKFsZa6wnVwJPUGeODKJfx8aH4L792yZenw+6Edo16sOOJD//py+88fOz4lx2qYb45MdWc9INH//kncxJ6hNtf/8rnTiGt+q+7vviv7/jge376d9bZZjB50XXOJDhSlxZed0dr/9S3/d7fHHv/+z8IufjZP/6ULrY+89R9N9xwYOSq2cONyYPx8TOfVZlPWmGrk9hSPvnFz5R68j7FX3PPj125tLqyNl4Yb46zYeyfTDM+Ndl4yV3pbYdb0l/xgnA7/af/WD94rI5seXh3eNdLoomed1ZPTO068KL0oX/fDJJhmHBKKGpvrSZeKyeRMGet85RQT1Encb17X+eaXSkPEYy2NWrDpbK1qxSxtfUBCyd7c3EbQPtmu5WZvjJS5qzd9jPdTqVIGu0YDRQo7MUdFYJoNkaybMwe8iRynhNFmywKoykbzqa9VI6zqN3UeuMd33toz4Mnv/rN6soKZYIiRW+IB6sxcxYHQ3jPT/31n//BO6dmN1iSf8+33fz+v1786Mce+cBfF3/zJ7/+2fuXeBK02umB665p7z7wD3/8h2fPjiEKWS/BgKVhUykFnhESBYSPRmNmq7yQUBGAME2jpNEZ54tTs5NVOe5NNAtmFeo9yfzZsxvnLpSyWp3ZwScmIxrUhoet1tRyf9nrjBHuKz8eDwDSKLLWm3E9nmlPrW4MPQmR47iQhZIzkzvHY9VrdhrN7nhrBb2uZFnpAirdaLT6Sm9sqNGqXN8MO3M3NcutxYunTHnp6L6bHn3+kfuPP3bX3S/Nx1u1NeuDtdmpmVZztiydUp4nMLkjPv38ZWEShJgCb09OOidFWNx6x460l547vvjcc2e3Z0GYhAhUK5U2YgQyzDJtbdRICaA1hhMKgIxQyjlYB86H4Xb6lnMeuUPrfMC4d14rJTinglHGgoAOVZkrZay1FJthUlXSepfGiZSWIlprKSVBIIj31KH0mnHQuppIGsa6WAQAoJQGtM7YgLFGEmhvvbWUIeWUk0ZWlMqqWuuAuBbbfMmhYue0tsaU1rKwKUJPAeoRPP4c8S4gRDEkHj3jhHraanbWN2tAcO4FXchvxxP57Vxn2E5mQ4Kax1D/yf/72umJZwj3SXPWeeHBoTcetIfa+hopJAI4AvE+CsOiqqy13r8A5CAieKO1d/6qx8v5q60iBI6dfNrUGhG925aGPChNKSKQq7wT4gvqzLamRbYv/gAvEEXfUn2+9dC2Wcy9oPjAC9wReu+u2tEAgCBa73bO79job+Z5ji/AR+T/QpEAPCHbYdmwXWZPEAEx4OlUb++oWKSMaeMAMEyS1kT7wtKyqbREISWYYVblm9/79iNvf4v12fPFaOX1r6YzU/ZLj5nFTS8NMk7SiCJxhXLSutpApondztDz4AE4QUAQlIQRY5RwSpx33nnvMau082iscx603s4KB/RkG6oi3jFOmk0hBN3aysvaNVqQJnEjCbnzcRDWUo1LGQcQvbBJftd/ffsX/vbZBz/76KGXNueu4x7qelSILgY7yOyL+frlyspWZ7KDgdZQUWrTFleFW9nKP/OxT/X2Bbfcuf/Rex+NWh29rvtr/OyJByOY4Cl1pThzoVzIL+xq4rNf+wghxmo5GqooEuioMibc1RzXtesXpeKloWKqd+Tu/afZ5kZbigCzOncOGgeFKbQsfSUwkEGatCcmJsZbl2paRg2x4+g8jGtCp9eGkoBjqFb1eCDX8qfXjeg+uz714tcfuPDEs3uuOSAmwyOH75y8snhj67p77/uzUGiasP560egFNM6KzUHA4trh5750/lV3X4MhHLpu9+BM3uQRi+O9188gKVgPJq/tWMfrqFwtFoXFqNObmtnnsuWlrbp9TVT3yyjlecZOP02Gq60yB9jM33inPHKDCSO/cZmde67+6kN+YYlYEiVBoxuGhbMiFtIYcERqU9YmZIQA3+rrtB0RZ8AaHlAq2Gg8Eqno9TqVNnrr4M/9T0mJ1C6qSyWrcHq2ZWkpApWGYljkdaWdt4wzJKKu2F/8U+Ofvtgdb64mkW2k1ypfDpVttjtT01M12H/+/Kd+6bd+5uLlpx6476G/+P0/G671C1vfct2uYqQ/+If//uM/euBTn/iEN4O5venmVtVf3zLrixdXionDByfbnDRiJV0aUWW9M7a/1VeFCjAYZiVPmJPGE8epZxTSZmNpY63bbpqxyfuj+d3dJGiO1i7Y2gSMau1RyYhB6Ed335W89O6ZozcUZFoR2KHgOgE3+GqCsoazQBxR9SYNhjbowAjQWRJ6IIEJJlnDogjVxQsKZ6aPTFNTAKyAL9SIXXxs8td+80pB5n1UWG+3HaxGOybwWyjh2laWVWWUtPfu2VUMNzdWthj6sNMMWNRtdavhVq4KSjhjZK02y6NydQATTTE5v/PMsDqzWdxwaNZkG7qqe40JxFCqwhA3M3vN2sULThdxRLXDuq49MaEIVK21NU6Olaw4T2nQcqYgSJrNZlmMuxOThFEA0LUKOEekxhvvvTM2TtoOlNXEutpKb400miRBmOUlA1GbcZQm3oLc9st4HsdcloNa6yBOcqWpRwK40V/fPburcjmhTJbSKtVKEgt6nMu0PxBxhxCWxE1vtfe0qCoEh1SgNXUxChNd2wrQ85g5E2vklY6Mz5zRi6eOrV5ZYogBp4hIoogHwlvHGXLOtPatyW42LorhOBCR8iZsdFa3tOyrHZF53w+/DLJNpPb0ha2TS+MTS3q9MM0mLw0ZuuT/3E/+/DFzzaR64xF73TUwPc18TZqNoJD8zGVaW9VsBFLXdWUIxYmJ5tRE8/LCRlVrFgrKqTbKGBtQbDRCWctyqNoTsRDeRsgCGA7G73zfu4g1b116/S9+z6vF+ERLJlZnNcs5Bw2VqgwPW1oamVdg+6+4Z/ozN93wjYfXPvTvT22t2oWL52y+ixnWnk7rjf7ypY0wIJa4Xre3vrIeNkJrrJQoWJzr0mgtpc/qIoyFtoEI2x4oUlKNN3funljbXKXODcd12CSNXu/Lzxe/Oz0jRytW6VYv6bQ4+IpwZdPhX370z25406935sK6rlWui+Ggqk0QxY
ln04z+zXuPjuFzK2QqTlOaXNxzR/3L39f95X8aMS6iiDmrkDBApChYRB8+cf6Ga47m2dWij/ndU6OyUkO8ePniRLPhRex0f1yT9mQz1EUjtgYYpdDb07JIZC7DsNPvD8bjyjqRssDYcnV5w/lw9+7dQTovserNzLGkXrm0Wq2NenNTYnZ2uF5vjjLEitIQCbYbnY2sJAIpRVk54mi3N9FqxLPthtIFAg2E17Xa3FyvlOJBFEeCUje/o7cxypB4UlMO3ngZRXHCw63BCJwtjavK2jmotLu0tLEx7g9GJSDvdFuIbpxl7baYn5hYtCMgdjQcgffoYabb2hhlX3nw8x6AEe4tWOcIQec8oDHOITjw3jmqpL24kX/ft/3Iwf6lT/3j34OrHCKnTCnTH9jhCCKRjIfCLeTDorr9zvr6O6IqL0gld+50l05rQbhz29A1EIJOe4LgkQw3hwsLZ/f0BrRc/e43T3cmr9CGuXB6iXBz+tg3jXOz84fWllfnmrWSycqWjhJmbRG1Chp4LwskgcxKKDIx0Q7ipFaVVoISDCOmXA58tOfaoDOJSyU/uawur/hWL+bWaFdyFjjFvvrFS5/49zNXNmZFOPMX/3T/wetvDNszvX03Xz5dSjkcDobjUb02GFtwwMGDJR4NIGEMHHGEOCepoMaCk8CDHR/86PFzGxNPP/Z4f2NMzFTgHKc+bkZVpbyIlpbVuYtj0FDXYKwQwoeBZ8xaY4x2nHN0QBxhnBZlNc5KJIlz8VC63/vHtQdPrL73x29uT5cs8A4keItkGziv0VNnQxD6zjd1Z/YNdV+Ggnvir1xezQamynlWQVYD441h32ZanL1crNesLAn1AoRgVyth4ZFnz7IgiBtpnm8Ug6oVhNM7pvuDzbXxcP+e+aePX86V7Ux21kZVv5DQak51Wm958bXffObicDgCGqYh05ZqA4BYFrXnxBOgYSuz/rPP2o8/dVpQd3h3+sojky+7Y9f8ZD+KSmQWhGUU0RomC2fXibzA0o61xmDNGug91YjAmC8b9Xq2cqHuxh2qRbXSv6brpnrJc0ujqdbEhvaFJ8hYVdXOASFgnffeAxfOWovUEerZ9MkMfvOT484n1ZE9KRhZKjosbCxIrxP2L8DGM4sDm4wqA8SF7WCrNhfXV71wFkayXBCpoBw9yYkYSlr8yM/e8cDPnECGUTPNK5Nr+eWHHjAenlw+3Tu66zPfeNxI1ZqavPfz/8x0xliw98h1Z5970taGACIh3l09wXXWEYZaae89oEePBMCDJQgUiPWOABKgwMEjKqVkVWtVZ9IN++oLDzzQaKYEPGdEqRKcA0+ctdtbBOfBASCiMwbQE4rGWrAyCrnSZmMwkkoDJQeue8VrX3LXB/7oDxptUavKeH9l6Qp4b51ljFNCrTWMuKXlS3VdIKXeuu0UQgTvrInCSEplvPVWOQ9cBISgMs4555wXIqAES2uM0QGKMAqt89uxSlpbcN45X9fKOweAhBFGCCI4Z8FY4tF7CMOgllrWtTEWKdluWwp4gI446rX2hFJKOSXEaGWMBSCCCQ+WIuRZ5cAzwJlu9G13X8P4yHGqjRCMBhFHwoizWioPPgpDyul/koo8OMpZb3afXB0U5UnEcV7VAW87RxAbk/N3rZ2/j4kN5z0LhJMOjTWOemMI43G7pUtwQWCNj2jtBe/t331+dUyDgFFcXSha3TB2HrVCp6hyxeIq2+j95S+//9veMvuqt/3EoZuvob3mv/zc/54/NFPLqtOKfvInvuMd3/u9abXifGN9of+pf/l/7nrxS9/y0z914oFHNA/GdoJzKqB2PnfKYO2rviZyyBI37qvn7ZYzDLVt8u7OQzvmerut3xz2Bx6VJyWocbUePvi1+ld+X++Ya8sxu/8h+PO/27rptub87NRgjF96aLMVilLXFL0zGpyzSnmvoigI4khJZbUKI2wk9pajOyeagc2qmEcs6bjQekeBZBZMEAU7W43LF0f9lcvNJNixUyyvXwYR7Nm3f2OQj4tqK5aMpmB8p4lIKHFDO14bD1eTYCaePJA296yt5K2pKKJmc3mTsYoacGasage0Pb0zePmr+fQ++eF/OFaMlXds290jyxEHxow5++izg42f2b13n9849qpDL/2T/O8owRDCX3jfn3HjGmmkKmed/h+/+n98f8i1aM9OBAKK4bjVC3UtizyzGFJGja1HecGE5WiABMh5kefUBVZ6owxYpJyCqzvd+OCR2cWLG2dW5VpubgpxtuFVvlWSkhQ+y2QcdKhIi6IM4zSXm0qOdsxNcx0pwzAQQNC4XKtiMFpoJnNE27y/hUi0zQLRbMbtrM41jTu9BmPR5XNrPpwe9WGK8Uiby5dO3XP3D/79x7+yZ1fP6mVVZ0Esdsx0kUCRjzaHphXxJvorZ84jYtqIKIsZxVGVaSe0VJSb626eHm6tnT+7vj0Lmp2md8BJ7K0t6grBBoJKqyghnDJrvaxlEEXegtKSU2KUMdZdlTWUQUDKCDJqjTTaespra6TRVVFQJjxnCK5QyhjFGStVZbRzBJM0ccYhIKXonGqlYavdcNZtrmYWQGhDPVJCCKVBI9KVqmtZammMEQH3CEHAwHFDeV3V87F/yaHOzonMycI7FM2UkBp8ZQpYXW7/x0Olc57gtqsVEZASX9QZF1QrC9tsD/ynAB8ARASLHhAt0e9842QUPRo3S1olwkWKcucdgPTeEsac9ZQDo2CsJdaVpffgvfMEiSfUW+MBKBNFIZ0xBIEQYq9WlRGlrZcZEgTj/Xae8/ZlydNtX/G2HQxhu6jsW/qN/78koG8pQbgNIXl4ofge/bbeRAggIdsk0v8ddO28p4xFUWLtKqD321+p94D+hW40sh18dBVk2h4IImUsiIPB8Epd5d5hM0mKug7CIBWxMt4SyGvNXU59/wd/6OXv/enroTg+WKmlDqWls13zyhvFc2fqUcWz0lCCxqECMqyh1lAp5/3VYG0CYMETINI4XWoAcBa8A+cAAd3V8Wx/Z4QgIdujo55zygPabjeQc29cVdtalUobWdZgm2kaT3e6o6Ic5KW1bnNzvP1pnPjG8722aB498syTlzo7duzYi4PxeJhbBzlrilLnw42x7TECuSV2nEvv0Fp76VJx/Yums9XFY988kRVhuz1lVpTznTtu3H/+mfX+WuEr2e01MRHL62NqsRWidD6ejAejujvbacdiYi6eMLVKjK/53W9/7dxN3S8/8SXNZDqFyc6EFTagYYuFlXUhj7vB7nzpdBZv+FoTS7VNmW82CAsI02qqGRFGL/JIdndPzHUbg+ev7N45c4eXa0/8ZSe8/uyptTg6wESlKtdMev/lu992ZenRKhTZer+/mDnqyiW1MZpa24z6zw2ffPSR625t7zscRQRS0iuWNpOAeKLDAOIAZ3kEvjy7uiiwnkgcqmGneXBiqr6y+Pj00X35alZeIf0FSTPSEDi9q3rVaw3vwuRuENC4B7o/NRYXnxb/8LHjn3hwFSqgvMNrlgaRpVQ7EsUJJZgPyqiRtDtJ3s9brag/3HQ1kU5qpZXSYRAHQ
TpY1hRdHMrYIWdGV/3BVtbsxhx5fy3zWlMEJZ3TKkkbiLEuglRMewOA1iib52rvVFxuWtpIHOJffvRv0xhGo0JZGbbVYDW/tLHqLZk/3OYTm6985fQDj51TxPN26+uPPb57IqqtXDt/xoejCwsLFslgmNMg2L17YlDJe177xk9//AuEK0IRvK9qTSmJRXOqMblw6UIdCpvLZjOyVm1srTqlDUGrsNmOK99P2uZX3veawwcuRL2B18vaCkcPErgW9STHBhhPhSBsp+kvyMGVdPcBzVtOMfBaD5a4ymDyMIEd5fqZ9fFG1I1EoAgDFfAouOEPf+vp4fKeuE2zYVmMTBpwoEQZX+a1N1cV07vvedOJEyeeP3l+WA6mkyQMeKudsigNKI6ymqNoNgOHZKW/DtyDtUfmOkrLBPWR6eZDX7v35bf8z6WNlTAMeRxWVQHUcUdM3he0YgELYmZGetRfZ4ynHRKkoqgkWMW5AYC6zhiBqpbtTttCzThx1joHPAq0ksYYLhh6mwZxXdUWFfXcKB2HcasxKVXtDQNHQxQOPAHEwFey8i4Io6aUY2PQKNWa6Ig4NkWGVCRxw3rBsYMMrM1EFBCO7V57uFVQYAjOWTMYrDNEwUKCXluMgtCogjKfZQOPnljORSNI20hGWhUxN8PRerm2YaXZrpuk3kdJmqbN4WCzLiqWcGNNWWSqqBl4grox2etO9FbXxrnYeN11R9pxmEmXjes9uybbrcauXnniyqAmZLPUExPxyoZTZfDchfj8RcO/pI/MyDv24P6ddGuEnz2WdxrxViWd0XEcaufG49rUJuIcgCEDo6R3IKJAa4tovHO1tkrrU2eLNBBRM9RGW+aDIPz8fQ8/dN99N1xz8Jd+/AenmoXhZ028HHWGlHguKPGhqjQNOqh8J1XfcffM217/djT7P/qx9ftOLD16crQ+mJR1vbkm73rFWwo1oHq4Ps5iAiHxo5H0Dj3SdrfFgkArraTTdckYjMZlGDcmJ+eNyVrtLlpHYMhjtEiW++qZJ9cPTkG5UVlvI47W5sl0BM0hmb3yR7//Yz/2i7/tx8gIJRwtqLXVStcigcHFJ5fil/LOvjtY7W3YjKaLn/ntXQ9e/OwXn6700NKAMcZlrT2g9XDl1MXjTzz7yle/4t8fuwgAy5fPjYaVdc5U9Y037Z7t9YwqvvL5h5PGbK+TPvjsuU4czc22gXj0WgjiveNBVGxupU1++01HTzz1wEYhOeVbeT43G+yb3TXOZFOwVsjHa6vgO14yry0hJkqoMpWsyvVsLC1JyFS7O1lXqiqLRFDiG7NzvXI8koX06HPmC21aaZQkqdQyUMKojFHdnmgC+nI06g/zqPY6lKqugzik6IbDgfQmK/ON4aiqFKe8VL4qnVKFVkpqq2sDQJAgI+DRW0uqug54mBe1cyA4Mwy8trgdRuI1Id55JASd9976bDj69Bc+9c7vfNMtL3vREw887GollfUOKGPSeKltYStGeXHcrPVXNvvRrbc0pncEzaYvVcloDBSsB6M1IZQLppXxFJWvBqP+9YfDUHlC+rXOSK5nJllV5VVeKA2ri6f3HGgfvf3IJ7+0/IV7JTGkm8SH9rXN6EwAGIa7jDfa5ox6AE3BaqMpAQuKcKtUgU41E2VltbpMf+/9X3nfj9+5cy5s9drLo+GDXz712c+eWFiLFaGMDL/x4CNjqfffdPOL3njHlaUTaycvU7CATiuJFFUtozAAQggToAT42lplrXfeeOMRqLfOafKpf/tKZD2hRNm61pUHYAG8+tVvfu65Z5aq0qG3ToUBlVpRSrKy9LamlMZhqJXVxnZ67TRNosrNdieWB5ml2qqirJv3Plo/+fC/fPiPfmjvdcBD6dyYggcMrFEEDHLmQQTJzKHbugStd9JDvf8le7wl1HFrvZUOgVF0g7XRiaeHjxzLHnxCro2ScU6C4OomOR9JHuLK4uJSTCcmeoN+Zt3z195w0Lt6sLpy6eLK2iA/MN3Rpd3McxFmvW7ryN75aw/s+Nx9x4+dXBppU1krKSWMCArOGu+cqkvwyCiPRYIOzlxyp85c+ZsvL12307/19p133NyenPRgSke8ocb7itnTpCDINMXKWe1dSZUgjcgUQg7EqvSdNt0v9Lvecu1U7AaqfEVz3wNfX3jidGUtYSyikTDeO2M8MhbQJEysVHVdE4aVt9YIaVuG8a0zBfc+IiQQYjhW9ysvAKyqbBjtSPi4P664jXuJFl25iYgjARmNE29X44RqXQH052b3G+3C0FEHaciXNvoHj1z/0NNn3nDn4Xte+6rP/uUfPX7fkyiN8ZlDTSCMSbfB20O55q0j22fTsN2LTJCgsZbTF0IVCGrlOEVPtmNHCeNXs8mtroKAiVgkIenMJ8PN9SSOOeVZVtbKEALgPRPMOmetBaAEiXPWgA04d9aj9yhI2mkMx3l3amLx4hVK8JljD6IZzM3Nzsz3Tp07TRGV1ZwTJbWzFrxzzgngsiwpZTzgjbCdlxmAq6UEA5x7bfTV/Gzw4L0xdrs+mhCP3uWF4gELQhEIUde1VToOhEdwDBCAAKGUSqvCMAIkjEAcBkZbUysgBMAbYylBa4xWhnGujAbCrDZaK0RHCISBsMZun4tsBzyB1+id8RCGxBGQRTUlSDeoMivTeDJJG1VRIWPGI4CnnIED9Ghq9Z+kIsaAcltXo157Yn2FaKOSJHWeMUZdpTdXz3a7Uyvr5xAVIQ5EZCCY23XD1tqC1qNS1sBoqxPJbGgIkxG58ca9V555eDQEosnE1IF0pp2tnHEe66EEHx4/NaC8Ebv+Vz7z7OMXnn77q1562817nr31+mo86M0eOb145dBkI+xMXj51qdObCqQrLh376uazu26+g8/d+sPf+/0Ll6986XN/042bymvrjSwqFlokZciJjao6q+f23RQ3m1uLK7/4gUc/8Gt3kOwhzMZe2CRGDKPaeHAxA2OctEzOTU8vX3Fff9bVDxcscoxSTWsRIENnjeEMtbacEErQlDk1Cl117f6pqanGrrlEMArNgDdbo61SKdluNURsJoS4srJW14PpiYQg73bmLBv1puaVUkSrABRS24h4IDrtNCjHyzwyGxunR+PVpBlBTVzhpa9CYAkNIkpMO4CwyrIFWbi4eyidmF88dwy53n2kePeP3/qxv3qkv2a8F0QQsJYw4pljEX/lW7733+/9x92Bfugbl6OAFFYHjBVZTpyf29XbXNs69exxVcvd8/zI3tk7X3sLs+u61kVmvIE4bayMxlJmgjKKuG92x9baAHhgrdnV27G8tGC8me61hpubPI3idqsoysjptA25hCKrzj1fj5fdzj1NTypA2mklU+29Cxub3dmp9dUtF8TdZjw/PV9teZJGM/Mzo9Epp3OGOkTbbbVkVhrnpJGjqkpggtsAXVGXo5gH2eqlR5870br+7mP3PXmwyT73pS/d+eLbFhZWnnvsyYNHbroy1kGnEYRYlQV1TtV13J4hTKLgg/U6bjVbne7q4pqIG0maeO8bUzuvXLkwzFevv3Xn1vomrHgAiDirlUFK8rLS
2jgAxhhhzHtPOavKKopDZWQYR4yHVVWBd5WWDEkchlEY1VJJY6SUxijnXV7JOAiQ0HpUaw9RzANKPSFBRHEbSxGcEEqRMIS6qMKAEYIeyMZmYYyptQ4CgcgIgTQNLAAQrGVW1xIZgndlngFSzjzlvqptJasdu6PDBwUVRDsIEqzJ2NVSZ275PP/wJ/KFDe6YQe8JEoJotAX0FH2n1Vxd3wSgV81Y34r8IeCtA0DikTq4dr596zVEwRqEIYZC1YULYya6Dmqjhmi8tgQIBAFQAkA8pSBY6IwFD95qbSzlQBk673A7Tod44sBvF4tcVayv5khvqzjbPbUEt5/yAtzz/8tY2+32271sVxeZbdcZegTw23lEV1OFvKOUxHEswthak2dja433/mpYEkAQiCwb6aomgNsJ3IgAfjsiCYUIjNXWmm0QGhE9eIIsjhJjZV1K8EAZ9QiMErCuKEpgcVlaL8ctOvrD97/h9rt2FpuPDlcvqhLGw2htSWkZNhK3bwcbZLA+gMHYjmoytlhIb4zfFrm2h+AAwIF1HsCD+Vbm9vZ79ICebANXBAl6RE8JUkaoYGEgKCdMUMqJSAJGKRJCEMpKYl5aAIoYCSHaVEndd9SnTAABAABJREFUCK6uBS+64Y4P/Pbf9zq8LZNnP3cuvedAb1ejUq6m1c4ZtmtnvLlc56MsDaHViPvLY1lqlpCwkYRR89qDN6ytbOYDt3D80u6JqVe97mXnT3zz7pff8s9L99dbVdwURHuLJvPoLGWMiIhsjFWvyV/xxlsf+MyXOZGHX7QrFa11ubK2cp62XNAWjJSFHk9PTspNmq9KQaIWwS5jmDIXcE18uzmZspZDCI0VYCenu6nWF88NspXcaKwgDxRf1GthJp+4uP4dP/Si626/kRq7tZo1Gu1isHL29PMLq6f3XXvEDNWOufbMnkP/8s1Pv/xVr3ny65+LRbMi0dc+e3lHeOoVN1wThbTcWD55+uINt0yGE/Ti+YXGfCfgPA3FzJ5OkruNjYebe1595MhLyycWBiuXm023rkdpFHWTaP3CRm+mwcqakYo3ae5z2zI0oPtfnvzKXVM/vQbP3Lv6kQ8PFlZBZk3Ck6TVqmtCkbSakwbqcVYXVT2qfLMz/dbvenc+XP7Sf3xCUDTaGDmKuxyM9ASyUlGKqqrTdkR5lMSd/rDyQNJEqNoECde+BIaEGSMrRJREBAkrra0F37vj1p/+hf/65GOPf/kb9771h7/7A7/6u5fOrASho5wuXVhBylrd6mPv/+P9u/Ye2rNrfby6OhyDsMYWUpaD46uXv3kMjEdGwJMkDsG7IAzOXjoLQSU0gtVC0JhQ58FZubB6YSDzVpCGjQgZ48wNyj5SDNPY5Fjmcjyob7tm95Gj+7Otp3AuoslhgxHKGWZSLiJPmdu2/rrqg7/zy/vmB2//SQ5mPQhCjeiS3OGMrbtogbdbUz0DwVAbbbVOg0O/+Z5nzl3aYYhVckQJthqxMw4Qmo0oLw3i1cvMvv079+3Z9fI7Nr7yuc8vX1lq9NohBHPTra21kbYojYsEMaby2nXTeNfc/PKVxbAdDAb9mJELg8EgK7u9CalKaVReFWncIp4NBut1ladxKkcyFHFvcmZ1bWE4GjIurALKGRFES0UID0gExMmy9gDa2lqWnAdhGCMD69F5CIJ4fWOlO9FzHkTIu53UKCuVNr62wBml3lkDGrULSNgIoqqyo/F6GHjOhfcolVVmHABYVzbiSOqMecZ5yAwS4rPBUGl9+JZrWTgzHIwJ9WVRtBqdUlXamTjqIGJZ9wnTIUnTpF1mjkBQZgPuJWNqmG02ukKTmK2vdxqhR6qltnVVOOesCSPhGVhbKPBBo2FkURd108LKejlx4OYymfmxH/j2zUf/KiA+6k2ub+bU0d07GkFEtrJ6SgH4enqeDoZVqYl3fHXkHl8Nvr7gna0YA6OdEIDaU6RaaiBgtRsrk4rAAYBHAmC8QwcBo85Z6ywhSAgxxnammwuLayLkURyAtd6oEuGhEyff9FP/o9Vuxaz8zN//ohh93YfDwuREoGik9dA4ZXVRi7awOgc6+p53TX4X229cPFqqP/YvD54+O3zDXcUH/urzItlBi+FtN97w7HOn53pztTdl6ZHEWa6AhiIgcjwKCYnbE1rqqrLUYFlZFtIwjGpdh8RPt9LDR9suu7ixCo8ck/t6Rg8gvbix9y7IFv/5YOvGv/zNH3jfb99fZHKwsDw/Y377998zQ/yrb2Sj8V/KTsIpp8S5oln7BOL+n37oup//mec//YBCEmmwjBKrDUMkAEtLC7sOTG7PgqyflVvZ9OzMviP7ds/PCRJWSHZPdvKyQiNCRubm5sIklFXtPCGExUlqR+vDlYWN1TErzy1fWfDgW50uidpB0MlzKavaMrq6sjocDxr53m7YK0uZxqFURavZ6W8M0qTNgyQIRVWV1osw7cgyV7IkWhkIJqZ71kg9KFrzTaug1q7XjJUyjWYcEmGljii1ghZFVQO1yuhSb631Vze2ClWLVrR4ZSltRd3p9mg4DPKizkvvSSXdYJSDA4NYm6oR8727p/OssA64QCaiIqsZZd4hULBGO2cBtg9oEDyAA0TXoGzz9PF//ou1Q4cPRjzR1isttylqBI8EFHhgWHlcXLVfuT8rBvS2G+P+RU0sV97gNueACN5bawmC86g0Pvr0cNf8zNyUYKG1WoKOOY9jmjZiK03NwyRN4taEe9VLO+dP0q1V7rA09TLnytZYFUoqMh7LYEo4V4BgIki8rJxzDCNgPu2Jgwf0N58pCCanz5Pv+8kvNCbwTa992cVF9dUvHd89O0nTts3Zm7/tnq898JUbb7zpuTOXdl93fbM3v8KOaa0cWkpYJWXaiDkVZeH2H3nZ0YO3f/kLf65UxRlz3jriKXgHFr0NEDU4bxQhnjKmnfeU7Z7e1Yg7f/UvHwuI904Dc4xZrQ0jAIwjACGk1QtvuPbQ4uK6d2xqIgUujhzY++jTj4YB1HVfS9eKZz/0Fw//5i+9JZ6+jIl0qJ1DgpwgACpPmAXivXCEEiK8cYRwRCBg0EsWGmfAw6A9J++aDI7eRG88KD/7oPrGkzn3Vw1orXY0yqUnxnocDgcc4NKqitK1NAqXljKeNnFUTvSaO/ZObD6xqkorYxnGFKV5yQ27bto99e9ffViHwVZlc+UlJYSE3pntbCjjjFOKEUoQWSQ0wlMXzRMXL7Y/a/dNkpt3N67Zk3Zim4bF/E4RYg62EjGlNqgKUefxxctyNYtWN91W3Y5aXdGIj+f1U1l5dnmr1WtHvdY7brz5wW88dW6z0MpyZA4EZTSIwjhpWl7Gjcg6ozh47wBUXxVGycm2kMq72gWcmtIGCrpJI0aRgD3QpW5fspHpV9/58sc///yw/9mX3216R6ekHSHVdbWJOvvy57asFJKafCxpwAXSjfXVl9ywvzx74o++9vmolvM7ppZXl1PB86qi6JevXEAaIxCgaK0lnjsHlCIiOu8JgnUaAAghHgEJIiKlFCzAtkhkLQ8YQa6tpYRMtaLf+s1f6nVbtTTZOEd
KtFKM0TAQxhhwHgGs1duNY0CQUIzDsKq0Mkob3YzDwXo/4kGtLDc4k3SO3D573zcfiRkNIB5DpqRC6zglSIm1xDnnnPZIqKYDOdCmAqCMEOCgtGSMcsqklDzYdoQheoIIjDKGVCkpwsCbbdVpu1YIACAMhLN2mz5iFBml1nkK3nvtvGGcSWOiIPDgKykZZYIz660Q1HsndQXogoBZC1LXSFCEVFe6EQUBgSyrGAULYA0oZazxVVbXJRogeb/SY6QMeUq8tnVdeiDddhfBqRdO41/IKgKQReHc4p5r98bdOy+cfhSJpNZYLTkPKBn3B8XE9L58tC6lZlHcbnY9TaZ33ZCNFrtzuy6ePrkxLjnqiVYcKX7sa/8W6NLWlgTxzOzN9/zIj/8/733LVCtszTbyES+L5i/+1u9tPvvZ+/7jufqi+YeTH/rOW4/eQhr/+sQTwZHs1d/xkmMf//Q//vZ/f/Pr3vbsmWdrncdhKAfV2fueIs2De961p9tWuX/NycdOILGV7EuQjdgVRd/km0GYRhFfXngycJiY+v7Pnvpj73/hp6eizuU6rOpqiGGdBMHjJ9ck2z2oKoJ4fq1vgYhmCkHEjKTMeG9NbZERpWpKCaeEh4H3wJnbs7MTR3L3TNJoRhwDYlQcNRlv0AZogmGTK8mlUQemG1FseRALEBioS0ubk62k3eC6rnbMzAx1vxgtxdO+qllV6iobV9VGs91IknbYPcTFPKJzDBh1G5vntC+7rdRWjc7kxGBMrYBud1Irk40WdszP337LwQe/dsGC0GiRMqO0k7K99+Uvu+vI277v3Tyo9o/x2tuuefTk5XFeTU1PgIfBaLBjfzdAddtL9l63v7N+YUWNF0SChNIgTk0hBYlruald1WhNE036I1WDZV5FgbGwlZtVrUS72TN2c5BvTCYTzEMq9K49AUv5YB3GSOrNXNsxb6jJqV5Kg2KYDbLCpE0Mu51OEvBqa72IwwZ6XY1W8q31QZ5xARHvWFtYsI5wEbS7rcAbk1VFmoQYqzDUl59fFp1WTcf9wdLB614+Wl0KEyNtsbaO7/1vP31pfKYenfcy27P76OaW1nUxu+Nof+upPNuwQAeD/kyzE3DIR6tRa7YpGog+ajcaMw1G6dGV4aOnLgBAkZfOOIgEEB/FEbeeM0o4K6vaeps0EoYkTQNjoag15RSQMA/O2FKZ2hSUklpWBEicRKU2jSieiNPzly/xgAnKAJFQShgD5zklURDmtZRKWimjIKy910AoeIZYjErKCDJMW6LVCGReBQyzUo4GWSUlEOSUOWOcd4IREbGApM5aIfJdzdy7viVrNPbgFal8MSAnjtvPfYXc9zRY9OA0EDTOUUoBiAMjiNgxPb85GBoN6AkQ9OCvFsl72C6Ap4SFhkzH4sSJpT1HGEGo5BY4H/EUoOMNJ9gkzDO0ZQ7DDLSDgBLwDj1s60CU0BuPXruVjVbXFtEhIt3OHyIErTcvEDweEK6uznhVN9quL93GiQBeCNv+1l3aC0IPXOVvELZNzHg1iXrbL7b97zwIkmbbO0cQAhFUtXtBXgIEj95VdQkUvfVXn/D//xUQiHcePBJyVZtCREII9WiMAyCEkiBm6DCOWkHYYM14bX0pwfK973ndPW/eJ8SjxaUvVf2NYlBnY6zGgTGYSb010qMajL2KStWW5LV17oUocfiWOrZt0vOI38Kc/AvUlydIkAClhDEklHhAIShBJIQAQcYYOs+8FxTDkDHOKKUW0FKyPq4kYBSwThoLESh91XTgovCH/vuPXjh9/L5//irm9N5/PvPKN+9qTJOy6NdWNwO+oevKETZi1Fs0GrwTSOu8ev7xfqB2byy7CCfmqFXL5Vc/cV8vEg8sPJ6mqYpLmxufuyhir/62V37x818tc+9G9XXXHv6Zn/vJD/3B74owqqpxd66xuJEtjDZD4qUuEhZ6bgZZPxLd0XjUTXhlKhH6YXE6mW1B0CwzrYgKo7GRFbbisjKOXO6vLpXLa23eqG2wcqGUFWHu/H/7rnc0wnn0USONTJnPzE7SZvPz//TZ86ee7PXk2rnnZq+Zzyq+ct9G5Hv7p9yf/MZ3vud/fHT/7UcP7tt//ukrP/V9P3Dl4hUTFUfvmDQxjpVJp0Nhdb6SD93gxuv3bl1akpvlYvVgcm04Pbf/8qUnRCB6bSdzNRhuzM82tzb9b/za4MbD0Y/99OT8oTCHTUWFBkmoaM5Fr3rXtS//7j3n7rvyqX/N/u0L5xVWE2FDhB0ZRIPSjQcDSl0Q8ny49e8f/5PZ3h5KiAJtldHaMvDGKgTvuLdIq5EOU1JIZeXIeQOUSuWs98Q6a53zHhhWzlnni4FJW2nI6dbq5sLlL+77ZOdt97xmrT/baSe7d80trywxYRHAWqKNAmd5Ixkq+8aXvPLfH/hSK9J3veq2z/7VRxGdNabMFaE0CkIeCEpFOTaW4pOPf8PW/tq9Ryh3x8+d6LabHomqJYtokjTAUYuwubG+a6otBLMArSAYlsrIutFmg+GSceXEgWusKL2ViNSTlNKmQ+IQtWPo3fLlM0+dq1eH3n7oE29562wwqUAxFqTZmMSTXR6wcMds//QzjRnGJQBpfeFf6i98KcEozs1QZgYpIUCstNYhRx4wTLuNiwsLAPD1+++LGNu5e+71b3oVIeTTn/384sI54YuAtybn9tVSUZB5kcUpqYqqykthSZzEvelG4dTp80tfvfdLb3nti4yuRJy0kogAGFsAqiBJvScioBZcURdBFHuryqLkTERUOMAobJR1DRQEQ0CPLCIQgScECYfAU3QWRMC9hVarQ2hsyqqiVCv0miStSW/jOi8IWhIRWSpqQ+Z8LQsNdLI3Z+RonPeFEJQSIEIIvj5YSdIWAWKV8uAJI1o6bYiUFEi3n7mABSKgIQ+9gyjkTtbOuMpsIbFJ3KpL4zBwvAA0jFgCvijypBViqLOa5SovDRGEGeO8VN47tEZ7kKVNZq7Zd/trdBUvP/95oBv9XEdNhqoiw5UHvvD3N3VwY7kmk6lRxBgk2rXCqK5dElDvLA/DmOX9sSplNd3l48oQJvpjzQmxxHriktjzxHUSSAjsmuSbm7qflZczqExIkDrnjNHOWEoJFyQKWFlpdH5tbSOMqFTKlU7XKoy4Y7ys3ORkY1T7LYWve/vvfvA9r772hiSdGBsvlTJEcMaJQ0oF84Q6QhlySr1VWetA9N5fewPVohrYW/e9ptHY/5E//Zeo1d8w60endq4Ohr6ZDuq8NxWOHGMp71sb8DBqTUHdJ9ZFzZnJyBZ6JNeWDuyY35M0nn/28ZWNet+r98xMDO+5JZk6cvfJv/1M48DuuYPXLB9/sptcau8Sd1yzY7ix/Ft/93sTvYWkyY1c9+UJxjtiajcjwlPpIbbOaOt7u4I//8h3ZO968JELxhullQGCaSMus+LYmQvX33x4ey2YnJrZN7/HV37H9O6AtatMCt7Zt/vQc8ee3Vr2Ow5fMz15IIobBIWSTkud6awaVc1m48rixtcXnmpFzagRFkUdU1qNVUxYJ42cy8IgioLO1mbZmcBOJE
o5kGbIQAYYutonbaLl2Bam0ZpkSVvwVpkRkKWpi7EaqLIiYKeaEWhcuNwf24mpqR7jDBklzg1WNwfrm8q6utLNbgPC4NSzl/PKrAy2Rvm43W0lDRCUNpp814Fd9UgdO3bW+lIEUVVKWavK6WJcJWE02W15YqQxnDFTsXGVeyoQGSXOGEMpc8468M5YRriRBqlDwlcXV2pVxY2wps7kVtUKHHrnKSfgvXYeKHWOrW7RL3wze/z5cVEIThylqI1lghIg2zVGxnmGzjrx9PNZOyI/+I5Gp1E7tekEd95lI1nm41anyUJn3Rg8JMLn+UDbZpoi97UuRswHDipV69HQBRsbO482q3zkIEBnjPVIwLtak4q4wtQ2oJYGSemmxzr69FeXEiFm5nbkynhZHL3j1fvvvO2ZtYuf/sbDR2f3WoXW8CIrnJXIKFIMojBoNG3hJnfs+K4ffk+2WfGvEswBGXhHnLXgLSJq464WV3nvPBhtRBgWRf0vn/nrLK+bAZdKpmla1dJ5672TUjNOOAbIGhO93cMqaEU9Hgb9tb6Iyanzi82A1bLIcsUFH9rgsdP1r/3W53/0B168a08ZT8Y2MJ5a9Og9eEuQcEQET6wVAAEAAdTGgccOEueQOz+BdNqZjdbU8JVvmrzmenHT/cOvP/z8M5cBAIbDEed0qhsurYxGpY0iUVm21j8TBazuZz4Id89M6Lx87tiFTqth6vKJx4/f+aJbmnFMRvW+a6bf2Xm1quSpUxfPXRle3KwsCwqnDeEE0VsDBGqtCKFoPKEUkTEWlgaeWzTnVm3wVNGjbvec2NMrj8yTZqvRmW4PN/2p08bmjfHY9evQRm768MHd8zuePH9mYveNadg6fIB1Z2ay5eGpU2fWJSqL3lPnadpMKOGEkLIsqas9eh6EGHglq9KYIOYkZWMGVsmQQ9qoX/eynXtbPTl2py+MylodOhS84R2v/bsP3tsKp/714Wer9dHbfvRlPBrW403emWh0/aiwX31slCSHaKSycRkGsbWmLsebw7Ix2EyQ5aXCXuoaIdFmYqI3XNsqRlveBwyJ9Xa7bBgJAUTnPKGEMaKVpYxRQpQ06L1HCkgoQ6M1eLvdlwPeW2W8JSh4NhwN+3kjaVJOOCUGAQkqVVMujFKMEre9yUBEhLpQJA68hSiJEanUkgVCaZe247pUDz32SDPRk9PNd//ILw8XVv74L/+MBAwJIMHtrmhjdRgEYdyqytI57QA7rVZZZEZZJW2YhM5bj857YJRpp511SIiWUoOnQTTTmxsM+4PRCA1QSmspOWeUUmOtB3AIDImSknDmAJTRjBHOAlVaY50zhnNBCHhnlVYBCkrQOr1tK6BWE5TMWy9xNg4TaphVeyZx14Gp0bg8e3l8edO02vHM7MSJU+tB5LQZIegkYRPTMyxKo1AgZ9YYQVGEwX+SioxzjUbDGLm8cnF6x7WC7TLqHKXGmgqx5b2Nu1OaisbMzS86eOMD9/6d8vn0dE2SFqU75ru3nM6XpqZCYMwYIyDKC40i4oHgUfrUyStv9/GrX/6y488+ZKULkjnbm/vkZ373J996zT/9/YVdO25ZuOK+/z3/+6Yjb/qRd//8/U/+9fd+/+vu/8QnbrrlDVutQzrNNxfunzo0qZXvtVvLi6d+5Wdfd/11Rysqyo3NKXvmjz78X6b2IRf93/ntf/j858ahmJ1stVZXlqOwp4SNovTv/+OpV7/zR15yXU1w0ykFire6Bx88+Ux7IrWqAEKZQyYAdCkAKDPgjVGace6spMSGcUiQ8jAwVRFGNGn4VjOcnpm2VoRpe2Y6PH38dIMlrXbqw3CjP+p02xdOnTxyzc7ebNBfLq2naEnUmaeNZmsqHCwtZyUptY5cvrV4NmQm7bQ3FxfLfIvYRrd5UIim8TUBp+q+lqV0alxAEM0ZG1bSsRCNvGLk5WK0Obx0pTN76O4333P58r+dPr0ECEpbIVjIzJ//3o9cWVtjcGB2RxqMEBuNt9589Bv3fn08rJiyHVa+7yfesLS+PtoYlf1NJDrmoKUfjMdhHFGgm1sDb/DQzqOVch4HdV1b4zodyolbXD5V1XmjsW+4OQhZ3CHhZG/3Vv8k9TRtiabWeR+XN+rpRphnNh/IICClrxbNJWh0pmd3TrYnV5dPl+Ochai9V7LUsq6lSpNUCOIs1oWitAEoQiIZsqwoWOizMoscLUaKJ6nVfXlpMSzMgV1Hv3Lq+NtvuQ2rMkh6QRRvLZRb/Y35buzM2HgrAj3ceEplVyhAFLbixs5aKRqSOOol0c5sc3Fc9p1TRQW2qnfs2wVwAQBYyJmn43HGQh4KYazT2jgw2xRQQIUDSykbj3NtrAeLTDiHSm0jez5OIkKF9147AMCiKLPBEAhmVcUFm+z1OGKapFWllFaV0uh9LESjmUpVNtrN/ua4qpTSEggA+HYz9tplo5wRMhyNylopo403IQ9EQA3hXnugTEr0RuNw+aYD1Y69Q+xU2kWqjDYW/aNP6afO2Ief6a+sI0HvnKYAgAQ8eLedsITW28srC1Yb8IwQBETnPTjnPVBGnXXbIKjjcGx1o6DktrXuRKOKZoUXXBmF5QgxoMTbUhISWA2lBh4EHp3Sqq5L55FSShmTVhdlbjVQAh4deqRAtjkm71/oqwcP3r0QP40IHrcHDtu9IPhCjf12cdn/rRRdbScjhHrwzsNV5BKuutkoJVEUaa28dZQgodvwEDjvCSHovdLGjLNtFGm7G22bRUJytfSEUOKcfYF18jxg3vtClg4cR+K99Y47J9K4I4QfrB/75R+7+TvfsL+JW1Z/dZytlFlVlcEoY6OR6w9tpWA01NkQqhKyAgc5DBUf1Mbab418WyrCq1FOBP0L0eDbtaGEIBDcZnO5oJQhpVd/p4wgEkD0Hi0ipbQymmOogTjnIsYZZVHI2m1hHJS1UbpAaZLk6pLwyEMrQJvz+677gffu+eonv3Ds9OKV83jn3hnry5BPUTsgHJHCuKgGmZ6YSFINg83xrn1tzuKtc/Xmat2K+rMTk9M7D+3Yt0cV62HEz12+sudgExEWzq5AaR758kNtHtoI1zaLWTbxwD998cqZwdS+dm6SL3zx+PSLZ9IpYB2HY5uGvBW2na6dqybm5sbZ5nUvesPpk58TFKW1vjLWmDGRajCk6BeywaAvnTNhDZsjxUJVGRhYCGeniG2+/M3v/ctf/7vZ4XM3vmYq4jBW5spzz7zt1S/9yOInDh/tXXxipTl35+c//807Dh6Ymhkdf/74zMzOd3/vKx577tJyVb3rbW8ZL9bP3nvv5C565LrmSp63p9jO23oXL66ONrMjM82tpZVurxWnsDK4dHH9K9/3Hd+7+uEvgKx788lXnh1Fim5Kc8eRGy+dCv7xPv3xr2z80Dtm3vHDN8zvSwf1uQoVERrcEgRbu14fvPe1B7//zNyTnz79b3+7sDlcsM2ZTnuPztACCEqDMJBZcW79GI8Dwlld6jAKtLNBFNRSbvfzJCkVgU8ZrcssEKSUxnjiHQAhcRBnZV7mMgiEd14rW2XaG+y0mr1O9OBnP/PQZ
z6945r5r933KFjT6iZlOR6PCoLYm+0NBqUCNioXPvjhP9VojeBfOXVKaZdnlfEOHApCyrEJQypAVcbRBosjbp07t3qRcs05Q8D1rUEUho1u0zlJPQCBIIk9RM56pLY/yK32cchZTJY2h7/63k/f86rm0TvCCC6QqZLN3Wq99xYpo1SAgPxf//T3Gq09c4dv/NLTp7/7hw9B/KhZWvLiWjF7vVbGjbdoHE3suwnI7Mc/dPGD/zYc5thqzkUtNt6AgPFaOsIgSoNtvNOUtQmvmg6IZ0axy5f6YcAazeYrXnFPg8GX/uPvZbnAN/s02Tnb6iSNJAirICqtN9M7Z5bWB6VD52nMwmefOv7j73rn8TNrMs+I8YQ4ZeogDJN4Js82k7iljDTaWu3COArjdGtrjbtAqjIMmkEktNFoKQItx8MoBkRE7ytTGydDFuR5zhhSSgnQRtLKjU6i7rhet0YaqaSsObFFMU7jhjPEGJ8k7UFujUclpZZ6cmJGE5tlNUPbSFoEWaM52V9flVoHnBLwYQKHZ67Xzjdiiko7qymhtVLGqECEhBipNSEBoR0uqmI8DAMkvqQCs9E4LzIBAm3Qijr7ds2e31i1zrQ60WhcUyDNZlwV2Lrutap5aEHNtjyrtZiYmbWedRrh5qUneLZ1/xPjG7/nzk7YXelnSTOhnAL6SIi1kS60acTtvNZh1JgNbSVVqa1brTJVceKUdg58Wbj9PXjvT8xP9wqoVW9CWCud4w88W/yfj9RAA8YJMlJZTQGtdtIZr6HRCJS2AYcoDmIer2wMAGitnLG+36+owDBsjjT7wQ88Py633vPO1/7E99/kN59SpJxoMakdUotCAA9sWXkpPVDPlM/GGLYpG++7LbB05Rff/6KNjfGPvfdlYSge/+zxfddMffkzJ4mNxio4szZYj3TcmBuMFojafMPdb3jo2aWV8fnbj8yiW7p9v1aF9O0yoHz9uY2k03BQVsW5Xd/5BkJxXGw4rCqzwbybsWf+53u+Z9fu0/nWM/2trDm9j4oo5DcpP6FGtZNeNBOHpazSpfP51+5fnDt8203R8MqF88a4rFZlpT3hvJEobbZnQRinQZrOX9MVUQRcNLoxEnP9jTfsnN+nbDg707Za1UXNhEekFK0D0ugkayMbdsKgMbu5OvRSVLLeKseNaVHLkgrBuNGukl5V2eblRdoOmCcQhalztNaDqlw3tBWFDVuZy6sr3fndUWdGey9E1Jnb7cotWY6srcq+3FreOHF8aW7P/pmZlwI0qlohABKqpKu0Vowe/+YpEuB6f7g5yoUIWBDxNLz1xqNnz5wPWegdK3TVnemWdVUWY2sNACBl4MjCcr/Sdu/OKVVlWkvnFSXWeGsdIhKCxDvnPRAEpMxZoIjGOfSaUpqNslpwiiQMU1UPAACRoCdwtb9p+zaGysKvVJYgM85bpyklznqCnlBmvUcCznog6MnEIycV+Uz57W9q7Z7UjlgDJU3iIO7m2jlJ8wIef3r4jQf1woYQzFN0ysBoJJOQ0AawJFWuc/b0YtKcDDuJNk6IJnVe6oIK4inb2AKlqa4qXeftThpEDCjmRQHOMuIoY9e/5I5KYUInqmG14dYbU7tsnVtjgSIConVJGNPaR7wzc/BI6cdnLj3lUIeRqKR02hIgHhx63D7r894zQr3zhJJQcOpxc3kEgCwC6kApBd5ppRA944QgpTTZu/flGNCyypkWGkXSSrWVzg6zylNgAQcgaED0deuxlT2f+dlvvON1zf/5069rtJZsWDuogEQEwVvYNgxSZM557zSi8Sg8AHgLQNBT8ASRk6iJQbVr//h75uGOl+38+ANXACCKAsoJejc72waHQHFxQ4612ciL6VY66ue7WkFvIh1uDndO9qzRszsOLi7100g5iuPxmHE3021FOHfbdbsKZZ57/tKx84uXizADbhkXAEAcenTGeiDOe2p0moScY16qdTU2hG1e8adW8XzZqauyrsdxOuFtezSSe/cfZJyV9XhF2bUry8nM4cWFanXxrEiD5ZWvmqwYjzJCkVLOKHhg3hKglAHR1htrGWPeoDJeWzDGM491KXdNxAd3VrffkNx0I7vt9sgOR/mAPP7kudkj1/RZ9bkvPqpd+Ykv/tuFDTiUesJWfVxSFnue642tbI1jdE1ZAUUrokRJSxnhwcSP/shP/fT7fv5XfuPnj9569B0/9PO84VoqV9oSEQFGcdBReh2cRQQAB4DgPfptnc+xbTJIBIzidsyQ0ooQsn2eyij3ANZ5KpjzOBzkLOLf/53ftbi28c1nnzTSMIKUEM4T6z1jzDm7XabmwDMggeCMUqOVrXS/n6UTTQ1WO2tKSSjr7Gzn5fjonXf96h/8TsSN44QSsMaZ2lFBAAEJA8LAo0PnGQmZ8AjWWQ+OMhKnUV3XjDIpq1BEiMAoIYhK10kaM5EMRtl4mDGkKNB5zyhllG3XLBrvkigySgJiGHCjDSAmcVqUJSHgvd3exlFCt4vP0HujtTOWECyzohHa63fEeycbw0HeTcN2Gnhtug0xu2/WAVzY1f/Co5dAq12Tfve+qbJWlCVxnHpVICcEAkEY8bY3Ecm8WNoa/iepiAIHQhy6/uUzMxN7p+auvXjmUiMmrckJXaeyGjCUFoOFCws/+K7/3ZxKPvmvvxNtLr7tzS/+09//2FC6u978yq3B+nw3+vJnP7VrbzQx3chGozTgeVmBXf/gX/zq+/77Dz372L3OcyuNXN46vXbJ/MQr//Yrf/IT7/5/Baenr/TL3vokgwvPnTjzlfvv+a6Xv/W//PK7f+BD7/zhX8gv3/Hpj/5us9nQde1qL0CefeZ+CNMsH33gd2946UueL8yFwOJv/dyBweIjDx7rD8+OOQn7xYYFTSOCJP6O7/3DD/zyne+4Zwcdl8dPjT5zdrB4KYE0pwHNtI0IDUTgkVSycOApQZaGjDIlfRTFQZpWeWHyrNegh49Oz86EjYBPtJvaM2XUylrRTMKQ1UlMNPUTNGLUHz50ED0O80Q0JpgkPIoOTjWMlp3W5Hil5p7PdlvjYtRIk+HWoK7A+3R+z7UEp0S6R9XD4dbSxMxUd3J6sLHKRbirs9+Oh1lxMW6lraa7cPobo821IG5MdtpeGxHxN7/hdWsbHx9lLuLUUa/R/eHv/pqFtN2M7r715j/68y8M3dn//mv/qz/Kn3r01Ghx+d3/9cXLKxdYFM3MtJzCoM3jSXLpyiZGYKx3JrAOrAk2BmMSJRHxMQl4yEyRE06cIsQ2eTpBTJmPBhbM5ZXzImTMQUTJpPA7j8w8rNYIJaSR7kh7RZn1ptuEhUev3Tnon374uS+3w2jHrkllqrjhHaPZoBBCKIecxEE65VxAqUjiloXh2qVFYyri3ZX1rZ3Rznqd6IpcurwapdkMj75x/tTcDXt5Ip5/7uT83E6EqpFAqxtXxepg8yJG7UrrgKfNRoTeyqykdCS1DaKAsWBULoPDgPCkISqLYwt7dvQAvgYAQRgWWRU1Ysa5NZZQStETxDBgxrla19uXM8EZ4YQxobQF76NmWGlTZGVdE60cAnjn
PILfRpOBNNrtOAg4ZVqaLM+VcdaYMAg4FXHAtTYBsoTQkgASdEBETK11Za1drcG7IBKqqgCJVpIwarWqnSbeBaEQgnunKS8mdmzuOWjbMySrycrSxH88mD/+aH72ktGltJZysA41eOcpcVdDdzxB9N7Xlapl7YEAOHs1GAdhO8AZEAkCemN0I41yjc9cJB/62PDdb41ufVHcEzZoVAQJI7GRmdIDJaPVpWR1s0bvGIKy4ME757231tpzF88T6wnZVjG899YBhW11f5uq2R4Yku0fr768ByAUnL0qn5CropC/qhghvOBF285eoow7a8B76932o9veNiSUEW6dddZa67W2iFcjjbYxJPDgwCJBQZh1zlgH2+Vp4J3z2igEpJR661/QYhCRInjBBTjHaBDFLeMJ9/ol1wU//t9+ajo+npLn6myzrGw+qte35GhM+n3Y2tKlIVlhdAXOkFFJNgq2NDKlstaRbW1rG2S6SkQhXrXYbb8mQUIAgSDZJpsQERknSIExyjihSBwCp9SCQ0aJh0DwMIzDIFLe1M4JgEgIioQaEgjOk5QSSp0TId9eC+Z7M8Y3vKEry/lLX//WzH7tqW+ctrm65aUzp04M8nVdVtox9N6A8zIyslKR4EwRWwEQ++qXXj8337u80J/aMXPswvE0gNiIcZ17h6LBZFQb1N/1jtd++l+/6ilOtKKnnz7VbUVxEI7Wy6Vxfs3hVtzk62s1r7GBcZQ1G64tNTckmpiYyPqXV1aejgUhBtuNNB9Zm7sD1+zFxQvFYGOseJ5RH3Ie48zR/f1L6zziwDHbHHk7Xv3m46997RufePqhlDcNXJluzw2Go8//+ydf9poX3/WSI1+FexeePE2XirODJ97yql6pso2VwZ6DrVf8lzd/+YsnXve66/USWTzXN7pjpe+mjTSgauD23bhr4cI5qunWckWbweJg3QZ089y5//jCx28//KJnTz3BbPOua6fHV1Z3tboJxe709Gi4Rqvun/1l8Y8ff/z7v3PqzW9Le9dyEsialgrGQGNNL85eO/WWa9/yhl9qjJZ6P/fu33nu5OUgaDIWWu+9Mx6RJQEggrNhIpw13jgaB2lKyyIrC0mRNVphnpdVqaigCEAJ8wSUd15VhCALQs6prEwYBWEotjbKypjZ6WhzMdu5Z3qQjYpRyWKOFOvKaKOcBlsakxsHtTFqLAskYB2pldZSOg+EUABw2mstW+2Jianp9c3lZrNFfRU0eZkV3JMwCipTtDtxVVPiOajaGBO2+PXXzo37di03RBaBoIS7Os+FpYlor14MvriwNXhY726e33k3n55UDrklkaUUQC4e/xSUKuXxk48e77YmvvxXT772JcbbiqVjy5oiTZRy1vtQzD7/Df9X/7BZjFq92cbWcFQ6EoowSeL1rQEXXCtV1nUkVGc6nem2nnkOAKAVB2EaydKost6sCg8bvJO87J7vO/v0I5cXr5w8dvJMo1MiveHoDYf3TItARGm6I26uLa80RXLHHXsfe+i5088/iXFMvOYBo0BUZgGwrrbAZaBDp40zRohQW4cOGo0ZTqnUssizJG4Q9MpY0JohWq2a7VRLpb0PGA1YaKgyRgJFafPt/U093uS+Qh3IrGCMKakogwolAI1EkyFvNQIgZZRySttlnbEwDGgzJGlebFhUOR3xkHIqwLmq0rKup6ZmGSptwICqipzwwHlLCaWUelDorFa6pAXD6MDOV5w7d6/XW2GSMBJEvMPAjcsxQ37D4euePLE+zuVopC2hlXFymPGoc/C2V61VkoTthsWpxp2rC0/3l1dJjTrrl0qe7dtjzy3umhQB8VrLWsowDMfZRidlLccZj4hnhSaCEM5NNxKCDza3CsHcYl87Sggnl0f6/JXRnl02nSHOSsJoU9i7EO88DPef8DQgaMF76ikFB0q72hiba2CkGELEWBn5OA0IMq09pUiQWGOLbGgVBiKkUfy3n/z6/Q88FmwtvuMdr33z3bvnDgXlaFWPJRDNqCcJsbWm4K0svacQMGkM8Z4KNjHfQY51rW//rld633g5j3bv222z/Mxz5+f27Ajj9tqFwZnHHnjjj+5/xyn7+x/83G/80h2UvrJeOBbO7757YbLdg0pLF2AA2haVJIgMXCGnj1xrKa20+F9/8tqVxx8uLhegpIgb1drmice/cuRlb2E8EUFo9Hq+tk7bQXf6mg/96Vf/8dObpRuAHk8kycFrjj5y7JFGN82KunT+uZML22tBf2td6XLfvll0rhGKQKTDbJMTNjnR9WFKOdrCUQIijK2WsqjybFCpMk6gP9bjInO+SloT2kdCdBvdqcgPNldW4qAeb46LonTMVdk4FF3GiKyVUlj7NO1MdzrNYrC4urGwur7s/Gh34CZ7nUpq6ykPw1SFF09frGoYb46Vrbc2l0ab672kEzWSPC+EYHHIT16+OKhttpWLNLAh7t05v3h5o9lq7pye2NhYak00S+u3BkWZ1bX0SmujrTLKWEcJsR6NhoWVgVR6/84prYokCbS2W0NlkV9lnz0gAb/dpYrOEyTOIfHGODRES8NDEQVhlDaLMtu+3dlGo71zhKBgYI0lnhDqQ84qbQmiB2+tcd4CEM4Z8VBLT8DLUnztoWxjDe+5I9l7iLW6JRJZj7YI5YT3ls9X999XXTiLxoeyymujjh/3+3fEBAjYASVJlDaqNbJ4KpvZw0gkwRgaoie6Lszapr2yHHLhSIRGgtV2WAxpFOWDDL3dMdkKguCbjz3x4ld2dx/aP1jeWL+4Pn70yUtnTnrivXPOA2Esy8s9B3c6OnfnG+7Oy6XNK8c82G2pEQkCEuK3ezacs9sxAg4IoZQW47qZhmEc1spYrRGwKurtD4EypIyAB0Nh16E9YTtaXF9YObNYDtwrX/PyJMIvfOpvAB1y5qyx2nmT08CtXBlHIfvaQ2snj/3Nf/3um1/6hkM0HDqheGgoc0gccdvfnvWIxFnqK0ALrrZIgHj0mbcKSICEOlsGqTt0eBLgCgDs3zMj0kZ/a8MqGyJQhq0k2hznTDRiHhyab080OcXx7S/biZWva3J038HHvv7QhcsrL3rZDYS68XJulWlPT/o6d7m859vuvLu0Dz5x7t5nTq8WwirkgBY8Qe/AW+cUQn+czTbpnfsn3vLW2wPjvAgff37h68fOB9HEvp27Du+aX95YmzsUjIZ1XpcIXJVQKbOyelEwGnpXbY5jY2uCYRhaD0p7AAhCHgZRWWXGSmPd9t211pW3mhMSdhs1uJv2zt/elj/zg7cpdipqqLK/EmifdsM3vr73zYt+5bj2bmWil1LnQjm46c6OT6Q2Fr3aXF2tF8jjj8XZcCJg681OU4Th0sKa9MRJef8D30i6rWNnHnHu0s+8792zzZ2/9xvvM55TJAgobY7bfjwA8J4xDoA8EFwwcEZrzUOChIL23loHjhBCKTrwzjmwQAhhnBGKstZBIIxjHZJ89qnPAffEEkJQSYXCWw/Ou+19hUe00lAREsY0pdH0xOvveeupZ8+i5z/6oz/yCz/7gxCF2TCnJdOe3P/ww02i9u7YV9T
NpYXTNKAKpX9huM7ZvBg4Z1gQCSry0cg4a60PhaBIrTPEQyBCJNtYGzoAbRxlXAhSlgV6su0pAwAWCPSgpNTaUM4BCCJSQiihGqx1Ni9L9CgY51xkeead9c5sH64TSh1BFlJG0RsXMH/XzYcmQotkKtscNEI3NTdNPNPAVVVet7cz2WsKMKY2gnkaOtoI4kanGJGI0zCIZDEgts5EtLFRPXry8n+SihCprDXh2Eh9f+X5Pde+UpY7++sXsXaCsbwYDNVwxzX7ikr+88d/9bt/6G3zD09vLV164v6v7d65a9S/mK3ko0GdqD033PLai1eOETsOBcnH49k9e2B1q1p4/AO//tzOyXlP6LNn+3/yN3/3/g//1Zc++TW/9cREEFaBwygY9J+vZGuiO/3xD917w237+g89voOYf/vAe2fDvN3sgdda1UJwIMpKQ5xwOUmMl/3nt1Yu9cKZCOX/evfuX/+zrW+eKhgTaSxGo9J7TzidaM79xp8891t/7BHpWkUB1roTbUPBaidECBaUqR0QLhCd0w4I4cr6VqsFyplR0Q3NzKy48cadCNidTKda7cH6mESs25lQslxcX2i0p4Ybw06z1+6Fg2GfGJ2ErfXVbH73vigm3Ua0vHw5ilnWHzc7YRTRc5dXHbVAndDhxORhJFHc3lkWJncBomh022U2yrMRpUmuSmCZHl00cCUgyelnT5am35jdNdocV6NR3GhVmwszvej6w7seevoKAokTYRU9caqAABGHj3/zb9O0NdHr/sdHPnby5FpRWQaMWlGMwWm2e+fE2uV1RrQurEDWbE4nUSOvVRSIlqtNnU91J7PRQIRBP9+UyjjvdF4jD/x4WVeKWR0yN1jeiOOWB2ZqORpKxcze2dbyurlwZbRnijZjB8raqnr2oWcIaEro2nCRByoWpEDAQBFZOVCbw7Ude65XVc45gDXjwYr2BRc5Z67O6MzcUcND68zJM2eW1vq3zM9EE9Haxmjf/NRwfbi0trF7fsfW1qnlE980pOyv9r13AYmyYT6zc0c6sXf9ynPGls5ydAwcQ4O6tof3Xfv1Rz7dCxoAYm6uU2bl9iyoaxmEgTFWSWuN4YIxJIxSxlGVtdU2jmLKKGXUK4UIRlvnPGUsYAQTX0utrY4CoSpDCGk1IkaZc946zwgzSslKld4xzrfLHgWj4F2nHVvr8rICSnjAKmWMNiJgUUDySnvvq1oDgLcGiffeOASnHacUrCUWBVW9Cb13Jtm3v/Pk82v/+MWtJ55d3diwpvbee/QGwAMQ5x0S4gHBba/bHrYNwM45s90JBtu6CG5rNx6ccwTAG79dEAbMV6V7/JwbfGz007zzspgFXYlYO1Y6L6tcXjrH/+7jxbhoQIhWKgKUErRWb5NAThvCGPHgwYEHRLRWE0QCxHlHtuOBrjrL/Lca0yhlRivyQifZdjkCkKupRuSqpHKVS9p2XfmrYI5HfCH7CJBRprR03nkDzjsAT5B4sN5773EbYSVIAIBSRtBbW19tVyPbVWieUq69BwZgHYADpB6sd44YpIQHJMIx29Ei73nXTa98vRH8OTVaHslxXtBSYj1mG1tmNIbBQNaVl9q5Co1Jlvr69Joe18p5RICAo7F2m8H9FigFL4QYACAjxHnPOEMChGwnbQNnRAhGCDgAzjmiBySCUyooIAXnBKVRwDkjSZAYRMG44KQZhmkYREHIQ9JqNGulPLvKU9x00+6tNWoDkjuIuWqFvW64sXR2TOmIpvPULyGvETENA6edqk2aRkRp5wmlzbGpL2rYKqpoqrGglnvXT9JQxQ1+6+tnRoWCwMYbLFvr33v8seaOTjv1V05tHTg0s74ySlJa1mZqOlJ5hZJHNNR53mn3Ounu/uKii5IkTPqrp1vNiHkkvGkKqVaLhKYtNXnqH1aOP3BOcDJ3Tbrn1j0Xr5yvAjM55fdMp3t2zJ+Xw/W1Qg/MP/79+3/yVz60e9j510/88aE5+sg3vvT9P/idt8zLZ+977CMPfpKMk8Eld/vBI7fcdcudr5x4+pHHtupscpdvNofXHWGLSw8Nl/zd7/mx/Yfu+uiHf62xgw1GqxdPZm7dDmWbjuVkMhVQUS6fnzgihInPLWw8M9qYnpjaWKz7g/zm/e2U5Xt2BWNUp0+vN9tiFkmdBx/62Obff3L10D77xtdM3nXPrmS6EE2uMHe4BkA8zKbz8c/8wtFffe/5ugzWsjGPQKt6u/ZeVopwuu09bEaJAVdV2nLx1m/7idjm//xvfxtHIRUADATyq1hKWUHAkVBvvCXeO7Dg60pZbcCa82cXwJKdZBIS0g0aJ5+9lMQ0CHgYdfJCW+IbTZ4XBRALBIDaditYXsqAABcEAXVlmSBBEDEKm5urvU5cV6NK1ttUSLfTNNaV/VEzmspKORiOOSO9Tiuv8kkeL28uCtHYu3vy8qVzjU5iuBEi5o41CJ9p421vuZ6ZsnmNcPWGtpus0fZGyuLKR9//R8urXToVtdKYs9Zffezy6Wfr73yzmD6aZ3jRyy4Vqd2MfvO/fenzT4Zh1BNNO676TBAQKBXUeRWniQXnQDPrIFQW/GB0FehTdT0aDYyG+blp4xwSCpTltWpMzBwlMAGaRsHFFbl04eTKlUvTu44evnYvMbVwEOqa59Jqf/T6G69UuZHZaG2detlMI0DnvOc8KGRuAREJeO+tuYo2Ms05IwSsd9oZL1grbWWDzXE2ohwYY9Z4QFu7kXMSCQoeeuWazWY+yuu6QrTGGu1MTCORNIuyjtOmrI3WcqwzQhkSH4TcgfcWKSDldFQPAAzxBDwSpOBdLYvRqC9CNj2949jzT0RR5LUhBBkjHIWUlZQFeMso95wIHqPmo2qNRdxBZl3qgLU6qXGj2MVaaUrxJ9/9+vu/8o2nTo1HtefEG2t1Ofziv/7xy77rB6Trr6+udSNPvKjLqrY11arRicKm6EvXrlxeVZJSMF4bKCuH1HLOAoomoq3JiSKrSqnr2odcTE8CoZWxdr30jpCqZk+c8LccjMFIyjlQzDZHpIQXX5t+9YRkjngnAyQJh8T5mw9M7N8fDkdX2tOtxx4enFvRW0PV6sQejax12ojStLuxvmqcY4IaaiabRI6qxY28GPqlz5w+bw598gf/6B2vmnvnO7+j2+SNRpX3h5QxUMW4gICSIIpllaWTE/Wwz9OUcDFYuDQ51xvnSc4jfu0Oly9Mcq+GTxMazN4Wzu0Py/4fV8H4N/7iZbVetOO1oJcaqtpHmiiNIDOEOyKXbV41WrPZYCFJZTEYRo15VqvanukdavEozlfXPDXxTO9wfAvvcmU2jKtZaBu9rtykf/AL//HpR70yIm3QzbWi7+UzZ55pN5qA4KPQO7eyeXUaXDy/dGTfbrtVIHc6SY3ynAptjHWGgXQGGPOIVFsnla2KohiNkFJdjPR4DFJNdBKpKhIks3PtPFtzDGZmJqt81RILxEiZGztRFgW6QmajMsuCsIUiGOeVrm1ZqzhJfF1snT++KzlCq1JK48OQUDM1lRo51ty3UqFkIU1tnEGlXVXbqjj13KlLiyult7ryWP
sZhm1MGoKLBB5wIScOgCYqGbg9msl7byWLTz1mwycaZZWR40VgWJ2tg0ji36NEuLSWUh1MZqo2HqTKNjBOKBUMriXHGsa2ONq6vy7hS8eukGoE376eadurW8+PLzT0eyWl+SVV18+guf5VkeeXv58tZCt8uilCD8m3/8L375t//7c0+9tvBgjgsh8lZweajquJXRQksqwUvOaBzHwdwIeq+/0p8cOJ8Il/EhMxWVcf/0zEIz3hX9dHxt/8i49v3nyCzfK6vZeLJ08sz3PfKe3/25n3NWryyJanb753/yJ/7Fbz0ULTx2beerpoZxWaHDuVFpO3398Oht73/niRZefua1/kL267/77CGJJ0HgvO60IhHCwnInGiT7h+WNV1/5qX/0v37vdz58jC79yI/+WElmT37lqY/+yfd86g//IKTM6L293W8urK0s9Ncn7oAnemld9paW5pg99NYLN6+8eOye1XG12zQ6ESgFjRgLxvd7Ioyh014/++B9jk52ji6b6k5+/0Mf+aG/SiaTwthvNTc0rlzZDVMtByou5tq6etBmbvTi7/zmF1Rwx1bX9q9vr7znOGeDqzdm1cHK7UvZmcdO5+n79yfiSM13x/vVUNWj4fZL39i4/ML9q/FHf+itjZkeX0v/4Cv/bazH5x44fvXZl8wUPvi99+weTLdvbjNG19aTWzdng25caa1qJyUwioIRrQxo75qqKZtG6wQgtvDB973to9/7voUTq/3TxzuddDZVT//Wl3/5Zz7xrTe2yzimqUxzzmJWzBrOMUo5ocgkbmxu16WNMoHIkIZYMNWYsm7OnFqtZpYH4m2Qkh0NJzwRpsw+9fsvd8P813/1rzn/X2HVA0yJn1LiArOAHIFuXZf7VX8/nLh5rSSRjUC4ULOIoqO6MSwBsMAZJRF024HR7I0rG+12LEM4mk2Xu9m5U2e++LXXeSIbG5QPQBnjXOSiqpS3SmtKkmTraDLZuvmWh06vnOjSlI83h+LNDkfwHpyzIUCSJt6GqqoYZw+cPk7g8MqLL6+s98ZjfbS5nUV8tUfv7F0+uH379NkHapTTQGtDp0eTdi9t5YluFNKIEtqo2lqfZXlRFhbieaNpMLGg1Fmrg/cujlLrEMBFae4C+ICMsbuyadUoIYWqrfdackGRWnCEAPWGMeYgKF0LJoKHxthWnlfV1Fk9b4IJhgaUPGqaJnjLeaibeZpmIomUtUzEnHIg2O4sEtsMj65712hP4rRVVlMZRUkcN0poYwI0lBMgQcrI+RBF3arRPE4IkdY01lZSxowxb7xzGGQi4+XA+42uZUSC18YEp2vsRFevv/HIuz42nhSNDxx4kkXD7X3J0dQWhD5+4kLjPYkylKMsZ9c2L0XjJk/yZlJZTx948C1XLm3U5aSe7v/27750bHEtbkVnj5+9dfWlrLXcXszSJDncGs+G+sT506qplbLEBeZELkjveHtUllWluYhs4yhlsYyNMzxObCc0EzOjxANRwQHDiGIeU6VN4yEED94DABIiAy0LXwQnKSz0kqpUHGGhxSMBjAtlXFXpQKgQ3FhDWGh0FRwAQSZYrbWPQDDnnPHeE/QUgXFaGQPokBKnQtqOO1ES5a2d/R2tDACCD1xy7+xkNEbwXMr11fVbd27Hiay0VvsH7TTudVrW2Nv7284EQkkIYLQGB86HViS6Uo4Oi+LSKwf/3Y4aMpurrB51etlk6tTosBslK4ut9UH63gePjfZHRYi+/dTXVpcW7nvo/N0pUHUlDsv9g1vzrdvoaOGUKjWP2byuOQfvdXCYpalMyEKvW+AEKZw9e/7q5ddI4MiieaM6eb8qtQXOQuqJTVuterpXV5UQKe+mU4u3tkcpk+MX3d5hOrGW0UmrF1PqJBMIUNe1mo5XVlpa1UuDHC2OJjPvKMNM1X5ia22a0Uw7z4taLy32YuriBBJOb964neVtT7xIeABjDTkazgetBHzjfVBA+svnJ1tv3Nm9wqhPGURxoq0ddGH53GkWL4l2NB8fVJMCwujYcZPkDVL0TgIIDD6Affe7j//8L90YziPCmXPW+xBFMo14UxsXguMOHQCiD3f7PkgA8MFzKpkgXmueUmOsDUAoC9r0MrlIGHo7qUxtnGGEcwFIlLGRYJwQAAQkSCkF6oIjiCGAsTaSjIJlhAGAh4CEOGPjWGpVEwBAChTCXYcAtUVRBwzWeQAMPkSCMk4ZurqeI0VvbFXWJJEPnHugbIor168prRmDNJNlXSASxikBSLIEkKWdaDaZhuCN1UC9taACmVVzwhn42d5kO4AJJGBAxkQkkihKi3JolAkQCCPTqmREeu+0tYQy6yylDIJHDISg954S4gMGH8CHu/4gejeWRvHN2u83u5zAefvgmbdO9HDvcM9bw5BHUaJUFdCDBwjg7+bU3jza3bV5YfABAlJCQ3BIiQ/BO2ffBHEN/vArs0ff0jzxrjcJaNvbo16eiH539UyPNX6+ARsvX1LVER9kAeqprXWczjTd2bqzvLKYxnJn66jXaU9BtzqtV1++3FtcTjjee+H+ozuFiPKwpmdHo1k9D7LjmuKVbz7dIlk77S20F1sDvPL6pZP3n79+fXPjxuaZ+y7eqW5feuW5y1/4MhKX9XpsoUfTKM7z/tmF/tqprZ3piVOLSRRffvXq8np39/WdIGJkYePW0cUHIsCCpz1gifPWmRrQ6csOa2gAAQAASURBVAB1WWXtqNvxgcjvevD4s3tbo5HzbcY6q5XWzUy1VtLhnmqOik4vHW4cBBrSQZvi8b0N+fXn3RevijKYPCPzAhijQIFG3BrLGNnd3+2d6eWJtKaqXSFJiBc61PqDgxGX3FvgMiY8MnUjBH7ovemf+cuDX3rqS+cWf/ilL/BnL806i6vcE8j4dLhHgQYbiMQ4ThGIg2C9AWdCZZMkKqvSA6FIjfZ3Y4neBYrgtYki7CX4jgeOPXh+/cEHTw73jyZNHfVEPa1l1qKMD1ayWJBz6dqZR84f7I9eeOba55+7M5mWtWVMIKGISFqZDMFTYMQTpMx5a7SOoiRYZ5z3YEUU6caAByaYp36x6/L78zv7OhNIGHHeG2WztNMoWxRqNDMyySNGkBEqKUObxbQdo4xCkgnGRCsejI72QBKrnUGqG0PBUkkC8RZ8q9f3ItZNRbxnjIB33gePwVvjgxY84QDeBBWYWFi2DJAntjY33ji4fOnZrNv92tdeurKzee21l3U55ZlUjXeBABLrtPMWAjBGEX3wHpEgkgAAARGAIgWPHgKLxf9wKooYscGZ2n7HRz5y8r53/Oy/+cdMzYN1zgMhVFlDvKckUMBpOQ3eLy8tT4pR3VQUkm4rdxZc4+rhXpblvNOqamqr5sS6e+B8/JWXPEPvrHbOEkJ8AOJh43D8wIPxrTdujXaPvuuh7NWHO59/YU5qSSm12omIhhCiNEWg2hur1NdfPHz/O+4h8bw2ta7rNBYybV3diP7Tpw7GjsuYDTIZxXRlIV1Zbns3LWfjJBbUqqqoQbP9ibpR68cfeniQh/X1U1sb02o4fW13t7vIE7mQEDqcKZeIkAC
P02JSzLRmjA3H8zzLeSqq2tTOJ91ezhITRFzl2y+/8fJz35rsTjvH20GQvZH+r//l95zm7/nee4/m4yaAQd9Yd+fGtf1dw5P2+rGHX3vjhV6nnQpwgXAOwQkCEJxPkmiibRwv9QZWzW/fd99jjPRCK7tz68lOu89TaX2HUD+b1qYBDMZYtKHWqg5sItM4WOLnRalllCQC6cyoKG8Fz5Gz1lIaaV/OZgRIL1mc1zbP2HxyZIo5ksLHPkmzdr4KBD0vm8b0+hemo+tVM+ZywJNs8dSq2RnJOG7qQulqf3iIGGXpUpS3oSk4T4p5xehCEmeMsRAfGDvPUrd3UEREHNw5Wjl+KoCa7N8ZJG1JcPncBc+S3c0DoufH722V1WE9M8HUGQ8Ly+LsSmpD/OIboy1NtsZ+PB9xQoxXVeWmE/We91/Ms7JUVT2Sk/0R5VzGYdBvpWnwWDfOTA8Oo5z3BmvLZ+7bORznrbip77i+CvXUBUOcV3acRVJGrdqZWjUqCEe8qvid7blwvFb07hTEMS/L2iPq4M2s8NqKOAYE6wGFcEAkJ841x9YXkQTCSKuVzeZFMZ+OhzMPAmU2NzDX4Y3rh9vDYn97T5dGjYbzqgL0wRkM1oNz3oGHAGiD/4lffPbssYtSFIYaaq0NBIKrdXx4NK+1bhqYltgYIN4iDVWjA9xdb8FYAEqdD5wRTrHTSU6sLZxYXVpc6g8WFuM0tgruwk0Zw3aSGu2Rg/eBAgrEpi4AQsywlbGycrNZTQlFJtC7biezzlVFVdVaGxNCUI0mHuIoNdZXk6mDYF0IwXnvZBL1+l0XSKNdWVROWyI5BgCCjFLjnbI6j+H73r/+D378ieNn0igBFnQxnv3GH1x/8vnATDwcQZZHvU5nPqkabWMueCSaWgMioxScj6MIAGtlAGlADIQEgtY74w2jzBqj6zkyZrUhhDHGVaWAIGOs1qZROnh0xiNF51xZVYwwguRoOqrKyrvAOJNcGFOPR5Nh8ISAdTaNZBTh6eXjGJSvFWGMuNI1RVMf3trYm+swnALhbyaoueBZJ4mjiBMqpUAClBDnglLKOeeCi22SJJIAVo3O0tR6bElRGNfv5LaZUUjzJCmLqqyNZBEn0jgtBO/mrVr7pvYBIBAmSFxb0N4POq3F5WRr/yDL+1LEAoHxcDjeR0c4ZcGDMUFI2Upa03ld21BVhdcmjWUvz2rjGuMYInJBCDUUt8cz5zw4pM4b9SYBjQMbTgrUfCFfffYrrz79xa9S4i9/+dtnHjn943/vR3/h3/0/XGJ8/NQr374ZqWI43y8qi5xdeOvS6bevbOzd5nE8PhxSWptmP5imv7SSINGTel75yHF7UN/c3/QSfRYtdiNfaU95oJwJGasmuEn34dXtvf8vVf/9rll63nWi95NXfPO7c1XtCl3VVR3VQa1WK0uWLWOwzQA2wUOw8RhsGBjmcA5zhjADA9cxmWE82IwHnC1ZtiyjYFmyQquj1LG6u3LV3rVzeOPKTz4/VPuai/U3rPt61nOv7/fz2SKtIUW0vXqhtzg/3tiot26itt/bnNZWPvDQQu9MuP32Vz/+/T/xqT/7U7/4T/+Hfpsb0xBG42Ebt+LvPPdC+0z3cGNn6yYEPGosYhETzC6uJUtdtHX7reWTJ84utM/2urIx1y9vXLm6dbB9830f/8DDT/6oE9HaubXu6fTE6Qe/8Nt/+OE/8WNHh+XeVsVk2V5K9+/mW68f8PvoD3zik8+/+FLa48GgK0sopgoxt35hcXVt+fk3N4K0nE2OOi129fXv/uTf+Fvi5BO/9nvP2dn1xx+/+Jf/1l+0rVM49m8/+9LXfvNFk03vOzvY3tkYtvET71nanAYPPvLM/s3PfOJTH//a5z47uzs9fenidy7ffO67r/zir/7EwXjaatNhK6I972p7caV3YaGzdLaXz+anHzhTzsdCdBdX2OHx4fHO0Vsv7GgUSxBZVZlSRwkvR5NyQpNEcMaY1aaqlGsImJCg+9aX7jt7/iMfeercuYcWTq0nUVTeuFlVhy1199f+6ef+86++dFxoHOB0occomddKW++kpZh6D3EiRIRVVp1ZXLl9uKeVFRGfz3IMlmCSpolqvFTuoYuX7uzd0rruLqSq8f04QZOtn/npJ0h8pTy4lfhFIw+V3AjSS4BSbYEa9fK3rncW1/ePqy52adA6GmUIWySYNp7HgTRaMMYppdi+fPmlYUe859FTdW3Pnzr94nevnrv00Ds3b06tajlmnFFSUU6kt4KTXNbvf/SR9z789B+++NyVK7fT5WSeWa7U4krqE1hqiatbcK+eIOK4KitGMULIWVZV5eT46JXd6ytBXGayrq3Oc6yzW9+5s9BOhpeeNqdWgAs5rag2YRLbunbIY4GNQpwhQIQRggkTgcAA1mrrjHNIiNADq5pSiJhQZonDTBitEh7ISjPGGWbOWeQ4Rhphao0EZBlG0jSgnbGMiw4gIIzKSiGr5vk44JyEkdSa0QAsFlQoI0sjA8EY9ZRawoXHMQZSVlPwnhGMQTd1EwWhp7EHT2hglNTEg6MWgBGOHaLI1KauG82FTOJIOstEYJ112jPKTTMFR01jTDGJly6iIOVxN59uA3ZRGspGasGmZXH5zRfuf+gBVEmlGlM7QYUQuJwfR6qd8PbR1m5/pR+kabqYbI/r2fFRfLq3sHp6b3djNC0xAAaNdX7p9CrCweH+4VvV9ZYIBMTbdwrCTKU0A7x75aq2JOmkkUitY9nBKI3CFOMw8jFnBEGlVMRQGLdnh/uB5EmA1hfouFCZBMSJ0lY7RwkOMcKcZIVGCDltKaXOgbTeOu8qLZUFhnutZJTPeBADwpgSQLioFGDknLcerPUEEVkr5wAhUAphgpW0jOEg5AY8o/ienU0IYrSdmYJZWzcSIwwA1nlsHWHYaWONVcZtbG9678ACR9ggVynJKbHWeuQsWGPsvaYSxaAMIEa94BcfOHn7oL787KuNs0EahJ1wPJqUTfOe+9c//Nij8/3JA+f7f/ilbzBsHzp/PzU7PU4uP/ete2dBvwP5aHY0mhSTURSG6XC1EMpKlcQRIogxsLVppEZelIU3Ll0YLt3aGCM6SFg6z2bEMyc5sx5c04v7k1HeVCYwLEJRZbjywaQw1aJI29FkglQVpFFAsAooberGo1JrZaocCBwd1lwwSVDdyNL72CGGGXZWaelpQANPiYtWB62AhhhWTi3KfNJfWrGemKbMizkgqPPSGneyL5b6yeHurJ/0dkY3kcltKaMkRoKoupQetReW1x+4eDCa1VUWCREgHAtY6BUEKu8FRhgI8sghzgbL6UpfTAvvLKhaUe8bV0ktEfWEMwJU1hJh76zzzgEBggnyoJSmnBBkKXAPAMhZbymDgEE/CCLiuoGaSnfY6AY8AccZds43UiJMEWARBISBdxYQeGsww9Z5KZXjFhCxSnpMvSfaeAeoqubOI0I48mAaRTEQhhGhxEKjVCB4GPF2mDTaaGukNEHEtPPTsnnj2uvIAwEQnCOMGyVV3TjnEUI0iFiQtuM405oFkaobGoZN3QB423
jrKsy5wUQ3eaMaBJhS7Lw1VhaFdAgxETBGy0ZRQhHCzgNC2DoLGIxTFGPGGRMRRdhqVStprbPevnt3JRgB8s6he84B5ME5QOAAru29iTwY68GBB1PWOQLQWmOKPQBGyDrvARBCAN6Bwx57hCjjCGNwFiPvrBWBcMYjoB7YvEz/xf/xxr849f57U3B2/cT8+Hjr+q636P4TJyq5d+LEoJOevnn5LVvOytFeOFw8depk25ckig+yu3q/Xjx/anR006X+0pMnk/YM09CgOOStne1xVhzEadLuJjSMGu9Y40+d6+6+/CrESdTqGiBzpxcunuqcOCtCjbLvfPEzb1w4scrqppzbKGaChsW81EaK/uLyfct5NR8stoazzqmsev7Za5pFuMWOS/fMoGvqQwOWMewtc7qx3oqEg0Atj957oftrn9nLifzE+y7+whe+Uzj68uXNc4t4OBBJL9VZwVkQ9eNmpIuZTtJFNU9/8zc2vnvka+cps95ZkJoHQklNPOacK+umVfbCO88nkfhrf+6nf+8PPrc/O6iLTFdGCIwZro1xzmAtnZGNbm4e65//3N6M8mxv6WvPvqKESIkIkZtLFYYdgbQyFjA1RouAeQ+yrmXTeGedta122EhNHShjjLGUUiEwMipk/NJ9KxfvXx5Qo2V9a2MjDIMg5BG3OBVBJzYGV42VKFaNZLhq99Cf/tGHsShffHV/J48tUGm8c25tsS3COhuXncWYc+yY0Fp7RDAn3BnvDfJOEAwEgIB1CIxstxwhqJjVUjKDGCFiPJ05z7IKl2UNOGJxghFQjBiBYbfdSXhIiXRYO+o952unq8mxmsycs5EQnHqEMKE4jKgPA+M9YzQkuEa+qWqjXJ1VggorRF1WYZtFg5MLD31AtIaU4WJ29Jmf+w9f+txXxrquGnU0KhDDhDjlCaqdajRlDMA7rYEgSqi21rt7AkQPcK/0SQAhzgOSxIqnK4tL16+8/f+siqaTcnm5de708Ed+9OmFS0/v3/2+r/3mbzhrHaEEI6udRd5Z6wkFBNrQ0xd+YnEJ/8Yv//3Tw66dj/e39tGjqfVuPK+pRs45QlBxvJNQZy2X1oDV1nvKEcJYIhysnk56DfNqOi5DIv/SD6819vbzlxuMAkKo88gY45xXUnLBmeCq11l++HHvdoviuKF6PMO/8YU7V/e4IWwxwYNuqGrVX4oj4fPpfiDs+kq0fuk0oXrv1mT7oMiPCkfFd9668vSTD2/u3FlfCbSC7Z0yz/Sbb+xSbyjHNoWsKQ3A4mpXO4sQi+PeJM86Qlje7SWLe9d3SbkfRdHuzTewI2dPnVIn/WA1ITc35tNymje/8Wu/ffY+ev7hMxv7c2PA2DjunNL+aHtnupdtLqw+3CJ2fnzj/kvnNm+9EcanmqY0lRG9njE0TvpJQO9mRxh1retGvUtLdlwWm8aOtTRNozFGQsRgqfNVVe4D6MZO2rgX8QiBETwBC8aoXtQOMZlnJQ+C2eYoiePaSYZwPd2vFXWaWg+dfufoaG7nRWtAt3eu29pQCkHc3ty6gW3NaMAxz45mvKUvnG4jbEtJlFfCicGgl3RXjCFLwwVnK4cpoV0cLe/vbKQcNdpoRI3gjEXr/RVjnCb2vU8/Xc3KKs8SY40NVpbPNpPbZXHMABfSdGJBQw62yrOaCrTcoRiF3z0cFbppxSzPs53d0li7dGLhzs3rYNHa4gKrHBGpS8XewcaF0/eLsLe7M5PGdtPW/HCUQ9PpLHovO90ex1K0ElVahCoKzhnvHDY2iBJx985OgNLh2v25DlvtwLDNdyOp2mAEnCLGMI1a2nmN8Hw2R86ncRRFsTH14qCNvPUYAk7A1HU2yue5Nnxm8ZWrd48OD8dFXhallJ5Yhzw4bYASfy/s6hECQOARBu+81q4G9vYmihMUtG0UoKoxqiznWaEMyBpmU9jaloJSI413jmBiAVkPCAFmQCkkQix042G/c/7syuLSguBh0u0YQ8Mk0Ngi5IOA9fuJVhqAhKHwzjOOnbPWgmq0sWY01d5awZgQQbfbaupiPJqGYTjLy6JpkjjAlgCLokQcHU3rpomS2IPNs1prG4VhXdeC8FYSLS/1qjKYjufa+KZW3nqnLYBnCE6G6Ec+caIjdp2GPNOuqfxcr6XZD3zg9FvfmYBO7+zuvP3OjfmsCgJ+6f5znSSljGMEhDGMnZZGKVtXDWKYCzIvJaYUKAbrS5UnSYwF9w4h5KM4UrqhBDlARkniHCXcYq+cIZyGnCqppdQAHmNkpLbeKdMghMB53Wjnoday2+t0hr3pwTgdDFGQalRwpiwutG3m8+PGoEySG5tOqRAhm0RCBCIKQsHZvdQm54xRQggiGDvwjDAeBQDYWgDtq0bXtTHOYu89xsOlpb29GRcMc+wwKZRxTUEwCE8Yi+qmJIAJRpzRpaVOZcx4PK/LIpvPvUcS1UppDbYjkn67L8u6UtIaibxvGjvOjp1DlLEw4pZ7wkAqaTQgDyIW00rVVYYIseC99g5QwCgJ3vV9PP7I+vW3NspGnTwZ2eIq4OPHH1mpDyAyxTt/8Pskz9tB07Zhp5anT7b/9N/5a1/5xlfWT/XOPxxdvfIqGaYsDNvtME471XxkLMyrsa/chZUHeHjfvLh18+5GPqva3TS3yswn3DgsGMHcO6nrppzWrAKahfVeOR2rj3zsA9u7LypdRXh66amLL44OFwcrN7dnobAvf/GLq70Lb71ylVFA1rTjQBpTjQsekF5EN25sPvLo/V/+yuvOwclu3B+2orIeHU8PjkcPPry+sbeHqhFPRdoP19dXTp0591u/fmV1eVnF6fadt1dOB7d2Li8NH/i+D398983RzA5F0mM4v/788y+99Nxj93V7Q3zzjddOLEXTbC4ZT3rhdHZAEBXYHl+/vdgdDlaQLYoIL//Ez/x/796Z337+9wgy1Ju9zYM+BueuXH3rajM/CAgc7mYH2t26ckuO5oPFzpJgb7z4jTiU//zv/oNzi6dmu9Nv3X3REhaE4KrtNeH1yOy8NV8/101a3RyVAq2O99j1V7ZuXpm+55lzi6cfzfINbqL7zz0Ut8e9Xm8+xydXzxIkFhej4VKyfeOdW69dff7FN6SrHjn3yKDVf+bjHzi9fs5O5mW+10rDF7/8jXcsubu1+9Y7O7NmfjzNmrk7cXp4Ig3vv7B+5ebuG7cPQgKV9YJiHpJ7oiteaZ/NP/35X3zkI3/WIu6dxZgFYeCsj6NIG9fI6trttxH1smlaQRwhJ/e3/sbPfPjDf+1RgP0ofgyjzJfjsNvCuLR2Sl3K86O3vn1LExJGvqzqyayJUuGBqEZTi+fzIk0iHMJ8XomYChpUFSozdncz39i4Bph8/isvWtfEQniwzllBqXHeeCM4DAddhNDvf/lLma4vXlj48IeffOuVN/sCu4P9BZyJP7JhGmOR1sZ5mUsuKMIYMHPKL184G2HWeKm5yqsxN/V4AstrpwqSFIoZqdpxYuuqqSWj/ODgsL8UpEmrrmYijAPG5vmEMQTQIGdb/UFRZMo7xulwYWU8PmKCAkbIKKQbhHAYi
LKp0m47CMOqnjXNJGr1BA3K+RwzGgeRd6CdxtgBwY0uvVdCcIsAU4EIIQi4SGRdTfOJECIOYgCXJp1ZdtgftKT1Huk4isFrKbWSSitFiA3SYV1lSAjKQFuLMaEkoDyoi5mSjfOOCOqsdtpihLzxjdKqrjlG3SjKi4xHQeC6qhgrM4IkAV/IWgeCUoIERp/81A/H8WJVVE2WYw8IEKWhQlZ0ku7CcK8puq3lxiOP02zilpZPNcoej3bmYb62tlBW2X2Xzl+9sXl3a9Tt66Xl8x96/5/8zB989ezJDuB4UuZLC1F7QJI0Qlre3DyS3qPGp0nnkfcsTI73GcPzqhQBSaOeGh1ZY5VWrVZQ5fVKJ/AE8CFqRs47HzASMeSs08Y6YyiGxnrOsDOWEhIQ5DHygB3GBuPVhUXBoDbeUADvlLVRQotGRoIRjCupMXEsoE1jAQB5uPcN7IA2tXXGBgkvK4MJ8tgjZ2tjG22wx+ABsOecIHBWWYwwY1SEwjvjPAQhN8qCw5hTETBbyzyvCcOEE0BUGcspIQQ8pTPpXrqylQS8H3PRElUpN69sD4dJl4sf+sj7KLGXL2+/deO6g/DDH3jod37zd77n6cc/eenhAbwLd9985ybH4B0aLHSTMHTEagQO+0LXiehzRqTJgqitwCNv0k5aNtNOv1VN5pzjeDFZXF6ps6LKioVWvy5qQnFRVUL0FpbTysHQQahAgdTA8iwvamGIDUJumkxKvTfPCBDKfF1UgpIwihEzaXehqrWRhrhScF/mUxElCETAORPC6qZSam/rIO53+5fu37m9Q4wm9RxT34mRAV41RdFQRoOmmvZTURjCaIvEQbcXNLOciWBPkd29sbMKW4sxdQZImIuQOsKNcwRb7yX21iPvveq3UUB97X2cCoKRVRZ7pI333mPsCAZjHMYABGNMEEKIoMbItaVVUlXKGGtFKaVVhjGcxny5l7aEr2WTScMnzaSyxrtGWYRJoyzmyIDXZUMxRviej8l6bwEQAEhpCcXeOYw8QQ4QUEKTsOUAvEcWfMA5coZgXNU1eKCE3gMdUc4YeOcsAqiqOohYHARNURNCpbKEYYLBgvXeeeu4YASYKhFNorNn1qr5webda85oQYkHAMAEO/ASOeyco5S/2xGhYK002oRhYrw31nXbw5CyyfSQBdw4rbRWsvEeeXDGAWClnfXOeu8AHLpnpPXYOk8IQchZ6wE58M57BN4TBEZp9C5Z0zvnjfEEYUyQv/c4650DhBBG3nkEyHmPEHhrkEUIeeMswdg6DQgBwlpZQlDdpJ/9tRfeXZi2k4i7Unpfqdt37qycWgqC4uBoSjsobdInVp7qv+fJ1Vb42m9/dlxkH/jgA8dTvDWWImqNj3ZG22NTeURZ0O6H6VJv7YEVcXZlET07/d2q2Tixtj7df+PFd14NXB0OFFjNIVaZ7i2H+7dvqHqOKmVqhsUwIRWnur8QhImoJ9NstJ/tbcdxIOflQVY7Sc7dt/qDH3/k25e3KRYH+3bzyu7SCYNDrJQiOEaYE5BGWecEIPR9n7yPqe7dq8c71++8b1W876nF+XTyOy8268vx2TtVj7nGuTNq4a1vbUzr+rF4/Vvf3Hh2wx9o1TgtvFPKMYIJwao0GCNrLOHMOxclga7155/9nCbKeesdwgQ8RrJpGKWYeO8MJkgrujVO3OXw6fc++jM/+Z8OJoTEXREugCvn061OGEXt1GQlQlgpCdZYcICBC4YAAThrHUMIgcXWdqPQyqpL+PpS68NPPxnyJh1QV5bZ8bQuo6gViyCtZtP5dNymnrIkr7zK8jQIw1A083yisiceHiSJ/drrhdpCQRiosnngXHftPn54JyuKynvOI4YpRYT4e2ZC77SsEFBCqEeeUoKE8NZy5hFAVcqy0SKJjSHSIECi0w8Gaacw2mnDMBOYJjwh2AJYVUsggGmgEdJWYN7mQRREnGFsvU1aURAxKXUUJLYYGVU5TAjjZVVjSh1FpapEFBkfdlYfqG302ld+//c//1uvvn55PJEGqGZUl40ThAtslPWAnfGYUozBeYcJgnuvOyIWWQ+IEOq8RxhZ65jgBDGtSbp88sf/+k9+40u/9f+sikSbNS573/ueQFVZl8dPf/Jjv/8bn8EYiMfeAyIUe++xV0pRBIDQyy//m3ZIWp3WzkH1+Kn1iw8FhIyoiPPjWliDKeIMV6MK1wQDiwSWDRDCCAYLDhBceWPzmYX2wgLaOCiHHbG6Ov+zn+q8deNwLoMoZoyF89kcPEQRJcQ3Vb69q/7gGwcXH1h87uXNr79yMDtUSgkciKSDw4iIxD50aXH1vu5br96MY//Qe046iZwVIuq0u4ygDlejo0xv7U3ffvUqUarV4Vo1a8uRRuRgdy4dNJkqMicC4pzDRiwNW1Ve+nk2mlUwCe5/8MKjq+/5hd/9OQYmOtE7dfrM6PC4M4hmRXVwMD5/3wN72zswN6P92T/7+7/y9/7unx2cW8qqOmm1ur1HtnY3WnZS1jH4/nh++2ie+Zsqn5UPPzIsCpN2+tb7OO1VhbH5EYN4tLMry+1kPYoZx0Fy9+hGHC6ECKwhjVJBvOop0s1cO9MfLBxPRvECBa4oZyEPi8lhp92aj0bMkWpOL5x+T+kK6yfDXgCotiYLeUypN15pX8ymmgtupaa0EwehIcBipDNdSeNtThjByKpqapraMrK4ekYWU84opk43dDIvwqAGZ4wtwR93W4JoyDHZPZgzGsaiE3S7mCfjSYM6i5zOt29/m+M73WWLgyFCkLAwifnB9oEPyMLK2aKcBO3BfF4EVK707MOXhluH88O9sTM6q8zy2uKF8/23vvHttB8R5hfPrpUNK6RaXlq/dWMvTCICHAOnpheSuixc2DXON2VeUcysBaNLZ6qApa04zea6MRaa6OzpT5JgSSNPi3rj5uX96Z13pyAKOOPIu047LeuGWai1aaehdyAizgM+Pq4QsQihMiuno2ae17tHs6P96c7R9HCWz/LSNQqBQwAYAQbvncfYOa29dw68BwDvnfMUA0PQigISkc9989ow6S8sI+tyaYgtYTKDsoGjMdzawrsjOiuMl94iRAXGAIIxxlC7E/Xa6fJC79SJ5XbaTuPAOxQGQRQkTkNAuQkdQo4zCtogB5QhVTdKaW89pUhWjTXOOUsY7ybxZD5XWmpTAzKdVlQ3hjM2jIOlXqcoK6N1lFBjgqopHSbeIYw8eEcJ4YKVtXTOcsxI0rLa143y3te1dOAAYendpNR3Xts4k7RpFZAUEdF+47W7e/N4lMl2eyFdOHFt4+XDcZFPKx4wdHPr4tn1fk/oWnkLDjwllGCsjdVSIhQQTr11HDFlDeOCEObBMkLuHV0iaM/s1BmrlWnqMopihABj562RGlkPxvsw5MaaxmhOCRcCI2yR9QQFjGNB41Y4HU8ocZffubrxyOr99+mDw1s8DSaH29Z5ht3etntnA1mP4oikKWM8xIhxLpIoiOMQACFC0pg7awlllFLrwDsfisAi5ymxFs1KRYA4qUo3TnmoZjodpJWUWlsmOCNOaoNVabwilNWV
KWvjMcxLaaz22illkKCNrMFpYywjvFaaYDxsL9Yq6ydBVtQQIBFwKZs0CZigjOGmlnlZOO2NNYgSbxnyGBBwiqRWVV79ETgSOkkSmXyYipbOv/21r/JBd3Mn9xJdWu9Pb89bpFtrPFbN3/h//dT+6zcnO/aH/sx/88u///md29Zqh6t8b7y5ung2jhMOUVHprMi7cbIz2lm7cHL/aI4EoQyPNyecMZXv3v/w2Spv7NFcGt8RUSPN3rWq3CTVdGpKd/X55x5+z8msTe/O0HqvN/zY97z5/CutoHN6dXjtxtF/+qf/M6LJmVPtOqtMqahA01HpEGJ1ypLgW29fHzmUYyS4qEaZq/KTi11D25fODD/+zMf/7b/41Usr5xaWg53Lry1Q3xVw/v4TN6fhzi0YH81vbB7ev34ON0kS21ZbnH7wzI03v/HVb3zpAx9/sN71SuN0hS6tdHqD7jvv7A77naGFSjYn2qEzEbNsvde9M9q9urm5sPB+3lr+5A8u1w698LtfloejO9W8HE+yfLp2Jt66vTNcaZGQJ13hqT/YH9XaTyc5MfrJB+9X46Lt/WxSa+RpTKbHGQUXpaLL++NbdrvcTYfHRuxqny8P4tluNr6594fu5Xpuk4Uepbgp6g88dfGjjz0eeouQvfzc89tKbV+/fG75ws/+3X80fHAdE5B3bhfbm5/+ez/7ra++SpMgqwrrbaX8Q0898PKdqcMuDqOFM/FwbQW6/ac/9YNv3v03cSwW2nFdKgwYO9/r8gBEdjwPB2svfeW5xx84eTAtRnMdMBoFYZlX8yyPRUAxtgCJCFwbJUFo9PE//pd/5b1//HStrzA2o1FHo1h1+xSfJaoEHDLenUyPJg0rwqixtRPUSGPAamUZBqDqJ//0j3ztm9+cjAsEvt2Jqqapcn14lKlG1tpY5BHBHvFGWVsrgkEIHjCaT8vcqEE33d3ZLyb54vrQBORzn/v9YUh9XSzy5kf/wkejfvo7z38aAChnZV4EYdiKW+PR1IPv9vpMJCSIMQ/ms3F/fSmN+e6tO+c++H6IBTieV6oTMOeQ9wFHuiqLfr8dCV9lYwemLLRPIoSwlrXXyiuv6rmRjQPOOatkwzi1oL0jti69s5iy0hRhq5s5TRiFMOR1GxmndBnwkHBhvTJGWYQ5OIIoiruVmfEgappGVTUQRkXiEfIYeBhjwBigqgqEseCxUQaooRQDpt4j3xjOGBORaoqQUyp6SmnGWJPPKNSIEqsNskCxMGCCKHJKOauMVhp7wYOA8mlhOpzrOjOsCsP0nlkKjMbIMyIoF8iY0dbk1NnH945H2NuAUM6D6Wxvmk+Gp08zxI9nYxFEYX+QF/PeYGU8OjZecSZagZxlB3XMvXXT8bzTXRgunrQmP9i7naT8Yx/7UBrCtbffHKwOVxbbt2+8IzhFFp86fTqJ+O3rG8qV/QQFIoyi6Nata6YzRIicPn9mtL83nUytsr1WfHQ8I4T0GG6Yrx1yCFOMgFFVNpxwxxRB3jpcgQYM3oM2TjvrAFll9w4Pu/10b2s/EqF3HgEISnzIrUeMYuGwQ8gZYAgHISsrFXKuCDEGMWJ+/Ac/8Nrbl1/ZnlpJEBAFDiPAFFltMcbeA0LeGscYc94B8toYqy3FWEtVlyoMha6bTGoAzxghmHqKZWOtQrXUnCPkkQZ/KNXcBvo4ixMRYBMw/MSFtWceeTgfH25tbS+mCVLhrCHLw+Uf/6s/nsTx/s6uIu9Wb2QlEaXGOWPdaHrQ6TNOklLpsp4CAHfEOo3BNQqtLy2QKDaAZ7Mxxqic53yBeK94SKxPtXdRpxUI4ndrrWQck/VuZ3M8tg2qHbkz9oxHzlmjTKM0wiaT05hGHmFlAIADDpqaLLXFUqeTEjHe2bIKIWCtuM2TpBW36yJ3VdPMC+9siKmO5XDYJwThJr175apX0lsrwa90B01dey3L7DClywutdhTiIpvZQnFbjg/H6Yn7wVniGxYFWBMsWL/LOS6d0YRydO+CiqUH6z3OZqXRIWeRRs7oWikNDqI41NZ6UBhhRrDVxnsAQN44B4AFnF8bnl9b/d2v/aF3jCFCOdfOjDKVQh30Y++I8Ga9n0S01kajXlA2alq6mdIOB8567y2yxjtDMcXYA0bWewIEgGDstbbWG+ss5wFFmBCCAIH3mDDOGGcCI8jzghDMCKnrZuxGrThKYua8IWFgrFNSU8wwQUx453xdl9Z5QnDAgjBOEHCCcbsfr60uLD9wdn93s7AlIRg8QuAxgJRKG2+MNdYDxiIKRRQxgqw0UkolG4qJJlXIWpFICcNZ1VBCaBgp2VjnEAJntbP2ngcJIUwIwRgwwtQDIkRrjxF2zjrnwQFC4DxgjwAhZ6xHQDDGCAB558BZiwkB9C7H2nuPEAKEMELgvffWevDeIYSk0oxizIludBCFjjip0HffePde0Lj6zIOnJ9NSFXZ/cxehsbfx3t0DT32UpmlycjZBcjxmrL8yTNJeUBNBdbFEJOu6K9e2LpyjVNrTD3WqsvDdNcvZrePjCx/8byeHdy9eWvylf/kr1QyHAQlG2draCg3Mwd19Wq3M9+7gGFbWFiFaXT+zUs9u3PeeS4e37ijXLJ1dMEVrdrAjIrGwfn82y53JRIo/8PHzZePHR2j72l71UBuZmJNAycYBQbSNCPekstI57ftL7L3vW0gpfe71vZ/6U0+du2i3x71/v7G1O25tvHrjT3z83NXrBy+9fmuld+qFGzu/+29exxjzJOwztrk/aQVcN7nPy/PL57bsdFpZrV0YkVkpqcIU0a3N3aqqWt1UMDRvGop4GIZaK++0KRTG2Ct3uCd1xufTu3v7tNKEBnpjdOfSQw+dbpHp8W6TZcQiabTwoLTlCTPaIcaUVN553cg20ydW2GOXFjudxelxib06NUgHaWZsEUqLwcbCcS95cWdhsTv2Jeae1tPyeC9dutTvDPLRYeOCdqc9nkzTuP3ex4OD0c2jzXxiCXJ2b2u/9/GVKk/Hhwqsj1uBd8Yax3lslMbegnfGG8DIaQMIBTS2BAmOhfAYe+VZ03itDEbMEs/jSAhmAMrGAEKMALENamzcDsuisk3u8sx6yTEHqcN2FDjqKOVRYKwztaWIEGcREdrVgjJgzFub181MyiBsDU49IJLll6/d/q3f/cXNuwdVI6vGOc4ba8Eq6xwVlBCivdfGBCEHjLXUhGJrPXJgkb03FPdudMgDAsI5CcOAkYCH8cXFZTqb/1cFtPmsWXlo8eH3PrW9N7meP7+1M2kvncoPDp1UTktMsNUaW8AYIUKJt1rODvOGBykTwQgB7fYP9u8urIheL9YWtPfWuTAl588E8LoGhDjHBsBajxzyRrvcRLQjVbG81gEMe0fbTjXPXEq+8qa3xlg7DwOPMcxmeZxykTDr4n/6qy8AsR6hBAdI4eXl1pnVYd3MTz8w7LbFtTfuHL95DN5TIUB3OA7KyjcGOR8GnajnCVLq/vdd2nj7ZrcXceGaY5cXJu2GaRpmRd3vxE2tHWBr6d1bMz2XHPnefUuCo7eubV+/eXvy2N31+xcoJxq
x0krf797cnw27ncXlRcTYmYsXZrMmL9zR8ejf/+cv/q3/4UdZEBbFTMRNEsPauRPHU5gfbBOiTp24OD7Y7bTOHh8cLa5cxJwK1iIuCgK8PVJBu4s9BWKLKreIEnpidWlx0I03t65oM2dBgGmBfYOZKadz5aXAwfb2xtlLHUsMuGmlJlvXboU0xDYcnnhsZvE8nya0Mz08DNPERmxytB33+qPjQ1XbtbXTSqJ+f7ku/fbxYWchpiyRzkQhP7m8trm/4QjS0hEaItoVrXVbvVPOC1VtKsVEKPrDxVaQHB1WnPJTp9ef/eazh/nhifVL/db6aH/KscBAFwfDIpMyb06ePJfP96Wc17nmSGrts/3prJGJotsjw1Daa7txM4+CwDq1soAnJRoFzJuw0dnD91+Yb287qwaD5ZTp85fOX7+9PZsXnRafHVHj2aB/Zj7du32456xrMTE6KOM2WV1e1srZZkZoQAlInc+yylQQhstnTj72xpvby2c7s/yoNRwUkzS79S6lhTLKAo7Bz8pGa8sZaWSjnWol0XQ6xYQ20iiwyrPj4+baW9cPJtnhRE4nU6UbcBbAYnePaGgBDHgHBBiBIEQhgzQKW1Ew6HW7g3bAAmT94Xi8uTfdOZRff82cXMGBiMpClRk5msGoEduHapaTWmPwhjGUhCwKebcd9rudXjftDzoxF+120u50EFBEmNMWAasbm4SiqRvnaZQwaZxpDEHYEY8xbpTxGFIWK5kDQd1uazqra9kQ5AhAJ21bbw92DjgnIuLGwMF4RgA63Wg0nR2NJ1xQ4zwiJIkjQmlRlc57B4hRwZhTdY0QWGuM1hjdAy9SjOHY83/9xaNXd/mwa89eWpseqjffjL67MUWsfOzSpcnk+vbRRBvb6kXOoePRXDY3lpcXT64scmdFFMVJ0kgZJjzGghIsjWWYhYGghBhjGQ2llUIESRLnRTadTcu6pIwCQMACb71DzhlrrGEBUM4FIVoZ5x1n1FoXMgHOOrBra4vZLA+pwAa6cVIU9e298kvPbh0d+qXFMNWB9yujUb110P3sF3aOszBgdtAKl5daylKHSCAE5ywKQ8qINh4AiYBzyjEhUjYsikIWWuwrqxxyBGMAZMEr4xtwnNG6aObz2hEAZ4ed9ODwoN/qddthHOGyJpt7RTNqpALtm5iEDvu0xY7GszQIQs7ypkacNcrUxwdKV1EYUsSMlErVvXbEkc3zppSSUhInHGlvrfPYhxSDBR6FRutSafBYvHsUwNwUf/2f/flf+de/A4unf+7nfv0XP/2V8fjWC899+f4Ffr57Jt0tTRR/9VtvfOuNy0MRXH3ztcmbf5C7aXe79fT3PVDCuFTEWzU/3G9qxoIWAjPOGgH+8tsv7O9ud+IImmq21URhIuksxLuD4WqyMKykOJqbb33pm9mdrMuSbisszSSb7lrT7XSCb97YaoenVnuLb0oktdnWYx6zkxeWd/ft8bhsxyzPVV01DiMDeO4sGQ78/nHCVSckE1Utx4JxFHaaM2eXsoPD7OCln/nrf/7Zl9+cTdXxYXX57ZvJYuuXf/233vfnfvL8Bx957nPvpIOFzb3ZQqu7fm59c6cczW4fHt598hOPnr7/zHOX324Nuosnosc+eMl/8U1dRsfzeZaVgMU0b4Zxb57Xb7x102m/urZ4tPv27HpZzqL5vELTWdxpdVfPTPMs6iaTeeNpWkMwOp6kq4Ok19q+NT595r77nlwx1J18bPWln/+Vk10hKuk7/ceeOnP5a68/854LV29eq2ngw2R90H7okbO//NvfUKR193Y+bUAwcJBjivX1maqa/iDO7ky++G+/0gfDw2Zw8nyrt/TUR/+bj3/wk7de/fb1r33py7/8mUlhSRTOFY7Xuka5slLrDy3/1M/82D/6Rz9XYDQYhGWpj2u4v9OHQfdv/q//m8nrbifutZLaV65RSrtsOknjeCnwTz6xGiXzs6fT8XSGKmOwmc9zTFCV15HgQSScQ1JqgkwEjdK7H/mhB2o6chBikKPdXdt/L6IPxPh+zrU2DkMrs7Z/9sFmLl3WDBbbs9G4kk0g+EIvTBkqxoedOAwYBgIKfDpoTX2hnFHeIgIU43vCxl4nrsrCW+uNcd5QZALOuSDGGRrh+fGEeL3omwfPX/gzf+FvL7U7J06y0swAPg0AgHwYBdY6JVUSBWE3tApI4YrRBEcpcxgb70TgCK8rrUEM+kFMqGtq7PSJE6cOjrYocA22kHo4vG882UUU1co0yiz0er5u8qxs6gZ7pAmj7cF4Z6PDGaYeCKOaWGcooeAsJTSIk1lZpPHASES8ZAyDB08IRxgKTzDzxlbNNGj1u62uqouAc4JpVtXal5xha2QUDghidT0SYVDXWRS0QxFXliLkp9kojeIwTD1YFtA8z6b5pN1btk1FNBBCjde2Ocao20qHTXOsmpr5mDFW17N2u1PKAnPiDI27nRoXot1xBsft5Wx6TAVtQNdKxuGy986ZptNl+eQu8YQRpjCuVS4YS8MeQZRSYY3yBIxFxpFc1YCFMyqNmQuSoB3N8lLLphW1wCpEXbedekcmBzsWje+Uh2fuO3ft6taBHt138RwCv3N7p2q096HHvpH2+DCXVUU4vXD+gbpQqoHJaEyJSKIWTSKGPYt6Wpq426SRqyzbPq6KRmFOekkQUpE3/mBWAuIIYWW8d8578M4RTrxxedl02kmLM8owYG4scIZJQI9mNXZACaWcgHch53XdRIw0tSYYtygT1H/Pxwff++FLG2P7z//di6NaNMoDQth4Asg7uGdb9x60sRgh5xyAowgwOIIgikAg4zCuFAgKAiyRKsYsZObSwwvtFtveO76zX2zPwdNgMtUBAigUxrC+vPzQ6uJ0a/PmzVvDfue+x0++8cbWMw+9BzGPWRIunijHu9n4XXojiyMaCFNJWTaIJZPKOHXIie+m7VCISmfD9mJW1mESc4GOjq8vrr4nTteu3Hz5yXOPODxZaA0P88wzXc6nGntX5wjbpaXubFLWWcMNTrud7f2jTtqWRJ1aP7Wxd2CzPOXAG9blrdLISkvrgRpCqBYMm+IIqzCMPE1jkcRCkO29A24VJ1Y5E7eCRsrGWaqln+1gWTRNEbeprfy8yIK0XarGe+cZ6i+tIkSxrnWumJFC8Fyp0Wh+6ly7UhJAE8fAARcm7Rqg0hOwAMQbghF4jaizQPMKIUx4EDhEFjuDu3ubiGCDXEWa9U4yzyaqxhjAe0cw9t4TQgjBb9++/cc/+sFnxnuvv31HYQuOzyR4cJXWuZT9duic086lASGMtBcSpatxYd+8PdfOe4IIQsgTTzAmCJz3How1gJA3iAA4B/dsa8ZY6zXFFCFsvXVGeYwaVHhvOaPaOHDeeF81yjnPKWWYMEoBOSCMIN5UubWmbGpjDecCMA0DgSyO4yBNwrqYvv76C1cZRIlADFVZaZ29R7ZW2jjjEUYIA+UsiJMkGmiZY+60tZRRArhpKm0l9dBUFWMkZKG0moB3GPGAGeOVdWHAEXfGWgRISUWQQ+A5IUEQgCdSlsZ7jwFj7LyDe4wVQI
A8IsjdQ4yBJwR7bwFhhJAHwIDuUVAAIQBECHH23aYPJl4wgjnz4JQywH3cSYxu7k1BVcv9u2PrmCW0vbiUTWZH144IBmZJd2mIaHthZeh1VDNy6+qdM/Ts0sULeTwdvbl1vDcpnen0SIRELeeW9/DwZNRpz7fjqpqHXF174eowCvKA15ysner6sh5X8vhgpy72RpX60Ac/UM7IH371CwnKn3jfkq1zbPV8Og+iuFEIo3jr5p3asYWT6y0XlfNjGppH37dETXdnuxv3bSG3TDUTLGBcVFJT5LWW1ilMwuw4S7rxn/vrHxSffWX36PbS2kLEyIlh0m53/tuf+TOq3hNdvPbA48Pe6tEvfdlM/XFWzcdZwNAql7zMUlv/9//jH/vEH//+f/i//cJnvnjtvWdXwqX25f29Sa455Z1+C1Ooa2UBARBjHQLjnXdgsEeAADMAj8dzNS6bRnvvQUuplV1sdy6c7H/pq5saPCZ0aTg4Ojw8deZsqWU+nkipvcOYsTqv4qAeUnqhIxaW4qov8lIH0CCKZZ1xogKRtpdaWT710lQTjVyDCBhLkriryjkPGCZytL+t6xXBQlOSlPL3PzDcvF7bnIw12xupO9emaRT1FoJyXOVzyVOKEDGeMB5bbRmnFLB3CAA7T3SDnXOE0DAJU20KoJNaeYRtY+JuJ02787rxKJDKVYp2AxFQL7Brspx6CAn2zTRtc+u89Zo514zGPggi1HGOaaWSbge0pIyhKC5mUwBjjIwCHrQW53b599+cfeXZL2+PM+IoeFQ6BIJbcJRi8J4gCv6eTx5jShAVznkisNUKY4QJvqdQ9N4jhJEDTAihmDNGEI+SdtKJh7FaXmn/V6siQZlWrCpTBfjG1c03X347H811pZkg0oADjxl21mGMrDPeO4+QA2qM0s14o5x/86Xgex8KrHQawCNSlZ4wqmaZRc5oWiuEqQMM2hjORdINm8g89/bWxfNRilEYIm1QK4gurkfPXy0r6pWynPiAuXPnO01jufMiCiyEUmkn3SCNk0SsnuiYqqKUz3bz8a2dXhB1e71Or9dbiQXuzrMmiGFp7bSaHF9558ZoMsYiVXWzPOwur3XbQ7qzdUSRcMgOBtHe/tHCsHO0N0eE15nRBD301HqnG2zf3H76o0/Hb26tri37yUhwEiVsUuOlpRM3Nkcx640nBa5w2qpkUXzwQw9fDth3Xm/e2Jz9/Z/97P/09/9ewHe9m1JXy7xxeY7Lwphoc2/eXzvRWu6lSObzeQuHFodV0SDveeDDVHiEUEw87/gCKeN1kx3ofcMYdXExl0aPjdbGeBENtaOU0YCjYq46Q3O4eRuIT6OIBgwbkiyEAlwUDzkSdSG0twC6lXQIigkdiUTd2Dg4ceJ988wcj3ZJKLR0xWw8zef9bvd4NJUV7SW9dsvNjo8dMtc3XulxnCQ9j6reIMiyYvegOkYEo+6knt7c+O680El6Fut4ljU4iACF3qJG1p1O0oArZY1wTULb6/D5UbZ3NKUikvFwbGRsy1MrK3Go1+mKN9V0No1YF1rdQm7cuFF4Rt/79NPj8XdLjbFU2MGbl183puq3eVHMEbcE8UJWDkjMUAG2NYyacjJviiTnnfSc98g7SYivi0MJ0ntRjI/fKF5rD87e2d1O+p2t46M3336d83fBXRjwdDRvdVssiOomn8/m0mrVFK7SBoxXdvcof+fqzXmlD/an+XRaa20bda/kCdgBcg4bwtBqPzi1Ojy1Ojh1ckVwHojAGKWapqnL7a39oprc2Z9nRZPnxjpSavaVF8okRCEj1jNvRSadBeydoJj2YxIIWOhF7VYchFF/0A+4YIT2B62Q8SAQBFNKWBAJBKAag7gNQwSAEEJpwGstERClTD6aO+8RQQjD/miOwAPyylVaaY7xcHFpPD7c3tu31hmtFzq9aZZ57atSMUKnvipLTYEGIlDO1WWNHcIeDXvDaT7rtEljpLLaFAYTTCmN08haV1WNN8Zi7By6u08+e3wcCeh8a8SkVQqyRknvvrz5rNGWBBhjgjUxFjzGlZJ39vcHwx4lQDBoq6VRYRAIQZz1mINgzBkXJQkFNC9lXUlVycPZNI64sV4ECSZGI2S98w4QYSIU3HqPsXdgtcEIAZAwicF68ACYckzLSmMqrDN1Xvo4OBxPDw6LX/3t7S+19dOPDuMoCBF/82p2NPKHewJ7QYlGgEQgIhpYT6I0CqOQIuzv9eUZUQg7jyiCVjdECFujHUJByFRRMhpQhK2z3TSJeei0xd5DGFdGYU+PDvIoaClp90fFQi8SPOikGrxHtDl3YphNmiw306yiQJpaKaUZ5wRjrbX3LuDCIiqNxgkNAdZW+lk29g4YosijRltsTShoFHAX4ao0RV5jSqMw0lon4bvVm6Pjg8vfgTCMm0xefeGtTz75cZw8vUArXJa3phuCRrfvbCS0Yky+fevaybXB9/3A9711+3XbzoCZbreN5tLWth5XqBzInB9laHG1y0D6fHr+RJodZA+cWn99NrWUzIv8zdcPVtZbp0T86svX5cYRPvSLoi+bcjSZUNL028H+/m7dGT75x/9k8frLdw82RJcs82Wv7SSEPE4VLbrttsXVTjUKwqTXimaVqpTZPWoSFHTbvtMJIq0vnO4vDOjrN472Dm0v6PjpbO/G60kSsEU+U507e+Mubm3e3jr/0ZeNV+XhHk3x1p3s0N2NoisiScWcfvk/fe6jP/DktSubH/3Bp29dO6wr/s6rc8/Eez949ve/sz+/oZMojBdaLrcoQAXSgImsMpMeBZ0o2x3PZ7PF5ZZ0+rVr15mHXhxnM7x26tI7b782z8t0IPK6mM/g1Vfu/p2f/elvvXHn259/48TaeieYv/cjD705Tl+6eQOn4TM/+ue/z07/6c/+3Cg/eOxssn+wR0E7TYRjnTZLQjqelNo6MIYzompJArqwln74PefWHz01uPD+lQtns53dX/j//e23vvZCa9hvLy51lvF0XhQzWTW2qosP/fAz396Y/k//8XertLvSisdHM5eK/+X/+Pnf+o+/+MJ/+ZZtTBCycmqOVOGaKqB27VT8Q3/iQ7/xC39w4exqxNHXPv/izWnelNm/+Wf/77/5z34hDpi3rtduYcy0dpyBcxCHIg7K//Vf/6TRG8pjwlYqGPDh0xiGQi/5moLgjHMC/Ju/+wem9vW8pjysq5ph0U2EMq4sAHH67ZfeCkIKiBBMK1nLoq4yBdiLkKWdsMxlNssBgGHiwEktAZNuO4qTcNBPXn/lRtwKBLimKn/qp977wx94MGid6j7w4Pbuzp2Du8Nh7908RaMJJs7TMA4ZsQr0aDweH89OnlrjAUaWHtzd5nEP07Su7fJyFxNkvC+yMubk4GhD6qbT62NCZeVn05FzXoDAGEuoq6a2qrIgozBW2heq8rhaWF5upkd1XsTtABiRRRaHy8goUxVNNQ8xafIMyyYddJVDHoiyjhHKWGC9b5rGW1/nmUSOYoZpaMBghgKGOCXYcV3PNObWaUYQ8k7bxjdAkmEtGyESIMx52dTKAvAgVNWx7wyFYLapH
VgRcou5c3KuJ0KIFl+ez2dCBNaoss4pBUYQxiympFEWaKStHx0fRYEQ8YA4Z2QkpaOgJrt7YQTjg2ky7BkcaW8aOb90+vzG3iEzTlU5YR4ASS0dEOcUC4Sg3uraOIswYUFIGQ/DBElodImMKxrlmipOglgMlSKLnbZuKldV0hIhwjBC1vn71s9WxqjCHMHIeidaQtq51U0znzPB+8OWbqRW9eDE2nwuq72dbtrCZd0NqBCksMAFVU2zOOxqwJNCc0xqZTBB4MF6oBhBQHKjOh1BcDKeNp2QSQdGWWJsgHGrFTKCptO8306oRecvnA4EPzyejcvq+HD+1AceCgKe3x098/BS/Fcu/svfunv9UBNMlQFAQAUDY94tywBgjBEhlFKPGtmYooJeC3UootTLEK124ENPLlHtyuPp6dMrvUFvsHbSKmQcfP3rr+K4PcrM4eHxE0/c/+CFc0f7lZ5P0h7+3h9++rWXr9y5sY2aaWgmQbo2WDqxsTG1UwP6XXqjsTDbndAwMBoc0AYQxaHDVmlJwIeUqLqopxkuJm220I3CBJUopPc9eEbbejreBGadJyHCgtdlWdRNnWdZ2XAt0eR45AgGmZ0Mo0EkDosiReVah46swR4FLBzLwhFQyqla+wAunllcSshodKjkJIw4R0QghgL2zPd+jzYL453rxfyIMe+mk5SHKcNH114WkSiPp8r5KG71OiLohLP5jFCBrMWcBQx77drtKJ9XG7evZ7WFYAGHmFGKLGHaeKUImcaBYcQRQP7e4s57gsB6d3RUzXKkGl3rKU0TEQ2WVk7u7x8wgHOLJ3/tn/zb/+4f/eW37xxwIIQT5xyAxxgzGmgP//tnfv1U3OrG/LjUslEIecRwpf3htGaMAiDvfZIwTAkGFIYJaUrOuVGAnPfeUYIxpoiAMRYjRClGHgOABSAMIYe8RZiAcWC8DEVICXVGO4+ch4AxgnAQEu+MNuaehr4sG8aplxUiwhtwRmtVY2wJQYgwwAghaLQJRYgRZFnFOJ3nBRUUnFWyEmFojLHaOPAEUcyRUpIRwijztaqbkTFWGdXqRB55JzUmXpvGAwaPtPQB9qqplVaEciMt4yJox8h5BF4IzChziBDKmrLU2hhlMDEIYUwpAALviUfeA8bYU+zAOQcUYe+9Q+AcAocQBgAPHjwCQiige8U0APAYURpyxgnFxBkllbTOp61IG1VmNUPv5qzrvDoYV2ErhTRaW1nL583S6aQuZvm4unJts9OrH31o9Wu/89WFurh4/+psvzhWV7LxzvJCZ3wnQtRtH46YMh/6E9+jG/ndb3160lsincUHH39q+/lN1hbnL57+/Ne+I/snv+eBT332l3/9qccunltZfOfWXSTab1y+2eH9H/6BD3ta797ZX1htY+2iMKDELS21x7iuD7NqdFh228wa5GkxK+bHhyvL5v6H+50BlaVGqHDWytojAqrMEShVqihMSRDMR8Xh7vX3PTF47bv707l6/InHW+WnfXM34EtR1D1/nx3vXt249vonnzp7+XdeZx5o5Ml09Of//HsfeOA+R8nKifDw8PIP/8CTX3/28l/9kWeeevw9X3vjjf/7M189Mlqq0nlPCWpqTRgFQMYo7xFjlDKstAaECaOycarx1iMEFjxmHL1x+cWHLpxII8yD0NY2K3ILOF1YOdU9MQjQf/nK542xDgEOKMSBSESccKvyPBubShHGic1XWnZxUUwO9o2zYdyicSsKrQdfyiqKe64xvpn6mpu6sLrWskyiQVPKvMSD9up9y9OtmWwlwTvb8+2D+PwaVbJ0AA5swEPjkANkPWDCjdIYcwS4qguHGBaMEa6VYdx3O9R6B94InpQlwSwajzLlqPbOA1scDlb7th1UupiHlFBCGGIgXNJyFoOuJSGaSaVMrY6mUsJ8fry4fLq/ekohabXC2DmEkYj3jud7lfz8N16aFtYaBASPpnPCGTjMAwrOS6mjSCCEvPeYYkqpt7XVVnu70O0V5UzVEhBCHhAGYy3c47xy5jyKwsjboNdf6/U7N9565Quc/FerItk0+7eP/sE/+Nc4buVNMd+fiOAe/8h657xzmGCEAHnsrfXgAAhGiBCsrbGY3ZlGQSutq5FVBW8FYY83pXQceMefWE/lxBlvpJVKW4e0dlBJ2VlthYtYNRV1NI16gLvPv3FdtKJBi3LKTiymRVZhwpJeH5S58s4BYiINcNxm2MimbvK5Y+Apx4zTxx6/FEfdvHIIHKYEvAGGOCFH25u6OOwO4eSptdG4GQ5bRAbH46mZuTPLLVXB/v7RMIm651YOjka9wCHiRRwSxpZWWkDpg088XMtGtIUVxIQxt86U1M2bXI8HJMEu1GWzd7y3eiI1zns1Xzu1/ObGRgunO9ujf/KzP//X/tKnFqJG2rmvTZZP43bfq6VejJeWVq5cfmGpHa6eXS7KHCtiG8KA6qrZnk87wxgBN3LKMQXkPNSTbBTE/TgeeMg1WFUWsql42ra1UqZs8tzzsLMgOp2FSuaE8/ZCZ3dntr3z1rmlAQugySQTS4K1x9WccFPWY0I5QbaTJNpZJ+VgIRGpmh4chWEHRwOnYVarpHsiKw1rZFVrI7MkjAX249losNi7sXNXsCQiaVZV3rpWP0iimOHT/eFKU0+jOLCegrONNs5QqOVk7+47b7509tK57cPdM0tCliMqGMKkF3VhskvQoSvx0on7D2q9cXdz+dzwYIr2dibG8HzaDAcLg9Rt3Lq1eqJDYqYcWA11U2FW5aV1tU7aESFmpjNwuUDcqzoWmANvB0LKRmuwlXfSjsaHrYXWoNNVjXKuJrwedNvtSM02blUHM9dO3g2aSskDXldSznNOSJoIJomNUuNhcnz00nOv3Ll7fDgeIwqqbMArAATgMRAG9mRfnF7pPfHkfetrJxlxiNmDg6PD493Nw+xgVs+yuqhk3ZjaOOOw94AwdgYwBYd0rUEa8N55AM4wIWhhGJ1a7nLCkyQMQi4ED7gAzDgPKKFpGnEqkPMIMcY5Y0zwKCvyRsmERc66UHDCRC1VWUlP8HiaOW0wIoCBM9woCAPmvVO14YJbB/uHh1YbcFoEAfJ4f28fMEvDsNWKGyWV1VYbwVg2z8Mows6DQxQLTDCnTDaGINxqdzymZVEan0PdEOYoY3VVS2kwAeLBO1lVlli82G5VxJe2woSGAdEUi0BUjUKA4igIWtFw2OdUcArGmHwyWTt9SilstNVKA/I0EE0jwSHZaIoBEyoEmMaKgLbCxDQOWaOki4MUUyiqWSACB1BLCc7rRkVJcHJ1aTrP5llprQ7TgCKiGu2tM9YC2LiVCEpG04m3VlVkN0e/szuxHoy1GCFnkcAMsMUU9fvpsN8jgciU7bbbASMR5VIZjzALqDRGWccpSmKOAJT0LKAY4UHMvQVwqE9JhBHDrjS1cl4IFgRibzLupYmWUoTcOD+fVBTVjGAHejGJ5azRtemE0aSolfEI+yRmcRhgzq1g1rt+J55mhcXQikNVN3fu7lKCvcTW2cEwKapGiCCMQkLRweG0qQ1GXNbNu5af8N3SAZnX
+9ePHnn4bHmA3vjmb1zeOpRMn25Hlx65OCnrvYODvC5tOX7zxd0o4pdfnyFCTr93+UDjK2/nyRBVqhGutrkTpM9oxE9c6p6wd6++WueN0mipvdqRi+T4CAcmkJ5ZfPu5d/LrN9EImrtZhNtYEKOrNGXdlnjq/MrkmNo82Hn92mA8jdJm/WJn5/XR+VOX5ri+ujXjFTq7PhQ0Gk3mBNFQoKqGpmh2ah0QWBukr92YN634yvYhRVrwKID8/m754acXQaRrp9/36c//TlXoTjvRVZNqdfeLX/jAk08dXju0S7SgOETN1n/45Q9/z2P6vu4T7zkn53WQitPrpw9ujV2h9bissrr2evONTY75weHBueWk3e+Otne8Al3W0+Pjuzd2H7ywrgFKVdeaKwUHm5PFQe94lk3m5WSaB1E0nrlsrvptemp9MJ+Uv/ZL/16L+z/56CPf/NU/CIjvt+hqOxjJYpI1bzz34knuL7STSOebh9sHFs5fXJgfFXNp57U0TjHisLdRT+SlUrUpMm2dOXnmvRcunPn5f/FLSQsiaoklT33qmY1bo6Tb3doa95aXz39kZVJ5HgUzzC49efL++3sv/t4Xbt8Ynxt2Lz796Ne//dyVG9dcXWnplpYXCSJKGw8MCYwpPP/cKyeXOsPV4I/9qY9Mf+Ebjy8Mzzz9aLO9b4vaIUwY8R48+LqpnSHdfoc5pw7vnHvqCR9KZFOQbaPjTkKnb/+fPHrGr/2gNA2xtMoPX3nxO3lxliPhgRTKNaUOA9GJmLGuUEYjNGgH4GA47F2+utk0GhPHOa+Mn88qraR3OolFVsycUYwjxsRif2VjZ/f1N+4AZc4BC6hqyPHhuJUWUt2883oW9U+0l/r5dHZvCsI4AiCUhkHAR4fb82KaxMmw2+/FcRCH2XzW60QOOddrtdtRvNT3qgGHiDVa5lrrYpZhjzgXgBhGnFHUSuLxfO4RKouGMOgtLquqiEOWkNTmmTKYUupE6DR22EZJp6pmzmvBuZOaB0iBYiGfHR0HcRJ0F60B30xl1TDOwFtKKQ+5rGtCSZHNMSVREmtZGykpZ5RR611d1ZFohXEKDlnrrXy38FTLOuSYc+78PWs1VrU0zoIxAJ54oqxGGADAIqalbscdZQsWBYBMo0y7N8iOc2/m1sh0YdFbS5lHYPLZAROBUk2SJAIL3W8dj/caaravX0/a3bTfC1m4PToGipxUyGPvod9Z2J/MEI063WR7dysOMSeBaupWmmCEEIbZbNRrrVDtTNUM0sglgoqoUf5g5zgiaHJ8jL0K06Uz5068+cIrUYezGH71V375T/7Jn1g9sXZ0dEAx7Q7bYdQuRsVsXlSNAmWRJ8ZaFvCl1WVVsFIeEywFobWBOBFhFCCE+nGcldPGWUCOYGSMJx55YxHyhMBsnrVjlmUSWUusG3TjxmppXGjtUkS+9/sf76XtF767d+7hhz/1iUc//R8/M72ymVf6T/2l/0/a2x60+2DfWOjkf+svPPZP/q/na3BHU9MoHBLHPGAMUltGCYCl3kMjG+v/+d/9y0899r6v/fbP77z1zsNPDPfqObX1IDqa7TQLISS0xpzPS4Roiol979OPxxw7cEbf1yijxschoLSbmIAHCVrrxgcTGcWJL2s7z1WYdbrdYBDhKr83BRvbY6IhVwUmIo2jOcop4E4cgSdZbREYqyvkEWq02xvHrehw5zVPKKJCYoZ9cHBnL4xEvyeOdo5290aGiUnZIIQiFje1GiwMka4B8NH8MIjIaPf2XNpIJMfjmaXhVM4J5cQxApx4AGtG40Mla+RRP6UXT544nBxHS7zdg3FWDlaWDyZHvladMAx9dbKVNMVY1cQ0c8oXqnll6izptKM4rcoctJW1wmnMMVy7tTGdTgspjRdnLi3UdWWZ59QbpTkyvR4lxMA9gDIhyHnsnLcGsN/anksQDgNgY5p8Z+8WAOHIO2339rb/2E/+kFSSWnwvgAaAEMbaGqy1nMOosoOltJcmujZTWylASrvSec/EwbxOGW6MHsQhJhCHQV6pre2pNsJ7QBicuafTwAxRSgGc45RpbYFAkoh8VlptMfbIAccYUe4wDjn3GMq6JphaB5ixKEi1nBPkm0ZpYzDBHmmKibaNsrWzxFiJnMceU8oQIESZRyQMw3ar06gqywvOqfegAJJut5Wkk8m4kNLfE4M4hwmlQnRb/aLOjFVYEMGpdSbgQgMorRcWTh4cbBNMCaXSW+ut955RRAmxiAnRlrOKMTwcLqRp4ml4eHDU77XybFLXubZKKYUJYEKd8/eQDoAAPCBMCIYoiBbay1v7t5S3mGEAcA6c9wgQAHjnMUWEUUQ5RpwSgpB1zjZVZZ3xFiutrAePkbH23hREqQgQ01qb+eGNgw2pAHCAEfcGqkJ3RHZ4+dsfeGzt6NrGqYuLRy/fePuNq9PR9NEHzy2stTKpstI+9sT7X317S0hjj+p6ViO6efnOm74YR30cxPEP/uCfLjqrmxtHy0lEZ/tv7lxZe+CRtqYce2/rhz5+/827h2AAPOKa2mI22pv5vqOUDU/1rCWm9vWkIMSE7XCFLWBcWZNrOwh7JwKqsvGOiGLKiFXGNKTVP28dqfNxb2ERmaqaH7S7ASK0aer3P7GydOJUjft1UVuB+gtV5zQ0waLN60iaZYF++E89/Km//PFyUozyar6z2+6KRx7q/uw//N4nHgzHh99+6Bz91f/wY7/82Zd+66sbBaDGak4AwFoHBBBgRBAigJy1BCPZVB6BcwYhxCgx3mEMVtYxBeLsdJpxx4I47i4PqJlevb35oQcfG/Z6I6erWgHoprG2IX02tFCkTBnqGEjwxntbVTLphFEciv65vXHeuLln0WC1RznPD8e9TreyYfvEWTbf29u9gxAEcS9YXUqi+EONf+vOd4ym2OPxXh2f7VOB9udZU5I6oEEYe8BWaw+eUCZrBd4Dwsh7ghHCwMNQG03AJgHCPWoMBCxRQJJkkNXoaFqfW05Pneh0+Yw6CzwNAiJr6RHiomOw8wBxK0HgqdKBw2BdWUguqNMyG+/yiMQBM0q/dW12eWf++uZkJr22YRSnusiUMtYBJbTVTRCGIs85x2EQMILzrGwygxhupTEXJJvUp/sX98ztI7kFyBECHoEQ1HtgjFonWLL60Pv/xGPv+9juzt3n/vA/Dzu9W7ev/Feroiigzvrd3X3CDhAFhpG32iOoyjrgArA3RoP3CANG2DtKCLbGWqUJIgi577689QXS+eiT0eJQyUZVo7n3OI6ik+sr1s8duN5Cq9TVud6SlEo25cPn2kQ3XIvhauvWHfmFZydv3B1lWUhCQhyYUh8dzDDlo1nJM1uNs4DxVow5cs7L5fXBoNcOBOKMtntdQOLESmdcQpLQ5YVwf/tGFJultti6s408MXoWpsFqgvoh8njqPdexSwPc6gqd8pBTjI0F1+0ttkJ2687BmQunW53BvNRppw21Qd488OTF2zuHYb8tKPPTMvR0NiuSpJ+VE6tKT0wad7K8LGb1wonke7/3ie/+4atHRb3/ztVf/qXmp3/q+yitjMv
DTqc7WNi4U+Vq2pu1hkF3e/tme305oBiFPAionWVW163OgCApixy4D1qDygDhYUxOcdq2qiLIE0aR9YRy7yxTCEGSMcVRq8qUciTs9MtiOpo0UdBhzh5t3lhZ61CPylKRiPRXTgio797cHizGTV3Pi1L7abfdz8t8f7rHEFisgFFZa0u8Jk2UdrLZvK6VNPlK95yVE1tPpQw4iwgWjNLG114QHoW2tojSrNG6dp6opBV6Z5MgcMBExFqdfq3Z8VElbdWHmCEpQnKwdeCQYQqwwZO7h++UbDpXR02U7zSH2+X+9jwbYdOgj33ovU5tHW3eXTuzGgXMYZOKaMAXW8utRtF81pSVxd70usJrjVk4m88AmiTtYNsIWhdNtbp69q23r1tLEKbjiaprq5AUobVqFBHBqoPhQpgu9O9NgbOOCGoayUXgFbqxd3D38OjNt69u3N21Suq6QRh5Z6EBABAEktA//Z7VRy+eWltrddN4NhrvHM+fe/6Fo2k2nsuydNLgRvkGsPfeA3EWewzgLQJvrcMAVjljnEMEvAewYUTPrndXVxe6adIKA+QoZZRxQQhFgBEwTgkhOA4ixgLkTRKHnPOyLErjBCMBS8Ej01hCPSHWaIUxnhVlGIYkpdl0HrAoSXloXMhp0ZjR8Tyw0EqFbhRCJIhC8J5QHCWxUQ6INdbldeWdRxZ7r+KYxjGjSEUhFjzcPzzsBkFMiUOuUr7TSjS4/d3R6HBsrNFaE0CtFE/G00ZKhDAgBM7+zF/4/mdfu77x9VeZA60MACisAWEHlIrg3PmzC512nZUEQHAKDqqi0tISQjyGWioRYB6IqqkJAOPUItyJotmsVNpNp5M0SsCavAAtFXaeeOq0KxqprGu3ouGgXZdlgGCp3daN0shRjP+oGG889u1uZzqezKbKOVcWJccEIaaaBlGMMQZwlBMLXjmdxMFwueusaUfdtMWx98662tRRGN7LLsWECUaUkk1lA0Epwt5YTCBkwgMggpNENIVSdckICO6boj6c5dO8nmVlOwqU99YaCj7itBMH6yvL40klDfDQBRFrVEM8RQFpDzpIOVWrJIhUJYtp5bV3GiowAQUa0ljw3CqOCLKe8aAq5CTPjdXgAHlijW1FgbY6ilOG3/0wSlNAWEUdH4kg7SzdHu23CeHezm/cGbTP72fbC50gMmJSG4aQR+47L7/pWuXD3//4cTludfuzjZvtgRNDOr7b1PlhY7pRFfR7ixlMZ3Mfds90ffv9j7Sff+3ZxU4UtRJ9eBjPcb11cN/yirbxwsLCxt3y7vFhr7dgm2DnGB7+6AeXm+zFF55fHPLOWlDayWp79VZz54H1tb1bR2VVTqoKI9oKuVdaADTKTow5d2J56yi7c1ST0vfaoXY+U/ZEyop8fjyiC/eF3SX6A3/sg1/8wrODXjLNjk7cN7jw2P1Xbl196OkTrx+OCn20durM2IwnZPLY+5/ZPdh7+RvXLj5035uvvtLu9IBTl6Qf+TOf+vK/+lehdlj4TirQpDg6LAD0Mx/6yPXXXsu2Mho5RNXCeu/61eMb70w4JgvdwXLcHx+Y2KMmkz6ChcFi1GbamuPDiQC6c2W/RNlX9l77i//wp//jv/7tYWZ6naJHmr3G0eHpleVYvP5WvT/mQctLkRo3bCdvbWYxdpX181wyQb0FHNNWN5aS/Mb//p++9iu//s6LL923mDblPDuu44W1YHm9NyCnHjv/kfMnZH7QWV9++/Luay9ciRdOVka/ffnqrY3jJAw+9YmPBIPF7fE8BU0HidOQS51Xs4UwefSBc/PJyFe1KWU/DB5/+MQ3PvP5Tzz9oX/4L39vnooRv7Wy1Gm0MeBlpZTSXLCm1rWUBpU/8AMP+7iv5CZzM4NPWkXB5v/nj/+6GPz63/zC90lLAoqZKLVLGAv6rSULrpKbitSdZPg9H/sYkmr1wpn//Au/Wld1nmXST6KI21JHaeAlzcsySYWhDlBjvI4FZ8xzhgfd5bcuv+0cYZ4gQaR1tbIiiL79wuYz59iTH30k9VU2PSamMzmY3JuCIMDdwXA8zkbTMQlYV3QYIVjJYj7XuiGYLC3193amrTQ+t75ya1qGQLEyg3bn+LjAPDh58uz+7n7YTS1YhCEUyXQ6iSOx1OqPdu9gbXx23GSTMOkpaz2QsNVlPLDeOgVNNScIkHGIYAsaOVsVhVENb/WR066e1WAh7jktA84YZwZJcFZVMw9QNo4xypB3SiJrLFCPuHO+sWZxcS2bHAqGAJgzJiQCqK2b+t7/DkZYLWUUJbUrEFaEcHCMIIY9Addo0xCPnQNd50iEPE6MA8K4Q+Ltd66cWBhGaSBrZbUVghuNy+mxtrNe/6wxzqhKm8JjM5vNKOenTi7UzvYHLWYcaBcFIXJQ1BojNstL5zS4fFq7KOaRYF6piIfU06rJPQsbXVdqksbdujac0rpxDFPATaXrqNM5E61u39382rOvDZd6R9szwSlmMJnz3/itr/z3P/bDBfjZ0SyOQhK0Mba9IQ/DZH54DE5n4zEO0l4nlLyd16pp6uO8NI27W8zvW1oYdBMq7Nbx2DnHCMUIABlGMCDwAMbhw8N5f325E4eNlpiyVhiZYp7GvDwqVUWe+eQfyw1aJ8dPf+jho+uvmaKusioBevHiOi2OuifvZ3mxf/Ogh4//5U8/6SJz7drB9b3466/uV9KA9wHnBCFCUcTJyZODxQU6CEYvffGfL/VIer7r5vPU5u975tzL37714Mol26Bvf/udH/o7f0VToi0tjg8oZtX8oBrvrZxYlkXV7Q9Jtz1YPX3tbnHi/rX6cDzT1fqZC6rCEWfF7jaEw/apfmX/KE+hUciEYNw4jzBFqgHsjJSeUE6Y0SZvZMi4bSrktG5qjJH3hlAz11pJr423FuxNVRZ1rkA6ab1ttVMkQWlkpCbeZnXGGQ5jhpwrSp/nlfMsILzDE8xY410Yc12Vzjvw9MTaemmbYV9s7N4uqjqsKqSuT8dV2l1uC5PpupW2u648uP1Kt0V4GHRbMojdZKSJ4DLPvYg50bEImlLaRt4+mGyNxqVsWq04m6v+LEuqbk0x60XaySg0gkuMiPf4HhEHvAUE4K016sY7R1WNjfcIwEpltXHWOQOCYQBcaocRR+AAI/CIEoQRot7bSq0+dGrl1GLfkfVuoNVVj7Cd5iLi4E0QE4NcQ22UJCShg4VIaq9KWplQWw8OIYIJBWstgEHGY4SQx9gBRT7iWAQYp2FZSe0sRoAIMUoT7omnQEgaxQhhb501rmkqAoA9EoSXukEIGBYB4QQ7QmypDGcBBrDGWfBKa+agFQsGNmIIEOv3T08n48PjkYh4L2ktdleQ9POjOVBkjLPWAUbG2LKee/AWAxBkjQVtPSBV1ZwHqqpCHmmjMEBT1wAIU94Y22JsYW3NWdxZXmuM63UHjIA07uTJE8VsVpVzTJB3QCh292grAAjDvRS9954QDN5JLfNm2u60Z2WhG4kJRtgTBIRg6zxGpNtuB0IQyuZ5oa2yznjvjbPeOoKZt85Yo7xx2v7RL2Q9y3Nrba8TAQAjdH
6Ue8SMR8uLXVPVs9u7dKl7MNFH372lmnztxMrHP/Gxt7/5bNjp7u6O5x6Abj36gfcfbFzvrcb5ZNxO2/PJTCplEJ+qvJJF/3yrFfv1j3+snmz0dTnsnd3ZvqWxrcui3NptSjk4ubR383i8uX/25GoraW3t3BWC8cBXVY5cGorAY2k1wlF7cWFx9513drZfX1x9CHVDa+N6WgSUNfPKN5I6cDxUDapMMeiHrYX24OzSdFdP58XJ+08mve7d2rF0JR6wIZte/fofKDH5n//qJ0a785NnVxI/O7qzrSoDgIdJD6GoGhXnljtlXomE+jKj5fHJiAVerz1wav/4cGt3DoCDQMhGa20AI2V0GHGrjAiCom4ccghhBwAOrHOew87xuAH2sR/9C3iWv/rcsxEni0ttw8AjJABTCxS8CLkHdXOnfvWlvYcuRsjOe52IC4aQNaq2NgRCKjz0wZn2SlUfXePOEEZ5FNVi7r1jPBbxKUL60WjOMGSzw+aQPPr+9fe0+Yff3PkvrxyFrXj7zmz3JF04ky4tt+fTIpuOwzAhhGMg4C2hAM5bhyiKKQ0dIt5pSjACBcZQrDAJfAOASJgmLBlOtsbnTw0euW+hLA+RygLGPWWADWWAcIjC0CJHkNXgwRscRgSoMzoNPbaumJWEEmIgr/2zbxx88YW9aQkac4dRY0yuM4ZsFKerZ9ems6yajwE7zLBASFaNIUB7qx/6/u+/c/t4762XCMwZhv3pLUMa8IAIWOMp5ZQzZzyhYU2Sx97/wb/3v/y0bWB3a/B7v/mPfVkXs+K/WhVZYx2yLETWOYSxtdZrQxgJwwCB19oRQMZ7ZwwmGDnvkEUA4BASAAhx3vrSW2rGou95T3u17wTuUDH88rMHn39lf5QHDEG2O0HIq9og5AlDue29dWC+cTmb69nhWFkrQh6uroQ8ct672aRWOSpr4xwkYNM0XFlbTDioWdE70f/Yp95bTPLx/ijttlZOr492pmVulcag9HY20ZUtxwceebB00F8FfGK4furg2gvgkQvR1Zs3ljsDzNJqVHeGUdCisiwcYoNBhxFYPXvSUSYhAmTAhcbkWCI3K7uKgPFRIBpbeedWl1fyPKco75wIJUlvbO8vLq+fvn99e/cyj7vPfOpDr3ztBX1ndvmtu//4X336ez94+rH3LuaTkSlH4PCDz6xXk/ndq+8kvRSFQroAaxNwOan3D0Y7q91OU2ZKVd4Z52RRNKGIGeZWO08iWUnQFcOWCjQeFWVuKRNxtwUBlbZkoRABH/YGe8c5JUzWYxIkB6MMFBFhe+/WTS/2tJlR72QFRWERjgarZxPWuvH1NwZn2oMFsnd7zJwOBMmKcn44bsVhrxMgaxZ6vd3tzRg3wzbfuHUtipeVGo2njDNOAn68U3KGsSC1qinjBqGqKhiLmIg8ZkeNVSwYLC0wahZOLBEOx8d16fBsXiIEEXMWOUtgPBnN5mZSZPMxHI5qQCTPakz4hfvXb22/HC/2FlZPexd3BgskWlCTo3HucNwVK7ye7u3vXO2mjlKgwoVRUFW4NnT/8DhKtLdQHu/e3rj6fR84AQwd7JooCW/v12K13e+hO9vXN/f3JmUVWHNvCpRUslacUg/w+huv/8JnvyhiphpspCQUA3FxgIYpO7e+/PCDa488eFprMxlPr984+Op3rs4yNS2U8cw4AISsxwRRax1GHmOg2FOEGMGEIgzYWUcYJhTqSjcaaeXCgPR76cmV4dn1ZQSM8QA89hZzzgBRDIhQBph0+i3iQRsTULo0GM6zzAKLku68zjqttmwa5z2ltC7rvJxj7wnn3iALtlbGIgTgdW0IobLRIWdJSimHTi+ucrDGO2eQAxFRTMEqX8xz7T3yXhsdck5ZAFphsEsLrSwrVVOtLHS1cbWGMBFVZefzKmvUQitZ7EWEor29cVVVVSOlbAgGY70xXgJ868W3Xr22E3LhLAJnEcEAhMW8ncbZrNm8feBWUcAIcgZxVpXlVBZJ1G23W86TRAhAuLJKIRNQCgTqSlmLMeez+RFBSFlEEXLgKadgXafbw4yNN26D9UXujVZFUc6ynFLCGO0ksTMoiFsTOyOcamOaWmlps6yAUDDBrDLWWI+8d/aPyIgOrOdCEIyzXCcLKUIEPE6jqJSl8xAEAUKkkWUSJplqCPFRHEWCg/e1qqMg9B7VyngEpjbe2aKpvHNBIIggR/Vx0l4knDnkPcdKAhVUWz+pK7XvQiTiIOikiVSql8QFsUdy3kU9xkkUcExwILCqpXfNMIlyOVtZWgaEtFRQWOvo4VTv52UaBtY65xw4lMbBMGrPjvZ7/TiJiNHvWm+2x+P7gsWNqzv97qAqZ8v9wCm3dVCG64t/5b/7e1/4yR9OpBEYASVKacxcr5PuXdnffvt31y6syjM+aXcOb+0OHuhEJ5k8mCMmMY1tfVgcjUf7qAoC3O1Nrt7M9mSHienhjBbQilLaRU7B8eiIYJm2kjPd1mBluZLo0vqJ43feapwari7m1Yw36frZB77nB3/wd//ZP7ZpFmMoC10dTUlEBAnGRWEajw1QgRi2n3r/+z73+193BOUHs8EgnUn1wFOn+wW9e2Xv5EMn9fzGyfXhIMDHm/m5Z1YOpvNvvXb7r/7NH//9z35m/fTwY+8788ZXrq8/fHZw6slbNzvH+eJx9vrJplw+2QrRCVoukW674gMVpIeTeRy3Q4OJ5ctnHgmc3d6vpoXRzOWVvHJr19Fm49bVT33y6VvXN46OZqvtaGGBGxsfH006awNd7muTy7IRcYDA/u1/8w9/8dPPtjz+6kt3drPs0pn+V7/xEtROJK3NWdFL0FSwQsCpYbJxK9/bVd1IrC+LU2cXb24V28eVC8jNg3LYC5mH1U70hV/7t/sHR1aVmRc372z8yN/48fWPfu/Ntw/uE0FemwrSstr/+qe/NCo8CYbrp8/cfen5rbduLqyc+Is/9pe/9Zvf1CKb2enRdNZCZBjQ1dNryYD+1I/9yDtvvvG1L041Jiurg8O74+9+e+PEsH+4d3jxdH+xtXC4NzIgnXFhSLO6DggXXGBPESK1cz/0E3/Fmdocvh21StdZC8LFLCt+9N/97d6Jj9SWUEAY9P/9736ps3g/14tXrm9rXFldBoLKJv+tz/2KQyZ8MX3Pe59wZb65uX08zbNKtZMAnJ8aeOp9T73y8h8CcsxbLkhV1RgjLc1ofNt6BBi0dtQi53xtbWsx5gFc29w6NW6dv7hYH7DGIh++G7euClVXu6qRRloeUCYCawwXYdoKOMNNo2azhhA+U3Awb4JO0mIi3z3MM8lIqizPDEZJOK8Ko213GHmEuu3EFHl1tMl1wZjzDnMqyiKjou0IUdbKqsJUhKJlqQg4qrOxkkogTjipswoDM944YpT7/1P139++Zgl5J7bzfvM3f09O99xUOXd1V9PdQDc0CBEaEBKSBmONvTTSmiVZlj0ja+QJwtYgWcu2rDAoIxoECNFANwjoVJ2rurpiV7j5nHvyOd/85ndn/3CLsfVHPGvv/ezn+TwWKQ5ERRABFkirKWVllkPoknZnU
Zt2q3u0/0bSXvU8YpVhCBuAnYNpVUOA87JotyNEEHKmkimlPtRGavEAqmKURQjKpvTbUV2XDgKgNUSW0wBBjAmGPtdGIgVV4wT3lzae8Wg7Hb3WNBM/7mR1s7Q80OkMMAmFE0JBgJSWjBIvjncf2ibOHb57Z7CyRSExurRaMc6V0hAbY6EQpTMaQ4yAZ4xrtPEoc7qpVAGdw1D12wOt3bxMe61WMc/yIgVIN5WEEFRNwwkdrG3bt49G00o7vLGxkstCIkqdwEQFSHU2+mfn0yKH2rqA+AhxvxXWZcMBBwjVRSNrHYfJ5nODu3fPprcOpUb7F+nFLFvudZY7kcsqYBFA0HLcNMpZoKHDlDRKOwcuba7M0/x4NM+mCwTs1jA+yeXDj18/mcHRfH5toxujamqs8alhrLfaOrz9bmQuIGyTufaC3tn9/RyNvvcHHjFjF5Fo/4BPMlApk8Re7HvTRck8FjCSHU7/4OAPH3+0u3Vpt72+3m+T/Tvv3Z90oyd+tvXk96fHez/y0Z/WNCzyc+RoyIJsMXbawbBj/TbtcAEUsNFobHCp7736BnZ80F++e1on/eWt3cvp/VuU63tvvhX24gcqWFkf5otGad1uebqqkoBijJxz1kkhBGUsiiKkTW95qczzsm6U1dY4COBkVlaVwdyrKg0xIgGfN5oHPIo7W1e3ZscnZVHvX0z6ba9OBQ/j+wepbMqDmYqp7wGythQNWx0JjNMlAg5gWIgmjuJ5LUmYZA2EdOj7wigwHTVh0J2ej1wjPC9YpCUF1hisAVdpHXJGnYJwVhQ4T6XfQavr/SbNGlGlxfzGnVOTBCxuh+1I2nmrF/CQVo2WdQVw1emAJAHOAecQQM5pjR8sZgGrBHj7HV0LYjGAVgMEIHQYI4Kg1voBHtZAAAA0xgLgMCQQAEDBz/zIJ1+79+3Npavp3tl39m4WpRPCYAtiD2xttMd53h62HAC2UVIXSmlo8TxVFuLQx0ZqRIhzziAEnDPGYkachdTjlzeWr6x1p0X61jv7sceyUgPnALQQOKOkwIBRjxCMCJdVY4GBEDvrtNbaWIqJNqZyElLbSOnxsMWjRjfAAYysdpogRBCC1oqmPLnQCNOskLquA0a1UodHxyfnp6ZRlEJtLQSQUGwccMBJVXPOKcYOIsAAhhBoBxgFzjRVDpwhAACttFIOWkJYJepLm1tXL18vi3w6Tj3OF4uJ0rYoReLRdDZqyswhDZ212gAEtbXAOYoRJQQhbB9kmqRTRk0XE600ooR7TCkNIYIAOAuQg8C5xWKOMKSUAAcBhEZrrQ2AGFPKiGe00MZABBGC738hW13VZSWUH5Ol9aWTo8lgpdtqRft3Tgl10XICbPDVl+5ZgP/8n//pL3z213kLTaYT3m7PFmUUepNpPjo8+tof/D7A9oPPPhqj3uHp4cbOTjqbNXnlORoi02rmi6o8UogHIfS6t969iRn0usELz3/kfO+t5V589809CoO1jU2A6bhsakalhcvBcLz3xmwybw/Xrj32SJrWeSEhoCy4ZM3AyN7FRQOp18ynrsmttbKRYX7u99exN4QeL5saqEXLbzuos1TZWk1ne9HOVdWo2cVCwNy5ZV2A2KuvPrQUrCa6RDlEJPTqyWx+UfeGV2W9UE5P6rLb8wLnNacLM57prBgd3tcGEGccQkYIijD3GURQa0sRtNBCZ6xxBGBrnXMWAQiA4w4ev3v+l/67X/iZv/7zAwF+5GM/qOrRfDKrpvlbkzdnWdo46wCwAEDMRpW8cTpZ6sXtFYaQ5R71o3aVeTQc+EGP+B2kFjY7Snzfj+IiKxCN/bYCmkTRKvDaxvkR78j8HCMXIJROBcL68afWv3hjNENwhQe3DzLe5R62EJi4N3wQ43NAAWOMsZAghrgxABHjHAIAOWe1qBF2PKC10p1ux+vvwrhVSvRErxPDBuEsCDThPrQIYS5k6qC11kKlAIGEMqW0tZZQpg1y1mGMbCMgYI2k++PyP71++42DPK8Jw5AiAgkstSQUf+iFDz//5Mdffe2Vg8MXRV074DyKda08yHm/9RtfeZFQwCn4x7/wL3/v079IPZAVY2Wc74dCaYSctdAZ5IwBwLRD8NpLX/4ff7H7sY995P/zD/8B51gbury0cvPOnf+/AprWBAENrdMOYWqtM9qAB/B5wghURhtCiLUGYOSMhRBTn1htrDEAKO1QqtznXhzd2aMfeXJ5sx+8+k7xzbfrUvvOamkVxM5BmM1rjCFE+MtfOWaceIxTQkNMloaRFgY0anxRGAQAxqrRlKKVlWjYCzZ3lzXC/X6MGukwHV2UDGE/CVkQTC4mxWQch92NlXiRnZ4fj4rZBGIyGHQ2VtcxTnIJbt291+MhhqXjZmlneGnp0qs37kW42w4jx5RWVhtaNIhSVGvtIYIA7PeWMQXaIcDQYpI2c62khi0PAAxBYB2cZvuOoKyBloXt7nIj/FnetNqXxtOp56Gdy9fCaApuHJ6dlv/qP77zs7D3+KWuVRpBK1UqhSaAY0lR6rSHi7TELYUICLudXNbd9goqmlraUqe1yzxAy0orI4LQj9s0jtDF4SFwyMqK8QAzShke55OtaOgMEsorSl+aMEqSps5GWdrmRGnrAKQ+r2WllA2iwCqNjE4Y2X/1m2GAoJwe3jhNR55uVFNrhGTSG5TzOciJj1ZrZcXY1gKyJLrx7pFB/u6jmxSk5/M591AIsDaFUzWKg+WNbdPMGKyVwHWtDMSYwPl8Hnv0uY+9cHzjNWLtfDyiBJ3fPUAq4F5gbE4CEreYXBCdmbDVMpimZ3dyAyZp+ejjT7cHyd79c0Ksz+l4DLwoopQZ2ur2+8rpMGHdDgzoAuh6MpoIaYx2YRiFsdeMm0aTbqd7eO9E5Xp0PouSBGmOCHnhYx+/edIEho6qen+0WOSikKfv+6YEO2t5QBAlw37no09e2b203IpCwkCn3YuSTr+dBB6cpeO79+9/+re/Np7Mm8I6xRoIHWCEM+KgshY6hzGEGCKAAp84ayAEBKJGKEIJI8hYE7XC4aAlhUIQI4C80Fta7mOHPUyMApwwhDAlFFOsGgMhCEKOEEbAaW2llFGgapkTDzVVWZSVF/CmFhBYz+ONEEoLTLDne00jEALYWe7TaBCLWldZDbnxQ26h3VxeHU8nRZpb7TDEYRIaDYqysALKWlqlKaMA6DgKlGj68VBUdVbmC9UYBa2mECFjFcIEOUKx7bQ9Lmm73R4vUkjAzm6SZ+Uiy4bDwb29s+l0oY3g0H/5zT3goINQWx3FrNdrC4fSvJnOMgzgZDqtqnxtrf/sE1eOTs6FNR3W8hiJkrCRUhuHAUIMdYOoqmRVFpAwoUwjZKvdJpirqpbGIgx57MWRt5gX0ILltT4yJp8WGLn+UgdBUlcNhq7RGlpkmpJxghGAEAKKgojlGVoe9MusnJydW6MhJkBb5wCACGGEGUEIJ0FMIPP8iFNinXPAIISAdUqqMEQcBxwjn/vScGWMUJp6fGNpoykba4FUuXLQYegAssIRwjDGlZGry0sYYs/3pKoJNAHFlHseZkZq
KS0iTuQlrSkhkGAXh6TVXcLOykazgAMHDHAMA0BR4rOV9oqR7nhc1EJiTJQ2QkpOEYQAA8Qp085aZ7J6ur3TN6oxqrLv+6XgyuYGDfy7e/tlOV/bHvaXWNiLxQE9E/VX3nvNBp7FiAQ+VooBEIZx2Fmy8+buO/ffOzkp/tPhJ372A5tPXM5neY1SoQUM4NHeweLwJGkPHv2ex7JprTeTDbV8cN7L5w3G/OrDa+0w5Jv65p2TQdutDzvH02mbsipbeK1kGFGQmQJQ0N/okiHE7L0379w+PFvqrr93/F2vR7WRwx4Tsty89pDfie/dHkeBlETeOb270UdPPxK7prm8fu2l1+53CH/7pXs7Q7a9sfWlP7i5e+34I3/2T3dxcO/+ycq1jdn5OFxLXv3OW7Xl7bj19M4H1j/5MLddwC9BHC99708wr//WV3/zkx949Nbejesfvp7z6DtffGeekqeefOj2jaPdxy49/PCTXvDEukdf/+xvSo0lRNrxmiyN550/9SP/3bf/4DelxLTn39+7G7QDB5yEeJYKoqs0y9J54bwoDOzv/8t/O91zNQgffXx7c+faOzffrEs16EZHBzKOiQqwarPZKbx976LNfc+yRpsO1pFRux2vDWGpAepAaHQg5YbLs9s325srT/zwn1pI+pDfrhH7zrf2e3EQBZiF9PbN+5PzMwP7P/mjHx+sXv3V/8ffLd5+OVG1Nu7uN77Zj7zv7N0KLvGP//kX7ryy//x652Mf+cAgcve+8vt//KXXllvLo8Idn6WYonGRtkM9nqbPvrD0iT/z07/xc3+Xck4gpZyVpQQQlrX0KHZKbq5vuGyG66//wS//6p/9e5+op+8ErVgHO8tPPqtIS0Md+Uble3/4+Ztz6Uk9yxZZYwqEbZxQ32PAZ5NpVWXZt77+FV1qgCCNPIPV1WsPfeCJR371tz+f5vMwpIUouMetdhCDrG6sNQhi5mOtgY8IC1hWNAqiuZQCl2PGbt25SWmL9z6YFmWt3Z9U8iUnIOLYURL4/v2LuXUaGtjtR9bVGAPOadROfIXrxQi5uCZIi4oQ4PsEIFwoO9i5enZ86AOHgK7nk9aQxR13fHDOCc2zNC2quLUlIfS8wELnnBVNQT2Yg4YEHcuYNSJKHNACOOUHCXZ+o2ocrIQ0mWbj2Pex1lbpPJ8lYd+jvGrSKlsg2Koy4RpsGgMjBDQs5lPsBUFn2RkIUcg4ta4hmFRlJo3krZ6GEOhKGcU9JmWlhPZ9J4QilDpltQYAYIy5UgogrZ1WpiHWeQRPZuO8cx512sPkqduvfnur22/31x1GVXa3yOaD4fLs9IwHAUEWAjxNy2Zheq3O0x/6+Dyvs1L6Xuwz4CwSxDiFOA21qJnnE2S1Ms6ZtCpwa4A4l1UR+JGxDaYYMn46m1VIO+2wYxRR50EWYuSsKOZBr7N7bWtloy9mxjbNnTv7SUKlrr/69a9srPTqbB4GPGglEHBV20WREWDCMAFIUk6NVhD42umyARtbGxKx+yejk4uFNpYX5aAbK2NrrRaL2vd9QEgjFYGIIUAQORwteJQ4h7txAK2usmZ3ebkYVYCFDz/xaPD2O6v97ujWzV63/dIbd9NGd1c71x9aby7OOahoK9x8Yjcr5PbaIG4n3d507yLvEO1iTAWUjayt4dA66Yqz6ZUV0m0NxLyU52bW1PduzXd3Pmi8we6Tz+3tvdvpdyFWQYCa3EYh7EX+3XQCAtz2euW4PDm+ywLy8Z/6xGQ0T5Z40/AG+sthMjOSBOFb791xaRrR3GmZvz96A0StZS2nea6EDDAm0FhgAQBFVlEMGeOMkEpUs4Woa2EaWRWNNQIBZyFjkHiBb5kramNp0FmLHtrdJDza3z9qRYnldarrpcGw17PHo+zWeeljPBeAU86MrdLU83zP42ndTLLR+trKUj/2GPCS6GQ8ya2hlkYYWmcULsZFWRWCehHABjEkKwBte575dVp5zGNmuW7w8vJlcHGBLMlnzfh80eonjJNgpT2DDgh3cDi7fGUYxBhoYYtcWzhYxgGtKMXAOWstAggA4qzVzjkEb707vXlTEuAZ6KTW1lnMOUDOGos5UkIBZ40xlBIIH4B8AWJgsNlS/NaTV0M5PRbjhSmJaHQldGe5s7QSFCpjFCNI4047TydSG9zGjIbxGCixCEJuqdPGQQesA84BjLCFEGCYGbO2vXbloc3WaHZ2NvdoK53N5qI2hAkppHEQYWUMxEgb6UceV9IYhRGGmBshIEA+5Y1WjQWEedpYC4RzoFHOYxQ6gqFWRmtrAABCNVBrLGuPMghMlRUOgkY7qwFC0DlngTPaIkqo7yMHOWu1W1FRZ2lTCSGNs4EfqqKx0AGAPJ80Tc0YUVpDBJOkV1V2786NSoiyUMZpTOBSv+9ROxqfWSUd0hhBhBHhVFsHnX1gGzgAnXMAQKMNoVhIZZ1DlADnlNbWAgcshIBSSgmS0jiHrLEaPqCAOWctQsA66CBGCAurH6xBOfz+t8HR3iklLKsbdTQ+Ppm32wldiHxcawnSRXUxLkgYXOSoTM//za/8uw88erWqq9m06qytQT9v+XgyP0e8fbQ/Gyxt3r3TGCBmZVTsaT1ZRD5YuTI4vH3jxptniDCDaHu4POxFY1hmJUlvj781+wYydacxCQwhYL4ftjrR+WIS89bG1tL92+OHn/3o7HicTidShDzZ0s25xipZ6SJ3vDib1thcevKZxO+M9/e8HmdNU2Z5ZKp2spXsPrs4PrFVZrXntZ0XIpcfH9x8qzo9byVCZ/k4nXmY+lFkYWPFYno/08YNt5d6HXrn9snp4SL02qA5J5zMZwvuDQ2yHmPPf+TRb71zsT/PBpuXOsHi+HRWEwgcR5hKgJTTHoDbayunoxmA0FoHHLbOEoqBhY7AuOO/+tUv/dW/8fOMgr/+X/2Ff/hPfmE+9//K//Fv/s5/+Px4/prveAOsagzxgDGCcVRVTeJiZ52qKmiNNcBjEYEa1OdWaFQVOOjUEEPUbgrI2SrvLRuXGFV51AyGPZGX+WJWHt5Pti9D3Tzz2OD5R1d+57WT9UvdQc/NR9XaejtqM8B8CzAGmDLmJBbCYOggUshJaCgC/vtDB5g2uiwarQzpDLp+r79o6nbcptphpWWRIycAso2QmBprrJUOY8wQB05gbSAKHHTAUWcExJBgW1qlsC2b6nA83j+YUhgFDAqlykovLXVDghtjb75386UvfUvIBqE/CSJowyOOKQMREdVhOxmG2Hv40e3fMABbBR20xlmCAXDIPagoWee0UwhXGWyyb/3uL3/rc59uMR8ykGW5abL/LFXEGMEICCUA1EZDiIgfBQCiupSU4aTVS9MZgNY5YK1DjDoAtQVSWQYhgM4qQRjmBJ9duH/5u0fLS9H5pGGYJC2khaYYNcYpaRjHEFuCHPMhYxDohkIELCiLHDhXS6ONqSVEGHMSDIc+BbIVs4evr7/6+gFcCuOlXl1XnLdkkRPiOwMsdteeePjmW2/2NDrfv52mzebV1UXRJO1YKtUKmJB1EiWRh+7cvDnoLg2SLUS6w0T6HLz
4n/7g8u7GzkrfmIhyj/lhtx0aUczSaUBDhHhTKwCBMDIvcx50SNzV9cSpKs2zpD0gnIm8dNYiXV+79Pjh5PDS9kYnDmbVRdsnjVC9foA5GE+K3/6tL9kfeOxjH97Kp6cX+SKIgkee3UnHuagmNCKYS4tMk89sWcexafIJNtg2tXHNRmcdaSQCZyxUzbRWZbHIgQVCNgS7JPJbyWBeTKIQcu58D89rTTy+1o6y2TEydcfHEUe83Znn0mLkvASyQDnjebiTxCdnIy/gcRyXeRCAEEG8mB222glCOM0y7IlWmy2a8XBtd3KxCMMEc8d8j4TDw0UaORknUZhEnse0qLRU2hZHx9+11STyaa93KWr1CWiQKYdeM5+NmnGOkYXAOgj8CHdiX0NsnHIICuXmmSEGd5eSUlbjQnSHSX5QQgwev35pvPdanZd9P9q/tY+99r13xyzABEGMCfNpU2WtVuR5mrLA+Ela1cpoDFxR1UI71vKDpQDk4AoYLmajpN0mFnLHQFbFykwPTzEFXkxW25Go3n8eWK156NWNgI1stdqf+OiH4wBi41rd2AA3vRjvX5zePzgcL7KqElaDYdIvuUKYlUZzRqXWlBAEDGW4LpVzjjHiEYowJAQHIbcOIkyVUtzjYRhaaXELhglnhAjpOA+gA9BBBKyF2FpAEAKYOOSssxZgDDFFhIcEYYIR1dK1WhFx2hlnAairhiJU5CnzOUIUQAeB4x6N48BInZU1NBpoGfuE+8wAWBe1wZYTAg3E0Cqt55O0aqS1xkLnLAwYbirBMOKEtsNA62aRzaRxSZwQhqQShLqIxhiyk9NRLo3vI0Y92ei1jcFskmuhCIAB9Vmbe2F7vkj9iKXTIiQIU1TkTW/YDQK/1+1XdXPz3ZuLxdxYVTS6kmI8n927d2Kkxpx73MOUNlIQQltx5IxxAGCIsIdjn1dKG6lJBZVzBGGLQMD9vGnyRVWnuahUXesg5gg5baGWRlONnFFKG+cgZVYpUeXtdowI9QguiiaJQriK5mUdh63cr7P0gjJMCUEIGggJoUHgtXpLSeRXdSalNBo0xijVBF7g+T7BEDtojJ2UizD0nEU+psoYK0yWVU0plDKU0DTPasuSwOc81I0UVdNIgRCjlFZZ4YCAmEZe6EECLOTcl1IraCmnlVJ9FgUYN1IxxhqhfY8u0lxqoJXyfWYdOpmXZdn4FEPEkYNGaY9zaw2wSDSNqqWhGFOEIJeqFoJGIYcA51n1QAVOO2dR0vbiLlzkCw96Sou+36ZS/dan//7uoF0tsvp83m7FV5YHlSjGRep7bOd6J69qCOXX//Bry6fr3/u/+aRvymYxiRhRjFUoioK46/T0/ObqUzuvH339kSdbo/PO3q0MSv8b3/zmC08/9tjTz6fTi6N773YSsn+2X/Ngia+NTt995pmPZUWqcVLJ+dnZ/KGdrfHFeTM9HLZwAYQfIIf0X/mLn/r7//wzP/6jPzMZf0OWc1kuLq9GyQAojeaH1dn8aG2JTGeycrzQ6Nad2fZypDP42u+8uLrS3bnWOS6PH/neZW9pyR+AR1evRQO8f/NuTDnAzXR+2u0vOxBfWb3y6E//3EvfeSUE8Yu//q+f/LEffmy7/d3fKjt+7wc+1o03klYofvNX/l/d7WWuZ522p+ro4mL2zLPPRq73tW/e/b6P/NgrX/mtiCEGG+cEjqLzyUWZwj7GWOlhz1u5FMtavPbHX1cu2tq6fL310Orl7W/deqPbaoceX4tNuyrhrJpdnEcYtbh99NH19P4CUH1tZ+Xg7sUqa69vhKe1OjmecWsfe3T5x37sY/t7Gd2+fPtAMO5HXegsHPRba2vBOy9/+2K8mBTkynNPXN69lFj94i//o/G3v7C7GiRt/603To/efUUr+FM/+MFvfvkrjw6ejVudtcC9+PufR6ryMeskHUetw7aRankl6sXy+z/+7Jf++B0s2H//i//UYgw10ErVQmIECURpVvJ2JKpiqRX9wW/8s+/7yWuf+ge/mJogGHbF/MgCgv0IiCwIPGTl//w3/t7+/VyEDlFV6QwS5zHgcZxni5Xlts/7k2lmjWERldp4FBpNLs7OjwYdyu3pyb0oQAaQdF56AcccU4ObxnBGgQHaSISwahpOAGWIIAcgLoUL2mR8dLQWP+X5SWut/f4rGbpaaGms066RTRQyLaGxThvJmV0eDM9Hs6bR55OMaDkIPKMR1FYp5SxByCDr0rOjhOIg9An1yyzPJue6adLJtNsbKAutn4goDkIOADWy8TBCPEQIIuLXTQVUwwg3VohSeBRbjSqd8pjLuqyk9n1mlNUaIEeiYAgwhIRG8VAKQzGVsugtrTtq8zQnLPLCqBEaiiaK2mkx9YgzwJSN8IOYOL8xyloDnGPch8B4nFOIMebcD+pCamAoBJRxiAk0TkjBPI8QrKQizA77HqgPS2F7neHW2sMYIKVyXQlR5D4NMI0xaryAA+3ypsLxxiM7fWRKijSOCS6lLjOja0J8UZUE40bUHmLYAmetMQA62fHarlIA6pWl5dOLY4qZMRoa045bGFEB7MbG+sXoRGrpRdRIU89SpOzWcIAR4QErjc7mZWO0tm42S5eSIIhaJ8eHGxtrRYXiJK6nIuFtoapCzla6Q6Mx9CikJaPUKbGx0g1CpkRzMa9O5mUldDf2l5CXc2KQ0w5N56YQBlqntBMYLSbjy5tbpyc1ILS17JVlLfLaStCcTluUzsZng6X2/u39snE0CHgYhSxQwjVNI7BlQewlS0Vuv/vV907P0h/5qR8Ne29fTItcqNPDeTbLe6sxpjzxcUx1EPvPP/89so4RWu5z1m51GlnNL44vra7W+UTMUplx7vtNVd2fHY5nc6XQ7vWHelutKy988mD/nTv3T7vdrjJWOtZeXZml4kq7XVRACXm2mFnultaGYfL+6k2SBFapJR5DAIvZDCrCOKMYM4opxVWVWmsRweP5wlkIpayrurfW80OmKqM0JJwYgBBDjvDecn99fVDMyp3VHubyZD7rBx3AwmK2ABqEhABoWzFj3K63O/N0erEoOW4gRoFPfY5YgMosP5+nQsKqltxjgTPS5qDCYRBD7Bmp87Jc6ncnJcgPc+zX2xsrBpocqeGlq0RZnGmIoJGu3U4gBIeHowaY3Utr1VExLnKt4MnxhRdU7UGHwbrXiYgHtKoxtU4ZSglGGFj4IHv94hfPJ1MqrXuwqIUBRgA5gKxTUlsLHIIOM2KchQhBC511lIOf/dnvBeL24rQ42ju6duVh0dCX37g5jPzHrq2Fa35ae810zrCW8pjxZvvh3UanmCR+n+1et3lZVQsNjZNCQwgQhMZYJw3BBAEtZqmpmpYXPfzIw1/+9t4PfvAH3rvzrbv3JxQBhCFjWGkgjGM+0Upya5WqEWacMg+Hpaid0wRYaIxHA+rxWkoHIULAOWuNepA4KEXuUZYEbaFMI+q6LAACxupGCM59jDEEAGIktcMIUoIZRk451VQVgu2off3yh1fXVr9761u33rsJkOOMCimMsxBCCBFnzAHoEZal6Xwxi0Le6XQ313akFE
x4fAJOU0xygPfRI2Fg52D1wWadUwNiQe6YbM9bcPTlKvvtFgXcK8vaPJ0voKE+LixeX33tgTeeqEQTY9UZi8u33fB6TenKf+XJnmZTI2PLdWY4i0FsiWtTroVUU9DnzsVso4DmwvxsUe77aXwtiFwNM8KUXaaoe+zxiJRMEZtiS2nmvdxdaph5befLW/unxOKXa4eziZ7Myv16bDrBb7nfnOsCeimntqfenOzTuG2I9+4mfv7xx7cxfT6TFEjLgGkmpnuO3NdU9tnNq7exfJYm1xod28IKAn8iEWsihnAEhKHeY2RnQ2TpMY1SZJdv1ur9NYyHJ7+fEzk+JdSDUARFlMMS1KaSzW2lgLAtdFGGGsy6xQkGpZEYOG44Sg1s5kqKyVudIS82nRabWow3YGww8+8+FX37j21MPP3HrnHQwDAQCMw73huyvNeDTJo0bN9+jBoEfED0cCcx2lNbWGCwkBVkJpwRfataDmYkpnVSq5poQqYZRU1CWEImsoQoBRHESREKrinLqMuNRohSgFmACAMTLAaC1lWeae79drsRaShSyVKuWCUEw9xpUkQolSaKvCWsyIU1Y5AJBQ6HheWVYAoLKUSmuXMWghRsh1aMG553pSGS4UNKaUhed7buRABFzqGWutMQhhzwm4NBADB5M0TWtRaK0Ko9AoM+OFMAXICgCx57quw9IyC/0gyXKtINAqL8qoHnu+k2UcYxjEQVXwHxqsLSi5tBoKwxFQLsEWE6ElV5xQRCCS2nKphbIUIUwwRLjk0loAjXEoldoUlQCQagukVLUoxNBkaeUxCox0MPQch2CEEWWU5VlOAHQ8zwIArXYYoQ70XKfKZVVxP3RhLczLyvGcKPDKLAuxr60mzCmxSItKWRP4Lpeq5CWAwHMc5nssDhlAEAIXEwbQdJJRAn3Hp0iXFccQdbuNfpqVUqZaONbJk0xJhSGJ6x6CgFcycJgfMG2F4+G8qAIQYoiRx4qKx7VYWK20wQwFjguAdgkLArozGlGMXdeB0DJEi7xkFkFCEiPjWmNcFEDbokj8wIMWEUwCAiEy0piFVlMonZZZkqXd5lwGKyGzqqowRMZo32cBxYwjTBlApsj5rNIuo9Zo1/VmGTdSQWAhMkoLKZTvUm0wJtijrqwKwpgQPzyiPfzopVe+ff1n/tpPf+lf/OpP/I0PZ2X+4rd2g/WzL7zwg/HWG5/4mWf+4I+/+1N/9fN1nB++8/0bL3337/wPvwJZ+3Dn7q0e57i1+cw5h85oXGhPH+2OenujcSb+9CtbCxujsAWEnUIcE0q3923gsctXHrv9nRt+gwU+RZ15GDrjCX3927mtoKHnwmbjjW++NxmptbUYkXKvV3qNs/W5dq+/RdkGVTpHW4NR79TpJ7Z2h81FlsjeT/7003/wO19NcPLSd35vulNunu32+/0zD59bvbj5uR/94FT0HDRbbrdWn3zsz367h8amqlJDYouRX4v6J9kff++5q6uXPnL+k1/+k++tdFwAxHKnDiZZJjXpBIiSrOJYkoK267WF/+c/+UfTnVtl+YGt907ygCJrZpN8OBoDA8u9wxipROvhvYNzc6t3929ce+8H893O+HhUaY+2uv65hUd/5PRgR8C4VsPOg3e2Tl1cl5p/79X3zly4sLTA7KRoNpsVq3IzjX0IlHr8mZ+4f/8OJe7Re28c3n075cDaopfmeS9TpLX01CONuXNf+9I/dfsvXDob4EDNegWEljFaqzfztMCxaay0jWBZUfrBsrUB13lrfdGNmayE314MGktCFrIcijw33Jnunohp4QVOmc/KIhlNBu3F2klvghFO0tJQK0ttgIoW/fH48MpjD/VAhP3VC5dWCp5+6df/6dOf/wVdpYFLYh+nEsSdIDeQAUp9IMAhAlOIFi3QAJRWAQMIMp1f/Y23OJoDoBAWAIuwF/8f/oufSMubLx2dfPEXPjzdfzC3cuHwcLTYEIev363y6bW7wYUrz4Ld4a/8yi/8z7/6T5JBogpwf2cWdMOVx5o//zc++BdfvfOHf/4OqOxi4CKkP/O3fuSFr3/bMMYRAITs3DzMO63Hnv1Ya+Opcv/awfExbkR1v/M+BZ7PBiej2SSdyvyZRz403OtXkqdpWQlQcQuhDfzI8dzhsO8yBiqYTlKI6LvXXw1qgZTKKlTOJAgxc3ylVM5LlxEAIUIIY0gpMZhYjFHgONjW6pHglVaSOABRX2NIMNVSU586rjNLjgkm2hikrRIaWtSqd04GI+ISJQTBFAAJgC6lKHIRxp7WZRhE2hpCrBAVIkBbUnDgYxdh5XttY6HkykCAoTNJuEcYo4HVqMizsBYbZQjErksrXlppodWyKKdK+FHd8koqaSqLgZumnApLoDEIeJE/nI211H49JEGYGakBrYyZpBPo1etOwPNpq93qDfYYCoA2VgmHAYJsc3nhpVdfuniGxq0aBiCADmQu0EZqAxgllCbjIQRglM1oyM6cOZ3sJ8Vs3JqLAp9mI4MZZkFQctVsNXmZDmcZ1sANfKXs+UevnOTVI+cuT3qDsLu4dTTe6HZQ2BYYeZDFcasop47jiqLMhWrUIh8BigyHlJGokIa45M2X35zk5Vyr3trZrvvh4lznZGoO3r4vpUqTgUf1pcevpEmBvXpU1/2dEcz4+lLX9+jW/h1hk4WNdsXAbDJaPnt6cT0ySJYzIVKRlqrRCiyfXTm9dPbc6nMv32cIOS6dTkrWptiauB6+fG3vd//k5S9+6hHP1Tt793/ub39u73g76anWOXogueT9T3/hJ154/iXHhJCQ7e1twiuLYDJJkNtpLC7mHHKhNcCeU4dSHh3etzTAbr10aqkZqCzTxlCnGdXrXAlPEcziYW8LQxw2GWFY8Lyz2HyfAohQ5DFldaF4d30NnQxVOaNWLq02SwmzqlRCQOI6XquSlSaedd3+ybbGQFiGmZ9KWIymbt2bn6tNhiPJQSGr+w/2K4W5JWlB/E6delGRFUVVMMyNNghbSAEmMEumiLBScjHK/SACCEESQqkpCzoNun/nDiW+R4mDYLsRUIuqwhgIpLTWaADl3EIn4Ow/fPslVeWLMVGGhzW5erpcqzth5C41jYsV8xH07GQ0fbAjkyrWGIgyJxhKpbAB0CKAsRf4boP8p9/5x3/2O3nk1t7PYgMLAIDavG+lBgQjCEReTay1mBJZllYJTODZC0tzcQPwRlGxdnf+2WfnrBjRhlNM5Xxzbu/46NEnzk8Xa0HUjBbaVcENV412rGQ1SaxXJ6XKIq8xm+ST2VZYC+YWa+lgz6hsa/vOD16+tbnx8On1M8CKvbvbLiDjwWAqZ1cvnp4m9yT2AVLEZY995CfH/XL/sJjupf3jSVADVy9vnhzOjtLMx3Zrf9+ZZu3OUs0xayt1Dc1jF9c+/+T6l//wjzaW1gxafPutd3Q52Nw8Z2CAqJ3OUogJBERVlmDUXmg3h61M9jXi584sbn3/VS2xx8R8kyiZQR1QRkIvqKStRUEUBrPKAIjmGrUsz7urNYzE+c06L+W5s9HewazXx0lVudh2Ke02Y4TNZFJhY7n
QRgOCESYIEsQcxxgdh54BGkHMhdLQ8lJYYAllWhutpQGQMAYt1EpBA0MHMaAMgBYq5ENujAao0hBYAAi2xhhgjbHWWIwwsgAYACHS1uD/LBWNxmk+m3lRY1ZpTFAQuz7EapKSmIR+EPpg9XwnOYqjVrSzc8dtNlYvXE0yaSBEAbvz7W+SWuQvrwy1dlmwN9nlGObH9w22y+sLjVrj4N3thfNnW4ttkhWCMeP7Yiohdv1ukA2mqURRbU3BUTlKpLSE4fEoNRoHDT8gDARhKszBzr7gutamQScIO/WIWABx5NNcqjTJNy89c7B7f2980wmYkCTP+NqZzV4/Pdy9v+J6qsoO710/3fRmRlW6MFxMRyNL2Nrpqwd7O8cA0zi882BIsqocj0Fn/dJHP7L/3stBHXKXrF9ZvfCBpcPtLdryBZUeKHvD42n/aH3lCd63K6tr3/zKl5ut9jGeKCd8+kNP3v3K17ke0dmQVd6ZVs3m4s6tycbDQV+8unKWayqrSQ5s88vfeTDmDsKMIFhVwvcJ5iZy6Re/+CPrC+deuH7r3/yLf+q3aDpDWhqKqIUGWKuVRsBaDSwlcXdh0k9YRD7w8WeO7hzECw4HvOSFw8kkLaZ5gkpHlMS1VtvCJ87CypLxjBtlWQnLQVKmR3NEQhBKCSXHM064Js0meYSCxok4zM2dcfUXt6q/sdH1YJ4rrWTGvLZVjiwrJWmReaIklTMHYldlWSckjrgTs4nn4woiyCXVXJeiU3dkqvv9ygDQnpv32iuKzmstFTTEa42nKVF9yTBXAGBKkHO0d3hw961kONBVEQSUV6VU0AAgoZYWWmsAsLIwxCWhR0cHD1CerHW7zdXNpMrrjVUfaSn0/v7YqUUYUsQzkFbAdbMky4ppMp1C15w5PTfORpOCp71xMZpxAUUu7m4f/iWp6NTm4saZ8/v3bkEhN8/Qv/XXH/vDP35z0FeuS7QCWmnCCEZE26rIeteufau9MJcUs1pzPh1Na2FytiUXOvlPf+6KlM2vfu8HJ7PseJAc7p1gxOIgOOwN0pk/P9edTXWVov6+LZNESQg9vLk+d+dwX7ou0M72nfuOVUG76zposnPUssCtZFYkVx5anwrz0Y9++P7+HQZ5p1GDEI2HFUFGAVWVxsHBYn3+2ts3Gm336Wcec1x+89qNc+c7znyrN0yDsK8EnKTSYS4AraKcrp/eZLBsrS2Md3fefOlud2nJ6/SKPDtz4Ymj493DB/db3bYaQAxhkZVl12/4bHR85EWdaP4sdjuyymoR2znang/mxhU5/8SnTg7frcpM8AJJELdhP3eVE+VClKofEMgsIYHrBPGsKIL2RitcygqNkMySAmL5xKOt4tXJvZ17803qN+Glqx/o777rNYBDQFGOZ8kA2cgqG8eN6eRAgojChfFINKO4KA+YR6gT+H6gSOComUzTmhtLhJB1iqzAjEzTClHkgoAabHAhQBE33UIYYbVLHGDLcpY7Xq3S0louJLXFOJ3efPjqJ5//zjeydHJ8LAiAoTHJ4IQAoDjZvj+Mam3Pt5OTuw03KPNB6CJK/bjpV8P7rYX2ZJDs9o1Xv3hqaQ3YSms9m/Wpg0Q2SbOJUBxhm0wzoe3c4hkWrRy88eUoiGZJKQx88vGrRfYSQtZxKSKEC04YwYx6xoWIegwrqwDsdLsXJrMjGPQZk6lMIDUQgrwQo6QIIgcUxXhyFNSXjODDcY+GdtjbxgiJCs2gMvU46jaTYsqwhUoVXHm+G9Z/eEMDCCLqcGtY6CGAtTGNeiSULbLC8aHjMsYYIkQUlVbAGgAYYg4tktwNmevhosj8gCGEGYZlaevtprEgy5NWs8GFyDPuAh9ZpLTRABCELRAIgzAgc3UnnRjFOWOYUIdgCLFihAAAKIJlWVhrPN9FyKEcUIdBgLW0BFOHgiLP4jBSxHqRgz3EucrSInR9WeTUpaFHrAEQYkQMxCTPSgtMlpXWWGMz36XG2nq9CbUui6oCgiuJCAOQaaNzni8tdHlfzfKslFhpawEoiwpBwhxaZoXDXM4lxRRh6HoOMJZSWlQCUOT43slkMpyM637dZ8RhVCthlHHdQBmJEcaQGigNUJ5LIARGAyA5ZNTzHGCN57pCSCErbS200BjhM2aBARhTRLiS1ligQJbkRmmHUYSAtfr9i58pKhciyCBCJMsraoBPkDSgyCWXPApdraTH0Gg64QrmUgaO04hdSongFkIynRXSlhDANCvDMDq1PDedFv2kGBVlKwzCZm1v+6jVqDXqgRKcIAq1MUpRiWIncCAi1loAGkGAEWTGYM+RlTYKAASk1tZAnpUCEC90CIYOdeoWlLxs1hsoyyTQLkYGWop9gpixeeySs/MLGedcSFEUFlOESLvRKiqZpznUsBbUqqosOIfIOJ7nu3Q0SgnEVgEJrAuh0ZBXkiJUCOUz4rjMZY6oSoKRMgJBkBUZw++btn+4Gd1/935Z5FuH/Q9+7uMcuW9+/zWCW5unFqrterW4cvvWYc2bP945+N6rf5YXY+N4W9ePNx9vRksruLOoGNh6/bXj9/YP04lxMU+ELNN42cfK40fqdD08uTsmpjUZo8NpdWoluPvyPVEYZZQAiRN4NJgDFTvezi2P793rA3hIMOg0/IfPnbKuBS6waZhPmkThoucjVKishEKfWXhoS3/Vq/ODw3f/8D8doDZ66Ozyc199/lw89//4h/9oq6ehBVvj6te/9oct/eDqqYdH/eLU+Sfjzv18/MKFR1b6o6LIbZoUFBtE8SvvXE/NH3z+kz95svV2MdgmBKyuL2xv943Wo5RbCGq1SATh3/6bPztNZ099/urd29tlMv3oX/24qcbdxQ1BvN3+icxNbyI2Vj9Yn4tcPQBh8Xt/3F89dXll7YksN1u7N+aLVjaTeUVQbk8GvX/03/zff+vXf//Rp87sH93Y2elHF1ayaWkcPL55pNMcG9qJTxUZNjayKit331qL4XBagtDr9fWpR54O6+vlbHLnzX+mdq+fWQyUUulgHNWbzAlIGGVTXhZlVHMo8Q5GtttYO/XUB7Z33t64tNbfu3u0txP59cWzT959+7ozS5ExhCfZri6n07C73mg3HYx2bm8P9o7rdfbUM5e//p23nn76o3fu3SsGx3UKjDV3XnuApdN6+Pz8kkdGJ3/27W989qd+9tbXvlNOpaiyLK2U4ypNXn9tMDreay4MrVE8nXi1rrIVggQ4hNnRP/jbvwQRlaKiyAqutdIUzm688+2nPhk/1bRF71atXqtKkaf5xJS4BoNW68Gr1dbdgyWQzcoUirispkvLnVeu7SyfqqWovHvSe/KLV4OH5ltEv/r17188tfrjn9k8Xf/EWzd37+/sh6HpduvAjfYH04JQNrfkaBbU4sHJ5H0K0oJT14lq3Vv37+weHmNFuRKt1gJEUqZTj8VC69CPnbhkAAArRCGxj4JGADBkxgLGYi+opKCUciEwJph4BhqppTGaQgYBNLwK/FCLXGuIKQ3rnkxLgMk0nTGKDACT0UxXlc8ABCoIHEBomeS8qpTWmILhrD9Xa7mOU1RTaS
wAJPYiiKiWEFgXYcWrDFtJ3cChTl7yyhoKEcKO1jZsNLK073teUZRSagMhxcCLHQ0Kq7CsFHY9o1WZc5diRjGzCPISQ4uDUNvKGsSY1UoCx5FSQGERxCxgUlgAZkkuQxpwYZq1WlGKMpeCg3FaheEcUk4yG2EM3dDnFZ8l2aWLj29vH50/v1zmJcNYVwZSk+Y5Y4E2wPfrg5NeGAdZKtpzQZLu+HV/PJtxyYjrupQgwXsP7qx+5GPZdBh4wdxid3T4QIpMKV6rz926v9P1XGrVhUc2jm9uHR/u1rX9kU9cPn1uY+fu3aXuXD6ZEYyMlWNT7Z6M3rh+32CSaZTNqtXV+cJApvyINVjgvP3aO1GtVU57tWbxxBNn7t++L5SjJaFOo06UUQu+h3zP3d/uPX7lSq/Xf/PFG+eurOdZfu9ocvqRdVOYcjqzgVl/+Pysf2ykWu2u3t55sNBqPvvxD9964+2D6fhkmk+OR0E7vPj0h9/bu/vXwmepOM559oMXf1Bfan/0r/z12cH+6man3bj88gvP14LFbvfU0ckhkkArax1Vjif1+WVAG5EbV1UZIMf3XJRN4oUoQQZIvPNgu1WbW1o+Ox7tGkT6h3vYAtcSakAUNyqeFkr5DC+tr54cD96ngCsLpSCBU2st52U1mymIWAhdaiIBgRPGuhBSm1LAqoII4vF4ZhFzqWMh9J1IGamkHvT2Npunx9kkT4E0ajLNc0UMc6xHsasaLS897BPCqRce9xIrS5JncbPueD5lUgMt07HvNRzfRwGKtPugv6srB1s6HWZWcxgHoHDydELCRmd+o0gmxOQQQyjS0WEWWyWpVyk6HFSor24/ODJacGXXO2RtPuo0UdQlc4uLFaKz/L7iBSMYYKSUBsA6DtMG5WlKHRJaJDjJVCmlQgggzLQBwL5va7HGWi2VwQhoAJWFCHmxTwN6fJjKo+qxy6urn38KEs+BLh9bx0W8NJKRS1dONxvYY93hIDMZxxQjBWTGKw4mo7IwZbNeDwL3wYPvalNrL6wE3rw1fPfw3q1b1z7/kS9yAx7s3I+R065FgEIgdOwERaYIssJUhZJxXBNVJdWYuX69GWlaTkvZOzSmUC7gn/3cU3/83Vcx9m/duD5rNc+e2cwrk0/zMYePXv4AcsKSq+vXHpx96JwXzkXtoOyXrqXWaKTF4GTIPBcxunjm/O1727LKJ/JgLmazNHPjAGG71m2OZ3KWmbIA00wyjC3Cw2lppzzy/VPrG8sNgGyJIHMJbNTqeWKHY55JWXJRC/HafOw6LjkT37q/v3M4hggRSoE2WhqNtAVmNp0BCF3XtcZYoBFUQGqEgNQKI0gQsUIQDJ3AEghWaqYTyXY7NFpBBBOuDsfiYGTHOeGSakgQhMAaC62FAEKIIYIEaw1dl75PQbtVa9V8gbyT4UhzNTmZEIdBgGrtuqN5tr83vVZOp+X82iY7c9q4tWlZAjcGnte69OiHHHbj5deOxtsXzq6Oe+OPf+jTB6NDoJ2ZAje2RvMN0ET+yYPe5HCMAQy6zXE2cSxp1SIIJeZm5+VXn/zUFxfPP5Mf38pHJ2GzJhKe9dPAg/3pdqe7Np1lYlZwIbamorW2LgHNtTGJnCXm3DMfbzbnhynxO7RJR7o4tkCp/rTipefC4e5dmGbIb7bb6O7Nd/1a8/xDm+NJWjtzuszxTCHYcDcXTjmq4Fu3CnfYfWQtU9H1N57X5dHG+Ucf7O31946Xzq8e9u6fWqgvt2IPTetRtnH+8Vt3i4aRL269tbi+vvHoR7/1J39+6fL50c7dzfpkUqVP/cozz3/3hbml+TxN82p6OBqUaGIECNuOv7D+b3/v+Otvk0JHlBAEUKPVDGsepa7Uwe4BvPanX3nh1WttWGosK8mtVhAADKGxFgALEYQEMGg7Td/zpTX2+GSQZKMgqJ8c9nziHdxJLNYXH96c9IXIVZmdzC028spMucys6Ky6vlPl6fZ8ozk4USisN7vh6OS4HnkJMK3Q5SO5EoKNTrx/ZJ+/W9WD0Y9+aMHFHFGNHUmB4Q5OecB5fX8wmDuzrLMkjt35xVYHnQz77/FEzy+c5uMc66JMCopItxUC4ARt38T1RDJUgai5hLy5IF4uS5GWVf9opxRSs4hPR3Z6hHRGQBWEACDuMACtM6sUrypjLIXQJRBgubASrXeazdDV8/VJf+pj7ESunRVA6mqWGMUdzyJoqiyphgmJurjQ017W6iw2XScbDdbOdksYV5BqH3q1sNe7BRz3L0lFZamHo+n8ynxA8eHu/kaH/s3Pnv/db2wNRpISiBFCSGlpKEFUg+HhDc+rGa7LUTLZ3/mbf6X7X//fnqW+Vw3Bi39+eGevZIzOxiVW5dJG69S5c4Ov/aCxsEGdvEYj1wuTyUnU8M61m8QU45MHjbarddaK5wDgjU64uNlyAIFcFYXujXOEqFQRYXh/7367Tj3muyzA2IESUgh81wmoA6Udzyar68uj0Y7HpNLV+pn1Kbe9rZM6glvvbrfn5+NGzIJwMhVtr77oeGUl7r29rwGgYefa7YHamSBj/PowS2eWKOiIIpc8yxrzcxxAjnHYbEEnIkG4P+wvB3CWpdDAvExq7fNHkx4lUuPCaVSdeZblZmVxAUlik3eBPGyvLUAXDoeH2lpLtMqG42zmNc4HLLAuI54PUNBtTcVE5DNw5/oJ9cOLZ2Krhsaq+aUIABE3IhTZLB07CMT1lURQAkw1S3tHe2tn1pXgmlcAMwOsKEsFLDTIJ9hvhIT6kk9810MAJsnEr3eY6yGirIBC5aJypUFRbUEbM50eAc1DgzbWF/snW6889+9mJ0euS5QAc4sdYigjIfZ85ncH+wfMkdl0WOUHS6c2Ns6cmg56WvGToxRYVfM9SDCAJvC8osglKJOyqLhwjKt07kWeyY3gsihKqyzPyrw68rFCDFWZqdebrp+m4wlGDi80AkZrjgwiGDoeg4gSrGejURyeMnSBeggqYc3YrzXynFdZgaFXj2LgECErBYFDcTtw8mwaRm6t7snURzTGsgBjXhyNF+bc7upyMkmb9Q6tUYV/2IDGuShLIbWO45ARYg0EABIDaoHvR74ympfCaMVchiihOLTAWmsJo6IsZ5LztGSOY0XlBmG91TJS+JHbDBpSGwNgp1XLC1GV0lprLQIG6FxLLjWEoyprNVrGwwQ5yWzqMIdgVgjBXDIeDTCmnsekFBDBuW7jZDSBEFtgqiw1VndbrSRNy7wClillVaUJxlYDjCgjDsVIGSWqirlE8BIBE8e+w2q9ft9jruc5k2KWVlVelAwTB+ByWjk+ms76VhvC6PF4LK0MY88qLMoqjOJKFOl0GodO4DlSFWFArMFcSQAgF6WrFYRIcGUMYBa03dh1fUagFEJL6TiutcZ1fClVKSpkgYG6xuKyKglitbjeH/dqcV0JWRRFpYQfeAw7SZ4QyqwxhDhCCm2VNQZjBAHUUgmuEIJKaoc4hBJkree6QoqqKpnjBA41CDKDk7SsKmW4yURFiInCsB3Vk5xb4gSh44denhWEOEZZjDRAOGCuh13KUCeOZ
LP14GiQSjHjPNNVe67muYAiJI0hFJVV5TlOsx0XpvI9aoU2hmitlah8zyeQEIaM1IQRYKEBZmmuleRCauUEwSyZGYAwZdOstMpKVRGEAAAQE6uUi4mydn+WztISWcQcapTiFoBClFkFEcDQ8KJAFsZuCLXNpynFiBrrYVKrudrRFlpZaS5kFLqV0lEYyTIFRhNKILSu4xguMGEuw37gKqDfp+D8hUgcjl798+8vLcw98oGf7J3vv/vu4Vd+7d8tALP33huf/8XPGn30F9/4vdHxMXXx4sr89Xfe3jm4ebiz64bBgzsnPMeNVoN7Yb1bG++eKEPLDKYT6bbwZHfYbbGmbwm3NQyB5kEzANZRAGTFZHN90Wssv3rrzp13tspUEhCEIeku0iee9HV5Ixcna4tnsOtOC6upGwY0K/LcKCcM33r7m0fpjmbB0kr7wf3eRz6zdvcbL83bU4P+yVf/6LkzDz95ePd+ZsEHr27ODseP/9hH/uTXv3aV1B67dPWd9AYIYZ5VZSGrXFSVjAOPQnvv9tv/4v61y8sLa2HAMMmkYohWEhRJ5gSeLrL52K83g3hsn3j0Ulivfu83v/yN3/+u5clP/OznJIhXazHx2RSaKKxPer2GTe+8/sbF8wsXN1q8/2B1eeXkzTfLPP4v/u4H/+SPbo9uJCZZffbyT96rbx08//X1U/z52/cqY1vEme800ZhDBRaXOp1mfTjY8Xzm+1YMxqNZWoHW9Xfx45/6246y99/4qp292QrNxtNzJzsDQFwKIZdGWRl6Ng5CoIyc5ePtk6ixASq+++BmFJHh7rFQ9PzjH1EC3HmwZ6nM+ycS4OZCa+Xs+s71t6bpSTUbn34o2NxofODDl0Ex2353PO6Hf+vn/+mXvvRvXjn+l34rcsKaLAWteZWYbT3/ja139x/0ko35+kb7XHN11Dp0a5ksLOQVGQyrYYIh341WW7A2ZwAFIkeOC0F1fOOtl75xG82vlqoQFhLG6pGLET4+yeLu2c1zi8X45mw61p60gA+TzK83XSeQMgkpXl2rXzzfcYjDmo2d41l3LtRUXjg1x+/lkmWLIY0g3Ti7vnL63Ds3DtPSfOATTzWvkZvffUWurTz+xQ8/eDAdzMpaXMdg4iM+7J28T0GzEToQBXX3cf+SNlowEDbblsIqraI4YBSkVW6pdVwXSUEorndrw9lUGmUlhJhKDYEQBupRMvRpaIxRmgKLgUVSSIQghqAqUm1tPQpEwjOVB8qvZrnByhIgtIEY+jVfAZNXcjwcA2Q11gDaMHCNAdNi2m3MZWk/r6hDMCaUMa/icibxcuf03tHNuWadQO0HnlDKKoit8R0Pm/ctuiYrCgRZVmRSmcpKo4DHGHUwgAgQZIiuVG6sJE6IiRu4cf/4qEawBcpaHvhEVAIgiyAeJ1PfD0SluUQhpmWehRFdaLfTWcbL0qNxxCgjtIBgOBvOd+bTfIYxQRRRx3E9J0vLMPIunr0AUAkwshBgRrUFQRBgxAajnufgqBZ6rg+pO9k92r3xVuRiBDxgYamUkdIatb97eGpSAOQoaY/3etOj4yxN5jc2BHUurm7ev/HmNJkW2ZGcCNejn/rw5f2Dw/PnzofNxVlpd/r9/kmfsmAwnNVb9UcfeoT6LAcOAmxpqZkkkx987xVvrZMVs0a7xih2IJxNptOT9Ghvv8xZvXvx8HjcnF+IW+3QMdkkO3Vms7O8bGj94qOP7G7dpAy5XkeJcHGjAY33r//5737O72JhTJqNKvmjP/b4t5+7Oxzce/ZT5x7s9b78tbfmmvGTF0898aGPXTm/9J/+3W+eXaCXrjzcaC4sL6wmiWx2l47vb5UJX1huJ6U6Or4ftRrYaaaz4/piPT0irL4cNFeSbMK52Ty1crB7L53NAuy4fgMavNZpVLk5GE1MaZhvlMEHu9vQQkgk1IQQ92S/XznEYwDZH54NVjfWirRqLLaw70NIOPQRsO7coo2CdDZmFmLGtFEAKayYrUqEFRdVnicIeBngENnA9wTA17dPoIKAOhhp4BW6hA5GrYbTZGB2sGPKalZMxCxTCvCsYlSHvq10iTxfqAoans76VeEi2hHaRI5HlS0MwpRxVVI3sA5zcY1Liwny/UBh4OBQSIgqa1Oxdu7MI1cv79y5+/VvfEdRhAiQGtzd5e89yAMXayCf+dBKPV7oj685BBtjjLQWImUsVwpBJIUuuUDIMo8aAwDCmCKrrQIKWouAIRRhjIU0jusYqaVUyII49Ai1HjCf/uQnqpPRWtM/Ocy2795ZX12+dOGpa2+9rezgvTeuX7h4mlBXTBPLQj9sFirNp4myhe/7RcK0YmkmPcZcAuYX2rHn7jzYc0wY0sad668WBi7OL5kqfffm1unHHu+ubEJTvPjWm8uNJrLQcWOHj3vyhXoUIIKO798MsQrD1nDrPjPacakTuJ/5zOdcFr/16nNalASjdhwbhaHiYaPzwptvP3T5wtWHH6otdoZpmRrtS2QMXV1aufX2i0Ax7CIjaTKpItcfTaZK6nYcqKqqCrVzaMaMGMzunoy1hZWw3Yh6AFVKTaea10nxoBdfbM7FKA7Z5qmz/ePe6krrcJygrCpzi4W9uzOebzeVFYNhYiwUwmgjCYZaGyOFVBJqgBHiM45t2Zpz45gEjus4WAPrO8hnjKiiEaFTm3RxIVhqK6PHxMFxq1UVlVBCQW93m3/7ueI7N4rx1NOWSikgQlJrCKFWBhhNGXNc7z8n0JQxMg5j0GCu48ZRA6X5rExOPXx2aaF1cO0+EmWrUQOCa+K0O51+fwK4Ojjc3Vibrzcay/PLTV3MLbkeqr34g+9ffPg8mNt86pEfP9h6M+3dT3o3rzz15PA4qab55OS4s7BUD6Pe9uGkXwCrW7XaZPeOcea9Wnt4dCQybvMy8iilHi6jybhMppMoJs1OPNbx1atPfO/rf9qs+17sxPUGjecHBSqNmo0Hga+CoBHXSgdHlZSTaRJ6IPCKu/e2N5aeMK16f1BNekUhS6FncpyX1tRaDunfKQ72yt5wOpk8/KkfuXftAQwd49YO9rb82Fs83aYRfepHPp5mhg/HBztbejrdO7n56R/76cNrryQ2/8jnP/u1P/gukvr+9Zfdc2D5rKx3dOm8fPXT07Tqp8PMVbBMTalAUdV/6xv6B7vTo0mIdUApsUIbhQBy5porSZohbL/9ra/QEHSjQOSqlBya9990GGDt+z/mAbBGqGaEit7+sCSFxQuL7SCKJ6NpvR5iLjUBNPDUNLN5XiYJBu6op90gbrUcU3Egy6PDSZKLI2e6sfkI0Nbz6tPxHVsVnU598/SCzOj9+wdrIXS63vawfGMrP7tszi/ywMmyyQlmtVq4gHBwkOWMqZPt2wpHQWhscU+AnkMw9VtFIZCxQigOwEKnaXJnoR1Chpqd9sn+DMg0sdaJulzmWZkbiCIPe0RrmI6yXSMnBBsBJEI0TXKIEdAWqip0PMYc6kEfW+jZKxfO80EvPR7WG37YCAZb7zEGqmQMMZmeZNjivEiojxvNsLU4LyUJfN+T1fbgwUKzXaXDwI8s
bkDilWAce+5jHzx7v5cA8M7/XyrCDDAPLK4tQolkWfkx21iTjs9+7fffm6WaYSQktFo7vhvEYVbysO0stKLp0VG3IX/5lz/sepnG2kqYVOaV723Nz2FtbdiIs0Ldu7HtiPS157/2uR8/nR0MIIopw5A5SjFISH1pI+pq8qdvJ2nW9Vtu6AKOKyExgdgLunH7ZHh8/fa7nfZcw2VBSMqy8n2XYnc2nVUGxV47drx8NGIeoS5wo6Y3gVs3jwaD1DB35yht+PipC/MBQxAUWqQXHltPd44Ptu8VvJxlmkRexYyq48bKcnEySMbjNM28iO4+2FlaX2vUlx2/wViQ5YN8ksWx7wU0dIFWPBXFuXNnkumk2a7lorSyBFgbVIoS0hIIqRAqGLZz3VaRTVr+oo+YzE8szhEnZYkbEcuqoZK6yDhg5OLpzsG9yeL86s7O8PUXv+fpJcvL+eVo4+z63ZuzJogpRsrxGU2w63sxQxan/YQ53nSa+SE1vFQapmXFHDeZHc/XluvE5RWqhd2DYU5DCq1ZXDxVSQeYyuoiYAYYhIG1Li4oyPrDIKDMgYOTESMoaNWtxtQwaUVa8Wa9AXRkcUBpmFs6d+a8LMcs9l0mjnZ2gu1roV9DEOXFkPk11wu8riUOcn02HQxEldUagdUozTNthMsCY0HsszKR9W7t1ZvTh55YY41g1FOjivz8j/wY1IOkPKg5obEYMUIoZk4kRUIZnFUzDzlLS2snvdSvj/1mg6qFMrdOvQPItFS7SCEslbBQKVlVY4xjaGdVkQrfdZta7ldGGGJxPuwtLrRcP8+SzPWiNBMBBaX6YRpTKgWM8V1HlNxg4Xp+FDllwYVQWgqLiBQKIMgwIYQAoKUxVV4KUUWhKyudFzIK62E9bsQBwk6VZ5qbWVYSh2JCeVkBY1wXeS7Lcm6sqdfcPANcGGRokRohBKYKaKtKO0hGkmtANPO8qtI5LzDAAELOp0IbazTBBECqtRllqagEIdgqY4QMHNcYGzgMUswFV9BBGDkOwxhnBRdSAoym+VgBmJRqWpQYUaishSgTUmEIKC2FMBBqg41A2mprbEVBMk2xISeDiesQCLHR8KA36HY6BABuTFpUBGHPCTFBeZFDAGRVuozGDV8BKyseuC5gjrXAAmOt0VpLIR3CIKYlL3klIEIGTB3KyrzkUhBIrZJKIiCFg4iSyhqrDWCMaakQxBDTsiyKVDBGAER+4IuygtogApTiBMEoCIzWGhjBNUYQa+0QmMyERZhABLT0HRsEnuJASVWkBbSQcyGtoA4hxGfEhyaTusISYg2XAwKQk0vBHDbNCyjhcDCp1UMvIAChwHEJUgFGLoaVtUobihHCLgDWGIOhxQRQ7GRFDoCcazVLOVQcJGmlDPJdt1KqKDkmTANjlIYWAqi1tZSgLBUV1wQigMHJeAossgQgC6X4YZuvkjrNqyD0iYEQoFotRKRwEEh4bjGhDoMMBYGjlYIQlhVnAApZAQs9z/WYZwygDg0DfzoeAYz/82JkTZHUsCoL8I//j//tL/38Tx63dTOWenDtgx+KDh98+82/uOZ4TiN2BifVzmjPKuXHyLUKe3qZtEXAKI4nE54OB6HRAhE3Y34UxF3/hO/OUdloloUULqC+r0aTHnTdUxcXXRb37528c+vuwf54eaMRNtqNRvjkh85v7d3M5d40mc6ttyYZ96qexkDhIlPlSBzCBgWIlMWoUUcjmFHHYH/y45/6268PWj/46gMF1O/82y//vf9Tq8EGvbt7h1pdPf/UG3/eH711PD1/rRz2YSqwcc5uLvlHk+lAoMDDGiiogjA46qXX1V4ee5dXVvZHQwGMG3srZH4yTbBjwxAk4+nKhdXvfOO5h554OA46bCq2dk/+6Ne/euXJp69+4oOzlPOpiDph88z6wetbo0zno+zNr75RzcjTX/Af/cDVcZ5vbj6UHF1nuf6f/tn/pORJd24Wu+4XfvHJ8htbN24fMc/HqqJWNiUhwpVG+DV/UuQu1fdPjiaFPfv4F37x6jO7/QevfO03w2J8+XwkdVEdjrPhjCCwtBRj6Ka5ElWiIFk8vSorlUyrC5eu7ty6k4+nzeaygLlx/GD+MpydDLP7bizdsDPp9fjk7u67B7OjviXxxmOPIVTqqpodoACDRx4+7TQ2s8G1K5caN96xoU/m54Od63eS4UHsCxR2TkaTX/57f7/TgK9+81+sX4rvb3tkO7Wl1ELxCvzG/+t3/8dfeyYb9512F4E6cBoaIAzgcHS8cmntKENaKmNAmQmdaeYxcQz+46+9+Sv/7UcYpWyOTmfjWsMXOZukepxVvZMjaOvYlS9873e1mqi87PrwQx/e3EsHH/3sY/bx2s3Xxl/81NXxcLxw/kNzi6uIOmkyrUpz5urZOcpJxdO7225Fn3xk+WQw5LNBDpxZkv9wFkid5WWtHVhrkJF+QC+cW7x3e8dziEsQBKoVOcwOG6360V5CkYOAE7oNxojgHGgQe94sn0pdGMslcATnWipKWRS200q4DrZGGCUpdSlhXtMXCYmC+WR8hzgorMe8yHkhtDaUOFmp0rRwAgcg4gS+KCoEoevUHOJwAKwylMUlL5UtscVQoZX5tarYl6oMCWLEMYZrWTjQt9JaSoCBinNlpTHaAuv5oRAlgMYCiDBRRoiycpjDvEZejAMWcZGqnNfjuufSosizrIDAMdIS7DA3KBQJa0tF3q+KxLcOddhxvz8XYOKRSHoWaQ11WmXEc6OomSXUgSGhOfZYmlfQWCNVpge5Pmm1GwACjDGwGhjTqMeTcc9nEDFSQVPmCbRYJ1OCgeGVT5FVWJWKYFtrBKXlR4c7GxsbaDbZ29/DZeEDhwk03r779p13Ci2V0DJnvhsFPuMVQAD90de+3Ruc5NN+e77VaLbrHGKvNtda2Di/0t/fjrE5f3n98O6BC+0z51a5roK1BZ86w539M0vLS53u0f29tjMvkTVktnp6XVT2qDcoCFo7dW44nJpxWmGLGu7qmc0713af/PRHRrPx/vaJ4QUB/uDB1ke+8Il4nr740h05mpZH/Y1LV8bHo+PtvbWF+vB4+Es/++k/+6NvPL7yhdNrS48/tfjm6+9cevR0GEWDpKC4RaOl4aA3mxbDSQ5g1Wy0JpPJeL+/vvksbeQKwoCKk3RQzsR0hGbTSRTVXccxnGXTSbO5Lqmwsqo3l7e3D9qthc1zYZrMdnZvB8TrNBc2z57FKPj2V3/HZz8MIydZ1VroEARODo7TPPOjlirLPK+YHyx4rsz4YFTE3YVQo6P7d2bDQ4AxxEaVhYMBANqBjBkjSpxpQy1yHCSM8b2aJhYKHVgiRrkXOMY4VmgNJXbrToyksYqghuvO0mlZlp7nQ0DKMsFu5cb1kNV4KZTh2HeXu8uTWeLnsNtpEsGkUsZgoXBWCmMNZ3bp/ErQCZSaHI12aEwC10+y0kDteb4seS408GunnvrCb/zLf8ZcxAthrKWMUQiNtlJIBCzyMHOoVrLIKgSgEpXr+ZQFtsqV4AAAq6GxhlFKCFbvV+VRPBllZy/MLXWj5577xtr
a2arATIHVes2H5s1XX6jKNKTkmaeflZyXPJtfmsdQW5BClHoBL4qBBokQ2ag38phhTQJYJbgYDYXnL60tzNf9VcVn/XH/+ddeW98488wnn+WuV2tHxbBseE0u5WwynV/1cSuImuFstL91+zazZK69MskAxKJQSbfZTPoCEizw7JGLl45O7k+HE1KDtbl2O3a3H+xcffwqcoO5uTWhBbSyWWvtvvlOOpvuvvMOEJi5rqzL7kZtdaGjZou7968x7JaTWQsbpx6eZOpkWEFUUGh8HweO6caIVLnTde/pilA06E/7LXt5dZU50FSpg2Cd+aCUroGlhLOZxkYbnfFKCG6sNhQCKyUEOAgoANZ1XGuQ51DfMR96avnq1fbFy912jGe9PgTQ9RGlAMIcgMrztNQCY0i8BaUQIoxZYhDEQM/PZ5cfXtr8xv7v/tn4cAANwkpbAxAl1ACAHGohadZ/WHfTaoWuR2Z5+sSV1d7uMUE6arOHLlxMpBgeD0shgrrjrywe3T7m2UG3VW84OOUy8qiwdpyW1sVamtxIMh8lt5DrBt967uXvv/PmL/3cL19+aOk73zo5KRQvBAGaZ3l/9wFcXBSqMsAW5YxCYdI0Sd5deWhzcTnKxhNdKKVgbqGVBvDpwnI9LfnuYVY/cz6qdRYWmlV5nKSQeMhWMw+TkFSt5TooeF4pf3mNSi0Gg67Hhvf3jvjJM5//3L0Xn1dKHc9KFHn1Zs0CWvDEQ46WaHY0mB4cQeNEi+e/963vdFa6uS7bnVp/75BRf5yjhWa9trC599Z1H6iN1fU3e2/hmn73rW+O9g4LqI7v7Z6uLx5vvRs2KEQT6RelHgBiwubUjMDlZVCWdluCf/d18M5NMUtjAz0GDEKWIQM8GtVqDz329J1bB8eHYxdWrgelUfl0hgjTRhMEgcXWQoSsBcpCACxCFnS8wKTVRBAURMfHUyVyYExojGtMLipMYVSUGMhaN2i2OqIskzRP5IPYi2fTkueyyk0+KRC/t7nE9rYPeOWGXtcEYG+cY5d15oI6YB+sed9792hW8C//4M7nHg4fWgsoYIi5VZbs7Qy2tlJoGobUhjjfWJpvdAGZpCKbMtcPWo0EKkIjCqs8y6xFgDCgzbg3sIBCKDDGEog0GwdRdzbu84yrqlTVFKUTKCrIiJYmVYZQRIgtEYqdOGCsKGZL9WbAoASyd/u9EDoIoMMHu1xWzI2UwkgTbcnq6VPFeDbNBoM0D2sRglZBo5ApILEk3N4+bBJdzWaNpbZANj8ZG+MtXzj9+uFbf8lV1GzXO91mWWmoQK3dlLLIJ+Pzp9z/yy999H/5jefzQhGCDMbamlKW2upB75jDoZmdfPnLv9xaPpZaMCdodhmolwiDouJz67XIJczSZKIE9P6Xf/1nH/7cPyy58AIT1mvQYbOJlEZx6VBJNk6tb2+Lp5566Gg4UqJKpYmiupRkOOk7PnNooBHkCmQDpTQFoIh84QUUI8Lz1PBCKc4L4QGCaeyFqL2w/Pb117VTIsOqSs2mJcOZU3MIpv3pcHrYg7aqL4Xlcfrg1gM/CAOfTEZDM8shokG9bqT0nRAYn88wg+x4NGo0m6Ffl1MbLobxkjc+2tPAGxwOb2/v/OjFz3moHB6V83NNIQvOJcK0GYSj8R4iOJ2mEuQapZS6hMpWd7kc5jlsFTwDtIzbUZaVaZZpbbgW723fWppf+NBDH+sfH3z8o198941vFSepNq7lMBfGrTfmz88nU4nVVPAMonxa9HG8sji3Ojo84Uog5lsLa3WKESskZ17UG58Q6helXlg47dJoNhzHgTua7SCGPRebohyODqi/jGxNylmSHkIcc+5Sx5OWdFbX80mi7fBkOGGO32o1x9MkjFnsRxUy7W5siR5mQng1GDUcRHAliddNZ7LmUt9naT4d9E8gY2Jc5VwjSB2/Nh1zXfGKzrwGOt472jvkijhlohtBd09np+bhePhO7DOtFCOh60RSKakYAjLNjqkbhX6j4GUYei6tCuKlpXRZrBVyWYhRAKFMxkOBHGhIMs56dAfhsNaIB5zv7wypW8u4qjfn+/snaT7uLrTGvYkXEr/W3r/3XuQHP/zYBY2xtuQCU2wQskoPxwmXWkqhs8T3Y0xQFMciq7QSBujReEoA9mL/8GCfYg8RDAGAhEqCp9NZxIKizIqCuwiGkRvFwWw6o75rjM7GXHAgpUQEUYrSSpTVSAo+v9QKaiEvFTcSYeD6bmW0IdBlrMy4EgJBD2JEKMkKLqVkFOpSUcwwgUYaYwCAhhDEZYUB1NboqvSDQFuTZWUuleu5SilZKMooBAZa6FCal9xa6wUBxmg6mziUBqHPrdbWAGwJYbO0UkYbo5CDHY9S1xNFRdyQK1xJIYBuNuppVmVFxRjkWoXMcXxPaGmMch3KkKOVARgbrYWUFEKlFX7fKEwwgIYwKrWUVvqUSakxwoxii5xSFAZTRrGLWV7kFDOljTJaK4WBoYREccCNERATgLWxAEgIMZfSYQFEVghhrZVCYkKDIEDauItBwUUceFmeGWkDRoBHKii1NZgQQ6wB1CCQapmrAiMINDRSIWNdSHyHNB0KCAyxRRZIbpQyFeehX6OYAAygMVmhtIIQQs91rDGVENpqiLBStlK5UkoZq2dFzQ88B6RcAAGFNqWUfhxOkrSoJMHY8x2MYFVxqQQEyFqUlhWAgLpYVFpV2vMo872qkJhaB1MnxFHsO76TD9MsFQ3fLHTCANDD4QxYaCFIZiVBwCe+lmWn08nKhBEHAqCMRBBYY7KiMoAYrd6n4GB7gCOUpbPJQH/6J/7exmNf/NI3/9UFQy9emrt7/f7+g5P5bg0SsNidTyfCGGqgQYjXHWykDmsdFjTjbieshZOdHccFflR758aDN999MN0ebH7Q/p3/3ae+9UffeeiDZ1579f7xceEGPovn7r4rR+OeZpMrn91YPnY3Vy5fuvLR0XQgiwfzLs+1a0fNDPiOdw6KEwnH8XyVJAdJPmz6m9bEmPqWpStrLN8vHzr3hRe/Bov7Fyw/aV3wOx387//jb/3KTzx15lRz4yO/8J3/8MJS4H/hZ//ro9mtjQ99fqTHo+H9T37xsT//ra/gepBMyyoTgJHBcIoZVjG+BdJGnYbL84c7PW5BDlEmgJWysx5NZ+re7snP/50fc5DefpGXhfrMF5594flrr7z25pmnHo6bc5bkWucH714bPtj6q7/4s//sn/zbkY72j07OlDVknF4i3t3CH/ncx1/6/eeOekc94Z755JW57rkHb7/1hSc+Ve7+qVHlSy8fO7Hz2StPeLUVw8yDgzHFSijt1lcj18v301d/8E8ImVxdp8JEuSgtxO14ed5bqTDibmO+tQyHk2k2jkKGHHc2mT7Y3q1vb9d82qy5+ewY+M76xScdkz7/vd/ePL0sfYaj9sHeraXNhq6sBHLj0nnhtW4eErFtTm8+88ZLX6ovRLA8+u1f/Yf1U0trly7gAj54b9/z4t7u7PWbB3/tr//iw09fIJa+9/af39q90eWrWaGktoLL0GdC0LduF2+/CZdXF4haNp
ghCA1ABKS93k5lQZJWjkMDQtJUugx3uzWu5daN4b276Pzj9YwPLYUsdDQkh9sjPfSqvP7ss5/Z23rj+Zfv1xthKVQnAHNh6C82b7984DET1d2dOz3P766frhWznFB09vz6/femQIDlc5fzk0FnaT27cZ+ORisxGC8HJ4PJ8sb8fw4jI+L5lgbYAY0IH+6P7tx4gBDEBGVpQSkmHhMlnk0n0iADdTWZUuZD6pZlFvphxatp2eu05rD1EXIoo1wKLnKbWIRgKbm12gvD4WSclKN6o+G16vsnD9zAlUZmeU6MLbOKUqgs4FJrqL3Yz7kmmClb+dQFgo+nM4oDIcCskspKjzrEc32Frr33LeYYB2NRVQQyAKul9spgNJNCCG0QwghoSjDUhGskFcLUY9AYpfMy94iHCJRSAjKNXUfLmUymhGIAmcIuQjCMmkpzwpjRIMm5tSQvZ0qLWj2wCEJG/XpTcOa7rka8EmUQO6tLCw+2dsJ6k0StWzdeme9SKz0visqyaLRCw+XB3k4UhACgNM8C3wfa5Ol0NhtTRAEAiwvL+zsH1CGblzuH28aHTv9oELpRw2UUyrOnlm48uLu/897Z9c7J1rtYgYtXLw17aV5l8+vNg51xrdMihNXDaNRPCl615tprp1duvbV39ZErkzJ36/WnP3T16//2Nx/cubnZ9Gr++VFZSZ7MdnY77SBuR+Pe8b07PezH7kK9PreccjE8STYf+QhPir37t3a33llerG3ffXeaJJoF7aWHIG1N+0lS5HxytNqstQK/GtzHGkFjHvvAU1/7ytfaTczVVCi1ujSXZ9mzH7py0E8vPnL5tWsHHvMxy779jRcee+L8vXs3Gy1vcDhWMnzvzt4nP/vR7a0ZxcRj6olPXn7+W39+6dEPH++lhNjaypmwtjQpSW84bjZVr3/oh9SPgqyopHSJU3f9FomjWjiudJoM+rVazXPmlheI0aU1BTLZow9f5Gm+e+e+mx2/9+Devb23P/eJz78PAfWioLFQZieZxI2Fh5aXLva2rt+7+8rmKR9RNTiczlJsqZeMj0VuNa1bAADBNPAowXHDxwqWhaysMAK62OHaWooIYz4yUgFtAHBcEIR8NgmDbllqRQJlgbFqnCcOEr7nERoSjHGIdc5FkbXanbrbHfKBcVRvlkDDtbKzQmX7I0sb7YU2gRmfToHVK2vz7x3fXHnkCS/qfPNP/+ygNzTYq1JbVgBinGsluFIWMWb/11/7ZybPtLEQQoIxABZAiBGyWkOGXM9rt5rD0QAjabWlyNHaKCwIxshhEABgtNWWYqyEtNbwSrDId0N2+dKpK2fPHm5Pomb7jZffWJlbQMgojBxEa/UFD2MniPKiLCcQeyTNB9PRESY6im2eDcezw/5wUmkXGH8xWn3koceQ00KQYhdrpf1ozl9YbiydV62zaxsrDp9IWSbD/f7OnizLDFRh3MDMmYyT2c6LnmMn+fjCubNbe/c0Yk9/+AO9/V47jPI0n1Yy9PH8ciuooYOjLHbjVBha8Hqrpgh8/vWbVx51lufq77748l1ZmqQkXthoL2lP3rl772rnca1hpU1zYf7Mw+fazfp4sFtMir1ebrkNfXepFWopWh3fY3DUG4btxsWrl5977Xj/ZHRqfen82Rr13OODfskdodGD/eOTccmljiMXQSCEyoV2HBISgksJgAHGugyfvrDqxYwStnt3aIvZpcvRX/+ZqLNQGXvL9QFZVFWJeJkT5oV1D1kALDOGEOZYADG2WpQWEAhdbRJWc5pN9dnP+eMk/b2vplkREcyAQUobB7PQwZKXupi+TwEEUBmMANu7fwgryzou9JUVUGfZ7Dj1HGfW5/uD7Vpcay/N79x9rwRs9eJTDSewkFoDADGf/eTHfv83vjS/uBCwelGJL/zoZ/70D7/9G//y//3jP/Gp02tnZ8aE3TUgiqOja9SFqjcFFvEk75fjT/+1v3P02ptpf29n/zh0EUV4Z3c3jJpLcbt33PNDOt84VdhBK6rvH9+5dU2Xk+l4Nm3FzajO7r/58vpyW6jMY47ORK8/CZoUWC2LIvQchXxLvVv7g3hpsX//TtCp1ZoNwSV19Pzami6qVM5Szr0zS4c76bh3PNch8w0YhHFQ85wkb25uoLEwWXX/pVf29w6ffOLhe7f3GAmIJSKDQX2VYHvv1hgN1fD4xic/94G1S25a3WUxKnmSpL42Z954L//Xf/zgjaP6IKGOwgFhCEgElZDKQEYcqxx+5fGL3NLt/duEaKWE1gZYgDVHCBttrNWEQqU1ssBaDDGEQD779NkowMd7E5vp4yRZX6y3O/HKmY2jew/iTru73ABVQWBMuwvdzsrB9begKU4OhqDuZFVeC6KwFry+O46J3j0+9EMaePUSIOYCYtQkTcL5ei2gwMoPP7q4tZ8cHpF3T0R/mMyHtNNGNHAxDRAyRlvfJQ0KrJkkk14Detr61jBRlhBj5AbJpB/FHYzrQjjQCAVhBQPDleQJYJyFQQlTWZb9o71sPGbUBAAWWQWwO5Ug1ebU0jwGI4xKj7g+Vp1GFHsCY0ckBbYOoFhbgHDokihsNIC12NrRYMjtDNCCSwGpl2ay3o3khCsgZ4nIOY19T5hxnlc06VlI6z4I6uTf/fs/+Nqrvb8kFT382OMAEtdl+bj0QWiJEFUJK3H6tPO//7uf+Nf//i/6I00Aksrm09x1CXRhkZcXPerZ7dHJg/rGw7pksyH917/2/bnuotLlNFGjo4QYjKhHaHi0P337nf3HLyxwWSEDgDEuowBhn7a0lX6t47snSuZGV5YQt+bXgrn9kwPXBWHkGwilMgBBpEHDbwCgRmnuChi5XpmrFBmXYuI5GiBeKqucOJ67eOb8OJsWVp89taBSPuF2w+9k/Ww4GTKIWODRINo52SoAkFV18cJmxaIbr9/OJkm95VuKmgtr0wlphzHyWtVsiCgG1SFR2XIjGKk8EzvnLm7apHfpspbpX6SmpkNWaaF05AcR1n1g+/WwNFa6IVOZ0rbixbgWtdOhr6q63zijUIYZEtiMkhOZpy6CCwu1wUmSzgbPvTQ8mcKXbv5pJPMoChdX6t311XQykgBlooRA+C492t6xhFgIZalmQ4MtDqjNKoEcCKzDqwwCJPLSZXFa5YHTrCqPuF63PVcWt13HwR7pj6eAVwHyq0mqiOMEhhdFLWg7jXUIinQ6PFIaAHzuqWeuvf18Pt3ptJ16w0NIFEmqLJLCKKs2z16FrM3LqVSi1ln0vHY5O3II279/DFzPCVoSKyFVlue+6zq8zjQWKOmdHC/MexCD3slM6tSx3nF/NLew1J6r7W+NBSqhRLTWYDS0oIKWURf7mADCcgU5Lz1kxskgXF3waUvNplpBCOD80uYsFXjiIa6UVYhSSA0wQghuSz6rRBAwJW2/P8KUGmXioCEbk9FgOEx6RlbN6IdSEbAIAoMRarWaAJlkWiYVpwh4DnW9UHBJMcRWKlGUReX6ru8507wYDrjv1rUwnU4YBqQoS0hBp9EcD0dJkgSRBzBIk7TAblma2WhkgcEQCqkBQhqY0I+lm
Eht/XprMClHkxIY7TtUW6M0zEshq8pvtCnAQeQpoz3GrDbEasIIY6zISqkEYwEihnrID5i1gAtljIGEEoKyUmihMy5SrgxxMSSIIAQxsCqg2GdYCQA0VoUogPI8FwKihLbGzPJpHYfKAC4MJYS6KC/L0UwYo7u1RogIBAZRBkQee1gJIowF1riMWgiyonQdRjCCgHCeI4ighQghjznAAIgwxLAUqYs8ABCixGEuwsQYYyxAGAktDLQKQF5xaxmggGBSVZxQooxmjmONphgiymQhgQRllbsEOw5GCEKFtNbAWKENpRhTCJHRBiAAXEa05MgK3yVaWqiAUhUC1nMYQUACbSH0CI01nqnSQNloRZILaDRAkBIMAMhL6WKMMSFIOy7BmBltpFFFKXzmFaUU2hJiqaIMQ0IwwaTkylpkgXAcQiwASHuOU3CVTNOQegYBn7hJUdYo8QMkjTXaSGXKklutPUa1VgFlFS88FDsUEcehxLjMNY4TBNgIrYUMQ1cZ3elEguu5VtfxcO9oIrSt0sILPc/DyMKQMEqptJJSqpVGAACAXc9HFCRZmWV5o/ZDuzVx0CMfPIOZ/zt/cPPMlYfoXONf/dH/+q3Xv/KnX/tXq124/lCDIDYZyEfOnj85OpHGmU5mrTZpd9zxII87i1WpAVGpGrMGcKj1YvXIlY3V+c5Xv/+fNpyGOMk6bDnZTviouHzpys7hKDKN61s7rBY0m7rm+7VF3+ryxs03BZ847kiqBForqgmkiic9CiBlipFZlU09iEVSGhxDNxhwtXHpzMd/+q/s3LxEx7C2frhz8HJ/MKwvO5/72AoAu0VS1mx1+aGHq/6D9uXNL//q7/3kh366Fa/fff2VW9+9frF2Jq/VXp1cJ6wQFmueK2CM0FzCl99445nVsx/9/Ee/9+LrBAuv7gRhfXCUFJU5nvC3337lycvnBv0i8uJ774yIaSTD7P/z3/2rhx/54LM/9cR4cPSVf/PHWIrNC5tBZ/7i0x/f/oM/n994NqKzdsnHyYGY7v/Cf/XjztzccGca4fj47T0n777zzddWIzeVaj4Ip4V48c0b6Wj84c9+NCpSV8Pr792ygTXWSeQMi3637SDAMaZRrT04yAoOl1bObo8PlYX3tncUDq9efoqnaf+gJw3XEOSzSRBQURQkDjIZKAGtGp66dC7qzDconElcX2x6qHt878iPHcMF0HAhdBeffsz2j6+98FtvvfjWbJpf/fAjWc1+/tOfe/CtOxTJsGWa3bn17tl47mL24Ht/+Du/PhvtfPavXHlwp8iTKgrZOBNSQ6VhPrF/46f+4z//1R/52F+ZM8A11iJoBR+89RdvIVtrtutpLsqyBAgBDLVSqpJEet/47bcvPvK01YNpfuiTMMvB7FiAnvYrcPf716QVj//o6Ua7+eTPfLRM+e/+j7+d7cG5MXvs6cbS2kIQNbXy8kJLGLs4EIlxkR3PZG3lsXt3vjL6/veW5xcfXN+Z2+jsbp9MyixeqL1PQZEJx3EtwrnQ+f40yyrfr7k+hVpRRhhDAAKMkBQFcQJRlL4TTkVeWeExyPkUQri8vM4lRohOx0ee62EEC1641MMYcWMJolmeBEEMZJkOi6oy2Miy4GEUup7Ly8J1XQCktUAqMDqZtrqdKiu8MGhEkdKIuhpj4jBkhFJCGIUAJFWlDSGUMFWJRtNPVOYGAVR+kheFKBDyHBxryyHCFENljJQJwRppQgjh2mrr1lqbx5PDzaXFYe+etCkAqlVvKC2MtqKqtLUQe1JL6ofEwGyWMIdqKaqicv3AGIMACiktsnyScay0ITaZTss0dT0fFOW0PFhaXwJylkxL3wtVCVIgsLXr82cZpAoq5rmMsnE5cC1p17qlFGUyzvsJrrQu84O7IyKtg+BCq1mr1w+OTnwf9w/vby7OTVJoZ5mHNK1F0/HBuD9xHEcTv9aOls6uWVjPejlPC6xVjvoRsDHoZwdZ+9RVa+xzX/6T8+c2fvynnnzuy3+WjBcvP3nqeOdoMuo9uHfwwY9dDQJ79vQc0tWsN1i+sCHS2XytVnKJGTr78LmVpThLxkHsL51a7Y2zu+++9dCTH6u164MbN5fnO/duXovjDtN+txuLIu0Ndn/h735h3BtN9kZAIZnI2ubC0urywddeyCazQhSNmvsTn7w6ORolMXMCUijw8Ad/5Fg/aFNVpl4NZCt1b+fBWzNY27nX+/TH18tkNJ2V85fO6UQKPeusU1AoXXFvrg11PumdrC0sH47G1K8pnZ4c3PGIDWgUeK1imotiXGt4veGgGM8gkkbKw513z14895Enn/zwsx+NowYA/x4AEIb+aG8PMeB0Vy99+DPltGRlLRj4/SRJ0rS/M9DW6Z1MqR8iYCUneZGXhTDW+r7ngxoy3Lgs8IjWxiiEnAARNh3sYYgpBcxH8VxcXzo9GQ6orDZqc1t722WpszzV2qu021paznJdVHzcH7rAq4ebx7vTRvckTcai4s3AYZHjBOHN/bSqxGw8bu8OVhpBO/amk9H1m1uOCKe3Dm8cv3O0P4CuK6oibDUcY6EWszShFrTq7YXVtTvvXgcYORgJA4EFFkCjjTUGYUQwFspoqQj103TmuMShRCqltLAAYooxhBBQCAlzSJqmSuoo8iw0EABViM7KYlRrl6koplmtQaPmmtHIWtJYbvMs1wb5JAjrtUr3udZaAEZ9RpTMZ7oU1gCv0ShmAcS1pcWLB8d9rTSFxGrrU2981M/LaQ2hpja93b0gZFQJB4UHB8c5KroLII7rSpk2q5XZ9OIjT0VNp15Vxye74+lQaj2cjoy2aZFYXNq9PgbYp3ChHRalksUEaolVudGNhtu3T96TROnAiZqnFloLrZ0He4TpZz78UNRoEmryXLhhY23tEVGMW7WOoyejsZHKmqriRdVqBHIqmnM1Ax3oetNJ7+ySsxx3Gt3u5YtrO3uHy6un+sdZvzdWQFlE3dCNPGK4wYGVVs8tNqHGg0HCsGGOs7m2WBQZkFJWMmD41Gb36lUWR5XvUkRriGHHRU6leWqNkabihDEYxBBGBnoQaoA5rmaIS4ALXuXSOJLQ9mr9c58Hr9988O4tKaArjXUp8iD4qU9/7JsvfA9b+P4syGa54+vV0+u9vS3J5NpTV/TgYO/63SItPGT8tfU+BK3zjw57o6gVYo7y3qgcjQ/uHwTtdkjocJS8+L3rzflzS6dWOalfu31r45zTXVpEJh+N+2fObXTmG9t3BiwILnzkqSIrJvtHvu+mlVBoIeURbi5KsAWtPd4/XF5edqJmZUAljVOPpea9w2PGLGCwGaLxye5cJzYmAwAEjZCx+GR3uypGtXotO54W0mASp8NZa+Xh9vnHQGvS6PqvfudrwFMUM8RlenIUOBhA7dTnprOZwhCEdVzznDlkVZbbYjyaABN84+s3F9YWP/OJS7NiG8mJa1CnOccxRa0akthKXeucNQ4Z9XcZKmAzW3/ENpbul9mu3zKcwu0d+we/Ce7fTU4OxNCslUrWiLFIGqsshBZByAhwsNJyIfBXO9ErMAO44spoJRimCCGj
DYZAKIWRNcYi8MM2Zm2KUzF8dLEjRJpG3kSyqUKu12Q06g1y4sSNeuS7jt+OxgkvC7nz3ns+jZBDWoFenF+4szUGVkJcaSrv9LfWI/Dpq09853s3YUjPdc7CvNoe39aqDkqnnM3yqmwq29xcIgSpIrm7vz9/9mJWTgvx/6Xqv79+zRKrTuzk8+Rvfr9vfm++de+tnLq6q6NaoQVKKCChESCQkIYZDPaMZS8bi4HF2LDsmTFmNDMgMlILkDSSWt1KHVSdqrqrqivfnN6cvvnJz8n+oQqP+SP2Oufss/dn28311UnGL5x5tix2W/a4baGqlbZQCWVnyjIPtc+0wgF2BhseRuFitgtBnHTXF42T1hMl3H+YzrJbUM+xLhOOIkoJtNzHwgEP4bysEh88ceHy3e3bi3lTqQZDiizgvhv0QiU19z0AmKmscwghxj0uiwX1KWRAB1R4oUV0kas7D25cunppZWPp4FQeNGOCge8Da6B1woi6mqv37o2/9U4+O6X/iVUETEiRFwdtH+pieqitJNT4YSLr/Nql8Jf+2kf/4a++cjpSnAFHkDPAFiDQ6C/8xOXWWay95bySsOavfO7o5NBL1sJuPIAYHuU3tNOmMZRjQPzf+P23nnvseZlmEFqTN4T7wFpbi1Y/WdvsXn0UziZzwv0wiUcns+Pju0brRsEG80apIPCjKATOCVFAYDGwSdzGGHqAOATb7ZaUNYAol5ZHUdAOn2gnJwc7t+/vSlloYQM/Mk7wAKKG+klU56Oduyf9XkvqWZkWe7fuxMvLWIuQOpEX1mPz9MRXMKreubgKf/5vdMJWhiVotfvYO1TOCBh7wUTJqsHi5he+mHRenLkNsWiwzi3OCU5nk/tRPMTYUWyTNsOwxCEG0gbUawhtt5u9w3s4W1iCiHGEcGVU3PXDbqSEIpS0TzVFBEOYN+LG9vWtw0srndX5ovSxQU6npeRBaI1bWz4PAJ0vpiqfhQwaQI8OT5eG3X4U2FpBSjutiMbIgCjubcyLObKlFjkkTogyDAPACHT5PDt1kMWwO+hdFGUDcd4048BzjRJCwZ173x62YqvB/v7rXthaXTuPbS/2OsYAbRxqpBFTaCstF37oV9kUQzedZ4a3o9650c51gTKP0FZIGSUeB4o4U4ON9bNajWngkn6ctJfmx4ezRf3DH3uySW8xTyMAGwcyUenyJPA7Lb+dZqMkanW6nbxIvcGySCfKtEwppVWEMJ/jxWyGkU8A6Q1WhXN3d+6jRiCJXJ0v8tONjTM+CbImrYyVUq4OV5ZR/zvv3VSm7LQSIQQCwIs+eCRzRnvdtrNGSQEwcE5FiQ+cIxAL2ahaGe3qPDPKco8JIfKirhpx/+Dw8toFiC2AOMubom601gcHU4aIEDatC20tomCejdpxpBRgHqUBd04iAAihQClGqbDq4GTUT2LrjDaOIGCkqeuU+dyPgrLKCUOEe1rCrCitMVIqSqmzTmmJCc6bpi6lj1lWK2NkGHlagUaqKPagBQ5YxkiIIQIOQqiUBtZRApzDaVkBgCywUjaIMc6ZFIpSTxsRBjHQUBjdaKUsMBZb5YQwDsJKOWCqABPPZ92kRSBZ6gZlXTVKVnUDMaIIAujysgIYE4QQRMzztdZaS6U0QIACwlmIAeKMl3XBuecsKKsSQkwxElow7gXcU0ADB2rRGG0wocpIn/taS6EkhFwrRQgF1lKO0fu3POAANA7YuhIEMwCw50UOGl1rK6XUFgOADAg8YpCDAPrUE1phCIwyHDOhhXXWCE2NwZwqpbQzHidFLVKpLMT5vKSUtDohJgZS2mijIIgYZZRpq50zDkGEKLSgUVIq6XtBo6xSIGmFFDpiHcTOGYMs7MZtay31CQKIAUywa7Vbx5M0LSvpgKIwiuLFbBF4QUTwUtQDDjR10e/0g5jUdW0B8BAFPlYEYQCgQ9SjiOCZUWpcR8yDpjIQcOgIxELLQk87tJWWmmGCIMQEYUKrpjGVdggxz5Pqg9JBqx+Ox6eDlcHP/OwP6/xwdDD7w9/71vlz+umr3b0HDzshaKoZqtitd247EhqIlaTT06yczgmlAI5krfyomwsQB0x7wLVC5tnza8GfCT5ifXHr5QU1Qcyba2c37905HE+b6aSIE+/aJ4c+hfnRiDMfxn7nzPrsREtxkquat3hrLdI6AigpypmtZoWYSeMwx52EL/IcSf/pix8Z6uArv/LbB7svv3Dl+8b3XikWc0JrtTefYzG8unGasXw3LW49WNQzzeC1axfE6Du3XvnqMF4+03t6/+bezt72D/zZn/js7/0zaaE10DrXLOp2Emlj3915SIZL68v9+zfuAdtgS4xxCNq45YmSfeX3Xx/2W5AEFhCv0+m2YlzL9771zYtXY8DAtSceoUTnTt96sP2x7wJ/5c998q/+xI/92I9/5s/+6I+vtHqo0xVC79149eLWlQe3DtcurU1vbn/16y9fvLqOKEiQWT3bfrg/u3HrjjbpUjdWWT6f5rQTYKKDOGIdz4UMGYokJDDY2upZiQ+mB2U9jnkviVv3x+m796+j2hCCIEXxykqyssQpUHnl99qd5XNR5Oc789b6te658wff/rIwJ0k7NgqevXLu8OHdb/zxlz72n53teyRg5n/6x/9ge/vuEx/eSiJqhOjbZP76yWIvwzAcrJwZH46//+Mv/I//9/9Tk+9uPbqS5/TLv3N99cyF/np7quZt5Z2cKG2Ag7zDA4+v60xZj1CWOICBcb3N7lAwVESz8Ywy5qwxAJ3MCudAu93Ojk2ad1rrw7Q60lVuU3Bx65HXbs/PnDnPms5ivt0GfC1Jprfu/M//n9/0JFnZ2Hjhw4/F8bDXX+VBqCsXcggJARpoqbr9jh+F2en4zq2jjT586tnzhIS720fb23OUII0+4NZ1uxH3WZRQWeBUABqwRlvPQN0oAAxEgTJ6sSishso2hDKIgNYqpAFHJJ1P/CSyDnBMfMJd2IXAagT9KLHAIQKBsFrYqhBJK7QWirqB1Ea+jyium1qrBjPEfAwoNByiEOZ1QxDyPKpklc9yh9lgYy1fTJ0FHCOIOYBMWgCMddAyRrGlTmIh1XQ+cxZgjoIkampjXGOsFLrWDlnlfBpAhxHSUtaMex7GtZj4Hp5MD6wx7dYwLU8tJtyjRZ5SCpuqFlktrdNaQ4Cgc9aqyA8QijDBxtpFmhFEmB/5zKuKrJXEVWWVMdgSJWprnM9YLS2B6GjvOPATGEKMsFCqqVSYcC20KtNOO0IQprMFIayTDAAkcacdJPR0fPz4Jz+5f/M9U2bWVksdZpS0ym1uXrj52vZkoa2h3KfUowxBDMnppFxZW6tnrhKjjcEaUw3r6Fm+KGanNk8vbF159nt+5OGtb9kVOjp48PDOaHlt3QJyfJAq6ZDWMYtd6Z3beuTNu3eLeVbkCxYybOXGxqa1uJ4VCGhdV1IqwtvUC4ebrabexlDnVXE8eq/Pz/YGq1FnUGfl8cMDV9uZmiiVV41eiSmikbBQCnr73b0g7Gzf3z8ZpTyk/d5SWcPv/6m/9A9/+e8+9tiVl7/+OesocmZ
74QLCUyyZsNCypaWzb7z0erh0SVZ51jrAFAlRG10j5XyPZqOx0lVTp7M5NFIjkaV1un5hNZ+eOoh54lfzdHf0oKN6VV60l3vGIRpEn/6xH5+NT09G44OTgyee+/AH0TofwVLF0SDZuACgnR89WBwdtjutoq7m41QBQDzkrGkN+ovZVAJZSa2clQ4WjXZ5kyBEMQwpAdjVTjeiQAoxiDyKjclWBkshK7O9dyK/r4HQdrwyYNMx6Ha6R+Pcb8fzosAk8DurBMSmrm/uni6vdabZXDMYJW0hjWj0KM0eHBUPH+4ZaROHex97VkX+vFTKMhy0bz98mFfSb4WzrElIf9jefHB8WxbVc598vhV2T44nu7s7wAFjgdHWaQcgtMAA55wDEEEljVbu0CzaZy5++lOfufvN12eLfeeMs9ZaQDDBiHDPM1qLplFKGQOcs15ACKNvvXWvLvTZzbV20l7f2PLjBCDSabe1UtAYhmnUGzRFPjm5n6W7VgoAWXv5zOjwlbgd8pSZycLzvJOdPVkMTrbvEMiCpO2kVg5AhDDCojF1pUSPteNeI44hiNY2V0XW7GUj6nk37twwgD731Ccff/ZH79+9VR/mtiGbK+eqxcIKd7qYKQshR1iAxekcYtrvL5cnM+4lo+MJj9DaRmc8HS3ycrC8jB2mlre7AWNgfb3vRb6CXt7oNgtabV8b06PRg+OddodvXb3Y7k6/9u0bE1VTiXTJoAKHd4+jgLFFVXncj5JJngMxzlZXddgPQd6THtA0QkSCldv7IyudNW5zqauMYBAaZIe9gFDkAE6rReBDDGAn7iax+5Efii884lE4hjx0EBsNINIo8AM/NKbWVVaZAimEw45VvjYMYYS82OFDBBYUJFrFztUOyrV1+tST4N4DqaXxGHZOQ26Ppg/avSChH5wFvaWloNcXVe4q2R7Et9+7sUWZUfqJ735x773XF9mU+nGn1wO8e7J7m0m5fGZpfLK/0mnNZqPx5EhJjUWzvroGhWwTfoSD+dHR+XNLYcujFFx/d+esRbYmEKDOWtecnAy3VvIs664NHjlzoRrtH2zvEEjmVfmJP/uxV7/xTtyPOp14//72+cubVZ0GIVVCjg5PW2trS+tDaupL/bWj/ZPD3YP25gC6ur3iswDnOdnfub/26JPd7sWGLE8KJ22djxcB9KYnxz7B0EJCSdYs4jhalDkIwqjVwcIRppdWULdNKdHlYXb33vHt/Xl04fzN195pMQKpE1UlZHPr7btbVy64MApDb3Q6q2aGA7C0vNxo+OHnroXdhcqgKjBu2fXe8htvl6ogJSqlqzExAEJAgAMYQAIcsABYByCARwdHf//v/52ilBFlsrGcc/f+WB2wxmpGMUJAawmdBQ4jpyJW/R//yqeRTrdPRqb0KdefePpCh2FMLDJgaTiUZXO6V0KoIALMdwxDP2HBchhnZFKcsJAKKau5ogYIwCTiD08X/aXlMGTiZDoeLaoaJ3HY7Q7f2T0dT/NF2iRrCGIXYh0EwXQyo57f6rbPLZ/9ytdvHB2913a7SYSTkCwk9JM+RhjoBkOMeET8oJruhMxZ0DjfxyishagsUWyQ5fjtuzuM571QtkNSZAuOIw1BEITIonRWCmEeTBbMg0r5BGNhNSKB0tKUlgGNMGqqCuMGaOkcFk2tZS3qomlUVpid/XkDuBR5QIjQ9N52Op3Kvh9c6PlB2GxsditVmIlkNNjeL373lQfbY6Od/59YRUsrQ1HRohDINX4nJEHc7UBdFqf7Y+TE408G//UvPPOP/vnrhyPJCEOYyEo1QHXOCpQIXdS0dr3h0uHpXWnpbH4qKgKZU1TGQafOa6sdgtHnf/1rv/zXPuZFfpHPGG5Tr2Vk7kwNpTJNzT0YtVpF4bDUpshq20StfoSwUc40sLaQQsd43GSz4dIAElTlM+JTSjiwOCuq9+syhHgWwHpRM7+ztGYa1Yh8QpOgv7UsoMWcuJB5g1jYWT0VACTbd+5baKqseL6/PmAmSPC1x8++dWuPsewTH6ef+cFnACrCPnUWOgeavAYQUidjjiGRRXbkxd6f+b7HX3m5Kpu9KPCoB/dG19txy2/FiCBZVVWTOWR9xHn4obXzz7/17T/tDtz+8e3QT5pCijTHYSCtUg431rRaoTMpck0SI+eAH4fM8ChAX/vjL3z3Rz/dH24CaJu8yBap1tBZ5LQXBS21OG3HtE7H81kNLSjdUdzvOISCOFg0J5D7GIGymQexvzjeYxjVopZq6odxaRtMOfPY6sa108PbAGuhpMr2HbBGNRAQj3pQNFlTJF2U+OF0sldyjXXHGz4SB91pbjnhYpFGbWSZobgYHRw7ZU8PT9rDa9yrZFFg6sJWwOJhU+eyqSVqlobL8+k2AGw8yQmxRborG22tee7pa3X9hxAKXUurnMcKU6aUGGkb5qwo0alU2OKIBw72g9aWds4pWZZjUaW6KZFCHAWQm5YfNFVrcrJv24QTtLzUcbba2z9YWV2z0FRNBWWRT6ZKod7qllPN2mbscZTnH7CKCEbWmLyorXVSCmecbjTDDgYehEjqyuO+taDSpkgLgFBRNcChYdSjBEWxn5c1BJB4fJoWldBxEClgsM90KaEjrXaPQUQ5bPlRWmZNJRmhVVZaIzzfh8D1u5GHsdY6bvsYAI0dAMQP/TQvlbR5JcpCcc8HDhFMuO8hC4HRIfN4FChoHGR1qazVlHgCUWWlI8Q4bKzGEBoHAQBlWYael8QeRpAgaJRB1iFGNDY+CSrpikL4jKRZ7oCBmDTaKm0cAMBoYw33SCOhgzBTeqkVIKmb2hijLYOOAEpILSSwaJrWHqecUQ0Ass44AwGUWlrtPihoIUgQ1to64KQSECJgnLYSI4oJpQA57JwxGCBhFIDAQmABoJBYB4QSCAAMsbXQQQCBCTxqtNFKA+Aa4QiBEBmPMWehM0YZY6FFCBKKHXAeoxhAComDWtaS+TzmRBvbOM2Qxzhv3h9yJlwa15QCYDSqZFrZWjaUUYywcs4WVRKHEJO0FshYa4FPSVOXHmUMUWgtANABSImHEQVQMwKJg8ZZowwGuC4rjBiwFhjtDNRGRCFGztVVUeaZM4BTGnNfCbEy6BhjfJ8i4HxOO7yvjJbaeB5hvlfUAiLreYwwWpVSVMoACCCuKss5ss5RirXSRiqfv9+YCaA0QlTKmMAPIoogcNA6grBzjtEPlg5WnulgFGdp88iTm832aPfm7aFL0UTvP9g+GS9caVYHYZme4kCy0LOwGfTbxXHpa6QqzFt0dWVYVbq92ptOTstcC1k5DRZWcn9oPK+92a6nexqPZiM9OjEHi/qZCx0cgbJcFHXGuaMM39u70UdRL+rPphNLDAAqiSJdQyvrxqOiFlLJQjvqtCcyZ1B/uDG5652Moxb57ixAl597enL8SkXL/kp89sVPvvuH35mVYv3CY29++U/tlOKkNypktzd8/d991p5Oo9Xo9luvMt5Zv7D1wovf9eobXzo4OOq1vUXdZHkhhKxymSLwR69858WzF3/kz33v7/zeF7SzedV4ke+1vFffePf/8J
/95T/+rd/dutwjfb+QcmXQ3n5v+8KV9YsfujZK59/12FPH711vFuNf/NG/Gshl2RydCZf+9A++81u/+eZf/ys/tLmFj3dn85KcHr6WnqYPlEHHh898/FmDNIGW8NbxbJL03JVnLj24fgQrZ8uiLspWf5ngenk5VjpaLGZA6qg3VNyPvGD7wcOos7oZLjlhMNdbZ3rjo4mHkt7KKpAazGdH199k51eD7oBTGKIG2cqBNuEXpgvM/fVqtH3x2pndu3sGCeqRc1tX0v3R9ne++NbLXy7m87gTHR5XToJ5VXxq+JxN3clpunpmkFx9/JHvJzu339Aq6y8Nd+/OFQet1WA+LU6PxpQgHwoAIaJwPMm4rJ947gWSCAWEA1Q74gVnn3/hEy+/9oWi1MuDRAFXN8LnLE0LSqlVSmXjVz/3lQ//JKbOQSJyWcAigBx86qefP7N6pkqPTt56+a2v38dnzj1/+dm2zy4+f+bqc4+9+27WNAYAVcyqIGhzRIWSGLOw7el05iXsu37gI6/90R9nR/PJcWGsjAJP+/zBzgdYa+tUFIWjw6M6LSGyq+vL1aLADjnCrGwQoMA6Z1EQDbIqa4wrVOZ7ITJynM4o8WtliZFCTmvmcT6gEDbpCcUUOteUmTOUINZKIoDqsBUAhBDHoiqBto2RlDIAiNROZLUoDALkzp39Dz3zFKVUNqYqK6uKwWAdO6xrhSBGmHfanSzLKDYWWux0UzZHi6wzaDutKeN5VSnHEIRWFBDjgPlCAwe1AdZZ65REwEiRBUEIbBN2VvJJSZxTrjG2tlpb0Ernc0xIkERSGmwBBAhAwBmra1GhhmDaVBJjGPBWGPppWQQeCMIQIggMKrNGhdnaUnexqPJFKqtmqbt+s7xzcfio4qXHyGyWIQjb3aXFdJ5nOTMcQuuFUXtpzYi6zho/jKGHI7/Le/0nls+89bnfD4ixGCjK20vd00XRHvSXNs+dWIGokY2M49b20fGFFz/h+bFN81pMT9Mdw81gsCyazHB06Xs/tLtXf/PVl4kr2gx5rNse9kBbzCqspeyuLFlyemF9c/Jgez467EUIt1hZcpkXVNYZ3jUI+siniEEPDbe2kD8YbR8wnzohP/dr/+wv//zPffjRq3s7h5WB3/v4U7PtA6GRoo6GPovWfv/3vjpcylsD7He91ZX17Zv3/YjVNL+4HD/79NXNM9HDO9f/w+c++8jzHyKo2bxy8ZWvvdlrgysvPHnzq284XQ+vXvK68XBriKqmsxzv3rweWB4gL5VF2dRSE+eCeVpvnF3X0h3PDy9fusSMLacjjJtsoQklR3m12vEfvfzk6egkTJbqXCyvrYz2d+2wczLJjHGPXHqkyT8gdiX9DooShMNxY06PJ05KYMzpySTsh4zFCjYQquF6EsS+bTCyDA1WHHQnoxQQBhAF1DPQFnkjjSsrGQS8qWqIoMbQOe9wgUNN42B1/7B0nKGF4A5i3JqMJwSzRZ6f39yYF7k1YPncxfn4gFULGkXYaqU0QARaV9f4m6/fPSmU1ppzGCATRcX6GS4N3xmZV957kzD/OM2UMxjTT33/DzxxcfXv/eO32nGw2e+8+frbJ+NUNaYVBXlVMQaNcgAB6Gxd15RRxoiWFgGolZ0dHLSfePrv/e1f/pV//g9v3L6LIaKcIkSk1M5WxuiqrKwxAEOlXCdKfM/TtXp4K4voyvKg7XCsBZyenKJllCR0fnIILSxnM6VLUc0SvzOuF6Wans5cFMP5fF4Xc2Nq6g9f+Pjzq+2tQphWBJ1uikLUjQRYKuK81QERXp0Lk5Z7+0dpjbce0+2VqED24sbadv7mqMxP9m6dP3vl0iNrsijHwDpQmqYEBB2d7iTdIWdtFwamTilhdZNvcA4ZT8JWXpeTw2mHR/EwSVY6lWMUhQ5bPgjCXuf+g4etXrK8skSMyqQOGfHicBJ3/E70cH/fCnp5c2NpIJ1F0pm8NJR3wla41iIO6oPReFqWveWViQLs3HNUTl5772td7gMlA799Zosd7Z1QjxfSJpy2fK/I02Ev6bQiIZQw1YWrZ3fvnGZZ+ZGPnVmJbyUsMg4CZwAmxikEcwQMoJgwivCSUSQ7fbvTT73kQ5VKnCHQaidSUd/y/EcgaddFbTn0e/6HP9z70pfn9cgihzSEi8a88dZDrG1B1AcJ07Lsn7k031/YvF7klbe2YQKvESyfT5XQdV7r45TSb4SPf2rrqadvf/1lRPBjL1578K3rzWKvE1PP9fdH+eAC37hw4ca/++OV1eEzH7n4rZe+sb614RC3raQpzPqjT8zn6SydYRpAn3FhZrNsfv8uQPCFD1949U8WZZa/c31cF9xKWzu5fv48hma+GGsGV1bWeNKyfkSQVy3qdqeVj+8mne56Eo+KALqyFFUdkqc/+sTp/gl1BVlD+mT88Y88/ce/9R+aKtu6kIh08fDWw+WNLS9gvt+WhBkXKk0gFVWdUkh9vw1tfebRnuaDP/nWAaXwqcvrdx7sWW6++c7ewWl64cpji5sFrtNFNkMGnRzOOp3+O3E9XtzrRTs//7PngIOWay9QoSq2LoBXv1NrYoWRkBLgsIUQWeicRQhhiIRQnGIlQKYajDHQCjprLaQAS6eg1QhzAACwgBJsHAQYISl/7NNPUNdUzm2ev/Lmq7uPnd2IWrwpMoaNEWaak2KhoVBRyFstWhdF3G87I8pKQx4P/KjfaU1PUp3VlGGRif2mQWCeaDRoRYvJaTavSJA0kO6OJ+efe/qcprfeeThtqlLkrY2VmKBFnUckXIz0iZjYIDge7QwGObRYW48jQmjX97EpR6VWzkpL+ySI6mLsh4PecC0TjvpBBHrMW500mcTKNPnCCKhggmjZaGVUqTSElDgwSJLRtGqE6nEPa2MAMKlhCFNKulEMbKOME5UI/JhSpIGWdQmAqkVz97A5mRrHTFW7OCLKkZ3b4ysb7acuhX6kk4gzFtZ1hjV0ABXaTUvbWKhM9Z9YRUJTx0Li+R6KqnwhKoBlW9aaBj1kdZUurpyP/ttf+t6/+ytf3t/TBCOC4MWL3sWPRw/u3vFwtyqHv/wPf/fLf3jotbsONLWsmCO93vlWdzCSN0FjDY7iqPtbv//yX/iLjxAKgHUQIYCJQ24+mh3uTNIq4S1qrDzcPynrOujES0tn5WLhlPA8ttxfr8s5hnBzY+3weI/7eDjseUnQpA2CTBqYiZmHKYKIEZb0W01eW4U9iMNWp8oFbLIkiYq6CYAc3bvnjFC1yOa5cSBVZirs4Uvf6UXswvnu7Z3FxUef+Ku/+Jnd27/p9ZiWXpNzKXDgA0g5jqjUI12Om4VF5EwAH/3n//RGZ+15mhxqJ8rcxPE6dI6yAcAce2MPVe2Ix3Bje7F07+5BJ4wZXyCCLbLSKAeJ54fAWo+0A8qAbSgEVV4B5yPg8lkmGh1Fwfr6+VvXb5obb/tJsDroIsZoK1ICZyUgnsUBFWYxLhf7k9OkdcZqNi4dQpKHqNteOjg9TTqgrk/We+u1VwBHIr5eN3Hg94py21rea/WK/KTXikQ9hRQ1AkR+z8J53MInp
wcEJxAzBIJalEGLzaqjOt2zjPmW4WiISUAbfTp6KMQIQo0JBVgL3ZTVHBdjFicW6GlhlpJ245RzplF4npXWKt3U2TjloMe4N1fppTMrED4oxSxqJX5gqloHESUIQmAA1MZp65BVFkh9+t6r7c6gw5YJ8asKZLWL2i3m4bu37ntB38OUIkFQvdT345jP5tUw6CPI+v3QCyIjpufPxtrkXoCrE7HmL03L7bYHZSWV+oDSoo1RZbVYZNaBIPIhIo0QhPOskrWojXajvEAYe5SZRkfteLgR5otqZX3t9r3dSYE8ij1KAug7SFpt32jNA88haGoga+l5OPCotlpJoZQgFEFCQ0qVtEHEOcJ1UyFjPUICQiimlW2quhFSRWGcuoo6XtcCGMUI8gJPCGGcCXwOEZhls0675ROe6tz3ubKWAFeIKuBeI+ow8kPOZ7PcZ5TjgBIIMeSccEKqrPY9vzZGaEMpU1YiAMpGMB4YXWttjTaQglYYu6YmGEMIEHQGgEmaIV37wHVabcSAxdBa3QpjB6G1JjJESFUoSQnijAHgAACNaJwDjGEEkLXWvs8vtYBiTCitmxoRAqBzAAMElFIIIqMNwRRhgCltlDTGFWWe+CHhHCJrrauEYIQAQDEhAALnjAMAIsw41soYbZU0ACJCEQAAQwgRDjwupLRaEwAMI7XWCDjoHES4aHJCiTaWYEwIxgRCgN5/uOZGsYQDBxAiVSOg1kobAIBQEhtXGCswRAhRDHNREgClU3HkQ+Ma1TCMGMNKN5iSWhZt1mc8yusy5NxZhCkuhcAIWgUbYwhlmCHjYMQxYF7ic6UEpURIpbRx0BnjkKHKQYu1RQATLJSplaGEcoJrIa3WBDorZEAophQQqJzjHkcILcpcW0MowZgY4BrZAAeSJIIIuQg5595XQapVEjBlquNb3y4fjrLFpOP5o5O6zGAtwmmurVskMRZmaivooHfn3gFI9TDprWxdufXKe9Ltbl0ZRNpEEccBn54sEKRxt8WTXi0hrFB1WG1eHfz2t+9P6uBjP3jlqQ8H928dwUYXcxutxW3i7U0OBb6/9vFLhzerzlqbQ8NIPJnvx8wjNM6NrqT1Oh2dTyTJW/0uA+rxay+0zJbHgovcOzh5ae3Z+tkf//Bv//Y39/Nifw56ayRt9i8+/ni1z1vtZSBGYnYceyw808fQARrgZHXj/COvvfP2x1548XP//jeLQmGfBrGnjNVaMY9rC1959z0j0ief2Lp1Y38yTyOHinmjEH793VvLWxsn0yllrZPjMQfLnY2ls09funP/JF5aYZ2Nm6ff7nZ4fjRW1emrX/92KWSr7WNuv3rz2z/6/J89ePs2i0M2DISYndyve22M/UgrMTrNRmPZXn+60rN3x2ysW22OoohcWT13OC5CbIWqwmAp9ltlJRYaNSPVxFN/yU98nKeVVjA7HUcDk3DPD7pUc8wQ7Hp11VjoDzYfH22/wykiALVXe7rVgz7ywg9pgI8e3q1no8Wo9nAstWUQXLzSeenze8ZAn8RJJx5urswKMKJ5FPMnXzhfZ/mdN77iAf9//Re/zXlrXplF40iLxFfW9ETTXvrpD19cH175Sz/zOUJjSJEfh/e2D846jXzOvE1t4oZEF1788X9w4UMfe/EXz569KCRtGtlUNYRYa2eUwpjeuJGuT/oeg0HkKSle//bhsPPx3Z3ZztT73c/96k9+ZKu0zQsvPJEEW5M7+7OKv/PWxI87upGzRQUAbrVIu9s63j9AiBojrZSuqj76Q585OT64u3Mix/ONa1vDdX9S6bAd/EeryM5nZZE3TiPgcJFVTurCGuAs0EqBHBNICa9V5YBxzmGInQG1ltZiq5DWddRKrDEYsqKukJFWG6cs4xxYFHvx8eTA91nAPWuRNsBDfnfQGo/2Az+MglDICgBQzEUUBAQRFtLOoJ3v7oUB8zeWjg4nhcz8hIW0NT2ZAGh1U1GLAFAGSA2sdYry2EDqoJFSAIQsNBAzoQywyljqIC2UptABZEIPA+2EAQAghNF4esoglY7O0oyzQBkNCTl76drx4bbUBmDbasdlXVVV6YdDjiiAwAACvFBr24AGOxCEkTFWVtJgyHnwyJUzX3n5DxDcpLgddXzgA2VmX3/3Tz/23KdO5/kimyV+YIybzSbAoTCKLcJh3NXlQopGiwZTKCuJFKa8vZgvTo5PQ+5TrCezIuy1isb2BrGXziez/d6gs5gde61g/ckLrz24u1w1/d76YjoPA4YYt9R/uCtRSZcGCfRbyVLkCMBhXxK/v/E8MDY9fC9eC3RtZ/O5Zd3dqYiGw9nxdj9eSWK2tDRMli4+vP4NHLNkMIwo3793a76YWuppN4fGDnoBv3IuX+Tz0/18WnSjgWjM3Tu3SSWX1nrztAQaMsg+/Wc+yXWuNZ6d5r46+M6bL6+e61+7dsnVuErrG68/wJrt7k2Wl0MeaK50l/NSNLsP5klvo7+1dLA3XqW0vbr+9d/+3Z//3h98971sutj9wz9+OWwlG2sXrz723MnhJIk0UHOs9dm19b2D230WS7t46Zvf+ZEf/q+TeAN3h+XswVuv/Hbc6iPSqpvFy2/c7ocWLPC00IP+WrSyVRX5BzcigWyDkE/63YgAc398aK1cW1lHnl8xIM3e8moCVD0/2W7KmmAuG4sAX1m+ytodIOeH2ztWOsKK0gDuhTYVABHZmBglvc7qgwX0S9MKRZYuKq7ObF3yHLh/95bVtpX4TVZvHx56YVwv5mL+ps/Rcj9UpugvdfQkM4h7S9279+9vjzPqe4xRSiFg5KV3tu+P5kSz42lqpV5a7lVVlVWSBuTdW1/65svzkFLG6Ne+8prVFjjoeaypBIIsCGMLgFLKKMkBgcBSwoBRzumAMSXF537vP8jFNMuFNhY7YK0BDjpnq0oYayBEBLkwYEuDTn+1r+vm6vNb+Yk4e255ZWuzM1xJj09AmdqmVWDMOGmqLEsz58R4dNhfagmTlqIg9cAldDqrmJ8Mlr0y91c6K5PJhAwHHBBrkHJAAbPaW1LQzvOGkbYW+OKjL9y+/96knqsjh7DHg/joeGdltf/Iyvq73777tc/9i7PXLnbbK37AjnYeCrWQ1LSGTIsRkPro4bHPvMahnckeo51rL7yIqDKLrN3x6MnsG996fZjXcbdXqEVeV+CBZgBFUYIUCoO2qqewllWdleUi7HlFmStLw7jdCLHMbFkVGgKnhcejuLcctvz72zeWV4dxx1CaLAp6tX8pH6Fnv+uHzm6yfHTvy3/4Js5VwkgS8243XlqJABaP9TdMI4MgJJQURT46zg6OstNR/plPDD2GqzwPuz1pNKIWMGIttYBiJ6DFgMQY81a3UGIb1vvA27AaI9hFwbKzb0s1QthjPpNWG+eGK2Sjb0cjoyx2GABECmECioaDLri1CwBw1umyJtgOz/S3t08ufejTJzfvslAfPBw7i/qXtrJxPR9PEicLqVfObdlq/vCtW5gpiMXjTz27fahbz/gm4hOEzz7+2I0b3/nOO/cE6Nx+9zT02draxtXnPvrG3YPOoNsmOB9NTuYn
V154+sE7D8vZaH4yzsu53yZby8t1o7QzdaWttq2kjRzcPHulAHAsDAOknBa9wTCrZylKMfaO9w4PT75w7uIFZ+w3v/GnV1/82M6D6bI/nJyKC1c6SeB/4V/+ujAScIgwRIAsra/wiBFK8qboDDcqAYp86kcsYMzW2hlF/bAW2vfMz/yFj56/tPnOO/dfevP+4WxkBTWGvPTKTQPIZr/93ItP5ek8W8Cbp82tV4+bfBzC2fd+5pmts2EJJk4DlBQ/+Ve2vv72hFDGITEOGQMRwA5YCDBwwDmAALIGAggBQM45YACCEDkIIcAIY+SMVRjBRinGGLTIGvXMUvCjL17ZOdwVNZpncwBDn3eBK62Wa+eHVS6q2ZzzAPt4eSV2xICaIN8QxKRCGHkQQD8IyqBhsiGLBgI9qww6hpd7rf2jYykLP/JYy+8tdYarGwAY58jHVpekKW4/PLlzOJ5W8kNXr2ArmpHIa6koGG6dGaypupnZPMdKY1o7R6rGNrkEbFYLu7QS5AobK3QBGfE8nxcKv/fm1x5u71JQOVMTj1DqcYqpRxeLcV1XEDAP+brWEUUQkINRHlHgcSKMFlpjCoSW/Qi0Yg8zbJEByDgjRJP6nd7BTrafakhwtxNBVySxFy8Pdg9GeV5u33uwtdYVRZGnjREachiGhBIogfFC1on9ByeT/80qwlA0eRN3I0qQpoGopRcNgUUhW3T6yc6DbVnVyyvwb/7cc//vf/LtgwPJnclN9d4t3O54cW/zd37n8Ct/cgRt6EyjbIUJLvMCyEOdzuo6N4IR5jSAn/2db/zgT3woQDkiWOlCiZIxDzAkAGxFYTuI8kXqJUGa11DT2WREAWiEjiO/AbnjRiBhStUfbGCqm0YqrYAGRjV+FHSiDnCgqUspNEa+Qq49HKbVbGfvdt8PoJBW6LWlzl61czKb8K7PO+TK1vmvvndbY4QIqgRMG3kqF1rO8Nfe/dLXvvgv/oefl8U+9T3hIocjQkoHT+pi15mCQYbCwXR+6ad/4XPvvDr5L/53j33v92zsHj5wgNpGC21I2MZeLItTrD2g1hqx1pTokx/51IO7f7QoDxjCGgLMYkyhFIVqRknYDZKtUhwlvFnAdJrLMGyjWiMrIZRSAkSVM2L74bFqMgQh4jwJW0LQg9mhETNIBICgtzL047aHmUOAR35peZ1y3n2q1+97rJ5PbsS4AhhkdeWTyMiY6k1GEkRtJfKqrEVlHQ7ayVJRnigj5oVFBjrrrFbZdG6NcL7yfdrpRxakVu1RRPf233PlAmCFAg+aZjafW2AAsWGXMg/kVZ7naRL1th98Jwh8HnaXhsv1Ykfpisd86fzWndnMZ3Ccqp/6yZ/m9LjClW+dxpTF1A/ZfJZ5PMR+K0xoVddeEM1O9hEFk/muULK9dJbGlCKeiklV7HsxDFheZsIi3m0xKQPPG25eOh9YVAslm1GZzz0vNE5gJnkbtWsLTT5ohxCoXFT+f8RaK2Oy0SyIIgvBoNOeFAWEZJ43eVpZCH3OZa2sA6zFHaTjaQXSyho7zo6NwxQj6nsAAKGds6istRISNro2liDoR347jkJOsXNCN8utVp5WeTFnQRgSX0mhreMYR52IACcrLaDCACZBiBAUso4DQig5lqXHPWN1XVVWOeUsIBg65PmJMWieToa9FqSgFho5PIh7xqiqKizjta0DTiB0zKMU4aISUjtLtHVQGW21xRbouoZaZbJMvGhWjAZJYq2FGDFCiFUGagKpUlopiQjkhE4zSSGDVK4kAUNYK1DWtVEq8JgxpmpE4HucIUJgXVcEcU6YcgYiopREABptKCEQIeCcdY5whiAFDkglNISMMGMMowhhLJWw1kAAIEGRn1DMOMaFKK1DgR8a4yphHdAeZwhR3SjlSF1JCCAljDDlHPA4K8vaaAOhsQw7qxEgnAV5ObPAEQNDyhpjMWUaGGEtotg5RbHvc2ik4IRqXXQwZ5SNp7N2GPVi38fQOLvkcWOMUIrwoJGGA6QBBABhiI0Foc+KvCYECSUYo8C60GtZ7YRqKEOQYAiI0Ir6PnA2k7W1hnGKua9kAwGgHGGgMMFKK4II9T2tBaYAYeQchAj6PrbWCOm0dYjawGeQgSoX/cTXVjlAilJoAgCAda04Y85oTrnPmXMaOkQxaXQjlSHvQ6LcBwW0sxc2D+/uSi2PTq7HvvuuH3ji2//rS7HXngG5vhaNs/pgIVC+6IXQjSeLzC8nFGv/wexocEe1hxt+t/f5L372L/7MJwBqB8xTiY88D2AiJXYSpqfzBPfe/ub9vWM7cw2Gdv9hFoacY7IcdZQVLrefePFTX/r6q3ffeJfRlq5hU+LI67fgWmB74+LAWKpwkvC+bppG2zjpPNjPuhet8Mtaqurk4PDGzcieho5cTZYFbT37t/6GVPPZ8UEjKpbgxeThxccuz0vR7qLNp64d3CuXule+/EffTqK1KE4un7nyrSTsdMPdUgNkRVaHPtMAdjxSNeTl93afOrM8WN+odVQXqmN0r9vbfvPOtctbFvJC8vWNy7Pp5MLjZ1rD5ZOTHAO8vbt/5cMvLq16f/obv/Hw3va8KPorYdjFiXONmn/pi3+0mYSZkG6O9XEjJiaDgVIwiMhC5oLyZ7/ruy8/dq4U8Ld/7dcPTt5oAWyZybWwBewWpskPuyFJ2PDSlRfK2fz+w+vcpwIRGiR5ncet/mpnqxqdYKHiFj083u9sBDxMZNOopoziIG63lCwBC4XI4+VNc7LrgwZx72A2DXgUrq+w1oX1S+c//0/+PWFquNrdOSx4sXLtuc88GE/vvHt/bUk5USKZzu7tv/xwxhUlERztlZU0vPS374iO7/7zv/UXzlx89PP/8vPQurpWBlpGyd/7P//KZ14cfPIzT2489QSjBFDOeMJZ/9/8q7/1X/3Sr/LkshGmqaTvIYxhu5c0VZ3VtHvp6be//jJdODSOq+P0+z/1ker4wb/97L8/l8TiRMDaQ0fNhOfDrQs0JvUiG/a5bGQcBtmsaYrJApWUYoqRzNPF8dFk/2g8Dt64MVlfph/7nide+YPXSBgcHoyWu9EHfIpFwWjDGESMYMKmi5xTTJkPHWwsANhDziDDtBEYkNDzrait1ZBgxkKjHAJYNJpgqJSSUnGMGaXWGgCM51FrBePM53FTVghaTmlRZLVAmPmQBVlVCSVareUOhk8/svLFb908nabTSY4xUdqOxmmlmj4UWuDa8xXFqjGEQYK10qoV+wTDk2rW7W5aKGtltdIAIiWkx5AzxllJKBGm7rZ7xLlFkWttbWMRREVTMIFouOwhvxILGiDRSCjxvYObTz/5XH9p63Q2FVIxG1srPRJVuXXQMI+lk1FvMDh77sLuaA9oq5oSEuBFzGpYKyOL/CMf+/6Du7dHo8P+WodSD+HgZ//8f9PIGkMYB6G1QBul0tonAcIoTxdQO45BPp0GYdBUwigZxXF18HBR7D129ZFa+nlWdFlHNE7mTZMuetwujve85X6rF5WNOzpeXDj/woXNC6OjURyFCIGqyBmLUdwBsoHWzY52L1x4+ta9B+H
6mZpYSrFWJj95wE6bOBmsrZ2TUqeZQNxb3Tx3Oj0J2Bqg4N7N78Tddrx20Vi+e/92L1lvilxVGjEIgW2yUTqatWMkzWL57GpdG5cJAt0ky3FAGQsDHkz3D/xeMj85TpKl0MeT2cnP/o2f/Ff/9vPJvXktzca5VawVxm4upu++9vqnn3/81a+8vDTsLK+uL8by0uVrlbRLvWDv+lj5aHR6euf1lwee994r72xsPo1Wrl278Ngiy27f+Mb58yvQhKKYlNJ96NmP//Zvffa5j37/5tnnoZS62r9x/StlfhhzEiXtfnfrevrN4dl4te9rhb73iR/8/O99SQiI7AeUFmiRc4hRr+OHD2+8LatZ0mK+RbNSYuIcVGrhFdnsKK093iJQQK2kJlWRB+Jk0Iq5EALxUWZNuN4KzkmtrAkDP2o8kjNPNVNksxJTjMJBNxJF+XAy7XSW0yJrGkGIrYXAjDdZbiGoCcFON87bzoqk3Z7U8M23b967ucc5oQzpslmUJljpHo/LRoA2DtJGnL985fR03wKNnNOVmh7PoDYQwPl0HobcADgYtJxxJonitdWnHnv6+P6Dd66/l6smiiNRSyOlkRpR4KxjHCsNfvdPPo+BQRBYAK1ywApMMSQg8AIpBJa21YrObq0PVjekBkGIexfZ1vkzRrpFXrY7w16YTKrxxQsXMSQnB6JB2g+8JDlf1LPI73c6T5X5PD85NhKXVcp8vHf/aIbuP/7sCzgMlMCMB9oojJmsdRwl0gBnyWw82s1MK4qUb42xi+MDixAgctEKnzrzSEkcJG6UTvYOjx6/9hzw4GSaSiCTQcs5Mj0eOdBdunClakw83PrOe291ls+2QwiJP1+4IneD9dUXnv7wq699qUwFpB4J/FlhL1x9YqnfstgBAEAt6vnMaiDzUsznUJOikTHvHe4fNKpaW40HQQs4IprpqLD1tMl4IYRGfuv07v3tu//0+WevtrphPcW9/qPnLjeL/E06BE8/e0lJhQkS2mUFzOe1QTVELh/Pp9OFAfapa2tL/SZIiJcEDiCgJTIWAIORclAB2yDjGQcQDnDrAiq1yHcov4RIC5nKVVqMKUaChTUN+85o6+zKytKV8/n1u1poCpSz0JVG9Xu9H/rxH/qDr78FANjaGs6OjnzSlFW6vNZf5Hnn/HntBdZ1q8luelop5DVZOnn33e7ampxNlawobLorCaHrWlSgSc9cferOdNoI5Le8j7z4zI397XTseoHvB/ZwNAPfeaMaTU/ezb7nBz+l++shCCelSufp0b39TieYnRRxJ+r4bDotpDJKK9mIB/e314ftmERXnnzq+q33Wp3l9lror/a2hv7Dd187e2Xtxhv7COK97ZOVx66effEX4vWzS+cqN5nuv/O1S3Ve6VnkO+Tou7ceYgQ3V/uL2WJ6chr0AsRayEFGpMdhRAPMYkhKo3KIXSPzuBcvrbZu3jz69X/39ZzyrbPrxskntpY7S2uvvnd7NF38+u996XS6MMJoS4F2GChr4s9/Yfdv/hdLjBSyEQx7lzZIPxaTNIAIAOMgQNZaCAFCEALknMWIQAS11s669606SJC1TjuAMPW8Vq0yYCXGGEGEkTWy+tjjV4/Gxzbkw9WOd2qRDzFXDIPWSq8uMm1cuThYXl8/f2lzPju1jeoHXUQCqaHVUhnJGBWNMIY5QKpZVS8q54WzRSY7gccwRcw6N1kcD9d7ClgtnQbKKUMlPNvfqAVpZH14vOgmDBIa+KiczTBD2yOdhNHWUk+NHqp8BGyAsIcwOHxwEC+LDHgAU4OYboiyyenUmyt5dH/bk5JBhXxKMC5TGQREAxDxwAsoI3wyqxsjoUPWqHbgedgpoR0yhGFh9Pao1g31iOIESUiydAGwVTD89vWdw6mrBOHYjY5nDDhXkWHkjbCPcdMNsE+NN4iKXCOilFDNOFVpWTWNtDb6oGzw/0sVVYUq3cIUUkvCOszvWtcA7BtVbO/sStEQ4pfT8Zkh/tkfvvyPPrtjClMKOj9ZGe+wNx8e/eFLe8CwVpukRc59wngQIIYBEnVhpCYUE1xiDKfH1a3r+TOPtyyorbPWAoe8tJC7R+mZwRYAYD4vg4iubKxZZXjoQwgwJVY1x5PZUr/VbbW68fLJ8YgQSDBHmIuq8VvU2AYA5KypZe6xuK6VAzCdz2VVraz0l1aCpnSL00VZzQAy1jorgQEygxM/xJW0WmpOKedMKYGNDTg7ulP++89e/8W/fNnqsYt8SEVaHAGcaoUJ2hot2v/N/+sr7+zeTGjv7CPxzmjndC8OIm5Zq05LpSshSqVSKI0S7LSyy0kT8+zB9u9odrS6fOZ45xigQKqUUCV0JbUC1qqmhhL4oYe7gRcSaSVkSATxbF5QP5CiRsi0e0lWl1JqgImUEliktYliXylgjUUEtJiHgOK+hyA02gRRx0FPlrMyO0VYV80EEMIwEIVQKKI0sLIqslxaRULqEDPalfUcU7KorJOYs2Uv7C/SwzBMqiYvytqnHuSQUrs43QHzMedU2sIYTFl/Op7N01wrvdSLGKGzvf0gYN1WS+qaQEgRavKiohA5G7b7TTNjiMeEFtMiDrwzG8y5vaCFmICzWYYIhZr2e1t1o52zril1fmQrvxcz0okPTxc8VNnsLsKOYGCrDCrBIPW9CMK2RXC4vnz9rbeQZzc3zmSTMeDw/NYji8m2bqbzURl3IsxNb8gWk90oZErpdrvd6gz+I6uIrKwPi6LUWu/ubKcahGGsG22141FglKKIOYgrabN8obUK/IAS3E1C1GYQmk632xSFtlY0UlRNU9aMscBn68OlVhgYqTCFEKAmr0WjjXAU+0paDV3s+1LIdtJvqgX3A4ahA4px3kg9T3MMCaUQObfa6TunNUDAWGMtpVRZq4SyEApoGeV5UTCfY0SqvEYOYI4hIc5ZglhVl4xxB5A2llACnCMYW2uAs5xSKUtgdUhp4HWRRT4hESU+oUpqCCBELogiqxwA1ALnoAs4gg5M5qV1zhKSBJRYEEZBEHFCSKUkpBBgoI1zQHPmW2MtsMBqaBwnCGGqlQEAAIiUksBZxiiw5v1kEIbYAeB5XtU02AKEcNM0fuBbCx1BUtfQcoSwNYZSTxkBEYYOCOkA1MZYoUqMIIIYYmCNxoiWtZTGAAQIZQ4gB5CyuqkWDgDocMi6UqXKSIYIpZggTAikxK8r2YiGINIPeewtEWctAK1+ByIb+cQZUJciCFglpM9oI7RxVigRByGyoKjKkLKyarQx1lktRaNVFEbG2EZX0ugo8pWUENE8Kz3qGS0xQCz0lDBSNkJUPg8IIqHv+9TL0lQYW1ZlpxUDZyn1VCNk00AEPJ9ZCmRZKwVKbT3OqIMYQIw5RtgQO6kbjyMMsZa65Qe1qq1yDjhjlDPE49RaqyQQtcAMva+CyYNjk6nlM+urZ6/e/Nw3v/ibL3vTYHosTkeiBTlyWFagN+gNV/jodD4+roKI9CI/bQXdteS1d19/6uL3/vm//ldHe6+1oKG0g5UG1MOMZXlDGfUI2Oi2/vTLx5HvrV5Kul2ymB2tbw3rOU
aGenFEZL13/WCxX4q1ebsfNbLxSafK/V64URSzwpRCGco9XVhTUexF84nmXms+fSc3iWrwua01dCVw+VJRzRyi29fvheETq8nZvQf3AKB5Pur3V6Y3b9h5lo0P7PJaTyUPXn59reVfuTQsJuk7X/5Wn25efvLC4LFn/vAP//0f/enXljrJOBNQVVUphdGHp/PHHt+4dNGbT9MqT6FZvPjiZQvwYj+9NFxb7vXffa2Z7deQ31te7i61jSejdL4Yv/0OKk6eer73J1+YUeQ3mTTOlLlbHJ2qmHZiKtKm5ScJ5qKp9nbGkRdKYTwa/aP/4Z//j//qvxsdjS8/cvn66GEtp2Ujeq2QWQW0BdpiHxAEHEY2SIabz8j0JM/HceJ1WlFRZul4ux0FWXEyyeqow2aTFOFmqYcn+7c2tgYG1oj6MOCUalic4urIZEfEJ8OtVQxRoWVv2KttvfX4xp1tTjyw1I3+3A/9aKVoVqKzW0tETo4Xe48/MjzZnQtizlxcbQ2WDh5cJxa0NWUz//v/yvfJo/tjsvjsv/429ilqNKMUUjav8FdfPnr11Zv//b/5c7SDbYM0C5yRT3/iez764a984zsLbj0v8YOAqUZrVVIrzMnivS+8hAidZlV8Gj5+5qk3/+gbX//mS72lWJ6KQ6V+4Rf+soEJ7LSLxcIK7FFcL/K6ssRzFoKd/e3VlbUk7kaxl6cT64qViwMHej/803/9X/+LX4H6XrzW09gFldffSN5XAWW4rOpWJ3TaGtskEQ58lmYVpVwAuyizpThhjJmmgdY6DB2ElawQIBRAAzSDRGmrpALQWY2UMZAgBLGDrqprzjijHGHUSlpZmrKAIualVYnDBPPA8zybCUhC1Yx2bmwjJcuszNIy6vvOSGBtNpurbsuP2jItkjAQ2BontFXWAaVMVTWM+GU252EArfOoBwnJlWlEA7X2CHXWMeRCipNguRPKPDtJmywMO2lVKohAXRibYWAwosRjAOuBW3p450hr4cUUA6erhc8g8n1taVUKKGEriU6OHxAoIERamSpfhO0eRNBBE7aYkI2qXa/TjcI46oSiUYxxbco8r7VWlBKpdRglxlhnbRiHjlLmt5yV0lYUOBBwDgPoAEEQC3C6d2hyFfW6QDblUe4IDuNgsNK+efM96FHhZJx0kfO4509PK6iNcsBWoloID1NENmeLN72kVyr12ltvExf78xxIO1rckIsFIySfZfPJ7EzSJyzkzHfKMMI2Vge9VlDkuU9B5AemLBFQIUC2klHQfuK5j37722+bMh9n5fHJpLW0tJjngcBNrVyjKIs7G+txu3P3zk3P2QABGuJGNksBG/idk323d9z4Yb+C7PnveT5J+G/8z//TY0+fe/Ty4613Tq8+8+Tv/9YfPvb4Wthtv/y1V77w3re1Bv+XX/y5ddLPmunlx6+Gy21H9YaVcX/4tXu7z139aCZOLzx1zepiXqUo8TwFdx9sX7j2rC7m+wfXjw8nTz3xeMv3bWGmh6cIetfvvMZ9vrV5mYr6/oOdcnr80WfPra2ee/OtW++rYH46D8Mk8ODx/Vt33nq73R/UEmhRz2bTIAmWe/2VcPOd8UmrvUKg8aHr8ODB7nHTOAa9o3Qc+mEjbXewUfjnR64XdDZsZZRRlFkHFMQAQWtldWbYlvpkELXNUhIurWa7jkjX73CAHNR1t8MmabE6vGDl4tbubqfVzWcz5ycAmyDx5+PaJ8CnLPBxbUzS5qHP0llZa7vI5tyjamYwwcznBAKhNSEoCOkiy/0wkGUli6bVbW2t9K+/8Y3D7YOsaJR11kDgAEZouDmIvDBNsywrjJIUQWeh0cZBiIB7//6gtVVQJz67dGVrsL5mikZkNfa5bUhvuYOU5QkPoghZki6mUdRuhOUUtnvLg7Vemh5NxmNndeChIHJa1tDjbi79kC7mote+9JFnvrs0hVVOFZmr08DnRW2FaUxqoGZ7e/eunb+6GO96NDi/Hpx59MJLf/TNvXm+2RtqY+bT8vJj5yZ7R6PRzvf8wPcVI7l9MErLmsWkRQOrbNwdCOHTcKXXZulkFwJ1fHwrOXceCzQ9PUlnE9+DO7s3i3KBAOIeYx5Y7q1oYCsNy/kkAiqg1O/1j8djZwSlSFDXWR6CxXy5HReNA0IFYYAMjnzKGXbOLoxYWt+AtjOIl/srQwCa8d4CnFtOljjy4aVrm1DWTSPKRu4fjcaLppyXqhKWcwYxU7q31DmZjvtLIQvaFkHjDMYQQu6MQlA5ZwH3EabWGaAz6ErnfMAG1M5N9iphQwcCg1ve+qd3X3vTuWztyhLxQqsrY9HykEJYQcAJxtYhhQH3ozj6YA0zW5QYMC8MF6cHYqY2z2SqklGU9NfX61Ny+413N57/EHqUVVMNtBFlGbe9bD4bTexweeAA6rZb1XiyRLwz57amcu/oKD17tnV8etII2u4t9Qi7em7jlYPDYTs8PZyguMchNdNyOFySRQm0sEZsPxzNWr5rnOeRC5c27t3ZcwbkpUyL01K8HlIqQTpc6hJIWaszXF/PZqeayhBbz8eFFv2Lz3hO2mw0G91+4hPn9h7cBgSOTk6Ec1KocpqbXpIkiRBiGK0VFRztHDRy3ltatbqSsrKiYYwgq8q8hsy/dePwCy+9eyj4tHS3X7+/Nugsd9f+6OWvHmcpw6ARptIWOGBsLWqJMcAk+N0/Of6R731scB5p5Qzxo1j+6A96//jXDHHMAgsRAs5ghIFDzkGMqbEGGggAgMBBBBHBAEHgHLYOIWNcCZ2x1jmLjHLYVh9/euPx51aVrJkXe9QXXrU0oKoukXOzrLam4pxGYRsCcjgpKImW+0k+mwGtEOacU0Ao1EApE/nBJJ0x34WNg1gGMTk8PZY2SRg0WpdZGQIWEqoxRDSURcGpxtx7uhXUTT0+PNVSjEZzYQ024PAga9puOGj5EeMm8n2ftSKEY+vDOBAGSKtRWSju60aib924fye9szHsRthopLOs4YGnlfUdLMsGAOMHPKQRIx7faNepqKq8roXRUjhojK2qjPke9XktzfG4yguxstQdLNNoKZlPF4vSHZ8U2nmdJHGiNMomEW0lbHpySqURZQ3WVoRIKSJ1WXoEMeYzzPuteBDH01lFTfmfWEWYhdEgrIu5VacMaaZRUxeqKZNWxPDESt3rtZFPnS6feZR/+tH0G+9OFjP8T//lPsH+w8PU91sQgaJcIIecI0UpAkK80JMl6rSiMi8haGQjoCGf/w9/+skXfkbZA2uM4bwszLdeunfvgYxp2Vmim4+eyU9GRdYEYUAogk4Dbh1zvkQecqZIQeR73KpS0dDzCHDIYOi0VXlRRSyJeB8iiDCjgGTFVNQ1RZSBlgMiYLpR6dnL50cn2f2Hh1ao5U2ulNLSaQPeHwaH1kAIqcfzwhwdHvfWP2lgUetcwwLDPM+roun/s19997X3ADCry17o92AjRDio1i8v79w/RIYjLRnViFXz+Wmv1Yeek3kF43nILYUp1gLLtOUz6RKqUaVnntdmkBZisbW0KZVf5guPxO0kmowXwGjPC/uDnjKu0
0lODo+8IKwrs7Lcr+ra44qxgHvtSkiLaRRHPgRCFsqWMe4xpwEAdIkx7hMIVAFYl6sopCQCiiiiqT+kDOjFTlllSuRAE22Ftgg4PpvMGCMAOQsEJOGgRx7c+9a5rWeS6GJI8WKxK8sJctQ2NYyMRQaZsEgVhCCK28RpKKWpRchC40yeLVY3Vo0EaTYN/QGGTgkJoTLalVMDS292Uj3xxDMcTOanRywx0NokbpU1YqzrgEeoEkWGoNRiEocrsqwlgUbYxfzIZxQKh6JI1sVwZS2bpsA4TAOlm6wpustxP5o3p3+SWCekyBp/Ni2QF/j+EjRgfLIbBRGzEAhTVjX320XZvK8ChqkDAABIMebtli3qgNKGuyiMHITdVv/kOD2eZI6CbqejlC7LkgfUAc2ADhh3delhDDklBAuhvNAjGFOAOXTlInMG+YGXlymAzjkDraOIUEqkUQgSBHU2H8d+AIyt65pwoqpGW6eM4X5ImAPQWKGN0YgCP+AGG2WAEEbUElBMCFRGWOMsQAQ7JSWCiMc+ZawqCgzw+wUl4ADBGECLMLYGAACdtaUojVMAQ+cMhdDBxudUSmktNMp4jDlIEPatqhB0ke9DaJu6YRyxHq+FPpnlkzmMOGeFRBhZY5VSjHjOOIgcQQQCSDgVsuGEU0LeJ9JRRhBEWllIKAQQAaesoowHjDSyAU5rjJxzUmpCiOf5zkGlpbMAYUIJR8A0omrS0jmIuY2ioGkaDDDGmFDmnK4aYY3hnCiHlBDaAoYcMNZAhxCW2kIImfO01aVYEIKcQ1IrgiF0ECgoldRGIugoghAiqBUlRGode0w7aYUEFrejqNG1VrZpNICwEBojjqRh0DGM0ixrtSOEkHXOIUowVso4a7UFFlBrMSaoqARyFFmal3nSSRD1gaqJtYxHmDAEQSUbiB1nzAkFMHZSEQSsKSlC1GMQIUIgcs5qIiyU2sYhoZGf5SUlyGHIkB0mPuUkKyttVCWJsoAgixm1WjpoCeGUsaosCWaEfPCT3GRVlISvv7VzueyUdSSnrgWRouXaSgy4E2Vdq2o+rqsMCuMkpdLpo5MTDOjOO2PCW7/5e//6h+zHLw5W33vjleeffC4JI+JHDaIM1bEPGA5uv35jZ7vJWTAElDYW11acKC6id9957YlnnyYMZTtzOnI3Xrn/oRc2MDKdNU/hkIYobfZYIMjYDLwOhCAH3KF20O6HCnY7dTOZDv32t/747aUz5/otH9NOaMhlH/7q/+Pv/j9/+Ve+97u/Z//u252lQfvMhePtfO38eeT05CE2su4MBslyLKd3RzsjxLxLzz7Xb/und7afvnBlCeNbO3cgAOlcOGU4RceHeTl/4/FLm9curb75+lQrXSHZavc24LLM83fu3I+iJG7Fj13ZvH+waw5m4VKPxdxpkLri2tnNa0+lyxcv5Hd3Hrw9zU7rrdUz+c7pDDXtTkQwlwZobVYGPY/45UIep+WVYfsf/b2/88jWxbPnu5/6vo/+0e9/USlDPBQGHAKEPIp8UMqp2nmrN9yw/eTJ5z/94O47TT1JR2kUUieq0cmUekxrHCcJjaL9VF197FkigJJpPT0O2ht86DllzHQmDw6ZAs5a7HCeVltPf+Stt08fOT+oRqIYS5NQVdHXvvmVj//8X1oRJOmcffcr+2JSX3/l1u6N4/VLrdNZKkgYRujsRq+cFE9fWz/71OO739y+/q3b09RhgqxWpQTWoJkBs5EZtKN8tGh1AeAehBTQiJONn/jzP/i5L/yjy488enQ6KR0ASpeLIvYd9tToYHbpyTDpB+99dcLHUT0VWuJWK6iVWX7q/I15trIUrXWg1Vl70J4fZX4/iJcG4+OMxd7WymYSR3u7t+thKx/NW9HA6wRHd7Pv+e6feHj97s7d3155ZAWEITzcXcwm76vAAeRFPua8bsqqKn2e5NpaCMqq4H4U4ECJJjMSIUgokUooJQGCnBBZF0YpQkMhJfU9BIgjGgForKlUQyEJaSgaY62pm6KmmHCCgCOE9WNurM4nY85pb7Akhey1or1TRKh39szqhatntw92pVEE4163YwzUDoQ+b4QGDohGYcKipCXKdFHpJIg5Nkqausl95lMYMggBsg7ARZbFcdjvRuliNJ+P8zrvtnuMtWQNGSDayV7gT2ZHiCdVaQFwWTVfWl2Lib+7e+j5vuVNU9UAE+SAMjaKIqkV82mil6azIokCY00YtU1tDdRaK4QI0LZRuWokJEDKhjGez2eM+xA67lNCuDa1lM4aaZHjDhJC6nomVRXEQV5M/LCvpCzzlBMY9Xo8YLXCo1ERt1nUbbE4lIiaSrcGy1Jo4Jzz8drldbGvmarKvIaet5hMPRwsismla5/y86233/nDp55/UbcGUXf9ePdOJ7ygbeVU1ltbBgyUs8n04T2O/d6FS8H65v3xyAl42KjuWh9hjWlEOQAUbV49P3pw3VPi3q1vGSGItV4YtXvk8tXLe7v3fQKZHxAGOSOMwaJcDDrdrZX+ndvvpfOM4+D69dvf92M/HQckL8rv/tQPfOuLv7P37vVHn3/2u7/7x47Gu69+5Xo82DxIF9/3F36g2n5oyuLF5y5/cuUjv/S3/tt/kv3j737uo35MP/LhD738nTc/9dEP7VTF9nT+v/+bv5Te20bjh5aJxE8aLZY2N7Vu5ofp4c7huWGfGn716qOEoMXpOM2a4doQcIRkpZTevnOvQyMCiZqPljcvTY5uQDP/gNJSF/M86y/3i/k4igiOCA0YAZZQxry2VuXNg3eCnufTYDreUxDpBlKANQezqm4n0azW06ah7a5m2ouozxTgICTEi1hdTqlDDAXYFMDWF9Y7/Sg4Ohm38OzSpc3RwY62ta6FblQURiRi43zHA3Z9pcMCL0+L09nxeHuPoGRzOOCYrKzFOsSghfsYHz+ctmhSG7Q32ScIc0BZgON2LOpaTDNEoe8x3ulByo0xiHDu+0cP7j58sA8B8hkOfN5Il8TRWj/udIdbK2svffUbC7BQUlhnrTHWOkIJ9TkEEAIEPdIOeULgsNUDGgBlg4CcvXLeQ/1uCDAsERIBsNzzWlubjmDOQqU0RpgC0Ip4nmJRNA5WaX6PUlOKxenoNAy6TeYNlh6BjOrcEGI8aDwS+KQlbClNngvn4+Hq+rqmTbDcObf2odPjnZtvPGQQPnLu3OWN/svffnX//vTx586c1osEBsfX3y4zj2Ls0ViqUuayFw/8jm+MAyaTCq2uLvvo2fH4cOckVg3wQ2KmxhbuxvGNqBOHMT09mXBeu47dunwJY7k08EHejMZZo4qqmFeLeWt548KzLwg/CtP09lc+3wjhUUQRYcAvG70os6QzcDBorZ0Pl9dX1lYjzpvRgakKS8OXvvDS4f1d6GuG1DhdFKkIIy8yBkNBBzgVoipk1cBqjBrIv/76zR//dHzxka4QC1uVxMeIUGUkwgyaQDsOXWrkCULcmhrgrsXnOUVysV8ubuLupjbx6DR6ePPgk8n5lQsBphgACKDD0ALkrFQAEOvcyenx/v7uB0EKpaI4avX5fOobhZLAhe3u
rVfeqateywupv4bCi5jjeve12ThdP3Om1W4BoJJW+MabX716+SqmLTlf0LAzurdDanu0fdq94j/z2MWje2MfoMXp9PXFSbLRK6eL7XfvnH3yiV43KEa5ZOrsR58crCzde/t2+dIrjax7rf50NFfbB2trnbq0SjQU2v07uxvL/WgJyyxVUT9VxioPs/gHf+4zr379fsvx3cN7o+P06Y+80OoHb35599qzj2HowlZEgvPvXH/vL/7n/+V7f/plRALLmuWNFch8HwKBIA/C+WSXZ6i/ehYGfp5NolYwXB6Mc/nKjQd3jqtJbmqjMAaHR4vPTV+LfK9xoChlVQtIMEEOAIAIBBAriCaK/sbvHvzcX/ZRq6p8QLzRD/yE/rU/KE5GPQ8zqx2EFDqMEJeqBshBB4xSiFGMMXDGAYghgtA5aACwWgvgHGFUC+mc7nJ3bb1LKQQKERo3pahrKaXmFPqhpxuJceAHHQ+B4bDfAAuMzXNXNxa4mvkAAUyJsk5ZVVRCEmKXlz2H9f2xItYxRBBkCmjMeRKvVePMlnf9dp90gLNOaF3Ox1HIBkmA1GqW10Uuy9GoP2g3RVnVZGc3dw1YDyPk46W4O5lOoO8trZ8txhNgrTZNSEOl6O1794HXJbFWjVRFGRLiUT5cahd5pi2zvmcRnUyl59kwauvGOu0ssMQjAAECtc8JIZQh48VECDNR9nQ3TzILkAUIVVktJScUQ9swhlrD5NLF5ZOHkzCEa/3l7RvynbvHj637S0MSx76VEkGSzqdrS/1f+MGPL04Xr77zxkv//1YRkBpwyD1fNrhpRkFM4yBUpD0e3WY+7q+fm5yMnXWEsMGQ/LWffXb6j7/+nbvVUWoczh0FwhQeI8AYBJiWaVWLHBFQhjH1AMayrqGDAFPKk3fee6h024oD4llAUJObL/7J7ZkcHk9vqy+9+1d/8cc9qntbySIr+sMkOxmNTk6574mqQCHNy9xnfsh9zUijKtXUGLs6k4jwJAyNsFpqQLBtxKKcZ+kxC73lzXPWAoCV86hx7O72yI/bldq+9Pj577y1qwEJOC4q4XlcGWMhY5RSHrbapCxyUcwakFrqUF2FrPcP/pf7X3pvvtF/rNUhHq/zLGsHbJE389HpKO2P85kft6yugK2RAp1ktdGurvLA94QoldYYE0DcaDS12vNCU7uiUWUQBABFiLuimTHfS1oDa8rZInW43WslR4c3lzaG83TW5LnHyHS+8HhopWUYc+pTxCDERd0wEnISOiUCHpc19dmyVXNgm+PdGyvnH6eIs3BFVieilCjgjQ5g4DkAVT2Zjvf3j46ni5PllaWlfk9UFcMOtwjABCFnIMqmu8TZmHTbyZOVc3U5lnXEfWesOZruBQ2HGoYey/MZ9ryVpX6dSk1o3F/Tmk/To6i75BzKRKUo5D5jAYOIJO0gO0iFwwa3FFw8+uxVaQ95EkJMm7LBXogILoTDpJLaaagooyFdVgY3WmHHEG8rla6sxVWudo+O/VZvLnXgcwhcVc4o4fUot45t7xyp6hg5g7grFlJpn7QGG2ubcRD0k7zMc4pYt9dxoa5r3VQfNPPTvI7joD/oKmuBURaixbzAhAOMRCOOp9Oj6QwSzj1eVw2yrp8k2EdFnudAnd84jyBLs4xHtBJCOUipL6SAGEzmuUeZ0c4iBxGOwkArpWTqIEIQAoCyulJNHXtcU4c90I4i2ah8VhLGOp220DLuBNCRyWlmjGMEIgwdJoySmPrKzB3BmBKMQFU1ABOLgJ/4FBFrDHAAImSt8T2CKbbaWusAANZqY0AjBMKw0oIxHnBPCgkh5J5HmZPSOGcdAoQRoZVRglGmkHHGIkQwolqaKPD9ABqIy6qujMhSATEAzmljl9oR4QxZo6012hBKIcTm/TCLNU5DBBEl0BiNMCYQEYytdBDSUhTGOoqINo5gghkBAGqjldXTNA39OPT9qhRK67wSURQoqa22ZVkiTLSWUknGPMao49g6a5yrykKIJkji0I+qvFJGQmi0MVEQZlUDoMMQAAsYJQ4QB4CUEiBkgY3jsK5q64BUyvMDBHEhs6aUjRX9Xlc1SmiFqa+QE1JaYL3AIwHVyjBMtRWcUQiQ5/PpYg4xwhY2QhlrAYbOQaWgdRYTiCEtqjyIQ6EkcBY656xBGPucGiMhJs4h6YwXesxg4yxC0DjAfc9qbe37/y42igPXGINQ2TQ+p57HEHQh5w1SylriTNtjlEVZWQNEHAJCmziKgHVKaaW1tYZ7RJsPiF3trbZkbH35UefgwzL7oR/9Gzc//+vIqz1sBFDEWVzTUgABYdziBom1s8uHo/LSpa3LG/H27en8rdH5D630onZNtjKxMFnWR5ixpJovZIYRwjdu7mfA1wj7rVhpuTSIqukB75DHPvFkYRyP/P7mJgQ9vrJhq9E4T3EowpBM56cPD3b8AY+SrtOOt7hY4LIwvWHLM8Xxg91XvnKjGy8Fw/P9uOOkk/PCpqbHop/62NO/+nd+aeWR1R//mZ+aL/T4OK+mJ4u4pL3EC3vbt+4gzq5cGDx8+7Zz5tFPPLN7aJQRvV5fV60LH3qWua8Q+Oa7iwcWIASI4Qj343EtyOl885HN7npr96S8eGF958272WKxebF7uD3DGS8yPZMxtNHFC5vffun3Rb33iR/6yBf/7Z9cuHptOjsyVTWIvWg16SRrkwYcLaYnk9q60ADQbnPi0aO99JPPv3B6nCJkNQGj6fbp9nT9whOEbM7G94ASMEEutLKqCSZnz61n09rm07rYe+fotmjyMIpD7BtdS0R4JyHY+THdOdp/9sWf6z155nQ+pdl7dGjLMo87CHgdGgViduKtrXnd8L0//TrgfHj23KSQS2e7tMdo1CtTpAwA1h0c7IuTey0F9bi+dH55G5TX37k3Fvx83NEY3n145Ah0PBrN5qezLATHndXqV//JVzbPdYrbZUOg0QAQKJ3r9XulmP7eZz/7C3/7LCExABAR2pQi2005Dnd2R37bdwAiZweDvlZVw60o/XtvHrW6cnVw8ZXXTm0jGWXVQrZ7ngfYzdu7wjBFMCmno73d62/fnsty+exj3/XpH0AOlXVuQK5RUUrmLJxM69ixzctnKnj6+s2X2y22l0/tZL6+vsTjD/Y+ZK0Iw6aWjLHl/sb1m3fj2Gc+2Nzc2D04RpQ74KTVgR8RwBzFQkunGyUAhJByaoFBGGltGIHAKYSJUcZZhFlkIULcUuMwdNIBRCmgsMwaYx1nBAIkhMnLMptl2JZ3t0c88qUzRTqzZSWVklJKJXjs10pygMKgXUHR7rbG40U6nxhVWai7Kxvp8T1EKKUBAECbqlH1oNOrC+VTjxFSNtJZhBH2eMsoaDW02lSyoQEyRsdh2yHoABFK0TAUyp7sPJzMp9eG1/JFhiE2yhGOk7BlnW6qWgIbBIHkvDZa1YJ5kBO/EXWcdLQR1indKGug73laWGAMJgQCAy2y2jmEEUTWKc4pAEhWtVMKQogNYM7zUMIUMhrkeaqiEGEuAM41BFEH+J5Swo9aURJnx9N+b31tfe3GW+9An51Miih
uyXm6SCeBDZNWCxNcSAF4yXrDpz/94614cDTJUmOS9XNNo6VR/lJvlpZKecnSWaiNSKeTvffw9DCOaCNp2gjLKQ5QWuSmBp2ke7LYn44OWgGcHeXrZ68WqWeNQ7J6+8HDxy6dlQuxUPLKE0/c+c5r03zRGfaNkDv7J51uB0X91fVzd+9s/9p/+M0f+zMv3H599+pVdu38Oe2HFJNWL/bQ5tn+oL21/M7dk8m4qHYPx8cHW4+d78DwH/7f/q9/8PUv7ZTpACZ8hqtTc/21O1mhcKC+8Xv/ol6Iew/eeepDL1ITHe8dw8Av6+ro6CRcXh5eeCSYhY3DD3aOawkaxY93dw7H+z/8qU+kxzKOOw+u32wPe2dXL86m4tc/95sf/vAn/yOYAqytrTRNCR1YWt8AUXtxeMgxOPvYNZFWJ3vzpkFGQYhRgNtC1sAi5rPjeTkRqoQqYUG/04NhD/nRoplxVcbM7yUMEpbVeSPyusyW+j7hjITBg/1jbMN02hxO32OcCt04iyzgs7wBlHKKMbRZo/S8bpTxk6Wf/qkn712/xx3yuL+85JewCvvtw3unfd+fFdUkraSFygHGKFEKLLLVobfsxQwjgNE4w531syz07+8++MyPfH+dOgxfdzLf3d31KIqS6MmnrtjF/Ojw4I3vvFZVTS2lVAYCAAHCFCKMIEQQIAZx3AoubQ0urnUrJc9eOV+PU2fxSpKoqkLAEI8igus6U0YNlvqE+BBxxBgm1OPU2G4i2wWO9h5+SajTpNNzOLxy4WOns3l3K8ZxJw9gtchXozZPYgc4gMSLQ1jWYdxK5zWmcC4NRnC5210BaHxd9rsd2vXfevedaaaEAtU3t20ul1YS3o38JKgOFmnZXHpk9cLZZ77+9a9uknWtG1+d5PNirlVVVzT2D3eOqlx67RBZO59O502ZQ9Tx4zIraiKVcHt3HnQ6/VbXivlikY/8iAUeJxtnyv6ZmvehqtPZwmEaBKzT9pTGnhdJ6FcOtM++cHx3Fzh+dqPT6bDjBzvz6cHxg8Nb9x64uoJGEuQEnl8631GVGsSgE3FgUWeZrT6yNp/AV755/MWvHwfRUmf4yI371ZUrMfQjQAUwlbMQAoYdtVJBSB0IMOsiaEVWQKJR0NYQw1aO8Ix5pj5VR4f+3mQ4SwdxCoWU9aJpKqaN0Mo6BylFwOFKiD/+0lffVwGC2tXTg1spwmBppXV499bypumudP3e0NV5d7Mvyn2c6zhSyXDZhp1RLovGQdRcOneNkTBob+CmEQ4BRhtaRSu90+OZrkU3iVnMypNKaXOa7sWUassxQ/PZ+ODmnavPPXramLqG/a1HHvtEeHD/raPtQ8xY/8JZ1WQbq92HNx40GhYanM5TXbu8qZ599tM5sLJpxOmcS+SHg2aRBYzODg/rkwfDc+tPPf+JQs60qZ3XeXg4unzpeZWBSWrQ0bQ3jMJezyqThGxq8iRuWSN6/YG1RorK91sEebO5/Ne//fobDyYVQtpVBFrngJQ6tXBW1dZYhgGkEEEglUUAUkwdBABRTfBvfDN77JNnH33EaupUI5QGP/I90f/ybxzCBLxfIDTSWIUxAMABCBinFjhnHQQAAuAsMA5iTIwxFFGMDUCwbYvlZfpf/tR35dPjohQRjzDhiHijk0orgCCoihIRAAlOeq3EA9akCGFCqdEWMx86aIBVSgmnpWiaskbAhygA0Dx2eRPzk0hDThCicDKZschrBNo9HSdByAsdCwMQxlYZpYtZKSoBWTLYWG33Q/mW0h5cWmvXpTaAvXR778Ur5xZI05bhhAJkMWzCkIlFPRo188w/KGvNuVUVQS3WCilCLZ/5AeJo7se6v7Zi406VWljBg/19YNM8rY0VhiJKY84IgTkgvsdbuimEVJTjtt87Hs1GabMomySOfIR6vYRC2+7EeS4IhtmiRISU06LTYefX+w8PbKVMXWuIndYSBjwZLmHOz3coWe6dv/j8P//WH/1vVpGWUski6Pib5y4c7H0ny3eRoEH3bHtwRUkCSdsPbmtz0uqjPE3bS9Ev/+0f/fn/6tcOT0qCCbBAGeU0hhgoVzjU/MWffPTZ557542/c/NbrB6JAmGOrgWoMZ2jneHzwcH7uUpKmN5TGr3z7aDZD945O25GnVfMbv/H7SwHqtWLCvLJwIp94Yei45wF6uD9d3ljBDDsAnDPQwTKrCKEUMp8FTdFUjSAYhx5N09n09L4DlnhcqUoI2+52vZafzlJdcbqceN6dne0RcI4Q2hTSiwIpnIbMYqRrrXQ6iMkzH7mY53Pmc6ds5lb+/t/76tt3lx7derQpZxLMC6WVKk1mLm+snLn2yOH2YQi9VhQWnKhccuJjklhH/ICW1aHPmc+pF+OqSXnQ+v9S9d/vumfnWSe48vrGN7/vjmfvfc4+qeqcylVSlUJJlmQkOQkbcGgbjIGhGaCHpoEmNNBcbq4ONHRgoAmDzRBmbOzBlmScJMtKJVVSxZPzzuHN7zeuvOYHiR/8R3yutZ77fu77WcxNgEvjKwSd0chJhIE2xWkj7gonIfRRs3W0KNu8c+Xxl06H15uBo8bxMElbjSI3RTXvN5vYk9F8krZazbAtqvkk20+CJrbtVtQrqgyizAotRNZaWkpXLjqLdNXAaMWYNGlsVtUx0se3b76DAAqiASxDztZuvL+zud4JoljrBUesmi9owJc6m4vcbpy7GKTnytl9CsjS0tI03wlSRmRTuRCXAWZLBFunjShmQhe9tU2n8nk2hgErTLMFokbUjuJA5AtrPACB1rmWhQd2d3e0tbZ5bi3GZiahZ6QXpTwvcsooRF4VC6mkshXGScRDacH2ma2Dw72N1U2IuSgtI9HWGipNHnIf4oUSohFGabM7z6dAagAhjbgFBnDVCtM6Z56gmciMmbbb1HukBGTUiuHYWLq2svpdCqKYW6sroU/GmTXOe7QoFKYmsARAVGa1A4BRaKEF0CKEhZAcRgTFmIJSaIINCvBsXmaL0jpfOddqN5Y6DU4IZwQioCrtHC6KQlQSEcwYtUoD7QB2NIgWRbkoTRxyAj3C0AMCPdbCAO+pY8ejofU4CHkY85DgeZ57bJxHcciks0ZpY23AsDGGUxwGlELgLaylTOIIOEsRQhAKLaEjhCHCqLM6oMh6kLBIOWuVxQ5ijMpaj08XWtmtlSVKoNQaAKClxDxAwAchc95hzAnDSgpvnTKiE0eIkLKWEAIEIcEYA2ulZJxDDBCE2ijngdEmjBNMiNTKaocRJJhg7yzCEHqEPMYqibnWFmFijIQQQ0SddbI0nro4XDAXzwABAABJREFUjD30JIDaAQRQHMec8TB2sqyAo7O86DYjCrkHDkJAMXIOWG8JYoAi6qFU0jgV8aiWilDiEeRBYI0OWVLLjBKsrIGIsCDJi8p444nxFnpvIUGlqqR21jsAqEVsXpYEIW2QtjZTtfEOA8SAZ9Abr4HzSUitcshaaQyGBGHqoMul4IyFlDrnjXHeG8aocpqGxBhnjIEeeucQhNhDZbT3jlEupMEAWmswxt4BpSRAzDiIIYXAGe20thRiZ6
1SEiFW1YogiAHKa0URQB7oWiLoCHEcWeuQ1MB4LLkBxlGCQsYd5AhjU36vq8hriaDcWqEn797vufp3//X/2WNxO+7uP9hTwCgrCQsIRYNeq0T2k596jrQD/CAbl7OkYKTb7W33Rzo/ONA87va7vfL2UX3zqBm7bj8q86oU5fbTZ1+vHrUifOnCRtg5TdsR8CbXNgpTCtn8dGbnWkzl+auX7jyqomCbkY1srBceeLYivA3CVj6dqMDXpox5ygjnYWqTznR4cO2thXDXP/qk+eiVC9O9aRJRSfON7eWNwdqXv/yNclph3kE0qvMptIWTMrNCM+UVu3vz/ng8V6RxfDLPh8YzsNw7U5fz051DUuMf/eyPvfypxT/9F184nc5yozQqKmSqTHfaxFFTzOU7b79tF4VyAC0tq8qPPUS9pe3Hzp8cHd69/erdt69Bhx7fvNxgh5BsXbjEdxdvhhyhPn7l69fimAgFvUI7dUYQOC0gCAgB8LUb7370ueebMXhwcPjBS2vvfO3Bm3e+unH1o3fLMUqdrGccaWXyvOgd75uqUqPxQRT6RVGeO7dc63zj3MVqXjw8GQOnhHO6Mt7R/Z3bH//wZ4b7Dxf33q7y46XNAVCWqhKIDLrTWtbasu6Fq0raxvIaRA5bAYCKO0GY0rAd187PgBqNKp2DxbyKQ7Bzf384n+amPnt2azw8ODnK+kn67JkOmPHFqPriP/u/8ukJ0OWP/vFL1b++PbvlZInCJKgqczIsWiF+++338sVpO92ABDugw7i/vrHSbUPDmcagKgSxrmB10AgvPX3uZ3/k8td/8xfWcONgobFG3ZVOXS1yZx9bXuaq+uizT2riRvvXv/PKtcOdUSPBjX7n3q3rH/vQBwZLg3KRz41cWduMgsZUjJKErmx33v79b/yLf/UrQYOfZGIkPEeol+DDveK7FIQxDyOmpYYATebTsBkRBIUo949PoLJGS+MdDahTRliLCWAcB7SXVyUlGAAn69o5CIHVUiECvTMU0YhxgJ3QBac8juO8KKyDyioHCEYYeh8GEYAeYx9GQKnAKCM9lBZMRtnBg4O028BpsrM/0qWa5XmnM6hLXciFQQgQpFRFCRNSQIbH092IU0Z4XmtAsLMgDQfe2qoW2EfOk2pRMMJLW3lMIHDQI4AApBAgmhcVQ4iGRAgJKWzG0enhMUQIYoS8DUPGKPEeicp4JIzTnnjnPaYoioKiFBELnLWEIGYhBF5b5bxIm7ES2jitrVVSUgQwoRTzoi6c8cB5bW1MEuC9doaHFHjgrHTQYko5CZSBadTChJbS+ij1AYqixvLG0nQ8K6syP8l6jTbw6mB01N9azWucFbrT8g6DRqvFKMmKIWKeBv2H998D1TDoD7KFAIhYqUuVYcxKIQBuYpJoZ/SiiALKIjA7PbZqv9fvAQBCxIq9Patsf7nBOK9ne7s33tw4fyFbzHAQAmyDkEKIKeUL2dI1mw2HGvjRbKQcSJuba+sXqul4On7QbHdOZuaRuRf02p1xyyj95NNbD3buX3x8/Y3Xb53OTja3N4t6+syHn9jZOT27tRoPBv/x+hsf+NiLX/vWmw4cfPzTH3NpP5dzmS9oI/zEH/nIfPf4YPyw36TX33ur29/cfu6xCnnE4fbzT6piVmV5f2Xwa1/8/Ftvf/vK5fPt3tr6xScOT09j6N55/R5m8Zvv3Lty9tK8kmee2l45f2724OD+o/vf/9mfO7O2BcA/AQBUhTh7seGUyo0upA+sYj4kzpMaHe2NpHQQNM9vnMHeTcf7MM9nWSZqU1tUeaYKZ0PbbXNnMdcmkIvAWU5AwLiEHla2G7FcybWNjfZK6zgXp+OTFWZznXcS4lWWNCnmrMh1UdYBTygNpdO5LIHFB/t5Z2P5048/d+Wx7qO7u3t3p6PJLErS+a6oFmpprdM7u33n91+x2lnjvAct6jb60Qc+vH3x6kVqdTYeffPNgwPhClPytFNkWmbVZ374M++8/upczp967olLFy/fun/8+rtvZrMpT0LlnJaKc04pNloTQjAlCCJqzdOXzzz/wasBxKe744AmpjAxTRGLdG6qbGwN7fbO9TrN0+M9bQNTYamzsLVKwwBhah2QUrXSsJE+BlA1He9Ynzu40FLKKiOhz4/eePfkvY88/zwwZTJol5Wbzkba6TDuYhAIsX8yvLO1/awWcDGpgSfrj720tb0s6+ndm7cqYcNGp512m3EuvRq0GnfvPsQ4TlPeI6Q43bFiBspztsj3xD3oqaqLtNnmvOuRIIGMUgJjOFVeCGuyygu3vrpSK9dpLt2+/iCNjgKo5vMSYzSd5etrq921le2NzV4HeeVe/ea1fDKCQNV1rJWGob/w0U+hznpdh5+9+iGYP3z0nZvvnhxoNQxaYDafb63acj6LcX7xSb52ljQHLiDdKGSMUhyEBkFn/fYz6098YOXiE0f/5N/eHJ2yX/nNmtvuZ/7IJeyOWMAgEQQyCBkyHgDpAcW4DXQe8MRDblTmCfA0sTWfHhT33x9ORsnNG5MkfO8jn7iAQvjGN47ffAsazxHFQAHnvLfeaP/w0dF334JGkxVHY1Mr4225WLSW1quFkXE3Tjr771+Hvl69QGWVA1GunjtzUNSd9Q2KSTk/3lhrLUZ52FzVbJ6dTien49Hp3pntdTG1xmXjveOjnQpRuLnez4tiY3Nr/8Fef4nMp1XAzHDnoLtyZnzzwDFEWsGP/eW/8MZvfv3am2+/d+NeKBR/MsUhY4S20qTTDFLM8unh+P6N+LEnYG/znW9eL+aHtw/2VlbSJ198OrlzdHDt7U4CLcUk7s6K+b33Hoh48JM//Meuvf5lR+frZ58naF5VU1fKQpZJd9Dvnrc6RU4YVUEX8Kh7b+fk62/cvrk/rzQoikIbBTz0AHrvjHY8ClqNaDzOCIbWAecRQtB676xD0DtIKh//N3//2l/8yf5n/xBvL6lkvbvIcmiJc9YB672HwAAPPYTAQYSQB4AwaqyBHmCEEELWeUogIA4YC6wYNNDf/cs/N8seFYujpBHEzdRLvRgNUZg4bYB17UYjCG3MmMeBd1k1zZutBCEnpSiqktKYYOyss95bY5SorTEcSSdki0Cqy+0U9pvdSgkDwGJOVO0MADhtVdobY+vRnDmJjNDWBkHoy3JuT/pnljtJ2mon7z/YwTT0xgcxuPjkpdp70I5H5eLMoOWrsphOpAbQ2t5aU7h4OptO5kUIQJ2lK4MABwA7wFLw+KVtWdtFpvJcU5pAbrfONI5PR3FEoSfCm2IxtZxHyCNLtC2t95hQBC1CftAMndMRTaRDWGvrXBhSa1AjCqxD5RyAWtlancxvXbmyvegAqeq6CoKQtAYdSlictK21VpQBh10c/oEAWhABBL1VxaLOKfTS1ygIXOCMDXnUcriZNDZqUU1O98Mo1BLGsfzb/83H/9bf/928hABCxpDVBkKEvP2dX/1LVy8AgPTTzzz103/69tRG9UJFacq4BxiGSfyv/sOv/eIv/cnd3/h1D9nnf/U7x+NlHnXWLp9//atfM3vDj/7UJ37n138XRenu1
99Y7rYHZ5+aTCbLDL78/KW8rv08t5pQiMM0jNq9opBvvX/jwcH+R5+52m8kDINqPqrzGUSy0WhLCUQhKuF4iDmFGGOeRqP9nTjmxxP56CQz3jRSkjRQBP3acvjU1e7lc0GvARfz4Qf/UMRxhSw7PHJ/4m/+f3vxRRQkyp2gQMahHw5l2El5wo4nE3x0fPmJwfT0TupWozCq68A773ShlcEYJlEahqEQZZUVnDOEsHeqlgUmvsEaCjQMtRSUdTGZE40IAh42GhuzEmovhbeI8+F4J4hSAnCDp9CWnVYvCbm2tIV7mPBOc318YgAPkIUeuUJkC3XEsDRCUpbuPHy1KIrW8hZxAcNLhwd72u56N1ZmESSxBw2nmttPX0hQu7uqi9mN42KPBfGgs6xk0O8tI9reOrfx9rU3WOJWumR+WhhQNdvBeDJFgDbTwWDp0nRxyCLqEKVJTNJkaW0rG5fA5gFZ3jrzgfHO2yQOPASAaoAdQWQ2mSmtrAIE0CcvXMR2ZG1NqCuzTNcQkQB7bWUOYd1pNxYFAp4Z7eMgnowXadzE2vWipYWqrGOimqSoOLO8Wc684pzSZl2X0Nml1XWYpxCEp/s7rjbNbsN7rJzNFlPE/WBpkCStsawnsxrBQAl5Opx+l4LJrDDaaaPrWkHoHUSQwGYrXOSVrLSUCgCPvI2DuC5K663zABroLHQAD8dz7H2YMFFJqyTjASd4pRl2k6CuJXYEeGitK6qSYeadl7XSxjujlZYAAsI5IrgWCiOgleKMYYgcgAGjAQ3n0zmHrHQAYhIQms1HDHNEkPKgETfnRQ4RqKuqGQSckgCBWhQhYZhHlnOPQFYapy1wHkMCMfDeQQC0UtZoiAlFyDtIIYIYh0lE6koxpSmACFmtKCKUUucAhNC67zqNyDkVcEoh9BAyzSCAToKUMUKxrAQj2CpLMDLGeueQx0ZbQimlgai1BY5z5BEkjGHgKcFKG2UMQdAi4D0Io0BrBSHw3pd1zTAOAj4vZ4ggzrmqpbeOcmKVUariiERR6IxtpZEHllLsrJVCGeMhBIghzLC2RmsZRbFUVHnAgxBBKMoaY8IQkrpCECmljPWMYQAdxRiAQHpgjDNah4wij72DcSMdTzIJQaOZGq2ctt4C4kHMwqquvJaqdsiDkEeEIgU1RtA7g4E3WlgAEEIAY+eA1poipJxCGlnrCcYWW0wwJswah72DhDkHgPdllRNCPcBGe4KxB9Za54zS3nJEofeckzDgylqEkffOWc8YCxhVda2sCRhHxEVBoJT2ACZxaAGyea21yQoVEh5ynqbpIi9ULUX9vRjm4c7EKZccCLjwXcw0bOweV/VyNCocoagUwCOxsbUcN5L98eJ46rHzQbc92OiBosIJ+eDjHyGU+ThAPl0wVkAgRqet4+kH06cKoUWhV7ZWv+/jTcGy3/vKd5ZW8ZUXBpcvXc0zYj2VDmhsJ4uJrNTv/c7vLnLbjZmax6RFdSdwgDsvSTPmdYYc2N46I2oMjJpNbEjW/9Rf/R8jBf7tv/rXi8P9cddvrG0EluwcHjGmI2L+0B/5SKVOOLJGGs+dR8iRoM6rpfXuhbOPv/XKbzdXN1qDNTeVMfT9Tjs/Grqq3lhpi4ybWTbodn7wpc80B/zB8NE3335X5iJsN+YnIzGXQmqwmX/k+584fTg73jk+s3zm4lOPzw9OwqaPwAJVbnNtOY2e3rl2XOd8MaNr6Byy74Z9qrlo9GCa8lJZB2hdmfFcgByhiHab4fG0eMs96KZ4Uowm03GUts5f3fitt7/FGOdKFIsyDDhkCKXJvNLQoTjknkoUkmm2CGhcLUoIGyykSo0QUE6Y9aVmfvjmN/79nycmWVsJZaUylMVprxofi3xqURZ212Ynk97Kcj6ZK1nFLZTtnxxcO3IqDwLknDbeKQXfv353a3VbV2J3/2T/+m5h5WClmXZa+XyoSkmT1r13T3x79aXPvvTWf/oXEgIehNdfv9+ISRS6ReGyUkIPlLSkG0ribt99/7mVJwiIPOWiyojGtpgRmpbSFlnppIMYLmbFycHJ7re+tTlgS+jCe6/eq4TGi7oRos3NTiXszumRA9/kAXz3rftb2xf4+egnf/pHfuvz73717VcP997R9bY0dGlzI8ttVdbOAlPmX/6lV3/vy7+1sdWa5nU9rJtdurLSXJyeJGnzez8ihClC7V6jyOogDC0UnVZUZbjIy9UznYPhzCuQNCLvkDYi4KFUQDoc0XSWHadR7J2DEHqAtDIBjTBCWmuEHbaeAqRlLbzGFLejNJfCASi9aCZRJTIPHEF4PBwlSbO9Otjc6Q6Hc+rg0spKbmrrbbPJJY4jzjGynEPldF3J0ekMOU940Gg0hDNxFELhamkwJg4YAEgtrDYKQQoZhZgz7pRVJOTOY22ckzVwstGODUDGgCQMZFV7C+KI7d5+WMuy0W1XlTg+POn0EqUshNBanU2nSaOVNIJqXuVZTQNvNeRRWKqCAKWtNKWzAFrAjcHG1wDSdqNTZEUcRkIKA4TzDjrnHUjCVMgSAmecRSgAADCGlFxYow1ypcg9FEZKVbnSG4/CyaKu7twVVb20upyEYT2fL/e7RV2Hzaj0QhTlo8OTbr9JCMUe9vvL49Hw/MXHhqfTykCsNAAQI5DlCwR80AhjjkVVIgQxcsBDiCALU96WthZ7B/fm44MLj38gCeNaSr0ocnmiqqrTXh6PFo8/duU4K4yhVgHvVKvfwIo4I4Neyr2zslxZ6o5n5fDglndgY/3c8cPdNO1S5604ee5SKwTlvYPh5WceK6rRxafOUxhKqcZTdePmiSpL6gpuy6e2z9558O4P/+CLb712Jzs+/vALT48f3nhw565KI9nEa3gwaK9zLDgHx5Mhbq1xUy6vReV8enr39tK5ZRY3/tyf/nMOuevvfecLv/mfAHdMjSLSY5J2t7Z+5mf/9Ld+6z+iKKoKdXLvnXtv3xMgCPrR443HvkvBYG2lrmQxz0kYBTy4/NhTu2++5qpKVVl7kISoCTQV0o6m45SHQlRSQwgBNTpl2HoolcLA59lp1IX9hBKpnSxC146CGLJydbU7pXi5vzzOR4Pl5a3+mbvf+iYJUbMdOIUoBzxiPGZsjpTBk9Eit+jt25OExtOpvj++VZfiR3/k6Yimm2cTFuBiIUSt3VKLBWShFQfQAUgpRVp/+Kml89vN7asr7bVV6M2ZjfDuo9kb7w4vXXmSFJNv//ara6urxujj/aON1dUrFy5apZc7zSTlynAAIQQwDEPgPSEYOecBbDajbsrPr3d7nD/31OWz2y+98druZL6bFzsb62vNdrMsJKYpDnheqfn0LgQl1ZFopI3mBqLMQ1TXMgwZo8A4oWXVSdN8QhbzbJKfjEfHe/v7rV4/DENr3OF4vtptVYXmPMWsBpTpqtZSQY/X1zbDMFGq5kGAKVlMtciq3ZvvjI9OlroJTfn62U1UnMzrxZvvPagzQ6npxR1cBkmUPPf0J4Rsal9rWRjgnnzq6nu3bo728pShThR2k97DnV0OwKAVMN6gzZ41CymyvCJxSvrd+ODu
0fFoIQGYZWoh/BOYtOg1feof3b9W700jSvLazWYF0DrtdzpJlEO8mA+NKE5uvXf71q0gcbP5IbWlLSeXrsIPfqy7vNFpLdMoAQ4iBDFm2HvggaaIAUy0PQG4+OwPLZ9deemLX9j51lvDX//qbGdy98f/cHs9Ab6WjjmMawA8xNDoBYDe+gUKkTcTKApXh0ovvfpF9Obbs+sPSwlUoeNvvD17/fbbpajzKhmODMDEWoUxUMoAjAjl/zmRD/JZGURxVYswCReZ7567kCxvjRRxnPNuE2u0e+dOb/WMrsnBwwPSjmaHEmvGCFvMjRBw/+E9gFxxdIg0aGFXjo987agVOi+Py/rlH3hptv/+ZO+0ODnaWO5P3n9zMV/0Oy0XB92NNW0ypeYHB4/0cGVreSN41q+cXcl3p6+89g1Xl712V07KvBYwJgCgcjRqXQSlCT/w0udUfre50Z+d7rzx1d9mUVNJcfJoKHzQOjtYbvZHO+PVzbUv/uI/P3j4+o/95I+UqgQGGqkAdAAjJezpyT0hPaAgSBq5oO/cP/7S19+7+eikgqiqCmAtxfh7eTDreRx4D4pKsJB44zzwFGHoAfCAUmqdgcAwSJzu/8Iv61/9bdBpdx+NxWhIGhFz3kFnvXcQIwQRRMAYC5zjHLc6zaqsMQRaaIyhxcYb0QzdIE2W250nn1qez65XwngFjbDATTrtGGMm6wpiyylSIosie+nxS7OptEK7wNV1WQvB4zggpla58kyIijBKo9h5gCmjNOhGASZcllmzkwBg8rqGAcTU1pWBhFnkXnzppXt3rkPvJ6eHSYRpFCydWZ4OVdOy0AGq6gtrvYcPd5JOq6pV2o46HB7duWvSbkX9O7ePW8wkBDuLA04GG53Zwul6lhLYb7W08nWumyEpKzCtNFCTPKvSfqd3fh0YP17cTplpNLjWqFpUFFDuoKtcDWS70yLIIouWV9cUKufzxXA2jWh8fuNMe2X95P6tWTbzEA1zGQKs8goRXxeLTjNdX1rp9UIQ+aokRCMtBecRIQFEzMp52gqdkqoQf0AqglYb4zVAAKBG6yzjaHS0W4/2kW9aAsh3rX4XeuMJCHCjm5fDF1/a/jv/j5f//j/7VpZT45GDHiKiHPjH/+g3fLYPCb169bkPf+CTX/n9G2EAtfblPI8CDGH86196q/Vn1ec+3nhw9zTtbM92JlGz2Ll3J2g3N5cHv/07X6mNZwBWNq5x+O2vfY0ifGeh16L2Y0+tz+vC4qAZpTfvzX/1y5+fz+X26tr3feIjvZYjXmhry2KWBCF1qNFol3UFQM2sY3WmaoERSFOeEVpLlUmFmX/2Uvjhj29BfXT1QnOtKak7OLPVUVkeXm0Vk5s1v3T/sPMPf/Fd6reaabsGbrTIjS62NrpPPr11f/eEUpjLwmi9feXpfT4FwOWz0wAHSinlcimqRrPpUGxdgp2ppOo1H5tNThBBBlgLUMSwcR5o014anB4ceAhhEMmsqKo5MpVTonBqdWsNgCxuRtlU6GLO+YCmqYdA15X3GcXuaHwDUNpNLut64a3LRT3obhJ3PCuyMAkxik7vv+tt4RxANlLeC6kYNsbCOIyrApuiwmyCY8VCj9tE1CgJ01E2jQY9RSJo3NHpg9VlhP2pMdSYibcGAxrABmw0gqCNMDF6gaGO4shhBkhbqKS2C8ai0JNZth+mIQK8HySn9V7EqJTaW4QDfjge0lbzuRdfrMW7xWwCjXbWzxYnUdRljFPKEIR1UQEf0uag2+sV2TzCCDsbJY2sNFNgtrfPzccSW1GXhxAjClHShELXFADkG0lwRpZoc7PH6b6Hxnu90m0enS6y48nx7ixJmywMTw5OEE3jIJovvhdAm5fCKOuBp0HACD2eLxhGs1KIWmttaBBwCL0zuqoCRBAkEGAEiUEAIuAhMNpNp/l3T8L3+2lEEHK2LCpKCPKwLGtl68GgX9XlonaOQGs0cIYyigG0xktpnAMAI8SYsi5pRpBg4P1ClGnMqMcEhghUFPs0aWrt4pjmVWVlFjjLCVtba2NvGcNaiDgN0zAeTjIAMaE4YhRgXJQV4cxa4w0UVjhvCCUIE1XXaZhghJx3ShYYw6Ver9KaMBCh2AoZUGId8MAhhJ0HSgjGCACOMcwIY4QgCIBzYUCUNThsKGMB59Y4qT1BWClJKAXAcsawdspbSoi1XggRcE4hAdAh6BEi2BOppHbGOBsw5jzE2EPoF1lhPCAOWu2UNhgxTDCB3iFrtfHIMka9cUI7bBECwDlnrWGMOGNrIQilBFOlQKU8cDKNKCEeIOiBN84JowiknAXcI+gNRA4QwBHJjUSUhJgwQpx3GBsPfEAp8qCqFMSoqApMGPFYa88pb8UBBJowop1Q0nAWIIy9QgBia6wwNg0iEhAtBLQWUsJo6AGw2iutnHeUQkqh8QYg4oH33iGIKcAMEYy5BrWDjnOOJPKAaG+dh0orFlIEoZbSeUcphtBbYxzFgOCAYuMtBM554IEXUhMHjVVKamM9plQb6wHIi7mUTtUS/+ef0erGIJ+JMtNiMTlzbq1zcWk6Z+9960ajPyiLvLPSaS83oK8cAefOn11dX99ZjFGLSmrCOK2qqrscVxNBU8+jWNRYX9lqXO3Y6w/fOtwVI91qxcd3phKwlbOt1TDGEsg9jfouVrXAMMslwua5H3zp7d94697rQ0pCLfDidHHx8vKBnA1391Yvn3El6rfX6hpGUEeBYhgHLQ4hz8QYN/t/4ef/bpwN/9Ff/duNF1r9Tmvz4lkScBZgngTV0fHhOzd0hFevPmcWkCO43IkW4+Lo1oOT40VjOZVzaQs1G5cbK+eOTw7qKqeBDDthb7U1GY5ivLh6/vIzz21/4od/+K/+pf/20f7JUxeWZ+N5QCgH/OY7D31mNlbOvv+Nmy2MaUxuff69y88/Ph6Nnn7xWUsfq3fVvXfz5565cFKpZLBy9879boMMNpMrz274V4/3dnJVuEYSKWOhw8spx010Ws0Gzz7xdPd8Pst/93feZSubTMj88N5LP/aRb752n/Nut3t2aAOPypcee/Hu7a8tsqzCZOvC5Qbq1bkt8v2kuQUiA209nRzNrCRh4B2tRzPfw3ou0u3HcGsQRVgdnFJaJjHE3ord+/VwuvBg6cl+Owj3TucXrlzoNJdu3nvEE1ZmajqYntm+5FJW7I6NUpxgXajjW7u6qM4urTSSlaS3/MInPvvc9z31u//vf3o4LQUED49KGsb9XjDPpNaKMwpjUhswWrhf/D/+zfmLH+stMw8YDoLtDz/7kY9e3JuHB/dHjU4SeoQg1EaHnHnIr92t9h7eXNvqd+KimNYYOTetFsAAj/b3qziBgLVOHmUvv3jlq/+fV5784Cf+6J/44en4RAgFeeAk4DwG1rKAl2L29s3vRL3w7ZuH3kBRaAisb+fEewbx90IHCHLKhDAYoTIrvYCLYSmkcNbPi8IZDxGuqwoTrGVVIYMQm2X197/8M/P5jbffeSUJEgustwBg7IxQCDntGMA4oN56pURpgFRl22uCA2lVxHEYBItihhCUuQySlod
cFKoXRyHSBKCHD+6tnj9bzBbY+ISRWlQIM0IohCZtNR2ySMssKyB0aTPxsmS0oZWKWBrG8aPhDqQ0tLjXXprmp8gz4zx0GFkWBry2OQ8pdsg7jmi4Px/xOG12m4tFkWdFs92CNVrZWhvmgAacYEIRKyvTTNZkfboYzyHQ/9leNpyGVkmKqbUVQQhARCnLpKpl7ZxCABqjAYZlXRKMnRUcU8Z4VdfGKYchwRwZay0QuuZBggHFROeFQIz3+u27796GmLEgGAzODI9Ptciq2VS3m0Gc9DZ6TlhAKae80+Q1rsqjU91JW2e6xfEQSL/aOzcdnwJdWym9tApagpC33mAvpAIaIEcAQWHKQBRaXUnty1y2u61Ob2l22qeEDUd7nWZrMa0MNMtrg9zS1eXz7717HzKwPIiVz7SxpNQGAKMNi8Kjg9OBDXgIOymGwJ6MTssANAZ8luVABPu39lvLnTk2qsS3XrnVbMPGWi+IcBTFTwTt77x/GNoi4GDjwuZnf/Jn/tKf/5sNvm/29c7kIEcnF871X/7+j//uN99YW01zlSfQfPjF5995907YTl5//VrImmv9ZTCfAS2GxzvD4SmCeDEW1ttPv7T9zLNXX//9V7RjRSYwVV/6vV9rhxhbvHdvnqzQi09+krTOlfb0jbde/y4FSbdJWaCL6sKVJ1qNaHg6jDvMIhdx7BDwDgmBcwtp51yjE2tzFwfMGakd8KIujOMBIRi0IgDNibdEVgBjuCjnKYVrK1GvD+vx7OE731ppJAGcPTqatJsk7rX67Xi8P66FqoxAFCWNdlnbdpfee7Bz9tmnT/dG/UG/UvjhwfHxyDTDqNMJgoSL8pggixwaH5d3d3dkYaDzVlc80GlQLi8P1reWFsYXhSO5n4k4aqyFvAXQJB0siSjsnF07K8rDu7f/1b/4d8hCBXCjERLA5vPMa8MjxihhlJA46C2123GDG90iLCB0enzYW84+8OmXjPlgVWb3Xv+SkIWxcmmtR1hYC2dsoivb6jaQlc7MQxx44GlItJyoqnJG1SqjsYFhsTiaZnlZK7C1sa3r+cZawkCCkS9KyZxPMA1xq9M7O9p/JO2QBhFkpL2cnmTTk1G2fnlrbdBxs9nt2+92l5qVKU/2Hn7sg08tlMmOZppFdeU2ltGVyysMhEUNL5279PvfehCG61l2AmF15/472KsQ4HPtwdHodPd+LXMzmlTLa91m3Jwb3GskNpvWi3ErCqfHM8bQ+Sc2c0sGPrhw9vHJta/v3zneERXi3kPAOGeIIITzhUVRgzqfT+ebSy67eXDtze9MfclxOJlkl8+EH/30863B8ZnLnTjwLIDee4wIJtRj6n3gPAOIAeghRogn1qnLj5mt5YsvvLH8hS8d/s4rx+/ePH7xA90f+Njq2lZIgtoCi6mlmHtbmRwwFkKQTo/tt75y+9HR6cP78sEBG1bWAq28ncwB8EQowCPsoIMIIkS8RxQ6Zy10vjb6uxRQhhqrW3XUiMPYtBlbvug4ZN7JOlvdXj65c9JqJb1uqwyD8dHORntjPD0xINDOtgZnHDE7u6eDXhImFBTzV195U2t05vwF7AsEEbbgnTeuNbgVmbOuCs82HQ4vv3jh+qvXzWJ3DyHcWqm1+fgf+4GTezs6lxgi1u2tLW3/1MvP//I/+YeMB42Oi0LiQvfBT77w9mvXu7vLK2ub3/n6Fx48uLF0sbfMYlK6zmZ3datzdOek2UXNBslGzmT1+PhgMR5ePPusr62cTTnFaX8JQ1YXOacwmw2DtBOkaQ3At+48eu3W3mleCoSyuvbwu+cZIPQQAGCNgdZD61lIi1IhBCCCznkIAHAeIgQBsM4Bq70nykTHI3J46ABIUq6cV847hCDBzAMLPEAYUogxhhB5I+uIegKdMMZ6d2Y9Wev0Xnp2W48Xi/GkmE3TpN1sthXxk5NR2kwxaymZY+1bYUA4s6qACo1PhsBCRiKBiNIIYVrlUy0KjeKkMUAgrUUNoYQWYRYRmkJTOV02Ym6FJpy2L23vHe6GQXDzztHG2mogxOT4RpEfcRxvrixh6k7H8+HJAhvUoIhbsPtw39Foaa2d9Mlk5orJPIjpxvoAQZtVcGdCByH5xJXN+/fvZcbt3DiCgAcYd1mQQKIrWVPUSpKJnkPPjw9LjJ3aO+63W089dansn7vz9q3ZJEMsRI7UufQAExIIo6QXvW7bS1RKAQgkhK2urNlK+Oygf6EdbsTpzIwr1Qwao52xNT7qxc3VZHI6Ywnff/BoZW2dWGmdxYQCGiOeYgqx9LrIGOEhi/6AVISJIxx4650lVSmsawThmlQCIi6F9CT32PGQEYUn42Gr26O4ORtnH/no4K+Ic7/8hf37+yBqxGEa1IV557qkdG353Oa/+8LezvEtGoSD5bY2VbMbQgsIILVk/+L/df3M1n+5u/vwtfe+RbgToiDWxpwDZXcelTgJiDWXtq4SMNwrcxxzHKIvv/3KSz/+t770zS/89jfeziZZCoLBUveHP/fR55586drt9xcVSrEIQkoIIyhlQQyN90JjZhqM7t18O+50ECPUNPKZPDkoS+NXUv/HP8UvfUAeP5j3W6oZSaiFyg3liFERy0rUw8//p4NvfWf28ktXKnEKoOt1krL02Uzn2Yggm7T88tKGhnRReuP7UsyazXZdl1ErKrM8bSYYSmPmSkoMkIM4q4Ry8ygMacynulAOOWuanJn81FcVDmMpRBQlxuhOK8izgibxbCqAT6AnLNIrvbMn82RaO+sXzTjk1JdlHbLYAWqBIFGkSwmAq4TlTgKK5/mM8CpopcruRc1BtvC4yS4/+fSbv/9rnuB6UiVxkye0nSBfD6ejIY0h9TifZ5WHhqI0wL1uqzp9ZLSolfK5NDYY9Jeni0POgiAYBHEb6CLtMq9IwGMD22UJxvYUUxDEgbEu4IDxVGY6FxnjwXR4zIJA1FIbaKT5wPOPO7Bn1JTxtNYloilLWgDhqNWE3lV5QVCUtNY8p4ykrTZrNFgxmlrjoxY32Iwmu1EUB4Aj4Mo8p4ETFuAQLTeWpjlYO3NeZdN7d99f2Q5OFnPk0fy0kAJAFpWSSl03W2kjaRkShoEez7/3JGCOAUZlXhkAaimiAAPnvbEYIUcJxMgoQwkkGEGAIEAIYgSRcxo6pK2tK0EgaHdaIYYR41FE60wWleLc1lXmHXDAFbwqq5IxZj2iDNRl1kgiq01RyjgKESUYQ045gb7WqloUkFJrtA85pHCRDSMCJTVJGkGhpRIEAohRFBOKcMQxsMhbAwD0Hi3qkgbUIQwcdN5UdW2B98YQTJXWCDkIIATAAU8C7r2V2gLgMMbWe2AkNM4BaChAhNRKYYAYw0oIDxHGCCJX1XUQJABCBzxGyENQGwM8oAwbpQCA2tpaSQYowpgyQmg4my+s0UmcYAQBgBhx4EEla+8NwwQgaLzFBHnvAUDue8dYS0IJpAg5ggmDyDsIvDdGWucdI4Rxpo022iNEkHcI+CDkzhjhPcYUWochIZgDTE5nE2VhHIbS2iCMrPHGmZBT7XBIGfAeEeQdFkbWUgEMky
iGGIeQzmdjzCknOKtKgCEwtq4N5SwIGPDIAwsopJRCZKBzAQbWOA8gZZgTBiCqi4oRHIaRM4ohDClVHjjvMcLIA4hsFPCyFB4gBzhiyGtdlUXAGcAIeGy8g85jSDx03iOESCVqTwiAkDFutHHWWOMYoxZhBKnRuq5rjJCFLmRMKgEQkcYoYzgg2jmAIPAAYYwwysqsEQay1kbbgLLvUiDryhpzdHz4wecu33n30fJybzLJVzfOqaK49sZb273zfLD5+Cb+vd/8/XQ9Hs6LdKlZIn8yGmEaNtJkNNNOOWTLBmNG6xw42Q3ZOTy+PWqf7UfdPpia4zsnkxycAQ1vzfx6da86ipfo0iV6YS2tZlYfV9lED2d1fxDlHn/rtTfXPvxpb30+rZ567IPHc21FZaXeunzp+rXfh2JCea/Z8A6p4/LoRM6aNPrc3/4bh29/8/atm89eeSaUcJap0BIQMX82Kepp92xS7i8e3rhNz54JG2Bn5yFutGpp1GzWagSrm8F0+ihpJelygiIZWH90OovS+PLzXWlENTyK28s//cMff/3N18oqH2wOrlxc/tJvfm0VNmEFblUnj33oiUuf/OA/++f//Oqli0LAsNcIuujbX3vtsa4OE7e7c/3ZH/vBd07fnT46STaba4OV/etHqFak1uPxop02tALGmh0tzqylpaDXb09Ou8kHnv0vfugnPledvktABVP0zsOHq+cug0wfH5WuaWPO3rz7ismHTmrEW8PdoUh02lqDFUdGSVNDk/taFZnAtOJRQNN4PNXZjDbIVgQa7nSq8glA2XT/UGeqt7VEwmrpzONKldpHW8+/TIPVuvrVKGIW2CCGDpaayqOj+4d7O4g55QBFSOMqxGZS271K/+AnP9vdWv/qN3+9YihopdPTUiO6trw0KxZWaWMgRg5AmM81NqyuF//mn/wvf/nv/R1A1wyIYdi9eHn5zlfuPvHMss3w7Xf2Oo3YGD/SsrI6CPDS+rJScjgqukmaLINFNn/mydXFrDaAlNK+/Ec/MT8sKhy98Ic/9N57B+U7e3GMltZard5KlTkCDIMAIWPAyfvvvh+3Gh64sBlLqWtpH+1XwOql+HuCKQ240o4QHCahlFOpVCZVuxMBCI31ACBgfbmok2ZIKPbGBg0UKPTaW78exR5BrK13HjpjjLE0xARjAH3IkkougDUEoyAKcEmMcBKLZiNxQk6GRxg54BHnQYCTqpQ6KwgAZZHzpDufLeKsjJqpKvJiOlsetGHI44jlRlayggjIIiOEHR+fLgPKAgpCyzmfl9loMeIBp4wDpebVxAMDgOWcISG1qBzmCGCMQMJCTwIWRWqAlAGj+Zxzlo/LtNVigJ8eld1OByJQSoCpA3F8nGW10cuDflFNgcOU4jSKTo6OpCw63WVKsTG2qvMQpwhagjGGMYDEeiOVBA4kLGA0VlLMizECqCgXSdxCCGuvGeERSQDAdS2DIGRRYzKdEa4H3eVKZZvnWvs7O7Pj3SdeuHT+4uaNm49a3aapFbEYOVKOS6s1cDKKsVTqcIwCDYh3Wk0ARTyIo3aivLQesCCMSGqsJogZKGbl6XKyaa0zVhESZKVqrzwuRakdbW+9OJ6O8ipPIeFJCE1VK1fkNSCnNIVBEPBGaH1EXEBDhiCdjmtC+MWrL45276dL7UfXb3BC1zc2giCezU9feOnxN19/8+M/+LEb++PtC0/Cxel7r7/Sai4128ndu8eNMAK+0eymz3zwqV/75V8fv/KNH+is/9xf/Mt3rn/1kB/1ty4dvHNnZSN4sn3mB156Agbu0bs7vJF89Z1rHPPT+3urK2fS1lKhzJm1vgHDazfeiltrjz125bWv3V8Uo/fv3rtx+/pSY+Xp569UdXHv7ohRRmPHLdPKPf+hH/jib3xRmdsvPn91d/i9cvfJNGt02ebFxzjD+dHD49uPnENJ2ly9vHF08PD4eLZ+buvi1effOTjceXjQ39iMqmB2esgYCjQJY87COKIgDHzImZFKMdfrDTwAGgBI8OR0jDxqJ+lssdC2QsrChFWVPgWKhAPOvNTZYNC1RqNA9zap3wje3xWoFVVCRU385OY5CrVHoZLQIYJYJxseAsAf7T8a5saErUHSkvPDXsf2l/qLmXp0/ShdIRgSR2Mat7A/vfbOjTCmQSNN0uj9a/eunj+7c+tmXVSFlHGvUxiZFyWEEBHsNUAObm0Ozl3YOHf+rJzJcny4dqZplFRC1Nm4ueECRGTupBKiqow1S90lp1nQ6BgqWJxki6GoZZ6XK1DxpKEVVLIcHd8py6qoJoiB0emeLfNLlx5/79oNBPBgkHjkj09GS2vtwXKCIRfeekClLiEyhSyiZuAA8QjDiI1HB8u2zUu99/Dm6fAwiBLK2VMXLw8PdmfViVJVFDV9J9w6t5Im8fVbp910befOXjuJXYJXL5zzi5HJxnEUEYhysTDIHOwfZDloDy60ljbFIjNmPKvk/HQxW+Si22+22hASBHEQcgL4yeEdXUnlVKPVOxrtiko1YYAJNZ41N86MCvi//+//mKX1H/rkC9/+veslMfv74+KhO7scrvX9Ume+frmJQue1dpYBiJ3zAFrvMcQcwMBoD7HHkELU8mRkUR7E5cuDTnft0r/69/LW3emvf7X4va+9dW4jeOFq89y55TgmzkGjeZ4NIO0/3DO//40bhyewsFoKK40EyCslIIQYU4C8I1555aAz2pGAUYhqUVKEnfOUfG86VlUx3nnYHJyRPhbOP7pxf6mFZLk43BuevbztHGcE7R2c8DQizc5cYBcEadJS0s0XdV0bUbr9+uTg7rVxeZxJJTT50GOPnT/Xef13vuwTU3pjEO2s9BMWSI+LuYXDnEBTFaON5Clz9tLi4aO93WEjSYCwlx+/WrLQkoR3Gj/4x//Er/6D/+2jL79c1NOirt+79gBh9NZXfsOHlPJgefPyKJeMk+bgnLMBIkxLMTs9yuWik7SNFpOT/e//oY9wBUszM9ghB0WpAk48BtrViPiiFvePy3cejG4c58rB0UJI5Zy21nmIqNfWe+8gRIh670NKm8v9Myub51a2vvTFX/VAegcARN46650H0CPogTHeYKRwCIEH1hsIHUbEee+BgxA664AlGEGMICcIOpUQE/rq0tNn1zd65x5f91VdzzIJNEmYhwQhapRpd5tpwByAuTJpO5VZJWsJvLLaAB/Op/OIExwggKK028RRQgku5qeF0LU2tZPWOqdVu9EJoqYyRMoSegE9QR4HjALOpnM5Gqn+YGll0FrqRFUxwwSESSCFgQZbG93fPRk021hPW7BeZIvC1LwRTsbzkDdUwMIojGNKgGAk/fb1d/frTMwBC2BixSJza6vds6utye4jDmmnFy4POkV+tLyx8mh3rPMioRhafevNm9ODg1IudG2cRpwABHAjbXoKHAtiHoahsxg6hhwJTk5ng96yqcdVUQSd4GBn12nDKUEaOIzCkGNooddl5c+c25CuWG33vTYYIRwwhrxcTHDDFMp0Gu16PtUGSKX/gFQ0HR4OlvsM0PlCMkamw2GzhRGSPIril
IjqVKhS1ZkSjvKQEIxoS0hQA/WRl8/0Op3/9Z/fzEo9X8AAselEhCHrlKSqyqee7GaFe/RwNw5D2oqzeU4ZjaPIa/Dz/8O/4xYAr6wz1sG5BZ/5kc+2G+LR8QG0QM6zGztfbQ8gRtZrrbSaTMq/8Tf+1rwoAgiXQxZGzC8W4zuv+h5+7txWU4hqKMbzeRpTIXLgjDM6oK358JFSohiVECdJByzmMxCQ3AOD8Z/9k89c2rjmZ9Vas22VJsw3On1KvHFSWxEvkYiXP/L96S/+p/L9h3vbq01GDWEoNJ4zUlV5SC1yhpO6xbxbHDfSFVsqJWYYW+dVwDRggdTSO+MgwCThoIEgBxA6nyPgiLPAYOiCZruxmJzUCznDOUhce3ldnD7ytUrDOKtE2tmAmZvOR04vGjjrd8+3SePOgxtFNg4D1x70qqIuF/Nq+oAgAhyICdJ5bUEl60p6I2rV79PF8aQczoN4FRpy5+bUs1QoYcuC0LrKFx5UcQSjtqOMlrmJEsoQ9npmSiFQpcopBFRb6jirAZ0LIE1FTcBsQLTLskOS+qxSXviov73W4seH95c3unv7tzuNVVmV7cEmMDllTubOe2UQhjQcT+q8QC8+/SFiD0PKLY21MkHUC9tLWtYeSONtPFjpdzZbrdX57K6YTAAPSkoh5qbWQeiaAVZShZgwwJyPEPNFcQQZdTCY59IYNh+/L4zoLLX2hzuYdykL5sdDnqTSuYPdhTL6/KWGh9He0em5raX+YPC9TVMHirKGwGMAvHdJFM9mZV0ZCJAFIOAchZwRgDFRylhvrLUeekI5AEADnyQpx7DT7DQ5WcxmpUbAIRawkCPjoNG6EafaoVbULqr6ZHza7rTiONHaa2MxBkJWjSCJg9AICTFiHgHCgwhRFngDCSTtuCXrEjvqNYQOQgetdgHFcUQZwYggZRSlzHqflQJDAJEPEyqkFkIqpTyGzSRWomaEemfDIFmUGUMIIA+MBwA5ByihUtfAO864VMYjCgD0XnrklQbDaaaBD3g46DQd0EUptNbK2oDSVqNR1rWUxninvbdKGusIwbXU3vrAeVvVGDGPoNFAqjqKYqlqBglEgLM4wKTWBgIfBQ2hKmesEMoDTwl1zjvrnANVmQcsdA4mSaS0wJAghJ13EECpKgAg9Fhq4L1iFCGIhBAQI0qwlLWDkNIYBxRzMs/HHmJRCsoIICjgHHgAIRRSQQiBJ5gCgjEC0CslgMGUaOOEcTxgSlsPHEKEYIahL2vBMCIE9VuxKmtjvK4kAFgbI5DEMaqFAB4ELKidaTcbRVkZ45x1wAHvvXMAE+qcHXRWp/PxZDZmjCYBZYw7CJQzGFGGsLHGGEUpEqJECEVxIJxlmISMeGchZJXIvHMAeiGld4BhCCDwHliPwzBWtSAEER4YCyHEEQ2gkt46722n3RBVSTEKOJP/udZ69nCejTImzMM3HpkSj7OxUY0S8K3LF3/uI5/pNVvfee2V116/EUXJ6GT27Vt73cutC1dXGTLLK52qMFUhCeWNOGKYG55DhiuLWlee3hl9ZzaZB3iTkl4x2oeELm/3Nh9bm49K6euj9/eHd/fXLy+rsv7yV96tg45Qyf5RDSP1kR/7xJ3dsZDVYqrqmbYWBjFX2h/uHgAAaYS1XhRiD4KId6LhfDwzrMod2EpRsrU3nZ3p2MLVE7XQVKBlMvfBN0/eW2ex7M+mJKwns0eTSXTxw2sffg7dObj17a8NlqiHURW0t9cvcUUPjsa1LKEHloGU89O9o8Wtg6SxvnNqNFYrcnH+qd7yc/1Pf/L7v/ALXzk5HcYbzROX//DP/kQzBu9+9Q2H1dL5cNCv2VLS2wrffuvtlScuYO3Pbp79oc99+Jf+5e+uLi1Ng/LP/Hc/6V3v7/33/0sSE4ycrU13pb3daBw8PJke5+9W5NkXPlSass6yUsi9TP7JH/nRd7/8ShyTTqc1PXp4PD3pNAPjDed4NM6UtqzVddip/LCsRyEGHHlJMY7o6dEUeNJZ6X7s535i7hPuiH5wR4ppd6OLMDo+vT0bBa1LL/hkxS4O3/zC/2/58ccmzfOf/dyH/um/fJeSoNGMdVlUwz2s5MnJwlNYGNPrNNcuLV//1vsP8vxv/uN/6jBKV9k//a//541GOxvXWgFHyM6jU5n5iBPOiZDOe6eEA8ZqTr72tbc+9943zn3wD3tJLYp+8Ed/4ld+5b8uLS/mFFCKQxJ6ooRhTXLx2bXi1uTy2ubJOJvW5WzPLzeTt28tUg4eu7LVWmproeZ1xZrh4axce2ppfSV4cH13fjLmJhQat+MWhuDR9XcPZjevPrFdW3/91R2aAe5dEgWFcrKu2wp9l4I4Ta1SeVbllXaQQIich0Z7RrCFPoyZkV5rGDBqrZdCLxYlRGQ6ngAfRiHX2mGMnIOYQkIxxhYw7An1hmBCAdCVqljYgp7Mhw+hEyGniGpIaLXIPO2dv/DC3s671oj+cvuZp7Yf7U67vdjKSoZYc3CaLT565gcenexOpwpDXlU5waGovPP1+spZLeus0MpUFHpMuAOWIgY9cMBxDA3gQokg4jQIMdJGllrVnjJA4+HkcAl0I2MrVVf5LNla7i0lHhBrqXV1FKI0pPPFgrG40W0yo5hLCScJaXkNlaiPTx424g5yCDqkrUYIcx4hGDCkCSCcMOulUHk77SmlpShpyKxWwHpGmTHAKAOccNZqrymj3mqnFUmbDupeP+RI6QDKUu7v7odh8/zltWwxh0RQBAjAYaM9G5b9brfMDqfz497KwOWWesXIAII8CHGWzwCMIhYjRERZUsK8MYw1HBAYQCGqdkC9yoQHwBOetAiqDcKN7tJ4NNLaWB531zvelFWdj8bzjiIIs+xkiCNOiJ/OJgHh1ggHqYeYJ0Gn3Tu8dXN0cG1j62ONRh8Dn00WKgFC8YcPDpY7rbrYX1tdnZ7uVKOjx566JIGNgkGnoYBTzU5TF0Vp8j/0k5/7nV/50s2b+ySNN86e/70vfXPj0qUPffRKm4HRcV5rPhmNmyubsNG8effhSgtDJVjQbDSbabNzdLRfDCfPv/TJXHST1uCpFyIfVZUUb7/6+jt3H+wt7LMvXl26sj6flDdfubV94YknPvLxazeOzq2tqXLo5KzTb3yXgn6v52A0OTrhOB/vPmp3ViCCjgb7+weLk0Mg4cHODWmz6Ny5s1dWwGSiAeqvtrM6l1MkJZBFDSlWs7Lb73sgUYpbIcqlx9wrY0JCHJInk2HAUb/ZTjxV3oaNhtQ+aaW4kcSmDzn3xgBm8nLe4J0VdjqW40GnrWt59fzmUiupIVFzJRY1gn51o5OXwygw2c44iHpVnS93m4SZ33p9f7nXWD0gH/7wUqsXHY6yvftHMUSI0ziJBk1y584dSvGt2eRw/1gqgTHMJ7MaIqRcFAeIQs7DbrPx5HNPIM98jcIkHPQ3oJEYAuvdfHjcr8u42z98NANI8ZDGiDsnEx40e6EwvJrX49JOZyVnoBaLMApWVy9BSNLO2aQLk+z45PBhHCZrq93j4eFqN43bg7i95TDcG766gpbjdA27
IAhY1GkW88VweJwVZT9qNaL49VdeXW8tLzV7G+uDcjSMA/T8C0/v3Xv42PMfDbE+Or6RNFtWxoe7srsZA8mzRUD5Zhi2vVdIypA6S93c29o4IYx1rswniHCHWUnR+DjbvtLoDJSZzY6Ojq0oW41A2lqCqJFGohAaaa1zaADE8FRX7+7NuiEjKCxq4oFbubLNV9Yh7Xz87MdQ8e5r33jrYDI7Gc2AR91OL6+HZy+cixNoiyLtRtB7iC0E322HNNBbjwh0FaGhBQhCBRCAhHjeNUDicvTkBzp/CW79nX+4uzdmY8P23xVvXM8I2I8YJgCnLBYaZPmOBVZaVCnvuNZeOWyFkN56gpEDhhBikZfCEEIsBMg7B3wUBcB5gAmm35uO82FO5CKA6RDMnnrxo4ujsfEq6QYXW0+j9plO3PCLg2I+TFpdX7EHt691li7KMOBxMF/MPPQsBseno1k+jzutracfH7TO7+0M777zrQSq7kqcj1SW1b3lqNlp2rxgYZv3V1rR8uhrX58e5U+/uArnM68zZe0sG7NWs9lbnmV+9OCIwvbFZz5w7gPPzeanlqfTw/ml8+vHN98gvVbv/MV64T525bnhjWvXv/VtNy9n0znh1tQCVCa9sNY7Ex1e3yVFns3KIEUxj4kF0PqiyIaTozSJNGE3Hg5ff384KsFYQMKcKX1d2mc+8OLe7sF0PLa69tYSilGKralURccnwMGarSf/5Z/9C//bP/6fQxpAgK39bm0RsNZC4J23AEAAEUQAeUAY8w5g4CEACHjMGWXMWx1wFnrXSdJPfuKFbltGMbdGyzKPAqR0rbVwsjLaz5TuNChrEM+8FBYq6MIYYeQMtB5ynvIgkfWQATJfzLWdQ4pZmuAgQN52Wm1t4N7+oygg/X4XWdSMQw/ZqcoHg63JyQmCzGqlppPx8UxWeGm1005DD4CqTT9t9dcunD46OB3Obu2MuptdSGi5GC7xbjMMsIke7J7y0Ju21t4KaObjLMEI+/pMpy3L5ObxbH298fTFF775jfcnNTRGPP3yB0fHBxERth7BvGynHnWbpIlFXgFPCmUf7k8QcQRhj7ArCwIASZIgZiiAQpQyqxIbYR43O8GFzWfvPdpf21waBkAUU1zagDJodT8JJ4Vc7nVVLQkCo8l0Nj/srA4Op6ON1SjPFq1OjxIPlPDCeaSFL1WpEMJG4T+4VYTlYnyc9JbD0FAMIEKy2CWBL+aLKF6SVYFhBC0FBhKOqnIMQBDHnXyyIDi+cCn+wHN7t+5Vd48lb6be6TKfnun0fuZvfPLzX/nOvBRnNnqzaSmkDKMAYVjXldEyTqnV0hiNMECcyrpYxeX+7SOtdKfVmElrcjubeUQw9IAzQhDGtYqhQR7gurx4sbF/dzJ8NP+3t//tQosf//j3DwLWXGozbKxHldAcc8ID7QSkZrDSOimOLzzz2MnBJO3EhXAwgY9dYa7KMW7KzANEgm7kIQMQQIocjqU2mIqz59ynP8zfuq8BAM1Ot8rn7bCxt3/S7Xc4VNPhmEGMQxM6saghdjzPFI+pc7IsxyFsalkGNCQQI4cJ50JOMcWEUitBFLWcDasCzudTJRZhwoGLgUsn85EHBlOPeBizjjcBj7u6LgbrVydHJ7p8r93Z2ux1lEQnJ+9DL6QUPAWiyggN4yAsFhI4xZMUYdhgMYt4kU20EWmn2WyGw6NDxkgvYcIQXTtZ53EQZbP56tLmaDo3UkqgKUTbZ7cPHu0IUQhkMUeUMTGrnEQe+nl1TBBFCDlZSlfXoiAIMhwkSYvQoLbGelXWi83z5xaTytdzXccImNl8CqGu6yJJCLDGVziOe5w7UdTOg2wmjPIB0cCOtSwhBMDTweqFkIDTk2tSLigAUdRN4zZgCxqpKEXz0ak0VYS5VXY8ngoK48aKlm5pafng4EHAGeewNGCwsbr/cJy21kSA1PAEO2mNUFpa7HKt2u1G24hus318PPouBceHQ8Y4Z3S2mAcsqOTcA6itizlvMm61JwRb/d3b9t55jwiBmKRRWFdlWczX+iuNKIkYGI0PEAw5REUtMENKWVnXaRoEDIqs1hA5KdZ6HcSxswY5t5R25vkCx4wwFATUAMA4hxCNpzMHHKLQaF3kJeVBqxF7axFgSpQYAoIJQghApJ0l1jsH8rx2kFRKEwAoY7A2tZBGWW08gF5KDQGmhFqDjLWVUAYiD1y/2RuOh5jCusg8cMA767w1vhLSGhsEGEPkvG82k1pZqc1oNofAMUy1hpQSBNB0vkAQaWXrOqcBxgB648M0QsRobSDCVhnrNQsogoBALqWEmCDEnDMUUwccoxgCX6kceI8QQIQZoyGARbEw1mPCIWEAAYKpc4YTihGqpLDWBzxAGKVRbC2YLepCqpQEECOMMELQeEghVA4AhGtdW42SKKIIGYo9xrVUmBFKibUGQgCwxwAyRAihzuo0DoUQUdgohMxnmbLQOAshAs5ZrZ3zCCLgoTN+OMmQ8RFjSktIHOHYQV+UJYIkSqKilhb7oqiFMoQS5zymECJklHbeUwKLaga8Bc5TRL0DYRg44DEAUttKaoYQhB5RBBxCAAAMMcR5XSEUB5RpXUHoKqmCgHhvEUKUYQCAt15oUVaaQEgxDnlU1tIbjYADziujIsYrUVOEnffOW4K/t08h52prfWu2P6mL+uzG8oPbozMXVjobFzhvK0uqbLa81BmE58an94hQ96ejbivMqkX/TA9jEMfcCJ3EjQil2OoyrwugmnFzPJK+sdwahHtTvbUUda+eaYFod+/B1rkLYcCioNVr9+/f25vvwqOTofOA9UmAWwwK4YskMqNRNlhtHE3SSTVFSXhcZgClWJn+2sXTxX3EQhLzfFhkaljnQjtcFFhCk/biIzt8IA4LLy6uXTKQlLrymOEG2KuOggvxHCrAw821p3G6Xc9P57u39vZ2aNSBgTia5Rvrq64wyOpmJwqbkSAsH2Z1pje31k4X+YvPXL4/Lb3Ovvbl9y9sNW+9/vZP/7kfGxZ6sNkqCMlm8wD7K89uZcXOveuvhq31zvbqVXL27ds7rcYieqxbZODends/9V985AtfffsLN+5e+tkfffnJp/+Hf/LX/vpf/NtPX+61aIACvciHS8ut4b4+uvPm3rWvJzENsB900ulEPLqxh5WAoHJ0vpicWGsMZktLHVWaMA69B6f7DxhGURAPotZiPgyb6XD/AIkcOow9OJgcvoaKjStPIbK22HsUt0MLokIVrcuPN1cv2/a2diG1zFnGi+KVL//yMy9fdMhZDObzqoeiejyJO2HcCKzWqbHEVrNHezggTzz9+OTRzeuPvn79zW8/3mhX1nJCIDBWe+9sENIk8kWhmhGT0uEAQoSlI5rTX/uPv/zXnvsUJi2AeNxde+7Jc9dHZYJaOWcneycBhSHFxOsuA6kWz6bir/6Dn/vUn/k/ChZUWpXdIC7F0devGaP7641WGkWX6PNPnr9565X7O4Jw3B2k8/lRa23LgQwHTBkZ0P4f+dnPGEsGZx8d3Du5//5XQwIU0K2VwWxYfW88yOcEEg8dBjBKI28
RYTSKw/l0bKxuN1OEvahq4yiA3lhjtI1TGsWREJZTJEUdJgELmdEaQOC1TYJ4kQ/b3ZXp/JgS3OkOTk7ziKF+Z1XowjiLCFbGJI1Gu3X2ZHRf1zPrRNiINs+tHQwXDx/uPvdip8hng36zGvcfjcYoZFDW3oFGxBmNMllJJZzPEXMUhZ1eu55N5tmMUBxSWFUFdt4GGCEMATTGOyOdEpgE3gOKaaGKXqfvPSirHHMQhFFVG8KoEjIvRKsZCSGdUs6g+agoipohRDHPZyULA1GVIQ9pEBEWEV84BKRQMQ9iFmdCRGHDG1tUBeOUohh46JwmyBlRMeLDMBZChUEKKfXAYoiUEUUxXxqsOotkOcPQspCWi1KVRbfVdlYL6VxdOwdYTDvdjtZikeFK20f7OwQIwIK8EjxtAxQtrT++d3Nsqmnc6HnGclXXUiZxjCEVQlhUeGcLUQeElfUpIiRK+t4H1kIEOHSyWMxiziCxgASmlkpbQoOVtQ3vXFWb05NRe2m11YuEgEoZgtl8VhlnTbnoAriY3l87s6qKvNGKMHJiIoSSJGh6TWaz/P6922efYecvX3wg5oWQpbHjm7dOxzNvzVNpv+HA4XduPNh9tNHZ3F5eMqAyhf9jn3p+bhatpPnBKxde/db1F77vk3fv3xP1vL8VNnFjud+4/ubOS89/upgdJ6y+Nrr74nOXtA/2Zk7ceF+XRiBx5uzGD//Qj7/xzVcOh4fZ9JA5pGeiH4eYBSzl7331N1aXI2p00G48+9JHvhfDdNJrVR0f+QAkURSECXDm+HBvkc1snfE08t7p8Uhan2VloLUqc+t1woMzHZTlephbZyA2MBsuQmbXtwfaWF1VOAihRe219TIvcCk9sI7xNIzykxkqBI9jgkCxKKSSSdpZWlsWVaEI8cg9cTFYHrSjMGJRiwk3XkgLHFKu3V2aLxYWmEbSbiTNVkOcWW3xlD/3wpWDR8df+O3d129PvX7w9Xd2rpw7G8aRECBspxaZukAlIknYbnZCKYqnX3r+/ffemc5mHKFGGiHlW73W+cdX5uOq102WWqHJ/WhnN+r1bS1cUYYB7m/3aeRuv/GlRqM3HY5sKWEcpmGArFL19MFbd1EYHe7uQUqksmtry/P5nEar0iLgAYtannBHWKCcEeQw3xktdousHiynTaogC5dD1OJQ1soZhDGtFiOjKsxBO2qEAahl9vhzz57p9d743f/06NaOmM3anfbyme7jj1/EuL93733s1Aefe+61bzzy3XzzTBwi+ejR7R/5yb/21rdfwdBDPTu+N4VQZDONXCqtGzS6k6mKA55P5osSdvoEoPlsPhzuH3kHY84JgdlkzgGL24mSJlO6lcYG0tFUHudqRlGj0Q4YTXjCKGx0upml0sBvffG3VoIRjAMBKOYR1hA6/xM/85mzj+sW9whldQ5iVHkEIAUOWIw0oNgjCyDxECAEnAcQIK9iBBlgyAUV1NMzT4NPfarxi/9ximDIWaAKqyESAjjnjvO5NI5w6r01CHgOAJLAWeAMQdB5iDGCEBnrEYaMUecdI8gp6SBEAHPOPSLWfs9BDgMYN2JOfQ/C6f3362kZrSyvPHZ2kfFM68Ojo45dJGGkK6WlHiyfN4CUs4kPWIhV72zz/nuvTU5Orl7dGpemvXZpe+tywIKjRw9AbR48ODqZuQTSI5mP9kYbg+ba8qDGQdTvf/jHf+LVX/nV3/6XfzOK2mvntlh7sH32IiANhiOvht2UJjwSZzqj/btL57dM1DqduCJfzPNpyIJlSaq9h2/t3som85PhQZwkxXxyZr3jPYM+OH5wzDm+cuVcXZQUWS88bQXeO1FXGhlGg2mm3nk0un1kRpoeT6Z57Rnxa1ubLz/7yVt33tvfP3ZaCSEDxihB2aj+wR/78Z//7//Pv/fXf/4bv//vb8jF/TYPw9jWEkIPvMcYQQABgg4ihCHwHgAAIfQAAg+hhx445x1nmFOCoe8m5PK5wRMXzzejiIdAeKGkcsobAyf702IyDzBpRskiq5T2QpnJ6YzHvLe07FEiKwEB4XFKGZVaz5SCBhdlTrGHXnNNjBYkoKKslduN03arxaWQRnqO0PjwkDdQmsRVIQkM8iKbVwVPw6TFTxYlkLqYL7pn2tb5Zszu37wxH82mpVYAiaoKurQxiE1ZzIYTSTpBFO6d7C+zhAVOG6mNXtSOYby+1vMWgJOCcf32tZtrF9Zaa0vFvMYQe4Cmk4w3myFJHt2+EbR6MXOQQGkhYk5LwwEWSlnqOxEJiCfctwLEAqRx5AOKAVrklTEnKsoajHSDVrDWoLy19+gQeFTM8kqaSjukNacBgZQRyj0eHy7OtHBRnTY6gYYFhAxQCjQM48jW0gjBo7DZSf+AVHRw71EAwbIueBwyHA9HOXVTnpIgHBCnGLAIG8AgIB1ZLersIGQ93u0h0hcVlG7+uc9tX7o+/fxvHe8czeaZQt5sbPA/939/5uUPRT/7X33+1n4VR41KCuI1FD5uNxDHDtQeWoyxqDTQBkNy+71bn/z0hw+Ppw8mhcIe9PHP/tj3Xb9279X3HwQ19dwTrP/kz3xkrYFCJ5Q26aeerBZVvQBKM5+LQY/jwGVlznhCgsgaX1SjoNFsdVpH909Cg07uDw0Ad3ceSW3WGKiO7jZaJmmg6dy02mEQWuAzDxANGgCn0Etv1UrH/vk/3P6jf/d4lsceoQj4QqowTdqtRoJlWSxgAD1xpVik/WVdoRaGwsywrzjDWmpgtYesrrIo7WNEEfAAQAgRw3wiC5Iura+cXTx8K22GRs0dJsoCWolmI13UI1PqRrN/dDzqdluM9IrcsqSTH0wz/YDCoNFaX7r6ydm8HJfHRk90gRr9Vp0XUcBBwDu9thFodjoHFUiaT4JmZKg/GY02ttdHe/ci3gQQhijIa4QseeLxT1din2GKIsrjTkDsaJGzznrqIqtLFOdVlRViTGwEeTKvJ/3+BiYBUNADG0UB4GieVSsrqfMqZtY1I4oIxY2I6ICZer6jnAviVNco5KnXFls7GxdPPfYMw9JhSRgOXcsphSmo1cyDImz30rh1mp320pQQv7p2Nc/rtH8xbK5UsyNaVEBlIXck7DY63Xx4jJhrBhELWaazWTbynAdhQ9VZEAbO4/ba1eHxyIsibui0QRoILiaVdPTunXuPXVxPPNi5dz9uJN+lIA1DSIjUOknigDBlfBinpAMZxMgjIRQkGFhurcMAUswoJVLbqqqUcasr29RK4HRVyfWl5cm0LvOy0U5YGNTTElmuBCzqRSPp6LomjFRKxTzptVrZbChECZFJoxgC740KYwKhN1rHYUAoA8aFjDlIMUbUIa31QswwwogAFhCKvLfWOUc4cRYC6wHGnEfea4SQ1oIwiDQBECHsRC0QRDjAhHgPYRpHtVYRD+bzacgDKbU1lnHirPHWAw+98845Y4nzQFoZU2SsAc466zgPCCVSaIihs47RkLBgWo4aYRQnXIo6IhgjFwdMESi1hQgD7zFCAHilZRjGhGDoIXDQaOmAI4RiCL23GEIHkDbeeieqCmFMkEMUYU
Ss8R5ABJFSNWM0pNwC4B1AHigpPQBRCAGiyINa+SCIHDKLMqeYGgeMc7ISJAgAQ5WSjGDtvNHGOKABBNCnaVLIAjhIIfTWEIwBhB5CqURVCwg8QFRWmocYYQKBN06FcRgRrI1FhBqv50WttOXMEQqAU61mrLVBCKcNrgwUlZBWkgCzOLDWAYw8wco6imNndRgFAhiMHGOEUKS1JQh7ioUtHcCUcOsABJ4RlmWZgdh7UJW1IqrVChoQqaImmBNqtLHAQ4ywg14p7QGyHmMIq1p64DEhstIIIQCwtUCWkkaRMUobmTa/5yQjGM/HOp+6eW6KxTFNY0qj3soWhtRIX9h80G2vnO/+9i/dW+2deepyuPncVhm5qqqLWnqlgS8ICvPqII7bJPIp9mnLqUKtXejEgJoEt/s0N7DTidc+8Dxx8c53riVdvv30hReefyLLj2780vWqGgdDMx+zQerPbiK+yOnpnDd5p5EkXVqCuqiK9d42qyTDrsgPGo0zotDEhUqccKbjIEDECet4YGACg6APplnJCksQ68VlUU1MbhI6rQDQKE06a+0LamRcVcli8aFPfujuvesbg2aUBPJ4XJxU8fr6mWe2dg5Pzmxf3sneXl4Pt84n+l72x3/qU3/r539xdbUzm6qda9n2j2y0ln0UD6QQIXcXn+rmo2OVmOtvvdlcXSLOfe03v3J5tbHUa7335Vefe+bZ7kb30pNXX/0PXyUw/r/97I8//cSFW9/+TQSHP/Mzz1660n/vt24eH1etRnN1u7G0TQ4eFnffHVU18z4spgWB+M77753thHVdwDhiCaYaOiunE0mBbTbXikWJfSAB6DSbzmmglZDTdhrffXC/nSZxwnRlb3z9rXd+52sfevmjH/qhTx0+HM6ONE+X6ZmuIMlsNOt3w7TX27qytXf3Vhrj8QS2W+1FLqWwyJmNtdZJpqQVzMHldnT+3JL0gXbyp77v+X/0f/1PKA0iFk4X1VzIsnKcceBcGGJjzPZG8nBvrh2UCgLnjDbCgNGQfukLt37qx7+98eynlQ9Za/Ov/Px/+2f+1F/xKq400pQSYrXRtoI339xLxuXr37kVkOJvf/bxW47+269f39+pnrzUX4wXurL5o6rNit07B++/c+On/9QfeXT/jphIYBwEUXdrczrcm08nlMCtrbPj3YnVaHtz4wc+9lmYf///9L/+XQ+krAX43rY1+OVf+uJP//SPJRHLpoVCPs9LBHwFvTYeIlgJabXljA+HizQMkUVhyLx2jEBKGHAwShglUCjtHAYGIAeNcBwGBITEx6osZ6AIGIPAAue1sBh6UddBnBbVlKA7RalarRaGyXi+iMJBF9/NF2OZL2Dtq7pUed5bWpVg1gTtR7fuJDGt9FC5CmAnVG2kTvvdWTaHxqVJaoHwyGAIHHDaYgwhJsx7b50Mk6QSJogTY7WVRcioBQjEBIeBotoBko0Wne6Ki7E2mlFSlzLhsUPCe+e8wRgSD7UyDkCHoQegrBbeyyhoAaedtZWrECRWW+MUwNBBAAGSdS5VRnBCESvy06hBjbaEaB7SWmqAHCWM81jK0jpt6hoSsCjkYLkvsUMMA9yYZHmn0/UWU6aVVgGJinyGMOMskrnADJVlRiFFWBbz251uF6nII1Yb3W6tgEUNXF6b3BiMlOHcByFDnh1mYpWlHjCtxXySt5J2UWUUQZREdSlCHkoprdRRFLb7zf39B2fOXiZpmweNWtRWGVXWzW4niHhVls3uyr1b74uFUaSau3Gj3bPYB7yBaETCKIqpk+OVdCUbDe+LgqftfDJPG53TfL76+FlVZEKMmoNWIOmHPvGhX/iFz8crK+cunA0Stn753Cv/5leffvbKzu7JeHhw7+6ttaXHNaJf/soX1wdds9y+cKk52n00Hk+eeurJqysfvXvj9deuvfeZH/3vXJXk43EzdoWRHKbbT3zwPKn/4y/8wtNXH6+FWls5c/UDz5myWG+FFOKHo+nb+9eW1p/+LgX7O49anHUadLFYjCeTZqW0UEpiLVDaaM3mo0KLWV2nZRUAJLWvijyM4pixhPqldpIWhkKMaiQlYI1GDYCzdbsXeoqJrdu+ODFlk7ComZQ5sRXIZoDFjfbquqxMOS3rHOSzKWWs1WgJqGeLUSvkl85tE4CEZhU0sBIccQHkvLIW0dlszjB6+ukPrVyQtpphDEGhYhjGQbOSU0Dx23eO790bPfvUY5sXtleeuDAbF0C5ZhzSxnaZFyqvmz1OMX7jrbcBxWkz3ui2MfCb/dQuqnycz+P5+vo5I3TSW2ohdTy9nYksOsHYASPxeDalQbC62mUszMan06OFARgArezcW9JpL6Gw0+iuLK8ElcqrBbSq5LGUYiGVZbp5tHOPR8AL3W6G12+8/eQV5EmHcb+/c3ew9Hja5NZU08mhUQULmHHOenfp2RcnsHd4uH/16Q8cDQ+FNuWk1rOyGzXq4XFImnHj4mSiLQGzsmiXLa91WeZf+uovrrdWpwdDPS+Odx+sbq/O5zmC3HFrOGk3ezTQOqG519l8dz4LRV6Ph9NWq5UbYHKNWHpn7/iF5mbAOaBq0E5nhXHQCeURiYvSd/pNEPAQWyaVnR/fuPGqnx3msR7NbBrRZ5877yWSebk4Pn0tm64EjUd7k5Lj9TDvtumlq1HYQmkPokBDkmMKHCDWAAixRxSg0DroNCQohYGjpPjMD174D7/7ynTsPaKQQYyBMUA5hzn20NdWOQsARAQTqay32liLAfIAeO+dd4QFHlittfcAOk8wtsYY6zAkzlv7n7Ui5i3Dvhge4mYbUAgd0At3/5U3PY9Na+XxJ88fXH8nanTSlTPTvd2k2XCEZKNTStxkcnTz3ncWeb3aWqc6fvDOdUhO2wqcHt99+dNPv/I7v/fM1atf+cp1B0yy3JtPciXJw2v3N8L2ZLGvqQsSWtWkmlsIwmJaMtRWdVVWhyKXgAXAiPHOdLC9lk2qtd7jz33kmZ3r3/zYH/uviuF4Mj7hTc+GswYQGx96/P6D063HP8icmZ1OpCVnzl4oGt1M5UuDeO/ewzjqFIUACClR51VWeXZzP3tjT9WS7O8dzMdzSgMcJ9aA/+c/+B+X+i3CKY0pYRhBwmO7vLz01//mX4OlSroO4uo0G7saQwcgRM57DKF1DkHggacYewCBtwAgxhgwDkGHKICQYujSOEwD1luK/8Qf/0wEzWj/EBgrhIIIxiHyFD58dKyUhJoaYOM0ph04r0tEmJWWhSaCajKfc0wZpfNqlna2kFIQZTRoqRmwoOy3E1XKWjpZy1JIhGmIOGEBA2Q6mzUob/Y6pRoRD/f3iihur2ysOSX2HxwoJTbWl712SyttSmGz23uwu3/rYXHl/PnJwYlQVYO6RuTSpG0NpzCn1G5feFJa9t7Ng7MrqUcVDyDCJBfFSJTbFzebOMQSWIuPpnOYtjthd7y/s5w2BNLLraao9IB2agWAMsZgg3Cn1w6TsJwXGGEUR/1WKtVI6UzVoCwNixPOU2MM5gFyKB/PAwp3Hu2giEXNoOmDbDbDwFOMunFHicwZaQFlW
GOPF5nmm4OlC10F5gRG5WLRDPqyrICFGgaaAG0dKMs/IBUFMQW6lqoqZLl2Zmv9ykfd7IEodzEOeMxpxGRZ11XOAh4nZzMwCXlDVnPjVRh01ELFDG5umReeV5MvnUrNKsd/6dt3P/T5r8elfWpjdeVs+/e+eg9a+92aVcw5wXQxzoAx3vuA0rARC2lvHZ66r75ycYn3aPmnfu4TPM0unetA9am/9vfVV18/IShcOtN56WqfGWVds9LYacCIX39yM6/g6N5djc0imyjtrCM8DIAX5Wxiqctm3igdJGmey1avE4USeP3sE2cTbsejUw1dMiCIKuUdRpqwptUMWsxJ5Kw3rlxf7VzcSjkFDGmMYV3MHHWidB7bgDoMTXtpQCjUdWUdCdKl0d5RyCD0rXajK8SerOuQxTFvFNWC0LDZibPyKAoCCSkMu4DBuBPVi7kQ06B1Jg4DoIH2MIqbGKeqPlkedI3nWhOvVRTj/nJXlrVQi3vv3d/afgIHW42NSxwaaTItxsIdKqAM8qCqIqgcFP2l5c6ZlxZwSap5Mb0zX9yAmA5nFcCsQUMCXMhDbMTu3bdbAx63l+PWxQfXvtpeSZPuxmJqUzY4PvzdkMmAWsK8sr6Nmqg2QYIdkEJWhAFosGPptBBWHvMQRSGPklZVGKU9JMZ5Y7xr0F6dew1xv7Nkq7lzo6213nS6S7FjniXNRl2WEHvGeZ3LMGooFKxcumTLzKq6NDLtdwpZ2DpPu+tJJGVxDKDxRual5s1OiyTG6DwbKp3xJIxQyALIaVRJUI5q3uwOp3uBswCGRwfZ5HDGwlSVcrWZqFk1nBfZolq/QL9HAQ+0sVHA+72WEkpKZ6zRDhvgAsrSdlMqWea1sY4S5CFAEDjnEACYBKOi3GindV0GIa2dNcC1WylALok49UBUdWVqhBgAwHjgkA8iXhSlUwo4h7xCiBoLjTKA4JAga4w27v9P1X8/3Z7dd53oymt90857P/k8J5/OSS21khVbtnCUo2RjwAQbwxhMcWdgCrhzC6YGXwaY6wJMMEWwxwYsW9iyJRlJlqVWaLU69+kT+uTw5P3s+I0rr/uD5Knyf7B+edX6rPf6vN9vG7yUNUcUMsgEiwXXTR1FVFvoICSUBWAwBsAFxEgAIGAHOUzjlilLiEArzo6OD9tJa1EXIbhIRGVZR4xihAUX83IOAWjFqdGaYMgEl40kGEMUGGZKaoAJ44xFCcQgWNsSGfQh4QRGIcAAISaEAO6TJIIeBQADMq1WQnywTmOCvbUQQCUVISSJKYiCVoZR6kOIqIAIUISVUgQjxriyFgCsjHYBcsHrsrQmIIqth5QQRiAklBDcOOmMdSjwiDljEIHOe0Z5zNrT2QwRiAmmSFhtrA9VI00wBLOYcouMMrabcIygsw4TRCJmjeMoEEIwQcZq5xwJyDlvjEWYKmsLqSkhIQSMcdZKitrEsbDAGWWyJBYCC0ZSToxGASLFQgA4MAoRrJsKBJ0ZDiFGCCZJNpscw+AjEQkujDFG20ZKyhihtKhzhpBz4NvqW93omAgh4hAA0MYD7KyV3lFIgg/WKBOQ0gpCwAn3PljnCSaxiKwxwFmEkLFGBuOsjbjANKrrOkDMCMXEE0TqoGwIMCAIAsM0hEAoo5RZG/7kDy07vjVTs7qTpfmiUZB913t/6Mqtu2nCI4qjjI/HdufO5OFHHztYLs498uASNXmja1V1BRARQVCM5WLpVGrimKwznFd142SIIcrLst9f3Vkcy5XkNiqc9ydP9M49/n5v1KSQy7t7o1X44U88+dJn9MEuWG/3fvonnmz1Dq9du3/6fScVMZ3u6TRe1EeLrhDB3I+51s24F3MoiwAKIyPdNEkbU4CJUdQp5CnSNSOdnkhUVTqMMAzQNN6HQEc0Sk0IS4no5HjI2OBEf/eV8t6u7bQ7Aar9xWLtgdXbV+69+z1PLJtZt5XMbl7b3upUOsz1YpYv3vzS0ebqubtv3XOY2ZYbbZ29deM6TVjcaWGeVUfl1z/7lfuH91fXo6c/8l7ThOvfvHfpdl6R1qh3ctwIutJla6P2yoq1ksfA3b+G5bSuj7KIH+3CM09//4+/60MvvPi1m6/9IVW1UXZ1g7a7J17+1lXMII9xLms42AxzfW33OMNBcN4WqTG1stp5Cb3hSKRJlmYxQgE5o20dM3SObpXLIl+WVsFRr+0UP57M79+fVFWztnlhuiy7IQKBdOLYG2OaypWqVrN3/eh3/cZvP68D0MZGSbQs1NFBGbWzNOZ9jkYr4tGHzr55+XA8iX/lP3wqjSJAEUuFn9ZSAQ1QRLFaGsHDmbNsZX0wqWZ7h1JEMQReaptGgou01e5+448/u/Hgk4hvWcSi9omnn1z5xstzzjvZMOqlZHG81FoRmJw+39HLxawAD28nn/j5P6vCb/7e167cO8iB0mnMTpzdpFbevLW7OKx++Vc/FUMccdwb1sOByBvZ6wuKs0ZS6FU1c3GXF+Nbf/jmi8V0T0TMA3P33riTfUcwffnqQfOffuf73v+20cqwbhpjmyQTHngEgxBRxOLKFT6AfrtfLvMs4YTxsizrskqymEUxoQIhWMvCWT8adIIxlBLI4LI+dsBgSkCAGAJnTF2VsWh5EAAGEDAhOo0K1sC6UTETAZBuR3zow89cvvjGt1585cknHu33W8fHdHZ8D4sYUUyxyBdTj2GachACBBSJFsfMY6xUjjAoqxpzKnhqtOKUeRfyRuEIi6TtMMiX+8OsiwLgvG1c0NoASJsKVIbFEVtbaeeq0k5yTlgUQyqg99a7drcTjKyKxnlHWYZJFDwMzmCAKEuKujJacYaN8YyyAJxzJklSD3xTLhml3e7qeLaMUt7qtLWWccwJQcv5UdzqtLKVZT71EEKHomSAM11V06opFmWlqyY/Wp57+P1QsUph7czGStYcz3bHUwJC0u2KOLFyGYKP005E47KSzeKYICJQpD3EUdvSYTrqxQzf33szEkB7BYAEQB7Prp9cOU0Qsz4gK9sMFtVBmkTBQes98MBq5yHgcdToRh1W7WxlvphhgoKXjiDMqEBEal2VS4aApU4R2No4f/qJC/euXwE0DV5HWZJkcblc3ri8G7GAWfbypf1TG8TCnaaYx1FxY37wru9+Ng0YmeL6rTszDUO3//0//pOzpX7jyq0LF4Z0uPbB7/ug1Q51O5uPn0aRvXnn+jPv+uFnn/344e41kvmjg8n2mdNPvecZpnp6dvPxhz7c7pyT1SJKwhu33nrfu56pqqqaah61tx979CM/ejSfH7TD6sEUbB3bg5tX14atwWb/uFahIYCDP7FhRhgHnIp6WeIkXRa5t9qFpMHcO8LTrtFVrrTQoZXEta96J9cZSWaTQ+iF6CQn1qhtyuW+QxFfgAARTmKWdeO6biikB/tjbz2LaJxyA7BgEeWdbKUfDddaGAE+VXleGckE9sBBEIa91YRT1SjMuFxKjCPOWa2lAm5eLGJKiUgDApjh1ShZ7Z2cHs+cDjiNz5/cml0ulKrjNGGUkiQ6+9BpnrXObmzYqqwKGXWiiQdHjdMNOHni
DMIMRiDPl+sp04VuljrLhDehyvPZ9HCwmnoqGyWxYKqReaWbvXG7naZJKy/yNm7XTd4oPS1yh+lwZZDQHvRAA5yJYVU4RU2vOyyqJWPQWAMhYDFTUrfasZNgNFg5XM5XNrenC7O6vTY8gV744ounmsaH+d3bN+fq6NQDZ8Y390fd7VMPPGrxqJPGKejzor5xd5nE6GCS7y/ypx/Zitshyaowd5O9GQ5ofW2lmAfRSwJ0dek6J6PxjjJIPfDE+XmdxxlvNOqtnw0GFs3+oJ2uba5Wd8cMsul4wilNs6xuGtk4pTXNUjEaTqQ+NeqhygoIiTdrq50j473ozIsi42U7EUkcTccHk+XkaG8nANwcl5Ky4epq98T5U6cf/L3/+t8ufvlKIwvuCdBedPlHn44hWn7+83vDUfvJt22OtiISSR8aQEfBJwhBGBwEKiAHAgMWAsStc5hEDKGIc0qprBXGiCCMIPIgMEaBthYABBECiMBgAgABBAQxocYYjCgM2BiJMQwhgOCdh9554LGx3nmI/p8KNOWLppoeTtubjZ1GwzNn1x889fKXvrKy5s3YzGwBEPRayskMutAspy5AzpCn9tL1K8dT+eDjz6zEa6M23pmXHDU8xtunzr7x0lXYxIDH29vrAfgSVNlaL+l29XI5v329R8S8rhCQrQ4erQyv3bi18cjbJEnq5kAup53VrSiJiS4UUtuPPFXW83xvv3L3IgaLeXP90sXDw3utLr92+drJjROtBrdEZ7C64XRVzEuABWmNEhYf33ptmTetVoeSKM7i8bSY1QiMTvzRH711eS/fGVsBUbWUUcIIJZD66WxnNExH6XAml1YZEZFqlmck+cX/9a8mWfTC17/yX3/zP3V6pDGymSkEAyIAOAARhBBCEIAPwQUPAYSYBAhMoDgwBCgKm5tdAfz6qDfoRacfOdGUi7f2JhElK/0kCtHRztF8XjWFrKoGATjsdSPiIbHIuV6aZhnDSjpdUWtWk+z4aExa3VPbp2fzmVpO4wwQHs+bplwuEOg0Ru+M80Gr229lnXavsSROu6EpiUfDlcFrV6+tdOny8J6DGc6Izzq3r9+N24MHT22/+vydCBHBM1U3l956K26zYW/QNNrp0E/o6qgTd7v37uystlfWNzezQSfutz7ywfe/8Ma/aXocMa+q+sEz5+7tHwBKjma1qk2zLChhH/iud1x84aJhE2wLy9s0EhUwYjQ61e2I1nCxe9c6OK+VnC+1hBTEAWiGQaNzD0y3MwwqeBest0FBrb0hMcTIBRlR2Bmks8k8r4oQ7GC1p3VTmaaVUs+zqqlYjJNWrC054GzjQx+B4Xpx+/kO060Og0AyC/Oq1J5VjU/bCRXkT0lFo80TcUKNVsBxFCKIMx2vtjpbwMbTgztxC2kzCcAHyxwUcbwCYMC0mh+N063H17aGh4c3B+vwmaeTiy9OxlPLoZjsk7/5/3790Tg1gA/P9iG8RjngIoUAANZ67PTmc+NZv98OQC9nFTMOOLTI7auXDkcR+PA717cSGAS7c+P+yc7g//VXv/fFG79VSizikZk57SSNGaeR9ZANW5VBewfjTsaMnLf77boM0HFK6DwfV+XSBmAFkEbm8/FwZTifVvcOpyLlt67tfZGIZ568UEzrDp5GIiCMRRxDkqKQQkADtACH49381lXPkHCqJFFqtFlbS7QzEKLK4rk2j57sFnlj2gaA2nriUDRYf0TNDyo9n9THzsxG7W1bMBwYY5Fys0oRbQ2DZcKHLEureh51xOb2ubqaMtKkSfvoSEIcshZjVC+XRyThUGTWZt4mBOYuzL3PMUHDYVTP73BeLPXrg+Emp5GFari6Ar0vKyNQTTgLopgtd437/QasASwIDFH75NIQ4pVTRhUVcOXG9oqGB2m3B2wK814uF1ujB0S65QPK4jJONh9a+eiVN34tjVqysTpIZU1Ks7qqnVoQETfKGV17JBiPFkVotbsSR6Peo6DYs9DGSQchrMtZU9SmbiBLZ3M3H8+9LbfWOpNKEYKcMtbZAGClvYjbWydOR2SgIVYylHkeEb+cLpvyGIEI6iNXp9Di4comlFEC4d54jLyxWHY7sN3Bt28sWzGvJZweHw77G1nW1XoOXTh74oJp8lmTz8e3LfWN8bNlxUMspQIwpFmizXeuBB5HCaMIukYpJXUImHOhrccAhhC8byLBgmHGOw98I1UhGwJxxJjUNuWilVADKeHUGDta6UCrsYPYSNMskjgBMmqkqsvcOdDoJk1TaAMAFlM06LeNc9Y5CgkAGAcMENSgieNIq6CbBmMCMETQIdQQxL33jBBKiWpqgBhCCEFinCYYUU6tq2NOjQ51VWEiqsqoxhFCmqoJAMciCcA2uiaIQAKdcwiCOGKlLCiG2roQsPHOo0ApIggBGGqlkfcYCGM1gJBSqp1z1lGMBKcYY+cDCIZiEmgCjUQwKKUSEQOIEMSEEOddAEAQarQNCHLGEYY+hCji0AcAfPDOQS+lgghXoGEMY+QhxjBOGKEYhaopAGKpEAa7AA1jzEMMfGiLyHtnjcGYQ2AwQFKpOIo88JWWIkoqqUppAvKcU2gsIYizpCyaqjLK6G5MAwzSKABRXuZpFBFGvNUUI+tB3chQG+scZYwCYG3QzoTgobOCYxi8t64wQBsNIaDA9xJRKh9FokJI1lVTaYhxFDGtVCo4oB57oJuaYCYowRh551GADBEYgDbOKtNAk3Ama0mSYIJFGHOClYUBBkapc1ZXUlvvQQDOKosgQHleUSyU0RghhBCjwlrrjYWQSm0AQBYGU0ttbK/DIPIsIqZqgveUUQiRdRaCABGy+jt/aPlx2e/2JYYMCexa/fXzN771UhIJW04r7Z0NRlba1Ivx/LAY986LerkIMfLOAB4BFGl9bEnUH56sDgrttLJ51HLU4tnMJemQDk/nZK8WS5uG9bXN47nLrRRpJnobom0tzr/0Gy/DpXcuQg428/n6OinqZdIoQ9nhZEo6/aA8oz6OdUvUh3v3KU6U9tbnGkYeI91YWQuoIChw0j03EOL+3jhrIyCngGAAUQvGBFFQpgpBAxzhNO7FCapuH19Vqmr2VN1iYD2WAn3jtasrre5yOe5tDpqpnR9NgBGiHwdg7906Ojr2m+cfGZwZ3tndPd69+/qbF//q//zsG9/8/Hy8uP6tg2svXT/Vv7BiH/3e93505+JB1iZyoZ793g/evVql2dp0srz66rXjReeZ9z2cimh0YvuNr32201WjB3tqybL0AUQeCHT98Q/+9LKNe2S/i9kXf/ObNXPnnrlw/c2rTWlJh+/vHxhXE4ghIt4SbQBN0izuMkZxRnqjVQ65kwFFWFDhDWRJdna0un/7Tp6oEkcPPfUOOR5PD+7qqoIejMc7MESubuJ+JgkmKIa1mt3V9VG48sYVVeRW2Tq3NAsL2ewdlR9++7s/99t/eKrb2Rilh/cWO3cXs1q2sOm1aKX9rCwUQITSCALgPOdge6vz9nedVYD+4CeSf/krLxEoYIDdTktaXDsA8eArX3v+h39un2ar2KHQbv3i3//fv/n+H4E
JA54eF4oLdmJzCBC8uT87vd45asT137n8hVf+8TIZ9YatQuWnz23v7hevXr233uG838PAjO+UyJq1tZgSeP7s2ef+6I/W1zaHK2d5t/vux8/LN4+nexM5n4SyMmU56rDdhep0U/snpgMFxWu3Fwfjr/zCz/04yzqkMvksDxBHsWiaGgYstSYEBERao0TXmiAYgo1SHlBY5nkkYKvVbolEGdNYHQCCmDZSM0QpIDxGjawgRi5YllCtFSK0NE2W9jilRVW2O20AtAseBlg3LuW9h5788LdeeO7q/dsP0NMY0zQS0jsMzNaZ9cnY5mVlPMiLupNErSTC2PA4Ng1S2qaizYAwzoYAEGLeNwlHFJJu2qaYg9ogGJx3CFDKIutkhEkICFOwMhrtHuwiStK0hxmcqwrYwABhpCOlhTZQwgUBBEdaW4wQYbGqFYYcQ2tAgAgxTq0HEWXYB6lkADbN0roupPQnN9aX8yMKMeSt0mocQtrrq0oqnhMGSdRxSjsjoQiIifVzD5SV7gyHh6++unf/yomTFyKRvbW3b3UbhkWr3fN1KRAsy6UDEDgebGqAAzDA4L0HuS5DQARYaZv54lUsaBqPGOkuj/eREBjDxVL3BmklFcIYBCD1klKY8p530HjLmdBWtuLYB1w2i242hB4Eb2gUlYulrzGPYoYYDiYiiOHgQLX95BMpy/aPD1ZOn+xE2Wx3j1EFQ1PPD7upmEyL4EVn/UyN5Pb2KOZnFuP5U4PVo2uH5dGi34rZysbFF772X3/zcx//+I9sbD/BOLz8ylsnTq53OmvTg8n1N65y4TmO3nzh4v17sx/50e+9enl/cbFKUCfJkun+tSza0qDafOCJnaOrr7/8X378p/7c0+98L84GH3z/uec/8yVrwfXXdrLuQ89989JqP/7hH/vJiJnd63nZLK9+6+6Rj37ip/8KNd/BIOFUO3P95v1gASPBhHplfcDirZu7c621wCDhiHEOZTkr83RjjQ/6Wpr+1jqECCIwm82C8a1WxGLBHEnTjimWWmscglFWW2mtg5TOK0lYMtxYzYYng7amBNqWFPn+1vBwPG7ywlLHmGBR4o2LGCMQRsy6ADwDHEbT+aKWTdTrJWkreGiVRAEUhUE0jXvRcIu2N1sKwctvXUcBrqwOISZJ3MNRphXQOpS1aWoHAlmUZgW1Y5GuttTk+Dj1bNRd6Z4e3L97e2OlvXPtqo/bq6tRu5PdPyqCM4SS2DMra+iMj0hhNeWJ0dXRwR7CjHCPsaEEmwAxFlEklsWyHXMqYk9h2hLGTIFrgrezfKlsRUEBmZjN9Hi2EL11ng4qxV093D73VG2XZjm/d+N1mkS3b172E73WvtBUwhmz2Y/Gy53rF1/sryRPPPX45auLs1mWkS4sCtO8BYLMZ7mHACo5PPFocMzr+eJo+q35K9CHqN2qS3V4cIwAxQD4em4C7mdtUxkQ4FpvqJUGxlsPCI/jpD9tpnVjsg5d1qWmEgZEMBrny3lVK+s1ikrXcqbhJAKuUJrWZUXbUf/U2XtvHV/YXvExbrA5PpzNp5eVnvM2c6gXSdJmsx/482c+8uyi04n3b6ef+b2rO39YP/7EhQcvpGnfoWQOCQsBIwMgCQg56B0ELHgLIIW6gwqSAtCLIEwCJGBeuunCas+stdY5AKA1HmHnfAggBBB88CAESBBEiPKssSXwIAQAIMAYYRJhQJx1ACNEv5NbZxvT6fd6D3UP9ueIuKPDwm6op37wRw4vveim80VddtYHsDGH929kK0Pvmkiw6zdvXT64G0ftkyfPmMrvTC7h04ON7fZbb90888i62Fx98Nw7b71w+d7OQbLZvfL6xXHZDAaD3rAXZ+GBR/pXXr2OLFpKCxqnyT4g8XL/Tq8PWKvhpBslvijucV6PTvYvXfrWyolhEoPJwdXhcLR/lCera61kHevxUx/J+u1odnOWUrN38+Xu5lqnl96/P93duX7q0adXAYjtsa+nxwfT2oDJTL+yP799ebo40mkyGCayyJcYEOdtU+r2UMyL5V/8+b/2Nz7+Nz/wie8p5ntRKxVxZJbyxqV7//3T/8vrz32t3cJlrUDwGEOMYQAAwgCABwAiCADGEECMScQQdYYxuLbeW+m3B+3+cCuLInL5W9ekxJ1OXDTmjau3PvS2h4NSxjfLcoY1kKUMiEBBUMKcM8BrbzD2qMzLOGEnt5/Yu30j49JKo2mFTY5MY6RRARX5EsXJyXO93dsH3Y0z/RVYLhZtlInkZFUWo9Gp8a0rx+MJ5e1T597W7iUE3Tk+XNy4enH7wo898OCTbz3/PEThwrlRKiKSiJdv7O0e1B9/5r17OweNbCLmcHD9mGFnBkm7l3bKxkW8vXPz7tr5J9/x1CNXd25UORrE+Pb+IlQWe3F8Z9Lktccwivz9ezc6kU47uN0ihwf3zpx7UumKYl+XR0VzNBwli/059j5tI5YmTY1Z0KqeI8oIaBPQdtwRYnkUy7oGAAxWOqyTONNd3rqmqgqREEWRc5rgQGPcFKiRhdXOErAscs5iSHCAfuf26488IDa2tqqDZXDQQ+e94ig0VRUUszJQEf6UVOQlbqomiqMk7ejFAqE7tpnx7hCRrDNcN25MIpK0+o00EMe6mVszFxSubz2kJWmU4Wyoa5ty8r53nLo/u9EoRzmBxu80lfbLWy8fdtusXJayQcjBYKY7wL79wz98/dLt+cG1TkIR8IQg67wDfmzJbz+/d+6Bp5796Hkx3UHeDig9sz54+dbBO5857USjF8HVAUIVxSmN40UNIaRHO9NRF5Hgvawn0zGcUkJ9lOrBaPWtq2OPaTKKYYwao7/x5hXE6cV78vJO/rvfLD/6/u4v/BBNIoNZZJTllELoXaiBN1ALDE5fvFmUC8uoo9gmGU0yJGCsa5fyjjSmFbVd01gjkzbqxJ2Do2kkPOfYBeQQDiFTxhNKlsUYJ9wEEwXIaBwwk7W04JCKrF5qZGzaGerKVsUiabWDLYpFSbh0yNVljkGHAuCCa5ZzpafLZRWnGYAWAE+46lFkF3etNdLU0cYQo4TjeDQ8W5ZvtVdGK+tbi33p57kQtSonhiZQ2Ram7bXV5eJ+VZZeLMqyFF0x7J6aHJcrK6umXmhbB1Ue7xyubaf9E9vlUoquMAAYACHxDmoisHGAcJ4yfv/ubZzggFzc7yOW9du9ajnR+WGWEk5azhtgLaFYJFyzqNXp3bz00sOPPyTBQkTUWWN9GbdTDiMvVZb2ItSCAWitbZgHa5Wuux1RloWx5UoMm7LEEB8caZH0cMCDLIpEy5pGNwtd225vWJQVplGnO8jzgkSQR0GpOXIGO7XeGcjO/NLNu/2VXrbeowDnizKXqjdoj9baf7JVxCHBKICqarwLjFNKmNG1c947BwNpnDbaIIIBAsAHCBEkdGNr487uvlfyeGzShKYcq8oYZQiAwUNdyiRpY4BhhGEILliSsVgzpXRECEGYR8Q4Z62hhDFGiqqpmspbjyiyShLAiGAYQwSgVZ7hTGvprccgACgjHiMIIALGeq2tELH1llLaFDXwOGDPY2akJh
RpRHof/eW+8hPfmLP/eFf/Odf6acfv/Gzgt/+bPD0ZEUhajdYr8lhBNaX7n86N17t6zgC+v9zZjfvf6+j1k+LpgXnB4O47SDKe4vrDdKqlorIAmlse8l1Ny7M76w/VytvTpkUlmNvZhBrUExbSS47TAFQszKjMfLRTPCkERR6PthPp5hRKJWDH7Y2+issw5gYK21hvNAmhoa6LSD0HkAe4RO5mPCGMEYAquVNMoihCnCxkifc+sCZTFA1ELTby9WZUV8oIHDjFrrZC0pQlo7bhiySkqJAAAQaCcphVUpMKVNUyIE/LA9Hw44I4xJjpHKBCawtbB5MDwDyMIwUdoAq4KA1HnhcT4tJ2GQ9nrtUkKHMbSg20mrbNKBPPRx3XjDwx3f61lnsyIPg5grfHryoi7my2v96dnQlLqZTYVqrj3+Y16cKKvqPAuTLkNWK5N0+vM66/eeqOYHpc470SIQhngcIlZlisNodnZzodv1w6AoR+3OclNpH9kqL9qtzvR0RD2OvdU3rt9eX1qbjcdZmVVatZNkMKm089txK5vU9TiHV/CHnn709W/fuHyud3QUTsfzpNO5tLGt5Gxxu/XOu7OmxF/95kvPfOQaAPrBjeuWIqPMt/79ly4+9dziuaWsPLPaJa01o6zViiU05F5t87RHvvvt39rqbERxdHawl9dzL4xoEC4vb4yn4rGrjx2cjKcPx2vJkudFVT2YDysh6pXtrbw5bvZ3P/bTP9J9i4TEhMsJZVQqMzmZVekPE7u0caAxMEXt3oJsoJgWjSiXV7sAaeqpwCOE+Tt3dg/Ppqnvp/FmVRNtYa3E9rkn8tGothwrcFLsRIA0FEirrdZYBkU+aCch5ARZezoccegtb6xhFDLiKwWOB4NNf4EwbpTAhGjtlJKOEU5JnFJtqTUK+yyKQ4Q97fJaO43daDiP2xCmBAAPUF8rjTFKEt9rpQAJDGGaxAKrhcVOXRRNXQNIGPei1iI0mgGlrfEYtmFUyaKaHzOUkKCaTw7Shc3lc1fOdishcw8jCljjbNDrRksrFuI0DKS2CNa99VZQwtGJprSOkyTptz0aNlLEnWg0K5a63bkFSFvAoMMk7QV1VVXV2GUxjQgOOaEBAA5T0jQC0tg2dTE5m+Y7QQomo2LvVPpRwHx+8fLG6+/cPjge1JXmLY6pt9BtLbVaWNvUp4S6tE3S3kWTT2Z5HQaUlbqW3bevv7cSRY9sLEGgpZQJa5dCy0IK31ocQ7LlkVY2fDXodpCj3POhxgA4ijyPUg3qpqkpVtgKIUthbF2WgBoDcOOspiGP29K4KGDI5xfIldHew/3jXRZ2UoI5hdgpz2MS61JY2ziMAuWKWVFWlZQI13Xttbk1EAfh3dOjeTY0WiBMjLRx3MZBoA1oMcO92SeudZPOpK4ZYYHInFAzZG//6E8tLySyUloY8bUv/9bRrHriIx/55Uc/i3AVLazNrRVonYybB29cD/3w8UcuPtwnWTaTtexGfiPB4YODw83tC49dxKDRGiiljNPM86t6ThuIHOJe+AEKToajhfV+p7P0zilEx0Xx8DoAHNP6+Hhw7ukn3n35JaCMl4Sbly5ODnbreuYO36urAcGZH5RVUUXrj3SdmU3PdnaPorTFPEY8QAw7OHjYa6+Xqnj6qSe++/K9XkQ+8tOfLdRsY7WdT2cP39/vnlsFdl/O66/9hzc++vTjF9oIQok9PBoNPnbl8lOPdR69vPzy11/ZvTOYj5ud6/tXn3z8o08+N62m/cUlXalenBZlNj18KCsLAeuuP/rsC903Xv1eNZ/JMDr//DOE0INX36FMLfajV776amuy1Cz25oNZ7bHFlfbk3p2To7t/65eed8p87Stv7g3mP/rnP/5Tf+5jD994+TtffrmewVvv7jLGS+49GDkOoYXuyjPbIZkdH+Q33rp1/tzGSa72r5/snk1W1s7NsDwaNzYvX3j2mTfef5USJBtRahWG3rmtlccWgtG9ByL1zz93NcuqDkBNMTWOrD52YfgQ6gJG69uE0XoqGK8TT8iDd2UhACHjkzODYRAShVmuwPrGau3k+HRGKHjk0W0tqyjwZ6eZcST1u4XKG9F4zDOmPt1/AGU1L6sizw1QCLndwSwck1bMVvudEMIbL/1ONToBOL727Ecf3L/e7y33tJmdZGV22jj08Cg3DVza7sVRTAIvH02kAEjZkhNh6Lyo3/n2q34nWOovPX3l0qvvHzOZZVmlpSqL8XY3HmRF2vFSyqQfKUBnda6qDGiT+vTu6fE8Fx7rckAPjwoWxNAZDulw/xQWmR8hU5TLa6344kpT1TavEcSytMDgqnRng5nX7se8DfIZBJIY4yzUZcY473R4FCf2rBg7g2LPVeX6Ynup7RmXi4ZMptLHZiZm6dZ5ymFpT6ZZ4ZGg004dxH+GKiI4ShfOTQb7BBruYNPURubDMxFnomqMHIyXtheCMGwqyzDACMqm9gJfaaUmGVDF0spGNsutcP3OZQnbRTVdvpQy9P6Nndee+fiFfBTtHN6/cLWrAv3gqOykaXk84dqrTAOcJRhQjKTShBAIEcJUKw0RRZQ6SDBThCCAymIqWeqdkfDOw+mTW+3vfv36nm5O3j341EsPn33m3Eay/PGL21957aY1bmIvv3q69PLvvlWULyursxnSSiz2lhaJHs5GWjvK8ec+8fh3v/3dW/dPfM/3POpK0F7p/vwvfLaVVLffeyMgflnjb37jOhQQ47zjdZOej6175vlHv/bOrhcFlZifHmXrYUyQhVgQjmzVII07yUpVk/bC9uDshh8F83IiVcVY1w82i/Isam9oZBGXpcz81rYRuZR5vODn2RngXtHkqd/3o6SWc86MFFYBIFUTRp4oLZJsYXntzsFZJwprUVkNT472B840RghVLXbWEI373Q2PBmd33wbN0IByViKIuHZ1utR31kpTWysS3wFCEWDb586Nju9n8yHlige8koBBBQJEjQW2PNg9jGHIcDI/OggSE6Th5PgkiAJRIUR4b3W1Gc0C1poMdh5dVHf3vnvu4mqRyzBIrbTf/taX5mKv1eOMcdzQUlf5dHhu7YqDkRf0A4azYhfgzA8xIM41ZqnVa5SfpGlAYTF+AFATJUg6WZYgjDvjvEQOr2w9mc0Oi6JqDCZBL58LCJG0NDO1RnZ5rQcc4VE3TM+VeVEXGUQmgHY2nMm8WVp6zMJG1rmrFYWSY1KMT/LTw/bCCrcOMMiC4IdUEUYOQAsAohhCACyqZGWQMdZorRGCaatVFkUjFQWAEQ4RnWQTSGgUeFIaoy0POCQIIkwDn2DmjA08BhEjGDOPMM6A0LPxWClLALbaYYxMYygllVZJJwqjADMspLTWzPOMEeJzH0Iber7TWmoQeAEAWGqNIMIYNU0DAWI+s8AxD1BGAECEoLqsOaPKSG2MM85KZRGkjFLmAYhq0UCAMKMOOI9Qq60DCBEspahqATSACDNKAXIWmCSMhWwIxgwBjIA2xjmtrYMYKmMsAIxFuSyBg1IqR0FWzqhChFJGmVFaWw0gMEYTTKx1GBOCCYLIGqOV0tZ
iCIBTBBOGsVFWa0UQopRYQBoplRGNbigmlNBa1AQSZKxHPSmbRkvKsLWunM8opZx7CGKKMGewEdIAjRgABGVNhSACDmpjtDGTfCaUooQDDFjMRKN1MQ8Y01py5iEFKecQOWuBVNI5GwbcGEkhJhAaACC0GAOptbU68pl1iHBmoauFQBhR7in9gdveWgvySgBrHYIaOEwJRUhbhSh2GDiHaqWVsRAjBBBnLOBkMi8ABI0SnGBtFMaUssABZCCAiAAHHUbYIWW0MQphF3ietVprCzBqGk2I1VI5x1thR8p6PM+E1JQiISXQiHNPGl3XFYSOMaKkqLJ5LWRZ5ACAwOdhFH2AgtnekTlyN3fPikDS7Wo43WNYY2CREUzWkJcGZ0JlndCz0CDqnn7+4uLWkgJSC9hajCFhGkGDIfAZxHV30ZdNLUXzE3/lz33z336t3qv3H56ee6wfRHRw+lCJzPHGx5HUVmuAUZSsLISG2p1cC1ud7DzID/zVD3d6a7DBhhKLLSHcWU0wNCQstFYGUwaZjw00GDjnEIKIQAyNcQ46DQygzkCKlRECWccxddZB6hMWAyUo48wKP4mV0cg4TJgDVGYDVUuKItcigDUAQSvExjK6uNU/UXLr6c3pg7MgBZZWn/rih+8c3/3Zv/NX//l//a+N4ftHuysbvW47CsOolFXX7yiTadTaevqR2XhCs0E9b6QnJ5NpLgLok0w5QVmGzf/vD//gqY3u2Xw8OxDYzu7f+jWIHkV+9NGP/cjk8MEbL75lvG42ywLDI4gELPcf3jqaHQrcnueTMGEEuqbO7CFOSOqI11loT8rycz/7sV/7p//26Y99tGNh7SzXuBS1crowcHVlY+e1XZ65pMeLclyU8+lwaKyUzcjjqJeETaFlMw8CppzTtQBAQAxjj8S+NxW2kaASSonmK7/+G/tPv/LIRx4rjM1HZ20vfPPV29PpgHfDotEAwJOjk/NXN7Y21ylEzqgFb314r/n9t2+9cmPiYV8LAyh1BIzvjRnFgU8xJATDWoqmcc4RB7BDTp/N7xfvhV4ULfMLj62DRv7Bb/+Lf/Ovv/b/+Ce/srW60AzrTz7zxde+9YcXnlihXPzqP/yVT//ET1w/Gv2lz/z4zXd3Tt+vwdbh+R5oDl/+6U/++N//5b/rJtmf/N6X37xxEzGOePzuazcXO6s33z/de/MfidGooNMfHsnKQkKsdQiRoq4gx9q5bj/evLoIQBklvNvtOAvuPTxyIFy/Eq1vrOQH+s6NB2m7dfFK73Mfffb2/R3Ph0Arp+F4PuY+1wbM5/M4SAwiLS/Y3X3g+x7QZjwpCQ82L57LKxWaoBENRoR6VCvHA1pleSWF10ppa00GvNB1ZElAPAqNNNpLuAKWexGA3FYl8xcgYDLLlAQzU3Y66XBwVhZ51ZQIIYopBNgaAADQuqEIByyVSiiokjguqiYXY8wQgEY7Lw24lDNKscd51ZjawsRfwGoyHZ/ErSDPMw4CqSVoYK1tp90iwBllKeXAgKrKndFx1JqXs1aajqenaavFg2AyGnmcAa3KfMoAaOYzqBsXkMlgAJG2CHNHCIDIOIzItMyNAXHAH+xdX1xcBdrkuTQEUeuAUYPDvaapKedVUUV+oqVYWb925/ZrAALEUMoDDprCyelce+FSmec8iuNg6Wg43VrqOgdiumwMAK5SupR5yT0vm+0rMeuk64S0ldhTUlFIEQSQ6bULl2opDA2Y11WNq+bjcnZw+dFnBoPZxvnLJ5NsLuTaSg+JmUMmaaVXw8eNdAUINBUbi/0Ry456PEnxuz94b2GxVYK89suf+d/91K/+4//JC+LVfnLz+l6rFZI0fv+9o5sn3/AR2l7vWVk/87FL0/HhyX43isDB7v7FrXMA47IsZJljjuo884EMEvXb/+rXPvWxT45n86Pdk95aWuZmNj5B1h3uHZ2NThAHT37u+Yf3htvteP/Gq0vLWyRpO4xAkJ49PHrw1ntiXkPOi6pihKSL6e6920b9UFX06NWtmPPl1fZkVoQ+JwETkiES5Nmwms23gpBaWNdiaWn58tWt06OjQlWVUCRIp8L4nk8d9bm6srk4H4wGkxJRY4wejEfddgcBK5qacOglsdV0qs1iP1nqb+hq7Ld7hjotLdDGqtJoQZCLvU4hirgdI8q0UEJoBxzhjHI6ybPxrNGQFFXm+70o7eIg1XXOsOaez4JESPnBnQM5BtA6CIM4tY4QlgAhnQZVNcZWW8x9P9aZ57wuQGSWTcJ2y47vyVmptRISpa1u4pGoSwXiTaM5p0YrZ4xFEDEeoF5HA6NhVk4BlARzBpRVGinx4P5d3486Ybq4vGmwJ/XMWFQLeTo8Fcp1V7bSKEEUlGVjcUhTXIiTXOkHe7eXz607GO7d+UF7oSe047zDKCuBoRG1EAJEL5xfD6kX+cHw5Axgq2G53F6TOMLYej5n1NKA0QBZWZ2NJrqpo1YoJkOPcBIGgvhe2o/bnji749k1zHXEA2Asp9RpZV1jTW2shEg1UmXT/OR4PixMKZyhLCu0tnU2FydHExIgvBaHPoLUay9uXuR8ODspXdOOA2dVORtCK4Fz40mh6qayBgchcigKKYj9XFaTaZmg+Gh/og2mXggB85Ok1V/wwhaSgozf97V47sNpSc+Qcr2l1WJ0wIPW4eHZyUm5MxC7p3NNWHq+e7F/6XR39//z//71x65deuFnzwkGpCCtxUuWcuN0u7NwrU3v37k+mRjiRBgw2Nj333uIKe0vpTxMEOdVIykmMQ91VQEAAv+HWUUYM+YFjodeyyYxro9HwUpXVWi4P8tv70Xt5VmxGxFvNjpG1gIBhkcnaxsr1OfL5zYW1vHodD483WPYYB5IyC0ho0pRHl3+0DOc+NqjC2udZx5dnx7ff/OlL3cvXU4SkzcZjL2Do+Fjly7cee/W3Nr3DoaXrizJrAr9zqefvXrt0VVG5e7D8cVHnx1Miic78XvvXs91Kef19pWtsBObxsyKAWfk9MGNtLVY1ubs0CZ+cG778tr61qtvvHa8f3h2enZ05157wWd3ayFkMSjSxeUnPv3s//xvv/KjVz7Tre2Do4d+3zVN9ld/8RlLW+1Wa36y55DdfHSjpjwv1OpySxBv67nt2f4+5Op7r9y6sN3e+vDnnzj34bSDWNXaeXB89Qn+rRffng5kEvgQuNduvTqvS2lUXStpXRKoJ5+5EjbTbFrUjRsNjjhzURgUs7yuCmg6mCccYwUxdKKeHRcQEAy4A0Lo9vYGQNio0o9WgtaKnOSqMRsbl2szmNZ1vLGe7+1PZ5lSudbZ5DRfXtzIxyVkUFgtlRZ1fnQyahGuTZVXHgRefjy8vLWAcbG44JVNrQ2J0vTO3gHiDFM4Ph35FLZbfNq4EmNBgICOxhwwtbHV390fME4ldsdVgyAyytYT8eDB2+++fV3xcGMjnk3yw6JwDT7V9bwSScf3Yr+/sn4yHkNuUGNWFrr1ZJ5PiuPh7PJmTwlrtQVGt9p+nhdpj0o57tCQo6aDZ70ENKzwethBm40BQhgQYnSp6pF0EkHX4NqPOs
SRMOQmAF5oQoxHhbl+c3dtbTVd7nPSTGZHqyt9uOCPh+N2rz3JypN7bzfUJbFH4WKWV4ThySz/M1TRcDCB+M7iIltZbsu8qVwWtf2BgJM5dIUDUBWnQ76IuYNcudlwVs7zjSubHCtVTPPBUR0iD0WzoYlwAD3LsEw8qwOCpZIjKaZucFJBimBgzl1brqaCKfKRrcd2d0Z1UR0eDyklhBAHUF1ZhB1yEBGijG7qBmMIoDXSWaEBURjgz3/h07/69/7uz/7yL6W8qkvzt//h/7K03f/bv/gr4dpz2XdvET/4wVsPX337PjPICBDGQYA0bYeYo2lWUYzTfuwz9+L33rUW9pdajtAAuGc/svWZzz//+v2HtJVMG53XkFE2ORyvXNxKOXWQVAZmkxNEebzQPRhM8wYubLWdQwA6o8tKlEZLa52uraIS4pEzNTDWQI0oh8aTNUTIhzAkrMzmp6aRkK8S4Bqd375559LWxWKShQw3DVJW+xETuR5O58YPnbUx9icHA6vMHXuwsHERF2MHrN9qxfHa0cmeR7k1bDaTyEyDsH9493umLBA1NOL5qIEo8uPS8dby8rnR4XsEiPz0DkbGGdGwAAHV7/YdWxDlPuF8Z/dub3ErYKEqJbSs0snFC4+dndyUxbGTTSMznVeiBgVwl1YvDI/uuRw88qGfmU52Z6Nh0UuUc8sr7dGkPDraTxapNaAuq9nZqLu11lm6gsMrhdFAo6STwvoU0QIyVWbFfDYHXqtuRLfTHo3uRkRwbrLJ3EEQkM7kYCdsL5eNOjQOascgQpT6wYIFQOaTomqot7q61CZgCKzFQE6HO8YQhq2sZ1pOscmsKmuNiN/WGFo7R04o44SulFL1bI8AlHa7lRIfoMBaq+vSIej5gUfxdFZiCsPA08poZRGipawgoT73CMHlvBKqNAaFgU8YbeqCEp8S5oADFBGAjbac8yRKqlpp7WQJilpxDB2iNMDNdK5qZYEF1jZ588FaRDhSUjgHIEIIAAwRdFhKzRxwxjgtGwcQokYbxhz1GACOEoARnWY5JswCKIX0HIyiRDQVowgilBcNJZRRDABAGEsjrbXGGEwwcM4jXqWkaBQSilDIOW9kzTDyCFVWMISNkVZpqzVjjFMPQaulQpQkga9kKbWEyEmlrXOe7+d5iTBRwgLnCAYAQASgdRoiZLQ2zhrnPkiFtsY2WiOIIMGiFgBYBAh0ADmHobEWAIChc8ACj3KK4QdpzdoQUZQgQs5ZgCjEgTOVUgA5J0xTiizwYwQItCr0ucJWG8eoV1dCSmmRc8BQQrkXGi0IApzHmZpbrZ3BECAHEUBYCEkYMxbUtcUIGVgjBBHBtSiQgw4BAAHEABPq88BoTQDWyhgNOabWAmudlMI6V0rJGQoCDiAwWjemZtxD1HMIWQQ1clKYiEVC1Y0UliHKOOUUIoQQYJhIpwni2hhtjQZQW621alTNaEgR1UrLUsu6kFphRpxxUmmhTcxDhFk+nzeqBsghiBopAbChF1RVXZa1chpjBBWYF4V1phGaEB6nbQeN+9O+j3f/5HoL9LfPL745uvnEowkgSjlAGXaG26YS1kJVYODETEJgZ4fzC1tXXRCVYmah9bwQACeb2pZ4XusAAURxMa/6rejo/i0c4he+8JkfvHNr4fKC9AuHddii1DdxaLV1DgSN402tbNMwz/NDG7eoAOTO7VeCa091Oo+WFiAAHUTEQuqAkQBRTpwjDmHggMMIYgKwBRgCBxB0ADlEnXMAMYSxtXMlM0ggtFRrHPgdC5BSGkHoJ6GeZT6xFLYr7XEw5hHFNMjujytrIMIQ6o989tlwEQ/fH3zjlbtrHr/8iQ+///atOUDvPbxvXjF56+QX/8Zf+sQvbP3Lf/J7Jxlmc13MsjC2z7/w/Juv3jqc/NHyavuZn75w550zWeHtp3vvvZVZCUc14DGDyB1Miy88f/Fs72BpfZ0qsT+4Px8a1Nu8/OyP3/7tYntjm3phM9EdiN568b4AzNXgy1/+l8xfDsUASDUXUwg1oRGxPrNBOQGK4IcH1fnNRwLb2rtz2Ep6b715qHzUWV86GZ/oNw84ZBcefw5hXc4GyKtzU6R+/3Bv1vNBU9cYmjjCgW+tgdNMKdEEvgelCxyCoTeR0mkIMc7m8pUf3J7XVbdPGi1sLwTQEApDn58M9WxUJYicBbPWYnp6Mg1sYk3w+ss7b7w7SnoLFMAaaqEt0pZgjCBoaqOFxhGLPA9ooByyCCulo7YHAlfY2d7h98keWe0uV5kKo9Z/+//6rWT58uc+89HP/9yn7tz/67dvfvPowXF19vJ7O5O//3/9717+1v1xzsLW9jx7b7E3V9Xgpz//8849jTfjJ5/68a9/+7/9n//l7zz1qb80m/h3X9uvAAAEzycHKPih6QABgBGgEa+LAiJ7caV/+8EYUs59bRzdvrIVJ2ng8cHO4bya3Hjr+P7p6JPXngTS/JUf/+T2SnB2OtNOMhoHgT+aTBsJpbVOYzgbE+QCJIvD+8TYhaX29GwGYVQpi6SxmHS7yfB0in3PGkMIVUJTDMYHAxrxBwf3OtvbvZVlO61EI6SRFEFKOQ+SPJsEXhh5sCz2CNGBnzAaGisgcGngS1FyzpxWnFOtAdaAEN9Ag4DTRiBokbVNLR2oMQVpGMtaCGEwwkppgv1GGgNgqYWFOopShJkUxUI/qbU2TnAv4sbquqa+70UeRBRr46yAAGBC4zTWGLZaiwQipXWcdBzUzcxYXQQx1kJU1TzPK04CbVCVjywEkBBRTDHjnbgVcH88Pe6m55Ooa7U0wGsM6XZ6w7MTWwNi/ThsCWUo9aeD+8d773mUSuWcAUoWssp57PtxP58In/pK5b00rmpnhdXWUhZo20BMEADOKo+moq5kMwMsFA5gBwMv9qN4PB1ZpTwei7oOOGeIl5PjNMI+SqazU2V0XkwbCZcWFzppMD88pMh5nGWVKmdz3qHPfvrKu6+///Lr70fM2VoACfJMnY5HDpN//ztfee6jT5WNHh2cEYqnk2xyNhbO1Y0ZzYWWth+zG2/fX95ePf/4wvhk/9HHniiHUww15UQWSFZqcHS2fW7tm7/571vcixPIEs9v9bqdnhAjKcu8Gn7uqRf++FvfunjlPPPIn/zJH15YQ5xzFhEhmm7EHrzz2nNPf2JwtBd43BE7HMwJxTIbwJSs9Bc+QMGVKxvMQYjBQo+HYSRtAwGAFEWLC51wEyG7d+dY5mxaTCk3SJGAeNvnN+Y1PJk0zeBksRP7PjR17mTljF5YWh6dFVI7Z1wlpVawnAiEkNXWqCZMsgVS5lR5BAFjjcHAAimFMTNKeD7dVbJx/qLvRZxzJ6woDEUNsxrKZri3jynA2FlrgFNBwGvbGGGlQMbVGBNCkBEWQUUIBjxAADLKAYAQE98LT47Hq0uLAJJsVlkBjIIAWSOrbHBqymp2NtYQ4ySyWEPu17UiQeg0JoHnpNaiJJ5t5iPsxTyI17cvD4/veUCX8wEjbP/uAWQeDnndKNsOSNg1WkLHAy+IU1ZW1f7eg6KqL25faS13OaON0ko2d
V3X2ihDqIdgCEhqFWyCMFRAdFYTxPB43HCGfcg8B6lSxoJLl5+cj84MULIweaUBgiITujLz0fFCK2QUYqcgY16cWIh75y8R6plMIC3Gox2fEeuiMptU8jQMGbDaNiVCwFglhBC1yAQaTeTZSdEgL3Og0BIxvtgiSA59swCVcxPgez1CaeZj0HBAPCPr2WTcjhlBFkAlnawkzLIMMhr5PsYcUxQSyBUKmc8XlgdTpWgTcK417a5tRmGiXDM/PfaKvNPyv/viW4882eNmenQ6loJXOtgvwoNjOzkj+2d1Ycecj5az2VLU6rV7x/un/+JX/8fnP/18d2Vzayl+6trFl773MvPosx957OKl5LV3boGyOZtqTuHJaY5vHX0oZgpigAH1KUGk1gYDTBkF6IcZptPTrDqddLfA8tr5QAxOj3d766taR0mfN1p3Vi5sdJeEyAZ5Pjwa9dYWITQVMCqT5t4JNaDJMk7tuQvnRoN5XVtVVcqyRkASdkrn1xrv77znxShKmuPhlJIwI34j6cmgCVrxylZKgtW7O4PTUXbr4fDiRruU880l78JKlJe5ov3RqCjqOjXEj6msy7jV1RLt3J9c3to+PT4KYr6y1XbWsiS+e+ud1Y21+ex0PNvlGM32B/PhlGGiKgQN2X78eX/t8W4/ePPlr+Hp/Oje/omCG+sro+GBQwwTo6qZg9aImVcV5dmk3yI/8ulrr3zrnYZXF7toWIWj3QPBw/7i01//tz/YO5088eEnc8UyFX/ycz/5H17KGNtbXkrfvHGzHbSlslZYz8cQo26bxxwxj/X6HrbB4cFZmCa2mjtRxzEDorYQuVJXZQNTHjDskpRGnZbf0aPju9fvZqdnOKbH7nAjSFsh4RTuHx013D//1I/4nLicDM5e8wNaATWczQGa9tPe6GzAUi+OqWuMKOfR5sXHNp/+7u27aYsvreMsn4wZyopDi1HIsJpOQwc3txZvvvPm5PSUBSxN/dN8rBFPzy3lMlOF6S6HupifFqOtjTU/ZBTg5WTp4O5NSQBkqLbSGbO4lDANpKNNg5Rx0rrRvAysORnfkEq2WuF6LzncP/EIm85kwthW3zs6u39xU7fibGXBllKwCDW5CljBrA49W88q7mGMsWlURAFiBrIgGzdASxYzwT2kglpUxNk09RpkRVVgqBY69NJ6IGSmNZTKGOVmw2lRqYvXznl+Sx9lshSz0YCBljCIxUvHo7H704qDH1JF8xwAYDwA1Gjo+6y3mAJkEwGNxNJZxEBVVZGsEEfFtCZBlHppWc6NmiGXdxd6wAnkvE6vXxdNU06sLWfD+mx8YL3Y+mkU6rZY+/67N8FEIY/rSpRlHXne+XPL85mIWt39nZ26qLR2yFmIkYUAOEcx1lZbCwCACCHmMauVyKevv/mtf/rr8WK7M9w9ZQh1aDjey/7B3/tPITBJOw64QkaIxiBOvIBqmdMIU2IY0+0uhYCVRVVUglCiKFGE/kd/8bPlg90nntgcjE6IqJfPP/v6jYeNKAOu04Cvr/YTBavBKOx2aNySRfaJ585ZKa/vHB7kNS0ZB00ccutUkrbLmZLYdyxtlEQYzbITBMnC4jqVCUSplrVRDgLtsZCHnUprpZUBfHHhEehaURx5XuC0NaW0xlEoPKQtw3UNbRMgHUpd1KXuMa+SBQI2n55WdRnGPuO4ncbU71mAZ+MBRs28mXLone9fLJspJRQpQQ082DkRle73kfNUvBDeePPm8vK2NZYTWNR5XQtEPUQ8SpK6rofTeWvhuYurT7zx+jeQm66cT5TWThXnL125deOhT1uE+tSLDXFH88MeqPvry5XRxXxWksqgxXCxjUIUtvxydHJ+65KirL9yJS9MUWRam1bMzgYn7VU/iSMzq43DRV43tfYodSgPW6kXRk6EVVWESRchV8sawfrs+DQImA/rgCJXnmDNnZggrQMEE4KonEkho5h7GEJsIupOz86Mma2uxmMvHE1MpdVkJt2kIRiWUDpgmUd3d0/TOMkrS/zof1MVIYwdQo1QRimpJTIYKEMgapqq1VqIfF7XQgjZNFIbpY3BkCGEuOchBIGB1lnjjLXOWhvHCUawaQRGzEIDIcqqCsbci31MXF0hWVrCCKY0zwvGaK/XJhhCgDn3pJDIAYwsgK5pagooho4QDAlAyFpnlFa2lsYYY4HSFkLijIzSmFCujTWiscYarQEA0MI4apciA8Bl8xEAkBNPQk0BABAMhxMvSHIpI993xgKlCSHAASkEsJpQjiwgiDKPS9U0QkIArXXYuLqurTWMIoghc8wBoLWFAEgpKPW1cY1Q0GmCEHQQQuwI0EoAB6z+wAVlAUAIIimlMZozD0Fa1jNOfcpYVTcIAWCBc5oQigEhBGOG50UDKYQYM4/OykI1xmoZeBRaxwh3LsKQaK0JpQDBoqoporksLLRSN4x7SdzK5pnSDiErS2eqOcXQYRoGSS1yKaVWBiLUaKOVNVpSgqUqGWUAAASgNlprbaxFEBtn9wdHPuE9tgARcdAIKQEEUllMiLGqaGrMo0qK0IuArYmFDGBjbVE1DjjnILSu1BpC6zOKKIYAeQw5ALQxxhjnnJB1SCMEQNXUjDgMIMMesIAgZIDWVkGHnLUQIEY9AxzlHAIAnVNKyEZghhqpGik4ArWuGtEI3WBKtJHOOsIQAARjL8vrWZ4ZVds/7QGEmFuBD+4eSDhyMwkTAIiDuIt8LjWjlBGsEt8HzuiqWVpaUq5pKmWQEo1pICXYaln7rbi3ttQc72IENtZbpBAgzy6c27KwPvfMknLi3s7OxiO9KIbOGDHLic8pxghz5ahCbYtr6TLnlBC2F/f2775DL7VpummlABQiSKGBGCBkCMUIa4AdAAgi+AGThywwEEDtLHIYEgIMwCgE0AJgpJaNVtY4pxQqAUBtQ4VqEMJFVZ0AMGetRxkIhJzWcnbhqe4kVkevTXq09d793ez7A0eD517QK9tEQnbhz30CQ/lpd348Hn/kR6/cPb0RRvFnf+kjO/uTB68fdVLPVPb6m3ex1qd3D+/f3Am7Qd4IZQxeBptPhPkRBnPTa/GU6DZlX//Oqx/+7OWzceYvwhd+9uL0uNg7eGvv+lRVRTmpW+lyVerh6U66QQeFkGdwsR08fumTb/6HN4lCwJpuv7XUXyVZOTodPb76ofcHJ4vd9QE5KWenlx5Z2z2cXnv6Manz1jpTs3nAw2quDk5uLkTRnffeITTjPvQ8AgURsiG1DDhEgGALA6Wl0X7oIQchQB5jxkCikaktRJD5XDo0L/KFpZQozaA7G+dN0wSR14kJUkHeyGZ/2K0lxYzHwbmNx3/vd1+MA98ZJQuzsrQxFrO6LKhHlFIex7XSUhqKsXOAIKettFJrTCqJPd+bjJyty5PdBwq4PjCdqmnx5F/8039499pH8gdFm7T99Xp5bTlauHx5IwzL+JmPXf2TG7/z7AtPra4Ont3+gpy2uEeNMZKwZy//0pfE18Tee0Y9YaTZu3krYqrn6VbI/nQWYKudM4pgAjnvpd56YtopG4+mpqEdzru9ZD7OVy5s2MPT
YmYGD+UPip0XnrtgxPRsb1YWlU+ZFMbICiLIPQ9iVAPTSN1UZeL5DGNt8c7BmCEwHmWOR5kqlXHzvEIAUs79IBC1UI2MQh4lfJLn3TghDbQFFEJRzqqy8BiSxlVKe4jMJlPfpxxAYBimXhx2Z8W0buaVrI11ibeMCNEWGCOssQhpC9wH/xtyFDhslAqiQGlTl0pLwTBumhkhyGNYyJpCtuQFoJrnSnMeSkXzTCLP9vpJPhcnJ6fryytVkUed0GpTV5XvMUjx2fio3esIIZwyFLsqK70wVRZYyMI4buqzJp/NxsetTpci4Idd41xTjnmrZ6WRZZlCs/fwoYVgeevJrFIqK5KEEGzOTnarqqKIKW0QwQS7qpkFCbdQhN2OmZxASMpmzAI/wNzCcDDYT30c8HDv4R6NU63rvJLAwwBjK5THue/5jawJx65CEGLCPT/0m7JSZYYw5CQIg1iUdjYcU2snJzs45L3Ez4fjIE2VLFudJY+ZkwfvQyGsdtVMU8JpGApZXX/79Zu37lIUhH48OSmIQwDrCxuXq6qe5NnC1srwbA4bHSbht/749fZSf1LMLABhgNbX28WoaETZjdmr3/3mx5795NnxsWhmUR5C6MdJDBDqb/SFEl/9g68ZJt+9sff5n/cjHnTi9MZbry4u9zvJ8u139peWNkPfu/HaqwSi3ZPhVme5aJQRdnx06hPkiK0RvbN30u3GgDPK8MMH+5QisvRDFESRH0YhYpAgKhvlNF7e3ozi6OThrtYWA+sFXA9mUavzQS4z86k12vf8NOBxL6nrTCkwOBk4Y5jvZUVGAl/ophBlmdWceHEcYmGWl/rWGtNk+4d30/ZCk2Vh0LZKKtkkUSJFSZEzlmmttSjKTHsswhgGScjTsNYZQWUrdZW0wmhIsdUqG48AQrKW2jhGGaUeYbSqT7M88/2Wc9Q6w31EIS5mhUf9tLNU1mWYRIhFsyrHMKAMattkg2MKrBDzWlooirPDg0nSQcy7/NQy9wPue1ZUzOMOGasKj3sWOcYRw2QyGmojfC9ud5OmUr3lc9N5ufPwRNt449IGbazCGgDlQ5qkMs+mb7/5va3L55N2xwsjgwz3IUu9dnfh4OF9g8xivzcaZxe2z12/sQ+ERRIEDm4sdtZ7MXXS42G8sgQgS3XgUToYnMRx7IWhE2B//5RQGCcx9qK1c5tpbxWj3vCsst6ScDbicvTgLa9FW8v948mgGE+QqYSqpZIqnwOjmUeFkNm0GmS2UlQCUhuUN6oymFptAlnlxd7pWW8FTXays+NDP438Xndhtb+zs1sPjxc7XrebzEb5vFTKQmME9z1IsDaa+1QI4wOssVtYafG1jZPyzIuUKvKk14bQCpFZBLNy6pSaS5TCtVv37FI/6Xf6d+7uDkXT9B+59sJnHzfo/ddfvHvrfWHk0f7gGM568SQJvNRnP/jSb1MePv7002CifUsRAOdW1zr9fl2Jw929+6fD8VC1SAIcH4wmsdbY6qSTAuCUlCygiMFaVB+ggFFArQucmd66nstKF0rM692773udlfXNS3WdiazkYbC4thbQpWk+rOqJkLOFtZVWN9q9cV80GsX46HBIEYKqpsRbXLkwm+Uia6CPL17YOLnzvoRq/dGt4q1yayF68Po7AW9xzY7uTeJVywlfaHvWwbPp5LlnNscHEyHd6dmklYSysXKap4CJ09nmUoe1Ip1TKVUrSIEARNpqMOj2w9sP7hN/4emnH7/94Hhj46IW5dHobOf+g9Zqu7XYObo7/pFPPxl1L/HeQj0+Gh8/+NkvfuHuUKyev3b5kZXjf/dvtLEUt8syf+Pddy9/6KqmrlH46Kz6+vffD/zW+HR+9eIqQ+byU/3N5Stvv7q7dvHR0h5uXblWazYvsj/6oy8f7d/FAJwcDyM/tMC1e63ROIMYG1UQUN56++2Eg1lLYuB5KJiLOko6SSsKe0meFUEQNrBi7T4PopOb95IkCbrLxTy/8ca3Ns9tIItOptPSqQ1t7GxeQx0jEyUek3U9E9V45mE+M6dhKz2bF1aY0WgqjWtq48WsqCvr0TPRLAatxy89fXT7RUDrre3e9fd2lWQG4sgn3U6tdvb3Hvjzycw6QxoHLLiwuXgyLWVeUNdgSopJIwuLUTIVmMdM1ejW7Yf1TJa0xFhRDRBHg5O8zjSn3nQ0rSEACCAMilpKBRwAksGHJ9ksr2JkpGi6oc6nd1qhDQOLsVCqToPA80uIoZTK1DV0jHPPaWEBxhhbZoStXWOsc7KSg7PccRDHYVGfxlHMCCQG+WEkNfAh7nigdg0KQghld7Onivnhw6NR6Pc3/Sj2HUOmYn7EZ8MaUNbqr9tq9meooqDVWtq4ODp+sN5NJoM5jwJjKmcRj9i5S4u33n7f88Kjh7ud9QVkHPMAoLTIJp3QK+U0jNP5eNJtpSTAFpNOuzU9Pp6c5kn3wmMfviRnJ/0F+cbuUX/jYtBdeumNm5HPVs/3qkqobJYG/vr21tJy0BTN2y+9C4izyBkKHABGWQwxJgRg4KxzzjoHKYSTo/K3fuNf+1FgLJJG+wT6ENE4JARDY+azEgFHCDZOS20aqYhjStqyqhuhvYBja4BU/X5366NPPfPR84th/ubNo1u3HKxj6rMv//bXTncO2u0wxFJpPdp92F9YzstmCGaXz20Wg7yNTX+jNZm5/Tv366PRhXOrqAsYdZUrrNSMeq00LYpTL4zKcgIJrrJhxHwSLSCLO+32aDwGmjjrtxYWx2c7UdKzLkPO9DcuHx7sBBTy2LVSfTieWG0CP8qLbOfBXWpVr+PjFDTFkYW2lSbcMUhjEnqMWdUoEm4RhhvZIMJTzlrtznSmKQ9nxSgwQdLjnCbSB44cTU/2hGZb5zZMbc6d3965s2Mgb7JZ2l5Zaj2iJY18rDrs2rWr3/3qlwzMQt9RyopSRUG3yZwHWgvdJ7u4f0s8uHLhU6KYjo9edw4gaqZNEcQLV89t3n3/GEg0Pp0HqQe4tzc62277h6fXPcRXV5epU5RCJSvE1jAXNPQDhaGeMER6/RUn5hZrDRhPzgOGWOiaJjeioozHUdeUR9oYpavGakBRO+hKk4nmJIgCShGjpaqGTtq9k8k4GytIhllOIcum8zg6v5osyCA12mblrGzykJL+QhciqLRr8vn/dh54MdPaaCml1phgrRTD3BgQ+ImQgiBU1cpoDSzw09iHzkKojW6KijEGEUIEc8qNARCSdtqRouIRdhZCiIbzGmqlKuyU1U4qaYEBFNKqqoCDjPmMUdkIB537QNUMgTEGO5ckCdACQ8AIM1YhhBAAGgClrHOoEsIBFHiU+QRiqJS0xhKKG6mDIMAONrAYzUfMIw5AYBEE0DEwr7LABBACL2RWV51W5FGWZwV0gDCvkYIiZy3UyimrIMUAIUK50jr0PGO0c1ZpBQEmGBgHnHMAgaaqnbWtONQOWAswQkpbDKnSxgFFCfY4xdATptbOceY7CJRoIMEEOIigkJJTzzhQ1QJjhDFEmCALMcRSagMcxC7wvVILBI2UkkNotVUaSeeEzEPfAIi11k5b4GB
ZS4aYBSCriqTd4o4oZQbzucdQUxZxnCCErNAhC/IqK+oZoQRqC6ANfK9oFMaAUaa1QcjXBlKIHELGIYx97sO6FtLZKOkaiCXiwGpCmdXGOuuQhQhggxjDtah9zqqmYhhRS6E2UgqjtQaAEuoAcM7VUkLsYw0wstZAB4E0VhjLKfYoscZUdeMQlgAACJS1GNqmqYzRfsgIINOsaipZfyCNYZR7PK9KwgxzsKjqsm4QRAIAicq6rg1ACLmsKKFzlFMMrDEgDCmzeDaunbEfoODJF57ceWlcV1XUbzdKeYQaSAxJCdIcQUg1MBYErJiKbJgtJOnp8YNkfYUB6BHNHYUGRiFjgetttsY3MdYGN6aYZKqREHvD6ZykjJD2er/fTb1ZLTUCkAQAR5WQzGusqT3PRslKrRYzMStHo1YPMb4yPrlxcXGjNARCBCAx1jljkQPQUQChgxBAYiCATlsAACIWWWAlRNBCAKGG0GJMIQ0MAA5U3RaDMwWVSBcujVRWIOW39azcq+vjuLWAPOY5fzI8IXXtgGxvOQ78bJitnls43B2+/tU/xJ99jEWWpvL44LS7sZhG7bLUWpJamfhcuLHO3t95v5PEIJdSAT8FH/7J5955eedPvvb2Z/7a051lkx4VSy/0vvIvbrdw4Go5L3S4GEyH1eP+1sbzWy9/50v4UqLqkSj2d4ZDTpegPm6lWEf6lbd+8OFnNyevna4vLt97+ZubP/FEMRftBCz3wojwLJ9EwPgJbJqi2+Gz8ogEoDZiMCt7y8l79w6f2Vx856W3ANECNJgiRvTRwYPAq0Qxb0c8P7nJKkl9ktAw8dDRqKLOtgj1OBIWG0CUkEbr1V7PAQsgz4GT0ulaDB6MwXz6i3/755YvLvyz//qfedidP5/s7U0IJ9313u7N04Nb+36Itp6JXvrBt+ZaNNLZxqyttf/jv/yj/9O//fLdrDAzwmMKLEzSMOnGdVlRVTQCWO2wBrp2FulWQgiGtTVei2fT/PR093Dv4fH7bz72yKOvffd2DfD2xza6/Ue0jr7z7Tv417/8n/6tnytzMB6O33h3h6SP39/deOzZywhYU9ce4pos//r/euPv/8d/9dFzF1//1ksYljZgYr371/7OZ/7gx/4BAIBxppVyEAqpDcBp3P3QlQvGqG7I5iTO5nY6OmKUZPMir5qljbWPioQAjbn2MHFaYmP80Pc8opULvbCW1WJ/sZQzazXxidTKD0hdKoQQ9UnciwT0OWej4VhbRAk1xkqpIQBRlDrTnA4GrW46m8lyMm8wJbYKeCsKPWu1tRI4aAjyfQyxYX7QCFurxuWDuq6ShFsQau2cxUpKghAAiBIPQgswdNDHTjhnCYNaOqWJdUiqJvITxrymLhHAGGFrrTUNQEJBIoTmAcc+YIx6sffGay8nSa/bWTk6mXAMKHKYMC1kY6y2SggpZjEgRGvpJaHHSaRZ0QzyyaAaNEjVFIF2d5FGiZZNEqZlNo2pRxD0GDSUFlXW6qwi2kr8cD4964SwqY9pmkAXVaXduLJSlmZ/f9hbCIupXOwuTapRfzPNjmal1a2wFySdWhtuKWM+xsBCyxlzBECOu/6Kmp1Ch3CA8/EYwdhoK7Q0NjLKRYQQZI2ohLM8DOpKWJtLCPyAimwWRGHU9urZDNsw8TuGeyj2mZOlNkLppNtFUvsBz8oyGxYLy72tzUcw5YHv9u7vPPX40yNNlh99VOVz/uAoG+UnDyetiAch/einHika1VtK3nvvACN8587B9ubij3/xU7dvvH/t6pOjadUYf5Ll7VL4fuQxcrR/sNxbffWPfjcM+d7p5Ee/+CGHsMVqMhsfHh598tOff3BnsLyx9e2Xri/2g5AHK8urWabCS12jZTnNVzd7aeSV5dDytlWY+y2Yn1ECgdB+GAxOTz6YBX5AmrrxWaS1lcpKjaaDuswUIsyDJp/M/Jifu9ILt1fPHg487D3YOV6BjlHVFNXp8WE/ibRDPOSiyY+GR+t8TStnrAOQ+H7knKmkvXjlSujbtBuHUFRFxhkmGCJbOtVokVnW00IV1Yz7KSdUqTrijOLGoXCUiQYjAwkmNE6Dalq7xp3tn21uMo860Wjm+cwBg2mjbFVOtbQBSZAGlSwn5XShv04934+5nOdZ0UBVWSCb4pjRjlG+kspZgK2VTdaUU8qj6Xx6NizvmmzrwuXHwzSIAmNx4HvOklpU0FqZz7WFOgy6qxsaY13lR3fv91cWrl57xO9vtysA7z4opqe67ABrrXVVXvlhvLy8ZjE4Oz64ffs2kPbC+VXqkUZAoA2FTAvXWuyZifZJd+9gKrJc5LAcNmkUL4R+6rM47a1srBckIkBYim/d21UlZBFd7MDx0eDcxcV0hgsRn4zq1SvLMFyRIFx7bMMxDK0Fw+Ohm0AYWtuQOPFlWgxHO7ePhXXGSGtBoz9wyQONPCFcUyhIyca58+3FvsKo77PibGakkNVcGzgcjXiG+dE9C0yiEmDhZDwbFzWHpJEAUD+I/KooIGYIwaQTtZIISD2fVdTj+4czmckmU7oxYWJJk49HhwDaTgfnZ+DBWXky1v12eMMcdruFRjg4d/XpT/38pY2t2e6NtQU/oduT2fThyWQ4EZNiNslgOw0Sz4Om/NqfvMSYY6mvA/LtV9/5zI9c+/gLHzlc7d3b++oYm9Ny+sknPtHtNpPBMI78fHCClNSimWvNlO8nP9yInDXT+cxgCoDzoAaRN7f60tPPPXz/QXO6g8NElPJw97i1OFrprp5/+hkWB1/7w6/evnuYpyVCBEBlSoG61mKXybLbWqi0a5R2VbOUeOXgKAkR9jEoixCy4dnJ6vYip6k04Gg277U6R3s3aaNSis+G1e2bR/1WIiBgUSdIQl0TPwhaYYhsXpnp6urCneu7EPlRO52U07QXy2xGoYOu8ZwrT49ZkyGRilnRzMvNjaWz0WA8k89+4WfOzobXb7z46c8934/d9pXzt3f2P/2zf+H1V958p9g9GZS6nl3cXI5b/taHHtk/q+uz6cZ6v0KOwVBBdvWjH5reP+pf+nS6lL71g/sf+flfUoGfftLcvXM/OznVdfX627cneR6yoJ+2eoGXZ/laPywncynEp5+/xLESR7OFtaV2ezoZzhfWznd6C6PxuCwKrXQlpSPhwvlHBiYKO+ubMD2+dwuavL21fu6RR63IlFGd/uL61uPVaAin2bycG1ynayjsSEdw4+P1C2v6fk6xXfRxlQ83Ny9O5hZwz/MTj/fq+TSvZoje7obJ2tLaN771tfWiiQCRgA4KARDutTstamxW8ThlFFoH2mlr8+L27/7ei/NMfORDV967uU9RFyvVBlM9Vc6Hrqqbcl6DKl5YOdePsuOT02ndWCCNy5sKIgMI4dxLEjw1BjLPWVcq64VJq+d7ZeWg3F5JmFPWYgcIABBAqDTEDSCQQQQdREK5KIoZVULU1mjnIKMEAocCiFl4MsOwDvppywtKiByAwEGnhCDEm01nAYftXgrDZHw2KbNcljrwl+dDyLwGE2V0AUElBaxnMyfgQrpY2/rPUEXHx3Nj9jopw4
Hf6bRGZbPYW50fH6hiamyxvL1kJNTSsJgHkI9OJr3NrSobDkYDYc9SAK3lWV5DNWHRci2sApT56+PxwDbCFJNmJiuVDnWoznIcJco0xkBkEYCwyRvR7AZhjMLoyeefzUaz2XR+OjrVwjqGtIUWGACw09ZYaI3FjHCIgAPYZ1BZXWmhrLGGMOac5j4FEiCEjDUQYlkLigjBCCAoGgsgwEgudINPPPdC6He8hdbh9cMjNTTeAupsDe/uCkfzpg7i1ACHCT8dnBC2djDN0jRu9fqEt7uL3MynZVG2UvvTH//p7/zxt7/xzZf+0l/+AiQaEa6xVqYps0Nr5ayWjeLUaTcvaVzSUEIllatFowhCCMPZ6DhO0iyf1s1xP0nHw53AjyC0QjbHR4Wy7HQw0OU+9xn0XK/XVqV0GreSbtBKmvlwe/2JwawA2DEEeJgYyObDI59C5IXFVM9N5TSonbx49eLwqKhq2Ot62cmDNCXQRGLm0+D53bPXTqqTPo+Bs53uIkbLTQPn2THqyOWE3n77n09nty9de3JwnJ8eOwvg8vJaOal+8sf+4tvv3Lv/7o3VhdW2dTvjh2krqOazMPHizsrm6iPf+96/Pre9bRUaDPPu6urh3rEtlTx5gMRcGOeFveOTA5KEysiytM4aggEK0/3D+4jTxWANIqdsPS+maRo3lZjPxqEX1hUg2FCivSgpqrl0TdpeqcZTQoDQUota08QCdDica2U9DivVDEeTrYuXgbCtdOnSdnTv/sCYJk6TrHQzKYMw9KgUtW6axllb1PJPR4IzRgshZSMIRUEYaqkAhEJqRGEYxaIW0EM+8oEF3GcY2LwsnEWYMMw8gkjT1EkYWuOsc/NihgnHlkqtqmLm8RDHcZ5lRmJppNHYAT6eTKMoQhjLRhijHQCEEkioFg00gAc+pdwi5zRAFhoLjcFGWSUV4UgqU0rpBQwCCpgnrbFKEwKMs4R4AfGsBQ46qY11hnNqgUWYCC0AwEmUcBRM5yOAYbcbF2XjjPU9CgGq6pm1LuABJMwZ4Bx0wEolMEKUUwssREgrY4yBGBLqiaayBgDotNIA4VprCJFRBhBCEdXWIggBxJz42mhnFYAWQaitdM41ssKUIuCMqoHDlBEhJGeMEAQQVo3ChGhrLHAQYoq4UKqWkiCECfI4r22jrUKUxUlijJFaB2FoFRiPZwDaOAoxch3WkdAoICb5qJN2fS9kGANglNEYk0ZVSjXQMEIxpARDKLUxWgEHIEIAgVoKSvxp3TCMkANSyhhHzkpKOebMOlAUGWNMGEMgjsOolkVdFxhAilzot2pZC6cZ9RFFzgErHMLQp5QxVpUlQDgIAgeAscBnQdGUQteAIQuRQ1Roo3ShjcWQcRJKVUIIzAdPgnHVKGelAqZsGs/hgPOyEqWQlWg4Q0VdNkJqJYUDrTgVokAYQWCNFQACqQ0yGBFWydr3mZSCUtju/rAUczgdlFWWu2z58V60bDRRDBBoFCVF3WhKKHF1NrLYRt2Frmoqr53MVSV03ecxIl5ZVsJpytksOyvrPOksNIVMl3rLj12+d2CSlS0as9PTvULWKDcojHqt1mxmAXY8Yc4ZijQFEFpOSBubpLXAdH5ElQHV9OGt76099inVSEAdRIhAh4hzVkNEIcQGIAABANBB6yByEDhILSTGAYgocMZYAAmBlvKIWlH7s+Le97//uZ+6SoLWTnlcy9xqv9NJvDDQQsm5C4JW2u746yANwzvvz2bY4qVUNdK70NZxoFQ5Ocyms6YOMlDXwMD2QkflajwqVcSv/czTl8/FSay/8huvXr5wcefG7diPpfVHEwNZM5tOaa/+m//D0//+dx4efbvJtHUt79z5jeHRDffie9eW2+JoPjk4RcgWme4ur3e2+6d4cHt/Nor7r90eZ3N7cnKa9J7sbT91cxwvrhZZXgd9r5yMwzhZPPdIPimimN155d2k27329LU/+earTz156fPXFt78+tcPjjITovX1xW4SzQ+O92/f7HfMcoef3L5LHWEestSW0CFj/STRkEgrEIQ+w1pbDQzGoBZFJyLE8/OTCQ1pr+OFvWhSynbv8w8f7m89+sJSrz46eRNQkuJY5/OYqwtPXRhPSyC92ei03U5Wux4GFFj9R7//pdXULvdWPv2xT774+ht3jw6NrHxAsJxcurz06nu70hE/ZA7hsmzmkzLtesSjZaOsAbVoCEEzOX/n/hsMh8ALX3ols8peXdrY7LQHD8ff+frhv/jX/2YjvHjtygsv/7vv3jM/OHjYeuEnf7qbehYH9bQoTybJZPn28D8EZh4swrmUui6+/50ffIACpTTGGCIskCWQatNsn9862N39wXu3aO/Jgtg+cSKfh2HAKdVleflqZzycTIraaJcGnse9eVZaFEJLjdPE809GB40EFGBOw7WVC2enN4I4ENJUQiZxAFg8mQ62tnqjWZ22OqaRxminpRIVdlIJiRCzVl06d/47b79y9dxGXeU8CK2wAKA07s1mB5QCSmLjCOUQYSqNRdiTQhmDgHbEZxAaCpxxDkNgjdHWWVAD6zAGDBFIsdSAMYIVTuPlvZPdOGRaGSOU1MA6QCmT1mqIsrKyCDaNzmflU48+86U/+oOV5enl7XOmnkNkAdCMAj/gdaOCOOYsPD4bGsgA5eVweGf3Pc9XcTt65PKVV198cWVhmSMiJQHCFfMjI2vu0fHwFJOw1YrrUhGeGGVHe8fz4aHq+JiifDivGxQmvXsvfSfX8MlP/FhV5yf57EydwBSP51NHQg0BCcNKFAAYWcpWzJVUnhcCKRWipnBx4F5/7Xcev/ZcSHss6EiZMeZDBDmPuv2FwdmhAzorR2FvVWoIjCnnhYNAA8d8LJr67ORoubt4dnKqs7i72mrKspjMkFOdlQWH/Co/NrCJe37aaxulESGz09Oilqur3cY2y9uPFrUWBQgoUQIvdFKAxL0H+5tbS/OHh5zhxy71PMqnWaWN/MPf/8aVxx5t9xfvvvX+0vlLly5eFZZ7lNVV1pT1wjn6O//uN/7m//GXf+vf/MHlp57PZrU1zXA0feLpa9PpGUZ6Nh322/F4Mr127eLtWzuXtjeP7uwGXR8AdTqaj2e1tMO0Vp1WOB3OgiTcu3ffQD2aTS5srn2AgqpR2BFkAPE49bwqq6aDkdZ6aakjIWxqLSEGgImxoH7n4d5x2u8XufG4TNtpEKcQguPbt5tsHrWDhaQPa4wtZJ4XpPj44Ym2eGHjkWc+82NFdnJ2NiA2C5IlA3nV1D6zyKfU6wiIJSAG0kZKCBl0SpTjolGlICS+2NTGaGJYzFL3yPrm2b27SadbOwe0wiwK/FConBIqldDOeWFEAAZOIZ/SMLIWaOMcwMYy6GieCQMA0K6RJcLYCilVHsaeRiFZYszDrMgQ4/cG+tzjj0JGICUml8iD1jmjNYROW+kgcraxgFMvJYh0V9cNEfM6k6OzNOkstf27d3d379fd7rLvcS+IZeMAhpjwte0rS3L9eG//5r27WTnaPn8h7YYZwlGw1V76xHj+GgS6qeqo3S9FIXTx+JOP9to2S
hMW9UjSV0XhYz0enzBoJIBZXlVNCiButbhDcSs4d/Vj52Sld99/P079i899ZDwaNHnWTCfa6Yiy07s7qpL5TI9naFiQ0rqqbDCmmOKmdhYYHkLqMwq8MO1cvHB5dfti1lT1fJ6eX9RGGV1oK3gEjRT7x2eBlwRJaICFLKgMoGGiDZNKhATGSTeIk85iDztJnUKBJRC99fbekYgtif2ohQMPApTNJ01RexRn1mQ1MJbmlSsagYmVQJ6/enXzQ8+trHbs/KA6uqtm4yjgGKSAEIXGdaMrrR/M5gGZxV4AEOaMYaEj6RWnJxcH6+dacHml8+d/9IVf/1dfzRESvieg5hTOJ9P15dbZyVmv3ZoVcnwy78Af1t2Mp1k+mx5PiitPXFxYWbpx4+5SL5yNTrLsxGi6dfUq7oSkOsMoOzqc3bj1g+2nn/vMT3xxOt7/3S//5lOf+BguK6gNxABYt766JQQRsyzGcJKN717fW+t1p7NZtBjuH54Cy8YnhQtTzznUhaxv56B48vknHrz6zmMXlr/5yv393UO8vhzHoC5HM2l4utzqLhhTCgsyCW/fPRF1Y0zNOn3WW+jF0ej2KQT5h5//0MPdUoxGqprevjtdWlzggeu1u4T4lz7zk6bxcbc/nx8wqiHyZiObtvrvvfi6GE5mMt/c7DYFub+z319cWLzy2GH+sH+hnfqNInQ+mGFKW9uXHrv40dv7erGzcdA2VnsxDN958U+wtdeePf/6999yzHkhT6NksduVqqzy8uTotMVQu+tf6DFpwMZTLxy8/WpHu9W1RcIEIDrud6RDEtgwTZe21lu9S8O5t7tz+NRG+xDWyeqCt7E1uzduRvP15ZVKI2Tr/krr3Qd34qX+aFD6jc0HA0yIn4bOAuJFZ8cnhdBFXXTLKSWEcaQr5bM48uJRVSEQCKNHUl15/ONeSlGRs7TjnY4X1xfDxV4cxqf3dyfjkytbK4SAvcFofXnZI2yYl+/cus18HxIxzs88j8fdRcpB3mRBV2HCaTdtqC114XPmR9HD6kxbsLjaq6pqVuS+iVtRNMntcDxO09ZkUvYTnoYMgqwX02xcaUmA5/keCTnEhABrIQaUeYwldZlnuQhD5ADmQWy10VoSZAhWGHNT6b3B6XQ+Wevq5aWorpVFLsAWWMmCoNZAGg9oIiTygOd5cIrzSd6UJ8N24oVRgD17dDypai2b0f1msnZu7c9QRYkPYyI67ejk+CyNWtOm9rkXLQRVocOgrQCx0AUhRKapVeH7JhvtnuzvrC4utju9Is/rSmo8J0Uo5sP+2sLCcmA0wo4XRdleDN9+8+BsGAz2jiGzmBAr7agZUYyreY0QXNteKmWdjQocsP5K+/GnH3249/DsZHwymJRZJZsGAQQxhAgQQrQyBGPmoTorrXbAIcKgdVaUNYKQQifKBhMKAbLAQogIBVVRIgKCiG6s9pPYe/7Dz2HKZKOnp2M0q2fz+Uc+f+0PvvbN1aR78HAsA7+32LWlMS567oUndmZnZZXHHEJlTFmPpoWltBeFJ0f3Do5uLvTXOmsX3z9UIZo8+khXgZLHvjXNcG8kLGuUiwK2sLQiBYLA08pWRRG3kvF0VMxOSRBHrAN40e2uyemcAGIQ4XGYVeO6hBh5Q9Wk3GEkw+W4u9wTY0UCvy4UQDIIoqLck1WBWZtG653uxuEgW9zcVMObyiEWrIQ+V0rK/LiZjVVjLM0nZ9nmRntej9v9tTS5eP9MLy1dW1xIbr3z1YWlFDu6O0WPXH2aji2j44Pd97FPl/qdZnywub5Z12Saz8bZmZLoxsMXS1sn8fJEwW5v7Wjv9bpqCA6Pbk5a3c+08dOh+aZ1ZDwdtkN/nhUPBuZv/IVfuf3Ob4YhjnzOPDerZ2u9y6YSbeydinsor4Euuy0vSoLh+DAJYBDwzWjT4JWmlN0kQqhAHU8qO5scJqHvtK3yUcDbzua1NF7Y4tpHCDs75z4XjRkejxwLNq6+sLc3pjoKkqegITWQlKqmzhmPkzAoGikNqgrmRUEU+SmkANwFADgLrNKUYMeZs0Br5wDSUhnjqAWyrrBzkUcpJ2VeK6H8IGwlidIGY0q5hxwJo1grqbXQWvtBUGs7mY0whBg4hIUSNSHAY4AienI0qnIFoAIIGSOwA4gwxIi2RiOFrEWQRsyvmwwxEsexaZRVklEGAIQYO2yKWlhKAQsp4VIIDCCACEKbRGmWNQg7YyTGuNvt5FnGCZXKIIrTKBFCEsiUahzADuCslBgQj4bTakoodQarRksOKUXOOYRxGDEpBUHYGAUwFUITSjBG1gKIICVEA6uN8XwulDHGMYwBchhhDKFxmlJqjFHaaqWdswgBypDRTmorLSDKUYaN0gRCBzClHADIqZ9VudYWIAgQ0lYg7BoNjbWB7zVSAQWFVBg6z+fKaAMhZR4ACgFgoY4TjoBrVB1FXpHl1rnA94PuitamrmuKISeYe94sm2NG+53FeTYHzmBMpFDOQowRhtAA7XMGQGANlMpJo3ziUYS1gdKAomq4cUZpjEnIGbROK6WMFI1w1klnMYRCSQAAhsQYIIRw1hoHpFS6kQhXaRQaobW2CEPrjDNWa80YRxQ3jWyEwBBKZUMeGmOkaKyyGGLmEUecEBIDktcl8WiAIiudtZYQPs3m1sGqbuqmAco2QhDqaSWANRjDopZV3VgDHHClERATJWUjHITECz1lftiAlg+O6qKSkW2HVucz1o0cAHU5QiHmlFWZWFzvW3TlD7/0rReeWxfzEUu5cM1cl+0wlgAZhIVrikJiwOMoTBNvNBfE4yRp47DAMdm4tDXMdwLcVgDaMqe+dL6HcWWgzzFnNsYKGu0rYIkCDKa1mRvReEHn7VfeirpPJe3IAOMoAgA66KBREBNnnQMaQoQchBBZCIGVyFnrgDUaQN86jjAC0FiMLUBzqZOFlbS39pv/31/7wi//YrtNKkh8vtzMJ3J2Zo2OedBbfkaqcJyNpK6vfHRbPVpPdg67l9s84Tt3j9a2OmHHb20uz5tGQ0gg9XqtKCqdryUw7eXoZG80xvLCExcuPrF58dGVu+/B//Br3/goO//oxz9y9506a6Z79fTKZzaxGb34laOWQw/v7P7I5lLLJ6c7+71Hn1y7+uzw6D124kKqjidvLz+eBuP5ylbn9ttvB8x+6sc+/K3f27l/9M2//vf/xuCtX3cKkyhtAycKvbmxfnK2X88rH3CgzeDoqLe4vv+w/s53fqPvOZZ4GqHpqK5t2aEuCUw9n45qKOeVUshC3W7xpjKzxuCWJwBQ0lmDsNEcuzTEELMa2agX28KsYzA1ejjJ6ULnz/0f/mbYP//eN/9IjvNrH/vJkJLd05snD4df/Kv/0VvXjy8sni9Ls/Pa9568dsnoWyojadxf275MmRgO9s9tbA6Opj/5+Z/7H//VrzUkDwMTU///8l/9jX/3h2/+5m/9kQDKaBm3OKPcYT6ZZgAA3+cCSq2lrKRoFGjm7dXW+uLiYDydioyur/zy3/q5kOrHLi3v3Llz+928hVZWvHr/5eH/+Tf//vrlpc1nPz6dNB/b
fCpZvtLSk/Hrx7jhJTA/89M/1sLyT00HBGJiAAgC3zooZ0JIG6fpYDr89//u6z5H/8mffy7phtYB02hb6ZpgCBynEGLMmaes9SkGjihZWyKbxgXapc4zgNalGA2OAUB1UyNEjCN5bdR0oK2r5kZWtqY1tioII4tpXWmtrUFoOJk21j+qyvOPXSOuCaKgqWojNIBgMj2jhIomMwbKrIiSDsAOQhr5oZEzTjzqsbIZ+czj1MtqZZ0mAFMIpKwBhNZoax3GXmWkR9Ora9s7hzvQof8/Vf/9dN92mPdhq6/dTz9vL99ebsUtuAAuAAIskESRlEiqK7JllYmcRMokE3lGTjIeO3GkREocx44yE1kl0liUTNGkSZFgAQkS5V4At9/77e3t73v6Obvv1fMDLn/g/h+e2bM+63k+yzkIMJJaK2ul0YAyrRUCsFoVhFOlDXa6rODt2185On2oDDXWFmUjhem02nVtCE/mqyzuqOHO0EEyPT7GLr/14q3FYqJl9eDRJ3E7bqqmKuT61V1IQJ1PKKWchT4NykpXSDHmDzd640cHTVkMezzP67oR7c4g1+Vwzy8X29/41d+d1o0H7UuvvSSECDtsdXGST8lLX/3yZHLKfd8p53GWTQ4p8RaTEwRhq90F1pXN4vnnvga0tzg577WGs/FkbW2PUKSMylZzqICobBRvJ3yzKVYGYkyYx/hydVSrImB40F3LG+AN99Zvvp7PjvvcPjq/GyRe0m1XRWFFk2b15uX96Uoi5DEHQSM8z/kMHx+e3dp8CXBKw3h/d/u7v/8NEtndKztBf316NPZQcHB0kvh0bb0jBIIcXr20EfnD6WTst1tFJVuh7HR7AIPlZHXj+bXvf/zLb/zcy4/mR7deeXVtY6fJVtUi55FLOonRKgzi8WxRVsvdy5t3Dw6fHD74a9t/460n3+gP9xRWTx8c9tbXFxfzja3uPC+X49WN3uW3Do+iJCryYu592rNuJx1ngBaqaGqPMuBwEPt1KREkmKq1vTXdyDITs4uiFlhVThITYNfqhrydOECVBOvbl2UeZ/kEENgArUSReP1Ot5etMozi7bWeXD3xfbS9GWNNZV45DK1xFmjMAiWdMhayWJUlBE7lqslrZZrVsnxydPEn/9Tt3nZfNPX8QQGsX6fV5vqAUxr4oawbKZ1z2uqKEoOAYxBgAKRVWjZh3FVZY0VT6mXU6fqtuCk6UldlUWBb85jF7Y1iYhhsceZhHJSrlbRu89otc1HsxLAXhUBKyAghsC5ra6Q1WuvGWhDEHlS2LCsLMMLMOTg9Wi7P0pdescIum9Vsd6tFfMa5CdvB4lw1ac5jz3EKKaeMrF++TFukOdLT1XK5eFYVqTZ2NjnGAXfLJg6ieToCpBFgFiZV/9IW8fuNigxFncSVF2ej06dBsrF0etBu2QZJYaez1Spd8Y2r60HbOL2zbZ/ev1OsP3Hl4v5HP7hy/Yqol7MLY5AIOp3JqRAaxIMekoD7HQsgITjyS4cxogQDHfSZ1rCYn+S+g0GbECqrWqiqkYUWanNzU3lqGLW1JZiA63t7Tw7OuEHOKKhkYGE36HSHQxrFcSekUGiVYg/P3KRkLOmsc7/tlEzPj6Euna03N+NByC7SxhTEOgxcoGvd7Xb625vdS58Z7t2sF1kzOtFphR0GwkY08vpBqxWdnI2yspoWyiKCmUvTTApktavyIvDJt74h2Oeu7u73bn/u9t+Kdv7lr/3eRjfYGAwXuvRFFXPW2VsTTZFnDSe4adQPU7B3aWc5D54dj8YX03w+WmttqkX+7P7j/nCzhmYxLxeLs6jj9m71V6e1ugDvvvXtb/7m//THf+JP/dibb06ytMiN0aguVSeISWtNQbS9cfn0/lvYVINhwLvhyfHZ7c3LAE6Vk4PNdhKwplptbK4RfOXXfuN7qNjtb+7ki9RjKNfu+GLx/O2dulTGyQAVVbUEhCHiK7ukwvIgQdYRiIxpVuPKJx3TGFOSVpCopMKMnE6XN1564dvffPfBUb6cZs99NYpb+OTZw06fHNx/BCHtrQVhm2SLWZ5Op5PVakaKZbq1N5idTjm/30WWItjtRIXip9PR+qWtk0fTcK+3tn/V39p0O8tf/J3f/Yt/4S88//Jrp4+eeciDqjo7OwmDAEA7XiyNqgGCyEO7l2LWlIlvXnhx75u/8wet2B9sb0yOLhCfTrOV8Ycs3t546U0W9++//yQejTY+c3vQWfcJanW66crakvWu3MJr7asDee+DO/lscrQyLPaplZeurAMaLS8WcaeVQ8M5HaztjA7PECSqKs5PLlr9AUsSI+10NtnYCcUoPTh675UXXwiClh3Q3f319Om9aTVPejYmTs6XpxdzKfStl1+eHB9P56Okm1zMn37xa595/9vfp2HUiKYSI8kMBrhRy3xebW4kG1ubpeOTkqbpIhpsFCt5tJxnwEA/LBqTC1gYpipshOp2+pdvrWWLvBtECSxMs9jp8bpcYAAB9KCjVmtrACfE2MIYhKhGiFJgy1XqQy/uRhJogLFTWmpBECrTnCpmFXhyqldzyFmAcUk5VlCXVa6wZxptCyXzPIjbTVmVq0prRT3SamMIhMYeihJbKOBVqmmMQ1Hs/RFUpLXsdkgcgRBSj6igFc7mhxt7ydb+UEwFZQGPPCsboerV5GLYHQCAWu3t+XIiEDa19Pyg2++OT+Yin1u98MM2QrSui6JKoYx/8OFslA+jpNPIwvf9UoskiJwV8TpJm9J2AqpIC8JuFE5OJroQLZbuf34nHL728P2nTjaj88Votpgva8+n0IGyapQhlBKDCXKgqSXGIIiYdbZWAnECMbZSQegoRd2W9/LtnW43HAyi9a1BU6mmaZ4cHb/yldc+f+OVf/Jf/P15mk9//9thkrTj1uPTsxD4Q+7v7m6e3j2x08nFweNb25f9KDENzOfVrRvX33v68bPpKumERaPjYWw8T5aaGO/hgyedmGtnKpEbiGIvwjSAxIxmi1YY1cWJHwCIQwU1cpr5gR93qjS1TgOL/CDhtLsoi7IGEIP+Wufw/t319eHGbr9azAmDUmsv9oOkDRvpROqIwxEZxltQJVqA5fTCagsNMLouc9tbf87KJi0OAs+rhIGeVzo5DPynT5967Xit22uqfKe3meanWIBLl64SWhsBd3o3k9Cvp02WF5T7w93LVYlbPnMYL1cPi6xs8Q7x6WR2ASCfZCNl5dHphzywnh+Nz82s3vuLP/bXfvXXf8NAGLdijzlTVHMldi/ffHD0Tm5E6NAqk/Z8wp2IrM7NalXhGikUYW10HEROgc56T4pV0mnDii0FQX4si4UzoipLwsPucE3XOcaw11/XRmpQyzJlXruRGltdi9JL2sxr25gnffrxOx9gNwhbrTJzPGiubfUX6SFxlvo6YEyj2KpKD1qYag8j2vpU4sg9D8fIOKOyoq4qpAxF1PNC4qB11gFUN6WPEXAgCgIHTRxFRd0ASAkmWVp5YeKMEk1dl7WxVmlrAHRGI89TQshc+V7QirysmOdpqXSDGSaEa6MCzqWQmBIeMilUmuYBZ4ya1Wo
cezGDFDjredRrJ9oCjHCarpyzsR/WDhplRFX6nDZC5dIQDhDSQANGUCvxnFFCS0BQrZR1DgIHrKOEZVkWhGGYeNpCBIBudO5KAJEzzvNCBUqLoLFOSUkJFkILYQBoMEbCKgARJtg4bY1zBhJMtBJKKYQhRlBJ5TCllAFnpVIYYwCgBVDIilMOHBKywNQ3xiIEPM6NdRZBxAjBVFnjjLMQz9IVhlZbZzVAGFGCMaEAIQwB5/EqTaFDCFkEcVlJC5y0CGGJtC3KjHLmnGsaWTW1BRRBioBrKm2dZRRhiI0CQqOyzCinwOGibkolhIEYe1q5WqSMM4KQNFXkE4pxKSqPYoQIApAgGvh+Y+sA+ZwRRzAimCCHKXXIFVVhgbXQckY8ystGeNxXRkkJGimt0owxjEhTN4TgRigttdE6jCNMmRCCU26MdsARCKU1mHEf06zM4iAghBhghRTSYGudNNIq46zTUjmHlZRK1JgTiB10QDdSCYUhohQjBOuyFKImBBVCKKURggSjuhHGVEkUWm2MtcA4pT51FYVhYbfUZ17bM16NvQ5ALW0do1zLCjE+WAuF1r/xW//+Rn8dl4thv70wDYZsq78ZwohoR7kLIPYQCAmUgb+a1Um71R72p6MUG4e1evzOD5gWaxtt1qPLSV2uZjCMITQOYEgjZwCAPqU+AIAyn2GXkbqu561W8Hp88+LkXRR9FmmMGUMQYgt9jKwDECJrAYLAOossdAg64wiGwAICPQiRMgIBq7RC0HHGXJxktr78E28+Gs0X4kzXhnkQkMASaZ02GmgcU5PEQeRsGSAr5rPsbDY9niRhdHr3fP/mJT/unJ0ddwawquoyyzf2NlWRLcYXGgJrYOj8rdYe4YbuJeenD+bHx6+88cX/y80/R5Eo05nPUHe9XaSL88NJp9/7zOvbyxMR9OLzRXplP+5dvdG69bOSNFsb3QeHv3Vv9v2bb/KvXtovR/dsM/rjPx3HDQfjxQu3/b1eriLvWBLoWDGvYCbWOu3jR3dqpy49t3f3zrSLkygM1/3NS/svrPHgG7//y6uy8VpBk+YXiwXfDYQsqukCtVutJHaNWaaNrcqIIO57ikOgDIYQBxHlvrM1MmIxzZLNtaDFadt/bufV/+GXvvk/+yt/7e3T4zDQHjr6/h/82//9P/gX5XLclCJfrLbWNsQ01k23nimK3a3XX2wN8tXYfvjuk//1X//Zb36QHhyfvXx9T8/Ke++8f/Xa7f/qH/zf/p//zd+9mI5CpD/+wbf/8n/4l4Cuv/6NX1WNhQAoB3UtqbPKaIOcNZoxDCCFjhiMq0I+SQ8DjGssn502/9u/+7d8zJKwF/nR5Fn39PyEXN6pJk/V4vxh/a1vffOfOoF/mw//wT/6e9/979/zYhHErc3Yf/3V9fnpM/Dp54zWTS0DP3LWIUQIJQkd7GxePhjdefnqkENTFtIY63s+cEhLo4S2xmLKFmU56HS1NEoLpZXDoNvaslmhrIJWdWJiXQGxQxBDhDFzHBGnZZJ0sqyAwFV1EVCEjNJCQGOiME46fQAkqkG1mPUv7WMBEGKMGR/TvCqhUwwRQj2Imc8945TQwI98SCA2VOhayRwiVBalpppy4rS11tRNyQjGzDcOAa0hNBEFsp7dPzi3CiLkSdU4YKVwjFFEmDTKAUMIddABZznDnMPlcvzKaz9yeWt4dHyUtEOAHCM0l+UqzTY2Lw03d7U0SIr84hinS2uN8IxTSpcyavnhsEUcS9PFZDVtU0sY89utNC1UpXnQwhhXWXX68f1iMW23o9k0XV+7HrE0L0tg7Mndw7LWty/t6qwpiuYj8WG6mm/vdimNDew9fvAEUej3uNSiVKnFSFrhHNK1AkVWigIRGQ5aRWq2NvbLfBV3Bg7p+WpijYKQWuhhwLH1m3TRFDn1idRCivlk8syPola7Xa1kq7MJvKScHDTHj54sT6Uyr7z2p8+OHsfdNvJt2OpIDRBkSdwhamH6HmYaIbS7swuU9gNcyubexw9CGpb18vxw1G3vB9vexnpCISQexbG/vUZ3Lw1u3No5vD/BjeTMu/Tc89npdLnKIAucdU8fPjt8fPzTf+In/+0//+bO1uXlySOPR4dHD1//yudWWeMxUKtmMpts7K17YbSYr772x7+GqPvk/t3nnr+FgLv/6P0XX33hzp170QWnvqeBOLs4bPe6jZLrW+tN/elDH1JoC5BoGmXqIEmsMxbIvatrwDYEIUQgY7zIsyigsm5UXZ5NRnu7gzDsAwOkUp7Hg90BctH0HD07vBBSDjcH1aoWddnpdLgXzydHzdtn6zs7mPiUM1010sr+Wq+YL4uiZkkHUUbCLkBclfV8NB5f5CyBo1xPpLesiqiqtKqH3SArbJYWltO0afJR5RHWahMChAHWWqmtc0YuF0tHeOQzIRpjtJQNZFwpBR3qbmxB4hazKUEtgzzpt3FLYM1r00hlMGlBiKYLbRHbWm+rerU8X7DNTeJFjmCCA0xZntlSlA0wBBFbgqJoKCUIMEpa1jbf/4N3d/Z2ESFe4jHiV0UDSM0oU57f6w+0R1gQVlkOkTccbALXzOYTW2OE3PLiKIY+g8RgmNdlOl02NYhCbzoabV/es5V0clUqCFBjAGgPO4WSV67s+MQpWEUdfzoeCwHXuluOrI2nTzcGNGiHb/3gW0xLbGyrPej3Fo/vfDLc38NOW9tAB6MwjjoRBIR7ft2UVgljnHGuzvMg5Mtlna6qTrfmLEii7iotTQNbUQuHLl+tvDAAADHqaVldXMwxoB51GOig40VBsLGxFbZ6DhJrhawaQnC+KOajlPmJ1+06ghaLVBaZVUXi2eevXN0c8PZZ2Yu8bJkDSIjvxbG3vd1PQueZskNI4bLFeB74cdiOCedVUwQmAo2dmnNVNxIhpUQn4VpYYcFsUq5vdwpIP3w6Yi24hsjLL+2urf8H+XJWr3LfCzkDdb0KOdja6q+UxVwt609dRdPxAlGzu9n1KPUwnl2chR2OMCxVtXXlMlB6a3MwPj8dn9XU85I1uofphdYffPu7ccBpEhDKszLtb/cgYEvRVNrWo6PT0wuHqgHDfsvsXLr6yQefJNggp8KWa7LaCDB7cm4MudQdUo0xpkIC5jTirJHC6Ga41kmXjbZ5v9Myin7vB2+vbbZ2d9ePj89u3rh+eH7eavfXt6+kjz9cLVIfilpS41BV1RFn77/zQTtmm1v9NFm+9Uv/ImiFRZau7w2LXFVlvXG18/Bw9PjDJ2UlacCDEkohT8/nu+sbk2fTfFX7vo/zor93ZX/vCgja6/29bHL26P33XvjilzcH+ObP/UQ6O6YU9Drkyffei0N/b7NfS8MJWi7mxmhCCelHkJPR4fwRc/kyNSU5Gs8/eTJ/7fZ2DF1dNxubg8K2R2rQshvr+73j3/qvBjtVe2199OyCx70ik62soDpbzk9/8PRYNCrgnna8v/vysIPu3v1ustbe3r8yX82zRgeWgUrvXr46WlwEnfjJk4nfH3jE67daJ4/vrPWDsD18+Gyx1W+XDjLEDp8cxhp7OH7u+f3H957ikA
WtCDdVOp8WeeYxurXVasVskc0xJ/MyDSkadCIAiiJrHCTtXjfoRsYiowHTijjk+f5q1UhlMOKc0qvr6/cfHzLGlbEQ2PUgEk5RD6sim6azDmtgRHBIfU5qRaRWRKEaQgQ0QBBBjBDEGEEKCWSrZWEBCrodCAECGAKGMGGBC2LTExBVSFby+x8ddLtsayMatKOylDCgQcJXy6YSzDGQLst0krY4NyK/sr65TPOLMguSNo39dDpNAhJGZD5Z/BFU9OobN8rpZHrSQIJuPDccn6d1Wa1ODcwNh4EXxOOLkyTyCJfdXgeoijIbJ8haZqSDCK5WE6kVcNTz/ajdmZxNmrKKIlZZ9K1PDnWwaQV1EVEZEWWDHKia5mw+vX6tx6A/PV/22z5S6d76sE7B1mb0Um97erFAdvGZW50kCOvGGgsgRhdn08UoBYwCbPNVyYJwsaqfPDnRVhujjTRx6JXSEYKikF2/sb52Zfjc8zcP7z9d3x6ePTo9Ojzd3t6Po07Y6fV09NYvfxdrL/bNs8Pl87d3x9V8bT2wWocMTk+f9tvdxVjeaO/qXB/L82v7V+YXorssVJW5oHZ+5IyntW4nXlGvmrLe3h7GAT4dXzAfdqJuXkw39j8zmowQzLNmGXDmtdtOAi0R5wlHvKmVqZedXlsDbWCNPe7yiaorxMhoPoECIW1AvvCd8dv9TqdFGquVtQ3GznEcQxvBKCHUKyYZ1NYp67CZXpw5hF12RIhPoGv7SWbFxtYNgCiQE0rZcpEiqImiw8t7N/cvp6NnTkFp+Wbv1pNVsDq5aBG/lmT/1vPzFBoAU8MpASxaW2M7DqdGZaKqty9f1qUzMsvmHy1nCz/xtHG94TaIAU3SUMTaQIO5rHUyePHK9c9On7xFPaqENAIsTd7B0ZXe4PvjDzkfxu2ugaqY5t3BtSDcIbaO4+3VohrydULanR6dNfcNZCzsIIcJoshz0HhB2C6KmvvAIeORykv0cp55wRpCLYxZf+gbfX55v//R3YnlyXRxurd35eGDb0Y9zo2UImOAJF53UecW4v1LL9omLZpP+xR+yCFiq9moFyeDdmixN52uMEKNkFI2wOc89ClhxhpgHQRINNpooLXSQFNKlNR5nkJoMYYOAGcNwhgDqBullUYQNU3trCiywmgdMg4Il0IIqSSliGPMcF0LI0TAeRj4BqlO0kMNbMqGIh8wbCGwEAIEvMATpTVaCiM4IdBh66AGRjgBXAgdYRhIU1cKIeuUFhBhLQz3fW2MA9hpE/otY7VSje93y7wAFhgDgiBsqkobZa0CgCMIGSMU8+WycsCt6rybtCLPU0YBpTCC2jmnrXFKaWOcA8YZYwAAUiuEOAIQY2yNrSsBMPQ8LqV2BjhA6/qHj5wjDElepUEQUoKgA0ppDFGtasIgpdxJxblfiwYCiAEC1kHrhKw9zCBwHqUOAOgBB1GtXFNJYI11zhGntMHcY5RP0pJjFEcehoAAwCgJvDDPCyGFchYR5pyGzjHCMMFFXmFHAh4ZCACE2rhaCmQBAg4hZAEwAAWcr6oV49RBJUXDEObUQwivlrkG2gs8IW1dVgRDZS0mVCpdixoC5ADABFmrDXISSqWozDQhiFImjTW6gQAYVSOIIDQe48SCgNOyqsOQEwQJAVprTEnTKAtcFPm6VmVVqFohjJXSWhsPMsygaGRaNAQhY20cR9a5IqsJI8tVlhYlQpBQao2xxkAIsrI2xlAE/YAPhq1PG6bbXrqcFjIchB3IQuBF2tK6nCohMcBGawxUeu94+EXv1pWNJwdjFkbcgRBgomrggNWK+jSdplgZamHs8dV8efvzzy+KyuUqTEJMXCbm1cSwxjXLDHCUJJiHXlljoIVyEACEEEAWhJQam3scpZnVqaoX5ezx4XrnUn93q6oq7RGEOcBEaYecQoRhCByEAACtFMKscRZiDmFsXG10ZRxwVmNnDDEWuGVZSVTf/vnXQWIw97DDtfRJ6IfxWp6OxGICzENhakxZ1PddWaQHo6P3T65eu9LtdJvGAdm9drnzzV/71fG0BE5MB6sr1/t5drpz9SrxWqJUebnYuTxEGCYbfaXys3v3Ntcvz7NsfJa1OFjv90bN9LXr7f3upW9eHH00URczMTssn7rlT/+VV1LIlksPtV90vV+7+nzgEvf1O9/wfC+OBeFxNSu3w95nt7a/9yu/iJJkgNRGLywmo8jHWbEKe5tOgLMn59QRo0E5t+0rG7NFs7n7fOD9diuUcdIapxe7Wz2ApN8KbR1rw3d3dg4PnpAwwL6GRnOfAWhNVXQogxjy9nAlcgKqFo0N53Nho/76kdJ/57/7zwfbL8UHS3P+zaMH33ru9o1CZ5sb/T/4+rNXX3v56SGpWO+LP/7S0dvfw8Aqg8uRJEvXFe47X/+llz//H3gChQyJYeen/uqrQtnxUv/UH/uPfv0X/+vrV9fN5FFYfvDHXlAtEsVbfR7e/Bf/9DsnZzmQHFoqhIUEUQyFtohYiGEjRBACDFRISZplFsOzUi3QWDbaNG7ndu9i+nhv2Bvu08UC9YOIeH409L/7wT998Q3v5ZdvdpO15dnh6M5vfPvdTy0tQgjOucdRVa7ysmhEHfi+Qw5T9+XPXel4ACiJCUQSeZhyL8jLoh3HmKGqquu6Mn5sjCAMO4elg02dO5tbaBlhBFOpJSE+9cK0mS3yai3psoBJXQspEOJ+wBBwRVlSTKx1Buh2OxkfPZY13H/ukoGubpy1GkBjgPX8EELrTIUJNEY5hBvZJL0dCHBRpFA3AGohyri3A4ku8zxiTJuGIhz4xGjlTI0g1cpp1XgBBhA5RSj2SqMJRs5Cii0ACAKMAUTU07KiCDNMmrLSNRydZunqd25evrW1mdDEX02zuBcjiAFtIcvyxYphMBqdRj6JE5Jm0loY+wnSjmAi6gxz32EFsCB+iE1tYA0IAMx2N6ImrWAmIcFRfxAnDGZotJzYvAKR19/aVdmYJ3C4FTPujc9XwGJbxXWue5f8Dx8e7L14ZXN3TzRlvRKYGx6F1KrGFijGZ4dHcdyBnbjQkIZ8skoRpq2tjXqVUtLxAjJfpf3dS3VWdvvtcjHycNVO+vPZTKo6YFHAwoPjcbu93R9eleWZnJ5hrLZ2Nt5/7+Nnn3y/3fZiP5GpI5w737u80UsvDs8ffzIcxgCxwyfj4V6vqMtorR84li/t2v4lNKNZluZFWmVZb7fz1Z/92sGz8ycHF8M4VLk7vHOwuMhVWa5dulQvUwfQlRuX7r5zEjn/H/+//pv/89//Lw+OP1pOqhdvtNdivsyX8/H5+saVMj1S0rU7/vgCO+IYwKfPTv78j/+1yfHT3f11p2spqjjyPKxjhp2FVVlibLWCEEEAbNKJLsZHfzjDVH4QUMSNAcgpLRSDZHp6DoCL21HSGp6OT51w1tnF2Xme5iyKlCGyoha4qiwQF63Ed5C2uhutmZnNl5VUvI0xcFBKgJsg5GXmzk4qK1OAgU8xI/DseHZxMd7a2rz14gBRJJuC+4xibNad9OjB8cGz8aKs1McPPu71PaQbUNY2bWwtFA2dt
YxgIG06nyadKEnaQgPu0XRVAejm09VcW9UsL924Hbci5kcWcyMMQAkNNuNuwDyPhO26rr0BBTpt8lzbphV7tVCZKNfXuTPIQ0prrZvK49RRv8w19yKIaggLjwQE8totoZUO86i3WxhOodjAa3VZtlrRqmwas/C5n+mCQOxHHT+IJK4REP1+tJyruoYUJx4pvV7nQuVeJ1pb312Mx0rJuq6sI6s0V8I9Ozr7zOsgcLnLSwEx8xMrQV00jnqh3yKmyeYzQ6E2GkD68O47P7J744UXdxaH71w8+ThKfKOVj3m5usiyGXC1k/W0rKKtnfZOoOpKayuk5cwDgODYEO5Jpclwo84nO731WpC8ceV0qRuBnCVWd1qd05OTbmuwv3VZBu2js7Nuu3d69MBKZ4GKWq3BoB+0wqjb9XioSuEg5LEvlVzMF/fvLjBfCyK8yEZNVSBnB/3W/rBlLDsba6dpjIPt6xvO88Z52W3zyuTDEPoMNtlqPjuDDLa6bRRQz0+AI6zSOx008Fk7nE/Lany2VFrFvj/cWMsMLrPFcrK8vH/ze5+cbvT0l1/eiLsJCn3TmPwEmFILKUsjnx5caBDM8inAnw7QtBH7167mi7LJy0VaYr/V3R1AHD158jT249H8uLfeHWxsGegjBXVVL8fTfifxwnarnUxHI1HlAcXdYVvXoAb13v7g/gePppN50disJhKZ5XxlpTUEF2UtoNm91uG89eSDBxY7VaXRWgKZ3r65VQE0Hs1iikOLnz54dun6TQvhcnWRLcCwva4reXx6ZqE5On6CnA3VXLmO8ml7c63TDQ7efszizvVXr50ejM/zZVnVnSHA3HR74drWelUnUuWD67dGZ6NJmR9MMh53L718dfP25fHd++lsolSaqlIGMfDh+WIZbmzSDoCOls5MVucegs28OPnog2uvP39w58PHd46++CNfUFXhIH3h9c8pRN9/9yNCvJ/+yue/+9b3taqLZT6q8xdeuiKns8rg/uat+Z2js9EFaZ595cVLkMJsdeFIMb1jtn/izyDPNJcTMR5x6er5Itna6G4k44N79XxObKNzGLU24jXOW/17h6XTNOrvz2epgxeWWr8VaeH6g74r6TLNk8FaO2WycePRSZ5feKE/n5bxWks34/PTRX+wyRkFLlhlOvD8+w+OqJ+QIHS2WlsLVS1HZ03c9tp9trxY1YJs9ftSpMgohByWhiDa63dRy4vavfnFGeUsAGCeZwZrUZUceR5P1vuRElnC/TRd+RGlPhHlTBgLnbUYSQAJ8dsR03IaJd1KC845BAA5IqQEoGHMQ0o5rYEhXhQrzYx1oqohshgw65xUShobRnRDguV4ymiQG/p0rMbLlMO0F/hBF0HfhGEoJJDzKp/PHbAYm3aHXszOqkI2IPD9CANW1zWBXFEebUZ/BBVJgKNBxwgBoTs8OEOQ9botDxvgEAqSWgNtkTKaIh2G3vnBNPBd1aBVXt1+8bnz4wOLEcRd6AV5kZejwjhoET49ke88FadNMByudwaubpp+NxrurN/58P5Mite/9GqEZ3JVWgMfPr5z+/rNssyaamkERzYJCXVK+0EgVO0xraTzuI+G/t5mTCPS7UcP3n/kJ0mY7M1m29Ko48dnxjifo43N7e/9wSeDNf6lL20tGjF+ch9oW02LftLxul3OulJk64N2o9LPfuGFjz5+G3vo+c2rpU51Wl7a724OBveeXPzkj/3o+9+9d+X2C7/74bcoQ23KR9nq0uamQ1Aa0g46X37zR3/713+LMzZ6dto0Whu2Wtnjp+fY98pp/sKtnTbWqJlEMMfYTfJZnPjMtCDs+n57PH23HSS9/ZuwPBhN7nsxF+UKWRx4kEFtOMpJkOm6N+hJWVGa1M4/u5jfXn85F/K5V784n7wNoamk7nTXFuM5bbdR0AG10s2otblWCjGaHLz84o8o54mmrEu5NihWq9LzkEO23W0Xxr744usP7z87PcnikJWNKzN87crrezGKYXRw/yG07ch7fjl77EQ2yedxpw9xx4+8g8d3bl65CuIYG9VI5ZyezatsVTSNG80qzu/+83/+H+5tDp+7vl3ntqLDElfXdm+fH72VTu4HAy6L2inmtXrIj54ui5U1WMMwCACdf+nNVx7fnT25e2+r0z+n3gsvfXGvlcDRSZ4+dc4SSpPBlaq0kFKrMp6EeS1XxaQdcmmpkzWkJur2g/iaczYIQJHOW72ov8VKyofrt4+W7p073w0QIJD1+v3J7ESaMltJ4vUZaz85WWGRW/Cpvm65ygjCvhfMFgtCsXK5cygvSmUgoggTRDnDiAihOfV9ylfpHCHY7nWV0pTx5arwQ0oZyfMqCEItVZEXjFHnoLHai0LfT1RVKGk9n1dNBiCOIp8Yj1AShQFwoKlEVesgoAjZwOeh55eiDuIQcWoh0hoZYBj3OWVCaIRtwBFntK41xA5TFHuRA3A0m6x3ur1eIhsFMYrDUDbGGmUMEMY1TelhZgHQziLoaW0wpQYbC6B2hnq8akQQxsoYLQ2EoFaVBY4zEqEYYJrVFUIAUw8hgoFDkDRKaOgwxRhCY03g+dZBpYyzxvcYss4BBxFACGmrrbYIQYgg4QxhBJSLeehRCjBQsg6jqBGSIEIZt85ggvMqp9gjiBR1GcchlA4BhyiliNRVTijFmACMPQgq2fCAOeMcBITRvFKMAgotpcg5AxGQdYOgr20BEaAeI5ABBCnzimyhjbLQGWcQIoQzVVdWGc58AwDj3ChSC2UhdFZprbXRGBrnHEFEWagbZVWlpUIYiVpoi52xeVX5nMZRYrRBkkgpGGc+5Y2oMaVh3NESaCEcgbWoGbBpmbfihIdBVVZAAmeRNWBepcYZQjEksKhqDKEQepVmkACrfK2MhQ4SBBBADiIIlVNFKqUQQhkWhYQQ51xRlNqaUlTn00mjDGesk1BrnDYGQFhleRyE3PMhhHlefno8gOK5r16x2IYtOL4YI2cx8ikI4n4HIebKssqXsQf8XnK0KFCUMOZBbRBHkNbZ5KJSsB30JpPyOt+TNgUlrZfp4ZORv92hLQLiJhd5A2HS2sgNrO2GU8F8dJ5gwCLCmS8qgAHHBFlZGldACnnQajNWrOY0ckMkP/pXvxcNkjf/xlcyEdQWOA9jQilExhoEobPWOUAwUUZDBJBzzlXOFQAKBxBwkiPYAAeh5QlVosZBAJAxDgIIq8q0AWZ6icwUsQp4XFRC5ypb5XqWVRduZ/02EK3pqlw+O/nmN//g81+8LoQjBPa21r77wdPcR198/SZCbn1zJy9rnK7S6vTJs6Z3eWeSu8vDPtnfePa7h4j7q5PV4bnc3A3T96rvPPjkYuQWhc0K12n7qbBe8nztr/W9Csb65ht7YIDPFwzH0AGwyuDjc8ln8Mt/8q98cP7JMjuAQrV68XsPj16/tX56cDFduWf68KVb10xtNrf2wuGGkmg6HXcuJVs7/eQ34/nROOlR0oucliSgk5UQihDQ/ebRqpJonssXN3f8ssmX6c5Wl4dO1jVCqJYlJgYT7BFaQGQI+5//p3//b/yv/pfonW/8
1Zc2nrzzTkDOnjw+h4wvJ0dRBRCSs/Pp5vpnz+dVzUYTsdyKfaD4/MKzzYDSYrRqFt/+lX4S7d18cZL5kLBsVVa2uP35N3c2O7/4z/4fR4cff/FPXT1dfI8NjLcRtwfJ3/xffPGT95788i/c83BPGVRpqWqrhDXYtNo0jkCUNG0HkZPn48Vwq50iBzjY3olW8uhv/p/eWJwcVVnxc3/9z9x7/1k6X33pj136l//tv8E43rrRSUdqeXa+cTPQJe4M6R+2ikBVN0rU0CGtFIKIUGKEW03SKPCUUHUtgtAHztVaKgUgxtAZTjAMOcEoK3PisbYftMMkKyvnZK2FgyAJW8oqTIg19mQ8C/rJT/3sX31899350f1Bt13nEmJKOZO1Ypgy3wdYaK2ldBt7m3c/flRkRRjEPAq1tVoYiqHnt7J0whAwUgDrKMG+H7tGc2atKYx1lNMg3ta8o1RNICmalZVV24sAABhBQqGUgmJoIXYAWasZo3nV1MAS7CttOMaM4uliEUVRXhfUER8BUdXEJ2HY+uKlK//u3/2bwaCNUZjmFaZBWVpCkDMuLbNyVYQeVhUQTkrTVFkGtOU4QDQO46QsR3VTA4gXAjPON/z2/Qd3b1y+Nc7r5XzezKp2O3AQKecfPTlc2+xf3dk5fPwUR5HfifL0rMnK1fE8SBLGvWLVOEP6wyEk7Or+1vzJo9GjD72Qb116NROkcnar5UZHJ2ESxt3QWtHbvLUoRdxK5hfHgLAG4tWqoA4stIXhhkN9U2Xl+BkildLFwYPHjFHG44j7HoB8sGFZ9+zpY2aWhMmkl8TteDivabvFPFBmFXakXlZBQ57efc/nxopVXQIHSdKOvH4XRtFqkUGjolbsoGt1BgAHlEMvwMjhyfmcQvbKay+0fDJ6dlIZ1rt1+cmTDw/vrP7Y9Zctb5SW4+MnN77wnLDW87qfvPdk9+ZW+8ZWg/DvffO33/jcZ4u0KoXG1NVCUg8mXb/Vb62tD9rd7jd/+9eSTn+yyM/OzpdZjRG+OJru7tyyJCOef/PmC8+e/oqC4PjRA44+fQGtUcI1UDU1psBQsMyWcdS1BhVVU6umNDpo75TqYDmerURhCXCU2ahfA+4lfjvCoigZwgaTeLAu7mVQ19OLZXc9bmZFPivjBBhURK1+GMS1LRpnd/auPL7/bl7IXGLk95U2oFFOK2s194PWRsdbb5GWt3f71nw2W47T3/6D97GBzuBWFFTlvIfX1rZ6HsXL6YxYbZwRyhrjEAAEQRaycjRNWv3hYAAhitsdwlhdGxp6CCBrgRcnGBNpHQv8fKmN5SBETUMpSXSTlfXSbxAHeJHVAIFGyW2C/HbM26yupOdHyhbAKmkNYKCz2akdqZHu7qxTW1Gjwm47rQrAWNAdEAsaBHvra8TAWhfQWuhAXpS2FrZwpqRWeVlTAmzCdqjcyg9dsUwDj8nA8yMnTLGsm+PDgz5VcRIOtvbLGmlDGe9QFi2XaUhMXedlaghiWsFVenH/vd/u99qzs4ccaiwbghH3yfHxeV4oR6Lj09P+5haP4rX+/unjR+0+4xxDDdMVyppSaSdL1WhJSYCRB4AgCHPPdxYwj2hXH18cJa220e7hw4e7V16OqD+ZncVRkIui19tc2xi0Oi2DHCbUaIMQwAgtV+rodPX2ByPJ+8/fvoldM8/kYlVSjHc2Bpe312rrYOSvrW1V6VLJAhCQ1FXMTF7BwaDvQTiZjI3Uw80dh7jFXDiGOSLOeZS0Yj/qdnZts+yX6UKvluVgsLWs5dFyNnl4/DSAL775yvHp5G5weOn6bcp86gW2EQ1WzaKZjkfOmpWuF41a/8PLs6zKj58eIMS3ru1pAepaPnh03lQmWNt+fHbcbccAyTgOL6YqSLp719Ymp2dr210cdTgLYTZ9+smT/cs7qypTuUYO2qBaHL+/c3n98UHlBf789DhdZmtJvFqNtZMAcOd3MSaD3U2j6/ZazDi/mOQd7hESpotprYvlMNtY75IIA0eKGt548aUf/OA7CAiE/dU81TXptJJ8fAFJJBoJSvN0fpIv87p0g/2bdMjWutHxnfdMGQ42oqqslczrrIiigBFKYV4sxlfWe+s3B3533e/vdl7vre1tiPnxb/zCv1mdzNfbkQTwybPxMtNrW3t+FI9Oxvv7m5uXt0Gjju8+ozi8tHX743fv7l/Z2b12PRufRa4c+NVn33wBKfsTb7zQjtm33/6gxHYxW9m00gY2SfLVv/xXzTe/5ecP7p9NalO/+aX94W5YfXD34tfOAJSRh+rc1qMmLxtjlMorBCrVLGnYcZTlZdOBXVHane1tmZ9u7ezW1VG+WiT9bouyT+5+cpAvsTWE+kHXv3H7hbN79wiyZWNZa93z/JODey+99rlmVqyqvDe4tGzY7u0XTg8+KISsLG1Us9vFHUy+decDa1GE/cnJQgnViWKhxGJcOgisgVbjojH7XlhX80JZT4RRK8maOUGwEk3dCCFxHABdZ75HOsNW0ubaNgDjJAlOxyup6mVWBL5HPbdYpq2E5VXDGeU+c9YVqup4HBMfAGCdAhAhCL2YMIghMA5DI7XRmlDoMHDYkcC1KN9Vrap0IQ9PZ1VRyJN5vYpUp4Jb13s09n2hXF37zEAE9/YH08WFMtZK0A4IKVNZKI+wuL3uMTqbLP8IKjo6Hn/mtVt2uRRCBl0iinpjoz87OIAoWFViOLCXL/WLfFY3QDsnLPEd5zQYrCVHxzOPDLZ7fWUcZB5Pumk+Hw7WDp4en5R5VsDNXvfZw+PI9yFCgtJCHHPsaSkvTtIX9gYPHx1eujT8Ez/+5if3Rh99dPdnvvb5fscK0JhE33z+1t3378RhPNzsPjtaUd9P/ARZWBfZ9HDle4lTTbVUSUgbQ248tyWBCal57/0HrT7e3GKjswPcDuJ4fZiY+Sx3xuuh3nQqzi4O4f6mBu4MgOFePDkbpasl4cQJvsqwsiVivcePx8bJTx78YH09IZQMIt82qslXk0K2Nq6Ml4dvf/iB7zECSafTmmeVHw///e/8noLw53/my31fLcrZfLREbmGhwQFotW9oHcgaaF3Fva3h4NbThx/53UtdD3HMfNbrBO2q0BKa5WJpPTtdVQcz8cJXf8bIizJPK8L39q5u7/T/s//8P/lbf/MGYbaBsK7UYpQC4DE/oVEgq9MsGxNqqlz4Gk9O7sKw39263LFXmuasKYrlQgKrUaXC/o2jkxq7XlMEcXutvz7c9IYnq2nZnE1p6W0PQOrOjxceSQhzgAbOtayV8/ni6tUfJax/cva07SfO1QBZTZXfa09GzZOn8/0rMeRkvijyTFeF4+uJx8t09DYS8zB2ccvUGJwdrlQ1DzbWSsoG65+ZX4yA0XmWj2fporQ/81P/8SqbfufR8bwus+l3y+UU0BoBAS1tEK4l4uHa1ctvnhyPhl0SJXOoz127dX781POI73es61m7Ikh0Bl2t8mJSr0edanYmU9iLPR9EplJzfWGEQB7t9DuLepA3CENWVwT+oePdGZOlRdMIAyFjWCtnncMUWWuoYcaAumy8TifmXsC
5UpXnexR7edFwzutKamW00ULKoqytcsBajEkSJVJr7IyFwKjKGd3vbBIGHcLSOqFVZZpB0HPS+L7HE94KQmA1ptgaUWQFRTiIfez7TVUjQkI/kE0jtUIUQwohoswLHdBSS2UdAkCqphPH0KCy0EYBpU2CcFULCHHd1AYDQokByAEd+oESUgpDKVFCa6UQwIxxBBECwDhYNRJB4AcegNpiwzCDwGKMMEbGWOUshFAaqY1GhDhnGeFWK6MlBMhZp52xFmutjHUcYqcNwdQ5DRHQSjvkuOdL2RCKGlVx6hPOlNPaKICItko1krOAE6SUNVZFic8pAg4ZRJQUymgHgVCKIKydBpBGkQ8AsBDUQkqpa+cGw/W1IRidj5arJol50oqrssEGWeAQxggBq60UzjpHiWeAox7HxgEnCIHQQgdxVdYsIsoaBxGhTElltHXOIQgQglEUaAMbVStgWt2okTWnuKw1C7mUWqsf4jmjrSGUOeuEVsJIZy1jXOoy4L7nsdICJV3IQidB4WrorNOgVLVWkhAGscEY101d1cJaK4QqZe0qVUuJnMMYOgScQUVeMk4cwFUlrLJJK2ScEEykaJRsGqXmixWEKAx56PuUIkpoXRtlHMQkjELGiLWu+UO5e7XM+11mVT05WXRbvc2t9vRAWt2FDDdWcBqAWP/MX/yxzlbv/PzEi2JtnAFaIAcAEgzzuCslvHnzBfhxlh8USuPphajx6ecud3K9PD9KjeWXr38pal+rQdvHMaaoUB/m8n6WjwHCjMbqh5Z00UitmO8hwp3BTQ239y7b++bx8dHqYvGb//g3d7/0xvZLV0WjOPt0j6a1QQBBBJ1zyFkCOXRWypkDAjnjIFCmgM4HmrrGUA7qsgIEKB/VVRMGUcdry8WDkTiptMKo1xv2ytmqcMJpP+qsn//g6dmTVVaPV83y5//jV9/87O7awCsv5ot0nOz521/9yfKkfHx6BJpp0BlgTbL5eK5X09R3h7IpwO8ffHRpkb705Vew9D/6nQ9PjtON9c+c33nn9ETVjmZpnrRa7TV+MZo++vj8pT//M++/8wtnj34laOWrmf9oUcavPecN++1G3rjNynv63d99787bj4OYOaKfnU3iFjzJi+5mO9joHmemsLYd9y4Wy/2N9aIqHp+JniEHH33PErJ1eVunFcZg3tS2kAQGDoTt/Rd/462ve1gDj/zO/bOX9q+9/sKtQW9QT5/puigaahAwjZXICC37u1uZRd9+78PnXv7q5U2lVnfXt2ebW5ufwG6w1bn04ouj93+5UsVsZCs9rqZjfYHXdzYinxVZq7+5243XcjVZMW3SuZOpWLWxbOdQ9Pf3ssKVSsBk76/93X90/9G/+Oj4o6q1TIZrfNhbLhdJF/31v/NnHn70Dx4frAhoYwGSiFgHDMT9Naig/bv/u7/z8QePPv7w3t/7yz/yj//1rwX9SBSQJNVP/dRlBQ8kyKO17tvv/L5D/lk9+v/+s++LVdPd8IqyfHY0Mw13Gi1HxST91E8BIfQYdUYSxhGhUkhgDCGYEGyVJgQFfkAxFVpCR5QSQpnE86wDy6pux5GrZFNlFQEKIkSoA6QbthpjLAYAQWFdwH1ivFeef/O7b/9WuZhE1i1WuQFOioIJFrKgKUvrLAQ/VEVD6ZqT1WTLNabOgna3kQoCBoyzBgZ+KMWchVwrS3gPEK8bduezx0bXXhDORNnyOrLMsJVWNchZSnyjlTGNVo3vhYQC5QyhPsK0yCpLAYEkIZRCIpVURgNrAs5ElTGEdjdunc0eUd93GK+yoqrk5z7/elGvmkJQRmnM1gbbp6fHfuC3ulyCBWUe5SFU0iib+An3w9pgguiqyLRxDCGMyPNXbsl8VVzMN73Lk6m58crro8OLsqkoN44S6g0Z9WVVnY/HQZggQo8++VgIHfo85CyIu47BpNu9fPnyRx/ca+eaM9LUS4DYt//9t/7S33hhc2vn8fl0fjHhKj5/PNrd3cln463eHsgaq+t+SPLlSpew32mvVsvL168V81F2+AeqSJuQ9Ad9kKyNjud+LxGVUtbTol7bXz+cjPvcY84ihHTpxlU92Nr3W77KVk7ouB23nJqcHTgxQ36Lx626NJRxxOPh3vPTfBlRJoQkxGEMyrwWZTFLKwzQTrxl60UQ6TAJraqwj9NCfu+933r11ZvzqW5y3V/vj8cpipJn5+8M1wLd6MXxauXpL/r8re986/xsfvVWR0jrc1JXWTFetMMAOnxxNOvESdRKyqb8/Gd+bHwy8oIo4nmSdBDhT48PeOQSPz46era3vb9z4zPFxbMyzQB4CwDQ6kTzaWq044A7h6PWQEmFvZAhD2rSlCaKPIWMcZIlxBnUVM1qNuu2ONSOcaB1PpmJChg/0f2dNUwqPdGi8Ry3lbOwKqkHhS7nqzMrIaTmwbN3JBQA4rX1ja29ngOzdm8zX1Syzh0yEHGIaEhhL44GcQiu86pBj5+dYquskUELluWZh6MQMdppQ2BYGFiLPI8DI60ExonNrSHEMYCQ+56xwApjrQVWOYe0MRhBTqiRQgoZt9tNBaUxa90btvDm8+9QRow1mPJ5Od3c2q+L+vjgnJBZd2tDS3hwOm712hBZRmDUHiitIsLTorKu9sLA1NgYoyoglMxgmbRiliSEc1PWTkgMIONcCIGcY4yurW3wkI2ns6123NSLg8Njj7Uh5Yx5Ji2lKomQSMjp03fXX7iCPQ9AQz2vzHB/40pWN4FPxqePAJLdfqeuXDRsBy6YnZ+m5ydNOcHIzGerTrvNOAUaVLlUEjAcqlSScpkBb2f7ei5WVtfIGM+jCnJXOeTAeDqL45BbxrkLIh5FXYTMarEaLdOskccTs7Gxdml37/ff+3Yt6+du3EqCsJ10ut2e53PMsSyFakrGPKuaVV09fjw9OMuE4i6KxnUGmqyoawjlpd1+HNHMGN5t8d5usLNu0kRnS2pWw3V0fnh3betq0upao9PlDEiJAsA9Jg2SUjlVBpxDBhthpLKU0d3re9KFG53BZPxkbWvz9Jn3ZDT9+ODJu7/3nas3n39w774XtPcvXRdaQsqkplF3iGCzyNJm1RDM6/rTtUGnF0+OT5PWIJ1cAI2J5+1d3Tx6egaMmhxdeJubJPZ5wi/ffi09fLxcnHXW/SrP5EoBL+pvX/vM4DPtwZpWRdJDvmuePbxTVrg+m7bCIMEZbKQQKooDjTqqqZEl9WrCujFLSLUy7Z4fJL4oMyyWu+tBJ7jy4TsPEUTQ4aOno85gEJDo7v0jP2hTUq3yIgqp5zkvZMtp6Q8ibeB0tJBMaqI9XRw9/MTU9fal6NJ+7+73Hl174aVl47qtyFp959Gpl7smn/7ln/r573/nmxEFIptrGiAcnR3NPchffv5z49mFqtImz6ykqUltdRjEk+0rG9noKbQ4Xxbbyf4bP/ajP3jrYPfVN+rxTIkVtuXibLI8Xc1H03RVtDtdW8If/9Nf+IVf/k49q5QQscRhyyzFUbLZDptrvFndvrGbVSD9cOKFnsiXwCqVK4y8pM2krU0uivMx6zCvG6xfvfrw7k
AQARcAECPMusNxhiQKBSbj1vpdaUMcYQxkA7X3dSK+WMUVpTgjzE1mIHvNKOMhCFAbIeYQIhbEWltYAeGm0RRt45CAENqFIKY8gJhhBY6FZVBRw0XjvglXXYA6Mt9BB6pLX2wDNKMQIIAGst9I4y3E8Tba2QqihrbyzBOAgCiLDu5A8oOHvW9kxU1l0QNK34CIbbv++nf2T14YnGk+Fg4+T5Kaa6vqyhCpJw6GB0WtVx7NIU9tOYqvl/+ef+wXfeK4Afmrbeueb+5//Hn/r+849treOU9dKxqtp2vTp69mt7139/PIiV4Zsbg+L0vemz6fu/8SGlSd6HlIXTl7ITJOBRWwhlyqxvGQf68Tni0e6P3qn6dmWE1tJhQ+IAGtWqBiAdb2zP5peHX7i7OpqLVkMjiYfAwfmLFR5FUc4++/bNh8cXT6dHP/9zv7dpn1E0xZL9hT/3Hw5He6/fPTg7eTZb69evvbI4fvHy2fG117Kf+iN7UE1pwoVcb91Mf/b2l0+fPjs5nvY3VFusZecy2IOSMTS0ovbzRS+JtBdsY2iCkKZE5B23cv/a4Gv/bL2ao9qpX/rlv/Vv/uk/+Ku/9d/+13/l3//KNz767l//2nyFs0HYEXgxK0IFVsFs72YWh2FMSVOWrpbF0SkPyXC02wlViYVYIA8ihsHVfr84rckgev58qSP8xT/0ud/93vmv/NI/+j/82T/yc5/7zN//G3/3xdHR7RsHlYJ/4o/87GZ2893fmbl2+Bv/1YPLj7nTeO8nDj1qpPB7hxpQU1Xt7/ncjUnVhkQFoVnP2vWzy2wjnD+8yLc5F+DlN58cfLZ37YsH89k5RRjZCY+ipOfSUb9aLU7Onof+0MbJW2+++b3/5u/3+wBFB7T/alNMp/WpinW6tzk7uThvmjDIrSRWmiQCQpSyKznsECQUOe0u2rbz1JtShJA8ff4As15yZWNzRF8+eFGulkZ0MOF5wgbjw/LFpZlDbya+O8/6G7AXFys5jHZWxXI+qdI4jHy4ms6TjXz7YPPlg6fJMCYEnTxeOMgRSRbTqheETtYo8rdvbhx9ulgVbHYqt/ojAOYAAA8c5wRTmFCulKecS2UwxVI5iCgOItWsha5pizgm3iMHgFWGQJak0bqe9IZXZdmtigVkKCbUWu2JwByyOESdPF8cDwdxyrJejzpr1qKcLavTQs70ouievnr9jjdCalk3KwxxkuQeAalrQA1BlBIlrYbAURRJuyYUcIp005iqk1XLeSabkmDWCg0CkiVclV03ryPKTp5P93ajuukwoxBQzGJv9bp10tksSTQNR7euLhsVbAwNJMq4tbSglQQh0VkpbZTFHlipDUZEa4kxkKslo0xZ57y+ttM/OXn26dGR0Xajz975zJvLyYqwMBxtn0wW0FPTaT7O143e2Nw4PT4vH38stCHTIDao6RoNtFjVCIAg65VKaohs00YBu3PrZjmdQaBIgK7t8eWyTXtsvdbWocW8klUnhY5c7l2ymBsH40G2fXZ8XLTyJ774xrJ+HqUB8bhtW2/sTm/77FndNuD0ZJaMhkxACG0jOu8Ms44AKuqORymkgKYZS1KSDQjiHk0B8GXXBojlWdSUHYlDiHBRr4Io1Q6uy+mVzX0WUiCc9iZKe94io0EjG8QIY6HDmmJigDMBDHjYdm0rK8K4Fg01LXNGd9C2etGsb7/91vHlNN3auPzkAcfg7q23XxwdEe8HvcE4s56KZGe/KcWjZxfry1We0GQYQU0MIsaCcl42ZwtRl6vFrN+PvXaV1mmcS2XaVmGe7928+xf/wn/xb/8b/4uvfeNrnTNhBPsxPDt94oDvD/oxDlBEZadWy3YzYeez2Q/eAoZRnPU60UKPIk6UQDqgpbGyWYuqEXUbhfmjT19++nB999Ytw1RJRJpHvYQ7IYabPQwkbDVUbSP0Yi5mxRoHYFm3aW/ECecs2jvc7ZBjSTS7bKySjCIWMauksJJHkQcecLxz89A+LzkKjQNF3SWuV0mlpbyUnbBoPIQsJDSItTdCWGTBYDc1MVGGKmEIDXRXU0bDkENALKJS+wSlVSskdLqGGDkKA7HqKm++Kx71w/jK1Suc0ul5WZS2qKoowpRwaC3xXLVeKEnwQDnPcABAULc6wUDrTtcVJcQHwBpotfQeOG21LBmniPU9YloIJQSLSZT0q2XtkVWdkFrkWQ9T5OUadH65LJS1gFATcERkgKBsGwxgHIRxyLQ0GFthJWi7EABrfNnqdVc1KB9duxIn8UolrdA0iiPiZLU0UO9sHy5nhbEw4AGLAsaY8VYWdTmfTk5b0XSEY4RxhCOr2LIqx/0RYfl0toxoShlUXgccQUjq1dw09WCce0AdIVJqSyMeJNQSI1UtWmeTkFDvRBYyKSWmTMsugu21V3Zv3tg9fjx9/uzhzRv51Tsb0AofwK39Lack0w1ACHpdr1eeMY8Yh46mqWXOe9C2wlvgTAM5VtpY6YCz2ndVU5MYOyW9Femwnw3C7vjlywfnCLrrVw5gWayWlwdb6eXq4zWgQUCCiC+PCw+dxqr1XeE87gRQrdTackSY391OsjAUiE8v6+XM1b65eTNJKB5myQ8o2NnbRLL7zm9//+qdqwmz84vng15y7cbhYiVkLyOqu7isMcFGqDhh5xdnjqTjfg6hbaSVTbss1XfEh7ujpHUmyIMR5dPpM566g9FBe/Ty9PGT7SuD5TRzHjJiwhELmMWIX5xe7m7mb33xMy9PLzoji1pC1rL1aZT3X39r/3vffbh3uLu1lfX7fjwIceEp9w45V6pxL525qZZ6bYO71zY/+M1vmBQZubq2s9XrZSRQcRiPB+Pl4t5ienT91l2vBcns6eTBF3/vT548eKLr9vxyunt7Px+F1s7P5i9AEI0H/MH9T5LB0HJMYooiSguEGfdexhFDPHfGlCultcsD/85nP3f+3W974601wHnoHMGmk7OARwBYghzCHnoXQAIYUtKKRR1T8vJI/oGf/de/e/T4vYfHfRxcvTKkIabEiq7DLOQoA9Yb1DoCvcNl2VyenW7sbmKOAbY4gvt3rnzvvQ85doPx8Nu/9c+kcnu3dp1TCerDAK+qSS90xfqE2jxgRCpBGQ3S+PximW3sJqNNryuh67peEzeaXSwW01mUZVkU6G6htUnTJKJZtZoEDiDTBYApIbUHQmjjCYNmlOf9qwcn57PZ/CKhyADMGVuVS49xlPSsRRBFyLeyrh0miMUUMYmsljXHMQHQe+WAcxAorwAUxgjksDHcGscQIgxL0SijDIBhP5JVZzSGwFJoOMfQGKOUh846CSE1RiBvIQAEYAQhphRii6kPkuD0tIizAPKobbQRFgFgtbx5bZiEVrUdozgIQuf8Yl7mAS1bTT22GjT/I6koitne1e2mVqenJ3euHz57eWxwUqzmSZZsDTeNKysLCA4LySDl88V5nICtXm6UNtrXbWVFi7FdV8V4dwxAdHa+2o1YREHZysn5Srv02YujfMgpdiEBBNHxxmC2eIEx6Q+3y3kFALRW9RLWzyPVapZFEMe75G1MbFF1qumAzyd
1PVlW6ZBDwKqiTTZ2tm+/WrH5J+99dbjBhTEYuzTPVGdQOLx5ffP+R18PQxIMkxYXdbviYaSNU0qdHD/f3bsprJCAS0sjF3pFLs+hqGwWbG3I3epy/vDiN1/90fydt948evjcQ45Ir78xKqqj+aIQMGBJ/HD5YmMQOuKU9toUt64d+Pb6p2WZR4NIVeHqKMzx/RfvV8Dfvrb3k+987s/9+p//6c99uZ+OVTb4n/1rf/L42fNOl6aefuELn3/2+GgN2WWhN/Z6MWd+LSrRyWKuesHFeTsY7ncCZRqycDvgmTKlxTocxHXd7u7eVctLS1Ells15xWhsfZhu573xYHZ64VqT9bMe28Fr37nyfHaGtzLTgiiNst5G3dXV8jgKwn6vX9QOwvTldNYYgAj2CN64dnvy5Fl/eCfmA9PO2penxflDlJNpeXF5cjre3DE6MDoqq4JkaZ6k5eTYah0NtpfrwpmqruZdqzY3h7OpOhhEWWpR56KYC1khqFsU9ke7Wq2rbt2ULY5iTJPR1o40qzTHq+n84vjpndtvvXrjjbY5TxB/+fR4fHi9WS+ytK90PVt5LQHBnmCkjLS+W89PIKRxEDcWSQQj7q11y0WBIeknLOTRVDQ7o17KwkbXNI2tJ7PLZhAPWwuCJDDaQPfDG5pSyhgrnYuTCDqPCeaUFnVLOCZBWLfKKLNYrNJeSpwRSiltCKNWaQJQEEZREFutojyFCEkjgfcauFp2hBGOiPcWQWy09ggxRgmmHmMEvOg6hH3IvfXeSwkg9EJp45Uy3gPrgXOe8tiKzjqnnaEOGyMCmkFIlJNK1hRC4AxhxHlqPXBGWg85Dx0ASjceAEIxIITwgHLelCUDXClPSdjvIYKdaDqplVCCEwoAtkoRhjEiEFGlNcO0Fp22VlsbBDwO0k5WlCBGE+WcliKgAaVUqY5Aap1vtWYUAqVR51rrhHWY0ihMvdIYUMSgNtp5rz3gjDvojEHKeGcVZMha6zUwVkHHjNHAOco4I1R47aElJDJWxGG2XE2tg3Ge1UrHUQysbbW0xmAElLLCyijOIHAIAamllB56JLXyzjPGtLHOeu8khKBtGwtdmKQAQMiIp7BpJFQAYIKo19ZBghhECFKPLESgUcJorX8QFcSkM81G0g951MpOCa2MBQAFAdbaNFILo2trnbNxGASMOG/Wq8IBRzmBAlKC0zTsus46b51NksBITQD23gLgnHMIeIYJAghBrJ2VygAPEMHGOR4wr4zV1jnkIdLGGm+DiAPrnPU8oE47hDAFAHgXMEYJhsBRighGCCGllFDaOcsZM8QlAWultNIS+sOWFuBixaJXPrfTLha6U6vjp5tJtB1EQor+yHcaVhUwWl493Li2u3vZTrps3E1KCDSW69/8lW+9+0FtwChAIMz5v/Nn/vAnJx+0Ta06wQZ9wPxod9A0pYkXpXi+Xubjjd3Zp/e/9Xd/E5ABJdvnp/rD9+vGNdbagAdb47wtXNdKHEDKUFF5gXDU602rWZpmyEnibURyH0TQkxaAZS1aP7p6+2C498nLB2dAW+A8cC4fBEmKs4SJbvHm6/0H733lv3vyvRwDRjrIcNnWixf3D27dhZzuXNl4/73ngz4go7Sz7dNPz/f3CcmdQ+psdjEcb/Y2rl48vejauljbXtpT1sm1NH5WN83ylIdElWVx4+6t1tN5pTHOOwUWjIV3wm4iAEdxSp49fvb1X374l/P/7PV3dqZHiqeRtLAyYGM77ZN6c2zH2xhh2EsSYarFchZZgzAFjiQb+93UWuihJwRJCGypVBAlup9Op5fDSwnm+EYw/s7f/n/O6ubB0eSLf/gX3v0Hv7a1ddX1Xr+E+Ts/9+aT775Xnk2+8FNvJPD6+dNlrTuX9BHhh5uD9fzTL//orX/y9fvztfjw3mNTVHfeGDRVKQw+uPK5z/7Lv/iVv/P3cLT98Tc+sXi9cwC74mHci1MOsSJOuDg85PCzVbl9en8Z9++q+dNisdhOQTE7n5qnamBAptt2iRwIeZP3bk/OujAh87a4fv2z+rKt6lVAi3C3M7M23dpQqC3PXiJ3RsJ88rvt7xSFdmJzI94+7EXDrHiwOv/o00T0sivD6ePT3gD4qM4287qBAAQJv3Z2dh9iMD7I52Ia4kVZFtEg151yGjAevHxZBKl945XN6aMjpIGG6fuX9vnTAAMsLsT41g8Hkj0AhNKyaqIoIjhEOPSu7NoaEIoJlkpAD5MgB4BoA4UWUcLybDBfL0MYjAdbkBLHkbXIeiil0Z3iPGBpumoKZVHMI6c1Q05VTdd06/nSW9PrJcfrBoJiIzxPIzYaDYLR5ny9DEMiVDkI0igYzC/PamQ9iTgOqrbChNR1STzkOPDIAYSFNhATCJ3XihBlVqt2UXEcAk9YGNWeCkTG4/F6sfBxsjWIjyZTzdHm9auOpzbgEQiMdt4q7Nyg37dELFdLrBiELgoDY2xtOuAhj1MHoRSeARJyH/YiYztKMEySYTp68vHH/eH5q2/ecAAsa2u0Onj9TY9DAH3kTQzF/Q++fWV/MNx/Zefwlafvf4AY7g/5Jx+/q7W6czfZ2t5xYsIDMF9eyHpBIGXeW++Onk4hxN4r5zEJmfdIKXHl+rjpjENUWxiG9jNffP2f/PI5xfa9e59kkPQHkLEUKTi6tvXp+dH2Tr9VbH627oxrFoskgONB7lKaxOPO6OHmxqpoeBjESU8D2jUmHfZhkr321uG7730T8aS2Qio0yLi1dpgOoFVOqlHWF7pTHkCGhZIMx1aakIYozBA1RbPSmGLoOou0lpzyJO1zHsquBBTvX78ym9TSlNsH27N5eTqf9wYDIerDw83Z+VldFFFAe/3hg4fP7ty5imncipZGCUj4/igvTl4EnCymi1EYBgN+fbyLnOpvbg6HyYvTkyzuAQeN0hwxSEOC0ejKrbJw33z322FA20pML9eLyfLycj7aHQ03B9sbo3XTjQB87e5r7/7Wr/VS9sO0gVA1KXnAAhIBZ4cbgx7YOFuslGyjftg5++DlvOr0N779Msz3r2O2sZ1TSEynoHcs4HUr6lWNMbLOSuUVEMggxuJ50cQsgAS1nZK2VUJQD71TxkGtNHCAxz3ZdEUxpywiLL7x+sC2zWpRkRivxQrRgXJA4eCyctPFlIbo6uGm05CwPB/s1tbpYoWAr8pZFA0oQUYZrXEUhCFjmLhwK+oPhutVUemVIU2wmfWt+eT9j0MYeKO2O4shc14laeiwAYQKK8J+0u9tLKvae42wGWZJLw9XZaWNB21DkEvyxANoPaqbGhISxWlVzCAm3nsP40576C1A0Fp3eTmVUggjrLM8iEVrlPN1p7GHylhMAMQEx/z6Qf/0+aPRYJPQhLAUUINVJYqF7WoEbJanxxfL43k93Lu5ff12FI9lWUNInFEOOGHK88m6aOqyKYI4d5JKz/s8Z5QTDzWymIPnj59nyXCY9ob94RysPSf9wxuYcUQwY9wouDXY1LbzQNOAdkYbQmvlha7jmCKsMAkYY0HAi3LGUIgIoekAUzVfrYQBtOiMVJDY+x8/7Ky/PD
nLkmHAbFWttjYH6+VlP41KKSeXU8I59iBkIYIaI5nyQHpnaailQgR546wziGLgNHZ2fjGBQGchI95qD0vVJTLhPCTSNKt1WZX1bAUsnE+Ps95QWTRfXva3ho7AIJbnJzPJ5Xi8MS+chjCJ4uniMmZ8Z8yEmmabO+fnujoTyPUR6hAIwyA5fjb9AQUXL8/3DwdvvH279m5ZycNXXzk86C+PV8V6TTigIf/ag8e7e3tf/+DBm3f3rtzae++T81fefOvl4/sc2PFev3g0/fjJad0OPWM/srVzbbT1zQ8+vPOZVy4Ke1SVvRgPMcrzbNU0j47Pv7A7fu3VgbFdGu2vrHowXQbOht6uyiIexKu5jAzY3Oq9/dlXR1nUrldH7913DccWP5tPrtzY12V7cV6XZSM9nl1cOF1A7Bm2ygOhNWOYs3jRCMCWvcG20FYCYglZFY0D8ePH03olnLGs1zcsaI1kvN/5dv/G1eNHj1MWVZXSCrbICVlDzITSl7PCIYIhaFYtIQyCSFD2vbOnZ6LOGUcOxhSmUShUBxCy2FsPEhpJ11qhPHDAGatlwoM0ClaLxQcvH/xP/vCf+Mv/nz+fB/h4MoNKe2VeTquNjX0jyGVZbO+lqu7WL6eMwTxN0ghjzJwyUZjcf/9+iKOA4ouzSbYxnlxeWKXXVfv67duFEV50CJnLh8fQdIOkXxXt3ddvPzm76LQ+u1xcvbZdL9sEu4PbV2sHcZaN96/P1tV6ooSPNHXGEAK9crhuGkrCVVFhxJ2qOiF5HBMITk+f0cHm7vbVi4tzVa2JtsD5VmuIkbbeY96LMyCdQw5R2ojaAYJDFrJIK+ugMdpi4BFS0lhgBKUUAtbWChPooEFWOGNqJUjAGAES2roVV9IYegmAddYhjB3CwGNlAAYGOAmcJ4h7gI3ziJBa1K23cZ5hFmoPJvNVDANsrfHLQbCthWCYgyBSpsWIhnHMdZMGWHdCAg3+eTHFD6Uiobp0FAvjQdu+ODu+emXj5PxSKV2W6zjMatn0XEjj+OXlizRMoyxIMtaJ0gK7ubFhte9tjqvJSOvWSEV7vWQz74/y5XS2Xre9/dHg9iEoV9Wy6gU+igMFg8nFEvoMB2MhEoB8HNAYu2pdd4UzLjpfFXEKwqzHQ9O12d6V3WItqBHbVwcQS9HNislZmG1OX5wEqLd3ex+acvJsTlBIAEY0Mz7tmmRj/CoGq1C1WW+kO5nE43SwX1+s9/c/l/V3lstPB9vR7uHh7OlsebmOh5uHuyNJUtgV453w4OrdKBGffuMbi8V8PL6+rorpZHlwEGHG4h5lMcLGpeN8OpsqgXkJwnj4qCjufPmzP/3WO1//lb80zviLex/ff3Y6fvOt5RR//N5suHH94Nrrs7/1j9PRy4vymYkClARXbx/c/LEfezmTH370yfSs2T2QF+4SIoRweHne3TzoEzIMWO/i8kmWdGF+BwtZLF/4OGxNSWiy2d+YFJPFctWUzdVrd8P8wPLt8/PT50fPspxWXYHK595Py3qVp1t3dt8Q3coT0jSmFL6rHdZREnjRLIebd7BRztRlW1uAGM4uplFH3rxz94/O5u+FdrmYT7LNrJotzo7XGPNa1lrZuRwevv66EgwgwylorO2sNd7NTk+8Ac4FII+fHZ/uHmwspguOLNUWegS896KYrZSSBiAYRqHzgVMxUHS1bLOeGSTRyeniE/l8a3OQb1zdJeF37n8wJluvv3Hl5Gze3x5SCMpaqNajkEkXMEoiCh2gZSPO1mBjd7u/maHuNA7i1Wrd6/WKWmnpB9FeLUonRa2QgBgztqw87XTXVF1dDHrpDyhYryvGIkyotcAoq1thnIaQYgB9LZS1RugsTryxjVQAWMZolmZ1WaRJiiiU0shWYRoAZ1SnrbGEkoAx42zTlhhiignBBGIcZ1xJW7U152EQBtYpYz3AGFGutSOAKmm98x5AYICxDnjjPXLOYgCcw95xqZ2yjfXGOo8oYZQ5aKuuZoRjDCMS5lFkvGxkmyVZVTXGw6ZrjDMEo7oTUpogQhEnaRRjgJxA1lNjrfXOGccB9NBabykCnVDaaYgoxjgIY+0twMB77wFGAHIaWAel8ZQlGELkNYK001pbS5DXAGqMoANl2UKrna8B8pQSTsJ+b3tdLa0WznqPbcA4gtwBjTH2AAIIKeXOaGeV9BYhiDwq6ioKybKcYUwQwVVTYcp012AAIEZJmgkhStGWTTVGjDKvjbLGYEob0XngEOQERcqV2lgIvNTKE8R5gEMaMKaaruqUNYYzpqzBCEOMCYAIIC0kAN5BWCnlgUUYYoiNQwQmogNltUYIAIS8B87bzjoh9bJpgpiHCYuCAHpQigo4Gwa0lsJKFTCKOGnaumxaYD2mxBqHPNBSUYIDzr1zHoCyarQ1BgDnvVUaQiC0bpXCCEIIGUOIUAuxQ5pCijG2zmAMGCUWWWsshMAjhAkmBBljgiCIw8AZLaUE1lBKCKHYOUw5AV4D5dwPF9DWkwb24WC0N79ctZd2GA9evz3+1lfvzZzbfHO3rqaiaIIQyrI6xUdonNw63Hr8vdPT88nRyeIf/+pEoxFBUOvq9/9P3zAJWDV+kOTxKOmcXS0vbLDcvz6gfQDqp2l8x7Fwc3RwsTo4X2hthFg6wsOy0R5gacD08mj3SrBxN904iHErWlkblgtGMHLKd0gbp72zAZaB7KSLcJoORhts+uAC+3rvIDl/Xp83bRJku+MMAdBWXWdDJEts5OXpheyHG+NwerKMBwFn6HvfvIeR5wN3/ExdhqBUNRuQD7+7sDK6lqDBXp/nzDqdDfNXyBsvHz2xBC/XTSd9MvLXr/Rpf5CkqGvt6cOSsNm1N9/mIWyhms9Wa9fQK7C0lYPwXIiv3j/dPBy8/6jM8/D4ySwfDO/fW6ogQJHYfi3Ocvvy6CLfSi3x/VtXUu0YwBcvH7OiJlT0MljWqyvXrz15soBx/7XPHzx0oB7kadb/7a99WlQ+6hPmLnwh3tjqf+Vv/UPQgKI6+8f/4X8+2N65+YXPnHzwQF1c/sJP33B1cfX6zmd/5se+/vWPBxAePXqpOrOYL65f351M24uzNoLk+XHFsdw+2PrWV+5vXHnx81/+d3sbkfXlB8++7Zvv9gcLz8Ty/CQKWRpcGfV/HInPSoWImjF6dVVOwt6hMpt1rSkLy1IXs6BP+kA1at6V4XEvSOSyhYirKm9rlfc3muqe6Ark02Y5N203nzVTaZUpIYggYk1rWYtZoWSwCmtyLXlrN9vNTAD3rhbNk0cffrLcmS0bcHj91c//+M+2gpfVo+/89ocb414xeVHNljQOaBBMX8yU5Ugz1oavjd95etLRmH/8oHpR2MCPD3dZ73W+Es3/r6vIQAMg9g5bB2rdhGGo2o4jQjB1xhESGacd8HHIEhIoLVdVd17WA8c9xNavk5gjz7SH3qEwjJ2nEMHtvf78dJEPMuNrWde21GEWBVE8eb5uGGwNOlu3kb3YTKn1HaV9B71pFAIMQnR0uk7D1AMVUdo2pZFl5vM0HJfFucOA8bDp1OxiOhjmQigMKPZGV
ZZYlvXH6ebuybPHbdNCT+qiUYh1ng/DQbAZ9JKoizMpDGw1tggABJThxKu2ddoZjYpFzUOkKU17g6Y7RZCQiFPGC1VH8WBzmD178r4H3Ss/esc/XVmDtnZ/jHVlUyxWi2U42LBKcx6EcXi5XgGjDTE/9nt/XxLq6cIcnR+HG8NWuQrSK299qZhe/MP/4Z+88/k3NjY2t3bHa7Gspov9/e1lJZAhUigPbGCMZnx7ayDX5dOnz1997dZwawOko9WLCyrbh8vzbBQdDLfXVR1os7W39/6H986mH8YBHQx6jVAYuCTCvWHuke3l8Wpy6ZyMIo4AraXFPKIsktLQIKARy7L+o+Xq9FsvonDc749qyC7PZrGCASOYYKNbhiEGRqhaCsUpxYg7QGmQc8SVFsp2WZYuGlu3XcyDOBw0UsRhZL2mLIUgBDRi3Fg0j3uZ6GTXdFaTypZREmkLEIqCPPYEXbl2YLS/XFzGJEYRoSzc2d6Y3v9exwBFcHVZdgRt7vfvf/rJtc07zJIIYmBUHDHOQ4iZc0DLpsMo68Wda0gYDKK+BqSqmyRPV7MiTvnk0lEeB8CfPvnw9p2bQZqCrz0AAEDkm7IMwk2ICIAeeMsYGQ4G56fnl7NmUbalaKpFu703cEGnPZ1X5dXxjqwaIDTBQBrbaYWtX63KOE16G6lxWLvMdDVFcL1cyWZFGVbCKqnCNOmPrgTcVaUBJiCO9IOIJ5mDpbMVZZ4TSQm+fu3Vo2cnWjmjfIKDqixcBXV3fuv2JuZoKeyVvYgpMZ0vZLHWtRztH7A4ssoqZTB2CGiS8F44sMRtUWccL1czVaqt8dbG9at3DrY5RI2sL+bPb135nEeuU4oQEoRRByzO4nQQyLrGULddxQjrZam00ipFaIwR8hBAAISSC7O0yhEacMK0pDsbPduc1WWnAVAGGgsAgt4rqzEGTCuhrccQ8SDoOtHMV2kCFss2572c9BxiQBvkpa7L4vIY0qAW+nQ2LRQKe6+MD99Mo8BoHLGAhhFjGJjGaaCMQJ5cHBeYtFeuvsZoapU3plvPzqxrG6FHO7sc0uEg6ZTOB/locytL08mq9BANhn1mEQJWFGUQx87S8eZOV3fdqmS9iIewH+Z1I4lZOmp6AxRK3FVrYjqIcBjYAAfIwsq1ABspXNuJ8ThgAKwv57Jew67IEnT69LSt7bif0DCkwGOMrJZAWyM19B5jaLV21ltIkt5IWQ2A88ZJqynRIUe2bAJGNYbOEILS0dae0ZbHseiEAWrrcLetJQQ+iLO2lcYb451BALLRWgGEeKNc1VZJFA0TTH29mQfXd/OXL+YJSdpS7V9/9Y133r58+THwPzwhp5FPOQZ5eHm+XC+bqZ5+/+n5Fu9NCgVtzYIoyLOikmVDBouGckzi8N6Te+18PRrnFLtRj3WKNh3b71+/OFbF5TPGg6cfve/z0fDqVj8ORHlhPb1+60bxyckH759If7mx1c97zAt0//7k2ih5/cfuPnj8lMa0Uk4UDVZSaVQs1+M+SVHIIJ0t1sKFG+Odxw8fKYk6bwA09bJdgZxLYVS7+cob+1d3Tk9mvc1NMz2rjDk4vLW6vLi8vETOewRG/Ux6m+5tAWMnL56365JkyeW62jzY0y4ZbF5ZPD6K00FAZZSE67XFPIEunM5LFsO8lyhiw4CcPrmYPr8AW6M+T1TbWNvRgAIAEHCIkEJ0jTYWxjQcEyTbap0EQcpwJ6TXstfLnj/8/p9/9L0ezxmLrPMWehbHfRJbS+rOAQet2q5Wx5DnyoCtgy2nG29oFPOmUV4I3qdCN03RHj0/DzHsmhqR5IOPTzcPhtPTM1F1Sdrb39pazxcaopPpeV0WScIAkCnD80b0hhllvFgt9vavTS7nHmFDoKEAUMZ5VK6LOEiyJETQSWWdBwBYJbSDLWZYW/+tb30lS7K9QcYjUUwbYQCjoFbSauOhmLdVGFLvjNMWI+KRzznHCJE4W62mMY2B1+umTHiMEFfOGwkpjT1QXac4x8CxkFFGaUo4DRyDzGgRJ4BSb4xB3nloAUBOO6sBxRQBqI1zHjpntbAEJaKCzrmL+XSMsjQJVGcgdBtRQFBrnRZSSqchQECLkBEoRUqwAIBwHEU5ALP/v1REgIlTokHAI3h8etZdtFC7NMuSODaeRAnMY4SxGvZCKDVxTrRC1o1uBfQuTbhXKIxdb9QPwrzokFKordvFfH06W7+xvd3qznqVZj0j63LV5IOxc4ExNOwf1OXUKkWYb9tKqoYSDqDaGHAjWlMuGYgimNVlFWfby9nJsp6O4xhB8ubnf7xtwGg0ePTxc62LOHG7V/ub483jZ8e9AQvIwLp0fOWN08fftRZMFzXDpG0XZdn1+zdVN1yuis1B+PDpBxfPvs9NFmfDXrpRLF5WVY1Mq+RaY7cx3ki2dm2eERqBtu6PUx7w4/MjHurQsTRO6tnZRj4olKA+cD66/uq1SOuv/L3/6oP3v/7lP/DZEKc7O9fOy3PbthTD3avX/+nv/urB1a0kS7LQC+36u8PcJb/6l//q5UVnOgM4P3jntere92EgRsNEiZbSkKcjb0HEBxggAtuqrarqZRztDLY2TdetJs+fHd0L4kGQpS5IVs1qnG7cOrxy9HyGzIplnDDbFHPvJQ90EPasFd61CJjm8gUhKQ/Qi7MnG+P9jdyt5yeGuijDsvUhIh7BbNy/LL5TtyexqzwVt+5ef/fkYVWcpszu7G0vV353eyekBMvCdl3SgwHmH374QUL63qDDg1dEoxfF+f5eToDIepvAMUYdDIHWcES5tso4ncQhhqBt9GAUx4hjk0C4lFYNxwMWqMniuU821GIFEC+WjcwGRgBodNutNjcOq446S8pVQyGlMEScKNVsjUIeeEz15GzRNI1HtFXQKERg/vj0Ms9YEst+wOdVE8XMELeYzgCE1pkgiH5ot2YsTELGmLZutSyEkQh76HS/l4ecLcuCRFQJ1bUtYySMmZK2LKqABd65spAAYetB10kIvbWOcAIhxIgY6RAiQRgQhlXXeQNd68Ig3tzY8EY7a73nRoNGtF1dRmG66gRDNOSJ0J3WQhulLUAAYYw98utqCSHGSAWEYuwxQV3bojiEAAaMh3GipNTeLtsKYo8Jl8Z5CJ2xFBOKSSvK45ML1dlbN2976Ix2COEkjhn1TdsiaAFBVhnrrdKSRyFG2HsCMQbe1+2s3xsFNjbWyK6FmHgAvPVSS8O88xZa6xBEDngPEWHe65DzAFNovFPAQ5vFSas6qYRUwhjjrQ+CEGDiIdROC9USyiACCCKltTPaGI0QwRBp4xGEzlmMiRRK+xYhBIC3AADrEcJGG6u1dzagTBvpnAMeUEYhgpQw44x1umzX1hqrNMM8CeKqraCH1BpdKWtcGFADEQQYIggc4jSs5NoDZ521wFedsMAiBHpRIpoWeAQhUgQroaM4gsZGAeu06iqphAoRxso74B1QDti2rikh2hgpJQTAGe28E0IpoSmhwAMttREy4Mw7J7Ux1tRSGm288xhjygjC2BhjAeA8sEZjghIee+ibThIEkAdWaYYR
hNBZhyCUxiCMHQTOOWA8gQB755S0xiAIA84AQpQzqETbVkIZijAEP+wqun4wKi+a7/3a0xLaf+//9MfvffDy6OHjbDP71gcnf+Mf/M5+gPaHPCC4XnYQ4JCLJ5987cGzp0GcTsoKxTnvvFfi5h32J/7UG/cu7g2TCNUCQZL0N5SFCFNMm27yorz3dBQ3f/Ev/bU/+W/+wT/1n/yZ3/j68/e+/VsfXXzc54QQCJ3tpwHNweGt4J2fulOuF9On87c/f/WJsovFAgfMopB4iCCVykeYUqyVLpGuaMVOv/396rlSS317e/hn/8//6XvPZn/7v/srgwHc28/W1aLurJTSUb6QWiw9jhiljkObbPLt7b0bd9/++P5vDAYM5cm662xLHrzXvbzXbO5M9u/04zTkg0JfFLyBGQ1Wnbn7zm3YlECVWR4xSFnM7tx+55/9ynvrc/LWlz8fQT+AwAIoS4mlMh4rAI4mIsnpwxMzOT/jFm/meudwOFsLARCM+fXPHVzcn0wWq9aIK8PQKiNUtbm/JduqK7WzKkyj4/OJJ5FSwClx6+rV3/7+pL4oWWcb7T78zsnlZPjWZ29BnH+eH3hGR4PD1wQdDoNPjk9Daj/zmWuLeVmu1pPV4qsPP15P2jvcv3p3Nw4QR+nO9kC5+a2N7Sf3TjqNru5uZK3ZjeG3f+2XvpH8zn/0n/3fgQvfDP9Qs7bt9HfS/d1hcD0P+usOy0kOUFMugV4vQzCAySbf3u5QU2lZKEf3eoOdu93FfRBgSAEOc9JBY+peP1a+MMTUurDawM4TGusKeEtYrxcRu5ijZmGSmA0DSgo1SkfNMbqx/Zk3fvpPPvqbv8QyCXtQzvF8rtu60lDYSspZ8cpm9ryKV8VKhFUUJE6CyayiocIsGg+HQ6r2ejuTp0W3proGeuG2wuDhvZMrcW/v+shMfyiYegi00QwjThEEFFqEELEIt1omFFtntNYYUR5FmOnFdKat1dLuDPtR2ANWIMgZjiu/XqzWYRSnUdQ1HcJQFh4jtipaAGASDZXxK1EZRLNBuli3WhmPAByEV69trRbL4Ti/sn/jbH4KUKKVDmNJiLZWEuySOFAQUUCNUoQwzBiGGDooIbU4MW5RzpZhCvM48cis5xPlmpj5YZq3nVl3Ir2yn4Q9h2FEGYaOYEASbg1CzlktN4fDeT033q7rBQmiQTZYVvNFVZbSYE+1s9W8zAd5GKCinCxP70ccBRRPH532DZmXeqVNTBFf+uVlN2IyQP7yybPNfZ+lEWesW69Ep1gQImIYwlGI0x5thXIuvHXzR3zNNnaHSR7XnaFwNJ8e4YAsrf/Mqz9Wzy44Z20nRdudPX9m6wpTKKRaL8quUt6052fruBdxguu6U60BVn/3+x8U1dpK54K4bBz1Ns/YeGcDMmKEV43I0qxpUbaxMZtXjXK9jfGwtzW9mIeQwLauLo7Sfq+XXAOQOOa9txvjHoS6KlseBZwHreo8NDwNgecYYEoRCfuyVa1cOaghBMBDDiHlAQIOQe+ALaoKqi4JAgd8vVZK4DAclMsOAZQnmTe+rOrh5ujuaOfd979bexNAMk6yKGS9OAsYX63bMAkfv/vBelrEWa837teFzykcx+zYehrkF/NTC2FVLaMgjYJ+saq17oIAF6WsOzVb1Rjo/cO9OKEO8TiNLyezZbkOM/bGa9ePH70IWRj3e1m//wMKjNOyNapteZKwmBFMjQIZxaMwXQdsURc7u8Pni6pH4FZOVVt4O4QQcEo7qYuyNUh7iozRcZ4SGiIUEEBxMEz6qayXwBkIHGFsvV5YqY1WRSEo8wgZ2VQhSgPGKfHWg7ZUwBtCMMGsXS7zIHt0/ISTUOtTrbowDdc1Op53RgEagSsRW1ycF4ulMnYwCqQp4yDqpDJOjoYbneq0ME5VeYygATwIZbyRaN1/63awM8Kya1eFsuLGjVfShHkEpTA8jIWSEcIBDWxXY6gYAZTGHmJjNSIhpbm2QLSdVgpAUYvCE8JJQICDEA37GYXaWGesl8oCECvdRelQOQw9wYg7JSFwhAYsjTpjGQSiqE3AtvsbEBgPZdMp5TrbKMazdV10tVkt1Nbtt+P+HrASCutbGWURQTKNk2LetXW7t79/76NP8s3D0XDE0n6Q9qAGbVG4TkLo6uUqz0LT6GpVxL1calAtVwCB/mhogYMQmVIC6Md7Ow4h40ndKac5g7iaz/KoD4ikwFfriyDOAx5qacV6JVfOeBTGSdrLqkUdRgGhbllV1bLyYeCRtVIGQQC7dWucLNquMJoTn3uDEaHMO2GMNt4BbymkiEIptZQl41RhJo3p6k50XW8UCOFEW7MgjAajrmi6ogoo3bq6i4JoOV2LZsGIplxo70nHlovlat12HljGWm3asnIESeeQhIziaqk4inWUfvA75zt0fwHqZJDsj/rP7t/bHY/zgx+ekFfLOsTBYlXeurG7SpqPzhWO2f35ZZTglIQE4R+9vf/y5YJHlFD47MUpj5KcEyEUYLiSPuB8c7uv8T6NUtud94bZx0f3/6U/8JPf+OYnq3W9SeIfe+PK//Arv71hg608KGp/+9abv/Ubv3tta3eUDF4KvlqJs8ns7XeuN7Wo2mrzcMSD7NlsPlsXG+Odh09PrOT9Xtat7bvfeV9VRivPOJvPl3vbG2kaWtf0NsbxxqhT9urVw/ryjDoMLFlNZl0rAaLCgCiK9vd3Hj+bBr1oc6O3mh5HUUCDYLi/j+Po5PgyxKC3ffji6Vxb31wsQBDMzxdK1KoVUUhIoaplCZyClFoCTN0hYzCwGLGy6ExgvVGNEHHajwhtO4Gtd+3/l6r/eto1S8/7sJXXk9/8fjntvLt3T8/0pJ7BDBgACCAIkiKKIpgluWSbRZfJUrnsM5XL1IFsy1VysSw6lYuUKAEQiEAhAzODAQaY0NPd03Hn9OXw5ievvHzQzQP8AevwqvteV13372ozEttGGqOQ0x56BHFCWYYJJWA5K5RyFqPNgzSgtKyNckRJffj8vKmLpS8rKb7+1//+0fOPVFMMtjf3B/33/ujb5WLirqSvcNTtjddS0KqHz88d4zhZm9d5ylMAcVm2ytpltewMYyuBttKouWr21jauLxYnsm7Uorn4/vvTydwACwiW1td6JcPYa+CI77HUiEq2WkPlMHDWQaB046yHI8rtoijbnPUhtKYtLXBEG8UJs85oqwQKmlYZ57MoIch5qRALDdQBI95apTQHHqk2TCIMiXXQUaaEtpBAQIAH2CFl8VVRjaKoqWaEI0iYckoZ67SEmGGMgdUeYMBDjLDxyigDEAkYjQJmF4pR3BjJkLfOCaswVOtd7r12UBMnVeOBZcjBshTWsaZqFaa8Q2th/lyqSMt2cTa1hG5t7y7LCmGilQIOtS3aWe8F1EQImLpmTqfdrKkbCoK81VK45aPjNELdmAKv52ciDIFhBAj0/rOXg/WNN7+6T0zbj0I0zGBnrbycZDyKexuUGcjQSkvGMU+CF4/u04BsXLvuGwCk7Q/Q6eFFtay53bIoIIFY1GeEg9F6f2etd/Zisri46idpsXx
x7+uvvXjAO2mQL2eXFwLIyDV8tBk/O68J6/XX9imcf3z/R/v7e6vVvD49WV5U3c3PWCk/fvc9zq20JurHDpar6bkpc4DkxubY2eT9+/ehMnXIl6Ya9TNRm/3Bzg/f+1E4ZHE3aGSp3AI420OK85Qk4Vw6Xyxub3a/+8E3B+t4817vGz/4Vhyxf/g3/tIv//Kf3bnz2uPDR3EQ3Ly7drloPvvVN9/64cNZG/1P/8Mv/eN/8k//j/+H/2t3owtrd3z/ed8pDFBb18v8UrX7JEk0sFkcdYdRUxwlg/HO9uaTp5fMWWsb5GzIwvH27WwwLktdnjzBMh9u371177XJ5Oj48mTQ9Tiiox6riyuHmrKZ6aoNuttrO684AVeLp3sHG0cnZ0m80SwmOoCYUyVgtxMD2t9a3zg9f5QGaDjcfPb4+UdvP8yLAnpHSXg0s5vj64PejfzqmW4XLENny8u5WHY7A1SZLM0asWraZnp1fvPuXqDqaT7vDzqQMumw4TTOkmp1booJVn1MMeJUm5UN6GADl7XOV2fK5CEukLDleRsAtJmM8zm4ihkPtnt9AmBjFTufyleuf2X7OhXV8enzD0pZGNhGIekkVi7PoPFR1ssLiUisraRhWEvETOBrKRfnynqsg3xVtlWRpt2twaZs9KcHaFIBIhFGi2WhrUuyThwEqm5l2yglRsNeZUR7VQAElHG+Vca4Mq/WRiOpKsZD65FzVlgT8oBxQjhDDi2XK8powAkhKOCMQFpUrVQeA4W9Y4Qo5SAgUigMeNNqTiEE3gEvbNOoGgNIKLLeA4+aRnmrjXWUwpBzZzSA2BtDIJbCBQFB3gNrIQRKGWcd44FWxpjWWwcJcsbNp8tGSK0w9KCqLtd2x41QHkAMIQaAMewRdt4YAKB1jEXGOe88hMA7Bx1IaMcZjIAFDlASIAyB9x55AjAGTKhWGUcZdh4gSJSw2ljuEEAGQWCccgSs2oogbKHHkGCKPYTWekygt85Zy1mAIILQUxJaoIX2ACOMKQQeAecdgBBZB43zmFCMsLGWslCoxlkLEWSUEAqxoQRj6Ownl25Wam2AcdZYDRAKKPeICCkr0SIEnDVKeNkIC7EBjmLknau9jmgoq1yY1noYhVEtGgUMdB5ZUJYV8M45yIMQIRhnoVGaELISoqhaYA2AIGDEKCO0oAgkaRwFgdbSaQWABc61TdUKKbXDGCOEKKbaakiI9tBYV7cthLBuFQIAE2StN41EAFjnPIQUQGeBBz5vGkaIsw44TxlhkFrvKSGtUM45hBCjDADvrUWEcoYJRkZr5x2h1CgLoNXOEQTbpgWIeAiS7FNi17Oz2d/9+b//3d/+HlWL3/sffs9U5NXt3V/+9m9ub25f1c2pMxsHca2K/Zu789ly0Fs7yRdHtszCwc2f/onffud3kNZ31vgbb7KHj77jOICoSyESjUNQBpzEAcs6gTRM6vI3fulsf+fOf/+vf/vvRujVe7fvfeHz/wa9NI7OT/Myt5qJjRuDv/q/+plv/9EffOlrr3YDUfuW8m7egma16K33gacIWa1zqDSGoJcQWpTnT87EbJIN+j/2l1/7nV/+fnH//tdee3P7n/2tD777zReLs5/7B19//KP7h0/m4+0s6CVKyWfPrlptP3e9/5M/+2OzWa+tt/Y2NpfVU96BAbS9YQpbUF7o8xxVywb56WgUYQjjLInTOO5G4+19Dps2P8MezM5V0hnuvfHV+JtXDz8+nOft9TvrB7fSFw9XqxertYAVKW4VCB0MCNI4WFF3b7dHK4EZy8+XJqa/+f382dnJ7e3OV3/8C2ePHh2+aHa2e9bKZSVZ2OP90cXldFUyiqkS9XxZVjUOel7lTuSOIoAV3OgN2pZ9/GSpiqtSqZ/9+Z+599qtnUbSBP7df/rX3/3md86+9+HRoay0fXlyUbkCKhek/fFA7mz1T5/P9OlSyWL/5ut/fPpnpbGaxn/rP/q5//N/8y8t0UAf/t6v/Yuf/0f/RABv1dbhx2U6eLM3+FLoEukwo4W1KulGdLx5+bxlnXGt3eX0QWc0YtbWSpizx15WFliWDYJOMr14lvUwgioMmgpOl1cLgE1Ih9iH7fwStG2KkrVe5+TRNJ8jOUd60nZZPP7cT29/8WcGJ4+f/tL/hSLKo1E06lzK6dlpc/YiH25wuZo3C/CFN+9ev379+Hy1uNAXZdHvx2t9DBFsiiYsHBA48xGI44Mvffl3f/UHLIghqnc35N4+87a5c2MTgBcAAM6p1NYbL4yEoOkno1kxo456gAmEACNM46YQtSiNrRj2BHP+SbASWaUMBqAsC2sdp4FVFkWu24mvpnMpVJU3LGBffvPzrZlure0/fffhJZolcXj6vftOG+Hh0WWRhlfba72Lq6lWolbF7rVXb1+/e3T8vKkm3oNVUYZJBghzxrWicN7GNC6KnAIW9jLAvGmg8j7GvFTCKjEcxtZCa63U9mhylY5304094ZFXEjtInCpXBYuCKO40Ze2tXDY5BB5DFEex1Q5iH7AYYHRydjbudhjzcZQia33bYmeE1Nl4vV5dNec5JWyU4jt3Dp4+OZTKb16/rnQJge+mzJQXWbjvtDONxpQ1pVXCY2JbLcMo8EqqRi1U3RmFyShtDNA4uP7ZL64NesfP3+tjJstSKhPwJM/nmITEg3xaddLk0ePnBzu7luJhh9VeEhYCpax1g1F6+OCKUZCE8dreGmUhonh+OadJPBilxaryVkUwqYToj9c0wDRO1nY3zqbLEHgaYcjM+kb38vC8uz2E2UhCoEW9OJxxpBUyDjhnjVSCYiRl1Rl0CHDOUut1QFON5h67IAmF9BCAgCAMAIbEe28tUNBubu7rqoRWxOmgbiZhFwFjTVvS0DsNpHFaXhxNjtcH3Ur6pnBhEDX1LETJshZSe6eiTsjNxvrzjx+OBhs8TA9ubT5/5/2U9SFpvdfaWgeotgjTkBBlZRvyQGqfJClEuGltXtblqmEBvXn9Rr8z0AQM1oeTo8vh2mi4tbaclsX8U7i7MQGPkFG6LmoPHA5Dow0P8BtfeSXOgPpIfvjwDIMw7iZXlxPWemGnaxubSUQRoYQnhHlpSoAtpwR45LSp6gqyavf6ruIhhTrPS2Usj4ih2FpLOaQYyEoaDy2HhAVVPcsizgBWWvY6fWSRkY6RsLc20kJYb6WUYYbjJBRKamEXy9X3/nCyt9UHlnjrQh46BNpaOAM8QrK1BDGHnFbCOxkGaDZd5avZ5t6Ih8wo0ZYVp4EVCoNQSIUpwZhBwKMkiBGri7IsZ1kvC9NISoEcRpAQFlMCGebLpvDW5suVAQ2LU2E1cBADgAJgm3ZytcAEdodDK/tN3QqlDLRZGGkpAhZQSlqhAHIAO2XamAUxZohb6mWznCZh6HSzrFarVhStitPh3d298e6NJl96OwsA8l4uL4134BIhAGhdCgTRtf03mzgc3tyRDWuki6GLegniVT6bQgtB7UWet4W+mXYQAMg7ZAlwgfUGExx1uAMw7Ha9U8C5yMdtrRStx+F+2yxtO2MEJ5R4B7AHQFVG1ko5gJjTXr
US03R6kcu6NkArp1pjJCilcqBJJ1MVBqiuPeNpFMRaFp4wawzwQCpttEUQOiCsEtZo5Hw9nQ53rimmLucFZla1inXDdiZUbRimRpsAs2qyijLmMYIsDKPNAKMgs3U9I7TI54W26HLRSuyNl3UlPIKEkTCMvfYBDqdT7RyAku1tBZsjGgbJoBulPlPOJRn/RAXd3miyFItpPRjbphLdfufmq7d+7/d/v7e/Ne71zh68PNjbqWeVViqi7FnFunEaSdrN1vppiCAchsOXL88/nB5d+8qmAP7B8+etMw+ePaxWs0EQhwGf5CJlAZTVrdtr//M3HsRldOP2AUegkNOf+ltfevHg2cWkhaoixFqJ55O6bGaUZEkc5LkWFgGQtRZeTBeFBMOA9ylolB8NOo7RSlZ3bh1kw/XpwoYRurp6JFox7K+FPCAUNbXsr2/OJyunzf2PnhE8zGj6/MlL4zEEeDkpe9dup2s9j/np06Nnjz5uhbUNWqyqtb01qA1GOOZA1pWQLYWQBXyyKqw2UFuxbBAGiPEsZqo21pph2nEOWWGIsUR52QoYQ6cVgCAkATBGO93pcdEqhGklW+tp3bTRvLIkWJaVpsH2/vbpw0edlHz+7ud+/9f+4Bu/8d+/9rk3e7cO2ga9OC/jrYPtuxvF5cWf/E/fgEFE9+OtnfWTyeVkNfvo/SoLelGacYKLfIWAHmcp8iAMmK69bp1u/KJY9QaJa3QT0A8enfSDuGiMJ5IzyqypmxnFTCB6ouz1naGHxcW8StMUYaedgc4Dp4uyjgKqtStKqK1z1kBIiPcEWI+hUUbVRgMMMDXARgwpITzEAHFMaVnmhCApBUUEGwaB5jSZFyuEAeEUE+CcdQhKbTziynIMwrrKEYIYE6Ms8J4hAqwB7hOKiaCUAO+M9UAbABElgbBifbzJLCtqCb1HHocEUyiX+SLNQgg0QVBZwUlaK2MB9CQolAVLQYn/c1ZRnFFOaByHZ4dPcV2mw+2LRVNKlwY8YBlGjXZ6siwhBo3wk4sTzAKIoBJmfXvbSz1b5mvbWVvUy0uxsU+Ma/q9be2YtjQmRhQ1T7uqlBwlEckuj85CFjtkLLVIVsVqupitLHY0SVK2odrarlrraBqvR0l/WfmIBJNFHic0IPj4xUtvcNvAS+l7G8nZrG1BZJcWmJAAt3nj7tU0OD5TUuE2R/nh5XL+AGL95OMz7VGcdrzG++N4Wc4Z3zBWHD47jtbY3uZu4Nnh9CwaRWfzJdQyydYgDbxTsq5BJ94YbR5OZxdtsa67A5JII9fH4+VqxmgIJX7y4urW62+sra+9fP7tZMv0tw9+87c+cobjAJyeXIy7/bd/9L39a7tdxt5+58HJZNnF7eVl/rs/+LdfePXuL//qr/E0Jh6mnHz9p75w9eFbh88vx4Ogm60tpjOeRWt7u1dHp1eXFWEgtFSqOB2v9TbWnz76U1ELlr7Csi+enZ8P+uF4ryPqxZPH716/98XB8Aakg3JxPCvPe+PEQais7GRx63mJ+oLvycWxrVyJDOusX+iQJBnjDGJzfH62efB5Bvjk6pEoT3qdrbJqo3SQ56eOsZt3bi2v9Ac1vDfYaFenziyTAaxVvVzUNOz1x3cWzYcY+enFNI7Dja0DHhNK6hc/uB++/urNV/aqGk8r284LrS0jBBGbq9XO7R9PWKoW55SRJBtFIX78+NA63OuGrSkQZWuDQX6B0uG+9c1sPjHe3r2x8VoAl1cfF6gWZhUlWBcgTGPCLUbWG8s5Czv9WZ4LTaazyyROAkCmF8tkwLw1SuuiqB1EOEh4Z71azq23ny5G1sYEXZ5dSGUJI9Djsm7buk3ikBKcr6qr+UJqRxFuq5Zx7CGIklgb0+0lURS9eH4eBDSkHHjgnYMWWmPjgBOGO53YWS8boYwPI+4h8M5ZZaSBHlKPgHYtwbiTdpWTLKSq0UpqCIGDLi9XIYsd0FVdO2cRxIxQb4ExRosGE2K1QYRQllrnqqZ2n7gJEEjRRDwAmAjVOGlUq6pKgYCAmGRhBwXgxfmVVo6xGCEcx11iWFEXEABGKAQWAG8haKVG0DuIiccAGgtaa4wBwDqYcE490M5SgvM6N8ZAiCFkURAq2QKMAYZSCKlN2un2emutWlkPMIacBNaiRZ5TiKWxEUWfPHPOWWe8B9pIiCDBBGHsHXReQ+icd3VrhJTeQ28gQdhYg60hCHNGAQTGGEKwB04qzQiGlADrpFTGAQBtGoZZpzdbzqWU0milTRzzNEuaqs6bChCSsphhor0NGEMQ1tZ4iAOMrJBlXhamHnU6TkprbBhEQRAopZW32GKjtdKykaqRyiNEGAswUbr51KBxpm2FtSbP21YJY3SrJITQGEAJhg4YIQnCxoOmEcDbkFHrfUQJQggQaKW23jNGIMAAAmUsBt7aTzhXIAoC761xQGsNEVJaa60hhBhh7x0E3nvrPRbaxJBDACjBCEFAoNQaAEhIhKmyHigHlPk0bn19e+/tt598/OSSYvXe4ytG+V/95//gf5mk/7t/8T/SrQRk/sPL2VrCmqMr6e3b33lHUb997SCON/4f/+1v9tJovRt97nPwvHhx9kfux/7mqyiCvd1X5s/mFuA6n1ITzepQzQelVw8un4xY+Zd/4avd67MHH7/T2xn82F8L17e3Tn40e/Th5PmRKLX44OnLbPvGxfkq8vDDD5+nu7doN0wZRU4jSqOsI1ZT64WBFGoEKA+y7K/+/Z/+vX/z/ecvX772hQMcyb2b6X/3y39IaO/Vn/0Lf/r4/chySGJRKaFyxOxoLdYQrW0k8xdnL1f2xr1rP/uf/cRv/ZuXy2WjkLUuoJZSxgEj82URs2C1oCFn0JPdLc5kc/n+D8MkRthqKzWJlj64Pbr1N3/hF/7tf/d/q5bFg49UmQ+XuSd41IpVXWtPcBhALZ2PQbYWpzfH54/PEox5H3MCPYw/PK0fneVPrqox5/tjFta4Ew150JtPplDUqxaVTbO5sRlnEWhRGMeQBqVu+uvxOMQPPzjrdkNAAwgwjkjoxbf/3e/+ya/8xko3d65tfO9fc30hiGc/87/4maXa/Zf/9S8GjCQp5J2UxKNVqxzHQjfrGwEQzzsdhUr4dDn/V7/574I+377TxSh98fa79V85omnn4HNbD36EEz5EfCyrkgdua3/v/OXjjFM+zPIFf/F8ur63td7vOZ7cf/gg6nNZ5EI3LYCYirqxzGey1JVWS3GqoGidgSAwrNvpDPTksi2ddPbibDYtw7JywwpnS/53/jf/7Nq9/5A0vrn6fbCcxhtrIW7CTPzbX/ylJ0HQUOi97+6Mpufi27/+3utfeP0v/qW/eni5+vjYpVx2kP7cnY3Z48cP3n58/cbOrVvDdz6+nMwRjcft4tx5w1j00TOTZATK809UUJUCIEQw8t5CBxrReOuN0h6AomgDRpOMO8OKXDAWRCGsKuEg9NoS1zBKMEYA2FZoCEAUcKOtcSJImNKepxHW7uFbbyFXvrRqcjbrrg+enU6khRAhjHFt7Nm8RBT040SJenM8mBy9uHr5XAnZiWPOSC/bq
i1qbRMQIOqlES1yxErrgIIiB9Ihp/vdREihpQ4iAglVAJWmidOgs7nTW9uzDlGMMUBBEMQsWq6WxFKOY8tsY3TVaspwU7cMMR6gumikMWEWbq5vWCkNZIPexuXp4yhkua5LTI5KszvKbm6Nzw5PIQOqmbT17Dd/8M1/9o//93U156ipzDSK4P/v1//FT775s+vDESZISTXoZFUtZGucaSG0EDjOCeVJsSySUV8pK9qSJPHa7g5iECHTzbrTy0WakdHeum2lKKsiLzaG3TiKvv/exyHjvU4nqKvLk6tKKhyFxTRPWRhnYdzxSlQC+Ea6+ayKHCQMdEZZK1DZugAw6XjUT86niyTt56tStZpHvJE27na1B9PjC4VQwAhhlDEo6zZgSdbtT+eTpNOJcYxt0DYlZwHDtK0KJST0gDFKZeiU8l4rpyjhRrbGeGvNvC7ikFBDinLlnSEhtRDI2mppkQVBGAVRWCxyGlNtTA1B0OvT0BZl0UrdGwwuLk9D1o87GyGeIIgX02lVXKzvbtAwLKpCGhuEMadAGV20S+GVZyhXYCFwL1u/mr2olWPEB1m8ms0/+PChEDrudspChiwiSW+5VGEYzJeLT1SQdFJAaTcL61XpIXYAQoyVBwDR/Tuv7tz8fPSN784uLxWUiGPZ1Fcvz6+/up9kYZJ1Y4g0cJVBtpVSNNgZHCCCkVZycXUVhkm3OwjjhHHWtrqoRNNI5TG0Ou32+klYFAZwSCjjSQKBsbn0wJV13emuj3Z6z94/9VIkUcxohxFYV/XuxoDE7N3vv5yDFjBICSVBBHkw7CR12aIYO4uqqsGEGquyXmS0nx9OlotcSJf11st2wgDRgBqAeJx6iREEBBLnUStajpCShVRSI6wA9yyhyIhSaCOYBwahed0AaBkjGgmIGAsChJlRtmlVnGFrrQcUEOoxtRSAIDJGeh+WwiBIPMOAECd0tSooJGujIYaaEKtlpZoqIB6YebtcTM8uGhcCGo1G13iYVPPLxcU5ZmCxgGWVp1kPANRKN1jbicKeN364tqYDToDDFIRZZJtCO1mLGjIwHPSQ1oSOAACtcWG32+sOOtmokMjTNN3YSjnM59O2rQiGVsu2aepGUEqldMChgHWadhb1MgtMvpo3jUSYRh3qrLu6uDIeQJiJSjkju+tpHIeu1jTkiEPCUF5U84VOoiHvxDRmjS49Ql4a7z2CBGHotBJSadkyHnhAP/zgwWhuI2IZdULV5UVdVKFpNLJ0tSwAQqPxyEPTVMJLCRBuWyUB9VIsJmdpmqxvjuayrRclZVy3jdQ6jcIQktC6/fVOxNHRZFmVppemysPh+mg8WJPSixI61x4+/xTu3krbH6SdjLai3tnv0Qb/8I//IECwma8OZ6tOEB0+fwat7feTxaocbW1cVE1eLz6zObCmxZhHBN3e3f3gxQ+On7y7ud+XF3XE8fHzOeUDFsXvfPxc3Njl3Z151c7EYns8ni0rrRw12lM6PzzCqgUWrfKiv96zWgvpptNqczMCyClRbl3bOL0wl20hoecwrI0f9sOjp483Ng8Gu6+dT2eQ985mdYoSp6QFBlFijGt0Y7zu9btNfrW12ZucXvI4lHW7mJwtJ5NiuVpbW4viMO4Mr+aa4FHal6vpb924+1nXgPWNoQO+XJTD9Y22KpxWZdsOtjcwD2jMA06sBUWw0srN59LbdtRNy0q7og2CFDqHEGpF0+1Eq9XqCzdvL+ezTkyLupBSiEZp5bSwGNM4DbAgiFLMEznLmVfT4/PxxqA/IPWi3l7fff70KO6t3Vy/V0m1PsqakGg2Jl1qefyzf+Pn6ma5ODvKUnpwa/+jB5MOhYyKVprj4+PRuBfFZFkujQVhFNy++8rV5QpiTBC9vFxGvU43DZhDlFHHKSegR/jlos6yQGhQtWpR6U6KvV35FmsEfBAshYbeAweKRlMEi8ogYL3DwkptnQZWQ2wBBMAgAI0yrdNZkLRaNsYGgCEDME0glJQw6ey0ahJCYqCxc5VsIsRbab0FQZRRwBaNVpWKAAgIk8Jg4gimVptG1px+CtRHmDrpHPAQJw4BSjEQvjvIHDQEYaddyMJiNfVASqmR8Yw4LyWPIPJOKgmRahsJIaGEF0Xb+ffcuk+tItlqniELkafGeaNIk21nWEeLaTNeGzdihjhZC8nZ2YWqvfLj0dpBmjbjEbMQ2KbmPA64xCufOdTLZB34tcHObLYCoEEcQWK91wGTUYiELFlKinke95K9g/3n732nLkrAgsWiWitAmIWqnSEIyspiUnOYRb2xViaOoyBCVlRAM49NNiJVUfk2crZM0kSsVrs725OrE6kkQh1sCYm7yfro7bcm17a26vLCCxPFsTUyi+Hs+H3hZX9j++nLdufg8xvrB6dP7we+Ijjqb9xFjhAnW9CI/KybgmAtu7699rt/8PYH9x/fufdZq3w+rzkjVakcxIgzBOFwrTPKxmc//NPvf+tbyA4/c/uuVfXNv3D7/vMH1VKJejVY3+wM1h+9/+7l5Pzrf+X1B49evvfR+dd+/m+fPXokmqmiYtmq/ma/OwofLi/iCDhgC6E1MU01VaVFTT4taToIgkr3N/a0ItPaQNqP+6ONtRuXczsaj4G8XFYTHjk7Ny8//lHW2dncf60bb4jD+84YHnjjciNMdzTmwdo447LBEmeL1Xm3s9/fe+Ps2XcQ5SFPgjjI4uugOY7DImOcofrsxTlM+0dPDofjrHXmsm3f/NxfZz63streDS/mx/PlSgpHvSvnVw5C6lw/i3kEROtefvg8CBni6x/fPz85Oks7a/fe+LnJ4gTQ3rX9L69qyALDku5icpQh3BbGQaBMxIJBXU1lxRQKfJb4HHLanSye7B3corCbNyxfnl+enUOPoxilnIY8Pbk8vbW3j2l1eZVL5QmGq/m8E0fIw73d7efPLjMSe+y9sCF3pawdDCxk3nq1XPbTZJh+ijLFFE9nU4IRCRGn6OzyrJPFDPMqrxGC1hoCoQQOEhp3QoyRc040DSUeWFYWZZokEDtnnZKm1+vGcbRcLgFiSRwZbaR2nHCpSkI5wdRrDZAv2xJSFgQs7nabMhdCWmA19rJpnDYeQkQQo6HS1niLMQEAhVEMgS2LXFnFGdZCch5nnYxyRigxRmOELfAYUg+Bs85DL7X+hDRAAhL1ssDZYj7zjaeUBCEj2GMOHZEY4QynAGhrXF23WmvvIIIEAE8pwQg7Y7311jkHvAdAKCe90sZgjAkllIVSqkaUUguGIKE0CVMeBMaoeVVI3YQYUE6d0ULYRkgehtbZOEowIMBahDAPmLFKGqNaBSyEEAOHvbNKaR4w760QwmmPCOKctUJjwijm0DsMMfBeGyU0MNY5Z71jSkvonAcgiMgwzYQQeTFVWhOGPKQMY0pQW+fFqiYIE0oJRNZaKdoApNorB73SRjvojDXOdrKUEawdBJY454umhpBgABH01nvRtgBijoi0xkhdu4ZACCxEGDlrEAZFoxZ13bQCegcgYBwTCp21DqNGtyELG61qowgEPZ6WbQ0QgAg66z0EQciM0hAAgKBx
DhNMMZFSOO+cd947Sjki1DintVZGc8qds5+4QoRy4BwBEAHQaG2kJwQDAD2EhCKhhbbGOO8NKP89qwiL9uj9+xvjbON258Vl/ta7F//Zf/Ff72cc8ghSl20H2SA5m4iXkwnjSdmAza3hiyclKU57TQQ9zJdT5YPBZgJT8PT9k+1r5ZysrEaeb6fjHevdvCS8pwY/Q/7OKxshXU5mxyfedj8blcpezFV1eUVj99f+3td+61c+rKSv8unO/g6uVpZD2uk7QCEGOLCYw3ZxhWnTQiWRQbQXIoSjHhpHi7jfvX5j8vDF1qj/i7/17bU7X7lx87og4Ks/9fp3Pqhe/PCHu69tlapdzttm5T00AQFaNju3XstP07qZA7nMkqg7TJYcHWxdmx5PWVOHg3jzlTfN5BLmc+StJ262nOqi8JAwEtVNm5fn2cZGszw7ef5kZ7DXDZhwWgh8//FVI3iHhKVkq6bElDJKmkbHEVvf6IMwjLfHZlXs3lk7P1oKYw0BFLAPP15QgF7sRfTB4mDc+/Ev3h0fbLetLJppkqF8pRSxWodFq6/On6eOi1JNlUGE1jmoZYl6CkQEhARLF6HeT//cX/ze77/11R9/lWy2VyfFy4+f/f6ffjftc2TsqwfDpxfV6dXjz75xrZJIzAvpyJ9884+Jx/0USaHvP7vsxmx+Im/e2Yh2WVVMGeEffe+90+PjXvdH2cFwfZzmi8Xx0YVrzDyvvOs/P3tRVQW48BTz4c6NzbXXXk4exUKvbY2Zc3qh19cPGuNn9RyuayUkSgDEzramEXPQ5A7o3rh3/thIGXgVbAS+p5r/9T/9e3tv/vTVyxfnb32bVY/yXLb5KjgI/vTdPyo1XWrX382s1leXOa9AlnXvv/Wkt/bys1+68eWD7Y0eu3zw8f0//c5yoVdT/93Z8YOjxWdevymn4mCLdW/dePLsyns4rXV52oRAfLoYEQww0lInYYiIF7LknHgPlNCYQG9UUy9YlGYkkqqxzlGKlVacUiklxhggCBB2znnnpRWiVWHQUc5rq3AAtJAXV7NitcABz6X8/ncfwYB7DI2wGAID0MWynC/qg3F2Z38AnBz3O3lRpjwOCC6q3Ii6Mgx4uwDGA4yD2DhPo8hoX5et0tpZwxgiLJQSawXPF14G0f69Oy6gAx4vC1W3opfFnUFSThZKOugtcLrMZ21bQsiH/fF8eRmFsQMO8wBToKucIkNDrBFtpZ7MTiD0nKPN7rDS041u0EMaqhYCUqzE7PRlsLEv9OnVaW6hRq4lKJv7ZDC4uz3Yw0xiAsq8dt5ZLT3QPEgpoVWtMQfQQS9NsSx1413kjGwhZEI0k+lZJ0sH44FooWs0BHB9c9TpB5P5/Pvv/DCvAemEzjHEWHet28MEBsnNO3dsrVbT2exyirD33g36Q+rcanYVJaiTjuJBP8PYOE9ZgONOREAQdKBXhEaQ0sn5cjTozCbzIEqRA14b4JrxcNQwqjVqqpbQUAEEUcBxEEcojBJvndAChVQaABFAlGKImQsbWVHCMbKM+XylXdsq50bj7ux8GtCAIIB5yINMKxmGzIegampACYSqF/O9YWcyeRnFAQJeNW2802XBYCVh5+DgVsq9L+Fkef785Zv3vnx+cQScj+OwrAWnMfButVzu7t9jyF4uJYCLxfRKa0mAL6Zz63Qa81aZrJPNp8vR1gZEcJEvB0lQF0Kr5hMVBBwUrSgKl4VhwBwChjJMEW6FxBjHHf6Xf+pNZduVKN/603eIp8rbMI1oynwQOOEghhu7W81iKVdOtA0PGaJU55WuHVSejzgjWLQOexhTMthIkv5QlNXsfG6dh1pUSz3eGgShF5XiIVNCYYhbW3e5e+XO3be/9V3jUMCZkaYo6lU3SJPo1dduXM6WRzM56JJeGgEHvNW9OIQ0qkoVMqyU0hJUee2dYCxIgy5o54/uPxitZ9moq5CSUhKMgHMs4CGOhFQWKOShtlp7ayACzpZ5QyA2xnfSjrbeGEGIDxhvbZ30Igioc6otqzCKcIQ7a0m9kvlL2WdctapRS0oRZxxCpq2ljEuhjdXOsO5g2IqiQDNKvQNKC2PLXGNTtcXifNpUery5NRrsPn12pICJ495ofYgoEdIj2AuykPOAS4TjLqGcBEEpKmYJNA5YqFvptC8XeZ0XURJ5VTmrojDV2jhpO5sdnnSSTrac5Dt7O5u3rgeEleXi+f0fqXwJvLamBaZRlnKSLucTldBhJz2/mIwGG7J0RiGpLXAQA4pwZI2YT6fTySpNIo99nIZWWgi9Mc5rJKVV1iWZa+U0LzklkAYUMS+qxngvPymZsU4bo02hJOARX8yuNHUW+EbWxhgtrIGYBES1Kk0j5Sujc+AMsdQYCbUvCw08ijlty1VjjbLC+kpqwAju9jPnoTc65OjGJi/KZnMUCgV7aQYg8o4Vhd7dOMiLCwKSV+68/okKTo4mwf5gvpptXtuazfP75yJ3dJ63AZd3724tp4XNG5aQvesDVYSPj4vQtH/561+8/4O39nrXhTAfXzy6/uqN26+tv3x6crkqU2z31lKMsHPg5MXTvf1tAHFn3DdWIcjjqH3nwYvGu5gTZmA9n/7Fn3rl+Wl+dTS5/pk3Tg6f5hdTzqKikUYZo0A8EL7OOWpuradN02LXFlXBIoBQeXX1/s7eZwpF927cWj5/UFS5Z5LzUCmbxUlVNrN5oUpXFiDEYHO9/+zpZat9Nu5kaXo1qfYO7kbhXjdtOgHn18ff+HcmX8wSHkY8mF4W1BldrbyVhPjhKEMQtKJKeRJRiiMz6m0u5sXGThRm6MGHH79ysIkgrBsdZ72T08uUI8D02sFgvjxF3sc8vVrMCGIxTytbKtMKY7AFRtnZ7KJNZm9+6bXD++ezop4rM1uo1WSVDvjW7ujo8NFgP/WVhvF4a2vjj77/3bt37101xeHk2e2DGxnV86Ld39mZXlotxGKWN1YTFpS1NEABC7Ost5zli27rCe33u0mP1+ABRujrX/rS++/8SYYopmB3nM0vz4YRSrGda7PSrD1ffel6llDeGtG0MnKOEKa11tJJCyHFBFGCMQSoFMYTDB2W3nsEEsatJsZ44LzWkkOHMQuw9yAslCPIGGud1xRRDXCh1HleOOQccTGyxiIjGy0rD8PWAQQ99bASAGHHqEMQIkqFlBBiBKA12ChgvUeB89SulqrR0GsXhFkphW21Rz6ijGEJrfcQzRaKU48aR4mPGOmvp8vWewmqUjjn+gPy56wi4+DxWd5NY87Y9Wsbs9olQWgcXSgznZwM1zutaRFo2vwKumhrsJPEnIVVTNRqNaMIRDymHPe6BHnsjA2D4OT4aL6c97aG7aJc6/Y2NrOTi8NVvgqjbOfGrad5iYSdntRVKZq6bRs17A5iFtdWK2s2Rz1I6g/efm8YrmvSyCLXzBMUBTQ4OHjj6OqD0TgyA3lyOPUm5B2qnZ9fTtuivTh8vrabYmCLixlpTre6VDdt0hku6suNZK1qpmsbo9XFhdLo/PRI1AtaTM/nz6KgN9gcYRIsFicJS5yz0sh+lvR6dH5+9Eu//Jt
VY/c2tjjnTleiNkKCQlU8ZJSUKYuDyHzw7u+998e//bkv/dzrX/ib3/7t3z340u7Snzvmj1+eFUJj5X74u38UYfTx5cWNZvOj7z9pWry/ufmdX/ntALVyWYaDdG+z9/u/+ocff+e0xyLhGxrSvJCdSLvWQIOiKB7vrFeH8yCZdNZ328XlMCWAILF6oU/LJovTToBVonwd9SKGWF1ePfhwtbl399r2He/9w4cPu4MODtsGAFte5M9Xtlym68leb2tyPteTDxNsjRAWkjuba5PT95rqbOcgc8ibpu0ObA3V66++cXJ4H4/5cJiV5QkJsHbVoqqPXhxZiTfXr2PC5pMLAlxIgiqvGI96w/5od79oVbyN+gG+OjmsGjPJJyCOh72hN5ozj6BzTdFLk/nJOYUQwBCizrXPfOHD73/TyaqyJkjjq9lquLXGocrzq5CxURCrcjEYdVerptvvL+b1RKvO2q2V0djqxslk0IGq1rogQaY19JTV3sGmCTvhsixWsyavGx6nu1u7RZmHXnR4yP59xK4RSiunoTcAEuCGWQcDbyHo9GLnTFlaDHG/G8RxAgCwUldlTaJwOBroRkghHCBpyID3cRSGEQEIYEKkaKqmpQRiTKu29B4oaSRQCMCQ8CxOjXHI+eVyoq1SrcKEYOCBh4QSqbVRllGCEKCQEIqt0d4LZU0ra0SJQ5jTkDFOCeUEiarAhLCAeGOsMwBC5711PuIMQYQRjjA0xoYIJv1BW5WEMcowxjiLE4SIEloB3BrrEAiiiFtrrNfacIpbrZ0HykgAAGE0ItQ5b7R01nBCpVXwk8yRllJLwqADnlgDgMcEhQh7nmAArJXI+rbV1hnkAYOQBrExnjCIEHbeWuOVNdB7hEAQRnUtGEHAOsoYIcR4mySBVLqWkkAexTGGzGkDgJNSeu+t1RACgrGFQOvWGIsADBhDEM1XVV4WzvgwjgH03mjnHMBEaie1hQQxjGspA04ctMtygRGR3gdRGCCymC8YgcxDaAACzDpt/Sc0JlA1LQSeYGSdc85prTHCEICYp1ppqZXSAGNXtc352YWUmlAGINDWitZABLCHwJuYhwxjTamwjhFcNLV1BjhgjRXKeGdbCQEA1loPPMPIOg+gBBBY4y3xCHhtWmssghAhFBBmrCMIEugxwhgCoTWAuGhaba3zUCqLEILIOeud8dhBB5BF1ppPb5IfPziMXQQdeetHZ4PtwcGNtZsbO+/+4L0S+j7lmNBaCZ2JG29sz64Ml1HACK9ItVDU2ab2u+sDKuyLD6bjV3pSV9YcG8xj361WsL+xQXEImDbUzz2IDtjm2tierJydp0wzjGCSYoUanZ8+ub/eNRpA3k4Xj2b5xdIgF6ZdYlBxCcK+lm4RcDu9XKC4l3VHVdmsqopgGmK60suDn7wTv7FZn9Z7B53f+fD/k+3T7e21dz78d0Y38TYJorhHevnqyFgUdtioS588vVL2vbXde/NHp4zSAUwXsgIG5RfTkJfX7/SI56vVFRDVK7c2jz96WlqNEx2PIuhJ3ZRR2o2ZlE4PN0ILJunG8Na9/W+//STgkNCEWOoFIh5EEQfOxxFhGDlrYotWZ/Oy0d0wqlURjtLVyUQIFSR0YzdezOtStwGLvvfg4qPjxUY3ub09uvvqzcn5wgO9KIsW0kbbF09PWx97FsY86sW8XbUb16LBwd6v/8lHWS++c7CJJ6urD57d2797cr748c/sBBrkF6dm1UaDiHg4O1+6xrSFe/tP3yEO88jrhjhpOwH5ynriFL6YloPNzBn0/OOzzqu3ri6nOC94Rl///F///Buf/de//Etr+yTu9eNw264UjZLV2Q/E5VudkZstnl/bWkvZYMnocLQTBE01rWk36sBkz938V7/4TTUmX/sHm9P8yHsdYESHbHFhsDfeOlFIoyLCOnZZgpPi+l6099re9Ie/0Sqd9nKYDfa/9lrxuDh8/vKX/uDD0wCADvYa9PtBxt3yvFy1FFlo6+aj8keieUsKoRovNKkFrGywqsRpXjX5k/FaV7W2csnG3nbSGQ7Kcnl+jGT5CcQRAeAhxJgIpSG0yugYI2e00BXCgSeE40CWDcYsxKitGuBdFDHkAPQQOKdFq4z0ViMEMcQUUwxxSKiVTcQ5SYKmxGna/+PvPUk7AU5iFvKdYTrWer6sZ1cV7QSO4tO8ImegLlUWrtbH3XJRhIMsinhRtUnUkyKXylgHADC5FIhGVpqIYUxg1s2uLhaK+GnVpCz67L03a9hqgJx0ZbXSwqRZoNs6byGA2GNCk/DZ0WGvs8EJdh5PlytnQch5WS+dQmVZhwHR0iCCIPCdhAuhkl6nLitu3Xoa46ZYVQuq1LDXZwFHDg1u9n+8/tykenD9+i0oIrVcqVz+hc9/iXI1LxvohLXAtRpha4Up1DKOuTW6mhvnMeYBISiICcF4NNx48bTI0p6WljCe9NZJ4oFDomk7Ox1S146ily+Odg+23/zqV7yEtp1vrQ96o97z0xpTVIhVf9wLO4FtRbXMN/oZAUq3xhOFCF0tagB5TIJlU0OGR4NOnTdaqP5gqFXDA5qvVgjRTkwu54VznhG6yqVXGHEki1ppE0ahaGSAecQZ8AYSP19cDIebTWlr2WJMVdtyjOI4aUQbRKxpGo/aolJd1MkLgTlzVjeNIzykPAGIaqCdNYiQQb+rrJkdXrT+Ku5mAWUuqN58840XL14CGgdJf3J+nliLGRqOsiy7d7Va1Y1WVZMkYbMqayy89aIUqn1QVfMwzIzQl6cvaBBqa/LlqjPMMHHcUCXawSjqd6OP7h9u7m+jiJ88fDDeHX4yC6ZXSwCprU3r5aAfpmlQtyXhHELU1DV0LuWcJB1esZgELASD2+tpFnWy1EEIMbTWAYCzLD5bXDIGW6mFtEYD4dzG9X6QUqnccjkbZBhZL4vV9OxC11oK5whEBBOM21WxaCpKECYYGI0CgiNMjE0QYziqa5F2Q+JAIUzdKC1gSN3u7n4N8Xgrrafz5bx2Uo26vTQLtXLOeQIgbK23TrTSY9TdWLt+897h0RMP4cX5AkgBvbfeoQADgistALLOCquwcybtJEnaV00LIa6bikJorJbKUgwAgVKo2WTJgiBMQoIdDLzWrSkqp0tlDIoYSyIHQRAmiIEggEK1Va0cYiSKgNHpmAESAEOX+YteqLqppRwJqyZnlxbhvESLNqQmPD054jDcun4Pht24lwy6vZOnx1IX3lEEg/FwAKMIYaicIkEMveeIe+cxjlg2WM0XLEjKqsrShHOysb794tnzoDNGQQYcDwjYGcUhWLnViQmj80ePXTnrZYHHXEiZ8m7TKIB4d7jj9SKvq7TXK1tJaGixxQHCGAGrw07S7XUsmqyKZSEKPTGsbjY6qZVtXQodhwCzbq8HvGHQ57N6MEwwQM2yrqvWWK8R9BYA5bRSQrdKgFaIpDcA0CghrIdBEELI0KAbj8Yqz5lR1tdRlnljF/NpXecUx3WrgdWqrqAFUttBkF5fQ5NGIuAwwo3UacSzCLWt8NpvjsdAg1Ve7t856IV9hhOH4cHdfeBVWX
96hjke962H127c2bkxfv+HH6zylc/CFvj5dHnEndUoiTrnpTw5qYYMUNX2kF5OTu/duQNUTT3cWOs6UcXEIe8QphqiRsBiuVpfH13f3w+6XSXA0bzY3+mKWXt2OpteLkgU9HvDvX63XK0upmWnm6z3+r//x98plqtXNzbDwAOC2lo31r58Pm0bl3QC3RrbNFsHfauaV67dVK1xCA8T3MzV8eP39eIKBWzQ7UcknE9zAACPLfLYEXp2MdkYkJOzKyFUW7V337j34Tv3r91+zcj29Ol76TgoWvDywUefuX036GenL2bcQmvtdDLJjMHQaakS7ZQHUZo1q/zobDLcHgxGvflkqhpHUsZZdPzyPEgjB1Bet8aCVsgIh3UhUx60uTqfzR0i0jrfKq00QbgTB9DYbhhWokHanRyd0Dj64p2b3333u3EQdLrZoigLpSIHP/iTH9zYW3/v+aN0PNheX+uj8qe//rm2umqauD+I17aGjz9+WS/bVVEuq1XcjyyBrfEM4X6SQg/Xtgab9/adAGcPHwcoFk2tjJhdnhACGKRFXZ9NFkCC2lhvVdMaThNj4elZ7jATwAdhoqSfiJxCOwgS2QppjCHYWYChBwR7AKwxHnjlnIWYOGh1SxAtahESQIjzhCXpuhFcNzWCiLDAOZcXJaLQGqmMntSrtU7IeOScts7HEW2VDOOOKldC6iANm7ppGxt24oiHdVVo5QmlGIXaGNMaxjUGmFCKoV7mRTYYTOoaYktTlHBaNwvIoENEYVznCnizPgSRs4wgpQAjoG71YtH8OasI00C2dUsclvW8LW6+8uZyejEeBapm1JswDAf90ZN3z/ph3yN86+DG+fQw7WHdum6nT7yYz4XHGIoqoMw4wxLf7VhlHA0wNmnd+j9954fXNm90eFe1ebuaZRkgQC6XedzlxnFeB+P1rb3rt08VWb+9c/H4KYedg70vlK3rDGHQpdibKA50446nLwldX11pw8PBtR2HkquXH0UEOYeUcWv7+09On924vjP0pljcv3lvePhsFff2b4cb7UJdXLXD2HtMLXM3Dl5tHz4wVRNQFgacpnEaK9aoXk8ajxiNgyj5xV/9/dnlfGMw7o2jOGCkw2I6tE2lEA5Db9varZrCye71XrY12vrb/3E6vnaYn/7Ef/IVwOAPf/fBxcszQv3ablcQ9/7jZ1/76qv3vnR9bv2zo+XP/9hfvvzwbakLyqAJ4T/8P/3n3/rX/2/wyFYN4oRSTNbG2dHhE27orZujBdCqLIzu1M109d73dm/NLRARd5PLSZQGTTmZLv3nvvqTkdtSYmlFtXtj7/z8DBF/fvRuPxnfuPe5L/2Fr02uzh6992dxSjsRoVHGsyCflrQXw5Afvni5tTbAPCAsiBhsVe6IDph++uGxWpjbr422t0fvv3WYRUk/jkyTd9M2wGjerl6+vESmu721OStzTIQWkgdwWSxp2uls7jqIPSHUowBa3OFbo1uTyZR0YBINrcFOCyjkIIOqXArVQOClBhvDnReHZ81i2Ru9KpsSGBWlKeRLh8xqNUEQIYpm5TIM7XA87MCslpJ1U92Q69f2Xxy+07ZNROhanFw1F+P1LiHR8fFVfzzc3Iyass0CPlsJbyHBkbNgdjEJoU16XOTLtmg/UQHnXKoKWkAw2hgnjXBV1bR1Gad96kDVtMB51BijkJVKtDJMAw9RWYuA0LQXt61EGBshCUcIEVHXRlvGGCGYc+Kc45wCT6y1EHptLWgxcAo4J42EHlJMeErrpmlFiwFQ1lsPsqgTMmaBsk7IVjqrRdsACLQwXjkEIE8jHkcAAK2sd1QpI23DKHfQKqkI5sBBQomQCnqCIdZWWm1ZSDv9PiSEM1rXohFKmyoKE6ehEMo4xWmEAOLUdrOQIDxd6LbV4JN9wFrtrTZKCmu04twrJxm20APvIWMccwKdt84JZaAABDjjgYHISIUUss6wAEEPrfFC14TgENM4jEvRekSNMkYJgolQbRBR6B1jIZCybgXGyCOICOjFEYXEa+mh1tAiCI13CEMAodFaG62NgQgxQhAmGJGmaKUREMCAcwKhAx4TZISVQkljvLPUw1YIZRRECeWhaiv8aUO5X5YFpdg2thHKhkxYl3AWcF41UktpjSOY1ko5ZzACyimKeEi588ZDK1RDMSnqdrZYWOs5/wSTqb3zhFLnnXFeGs1IIK0BxhAHrNKjfs8a5b2vmlZXygGgtScUO2M5xRhjgDwAHnhPCCKYaqMhBAAhax0AzhonjcniMI5CraU1FiBorHPOK+MwwZhSo62QNnDIKJ3EISW0LCttPjVMf/yrr549uPrgyVx0wo8/OtnavvPw6VmhXWkcqezF8XznWgpAA42MGIIhHQ2zHo6/9c7bGviyBHMG7+rs66/vFZ0cduIbr18DrWvOl3eu7z1+PknHFrHAayXzGmJ1lTcIOhYE88tZPZGd6GBtoz/RFZDt1l6gWmmoBTxbzLHWhssqDvtfePMn/vDbv0eGlg445juB9f5iwkHlmxohjpKgKLCLRbiWpFnv5s7mj370Qz5MHl89astWIW+EO14uicdJhCA0KNAA+t56nw66NGU8dgd3Ph9F5Ds/+D3opW7mB/eu3Xrt88uT+8xrtk5lXX/8/vlS2y/82AiEyDMah0HTLllI18a9xfQ8u3X95PRhf304Gj8hgYliTWH34dtz4bF11jtoHaAMDtd7BGskhVtVlHQbUSLKBjsRaVG9MjNRKaPkAiLfZAnvbw2eP58fHk0/ena+NYi/9PWvnR2+uLicrPIqQMHaoP/HLxbAwFR4a5vNXkdcHv5v//ZPnsnwZ//OL8xOjn7t//5f3b09/Cf/xX/+jX/+Lxbns3gcbI77/TQtVxWlIBwGrdXzSksh4wCWK2gLyVq3nfTX19Nmudwbp9/5/lHJ4D/6J/+guHxUFgIHO+Obn3VB8OVXP3784o82tvjJx9/6ys/81KKZffiNPwNlfXm8ojG7PJwxErYLeHG5eP2Nu6ftNDAZUNGf/NnDxRQdHS13bg2DdeydBQwgzJjVg0Gmeff5i3x16exycYPguBVv3vlM/vFL1L+2fuOW57A6vBB2ei4/Ams13AxmM+MN1pN2a5xFEXG91Cy891ZWuLVACicMURpCgDmn80VlEKYhvyx8a8vd3cEqF6rFlUG9Tti7fc0spwAcAgCss8g64BxEWErtIHIeEgRiHngMWyGMtSGPysUiiLmyGnnojYkIxchQBovSeEQgwx4Y6VxAOfQGeMQgaRetM3Za6wrKYHsgjdndHCdppzdMGcbtqnrw8EWu2stZweNg3ppiNdvpxGnMOY8Wi1pogShG3kJrodGcEtG6h0cXFwUAHg8jRrxa3+hI4Xivv//aZ3f21uu29BiLRnhpQ068MAuxCgMexXFV5aNhx1vQjdM4zmS77KRxXaiqbNIs4ozKVlAAIcAeQGNsK5rQhwgHHgU0sqJqADDruz2iO6tF/UffevvWnbso7ueV6Mf05cf3uzj8zL07R9VUa0Btc5V7yiPYOmBN1KV5XuxuHTw7O6wKsTEehiyYz
+fSqG4U8gC3rZqXlqRB1MtwGGtrFsuWUBZ3Mxyl0DZl3oy3t/7SX+k0YQY4m0zmEQBEmctpKaTppOn69Y3q6ipizDjpQp9XV8Vstr05xpAvl213fdtC0ButPX7/yRCsFavWaVc3FcYQI+d03a7yTmdjulhZ64IkoUnktTdSRBwDjkNOEPIWAYdgLWVIEASsk44YssN+9/hoGrIkjkPRLpN4KJStyhZ4H6UJDTh0zCgPMRJKO6MhpoQH8XA8Gu8/ffw0ZCY/f260iiJGERDKUCm9BkXReB3o1o12srb2VAdOicv5dDzeWs3LmHPijZQmieNJkaedjKesrRQhAUCmrOYs5hY4jMhwMBIETXJra5F1yLWD3en5qhuSh+9+LPaGjNGy+PR7ADBFgAqpOYXWQamxMpAybK1OkpQwVtfCtwZT8mNfuHX69NFguz8ah65pAIaMEsBwsaoxJGHc9VJY6XTbMBaSIGi85izC2N+4u9suzo10bV1qY68uCg+A8Wi4EUEOO93+/GyaJpGRumkqFvJeLysnOYk7129srcr6ky6RbIjnhaLGplHjAhKN+7vb3TKi5aKgQbJaGIAaADShXIoSE5dmsbfGtsKYqtGQRxh4d3p4dn13M8hYPpkRTJ011oGqXnWSntfGAhLwLnDYOIegIRAjD4XwjWrW19YhEIvFBIQ4jKIwiKSuMIXGeivA9OQUsaQf96SW1pI4gRgDhB0EwFtUN+1oY6PXjy+vlmEvHfLO6clTosvZZGFdW85WlHZp2osDgNZDCR3rr29trYVJKi3MvS0nCwsZ63XTXiSkyHWRAuy1ZdBTCrM0rlYtwSgMoVI2ilJHvZYu7o4tsAthtl/5LOp0O2kYOrtaXiAAdD07P3oaRjHUtsuhbErH0rXxtvFgi/HpbLVYqKbS1AtKOAT46vJk++BAGc8YW82lVgKtEFIO6DpinCd0KeTsRXV9ve+tK9qy3+saKcWqEJhyHlStHygKaruc1kEQBmEojKiWlXUSc+4AYJBAayx2QcQxQd1eR1aq1+22cuV1C7xPk3Q+myLvIAs6UeQs0LRtmjpgfSy1XMwHSWoAF83UOM0w5FmytzXoEJdEXOfGKza9mofJKIx3aUJGw72rq2m3m6QBQeBTMEV3vfvko8OXE1jbsKhAtTBYt4H1UZYmyeD8IhdeCQWhDAJkbu9lWRIez6tkfzSfn/fWNiot26LNwujLX7rb4mh1NhOV9pYscje8tp1tRTToueNnoxvbf/z+Nx8/PupmfWUAMmwxK7Ajjz5cQrr67Ku71ZXMZ/JxfUk53bt7TVbLVla+UmHU7Y97xfOnqlnpdlwJj4lyBjZVvZFLIyVQOqS989kiTqF0hbbm5fTw1Tf+YjmbFavV1sH6uOvzvMWchRCeH58B056fvtwajev5yefe+MyTd58e3n+XUbCqa4jD46OrTpgGYQgxZQQY7Y2xorV33nzznbffAkmvqLzn8tqd/dV5c3aVU0I5I8r4teHwfHLpIEbYNxog56SSUcS1t4N+b7aaCqDH6yOZzwNCvXV13cYMQ4CxB8vVmbHT116/eW37+m/+2u/2uj3ly73rm7aoV3WJkX7rzz5e36v2h/1upLrJiGTdpy/OFlW78dr+4bPjuhGE4roQojHrG6OtDX5yPKNBduv2LWFBW5VWmrPLZdYdRll0+Pzi9euvv/v4UYPRvFKjAEPOlAXMcaWM8XBaGc6BxpZFNAgD6pCVdimsdVA7XwvJODFWI4iVdwBBj4kDvjGCewiJ04C2wrgQBdy287OAdu7e/tr5Mbo6e5ISYrXwzgNjsJO6kRqQc+ADpoYpiXhMwmjY3wjDyKUJKlZVk3sUSEynK2C1yqKMED7LBcFIax9HMMEEOOBko4GujTEli5KgNbXTSiGFtRUaAAyMMAZ6gNF8rnhos4wfzRfOII7Qal7/OauorlSYJDgOhLQe8dlsSr09OXp5sH/z/LyFqYerXLt0WRWYgw9ePl4fJYtF7hzojaNhyikHjYX33vixP/3eW1vrg8E4On1yeOvGTc3Q5erMgIDSzdJuHp88jEOYnxjvrDXFfJGvb3cRRju7G5sHt2tJW+Gg1zgYWZ0v2rbfZUqWFLhuymVZGUcCSnjIk258dDFxVBs5gxqwKJ7nsm6t9XLj2jUnyxcfvM8RaJNgvTfUql7OJgGPXvviZ45fHq1lIwzg5eVyc/2gzKuE4bouL0+fJntRTDUP5t77j947Ol81Do/29q9HDPKI1bN8kFGAsUdEmGZ72B+mg3y5WMzVrFiak1UvGpWL8vFcwmpx/OjtkNNrBzvf/f53/tFf+fn/6r/5lQSBH//i3jf/7bNVAeOg+/f+wX/63/7K/zck8Kv37rS1r8/P58elcz7JImm8w8RY3+mMWRQ+fmltRXpJcvp03pTT7sYP2AABAABJREFUGPcvLk7X93eUrqSW0OjKzDd37jmE8kIENEzT7Phk6ozGHkGmPZInR8+j8cHuzu2LR0/b+ur5+Ytmb2+0flNBLFucpJ1AGoBiD9BiXgwHuJxdlXLq3HxZ2lc/++Xf+sN/S8JDhJKvfO7u6elJGIVRhO9/+FG32xmNr01Qe1YpwgBULUaGRxxTvnVw09N4WdRQOgQghaBalIiAGJEUQNC2EY0hA966fLUSbTGZLDwBvaz36PF9DzEJIxKwJOuucXRxfpzFLMQcB54TV5QLaYQ39HJeqEZARDrrvW4/PTl+UC2uIPYhS8+vLoMo9ADN5sswDOu6oZy42p9dzikjAY8U8lEYilyslgVP4m4nNc7+ez5F+0kLlHVuOq8ooYywQY/JxqzyCiOorbPGLpcrAhHAUChjrW3aot/NMOfO+UYo4AAGQDQCQRgExBrLGM+LwlgdhlHTFgjCKE4CGjStANYhDIQUnHHvQGssxJgzhpxVjfTWalGIxmqrKMFlUSD8Sf05RZgAhMMkNA6KVmnvAQIYAG0FJgRCQClhBIdhKLQpq7aoqoAy6kiSRoRSBBHw0HovjVMOtI2AHlqnhGgABIyGEBBlRBhRaVFRSme81spCoLTT1ocMOmcBAmEcaafjIEYeta0wHkAHvHbOecaIMgY4QBgmGFhIrCZlXSmvExxFPFbWIQCMcg2S1jpljTXSORiEoVQWexjzEBO2LGrnAMKEBES0rdHae2+8dEYhQp1xznpMkdLKGK2k0toSQsKI9LLUON22inECME87WdtKpe28KAkAAPhVXcZhHEWcB4FplQfISKmVgt4j6I0QV8slJWjQTRXGFGHgEcLYAVQ0ylpPIIQAIoQQRs5DgonzgDMGAGja1nlPCYHA56tcCoUgChhTRlMMPQTAWaVFwAJCsLUKAGithc5xRow1xhkhVVnWAHprnAcgIJxQAoGHAFjnOafAA6WNNRpBzxkTQmKEMCPKK4QIJ8Q6o7SRUiOEooC3UlOCAYTAASEUhN5Ck6axVsYZCSEMOPpEBSFGP/a1g/2b+bmKf+8PTn/yjdu/9PvfTXphgFmrV51hHI7imEc4WLOyqpvaTduJbTu3Ojf3e/qMfPzxUb6yf/8rX/n4
9L0VKAY0dSjtbY5nF5zRa/PiuL/m0w6HeeOMlKu2cCgc9rz3R6cvbt5MzlamthEG7WgzPfn4Steqt0HT/SgNYlOZk2eH9s/+5D94/We/9fY3cgUWvBxmzs4vSASTLFPakBinIQPAozRo9erxi49A0vjUYqgpESxgTd56DyFEHRith6PB5mB20pha7d+6KxfmlTc+0+iNre2bZw//x/6uunWtH1J6cngq88JG0nv6Z7/10vP9IDQIMCHLWpVpz4Corcoqyhubq2c/+mF3/Nrma9f+g7GtJ+fvfXgSBH59PTUOYkQ7cdgI2R8lNCWX9WznYC3e4FpJ2GJEQ9lYkCZpD/u2ifpQlkYIkYTk0ckx4yGn5Pl0eTpdzPkP2kKud+M7t259//RH00XLPQMKC2vu3Hv9c5/dfec7f4yXc2c6z+8/zk9PrY2BC3/nX/4rtxIfPHj85c0v7O/tXJ5eVHULIcrWAmCddb7QIiDcQzrs96JOdF6oj48P69a1L/NwlI32wmL6AOlqbW3U2b0GW704+4CiRY+7jSFe+3r/7MU3V4XnFDw+9dBvH2yO89nV8wdtEve+fP3N+pze2/n6rduv/T//y//y2bM5i1jq4vvvHv3Df/7lh0/+uK09dDIO4noKigVazD3WaFPYLab/5j/+G/tf+SnBOyzoeSXd1bF+/ti4eZ6/hAPbvR2180VGMcVwOW/7HMdBQHvUeHA0reuFQQBQxkRrpdEEG+tx3ShqFIw4Ue5yrrnz3lrrtDbKAMc/XYiAc45xhgB0HkIECETG2k8yPJQG3iEphUcgzsJGKhqERmkpTQCxsVo0SwhIKw0ipJdFiyIPIPXeaufCgCsApXE37l6/yhdbmx5BECCiBES5pmkQD9a//GZ2NpnSF8dtUQ2GoW79eavai/l6lvUDmhJeSyWbRUIQcDiIeRyl2XR5XBaAoTNRq1aGw+TLX/hsPOgurspiMXfaBoxGIZdAeuABBghigCHBKIpCpawQFkKqjUyi0NsmjADE3GjRWLUx2p7PSgU0BKaXdaxTBFOlTaM1Jdh64FpfLB01MIkGH9x/Odq/fvPWTYq0KdCX/uOf/1f/+t/Np/nWemy1rVc1CcNsvGZRDmDd1vXWeMAD9fLk8euvvU5Drj2eLoud7WsbG7unRy8x5fNlEcfh+bQ23o8HKWfUOggAKlZVzHB/uOax5UkAeVzO52ESdRN89PL+cGN7NOpOTy95kmbd0fTyUlVFgHF3OIo7WZqG3rlKmKgfT6a1XJQb65uE4igM8jxPYr69NXr54vnu1nBFwNnV8Wufu338fIq8u5ic95NOVeYk6AcB062SVcui4PTy6ObuDaNVW1c8JKIBmPl+ul41eVHWjMPDs0OOk043XS1WDDCAWFXUjEBMHPCumyR1WQacSVldLadpL4sCuzzVsmlGax3jkSydcZZ6Mj07K5YVoOHs4hzTdHdjfXJahTjkkObVijrqtNzdvvny8DCLE8oQRHC1FMDZpm7rVlEAYhrUqsaMbvSzVa1bJu99dufDt+7bUiNOowh2R+PNjdGzh88+nQUBrxud9pMwClolga0Hw66UMuCh0FY1gjDGkxQCYEpOAF3Ny7RXI4cBhYIQZywA2EBP407EOKQSYgKgBRzhmECEq8VMA1DMlpPLszfefFN42K7eA8i1Qo42Ov3B0EiMg6C13nughCfMW+2jiABkB4O0rook6T55foSxtcoQTnrDAYS2OjnOI0TCZHO0jiw9m560c9FNSMJJ2s1QDQACcRZLK7H1oiko9piyvZ2dR08+rKHfWBvwhbPadrJ+EiQsjKhHDtNOHFXFPGCQB9xHxDWOJEk/HpTL6Xx2jjAKOCMUONdIUVMClZIIhVJ5bBrRVJ4iBKhsFISOBwFnAYDF+tpguN5vpciylAZJUTW9wfrl8xn2IaG8M+oiFofdCBDSCivq1kNMWQiENY0kWeidxcywAOb1jBJuGyEBChi1ECBPWyQhgFUhVAO0sUknUsKtx2lTt0na2djbFsYDxqFV+eJ8c9idT1f1oiCMam2kNmHEaBgspyVhJkjYxeUpDqIkxc6wpoKPnhx2Ol1i4cXZeamgg3Bv5wAZaGTDEdnZWLv16u3BeOPZ6dV7339P6hoAOEhiBhU2rq4KECRV2Vrn8mXFrEfQff/973/x3lcdcKJtKMeEYCuNBl61giAYREEUZa22FMHF+WkQkSwOPfSU4DCIEUDEKoyB1a6FJkv7cTd01SrtJAzirnQJ8a2RlMFcqtiI0BMkfYQwYmTt9q2ixLOXF9HeJu75/e11GrOmkK2Un+JZrL5+d+vBx1dPHyxw6A/urCupqkqsr2cwoN0O6XRib3S1WP7E3//Jdx98f9kIBFi+yMu2jlTlHWJxkM9kLUw0IP00nk1XmNB8svpg9pFPrXNBB9anb38n5dmt7b2GJMDbnbXB5fGza5vDl5cLgHE347uDToTNqso1i+4/f0ItW0/p3t7+8fGpFmx9b5D02KpRRplFMw+TmBBydXUe72winF49PR+uj8KAylqEnIfhxnJ+VS+WnKBmNXs2FZjw0WhEY+4J6CQ9UbVX8jxMwm/8zh9+63e+S1HSGw46EU4D6tLYOXvn2v7D03POEs4Dq3QI8dP33hL5wnqlJF7NFPAtMLQ7TK6mS0p5aSQoKoeIlYoTFIRBVbXAeYgRtl7WNfGkkWrh236QSFE5DZ220vumEkLK4TBJ4uDwcPb46QTTcHU+v35rMEzCXMrjZ+fOu8uyWTw9U01lynz9zt4rnztAMDHNcnZ1dTW51IAiZHgQ3H791nRygjToRakAfGNtc7nQ5y+eXVvbePv9h8N1TmmCafjkolAoCsOoUBPgLPHeaLMxGB4eNySKG61CzqBFQsNO3N3cHFzMV7P5MouJzufWKIehB9giop0hEDvrMYTOO8JpQljZaoKIaiTDEENwfvbgPD8KUJcTUi/qJMUAA0Agd4BCaxyphZNSpZyu9wPdytG4u5ICsyjuo0qqfLVY5g0NQ6UUcCjr9IKItEIYY9vGI0KSkK8WBSD49o3XnKcvXz4k2DtnWRhGbFDNl94BB7xQFiNkQ3J8XFBOGSXWOe8gY+jPWUUhDfL58vhqvrOzdv3Ozre/8/7u+u5kqhBrIB7XIoxg0x11W7HIOsGH909v3XhD02gxKxzmyhvGeT5fXk7rtLe7mlUXl/Pdwfjp/eejzQ2sQW8t+uJrn33nw/O1tbW97eFHHz0sajle7zqo8twWE7O2Nuhm660s12LgZLsyZa8T7u5vtvkqv8yJB7Km4/WB87CansxWeRClw4N9EgRzrTYO9qtaBOuD8XjzT771vVvhtlhdbh30dK3nc0mR6g773RQp2aRQdeJ+J+m+PH4BWQRD1VZF0KPDdR/ErLcOL69Wf/btw6mEkePjtc3O1tpyJa301oDtazuoQ1oj44hv9FIM7WW7ssSs3d1lcRyD8Pl3j4Vcfv4v/AxYPkwDiAP35PHl8ODz/69ffeejx9W9V9be/97jd95dbex0Fq23MX/vyeV00UDof+LzN9//0aPd0eDJ5eT
OzZEU/PRkXrVyb3ujLFadYTbc2iwvj4b9FKhqrTdatS7q3LL1CwIvsrQX4gtVzrWuxr3eZFLG3Q1RnE0vrnb3epvrYRQT49Dp+WEp2vVr+0nn+qOP31YgeHmyREoP18aYQwY05aG0JU3ZYJyevnj45MHTIO2eT+wP3p+kLHMzyRgqDpq4E3IMiyInIQ85q8sSOqNFeX3ren5+OKnV3MPBeGhoCGCYxQhoAb0TVnlG6rpBFlWrmgcYGeuMWC0nECrjjAeozNu6msQ86A5GOuyZulZNa4SLeKilzNu81+lPLqZpL0q6mbdgVVbdKNJSTo9eDPsDZOsIgU630zgoDeJBVqzK2aoBKBZStxqUra6lyqIwXy1Y0FMW1XU9WhuSIBBCte2nI8F7hxk12mIItXROSWOsRxhAqJ2jlEHnEcJhlFopAQBxmNRtBYFXSofWh2EglIwitlzMAs4ghIgg5/xqsTTaeeBrIwnBhBBnvDHCGsMIJgQNugMLgPeAG2eMYzxomgXnOE2DIi+1Ns5Z6FwQBABBxmnTKoxQnGWAUKed9yCIYgAsgjBlqZSKEQYxZhwr7SBCPAwD6wAASZYmMdcOUkwYpVJoqWTAWWOdd14piTFGiHIWausw8gSSqhFGW8bTmHDgXd00yhuhLIYIeKQVYDS02tdSKuOBdyGnVhhjLDLIQSiFMJLwkFEKIcBBEBHnQhxQSB0SadSpq8oaIyDM0lhJLQxknFLqKcFGK2VbKQQESGipjILAa6WBtR54Z1WAMedMaa2MaeoWIyy1xRAFQRBwqq0QrUQAtVYjgrQxWmtrHEYAAOCN60QxwzhhlFLUKAgYNs5BjDywmAdC1sA5AHBR1gAB5EEYxk5rY4y1DnqACMKYNG0LPCaYGWMwpgAiY4wH3gEnpVSiVUoC77W3VdtC4J11HkDnPEDYeocAqoREEFJKAkoB8FfzFYJAG4MwxhgiggjBISPQWe8gpsQ6p7W23lvrnbeUEKkNIgQjrJ31AEIPnPdNK7S2HiIHfKOU88BaK41mlDNGjTXOQWW9NtYC77xzGn6qAkQ5Rr2Av/vx8ddeufHa9s1/Y79FKVNej9YzS2DemrXu6HRagRiRjThG2fx0eVVK7pc3bt9+/HRx/2X1R984vfa5jeH6QMku7t7KRjtUHXQadXb5G4vLd/nOKB1ETSlLSSynrJ/WYnVw6xVbUB9GrSiRgytRKN4AHC2UJJRWyHX64dpO8uC776hz8Vf+6k+dqMsfnh1BUELGXUAEQha6VtUYIQvBslFt6/qdEVKTq2IOLIHUMuZ4nyS9gAbd+koVMhCLlGU3RrvjooUdzjrRwfs/eLDZ/7FXXvmZuf6jL/z0X//BD/4skNo5NzuWR4f15AgaqZxrLhL62a/tHp4+rU7nmDW9fufi0ZygtJTzQTa/fCQxpJ959U3j06S/9vC9iQthuVgmFPhC9bcp6pHRnTul9rOF6KdpplJtad9nYSeZHS1AiXcG47WNtZv37s4fndWCf/DWB75WB+OOFU2jVSvtqmJn8xNLQiXxzWu7q/kkGGa/9c47F/OnO/3kwZMH6Xj97Kk9u5h86W996fJ0drA+uv+D573B+PDl1cW0SALuGCxrmU9LqU3I7Z2dNENM1rJwCqN45eWs1ZTQq0pVQP7cG/e6HaSldUotjo83t2PSbYc76yTdgK2BFGKKzy8nv/vrL/vdG6uczcqyUlTXDXPLN64lonX33ew3rn71cj6HDA+3ksmTyeVh8we/+PHGq6m3phUQGDJ7adqrOsHo+pgH09kbr76x/xO/UBjqaORbS8tl+fhBpObn0+Mnz88HKv7qF1779vcmIkcmY/OFWuMo4ahVKo4GxunSWGJs6iGGKIhDHiBugAu5s6SFMM9lY+WtjbSXhshDU660BcE4+TROAbGWBkDgPfYeeWe0hc4Aq63H3nlslDVG0BQ6AJUywPuYB8YZCDyGAGJCLGiUrWpFIdNaE4KAN4hFnFDb1Ajh9cEAWaWVZRAqaPu9ocBwvhQeUYzIvRs7qmiKxdwaUzRN4ez5pNlL6c2dURQEpRCVggDDfFkhDV69ffPx5Aet0YCQn/+Fn4k5L/NG5RX0xlY2SSKCkWqFtYoSGoVQeVBWBaMkTDpWtRAQRpMk6TSridc2iCnnmGEGnZdKeQiMBU47FXhMAwcAJbguK9pLeNoxIdCdka0nvZ3h3/tH/+GzF08/eKu+c7A7ObqsdCsV/vjhkze/8nNVVZS1aaSdXx3HlHpGNzdeOT978fDDt9/86k+kETq7FBjY9fVrVetevDhOwpAQGlDW73QhaC+XRdMiIy0nNBqmKaJeCSdAVeiAAKtkJ+Jnx5dqAZyEs7NFtt9JO30YxckgFcY01IOqnE+LOMnKlnPOLPBXVxCjTDW+rUoeam9pEAVYo2K2YBA1ddHkJWhUs2wIo864APWTeKhWoljVPGZSKqkN8LiXruW56mWZC0XAkRACQdBb39QXqF2tvNEUhKIRgCALqCwtoRSxOAyDvFhQiuarAmMiHVzNVnjZIqkWpoBahxyXjaRpX8K2kwYRipZ5HoN+XptWGmxNXjR/9tH7P/83/pNq/mxRNm3ZUowWs4mzVrYuL4qmaePesK0KgHycZbqRSdJZtbXKW2VNq3zRuOl52806UzHf2M02IT8+udDKMPZpG6ZopLXeeFg2xjnAKK1rhTCmMBDVjGBubZt0OpQim4aVNAT4ppBRGFAIrVJKSA9RrRQJOCURppABFzBmrLb5rKxXtjWrxs0mTVvHjx68yEbBjdf3rFKT08sQQVvWJMy2b2w3WrfLyshGNVW1gEDFzVIEUdKPTJw6oGphtPCQB51aqDDEm8PByaPLuDvYOmAg6va31kU1Xy3qNOk45xiLlBTawEFva3J+5rQJM8bCUNWKx4Nf/81v3nrlzptfvAl10wrLQwS0IlEETIuch84642gWIEKkrb1V5aoUZS1ylXZ7FpimkSGnXiuAom46aK2N0mw5WwEF0zS2llBKnfd1bZdFE0fJeGPTQiKb2nlwcXY1XN8cjEblqgd0MBx0dSPyusaMAw+9kohyCAhH3FvHie10Q+Dh5HRSzGseBpRHnlRJSDFB0HvgrJHSW8eBh1p71ToPrGi1hc4ajAlghDHU6XXzyzMG8WI+N04MxoFRzhqjpdTGgFJoQ+aLc++dVw5g3khTSTFfLQGBqpFEa3U1r1sPEV1N2pihtINYBHosKvMr6Ao9r/Y3U6ksomEy7LXlrGnaJOI0CmirjLFONgY6EoVr473Jcpp0hxZhJX3Z1s4CgJkD0AGIEC2KZnN31O9Gy8UqjsMwZnEYLBdlON5SCjV1453x0MRrfWOVVjXmPYcgxT7WhkKfC9E0VTJMKcbtslEAbo3WS6Fx2IsAdpjrBgFP6rLOOIo7UQY+VcF4SPP5ajxwn7l74+nJ8YtFZRs3JnCd4YcvTte72cFo1BydGY4+ePq4u70PVlXH0sXiqndw3WgxHHcv58VkUgDEWRcaaxolxjsjxNTZy6vhYPP++0dvbvWHnfFnv3Dj4mrZu33vf/6VXz
t8ukw5rVRjGTxdVOn985PLKUHi+nYonVceLs+ueDzkVgAFDl8uNvbHm7evn/zgg/3xuriazC8X9772hrOuzWe1aEMWWqiXSxlwRhjpdONi1cQ4WpaLsMNZZ/3poxfUN86BQZ8HhCpkO4PO+fnp2eIMBx0aZvePqj4Dr15LeUiKvM7rshN3lDJlUYQh4RiuJheMBlE/ml0WQIHaaFGZ/lZ87XrPIDqw+PTFRRCFhFNVNW2tgPHOS1VDhBDBPgkCCLxHUAFIGTPAKe9JwNrazYVbXtXElo1UAQ+pA2karA83B904puL0mZEabN0cA9Q7PT1LOHz+7OyDh7/+1376zX7PH50d9wfJrLJhJwGYXShTA/O5zWssMIdnp08fPgQCcmhmy0VjdKs6l1c1cGBVzgMUjbKgz/BGj7w8fBmFDCCPoXKGhAE2GHvn5pXUqFLYkCTNyAAa3yN8Ob8UUnqEnAfSQwAJ8NoDJ4yGGIUIEYIqJSmCq0ZDCDgLXd22RidRQiPkoZO68cZZbzDy2DkjrYLoPDfKFtub4/cevrezt6MRIzwQiDUAO4yFc410rZK1vOwNO4N+pArfGn0xb7qp7XT7GNAuTT588aEoVzwErZRXOfIUGhosK8kIqTS1jYqFDkI/6AGIoQNWKuPhn8daMwu2hqMxSMKUiHK6vzPq9QPaHwMLOOPdhACjvSq3dzJHzZ2bG4vzS8ljVVctaSywQZSOso6oF2kY6QZ4CyGDndGQhXG0QcrZfHH2osfQCqymx1cpVFu3ti5WZZoFAQtgyjcGm8vzk6opfRjqpkJStPMGMrS2M3helEnaIRwt/v/s/VmvZUmWJoatwcz2cIY7X5/Cw2POObMqqzKru6uLPbCILrI5tUh0k+IgChAhCGgRAvggQoIECIIkQBIgQJBAAS0KkgDqQWwClDiLIruL3VVdmVmVWTlFRkRmhEf47Hc8057MbK2lh32Ou2e2fkLYg/u99+yztw1rmdn3rW/ZXqzPPn0+nx+YwKYT4Yd7r+nQoVPnCLjr2033zo1bcnXWXqd7X54/2jzZOzy8cThfnA956FGkW6wrLq8XTw4OJghkpk6xnDgJ6f7l+o8/evL48UV25Re+9AV3ceX69nBPPr46/zPvfb1fLHTIRnD4+vTu4eHl06fOhfnpPkBjQ16dPYk5ILayfvbv/zv/a+mad7982/swn08Ob9386O//PPh0crL3pz/68L0v3/v5o1U4PtxUKLE9Oj35l/+t/+H3/tbfqjftpz+5P795tHy+brvL9cXizZt37n/8ZJLlxvFJU6zOzs7AN7NZuY7N7GDv7OEPCuzn88nQNAfHR/Xe3mzKy2ZRF/H5o5+Vk3L/cN+7IneaClqsz2Zufzh78Pjp0xuvn3z9177x8P7Zep0idavFGUspig/vX+/d2Z8U+49+cf/yyfP33nsva/Xk7PF0Vnft+be+9YU7N1978ujpu994jfr44x/fn+8fMFe5u574cLJ3sLl+fHH2bDqbJ6TTmzeLolxvcsGQsihoWYYUU1VWlnNZFWi6Xl6sVqv5nhva9eWT83bThHK6d3BUTw6Obx8nKpPvGHi9agnS/Hg6RMigXHDfdYXTGDdOhaESyIf7s6HdpKFHouvlmqpSjPveFhcrtHIYmls3bvz0wyeErihwNp1eDEsju1xfLC4vT0721+thr3AiafQCSTnlbKpc+K6LqlDVZc55iDEOeWgjKvoSCVBVcoqbGMk5RmCw2Gz6pnOFW3cbTxxTTDH54NlxzDGrsA+GNp1MTZUBFRUJsubUWREsFKHvOsuqWRebZShcSimLhKJwwRuYiRoAEHjv5hx8EciHts1lUe1N5450bxI269bMEeBi3RU+eC9FXbJDo8wNeucILQ6ZfQFZu25TlGWoyi5S0zQA4JwzM0aLQ9sncQQqQiZVEUSsQIcEUsCqW6rypu329uY+eGZaLRebtimKYjqpGNAxhpJVUIYeCAbJ/TJ5HtCsqPykKtumEZeZdWOtifb9EMpSKh+C855VVQkQ1Qgk57Jw/RCD5yGllMV5NAUz2N+bp5hTjsMwNG079AMAkONQ+qoIOcbUJQIWzEVwRVGuVsuhi5JVcwo+9F2czaeE5h31XQ+mZeE2bRs7bftBs6qoc4imRM57ntZ108bgnCv84mqJRM7YOSrLcmhj3/emmZhN2XtPTMv1Zr1u+raTLGrK3pmIAahqVZWiaHEY+giESXLtQ1TRka2UjEzDkIKjMnDhWEQw55yTGQ6S1TCmaKqI5B2T2TCk4L1KHmJU07IqzTTHBICAZmZ9HJzzAOaZmNGSBMfEbhj6votqUFWVdzx6we/87pd+9AffP337ta8M4fHZ+f/+b/+7UYEdoCp7A1Xspbtcq5JX+uqX3nvw+x/0D7u9Lh8el3tVvHun/ukH7b//H/3ki5+Vf/N//rtrgPM1TH2xvLpmYlv1pVDarJfDWmIMRsOmvfrxowlL2R///Ic/n93au3FvguLU+i+8d7dpfERvFFO3HoZ0696NCb/x/OH5+z/5wendt2YLi+01cQ6TqqCySRsFdS4BALM/3T9ulmdiA5Ar6wOUjeVUoqPG28qqOD2ZnOQ+WXdBmiAXkHhB/O1vfon48stf+dr9Zx9//KNfyOa6fm2v7+PiweLBj1bWhmcfLzJItvr9j96/+9qsX/dHd6baleslJaWf/MmzTz5s/uI/9luvvfeVy8XTWzffGkyTJ7cvt+/Oypk/or3jW/MQiur4RC+V9+yNL9Td+fMnnywhmo/NO6/P/8I/8hcw9mnjz8/6qph+4y/93k9+8ez+g5++8807i8+enR5O5q8d/+yj6+x9cu75ZePoWbfpV0/PPfsf/vxqMWvu7Nm/8K9++Ts/+uzdw+LR9z/88KOHv/fVf+oB6Ot36k+eNG0rN6ep7xdHpS8OpoguD0PRadJ2flCsu261uU6b7Fl95e6+dXeRVs3l6qPv/ODe7dNswH5WTI5++NHmp+//rOb4pd84LWsczs/v3LhF7bOfP12fL81XMk6he5Py/OwD6tOg5ivvS+dAH3x8Np3zrKhpaXcP3/jOH72fFn2Oq7KYlWt4Z16Ej579tX/uH3339/61dapib6zm2kU5nFuxOH/88Z/84AHcvvV7//3/Ls72/v7vf/b/+cMH3rkEuLpGV+YKPWY9mRROIwiWwQFyIphMPAKcIBoWXZc2DRwdzQ+m0C6eq+bXTo/WnfbrrdwaQIF817bOF0ysgpKHoijMFX3WclIzgGqKcciGvpj2fZddphSrkifzvcVqCABYhOApR0AwZJyU0z4mAESw2AyG6LzTDJeLZTGtrpulc+7u6R6zS+3FZtF7wDunhwD5C5Mbn52173906Ujc+eJoOpnPiquzRTWdMBBP/fTA/xO//ZubdXt451aBTvtsKetAhccsudts6nmdBdrNZjLZL2c1ORZlVVSEtm2dKmjuNwtSYU91WW26NmpkxDz0wYdJXS9W627QmMSTmMK0qqQfjOj6/Pzh0wd/5s9+6dHDT+6+cevW8eQ/+E/+fuXy8weP786/8Jf/0W8tz
p7cf/BMRX0RZEhRspHnsnrw9LPVspnVr/dDWJxdVJPCB2z6CAHnR3vX19fcg4FcLxcp2cnxvKjqq27x5pvvbYaVjY2qimoa5pPq0w/u1xUd7O27qr43f/Pq7Pq666tJmB1Vm81Gzd5+9/Wnn9y/8dobz55enZ0tTm8eT05O3XRfYuwurg9P58vLNYCBL01xeb1EFC5nR3dOBd16000Pj+bl3tde+8J3v/N3yUEZPBc+pkxiOaW8XnfdxaZtXKDjct71TYmJHNTTyf7e5PLsSSgUiVer9cHRgaeCg+tSq46LQBRAhIBcm/vZvA6OF4/XsYmS0sX5+emdO3nV37l1q3n2qF2ti2lo+i4g3jqgytf3P/nZ177ytYv1o2a1IodlqFNUIq+KZe1sY88XV66czurq/PkjMx5SfjxsmHA+K9HznTdv/PRHDz/64NPV1eWtO3urq65brd778psuTLp2uyOa1UWopp1mLgpN2iUWyVWtEnvPQfrIanm1cAU9/PCDsydPQpX8tL5z86QuinXTEBgCBAfsrWs3Q5/YZMgoIjGl+WziqPjs4YP1ulksmzYOXz56C82as2ZaVIxu72geBTFQiUXjuoPj/bjO14+frNF0EF8FV7kPfvHhAc9ap806P/5k7Yebrdvc+cb05p292EQnw6f3P3zj3g0/KWyCEXpA710Zh5hyR0jz/XnKfYxD2myKaXjr7df/8p/9tR/94sM//O763dvv1i7tVfkw+GEYigLJWz0ri6JYNpuyKFSidw7UMkiYz/w06CAOmTxM9+ZpiDF3LrikQyi9c7ReXk5mc+dss2r7rEVRTw5m1cF0fbUqPC0Xm5uHBxXFTx596lUn85mmnGOqixKiEsJbd197cHVe0GRxdVmw94WnaEDoHaahJ4MNrfb2psAUqoosbVbNerkumFHQTPeP9rt+gBxTH49uHAWXh/Mz5+rEoYBicdXEvtm/OWuH3K47A3KzmRCvrq82bUOWyqKMyZJ0d+/d+t4f//DRZ0/LedVPqrTZ3L55yE7Pny1X7XB6uu8zS9bUprNH19OiaLs0uzHpOjTUxWrpRCwNWtEgLXvwIIZQTRxQdhDbdbNYb4j59OapM2qbOD2oy6lvrjsxuv36yfHprCoDBg7EzbqzoQOjYjqhlIvp4dB3niy2K+cCUmli18vVbG9aTELVHLmLZd13nSkMg5bsg9/krEZD161WcnTnNV/Rg/v3771ziwBCWZhtveD+R8+ZabI/eXR2sWqka+G0rr/6+iRlunF4PKT40599enwyFaNJWTTPNxQjZLmxf3P22pt/9Id/7+nlVeyb+X7dJDl7+pwEOMnFoydHp8c3bxWPnz6+e+fovS++tbr87MHDh02XL97/yTtv3tC15gyfPH76zre+fvXR83Wkm0d76+tEkT67//jd994qDidx2Dx79mB/Wh/7AvvF849W86Jab4ao7vWvfX2pPl4vj+fBIcUACZNzhWTVYWiWlhsp6zA/ql3p/P7sxr1baOJUyJtJ7mJ7vcbSoTT5vd/4xtFk/ug//R4GVBf61VJ6a22wcjItK5V+Mq/bYTi+fXLxbJFbPTqqYz9Mq8q0KUJZcanEbbPZK0PKMYEYqEFG1P1JlQWKIpjJ0DZsJmZ9q13qESCrQeL9WS3N0DYDMxBBs97M5/WN0znh5o//wQ+/9rU351VIqfvmb3zh4UV/UqXnD55dXi2vGvvP/tO/98Z+2abNpK6L/XJ+Mnn82eNbs2Mrbz/4+GEeYFqGftmi+Xe+8YWHj5ZvfPltsLi+uGQPJ5PZ04fna67WkpY9FpOTsDc5X264cABq5Pp2YMZZXUkemMlpw2x9lGa9dojkuE+SzYSwT4KqpgboYjKzGLMRkWYEQDWLw0AGbJkAZqUjByJZRNWM2E+YhaAzvFo1Qy+ucqeHk745u1oNF6vGm/OoTR4EZDLxpauXi9V6sbEUMCcyKJhTssur5RDjB/c/joN45xbL7ANJlBt39yTZst0AsxKXVZkptVHKDkHFs4MSkuIvUUX3Xr+xuD4Lqp9+9OFv/ubXry9gcXF1+73Dpw/a+/c/ev3kTtScVB89OX/vS3evVo/8QDTbn01PgluoQafd4fGhMT97dI1EVVEO3TCv6+ePrycFLa/Xw/u/cPV+Wdhq0UvKq+sLD3ZyY/rRT58Oqfjtt956dPYLLhCcZJeObpS5HZpNnwYV26jSk0dy4+TYsQ+lP7178+GDM8I0n1HXpfPn68neJInvV/lgb9+zu3BOlNvlUxmuMcL+wa1m2Za+PD46+enHD5DZRQ2BNpvu7munt7988rf+H//Pi0uYBcl+huoguQHw4rPr7rqa8P6TJ+vb87LYo9e/+PZPf/HjaVdxCAKpadaz2o5n9Wfvf/S1r/3ZO8fu4c8fHn/5q0/un91747SPq9Xi/O4bR5OflKGmd7/61Z/97KP6YPro/OHf/J/+a1A/veqf/fV/+q//8E8/+NEnD/7yr33x/s+P1rNq2abSl6eHsxKKUO1HiT/56Ooe79+48zpqM+Tiul+/efsOQzmZ76duoanbP6a2H86e3p+ESVU7KqvspnVVNe351dX1LTcJ5ez48ODq6TPGvLpeNr3cvHNa7ttmzewSinCyusR5qc3i8WrZ33rnixns6WeXd968+e7XbqZ2vwrF08unPCseLdvFs+swOwqhvrxaiRnnWBXzxWaJ3q2GbrJ37KhUMR+CgZiPAGpmPngi7DcRTLp+03RNsvjsum+uzxGs2J/vnd66/eY7ZhxjXF2dTUsiFMVYTrwDl8wOjw5Z8rPnn2GWm7dOm+WyWy+9K5g4AZVTF/u8WS4nRV0W0yEN2c0vL7rDeZ0HWy9bs0pSXlDXRemhK3y4e+9NX9fN8io1fHRaboWmcVAgz4gEaJRTTKZ9Tm0/OF8wEQOJwnqxrAtfFZUPLqYc2y72Q1UV7FjVvGc0I/IxZjMg4qqasPMcimygoKZCjlwoHPHQ913qIaUu9nVVZR3EMqDGPgIYEyCR864qizgkBSjqKg2DihmxGs7mk65Lq37jzEylbYd1t0Lk2d5EcvKOXOFiP1xdLJi4mtbTWdWsOwEtHKthztlMmnVDyH1O88mUkTRLN7SE5NhdrxfBh7oIk0lVF8V6teSiqOezZtN2Xa1Z1qt1yilLdr4IoSyrGsw4lJItDameToahTaI5JUm5qAKgDX3Pjg0NkFdX66Jyotg0LQIWwdWz2jlerXtRSVkm01pTxw6nVb1arZioqKo4tJYkJcsiMaa267IIeVax2WRSBK+aQMX7su06RAORTdOmlJ0PrsCayjjEsDfdNM20Kvo++sCD2uXlUgBElZjJABEn82koi8vrhTPX9omYU5JNm7JaTnFoh+m0Vs2DxqtudTDZq8uSydq2bfu+7bsYx3fYMSM777pOCLGuKxFt203KYAjBeecIEeOQzdQQYsrOeDot9iYliqZ+AMRuGAABEU05q6phKApR88F7ZnLC7NZNm8RUjZKAKiHElMq6BLGqcHVZdUNcdd3QJzYr2Mc+5izOuUEsKhDo6AV/9AffPTrYPzid
TB4tqqFYP1yVe6VOMPfw7GJdnuzV9WS+N0lX1yXNLx48efbwClv+0r3DW3PePH70l//Sl1LOTy7x97/3tPi3//Rv/M2/8uZ7bzOdxNhRMC7ufvLRpw/ub+68dWtv5pfDY12ubt87XF+vP/h4tYwYWplNZ88/e7J/PD06OfiTz35+dOtGTh1I03WyMjAuD26W73/3Dx787Cdvf+tLV+r6MF1vumwKNAfvs6GaIWTgvu+uo64m87Iu69gPXFZVmNNQmB36cn9/ss/lsxS7j+8//J3f/ufkYhPcXjR33aQ3v/LV73z///atP/9eHH52sX74/e/+6Z3ZrX/kd77yT/wz/+RH3//F/Y8/EZcenl26SSgqv5Th/Olyryj2Dva+9Tt33njvm4enR+9/eH7n7smNt6d/9J0/uPHe3Wk/zG7Onl5cMvv9N97olqnZ5HIy0SbGVZ5N9o5PbQ/z6Xz/cO8W4Tym4vpiVRRu/8atD8/Of/Dhg32v9z/46RS6jdnByUHXtTff/NJCF4NfHZzc+OPvvr/R7ErXmHvj9KCJ6//4//29R+erP//nf+eq/+z3/pt/bX29/vCnn9557/jjT88P9qZvTOxf/Je++eOfPLxaakCpDtiMrxf59Mbt5dB2bbN/UFwtOj6YffcXD269cXTy1h2Utnfykx/94b/83/oNNfz0/vDdn4a/9Ff+0v/yf/bvv3Z8NKsm/+2/8T9w6/t9f2YQEZ2iiEOeFFwVi3h56/X9wfDsefPeW0c5t5MDv1l3q+eLpz/2RZ71awDj2zdurc6u5iv31a/9uXf/6f/OIlWKoZpg8OKlvfjZd588+uDhg6d8w335n/yrC3tnkif/xr/6r3z3D/8XqxZODmZ9n0Xd/tH+4nIdIp6im+67pouSMgUXRJvNMK2msym3Lha3j9v1EBrdK2i5jLJcDU3qdoe7D130JZY+UPBN16Yo3rtk4l2oXDXEyABIFLuuDBMkcoFVhRCBuO16BCGwqigA1EwMeciimNWIkKaTOmfpUwqBPSPaNKumJmaI+9Pi8nzFlm6e7OWY7711q79a5Zxv3SpvHF/df/Dkx5+eHdbh9ZsnZaiKyV5MsUvp8cOFEdfTvdnefnu52JsH8FWXbW8+XS/XKRsYG+ONG7cl5jSIDrF0PAwdNW4+ncVh0zW9N65nddt2BmDGfcyz+bQsp5s2eubKY5TWebKkEvNkf56GZIy5a5ZnF9/9u+2dw/n37z+uptXJwXwyr3/n97792aPrg6O99fnl5ro9PD2ZHc2vYLF/fDS0XdvmaVFOj48nEAbMVJXCXpMhw+Hx3nWzbtVqHyijKLdNuzffyxkq797/4Ae3771hQsMm1aEUyedXi4PTw6qwPmNGj7O6u1otl8N7d+9qwbYmiIlyWZBPQ08hzPan85NDnOw1kRhc8EXqE4kcHR5vhpgwJYOD+WHXwqyqC1oPXdxDWFw872NPmoGQOPTtQECTqubATdOXtTu/Pt87PGlTnsz2yoBDF1NW9VRW03o+adabycGBmnbrflhcvPX2G5fXD2PfH00OtfCr5YoTHd06aNcLH3D/5u1mtbpYrJrlmvUaDS9/8fP5fN5gjFFSr81VPM8PLi5XX/9zf2GTVtY5bWw2mW42bQb1wa+HRlWA8Ps//NO3bt+6dTh/+OyZC4UB5ZQ9+1lZyWa5V2NMxDdP7tw7XZ2tytKXk70ubupyx5dKHoZ2b3/e9blZN82w/sp7byls+rZz3s2qA8DULpbqYH213j88GJLbPzkaJHU9qGTHrh2GwTSgSALpB3IM5C0bg18uU9MsxXGxvwdZB3YXy3QycYc390PAQamczQuBKOZL1mkd11eb61bbVE39OrXPnz0Nlc8D7e3bfH+K6FsVB/3J8a120xzcOzi9OelibtexudpMZqGo62a1Cd40gCuqQgBUirKqaJJTv7q87ttVKN1vf+tL77x7+u/9e//lpz97cnh891/9G//4/rE111fV5JAyNG2fDEC4u2wl54ODOTrqhsRlvVhczaqSkciHwJziiinszY8IuB3ipmtyhrZJbZezSNOlk+lhNZlenF1JNzj206qaVn519fz8+VlZF6aAjMzsyiJU5dC0q80ipcje375zq+36+d5UBSQNe3vzoYtikBLW08OiLJabtnJOsXAkwdHQt0yU29RvNmAaU9/EqOb29+s0OI8FOw6+SkMbXKmI1bTebJYSZTP0BmVR+/15mdq2qjEzXi8uIQ0He5NoQg4PDvZXm5gBD4739vaPo0grcP7kjHO+efO4X2/QOVPu+lZyqovDWTHBoog51b50hEADIuQ+pzQcH+5fXa4QKIoNAl3s58eHfl5nMq7cetN+8eTOwcFktVh3rUTEqt7D1CsYIuztzR4/f0aunB/dGDbF0K5UBMBu370rouCCr6tbbx5eL5Z+3ZQHnA5ijNp28c4brynY5AaF/X2gUPuiOJ7P9vbbzeDYj06Qcl4m94U3Xv/gZz8t9w4pbepaJiUOFuRikOTYUwjVKufzJt25dXr+6YO9adV26x9/7zvn14v57KQQ3zb9om2ns2kAm58cHN/Zv3y2cJuYz/qe4Yfls9/9c18kunz8dPloff3O7cOzjy/7PjZr3edDlufXi7Nv/9q7m/O8uLr8c99++2rdvXZ3b71sfOmHNDTrfjqrw6RyVVWiXg5dG5t9X3sHNbsm9U+fnx0c7c9vnyxXTeGhWydCP5nNzh+sN88vXwuH3/izf+YHf/jd9eXl0dFbvgxt+zjl4cbh0Y2bb37y2XO+sfrWb7z29NEzK+j4xmnjrwfB8+vV0f707umthHl9db6izf7RVFEP79xsrteb5Xrv3o3NJulK2MWvfvGLn3706dPnzxwiFVXOwgSkgJLyAIoW+wFMqQhdH8mkHvFU1iTJBkl9Jsfz49npV2+sk926d5vT8tbBbx689cbRA9w0H/3su3/y3pffe9pf5SG/96U3v//jxznb2dn61//iF4Lq3/+Dnw1Zvbn9+uTZ4wfaZgPeO91ndt1ls7xaXT1fvfvWaz/40Z+slqt7b75u3fr0sPa1f/ioKYqiqML11TA0Us7mvWIGqwsXu8Ejm+PNuvVNQgIzqsrCnCF5bYZhUGJSHQPAwEwpgZgyupxScE4ERQXAzMCYr5ohxawVMbAjSlmIyLlQmuYE6yE1CmfnlwXnq6srKiZxkHLq3jg9uTqYfPjgsRr1feNKziAxp2a5nNQlESG4tu3m+3VVF+u2LWu/uurW3YDkr9bxnbuv/dpX7/2H/9Ufp1yZmTgqSNpGZtNAjhWVkv4SVXRw8/D2e6e//1/93fe+8ZWPnz4vj25842u/9kd//x+Q+G693rTn7379xvnz9nrlTeTm6fEvfvzx2f3lN77+zms3qrOzc4zy/GFarYQJ6iJ848vvPH7wWcluaNaSoTyaNX2+fHz5hS/eOX794MGHD9GyMzn/7NHm+vLplWXpT0/nMevDz85unOzvH9WPfvEZO4vt+s23bprY+WKle8Xb731jcdF+erF8tryo4oaf7zuezeYTrotg5eXT6zSsTfXwdKb
Q+9KiDhEEal8czmOz+fjh/dNbeyD45NOz17/yRta9n/z0O//h9+4/PtNiclJPDn78o4fHh/Xkq4FOjg3y5LhuLu3x8/vf+ua3j08mXTq/MUXCzcmtww9+/unx8Zvduv3k8fl0cvij7/9gc9ndunv6xW/9+rI7u//04x/+9LNv/dq7Tz59cvHkml34O//lD85a+rP3vry4ovrJ8+/97ff/zX/2t/+Zf/Zbf/9Pnn3hy+/c+sabbz399HHTpsvrP/PtN4bL9OCDJ6t1U87nfRw2V1evvzEtoXKB53vTQlrvuAgTTCWIhjpcL59PCogpd208vnM0JNtcryezSqBX7Qoprp49vTw7A87Tg2J5vbJhrm0sQW/dOTx7fHV1uZjtTVeXl1fPF5uNTI5P733hC77cu76+vH74ZBq4sTg0abZf6HLgpLODaVHUsW8QXTmtFtfXbduL2o07J6bF6nIVauRqigTzeb1cLdUMwGJOg8Ru1avGGFfrzYbYEOzw5GC+d+Tr42YRY9/NKqwh2mDmsJ5O9w/3gnGzGrTrnj56GHUTVS6DL4DL6mCyN92s8/7xwdAtU2ezScFsZ8/PVotGXFlPpo7cx794sFluyrqoJnXTborA7Oz4eIJRTPtbN/cffPpoP9LLBDQCNei7ngCMYLXc9CmZigFXVekIJeYyeHaEAMRsMZH37AmYEame1NP5ZHl5vV5v2GNZVUPfexfQrN1sXPC+8L6uUhziMACiZCmCBzA0RvbICk7ruurbrqwKQpAshjS00cyAKQ5JsrVNG8rgne+bIWVxbjKZzrq2RWdVzWJADAiUUn953uecVKIZd5vGUt8POWXx3hFhWRY5xmbTtjG74NWUCLMMpmqmwzCEENCxMglolIGDJ9PCEUE5nRZX59cRRdkUcP9wPzgPktIgJkPKSVV7HSyL94xIHELwzhPlYSiq4Ityc70op5WZEoPzYUiDaMqSRVQAkEhEN8slEQLRZrWSnFS12yQmrOqQUzQT7xE7C84TUwiu9C6laEnVsI8bJPLOrTdrEQNyyeB0PpPY14W7vFqWk1JNFss1og0xK2JMhiHsT6amQiwaczsIG0nOOUpRFmSaY2TCFLP3LsZeUgrsTiZ7RK5rmiH3fd8vFytDMIGqLMxM1ZJkRqcqsU+iaRiGYVB2zpMDE2BgEEREgKosXHAEwKKOQQlHcxMwx7xphuC4CKHvIyKQYY5JcjanCOaYY1bN6h0zATEH4qxaBi9ZNGsgn9UADInUcs7ZMjJ7EB12aZgf/PTq7deKbnX28x9fpqm797Uvfv/9D8MMvQcRMMZN07DLIj0M8/t/epn7cr+uF4vN1Ffn61X3yYfHB4IVf/23vvqdP/mg/j8v3/z6HTP/xS/82U8+eGbFs/Xz/ucfPsxLuXNnv12k5VnjlJ8/k7On+tbbd2ZzK2rvJ/Um58VnDyPSMl7+md947yff+1NmX06LxDg/rdJwkHr98Effb3l1+yt3LUqTl+X0wIU6tq2A9gnbbpMCSnbX1xuhxhfTutzfXPcTV4fyMK2H1eWnw/qBwOCUpb0u3TQzX148OzwoxPAf/St/5ZOPv/f4wVnv8pe+fO93/5G/0S2m19fpta++fvtL+4CL175059MPnpOWZ5vhrS+9/qf/3/9C2s1b73ypPrjxi58+298vmuXyT/7rnyv6hOp9CdH2J/Mu9c2T836ZJhN/582TNNnDtLp89hy65vDWjW/+xq8//7S31tlymTbnX/1z7/3iv/7Rd/7gP967vvj61/e++etf/f2/892mheXlWZ/pj7/3AzIuQvXg04+PjnxJfL6Mecg3372z+eSDnz8/N8Ee9fV3b06OJ/3VMte63LQe6L13XvvSVybrdrj15uG3vnCnX5x/8EefrldWT6qLq6uLxfrwaDaZlNmES741p7pK+0elbtJ61d57497FZz//3nd/9Lf+d/9R509/8Yfvc/af/fR+GfA7f/dvTqaz0ofmrO+a3iNVhRuuuwxFUU/Plu3JYV0f2K//haOj05sk9N2/80F9UJ9/er1ew15RQwf9+xc3k3v3S29++5/5Fxfr0G66/ZMSctdfPVg/+skH7/8Q9r277X/tN94+fvfu5dlq8PHW4dHvvPv23/7p8/PLbnpvb7FaVemqdrQ/qwEs5+7oeHJ8tPfz+58uFl3X6mbRNn6BWcNrR2/cON4vtahiETJ7vLhYFVyMXkCkue2RkIH79doAj/ZOsgztepNkU1Rlu2lObx2Xzrp+OHt2f68qT26dpiGVZdis2qqus6Zmua6nZVWVOea6qoZ+QGMAG6IQU/AY2x6YfOlrHzbX12Z68ewqpRSbdnK0t398iOBmB0d9is26f+34cK/AIkfz7kfPz+4e7kdCTTqvy6HN5nAyKepp6WHuUTarvmt7dugLP5kXhpiThgJDVaUUxdCF4vHjq4i54mlRMIBPKQ/ZjKjtWiZ2VYGIfd/PZnXbNsOmAYhHt0+XS3FMy9VVc9XWe9P50ezk7ZuXZ60VYfH0StC/9u69qnTPF/3JzeNnnz6eHsw8H3z4wce34q296QGTB2g2i1XKgyFvmv7k5n7q8ywE56C5btpN1w0pTIr9k5N2tSKzsg7taj20Q+nZ4pDaxbSY88lhP2wuzs6Iw+HhQXJoOUuXNueL2vP01kHpaXa4v8hcTWcX158t2uFq8/jOm/cuJG+alPsNFfXN126fZek2l8r56vpsve7mB9PZfoFst1678fSTz9ouQQGf/Pz+wcFee90XtePg26abTOrVqlGRKHZ9uXAlzQ7naquLR4tbpyfz+V7fD0VwVxcLZuIEErUqeL3qNSWJQ0C9eXR8IU9j6hbXFwiUOjs/e45ZjbnNxGF258135vNC1sumiet+iLy8fNxUxeRg//Dp48vrYfXn/8I/Vu8dfPLDD6aE/abzgV0dFper9XWDztpuOLx5c5E9lW7ZLff2J0VRXa/jye3DnPLquqOFntw8um42MWFWA1ZEef7w8dANB4fb1JtQBUlZVdrVen11OYhePMPjm4dVHTaLlphS32cZxASY6r35nduvU+VQSMiooLZPZ5fXrUTvWDM553AA50uVTMRN00fJMUvT9kd35tOjOYH6SVlPCstx6sPmei05V7MJo6XVYjKdXnEYxJqLq8163Q9dqPcUEwX89m/9xT/64588W31y4waCW+ZUNKvpo0cP33773TfuHK2Wz1P0d2ZvgdizZ4/qg/nsYI89I1K/2YSyNkYMWBeTvm1jM9w8vvnX//l/atXSf/4Hf/didTk/mE7q0Cyv1m1zvljsHewf37pjAQ2s6dqcRVNU03lVeE+SUZK2fQJDMBn6JoQypS7nyIXvcxy6oSqL/b35dG9mmkVECQQ0zKpnTx8vLp5M9kpfVwrJlYFCyCJJDZzrNhtQqyZ1fbhfmpkpKR4cnGwWi7BsDfLBzVN0IDlWVYEmSTKHEGMfKu+8b+PQDb1KjnlYX19BKcP+3nR/31U5apydHLo6JTL0ZUpdPwyxaZZN56taEQqPmqWPfTmbxqwHN45XwzOP9NqNw+ZqPXEFEkxnkz
ffudm2/dOnZwIRIF+tFpYTMl88eqboWKFpNlUoq0CF85IHAyQAHzwCySCS8uxgNq3Ls+fX11dX7Ir9veLWnePFs4sGhhv35ifH08XZVY6pX3bT6byqZ+06Sepj1/VtUzpX1MEXYTq7e/bk/tA2pqIExEGQQLyhIrpqMgNVpjJbrOqDRg3UDk4PqSgZ/OmdkyzYNU0IdT3desH1ql1driYOb53cWKybmyf1n/+tb3z8D77vkHTVZEk3vvhGu1pcPb5kgHPNRe0nB9VHn358ftFMyvDem6+vnz1sFk2h8ctfupO77uzB07k/bFnu3j5cdcRV+YuHP3/7UTioE6r4qOvLAYyD9zdPDz/56U/1fOkrABj2bhxL7BbPL2+9dmNa+eePn6Xrzf6Nk+Vm8/TZ1Z23Xq9n7uTWcby+kNXlza+8viJ6+Onzpm2vLhd9Gq7bpqqmRzf3E7pN04b69N7rd548e/Ts459++a2337zx1rnrFtdP2rafzWsI7tnTZ48ePH3vS+86GYj14vzxjdMbYXb89Om5xDStC3K0WG2KqhTB54+f337zdDIrm/PL/dlsMi1wMpM2U+pzs06bNQpIikJMiNNpbVmcx9xL07WudL5gUwQGYgDkLqW+awL7DERZb+3Pj6ezs6tnRT9l0Wc/ef/gsN5/663NNd147c3r1aMeV1//9S9dX6/bX6zPn19+/Rv3lk+fSyuX50tK9vprtx+dn0mz+ejDD1B0XjtwofCYYzp7/vTuW6+9devk/o9/kNdLyHFaVOjd48W19DabVj3Q8/VQTasowZGpiRomJePQxAgxMyKRtptOgfem8wi+rvy6txgbQBMEREyCDhAAJYljLZzvUmbnU7IQKInFIfrglfFq009LVzkWUEnZjC1pzcR75WbIbd8/PgfJ2m0WaE6TXAd/cHzwjbp48Oj5VdMSs5mS83deu3V5vdqs+2pCYCi5r8uCoVw3nUPcP5yUhb9aNT9/+IR5/htff+eDj8+HKJuunx+UQx5s0+9PK2ccd8Gz3bHWbdd0+d23v1aUaAP1cfj4/Q9unt6ZHpTN8oOLpx/ceyuo4M3T482yQ3N5iNKsH3+SyuLGs7OzL3zpjeW6vTxb3Dg57Nr44aefbZbNwT7fevPGumm+8ptf/v4fvX/zJm2WzbOnF0ry5a+++cPv/axfDccnh3vHReqfNG3PLu750nqQng9P76wvr4pA5lgzzmdaO3f+eDGd719fNL/x7W9eXa76dUeMw2DHk8PLy6fB9fUkPH505l3q+3j79u3ZXnX1fBVXV2BZTCVL6fHs4oxdvFg/v//46YcfPUziAviT0/2nD59/44vHJzdmjx99XJQ1ALuCp7w6umGxffjsIU73a1hvyrlvr9df/eLXhs49eBy//a3f+/jHP3fL54vmydDC3/vP/nMBwCC/9dtvx2b13e9/CMn++b/2jx+/9rWz+x/+s//47159+95e0f3R33v6ha/e/dn3fiYLPprOPvjBL7yvf/Mrd/7kuz/U5aaPOUzp1unx4a2DP/nDny0vbXmgNgvd+fVXvvmWL0Nq2rgGBee4cOxOTwsAHNoUAmluNRqoOQyz8nB+MN+sh+vnVzdfP10tVpfPns/me88++1QjTPam1+ebop6+9ta0T93F04uD46MQeudAm1V7eTXzdr3p4aA+vHk4ibo3nz795Bmuh8zXeCSAul6t9w4mV0nWq84UXChVq8n+fkpYOOu6oVMoC4dgXdP06xWZLC6uUt9kSXUdTm6cbpb9m/duF+Xk8jqXAYeUvGpZUopS1pWIdyIxJ0fCJe8fVVeXm9u39lNSTep8iea9c+vFtWqviILcte1qubm+2uwf+X5YL8jOn13mVqwehiQ6iPT5zut7loZ22VTl3nLRDIt2Vc9HL9g07XQ6AQUxEzMgAgRSIHJlYBlSFlUwAjMlMxhiQsC6Lsu6IMTUD4jWbBpA2t+fp/FQZxf6fiASNQuMllHAJCuz895Fi86TIeSU+83GDEIRQvCqqqIAaAqhdErqHGc1A4gp12XwpVtebwiwLLz03XmzQQVf+JzFAJfrXrLU01LN+m7IKbPD2HUrVfaeCPMAIrImQuBQ+roqiKhdrrz3kjISzWezmJKaeu/aTddrq0VQMe9d7JMkQaZpPT3Yn7Zdp+MLtjYNgMYuqYmIhNKLKBg5dUOfJrNJu2iL4J2jocuaI/tADFVVdG0fCm6XAoZDTCKKTGQ6tAMj+sJLNkBwzgUaU/8kDkOKyZKyw0lV++ABMccoKjEmGRIydv1QTio1ceyqwhE7JFqv1g7Jlz6Uddt1MaV26NGs73M9qQ8OZsjeMzWbXiUzOQUA035IYJhy6vvIxMCQo/QiIgoiofIxCzlaLpfXi5VjylkcERElFc3Ze0cOYsopRQJrugEBnWfvnZlIzqrgmJgdA3jnyjpIzEyYJSPiMGTnGNREjYgMkFCr0qupalZTFyhlATA19Z4M1ADFwAdnYIYWc0SkLBmJTBXMmraTJIQIjIaKWRW3hOnhzTvPFldPLte5oEFhWC2sa7EhRzadB5Jh+TyWPK33JhefXiyfdNTlO18pl+ftwSG00K+ef/buF+/mZ2l2I5/uh5/80eN/8IePvYN68v3UQTkP5iRp+PH66gd/8HRSuNm8ePJ0rdmR4qzu337vhkF/8+50/7T65P1Hc58roE8//IUaK2JZFEB5MqPbb9cf/fj55dlygMyTi1bSzbv7l48eYzedH+1N9ydD3/VDNymq2X4Vu847OpzPZvXU/Cz2unfD6xzTKg1NuHGyv+Th8tkn7773zclBxdyVxbIsTdl99vHl6ml38Nr+7/yF390L87OLRwjywR+/P79RIPUPP30/NkBUtNnly6f5SlDhO//FH0b603p+Uh/ML5+fNavm9ffuzm4cnJ8t4sV6/7h67dZ+6dzlcFUSvzH39x8sY9d84a17w2rtsvv0g6cf/eRBXi0Xj55xCaurT//4P/nIaXHv5nRxfvX9H/1kyA467bNcXcehU2aWPCyve1fQdL8W55acvvfdH51O8K17p7/562/87X/3P/kX/pXfmyw+++7/6w/mXGmH1g912ITZLLd6/WyhbOTx4K3X5sIPP7taXWwArBtSu7nMqqnd3NgPt9+Yffb+LyDb1VXzxm+8/T/63/xff/id516nXT8szgZTq0Jorlty/R7L0A6asivYQNn04Cjce3uvnMI3f+vuL370i/lkMtNrOs9vvPv66V/72h/+/k/SgPG6T6XdOt3Ty82v/da9f+Jf/5dW50Vdutobu1W/+eSzP/79Jz/56ebqybe+/c3XfvNbrVXF0Tw2xoX6G7f/J/+nf/NHf/1/vMnD8vLCUupRanJZtKoLYB2G1ZOLZTFz+74yGK4XHVdwerO69Rpovnh+td4/rBPEnHPl/fNhG0m+XPa1o5Sy4aZrByBbBkLHXRr6ptdcMnHbrlRAWPu2pZgPTw4UyQhdCM0wiEJZeTCVaKCQYyZ0WRIQZhHIoGpDO3Dwohapi30PlpXc0MXArEnKgN2qlV7e/NrbCnD96OLOzcPSh6uuS
8Zt0w1dzO3wxus3yPHJ6d6k5rhcG1gm8EU1ZXJMiCiSHLkAmvoWg09ZiFwc4qSemuXYD9W8FkXnAjFbimrIRM57NZCYqjq5wFVdpEHaTcPMQydF4Wd7ZbZUlJ4d903sFF575x4gERXnF5snHz9+5507Ryf79bx89vTq7r3bLtDTxw+vLwMVod6vJ/Pq6mJVeBz6gZA/u/9gb782pSIgiObN0Kw22axdrBzhzbs31svh8HDy9MnZ+nLR+zaKhsrNZvO+GTRnlULNOY/HNw4NYL1cPnv4yXT2BYJ867XTT9rnhsEzKFI521P0dR02m/VmdYFkR6dHfV9eX14O6w1Pq1CXWWS1XPu6Qr+68/rxsyeXfdcD+0TKPscuM+jQDXmIRV3M9mvJw7BqJJtFK5DJUdfFuirqackMzNhv2na1nk0nxens+ZMnT598fHDjCIhNtK5ns3mxWmwAbe/G3mrRLZ5d1UVhAoiuHWSxWN398nsxtkKLy+vV26/vLRdp/WBTON2sH89DSbHbn5V907lK+r5rVqtqUoDBbK+6o/uPP/7kcG8SvAPoHaKkhIBF6SezChzNyurp1eXzzVoNZnuTlIbLJ4u8WwuENAReny8D0Ft3bl4uLzfXz/fmftMMDkPsOySbzOph0966c0/ZcV0rQY5Js1ZVtbi8iG3XNW1k6qOUk7osq81miWaAmHMmx91qZVlmxXGVAdou+ZARYp+OTqYDDGDgctpcLzim5xfXmuTGzaPLi9xvNqenR+V0sln1wfuf/+xHd44O33n9WxlzUVdDFx3Y6d6+Bx3yxhHsHU6ZUx4GRpsEZ10DwK7wVVkzs0Hg2R6ztogwqaOkN946DdM97R589id/58989W80LQK4y9VqcXE5q0qGuE6pKIKq5Jyn87pZNZKhmk9y1jxkQESmsq6c5816lYee1Jxn9IxihQ8He3sHs/ryajOZ1W3XRgUyjzZ0i/XElwcn+5tNQ4hodnC0H5M2V4sbJ/Nb3gzYoSGTJhNCh1wU05u3bzWrxayumAjVzDAOUPnSMUIZ+s2GPKXVgGqEcOPGUddlF1C0b9rF9KCqKnzy5GpaBSNq+ub68rLwQYFmJU72pyLQrvuu69MQyYrD/ZP9g+Pgp5aGYdOVGO69edMxxX44f/jw/GwJSNPgF5s2ZykKT4gEFBw5gsOjsm+HgIWZVVUposzO+WIYuslsripZAbisq6QuOcASJZAe7+/palOHaTck4sJynnjvGbv1UgQm01moqti0AKlZXy4l7R+dsg+TKXtHSXJRVqJuvl83Tc9aKFpKuR3S3uFEFURyWfpQVntHxxKHw/l+23WYhZ2uV+vRC1atu3H3NoFbPut9UQTvP/7oKbmD67N1XHQZ4rJdS0FU+ZzyYtlky4+uFxfNUFeTejox7ah0hzf2676SPm1WvWV68vD5dFbc+8Kdy/7jogyPng7LiyualzHld9975/py2XaLw+O5LSWm6OKAosvnl8VsMtk7qovaMi6uu6KsqonXnESS99QsrljjmYt9itNJuV7HVqiJYFxWk+ri4to1zbRazQrcdDKdzbuuZ+TUDQTpj//gvyr3bhycHl48e4qmRzfnXdTFg4t+1Q/tskdfzudf//KXHUPTr09u37x4dhGqwlf+6tm1MJbz0k246YbNqp2UUy9FOZ9OqHx09jR37XxefPrJk5zEO08ImgQkmeoQQRWI0LKYZ0NKUUUMHQBgEjVIAOzZdV37tGvY6fpyITFrEkndpu2Pb937+re/UR32n3z44w9/+nNfTm7fOqqq4C2dr7qqwM3l6jd/64vf/e5H6+uGVZ89vXKOZoc3q6r89OfPTm4c3nnjRsqrTbPJFhEJs11dPcsIqzbxIG426Te9msUhAWKM6gunIn0XVU2SesIsEm089AA37aYfUlYvpuQgpTSIAYEZxCiEAKBDytmzjWeFmqWMgAjITdtrxMIh9qmHhGCOKaVsSRCdJfVG0qfrpkdmXxXNpsdkD+2iy8Nbb919+53XHzy66NbrTz55EmNsA/oy3Nybtm0PEh36tunrWXXn5OTyfJ2yapI0yGqZPrqfX3/v1pvvvnZ1toTLVR8jiJGjQVQHiFQCdPB5+bx8Xj4vn5fPy+fl8/J5+bx8Xj4vn5fPy+fl8/J5+bx8Xj4vn5fPy+fl8/J5+bx8Xj4vn5fPy+fl8/J5+bx8Xj4vv1q2p1v/7jt/1XVVKL0JEakCA+p4UkoGNcM4xLookAgBJJsIoFNAG2J2TIZEyKrKRFmtCCRZRdHIiHSIEcjUBAFzikwoCM47zYbA7DHF5ILXlAHR+8LEYlJgQ1TJAiDOORBgIkKKSVxR5JyZQNSCYxVVRXJM7HK0IrBoDo5QVA3YMQGJQskkAI4JjVJSIkEiVWNkBWIi5xhUFMwx5ZR9QEMpvPfOeUxXV9fLaFVd5TZO6sLMYk7BOwCsgleRqir7PpODNAybzaCExNx32bMj58qqAlEGREfkOGcj1D4NVVmYgBmlrKH0aYiEVNZFTslETSRmISZRQyQRQeKUkneURAHR1BBNwdC5lLIjyKrBOVNAJCAAhJyyY4zZqiqkmIgYHWkWM2MyUfGhyEMixG6IoXAIaALAUHiXkrIL49FxkiV4REIAFlHPaCqARoApKTkiQhH1aADg2AV2YEAMjhGMHBiTeWYmZIJQuMm8nswnh7cPqsM98s4G6RZ9d7HQTQN9hmygxkAmgGYISogGYGACBmAiAqaq0RfEBfMkhKNpcWNenM784YyrEh0TEwAgoRkQIRgYIgIiAgKAqoEhym//xb8KAP/bv/VvmEd0AKoiyI4A1XknEczEMAOb9wSIaGSixGiimpQ9g4ARMREIIDkmR2iEkmOW8dwyQ+eDmiESAJkakIFmNRtiMtMhDQZKjBxYkgAoMapIFkB2BJBzBjCRnLOgZ0/gyBGzas6aiTGEaVnMLGaTnLMCm5p4H7wLYGAiRJZiFLVs6gM75xGpCsGzy0kVJOUEiECsqpKygiGZZDUxUwDHBqaWgcxUxXLOwsxMlDN4dozExCYgYkgWHElWA8kpGYCB63OMOaJjIBn6jAil82isQsAEWS2LSgZTMDQCQzWDonCqJgbI5F1JAAyCloesRo4IYxx8YAUdWg2+0KyK6tkIdUgDAItCjsockE1yRCRkcsGbiCmkJKIpDS05QkJVABjPJmfHHHzwjlOSYehMs4H5wKYI6J0LVVk6wji0MUfnWcVEwZSLsiC0GHtijENKIuzIxIiZGXPOMfWKWIRgGbJkRkAGM3PkctKc45CTINZFYGJCQoMkKiCaJYuAAjIXvvQ+ZEADQTOVDKTMDolVDQDBtGvX1aSEjOxQs+asRcEIkPpM46G6poSI2f8f/lf/MQD8X/7v/3ZWAlQ0s9F3VAERwQDQdmuGgZkCESAiAJoBEiCgqY3uZmZgqjBWA5AR1AC3b2UDGH/E3UKEYDbe2MzGWcYQERTAbHRYBADU8f5qYAQEKoo41gkRFAwNxm8AjR8gjFVAQANDADMYPzUDBAMAA0RCGNsLoGYAOB4dpaZj07fXERKimSGi
igICIJoZEZraeNVYB0QwQyIgAFFDQjAYX3JnKghIzGNNzczMiMkMCNFUFMa/IjECIBKhgYoBggLQWHXY1tJURYSYDcAxA6CZja1AGpu1vQeYqer2eQCEiLu2IDIAARiiqZqK4jhzjv0KZjp+abzKwEDViGAc4/FP2742MwNEIiYEAhwnW1Pd9uvulgBopmpb4xmrJ2YmqoRkgEiAAESE9oq5IJmZgeH2plvbIaZxNUQwEVM1RHKOzPSVb6OZISGImgE6kpxVbTydjYgI9L/3r/+bAPBX3z4qkvSivaoCqCgRAqKNVmymIoxQBT/EnGx7JjwBoBkRMtM2h5hwaxTjoX1jBwGMbz+x7RfNIbgXnmUgAFnAcOxNYASPOJ6jl80EQAFwtF4DA0Aita0B0ug3AAYAiGI2XkTjMBgobI1ZxxE1MwAFVAMF2znk6OPj62rQto4JhmDjOL+44pX/bbRu2F7/6mdjk0cH2XkkGIwL+jidgAESISD74EOoyklVlFU1qcuyKsrCOe+C88zE/IoNvdzIvvzFXv64q+orz/yHPnvlj6MBwy/9fTs1ba0bf/UB//+LvfJI+OWnvnKR/cpXXvnzP/TR9o72Kxe/0opt95qamopITnHoh5TS0A/90HdN27RtzmnMQUbT0R8QX/FcIAJj5LOmA4A/+S8+YJ2OLzIzUB03UTDOjjs7BhgPv0OErWXgODuhqRnC1sW31rb7MmznSLCtj4xjP9rOq0a4m0/GMR/n+q0J4ssOftEpv2QWL2YtRKRx3zeuOPhyjGC3IvxSf5upmu6mNkJE2jURYJxDcPcwhBdebePcO/YEIeLLLoLRcwxsWxMamwS7/176FW6dcrtaIb7yd4AX3virDgCvzNe/3Clbq949CXbTw0vD2vo4bEfAwF6dCl6ZUV5MSkBjQ/Hlk1/2oiG9rPO2Arhb3+Hlsv9Lfb4bjdE8XvW3Fw35h8cbzLa2iDvr+9Uv/ooLbufDl5X55akDDUwFEMXl02+9CwD/x3/rv+EByQVHZDkxjw+FceljBNSxewwRxdSIk0C/biQNjig4DuyDd569I2JGIiJEBCAEJCIkQCCk0W1kXIBNzUwV1EBExjNyCMdFG7ceBaCGamYwzv+oZmLjZG5RNKoNSTZDXDdd1/dNN0SRYXwPsOS+H4aUAUyTpCwZLKuOuy9AVMCcspqVwaOBmGWDfkhtN4xuRYSqu43B1i128+QIc4lC4SdlWXhS1WzqHJlazuo9SdYkwt6BQU45BBZRQAqlzzEB6GxWWRIzQ3JMYADzSQ0ifUrIVHjftV2SXFcVkstZh5RijEUgJiqKEWqx9zxuYL1jVCPnjw/3p2XJQJP5bH8+ccSSxTGoiqikvskpdkM3dG3XtllzytlMR49UNTNT0XFza1tDRiLi7fxHRJ45FCEw07QuTmb18XQyKTxqNs2ggojBsXMuOOccO2YmImKE7Ry1/ZcQkbaFiYiQHBKOGzNAMiRAMjAVzaIA414UsmgWEZGcJYlktWxgwEakQNkgqg45i0LKuct5vWmeXTzftGv2VAQXY9q0bdO2SW3sy//g77wPL84qosRFBegsac/s89DXk9IUsgyqmkRDyWrJsnkmJAxFELWu651D8iiSFNQFkhQ9AxErKQMpQE6D5WgMMWdmIgcACiLjLtdMg6+ZLMZBTcuyRMtJkpkF55NkkcF5RlLLWcSAHZghBe9QNCMoIpgmxx4JQI1QkwAihHKS+4GREVRFHaKqMKHnou8jEBpaioN3bEyhYM0CZDknJDAicGZMzrt+SGCyf7QXLee2MwNX+CiZCF3JIgKq2dQ7SHkQAyDKCH5WNO2AIhTQGIENnYJl74ps6ooiST+kyKxmOcXkQhEKNouOMzmHmJyzQZOSASugichomJITsCmxihARkhFzHhKqAWhSJRzxDBCPJ79kx2TAqtGAEAVpXKKz6kg9KCQDs6TGzgyFgAyBEQCEUAEyEZsKkxBxSsn5QAxAJiJEmFXQkZmmZISgTJ7ZESEoEZMjQgADZs9oaBAcE0Lpi4JD5UNRBWIFjUMbcxtT27ssGjMZoqFJBlUiNhBiFhFAZCIxHVdh5wswA2D2FYB3oWL2xJ6JDZCR1IyAFG1cuAlwTOhGACNA4xcLhbqkYkVRIXmJLQenlgFcKAtTiUPCNDgXciZkZ6CWc/BOAZxjUQHQ4LyYAgATmAzIYJZGEKKKloHZEbJmizH5QDkPgOAdDikBJEI1ySAVICBYHjpfhJwMQckRoYomYgBVRnTMKpkdIilojl1EdUUxZxdccM1yBQCFcwSc+m4ymUYTAGNHMgzBsUkehjSf73sKDIyozpfmYsy5HQbyqKTD0I8AZSRwU+rJs3OqaEmiqRSlH1cx55yZMDOYIhEDAYEYCBiYhrI0ombd1qX3iqt1QyWbWCj8CHUHkbKY9zFpTFXwJAYMfd+UU59TT6wGJL2RFl3W6WTKiLlrZeiBhAMEzNK3yMTqy2LWyib4wNT37aaqy67rg58QBiAuJ0W3bDVH76mofE6Q2qiaQsWInOJghqpYVAERYz/UsyI4BqN+1YZAKhD73ldTIzLDoq6dC7nvABTy4MvKmIirFIEQnCcRMMhmsQgMYEDOkHwRUr8oA0cTQFFA8l5zwiT1ZGLqLCelTKBVWTgix54BclJFC6Vv1pFQlaEsi9I5AwJERcwxhtIBWk7mXHCeh7YHtBDctCj7PHjnBDDniMyoFjwTo+TsCsfObZbb14SLKoARbpkCQsqmgIpEJkpII8OOiGYCQKpKRAAARqrKzESsIsAE4CwlJBBVMlJTBBRVdoyAKrLd+hCaAaCpKCFlSeQQAEXE+e2LpYhQVQGQmBFBRBCRnRcSNMs5ESOTSymPGy/JGR0hkAE4dqIy7kKBEGEkD9BUiAkAVRSJAEBVnGNUYGY1IADNQow7zEgiYkAGwETjvpWYU0ojQCPCMWSCALx76MjqEDoFMwFE8I7BRERc8Cq6pbVwnNNUVYHAOTf2hqlKyux4jGakmDQLEbrgc0ojkySmlhUJDQ1AmR0iWsqgwI5ERQ2ASGysL404BBBzyojAxDkLOyByAIao7Elt5B7U1Jidcw5MRRWJchyQENGIWNVUxTGP+JQZVQHRvGfJMq4ipgaOxhFw42Ea49nqZjByZAoGAESmAoCeWQFMbWz+SMKNm7GR1mFmGUklUQQgdmIiWZnZtodumffewABR1ZiIkLNkZpezgAERmQKhyyagQmgcginsOB8gNSiK2HRArKZGbIySBYl0xOSEaKZGCmhIAgYACoigDsmAdMe2juuOouFI8QCoKiIZmBGKGgAQooLBGGwzU0BBNUQZQeg2SIIGJmZKOD6ORuxGaAB5rBWOzNl2dwVAAqqAhFvKwwDHFsrIp4IhopopkKKpIWzpA4Tx9aX4gs3ZQTAawdYWso8ozBB2sJ9GmKY7dLZ77Ig1cYfugBBFYYemXzwAEI1GrIiABCNBN0KREdSNbRmpWHxBMr3CBe2w4xYmvsS5CABkth3iF3h1JA+3TvECWr7Amq8SEq9QDLse3nEHOw5pR7/
ClpkbO+2VL247w2yHhrcfjHzorqsAXlzyKq5+BWS/IBRewH/dQTXc0ZKjSYxQR01MZUcdb4mOEa/u7rcl6GXXRR4CoY2RLQIGMAbakpOAunMWAGSg0erGMR4pEx1DZGqA45KxbSWP0YKtAY1tHC0Ct1PtS75jvHBLo9DWvn95eGDXwy9B/zgoI3gVBMDR4gARyMDwJQ0yci+GhrZjMUYCGxRGS9xVjl4dCbQX5v9i5Mei4+MRkGzXmvFBto1d0Da6Qjtm7JfH9SVhOXrj1nrHvesLK33BkLxkQra+9Kuk14uq4ctuG3vqFc978YNuLWpLUY1dsuWsDNBoZ+xmY/XxV+69/d1+uR5oLyIxhvaCnNz6w7YBsLNeeOG1v8rb7nwRdyEh2IY44JfGH2A3Q/9Kf+w+w9G5bOdz2ypunZYR0eHOup999okppIyoCJaIyIAQiNAECAxAYSTeTUTMxoWtDn5alNO69ETGYICGhoxIW3aQiLe7AzRCQAIAAjUmstFMzYxI1ABQQBgNAYhoG5oyFpWRjtStqyhvp18DQ0+ODIgdhaKo6m4YJkPfdkMW6WNsui5LTGp565AAW/ZjDFiOi/K4CbBxTmYgCo5SEhET0NHDcBvreUEYIRIYiKqCkaKAGKKiBe8doakyEjtCywDKnszGkJFJzqEIoJazFIFBNMbomJEs6zhOFlMWtaJg2YYdMKs507IuUisk5ByrmIgCIDKwcyYCBkhOTRCpz7nQlAVd2yQP5INjYIM0DIBQOJCUUKJzGgqwtLUqMTNFIDQzIDRAle2ih0jIbheQI3Y+FGURQvBuWpXTuioCEyEYGrIxEhEwATtgRnbIDIhAtI2MARoiIQEhIiMhMTE7REBi2BHyBkRAgOOmGnjr+qhqOC5y9pL9dYTZwBFlo6wJzQgJ2LKMoTxiYucdgjIgEyiIoSFTBqXcj3fZUkWKfjCz1LuCBZUM267LKZLnbFlUgQmQQHKSjM6JRhFzhUfKQ+oM3biDVzJ22McBybngQAXNQsWDROecSCZGVUUHSTIAh6Js44AmCgpIZqSqxuSCE5MkGZjNEInRiYioifMcY59yRrJQ+KQJPZKjOERkR55THBxT2240KXkfCp9SLCY1iIJpTAN5D8xZesIxwp1FSU0ZmRwgm6IAYkqJOABzQhWm/eN586Qd4uB8yehjHJgoDpnIwmQ69CvHXFV7q/U6JUXH43pN3imYC9CntgyFebQIXdtvCUkm8IyGQJh1xABgBppy1wzIzIzGZIjIRoDEEIeIiBnVApmBZDFNyJQksfeSBRlG/YJjTpTJBQAVyT64mFIITMTDkEyyL2h0HkP0BQ8xqYKpsncjrR5TZPYuuJTFzJwjAyWmcWOfcgY0dKxRkcE5TkN23oMae3SIrOI8IRtKIg4M2TORIYF4JkfgSnL7BUxYSDSZphzXHSaVPqMBORTJhAZq2YQZE5gRsGeRLDmhc6bKjhDJVz4T+NpD5aAI4AgYcVzjxiXbEIDGLd42Ho0ARqODjV7Q54xIICJ5FM70EjOTmxTzvl0aYj2ZpdirYQhFjKCiig4Zs4AZgEBSVcBQVaPyIydJSZGxcCGrSE4sipAliyIMfU5x40IAxjY1gFZWpYkKKLBPCuAcsGdPZjTkFHNWzTzOSuAMfYRoZpKjam+QiSAlFLPOBgnIDhVRERDdIGkQyIqIli2PqjNXhC7nPg7sSzPRYQiBhpwGFcgoKbVdJCL2HsuQNaL3VIQ0tEYkygJAxilnRPCOyrJQMc8ejAjImJq29cEV1Wy16Avnp6VnjI4pV5YJqPRgIJKDr4koZXBF3We1EPan08X1M1e5IQ8GZjkh+mo6FUMzp6jqKgnGnnOOibLzoV2siqIo6vlq0xZllW1omg7Mct9JzpVXVxSung5Di4UPlVeQLmpgopJ9yeTJczKnzI6ZNRuz07wlMgQp7NWaupiyn1TZBAnq2UzAKSE6AnDl3kGSqALTqjAUAWw0dSkyg68LzVmGVO1NgUPOVs724rBMgyZOgq4op9YLK+eECo7rWsTisApWRWFwnjyn3CUDEdVxmQBwoSrKade2oppBkyTGAsGyCAiwpx5TICsqboc2VFWU1PeZQ9XHZJb2q1pVyQUV0Zxm890bknGr8sg5E6CIqupOskMj7zvuYV1RqGbJ2zDgNkJNlFXNkIEUDJmIEQgNgMmbyXYXDcDOGYCIjXzuuHdFQu+9IYqOOwwkZjBAYhjVIWpiioTOjW9nQDAVM0QiZOcBkVKKyMTOpZSJWG3UYBHIeC44SMoqunN9A0c6hpwNAZAdixgAGKGOIM4Mt/iOiFnNDGmM56moGcq4oSMaQ35mqKoixoxqyrxVq4XCmSoTDH0CINtSLYqIBJBFvXdGiohADGI5CwKqGDAQQVZBGuk7NiN2XnISVXI8LuNJJIQQkxAzMW9394gqYgJgyI7JUYoCgKqmSM4RABKPh6ia5KwAxKSiqtvoHyICuzT0YzQc2KlmVVMQQvTBE6FmIBrxuwJAyoI4imtARMeeBwAk1pzVwPHIl6ljUFPJBow6kjimIx8EiICoQIiGiERslolIzbJsIQ0RbanG7U4bclbnWEbaB00VEUC3EBWRUESIGMbBZSaCLBFEAMh2+ECyisbgKanFqADISEZopjiGHxAIjBkx48h82QhGYYzRK45QRAEZdtzAlmIggDEkO+62aQfTxw0/wrhWgZrhdrXaLVPjtlxhjOVsubYt9H/RB1uiZouCd2F0wh0Y3vFhWwUEAm1/A3zBeYC9oGhw9ynAjrHZwQLYMR27y7ZheR3BrsEI0M12LMb2DtuZhkaQtSMXYCf9UlVD2Gk6diIbHI//BAPEF1968RG84LJ2mNO20oYdrfMKzHxFEfMKAN1SVS/R+K69L+8AO+YPXgzIC4XLVkuIr+woXvx911MA+OqH/xA59Op1O5Zg1/u4675XeAnAHWP1ogNGAAcw6hABVLc6QRUdVXgvK70lNV7SUYZGLx8LBE6BAQBpq0EbR3bUZ7/SFzvuZYfyDUwBdFR/Ihqojpo7NBqFf7o1Mhhj6ePbsF4h+HTHPBjugP0r3MuWJ3zRD2PXGAC96MWtb2w1SWCjCMZAty76CqmwHQYbLQsMYKz2eEtAQCJ7cefRRgAAtpzmruu21d/WZSvL2T1mR0m9/BO+HOjxhjux1St8B+w8Vw1G6ll3ho9bsmccgxeE2Qt1565/dgaOoyFvF+tXLAi2NOmO1P2lKmwHdEdbIpqBgSJu+Uh4wYbtaDPbGartyKSxFTvb3BrWCx3Q1rJf1Sq+YGx3rOqrXf9ystjNO7q70rb/b2+lv/TVl464eyTuhLKv0GkjM2akAOMuCwCePr0qXBHTlnuUrEIekMxUtqzZlhg1FURzbCE4Zq4JgXmMA4GhKigQI8Ou33Zjj7txACbKqggIyESURZkJSABQVBGBkcFMR0YGwXCbX4Kj2doLG0RCJsZxPXDehcKVvauDF9VuiNO6ZEOTdRTFwDm2BIa6DWM4R0A2iKoKoDnvVdQHL71GQhNTU8cOEFR1FLYgjRXbhmFElR
DiIB32Jg4Qq7JgYgLrh6EsgjruI1R1JQobEUMg5sBsgME7BK2rMvaZiH3wq3WDTH3MAJiyQJ9d8FE0iRYFIqIjZuKi8GbGgUPwoEDOEZKSMUIRQtd2qrlrWhJ1xKFwg8iQ2xB8joOKIKHmoeu6YegQNamI6XZbmxEQHXNOo0IFkUBVmUdtBoyyn+BdWddVNSmdD8zTwk8KH7wjMDBWNUJiZiRkZmYeOWgAQ+St4Y4q7HGzxEyEyDRKzgHZzABJzZhplFCroirqGC5BMrQxnKmmWUGNABGQRqoJkEVUwJIJICtKzgmBQRmAQuEQgIgRMWcZ+addIOGFqohVYgbHsc+ORNXMKJswgiucYlRNouCdd0yqFlzRSQ+IIdRqlBVVIGUBMVL1HFIUMFGJzCaaQcWcQy6G2DsGzYm4ENWoHbPF3FdFmYcsw6AGOUsoUCyZZTQwpThEBhm1W6IZ0JtpztlQyRDVzMlosABoOUs2LgpTS1EVlL1PuffITMQUuiHzSM0655HikDQqmIrKuAYyevYh5y72HVFpnp9fXRweFq+/efTwwUVuk6KTCIgCGdVJm7qyKmTIKfcIGDgAojnzLnRZJEXKYAh97M2IwTkyCMUwZANNScnQRFPsiqJKMUrsXOFdcEmMjFIcmAnJTDMgAakq5GzoHLAb1zVi54iAHYKaZQCQnNDGsDmHohqGpCIhuJQGRnHss5nmBGPyxLhZH8O+GQRFcmZi53xOwiimqiKApqaAfqfKIU29IhAyqIBg8M77oElY1aExmANgNGRm5yCbM3EGHohVQVIx8ViRoVkWaXNaNrJqISaLCZEsq2kG5w0SAagKkU+SiBBMfaCMYHmb6mJj/ksgDmw0EvZko6AIEWAkjtB2WmiEXVrJbhEDgKSxrqbO8TBsAkFRuGV3jlmGNhG4IUfTwBjA0NTYNGcVG4pQxpxMoSorzWqAajnnPsY+DgkJEVwfe5CcJAUXkCBpzKbeM3kziGbsPasNCJHGDBARiR0xiiF6D5ktJ00DoDJ77wKYaR5UU87OZPClCKvGjdrEXDnkxAHV0bq9ql3twVJKktF4xqEKhUnuTBXZ5dgTSNaUDBAgouU8AAqapdg7JkSIXcNoTI4MdBhMMqNz3icDhwrMko1Mycxi9sERgCZBhz4aiqqmgsLx5PCN01uLq2efPnsQMDAzoMSuU0OHgRkpQ+qTixrjci2JQBxEdhLK0sz6aGY5cOjaTRxkAKZQl0UhKadhLaSIOuQmylCUt2PqVZuYhrIsAHk6CyV7sWAIoKKizoUcEyApkBkAOU3IPEPIRCV7r6mH7KriwEGV+w045aIkAy4p1DgM3bjbVhEApzIQqiIrVFW1h5pM+z726IPzE0chhCrbppigqif2mlpQclCUnrvYZG2ZwRs4dJ58EkATyLpXH6FSVIxZoyYkcq7IuSvLCoAAShXuh0E0mgIxeO/UmJh8AUySh2j9OlT1dDJpmmE+P1lePweNtXMxWpOHMa2mns5z7mPfyS5EbKYqkFUJx+SpTMRIPOY1jGjIVJF4VKt4xwiqIwgBkiyOXTYxA0mCbJJ1zCEiREPKmmXcCjKrCgGZKo4kFJGN2jdiRJCcx0eMK54bs+rQckwjjiXmGFPwzsGYzSWqwkxMnFVSFmJWEUMzQAFFAxNFQs8MZmoWowbHOGqCkHDMlE5DKGsTAwDH3lQQwDGZGTtW2yrCAZGQzMA5RqAs2UQVlACJSU0QxBTBTATIMgKxgyRDTOocS1ZNyXRMGmI0LFxQU1A0As15lHWF4HLWIWdgElXvizFk8yL+FAJLyqNcS7MYuRdoUyWrgYEG71RFDPMgKsQuAAKqEBKIApJmAVQzQCA1dc4Ro0BkIEPNsSXNzntT0JyZCNAli2BK5HJO5Lwj3vXSAACO3JCigtiLNEQmMxMQAkBQkS1bJ1nMbOxJJqJtqgcQEREKIBCpiorgNl6rjKQICkiEpipZxt6QKCIqqo7daMjM7Jw3A0P0PsShVUmIKDmpGYcSiTw7G0QkoxEXvPUCBlYUBCYkRDFIYqOS1zuGLNu92UhDITGOW3wY48tIqKrsGLbSLLBx+7cFTiCqBIYGiMg4Tvo2hmRHJDtKqEattAIojBhXmXAL5QwItmmEZsaAACMy361n+KpyZUcivCA7dol7ts0sGlMkR+D4Eklv8zR3EHSs0gtqyNR2wMzAdsmMo/xhqzIyHS18TCLcAeIdJh0DqVuSaISrY+IF7JIldZv/tq0oIiG/oDa2CGmHsO0Fq4M78LrDlPaSwdkyLroVl9h2h7BTP724cod6X2QMbv95Rc+wZX4QXpJUI0Tf9dgO/eMrjNZuUH6JNdqC1h1oHHt7VN/RjvHadhkYbPU1O+UFvCT6fhVab78C4ztVzbZZarh9+khT7qwYX2jNdt8eFWvwSjVVcSuOAdxWEnY1f/F8s1GOiChmY37nOAZMtOXuAGCbR2yjFeEuoWps1Pa5uGvFjq3bSd5wS4e8UI5sBxF3bdzSUYhoYmN+B5ghsqq+kkj94u6vUCUKY+7wVinwchR3xIbpltfRF0O4I/JGtdELD3phiVs+avdAxBe00a+SIVsq84Ws5hXr23XIlv0ywxf33FV+vGpnJrjz0G20w+ylimxX9ZF82hGoW7XSiPq3XI6Z4DbxepRGKhib6YglbcwefvWmuyHZucmWgHlJKtmvmD+8mKPwBXO6o7leDtVLcnBnIyM59DITCF9YwtYZd816SajhK+OIuzEwo108CEG2z6HtWlBXNWSoChq1JOZV2ZmZmSpt06NhDCoaoFlg8MFPShc8OwY0NRH2npDAQE0ZcXs0BhiRMxUENZExOw0ZAEg1I5hnMkQgj4AkOiIXwlEGhMrb/k+iWysxIzBENhvlfOSZDQyZGD0hFy7EFAsfMgBmZA6DWRwGBur7HiRtG686LklZt4njzM77Qg1jO6AwjxVTMx1VNoSA4wQDAKDKBMyEOqr1jBFUhAyabkgy7urVIRXslaxDJGY/KZzDTdPtktwRSIcEFNTATExERTMCSE4hhCxqCilnMcmgTdM59mhWBybmlKMD55g3qQO0GsoxBSQmcU6pdNfrhtiVnpvNxjF47yWZZun7DEA55zE3HcZMFGQwQCDn/DivKJl7EQZV9b5QI2JXVbNZXZeMU8/TsiwcMSqqGgDzmEDGzjGOWWXj+kEMiEQ0ChjGnQ8SETMREI0nEKCZoSmMTOKYpQYAmsHGs1lQVUQEQFVFNG35VzPHDEgCZpoBTFVMZQzIhcINOYbKd0pM5AliHpP+CJFMhfz2nbBbqijH6J0nprF7uq73oQC0oRuQicb0/IxJ0ftSU0p5MFPLMCh4DkhgAJKTGSKwKRJB6nslGSc0VRtiP8aJTUkVHFkIPqesgOw8czCnZugICcGTS0NfuuCcb7uGOFhWBHS+7LrWFx4gx5TG/Yj3PsUBwEJRmJk5ZSZDI4+BQ1IkZQQz0wSqZIRgUVQisGREQsfOE2rOnaNgIpJUMSNKPalS1MKxqgxd9I72ZsUmJ8lWVh7QBMWh1wHFNA9x0Ehci6lGQ
1RAqMqq4NIFltyIJjYWEUIgy8wgGQEop4wgRmzARN65kCWJKZqomiFuZcjEfd+PXDIRZgNSFIUQwmjSiKIpecd5SEw8psOaaOw6AjLNkAnFVDJ4RDBEMjMVAYVsIimFsgQEUWEmMMg5oVlOQzL1btzQIpjmmMyUCYnZFFWtKkq1jApmyQMGcA40eE8AmIUBUVMgR9lQwbEjIl8VXJLzaCKU1VYdrAZqBwJAIkNSiWBoeTzeB0XA1BwGE1LN5ABJ0QSARihLdaDSK4FzPK6OYzAfDGwXYNz513ZjuQtk7pYw5CRZFI0tadahYw9oETQouqKYxpQ9IqKloQPNTOScB1PvvRkak5kSQtuuUoyAwoEYUT3mnInM+2AIAMqBCB0iG8J44hIggXEcYqCSuTBgz6qSQZUDNW0LKkQEzORKVANC1SjjMSaKQxyKEGTIRcHOOzFCEBL1UKlxtB4tEzkjBGBGl5IpmDhTQ8WkmvMYUU+iuQeNaojGKgYqnivpLZTkkRSsnu213XqctXLOSD44H3woOAC7CZbzemZiAsbJQuW6RmehvlMd3qhmHV85DsHBZoiEyMyWIMvA5FABk5AAOhKR4IqUc1nOANHEHBEBmmTTZJZSHoJpNww+MHBIOqgnVQRpaujQQRw6z6DAxJXzwRn07SZ3apbLqhbNDiEUpdlgaIV3Qy+gVoUqJRo0EntfHmtMznGWqNJXZdWYqKkKOCqCm7JQIPZs0Vs/dAil8xPnS7Rkw+AYCNmHCsHlIZuYoakNMQ2QEQAm5dxSZwTeXBqPhNNMLqCnvu9MhcmKokSx3nJKAxE6MzABBO+9ihkCEiGjc+icM2BDByDeBTBlUeSgWbu2ndXTvlkh0HRaMYlHnNdzyQqEfe77tkcB77ZLAhEBokgGAGBUMzDdqjqQR3zinR/P38FRPKxqZs55FR3jXc6hqPD2/LJMiKKaJROTd97MRCWN0x8bk0uSCBkACVFFBVVHwM8kuoXHSfKYIjR6sKgAQlkWeRgQwVRhzA5C885bBhtlzeMaDACjs4iAgiMH49Fv7BHBTEahh6mpGrNnJGAb4vBil47IZmJqTKSooEZMwQcwUJCcoyPcHqMzHjUwoniiXWI/ocE4sgiInn1wOQsTM7Op5ZyJSDQxMzKDyJi4KqpI6D3TGMlVYRqnNjDDLBmQVY0Agg9MKibsAjk2UwYmBDREA8kqIs6FFPMYdGJCELNdqAsBck5M6JzLo2IUsY+dqSGTiSKaGxVeYFkF1NQUHXryMSVmb6aS03hYTRZzzEycRYiQnBsPDgEzxzsACebIMbtRMpAliygAeOedDylHEMDtOUG0O/nECCDnhEjOuS3HpMKEgBg1AZjz3gAce6CtRh6QVCRDGhUW5EatG6nq6JfEzMyj7GL0AhFFZtHtHhy32AQNIIsCjposzFnH7LkXkGiUC4HZqH4a6aSRq0LCkSbF7Uc0nuSiYI5IRBBJgATFFMbjZPBFJtOodFAjoi2cG3PH1Lah5B1UxvFN54g66qnGpW2L0LfW/CotAQDbLK9R2mfj3gBxPHBkpFFtZEIU8EVaxriJfuVAEwTacSNju8x0q4rCHZMDO6iLuJWmjCH5HVAHg7H+Nqbb6cuVetfqETqOuPclCfOiB0ZdzC62jrDje7aX7nD3iwHddcbLyu1u9rJp+GLbYFu1yihK2FJCZluCDm2bnoe/hH63P7zaU1t+bMumvZhi7MWXtiMx4v+XEHuLcmGr17AtFn7lu7vWjMdvqaptFUUqKi9OAhpFIS9FMTbi7m0uGNvLmm6FIq9QXVujetH5L4idXQ9vT8kDzKYCNp6W9eJoni1HBbA7QuAFWbXt63Ggt9Wyl2fD4cux2OY3vYT8W17vpUW/9MkdJ7Gze6NdkHDkc18QQduvjMIIBFPbnhcymtWOlXwpaHu15lvWxrZE5SuaJYTRdW03ULijY/ElT7nzqS2f+jKty140eEcgwivapZ0/vMqwwC//+kqf4O4bLw1wy3WNXCEAAOGY5WYvpJoGiiPJbwiAimDGajQKzZBG1RfhSwPZmfPOImBHkuH20Tv62mDHpb1Sz1fpoxfV3jn5zla3FogIL9uDLxr40t1ecHuv2DK8kB29sNmXfYsAMi6Z8MKAJnVFW1EP4nigHo0ruyoiO2cjmFIEA0fIZuhdFTwzghox4jYPevTl7VFFtHODMfUId9P8/4+tf2u2ZUmvw7DxXTKr5lyXvfe59AUNoAmAAAGJQYhBK8LBoG35xRFy+MHv+iH6E/4TDkl+NZ8UDgUdDtuyZdOkZdCUBRAgyAa60d2nz2Xvvdaasyrzu/ghs+bazdDpPhH7rD1XzaqszKoc4xtjfDwmLA+VXTIRjzcvDl9E+CS1M4GgScyPonkQCOlC5KOel7aUpROClYgNmakqaplffPaGlD/ujR7v787Lxw8f8PGlWU8LGqmIQpzk5qkuhaM3hJ9O63oiFUmiZta7RWR3Nwu2SYgIYlmVmeDB4KKkIm6W7NfWejhfLudaAt6s9W7MbJbruZDAg3r0z989Omj3OJUlLEVYtHIpbTNiKip77+Ml4oHuXTh3T8sojK11VglCC4PNZNhmTozuDiDSI7FK8e4vdrlbFrNeSJalhBIxu+9pGrH2jIjgRGYQprIyPT3RIxkp6cLJSkUQoPV0fry7f3s+VYl7kcJcRBBBRIEQIiKMrF5hgMYbNul4bTKNgAce95+HrnpGNeYxI3lsP0DIdEIK80iNyPRwP/bjQ74JZRYekYXkPlWlwpwxn8zCxIyxoRwvDrNR4cuiejMZT6pIpBBk7Niaua73Sbq3J2ZiIpUS4axrc+xZmKT7Tsozmdd79BRhZKYwsUZk5JhqtdlllO80/NqvIuEZupwipXsEkkkoYt8bcyEwi2R66zuDrPWMGLY2IkbGdr0UWtOCJKtqKW/27ZoB4UW0jJBg1RVh3kNZQJzOROu6StueVSqSRWTbXoAKSBJHWNi2rqdSHjPpXNbL05Mqm6c5R5Bn1OUkFY32+vYs12/2j3ut58v1GsR1OXU3b+4mXCszUzZZuJzI9ueQM8rpemlFqooSF4/I1hYEAYyStAZetBR0a92ZdO8OovQAhtJg9e5pnhSghZlIpLW9qvbtqizRk8ajixm8iBRvm2ohLiIM0tYudVmEFKBE51JJi/UWDpaCpEggC4MZJQiZkbJYTvcEElLI+kYgZh2uTS6FOGBgGUp+AVAISigqtahGr0gloUwlJXidT0CwJwtpPUmtVIob2/Pe3295aWXuEJipOAxUPRqII8FcR34KQIPRY04IZYaeVl4q18q1cFVdyhEEDL4ppGm+o287GyaK8LHpGqugaI3uslJKiXRkKXyWUsO994vqui4F3SJbqeeRyaNSEBARLmV/3jLCyNINhEgUkaenl/PjKdoOIpXKRNbasizdkyS9ecTuYSyFiBGUGZTOxMqFhLf2kshS0LadkMJLgiI3OIlIQeEMR3j3LCS1EhGiFw2zCyyq1BB4RwQtsrBWZGzPm5lBuZsjw9x1qZHWWycKoSSWaF2URTWDtZ4ENCFl
llUfXfa2t0WqhVBQQT3TabnEZ+e3v/HZ9z57fGfX9t3Tx4fz0ijspL/x2z9akqLZh++ezHJZTyh1u7yUehbxCDdrYf10PhHyZf9Y69ta79fl3iJZxDnJm4iTv9yf1TzxtFcyYVjYtn0oi0bfSnlYljuzzb1770XXcnf2FO/tm6f3SOJSWSuRpjdKc/8gSt52EVEptveTrvWkHy8fIpvXHmTNrqJQUPSXRdlShKGshRdEqmr3Fwuv5QH6AK7dNuTWo2eSLAvLXfqE6JLtur0XXbSuZtbcRGShNSGrlu3y8VQqkfdsuiA84d3jKVPgoxpQIBCkIOEtvUFOkQRsQrKISJKHd+tF6wjNkeXUEtkt13Bv+3Ura+39qlLN+935rrXe9k6hVevBFE0cJKJEae7MRUkCPraB7k48UkwhxE6UTpjWK7CoWXfvICEC3AM88y9BGE97JEkJa+Pd5BGZfViXIlN4pPxJzw5iZgWMxvcSi1Iki3K6ewQC1tuowADh7pN3yFQSSyMm0erW+IBfRIGMzEiPUdDLGOwYmNMHdT4KGgyiYJbxuMgEjTAmVlLqvVG4tY1Ftv06EtATALFHY9ZhYBDi4JEyaCAKuIoQeIgm0jNpUDA10olo0cXNBPBphiNmSXcGCSvgYS2TmFSGQQkEoJQS4YkgJpgjHJAMV5KRBZY0HGnMolqruSNCRCEI0NzpZyYQ4cRQ1e36wlMGkyxD4xk9kpOkiFmCSKUgEQhmQc4Q2VqrmSnr2IoMDCEUjhCCMEWYR0dyZQal9SZaIoKJWEWJhgOpFh3OPuYBihNEGGnV42UHsnQd9CAhkTJ2ZmAi8d5p7OMzBrWUZCKpUva+ywytSs4MsLKkGwKsN+Qi4UFES6mtX0Fw5BAjz08wJ5KZWWQ4wvjgH8aNG7zCAAXmB4c6jkATIg0l3iAiBpzt6UPiM4jCwVkkkScE08SXB+TzQ1ZBzAmMQ8fwvk150sgfGrVoyoEAph7hfxCM0dRs3J4Fg71FTr3GDWPmAY/n+c+sijmLMsdbfJb2MUblRggc4JxoKLBuP6Y5bpNEOc7i0CVM2nMM/k1kMMfuuIQBJ+nQ9dxG7PiK20UfKUwHGXKjdQ7q44Dj48KnqW8CygPT34hEwnD4TMvq/E/CJ5uQ49aNgw/XwIizTSI+cPDr9d6Q9ifn8+mYHNoKmrzNsEbmMCkmcATlHsLrT4b0OA/+9HbPv4rbxwLjETBi23D77CdDPTDObRjnwTNhEY7wmTtPPCBPIiIOGDzvBmOm/AI0iNdXCuA2PW+ka96Ix3+HVbjdYR7BMQNCx4j7ipilgkFBzmiiGIA9YvxgUE48BUWEYc2kGT9EyYciK4/xnmd6W5GMGKv/OBod85heyUm60SU4SKUb4fHJJR33/DblDobwkBbdkniOOX3ck0+IqttsuXFtx02aszNvfA3xTZGYebPfBVPOdTqERKlnymZs7EC+KhHHdx+arxvzO/9wMG3z/uW/wwzlcQvnvZ2rjimPq5+L9Tam+cnvTq4wc3JtGGtvrqXXAT50eJnHfTvuAoOS533k8bqflDuARA0icDIlU3JwUgozZGbK3P4XkcwkTAEZwx2JETfrCYosAVJmIiCYCWmAMIhG9wbK6XOdeW3zYacEUcgoooEx6en5cMigTMNI7qPRxiE5BQSHCVOgixTiZFbJysRp7pla3E8nZFIp9+WtZnq365bOASisE7ETg1lEWLhbINMjS1VRZdZlWTPBoq331pq17uGlSFWuhd2dMoS4my+1WmQyaMncWwzSisjde++RzMKiGulS2U3P50fvXUiqLKLczdd6Ei7I3ROSvNRl72bmzOIZZkM6pd1MkplHPRIICGtGFOFIZ2BdF7LkTMoIi+VuBWsKyrIupextU2bbvIhubUc6MiKDMbpE9USQEMISyZnKqeP5BV+1aqnrsiyFz0VPypzEhIBMvhsQ5sIslDLlQsIswgTMdCIQmFkYxDxmCrHgSNNLIOHHK0SORxsImWGUSRkUEeEJCmTRkcbP7kMWQJnJIuwWGLljyYn0udmcC3QkMTFSXnPrjqwiM3BG2yNTVT07sdS6mu8RuSwP1/3JfAcve2uSeX8qPRqTEpH3viid1rV5M/fuV2smQlRkbDq7RyS5hTIL5dZ2xirCvW/kTgKEZbJQyYjmZn1nlnVd93YBWSni3qx5LWDhpFyX86U/I6Lt75UozBkiyeneI33EQiQcEc091DO3XZWZgtreWDi9BYH5LLwmLkLZt4uuSziC+XyuXGUR/fj0TMZ5DXcYke3PX3x5+uz754C59dPj/cvHZwhL+L5fWAsVjQhPf/N5+f5v1g9fffjmVx+htdRVskeE26VwJRUPX+rdbnG5XkVz5JRHu0o5W+/Lei5y2ttLWAvqZoFMoiRmQN2MKZG9VAZIWHvrd+tpb9vwFPDCEd1aR1KPDqD3vfetahk9t8RlOEwjzMO1LAly63vfM4OYe2uqlUKsNWJR5ohulqpI8wRR1fnqtZ21ZBIl6lIomiIrcyESM0ZKZpUEQaLzvIzQE/OZIBQ9bQt+5rxwtrHHhcMzApzhe2RP0UwjCLO6XVkWHooAEDGVolmCStDw8FS5CbOPDdOAQrf3H902NJ/WPAFQ2mkVhvV+af1S7x6jp4ISGW6BCCnMYu1aoooKBbiF8hLEaL4U7nvr3vt+rcuSXN3s4X7NaI+n0ppd3388ne+r1PQsRNvLswrTUq6zF1X1kASFQWtNe3K07fpRej8tj0IJpAoRo6dTiiDCI2HR94zseyfqkc1b9Hix2JfCvWdkhVNrHeV6qqtbdLuSoPdukefTnUYyPG3P1iKdykKEJEYQR57v3u5h0bfCdDrftT2fn5882ZKtpQW9e/P4aPyD5e5R8Xu/9ePP331/Pd2H+Ve/+NV2F1u2clrPb04vz9dvv/0GFnf395f003K+Wx5enj5W7e775eXpbtVaCUoFX7I+JnEaVHT31pH17t76+773YE0p99//4X41J7b0sjxWlixqKI5ytYugJpFwZee2X/aMDmMhkDZzEpR6Z+aRjcFSlsu2M6WKhgWxUTSz3vePb99+tl96pDAve9sjiKVCJV3ciSE98RJmGauUIkuSh23eNy0LYvFcI8DZ3F8obduf3RqQyRYJ4rNQZTn1HmZ8VyT9GmER17ou4Z3JSaj7pdYzGoGpLqvtXWvNDPMdrM37SeqiZ2QSiTCoLoUXZbHtSbieSmnXl5dr26/bsqyJvLt/c71ckN6tWfaynINp3/bl/u51Zzm3VSOZCMxKQySQw+lDo2yRxB6eEUVkGByIoKIZDkTRYjB3J+EeXUhEONMSFH0b3HYkEJwZIlUwLQOZaTZT7dJG773EiLFhde8Asaj7JqSJDO8IKlUB7rYvyzk93GwgeUlmkkh3cwh7OBFHBJBKXIt69ER4RKQjeeS+kRPmxmvkVQsAFiVKR/TeRtK2R/beiQWgQLpHEosWSowipEd42HhexXj6iEZkOoRZlGIEYvGoZg9Ek+Y9E8TsmUysVdxahIcbgSM8yVS09WuQVK6EHOHfHiEstWg
gRcS7ZQQrezhAa11G8lQRST6QEjOBulsSRNWtkycAGWVMMBjMFSVuMofWt8hkoQhXKFEupXYbY1KbdxEBsRK3tqkqEuE2yMxuDsqIEFFhigiWybnEaAzHxcNyuBEthhPNwntGRgojMBvBJEKJcchDfWStj1jxw0ZkEe4dkaVwDlV2oqgmsLerlJogM0NRZVlWCfhcAgQRRnh6SGbk6G9JZobR+W50xxOZkqERsTRLu0e9fwiDwjkhTDfMNgxWDBbAJxMZyCCS17cRMo+IH6ccqIGONGgfcRWZQkdVfBbvPzElHYiWDybqZgEZXrM4ZDkTeY6aeiZjBgyNvKRxDkfNPQ+Gkj6hSDDapE1qJmMQKwdqm5QQH8AUQ/EQFDQiwG4xL+PcDkIiMiPggUy60R0jXPSmLzh0Fjdu6xMU/gkyniKLG5uBiSpv/4wTzVcuYORzv47kRKfHpR8wOG+XM7Rdt6/45OAT197A9EDnc0NyEEIZ9GsnQzRyQKb257idhOl+oukkGyndcwAzKec/AHJmYWWOvN141b9NVc7krCbvRZg8ym3sXlmQIamgQzJFNKbcp03zXsco04e6cjIQI7AXlJwEIs4D89OsmhMNXuAgqY7JkDeu5VWz80quzIsc7AkfTNLB3h3zZUwqGQOHmBu+w0Y5h48wWXIi4cw4uKpb5tdtrs0xpNef5ytPOQ81A4Xm+rud9rjdPM+NjtmUx9yYF3Yc66BT8rj38/gTdB48zCez/JP5iclkvhJyOCheOkQ2B2NCINziy8bcmm6zAGjkyCYxelb46tsv/vt/cv/DP6LPfieHW4ppyi2mAIpvoBaZM8X7pvq6cUo3+mY+SQ7SNZGfrNLjtG9Lelw7HXZVHJeKTwjPVy4bNxb79oH8ZDTGDymP+5e3IRxxSuNwL234ghOZTMmjWWQ6UxRJFSakMCmPfhqkVUVoEVVhEY1hHQpLQEQiAqojYU9Fx7lGjIdJDCM2Ec9QaQAZwhKRozzimIwYjZAjIsmRSeoqcA8Q0j0RDEpngrBomotqJFQKCQ2/ceewbpdwclUtWM99vca+QURraY1ednsJMoc1E8a+dw9Qklv0tpcShei671qEiR9OyxZxue73d/W0VFF6eXlJ5FLK2BqZxWdffubffczIh7sH2y97t3cPb1uz03pqW6tMTy+7dVPVAPZ9G9qZ8C6UnOG2EYEi01wXoUBaLCJpXte19WtVGcpzJvQeBJRTSWse4da1cHOjLEzZe8vQx/s7IMx6UUI0NyPY5frcmznYw8wbAplpHkJM2Sw6EZBWZTjRhk+TMxtQ67qqahGpysLEownAUTNjSGFRFuHAUUmYvfAYRIxMYiFiYpKZrMvH23c+zWIUTJiSZNZ1574Dbn0KzgCM+GwlIk5QepinxyR+xncYgUaW5OiMXFiJCjNZFnASlDF7q92oIkKCKRxEi2Bp9g2TISW7kaD5lTQL1CBIIo8UKNG+76f1rkjp0be8XLerRdRl4Qpi7Xsn6YNqTdBpOUfuLCCqQsveG3Fhrlw4DBxV6GGP90Ypy8LI3TYSKuWsrK3tgJd1cTO473YJjwQHQmvRAjJSFVZGHzVXDU5mgo+OF7u5karW0vteZCUowyP7CAySdWVG742ZLa7C0budyt3d433uuHx4So96V6TcW7sWtfUxrk9Xd17PtdnWbFOm3t3r5oFyoqe+fQH57Eer7de2504G5tZcmUvhoBa+Jy/dEZlKkshk6LqEa2Rs1ljcYh+9hUJIpQ4LtachQ1RKqWatNecEazo1y2bm5MxKSaS6dL8yU4Bb26HoZAoJz6AUEUDcrJbF3CNDl8XhIjXSwxpxBllqCCuJMDgpY+AlLgm4u2aKVikKUGFiDQVXISFngGWYH4cJcbQOJmJNSpw5zwyl3LtciJ6v/vyErQEZEgEIk0cnEhYEA56R3eC8RLKNXP8crVg4SEGLyKnKoqTCouMNwcemjni+jMbGbMCS+Sim17fwWqqnp5lk3tXTqIzREL9RaK1Mnhn358dwzQjmquX++1/8zvPH68fvfu5xMWsg7pcszGbZLv3u4fF0un/+9rv7h/vz47vlfMd1/fbjh7tlKfX6/PRLQrKgoycRqG+dipxreQj/sLWNaan8wHRmacQRaXPTJpLwsO4GwhrprfVyr1H08vR1xCUFRU9uhXGqXELNsLl/3PbmviFZSqllCQ9P7FsDAckEMUd3VxUi5lZSdYsPROmtLUtZ1sen64vWkgZKp7b/8HT6EZUfPHzx2z/6vVIfrYUb9qdrDT2dqjyUfdv9Gvn8wt7fvL37+uOvrta+XM8rqcrp4/Omd4/1vJLg/fNTMKGuZT1vrS8DTKK5R+YW7oE7kfsO38HP9n5HO53Oiz5k43OpezTP54hNq/K6wLu3F2QElyx3928e9ucdweBi7soVIlKybRuzSzFA9r2R07o+tH5VJckL0dPVnpRPRrwlFZbkhUPe1AcBGi5BSuVkyYTw/Tn8IuLNGyVR9vBnah+EfbeXy/6SWYV4OfFS1wxpjZVPkrXoSSXa/j65sz9bN43lrAWlZn4cPBcJdrss53MPRFDomlK33pQVpEWot3ZXVyQ63Alc1wQ8u9QSCDWxyLquKI+86kkizMF1uX+3vez35wc5VEVMBNHwIxA6u8VGmSxKMbIek0Xdw2I0V5Ic4SSsyBBV600JwpRClOSDyDgabyVxIpAc7pYuJBHhvhGrlsU8wpKJWOpgohhgKQmPDDOLzE6dItflZBnMNdxncDKR6hKBRLDIgHHmjQmcJFKDEDQKymTp7rYQe/iodwsrkVhCpSQo3JXAIm5pZmBbyr1RR6bINIG5m3KxQVsTExsyhJf5oiaYNWZiIJiUhVlYl7CNksDs1phBiH1/QWSCStVMQTgRk5BHmtuUHA+hEzh9RBT7kGO49WTOCAsQcyLMtlLXgdAgLCq9Zfd+2fy03FmzwhIs3kcvr4gIVo0MYXVvIshstVR3F108sllnpsoFlJYGIuZCRIHWYz6RPDuSmaRoufad2SqxljJSEmFdeICkSBJSSeLuTVkZbGmZnOFMsueeACe13oVGVo8RyVDZM0smPMw9WHgQLsxsEcK6u4EsRxYJjWwd996FuPVNhKWU1ruCMqfNMJGVWYjcdqGKo4bmFqwkTN2jhxOXmNCJczTPxqBUkhgcsIjIFKZh6RhU1nBSj6iiHF1UABlShQN5f6I5oEnOEUBIP3p2vla/MU/hQFQ0g0YGaB9Y67UafatQj+jowCxx4gakCSDyyNGlfJAWU4QynHRTykVMZKNpHdHEUBNJzwuQQ0ZABJ4p9VPWRIPwGqwKJR/GrRtO/LVLnFa7zCEOHE2HCHHgthyMTsaI8JxqiZw/HpcwEjOmrocw6TPCjF+hTwYUwEiTuikWbkwIKOfXjj38oWgYrjpMXUJMviJ5juCBYg+GI1/Ju4Ms+BQpHwTLgZGHeGLEgIy/GPcxXzE/0dCOjfHnY2IQ0Q2wE+GIsIb7iLOOKWebaP1weN0A8Rz9V73H60cHPLgFaR18HyZzMhQck4PIyd0mwIhgEBMzIE
yZMfNabxN+RLreUPmYt2Om4bjcG/6/hRnN8z7IiVeq6kYpHfMhE2FEerjrknwQDIyklASceYrghjQ10w7ebFBZk8rDQfBhRjsDw0RxsD0HJTFHl/LXJhPytuDnMr3N+IOxOP44p1nOJHbGrcHVMeJHotUg3W5U2sEXvi6VecixzAKDpjr+ORjZm0KHDyLMJrM8Xw+OUI6k2Laf//Of/nf/+OMv/tnD3/6PfvMf/afOp8gRYj/XFREygvgowN5oq3nC+GSgcGu4eNz76SrkT+fk5LbyU5rpxpoi81VudPv3uD3zwTiFasCcorc1Mncj85toskqfDNH8gxkzCY18WwQlzC1JhTKcG5zSmEIkmKMWZUMlIEmpMKUK55DODWI3OgUxk/LofsSUZMHIYTdmFSSIWMJ9tgbFtOgOLazPlTD5c3Bk+nCuGSITDg6PqQINwF0YcErWhlCFEYGInZZCVUOXwlJsqQ/n6q16hEjecXm56KVnh+xB3Ef8EMLTvAsgQXtvgAkVkcIUDuvRAQHBI4a+eC3l2rvHsG2NaIp4WJdvr8+cVKWycll0761ZB0aZIdyaZWitxOQeonJalw/PT6CUwsejN1lo9wZKyqgCss6zM1skJTGzgJPMgoS1MDXydOJUWTpDTvcq21nq1i8v2/v7dS28nFRLhJlF7CtnkIdncI/wSIt0JAkNpb/aUE1yKUXLelrO96d1XYWKJmHou8fzIZmgRELzXTl6YQsdmjIi5tFGYNzakX84CPCSGI59pyk3G430OKKPSTC7FjgwdEHjvTi2XpQRMcRDzIxwRGQ4wJkId0KIDjrMGPCIWShheLgc2rrDgMbs4arcWrdMxNhkhShKLZYG4FxW340oiHrbWyBKObcerZujEyOISMQC4S5JxBRpxKjKEb5vG9JSmSHIXciSJMysa1XNsN7fh12TWfVBMoJiqed923d/gpAyiLhoTU4zX9Zzb41gSE+3he84CemFRi0uimjfG7OeTgsF1VoZkqDTeodk23cpKqV6M4Ei4OGJLOu6lMX9KkF2dUdTKaeV0z321FzKkkW3zz/jD7i8/6qLl/X0JrxV5Z5NkETlxKf+cXv6yn/jN/PzR/r2mwYrLlxrzb5bf3E4vO3XJ5JzIXFvoASTpbW+Zbjkaq0zYSlqvasUFfHuLCJgs50Cfb9qERWsy2mzq/kuhUCUBPcsqbWu2/ZB17O3zuCy3O/tBeDzsl7aVsuy789CYEYV2a/9tN73aJfteanCRYoKkCrKCPcLwmqtFFKX0943FQZKOlRXQZD7wkx7W9ZlJZBbnVPfmZKRSAdMSkWGlKILExn2TUzpY+THZ+nbKFODmSQJAaJ0H253RCdaVJRERFbvl7kfpdTTwncnWoueip4qq7AQRsl3tiWdz9jxtp4hobdd163fK4aSk1l0LdqbcREbQhIJSl2XO/ZGJKpra2093yUvl47A6e/+7h9S+/2/+sn/7+n6TZfyG29gnd48/qCW8/VKn787Xz77Nlp+/e0vvnjz4+/9xu99+/6777756fvr35zk+vT8DcjoJNv+QVNY1ub7k33nfu1pj/c/DkuzfbTQJqW2vQDQmuYmLGVZ945C3n0zZNiH4CaFPKL1XnG+15Ndv2XvKbm3LTL1dAfA0wjo27ac7jM5YaTi7mAhR6bUsix83+xaVe7Pd5fnbzy870/XvZWUeN4eZfn9H/7gD7/84SnXzz7/7XJ6m8HWr3CvtebJIiGiD/f6/ptv7+qJinz1s+v3z48/lPJuvXezD+zL8mbTh95f2Ld3D19c+tOeYddvmXhzYpDFnuTd3bxXvat6r7TDr2u29K1ErkxmylVWzd18VZhda1mZveRSymJlDbvufcvcVrlDtHBHUUK67eQNcbWWtbzt3k7yUOo5UUnqZb+4YZG3RZY9NgiIjAKL3A+BTGsXJmMtHD22D9kbJIg1fKdslboKdWMwmCrLLlyVZeX1JOfWSXU91QU9gCuBuepujWI56edv33yvxvbtx1+d5HQqe/OwZopa9X6z7dKelCuBidZ087gK1SI8Ghf2NE0wUkgzjJiXetYgIj2d7yKYtbDEbkYqvX1A4FzeENtYBREJ2IhXx7SczCbFzDo8LzEVHMwJZES6HGXcdBvaa88c7a5GL05M844zS0QMP7YMFREykzzSW4uEEAMU4ZkeGQSiSNAItSUdrzV0zyTW8A4iFc0J2xzpEaayDH56tKhIzHBVDxcWYiGmEeHskQgXESLxiFpqJNJdVT3cohPzsHZlNoQxa+QwlJWxsxWF9e7hI4EsvDELk87rAwc8kcpESGsvzEIEs64qAxYzweFMcE+PxsQJ4xSKUBZ4uFvRarEDIcyB7NYSKSIs0txEGO4ihSI8djYP2JByhAcBBBKWzDgtq4d7N0dUqWaRwkRCCfMGYGxjrDvSM4gSSqBIkLV2ZVGm4unIZBX4iPeGgEWKmyWPAPKYHYIRPgJ6YrzlJFladCbUUiM8og+gO7x+wqN3Ow57CM1qP4iYZq111N2II93cZgWcIDqDgpQ5Mz2dmaoqMffYa12ZSUQoYLN3XqYHq7j1osvs6jZ2REKRaXvTUhbVIVEz75V5Yg2ACZ5OBJmFfghRclJOPQgBIM9MHdOHYNP+AiYK94GomEZS8w0fTizph1iGMZ0ws0w+WCrGJy+vZNCQlQ1D0wyZZxo5ViN7ZZyhj4aGh8ZgsFrC5JGU4KFrH71sCESIgEUIzQ4SjFu9ewSh00GzjNyVpIRM3JpHI0E+dC44GpUnMgdVh+PFDMLgcZkADKOoD71VTnHT6DFHOAQpOdiqITu6KRMQRIzjbwHQQc8dWP6Qq0xwGTyay8wG2wkaYeLDCZiHjGPCx0kpfYJthzYFw+73Kmoi+oRlGGXkwdeMH4xzOqrFN7vLhL6DpqPptzrkOuMEcp7/pPIO8oSmYG0Mb7ySJgdDMcD5gC43juV2aTd8/AmfML967JZG9j/olgaMW+T4DcMfA4QxSwfPNkhioleD0NiRDRpzfitwc+/hEL4cVMbk/G5Q//iVV7nIEf2CgxdFIsE5ku8j4Q6VqLwZotk58pTJPto1ISmdZBgwfJIVLIkEDV5ynM0gZ4ekdpyLH2lD8/7QoeXJg1kDQJPgPc6XDi6Djrk8eZ3jOAfFkvN/Yz4PKdTcvebsEfdrv3jj/gbnlHmjRPOYWzmVDszHmFMeN4yADCeacDGQbC62U4vLz//kqz/9Pzx/89/0/lWRj8/fyMtX/+vTZ3+XikAmxRgIGglQwEzPTh8XOWVQk0ya93cysL82bfKgf/LwaeKgXnEM56/N2YNnwrE+5n/epgImW3wQ6gehiUFf3zRG829f19O0hAJg6RYMEhYPBhF0NXeLUIEO3xkiuzO7gXpmT1hyQGom4CISOU1PCGR0xCiwKREJSxKnGzKQNEhLz+Shxht9RkAZMdbMqLoxDjkJUkUxcqZgOQn+2S+WIWGNZUhjQ0nAAmKzIBJWPj/cWxbmUmotRc9r7daKalq8u9OXiA8GYT1VvT+pSF63PRN9N4s43dURhvP48PB8bUp4OK3KQ
IR5J3CCLm1T5sgoZTlr+da7ZL48P7uHLsu3zx/uTif3jN62jIggRlVBWPZelZeF286eLkJLkSRoUfMUlVIFLTNyWWuEi5AkJaKoRvRSdCxsIRZWJGjERqWLqAp9tCeDVxYIsqXKPaEM8oOYgCA6GjWGUbpwidDAdGMjnCGACQshBbzIsrKeWAonz/CymNEMSVMsREQzzHrsFmlEx7DIKDfy8Za7/T+HVCFyMDigOMLQAoTRcioTnsNrO1bKOCQrUyAsLJIAPuRzI07TiYmLwnYWMDuRuGePDKLmYOY5iQC8GtAiem91UWa3dK1rEpGmuUkmoGlu6Wke6aUya4m+B5HBtwwRFaV9uwCizCD2CBYNJAuVUq/XFxJWOe3twsLKsp7un6+7ao2hcEqP7GW597Zbf0nR6K5EWqQli57TWuw2CmkswUroMd7xCPFwEoBTCxHEIMRci7Jo0UVAnBrNm/eyMMSKIrMPa0XGDIW1ffPeusZM6e4bJCyuIJJS+jW2i5yXxwjReF7XqLqp0pabo5MUrhnW49raXqzj47f4nd//cjt9/fkP7371cachl/MgoDtXvhctrY3UcHl6fimrgCFFUyClaC7WW2YRFveI2BA+rNzMwSzmSSSlFHMT1kBPRLCXuuzP1yEJVVmEpGo062kXhHdLZPfslgwR8y5hY5/VW3P087pENhaiMLMuhUlVSDKJUuAZ7IUg2ZGFuaLva6FSuUoolUJZk1VURECRPSjJR+PksqRIuCWBmReQXm0xs+fNtn2gAGLSsrTcxlPcfZQpCyCiJ6FM7xFBKOkggdTCRSGQRWUpUoSEYzxGZ+FqUkUjC+N44dPxFuFjfc0XqXevWtN9EYBqffyyd1sKEIBTZFFdgbIuFUCGS9Avf/Hn+vzyH/zhH3/x9/7hx8vzX/3Vv12K3789ken3v/zBT/78r5du/+E//J9/+Oa79vz0z//r/+rLz+9/9Jvf+/qu/vK783fP688iLv0JLIs6OVjlBd72D5zw3uTOkjJ8F0oPR7LQbFEZvRUWXpiCcvMqa2+R/l0RCoruLaiuer4rd8/bLxTeM5i0aI0Up1xOZ7vGujyscieJp8s33rtU7WEW7e78aMKaRObkaW0jz7ZZMq3rwpE16Xfefv6P/vCPtg/2+Q/+lpze+m5htp7W2J4TVk5S6tL2HtYXSmX20B+++eIHRdOjZlxzf/Ouvnv39k/++pt6p8xvvv72KyEhb0UClCZhhD0byLoTpRib2UcwkW/em0r0/p1Rkr4NCWstoqfvZpeyVERRfpMkveX9+pbw9BIvyl0poxTPvrendZEkWOwQQWKRUxoluNIdRJvvs90uXFUTvduL4oEq774RR8DCm3B6u1Y9RcHmV48C5iq6jPGTpWdLrHdrRaSHI6o5y7KAPeRC6N5bWJKy43K+u1+pFKa1PryJcJQn+4ZiP9XHMz8KnYi/RfEktsgVIsIWzqSkwqrR956A6EKe6UTNHZlLUAprkXptGxE40Vsrpa7Lsnt3e7m9EkTYE0wciEwolwjPtNHFhnJofEbhgj37oHdHHQwRGO0FMwOWmSoy8liEyLwTMc0eOuFBxGy9D0O2ea+6DCgVkRFWtDCFm2caiIWUmSwigUh0M3AKURHxcJHF3SJcWIQrgZhBpDyCg4ffzUxIhNjdii5M2vs1x/MKCE9kpkXAEREoQxwhPBXpvXckujVmJtJwQsK8sxaGglxZEMGUEebZc/Azo9fJgO8Zg8k27wAopokUKsw8+puMAhIRxdymJIGFlDCqLCDi9J1JhACAmQopIknY3JDJXBM8eoUkeEY1C2WmeydCNxvC58Gnj2SskbfNUPMsrGCmFIz2JiAieDixZhA4iDIiFCUZ4ba1XUAWPbIzqfJi0RDh6WNl5UzCpkXXQC7Kbn2UrJkrD4xOFBFCMkQMN7HBqKWOXINPXReJ9NEiasCIJMmR1ZsWQUkRIB6d11C5WrepBiMdonAghQRJkUGsrED2sQo8MXMnkyhj/EFFpuhmOAYSEZkjFpxpJEuOnCABxQipiRASEGX6AFADaA7VDSEFs649QT/T2AryYdo6kghmU4YBEOKQAYxivEdMjgMUh3hkwC4iQoYwZwx/49QYEIFHSG0MmJSUoAyaBMWAThNtMk/tjR86mtFyZW5XB+c2+ZQD/d+UEIkpAHqFrziCuWcE0iFsoAMcHoHWU180eZBPDnswKAMlH0KMcS4jG2hOlYPJuol3br91HC2PdsV4JaQm6zV5kJvg6ZW6GfzP8K/fDhY3Juegw45ebK9kyo0aovHzgw85asFDKHOIGw6C7RA85SGF5rG251hMnmIM1S3Oekgjc9hrzYAkpgHeZ4Q5AZnMfNyBSW7dqKNPFFLjSoc8e1Js8zvoRi7RgbWT4APNy2j6PSdMDksvTVgbNAiDyWQRZvu/4/vjoJUm95bHqR200W0ciQ7tyY1XACJ52MwSlK5sS/vafvFftv192Be8/BHe/BFO55CzCLOSWyYnkYzg+EwnohETPOgryqFKAhEC6Xm74WOBxJy7B6WFSR7h1cw4x+mQydCnM+k2+8cQv3Id8zvoWFH56dK6Te6DgbtN7lts0PzmYfLDcTm3STuT/hBxI0icPNOkm7Rmv/iTv/mX//j5F//tdb8kXT/GXkrs/+bPly/+69/+4x97fVRRIJBOR7M+EEVGRiBTprrhNjaz//aNpzqu+pPnw6vcalzdMf+PtTCOk4dndn7mRhi9zpZDpTeWEh1E0e0ICYy2lXR8TUxqdXLkAABLiFAyGWWQMykIycLQBBocICUA4fCeZJ7JJVIjtRmtwrUAhZZVRjE/nSBE4YQyq9fEoBipcIgcjkliDnAig4hAjkwM1sMjbOwEwOQIAgPkEWCydFUxYKQAC2A5mAYIklgSRFQCLkpZa49ooSoVafWytK1E9KpaCjz5w96vAFGQUF3KyrkIi9DHj5fWOisxqUX2fe+t71tfaqnLIoTuTpDw8JRwjwTr4F45KO/fvn36akPKx5dtXe56fzHbmYjGMyCTYnR/Qka4h8Ol8uLFugkLUYpyrRoezFJq9QjPVlnRo7BUrX2gThYIeTRP9BZrWbd+Hf3GTnSKbqS8bXtYMOXmLcl6s952RHPvlLM5OKUzFUpwprlnEMXIAxGGCiWBVGrl0V8GAlamnDq9GLy5ioiwkBIP9tmFVGVmnPOUHCEBRo6G72NtH23ohiRKiQSD+Qwf3SUTiNlrdRTNePjfDoPyJO3D+rHGjzcKpScjqYDELSK8GzJJmJiVKe3XDWhgkXpK6iwkUrZ9D0BEwegeAluWtfcOgYj2YZOn2Lb3zoVr3XwnN5+KjVi09n4JJDJ6u/bsgECYy2khIZD31psXKYzs3ZGA8LrcvWybaII8mMu6eFpr+1rOpT5u8RHpbnE6nd0vfbuMhjWZXJaTNwdzqUt3V133/cLhysxazDyc6rKgICVRxDPq/WNiB8l162VZt96UUypHhLvDvZ7OqRKZbhsRVz0FzJ2//ebl7j6+eFiiPD1+T/tL9s7394/hOyW33s2bpTBrj+XnP/tm
WfPuXZ5+2rerb9suImVZ+7UlyFM8WnpPYl0qs7pfSeh0Om+bI40YbetMtK4V1DN5mK1FlaQwc2aEcd/b6e7eWgsKSklnqerBXBbydr28JFnAEcRMyXHdn0i59Va0IhFmWpYkiQGMktNSShVZIwKEAHv6utwxSyJFdK2L9Q9EUQpR8FqpFNIMiiy6wH1EURCpFKJAZGNhMIiZmLEuqndqTD2yuwSBiEq1Hgl1r4SgAsuezEV0i5AiUgTRQIhM5pKBsrBXQhFaRO8Wvaujfx5PWfCQYdvIGSHiqWmeCnnLuRN93TeCaFlX1sJc7+q6u/VMZmeEQJRXrnTdnqSUEfRRC+lS+1P76a/+de/Pf/zv/fEPvvzi3eO7b3/5E9vf+/OHqz/9+AcP/XL57if/4uHz7729e/xH/5N/+Mu/+rO3+fTl3Y9+8Fu/+/Nf3l++5fv16Wff/OzLd2XbPuz9hYmft8vj6fHN+QtNDgomSkbrrsZM5GFgdt8Itcj9Sddt34Vrh+xhskiYLzgxnYjKbt9d2nNQKN+ty/f65ZmTHJ1ZVRMbb7vdvf3C+naNqIuU5EWYM7kjEFUrebS9gUpzWLP7EneWf+93fvsPfvQHSnenM2t57N2QXUumm/uekboOzhC97cv9KTO4yXk5X/d+fb6Utb47f/7U9o8vL/vlWih00fNSr88fRlH86em7ctatX/beFHxa3ilKWHzoX3/xxfcuTyFlTd12+7jIO+UAR0dPgkit4GnSLsu+P1cB2X5t7wEeRhfW3Pru6OYWHoucZTn5biepVRcl3fp168+h0RE0QFDC08j7WteTqrlfri+OkLJEkkgJRDB1u4rTqawVSuDWjUh36xm6CsiumihYo9tyAmvs/dL7VZgiWahGgii0RC0O6LLetb7TtoTR+u57GqygJarzC4rYpStBGb29lNO5gBaCxV4BuHe3UhYOqipIKjPdJVhSl2LdpSxO9Nw2lUJa2taOZYCM4Z2nmYTCDCgBmSEiHgYQgzNcWTwTRJ7gjFuR5PCvBRFHdEqEu2A42pKYzTuzWsTojUVEpRRhAcdRPqQBYVUV8GHHMLfIFC1EmsiMOKrriNsrc/i1SNLbIB3cnISRriyO8DAA6T3JiUAyFVPIYHDAJzdCw9wkEeZurCVBSenhxCMBlDJdicINM96CkuCR7qayIGL4fmK0pceIsQ5hKAuOki+zJDh4bEJi1JbcLRNF6oTf4d3bgWcdxCJqbokI7wQWEk+T0VSLsWVXlkLCYLNdVE9yau2SIM8RSs4A3I3ARUt4IJJVwPAcsU0hJJ7BLB4W4URpbkXXkYFSpWC03MAwvxBAqisz995p/nA8riUTyFGvtkQIqw+ChoQw0vyDRUDo1hJIKGV4urIGRmM9NjeeDeMcGaOfL8AqxcOJ0sNGfxmMBnkAi1o6BUgYidZmFj4oPJx5uCCxLgvCx4ZrboiIVJSIiqq7eXNMEpSG4jwyBShcCHFtYaObXA6yas7g8cm5pjC5Gz4g48jwiAxOGjzdZCQYt9SjUdZgjE0nKOKwdREAz8CwDbMERhIBCUAzPjyBkf9yhFsTHXraA23hpifAII8EIKKZk0BjAzqZhFuM+oFYB9cwXV10lDQH3hvSqwFNh4Ei4tfMTYybMmJwKjn/Zhw6Y54BJhNxo38yR7w3jXbsmMVW0JG3MqWKk+W5Od5uYPOmxAHxaAk9GKtglpgX9grI+RaL8ioPmpTHZPbiMPXdYnswT/0mc/hkDhzfTvNJe9Azc2Ti13KXXgfsEzZn6ommZmTK1G77mLwNobsnyDx6N3dPT6KDmJzXPxiExC0153j4fnLKx7ScPqtxC25kwxEEc8xLTD3JED4kpnTjSIo+7unYiB2zZfQDPA74Okq3cZxupjkEUwEyQ2ZuRB5uU3qI0mV0HKdIEigM/f2vvv7T/+ybr/98+4C3b36zPP4D+cEf3//4fxGnL03PpmdlHWEHhBAOAMQSwC0qhAYkI8pZYySe7DAfUpZDu3IYm24cFqYRc9zLwQmOtJlDQ/XJBY+5O1fKONa8dBw0JG66m8yDWHnlF28kyu2j07FEnxBtSePYM+8pkzzZA2nIMH755d/8s//dL3/yv9/61yvXHbU99+egtEBvP/uz/+qHv/s/Xt78cUfKZLEJI7CPEYnI4DHF5kQ91HT4hBrCMalmXlngRv8cJO9Bkr1OWxy79sk6H0fMG5/46VrM4/smB3s8p44zvk3om+LvSLg+vnKkxXuIcpE6+D+eCy+FC9cFCQYhnciBdOJEaT1rsgEnoUGnMTMhLELdnSBsrIWICcGEmWyDOWF4PpuHBXF60BJDlOIJDwqCTh3XfOuMdmvMhTxTGPAYcVKWoyuiESkxqXAEFS1LYXgX8hQsrJtoWe8yUjnXRT9/PF+edw9q4d+9XO6LmjsBPbkl+T78V5St7c3oICcCLKUySngQmIuqCAlvvS3npUaGW9W6bbsQWr9urYnUWk/WN+smIosuO+8qrMpalETvHh7Cs/fwoPSQlU/L0i47A+6+nM/X1pe67G0T4gKySM/OqJ5AQLKk+enu3M0Z6p4K7tu1a1ER1bJvVyJvfScuoNp8ZxJkRKSQjk4oNEtaNBsae9fRTAKq5a6u61q4SjInMc9ItLDMHI3dmVNGl5fxjhZhmcHVRJREAs50ogAIMWjUCPjcWc5KxYxZiyGJH2shA9OqjBh7cKRi+HJusV453lbmYQEmSYRZMI8W8BwevTuCgkhLGeUvkrkMJlWUGX3fSXOkbdHN3e2ezMS8RwdbBiK1e/SwRUtQCMuIryAS0Zrmkfu1OcFhWOrao6vWCNrb1d31plLM4XOPZTkxuPXrNa4eXWphXXozz86sy3ofve/tAxdQVoyNGJiRImV0wuvmWhUED49o5gzISPym5CKZRDNzLJiIORyZSajLouVEkM5cRmBwz9H1JuDmTbBEZxBe7DmBZb1Pa60hEV/+6Muf7R+kt+vOVXSzrbWWcDlJb30tuO4o/LZvHx5/h9ezZVTzc28wRSA8mrUoyxLoFp0RAFVZnrcnoZyYro52d1wKm0OoalnNzHwnIhKyvhNzqYU4taibLPXefBOpqqXvW+99WU9bv5ZlHSWWLEHZSz1Ht95a1ZGQLkmttaaVY2Ams0hTraLCVEW57zsVK8pCjUnKQkIsAkqwBqlmACLGqMJCZeyC050FssAzKAi96yqkVPpek6Qj9s7dKEy0yLmSLhYSEVu/hiUJNTQXgnBqc99E10ztYQQSIV4UVfRu5VPlImVdROXY6wSRTi0f0Xh1x8wliSkxTPv0PcQsWqTt+129Y1IGsXsVfVhPvoOILBy89qRaaqXabAesLsylfPP07b/88z/9/R//ne99+RvvPvut7b1YoH94f4rEy7U9v2zUrqKP7x6xfwFrZftqufvsd959fvo7/yGKftu2VS9/+a//5OdP/6Zyu3t8eP/ddVnvltPy8t0velxamK536bjuG1Ff1rcstOjJL3vy7q1HUz6dlhIkxsHpkZIpfvEtyhCexG696KJICYd1s06o3du9fPbw+Jauidi
650LFexBLM398/Fz2D3uTJEqSvO5fVP2j733x9/7gD5keeuO7t587CdKyXyHBkewX0hMlpXl0oyQlavu+cDFPqji9O6+Pb3729cd/+837n3/41cPj3Q/eLn/51//mev3wcL/u0V8uH4TKtltDiqy2heF0ljte4uP+Uw6qC313fX5YviB6Y23j5epoe98y23kR4mqwnmmaJAt73/dLNzNIPZ9229X3SK96FqGgvXv0Tgh+LHVV6RYAvDuxpCzKSE+zbSlV5Xy3fCFAz525MGWPkLoGp+27h4DOwtV6LKUiAPNlKRC8tD1gspaKE/FpUU/frterCta61vXh5Xp170nZ+77H+yqn3dreL3UFr1SEelyKLsILRYZf1/Wcp6LAbjuXc9HP7nhptnGeNBux9PBwXuXsBjMXXUGLR/JsNraDWlkfntv7cCrspHOrHhE0AhrDeYQEs9DIU6PhOxqtcSTDDngw6nMDuGYiWSSAoMOzMHaozIKpphZRyolSLG3hhYn7EEgOkDzOxTuzDkRHREQihIwUke6GDFYFJImQDqCMnlxp42SQiSQtpZuRkIcnIKIZPlI3+GjL08KYNJEUEC6ZFu5TKyxFpEZgdBcZ3cojDLdWXInBUxlitEQVUUyl8Wg6MbpZEeVodAqm0fcWBKbM7H24wDKHSmOcZGQGC7tbUloGsypruAMUBMx+veB0lpGLqM0bAatOL1hQEnNExCBwJZlgFBlO4MIyFEw26LMMkZKeHl5LzdG+KIjBnsZUq9aBTauQeWcRHA3R3f1cF7cWbkQpxCyzv5WPmDpQhqcwUbZ+9UgQyRC9uAHi8FE/ZuIMmk5+IkJajEKHAuluoxvIsOQiIyOYhjWGZ9PYTGKqqlJPmu6tM6UjCOTREykkLKCR8gU+L4/Idr1uN2wfHmatKjGFMjtHEHl4TjAWwjx4FHcHc2Tw7KoGGq4zpEzNzNBOzAK6THg/cR8DhGQewjrHYKYnc/HKqviIs+TJyTCSwXHAnGnBGlj6aEI/8fb4/E0382tUxEEIAQAFgo6PDPH9qLULTwMo4bAPDcpmoDzCQWkNYpZyskdj5xpThnTQW/jUQjTqNwdgPOTAOJRHmaNRYB5mrLkvpYkgh2lr+qxusdD0euFTF0DEn5pxbsBygMPBNkwh1UCzcXTAmIeavMz83bwZ3W7MxK8j2YMkole8efsT4dc+8Sktg5w0y/h7esXTefudgxPBQeHMu3gMaX7yz1gag/mf0qwcT7ApXooI5k+oAxrSi08Gim4neCjZXs91DNa8O6OJDx1XNDRxw3CWARqBFFPyE3OqDrqNbkvu4EJuN5hvQTR5oyFfBzqPm3GjGz8hyyjjyFTyiGSV7EH+4Wd/+Rfvr9AVX//0pw+n98vX/3T9m39S1x8v7/7+mx//z8rb30bRicUyAJ/jnAkks8RcBseVjhk0ANDtq+eo5dE6LV/PfJAMx0oFHdbuW372MT+O33rl526yIkpkxpQb3miyvE0c+qTz4K9plQ4J4HhqxLQDZhDreIMRkBHUWfan/Zf/9F/83/83l6/+otKTVf7w3Q5bs1EGXS1PhOev/3T76v91+uEfdBOpREQZn9zDY/4SiGf62lgrr66zQ5/2etl54/4+HchpW5xPkXH8166CRyTp61qZnB4mzzp54cmv3hjIeRduNwbDkIZjO/M610fv8kwepGNiZFozI3j0GiJm0cih7hFCWs7OYQaiyIU4kEmR4KRMUgsiBywLOVGICjiBkQ+Xx0oOpkyAOdwDZESjM2qOptCDA0iSZIpwilASwAejOVo8Dz0SMUa6RwYRguHMsAxHLJwIiYhCdCrrhUUoy6pu24n4ofIXBZvDzK3n1bH1Hpke4+3NW2tELMqkZdWstczZnywqD+cHkVKrrudFhdP7aa2lFhVlyq+/CxEiCmFiVVCoak1W1Wvb7853tejWGiHvlrpIeR9U68k8iLKKMqUoE1ACK7AUYWEWDYRhNLce9bAe0c12GQosAjNdt+18XiGp65q2A71W2bcu0Ah0i0Rxt8z0KTuTkaE29ITM4kNIK2QO1cLl7nw6r4qqWWhsCUc4YY6wRmY+mupiRLcpS5GhHiuYXmzQaw9SHmmCmXErPBASo6o0rNWJ9PEInrGBSRlgIRJKGc3XwOF9rPRMy4RHeFKCIqF14aClrOfTedueQQg4MaoWIYqwON6rkypyDl3r3q8wAGbuogpAtCQk3MuigrVZYwqDCQ2BCWXs1luGaymtXxHGq3IRN6gsZq5cKZIT51p728CjULcKY+vPGeZWekSA13JOGLxbNLOmupg1ZlnrKbKrlrZdEdENRSXMw7ywbnsXVS0rZ1auBPbM03pvfrUA60kF6Z0oOVO1wLwH3Z3unWqGhfVSC9dFYEnRMyhJeR1PMaU74hHjEt1a2y+cVJeTNb58c/3Rj77YH/vHP98AUS62d1koKEVYRGzv1rie78LtzVuhELNz+u4IVtTUa++BE0QFeT7Vy8sFlOe7CrhUUaAU8ZbIRJiCrYWJme8BY+Lrdl1PJ5W77eWZmJSUM7w3uIdbLpRoXNizFy5C2t16u3JhZaJ0FYpMIbaI1l5ElYghKrKUZKY1k891zeixeylcRZel9taLcqnVup+W1cPCOpH63qCynMp4xHX4SO/UE5MSMakoJQlQFuFKizojpbJ7B4LOmlJShKoW4m33pQO7tL0z47SsKsxpUte9NRKwpKhyhSxKp8JnlVOhUogFYBAP5cIQPAuLj2jto9aTr2+HUUeYr4TwCEaVgvTe9wycShHiNUuKQvPD/m2EJQioaQGPTMrwsurDw/r8/P6//5N/vv3u9ce/8dt3n325q+abL+4fNNvLt3/zFa4fy+mtt/3t997uH5/2q+3t29PD2x+8UQv+/O1nlI93P/S3i/zlt3/x7FKqb9k5LntsZnuwKD8021SSQyRLWU4r0NKu+8t6Ou0XhnWghWdmgJHZ3V/Cr8IASQvj2JQKEwvLtRvJwqh3a7lcv+mtc+WH0+PXX3+z3r8L98u215Ne9/ccPvZqq9Dnd+e//xtf/N4P/9a6vt0utt49BDOiUXSkM5NHUCk0lbYFwlRAnEVj23b3rOfiRM+Xp2+++eq67V9+/sXbz+9//m//4pun99fINXlrjZgj+HLd5E7abkvenfXdw+n+4/6LiGzbJWl9XL9ccWp0ffYrfIPLyvcCUaaP12+gYKyX/SNIHle9Xq1oGbukolLAC6lFP9dlU/Te4FyIM2zv0PpQgDsnJ9PCIEr3QqwoJz6vfLrs75u9SIH3ntmZTx4UDAQXvdvsorIsUpfUFRHZkyzUEFuGF9HI7wK14dIRq7wtpNj1RCc61a2Re4TI7l0EC4dHS96f+teP3InvDa4Sp3LPUd+cTpHNXvZVz6f6UJNfLu9ZOOzCWYPEQ8BM6Nb3wgUEjx45JBV9PdWWe4St5ztvnV6XRQ4v9JDnjd1ezv5cngAzK+ugZtx9IkU+wkaYY2RvIiJMSZBZuHj4gPV5mHTGdoxZpnt+7L4zcvQKIyYikRJh6ejuRYqyxg38RDCRuw8L+K
49tp/4Crq6ttOKZx/83vX4X29tNP0zhPx1PksLraGQYT6ZrQD2M69EAKUiR1bdc6hCnrq7d3fhUiRW8+eJz6ER0556QUIDHkIoYEp0P/4sUnf/cPf73b3c7z3LSr6l0oqsy+5GxYAxnZENCBAiJYyaIMWGV6iM6FVAoiGCkTvH3z+O03d0Q4punw7m3k8szTVRCcZ5Dh2dPbd2/vG+5uduuUExGbyHQanYtEVJKF0HabtYteiqqoEbjgpMjpNGkSBPNE623HU55nTP3knHvx6XNs/NVuoyk/vnnvuhZDQOa5FCJi7x07BJ1zMkQVMDViMjKHrJoREZAdkoI6H00FwYhq+45MIeWJnDubnTAAiMwqys7FwKqiqsyu1G06gaoiMyGpyLJxRzQERhZd1n8wdVX/TwhoKoXZAWFJ2ZjMoPHBOU45qyiYgpFzroaMZJkdOlPlxRgSAGzxmIRapVMx0yIIxrVbZwCEksri5kMkZnaW9OjZUrfW1aIACJW1RoiMIAAXkMXOJtZnmslilP5ROk3tDS/0oJqSpgs9o6IJZ05NrXxqWaRGWF9thlRPpLrM1ECT6oZxLrgWGQ/CmWS7PCo/+PrYIohbTF3VqgU5WBWUXdRdBAAf3H8u7BrT6uu8nApUIZZWJ+KPvEaWLJjFrQjPQBOIFimxzhgCQGPkksSHasEKZzrPhyu18HkqKnRGhxaBCbrqCYu1UFwkIbicSFWu2ALc1NPXBfaqZocXrtfHnJjKiFl4SQoCZ3bWmX92LlwvL7wUposGsHqgIEDVu5FVP6zFN8WIqYCmkj69fXl6/z7Pg2MidmtH7FtQPB6PPsJnnz5P2ZLgKPg3//APz198dvf2+/Fxv+42P/3pn7x9/ebf/t1fS0qEDHAWE56L4fqpLpQL+/Bvy2df6mtcIKP6e1XQtkA7ZzpZvdV0ZmedhyfU6ukM//3TG2cXoKA6k6ghiC60tuUICzHuQ+l/3tLBhWUGQBcUz2plZcjsUE1yAcEYo48eHIvmatKs85xyGsbk2KuUQz807Yod7jbbgLEfT6oApG2Ic7FpTurw7WnMYnpK7OiLm4269nrlXt50bVynQbrbrlmvD+/3q90Nc+IkjRVyBkWmJNeff7K/Wv3m+O71t6+GPIwNd832i91uw00M1FyvX/7wC7+6cSEc3r0KkGIXR86OHVVHNhdC26IjJLQsMM9EKtOgeepP3yNs5wySX3uXTGAcCoFHUufherXa709FgES2jWPI0ZDMX+92n926Msjdvfiw2V21ZTwN09EM3z8OmjR6f7XN8Tpu13FDWzEv43i6f9fgqCZElEEQ1LPm4Sgzvbsbv/jz/yq/O+Spv/v6PzzZfum3nwL76hyFgIp1XurCoVtA2Y+myWVMLkvrB0bRMppwoU9eqEf2EWz0T4b0BYuCBRNfwEvEs9/Zh5lZccczu9LOuCVANdo3U1ViVlOt62rlapoSogMCwBourFKW/IS6qAGgLbAnEyOiqhhDAUqiWbQIeDZQUYPCgugdIZFzuCyFQAZoSEaMgEpEWuQ8sxSQgEg1i0glQrKp0fJoUDDUfCYLVvUuSjUzllLPGJlledQIYe2aLDPaTJlJkRCJ0IhQJSMAFjGzXJIiewIGnY4cvXM+0tKpEDRAZvJ8db158ezJ23d3zuFmtb69frZqWwKDCiiQQ4BqjafAvltxjHLsRUQsqykCm1Uga6FcAqMZTdOUEAcAFcnF2HnvXJayrFGmUpQdI6LBzBQRnAgoqGNnqI6MSB0bOXQuFCRG54jYqlgSVTIhAQghMzsDrqM0xMCOTQVJ2RsTogAzVg8iwCoCA0CqmwkzWx46hAompWjJORcC57iQQ1PVVDSL5bJA+KKqqpqJGYEB2ZAAUNXMSs2SpOWhpJUWR47VCqiC4VkmrACqaqKWCxSprtdLXkeSUkTVrBQRg2KQDdKCQlVqLp59FZd5sEBF4ziTGQc2y6loMSUoTL4LLrqr03QfY6tZATBJBingIUvRooEYAYqKC64Nq3HqSdF7h5RFM6JLOakKBe9XHaMD45JLAQQX3YoBFJw35riKzSqO/VGSlzmVMUnWw/0xnfr1touhKXMWFRMKZJ8+j/q0vXuDb94P+/sZHXYb38+kyG50WWzqJVIs5QG95xiiD2CTQXI+SvGIeMonU113nXPsIZdCJfdjHj0329V14I1hMkeG4IIzGAxZxaZhlJyb1oX1qm1X48Nxt10b+v0+ffK82bWomXi9DesboXj9w592uT++ew2Wr28/uXv/O279evdkSK5Mdjz+3l91m+vr0MBw7B01CFQSD0fbNE7mvLt9uvniybtXh6i8fxzePE6Tppc/ePLpZ+spHQ8P09dv5u/e3zmh3fW27VZ3bwcqj1cvHHfRUKfhcXPThqv29qa7+XR9ej/uT+V3v3trLhaR0LILDQNoLnnWsCrbm7ZtjCG12xYsg0GM3dv3p/vjNI8aQhvJk1HSvlgGLLGheS6OGYW2q/WcdTgMTHDSiYbk2DEmDj746FwbvWvblYuND8453K1jjNG7ljHKXLJkQwUrXdcGR0N/eHw8nvb9/nDs+8M45SwllZy1GBiokAiraUlQnIk3CWRh2dSoVl9IAEVghJo1Q9Xv/cJZvYSbwFkIWmdB1zV393uk4kCiI+MSN+sxz+yx5OSUPDcxNKbYOW+BxvF0l9Inu7aj0gDkfszc+K6Z+5OlHk3UNI09KDARMsmEWVBNy4zgOXSbdv1EkoDvYvAqUrJOOZFQIA/OAXuODr1L07HdkMzFAoar3XXXTcMQm033dDf12p/6+/ev3NX16tkn9vU7TzTdv58JN09W8bM/e/s/vr57eBdWzZPPP/+h8ZvHt49vf75acUHVECRlr1CGsLn5MSL0Q59OpxADqomITMfWu7LZgXsHUkhxnqer3W6eB8CiQE27gZyOh8Pz6xfj/WMSu5/7m9V2HEfLxusOALxvAJ2ogVl7tWUE9mnXrOdZSjnOYhQ7dq27vm43L1XZ08x6OD1+Px/22DJ5KrP2h6mIuSYG7m637evDd/3xzuakVhy3BCU2dpqENElRxq2xm9IYlK7aTzQ8HR9/dt0QyiQ2A1uVAKEUz1bKyeSE/kasqDpkn6fTrrkl94RQ+uOBrEGYEYoWYfTEYLmkeWIXmrBrqO3ngtgGv5ryw5TUhY48WzrN09FAFMSFdRrydrUGSm8f7h0zcehCR1rQgRk4dF1wU7/vons49uhi419qNsCEMrg8BD86ijkjhXbTPAGwMc8qfYCAquP4OKtrHZlBLmN+PKJpVPMIq2D9+JaosDplntVcWEXv82QkYxsbdCRgkvNwOFwKhEqdQABiVzf/dS9CECqNj51jZNHskaAaV5qqKRMyu+q5WNtziAAoS5INO+fZ1ApUS0QgCmqiKgRERKhoqtkGrB2X4L2ynhKD+JWHgof9iR3kcXbOiOnJ9dN5ztMwH8bjlIInii46z+/2j7IyBPfdN69X223cBHX03W9+PYk679ebtQvUT9OqbadpboJLRYqW6ydXs8i3r/a3WyWnTeP7fiBy94e7MqS
f/ORHAFlz8k0LBtM8h9ikMZeU5pRrjOk4JlJSKapgYm3bihYAsKJIaFCsSvuYRAqCIbIBzLkAUEDQoklyn93+/f4Xf/fraPjw/evDPGzZVg7Xzp5cueN+tEHnfelc4z1nlT4VT3Z1tWLT2LbtejtPs6cGlBFa4KKS8iQ5FyRrY2ijGdDshmL4zeHRMfWWNZXHN3fk23wcfBN8aDf05Kpr68KVcwYjYgDDdds6bo79IZViJs45rCdvqCqiimDsgmGwkkRRNTt2STIAOoqqWUVqScywADxocA7SE0ImIrFSPYfUcl2fmULd5hOSSCFiJjbNBIuHiyGaQqmbi9AgmEpBM1QopTA5hUJIjnwuc42GAvDe4TyPiEToiD60EQiBEIuCIpCB55BKYaJkFpwvUnfVZ+viRaeGtSPODgkxixpgUXUL5IJ2TrNSOYegAxqi6IKPmAEjIiCC1gzB2r9etDlmSESL/ReqfpivgNXEZkkrg6UyQzOjxYDcgKoX2MJ1qf33s2fw4pZNiKU2TmzhzyAAn1lOFSmxxSFmqeIIUczEjKky542RavtFF9IRABgh6Rk9wJqOaIvy6wwjfAyhLPlfFXUjxFLm/eM0zfPqRde2Tc5aSiEiOBtsnEtDNNC6lzYQQ1psdrBKaatew9SUFsVezbj7qLqslClb5OoVALIPXe2zRdFS4F7SkOo91sWq/AKDLb+MBnZJdreFB7ZQFHChbiEgcJ0GVepC0MWGEFLKiPr5yxfz/QPKtN10D8e7n7z4g67Z/eJXv8mZP//0q5sn21JOx9PDuo3/+ev3t+s1ozf2f/EX/6wD+Ydf/uzd3Ts0ZHS1p/uxTI8+nMCHAfXh6wzj0Bn+wbPtFF6su+HsTrW8RBcIbpkZZ2XRUpHbwjNaxEL1lfVoVWgCWs08QMWgMh0uU63CsbRcxDqcDc91JCAQkBkbGoIgOTBVyR4wMBaQpl3HuJ1jaXc3Cbzz7ePx/dppTrMBbTduSAWBppQzCKKJaM4JJZ+yjsMUm/aYwUK8uVrPY/rk+bMJ4OZqu+oaom3TXdPk5seDkX+3f3zy7Jl6N42PXdt4biLC+7uHtw/vEcrKd5uXT9iHrmmvVi1kxJauvnzZPbtNp2Qye5Am+tW6kaG46Bk4i/m2wyZSaPI0q0iMqJpDsyrFOPD4+KDDgC4RMQLePHs6w+F+f0TKXUuqq7f3+9t113n4r//qr/72l7/9ft///Ge//RdfPrndhOaKT++Hw8NkNo+ibfDQ8GhySpJP82x6TKUJIba79fp6d8Wnh7SfymTWBrfbuMgQHOd+ePPLn08ZfvgX/8qHm+N+f3j98yfdNbgdIKlKRYug+uufwZszsU0WVPAjaAjP0KqdkeuPx9nCiVw4a3We6oKzLyguLg1l5PqmH451Pt4Fz4FlVcRFqVrJkwBQgXLAGh1lAIwIiGK1lq5dAFMTANSLHHfhlgACLA7BCyBuXOt4QwPORedshIaojhARi2QwQ1crflI0RCQGREBHiCzAxGxSCBUR1DKgZ3KooGYiVrO7DACBzXLtGlBF1xRUVQuoqOZsBoSqwkC+TjYGMUJmD1rTt4gIg1vSgYjIiEqWQKZSSlYbOENBTY2neezYR8+LW08VgapYjM3zp09j/G3OqupylvHUK1AEDsHVhBAAlFKAHHBAjFYZXAZq4ogBrYpQEdEUF5KpooAyIhh5x2hgKoaKAItKnZiIQC24ZgFxrCCad6RVPuC9Cw1SMCQkZnYKWkBAjH0T2CloCAHRmUERAyJyTkzNOKXCLIHReWQfGElAanwLFKnCwaJQiqiIaTFhIwIwLSpJSwZGQ1OVLDKXUkrKWoqBOSIzIiICj+iQAAgVBJSWkPvqBoVL5NyyX6hYFLlqh2MgZIpaDFnEVKjGpVoNdwMERkQmA+ecARWFuseoaBQBFBUFQ/yQCevOc5KASdG4YU0WfZQM1adymO4YVSUhe0YEMXSEzgiBlb1vvQtYejORnBnBOQZEFZOsjllFwEBLkZyM0TETEzMoACnEbkXJq4J6JvZhvZn7Pp36RKPkeRrGPOciY4zStY1znJJISpoKhfji083q1r/79vT4fjyV0rU8D5k8F1Mxu+rabm7HgikVQgCdvfMAWANmoJgDc8bDoAA+cmzajY8YW5+Tzv0db2C7u/ZGx8dDu7bNZtOPk3chdDipNlctMWOvwylvr5vNmnbr/OLl1dNnz/Kgiu3Np1+xc8f9+4Knqy8/HW0qCKoje9nExvIpp+O6u8LQ/fYX36S5efL02Xh60NL0Y/abuLnuEhzvHvvN7VOcmpdXT0/94fu377/+7q2U4elV98//6Iezzu8f9m8fj6dyfPd+/OrLHz7bwePjN732vukO8wgWXZ/uH06OfBvdi5vuk6+ev37z9ve/+MbFVQFhgyaYdtOTG+faMhdBF5I0JVmeZf9qP6esRt26jcFJnmeZiubYRERmJpV5nmYCPvW9ioEhkSuiIBLWMbjQtavN5qptr1TFkBwF733ThKZtvSMTzGVI88yOu21DSMNp/27YD8NeBEsyUS1mw5SGaZpzQkSkJfLXTA1UpKSUnMshKLEaIVSrNlRUWjZDZy4RABmYilagVUVE1FRrzlGdBUM/oYFKnnRqd+1U0jTNRlDGzOAcEAAFDvOcs0gI3HXBsnvz5vVejy9berZ5YimDl+hpzgSIGBooYsaiKOa7T/7Y716Woo3zYHPqH2fq/AoEol+1FCORR0JzbFOPqKRpOD44Ls31zXA4eorZhsxT3GxiXB0fj6Xw6upzbDbzfBoOfYbXods1u7Xuj2Lzu1e//PG/+J//5f/if/Wb//ivt33fPI6rqx98sbp9eNi/Of4evG3Can11PTxMP3j+Zb8fEeeOKY9js/NvX3/Xhi7GkFUVFaIf58maEJhQiubZuSZ0cZrn+TQ9e/kklywq++PeszeEohJd1ITO0cq30zyS6fpqU1IZjXQYihwQHCIbdsF1xA23raHkcZ/lIPl+LvsRjvlx2l191nUd+85Ff0xzlv44H/uSJ5Usdpx73xC7OPVjMiwmKuk0aGFpwopmDv6GaHvbPZnL+0x5KkcmDk1Hlk77B5+mBmP0LaCf0ti2DZHr4lbR5VKSljmNPloe7tZtN/dzJFY9EZoLXsSgBqWxKEAbduP4wOBa56bxcds0WTOYmMhUoMG1TUOCHi2fpvdd+BTJzZaJXAwdAc0lewaAgkgyieMw6sy+o5AxtsmsWJ6t2CwBvRoIwDA8OHCGPpcU7LCOwbD0/bddu0tJkXy3fortJuvjKqyC3766P84EgXYBFENxFMTZY/+QjGK7W988vWzQTLSYIHG1tUbAas0rJgbgnC8lEWFgLypmJpVtjQRooIsLrEhx7BAAQJmIHZpilZGCKSiJJVB0js2glEJmAExIWuUjaGZKBpLTPI+to67bKLUP79878VTKnOZt69F7WHFwLZqeTgcT9d2q3VyllAO7lDLNs2vJEOcpv/ruV7vrJ2OaNuSKb0K3brdrmPrdpnvz6s18Cqur7YH8++/vzOfPfvhyGg7dpoldd9j3v3
r13Ref38is0zBlsVTsdDrMc247VqXjaeyunj0MJ0zzLnjvXA2ML2oIGKIXETErJTMRGoMBoZvnLFaCDyWJUBHJ6tx9n/7Df/jF/vs3n7/YBJIopWv81VUHU3/a7/OQm9iklHwbpyRTnrKU1e1WwLdNS45FzIdI6Odcjg/3vvHMPqwaHxorkyMyKOM43N3dvX579+6+fxjm+2MSgKcvbn/yw6/m/fGH11/e3D4l53GeJ5Gm7SQnIB4sg5oTBBjTPCMSGjlHAkZYo5hrzW+SE5LTunNXExFyBGZpHgiomKiqCxxdmNOkRQlR1ZgZEEwXiqkZkKvpTsU754gVUC2LiYKSIZGvXgyqRoxWnQXAiF0xdeQMBImLlCzCSFVKWVNlHHs0JHQC6mJjYKUIAVXr6FofiImpEjnHbADOcS4FF2dgBISs6ogAUIrUVxKRmohoVS8hMRoy4QK92sW1enH8LWpKtShAtMUZ4JxsYkiM57z3WhIvH02NQPnCt6gXvX4wAMQPxiBUbfkQiyqY1Y4xGjhCsSVZDM9MmWrlUQshU63mz3pRJNUuC2INRbGzg48sVKIqDTCARTZoCMSoCoS0RL9XOyc4S1AqtkUfJGSLJASwSgwQEU2pmI5zdP4P//BP3z4e37w/jvq7P/nRn6qAijJRRbpq3rOKVizrXA/aIoY6N8kXWA1oie5eKtELGeHisWQfwSVnyszyuwp2ZsecKTIAlyNcXmRn2telZsSzty6cbaZqNbtYoSOyVYEOiXf6bHf1/s3DetsBl9vddTC4u3+/7tbHw+HLz79wDv7+V3/7/NnLZ1/88PExH03GcVbw79++//r3v9ts1mTy3/43/+3rb371b/7NXxeQmql3CYOzGo1HZ/cT+xjp+fBNBQepMu7ORfbFeasOncojWuR6dmELnS/r4pa74IuXK7kMZ1xQy+Vdz1rKhfhWo8ABAFHrDz/oF89o06XAv5wHIpInY4OCCFiSlR7LzPPh8M3PTu++l6KMLgkKsYps12tNw5thn4o4000kF3CekyjOKaW5CGHwriA326tDnyYIzoek+uT25rDft+stFswJu7X3zOzRKGdL3c01ulxk6to2BMfEeZwkiWZs2xaKPnv5/Nlnt1bypmvnZNQGaILNc2AM7K3rRGXOXCPsjJiIXNNUpbcFTvNkmsFMS2EK6Nl3RtEDpjzPzeoJxHyDgYMfp8NpmJznn/7Rj0ux796/++tf/PbxPmsbv/rik8NpcBw+/cGTlrvTWO7v3k0Kb97NXRtXzapBQcrDIFbSpjO1wQzTOL55vz8mCa1PxYZMrus+/cmPadZpyuPj9PXf/vWnP/6Dzfbp45vfNLtnzdM/pmaDbKpyFuzCGSO6zDO8GH+dvz//edGrnWmUC7RKZ3RzOY4tA2hRjCoiGuLZJ+08B/Hyqo8gUrzQk2Dx7vmw3AIhgYFzjIgMVFPbRBSgGs+TgYBVUYSBIdc+GaCaLDRVQzMQUDRlFwwgi86ic9GY0REjZWX2aFjJoAZAXG1/TFXJmMFAkBxWhfWyxiMRKzCAACmIFM0ICJgJvZnWta8aB2Nl5lR9rirUnZsWImdMRk5LqrHq3lcQFmuamiNCAMekAPVxxMRcCpWkgAUyoJWuTfPaetc2sQkMFRZERABGuNl1N5vu21f345wOw1QQt+TACNpAMQDXZbXq4wgWBq1hzeHQjLVrAmYmy6JSURIj53BZF6AS982xZyQDIAIxBQRGEBVc+FpcicmOPYJz5JBQTRHIESMaMyOikUfHjOZ9ZOSUci7JIRiqiUyjWcnsSuOIPVNN7zDUJdyPFLSIZVExEwMgZ5XTpCnnJCVrMUUyywYokrMkrRieoRgVQEQMwALCDqmOOKSiyEyBHKBVC2smj2R1w626hAVoNdWtTREQUazjm4gZiMwUkLEm8AJkNNWSSkm5jrsiAEgiagbEH54G7vJQyCkrIzkwUNF5lkmVEdAhE4GIoIJfrZhRrMwprdZrEDj1h7jaeEZEUgQfAqEHYIZgZFNWUZyTzP2IfGKfXLNx6KUUhYJ1+hCFpjVlACdSiBrvt81qzPM07o/peDgd0kBz2sw+snPRM43jlKeB24zsnr5Yr1ftq6/fInrBnAoPYqAFUZwHMIkN5pRJTTUZigtNs76GjB6V2SY5MqI5aLon0/imH07bbfvsk92f/NmXv/3Zr6aECpRzGE+lJG3WXUoDgMz9ibfd9ctN22ykaGzh2dNw3RGv/c2Lp89+/MczkI95mA+3Nzeg2F3Bsc95srE/EA6xc6HhghnGh7//D78M2krqLZiPDKoJhLebMpwav9sf9qtm3Pfv2s3uh0+eeef+83/65ddv+19+/3cvnzU//ePPPvvhKqn+7nf7X/36734n1HX05NMnX33yyXw6xia6QHO2+8NEFH77i/dtF3bX63/xr/7Zu3fvsDT7d3erztYrHtPjym2YnWv4cBhOh3mYiygGH2L0TUNzGg1ztwmO22kqQ7L+sS9zDg3XrVwbm9jEGAIir1Y3rl2t1tvA3iGbWYjB+RhX69C4GELOxzSMnprgwuZq69jG08O7/ducihYFjsiBQy7DPM9z0ZRT1iXM0BC9qaCklFzwPhctqlnFSg4ISG4x3q005lrkcn2QVB/5urUwq4FLpvYRqygnrcxeYmHMKYsse1wAw6LAKqc8EOL+1D+5ff6DT57O91NndhPWnE8WIztNwx5kspyLqpaMqkgxjUNOD5awKUPJNo/JOYQ85ykzI/k2heA2V9x0cXNFbaTOCztP69DuGGabTuublUpCMDKzuXfg5vE4DidGaa+v4vaKj/086ox93HoOTd4P++8P/XfHNqw++clfvfr53/7gy9vHvkD2P3j5R+DTd3dv5wShQXYheMs2gBYk3j+8Nkpxzf3hcd2548OwQfu0czBPDI5ZRJNqnqfC692qaYfpbrdqX7353fpm983X33W71TCPdW/e+Q1innNmz56cb9opT3pKszowit1mtblRDK7bqs396Z74KKnk8T1zmfNIwaF6iDsMcZqmQtNYxgxuGmcDakKTZNquVm616U9i4kPcopGkaZK83WxkBh9uANsy3kcfDsfZ1tFcRgVVFO3a8Kwht+ueWZFi1ASnUkhFNIP5Q7nbbF54uzbde78WyezdLKmUqWTjENHF05ygIXbm1dL43qQgumnsr7dPShrn6USuaX1IY/Etdz7094+IDQCNhQx1nI9d3CGEBBq8A6LT9B5VwaRAzprJwqrZjlYIyzgdEuYmxjxPSXLBlt1mKhN7Dy54DrmMCnm7eqkWkCbnrnNeH9OeOMikmlatNR2mNe3y/D1qdsFP+eQdkgslaZnL8iQ4J3cYAKGz6iDmvKipCBIWUSYPoEUKEYsWqFngVS1PWA2DmZyZFBDI6lysVSkhaRHnPALmAgpSpdSETMS1YGHygFhEckqapvqp5jlLEXS+22zzMDgypPD2/i4E8qGNzJgTkM3ztL87NrvtquPpcFx1K3D29s2dc01s2344PhRZb3OaYbKIMz25aRvChzcPUz9556SfN00znPTN94/gg
iN689333nd+5b9+9/r25bVk1WPfrldA+Ob1OwXcagSh95LWN7uH/XE+Dd26I4XYdDnnenalCBNo5YwQmZqUjEDTVIppkUKAuZ8lpx6mn//tP86H1z/8wQpLn+f9H/3gxdw/rmmmhsQ0rDf3j++jjxgICJzazdXGk8XGjcc9YWi6TQYzG1frzdWq69ZRRYjRdD5Ow91+uH+8H8fT6TAM/ZhSPp0GRTicyvz93bs3jy+fPTs9DApw89kn/+xPf0IuzJMYsYsNks3zNE2FiNWIDLMVZjGtAUUEBoRqqkSMSOQQ0eWSAXQeZ+edKSRJIcbqZjTb7L0rUsnqALDEtC/ifLAaUhNdA2A5Z0MSg1Kyc94Ac0memAnFpJK+a367SeVZG3vHxCWVGm9vCkVz4zlyfTqYQVZV55pcZkIkJpF8KfaZak/LVFURiooaIGEuQkSLOw8tCcq6TB0xMyA2k8Vcw0zP4iZcAtrr5TJCZALRRdcJCAJAdV9cw8g+EF6WGmYpXRgRqrdR5XogfCj7l3a4VCE2Lh8eF+3OBQJYfhPsw2GNDAFFF0TgzII5i9FqVBAuldq5kw9wjkLD6iF7rr9MLygCGKKqEdSeqH6oDhHVjBZEoJoTV4VR1aciIXpHq1VQmX/7m3/84qsf/ekPfvr922/e3b9btyvvnKhURomaEhOiMVIu4j1KNYEClbM0D5FswRxQTS71Y0Xx6omdRXBkZ+gLz+qrSxW7cKAWHtRSt15yVGtTqtIp4axnWaCjC7EIDe2j0vgj7AnRTOYfP/10HMdO6ZPdk/14bIhev/pm2zallE9efNo13Zv3757evvziyy++uXtgwuD8m4fHh/uH6XR4cb273jUvb2//P//T//ju8S2xarYaL7jc9KXeroVf5XCdi3JbTvHDsKuUHzyHV9W29pkdoqgf0Br88Bpb/EdssZRaWHd1Spx94ZfDLVfnzFOrfXQSW5wByJBwoT5V3lblglUEarm4lZKEF1PdmlFIKopYQtCSpzLdS3kYpgMQ5JLHw2tix2zqce6HzvEq+tJPpmqlaLFxSC561zRDsqm4fZZ5VBbXF9t4MqZx7nebXQyuCcE79/x2t9s1x3EkwGdPnnZXOymlazeMeDgdMqTdyxXiPEynJ7c3u01jbTAs7AlIfeeBiZkLuNg0Ou4RCqCAShMjEKpaCAGcZ3ZoAHlCmxhBikEpLkRw7H1mR1Jgd/0UoHl4+56v/PW2zXL99XcP33//KpWi1EAMNJc//PLFz7773fMbPs34+v0hz6Vh2T25/Ys/+Pzt3f3jIId9Pz08FjFu3WoVffCrLgx5fuinw2HUOVPlbxQ9DXqYTv769NkXn7/8/JPDqwnH4903v3/+I7y+otPb/xi3t+A8kAckFVlc26guWsvEgI/ZgRdECM6gEpwRYDzDgoBnf69FNHoB089UtTNiCVChhMsxzzByHbh6+X452BnFvKy+NSgdFqizmiCDEatmR+epTU4VDFDRaqaYwmKebAZV/2vVLKcIsJHjLFoUU6EmcBZGJKcAZGpqksGAF1ISSymINWOBAGjB6U2IiMwBMoByXdxQEWpsJwGyLYbyCQ3r8xFMqFKPRExEVZQAlAQNFXMpGdBJCi6aaVFBZDbx3jMxIJkiMGhBIgiouYzMZmNfTo/SRFTLpfGtI2bwXU0/RMSua293q+++e12KZFXJOU0DM5tmycX5wkwKWEoCRB+DApYiSLj4c6uZmQAwIqE5pOo3R0hEAEilAhuAMXirgZA1j6AmGoEhE55posQOkYtScBSa4DzDouxORuhcp6aGOAsgsSNyRMgcY0QGJgTElDOpKkMRVVdZY0YUtBSo6S7VPzznupmxYsmEmbQsPp9GAIDZrPpFCdUIDUarzCEDpOplrWJgWbAQcjV0MROqPEt26Dw5B0amAgpFy7nTBaIL0I/IpkroESsDDhd4DMgME8+AJEVFAARNaiOMCU1r2Ov5IeUux3U+EJqqmuZp6o0suqs8K8fgHIOB46BZVIuJOKQ8pSJi6JA6Jp3nY9ExhEhsIrNvXQiNSAZA0Sx5knmQeU7H3oWVbyMCZC1g1YASGQM1LkuZMQFkQDTm1nPcNMPDIY3Hw2mmSboOPGN00SGkSZp1W8i6dbh9upnTXSRdNQwKDpDQWBWSRI6M2K1WqOxiBEBCnfM0lsFFYhJJUynu7Tw0nmsUtff2s1/8LCD70LBfrbuupIkDJUzttrn55Moa4yaU0RpP82Ec9g90e9utnijw0y9/ELdhuLvX/Xj97Ad2sqF/1z3pnEOhnPIwj3lVvEWLM75/lW83u/ZH21d/99dXXfzm7VsnbQP+sD+i0vxuiG28Wm9vd+77796egNDB9fXmye3Vr3773devx9dvf3HduR/9+PYHLzZXbffu9fH9w/Tbf/ftv/2brzcNsdof/vTLL758SdJbLj/9w68e7x9Pb0fZydMXT+/e7JsQLc/zpG27IYzTOM9DSqlM8+zYdetIRKJzmguoBe+nWVOaxnGeijFwCCE4BybPb14M6RFU2UqIse0iee8JPRmTNZ3zPraxFZssW7YgZVqvt45XjQun+XDq302nfRLYdNdmtE/zOGdJ8zBNeZ5KmkqeJQsgAlIphQCRGTiXUkQlixQpzGzmVAsAOXN2oaEiqBoSqCmRgyWYGi5u2OdvAABAbX29Pj3Ow/T/p+q/niXLsjQ/bIktzjmurgyVsjNLNqYFONPAAKARAM1gpBmMxr+UjyQeaMYhMZiBYQBYT4vq6a6qrMrMyAx1pYsjtlhr8eG438jJl4i8cYVf932O7/Xt7/t948M2V2TJ1TsiikQtajXQIjXG5vz8msXZULXfF6ur1Wdn5xclj5p7LdUDWs4qksYeFSjEdrFyTCY5P3wInpgdKKR0mI2lTXCK2QvkD+8evlNuYnt+MeW8uLriZgkeus2m5r5MNm53hMShweC7q2ey3/Uf3qbtm8svf7VoO2+LccLhfnJ+ffHsUx3z44f3L7/6OS6vv9nDH373hy/+9C/LYQd3u6vV9dsPN459nepmsR53D3mYkIppycPDoNouF1wyATSOa7+7iPbwuFdpwSqaNY0vUgEgSUVPw2Gi6so4Bu8QQQ2JmL2rVlUm8pHYCdn99h7Blu0SITq/COGMqbWaxu078qhaOUbnz1xsFssGHm5q3cdAzeKane/7+0FlyFsfLxbteQS3Hw827FbrBhGrEcfFqNgFmlQ9cmNu1FLTVLxr2qiWKHYFIeW9o6WmSuRaf7ZpOqe+mIYQh1KK2TDu1mfniDjtd40+u3r+i5vX/w6d89Hvtlsfogc2qQhBSgXnRVW1OkUEtwgXinjY/ZBrKZYAqAmLLAWJqtahQGhXIObDkjzUcgjeR27BPIWYSh77vWM1dqLZh1hyEbVSc9VMjYEnx8EQRQ5apq4JGJteRcrIVEW0ygRY2+ZMEwTqzpZfDPl2239/sbkka9CwaZzYMOYPIsl72qcepFmdXz309er8bBz6n8wHeCzoRJknYVRjQCQS0LlHs2olQBM4yrEIIoXIASASnzII86aO0YiBiEGO
l6oZGjIAaJX5bQxrLcEHVRXNxA5mNy+wAS7aLvePjhgQhBS4TGPvgNZNJ7Vg1v4wEFTXenYUIYzboXpvFSvmLnbsvCJNwzRlKLs89uXsmVO/vn3z3qZuGdyH9w+Pu22h8OWnl6//+J2yIuqb798Q03p99rDbLs9Wy7io09T6mNKUhylj3Q39cnO+H9LLsxfffv/7X19e3b67b70Tg5rBBfew3zbRO8dFxOyIzydlETGFksvu0LsQ29ajaUqCDN/89T/effPNWcQN2zDsnl9csBYUZYF+N/k2lDoEbtgQSkVDMz0cHgkxJN/GRdstfRcQdHN2horjkFJfxmHo+1GqYqBSqGk7x06Sdq1j199vD32qjUc0nSbdHnZQS+y6P1mtGCG2ToFSraWWUrJnZudhFtRVnfdGc3Ye6xzpQjBVds7UVCqQplIYgJgBAAkCeckZkMc0OefFUUlZVdpFC4QgCqDBO60CiFXU0AQk+ICIAoYKs4iAiKKiQDbveWbzC5KCIKIqiAobVamiqqBFkufo0c/TsKgJqEdm4iLFOX/E3528IU8eEUZAU0duTqwxzgameR+PaqpytBTNNp3TBUT85ImYkzZzmdcJhnE84TZwcxoB6UgdOE6+82BwRKjPJxZmswY3J9EQkeaJZLaKPOWH5uAYIIqZzu0nxw8g0VFgUjX5qW3Gnoaq42n/DFiYD6HnWNz8vD1VgR0f2jyGmQECA8x0qtMEdgxomRkTupOTCBTmXxYRkOj4VnzMFZ0iSG5OvaKqaskeKhGkYf8f/v7f/+a3/+E//6u/ktge85CIasLE8yNWNXRYET48bi+69So4VhGqCanKCRYNSkeDDAGSmSLOcsnH0fKEvDkqSSc04skONDuqEH/Kwz2+qMc5057mXDiKbSdnAwCA0ke5xNBmi4+CAbMTnf7k5VV/eBxyXl5eJtHg3fbxITqecn3+/FnsFn/88Yduc3Z1/fnNhwOWolZ/+0+v8zDFEJarJbEr+8Pfffu6jmPj4mFUKRX5tCN6SsfAR6XvJIadlv9/lEA7dpIRHfW/p1UDiHjUxY7GIvvJl6KdpDE8ikInD9AxeGinZ8lOj2V2dB2/qaHO/NaTjvUTZc5O0sARMfNRF5j/TrPwROQ9a2bAqkl1Wl93+we/UyGyGNmgqlWpOFU5JL0fUmBqGq9jJWJz4bHYIPqY6pAThYhkDVLTurOGi+yvX3y6WWyWzWa7nV68eLFZLe4f3l1+/vXq8sLM52KIsVIcpoMgMHnfNmcvrduw1eSoWOAQSQtP2fyqcW07q3BZRKuiC8E7qcUFbwagFZkVae7KrFPSmjk0oOaCq1pBDdF5pCpKwS3PEaEOve23BbH5+pNXlw3f3fd324f/w6+/gn787u1tTen3vz1cX6+/+uXLd394L6iPr3/YL7nr2ufPlp9cdIfdtDuMZapZ8X6QPu8VrWbQZKvYRY/Lzn358mIc6/vb7e2b71J/Ty2sLr+ktJ72zfZ+uyZF3A8f/nH12XnFJTme/TaGR0/DUa45XmofJeTTB+1Jv5xjt0+Rz9llchI5n65bgI8RNfyIy/oYdMPTOpmv1pOUNN/n7LTc8aff8Ohima8CnC0qgA5YnyyL9hRHJQZEpLnZSk3NVE0I5pMwQ6AZUWdAAqBIivNJJZGQEjEdHTNqpkBgIjUzMqCgGSoiOzI0sLlRQU1nj63NzZ6z6K6ipoCOePaDOrM6O3BNRaTCzNgTk5KNKrAiN7MfK0utaobOIaAqKKEamYIhI+usQGlBFTarVRUyGuR+x00nglAnB43zHsgZsalVUUTYbBZd66aapFYTKqknRvO+1sq+Is7NFAJWkARAkNDQ0BCB1VQBAeb49dMNxeZcGKLB/KzO2REsogrIomZggRlxHv38TJBkF+bDSWIOIThHVdS0igggZalgIKiAEJyL3kdHszWIHUTnCIGck2IVMat4A2bQoyCIqmDm1KRKNZBSstZsIlXBqpEpAOFM+TzirgAZGRwqgsyuNkETQ1QwtaIqCKBonh0RgekchITZqEw4i/JaRWuZNyBilcgB08npj8wE5ARJkQKSAiGYADAZoIlVBWUiEyFEnMXoozb7sYnhKBU5x0TsnUu5AFlozgw44IL9WMukRrUIBQAQ76kChND54IvWcUr7w75Gx4Y+REDyPjquBCpWCWEcp6Hvr1FdDAahJnl4uI0Tx27ZtF1KlQqJSBYlQgNs2qWGVDyWwjhZIYu09rJ4fHdbhyHnHMhiCI44DYPV6trOzGLDm7NWCfJDKkW9QdPxyroMKRVuIgdGcN55V9JY04ACwcdqiVl9wz5cELWgg0md+v7q/NI3SyJfsitprA6lnV7+6vPa09ViDW768O51k2PejdCFZbu4WF613dq33Sdf/UwIbt9+W/b7zeU1QtgP78hR7p0NB5ZSDkMb1/lwawPsCR9+7Dv/6rff/Q231qy6xeEsCd33N3/6yz89bB8A3JDLhw8HR6FrzriOwzSB+O1wMLVXzz9JQ+5L/t13D1dD/5/8+kX0fnURN7f25n1S3+77/T98/2gt/Tf/3c/fff821fH55y9M3o9jrRkW67CK8eHd7XAYnPMJcsp1GDOwa7qWieLCl1IxGbMfpNw+bquYVCFwwYembcwklSl4uh9v2OPmbNOFNvjYNh7IQRlROS47hoxWh/F+2SwDL3Muy8bXcTvW/WNJo1QwDN11gFjGaV8nIWDvyyh1GvM4pmEoKVUFIkdsKqogXAW8qtRaSi1TrT44DyamrGCmMh9z/WQzggQIqqCmIirVVHWWUn9yHkiEeSrztg6IS64M4kEJuAmtEWsWIQgex8OBfHPzdnq5DBceb3/8m/Dyy7OzzwXdVA5aR9WplkJMgFots2sVrZQh+g7IiajmgljJcyk29DtyXsAIAzUduyBTjS2X/n7Yb0VLe3buQ/BxEc43pUrKI0zFe6LgY7es02H7/s36+bOmiaCuujCV6prV8y8//7v/5d+Ftnn+5c//i//rf/2v/4f/x4svP1ssm9W66eIX3dvv7m73ax+IZLlsH2/vNhfRIVaoZoVdCJgkFxSFLIuuuzukSeuGfR768+vLYdyWPLSOZBmnYTw7v3rzwx+983zcf0vDba7D2WqV6qTFqqgPLRlUWlBYqrKMQ556sNKuAkoMGG0SXC4FF2MFDNQsrhwauNXw8KOmnV92y7ZD9gauCD27vLq6uBCC7bhbnq8fD7e5ZhVhggqQ6igyqVapotZMee8aSnU/Jlhv1qvVZpyGwdBCO+4P3kmp+wopT3kRNpBUNEWdcPy+RlrwyooauoUHYm+lRwom6Ijd4qzkIfiwDGH3cGgcVU0BTYZDiMFzG8yrErUdANY6YeBQU6qCQMrZgSO0WoZu8TyVlGSAhlPRCqMn6pyv1dVanW9m7GmupfGR/Vmjgzc4jHcoGFx3mG4Xqy/Q2mpD1p3UGuFC5QeyYd0tc04sq9X6ouS+VjFJVasPUUbTCuNUGw6UepLxeBUw03zGbjo7fkUNkUULc2BkFTEAQp7NCnRyDnhyYiamjjwCmgkCEDnVufJ
MTYnY5ZQdsSGYKgE7181WjsZ3BFghe44AZozFRhUBrVPJYFaLpH0fgltFh747PB5mFy5adZ6mQZMlEGOituE0SZk0DykLYfQPOW8PJTLFNg7DNvRLWoSUtG0oD/Vhez/W+vaffrfd9zLK+TnvD/eKrlttYrfY3h/G3V5M06EN6/UwTu1ySZ7M0f32ZrNoH3a3IYS079PjsLo8R60FOU2HcUqtD1orB0ZANTSgaZhKqU3bTlUetsP1ZQvopE6Pu77vhx9ff9eSrYJD0a+/+OT+/qEIrLqupOJ9ADPTabVa55y0lP4wLLpIHOJq2TRxuVwPWcfp0C66WmQas6o8Pjyyc4ZMIYTYUICWOadU8/S4fdwdJnXozcUQXLtwcfPzn//qk08uN5ebq+srmobDmImYHJUpS60UkL0DIqkCAATsyddakBiJAITmipSZMyIipsRcUzZAZBqHCQ1MwQUvylW1DhOYAdPh/kDEc+KRGJldiI0ZoioTmmGpWqV658BUcmViR0ioolZLZueYqGphZjpZYKSImQAAHI37rHMRiJkBzrokwmyIAkCwKnYaV8zmKJ0y8axNwezcAZjjUQZQ1eL8ixsoqEMCxDJ3JQCYATEeuTBPA+7Rx2EmBoyqBjTnqeeyEtQZY3pCs5xMLkdKjB2PpuEkusA8ds8P+uROQkQ8dpsQzl42O47yRvh0jI4fhym02XRz0jgQj5SfeRyzEwzJ5nQbPRVIz5KSzX6PeXwzINKqR3rUzOicB8FTyokQkI7ajM5cifnBKSDoYtH2VQ3VAzlyIqXfPZwv49X15cNedgf53V//5uWnr7IpBUfkg49Sy5xPV9WcR5Py2dXF+++///STT4Z+V1xaLC/HWkAdMCvOPq8Zej2nqfBJJTnNi6fRcB4oP0oo9jQ92uzzmhNrH1+Ek09mVvSO2tFRwkDT4wh8dMwBAAIRmMxLDkUi6dV69UM/LhfrxWYFZm9+fLP0fHc3AmIFGPvD5ctX51fP9zd39zfvQcrD474gNqFdtU2eHob9g6b60O9v+3GaskkhmpOAP9F5YC6ItpNiCccX8/hEPAmmR18AHMNliEjHrOP89Jzykcdx304zPD6JrvMyUqDTGH9Sj45DHzwpPEfJiHC2Q6BDFrP5LQaPqG+Ejz+QnnTXeQUjzmfhhqdrB4RMwjT1KZlmNz3ouMsk5AGBVEynKRseh/bk3E227a4ygncq1ZLhokEkbNrgQ1sFDOXqehFJF4ur9XrJgIJ4/emnm8vn2wGuPvuLZn1pDkGk7dgMcim+ab11iFQlA3O3Xo27EcQwVddyMZy9SxQaIii5zgvGeUdMAOwQkwjFiDEAE1mp4x5B2PkimZkYyESQGL1DwOg8qJqp9z7G5oBjSiOjXV6uV+tV+PG97B76Q51q/fTF9X7V3fTjuz+8++TqOZfpZ8+vfvdP/3jXF8LH52ebly/Pn+tCs9w+HA6HqQxjiHHZtBaw5HGfQMj/8f32vAt/9pdfqSsf3uze/u1vpuf7T3/5z7vLX72/uT1sD8+uXX78xi6+5POfgRnNLaoqs4MHaaar4dML+aRYPy0Ue1pKdlpOp9WFT0Pt8ZI86Uj2k/X1ZJE7Xbynf1Z40hjhCap2/Jkn7jUAABE65ln4MSQFYGQ1QVIDFVVGBkRVnR+CmTKRGMpRHfaEYGgKyuQIHaIqGDlGh+jIcKZgo4rOnjpA0Fl8AERRYLMqR6VMK8667fEXneHvc3RsLhlUs/l2LGbAs4XWQOsRBWMz5b3O+oKKVcPKTp3zSMhI1XQo0jATsIpUAa7gPYkkgIgmoIUAqpJWARWCKlMuh32ZSu06T0taLrVM4Nz8tBPCsuu6GB4fD27oAiJUqEUmJheiiwGBRESgSqn7x31NVisYoJgRkYIYkGMyNQOb832IqKfE69wrJ3NhAqJ3bMBz6xghzAXzR4cTMQKRg8guxsABzCqaeXLeATKpqlYzYIp4vlysWufIrIBJWUS37GLXuDpVh+zZACqBMrFKPTLcwMxEqoqI1lqnqYyj1kzkDXAumZtpnkRspkiEikxExEBkBEAERmoGiloFQB0yIyM7MGbmuT51vidDLYAqVbQKgs7HQzPWGoFnJRUQiRiYmdiQCFnU0ExKqVVSkTGVIrO2aFUqsTOEKseTMz1dIe7pbl5Fa+kVwDlXRZlCzWOtfRN8keq8KzU770JciR6K9cN+CiE0TTOMmZmlZDJg54qUQM5MragJIIYp1TRNLpXYLUIbz0NT6yEd9mbV+RY8JlEkUxNC8KCK5JsA0U0OU3Ij+VLz+YuX4+P99v5e2KqThjB0TdIy7nfsOt/GxardDWnRhZLhsD14pnXX3j8mR5wnG6oyQy5VpKpMjps2rEqNFKhpSYrVnGrKn33x6vLZdU73YvXt27efXn/66ZfPFxethMm3ur3v//D6bRc9OvMrwsCrtf/8k5eaxu76bP3yVRp7xElzH9jKIXPs/EIorIZcStWa0rifYpd9w8Mh333zmuXMRWgX/vKLszc/fJ+V+krr9cv393dgtWlc62Mlt3vYglPXgUH98Yf3igSkNzfvuhgd826vw1Tfvf79+Sqsr7vrF6FAvR3v1+tFnuo339/U/8/4xcsLgWHf95efrFPRCvnFi2e3P2zjPp6dX+0PD+RICV1sxNQ1LEUOh0HE8phqrWKC5AKxa8B5V6rkNBJZcBi8iyE45yItUVmqy6l6n5er5bJbINGyOZvKoenOmLQfH/tUigYXfdO1j7sxdq2JTVNOaahJLDaAZDKVNJU0EFrJVRXArEolQ1BwxFLFcnWuMCcfgtaa80RMDsmhVwBAUlVBIUVGmhuI7LgVNwNVUDURlVrL09vFJKnBUEtRsGEYzjbrMoyBSUoxSlpqmSZRnUzPVmd9P15dPu8P02XbcNvk/mFwDdYk9aDj4zTsxBwgem7GlIJlJkSSmkeTAOrzmF04khrGfkRnQT2z+eVCBdNUm/X5+sUzCGuoYmqKGpYr4eABNR+0TlbGvL03qxxbAbn/8YfVBTUXr/qciWC33y3bxT/7q3/5H/7637TL1eLycnPx6evv//ir/+TXTcRDSp8+e7HdJieaxvHAxQhExTOdby5sSpIqs8/jNpC2wRmIJxkO+65tYuPT1DehyUMOIXaRdw/7tvXsvCIbQklD2yyYDNmNeUglEXpTRxQQcTykcRQiDB2xVeeojsqE7fIsxnjoHwQoI5U8+hYhNuzBxaDoq6Sry3PVpWHDxQBtKIf9MPXDSE0CqCJjVQkhOt889LdGTkJrzQJimIZHMKKq69UnZqFCEcxqU5Gh2yzTdLN9uOXFYhE6z1zqAFhcw33Zu7TbrM/6YZxqX0zYYh5TGzfrZkPI1dPjNADUXMk3kWI4HEYlXwUA1yG0adj7tuF2Pezvp2HrGr/axPGxJ20X3QUKlwxtu6l5lLoX2TZ8huyi61p/OU7bgP7Q37XLs1WUu8ebZNydX4XgQT03zlNwyWqp14sXON/8p8PZxauSfRO7YmORqZRdoMtu9SdDGvb9bWwLsOSUcHRB2251IaRah8dx6pb++F
4gM1LXAIDZFakzfo6dg2NfoM7DpENXpdgcfz7mRxTAQAWIVU9FUYAKMm+8qhl7B4aBfZGTn1zmygVFZFCw2aAOgi6EppXY5HHyIdRJVClPdUgjsXIInrQAHA5jrYZIuehYAbQ6R+wcRZz2U+onGUuPPIh7PPQXamJ4e3OzUBe7s/uHh7u3N4+7u2ySjHKtHt3r95ORgHKq/vqZjGlQa/zCDylDPyjhfpq42rJb7u63YDjWPKTpzdsfx9L3JQpdvb+/2VyesY/jVNhqTQHQ+jExUZL66fNXb96/SYLL1bptGkLYDukx6Y/fvjnsD1+/2HTRty2+uduSKqN/2PeX6w4MdsNAgOOwBwNmWq5jGxugpmnW7LECO8/LiwvH8Hhze3d72yzCar1edIuci2/bPNWadTr0w+FQplKzHPYTMT7bdM8uLnfiPvniq3Tof/eP2z/501+9u//jrz/7JHZNnQYydWyEbGbjNIEBKBBiyRlM1Uy1xBgRLOeSckE0qSpFKrABpCEdxgmZTcGzFzOjWkoZxqlbNLnWh8d9E8N600TWOiREBEYgroZd8Gdd0y6UaBY+tJYsJtGHWoQhoEHwYeYlBR9ySYioYoRsoEY8M7DNwDFNeWTntSo5Zp7zL2gzHtTMVMl/HA/w6PIQA06lmtrsylHRGS5UtSIQE4gYwlz4eEJiHEvM7LSdBwBUMD1tJ+mYfMJjbGb+WgCaVaJ5ApoDOLOjB0+CAx6pTHq0MJ0sF6dp6Sh9GCoecx6z6QVP5+ZPko0drf12MsacBvk5qW1GhKcU1uknPGlJR2QSwKnQiBCR0YjAkD0RQRUttaqaC24+8Z6PiksVUJ0xqchoIsQEgCbmHK67xc+//OVvf//3ZczkOTg3DIdAVD88uCb+6S8+u795cEE3L85v74daRW3u99WmbZ33AbG/+5Bv310twsVZ8/M/+fx3v/2nOh5AUgUvvErViJ2AeERTZSQ76iQIcJRv7Cf2mWPScB5Bj+aXk/ZzStk9mRQ+vhZwcjU/TaHw09nVTmaZWd8wU2DGkoYX52clTQ35xscg0+//6Rsf2TULYnv28vlqubh57Fnpu9/+dn/7gf3cdwQX63OvJfeP4+Hu2bPr25v7++3WhzjW4vgk9xwBvfD0Uj89vNMimSfqj5a6+QtPHg97+gUMEfTo8jmlghAQ7cQk+qi9PeHe7SO5/FhF9fHDesTE0FG9IgQ+tvwcn0M6vsucfFlwqpc7+ZIQCY+N1JWI1EAUTaRmzUN1YRnXNt78XnIRLdvDXoDBKBcfg1s3vgluV8dC2pMlMGGqRdmwJVq0gbxX8O/ePTatC6qR8HK9kWyNd2eLMwgBI7x4+Zx8q1BrFiYCs5TH0GyIUZFciFhYQWVKOU2OXGyimnHw3EaKC2QsdWImNC4IpSagyIEFKgcGdsQsNaukWhIR1wzonAtNyRNiZWaVWk24DVABBvKxI2e1GB7C44ebScv6bHn97JKdg/q4/cM7WrTPXr56+MP3abT3uz2nVIb0pz//6u7wsOvz3X0/DhVNFwHPz5eLaFhxTLBsQpJqgCqsig/78jDmH/tvlg1fni2urrph9+b1b+XZL//z1auflbv7h92e/ePw4Xersy8q0Fw3M79gRxn1KKDPouXpxvQk4dhTbvGjsHi6Ap/8cQD0UTTC48rGpyUOp+X+UwXp9NenVX9UhI953J9cyYRks5RtJlqPP910rrQHdLMtkYgNZ+SOIZKYApKpMuFMz3HsDQDRcG4aQ2LyROYYCd3s9SRDUeFjExnOCrupVAUzI0V2zEcO6+xrBbOiADy/m6gSsh4jW1UIwJCJzGh+owYzlWqiWvUUrq6qUkvR2GGMgCTVslRQi4yKwEWriVpBQsNMVh2LzCwMtKpCWYgNEY19yX3ri3PzAUWUggbA7NquOb9Yv9nud/0QvXcOVSyljOMBQExNwKaUxinvtodaqojV+R7KlUDd3NqABgBiplIBjNABgp5OQcwUmQGQ2YHNXmdgmuc+dOwAmVxEJABtQojBMToimvLknMPZ9AsmaETYeLcM3AUnqt6BW/Cy84smOs+mwECkQoAAqugUUKyS94iKogDV1LRKTaVMyWpVTaagIgpI7I4L24wInUMkIHZzpsyYmEkRENgT8mxcm0F/QGCgMxUJSdRMK2I1BbOqVhGVieEo4SsgIjsmVEBgI1RPvioAoaCZqBnlrFOqU64iKlURqZRKRM45BTVTOMUwj1JRKeL97IUzrKWUYpRa54kQPDKREqvkWsuQDmoTU0O+odAJZI6IMxRAClRAE26c8863QSBv93l9HYZD75qDgkMkZB+bFXKsVYZh37Rd2zEiT2MRsVKSaZ3bFjx57pzzbhxQnG/jC+Zm+3jzuHtctk3XBO9AUUEeoXYh+EXrd4ccGmrNU0tXzvc73mbygdFQVBEtukbJM7QyQB5LINcfpuCgbVqOZ2C6u3snWNZXF3/+l798/uLTdz/+9v3tdhFLf1Nk5CWzYzrkYb9LrYuu4MPb96+++uT6Zz8/W3h4ePfw/TsfGbXUNLZnl+ZaF3d5HLGMMiVEGNNjXC0P44dm+YpLhEzPz1++++b1y+vNd398vViuZUrg2qmkcVCpQ3t+1rZheeGSPvCFme92u2zEeRwXwSjyy6tf/P3v3hDEwfThzcPijP7iLz57f7NvYjeMnAXefxjLtP/Vn756fmb3Nx+axRrGunt/05CPkasOy2UoZp4Ia8VqOc1VerWoAIMqONc45xGEEUXFN3a2bBxxrQZI3jcgVqfMvmlDcC40bbfsujFvV3GT86iKKU1Veg7x1curNPYp1Tfv34bYGrpSaykawmK57sjx4+O7/f42DVup+dDvc8lVVcTIOTVUVamCJFZrLUVjqKVIKeqCiHiHCgpaVZHZwREHBqpidNwmI4HVGaF2BIAd99wAahmZc+2l1hhoGLYB2ARQedr3jnzjAqiRQtodPPPDw02X6rsRfvnVJ8FqrQesNXifRocUQSylVAEdXcOE/XDnm65YwoAqzvmm1kdCFUFwUZo2XL0Ui3zx+fkmKpQ+Z44R2TQPhKUMh2n3A/qGmsa3C4vO4ia0i2kcQTLrhGRleoCdtmfXgf1+PAyIy+76V3/5n71+/W04v/qz/+q//1//v//Dr8yaZXvY3izjZhUW6PyUsrW+aRvJQzJZteHD/dvoF0e/J1qVjFYjQZ4OI2YfOzWoxUi1pMmxkEvIKTZYKkQKRuqoMVMwHMZBpC7atpobxhHRGAQdg6PWxTZGQioVixG6OpbsumUb/DRkAnGIAQvWfdP5QQMS6+SlHIZ8N5l/nMrj0Lu2Wa4uhqn4uPKuEk5gkgt63AjWsHimZZI0ERJws+5WFD+pZZ+mByBZdL7f3ZTYxIAh+qJJvF+vLvGg+0O/PGuCj4J2P22TFWp0v7sj2+eocdVqVc05l9yG6IisKpGy92CcBTGsYtxg0cUm1JxKf1vGnQtcte6Hauasolb11BqgAQGPmgoq5qEncli5DGY5oHfL5XNHHsTWcc0VyrBdLJb9tG/CBhnGaVh1X19tvuqH+
356523p7IxjE5yhTBjiOIVnV3+l1vTjt2qj6JjrqLVAveCwVHKjPCpkx8guzFcBEymi1EpEJmU2XMixW+SY80BkAVUqyEhIM0GLntiiMGMFGADm5jvH5FzIdTqVeVvWPF+PUgYmLyamCV03n3HnnInZe18yBd9ojtFBQammVi2JaNJUS2ycC21cOZomZ9UbTvupTyNbl1NumJ69vHp78w4hHnYT+YYY90OqRXyksE6gmQrePTxM+aAM/VQJahsXNWf0uF4uqlSOWAPud49fXX9Wc+WOm7bp+8kzBHL9cFhuGkXYD5OIIvHVs2eP294jewn9tFWrzy6ev7vZvX7/br1oP/3sVRmnP7x53z/su9VysViWotPd1iKHC/f27vW68VfnHTgY+4OVfHF2djjsV5vFKCkdplzNOcSUgbjrWkNtmo3z637qp4fDcrFou3Dzw/2uf+jixXp1uVgtmmXY3j0YUN9LP/Q1Q9P46Nk8pcg+8DTmtB2GQaYE+7v+fHN2/dnnzxYbz7B9szNS9MUIg8OSim8aZio5x+C9cwBWpc4HWf1hP6apn8qURA0RcbloU9W7d49TTlkhtk2epjzVtolTSYZaCerDPo9TNRVT9yMEZ4u2WS46YyDnYuD+Yfjhh5tFFxZtt1i0bYzRex/CUUWBeRMPjDh7Rf0x7meA6MjPvlLVimYVMQZngOSczamv4wAtiIhE7J2S/kQgMEM0xCLK8GQsIkGdO7zEQExNZyxFKJKJ3XwOR3PrJj1RVWddZoZm4NFAjwZoAoZmclRnZqCvHXWjp0PvJzcLwDyNzDmF+V/p9ClPQTM4zhDzKDTvNU/IpJN3A3860p+UjtP8cWItnxwxczbupJCcLCanLnoDNDABcERVzQx85FI0NAEyVBEAKFXYz7wP9DGoVFPQWb2AI+rMByLE/bAL29c//+qL+3cf+setI+eb1hRVKpaaD28v1qt087b17rK93tUBtQbnASlPKaWigb/+xRd/+zd/x9z+hx9v/tnVJ1effvb6979z4IbtIy+l7VZFBQlE1SEbHO1MT0/5USCBo7PyyWGkx7mUjimupznzGGfBY/zSnhQjO2JOj0AUOMa1TIHo2MmmRqSzVacf9s/W609fPLv/8B4LtqswjAcUgWz3N/fr1br18ebDfSp16Pc5j5uzDsnvdw8XF2skmXZbtvH5xXoY8++//dGItg/7EBiApAocuScz5Qeffr/58R8HbjKEjx+GjzLliaxuR3Drk0DzU03TAGCun6Mn5RNOkttp2p/dV6Z4skUcc4kni8RxfEdkoNNDPa0ywhOuHGez23wdHUUpmysThRDBCAwRClBm229st7978+H93w5v/7GMt1ozkq/QMDutSU2rZLD8ckmrQKypNzK2wrqI7tnl4vXDoYzZheAW8eJivWjjso3EYbO5OFteV2vOL8/Xz9ZMYprMAGr1XYdIy9WZmkcQBJW0B8MyHqK3brUQMdc4k0kZfTg3ZFCBUohYStKSpBYfW0QstVDTke9UAWoGqGBWSzURZC9Ggr7pzljrfnongLFdAVbftKbJphKbZkE4DYfpAHePg5qk3WGxpD//88/++rc/fnh8/y//6tfv/3h7u3voni33H3b/+PrHT19c/Keff/Z4//DYl8UifPhw//7bm03D69ZX0/v9ljxsls16uQCzuFynIQ/7rSR99/394f7h00+ewfhufPv3F6s2XF/lQ+twtOmBy1bdNc62Q2R7AtyflsqT++cntyN4WmcnSQdOmKrT+py/4smL9KTc/sRR9NSz+B8bl04muKf1d5SV9ChbfRRMZ2ul6JxXQKqaHZKccpxqMpehiakCMDEaekdmqEAO0UwAj2QwBSNwhmimojKTspkR1Qg9mKkaMxwDeuRwrq7UWZZWrZXZEbuZW6QAdIqW6fFaNpy5yrUoFnQeGOH4KVW1aE2mpoAiSgBkpiIAJlQBGblx4CbRSubQE6AZ1Dr3vgFhBVREQQRwhsaSFcxqzsgkNpHlaXCuidR0gFQqgKICBOfP1+uW37zfPlgteb0IkQIZEuRSpJYiMk5jrbWokKf5oIV4pjs6ADA0JAYAOXoZ0RGpHRPVZMoM2YpjzlIRcC4BNTjWm3EIhMSEAAREzqEhiFQjnmOuWSsBIoAgNG1YLDrvCM1UAECb6BeLGL1XAM+BCcmoDZ6tGBJ6RHOADqBiNbVcc60pS85Sa8255qICWqvMTX/4seQBUb13QMAEzgfyXpnYeXBO2SEwEDMxkVNQBpRaFInBMZojPNL6rM73ST3uB4zAkJwRGhKQQ3KEpEhMaKpABMSp5n6Yci42N34iqNr8RkaEVupHNsvHAJp3xA4N1JQZImEpCZGia0ueqiT2EdGcC2iIxpG7YTpUHQwmNK+Wgvfog1QK0XnnVQc1n4qNo9Xsc64qFSURu9wnmZwPzplwJNVp6BWREX30rgqAC7XkuWduLg2NocmWFXFxsXYt4K3VPBwOhyZwE7ye+mNDcE3wqeam872ky+eLL9zlm9ssNe4fDkNJ7NVxCCF6jBaidJDrlAsw+fOzjarknBar5Z/8i79YtIvH715/+83/7kwursLmfPPD9/ddu2wRf3z/3fJ6dbaKlCWQNK37/Osv9+P29v6Ad+80jYuzRjVpLfagwB6IS8mr5YIQ8zRUTh8+7AXlxRdd8dfX1//pD795R3/rh3z3yWetC1iyO7s4+/Bwn4ZRIO93t6p53zM6BJ4rh/Uw9ctVk4YJhvL719+szs52D0N/P/q2ef8mb2/fROfWa20W8fxy8eLZs/u3/e//5vX5uXv22VnXrrCOUrVbNZuL1W77uOgCO3r/4/u79+888WLdpZxn75nzviYpVcach2F0gOy9FNg9ZkIMbYNIQzqsl0tuuFt1i27hQtMtloq63lw7dLlqXC4ECuuK2W/307TvDdCFJTV+rBW8X202TejGx+327ofdw7tpmEqe8jSYiapKhae9toEKFEMgQhVXcgohlFq41qigqkhEODfdKIM77fAIAMH0p7VntdYnkWj+r8j0eH8Q0cYHyXW92kyH8bDdnS0vInXR+5xT42nox5QmAwvL5fXZ2VVo8oTdatP4MG0fSz8qKKIwA4OZqaqwOcetGQZHaCJlqKBIRYtKEY4XITYyDd2Sy+HHx4MSEzdhGG9KOoDK48370EQoGdHiqiviMTZxeda0C24uRKFpA9Zp7PeyT41ncc169enu/jCoi83q+vLTm9/94/PPfvHPfvWfjfscnO9W5/cf9l2MvU6LVXfY79m4DV3uH0PjXQhaZS5ezrkQkUpdtrxPBTCYaUlJkbRCaJuUB3PmXGNATCilRPJkUopKtmrVEaTUm5Chi21kBnQKnsdpmoYhUGiadrOMNQ9GFhcB1AIFt1wCDnl48KGdkHNCappFdz6WO5DSRNcgXTQXYwVT26wX2WTlNkO2qY6ESgDZKtqIMHfRAVPk0FRO47Af8o4941DU0jI23vnBECSN4/tFiLWU2EZ0vqZ8yPvVes2l9NNtrXuyxi8uq2M1DQGmPDbhXJKVeqgyOaitc3Fx7pu1
VenTIXZY6lDVcQiPh9tnz/7EQEAqg2E1jlxR+roPLpKnaF0tCXxFhWW7KuoHPbjgyRidq9tdZNe0S8Lg4qZk65p1WC5X8YxL1nxYNm0N4bAb1uvOLKd+j407X31lkm4f/g5oCiufh0cm9ogBWkKY0k0/7RkgLher9fq0LTIzc87N467DY593lerZHV0YCAAoakzHAIWBqqGhEbGaKQCzQzCUOifqTQoBErGATjUzOQI0rYAAZAxk1ohp0clRE0NTpIqQZnU+xLbL48HAKkxo09mm7bc9qIloGoZFDISmUsjRqotkMGUjpT5lsUf2fpwqq/W7XV/qetkSIaEO/bhaXi7X68ftVmwCTw7xsNuOnNfrhRmA8XK5eLy9t1qChyw1OJenujw/29s0jhMjLpomDeNYKqJNaXIUhsOwuVhrojRMeTiEbvXjD+9/+4fvLi7P1uslmL3/8O787Hp9dmGs7bIBKQrIgv/0v/z1hvFnX34y7u8BRabUBLfrd44JzO2noSRhOranmpHnGAL1u8dcbqiJbePQpjJNhP767OV6c12q5CrlsXe+GcZDv+sX63UhqLUYltvt9vbmcdinrDiMqWtstVqPacx3+f3D/d/8b/+eo2/joh/Tv/wv/3J9scoOm9Yb1pQqMBzGCQcex4yMTRsO+z6nOo1Dt9og+8O+R4XD3W7b11QgNmHK6TAMVQQUSk1iVaC6GDVLlVwMxjF7hET22A/ubptrJYR1F9roF+2iVnx3t5O7g4pppeWm/fST68vNQmomg1xS0wYEAwIidkaAUOdDVBVTcexnJIznCEZmqZiKAhGCztsnqipE+LFKGQEMRbUYeCIgckSlZiQ3G3Hm9MQRoQkAx8410Fk/guMh5ayu6snUc8KoAgDaXDBzbEfDY6jJnkJn85DzcfxRm4Gb87j9NEKD6bEDBU/wDTupFU8Zs9P/ARrOM/ZcHw1H8fcodswuEXuCU59OM+dxno4Go6cSq6M15pi0AjM155xrGyT2jUnJZhacExFzvNwsVWGakqrNNRWMgGxMrKKOsYmeEL3nhzfvr74OX1wsdlz3Q8qKSPT59TXoUEcbx75tw/tvv6VLvf78k93tByauKpFCP00P46FCefXl58M+/8M/fL84f/Hzn336XNLu5n2bD7u7H9b8eXWhugDHqlScHWGICCdF7PSszW6HeYZ8ilTNiY+nVQLHIfMotMHxGTqOqHAUzj9qLQgzqkk1zL2QVi0XLiPn8a/+7L96fPfNsnEWvAv44Y/f9oedojMXMWC5eyylLs/WLnAZXMn55t3ri82q9A/TmBex2WzON5vFN9/9+NUvPvndD+93h2wCxOAcz1U8CKiATHQcPGfM1Mnec3SbnRwZpwdMp6UwH1YfnwA4sR7nJXhsOvup0HT6t6NUepQ15+JtUNPZUHf8XD35lj5qPwBGNH+E9CQVHFU7QPqJi0sNgMhUZYYUGSCYOCuYd1be3N7/m7d//BvZ79e+bdpYKwXvqihoik4DVlEFQAc+ODhf+GlXarYFY+fYoSvZskFY+q6JnoiQL19erddn3txhquur9tmry7jAwzaZWrtcBh+rVgEFRypVVX2ITCHnHBYryTvfrmyaRJV9CG1n7A3BATG7NI2IhJ4BPZChI+AOYisiMk1Wp+C9VFU133qDaiiirGrDcI/OBd8RhWoV2bMLGScgh1xXmwUhbLc9KZdBxlKDs/PY3PbpH/7hj//8Z1+so9zdfvjVz56/v9n+h+/udn3+/NVmWTKn4c9+/uXDw/79u9sqACBEzqHWkmsC76nrVi8vnnv4fLvf3t9/UJvu7x6Wrev/8HfUhs3X/6e43lClrGnc3TXNJZxaHQEAafaWPd1Hjq/wCZz107356S52knZO2s7Jm3S82f307v1RCJ19cE/pR3i6kJ8u36fPPt5t7Sc9fyehSmFWlgGJCNBMZjfg0SB1rA9wzGjAePz+sxLNSPMJAM0pUBWoJoBVvKITUAYl0tkcNM8MyOzYiQAhH8PrR1aGEtqJbTz7OmX2EB1/VTStAiImFRANxcDN4w+oaD1OPypOFUEUzEDFAKQWdh4Z5hiaEhWEQGAgYjZLHArI3hF7IyATNsVSTayUmSTPRDj1fbdaWs0mMPSFsAHy3HbLs9X15ebD7u3dw+N2mparbtOy1jq/K5aSTQGQvANiAhM8Yu9gZnJnMRAlOpVyAsqcvANkQkQylRAizhWcCITo2RsgEwcXAJCJ57dRh0zMM2SPAb3zSAAC877XI3YOO29N9ARA1Txqw24RQ9M0uWqxEr0LnmNDlkc8EamciwC5TklM1WxKdUolTaUMKY1JxETEsDIDoHnn0bCIEkLOFRAcA3NxzjnPzpGPjXmPLioqOTLIKCgznIs96yyEgamJiqkCmpkYzsWx8xr96BCl451WVWzeexQpU5rGaZhSAlCYo7tgjt28JohJVJ46YY9SERHVVADVOQdWCanxKzNfVMw4+qUBMPuuPctlIKNSkUMEE8SWyFcVEnNATB61gjBATWl0TRgneHyculabZtc4Kgq1qG86v7gGJkBMVVSKllpqIkxzeNUMnItmRJYRwdgBWi3F0MKiu+QX/X67u7+3pGbVLLuqDhfrhRtHX0SnrGUqoICMpSYHftXFF1dXqzXv9ofhkKbx3rdLbiBSsz67Ks4edx+a6P4v//f/NsvD7eH9H3+7zzfb68vzwExTGh4yVtluP9wchmIG94OMvcO6XD9/9vmr4M32H3wuMYabu9ssuyn3behiLu3ZWZl6YJ/HnimY5+CXxBn0sPtw2z3r+vr66//iFfl/+T/+v/6HZz5evDx/fKi7u3uCcbkhgBYR+52q6H47ugBtEzzbL372ougYw2Z/yA+325L33pHE0Lbteul2h32q+P520vfj6n2/2gxnZ5vofe6n/jbVPF1dXwLjnHC9en4Zoyfmdnn+/JNPp7Efh/725n3wXgFErVmHMtVu2ZxvlnlMu8NkwDkXM6y5OjIO7Dk0i4VfLpTYkGpF54IpT7UqItXsAi8Wi1xSv9u1cZ2SDbUwh7Mzf3kZJE1vv/vt9t1j34/oNHrO08xet1qzGuOMxEdAdghKoKa1lAkdhdzkXJwrUmul4gDAeZu1ezAwIeP5fUj1ZIuYT0vn04mPR7ew3289Y/Rh6AsyS/HBdLG4vlpfP94nFAroF861y4ixAoAH2W8f1mcL4Yw1GHisIrlKlloqmIHWlCqHqSJ7H6a8R0RCZ8YG4lxEIJNigFIJTQd5WD1/oRQ1ZSuQRDGeucVy2b3IUsp+GB8f4HZqWm0Z9h/eJgTuNr67GILvll1cXBI6G0ekR5F01l4USGq+684hV3n4cHmxefv25mpz3S3WHvDV88u/+/3fbVbn2Q6Wsg/LVNH5EIiG7XcX55+A52GqZOobh+AHqGk8LDtnYIoeYkymoe1K/9gPAykD4lSnGFtHXKpW4FSdeI2KhAJmOsPrJlHfVMvsELtAgkISAwP5x/cfmqZ12GDrEcZyuIsX18uzaw7dOI33N3/MtUKzlORIEIUvzzYTVaBa01SkGDJwUMv
FctUEdU+iE8SsLvoul5SmLai0MTKrqGWBkkrEdrN5OabDcHg3TVtiTiVjaWthcswkWXoZ+9ZHUHDkYTqMuUDwPYwAZ95BnRKjaxybYc3FxqlpYtO2Ux0QYNGGBP4cLnWawAMzo9ToA6CpFgRsMCKlRLkKosZFc7YIq4f9Y/AVakqTdPhstbySQmeLy/vtmwAtOBRJjiPitGjbh35U5iHl882zSK5an9k4RNSw2/2OqC9a8mjexRgXBNqETmxAGxiydw0pHB5381UgJz6EqjE7QlQVBfPsjvxax3M+PJcCKvVI2yVVOfm/TVU8RWavVRBpZkiZoagQUXANAokWYj+TbGfOiBkyRVAi50DNOe9iMz3cQxWsSi4smpUYTeMBoGpNVVpCEE3OhVpKGRMin7Vtj/XxMA3FDmlsu+Cj82kqKfVZK0DrXacYu7S/ufMCaTrESI4pxHa3H5oYANBy2d3eNIsOxdbL9dsfP4z9ENCcabFl1kSGANx2GyJ4fHwPjFK1DeSY61hDDH/84zcvX30G3eLf/G//+sXq6uxiM5Ux7+tnrz7dD3tlf3F25sDubh/IN2W/23777tOrzTT2xNYgtmeru4cHcNY5t3243aciRaODWnXJwZj7NJYKjhhIXViIyfDwuFhtui42bbMd92ZmyiZl3z8MqSzb1VRKmmQYD6gZ0dourJcy5bI9jLe7+v5ht4hxsWxQnO9Wl5tr785+/s//5NWLzZt3b/s0EjGh6/djFnHBG2LTRmYqua85i6Lj8DgOtVRRDM6XXAbBvp/s8dGsCoiIoqGJAohvOd28jezbGM/Wq8tPXqXDJAquW465TlOaxj7ncb/bW72Li7bp2vPzzTJ4K06rfvvNj98hXF2uz5eLs02H5BzZTN82IOcCocz5KVA8QrWQBWoVdezUEAw8s6FIlZkt6sH/pMfqOO3OVYAiJVBwTGrm5pJNNTbzzCriiLKoASqawIxXwDlUo2rseIb7mM39zh9PyAkITec9H57MKYAgqnxKOD31vsORYgFq8NTO8zQf2dPoNL/HzbqQ2exkITx6NPSkWeicorCjAPTTmrZ5IJ/fJ/UkXsFPStwIUOZ8EIKBEQAwqarUimAl4dXVFTLllPYqaBKCB6B1F5zzpgtRS2PKqVSppQiiBe/ZexWN3jXBr2ODaa+Inz9b/Phj3hc7iP5w8+Hnr65TlaqyXlO+K/v3r2njX16efdjvDXDVxZfh+e3d7dvv3+bLzS9++Vne777/zd9crZr1+eWPP74l71eb5d3r324++YooiqPZWqZHqQw+OhqeYiyGBoonc8zHp+goAs1j6qmHy05pl1OqxgAB5IQ/Qpv9a4CBUVFjIElpvQiPZZuHD//ZX/wf5eEDlsH7+OHt9xA4YN5sYnd2JRxW3abWcbm+7B/3+7vb3c1djM0XF9d9OtRhXPju+dnFzfbDh8f7XEGAQPDVs+vgwvu7G0DN9Wirnh/AXCHkmGfNERFUjQjmnib6j+0XMKszx6kbnkQgVVMwRDqtXpvVtiNA+Il8fpKUEEBhdv0cZU062iBmQxc9MVTRjvximhufTiKq2tHbNcNdAJ+e7eOrgEhzWJPAWKoOH37/t/9P1PH85b8IryI/vmX93ePtD1pEaylljC7EyEQhlxKIHx73BZx33MYQHV9enP344X5O1kFl4rJaLNarxWJ9HZs15ro8P/v86y8RpofbLbv2/HKdcy1Fp1IotgRIjhw3xBEJmR2asWts2jpWJofERKymqNn5mJMhIwEbNTPDuBTRbkXIWAdJe0eu5kwmSMzkRXFMtVlekkxm0IQzIBTQUnrPTE1XSopmeScIzntGsMMwdJullcmTfvnJ5ePv3//hh0NOf/izr1/8bPPZzZubF69eNefyzR9em+rXL9ZWpvub99efXMcF/fD6Awk0i+Wqa7UcBJHE+odHC0O7Orv+9Vcb+ZP719vh4UP7DFY03vzh71y4WL38C7+50Lyd+pumfEa0mm+CNHfknZydR8HlKRT5dNOZr7jjRv1JPzoxSE/X7NOdcxa7j814p1Tb8UYGx4QuHq/U487/yZ5pR/DWHI/8j5Z/rUJEiKRWj0FeRENEZNW58rLgzJRBAlNEFC1qdXbmIx4HbiIGm4v7SIyLYikKzETHQngxI3SIbj5pCMQigojg2ECZHDIB8Zw/mp86AjKogFClGgBoRQUQBTUAVS3gxCjY/EFDEDMxEUU76kSEs4e2mAqogmNPXkCIwDlPSiJSqrg5iceETAYWiNCKohWwIiTZwCqBG0cZxxrbSWTI26K0bBYN+7BcrT95fv32/cP3u8cpS3+Ytg7YETEBmWllBiYCNWKwmVOtIKKKhMQ669aIBsZAx7MYMCRk5FnFUkBCMkJiDwhAxMQGKAAMqKree0YERBE1IGKWKk0gIjQDdsEjtE2IAc4ieCsqhIpdEzebrm2i92xmLvi2iSG6EF1lRNWck6PILoKQwUGqVtEp5SnLMOQ0pjSUqqqqzmEkaIJ35FWB0EyKygyfolnOEBVQQKssgk6cC2ZVCysiEaNjh3PZhonNCcJKoGoCSIqGCA69ERGhYydmAKJajjdeIABSzSmnkkbThFprqWYqIse7tKpjRHZiHxXTUwNalafLMLgupVG1EIdSZM6SlTJiFUkJTUVMahYsyy6UOjFHRFi259P02MTYeKg6LpaNIXHwk8rNY7+54H6c3HZPhKXUoT8Y+fbsWdstCDR2WnMexkm1pFynsa9VCdGqes8I1QyB0LetTSRS0NHqao0Yh+1tkhQdpzRBaICxia7xWRQc+8NuT0iNw+3DLgv2tWaLTaQ/+fplt8Bq9eZmO2yF6m63vf/lr7/487/8Z999+5vb9x+ib/7k1ef+02cA9Hj3+LDr+/djreV8vQzcjKWws81yvVp3v/7zXzcx/u7f/8/Pnm9qllpMSFTy0I+aLOUkzGgpNH5MFdGz6wzcavmSaS9Sp4d3Iup92Xy9+i/+b//l+3/4nyU/EuTrq+b2Nv7ww4PjDhCW576Wgl1g5/r9QTR/+8ebtvPLTUCEz7++GIayf6y7BPvD0GjTtDFEQAhN6z0YO0fR2jP38uLlYfcYXCBEdsw8twvFPCUxaZaL0MVLfl5y/uSzz6fD3Yf3N7thTCkRUht9nnJ7tlyfbVKy4ZBTrt4xilxeL9tlNCvDtLN2hRyqmSMcFdvFsum61XIBtfQPN4/3H1RNYwNh9ezTl56NIN9//+Ptu7fTYULitgsp9eN0GMd+SnXoJ1BgZACDmWBiBmpWFbAaM6qWkqWKqolUb97A1EQEiMxgPryawaHHnhXTj858s2M1+PyfqkJwWWuzWjD4lAYWi+SG/eGzZ58HYHSotS8ldUtPpo2jVxdnQSoqAPicRiOMTauiZsm0iJjjWCWZWYVixNM0ONeKGDKqqmMGDoaR2qvNi58b9W7dUiTXLLk5M2TwruTaMrELzgfMBzvc/vAP/454Wm/Ohu29yeTqbqpcpmGxOQfvGASgkptUH8CgqvDibHGxTru7Uh6Wq/W2N3KBwpJleLa6oLH4UlRFc2KEae
xj9NOhShnMIHgbamJBMuh8e3i8W8XYdNE5Uu9rrqZy3m00ZawQgoPQqWmVkkuv2Cm4Wt3aX6fyhj2O46QIuRwwrEL0HlYhXC3X10Wy7EuCseka9iFNaeE2Uiu6RlWyJIjLwFyqRwvanOfBnr/cKBy2w93D3f0kB9/EQx7BxwTlkB8b30VaVSCliujiovVEeRyq5CYwZjZVF9ZJp91Uq+yD75qwoqXsDrccfbu61rlumvI0DKpVgcnManZFIyuxHaY9ez+k1PrGN4tAXFUUUSCsl5fB2eEgaIHcmuNC+kFF5z7OpmvrWKomduTYDEhFtCRFyVqLcrtudtMWnZlW13gtVgGdb5BdEmqayyrquvDh7jskqlzLtIOmNVBP5gjG8T7ZMAp6C1aGNO4WF66MebFYStYxQwytwrSfHrqWu+AXy+fDwwM1pwAaoyI64nlsrqJzyp2Z1UxLdd7NhFaaZSEwNQvOI6CYOmZRJGQwk1qerjVCNjYT8S6AiqqYZuBgYCJCAFUqETlkZCxlMEQTLqlgiI6UTHaHMQTnYpeyCo4uMmPcPryfKC0XZ8G7rosP2301ALJu0Yw63e/yJBNp8c41MWCZplymYaqxxNXoOW7HPUVfylRBHccYgvdEYOCYjKc0bvcGTMvNugo4duwo5SHE4AH3DwNo8T5ard55F/36fFMNYZpSSWdnF6zyv/+7f/t8vb5+fjFMQ9O1i7jox5IKLtv4ePPo2a2WF6Wh/+l//h/Pr9fLi2bY7jaLiGT3w84tPIOB6pCziJFp51szyRW0iqECAgTzjS85eYSLzcVUtB9Slgeg6CKTC0i2XGzO1sE7p1YPlLvFVRoex/2hYXrYbd/vSymQxYhYidbnZ2erzSGXlPuHD9skDz/+Lhz6nIwNuG2WpUy1Ejva76ZuHcdh7LrF+eWzu/0jE62Cc4586PbDfrvdofNWstRpsVwG70V1GkdDRRPWcLF6frE+jw19eP/2u/sfRK2KhXU2ESYNzjlqPLFILib7/X7oDw0gIZ9dnHXdqqpMWd7dD6/f3L943p2vuxC5bRoRMQRmNilEAOyrKpr4xs/iiCNftQQm1VKLEPM0jT5EA1TRJ8H0yPKl+c2HAFHURGamr4mZIRSp89sSIFZVmGnQgHhEYAPRk9+EZguHznwcVXZz7THgsdH96M5Qg2PY7WTvMDxakAAA9Ih14Zk0pIont8ipy9PM4MjzPB27z9aW0xudnfIVc5XzkdMxO01+Gn2bWUrzX55IMQQo9tROfvruYgSIjAamadrffvBtLGqL6FKGVHXROOmH2IQQvDpaLiOtW2RQs9AErZWYlzFoHqyIlWJFkshNGtuGY+dqX4rq7fbw6YtnQ7893O6CuZCnN7/995tnL9effDkp3d/fL7vm6tnFJ58///u//evb9+tnr54P0/RP/+Hv/+q//T+vX3z18MffePCrzcLGHUNTmwDEojof9Z+A33CyGZyeJQM40XZOoCY6ftI81x5jW0YzyvqoGh2VuOMLggBqOJcfVSkFVusGa0bLue91vP/87Px5u/zhu28kb5FxeLgVheXZZr1ZNm23HdJw85ZQ39++qVOSnDvGzuPdh9e+jc9enL+8fv762+9Qcxny+bPn20P6818/d8F/8+3v6rtkiEjUBKemTGxmLkYwOBK4bAaNGznSXPjj8H4ycuARsQsnGNZTVFJhziYrgKHNiiQQzq3SYDMVEmzuvZsXz1wmN393nem9R3lI6acK1Qk2DwQ4N3I+jegAhlANUA2RmGCOZgDM9ggDZLQM07sfv/1Xz67Xq+VXP95av3uA/n572FHrrXAtxflmFIOCgW2qMAxTyqRMF6uzxyknwrGiuphLvbi8GA67xTI+f7Z6+fxl13VJymK9fPHypUPYP+4pOO/blDXnDGbgYlycAToXo3OsVUUKhQDmau3NQEQ4tKUKmqlWR7GUUquE2I79aAIhBkMUBTRnebS01zxhs8o5+9CIQBUAYt82THXY3bOL6lzNyQXnnINakGlG0UfnC3sN7XJtYnDop8M2dS00wf3Lv/w6/O79u4f9v/7N67/88ur59Yv7+31zsfzy5199/7t/LDn/7PPzZec+vH79/NOrr768ev/j7VjFF2kwgNjls030RVKpQ95td+76T57//OvHt2932282K96c593DO2yuXQBuvUJfy5Z9hy4gysmN+XSzOcmQp/ve7Eazp396as37CQJ7FnSeqFizPHRKjD6ZH58+82irPN3dTi5AsBPbCOeFfVqApx+CSN6JKJgCkmg1ECZ/zJySHksYgVSUCMAsSzkm8efljwqGRDQfzxOighXTojpV6Iw8gCMHBABiRIAM85RuyjNyDskQmXiGQaoZoZMZqWYGetykiVUUOUrVqgpmWthUGAAYagUVUDGRKkpqVoUJCAmQlR0gATI7D+QIGJnAESiBQqnZVB0CVjBwQB6RHNXotJYqVmEGyhf1gtNYpmGUWg4PB+UCeE6MTWg267NnVxf3j+M+11KkT4WCMyCRwowxeMTKxNUKAjjH7hg1BSvzFtQhGiEGx6A6p8mQmGGuXQAEVFViT+yQZuacMvs5IhN8QCIBYyZVIecM2DlaNq7xRBYQKBATWhdDywhSyVwbove06ZoQfVULzvvGxSayZyR0CFoy1ALExEFsMjUDqCKl6mHMu8OUcs61gqlDYkLnuA2RfUxFtBRUZiKeQVMiqkKKWcQEsaB3Zt4gMCAhITnnKZiKSRUkVTl2EhjOohkYErCCsencwnq6LI4HAI5RagUTrVWlEChoJTQ9NXoYAtIMW1I7Ge/go6sIgRtfpE61qBkxA6nzzrFJndQSsQbnxDJ6bHmRc0GqhbMKiCixG+vkPBcZmuDIAxJ4MjJxTQSsqYgigANALEPRiuN2p4LTYe+7BhCk1CZ4QIretZFyLtOY+zRMWUsuYNZEDsE5T943fZ/TlNr1pfdhd/8OUZqmyaV6j03nFsXnWti52HkAvTwP6yWlAqCQU7q/n+739z7g+TI8e/nijnutw3//3/2VQvm3/79/pan84heff/rFi2/+8bsisttND9t9QK/V4jIkSazy4vnFp19+WlIax/L69XeXm2bVdUQieeh3ve9caC/YdYdt7yBM+wnhANACEAI0bNMwbqs2y3VO7xcrhvQw/viP/vqr1fPz62f/3Yfvvnvzzd+b5W6hX/3qWRpk99CnXAHGrnE+uDYsCJfTWKZc7m52BrWUJnb61dfhcV/7A+4fD44bZiO0y0tw3rrONUvLqe/7McRWiw3bnpl86xERRBippsnmjWeIQH5zebneLF99+vO3b99//+3v9tvDNOXYdCIgUp2js6s2Ljsp1bE5E8JEISCVNorWHUU7P38eXON9rHl8882baZhkSOfPr5ZXz/xq0zTe1XT35u3+7nZ/98E1zbJZpzJmTdvH+34a8zioQC1Wqx73MTTzyKBInnfChQrl5HwoteScm6ZRA9Ljjhhh3nkzHKkCpFKPNIc5ECyqc2izyvEqICpVpE4+LGpNZZqA+O6QNk13c3tYhUXwbtWGy8szUPCBg
9mw3VeSF9cvgvNaS9plBVWsCpbLpCWDFcNa0ZXce98x+SKGZlqVQCUXwBZNZXvzAKksnLcX6xdfeupgQrAaogFo7XcpHaZhrHnUPHUXr/b3b26++2C1xmhVresacFzLUCSTDE3jMRcXiXnKRcuA3pNzIvmwPnvxWGs1nhIQmw+UxkekDNKjcoxu7DV2a3h8X+uBkUodkXSaimfv2w6mVAydmuYUXKMioNp062H/CGaGrFY4hFR7InaAWgxJR7tlFqZIhgLmQ7vaPF93sx5XH958X8qkNQNSu15vUc82m7fb79YX0bcerBAzdudNaEXUjMJyTaxvv//j3YfXFfKGqGUix2yheDLRiuhQAaFoyTmFuPRuKfVBbaSABjXXQWpe+kWMnHBImj0uslZslpiLggo7tMqMTElm/VqBiINfB38drE+lgPkGGnRhu90tu2jMeczIvNmctX6Vhz2KY7foVo0nJnOZa5G6G3dNw00AHUtOAG7RrV6yTKOOrvUsuUihmHwXfvj9Xec5a2KPYocEoIp1yh64mCKs/eIKHTlsHh7erpfLsd+GGIhHsd399p02y3J4I9YuFz4ddj54wlDKwfFqvfh8v/vtIT+QXwbaiLTLTetPQF87DYeEPNNYmNjZU4NEQDUDkJrJETFbFUJUNbVK6MwETBDAMZc6p+5JREUSOzLUqsVEFCz4Zh4knIsA6AgJbQZJkIPg2t3jOKbHhkGgiigj12JQ1YCzUU6T1KFbxpx1Nw4h+Mb7sFgPaSxTMcBu2XYVDuM0jjU4q0WZrIrVIgdNzTicL9oxDaKqUkQqsVf2aNWjbVZdbFsxFDNvYCKlIHM4DCNgF1cLs5wsx4ZKnQ7T/rK5ZqSuW7LH3c2uaVpifn1zaxNcvrzsx4GI18uuf5hSTs8+f2UmLspisdx+ePhX/+//yabBO9xvK+WcEA86ZtCOPEvZbg9VlAA9kQCPaXJGwfs2NCUP/ThBTk23FlMzE2AX1MVglmJYeo61jrFxKpTSoYI1MRx2h/122/fTj7fbxzEBMqO2jV+tl4Bwc/v4/u0tIvoQHTbbDxNfXizPz7jAer3ePWwXC45Nw47XK/f4cJCahrHWm9qsz1vv8jDc3e1wtpGjNhSxwZSw5jElYSAG67rzXHKusNvt3ry/qXnsYgNIc+I1PZpJZTDneXW2eXZ2liQVsP4w5lSyKVr98ce3TfsQGyRXoluwd/fb9P5u+8kn15cUuxBNS84FUdmxiRkxu+AoAlDRoWoFSS50Ki7lROC9c3N5woychKN/B484IxNTSLUAGhJpqcBz+xqKGRNWVUanc1yG8OjZOfJ+UVSZuVSZ3QmKaDi7heaid7KnCec058zT+mz5kRkRc5qjiAhVj8fuM1UATM0IjwAXOCo7AGBHx9D8dU8ZtePpIiiYmdWTNvQU50A8DWx2jM0ZHPNxp8jSieWkRxnYjgXNOHs8pCqmrFWhiaS2aMLZInqoULJJMaht23rAYDOlVcgDkTaMFHWsU7OJpdJhVENXJmk8LBraDvKwHz3cbJZxGHMqQ+iief7w4+uw6DbPvkiprDbtw/045eGLLz799rvvv/j0M1a9e/v63W9+s37xcov8eL9fLLBt3O9+/M3Xv/4XD1PxsUUAmRU3RFUlJAUDO3leTswmPJWc/YQc9ZRgsRPsaHYp0OznnwUU0wpzUlGqiZqJi7ENHj2kfGhDnXL62a//dNu/KbplmPrHbYywaGOaxn4Y7j+8mVImAxNVrQQYF41Ufbzvz55dB9/UQW8+fDCEzdmld0VRzy7a//Vvf/vDuzdt47tVU6qpKhERoFYlIFYTtRDcarlYdw2qjsM4lpIDl1zdaUw+4k/o9EvOVdx2ksjUBOa1qIAEBgQ2r2Yzg6NySUfzyJMIZGAIR93xJ6PILEYSACHSKWQJMOOOjI6eE9P52G/OU9iMfNEZTQ+GgAKaSEvdvhkOb/+TP/uvx8dvX3/7u/G2X/sylA+73etxPAxpBHJTgaRNNdZcxgIEbMRN2xUQcnZ+uTKFXlUJahkbJxebLsQuNAvTul50X339JZTp7ofXHNrV5asQ2lQyIrTtyocFOFSkOo7ChIAqBjwLZYTkiVl0ZqgroyPFMo5S1LFJGmqpbrUw8MSeiC3vrGYkNJpbucmq5JJdFwkR8shGoWtzKT4GBGRURZm9LMTkQ3SuAdQVwUxSy1O+2W4dwKXol8+XUtIP9/n7u+3mVfj1Lz///es3Mfj/9M+//sPvvv/+ze7V9eaTzz457LeE9fIsjAXyWMZU2NPt/tA09uJ60yDWw1tu3Obz9dnLP33/4fP+u79lelyfX5jB45u3F58+xy7X/iYunldR5PkmdVwUH7XJk/oKHzWe00WG9FP5Bk4xspMa9CT/wIlydJIWZ8XTjhksfPqWJ4kSPpoB4Sd//kRZt6P6DghiQsSnOM88QwihE1A0QUIFmQMLx1gxHIPI8+VDaABGZPNQAwgKKAZqMNtRARkAgdQRIFTPUW2utq8GaCBsBCo0/2FoMmeG6/G5M0QiFTGRmbIDilqPxc80M4wETMGkiqgDqDXzvEtDBQZFQQRipwDg2RzBnFaqlKEyWAPFmyppYDKsNC8qVVFDdKhap6Qp5d5qlX6/m2CixreL4KJfnq+vnj9fv3+s+70TMaVZOBZwBMhGopaLVjXPLEBiggZzA+nco8KETEpkzhOzMzMibybMbvYO26z4IwhK4EBERI6IAYnYMRGBApKPwYyY+GwRL5bUhQAihESAoHLspmNXq7gAy2UXo2NPJAgGTdv4EMA5I2D1WY3IgSGWLCWrmRjkUlOpU6mT6mgqYMEzEnLjYte4EIgcmaEQeednDQysSlEgAxNDUDh6iDVnIe9d8AGMEPlI7lMAQrUKpgZiZADgju2Rs0kTDKsBMHlFm9lGkwiBoYoHbZiiY2IkJDmy2OX4bnV8K/x4FZ5YRUyiwmiR2aFDw7bpUjoQCKLmaWAOsW3VEFA8eyKpyAzOt6uasppqNvLufHmh2kfnoZZSqzK0XtgKmdda0KSWsmjbYSglF8pTKsVLma/LWhUInCMxcc4vl10IzW6/A8W+f4DqysRA4MmzYcssak3TTqGRNCAZspVcQbFpuFkEMWTvELRtbXMWiigaaW23D8jBjVmZ+O27WwW7vj7/4bs379/ebRabT3/1ybd/+Pbb735M41SrMPsQQ/B+sVieXayJ7fpsMTwM3337ffD86tX11dUS85Br6d8NDrDpWowczzfULPphzCV1wcdm4ZpQSiIKSiCa6mEK8bIOuKN96KTmSSSItbLeXH328/Prq+3Nd4s3P05jKit59jzcP4zDQGkUBRCnVmRzFlfqvN9ISofdKJV7y53HsyuCy2YaSmxDzSIyBG7I6rTbL1bni/XKN40p1SoUHDs20VIzh9i0sZQaggPTWvI0b0kjXz379PPPv/7jN7/5wx//uEtj1zSeHRMKaC3T3L+YpomZakq16uPdwZFDd3/3/u18s/XsfFh03Wp1dRFXlwZBijw+3uzevC5FrEgIrfNNGfeH/cN+Pwy7vRF6dPPu
kzgQMQBVUTP1jh0zHakMprWkaQrNJNLUWmstxGRS0eHM2bTjjkcA6RgptnlQnQkLH4+RAcBUibDxUXNJ0yHlYbW6aDaNFeau22wuAjUXy9WL5xdnTYRUlh7ztGu9GlgqNVA1zVIzVAFTBCpS8/QYwjqGlVEEYTOpIojonEMgwNq23ThI1YG75Sevft1dv6potWRCREvD6zeqVvpDCBScs1RzX7b7cnbxxdV1Y2hv/vjd7e371g/tKi036uKCRYyAe8PDvjvrfFgeUsbQxsVyut3RftuE9ZSxi02/RQfxkPpVs3k8bEEHRCCEWsURaRkXy00VbUIzl1U1beMO3E/T6mwpWgRq47EWEqvZpMJoyqKiNWuZ0X3kCMwE0ESp5KlqauKCaTEe5LC7q5JdE1wFdnR2sYmxEa27h+3u8bFbbMx1i2qpTov1J5pAFQxqLo/373+Hng385vx5fzi0YTGBTjJ1rA/TgQACtqUmIGZQQGPBaHi/ey8o4KPM5ByZLE/eUwGYyjAhg0i37jarJuUpjzsryq6KwTQcYrtsIDJ5QFKsudZSsxlSHad0z7QsJTEZAbLzi8WCk1yuz29StkixWW5vPjTOPbv67GG7W/h2fpfth9q2q1IDK8GUIE2+88tAKwnNYVurnnfRpOYpL5pVKXsAY9+NRXxcBwPLj6FOwXUBeHn5oh+2iFRqPky7VHe+CRqD1ep5BZQ4NEqomhY+NnjRKm1tatp2sehQPSEtlmcF9j/drKVcAEHNmBCB5/oFKYLo5pneBzIAIhY2AFRUIDIzFS1SPfuspoA8T86gNGNPjGpJokZEQD44FlMEUlNQQlLnGwOsUss0eK4575ia4Dy52jRATEMvvolLgkMpY51SFi01+phzlToqWNMGDzhMefvYe6amibs+aZaaCjI13iVRVdtv95Ed+gqiTRuLWFFdrlbrxTL1+zrl87MNurZxcez3MUIV2W/3y0WzjLw93AbfMMC7m/eff/rVen3ZLJYMZegfQAKQbTbrb775fTV8+cknxOqqNS6KgAG0q2WaMtR6toxjv/tf/92/TQ93y2U4P1s76deXyzSMVnTRLci7x/tbJccgK++01mnq2xicgVkdpn0kikQxLlp/8XB4X6YkCGfnXcMLYMZqwzSI5lwIAKVWFVAYx8PgnItdWHTtF004e3adU3n9/uFulx4PU2y85qoCy3W8eLl68fJ56kt/mJwPLPli1broELk/DCE6dKSay34Yt4/j1O8R2Aw5cOPREYPkPB6G/WwXn4kggXl/uM25zIkIMA0uFoHYsA8EhCLC7NI4iWi62d7fPAL5Zr0gYEeahmxIcdHlUnGEH//4NsZ4fnW+WKyW7WrbZ4HpvIvrwISVmdl55XoohTkWAK0FEMUE2RUVNHDkHLuZb0Ds59pyAFA4nrKhKj2JL4Zz4m9O7BDiLMDYPMDO0o4aIampm32tOGe2ju1pAHSiXRgcQdd2jJuZEZw8KrNEA6eo9E9O0edx/ZjEAcQjzuhIh7UTamY2qM8/l5BmHxDicfif86XHdMZpdpq//jhE/fQ8/mNC5DjaESDPmtNJIJmp94jIntmzKkTvuqVrG88Yx6n4MmlN68WqbYJKicHF4LWWUrMjDyKAepg0jwfvXCpjSdnFSOgKyG6YCGgdfRrzYRjbSI1zZgJqJFT25dvf/NOLFHGxOTzsP3326T/85h9fvLpera7uH/vl5bI/PP7w+ttfvnwZzs+HH75vxY1j9hm2Nx9wuSZQm4+kTQGInp4/OGbVcc7AmM5si1M4a54AFY6RGT0lYuyIfUZDNRV1plhTGg+oCqo5l91h9/zF58tm+cfvfw/7rQZ8dvFSjN+8fQ11ijUP+4EDayqquN+N5P1F15pKn4ZuuRCDw36n5FaXF6vNqhzsYrV+7B+bdjmkcd8f3n13Y8yQ7Bc/+2Up9s033xLPwFYgRIcciNkwRrdct5+/er5um3fv3/cYf7yZEDHG2AQP7x8A5msAj77rJ8fUrGPO2Aw7FksdiTAz+YsQyY443ifz1XHL9rS+jkrQRxgWPJX/EcxgrJ9E1czmbuzjY5h1AQNQUAIARFUDEE+KOHIZCpUYF+9e3x9+fN16juvYP/z48OEHmTICszpE13QeKlWRMakgdV2kamUeiw290d2+B5Fl9Izl+mJ5fXneLTfVwJM/W22m/nH7+H6zOe82r4y6YZgAxLtA6NJ0KH0f2hWyJ4zEkd18PVZkoOhzcYDIjDWnwCMRqYohqBbTAigGldmnmin3lpOWqoBSy8znQBAEY3agqrmYYhpGDoxEpeSZ6l9zMnTILpUDEoAQEMUQu05ccMn07m7bv7tfrRa//PzStGaxb97cdUR/9rMvv//uHUH56stX3/9w9+b2EJ1bLheLlob9lB/H5Sp2V3A4TLloLfa6jlcr9jLA8I13fPaLi1c//5VcPNt+//f3dx+efx0Dr8c+rWPS6UbzHttnBmIqQHzSawxO/r1ZmUH8yc3p6XPwtIw+Sjkfb43zB/Hp1nV0I50+/4TC+snnH82WHy10T3/B00873vgIDBRmqBzzjLhTme8TdLKQIiiRMzMlI2DRzIiOneh8AzciYgRCZARm54OfazjVTFSBkcgTGhESG5EyOQPDk1akMrf0KCiqGfAR8qSqNp+Cn1Jx8zsMYlURMbWqhkcOABqKmIiqVKgmKkgOFAwVraIJoSKCiKL3Sn6G+LMPmHPJUGpFUHCMVEScqYIhIc0ti1Wrs1rSLifXUyhFh3GctMbdjmjlPbcxnq2Xm+WqnyZVMSapCmqeGEA1CzKToSdyCGRGSEw0v2TOEaEy2lwBx84xeTNVUzplD4l5PslgdkiekYPzVSsTkXc0w9lMvHeeHRo2Pqxbf7GKwXsQJUCtYsLkHBEQQds1sW0X667p2rkh0/vIRMTM3hUTAwWrgGalZKk5jbXWKpqLpixTKUmkzmxthOgwOmYmZCpSc5GqEAnZsUOqWhkYAee3ZhExKWTEjkAIvTsePSgalCKK5JCd6FKUAAEAAElEQVRQZ3o0o4ISPcENzazMCxOQcdYfEYHQEwqpF3SemE1NAK2KyFybpza/lqZCOCdv/uMGNBNDE2JkJDAU0Wk6MEstJYYOHaeaUxVU9Ayp9GbiQqhJyE+iZbk8V6meStYEJREaOSGHGJyLXmCaChz6ulyhDz5NExn6QC4SNw15r2IMmHMF5FqLGVarpWit2nVtCME5KlMqOU/p4JBVZNEEqQIOz66+eLj50WxYhOYw3PhARtjWpggwMYFhQBNtnFNRY3t2vdYQ9D7ZVNjwxWfXv/jVV9/+9ptPrj+9/XD427/+bRPB1K3WF+x9WPiL67W3OcenarWk0QX+6uVXXbPwwcr0qFPfRAoOpQg5IOZccliG8+fn/fbGtUjeha5r47NSgAhg3OZ8cOHnz17+8v7u9+Tq8qKbDjsfYLqZhJt2temuv+4uv0p3Dw9vfxzToS5ptQy7x34/mNTqWZkyBY7BzIVF+//n6r+eJFuz7E5si08c5e6hUl1RsqtR6Eb3AMTASBhpMKPRjO/Df5d
GgyABcIYQDQwa06VuVV2VKoSrIz6x9+bD8ci6PffhWlpmRHpkxDmfn732Wr81qMHp+MDo0jR1resiIUjXNwqOvA+hEfNI0dCbEiC3m943Ta4JRB2z1LycT8jsfd94ZoVaVACqTICIUr/42U9+9td/+dUfv/3//et/0zBvN32tNp7G6H0By1lynoANAMmAXGQvbSPEvttsXNttdne3dzf9tkH2y2k8vn83Pj54DrHdQax9h2/ffnv/9utlOqkAWPbs52k+z8vapIFVs+p6q4Ohynpu28qjxGBFqqhUqWIqIqRi6tRA1QD0E5cLn7uB1Uz10o78HEsGAFCSmjU0LZKxcxG6NE21WuN213dDsxl2sd10m7SU47Lc9p4Rdlc7MstltqVYrYguNK0WQZyIjJBVnQjUvCCwQdWsjr1BcaElIwQhxtj3zrc5y/0ffufffmuAUgoSSck+UHd1dXX3apyW7ubVNjQIULUy5lpMRX7+j/8JGH3z1d+dH99P0313Va6vuppVZCbgGR9cn5r+R3k889D0ty/m+8dcx357l3u3/zZ57h2dynzs2LMZUxhlJm6u+s/Hw1d12kcExoYsl5wchI3fzMuhjBkjMiCpRddMS2qa4ZAXtUoMtUItSlzBU5HU+I4IiyoF5hyUuofjAaU2TXBts9sOZV7mqbzfn1L+iERdt/FX291PfrFreXALWSo5o7s/PB3nKbk+IPfNcIcUueluYzNnHKesSPvzuZzvPZxYT+dycrHXvLiQhi6WemgdFYNxPgtK67aN7xy3x7xvh53voi4Ja8rnhI6qmmJQ1TrPUGrf9SrqwQI1uQjKkxgDMxkxU6S+CS9UEoqwx9M0lfK+Y1cKNA0ogszHPrQ2lTDx1oZSvXFnmm24stgFBdDRNZldLzVxksbHoK5INd9rEAwb4jbgGQwI1bkqdmKQZV53FbWwd+2Ql4+b7iY09jh+66OzytUi2e7l9pfH09+YQSkltttQmxfXP89yNGOnWJbibNnsfF2m0/R4uQvUiElNEMmRUxFgMi2mGHxXpBig975KrqWyGgIBMQGyC6UWBHPs13akdUWPSGs17OXRipkIRERlLrrygkOpmZCZPAKqAiNgMHP+9tWfp8cHqXPJFbWuw6qkHB0lrAvY+TyrqOPatU1espplMqzqHTWRz4dlSSUGAgUzXRbxqIBWRKKUOk+OAxOaUAhci1jJHiv5tgk96zCfs99Cv726vr399g/3JTl33R1TfvPlX/76v/9NUOr9pkxLS+7l9fWcj03o52m6P4/YzKclvbp7ndKiAEO/ub59uYyT27YEsO198O3b77//7a++4uC2u02ex+8/Prza+DnnKgVVqOTD6TBPS9c0AXGekxH0fSMlo8FSklOv7JqmPS9pXr4HKz74vo25pNMyNm1fUyLqEZxoMTVH7JlP57MLLsTmXMT7ojl9/+Hw8cPTXG2pxsQAHNsYm6a72m1vb07HpQnN3dXgHdcppVxS4qfHgygI0LTMfReoo1R1qSlJ3W62IXrvef/4GC9FSFjNkEgMVTXVxIwh4rp6bYfek1umwuyqltN+H8ipQr9rkND51oo7nMbz/LTbxKttQ6pZKS8lzXMl7DY9AOTpnKbj1fWrDV/tj6dNCH7jdC4AlCQ55MAhApFZsdJ4t5TkvctSnbEpVhEi1zbNMk3Mn0QZXHFaAGSqiJd645Vrs3KIxC59VwiEK1bXgAEQzK0mH2aRSoCEF7qQ6gXwuw4YAHJhTyIBKBkArHn7H2hDqwbzTBFGADW4KLBqa2xqHajXJ2F8Dl7gs7hjn5JlBgCgF6zM5fmxPn/KD6Ib63Ie4OJuf5ao1uW7XuY3pLXoHdQAEX1gRCoiitZ1Tdv5m02Yx7lOy23XEsL2xavTYdRlYkbNmmsKbE6Vq++bYVrO6LndbaBCtpKLFOGSc1pqEhXR4BRFx7FSra33oaHD4VyRncM6zx9+/6v/w7/4v/7dr/7T269/95Of/DItx6tt9/7d+zd3u3da37/9tv7Nf/jFL39ZSllynZ72WGE8nm5u70QVCAkICFQF1yiAKSIioqx63TqAAa0bfLg4WOzT98me43hoq8OFtKjnSCynxw8eRXP2RCnnWqqJPx/H7//wu4/ffbfz4Xwqd7vN4XyGCkNovv/22ynlpt0UEqvFTHMuR8laCiIfp6Wotj40m213dWMYbj67enraP85TOWbRcndz+5Nf/PS3v/0D4fjizeZhP/35l3dLLmYITIzYBO67GAi3uy6yK4tMT4/RsTW2HfwxiQmUMT+/F6wXoSHSswmILoCmi42I1gtyTagR0Rp5uMiXCKarB+giVDEZXoJ6F8VzvUBXSVUugON12F/ZtWsMFNfrDtdX/WTmung3LjcAKWjNXuc8P+SlOt/ErvRfvji9+7sP739T0lNsuu0Nz2Vht0ARj4DLPKE6H8kFNmdUrWYzdi4+7k+pSBeda4KV2l9dX736kgH7dri7ft1GeHp43zZ9t32FoREQJAvUqso0PalJN1z5uEPnkRnNqSTQUvMChECqgKhaUiKyEAbQgmZmJDmn6USOJSFiUAFSrSlDsVqLVPKxzSkDKAJ5VAA7n/Y+dCbmfQTTOk1gQOTACMgLNeSrNyillFnnqVqFENs3r1+7Nvz6q2+fxuPrWq437SRYU/7V9x+V7MsfvTg/3fctxD/74v7j+TSbYnUhhN1mMFyOuW3c7nqHPh4e0v5Y3s35ZhNfXW/md1/3N6/dT27ij754efd5+tv/eTw97n784pQlz6fGN1b3qNdADEDPRCH8k16DAOtt9qz7XH7TLt4e+KGZ6NOTu108aZ+oQz+UhS48n2er5J/cQ6uK+SwuXV7q+VXgB2cwISECExLROjYQEDk2e05XrqxgJEISKAaV0AD0wjZaIVxIq8xHCN4779h79s4RrO1oBOhU1Yc1ha/M5JgRyEQULokIqWJozLx2mgESmpiC1mJ6OaGJAxkjoqEBKgiYyupnhdX6YwpayVRUVRRVV/4RggcF04pm3lEyUzPk4AnMql+4qFXFnNEUAiOTKgICEhMrmOGK+WC1Os2MWqqaWs35vD8xuWG3CWEYNnWzuxpSWomzUI0Ao/cKlRABuJqsWFPvnSMkQzVz3jMTqATH5JxcoNeGRA5p/d6vSxRgCozkCJEdoaG61d3AbADIHLnxzNFxYN8Gt+liE733bqVp11Sriq2HlfP90Meua7qWfABENIoxAiGTAwQSrVoJwGqtOUkpJef1yCuiFaCICSAiBnKMwkzsyMxKqbmUtGQDAocgBg4ZAUCIPACZoioAUhFjNMdkwKvTspZlDZYTr90cHgkQ0PlA7IAdOm/oHBkSIRKTZwpMpOgEULQyGjOQ1VLSNM6piF2OZyV8vkPM9GKMxb8nFTE5dg2DLDm3ngtQ1hxc28eb8+lozpipGphZDHHNzjRNU6UC5Otdm9IpOnIOzKztNmM67ra9qBGzIHbtDl0gHnzo240rRGnMClJVtFYE1Gq4Qs7gslxAJGKwKsu8pJxUsouM6ACDSlUr1UzB5nluwYHrcp4IIbTXCK
9Blg5UEaIJnJKsWGQP3QuQkTjbtbituU2OoypLFZW23bJi1wDDFN86HPW2Ka53MfO3A1NwRkzggUIsaQDUB1cm2lnEK4YDuIAMQDrxxy4BgdiBgp5Nwld2gQAEOMoerS5n2/uepstDrnFN0EUweOHCPHxKAoU8p9U3Q1Qk4pulLf9V4KOgVGTZDDrj18nOepHws2ITAEyDke93tCUpPx9u1k5eb+fYxMT7TZbubzD4JpWaarnZ+klVqG1JmD1DkP23aelzx/8Zvffv/tN7LU4+EYh+EP//X/4X/5//2/y3LOIRklkdqqX22uVE8587JgECuHI9/e9dv749NcilDozVFORxQltuFqmzGJmxYJgBzYQHUucjhRoJvbYRiG1uzm/s2791+G8Tp3Q6vL6fAiquV8dl1qmeu8lFpFBIlRvTU1MXA3MZEVZQiB12SMM4fVj2hqKqZq4Cuq08kdTBEDAYlUdwfX9bTqtaxG3S+66f/8P/7D7dXVD0Ofx1FdSAXK6e7+XRf0Kndj8i5IqzWk6HGAtM27gdCJLbk6EYCRiLwcbHka7t+305OjXN9tnx8mhGCqiNEArNYyTcun41uyHOJmd821dH2PAUxNxIxivL43c1JzLaCLQwMXMAIwdFszvWaCQJ8jBgCAHB1AXTFfuSPikuLGdYbliE21NcAcczSLHCfCEoMPnUhtFKm1ORClnKDi/vsfQbsNEuaQ+wGau/m4GVqtu6u+VDVO/e5+gZAwQ5moCYXByVZcamBiksh6Ph37bVrqPF7v7u+33377wLzhsSfuLHSqDbUQqTkihchIHWNgMSNVMGEKIlPggC4A6Mafvvnz7s3t3dvbNNzuj9PDh0+72ztMsOyP/S5f37+5uv/i5eHZfOpGeH7+7mWavvv2T44QNzsNeHV99fFQno/7TB7ChmOHkV18wuPj91+DzbubEYvUdiBKRiSqIXXnIpxHDhlMy3QyKzHmvBmgtdN8ynEXvUshHOfz1Q7eXt2dz7Wc9ilGMFSCPo6vWxh0MYm0rttl77FhWyZB2Y2bSfF8PiNkhzGniI6c0vF85BTbgiHh036+u77Z7H691BYCI4R+2InJOOzQkis/vnyoclIqNMQqs7Q5db1Nz0M3iBu6pDE9v3wyo0Cx+SKY5vkxYiplSvHGiO2nYwNstQEbc0BCJBKREBIiNJ18nd5cA3pgzpz6cXNeDqJq6imQuqgCEscYVq3/kqWnsCZxckilFnN1E+YUKAIKIYsJIOcQzqfy8eGpzEsX4nizDb2Fu+Hhh/2Hv/ygxc/zXEylytoDezxOqBZj+PjxIVD4/tsfurHf9F3XZSK8fXsLga34ps+BfJmWw7nsxvH9u6uVrX0+vtRF799c7Z/2XYfz8cNUcLhKtsDzw+Ow6fJIOs/LeaqbVEVTt1FpIIoEzdvz4QnMuq7rA55Oh6pi6h8/PV5d35Vp6bv09Pgkqu/fv//Nl9c/fPjw9cszurNbWdp0OqcUHECqpevrAKRq337zY8DNYb/8v/7b//bf/f1Xf/Or29PBDuf2n//Tn66v81e//J3Tt//wz/96Pi7bbvhwWCJt9WPp0owctttu7JgAT2UWgfurm24kk/N2122H/P/5xx++eShP0+JmqgqEGLipBojLch5zrwpFRMhbnZHCbtypyvF4RMBabbO7IWdHWhbZvr36uH+4Gm6Oz3tO4TTPZlTUNn3ejB2jVrMAdD4dAQFjcJV5Xrp+g4i1zKy+2XbIcVmmPg2bfhiH4fnw8s2f/yxNQp+baq0l5YREqUulKCu5tyoV2oqlsBgJHYKRO4h4jJ2CIIFbrbWAabNCQMSMntT8NGvuYohXwNHZ0Q3Ql1KbegzDPNXj4dlaa6/ELlit2eDmpgaAwESyGiSILrmsy2y7ZmycLk33l8HnldXq1VZOkAEAEaz+o88goVfIin2mTdPr9EOI5o6vBCRDcAB+TZkBADi9nsHDpcHs8zPumkiDy3T+k8Pjp0n+gpL5afZ6bTf/HGRzQHVTXdHXAGtchKjrIkpNHBl9SFjFu11K0e/uxnKe397fSMDzUr5889XzseRxiDE+HZ/Uaojh5Xh8enwq51NmIgMAHXPgGEyaQwicwLVKO7cGBoSgDk1NQPsYhdI/fnx5e5pSimAm0wRJUkJry7jbnfbnZqft1dW4DaWJLXVzt6NAzKnLGUz6YXOaDqYeuk45tVJN3Id0dz1+//L9//Hv/69HKC/7w9t3X+x49/I0AUVCWKRstpvUYQqrg0232+tlOf348vDlr35/vek15o8fv395OnSxPx7q1x8+fPXFL662V8fzeVmWPqanT4+n0+H27g11qVX98M230zyrma3lv4FvtpuQVzgI1qUu3SB1cZFAiq0dzqdmoM3W/qQxYmAaumhgz/MUHHIM3dAj+7lW87oZczCcpqkoHl72cNpfX70t9UWWwzCM+6cjVbnaDOq4VMkJbnab4/N0OB6BEUrTtn8On2OY6ypCAsfXxJkDXLDuK/ACLhqoE5q/+o8uoX/AlX71Kiq9llvhGvlBB7sop68S6HrW7RfJifxnBehrLSGuJr7P7rrVzOev/iYkQnQ0SAoxDnfd2/elHIYwhHY6LgdOoS1za6eA98fT7G45j2r1WM6iwlAJlzfvfrXZ3jx9/LbfbQCqtdM4jFebnhBrKZusddq/PD6dnw8hbqpj6q/B+fjyUA6Tqs9VW2tT9anwZPhw0gjQwCOCm3cJEpi7pSCNOjd3Y1cD4mLw3AiA1SJITRyqcAUspqbY1NT8DKTuY8pWatYaO54USvWHc5kWGHK/aHwsWCVa48NS1hapeiwhECXz5SFfxSbelkkg7G5255eX4+EJ1DbDaPXx3V0nevPtx6fzfN5td7c39vDwqY8owdriXnXb49M85zBaqyCtqZ0O+6t0dbftnh/a8vJy9ys0mQi24/ZKitlpubranJ4eru9/RZwBwqrFIBjiWsyI6Paq4fhFTl+Pq3BdAg70OQiDl/XzujrX3c/93/z7Vy2dfgY2uiyWz1Gzix8JEYDhEujFi8vpotu/fhqzmYQYcW3fCNEBkBjRAxFRNCJwvNSYeTKtHIJJBVFwNQNcITKI5OwXEyohvk7tYOiIGMyxOTWPwR3VAwTAtfoYL62OKgQqWt1EzRwMEClEwAAOYAjQzJXc0NFUEJQcmi6ytl6Z4aq9EqEouUfiYu7EwNwcUAXOp9SxB9dI4FCq2GBh6Hg3dF0HIfS9Sy22LAwGOYZx9Bhj15tBBCK6pLgU3NsiZSlzrK3VVuOqoRKAG4GrClEg7jhxAkARk0VVumRiKiLg4I4BAydSs2aOHNZyCXgl9iEgExEguqMbI64KByJHFZUQOCCaiaQUAoeIDODIATEgR6AIKx587X91BSvuBmDgQM62Vp2p2SWOoutpDDEzESMToqOjgYG5UiCMmZwYOVyOWc1dlSJ7XLEOATgx1xByQqZAOWc/n9rxSctJpTKn9ZApxLjekJEcXPhSeZY4Jw+RiN1UpLqLAQBGEXcXQ0BijIk5EjOBsWk1ByR1b2JmAIDmF+ibmhGhugD
gBRaOhGv4jMBM/9euolYt5mACRh5z2CAD4byICAo5EYBY6lIX+WrTbe/iF7/eKVlK2PUBTc09xhA4MAbiEFLMXQ4xKgWiQCs+UdWbAphrbctU5vN6ndYqdWrslLsIYICshm7QRNgFFBigTCXmREytVols7tN0Tt0mpf6CkuKQcrBWVjoOmKU0pJQ4RAIkg83VLYd0fP6hH69C6qsBh2TEw3jFTuoSQ0YAYIgcAImBYuwcDBxKWQg6iBxTNHOTZgiErE1zzGuCETmoQciZAgPGEANGAkdyFHW+nCSaO3JIwya3Kk4UuwhIataapS4QIKeMGJkj5y70GVBwngLT/uEk6F0/LvPy8vhw+8UuptzKuco87q5MgSjWZUEPeXNVypQGnWrpuyxBMIzqut1eD/3u5eVTt00EMGy76aDtfF7O83Z7d/aztpf9x0/377/89//Vf/PtX/7y6YevGYlC0DZpdWIobUndFkO3FF0Ox9Rtrq/f7PcvDz8ehn64f9fDvGx2V4dhu+zrdtidZV+nsgKGXBREAmJywFre3b0bBt4/fHz8+i9lXgJCJEuxSykOY9dtxnhz7WhNyvlcn54PGASTRw3n0xQgSmt4ITmsBYLxVTBxUzEzM3VTlUarsoImYqKqarYiynwt5GEmdru8ebvdDNudSstd3IzbNs3j/X2M8P7N7a/efoG1cgqlqQMjYitza1PMGXDNu0YjIiDYbEKKZMlbQYOwiRvvTHy8uirVHKC7GVy1J05J7fDj8/EY8xUNfdhcU3eddm94HEs7uxM5gVZwQ04AyVDB3bUCmOsCCKjipoDACGCy0qHMDaECEBN4TOSBQ27n55jcq7l52GSELSVr0zFItaE3s+U4ARJTGHe7TdpM83R6+JFy3N7d93lTpcWYQR3Uc4xqFMZRi7pZjtENnANqAZdx2Dy/HB2NEhOn7ea6nH3oWFu52+wCqtaiTdQ0pQ4hLtNJXKXU0AfEiMhdF9dzcTGwBo6oBNoamMc+PX16rHX5/X949+VXv5ktqGoCbE8vaNj1kIfd+y/fvvz4I9Z5l3e545vrq48fPzw/7V1rrTOJXndXoueAalbVoZodz8oRQHE/ndVr4t7EY9ymcdNaIwSVqUGdysymRFqmss1vCIsH5pCq1rXbwVxAbUxDDG8gdU8vR4Fm7mJarYlJtBhjDx4hZatLszNFra2gZDNRN0KPMbotiy4pd6Jte3V9midzFzDuuuvd/fnwVOYDmqTUWzvPpZxeZK4HQ8HgiM3ROEdPUcTPuhexLoSn0ywMVes25nI6eejGoZOqQ9eDk7q6tNcJGYnQwE0bIiIQuAGG9d4DhKbWaqMYpMlcXwL1YxfP57ZIU0yGoFIIWTkxs0hdszMhJNNGhA4WAquJM1NgkQrMBrrU4shmXqqmzc3V23g1pg9//aZ+13oOgaHvw1monqW2usYYiLDLiYH6oTfRcdv3Q08hbTbj9mrXJTwdTrlPY9+xLH3Xb253w8B1acfjkdF3V9dfvPtqv9/vH0+M1PdZW3neP9B2ez4vobs5H09SBawNYzedTsf9dHP/ps6T1YqMTeTx8eV8rv0mTaLTXA7HYyIKiKeXB2l2ZHD0vh9SxK//8ueXh+c1fl5e9kAtRkx5XO+SOs3PZRm3Y5vKze0VAjTs9yelDN12Z6T/9X/Y/Jf/8p8/nva//+VvtM4fPnyaUa/evOneXac2p2RIGJgOp31EQLfM7LJMJ7+/7kOgb797+u7p/O3z7GR1ObUmMcfaqhtwEEpp//KgRt04WpmllK6jjvHj4ZQjStUUuje3d0Y+nSqInfbHcdOBwvn0PGx2aibmFNPtpnfw7394THkrtZg5M7lpDCFshr7vYsDixhmriJtH5BjiNC3Ph6NIJfOYE3JIlLrcdblbnTeJBFzUq3rT1kpRYl5VjpxTLRUpEJI1DZk5UABuKhACAszTOSceu06s1ioKnWoJzXNKMfQIZoQh0LQU4OCmXRrg334R0QpCYSIwA3cEMrMLVujVl+qv2hA4mF2mVjd3NUJ6ZUw4XnI0znxBZYP75bkMwVbUClxoQeuAAeDol0Lx9fqk1+QFXExJr84gxNfK29ccxsVShJ+JrQBg4KvB/Oe41/VM8fUw/fO5vRMhAblaRPTVRUVg0jqAmJgc53MR80xMkU7znGM4LFMfw/3NGzFhJld/fno+vezBrZxP08uTTtNANAREU3JCAjIdmB2hgak0UIvEBr7JMTXt1AyhNqlCSOGw6NDFTZdrbcvSAKBLaZrmq93uy7ubrh+e9sfjUrn3QHI+zy/HE4XoRFUJMc7l1HVD18XNdnCHsiy//O1vD9VrlS/u3n78+O3VzV2XEnVcqxHBcX/ut1en8okBwC2gjYj/+T/9f7d3X2zv3h6eD3/65l85gjv+5te//9Nfvv3d7/7uajs87x9CCgMkmaYQ8Ze/+dVhPz89PH/8/vs6n7uErfnxPJtBAaitwjR3w9CN2zTkYTOcj+c2n8+HvauBG5gxeCR2wmJWRBw9MQcKCi6Ecykx8BCTiLV5UrNIkGNQwFa0Ho7GwTVC7Daxa3OpOeVxk3topw/P53MOXep4Wcr1rn8+zc+fHn+6APDzRI0Avio75hdhRj+vIPS1ZNbgs1cOAGAtu3xFYaymt5+7iMABDMzgNXHz2Qt3+evfKKOXPyXANSO0ttjCq0Pk8w8MLNBRf88RGwZ3JUITlSIUegw1jJvSaDodKOYu9Y8vDy+H09WwmRe7u+mvrvtzPUMwg7LMJTKFQMSiegJv1upyfto/fwrIpWocdiBwfPko08SMIl6qng7tWOx5wb9MXpmvo9fZkqpX2HV2nSAAaDKIUNS9wVIdTIpdmCyohg4qZitqU3xuK1zcHLyaRzECNLKzSD22x0kd2Sl/P8PToZwlqNB78a+2sc4HCeCNnlVubrdNT45PsbuNqSutzXXpNldGx8cfv/Wbt9fX28P++as3uzrXx/10OJxub7q+y1KWoUsfZz+c/WqMzF1FIqK5LDGgN59f9u4WqRPAp0+fvvybhg7ddrO9u07Dbei6eflUzmfub5CDigckcLCfs9Bfuec/+YcuEbXXFfizL//p98sOfHk1/Js3f0axXXxD6+4Jr6Y2uMiLP8Vw8dVD8Tm1BgAAIWXgqCvFDTwwExEihYt+SaviJaIECB6ZkwcBYiIntLUIFsjAVvj1KqU6mBGF9Qc1EzWkEJGD0poVUkJAgkCMvBpc1UzNxc2lzaauqgjBnBxWeYHWHyes5Hcg04vNpOFrutkAAAOzI5qJOTKjqiGgqpRSwf30DKgGji447iiGCGTVFh867roQcwjB8rBWpE9ViBhLA44GnkMgRo6RACgnyyl1sSxLKWVZFlEzcDEQFXRUdUcKmNa0MmPnKiq6tlQ3FQc3B+KAbgxqsEo0BCuWDOGSCPE1cGe04i9XjlFKQVuIEV0VAUNAZiC86CAxOkdgdsCLH21dD6tDcbUUGJqaqjngRf3mQECREZCYmGCFQRsxIjixfRbTHV1FVp1plbfcHcDMLx9IFLpxlICSogYHqlRIloXcOT
AQGgXUSm7gShiYk1OgyMbghOCGpm4u6s0EACiBoyFFiEwpUQjEwdWaSjOcS6uiBu6rRfrzVYBASGCKK7oXfb0q3XxtT/r8pPHKKgqJQ1apCKhSA0dAEPVxewu1oZMsS5fBg7ayVJG5hrQJedPljnIMnFJMmQgjx9xlM88U1lS+m2qrUqqKtfOpzLOpmAuhtzKLNlUjgNLafIC1oS3EiAC11TWrL2oA2AqKQq1myClwP+bALNJy36WcS1nqXJFw7LeqVmrpNtfb65vaBJE2403s4HB42d3cr6a1nAfwtcmZ1FVEYuqY0ED7sWOOWjXmDhBNGwd2DapNdUEAIeeYAaAFZQ4kwa3GlBFDzllQHTnEES6JJ0gxTeWU02jWT6VETkhInJtZzANzQEekHLgLjByCiVNIoRuAuOuv0dlMdvfdvN93m207tDKXet6n7Q33/fJyHsdRxZp5KROIO4c87EypLTN63L98TAGX8wmTxaurPG7crNalzHumvi0vh0/2xe/+XejCcvSh25yOe0r46z/8vtXTcj4AMsV0PJddNx4Pz7ubMYdQp3lMeToeUk63v7hv/3Iqx2O93rRWgcPVm5tP3/35y/fvuozH/ckUnp9fYgppOzjDuTqe5mn6Ybvb39zdvb/upy6UIiJ1npfW6uG0L6XGGA0hMhN3u91tKbOdjwBOm1TVzhNZcRMVa4Skpr7K0mZECGCwapOqgMDEq3pqAGLWWrvka1Zk10+3JvgP/83/thVCqUzo5suy3I1vru6uhu3200O56iIuam556KUeCS2lmGICjGAOIQBYqzN6aFWgziEPqFhOp27cBUx1esbMy6k9fzyCt5RiOZzzsAVepJNt95Z9yoF9ngk6ogjcO0REB1IgBiReH5pih4Sg6q5m4iqOhKoG9bLPmbrUwGjuawczUheuvjRpYkeQhhCou0pdF3N3fnqIISuoGXF3nW/uFcJ4/6u0f1RpQ99/+u7r3e2XRJT7IYS+ltaKMMaQ07C5AQBbnlKXQsplkhCjkuchmdYhbcHz0nzY7mKixF4PJwyWYlBK5o5MEYBjVIuAQ5+y0CSmoBEgAFiIOXXbUo8UcRjjef8p5SRoy3n65k///P73f7j97f/mXPX29n55+PHbP/+xH1OghNvd7t2v2v7FGpxPC47b619e07j3cvzw/NGKMkdtAQITtuPhU4w3fYgOPKsRYearwMEUOeSpPCGhm81ldokJvO/6eTrsujHqFiyM3MUQifP59JypdXQsZXZytSUCbXYbcFnmYzUNHFJIdXlRCDnhqc4iFjbXTZ/QQ99vmajUo7gaJIrBLTONKUxP5x9TMIImy1x0zqPmjknpfD53YwpUbJHJKmDFAN12MJ1FGwKlEKUeWjkFHBBDKVOI/ZhzjAgaMlwzU+AK2fbP0831NX1GmSJwSuiwHqesk6pINW+BI1NYN2SRRhjE6HA+55BE3IncgSgEAkJF9FYWoiDihAAm4IZmhugGkYO5tDqnlBFQATMNaihzHa921zebl6f9N99+rNOyux4PHz+6lM02vewP01JiYEM3tcihH8cudd3mKoVgoON2s8xizaazZO7vbt52m7uhH7NX1VpTQKYheZgWdHfTp5fnvtuYYplejuXQ9/FqN0otUI5Wl8MBdzfXx3nxPoLDfN7f3u9MiVN8ft7PWpHQm2CTl/3LsiwI1qScTuec8nmam7Tt1Za0fvrum9pKSkmaIIlj9VojhTpj4pEIBYw55di9/+Xtfj797u9/XwTY/McPn5CXkMPt+1+9f/v+//mP/zRNx9/+4m3O8T/+538lhD+fzl++vUdOdar7+mxQ+5DeX9/kbNaW3CWtdT/Z47lKjEdvL0+n6MqI7TytNOSyLJsYp2WJqScCVyCmmPjx5ZHMlLCp7676FPQsZWlzn6I39eLHdubQp9CVcm5l6dKAUj/tT2pIhJEyURDVm91tLecY08vxnAnVxdUBsOs7YGitSW1NNPcdIIBxnze1TAHxcDgFYBVh5hQJgXIcKmiXyBCn+cgBEHEYN4i9LKccgrlqreBkRmxRbEohufm0nIioNu102I1dLZO2pqLI0QyX5mncEoBbE724inw9/HVbBwF1Q3t1Vbg7AhGr6Xqwu8J+zeDVGQSEZOD26jlys7XSmy8If1j5Q8Tw+mDmF1LvCq/wiz2DEC/n4P4ajMDPs8zl03GdmC/PdK9t0A6AYK8xjsuohQDrifmqFr36kF7TQz/N3K+MGjQ1AoxIBigGHDiFEIiGSGDGBDngVZcjRzELHOMwhH6gfsNxeD4+Df11CHx4PpjB4Xx+fHiQUnrGSAggDNankAI5gLa2qBigu9W2pBBjSKqaA277KNL67fDhWBcFBJ8nVZkRPG36nNJ5KmJI5zrXT1djt717++7Xbw4fn9ns9lZn0UD49HQEpPHmJkeOMSCjQ4upyzlM5WU35PPhhz/8+7+b5xdvbWnl3S/ePDw8fnnz5cP3L+ClmI2bcVlaF3MX0nQsX/7mtkxPf/zjX7shooXf/e1v//znfyaM5Xz64w8fDGzshkhe6uRg333/4fnjY5sWrTUgnY9zK9XN0MDMxUG8uXk9Lynn3d3t7s1bBOyPL6eHh+llH6Oi1BjSuakDE1JkTCknQPVWxBFNxXKtKcV+GFpbmrSllmHc5aicTKNxjsfTS87RFCl6yDxuthFl2v/4/PQ8DH2rre/CfkbXSwfaxfv2ap5bTXMrGGsFu6yC5qo1fkbGIFzyQwj0OnOvWulPqg+sbiMEMycgh/WawFdlgNbX8+UnQP/58P9aV+WrCgqrIW4NEYE7GIRmgePOmIlTxIcqi7TGPpCZlBLTQBiPKP3mSpqZQOJOPAq0t/fv67ndDEGJuxD302Nm7LpNCLHOM1kr0345nPvUlaIUc+BwePokywTi57nVqtJ0KRLC8NzmH5sVxVnhk2p2GBFLI0UYyKB4tNbU1Egwn8QK8HoYaOqZQB2rGSJVdTUAJlwrvBVdVUGm6h7guwkOlpTjp/156Lo5xAX8cVlywtwPEYOYhDCa0tPj8fY6iT6ziHTb8frddKrH09Ltru+4f/jrXzh8gRCm08tXX96Lvvzw6fF0nLpx8Ly0pmMXnk6y7UcOrVm57tPSnvvhFiS5kzUHUXY8PvzY9g/U32t3dfuL37elbHl8nv8KMpOKqYY4rosFf/I+0ueJ9Kf9zfyyDvwiW+NPr3iFsF1Eos/vQbyEaC7b3+uG9qoorm/8HF+7wG7cncBhJc6taxl+9hVTTwBuLtKIKAQiokv5MriBMwUCBGA0FxciIuIYo5tba4oAQH5Zxati4wgrLsbMFZGZmJHB1YEvsg8EYkAXWmk/gKQEZNLEzcBZpZm4W1FBcHZVUEN1EIkA7i0wKbiZUgB2qmgKZO5Igd1NLEEAAVBF4iJKyC5mUC3GWltowk1DbZQKRIKC6hpEJCyGbHY5RJlbC0tFZo6REJcQQggxpZC6GJm4oy5mTpxK6jpRaVVKqWoOLqBqxVRzc6Xcc+5jwozkqirSmoi7E7mKm5g1UV1Tr8ABQ3REIvqcFrwci1zC1
hRCgJS8tarojiFG4kgUwA0RkQNydArrbgNusFpUrF1CJ+YA4j9R1gg9MCMRItPFluYKAAyo5uarS1Td1FTdxNYujEv+nFzNXE1FazNdT2EAIHA/EqA7GDA5xhQxBUMC4FBmL2dQAHIgpMiUgjMUIDPCJtaaaNNL7YBT4hB7TD3GRESAYGC1tKW02lTNVXW9KMyBmNduwcv+6S4rCev1gkKmn18Gr1IRYa1z7jIgMgUpkmNKAZnC+gbugpkShVIbHtrxIXUVUER7y9sxhYEtciBGdglW66mcRb3OxdTAxKW5WV0mcDHQZZrdXVpbyhmBwayUouIhJDVvIiqttsYhAMJSBSkQAFLMOW/v3tzc3QMDBd4OQ6tFpDE6oJnCef887q7zZhdTEG0hpd31fatVahvGq5A6QJjPxy53gL4s0/2brz59+ma83o7jeNofUhxjyo4hEgKASAsc+zEQDmWZW4mIHnOnBoiIpGrKiTJFcGcOFDhSAkTAQMRulnJwMChJ3IFSHq+ZCNzUvUuDI4WQQojSDDkAYuw6EceUqBukVuKu2+R5OhAIcZznEyIurR2O5yH0MaVu2IqausaUzZU7RmsGrMjD9ZvSvg9d53URq9F0Or1wYsfgCPNRux42t5s2L0/f/3Fz+85jUrIehseP396963/7t3/47k9//PYvf+o329z1pZXNsIOmyNb1PREx2Ol8HDFtbjfLvDwfn7/61funx8Ptu/u3X/7y8enj7TZttpuUu93t7cenpyJm2sZxjFG1NGkwnU5I56s3766/+NIA6ty0VCchsOl4OOyPy3ye5r2HZOt128zBBaGWVquuB7xEl6cKIlqjLmCmqivXjZFUdb1frFWH6gaqAECM4GSmqwYDAF/+cssQammH/Xx6Ov7uD798u31/nmfj2N923k7BnESX/SNQITSg8PLpO5cWM8XYt3lRFQeLgev0FBDarCLhdHg5H/bmc3f1hXlMt2/6lB5/+Db3MY/jePv+6qtfYejnqU3HwgnVZqLJykdHWlv2nOBCEwY2QANDTjEPwDHkwSEgRA95BTk5I3VghG5KnF0LmIIKdRuKo7fF6kLdYPXgNEQfXRZ0ud69X9zDbgdq0/mhzacQs6d0/cu/XaaFwcq8p7xLOSCBgeWenDu1Tj1zZHWJuTNTjkOwVOYTQHCSGEEdOWFTtcCpz6UeGxTKFkOsrY7b3XmepBQJzjlx8xAHVVI34IDEFHITMVSgtJSSxx437G4PX/8pDzdh+/bp4+P921/8uy/uDg/Pxkygsd+iQ7fr+BAPp2MzwDTMzW++GPS0P54fVUvu4rk8B+rGeCOwvCz7WduwfcsWAw4AUmoJxOJYoS31EIeNiOkiN1dbE5iXp8hsYKKqgGYOOp2mJroY5KrS1Wampm2Z52Zzv+kjJ6tORCnHpq1CBQhgGTRAEHB1WOZ6ghCvuxv0XW0lRgxxSwRMrC2qVq+tutV65nxVIJhBlTL7VEIr5q67iKxWvJkulaoxb90MkE5l3saBMYrH3fXfxfDu449/7se46HN/3QuJzPLTeOBgakgrqxQ5BFVVcHBbpnOKCdfe4oCBo14IfyGgm8HU5swhrGcXxCn3igXc1IGYOcTSZkcgQCRGJlUghEWsQYixyynWWs7ffdw/HkspBvDjw+Pjx5cfvnvkGMS867sQUJoOY7+7vUMAUzgfTgsQYaizEeY+Upe6fnMfg7rO+/18NW6vrm6uxtHaAo7bq9ARleVZvAtprMdFpiMhT4dT07YZh6HP81IBwjSdW6vTfKIYMYTH55ebK+yTffzu43g9UOAQg7kty1Lr4qBMxsQhBkDlGPvcufs8HRF9qtN8nIAA3YcuzaUcDqf9y7Efuq4fk+rz4w/zuecQv//Ld+4xd/H67ialrhY9z+Xd3/z2/3Z39Z/+4//4pw8f/t2//8Pflvr046fJ2neHqPweobsdQeq5S6lKba1ejent7Zv9y+PD/piu0q/vhm8Op2mZoSgHdnOpwkQhRhVLnAKHcj4hRyQosrTSlqVySrvx6s3bt48fP3R9ypHGzeawPz4eXiKnGHMRlaJM/PbN+8fjgzTvuiF1m9PpGQCI4lJEqp6nvQMWASRCwhTSVObWliH3IYWQYqvN3d3kpSxA5KbmljGu7KFmZGhYFQyZkcl3/XUXhiL72qrA7OjElDkbZ0OIAKaVpEmrRAGcxr7HWeq0P8qRmLrYIaG516U4eAxBm3RdJvpZ99N6j79EEsgR1QwcwtpTaY4r+HlNygABOAKJKf3scMIdPj/NEpGavY4sTsTu7m7rHHIRldzw0ne2BhXc7fPJ+Osc9Vkoeg3u4CVZgev56CsdaT0MWcct+6wLua+ZILjE5Nz9dcKiVTN65Wa/Ps/iZdBBcMLEFMyuu+zeun4IXsZNhxwFwrDdbserl1KR0vPhoBAU6HieCuKC4eVUamsd0zYSuMSQyCEAEoKYA10Qm4F82/WuXtWd6Dy13CWHKFW3286XKtUA0YkopJY3lPu7u4HzeN/n0+nZTZ6fn/7y/bdgFEPXb3pX3226L79697yfS5s4j0td7obd7f3txx8/RN5I0y9/9cvjy1LFYhop5GWZWmvkriJ1Wm42w/F5zqHrbvpPD88Ph8fc932kx48f0gbTEJOHf/iHf3CVHDtEkibjbouAf/rz19pqLVOtS0QidiQrcyGkruuRVocZKjg2W20Nqu3x0ydOL8ZxM467t19Q6E77R2IoajFw7iKrgKEBAjKwR2J0QARKBIE5BKJ+MwzKuYF7W0optWkHmSm4Qzd06HY6HLvb3rDrNzciWsq8nGbu+rHvgcrPR+V1HdnPsI6rO3Its1+1zp/QQhddiS6v+8yevYhIq+ltTUwCXprN3Py1rupSeGY/TSmvPK6fHHDr91s/5idX0+pLWiP6yCFpU3RiZ22NI2M3nmmOWY4/fMpdOpz2yAa4lNqAcWlGzNvt9ngo797eEMwEtc5HdAghxzSU0rzWLuNc4TQvDhGwC3Es52pV3eF8PhNGqyZCmNOx6uMke+HGyMznauh4FeHQfDZ7EyEHoGiMVkzIkzSjmMW1j7TUysilqUI09F1EXA8HEQMFVl1mpYAhhJfWTsSV++PUOA43b+8+fPcRmFKPFlUB7jdbk2aqMfZOdDpPoUtoHmupKtubX0CjaX+myFfv3x7P5y6ndpoSpfdvulK6uSxV9M3VAOfT7cgTWZUWEy/zgTf3Y9ygu9oU8xZDSja2Ni/tZXr5cfPFHyB1+e7dy9d/vcLURMt0GrWEGNWJiX/aufCiTP/066uZaMW2wc9daj8bXn/CXDt+dhr551dcuvFeIwP4ugb980t+bs2kVzXUX0Nqn9cWEEcAMNSAl2+EuJpT3AAiRzVdo1cOhkgxZjBRFERBJlwz+Q6MyBzU2uonXXtCkIL7alphB1wLnTlgYmTQxIEJV28ZXHoR3BTMQMVbFVAzdTCSWtG0SXNVQIjkaJKJmiMHQjNCqqCILIpuwI6OF2ePAabIDg4GgdDU61zGLTJCjCygiBBiBA6uaODi1QAvvCMnF0FfaT6rURaZ
mAPnPnOklDtCxtBFChExppI7rdLaUsREymyq5tZEinlOue+GGDOBBzUxUxNGcJdaFi9FwYEcmJ0Imf0iZZu7OAR3M1PGgIhIkU1CSroUCICIvNaIOSEzMq+0oXWZgKO9loFeqD2fb7SIjkgxINNFpEayS1zSHFzVvIkDGaipmoiKuKmqrndcRFdQANBWa63SmjZbj3fUBIibk8aBR4h97+jAxIGxNnZ1a0ZoZhCiBzYCA2wOYgwK3oyR0JUgICakBJwB2YFFXaQ11WUpp6mcq05Fmujn/0B3B1BGNLU1DWpu4MAhrIsbLhrX5Wq5SEVuxpddGJmy4EIc0dVlBkcP0UHLsuQcdUxEaT5XpvB8OpyjT/Gwvd70YwcIoA4O0opKBSQmDymvj0yUAvejSfHWKJAbsrcowdTUFgR10CZiZq201lRNS/WyVHWMCQCIk2+utufZ/HGP6H0fZJnNXWsNzONmM09zirEb+mF31W+upGHKA6Uc0Hfb3fPT3luNKcU4hBiAfJf7aT6raAobaTpub1I/AgcFDsxaKzsFdBUBsNj1ISkhudWmayylrUxxVwcgaZJS74RIHGOn4qZtjVGmkNwhcAI3DoEoRkcO0Q2ZckodgToogBIyo6YYGQECAgRRoDhY2QdGYRyHDc+zeyFvIKi1mUOtU9gGxkvtIcZuuN5Cq2DelmXcjm6Azi7iwK3WlDhAQFcrBRXqPE3HQ7+5+fD9N9vd7v7L3x1fHrsc3v/yN8tSHx4+YgATHaK1WsZxcCnneaF+bCrT2cABiPfPR+bnuzfj/Pw0pO6b/cwWFG3+9O3t/Zu/+/1vp6U+H57LUsWIY5jOyzyfU+KllH77Mty8GTY38e0XCuLNhjt7YyJ1Kud6Oszn895tOR4P01LmpUbMRivFzFQbEvLlYcLMVUV8FXbNTV3MfvoDEVNBMFVjJ2Qgip/drf/9f///8ALHU7l5c3M1jH/99sNj+Ke3X/z69MN382mJUoNZNNsM3fUmqtTaamAksPJyNo5I6FYDQZs1pj6GgRFaeej60PfvTseX7TA6SRgGsPSLu/8Guz4Nm8X7E90N1zf5XceU0BTMyNRlAVivYZNW11ueu2urZT6LyPnhB0DM3YihQ0Ag45CIgxtRYOJATA7OBBCSYzZToIAhAAXULqQNxIW6d6BzW2YwDVbREEqlwN2YnLmpUb8NFsmXcn4JDuP1LVCQ2lzOMW4YURFDihSjLoKaGDrn2aIrCkcYh91+eSLKIhi6beh4gUNkAsqRgig1aMASxqTQIqTQdaqQ+mEuVd3cRD30w3VdHlM/mmXkrp2PIWM30vN3f/71/+7W++Hh8ePd2zdI4/mw9DC7Ueoyig3x6uXwMJeXqer3L9+Fq/7pfBA4V5nYumGzlZcFqw/5neQMtK9Lc4A0dESVdTGE/fGpgoQumwpRXxWmyglil0eps3gRNSfqh9Eda92P47Y5ishU9rJw7DN3GZ2K1CJzxBRSP+vcRMzd5gUBcoRm83k5GSoGDYRe9946xCAWqlutp64br6/e9toIYS4zciIMZZlOdYkhXr/54tNRokixWtocAkuzUBFhB5zU9qzcxxvQIOZ1XjKf7q5/udt89fW3fxw23eHpkSGEkD8/Y5kIhxUCeCEFIAZYSXghAjKCp5AcQbVE5kBhXirHiIiRKUaGViOzr1sTcy3aXIkQUwyU3QCRzbXUulaen6sSs/i87I/LXF6eXo7H8zy3Hx4/LmVaytxt+uPhVJbFFcXo5vr6atwcJ8t9DpHBMXXd9c3WHPqh2w65zGW//2FIcDVkUNM6a53BHjMnR3JXSGnoeZEspY097IarVsvTk0CF82lSqRzITGtpTcyaqnoVPT632G2kuQKe5oY0C6CUucq8tCLSiN0UsSxIAG7TdK7LxIguzUHNGiIb4PPpSOCJkAO32tDOYey2u+tFzm+27+amu6s3u9uNgUfy3d3VSeLD95/A4Xb39sPXH/67//v/8OVXb+7f3jz8+Hj8+tvjj8+x79NXb3IKAmZEfbd15j9//7h/3gNCm6Zjab/owzTwk3hZKgdSQBWJfQ+Ai7YeUiu1G7g19VcST9cN43YzL3O/2S7LZGrnMqt7ClmagbW2FATquu5w/nSelxR6FdmfnlVaDjmFMNdzIGN2dY95yHGotTg6Uej7wd3KsnBMqhpzdFOwRjFHirUJx0hgoA0QA4TWWoyBOYDbUk6H6QncmFllbYLGCMoc1ZA5uGvX7SoexcDVqkgMWdHUXUxIlYExMBBFJnBjxlpmlYtgag7gq2GI1uAVmOuaRHuNz6zwFTUDd0ZwA2TE1ba96j3w02hhDmqOhKbGvNrR3S7hHrQLyHPVnXD9dIXLP6+qjbmvZF+CdeC56DuvMw3a57Ho1UTkF8PH6khyQlo/ZDXZrw/Cr5alz3aiy1yGiGbqgIYeCCEEdIeATDRmTKwcI0RSTYsQIPZDV6V9evk4bN6MaXg+PI9X18RQylSbPf34UA9PI0rPzFYTQ2Je4wLmsGpwfUoGsCyy3uxEpQowwVQWDNwA5VSReNh0YdhQztvddX91bVLRbH88f//wYNZAdDNuf/n+l5vtbc7htDzGrkcEqf43f/8+5/Dhrx//+sd/rqUEyrd371KKTaWVl1bmPtKbd+8+fPfDmzdvoGofod+y8BT55vxyzGmcZZpOx4Y83NzeXF//+P13s8zv4i84pER89/ZWmxORgr48Pp1PB5Oq1bx4H/vEPs2TtpI7jpyWpThAimzmQa3vmBPFyPNSVSAiijVQqQLjbvvVL+7nZXp+fKzTeS2BYUJ3m0ycsI+5C4HBDVwQp1oiI3MKHBHUoWOMkcJSjbhAGEprg0Mt7Rkex35o8zRsOktgtVZtZZLWfjo2wNcM2CoT4QUwQ0xAr8HGdawXN3rNASESXSZ4+2wH+mlgx88BCCdEBHQnR1AweF2rBsBEaxrydTWbX5rS8LPB76Is2PrT0upCgLUM2pk5o1dwjTlNU0t9guXMDmykS+vSoN5i8un51EUOkXUpy3Qcd1+aHNRmrRrYKCCt9zxTm+fDy8PaQyhuOi9troBQ5iVxNIqnWj8c6kGkKkFO4FbFP7WmgOZYFCrzqUhReDtQUNh0OWJx8Sgwua1Yym2fwAAI1LACzkDXRA6goIvVPpArzpg+TvSdBx82h4P/+v4r5/DHhw8NMTFF8n05i3ZSypvt6JzAKzKRd6psHuq8YP0WpG7ffNFd7x5e9uQgItal3dWbl8dHHuOX78aPT/Dtw8ucO6Z4PVKdDz1lRHdmdxu77KZkaHjyuB1v3i6HD33szh+/8V99jLv7YbsJaQCk7XhTlwm9oCsTgdOl5QteOVX+6sDEV7X7snmuYsBnAcgvmxNc5OyfFJ3XvdPXne/VOnTxEL06ky5K/esG/uqNA/xJOfpf96SFyOYIGFYTg6oxBw7hIuszBbpo/ABChM68tpqHlN3BvCCzmYGqWMULLRhWxtf6fRUAmEOIkTkT9JFy5IAc2GnNBKkRmKqEQNZghXYRuHhTExMws/UoHB1ExdwZyV3WkwBiJrSOsSE
EJjMPzGIQAnDgqZmLGbEaGBJz4tCVpfJxoXDsaGut2qruEaoZRkAEZ7rgj8ncFNhWWpmoenMqMBfmmGJaOMUUQ2CKIYUUQ5TooimWWmXtLyiTSalaLPWuJfWbGDOnEJGiKboAhJwDl1zKgoArlggBeK09IwIHMyMzUEGNEAgAiEMMUYOiKsdExITkYISEFIBXUNHFLEnooOLaLrdlZAcENwRgDEhAjK+QNTRwA1ARUyVUJtNWXQVs7UozcGe63LgJsElzUxV1ba5VRVZFSVQAsLUG7iklxMyE7gqgQE2kIgLGEIgsJAhxNTxoE6kLNEVtgCsJ0VdB0VUJwUEdoNV2msvT88t+P7/sp/PSDMkJwHQ94iJYY/QKQICE7kigtqIZ0aQFZnu9Fi5SUQiMFBCbNDGlgMGbImjuB0c0A61T7kJrbZ40ZRdVlUqMKXrajef9odW5iZCDtBpSDIlS1zkRJWbOKXWQQhVPtCW3Npf5uGjZozNjVKzmXlpr9bLWTcSNQozQh6pggOYeYp6PJWRw8G7o+5DqMoeY89Brs8Nhyd1w9+X7q7dv3RGMQsq7mxtiaIrPL/uYYoyJYyLytXKuiZrpsLlO4+gOgZkJ5+PBgCBnjjmmHhHJkYCWaU9gTMHNCd1MrJxjYJNSS+EQOJkjh5CcmOIgthAzJUrI2tQRQohQIxOjQUhDzkNtlWOMqQvJW1soICIHZuTgl4xitFYRAwdu4LHrKHgEaEVUa0jJwB0p5G4pi7Sa4jAtSx62OXTf/fm7UpYcB3BO3Zj7DlSlyXB1dz7tDSDlXhXGq9vTeXr47l9//fv//Zu7X+z3j5SCKSxT8Wa/+dvfnc/LaToSEoTUank+TopeZR5z1/WdzgsZDV3XhXk6nvoR3fDq6vrq5vp4ON5cbzFuPn37eHp62dzefPnlF8u5nY8TaJuPh6otQJpPTcrLfDg+pzTe/6p/814dy1ysnuflyFVF1KSmDt9/9f6HHz+KoqsTgJsigq+1wIiigsZquj5ZGLg5iK4WfFTT1pqZm4q7IjCspxH4+bwK/vinH1EhjaMfDj9++wHR76+u/vjNh8PT4Xya2lKYmR2utjm3ct/F92/urrf91c0WzLA1QARXDyRVVRSiaa3os8pCMec+nQ5PSqCHechvhvfv8+5tvtpt7+7NCZlNmvkSw4pRahjXrlACSBT7tQYS3INL1hs31VJMRUUAEdFNrMl5WaqJ4LpVBeDQx5ix28XxCiiaMQJRSrqcwDD2G5EaAsjpAFozA3prS4l5jCk9fPpuuN4BJZEaiWI3VpFlOse0UXUkiqkbN4OV57UHIOS+zhpzbjI5YuhSNtCmjIFDzJik1WaNOXRpOLWTISBGE6/TtLkKdZmaB8o9EZm0HAmQIRiINjl0Y2/zid0Dhf7q2rUphlLsh6+/f/c3qR9vapPN3U1dprpMoA7IOffDVbx9f66PJxV9v7395x++eTo8DX221uqMbnmbtqOxn+0qb7ZdPJ+fxGTG05CQuLnWFDMZ7Ta/IJdzqa08szJhIIqpFOtq5wABAABJREFUh1okxqFZUxfVer15Z1qrzMOYF29iGVLnMi+nOeaYuo6N5toay1zOA+eEwdSYoalUr6ELLy+Pb3ZfMG8C7iDm0/ISAhlijk7QCCwgh+1uaslBwfl8mhJ0y9NDX1Ozs/DMmFPYODsDVRGZF9MpurGDiMY+u0qMaX7+p6UVkJPNlHM37RfvLn4KDkSEbi6tMTOs/C9wRFDwRFG0EQcENG3rw9rSakwZwNx8jFmsEocU8lRnDtF9jXcDAFyioi4uKq2m1JmKGQxDKstyPp6b1nOZzstLo/L119/MUsUaMKlpKaXL2YmBuhjzMkvKHYUwDOnN/a2amTQOMTPsPz31XfrizZW0MzHeX10RAIowRuI6DGNprZ1PxjkQMniwiqSbIc1nfjkuSymqzQBUNXBeas3dWE3ZvC1L4FAWPU3zVXf76eHjOF4t8+RgRCCtBcAmrrWYawhhOuxdxCOjS6sFAbU2AHCCcRxz4O223/WdNIkotpyutkOOyj2fTg/AizcC9rAs2+sv3767m6fz9t/9YeyGP/31Ly8vHwhK7lI3wjxNHYXn40uAcN2n3TDuZXmZ5nmq89H65LXVmOMv3g2c6F+/fvxQzwgeCGPuCWCpZX34DjFwIDWVJg50dXX//t3fLHJ8fHruc6hF2C1HDoGlaSslxoiOgVLidD4fQuqur6+W6eyAmBNjQLAcuecsUmLMImhtQilq6sTrA19MwcniVVdqM7VhGEQR3ceYqmltMwdiiqaec2+uU62M3ufMGsAxxNDaXFtpTWbRFMzVpDUnCSmGEAOAKZhZay2mmFNsttTz2eyUug1TRBdfSeyB8TMRaPUorAGZi5QDQLzeLejVGAGvdhtEdDS5FN6vnqPL8bWaEwCCE6AbBF7NRP4TMMBfQzdu6oCA6q9hnIuJwl4FpIs345WNDQBg9kp5AQAgM6fXMf41fHEZwF5pRK9SF6B99ir5aty/DF0Xm8jqqHLfdFmIF/EG6G67bff2bihuszBWcoXSJNhCJJhCCuHh+aHrsnk97Y/opstcj4ekvonYRYpMDGCqTMQczR0xiNLStKpjSCnSUmYISKJDjMgxDcN5qSkxx8ghpHFEgunl5fnhCVTBtB8Gb2Wz2+3ejf0wPO/P3/7w9fVuHLd56GPX9ynk5+dTtf63f/j9F1/cvTwdn58fh3GbQgb06VDB4rf/+pdf//3ffvf1tyGEC9KcHRPmqyvKOXbx8KwItN1sxnF8+fjRy/mXX315e/eLD3/5etgOHEKtskzLcdqX81lqQ4SuSxDCspzPp/M0zQAgTWprqODgzZ0poOOaFIHI290NE4PZUipD6MeUAiPh9maTN7vluJ+fX6xOy7xshwGZKCVw1lI4haFjAiLTwOiOTaS0pc9pGPvY33+czwAGhERsCAB2Ph1z5Kub68OnDwiauoQKqbm9XgUXI8bnaRoBAGiNDCEQXvDsqzATXkd/v0Cr/bJ68TPQ6ELdeh3qLzY3ACS6ILfWMuTPs//rXP+TmcRedYLPbPnPV6LbCn+39TutWoQZcH9t816cPFDVag6ttb7fnqdJVGuRujQV7L2Zap9vyaTVQgApB/GmK/cXVdo8nx/rXBnzXFrfD3PVKgVAOKXW4C8/7F+O+nJSjdwgztaKWFNLvGoDhkTFzNSPzKMxVgGyPnYcoGidRFOI5iRi2xwIUAsaEhNOtY1dIAB0CZGZcGm+dzpAHIDe3e2+/OLLH87npJttRV3a4Xz+cjtEDqkjRzTwcdwAKLqX2uZ57jvSVtvDh1qW+y9++eb26vB4UIrnwxF317ubq8PhhTjc7Lql3pxO5apjSMgpFJUuoDETxhVPFTgM3fbTXLnfxG7ot5syHZeX5+6dhDSELiGvgjsBqqmGEAFeRZ/L+nldYK+S0U9LBsFfeeqvy2AVDNfPuODPX1fKz1Qk+JxNW3fZixHtsxwEq8SItH7q61K82I5+8h6tyx+RQ1hNpCsO1d0jRwFxMCBAI0CNHMFE1ZAopoTuxt
Ll2ESagJrCKtZfKHUGCGrCIQGzIzEyYwhIES0zBfIYiAIBkmtDQUW1QARWwMAMXV2xiaiiqloTV1nLddARQJhIVta2iIOBIXNgciaMjObURENADhAFZrWAISAiBjNQ1Xk+UQTBtokkSNDc2JGdHCglcHcwNwhEzmwA4munvIsIulupHBrFAkxDP/Q5WaDMFGKMHGJInESk1VrKUs1MStGmUltYlmG7zSmn3IWARBnAAdIQnTkuy7z2NnCIyLyqHu5O6011LSo1BmbgiNyICRCZmYlX2DmGAMyACIQX2i2Ym4DJ5UDGEYgREQ0RGYBfb8YrKd3BV+6U26oAiawenKZmpoQMhO7mYG6u4ECEhEzEkbASBmmlNlUz1zJLW5gJMSAHUyA0LQ1FnRgQOYIjKbMagJiX1pbFtKG5QVEidCJ0VOcKDM0AgKM4HBd5Op6eXvb7c5nmqYiIqoqsGT50f605Xvfjn7xGyKymgen1uAvgs1QEYKYthehM4JwiIzYiUm+BcoxdsRkRzbEubTpoTOrTHBItKDqdOUIKIeQUGSgFSrxUlYDjZhC2+7d3zDGk1GqBVqaXZ5TJ6rksh9ZakSIyq4GCYnCvGmKXu91SW6mqDWrT/npz/+UbQsjE47iT82xNng9HcI0s3e5qLi3H8d2vfjVc7QzD9vraGgzX14xwOh60tZs3d65aa2NENFtaGdOIIMy43Yxaz8u8QG3aatcNakDSWZganojYTDGkELMsdaonYu43V8iRKDMHAAu9mFRtJYQYu6GJY0gJs1tF9BhJ02guqpbCJg/9epPlkBMnXpHxBCmPIadWCiAzJ2kNTBVQyyHlEbSBQ4hZ60IeVNt0nMFS6HpVDTG2Kogx5rRLoG1++O7rl0/fjMM29J2aIVkMudnSDcwdUYllqSlC09Z1Epje3t8fn/+C/e3V2+uPP37Y7LYpd4fHB0r+q9/94o//5b9I0TJDrbLMhzfvvjzPJ1k0JJym2QFyl37xxbs//eu3B5yai17D/fv7f/2nf8r8S04x5M7BHn/82Orx+up293ZXi21ubx4/PtfzKXSJutzavDy8eMPkZXt72zNajlbCcT5YmUOHtZTj88vVdrPNw6eHZ+LUxJZlYWZbOzReQ6dmDay5RjN7veVAE/ULzF7RQV2QmIzXyPv6muF65whIzh20hghhcmdiTP3ui13MrKUkxp6wr3UT0nh9zUMu2BPTZrsJnFKX6zIHIJJST4+mS78dpgnqtKDHkPJ4+268+XWg0Tb3tB2M7PT4glLXIC2YtjZTYNOKKQEiITl1gAmQANnMVlnITc1XcjVbK4jmLgQtRQCEVkog16r18CzuwGnY7jiP3fVbSFvDSPnas4Ibc0TC1Hs5HUI3AHhSooDU9+MVmbq10m2StdalTX150daY6rjNyyzWptxluL9v04QIiEoRKXOEDoJ7WLQRAW12V0raBWrnEsFTGJEg5WC1juPttD8O/ZXalLqtVmhWa9Uu5RSTgRInUgNTFCbDWqvIfnd7Zx7T1Xj75R8ib+YlhW3/8P2HcdvdvrkJgZB7qW2pjZi31++O88fH7//c+LwZU+BrEpvVtbXGriFW9dvN+PXjP2yv70Ek5j5GSsmXaT7PDzF01/kGpGv1GAjvdpukCuxL+yRoir7tx/OxqVbSRQWhuVcI3RU7DN27FPHw8n2UIecUPC7zY8gJEJEhk4O1ob8DQTDou2tif3/3PkFEB7NIHpn6HscUedNt29S01diNYq0qnqdzn3yTrzM0UpBac459l5d5RsMQslkFsCo19R2kzlozMBgoRSjLyU2HYbvQSLrUau6NuFuvAlVfqX2qup7akpOaxpgCICMWkKolhdh1nbSGFBUnBM8xi9RAGjlNtZ3bBIjm6uqMGGI0NZUKgGbKnGPIZbHz+VRaU7PlvBz3R3N9eto/PPz4fHwudTns99aMc9JmZgSM13d3u+vbbLbZ9sBRSmnLfNy/aFVUzTFDV4YuQq37Tz8QwkLBpSXQGIP3Q1vMa3XwFDhxdVMHP5/OUpeiggybzeZ4ek4p7s9lKSUnraWpqiEBBC+lvHycJ2zL8vj4aMZACXwBdZE619OAGwQyMAAMDqribvNcXMVV1aTLOXEMOdxdjwQCXtRIAU2snuZS5Piy7N5IFzKJLRVPxzJP7fr9szhqgzdf3L351U28754f7v7xf/qPyIpAqvby9OSmX737BQ7jXuUwHbOUt7d38Xb8+Onh4el8eIbDqQBgzv0X74apzPv9kYkDJyznbthwiG6uYk2UQ95utiHEqT2U89wlQLaY+f3tLXo4lvNcFmRH5ggMDk+HY5/z7e76PJ1BZew38zIDqaoFxqZVtZmaGy7l0PfDdkzVsdYWOcWAOWVpNaaeeibEw3mRZg5oSAF6b1JbdWMNEDiqCzGLKQASBhVg6nI/ODe35iaIFjsU5XWcFFF0yt3o0E7HFwwp55EgTMvcWvWgXUyi1soSIWOgn4ZkuIwGF8sCOK9N85d02GXOIQTzy/iCZoroZkiMaGaGQBd2ESICqr2enjsAopnhOvc4rn21jIiIK/hyHVsIL2fhr/rO52lnLST7LBmtqIb1mQ9WBisS/mzURnN3+KnHSv01hHbBZ1yyQuaXRiIHN1jRrBxjrNaIeOj87qaLiXI/bhqy4qf96SqmGEjUcuq6nAFB3F9e5pRDm+fHTw/QWg6UAwSwAISIKfcEa20sAtDcwNTdrCjMIiqIkYeUSN2JltooUDNTNVmW9njshsiEIUUOgQMxGIRwmks1CKeacrq9uZF6+uHDi/7wlFMYh57DBqfWDG5vNl/ubjiGH3/YpxiLTLvt1az248N3X8nffPn+F63IsAnQqJ3q1XjbvHWbocspHCGlzMxyPn/z9Pzb3/yaNH/zx3++ur3e3V21UqbDcjy+DF0KXV9yHrr86btPJgXNSm2GZq31/RBjBLXSNOS0HUeE1XPIBnY6LcOmG292v373y+P++enpKeYQA386HbfjeNd/dUSQEtLQmUgOMTISQ+jHqtKFgMBaPVAgYnNxApNZsVDE290dUbeoHI4vsc8UKadc51Plbnt7//DhL9bUwMfMZTq/jsiXX/F1YazRMyb4aegGQATGlYG1cq9e/Rt44cH4Z/faqvU4EHyWnhDWploAx9UKAWsHFQHQq6sOABjRYK3WsJ8wYHiZ+y/ygAMTgxthAFQiVNUQOoXA3HGiSSZG4Bioeaunvtvt9y9iRsxSFyAar3dMFBSIQ2Y2mFMMhOwyt3rSVh1xqjX2A4dYj/ucE7s/npdvP51+PNbTbBjj1OxTK48Gi0NxcCAyrw6qSMQpwMn8SQE4k1iXQ61lO3StejMAjNyWIVMMbGYsbSBKgBQiIZgHjqwoJnCuLeUQRFVPP/z4x4/HU3XYZRIXUvmS0kg4pBgiACcA5JwISZHdXWQJKaiGcjg+2bfD3d31zd0B/Xiww34/DvH69vbl5dHq/P7+/q8/SFXbpNSNw2l/uN1dLR5mD7suq86tTk7tanOjHrvtnegUuvz043e73xYfmYcxdo6iRkIO3hpk//lO5
p+l6te/vXga1/+xtjoj7SIbrZ5Hh9Xuga8mtNfFipdEMFyE/tdn+5+Wyquu+Hmbp1fr0s/tSf/2CyEwq1mIkYikIrqrWCBkCo4AboRkoMBEHpA6pIAg5ASmAmuLpAGhGqzIYFCDtTSA0F3N0dxNBBTBnJERjAPGxI60RvWRNRBqLWSG7t7EAylT4KiqBGAOlynHSdSrgoI2d0NsBgTAxJGd0RNCQIyRmBF4bRZ0jqxOiZMCY+pD33sMBSNjkmZdcAOJgIDEkNCJMBgQIQaKyGxo6C7SLlZWEzQ3UFcAo+IzVhM2iaHrQogYUwwpUOpC38eutNLqspi5y9JcZlBLo4l0OafchZiZmAwDdRQ6NQm8dp1dpCJcq+61OTkYA/B6MwwxuGZVQSJEYFqDs2tPG7q9bi/uKFWlAQAhOQckdgdAAjRCAiIgXMWfFafjrblUFzERMHUVV7mEwBEBkCiYKdIlr4XgwdxdOSZRDakAYHWYlwlNXVuTGplMiiOikasiIYWEIaz7oKlCVauKamQrLp0V3KCxo9VCDGYCGJvV06IvRZ6naX+aTstSTYro52VPlxOv1X+5GpwJEQCZCRGBiRDB1eA1kv9ZKoIYA7qaamRUbePYNykAhOp1OVEgE0V0CrG2KSRGBnOJGY3MmUMMYezGcRg2W+I1ztdy3499tuVkQOdPs2uTMpVpmY5HN2TyRmJutTbGwI4xD41lmdtUz80txX4cu3EzxD4YWohx3PTPLydYVFspUN+8uUkhTYdjt93+9g+/297egEHfDVIhd93Q5/PxBcj7cXDAUhsx1nkGpzyMTZY6nTmSihyen6WqSg0xOtUQclWVioyUc6cuCTMGRRICNZmXUw15QMJWKXZjCKOmPiK4NDGnGGM3Spu1ChKDAzIHjjJNKY+hGxEDmIWuR3NwpRABUGoxJAq+IrvIrZYjYKKQHMwJQ9/H1BXDMp1CDAaa+q65nuvcQYchq6hInfYP5+PD8eUpRY45qlns+s2waU3N0L2UuQx9CsPg6P1u83Le58BA2JrL/MNN+HJMnVkLEVLfTecDevjN3/zhT//4X7Qu/ZhO++l4eLRqz48vt29uMQQiPBcxLDdvbs/nAxC+PL28fXP3/v370+mwuxkaFDQOIdrSjuUZjxPkLg/Xt1/9rdbzy9OPx3LKkfvt5jydpn/e99sx5G7sr4m688tyOu9NNIX8/+fqP5otybL1QGyJLVwccVWoFJWVWVUAHkCw2aDRjG1GckL+Xk444qxJGmk0awMbgDXee3j1SmRlRmaoK45w963WWhz4uZEJxCDCLM89Iu/Zvn1/3/qE9/HdT/ehG1Uh19YMlZzampVipLZm8apIrQWc91IMZe2Xaa2qainVVNYtRhVqbeTos27/eJp9pFbL8SEP3bbWvHjr+t68IyM7637c7SJx011vd1e7zbgZr27G8QY5EILnruYZcbZaTLOYoffHc4IWN7u7rhuX5ZRLPnz6ieIeMSy5bofeqpBO4nm8ecnbLRIZMFnVS0OokiEigVUEbfC5gETBrLUmTcxg1bWAKeI6wUZpTVvxnQOprc71OCkzLj/F/RuijbotDyP6AZoBKHnqhmFtbfaIUhZVdF1v1gzNhz5NVs367V1ppbRG3gMCekLnHI4myoRFMzpPjtmCWlNQ9thKGzbjspyAzPWIQuz7kufgOxGKPmY6Vqnbzf74eGB2iLYZh1wWEVrSY4ybQB1wRIzmVa0q8pxOLgakwUXPPO5evuBu8B6ePnz68S8/EtYvfvOd864pJ0D126u7P7z/6QOfT5GsglOyXCdzGCnOrYqIzPbi5oslte3mVfVSpJ7nooYI4253461zdjWBALRh05fT09P0NOy2u6txOefp8LbUFGKY2xQMh7AbMcynx9YW9o/T/TT6Pqkcz4/fXf0uk727/2F7dU3MrRYA3bAL0HPcFpClHJk8MChUaRjYj24vTp7O0ykZY+zHXqwJlLpao5nj0LfTA6ArjjhE30etn5Zl7mlCqA6d96SQx34zl6XKmVOV1jqO51nicDfG/vDxByL0Pnh3KcVkIjUspTrHZmagnr1zrklTFWVEQBMgTyFElZprWgXfqWVHLAq11aYKDJ49mqq2NZyUEBUJmU1YRKTWktI0nzbjbrPdHPyjI9Mqjx8OXejQKNeCiLv9mCuMQxe7LoTQRT/NyyHnaTqfztMQXEmp67p+HLb7ftON0zSJqvfBgMfN2PVD572UMwfvfJRWTmkpaXFsfQy1ZDToBx87nB8nNDocn0Ta+ZTUCI1Krgp4Tq0UI9cawNt3H8bN9XGeR0fErAYEri7CzB6d5NJU+64vuSwt1TKLtBg8sctZo++GLsQYRNvx9ATSnHNSlNi56AiAFJrA8ZTJ8rJ8DMO42e37ze40P55OSRv8+Ne/uM1uePWi29787l/+2/t3Pz08vCeOPlJdSk3z95/efvvF5l//4TfUyvFxPpzzp8N0P9WnSc5Vcm3OJlUL0fvgRZXWVGYDU6211SrjbtPF3bjZzMtU82LQYvAottlfORfe/fxJQMFojP0w7H9696P3PQF734NaF3vnnYp1/TYvc21CqgxoRIDODEJ3VYEe0wIGaGxix2NxrjJhVSEmtLb6vEQEHak0ptXssAEAs7b1jpwjIwMyxZKLQzQHAA6wLWVypj5EAJYKRcVMvXelNWbe72+R6jLNiNT53sxKStAaIPd9ZwbSLoG+ZkYI2gSJ15hTz05+0eVc8Mgl1cAu+dGXCKA14Oei5kFUVP0MjFcGBgnQ1HgNCHiGKbjSP6ifjRW4QqNnugpgNaAhrqQVrKHXSoAAxkQiAr+IQZ5VRfbsuUD69VT9M1Nk67D0MnqHdapOSAIGgNIaGLAxaZNaN9fRsRExgK81nVOtqtZaM7fZbV+9en08L6m2VJvrxlLLp4fHvMy9g23vAzSHGDwDEZN3iKqCYKo1sG28BceUtFUz5yuCNosA53Met4OoGbra1Ac/Xu8MtJUK3lc1P2ymeRErm3FrZksuPnhpKo2ur1++uPu2tPnqes+DD9GdDrNUK7V5x2++/YqBvv/jPz0+PfS31/3tzcfH09VufPh4710sHKRJ7OK8TNvd1fHxAZFD9AiaakUXM3A9pps3N9ur/adPT1oWJdlebaLzaghsh/vH2FEt+HiciKlz3bC7ag3Ik4qFPnjnszUmJ8y3uzfTcv7y6x1offf08PbDz/WciGi7uwYp3+33x4dDOp+1kYknwN1myHnxBCVn7yGopuUM5BFaztn5QGAhBue8mC3TPB9T3O7YuzGGWhp7h9KgtcdPn3ZDt9/ul9OUpVRtu134FU5eHWXwjMcuLCc92x4/ww9E0rX+D/Ez0l+h2Fr59Cvv2WVJXiD+c8D6hTnCy4K8GDGfgb9dyKb1ArpcHXZhbe3zu6kZ2loLborGTOS8rLwTmtRKRAYE1qLrzJwqIXpCds4J1HEzSKvrXpRLNi2xZybN89lU+n6TpoUdINHh/HEc+9L08aH8+H5+e18mdhiQAJdzbkLOsANjMqciikJgiEuVjce5tQhBixG6neEm
dmp2FWM5p8C4GcYuQinUcUVGD1gzIGOtogjOBSa+7f1LlrdzGxiwqNRyx6BNX8ZoQeM4voq0TMuBkbfBj8aBW6vR+27ozEgytHpWMB+66TSnXPcvte9Hq/FU6ZyKQL66voJPqdbTzcadp3o6zUN06ogBOmJrjahHZDPW2jyXrtvGuL1/f+oG0Dq3ujjvXeyn6RHJUaCq4okUjQku9t5fJGOfacPPW+cv7sb/9ptGvIRhmV64ws/PXiUmgKuN6Pm5z+zQZxboeRTw/AGeyUa7/I2/LOyLynJNESLEVXOA2pxzROzYr6fxQFG1EXWgUiyrokoF4pU8vUQcwxq8DmrKyM+1A6hiKuvlqGisqipr670jx7CC9zVlAwgMVSzE3kydk+aApTQpBgZIZlhNpma5WVJbRAXAkNcobUfgSHvHHiU2YULHTLxSI2BGhhJC35wDH7jrXQzBj4GjQ3UhMnvFNdbHzCpQUCLB1ZfsIqP3QaWWnBXBrJmBVEHUIqathWBVSxb2JN0w+tiT9+wiU+ddCT7mUkqpzXSZUyvWxEqVAblDitG7ENgH8L7mBUyebXh4uWuZijQgQ/VojMhGDM6Ra2JyCe4jQmJgNkJAWNkfNAOtpmprFKJjQwdI68AEjNZf3ucwNEMQtVVMdDGSG6jKKuXFS2sS4JpHSMjk4HluY9ZEGzQBdkDkomfPeZkkZWittialoZm1Fpyvao54BYKtVWkNW9GWkdboc77o/9GRAVoTq2lJZq0KT1XnKqm2OZUlt8fDnNul7gmRxEzNiOkinCIk5KaNwBySqjC553kW/FdUkXPOMQMYoSkIATQpACK1RKbggqB2Q6+1OFYVZEIXzEfYbGMcve+73fXV/u52t9mGMGhZRPT49CS5TGXRnDU3q40J0zKrArGrJsfpXGtxvhuHq1YsFz2dWm4ZTDeb/bAZQyBCY7Ique+7ZZre3z8ixrK0/c1V5zpE1wyR4Lff/Yurm5dqdbvbBt814Zu7l9P503x63OyuA4fpfCpz2m62fexLXZyn09OTA27zfF6knGfnnKqkqUkV8sn5iOTRUSonAljWiw3Qx05bMS0tnYmdiljpXBzZBxe3jSI0JSbTRNSUgGJP6lyZmE0lUujD5roZgxqGSK2pVPAeAQkcmnAXpDVUgZbzfBy31yat5sWkoiEAIaJ3gbCepimnmWMYYgzOLcuplfzx/Q81nVXEBLzzOZe42TAHIh43Yei4lnI+n5jNO5dyJarXm/h4/2kY74KPoKCn4+3Vqx9+/F5a02rjsC+5ubD94tt/+dNf/9EEgPHDp/dj2BoUkCVEbEVR5NP9z/vxlshtNsP5fDrcf3rz+tV//Pf/3hFWaHmZup53u9t0WvoeypTOD/fj9VPfD19/8+VpeZrvH5Z5JrKKls9zWPLh4WTk0Nrt9fbTaf7padrGbbD4+Hjsu0gIKg0QtAoRMDpCVsUmaOgbEBkIAIsYgiHBWsOoIiIE5hjNzHlaZ7eX24dZPidBenX7RhOSs2F7c3h6evPyyy+ur+C0OIbffv365atXfRedd845a8XUwGpLs6hwiMFHBtLsnKPl8QPpMi1zyonsSYmITKCqzVdu/8VX+27jEQcMPfgIfmvgVQ0xmCKyQyso2SRLzRdHgJpdgk6FANFQSnbOiVHJjZ2Z1loTMiK7pqLsmX03RlMkpmbSzjORcG/MFcAQvSJR1wMMkjKZ0rYP2mpJ1A4GZlZ9IG0ehBC41XsERHNgIss0XL9wsbOq0NowdLmaEvi+r2VGg+BYusBhhDQ5h30fylTZO1nAc8c9AhXygBVaFc8xp+T9YMLS+JyKc0PJEgZmz/My9YEMsCl6NESraV4eH/pdN52XGDebV2+cH+7f/u3wePrLP/2X33z3m77fWZF0TgzdN7/7d/vju3/++CNaSi3ttoGgCRkaKMBpnoljaTZ0cdNvj4cn0+YoBCcPh6eXu5dGjdi1nPK5IscukmV598PbIWyG2DnvntLjZrtzwIpYUBuohlAh7e+Gh48/VZFNt5+nB7D6xatv5qUgUTeGlA+gp1azmqqLjcQon/LTttsNYSj50VgJNPbbBHzKc422ib6UVLH6jk/1hE36buzH/UB6/+mxnE4gCuKz1H/9d3/4//xP/4+b629FbJ4XIncbb1VKajze/D702yTtxc2g9cMyTf1+k55TWta5CxgxErLLUtB5bdkxgvPrmSkQN8mpXQIhxAyQRVspqfOdonnnW0vM65QFa2vrrddMGRAdF80hchf347iNnoXhzVcvPL5++vh0td8fDo9//rH/6eMnM5geEhe5uhq9w/PhMM0nDkHNrNa7m2E6nD277fbq9uXdyy9fOGDfx9jFNBdgjhSsJIY83l0DkDXLdcGygDGqNmnT3Gophxm1Zkkpp9RUU9GUW27KIQKoiIqiIjviVIoxUi0C5jpXU/KMuuY7MiJyNSlWPIWGzaSpISIJYFMBQuc9OJpbBhU0HoZuE4NU9czb4Xp/9bqWeZGGCMDw4subbrOtNXT9fvPi5TeDC6bv//r+f/6Pfz58/MTdNu423/7dv+p+7N5+/9euD5txmK389//2d/+733/5D//853cfPiwZUoP7U2Hnb2/HodTH03FZalWzausxu+msomoC7AybKXvXMWGpxXeeUIfoNtu+ZTmfp5/ef2AL5BjQarP7wzt1TR1v+2vHllvOc2X2xm4VIbgYvScyTGla8gxqSM7AITGIqVUffexDq0ohOkkGZsjB+9VHwt45VMfkXGetHs+nNBd2rlJZtRRVm2NmhiKptNq5sN+80CaICNrIKTg0UBMlIlNVVSLXjf78dAqsu/31NGNrKddKDglAnmMqGJGICK2umy5cepXXmfZ6tjIDIGtqjGvqBBAiGZiqrEEUAE3lEq1C2ET4Wfbza0MFXuRL6xsBf47WXrf857Ma4loKdEEx9Lm+bAU6Ckq26t4RyfQSbISXmrTLUXcVmNsFk382XfyiVtLPI3a7MEyMXJp0Azlx0WFwNPYDqE6HQylVK5hYbjpuYm3w8Hhqiqk21/WqcP/w6XQ6cq1dxw7NIXUhMKKhoimbqRRDJAAmCtEbud7HoWsQvDGkJbcqu5ubDAgxQDeCISi4GH3nd9d74JCXWs9l2Od+JCnl8emkAktKT9MjULSH+fsfPvTjiM2Gq+3rNzfB9ze3V/1mbKyn6SH04//wf/k//0//r/97Tdkg/u3tn1/82/8+dP3V7c37t+9+/7tvPvz8vh9GC7nlSo7iGHvPP3z4ePvqBTvbjHx7s3n744dySux42OzQ5HR4lFIVxJsuOd3f3+elAfMwdEWgiQDYMA4eUU1ayalp9P79p7/GrvvrD/fX29s3r17cP9x7Dh7th7+9dQ5ijGk6BfSbq835CWo+PT08mehigKILTACtj87FAdBCN4Dzjlml1SzAzscYquTTU9dFZs8+IkHX9ZK0peV0nsfeAyGI1VLhmTC9LI1fQrc+A+xfAWl4hmrPi8s+O4OedR2fJXj2jO1/sbVdZHHPteXw7Ia4vPLnl7p4gggvhCk+Zx9fuCe8PAC
29q8xoBIYExiggCeOVpJINjQADd4tAK21WvKa1rGUevPqarfbWn4yURQzAOaOfSAEKcXAMTDCHGOXat1s9g7k3fvTn79/fDjXueKxipo6UUewZ1zFpcoOiRtgyrWpRITBe+cRyZTgrK1UwDEGMhW57bzkercbxl1vajUnKXnsunlpqQg4d0oC7ABb8LinJUS4Cm3cepblZsveyFGJsV+KEmBO+TzjLhLA0jnZ9r2K4Hr6p6FreJ4WIHSBa6uP9592u7LZjm1x86TLnI3q/vbF8WnuoDbmCoREIfAiy7YfluksNjKjaxC6kOcJwzZcvR7yFCIKaJuW+NIFF2w2rTaEKDV5BGJnqgh0CYi5cDkX8vCX1WGfdUcXngfxWUB5WW+r7fczSfS8HNbXeQ6zWg3Cz3utfZZ4XF76eW2j6UXP9Eyx/yLlRGAmQ1IzRmTnWXRNs2O8NAEQMRqiqUAztDWeWm1tYWB0xBa1NSJQaCAGAAICBiZmoM75lJeZulQseYxswXlTkZIQgJkN5LmNAIEch+hbVVVtoLqoNjKSKvissyImMBXAJihmYqqGTdExmNBTa564YwtMwZlHYwTPsIr9xIRQDSpaDI49imdzbMRCkRQQGI3YAJjRCBWgmTIQs2dajexUS82woELVyqCMaGqiRdCVah1ZPc+DQjSj2AGSD533wYXml5Rak1YRpLUZSZfJSAZG8h0T++B6z9RavZB/CkTryjEERSNUQWlIDpAAiJx3ermx4uVmi89SNkUwlGq1mAoarhlGBIx4GaKtiwovbrsqTbRUbRVUCEGkqVRVVVPVdQq0FrFdViAiAdLlNIEEEBAEnToXPZKG2HfjPC+Ss6Rclzn2QVvRmhCEDEWKpoomKgXFUAUZCVbvGAIbo0OMaNrEpAqyF0FwRMC975RYGy7tYJSbaakVDPCi0UO155g3s3Wga2BNhRBsTWrnX1o+nhvQALU1RAkheHIKq2I6xNizIHmvklsVa61ojZ3XUosTZgYzRvaOYvRsenp41PYJVFptTM7A4tABxKVMFKKYWsSWci2p5NyHIfphqTSXej6dng7H/Waz2+62uz4Gdz4vOZkBmel5yqoHqRBi3Gz74WrkjlvVMmc1+ub33969edVUOxcZsdS2v74Wyct87vuND/F8nFx043YXurG1LNpMdeg20tIyLWuSmEiWVp3zxApap8ejGjOv7VrWdb4pIpKPUVV99MTczpVCICo1PXnHdPUKaEQIRNDy2UyQO0MGZy7GJgmZyHslT8ggzUwNCV0wIgAEJlNVBDWrbW1Wsnk5qQgi1ZK64QqY0XtsJS+l1qoqEa20fHj8uJyfxKTVWbXlpWyvdt5HotjtdtBIFdh1wB1SjRVFFzFsVfNpao5c8Oen4zBeYejU4PHxoe/HJsLexT7eevr//c//8V98/Yfp4f7h07vgXN91reQ5l+6AXR9ZYBO6XKsaINHhcNAiyWr3auv6calKgZH4eJ+s3Y+9r7mFrt/FzXI6LofH+fCp2+2+ePPV4/3H0+FJnSISCR7P5zBskCFGfeWu56oqtWmJvSsttyoIyHAZWyEAIxMAGlYRNmgGdW1XIhQprba1m5KRV/eWqTLYqtJZr4LNttcWsrSBGR1vbl6g3/7+mz90eZH7h7/77VevfvvbMI4ubkqaCbQeHury0GrN8yF2g6dtCExWpBbAQsFtX7x00as/17m1pZ7P6fbV/ubm2qibnv7yt/t/MqM47l//9vfYBfJHYiTnAIxUpBWj0LSBj2CGLiIQekZiMyUEEzGR3keVQk5ccAjVRBA2wB7Id+zNkEzJwMyYcV0GZkY+CKhkAXbATsRciNQFRFsjjV3IFKKUk5WzthoDSTby3OO1NGkCIXalpppnN1yF4apMj+gdgxEGaY2B0QeMrplU09B1pqogxuB8DBFK1q7ry3LAQME6IKTO77qhijQxQBWT/e52SQdwaIjeR+fQcZmXaXO19SE4jJIXtHnT2/n8gHTjwvDmt99FF9+/+9v3f/6nm9tXuxdfWWQpAgY31y/fSOnk/P0P/7whtZat1T7uzjX5GBDEO9fmCVG4JXZMvi81qRoKTPnIkYN4hzZuu6aHliuRj+G2i4yeFDkwaqoFwcWN4zzn4hzlUtHHXT/uxts5J9A4xJst4Zwfp7KY+izFMaIlAHBUqyzRUYV8aiUioTABi7jj8tRHOJ0fanJ97F6OO8t63W0IIC2n03LPDp366KmqbHZXeZre/nTfhRelGA1sDpYpR7i66t7U/gWKHT7+zbl2X4GCcqDpfIibcb0KWhMkQCQx1VKUZEonzyGyE6k++FyyigBSy2ai7J20poZIFCgSOTYAUE+8uvqRIPi4TooJGQxKzaaoBArNyMQgz4JIC1YhVIcU+99+94cPD6I1v7jbbbe9Bz2cDy9+92UpuRX98NNDS4od3V29fPPFy1dffAWOfO+s6TKlKTcDaC2nOmFVz61JUVVtLVcdes8cap00z55RwE6HqZS6TGfn0cWoDE1zbW1tbSu1ldJcCEBAplKRhxCHwcfQd7GWRB5c75c8k0PI5oC11lrbEENrys5pa0zkI6tISdL3PbGQmAmZkmNblrmIfDo9TvPcx03fdbnlb8ZvZTbnXU1a57yhYbi5/vpfXHfjiz/+w98/nJ+OD0udIgtebbY5lft0+ld/910Yxv/vf/7zuw9Pr66vQg/14TE6Ox2mXM9AJDkzmXeEALmoY261OoIhxnlJUmTcXXXDtuSEWQbXObSx7969+9haNVPnPLuoYtvRn05nUfEYxn5TW6l5WUfFaSnkA4CCiXMkhTwxGzjvOLicMzEiMHUUu835dDIjYk7LRCTOu6YqTa1qq+giNGwpz+yiIzAR33MqCcAJsBmCw9Rm0aZig+tKmYScs64pb4cXKb2TWmPfKdWcawhO1v54xt31NTQ5HE8h+sEPrqq2kquE3j/D3xWi2AoFcLWVGXji9oyMV+sYEYIpAa1zRXqehDNRE7kQOqaqBIi6empW+cMaZW2AaHpJ2bgolS4ZG2ZwcY1dYPQzYEczfUZNv8Lv8LnwxwDWOIzL3P45seii0nh+8edMIrgMTHEFcJ9/SO3ZwaYpJVEkYsG4NEapS0qO2MB670PsvA+mLS9nBfLAOi/H8/np/XtqdfTcO44IwTkDa+ssXZtpBVVyzsyOc348p1NVI+iYd9thHCMG5/tuqth1Q9zv5ir5lMfN/u7ly9x06K+epuOSa8dKYCmnq+HKx7GoEijdWc4Vpc2t9v1m2w3Q+SKylPnhfAKx3/3u26u4kVLr9Ok3X373tx9+dKNXcodzUvap1n4c53TiLjYCVTahru+WlJ6Oh971ToGIuq5/99f36ZS7odvudy72p4dHBIohmjVz/unpBID90PXDsDrkN5t+u9shoUidl3Rz9eJ0nHbb0Tk+Tdmbk6VOT6f9dojE58fDbgy1wovhRXJOJbtgL283D1IlaFqWPCciaK0BVDaQMm/HIQ6juQGou9ptvKTSprkWJWCGtCxVy2bHloV92G9GI5um43lKmisiYPtllIz4XGgOz3
Dp82O/inS5INh1Ka8RGJ8rpZ6lG3bxjF2ifC880ArF7PO74UX/YetFB88ZW8/UACDaZWRKK3VKq9XoEiYGBGTrrGNFkWKozcQQqoqUycgZeQNEx4KUSilSDMGTz7VdbXYgSVpxjsggRF8lI3Etk5oCulSqWDPJfRhiDH/5yw9/+enp/lwSkKBaUwJAs0DQBaZqc7Oz4FLVHKPh1tEXY3jREWtbfwWRyURbzePNdhPCfMq8G3Z3+65bmb7aalHBTayDCAWK51aWPE11nsuotvdcz4uGdrtxQRRNaxFoIGZzqYC0lPa308PrFxtIE1/V7c1tquodU0dUiNGfajGQEPE4TShGIMNmE8wfZ6hLm0R3+21tx2YwpxaB2QWjpiCxD1kwGhq4mpWpC8MGu6G7upN0UHDSUIsQwuryaQIuDoZsSOs2ue6Nn1nzX1GIF4LxmUCC50eehWi//JeLKBIJ4EIJPUOCi1kNf1k6n7fGiwfsskx+FVN0iUa6POH5vcygifLKNNhqwSJFFVWHhghEDAZNFZERVK0hgqE670xZhJ13ogVlfVVTa5cLBAAQVa21AoRZXBafWo0FmBoogXNMztBg7We/xPIoICIzsWPnnGdXCY1jNEDBagAga6E7oSOrAsUMEJuaARTDBlgMkhCp9kYRZSBSAwPxvu+iM8cYnIsuOL6EkDOG4Ciy8+ES8UNM3hsRGAEQMgMiOUfI6IidI8ZUGxGqiJohUVvDgICBzDOaTuvXH2JHzEgRSdlFqqWmpWmVpg0KGiyiqz7HO8/M1nVWqLUGtuqKPnPKl2GISiNWI0MiIL4QzIRAtPLNgGCmaAAi0JqpggE6hz4AOwCyNduKn1eg6ir9slZXTyGAijTVtY7TCIjI1JDIAYA+63ltzRU3FBECBUI1RUTP7Lu+1gIUIkXpq7bq0oLpUJcZGktJ1kSlWK2MANIIyPko5gEaAjMzEAJwratv3TGTAIoa+hA8tmbB1PeNZ1YAMTNYfXgg2hDZEE3ViMzAESGiWvPIiIDEZmK/uv6eqSJmRkaSVsWRASCoIiA5j1hASwiuSWNPCEQOOBKROnTadDnPxG55nPKpbIY9+a7vIw/IZA0VABRr2HOap3lK0pR8R65D0FTy43GeUkKg3f7N17/5PWMSSTktp+UEHGqW+4/neREGN2w2FNxmv/ee+t6rNedQKrz66pvf/ov/dU7ncbvd9Jvj+en61Zf90H96/2cmDj6cjgdpcnV1taSclyl0YzdstUxpmVI6mcp8nERayQValQvZKLgmwTOroQEti5mR98FOQMzO0VoBQZm1RQQozEXMhZHJ+922lVnU4qZbiUsVJGSAAlpQChCDZBPEdX5YDH1kbMhaS0a04H0+nlqa/TiWeYnd4ClaM5UU2SZJSIImZTloO87zeXo4OQcCKrmI6jCOKqDMoR+7bgfEho5916rEzrrdnbRcSwrdzNDycqbgnAsl58BunuZ+2O52+/tPn1TNMCCFL168yZh+829+M/3Hp/mYtIrzXpd0eki5E/LQ77wPXqFejd37T8emukxz6Pj65fVf/vFPd3d3Hqg2Los6LLm1riqPQg6laZuX05IhLdub6/3N3fdvf5jnOSIOV1sjzkv58f0PfXfVgZ/qjCjSEBDYkTVr2pjBwNSyqKKohw5FQJWkalHxHpRETBuoGgoCEICqqncMqtYa+stV0A8eqP/q7tX086fh+mpzvdt2V9Onx82me/1vfne7v/IxWk15Ps6P75b5sMynYdzs7r64/vLLko2NIT1pLQAJzM6nBShK9ZvtS9xAmad4Q6fDzx/+9HPobyLt9l/8bnv3JnRDGEKVbOW8LNl5R8wcIofOKLKL6HpERPbPsiIkaabNNCETEBDH9WCl6rzvQNdUjGRapVWUKmU2aQYNVCEMhA5s4LUKDRu4oEQrqYzMgE4IgbwLvbNNOz0hiFljrNpKN/Y117rkGDsAafXscYzbvZhBS6jJMfh+U9OZmRCJ1ypfvzs/PoQ4qCjy4B20lkOIaX4iACQadtvT00mIx93mdPr45ps3P//lx1LOBKEKxRiHrWvLIxruxyuVpk14DNrqcniYRj/evq7Tw1Rpf3v14ruv4z6eT/c//vDXV4be4bC/IsOf/vZjrafazrd3V7Ic53NCYKsNNIslkdCEiMf5dCJ2I7skJbeJHR+XRx/2/TCCiQN4ePgJMV5vX4h2ffcaYT6d7lGDltK5F/v9b98/fW+lSk1WCUCGGMFy06eaC1F/mhcD8rGzMvWuq232vXhcqkyynJC8EbOP1eaaK3EXXU/ob/pR5QTCjoYd7+syUbHt9ouakwpm5e04fHj6gENclsJ0jjT02u8DJZv6AD6COV6q/+qLf/X+/lSXt6OXCvX0dLp5fcsOuo77bvh8LyhNVSV4T2TBDbVVAJvyRIhS0YzUNDjfRJAJiZBYm7BjAZNaiMCkoDlyhCYApKaOXa2XQdqlg9ug1MZEpTak0MVwTlMYRt/3Kvbh8fGLr99c7Ry22lH31z99X1JbWknTMvTDt//q23FzG4ex8z5gXUpLi9ok47Dpuu08T4RtG13VLIAMuMyzgTiw/bDJ6Xg8PmqTmidtbZ7nbhwBC1qtBWorgFxUFVHN8rT44Mn5phCADGCa27hz22FgRRdcldr18ZTLtr95+FhU2tAP0uoQNqq1qUo1BNCmaCBVnONai0kjBUHMNQVGdNwhhOBvbr8O3C8Zvnr9qjYDiy9evB42L6f5IFZPn6Zc89Xr7f/p6//D9z/89f/5//732nJ0LvSxio3ef/n6q3T8+eePDy52P394ent8OJ4mra1kZfbk3LaLBvrq6ou/vn+rHppc5iFiUHJGciHGXCYA6GIfiM7T+ePhEZoi0bDdksHjIYXgU8qtSegHNKtpMrVuiGANzLgLSBGtEBIAgWJbi9yRSHt2zvvY2txU2vkQ46YLW61aRIKDqoWkqRo6IjJRAcZAHaJjQiHJVi2ycz4tCbQROATMtYGKtKZmrZ7RaCn54fAXR+xDWMoUXGDfd8MNqkgrTcpSqmfvAkpLucwhdiogoqU814Sv0ol1krsqE9aeEYTVH7ceTsEMjVa5BK3HebjwPmrGRIisqgZ6Abqr4GI9tCFdOsLhUs681oFd1PRwqQNfgzb1mbR6Rhj0C/NzQRyoa/XUZ/roV7DqEpL9WUn0rJtH/JzdsabDfA6aWUG+EaKA1VIZyfmYDc4Z/vrjx+uN9+qKGrOjVQKL1aw5jg2A0R4OTz//9B4UutXZihC8YwJTNTC3UlLIWej+WJ7O8yJUxBrTwBiI0PC8tDjuQ4wPD6eadXk4sPOvXtw+nKc//fVPLnb46aebF69ur18MWD9+/LHvNqKQcq0quebaCpnttpuvv/p2e31XD+8zyqvX14dPT4QeLCxPx3M+jvvxIz3e3L25W8r7t99jGB8ePr5+/XVrqyIAAqmCEXtmvx3Gw3Hi0N29euFD/+LupuRiasNuc/3y9ubu6uOHp37XkzfJ84d39w+fPqqK92FzfQ0GZrLbDDHE8/k8n5ZqGvvu4XgcQn+YEgDc3
FxvxnD/4eF8Os9HITJm3my3ZSk//vTPky7OOC2niNyFru+GAblMqZSKTLHbqfcqmrKZHsjPDYPV89gPcRhev379cDwo2PRwnJeq0kygpiSxbTZe1JdkLYHzHIawnE+/4PHn0GBYNe0X5w/8ku2Cl54gBVDVyxTePksz8MIw/SIeuXCRa/LRukzpV0a2X0RzF1LpV8oSRIJLWvazOGRlA9aPhaDKhGIASohIQISOALWJzYmbODRt0m93LROiE02OvVMjJPS03d9G9qnmwBhCVFPn3Dj0+XyPAOzddJqRHZFzCD///P7Pb++fEh0Fz9KkGRESGK+fOrde4YYxGx9MShN2jB7vc0XAa4+kct2HLYOXFp2PIYxdN0ATCq7rXd9rE0I/xKsq4tnIgdbaxXZ+Ok7HOSL3HUYApR5dbET3U/JIg3Onc1GTx7R4H4MnJPen96dvv7rSU1V3vnpxZ2ZgxP3Wuxq5luXkqA3dYOrvH9PO6Hq/MYDjCc/nhQJsdxtc0lmWUux6v1vS03aMj08Pm81Nmz+21oABhE2pv7rFbp/vf8BkZZrJEzGlvAAY8+D8DrgD4F/TPc8+l2dD4zOzc3ncPlONzxT7hea+dJwh4mpE+jXXdFkrz1sgPjNEzzKlX7FJz5/hvxLKrXbIz59h5QAcIZM2YfaAUHNSayaMTEiutWbPG7j3HUBjUfRmItIyOK/NBIWB1uICNQEwBEZbdaDIiIIk5ItCMxCBUtFBaAUZkTyvyTIOEdGKCl6ytp045wKhsXkBY9KGitqUiTvmkbUZFoUqYp6AOAMmsSJNDQ2xSmPgxAboQuiFXAbXDZs4brpuM3Zh6DoKvt8PfuwpDA2BGFXWSxKBSIHByFSlqoIykSNmwuBQrVYz9CQKCGhqDZTQyWUA4VJuBobE607BIXgI3HXahVpSzivHBWZQUiIER+T60chBiIikIqCy7j+XezYYmIChSUMiQwYwcg5VLzpHYlt3EQQQRakgFVUBHXlWckAODA2UEAEaiKGqtWzStFRtgiag1VTW3QkBGAmYxMCTgzWzHNBAQJ+HNGBrpJGKrSHbq8qIyRMLUBXhJt4IjQRNIKvVpqDI7lKETYYmBgxgTEyuY++UQJWCM2it1rk0qwAKHpSQyHFtUg/H03lazICYTZoZiAqRW2EzIRG5tYFUTVdX5/ozTPirycAzVbSmNjlAJKC13F4ECasVkgpsCFpb6UIk75olAfTAVSSQ78JA5Nl1YRxuXn0B5FttgYFJzVrJaVpaLRmRyA+KIKrn8/J0OkGpQxy//OpLH6hJTtN5Xk5gq+Q6/Pz+oeVas4buugt7IwjRbeKY05wshchd34Wr8fU3b8RSCDGGeDh/2u+v+xAOx/cAFMetlByDc5vxdD6bYYiDuZillbQ001pby0UVXNyipVQO5/ORQU0aaGLnCEGVjJ2qAHIXPDP14y4nMSAgJmKemZm6PnLJ/aZ4F+RxUWnsneqA1iE5clyW4mMH2LQcXAgqSUXDOEpTlUaU63xwoWtLQnRicjo8tFqYiVCX+WiGm6vOSk5lPh8fVQXAHu4/mRVVMbOSJaUFFbqhd96D+m68jsPOgDlEQwRUciaizAEAEXjotkzg5qmWc80p5yoEu9vt6ThzC91uCK4/nh6tlc02Hk6P/sX19tUXiA+H+59KTkPXm2BeqhUFmsWg1VrqVFq5erk30E/3p3HXkeO0JAX0DN5z1YYqXeD5fHS+c52XCrW089P58fG8efHi9vWb9tP7Jc2kFgnRzOlOltCWqfMoSClVRoI1BdSaAhqaoa1psqLNmUep0AA4NtG2jrMMRUTExATRCAgMCRkRPquKci0m7efvf/KGx/mjH3a7YG/u7vZ9t7u7QeKcTprupeRxe7V/9YW4DtCl87wkcD5AbUggNSOCKviwn6ez5FLynJdpPs8FQt/3X3/73e7NN+xDyZbPx3R4irtttxnjZhf3ToGZUAXIO0AUaYCiqqaN2CExgFL0ZmgCYA1qM+zNdUAOzYk0agvUE5SHNj22PFtNgNbSzKSIikyg4kKn3il3cXOj/UsIVxCvmjnSFdIxAKg6pOB2A1ir6Wx24uC1ZXLCXtQWZLKqIgvi3vVjPRWODpkElbtoYoDgPSOoSmWP5Mh30SQTWSAQKaEb+72fT3U+Ta+++OJ4OMx17vfbbjvevLou57ykitQ1VJEZ0RTUrAXktCTfV4ddrvnp0+OU6nZ3hQUP6Qih7/rhpuvB8OHjD6Y2nB921zcYlZuU08SbmIwqgomJAoah1LTkqbTS9wbsifC0TOxo6McswjzE/jq3XNuytDnEPi8l56QCpvcln5vM476vJQCMh+PTkk7L8rDdXVcB0LIb9k2SgPmORM28liQuDpvhpaSHpZ2tgmMVa00rglY1KKQg7IAcC0FbFrTkmYZxPxs9KTBb8G4q1XPsNttc0/2y7L7++uNPf85VHAwY9t311/Xw91kaK02Pc7COrP31+39PwYvOrczLkrphPD4qQ+zCtcPPuXVGhM4Fz2wAc5oYEZmj61VaaQUUCK20YgBoLK0RUPCMhFobOz/EbslnUwACdK7U6mOMXY9UlunE5AiR2dXWyDnvmACLWm45xlBKXSfGbHZztRl6bmn+65/f/vD+55evXqLD/+7f/HfLtCiTNsvz8f7p7Bi311chBsedVDyfDgwQvZZl8R6vxu18ekTvN5ttOn18evixpTydnkLXgbWUZ1XNeWZsRK0pHg4n5n4pWpoiITsmJhGNIYq0yBEjOedEG1bd7IdEicFR1S523ncJFwMEcIaOicHmWmrfhdURa0QCiKKkpqa15qbqx/Fmu1exAORUtzt/fbtTxGG/QYCffvpb7B4Qod/srvZX8zIdjnOJ9erFq3/37/63f/nz92//9m4cyDCAD3/809/a48/OawRrOd9tr95cX4cQTqm+ezy9/3hIc+uCW+4/FQUgrLkFJh/jspyRbehH751oC10kAjWpklutQz+YUm0QzXZjFMCmLfYDsWfklDPAiqSg5AbEgWPNmRwxx6yLiCCYJcxWUpuJkRRCNwzdZp7ydJpqLuRjCAEQQvCEFmKHxIoorVCkpjotx5STUUtp8UQApKrkIqqSJkNk35MxM/kQr6+Hx9MHaUW0tibLcgZwh+MDUxw2u+3mVvGouai02HfklFbBpgrhc/cT4jourqtcwdQURdewbVijfIjQmjKgmq6FtaamsE6ALl0wTKhrQgVexOxrhe5KyOhFQgErJ/U5UAPXTCLENZTuV6acX6NuBAPC1SgDa9j2Z/PYL2KoCwQyBJS1FeEX3PUZrz0nFl/CO8guBSkgCIAYmE1MsbFz81IolV1wrZTrq52LrtRctYFg8M77fs7607t30+mk0vzaz2IQiKVW75kRa2tmtuT2MKVjg6loEQREcrAN/OVuEz0pIUb/eJp+fP/g+p4DtKre9O27nx/Pp5v99Vdfvrm5u15Oy/sP7xaWzX6XllrK7Bw/PZ7H7Xi9v/3f/Mv/7t3992/fvf3jf/4PXEq33R7ebbS23U3/5Te/6Ycg1ZMfMo2n6r79
/Xfz+VNR9+nd2+2wi3ErUltrzoV5WURTvwm5Jgba767vXr3u+75JdY5wE69u7q5fvDo+PFlrCCC15VJKK0Va1w8evVZ0Hq5vrn0Xfvj+5zzPwzB45662u9OSBJXJLSl9//ZtZIxD//rr3+b5NKdpnvPx4ei0+ki3mztSfz52jmGay/GYBqbN/mqeTlWtgK9Zd57RMzBoU/LA2g5PTzRRaYVib0bj9g7gXLU0k5TT6Uhu3EADaO1qt0n5gB6Fn1fGCqwvxCEgAuEq5vk1lAZYi//gV2N9MzBgwlUjp/rrkKLVILQisWd24LN46VevCfDfLNRnexE8swW/MAz2GQeuLJEprNpxj6R5gjpZTgi1SVWp5J02KXkmBFDVphTs+ma73QRpRZpgxwCosuacaq2JDAOHGYj9EEJ/vL//8ce3KWkqpFo7ROfAEQQfPICKMhKb3Bq6ZJ74KJZMnaJnrkpTw5EIAftAnWoM0aG32hjdZr/DYQg+VkgeKThfMihDCINSsXp++eL6eHwUgbGj0YFDVOdLVbGNVe2IVWnOi3GXm7UqQOjj5qf389ev9qfjFAJtt1ujaIhx6ME7rnMtCwO6wS9i908TEodh3AA9nWtKLSCMfR/OefVTD64jwnG7nyQNHk1QWgpx0296jIP63e12//Z/+Q+OAK0hNUA11LDZNwieA4Dhs6/nmT9//sI/I+vP3/Ov6Jv1hz4bw1Zqxj5b0i6yo18tGHuWtMHl8AjP2+sz2fiZUEL4Zff91fp7/iCIWGt14Jk9IDpE8Cra1KxKYyBkYiAEJQ1gQGTOxapCzjGxYiVERw4YVbQBIIBIZQQRJWYzUAUEVCDnwhpYqloVyABE1qwtMtG1zhNUVNTWGFwfog0VEQxUMjCAqgrVVRzC5Az6wMCuiCpSMOqRirRSm6iaEAMCcUNy5LyLvtv4fjfsbwYf+y52fReGwXVEntF5BVJTAyVQA6ytAulFLoUAamIE5hkYCH1wzKRWWA2BVZSkGUITVQFBasrYClYXiJhWe6IPTBaC73peltZaa00BalNYErEzBN9tEZlW+brKhW7mFc2tukYBqQaAPhghXrhotEtxI+CqODRVrQgKDoGcMtvFtmYX0tIATFSa1iq1aJO1QVtbUTNVRSJEJFNRwc9KNGsAgNoQ3XNXHyCaopmZaDNthMxoAOKYiFxtYqmiaRNoQioE4MwqsyOHDGbCjLRyYgSqa5UeBOdCrq01O8x6XjIyho6JaJnnWdrD0/HpdJ5STaWVWgFR7cLmIwKYohmarG1QROSITI2YVBVsTdK//PmcVUQoxkRogciaCrhVccoCBdFYm3drO7LWWhnVjWvCF5amkFLXxX4/TOlJkWsVh2TWamstV8bILhCH0qa8TMfjicn+8N3f9RtmK3k+fPj4XmqGUpBR1Q6nfDzNuZUY/H7cxM31w8M8bLvbN6+kLRwh9l1prRm8ennXjwMAbjZjqQuDu7p9NZ8f8/kh9ps0L1pld7UvpRLysL8h78Gk5YUs+UDNBQ9dyZPkBdrsGfbbjdZa8qLi8NKXVVGiNTWAXJBQrC3sWIUw9ArggneOpM4+RoBa0SF572NZ0IT7nUc/SEo1LQBKQLVUi15LNjQhk1alFiFC0LYUEzVDcC3nyaqe0tQNfauVyUs61zTldC55JrJScimt78I8z02bqmkVJg/CLnbd/np/9dIUWmuaUi2JHRGBVi1itWZ2nombixSG3e5mmQ6hW9LpcTnN0Ox4/zjsdsaNyajjEGNa4vK45EV313v6zv2Xv//nrpGo9tuw5MUtftz094+LzqTevXv3ZES1liUB9/3hMA8UvIPl4XzzYrBaztNp7MmFplpjt7Em2iqrHX78Aev+69c3f35b6zzXqZABVBHM0fulLUUbIIspyGoEB2AyUEQ2JAFpVrmha9hQkZlpTQlWUmvSAHgtyFQFNWitsXOfY63HPozd+PRwvru5RXAdwujwy5fjqze3ZU5QE0MdhwH2e0bM6VRhRqA+YDs/pWlBJWtPiLWWJFJNPQlIObW8OPZDF7ZhMGyH9+/OD48hxM3tV/3NK97s/ebKkEsTNAdEpiaqMFdmRVCpjZhNDAIDBWQPBkYIFA17i3skx2DWCpQz1mObPsl0IihrJL4BAUIXR5TccgLJgCY5SWUxgHaC070b7nj/jeu2zXaEBiKrNteUgANgcH6QuEFdvCQ5f7QQtLVuGKlinU4x3mLsZZmgKEjlzqP3BuoQlWtJZ/ZkYGotDl2Zzt45ocoY1DG46Lb2+vVvJS2ECZfK2L39y7tdGHZXG18WYG7L4tjAYYEWXIxd8F3QqsINgUvO5L3JPI6DVHj/7uOjC26Ir7/6vbHPaZme3n18/+7qdkylqbrTKVXAEEcTaTllwGMVcl4Hz86BmAdTs1aqiMTNPsA4uE2uRZ07TGfwW3HurMmgWhN0ZlRP09T3L12/mw8/Jkxx7By3VBOALblIkYrShQ36WFIdsJPj2bgn6z3dinHNEOMQ/biUs1uLT0wYPbSk0EApZYXYA/hza1XzNnhx1Lgu08Ju7IfNJJ14vv7CL4dPrZ3eHf8XwbT58gVO2ujYhZAOZz900ooRbW7D8Qnvrl+fJz1nuLv76ur6KyuP61UgejlAibQmLTgX2KW6ABKYBfKILJcxnK4SCARTbZ56B1BrqewQnVjFZmYWvEOrbIVAvI8AWFs1beSY0DGCQ2dSkRwYgufcchi2NxwPT4d0Or396/fnOf/rf/1v3v78Hir80z/9KZcW44DNrm/Hq92261xw/jgviyVQJ7VWzdH1SzpP51TSpFWrQF4Kt1RyckjOx/vHg5bcBw6ExykvuZalhL4jjmrYxe54fmwYwds1e7ECtYnIZhfJzGTebMd5SuP25pzfd2Hrw+SdddvN6XQutbGjKaer3YZzNIRipqAqplUJzVIjsCG6kXrHKBWfnmYG0w2pnINzNmejIB8/hnG4ur3O7Xw6TOfjOS3z3ZuXYYjH4/Hh5w8O8fXrlxUApBx+vu8Bl+PR+3j71fXY+04lRP8Pf/rxH/789sPhXCsSgCFC0SLJezZZXRFA3qf57H1gpibFDLVocelpzgS23+/7bpxrHfurNn+qaclVapbgvAsKgFIqAGa01DKBM4D5MK2N6GYV2WJ07HzgfpqXIfjAfD4/zvN5mY+IGLs+9B4Ic8lNQJuxtbLMzQQdgREoG2CIod+MKZ+j57QsZshM0koMIwOrIUDwTiUtJRW11HWBXZ/mxZuCWc6JCWqqH08Pp+HTy9dfd/1mTk9VMjsPJmPs0uWE8xn+mqGu/IkprUDZcG0jAVBVBEYgRNML4DEzJNT1FnM5NeqqRVrH3qprGgd+5oTsorqAtW7ws7zCzJDInkU+BvaZPIILcYSfM1718/B89aCZrVyXPQ/p7fJWF0Ctn5/8bPl4tmLYJWB4LWRDQLJ1bN+agIgLTA672Bex/aYPm03VHHzvoyfgtJTvf/709uMD1eqJPRAYOEfbPgKYiCZVU9Omk8C5tnOWLCBNPRGReUZP8OFwUm0h9rttj4Cx99yHftOb2Gl
ZFO2733/37Tdfffo0ffz4Hpo6B8Nm4x05F4P3p9PpzZcvllTunx7+b//j/7Xk/PLq6uvffEUEeSl3X74JLIePy7u3S7fPu+3tl29evD+1tx9+3vSvX7z+6v7p3md5+Olvr7/8d4a8Du/n5dh3AT1baWC63V9t99emYiZXV6+aWLftUymtlu12Ox2fWk3Hx6ea8/X1DtFpse3dro/u9HT86cefkN3rN3fvPx2h6k/zOzB1IY790Hfuy9e7h/vTxw/vnx4+Krjr/Wbotz1AQLl/+PDubz/0boxh9OSDh6vdME3n4Mdx686Hk/Mh19Q76iOxa9ost6mkFFwfHc8PBz8qj31p2oik2NhHQ5aal7TUfM5tqcKesTXpumcbJl60bc8hMoBov+g3nsuo7KL1WPuhf0ke/oX7WeOwL/41W+v9xJSel9/l9X5Jonm2r32OM7oQSs+L/9lQ8qs1fDF7AOjzB2UwkJokHQmltmkceiaPwN77+ZRKyxBG85zmvOM4BnDWcmpq6FyoZUIE5B0xtLL0fscoqpXD1szeffyZ0JMkFtkyglpw6FCDs1bFe8dMDLYz3fT8urZG/pzAWu1H3vYopY3eXQ/cR9j4kX2/3fS1Vofex46ZSRqBodY2J2uqALUWFZEyVakxoiqUmgfXMdFmjCGGIlKT5NOskjek2y02xeMpT0sTqcD+z3/7+OblYG4iZ3cvXp5LM+IwRJQNzUoVRUt0IYT+eJxHdd0YNtQty1LyHON+vx0fHs6tQB8CkbvevyqoJbXd/npaFkBg57s4TGnYf/Ht0/dvXb9BUgIg1KYaupFcBLBVbYq/xArBJfF3tYvhf60i+7yvXVbTs7Lokuhvz3zQL+v1eWf9LK4EAjWV5597FmE+r+FVn4RIq4nyIjf6vBoBnHOlVMQVoYOqIgKRMzMVIaaVQjUiRqciJo2ZEczUixQXoplJM6cKZo2RAFWRyK8MAiIBQtNKCNH5oeu3HZKzwB261aRPcKFjEdREnv3KTGaMROy9qmgEVgQsQCqgKpB0bbZynpl96NiR81kgN6nNEuY18csAgR2zC76L3dDvbsftbjOO26Hvxo59MB8MoDWRWqtZlbVaWg1AV8EsISIxI1IzJKUGLiISETN3ZqQq0FQNHFJrAkQKICK1VUTgWmPwBIJmiITkmBhd8L4rtZWcpYlpA8SSExMQEruIxOgdmqoqiKzfGzHDSnaIECCxM6I1V59oTQnEtd4BTUAEZR1pGrqL9ex5C2ogAlqhJmtiLZuugLOBCZKRweeZCiEis4qsrUqwmnBBANs6mFm7iFTXe7OpQTNlNCJzDAgaGM1xM6tiBGRG7DtAlLIg0BryT+xCDGCttdpqY3QMVBrOSaapTkWnpqw417m0lprcT+nT+XCal/tTymJMLCrr2WPdewEvjmBARCJCkEuMF6186rPr/VdUESCy51azqaGRWHXeSc1m4HxYY66AkRlSPvWD98HIaz90TAy2/upJiiRbUi3d5gqdVzEDa9IUqaV2Xg6tpU7hD9/9Ztj2x9PT4elgZTk9PKFjUfOOHx7PKWupRhRu7/a9dx9+fnw4/Xxzd/Xd71/l85znedh2iC44uLm5e/nmjY9u7PuUZ6nt9auvl2U6HZ8c92gORWPs51wNeLPbuOAkH0+f3i7He3YwTZOUbGotzzUnABVVU2siCMC8Op7REGuaANBA0dDYplNxPnjfk+9qEzUh5VZLS7GVwgAE1G03xE5bcy6EnlvK0kSmMyEDOGdba2IKJU9mVbSyY4QKgDUXdrGlJZ9PTK6WzG4NRLd6POXzk0gq6Qwg02kC4FZqScmkIlHX9cH1PsR+s+k3OzUtaZmPh37oPEPLLUTf6gSor15fffjpgw/DclDX7chGRB43O8eQ0slhc+oOn+67cSyt9ENwLoQ4aNNvvnj59s9/7KN989XLD++Pjw8T+UjOp2mptaDTL7559ae/vp0OyYClpu1u3O83x4cTok1L9qjpZJ7dcU7+bmAE09ZaGbebZZqhQc3y9PNHLfnNzd1PT6ciBT07BwoK6EkcWEMwJJa16IJ4rfozNAQFItVm5sxMDUUESl2LB6qKqgIawuqWVzNEZDN7trlDW7Kh2wSfDof9cONa7a5Q53z6+eN8PEhukk4dmaFAnVsr85RAzTmM7G9ffw1GRMrRE4W0LGiQ8wPIPGwdmE2tpuO9G+PV1TXxMN68cbsX8fYldhsFb2bIazmUaCsmzZF5z9LMrY2aiGZAIRoHs5V5AWRGaSgJZYH51KZHkEQiRhz8xmJvptKyigEjQnU+rw2y1rLkwiAqC7RF9FHbo9vc+v0fxF8heRMDArAKTIYIwBRGa0EgQjTnk+RFTHx00hJoYwyx3xarKs07IiYtsOKMMIyiBb0DMCLPLnLE/TDmVMEoXt/0fdzfXpeff9J6+u0ffnt//9DVzYubV9//8e/7rVctKo3JueD61y+JnZo4dsreD6OCcXBSluO9HPF8e/f67m6rnu6Px+9/+Cdo8OaLNyHU4/efjqXuv3zzbjohWitZQdFkavMs6ZRK1w3O90CappO5frfpa0om2Nqy6V2d3rU6Z0kh9CXnVFLX3fgYRBtRm5YjIqjh8fz+6fADEO2HWJfqMZBjT14gL2VCC8Rgy1ksSMv99g6IC3VNGpH3xrk8MSqB5umEjs31c3oC4V3/YhhHx15M0ZGZ5adPMYTu2pqXlipb65HkMWueApSuC2Ik6bE5QhGnzN53+74JLPmBKzNcB+xRoY/9GvGuVS/3SAB2BOsYQ6WpeaYq4lxYj+kOHREs6RTDYKi1LYyefQSFlMt69MvLoojMZCq1iUMGhNM0IzCtCMI5NSNiUW2qYmXtyVnvtV2I2orNi5ZSlsW7bjd07356f3o6b8a+mH7xxSvQ1sXOBX8+J2NDUEaIXd/7zTw/Bh+mw30f/bi7O53PtdZSa2ko+WRoh+VwPp2UeHD9UvPf/9NfhpsbbLofO0QsVVJrqc4CYKBgfJrnMQYi8j4M4/bjw1OTTDi64AFNxHJrfT+qFAYauq5ZldaCY8eMYFq1mhRDUQ1MDrmpEGLD8DC3SI5a3m38y7vN/qqLsStLrSDjPpDzBjxNLZUau+79Dx8fTvezJsLdb3/z22hhmo7z+bTZ9q2E3XXdjz2b1TSlx9qZ/9O7T+/Ph/un83RcUBBEOQIZmun1/nZJyVoxUfScSzK12A2qWtIc47AddikvnkMfgrR2OhyY3FI/peVUa0H2xFhbVQImYgdMJFIFpA9+XhZRa1PrhtjFrorVBmYq+Wwi0mo1DXHcuqtajs6zotaWyYRIGKi0hVCZGcicc/O0eA7eu7Q8uebRqgcSwlwqWFOBjMxmnkORVLSxKWCbT4tz0Qcmw1YECJlczZWI+95bm9/99Md+2G/GkYFTWojQrHnv27PCdMWmTVSBPKOB2nOk7po2gcxNFQwEVQ3QdMUYqAoAemlPwRUaP4eprDTNc0rL50k4/IJ2CFH04lZbb0yEoCtD9Kz5WSe4q4tnVVf8yq0Dl7dY0ZbpyhgBGCJd3lYvOcH4/PEu/8
s3nHqLTthqbiWq3k/mlzIOE6xNQP87lnpIen/eaQ8kgUXCm5bEfEMgdPOBbClMerxetR1uOw9+x//sNX4dvH3zzs2Tv2TooAQAwUHedjfx3g08YvbADgUWnbg3MESJuuD2yuFzOFiflCru9GY88OvFlA23fFVya6vRBdvnhVzWbH/W63eb+8scX19fZpBB2c91nQ+8qGfv/42N6u29WXORUugIIO/NgNs+UtVnObcpPMSsnEnjiAESAwezTTImoQm3Yybpmsfe3vcsQ+vuPfoUzC6W0/A43P6PZH9AdOQVfPnEk421iflsUzTG4ApvBdIBw+Ak1nrOqZ3fQRlMQJtDld3kSVDF10AAAKpz0FldHUNHgEIJmgfpxmXtMZPLsQkJwaIaszp+qlUPHETMjkQ8y5kIgWsylRHmHy71Ywxel/CKiCACBjARrRCAnNNQhg6qlER0TM7FTNMZpm7xgmJg0SolNi9AFCQOedr70LSM57QgIhFEhmqoampmCoyoSEKqMkkXHshz6rKDnvQhPqGftYx8r5EEP03pMj5x07P0m5pn3DwBDUSjm/AxNzR2FS5eSRbHL3q4zR2CF5wwm6KGAKWogcsEMgQGQQ1JJNdJJIKYAI0knDNqFFCECgcHLqwwmjwdPgBECn4DA8o0s2wSVTavNZ+XZexKbEbDNQQUBQAclgYqYEAEgGiqfrTgZWPN0IeKLV8uSXNI2mJvMpQEIUUDQQQDYEw+flUU0mbQyKJIcomhGyqcGUDWKGSCYCYI78hO6Q896CgoKPuYyz6N3FLKXucRu+eVApiac5rSpNLisT7IiISMxBzFQV0NTEEROTmamqoYUY6jq6ENkFREY64bNnryKDlIUZQaWq29lspjIScfAIPCwvbzhAU1VAFBmHoXd4UsKLlRgcMQFaYO6PRx8dAnddml+GUNU+NmoWZrMYKwGsPatq3dQi4EIwNiC8f3hq5xUU+qtf/a1TVxQPxyOxRiZ06EIgwJLFRfc8IctyrJari8Xt/sNbRprP6r5L43E3v7p6d//25SffMyx56C0L+iRj2m+fBFilfPv1u2HYmSqqoCgUBfaIAUlVRNVSyp6DZth2GyiTKpJyzkzEwFlGsALkA/usObhoYDkfHXtyHomBAEmJwXkGIiIUE0NQMwYiDkRR0ZDBQJCYyBsUQ0TQEMOYjgZQEAtFrpj6ToaRnQNjNibwgahPB+9iQS7A5No+l1KQfdONYyCWu3cwVCWnWAUX4m7bA/uUQcmcISOBauqlaJnVPgRi4KI2CPTdkQKBZMhcVXNikKK59FnGfAQ97GZtUzkNsY4hPu2eYn1zuXq1f9gShPX9/vXr2+Pu281mz2G2WDa7w0EHqDznpx1RFBNU7PqOCPd9t5g3oOaQRHLaD0nUV6JMzgeoGcgB9UmkgDFYBmU0JFIV731WM0DvAgCMKamp4wBgnquSy2hJEYhIpBjSZOyvZp4cqBkIMpuYABvIaVB73rqywquf/Oz6+hYV/vpf/+v/w//uf//Z5eLpt/9+v7tfXC0+rDfDMdXVLbVzrhbDUdHHGFqyd3n7++G4NVzQ5S+MAg7d7u5Xb375X9ez6k//i/+CFp/mEXPqmQNJMTRkOu72vmqQQNFkEFOr2kpFoVoJV3ZaGoQQmNEUpGRAY5pynjOW3soB0qGMPcbWtS/7w0A+svego+WBsJjsiQOyo7BURRMz0fOEzSPTpKRFFUADndybyGQEUFRBIi0ZLFnONo3bmYAIRAEVmRmdaifr35EUrFZazEoPMiChGUh/BOlQRhWEcasgaEkLcXNtpvnwYDmBZqWoWTksAANwRdLl4yNZp6U7HB7WT++LJVANvnr52Re/+eu/2j/cBxV2yFBFFxftDL3LksmFZjGLzRx94GpmVeUIISfNsM9jEmkWFyNhRbpfPxQqfYHYRhLsCl+9/GLclLv1HyB27dWn3ThIt54vlszYzprN9k4zOHQu46yeO+BNtwOfncemWhlJHg7L+WtwcewPYGImsaoQKrLosGrbxZg/jId+wEPlax9rBdp3KRcbskVEh80Prn827vbXV19+2NwXO4bZBGwGkeE47JEBx7xqrjQ5F+cKftM9DmW76zZQX69mVxxn1t2X0nfjffCVyWFYdy6Ez17/lBj3T7CHLpVCrO18eTh2zfLT1AMP4hd2t3u7vHgxdqlpq/MBDkou6BiZDWjMoqJiQmQ0ue47NoCSypgSM+UyAri68QBKwGaixkXFtDARIKSSgg/MxI6RcEqSKiUBIRCNuXNYTU1BiLPHuw+SrFk07ElEttvdrG1ibNHGVPj26lYpPtw/ff7Jp/1hFwPeXi/vPjy0DVfh4vH9Y9XMGNQQ5/MLQjp0eyTQbCY65kIxMtIicinFOXR1HLOgJwMrY3FITCBjcp722y2HGH1wgP2hY6KqCem4A03kakQxke37O8gDp3FW+e7Qqxppyb1Gx4AElruSJkT+sHm0sQTnZpcrji35qr1sak+IpKCSSlVXsYpVHcexZtPdw5Mnv1wsiH1ThzSm3XFL5HOWLGqo5X7dVPV2e7i8WNXz9qu3Dz/5wWv/BA+7TpNJTqUIIQJqXTWpZOKqaVvZ5d3TW++jI0RQkaIGx2NPBFUVisDYjzKOpQg7RiI1ZnZFjRCbqk2pM6RJVDmbzZ3nw7E3R8t2PpSOoleyqqoCQuVAx9HFSISbXS8IalI3jap45sBV13VpPKbBfPBMvqmanA4pjSESMpYuD+MRcQSAbj8SI3v2nrSAAooUmTiSACFUiKg6qc4UDdgz4Sl90TueTvop5RCCFHEuqMh+t3HBe2JCvrq42h8OJZWnp41n7/1JgHZ/LGBsoKd4EsDJwnOyrCYCOUtomKfO4FmeM5F9QHWSVPz3FTRTy6IndyH7brsEhFODcuLdnc+SUz+kZ6eDsymHmZ18hk7sixNGBABASGaK8JFs8VF4dmZ1fLe9Pn8YAiBO53BwzvkJIoohxFjVVYixqpsQQvQheoeESKhFvRU9lG44kqvGQoThanmz3mx3hz3V+PkXnweUt+/frNd7Nnd7tegzPhw6MLhezRjhOAwo8LDeUltdf/LyYtn84a//Yn3/dj5vgFzXDbEJL25fUAyr69e56KZ7qpDR+8Xqqm5m8+UVMWTAfuhIFYb9/umrdr5QWY396EPtGH0rzkfvgmj+6tf/wW8anS2RwuLqdrN++OqbX19cXqY8tG2bjuNut+E6LldXKR0R/NXl6u3X74l8yvr0sB6Ow2q12O47djxbzD48fJhfXPRp7IYe2RezojnWAZC6sStjXjbzponDcfvh269y17XzlpCHfuj78fLqBhGP262adFlySlK0jm27uJSSGeT2Ja63DzRgPQvLRdzuNk0zk8txcewfHh/yMIpoAhy6vB3uXiyWq9Wy7w6lT69XF2BereCs+fxnfzZvL3/91//mBz//0d2bb779Zv3Lv/iLf/if/0/a5S0yAyACqwgRw+SncW6YT5azhAZ6Khw9e82cCUKnMj3X2KkADW0ylP6uWzGgwdkGafoVH2lIH63WPyICE/h0vgf
+/3zYCRud7rQTbnDGUQHQzt0+TCwPJK5nvm6RUKwggfcBLRjobLk6HO8qV1ExUDUSIEMmHUezAqYMhuxrDAdLyaRt3axyHx62ok6nmYSqb6o8DiXr2Asz9eMw91zH6mHdPa5746DIx26oPKCZJ8fG46HX4AhdfziUw9GOQqVyA342q9NYfvdw9JWvGMdRSPLFvL6aN5eQKy2ScC2CiIKkogamiIeMZVADTIBFtYBgMcGcxGaeK4JVpJiBdBiLFaXL61ezps0DHDZ3pZkvL29292+yiQIO2XxwKv1+/aFZfR7buofRUMmxcgjLF+rclNsNODlloq9qAQTPiAil5Dy4KpqjCas7gUHPq+R5eXvu0RGmxCUAmKzOTwvsd+rnzGb7OAjGSSJ0As+/a+M/UdWeMxunS330bDM4+7ydyExnguf5AR8/cMq+RM05MzN7JkQgBwSEDDqBqCSmRQoSEnrQCfdRkUzkibAAGDESEwXnhEPmUEjUew9mqsqCgGSGTGfmHqAZMHlEIEfBo0NlEDI0daYMWABJJ2yOjGAyRXaTzMwRIzsl9tXMqoaqWmNgdAxTQJsUzWnshShlEHDkIvnoqWL0UkrK+bA/pFQUyDtfxYpDQ+QdnWYcE9HUOR98JOdwMjw677lgk3ReVWVi0EIpWsoUQA1FICUD4DAZQ7M5RmRKpYhOboAIiMhIAETsWIszyQZqCkqIIogEKmCgWiawA8DB5EAJiDRZ0sL0MkzyaVnQaXukk9W1GggACiLZxCeYxjdWThulCpggqsGJLQUT5UdFQYFOpkdwxmKA0E6BeNPAZkJ8CoKqyYlFNgnT4MSVNgRgQgYmbyYopFKmZUdN8aPtmyIpoB9TdmZUjMlTpIAGPWvSeTW7Ws5XlWsYACCJTlPbrEJnzpQYmukEohmoI1a1Z2UwgDl2zgcOEdipYcl/16sIwNSkCKHhYUjSH02I2MoI1UxyEEO0GftYAVgVIzNQxd5xGodARMQ5SSnFzYKhZRFs2IXo61oFkD0RqGQALJKZMXdHURxLryXmfadSumP35ptvhi7Fan7sjn1Ky0VTzSo18D6IiBowOufJjHXsFzerT29e/fZv/5LVaykDazGpZtHVpeQuuldlinyS0u03aexcyHnfbe42OuSoCIIADsgy4ZgNcpn8VooKEnaSHBJ4YMacOqQggjmb92KARFVWUxvYxwk9JQ6AFSObc+w80pQ66F1VO+8tKXvvNRqoqRoZEpMjpMDoAMC5QL7hEAkPCIaEVQhk4LBaLj/ZyAfnAdBSPgYMAGLSiRUFUaCcoe9GFVYTrjxD7rvByr6pfIFhvx85tDLk7WPpdZhVvJhX0aNHZ1aNw7jfHwC1bZxYqoP3jguaGI/DPizrZtnMaxwPjzHOiclxLLmzyfBfod/v+xGwapob7N5tvvfjV++++QM19WE4vPzy8vdf7W5WF2MuT9ttG+aOidnt98cYXRY5dgU0H+24mAcXKA+d5q5IcS6QD03FiXwQBzoCYFWFpFD5ml176AbJaRzSNHUldlK06BShpEA8hd9M5xNVVVEkMjFv5s5eeAATKxBzKY7p+Rzf7WS8bOPq5duv1//lf/m/+fzHf7x789e//9W//ezzz371l7/WcbxcXdvsBuKrbixVUztU6X97ePidi024/mfq5+Sq8vjNu7/5f+fj5ouf/SK8/BzjZT9kzamuWh+jHt4RoA7ZUlQCv2jcbJWTUu3MRqpmatEko2aTjL5GM5BBUkdUYbgyFNQR0rakPYEXqCFeGHNOUlcBtcjunUlPishRoYJwoeTVAjIYFtV+gs8194AOEEwEDMh7QFER1UwIoGTICGjkIHgtI+Ng+YAAAAl0NCRLA3tHpDLubfstoxn70u9BR6djgZ2VQpg0bUWTjgMSShmQmeJS8x7GziQzsZlIUXYVV5rSwbqds6HbfNg+3O+fHsxKPb9u2jpG//br3+XtBzY1ZB+rULegGZ0CD/MYXai4rqhtgKORH7MJsYury+Wy6+/7Q9kO3Wg6W129aF52SZ52NHSDgm3GfVi2JQrEKKk77o5AXLUzyeV4fAINzrNfXo27FFwbo1/vHslRP3SzMENqSsFl/SrawqxTV4gsVLzrNl4YVdpYydAlyVV9cRjWbX0p0kcfjjICtc3lxfqwu2wXg0BXpMq9wRhiQY+YtIq+H1DMCCofqnUuNUVEjV5TPmy6tZ8vgfApDavmtp2tdsf15fLT405rf0t2OA57U/eQH+rmolDeSbqYL4N3dYZmdhVSMG77/baMOlBv4reb9XQXqBkyG0FwsRQRLYjkXSVSEAkApSgzIxgThOBTmRwZe0DwLkop2bKigiqhExEALTICBkMDFRUFAyZWExDw3JqqoaDzfUlJraqa4AN42z6tIWlbuzQexv5wvVpIKV3qXr++fnxcWy5N7e/ePTJhrPwgvLpeYpl5Z4dDr6piAhDqppn87ME5JjrsjgbqQxDJRcSTVYHMMDbtutvMvPcI5AAVFXMakiISKlCJIQ4py5hJJQ/smcoxiRQ2i95T5URKdC4NCTUTQx5VCJmZTNhARfrj3iAjbet2Np/Nq2alaLFtATiPeRyzqVXVHKSsrigc95vNDkZdVBXNFs7jej9IEWY059fbdRNDO4uHblPPF7eXl/1xCLHRMsSqPgyDEQwpVRTJIaPLmk16H2qOlZmpigIZpKIYvJOcRFTVRCbQw1ABzHxw3od+6BFNUZwPRjxrL0ramxmYzmetc3EcMyONqc9pJBfZRS1Qshy7o/MOXDx0u4A6jmOs4pgGxZHZIzOoliKlHIuMYOBCXcTyuHeEwQUxYA6YUzf0yDTFaROTmpFjRpdyKpIBpwktEtHQD9OUD1mJHBGZQRGNdTu5rjJHEy2lSDbQYga+bhcXV2bkCHcP6+bilAM4qhKSgqkB4/MEW6cTFihO53kEUFU0mwBQOE1QP5IfzsOIs3c0PE8mTh5G59HjtDnhc/eMBkjPQT+npxLA87nu5Lv6TAk68zKefatPl7bzd6cT53SR88iNvhNbbgCEhIjMzI598N77EGOMsarrqqrquq6rGKs6hBCDPymVCCWN5bDTyjdV1R/77fD2cjVDsLaFL15/tmxmd/fvfv3172dV+6NPPrl/2u/yuO2zASDSmMuYh8vlcttliP6L7//k088++fr3v92vn6IPpUjXdW07+/zTz7tRy2AAjef4+ra+aGqCnNO6e3j38PW/VRXHjkNdUuccs/PZFMBcPZMihK5aLNiFkgRd88lnP9hu78li6g6Dl9nlvN9t0rFHhDQmhrK7/1C3y8V8fvfhsZMcarff7E29M93eP8W2UbXKN+jD8qr5+ve/fvHjnzuV3aYslotAXLxeXCzGMQHB5cuXHmA4br763a8d4GKxGHLOeWg4zC8u+9LlIUMunv2sqbUOBJbHvN88iiqqFslAoMb73T4g1vViSAOKRO8+/eTF2z+8iZd+zHq33Ycq7sfx4elxNoP1dnhx+3I4bP1qJt0OZqvXP/05za7vHr7+yT/8HzeL3xx2d2+++vWf/SffK7kQE04hTa
ITOvSs9MGTAMMmH3ckBERUmMrwmcB2crTQCXN8VglN3IwJeprar+fqt7NSUp/9RJ7ZSGf5GcD0oDMb7/Qrn5lEZy7J9PRz2494gojsI9YwmWpP9yvWvrqsFitwNl9cHI8HK8ChDb4y01Jk1s7QhIi9q4goyUhMeewAhY1UMsAYEBaxAtBhSIg1M41ZPDFCUNDj7rGtLkpRNKljHId+ux8UQj/aIZcx5SZiJHKI2SQ47g5jVdVkD7Oquq6q94/l/uHNoevn6n648NsuExI7iMwxZS1jZhgRRVEEAKxYMRUBBtMsOnEdBwVkZMYaULICwuOoCLQv/qohGHLaFLMNIc8WSyYgtW7z4KvWzy7SdoNWxrFvLxvIoRyeSv9QLb6gZExYRq2qFTdzZAIFz4xSTNRRhcgANklGSDWnFJoFsbNTA20ABsin1ew7dXJ20f8ung5n1Mf+DtYIZ3zwTFUDNDtlwH/XzdwA6LQsmp7q6SNsCB8Ja99VsMG5Xuxc+gAAUNVtKllEBAxMnbFMXYahogKYmBAQaDEQMKdGYBIIRVU1KxlzYI4GBdjUmwGq/P+4+rMmS5IsPRA7m6racu/1JTwicqutu9BdQIOYIWUAcCAcIZ/4l+eBDxSIzFBGGk0AjQZ7Q1V1V2VlZmzufjfbVPWcwwe71yMbkiIeEZ7u5tevqZnp951vUauqRS1kN3UlhwBV1dxdgQANAUFXaxUzRUmBN6yRJTmIanSI7uTOAI5aTRlQ3QAgG7iquyEjp6ScAqdIAQAQCiqUvMylTrnOVRcHAyKJscMmiKDP83lcliWvmm+RtoupJWkZAziauq/FEmAAzmsL4RrpEQIQA/PlXHlFd3a1qmCKol6Kg2o1XNuEqjouBKtcg4EE2NjrGtODWn19FwAISVYJhhu4rSZHAF8xHq23IP7MEa83IgfAtXTC6yVkGAwUwcWhgAOygRG6AzFcgkjMEQEVLzFDgOBrbdAqNLuKhA2ufOYl5GilqFaOEgzNwZ1crRZ0Ba9uttrTXfViLkRyQEMHXr3mTISuRFidAIS1ai0Z3dQqr51BZQHIbKhaHbxUq1UZBVMbUXd12TxKJ8wOpazFTahuaoaEiGTg5E5ERORgSGJgWYsQXzYtuLKUF9bSgRz+qaqoFGXmaZkCIjjN2UulNgadUUXrUtXc6wlxaJokXdKycGCOUmtOyE6QlyJE7liXEpIwxyUDoMYgIsLuOs1OlJoGzcdpAJJNuyPFxw/PtUz9LubZYth++vhMhH3fbHcbiby52WhVVUPEasVLYRJh6WP4za//gyuGFJ1pGM5MGGL8+P37JvZOpmp5yuV8mo6HUuvT07v9u0/LUJrUgpOVTEFYABEihFxrLpUQRGIBrqrAaLnGJFXRirlSQClWAT3ENdESXY3dShkkNCTBwJmFhVmkaXuRFFIbQjJxEmELqhUF1wwtIpYQAIlDREaQ4BxV0QxLKSyE2QBKd7sZ8tnK5FilaYGQwYOEWlCkLa5eC0FUoLZNJY8Vy7JM4+BnYTA1ghQVC6GGXlI5H495NrcgZIWCo7sVt5wLeHbNjiBBhEMbuynnp49PJ8sSOMbz5v4OoGdMZI7ofdxEoA8f/hDb3eZt//QDLc/nLu3GUcHg8d1jcK7DvNlsrCttsz0NR2IiwiDoDtP5lEKLRONYcNEmESPWRXWZXOZpMFVW0yYJAFDkZc5uSAW7bnPf35yH8fB0nJcFgR1XQT4w87JkIfa1kA5cmNd2SxJaeyyJSM3WpNA1fNz9ZYIL0rdfvv3i/N0x5vbr1189/+Nf/4e/+N+/+OOv//a7P5yOw5vbh/NR795sidomTrR8Gh//4Ezx1Z8a7yS+0nLS51+ffv2fbm42u3/xb+T2zTCYjnMgaDvwfFw+ftJ8IN5QaMPrLzD0jljHAUCADZDdEGwGLSgROBASalnZLuDgTI6Zy1y0crMzD4DBjREqe/b54LpYmbHZcfsa5MYwuLAjeIGialY4RKgVGcEFwAjMLAOoVwczrcXdCS8pGaYVgCiw88a5w9CgTl4W6Xur1QgV7NLClo9+cIwd5AmxanZDAXfXyfOATGS6xocwsg0ngMbz5OCp35RqyG71aCUYbaiJMNZ5PlWrTd9S3Fpz+/y0Fz0eP+695Fd3d7ZKNaGYZcY2hjb2G2Ku4HnKzB660Dats7iBzifXeru7o2UWrbFqI7Jt2/Dw1ePpu+gaOJT98zyNqYGKocw5bbbLeWkpECNx1wuzd4trWE4aUtfLZHPPaRtaLeTY3bavTct+eKxSmrABhzbuoMTgoW3a0/BxnheVGigt+eS0kGPEjiBZ1bc3HS3jsXzf37wZofKm2y/DdJ6FNq7JqPO1a5EKy2YpZd5/ut3eWnku5chJSz5ITMv5oP40joeGmlZup+EMvERiN29oG/sHsBIUwZm0lDxMQywquxiw0sPNN65+nhfN150U+Mqf1lpqrSu2JOKLsdsB3JkIhM2t1OpmIiFQdLRaaykFGZvQmqmjs5CpN7FbysxESKyqhBQlLGVmCe5U0RmplrLfP7lh2rTLvNQ518V3m21s4HnOm3ZT1VWhb/qy1IfbbZ5zqcvd3fZ0PNRc5yGLMBHoPKcUSBCdWvOlKrjO8zwup7kuy5JB803bgZvVsmkDughyAH7oWzQTCtWXJpEDE1Zd8rIMCuqWXTmlVotVcgdnTk2Mx3Eeh1lLIca7m5sBYPZigJLaNoSljOfzaM5dk/p+p2hoM2UdH/d5eL794ouaRWLX9puuB8vVSo0NT+ORxJsmRZE5j1nLFw/3hKfzeRjOk0Xbtv3xaf/wk5+4xEBBWUvOqUHXOg5HVW1SQ47onufZAdpuM5yWtm1Tapf5LEIAuKpOaikIvkyjAyDLsljTNDE0VSuYDqcnNzWiWkYmFoljXYiICLnZarVpOuVpiTGkQIGDpHY454Lw+uFNlw+n0xj7XpDrsmdhEGmZ5+kUmIMEBK4lu4NbBYOlVFPP+RwDu9WSZ5PUSBOEHEC1rn6EWgoRACmYVa2OyHhxsQQhdS+lmBmLrgmetTq3sZFUanHLZiDEEgMiClOdF8XizkqyudmGeAUOiGqGjER+cUKYAwL5hWEpqnjdJa54mlYBBr98+0WhY2s+5VUtsQLvqwbjMyBx82vI9KXdDK4WnwuO8kupD7zgpBfcTRcDmq2tZ1c/Gq3lUHBFZ+vfkC6B2des4lVDxMxMxMwxJQ6haZq2afq+i03btk3f923bxhibtk0ppRCImYkUvSyTLpv58Nimevp02Lbtw5tuGp5v6dVt13x4966epj/92a+Aw7t3PxzKdBpyir2om6oT3d91jgxD+dnPfyYM/7+/+ssY/OGL22myaawSUhc37x+H0PV/8qt/frt5NY8fy/79Dx+ya87zyBhCissyWgwkzCxMG7IgIWw2t86dKRCD1pJzXZYSWDl0qpanYdPfHB4fb79oQ9gRcV7Oz08fHl5/dTrP8/mQEi3zIKklJmz53fvvcdZxyulmU9WAuIlNHjMoMOEwzsWquo6zprZxhZIXYjatc54fP/5BDGLgMs255NRGR
jqdn1i4CUGavhadpkndTUsATqlpu7ZM87IsoYnLUnZ9l6eJUtzi9vTp6fi45wQP96++/e4PXdN/eXd7OM9nx5/dbFouzkSp3YjbPNWn47btVccvvn549XY7T9NP/9mfDPvb3/3+H54/vLu9fwuwpsMiMV/pmhdU/Rm7I5K/tI6vVTl4AUbXnOD1/9mVsfksH/Hr3/CFy/TLqr4SOvDSPnRhOX+cNfPZOXkhhfBFjIKf//UjQciFtHJfPZsOhODsIkCb0LziuAVK/c3D8f0fmq5dlpzrICGkvkPn9dEGSEBSzTwgFDfEJjbj+Siiu20bmR8PxylDZVddAguYn6cRcm0lBZSpjCGyEw6THhefKwzZT4sD0HkquzaW6tm0D3UbaV7Gm50s1QLTmyYgQKPYRl5c8ybMuYTIpp6L1mKeuCDM1dog7jrnUgEUwNTAnRkdvQDk4lZwBNsG3DIms8Ugl7wfyOfacXOmHOVMFLs+EUQs8PTu+/ubnUgwMSCftKYYa9Xp+LR99VVqtgTBM4ZXOyO80gEADubGsTF0ZwImqKZVkSimxtEQ+eWmeomgxs/E4OezDFeG5sdn/1Lt6C8nGK8r6iIaejkIXvVEV058FZa8mHOBLsvookGjixbkKg+9mqSu3P0LPXlzu1tyUVPXVVrq7rBOL1bJjDBdrpCXJBxzRdD1nUEnBwQCYAAmFnYJMaa2qVbdi9VSEYVpFcMaXH9XB3fQ6pewI6A2hU3EFihYgbqwu2bXWtcUIFdwNxaKgixcEJnCRW46DVAGR3MkNZyzD3M9DXkxL0ChiU0rlYq0jlkH1XGcrHpqUgxNTF1qO6EQY2TmECQ1KYTAgUgIAEyrERK/WKBXkSCSC/gqb1JcfxNh0IqkbmrqpnV9LlIEkGuSNFWoa6v72ojIQATEIESGbuRegVYD4KX72S/0RlgrRte9DSC5O/qq4zFwBVuZGVof5QAOEFZLN7iABDdYk9fB0A2vbPh6E7mQQn5lpd2NfDWRETABmJteFps7moFVdwWtplq1al2JwLUElcHdURDRSXBta72IoRA5IIq7QzFgMi1U0Gp1NTBwr2peSwWrJddcC5GSLoENBe7u+lf3u/T+CEUv2wf0gLwOg5gIkcwvif9WldAJaX0/zL2iIvhaaQcEwPQZHV/+SMISMDAjarUFfXPTMoKGMmsdP+5f398iskLIyArmFWxWj86MiF7dogQtxQxF2q5tkcwcmhAIDfKsDOgAlYdjrupEHJP5Mjwfh3FcRECLjdPsWhk4hNA1PRGlmJhJiyFgaCI4Coc2NeD1++///mZzp+plXobzabXzbRrO5yUEDmGZjst0mNHAJnv/j99P47N4DKF3x6plzrryhgjVbI4hVjUBNM2zEyPWanZ5yGABJxBzMrcYxBEcwcyJyQkQWuLkYNWJANbNFhGH2BF3hBFsIQwhYIi2zDMqhtCHkJCQQ2ACB2UW5ggkxGLmbEYiHFjaeHP/djx9WOwsUUotZMbUKmGttsyDqyJGBKpZGfh0OJZcAYC4ArM5B1tIJwjStn1dljyrgzVR2ti4adGFMGomVKh57vs+T0Ui23gCEiAuUJv7DXEaR6fDaOKbngFsc7txTnc3Xx7Pe3LbbPuxwi9++Yv//T//TZIwnsc2tedxrOSRZclDEJ6GCTES1r7tBq3gXnKxShJ8LmWhJQaWyKFJy1xqnpFxXkopy2bT3m+bPOt49k+Pnygig8XAbQzmUq24QSlWXAnY3MCtmAMRmAMBMgJRWcfltbIIMbut7Wdo/vlh8yf/4//59Ve/+PbjX/5f/u2/iU39z//lP7W36ePpPMy1Au8X+7N/8S/fPtzm+ePT7/9+t7mL2z8uzc4oBgQf/vH8w9/4vL//k38R7n6eyzJPlQH63n0+wXTK40BAEO5w8xMMiaKAnfO0IN+l7U7B6nIgXNADcQsoiMaeNU81j9J2IME8sy1uRSQamOYzUStQsQ75/CRxx/2XeLMD6RXQgNx1pXuggoQExIxuPrplRHAzAmOsCAXqTJYBFUGtzF6WdevlyO4tUHTugCOQODTmM5ChlzVQX5qmLovOB0JCcqhLHfbU9AAIdYI6oQI6LLmCGSfQ5dFhY1kpxYUWNefUGkYJEZcBNB+fPwzHQy7L5v4e424/2A3fH779fds0tw+/0GkaTmdwZde2aWPTG4aq5A6ha9Nma06ceuCICMSIwjYnJbrdbp4en/Mhpy4VqLvmzfn4EZzaNpl4nkfgehyOWQHPYyQP/S6lBmVH5IfDvk/tJgZkLMWaJiEiVwLdcNrmPJvPxNi2nYFprcJSbDD0MS8LanvbL3V0LdWezvn4uvljKlWYD/l427yayyFS77LZbvh3//jrTDbmYxNSLSfmhRCbriPNbftwKJ+6JgasBezr168/7H/YhZu+e32eqwG1zf0yD93mVbvpHp+eU9P3TeOWx/NToMpqtpyPBLuvbz99HLLqVz/75a//v//HzaZfxoKSvv7Jl9c9Gy25dE2L6MDMHHIZrRa1GiQwkTuCrRo011ojx1xLJRJhRIwhgoObrnH+pkbouc4rzWRmjlhUhZJQQCREymP2VWGSNQQJHeXqXLFtOyQYhnHbdhLC4fTctm0IkQBMyzhMRGxamhgctN90H999QiIFn0sBpdNpXJZctUJeArrWKjF8eXeLtiREIlQVIQCmhDwPR4oiiEvJjsaMIhgNTsNQa80KtZqWxTaeWjZd3AHVsi4A6EBRohLNxZBos9mM55NZJbNg9aZrUmi0ZK9zJNzu2nbbaaXzMethn/f7m9dvaLOVfudNnA/zPOc5A7fbtPXFqp1p/PhJLTcizf1NEN7PMwgKxx8+vXvz6v6rr76qH/aH5/zw6ua7D0/jaUTwvIzE0YsxN8BgtXbbFryGtDme98CMhoaetabVj4ZEQohExKZ1qAOacyQ1A1fNRqt2uBZXICIWGYYpxMjC0qVaCqjleZE8IyVGfn5+bLt00++ez0cFCKnLOdu8yp9lnrIWjRKrKseGOeU6WK1uhki1VgSopSylzDA0qREJZRWv1bwiiloLAgESE6tlLbrC0FJKECFmJqmazZ2DzHNV8SCCxKYKSMRhnSgAoJnVXAvmWqAN7QtyYaa1sd7M11TxdcbtjgbATH6VQgBiXQmgK4uDl13lWmfzIvjBFYVfEh2ueiH97F/DqwriirFfrBOX7ekVO8MLllgDOl5m4i+5LusFd0HddPUBraXljPAyUSdEIkaiEIIIN02T2rZp277v+rbr+q7tt23b9F0b15Sipg0hRFk7ctAAlrKUZWCkhrDFWOsy5tpstn3TwuKtxDdf3//948fn86dpOLnXN/ddDd3hMNrsBXEXm/04/PSXP6lePn04CFEbt8fDEqPcvtq9enhVFGXTf/nm6y7ab//mzyNmJpAQkDCGlsiJoNtuJSZEadpN290VdQc3aMrijsaGrrBtuihUqtYZLPswfRDwFJvh+Tm0/Zzh/u7+u2+/G/uB3A/7b5vUoYOYW86gerftfvsPv9neP3z68OGPfvYLtRi65vDxQ5u2TKC5ukLN
i7rf3t2iWpTABNP56fh8QLUkMQYellMTRct0GJ+Fo5DUorlUc5cQUtNsb79kDEsumhVN24ZLqbyaKYj2+z0AAdD2drd/PuR5JOfHp2OIMUb5/f7En8r/8qt/tuj43ccPTUg//dnbaZ7K4dB92Q7TebO5kbYnwtc//cn29S+G4d3DF1+ZGhCBo5m+wGP40XrCK0NzWYIXmY9f/35p1rmgeVu1QFdgfiWQXkinq5buygrBPwmovhrULlq3y8vxKxX1IzbohaJdtXMv8jvEC6FqV1aJCM2cEBzEsQn9V13/EyzacmTXtgnTUufp3G9vHCmGppaSUkANZEzqboXIkcThkuqx7fvlPJoiUMjV3WtiUcchTyHIJvRPpwHRexRHeDyPYw3HuU4FjtkYYSt8zv5cwVjmUgIZmhUzRHec5qXe900bnZMdhgyGyFhKDsSO1iQOjA4oxKdactFZvQCbreEnCOYAXgCLOxBNxnOl3st9oA3DXKobj44/HCsBqY8htY7EKQJFKHmZpvU97FKT5yU2AMDjYe+aY/fKJZhS026AGYkBK4KjuatJCiueX+9AtWThRDEaEVxuSlf/l19TWi53tc9LDl8qzy6Eor2oJPFHX3bVaq7ryq+80o8+4kvx/cty+1y8dv36H901AQF5pRfXd/BlPQIAoDIjEIYYiYWjmEJV06JWzazYulmmiOhutro0XddPm3vViozBVM2UWAJGdF01IxpDjVFqtaLERu7iolZXGaiZi7Ca56qFcMkYHIOs9WJo1dwMgAw0VyiG5kwAATl5jBiDAWety1nAZzD1aiRTpmHUyWjKUBA4kDFxRAGelzLOtRYD56Zpm6ZLfRf77SowDSHGEIg4RAlRLgXfRABECFgzoqERSwRYVWMIAL5WziMAsXtArUQZTLHUK7e8Kh7cURyAgMwB3FwNnZEAQB2MLsUe5g6gCkSwkndmDhWIXde4JF0BOq7xgOuz1BRM16YV94IioAhAF1KRzEkRENiBeK3Z85c1C5drCs3XgwA4rcoDZndHNLeMQAi42svc1M1MKwLWqrXUWk1VVR0vpS6r9FiFBUlFCAkFCVayEQlJAJAEWIvmpfoAgHrZDTDUxQly1lzKeTjnvORl9ppryfNcDViI2MzcUfjST4HojsxrSR8BYq1lhYFADqbM4g6BBBFXk6BqrXVmuVjyr7HWaGYZq3oICN4GDHAGn/p+xxRVO4SwzDl04TycdtLGmLTkmkvs2xBFc16mxc22NzcSgkQBzwCkNVdXM601ExOT5KzuhII9d/Onw3kYi9ZtGz89j8AIxRF520dmJCQ1I+ZpnAnlptloLVpHA5cQXt18mcdhnqZa5jW3uI2bvJyYsMzzp+9noJCzLud8+OHTdD7d3N6eh+l5f1D1EKJIRHBmVgUkB0RhdrNiNtYlImFFM1hKlsAF0Y1adifjgAhUq6W2NwTz1bGJSChNQ4EpBA7CIYSml25LACGlkGJZnERqtVpKSAklgYNIcK/MFIM4c0hxOVZwy1nbtglNUxVYmoc3X3zyb/N5Sg3HjodzNZV5mqsG4bhYzk510VLqnAWM0ErOC6Jvuq6T6A4BkS2LeF4KBS55zss8zTM6tt0uSFtKvtvdPB3e7Xa3IVG32z6fT+22I2yenh5vHl61rQzng3SgfRdCt9SJRbALkPnD+4+7u+a8P+z6Zl5ODzcPAT0ISuzWSes0zn1s2pjMPBcAqO4UUhL3cTqRgSGByTzbUj06NTGp6zLl1KZdSqdxj0zEcnO3raeSiy655qKuSuSXC9vd3QmJSUpd1s2vI6oaIpGsbam0TrfWGbGpMqOp2vUh9MXDm//853/55Sa9fdP+5tvf/P7juxAlxA4g5cKwu9v0r+fDHw7f/3b3xRdy/8vjuWXBBmZ4/u53f/u/fvXLP+v++F9PuhsmFdYmgA9P9TjqtKBXIqbuS423Jh3bUIZP0/F9/+WvoH0wABtnAAGJgKEWAJskuJei5YQcITYWApjqolbAXELXpGA+n8vhh9DvwutfGfYqjQIYsIHVWhgBzcScbKblEcpTGfZsvlYcuBkiomUw0zoDmNeZmbzOCGDzQfOEUMxMmo42P9F4q2Gn1LiRGnAir8V1cXVw0DwYJ4zitSKSlQmRwBzU6jKbu1UA8HH+ZCChZ6cIbnkZEYO5SktlGnQ6TPsP0/EwzAsCHJ6O43JUQ0cZjic7TezNPA5gJTZNG0UkUgiCghQwBOlvjBsJLceQ1XIt5lUVmVOZsS719v4rlObw/JjnvE139/1X+/m4KBmVUisydV17ev/tzf2vunT/5uab8fxYZm93kbc3VhYTqmqLeiMp1znPh759S3Fj9eSmCOwY3HWY903cIYFiHeosYWeOS14CqxBv0p1ACjEule53XxAzYSoehmk47X8wmENihNnjPE0fBEO/eQhMWKuPh500iGFZClIccg3pRiwG4D7Rx6czSB6W5y48bJrA2525DnosOszDKUNtmh601erz8nqcnrMP//BtfPP2T5bT98R9bLbPj+frzsiDiJkigJkzETgoaDFDNSc0V6smLCuXZAAkDMTqLhKWMgcJtS6qlmIjoVGrpebVhl3VYkwOVLy4GbkiiYSABGMtsWmD0Hw69ylhCppVxAP3QYKR3jevGLzkakt289vb7nSa3r875jIfDo+H09mrL4sC41gLApRpRqtdkIbx9uF+yWevdfj4sW+J2ijMUdaUQyPyh/vXs45o5kJLHmKMjCgS5qVZxtndxiXvuqR5MqLQi5s5FAIuEIBS03fFiTzHZgscX/WvOYRh/wdmq1pCtK7t5qnEwCLRTZj51eubnHNgXo77+bRs3oi0HXJiFnbPy7LMOW02abPB6st8aqJTBFKcFJZSHUCa+On51H986ts2T7p/Pt1u+6enAxIseQIu4BRQAsc8L7GlqhbCZrd7qMtJc0X3gOimhuRmqOigtVYHRw5IUHNmDsIMjsScc4lJkNHQA4sZolBVm4dJi0ogrxkWA5S+7Sk0utirzds+4TicKFCUNM5DaEIIWimba/FiDmUciZYkwcAkMWsoObtD1/W11FLLvEyYZ6u6ameI0FAQyNyqKnpeA1CZyc1jTGa2NuuG2Cx5NjUWAcK5LBEhpriUcj5XQlZjcGPklLrYdcPpWEq5QlYw13VEeTHtOtqFc7lwOWvPBriv6m5/wcUXobrzpZsWCFdBOlw1Pr5GLL18y2WUbVeJBgCs2qLV8WNuZkRIl8BNv2Js//wZvMRRE6KZXeM9AdeEzStcW5NpCC5JxUzMQhJCiDGm2DRN0zTdZtv13Xaz6duu3fRt28XYNCnFpgkphpCYWWTd96Ajci4ziLfzdD5jbD2JdP3DF682LX3329/svrj59g/vPh5HB759+3WXwvvv352XvCzztu8f7r/8h2//kAtIs5gWIUopqWIK/e7VXWxSyXY6n1438fzDP74/Pd7s7lGYmZvUmBEJ1jybOjjVUkPECeY5f5IQkCmXUbh3s2F+jiFhCRV8KYpO283Nx3ef+HTe3ga3UvMkxsM83z68fX76aNnmYdk/HUN
I1XR/2EvAT0/vqOWqS8DYNg9Pzwevejwc+s3udDyXkjkAJSaUpVYFUKtOVqYBLCPhMtdhHMs8cUBmDCQKPuvsZk170/fbm7sHDoFCOh0OeRn2j/tIgqha6jyMeZ5cLSQydG4bEq/gyxFaocMwD7k8PNydic9L/X/957//t3/2pzqPuuj7T4fdzW487bnftrsdCUYJJWue68Ore35zk2tes16uro01GeMFll/hNOFVtbPOT69AHK+6kgtiv4oK4KVU77M37EcEz4tqBD9L5T7Lga7ykqtT6DMPcCUN/AKAANb8I4fLRUBX6d7nr15h6yWA1rzleCubr+r0ITWNc3WYlzJz2nLoq1VDd1dHQXT37GDmyiFydSta8xwlANBpyaaoRrUqowfiuSwphTZuclVzEMQuhsfn82Gws8Op4vNcV+9Oy3zK+sFkqvZavGNHx6aiGRRVZqljUaDTqOcq5+wKmAkaJyZk4XPRQV3Ji6MqLgWZMIIxOhNV9TVeLRIgGRDPhtXDkm1D3jFGQVfLVYcZROjp8RkRQmd9mySGaZ5T6qpxrRB4I6JQVfOySt1JYlmIRAy9uro7SbBSnJESA9OLIt20SLMxpitd+FKl/pLJf2HBL3uONYT45YzB9bxfqZ9/uiyvX/dPiU1/ESddvv26SD9/hcOPFurLS/OXpKwr+/kjYhLmpV54fiKJkSWYQHDUxsghL7nWuWQ1dzAnRCRGWAteiYDtEj9XHd0JCQkpMFjIi6zPVyYiErZagRANzQHUVtckVq3kvix2VvWio8AgLpAjaAQndXK3akWtAFdwBskgABEUxUB1yVlrtaVoNjWSCrwYLsAFyJEaQiASEXTQUnNZAEKTLvZjiUmIRZgYUQiEkNkJDQEZnEhxDeQ1AgCt5KsO5ZI1hYQk4g5A7AAOBKuXRysBwNpoZgbFyW21VgA4oRuYm5vp+vXr/YnWsYCCqYHWlbIBsJUWdgd0BluFZPYSimZm+HKzMUcCr8VpHfoQgiMLioCtsq9gjugA9MJm0o9vWKtUeG0lXVOG3Ay9Qqmm4LWag5naJYIbq5q6q/nanoTg1T5nCCopIAgjCTG6SEAJDowQgAQRGZGIAoHmWdlrLaoGTOSMzMTiTOc5H4/jfj/MJZ/Ox/efxurwonRzBjNdzaG1LoB26VOlS4sFIZEEcEC3qsUhFbVSai4FqcSrJPBCFTEDIWPgICmIdAmyPpFgqSMFdl2meU4pstLNTQtex2Fw97Zp1fIwLEGka7vUtc2my2WueQJTFjbXUgqi01p7ScwCiAQsalCW7I5AwJHP49B2zaRlt+ubPkHVdtOEKFFoMBNkqnXRJTV0d7/LU0GYT4dnqyYBq2WqZallmUaCIsx5WWo+PL/fPz8ebS6A9vH5UzFt+8Yx5GJZjRGmcQZwYs7L7O4piiFxQDMjAgTJVSmkqr5MKg1j4llRUFKzWcyZSI0BTVCQJMSGRQEBSQA4tL00rVVlXjhEU5cY5rmoZSeSJql6Sg1AdXIJwUSMWdVL1aI55SU1rSuY2e7rbwzph9/+LVFTyhySwOJFy1QKWmMVzsO0FHeF6h2B1zrpAgIwuxueWCw6+zjOcyYECu5WAgugIOL5PFGuYHO/ab78+s00lfN0mgr3Xf/0/Byl2968rrUcj89t3/py9lkkOBidB49xA0DlrJBkzONN0/Zh9/R4jE3MwyRCbx/ePB8et5vGKzCCgk/zEpEca9GBY+huYghxHIdN11ih8+m81HLE3PcxddvDMBR2h+3Hp6VrStP7btMuNeyPRZhzrsM4CrdFCyNV12rZIQCQqgOYASIxI1nVy9wA0VC9OoCju7pb1WsbIOw/ftDD05/+n/6X5fz+P/z5v5fYB0pkdn7++M3P/+iX/+wXPnw4fvzDq5/9mbz6epgs9dAnHf7hL88ffv/1z/+v6c0/K5DQ66YpsHyqzwcfRzLjfsftXcWtb74Ci7h8qk+/B5d0/yfYvDWzsjyTcUi9s1gFLXMbUee5zGcHSLevncTzjGhIFEKDFXA5LcfvEAL3PynNa5AWsa7egzzMgaUnFc5aDvnp43T8IEwxsYTghkCRmNQVkD10oIqycc0GCxAAdFonbGJoq5YBpnM5neH894Ai29fSP0BokQU9KBgAm87oLiHW8Ylr63UByFagziNxcFWvFQG8KoVYSyWkMhyAO7cI5tyk1N6UXLgMPv2wPH5/fj4dPh43Nzc5V4w7Lfb+D+9snm4an8/PeRm71AQCipxzJeLYJelvJEWS4CHkZWJQJwIm4L6JG3DlxIVoqhhiu/2iuwF89/Th7u3X0wc9HU5M4f7Vw1xO1f0XP/+fuP0mIk+qxY2jF5wgIGNEpxijxnbWs3Dbb15LuIO2XfJkiqf9c5Q7ks6lzdQSUtHZi26gBQqVaL8cv+h3bF12BgCSgADD4SMCO1K/7RbAx+P3y7mE7e1ik8oA8PCYz23AZB7KtG2/Tt2rXAfLNTXw/PG7GG6X46ISN7uvpvmd4EA+VhOJOOyPQNNYHs+l9JvX7nLbf4llQsubfjuf9/Ppsdve+GK7m69u7r8cjuPLZqnU4swsEoSLLkjOwEQM7sXKWorjgOoFiAIHtQLuuS7oyJzcXTghGiG5F3BHJBGu1UUYHM1rqTVSJFnDjICJAdmhujGoC7K5xsibdpPLMo5H5s7dGB2ohr4xL9/94Q/vvnv/4f2nJddlmUPgagqWQwzJbTqdt4KpJbKKqp/efWtu2+02BlmGYyM3bsps5rhMs4LO43PTtV1qs1m/ucl1UVOtiwgJ8qi6qM5asbhbbsKIBMwcUuy73fn8DHYkZGJSjcbsxGYhihADS1tqJXARULNaDVFNF+ygTbG92TjCOGA5HU6P77c3XzV912+aeZqOh+H5/WF/+rDd3dw93H54/z15fft6Ezfy4en0dFxQRCR8+PD8zU9TSjKe86v73e+/f19KEYnO8Xg89kAxIQcSYQcwhK6/m5BKeYaVYFetqkQARS9uKUS3YuZB2FRLBiNAMIkSUisSm26Htu66sJbspUoTmamAuqMI5Trbspgel3Hpuvbu/u5wfpTYtHjrXoNjKTnnMYQ2ps7J1VQJFWGZl0BY1UspIigpqhuYqzkHBjN1qKWSGZNcZpbMCGZ6SUBARncrtSJkt5hSIxSGaVxnvaWWWhdwYIlro9haZUZWynlvWpcpv4CK1dn2I+/XOm9YUQq5mV9VD+u43GGt5b34ua47sc/Y+VIUhWuxieEldoUuYOYSOX0ZAprbJTl7TUKgdbp5/czlNf549rl6Q90RiNCuyTAX2dGlaW31jSARrtKwEENMKYbQNKnt267t+r7vN5u+7/tN3zZt23WpaUNKMSYOUYRFAhER04q9dRXmewObVmAHG9s93A3H6fnd43fDQTX/4XB8/vR4c/9wf3v3Dz8cf/vdY80eEv7yF784T9Pf/e63ROHtF69KXgit71ppU4otY9g06f3TB8vl/uYGx/k4ne/vtomhem1SYvBScimwublrYhinbOBt31UFAE0pRuGqFb06qjPWfB4mQ6LzsITU5py9zP
P4zKixu5OYvNT908fN3dsQ+0/vv+0aPg6jCE9LEcUudmWeYytWsgOcp0MIGIW9aNu2wu4AfdcSIkp0N4lhPg7ZstWZdKh5ISSoWbwEiOiQda4Et/ffvH79JUunRnMtvpRp3PuyMNmb13d5WbzMQ1kARwk+11mnPE0lpGapOaVme9vPh3Ij3fy0f/r0futhkfj0vPz53/3+//n/+Hff/f53Piwm05tXrz69++4tU6BIAoHZTI3VzFa/44tL0VZE/kLkrB+vhqHV6nHlleBFl40XaI0vSg/wlT2CdY3/GOO/8EkroHshj1amgK7MkAO4G7wwnnQ92o8lIy/Hu/h/8Eox/Zjq8lV+SBdMyRg23LyBkjFtN9tbIqvnI21fh7j1ckLwIIKOhAlBGMxAiTEwg2PVGlM3zaW4ztmWogagDsc8JrYubhereZ6TcIohlzrMOlY6Zvs026TASAX8/WynDM8ACzpUqKM2QbYRvKoahIiGkB0nwyF7dmoFqsOgyqrmXh1mtaKYzcVhhfDuVNTQgRCJhcjFoaohKiAtDoXwZJYdbtgDgTvOs27aMGZvz7kFSgwhUClT325SaJ6H8yaxhA48A9h4eu7eMCgQCFNwIndbQ0G9GoIEadb8HnLwUtxriBHWTqhr5v7VHHaRDl058csq+7wwPi++y+31+unP0rR/Kvvxy4l+ca6tN1pfZaB24aT8nxz3qpVbX4n9WO8G+KM1DkCADi4SWGT1mhi4IAdiIQiciolWn5cC7lqraUYQv1AMRrRKagicEFxXjgNZJDQp1dKEuORlMTNhMc10YW1XAtcY2RzMaSywqEu2nlXQOoLoGAzZcU3OMWRgQE5MqSjQoqNOAJaXOist6tlUybNWYzYGYORAsYshBWQrOpXRCD0liQlDEyQImqOqzgoMaDNWMSaQQMqkAWJxis4MEtdMG3e3qu5KhO6IBqaVGYEACB3Z15QiBGAGcAJ1cHAFA1yd06buun4EZNTr/cAdTAnM3Nyqm/OakmSGYOiAjq4FgcERa77yi+u5NlcFdwJwzbh+F6sDAhFARWBEA4/ujsiABAaIBODgeklWggumBCRkcTN0B8tgBVau7iK1RzMzYF2pIlvdcWzmsL5UWCVX4Ai6hqopoiswgquAEgt6cYpICVdTZ2pBAtUEJfs8OwranJoA5B00TkSMuZTnD+en/flpfwaErolTqVkNEEWiO5jr6sMlBHMjpItF1wmQ1QqAC7M5GDAQq5m7a63rVXChirabSCGysGXN02DMMcbYrqPF4k5N2vZdf54PwHUZjiS4u92GFBwgNrHddJISIEx5YcIUollhEUVyxyDEyA6kkNGMEYF4nudlnIEwtTEvtRY7H+fAoWlDSIFjQGIEGufJwVIrjiqgbWzV8TwtsJzBENx08TLPTigtEYPP1WrO4zCNp+n0pMtsTgA1SEwUcymGVK3muZpWAK81a67A7OAzCQnEPoBBNRBAYplrWZbilc0xSe8IZuwYDCFIqmb9pkdSsILm4CghSWxSt+G0AW6gzGXNBmuQJKSuml6eZMwCIkFiKZNwU4CtuqQAJycHLQoAXqobzEO+//Jnx8d3ZVn0wpznGCVxnEquXhWdoyxTPRwzEc+zbmIoNY95aQOq1SRktQBRaiIui4gQ4DJOgWTTbRwAKOyfnpxv7u/u5ThPQy7srbR5tnM5G2YSYEVocebRzft7aVPPTex2vdWyTPnVq9enx9P/8Ge/+o9/8Z8UEYlzrp+en8Zx3G56acJ5mFgoxeiMIXZVM1UlcUHsm2YazjE2m66dl1INZrCmiYw+LcqrRwR4Pqnj2Gy6tgnjaQCitklaLQZWVUEwIF/jxJBUFYnAkRxw7f11W7PuaqngjmBVdfUjrFfBH3799//8Z7+46fQv/uLfQwoq4XCc3mzDXb/7Yncj06mU881Xv9D+rc2Fau04n377VzXPr/74X0H7pqjUeUxUQfd6eq/zHNoeIalHpztr7gljqM/n93+z3W69e2P9A7La+ZM7OW8rsGWzajFF1aGMRyCmzV1BAq1u9TJlrnN+/i5Pn9rXv8T0BrkBQtPJrJijQNwQwvgM52/Pp+9St0mxl4efU+pMC4aIEGFNFPZ6cSi4u1f0Sl69Fi8zo2mZtSwGA8d73Iy6nGEZ6/CBlkeWSBxJGnAyAwoBKDiKMKMumoc67alpEdc3mYRbRLAyTMNxniaEwGEJWyAhRHGteXhf8ziMn8anH07HZ1XpmjCdj7G528Tbv/3Nf9Rx7vsQROs8b2LXNv1UBissIVFIxAkoVp3z6UkRA3e1NhiSsagzQgE0CkyMKYZhPh8e3y3z8VyW7/ZZxNs+VioE3jbtVHOzeYUoPfh5/6Fvw2F+Pg1LQ7ttunu1efj0vOemq/O86RvxKD5N41B1mOoJ0VwLEhJulcPp9GQw7uTWx3mz7RfZAuE8VSLEGBR0XD4GbyjSkuGm7c8f3pvl8Tj1r9rT8bntAmltu/5xfLx7/WUARXSDZZ72yBC45vnw+u6hw07HtkrY3rZ59NqEaSlQjv3tjaFWcEkyL8dt2OhSTtNTJ1DGvXC6vW2qQsX68z/9Hz89t03z5en02+s2DEOMboYEQYIwEsm8zJfnM3oMjZkCeIyNaUUEYUZExGRutRQCTLFxMGSep8HNkVFXd7QqMQoyMzExIotg1Vpz1pKb2AghsyAjVggxLroULbf3r9RgHiYE6NttE+nv/u5vDx+fAuhPvr4/nZf941qMkfvg58O+Vpe8xEbqVLa3m6aJtZbQNO4muHBD3abru7ZNvMzW9lnLeB7O+8PzHCankLxF5vOwtKmJMbQ9FgAtoEBIODucpxnRujaN8zg/PpLWFLlpts1Nj1hJQsmlTDOgS1yrjlFEum43V0UwAnc10KyIeVokxu3tLTK32nz7h7/bdbdpuwvd9vb+1aZJ6TF9/PDxPOdXr9+enj4K2Zum79pY4TnnYuZDsfTp0G77zU37vD9tNs1+v0zjTFHbJmmdpqH0/b0btE2jw2gQbu++1nnJy3kVeiMCE5uZamFmJlazwMREWqoBxNgwSdu0qelzLst5AnMRycsiiQhVazFF4QAIMSazAgG9Yl7O1Wf3ltBrnhkbAFiWHETQU1EdhxMCMOG0DMzMJIDoCCGImZZc0B0BmRDAWYKXAsQIUK26GROt20EmBCStdY0wECIENNdpnpiWdWwLALUWWOt20c0LQRAJzD6fj4CoVVOXLvBgbeteLWYGl5ACuhjEcFWmvhgZruiEEG3d4Jszrg0tV3AEHoj8Ou++4t4XVH75UbQmYYDTdeTunyEOXkue1v7ZC/5BXJ9g6O5Oa0AmCK2BRJcfQ5fe+/UjSwgSJDWNhNR2bdc2m82m7bp+0202267r27ZdM4lialhCiJFFiITXy5Ou2g9CBqcgdTYEi10aRv7tf/vDsN+/f/dDIG6oAYRvfv7zH54Pf/6f/+thNGk3/e7my7dvjsen3/3w/mbTv3nzui4lg2xvt6fzvP90ut+Za3388F3TNDd9d9/HnKdXtxsQWYoB0fH5hFYxhG67qXrKS2raH
hnRPTKCwTIcTAIBlPKoOafUi0i1ag4p7ap7mfN0eo5NmM8H4dZDBwTLdGy7m5LNrJ6HnJclBCa3hqW4L8PQtGGsCwkWG1Psq1Z1Vy2eMUpspT2ep7iNHuRwOOdhgroQ5jxNy3SIIuBC4G51GMe+37y6ewhx+/79++N8vt2+adsuBtl2Tdi2uuRhPNe81DwC2c397TLNIfGSnYLkqUC1sUwLOFUj8p//7O3T+8cyK7Xc4N1cyp//9V//9M3PU9IYN8jxfrudjlPXmZuGmFTXmTpdkrQM4KrT+SfyjRdfl18WovkLQbrO2l8Kxa8Y+9JBvsK4z+qey4VyVXv8OHjmgvCvQriXC+tFxwR4SduHi/bEf0Q8vBz50o51JZGuiqSXfK9V24GAFLC9hTrxckuxs4wpbJr2VV6toYQOUA0ohlKzAQInBQV0NV3rr3KZx5wxCKKRg9oajBQc5HSekrCBIfl+mB9H/7TgU/HJoAKSuyLMDkfA2Z0ZBzV0xIwfiwoSEdDialYBjSjX6ohWwNwZIQRZHFWNAAI6uDUrEaMahZAJ3Q2pgkeChrASqrmgu3p0IiIzG6sJADEqwjzlwFJLGQ6LQE+7jXCTSw6cGJXZc1GAKiEMw3njF2IbANZwDmbyqloKcXRhI0Cg9XEuqYEg16zyq48Q/4kWyK9E/PVfL3c3vAqO/jtB2eeF9Dnr+uUQ/13X3mVF/ndEJVxURXhdpxcH5bVZ4CpP+5GoCJDW+nMmYiRmYgTUWoRkHYIlDB5BUnQHzdlqrEXLUs3MLQO4oRMgXjMfVUHdEJlCIBEUosBWFkcFNDX9LKxa7cxE5sCMisYEGbwiApACVYMI4O6BmZBExIHAVYsNc115YHeqiMWxOmp1QGYkXgcFISQmRqv10lbWtonQAyOhuWZ3zbWwcIbCC61RvCnGTBCDhNRKCCF1YAlDq8jC0QnXh/i12R5dEXR1ItarNxsA0Fdli6kjmOklMGR9gLmDqaMB0tWpaOuwZO2Av5w/UwIHNDB3dVyb5w1B8/XkrovPwAx8LZ0gAyBCd3IkdHYDdAZEEF8T2QDC5Sf6ZzElwKU67qJ2Q8eafVlMs1VdO1jd174lRMILr77O46wCIJGjIxABkroBrNlwbqqonqsSYS1VAoUQiA1cDWmN3AaRVUYkzJSzkddFuWJi9oZx2zbhVdMA1Wmah3os1QqaM2HVNSvRDS6L8Hoft7VoC8hLLYwEbhUNkQwwVy3Vk/m6cYIXqqjtsNZZBELiTR+ipNkWZHRjkcbAJrd5OCBBPc4OIUVxFOfY3myQWYWcEFXB6kqCupNW9BUXOBYA9Qww95u25DKWaZozOqXYNFGOz0/slDNs7iQGrLl2u51rcGl0Lrt+F6KUnNtu26Z2//hBgABgXjKhLvMgSMRCgFqXeZzyecrztAyneZgIPbZRIB4+TdnqUotwqMXyrMxsVua5FlWvBM6EImgxBCBwsOKWqyJYK/E8z6UYF+UmGiLy2peM2aYtN8wYQ4fOgYMWiJFTvwvdTjjMSwkhzfOCxFq967Z5LAAem9YgckjgSgLgGEQYpaqz0DzbPE2lZBKJLKBZmjc/+9Nf/bf/8hchNkuYRaoEEkSyOizVnLSqKWz7Tam4LMuYZ6+zgQLGKHE/5YBcASfkJvbzcZ/YGhR3LIwkYA4Um+ms3x6+v7+7vblrK+Qv3rw9nY/VMKVXh+enyBGRc0E9L7FZzM4ppbiNcoDzfiaG6uMX33z1/P9+9zr91A3AMS92s7sfh1NoaCkVK6ckaCBA1alkU0UJtNndb+/944fvQ2hTaMbnAbJP+YlTSjHmcQazS6UiqC1z6jcidDgdkYUJcy3qam4I5K5qBoRMcqFNQcFxdReCIxLrRfTspVZQ1av49Pz86F9/88OHfxyWs1Eop7xrd6mJbzZ325sHaZvAm+aLN77k4f13MfJ5eAba3P30XxYoVRcEiJiZZl8OCJUDIZFDwP5L2HwNCj4+ldPvb+4fVLbz2CSG8ePfAZXu9b8yE7OZhAVdl4Plk5ql7WtqN2VZhJ0xQxnzMPhSEZr05t9A/9qp1pqtVJ1zSKETsvH98bv/5uPSdZt29zPcPkCK7lwQAMA44Go900rCbnpVaRJAAGQjwACGBhI8JIiteQErnmbUgnUhr3U6BnIbHtFcOKE1ChFTS+Q27pkIU2duYKimAITIbtUNdK5aiISFIyLrcg6SXH08Dnne5+U4PH0ydvc6D8tcXC09Pf7XeTm4gbqN+Xi3vdEC6oM0QVJDwpKk1uF8OBcYQupTd5vaHXDimChQMS06nM+n6r6oF0SPwXnxzmzxSUufYq2L+zyUvSPGmCwPrHkBbiK0kdXTuGhHMXqdjodX3c3IKXS76fSOW52Pn3K2tOtOBbv2XjjULD3dT7PFOsRWetly5jpPVYemYYKc60TbnVUXgZS0QPZ5PM/Tq1df+7J528v+8W/DhlK6jaEh39y3BMsCAkgGYYok59MBpRmHYfP6K+BNroVFfR4bbaaTd9Q9vPriPJ0cXg3TiGn7Tf8WC2aYz0OmSKAm0TDQkqEN3cenIwKfn79bxqd1KTAziFjJrrrYSERWSlFjxECyzn3WaAp0J3Sti0hYNTW2doQ75JxFuOYMzkiIaG5qZsLrNJKEk1lVLWvfkgFGCeCOTDFYDIIlMZPVHFNPLFqn3a5rY5zH8+9+/5t5nPp+06Skmin5zTcPLGRlQcTn/VKABYCsSgypa7xM5DWkhoPUebZcObUgoASyiW0UQttM2RXG0+NFOmy627bzogVg1DpZcXczBXdmHnK93W6Q+Xwec7YkQoqwzHQ2kdC4ukIIgUMXQ3SbiiOSmmU2WCmPzV2LFBwjAp+eJz/b3Rdv+tvbX93dH5+fp9P84Q8/vPnJz9vt7i78bPv2y//Pv//fxOPd7uvvfv83/Xa3a9Mvv0r70/DDp2czPJ+zG8RN9w/f/u7+/otnfUSHshSJQU0F01JLYMIMaA5W58X67a1BrrU6Wi25VldTQjJzswKGxmAAIhIltJu7KA2UOg9TrQbkgORVK1Q3QAfVGpuuVCDEaclN5LbpppyjMAc8DvvdZtOEeHoagmCeM0dA5kAwT6ObcooImpcJid1ALYMpshBz1brSHeuMCxAdbZ24SmhW9dOKC82NieCST4rEtLaNqKsqCJO5m7pIRCLTak6LQ1Hf9m3qErLkcUa+bIzULz/nkgyN16zoF/h5VfFctqR42eWuIztYXWCAjp976+2y97/Iz1ehBuBaurICpUvAAl7F7wZgdjkyXKmfdX7+ovhQ82vrr/MaWum+1gKtJBEAMjMShCDEEmJMqYkptX3fdn3fd5u+6zd9v9m0Xde1bWqalJoQQgiRQ2AWEiZapUiX4IOregQMrHplDqXiPNXjWI/7YRhKuLn/6tXN0/vDh+f9Px5P3358coO729d/9u/+dR2n3//6H55/+KHfbQ35+4/7283WiT4+HtsU7nap3zTn89j327vttu1oOB9IUWMqdQSgEBtE5xhDCqpqgypVMYhRXLUuk6SUmtZKnqeBYhSJyzzlZTY1
iVy8WcyAgaiZz89BumP9uEWkEKfnTw1hcV7mfan5dD68un/lOiM05DicpjY10zDu7nt3N7Ml1yVXMkdxFp7rOM+nvk8CfhqebDFCdZvyPIABY9Cac164CgG684fHT0rH2/uv/uiP/lUimpeTawaw02mchmEeBkaahwkl1Lkus5ojyc32ZlPb4enjhzyOuWYrNtcalqxOwzDO5wna9otffPHdh6f3++ev//Qn+9PHdCNvH76YztPaVwRmfA0WXskXQ18jKuAzyQIvCOkCXP0FNhk4vviIrq16F3z9OSnrcqjLNYJXgP/fxSHh5+N/xveE5GDkdLlAYA33cLheOUg/Eon4xYL50rS+XmB4Ua6s1K45XQEcIHVbn4fYvU5pdz780Pa7ePMaeazTKfDKPiNJgCWvKR4AsfJc64EjgXleChLnaqWaGgaixNHMnk8Dc2ChwI4I51kHD+9mrURTzYSAQRbVxb0Cl4vFCJQoq0rgVQkoCLr6ZEyNAMAruDrMhgYu5GqOZiIUCII7graJiZwQyD27TQaOWB0IqWKNaDsmMyUnIgYzJKxuijLVinNpO0c0Guam3wJj0YJIRLg678bz2PTtPA1mVWJc78frDYlESIvXShwgMBKQGbvlMqftvcvlLnXJnl5vXNfbp1+lmKvGcuW88Soue9H7XJbIZ8L8yv/8aAVdc69/vK6u7rKL1OjKfl6IoR+ZHK8v6TM55QCEcJVgAgARwxrFCIQGAESG7gxAzOKoRSsRxYBM4hHRm1y0Zsh5WSZ0y9XXUCEk4tUutobswKU060Lgmxdfh9iXhnl0B3UNiO7KEBlBiJgCMZpXdZAAaiZhpRmdCaqVUj2vfmdEQ3RUFHFElATOjNGQ3Jk5iDBY8bKAMyHHEFIMIQZAq3kAnevazgUAhLENJIGZtSwSRGtUw6BmhixuxTk0GJlAiAkQiQwcwfTlYsR1Ob9EkL2crotKzC59nQAIZu7gBlbWGns3Q3BclUGwsjhGCKti6dI5bwqArk5e3SqYA66iXUdwB6KLH231lOMqR7o8TrUCIIAisiN+XnJul4zBy4tWAEMzrBnKYjVbUTM3tUvtvZM76NpS4eAOl5JtZAJfN8/rDkHN3C+nmnDlsd2Y3cwVmR2prmVQa40gABExSmNOVityrL6QK9WF6txR+WYX2p/diS/fsr8HX7yYI3i9RB862kqVE6652oBAzOZrjpG41TUYn4WdBImZg13R8YUqSi23EgEroLGQgDeQ3KAsplAlkGFWtz61gsHU2m3b3m4MoXJgIRZkJKslCIF70epAYOrmzO7oxVyYCZulwDTN6hQlSJNEKC8zABGhA/Q3d1lt13cxtOdxptR2fV/rbLmiQtP6MI592y3j8Xh+XqZTTCIsIURgGad5nmcwPewfCdCAMCYiPJ1LwK56WuaJULSYAUiIh2F+ejqNJbtQVcMqKdC2sUXPu5tms1rVdELXOc+xCy5UkQBxu7nNy5yEMGAMDOgSGzPyUmXTAyIhxdQyBzMDQEkBByKSEKMQwPPBrLp7bDsJaTkfJcXQtgpIMQCxAwFBqeV8PELf3t7dmC61TqHpUkp5MAKXEOppJA+CoUu9CuRxlsip2e3PR8pUiyk4s5xmRXMDbwOOxTVbZA1O6ktz07v5eVrAwcGahqNzv+2WaYltMxyW+XfftZuGmjZ73d7d1JyH89gY3j88LNNy09l4+sSpZ2YF6rftXG/GfPq//c//+m9+/Z1Ezud5OC6JKFGbi2+2t8M0IXIpdQWZsU0hpDzqycdXb3ZvXr95+nQAKZtd8gpzWaqpeSYCRswlcwhd00xzyXnqm9CEOM/VHWjNoru0AK73tVUjT2qWvSISKq59xkhqCGqmqqaGCOXa/XTzetc+9L/5h78dtUBVgNhsUn+zQdqcjvPu5rZpoO4/jvvnECJv75rYu9dlmdyzEIMh+qjnjzo9AZrEjRXC7s7TLZPR+NHKkQOpBOM27rZWDiCcbn5mnrROrnMQBptteQZAaHalubWSEynVUc8fvAzzMLa7n8bbLxdvzmUppaBq4Ngyz+9/eHz/NxLz3cNX+PBW+lcKpBxsjZteJpQAkB0M0UjUa3UtJAHXGAKt7rrqIk3rauNVRNPghqBOwERiVqlJZsVIECsyOigxgSSnBkMLOsNwtFIUES49AFbH2aoDRCEi6ZAjmFvOw+mgDvN51GWAQPMMs5bD49m9ub37ajyO5+OnSNbdddVcsHOKGWat6hRBETVTqUboQBTTZvuNgSBGRpyHZ4q4aAYJTYeGlAgHnUc9GTol/DT9wEF2MQCj1ZKXpybtADy1Gy2mFTjsMkVOIdYGPbgHt7BNXyx2PJwOea7U4sHO3Lbncn7Oedc3TFBLnabTq/sHV9dcoJdp0hSFyw16Hofn1LWfPn37+u4VsH48vG83bbrZlCkcHTa7Vz+9+dnw97XycJjKl90NLcVxcpnO09y2LdQ8530XHyJt4+3NVPBQsmxomKbGsIXQ3H55nod3T0/b7R0Fvr/92W9++x+breb8POPx/s0flfNQwDk12etcy1J8zM9Y55q6h2/uXx7bVioBrfPVXOs1GA/UjFnw0gKiZuoAzJGIXaGCV9O1BN3cWBqtBaASC9GKjoCRWGKu2ezSzx4l1ZrNDCUwwjwvm76JgXMp8zL2TYdIoGXbNTGG4Xz48OldXcqbr16Lw/5wqDnHtFlfdNumeZo397G56ffvnwI6x+AO5Dt0Ldmq1aZP7ete1cALoxNHAES3drcRiaEhq/P+6TBMGYmPU2YJi4FxQDRdMmjtQgKoVse5WEOEkd08pQDotc7mSguGEJM0Uzkztcyxa2+xnqyqs8XIqs6hlbSdM6jh3ds7RchT0XJccpYo3aZnSr/7zd+l1BG1m/vNv/6f/u2HT9/Ntnz9x39MxIePjwH54fZuUXDC/eOxi7ztNl59mkps+yUPSaJaRSStBTAzh5CCIy6TqVrb38zzudSjm8JakArgCMzkqg4q3CBK0/Rd3Oa8jMtQi3IAZw/CtZZpyRwCWEhNs91Gd9DiEqXUiqbLsmhVAhduN5v7aRqC0O7uzXD62G9vSp2G6SRCIYRasZbq5iJS1cxUhEvWmgugBgkr7lu3E2bmcKGNECrCWoFNVwh78WiZGq/V70xuRkylVL9atZjYzJk4Na05znMhNIfMgK6XZwEirmkFtgoaXjwyF/ECOlzyg/AKQBxedp2XkbtedplXuwTixSlhds1nMbyW0b6Msa/ijtUMBPwZAV3h0I/Qkl0EUACITGRmhLiWZ67/rZNICSIiIcXUNCm1bde1bbvZbLu+7zd933Zt1zVtG1OKIYYYZWWIiHkliYjAgZhflBxItKbdkWMIgajrd5tchqxpcYGQKITfvt+/f/dhfzi3Ke6a/l/86p//0S/++P/4q//y9O691+Xh9W427FP8+Tf/7G9//fdVM8fQtZvAvpzHQLTb7ObxXKvttn3XdLlo2zYxxdhuEWsQKbkCR3eKbVuROcWyTIxu5ufzCcEQPE+nVWW8zNNwOiBasUDdpo6zFrAyF66bmzgMRww
NYnj++FjUpmlU01ZinhchKcuEkKJEqIjgZDAeR+hDrkWEQ2ymeQBEMIoUGoTp6Xk6Pu22t7X4NM3uAEa1WK15LT0QsGWaeHO7e/N1192+f/9k83HTCYKeD0c3k8ibbY8gt/cPQKFUB7WcF3A9H49l9tv7V3dv7rWW9394P5+ncVzcSdrtsD8w0ek03d89aM1Pp/2b1685NGp1c3ubUnImR3ADWrOx1jrYa3Yw4cql/Ah0X2iYywVw0RkhwEWVsS7XC7d0NaetKqErgr9SNpdP4I8x4orf/Uof4fUoKyN0KSq6kMGXQ+NLVteVGcKXL/ixFuTCYa0/Aq+aFSRAN4qctjDfNJtX8/LY7X6OTUdWAQXQrRQngIiWF8dC6+2CseYzEApHxACo1SwboJOTV1NTK0WToEQBrdNZq/K+4NGYwFaf6JBhMFQAR+/JCUAQGTwS+tph5a6G4OBW0S/hZNUdiQCwqJN5FzgIBKYUmGpNwimhEOWs6M5Eh0UXIFUCBwYG93USbuZmIAQBwKpntC5wMRtnTUI5oxuJkNYMgSU01WCaModmztY2NJ+PmwdCwarVOGAgQDetjhD7TUVY4ynVnJseU3Q0h4vj9fL+X3OtL2zR5/N4OYE/WhyX5XYlIT+rkfxlgb2c5x+tqh/dbeG6Rl9WyufQouvCupKPn9cSXMzCP8pRuiRdwYXUATczV68EAEAAKCSICGYAvo6oE4cQMVRpWil1Xual1lqKkq+tWUQsgGyOgMQSAWcABFxbeNY3ydYYVqZLsbppRUcFEARCQeBVGAioAO4E1TRXU/Ol1FzNAImCeyEmibrqZpSILqF7rtUUHBkUi3QNCQRpokQmqdUBtJZq1cCMghBT9cIxMXFksdSokFrJikEpRTJHUQNwZsGQUBjx2h6KDoBmvi4Fh6vjD/zSMHppdXA3dXxxZKu7XfQv7g5oqu6GCBe66Er4rcpoX7OrwdDcNYO5mwGxXw+9JpkhsaMByhp4D66A5KqIDIiXDrWr0u0q6zJwhbVd1AAdoBSvannRqqYXUy4Jq19D29f+CiJBQAQGd2AAuwwiHdRUL6lGDggKAGqMZGamWHImZiKTIIwmEgCQkB2YKKoBCYMLx0Aq9ZiPx/P5PEzjNJeJxd++2pymXM5ldl1/kct+HsnAAMBtJYUCIhA4hWDmiBgkCDE6EqJIMNNA1+qz9Y+mFSBEjuYVkdyVEYACBwanEAFBEDywm2mMkrom19pt2jYw1mpTzaUQKqChgBAZeC0VkVhk3TohJq3L4fyUsI3ccSRERaq5lmUp4N5tttKGpqEQI7i7L7e7Hryczocu9K/u77IVxrhMZf+8N60SJeeMDsACptMwIgC22N9vzvtxPBfPEKKkIKrStciMuSq47w/nYShD1ufFHkdNfZB15nuavpC0hVDOztIgKGHKdUpdhxxrJvFUlasBsQN76lpDR+aqNcXIITmjpBg3u7C9lba1adKS3cAMiBE5xCam1OZl0uKpD6Ft59PIFBAIiRBdYgSHIDyOp02JWkGtKVOF49PN3aubh28el++avitlCqmZJrda0D0vMwo45Kl8XOal1nleihUzwHlWVQV0xtq23Vw0kd43bdv1z8NzF0KKAUxTTDG0Hz9+5HDvamPeP7zdlVK9Yp1ttmGz6YiaJNWqrZ30pZb7+9vvv38s40JBsmktrosB4IcPH2+2N33XULXY8jhMQSQw1gYYDCFwIK25lhIkgkIe84cPz1/87OGWaP+4dwd1IEEJwUpdLzy16mrFQqA0LMvEklLPZOM0ESAEJCGrvm47CKnWYkTuSMSKDmDooKubFHyNpF9J3Rfe9P7V7sPTd/txz9wQSdNIalPJ1N7vvvzJT/tkhw+/LnW4+fLPIG7RXfVdHd+xxLi5JWk8L1APZXms88hp49hQ2vjmFZDYdPDhAxMtVQMZkklSxandfgG4yfNKJWI5PRE6hi3GW2xbsyX4QsvH+fDt+eldu/tZf/8rjbuhUtGFJYFrorI8f/eP//CXd5vd7uYhvX4btptqkDlqLoLFpwmJhZysWK2XOzBFkg6wgk6QBwQlqJoHRCFQXSZwRFBCdBdHd5yxZvBM63DRKqfGDJEJfEY00LN7rLL18AAy+1KiSz2/F5xUTwaZmclTancIOEz7fC5aM2hew0UQ4eO7/eE4NemGgSW9CrI7le9iJHZrWKdS3G2iQasyR45RCLWauxNySJsQN3Vesg7LCSmAtJIzAom7sIS2aU7jMUnY3XXP++PzcP6ifTN5PezfpZhA5118y4RqYx+bY81mmudCJLP5aPHt3Te27FHo/eG/jVKVWSN+XPbWlk0SmNoubceyZNfYdQwAZHMel6VmOYfYn/LA1t+2W74v0zK1oSsQKmXElvxmHkpqWq1lOLyj/mHz9usIh2k6u+VahlyH2/YBMaJ5oKh18/ruXx7nKeN+OL+rvrTNFqRQknefHuPdQ27stmtmP511jP7w8MWr5fyxZCeGPI0V5oz+9Pjt/d1b2WyPZe53nc1UbVlKs14FLESGRA6IS14ImYkuAJrWx4wSCuIqmnOtdnn6VxVmCWJWUNe8B6eU5mVBIiPxi+aoIgIxI4E78zrZWU1ZSNttb2VN/vPbmxsO5GXZbHc15/Ph8O6H71LT7b75Zhwny3MKqWvbovD0+MSG03GoZTaf7PyUgN2QRaoCxRBjalosRcEV0ZkxhhasMMsawKFmyzQsGUr240KPI8Wunamdl1IWjKF1RMUSgp2zNhTGRdsAsPbgMmgphECB2sS1Zq3Z3QIHrBNB0QyBsWmSubWtZLVpPgf37esvkCSX2rX9NM5CIfBmyvtmg5tt393+9PDp8PT9D5/2C7TS32wS91ig61sRHk7TcRikiRgSn6f9cP5p103z4ERGNJWpJcxLjilKDEVNAZaSpWl0HKwW88ixh/lcSyXhVVu5isKYhQKGmFQ956XkDI625ugAFa+lIoCTSNNuY+y0KCCVeeEQYXU4hSCxkYqIdh73KQbm+Hh43G6UA0pKotHca52mJRNj0/VWDUHRqruBIQAB6aVpT42J8KpnWYkRM1ergQVgVUjj2lZ2dW+5u6urqSKTIAMjMJnWeZ5DjMKBmM2LKSCSOjVd2257BIPvfw+XnExzuBTbf5b6XPCyX+J6fd1QriPoi959fQatTM8qP0EkA18ZOUREvPQA48rNX450obJeJO9XX8UVBa1JUp+1F76KZh2Arj808NqHe3F4SgwiIa6R1W2TmrZt2q5f/+v7Td93fdO1TWpiiiEmCSISmJmYkZhw7TOCVa51BUsERG7KRGusjYGbUuw2eDobHu7evl3y+MO799M0tX37f/+f/913v/uHp/Ppr3799//bf/nLjuiu79Lu5qbv3j8eas7/8a/+vIInTqy0Pzw14gHpq2++Pp3PQEYAZnA4H5p2Z0Cl1vlwUK+bbru9vY3be8Bg1dgUdbq7f8ttC9QKS1kWIdBSh9OzLidpTpL46f171+qT6jwzaqle8zLJMcVumTOp5XlRNAfPRRHYwRwxGzQtFy1ToXmZb8nzNHfddh
qnlegow1xGCykR+5yn/eHRqoKT5mUZJkRD92VZ3KupouRpHmN730o4jIf9/tM2yF0fhuG4LDXFNqYudW2IUU21VkLIdQYoMYBmvb/Z1q6blukwjYT89qc/vyvLuH/67X/7ddtvbzbNYPjp8RSbqYvtuHTSPhBSycpcUYhIAHydYiLBS4r1hZu5kJsvUPu6AlcFnduq+4CrCG5VGAFczWb+45ygK9y6gC7wtTTtgtSv6/wF418ZgRfgjoAG1zCjKzL8MWN1IR4+v1j3NWkALoI3vIB5WpuJVm8JAJsCpk3YfOl0G+L99vUvFurqMAVpkBanS+qNOCmgMFcHQ3Kkpult8Zz9eFyskrCrQwUflxpFgJECV1M3jZImtdNixXyNzAWSpVoxREZQb5gu6biAhFBU+ZqQYuDmhEgEboYKkBAZ0Q2YkNyYIAgGpjZwn9itsDiLgCoDEMKoMJqDEAM54KzVHZCpqCPiGs5fnICIHJa5pia4mVkB54ufdX0/EUg4RrE8eR4lsBkjAKhLAFB1rWDKMTgCO5Gb5hJit8bfXu9bAG7XpfFCE/rVL3iVoL0wfy8c4do8hdd14Xile34sOILPHNCFxsQXMdxlxfglB+661K7008X5uMqePpdNvryQl6OKBCRkEVwHB+t03ayqAgIjEbKDrxnBBgYISMaCIomNRdI0L8zmprXM6KxWHFDVL4vUFVwRVp+QwcqhrXttd3Bwcw6MTMiCDIQciUSIyUhLrTpnLdWLujnUimpSAIFEDRFRs7MYCxmUwCAUolC0FbZL1wZCaFKMbeKQAGhtwALz9SOZEREDuDMGBADUVZpDYAgKVt2wQKJVFwnkBMGRiPj6Tl8CzleuDWGtYAbQSmCAvF4LgARe3QDMGC7V9GvW1SUQDQlhDVdj8MrgdJXksCO4oSuYg65hCSvNTQ5OsJJUl+Sy1ba9rhcEQ0J3RSMwA3OAuj7q1ikQuoJl8AoOqO4OXqsXtaq+8l+XX5CICJkcANYkQ0TElZlfNyRrOKSqmqmt2ilYk73NEIKjObgQISKwsWB1YzTRGjg4mLtnq8rBDQwDti25yWarp+ndYf/4eBzG8zDOi+nTWAzXnC5EEEBYB1TmeIloQjC7SJBXlSaBq679fR6Eg3BqmthcOmGvDWgSAIAIBFmEzd3cgAQjkiuhE612fzKDze0mdJFEhIlywWJQDR04sKqRobob1HUnsfaFMBK7L9MULHbtbigzA9U6g9Nc9Tid39y88loJYdNtybHa0sdAZTqen9gwBC5aDPx8Ok/PRzALgQjZjBxM3VWrGdY5o1d3arvG1aTn07DEtj8/nfrIoYnH06HkWkzzWppO9XbbV3BzbBLvNsIMithubw+n4e39bphGVQNoVN0BtTqw1loQSmz6qc6xIQNiZGBp+o6YY9P0d3fcbRWwqMauz8vU9F0uFYCAQmrbqoVYUhsppPZm55ZdYhkHRlpvDUiUYq+mq63QqmGtrtr2m9TG6TxTYEkASwYy14JWnWSY5rrkpXgIPM+uiHPVRbODBUJFrKBguhQ9Oby6eQNUPc9NK40EzcbR/+R/+ONxOHaxGaZcxmk415vd9mZ7s18OWDK47fp2KouVEVqaT4c6FVjo/Om0efiiafjs3jBvuuDmXkmLokFect83AuAE222c52nJ5zbdhTYSEqDGhgPFOdv5wxCj3HS74/FsVoTYK5BDrRUQmYODLzlv29R5k4uO5m2MzFiqE6EpAl7KE6uZA5asTARA2aq5msFad1a1mioiVDPVl0EyTHk5Ph6BIoVYl9puQuJe5/b+1dvE/un733CA2y9+qWGDbuX4sZx+SH3HsTNIqID5vBzfI2QJIk3jzo4dUEtWsAzGBKFtN68cgqGXcqpLDTGYzgRVQlOXhQCQm2whhAhQSIcwPx++/TurUwi33H45QZNzDV1g1ZTHT7/9+8PpdzHBT//o5839a9WkHLMBOKAr1AlYLrIhYYAA3jkwALqDIhIs4Ao8ryNylARmKxHgrlAn0jMBaB7cFLRqqQoAGMAdJSA3xMkWNZ1AhdudjnvELJyxnOx0xDzM49FB81IRqFb3cW+qSEIMbsDM59Oy34/jMOVcumY3DFViExs4PH+/jMeUiIjHaWBmdBMK0jTmUFXnYWIkCRGqOk7zdFZQDok5RupMMbTb1G+LYq1qC2nmsepxWIa5TmNZQDNCCG9ub77Z7//adHZfFMrTfh+a11kfA1ot6kxd1y/l4PUMwgULxKbUMukyzc994oB9S682aflu/K5a6Roni07OqRNftM7Vud8+JO3bKA1ls0wh7Oczc+h3v9x1X2Gfl/lQdRr3H2E+iQTEHCKZzWnXB72tlc2wlqXffHX38POPp8fH4V27JUVt2u22u/n46TcfH78VkI+fTtvd61EKWC7TYF5321vRru3paWm1AppG5dftBkzPy/OQh6/7Pw4sEq9z5cumW80QGWKIZqauaioUAwkCGGKxioCByR3WdBUijhLNFN3JiZnzUhDQGVZXENCq5lA1ZWFksmpIWGo2dZaQGnI3Z0aHWguHGJrGvbabzTAMWuuyLK8e3haAWmZipBBr8VxmB9hsN1H1+OFDRFDX+TR2fQfoglU2XZ6XZVDhAO4EVmslEEQBd/csHIAI3YCRKfyw338849mb5aBD5SAbNYSpKhgpbRIFLByi1kGc8jK6ghAUoxiCoU3jyJHb1LNaYopC6iZRmH0dJ52H2RCDRM91fHpOfc8odc7ufjqfNGNdTlpK0wkwUPCf/6s/evftx9Pz/vjumCL89Mu3h+NTbBm3XdxIJXrcnx7ud0+P+0+Pj6lJc64QYt/vlnlW1VqzVBRIrOZu1XIUNlcAaJo+zy3U7ODCoWqllQUxIxQtxWyNrkYSJharqoaBZZ0+tbGv5nleavFAITZNUQsU+s0uL3POs5Awxs32zuvCHF8/vCbT8XxapgmkTaltm8iIudZaCzgGkiAswqaFVQHs4shgcnAiJEDiywYaEZjDRdLuDgC8TszWtBEzWJnIAABka4/uWtfFDA5aS8kLkfkKhdqAQnmcmzb+COReI6mviasXp4uD+0ohrQIgvEov/MITXQT7a8cuwDVN2t2J2S745yUt4yoregHUuFbav2THvIzJrz/ysvsmAGBc60yAEFl4zY9nYYkhxBRjTE3Tdl1Kqeu6vt90Xde2F1VR07YxxtikECIz/1hGtFoiCK9F16soAsjBrjDvSh4BEjGLp76/+/J16mJ+fvrrv/qr1uz+1f1u9/Bf//q/fnz/cVkWBbyNsU+BEaepTMNxf35mgE3XI3Ofkmq1ZRFqRPDT+x+aGJuQmiZtN7cK2m92lDbCEtstxUjVjscPn/a/cStYhjyemgbqw0+2u7fDYilupmk/nJ4xtqUqq7rOXnLD8Tzul/Nei0Z2rapqdjgtLizdchpzydiEyGJ5Mbdm0yOQG07jiPT/5+rPeiXLsjQxbE177zOY2b3X3cMjIiMzq7KqsqrYTaKaalKAIAgS9aRfoTc+6t8JgkCIehFEQRPY3eim2DVkVkZkRPh0JzM7wx7WW
no4Ztc9GR5wXLs2utna2/b61jdAXUCQ12XZccrnEwlE1KdPP+X53PepetkdxppnbRWE5/VcS1F1AjW3eVmFwFVzK0I8DodBUl3XXmA6vr9/RonD2N/GfiQMhFSWDGBlzWWdc14JtLVS10pMCMRCr2/H1nxZ1sgyvHoVf/sX3//wR3JhpG4caqsCOH9c89frzdc3wkISrh3z1XILPyM3G57yP6H8vPB03C+m7H6lueGViARXVt2Xd4ErzPTyYBeY4EuWx5/QP/xzs//yAwICmdn1y+nKYwK8AqlXyMBejJL/lFACL0rJLU0QtqE/EoEzpJvu9hvszbudN27GKSTT2TeWgqEaGDIIeTE1c6E49Mfp9Hxa1EjVEAxMibFUTQJEyITaGoHn6sesCsS07V0yV23bkzsIgLhvjMXt+zEKN3cFRKBqCkyAQLCl1WLHl7Z2FOrEm1Y1n4uFADFIYDQzNs7NGvhwOV6pkiCgAkSn0pTQEiMCMgABCJirG3gB0+TNWqkzS0JhA1MrCAHQkCNJaJdW2Z2wanUiJAattWbpd05OgGjmW6cUk6Ftb/7Gw9w+tj+xXbuUz6UaXwSLlypx/+Liy0f/AvDAC/XtCptfa+5LetrLE1xvgVsI1pU7tFXJZUVc5WaAXzwgAFwCCpxFLnXk5uAsjBsdFxzQTR3AGdnIDExIEEkbgDsabccbDeZuoUpeV6qkTQHZHZoBkiCyAyJSMb3YIZsrAJkSeghkjrRRCi7HdXTy5mCGtfpadV2qGtqFnoZK3FBNmyMCIyuKWQwioCGFvuMkKCBRyL0RCQlTCE6szWpT18q02R67N6PISIE4hG7oQhdixxJZAgmLRGIG4o0pZIiG25kQkZGErxgQACJYQwM3BQC/QiiIdiH9kWxey4jg16yIq3TxkuN5US+CXbcUhIsywgAczEAbtHK1wuKr/ZQD8kWtiIqAtllt0zbPaUAC6AAKoI4M7gC8IXfkitbACph7MzDwLb7Udfs4kBmRgRgAHDblu8EVDQUDU93ioNXcDVTVHUwbuJubuTmgat3SVt0NGcm45cLNiKwiZqpCTMwsyZmM+lKyuSqKSye7Xdjv6Hh6OtXH47qanta2xZUQkrbt3YbmbTv/bKtlO6IQUXXHzRaJEMxjkMDCWy7e/8SrKHbCLFvuJG8aKHQkQDACNjPArSRCSpHZCR29YoNSsmtztSAEKk5ohs0aixAjEyJtgryaWybmIe3XsgpDEF5NEbiUtht25iBBQuwRwzQtY0ddurFWtVQJEdDOp2NIcTpP6MAE6zobKNQ8xDREPJ9nahao0zYRYt8f/uZXf/bp049D6dQghX45rg9P87KU5mAsi7fTDAVkWdtS9HA7vrrdk9UuhWk535+PUfqnAklSB4guqZM1KwZOSRA9SCJiQmpNo4whMjnhRQBF0o0OCZHcgDiIqPsM5t3Qha4bb26n6WzmaOBqXdfP09qltJxPqUvEvJmrIXhTX6tNxxxj5Jrnp3s3dkNT61IKXZHcqFHq+uVMRbEWZYmu2piyzcfTbI61FgXoIjvAWs4MqAV1z//48PC3v/kK8/PT+TimIG7zcbUB7nbjw9PTq7e/4taQp2maVerwan8+PjIlqK1UJXbwYc3aIRKIZZ2eP0G8SV33wz//sR/lN3/5y8f7uRa3XNBiVrjZD0+n6XV/+O5t79jW3NxdAbwpBxgOO64IVNc196nb3962hycDMYeqbgbNVQhBPevKkhiTNV9zNoAudkh1XVbXl7mVq5upq3tpDaHpht76JUGx1qbaTK2ZuXu9RqAtedGqyKSogUNIA2F6/fbmcLM7/vzP56eHX/+LvzMKXo5QJl+eumHH/R4wAKDV7MsDtLPRZhaZMX7l41u3AOWM2igMSBHjoa6rkE3T0o2/JJc6f+IQzZhlcCflGPtOMMPyNL3/w/x8787p5pd088sahqbQDQMsy+nn7z99+v5wePv1X/8X1Wq62VdTRQVthIKE5B67nemCjqoEhu4AoUNggAa2YJtJJ28raAEDaCt4Ayuo1esC67ProiUjC+ome2dUdFOOBoxgxYhKNYp7IIEQa0gUBfJsx0+en32ZSl5UtZZiDmUtMY4A5ubaWnOdp9Lcno/rfK5uVoo/np6A0s4Z7JSnc2RJwrWciTCFjjavD2tEpG0F8waOMYRArRUSFAz9OPS7nkPflB2HUggo7G5eLXkabmIwezh+csA0DN5mbbOglvwDRXazuTzf9m+EyOB8O0SvZI4Y+2YIliniebp3QQIDIyIe0t1tx6EkBlbvGKKq6bqS6qzt1av9u4d5Xef97U1kToHX87Pa7A0j73udUMI8rWQPN13y6bHhNNxRRHN3VJym1cBSTynw82ktHl+/+cvjcs7r76w8dwlMkVgl8Lo8iaSbw+vn472kG8Nwmh4kFcPGGuYSurvDw7ufLVCuC2jZj98BeEjjcnx+lW5pXTZlVSvtesaGzQZ3c65AFEYAqNu0YevFYTtLm290A+ZQWkVCYimtkoKjxpS0mSMICyBVtSTRGNEZieZ5EokOSuBIW1k4cbDWXHUYUp8S6mrN1lzrUpFk6G+AseVlTPtFrdQaIwVJp/PUal3mBVBPDx+L1pjS08fnfhjyOknfozkDGXNrS+pSJ4ERXae6pY3EAczLOruk57V9avzjeTGJ62rnVcce6UJKBnPXCsIyLfqmDxUQkA0bMwHh2koMkZnRGZFj4nl5yIrj7lVTQxlDl6CpmUofXEs+LeV0fv/zx5u7u7S/vX1zc/fmsK4A9vr8fH7+8JEQqvrz+hxj/Oabr358/zirxrG/ibel1GUp3djF0AJOzj50ab9P426o5zW33IoiggQGhFJKFFmXpUsc+k6IxEFrbg27ftfqXGtR021qq1pDTNWUAVmCWwPE1hpv07Pt8K+AIUbZezsD+v7QA4JqFZC8VIKIIEKgJWctkjo0MnMJ0dF3h/40Hx+PT2MXAks3jFLatNZpmfajhNjN8xMxSpcS9bk2cNdWwMh10/87ALCItoYAYOYAqoZIRFs3AoTk6NthdBumbawdNOu6QW1zB1AElNAhoqvVeSEkBpxyfmkYaDOmAFC79qZXkctmPnBJi/vc5eLlcHvxt94eBz/bwryQkgA3l7hrv33BfT43yHgV4cBmq3ChWWz2vvCnU9qNSCQixCwhbKFmqe+6Yei6bhiGYRz7YRh3u6EfUkp938cYQwwhRmEhYREmYiJGRGImvAxELzK8i1fxhdR1df+4wAgOiGCC1NxjiM9z+fju3W4Mv/jt35w+fvwf/8f/4enh+WYcHrIVrWgtUOSAhGE6r69vDvshlOo5l3VdkEiQwIEQhy6N412XYgjQtPX7HbPteim1zM8/uYM4DmN39+rbfjjU3Ei4rMfp6eHp4VMueXbI8wTQmIMrPN1/MseyVvPiWuuyqHoTzrUCKoeQj899t7SmpRmWpta0FGaCgChizQC6IEMpq6rNp5yiTtP9zZt9zkvLc6uTBtNzI68i1Nal2x3GIO8ePlirQG6tbTms67KwBIhoZfn04+/N2rNbN46xG4fx
VRhGtVbK1OZGhFrWjfUzpNi0oDH3wayq2ml6HLWEMBzGrmpbpyUM+7/6m7/+p3/642kqAXE37ndxT47i/aeH43e/PAQJBI5IpnZFKsnBLtKszV0S0e0KYW7NHDraZ64QXp1n4LLaNt8ZBL/G831GWF9a9uszXBr3rfe/EvO+aOtflt7lsTeeAZJfnujymvClx7+YzfoVPXAA3HzBXhgB7gjeALdwNbveyZyoAmG6i1Fh2NVzYUoADEplWbowEkRTDqF3LRLScnwmZwAqtZXWxt3weFwBWoqk4EwCAObeWguAqoCE5wZTc4NN4wcNobhv7TeCNwdiCkSuLRIyuIBvOd1CpOiAsImRmEAQ1ta2UEV1CJGZqKkDs4IPhE7YsdRm2blY7YK3ZGtr6KLuBBiQCUwRKoAZJAJhAHAlMZbVoBMxIHdEj4SRISOzg6uDKgShPJ3BgYhrqSEkUIVWrK7d4VbNUQg3EgoRIuAmaHmhBG07xWeHqQved9nyPltiveA1eP2o/eUSfHGbF9XjC7h0la99Bic3nPBKDXohsX1Rbv7CdLkgntdt/Xo1AMDGgCYiZqbNgY55i80BADBVYEZQwkAojMho2+veiDjk4KoAKrSJkRIxWQtA0BwaKi6zGbSmvrEInYqZO+RqZltCgl+WLIBAS5GIQWRzCVR1yOareXZ05HaVODUDYiBiRWBmJCSJsQ9JJHWcEga0SOxgLEwxUuqcWM3MAQzJycwMTFKIKaVhSKmL4z7GlCTG1ElILIFYkFBYkJyYKQiJIAsSb3q6bQkDMhC5b5S1SgibExGAojuYXj4JzRuS6WAITtuyNUUg1crgZpUQnAhAt7y/jaO0fTeibhZGW1K9gTUnAZDN5Agujmxlg11JHHHDvQ0RwRQ2a20zhIbgDpczBnqBlkENHEB946FfbF6RSMRpg4pQAM0U0GlT0+l10OOXXQfUTc3V3fSiZLRrch8iCRs6sGyOak5i4GQA1hitYROkEBqGQhLJTKvWbC23TuLrm307nz98eKzm59xmdSBUdSRHJFUjugTRqxu4MwHRhkCDaYnMiE5EIYULjdrBDOhqZ3uBikKKxAS+qWNJzYgQUAkpkDg6sYAhCQKBI7Wm1kpgDkwhdeYGrqXWaXlOIaZwgyQsQbV4M5GgUDGEAg1NQ9ep6fN8ZpZyXj2bJDSrqd8BYCtTrsfXb75dpueqSERdFwixqqqVlFihkVZt2Z0Cdwow57IuKyoz8qz1zeuvn386/rt/+z9QWt/+8hetWD7WZV1a9Vy5sPxwf34629PUUorLogqxzn6eH+4O/V+//jZEbsjH3DDnPVEfU/ECGobx4OZVHatFSUJJLe/GURXEkKIgC0lADsjMgRBZUyqn2ZqCI8UIxGaQUgfOxKJqY4qPHz+JbKXrVd2BgYXJrK21LmGxuZT4+vVyej70aWkldikvuayutYKaKiwLLquf5lYMx13ggB+fT6WgOtRaQyBChEDg5Gocg6Mxw7qU73+8//VtN1DC1vaHWBvbBM91QZQfv//pdtc5WvVGlTnXFJI2Y449hefnZ6UpjpS5uOdcTrJa+9jGrhv3+1Kmrg9zzrvYvfpqN09zzX70SUge7s/nJ4y9DLv0+vXNaT4O+77MdW1r7GLsu5ZhPi/IHqKcTjmkKIHbxUoeiQiBatPqMxMl9pbzVFFbQ0AzrK2VWs1BkWpVBzQHVTUEIWqtbv4obZslqipuvdtlMZi5BHGGbugSpTzXGtqru30pj4/nj9/82V97PLTliWFibJkAKDl1rRZC0/zc8iOxpd1tW88UR9y/ckLzM3p2jhRjK1pLMXCXQ7r5leasnjEm5AAG2qxqkBAJFaYfzz/9x/X5GMfX6fDLlu4eV5Ig4vnT3//74/t/uL3d/+a/+C/raeIUWvWaq4PHcafNUdhrUVUFIUZt5oAiDJqhzOgFdfG6opubIgMgN28ASmxMpLUiEaaeYMQewdmtEjiYwprRK0hQVSLx7saAXTpGNjeG6FqYuUKiuCcMmgsxessAFKRzFHfNJU/HokjzWs7LudQGFGIMrpXA2Pzru8NxeYg3SZshqgTGCxeairfaqiAAByIJ3T6kA8KautgN3dbMu4G2pq00d+UIaVyPH9WNhuBM/dC7qEYtT5MoLS23qTGHjl/djndk4fT8eNgPYjzN63jYNa/T88eGDXeh2IzN+4pD30+Pk7Cz9OtUWfPNq1+faF3qY9XiXLjRPOf9/jY13R2+cYUkbYaaLSthxJtD153KI0HT5ePS2KVmW+7uvvMCp8eZQ0COIbDTUBkkpVZ8Od0r5rNOUViE1aPx8DwVBkfucoa8Vujivnv9ePzdIHXcd+zweH/ce1pr7YZwfHwKjiUgthy5RD/veLdMP413v2n5MqPZjmUkvLFB1QzQCYgckRABwIApIEKzaqqIaOBNq3oTwEBJiZnZwEBYtboBMZfWYANWWnMrBAEQ1RTcYuwRvLWKtGnfmIXHvrdWN9QXmLv9Pkqcl3NeJjRdJl1nXecWhLQ1LYURLUhM+36XirZx3E2PD+fTmYhdvesSO5pRv+uIzEFjEADDogjsrZm7hOBMz+fl3/z+09QAuSVi5n7KWxzoHsWWfD6VKqGP0FG5P5fjN30UtBAFwREYAJmFGVueV0dm6UMvJDdvvjnWel5Nzfs4AsVuCHdfp3VdulN5Pj8/fHi32Lw/HFIYht2rbvdm9/rrdcklK8N6fP8TR/vt3/7lf/inP/7w7qdv3766e70r+bHNGYqxy5JVAWszchKkXMuyLCFFUyOgVpuIEXm1IrBzACJkDqomIaEzbLM9N6bIzBfSOJEDbOzpbZzHzG6gtSDQuh5LPiGhGbTaxZTcYeg6FFjzqoYpILNQCuDYWnVzza167YYudulNel3nUymlVfTmfRoArJVVBYZ+V/Ks7ggUOLkpkHIUQN8sjRzBwYIwETlALVUCM7G5mRq4GyhsoR+42aW5xADm5q66aXpaiKF5dUBhcTQ31mbdbrgK8y88IcRtaAabBMDgApwg4ibh2RjcsEErF87GtYm9GG4jApg5IRDSppEDJEej7U5/kvHsdHUXvjCRru3StZ3ZQtiQEC8jxo1DJMIiMXUxdV3fdX0/jGM/jkN/+ZP6fuj7GFOIIYSAREzMQS6efrg5+21MKbpgQkTudmm4LuG+m6X9Zi18kSEZOCATeowJqkXEIEzj+Hz/9PMPP6YgX799K5KOc21LjSjPUxHhtZz6lGqBo1V0qjWnEMTh5jDshxFJ337zTVE8nh4PNoS+YwqtzPfvfjRtiGiACLSWMz59Mq0kgbq9Gu7GXQAcbzuy9vzhp9Pzp8fHB0TsYlxzAbMYwpLP5k2RWm6l1aYaIhi2uq4A0BzJGeEiQZyXFTbRdVMSXtd1rSo3fWm1KKxZ1zWrrimilrWWxjM0ITOr8/PH54e8LtC05CLMqm3JWUu9uU1d4OP9+5xLN4xpvB36Oyd+PJ3K8RhFxn6QIODWHW6t5Xk6T+dnBIwsIYiqI+iQBqs1l8ncUtfd3IxWCwD85V/+2n/8+ZRbVtx9dfPVYYe
JUGxDN93cvG2GVhta87mv3upvaxG+ZJhuDDe3ywD/M5JzZb1tt7l25l+0/XBlh1x8ha4hZi/PtD3gtk5fGCQXs+OrIOiFnORfLIINtL1I3r4gRm3EhStn6fqcL3wWulKktmLG0HF36201dQIjdNemWgHYjbwBArlCSNKy15ql6wzg3fsnpxgSmzcFFwStTszqaubqAGgDUXWoTkZkqmBWmikiEUZEBGczZq4ACJaEOgLGLZEImlsgBOLqim6OEAk7wZFJCBkBmo5BgqAaufm01OAgZCHQvicvoBVJuJeG7mTe9CKD2XzammIzQ4I+kpCpWeQ4rdMrjgpYch1ir7W4Y+DowObMzHU9g1ZQ3XyPAEFL9jpj7FwYNsMZddfKMRnaphn6E4Lada+7fEbuVzMpvKLmfiFaXqhgV0DyM/xzpYhd4PKXsr1s2Ns+jS9MMngpn035uJkm+9Xq6KLkRbzkWQJ+ZqV95icBbEjR5Qvg8lDX14uwhZohiYOpKzhtcbDNKjjRNnIwQgetVQIzi7sbUTTvFBt4OE0IR2vWqua11ObNoTZt5g6k5sBYmvrqI4qSB5HdKGNkMmtNczVAcanErdSt565EnAJLEAJAZpYAACGFLkhE74XQqpE3ByIJIXVxJErqVGt1cyFmAg4iQWLXp35I/a4bBo69SIwsEjuWwCEiIm36cDBkIgnA7CRbDAISAhLwxrhhcEcmoIs41BXRwbyhg1u9fIBkcLENuoxnNmoaoiEYEOAlct4RbMuwcHAgBkMnJ9vMhAzc4MJduu5giEAIm9bKFLRtPtdb8wcgbgbWXPGSGLZVhRpYgVZNFRxczWELdiOHLW9UAchBNzcCcNgOxpsu3cw3U9wLvOSGm206A24maNdtyZCcZIugRXNX3eS0TSsROqijV815qSzsnNycwYPXXOdyPpXzU16XVetqbbG2tsbkYK4GhhBErsxQJLxo9Zs5mAEAojdXAQT3QCSXz45MvdT6J1BRTBcca1PcCwnzxuu6fGLEAoAOBgzmKkwSBnaqdVlbra6ATTB0/beBEwcxXavWjUpdVNHJUVB8kE6bTevSisd9anWpJZOkpvkuSN8PqHXku87lbPcSDl6h5KabrDHnus55nQlQC/QhDfFmzs+aFzbJSzHOgySvJ62n28PXS5unB82tnpdKElD03cN08vrh4+pEHENZm2Mg9GWuLcD5Pj9Mf/hPfvn69d3N0/KeJEnqgF1VzYUgZiuBYkhRm2mz2MUQY+wCE8YYu2GQKMPdXUwjWTNvhGbWYtfJslqpTAwAHMK436cYNLcQE6LH2LkWJtCSiZkjazFhRIdEWnPxdreeZ2RGodTJk1XiRMiIWRiP05qtjbej5tXJ81J2w/A4n9KQbLH9rs+1Zs9uDCiuLIKlNAB8eFrgnN/uQm2ZmBLRpw8fdof93W3/219+/eH+j+PdjTZeS7On89DFEESzmmPkqGvu9n1eHuNhPHy1v78/j2GHiKXlcZ+6wNoW5Ti34lCDhKbeRelCmo9nInzKc80qkVqbUxpC5FqquzGH7f1smVJHTZsDIqM6mhqqAzO4gReWhGpaIStaM3cvpW2hEk11A4jUwR1as+ZWELxpa+oAzUzNtWlpzb4YojVzrapAhxAFBZ3GXbo59H/8x3849OOwf1U1e5lDjzVnDoKxA4leGzEjldxmFHRvgBGGX1bvxZW1tDxjGh1QDcBIEERi1dXykVMCScw71dUIYgrcTn7//cfv/21Iu93rX9vwZvFU13q7g+Xx9z/8x39zk3Z/8dv/LOzvNM9anxkLcaLUmSN4JJvakgnJ1USAvLoX9OKnM2gGVTdHZuSEEoGSaUZreIljMG0Nws5IkKMBQqtgqrWgN9SGkMGLmkEaQJKEwcsj5Sevi57vjRGaKgagWImzdnzzG8pnzg8h9Dmv8zKrFlXlGJZcSlkEkRhPy2QcwHCU7vbwdl4e2AtzyDqDQ5Dors0bGFTLahC6vYRoRBKjagZbGCVn8yC5rhVbSCOFLu73Mu5WI2RRa1POTa20xZmatWp+XhfiYJpGHHP1GIJnfSO91L60KTLW5mWdCYVjyI1AdinBPJtWVRgAcKEddIVyVv3Y+WpG95q+ff3L9pR9OcfOQhdJ4/r00HhptcAIHPjjw08xpWLT6XTqAhEPi+Yi8cO8z6fzXff25vZ1xp8I8v2np5ubkEtupVZbm823rw+lwLSezN/E4Y3CM6B0/Y3DftRvbm7+9vj0h0P8qtvDfLy3nEf+6uv+V+vHc71/uJW/+Itf/+aHn87jbSz5/nx8fzceSHpoa4w3G3t26w0cbdPzEyBzcG0bQZSJEbHkouaAKBKYRFtBvHjnV8sNGkmfQmQSYzNzFlTVZrbmZ6EYmImZMaJjqUW1bGcCVzd3kTCw5OXsTfvYA4NIMDNkcPMgFDlMrSC0FGMKsugydj2i4ZjWnMECqD2dFuQQUy+BAlM/8HpeDYnkEARVbWMHE3cECEzn6ckdcivnaWolA6ZmqEqIrcwzUQCYRJCDc6QorMta0XpJS87CWood+o4ACB3QtVWhOHQjIsY0CHXr4wdr9au7u353+3haY4zHeV3NUupefff2Tfpzy+vp6fn06bl1NT+fqAv9fhd7Dyl5od/8zW9//sPvptPpF2/fPt/fawMilw7zcb296ZuX9ax1gseH55ub/XktMYWutbZZ1yIAkrmGBFaWlp+R2A2N3KwxUBoOTlDyigilekxCFE3drKk1Jg4S1Nr1sAWSGBxMTd3JCQxqWWqeWcK6zG4cQuyGLqY0T89996oaICNS6sJY5+dS5tbaJsbLpodx12qb1nPfhcUW9Qxhn2isWt0B1IAphuCmTS2mbmMpArq51dKIkFls61WBiAERzQB5I+qQuW3emu4GoE2NgADBiWLXlVKaawwpphS6oZQ6dunLVmGzjng5sF66360hIbweQ+GFAeTXMfW1Db4MzokuUgWEq9jCLwwtunSvvrU89NLqvPTbflWwOYgwIggzBw4hEFEIQUJIKaWU+mHsx2EYx2EYhmHohyF1Xb9l3scUU2RmRNogNiTasCG+HAuB6HLi882sFWAz033Rhrg7X9yLXlo0vArzAADMod/vv/nlt1qWH//pnyPF/Zub4+P9/dN7Fv36zeF4WtUhMez7FIRbrdiARbphJPSxH4IQMYS+X1sFh7vbuy4EIDwd76NgYEGhrh92h13LFcxM0RyQeDofYwrLw6ks2cBFuO+7YT/Wqnmdsi7TfD6dTyy9gBeFogoG2kCdVMFMTQBUzQGDFLfIwc2rrhsm51ZxXkptEgc0r2UVpuVpOT4eoS7jiCVXRhDkda0BZZlgzauj11JbLi1y1fp8XAikPzCsi+YqQcxL1fMf3x0FZdiPaRy5Vc84TY0Ic2wACDSM+72aWZlPy7IuZzBoZWnrOs+5Net3h93N2I0JGyx5fvN6f2j47mG6uxv/4re/MahmKBKQaTOngIs5y6XMNlEhIl2BnC8bZv9M6bjiRNucHC9l//mqC8SEL9ZdW6m/2BK/SCfhhZX2Bc6DL0DAlSeCV4BpA4
isS0xtO64B1T18TQtbFpt5t+23eb7X673W23u67vm65tmjbGGGNk57xfCeEMeE6V+tIRgQGgIwRYASM4y3KeGx47h9Ujwjqfg3W++jNWBpw77wsGtHbkAOvm7dLu4/PDdnYfPTu5fGnR1pb7zCpCNbHL9B7OX8Dl+QRnx+GVQ3JJJlrvngoGq27AVLTMS05ZDZtt74cTANRa1SBGR9vdcSkFsGM1pLvT0G66rmlfXu1UtaSSl0xagw/RYcrpbhm1AjOWtPR9FDMf4u76pmlbrbmCZ+fGYS7LIEsaD08lFdZw1Yeda94/jAbqFBrHyBgbx+acyayFyGHVlkSfTn/7D7/96uWG8nL70v35L9vDU9y3t7/97p9qweDAbVQyOCSBehoeUcGwxthM8xi7KIollYZD27VgVMzmcbm57qQmX2k6FfDLrm3818342+8bJpRqy7Bx4DAT0ZLUPLaxkSpNdACoKmMqzhn52LSbUis5KlYQKrMKaIhtXvKUl+vb3nEQk/2bG7hbHh6Pv3r9q3a7F1PigOzW/njliJ1lg5cGe7V/JuILrWyV4egqVHxuxZlodS7CtWV6Zm3Yc+99Mfo49/LnZl/PdJFLv24rGQ3tQjQ5i1bOJKZLeZ9VKGeBzPmF50q+9Ph2TmlHAF0tcsDMZMU3BWCe58fDw2/+8Nv/8vvf3D+9O6aHp3kcpimlBQiIaZkzmVBsDuNgpUSKVY0RXWjUKoeYp0lqWmbYb2/SJJERCBU57q4gpa71c8kl5bjZzEm11L53N/uulqWWIiJtbNqYFYyYHMOUShvaksUQYnAmhkpFaTZ1qoG9gBGhR/aOmMGA5zH10PTbthLVnAErIIcQBUhEISdVC6FvQhjgMXrNZioQGp6Xoe+6agWZCbltWkcIhgTmzKWlMPiVauZjiE3rXTAmM2jbbp6qqhhi18U0jU3X5CR93zvnRK0NvVZDM/YboAjoEIiA0HTNAjuThpDQBM9fPxPGnt2v8ILF/AzLXlVLq73xBe8+m5Laxbb8vHLCM2Z+oZzZn+CPzxFi5wL9GePombV0BjEvMMR5QbyQm9bjoT95CfEqSkCpkOY0L2lJ+Xg8Scl937VtI2qppGmZFZ2oVquPT4+L1GqW5mVJSUxFhAlFVbSuvoFqxky6yvIRVxmYGawiwTV/av19EKJj1/oQXJzH8v7D465rtyE47xWQPcXoFaDmrFrzvKiaM+2dw8DE5D1779GA2FejEJtt3zXOtrsmRtIqAtXUCMgRex9dbMkFR44dITM6vyo4Lipm1PMlf75CkcDA1lwDW0O9Vc1kzbDXklEKmqJWQEUQAGDE9YYC5BDZcPWZRkMDQjhjSgaqYPQM/yHgWmwrQwDR2MhUQcRUCRSs4vkGxyvySlAQFKwYmIGQCKFXYyBn3GDNqBWtWs1Qk0kFNRAFFTBRETQzFQBUNEMWFEJUKUhOsdgafMZktaiRoFalLC4JpqKLQBatq3yRnI/e2BmxOe9DxCQ5GAW1pYBza0IpMQEgmCLwSmsyxGJQxHKtIklqJdM2RE9OlHKtgBbaGGNw3ocY2LnggyPy3hOigAKRX08QkiGqqoogMJqZqZqFhtJia2dMQErAzHLOYsUVEYJnqAgRAIlAV9kmIa2SNEQwIAVHfB4OGKpIEcgrZdpgNTl3MTTACkSlZAXKSyVUM2NePbSUiRz7VRuEgevqXCWmNUstzFyrOeccxj5sHk5H32wDsSIWGfvY1SJJkpbMCKrudJqmY14PKYa2qMx5IcRqkHNumtD1jZSiIjGQE5ZITwuUheaBiLgJcc5FioDS7nrr9vHdh8erV9dPQ3n89Lhrmr/6t3+Byn/zP//7/8P/+d+JmLlw5W5VVEXYBZFSloRkQA596zd+GYe4gTQPOU1glVwwaGqlVIvWImp5zN57Jp6XbGjTOHrXFAQAjMET8LwUZnIxGBiRI8eiIEQutr3vBR7GpeSUEXCZqubH/cuvTDW2fbO7WlD3rzcfvv+EoC9ud3cf3hPKv/6LXz08HWrKBHbz8vo0TrFrarFUFSnE1kBKyjUtRRpzrXu7uX18PP74/fjy65ttvx/HBY13+36Y5lzK50+Pm+t9G2I+1FoLEoYuAmvrQqkLSGlCSHlGEud8LppSiW28eb0paKZyui+VvY8xLSk2npwzFeccMCigmaqKZyZHhmpgtVZDEDBPvq4DCCA1NSQzdeQQjBCRWGpdI2OIwNQcsa4GcGsU9/mexY4ZAJmYefWoJ8cMYLUK8bk/KKWSYi1prkOxJUQC1MOUd5E11Ifh0x9+/0+Olr1r95u2qjSuqwIixQQYQDSPxwcIEq6+zfOcplO/bcAw58VTlXLi0CqQioHkWhdy5nwDGLndG7lai5Rs7MmHP7z/6SCy3131rpseP7+8vRbjpVYxQ8Pgm3mc8nLa719cv/xFKlUk1ZybLlKAnKcyDoXeYI2b9i6lo2EMvlMXq5hh8ZDJFRmPNk9GwcIWiRkyjJ9PH39a5ulwf7p/HA65/v3f/8MPnx9//e2v//m3P/zL9z+eJgl9//V+8//9j/+vD3cfC4ar3eaHd6d5KcfjfRVjIjEE5ELhWIDj1lsJWNNS0GgucPvidnO1uXlxRT7UUrtNzy4IsjE6xZomdLh/tS/CoJDn+8cPHzlSzvX9/eepZkUJkfKSlTmrucC1SlUY5qrVrBYteRzuOID3HfkA1AIRcidWXPe2gtqCapjq01wOJQO5TvTwh0+/9XuWIX5+eIKeoYjIfJgHbbpTqqa43XY55VJyrhMuKUCMoZ9lKDUXLY3b+9ASuz8+Hm5uupvrq3qUYcoW3Mur2/mUfH9bq4oDt2lLRskbyvXNdQtueoTh8XTou91U8XQaWbXf9KyFwSlqkeT6kOsSgZqwW4yE3FffXD99/nGegLX+xYtXT+PjOH7YbK+sgo+9edjvb47fv9u++gWrkKbA9v1Pv4Om6dvrJSVu+xB7U2xbVuACo1p5/+6db84CNCKqZwKsOWQkDjFUAXac0sKE5J0iRt8t87IsyXnnnKsmHtmzzzCLFGJ2LpjZPM3suWmbWlWKOHbzPIPjbrMpJSeTANg1G80HFZmSqCoQhhBySoBaakUgDgHJVdGs5frl/vA5D+Ny8+qF5jCnsb/uYxPzktOSmWB7/bIiPz093Wx2few+fP4QUGathxFCv9UiG6LH07Hd3JSSFpWqCgJRCoH0DQfPiuLJWa2SFg9xSI9Lrlqx1przYiZFkpWSDRQgySIZBIJazLU0zgeGrFpqVQBElwQMq2c0AYeUi7noREvKE6G1jVe2x8P44qvtx0+PSfjhpL/cvvj04cc3XXt8//n4dPjncXhxdZ3J/ulffgoNpOPIPt9+9WI8jFlwe7WZclaDXCqs5EEEAPSOAKHUygL95qaWe4OsqiJ1mgc1cj6ia/KS9vvtOAqiTfPQ7fY6pyY24+lRCdumAbKapmE67t/+Ag5DOg5NiwSotagJEavoNIxXr2/nMd09Pr765ptPP/34/uO77X5/+/LN492hbdgzNtv48PiAgX3TLEthpibEkuacUy6577Y
IlnJh51LJDhjJMXtEIFBQAzQTYXJ938zD+OJ6/+nTT5vNbrvtp2l5ur/Pm9psmuk07273jw/D4+l4c/Pi8eEuF+3aZlICEQYw1WoWYjsOiw/Ny5tf7q7mKvn4dEDQkst6FbRt0xL7yBxCjNHH0DZN28S233SbzW6z2fT9Zrvr+m3bddGH2Dbee+ccM7P3a+D9WbpzhmNoNQEiwFVsYOdBEl565As4g88TeLgwK56BJtLzJHylXyjAmYf/DPo8j9rh3MmfTYu+zFfwsue7zFzOYTNw5vyfsaWzPQdcugK70ABWy5HLZH5998t2XkotaTGrS1qWXCUrK759cTOdToQ4j3PJNTRNu9nsgo918tyFxuVpTgmCd5ttb2CByICQQxclpwP74KB2MWy6dinJ+QAF5mUqeSTXoFSs6pkDAXf94oNv4XFJc55QkDP1nhygVvUCprWCLuMcmz5so5qlUr7+5ctYl/Hh4Z///dDfbNr9zTLx7vp6PKXT40MbvZmM46Hd9vM4IzjnAgJbNQ8opdJ5BI2EtgndV1+9fZpOsY270N+9vzMM7+8Ph1SiQ+d8zQlFVKGAEiI6JkfTvKwzXkPzzKTmPUvJo5h3vgmdaPYEsfezATtTh1D1eDi8fnu76beHcX717a90GUrS7mVjGAQYVXiNKLrILFbLdDV9FqDZz4GelfnzTOy5dM3npp7OYM3ZT321o173/1+a8TNACfhcbBe3r4u30bl08VmydnaKgTPy9AxyXooVznbrX1p+xDPQdCY0KSFWEVMT0FTS0+n+N3/87jcffvy8jD98uj8O9+PhWKWud7T1HwCqgp8fj76JVUEMvPNIwORLqr7xNIHUfDg+taH1beua7nA8vHi5VxsALRBMueSUHDpQmJfq2Hddt8wLRz4ehjaGIdWl6k3bFiiihEa1VkYkQgOMRKpWxULApKbr2TFb3UG04jwVcwVZQyABnKbT9e6lZ5ZaVEpJSQCnOTO6tveYixapOcfYGkGkpmk3Ms/ehTaGOR+dIQMwkglG7w1KbFvn4wrVeG5qhaVWMSAfk1pC2LddqqNv2MxUJIS4iBI649ZcB+TXlQfRAM1QkNSAYc1SPEcVP4NFa3AeAq459xdu2rlK1tXsgkdfkJ6zePYsUqILt+iZdPQcoXXRTP6JUdGFOwQ/f/D5M+F5gYULo+6LoG1d4OBPX2cmYqWUZUk55ZTK4TAOx6e2bV69fhGcy0saxlkM5lyHaVxyHuZxzklFiojUSsTGtIZGIiECrQDH+iMQEjAgkV0MAZHWvIDLas3OmOesS9XA9PLl1dXN5uqq33UxBHJYRaqn6qFwdHC2gmJkRuYQPQcPhqWaog9NbILf9sExGagqnmO4vCciAAZT0ApChoTswRTtEtAGaoBnyRcCnHWsBABAzlCQnYmD1Y1Iq2k1MJNitUBJa8j9GltNjgBXV2kGjoYOKKwGVXAeX3w5yYhqUA0qoABUQAVVsAoKqLBmxpkqEqgYkFdRrIIkYNlAzr4lJgCASsBmPhgwAWBOKguUBWsyySsQboAKK/1J0ExNkINCVURBMiLVQuwV0spKUNOqIBayQFa/GE6LTElylSwKxIrMzgOxC4Fj9E3DIbqoFRdgBjbPyERILKu2F9iMVVXQKqAhj3PJ6/0UuQ3BwopvEjgwZnLkg2d2jjmGJvDqZM1qCgie+WxGCJhrNjpbi6mKSXXA5WybiOSc81gRiOmy9J6vhWdbazAVIkIgw3N+GyCuZEhDAVM63yTwgoDaWa9YEQylVtOKBKC0pMoG3rGC+kDkkIiYg/dsWGPw7FsEZWaTWtJMULOUkgu62PhYDYLvd80rZ7kaBG5RmUBN19ZFhjmlIUuGrtury1mmlLKZ4+AdEwGqiUplBCP0bXuzux1P49v97uHz06d3h1zs+maDKNO0dNIvJjVPcYMAZdNj08Z96ztffve7f/rFr7++erFRTU3XOWeuCa7ZSq3LPJm2JklKyvM0zzOYxchaUMmNh1mXwh7UeJkXXKG3KuTj5/dPxlSCTbluPElKRSrjXusCTG3fm7GZ1QrBu24TU17KNEgtbd81TRgeI5ZMUJs+iCRi2L56peWfy3xaTiBlwpqzp9CEu89PZnqz3T3mGYjVQezisoweMLBHTLWqUwo+GJmILum439rrl7fLssi4QNc61HfvP728vQ1tOD3Oaa4IC8X86tXV+/fvFaDvmqdx3tAmhl2RTEhEhiIplbbfxG4zTCN8PtzuupZ0nPPTmDJ7MrRFqpjzXEGAcF6mPnaRXc5L42KpBYmRuFZVg6JVVQAdrRfxaq+pSoyqSoCOCdVUDYkMQUBXUfE6rUBAMSPENc6FEdW0WlVwaEZoRM+LIJRU+93WoC7jGBlzlWSQUrq9kvfp/U+//8Pnh5/+1etXJS3Q3Xi3M0OsAiaaq5ZTzQM6Uti2ri11FKtEEaQSodUFdJUqA2DIw4mApEJW2t7sgF2VNfM5b1/cLnn6OD5utzdvbnfpcLffhoDyNCzgKYSATE8ff2KQVy+/jpvrrMlUyCA65+pCkGqZam2bF3+pOI73Hyn2IW7NbQTYVDyIzXcyPViaICnf3FqzZyY6fLj/7X8a7t4dDk8/fPfu08Phjw+nTx+fdIGc+d//9o+51qv9ds7lVKfPHx4AQL39/t3TNg5d12UpS5Vdv91FN9cakEvS0zCFAHHjbl+9VaG4v9q/etN6MSkuutB5ZmXP6FpyrpQjeQ81Cqemcae7ezMIDU1pmcfxcP8AVJaaNSEQKlHJRbV4IhGrwEaO240PWGvWXDNWjsJEwERxSZCtePCtC42iVXVN91KhaQLUMkIN2O+bq92UD7tdjM0Ap8e2ebHdXR/nJyE8LYMo5YBVlp5aAk2s1HXLpCF2m+3tOD0I1XbrVOepmBk5jjnlWkJwUZzNIkPNxi5rkeR3zeZ6u30aPpFNm9bfvvzq493n5qqv01E8M8PHT+/aJmpd+ivevOie3j+iv4quPUz3w+SqwPWLTXkaluk3bVcxM/k2uzobLBxC3F//IqTCIIfl9ClxZXLjsDDNIiWkvfcoFmO3n5enbnOd5ylVCG23XgVELjbkCaqoI855IXRVqpmVXKILxZaci6PtOvohpBh4TrNWXWxGBGZWW43GOLoABKUksVXpTV0TAVRB266fhqFqPlVRBEYEMB+bKmXJOYbGsys5OxfmZQ7BHKNVY27iZr8IfP3Nr398/zEbb5rexUjUcGvLvCArmvWbl2B2mg5dvyl5+eHh8PuHwzdvf7ksp5qk8d08ZpVajELXCJbHuszT9Aq3vZToOXhnQNOUpnlZpkmRkHDJOdVkBs57RzSmBYBBPQHmrEuuxLgsFRnBzLsIQMZOzHKtKzet8UFXuo4AWjHoDCFnB9hXa98/3RfJpvr9T997lE8fP4PZkvTu4+PN69dPh2Wch8qh27Upy9tv3/zu8Lsi8DDk4fFTs4t1SgBgaLVWRHTEIlakKi3cRedDleJDNFOppQoAaGVWrSDY9vvxOOEyW9f5NsoSus12nkcjcuytwjyP9PiJ0AsJ+hg23XLM0+
kQ25iXxcyW8cQVuy18+PH33779xU8f/5iWcXvzaqyzemOs0+n+zatffLy749jkZaxUrq62XWhoTqsJapoXJhIttVTvoic+Dseu6zZNZ6qbtvl0f399vbNFi69J7PrV6+PhqSH0wbHW4fDgw0v0IKVs++bx8cPu6l8RN0MeIjbdriuWVc0FmOZT8FENp3F8J7/f7zbdpm+2G6vFiV+vgjevXwWm0ETnQ9s0sW3att12Xb/d9v1ms+n7fhPbNsTWeeedOztJOkIiIlrNh9Yd7zNixEBnNxYkBCBkvZh5Ep2zbC+R5s/Enkufve6+1t79PBO3i/PQz/hKZ0oH0J/0Ul+a+gtKhetOGC5xwYAXF6RzZ3cGgFZiEZ4tIfAZVLjAROsn08ooEpUqJZc8DqfT8ThO4/F4qgpDqmKYax3m5IgjQ07joE5K6TxsotvdbBARBZBAqszTkEpNuRJpYLjqX7RdV2o6TSc1rlWYikBtfPS+SzIyByYXWpGK80wVgp0wYt4w32zjJJbMrvqX07IMy6AAL+MGEWMYt10zzw/bN7+GaWivrhXwNBSFYTgNu6sNirWBm+AUjIG60NY8VZOqlaAaVClrdkJZZlg3EqZwmOdZBed5GO8BS1mslgQVqtgoqWFomW11wAVTARHqAxGo1FkQ2DWBmMwcQVVBcOMyIiF68oRWM5rttl1NlQhOp+nqm6+H+3mcH6/2fWiv1JgZCZHZXaRg65mitU8m+jKl/9Iv/6naBuDiRXUhGF38X85sMnx+MuBZfnSeK5/HeitbAxHwAgI8o1LwpeU461fAjOCc1LYKStD+tPP/8nF2/q+t8kdYuylVUdBlXg7Hw+ePPz6d3rGfH58+HYenZcnIjOuqV0pVUTXn6ZByQjDnRcU5BhWENRCdmDyhI+/GcXDkwPfEDhQNQ9xch+Zjk2lIixZTrF3LSy61mkeaZGm6uO27ccnsHSCWWjvPqUgI3tipCiGiiiMMjgLSKhkTdAomIsGxJ2cGuWacAB1xYHQUfFdUgwO0ygjbrbt/nKv6aqIoMbbMixAG751D1Oqcc32PiE0TFNMii3chg1o1Hzm0Tb/tRcwThBDI+DSNaanBdZ48Am26Xal1f3vNcYPkfeSiBbEtRdquIxeAAwKSAZ3tqVbWws9SkS5Ux+eSWgviUl2XsgNck+bhC0T+LGldu9vnJvdShCs3DS5GyCvMY/Dl4+ALz+jnZKHnEr8sdWfUCAFsjU1/5sr9nL4JAGCiamBiqqa5liUl1Touy+fHp+O4vHr1suZ0eDqdTuOcl4fHx1TSvCwrmUhF0UyrrA7WyCsaonghgzI+858QAFcxxJlwujrNEQlAValgIfCbV/tffHV9e93s+9YDkDOpk4gw0CayAbsQigiHxoWAzjF7MyZEUSgVkNGhglarJiWrIjuvZrrO4lGtihkCASphNVQxVES5sEpXX+P13DhAQnJ2zkwgMAJgAA8oRlmpGhY1AjWralJQK4IiKF0S84gRYCZ2hp6QzhCw1XUtUDUCBCAghkvqOqji6lFmaqarKg1BQAWRbQVbyH/JdjAEYgMwZHIOHBs7QGdaAdBUTAtpARPTVeMmJiaiKgYrp0iqkQkAkFNDANKqBmgI1awq5Aqp5qSUVU+5zlnGXE9TLYYKgOyRvXM+xup88nEJXcthRhcAwXtST8SMRKJrvh5UMSQCpqy1Clb187yoqNbCTIjogvPeKwo1wZxfsUXv2BEQGiCrCuBq57RyKdBMQ/BgIGoIqMpOnEhlYgRgYmYGAnZ+1a8h0vO94CJAQ2QmVVOTM4EZkZjOkbDEpoCGqgamxMTEALKOfNWMTGsVsWomjjwAEntRQ8+8rl6wOnRVIkUCs0xIWRZJiYnKUlQkhDa2fQwuzwsTGyy1FmIHBkWy1ISmIDQdT3nMTWgtcLFlKUMXnXNBNDD7WtO6S87TLNXabjM95vRUEFTtEB28fhFVIY2lmrW+aQO7cZlVf/XVq9N4ErTtVd+H7oc/fGx92/bt/f1ht9sRowEpMvroYrfdXqXhYHmseQIyVClLerx7ElkoICnUKtP4BMhVDREUeFlyUWPm+8PxintTnaYj1BpiV1VEqnOMKZlZ47iAOsKcspmhKOUSA6Nj6/ZpPAEuCpCrGPm5WN/Hf/77PxTqNptmOi3I1ncea58ne5zvrzbtcUkKvioHdd4MEZwLHKnmPKSlbUIkB3FrGZY6MToRJ1m++ubl/nr/hz9+aNvw9u2b9+8foeA0Th7smzcvf/zpc1m0dZt5mSi2S87sOTSxqjIiKjD6Ul0ZoUa83m9+/S3943+5O0kNTISkoookZswYfGsA7LBxwTEJaK1ZRYmcGKjqKhxVA0DiNbEYAHQl9sE6vS1WAVYzFgUCUHMcGBjgnC5jqzsdsoIiOABSMxWh1e4aAACqyrzMZjWbzPOY64LBSYXjMn737o9suO/iPB58tc3rv5BaDBBzQpMyHS0flzwEakiBzRWtbb8DjKKCrEgZHICJCSoqO78O6YiCi72AJ3DznLvttszDh/ffO4XrbZsPd62z4OkwnJC7poug6fT5gyvl5vZ1v92KVMtFcvUhOELvdDrdF3F89ctKpU4fnW9it0EKpVZhZMeWBzkdYBoQhF2H/ZWgUTqWz78fPvwhHU/3P3749O7z59PwcDgR2c1VvH/6oY1E5NOyzHP+/eG05OIYHdEkKqkA55umP86jLLNv2ikVH2MEWcYRu6tcdBOaQLHvOm8SzGKIIbaq1VSJyMxElH3wbWvcaD3mnBCgSDGQ6XjMy2Kl/vLm+rHUT0POJkTYBmSwhil0vutC33RYlZQDnKMwCavVxfmAaKoFrdfMClq0bprtkE7BY6kLGJEscpLYxZpGmbNJshryYYyFYKmh80wmZfbUbWOHpqUMFYQwRh9QJZ0elumQyinRcvX6tvVhLrbrGi2UxqHkcYYcrm8fPv0U27ZtWhPNrr47fT8dn2KHkSF/enyzeVkb/XH6OJyW7fV19B3W7D3+8PH9K/5WEfLyyJwbF4+Ho1aqvvh9c9BpVhUOzHJ//OHNi18yb8ZTDmDz4/uIcvX67ZIOjeGCc2gCVG7ZQ7FxHCHL3dO7l7f7ZVxSxVa7L7ssNST2jLWK836YxuAbNQi+URPP3hTMLDYN1wqGUgsDIJqsdw4zMenbXU6ZPJJjsQLK3XaTpglUyJGWWmT2TKYKBtHHlBdRY/SI2IU+5ZnMRCsrhcAAGtp2/+o16tzd3GymhMbLdPizb//1i69efP/7H5tms2naLB9QK0ENDe+33fSUS55zMlZ9s9nWeWi8yyLddsPcbLrwcDwi2cOH4+6m3zTRDD4PY6nzy5st5qVx5CK32/YwzPOUUyo++CWlYD5pUlVVjSGaomgxQFNDxuN4iq41oIchxQYXKzd9K3kFuAkZ2LsqZIaihogNh6T8L9/99DilWsVSKT/kPgZH3LdxHiYvBuPw+PnJTInIN9F7msbFedcyfHz/43Q8th3MaRYFdmREprbe5wXAJImWru/nMc0lq4iiIqGJEjTs6Xi8R3aI3gUHUvOyaBHvg
vhSagZUcr4WGY+PTbNBM6k5Nk2aY4x9KQXNmDjPhZTIXOeax/snpzje31nBhjiNg2tj02w+frhvmlZ1ud5ujulumU/OQtte1bKoZDBlF6JvmlhNxTTt2qAqotkBjNNxt2lyXq43m8g8jHOIjadYihCYmfV99/j57np/NR5O/aa7udofHz85FzZtN0zTJrg8T54RzfqmEVVAaBoPBMPjQ0m56Rsg0suO6NWrN03wbdO0fdf3XdO2Xdf1Xde0XWzapmlCDOw8ETETMa+IEK2OA5d985qzaV/0F0arf+l6BwM1WCOpLsliqnDR8DyzNs6unwCXkfvKJLp0TT8LPr8M0vHSdZx5Sech7bkBIztDRHihgKzZVWeO0npLXY8B7Blu+gIffBnOf7H0IDMtVbSWZVmmaXx8eLx/eMw5Va1Fikpt2pDH1G9j59vh8MhEpZTWOQW8Py5+VMmLyaq5MFNtXLzZ7bd9A1hrLtO4OEek0nZ9325zmcm7m5ubh8NMtNlvv16Gh8fDOzDVspyWg1YkyfveM7vHeVoEbzs8PR1RYNf3kd3904MMMC3ll798/Xf/4W//7Ouvrr5pprLkkvFku6bRedU5aq4pxuicG+axqiBYSrK/2pvUIuqDk5JCCKHxh8NjVSDUzjupuW2aELtP93f7gJ13dUmCoGKG6sC2vZ+rGXBw1DAyAjMYIRmWWkANIUTHqLbp2jllKUqd2/Xt4eGhi52RSLbUhGnU2+vXkrL3IcYutJtcy4Xy9aUFvVTgKg6gtTnG5/Yc6CL6wmfEcC3EL38MAOnM5yBcJ3Hnph3hAiGdndPPQKfZKlcwu3DfLu33OVlwdUG1n9UZXjgFlwKzCyUOz/IiPffzBACghrYGlEsex6ef3n33t3/7N7/96bePT3cPD8e19tbPyaXYGu8HAIBFtagZQi1WkvpNy0CEDoBFlZFySvv99TzNOW09e2ekSa0JHJp+B4dlmbWwQHHekxfRyK4NLueCprvO51lVSZh2bRzqiRGZeSqVCVuiqhYD994XU6w6Z3HOs2mIFDoyJMNVt6rATkWrUNcHBAVkYwSP/bYBC+O0pCkF6gIH5crIzrmqCt55x0YoVkLDjHszBCLNFro2ttE5jyRgyL5Z5rwsFY232+uUxtiFtu3nJb149crAl2qb231O1UdmDtQ04N2FT3ixkXkuN3zOD4Oz0OwZHsQz3ezCOVs1recEpbN10Rn7+Tl0+bO15ova9kICMrgEmJ3bgQtQCZfDMnh+10t9Xlpe+pkvEhiowXm9/gKFni8ZtTXOSTRXzaWM41CWZGaHx+M8pmGYpNTT6TjO0/E03N0/nJYpS1UTUFMVhySmoCsARM/YvqymGbQqhdc8d7ss1GZ6ZnuumT7O8e2u+fr19YtdG9m8iaZFz1ogICRFQMIYW2Bw5BU4+GiMzgd2QWtxZ7L46gVZEQMROefNMpNDVHbOeWZ2tIrO0MDUxFZl0qoyAyl49qtWdB7ZAQWjAOxsdY+uxWq2kqBWqWIKK5hsYKZmpUAVsEJgAEJgRIbkEAmJkR0SGzLRegmwAV3M9gmIkGlFCbWKSTETMFDFNSLNVpzCe0QEYkMGZgADUyQkAPIeXQAfMDSGDsyg+EsOWlHyQIZABqimqlrWfDoVBJJSjVhRAJ2qAZCIGlAxy+rmJEulSWHKekxlXOr9KZ+yFkNA8N4zuza2fdu0Dbe1SsnAzD60XVRzgFFEnfemZiJFpJQqZmqcxQypShatAIYrPI4ESgYSvOfgfewMzTl0zhnQOppSQWBSAAE1A6cECGf2EinAaiJMYgQExOy9c56NgIgEzOHab58vBvflcjjfC2hFbR0TIq8qRENgJAIWUTKqgGoqWgnhbPu0dvOmjlnApFYTJXaOAzi/5nswIZoislYhFAoRpJqWNYUKmTk0zoVpOEou19dfAVUzC74FK1qX6C3N8zgs02GIsecQS5lViifnXTTJClJFVSTPc2hd23WSwcRssWrQtPT48ehYnZWubbF1YGqMm37z6FgOyuRaxyklJ+HTD8fhuPzv/6f/rr9puWm3t7fbq5ss4HxjyjULR89+u8xzrRa3e0NGGps+T4OOp2PK1ZRNcFmyb1zRuqS0LFkq5rmUHCA3UgYAskKecbbkPANZzksMzoUIhI6bOg3DeIrBl3kqyxBiE7t918bT/Ycy5eHuePUqt698c9v2u3h/yl//2df//v/zvfKucTIs83aziX0nVF68vPr+412IcbOLrDpOswIQKgXb+M2S6lIwxEBQCHSzD/OM07L87rsf3nz99i/+6u277z9+Pjx221Byjm3z+HTCALevbn747of95nUfO0MxQhGtUmotntqSci1mWYckwUvN2Lnwy7fX//zTPYgD70oWKbWW2nUB2ErVEL2A+sBkqCIEK1ZcVVezN11xZERBUDNF5xDxTN9EQ0Y1AIU1NwGRZfVlV7X1dUBmULSupE7DtUel9fHzxRCxlAROxzKtOZUdukx1GR8fXdegDx5xs9lvXps61QWBVCYpCSylOrPzNc+BFk2HkqbQblVLLQMVCa6ull2qjpuGHcynhcD59oaa6/HpibxntFqrAIxJmhBxmbDmze5KqgBIv/Mi5XD3vs7Ty9e/CJttVQMTJqfggnNWTtNyylWpu7HYoR4xz4iM6LOS7yLmxUmqD+90OGJdRICvXmLbYlng9PH4/W/KdMjjScdTCKBgp3EpxDH6P/7m41ilGkwpOc9qAOyUcFzKnGoNPqh+tbsayxKcY89bv1MkpdBe7yl0jad5LN6VbRt1HKHzBC7EWJQxblx/jVnmp0c0NWzMPFqo0xBjI05kSoql1hKd/qu3VxTg73+8/zikLIWBDIGbsOs7NqtLoaZJuRDFljquauCca3IugaXloGpIxqHVkiYVYa5WTuMdh6YjJ0tIpuy7KeWsdd/vx1kcuNZ78v40fco1teGqQcx1YXLEfslzVmDybAlQVGZIYxr7qtOu6/P0btO8XpZSBHyzK5VicFgyszMZ3DYc53EyXErl0/LN5msZn+4+f2CjEGMMLkUcDyetddNdpRFQAHHOy0O//auZCX334XQfgsws3LR1MRPZ777NapvovNtYetx2IXg/m6t4HduNjt9NdbruXsZusyTZb/el5tv9NaP5xmsG5nNMOHs2gZIrETISY4yRkBlNRXOpS+u3TDYvKUYkwJU+3/oGSesKjSOBoUryjqd59tholao1sQEWMEDzwXMqad1BLHNa/cbQVHJRlWwJkdQAjRiZfBjHk2+b/eu3NU1pTqG3wPhG8ObmGgO/+cUv2XlEfNni8HSc5qXmgm3Ly+7+4ZgmMAjLMtbgqfV/8e/+ejicRBT75rprP/zw02Z/Vc0eD3ebdsPcvNh0YElApqqWl6H4YVgKWAFk46SMcw6okjIRKTCCApoLnhByWq7219NUFLENPhc9papUWnW7Lh6HIzf+MC6RWZiq6fh47LuX7x9Pd+PgunacZsql3bbvfrr7+ps3vImffvvu61cv/+4f/qXGTlWllnGk6Pot9z8exwQVGA1qzUrM1RQBmADA
rbwUT1jyvCC6bhtcXGBeWS4hRBFJy+Q5kvOn4dS1fU3AzruuLcccG055duTG8XR1/UbHCbSSSbfbjIdDxBaB8zJcv/j27v57YvLoaikf7z+/eP3LuozXL6/vP6ZpOr1489U8j6dh2m6uBcdlGab8uNvsOt+kXOZlzsW2feeDN173GmoKMQSVBAhtaKUsKdcQPDuQUpwKVDHLu+6KKSwpeY9Px4fYNpstj+PQ9GFOE6PvujbX2u42h58+b9q263fTMjt03gXNWa2ImacGQpynxcXAIvOS16vg13/26xhi37dN1zVN0zRNiDF670PwPrB3RHh2sDinxD9jKggIq8Dn3B8h45dmBRH50jY/M4EA1rTdS4sPF7MOuPRD61mzi7XrJcgeV3Ph1ZJmRabWBs4uLfdlzk9nCMDOXffz2B4u2cz4PO3/0qE9/6UXeYadR99nbGDl66uamWqROo3z48Pj09PT8fg0TdMyZ0YjgFplnmspljDHyLtNk1J5OJyQXVVxWrcBN5u23+76tnWMTkRVUi2E1TN1221w3gDnImMeFYUB//Duh4ZfOuYfn/5F8rzrWiRKad4wd+E6kytpXJYlAPV9m/PctbEsBoYlJ08cQpfG4fHz41V7nU/14eO7N3/xZn/78vQwDMNjwwQES8pU0Ac04Cml2DbD8RR9I7WUkhld32/SmFVcNez2W7YmpbIJ+HRIeUhjWkzEm5Ylt5YJ3bKaiIEiQB+ICRChpkye2UV2TquYATGJaq4GKJwJjATgOE177LfbTZUS2nB99eKn93dPnx9e/vmLEJS8qyoB1IeAqhdqD53VP2CI/AwDrZoR/HKq9cyqWA2JfwYRPdftlw7czvnhazngBfhcYSO6IJ9wMTZaHXnPlkdwccZaeUfPxsKma1XquUrXzh/hZ938ymt7Rl3hcr0oogIO0/i73/zmn373j7//6fefH+9TTj6y5grE4zTNKakJEYGYqUoBD7ZMp9KHQczoxodgqgTAztXsjGIq01IXsSxlbNuAYISgSH57k+4/braNTas3cUg5ZavB+65phjSBQmAOjlLKuWIxbGNTsijUtvGkIFUZkZ11HlMFcE4BKkJ03rfOCNj56FswqwAx9kV5mWZE7tvtZAdmGpfBlDZNzKkmSMjY+IZQlpRMCMCF7iqGYHYiGS1P7bZZZmXmsAsYGsdRpBA59M2c65QWFyOpIieTxYUXYMGsErhS1fmNC61Up5W46V2MyOsYwgjpQgO74ECwmq89Q0KXmDr4mUD2eeVZBY34jCYhmJ3xwOfF5wLcIJxXtdXYCC9MnDV/Cc7KJTw3sj+zSYJnn7gvuNCz7OwiaruU+PqTGF5EbWtpr8ZCZlKFiGrR4ThN4ygi3nFOy48/HlIp0zRO85xKOo2jqhlYKWWlOwkK2Pq2JmX1mz+jZJffH9kK4agiEuE6DwCD84SJkBE1Rt/G6Am0ynhMyc9tCE3nnXOO2QQUWECsqBKG2AMoApkpmhKqaGUUA3AIHpHMPLJo9bFFF5xraBVQs0MC1YKGZhnNCKGqmhmoohRVBauEsJoNowvEHtmfG7VaoSSrWcsktWgtINVqsiJaq4qYCEoFKaYVTc9Rb4Br/gMyIzIxk/NAzsidxYUmF6kz2TmdDlEVVEERFEwrAAAxkjNugFtzEZ2D1b6WCQDQeXARwgZij8CohhxWlIqYdW0az4VnoIZGInklLIChKCqqWDEgFZFqRUENl5oXsVn4kHTMej+X+1N+HHWqKxcBohdHlJeSp7n2jaQQm4BMTazVhDUoceh6dpHRgSmWBDahSa0amIoioZBDNXNMAEbsQvTsiRya1loXdi5wZA6KXEXAxDMr06qkMTMARlNTUaZ1LoCGpqupjmN33gUQnbc1TExISH/KKlorUs2YHBErnuNb120EEqmKAAChyWqOd97xqGldTUtFiMlMpaopoPchBmImI0KTWsURmjlmUDRCKdlkAZMlL8gYmy6GrWYDcyEyYK61eh8VxbSAKaIsy9OyJHCemyhoOSVEU7XTOBOgAk7z0PtN23eAsqSlVlCp7BsVUcLXr/e76w0jcdyIoXdlfDoe7wfFwgGG6bTMebffOQ7k01/99V90HXqixgUCzik3u33TNBUwOFKRUkrwQROlaU5zyqkAEfvoakulHg5JIR6GCnNGVEL0wQ3L1PX9nMrj4725FH0/5+mQpnYbkEyJQvAIWrWi4lgenHe7620tC7hGFQA519L52O+vLU/F7HgcNi/16f5hv+ueTk93j++//sWb4zgLghCOtfTil7kaDP/tr77+lx9+nIp1nrt9W1WcwzIl76PruuNYBIQdienT8RTb5ubl1TScTk+PTLrvNjOEmktepsY32+vt3dN4ex3efvPtu7unhjgytk2TSyL0nt2cS9O51ntFKRnu7+fY0XYT37y+Ps7l06fTqaambZyiwWo4dB49gdo0J/aOkaSqmICBiKwBjKaEQFqFmQBVKigiM63bm/WegMQgspKimcw0G+G67VYQFWFkNTlPfWEFmr9MENqWTocpL2W37WpOS51OMjBSmatG279+29Ug2Qe+lpSN1Bwfh3ctBZVK6EUTNwyUVRO42LSbeXjPoUGtikU0ERAieQJZJrTa7q6K4XwctCI4tNAA0sdPd5n4xVWX7j71XZ+WWkpp91uFZTw+5DLtb25C12k1BDFUkdm7KHnRMteC6rfN/kWZD8vxITKFps9FoGuqAeVZxo96upd58D5432HX1yJOlun+p+HxVIss4+yDv7p1m1xzts81/ZdPxyrgI06nRRCQoBarIsE7NG0aMpAK9d39hwCgCsOYuPG7fj+OM7oOFEVAs1nRlNJu05CLtWCazbedUqPKqAVrlboQeoAdUIntNuUnk6qSgcCci/ttEXmzu2r+/OoPn+8+PJ0elgWAxEhEtJrn2gRtQlyymPhFMoqQFotEVYIjrVCLccDjMnzCJXRci+aUmtBYLGbBHMRuoyynqfjgHTjGOk9jGcmHNqLPJUXeKDpERtJDer9pXxXzw3j6s6+/Ccem15LM5nwkP+esCn3RihA+fH6/ffWyC0GXaT49OgwnSUsuBbm1fttFwTqXJzJ1ZjLnT/MYXP/i9tXHT++K1OJPzsHVZk/LtiTLOXW7jQ+OWw/MBbTbbw4PP369fz3MC2WTMkQGiBFiP5wGqp6o++W3/6ehPICP7w8PN1dXVudlunv96sXn013bd5tdvHAdwLFTMGAEtRDCtCyGBFZLTYE8hU5MgYnMAWLOGRCL1Og6QwKsKjVLRaZTXoJrXNNVFQEz0LykGGLTtNN8im278lhynjofak2gxESqGkNIOTnvyDsEyiqQKiGrKLvWu1hlaXsKwb/kdtNFZqLTtN1v//iHn653N8fHKTZ9exXipktZr97IP/39b/bbGzsm8wG6FtvuqukRFZnb2HATNducluZma9XdvX9//+Hzm9f7YRhrKTfX16I8GvsuLlMZjplUr/ootQiyGmpKfePnaWq6TVU+jtaQq8wlLW3XlqSvrm5+fLgffUN+2+zj+w8/3Wy32OCHu9PNC+e
3+x9O89+//9RH/6rvyzw7kfkwOMRxGGLk66urZtP8/scUFNrOEzlVCl03pIrRldOJI9eStGoplZgIyYxFq9maQWtklqaT1uI5dP1mmedSaq3VRGvJfhtDaNpaDOp4mKxWv7s1AKKIyKplE7d5XhisSimSOWfioBWDiyWNdRmi9zWlYtWHZpE0HB9MSxPxxZsX7z4+vP/0/qtXLx/vP6f5KbAxI8e9qgUXO+ry/IBWHu4/bjdtCA7MtNaSMwE3Tai1lDSisQuBPG92XUpsBN12g8Edx/Hq6m2eP1KVxoe0LEgmJqYOAMYlXV/fpPnoBQJDKdV7v04CFUutqiLFRAyRod1sJKfYdrs2fHwPAPDNN9+E0LRtDE0Ma/4ZMTM7v84/zw6sq5wHzGhVPX/JJ7+0JM/g0YUkAStRHwCAEUxMwWzVbqyd85kxT5dZPNKaGvOleVrbmEtTdO7NnjUX+OVDLx3/8yz93LitYo418OrSkvzJOP6MT13+PZvMAJ05HV+OZuWxq0otOc/TeDo8HU/HT5/vPt/dT9M0Z0V2OZenhwcyLUXAsEl0nDOqoAiK9NG/enHz5sU1oPZdr7mkXAyRgzd02zY6hlTlNE2ilqqxior2V9e312+t6DJ8iDBvr7en06mIed/sQjsIpARFbS4sRqdpWUrJaBVwTtlJdlBiKL988zqND+HKebE0jO9/+xPQ5xg2oNnFnn2T1KCaAqWyKkJ003TIriiErjOxwzAY4JSmPgRAKHlIy6KpkoFoPR2nrAgGSbVto0y590wEmyZKqYzYRs8UimNyTgxKNVbMpfbcBOeq5BhDWnITG+f8PGeox/1+0zTtnMu84F//N//DP373t/xnf87sa9V+E88G0kSmikRn3BLOEp0VzFn5G/Sco3eh61yK6Gdw4TM1Te3MBEJ8zpA6lxhe0ICLfuhccSv4iLby4FZk52fixTPdbTVAgi+1Bs8V+YW2hucHz6V3ATrNVMQMoUr69PHdd3/8zXc//v7T011sfdW65DIuJZdcSsmlIGFVEVFGAkBmyqUuRQNyETgzBcyYWciJMcduSllr0loKGzks82KO4357erpPFdqufcyzsCfMOSeDRlSaEJc6i1nDBoC5mlSpimuuPAMRE5GaKCMiQBfDuCTvoyogahsjsstKpBaYHTNQiL5JhgqGxMzQNk3RooC51BDDllFMmQjZsRczUCBFxBDbXuenkVwg34AkB+jaGLq+LDLP4/XNLWNz//Qpm+76ndXEIWyaGx97QPQcutB/Pjx0r79KSUOzkeziZg/OA10Ws/XsswMyo5VrtgLXhpcJ7AWb+RlRzfRMOHomBsEzKel50bSL4/9lvYKL3fnlKSu98rKmAtKFm/lfrXgEdn6BPRf0CsmfxW3Pr0ICO1fmn/wxQFMidp5dOQsbcs7LMhuaorInLXIax9M4pLIsSzJUrWKmq5RyNapTOa+0JkZ0oVGd13tdrwf82bVhenHxVmBiF0Ip+HBKP3w8HIb55a7fbEPf9hQ6MJOqxKxVVAsamWKWkUPlxofoFSsgVBUk8hQZBcy0lqLgQkRABgSroKZWzcp6CGWFdVRWyTIimipINTA0ARUCQjNiT0zIDhBUTUvRmlWr1lRLQgOtFU1BMppILSoitUBOKiu3yBgRTAiJCYk9EocYkbP33tjLuTQUwFDFrIKCVEVVq4KiKit+XQEB0QM5Cy3GhnyAZyEVIToH7MC14Dt0DQCCFAgNNRFKxowkqqsp0ooOnrlFIKIGLFWAoJqImaqqUhEsSllqFpiqDhkeZntM+v6QHuZ6yAoInfNdpPWYcy4oDi3nyTVtcN7XJpeSALbgPbcb5sChYQKnTWxClrzMJRcxdVpVTBXQO2+mLjQUA3kyAFEDNTCraobIjh0TUzCtZraumWimUBFAVQmAgA10NYtwzosIGKwi+jXq6SzgeWZhfrG1Xnv1875BkZicW69cZl4pYutFXcosoKLKyIZgIMwsVURBTMWqJ25iQ0zOE6FZSYbgHCOZqi4lNexj8KpVJRNaleqYCbdY/bIcEaHd9rlkH2JoerNsWmqdp/Fxmmfmhl2L3luZm7YppSwlpZK2bY9mfbMFwSUXtYLO2m0XqGk2265vai4yp3GYXdueTo+EvvVWhmRgm20zyuCCJ21A+fAw7nfXN7dXkMrmxa7fXzkfY9ub8TwsFBgJzcA5IOd4NtOMYIg0jEuaj4qw2VyVchxTybCQuDqlpomgmtNydd0nORK2u9DOw6mWauRLBZEFm4CItRTQutntd1fXJtr1jfMwDU9IdHq6M5FlWJip2/T5cHx695OnePPi5efv33HX3V5v7sp7PI2g9evb7TDMHm23v3p4+sAe921QAhcCB7ekmalpuibl5By0EcBZ8CgCYJjnpKUycjJ8eXv1dP+U8+lqty3WDafRO9/4ZpkmZt9tomM3Dcdt44FQNJMRefABSln6reto9/gwjGOpUE7L8urN1jf8w4enKouBB2eGEJz3DkTNDGrV4AEc51wBSdEUEAxqFURzhKoACI5dqcLMtupdkaQKGCIwk1u327h+Z938mpgBs1u9IggB1pgGAxHAi8c7kJU8E0KdOfiOMflIjFBrgQbAO0sYXLtpfJ7vg6dSKlgV3KrYshzMzY67ukx1nrnpU8oBiREM5lKmmksIjWv3BDkvEyjGLi6jOg7O4ZIKdc3TcXi4f7ja9TKNnXOdj8Pw1G9blXy6/1hTutpsWx+gprbdE1JaZiJhFJWax4Pi1vdbmY82zR2juaBxb8RMhssjjh/q4TPMczmN8WXP/VZ8S4RweDj99N308ECe+02fs8ApbZD+7PX+D3//m4PgKROMlZDMTOe6et8vWlUhNq6IlimZpz9/+2IbYplyQafqvv71t9C3YjBM46sXr7Q8kFRH0LStVBYJgSJThCrp4ZCPJ7G8PB59vK6iWQQgpjGnUok56QwMoAol7Zz/669v/83Xrz8cn44pP51K8Bw20ZibwOyUZSV6WMq1cQ6MDd1cSvSRQ8w4///J+q9mSbItTQxbYgsXoY7IzMoSV3aPQLOBwZAGEmY0o+GJZnyAgT+XJGgc4xgHY9YAGjPTum/3rVu3RIojQrnYYq3FB484mbfnPGRlRcaJ8HNi+Xb/vv2J1OrTSb/02+qMZJeVnufkOG47Xx/PIrZuIrD8+P6HL968zkRzgeyBBAIRWMrTU/B9qumua9tAuWq/6oanA1B3t9p99/A7Q55S6Tb9WJ5u7796967EdmYjz6HYDBpi2LDYYT7PABRi6fwZxSLNMo+j7FZrGWSQfLN69eXN9uH5CZwMcjpYA2VyWgPfpdFWtIOCOifykEsF0eO097gejufb1R+r7Y/pEalp4ysqYxUmcS1sTW3Xvr7Zbs6Hp93d3eE8Ho9jbW0VTdLFB55TBmIiIECp5pwXUyZWy0shzpgHQgagYRg9MyC2oTMVVUWgNjTjPBIxKBhawTpPQ3DxshctMsnJeVapJrWYOB8AuNQcAzNiSYIIjBic847nUopI9JGIc81M5Lzf3AQzq3NeNWurosDrfuWDf/
XFlzgdmdtuFcDjMObN7tXJ6K9+8z/++ue/Og1l2/Gm7R9/+PGbb75m76aqwzjc3d/+9d99R6T/+r/6b3/32+9/+vaHbrf77uP89osvy/n83VNOeVLQG+6npf10mFEBtURHZoDkZYZ+ezfNquhn0g9HEcNV6HKh/Xn+ah2/+fLrv/v2u3GevrnZvX3z1TxNQyns2gzNWGDIJ2eQc33aP2mtDNgSJrLecxA9n56BRGpVccggtRB54/Cwf+bI48fRVb5s7hKBWVX1zGYqouw9IiKjEwOtBcHxzntSGNkzkCIFlTqcD+2mn4aJ2GktkWlmHufcdH2tGYGJnaqy8ZwHdK7rb3ItbVildEr5tIg0Fcw7C+TH8Xx/e//h3eOrL26appmm8fH58eZ2ezgeQKohlFREdU4PYrbarESNXcNsJSdU6Teb2LCKdF2UynNKnjG2XS4zMBjSXPJ2vWGPaEMt+/VNPB1PoWn35+OuXYsaOHYQrJSH49MXu5tcC0iaphT8NjZNykVNkMWxY2YxUbHzcF5162mam9VFW3f/+o1zLkS/lFEs6QBEREx2jRw1M8JFr0FIuGAXujbqENIVsCx5vnTJ/EHSa+bCBS8hvcDx/yyixdTkhX66Iim9kk0vEOy63f7JFPY5ULtup1zA2XUT/cpbfeYLghfp0GIxerk/XI5weZ8LXLdF/qEiVURyTqfT+Xm//+n9+9//8NN5GHNJx/OsQKVUh9fnaXXkuraxqg2Hmz7e7Nqb3TpPaQnFlZr74No2MLs5lfGchlwUFfKEKt7Frm0j+9Ph6d3j+zpr18UY3ekw5VTa0Nc57+f34GPnm7jpfEvDODniGL2anouoiMema1sf4zieyjkp5VNKffQyDKaj+BEdHsdJXXBt77wvJddiIuV8SsF50uqxZ+9P9dQ2EQhXkbvWPz0/OeNt50opKc1DOsTorGAVZbMmcsvhPNdqAIoEJFUli9LcNRgi1EpFFClEh0Ws1gyAc7GuW9eamIpjcM5Smnzwwbn9uN/QV3dvXw/z05vXXyOzI2ZyF3/Yp+CYKwFzVW5cR4YW888FR19ZoX8CkhdR0jUw6xOh+EIfXiUkaJ8P25WrxAtUsU/ftUjnEPSzMK3lVa7huUvqk12JKASTy3stJIIqLKOIaGAidZ6G0/x4nh/FkgF8eHie53ma0zLr1ZSYYYn0JlQD75yiHlI65EpoSQDIEzkw9AiC4JlzsabfPj28twpARCEAoY5jWK+6rp9yGc/jzXqVrTqD17df/O5wYvINt6t1OJ2Oveem4VxqDJwLDFNp2YlqQfWOm8CqRoEce2+QK5EAO0J0ZNB43zSBgDj6ueRVu6YCYslQyS3LUlvqyMxmpEqmlaAVRecbR3iczgzVMVPXS2qgGnkXQ2ijq5N47rMNTXcH1IgUs9r03dLzomqhadrOSc5NH5XVdV6wrvtNykIcfezJBby0LBraEpdGSKTElwmzT9lqyzpj11H5tMJ8WrRe9Dz2WbD0si59Rkhe/uH6+ssidmFA7RPVCReWEemqbnoZY7osgRfr44tkCD4N9DXaiBZl66cvAjC85sqBi943Ltd6PJyyVDMLwfvgFWBOZZ5mEV1YFQI0M1VdGCIFvfw21BZh6MLqLjqNhYddCpltOVGIluNdVEbV8Dinb398sHl6ddc2gX3LUyroM6hGDqYkCqWoQ1Q1wOJ10Z0qM6c8mzkABDcZkQoAoAsAqUJNxgxEynTx+S1XIzUVBRPCa7CSXqROC7eFgGQIy447kamKiJYqUkSqlKRSzIQBERRlsYyZWTWtUKvWgqpmSgvliEvTWmXnpZoPQYtSMEIlU9BKiKRVpIKBiZoKCKiY6WWZAkIgZ76F0JD36ByyA2YAAueBPTgG34NrkQOYgVTwDYYWrIAKKULOekk7WgZ8yeNiVTR1oiZGVVUUi0IuWsSS6FTqftbjDB9G/Zj0edahgGJwYA7ZoRGoZ2Iiz2CWVWWak5dGak05A4AAIjn23sUmNC0ym/WpVN+WnMucJFRJxRQViTg48n6563DoSG252yhatWYfAjMSOTPHIiKqmk0UAIiXyCcrWsHALXsJCADgnCcuxMRM1XQp/UPAf6oqQkK7SEBRTcl4qSlwxAoKSzCwqpo6XjxpZqaLf9NM0JaIM0bzzgekgCAiWcE8ezGwqmKwaBoBTGoymBkUAB25wJ6Rx3Qs5bjp7nLKIQQAVw2llEBQaz4dTt41aOhaX+oEeWLj6ZwAOYY2l1xFQ4haNae8uWnblb+7fT2ejqmUw3MmojpkU+vUsapqnQeRXMBTSvNq0w4jKEMfwqZ1X/78dvvFLVqMm9u279kH8m7pe3bB2ZLSlKc87tmAAQiNyWLwTdj+9P7983gUATVddSFNpZoO5wE8Gtrz42MtpVKcSpmnOcbAjhwjISuYD05UYtO5pqG+b0OTTkckatudgr7+KpjafE51nIuJAeU5Pf30k++3YlCKygQdNx+nDIyKOQCUNPWBtv1KZgkh5FLMOLpe0dWxZqkGmPIcXUg1uxB9AwRxOKXgfNevjz99/PjudL+9GYafyliQ4f7N3fNpBjC/iptuVZ4fIjLHzhBEUaQ45uDammXZXvWd29j6uD+nqYyS2wqrJt6tYiqQFcHHNCYRIU9ELEWqaKZCjmqVqgpEtVYmElUAg8VElotjAlyCHhYqFD06AyiiV0mzLZ44R6xSQQkJDVENltBRMyB6uWhcrhqlpt2rfn8cijjPsXEtmTBU3wSMMU8zYLvbrUqapaYy55KLmufQ55pLrt45IjJU52kQ7XuXTgdqqOZnAwRoqkZSJ5iMCZGmlJv2Dp1/fn5c3dyByTzsY4TgtAPfxlDy0EQmrPN5L3NidqvVmoC7rsuWap4YK5Jjwnkc0EV00TnFckTIBh6a+4TRQFyZ4PmHengMjcuzhaZ13lcFcAzDYf7wXRqeV7vVnKRQ3t7fZdu/JWg8cPnqP74//qefThmoIqghEIoZEVYFcpjEpBo66JvVtt3cd13YcVx1pNi3FNj1gX0XD8Nz7zMTl2mcwyl6qyer1qGfyHnPCn2Y5zo9P6dcgJzzQQFIW4Y2utJwTjBH5yy1WaSCAuRf3q1vXt1VRAX88eFZpUzT1Deub7p5TqTS+hA8A3sC7zB41061FC6vbrc17rjOh+G46+NM9TAMXd/58RiJjtMx06CoQDzNJ0dMoL6LY85tw9lEAk0wV5069TZnlBoZhEnK9PTwsG76sYY5HcVOu203Ds8tBiTUUuaSGtfEprtZffXTu29Xq85qyVJO9ZzTAU3QcybzMdT9k1+thvH4Zf9lOY5DOTWrWLWWOu1cXK820zwBNVOda6lUQ9+Fdf81UwWB++2rlu6qCPjbuchcTx6dZ4SaoncYfc7p+Hwm8uf9sWn8F7evK1qd8nyaLtcCREBwzpc5qxZFiN6nNDtypRZixxSRqKooM8WoKs6xqWnJiJRNFhcCI9dagCmElgxcbMo8GZp3nhUBqaqQIhuSQ4sRwIrIXHJEFrAiUlLywZOwmeVSRDJxKDWbgHcU1z2iDuOMRH3XDOeR1
WLfJ5vuV68oYIiiJbVtvHt9vx8n9c2c8N41t7e71sV37x/vX71Gh5tV/Oe/4mk8/Pbbf/ib73/6v/0P/9f/9Bd/vt7B/vzwcSiOUVEV4P3H54UHJ3ZzVST3vJ93u10udZqyP02rpnl8fOz6tvFhP6YT2Ha12t7S8/6Jm2bbtCXnKZdphHmqbR+Cd4cRCpTzKUmuMfgyVjT0hHUuEYhFno5H9u7DwzFEHxwLWAYrZWZCzZbHDKKGyEQqEr1TVTXIuTBiDBGIAFTkkkxU0wz6ROgYgdTIOQCs2RhAk9SSGDGDDcPZhwCiTJ6M85zbjRunmQFXXT+loWvWKFq0OtdMKbfdihmlzFIrGGrW4+Fwf3u3f/745dtvzuRKntN5RqHTafbRac2lVOcciuR5Qvar1SowSpqZCVSiI0Obp9F79+p2o6UQ27pboVnTNrkIATTRnY+GZuvVejxPiNi6phS5Xa+O53NouY8xT+kRj3BxLpiYOufPp/MSHFBKdZ48hWzTkjiUx2Qoy1nQ9ytaCkWYcHGZXfggvHgm7FLtBABAiLjkUl9LEvClWB5fYNILPloC2hbn1gV9L9yTKhJd7GmfAlZtYXmujwMiXVKELqBC8QVkIRi+hIGYXa9uF+Buly14e1GBfEJvcJGdXN/0k4vjYvzAz1AcwqX9Fs1UDVLOh+P58fnw/sPDDz9+eNof51QMKjKiqWcInrniqnGvdnd5zKWU1bp5c9Ppec7TeSbom9g1fjiPoeHhfD4cdC5VQPM4E7v1au0R2zYg4jSfoImx447bELcANc0Tgrvp1k51HotSi9akkqXOCCZlnFNhal3wDJJqenV7H4IbpD49HHGaNqvQEXSB1l0z5qoGPjqVOpc0TQgCVpWdC85lACanqvv9c9+1jil6X+pQqxwPo0cPhoxNLjMh3t6szvtU1GKIuTLU7Bkt8rkIAjJj4IAAZigFsPXeUfA45+yY2IEoqGERmYZBa+l730aPoMTECLe7m0MuNR/vd/en09ObN0iORYVsCYsDIjYzA30x+NjLB/dC1ywf91VLtGx5fqYsexFgvOD4C6ReCEa8js4lW+sy/HhRbCykzx8C8usMXfnMa2QWAL740dQuRpNriDUAotrFVolmS93RglfUrEh9ft7/7rvvv/39Dx8ePxzPx2kYyBETzLnUWhCAGFUMDVWBANkUtB7Op/n1fQKccqmiDXsVcYSOwDcBC2RVCM3DYfjq7Z0hLfE2lAoh7DZdnkepNfZdPY2K5WZ78/3T+XlKv7xb6/kMpH1PdagApWuiqjoAM8yGTC44U6tEYCaEREiNp+iRGQgN2doYRU1M2q71HmP0qJUIQtPWRX/BLnqfpsH7QOidC1wFqOSSkbmkSuDUpN/cpIlynpr1hklYtOZcSorrnffx8fl7MN+6dhzP5LhtWyCrUik4DnGS4vo1svOxnQvG/hZdXKxnn+jo5cO+MtWfIrHwZX7sum4sa9nLKNjLVF4+7Zfl5mU4r+P32ffbNbHomkW9MN8Gn0bvxUN0ia1cJuwPOfjFsvYyYJ+x63/wpOU/CouBV1SXEmpiLwpjSqXkLMklN5Uy52lKU5FiumRwKQKqqIFVBViuEi8/vqohIl1IKVuymC7A+pptZ0C4GJMRVEsqKee+ccgMLuRqz6dpHiu7Q9e0TeyIqYqIKpuBgiOK0bVtRGHHYKoiqKrg2cWwmMfyXJmosluaf9jxhQ9S1WqIZot8ABBMEBBUkMBMFiyGS4M10XLWai2mVmstueSSpZaq1UytCoCRCtPlNwNVCAxUlqAqRjSTQB4ZmAypalGpEptoUpecZjRBU1IxrXiJEIKLDGBpbkcjDuR7jCsMK4odOA8+AjGQA+eBHBCBb4ECsgM1Cwq6Ns0L56xqyk4IBUzBREEUDVhAFbCqiEGWRRYDqWouOs11qjLM5TnjU6J3ox4qFOTQNIQUyEirqlUgQybHjsmBxOgcoYGBFk0y7RW0ktUmsDKhd857dr2LFmKe54SuSBUsy5oH7Mn5WA0JyVTFqvPOeY+M7L2qKiCTsXOOiaqaqREv9wlmCmAIZGa1VgVbCPSXVDg15cU1B4rk2F2s9J9irQGAkBy7pbhRTQFIF8ZoCTACYCB0AFrAUNQAgJbSRIDovCAAgWkpksiUkBkRGUqtTChVvUNmIsdVUxt9yamU2aP3oUc3Wy2hufWxr/VExG3TqpbgbT4/758+WFGHSM6ZVtTaRHc+5vE8O99gQJHM3rNzRHW9WW+262r64cePUmvoAkFtXafrLvguNn0pY5Xq0DvH4HUcj80m7j+cPhymtvG//i9/ZqChaZruRswb+WnOgRvfOqKl3KYaKiMxRU9efD08HeZhX202pdXmbjx/mMZc6mxQiQV9rWDOOyI+HicrIUNGLsCW6+y0IFQgbKgdUrrZru7e3sT1ut1tnVK3WhESlDzPoyMjBJ0+up5LnV9/83p43o/7x9PTc87SrTf/6W/+/L/4+utX97vH/YfbrkcKwyTHeXQgsW9D7xpun/YDEXZdf4LJoJKj6FurteFQAUhqqblbbdI0j9P05du3333/rW/91z9/+/33PzWxFavksHHxcRhcv97sto8fH1dNq2IMHGKTckKNfRs+Hp5iRAtjt+3727c/fv9xfjadLOe8XnUwjON+7rgX5MN5Ck00rY2jnGrO2QfH6HNJqQoRCIiZEmHRcuG0EYiAzKRIEXOsaNfN0cv9zzL1sOxGERPCkglIInK56qgioPfhRWNHADnncZyoIQzh7vVmOB5MdbXZZmApVYJlnj4e9psgmueu3+xPWWQyOTOic21J1XNb6giMefjRcTrvH5rYgZiPK/XRO6jjgOpCs+Nwh2E9jYlcQxjG02E6nTzaTdf4mjRNPmBNls6H8Xkf4qpbbdC74FojtpJQC6BYKechk18Zd+gby0XLVLWEzS1ttnU8tV5weKz7J/a9oSMe481WuJVmQ0ZczsPDuzyMjfN5nKGagBFx3zY2D//qF6+3bfsqto9Deh7yx8MkjuciZoDMMYacqmP35mb3yy/f/PIXX+JsqLjexA4dDmPj/OtXm/Pju7e7LZa0vr3ZbFapTI23VefScObgjLFZr6Wyc83m9k0paZ5OCkmVvA+1VM0jE8XwugGs7WMdT85wOM9q9lDn4Hiz23yxCuQ5hGiSUVMTadGqsi1cu4nz6pwx5mIkkXNatet1/+Xh8IN6MWTA6Xj66c3tr85FEEDmlJOtvnzTtJtvf//3EErj6XB4v1ptxAdlBHT74eGLvoVaxnQ2gt2qHY9jJs5ohzm5XBtvAeZa4Ga9mYDPzzM1jetuqmApMufxdH5qNvebJo5jM8t0v7nxr316Hm9vb7KHx8fnm7jt22DG7Xq1n1LgFqD6VqY6mxghmsJ2feNcJzAcjqd1jBSp4qnMc4h9waz17NudJ8p5nPLZc691Op8/ts22JYRK55Qoto3rNVy6n5zjKRVTc45KyYEjIzlaQugdExsDInjvG+8cewXK80BIPgQwFNXgeM5n4tg3XVEh8iUn04qgoODQpzIj+aWxcE5zgBaN
SkmK0DQNAkWOIjYMU9f2iOADmRBTCC4O82QiHEKVpALMLEVLEiRuW1bSr37+x23T5HlkJiR+c7t78/r+w4fnbtV/9cufOYWui+168/Vqt95tjo/HLOLbfrXy9v5M+en/8f/+/9y27jgOP/7+g4qdTYNDRF/AosNf/eJXf/nXfzGNsllvE9v746hVUpLgXcHS3G6r6P3d/SG9O4z5NDz/+uuvzvI0vN/H6HOSA+Y+NpV8d/PGB37anxyIKTIgqJaqDXPrY8chlfx0HvbVCnnf+jxrqpKrOHJIUKeTAmAkjr6UAmY5Z+cdIBCAYzaFWow8AahUVdUQgmcPZoDK3jP7eR68jxw8ALHntV+Nw6AK4+m42t0vJl0f48KAuCbUWdq4maeHNB677d00nNl776jMIxh571UUCXzgWqfjAETuaf/k2KGjpmvkBIhQc5ZSmCmEKLWaSR4rSpEmMFJo2raJ8+lMyD54NatiJpbydL9azfMUexexm4s2XShCT89nns2zm6ahi+HjeGjbddPsTufDum8qwTydCZ1vfU55Og9d15euE62MFFfd+Xz0IYJZrTKNQwj+avuC2EZEJkYiBAO+eHnwhbIhZrxSMMtvFS5MEl3QraFds1XsAluuFgtYxPzX5JbLM+AaIYlmZvhpkx0Ql45z/CzC6IKdEa91Z1eIpp9qphaH2WeGipfr4wsU+2QkseuBXZGcvgC1l/iZC45bPBOL+wygpDSO43A+Pz09ffj4cDydcs4GoADEzhSyJpgFc90GGg7n5+envm0ThGOd1pG//mrngPePh5/ezUUgdgzVSlXnm9b3Tb9qW1dK1qoFlTx1feO94+C9b123G07Pzabrm9Xw/HCenxQFnE51HOehzILOb0OzCqGl8OE43MRVT1zm8d2HA5nbNJECgSVQnE4TlIQO2n6z7ldZcoNNMj4djs6xAvrgT0OueWiapgmNQzelcpAzMSrUNM/eeVGd50lrSXluChACak0iVYsnTEUUabNua6kBsY8+54pESnYcp8bH1692vTbD4ZRLbdsw5CpqTEjOlaKpzH3rKkrX6ZBnJq9l3G1vUyQRjb5l74GQmU3lirPpysNc6MVPIS8X1o+uXCRcxD+fbZ69TPcnk+Jn2rQLm3OVC13In8/VRpe4dIBL795FuXHlqhZ/mple8P9CAl3G9fLnpWbos0ldcmfJENREwVLOH969/4v/8Nf74zM5LCW1qziltOjdqigvSSvL1qPUru3YOymSBAR4Ung6TVOB3lhrxsghBKkxcpgnvVl9/eHHv/n6zes2NuN4BPDtyleD2K9v1vLx4eQqtN1mGJ53r3fHUb/d7w8ddqtVxKJz8k0EciTSejQRQqfmxNTUiChySLkgUFH1DmNk9hpiA77FwB7IAZDvfIiNSprOQMjswVDmgswUOEKXc22jd+hFtCoUVAMcp+nWMZHz5A01lUPwPQHNeiLHvtUQQqpTlWrUE17yvJGYPVWx3e5mUoV267j3zZ3Z2pGGZkXskd1l+cJPPCHYtd/u5aN9WXQW9eIyhp+qvi7U5OJWuVoYP5Hr1y+8zuknxugPuvGWQdYre/Q5t3kR6cA//fqMErrM3GczfNHzAHz6C1wFL4jzrAamasw+Ng0xnB6P4zwXlbmkYZrFsoiYqqpcqM5LbBwAgIDRkrhx8ZwpGSLzC8ulL7zthRNbfsXEtIB3NDAiZO9F6XCc6GQMXAzQjo6x6UJgx0yNp8b76D0jFLDjXC79IQjBezBCrIZk5AEdeG9SYNnnFlnyt1QUDFWr1aoitBycKZogKqgsAbFmXqqKiNUqIqJVVUsptZRcSy4lV1EFkaqAaIpLliIYATIqgy4lHJ4AERwmds45dd4ZkAcwAPYMbEwIIqCKWlGFDUwqLJyaqr00MmDg0GNYUbuCZgXOg2+APSz51sRGjOSB+FLu6M1UsWYAsypazNAjBgQCQFUTwWqkQqKqRrmUqpCqqUqqdc4yZxmLHSf5OMFPAx3MF3bkgg++iY5kZkWQwohVBNiIXQyBgmNHS3OYChhKns/ppAOr0yIIrIU3WwyN94584DCLCM4iSAikpkCEusglyLsOCYg9MjKyAWtVUQVMkZnQgncqIoZmQIBMrBWkVjAoJYtZzkXViJiQTSsugb5Apia1LmfBC1UEzE5VqxYiVhMmIseG4NipqYExu2LlIvsj8MgEVg1Ml9q/YiomygimtsiUqllJQgaGpFA8ORdDlpGsaGWtwhYav1LRkkb2AQSnfPRMy8FInsyl83Ca07kLvXOuFnUeQXAueT/si9a+XSvWEGLbt6YEyOz5eX8yM6vALq5WN2g5z5Mhsjewmdna9cq5WEs10rv162EavA/bW7i9W5NzhlGwnRIiQLP24J2PAQBKkVqmks/MzEglJzGtYi40UVYi8PTwZMgcwq69ff/uu+Nh8MF5F4+nUaXWWh3Gp3P+8ss3pT4z6mbVS02lJh+56zexX8e2q1X1NJQsu5tb5zvvomu61e2tmZiRb1fz8el8PJzO59hu8uMQuoigkvLt7s3zcW5iWG13GAihcoBScrtqXRdKlcagc66keUQTEyYwIGJniCq1jVEKcADfEGKc5hS65v6rN8+no5tpvdvUXLVkrJU8B/ZPz8dX66Zf9/OYeNnIgqDK03na9b0nF3wExPPxeP+q+fLr+x/14XAYjuN0f7e6We00jcOUVbQNoaqZWePCPM9WTUi982qgpgSEhCrK5AFJ9KLPJGSpuuz5moIhaBVEY3QGhkwLTX65xTADo8tW1XLPdA2bqNebKrgksklOlSjvB7q/2RoQUlQxBRXU9+PTl7ved3Q4DZtVJ+CIQCWXclLLWhuREjuex6Nb+TKfczosgvAsxwg7gmr17GNQF423yitULPMx9DcU4mkYyNPb+xsHWuaRpbSr/nk8qkq7Wq22O1NPHKtZnadSsuN12/Jw+ADcAPtULQYoeQIT9mtyK5lOLeaQnvPpURXa+38xPX+U5bLEMTZNHZ+GH387nQbfrNOcfNuC2ek0xbapiXzoa+H7Hv706zjmfB7nKekplTnVajTn2q86Nez6eLO71xq8ABqpaAAfmcOmB7LDeVy1/aaPrX+l0zluIzkwpfMwkkEZRvJRzNVSFFAEFcB5Ph6es5DDXqwCQGi7EVzW7Ppws7uTar5N8zzlWkPbliSA3HQBFA1wGicDTwhN06tJrY6b1aRTqpOgoy4a+FV0qjJM86yWsmQpPfcQWw3hlMem88FHNMrqA/Y2SMVZWru5a6fTeCqFOm81e6CxZBfWxerj8ceTehLIo5LfoWv71k/z7HoHAZ7Gg3LINWcJjnkVVl/e//Lx+AOpNsE1szrZfH37VRrfNxznWUsV6Zib9knkj//Fn777q3/XJ9cTYJSS64fjNIqxQt9ub2IXBEK/+bg/EHYf92drm69fv+XGj+Uxl7l1Lss5mzNO3DnyVlMVv+3ufkVpmJLV0iJ3t6/uvv7F5a6q1qpSBYE9OxcMYcypSjWCAMFRzPkEi3uBSHJaMiCXFg7mgGQguW/XCipaAbCUXCWDoWcHAFW
tmmnJFDj6SMZmWErNqbjoybnhdGxj54LvuUOzYZxaCKAmUg2wia1w9SGoyXyavHfsrZr5JpjhOJ77Ve+8A6LIMI316fnct7cizx6habyXQNiEZr1atb7hrvF5Lu9/OlBs7n7W/ff//Js/+zf/5uff/OIv/+5v//v/+//lN//w92r5t3//rQJuN93++fj7Dw+vv/jm+flwOs8lpWXjL/bBEQ1TmZN0bfzx/XtPsHYOEbMKkI9tFFXXtm3Xbnd3OGfrm8M8nk3vukDD7MFa56ea1witAqREpkLgkA9p7ijGQN65KaWci3N+Pj75NrBDH30pMxG4QKqGSKUYAYiCAQcf2HU3fWMmUgsy1VoNVKqaaWh658i5OE2liDRt5DmVogglTQNyiE0/T0khW9WmaU/jXsvoyaU5uV63d9/89Pu/bxuqi3MKuJaCDk0zEQNV58M4JaZKCKWWNnRf/eybh/fvMgKYpVzA9CqxgZJLMZtTGaLvm1ZVbvrtkM6OEdDLbKfz0IQQOeSprJsYEOOr7ZxzziV27XA+mto6rM/j2LnWc0Ps1GYtAKzsojlQZh8a4pEJpRZEbHxEIAohpyKaY9fD1XjA7GHp4sAlBcPomhC8NCdcuB8AALuKtF/21REvnNMV3gLihQjCy9bx9VlXYLXohq5Gm+WpdAEVn/xh9iL2sc8g1fKAXhM3PkNDlwc+7asvbNMFmF+FUVcD0vXgXnRGVz3KNSV7YRHwJSDETLWWnOfhfHp4/PjThw/vH59Pw5RyZudMoZp4HzzYm5u1q3Mjst20f/LzXdW0f0pNdOOc/+G7IeeU0zSl2sRmDX7dtF//6hfTeJqOZ9VaUjKAvm88GwZ3f3u/XNxrhel5X8cTRz2OxzqMDUX0HqJwmgmapmt/fDzPhRXrLHPNksdDSvlm27/drnIVLXNOxUcuaqu2ndLgKiLOaf6IjqvL4AIRVNH9PFPJiN4cjUZS633TaprP++HVq3s0CC6gAxWpouS9wz6lOc+JAZaKwC6GjJoqYAWroGAJainVUELjyQiqDufzbrMO2+0wzjmlLtJ21Z7GDEaiZuAMKYYmzTXGur1dg9n+6eH21St0tHAi1wigS6sYXgOJLrc7iAYKn8TUF2HH5Z8WV6N9hqcvH/miVDK7FvPBi0zoir4NLnJuxSXx5RPZtEjnXhJgLm5MAFhK9xYaFUyvidaf3vuK1JcXX3DhIl8AWwSUMp6H/flxnz/gutSzffjpUcwk5VrKPCdCZFxywhY1krY+OmIFUEVVPB4HCnHM5Tym7VYb58SUnUMfyPmcxhv3JVpXqyF7YJ+LicJme3sejt26D6eUZHY+bG7vDudT1zV9G85ZO1f7LgI4mSZE16+aYd5P0+x5RWJgkEqJYYngFDH0ziMoITjG1XpVuGUfrAqRU2OmADizD2JGwMCOmFELIhBhbKMZhrZlqXkEZiep1lJEcmjXRBBha2RGzTSNCty2Kw6OmE7nQV0MzWZKR2anRuh8CI7YN00/HI9dd7cvud9s5yn77ha8M0a4JJjQldu53ExfOaIXFvJiDbuwNhcy+qK4hKuZ62XVetEfXZehhVOETzS1XVak61heR8wunMxn/4L/GU/0B/zQi8Tok2DpKln7bLA/fYvqwu2hAeY0Hw77w/EECCpWS0k5D/M8TlNRq4utYQn2WRrNrmeZql03sVX15RwjhEuu8/WnI7xcZS4/BJgC8dXjRUXkcRyPKb0HBZGA6BoPVWLATd+1we9Wfeu7tu3aGAIjaKllUsRahBipCwAkmpHI+9Y3xKxEnpwTWcQfC6dpKlVLNlWVagaqgmagYlIXRWmtqkK1SkplMVLlmkWgFJVqqRRRTaXWaqXWpJCrKoKoMIMjah1E0taZJ2ucecbgHDM1sW0kEBIQASKbgEcFJUAwRRFTUSMQQ1UERTBRAWJkb+wttNCurVljuwIXga/EEBEQA6AhA7vrR42mYr4FycaNuawUBEgQq5oaVNVqXI3UzFSqwVwtV8ipllqz6DSXU7LHQd+PcAQ3k/e+adrovHOOnQ8eK5TkpOY0SM3iKiisnHfOOUfek1QFUzVNaaYjEyC5CIjgHZFj5zyzsjeD2LhUxJBUkZzDpZDSDIgRqWjxHJakJ0OrUqrWarokjwNwIKpqBEYIQJjBahVVqaKliF7PKQS65LiDOBdeluOXBjQUFQPwbumZQ2aPfHGrgSIiqdoiL0JEKgnAiBi1Lvl4ZkZgZsrkgQxAYEmzN3DklpW6CY7JCIQhpDRbFcJQqgTn2GLklVoCYwBm4lomYhvm03e//7ZvNq5vS6qETqvVXI7HJ6a22TVNS9kcmxHilBMZgnDNNTRNs90w+mmcVad+xf2q86HhsCZMMUbnV8dxAMCG4XQcQWG7672T6LDZrhSjZAYx0kyePRYDKDmBsWShwMYUXKs1ax1xoZlDE1xTapU6PZ8O5LG/2Uwp759OrM3heez6OKZqkofhI0JlwpFTtfnm9rZteL2Jc7Xn98dpP9+9eRXYYaVhPzctN23ju7Wq5KrhZtXdfA0//NYNT47t5o00Pf06v/1//Y9/Eb2b51EVnaNxLG3wN5tV37Tk68fDoaEgRbq+EamipW2cipUKVcpyAtecd6tNKoOQhN5x4P3Tx9vXXx3lXKcc2yimq9V6fngGlHXj92NWdeQdRmvYH87n0zCqoilM5dyvg2BlCivXDk+HsI7ffHULDtZ36++/e//2ftfEMKVkouTAFFTKeVbnqEidUhIzF7gstLxaYKcXTpqIkOl68UG63mgvQXGgIGqGIkBMxCpK16I0RDDRi8HbABFVlcCWfRIAEBXQerPtjtNcs0wl+HULJXVNJ6cBTdv1emIbnp97M5fRoYJYLvtaRq15He+mkrSSiDY+lvSBDDBghcHHtbkVIWo+UkPOR3Uth07TUOt8s1oVq/vh0PXbrluV83MMjEXzPOR5QHD95s4HZ+CdD3WeGTH4zkod97/XAnH9dkhDs37jSbKckVyzuS9ALLPLj/n4k+QOtr+eNVKIbDcWeuKgdbbTh+npIzpEC7EPIYTT6bR2QWtJp1FT9p5j3/RTeXzYd8GXqqdUnXfDWCCE4ymRjz6ExrXO+Q6gWzUeZb2m8fjs2Kvh48Nx8/Vr5ZFD+OLVF1Lntl8BMKGG4ELYnI/nMo+mFm9vDNw0Dl235eAfnp7BieQpdE6KRcB5TorYxk5phgbZat+GEJkImt2mDV615OnUhVjMcpE5zTF410Re7ZKWoiqqKDkfztv129iF08OPitWwBHY5VUKea27j1kyZYhuCA5I5/cl/9X/4T3/7b7S48zynqQA3Hz/uv3j9xqOklI/TqOJW/a8PaSKQJrTO6Mav+eS4bBvagodnfG5WsaTD7q5/vXZ+KD67m81X67tXfQdNzTpy7LuZdnNByVCzIHN0TsqU5vM2vC7TjJSjEfsN+VjKAHlOMu5uok3TfvhwPv/0uvtZv76jstVjkTzuD4+uayTDcT6EEMt87PpNGpKPKxf6caatu9+u+z/9r/+4u7l994//sIuX7qdaKgIzYp6SY7ew9i
FEkSoqYx6QzBGrmmdOMgtgLRKcEy25pugigDQuqtFc8uJUcBzZeag1hjBNIwESswGogpqRqaG46ICYMIawBjPVjIRgznuHyAoFEEsuFkwV5pLUFDw2bVSTnAohImJsu/E4lFTRoOk6kWQIv/7Tf/67D+9LpZzg5vXuF//8F8AhZUFjco5b9+s/3U3nw3HIYxpvv/j6t9/++LAf3/9P//b56WNcbYpRmZN5TqnC+fzrf/6n+6dn34e2a9R0f5oIY82lbzqRejyPwbsYfLWqQvunY2xD49rDcTTG2y++mkU3t9s0jiTSePUOEVXUziUx4ip4FDmn4kIgsADQOVPJp6QxFkCoKgyMUL3zVSX4MKGrKgauKqIRoPo2RELPjpBVdZrPRKii8zgxk/eNEQpqjJEMSp67th3T7BiQfRNYS5F5cC0YRhd8ORc0a5p17Nrj+XBze5uyTOPBha7b7PJ0cM6BQVVBZiQClAULihZmz45kTiVLmsairSFUEWIGUCkVAXz0Lro2xjSOqjUlAVPPfJxOwVEtpVv1oW/OwyRgfddJPTsfRCVQQDM0cSGG9Urm5AiLyJhHUK1C7IPWcbPeZMFpKkCcUnYhMPo0mimwb4g8MypgHsfxPPSr7nJj5HhJ/l22U/HFsYNw6dA0gyWOegFFxFdcuyAiXTxlV2yC193wSzYwLlXI121z/NRu9omOuaD9z3D3VYB0Ad0LLWSmF6hvF5HSYtO54CeDq7eDzNRUL+/1Iit6kW8YfF4ydRU8ffp+uPwMF6oJ1GqVlPJwPn98eHj//uOHj4+H01BUxFBV55zWm7X3/p/97F/EcqRUutj/8Pgx1puWuWnD42E+DUlkiMHFtn212/TBHZ4/8M4/j491HgzL7brr+269e41aHUoFNw2nnGfnIwFJmr+4287pfS61X63qnLPmj4d923ZV5Pf7D8cMH89CWret6xjXa+5ftzlNYDDmqevbFGzKY3TogWMXPfNchYmnYayUm36NKqVa531Fd57ObWwdcdvw4+ljG1zTca7ZQKd0itB2sSHSrFByIZW2DaScpjHEZtOtcp0NAdWOx6xAzi890NiG4N3yy9bTeU8cfNsYYi3FingAYCq5FJPqoUFjRz5QaMiRW283RC6n7KPSy7wt8PnSTbUkYS28H9JSMo8Ay+70dRIW+g8vxsnP3TiG14G64PXrCF40a9fnvPCUV3LTAFFe1CLw4k6DT2/3AtXtOtYXHgEvccZXgfjyy7lSSWigqllUtUx/95d/+ed/9b8djvv945Nn1zb9OA+AdSlTLCoMpKZi5pDZBSYa54kASrHzNN+t22xySmmac7eLtWYjBHbI1O+6VI93N/fn87HrIxrP6TCMvNvdTHOqMt/dbx/3ewMBicHTdJ7e3DaHYcxKihy8e9Wtn58fgmNpV4pOqzU+DmPy5IhNrTJfKtyYsWlDCM75hkOPrgFvxDhOSi7EEBE1OGdm3sfMU1BvKg4NmBkckkdyKsIO2oZZs0wn2myMUJFW69s5mTH1NzdklQxNZB5nBSeSaq59bGOIxOoiG6Aa9mE9Hx+63TpGmycLceVCgwgXmSUIIC55bS/U4YvxdaGQrnfm13G6Mkmfzxa8KNBeBJoLOfSpoM+uwkh88c9eViO9aoJevpb6vU+J0fAHS+cntSUAAC6+gz8cO7jSX5+znYaqalp0OA+PHx+//fZ3Hx73QC4LzdXOY8qlLLQXAaouqBfN7JJ4ZMtKDmZQrX5+mixnyqVJE8DMZElqsaUGAdkx89JvxjE4RBDV5/0AhhEgeOo97qJrou8ib6Nbd/H+btOv46prmxgJVEoGcyXPVcWBEylsQUWhgndERt4Fig2gI1U0UKmL0K9gVRBTRTAmYvIqVcRMwETUqkpVBVWF5TyrqRRJRXORKlCLFdGcRMTmXIaiY7VRJJssTrzA1jKuGDuHvYPGYxNqGyhLMkOiLMCAwQBR1RGKlouwSlHUnCCIIlQDhSVE3cC5aK4B10DoIK7VL37Jpd+DLgF/S4atgSEZO/MRfWtlNgwKQc2JkAgqsJqqwUU1JSK1FtFUIReoQnOBoeBpsMekD4XPyNW1FJoQY3B+MYKTc7NijJzmVKyqFZnKVPIscLdZNwZIDgAcMaIxMYJJlfF8AucgRAAOsTV2hiTkq6hCLbVWQDZAJoe8RLcROwAk9mpGKmRAZqhSi5WC7JhAFYmY6Xohd44BfIwg88zOmRVTBYNFZiEies31Xb6uWUWLHGu57Vi6EBEQTKsiIbNTFQZEZAVBQCBWKUDo0C0XDVBRXZ5Zl0uOijAhM1epTBQb38aQa8klB4el1saFVJIHT0jO+TJPiJXIOx8UFVBN87vffzsPad34MqfomxibVJOZEIauXce+KfPZzLJKGSoa+SYSw2q3Cr7Js5gMDrHtu/VNe3u3PR6mOZ2bGNIsUxoUqN/spAwcu1dfbm9udr//9nfIDFKZ4mm/R0Cx5FdtOp/VjEIDSI6JkGotZrWqxL5bbJlqGmNIOTchzHOZaq2leAIfmCqFJhZEdWrZpnFoG0/sainMTRU6HOdh+OB8d7u9JW5ESWot87nvbxajJTkkiliqpLlU6ftNbLiUcW1uOh23d29+9c/K89OjC3F7H8s84Ji33foff/pdv7H1avX6y1/c9t3+48PT4cEHjwjGSEAOoIqG4JnJBHPKMYZxPjWhbZp+u/V5GO832zRPkYOKzlPumzbXHAMH4HFOb17djMNYqjYhDCUF38xDSqWs+3Z/enYZocYpg6/57Zv2zU3zdBy/+dUX0zADM3sf2RWpbErOl1IQQFSJSEWIwaEhki2RXwjVCpMDABNBR0RoagJqqsi45C4zM6guAsIqldkDXkl6AAQC4KqCCsrABIiE153kWlO/aQ/nqXOhAjyenl/f3np1jrgPzXw+B9Z9Przf//S/e/Nr9Os01zTOnkWlAjAyS5aUR3BU8lyzMAgB6TzFbidAoEVBzLyIw65XYAWM3dbEhuNz0zU3t3dQJ6jFOzfO51rmEKLnVWhWhtVMNA+glTmgTc/H34VY2d9M89B2K7Japz1KIbcq1qgWSkc5P6ZhH+9+Aesbpey4onfYdGJA6VyHg2vISeuDS+NQs4CUeThbrtOQTcUHXzN2jQ+vnWg97keXSvDcUZqMbM0lWUTardq2bTer9fn5iTmTwf0Xq7ub9Yefnr9/t7c321Xs9w+PQVzwSDIhcWgDOWDvY9dP4yySpvEI/ba7baang/ft7b3tz2M1MRER7NqdlVJBcklqafXqPk5BoLY9IyH5EB2jWeM6JGbjdtMXCl0TB5vP8/ezJvCxWbXg0KFLVh6fno5pDx7efvHq8fl8SiU0oRI0bTtNQ83ZiXCe53Nx/d3Xd7/Yl1kgHccfKXoSns/DatebwcbHfv3qw/vH29Wu8vDxsN+GZkeru83rLd9xoB+P7/p2Hdtu94XbkXXj/NXuq/j2/t/+zf86znJKdc1WTlOc85TzartddbfclWwVyc6P37lXX/z81Tc/7H/iuP7TX/7Rv/uz/zUif72+TcMx5aGp7KLfbW62n
nSwYgFpdT5NaT75biOo5LtV0yCzQWT261VXZ51OZbXin3/9MyctPM7D82/7uT68/3C5JjCSITMjgmMSKUsMyzXgjjw7RFLLU56XuxwyUERHXjSbqIpOmpEJiZoYU5oQgAmFbEyjdwEQigghVcmpzI674H2tmZmkJtDqQphLbkKfa2ICMAEkJgjBqUEuyYwRgRwLACI7BwRUag3Ba99KycyMgAzYdPHX/8Uv//2f/c+ooVlvvvz6dRNCiB1v2ftQUzkPmQgdMc4WqfmTf/1fH375/EeHw+nh42/+8dun4/mYj6JSUvbBsek//MNfa9XovAvddvs6bgdilunsEaZU1ustGcTIFKtWYO+MhNSapn/1+lVONisEU09mKN5jeTpSKi35saojIlEG8uw8B++5ZXZKBXAwmXJWAgErWj0xmDFCEyLubkSEgZjjnKb1ppeaAKHWMgyzVK25sPdM3DWRyIAAK4qC5NrEVsDEIMS2i63IGUyRaZrnddMO+6d+s5qGMI5nMWUfO/Ypa/TRY+Wa1v3qkM5IVrVUKQzMiGKECDnNjGw1x83KN2EaRyA474+1VlVTUSRkh20TpKjmUswcc9P3tVZTM5VpTnHT3q23++Pp5mYFwQPZcTys1usQW60CCF9/ff/ttx9MSnBhn8+I5l3MdV513el8ct4BQa662dyVIrVmhtg0/fH43MZeTKc5ty0uURshRjWbxnRFGJfd4Gtd5gUCX8JIEe3SMY8vcSt23Q6/4iR4eexC+FyNFpfwn0vGC16RMixZFp+sG5+Jgi4Q5pOL7bov/9n+OC6RLvDZ7v2iygdUs8se/SVuCS7dafDpG+zSbbvAqGuQyCeNwEIh2GIBWQJlSq2n8/nDw+OHjw+P++fTMAgsd4yIhuum9xB7jHY+ns9P0eW396+++PKr03H/9P7h4eH0eJx396v71eZ4PnbRPTw8DcHfrLsyDlzKq93u9qu2pDHN6d3HH3JOUOp29crmIbR+3XcljY1r6jwdjsNqtWW/ZhhgHm66zZRzzak16Lfd3ZZMNZJNpzPUzNVHR8DYttuUS+d5tWoRVJRyWeI4WUUCe2Z/3A+IS1GbxNUmkhuOZ9VhvW4ZkRoSrUWKCHjXlozHUpMUMS3D1CNIrYGpaRoxPU1nxxCDP84zRxaRXGoTI6EzlZy1ia5xEVVCbNr1bhjPaHI+DsM0ee83va/gckqSE/nWxBBdv1mvNzfkIiISUS3Z+fiifVs+pevt/XWE4EpUXjUbcGWD4KqC+wSSryD7ZaQucBqXLTzAZSAv43eB2YT0wk0pKKjCRbt2yZKBz0yXFzPOZwqPBV3rxaC0PKZ24Y6WnGBBUEOdx/3f/dX//Nu//w/byP7+lY7z3e3dw/PpN9/tS77cTDqmKqpijOR9QICUkor56Byx1hzZnedJkFMuAq0CgQKxU4V+c/P4049vv/4jOf6YZShVDOPpnFwofbedieZyulmvS5I5le16s38auxgGwypYgQiNovNNk0E5oEOfp6ImMTg2YcSqpQkxCUdzBLX1LnrnEIFdWG+qVCk1tBpiVBurADEjIZJznsGWxcdUDR2yZwRrulYkNS5IntCErDrnM4Ennmyi0GAIkgTZzWlC9mBYNCspBeeCExUAcqEH8A1zGqbNpqdSHbfkvSEgkeE1GNoMQF9myhDAEBfcvsTuwKe4oRfW+rNlEeGl/A4uAesXp+FVnPTyxych0Of6tOsrLzzPMuALqW8AQBdW8xM7dWU5L2a3ZdHV64u+HNs/obIApJYiOs/56enww08P3/7ux9N4qlX3p/k8jnMponUJdDZTQliifggupl57oeM/o8wWvR8h4LWbfDGILVsMy2LO7Jx3zD54t8QMAwAgKbMWaRBaxjfbbrOJm1XcrGPjXL/qb7Z97EIfW6Sl3aIoEzCRAjkHQLWq9843TWia0HeubYwCACNWkwoEimBoRojOMymYgSwhNqZVak5Wq1pFNCRGVGZQBnboFFPSWjVnScWqgqpVVSAEdirVCKWaFC2mCPCM0CFGhs5hF7j3sAq269WRc6xGSqxqyzwpIoEqmZgZipgoSgEQA0V2sPhj2AMH8BFcY75RdsvlFS8uwKWnAQ0UiOFiiUVFMnTKXoGrQBGsxWqxqrZUeJWapErKSRRLwZwsVx0nGQTOQifBxJ5CE11UdtEzgEbnDUjM5egT4bmek4jNRQQsD9txLkXudj0BeAcQIzlGR+ScgtZS0jzbcPZIhJwtGzlBylLrIh0nIuYQG+8CmohektMREQmdYwJCJaigKiIiRasKM3nwzFhrJXQGhEjsvPNaqgCYcx6JVCo7XlyWiP8ZVUSEyyeyRJyLGNiF0YTL0gxIi4NRiQCBBc0hiFQwQWMBQzAiAkE0BjRkQmIiAhZHFr0rdZYqBASmROg8JgNyQbSSEiLWOrddF7tgMhLY+x+/+90//na3ugvBG5qLUcFqTbXmNrRopeRsVmKMquAgOtchSpGE7BSs28aq9vXbf3l6/qiW3j0eA/sudF23m3PNmm9u7ueS9/uhCZuuD4rw6uuvBEgFPbsQicmBIyBAIBBqulVoYqmqat7xdNprrloNkXKqea4p53GYyGHTt6Xo9Hg0RgIWUyKoWhUMsKLzRbTK/OrurmQ5P599JHPGVKc0jaViYwqEuCKiXXNb5+cQqwLEGAsnKcpOj6cBgG6/+MX78W9cA/+n/+bLH7+Xv/yrfUTnCCE6bPC/++/+j3/7d789z/n7f/xw1zX3q82bt28R9PB8ylIJ1MBiiI6dopkLw/jcxN2r129++un9in23jnnMVWop8zBbu10fHo8OyQX/fHzyoRvOp3Pjv/jizbvf/xCavnexpuIIzufUrzah6a0Se1eH/P5hGK388qtXTdP88P6Zgq+B/No9Pg04gzdhprEURFy6BhA5i3rnwJakT0PCiGHJovPkFYwAEUkBjAgQdaE6RS5XLSZEWsyQYKpal5v+Sz8sLTQrI8BLH42anIfzOM+7u9tpHPub25IEwIZ0gqIAglJJ43rzmtsbxZDzOeXJBVQwM0vTbNnImaJn7LWo1Vl56903Wteua6GMAIAYIGwsrh1aPk6m6FyYjuNNu1m3IMchBgcmC+qOzSY0nZhYFRcbBnGmdX6SeXLmvLuryqYKJVmZ8vgU2g32O4PqytnlQ52P6FewuUUWSGcr4uPKmijnUadzOTzkeRiOs0geTk+a5hD7Jsas2vQxdqGJ7TmMyr7UuWYJHFqB4MXu7d1R5ufh5n795v7+dBiDQRmPd7d+u9vmwm3ntaY2UBe7nPJ6+/N8UigqQje3m2E+EYcsWOdaC3vXjs+HfHxSfnf/zevtph/OmapuNg0WOx8+sDkGMAZhr+TGOQXvpQap0LTr4qTvGh7PUFJY+aowzYXbJgdOjg7TVObqfXfKqim1cXsqQ8ofQhtZyKiM0wBYHEofWW0Q5hLMgBC532whJU3Kwrtu9f75+ZvXvzykjJabyPPx2HEb3Uaz+/WbXwx2fKrS0iS55DiXME3+9OrVDlJe+7bM6bZr4XSI3abm8Xj8Ybda1eHw
cJhnL6+6NQlmOp90ePPlz3//7j3pQCK3t98Q437/TNwA2TGlX/2zX4rM+4enJPDmzTdf3K9/+u7HTbNqXfvh9C40/o/+1b+aHt59eJzZzWk8ssMq1nabVXMbvHe+qaP+4te729XmZrWeTto0DbZvPv744OPlWhBDmKeaS3HETDSl7L2rOali40KRLOQIsNbCxM55RFOVlEfBNjpH7BCRgNRgyrOhmRiASCmqyMAIIKZm6qgRyYEbUCo1ixT20dSYgIn7doVAZuxcM6cJKqDjnIv3kZwLTfBEpVSpFWFpECEickudOUDw0QCw43Ec59F+9uarx8OBAG7vbwFonqZtu3OMwLTbdACy3t6t/Ob5+Dwep+enofXh7u7N9tUb8/Dh+enP/t2fn5+OAFkQj2PqV6vT/qjV3pef4npNjP3tveYEHTrHNedcC7rQr7sCtl2vnz883WzXYoxEt1tqbX447fP51BLiJKGAC5zMCImd01z7tp2qkHFOebOKSqDzCZlPUvs2ApJVkVzEtCgAURvjXdfnPM7BpXnMc62gRTWGoEFd41UhzcV756KfphnNdW2vplkkOC9owN7A9f324fF9jIGcM8MQPCDGvlcVqVmKAsV+czMenjxZzYWoAVBVYXSCwIRqIqKeHQq4xlewnAuYqgogINF6symiUquIVFlyFYGRtcqsqWn8pu/avh2HIWstkqbEX355X+Z51TalVk1CLTVdrFlysu2mFfgRDNB78JGsLluzIBq5maeZGvfwfIrdqmmbw3GcaupX27ZpQJEp6lyrqIB2fTdO03gasLuEOP5hquoF0171OHSR/ixczxWOX41mF7vPVQyhy9779bUuMOiacApXcLRctT4HQReuR+0THrtQTvZyPJfd8Ct6WuRIV2D+8novAH3Z78al2uzyXnRJUbJPx/mS1nGB/3p50wXpX7bzdcnl2R/2T/v9437//uFxHEepxQC8Y0IEBcmpaxgU3v7s7bnou2OqTx881Vrxzf0riMchl+8/7m+7DlSblmIbXAxf3N73nj58/P79fly1kZTa9e3d7ReBm2EceLUWKx+fH9fBgeL+dGCIdXYiwzS+R3Gxva3k1xxcl573z01sTERmCewQqBbMVhXBoSUhIguRmJTZUc1TmolbBBuTDFrAx5xzE5q+a97tP/roaylmmJKL3s1zmubJuaqiXbMCoCnPUy7BsUMaplGtth4cqCGnnD0zWEpF2EHfxaIKxqIGIrlkBCx5bIPXVDnMjlCLrldts4qn01SKEVrT9gDU9ut+d++bOw5tVR+oiX0LV9ckAtoCz/DFJHkhjK6BQguqts/poeujn6XJXFnChQha0ow/DdqVvET8RFle8P4iO0NQAFB4oQwQgAwAdAHLn6HnC9W5VBktD1woK1uITvyD56qo2TxNp/P+eTp3u1uA+hf/4X/yjk3L8+FBNANeTkO1JXjdBAiJzGR5D1GgQLWW9w+PX7959Xw8vN310zgFx9QEqKVp29PpvNndSctp2t7dxJ++//Ob7c3+eBqPAwfutr1qqefTahPSIQHV9aoZUl21zWGSCuwcE7vNelUkd20E52pRrMyky7ZHqUpsqIYGbRcUK5Br+l5d50PvtBSais2+YeNuKlOIjCi+8WVCHxtRyXmKoa1AStisWxgqS0hpDs0KuakZXOPa1c5pyc8f0bXOkaqvVU/TuWhtmtaFJs/WrDokZwi1mu/dXKaWGvYEaFWE+xWEeJVTvnDKfDGaLAY/WBajF3/rZwuZXb7txVJ1XUiu1PfLyrjQ7P9UEPSJPXrJUYMXpnN5M3qZKHyZZIDPZvmFqbq63F6WuIvgCK9BchfO6dNZ8HQYx2k+H08/ftj/8PHpeRiPh9M0TmMuVWvVYlJV1GQRBC0Om+sAX1d1xCWZCwhhSdFARDFzi54OAQlRrqv3chxMSPwi7DAzUa2lMmjn3V3v395vfvbFbnu76pvQNsER+RCbtiMCI0eIIkrskMiHBkkQnKo5ii52vu/8agsxqg+EAbWiWdGkNalUAAJiCmyGVEU1aU41DTVVyQUvWx2qpqAKpmSCNUOtWmvNmpNOuWYwFTSAqloMhUhRHTDWix62ms5iSXEs1mTtA6YCasXRBOxWzlvOTh0YBAQAIVTTjGag1QxUMlycj9exQTZC+5RGRJeF60ILKpgZXT9XuvLQCgqkRqJQFUqFXM2Mi+miWKkGRawIlyq5aFXLCglgNhyNM4fCQZwjH9FQQAkgA2hFIHr7L//0V3/6L5+en/783/7Zwz/+7fD0ngm1lJC1Qb+KXfAITEDgY3QxOo+uaYF8rWKpGBX0UYzmWtXYDJi9j4EIY3DEiOBqVTUQFR+Cc+CQRIQIInk1V6pI0SoqFaSW4An5cpEwk5oTgtnFcqYG5Lxnx6aCSMTuqkO6UkXOu4uzGdQMAdAxK0CVwkhgi5cSPdMyzIrCRIDmCAFYRJmdAZuBcw6NAMy0Liym1eyYu9jN6TFQJCS1SkxzzsFFQGSO7FyV4jAiWSkTaall/uG775y5+93NcthiIHnKee66zkGoNfnotBIgbjZ3JZWSc79q7vptaGIpM7e0vX17OOxFM1YFx81q612YqlSA0Macxul8Wq9Xu9v7kqfYhlXbpgLRdR+/+7GJfWiac05NZGZXK9Q0ljRUtVLnJrYihZmqLkK7DMhqSOTGcd4fx1T1F7/81d/8zX9kaue5kPdSSqkFTKTmVPLt3e3j6XA+5d1mM5fSrcO4f5aUkcLtXfPFmy+G83A+j9Ncu+gOh4+7mzst4bQ/WSWiqvOs1T4Ov//qm5//5i//I1m5WTf/+3/9x8fz/Lu/+y6nPKvqt3Wz23QG61Usc3ncj2X/uFn3Dbc+iA84jklMwdV8LszWdOvzNHBcbVb3p2lSq8TYdajY1VrHedq9vjs+Hzyz965fUexux1RxmDc3d/vhbDk3HBBxqDKWGmIYxiOa3NytywzvP54OT7//1c/evn51Mw7p8XAiwrYNplCBShViJ1UQCUHFwPtYS/ZEiEbkRCowXe6+VZDIFrr7sqQzIVa99NSYWdW63FibKrFbtrcWrYGYOaBle4rwEyooJG0MjYXT4bnzIbLLdTzNs0S869eu34mImdu0a1U8nw50enbE7JrkQi4jN0wVM6R2HS1ahRwjmwi5FgyljITGMdRSqYmLLs+I2MUpjUYQY7ScrBQXwnAelCi2O3Y9ONApobEJmhYpZ9GZWmhoU2tIc+5vbxnGKpMLjfIWMeB0gOEhDT+BSPP2VxXR8kCo7J0a1VSkVu+Cj93Thx+mcfLN+u6bf7bd3eacyTQPz03nCeHw8NxtVuNp2D8mRerXK8ua0+jZ3fZOIWx3cXvjbjc3ndv9/sff3bz+4m67/fj9sy8e0XavQ394jl9s3p2Gzc12t91JmQvV1c0auQWwXI1DH8j1fSrHSUp++uEHoKZb33j150Pyri1lYAnOD3Od2Pem0MSuliESrpvVPCpsAyKBCKABmSnGppnFqqp5abvQmJNC9ZyCxcPxlKtublZDTs75pJJVQ+PUsJzPKD7Ncn/7ejo
m5+LzMGnR6FelNne3Ow74+HB24szo6XjqDZvt3YdT/fK239323//mNyfLOatBXW3iQDjO+x66r37283m/fzzsq64hdHjTjjPcvNl9+GE2g6oQmhWv78vw1DRb4fDxtFeyJjZ1qOziWEq33jVNX8r44fG5XcW7V/dksV+/MaPhgKv1r822jqZf/Pp2dXOzCvmncu6b1fb2tefydDgj+lr18Phszm7e3PS71c+/vvvpNz8+POx3r3Y/+/WX//7f/8V2d3//y9eXsyCX5SY7l5pKQabgYjEBWmo/vYFWUHbBEaqpiiIiccBFBgwotSJ7MWCkMick7Nqm5MpEMca5TIjogFULEhKQgbJjQIeIbddMoyoYABKamJqUru0lapVSFYtWIFv8/N6RiPoQRASJpVSp1RMriJmqGRE3bes9fv3V23fvH9Z9W2ppmj40rlaxCqrChN55CkSeVzdvT/tz4DgMp+l8OHw4GCoY/eIXf7LfPJ6e3x3ODwbI5BGx5kQI9Wzm28GJc9T1K6mSJOeKjfdTrdWqO0lEndPJNW1/e9/38f1v/pOOqau0ie0kxya2z/PkGmKPz7WuHakVIyOKgbgKtbF9+7p/nIb5cFYA1KqWqxgC9KGbU9WSH08lpxpbbPpmzMdSANnHbgWaT6eB2Xm3dPfqZrUepynXyftA3tWqm9V2zHkcT23XrFdrkdp2XS5VzUOWbnVjwrmMc94zSZ5d07TgoBgTmiFpSUjgHZtWBiyqFWS32pBDgMoEKWUkUrtUvDOA856boCJSSzUhxl1/g05KlnmcgqPW8U3bksPhMJRVWa+2ava83+esec4ixs7plKGUu+3N8XjWKp6dVak5g+o4j0yOPS6hjOfDoYnBE4fgtJYYm/E4cqCuawy0lpyrGLJvo/NXFIEvqhu1C2VCcNkNBkN7oWSuXoel1JjUBOBS2AREV7kPGOgFDF9poqsIQy/bm0tw9fL8q67nk7/j6v26gAZ8MZC9wJsL9kHA5Rg+aZHgusV/cRHhFRpdEJfZ5/hpkR695MtcfnpDXfZj7RommufpsH9+eHx6PBw+PO3HVMg5yxkMc8lN9If96f713d3bVx8fHk/7dBpOa5Io8POv7s9W9s8HMYuB+76ZTmMVXa/7N6+/vNmEn96/+zhNfcud3/3sy190XchT3j+8m8jYsRQzg24Td+v+PBzSaXpz87UPq0rmo6FgqtKSHZ6fIWvXBnDsG19rqeaCj4x8Kmd2YR6neRYFnHJxRCG42Da+MVWdxqmUMsyogQGppoK+cdERUGibw3FKQ7678d6M0KkAIh5Ph2XbHYBTRispMjpg7yjPU2zam1XTxn4YJ4aETNE3NefjMHvnItntZiWQiFEDKNspzWlMgRx7h47W6/XxOFZDNWNANd3d7drVOja+CZGY7WJbvDrCiD5JJy4DeNWyXYKEEQz1qqm46Iw+idE+Y4vsUkv+iTT9nAj4TClhZgpG+E+ESXYVy72g8Es2+wW5v6DpTzO8OHbgGghsnz2+gGxQlVLmAqm53R3fPf/l//Jnk81hs/3djx+ej0dDULOSxdCAwEwdsWMPqqVk5z2iOqK+iWWeTnMy16QKU5Euq1ZZtQEpurieDt/df/nllBM2bcYWuJlKxoiCWuaqaF13Vwo4TG0MVaTv2mJzSrq72ZZSpNrtNnrH06kCuNY7bGnISgq5JmXw6MEkBg8zeqSubXyIznloWkNSM3LBNwgcUMv65ibGUFImH0LbzePZecfqQuMkw5Tm2PTTdA7orVZRwCLsvIIjxWpJQNd9r0C5ioj50JOLBEqENze3MThVckSAVEr2zicdV7udYa2mDpDYLdaTqw5n0VS+sC94nbdP5OJl+Vl4EHyR9CzM9cILXRfQ6+f7B4N1eQpehmWRRep1EcPLQXyu4vzEQP0h1/NptF70SkuC+nUdBPysROA6cS/f//7j/uHD4+Pjw29/9+7p+WmcTynlXEuVUmqRkk0VCQlwOTq9Tve1Wf6anH11bH46nwzUlnRiQDMVu+YyLd1nIGKIUEGNyBEhcK3WBWyCu91udutV36/62HoiWnqrwKGiVBUSQxRRZN80rdUKVGlJP+4636190zrfIDlCB4YmWktVMUJGBjVShapWxTSJzpNOxzoPIACKl9wlFDOoNVfRkqVkG84lZatFTA1NSU0UKmBSregseCsEJhgURU0BVJBRDbJqLpZUS1EwZFRih84LoBMFASF0BIyXEjSQWpcmL1zMesbsxIiRjZwRGyyJOYJIagqiCmZLtj0qEtKiT1zoN7FFRiTVSoVctVQzEQVchDxVrVRLVUuFMWsVmKsV4KSGITpoG24QWQBV1QGZ1nw8q8IXf/SL/+b//N/++k/+9HQ6O23/LA9l2KOMRNQ0Tbfe+L5FNEfoCZxj55wP3oUAzplhqbWm2ZFDT+iYCIGdI+fcIj40JGRioGWYvREgIRISmBYFIjRg5wAEEVQEzKoqmAoJIi59eSIitS40JjMhoQI4F9gx2Kfz4EIVtV0jalWEkJbTiRBRFZbUCgRbroeAjhEQVdlMEczIoZljQwAxAQPVJfPMHDIYqUHb9I0HwVlAkEhqBaresRZi5OBdKZk9eib0LQVnaBS73/zmbz8+PH31+j62EVwI0bOjmo2QAbhIRgJysdaMqmOaGGi12zn2RlRrUQOndPjwkDKs16/Ycy1FjcZUSpH17j5GGsYhOHezXQMZskPXEzcEVNnffPllOg2n03FMs1YhREVk9nkaQtOxp5SOVbIUAW5NrW27w9OD78wV0Tlvdv3/9rd/PWn++S//5HAaTj/+ziEL6jBP3kXNNfiw6rbfv39HIVSoDqWiYdQQWyv0/PDuH+kUwG223K9ZBLxz59O5s855qgDrVds2zXd//10I8HQY73/+djocD787xEC9s5///E3o+Le/e6xztlJQy23futW6rOFx/3Tcn05yavu4WbfRd1lGNd1s25RKVcoZhtN0/+qVWDKDwzCsXdOsu9PhrKWmeY5tnE6npu3meQpxPafy5lW7P51X6yafwYrdbLs8TGOaNze7Kc05qdXn292ujN3hcPqbv//xqy9363VsUswprxvfODoPuRTOpoiGwGNOoFBK9c6pCioQATm3LKAXCplQREEFiZFQVQzR0aW2saosy4GCERBeNoEVF2caIoEtQUeLHGE5C1SXBklCJY4ewVaxXeFqyKfifFWckyqkSHbTBjWJsbWUxzwbc05FUYpNTbOtIpZGdgGslHz2zSsmglo4OnSoQORbMTSVEOOUy3g+hNi1/RrrEZlrSVYrWWD0zpFqDT6imtVjGR/IZu99SlPoAhG4wCAT4FjnMYQbXt8WUVdnm/bp9NTsvoT2hgBUMntg50QCOedhkPP+8fe/y8dT2/ab11/Wqml4noezDaltV9U3c8rFrZuu6UNIUsKsogDnrNx7YKfSbbvVyjvId1+8HlI9v2mb26/B4eu3vvV4OjyWcWysNJbzacb1boaMLI6beZi9mwGdjzeCcDy+N6oU3HQ6PTyNqM1265F9BXTrZnP7+sOP3wFR022U+p
7FarEDb392+7hqsUIBqnKeW62rVzmQ0oNmsFFBCtOYQ1IMhJviq1lsCK5KXYbrN9/fCd63aBw5JPobVSyQEdiZBhzbNv1ySmc4nb9VBuunXHAd/f3j65/NwqTnUfGxfbployocXuoJRqxWKz6g93U54ChWnoCXC1ulq1V/v+jT8+bJ+0UgtxywygSepALgBEO5UHnEPOHhGdR2bk4552Yr+dDh1nHeOpID8W0OIZDP9oMzs/mSEAEH649wce0SOw+Yh1Pj7g/LRn+tzZEAnPGL6d6UqPz7D4xKiZEhJ8GAUsx5gPFKOzQOzE33sEoh5vflQ1L2g/LUuFkInYO8e8TMGZCJEQjYzsZMx6EociLFjNCQJDQJOqVdyKvQ/e+c7jtnGX0b3YrV88udjtNrHtIARBM4AKUJTE6JRZXoqIEYiaICGaEZCaMSmRAqCqELIZmigQGiJhAhEiZAhAjpCCd20TVKpKnUvZqF5mN40ymalYVihAJNq4xmMs0qtpkYE5FEFAAtMQA/sWiaP3ji06bojXAVvPDoG8pVwJ1BTUlAwB2DnHzmHwoYk+eHboPMtipwRqBGimWsB48fs5Ex/NTJacPiA0kaqCcEpvRwQmR0Coykjee0ByPjB5c+5F1kMipBAYHWFFYnMJiNkH7wAqOWfEpSQCMcieTKDUMrJV1nLhbeu5IQ3OkyPy6EJE9saoZipitSghGgek6KgJ7BC9B+eJUBkFTVW1TOOU693D8aGf7w/TkMVCDDF474nJMcNCuIATMQ3AxKCqAVDNYnJyAlOpBljFZEEw1RBNVJ0pESIwqJ2vCx95FemZOIqAixf7AsGR4aJmP68NAwWkhY+2jNPOavfTqQYRAVRVZFl5p7AP01ormDLoXMxsKmVYde06djd339R5buMaCadSwDwgdasWVaDOUPpIJmWap1ELAEIq/Sp2IprSnnyQqtUyaGVHVUrJWUwJjRDWTUDBnLOhVUmll8iEKnfv32yev2jXG2Zm1P1+BvKAGFZts2n6/tg/DIc5ZTWplcCAQkqZFMAjArWhkVqO48B+1Tbd/W3fdXG1bd7dHCSlKsaxieCZcRYap0SI7EPKk2NnxqWIJAEBLTBL3nbtnNIwlpVr98eH3SoKyAz1omtUNFuNyH1f5zG5SCp9iM4xMZLjtXCeSg2easlEbKgoLrqIglZh8/QCRCPitB9L5Hmam/UWQ/TOM3ibscepW3fznEsRlVumgKgEeOz33ociGaF2HacixD5N2SGTowqaa63IbeDFFKpioY7gKKR4GHNsglFtNq7uZ5UYfJAk4yBtE/JUbvX+kx98evVkd+xfizCTKyBk6DnIcsZd5ISguLhiISkuNpygBieKMi27rxI51apaid0S0ilqgIoAYksGhhHR6fGn5bFciRbZpyItVsCQazr0dy9ePFMCkTwrrrfPUn8zjn1Au949HftUFDabFm208T7dv9r6YB7cyr++fddungYjtcJci8zjWKjb+hjQIOVkvvqdl5Egshl0q/A8XuSHXgqQRM3M3OWqDAZQp7n3IdSsTIiRwcjmfnG7kCTgG/aNDb2pAgdqt6IOTB2o1gFhrvOhv3sPzIRBjatCQNKSBFQNXROrmrnWcGXWkk6MpvVAoEggU0aqVsWIqX2OFBEKlHsoB017yxl9A+1z4At0ESVh7ZlKAZNwyb71jDgey+EtpKq5tNcXwivI1jbRWz2+++bh7bsiSE0TwoZD0w/33lmR6dXb14f9UcDnudzNJcmwcjE5f6xTFRrzeJgnNSuA/TQrOlSdk1BgqZI9Dknh/fTi29vu4spSbXMd3n9rAL//D//uu5v5L//LLw/3uv3+D//Jf/I//PR3fyNqKPf9v/hn/+X/6n/zvyRw//Y//rdW4TLq3ZuvpvdfP7x/0Lh9Eeby3Xdvr549+Y/+9//rt9+9/+TZ2lW78D6JTrfT8836F3/238Luh7/7D/9Rvbh62KdhvHm1f/fqf1jvtl1BvHyywnXL7CjilGW7CmAwT2ZiBjbuezZ/HFI3jZfX37/68W/f/9N/BQ8PD//i/43/4Lfl8xc+wXf/v//6+z/+LWAoT/PFJmrXNFfdONe5KAdSgvXFytVPsqQ/+Yuf/eE//NcIEQNNc/HMINYf8mbjpPLbt8O//OM//f7lk/030+ef/eThqIf7Q+fjZtP1AMkObbvu798DnaQ3q27V96NoDTGsd7tjPyC3UOc8z24BU8gTc5GSa1bTJrSq8phfqoqOAoBM0+CdL0mK5rZrci5393chBBMoOQfvfQglJ1Nk5HEYfQjAOqd8eXGVp4kYSTFlrXVmdJ5AIIN4gOCdL5KbVTONyXJVMXKwWndzPxtIqTXE1jGyC2DKTPM4S9WmjaYgtarWytqEGJiqlfV6k3LRWttA29WqViCo4WrzJk3zXRKxqsU52qzaUnKpRbReba6O/QhRN42/2u7evn9fZh2SEXEVEBElrgqND0kEkbxzVSQ0ATE2QT05q6XpPIiWXNgxMuesxerT1VZTOtzvAZOijdMw6/jZs8+qDvuHGzaax9S0rYB5F6SK1mQlM3gQBinr4HdPnrx/9/b+9r33gURzUZFasnhGM0N245SmVNiRSE0FRindcexiNwyvf/qDH/3V3/y5ipFzu9XVNE6a6zSOwYfjw549+HbTtC3o9jj2ZH6eegBkpjoNWNzMZIirbmuJEbQNVksuavM8N6uWeAKtSJhzBYfR++gYSYMjM2ubBpxv/Grs920XQ6BtFxmoatpG109vV90zlTI83BDHcRibQHW2rnGHUQP7i/UqsVy/vPryq1+3znu3nK/gyZPrNAyC8fZhXxX3w9g0XVZRKymnbnvZH29A0mKnq6J5zo8Nwklzs+SBIRKSgp5YrSbnuZmCEcApuxgAzok8H3qNs4sQGC7C50V8QHAGZU75P39r1H5uJ07AFCwmEggfNTrL2e4DY+jxjZ9f+SywPo/QT+CXnX+uj7qxj5okABVV01zq4XjcH/qp1nku6Pn9+/dVKrFzwZHAbnXZtfHu3X44HrbrzTBOV5ebPE1qmRm9w5u7G5Aa2Pn1Nrbts2cuq719c+M8XVxshofBN7sYENGIbTjOZT54gr+6u7+O4V//8U8aD3ev3hyPU8P16Xp9cbEC6A/pcPn0aYNMNb+/vz0WrnmgpIRu3azabsWGzuFqFaY0HY77qtSt1oQE6Mc5qSZCLhmA/Hbz3DktZXh397Cf63AYUdvRAIAIo5jNSZyLHXFPekhlzmXxr3WRFXQcRwCe87yUgYGMYwZGVTCBGBiJyIw9j2pPuy5XRdcUAYvumHNom3lK4zygSs11LLJdNWWYgmNEjl3HLXqzkvrL67af6vEwNOgeDsPTza7xUbUSGUI9cXMUlkP5gkjaoh5ZmBDLr3nxyzUAEDuxLE58IzxHkz9WyUeF+Nidnu71cTv+t+58Rh9twT+XrMATCe/UFp9ULY/6ylOXgQAES+bB0lgmu3m///rbb1wTmi6u2g5zVpHYhFJLKtWFMM9Ja2HCLEVqRQRVA0A+MZoIAEClmKiYc+idM8tqsuR
NIyIjzf3c7tYlJ88Uo9vrnoKG4JuGcw1dt9q/f3DIq+j7Ydhdv8RmC41/9fDz33j+cp7G4zzF7eZXv/55+uEXXXdRoeRct6unw/A+Oi8ANZfL6yeeOeWRfAyrNXH0SGaVCFbrNSGx94gkgv1hCG3Ttu3Qj+vNbrXeptqrVEQihgpzLej9/5+t/3yyJMvuA8EjrnDxRERkpCzR1d1oAN0ASACcIYdySAyHu8MZ467th7X9T1fYkjtcKiyGxBiaQAONBrqrukSqEE+5uOKcsx/8RWQ1Z9LKMisj3nP3l3H8+j2/8xPRM1uewWku4GMXm4tp2Ie4Gkt1YFqSluwCp3nm0ImMaIpo5JCCc02rBOjZe181udic9rdSgJw3TSISu+eRKecZgdr1s3nKp7uvJYbVujvc3AE3bb+N+ZgPN9IyNmyhM/aoBmVEjoABqLFHMiY8qlfw0UoCHogGC/6Cj2X2LWgHv4UT4YfF+Iwx4iN0/l9kpT2q1R40mB8Q9seX4cO6/ghT2Rk5fbiW/81y+sBUOhfwGYJ/OM3DJzB7BI/sEdZ60KA9iLkeAKbHEcID7xNNgejRbs7MTAEVBBGZSU3FTJaEtGU8cR6Cg4gyERKTd8AOfGhW/ap1Vyv/fLN6uu0vLnoXyDknhmaouVYxA1ouUKSKai7GaEiOdNFlEKIDEFNTVUfuMSH9g1vfoo02IFMm7KIHCSgipa77Ple77rRIvZ0l5ykxTsERObSSIYNkNTTTVEVNibhpVugbdQ2BlCxkih6ZPCiKGnnviCCwSFJCqaZF0KkjdqFxbcueXfALrIOOUY3Uac0ACEaLdTijqdQzw8wqAZsKACmoIaraEsYFsMigkJkMoW1bMgK1VdfpLZ324/P1sRRzDkpNkUOIrUcGQkJB57BpjZ0Xr/Ocp9mmMdbBQzIwT3DhXB9ddBhCZM/kiHzg4JEZHKNjYGJHsXE+xhBj45mRmBDApEoqJacyz+k4DPsx3x6nYUrVkL13zhMzEi3xkiIqaICkVbTWBSMragi0RD+Z6RJ7dl4MiQkQ0MjRUk3EBAbOMz10x+7xZhAx1TOoc2ZtmD7An/QgrQdAVFQz1TNh+gFCMgPAKgpmDAvLFVTlbHatlcgtqn4rqaTZbOpaulyFw/2NltqvtuyCGZViwhzbhghRMupkJRXSnLIUi64/TSOiH+YklonIKjjwKU2OQE1FlV1blECr6Oy915TZNQAU46XzIQ07NLu+/rSIS3cjM7gInY8yzJhJah0tATh2HmfLk+Qk/dp77xlziHEoMzp/c79DoODj+92wueiyYDnK7em4uVgByOt3d21sjOg0FzDWQuyoYa++Ki4WAOCjIyNmZvLjlKwqB3CM/bpvGu4aPo1jJWtC3B2HCaXzwXWhaVwVQlRFy3U83Y2asxpvV23bxGE8xdj07Wo83XcrN1V5e3PrkVbsu6sn/bajOY8qVXW8e7eN8XiaaCbRyg7JoYAO03EZK4LgVLP3jUdXREseHXlqAkKaclmM0FS1VgCkomKzNjE+udrs96dpqtWgbdB5brt4cyjVo0dCslRzH8NU5/1x7Nbduu9vd6MCcPA1JcKzt5VoZTB2aCpLlduS0ghgy4dfrL8WmqvK8i4m0gcLRxUhIiYWESRUg6oqqgYosjhX8hKtGsz58Cih1+jdMA5zkjzPs8kwNxfr9Ty9S2k/zJwRbm4OW8eofeMvP3r+EXhWOQ7jBBDAmlXviuwquGRlEiAjalzZj9XmLbUZ6IS1NXO5uuCV4f6UutCQp9DGkpWdR+PTaVh3bZoG9v7py1eAiow1lbi5qMNRldg3KkrIhsTNRYVGRDwg5pTu3o41i2VVhwDcRMOILhg5sFrLDK4TiCoOworIk044f13nW5WT857jmiwItdReAzVaktP3kO5Qk4kCBfStxSfmOgAAnbQcsQyllCqBmy3UvdMx3X8NAtx0oX9WQpMqoiMo8/715/v7W9+toLim35yO8+5wH6I7jYebm29KFYamIqmzm/2NqLuTUwzOha6y6WFiH9kBSo1tmDMS+qbTomZpTjVn432BX74//TCEy+urebobTod+s/rilz9P2m0//exHf+P76Vie7K6+erP/6q/+82/88Ed/5x//vcL46uMnN3e3f/pnP/313/qR2fid33rx3dhMY+n6zkEcj3sC8I7ZcNztr15cFM2+ccdx+uHf+0d3f/mmvXLenchJQ+Ef/IO/td8JscxjEkkll/tUuzb4EJCNCCkDMCgSNi2Qf/FP/u7xP/yr9a9dzZ/S7rMnl+H5BU3wW5+WTZBRv/P3/5vTF3dvvvzi6d/8yBHPmnIDPpCIWcZu09AKu7YBiFef/NBHmEWLWuyjU9AK68sGwRjk6snqb//tH37nef/X33z9y/c759evXnVlOIzD6TQOjmg83s7jcX25WJjDNGe0RYmjjlzjfE2T986IvA/MPueUckaCKoaIRSogTnluQlNrFbBV1+c8RRcVJHKACk2MeZYi4gkUMDQx52KlTHPuutZ5hyreN/e7vaFWc9y0cy4K2PZdnqc0DTFA8I1jOhxG571ZZXaAVERLqZrICQG7qiKAtRQTKiqg4gKwd0AKREjsHddqapRrKbkQgTzsvoDDosMQkVwAkIj4ydVlE8kkT2MZhxzRtdRNx2HlkTr/4vrZN2/ehegc4lC1Cg01q9bWdd5RqVUAq4jzPnpXSp1pIsZiOpbamjOTeRjXq7VzTtC6di1SVPM4DW0Xci2I9uTqOqLd3b473B1W6+00zWHyfrWpyGIpeFJV5mWX2Dp11PD3fvDZ3btvlHBKPE6zVNFZP/rOc1WdpixgvG7ykPTDMLQ6puN4+ObO+dUKTEtVEGAO6EFKRY9VbH8cg7jtpunW2ymXNB89IXsfmkaLIOBpt2v7FfutM6jzSUw4eFZT0zQlRy62QVRzzkZSc2mILi83phYcgWodJwHufEtAVs1j7Nv+3eFtcD6GTcrqCbWaFvFENZfbd7eEToFd8LnWy4s+egoc66xt35ScMDpmakM3pGHV+yJumE6k0DTt1cWT/XEMZV73q9MxL3PhKjV4/7gjeuiC8bElRoBzxoI9KnMeSEbnruHRPviBJfTQWH9gbcC3dWZ2VkycuUO/0rA/vhrPsc9oAI/BOvCQFn0OxHmQr52J3w9kI4OHgTk+MD8e5EVL87Zwxs8ZOAu7yLSUdDoe94fj/XD6/Bdf7U+jqqhKs2qOd6dxPP7ge9/v2mbI6eVHL3Z3TfDN048/IpLX37yZkyA6huoI2qZpo785DO/v59D0P/jB7zTtX7kgucx/9u7Pj6fd6+N+e31ZS3VsAtVHd7l90hj94Y//zEH5ZNvG6J68uLYsX9/dPdm2BGTocp1BUrtdr0Kv9ck2xjlPYxoYKU1pzJolI8GSgTPlkvLgPF5tXt7v36NWAhrnJKBpGqpUR81HT9buOe/2aZdkTKMkIzUHxOSyoSeOHodpdA6JoOTsufFNMIU5F3Yuz5kZjamqOnZMCGbOjEoJngKzKTRdO6V5TtUMpMimaZoYpyToY+s8ZvGNj76tKaVZkA
sgzqWK2X5MHOL1sy2HNRqNh8k711x1oqKIJMJIwI8t7jldzB6cWhDJQJcB8GKotYgP8bElX0Q3Z1bZMlx7rJDHOtVzf21wHlafK16/FXC1tPKPBI2lBzYAg0WVA3i+vLOVy0IlEhEVraJaDcfT6Y/+4//yV5//eEy3vvdf/dEXCuo8nqZSTWu1lKVqlVppEWPoeS+39NlFykLRWKgZtqh1jFSl1Axgnjk4n0vtQtv3HQOKWkrT08sn0zQCQNPE02lsmoa9Ox5Pq/V2s2k9c9+FGPDu7v1Xt+9fPf90e/XsJz/7yXe65rOPv/vm7t13Xl1W4WNNLbQ5V7HqPCOx71hrLVJi0wGjksW2TzlZqJuri93d7YouYgjOuVRTySlsO9/EaTr1q7ULa1NMag6XzCMQrYiAjkVKTQU4uHZd8jinAdlVFVVzpmr1bJJLAQGtZEdoWpi9SPbOxW49pplBm66bh/vj/r5bdcShlllNkWAa57Zr+25VpzjLlCqsmm539zU578NKSh0PR67BtZfkLsi1JsVsNpkMA7JTq7SQbfBhlfs2LvQrYIzBo+0JPv7xAa08s9oA/jfvPQOgZ47R48sfzrhU8ocz2QMn6dv1vFQp/srx//fAoiXRCcCMHk7CROelfMnOOhuF6RmBRXhEiJasSXzQhOK3rv18Z5w/v6kpGhIiEwOAqAIiLH2NKj6MCnhxfFRV0yXk3jnn2aORKXp2bWgutqvVKvRtNJOacS9HYoyOGQmBSExEay05pyrVgAGB0REHMHHk1OxBN8eG57Mt/EEDI2IgAkRVA1AEDo6xCVYl5yqC61xzrrUhtbIrUmuZThP5YN4zmqeFiqtSigutj41rWmSe8kAC4Mk3gaR4DoQoYOTYuUDqq1oWqVkNSE05eu5a13XBMy2Obcu/JSEROw4igmhaZyI+j2VM4YyVLOEND1SxWhYauIE54GUSE9vIiB5dTaUUartVCF1EdlwcyYZ9ARWnE2YBVw3MFs5VQGShTIApTdGycxIZ0awN1Dr0joiI2aFzgLgkM3IIGALHxnVN27XsAzlPTKZaRERsntI8zcfDuDueDsdhN+epWlULsemDR0NbxqRmCFKkEBkQm5iZMTGgemIiQnIAqKaOUM2IyOyB/6mGCETogl9C0wiY+L/wKtKHycOy7VgcXx42MYsFsKouVbL8P+Aija6P2yQ5j69UzUAVwXiZL6gwMwBVqaoVTNjBxWobvO13b8qco2/Jh1KFkX3TsGNmZ2XEkogEUKf5lOdM6venAxJ45lpr5MYArahkgcxAVDHnnB0Sks9lZkA1MdEq2TgAOVLxMc5TcjT7rhlPp2rmMPreqaX19rIzGsZDSrOT/PzJxqF7e3OYkh6GIxvdn05PnvTMvDvk1frykIYWIgD3m5WKv9sPx8lcpNXFleOoUJyUOakPDEBjSVlrpGBMVYC9Ny3gUKtZLZuuU9a+wU1sh+G0vyuGuNPZUyUD4ywaD6cp3xf0vOoD5OQZRCCgQ/bjOE+jEmOR+Xh87UCz19VF74FjiDLC3fs7gbm/7BGTFexEHOZ23QxTOh2Sc9RtGgNVySEycTjuEpi22xUFXV1s+hOncQJADhxBDuOkgCaWzYXoGbkaDuMQTdAIVJtIOeWG3Wbb7EdBk9C085DSnAIDIBx3u09ePHnx6uIwzpJNgJaVU0GZyBbUHAEIz0ZxSLXKAtAvpm62BB8ulbnsSKosuDwtNkagFRAXDLTWarocYdFCPqZtmlmtZ5eWaZp98Pf7E7vW+xXXcc7j3TSY0XYTu97fv7vj0EZ3veWLOVmRk4NkopAL1+KkTsPgCJ2jm7vXapdp2GNgqjbXgQaXy4RdnKehjuWyu2ixauMVDbQCQa3JZiMXQyCL7rA75nG6fP6JlmJTDcAgYAAWNxhbmw6mpjU51wJHkgxlqmma5ymV2cVGjVfNOpfkYgixF1ME5tCBW5HvGVDzAcpO08AMrunFvYBma9QAkUOPkmT+BssAmsBIkbHdQriuGNBFSDOWAzD4thEs7Dp0hGko779MdZRq4fIZd52FTU3qMUVNx8//8nBzS77jEKvQ8TgexyNxSDndH+7U+9VqOx2H92/fAMGzvrs/ys00cdi8ux8UmPx1KYd5LiLggxPAVLKxq6BF61gKB1KyQTKjYRmm/fuu7Q63701x8/Tjv/yLnw2z/e7v/vZpGHPL3/u9f3h6O7iG37z+5t27t3/vn/xd656sLi83QQ73u8DIgPO+/Lt/9dM/+Ke/8x///Y8/+f73fvmL3S9++uXf217GjZ/ThI2jhmHby1yGN1/0m06P490h5ZTB+7/48S9e/ui7T7578XZ3vLp4bminobJzRNS2KExAkqu114227dv/+f/Z/t7f6a4//ebzH1///q99/cVPXn7nBz761BX/vdWLi4thLFDL2/2bi+/z+llz/fEmlTLfDiXD4Sad3u98a822vfr0CbfuuBs7H7Rot22m0xQic5pePOdpuPk//19+52d/vfvx//rjXT40VFOS4XDo+yY2jY8up2G5C5CMPBPgNA/RcwhN8AJAY5oBAxIqICA6diaADCqVXfDsiMAzE0guMzmSnM00QwWt86AA1rXRB5Qqq6Y/6FyrOcJpTpTh8qIv6RSY2belJjAoWZIkh4QGq/VGSlUt4KPz1MSYsn39+t3N7vjs6nq97mPwwzQSQnBtcEggquKJ5ppRHBGZ6jwnU2i7GGJMWaRIjBHAUDI1AcHmnJDYVZsPqRZzPjYxFrMAWLJgna4uelA47vdd74LnYZ7evvtaBQl9hVpK8sF16FHYEyUTdo4NiRCIjFlVKgixFwTybi6ZwULbAnhEl2q+Wl1jmbLW6IjJ3e7262a1Xb04Hu8lla5thuFEPsxJurmiaB9jmgoq5um43a5T1XHOUEoXowGVOac5TaV48vtxvkiYkuSaDdEZnXL1VX0kqbobjllVazmdRgVxBl1sUpUxpS7G4Jmc9b0vRdOYJh4vLq76zZVqmWth56SKlqIguahv3DDvtlfPrGzv33/NTpzVeRw37UbMwCx4l7X6GI9TAaXNheuiM1EA6ryPiGbCmAO30zzNJH3TMbjgLsX4uL/ZrDel1P3pvmmv51KfPrv+y9ubz65fbFc+BLo73jar9n44eS2xcU3bIdDV5cWb90cfKHg67qdV05eU16srmjSX1PSbpvamlfLs0LE/b4wQH9Q6S/wrnrsAOgfYP/bAC2hz7tLP8mh7iPR5kHfZYzLaQlN6dLf4MMvWb5uwwgfixwfIZ8F58Jyu9tDF6/kU5zd9oHh8aHMe2zU7G3J8a8p9PtGZu7TElajWUlIp6Xg8vv767dv3b3LRnFLbtzjaXMauWz159uzi6QWnOa6bJ/hkGKsqzlNer7epKPV0dX3xevwCAUrS8TiULELH/fEUrL765OnrL99try5++zd/9K/+9b/sNn2HcNoPpcrbm93ucHp1fdVu24+vrj97tp6n+8srf/PN/snTdRcvUfPN/U3n3WV3SSQUmiOU22GSmufDYABNZAdMgMEH5oAGwFgyoJXd6a67WBHpn
MuKN8ddGWosDIfxtN8NBQUUVlfXbYLxOBcN81CmlCrz+nJdh/q8jV3TpFIBdTicBHSek2MqOUvNSMEAQvBMtDjLbFe9zPsY3WbThsYrmENjMDD07MYkIlkM53nuurYLfk7TOEMXOKyDc7TumnGsU9IpTVjr+5v7/oJevHwevdusLlQqmiHQBzkLgumZs0NIZ7QIVM/ojT0APecO91FfuXA8zq3sY+V9qEN4/N65RVgGyksBPzJBHmPNEBAIjBZG3LLJOuNIuLTDgIhqAKYiIlJEJJvNZbq/v/3DP/p/neb7jz7t//jPdm+/vHGARJBrLrVaLlOqqRQgIIZaxawAgogAADmnukjPSFUWBeYi6yHmWpNKFVFwXs3YU7uJw3x8dv3R8XBAJmQfvSfHqLi9vLofhq9vxk23vr5cg8HFtg/ed/3q/dv3nvsv3r97qa7vn077qZYyOJtzL8WuLp9ZLqfTHKOfp2l98dR5JrDG+7aJsetiYAMfmE2OBLjcaG1s+j5qqdNhoGbFLnRNNx7uYte1q8vTJFZmAkZEJC06etchCMnRUkZ2xG2ZbsgbN42KYi1IRDGawhJVGWNM8w7kMsQ4jiWGNnpIp70whhDqLmVJXe+1opbEzCpCDLlO3Pj+8tnhm7/WfPBXgUM43b297LdSKxRiEx1vjdjCBQBCHti1qhk0wOJYsqx4ywq5gN0fSES4tJ2PdQbfMg361V/2QH97BCThAYf81vGWJfrhr98S7X5YDB8g/4fV/fxKWLjQZ1ctOMNP3yJ4ntlBhAh0vp+IeQGHCMnQ9MMHOfOnEEzPaNSyjuMjJPaImi3noHO4G5LzvITTGgJCFVWthFhkobQSAgiYdw4XHIxQKyICMhmhi8HF0HbdarXabmPwXItMc06lggqqeaYu8CpydMwIOU9VJJUls1w9B0IPYJ49LjENqmDe7GzGh94hsYkwApNDBANlJLCqiJ6ZQpCek7iKYzXLtSjVygazniSnVE+lnpx3PrTO1SIIiOg635OPCZByxlKcoSP0Aq3zaBXIeR+JefnQTmPJgk4FQICUA3cr37RuSZUwXRLmlc2E0IxUzBTBmaGqEACaCRgBqxiSMwMwsVq0lgf+LgARsWOA4Dx79uSd9/UgPsZ+u95crms6elTHUIFmLESYBeaqGdAJgVQ01Xpg2XV0jFYaJ433jEyO2TkkAyJDPDtQmQF7blpu+9B2IfomOiBQVBWQUqUUqTWllEpOkpQMGNlxQAUBJtSaJdWKwIAmlf2SdYZIwDEY4ELKc8yIcM43A6KzIxgudkGEoCLLxh5woRbhoideyvgDq+ghIBXBEBDVhJexgInZMkszQxCpy05IVRUWZpwt8jJVOCeSIJgKqBiRmfISyqNKACYWgmuDRyeH00ELNf0lEhooO/ZxheRzTVZG1moqVUueT4RKBClnM2V0hIiGhJxrmafUuJ4dGhQAcYwPVNbqfB9Dm8qA5MD7Wk2kRO/77RqN47pbPdtGb+l4zw1fPX9x93rwsY3kY4t5zMM4JrX1tnO5piyqEjkWtaqlmt7c3xqZRzdPM6IzlKwyHwUH8t4TlXGaa5XNZjvNUxUgZjXdnSY0S2qziUNg0a6Nvo2MsFl1p+Nw2s+RESo7T8GzVgVkIjiejlKlFK0VuKQIGjfNqvMlzWaVCIl80/mKtu0vbUpVEzGNucicN22/9eGLn//8xbNn4NEQAtGYx9BY34TdabZq2RUxVUmKHph736SpDLuTuby9ftJvu1xSKSIGITSx1lJrLovEvqBK1/azgiaRZKIwjWW9aeeU27YJNKS5zlhiCE10aoqCp7u8a+c2+mfX69fvDgpmSoSAsiA/iLAg1guWr2DgiQxIRKXKkrSw7JJElB+c5KqIc+6selc8M6RMDazK4oFtoqJqxIYIDkAqfUuVjM3FOqk4iLmOTWigYpH501ffff36S8QUnTMXYty01h3efPnko4sS6pvb48tPr7/88iuGEpqwPwwk02l413WBIEhOjptV8xKslnpyCk10uRo6vT3cvGvWz9dXYHUck0nVSQrXZtveH+6/evt2tXn6/nboGtpeXNp8qjWT84pca2GyIhXDGnwwMLNap+O8u6+1kHEpKFnupjv23GIo8ySG7KJvPZDoeGNytDKDFt9dqXlwPXJnSiojSCKrIIkcQNMA9MiNKguzYTAiJkGeAL34mCAqOc/g8t34/q80ZfLRXawhriE2RYEkcbo/vf3i+Po1+PU8BQvusNsnKRSRWEtKYCYFK1lOc+NJqghjH/1cIXQXK/XjpPtTwmZVT4M4r86lOh1Pp1KFPNWSgaBWSSq2cfvDMYLd78an37148YNXP/jR7/z8p6+fXK5wOt5++fbt559//MNP5a78x3/9H/7gn/+TT3/tu8d3h6/+7IuuWx3+8ssf/+LP/9Y//O0vf/Hu3bvb7372/e9+76li+bW/+d0YfTX7737zb/zp//J6e92G1hfWXSrdxxe9QD3eIdrTV8/2Xx/SMMDq6rs/+NH62QoJri42scEKoITeUx3hdMzFzDNrrbNz23/2T7Y/+0n+4uvVi+f0ox/Ji+76SbC3h5rw9Hq/fvaMr9fPPt2cDref1Ks58f03J1TSKs3aSc2XF2s96eqSobEQMaP26wjZuGE18dGDiflw/bT58u3p9oubf/3/+ZM8K4Ctrlfzu/3l9YV3bnd74x3SQ/cIZiKKBE3sSpUiyTEDViaSkooBqATnSskPRnY0zkOMfZ5zSjMAOuZ+swZnUsz5pljNc1KAYoU4SNZDPaph17SO+P54dORKFkPOeYhs0btldOFpZWDI1PRtGoZ5nnwTSajkrEW33WbdX8XgqpaSU2Amx1KLlOI9i4iKNH2LzFahafoqZbc/eQ6lFgNMcy5e0aTWikVMKyGFNgoIeR/ZyGtw17/8xc/7rh9MGlTRqjW3gZCwSLUKJIwGUioBbLr2UKojh6pEJmhtjGWeHBig5Tw7IERKKTXez1MKkYAt+MbAcpG+vwCx/XEPVsVknnfOuX7Vj/Nx/+7GeZZSycCBBpLpeH/qnfYROaRa0AUBc5477k6D9M3q6vr6/vi5QSXGy8uLrz7/4u7+7Tp2beCkqOIb1xjMzE6KplyaVTOXst082d2+GU9TCnV1cakKIKK1KmAM0WqZ01yYj4D9uvfb652pSDWRQccIsW28swrzbAW69vqAN2yjI26bjtA1TTwdjzG0DCo5rTdrqfnm/ZuLzWa17vOcQ9uAqdZcrKhUH5p5PjbNmghUKqESQpGSUsKqu92uDW2Z88cvLp5dNC5AjPEw7K/W/eF+WK+7YZocIqkFZlTJ0xhBG2aTXAXG02nVtaClzInJ55Ids4qoPDYY54fHefuP32LlnGVo9ID70Fkadja9PvODHufcdp7BnWfddu5a0OyB9/HQkn8b4sFHXOmRu/QgBDqb0Zx5RA8j6XOPc+5yHshLDz5EZxhJbYkigTMVysDQxADpnN0GplqrpJzHeRpTGVPeXFy+e/12verb1Wo6nT796MXLT75/eXE5jVNwLh2SlnK13QCSNexA55Sy
6mF/yFM57E/TNKeaAeqrZ88PU9Javvzylwb1o5ef/Js/+g8+xGmu83FEE0IY7qZMbk8Ut60R/fQXb7YNvvvqF5vYYM+S71HnGNiqTUnytDvOo1HwRE3wMUYjXDXBuYDs5zxO6WSi7H0XI1IAYseBPVbFoorBQk+SSm/slKtANpkOo+Sc5rRahWQos+Va6zDKrKHvunbVr8KcyuGYJY2qio6YyLWtmTEymEYmMPFk4zytvSc21ZqnuQJdrjty4BFrUbDKiH3fbVeQhkSBgg+5yKpt76bJC5MlT37bd0+eXKQ0Si2k4ME8upoqB5dTim33sGYrGCAwANA5X0zPgOJ/CWg+VOvDQP0DqvlBsPOBVfRIc/vAQlq+tkBDD/qyRx7H+UrONDhdYIFF97MUqxKqqpqp1KpVUI/zbj/svvryF2/efDEOd+Ddf/7JX757eyvVfHTTOKWcDExMi5TlxptSBkRiKjWbWnCeiJIUPn9SJSBiRnBMrFJqycQUm+i9Q4BV25Y0Pbl+Mk0zIbbez6fJE7Lz264f5nJKhV3bXDTg2BM7F9rYp1x4FTnSYThedBclh2Ma+j42q1ABry6f3J/e9NiJOcDYxNCvLruuSaeTqLroV5ttEYxdE1BPx2PJVbJlnUNchdgWTBzcPJ5if2mAsetLmoAHxBhjNxwHDwTqkJpShFCIQEqyDMEFAaciWisRiFaoREzkKjMBaKkze19z8r43GFNVdoE8qdk0TgY013oc5iYGLAmNTVP00ciXPIdme7G6TuPdOA19bKfT4EptOx5Oe+63tc6YJnBb5xxAEUmEM1ijyuTDw9L5bcTlbJC2rIsPDkELk5HO0eyGZyXBUjUAD4CjntdNeGxUwcAe/PkfcfYPrKMHdOZhKf1WocPDmvyoV3tYk+Ec03QmWnzrkHZ+L/MZuvo2VXMhWJy1lR9odd8ihJ5vEHjUhD7ehkTEzhEhmKGhqJiogRE+EJQIHTsEY4Mlg2chGREBITJi17ZtEzfr1fX2Yt1Gz6gKs9RpTIfTNM2zmm16/+pyfRnpouXoULWoGKBTE0WsRiAlclA0AjQgJMblMbc8L5xTdIxCVhGVEAAUSAiR0BFFYvRWYwONimgxWBkNAsoe3Sz7ZLPVlCWp5OTZnENuu/BADaHIkRwFwOCgCw4BTFREEAkIkBEVgg/W0Fhsie5iHzm2oWl5kYqogSkuRmnEJgKE7L1CMUMzES1MDyMbQDs7rus5H8kEedFjATP56N2iaOPAqM0a1Ycp5zTtT3ezlSlG9Cqos4lpwqBoqs4DQwJRngebD2w5MgSmwOQcISN7ZkfIjMTMnsk5H11o2UXno3NMBAsRrdasQCYKVpmha2LTNavNJpUyT/PhlMacpqlW1SoGCILq2ZNvnCMfPDkGIGQWNVVFA3YLtROIEB60jEQIujClF1XuAmBRVUH05/3Or0BFALZ4rS9oPKCpySI1Pj91zsWtS+afoZnAmX9nIgIiZ7PGZft0RogMFHUpRdMmdLHxjaulnE6nIwPFZovBFS2BvWNvwKYZ6+iYCK2i5ZxMFdDmPKK5rl0hwjjurWJOE6gyhlq15GyYnV+gBQ0c2RClpJSRPTMqVKgCtcxzDt47R+n2hKGz6wbj5t3705MnCK49DSNTRjAtQkjTmEOboaacBueatrtIeeLARpDmqWnaY87c9VY053m9aZTiaSxFQKooOed5zHPKCSxANWcgBs65gGwAtZRKkNVEQdIAgmmevQMXQ3E0lcTQKJR5HNarNgRtWw7VORef9OvdzethnDFPXonJrS82yK6UcUzjcTf6AmBKrYPA6mQyX0R+87d/5+3rWxYQKX7buMDTlGCao2+KWa4qYB58zZLqceU7JioJg+u/+ebL7eXLSKsvvnjvnKu1kKPO9apVFbzjeRrMpG3jOFePXq2OkwoSo/QXfP108+71cRiK7zl4X2uuVYn8/jBfbPnJ04vDONdTAk9VxBAc8fkuRgYQAUXihcNpproYwdniiA/0uH1ftiWwgJILkR5FxNSYeLHTUrVaRQGI0BSQl2cSMJ/VmKu+O94eSMDsuLv52Sef/gjR3ef65nY3zZTq6Ltwdzjo6v3LTy7bgZLON4dD5vbt7ZFYCI+b9iJNRr69fvLRNCjL7MXldOpWm0MuqZw2OeQ0O1wIQe6Xb7/axi6YSoWuXR9Px27VoME8zO9v9lcf/zr1EUl98OMpW9KmXYvMAMUhlDSS74GClOrBUh6qZXQOBXwI++FGoVn3z8JqI1Ack2Or066WGdTIkbVb4A34LQKREygHSBM6R8zIZMBGjL5VjAYOvKOF/WQKOZsZxS35XlNxnOzwWo63ZMLrS3ANrFZwcSWGtrvrgsy3t8P7d2acMdB2UzRXq6nML59+fH+8vT/ctOurq/by/uaGg++1qzWfpvGYUoHmOKarZ68uuy2+PcQ27Hb7r2/fv3v7VnNCgn7dzqUunPdadU4luG30USC8+v4ncbMRbP7kz7765NPf+dHv/Z1/+//4/5bTN1eXT9ObcV+gd9t3X8673Z+4fPqz//jNj/7e7wPD7divL191O/qbv/WqW23advzm/Xj1LHz9+vazz57vbubg7OnTi1oTNnh7O1QJHmM214dAAdxl+Ph3P705lOffvfz656emuN3d0UX2nY8tOQJNwOq6nlDh7v1OUc1v+Q6aXYabP7n/+c/W/+gfuR9cK2L7wxehX9l+/8UX3zTr3HloNl1PvRzXeZrNAQW3vW6R6Oqjaxn1zdv76Vh46wlxGYBJURAdh/L6rZ0OguaBw+XVs//qv1398b/5968/vwmRXUAtSdXSlLbXq+UuUNGaim8COULmlDIDotNVt04pW1VAICCO7TCcGna1Vs/RjFKapzk/ubgyrZKLSF0sYXLJhshMy84ImUGti+0hnfoYuzYC2DSM5EIIEZFSnmvRputAocwFwdI0VVEwnqa577o0TT5A08RZNXocT7nxrSIiOrMkBiqVkZkxTyNhLKPUdu4v+mu+HI9HmcV3jQuuVAmBFwOmPM2Owjweai4umI9+PKRpOvZ9V6ZZculCYMJhmojd7jiOU6nFQuOij6KKUolwPOliUVG1MIdaCoKlkoPrclUfGcmhSC2q3oB9hcroj1PxkNq+lzJJqW3bnuSeiC5WV4w2DlMRV9TSmEJLmqrWEnoN+cp88QbrdW9zsaKhb2RMqLVtm/XF5TD+OSqWVOacuouL4DozVpOSrU5zcDRXy6mwY1S0SpqVKIIgEBYpKU3sfPBBclGoU5oRsF9HEE3T6JvYbbcup4ClTCWKEhOoEEKd0zTsXFg/+/jl3Zu/bl07q4mUtokppC40tapV884hGqJ+/e7999ZbHxqpk+vYKklVCojBg3UKoFZSLsQeGQ1od7+L7YoIx3EfOF5smj7ANBUK3eX6yd1x2PRt38UqmlJetRVCgDYEwv279130p3FE5DQPq66vlZrQDIfjql+TIy0Kj4ApnIe/H9RhZ2HP8rUzvHNmDD10PGdxAT5mPJ3fYKpnouyZygr2QIh/aLK/1Z//KmBkH64FzhKgs0x
Nzx3T2YXvrB87N1hL7/Sh/z83KA855mdI4LxrhvNRzUBVS84p5dNpGtPsmnjRXJnVkjJHfn7x4pOXn3brtRCg8223ch7TMCKiEbx9c6cohiClvH/99ng4eAdI1bMS4nE45qy16M3t/e/9rd8/3d2m/W51ceGQZziqiKIRsZje7iYAPB3ys5XftGtqkNo+NGSQvWcfV6nUcc7k/aa5QvbbTXvY36aTSLVpRsNKzN5TCEwUlsAgrVmNTCoSg4vTaRebprmIzWwK5Prtbj/v99N+qsiBqCg4ZqwymvE8ZzM6naacRKSKKamwd4wqtaph8A4M1NSTWwXvGyZAkdyuu86r1dqvL9KcjofxVOanFxerpr9q7HQ6TWVkcN77u9Px4mKFGEj5Sb9RMzFL1RDqKjRPr1+oVLd5ks03rq1AbbMOoTWVZQdEZzqDLGSaZVP0QMN4pArheSe/0DwAF1fpsxzmjDb+qtzngc9hCg8FDw+V/QhLPnTv8NAjL4VFZ40knkfNhugWpNLMRKoiVJXd6f2X3/z167dv3rz52kVD1P/0x/8JSR2zmeacqomheodlrrlMBgCqzCSmagIGxGxAVRQMBEwViDxzcBRyzXOeiIwcq2n0DpHZh9A0llMaJ/Iup7n1DObmceJ+Rc7fvvn85Wff/+ij519//VVjvF2v27Z1IRJYLhr7ho3HcZxH7QIcJ/5u/+z127+66Ncvnjz58hdfbVdXUx5WF5fNkxdoSfTo47rvNwLsfPQxoszsiJBi159Oh6bmKprKfPXq6Vwo16IGfRfZNA0nCjPFPkY3DadIHRHXkoEUCVhxTCfvHblGZEqnoVmtTEvOELqVilJspVRUIKI0jw02KFbmRESr6GfR/cFieHK6f39I93ztvQ9VTWRmX2O8lEwW4ub6k/3rlNO+pNE3UaCmnLo2jLv3MbRCE7cVXWuiUAvAETES9w/EnAe45xGu+cD8Oa+OD1w3/YDjLOvYtwS0DyvqAzYJCA88oA/L2Hmy/MFIixC/dREPytxH7OoBIkJEAP4ASeEHxfHDcwDOiM8HLp2hqgEu7iqL69bDNZw5ng8o0sMa+8HVSB9YRUiPjDtEMJNa5ewuDIvZMxEhEC5pP8xLBJwZECx31yLHQB9cjGHVx3XvW0daZD/M4zgejqfDnIaU2LvnF42VSpeRjWdahMdC4BCJnRMtnkjADJ0Bq4GBEbIjZsIFDjI1BwiqqNVUiAlNmFhREAsC++C6rqkGJgqAVUCUuapj6QImIzEAQgBkisSBGFEHLC5AWEWOaG307Cg455kMkdghVM/ekTvDGNGzkpi4xjVd23RtbDuulXNxAFgyIxsyPGZGECn7Wg2gqlYmjwhn1RUtCV9ABKpGBOQYAIjIeR+8d84bM3BE0+i9IK4vt/PxUsajgjlQ0RpUVEFKhlKRGpIQogMAraOhoCNHFrxrouewZJg55zzTwhNjRGYXiBlNSCsKKmC1xa8FabGnjY1j54I3JAWqNUvN4zBPOU9TTqXOpc5iRY2XD4zEzi2ID5AzrIsxIpJbqEMmy57EFq4fIS0/5OVlRIRILngARMMFu4JHqGixunLMC1YEKstAQk0QGcBUynL7qpzZqqay3EuLElNlcYYXNJVlxwFmgCbGhH0TPZNHEi2pZFDz3DrHS1Bm8A0AVVGHBrW0zCq11FzKvMzehmFw5F0IiDCejiWXJvamWDS50JRUDBHAaRUEdt7llEFNdEEKDUSVrPGhVCVGY+MWTatgalafQGh//dOn+5u31uae0nj/FQ6i5tOcGdkyIPDT7SfF4uvb++jc8XTs+7692gxJ2IfonTCOonnxDWPvQjOPp7YJrIqkpLpqr+/2dz7G6HAaxpqNiEyQiUoRQbt++uR02JHT6yebeZ6fXF4Op1OaLY3pSdvvDsc2BE9Wq9o8fbPf9dE5xqZpHeCU5Ob+TjVLma6unxj72Lfrpn19f/vxRy+61u/fTSWXb16/cc7H6NMENzc3Xd8xc4hBCaapnE5TNX666j1zhqzsYtMc010eBGk1HGew1HeYsomhZGDmJnKaxyVdIlXVpKZGJG0bd9M4jrlp/GF/uui6ewYOnFSHLKs2lvmEzFLpcMzbi/5ys7G6O40ZiZMA0JLRh4gkqmCIyAbnRFU1ACARNQXBxWEBgEAFwIyJzUxk2UOriaIBMKhZWeIA1YyQkADPYYS8qHcBAGC/P+RUYxPZ0Ucff4aOaq7O4dv3X3XwrG3bYZ59aIZ8GOs71+LudF8Qs0WuNOZhu7m8vcuptOyaseRxGPJhvO6azSbuDu/X22tyfUrZSlbDdc/EeDMfn+fhxeqiWVktp7ZttNYhpZzy6vJJJT6eRteGogDcuHihmjTtnW8UjOIaOKgZqUiahv29mfq2O+5uqc7snI9b4DClFFBFc3VWy5DluHryPQxb7K8QGplGzHfoZnYtogPqIF6ZzUaTESsGUxID9g40L/H1BhEoVOxFDKB6mqsNgnP76nmFTgyt6YV8PQ1cax13h9fvchVsV2G1vr3fsUF3sW3dtaBNeW7XF09ffKeOU51T9DGuujQd1egwDc/W690M0zSZXzfbbfBhvh/69cUT0TzcpUyiRcFyFVNhciHQk85fb7dXL58r9c366erqM4ivVs+ff/1+96O//3uXz7f3b+/I9Bc//umP/sHv74+wevHxx5+smi/ed5edfx7+wacvb/bzaZfsUI9taS/t6zd/8cPf/4fesZrth6l/sppKNq2t8XoTtJoFvvztT8pP/0K/mXCqaRu8xZtfviejIhMTjhnWG3e/T13k8T5tescEwHDxso/Rlz2cVqvm+98Ht91CTfc7wu/nMOe77GCClr7zd3+IV3D6/PU3f/Hzdv1ynvJ3/sZLd4FzzqfdfHcrnK3t8XCfqfVhE9G7cT80wc/T/PTZGgG+9/14c3v43t+4yunqr7+8/8nX39zsdt9/fnG3vyuzGLJwvLi6Pp5uzhsjghiDLnMyBce+b/okp7lMUx4btzbCIpXAmiYScmDvgzsNJ3Sw2q6TiWdWsCIS2CHiXMa2XXsfpnFGT0jA7LwPl+ECTMUATMQ8O0LDZboQnLN65nh0fStgzIwdn8ZJAZ2qqDUN56lqMediBcsped8wEYXoQxjnoyiI4mbVA45ieYnp4oCRfZEUg+/bOOc8jlPTtH3Tj8NAhN2mnccpl0TMWtXH/nSc1tttno/jnHMGM8iCoel8pK7vkpZ0OAWOyKlt/DxXC96BxbDaH3eoFrwPPtSSTbXWYqJGUAVUkYCOx4mIiTCV2cy7EMGoFOuDb1H3h51UA8+SRQ1TrX10aRo9cWdy+/Wdj7FrWynCZiWVvm0qijBePnu2uriYp2msZc6jc9H57v5wF4Jr+/XpMGotNc9oSE1UMTLybb8/vNtcbe7vcjXbH+5W2wvngiI2oa+Sc8nMsLABTof7qtauL4bDrlh1TSMlVynjJF0IjmpJ993Vms5pvgBQHcaGHSJ6ImEtWp1Iv12DdffHyYf2qsU551Jkte6VFMCMoKIiU+jieBrneRAzahoKLCmvLtbiw9OXT9
Gw446axgO5QfI0bGKb0iypzNO0fdIAqWfvnauK7ENVqWXO85GcN6Su36R0JHDMPqfxA0gDj0QdA8QFYqElrgbATB777aUjYGR48HPEh67g3Ck8mAAt26tHWc/DWP2hpT/bvdpjv4NI8KB+eOjDH5oYfEiyog+sJFzS1h7a/2/bgyyaIz2TPtDOf5zfssQm25J9a1JKPg7T+7vD/jgAyjQWNOxcfPbs5dNXr27f39aSfWgHmVKZnON3N/fv371PKcUuDuPYtmF9eWmepRxt0pJkGCY/qhRBT5eXl2/fvSvzEREZ3cWzZ7v7O/D85Gqzuz+AQQbQbqXO1h+9+IvPf/7q+vKqibvT3bMnF5Kn+/vD/enu6XbTejqcdgG9ll5EQtes+jaf5tM8X19twcwzNU2XSnXOSU5Trs4775r9VNeb61qGr798E1xIuZpPU65ZYc61phTYLVYQ21U7JMkqCABStOo8zs47U62lMJPIQsoSMmCAq4tV6+M379+uQmBTEClWV5vet4EcqtZN9FNKw2naXKxW68vnm9Xrb96ZwKuPX+ac9rvhPtUQ4+WmdY7ZsaIXs9Mw9v3Kx8a3bbtqkV2plVXZoZki0CPSAw/MMcNHtPJceI8d7wN6ZI+goZkB0rlcvwUvPrKIkM4Y6KPlCzy+9vzb+TsGgEBnRtu5OWcww2UPpyZWDVWo5Dr96V/86dfvfnbY74/DKEUOd8NXX/zlNM3Oe5EiNRloLbWWIozjtOQpmXdcq5iZytm3a9EcEBEzqxhzQEOpk9QEYN6FmiXGxmNEJCtapxlB0jjGvg/BB+9UxIXYr9a76dBtuk+frufdu56x8Y0PbREMHDTlfMiRYxOb1uCYdm7dW5Gv3uz75mqEXJIlrcFxoNBfXZoLWqs6H9YOOKgBe++YahYC17TtMB7Eyu40bJ89vf/8bT1Nvl2Zo1o1pQkUXWwEpuG487HzoZnHoWk6BFIpIsKM7EA0AxEpz2XwEj2gi02t4n0AYyhzyTk0IaUh+JV3AUSlDGbCwKdjstVG1ecyn04DB9f3rRjnOls6tbEdc2XXbJ+8fP9NspLBU7u+TMe3NRs365KGbv28lNk1a1QCKeBUdUT0yO5bQPdSgufyeCigZZldFsAPsCaCwQeOwrdJQt9SWsIDug4f2EMLa+hBHPOt6sXHMn2gGT2ujfbgjETnKzv78z6usY+v1TPDDh+EwPUh9n6ZYOOvnArOh8Zv2yA9SufORz6DvMiIBGZqYsuSf35gABiCGntmZkM0Q8+8wF8L7dpEGTH4EHwMixWyDyaaC4yTvLk97U6nYgImkWyccU/WuVIq9sH1XeuJiNi0Lg+tCpoUGBAR2XlbICpmNSVcDO1UJbOBKjAHBCRCRAUoBoAUEb2PfmWKNQKoVjPwOE5M0kaqguQcMqkBOF8UgZEdcXDBuY4pMreNY+eYORA2MbrI3s9EiKAE1sUYPGkXzRActatVjMFH70Jgn1XlrCEhJYeMvlYBQGRyiNmqw2BamZ1oRXJIrGZElRgZ/dnGDcB75/wi4DJBAVQmBoXgAwJX4KyEBlqKSSFVKObLDDk5rh5bECEGsKpO0DlTc84RU4iB28bQISIsEWwLQ5iA0IhAJYsQoiNEJmQkckTsXPCEbvG/N0UpXCt5dm1O0nfzPI9VZrEpi1ZNZXEbx7PwWOQRr1zGAshsCx5kDomYGQDZ+eXRwA8yeXILSEj0sIdwj1ULCKLyMLAywsWQWs80bDM1tYUdp+fRASHK8hAFRgDTiqaIwATEbKZG5AITknfEKFWSVSFQpoCsxEoMhGxAtWr0HlQUFEAQNOWRTKXWWqpWaJvGoO6Pb0k8YxCDKqUqEqtjADVCV4rUkoQSGrBnRp+yMkNwTmqGImRODMssCHk+pTa627964z3d6098dMVys+kt5XQcyHTbxTHPRjadhtMwdv6p2shu3bVdLQAMuRSVcDrkZJiMCajkekolZvNMKVVUCavYP3kWyHewklrBuwaoXzsVnOehXa2mYc+IkzjwUUvdHVN0vLs7qkrwfbNCwELRIUVEiixktlo/UU0c3TSNWGuq9cnlZRefnY5D4xhiyLPcnPahj7fvb6aAr179WsrT+93tMGYF7ZtGIZpJzmIIBhKcc9BUxZTmVISbbqhD0tpswjxVh11JJ6S6verfv74jCGA0leydb3uf06yG3reOcS7ZQJjUO5fFpiyHMc0ddJt4+/6o4CHb8xdPven9/QhEOSm72TexW3W5CgogcpGznpEMmJwaqEERIyJFqyKLX+EybBBVRJSqCERIqrqYPsiZBAdaF8dEWXbGhqgAYuCZnHfec/Dex7jcBX23alpNac4KF08/Snm+f/embcJlexXwyeV2u//yx6v1E+O0v71lbdp2bSJQldhtV58S9Bxmp7lU7cJ6tb5ooQnVbt+fTmkS2FHTd7E7Dm8oeHLUreg45a/u3n589dGwz2C02lzmPKqluVYwRKmlZOsiAjneuLjB8lbLrNzYzIQ9sAN0qnMZjmmcjCDbXFRryn13Ebcb9q4OM3hAp6fh2Hbtpvsodluwkm5+lo+3rgqyxYsnBCSWyQWt90YNNhcCHgltEUBqIs/oWlFFajG2VhVFXBnk9B6yGK2tuUSD0LSVeinCKt5k9/rr493dXKTZch1SbLvgu83V5jiM035fTvr01ZO7m3eN8xfbtZlMqPsZhyE9vViPMuUhvXu/H+ndXZrvdwcVAIda5j7EmuuUppQSRTbkUkp09bd+6/tNbE/HzG1rEH74X/83PjS/fDMGtlT13Wn/n378h3/rt37/7/4Pfz8n6Z/yVz+/3d3bv/p3/+l/+If/dWxDbcu237z64Xecwr/8n//kX/xPf/NvUnd4PXz+xS9/73d/KBuKnf/qr981ntuwjQ5LkAqiCse7w5OLTWzczc++7J4+++mf/tFv/8M/6LZPxtt3fUWe9WLlAWD2wIjjOKuo95SFsaPLH33y9f/9X67GL7cvt+2r63LB/Xpz+nzoZhhvditcyRfvXC3Pr67bV9fZdKg5f5VZrX+xevmK9l+PLOn7v3VdTWYRQNpc9FJk5VelWMlKlCWnNOTPf3F3PO3/T//j/2E4nYb7z/e3JyxQci6laD46emgP1Ji5Sg0ugmEq826qjEiEDbeBIctcpYL4xjliHKdkVgMjGTddn0sp8+yapu/XOc0lS9tsUGEeExiUuRDSbNk5jjHWovOwbAWIhKY5heAuNldpGJznouJXgSzPqVjVquYIpepq1d/f3hkwEMwle/ZLi15ScQ7VpOUYQyT2hJjLCFyp2HycShEXeZxnrZZLnacdsWOmcRr2WqJrCMgUhvGkJTUhrq6e5VLAZDrcnEatBiJYxJzvvPOE1IRoFderDRkD2coHg+mY5lWMQCAEJWlgZ4gKVFTrNCOiBT4V4WluvSuSLi4upc5IcCwZa3VcSwXXhKnuCQuFwATTknAk2UEAdjXn4Xgjxec6n3b3FxfXp+E2OueD92JFZ8fUNE2pCQ2nMXm2e7kJbSxFcU5zHZUMmGspkhKTNzPnomiZUgltDIQGa
jKfDjNRiLFzwRlRkUJITefmMc3DcROv19vt61q7wHBUyUhEVQqjgM7TyKvLp9Pu3vtA6AlRQUpNyC7lSU/D9Wo9lxzaZruJw/D21eaFR97nuy2vlFlFpBqha5oGckppBkdlKjlXUO1DMxUaRYMjAp+Hctk2X+0PuZaPP36BUC9Wzb5Wj0RUthfr929um9Dc3d+DaPBUwdJw6PpuKLnrNkCMSP3Fhk4fZs6PCgh6sLE4y3gWH/Tlywh4NgB90PAjPQh/Hofg+K0OwT6c4MOZlrGcPnA7HjuJb13MwkQ6O3osk/BHJcdj13W2vwWkB8jqLBpaNoDyMPl+aMO+9b3zEURESq7jNN3d35/GU54mZlit1mkuwXda6bA/hSb6GOdxPAyn03GYTqfheGTHLrjD3by5erK96A73+9fDaT7sT6dkYKFpmH2F8vzj5w7w7bubWrIUOx7uxzIaqlU7HocpVzJDpq9f3zRsb17fvbzs37wfbCxXPbzfHcq4X/nNRxefhc6TTU8uO1QcB0ljgaCOqm/XT5+8BNBpOBFzMZQK83wMwV9tV1Mtp+mESONpJtSry4uczHu7Ox4JKBgFoKunT/I8IFGxmRwb8anOpdQ0zkbsvUO2xQyBkBaVhvMsuSLDcThlPyMjee4Ya0p95wj1ON45x2i86RqwIMXuj9P9cRjnCYlM6uE0gEj0bMZGdBqTUXlyvW1bKiIxduN+JjfXWfpI/WajxKLiyS8kUjNDUDzziR4xInjw33j4sZ81h0tUPZ4ZEA/kuQc2xAdeET5aFZ3pErC8yx5Nsc7VhI8UDzyX9mOjvOhxEBFVRVHBapFye//mJ3/5n7/65osEcn9ze7FufvbzX3zx5hvUHHwntS7AEJiBqZhItTM3XGoBExEzpYUwbpKlAiogiygzV0lqlc7+uyxmPrbORwPL8xwd11yItdn0uZaAWFWQ0HEY83B/uP/ux68ut/3+9r4JTdetFZkJHdNYJiIJkXKdmSO3vt3GcVfe3Q4vnm/f3I6XDW22V/W079dtu4rGaEmCj4ACgO3qCk1joHyYkBBRW6Jdka9vXm9fviTiw93t9cu2lGLqXBvm8YiETeuHcfAhEpBlJSdApsqagaN6T2nKRWqIUfOp5OzaIJa8u0h56nwragRE6EVmlQmZAMwjHXfvjd32ojmkjA7BaL+7a6NuWvLcpCFBunedtO3laX+M7ebJi4/r8FbmA6xXrulz3q/9paQRdWYZtKycW1sVBGdqgGJqSABi+Lj+Pa5rDwvQOQIAwYAeKDlnH2ikxWT3UaCLCLC01B+IbwYPYpfzerksdg8l+AGRefj9V67ifCn6cCh44C09YqkPr6UHoc7CzVu4EMuvZfRM+Ai80pl8tFzO4qHxIM58vJRvAbTL60DF2DEhMtHC7kRCM3PePzx4iBiJCUyZiAnVlJCZ2cU2NGsij2Ipz20MVeFU0lQKEbYU2DQQBCQRO01VlWyhbwaPSAhsWQkBHYqBQ3LswDkkFjMAY1PNyaGYVFI1IkQ2MwEwFUJi5409OOfUgVZgtOgAAgGBq+SDzzkvkeyL9TQ7RVByChYjO+9jiF1sIhMhOkchuq4N7FyL0COijAu1yjO0XUTfEAd0njZX3HYcAyh571WrqgHzQ4x7dUSmQGbA6L23UlUrEQEywCKxUzFjYPKOyQGjKTZNQ4zeOQDw7IGYDEBrzSmdht1+3A/J5SlyJc01TaZIUpwJaPUqxEyARmYMSATADsmRek/kGF0kIjQh59E7Cg5ZAaopoVjNxIimAGTs2TcNxxZ9gxwWPbFqNRTgEFzDoSklxzaGlJNalzXNZS6aBIoqnMMrEMkC42KwRYTMbnG4QyIgWuhlwERmj+a/iECLMT4y/5e21g9a+EXVtojLVOoSGvWwZJs8ZAqoCjEtfjGEJIs9OyIhLy+mpb6ZPbvF4r1WIOTQBtKqIqKZF1WxKpgwskg2rWSmoirVM1dJNWcp4miVcyo1aUVyrKI5zYTknJc8WU6MaGaSsmMHoAIFDVUyoaU8TfNpnmYpCODBoO3a1Wb76jd+EMkdbo6EdbcrbcukqrsjjON0v2tjX0vOdeSu6b0DjAbTR88vx2lWg6brbw779WqdRx3Gkk1d9IGDlrwKDlAdYa4wpZod5zKddu/ZETuGAghYpYomFT3d7hrvcqpjnrpIAfC661iLRwL1PuA45FKmpo0dNfNpahw+216+u7txzks1pigmz549O56O4zA74ru7UyXXhFBSuV5fxcZv1v3bt7fzfOzW/TzP6Ziqy33vZZnanPI8T92mceS8sSqCo0BQxYkUH4JzOI9Dreob17Rd2yc7SfFcFEXUew4hHqeBtXHmHWVDLaVIRXQBxUh4HJKANQ7UbM71lLL3aATTlGPbpjnHJrTR5yZMUxVR79hUbTFO0zO5nh732YQL93MxWTcVdo4QTUxB4WHJJnYGJqKEoA8A6BKCaEzOe8fs2CGic949pN7UqheXTdlp1Zhu66kUIn7x7PrLb3aznFJun2wv3t1+3V9eCPnoutZaGA6BpZQSQtBUT3W8uN4cdrOou372qk00Dzc5G0S3G+6iVg7Oq2fwmu14d8yzQBeGQUNYE2lRmXPOqne7u9Bd1+lUJNq6rbkCOpUZ8gxE6CJ4Ay1mVuepzlPOsyKUMvf9WqVj7ENsZTrWCfM8V6JxGLu+laTHw/3dm3egM1pm5m61dtyIsqm5xotOBE5dyJVsiZjSxQpJLY06ESMAAQAASURBVDhVB8CGC6tLHQjYbJKwuwz9hYJwTcBsUiXNVOeyvxnu3s9ZwuqS201Gd/nsajppqg4EVe3jH3z/dDpdXGxqKbCKb1+/KxTSIHXSCjkpOlREmafb/fE0jfNmuzqdEihAs8gKhRyZmdQaUX7nO09/47NPEOV4Oj3bXIUYTse5aeKK0XM4zSeY4q+9+I2bLw+3b8tv/sart9/cb4Nttu7/9n/9F3/+n/797/7tv1WYT3dHEl31/T/+u79Tk6Vhthqv1y/e/vL07t3xt37nVedjcMzkUknk4LQbN+16/d3v6ukmdBer3PaXzz/9AfrcffPHP+s321/8/C+/+3s/CsxAcHkVnQFyk1JpgvvZH//85dMrcf7J93/YhNX07/51Mwu6rX73Ol7HEPvw6SXcjONPv1z94NPw8sJakJJX2wbmyArjOPmuXcWw6rrj29uMpf/s+c3dGJomOmawWmrTOgV98XIz7aaG+W//7g/+/Kc//+av/trg/nKzHo+DA+zW6/mYkB+7C5hTYsRSMpEDtFSlC42qoelpnrq+8VqqoPdBrQbPzMzsVdN42hNzCN5M55wXzlAqpQtRqqCRDx7AWCHnIqoIFmLwPkzToFq9Y1Hdnw6B0BBrLZ6pKpRcQ2w0ZxdcCM08T5uLVZa8WTfTXI/7sW26EIMqhOBFZJxnQozBM5OUuW1WEouIenRdt5a6zzkjGSESURO91vLq8jqnejhOtcpmu1bpCWC8v/fkpWnev0tqEBoHYJBRiDx7FsllRnIxBk9eQdLpGNA2rQ+ep2H0hOBo03X3wwBLaIkI
OUaPWWoSqnm62HbHcbCcXNcoeayVzHW+n/J8rGMA18WmSmIH2Q6R/DCPiG5Oxd3vZ4kvP/vs9ZtvYgxEjghEjcmL8XQcWx/3pcToiqhqnVKl4CWnMifRigTRhySCYMwwHfchtE0byLDUAgZN8F3XzcOUcx6OOx9CbCL7cNjtCRmJ58NgVa+eXFy23XjcBQ6uBYO6hNU6tWF3uthu9/U9mlJw7KlaXblIRsfByLCKtev1eBptntdNOw5HdKEJDbNDH0TIwFSwllJTmtMMoA58msrFi+cBcd4f+3WX87x6sknVFKltG9ifmpa2q+bt/ftPv/Psi1+8/bh/MZ6+aJqAlch7h8p8jv1iZsla0tC1fSmZ0WPXfaudeJRNPDbcy0bqYeiMuLSv9thpPMyZ8eyNSg/eEmAPHfaHJn6xLrLHWfOHwKAzY3zpZwhNz+4v580fotq3gqhxseN7EB0t3tXLGNjMFtM+NACjxcMS7FHWtlzUAy0ExayITHO6352GNK82XZpTmhMHevr8yfbiSiq8e/MWGZroT/v7i8tn4mk3HuPKa6lffvHzVEp//2bdxZJ13Xar8BTtxgXMWbIxJS5DfXPz9nAYt1cX/dbf3rwPWrUUq3hKGYliDBeb1f39XTYUpqF1vomVlGMLWGJoq6EQff7Lzy/6SGyppJ67rm0unj2NkU7j2PWr6bB7/uzpeNzVmsFBEyKqiUBJGileXFzPeXz3/iuMbtX4ec6+YM4mZXaItRZAaiMfU8lVpZojbAJrH0rVVKsUQ+LovKr4JfTGIAZHBCTCooHIau2C2zZd5Nr5pmvalGb2bn8ap3ncNt3KEZKnpMjkPJe5HMcc28CM01x869FoPs2mbM5jE548W3OzNkSrVucSYgMYFhN1NSNaWlN9aHtpsdeysyPHkqFs8BjAtOh+HrtqJD3nHf+K/vERVXx0vHqoT9QPzTg88C2WdysiIYA+FiMuKd8qJooyzPvPv/z5n/3kj4dyHIbxl5+/6zv/xc9+9n73XsACMZrmMpuV5YprLVWlFGHHCLq0G0t2DxGawDmO+SwSXaaIcnabQVIxJCZHKgmNiJA9FZFV19RSocr2YgNq3ru2jff7wzzI8/Xzm7c7RNyuNoTs2WfJOU9mGtp2PuVaypimdddaKt6BX7Wf39z+qL8sSUstm37dNF7ECLVp+jQlkey3wYVGaqoiVUSR1YXFwmSapsO743TIKaX28oULzqzOx7Q8y6aUaklSArsIaKkkYEIk8q7USRVCbPenkQSD68pcojfVCZqZgUsdg8M5jUxBbRaZqwbnnFXWZMhFxVbry8NwW9OMbMfjoQ2h3W48BrOS8+BC0/VRSnWxJW1Nw/F0d7F5niWJGCOnYddebQ1AySNXU0O3hB3JAl0+IOH0wWMIvgWJf9h00MJee3T2eSxUewBszmjQWdP7AXI5H8DgUdP7yLuEb7/mW7X6q4j8Byj1AULCD0d65DotgL7K8hVCOLMw7BFjAnxIGFiWfCR4ECmfb6IHPukSqfaga0ZgJse8pDYvTwaxRaYDhGdiLiOCLlbtDAAgRp7ZuRA8obaRPaKp5VwPx3HI2Qi8I4/kkTyZBxIRMVawrJq0qAABMKJpNZ0RovekROKCOUZUrEWlSE5cs6KBFEZDMsceqVEkQEIgMDQgMGQzQBAwF1zrAJ2rVJDRRZrnXIsQUqnKjgsiOh+7Jsao4Nqm67o2EqGZI/SNb9uIDNE0qkD2aEKMMQa/6rFZ+W7tfKihgxgoNERMBqZFzdRQqqCqcwCMtSoxGQI5VFJBRWY4K/jA0DF7AGV24L0hIjI7Jlqi6cBqZqY655zn+/f3797dvr3djcPUaa2UgiYoGZEdKBMAE2hyPhITsTN0AMrIaOADsQ/kA4dmwfbJMTkHnquZqUmVuATeMxsxsAN2SIzEhkuSDCAh+oCoIFQW811ik0pENWUFMgRkMpMF20RAZmdozLSQPZ13zLwosgGRiB/2NXQ2PjRTM0cA52CpDzeZe7x/cBG/mwGAiC50UQBQNRE1MwIUBUSURe6Oy/MJFJToIX8TzJNDPLtDKSIDkhkCIgOzVxJFYqYGGbFILWTGyKogNSMScQQzrZNpsjKWNNckTLHWGQEdN8tP0AMhmCMU0LO0U4wVtBYFAM9aoGZBchgaBN6ugg+N91HNP71+/s37L49ib978sveu731jMQTe3++hCCms+16EkFFFU0reGsbm/jiEMgpAymWWw5ySQH9zKkqkajXrIIPzYarJk3c+uoYjI/moou32wsi5huZpPu2OWWrfsDmQYlOScc59F9GxYr0bxki63V565Pv9nWOXa5OPkOyEmsnc+9Mdkh6Op7brANGEb3en6Mx7bvvmarMdBTd9G7xLKberLm77jy+6YRco+tjyaXfa7QaVEAM3MZaibd8oWBZx6PJcgQk4kQ9DngOBZlGVpumypjnPLno41j6E+yHVqiH6pu/UrFSZ5pkdu0hFoIig1jkpIXd9e5xmNqeijqkMU9vFfu3f3kmZkwvtvJvazofY5DIHk2pozIpQF+7QokhDlJpNFRREZBneqlREqmqMdM7bYESkxahMzUTU8LzPdkSIBIjeOx+cZ/LR+xDYe8dnqCiN8/b7r1599v2//l9vjcKUdl3wb27fP/vk2V/+9c6XFOKq2177LaU0ODEf8bh///zJy/7p1c3d4TSWcU5tM9Vx0uLev3vzbL15+b1n0y/3LXsHeDccZsMYgDzFxk+ntG1WIlOzguleWuY55f1+xIjUrM2FlGvbtEUhq4YYfeRpf5A8uTWrCoOrNbuAU52m6ZhFiINz0aioR9GcJ1Ez53h/t6e4TpmO+zvVTNXW25XvtjE2rt8458WwJtDgsb0EvxJjdt5BgnmAOgNUjVvj1sijC1BHlJlqhnLEesRmhd0Tcy1Aqa5jF7EMARTK6f0XPzve3vv2ycWrT+4zuc0mxc610Qef73XdB+9cngVU8nzM00SI3WrtqjgoX319c7V69uqz7f7P/+pdGo4398b0/s0oBuj8MI+1VDP1TVyUiR9d9f/d7/9eOo2TpIsXr6ixu9vdd0CaHtu2uXt786/+7f/7f/yn//TVi+eua//0P/z7eR9v3x9KPa4ufnA4DL/4y7/+7d/8Pbeir/7qly+evuqfuz/78X/+jV//9X/zh//6n//z/2O73bz/+vZ7v3mddFxvO8nyV3/x+uNPnzSrLneTmLquv/mTH19fmCtwP9zT5ctmvZ1//lfPf/037nZrdq6WSgie6Oabw/WLC++dMn7nNz9tHaW74auf/PlH3WUbr+DzN6dfvOv+xX9funF4N25WT2Q35WFlx24c3tnFigLVfb7/5tBv1/wknNLcFPn6j3/GbXe7u/toc7nqWvBweH9oHPvo73en9aarqtfPt1/84vPG+f/4h//GWzbxKjqfivN82T3VfN+1D+QEJhZsGjcMQ9NiF9cAGDyOc0ID70MVNQOpJRUIvgHKpVZTYkIk72MsqZCBATrfjvMUXYihFVUVQRDP3nWreZydMbPUWqepiNWujWo4TTMKFDFF17b
bnE6mEmNTRaTkwA0btbGb0uQolFkYQuOV0JpmNQwnAyy1KvLVaj2djk0IICapoMcYggnM4wnV1qtV6Pl0muYit3f7Vb9+fzt7VDMjDm0TrMp0ur++6FDyX7y+S2NCY15uC9C237SBT4d9zXaOZa01xrgWQZEsIiUZwHrd3+rRoMbgD/Pcr7bzOAMhiJrZaU6rGMdcQbQJ8TjmvqdSKhJ1FKeUArceqOYyl9FxXHdXQlSGOwYSsSpiNuR512762/fvXr74GEDnOXHoL7pN0V9iaLr1xX548+CiiTlnRKumIQQpWVWrLJM3JUQVQQUk9q4bTnsxaVd9iC2giNRpnFxwary5vEx5MqqxRbDxcJ+3T186huPtXdM0eR5UCpbq2HLJLBlNXXDBoaituismgGyNDwxaUs6ltl0spxyplcZnw7a9WqZSIhrYOXZg5XC68y4QQhXEJgrFpmd89/7jp5+NUFaxmUPxSFBx0642fbfu25RSGk8I9XQ4Pt9cfvXuTclTv+4PxwoEjsgMvWulJi3pWErX903bPMbWPKoNliZCdbGNsEcnC3uwxPvQS5uBnneYZ/bPmSq02EmebVyXiTV+8M5YxBl47j4WutBj3/PYz3yLwGFqD/KJc4v+cLCladIPnRQ+TLN/5XiLiax9i7YEyxPTVEVqKilbPaY0DsPpdCIz752PTdd1u9v7WhMU7eOm7Tb397vD/e00T3Kaa9a+W208ffL8177+6hfjsL87HFOqL5++uLq6/OnPfjIMp9/47vd+8dWXyBRCA6rDdOo23eX26Zt3b0OkVKwUGacEeCTvvee+a9/cncolE2GthWV4vl4XyKfd28snF4YKzC+++9mn15evv359KvP9++PFZn26vT0d8vu3O+ekXbfkedP2TYg155WLUm23e3s4HZKxTTBbTlPuYn992da53gzpm92e2Uk2QF9yRoUI0AbXez9nvTtNhdCQe+9SmUGRnScreZx9DJ6p83ARggeIaMw415Klrtgz5ONp7LsGEVNOjp0UnWvVah5RAfquWXV+TrVxzWF/ury8OB1m14T1+mouKZj0gVcXGyBkH+dpDNEwxHMlAJnqktIE50hrO4Ob8GBBvWCXD+VmAEvD+u2G+9wbfIt+8dAmL1LK/yK4/KGvhg/978ORljJXIDZA0SyqVevt8f5nP/+Ln//iL1Ie7g77d6/fpKEcdtPrd69dQwBAzIfjntiYXTGZU1ZQtA++3EgIakuTU0XPkBcgkwMAx84ADHAJoRZRBCJCkaSmsV0Rs0h1LobQkxUDLSJVsg+8G4f39/ur9eVmc3Xz9ovtdo0Aqz4aYqSmb1fTPKHjbn2JZZQ09b5/soa720MITrHrQzvvby76drXepmmM3MbQ2zQcx6lrvKo5Zi2a5ux9TNVCv027uyaGuejNbnh6/eJPf/LH64unrz77jikO+9t2e6VaCTBynI+33fZpjF2eD5Zz22/EYU0gWXwMkdDK2MTVfj6ZOId+Pt00q0umlZmqaJ3VQdSSDAyalRUffDPP9/Owb9pIjZ/uEqC6EHZ39371LPab6XiQnGi8Zd/G7lKLz2VofOOlSik+tFIS+1YLkKEaIAExa82LKBJMAf0j2HJ28/+w/jzA7o9IOXzL5soMzGhZLAmQCBCAzmX2wQzogbKDD7yds9fQQ8DaA9bz7Wr+363wB5LTcoIFcrdH5c35PrFfWZDPsP4STAYGqo8Il51pQXi2ssYHmfLjg+J82+BiIczkPDunAGfrIiQkcLiQmdhMAQlxMcsAYvTsEEmlMDskbkIITME5EanC0zAdT9NxHBSsb0PvPZfiHTmGGBAICSmEAIRAjihYLSZCxFUMuBohOAIwK5nKbJJdqSQgIghITARkQAbE7MgymqCRlQJFwAqoMVZHyg4ZnQE7JleFyZdcEak3RsfqgrnQttGHGOIqRs/sQIQJQtuENkSPZJXm0fJMJkQYAgOjscPYQdNRiEQNuojeAzpAAIwEAKIoZlIgzefHNTozEyWxSlIAEIiI3AKHOCbvArAzRmACIE+AUkwNVGqRilqS7I/Hu7vD7d3hMJ6mlOY8rTmv2SLQshahQ1hssR0hKTtAxwAUmQgZFykZO2ICamFJMYuBnCf2SAxIAOeSLyLEjLUQmmkBcoaByakZApmp1WJVpGapNc9TqjKLpaoqokCOCHXJmyFHrAhLf4yLWREA4mLXuzgCwzJp+HDHnAdTisQPlsEA30pAW+y5TGoFQBFVlWVno7ZsTUDBCBGZzZSZ2RETAgDTYllqj6Dx0rMD2KK4QxNCZGQAMPIIymYmMwCpESOIidTqiI2WUVhVKbhgVaLBN6pK6BCxlGQinltTJZMqtUxzyWJiVoVdQHUiGnxsmoBNLNWabYeOCW06nVCxlvkXn++Cd4f3x63zqOX2ZgjcDqcDEcWuGXb3YF5EsomoAdAwz2iaU1UFiI6Y15fN1fXTn/5yV8HIkWcGg7lMSNauVwgkBsM09t1qtb3c7e597Pb389VqJY2uL/tpHLeb1f1uv9qs9/fH0EQF3J/mlxet6FwVSqpjGsZhaJwn9Kb6/Om1c7lMY8nJO/7ok6dKbhzGbtPtTqfrJ9fH02HOCqfiXBje3ybHIQbfdV/99IsXl5feu/c3e2JigvW6VcbTkDirj8E3PKaxJGE0QqdmU6osEj2RFBExMINaq5yG1PiWHJpCDH5MacpFQbvOz1Mdx6TgyBEhE6tWNdWkVoe5VDWAdddPudaKzESO1GatbphT430plZ2PwQCK5KomauIcP1heGZgy04L8iJ6tjOw81EU1AzVHi4XEAhwiAnjHBgYEAOS9W2JTyZFzzjt2zi//sTvfBeT9L/7yq8/zVxfNq1V3kd7viuwhDnjcd13j+1amk1i63Lw8ff5+N8/2rGn71fF0OmHO6ELTrN3VOKRpmoJrAevt/V4tEwS04piawKvLF7vd24alzkfHkEvKWr66ufEaKtdUBJqVsOGKmP1hnK6fXKWpwhptiWoylloRKacUyUmdLI3DfncaU+gvwCwllQrGkIZZkcAktk1smsKijg/z3MfYrVf9ZqvkgGK1lVZFM46+grMiWk4hOsp3ZPs6HKVmv9pyuy4i6IJZBitoGSybVWyujVohb8CO2MpYyliOU4vT8fbr42nXXW2tub5LTrv11YuPVqv+7vU3p91u0/t50N3tqUx1ex144lrqarud0ZgmoCnP4+pi+/qrN5+9eLa5ulrHrpT5dncc1UbFISt7ByCegYt8Z9v+sx99/0Xf7o/TKVe/EWsKN/7m3W2z3rAYh/Yf/+1/vFk/+8N/+0cvPvvso+9/p3vx7KXvn3/ya3/8//vxJ5988gf/0x/c729ePfvub/1XP7zfz8daj/spJfmDf/ZPIISf/+yry/WqWbnTmN/v3n/3+x89I3eaBzlIjF4E+enm+rd+j292qeya7764udsfvhhro6MN220A1dOYNtsODIT851/tn173m7UzL6UWulw9/2f/rbwb5Wdf8MZ1VPnCQ+Ob1Yt0qv7T55cfPc/lcEr08ntPvvrpXR9s+8lzcU
gbip5I4Gr1G2zK9361igPmWqHto+aCjjYXrQ/89pv8n392czrir3129cvDaXP98unF9osvv/rks+v5cDvubuvpCD6c7wJEF9ycp9C0XdMOeVbjLBJcZIDTPEaKSBSbWHLJMkYfjsOxDZEQOfhSpIv9OB2rZvJhsXQ5zYc2NlrZsS8pZxsRbcrStAGRT+O+7aKoiGjbNmlMOWVLSdpac/KeyIEaUAi5iujIPrB32/X6fr8nVSsKxKkWAa0lMxNYNSzdtpFivuuij8fxgEBzTmCA7CqJCYe2W639pl+lNCtgraVbdTWrFhWpzrXO293dvUnq29Cu+/F4DL7ZtN1hOlUA8+xM2Ifo2tNxt151uRQXm6aioAEWY8gwP3317Gc/f+d9W0sBK73vp3EwwEmrI5SCVK322HSxplzKGKJ7f7px6LbdGsFiDOWUwagig8NyQs0ZvSuo0Xd39+8/+d5v3L+5n8b/P1n/2Wzbmp2HYSO8YYaVdjj7xBs6ogE0EgmIEEiQoEpF0yzKdtnWF/uLv/gfucp/wEHlsmRbqjItUzLBAIAERZDoBrrRffvmE3dacYY3jeEPc619LuxTddNec6+97jljjvmOZzwhMcDF5dW7t/er1TL3+7jvNUoOSUkRuWSJKVnnRAGKMllCrVxdSh7GgADEMAbNSsvVo/nChjiEMRWVs/OLw3oX+v0wkPEtWuOtL4zDMBjmLDJ2h7qexzqkYTchJmE8tPN6YaoY9o1l0WyZVDSIsPN9dzCGNWauqN9vZ037/Pz5zfb+0cUjNoAOdmGsEKCorzmXvmSJQSrnh3BwvpWC7+7X58unxrk+9LOzxWG7ndeNMZRKngwIZjO/WeOhDw7NbrOzllIuyMTIzrjQD84bVYghWkNFTU4SxiHE4E4M02nDpieLXiQ6cjEQQeRhPa4KRAYkH3lD04J1wobwwV7jRMaYvn2aJ+C9SALxGPf5DeLSaZ2uAMfMsgfO0AT96Ek5hkdikMKDnui4mT9KkU5SITiRPABFVVVg4oC/V1xoyXkcY4xpvd3frTfhMFjnlrO2qRtjzWa7jjkZttaakMrd/d1hvy2lPP7wqRF69dWbDALKv/jqk/v1WwKOWZz3Xei6V109q3/7b/3un/2bf33Y77iybElVQ8hV671v0pi48jEXFWHLIrmINm3d7/qxl3sTa4fzyi1sEwWz5ufPV6Ev88XSz+39zX79+i6V+OTq4ur5k/v1bR8DNs2wK99+8fz80RkZfPvyRiQ5z1okjHHfd7tDZ9t53cxcVQHh/f3dthvSkGKSma+J7D6Nhstq4YcuKmARKYKkYhEYAUAMStEccnIGseTKUmPRO17M3HjocxLrOeVExP0QiTpPIEibLpacKqJ2XqmUfswpJ+OcEoVcfCIGqOft+flZLLh8fEne16uly2Ech0okqxjyScR6TzyRqAtMAR8TIIgCANNEgUhwdKQGeMCHThYTf32MVjjter9JxDhee7rsqMD5JukDTrfIBJROoOZJiQMAIKVoFkkJy1fvXv7oxz9+/fpL5bS9W9/c3jSrOpZ4+/rG1i6HaAzGcRTNDCakFHMKKU0TDgIgkIjmIy9OJviTkOR4T2CRAsigqiLEBAqEk+0vA5SqqkVyKbGua29djiGX7L3t9sPl1VlIYRxHgvLxi2f7/R0yNG3jq1qZYgrL1aKQpCRaoHVtgqFtZ+2i2gzvmnk97OLMkI7do6szVh3T0C5WxrQgst/du6oyzvf7sDwvuduvzlfbPgooOUWV7hDY09vb23n9aHlx/ubdm8X5Gbdz56oQY5EYJFT1Imca+qjKRFZKHEJv2BvKUUNK2bm662McRuvaECMVnLVVCF1jZ8CeyXX7XbtYaclh3HDtikxmJYqqb7/6dLE4J7WljHkIg8huv5mdPzd+lvcDekmxZ9M429jFZR4D+Ya05OGaJZmFB2AtoOWUqIQomnUCd46FNP1djuIsFT1h2Cf46NSmjk7VR0RosqECnmgr+J4K9ECL1COZ6FirD/ygv84kOhbHschPqOZDgb9nD53gJ/j/e5PTEk2/ocFERCQmQgIkoIxFi5ywfyXE0yPi9LZ4okkBPLRlAiDGiR93kn8iIhljJid4QphYG4YQUNiwNQZxUmNVCjhrm+V83vgKRQEhjCGlOMSQQYxh54y3drmcNZVhQmegZlg2pvKGUZzzUkBBRQ0crfcIEAVVS8YU83CgHEVUgfEBtVMtWhiUJSKWLCAixBWiHvM6Ub21sRRrqQU0SBgiCkeeItXQOl+MY9fWs5n3jW8aa4EQSxgB0Dcz33gqI6YMyEUZEdki2wqZbd2Wqi5sChkyDNNvkD3iH4CMpEgZsKhwScVYq0AKoFnQOpRcygn1pkkRhkRGQQUJDROimfCSHCVlKQqQcsqhH1JJWXPR0sWMYzKu2FJcZdgaOvLnAJjJoBIxE5Gh6UUyZI0iARkFZEY2FVgy3gMqsZtIyCJAAIQiICnFnCB0MhEcmB0RA5JoNsZnBQGIMaYY+zHEnEcBIRbFSYPsjGUzpTmd+Haolnk6G8hxp0XHWPsjfoQKCoSIfFSBqjA+cIlOUFFJRYroablU5Hj4kAlKpMnuazq/CDMxkzUTWkSExxLQyR5RAYAkl+lcIiqMCMjAaoyViQkjqAAiYNiAlpD2RokmzVEZJQdrsB/CMPZF0JKqlsmuHIEIPSpY1BhyTkkV2ThlQeeMdarkjTfWMCkoVLWR3JegAAVK6Q8H1zStaY3hbz8/392/DlE+fvEbrz5/G4ex9m53uC8xKsgw5iEWNd5ym3SHCIvFPKU0RB1CysCHsjmEUIBjHFOY0jotmyorq3Lt62Xz7OLifL0fP/7VX/GL2ec//UUK4/5wf7G6KFIPPQ79GFls3Y5xYIQ0jHsq1CAaIUB2fNU8iYedIi/OV/3QjZtDY4nRDCFhNwyhq6wbhsEac7+/JwvDkL2exx6ff/irT58ud4ebPoQPv/eD/X0gx+3SDf3WeqeUFDAgbg+R+nz5+KypudMOgBkNIYacc5aWTUrRcF2gbLuDd56ULEM7s3EItVCKnESHIdWL2llKFoqhfYjWmLY1qhL3otnmVLx1hzHHkJFQkbo+kjV168cxb/bjqsWmNiVmASVmxIyARASgBYUNn9hFjHjMNVSAclLRPzyIVPUUtICASIgAwkiqAExoDCEZY4wl6xwSGmuQJsrnETcFgn4YUEwXXv3NH36wwvOvfvbZx88/uN7cxTymYPpdb7Q+vCUOq8bPQGtsbAplc3egxirx3W79necfLpbz27tXl6vzcNCYdYhoHEKJBu1mf51Zt4f9gqsxpuXZY2H/pttdNKtu1zmEpm5vd9sMJo/BExJ6BpsEHDHklEfZ3Y5YB3JVTjlFSH3Xd2MMmpnOzp/g4RUAjUP89PPPXzz/QdO0601fM8f9Ifm9sXZ19WHtqyIAxNzM0XgtPTGSq5Cclt6C4n5Iu/sMA7Kl5rHUT9WeKXosBfMeSkLNKJqhgeoJKhBmQIfSGw05JJYI3cv7z/6qDNnVy9hcrl48B4S0u795+2nc71l5HGi3H
RScd27c7e/v7mIsjy/PGyqf/OzPx647X6zU8dXzD6/fbR5Zap9edMMhXSwWF6tPXr59ux9v73dV02AqHz2a/fb3vv3x5UryGIv/4ONnY4jzxdw/ujLeo7V9l6yz9/vdcj3r7+DW98N4+/o2zxb1xVnz4ulHJLldnP/8r35y+fzjIrLZ5No1v/rLv7Nf3zz67mpz14VtTEhjF/NQWt9QAilh1tbXN93jx/7+9vbJ40fFQoKOHVVtU/e7ctd/9/sff/2Lr2FxOV8+qb1xFg3C2aJpC1QOP/uLt09W1f392yff/tgsnG1W5dOvOOV8fz382V8uf/OXYLcdPn9Vzc43Lz+tv//h/PLq7uX+yffOqMJYgAv0Q4773qIL98NZTfefvaTVOZ8REIUxnK9mIqUYJFTK8tHFxV9epz/65z/69V/53U+++uJm++Xd+o0xJoc9qczO6hDD8SZAGPuhqKjXLCKKRgUKZcloTGMbEi6QFFQI2ma27w910wBQziMkLSJ6PBaqs15VY8oIOMVjxhCL5Aq9QJ4vatISpMzbWco5DCmGcdZaZqqaShGVkIElSwwhpITEzvpuGCpARX13e1vXXnM+u2gO/ZBinrezsRtQEVQPu327WgBpjlEEQAkKEqIxnEF32513xZDpU+9qz4yWbJ9D4xryNITBQ5Vi2e3Xt/e788ur1BwSKLMrQxItdeWyYozZ1nU9X6UUlro0RDFeC0JT192+c6yisqrrq2b+Jb2dLVdjvy2i86o9dAcACCHvhmgZMcmqtpv9DltvLCOCRBMYQpGikK342ZmWOO52EMQaO/RdZTGnUHMl/R7yrhtuQ5g760Ic0dptN/T9IZZAzqC102CoWoi5iIBIEc0C3lrJKUtxtUspWsclZgQexwODM1xZS/e7zQY3xNwsZjmmEnJEFs1NUxHS4bBDNN1+LTm38/nIpdvfny+WIuTny9ANKiKSDOHl8vzm/kbyIIopJyS1lW3m1TAMHNPNYX0jm185+42QroURwTC6ImPWpCy7zYGI+7jvQnfZLudN+/p+k0qp2mZacXpfg7VVY+rGJ+kXiwYguhoX2hQJSsgM7dz3+xRLBMv1fOYr148DW7LeacDac3/oJCaUv8YqOlFyjlswETk6uxDICUcqOU2XER43xTCxib5hKn3kPpw4QHq0cj0CPaBHlxl4P7I8kID0gfqDD6+rnqQMDzP/6Wn4TVXQN14+sp6QJpzrIXVoktapyrTgFxEppevHN6/elDACgDGumS8XbVVAq1ktiofNcH93e3P9brk6O1s+ds6qlvv761A6Y3zt7fXt20fPni6r1aef/bzrhzAkY9hb/Mlf/nk3HNhwGnMuxbStCsQhrdc3aImd8eJiTGwopiQizCiVWS4uLs7PS9l+fr8/s3jZsmf6/OXaEN6ut2dtrQUZvTX1eBh//uaNs/r8xRNr57XHi2b28uefdMOohZxxKUUiKJqY+Xx1bquqCOxub4KULJJDGcdRETVTKEMpERRq74qjJOKtHw6Hylmlet+PIgWy1mxq41NJbe1KyqiaQ9yJtM4Qw6w2IOVycXm3XUczzhbNyjtV2tyNY4Iv3u1mtZnXtbNGRL33KechZy06Qjc3lMDM2nZxcd4lXS4uaCZZgMHkGBgZSJlIJTOZ034WAUFFCPUo/ppyXgn0SL54yIM6IZXTAAxyGpnlyFN74NYhgJykZ6dCmngcpxo6MuBg+jEEqKJKqkWRECFLLppyHj97+dn/8Of/bn2/z5JjjjmnJ0+ubu/fbe6vAUvt6iFLLpFIrDGqMAxdVp0s5FPOBCRa5AEqnW7Oh88GWDRPCWuAAESTQzsiIBhURYScBUDq2laVtwQphpxS7Z0xpuQSx9jt+4ury8ur2f317dli1TQzw06LGrbW2j6EJJAzOMqz2cLYtB93lE0zX4V+Y01Zrhq2mIbu0fIJQRPGaJlKlsXi7H5zu2xXimzIgKAqIWVmZOYwROvcq+v7j67q1UW9fnMTt9dLzwUlxDRrVof1V5UbjLoSksJAJNZSd9jZ5oLRMDNoMWwIzNDvmvkFmNIPuyU8L6XEOFSNV8SSs2ARVS1Jxg6pTpKtr7Db9pubhtm3Tb8Pfdf7xu83d3W9MsxV641hUE6hs0Suaupqvt0ffLvylg7rr5hLlBGRpCiT0TBO8UlSBjQ1oDly3PA9XKIn5HqiRyLgA5cIj5ZbR+BaAIAZCHUaZU+UnyO5Uv+6SOzUJ08MtyNupKDfZM09ADWnlnmkGE3+XSdo/z029NBEYaJSwFGUw8yIPMHu00OCkYRQAUWVWOFoY4SnHqzv3woQJhAXkaw11j1kY9KRgmGIEYBVyqRg4xOCRkdUiUSQvW/qajlrV/O2smYa+Puhy5pVxTK3Tb1sqovZ7HxR15Un4raixrE3aBlBiuaY45gykBIpkaIlI6Jl7DSNEEeKA4kAckFlQGIroAYR6ZTFVooAK7JCUQJUQmBLDskpJVAwoKyASJYgZi5Kx8mrqtx8VTcLNs5VzjoATWAMG2+MJRSVUsZRwqhKwBatU9uYZqXVufoZOgfESBNon6kAMauIEOnEwyJiZlRXykR4IyRSACoZNCGSAqJxk4MPsxUVQSXrCIRyFi1TRBkSSxZFNY6L5DEOKaUJggmChUwGNqTMSGSmnmfYKAkiWCY0bJwDYiU2ziOxIiGREhJbYovWMFsgmliTeYglRGCajDWmbA9QYFepKFnHxibs2Tpii1Cm5RQAEwoAGGMEiABQZbIbQiIsWqZmP7HTpsY5pY6BEkARgQllgonFNkXzkYpkjQ/HjxNUpEUmK6JSVI/pf0REEwyISoiGGafcKMOGDTMZJp4uAdTJ1QiPuv4pdk5QVJSRaaKNiioIaj7S+YhLjiUFBGLrlAkk5hQINaUcgkixRKSSikjKoTE1GydoUNLQ7Qi8ZHXepZwAtGoaY50UkqKoOQ2B0ZCjEqJkLRLPn61W33887A4G7O3rt5/9VXe2WkkfX376I4M1QGTTmMqG1BtjbUERKliNQwpjZgPGe2OkQnBVmwrdHMb9oQfnyBpRRaZSJCR17ZJsbdxcTbU6f2r03saM+XDYv7toHl1+/INX19fz5dPQD7DpAaVIyUMqmCpvhCiirpbNbtiN3dDWDcYwjHncdrXHNAZg9I5Lzl0sZACNCV2fWTDm2bxazubz2hnfvnn5+uXLr77/3Y/8bL549vTyw/aLT3706OnV7l3qNvGw7Z23s6ZClJTK7ZvbxaJazObbLmUd2TGzpFLGaFIq1hVjXE0gWdliHAdmRgbDslq6XZc08eHQ165GUVawxuaQ1JqqcrMG1tvivU9ajFHVXNUVshYFEGRVR1oYY8keeDIZKyLMpApSjokdWQUmgF1FRYjI8LRVQAE1hCJKAETESAhKzNPJl80UFqBELIzEhpGscdYQEHpvjbNIZO37BLTFot5q3t4fXMKvfvzTs4++1S3O1nd7773zMB52ta80I2Q4X144V2eb3ry9BjAX87ZA7sPhOxezePvp4mzVGCmHbmHnQbKEUHtr2KYh7bu7qr2sXcPJWL+y
ds6O1+OGPHjJKaHhqnK+j/u28QTS7beumquWEkLUKEZf3bzWxdVqcSmxH3ab5vwsK4gixpK7w3i/WZzP7vp9zFDNLkn72aLVYQCU2jVtNSNkLVLVlWAW3THUbBGglBygDCRh6PaSOraMhNXyDOuLYuYIgDIwZtVYSjbOQQY0TiSpFjYsMki/LvstqfMa33762RjGanXmHz/hdkGmaAyyu0uHvau4dna32UqS+mxek7z64nUcu8vH33IKL3/x2fZu5xrjK/7F658+++Dbv/Nrv/wv/uRfoDFn1iVQI3HFkEnOL9vFzM9d9dGq/ejpubcsbJ2trbPPP/qgfvSovnjs5ucVwP6QRuw+eHrxp//y3/ze7/9+8gX9+eYmSsz/7k8++fLr1//g7/+Nn//kJaFdzKgv9OSxQ+3+q//m//WP/+D379+8+dFfvfmdX/2NGAUst67uvt598cXb+bytG3+FjBCfPVuE+3vNuvzuL6VffH79P/ysOm+++uSztm5ni0U3JA0ZW92sh8bxENN82bYLePR43rZu9cG3r3/yqnszPrp4QfvCgZpn3z2MY/zTn9z/9C/b2Uov7Mov4LD3V0+HV+vOW3TZrGa2gcZzBTUAuzNXxvHRr3+nWlr0khkNVbmUMKQUQ7uqHj2viXC9M8+ef/+P/tWP//if/tEHL9g6TWnY7ffW2qVfuiOpCAgnwqyrvUs5GaLGunEciwqhBaSSc4gjsUGArttZZ4cwWFtZV9WmOvRdKeKd887kknPOSBhiVsilxNV8ljKnnABLTgFVu76rmxkCOTLMgKTOmOksGVMCBLCUcnG26ofeGtvOW0Az9kOK0TJbInbGoxfBLJJLIWTnPYqWkAExppQ0O7Ih5xBD5Ss01LQNADeVDyG4ypGaIcXLy4tx1znvQbXy85i6mOPZ8rJ2eNsPBqFqfEejMzqMglJWq7M45oIFWBdny5vrt0kyAyqC5NhU1jhbSonjILlvHIw9IdNQhiLJWWsMhjiIMTUZVUlBOoKG/f3dNoxia7vd7cZUmrk3BZq2JqHQD9ZzJnL1vDt0lVskxXE/WLvKpaBo13clwd3tmyQ5o0YBMiylgKqWggDWGAUqpRBZZu5SLlAs2spNUwGUmMKwB7XGVc6fzyqNcQDWprUhBs1x3jweDutu359dXKqU/aEn5N1hd1bXbTsnCdbYnMnWs7ubdzydwW2JEIsUYziX7BuHiiGGkLIhMmyA4dnq+SGtCURi8L4pUpQwpRxiYDYBEhhyVQuoWVPKabsdVThkXLKdLefsbRqGInnWOD7a0OJqOeuHHFIklZqpGHO43wtBbY21zsZEICSFgSQVACKCFNI3gRaASczwDWeWUyQaAIoIIB7JFKoAfGJYnIaj9/uM03xznGToG6owPU0sD5dM57/3nq56nFFOE/6Dyww8IFnH8ev0H9Mwj++nlAezpG+gR9OHmz6UgGQpqZSY8+5w6Po+jbFdLKqqTqmEkFzlSHF9vXn96rWy1PPm4tF5HlIpsdt1Cuby6Yu3L9++ef3FfDU73IW7/rNUMooAlcV8PnZDGmXosnGGSFtfEUFlDVszjkPt/dANIDIR1Q1T1TTMxpjKLufbw+bpefv5l9c94902futROxyGpqIffu/jl5+/8rPF/WFtNH3MVyzOkdm83nbxhhD//M1NDFEU2VIKMaeyXM1yHp5fvrjb7bvNTkWHfgghkq0NGcdWmbIm49kWmwttx7IfsqqkMBKo5qxJLQp7UxnuugOgNUSkQAhtVY2xLzmPQJZ5zAIKt/u1IYjj2DOS4XY2e3L1KMR0sz30Q+r6TIYb77XvDKO3tGjrkFVSWZ6vCEgTNq6hySRl2pS6VgEnqSAbAkWkh1Kcyoam2ZfwgY928mA58nHgZG11mq3fA0hH1czxWTANE8dqeeDYnTh3D2gq4WmCh8neEBFUpUjJJSdIP/vFX/6rf/svd9utos19BCyW8O3rN+9uXwnIfLbcbw5x7JU0FSXGlGMuMrllf4M3oqpKSDAZZBMAqMjkMX80BgbEnCIxqgoqEqJlzhIBwLBBZG+q2jX7wz0DGeaqakDLZn/Qkpn908vHh7s7KXm5XFmyxhjnnakMGQehFKSzxVJCSDICalO3javubrfeVleXvq5YS5jPz4jdodu17dyoxpL63ZtxGJaLy6BGXNMPIyKjiOEKkMcQkNv9IR5CnhujSuu7DbC/evJ4vRv2Xb9aPN1v31iuECxRKbFHbytbDcPe24qYYyha0FX1Yb/OoTPc1vVsGEZXtTEFoyKAxro8hBSScW44HNqzOZINw6FunDem3+2tnRl2Q+y7Pp9ZSYetm8+N5QJxtnyRhgKqOUk9PzNaU7Uw1lXpUEDYuQwGuMqpEHIW4SPBp6AKIL8n6hAdGw7IkTB5NN8HAFGgE3YzdUMEnLhyx06HD3iLvgddjlX8gMkj6oNIUk8w5vEVOLnMwYnQ8D41ACYtHAAQHtVr+o34tukGwon0gZNv8QmjPP4AAgCEgkA4xZNNltnywOM7Qkqn7QMRIZExbIw5EqyQvLXHFL9pAEdU1WMOlYJ3zjnjzOQ1Y2d1u1rNH12cO2sRFBFCGG1xMaXlcmaNXTb14+XsydXl+bwlY33TVJYgFxEh1pK63O9yTgpKhFKSYQuaMYNK1rGHnOCYx6bT4D/5IU44EQGKKBuvisRMDAWSMQbUFjCIYHRSSisbatizyS6TAJGxCobrxjet8xWwRWJXGcMeoqXpx6UcwyBjX2ISKUVMBGwMp8Qus6TiMCsVS0SMKiCJcPL6NhnYoagIqBCiIoECIjMpgrGamIwaNgoE1ioygTKxTtXBx0jQkgtqQQtgDOeScgKQkhKkLHGcfM0zkDIrKhIpiLJhY4mQGJkUyRAzMiESGgeu0omUMMHykkGtTpsnMkRUiqacNWWRJJKIQIpOXOaiIsgIKGiU0HnDhLbySAbJiAoiTQy0okVBARiJj4GDIkAspZw2SjLtk+TEx+OJeaoZAAyZE98YJoW9HomZAA9QUU65FJ2gpkneR0fjaz3N20iGDJM1lg0zGwSdcCJQABBmQmSRjIRJ0gmymoiqRbWoAkgWVUABBRRBUtGcJVtj2XvUrKLO8dDFMQwxxJyyt160ELG3HgRzKYIZSrF2TmiRk7OTcI0VIMXBkK29YUPiXRoTW7NYXErWIHEEfXXfSx9r49qrby2sGbuuaoYy7udnjw4QttvbJMlSqzkjABto67rf3LH31rW7fpclOJ4VyLtek8Bs0R7GnBKEMQCQFpBiCpQPv/fxxdVHt9fvfOO++ux28+7+d//+769Wl3Efk4yHQ7q9v60dnT9/3nd9iluBfV1ZY/Bw6EiIVpyKNu2ibUzs88wYTyhhmM2qynsVqIx2h51vPIJUbd3nAgbv1/28hdt3b548/ejRxUfnZ/Ntt4Xc3776yrTN2ePZz3/6lzNyxLR6dL7b7Uscw5CV8OJitT9s2rltGzuOWUoyBl1dZ7FKmgs65wATCoxjsI7rutrvd6psDHmDKYq3Bklm83bbDTEpswtRcw4XZ4u2KW+2w9V8OcR
gkERhNqvjOILq+bLebvc5a8k5BLaGBGECQIlQi+ZJ/jflVCJMeNykPjWIRcWSkVIM4YPdxqT8RcJjujDI5AnHlo11jERkrSFgYssAOPnIPUiQb2/2Xd9fvbjav9qOQ0iHWPMM3Lg97NrLMwBIEZvWPH324e6nn0SLI5ft5m3bPk4xuno2HLaraubcpYEqDdtSdu6sYmd+8NHf/OSn/65uMCITOS69FBoKeN8qhs36Xq0TgVyEqdqON9bOmW2M2XlizyEGQ+KM6Jj6YXizHmajzM+EUQrAOA6llHE8LKvFbntzcf7o888+eXl91/rzxepy3N1Yy4dtv9sP9fKqni2RkCzFFDQPZIgVlEm0iERTsORDCRGFi6mqs8dldq5mgeRg2BqMgFlLJq5ECpraGJfSQLYyEEtY58PG2Lns9+P96/3d1lUrrmamqYZ+G7cbYwBzz6SS8m5/WwrP5jXBcHf9dXfYWW6a9uL+5Vfrl68Wi/bmsIuhlGDGXfjz9Z+61ho0jbO3++3N9VqVZk3FoDP2T2bzF1fnrOCq+fzZ07Zwu3BjwqZedEPhVZEEb+7Wv/prz3br3R/8Z3/vp3/52fOPz13deC8ffOfJn97v/1f/mz/4/Kcvf+9/9Cv/j//Dv/qT//7Pfvjb3y9Jgsr/+n/7n3/2o5/84Je+/92uNVpddzeN1CXH2axZzut9P4xj2txvr54uQLQC8/YX18tnLm90tbqUb80/ePHk67+6/bA+7958ngp89z/54O1dctbuQljfbEgXlvDVZ7/41t/6ZdPOnv3wuUW3f7nke9jd3Pp/8DdyNPNv/3L7vL35r/+42uz8pzu87c5ffMAKu92wW4fFsxaMcOWURGvgys2uqh//6IsXHz7hmhhZJDlvkKEUVcSbu/WTj5rQj7/0S89+52//ZsnvXn71hh00i5aAU8xj3013QZFivCUAy6wIfUgjqjKhQipZAQWycYyKCCaWoeLWeN7tt01VBenYgIqAckoxiyIiWy4ARFy5VQxJiYihpIxKUqCpl5Ly0Pc8b5Tt/WZ7uZyDFikqMRWRpq1NZVOMq0VdRGpvRMnO274jBUwhiapxpCKODdZOFaraH7YHyYmNWbTLvo914/thP1/OcwgoWldWFA0rWlrU5vpmLWo66RFhu9vNmjak8O7+VeXc2fmy261XTx5rlmHoG4fMFEUXri5iiXapjN67br+NMTL588Xy5u62af0kWp8504/9o7PHZRiGOKaCrUBdVQU0l1IUQRKytqKlpLqyvq7fHW40m7PZPMbBejeG5AHjGEDFV44gAUFfsiGz3+1c62M/ysil6eZV1W823KxiCIToanf/Zo2llBCQGMEgkAqIKDuPwCEHYw2z88bnpDlBSqVqmjEmlBKHXaeZjS+xJIjO1DFEgHTYvJ23Z7tu2Gzum7pandvQjUV0t75fLeftYoExWcutcaiiotY6thTD4KxRAF87CxT2w7yuVMA428y8SHp2sSplhCKNryFlbFjUpDET1jENueT58rIUHA/B2uri4moIxaghMMvLc4NQW5PFVeSA0BAuF7Mw9pIjSen7kXybh+BAamcOqXRjNMacLeaHbu3YrtrF/e1arWNv4tC/Hx+O44IAIE7ZtqdBRaXgaReMR9UX4YloAain+OUHDtGJGTTBNw+erJMOQ4+8j0lShqdF+IQCHVU/JzmbPsSUKD54G03u2u9n/iMx5Bs78hNzCAAISVQR5HQZgYIUKTnHWIYxrXd7V9fnvsoxh36wxpxfPNof9j/+0V+kPqKaxx8/WcxbZ8wAh5DKxdXiy5fv7r683d1tiBmzlRhUIzG8eP6cATf3h8cvnsYc1cJ+tyu9INMwhNmidd7f361nbRNDEAWVEkNeLRYplf0hVA1czKsKUrfb+9pst93g+Lvu8RDj4TD+m0++Ln2Wu+vFuXv09GyzXVMq8/ps33ddN05DHzNbY0PIdTs3laVZe7m6vH9704vEECULqM5mddOe39zcjVmDZiKs0Hhr3x66PrECGUNjTK2rEaiLO8OEqADimUWpIIwhO4JYCitMTPs+paatMMeSxsVijmTAcIwStl1t6GzR1I1HgOt1d7fps+WsMaWSFDIooBXOlaTW1MYQN46ZrPoQY1E1IIgkIqAspRTIk50pwoNp7nGGlckVF6ZhD04mKlMpnXy49L1x0cPYDO9/4YQ10UnqeFIxTrWLxwSpI7o0lZUgGQXMOeYiuZSffPaLf/5H/+puc00MzOIr7Yf+zZtXXdcjiWToDl2RCCSEBAQxhawyUZdUSxRhZibOopMT5TT5H8UVEyI2HREn/QzhZPoloMAsoATKxgKgtY7Jbda3ipnYVVUtkEpJpeTK2bauL84Xd6/fPn30qG5qtqZZLhgYnYhCGCKTnS/md2/fEuP52Zxbf+g7Zby8WD17MuO0Z+a2nt9397aqm/n87Zc/HVLX7cJy9Sir7rY3tXe1reMh+MpjCZpD7fzLTbfth/1+PD9bgqWs+eb61bxx7fz83WbLuKpny83ty0V9gexDkpJjPV923ZvKOCKHqKUMxvuqquK4900rAiUV21ZhHIykgohFmH3fd0296PY37TJZx7vbvllYx3nsA5FzbRWzh4xlPIyITVsJURaK6maPnodxtIS2bZu6gGl98DBsUZOrL1Mm19aEpDlZW0uZwEtFUDgp0VBFp8jICd6e/uUB4/5rjQ91MsQmUkQ45tAf1Vk6DQBHhEmPgrUjyW3qfu9Jlu//Uji+z4Ru4glfOs7L+t5y++HjfZOdiXjiPp2kNXCssemjTO23lNMOAU5j8Tc0wEdM7ORUpADEhMTTOzMfrWwMMRKScC7TKGQmwRkTO+cY0bGzxtTN/PzsbDFvl4s5GQsIUqIikrFV1VTOt7W/XMyfPzpbLeZNXbumZVdRSaCQEUPqY+dU1cSoKUoG0QiomiOUyCKURRUYQCSR9dYZMlaBCaFoNIAKSq4pQMgeDQIkQ8xsVJnQqRRWFGE1cnRvQnXWiwIwChn2zkBBHQTRt61tnIOCmrAkGaPERMPBlACaUkiJQIpGcpx3jQJV9aytwBiuHFhGpJKTJmFmxGicU1UCBkSRYtgKoCKwkgIa9sQGEYGYjBVAlUyGCRlQFURLAbLGIGhBEAUQHIiJEb21znnDzGSydYVLLBPli9gwGYNEbAxxsQhATGT0WF0F2aH1hKwiqFkBCAQlsxirSTPmnFMfUxgQhUnBMBEdwcgiohJTHAoMRZGI2Pi6sdYZJEAmw4UQCrAxqoUIVaGUMm1zS0mTbB5FBeRoI33cN5FiUTwKdYscjwRKJEcqPmVNfw0qEilSjrzS6REyoaREZNkgAhuyxhETT5jiUYmJk5m2ihTRib8qKpMof7p/UBGIROTktp0RCVSttSpRCdE5JkQsOY6WMI5xkvMTYOubWEKR4q2f7PgIlRyVAM42u8POs4upMNmqqkA5hWDYIGMuUUSNb9E26+3QVA0gOd/OVs9njdtsb1MuIYdqWYek84vZF19/0WRw9VyTNrNF399CPzDqYdymFASbEmXeLO7WN+189m7bs6
niGGIBQmZAV9elaFaoLe3W7z75i8OX7c9Xy9lf3QlZME375uY+hDz03fnFWWuDpkMR2Xe52+9JR8UcgmghAnPowmHb10YYZeh777hd1GUMyq4/dF3oQ18UyqzxJaT9Yd9A1eW8vJhVVXv14fN6/q3S82e/+Pz+frbfba8+XP1Hf+tXf/TnP/Fl9vFHH+1ubkMfhFikOGuQJeey7raVt/d3Gz9bnp+ttvs9GxYQYwRUxzTsO3DMYexc7bohWc5t2/ZjSZHYYNWQQImlGCze+XHIOYExvkAaBmmaalmXHIJhAqSU9HY9IihxAZUglKTELElT7dBVho1JKZYiUwQMAIhqkkyn9RVMi3EFLEAISAyoTKgyFTwZJlExxhDBdGyY+IfGMMGREweMxDypW0tRY/l0LCLD7m57OHu2SjK/A2yunku/Lod+GA9xiGf10xna269fbrZ3Z5cc1KwuzhDsJo+//uI3ubq4e/kzZsk2tbP2sjkbByiRX3/1mUi/Xydo6uViCWU/ll1fQqNzE33RMYzD0PpDH+bu7Pzi8avrO1e4xP25u8glz+qqO+wtFdZwv94V3+RZlcjs+4BV20sacuJ67uZNd9e/Xvcv3w2vr8O3P2wDN1Kttt0uRq6ai67XmPrKWWygaWYpo2WHonnYq5Nqdj4cBk3Z+QrV0+oR1LMkDCWxFpYChDFkU80RqMQoVCQntp4UtVuHu88gOLNo+vt365dfCpr56ukY+vWbrxjZOINqt/e3ZGbWz1Vxd9jVGjHvb19/jWgLmOt3t9u3b0MYZcwVWd84w96o7Pro2vowprc3m8MYk5YETAIXl8tn5xeLylRty7ZtnzyVZmaBXO2RbAbTx/0F5yHGDz+8SIpfv37z7PLx3f3hyQfns7n/F//tPzf8NzKHP/4XP+u7vrpY/dbv/ZaOPVe+toIqt2/uZ227ebe9efvys5//5Hu//mtdHxaVG8o4hGitktX5eYMG0z6lJBe//t1Cpvv6i0Ww/Vfvzn7wQ/lBm3D3vd///uvX25efvqrOrlwF7cIC2JDEVN5Xl2//zbs8butv1anm+nde+HB++LNrvFptf/wzfbtZ72ftP/69miD8N//clmrz9RaF5t8/s4bA4OZuT2THIc7O5jHEqvUff+epQibkYYzeGRQ1hqraHYZxNvNM8Ob13TjYH/zw7/zT//v/Xge1LfVjRoNsrDHHu2AcknHWVj6kWIoUkJim6QRAJEhq28Zbm2OyzAabmNMYBue8IimCsz5JBmN3u+3MV967Po7LZj7mlHMgKIfDsJjPZ7NZUcmSrGUyaGvH1omqq5oujMbw5cVZ13Up54mxHGMoWYE4jTknQMdVbb1zw6FXLd7P+zyEMbDBWFLOVNUVE+WSYh6t4/1h45wjwiIqJZssxvkw5pDSIWyIbU4CuTSVZ1uUNcWxrVtnmFTTEKq23e0HdI1EZeTFzJNCOOyZtZ3N05CGYXBVZcdQZLSkpqoEJMY8xGSYrMF3611WIGMfXVy8etkTg2MWazWllFKOmaEYlP1+N4TwqF5t9/s4jrbxLz741u3LL8YgFsBaCjEnEQmhItNWNowRz003rNuMTYm7bj339WF/AIAcEgNUzo3DgYjZGCKaDh2GzHHtDuicZ+u1CBNixQjAk5pDS5GxxMyGUKWUBIZzLvv9lq2vmpaM2e/3de2rpoJhSDGMPcGsWcwWC9/cv3tJJVVVtdlvVu1528zjcD9v3dgffvn7v/LZFy/X+/4/+jt/e3P/Zn9/jSmTxTiWtqoQqYjkkovAGMb5bE6JlotH1jWgkAapG77tdobt0MdqzrmUi/NFyUUks+EsBMYSs/O2j7HyVZtxnVGdG9ddVdlBtaDpYlot2sWs7WIRKM9fXL273Y7DiJIfkKIHdfMEDk0pUTApJh7MpBEe0mT1uJPWCUs6jhjH59bJ1/TE2TgaIZ3YGadp4jTZnJhADzwOeAACjhq2B5KRTFaqD98qD9tyfb9Yn+hF+rBYR1SdNogKWo6WN6Ahprdvb+5vtsw259JWdujH+/VuDEMI+xzik2ePZ+2ZIMahBEm+qqDI7f36+u2bFBNi/vDb3yfkv/qrH1d1tWyaWbt89/ZNu2iZjOZkyRlhzZpI2HAqudvGnEpKGRVyylVTpRAYKZRxVq+s92Hf7XeHYX8AkFFKFvjR11/PiWoHWcJZ5UHiRU2VyD6Vtq1frdeEysaoaDNvmM16czDG1U3j6joo3F7fSBhtbc4vr67f3iD7McQv794Q8PYwVlXVkEfwr27u+1xiKaTauooQxwxZSuUrAZBcLHJd2yHEhKJoikqSsqh8U1V9GJ03BtU4WxlXMprauKpSiFNgzNBFscY3/uJq1S4W6/WOXXW2aEJ/QIEMFIvs9j0qUz33IqBovZ1bq4ilCCJMHH8AkKlaVCafT0Q8RpnhxPRXJjo6lZ4qSY95aPqNaR2PEoXTQH6qoAfWh+ipvKbpB0+3yWn2BgQ66Rpz0SKSM8Gf/cWP//Bf/+H67poc1Oj7fX/br9frm6Ip5VJZTpJiTqrCxpVc+qHPIHjMRJeHj5FKRuAjAeSk7tRJ8jBZxCCCFDnOIDotqbWIoHpjcs7OOaOcU0glEmDVVCDqXZ2A+z5drarW27BfS4hPnz2eLxrnfFs1m013Pq/7mPoYja+7bvSVvXw8X8z925tN34/Pnj56+miGOoQUF7PZPh7I+9nZxd3m5t3N3Vl7HpRXZxcQZf36c778kC4ep/GaLWsafeXHMSpyALPu4wcZLPBhH548f/H69Vfnz/F8drbb3p/NrHe+i/vKGGROoTODc9Qc+p7QMtKQsxAbU/elG8Zoq8oQD31fVT7F3lm77e5dfS6KqBYLSOitY8k5jqaZL0Psdof+ovbOVSWUbGXEMQ6H5eWKYaaphFKwPbPej/lA1QLsohSialHKHRgnqMCoKIoqzAIEWqatLiCAyuSqC3jCd96LGY+1BKfX4IF+ie9fP9akfrPkjsX4cNE3mGfvmyh+43J9f6GivL/6WOBH3TAd3+II8r9Hi45w/dTpRRU0w6mdq+rJWhtOmQPT1A0neyN8zw49gkaG0BrDzEwkonREOGmKL1dQRGsYrWVnSVSsta6qKu8d2VnVLperR+fn7cwbb0Ww70fv28rXRYpBqJguFu3jy/PHZ4vaefYVslXiylQKMsSgahIZRWurBQLnPNhUYQxcAklRLYoZEFTEWMvWoDHEpigVAASXJTtjRAqxASJEMUSICqiIRSESYpYo6JTYUA0IpEUES87ACGzZGSJMeTSusYaIUFM2UiTFMo6aisaUwxhzSjEnb8XUyFQ5AySVI++Md9Z4h4YAjoolUBWRFAMAEhc57mIKIYuKMhNaRNECk0c4TElHQmjsBGeI5smKCSyhEkjWUiZXNGv9bDE/Px/Hkqqu78Ze4phzKAhFxSJPFlNsrWFDWoAss1VAICC2MNlMsxGJ04mgpIwKBWKWoqCqgpidR2LnrEUmYieiSAyqRSTEkYtIiMMYhzAeQkJgayoRQAPWGLLGWiYm46yyzUBsBUQZeYq4nyoQzTEND
QEARRAFRNAg0RGxnQhFoJOn4QOt9MQqmigcx5tDEYmQENFMMwQRMzBbOjI3TqozpFIyIBKhIoiqSjlCqio0ua1PqZmEiCqqPHltowKhpEJIAtkgoqpjk+MouaSoIebKzq1CLhmNU9VcCiIYC1XTqCllKG3VjIeeEDNwLJpzRDBgSRIQGmYANCUkKxB3m1wC7O/Xr75AUIdQNWdp3/ckm+7m8vGLp+dX0g8u893mthv3zPz48tnt/VsywPZivVdVJKJnT64QYbWs1NIe7ZtNHIY4axtr6TDkcRxH3YwRYxzD/c6ZF3mMuSTbmrtrn3IS0cN21+1uU+piiaHrQQoZljQqaJCyPF+Bets0lRNQ2N32gXhbbgGkhDCrfeVdKQMBgQizffb0fNzn8/Z8fTgA8c2bO7grT5ezx0/46Qfzs9XzT37x5R/9d/96WZ999R/e3fabD7714uzp7Iuv30xB9N6Yunb7EPr9OKubze2BoSix8y6UEsI4X7T9phu6zeziMagPkqrG74bd1aOLUfYMpqTY1F5J9t3ARtq5c7U97NNm11Wz+noztgGd89uxD0UBSNERcElZdQCCmGUcC1rbjYHJUgadTLYEAAi0gGqWjIAiZWqmWcqkwPTMp8O2AICxOBGIAMCbKT0AiPm4DyCyhpjYGIuESsTGTrU5GW5Nd8Fqudh0w27YAbTDrizPvLP2untbyJfgYt//yq/94y8/+dOvv/pJXduxSBiGxcyNh62JOG6/nldX/sX39tt70kJUChopGVCs4WV70bT207dvgEOKGxau20fnF6vD3TuScja/CEl8VXdJhu12VCmkMY0XVO1H6feddYtyu7OQE8g6DIZ8GpK3tu8zscXiAXh3CGPCLoa1sdi2zAxlaFZmd4h9jMaSrczL68OyrqVUu90aITsI7Lymno2G4bWKsq1hfi40R19Diq5unANNPUDWUticS54TJBQRAnI1xgH2L3fXX/PiQ7dYDncvtzcvc8r16vx+t467O2991RjpYwZombT04911KcUiSJfjcKisGfq8vGoO4T6FNVldLebXt2tAXC1nfQ6L8yaibne7RJpIK+tnvl42fuHreePbypBzZ8/Ol8+vdH42BtfttmfLs+rxs+XyclyXRQXUoLUZit3d+g+ffbfbhv/6v/jvfv93fmMY+T/+gx/++X+4/uCXvp9kQOOG1P3Tf/LP/+7f/VvdfvjpTz75g3/wu//H/+K/+h///f8UAU3tbWMRtRsipDJfOEucocQuSlbj2FzwdqeX/+C39Edf0Fdfrjd/eZhV1ZP6y1dfemtrWVkEVXDe+Mps7g6Lq1lbPfri37/64Je/E20QpLt3by/HA96M9etNef25axw9e7zub4e6OqT11erDunF5MR+GcLe/u/jg+dWzKyKMg6hq3VQKSUoc++hb0617XLQpZDJIhN7an3zy86vFo8Xs6n/3T/7bR9/5m7//P/9f/NH/578v/UBOfF0bVQU/3QW+rhTIGRqiTpaLhnk/7hrbIhkUKDEW4GEcTduCqLXEXDHSEHPOkdEgcSiprat+7IranFLAERnn82XfbxftEgCKFBUxzJU3JUuldu5Nkhj7HEMZQxbYeO+QLRDuDgMDAeMwjIv5kggkay6pJCUy3lIYBpRcSra+saoMBMzDODSVryq/3uyWs3nXjVwbY1zRUPkml4LEiIlYra2K9mery363tmTGMDLbpm6dVymyPL8cukAyMnu7mpMkiFJKrjV6W8UY7g93RJBzDKWb4Ww+a3a7rat8LEWUvfeb3RpQ25mFDCxjXfsuxVyKq/yYlJHjmCwDG7fdbjFziHshAOMq617fvXXOLLzv9/uAcBiiAvdj0BqtghSJMRumnFLoe2ftOIyHfr9ctfj2hgiyshKMKSKSAVaEAoCWLbMi971Us3kRBQ/etyVsS4lsjfU2hlFU89gZNqK6H3qunJRSEPth46Gp7dxYDmMgkrr2UDKU0O8l9fHqu0/u1i9zyTEOgOrQ3N+tK7Jk4Jd++Vd260Mf+ssnqyfPr/axPJo5GgayZIVDyk3lCsTQdY68NV4EZrMzYLcfu8Z4ydjnEHKw3i8uFmMMjJRyZOa68oSdc3a5mBkDzvv9treuoj51w1pQ2nmDqIccDREI3t7dfv+7H5TNPqfYnM0/+vhqs95v17vTcDD5DQECKQCAyHF60KPx0MTuRkTCabuMIkpT/tSkQMCHIxccN+D4oHrQ9wPu+wW2iiCSnvKVv7HX1hOZgh7oQognNsV7stJDwvRx1JmgIDw5KE3cIkA5IgYnWEFVUsyxpL7f397dZChsjEdAxM1hv++6ITePLy6fPG6fPn2667uxj4u2VuL1zf2765vtfieA87P5rK66fn377pasyWAZq+v13Yff++7V6vGPf/znz771/LA9ENdu0ZFzFeHrr7+CIkwUx5hzLqUYY2JJZ+dX6y8/mc2WheJmvY79qAqHQ59TQYV11H2Bx4+adl7f7zYNl8rP3765cWjfDt0Q42rZ1gx5jNgwoF09fzGfVWeNG/uD7keCPL+adYfuF5++6YaA7EVKXVeqcrm8IITD0L/bH1Iq1jjv0Vuz70MuJSURIQS1AMRoSUsMTuPZvBlCniLyEDCGYEilaOhDYeNmLqVyyH2M2TJcnLc5FjQ2xbRfB1YwhhBxdwj7fmwrM6/b1eysH7ZIGFMa+8762lQ+BeDKHckZRKpaVBgJFESKkh49XeAUMHP8JSKTSxHKZGakcDQtOpYfnuDGB+IDfKPu4L1v0XF4PtbwlHlDxzdQmPhEiIqYS8qSYkl/+Yuf//Gf/rPD5t45myFu1zfbzTrlsRRhNjw5isLoPI1B+nHIkpNmVSTQLBmmdTeqaFEtxjABlwKIRzneNOojMaEWEQIRUWNYVVQLIk9TR1ExhitnKkeb3baqmNkUnQRTmmNWjE1bzb15/fKm4dlq+RgV5vPKGDG1GqYY0sXqMXIFqVuszhFks+3Xd/HJ6uqjD5+0tu/u7ufzJgwjKszOH+Whe/P1Z/PZ2X4fxpS5MofDOzlssqvx6gX4uuRITEqy2e398vIwDjfrDfCTebu4v73f7e7PGhPW14Ppr55+eHfz1aKd3d9vxmHnq0oDlnEEpZA6Q5bYg0gei7W+FIljnK0e5XFnMEOGjKVt3RjuVD8UWwplBNSSgMQZV0Kwhm1luj5oKmzcEHoQr8pdf2hDZRsvecTunZ8v1FvDVSlgnWHn/Xy+2dwaW2WdgTAiigix06IgiRSUTmC36Mk06MgbOoLt38C48fg6AIASHVGZqXsdu98DNfOh6wHA/08k33sIE0+w5glsx9M3nsr+/dfhGxzM4xfwG/fBdDFNiBKogOADnHpS88pJ9TZ9JyG9J+rB8d2nCGYAAEQlOrrKKBCxIBpCQC0qhGidMcpM5B2zJeuMq7w1VVPPGludL1fnZ8tlU7FjYsqiTdOWlFOMxprWm7O2ffZodbZq501lnSXnlUgB8zDkGCVHzZEAvG+BfXZNSnsNhhhKP5RcQDSVwjzFnREah2gUjSrmkkHUkI9ZjMVJ+4qYCQsQ
IRBCUg2iTNNvA3udUAMRUBTKwAamaCTJoIVLMikjCcQ+99scBypQYk5hCOMQRIsAGzStr9p2WdVni6ppvXFs60rJKKIiIaFMxDERPSLlAioKogWQFGliEhEZC4hIdvp8ogQGgVhBiRSUFFAJFYVUIWbQrKTO2mRSXVUXZ2fC1B769W4Xx74aDsYE4mydI2OPDwBiQwbIkTEACAzABg0BAYBaSyqTqTOQCpRcoKAWw2Qss2Eiw1MsmnFAE38NRaGGeQs0F40pD8MQxpByKQJjylllHIIMWlkzkSHIOmUmImZrjSGcLLQYiUQKHO+byU2b4LTPmvo/G6NHR0ZE4IdstJPBNSIRTdxmJJ6ciZjZmKMSAQn4lKiGpztENU9PipzydP8Q8pH+gRMlpEwbCGSamHrADEiomksmxJSyZeusD+FgCIgQgUWRTO3dYr97VY6KQ80lOVMRcQqpjGE8HIypqpn31oJw6Puqqcd+dK5Fa3LpLXO3u0fiunKzRXvoVNhY5tVidthscxrSOFTz+Xc//OHdZlPGIfW7oUTNMUdG5pvN9Rh6tAaQKpac8X7dnZ21Xdcn5GEXGWrLQI0X0d1hzEVTToC5FFAWZnz36uWU36kd73qFjE7lcL3fr+98gykPuT947yWTIwQwj5886uMoOccw3Gy3qPTk7DL2h/b8LCtQytvbO5XiEOaNrev6frcrRShDP3Rt7XIf5RCqZb3fjufPzqLK559/dfniUfVx+8VPri8en+OW7t/dh96u2osAsD/sAfD+fuua5tHZ1f3t26tHVyJj7JMEaRYzqqmkMKtdHvJ+GM4vq+76hvMCCu3XXc12UFGDMY1ssDasrErlYtVYoCwSS6msD2MwXFvmmEdRqwhkSKmUosjEQAQpxoCIOaVilXhaHIlOMWgAIMKTGRgCgNhpmYaAQEUEAZitTtl8p90AERnjRAsiMjHApDQj4okjyccFmQIzHUMMAAAgxWA0/MpH3x73aCjr/Tto6HxuuBRnWr96ktfX27s3ZzM3cPnwO9/9/LPP+jEsZ/MP2seU0v3910+fPalKk0IHFV7fXremWVzOwbh+3d/d3xGBMtvFWTkMhuH1q5/PfaulbO9uF2ePprBDAqntwrN/1FyWPJKr2bu77e7R5cxY2d/2+/26qYxDRvTjZiMaTV1v73deedjnL+5ub0L/nY+frd/sdtfXtoL9etcfRm6qcOjv1vvQxc1+3y5rB6U1nPutp7RYmPX1prZm9ch02zd+nks/zOtKxg6Slv5QoFirjFLyWgixtobYxft4/dn+9kv2T8iv0m599/mPIRdrbUrD+uuvNIzBe5xXYegViSwwU0gYQ1BmVO2GIRb0th3ub9frmxija9whhqKlFPZEdWPv9vtDGomASBYzX1lv2D05W81rH7sIlc0iSeVufThbfvuD7/9GTV3Y3e23nUgNYxP6Qaxm9t//5e/86E9e/t1/9OLrL/a/+oMfPP7Oi7svtu++WF/W5dEMkvXb2/D4xdLzLy/mjTXue9/7bhjzP/yD328a8/Zm7Yqb+xlb8+TxrGK33w9Q4M3N/qPn54B6/+7m0VkVwj7rMko8+/C78PTb269effCrzz/9o08Xq5mvq+vrbcF5Ee22qXIMRUTANaiAUkRZzr/73Hc5/uwWNrj/4nr1wQeua01/qH77Mv3d3wrzc3PeQCmF6GxxOTtvb9+uidoyFtDoWvf67btm5q6eXNQLz84KZHK234+5mNDF3/7NX/70p2/8rP6D3//B+erJH/3LP6QYDalaCv2e2JR44lOIAELf9dNqt+QkhItmrkVjioRkrMk5V64KoaQclvOFalJEa9m7ZhhSKsUY7MfRewuSvauYKaZ86A45ZrZSu1pV2Zqc0jhEY5wzdr/r2KB39vL8crO+yyIxSbcfvGM2JDkD0vnZmQiAU0nZGFBAJB5jJsJcZDZrixTnfE4ZmWZ1m0M4hI6Bur4jov1+Z2xFbAqU/bCrfOMru1q01+/WqLLfbxhQQb1rx66LMakYAd3vRy0J1NXVbLa62GxeZR0UtIAS2yzD4dAtl8v1/aZGz8i5BCJ0zjYgKVoylipnlaqZ6+73Bsq8rcdDcZURzdYaEjCWiCTlVFKyVRXSePXkUcgac1/PLkokQJ/Kmg1LyVYtIhHxYRw9aTfuNac4QHfgi8vzN9cHEMhltEwxhPOL8zjuD0MAxjHFtmoMHo9XxETEoe9Wy7NdPqhGS5STIoCzruScUypS2FnNBRFLTMzUVFUcB41D7PeM1DZ129QiQk01jr3GRFVZ76433d6D5pSbtk0oyGydDxT2oe9Cv4v9H/y9f2TbLPkw5N3lbDbEUBFLLqCSc0SAikxSmPkKkQ95tGRUxVVme98t5vWYU5LcsEiM6UBuNVNEywY9VjUXFUDjXAXSp2Fs2HQxSJGmrT0MYz82bTVvlzkkS5gRWKS1ZFrbuOXDXAwAogBaACa9wZTmjCqCyEdt/fGgI9NTSUFxIlaD4kmerw+uFlMG0Mmg6DRvnGaV0x4b6WhE/OBk9ID9nAhBxyH+tPf+5ir9NEF9Q0n0wA056rKPESXH78lFREQB+yG8enu9jzFkUZVw2N/d3RfQprG/8kvfB9GS5Pb2vkip6gqA+i68fvN2v9+lnIDtk6vnd++u9/s9Clxerj68+uDtl6/dshkO3b/56Z8aC/dvb/b3O0SgksfdsA19GHq2NsbsnCOm5WoZxtjU/mb9tnJud9h6b1LOjx8/ffnyJbJxVlWVGcHAdggxxCUDO/vZm0MZR4gDO98u2t2YRyjnyxk2lW8XTdOEsXuzOzCUmLMh/Prl7fX1ekxQ1zalXB+jUpERCmjICRTm81aLVpXLKSMBIRgmUzlE7LuBAevGgeOcjQJ4Z+KYHNNy3u4P3WzREEAKJWQpWXKBWVuHEJWoG8oYY4XlfLmocrLGxZj8kqwxhxhLKZv9Yci6mDktJWrcbfYAPFvMbVXHCEIMSI4NTJkzqhOXjQAnuGfyfDhVGB7Z2Ucmm+iJN3GsnuMCGR50M/DAfTgN5XJMn5rAIZkUbcd6xmntPFUyIoKIKEjOKWL5+Ref/rM//sNhv20bd9huY+6Gca+aimRyxhlnUTbbtYigQgxjiKMgTHpO1YKnnbOKIMGU2fxe43mErGiCRydjFwRlAItcVMpx/gFRZWBvuZS4P/TEBEiEXLJw5RFAS3p6dZnH8fNXm5lrv/29p8u2TXlUwVDC4nwmmULIVdsagtXjS8bx3Vfvvv7qzlH14YfPWyf7zd1sXmsWRG3n85LDzevXkEwsEMb00be/FcfDm/Wbjy/Pvvjyk/rZR+1qkXfXEgKoArIyxyT7YUwxVtYA4H67N9mcXZyNu7udb2bVfEzJkY393hJYY8MwODfTFAtlRDSGUhyVNOeCNPZDb0rOENGwmBIQXT2/376tZ8uQBiCKORtrER0p5tDVlTuYPB5Cez4LQzfJOLphn8YzY9iy1WHPsRM7QzOL48G1FhFCLm5+Xtiq8tEQfRIa6QSU6NTTFACJVI6GblMtHkHsE5fo5C6kAHSSD5xa5gn
S/Gv21O8Vk/oAXz70vxN17n071IeO/h4KfXh5+icdv3biB30TblV8ryCePswUeHVEWY/BbZNjFhC9x1wn8fIDpHQCq9Awn0BeAQUiYDKMiIiG2BpjjEcgQjTGWGeqpvJ1VZnK2+pqtTpfrpbni9msJssikGJCBXHCs1lV+ZkzV2fzy3lbt65ZzMgaIC4lZ1HJJo8ax5hD0lxAlQidYUabiwFjM5oCAgCiFgTYWmIr7MlQLkULQFEUARUGZQGVYgwjTOlVFtCAikIqqowW2YCpiqpIMWRTyUSEbACUIKcQDbHLwQxbDEXCYCRDjCmlHEpOSQBKyQjMQIy0bOrL1bxpPTvPzgETICKxTCIvJEVFNnDiycLk2Kxpyn1XAEQG0hP9CYCOVj5AMDF3pmx4EcHjHxUUmeIrmNlYNm1dx1IMG2Ic9sRYrIIxBtmysUTMbAFFCcgQMBIyG1ZkZQtskAwTqygBMFsEJTYAOl04XW2MATJAxJaQDSCrTp7i5BgrQVEti1ZEUykh51hKP6ZuGMIYQUtOWVBVyqRTs06nqDc2dvL2BYQJ5FGYKEuARwSN6GiTD0jAzApwTPv8a1DRid7MzEhomA0TExKBZcOIbJCJjjgtHrvApJkXEQQgOAbGHX3mpvA4xeNSToEmu4ST4t86q+gcZAZMabSGGCElyJpTGheLlSGI+9Gws5YRxBg05KWkHEZDVbu8Eil9ThESYVQXm/NL27apC+M41LWOYU/OANCQUtqvcywChn29vt+n8WBn7vEPrm7fbHbDLoR95e1iNduv72yF82bV7zrQCYk0KSaNBdE654cAkgiNYTaphxxzESRTGee3+52dm6HPApjGHjH42dxXfOhTGkttBGJBwjF2aLKogEbnuWlcGFMMadrEpMPYzqqLsxc3L4MIHrrBYGJiyQlSPDubV5YOu0M/5CENbGrJpeQikOqqev7B030/VqvFoes/+/SuafYm5zEg82Z5XgfZXL1ouh28e7fu+7tnj8/a2bzvD74yY4m3mxvr7P3u/ny5KCWIJJOzaezYh8q72rn1YfQVLBaPNGEaY9+XxXKm2olS5Wf7bkPGsTVjzojx+bcuze3u5Zt1LKnv05h1sapXDraHFGQ87HeWqXLVmCIzusqEPhaBYShs1HoqKkUUkJAw53xsyiCIxMTHVk2kk5xzOrsDTDw9ZiJUAlRUYiYEwwYQkIkNn+xH0RqeTPSI4CRtAwCoFy1FTjlnILV4dtau79908ZAZ9uv7X/v4mfhbt8B2tjzE8bPPvhyHct7WTNyVcdW2dcTc7UruVcemnlXOMVbX7za2sh4LQCYIUrBufXD7oj1T1aeSh0NbzWLREneLZiagm7vDk+YZuPZmd79qqpmRg3aXV8vNbn/96vDBxfeu/Gy3WTOmknMpkBL4aklVJSH92598efb87C8/u/7B6rxkSeshpWgay8aC6Ny7bkh/9fL629/5YFG7Lz//cnG5Kn0//mJvPXx81Sa5mZ+dVzahMSy9hjzmANDkglR5IoESnXdALmzvu/t3cbNrPvjNDFXpt/u3P49DqJzbre/W93fhEABSLFEkmEKi0TY2QCxiELiIikDOpAUKSioDKIJxY4FuSPP6zFdVUloftuMY4hAN0qKu2RAQnJ2dN5U1ln219JbHcdzdvatSeNvvxne/ePrk8XAgqBfr19g+8s++Pb8/DGWU/fZO8v2bLxf5MLz58u13vv3i//pf/t/+Z//ZP1xdLP7f/89/+Xt/7+9cf7mef/8pgd3edi8/f/n0xbN3n77tx0O6IOJqv4sDpUczqD13wxBScGKfnM2cQp/CZnd7pReLp/NhU8yZvf/ky5nkp029/nefffTsYrseKZnFypvGGsHhsKcs6xjPzmZni4uf/uG///7v/dJAyRmGzRpKH3T39H/6n4cvt/KuC7/46dmvfdRerYpKSrv19f3F5eMyQO7C8qKJRXHpmsYjY7N8gYr/9l//+w8+fPrkg6cZIWluWl9XLvXS72UMfL3+6m/+5rN/+X/5Jzf/4dXKzw79fjFf3dzdFwBf8cN4AEUUwUwbOCDUSY6v/RhrUyEVUq3r2RgioMspK0AMkYnIWiJYtu3Q96GPOai1bG0iT2yNFgGFNAQGKiLeWe+qFJIKihQEILSx72/6m8WydpXtujyfNykG5yvy0HfdOARfV91+Z4xfzJe73aAAxhGgknApisSlCDH1/XZ++Xw39ICWK4yHEU1dVT6GbIwVVWtrFDOO422OwMaAhBAxq6m8IfbeqSpE2uz6xWqmoMAWRPbDOmWx7I01q4tme3cnWc/PL2McJeV5PTMs1Fhr2pySL2FueBx7csaMRYfUgHnx6PKvvn4tOVliV1cpZs0ZDQCZm11fWeN904V+ubgc49Y7vzp79NXXr7aH/ZjK0/Pl2G8pCRZp2G8PnW1nWbBqZvvDwdSzMZU8DpVx63UXu2gRUaM1FjEisYE8+UcQc38ITVuJsqIhqp0tBGRNJRmUjQK1bd0POxECBFFABWO5Hw7OVYQoJaYU27bdb7eWiwKkGJvKpxC8xtTfeAZWJMDHF+ehlBcfvPjzn33iWyoaOjpcPm5Udi+/vl2cr6Dz+/s3FxfzytkMlCRWVZVTHiFkyr46S0UsgzXzEFLX77x3hy5IKrf722998ISRrXXG26GMAeLj5bllFQVmX9eZDANCNatxbSWPIcVHF6tu21UzV1nnyX304fnnX7+76w+//vyjfqje/eyT94PoCYTBB2bQ5OwDE9oCRHRcm59WaN/YfiM+DCCnt1GYMnQQQI5T+Te43Hj6Ep40E3jiI00vn4Cjo1jthC49jCbHbflxbaiKk5DhOPkcBXEn0dGRZz7N3iKl74e79fb6btONEQ2JlM1ut93tl2ertp4N/RgPY1XX1uP52dLVzfX13cs3r7b7/WIxH/vuctH+xY/+sttsl+dLEXTs7nb3i4vml37zh//uz3789HJ1vV2/fnMLORGWYexTyqDazNowRuOMtSbGrKopxLp1XT/O66aoCACDKRIVIqKkmJG4XswOu80InInaymVjbg9h7qwirNp5KKkvw+PVbLZszi+Wi1Xb3W+6zTb20VhXpBy6Pmdh49u5rwy1jkR16Asa8+7uHplAcFk3sWQD6hFjDpY1pTxrakcMKs2iZudc5UglpWLYHg57R1SS9GOaL9vz1WJ7twUUX/uUUz/07KmUElPprnNTGbR0vdsPknw763YdAIpK27YGYRhTH5N0QJjns1pK2ncHweLjUDeN9bWiJEnOWkKj0xIcjjIBPaKA07w9/dEXASRUkRMTaOLiSJkIcqdU86NCEU80oYe5YCo2UcEpiRkQVB6KFR8OVQgKJJByjqnAl+9e//Gf/tu+6wFwv9sP/bbvD2CAiOumts4N/XjYb3IKwBRSSBIFBHQaQ6Y/+VNE+kSwU5VSiK0iAQri0T0ZHky/QAlBUUQn7cwx+nuKrE4lW+tUMJfEGbUAEs6ratbOx/1uOZu9+foVAWx2/bc/fpbToZrXZ5fnm8PQ1Mvbtzfnl4+rurKgs7b693/+875P5K
rHHzyaz3h3+3rZ1sx66Laz1jPEw3a/vnnnZqtDV86vHq8W88++/oUVe7vtXt3cXr3+4nvf/u71sCF4Zt1ZULsdU0awTFByVA1EddXeb68Ng63maX+XyLizS1O7GA4aIxDE2BuuG98e+mtDYtgehs5XxhgT42Ho1mdNPR4O7OwQtBG7WDx++dVn56tHUVKI42yxkJxBhZC1cNWSsxRzXDnbzHweB5WKFHabe9uuqtl52HYatlyvlOfWzyf2CGog49Q2rDXlCKDAtbLDkid7KAQ6kmlET5wFPEFCR6O1Y61NatgJfKFpGYzHryM+RJ2djIUebIkeWvVDwX6DEaffqOLTzz6h65Mx++nqE95+vOoYgvkN067TmyMSHB25T40ZmQ0/kIt0il+Fo+Dh2MZPUFiZTNmPPw20FERSECIzZXIxGe+ccw4VSwFEMtbWde3YevbzZraazy7O55cXy/lyYQyTMSll39RUwFo7r2tf20VbLepq5o1zlq2Fyba4cBm7MMQYcikqRXIWQiJCg0UENMsYJCfVgiJIpkZGsKzOCuIxskoyKqqQAoCxCpOfVAFCZKfIgCilAFgyTEAwRQ1JAQiiZBCRAVAMKE4BICWbcUs6ICGrSk6Qk5YkUgSE2LPagqTtbHb5uD079/M5V46d0+PgJwrAACpZFVDKBPiDKopAiaAFdALsSBmQ7AnjQ1VRJWCDxIBIRUWKSFHNBCQ5qRQoEVQRCFCMcx5VEKMkkVKahktmSprAUCZG5ikazqJBYQAyhi2oAEwsKgBkJCIGY2tAReMIEZmPKAwbZAQAM5ltk5uimhABwJSSQTNlZWbjHLPNokhcVGMusUgX0m7fDXGMuRRQyYWJyFglq8hITGyOSpopRI8YAYhOC6TJN8tYJjrx6xSRHi6BB6ho+kYmQkQidJaNMQhqrCOESW4qKKd8THgg5SmUCZOj02HFTFGBCKpAyNN5ZWr3xpCqMgIiOhIFjoWKigIZZpGY4gAozbydN/7Vy08JKjIGUKclqnNtHA+mtow8xqwEq+Uyx+Scrxp/d30r+8AJcsl+NnczW4QK2sNhnXMkskFU+yghVAZLTu8OG4kyjgcFKpr6YVTRMHQkDFnAYFO1RYqKWjZc+ZqoDwJq3t5tHr14FL2uY1Ch3a5fXC5cZbpuXxgNOQTORWMJOCioGoNUIpOEvk9pcJU9HAZQARU9jFkyW2tdM6Ysmh3L9c3LgqVezD786Pubd78oOdxfv176pVoachIVgzgckvGyWrZGy2x5PvTh7c0WmMN6v5wvzx89u/rwKfSbzd39l5++zmcFbfn5J58/e3QFzGxle9jHGK3hD548+/L2nWcf0qDC680OEFWw78LMO+8qQD27mEUYU+QkkEMgppT0MHTojKRRRK2tCyiTyxl3faj6/umzuXXw6ef3desi8O2mm1W29hXJVCeSJRvHvnGHd4dSBJlUIacMQIRMQFlkMptDABGh4yoVAUBEJ/3jBIojQtGjHJKYeLopjQGEydkeYNrvElkmMopwNNtDMIZF9MQhhb5LVduEQwyR5meP7Ky9+WIrJUrVLFeP+n6/6UZu3f5wQDIzv/CGlqt2t9uerS4EUs5pv98P/XrWtO/ersnSWMbtEBpErA3VFQ79fv1uv89M4prF5y+/+vDJi/P5qrFngKYfD9vczc8Xi6YuJXcxPfvg+7at+s2hNvL1psO+z8aLbe8OKfSpriCp1rPZoYvt6nzX7z99+wYbLAwh2+p8UZ2fffHJdSrjs6dPb++75azqw/7msH+5eV3d++suScp927x5eQM515jmzZxns5V/NO4H1UOfQ5FYL59js7R+nnKOu8OibdKwlsNNfzgAIM+fZG23b1/J4XV39846M4b49s273Waj1hwOu9r5GHzrKmbNQ0qpsG8Nu2EYRQSESVEFiNm3LQviGMW7erYUTSmOY0jOOmON896xCyE7X3nfGEei2bjK1m2CLiX1xJh1f7MJUbohvvjObxRTj+C6vgyHcXUx94aWv/s9MKy1/bVnv/V22/+j/+X/RMDuBvfhxx8qj9/+4eOhD+tuUGuby7ldVVeP5//sD//svILf+eHV9u1u/tiPIZvGbnbd5eUyjl1M+fXucHax+uB73zv0++pslSpsf/At489s1reffAIpcTY5mU9+8Wff/49/A6Kw8+dXlavc/XqHc66Fn/3SRzyzjfXlZbf7xVcIDrPf/+TLfZJn//BXl1WMuz6kgZyx82p+cVbPZzfvtpvhsLziqp0d9iMKp5zZGFD52//Jb//0L774+qvbp9++mM1ddxjGVMjS+n7/6csvf+s3f/D5V7cf//7vfjHE8fW73bt1C24IZcjDxaPZdBc4yylJVdUpZWudxDHGxAaTaNu0CDpv61RKNw6qVNdtlghAi7rZD30BZLYp5hxTkng3jo8uzw2hoLaujikByjhEUGFrRDSV1IWDJ59VqrpydWNiGvphHGUc+5yKrxx514fgiIsUQCOo8/NF6OO+29xv19aYx0+eSilgZBh7X/k4Bl9Vl5dPxjSqNY335Knv7lVMzhBSzFmnY8ZYEkKRAmzsYtnA7uBql1ERSyqpWdQpSEuNqQmAcymWkMk29YXEnEoIZeijCBAam7p+1i5e3b49r1sHbBG6nJ1xhWS/OximinjfD7WlbehLKSRUGbtq5rt07yt++mzxs1f3bVXP5pWxfj4zt5utb+ysqt68eztp5n3l+xjZNzmPQTKOHaqmPCj6TGXT7dvVWbcdrK3QQjOvwRhbV0MsyP7svFnv91A0DaP1lQEEa4mM5UJAfT8Q277vF7NFglwKxBibqiYxddWkNI45EDILzeplDkkhT+04pGhdte/6yllUZRUBSGE4rMNs0VpvDeDVxXzTpTf7++Y7j54/f/rmr/7iO7/5fEEGUl8xj6hDifOzC5QSSwllrNiBEpAxBis3zynmUhQ5QTzsD8w87MfY751vrzc7+uhj42epiGMTiy5b9+jc56ED4xUYyFjvqraKUHzjk0jKpWlsfbbyrTGIYx8o5baqQ4hffH3z/Nnjy0erh6O8POy1j14sCCAnG42JcDE9k0SnTdwE8aAeYRqcJDmkD5PKNKtPEJQ+CBROxKLjEIXH3Bw4okdHj6GJMjsZuMjRgQjfT0vH4UrhiP4oTPZDD+MSHp2RTtvwiXmkpUgpKacxx1fX16/f3XWHztdmfX8f4rBYzp88vqqc36/3CDQ7q5p2Yci8+fr1u9s3Ivnx07NxH7Uvm3EdYsqM9/udQ2rci9nZSkv62c9+HvuBsOz3a3Z+vmi6Yd8Pfd3WhDbFgkovnn94t34nQ+j7mFIOY7w4Px8Oo/N1UUXDd/frGFJRXS5mAqSMpnI55yi4VwNqQdLcV95yQTUiT5vZ89WCi7z74u2dhX6/YzbsTF01h20fisZQ0LJxthQp5Ptx6EKCPjhrqqqyyruuX81r51zVzPeho8Tni9nybIlFDrseraubOmXthn4+mzvXjGFsGt8fwux8nrU8++hbTz/EP/6Tf0ugztrLp1exH0xlhzEXRjH+y/VhyLmyvEpgk
MOYyJguADKEDKCIKVtjhmFMRygmpxClZJ+S1rX1nhBElYkZCJhATrM3gIKAKBICEBzdnx/sh6ZzFQKAiiDxhCfhN7yt/xqfQvUE3zxoCeDEYpvk/3S6G4qCiqSCere//1d/+ie36zUbl0PX7XaKxXhrDceU4jjm3BsUJBhSLEljDGWCnECPHA2a7LQnlPYIcSoAoSVSKUKGJ2kCTKQVFRGdPF6TFFAgQFa1hIyQx9F649gPqSARG9aiEqWxtSMspuzHsRsSKfzNH/7K2fkqI7XGW6xLjId9trZZrGYq0jr6+uXXhyF1yqJyPqu79XVbV8aa7X7tuWaL++3tYbd3TZUMccOzi/Ywbje3dxnyXd/nXG7ffPHh44v5YhEFgtL9ELaanQXLJcdQO2ss3+93F8bdvH17/gQrw1ryDu/b+aWaZoy9McbXsyENji2Q7YdoyRA4RCqxsLWKmFPpur5ZLbxvhiE531jjh37Pzvmq3eyH1aJGRtWSS6i0ntV+iJIkuMam2MeYZm0lkvvDXXX2CP0sjYPt7/zKF0M5HQwDuaWwQXeOY1ENmgq6RZnWugpMZuKIwQOhCPg9oeebAQACp86JwAQTHHPCbfRUXn+tDb/P7Ts5SR875QmzPwKa3+DI6ftO+MANemjAU30fLdzfI0wPv94zmo6dGifLLpiy6yfYfnJ+Ezyqiqf/QSKa3IselMjw8KEQidBYwzhZ4Rtm64wzSMeAqknOROysr101q+t527Rt085bw1RKCSGWnJ2vmnY+a5rzxbxqTO2tN2SNBcRcVAWkpBRyyQIqSMiOSzF+4gSWUKTknPLEkvYenStyTNISJiGDoKUIKpSpj6gAAIMIMzAKA1gjINNjSMkgoYKyMYpUpChkJCEE1Mk0LYMCQEEERMYpErsAAOSUVQshMqOtfEFWgWIMt3Nr69rXjDzJRFRQkFAFVEAUcgHJIhkRCwAhqRQt0yPCAGQQhaJsaYpLRCJEEgUA0imyTrPkknMiRJEEKqAZCcgSFCiiyAhJEcEQMRgSslyhE0UgSAJJAJgYrSUm4KNJISIXESIgZjIGrXUO3ZFzxIooOqngFImBkNkCErElImImVjYkClhIjw93AC2iE1/F4CSfLuhtcmT3Y9/FlHNRp4QIx89jEdGwJWa2FggnY5bT+WHyFGI8ZkJhlmOKFNGE9h4FyCeoiIiQ8Wi1RYbIEBlDSBOsxITH8wohCxQAFdWs5QgEE8rEFD3yXlEVpg7BiADA03tpYcbKEhNoimNKiEhkFVUlpyFKVASumiaMI4oxhg2T5qwIxBRSRiUgnmreANIY8n6Ieb+WsWnmZNhYzvt8c31rnQNgJXd5sZy3btt31cWT1Mf927vKUAxjl9Ps8Xll6leffmrVTALqZvbR9n5rLfWhN4hV5S6X8+1mP4YwxlQUJeP3v/t4N45xDM6RFrTejBm+873f/vijD/5P/+X/2TnioqAYQ5E8FCAmE8ZoGUIIYwhRUsgFiIhdBLh69iyJXpyf3b66STl+77vf2V+/REJD+tnXv1hR9jZ/6+MPNjdbUFienZc8hC6gZ+v5/8vWfzRLlm7tYdgyr9km3bFVp6q62ty+97vucwAEAiAlMshgBCPEgSYKKfQrNNffkOYacKChpAGDCgUlACQI8APw+eu6b9/uLnv8SbfN69bSYOepakSoI+qcqo7MnTszX7ee9ZikslguNuudY14dH9umOZr5u6s7Gfjdd99iGOrlyRd//MvtzS1aOX92cXd9d3J+1FSYoqSI+35ImmbN7Pr6ZtXUrqqHsG3a+dhL6qPOhDz3YagamK+qbh/ikLzleuaGblDIzjFxtV8PJQEABs1sLXrqk8jt/uWPz9C6P3x9Q86D6Ho7IlDVVDFla6gU7GMvKIbIYE5FrKEUhVCRZDodFymHDFbEKcBiMjE8OLijMiKATumEigwqHxJhVA+YKKBMiWjTOBcQRSiABIQHbjMxm8cVHFDRWjf2GiO8fvd6p/l2HJa+OT87Xl++M1SCgWj4aLna3a+BbKm8t6frbY9DTwHPTpanT5qvf//10fH85csX335/uVxwyunhbh9KqRetZTuUDoR2t/2z0+czKc+WR+9fb4EaAN8jNba9727nQEW3wd40ujLeO2eVQ4hdZkArd8PD8nT27bffpa4/NbWg3fcjGbx+fyV9fLFcLhfny5NZT2HLOmuXl9drX6/2e31zeYdVcUa2m+vni+cPV9c3r1+n1B+vznSXvvpmc3UzvHl9T313vDr+5MuXzfPPeHGSMofdnXTJo24ud8PuvbHsF0/c6XkI4fb7r70kKDHm1Pe725u7ru8z6ma7VcAyqmgZhr6qbDWrM5R+VIKIygDIpkWAFAOYhJJJaFY550xh3PdDjmM7b9SwFvFV5a0/Mo3xMzDIFkSE7XJIXQGt58thEKr09NlRAHN8dvbJP/757W0t2e8e+sVRNQzjYlbt+75tFoAYhvj/+W//4p/8w5+VEP6//+9/95/9F//EMCDJ77/6/sXzF8dHs3zk7tfri/nRf/Ff/KPvv9//5b//6osnxwZ9Wxsjev3u6qjy+83m+OKs8giCqvrm7fc/Xc0jSO5h+8131vlP/tFPxv5BKzZ7+GnzBWzDu+9//+KPfoqOkiTD2K2H9ffbZetN5VOGGMajn362Xf/G6bCXV/ZHP96WcUQ+f9LCRvptPzOuaJAYzp7P1QnV2vdBALqusHObfRn6/kVlXnz5TDL0Y5ZxdMaMaZCsRwv3xz/61CT79W/v/2//z3/+X//X/8nl7dXR6XK7f3COqG1jOWwJkjWnlAiZKcVRtNS1QyIjucSiTP0QsyQCQoWYx5SiYRJngcSYCjOplrppmvm87sOy9gDikMZxSCkbb9q2BdExZMM8xL6t5gSqIrmUu/W9NazMd+tutaonAkfta0PZkC7b47dXl00qddtIykj05OTpbrvOIYIUZi5lJK1q70PMPY0xJEly012enV20syPNJYzRMrOhFPvWV6Wkulpdvr9Gh95jXXlCHvd7MJaJUVVZrDdFQXMJoatnC0Mm5/Hk7PgPr1/1266qV0w2pfvnL55fvX9TMLI9ghAcY2B0TSMQYCdNzZWzYxzqxoJTMVQMq5RYemuksdyw5DySVq033dDVftZUbh8ib7HvhsWsGaDvdyOagmyE0HpvyGYGrjw5FhHrXY4hRxMLeGdEdBiGpqkUeLcZbG2RuDAQURJxRFiZPuW6ne37DRnvfe2bKlOpGtx3w3QmsnWTUqqrJquK5DGMBqmt5/3Q+7qqrIslT4d6Z7jyLueARptZW7J2XVhVjSO3u8/HTy/C7n5RQ9rfpzg+3Ky7qNykVbu6fvvqaLFy5IyCJ4oyxJKc5SGJq1cLNy/ShXFErsbUhTC6ZoWh2CoJsRrDzLkkBcxR05CWVX3Szr572M+W85ITI8xmzcPtPY8FkxhrxwRRxdce2cxXtWpmKYva342hSLm/v5/P2g/IyuSTokUeK+aP7hpIiIoiBRGVGPCjQQweYsVUS0ZiUX2k/XwQIhwoP3JgZ0zNUYBDuvmjrm3anj42
3w9bFB5MPh6hoSnSFvVAwDjUKR/pIx+y2BTwQ/WkjzQSUSkiIebrq5u7+/th2APpfre9f7hrZ4tnL19iSVJKM2ur+ez07ChHff3u7fX794pqjJVUDJfZqrrZrdWIBPXO1a7xXP/+u3efffIsl9AezS9fvXe1+cd//s/+57/4H7XAk4tn1XK5vr5XDc7zen+3329RJ79LUdUQg0oyzETwsOtAwVpnNDdtu1mvh75brNpchIquN/0WzdPTeqfsAIFl4aoU4/u7TRhiClJVXooYVzTp9w/vGYyvfDW3zy5Od6mPoby/3u2HsTbmuK5jTst5u1nft61ZrBZJ9aHfLeYrY2s0IILzo6aZJWUyzP2YEtLy5PTu7u6+37y8eJZysm31/tXV6Wb96ctPnz67uH64vVoPXSWz2t1fP/jK1m72+8urql4Ie0Ht+zyf+TGHIuXu+lYZAKCyZuZ5Xvm+j76SlKVufC6SQX2WIlpJUanIiLVOMIMykDIhEKNOteo0OA/jSg8xfI8F8odSWVW0ICggPdIsfgBAHobcQfalCgDyqOuaRi1Ndu+qqKgFUgbtUv+Xf/NvHm4vmWW72YZ+raDe1cSw7TYlJ18ZKWM39Nt+F3IqIqIFAEUEcPKSOdhowAE/PTjBMEHRLKUAopZHuxqauB4ChMgEBJONBmnxzI4wSam8L6pdGHKJhjGrGCDrm1Jwt98uFm0uSRgbsn/+Z3/Klty8qttlCMWyaarWn5wSR+/x/ubuD6+/t7Oj3f3w4nRmdLeYIUkJYawMWoKUh+1+k2Jq5/Ndsouj+aJqbt++erh/cE3dbwcE0hxQesnZcknDrvXVAHBS10/OjmMoNVCJqevHqnUt+t1ui6TH7Xzc3WUEMp4klJCbZj6UnaKzsNyHe+vJWBaRHAqTxHGffYuIJQQk5Grm/cKwS3G0lU85+3rR94OxZgzROEpxqF3dp2CtlVTYzepqpRKAkBC7zX3TXhQQnzKnSEJgrcSkYszsCdFMZWuAULnglIMW+MN6VcpEKyJkOYwu+gGj56ARRASAqX3Lk93bYaE74EmPzm2HKEmdYEo4NI/xw9IKE66pj7zLw0B9pHc+DuIDWAUHvuXhmo/A0aP/Kf2Qt4REH+/38GOaFPKIz06648OLPDoZfZhDH9LSFA5yLRRENAaNMczWeibrrSWiIlJERScFGglCASA2uZSsJQt0fexkDGPMeQJSkNlba2NKPBQnRRsvqEhGBMcxlJxLSliyCiKQIQsOhXKO+xAOPsmQAchab4EQRbQkLaKlQJbpk82l5FzwkKBFygaMB+uJWWhimaEiAZUMmZiEnUzEGCAQkhKJBFRyzqSKhBMbCzGLZCAzocKKVFSR0Tkf2SowIBvypiilCKyYhZQRQckiiJYoWVBFS5mSxWDK7hIpJYlq0aJIpAyYpcQJkyP9uGGToJaSx5hzEVGQoiIIytYgATEACwNpSUhSSkRVQ1h5T8RxUAKBMgIQTRI1JjIGjFUpqEW1MDtFRGJ2lp0nBmIwBGAmoyUAEcrlwIQCJGNEIpEFUAAWQCVmY5FgMi2aRt+kMlYVQmO9c857Y33tXB+SlKIASgKI1igRs2Ge9HGs05KJh/F86C+pEjIzAQKTggLT5DRNj3PnESoyzJMruLXTm2XLDCBENLGs+eCbWD4kbCA+SjEnDRrS4zWnBpcQIyESEQAyImhxxgIVQjDGAJqcUklQV1XOOex6UmFLlmtQ3m72CJYIJiyzco2iojUWq9iNhNpUTYUwDB0p5VGbemXQ78NdHHpDlsmUUJhkHIar3frBirFmWPek5Nl5Y52vjo/nKYTd3d3nf/RJv94NXba1u7t8mC0XBgFIwXIqueuGlIoQzhc1Ort96BRSTJkzmkT7cWirKqN++81XXaD/6j//3/3N3//r7e2ldZSlAIAzHEMqMaphUTXWskUkTkXbZmZ8RcatanN/d5tDOmqWVzcb7dOqqVKQk6OFz3HfrYuENIYS8TaBNyBZqOLQB2fM0IWj4+W89XfrDstopak8N42tZrN6drZLtmmXVMqr776Z1VDX1fpmLYCL5Wo+N7AreZSg8Wh2QpIc14PuUkreVxizlOKAGbkUiENXWSuFpICCVjOfQ0xjsK6tnB01WWuHIJIVEMIunz1bjffh6dmKCv/t7y8VeDWfvXt/j2BSyGCtAlh0MqpFGlSLgGeeOkciGlJi40BAtFhjVQspAE4/kJkns0I7YcNySFhQACZiJlE1zICcS/bOGmYBIWIk0kkjigAA1jjVTIhyYJyCdQwESjJbOuIY4yhp86c//tP9+q7fvK5NqhxWTb29XK8fRiuaE9xeplKkIjc31Xx5NK+q37/9FdV2F/qr92+l6yXHxdGiMPd9XjRHhgMOwGheX1+dVE/MrLrt+ur4mHDVry9t3a7XQxQ+XRxZEsCwvb8cuuQE2blaSXNkyl99/evd0C/bpRHjx2KqWUYY+uHsyfnJ0dGPn5+LWitpf//AWb1v1uu9bXDo931JqYuSrJXm5n6374tVExOMfYCYsKFNLmnbz8C57M3Rhczn/TiE69uZnRa+DExmdsKO9tttvL8vQ6iMm83nv/7qNXPuNg/vXr+zlVMJltC5SpKASJICWcYuFUnWzDAhk7WOCUuJkRmN81lQwTEbqxgRUmFXz4BJQJgcWkdgFssnMRuqneUSh7UQWl/bMiPn67Ydiwwx+7p+fvz08vurUD19uN3+8nxetfz+u7vNgz0/mf/61982y8Wybv/T//ifzo6Bqvo/Abf6ZGFbx4JPl0cO3b/7q6//k3/y47/+m9+crerdbvfJ09mPXnwRw9B1YbsbGsfLVW1bdzw/KUUeHvr5om4a98nzz6Eoo3JjYL4oeRZzffXVr+qTGVETB+bz9vSTlzHC66+v/LJ5+ZMVA56+ODEljQ97TVlu1uJ8//vXTVs1TWtPjnRb0u2uXJaYojeL0uvmzV6Pqtvx4fiTpQFeHdU5ZkBg50zUFbj9fmwbJxrns2ZE2K/3T1+c3FzviOHZp8f/7i++/5vf/AoSpXtJD0Pq1qXbqkpVN1rS47EGrPdZSgGY17OQBiAOIaiiIqy7u5VfKiBZAgAmSEVEYBxGZypVCeN4OEuwLirLIMPY+XrpvEOKZHiMY7fbrfv+ZHnmnWM2Jeam9kUygMza1lrLZvS1A7GMuN1ugY1haJw7Pj2RJOMQoXCGNGKZrRYZxLBRVF+3SDYOPbIppXTdZjZfVjDvdl3KyVqe5DmN94ZaRRhjMABPnj0NKYriOGZidXUDILGLqaQigGgBhZUX9UKyjCGxwv24ZsCzk/Ptbl/I1rNFLKEb+7OTp03TJJWx20rJrC7GaO0EY+ustou5C0NUVUNaGcMIs9ms9W4YdeYWM+fGIThmydFQezxr9ruOtcJMFqwllqRZIwJV3g/7fHx2bBoEyeMweuPImpRLyFKGtN32E1bADKy539yLxNrbGDITpLh3rtESS4SKjUVyzAoY0yiSUgzKNMbB2yqOERCX8+Ox31jUGFMso6u8lJxBDRuJccyxdVQtFk9fPL1frxGl68d
6Pj87Xt693Z4/u3j5yz+l6+9//+1vucC8Wj7c7nU/+mWRqBYgpzBbneRhL6qVq7UIqBAoKY1pqznV3gP5MYQiijkBqMIoKc5n9uikCmlwpmbXKj3MZrP77TaUfFxXYxcByTiuZ34EwFJI4Wg+a89md9ebcbd7elT5s2UKeVW7m9t1DHr05LiPH22t9ZFSNLWFD9SdQwmL8oENpDJxuEUKIh0qBTwQiQ5Pe6xU4LGmwUPqM0yi/ikWVh6rn48v/VjvfCg79IAN4eP//8GlDyKLQ8ny4X088owmysijRgI056xSYso3t3dff/P9m/eXY+hIabPZzJp5Wy9YsevGdt5Wzhty68324fpWS6lm9Ww+K1Hu72+PVgsFeXX5LuyG1vumqRtX77ru/GjJUt69eSuiJRdv7a9+97fOOwZBsvfvrjRlJg1DyKUwUAEBBecMADrDUvkcxmEYa+/6bsiKta+Wi+X19TWAWazOxv4dO1LJWdKYfRdUQFfzhmu77ofxZheGYi1rNxzNm2Nb933HQFyZjOCdv76+3+x2Krjfp3pWGUViLmm4ub9ZtO3x0enNemub9uziORYdulAwV7O5ZZdyZEt3t7eAZHwVRMY8VGS9a0uD79/eVrZd+Xb9+v3SmDJvN9uwD3mQAtaI9WMqxlTdELebu8+fPx0F//DNpTFGtSgQZCBDw5jCyEPIrbdJpKiklKJPTS4lF1XJJUkRU1WAqJOv+qQXUERVnNKSQQFUVA6uoQpEhy+eJnP0yd1DP4KOE3//o481PNI95LEUJ/w4sB6dZmQCNUGySBH57e9++913r7yvadxj6ZlBPfd9F3M/jiMbLgL73bYbulSyTvSFj/ZDBxhUP0yWD/ojQBVRFCQCFSI8QAwKhKBAxmDMeeoLliIVQ+udqg5DJItZUs4qUogsKtHkS5X62/X64uLk4XbtSf/pP/n5k2WdJJ89P6/NavewPXlyXNUzVRGgfrd9/+5yNju6vengYf/y2WpOggTjEAxiVTX9/j7GEAdaLU6HoBWbo+Us9ftX331H1qYx932wNnvvEDAFLYWFeHG02A798XJ2NG/iGIrD3ba/H0aH6gmGbQLtbSkGTHi4s4sTy9zFMcbRoxvH9axZhQGLjsAiJSGBgilCQz8aroZ9VzGVGM3KAGKM2aWsB59zLSopZWO9aiwxNlUVY1rM2kQpiVTG5xJUkkEK3b5pq5RH7LZK3lkLMAlNjAEgKIBaMhM6KAoA5cBhhEcZC35Y1+A/WND04w/UA8ftgBMd9F0HDtDhQZMtFk4RePARTjwM7AMJbfJB+gE56PB40B9gRIc7Avy4LE8j6eOK+ri4EiIqylTtHvh6AB/N4A9Up0fUCkVlcv49vEd4VM6pAgIjITETW2MdkSFqvCNiBGBUVUkpySEKCiYfIEJQ0VzyMASkbUgShpDGUbM0jZ/cg4auGx62bcXHs2q2aIx1QAbQlJKnXcqgURRklqQAiTCKhBSHPvQqyOycqQhEsUAeRaKUyFlEQUEIsUjORRgIichZdhX7Gg0DApERUWIHmnWKjjes5IgqQsI8KiQCkiIIRMajChs6qEvJoU4u5moYi6qKWmPRemRvTZWKIHljjIIIaM6ZSkI2wKCSQAsUlZJLEdGsoLmUSayqqKXkUhDIamFANZa0oDVMKpPTM+ZUIopKjrkUSSlOkdnGMLEF+rBhCqIwEZN1DkWZpaSiwZEMJIG0jICiAARKE+PsUM8iGUI2xjlmZlJClFImIboSIhFCQS0iIlKQSQQBWcQockEANsqWrQM2bKvD2CEiVJxgyknXhtxY1oGkYCgpKzA7IAZmZYNMAIx4iBSjiVE1AbSIIgdW6cG8DsAYehTS4wcF2gEqstYQG2OIiZnIMBFODug8IXSHuY08TSYC1OmzeYzJPJx7cEr/AIDJNhNBp7sRIgUSJrTGlFLGMZM6T0bF5DBAEVUtMtT1Yghhv3+oTEMgyOS4ZuPAGjJoIafCKWUyvN/sSkq2coKKQMM4GOsNGBAxtcshSEzGGRFNGUtB6QdDRDgMph/6YG9rzooZ7geJoYsh2wieZBhunXEFUs6p20eISQpEVbLc1Iawury+q+aLxRyxcuFekrU12ZjK7duvf/t3f7FaNrYyhCpDRmQuqjk77wAkpMwGnatD15+cLJqqvrte00Bioe82C7c6O54bI9pU1XKOQzFsuv1+HAMbahazsU9CstnuPVsCmaLi7+53Q4CcihLOKn93f09M6/2OjTk5nW/32pjaz9yPfvIJ5M31Xdc49/bd/RBuZvOqnjd1Zd9f3iNaTWUcbo23UCCEwRg/8+1+uK+a5mE/Etiq9knEsBmH0RgU0KypZjBzK5QQwKHbPHTG2zHFh9v9n/6DLyUV+8KYRf27v3k15Hx0epSLWK73+14V54sKSAm18lwKGWdoMhEtijixiHXidiKxiE4tMiKjIKgKiBO1+oOgk4mRQQEMTREYSMRFAUSYUaWAIh6UbCgquWQikMnTEwAAxmHIUPI4HC+OaleGXX/eXPDYwP57p9Y4unu4b/QEi5TSO7aL5uiuTzmXxbFnwRfHz1794e80prKYNcez7f0dqNSGJRfLcdXS5u7tbX8/P1tZ647P5fjJ+d31uxknB7JaNY1AJUYZ3On53fVlDvvF6RJEQspm1pycLrZvt9bw+en8m9/fAHI/JOtMszoa9lFLShlPjp+eP1n6EodejpbH67veqtluN7Zq28X8/vp9XVUPN/1qeWawFi67ND45nbdKmvLqdFaE0TpetAXceHJ6XTXbb78/GbpZ4rt+18UdGefqNgtYst36jg1fHB29u37/9Te/QREYxhhkeXwSJROwbgeNgYoQW9FUgLhQ7Wfer9QmAuUJlSaHimg8ZFDjiG0OneRcOQTrme1+3ynZ+dEz6xsSbwAJrYFxl7qqns/nFxqzMJM/rgi79djfySnT98PbX/5nP/n8uP5X/6//4R83Pzs6Oht6e393c7pYzGZL2xhsCje8XXdHK7+92iDnChyRWgjPT49CkF/+/OesinG0BOvNtqkMoTlZHlctOtt8+4erp8/mCghItXOSUm29dabb7kpbHf/5F3APYsG0M7t4tnh6/O7r70wLzdkS1H7WvgQQYyHtR0P+V//yf/rFP/qPwM/0jOOQ6y++4NVyeLfe/Df/4skf/+nCNtywj+27r1+/+MWnRy9PE9LJ6RPg8P56t2xczNk4YwwIMEEuMV9u4slRPexLCaUytUStHHe7oWhcP9z9H/9P/4df/c3b/8f/9b93vlnOlwCl63oWHUN/OCMhVLPZuN+llO/Dg/eOCaQoqBDbk/ZcCzrr9+Meoag4ZyoiilGkaD/ujbGNt1nz0I3MBNa2fi4lG8NZMoyBECs/P8HGWFfVLuVBkiAVAU0hD9Bbw43hsRtQjWMCIMNOswx9EhFUHfthuVpZ3+66rTV27IYguW5bQkYmsmQ9I1A7n+USETkOwXnLaJS5AiwhxljYuuXqdL978MR1VT2se1buQrQzp5oQTMnJmcZwc9+98VwhGGcYpBjrC+BicUoKRRFYq+
V8t7stJp0fPWEJQ7LbrQr7DKQqltk7G7pxUs7vH8KyncVcNMZlPbOlmIruN/vVbHay8DGM1WwuSSWnkAZnydTV2K27cScSK9toLFl4BF6drZ6cHndp5wzGXTLGRk0QcjVfxbFDymPs99vu6OzUzl0ZdNiPjhywAmEcutwHv5j348CClp3mwOT6bqg8o2FJIeQotrZ+Hvrka9PMl+Noc9mrSkkFCVJKlHJdVcpu3wfB7RjyF58+n58sr27uc5bNJn75i5/K6kTaElmaubMl8klVm3zz9Tp26/u4NYUq96Qfbi3idtvPqkXtqv1+fTQ/q6yVHIdxaGezmHqU4gmZ2Tvnw5y47sebpqq7ITSztmhA5Pl8ud2sd7v0VCbMRknw7GTZx/WL508vbzf7/fjJ4pkVk/YP93cPf/7nP3//7r2r6ofV7N31/fX93Y++fPZYI09lhCDAVBscAKPJbwjkUH4cyhKcrFwA4CABA0CgA5VjKnYfY+unS4vIAQk6XH0qmQ+11CN1Y6qSFPFDPQIAH5Cfx3/g4dCoIgdW0WO1f6i0DhvcBymRIlDWIqXELHcP29fvLm8e7je7rTF0e3Mbw/DsxfPz81Xo+8Wiqdt5Xft3796RJ0wwb2fWx6b2+7T/7PNn2924uVuHEKejYClwv9muzs/bxXzs9nnM1roQy3zWNr5tqwoV7+5uSSVJ7vYjgrIx3pu+H0REBApwIcPe5AzdduCl3e8HdJTVXV5fKaqKpLFbzGfXV7eVd4jYjXk2a9HQLpZdyDmRavUw7mM3FsUHiXdJjxpmFct8ujrZ3q83+86QzbGkAhCKs3i1WVeGNanMquvdYNvZ4vxpQs7dPqRI3s6OzxC0v1/HEHLiuq5eX/7h6OhoGMKPfv756z+8X52dfPn087//q2/eXd/82T/4Rf7Dm3plb9f92+u1q7wov7/eMqFlUpFnT04j2le3V0iUUiEUnQ51QgAwZIiDpiK146Kaco65hFxSLqmUqQFeicBEPSuGrUMwU7UL5VBqE+qjdkYVQIpM42U6P6nKYZxOCv7JeBQQFRU/ptQfWG4TanNQngl8sFw5JLKhJlHE29u73/z9r3PWlMbt5jaOvbHElostpKVICH2OKcQci2RRUS36aMEFB9nk4y0/zoBpAiGCaCHSKdgIgKfaHEBzKYyIyAQFgUjFkjpmBehS8HUTcw4pKSAyhZIxBrIuS0iJhCCFOO67JyfzL56fFEmqtvHH3a5Hy4ujIy156GMuebuP233cpPT6zbsvLp4+PTtO4WaMg2f07IrkYdyXJFU1s34xxv70aFkhvLm6vb3bLJ6crO8flLmp3KKpJPZQ4BDWE8dZU43bLsV+5n0hDDkVpq+u79qzlYpuu9F7XNTUd/dzb7K3aPOYNkZbBE5lcN70494yZ5CixYptHI0PD+JaIh+7vj0hUaza5W67rlOvknIMpjKPMIlUldvs94vV6ZAzkzOzZhyLdc6Q2+/vZ/NT9svQb5oWZdyrzZJZtVfnuJpRDiqDIKFt0RgoEaSQqRQtEoE8yr1UAQXgA91nKkw/KmKR6D/wJ4IDGeFgmA2PSAwAPDqsPbqh4EfcXD+OG1CFg3//Iz706DM0XfzDIv9ISoKPABNNGoYPoCjiIdTs4Ad8EJs9Kh2KHG5ywvBp4ujpobVw2CUmaioS0kSfQEAEImIDNAkkJOeo0xoAgkhFNediuKScu2FMiVLO+74DWJdQNCXnWEpTShx3+8q5ZF1v7dg4f2Ocs9bViGQsO4fGuGwMoEqJqEVzzGHIw6ACll2BUpSKqkpQyJozx8JFsGRVSEVUAFSAkA2T9aauTeWdM6xAWjRHRKslEgETMBuYEt64oBagUnKC6bPjycAQyXogNUxAToSlZASBAojMaKy1aNjYOmRwnl09r6vKVpYtIwEaA4SKTFJARUtRURWRUgS0FBEpUopoLiWKcpGoLMCOBIg4ZbKGJrMiBAViEFGknMp0BWLUKc8RCqhqnhQPQgjOGBAAZErZWjUECVGtLXEgHUGTKoEQE8DkpU0KhGwmxo1oloxkRAsoUjIGiUlUQDJqRgDIAIhIqBmBjAIoGiAWssqstkJmNZaNR18BOWKjgCJFVFHBsWlsIZKioKgTQ2JSw02+K5OPIQEAoSpMfGmkg3T5UZwHU51MxCDw/w8qImOnt0SPpkVME6ZPyAKqMjnDwPR6j8bvBzeYD60BYlJRJtZJ2S8y9YtQVCSTRUXOsUg2ViGmsYxdiX2JCVFn82OCvL59U1cNoSkSGZDZZaCqqokkdCOynp6dGLC3/cDMRUSNDuOYxtFwXTKCsvSJiqZ9UmLrrfcui0JUNZxStJLr1lJlDPL6dhPutihpfnJiHGz625JSt9lP20w1W61OVpvNzjm3Wh199e1rg7VCs1ye/eG775ShMig5B5GqqbohrRa+SMwxT53IXIoUUSjIggDO2QK6jfCTL/+j79/9zRhT1Rhv7Ga791r/7OVPH3Z/cFxVdgZosLbX3c5LrrwvsRTDs/lcDSZST5AElqv5xenJEEnJ3Fxe9jmnGGPfu8rEPld1K6LL1fG85curN7bxRBKGUoN59uwic9ls9/2mm+eqqisEFG8moq+vPCJttt223xFRCrlxVTeG+20EMkxYChDgYrnY7Hf3u+7seGZ6iDEhgfOsxpRU7rbhb/7+zf/qP/0TuL9d79Z//POnf/WrdwNSH5Mh8JXNWUPIzNA2HpCHfpxCZFCl5KhkpsYHERk2uSTDhgzB5DLEBB9cRRUQyLATyYoyeTSKCBuWw1ItolPynjEEAiI5ASERllwKALMhPpx7ohR0Bl27Hrttn9RR4IpkbGbeNSXkoq6+2/ZJzXZ7+eMXn61WdcCQMwZIMeS7zW3QopXZS6QoOsLpYtmn8PffvEKUo3k7m9Wcmpx4tw1G67vLuyI4SqqNOsHzJ89SHq9uts2pXY9bkmRjCJsxBbHG3N8+gGIey/VV72b1GNPdtjs9MWBYBGIWIeOa+dCD962tUKsFEmy726Zh1AwaYjdSQi/2+cXp69+/qdv5slnO3MyydJtdRjuW5EiGYZgf16pjv+/ZHB2dHe2vbjpFt1oNu27bDdbaxvDy7HwM3bvrV68u3xozN8YvFrP7EJsFy64zZOyyFVEiZgKBosAWyfm5sbYgaEkANOZUu1ZUQxE0PgmkFAF5hJy11Ggla90coV05dy5oydcI0XlVDO3qhYoZS9e2raScw9a0q+XR06enP36yfPH13/7rhfK/+hd/f/H8yWJlXl8Nf/HVu//9f/Wjh8vx4Xb94vT8q1+/evbJxf3lvr/eHT099b7dbceNpNOT+em83ff7urZsuTlqL+/23VA+mc1jl7f3D8fn9eXVQ12383kTUpgtzO3DdrVoYsghJAUMIWrGr3/1dz//Z382e7FUyA/Xr06et9CYv/u3v/70j34uJddGJYOkvOvSn/7n//l+M3g2mqIh6HQvdlbmlf18Dp+cxm4T17u6gWVL3Lj9Zl/VbSpDvaiett5bGHNCoJy0pDJrXdXY9UNXz6oUsptVOct2GwCQvI8JP
/nipQ/jyo6xe9vOno67B+dnKSctKYcDq6iU0m12k0FqytkY3e23zla1d8PQG+cVNKdkLRq0RaWoIrmkMYYRUAUkaU45KgAS5SJZsrMmjiNZnleL9fressOK3nVXnzfPjHFKkHO0VV1xhZLGkJ2vVvVi2PclxuaoAeUUYimZ2LRNjcb0OTGWum4ExDc1KpDlsQuSc1NVBXIM0RADMVsDAKLaxRhCmLcrKTAOoQKUxExsfB2K2MpjUdQSYu+sLaJ104bQD2FwrgJEMAzGVm2b0jjmhJkYsEiZ11UZx26zvTj9pHJV/9CTwmK+WHdhkCSqhNrU9R+udklNk81uu7mY1ZhKZZ1nN3RrrmhRtRGMbz2QADOJplCw4ozq2bKrPJbYy6Kdj+S3KLPFalY5IYlJiqB1jYDU1Wzc7yuk+7sHES4xjhFePPn83/7Vv1ws52EwcRwVqKr8EEspuUhCNiFnzintYlsv2tk8ht75qk8BpEhJAEW15KjOeGvb07PVfnMtUKzlceiRWHFKlOcUZCfh6+/e+pv75WLe1PO2aj//0R/vZTgjt4v06WdfAuK/+Nf/8rNf/KKb5Sxlfr4yGQ2Z3W5zerQ8WsxySgnk7OmLMIySOutMVc+BWbUMmpqTs/v12OVUN4vNlur2WLNiyZhDGbdNRTnvNtuNqtntRgVsF4uHt9u6qpyzimQ2XdtWd1c3y3nz2Y8/ff3m5uphOD456zfdk9Oj/RhTTPhIjX4UeZFKeWw6f8BuBA65uHjwLwVQFYKJrHEoRA5cjQPPCD6KJB4vNrWdP9Tfh2rnI3voUD3ToR8OerAX18eiaTruHp78yMUQPYBNBCL62G1XFcTJmGaigaRJfLbb7f7u73/9/u76/n6bY5KgjLRYrtp20Y8pRbWtu77bAN5agNX8+Ozss1ffvmqqmcbS+BkCgOKUpuOaypJLuSyOVnXl7u/e31zfOE8hxaOnxy8uLq4ur/e7XYoRQPb7rmipWx9DVoWYshRRUGYilX67reomKy6Ws5IjonjrDPN2/SBFyJhu3ztXH5+fSS4hpoft2NQz07AIZQK/PNJx5Cy628as933os2yCrUD8fvf+akeafFs31t5v+qRQ9sPM07y2TVsPPfZpdFD7ZoZ1M+67+bzd7/pqsfCzxcPdfchydryUoG/efvv05AwyQDSE9RDKMdJu3xtntiHcrrdg7O5+++nzp5td6LOORYQ5lhyLGqZ9LldX76xBIijTWR9hQh+JCFVUdVckJE5Jam/aWstBWiEggo/MiFKKem8AFJSYgWgK2QHCIgKPHlUfsMcDYIiEOPF0Js2W6hREA2WKBf8wTCe481CDTwbWPzC3RkLRoqKi2I3hr3/1my6Guq62mysRbdqWCHbbB9XSdbtx7FU055wlT8rHDxX140/9MNY/jOpDkS8CyKIFFJitiBBNUd0HoEGBvKviOFrS2nJt3S7GUSDnEnOaSnUmVMUEJWH2CgBonXPOGaKf/OhHjtr79e5P/vwfQ9dbLvPzo1zisN9Zoqbht1fD7X7zsO3qVp6/XBXpYwqzujIgUmTfbVLJlZt7U4chtsvaO+h297fX77yrikAWNUDLtnXWloIhB3QWnU0hG3Qz6xaVH/YdqLUW1jfbq4ee+vhHF6eupGGHFRoDHPqdZA8Gran6bmzrqh/XjZulkJQ1plJV7Ri2WjDmcUxJeTHzNqZQIxSBGCJgq4Bjv1/Yhq011sehF9baVhL6ylekSoTGGDBsLBfx/f6+gRYZpAR0NWKUcctE6FsoVHIHROwWWSsAgBwACYxVRFWlSbc1CXcfge5HdGhi2sAErjyCOR9G3qG6nKRbPwCYPiDmjw9+1JQ9glKPDKNp2Ewvjx8CAj54Bn0UWv7AHf0RVxVQ0Ec4dBpfBABFy6Q7Qz0U2/SItiLioehWmSYJEiEhAiloKXm6fTwg9Xi4LSQAzFkEQIsQIxgzYadsmIBUNaVIoCXmfSnWsrWcS3FkuKj3XLvCFElHiA6rWiPnkZnUWWOtJWTvyHljnLW+QVd560BYiuZYcswoE6Ul5pglFYYsZWRJUAqkkmIspRQFRCIFZmTH3nlnXWWdAUTAUrKqEheaLG8maw8EJAENCAgoxKSExJbZKDHbSo1jJgIFtKosOQIUI1MDxRAz+wpsXaElZrLeOWO9RyYkBDNhKCiaoURFUBRFFRUppeRUUslSSk4pB0UuSmBQKGMWQHauiqGoTgZMwtYawyolp6IKREZFmTDLyAwABFKIGL0DQ0iGQ7aluMS5SOVZapdjiIPPsdM0AmhRQVVmQwYBEZkQWVVzyqJFChhVQ4Ak6pAZ2VggBGWArCKgWtKkT08KCmiQmI2BzDmOoIBk2Hr2DTnH9dxUFZGBLJKzqnhvWWEMOYqCFhDSQkAgCDplNE6biAIiiRYimnTrQKRSmIiRiaBMR5pD8jjAB6jIOzcVdcaYiSPEbFQLIJASTPAQAsCE+uBH6HdiBAIA4GSYh9OsOZyFBAC1CE2ZdgolaUqBSjZQgEEhlNQbZ0WoMlTScH9/DUmRsEguRaz1QGS9t02bJRDMF/NlxXz19i0yqRTrrbIQ2coZBNzvRxByhoHUHdVCQOSIGIdibeUbZ8wsxxGJH3a9tVU1X50tF/12XwBu33+3mM2rGV8Nt1U1MzY+DPH6/s1RVWPOV91WSlBTt27x7uq9MW67H1bHcwrqE97vYxpHJVUVIMmlsDWoqrmQgVyCAgJpjinD+PrmKwaEDJkpS0FXHc/tvrvOJZexgK8u1zc//emfl826u3rbWhfjuN32sO5cbRa1B0h1Xd3f7e/ud/P5YjWfHR3Pj5wZwuC8YczMXJD6mMPt9eX7sFq2w6531lTE+/0evTs5PlEthptXb96crI5CjFy5qvZYipSEiM6RdYSGQyxkrEqwbCPgmIWtCzE4U9c1Puw2faKzi4tXr98w28VR/bCLVWNyhK/+cHX85PWf/MMfD2l4uHn48scXl+v4/fv9sOsZhBGNoVJKLgBA87qWnDyxq/wWNGWAolJEAEIOhrloYZy8DAorER36rqIqKAUTTcRBpInuyYSMVEQssaggG1UoUhCRGBWUgKZjh9Kk7AQAKCFCzuzNqLhoZ1e3D5fr62en9ieffPL27d8abmIpMedmvjo9/mXc7y9vbx7iGKEuOPzJT17k9RYaDft96FJE58Vs+/xmfXU/7vsMO6guHBVDN+t3x/4Yk4xlO3t2DN6N98hswMn93ZXxzbevvs+qrA43mdEdny8X3oWh/PEv/mmRYbfpbncP3pe6gufPn4OIoFZNVVDub9ezlV89OXpzde3TLsqwD+Ny9dSV/TgOu6HfhtEsrTuy67RtdGaZS4kFijKJwuJ4ZYlLGesWDcrNH/4QHC+fnCnJxR9/wauzq9dvKMNyvgiXV2i0xAczq07PXq4fdh5N6PsUgQtUxpYCTVObyitxGPYVGebFOO6Mc2HckxoyFtgIQEIiZsnJ+CbGIRKyqVArD8TsNEVXr6xdbffd2YufqfHns9n11W8A0FQNERXOiSXHXKQslk/Yt6Ps/vX//D9c
/OwngPb2bn92fvLdd5ebjkqEqysZb7pqVv/9r759dn6828Qvv7y4rOrFfDGbUezT0tf3b9Zv12//0Z/94re/fnt61lRze/Z0cc6GVO7XvZ87MvaLL16+fnN7dblztbZzO1ssCM31+4FynJ+0rvZc4+knXxCZujF3378Pg+dPlu+vr89ePuWWGrTvX7/+/PzTas5xDIJYxq2ACW9u5s7rTXdz93en/+U/weefXb3fLp899w2Ob74vd11+u6/nJm7D1c31+bPz6/XuyScngtnWFhQJS8paYvFGNI8grKVUtc0xM1PJ+Nd/9c0vfvZJbfnZM/fpT9tdqvffCmQVoKrxRZtpFhAiSGFLxljUzIyVb5AxpATGAiIKEJJFMog5BwGIEpy1s3b20D+QpZDirG6CDExExCkO/ZBJ1Ti7KwO7arPdz5rZxeIpxJSRckqGrSYMaXQGUxhjSFasFiUyaRTVlEuuqqqUUjRVjcOIm80Wq5oQ29aXnGMY23Y29uNuu1scz0NJJcOQQzufKaKWYtmq03V3y+SspX7os5acC4ANcTBITVMJqPOzlPM+RmOMiik5VN6nGLu+43bZhU6LsvW1qzbrB0mFDBE3orQ6OTesKQRGBkLs+hhDziWX8tDvR5GI/O272z/98hNJ+9aTcwYdZceuavpurCuKuXfGxSjAtDg+9stFt95jKAxYccPOBrUZymI+e/b0CWoKISCyITPkwVhKmXPB65srNNztgqudqkApq6NVP45NW3edSlHjHY4ZQCUWNI4rp4xFMGYx1okUS6Zt2q7biuSxWxPWiCb0IUnGloyfh9CVko2xhFiKOKa6sSkUQkgxbjcDARvfnD+Zv798/fT5Ewxlhs3Dm7svf/nFP/2T/3LdhfPZH933a+bWInlEVDCmyilRbZ89+3LX7Zxmo6JoGIsm2d3tkwDNYdxtKY/E8rC9+9Nf/iPiFEqwvg19PjmeR8k5i2je7taeXePI1JV16BwPfX9+svrqq9eL1ZkWaptqdbTcD+HFsxOL2N/eLZdtGuP6fv1Yq+hUvR+a249yCAEhfORX6AeQ5gDlTHUPqCoUkcdkekQElA/YzyETRwAJkFQ/nJfg4NJ6cJLBD8XQYyUEPwSsHgN2AB65slMddeARSfnhcyZaFDwqM6RIURmG8c3bN/frm34YUkon5yfjrrt92Jyv5vP5bN911bzJWe7vrhfz+fJk9fL509t+0EqOzo6Mwb4PMXQ5xbubuxwTE4/jAAWOv/jRyfHqu2/+IGMZUEMsL14eQ0lDtwMtAKUfR0U1hifWPQCYCTIBcc41ruqGwVjJJVdHq2EbtOScoqiWUpAQUMaU0cnRyeL29qGeVcgQhr59evT+3bVvZmIsaexTZu+xhDDmfiipKSOAo4wgZMjleL9NOaesGkMu4hFVdVTQJ+1qtphvM6VuyCltpdwO+2erIyXyta+GuqRytX27S/vz+tn66nq2qK9v70xljGaD+KMvn17e7q5uHn72ky9u7u8Xy8U/+2e/+Mu//noz5u5+YGNiSCnnXHLlTM5FDr6hH7EcUZE80aUnjr5mlVSkzjmLTKyiLJpKKQqNKCEe8uRF0ZgPOeEfi3I9dI31USmjWj6QJhQ+jmbEA5b4YRbggXB0YMWhHrhpqoqohVSlFCmh4KtXr969eZWkdLv10O0FU8gxjrvdbhfTUIo4W6USJQeY1JeiH8SWH0bvh1d9vIEPuACBTgaoMBVUCigiU/6taNYiBQmZLCMBZS1JNYoWSKrlcT5Ngk9UJja43W/Pjpea8+po9vTieRZzulqdLhY312/rkyPPRgtYNAywub57//aVllD67rNnx1+8OJX+oWLryaXUS8ndbsO2rhbnGqJxOFs2ffdw93ATcucr7LpdycliOj6+cM7lVHyziMoPIfjVKgxxVhn25v7t3aftM8wxd/1mSL/Zq/fj50f+Zr21zrd1k3NJqWvnCxEA1ZyLM20KhdBqgZKjMYTRqGLKGnIxMS+MyVlSLpa9CuQxM3MIY8mGrCEmUCpZlQG4VBWGPMybxs6b7X6HIFU974d9MwdRjGFkH60AJhCyxjmUoGVE8GTmoAwAKKRiiKwCKiBNZSLSAYbBj98tHoadAk1W1o94jT66UMFjFMBhBcQf8s0Oa/I0LuEA0fwAggH9+OQDYXPC7nESgv0AWzpMgkcQ/gBeTogRAAAwE+o0Cyb2j4joJA9jxCLAH+MpFZGRFCcYBBABikzx8zBxMBSJjUFiw0xsCFFFUhE88AuSiDIbKYWMKEARTSkXyUW0BE1UnENyFTPVzlkcLUjF6km4FEQrigJQMoReCbA4Ts6yNVU9Y1eDc6I5jV0exzz2KklykhggDSXkogUlEYKWDKKiUkpJCmTIMRrnXVX52jtXEYJokiITixZpUhYxExIpMgOxQZWSAZTYETliZueNcezqwnZyDQIwRVBLAg2SAkhRNMwe2SEbJIsExjKjgCZCh6igBRB1QhnZkMYsKiWrFClZJ1OlUlJKKeXJqwgygHEaC7LTIoSa89TGIyNSEiBAyZKzMDMTkUZmLajMFhCAHTISspVskMM4OlB1LOI052JNtJySjWEoOYhmgzBpYhSBjCVbARCUhKlH0ZKzgBIhFLLOAAKQQZ4iyYpKJhBV0ckGDkQKqUbEKa+NVDiNMcVEznktBC1aBwdBGykWVmq8p1KKaCmachYtSVCJlXTyq57cJJAIpBCigKJanUTEmC0zEE6Gi0X4P4CKrDEAZBim/HEiJAJVQgDRqZJGwoNdV1HhyX1IAUCAVETw0ElDEZks7Kele7q+iIiq5GlKKIsUyIWSgWKcEU1IWrLmIXbr0RivogrKZJCZvK2WMySKo1bN8vj0+f3b78K+t0oll5xjSGM7m2mMqtIu2tDFMZc4hlnlcTpxhJjHwoYBUZm6rqtn7dnFE3CtDDmmMQNIThcnT9mby7dvlHQ7bvd9b6v5qpovmzpA6se0Ol8+PfvkV7/9g23djF0JHSsO+87RSlNCwJiSqOYYVdVbm5WUgJGRYIxD28ytdXEM2u3busqiu3208+r5p2dPm1w2+0pda60x8Hzx1IzD3fvXNI6waCrvsMGShA2PXRy7vl2qt0xs0hBw3qSQh257/uTo5vJhHOPydPXFl58Pd9u+75ml8bzdj4ah9Q1IUebN+gFBGcznn/4ojPucS0rFetCsjmgcBuOdNZwVVGUY9obZOdf3fV23lqHk0o+9s2yAt5txNZ8dL453fS9aGGHsOjDe1fVf/vvfpT5++elq/tTtf/uuAnpyevRuSKSSUtQCiDj2g7PWEIesScEUNGyGYWC2bADwYD+EyNPqPZm3i4gh0klrhqiKRcQy55IRiYhKKYBKRDnlyZx0WpEBcQKIsghOTY9HKikAqOi468pOMAN5ns3rzyp7efn9Xz3AcmbHNDjj597YIi7prtuj59oQF729uZGzo/W223Y7BG1dU2Ml0O3v1xjlZDZvUF988lna3scMJ4vzzy+evf7mu6OZD+PuqDrpw2779nX79HR+PNuMO9ex904GTcPIVeXV1mZ5/Ol
n4lcPd+v95v7TL87f/fZNxe2JXUiGGDMX1KIphH6g3X5vVEHRzurjp2eFbYa6399x5RqkqnF3l3cWLAGplDAOy9WcTWOtXc7nKQXnlimJltHahh2tH7qTZTvs+xDvmnZlwGTAEHV7c28MVHVdz7wjM/bb2eLINlWMe5DgvEtjUjIFwILVHLu4R2YBdlVdMoY0MBQmK6UgOettl/rCmJXJNqTqyBukDMWywaRVtWwWFyHscwoS+9XpsygxDH3lF+1iHrue2Zeo/f4eoOxG+eMffXJ5+Ybt2LbZsjs9qtvj4+1d8Wguns+buayOlrvLsL++N5TfvHl3nJbEvN92Z6fHvzz/fHs3nC+ePnnK691GYnr75urF8+MXFyvn+e03l3y8PDmujEdyYCz3684Zd3o880f27s2lnTuwzs/h9bevTs788acvi3hx/JOfft6F4Zuvvvvxy/NPf/IcEBW5OW936y01zq1ctTpbv3m9fHlxd/MGnxzFGbhPZgVy9zDa2oPy/s26emn8snn2k+fG2BlzFyXFuGQfxqSo3pq79Xa5bMZBhi6fnMxKkc12c3pyZC388k9etC070X//3/7F5vXYPj86Olqt11EykTEhbqZZ0NRut+1iErasqvt+R2AtWUW1bA2bFIcYojiOiETknck5Fym7cSNaKIE3br/rjMUxBcvWsvWMzlabfpskvDh/YdGWMpaUMqGvajYuhTQOXcrJNL5p2yLgfFWshC5a4BCjd54VY0xDyr6qDfJyNp8OJiHG+ayVTkBKLtl7x8TOmxRi2zQ5phAyaS5SYi7nZ0/W9zsgITVN1eQUN7uHxXwOWccxlgKWTc6BSMIwhBBiLiZlRK6syTmAqrNmLHEM2TnLtYeS4tg3s8bVtqTkqiqoQoneGghYLNdV04c09RKtt66yWWleuzCU+7w5Xy5qaxLTrKmMtzpqhe5uvzNs67aCnOtG+zEV0dvLdY65du74aEFYui7suuAMipS6toqYc+iHJJAUy8P1g9H885/94u7h7Xa3Pj46u7x5T0CGjSQxhqUkyTmnvfO+aZo0DKHbZGcNFgImtgpaSpmUhwvnLbKmpEXbZgFQUAMb65kQpPImDYFR4xB9bWpvYt9VeDZvzJPjcyTr61bKzdHRk3kzGxZh7uuXJ1VEG5mG+02VkyOcrfxuf2/ZPNzd5BgaS4AcQoo5gYglO5Tc7TaVRUKTcjmqedF2Y+J5U9Mk8icN3VgkE0NB6IZ+lj0phT43VTtW2SJ/+uXL7W53cXqWQ3n+9OR337/PfZg3NZweX91uwXzwW4SPuhiZUuenulYmztAHUtD0QPxoqaKT9xBNle3jiejgaSoH2yBAQKTHYucxO+YD4+OAGBx64ROuNBU3jwKgx4ySaW872B59tOjQ6eT4KDw7lGYIOhkdKQhAinG933z7+tV23+WsxlaW/d1wbw2fHC+7rg+pJAx3VzcWiMT6at7t09t3l599+mmJeLu+uzh/sl0/bB7WKYSSi/UG2ByfnyxXy91+Q5CMMyHKxbOns8q9ef19GAdjzW67LyoEKLFkzRO+llOmA29+6uhAHEcmHHY7LQUQUwiUCzACALNhJI3lx0+e0zjGVJBNyvHqYb04WtzfbbLi6nh+7Ku795cxB0KoPVEJ1rvacUpirHrWkBJIQZHKMTF3WQuW+dFiU3C93vWF6pxYS7/pVOFouSoxlyzG8fv374egpprPj0+vLy8tYtgFp/787Mlvf/P18y9f3u3fh5wsc1PX67uHxVHzoy9fvnpzE7txzIkcI6LkItMxPcuj78oHgAYezdEx5xyEpEjOpYiUUkrOKqIgB0RHCoJaVQRF56ZQe0Ca8ubxo+fvwW13coFWBADBA1dNVKd6Xh+bzD8gv30guMHjSWmSMygoikopUAT0Yfvw97/9uz5u+rDrd+uqJiC6ud30/S7mWFJxlZNSQhRVKIcKcxr2hxr9MOinUv+DYvPjfcgHA5yiMsUOHERGRAiQRYhgKmvQ8q7vegSdEogUaYqgEp3KHyOeVUcUKSn12Zrq/OTCoC4Wi2H/YMk4JM9wf31n6kUC/e7tzd1D13ehYfr5j55j3BFJ1focYwppGHqmqmlXClog+blPJd1d3vXrsXKLTbdh4ArM6bxeeKcIfRznq6MUxvu7BzalqXHcl6Jx1s6GFAGgNmZR4Zv78Zubu0+OLpzw2A+sWNdNH/bGBufIkNESc0mVb3RQUgaAnAZrIMWeQFVKLolBS7ex5UJVjbd9CGwIWHPIZJmsM9bF0LeV7UJqVBezpSo46+czxtRJLgpVjMHYqiDkMFJOZBUdqyYsAGyNXSblrMUDAgGxh0Oy3oSrKBIdFtTHdZKQcPpGfwD7HNapR2nY43x4RBPhg+nb4VKPkrQJdPoYLwb6Q7D0A2T0gdX5AR76OLwOptPTfYhOLKePYQKH2zg4XQsAEjDjgVdHQMhSikwGMJP8jEgmSZRCkQKgTI9XBUVQRiEtIHliTJQiCGoYkYmIAYAMIkmWiIBSYLKEJBDCzODYUuOaxrOdUsZBWQsWSGkUJkCBnFGAFNUQItnKlbqzzmXHAKoiKkpaVMEAZi0KRSGBFmsQimjJAAg5EyizITaWsfLee++dYaZp2ShFJv8dIkRjVAmAFPIkSgJEJocw8Y08ucpWDRmHXE1xWTTtVKJSjBYAKUCTXAQIVSUhIimhZEAkLSBRFSZvI5wgCEQBmggq+siCnLyKSi4pxiwoAMKFuAAwcqYYVbOoZlVrvSgKg+QipQBgzokJVAxYBoM0kTQBYHJztoiUCZyWMkE8mksaR2cwZQwMIUEpRCCgQAjEBNYDOyylpKgp6CRpR0BklUlDI6RExiDi5KVCBKICePD7ByBVVpUJd2H2CAyIWlLsoqStq2dgHJKb8ignEpgFRdQiucSkwEUwQzokRsHhpUQFEZDg0AwgZGsQEQoLKBExGXo8ET3aWltDcMhQw0cro2l6ErFINsSTFF6AEAWnwAKVRxB2ukU5dCEABIQIdWIVT8Z3CgSYczFMOJngGVGSormUgMBdt4/7PTsPBCJijWe2osLWGVfnGJHsfPVk2D/s17eVc1Q0aAZk72chKymFkAGKq43X2prWFh32fVMZv5ynJuciZD2oVovZfGHnC8q534Y+jqEkOT99dvX224fr23kzZ2esADkA46/v1tjljGDrqkT9+tuvm9Zt+pC4KI9Naz6FJ199c4+1qys7bDKR1t7nrCUrIYw5grEspjKNCjFy5RQQc8GT5xdfPH958fzk+9/81bB/sDmfPXuy294HgVZnIPtZY8nSfowsibPkLKvl+Va289lcpAAjWQoh3N5vK+djF3a0H7cj2+r+/RbSN96ai4uz24eHbtur4v1+I/M8xFQMNvU8D/shpEL96mhxcrradWG/6Yf9vjo7Pjprh3GUnCUHQ76ezfZdF+O+tW4coxglw7vd5qc/f3m3Hrs+vnt/94tf/Pjtq1frvq+dayr3/fUe69nDRv7d37zSUv7kzz85f5auf/3GUXV2ury5fsgFRJUtAZoiIDkrUFbdh9Gy8ZUbY3
L2cTiiKggiH7aEafIAiOrk6giKCqRKRDydwg+M5oPHHcqkmgNkNlPXjpAmDiIo6ONsODlbXF/f1stmcTTHkueuqmqIr2+KunFvx3F8cnbkIfUPD8vzJ03ruV1e3d2a0v/i4mR/dQWZ6ra6udsMwMjdnLGUNJs1luXlvMWhHwHGQRfHp3f3t66uQkom4j5GJPOjz35yd/22zMK+34s1FcJ9P87q9ud/8g+G202A4btv/832L+9enDyBqPx9fzRvvvziT8KYQhqQsGnr1GlQ/PWru6pdtW5GAK52dQV1hVjcUOCTT579+ps/PD09/+ard6vZiWtsLM5aj+RmTYXWCBRn0CJCwTHm07m73odkEyxWP312vn57Vdk6dfur19/OoVqdn4fSu4rvN5fO4OmnL+LQkaQcZb48dd4Yl2IBb/w4dFBGpiCCalQUsnBBw4adq4pSQRYmJFexNeKdaZMMMWVGzkjV6cVscfHqbrvZ3i1aqmeNqecJnKuqqjoq7JHZeFfINL4O++38xcmJaX7z+9f/8Bcvzk5vX3z2WWX43/zzv+70fllf3L66nC1nmcLV7eb//t/99f/yZz/64o+f++PU7YSBV2dnZBWNKSn84dvvm9WXZl4Pu3FmWxp5ve/7IVTsxm3OkKSQDnkXc+rLspnvw9WRrEqIN9+8uvjypT1tn312PF7vaSyb224I4eXRs6b2Lz45ds7mQWLoSNBWvLt5ODpbUQ1d352d+OHVg8SYosTd3s+ram73u51/cSJYpS1WR8233/7u4iefxiRHiwZZ0doxiZ9bLaBqT89WJYyL+fzhft3tOkV88vR46MNs3tJDf32XX/1652c/5TB8cfZFfPLtzdU9k+y3u6Y6bAn39xskqusqpYKEBsCA9db1Q6fAMY9EQAgqGEO0jkWyAteu6vvOsjGGu26o20pBjYoWEVH2NuRgyYnKbnujkkJMB8NL0ZJThsIeyTalFJEwhsySK2MZMI4js+y3D6ujo7ZphzHEIeRc6sqSuhQgx3EvHTIuZlWMPTPdr7fL+UyzGnL7sffkfOPDOBLaYdNbhPVm2zSzHIOKNKau/SLJnhijyv1mXznjnBXJbI0lFiFQTTG7yrb1TKRQzM6YVJKiAJmuGy3OKEO331e2GfajZJ23C8opjAOxoJJPev6k9ra6OG7/+m+/PT9+8nB/b2duNmuN5zrledMUzMlm1HS0aCDsc++ttaEbTD3rt+uq8UDGQa5czP0AyaqCcQa0EJJB2A6p75KfO0mDZLCu+ve/+6uT0xPN5nj55Or2bhj6ytoUxPhqCCMBimrKGnoExQLobAU6DGPvbVX72RgHBcgp5rwFNLZuUtEh9NbZnILztnZGQ6idnXmjaPZ9jDEYq8uFv3v/7uRkMS5Phj4g68mzo6xy3w2umgctx6fnmWG73j/9dHFSz371t3/5/t3NvJ0x+xJ6FgI2BZOijnk4Xl08dA9DTkftMsZuP3RpyJ+/eFI5yDEvjk9iH+tV68hvQz+r2+16fVzX79fvX9oLhRJznK/abtxKKi+fLX/z1baUxNycPTvedsNuv189OXkyW51fLu/33fy4fawN9OAlNAEsk5BfJwHFwfNlkl89li7TCWxqjukjbvSo3nlsaB/Y1nLAdJAYf6iz+UCweDzRAYDqYx75gd/xGCUE+Njn0B8mpSNODKaP5kdT9AjqIdNHtKSk2/1wfXO/3/chjLZq6oZCGrKkqmpur++bpkHiMQwx5rppZ/Nmvmj7Ep+ePk3bsOv2J0+ehC72+x2zemdmbYtIktPpi4vlsvnVX/62FNht+ycvL774oy/3m13dNpLSw3o9X7R9N4qKMuWckLhydT8MyDqd/q03o0bLlFISsWQYDNXO55xT0pg0a0bkusZff/eVNe7s+PTm+k4D3N5tZK4pxCHcO4Pz5eris5fff/P7bre3wCq0G4JlQ5aylpBzyhkAfOVDli7mZlbzfLb85Nn97XroAigBad/tMWMRvVnvZmMWTdfX17tuF8ZUzerduCXrXnzy6V//9V+2dX11d422evfm9v5huC5DieaPvngRS7p8GG53m9MnZ3Xb/u6rb0EEEMuUITYRFQ4jZaKl4ST+fWyzooCmUhQBYhZVQBIYAWkKGCJiMlGJmJmmxOGD5wQeKEqPBfihwIZH03Mi+Uj1UJyMRkXwQ/zYhyH5QccDAIRw0LUdTvwxQyrld1//brO/i7kbugcyZdsPDw/XwzACSlV78m4ch6HvYoop50chDggqHLKVP+ICBzRW4QdMq8fPBcpBwaAIj2LPw9tDQSYpklIhAWe5T3migNDjjFCcDK1JVUsu1jATlVyePX1SeY8lrE6Xlq247LiM+zUToKSr25vvL68E2bH98kfnJ8tKhsE6k8OgSKnkGPq6XdX1vB+HpmYkvb592O+2M+/CfkhJ6nqeID05PWEmZqtqfF2jSr8eGEFF5rNF7MdFM0OjTeUx4rypNlf3bzbm67cPP7uYD0OvUmazJaDGoWckNk60SI7oXeV4GAZHPIS8mK/241qySpGcU0qjljEN61wKEo59nK8axJJTsmzYMpnWFCy5zznk0LmqVpJUer84K3uqDJTd/v7h6uLiRwlkHHq2tfUooCUFAmZTiZIaNmhJgsqI1iEhlMJE8EF0ddDj6vSF45SODgDEQPS4oH0Ych9ZZfg4QuFg+g+P1/zY4kUg+EEUPYBCkY86tR9cGB9/HFZp0I/I0sQBUkU6IKEfr3BAIEBVixR8ZDox0wT5EwAyMx9AfJhAIpEyRVEd5vjUpkcgmt4BmcnRZXqsME9+xsJseQr/RmUA0YIIgIUnNxhQQ+AZa4cW0TA5S5qTCmZVQ6i5gGQsCdFM1gTsTAkpapLAUFljDJH1zgGbHOOYBpRsCE3bIDJokZiQWEsCQFIgMoBkDdmauWLiSQcqmIkVGJH48KESG0AgdmwMsEViwxURG2PRtegacp7YEbIgwsGOFkBFU6QCBAI5as4oEUGwAIEQMBUx7BEIFVSVlAG1aAJAFdBSUIAUUEGKFilZSi754HxTJGQVBDBoCIgiQMwlT9WhZtEiEcUgQ8mTalKICCkqWGIpmUDwAP1YQCHIxpCQKqAAomPDdc7RJjAMLpmUgpY85XcTGWWvQCgRsBCBKjIZ1AmZFwbSlFRAcxRQZQfAhIf/CEiAaApLnSYLoJQCIhIFjQWRLBmKsHVoayADxCpUQBSAESxhIRxzTlmT4mQyDdNWg1AmAOeRbcxMlPMUADXlRxUo9FgdP0JFNAF1j6LOafkFEFAVmfSWgARaUB/zMiez98f21/RbD4ccERVAEJEyNeVERSGXhEQpjiDIKs4ZRElhgIl/pJolsDMKJDmjrYoCWrZ1m2KUlOazxdz72/vXgAQkYYxgDrqj2s3iEOezpvI55c7Y2XC/lz43bVVX1RD3SUssWRM4pKr2WeXqzeWw6Zu6OT0/engY3735drPZgDGL+cnb168yUFP5fcoXF0+w5BALsjLZDNE5N6Ld3Pd1W48j7LvNy8+Pv33YbFMw3pQQDBERqKFUAhEUyVKgdj7nG
GNoKm/Y5ljGbr//w6/7O1/FsJzNhXW/6ZmbgnR2fHH9h9/MK3N0tJCHu8bWAFmUQ4m2sZg1R9zsxnkD1llyhhtzfvzk7uoOrHE1Q9RuPzyMZbPrDWNbGTBs6zpEHEYpqMvWsavHENjQEErGrMZdPD/tx+12P4zD2DTe+1rTZHEldW1CBCUDMeUgwARk1tthuWqGUHYpvr68/PTTi/T66uEuGstsebfdKpTtIH/9uzeDyBcvTj97cfGrr15p0MpxziwIZVojCUUE2KKiFM0lMbP3jKCiBRUmW+tDp3QSAYMiIKMS0aRxJMIsmQ0eiKaqiETIStMaRh+Yo49nEp5MAeCg3gQA2O0HUzlIpcXm21ffzk6ec1Uunp2Nql9f39mT5R3zjxfLBcF2u1+HvqmWfnk6i12KpY9SzRv2vM/4Znj4x59/Mb67bFcLP7N3u+HhepO60C7mx0dHcRzHOKZUassGkK3l6mgzlK7PqHF33XPl60V1cfrk4sWn3e7h17//d5++eNk471en6GT1ZFUVWs1fzNrz26uvQirOVyJCTMbAetx2GI4XK41REjjjOJYcxqapUZJ1kSTc3t3/6ItPQhhTTsd+FcZYHZ+AoVRKUzekklIKJfSxd2hCnx7uNt+Zut+Gh7eX+/XaGLdmNEerGLXfFHDLPu5iQhBb3Grx6ScqkIYh4+hal1JIGlIW42dYJIxr3ywUrEoREGTWAqZpd0NvfG2sLQOmPCATKaOj5fKZcjtAtTprl/OTyzd/UGvt4oltFzmE+ep06KJB7IcA1Qy4dVX9sIPl+ZPVmR37FNW9v0/Xb9//29/9/vnFZzdv8x//0XmM20Spsav/7f/mP6UydrH3rZUiZFgSPWz6h/1DZasXn58nKI01x6ezkRMB1RZGoqautvvh+8u7Lz+9OHu6CONgrQ99XtJTX7t2sRjWOwkFBYce0nqzfdieffHl13/71bd/+93q5bn3Jo5589CvzpZSim388tmT0PV0Gaq5I6/DK2meXfhZO4z78Sb53NbsypiyBvUF/Pz8s5fgQUb8+jfX508b22Q7a1xtx13cPKzJZO/c5f1+tWqL5n4Y/NzlLENKi4Wrsvnn11//g//4T//XZ7/4P/9f/pvzo71zdsxDDHE5t9Ms8JUTmA40qa4rYh72QUQWy1W/HyWrmGycYfJZZciBCxuCQbskKUp06tiwqlhnU4lJi3NOEHKWyXmlkEVEX1stMOachhDH0TceLaaYNBRXUUHNY2gXTcakqGC4Ws6HOPqqYkeWXUzB+jqCFc6Na4YwWGuSlvlq0XcDEcessUDOqaQYSnLtsqrqnPIwxqZpamnR+Kipdi7HtOvXUvKiqivrcxcq3+yGXhWa+azb9yqS4mitL1LiEJnQWLPveiZCIEFoj5YSB4lDOzuSFNFaIFM3DFKNox9zKCq/+Mknu91uuWyGYdvWtXE+QHlxcgSIoli3s1zEeruPvXdMKCnkVrRqqs22z6mUkOq22W73KYdmNN7Y7WbDzhw3p/frLSAMaSS0KScnbrcLdTsbh1GTTX05O1nut/cAZJwPKRMyE1ljQXVeN/3YhXH0VW0tpTR6y4BGwfimyUUFo0gahp23dVM3jTP73VqtVQAsEoe49JVns1gsFvP5+8vbLgzdvgtDdmxff/u2OXouzgbJZ+cncQzO+xjy4siFIY9DWrZNbfDf/uX/uN7ffvb8Re0akSJjtpaGOKQwsuembm8fHq4fLo8X593ddrvZ1E1tyTbzuu+iJcusJWfFEnJCwyrFGrpbPzRV0+97UMNMoGY+W91c3VRV/vFnF6GPQxTCcnK8uHx30xdYOXdystp2cd8dzN1FQFEOBzZ4ZFYAPBYQE4tHHouJ6RD76D896fQPNU5RxalqUoDJ+QgPQNOjwaNOvezD6zwSTPRw5UPcDwCoFvkgv8CDfmNKrZoEGAfY4YNw6QNDBB/vULKIQN+F95d376/usmLTzvf78f5+ve93J2en/TaUDFXl1+t9iMEZ27TziydP7x+2x6eLsYsMUs99u2jHrhtSOTk774bQ9TFLPn92slo2b79/vd+PIjJvmufPLvpu/+7tq6qyIeWjk6NSZLPeEyEzR0nzpjk5Pn/z7hUKhlyyCJOxZGaz5vb2NkRiYsvOWqeClkrSiABGyTlLaMcuXYU7b9yz5Xy93sYQfFPlbry9vOn78OmPP3v24pPvv/tWpdiKmsqBSIq5apzz1qrZD3E/jiBwfHLy8sdfcFV9/fb9+nbjAH78/NNXb76dL6qxi0Fpux81Zmvx/vq2bqwF9mBiFxThcnNtrBXR3WZEthkwChTC31/ebmNk63/xRz9a3/e3l/fdOALZkkMpWUUOkWSHMlkP5fQPiBUH/EhBFLRMzj4KACqFEB6F9mgYrTHics5gjNGMPAUTfbjMo5hnarNNg3giGcnhPE8AcDDGUvhga/2BiqGHKCs40JImlEsmZhO+uXz33dvvdsNu/fBQN267u7+5fhtDz9631ayp691+J6LW2e3YT2U1AiRJU73/QS80vQ4cyviPt4AfOXyTm9PkSPv4VD2ATio6JU+xYUe0DamQMEzkA0IFBiQkUgUpY7c/Ws11HBHh808uxs396qhZLef3N/fE6CTDKMh22z+8vXlfvIk9rGr32cUZiwpgDgkJU4r9sCVjq7oNIUJRb9z7N2/7Pnhbtx7f9XvveC+Dn9v5wmUREMy5GGuHbh9LHLveCVWn7XdvLz9pZo11I5Ta0E5gOW/HkP+w6Z+f1DNvs+Bmv7EGi4Qwkq/EWK+mDrEgoWHa77cEMKbkbN3DYIhSjF3fzxfL7eaW7ZKYx6Fr5s4QdtvdytVACJYxomZxxkopmkbmltCgMoIBlbqttQ/DuOX2GMpUpyYRMga9q8YwIjuiRktRzUoMxsiBBiaMfFgH9eBcAnAIXz8gRKSKjxJEPBDKDnPg4yooP1gV4VEj9gMg59HwBD7Qk/ARINJDCMHH0fwf/A0fMc8DdD+9yvTnP/RE0kdaneojlwIBnTWMJKApF0SQLFMW18FqGWgqWUSADSOqEhpCQnTWMjOIFBU9RAtON0sqyMis4IydMqum6lyLMKCx3LStq2pjK+u9994yUylQimYpoCkPKoFALTkEtdZw0YzFGWJLzhpjHBvPBEqCgrZypq2c98b5IhiHLuw3OuwhIEkmAGJDTL5y1lfEVhBVS0nJ6NQ0ZERhAgVFVjaODZExaJ2xFZuKTcXM5GdqGzJuQrFRixR+tD6eVrYCFBHTAQREZcNkABnRWkUAyROvS1EmRsqUL4+CkrOUIrnklGOWLJoFiuIBjykpAeY0WiuGGFRTTjRRoZSCqKIaZNQ87asTZYbB5qyMgkCMcmADFwEpePhaAYwFUMiJgQEsKDJbJs45QooEOsFqRVJJg+RMyFPuGSKAFgBJWQAIckFUZtKSFIGZAAQIBFEmIrMqSAHMioRoVAsAljLGEdkQGwNMppqpYeMaQUbjBIhgipaTUnIYYwJFZlCYSBUiIiAKoHIYecyEkYjNI1SEhgybR4xo+sVMk+DzsCpPkuDHbQIB
Dk5fKoBcSiZEhUf1zmESCSjqpDUTAZWJ74wwOfRJKTIpSmnKryIuEhAyIZIxKQxh3CMSMamS8bXzdZKCjo2vShiRmKwvIYFAM3f7+9gNPQFEGef1saQyhiEiFXQl22G9rwBt6+raX16/N2Qc0+nxYr8fLODuenM97BtnK+P3Xd6/exh2+zDEkgq76ts33zeW+k20UKHw9f2OCYxx0sUIo3Um9ONmH9nY1CtFiGENvsyJuiHGmJA5i5acJxjC+yrnAkqAZBDBMiDWdR0xVFTSWBAVSHbr0RLCEMS6um22l6+a1qLYu00CdAW0RNjuHmprrXfD0Fd1c7w6KSlJkf1DV3LZD1v0lGPZd3titrVxxpSUS8S+H2ezuqqwG/Ztuwxp3OxvmVCltM18HIuMqQsboNLMzKfPLm6u7jXrth/quiqlSM7EwIb6ftQMvvJJZAwhxGJN5TyDNu9vN5++fPHZH325+ctft219lGi7voohIfF6D3/3q/dY4MXLs+OHVf/2pqpMyHmIpYigFmRWVqXC1pSsQEzOIbBBkpJQCxJaIgCd3NMFgImmoYXA067ASOXQ91VREVFmyFq0iCGezBdpirpEZqZpsZn4yB9srVMUv7S4H2o4mzty2mw2rxQ7N2+Pj2fO8jAOrT1ODCVR5ZZjv//yx/+L9evfA+TFCd3eXoUcxqIXR09v7m44jl5dEgrJnhw91fnondnFrm6qEO3t+ur05HRW1e3Zha8u/IBg56dH89u7brFcOZRmdra/u9893P30j35GhS/fXRYif7S6j+FnZ59/+vLPYp81EykY5/qHrVrM3cgxrLfbZTOf22p3fXd8fPb26rt55b2vN9ebi5Nn5/PTcfzdqll+vx6HACUlARHNhuu6XRWNTTtP9zdVOy+m7Labyh1Jmt29fyj9zvjEbIjodi8vn/xjufpW9vtmboC7q+3GccrZzOcvMma7AB9DGN4B7TQV62cIVuLouclFu3FvSYFpyAnQAVmslxGUiExLItH5RcEdWp2v5g+bUEGul7PvX33/5Y8+1QJd13tytpnn4lOAxs6Wx2cha7tamFm8e3/929/85Z+ZL7dBf/zF58vj5r/77//FP/2v/uNv/t3bX3z+8pd/urjcXH/yyfP1t1KkNCfVfbcl4DAE73W3jZL1i5+/uHr9fjU/ebjru4CnJ+1m3RmSs09ORSJoXh7Vv5y/hFJu395Zy2bpEZCZ15dbJ4hd9CtjlL75+ruf/+SzXSzG5c//5DMVeHO1/fE/vIhdtpVDQ3koMSXXGI91fL91kMfvtg3Ptr/7ZvZnG/98hUKyKfvfXR99/tSQrz9bXf72m6qux0zVyZPTZ1BVZjuMGMfVEY3DqCgEtqraKONsaTXzOAQZSonl4XLTrurFjP6jf/ByHC7/+b/6mwC6X+vDdtvMjW/90MdpFhBxScU4bpZH280GEXOIlglzgjw640xdhxCHsWNvgL0BZwlEi3VcSrG2GoYeJ4azodpVREZyIVDvfZ/2+369auY5FQFomrrrOmOcNTNCQU7FMBF6R8570dF5s14PjZsXVS3Yd/HoeAmlkLMKMQzb06OTklPOVLLc322bWZNTYmTLPEJhaxft0cPdutsH1VJbZ8hY5sa7WLIK+KZBDEMIJWj2UlXWGpA0lBBVNBoiwlJwPlsab2/XG1IwxADQNjUA2tru93syJEJxFOtsYWJnq7YhzSDijA1RA4ha8tZjkfVmO1vOXz1cKZXT5VIxsfWTKwCjqeuWEUxlUhLDxXpTLY9SjBxKisFaL8zrXe52u8ZYVujHKFpU1dcmAy+P6yBJpBjnNYzGGzLsjFkt2+udL30Cg2OOVIxomZRPqhAlUWZrmzEMiE6llBIhYV3X9+td09ZxTKIoachGmdg6z4Zj6Ouqbo+PmhqHvrcRVst6zq2vnl++v/aN995dvv76x7/8ZeXsdtvN2nYcCyAIIFk6Ws7Svvs3f/E/EeVnz37y8sXF6+9+H3NfWWsYQwiEQkJS4vZ2XYN9fv7i21dfbbveVn7RNtZwScTWOWuSiDGEJSmqdcZbO4Y8O1493K+X7eLd5fXKts7ays9mrev7uycXJ2UoMA6Lptq1dc7grbt49uTdu7uT5eoDIIQHB43JKfpDB/uQXDYdeA7uQnDoT4scJDzTkw4Ue1BR0YO946O058AbmlRCNJUpcoiuenzwAUJ6tPCYnveIEj1aayAcJGyT2APgENKG8IFeNP1CFFABiSlvNg83NzfXN3fb/U5RN5tt1++IbAy5H/anZ0expLHEEELT+tmiWe+31rJV3u5unr94Xq/mSBLz2CyaGNOEbxDaWdN4LXfXV+iYEp+eHoUYx3HQkve7iEyztllv+ylbFxCByVdtTiNosdbFJO28DTG0VZXGsfJV5et+jKAaQkQF9tYDoHH9mNoUnpyd9Nh341hAqqa2Q88kMWbDCALr9Xb86psXz579+Kc/e/3tN3EYa0X0ZrZc5Ji7IQMIVfNPv3z2ycWz2cw/9Nu//KtvwjDkLHXT5Jy4MW07e3i4Eed3Yx+jpjD2Yw/4/yPrT3ply7IzQWw1e+/TWne717s/d48IjzZZZLJIZhazEqjKIiQIVUBpUBqoppoI0O/QSDPVUENBQGkgJFADpSRAophZzCywS0YEGeH9c3/N7a077W7W0uCY3edMXfgD/No1O9fsnt2tb32NTeIlmt39JsudIVTEqiwE027fDaJjCsxm8PHd3ZbzLHzx9afPn/zmt1+OYfTJTwSF471UQpqK4aOl83GATKnc+L3CWTUJJNGY1IfoYhLRGGNKEoOPwThjVBISowrIgYFNSEfkBQFoGlRTtQMKBAhTsT4d5BGnE/tDSa1H6xaY8kEm7JIO5/qksN02X3791f366vbqSjVutt3lzaWIXjx6bjMX/PD27TvBFGMcQ48ECBglAujU+n7vbPpQ5B/+OA+yIdWHR+Ew5GkiqOiBLSQICKSSEgABhBQWecljHw+vx4nKQnTIHEwAlinLTNd2s8X8yelCUsqrYhjalEZ0Ze5s33Sdb6/vN6FtSgsR9Wc/+cGydBL2KCGzbgyd77qhGS6efWisCe1uvjy/vnyzuduU9UmyEFJrFfKqeru+/J0f/yTpCIwiRKYSU3ftzTB2i7JcX90tHxXlslrvxycvztb3rz94efbd37/KLSQ1r+73H57kp/Mz1jT6oShmQ/Aao00AVpIKc+73ewKyTAH6ENE6w8wsCZFSiH3XY1aAjhADKUnU6COzGbttMZ9lVbnd73J0IGHfttVyYS2F4GUc87xsmru6zgush+F6npcaAnCpEMmUIHEYe7SlUg5J0beqHoiZ7OQ99N5OSAVUSPW9ygtpEm4pkB5JR/CeIQbTKvaA1RzZZd9jBb2HffABDj8OmIefHliheISHjsa6Dw88XG4CKnH69z7b4PBGBBXTFBwPoHog/uTO5ZkzRD4Jc4whKoHENMnijHFwuKgqqypMiVHOMZM5+GSDxJSYaPrLoJKIGAYAZCLHRlWMcc4SiteUnGG2VBZ5XuRFXRWODAOJp+g1ifigKqBJNQH
JtCgbIjbgyjwvsjIrsqxk65BJY5QkRDSbr8x8bss5EYVhGJsNJz/6IeiIRARkmW3uTOGsy4mZQTSJqpIqYDpsLaDWEhpky2zMJFYwtmKbsSvYOXRVsiUCEyhqPOC+CpAiaiIJEkcIHcpAMJn8JQYiBjQIdAgVBUlH3zQEURCBlEREJYr3Mml5YpTgJYQp23raCiXGlBRUlUlSiilNtuMpRTCZAkRVRjDWToIrJEQiEoQEzMhCCIoaIQqpiETQiNaipsksTaZ8VLaEbIAAiYhVZQKSIIwgCZEmEyjEqBoRSUQgGURkYoGgKQKrIGACxIlORKQJIakIyqH9kw4zCCfxXRoVjUVrfAzWuTh2bHO1hVIGyClBCkl8gphEEtrDuqkConJYF6fCWQVlKpEjMSOQKESUlOI0C8zDxjTZYD/MPjnuZwAomiZ8biKKTnfrsLfIkRaoOBkSKZJMpknAIkFkEidHJEoSQIlBAZOoEIGmiCSqOva9iDpXBhmMYUJKGsmwqyqEg91SVVZGjYCmlLzvBSKpyU1d5atB+iIvZ/UqkPfcn9ZVSXR/tx7GscgsJqIYQ+eHfRvRjWOCxAKmrEu/30iUIs+MJcPFQPh4fh67tlpQDEHG4O+7k1XtnB2iZzZPTs9/+/UrQ1mRFxiCaHj6dLHrZZ6bLeBoTBDN82wEjSEaa2h69ykOElCSMSQptU1jCLyneVUbQFBPAMZgD4kArJq23SqPhtzQ4ryi3e16UeTnq8Wua8oqs47CCF0XJEbnGCX5Nraxe/HseSdNkrFrR5ukLrK+94BAhd10fbwfDRNoAxBDUCaLBPfrXcZZnVeWZ+O6p9Ea4kdn83FM227M8rxv29H73JQg3uWZcRBjsmzD6NvN+OKTJ5uu16iD6K8+f/Uv/vkfwKc/+OLrV6WjxbwK+27fDqIQo3z26m2QNK+zk9Xs7eVaUkLUlCIZGKNnJoky6cJAUKNOSWfGZIwikqZR9GB6KKKT/DlJBEA67EqKQJY4HcicqEBMCEAiB+N4PVg6KB7cIt5vPgAgURezOcCMePZoZd5tvsxKGwLzYJahjN22NHl7tyfNT85WQ9jvhv6z3/6WUr+aubbd7zZr58qMqtR0683lqjjJFqWPoYv9jx599O7yWzHU916TBdWnZ4+r2eLu8jYrRr/+arftFudn9dmqurzxgcaA49AbFz1qg3bcb/Z+u5jNS8y73fji5y856e363icBAh9SBLaZrari4nxxfbvO7aK4WM3mddCwOjmvC+dTr0RhbL67uX/69BFxrOsMyJrcFlk9jL4yJQkZylKCwhVB2phSVc7bZp+XhaB0sWX1ZWmH0b9+dfWL/3SB9ccAe0t8+e1Xb/v46LS4uPigCeTOaH+3zaQ1rmr723o+b7Z7UEWynXaD3w+xFxzZ5nleK1YJjJkvUJKoDiFV9elscT7Gb42NwPliebJcvdi3w9PHHzbtFoVE3BiMcRlTXc3rsd8Nw201P9mur7PKGeP/4B//oN11T19+8Ju//O3dV/R7L56Vsvtv/9t/+m//L3+xvnlalvb26/t/9S9/m9nZH/2zT+3MoIOqMFmZhx4cypsvb4zQ3e72yYsVlCQqq8ezwtqmGbpdF/xYVuXEwT95fDp2frcem344Oa3LepYnurl8W85nbp5/+pOXcd/1d3eIbGxOpV1au3s39l27ejJDk5R1c3t/8uiEknbXt3Wk+9d3L/7on919+3dps8+erfajR2Py04VXbXab4nx58ewjtamBuyyT8lHhfV8XmQpFCWrM6encKOx37XazdRZj0LrOyzobfSyXs3Ec1pv+xcuaitk3f/i4K07+yfPf+T/8d7/KcodkxnA4GBGgIQ4+tu0ICsZiUZfOmJjGEEZ2hMhVUXpOtii872OI226zKJegulqsdt1orQVNIYqKWlbD5MdRUhKJbNmZLCqMPhwLIc1yG9Lgmx4Jgve1qVU1+gCSOMPTk5PBp1ld3A33Fm3bdmlIeemqxSzPy6Hv231r2Bpn1OLQ9YgkMe73O+sMgcbgi8IkUWTHZFRj2zZJRQkNm7ZpnTFFnnf90PVeFUEZmLLChRiTpHpWNbtm9P2+2+679mR+RobCOLo8A1QVMWR8185mcxAcxzaRGpfJ6MUnUrNYLNANY2aEFEjmq5nf4d1277vmk+fPVb1xhg2nqD55Ap6ahMIqmjQkG9O8zO/HvqgLw1nfNfeX96WzDMlmhS3yCGmMwTrHLmNyrhcd+/m8urxrYgjIFo1LAuvtsCjrYbcrMxuCRwBjTIwy+EFFlHQc+hTR5plEyQyHkCT2WW5X89XQN4wAGrtmt1g6MCR+gJiW9WoY/Nj4nPBssZSkxjIQVrO5K/I49MnD9XfvzpYX2VNjajv4EHw0xqCXlER27fXbb5zll598wrx8/ebNdtuWlfFjShglRYnJWRNjSGk8rU6263ffXX75+PTxoi5RYGw760riSYaMIpLn2e22McYUue27MURFm+WLUu8wKSEDG8hsbeIWEzHryXK+247L+Wy96S/OFvN59ei0bvZ7eH8kgqOnrx6EEHjAg6YdY2oFPxyH4NBlS0yHXvpxrzk6ED3owR5OWZNP8NRtP/Tdjv1zfZBK6INNzIGO9D206FDhH/fGB6BBjtjWdEk5MmpTjPvd7tWrb1+/edOPbdO21jITGmK0HPz4+OkpgG7v196nMnOny7moouGzi/O+2S8XF7NqZcn0/b7dbOZViRW++fqb1XI+jD56+fqbN3fbpiyrs7MVAtzf3DSbHRIsFssUw3bbAKjLTIqiKmVZ5kWGEp3LmMiYSdxEwxAfP1pdXl0nVULNcocIXdeXRQ4KIpIgbfb77utvnz99PM/m+83u6urGGGNNXpXm3dt3xJRZI33/9utXZ6fzT3/wI2txaNo2JrF2ZoqTk5PVsjJloXHcXF5+8/Xl6H2WAjsakcZhHDUUxWzX7ASEGW/v7lgDeqmdG9p2Ma9QZNftf3D6wX0zJoVoaYz69r4hZ5PAOA55ljPj2HdX74IOYRyk6weN6UBZwOnATw/3/HC2ng7TeKiaCYGIEJQQmZARaLLmPdBpplP6QWUWU7KiooIKhAYODXp5X0MfiugHZ6sHGtxEGjpQON4zelRVACYziwPzaWoOCypEBR/18ub229ffrNc3psCx9VeXl1VZzU5nJ4uTL7/+YtfeTXc8eI+CjBhC0CM2RhNydcQ99UjgOEID3//uPetq4jTBAWLQybQLEZOqKrA1SVI3DIV1YZwCkuAwRUlU6ZCKC3YcJSZ8/Oh5144E/nz1SXt934f2dD4ffeqH8fruvmkGQyi77lFZfnA+l3E/jmNVua7t/Nj2zQ45y0wWfFfmxnebd6+/Xc5OnTFjiG2/J4N+6M6ce1Jl/f3aZCUC2iyPPt1db0CgzOiLu+9+/rOLy8uQhmAUbeIqs/Pc6mZj0c2W89eb/sUiPJnb6EcNuYUspOTHkS0qQErRuHr0XsmJ9gyUQgAQUW/ZiWjT701VpWEXYgIyQ58wpaKsdu1tXlVoqZzN436PpKDQbjZ5lmemEgkEOMtrSY
oZPKN4Sv377+8L3z9aLdj77vJ4eFDfGLX7+s63K86fto7dnCjK7e3g77IdmdLwLm+7/+l/+Xsxb+zRfX810wDdMEERFEBICcrxwwuiKmyXkmUySQMaOj7tA3TVOEkJAuzh/td9uUUlnRolnvDnsmLasGNJchdGPHrnTs+/1kSW53N1VZqAiLolpwvO9HdJ40SxJgY0dMbl1UQ6iGbjBQdrhYLDQpIXl24zhwoDxEzewQpmlkZlJIScUE1ECtqoqsutsPDti7ahwysg3jsCoWDAjEwASBNcv17X0oiqopHRGOM98Vt7v7k+UGUNlBSqnr46pdMOYi0P3VtlmVrmBil/spOO7iCJiWy/Xt7XaKEzJOU6wJc855HDz5qGnsD0VVSwbLqfJBxEAFRcEUmQR8Nw7B+9K7KNmyek/z3D/HuKorCO7uri+C74bRFWE4ZEYT2o/DvQkXvpFsw7izpOwojrEuQhUKFkUHXT82yzNfRtK362Wzu98hGDMDQBIldsyIYMlUTck5lsTMc79MVkXv2RU++BgTpMyE5yfrq9ubuE+LxSIlWZ6fffCj78nQHcZxO06VK/KUdMqx70GzI+u6LQTebwcf/K7fj+PQeu8YppgPOU05BQ4ZwUzIV6JdjBK4ZuApDd6H7jCA58WmoayHwzaNY+2L999/7+f/5suM6emTZ189f7m9vf/w0d/75VdfXjx+RMQAIpLqqrh9veONkzgs1+vN2cn99fak9vtp/+zZ0z/52W9Onl6ePTp/9eL19f10eeKv3zx/dyx50G7wQSECeDh0IJCqgBkSfUsHmkfUMh/SHsbUD8fv44YM0AzoHWDI9KE97XhYMTQEmvHE83Z53sJ/kyLCh/c2/29DO6pXDzrSbHTCnCXFnES7fhhj5MJT8Eg8xWmWqObjnPNORcumQiKRJKIE3G0Hz2FzsX7vg2dq4jwE1Dc3V+PQlU34zg+++/bldQjVar3q+j7KuLu6Q9Fq0TDYfndTOmqWTeHCbnt/cXEePGcRBFssy4vLZtj2aNiWZeWdQ3KZWvTnqyVqrr2b+m6zrg/deLft4pjKIiy8b4pi0bjaUcHQrBZ1SU3D5FxbrquqGBuzKQ2DhALb0jNTzLpqG7Ukam3b5DR6EFc2LhTsA7BzvmDoFbGol2w5JQVBcxQTLFcX6f4KhKY8qExVWUGgRL3xDsh/97vfff36msCyjOjdj3/8/Z//6rO8HWJK07A/P1uVge53w/1hEAGHtNksskia4jD02fuqLOOUd/vDk9OiXbeGNMUxTdEZMoBjE81OQQ0kJ1BQh2ameuz9AYOsCmJxUhUWEXEp5+SDjyE4532YiuCdD8QuM7NzzDP3mueuvmPK7CGXNnuL5qtmpkXMuOj515ubKwfp4uxRU9aHsfMTNsv1OA2+CCmlvuumOJUB66rIU66aMEz9MO6c823bFoW7u74CABd8Pw6SRcGqunFMjrgsi5TNkUsxB4chYJxG511dVsUM4kVrmvJmfw+aNoulN5lyTM5z5TIoggKziVSFjypZ1Qi9Y0MUIFJBIiNTgGhKpuMUS6YM6lxhpqh00a5WwDBNMek4ooqJwbDtm8UqpRE0EQcgff36NUPZlAsCDnVQEV8W+RDZyLkiDbHvRhFqV5vrN6+o8J5c3+1X68VAMOTcNityxd3LrwvfgBmoaRYwU7QM/Pjy4stXV0+fPHl5u7tYnvzsl19sAjeF297sq6rZZ/qnv3ipvvjdT559uu0/vbqZKv7OB0+Ht29iVACCEg9TqsmFUB/utzBTdU1zVslTEQof3DCCd44MhJSdZTBJFhw7KigEM0RiYguuFjG0ETmr6HTYF+ypBVcu06iKAQHiOEnuVquToq5C2ZgvlWtkNIuIgp6ZgxqBKnNhM7XmIQr7DcLtG6/O0cH2jZlhljJhdgLZt4i63/r8bylH79Jr87fHY0rgnTFpXj+P+vsMA54/BjsCr+c6ZDg6ieYvIzQx/C++kAmRm/k+gIbAMxAewPToUzI2ZVQzAYSjgmBKx2+kZsJIIFoClgpBrTcwxZylqioFcOwINTDXZSiDr9saEVSy5Tz1naXMhBZDDqEskgNwgOQJsgKaicLUYRwgDZQjocwgZGKePWZqiOxn0weAqhkig2NkBnaenWaRJCaoOeH8d8rKnvHIKgIzcXOSbx5/zlYjQxIhAxNTZgKH7JC9oQObUAfQNKc+gDyAISiIIhpEQZx/bACz9REMUc3y8d/dMuDsLjqmA81As4iAKGQBiWJZQSynnMWOerqaSALVpGhIU8xEgCkCIhG4gGAgOheSmhAZYMac0EVDJHSOZkBjMGYYMVBCJHBZgcylrOjYnNM8gko2wHkDAEjew3yQBSBVmmc8NgECA4MgEAGygdn8XlVNMljWebXCYIgCqjpfKRlUTFSzATKAsg9ATFVtxcJCBb4AcnOf3THURoToFZ3NtU1zBH6uYZXsjwLbjNRCYsfMZAYqM/R6Fk/Z1FSZ2RAYcO5vhXkn8m2paJbs5qXs4Y6eayltzivOfHIkYkQ1PQpSgERMNG875p2RIjIB0zF1elwrCGd/EtJx0MEmSkSSE2qacgxEDsKiqRk1T5OZb1bnw/310I00THXZytjfX98e+qRAajp2Y7uuoooJFt7f7faStCyYNBcOHSbwgD6r5KyOyedxwDSUJXRdV9VFCE4rAkqnl8tDp4f9IWdftU3uInZ9XdeGzf11NwzZiIqmjZL73X51XkC10Ioz2tPLJz7wNMZN2Sx8HQPd3NxvzhpiTIMV6EVpFHVZIVqzalPqy4KnfleQtss1moSCQoE+EELOmct6VRThzfXrvkMzq+sqBGvq8rA7iEQDrOry/OTs1fUrAlBlyQIAaZSiKR0Hhx5TQgPniqHvg/Oq2RcWIUFSIE5ZeDZGqmbOSej1m7vzy0c5TwF1uWz7Se+3h8LPErsSIwKNUx7iyJ6GfjShnMc47Yu66KauOF+0F+vhettWbZqE2PXj4MjKJgx7SUkcYXDOse+nMQ2JiuSDcwNn0Jlu5dCjmGRhR0Azdk00jogUfMhqMqbCCkAygBCCGjF7VZ0dbPP0VlVDKICAZ5vfvGrh3EADRITuGI9XldnSH5yHozEQ7QGUBwACEOpisVrd7nZJ06G7rjkkwWoRut3+5dufP/noUTfR7X14cvksT3x3M4nYzf3bZ8+evPj6ZuT0nU8eyZuBGcoKd1E74OWySjYQOgt8cKnncNXFR+uNpnFzsbnvdtvd7clJeXvzuvLN0hVxP0777cX58v5+f3d73dSn/dR/9vlXlb9chLO2bkvmLFIWTZ4k5RhKL0kAEJ03gpwy+8L5ikNtIojmXXj64ebZxz+SGLf3bw+7+5vbKwNYrxdIWLiQpjGrEhcQQmF1P9yTs1AFxXTo3/gy5FxqJucJOSNJvarfvPi86589/v6jstlc3V+dXKz/4I9/LyW7upo4wNvfvPzgow0B/Xf+vX/31RfX//rXX//uuqqRyqb+6uuvHz26cISDQrW5MIO6rvtuu327S5HXRfOX/+qv6mUrauV6tVwtkkw3r6/Qwvn
F4/6+u3z2pGqZFrwb7Ic//V2JGR0r2M/+4ufjR5+cn5w9eu9R2O/Rwfpi4Zj67fg//1/9L+9vX/1n//d/9Dvjj8U13STnf+un28Pr132+/PEPOdFtGC5+xHq9evk3rz740elvvn716OnGlYub60myrjeh79MQ0/lZvW6Dd6YSDajvMhjnrCpw2PVNU61PT9Vkd38IFYEZejp7emYRRN0HH/1wcRKmId+93S82xW4/1lUZI108O+WaLacPPjkPwd1u+243nq+XIxwuH5+UNedJnPfWxWZVpZSa083JpX/15f32bjo5W7ZNy0ELp09Om37bJ7CLZ2sL9Orltmz09FHT7xLVBUahwoVF+2/+9FO7lg8+WJ9dnsF50w368v/9i8PNm3/w3/53/uzP/tl73eNPfwMA4AtOUZp2cegOhQsGOHadqkSZGtcWRTnsO7VctwuzCoiazfLFy5cOXSjKUSApDFPSKASMCnGKTByHIVShqiqssq9LypMRgjkOwaNc3V2nHJumbetVF2/qsswa+8NhngmHAgvv8hRvb+68C6HgNE1jGtp2UVWLN69uM+aVa1fLRX/oiI/NJOMwZLCoNo1TXZbtqs1xNLCiLacUxyHGNLTNugz1bddXbdt1PRcOAgFqSilLIsAsacriGIvCYz8C23K9+PSzz58++fjFq6+bZSCS3X439qQGlmXqr7quJxQugwIaIRVVkrRat29f7UvvDQWQ2DMRiUbBDApRVQ1FZa5szVnNYBwnHwoDjTEnggS5qSrnXYwjgU1pDHWdcgJNSAQG5DCl6Iqiy9htt459YMIshro8PymaXTfE9x5VBdO2Hwrv4hRnUQNUCPA4viTMambK8zuRHIJz5AAsTbFY1XHMu+3IaFOfxjT0u/5k3QoxFeHT5y8bpEePTlzhY8xFwdOhn7pD2/o09oxyc33fdWm5rIlxtWrv7ncguSqb/W6HAFxoTpNniGnoui5nLQtOcSKAtizvb3dlUVpWy9L3U7MoxfjzL16+ubkRTOaGyrv1uv189/LAOVIe+hGRLErZlvVmkefz6r778P1n//xf/dUH5x98+eKzy81ZGzjd788u19vb9s9/9qv/2h//1uriHN5tiY5jbD2GF2w+8MzHmIfxtgEAqMoxAmbHJw/gbCmi2cV6PI2YgamagiIdaUQPZ57jGQpnoUdV5oPObAnRh3iEvcvBffv09MBIegAGgEjOImqoBuM4Df0Yo6iyKUuawwgI3iFCziopg6EkhRJXp4v9rh+7abVepTGvV6vApWRbbRbDME1pqpfLtm4Pu6moqrpdGdPd7hY1oUjbVJvVZnf/FgOcnp6kMaLRo0cXy81qe3fb9V1TFWUdiByC5pSWbbtoF9M0lcHVtSdn/aFDdFUIKcPL290gWga3rot1VXn2eZTzZtEuiphyMFyF4J13Dgmk8lg3C7jHlBN51xae1Q1TdKCTaIE09Qktnm4WPviccxVYNJkJE00aF6v27uZut9tWi5ODHgIEg3uFWJRVSlNJBYmvq8LDqr+7hTKsNuuyqKZputt3kcP3fvTh8Gc/u0vRl8Xb2wOahYIv69X+MKY8bfe5KatlXXVdT6aikXzBjrfbvqnLoqmpCKEQSxOmySRjVgBAVXZs+uCKOO7QwdRmiGgWU9EoUTmnGMm5EPxcPTAx+6LwYXYYeRcCkvNuzozMpWezwQMRHlIkxwikfisLOV9e+OOf/PT89PzQdWXdUslZzTHpFLvtVnNiAEcAMzJ/nCYZJeeyKFDzYbsb+8EVntmBGjeViIaiMFDNycgxY07qOFSB2SOhFr4ERUh50TaMOMXRxvx4cxqKojvckqZVXe8Po6ZcFgWKjSmtm2bbHxhxPqdlVSSe05uMIGqo4BjJsC5c2scCMSWrAn7n0el+e1825e6Qh4QpqSbtttu2qmKa2to5ht3+rpvian25bFcCzA6TJEPp02CEnrA/dMSuIDdO4zj1y7rRoUOZ1svq9fMXm5MPAKwbDmMambQqfdboC5fHNN/4q5OT5Wb1yy/uqlB//er6cl2flp4QrRumcZoCBl/+5vl1Oow/fO+9ksIX+8Nnb26WzI8cIWCM0Qlm5hB8GlJKyXkPaIaQRDMYI/iyEWS1o83QBe9dcXf9an16wi5Mgy0aTlNfNjVWbhgyQ4rTkAWCC0m4pNqVi5wnUjMxgTylqSoCevOLVRQw79jQhcqMJdvcWmQmiAyogGigYAD4Dq58XD8fPELHxe/IkoYHF9BDoOv/TzA6eii/pRcdudQPMvmxcOAbmyU+LK3HHKY9KBA4v9AxADx/wTtzxTvnEwATAcymh/n7KpmaKREhMJiCTogGR/swgSkcbSSomgnAuUJydIjsqHFUO+iTRVBmJ5KRGQHQgA0qX9ZVXZUtBY/Ok+OcckowieVJoDJP5D37wPOwPMWIYJaFTE0ykzECmrL3QKymKIhGCoYAkpKIgBg7j84hMuLsOFHTSU0RUbMhoGNHoGaJEXFOV88umOMohBSBVNEhQLbj3IHRBSMGNEIzSArKx0xSengCylwzf0SGqAISqOADYhkkAx1RNzNRw8xMQMUsm2RLMauYquUUU845pZgtqSpozlklZhHDYtJE6NQiQGJCR5iTElAWPWrjzKKmFCKwIgNB4ZiZCigTSAaaNCEQ5LFwHBN5UnBoTIioKkwMpkyO2M/wZhRhZNBkRyXSjj0C6Oa/DwKqqEo2FdWsmhTRjBSnLCYAWQwMNUeTqCpqBMSIhogcKuEAoVUXzBWIrIY2C52EyAzAADxrqQQgZiISU1Zko4jEwOI9gR1JQQiQ1VTFOSBE7xjn2xXQjraeo8H53fn4wVWUxd4FQ2dNShXM6KFUwJDnmxHJPIWZE2xAiDj7qIlozo4SAqLNNxUhIhoiECEgM7EhP0zIFDRJnCSP6/XCe1it27JuU7wf+1GUh2TdMHWHsSoMg2WZ9tv7LDF4RyBZtQjlOIxjyhicoCKDYnI1pYNZngDFB4DkUGxZtGMc8nQQHwQ9TyYMSc05ISTQYbNaDsOIQL70VY865cDVrhsc+rIpFbUqeXi1KxnrIP3NSxtzaIIhmMX3Li/fvHm73+/YM5A2hUdDT0XO3WZRgkC/6xd1200xp+hNvK829fLzr9+cnC22h3tkKH0w727v74ZdqnxgwJhzN/Q+lCerxeH1zjmqaqfDeDe8XKxDTFrVze1957kouZAsgCAqzhOoOAYicOwFlZyaahIJgeciPlNSEJAUuMrTmMaYyaZxLOum8nmIli15hyn1kveIkchNYyLTahmWi3rXd/Gwh3bZxwSeseQpp5jUNE/DUFZO47heV9uhswQpZkRMOZWlG5J23aFp6r7PSDyJxKyMpJqYkQjmqjua11Z2BgoYAFDU5k4DSSYqwIQApmpMjITAyIDkAAQAeJ6egc7tjgB2/AAt6RjY01wESIREM4sLCfkhgHbYHXixOD3dXO225+tTn6bufiDX5DIkV/SHvmy1j9kVJ1NP/XaMpeMiXC4uXF1kZ1Xg4bAPRdhvr/r762XVvtlu64/fH4aRC7jb377/3pPXd9
Tv+n7hy8C//OLX04STq605gxRUfb/PDv2i2bCa7QB5OTg/8P58fXL18vps9X5Zl4ebvSMGyUw4aWYuTBVUmZkdg2azATQgLxHF0iBJM47E6FxYnlyenD3VnNOwv7l9QRCxLDwEciSAzrvSL4Bh2N8S09B3JeokpYyoAOioXqyH/h4pLNvHcsD+9fXX919pSfF+Wl88+dlf/fwnf/QjIg25WrbuX/zVn/7xH/zwurv66d/5bUj5q6+vPrh8r4DgXeVLD0nvb26fPn329vlnVzdfPH7y8bI9D1iUnslNmEtLXK5lirhYnE87efPirvLBe3z56vXm0cl61a4W7atPP4cxUm5++6MfXT5+cn399suvXhtmGGGM6enTR//0579OB/j6+k9//Hsfe50uP37sGne4o6v9YnrZ//SHF88/+2Lx2+c3b3N6ce0p05IfnS+Wy1IRru6nsimAOdRctaEIdHtzQNTVqpjGFMoCkPv7MYTi4uxcs4mIZFk0LQU3dmNRlAYgJOWqJoEXr/cnm/Lrm7vfOf+Qs7z5avfm+vajj09Oq8UUBQEcuoKK+rz0DG1ZXL3aE8B+1+dWTs/qtnW/+eX9dp9cc8GefSBk+Ku/+PM/WP7+1d3d+jIEKoJ3t939pqqbEKCQaYrb2/2qbVcnvhumpLvLdTFASpPs9ttFmaWbTpLef/H6Cznrh2pK5XwXzEU74zSGopj6Qwglo27Wizc3UaN1eYfGiOHuMKF6h7K9u1+3LYjthz4gBheKUKAhjUMoy0MaSx+KUACk2B3IeN8dNhenh8PQDYMRTjE5F5iCB9/v93lMwlU0FEVTqxv2xFmsKop+GFLMrliQC1VdScx3276oAhtDtkN/QIR5N9P3gwoQmKkVweecccp9N9Z1XRX1/f6GMWwW1bTtpn4sy5BF933H7GRCM4xjLKvmbLUSpOAwDgNXBfI4dvHq1e3T84vPPv1FVS1222HsxvVmMY3T2PeLZZNz9N5tVue/+s3fLBebqICWmqbohwOAEVaMMI7btnIMULKjshjjZEDJAMRw3tsSO3RNw6JCiEXhu/2kICmPdVlQ4Q7dCJAcWpTUD9FRoahDv2vLAiUNhy2Sk2ks27BZtzdXrzzLwlPsxtxTW/txFBe8GiQRUynKQkVEhIiCK2Oa1MyIJAszA1Jg5xkc2mbRVGXRLqt20Tx/8/pk1SLRmzf3k+T1Cb7/+FFhdnd975uKMmrtZezKQl6/uY2HUSSPUyyLQtXI9PbmZttNjv0wbnPMnlhyD6iH/cGX1TjqKAZokPLTJ5fXu/3dbvze5TPnU4xpv+9cmYeoSYlQbm9uh3H37Mkl9VHc/Y/+9u9zWR22+9PNuaXIwS2Xbc7JVHPW5my9WBX3/f4nP/rhX//8V9/7zo9+88Wni3W1Wq+vX1x//fL69/7gw3dK0Tc+nqMshKamIAD2DjtNdqzcmje6cHT0zAcRNDw2x9I3Y+pvUQweZuHzSceOXA8zUzR4mLvOp5n5+GTvvsRUEPkoEx19ILOOpGYmImaa5hGmGXtXFEVRVm3b1M1h2qUYs4ocFSxHVVWdnZ4C4N19D2JlUaxPVu1ycXF6IZNUm0YNvn7xYr87tIt1WVb9NK43q/X52de/+SwPh7YIZR0YxxxvSwYuSxQnKbbLtj3ZhOC6w11JVDcFkeShb0NhzBlJZHRobRNCVex2+7ouAGm3l7c3N/1hOl3WJPb+481+u19drNplu92Nq2V96KZhyOfnZ7c39yfr0yl2MnFRwFjiXW9xkrLw56ebL149/+Txs89evRW1bGBAbVNJTnW9ccSmWhZeUxjACFzhipu7V03dMokv6rptD7t75wsFMEuFo2E4+OAX6+XY98Fxkr6s61Uou35ktT/+o5/85rPPn1/3twca+l4nJQDvUFWGLkpKUnjH0JQBQCX1XZfKthiniNwbzpNX8r4AmjteQQTIyGy+DcBmGqsagIVgqpoFsmJWA9CcBVVEEpILyXvnfErsvPNj8N6H4L1XH4jnHLEzRsQjfpXnUmcwRMZ3p+qHC/Ti5Ol3Pv5euyiROKPRaH1MLriUpxhHQ0kpzrqz88X9do+oOeXg/eGwlZSc56auRTUUHtENMYpmA3TOmVlKCdXKxWLZ1tPUVWG5qNvt4T6U1Waz3m13ZlY7d75uuzjG4EBLIjwcekArgusPg2Ooq6KPIxOImSdSBQOrAmdRRGKGrKqMyRRUn5ytX94fmsL3cSJIDFOz2Axvt0O/H4ZV5bApitT36CBDSQBXb67JNauTi6KuFUhi3zhPKXXdoaiKjHJ/uKN6UfrV7d3b/TSchTZrptKbEuZch0pFcxwAZLmpp8MQtEhTZPIuFGIayuJ7Hz755Wdvi5P2ze0b5BDqgCTTPm33MTA+uyj3u/iZHHp6+dOffNT9+eejqhxG5qIqGQ0IaYxQop9iBCA1CYyWIhBpFg0WyqoIraQUVREYXaFRmAIB1e1pikRFWXoDUSBvWCBIcDbGaez3JbvY34msvF/kKSFpsQhTyofdnqwpWlc2DYK6sAqhIvAgRnPXuJutIAJGD/Uyx/VpNhjAN0vZbKV8BymaO89mmeedovSwtD4oRPguzDZ/g4eA7vwiD5LUu0/D/5K8/m5Zn+8tNJ3F9uPR+EEkerdQF97Nd4VmRSBAEFMiN6sdCEpmZhMiIHpEJGRTUFAFRPI2I1YdM0GSVJahUmNJKEqGBJTjqFgUofSOkMiFolgs/WIJ5FSBstmULI+I7MgVvqoLMBRgE8tIiERsjgwJZ2qyuBDQeXUBDSEnVEXJKY6SLEYBACfJic0FggggwMgBczQTYgQTQAUQBjNJRKwPOcIHLxWamoOY1TgwkUdiYFZiQAOLYIkAkYLOE5SZtmeCYEcFDQxUUA1AwBRA50IGMDUBQD0qeaZqHsQgiamBoQFmkRjHlPIYUxaLalPOYCqSRUXExFKa0WxGM7laSYNiUgMlhRngLwCgZMlY0ZHnZKbKiInVYRb1kMVqT4KpCMFIJIE6JCBPCExHdwMYQCacmOeHPM84K0BU0zm8qCowd8fnBKYiSTWZqQCapKRJZ9K4ZDVSnSdFaOCymIEDUI6CZjqOGmYyIesx7EjovCEAk5opoElGFTAAcALATJKQPXrnCMAICEFF5qCYIhgYeY/HFj981485d9fDA0zoG6kIEEDB0I7DLpg/RgRS06M7CJHJISiAMbGaIBgRz3RiQ6N5RoHH8QUf83HIxHCUjvHYzSkTqpiJSfTe5RTLsirrUm3cb69NFUD7/m67OxDTYX9Yruv7q20cR0YhAiaO6WA4hwgh5gnQLKcpS1mGUDaWldSKOqQ+d1NfBcHACcCS5jw4wLIom2Ix9IeiclVZ9YcxR92PKXhXNw17lMHWq8356enPf/WzVd1sh1y7xqObrqa2qouLC3A45Yjo3l7fHIZ91rQ6WRdVsd9tKy4Wi/rwZhvILzen97c3KY8g4J1frtqbbbef+qoquy5VdWs6dYeD9IoCHLFqqkAgXb9eLeKU7u9vLSYynobEAmnMTVFnHfM4LZsaFS0lBOunrijQe
ciiUSN7lyCSehTwzjMxoRPpFdmH0tCmOEieCFljaleLsY9pGNmRY0tJyVEoF9moqIKiFXVxc7g/9adTTN122F3fnz96XBZV7CbZx/5+8s5PU7zbdk+a9ThlAHaMGMUFGoaJDMycGvT7yTuerwYW8wiARt6pZlOdlWlVY/amlqaMWDjnY4qz5BklOscIhgbs+NhsNlesgiKgGcpcXoBIJkjovFMVQCBmRAKarXcKAKrq2M3lhu+eRqq4vz+MO2VHpceC6tHljG7axSHJyfpifzi0zRoTkuO6CebBB4pDbwOeBG+ih5tDaNqTs8fTISfFboBPX15/fHm5f/kqVIiBWkEFZM5Djvd3txk9Onn1Qh+/98Ht11+L5tOzM1Bij0+fLF+9uCmC2/VT7keTdH13VXzy4+RzknHScdSJQ4gpE2JMU+EKU9E8dIersu7a9dKH2jKoTCKZkOI4KaDMzYnBrTcXU5y6vitc7TlAzlPMwCFQM8qBGYoqiGam5EI47HeSUlGtiAOZbS4fpWkzmUyafvLT3//Tf/YXveoPfvIxUOqGmCDXJ5d/7w//WMf7N89ftOX55ZOn9ScfYNKz8/WrFy+cU09clH5IByNct4/K8nz/6v7l9qp+5D/46Duf/eWrN1/fjgcmWj/96IOuOMQJLNurr67O3lvVrf9Xf/Inh7vd7/3Wj1abVX1SP//s6v/7Z//ie+9/kkf73u+8fzjcFChXt7e3t1/nu7uyiKefnGOMsh/fXL/64KOLDzaru6v7FPTiB2t/gt5DX7Crllef3X91ff3b7XfM6cWZR8JhPzjyRaCYY9NWOUoWdD6YqCRhpa8/e3Fx+Wgc5OSyduwPu0OBMHTj0PVDn1frDVYqIOcXFZD+zk/eg2n0pb7/g+XZsBwO3fX1YTok4mZP+fbN/uxxffP2jYl+8MOnXT+Uy5o8X+37/cAnp8u6HJ9/9jLu++bjJ69f3H340feLogpFx0Kf/eo6kTs/d3dxfPn18N3fW7DX84vN1Rf97eG+PS3AyfK8OGs2b9/sT0/K3N9dPF399/7Dv/+P/1//+peapVhd33w13wUpi2bdb/fAvF7U7DhrPkwdEJRVs9m8d3X9umqKIU6HQwLCsc8A2LRlVRdBQLON45A17sfdpqW6rAK6Qz+SI+/L++t7tMkxGvlFE5yjYeyb5TJntSxl7YvQqgoTlr40NRC9vds1i8ZYQnCGsN/tCJ3l5F1IUV0FqjrkWCBL1rqpRXNKkynWoUEwVRnTROjJKPbj7e2dK50RkFbsAwUp6jBt+/NHp2x+HPu2Lajkvh+ruo5TBKMpT+kg5KgIIQ7TfbbVYpNAS1+ZwZQjMj66PMmSYtIpjVfbfP7kchpy3Zzsbr6A0nlXjnkMuBABKkIXR1YpfBjG0UyXi3VUBkf7/Z2aGM7wB485TlOMMZdV2XUdEo7mgZRckGxT10HOliSBqMnCu0B4c3unSUS1Do5R9/ttXTb3d7syuKbkNMlm1WgVX17dMwJIAjVyDlGRZhdMmhlwnp2BlMGzO5ZxAMB+tz1btaUngPzxR++DTE1VrRbLUdOr1zeff/bFwlftoq7ZKeDhdr+7e1uVkMYpgdxt90XpQ+lDcPv9rusHo1AWlKfRB/ZMc3dvaIpjS5hAVVaL9erF9c2+G9bLsiz57m57e989fXL++ZfP28U6AwDnpnTrk83udn+5OS/PT+qSu/vdcH336PRJL4OnEgmIpC7Lu6tD9/b26eZ8v72Vpt6sN1m6y+X68Pa+WS52Zfn8yxcfvX/x7jihc/XJO0MQwsOR45tDzjz1NlUCmuu6j0qTKiKC0Tz/OHbzzLthfBCJZqY1PEzBj194nLSbHeEGD6mKo7/7we1EOuevyauRgR0nlqCSU0qSsiQxVZmSpJwE1DEV3nt2zDRNg6kyESKcrBdNszDJJhqAnrx/uV4tBdgVPse8uz/4yk272B+6bFaH6v7t1cWzx+TceL/r7+4rLCoMQ451XZLBodsys5msz8+q1ZqDy8MYwDfrtaNiGLdJpa6q/tATsisCk5vGXvLEAFVVvXh9N8QpD/GjR+cG1tYuOHBsRXBVCItFE5yzPFShKotyuWqK0lVV49DFYVgX7LEoiFPSFMdVWyJGAxM1yVqtaiVCQl/4mPKyPbnthvmgtdtvQ1Genr0fY0KAHONysYjjwUSruvJFCaybdnl/c1O1i6popmFUywgp+LI+OUn7w3B9d758xmU6TfH58zdTP3TjRC6oCHjMYEPODZKlGBwDmaqMMVYOh13yM0fDVFDVEiGCEQMAGhIeD7GOjk1lNgM81BslMxEzVc0qYJLFwDTnhOC8Y2YffGLyRXDOh6Jg59kXznvnHCCxC+R4Hv/if0HE1Hc7oh/86JO2qU2yc46ReulEIWXtuojs0EAtjXHc7W99WceUQC2UzTiM0xQdE3sPTGAwTFEtmgmosSs4eJNcFt4TN4V3Th360heKUtdusz4bu6gqlS82j6rKyaHfO8CiqlJK7MlkFlKBmckwIGYAQxDRmh2YOKKMNM2iLtO8axQzM5jUyMgXhfjCBZ9MyHE2QFeMomeL9X5316waj9APaVLenLzXrDZEGrse4li2xcsXX7MrPDjnXCbYLDa3V7eHOCXnuzEVmh6fne/3ew+e1Mc89t1QlHWWXNUeUVMcMOBcnqVZFovV9549/up2+8n7j1++2W3vxuDVZX18skga97vt73783mdfXgOkn//i8x+8d/b2an9AHIbJswvsLdAwjXWOWc37ckqCzFVVas5MlJNUi5ZDkVNm5wCdZCiKMrC/u7ldrJ60i0U3HeoAdVO57LvU5zHVpRPEaYpCu7ZsBEoOjQt1jgqGjpmdV03jcM0huHoZjZmYjFiJGIhmoUgB4KGh8UGsmU0E3+IXHYmh8CDymM4rIX4j3bz79SBV4DsdyI4B3+OSjA95L32wQ3zLfWlHYybAu4KzY7Z3loVmzer44YNl4kEq8qICCsD00DDJR88pZFQAPur3qAjoZidpMkEMZM4MRIWYAMUFLhjrBLXDrCY5qhqAohiaeecJyHlflAV4D+QlppkebKIuuLIsyZFgBhNQ9UUJRASKOtFMh54PO8RIZEjHSoTZlCMoZiKmqoKiqsSASI6ciaKhEaFzoApiAgKgc9+ZyJyoshmrBKAmI4KJCTjHRqgGqmYZDGeuPBEyBgQEZGBVU5rjUuTgqN8ZIAFmFIVjr1kCIJuNsSo2E2ZRNSdUU0mzgDTzQ7LGrCmJDlmiSMpiklUNgJNkNVWELIoCgC6ZCOkUzaGbLVJJVefLDUWMlYTVmzlSywrkKCFNyaqSVTEQT5I9qSfwpIVjYDSHjh0xKQCBgJoRAzAQAc1bhpmARUeZAnDuIFVRM0EkA0MxNQBRkwxZVUVURRERRSyrZQUVVU0Ak+PIZQZmCB7IAQQjR65U0KycGDI5na/urDxbm2GWrPTYaSqJ0GlWVZMZWOU8AfDxJkQwU1MEdDy/bTRA+y9hrWEuZoPZvQg8s2EQAIDBISLMPijCB7A9ELl3OTZERFQ0IJzrqGai2dzRVsz3
7TwSMVMCYGKTZKChCGriyqpdnqRxnOKdJimaJR5yTlklB8Ihp6pdxj7JlImoLMv9zXVZlhrzJMkzakLLxITdODAasTMDyYbExhjK1pDqRcEO46S1D9qP5EJRFgiQcmzqmq26m25CCDlH52zZLiYau/3uq5vDetnOZjEwN3aJCPtxIEegAJrHPqvZcrmKolMU096DU7X73ZAT95qo3K3P1pLEMnkXdsPh7PwUVcbRDrcdsysKM0iePSFXRUDVlCOqpDSxh5Tl0fnly9dXZOiYRGASadqmPwxADEaaJi58XTUzH0pyzopNWQ9xNHCaE3gEIjT04P1842YxJV8VXR/7/tCuGzNjIE+hZJgSEOGYxsZj8CwpieTQLLshVwbB1XfXh8MhFusSGIrKn5wv7+7GmGy/15cvd23pU5rqxiXNlqEoQuD6/nCIyDnPJlNCQucJncY0zEj+B3M+ynGHDWDZdBShsvRKBEgBvUieMVtEyOxVTQEZEcgMwDkPQGLi2B1jzUg+gIF59gaGoEmVkQhncCwCoom8m/K+/+F7d6/vdttYNDxuOxDXtptQrm7vdlXIw34KKNN2++rV1Xc//l4cE2a7v7pfLav97SvS8Ns//KN//mf/RABuhvTJb/3088++qON+UfhmVYwNILq25K+f//Vy+Uw0NIt6HS8yqfSj0y4drpzjAvzrN1thOVktJcUcu9Pw9PTMvfz8q5OTj5dVOfXbNMWqrlQWoMRkMU0+lOwDABJrlqE7XOXUOS79+gN03hXMkoEYvcVpkDQKIpq50JJvGfIwjALKnp2VSFiGBprTQ9wWVTUe7lLcFbgoCxqnQafJoeUp7fdTWD9un16E08Xd/vri6Uldnz663PzNr35xsjq/33X7IX751VcfPz39+//1/ybL+PXbN75cNqFNeSTny5pNdLmoDqNsLjZvv+r7od88Xrcnixd3Lw7XewW5/O7jn/zOD/4///Cv0vnoS1F2jsJ5eYaUX355/Yc//VsygquKP//zv/7+jz8qV6e/9fSj06L67Jcv3zy/HeN+fbm43+//x/+z//Bv/vGfbK8P/4f//T/8n/7H//7ybFGNMHZd4qHZLN7cdY9WwaH+4i/+4iff++nNLRSb4ruP1vf9sD4rhm50xFEh9tGV9fV1t140jp1jRLY3L66XTdP36el7z8qG9nf7/Ru9u71aXzSf//LLJ4+fsPdkedlWP/vrz58+PtttJ816+nh5/eUViAeuBoni7PLJ4vZF2t2mqgwK6e76br3evH51PdyPY6cnT5YCg0eoKqfJNme1eXDrdbtcTrurfZ/fbD998sk5GJ2UJ5fP6qma7t8M3Pq7q7HsMmrx8qvb77//9OvPrteXdH/oHjUnJmqit2+mMa0+/fz13/z8Ny9CPNzurL+Z74K6bZwvb9++qcs6DoPNdQWIq3Ylue8OVjqu2BtmK8089oexqeq328PZajENPSgvT1YZiu1+ZxEDu8KXkaxsGkU9u9x097cSbb+/W7XtLh2KopQYx34iorKuEEhE5tzE7dV1GcJmveq6McfMjpu2RnKarVo4NDIycOTUhqH3YQ0qu30PCI4rX3BKY0zCRdicbA77rlk1+13nqby4OHt7/XJMFgxQXOyyU1+qywhD7Ivk8ySAGGPqDn1T1mVVShYfHBGQWWCUwjWMWPo49r4oZMxTzLf391VZN3W7HbqqaaSPY3frWE2077uiqE7O129evXbMjv20z5lUSclsOmyHScA5sFwWHl3VT5OyEoey8NM4AptzLCqWNcrIxGiWc/ZEiqCaq0CQxpv9Ybvbel61dUUm3WE8P9nIME3jtFy1U9aUJjRMqlz4NEzOuRRz1owAYBSCV1NvYGAppboqTM0RTTkrUbOoMfiIfkjAaIf9zfnmdMzT5nTRpJ4BqkV7f9udPblYNEVbLX/9q0/r9WbY71++ehsY2ZBFcz++2u13hx6Am8qZ1zxJRvFtJaKHw7BcL2/uDsMwPXn23n67u39zddgfFk19sVm/fvXi7q43Lg6RXVHHKTXr1enZ+eefv9ru8rJpoKre/9F396/fFFqu2seKnpg8F7fjFaP40CCJxXG1qLZ31u3vHp01fb+92Jx+/erFxeXKHrVdN71+e/3ucPHO4zMrPrMuZN+KO8yx+pnKqSDvohHfkFlR8V224WgNAjMFm//giDl6aO2ZjUWz8x4NTE0NkeYd2xwPAj1CNx4epSZ5ZkGKJFNNWaaUU9Jd193dHcxMVGOetvv9lGKUOE7TNCUAI8dM6DyfnW3KUIjByfmZc57NJUnTGOOQJj+5sqoW7We/+XXp66aqb2+uTzbrYrFEw93VW81TWfk8daxTyas0jaXzq+XSGIrFErwfuk4OexcITMa0MxvbuiFDdhi8L+pmt9uRUelCT/HV7f1+GBZte7bapGka4/b8/NHV1W51stlsNllSVfjNepUGkphOFgvvrfaFI08N06LGt9smAMh+MJeGfLZcnp+0n726JgMHxmBVWZT1ApSLIqScwQwVgwuqCs6YfUvuenudMp6sFuvTk93+AM5zEUaNFYaLJ9/Zbu99HYqqGYdDmroAihpPzldpUd3ebJvxvvK8/uBiP4zPX71NGRS0G3ofXOWIQFnVERVVEUKBmpx3hWeTpDLOtUH0kFicd+oEoKRHxXDeaRvMRjUAZDNwaIYzR1WyCqiKqcE0ZUB0iT07n5JzLsbJ++D8yOydd+yDC5mzE2LnPBLO0EhEwG/Zik7PF45YDcRIxDRnJiTCoesc4hRjytHMUr7deG8aU8qE0G2Huim89ymlwz6bqqkWZckuEBL7sp+mwnFb1ya5KpxOU1O1i7q53V+dbs58CC9fv7hcX3BOi5Nyf3vTFi0oO8+GB8/cVKWl5BGI2CAz45CiL8uUkwP1iIwwJXXOjVNyzpvpBHyTNagigljuo+3H6cPLiy9ur59t1gVlHyDDRF6GvFuGzXLd/M3zV83ps/NHzyQOpqOmw2LZXt/f3hz2l5eXEMc45YL96TK8/fqm8ps8KuQQvC+q9qs3z9fL8zh2kiOBZUnVonZK4NiHyrKBHG/hqm4+ee/y9u6+aqvD1zcnwX/0aHWo0p/8+jNXVt0unrjXhXOe3Oevbvsu//A7H/zy0y8HkRUGJprihEAF13nKzht7j4DEaGLZMmRB4lC20zgSaBHKSL5crDDL61/9TXj56rf/rY9s61THYejris8vLw8313ncFa4RlDj2w92bxWkwY1eVQGxpAJiYTWKXty+apgjkwTtiRQVmgNkPYcDs7WHhw6ON6OHUiQh2ZPnDw7X2INUcl9ijhcgeYr/fFKcZ6LcVnQcR6biMvkuczYLEt8Nr7xjuD0oUzCwY+NYlT2AKx8nIu2JkKAuv4iQnywBoWdQAEBksMToEAlNVIGIzNES1jGDHiYcCqBKg6nHq4AAKhKDiVIYMyuAZybELDoni7IxJiXJWjTrF6dDBNDI75soYFCRLJBPvGkS2WQjSKHGEnGg2dpiaxPkZZXlSmbNl85iBZkJ31IykgTijGjrVB4QsAhKaIqiCpOOTkD0SABI
eNTJUVYdkSirIjKgJJqHM5IjJW0accdAzOAkATQgUdW5BR5t/1POTVDOYmInNXQI685RsNuagkMikks1AM2o208yoYFnVFGjKkrOZmCSZQ7qIYMYmyQQTWFJjBAZzKMymIBlADMXM5uY0R2DRG3jPpMAgycQLiEr27Ei8CwxWMtWeSIFYEYHQRJURUNEY320PDAjmLBWgAQO52SClIADJbL7K1IxVRbPkabAcVVQ0i5EqGEKegd6SJSdLEU2ERhn37JwRsi/AeXYBfIWJwdXoGwKfBNRm2w6Rc3PRpKrlnJGA1JEq4PzTPYZpmFAyHilFZmLqyOcjpxsB8J3I8w2raO6zF81HfRTn2rfjIwRMEBFnjR7nn8ssICkgMs1EvFlNPKIeEYGPH9EDiOEoLptGYlJRMJg1VJHUdQOAAYQpWcrHbc9uv2d205SHYUpmWTWlZBm9L9++uTm7PN3vdwU1ZVVNQ+fQmxCAMbmUM5dVqMrd/T04NM6aAc00W5pGx46pdAWGwg9TH0J9drHsYrcoV0PX72+HPMayqV3TTP1+3O8X6xaQbq62XIa6CrubrXo9OWsVjbAWy6DGhKFsFk378uWrtqX1ZrXd7bqhr8t23Zy8ePXVo9OTq7v7FFllRJb1SZWkl6xguFkub2537FRMy9JFjf0wEYIPRdRYlKxg7H0RyijKak+fvvfV25dt2Q6Z0CEAmIonBixMXbAiY3LEGkJKggHNIjOqCZjLSRQggQGDqHZ9N9cGjt0Uyjr4+ubwpijDGFMVCj3kaYjaVnd3wwhQVDiMcbvtVs2CA4cmZJDVpkbV3aFT1bKoxmkqy7rvcYwZDB2xGaVkoahilCmpY0IGTRMSJ8lERMiEkHNicoRIjGAgkhVQFOdmBXbkyUeZnPfHWuL5gmRmRgNTUARjRmIg9GpGs50byI78MQYwIEZyWZIeMUXv2hcg+KKfhrfd/e/97u/ff/kGrVwV4Wa7Xa2XxUn4zb/6OTrXnvlyyW8Obzarx+2qvPrFy8fLC0lTiv6zrz9/9OjZYnn+2avP37zen62fbc7iX3z2F+cXNRasYj3m1dNHDqu6PrEpkS1rnK7u3nLh12EZVs1wP/DCLSrX3x7KYvW9P/i9v/rsl5vEi/Uyu/i2u7rPQ93WfRrVskqK2VJKyMxcqUEap6nvAXMft+PrnyfQqn3iqUThcUwIzoXWVbXKTP9VIyFEFwwgG0KgYhz6yjebzXvdqwPG6ImpKCxHDBPmNHQ9iFbV4vb17eN1ebjb3755tT47DW15e3+XWZ8+uyT27+PJ3f3Nk6enUeOnv/7F5fny/P2Lu5f9dTctFwvLgbkwjlyUFseEAMxXV883P/id2368vPyAYKISzftff/Umlfnz518/ev8c2HVdVwRbnbZFiYH5dtiVlf3eH37oPD//4gaD/psvvzo/O12eN3VmDrio23/5j/7xv/on/+gf/IP/0X//kw+Lp5ts+vM//+V7z84/u3/+t//473z5m+vLi404u/z4k1e39v/4Z//yf/If/eFooy8dOjZWI7doWaJqnM7P1ggwHtKbl9cn52eL9jQ4tNbv7u/evE2iHibu0tTm5tkH79++Ppyd187pMMWPP3lcVGxaueDSaOfn75UtvLk+hMaBc8+/vNm+2Z+vLu+6m2efXNQOP/30OhTVX/7ZZ4BrfB1/7/c3OaXd/Sgq1eVisawPd/3XL24vv/9ExhjOqE/jYTyI8X/+L3/zt/8rj6uWNo/qfnu3XrfXb8cf/953AoO7WZSVWzL98t98dbfTJx+f7Dr8P/6n/+lyRe15S29fjf3dR98/+82nAADjFLUfiYmCoa/v9/en65P7/X6zWngXyBUAdn3bq0YKXPnKtYyIZdMOOSVUQnCslmW9bA1gN0Rz3pV1zFFFmqa01Zqw3IRqmg59TOjDItRVSWOa+m5oCi9iYz6k+1S4kFIyT67AoghqygRRBUzMvIGKRI+hKApPLo1TznOGXaRw0yilKxyZQyzZTczc0JKaw66/vnkVvKvLAgRvd4eTYhXzYTpouWg+/uiDbnvQmELBgNY0pUp25FJW4TzEWJAXwJzzft+jZ4doOZnpNCQOxaNHz1gnusOSEYMbDvu2aSp297dvq6bup1FNgguSM7kg5qL0Z6vFzet758pDvyPCnEYjQc8GTOjZlclDllS3yz7uVcRUkSjG5NhJTnEagJgkqEA3jMxlGUrVZJKdUijqbhgd+ylOVVOGCDLepW6qPGFEREA3R3FnBURjjJ6dmhGzRyJPpqZzlYD3oNh13XJRry+WjzcfXz1/40J48fI1F+7k/Gy52fiqy2D3+2l796ZZLmq3mERW52cV4f7upl1UOclhmkbDTVNUAQ0zeSjKQC7EOC4Wi6ubG0WuquBIGfXx+WJb4McffPjZp59n0aKuXNPeb7eqdnp28vb6brs9FEXYbdP5abtanrz6xecNB19YdDjmCAb9sG9CmzX144gEXZdQkdn1w1A5ZuBx3G+Wi9h1NmZnSvztw8WDkwMUHsxOCGQ2M++OWCJ85zRSFZMjOupYbSY2h9DgXaRh3mkfzypznewsPOk8JEeC49FlTh6ZHY9Ixx7e45fPcFIAVZUUBSxLTin2Y357fTtESZJilBhz14/D0B/6PuV0e3c/jNM0RjVzBo55c7IexyzKJ6eb5eI0TinHvNsNb9+8/eTj95tlfejGr58/z5P6Aoah+/DDD31RCUAe+7u7m6quKsdGUq8akYgFX5w/FZ3hEQjDiNNUBpdjtmiA4FzpuYhxMFRyGKdh2TTDYby5vyPCk6IOK79aLeI0ouGzDz+ATI7is8fPpmkoqmK9aOOUVuvF/e2NcawXJYtDhFCxiFSLqusOm3V96lZfv3nbtvUgUUlP12vZbeu6qKpKkZz3HIKquRBKX/bdPYID4JimsqLz8/O3V3eHcfBcFgWOSWrDmGHf90ae63aYpspzETzkAFmSyO72rlkszx+drhblbnc7jsOqqU+WT6/e3u93XaXEjsvATChTzin7wlOOVcEIINmYcI54GIBDNkICQCTV44FzJpzrXHY1K5iGiIxGpgrI5M0MxKkaSDaxY5lKjllR4jQxkS+8c+y9J3LsvQtFUZTOBx/K7DKxm9tymJBnJwgAADB5zWpAoBaniT0SoOScRR17yJKnAYIRQX+4idNA5BRRMYsV05QMFICYXF23PpQxRgDIIoBOTcZpKue3hK5pWgS52JyWvri6vT6rlyfLlvKQ0lgXZRqsbUhJ+hEYuHI8TLFkRsQccyBqQlDGbGRitaeCeULKiAX5SYFdEJEMdtWNROiQlDiqa5arcHPfErSVK3LyntM4LprVqm5evHl9evp0vX6PQafhwJyrqt533dXVjaoj7zWN3W7nq6rLUZEbpklk07aU5e5wZ4KzV7KPgyu899jWdbcfXAhct9N2yNMYKmcKzrln711eXd/eX1+fXZx+9Zuv/+3f/V66dP+3n38WWKsAL/pYQNrEaXG2+PLmZkoWSnZFGckP49gsiuG+w6JIosGUwIHzYjgrBkxkeYw2chlynDSbqz
mKVutTDs2Lly/f2+592ZShmu4sRuVFUS6XY0qaskFkLuMUx37rQ5XEoS/LshqnXsXIdNzuWH/ZDDu/fJSbM+O1kgPieQar71Ans5kHjyKO2QPF/2ENPBqM5mvuXeHSN8Cid7GwbxmI3ilEx8Dkwwo8f3y0Yb7zMcy8/4eL+iHY9g5T9A2caAYdzW9Lv/kDZkKc4Twgokw46/bEfAzCHbu5CBHUUMEI1AQUGIHnFyHy8/nXIRekDmdzoU4puqpghOCcqiJiFp3GGFxk07jdTt0hjYdFu3C8wTkDTY4pmIJqNs0QO4idxdGZzu6OlBFRBSYARMkKTm0mXRM4kpxxNgFpxpl5zwUCWhZTRTRUBRE0U8mEBEBgJoA0G2fADBRMRNGBkyxzHzmwACfLgMSALpsw85zDhllZMTFEm+crSKhqkszANAOAIeMsqBmCzLFqBUURM5tbyyBnBCOauVFmpiZZcrZhiipZsiKSgM3rn852S7IEQGYOIZBhToggRFlsMjUkBEMkRtYsgshIkJIDzmpZbUqJAJ3TwpEGdMSeqXAOHqJnZoDskQiIFFBVjhe0zERvb5hnD5opqAhkUFGNUUQ0x5yipMHymI1EsoFXAzHLc3JN5nmGopmqAM0/PM42ogrNAOxMQIldVvDOWNEZEvkCcyZmm5Ix27yvwGSQHfMck0cwQBXHKGiKbEbIgHTsVKXZazT/o39LKkJEVVMwRKLZK4UPWPijm8odK8wAkWaPHyIhAtMR3kiI9s5NiIjOOYRjwp6QwNRU5iefY+eQ0vwCBmkSKPI09L6sxKzbx24XxRKQ7YddUzcp9jFGzRZ8wViOccssMeaADi2ZxnrRTtkzQD8M5LGpQ7NYKmK2FKoKGFdL74Hfvn1dLsrSt303Imjr6tCUCRKC1FUR99043m02q7HLB80AYqJzmE6ydMNeWUcVSdHcHCo3QMgow6ErgFzRZLBs0DYlowFbKAgRJcWbw01RFYR8tjy53W5Xy3LiXZapLhEQybm7fueCm3K+j9MaXVkycZiGaGQimRyhmCURJ5JttOGuvw8F9dNO2SFySjHlvGw8kYqJ4egoEaHMyxRy1uwcS1YGAxRgN4gQM5iqpLIu77Y9O8/BSZZQhLIo0jRITEwcGCbR7U10tT89LZNMWVMWCVVgx/1h712taCkrEd/vxsBh7IeioJy9CA0pRbMs4CTHPBFSTCIITM4xCyUzzSIMxOwRAFENYPZGGrMYsyCApTTNwzBPzoBNjdnN+2oFPdr+GIhITeYQpKgQQfBBUdWMieenSVZ5xydi4tnjDQAnGz9eLoXh1fPX5/UlhSdPP370+T/7J2m4e+wvluuzTdNMuv3ujy//83/0J/oMVqtPosKvvv6qaMaf/NZv//JnV2fN5VdXr7sckao8DkWRL9crr+LXzd1df3t3T2WxWZ0dbuB8+ez5y1998tGHeanQyuGm95Bv7u+Wl+e3b19ZX+zdOPiBQA672GN/+rgtmuYQD6K6qGqNWVPOoON0cJ5NzJcVA4kmQmXGLLnfPYc8uPY8+DMkVBLHNh9w2BNipdYjJkJWsG53x8amOow7D3xy/sHdzZdszsgUc44JDNKUDA3TnmpgV049/Oh3fvoXv/n0D3/nky9/9ZZyQcp397ehLC8fr9++fHu4utm92L34sxd/9N94vL+5hmpBxcn509P+cEgDY/Rthfevn5tA46ru3txoYOPbV3flqn10Um/7mycfLx+fnn/xyxfLi5IBX3/1Jo/SH8ZX035/c3/69Gy5CSmOdekvL87W5RmodH38+tXVJ995UraN2vi/+N/8x//Z//Nnq6ffK9p6//b2R7/90dn5eXrD60U43ZQvXrzp31z/5Pd/cq/47/zRH043w07G5ZPF1dvtOGqR8el3Tojh7VW/WhMBdl1aL8/6u+wL/OUvf/P0kyfojYkfPzn1COXV5WHblXUxDnl/PwIldlXZOsyyu5+ub/YV8ZvPbi+ebqScGi2Xpwuoy/VHDbuygeLl12/KrF/96je/8/s//J2//bv/8D/59ao9WbTBkUxT/PyrQ+XdYd+fXpxVhbOon3716/dXj7vrt0+ffTgU+Ecff+d+H3/9i6/ee39TFyi9vfnF7XufNKPJl599gelJN22L2hdo55dFFU7/g83fT3JSLrf/2//d/7q7eamhmu8CSaNM6grX97vgQ+ULNCyr0PWd5yAKIEKIYdHoOE59lJRcIEI6jANmdEzXt/dtGVBh1+1W602XRpJwdrK4vX6zvY3JDH1s6loyN01bt2F7fdM0y7kdZjIcZFyuK/XFzdX+5GLTHbZtWw39wVM5ShIzBJymjEiLqp1ijEMUUSTwBbXlSX8YBHI/jW3RjjEz8n7XK9p+26GimDGG6SAqmYkAIYNV7QJVun6SZczZ2IWcshmEukKkGCfyjtgtFmUaJjNUNOfdbJZ3LpQN7XbQXd+/cV/UvoQMw2E0wNAuxjxIHpPQwrcg2oSSUO8Ou3ZxGpVOi3rcXUFOTbMaLBIHx6EfJ8o5q4IfhQoDVkndwTi0YuK8T3mY4uRZTSfn0ADGcbvvbjw2Z6dPD90tWFq25djtPcf1SXt/t1e1bugeX55ubse7Idcu77N4QkfonFeAFCMytot2GkbH7L0L3k85VcEDYQTLU8yGqHB1dZ1V3CHe3tx9+OzZ2clH+3EisM+fv6y4wAz1ennyqLm/uhuG8eTxKbKNu75c1N04dv1UNFW98Mtls725piE6cB7cLg6/+urT8+rMwE7ON4Wj+8Pw5Mn5p1+9NMA/+6u/sSRlVfVjDHjIydI4GGG2sS1q3zQf//53v/P9H//yL//y+z941t3covPdcH/pV2YERGLZgAgwjtGxzzkGRoSicf5qv1+dbcYpEtjZyfrFixeSx3dHlbl7ZY6PmZloYiLVbKY4IxdnHQlRTfCYNaMHmqXqu2PSg+4EKsd5NpI9+MOP7bTzpx7FIZ0Rj3MF7Twdn2mpx0OW8SwlSM4555TTFOOY4mF/GGLq+kGNAcwxgse9pilNUxq39/tpmsSUmLwjBgIjNMeuAOOS68PuYCiL1XLK/eP3L6ImkvF6/6Z1bRWq65v7997/sFmepWkoPd7fHTxhGQpnap7YkVEoikK9SzGHsvSgkjPTXBukZV0mE0cch2l/GJq6YR+8cyYy5YPzuF6e52lfFUwMyWSzWW4Wi7dvu+Bx1RaHKHXlD3mqq2o7DBcXG89IhILgq9bFPHXdsg3BF/0ggLJZUFvCCPRoc1J5c+t2uV4oqGNzzjGzZ4qOmEHRJKW2qTMAUGD2TRmSJlf5tqji7VZyOl2228PWSmgX66qq4+HgnQurdpzGw/7eob+PE3usymZ9/uiw3e7229WiWLatpLjb7vo+xiFKiuW6MgRJyTtFI8tZKRAYEzvnGVkliSU1JdMZaDpHZuZCYXtw/cPxGiJDMgMwOXpJTJXRDEVAzUQA1ERFJOsgE0IIzjnnJKQcc4qOfSijD55ccN6Tc4Lo2YE77o48cxIkopyjigIYMHTjfkxdWdRMhRk7cuM4ACoD1GU1xjhzSWIU5
5nRFVVFZTkOmZkdYUqDdx5SYo9t05TBseec0vpiHdgftltL05OnT0PhZIolNJlkDNJWzfawZXKl99myZ3IMhESEmq32RMGxmSMgNl8wMUbRAtjHDCgJtc8GAJV3i8qnmHe7q1FWm5NFbbhumtPNo6HLeaJq0URjc5vLxx9BhrHfk+SqcNth//z1KxWtqiqng+Osks7Pz2+G+3axnLbjelkuF8ubt2+og5ork0lhjGlY1Iu5sY1cqeC5Oc3bFwjivZcoBog+fP9771/t7/uTevHD957fXr+92SbLMZOg/elX/UVdXFbF3/vOdz67/qtb4O+fvV86K4qqjxk1eE7TkIAZDcZ915ydBS7GGB0EUJSY0SZ2HKfERFSEmBQW7eMPP/jyV3999+rL93/wk77rNifPdtevte9DKP3pxbA9TP2hacopDtOhL922OamNGVxR+1ZztpyJ+n57c//2NZBvNo+b0w+as/dcuYaiBVcYGCDhvM1GADkmy46wmm+xbx6yabOPQo+f/yARPWhC9pBH+5ZmNCtFD1m0B/zuu9+Osd5vfe7Dnx6J1/PLHL2jYDoD5+b/fFdHAABEqDkTAvB8jDV9mA8QOQUlMEQ2RHkAJolmATRkVSVE51hw/mQwEwNhgpxiEsjzeZxobrQyMzM0ZBCbukO33U6xZ1THbjZeEAIDsmVMUWKC3FseIUfIKZkoKLID9gQAmmyOUCOoypzpUMkCoDmTCiMoCKMSMEhmzYQKkjBPppmQCBSAkViB5pYSBgXTGV3OCGgZBNEyCSAZMiATgD2AbmyOwh2h1zNKCZQgm86sIpX5+TlLKTjnAMlkZrTRUVc0ACVTBTXQxJDIsiPyZCCSsg6ZUibNhIhJRRQFWc3YWAUTmgKBaEFYEINqFFC1yTAhIaAn8kCMhiJOZoZbCo68wxmd45RDTCBWUFE6JwoAJaiSoUNCYIOZwkyoRukAInmKmrMZIDIg4YyEk6wqkpPmpDmnPKmCmYJmMCIzQzRVNfRzzGvmMxEhODElh0BshEBEjg0BMSJ60KhJCAJTkY3BFQQEGUEFEZE9sLeQjqlMcqqGBImZvcesFhwCKTIRI6Ey+8IrAXgmpPxAd//GVTRX3auKHRUi/LY/kGZwterDjTyPzWDmXgPORmlkmn83ZkKc6xVYdAYoGpGb2z2AwRC8d2AGKjmO93dd8N6VhZgmESCihAG9h8IRj/vDNKY8jYww9f2Yp6Yogyvuuq2ZE7Tg2DATYCA/jTE7qcrSwLjwB5vYmQCqpKJpJhFI2YVAjhw4HRUmgoIl22p5bpXEqStXi/Xlxa9/9vPcHZqy8M5lzaq5rhrzDsywbkLwu+1+Gsa64XVbF87v+9QdUu1bE+v6HTvypARuUbW3+227WKac4hCDdw45HnbOec0YcyrYE1ldtnfDWBASesl94TBidliOw2SzK8xQDTyTU9FuWC+a+6k7jDpBXCwrRUJ2UGRTmCyqpSoU+8OOsMpA7DyCEpBDZ14ToalYSgJCxCmBc84AYoplUTRWqeXgAzkomDFlIF4uGrXp7Hz9689fdtth9YT6bmyXTemDJgXDBGAG+RBLR76msizGMU9TRqZQFpk0afbBwdz1Y8DEjlmZVLIxqwgaGkDWjIjEjoiJAqE/7npnK/4c/QQS1OP1SWgz2J1IQec1iYg8B1EhQmIGQEf4MEKYL1Eym2+6ue0HAOD5L54XgRtNfpgerXGw8cu//rP31lWx3NzvBwTICoc7yJYun3zcuMW0HWsLi+Vi3w/b60PtNyenZ3cvXpcFIMdQFvdfvyybsL+bFnV9Wte77S1DkYZ+v7X3nvz43//3fvpXP/tnN7c3Ty8uQVFG64XdLkIE0WHqhyLS2I/rVVuGpYeiLMPV7hby/SU+CcnidKhq5zzlNCAJi09TInKSQKMAmrihu3uZD/dFcT+bRolVDeMUJSVkVlSxWFXLdrEsQ6U5s7PDdlcE512oi3J/GCUTEQN556Qsi2RpmrqqCOsGgeLt9a1jf/Xl19bn+7uR8PS9D57+8q8/nVr99c//8vHlo7/77/7bt2/GEfTJdx8Pu/Hu9Wdq6eLik343dbvDsw8XUi815LOz85fPt03pp+5w8fgxl+Htp88VE7j2q6tYuOLsbHV/lavFul4slov67YvXtGm/98Pvvvjy+ePHT99evXr+q68ph1Bx+7j57idP+n3/L/7pP6/H7sXn2/c++KQ93fz1n//N5frEER7SVDi4+ept2r/5nb/z219e11Ofm8q3pQhpqMsi8GpRPjoPb7/e3b3tmk2zOd+wye5uXzVhuQl3v96ePT1/L39oGs8fnV7f3bz84utXL69OT07WJyfmoTmtVutSI2bKf/3r5x8/fjyOuW74vQ+XTz9YJoPCw+c//+LT3zxfrpoPLp989fnz5qwpvF9tmr/9d393f7P79c+fI40Fp+df3dS1Kzx9+PSkCri9jbdvtuN9XDX+pF32+8Ezf/XV84vTZxInjvLDT96jJnp2L97cPb9/7t+Ggfj7P/047Q8xy+Z8uXLY7W7HKX7vR+/9J//nf/ry+otV5Rc/+Pizn30+3wVJRE3rogJwwbl9GqbUq2pbLw9d7wufcopqpa9EJ4uxrUsxrduF80Xf7b1jJmeABmoyVO6UlLp9f6vJEYvKZrncdfuxm2I3KmuZQ123qtJu1mmMoiJR05CHw0Qsz19/WXBZFSuHAQ2HbsKSyqYKhDlGcAYZIKsPjgOnmIfu0A/70JTOF4auXSwQzUDSlM4vTm6vdnVZbzbrVy9fIth2u1+u2jiNOeXSO8h5e72PU27aMhTN7V035GGYhlVTBO+maUIooqZuvzs7vcDexiG2dTlEGaeoZmfnp2VwoKYueR/6/cFAiuBRhMuKnZ/GHRHut9u2WvsQDrtDXbXBhYFwe7hzRaGaoqp3yI4UOKXk2AVXIoBqljhms6wRQIl9jok0O0SR6DQVRstyOY7XKe82dQOQQ3CiIknqqpQsbw5XOiw2yzq8yCerKqn046RJY4qAiKYePSNhGeiIt1XLSZmYsCJaV2VJ8N7jzXpz+vrtHU9wvtqY5jdv98HzxeakrsqmXb5+dU2IeRIQkz4FLNt6I4lWp+393c412VUhicSY66ZZtu31zf4wSrWov/ujv4W9vr2+Gc2NE8Ts98mF9ek0jsFDUQZEkt1gYIhW14ucrKhXH3383qub3c3Vq/vdzUW71py9LxCZDMfu0JYVUBCI0zhwUUxTrNv69ds7cguQ1MfkPffj2DZtzhLHcV23+903UtE8yVKzo7UHQPQ4a557cGxWfN4ddN4RWeGYNAMABNIZSW16jFaboek8gjtWhQCIyvxkUjNEOhbRPjA4jtPyB9qGmIJmVVHLw9QdDt2h74eY+n7oxxQK74NTpX7f9X0/TuMUY4wxpggESOSYEdETVWWxWi4Wq/Vqs/BG4xQnmVysfVHfv3mbpyHsfVO3y6J6efX28vz84vR06IfNqorbrrvfl6Fet1Ucd0WzQMkwTd4VOeXaBzbYH/YmyUw1xsDODFJO7CHGQSyG8sRAHUI/dKX3
i81pUu3GsS0WArY+WVZlM0xJLD15/4kxETpTVJH1+iTKXV1VgBmVJMVmHZIBz8jXksB6BThb1WenZ1+8fHmxbDlJs6yXm1o1hyIwo5kYIYJMk1bFYtQupRGRgQIY+VBKhhRzUTVnp6d5OOz2g3dVf4ho26IuNydtHBOxK5pVu17GaTzsdqrx0IEvabl5VDbL7f1NykpAp6fN2SPOOaY4puHQ7/YZzDsuvAsheN+Mww2CikZDIMTKFznnGXVOOBfZv4O5fAPMejhbAyLwLFMaADoEBQMmUANz83wWxUxFxXQap0TJxciOtSgychr33gcXggulK0rnA7BHC/NdELOKoCQR1TglRJQpS4pESM6xd0CWJQFjjKkKpSuK2HVVVSNhVKl8632ZkyJLWTjJcdh3LmAIzrmiDJ6JqiJM/bA+O1ks6sP9QSWfbU43bTkNfdtWacyW9OR8set6dlSUITTVuN0CITtW0fkI2hSFoXFgTVoSOyZSCcxlGaYpqZoTky7NtXGk4jWmLk99/+jivA0VudABqWmWtD45B7d4/GwDlqfDjlGxdPuxu7672++mRdOqZE0DOh9CVXAxHXoTTilfPD4bLO7ieFIsNIuY9EMSc0VZi0o/jYFrLhfRKJSNUZ4kkwuILubUrlY/+O57019/6QgMrC78KriDapqkrMJB5Xk3/MN/8Ze/9WhZu6oMjhn7biBfle3J6y/evPdRNcaprktyPMWU1bLBGBM7Dohx6MtyOXT3i9WFQ8JQCvD67PzqZf3151+8/8mPNqtN341Vuxr7A4ADtHazQY+QesdFTHE89GU7FasTwobIN4uQE6hkac93169Sf3f35S+2X/8Cyqo5uVw9+qRZP/bNiSsX4EojxxwA+cEwpAQPSx/RvLI9LKrHq/wBSI3fWIEeZKMHofSd7jMb7ezBlmQPctTD+nv8vIdc2wNB+yGkZgDwblR89CiBAR3X8YdngSKhZjFVRs6QiJwBzjmieVxNiAo0k5nR4EjbISF0YErkBBTIz8cfVVFJCCaqyN4RMxqBiWaMlFJUMxXJKeY8pWkq6jDT/JDMNKumlAeLg+XJ4t6jQp4ICQnEFMUZ5oeWMU6SAU0AADXHSdKkAqYZbB51gKGaqQNCHQkymDIkg4zgUPWYO3NuzksTAAAyOwI2U8fOzAhmFE82UzMCACPE2UJiShTmuunZZmMqKgIqJhmO3WoZgGZyv2k29oAE6BQIgFQSmKqqqoma5phzmhGBYqKgSTQZDsKiNrufsqKgIpADFIVkqmAAmkWUAAEToJkmgwwICJotSnTmkcRJJkyBfU7CjI6ZwDipOCwQpuByQWoklj2xIRqCgiGo2UwdEpjGPI1pnCQl04wPWUpkp6qGOL9/NTFRAJqPvZ5IRWcANBiICakSIQAD2v+PrD97sizLzjuxNey9z3QnH8NjyIgca8oEqgoAQZAEm+wWRSPbyO6WGc3aWm0yvevP0ktLepC6JUpqqU1t4FxEoYACqlBTTpGRMfp0pzPtaS09nOuRBcnNMszT4/qJCL9n2Ptb3/f7DtQfAiQlZmBCngZGKJoPhh1NimQQBbOmMNnPcs5kLBmbBkRCJBTkyXJnnc2BpadkLCARsrGWLaExcSBjbWJ0xrJzf0MqmpQh0Lvc5qRe4iG8PH0CCkhM0/LnDuwIgEREAHTQhojupKKp/kzv/HyTTQkADDETHPBCkkS9CsYYQBQ771VSTq6qhpQBKcScNt3J8XIch32/L6o6QKzmJVcWvZGkoJi8T3moj+bD6zUDWSpQuW+7xcwtZ66qK8ooQxCyq/PjdrsumzJnzuJF1Sgfr05iyKqAanOK4k3XeWPdh598HJD3t7fgE4VUVo1I7ocxRbHkQjcSwqyp6rryaQArbKXILoweUau6yinTNJzMkYB831XzGRhGhNbvZ8tZCAnZxsE7JgHwOVbWefFMPI4ZKSm4MaoryhBHZ6zmBNOWfUyjD/WiLEtTGrq83OSyQiVUYFBQJrRAUFLpICjYMaS6tKgaclc6IiAiAGXHnMWvFifPX74hMxOSrvP2xKAhFFKJVVO2fYQkOaUxytGq7GJCrPdtEFVjLVijScc+xCTOGUFVxMEnsIY4ZZA2S+NKZ812vQMBEQVNQMCThZ4IFJAMESCmHDMiEDlrJvedMk/OayVTkmHCyXWIoMCGmVk0MllAQkRRYDZ3/cJAiMBGQKIkNnbKWSCIMS6rGGIEZIK3pAgAuLh4cHl1WdX1e4/eef3s8uPf+3Z/a79++lTb9PDk0eq7q1/88tenZ+/su5cnRw8ahqYcn7xzut1v3z97x6/15vbN8ohtIZLy86+fvffe++cP7z3/+nmd2UrliqYPbYnL5XyGMbx89eeQnmbqT46PFvXq8uUrEXNxdHJ0Mnv2+aaol/76ssbcQgzj/vH9b62v98y3eQzz49X1PtiOV0fLXbudzeYheYQEaBHU2sa5ZU4jEAIYMjaGmPwNIBBDzh6tSTGqgnrxIQpi8mLIVcUq5EEkV9V8fXU5my8IDJoaY0rjDgRSVHI2Dcn3MvayXbff/vjJn/7ZL3/vH/7xs69/dX7vyWp2/NnLm2YTCqhqnv/jf/afv3nxcp/Ds83Xr5+++vCDh8POHy0X7IpF47qimy9rYyMo3L5pbXJDtztazpp63qdetrexHx5+98P15SAhrzftfL2PMc+XDWBSyO9+dO/P/t0vX/3m1esvbx8c3U8SZqfL28uxrmbkKKT06k37T/6zf3b5y+e//tOv3eny9dNffPR3f+f1F9fr/fj49Hxxemx8Xi3O9msZbtpW3J/+6a//yT/7uO9zu+01yOK4iXmoj8vaVQbh5bM38+WycHMf/NdfXMdufPqz2771F+fFj/7nn/zuH36MVfPwdDlv3KvLa9eHWVmM0b95ffneB+88Or1XOOdWJTHdvtm/uNwvZ3X75sp38e/9nY9vrluMbmwLxOgRPn/27NvvX7T73Kn+k//8d754flmY0hU2jBEg2qK4ODvdvNmfrcrQ5tosWQpT35zO5p9/vn2wmt/edMmmx4tjS055+OTvfHfVLJ9/duOH3V//5peffPLD2A3Pnz8/Pj7dXken1/fPZs8+y8v5o6++/rywxXQVzGazcQhE0LW9FiUxx5AQMIwJyRVuCcqS2xQ8g1PtU/Qxpbq0Ydgj5qY5GvpORELElEy77euzlZ0l8WhNFdL+ZrOumiIOsXQWEBkgSiIgFAqjB9DZrDYIhbOVqR+sHv785z8vbXk0L03BGXTX9ymnWV2FMaaYbFFIyoMfazO3ptA01KVr5othH/3Qg7XVzHadr1xhEUiERDa7S7AJk1vVK4EE7CXOEQABAABJREFUDDpK0GCdKyvXjqnIYCtuVrWqxJAwYt95Rcg5ImDtmm7fk4KrnGtcG1omwCSOZLao291Qz2pVRJMqSzn5ENqmakjEIA1hdFwhuK3fL04vsu/3bW+NGQZv61XCmJL64CuqiBGIffR2GrRhNtbEPkLOaCnmmKNnyaqiIJJzTQuD6vvueH6G2b+5uTw7PrOmHNp1UVRgsHLzfui
Oz+qTN+Ww7uvCZVB1OvYh5GSZAERSBBBbuBASspZVYYizQFGWhS0XTWXK2SBGpJjPF7NVMUiulEtTbPu+ms3HGMrChpgEymIxQ2dRVY0jZxyFGRlmUxbUbXwOQUWzMeUyl2WtbE5OT26uNsfL5snDs+ury8Xxsd+2q7q2tRmGCEqqujgHTTEGcZZcNdvt9l9ebuIYj7kE1UffuSjRrmN482J9/uAIM0vKttKQRldWMXsFiCGbYCTqvtsenx8X1jJiCJlUy8JqljGkt/uDrId88rTgyVPpyJRnUIGDyUdIaVKVsgig4GGkMaX6DmgjmDpg4IAywru62Ley1ITamIaqhz2RfrNrwgNdgxBBJywpQEq5HYbLm+3N7Xq/70YfBSQpuIGdGY3hdhi3+90whu2ua/sup5SzSEyMlFLkslwsl4ujI2tdP/RVUaDjh8vjZ69ubl/fjO1QVkVZ2TT6V7e7qqkef/B+yqmZV4XD9XpfOLNcHDuOhW0wx5BkuTgewmhQdAxjiiBZRQpbKzrIueu3iMpkmdxqWbN1xkLB1HdUuqVxlnI8XZ05tLu+LcpytTja7teLenn/3sPtdt2UlUpsZksBWC0WMceirMfdUJo5CytztgRCquysG30oygqQWF3j3L2LxRiGoiiQnSkXZMsx67KuurYD9EQICFVh+t6rS+XilAyl/S2kJLF3Rd00qzyOgCYJdG1njOt1MFzloElBuXbV8ri6h2kYQwwqbRidq07vvZOy+H4cujbHTG42ny3jMK+b05z95fVXztpocgSPRakpIQloQCUVNcwy9bPCnRT5W8V5d5EZnUgPojJBUqczh5EnV5pIFhEkFEYBzBmzck4CoFNVUAyR2Vhncg4YrfGB/ehc4VzJ+bA9yEljlJQSqmhKVBqJyfcDpsRMOSZA8eNQz5YhpKza+8DGsLEpZVMUiVBBJgLTOLYpRWY1zI7JGUMwdeBqU1WzZV015e312rI5O1uGfn/veNUNIxZYz5vb201TGMu1H0dRMIXFFEjBlaWquMIZa0syIYxRYuVcXRaRR0S0lhtr+/1QOHbkehVjbejHo/OzMknby4Nm2Xsp5vf6wj368F5dlo50f32pfvChh5S4dG9ur/sc2lHKsg4plBabqkTNvQ0eeyKTU17OytOj2eub64zRlDb2ylwGr87Nqnpxs76eudoa53O2xlHdxO7WiFFI5FxZ1yn0x2en3/tI9798drXZPnpw9p9a/pOf/CooR9E8xh41QRY86dpe8hDRyL6dL08iSB9Gy47JKFAKQ70g7z2R8SkvyjLG4DL6YWBTjj4VxGVRbIfUzOrziwef/upX18++/ODjj1NTluWMWjPu9mgQDBtZhJ0iYrNsJEnwoTaOqyOCguraJI5jWy0u3PFHcdyE/bXvrob2qr15vb96Y4qyqFf18qxZPShnx83imF2NXCs5mNwzdDBc6je4treAITwk1L5xEn3zgfrNp/DN3fKteDT5Vu6+hL91i51uv3jAIYEqyEFcml4Kd4Hj6QWH1q7pQwQnjw2CiBAaQAaYur8REJhYUaYdsEw7fSBRlKwCwkBJhJgmQxLAIRuX5ZBSQlJWlRhkoq4SUGkVIMWYx8iihJRBsqYwdkAJILLvxLc5eqsxYSYFZFICBFCJWVhFgVggoRKwikJOMeaUUgZFBmBJiMLIhILiAYBgomVnACC2CJYIFY2iQVBCQJp6q4h4Ss0Wk2sMAUAiTPchmlQ5RAJVBTZCDMTEFu6MZZAzIAoKCChkAJScNUtWQVXNgoTEKAebrVFJEjXknBJqhiwogjIVOyCgQU2gbKJA1JwAMwGhRRAFEMAkJkvOmiwoZQEEOQxndGpn9zmzYR8TEnMSArEkSGAMOqOowqji2BEPPngLjWGcsILEilMPvEzeY4lRc8wxTTUTU2xYFciwIgGhKhJhloyATJphyopmUCFElQwqDICaBKZQelQ0yCWwTaBsLJrJiu3QmEmlAUUki6KTUTgrCpqYskiagOE0CSVkiCgBTNDrnDwgEBEwAzIwR6aESMahYbHW2CnLZv6mVDT9tSbs1SGUOHGKmA4gMAJQQp4WK9PFjgdNiaYlERERH7xFU5wTJylxaq08DMqmPjZBIEZUxBRDjKFwJuc4+j4CUWELQ3EYPWAWSCn5rENORTEPPoUwNk2dJccxGVXn3BAC+0SzKmXlhAikXgXMMEoZgtWi2w3Z56Doc6jrpSudz3x++mR3/cpYq4xkKcaQhcAVq3oefUxhAJdt2RCZpKLIgMJEhsGxK011s1tnY7moguYYEoqWhRMocwgZ8qJq1ts1qUlxnFDcQUc0SOyGvl/Nmj5FIkXVpqyTSN3Mu761Fuely0mashZR48zoUwIhy5ITadakhWNsKkDsugGRSmeOV7O99xnx6HhppL+52dZVzUwxp9LWQQRFCJAAHVciWVUYsHR1CsHrFsD4AYuCbAlAEH1gh2XVJONTkIIMEHkf3n//4uWb134cRXS364YYS4PtkMS4kIduSOowq4IkTSLJ7fdh9OCqUo0F4+r5PA2+73tQIkN+HC0bOdyVDDIpGMCEMPXAAiNMcjYzEpGg3NW7ErMBmZRscLZ861kDAEA2TACZyEhOwGSYpiZGUlVNTECgRGaiTiAQiCIfUs1jgOXy9NXLVz///KtFU/27n/3okycfnT5+ULji179+Nl+cWHeyWDbNjD7/6nkbffnoGMoKu+F63RaLe8fnp0Pnb686sWMWf7u+rgyXs1ldl0GKppjff7h4s7m6/PqyLFyM+esX25OjRXPkunZXllVCRjb7sYPS/sVf/eze6cmXL96cPHhQKO32LaiOfofRex8+fXp9XB2Z5XeBckJBzYYhyUgIoskWJTHFlJICJAUBw2wtAQAwjb7PEnPOTMyFsWyAcNfuCE8ZKt8PVJFripja6QGeBUUdmiwpDm2KIRVV3feQUNZ9vzo5ffriq+XyeLvbRBFiv23T8uz4q+dX2pzsklYED985++6Te2GQbpFUfEpydXvVjR2xvLlJJ2erxdFxjPrk0flyIc+fPn1zefPee+8Jz4aBqmYWa//g8UnISlUx9nF/0w+7TbOkhx+9R646fv/s3/78p7/7w3fQuZPTAki322HXj0PXP//iy6d/9YtP/u4n/92//J/+l//o789ns3TW3/vOxU9//TUV8UG5Mm72kz//8vf++CIqnzw62XpfVOXjh4uf/+zLo5P3wFkFvXp1a9jce3Avkv74z/76k299697D03Q0rtcjzSus8dF3Hj9/uZ2V83Y/WpMMQ+GKBBCDPP7okR/C7fVYuerzX752pXvw5OhHf/mTf/FP/2FtTXvbfvZ0vd9sH9yz9x8sjs7oetyfvvO4QPv448XTz559/eySNC9OnGS4amO/b8e+uPzsetngyfvHP/v0c73cu8Ux3uvfeVw9PDkvbJpflMuzurb23/+bn+1bf3o+Hyr51c9/8/0/+N5idq6Ks1kxPymbVQkhtevN6cPzf/6//m8+fXX5L/8P/924vZqughiCpNz3UlTNOI5lVZnChdFHCao5jZsURo
I89HtBUlQFW9bl5c1aMoGBwftxGKG01ayMEvsYx7YtqtK6ilGGNprCoQKQlkW1a3fi86yq/DBG30GOSqSTjGzxcv0mmHxx8cC6UhiyClmznC8QJKdIhKoQUhLIaEwffBp7QkwiuveF49v1bXV0L4GagnPO6+0eWNb72/nx/OziXtynLz//+uLR/HKzmxdN3VTb9Q4ZmXUch9eb65PVnJWsTaZxg4ayKGtb7/drQ9YYvr1dN1Wx24yk0Mxm7b5T4/ZdVKEcc8x5asdgtpmoqYthcyU51lW5GbcagjtqksTsx7KsrIVN30PoxSCzK6lC4JwlSoqHWlVBFLImxyQxSMzB9zmnpVvlcSQDRKaYLfb7LdgSTNX3nWYqralL6hB8vy+K8uh4dbO+CXk4PT1O5Gg9BB+8gHO2dpWP3hJFnyRlREACNlRWpWYpEE5PZieL5vT0DI0FZx9/69H15dX29fbJB+9UVHS+ffDo/uVNG0ctqrqosW87IizrUnIgQONtvw8MRV2WBFoWRuuFDwkxzypTumrsYtqGwhSMmHxeLo5BqannabMnKIrChTEYS9aZFCKg0KzIikXduIIZSaNmxSS57VLbjSmhZBpiRFIxUQGRDJNUs7LtujEPBUPtEDQSk6uqGDKxKesqg+w37WFvASAik2Ho0OMDB9Q0AiCSSp78RAIgkqcBxdvty+RgvbN+ICKJTMmygzYE38yt7wbdMKXZUEXwLpNGBEh0xyuCu4G5qGqMab3dv3hzeXWzadteFZNITGIIkLQsjLFWVDLImGKMUbMgUj2rht4TYT2riY0CSMa6qY+OFk+/fH756nnWnMOwWNVd5zeb3dHxUSaqTs5CJqMyq6puv5OcT89P62a2214XhnNKZAqfsqo6MplSloSgZVEVdjGMt5JTWddlWWPORQYRKJxxhvzQ13VNXBIpgtjCpZhXR0vjzBi2lnV1etwOa9FouFC2ZVOnnHPORdk0s1mOSlhmSVmlrOswqgiyiVYts0HQqrSucK62wpmIi6K0xqaYnCu8H1XRmeJmeGksA6EKxhSVsazrvttihUU164ZkEF1Z2rLJhFVdee/BUGkrU80Smn7sWz9E32PuAaUoCrQgOSZbaDGv3MLNFhJ9BiHn6sWZ37cp+PcX94buVgGNnWvwQ38pcW/ZSp6Q1Yqa+fC+T8NbVc2HE28y/ascZCQVPSzFYWKdHKa7zMQMAqhT6yoQomFQFc0m6wSb1mEYY0AkdtaztYHYucrW9dttQco5ZUEm1zS9+BhDzokNV3WTYsRMTdkYdn0cS1eEMRh2KeYQR5hCPCrIFEMa2h1bZ13BaAywplw2ZeWc+GRn5bKuxPck8fh4IRKPjhfzxfJqu7k4vxdGTwKFMWwAAKyzoo7atiA0RBMfl0Tms0JKe7tvkckwOSoMomVm5HsX80RwO8QuZR8BFqfATsY4QOGK+uE750f3H6CoxDH77X7onEHwLaGawly+eRP8GDP3Y6ysoYyzumjK5urm1Wy28AGGLq1mdl64cWw3N9ez2dyxGeKeLc6LchSJ2aexL1czZWOdNQyJ0JS1EkW/N+zAUD1fqaT7F6v31ltJcbvpjurqk/cuPnu56aJWy3o3+ujsr15d/2effPTp508fPHyXEjo363ajVxz8oIRjigTQtbskojmBR2hIU0LAiT5RuabfDSfnR5bAmuL04aOvXr36+vLlBz/4bg4oQMVspqApeVUqZhUDZt9FVWMIXdnu9ov5O2RWmQxWrmrqFBMaKsula+7PSTHn5Lvs+xg2edznsd9fftmvX7XNwtXl8uzbtj5DU0xejLd60EEan/6X7hSit/rNN3LOnR709psOJjvA3zraQS+6o7HcKfKTWUjf2obuFPw71NHhaBO89LfcSPBbnyIhG8UMd/S6CSaHREKExKDIbAkwJQUQSBP3QjIQAMEBF50BOCtk0SRIYJidAQIR7yMAla5kYwghptD5URUsc2mcRYDUyxAihJSj9i3E1k48aQYimxQAKYMSokhSIBRBZCTMIilDjhKjqCiTsgojMhlCIGDFrABEBoUns6eqIjtSUEAlJiZCJWuJmIwFRjaM1iJbBVZV0gQpylswlWbShJqBEa1TYmULgjCpAgYgR0wAOWrMoKACOWcRlZyRSAHIJCEENFmSCqQMMWnMiMop5xhjTJoEk2ICzEiZOaQcgQIiEQExKgURYU6gUSBIdtNPn5DZ4NTbppBFgmZQEKCsAVUJyDEbgy5jSmqJrMGQNWQImcaQtFYAIWYwBgwKE6igZhERyaoKFomYWQlNzgBwJ8dIhqwKAZRQUTFPAyGRNGWLFbJKUFDUZMipokwIeMpIwMZOATRmZuuADVmrQHmqTWUFAgUSNIouk+accww5J02RCMkqCQIoEyKSxIMbjgimdA2RASBEQ4xIzJYNG+L/H6no7vKkg9P5buQ16b5Eh5goHuYbE7IaQBEBUQmRid9ijfTgkiWd1GPJbwllSQQULGRQyVkIhF3R+54S5qS1rRXA2KIqbdjtvUFjTNuOu81eIYuKURsiGTVJM5FlAQJjyFIU9d4gsVpJKfjErASwvgToQxiCMVTM5xBSP/povHWmL7mazUXFutrraC2bugljzCIZVans991w+dywzpd1jqltPQAVVaEiUQZTYk7ISpK9ZRtjZM1gcswehUYfiYVRk6roFEQMjePNvqXgo0cACaF1XNWuHBKJYFm6JN4xdzEUthJNApBzFMjI1hRGghgixUTGxZTKqrrZdQqyPJ/FG98PYd+t7x03e8sW2VqTBHxKriIziPc9WWK27IwEwYRKrMyz40fbMIQMDhBFQjgwhsys7H2POS6qpjSzPbSD9mVdAhasYg1LDFRRQjBVlSF0Y540NadCCj4KEIeUkYiIex8tF6a2STiHCDmXRZFzDjEyoqoaNEjAiExW83T/NcYYYAJmMtOtXAQQiZCUEAg5a5KpBmzSm3BSkZWQCBmMIhEQ00H7V2MMASXJRGzZACASsDFwl0ruB//hJ99ZnSz+4qc/Pb13/pO//vpn+rpsmoePTspZfXN9Jfkmyvid333y+vIyW/dm0xdNRVhcrfcfvHPsu2u/Xd87Xby+ae+dHu/a/cYHrkpztpJRh7D/6P3vcbgujDs+mr++2ml0VzdtwEwa66o6Pl9+/ouvhJld+fH3f7dPLuOuXBxr1w5t11SL+VFz+/WV3+hJtdhe76+alzU3kGlRzRE5Zx8lkQoQkbEWacwROE/1AUSNiiSJTIWzTZIupRhCYJsLy6mHXb45PX84N7rdvXaUifOw3wLOsw8xxbJy0zAwZWnmZVf62cJk0cXJaojJAFGpAljV9elJMwz63gePUPXVV5vi1fbitL4NQ9vn3/87f/vpF59JR3HQJ+99kMZRSZKkEENTzV88f/ks7xzY99/9mKzNaXf1alsVxjblsM/OwWbXOYbj1VFrq9VZdXPdWzdcfGfhlrnft37YK9hy3izP5+eny5ckq3K+3T5cfbD8b/93/+J0cfSrv3z1rcf16+evv/Xk/vk7xV/9+6cf/s7Zu0WomiKu+
+8+XNoRf/H05fd/8OTD9977v/6/fvRf/PM/MpTnq6YoTQiR2f7hJ59ED3/6oz977/x8ef44EmyvrstFcfX0TV7Or7ebswcPT47qorL76/HF51f1t05CSEcnJ0L87vce5uzdzPyjv//9F8++Oj+dHz+eMTUFu/3Gp9wCgJsXZcFf/OIafTLq57zcp3H9eleUZW3hnQ+f3LxOj5/cu1l/1rL5wT94L71M6524C3ZWfv2rL89X1b4QqLAdducnj7/10REn2Wz3JyfHDx/f+4s//xk/8/OZs4U8/fIXp4uTs7PFr59dQaMn75z98//Vf/nsV9/6tz/+7wFg6P1s1mTIxDbFITlBphRzWdiqmu027eJ42XetYw4a62bRj50PSlwQs7HWe184q1mjeGuNYp5bk2Pe7da54ncenN9ubmQq+cimqUtFtI5Tphh801QqvO9HTbkqymWzsqKZQPLQD2F5dIIpVAVrltHnJAIopEBs5s1i37YZXNVYP/YxDMzlw0cf9NvNol4EZ7ZvthYMc3VxcXazWZ8eUdRwej5vqnrhNeT0ers+mjUqWpaGAOfkUvC3m75uqn0/mML60IpENOp9l1NZupLZ7fZ7hRwFyRAmkZQh6ZgGMkZCTuItsSXrw877gQCUGA0BQmlX0XtWCZoMmqQJ/OCQi7oexsDEBMTGOAuaJMaMpGMchxgscN/uVT2AjSGFMBbkCGT0G2tMXRfXN881m3lzVNXVfr9DhNl8tt9u50ent1tmNCn3VcnvnM9OTtzt1r+52iGhADMh1UXOVgEZCRGRsbCmJFwWsKjMfFaWzeym7WNmNgzK129uIaKw7HZt8kPpypjSzXp9fnqqkhVk349Nc8SUkXNVWmPIEhZWlfH15XXO2jQNZDTGmALmrlKRvg/GclkVWeTovFHSvvduXpBBRg44sqNmXvfDkGJyxjVVk0NU0q5rUxQ0ZXJhjAN7mS+WKuJckWMSFDDGumrXvzxZMnOOsUPHAhYMRklWMxvTNPPDwkfkkP0SVT2oQhMHc2qhAgCVfCBaTDEIBJyc6DoV25NOPCOdxsh3jT9Id7UhdIi5TaQNFQUkogPcAQ5syQkuOdG1VQFwMokkAEGEwhlnDRP44P0Yk0g2HEMceyZjUvI+5RTDYayHXDmXEwiAqcpqOW+OFou6aTf7X/zys2HXFoU5OjvOrm9Wi6++eoWIfTdmpaqZicpyMQOJox/qsm7qJmcpnXOSQ0xRsmgiYiIcfUsWjak0aT9cC8ayqYuyjDHGMDJR05TGMimAgLVFVZdZc07s2Fib2ZlubK3h+eIsa05hqJsi50hkkERTcoWrZvNRoqscgwmpt85qjMxouEw2GMtRtXDU1KaqK+uITV25EhVzjFVdqrUIoFGoKAqag/gcIzGyIVGpq+rk6GizvmINTdWEcRBVxCyorijLutrtrvfrLaJhVyCwzQKaw9iG2PkJYaKIxBFyNVtZa1GJIIf9XkGZSPIQ46BpSCmN3RolsASACIoIOYsSIkysIrgrvwOlOxrEtE6fCCQIcOfwRwBgYoQp6kgT/2MazBKDyAQ9EhAFoqllSAQOyNQUgwjnyMaCQtKDt04RCKQobQgRMCGkcei7ti0KO+YQJCpBzBGScc4xGBECQykmSZnZggIRpDjEPgAAM5PhmIKxXBXOOuesxSRFVS/q8vr1embNUeN8CKt5IxCXx0d1ufD9m6Zxgvb1+noIXVNYxsyEU4ijLFzKubDMDK6yJtmsmFKeN2VdFuPoK1cdzWcnR1VWEZainG29A1cdn1/MSntaOA23w8ufWlPG1hdlXbqmKIuu26Ipr3e7y/Xtcnm2XV+HPC7qhTPlspnHGAirRXP2m08/K7g4Xs6U4ouXlyGCxbLvWlfSfFnPbdleXcZQErIr6jb0rnSEWpQFEAXfg4CxJiNHhWq+qkp493wZ2t5ZeHl1e7ac/+bL1yLWlAY1pDFvc/7N8xfv3l/43lNOY4q/+exTZ03MmWzph7hobNf6mCIppJxTjE3NOQgZcAZ23frhux+NQ+/KZUhS1ycnx/euXzz3u66YnfXtcHx0bI+K/XbdrrdV4cqqSJYw5rHro0+kgw47d/YAijoBoEEboobkjBVRNIRkjWUFkJxREqiC5olpS2zYVmCcTp1IilNFFuhBmpniLHcpsr8ZMMM7/Uh/W8C5qwq4U4/gLfPoG2lo+grq3fF+S5B66yf6G5LUoWsQ3/7e9Cy4g9IhAQOoMLEcYmsEQAqZyBCyAiqSsVNnfFLQFHNSiJpVwBASYMwppRSz5CwI7NgQUxYBRMkxpUyqGH3yY84jca4Lw6yYe/AeRXPqxAeMybKgIhNaNEQiqlkxHah2yGRUEYkETRT1KeeYMYMlciBI0QECAh9uMgd+FE47KSWECJIIWQUAhYGZLFJJbME4MKhlkV2BaBAZRDUn5IAKMBWqSkLIk2okSECsyGAIVJQtcAmSgJOEESBACpA8EUpOiphEGTT7ESgIeQFIKWdREcwy9duDCIpwFOmjDBHHBFERyGaAJEqgCgykUzOdAGTQiFaAooIDNjqBoSiBAEIGDFmjSJYJFwUGxRiymGtDzkBTIjP6JGOirEXKkCUDqjIpIxBpnrxsqMjCqGSFVU3OCppVp2Y0FZRDfFynPnhJAEoAk8gvU78poB6ccVkBAC2QAGUkYspkJv+aECZjCAwKFQSkqpKjgiRFZhtNgciQMQ0+ZZ9TYkIGwwQIMNWQJVACRZ5ObRUFyAkVgQWIJCXNKDBNJn5bKgLQw0oFVQWBJ7AiHZBb/M2rAKYE0wEQM7lgAQCBmOlQhaaIBCIyAR4BDwWHAKI0LbwKMqJxOhAAx5CZwYfAVUOgIKM1qgi2LnXXxhgT5uhTGiOh8SGozcQkOYsIMwWfmqWBxgVNpXNd2zKRNSRZVXV+smhqd3u1abdpsZq7gq1zxhYRk0Xjx6jAKCgCRVnI6GfHy5xhJN80J2Ecwxg0T425ZIzz3ovmoiwqdNHHuqmjH5jUOCfA6AwIjGGvKZZVSa60tgwpWXT77cagFs6mFNngolkGHx0bVRzGwRUG1Ew2vARx0vkKZ1JOExmstKWqknHOVcO4FjW1LePgBxurkliNhtEPPCtrBQFA64oSHEJ0ExqBKGEyGCdLjeRApKSYVblkoTymVM4qQfQ+0BCcliAEYMcYi7LZrb3vpCpSFiKiNLZ2UaV+TEPPBDqtcUCJCRT60SNZZIdkEKgyRhQymrKghEMMYwpqJvucQEoJQA5KIxlEnMw+jEZBNScylokMWSZWUAIQTROa05ABhKzCyAJIk/sRpoyATnqQiKKZFvPT8h0ZiJEBlfmuoRMAAK5vXsef7E+Ojj569yNS/Nt/+MOvv345xt2nLz+vQRZF5kHTmD77q6+WUGJjQqnU4D5vLh6ejO3tzdULA7icL1ar0zGPzrFK5QNurrePL85f/+aXv7p5frRcYZQ9R1U8eefo+uvbYd9V89nN9Xofeh3T6mSJRR4wPLz/YNcWXTcO3c3D2Unf7V5/9qopirEfJPLq+MiQfXP14uQoG4Is4spiQggy
W997RKpchQCmMCookLOGIe2IjyABgdUkOWcFRQgSYpC16V1lnLOOUs4KRdmMXQbNmIc0ZtDcNDNQjd6Hfkcp7W82x/cuvn52dbJ8z1nKhfFjev38qjRliNE483f/7vduL/dlycz5f/iX/3o7yMVqdXx+tr26Wb+5gqRu5nwcx33eoY/jcHb/bLP2V62X0J6eHnPfn12sbjbtYlWlEOqmuHexvH6+rsuyLIr+9TMs0fj++VfPfu8Pvt8PMAxgi0LH8Ktf/nJzvVv+zh+evfv+2OebF9fuYZViVmPK5VHG8Kf/7tmqLooZkM3tzebXv/z8Ww8+NFX18SfvskvozH/yt36QuiikfTuWxTKOQQsxhjnL9z/52Ij+9U9/8+pmc+9k8b0fPP7utx8ClA8efTDue0UcQr9+ffPwyRwgLs9n//HHn987rk9Wi9nCdeubs+Pm8f2jBH7fjqFvyQEYcVV9fLb64vMXu9vN5kq+8/GZLRCY5mWFRDlDELy52f/8r19898HZx996MMp+93rtb+WmXWds33/4rfffuTiel9fjprHy8tmr25ZOlvUv/+oX9989fvz+8r//H/73P/z4j8IoX3zxbH42v3f/3esX6223OT89+urLV/H1Kxzai9PZdBWcHK+2231R1YxWEMcU2ZooajBt+65oqqzBVY4Ux773QwdZ9n5fV7MQx4h5PpuHoZOYVSTF6L0vbGFdadFokuv1duj7qipms6bd9WVdhJBIRFFPVsfbXZuSr0u7vu2qsjpZnQ7tehiDq0vnKiJTlRJSuH/+cLvZJIKiNvvtbhiG7fbW+5HIKlbHy/OuHSwRpjCO/XbDgIaYlkeLy1evUgqN5f3VdhiDtTyEWFRmv2mrqpnNl7dvrsvabfb7qnGzpnLOhhC++OLpg4vHRcVD21WzYjavYxBgk0mpcohpN3TnzZFjp8l4HQsqmE0UiYMiSt93DMKuQRVmdY4U2Xfr6DPkFIMwyWx2FAVUNMRIzApASAJSl877PsSQszBiXdg4DgaSiDhrQANCliyK4thoDu16XVu2VRXHjlRyyKhT0p191xVMhmg1b95cfv7w/HRh6ovT05N53Q79ej/e7vpo2ThbGwYxCoqimNOjB2fOUVnPNZvdpmuamtmcnh7142iLYuw9Ae03Y4zCGpWwrBs/9kx8tFpx5subjSuscyU5d3x0vrt9mUMAR/Vivg7j8uwMhn7sQlEXOUGKfrlqPvrw3R//+Y8be8xkyEiPCkDOOBWxxjJRDHlWL61xhkxWiZBPjldAtrS06YdgE5ecY7BgvB+rorreb5OMKLLdBVXX7rujipP3pW1yjMylgkrO1pqK3m5E3m5UphUvHbL0B9r1ZPZHvUuXTXmEg+lnqiY79JoJAOL06x1f5jCOUwAAEQUEgsM0bmpIRyQ4BDPoUBsEiHQgWyMxUmZrV8ulcW45X6xvNpv99vpm0/kxxAn2h4P3YfQx5xxz9Kmpq2Y2TymxSWEIpXP3751lcu3YXd++7Pf75dHxfFYfHR/LPL28ulys5ikmEcwBC+SysKL59mZXVc1qeVw413Y7SxxDENSidDFyynGMHpGMtTEElOwKZ22DagzbFMK8nuXogck61iTWWWsMMUvOzGyYiIxPiblcLk6zH/puYyxbMj4mISFLBphcTbbR/taQYQTOhGAypCRiCIlZVUlyVqmaqp5VbC0omKrwoycGAsiaVYANZhI0jOhyDkRkiCCD4aJYHoXYxRDKGTXlvOt6JG0K5wcPbKpiHqBttxtrjWQJ/ViUdWkLoiqE3gdPU0hKU3/9ApK42aqazStjBj8o2KKqtCgSG8lx7AdS1CSaNUlEADSMokh8WDvfDWgBkXDyUlM+dDzBYbF0aFh5O8dFuSOz3IHVJ9ossDGgAoB5googikjOWYQAQFKWLBKFk52ugpSzKVxWQMOQY/Bp8CEjpZwMqSUiwhC8IqpSktQPQ7VsVMRaC8op+Jw9M6hkQxZSZqdIbJgJgAFYNWU11rX7zvtxNZ9pDnXJzvJuaBeLpYiEHIqyiDENY+9cQcIpBWuMMZassYbYcl0X1nLTlDddnxOyNYAGbYUJRoBX223AZAmO59W92t2bFQCZ8Baz3b9pc9fH2B2dn4kCCkQf1rsbizpr5mM71G612++yhlVd12xz8nXliI2tqxc3l9aZpm7Kyj5/+bRth6ZqSjKpG84eP7Sz+a5tgWzsQlkulV3m0dZNBgamsipzjEpWUIksokkxOVM/fPhgfb3tuv5sVmeDf+vb7/782WUbxsKYylVp33ofj1dHvu0ghMvL66dPX7737v3Btznp2MfKOR8SAoJGJhKNWYGEvB8I2Vo7joOtlqapx5hF4J3HT65ffP380y+/9bcf9+th3HamxpPzc5Nx6PYZEbmoC6uZRURi3F+9drOLul4kIBVhyOQMgbJzpiwmyK8CE+BB2RQBOmDW7rJfh6jtQX15K8f8ljTzVrr5G3Gzv/mhd/myv5HkBf3mhZMQ/9uHe2sgmu6939yPp6TbgX89/afyzZ+ZRQ7mUuNU5eB7IMBDFaYhMpOkz2wECVUBMsp0eMkCWVMSVQKDHJJEhagghIxqKCNozDHnhMhTdVca+2G/0TCW1lgrhgOpZJ+Dj5RHFCEFgjQ9AQHSVKVJyqgmKzMZRCOT10VzyjlLRgRDYECY0tRocAhRkQXACSJNQKIgwKSI06OJkAwjM5DRSWERAbCgDMoKiDlhypA8SERQzHECECsKIAGzohXNwAAgd28DKTCgAjvNaWojAplcV1P0VjSrACgZQbgLxpjJJyaKWSmqjBl6wTbpmDGhBsEIHEFRQBWsYWYVnBjkgJlUTFJlZkIAZUAQFQFNAkkg5pxSmo4PCobZMcSolSVEsmQ8acgas6RJyyFQEkGDhwe+Ss6ARolFQFAUUXMCmZQhURFMAURA8+GRrtOuVETz9LNFQAQiwAl6SAAHL6ixahiIkQCNQWMUFZGIiY0BYBURQkGFzGRcNjYBK1nIpF7QkGhmY5UNEoK1YFAnki8IQkYVIsIskw10CpjRN0Cl35KKJmLene15GilO7CH87QUTIEz1H3dDqqnaFQ4c4mm3f+fsA8DDOgnhcBaIIBAxS1ZA4wyhBKRCo44+zealijq2GcAUtljO8+1arRt9ADIAmkQEbMiZdPK1UfQpJckxA6Dvoyt4aLds583RDDN0yQfS2cJ2MPjsmwfHj1cn4xjKEsvFbCP55PiEY9Ix+DFm1bxrjSss0VfPf7NcntbN3Db18PKNyVZRWSIyOzKiE5dNkveYOxRiRlW0xvZ9cMRDGpBtVc7yODrriBn9yGQkpLJuGCFrlJSVGNlMffKQFNlpzsRUFTUSpZxF1FirSIguRY+ETVkyIzHee3h/u28zSFE1XT/Mm0YoK7HP0RbNbFm3vnWmHAfNg2hmJEAyIQ2UiQ2VdREGZUbIQmXZLGYGNI6KYACRnRWfSleOMSFhVVVhF60UUbEf5Oz+cRqzb3XvIqhNYNAqGBSRlHIqjIgqMBF7mJbxQoQKGGMCkKIqicFYzilknwEUplMIFCR
rEjzU9AERoSmQ42TCRyKRzEwEfDD7w11Qc7JnIyLAZKoUBUKWrEDChgFxmvFmTYBMREkiMRlyeEBOAAD0wcNOb6+6i4cX7eb68fdqjG0YUhzMbDb74tmvV2XpY7xc33746F2om9e32xefPf3h99598+LGGnrRDnWz2Padtbp+dV0t64uL4+SzH9r1V18cVWxMjilxUEWAsv762eVms3OcLtf+/OLxfLHoqv1vPn/67pPTeWH9zbX49vT8XpdXR0enn37+G3IWG3u7uZybedcZC8WsPpk18xSCDLGgI0FWFSU0rpYUBREVEB2AAAiRm9njlENANeSQHKGiJeOMGE0qXbeh5thynbwfx1A6q4y2siGNEnuUyI6ZuGoW5Sy0u30xm6nEwtDt9YZ5fO/b51ttky3HPj94cPry+mrMfpf6/8v/+K//N//lf/VP//E/mlerX/78Z1liF7sZL9v9WDKe3j/vKFjG2XKpnDzZ2WyZ29Du9j60N5c9OHd7nSyXbdtbwuij5NTeZGfwdz754Nnr9XuPvtO3KhkNuaPjxdBvw5i/9/0P/+//x//zxcMTVPP4gw+ffv7Fd3/4vaSDdYvVCo1zbIs//4uX543lRfU7H/9gcVz8+D98/skfvBtb70qyVsPoXV1Wpbt9tY4jH9+rnz17cX5xWjVOuvTR9x6/2z2Mg3/xxcvLN1fHJ2fLo+NxG3w73H98anFKG2OU9NGHJ+fny68+u95ebeqyhmj+nz/62R//4RPbUB+745Pl8sH8+nV7td4WtkI/+nazX5vHH5785c9ffeujM7DSD2l5Vre34wf3j2/erD/7i8//1t/7nt/Gl1+9+fiPP965m7FN65e3f33z6r0PH0rg+eL44bsX/XqbbPj166vzxek//S/+2z/71/+23+xIUcbQ344g1O/Gse1I86xqzk4vPv3067s1Fh6dnhVF0bZdUVTL4+Nhf+uqSpD6OCb1q6KMPnifjak0amEKcWy5jDHG2Gksp8j1bD7z6zWx82MANdtdd//hUmJi5DF4SbQ6nvV9TxkUoB/bqQlIMVf1zJb3vvrqubUm5Mmrnpko+JYAQPVmc6MpoDHtegTJFoRs0SxPtlfbdrftYM3KHiArUOEEWfqBWfdDx0W134+zWVG4xoCpLGx3XVGYs+ZYwGzXfVLwPmMyDusc5M3l+mi5XBydANIweMg5dkkFfBJn3H6/B7Inq/PCBUhhHwJpH4ahcCUzJkhlSSSpsNmZ2ZD2KUbvFYAdl8H349Bay+V8rtFXVY3IqHnotraoyXAKMSsRSWHL+mj5+vaNZohBQJP3W0NO1LNyaSwxiabK8K7zYfD333kwtG21cK6yhtzLF2/QULloxj6crOavX788PT9959Gj9e26sK6Z2YujGk/KN9e7eWlGA/t9hCSqOmsaZtswrGZLV+Li6ASVLRgq6r4fmEBVrSn67JGQ2M5ncws6hsEw+iEgAXQjEywX1XxR79uuG4bdsAFDxnIzX4Z2c1JyjH3hjE2KAITgmuKr2zfzq/lHH30ne/j0y68++OhbY76UlJt6vt7cNuWM2I4+GiAxxays49gba1U5+BwklUWxFDWAmnCz3RWVDSEaMoPP82rR71sJaGcAOZMrjCtSEssEFg1ZIvTB321N8K7p5vCk0btx9rS+m3hDMG3TJ9PQga3x1uVxgDeoAoggEvNUmvtbJGw8PMAQUZUOCzDkCY90iFDc+bgV5LCnUiViJq1c4YytnVs2za6dF868uV6/vlr3o/fWxpBSjmH0ZE1VlyGmeZaCeFCcN+WqqSAnGeJ2s18tzh6cPy4Xc2vg8sXVMIybdndxcrwdYzf6wlVNZRm17fZGaTZb2bIYfUeqlo0QoWEDCKyaxJDNZHMKRGQMVVUjKZDmod05UxrjVGLV1Iy47zZF4cpmhgA2EyI6NuPYOxKLRb+7DWMHiAjGj4GJXVFa4zIXppip5tI5A3noR5mW45IL51IUVAHRylWD711RGGsUyDongK6qFUBTBqah650tFJNoKp0RkTgEViXNMcViPjs6vujbjUpWitWs6tq+EJgf1X07qs9MRTU/Gvt26PaIFPs90F7ZNtXMVnPf92PfAiMxK9Cwue7bG+dK66pxt0FbTFBPckWh4HsPkpiQqdDsFQhQVDMi45SdB1QgVFWQ6ewimjYiU5wDpg3FQR6aprk03SknvwYf1vCHYTXCXV3atB1Rw1O5kOZpuCspjHe768NgOYY4aZXR+/1+i8zj2MfkQXNM3lgryfvkyZQSAwAIGVSJMVa21hxSDqyoUTVZNgYgG7ZVYYdxQCycq0lCac2sKmLoV4uVZTDGVM4Mfa+SRDHGcbfZzIqGkEPqLVNZFIWzJLFuHJE6NmVRLOt6vx8YxTEV1k0rIMjQDdr1w+3Wb27bs6P6vYfL/e1uv+8MkIg0R7Nuty7rGhj2fduN/vH9J7/+6vPsY2GKbtxZpPPVyXa3PjudF4t6fbPzY8xBF8tVVdmb29fttjNiT45Xfj+cnp7PVyelKXbbl7WdBZ9OT+4NGurFypUFFm702XCRnY15kvySIWbDolQujt774N1t2/XDsPbhwb3VsGv/6tmlx0qTzyyL80WX4tXN+vRkeXNze+vTuQ9dv+uGHoVCyn3bEUuMwVlGgjEKMJmETAUgD8PO1DMgLYzJkuZHZ4/eefzF15+++3u/V69mvvNludhtNuV86ar69uXrlMZyVpXzOiM45L4d2le/qedlOb+vaKUXwwgiSizEhAYYUBGRp9o+JUIEASFE0MndM52lcMf4v0NH4J00r5OjE+9cRG/51L+lCB1Oc8UpBqbw29SiQ8Ds8Kmiih7uyfB2E3sn+P9WbO3wu/o2DPf261mmIoIJrGIUjYAiKqAhSapIdzEcJAIyAGAwp+wNqTKIomgGhZg0I0TFIcUxJSW0xhBhAlXFkNUYVOYoGsZBQ7CoTWkLDo4VckJJoBmSMpIjLYxzhpmAmZOICmbNWVkQBCkTqggmzTBF8Rg1WhQL0eLUAoQgykSAU2IsT1oXEqsaTAI5EBO7Qp0DtiqIgqAZYkZJlDxFRwiYoqakKUxIbMhhAqVNZi5iQ6bgA4mZFUBVJlej5qiiqGkqmFdNiJk0iyTNEpMqmpQCECuQHP51BCACknIKKQ0+DlH7DG3KCSALTL8qKhqrYAxM8y5FgwZBRAU4ESIgHxRBFZAYU54ycBpUICtI1phzJBDLqoCYEACUHI/LgoOokkMyABNzkJVYsygwKE5iG+YEkiR6iV4P/JNJW9SJ4YMwuYgkS0IFIpwALiBJVScFBhAVDZBF45ANGTc1MSEiWGMMIyKgIJISxZyIGI2NqpZA2IJaAETMOXoRUkRlpKKgokRDNIldmpkJQFAVJZOqZkUVVSFUkLdPjW+kIgAAYpogeTA9og4Q8+mhIoDKyEiMADy1+03ZtGngdQDcHzhlenfFEtKUoD6QHacfAWlW8RkKtrbUom582E/m7pyCIiUtRjWjFqoFo4OYcg5NWbRZBFVAIUWPiphyJgVAIlEU4tls5SMKWcBsi5ktzMPzi6ISY3
nc+zb5PngtZ66YWUNsrXRhd3V17/TxmOKmXRsANfjhu98Zw7jbd7frfZllbpyAskkpQzd6BC2tddYEQmcLBAvSZ9Bu6JwpNYkkpcqxrVgwRw8MzXImUYfksFpaG8ft9aJa9KN6HYWspDBfLXPWDHYMnXMG0aSMxjpAEO0AFNBqkqyEwkPMaKybL9r92ogSMCAUztqiVqSu8ycXZ1iB7+NsVt6OJNzkNGKCsliICrBLxFjQsp73OWDGpqxTGIHB+zRbzH2CLoQkIYtwyEhoXElGWt93YVjRETGGmMPIouzqZrffIHMIXkRGH4isdRbZqQoSBe+ZLbFBzayCwsyGiAyzCogkTUlk8l1DDrFwBRlWkRBHS1RaBo0ANNlDBRQkERGRnd5+w2wQAcGwDRKROGvEqbKPWUAAmZljCsZYYxiADJEoIWIWAdHptgEAi+MZkbl8s56l1lb1T37ys7qigs3FvcXtmzeP33/0xa8/rY7O3Fn1bH9tcnzn4SMa0/ZFO267bvDVclHMF4pG4o6ZSTOO3YxtP+6qRZ1y3oTu0cXD8+r4i6+fL5aLHMFAWh4drXe5nj/YrvdiCjurbVUvZ9XLLy+5dLuxzSl/8fyrMQcZk3+VZrZ8eP5Ov4Ucgyltl4aKbO3M4Le2OhrHwRpmADZ8MGDmlJOf7FZEtqgqiTmMAwsaWwhJFgVi61wa8+36ZrWqy7II0YgAYMhK6Ko8DuOwV2ycbZDk7OEjKAxUqBXaI/Pww/euX758+Wr/9ObLP/7DH1y92iWgtvWZ88Mn9/7F8T9rh92Xn3/KxbwdwoPm0dHF6cXRarvYdjEN49iOw7wyr1/vMubV8dHLr752YMrSUgVPn311fn6+3fuT0wf3Lh6Mfpit9M3Vq6J5KMrPLzs1JUBaX62ZXRIOaYjDmHP/6S8+P334zt/+B//Jj3/0pw8fP+i7zY9+9Gc/+KPv/sn/51/9V//gDy9fvnj/O+/dv7e4dz7bXt927Sbx4sPffwhlMqboW//mcnjvydHtejufzxbvVV9/OWSS+4/O/Rh+8mef6Wje//Ddseturm7Lxt178PD9bz/+f/8/fvr+h+89+M7Z9Yvrl1f7V7/67KMPPvpwfp8RhiGc3F+4mcldIoRlXfZdPj2tTyqXUmpjcI2pjIm1v/jwnQ+HeymNuxSe3qzrsrCl+/KLp0/eP+t23Q9/9ztfhPbR4x+01B49uf/s+eW/+Xc//vC752Zm7En15Pxb5Cgo/cc//ZnQV6eL4gd//AdR+F//3/6Vj5e21HJmTWlEYh92GZMtSQQtF+0owcdqdTRdBT6kkmxOIYXBgIZxiEOI0WeQ5ng2Rm/LxWZzc3p8MoxJxNrShXEfwppyWlbL6Puu27uybPutgtiqPD66uLp9cXLk+t1G2J5evPOTv/7zZbUyBROXGUcfva1qMMUUsb+53prSLRdLH31RGdfYoir31+uSqW7K3a4bfVuVcyRXmQjKN1d75ywilLPKEA/DwGyOjma3N2tX22JW7oahH/tx34HI4vQkQ0pGVaOpHHQMAIUrPv/6y3sXD4i4WjQZ2/3YFY6rYqYJZqvZq/Wb7zz58M3Vi6biYRgXzdGyaZqCFXQcNiqaEQef6tpSUSUFP/oQfGPMvl3bnEWzYlZN2WcmN8bOhx4NGuuAXNRsC0sxGEQoayHOILYsNGg3jIZNU9q6mQ8pxujVC1GdIKUQwJUGtW3XdWl327Hvuw/e+3BM3a7dP5o/yAIZcgYJkg2ZKNIYO5/PxmF0hWFbGOMMUExiyuLkaDVvxqS5L1I3DqfnJ5vWx4Cns1IEimoGRM42cWrhLNzoB1Ft295WLkXJqtaypjSOY9mUq8XFZrOPkspZGTTd7PdNUdisIQ2reqYmb/Z7i5TGJJajwZix9WNdVEeL1RPbXL7cKGtTu3tn569fv0KEWTNXA3Vdx9Ebp85yaV0OcRw7yyQoIMhkE2QkLq3th6EpqtGnZjFbrzeQQERvtv3Lqxvx+9N5PbY7tzIxexUcwtDMm9EPxlpbuLsV0bRdmdCMijoNIA9qECHDYU9/t/0+BCEOxb/wTa35oSmEiBERFTIIqALSgbWKd8N2AJigSArTZFoPY5AprfaWjw1ICErGWFVAyIUpqTbWFYgEoO2+G7334yiI1jkEippHH62xp/fvv7l8A4bnTZUU99t2vlq999GHitzfblKI/X5YX2/R0vHx2Waz3t9uwfD9x/dri/1+W7piuVqtVsvR70CyNTaGUYEAOOU0wbaRqe96AZnPG2dYRVNIhqEobeHKcRjZWCKeeDdFXTG7nKNzLol4SWOMINlZg6BsjIgaY7IIMhtTCCAwESJpjhK8ZNGpzFcZIUlERBVkdojWFbVx1hZNDLms5n23qZuViPbDwIBMNiQpK06xUzsnmJJ/WJSsDMpsi6KReoxp7D1aInBhDEBSz2p7cnJ9uZExuqpWCDGEft8BCLILpuD6qDk5P7G0b6+77U3MPVhU0L7tiEYAktEjgCnIOkNIRKqEE12FycYU797uCW51kAvhEH085BDxkGAE0TzZK+it20HzYQ2OQkB6CNkQHJIGMp1AiAx6Z1ibvEksOWfJcLcgAkLMWZDRGh5HH/p+2K5Jgh+67f4yBZGkpXPOmCGmmGNRVQDZcuF9IAYiLoy7vr4ixnpepRyDHxkKm7m2FLp1Uy98ytF3YqAorRrUIEXlJISmqFiz73dEPJ8tP//F11Uxe3x+/831ZVOZ5bzKm3ZRlKmPi7JIIjmrIzuzZXaRCPox1HMo6waMbdcbRK3njUO82rdPbzZ/+cWzxWy2mNd1yTdXm9XY3zuaYxZni5TjrJx//frrbRgfnT25fPkFEc9nzbrdLxb10fnJpututrfL2RLVFQWP7X5zszXGlAaQvGuwOV8W83rYbye1pGwaU7red7NmbopyhMxVGUIWJlvPlWyKwRaWLKOUKPn45OT9d8+G/WbzJoHD9x+eWDJ//Wbnmmq9HZui2K5bQR4kf/risvUYogxtn5PEMbmy2PfBmogp5miTj2XlwpiPT473w9oAzuezGLOJuXSuH6KIPPzgydf/5ouvPvv1ex//cH8zFq2tqqIfclEtVw+Km1dPg49oclHPjLEVas7t7avPjuoZu1U0nHNgRFNYJARUEKEJaHwQgKazlHCKa9GdYIPf0IUU74Sib+ht+Fvizx1g+i2f6Ld0oztXwnRwvPte/cZA9LZYafrew3EOt3F4qxQdXgsAcBDk34pLADkLiCLhpNGrKpDJOpVbESplEFI4VIQTKqAqWSoCpumuYgRjzjljQhqytEGGLFnRIAFAjCkCKlpQzZJSDEMWzLkunCW1BKReNakmUCAEh1AYLJxBRqLJwkUpZ2VLZARJJlYyMCqknGPOmNUQOANu2v1IVlCiycZKU2EVvjWKEKsCc0mGsKxzUSIiCUDMkoLmhKKQCXxSVMlBsoBMAkMkzNPYHxVUCTRDSsSsyMigk/wBU6eoqCTISVKSnCf1gAAMUtKsqjGOMnl10WR1gpVMNVFZsoiPyYv2MfdJOtGYREQnJnKWrEIMR
ogZia1VTWx18lGpYhAtDCKgJBHNYBhQUBMbiSlKkqxCgHGSpjKkHEUxgxhD+5B91gykgKCZBARAs6KAZgVNmiOqQgqSQo5RJiacKrOZnumaBRFUkyICKiFP+aupNFgntzG9XQNkRHuQXNgoETGDIyAznX0CygRIZE2ZVFWUmafCOcskjGBMmnwDyBnAEhnn2BoiYhXMd6VPIIRgCDVlhKwaQRRyRvmbUhEiqoKIMvPdtXnw56EIMQIAEU1YogMOfXpqE0/ffrCBTA5A+eagBxVNpuEYqcLBAZs1xsQlGSN1ZbY7iimbkESHenHs+xDGgIaTgDFmHIYYgqtKpFyUTIZS6FFTxS6nxMqEEIYBDTfGRe8LzvNFHWLOMd98/nQcx6IomB3Pq/vf+6g5WcE4uuzDzVb2/aJYtN2tDyn3AlLu/HqzviFnyvnp0aw+cjS8fnG9uWJLqNaoMcY6i4Di6gqQc4LcjmVZWhsQ82a9yyBGyLJmzOyomlVB/SFPaLAompOitIDInqs0hl4hWg6WWIEGcQmAUDNm5mSrWVGZdru1VEeIaDgSeonab+5dnG2vNHrRLJKzMVMm2BLLvu1+/w+//euf/0IQ4aR+AxE9gURU1hyHcajMXAna3BoGVhoDGGeddTj2CBEiS6YYpSgLAPZj8BBXJ4vSByXadt38pFq3O4EyCpiCu37HxlqwYwCyVpQC0BSUz9NdwKExxlhOWVIOMQExK1hbYIyjJcoppZQEAAmzJk2ZkUEgxlBWtTHmbc8ekpmIoVmSIh/iAFNGVtkYmzUxMwAqgeIhfQyqzrjpiSOaRXXSl/RuFT9dBW/erJujVXO8VLVf36zr85pj23W7dJmX85VPtB8DM61DK7sM+9tHp2VZjvOm2vt6sZitb26/fv7qj37376Xti3S84yKuL7ecioyz1JydNqkJKv3m88sXRVHFPm7225NHq2zMcXGyv9222/bo0erxu/fXV8/HbmbJnZ0//uz69awuN6+vjup7CNW4848unty+eTVbnC5WxSix1c1vvrhK45jHdHr/g08++iBJnkJ4zA4QLVcZOlcYAez9gAqOXTVzjJiTdrGLORGQySZ2Yqqy79uEYMjlFP0QXFE4U0c3OjcYYwFx7AYzn5W1jTHG3heA129eQWVd1Zzzd25u8jjG29ur5dGp415jePrrr/1w+0d//w++frV98rga92F7u91dXVVlWa2OmmZZNc2wb1MiQvAhLJZ1gY2oT+DvPVldnD8878Owt2kfP3vx9A++9+3750iWjk/qfkin759K3F/djE/OjhJBs1y8eP7mox9831n72Y+/SN3V//Q//p8+fv/i1fNn7U179r/4g7/3+x+bgrbjaNgU1qxvkmZydu7KeXMCT19cnS8qW7kHD5bscLFshv1g3dwxIIk1KJFPj89LPBWhlzfPP3j/Abvi3/2bn2624eOPvwWcbq+u/uw//Pz99z7+Jz/8+F/9yc/yzTj6/ezh0fxseWLmVy9fL1az3/vhxdnR7Mura1MW9ay8uellBFoVr292tm1/9dNffOfJxfJs8fu/cx+j9Rl/7+99Z9x3l1+v//pPf3b76vWzmD7+wXd//flfPPpg+fvvPvjpT3/VBDq+906/7aKP+/XtH/7gu+9cHP/7f/vjT//yJ9cvX9MwUrqnJOSQDaLi0A1lY5EoRZi5E25Tv/PDEO5WVLK9XbuysMYZCz52QLpYzD7/7Dfl/D1I+OtffrWsZr0XY1wecrfbAad7x/fWN7c5JmJbl7VM+QJnXl29dA4qi6Efi6L+enMzWx7/7ie/f3P52nuJeaiKcjabo6W+7WTMU57fsgFH++3GLJcEso8jYhUGn1Pikktrhyi/fvrLjx49LKiqjpbDOO4ut45NKmiIsqxdH0Cp6rd+d7MGyfVqkdq233cvXr46OT56+erNycniaty33leZSsbFcoVqjmq63OwtUPRqGY/OFpub28Y1N1cvb8o5ACUkZ5uu7/b7bTkzi6aKQZwt45gcmSwgIAgoqETGSyzKmaYYo2d0ZTNX2eccldA4BhUDojGEkMiYqiq77cYQimpKia0BFlDatcPtelstbIohxyGMLQMRlYqYkXLySjRmkaDIblbP9zfrqqoYTGWqrJGySj+62qWy6IausGVMvgTQ4KkoXeW4KPpuwCSLyjHhyONQh7OlXMwKBVdaS8WsF4khEoY+alWVzGxqpwLApSsYYogx+qEtitl8eeL9MOSkjkihb1OOWpdVjImIMsMgSUC4LjXI0XIRspec0MJqNvNJr7ebwacc86opMScEnDsWNd7vUUmyzBdzgSyiyBDyaLmioig1MRgABcwxAyuQsUSFJMxBQaAPsazql18888Ntoblr08yZGINk1oyIDGQmTLXeLYwmfUhlIt9NI7RpuyEHTUd1QkpNhoxvEBl4t0cCnAboNC3JyUyNaXrYK03+WThwIvBtDZAegJF6sC/dWZd02rpPPn1VEQTDLICiCGQSAlFRVZW1RkCRiRVnRd3GXcyaUzKF2/j2+MGJ3uyZLZdFNWvOHtz3Oe9v94tlNUp4ffVaLUUfbvc3jsAwIMrJohrDgMiz+Xy2OvYxSNbCsPcxxpDCyIjZB8OAhrq2VYCmnBu2+3ZdEhTWEFNVGEOGFNjyYlZvbm9dNSubuWQtCidpzGMbUrYTeYkg+IgChbOAQgTGsEhW4apeWnTR7+LYgyJhIRBBUTWmNEpGoKmpOlrLxhbGFkiSJbKzKSVDdgz93BaGbes3s+U9dnXy2Tm1hBbZqojGOPZ1WUjyMA7HJ6s++LKey+j7YY2KkqlZLgB5e3OFUDhjeV6E0EuW2G5C10ZX23uPTy/eOz57fP3qxc3VV6LJOCcxMEPBBkTD2IVBTVFZ5zKgSIYcJKk1Juc4qUAAiMhvE45v/WV0gFvhNy42BJjuPHBgbB1Ca4h4KAi/yzkyHs5WfLtTh0Mu6OA2eiuAAiofiKQEZIz3odvekva3N0/JkLGMgS0xgQUJQMjIEqMg+2EsixIN9e0upLG0lVBOE26XGBENo8lYWrxt1838veR31gAZnR8tjXH7/a5qVpJjisGW9nq4uRnah6v7x7P59fraGuOMAZHCUFWXx0eLdkwhI7GZNVVKPktSIcUMRsSQO1qG/RY1ozKXtSTdhP7qug8vb4vSFmyOY1uVtWlcBmHjFPT1q2f18mKzu+lDrGuXUjRcPLh4p9tvNpu2KOaVm/lxpxg3202MuSrNcVMYovp85eYLJjuMmYzzeVgu58BaNHNTNIhOo5TzCnMPCEXZoAASMwBSVkQFMtXy/jvv317ebnt91Xbs+Mm7xx3Gtgvf+ui9Lz9/8fvffT9142++eNFFGILuW7/dtfu2deD22132qe/HVVUlUSRHSmPYZ2hFN5YXOXoha0FySsaZnH01O7p/79Gzn//Vdz/81urIDd2usSsN43r3ZT1bnL5zr73exNFvL6+XqyNTFhSxv75k+8vTx99HLEUAiBDoLpXCCCiHHJgigIp+Y7SUO7nnoHO+Vdjv9J9vlKTDbRgOetJbZMpvvVphYr3B
b/GF7lojpzun/I3S+8mEd8j56uF77nLF+I2bSO/cTYfvlCxEU+pzyspHlSyQFBRlusKAQC0ikiGgrBlE5JBhm8iqUliKZLzgGKRPOkQAoqzCqpoURcGILdk6AkioUBquHRcOWRLpyAQq0SJZUWfQcpoMRYAohxo1q8AKwkQEINmnDJBRQiLJjrFiLY1YAwqg2QAiaIbJdDX5DEERdCrHAgBFo1wKF0BGJaoKaFLIiFOvGQkaRD2IGjKJMCokqkoqkDNOwrOoisqkqqAgowJKjlPRvORRp92bqubJxoKgBkSnY4mCAGRJwEGVs8YYkx/jGPIQcj/GzmufJCmIILKoguaslI2gsa5yjpHv6uHQEaYkCESWCEhjDuNIiAQZDz/DTDyZ1QQUsqqKRMAcskcAQ2UfTkNeJm1idowIIgeQEmjOkgOoSsoqWVPKehCQiEg0gcrEJlKdtBcEmmZK6WAbzomnBBwRqJBhYDd9jhIBLDNTUcBkxSBWMoSMhoksEKEqKmayUZgVQ84AqoRojBFAZnIGjQECIjBMBGisQTbEPHl5DOPbCwdRVJTk/49VdBhlABCRQEYwTAgTZxEPyc5poUPExL+VQTswrd/GRA8wsWkkBhNmjwCBAYAUUJXZSMpElBF9jkjWMOcsWVRFYsgp69CNxhhXumpWh9CBcSELsqJkQpz+pWyJ2FowXdcT1BJy0IhJcw6jaAxZczbsFvXCWnf+8D4tSzDYXq1hHFhjSdylMIxtuZrz3C2acnuzZzaFKVfn51Lz+naz3rTDzTXkxK4ubDX0Xc5ZHRnmAFIUxlQlEWocxz4JxnpR7fd+vlgRgp3VKslUlQaNaVyt5ovzefJKADYrIdgIlasGH42zgw/sQKKqklE0bIlNTP7k5Ljr9yJomIMka61REJ/9bmRUtMZLZmOI0I+xmTtm54O2+/jek3eev3iVG1N6CsKOWDVZ66ZTwrIJfl8vFpadz8PknsOCgbmxZdf5LMkUlStMBnSgYRyb0gQv7fX16dkZIaZMPrAIOGPSIAxkyBI5QgbnEgBIRlA/jioiqGwKvLuf5KQ6kVSRgJgZJCfJCRCBD9is6dQTEREwhkQzsSFEEQFUkcyGplYBJlSd4A6T2ZCBKGlG4jtIERAhTXOuScBSMUyi2Rr79tmTUlwezftuuL15/ukXn//Be38EmzSr6qtXN8dPjm6/eoE5vXzx5YMn78A4tjfrm+evY8hs4Ph4vl13M1c/OJtVCtebvjL2/PQChmtLdavy0988+/D+6YOqSSpB3MNHD55+9UWOrD73N5cziqv5vZPF0fXtVdtu7t8/xuTI2s8++7WWiKY4qisJOpsvrO0JsbBuu910IT5/8SWZJidY3lsV89mue7OPZxDZKBbWWouac0ngSqsIzs0K2xQsMQTvWyQ0lQPThBjJFEm4mBFb246dTzIvC0V0ZZ2i+DAWltW5lKEoCiyU3QzRxlGY60VTvHq2fvTxI2PDAtFwms/Lo4u5RNld3u5vdo8v3mlvljev9gbky88+M1I8fPQAEJs536xv2zfrth0A9Fvf/oBMfPHV5dHpAxT6i5/8xcWjZszxaBERMrGSpW+/+1gVdjddlLZA/eyrN8l9u3Ll/fPHzdJ0bd/1/dn95fZ6E7vx9YvPvvP9J//if/tfrx6u5hflhym+evryP/z7n/zX/80//dYnH7x+c7u57N7cyj/6x0+6W/3sN88/md+/t5pXJUvGoPKrX756/PgcmW5v2uxz7nnz5na7ldlq+Sd/8j9///c/KQz/6lfPVrPVnJoiaOxuLq+eP3r08Pxs9fid42Fz+8n77xy9615dv86u+asffX5+fsGpy+eYaACgxWpuLADKvdP57W24vO2Pz1brq+698/dPFvP9ePvi9U1tVi9ebv7g9LvlvDg+rf1md/LOxcsvXn7xm+f337n/xS+fpZRmVMqm/c3Xf5lS/O7Hn2RnL58/210/H7sb8BL7Ho2u131REjMNvQcA1OyHvirnrBxTX9dNCMPMHfwUTe1yFuvM0PYigQ0Tope8Or1QISZIsXcPzjLifLbqu1eKyRlsx86Uri7Kvhu4tCGElNWLrE4eFKaKcXClJZb37p/F/ToX7mjWMFMStsa+en1zcrKsimJQrOqCUmAylnR+76xgt21bV9pB+831mycP3t90N4FM8HB/tuSUn775/PGDh1VhS1PfXF4dVWW0eHX9hhXnqyMfx7OTi+3uxvuxG/bIOHZjDrJaLVKGom5E5GjWbHdDVZXPXjy7fzybNzPH9tbvgG3fx2Y2y+K//+3v7ddbNz9ab/eUsKlczjD2kpL3w0jaG+ME1bcBNGcRH3xdL9vOHy2OyWlRz/ubFz70whhDKGwJSkXh/DAuFrM2SYrKFgtXomYkEyWMcYwxEFkGZcvZ+3HfV9YAuxRjDN4YO/hBg0+SchbHxbuPHmaN6+ureX1kqRh3uTmu62bWbrew2c3uPW6vN5qi72Wxmj+89/Dp868La1zTGMtlZSB7FT07WsZUAWSfoqucdTaDlkWZkSxTzQ5Uox8Ec12UQ/JJUEVJ5OLs8Xq7LiwVRTOGtJqtkh/73oeUbGJJUBRmuVpttltDfLI8urm6Uklh3FvnsoVMZK0hB+++e/7pF18om/lq+fSLV8tmFtO467eP338SwMcYmsVss9vHOMyrRYiaolaFC8mriiFjyJISMosEILvr9/04xCHpft9dPltaqaqmH7bLemWdBQERIQU/jsyGEFPO32wsdIqOyTSONGQOK1mkt7uZt5PrQ2r/jkn5doOCAIQMU40Jokycx7frqcMY7U4IAFREnYShu1zGXUDijsk6/W0ARGQCZEx9lyGkLNINoR08AI6jF5UQQ/YBmKrSEaADmtlmpwMh3js9ropZ30YAefDw4tXTL9+8vjRs6sXsxbOvq8pITCnJg3ceZSp9xHsXp7YoJSeDgEBt36YYUxjuiqVVVEMIKlrYsq7KYdxWxtZFaRliGhUga64WMzYGBCAJW0fOYRRUGIYWEApXETliTcknFTZWpqJSIjZEhGpLNpWEfvSdAlaujEkzJFDVHFNKhJaRQhiNqUBAFYGMAei7bVVXKcayaWJK0fcJ2SIm3xljc/LGFVQ4MoyaHLjUtj44W5SuiJpjU5ftkO7dv3/5Io9xXzingtbIcjnbboJkITbGuCCDK5y1nPywefmZ75bN8uzexcOmMJeXX4xDz4g5JiFhskVZi0jKXpCJ2RiUrAkBBBgwSwZCVDkspQ/aENxtu+9oLwCTRUim8SwCwVRbRADyzVkFk740TbPhYFC7O6/gQJa5wx8R3Zk/gAFEQVLSLClKu9mub77e7L+S1DluSNESGaZx2KcUjXPiIxLs9xskx8TG0uXVK2s5hTgOHZJBJ5Yoe295Dpj7oU3eO+NCC1lRReZHTQbxObqcJAdrrKjubm9X8/nDhxdOBtR8slqMg3eWy7JoyBbOuLq+3PSmKEqV0rld653lgrQw5qr3TTUjE3fdTlElZzJUUp28HzPt9mnujMteYqzqY0kokmOK7BYhpziO88XCWsSYH7/
3eBjHN6/eHJ3cSwl3+7Zk2l5vw5CZiwePzktU73MzX1l2GnIcgygWZePqRtG5qjK2lCyQxWRNMWUBR44IgXKGrDlYUzK5nHI1O37y7pOr293rq3WKlgu8V1nq4+e/fop1+Wrrb663V7sQ2ApQFGmHcb3vjmem23fRByYTgy5nTnOIIammod8wWZCcYiKrt29eHF88qpt5CKSQ33n/g1/86D98+bO/fPTJ97EqvMjyZIXXMW734qAqLPispoleAHPh6qYgf/Nmq5/OTt51ZQXEAIQ6nZCqwJNphCYFARBpqjxDRDzg3PDOVARvxZrDflIP8bG70Nmd0Uj/hg3oQBeCQyfAIa02ndoIMh0VVPSO8zX5OZDe6j96d7i7XSscUHR3LJbf/sAkgkhJMgKoJiJWPOSLJyELySpiFhUNIJKi1yw5ZhFAtmwQkFVMSDD2aUyaQUmnaidmRDbMjFWBZcXGaEE8d1xwMiQWxAABKCOjZufYMTER8+HWKHLY2mQFw0zMKQlqzlkkAeRgQCrm0oorpib7A3BGZdqdC5EFUDm8W4LAmlLGDESUWCWjBA1BkyADoJGYgIyYiYftEHhiXChO1fBTGCkiJ4AMOcvkQyFQFRFRJdGMqiBJJy6OgmTRmKcXSs4gkwgjOccp7CcSs2LKmlVDDKPP+y7tx9yOeRRRxSQq6pUYVI01mlEJrBgSJUJiO72x1gEhG8OIjKRJCHMwVpCNj4wikDNCIkWQfHigZ/VRswIxNs5tRzlNEJNiTHIwgVLOCRSQp5+jTPkuVEUGlbfSPE5s3KkdHskoAKKSkqiIKLKlCRNETKrMKGAOECJC1aTIQIrEbCthg3c+I0WDSIaQRb0iJYEsiJAB0DgDmFMy1qElY6wjZiBDTFMLOPFUujfR54GYcSp1BVSku2vgzlUEd3jqg8o7uaVhetPhzvY8Ma4J6bCRJ0aEqW7tzm4Nh8mYHtZNCkBIEwZyGs0xESAaW6boU0YfTWMXrkx9u7WCnGkYfUyqqoaASMtZ2bfFELoYkuacYyJCnghPNPnvuLYVlFVqxzSGoiwl55QyJGWyGvNstjTzeSzsphts29ZkHWJhtV1fh3E0lmb1cVRJo3DFR/OjGMP6+tpLZ4BD10uWsqjQmJgjI6YszlRmVipAjpFydtaKJjtf9n5ryJoZSdoRV3VTxUxlUdSm7NJ2tlyWtuh84JwdC1ecfTSEVNSmKAhDSsnXxveJVZ1BQs0Ix/MqHFXr29AUTZ88IVVFsV1vO+aidF0/JkjOkjEcQzIl24QE/NXTpz/4wQf30vzV7W51Wu2JZUyQ0JAiWxAAzEy286PFrMCuKnwafACyjAi2sQpFLuyQYtnUffCIuZyVAiP1GPbiHA5ZQjCr47Pr11e7dp/EpKjOmISmahal4Xa7BVTm5ENMCtamsiissVliDFFTZucISbKmmBAMaCZGAFRNSOgKB8iqk/nOAb6tLlZEgwQIQIRJPJJjNgAUYyLmLEI4KacACFmE7YF+bZEBURGYCRn5kGvju6sgfPWbTx9+8Ega9w///h//4sUXVfINV8cPqjB20Q9lVR2dLDH6EPt7x7OrN2/KeiHsQ4rz2ezN7e29i4dvXn9WOuy6/PmX16u62m7fcNn83e89WTXnmzdvdvsNOd62t+XcdZAlgc1kKCB1cdhQGkTiru3HuDteHQn3CM6RuW7XIeRUufuPlq9ffToMw77bVzN7cnFvv+lPzpZHpw2M/tFs1bZfl+UxIA9jLpKtLXXx1pMFcdSvLbp9FgBlazQJZI+Izs2UDQhbKrIfMaainCoVUEQAJMRQlMTNchxz4WxTNQmY0KFC7FsktkYrJr+7McZtb1NZNmz1+aurB8dVSNVsNrcW/+oXv/jOD789ny1l5ILLf/8f/+wPfvidqrT97f7hO49mxfyvf/zT0/urnIp2F8vGfu93vl+WZTvk7nrDJu738fT81Mwq27gxyoOLiz78f6n60yfLsuy6E9vDOedOb/Q5ppwra0YBBECQRKvbSEnNNpPa9EUm05+oTzL9A001KU4gSKKAAgs15BCRER4RPr35DmfaWx/u88himmVkDB4vPD3uve/stdf6rfbTHzcvvni6/W79zVdf/+TLT7abCAWff1QYY9uU/qf/+X9Z7Tp107/51VcaO4duvrz6v/zL//Xf//++/vgnC+fgJ3/64ul19F18WD0s55V4bbetlu5hO5ydzJgbQ1aMxK63VTGZ15ubbZvis8vFX/6ffwZCJJNXX113i/T586fTufnl3/23px81AXS2OJlOzbtvr/fbbnJ+MZ0wzsovnj9Zr+Rn/+QH28OND8Vhn0/mze37+/PTGRLNq6LANJlMXKKs5t/+x7/5R3/5i2Wtt+vw5Q+/WF/frR7uK0vb3ebZF0/+6P/w5W/+6zdvV9tydvbym32v25PL+ad//IPuofvV3/23nLvDZmusXn28fPXNG2MwxVTWrjg5EVGJUaNUpet8lz367gAmGDPMp3VMx9abw2Fg5qHvjGEFu1gs95sNM9jT5rDfa6KT0/lkUSbv3998x6LTWelD8kMonNGcDNJuuzNFGYZhfrKIWfu2nc2bu9XKMk+Lk65rIUQNqShL7+OkaSauCPudBwIiVavZt4eBlRbzedcOvvc5iwG+mF0Nw0DZVq65v399dXF1trh4e7vTbFTibntoymnJrpjBxenpt1+9mtcnTy6fPqx35cT6/XZe1PW0XiylNlVMgtbc3N6dTurtZsdFWdV4Mp90faKwO5lNzi+WnQ/tblCCrvPlxK7urz+fn6qrRWNRuIgBCeq63re9M0acid7vD/vCuCSJLZOpzmeTIQwhxYO/I0RnjCFj6pnknOKQRay1CRKzOHYiSUCJLCkSIDGHENka6VPnYxp2DDrkQSXmFMRnWxeq8eBbcLbN4cXJ0pa4ax9q17CaTFo2JVs6OZ+FfgPJqz+UVQll6YfwcL/69LMXhsK3b999OvtxOYHoB581xiyMmm1ZTxyDEhuyAMZV8z7HEBNY1/YbLiZVOXVG+zyAYPaZrW73W0SRJBnYciEhxNg1TenQkAo50/f7rm2zFyZdbW5d5bquc7YqXVkXxeZ2PWuKEPp+tT6fNjFKe2jPny4Nooglit2uTSGVzviUCJkJnXFDbI1xaArNnsiAHhsNJKe6rJNPmSEkLctq9fpa41ByJs01a8FgUWjcbwOllFxRphT18WT0SCZSRDouoFUeu3BGtej4QUT4OFwIAOPj6PAHy/ERVCAjpgOJxv6TcWX+Yc19rHI+sjLwD5isj7rRMaMhxwQFIoAAISEhAVvjs673h+3u0IUYYwQEzTKWsOS+tzbFGDTnwtjZoiEyq/1mMVvMXHH7/vX7h1sRnE2n6/XN4mIBOa9u70/OTueXT5SK07PTEDPbeOI4tIcwtF27d7bAjIW1MfQjKbTthpy0bqQL26I0E1MO0TNQUVRNUyOgKVxKIfgDEwAIo6Qc+sOeEG3RAJICpzBolqpoUsxMAEiKisxoDZdVFJD+YHhk1SMk7wyI4OC9YcdIOWdrmElTGtAUbEwaBp
UA4Iq6EM1lQTHuJCKyg2xm89lD974sCtGsjIp2Mr98ePcyaVQoJ4tZv9uiTzambvCf/vyfvPr2H26vXy0WC2MoWynrIgyEjGSNaxqJMcQOUUR12G01pjxbzE5PuaTbt9c5DDnFEIKKn0xnxXRxVhT7w+7g28IQqCKNlFVhItGsxxM0ICKN9qDHAXrUCFXkuJXV47IX4OjlH2Nn41D+eJInUYCRujte2HJkRHyoDR9jAB+WZwYImAQyGhRIq/VN269iv7OQh34rKSNIzjBW2ljDIpENhdDX0yah9ocVYEZ0OcTEjCRaiPdpuZyWruz6oe99Zap2v81930wnObJztQ89IxkFIQBQa8pu75+enk7KJEGnk8lkXrWdv9s8GIZJVZZNsWv9s/MLUF8Upc4gJkk5t323WC6sBPFRkkjKASFFr6QpadlMMtF6uwOUwpXGsjHUD910uuy7lgiLphru95HRFu7q4wsEvLu9mU9OHNj79fViNnOE7XYzrU9OZ01TFX17mJ6cT6dzVYm+TznaopgsToUKJOdczexiGIqi4iyiAUGLapJiGC0FKXQFl4gIZNjIk+dPf7B6t99sf3+996mcnZ32EWeddER/983LaWE7RQNQWPB93G+7w6Gb2jLmrFkBKYtEEVs2sd2hYR+krE+EJzEjSCCVcDi4ulEgL9ScP18sTt+//eb84xfG1UM7mLqsFss03La3t0DZukKGGHsedhqmcXayJDS7uxtSs3j6AkwzmiMJAEQe5e4P4+AHxVIBkL5Pkh3hbkfumx47Av7g937/Gh8iYo8+I3p8rArQeNJ/NGuOz0mVD1rU8dtj+uAP8mbwGHx7FKY+EFQ+3HOPHwYMpHj0zhwjoCNfBhmA2NhxMFaNmpPkpDlLHhcEo8PfsHMKBfVZYehDSgKWFYmI1FgyhGVp66qwCAVxYW1ZgEW0TFbIgGIOhoUZjVFELYxDGp1UaozLoojMXJFlQiKF6ANK0hgYc2mxLKSqDDFBTgCoEkXlyMxn1uNDgmhcnqSgogQZRTAHBtIcRiaVxMevGyja8uhMRBoBNTBShBGVGQCEUMEqGAQgUIA8hmpFUTNrjpTxUWZSUFIQEB3zaJKPb+SgUTWPPhRJlAVDhhhTH2IXpI86pDSkmAWyHGuRmCkpGjZMqJCRHBvrilJSRlBmBMmGCJlDzpbZ2kJQ2DIETgpZvaaEkEWOzacCMIb6Op92vd8Poe1DrAhAGLMhAsyIpEjEVhHAZUkRiBCZJAtkAEJURNAUxkUPjp+bKQUANYPqmD4/Zrd09B0joUHDjEBslBmQQBNqAPEjgxeIkY0CAxKRQYSs4ARizJiDIVLio0hNRx0GgQjZsmNmVVEYQ37kipKNEQAmUhVCAGT+/r0AAB6PO3DskBrDZfzBAw1jietYb0ZHBykR0oePHpXjUazUYzp/dM/ih0ApIo32VkAFyDmRYVUF5M4HIKfkDkM2lgqSIApsC2tzUjauqArYthLy2A4RfGIUZ1AZRQQ4FVXh6bhliTFl8YUt0BGyPX1yFQQeek1361nT1E0JIa5W29ms9J0UZWUJb6/ftdu1dQWZYrXZZxIsuK4dCqz3B1eyAnofISgGUckxJAmhmE/7mGOItXOmbKqyhh31bZvzUNcVm0IA2JXGutDtm+m0ms6ayVL7dd9tykWdjSgsUkpp6EUhS5pMZ/uHdRAgwpRSYUu2dufDZDLptg+EWLDTkIGYlTCzIYMaq6pS5CFkZI4x2KLQzKGTly9vnlzVn04n6e1uqEwGGrYpE0kSsphjLkr2ubfEfSdoGrZVVVrNmiELqGTt9mgYyaHPuS7sofNBoIv47mb32dUCGDMhm9LWk6i7lAWQU4bZ6SwXFpCm06kfhpxjiF0ccgheNDtXAaghSJCGISISIwMAE2WikXdgx54OBS4MGYNMRy+/KiKxMaKZCZMEQstkADDmYNkaw6qKxKOCKCKIYIgZiZhUVVQI0VmLhKqZiJlNfgwdxJzQx9wlCfjm+s35s7P1d2/Xw7a6cCF6BNTe7zcP5aT0/ZAaxrqUCg/Sv757/dPyx1bN/du7aV27SeGgyUlwMrm6nPm2Z2fbwyFZM3l+whbud7tmNt0drqmaXJxfZZ82re8Hf+gHb61kw8i3D9uAsWR3e3sLRN5Sq9Kc1w/3SLaoF2W/3z2dnQlssua7tzvNQpuskJYnCBlMYYvZ2cNmZQwFn0qqn54+Xa0PLIwZbVbLJqSdc5OiKEKfrWNAurl9u9q/q6vJj3/wi5ff/nYxm7HjInOSCIhorBetCqNBgm+LqrFlFlU75W17QCSL6KbNdD73OYcUT88v/vY3X90dDrDr/+jnP65nzjw/f/t6dX9YffbZJ10XJ4vy6Q+fd7u89v3TLz51jWFXlTWt7u8fNnfz+eL3X7359Mnp4uysmJN1Zr3ZHg4dOdx2h5ub25OL0/fXtyT6wx9/RK7kaMra3b293+83edDvvvvOzidf/vSj7buHj794/vqbh6vLeYrdxfPlJ5+fvPz2br3u/uNvf/1/+sufz58sysolkXpZMbnTqnQOmykKBTZ5cVq3u3h3s2Ey7WH3b//Nv/6Lv/ij//jv/+7y4uyP/vjTluXf/dV/++d/8efbg37spuu1f/t2d16b02dn5NYas2WZn9W+G/7h229P3gxcUQjD7XZLS/Ps+dnQhvWqA6WmMX/39c2SjO37xel5yuqmJq3bl7dvPnl+6mIDBOkQXr7eTcrV9Nk8G3a18Q9s+uLQhm+++ooEqOxdxX2SelIONDSnTYpx/zCUOfsQnCvL0iXKZLmmJoUwbeokUlXF4dAliB/OT+zMZN4MbQ9gsqixtN3tqnpaFs3srLm5W71//d6imsJtD7uYXB9C5ZoolauwKkoabFEVrnJDO1R1qYYHH1RpUHEhKQIT2toqiClg12+jzyfzSZD0sL9Fewk5LxbTzXq32UXr3OJ8GlLWRDHA+7vb8+Vy27eLi4sM+pvX36DFfWjZFefPlt12f/3+9uS0PGw2MfRvvn1ZnSxfv//22cnUUPZ9t9/ds6tSOcHMwOZkumTOD9vNWV3bwjpn0Jqu76/fvi+n87KaWEZjsJ6UieL51Udv315HrLhxvg/50C1ms4f9NpAyabvboOqyMevVer48Vcqx3++HwMwZhNkoKBqTUixLGyWKSGEdEpMxKLHvh2YxF+IUBAistYpoCxs1l7OyW8c2i0GNbVs7LowTyD527bBTRlI6rWfnk0m72kyaStgNMmDlzKQahm46aVxpY0j7h9WsmVWzSQx12HRv3r36+PJ0t52/evPd+WJel1yUk/2h7yUpEmZy1pBzGSBj0YckiBZZAYqy6XOWHEflg5ROT8/2QxcyXZ5eXL//pq5PQoxjpQk6S0oGUs6JnM1s60mhGtocqmLiVPq+G1IoS1dPm4zD2fnF3dtVSqmZVICZDYhP3g9NXazbrixqQQLmZtp0+34IXhUVNKTouCakmIYkUXMkY0U0Sdhvh6jct/37u4famMvTydu3b5bzM9XQDcNZc9G2g8XaGmPZ+D5kfAygjdwAxLGGZ
HzvGYMT+mgaGlt8GPm4DxdRyOMRSDQDADONJiIaQ0EAIoKPL/KYzzi+FACAqKggABHpcSB53K5/rxWNuQkcD4WSJYnknA7t4eV3r7797vXgB8lQWGvZ+BD7NASfLLNCDsEfttsYIsPMB7FF+emzT377D3+7OWxrV2Vr7x/uiQU0DV1nC2PrsqjKSVn7flCR55fnbbfr2z2IWFNlFSX0MVjSHOOhPaSQyrpxzqU4IJZ9DMBqy8owsnWElFKSnBfzxfX+MJk2IJpzZGfYuLJuBj+oqHMmhACgZJDHMgoRGN/WmQhEWGLK1pYpZQEU0JQzIasIsVGUBEnVoCqIphAkJ0Lru252NhkGb7KwdQI5DF62Wi7Oiqr2fV81RdmUgNznSIUDQFVsd7tJM/HD4cWnH7+6vn5/8+rJi88kyvbhnXVORMkypNwNPRk2CEVZF9b50IJFEOj7NoTgg29my9Oz56ub1yDZNYX3Ifgh5OD44vTii7x78LsbA2qsKmYABEFGziKP5M/j/EyI3xPW9fF8ftQN8XGs/nDBwPFakvxolJAPLGAcD+36oUn8yDp6HAUBAMqcu5yNQUCKm9Xh/g3IYdLY3aat0SUL2+AVqSyceo0xqUrrvapYY7q+H4Z2WtXWFF3sDDdjEJKdTktXQhricH/3cHX1Sdwf+s16WruCq9i1OXfOQEw+S2BA7/1isVguJ5LVZ2gWSx/6yyfnd3fv0jBgVZSm9iIF8LSZtL1fLma7wce2E3Lr7W5eL3b7gwF1xgBizqgAwxAMGBQuXaEp+b4v2Lb7vpgulMv7/W0zqfwweD+cLprnHz1hzO/evppNpoVxD7d3y2nDWa5v3s7PT64uPxp2B2C7OLly1cQY7A/rEDpiqCc1sEGy5XRCTDF0MYbFtJbhEFLHzkVR4MKYQnNLZpITCAy2nEjMtpw8e3o17Lar7eHVqu0kkXFnp5O3qx0jJAJXF4f9cFpg2w7DUPWH1BdD4dymW58tZ5qCoZoNHYZ+MTvhXPiAi2Y2DCtj5OHd25wSFdwsLtIQIOknX37x1S//+s1v/v7zP/lTC8Z3nasntnRauv3mrQFTVBWbqt0f+p3Pw66anjaL05Ruw9CAsjUFAx4zvI+Ut0dj5PHo/lhlr98bJMerl0Yn5pG1dTT66B9qOAqP3GiAR3/Q+F1EINI/eDqOtiD9IP18rxfhIxjlv/tHP3CSjh+FH2beP8i8gYCiKIA+GpUEEBAVUZkt4AgyyiIaY5QxICOoCmQYrGXjsG6QKosBXVAYjBFjRgo22sKUzpVlWRS2KoqmqGrDJXlDaBgsEEomhoKZzFhHjsQGkRQBTYnsVASyQVOgsdZaBoh6kJAVkZis0aIwbHgc8cem9lFSISJQySBw5JuJ5CSqCJBFAFRDAqKjvJMzqtLYkkVKGhQ0AwDy+LcmIkiIjDRWdLFVMEoWkAAR0KDmcTsPmjHro+4miKSaVVUhi+akWXH8+4Vx96+aVBE0acqSMPjUdakN3AXpfQiaUxIFZmZEMdZYa611ztnCOmOcYWvYTqqaGEQlxgCAbLgsy1AXQIoMwYfCB0Tu9xhTUImMwOMzEUkAUDT4MFjt+j6mFGJQAIYI1h5NM9aqgiBlRIWsokePJxEgiGQYy+VARJUeq5YAKCvqqI8QZRUkHNmIikCWMiGRRWYgJraIrFkEBlBlUJWEqkhWlMSoEKuiqOYUNKG1BdB4GRkBSCLIdLw5FEb2FoCyYWOYmJnpqG8SExECf5Buv2cVHf9vR30XBFHpCFBUIoYjKBgJ8fjDsfXj8QgEQKpC8JibBkUAApRH4xV8f+cDqDBzFlVFVYwJgAvEKgSPiACcYxxV5rKoVWU2n61uHwhp6AfDnFUMoKQ8ZF+UJguCCvjAjNaWhssIpppNvEBm+zYNlrk4nSwNUD/IdkugZ9NqaPtwiMOuE4lozbSZGwNFXXa7Fti4ukTvN5u9AuQkw9BXy5nmFIOvmmq/6yaWGmN0MdcE0PeScJC+qCtWdGwB1RY2JUnEQGVlxThCwZwlQyZDYFBEXV3D4E2SrKCcs1HAJBoInYgym2SoT/Hi7DS/v1fMhBiCL8vGAFIGkSw5zudzMma/O0zrCTGrQFU6Edw8JFuk84vmBx9fdIfrTkw9m6YQICdDhAaThtlsZnPRx7DeDbbAypLk3Mdc1G7q3GGf931OUcXiofM+5L4PfbCZJSnXtVETfZTZycV8l1arbVUUogbrenm22K0OJmOkRMRF6drBZwUfUs4dE6fkUxqyKhEqEhKGGMb2jayRbMXH9wYkNoxKiIYMHMFDAgACyGRpXLchEqBhk1UA2RibcmAm5lHTJAAZT/OGeFQ2mTHlsQVT6JFV1MwXDllTVFTEXFeLB7opTEMCm/264CL5uBl2F/MvTEOJTDE77Q73SnB5+qLtQ900VTN9926lGKL3YmLMxj8ECfDw8CC2rJaz+aJZPawoFf37rjI1sLk7tK6adodeI8Bk+vnnH71/9ebVy2+enZxWppjV9ZBx8fT83/7Hv/5RcXbz9Xds5PRy/vZh89mPP77+/WvDbjGfJ/X74VAyNYt6vXo/m55Rkru7WwPJla7t+xTl/ebW8qyeXKqINcYZZ4YalRW4mlhAur198271qmqq2bQ++EFdHZNiIWTy4Dvn6pgj2SYJoxkFu6QQEAvftpPFpJxNBYxG3O227CbL6fw3v/7uBz/64vZdv7l7s36zDtkNfVLfn13MF5+dv3r9XdclzKnbabvv64k7bfK7179jpour06uff/r2zeHLH/w89Zv9NqNBU1JZWYl69fwsBnhRTyDLZjdYoRT6wuV6ubTEu+3w4vmzwz6eTGb/6j/9zZ9i+lf/5n/7l/B//d/+9//8T/7ij6vSvnz9UM9cXbtqVv7P//LP4j7EIVoL20375OnJ9t5P56WCKKT1/cERlQW/f712NU8q82f/7Od/+9f/cPN2/4/+5Gd+2PehO1lO/vk//fF2tf6nf/Hz+4e3Ss0//x//7Pf/8Ze0aJJP6uDrr96c9LF+sfjJnzxPq51kuPqsGULOoN89HJaNMw1vNrQo3ZMn+Pab6xNIV6fub375n548Pe9Xd77rhjJUSNQU89M6dl5Cur3eRgnzk2m79acny+ns9Hb1Hg0XBX319T9cnizC4JVL0FxYd3V5HnPu9i3P0FonWVQhREEltsXgu/1wQLSExwBaWdqu8wUbEc0pIXgUuDi5FA37bpdzURpwqu1+/92bu09ffKY5zOfl/c39hJbTJ5Pbr28kZypZoTDOdl3vh1QU3FSVkPS5e796+8mLjwjizfXq/MmlGOMqq8hg6ax8kvookja7jbOuqafbbluSaUq33x9cbZ82z2Q4DH1wpixKsokW5/OHu3VYt3HR16aIIpmaAcJHf/zZy19/45OcX54PoX2/fllys93tFucv0Dq2yYDYEpqqXO3t/Wq33vOsLrOE6bxenM7/829eni36WdmcnZys767DMJiC7/ztrL70MTfnlynTug+gydZl7wdDZrV+P62ru/VemuW8sSF4sLmezm7v385M1Uyb++t3
jWjYr+uiMIZFMITMEwtgttuHsimNq4VyGDwgAJOKhhgAsChwMp+ApNBvkNnZYr9+SKrOlq4pul1sbJlisGxAbDbh6uLC51hLzD4ExsXZ6f3dukSC2BJVrsTnHz357dffuNXhxz/50X/5zd8ppc3eG6uArCi2cLasIoRqYrrW55zKsuDahQwxBnJlQ4WPMfpgbSE5qsbKurYb1t3GTWZkpihrQJ1OFqiImg9+mJaTpqkz5CF0ZV329+v94bCYnhTlZNJM36/vHdr5pInSc8mn07Ntuz20qTK8uTucnZ5DoZ9/cvX29WtSu9ocrk5PqrL0va+cG3xgC4LIhhEMoSV2pCbHHHPICpjpcP/aUfR99/DgTxanhS2sUsbManwYXIWiOeaEbAi+Hw+O+s4jbxHg8eT3SLX4/tdBRwbBuP4ezeSAgELjiQyJHhfXeNyLj/Vmehz4Rxi2qoKIKOTjXHK0iBCSAqhkHMmpSMwGCCADEqjIYX/47tXL27ffkWpZOB9yStmHOPiYJAuokgJKvz/YjM18eXq62PthOW2+e/fNJhzKqk4+DsF3w+F0MSvIJDLVon7yyccn0/n+YdOKXj059V0f0oCqhniUrIbom8IasOv1ru0CIzOZHLUsK0Q0xk7nU0vImlIYNGUANk4P+60htuz80DFSBiybRdKIzEyUNSOpSCIuJCdFUDRKjovGmsIfNjENSI7RIhMq9KEriD1kAJSYEJSRUJDJ6khSVXTG+H5gYGNd6FeFtQAZJPk2U3lQQlRitmQpZ1Htp4tl6g/EzFTncRuZ09nZ+bfffusIn3zyWRK/3z6UVUNCKsAj2s/vovaAghli9MYYa5gpDYeVApbVfH52uVu9BQmucDln0NwfbsnI2fnV3vL2/o0jQTIISUFVhYn0yKj+3qSm8JijOWZrcLwyR6T6h2tSFPQ40NJowjjG0D5oSCCP16OM1/Po7EDkDy3lLgRb1j2K9KF92KTXN5RypxHJVFV56DqDDDRupTX4gIw5CRtniESCYYtIOWdGLnkaw2F/aJ8uF5h9YZ3vIwAn7UM6pNiTBmtSlkFTLqxLKaWQ5s3k1bt306qcl9YPYT6dde2DQXrx5Fm/3r169QYRken07KQoi1lTpJt1NjSZNf3gUcgg5nQwVkqqtn0bJVlbZlS0abvfkaGYEqkO7WG3P0yashtaYa4qN3SH3TBMp2c/+/LLm/t3KR4m9WS2mH338hUTOmtX6wcknU0bMGiaZnp2hj662hLENPg4+KqusmDpClsV7BwCtqtNNZsRZOAc+na2uGKkLEjsKKeklpg0C2hiQyJ2fvrixZP2p5vD/vBmJ8XWp6K0P/rZ87/71athSD1SSImnVerjugvKbucD5JxFRNUyGWfUsALmNCgaRZdjYNDQrVPct7v39LLlZ0O1OEs+Nsur0/PLfvO6u7so5lfdKkBUU1U4qbc33fX1GwRTN9O6clU1pXhIm3Wvd1hV3qXJ+ZTSFMzYgKT0qEki8agEAH4f8xpV7seQ1yhU4rFI+zg2fmg++yD3/GEc7A+uXwAYt7xEj8OljvQKGu+JD7/ve8EdH1/uD+vYRi/eY5W7PJYyf//H0qjoH9Ff4x2H4zySFAOiEUWRJJKP5QfEOFpD2JArsZxoUSuXEHswByFiUGZyzhlnrCvYWVsUZVlOmrq2WHF2RkkDoSBkUrBETMCG2DpUMmPyji25UoBp5N+LMBIjEqplYBCiXFlbV+wcE0GSY3R5XLojExIhMozGkiwiCfQIHSZjRkFJFZEo53DclxzNISqaEAGRxy1JThEIQAhEQSIBAAZUQmJkVhpjSeOX3IgIQEQNogI54QhFIxAAZDaIKUUGtYRZyKBTwQyAkkY2U87ifTr00vocc5bRWQCaRZBIgYiJDBrDZVU6U7iimkzrs2mTJMWcU8pZwDonwFkkg+QYikKg7f0gGiJEzskippyzqiChaKaRYBQSZFGFGONoNRAB0GyYJHgCAVHMEXNASSoKOSIRIANozhnGPm+2xAZGMo/qOOsSAqgwgkgkgjzKayiMrEiIRpEQCEbCbhbArCkCiSghK5CRkZGVRUNGAYMGKaOxXBhVFkA7XpmAZIygMpNhgyhsrLWGmGiEVgMyGSJEoA/WukdXERIgEPLIgiEi/LC0QEACIjO2ntExkn8MphHx+O6l48CuH1L6j/sOGAVdUs0Aj5m0o1lNsoooCFDOCdggp5gxJMwCIfj5ojHG9lldVZ1enDw8rGFU5pISAiuSMGeDjMNhb8sKyYhXKJvqdLbzO8O2qQsglj7K/Tar5i4osXG86ldDd1AAV9dlfWrrsrbQbh/6XZuzcF0nr+Axe2UiyeqcE1Hfe0zQtwMzDltvmz3VTdE0ZKhvO1FA1aKuUkpZokge2faCc2cQRYL3k6Uj5qquLJsUJeWcAQAhxghEne8JxBhExsIVCpnBxkjJS8OlJkVAZ6sgaipHBhRT0RhbwtANoZfg1AIXzuacDDnI3G6zxu1nP3j6w88++odv3vQClSkkgKBKzmVVD31KrOQMZyFhIu4HH0UgUjdEgLrrSYiqhW3brihLayF4f/FkmaPvejFsQkxFPWkm0773UbSaVFVTWWvrasIYRGHwPWYg1ZiTZlFmKkoaJRuRnEUgMzIT5JRBhdGQqDOWFTEL6/FaGS39x0QzwLGME0BF2BAji8pojhNQYx0SAaBhmyWNxG9AZDZZ8/FZbsiQxT+IKxfG1WVhLPR+P23qu1dvNCfRcveQjS26dieYo8TN6r5I5Kry4e6t9/vZcoEZz84vXt+8WW3XZ+fPG0uvt23Moar9/vbeBze5uKhPm1dfv0FqDrerJ+eni/NmtYchxPvVw+ycXFlZlbZdvfvNA6hdnJazk8m8bg7bdnXYzOLkn//FXzy83jp2hu37399zwe++eX1xuoyA7zdvbVEOLiOJQ2OrCSLlkOum3nV7U8qQh9CnGFNTAkBhsgZjAnLBhGyGrqvrOsR+v79X1dOTZtivrJ3N62Xf3QmoIS1MIwkW8yXZ2vcHZ8g6O/iUcpYYUCG20ZWIxLW1++2uLNLF0m0S7zft/e3mFz/90d3tO2cpR3ny7Nz77q/+7X+YLqaf/uiLOPRW/NPnJ+8ebtmZ2fxkPl883D5U9XQ+m92v/ec//fL1y3dVOYHQ7lY3KXTbh+r2XfzhDz8vCvvZi9O+382WzWHdv79+2zTFt999NVlWiDZz+ac//5PFJP/f/9f/mw/8//x//C9D3z25mj4/re9u709/eHbz9uHZi9PV3T7GUE8WRVm2+/gPX9/u5eF/+tNfXDw7o6yrd5sMeHF1dn//8NuXv39x9XzqJqt3D0yCjN99t3raQ79JZGjn4pNnT968XG1XvcOGfbl+f/f86Q9fLMvOqxVzcTV7891qv2mxSMuTGUi8vFq0+8PJaelMvr6+VoPzsiiJ993myUeX4qNz+f7N3X/+t2/OLs6vvvgoiakLDED1xJEti8I64v3t6nAY5udNHILfD0XnWvKTRRX2ISsWJddVud3vIYtIjpqJrQikFEBC24d2CLaYNrN5ise
C5N2+1aS37TCbT40pimIy+DvRCAiFdb7zOUhRV7igTxaTYUi+3xspDoeNJfPmq/3uvp9O590+x0DTstQYDRvr0Pv20LdYOVe7aM3E0GJWV46IaL8++Bzebe5/8MUPh+gL55bz+cPdXYpCjjeb/rB+awjqSaPEGNOw7zRThtR7H0O2hpKRIaV6sry4muz3D6HdfPPwuiD37NnTl7//DnJ3ceoKa3eB9xL7/d1Pnz8fDv3m4fZgsKmrrtfz0+ft4fbubl3PqqKKzy4WBvVm9coPbVG4uqxbv23D4NKDqnmzvWcjF6dP8kAYWNOAzp4sJxqDctpt1zVOZUiVM6m/d9F3IeTcpxjJWFErWZ0rfQhZwQ8+Jq+Q1+v3F5cfW+uilxgCuqwSh/7AbFVAQt5uV0XhBh8OnZcwzM9Ot8NDWdehD9thv1g2pdiS69pOJtXicOjCYaibKUrnyEFK3JTdMGjXg5Ajd7V48vrlt5zhxBTbzUPZzE1Z3W16Y41V7/0WChNhKLgAtBk5JkU0Ly4/e/XwHUI2Rsk4ts5VtRLmEOqqmBST1nep3+TYT5pZzGKYRHxdOct1CoPPPgOJ4vn5eeiTgtl3hxB9hQYi3t+sF/MJBuoPrYhfTGtC4ktCyffX91kkHPZNdXo6XbBx3X7HTMZaJZQcMzFARkTNQXNQosGHQ7cvqOR+2Lz+5uJ0EQr0w74ui67dFZMFMOzaDWBGw9a4nFNRFiE+euu+D+KM553H5JeOOs/4s+MS4rgKF5UxcpBzUkDGx4ZohSRZdTSXAQBIziqCAKp5NAhlFcn5A2JjjJAAEBGp5hEHSzyu+4hNEXMmAUTMObddd/3uer/fFs7OkQ7QxdASakoxS1IFydnnPKnKnLItbTOfDCETk0F8/ea6apqzk4tXX3/dlIW9uAAAYpOzvzg5n80W67tbvztMJjNXlNaYthscW4N0aPdKUjhjTLFd3W+2WyY7P10SU8hDwaWoVHVDSPvNhjGVVWWtGXxbuTpETIhIWhZOkhZFTcwSPbtCYs4KWcmwIXIZUVWNKdhUJCz9HnI07GwxZXbt/g4VynIuKbHJoEqqw9AXZYHookRTFrYoQ04AZFzRdz0ZhwCYoXQFKg9dv98+NLMzGTyCIbI5j5xwmc3nu93BGmed3e/3N+/vLl58dnl5ef/65UmUFx99/O1Xad8enGUE4yyaajpp5ner12OtECpGH0ZwA7Pptw+QEjNV5aRrd6MhIPveJ8nxThTq+hQms257N45qgCpZBRSPzdOMHwZdwsdFNcn3zN7jXv9o3Dg6IxSQ9agnoUoGOPZUIY5D4B/SghFHctYfDOb1fmNUZtYUoGXGj9AG67ohi8bBdyLChrthyEGVdLyuidlZ27VriQGREKVvd4aoD/skMWfsBj9UhQ9eIQ3RRwnb/cbkAKw+dNZVmLSqq4NvRZJlYJOtKXKIDnFizV3qS8oNpk9ePD90O8lB1V9dLLsuFs4+u7p6d/8wK8st2e3hUNvGlJZVswQiTSGIokhuylJFhxS7LkBhwbqYs2hmdAyw228N4WKy+PgHX9zc3oToT5enOfH1y/cQpZrObt7d79rN5emFhSJ2YXF6WjfLQ7gjFN91SbSazENIdT1x9QysE5HQD0jGmDIn3W+31lTOuSSK1sYYrWSA7EM21olIipkZq+nZ8uzpi+c3h7b/+j6Ih9QOGeW8nLzdt8xQWwqDrxV9zKLYBskRbS4PXbw6qTRpTmqLKkhuDIXkc0g5pN1qW7gqDd1qu81Rnv64tM7FOJxfXX79y79fvX1zUboCJ90qutnEVtPz55/nbr1d3Q+5CztRMIWrTOF8O+nZdLuVdSelnQuSYSUiUBxtKUeI9ePT8w/ccI9PWHxMlY1Wgw8SkX7wFz1++4di0ffizxhsEswC3/9xDEfH0THQi48YXTqK/PoHqKPHDNrxFvowsj9+4PHWIGBQEWQkwCSZiPPY40SspIqKigooQKKPDYZsmBjZgXPgCi0qwVJNJuvKwkmWomDnnLPsHBvHVVHUzjpUB8kSEorBMV2HDIZZXGnQMpsC5Thpg3PKDhRTCqDCmCF0AFZUKSaTE5HUhS0dsxnD4QlEEXT8zB95NYoqQiACSkjAqokNMZECg/LI62GiR8QREH14FvEoLwMpqiIqAo6RlPF5gqqgGSSqZoCMbAFAceQEoTAAEhkHKsjKxkCKxFlzIlRgpQySMyvKsUfiWOkQQ0xJfFSfUtasSB9Cs8w8SukGwFlb2sIaVxVuMZtMpk0S6btenLAtXOEycoypD2HoUWJCDKNxk5lygpGrlEVQEyDk0bZkGBAQhNCOhWUZIMOYlVPKkRBUooofV0GCigIwQn0MARolBuLxSS0j2pCPBamqIprJWZWEAGwskhV0ZBwSPfrcSBSJrRCrMpIhY4ANsslZgUAFiNSxaoqGHRpEJjRFyKOjmR4XWqCgQEDExo7ZG5Ml5zEQCo/nnMc77kMAjT949gzz2GdHowcJkcbvjfaN47+PhZuP5yQEkA/Pgu/tsqTf/+RoORpzYqNvekRvYRaNKSOxohGBboiFsbbIyhmcMfW0ID87nQnJQ5LYe1BNKaNhVNKMKuBMlQbB5CFwnx9OP/poUgQIpAcvXYSQnIJBFp+UZb1aex9tYVFyMasN2s31zarfVkWZCYpFZSdFhtzt9oaMtTTkzrGDJJoykZEEhbM55O7h0CALYzFtvCUKknZtTh4Qu66bNFNVnSxmVBqjNodAzkJRlc0sbO+6/b6czLfdqqymorruhzaHQrSwJgaNOc6bydAPk+nJOvUMrnR1nzwBpNFuzVwW9bpbV9VUIoVekIqoKgpsOaaQklZ1Q1K0W//dq/vPf/LZYWhfXd9rdJpZUkxRxRgi2w3+4nQi+cYH6Ac2ZIVMjDkmzNmvB19zeT652GwfIOTe+wzhfv322ZefRx8AjS0LUtMs5qq63uyNs9NpA0VR1zYErcumH7oYBz0uVzGlxJyMs85VIYaUQpKEjArChJKBiFMGJjDEbCwRWstAOC5hR1oWqIomkYw8CuNm7DQGts5x1qxwlEMVhS0TEiLTcSNgmI+kPWuspAyPuqlPvrvZLM5PikkxrVx/n5ZXF0l1e7d5cXG5oz4P+fOry1998/WPLn/sgwIiZkg+ibYP+wIVQdQV9e3Nd4Z1sVi8ffvy8vI52LOvfv/ryW7i28Oh4MurF4T68Ysffvuv/3NZ8qmt1YfZ5fS71+/mtvSHgUpdVtPtardZ3Z/Ol5dn8269ihw26/WseR5TbOomqkxmk7uHd5PlCRV1Nvzi44/8ZjvsNybpctnUttyt+5jy4uRU0mZo++nFPIcwhHuRUBezLuhsOiscWlvZwm62Nw/3t25q1rvt2eT8+eWfvHz5t6I+Z2PJEGhdFChY1bN+dwDiGNWYYowYT+cTNBBDTDFjbSSaYYhc0/nzq30XUMzNzcPf//a3/+O/+PPCwL7bJJZ6cnJ2Mn/76rp0pu/a2UXzyUfLw+6wX3eVTrq79noQUahL/P0//KrvaONvNnd3P/6Tj6K6p09/cDvtCza//O
XfP30xd5ay6m7Tn13MOOPV+VXuwqtXt598ZL6+/voHn18tm6XJYnP+q7/+lf7Jl93DqjmfVPNq97ptNv708lTVc8GThlnhJz96Ujcf/et/87t/9uefrFfbk3rx5vXKzYxg/MEfffnu1furT85nuXz78h4hffTJM5W+Ls3iYnn3dvvq+q0k+ebN784vF8uTy0NuQcA4KQ3ev1klvaFSJkVx2ObG5vv3987VhbNh8CH7qydTLIrf/5dXm/Xh5v7+4vnysF5l3y3Pp83kyfrdw1d//1ti98NPntWFUbQ+pn5zqF1p2Gzfb4wBoFgsJp9WX/zyb/+qmX9c1VWKGlMMmepJIYxsTYrJ2soZBxG83wzd7umTy+1e+/1u8EepqKhryAiiojp07Wbfns2qh92b09lza+oseHo+63Y7y66wBqPf7lcuDs+fP3/77dt63lSTarqsheG33/7+7MsfUjXphh1xcdgPYUhX8ws+RHloD5BNgn4zrFJ/upztVoeL2cmw36qIH7pDQI89JX96cn7ohrKaWyYRz5hDCrYwi1nzcHfTTBYpAzKwC0N/2GQNIaHNAMLJdKF9/eorY8p/+Pp3X/74xfs3L6vyYjkpN5vV+9vb3GcDJEFn1WLVbd7l68WknDbTth3S0E2tPTltdGi269Xk8unD3W2k4awucreCoTidXvleZ8X5zer17mE9nTWhG+bTsgtZCJ4+W25evW4m59c3L0/OT7koYx+EnGjHkBVFRG1ZDru7cnJhyfRZ5ovFerMatns7myokM5qNZVjOz/f7XVkaJT3s037Is2bBEriK36zul3O7Dn10VKgOOWCMGUhAUvCV4RT2oTSMzLaZTc9taYih22+mk5PNw3axPHdFef/w/rNnT/n6bRe6Iafz2ZLUyjAEDHU5bYPPxhRVMaRkrfPqX25f2qqSFHPMgAyEXQwkyEyr7Y1zzNblGNlUbEtln2ICtNPJWfKHjNJMZjGEoe9n8zqkUDemsLOccl1M+/0halnUC7apH7YfP/v017//PbObLqf3d69+9PMv/81/+usfP/tB17WSclE/M8UEYggpxCSFsyMrx4feqCJoSZxzRiFI7Wa1mpR1Tl5Ric2kOkuma5MnxBIrJlKQvh+Kusop5PjY/UQIYyMHHL09iKya4bioUNSsMp5uxm0pIFCWqJJTCAJArgSDksedJKiq5gjj9nPsx1HNOapqzlkeZ6jRXfQYO0IVGT3+xAwRRlt4FLTsRDVJ3nX9arsLSNOTU+Zyvd0J0JByaHslFhTRsSeXwRmvmNiSc+PC+2G1EYHF/CxraE4my8VidX+jCe5X26Zw54vTdnW3unlfFdXkZDZp7H57n1NGcoe+jRBZyBau8+37+xvNOmkmZLD3vStwUheTuok+bu5uMWUqTfShj8GVhQD3IVtb167a99shhXlZKSrbxpAR6ZUIbQkKSCAKhp0rCkI0KN5vSW3VnCqarNm4ErOiBsmJDKSYiYwrJ6CCVEjKzpbAjk2Zg4+xr2jKjrMvAKxlOvhOVU3Ofr+Z13WIqeGyquuUvCSvQAgS81CfLGf45N13v2s294QEBA+3b9E8+fyHX7589XpzvzLMorEf9tPTi4vmR+3mYdhvi0kpErzvVBQRLXMOrRqLTNYVSlgx94hpGER0/e71+VU8mVVWZ+vdGpVHPMd4uCbkx2yOjkhRBfkwQY/Dkx47x3mcch8JteMu7Tg7j/YBxHFaPAZ1xkP9WHkmcuSTfwgdVG+/ckSuKCaqy779+4TvcprY0hqrSQ5x08XQhcE459iI5iGKscYhDl3LRCIpeLHWZclBIhIwwP6wg4t5yrHzQ+/DwXtLPcchaY6h920sC+dTCwxZ0yAtore2SdG7wuyHrWUprBmGtplNv/zyx7//1S/D6oEX00lBzqCdND5FajGdz757P3QxFWhRtfUHJIpCBOB9qEoznzbU9hvj2+A9mRhiCL4s6/u7exKeL5bldNK3W+rbk7NpSOHm3YP0w2TaDEM7hK50BRvXduHi2ZOTs6vNbl/XZUHSD61lR1RUTeXqxpVNHyL6wVmXEYhM7AeK6urK+1jOpklRh2ANDX3MMRp6nLhFvaby8sWp774E5/Xb/n7Xq9GkOXT7Q1tO6wZpUlcPD/uJAhsaYvIATQYVgDx23RhGQkmEjoRJ1BhHKVuLtjF9Cv12tb3++vTpx9tuaCanT57/qNs+HN4I18ty8Xx3e2DrGM3F57/g8tX63Xe539uibtvBlYWV5IrKtLv9zVfV4kdoqlFeISIUUAV6VNS/Rwh9yJcgfnjePWo2j8yS8VLWDxSTP9SJvrd8jmMnjX1NOT/KoQiPT+wjT+eYXXvUpOQReg1HvK4eX+NILcJHG9SHzxEAyFgVMYZT9kBMQIB6JAoTIxlEBSHJAUGJGZDRFGALKhy5MiCSnUBRM5YYshjrSmcAXOEMgbO2KGs2ZWFri2I0WUSLWBo0QISiORgG46yxhiwzM4zlCIjqSuUKYyYfEQBUc84hK6jkvnOY69pWhXGMCooqWVRAkQiJkZhsRYCQBwAFiKACoMQGyAJzRiKwkse/pjQi8UQRc0QEFUFiYDtGtQnwSCkSIQRkzo+as0iG40TGxxQiATICILADAGB77F4TwRQoBsgDUMw5aVbIlLNIIs1H5X0MTvkQ+qBJUUSBdHxIyvGzQVRhJAJiMmVZT2fzqpmDYQJgIYfK1jAbA4Jk2xBikr73Xdel6HPOMeYskjOAoggwoYAYNsayILJzCCKSs5BItoYIRFICjagaJEuOAGlsPlUgUUBkZKfGIjlFJkDSJCKoaAwr8rgXQkICHYfTozGHnKJREEbNqsRGiQUNGEumIFeAMeRKZKuAxACIBhMoqARiqwQCSECKaGwx3jGMH5xMmLIAkBVAJsiKZOjYszluwujDXfAhgAaAyET4aM9D5BFYTeO7EwEgECESIh3bPj7wwuCIKTraPujY9PlokR1//TGAOhLSs8jIiBSFrCqjFG2tRgkxGwZQrQv7dretuZnOq81u1cwXoUu94nAYvxYYs6IXVMWCkoTKlWqETNw/vLOWqNfU5WHjDVAA6YEkxRACsREgYDJktvc7zR5yMJZSSplGp3hOwSMKAsSU2BkVCYMHFSocggvSszGasd90KXgiYFuTw2iDJs/WTmZLzUdVjSA7y52HqiqGdi9xCP2Qw8BcGiEJsd93Y95wPj99+/orUMvGHfpYWOfjUFWmrt27oSNwMUclClkq5xKgMtSF6bZ+6CMbk0B3/WHSFK6yIpAwMTnxsN4crq9f/eCLeZLw8m2LzlhCQhNDiiJdzvFhC1gma7Ka/eYAqmRx8FKWZAvKku/WawEbghRFIRru13cin1hbpcOQFFHR2HK+PG3bLAKQxDF7Gss6gJnHOKPkiCiGWRREgMkyAThiyaiSQyYkUUVBMqQwVjdmy4Soo1gpOprvVDWNX1zL9vgGRIwwctbHspQxLWlGIBHRkYqlisTGGiOQFEBFAZCJH4dku3x29f76vWpOgx+GHCVHgAFp33lrp3GfDxv/7OzpdreSjB9/8uLmu
/ViMmkDrLfrpqorU+z2h5CkKKiP4elnP10/yHo9TBezYf8wnTbVdHq7Xj07f3L3vptOT5oadg/v7lYbLczs4qwK6cnF1a9++zVGLCq7T/msLM7PJvuHbUz6g1/8oN15LacXJ+7+7i7m4emLqwjso+Tg23e3lNQQ3q/uFtPLbjg0zdJ2GLt9iD24PMjOgZwsn989bHJUCxUi5aSusp3vr9/fiObpbHJYt5cfX3791W/7YfXRp+fb1aYs3P1u7ebV/PQ0GyymJWYdg5mSFAjYEZYGLKaoYrVZVmR0v963auqmfHpe5RB+9oufbtftb775bvX+5c9//tPz84tyYrLV6byZ6nSz3q5XB2eL5fllyvD0R8+G4B9W6y9+9Pz6zXq6rC7OLx/e3c0v7XZ195uvf83SPDm/+sW/+HFhpd207x52ZbnoU/r9P/zdp1dXkuXZsxNb69VHH4m1d6vud799/Y//+PM/+Sc/m53ay88W+6FVxPOz6aRyhzZMZxRDjG2MfV9PnLL52c+vqoX79qv06nX7+s27Z1eWC7vd3/aHgW9389PpydNLgPjmze1iVqHhTdtziWVB0ojv9K5bHzZhdm5//frb6cxVzdRmVbXl0tmSD5vUtq0O8Prr24uns6I2bIsuJ+Pj02enwVaOnELoWp+joGElPX9xsd/vurb/7voNsZmfnc6ni3W48TmYKZ3MT5LmlMF3nQP+6KPPdvuNq3xRlIWzKQbXFIZJUrTsok9ClHISyfe7dxn6ZnLFrpxOZ0fBNMTu0BnHTdVEVbauaU6Mg8rWN3fvlYuqLpdnJ93Qo60IpXhRRRsOvps/+zgE//TF8u2379jwp59eDXF7Op+eXD75+//6N8+fPLt7iIMEtVpP6v7QTU+n316/fbe5rSaf9IcNF5Ua//b25un51fbmZlLbZl69eXs9ncyDxD6Gq7NFioAV5Bjut/flvEqa1+t1wuRTmM9mIXTddt+c12bqDrF/t9mcy6U11bMf/dNiUue7b/0+bHe/PZksQxRTlZO63uy2N/vtzu9x2E6aF1yQE5yUk/V21b/rMiOa9O31N8uTy7vV/ec/Pf/d337z/MUfXd/sGeP98ArL0JA1pUldbru2D3673909vJlOjXOZjZ7Mzzab+7Z71zQfhbBvBVGHnESBm9mJADABImVNEXjn20uubUlDnyWoMmeQalKlGH2MZ1cXp08/+X//f/5f/8Mf/6M3t28Xi6UrZL3d1dOlDF2WkDNWzqZkJMa6qgeEfr8Gpel04VwZUrc4mSuwSJosmxi6alpR29zsD88/e7Z52Fy/26YiDj7Vxg7gmdiU9XTW7PpYVlVVVjmkofcIhWbVJKa2ZDBkiSGxK+ezsyGIUF/UNuyHzh/Y4Mlsedi1YehTCkrGlJW1WBQ09LEsGlBhg7Yot/s1G1zOzrZ+b9jYonl9c1OXZbvfsVTTanJz/+Yf/8lPN/fbxdl877v9YQtobFFZNUE7UE0xJUlMNueUgu/6HlRkGDa79yBaT22WZIAmiydtbgZNVeWy962P7FANaFQFyABZjg1oRxlofLsRVVDSfDRUqzwen47ZNBE5DuuqIinmqECckwZAIhAVGc09WWIauQwp5xhjyllgZF7Q8dwmAEiqAmPySBLkBERFUVhrIWcfo7IztlLJh67dde3BR+S6NKAVbPddTJoEXFmgNQCQcx6yR4QUsiVtZovZbLa7uRWJy+XFoWrZ8O5wKOuKnZtM5/v1fr6YXJych+Gw361yjLOnH82WJ6v1qlBloq47DENXla5yRQp+t1ppznUzI2N8GMiYup4ZnnzzzdeOkAEcE2XYHw4EYMumG5Kxxaxedv06xTCdzNgaLxDHWmTJiGSMjTkJAFnHxhlbaEpD2CdJha1jzgI+i0dmY0z0CawzhsBkVNQYEDDEgQpHZZNEEbloquD3koI1M66qFDIFsNam4Amw220mjgEwR+WmyUJDv2abp/PlwacupsnFlb17fXN/+8VPfzbEfn2/+fY3v3362ScnlxeG7GZ1hzkbg9vV+2p6Mj9/Wk5P9ruVhUDWkKQwxKxJg0IIZAwii2QlnkxPO7xPMTLah9vbNJuUxcyWs9huQGWcWxGPHKJj9/ax+AwJGY7NcwLHMUwfbROj941U87hGFhUaZ+PHSfwoQ42v/Hg4/3Ce/zAlNw+vi+jl0A1x8G06C7tplsI4ZOnj4P1AiI11WYA0a47jQWtsQWdizCoxkzUSlcZVWtbWD4d+WDbGoDGoKCn1h9KhoWyR0+DVUHvYEZWVcRJz5RpJuWDDijG2OqxEG+dOEtmTyfKjk6ubN7/Z3rw+v7pMuy1EOamLAqNLle+qzb6jUglQU2TDNZNP4kOMKSwbskgYU+nMZtu2hxp00e93u52vF6foyhDidN5YKjeb9f1qrRkvLxb73eHV9ev5pKpslcLw6cefcjM9DANiaGq3ubvp+rBcnHchTOcLU5ZJvEJi44goBI+YkVKCXDpnXZURgx8sQwh+GLoUozHW1U5ilJjJWOFidvbcb9Yfn1cmnXz1Ztj4eD4vmTFkSKBd17OlGDMgbYMIKYQsCZOPhXPElgxrCCG0tj6JyaOEFH0zseMYKzns724NglucHg57M7ngmHabjWkHNNjYpjuswZVg3ezFl+XFs+HhTRqG3B1y7LvdnueG1Yf96271ujwrsTB6jD0SM+NRc9GjP+BI+n/suH8Ufh7B7fQYRHl0uh2vVMAPl+Z/pxPBEUXxSDo5zqCP+iocCyJH/efIy4XjUAofuEjw2MX2GAaGxxd5TK0BqC1RJeeEZkznoGFGRD3WOo2vo0wkKmQtmRKrKVRzLAxYx0hKlmyVkkBhiqas+sKBWmuI0DCzZcNkSCpHjaPSgSO1BhlUciQGQ+xcQYaBiIwjAFUhtlJUAgaSjPJxjCllQhxbwtUSFJWx1opkyQJ55OEYIiTDOIpcCgiFCrIRgqysCAaQRhVNVQUMSFYBYgTIAACUVQCI0KASIFliAhUQRELNMubaQABUUDPkMUQLInok9xGNqxcgAjJKpaIVAZWk6lFYNZFRgURAAgSqSix5lMNlTN8mzcoEikx2fOyIKqhkySIZwI1VeKR0jNQRhawxJVAcnUM5ZR98H1J36Nu277r20HZt13dtl7wXzZoTjUwnQwawNFgQVoaMCqmISMpKSDmokSjSqyQ+Cu3KhFmRmRiNAikZQKfIRAwKADLygACADAuhtRaJxygl0rGxCwFgfLALAggSAROMAiUbMI6KEp0jWwLZMdE+ZsJYwankkVJDJKSjaQjZoAoTiUgWhZGgPRb8ZSE2zKSixAjHzQHoo5PiDxvQ8BEFI4DAZOCxlRMRmcx4azCSHms4P6Q9Rwy6Hh8Nf1h1PjLNVBFJRI560piXHEUi0ZQlZ405o4oiIZKmHHwGhLYjg5Ms2XMoJ7XfDdPFjFK2kNu+DyETkEQpRIHRWRuDL6oyZc2HIflYZERhygoqfd/jeGzLCpozYuyDM8wEEjwbBMCcs60LrgtwTDEBQ/ZRAbEwOSbpg7GWCqvgQj+YmIrCaVbN2q33rkpi
LZZsq6ZfHQwbVo4+N65Ezb7rQM34xGNrVAGh6PvoqokfWt/uS9coYOi72k6GGLNKSAFMVUI2zJp6w5hVAMAQkjFINvqudiUbkPF8qZC8pDT4WbtczoeYgx8woS2sqL1/v6+M/uKPL3bx3f27IYNTUYmDZCotex8Bue0HEc4eUiYjlAATQ9U433tJw3IxfVj3zIYLO2z769cPl8+fMGHwsl33TFS4opo2SVKKuWJjCo2FSX1QALaGxeTkQYWJjWHJaIxl0pyUyTIiiQEAwpxSgkzCrICEiihIRhT4mHlkVTHESEcTKbMBBOvKERUGo0BNzCNMi/i4XkMgYxBRVIJ4Z+240GDrPiTzwxB2fTuE/bQ4KYpyI33oB9EUYu99i4a5rIXzk+fPXn7zcsJVjt10MbndPiTJRo2YqmgWkichPPTt/m7wlz95Pr1cXg6HP/7TF//7//dfNW6uFsH0n/3kZ+Hd+93u3Xz5VEpab9bhPi5mU+Oql6vNxeefVuWsmNGm3Wy3eUg5p2wyrfv7EODgOVOVfUBJyZguBlAwADxo7gGd++z5T2KPu/3++ac/3by62e23/WFf1XXWfZviti9KJyQ6rSpjjCvrlLUf2vXDA7u4u9ucLj85aT5/77+duVqTj9L1WYq6FLZiTczeVU6GSGRjFGYCxr5vyyJrzJKcDE5LLOwkRzy0YTrFcsrbLcwmjihdPv3oy+dfgoR/9x/+65/9xU+vzk7aLinErBAGfXJ6TqXZ73uTYxiG3X63u99NirKPw2H18N/+2++/9M9twWfni8XJ9Ouvbxcn9X7XG6w//uzzGJLj3ZeffHb7+1vXxMXTxaR20RZNQd4Pv/jZl08+mv+rf/9f/vKf/fTvf/ntdD69/6pl4Cef1795+e7HP7woJpZYr9+G51X19S9/Vy/mu990k5lt8/7P/vHny8b97X/++xefP0nL6epueH23UpTnn5589PSU2Kwedn3niZRtdk0JFFFizn4P+eyTi5e/etkc+nJhbGk329a5Oh5y0ZiTyWy368kvZstqL/F2mw6r7dNqigjb3V22+8nUxFSLpBxzyB5F5vMGRH0/rO/v+u1+cbHowxBiYMrMrkDTd0NEU06qPrWH7UGaPDm/9CmKoMYEJkNdEJnBB2vzcj5p9+Wbl19buzp78sXp8/PjwSjnyXwaUiyr2hA/HPbXq9cFiFTRWGRr2qGDmNkWkAYELK1YZ31HbcZv3r796Pkza4qsg2J9/ea2HeDi/GS5mO5Ce7/f4bKZX7g39185wN3W3PtDNh01XSoGiRI7nC/ng3jDpQDf3xzYlARFMrk+OdkcQn9oBXNZTrrYdhoY4mI6fdjuQTAmU5czg5NpY7+7+Zab+nn9Ig3dkHYlWsm5QId24GyWJ5fbdbAKh+j3h36+nJ2cTqvS3txeG4uVIS8+aUqem9OZF35xefn25k29LL795h8Oh/2mvK2ffNZIioeOpCvqWcg7Y91kam/eXtsZ57BXY3yK0If9be/7gehkevrpL3/zX559+dPUBgQxxGCsRBERVzSHw3Z5Ot9vt6vVtmzmZClLbtuDnZQppZRyr7DZbydRfvCjX3jvmnI5mZ7cr64n9exhvS0NnNcuJEmgAv0uqC3LGDKkxNZyxY4m+1VwCQSo67ZPTmYSe5R4eXHxzdffdN5/8uzqy/nJ7fYWC7s4qSutA9j90Fti2xQ+wdC2woxA1hpkNoVruwApmcKaxolk6ziHZJCHYWDrDLnY70IRwKUowdjSCfZ9O51WQzdY54C4qurd+iHnfHY+7Q4tgOSUl9PzHAfJbTmdEdPD3X1l69V+vV8fZk1z/fZ3l2cX6r33EVyB7AoNcYhhyAQKhkMIOQ5skA2w9DoEdm7b7idFKZqHw7pssu9W8/JEyfR+fzY706TWOFDdbw7HFAMcHRnHkuXjEDJaMWiMSyDCmLMYJwzRrAKi4kPsukFUc86GnaoaphijSE455hRH9GnKSQRiFjYjUJJgxF2OLnQRycKk474KyTATqMSYh6HPysQmxhRSzoiFK4xx/aHtuz5474cgCqogUZwxQbQsHBFplqYpS4Oh3Q5Df748eVi/u3ry1LJl5tPlAjNll8oiVZQns/L2/Z1KOru4nJ2dxZQaW6Zhv93umnomiVhtTqntuu16Z12DpgiiJ7NFiRD87ne3305MxWSJkAwE31tCRANIRTUhn3LutoeHppqTGgVEyAVZQkwICio5qWR2latrNiUiKnpSmpSLtg0kAVGrcpJCJmMNUQweIVtGjREBVZKxhdq6qGpk13uPiMa6vj0U06VxTll8N0yaZuh2IMYWDg0aRo2DsYx2hhhzFiZGGXx3IMNlvXj79TfvXr51k9nkhAjo+tvvZqf7qp7PF8vdKpIhspnzIRy8qU8Wp08Ou7UB1hSwSSopDgMhSE6opBkO8TCdnTUnT7r1ncQO2bS+C9mrIOKRMaQAqjJi0UeHPuBYwayPrGsdL9RjM9/RkoaP6HQe52Q6Tr7/XTXasfUMR6jwOADII+j06K1LD9eag1HOQ7dfbU4bXkbMoeulPwweASfW9YMHzTEIgapkETVskgpIstbEmCALAKYUc8p1Varqm+ubk7p0xoAPpk9ssylL1BSDODYSUwYAh8xWcjBguHA5dMmr5uRINQy+6yNqYd3Tj5/F9v276+vQ+8XizPoWkeqSi2WhunzHuOt9UnCOdvs2CWSBvuuSyND1hTEOJftUOhLNuR+MGzN8OJ2Us9KqhOt31zH5girlYr/z767fYkJj3MP68MnHn51fnr9dwbwuZk1h0r5r97OT894nJXTNHCz37baulqUrd/t1VdeFgfv7jbEVm5KLMoTMKVhMbbvJyYOkHAMkgjigZoBBgHg6Xz7/OPhD9td3m/TmbovOLOrm7OLJt9dvdg8bQkiKWXIUEZSEmkWZCDWBxLKqBHwY+rIZoh4mzTyEoENQksXpotu14vvu9l2dewG72x9Klzkl1djevJ7MZxVW61dfq0YxhZvMSou2WUYHqVXjiiiQ2l0bvCl+WdYzgjMoihEBkXUcswnGh5GOFOYPjKIjPPQPNCA5hlQeBUuFD96iDx/zqOR8by9CpbEKcESmPEbX5MPrPDqMjj6mfAQOwfHjj4/v72Wh4w11VKDGIXlykqJHJJCskkbYDwGoJmYD43gtiobHDQNVUzs/k2ZBVTlKLABGkSQGMwz1pA6DNwrGGERgNkxcEC5qN63ypMLSqEFCFIkCAoaNZTaWjDPAbI0b35DQFGpKFlIZRARQU04xI7IhiQVjVddF4ZgxhayKgESoQDi+g496iSoAWQAkYiUZ67BAM0BC5Tymq4kAjUACzZgFJBE7JQRIyATGgHUIYNSpKigqGyWjqqSJclLJoKIydvMxASqgAmZSwlFRSgARFTQNEIPEAJKyQBYVzQD8SAZkxIygqlHUiwYAZ3hsWMMRJA4AApSUEnBCUuQMIIB5RJ2IdH3IIoxIHHMK/TDsO79vu65vu8Oh61rfdX7wgBqTMlJhCkNoGR1JU2BjaGKsVczBRycmHy+fnH3OESQTHm1
TagyxBclojMDorlHIESHjUc0fM4CowMQWrANjge0onyEQjuqMpnEpr0TMRoGQDBkL7MA4dCWYAtgBMcpYJZdBBJUYjY6tD5CYCkAVRmRWQUJgY4wq8Oi4ICSGEVmlCGNb/SjRqqac/3upaAwfjnCs0XM0YomOATQE1JFpjQSghHQUZQlIR4gjPN75qEdo1uhEUlUEURnl23H9liWLgIjGkEWO4DMAkiQAQI6SZs26Xe8VNaTIODNcdskTWFuUjBokhpRExVgCnzVD1AQCUXMMmUkqttmn7GP2WURGsS36jISQM0BmY6wlIsgGkVHGm7awygwiOQwSkzOlIBJZRU+mRDYCkEASgVX2XcQBJQmXMnRDUVfFtMmKtqnDZicxqCAqRd8Zw4oUc5Ycq0kJhlBTs5wlxuQPk6aKjDkkBquMWZKAWlOpMYmpcXa33iO6mGLKYgwyqlFRFUY3+OBDYjbM4BBzNrtV68jOTqeogRhzykZJBrh+s7MF/tlPn/wXf/d2433SwhUy+NI6RY4ph34gVADyUdroy2npu5z6mGKARRGUyJj9bghRU8DvXt7V0wWxCSG0+w4IUMUaTFFSzL73Iwoo5aSizDweW0hRsgKNtdhYcMGIMSVEqKpiDIaGsVqPkBkRMeVcGGONPR6QJDOOhLRj6pER2TpENKN0TKPXGkfNFBEtW2A0bMfdrzEMCoSWGETy4zsWAIBBMZouLi7SLn/18vezTz+eufqwvvvi+el+l1xVpD4C4P3NtckAmjWQih1S+/zqYnW3ndjm9fv1bvvyp59erB42q166m/uLi4L6wzd/9e7Z7KNdm3a7dlZNt7fvbt68clW5Wq0gw/PTM2CO3UBuNjE0L8zDzbvuIUWU7hCb56eFw83rh/Z+X5nzujilFkqe1DO32++yyKSpS1f3q+HscgkGN9v1s4tPNB+uN+89YI1Yz6ftfndx+SS2GQaJfX+yvCBwKWmJmHN/2D2k0NqKnDHL5eXNw02M99N5vXp4MFxIkCzJMmefQwxlYfbhUFg0xqQcmdhVhaRU1wWWje8HQvJoDZvFvM4xAWkzceI3v/vt7ydnn796++35xfRnP/ko9umv/v0vf/5Hf/LV73775c8+Pf/prF0/3K3Dr3538y/+4k8qKn92elZY/bu//ebyotkPdHl5MamXr797Oz0tTuZuUlfLRSVS9kPuD/e/+fWvfvHDTyXaxcVHV59fPdxfb/e7//Rf/+sf/+xHRZr87Ve/KZs/3t/3fk2ru/Dnf3r1/t26aUrv4s//8XPxQ05JMz59MlmcVIf7iXK1XM5JwsP67uuvXl+dnz7/+Kw/tM1ycnZpWE3v+9u3d3Xlpifz2WIiKR+2O0lQT0rX2CH3zC7FeID+9JNLA7K/v/MZnn326WE/HHYryNruVycn87tX16pn06ezi6UrU518GrwYNpIETNYUUMGwcaZIBpHAGUsoMcTN6gEtVHUFoqmNmcQYtsCH/VA2dj6bMUDXdg90X7nKKhdl7SUBgELKklB0iPrkoy+qqvnm99/cv3ttmiPWutt1RZ0Q8f7dezamcjb5FIHXfW8dIsfd/uBMNSkbymGzul0uTydVcXd3N2nm//jP/yIMB6W0eli9mM3n02Xw+ebdg0GlkkrD717+/laTa9yqb4uy6sTvDu//9le705Nn1sDbl189/ewTdNXg8/s31y8uPrVI7XZfnxavr7/75OSqLhySmy8u33z7X9ysrE2llgBlOqnOL69u3lw7wn3Hh65tlA8PD/NJVVYFG0ErHodisZBBt21QSXfv3xWmocoE73MKQ5R6Mp8XxW+/+nU5LVCprKa7YZDMfr9Z3b6u59A0M67T7uah0DnndOjaxbQ6hNZMJ6G76w7d7e1bD2ZRX3Upl843p0XAvYdhNlu+vv7Vdn/dh8uCk0YFFWtQM6NqNWliymK8c0XfHSQTldUwDAomA6sRgTgls5Dl/v71zFFlZPAYc0vGDinFlBlw8J7ZbLqHCL5Es15vLTEBZMmHoW8m80mah37rLA9s3t7dXUyXh93h6sXTir94d3v/8vVDaaWube9TP/TIBVImwaELbDSpDiEU1QSt7VKs2BguPn/x8e9efVc0ZZbgrJUU2ri7mD6pqb7b7FLys6ZKEF1TG1AW1x+2tirbziMVzWSya3dd32mGFGXoQaJhgkU1875lwhQjEhZVZQvnmCPE0O52h1DSPOyDSigMi9/uDvdAWFST2pi+HfpWQIEIQ1DfDRB60pzjYAAMKhoLKYV2t5xMHUrnQzOrOt+WzCkEiTZ0/fc94ar6SG45NpEDHY9SAo9Wo0efRx5BhRhiOhzazXojKE1TF8aN+7QQog/B+yGliKBZJKUERMjGWGeIRhws0bEcVkQA0Ro2jIysoDGq9yGk2PddjILESQTYkil0GPq4367WD7d3ve+Z1BkKQ3JMgghWQSnEbAwDibF8f/uASAlgOp2fzk6v372vy6px9f3NA1udzJpq4lIIm9324uzUuhqSTiZm6LbrzW1paxIwZGLOXXdou8E4lwEFtCkLjWndriR0U1fX1TRH71OMKdeWiMyh7y6bKyI9dBsiKYq5LWdJc+5aZwxoEtA47MduXGNLLhq29Wh3dxOXJSOSK1ByACDJaNiKJsnRGNKcffI4QmWRkcnWU1WwxqIxoIGYg+YwDEVZiKCCWFdXk0noPAr5EIpZ2fU7c9hMT56WeXbYP/TdwRVlSNJvNs7Zqi5v3r/96MsvDxtfzc6HlNvNrtsPs+V8drLs+j4ctmRcURFIDzyZL07FuzwcvG9FVdWJZARFZcpqUGKKoNa5xodONGvOsW3HK5BpjJuNiYQPPEX8IBGNx28FGQM9Hzgv+Ch0PsZ+Hn/wiImhI5wFicZ4IxDyY/iHAJKqfjgRIcQoKeQkkrlx2nUTkCIMu9SLKGVxIgrZg8SQS2ODj0rS9x0jgmZQDjE440prWRVQDUDOst7tt9vDpGBHlIaY2Zqi3O96KtmUxrJNISiIz1FSUMgIUSF1/WAgGSYffOg3Zb2Ih5ZJL549eXi4ff9wAzKcnC4kZk61m04XJaTGDX44dGGsCU8hZJGCJIWUgCnL1PGhTSAZFYd9i0WyZrYsq9PS7PcP795du3p6sjy9eXMfpEupYzaAuF13Lz76+OrFp4cBQWk+naDsVvcrBJcygnWz5ULZhjiUZVPawrdbSclY5/ugwsY1yg4A87An8SG1vjswEiCqgh9Cjr4oIIQ9RkA4tZOLkydDu/OfPdftfrjeDbfb/W0Xcko5A6C2ISdJcRRaCFPKAI7ZZsV6tvRpiKHF0HJwUE9n82nOgQPZAg1kshCl3688g03bw0GDlNrMFuGQtrtNvTg9O5/5/abfbyHsg+pAPLZ3S86lbZzjsq7w8Hrz9/9qcvHz5uITqGpwVg1nGrf/MNK7QHV0wCEoIh/9b+P+nB6BRh8UoEck6feGt8dfxO9VI3jMsBHQUd9REJQRkPSoMeH3j/SjTvXBM4SPlQVH1xEdpapjnvj4Z/DsDHISdGHoDUHOA2oCBsxZIQMd3VIMMadgTGnqKU
+nZr4AVwKbLJHAqCoPZOvK7vdVXbLoCKIzZBybeemWEzupuHRAGhkh56xKKmwIjSuRR6iMIcOgqERAlkdOgCjCERPDCgRApEXpXFMTa05BAdFYUCYEZjPSOMgUmhEUkQxIAo05BU0RVUcfEOCYO2NUAYUsKkCEjMyKCARkDTmTjVMuABBFYMTtAQNbIibNkAKogApIHm0+HyB9oJqTR0RJPaBITpJDCl4z5JxUadQKJHlNoDIal7JKQpCcAiGyMgi7ojDM/dCFlLIqkuQUfaQhpsLGru+stUM/hChJxGfJoppCzrkffO9DH/Ww3/puF4ZWJaUQUlIwJFyYwjETSbYEhdFJxTXGilVzSimlBEACOcGoXOTEj2gfNAjKOWdrOMQB0ShkQgFipJF5SEiATOwMWQvGKlskFmQEQmBUhRwVEWl0iBTATskqEhCpMUgGyAIyjG0GOauCxJRTyjkSAOGREi6SICe0x+DYaPhDUGRGJlRNKSuSHC08GRBTSkqMoKr5++l4/A/hkfE1tkHhqAYhACgR0zG6hh8+GB/vfR2rPB5/CAj0eIeJHPlh8tgRIiIiCgo5a86aRURARVPO43NEVJgNZEFAERAEWxQ+pPaQc0DIiMAEhotiMmsUdeijikQUyVoam1LQIIadhui910FVIKWMgGObjLPlqC8gsrVWNGlKtnTGFj5GIINFmYjAt5h9URY5KSBZS0OO7GoikwUYpTClySI+jKBLawgYZUhceCAQW1SLeVgfxgJOg2Z+dn57976uyugcUMHMYXdIPgyFDJqL+ex+/e2nzz979fJ9SAMS1K4OWYA4GzpZLG+//boqqm6fgIshJQUktpK9dRySiCoSaIpsXemmsZPdFmwpzgIZSFk1CLKRSNdvDk+z+fNfXP3Hv3/9cB81O0UaUo5RRHIzq/sMkgBhIMWoyanFqETm0CUOqWjqdu+drYlzN/jdbjdZzIF16DsfokisGicSnSutCiKQ5ZhSzskYBsTCFUkwq/qcmVnVF8aQIVVR1PQYzSRkY53BzKTGMBEDsAiwMQAZWZlIIDMzkVVUtoUxZkQ/IKmiEh8bJQ2PxHeDhEmSsRYADBtRBoQsmfEYDz2+CbHuNjtrXRJRVAN2vbmxOtzd3tblWb/3ErSaTXfDuiz4ZD4JbfQxX14tb3c3Wc23724+vvrsi6vlzfXr9w9vm2bS3bx+COHq4rLfs1lWXXgbI9XT+cPtq1wmAZzO6jdfPWTJlSsbLL77+qZszNuv3pRFlaGfnM1i79+/vjEQZ7PTF8++3N/sTucX/nDwXfQ+CslkUnaHYKlUtm2IkHm37U6XYR/Dx4tpv3nAlJ3V5nQ5KWZtME0xd3UxmZ4lP5RFpSho4na7RcdDhsuTp81stnn/rpkaskm8GmfIKEMuKldaExNrRgCMWRxiTgIkAiRsGWdZA1kkLnPKIXaldZDczZtbKuKzp/OTs+eLp5++uLr47ptfP/v04xDw6bOf//Z3b/70n/35r//u15OlXd29/cWf/w9PL3+4ub99fX1jbfr046dffvF5kB1qMbNUznl6slS0998N1ze3LJdEOJ3afej/8k9/8v7d6uGdMNu37x8wSQ7panr15aefv3q9+fTzF4nhT//yjwDtx09OH+4OZUnNojwceg4ZkvpW6qn5+s31X//nm//jX/7T//o36//w6t/92R9/9vHPnubf526fq4KHPt2s3n76xfOHu7vnHz2ZzIu7dw/XL68/+/IF2Wp5Mmm3/rDrchri4BWHkyfn6uMB9/OLhalOfv03r4ff3+y2Dx9/dtV1a1NOVePmvk3G8NnETR1sw7tX13bIZxfz99s1CEHuJQpZQ5aYTEg+xcQEZVNBxv36LvT12fmFR0yxz6K2tNOi6tsBcyCm+cmy77zYFP3eWkMoIgmIy6lDwBiGvt1NquUv/vSf/s2//6u//+vV8S2hslGptkUaDprVsjWWfOeXp2fbbmvAimnu1rvVITy5WE7qWXtob+9vp+Vk37WTeXP//nY+m8kkQU5Pn50NQwCE1Cc3K7vdbl6Z+5sHwOLsrLp9ezef21V3szkYNUxUaqVkh0N7z3jCFMXYcl7uVzdvXr4vJ9Xq/qEw9enp7LtXf2viUAlLF9chCybvu9v3PJ8tb15/ZcJ8cf602+2Xs4vCIBUTa/Ru/XD//rbeTyZ13fW+NJMnz758ff910ZhuaJtYfvub1z/+yRf3Pph62Wl88ny6Xd/X1XT19vWB8MWXZ2/v3mWFToYh7k+QdnFTL3mzfzgM6588+4UMh+/efttMq8aHGn3fpfe32x9//uXX7779weefvnn1+/Vu9+zsYwXabNsSjMa9q8xscbbf7zMogUWi03N39+5m2+8vT2tWEcyHfp9jKEpu+9ZlWG/bZx9fvPvu3bSuHh7eXT692LdDktKEaHwy1oCyijM08V2cnNR+6LxX2e5EyVojA8+n87aPivrq1dun5+fv3rxZLmYvnszfvF+vN9vp6bMKYoIchoCWJpNaiqbr+rqpgUxROBCdziar9Uaom6fJZ8+fzRblq+9eSlLU4uLkk6QD5DApHGlylvb7rp5bSAkQkvfz8zNUr4FvV/eTuko5u8o5Z7MIlxVGFd8Ts3HV8uQshFYyYEZANK5MMeToa1sg+K5fqWHJWZIAGB86P+wLO5lPJr7v+7Y/tF1KZuIq73sFLwrIYJ0lzRjyjCgGH5P4vZ8uKgCI0WdN5Oiw3j2OIEfXxeN3Rr+PKsi48zqOGiJZRCUhaAYKIbWH9tAefPD73cY5x8hMnFKOOfngY4wIIDmPZy1jnQkJcQSFqoz7dzlmg8SaRGyMNfb/T9Wf/diWZOmd2BrMbA9n9vnOMUcOVZVZVSxWk+yS0Go1WnqQBAF60T8nQI96EyAIAgSxSTXFLnaRrCGzKjMyIzLufH12P9OezGytpYftNzLpwAUcx4/7ucd92zazz77v9xEA5JS6fh9zjFGGJGrgfEnc9W3TtN1u2/R9nyTFoZcsI86mj4OIiqmIgVnqht39WiPWk8oXZT2pN/utaVotDnMaCo+T2XTbD7u+v7u5ffLsGSqawuly3u5v728vwbiaTFJsc+6QQhIBpKaLijidVnFottu7uiyqcqpqMXeSE4AWgXNO2XpiMJO+26tlhyW5ApjyMDhgSTFLJypozL5yZUAqkLxKAhEKZXIl+LJvtmwIxDkOiBFMvSvd6LZK0QERcx8zMOUhKXVVOcupA8zsnPOFLwlI+nZX+FmSvB8GF6YGxAZZBmAuQtFs7qfLw8nhUQedxNYRF95dfrgAMnbY3t++//53J08+ub48L2dV3mvTtHWsfV3OV9Pp5GC/uWr2e6KBXOuqSVFNkg9cTOKwB2ti7KP0oJnQSNT6jVYL7+aM+9jfA5ooPJiAFIhJQEbC4u+rwx+gRA/XKI5ARgDQH8xED194AKrDCF95+JZRJ7KPEYgfeO0jSHR0MI1BoYclUeyZfTNES9FEfZbHiM9zVrUmRybZdV2J4hHNRGSsBoIskR3v+q4AMoRk2SkwEpI1fc9FAR6+PX/z559/vlhNN
m2ah5VZaNbtZF7GuN9LB77wrgBfEVGOvSYhVdIcPBj7pl33qZ2fpIBF1ybPfvXo2d//+tftcN8N6XhRNHFouzajZ7UStEfJqsSgoEPXIaCaOOM0DKF0yiZEOeXdtilX/vT546Cwu738cHk+WxzU5ez1mw9d7J8+fX570Zhzm5v94fHBJ19/jmW538fV4dm0KO5vdteXF2enj/dDnB+cVPN508TgXTWZEGhzczddHhcOry7OHZeh8N672O3jblOXNPRdHgYunSY1pyoRVMA5iBlwi1xIJvIL7+ezevOzL0/abz5sGsxmWfPDsahozGJgqGgIMWlRBe9cJhZ21ezANpkMpNvlsi4mk7TNqhy7zkR8MZEUU9dyWRWlj/uu2+x36/vj50+pqLdNc7+PBQVyXttNu99x4GJWIhWWOiNvWmX1EqPs3qdNt3/93eT0k+rsiZ8fWAjiwkj2NdAHHgeMGKMH249+TIyNCJTR6IMIAApjyumjv+ijTPSHnzzoofgg238cI78Pq31MmNlHKPz4jI8Slf0hRemHW/3v5aiP+4KydoiZfblcESBL0jyAZkgDjifhgZ1zIAPFSL7AckLzBUwWWJRIHEaLhBoAVylX5UQK0ZwJiVQKxtW0WlR+NS/L0kiTA8Qcc84pikd1riBGcoy+JOeRw5ifAjVUSTkzAnvPIqVHI9YcQ+GLqnShIE2WBQyIHBCRR0aviuAduAAFgy+Bg+WMuYWhw2GPqTNTQqcG4BwYgpjKYJJNldATEzEqmTkGV4DzyI7V2ERMNUdCJFeBMQAAkxoBouUEgKCKSS1HMlXNNh5eihmapEFNQU1SHhHGpmAIqGSSTAFMTGX0UHpGUnVohXdIzhGPMoWpmohBnwibtmMgERXDLiZ0vutiBhNJEoes2ve5H3IX0zC0FjuVaJpVlH1A8sVkOa2nLnc67HJOoSrKsqi5HNtWx3gdaAYZANgsj3RnN6J5CAWQMGQAAmFP6L0hI3s1RWZAVHbmS/LeCBnJRFGVMD34dXBM/4HCmPUDBDbwSGRGpgRmhA8KEYEhoIigCEnCEYmo2QMkyQygJsCFY2YswDETMTsEQyY0deyygYmJqclD071IHkfID0PhB1bRmF+mj0CvMTr2UHf28NDDL4EN9KMFEPWjOQxxjDKOUjEZjG/1QcUVyWOJm6qajjqRqSgYaM5jtYiO7jU1BDRDYkYD8C5M6yFlEHLss2o9mbRdDmU5QyDq+s3+gVcs7KwgKvKQSNHM4tA7733tEC0npcSOx9wsARGTMaEhGaKouKLw89rVJWIGNWAkdGoExH1OSJ7BpZTLapY199oqoBE65wTT0GUgxYKQERmKeSjmC4kqcWBmSdCkPDs5obJwzsGgRVXpdu8szA/K1EMc9PDo0XY3xGxEDsCmRXW7W3PBoXC7piUqObOIofcqo7mUEZ0pxihATDiOFgAC9kVK7uZ+mM9sNg++YFcVu23P4rodvOrXn34W/viro2/s7vyuT47QfO4TI5GDUPhu3/naV+javqvKYh8HVUw99RoztEMG4zAMqhSHGGdk4+2+LP1u3+13iZDMdpPlovaBvXdFgAYR2bvSco8IUbJgDoUHMIVMSEXwBppFiKmoSkwZVJgB2Zlzhjg25T0I5wjENGL2iZCJR27WuCJHQtGMxMwekZgZQM3EbEwXA5NDAIBEVJohMzOR2oPFruna6XQes2KBn37x5HxzW9W8Whycv7uuiKbTubR2d7/W0pB0iGnXDTeb+yoXXWw+++RHF6/bwg2XN9+VBysfl76EeRWk4CEITYt93zkuvvryk93mwy5GWs6yDpmVJsX783d//MXPVuXB6eOqbbc31x+Ozg7rWfGLl799+vnTnLq+6y5297DhSVHe3p0z8yDt7eXtfHkQ3MyArq929WQxKBPx6snzQdK0qi+//Z0TZYLV/Nj71YyfTFezWZhZaochMpELmFOMsSe2UDtX1IeL4/X1B9EYKr/ptuAoa8x9nlYH5Ip2GDxjSoOaMRO7oJrHCmc2app1WU3NwDRlhX5oyAAUP/v6k/vuUgKtThbv3767vrj86Y9O/uPf/WJez2f1rOnizW73/KvHRv1i/un7V+8ub9bPT84++ey5w8H78D/+9S/+q3/5xfa++e1vvv1nP/2jto1f/fGjfrP50eGnflr8zX/8p//Nf/uXt9++6jozrmKxO1jy9fbDn/z8i369y3Hyj//wT9Hgi6+fb3ebalr+T7/4n//Zz39SzX0zyIebNQAvfLHfJ4++mpYH89WE68vbps3ypz//8YfzD421EtEXRZubk6eLWVe17TZ4d35xTSTzVYFO3rx9Xfjq9Ox4Mp36qtxuNkVZRs05prIOy3K+a3u/LP7of/VH719dhFDqZHj87PjuwyYJHD+dJ4Xri7swlNXEff3TR9/8zW92NzvwKBKrutI4mlx1iAOwEfPIwJ0dz93OieH13X3pJiHwbrctpxUHTw48hyzZRIuyiBKbrotC1WwqQ2I/TgaOXTU5Lrv7nWb44ic/fnf+ehwFm+1GMnZ14QgDQhvbXdPWRX037EWlBjg7PtBmm2m43+9oSMx41+yIajZmtWfPP3t3+TYRDwLa9rumOzicpibdvL4pp1W5OHxz9WE73FXu0e32iiKK0+399e6aZvMziM1vf/dt7vN82cOA7c3f/elnf/T9N7+KqgfFGbvF+/X94vS4F80p7Xa3DgJGmpytbq/Wov1Xnzy/uErOAVEq6yI26enjx99893IY4idfPe1kc/Xhu5//7L9+9/YeY//q/G+/u/3VX37yZwDQDd1nXz6PkrDwOdDm4kNB3fp+ewG3jx4tr6/ewZD392+N4PB0NvSNITX9MFnVVNIg8P72cn91PRA+O3v+9//u31Vh3hvc5fx6vW2Svn718vbqwpVlWE4v7/YLX1Eo3p+/D/Nlu73zZeGNtQ4IUnlYrJavLy8v764EdHIw3683+909wMShQ1ae1Ze7Bsppk/bVpJ4t5rteI2csfNOnQpgRF/OKSO6H9VyprOji/U1ZhLKeWBHA+au7jS9mWdrycPHL9++ePDq9v1zP5xNX1wfL6mK3C75iLvrN5vD0IEJab9eHB4d9l+IQQ/Aqst9vqtKTd62m9n7XpYocL5eLq6v7NvXOgyYQgGyIShiCMtd1FbtmdXYUJemQLaUwzm2M+/0moA8eq0l1d3//5Pnx3V2z2WyqaekC9duU2ui8K8oaPLHj5vJtRTapgzeOIOjJ+zJ2XV3O9ju9224Jc0G2rJZt2yKmEJyJKdEg0jX7ErEiul6v67ow9rsuzg/nfde32wY9TOp6Uvsfdhn2QxXIw74CxFRFERFMRQQQEDQOg5n2fZsVh6y7Zt91/W6/QwJR8c4FX5hhkpQliUhOiZGYmBCJhoemTscGCoxMQECeEAByGoi4KKqUTSCb5RRjlmHfDkMGyaA2SO5j16ScxWxIQ9sPfZ8yQBIdUk4pp7FKGFANgkoXOw8+S3KBGbVtu+Vq2fa9c84VAYnWt/fALicKbrLb3D16cirSXN18CBRm04Ns0ratc6Yi++1O
AIiorMokZipmmNV2Q0RNaUCHUBSucEEyIlBR1KnXnAZ2ha/qlKTvOjJRIjUVJGAmF4gLYAfAI+oCCZznhEjsi3qeu83YNiwqBIxCAJTtQQ5zSOicWgIzk6g5qmFwo9yg3hfOl6LJCBcHx/fr67ookXg6qYc0HXokVKZ4f/5d+enXi9Xp5urN0LREqCr9vu3joD7s9nt5/6aaLTa314tlXXrX9i2lPpRTH6aT5Rnu1yJJUj/s4kCO/JS4BDZ0UhCBaeo7MCUmAo3tXQLW1KUYRyarI5fG5Y6K4QNkiAjG2jIAA8PRhW8jTeT3ZXwfnf0Pl6saGD7sIx5Oc+lhew5jiOfjun3cY5OZ0g/ptHF7oNangcEyEaDWvjjOw89ni7Lb9LHbgq3NAtPMhzbmVtU7VENTGYZccpFFEEFFe0nVdCIpJUkcnJiu+7xv2llR3d1duGGY6LQ0rQzSft909+RdSEL1YWZvKae49kgOwfkiFNU2a9tc3kt7fHjG+65LOC/82cnx1fuLFNdpcAdViXtWDAC09KbtcN9FGJSTOMmhmFzkuB26iaaKCwS0KMOu8+Xs5OCQs8Vh16b9bDKZVdOXr1+z50dnB0Pq9n0kStOqfPHiyeF83rZ9vVwdHq26+w/n718++eTFdt2YC8V0JsCArigDgm7Wd0ZQlW57f51Uq8XE0Lr9uttvGKTZ90PXAOkQe8hJIYNkkWjN4JzLsbPhQzldhVl9dHJ4ff7LPMSffjqflfTteb/vEhuQA4+UMxKTcwH7WBQFMmeREBwahGqSh1p0iPuhmmYwDOWEyXLsJavEFuJgcS8oliD3XWx6g7h992F+tKoOD8RPkcPm7naxmh8ez/p9ww4dAmNwIahqt9t6zN7VgRofs13ut29/ZTT1szkuz4qjU6qnVtUWnDASohghKZp+jL/QHxiMEPAhI/kg6pghwceeso/4ko8euv/iwx7GBjwIP/YxgvYxc/nxJzywU364zNUA8EE0+n3W7feptEwOkbmehOmE2EuWNPQmEdOAZlQEVxWOyEuXus4QLRRalxYIHBTem5llNVNMSJ45ePYeDQNxyTip/MG8XExCHRhtYEQQE5WcMxo4Rn4oPFNmh8hIBC4YAgwdICioMXpXIVrsekgqqEXpqzIgjSoMoXPsayhKDA6A2BiZjZFDpWWtwBgH6BVUKAewbAiq2cSA0fLooEUDBGIjMhIjIyYjJ+QAEVUhR1NRyWZGzKgZRkuLZED34Io0QTQlA0emikiGrKLAiAZEwXQUKlDEEFE0qZmpiorkZIAqWURAsyMrHBYMHQE4AuIxXqtiItESqpiopq6vympfDY48IonmbCIp5hzFQMRSliGLSrKcH0QuIjRTUdCUTfLQkWgVgiP2rnAOUSOPQeKUyQGPE7kxGjAH5zyRM1BAB+yQkFnJuxHzbAamCMTkPPhCqciApCKSQcVUzIwcEZNIBiAjMgNyTAqWzRwaZmCHzpAYDRnIDEB1dMOYieZokgEQVBTMctIsKhnDhLw3rYCRvXeO8QfyoqqJZdRxk20GWURM8Ye86B9KRTTOJYQjo5F4DOmMpkhEwofUGT4gzH9QdmlsTzAbX4doNDKNEi6aqpqpZDMY53JRkawqqoCa84M5ERQQTdFAVWXUiBEx58yIxM6ScagtWOwjjZXnKqEoCNkJxP1gon0/OBdULPWJRJCMvVcfogND884r5qRi2rMLakaWyMgAMBuy8cRJ5cwhpkg5gVmSvqgWWRCVc1K07IgVkuJuMeVu1yEzF0QU2EBjci44qkST9kPvCBYl9VCXRbPeKoOfTruYJt5t1ufHp8d90613958uf7S5Xg+3A4Wi6/ucMBODSRsHJqvCZFkftuubwteaevaQTZiIgTRHF5iR0iCE7BkJldjnHL33rmSg3MVUi1YBfelmvNjcbgM7A3zz9vbkSf3V13N6befvO3OeuW67mHNWGcgkxcwuAKS77rKqFmglUGByvQ3btq1KX019kn63vl8tKhugb5qTRy8GiX3bAlq779ZXt1VR+FCGKjjvhm4YywyJiJnNTEXZU5+yd+yJDQkVzdAMq6IWSY6lqAskAkJiHj2T4/1bEZg8miEAk0f2Dzyscc3N3ghxJIM+LMFHnJYbKerOlWRKRB8J2WM4HwDg+mZN6Ferybv3H0zqo9lyt969u9xnobt9AxwQslb54OnB9c153G+GbEVVllxtUnF7rsPN7mX/cpuvbNt0m06d1ocrkJ6G6MBiFu3T/fZy192X1QENtrm8Labzqjr8o58/6fZD7G/r2SrXfXkA+7Tdtvzpn342K+z69VU9CaGYXF6el1TIkA6PnscizuuTwIeQJ8taqT5W48KF3759qyBY+D/60z+NLYDC6vigqA+sdzL0uU9Ds/EFx9ShWUhoYG07tDF2XfPTJ19PAFpQCrzZ3oXgsyh2oknPVmdMJWrvCndzd+W5UJOs0REjElMwQMuSus4xZzJ2NCsLHdzVZgfOdThcXN/pOu+36B05V//RT7+cTudv31/8xR/96F//f/7ueLU8XFWrVb3Z3v3lX/7F69++8T4JDLvYf/nJiyqsruLNn/z0X6CJNK207cWbV6vVCpv8yB/t3/U37/rjnz6qZ8Mjh8u5/5v/x7+tcj8rV9PFKmVwCID48t2rv/pX//zHX329XFUX13dnJ7NKhLgsmK72+3/4x9/+5MfPWFDbsJX7Sciffn3y9u0vWV7AoF1qTx4tv331/Yvnz6vZYnfT7m93Q7fTVVVVbrJYbm66Dy/fsQvPv/y0qBdN2/tozWaXh+g8gaPtpinqdPbl0WIb1xfrD9tNgZxipnI/ZFgdziYYNvcNecod35235SKxz947YnNM/TB4r+RKdKHvO1ONOZJjx2Wz69t+KE+XkyV0zZYzDE3fGR6sjvbNNsammlci2jd9XU0Y1Xsb2q7PQEWo6iOeU8pdXa4+X07HUTBCENUcIqPDfbdz7AT7dr2ehmW7a/pmMykcVa7Pmlinq+VR5e6vbh+fnEm/eXu3aRW4LKrF8uLykhGTBAtUzdQzdKk7OvtUc+zbxMXk4GiR9guX3z0+efLdt79bzetmaFeTBVLTlHJ3f/Vv/vVvn51+hhb3w1UyePTlwXnzbV/0Dkm91mf1xa/ex3VKmde3b6owVNNK4u7y9WZxcDKYfnt16Uqn2dZNe3vdBj09v9xeNbuuuY5qP/rJn27ed7u17Hr98sVR12w2m6YoJ08WpzXM9zurp353v8Fs/dXe59Vmu5NJOWzb+90/LVez73/z8vDkJNDBbhOvmlhU81cfNk8/+VkIx1fr92dPP+2H9vjssL18a5mWx4dcBjQjx8L69PNPb+42k8IRz5E9mKaYyKhwk5Pjow9XV0C+szYlODp9vL2+n83q0lEB/vJ2HYpqtaj6od218f5u14l0bT48Wj56cnr+6nwCVc55P8j1bnBk09V0v+3fvvvw9NOnxay8b/vUdhxwdXxUdNsIJIJvX97cp+RXbhjyfnPz8z/7sZ+1fkq73dDt+zyZlbOZK6sc1RV+sFxRMSh5Aj8ruwg7iS+
ePL3Y7gmtQNAikOe+GZCLeTVpuyGzAodyOt3tGzGoJ6VgiwQExFhIJ+zDEHMVFm2vXUq+KJoYl9MVDU3hnGQ1MM0wJIHE2XoxAdTtZi9Z+7zx3pdU3O82jWiOfeXdpC4QLMaESeoQwFPOsc0RuACGKAaCfdvSdNoDQsoAUHqXYtd28Yd9BwKqKsDInFZVGVGLIklUNetorVfTZt/cb7dNHw1BDYacs4gkyaLZmxoSYYxJNOeUTRXJVBXRiJMJIWBWD6DjaSIBqR+bPZFJhF0WEeAs0jZNjH03DEo+i/Z9rylpzgbWpyEOWUWISVPKkiVneODY6FjBFvu4vr2fTqZHx8eO0aHOykoEyUFRFgBweXU5NI0r6oPFrGk3k2lR1+Hth3eA7LjIKW2720CoWXb7NsbkHBelR7DtZjvE5AlSykhQe8chFAQ89v8+OMwpZwEjRBZVQGEiEE05ARJzya4Ex8isopoiWgJGF1wXN6E6RmMDQHAydKDKELKk3ho1zVnATIEGyTknQiNyYlwU0/1+CwY55TjEouQUk/esOtRU12EqQwdCXTcMg4CQiRBAaPkSvj37/EcuLG7vrusAhtoN+5Ri7Nq6LECKHGOoJvtNW8+nQ96jWLO+86HjEDiUkAEgaxbVSNKrRvJFgdPY7gkZxkNTSYiASLFvIPVgY+O3ieSxn9VMjFAVswEq4Fh0/6DtGKIRon2sqEGCH+JpMNqFkMgUkT5ytuDBcIEP5bCiD/YKIvcAwgYau4p+CB1AEgYDViFRUCaowI4AvyxrU3mZGpHsEFSFmIYkow0k5UTA3rksA34sl0qqzAQiKQ4ZDB3/5v27f/Hjr2vH++1lLlFZdhiYAIKpDtvbd9zc+7LinL0DiRJNssWAxeLkpHl5+/bypkm4qifoEPa7pxPXBLre9AaSFI5qn4YIAOTA5cj94AUnoWg6vNjtBsmFUYngzCqkgFgQ1FUxdSRxfX175Z3OF8dv3rwConpakaa7m1tADghnZ0dnp6scI1h1uDzq+023vS8Kn5IY4PLorAiT1A9VUVZFWF+ft+vN/GAViN9fXlfLw8IVuR9Euri9JwZG6Pa76aRMKbMrQBR0kKEDD54LGbLlHRTUoE6efvbJ0P3tf/jr3GZu09HUxVyu+7TJecjJeQfs0CwEODhbhOkkdrGeRF+EJFIUAUH2QyP7O+YiZvHTOucOTdPQO8sMuLu/K7gEAM2o7XB5u1+/v/UlV9N6ejQ9OT1p1zeNsogh0mx1UC9W5CY5dug89H3ebizfibp6OivdBJKH6yyJevbmK5hO3MmZPzrB+ZGrZlZPwbN5Ahj5I/iALcGPzp+Pd+GHR35Ipv0QRftBPPq9E+hjfOxBSBov6RED/4OEhKNy+vCsMe3ye0YSPoyih7Tmw6tSWTGBrytflswshr4ICEJqiArecVGiqssTLoYslhHJBXMBmBkJEMxRzImLklPyha+KgtiVCItpOZ3yYl4WTI7AIuiQUjekYZCkqAoMgB4RmZhMiD24AOwRFZnAFA2QHXBgX5TopWu9s1AURGySJUcEQi6wrLCaayiJClMFU3WM1RRcIE2QBzSxse1sZF2TMzIzl1VUo2rmEfpBZmjgPDpvzhMQ5oyWIA0y8oSCRxdGX5jmDDkhysc20Y90cSQEQudAAf34JgydsKlIAhqMMMWeGEApmxgishvnX0ZgtOCp8BgYUFPWPCKPU9KcNQEw8zDEKEocuqRhSMGNFoExxTSGm8YW9mwGAGIgxONkmdVMVVBId71PHQOJK42cIhhZ7akoxgI6NEAkJgYEYWRiBgIkIwzMAYjBB2QA54DdqOkQGDISkSGhJssp5WyaRzIdAKIoMBHRSLLOqghKlMwMMtJIs0ZiJBVRS0ZjjROYCiESIyKZjIp9JjQG0TxI3yB7F2oKnhERRiMWmWZmxBFOR0iII++ajETl4VjhD6Wih9MzRER0TGZK5AwNx7cORoiqRoymSsT00MhA9gfDGfCjXwlRx0yggcrDZ1kli6hBzvJDVcPDn00fZOCxPU1BxiI2JB45SC54Au9US8057tkKNkp9E7yn2YSQYhsxA6Kh2Ri9NgPngnEgj2ZCSK4IookgAxEZOzQzLUKJ7MyZmzJMkEgk95pHqI2DkXzwwJ0PiJQgOiJHHMJE2YBdn2IIVeWcOsu5J8TcxqquaVKnpCJa1ZUvHSIwUGBeLuf7zbqu625oNm/PXZKCsNu1se98YCo5NQMalL6Yl2zNGlKkmrapoYCWE5GXrKPpLCdDcWhgIIDALoArfFn6wMwExGDleL8spqHKZXPfB/TdfUy5O3vMz07mhYb7bWwSoiPssyrkYeeJ2TAjhaqs2J9fbCKF47MFWs+EBFpWXruuH+KgysETSNNsp/MZgg19Bwh3t9fTaTU5OvYhMBEghSLklGOKjiHGlBMgBEdMhoSeiQGjSJIhutoVpWfvODjnPD6wsggRmBjwQdlhct45QEQczXSKCAZKzMzeEBh5HMyOnYIgAiEhkJky8ThRjNyicYoBgIOjBZVsBU3mMwJ68/0rYlDv2WVGvNneffrJSbzbX52/7vu9QzddLCVC3O6O589XfvnJT0/+8z/+zfLJfLfvk8GubYuD6UHJ7frcc8AwmR7MyCmjS91+WA+ThD99+tO73fby5uXV5WVs2uefvZifLqrDqu/yzd399fn3bGlWzdl3yaxQ89T+6IsXzV7f3Z5jvZjU3pkNezleHsNQfv3o63/x9b9kS/eb7cHqQFauX6/RIK13MRJqjO1ufrzYt+vMOgkFsFVlDTe3weHp8ux0ftAN+5h6CmSYxCAEl1Oq5idczYfcEmhqezBSBTIDG5VuzJZFMkCs3BSxyCLKpGI5pe9f/m3qvvzjv3z2PjXHZ0/urnb1bPr//R/+zfJgspqvvA8v//6f/uXPviqCu3h3NXs2ffQI3nz/8u7+Fq7z8fF8slxIN3SbvW035ze3z5+sTp6W6NPznz46f3//6HD1+eEyZfnij150fX7/6sN+//7xX/38v/9v/9vZ5OTf/803//WPfnT56uL2zYe5f3JcnqR7uXx3fbKcSof1pH79zdts9Or8n7767J/9+Y+/HKxDRmV3eHCW880337x9+uzFdLZIkprNID0vq+Ptxa7uIrP79NPn7169u3q7nkzRF0GzHJ8c3Vxef/MPf3d0clROJ5OypMFnUDVh52cTHvouS1sRQ1VLhrPjw+9/9U1uOldO+u1QhODJ+TBZLSdD46LusuaYnEYVdkzECDa2AiEgoKdqiL0GWcwmOUrbrkMZ6jDNKcxnk/V2c3V9X1QlsN9vm1k9jUNc39z4upqGhWR15pp1k9TN5mXqBBS6dhhHQQhIVWmW95u1d9jn3nMxmy6nXObYmMeua6dh6jNrkv1+74Lv296B63a7nON+vZ8dHrFRe3n9eLJwxHEYDCGT7TZrVyDsdpYiTs2BULJAzontz98+O5q/3r3/4sd//PoXv1jkg8nxwexg+vjR6evfnAPDo6Pjm/ff8bAS5QjIKd71m1x1fd5S47iafHL8k/3tBRVKiIWbxk
MuJ0uohDkzOQQJa43a1CWbvAs1m92+xdWR4dH6Pdd82wj/uTapklF6HwHIaupaoiM9HIxKEowFCTmAp7V2glMjADEe/32yhDKAqIgwU5XS67Lg86LE7m6822ni+JqN91ZcXLk5PV/fXZw4dhUr+5ugrT+ujR468vLxenp+v1XTMkM/XeT6uaQu72XTmpzh4cNfvBexz6pt1k1uwKriufBDCUfc657wewR4sltPH19eXZWe3BmtX2yZPzy6u71PeRuJoWRVm3TXsYD0Dny/nd2613LvWR3JR8OWFauEmx5Lv1NZT19Xq7Gyjp8M2br58+f5rjkPp0cX5myW7ub47m9aSw3371Mj9YlsV00Pb87PHpo8+2N+sf/9EPbl6+BtAvv/lquG4925vXX/HOaubf/NV/u3zx5aOjSdJis+1Wq2+ECZJ+cP6scHd/97d/gdztt7Hvu8Jo5uvo5Tbv//W//Jf/8B/+P88eHd++u+251l7vtP9s+tw39IPji8tf//r7P/psSs1PPvrALfpB5P/yP//fajq6Xm0m86O7t+92zd1k6gvntu9ujsN0fvIg9vsnz59dvn793/7+P/7Jv/23XWPH5+c37ebBo0e//fnny6r+vT/80+XZpwrv+nb/qy++sHazurp+cPHBcj5b37754svfLouHCOVnn5588fXP/+D3/rm1OnXV3eb6Yv5A1+3V9avl6eTu/m17szs5v0Bvq6u3XFfPnjwxtt3tdonTchIcwd3N9dtv3znvrc93v3r38GT+O58+aftts5f71dpLwZl2q7YaoIn963UzOZ/02+bs+On6i9dlWTXrV7vVq6HPD8rw9IOLN19e9Tp8/L3fK+OAhqEIeId6vaZuYoWGcjZ4+/CHP7r61c+lb1tZhSJRjqD7evrBg/OPoFvrfnv54rbZ3zvk7dX9pcOb603OlrqduWK/704fPzmbP7h++8ZEH5yeTYu4atu+HWZh6SSvv3g5PTrt87DZbU67oaomt9dvjFx2Lqf2XRMfnM4vb75+Nvm09J6PF1l2vWBdFlH2qztKrLPlxIt4cm+u3k2nU0zWb5o9qAd3e7lT8JjJm67fvQ1F9ejBCWVnUV9+88uu7+v5Ue7Sk9MTKgutZqUNRVE1+5wlHz1YqpjXo+3NlukArutTTP2AjLttG2NSRM/O1It2OeYHi6PZZObZvbq6NM2CSoyOHZIzsxxFIBFzICIER8CEjhwzA2ESUMtZlQiZKOVMwOw8s1MFQC2LAqXs+ni1umt7Q7TC0SSEmpCZCgayyMimybtKBFBjzc7EFIQAGUFBPREaefCDDoVjBWZHZhYCoeWYIoKpoZillJKIAjRDmtYTEW32e8uu8DUA7bY77YbTSX16sgiY+rZVQUNipCFlyFqHcoji0YvmAkaXvIxNWLnvzND5ClJvIADU9V0QclxG2jkNhIo0R+fM0ggzMQAkVCHylaUhxkZyK5LRpsVkSa5k9n2+Ns1VWa+vVuXRnArQFK++/nJ2fuGCT0OSHIfYEmHhJ3HI5EI2icMQ45AVmVjBNqudJdBBckocnCGPnWXOuT72xBRjr0lAwNiTDyLqAYBQUkYAJBRLAgQKIJFVGVlzz96ZZI09OzFUpBKdJ2IwMslsiog0Ekogf0dPGeNmI1VnnHLNzFBGa/XomhjrneBQsDQqOPpdHO09v+c7Rsv7YRgPfJ8D0gQOaTc9FEKNATEcsdbfbab/cUo2MBUkAwQmzmA8wl+UABnGQBEoHAQiIHZsJqYINO7/idjIm+jor7KcxmwSIo1/G3lEIsORKwMS6fBrjsDu0VVEo+KAxOgcgAMZf7XxHTCCf1TLDv4sIlFBI8QCTMCMyank8Q9rHkNotZkxjYMXKsCBC5W1Bth99Spb548eMBbNbofm69IfFWXh3bs3r7u+Vxjm80URamQIDm/efPntr74qCHU2b3atq+qimFePZuHe1ZVDwLhr0r6ZlsV+fT9EKWez07Oz+Xzex36/ere+eUfedy1UwkRRur6nTGyKun93BdgMpitXBZD7++buLu0GXLE+XEw8k2rkDEjBBTef1Zr6Y6u3Q75tBhXYdRbbobA8n/tpiX6zLb2bLGbiAcGMzHJDcU9Q9H1EMI1a5ELbIWdHCqbqyTERF4GNTJUOjBPQkcgzapNICIDox7fQVAFH1jkbgKkikZkCMhACHK7qkcplqhmAvMuaQRNcvkAX+cFDOj2Xaa0wlt0dgpiHCxzxvTR0MCt9J4GC2RjX/E5X+s5d9N7KB98Rjg5120Cg+v5/v9cX9b36OpLAzEa613haLaJZ0dRkyJIwDcqDMSN4IBMgYGRJCYHNkJ1TSYamoqZKxGaoaqJKaKpCY6RITUSjZDB8rweBGSChqpkpEokoAYEJJDMVVTUwVmVmMxhNiCYiOQOaIbErjD2yNyRAylnMOWIao6cKoJLMMiGiKhGCY3YTckBFAVwYssVBnZTTI9XImHRsLggBqQRwwGAawXqiUtEJOkNUGysTkT0LABvgCK5HJERlb8CiFpwz13oqHFkdCg8BcfQ8ETGiAnhkKEEXBXNZlWNRmJ/PXVUTe3I+qnEoC++JUNXGVC1q0ixcBUJAQybwnk2V2KuRZmPGQszHBAiW0tjixQQ5DmiEklEEDcHIlZOcB1DVQQiEhijrW6oDTqfgEREtC2IeIU1IY+mfIBkQAxRq2Sw5RwoGaM4TGSKQgjAjoUtZAUd8blIZRCkM3EXXZdVMJEjjs9aN/HY2ABMlREQUVQVAOvRakgUDReYR6j9G28anhpqJWIQMauNrESGmODjngx8/Fec8MSNzRkEFdMyMB2/gP5WKTA5+WgMTEjMDckhkRCg65J6cYSJgJzHnLJIsR1EVMx2p7HTAzqlznkZuXRSXhXvFLkuTdVBIhtnQgIBMZaxvY0TTBORGiCPISLHW8XjE1JDAkMwxhACImBg0g4zYPDUVeA81G+HK7LypiRqqkgUd4WQK4Mggm6lBAgYDBTRwZEyG6jCzqVPLmcg8slNVEjJFQMwSfVGDQwAEI0bOimLgnRMmdhny+Ak6REBwDoBMVJAMpRt8CBkzMnexn8C0Kst+vZE+ynpVHC1pWdV2nDYbyZLivj4tMyz3m31OKWf0pYlIWRXoLQ0pd72lVPsKp9NGgDT5Auti0jR9028n86UvmMthWdSNWozJBxdNh6wpcYwYPMe4937mAwmwEHEqUhc15oqpk5hS1CHt+ryNHj0RM7hCIA9xp4m8K0JV7ff26u1dF7vzo9nxsnp1s9400Gb0ZQFm9aQoiiAp5qzsvA/u6HR5enpyfHwyXyxny2NflSMmKHbN0Hddsx/6zkTiEMEMmVQMkX0IVVWyc2jKyM4FIgjeG4xBYDRTMSF2QAhmTIwIhMgcACmlRKOi7irJ0SyXZSVZshgCMLHyuJkBcmFcBaHyoGFzv/HUCygdPzEXp5Ml+7J0KXb33vJRNWljPlueUUr7zfV8ecwKj04eocgw7Pb92rj84qvfBs0lDg8fXsQdUu3cQKS0vr5bOG+7HpeTq9vbWQ2Lybzb9OtVCxTqotjvmwf
HJ8XxA8R89eIlunq/28bYTo7PPvz4DK8a16VK+7ff/iZKmM4gdo3sd1pfCJFz3OnAk1IBpIhxSPs2iRRsRFF/+cvfIGJ5Pm9CCstie7Uqprzb3w02KRwk2zft3gZsd0XXDvJZNV8cL4/r45PM7umQ0m61aXfZcTE5Pnv20eO+udmtdjR0jJo1thIlJUu5b61b7btmQ25AUldAamw2e3529lFqNLV69flXJ8ceJkePLy4+//XfXnz0Lx5/+OTm6pubyxeLk+OUxXn2rih4OmiHaqKJ0IoqDLtYOeoCgkFRVpalrHl+Nt2srzQbSurbyI4RXO0nfaiQ/STsV+9e4Q9/AOTKsh66lhkRdN+slvminM1Uc8ElA/ZNr5SLSd22mwzS584fLaeTqu36Pg0nF2e7TbO+3oGpqnZ9P6mmzX5b1oUPRbu990XluBDpTbJENgLyJID9sJ/OZkba7LZEs6KYxr7ZS57Nqv1d74j7Xbvfrp89e9LsVzkZMaXYMMgnHzz9q//0Z8dnF29vbu/eXQ/tZui6elrGCNtdn5KZ6tAPXdamjyJc1tXji6PV5TWJijhAqCcTi9qlNJ/VqlAytrvd1VUTdf7s4bJdNZti4zyXg1OQvl8DlJIOAbQiVP2Q1XgyP1nMl1+/vFTvHi4X3X7HDGVVlsfV68tX272VFT19dBK73XV3H9v4bDG5390Wk7Jttrv9enZUrW8vu6H56affe335Ou+EFb31X3zx15MHjx4+OP3m3YsmdZuby3Kop4tl17SL2VGzu/n25ZXahOrw33/5H3//p3/6ixd/8dsvf/Pkg6f3l5er3fDw4+f+3d3u7bskaSfD57/8a+f9z/773z1//uTpRfXym6unF49scKVYRc2n33vctbvYZl8Xv/71F4v5+eLB7G37cv70vJWt7lYfPZknA19Nt/erh8VifbN+fff27PefVOH0D7//x/G2w54u13fRl7/51asC5sN9d4rz5v7eKyI672fz8gFY9N5xHuagV+v18cOLh5PJu6+/OKLim69+dnF8dvXtV2U1+fSHv/+Xf/cfi3mhteyHKx/cvnmHgqVXDnZ7+WoxnzyoZ3G/93XRCzlJi0cLVC52k2EY6klxd7VPYNPjaQX+dtUFT5/94NMXP/8ZsJ+f1U2UNnavby4fPTqi3uLQLeaTN9s3/c63Wyuwb7pXZZi2Q7q9bj746Eldxtdf/cp59+Mffvz5N192N+s3X/91360ms8Xj5QfXdy+fnn28vVvFzS/7/bdBcTKZN61Gy+J9zunq5nZ3vz05Oz3/4OE339yKUhFO3r5eOa4ePn7SbN/dv32XPQ/NZpXjfjs8ODvf5z748oPv/ejq9ub04VMyDHX94LS8/Pb+6dFFxRarSUiSsyH59Wrjp/XiZNHe3SVBLsu+t6qevHj1JYnNz6c5N0fVSTkLaZ+X1YyA7q7389mFg912u+06c4AC2aFbLh9OC2KF2GF7t3W+dNF3+5ZzCeoS6iDD8YNz7YTgsAqubzeiNjRdUTI556Gsy3IYkgIWIYSyJMehLE9PT0Xy3eYeNAEoghGzOFJR7x2N9TOICgrEqpmAicwEEdm7AggdM4yN5OOEgthnbQboB02K5ogJ62mYBfSATIYqhOhdMGRTBdPSsaoIqlhPGJiRyBsoGmRNhQNkjsYxNmM5j2gywD6mftzK5YhEkiWq9jFCTvOJFwNCvl9v3l1fn0xPl7M6OEO16aTyznVJk7L10VdFlmgq3jnHzvvCUm+CDB6B1RSY68mUUVM3GEhMg6OJsibtUDUNUaQjQuecERt7MCcpiaia5BhFYo5tjkOKzbDfY7Wc1Bf95i4Pw6bLZT3Zbbahpjy0grjbvJnNl2aaUurbBgFyQaEoPHuTwVCb9dqKaVXVm/12v9uGukwpxhQrnlXTmSloysSMjpmdWYp9E0IxxDZUhZrSwQcPjGiAWTSp+hAMVHQoCEEATcxUsxECMlkcNA/oHAKZCACYAhgxOSOHEAkgm45lK++1nkMsxsBglGJgzISNDWgHM8L7i2UcKRVGRNFBP3ofTHvv8T9MyYZjzMfGHqjxpNYUAJjYwMzUgCQLfleRTODQGWTNCQCZHCAQ8Wj3MX0vSR2m+JElZAiGzoMBj+8YsiKaCIIBMSAjsZmYCaggEYAiMuEIc3ffiWIICocRA0cpDWwUIA61zKACaAhMY3/zoTGdTAXBGJ2BEbosioYGyoiAqpKRycyNx4RmxhQMDQEs50CsarGPkAXKYr/eA8HRg3nN9RT0za9/cbNei/Cji/P56axwod9tFfXm1aa5vruYnaQYb95cx9RPJjOuyq7gSTnzvsgpMSfvCyCuZ+7x6bKqQ5F1c3u1vr/rmm5+dl6dXizmy/7mPq/ut3c3mJKvy2I67Qcque/UxFeRq9OHH95svzbkXa9lR589/0iwbdt2u21rXxoWgYgj1OXkfMa72LZJGLkiV5Xl8XyqXlIX366/pcI7oiZnB65b7SWZZZllmKGbL44rXyt6DsHEnC8ADJGAkHEkpihCJnI0gtFND24HQEMg5tEThmBkOObMDvrO+I+NEQ9FNgB5Ty0HGyK47Ld7/fIFNNn5iRUlEJDJoZWP3hO21AwP1puDw+3w6euIGkL7ThIaX2P/qBi9143eU40OksyoQx36tdS+81OogimImpkSuVEGNUY2AMeQxkEUQMZpHgFJkcDYxEyzR2+oBpBTJGAz1fG7G4iNYz+QMioe6p5Hk8gIijEY4TRAqAZ8QLUYiJiKiIBZlszOewcYSCSZSEpJRZEJHasZqYCKRgPvFZENxmgvmIABihCOkh8CmPOevEfHSt7ImRg7cBItElCRsiCQhhKKmnMFMCBrBkCvYA1iICsRAMCzq0DNAARNCRyyI0d6qJ1Tcs57BFcGxyYMGHyJ6j2XCkxgZEnyYJoBGcuqqoLmuXmysuSiIgojki94z84TIJg5dioKKhp7oMweyTmw8c0WZkNC5mBhtIXmOrBJNMepG4iYWDyCxhS1h9QQKwUkc0kyMPqSQDPKgMlZlykkAiDHzKgG6L0gIzAzQx4YTaIhe8bCMYspSlZDTx5MRjcXgiKJAwJ0hAwA7ElMisKmwm3GnFAGkwSOeTS8qSESkZmKmRkRqwICmqpjN5ZXphyD8ylHUFPNaO+X5ftaANHxMQFqlpJwzN45ds55ZGfE5pSJDEXMO9SxoPCfSkXZVDOg6fhjABIBEiEzojmHOYkmArIcJYuoqURRy6KZCQnJeQ9oPDqiMpoRqNMBoemhizZI7iMamoCCgQKhiiYzQHZELIDEznI8PCbVDv1tTAZITBw8OBrbAscDCFQC0XElGSCMUCQDGVc6GDlUzWOlIhFKBiRnkk055iQiitlVxDUrJhqttUo5IxELRhMBMA4OANAFcchFYZpB1EwQnGU2cePeMlnOaoEL5KCiwETOj1H2ro2kBoUpGFf13XZ/cRx8WStAn6I2TTgu/NHMLOMe0j4iWbkswNHQx6GNcUi+CNIZFBw8Y1ZBMDTDNKmnfRwigK/CZFGUs5jNwCV0cT6fEvjNKmbJ6H
wfbdfkScVA7MJMYWQ5CzETuyLUiTWSTutqNwxpaA2DJBVhY0pggdkVZYacTEQzeg9dvLva2pC+/+mTk0lxwzuTxEWV+ziblM6Fvk9qdrSYLk+OlydH88VyMptPl0tERqDRQNR33dB3Q9sfiEKExMxM3nkgDN47xyF4YmIOQMwIAEiIjvnA6kQau2gMMrFXVRn7YgHYeQADAiZGCAYOTJEhOPYKYhqoUBEVeH+QDLe3q9mRPv/959vVrt21i1ncr66Vc5e703kxNHA2vwAdXBza3Xo2q5q2Kc4f7Va9dUM55U3aIIcBcZ/taFJm5ndDu9/eba4bsPKDJ4+ffnCy3XQMPhu+fP3u+x8+kUxvvr2vpgs/KU9ni5LuTLG77adHeHN/VyzD06dn+8tbRn/77bv1/W7bdmXBF48fehgujssXX9z7mr+6/vZoMtPe6kU5nRYkcH25BqpgUZVlIEd371aT5QI9rJt9vNmpyiyUYlgtT2bHR3fv3vS7BkqbnVXNfff5i7/vCWez47Pl6axY1JOasTh+8tF0UskwiOb7q5c59YRaznzXNinnogpKgYrQXr9tm3sKg0Gfht6VrAbPPvvdXqo0WLfvJXfHF0df/uzFN9ffXnz6w+OHp19/9cubq69PjpaG6pybT2dDl0SjYeRQqQI6NqOc1FAKIx7AMA9Denh+stm+evvu5enxQ81wf9vMF8eucNtmX0yqpu1PT2e/vf1it7k5On3Y91EMSIBDWG93y2bnZ3MiBjTvPdROY08OCqiyJJG0Wd8jhNnySHrYdeImi3oGt/d3Z8cLHRQrLKrJetcenZ6Equr71iwTIXkwykjUNvuiriDYru28D76st5ttWeuknmiWIfUhFGC0ODvq0n7XbX1ZDH1fTmq3n9xd3z199ujL48Xt3evdbrNZ3XR9k/KQTEUgRdn30UyzCFg3HHgcYb0JU+TFpEgxx7Z3Bk0vgLxYLqrA716/Ncx76boVbnf749nCr9sQinlVe4dd7CwEVxweCetNQ8BcTtCXm/X9/GhZLY5+/fXPmqb75Hs/0JZef/1yWiNi+vbrN8p5uiiOT2bNW3356musF96XPAuXry6fPTt6cvZwyD6Eo07Wj+ZHFeTbNy/Kk8VmuKMVu0nJKYUKEfVqe2VF6KCv07TP7b7bL5YLbTdfffG/Afg72GD2N80qY/BNXB4tJXaO6Xx6nrbNvs/1yeMcZl++uX3+yfO7bdr2zU9/9ONf/eqbRx89225u2Pqr11fV5NnzR98vpvXPv/jPP/qTPz6eP3iVt455yLQ4ewIRum386R/8/jffPnj59tVsVjsyFOvBnzw72SSMq93Dx4/v3l2vVnpyNL28+fbZB0+2w61J9+rLnz/++JPd/a36HI7DV7fXH376h2+++bI6XU6eH91s0+T0UanVf/2b/9e77fVkOpn2dHx20rV7AwJUtJy7PrgQ9/uctWui8pyJ5qVzk2J/1S9Pjmazk5wl5n7X97x0t/er3HeNym+/+AoiOrbVNytz5HpTbuuzIx2w7WN9sjg9jkOK727XLLZ6szqx04Czm83195cPupz9wqWb1d/99/+SC4cW25Ugwz6n9d3dPnWKtTs521/FEFgYb+XufrM6Pn4ypJZKch7DMgjo26vN1Wr7QXVU11XxoOj37f1qXQYcYjOZHZlVhZ8aVG5SUTc09+vC8dVq9fBH38u3pMPw5Rf3Ht3t6y/Pz2azehJT3Kyao7PTlGVzfxVKDFWtQz+ZTNa7/Z6GUJcsdnV9V1TT3nKM2ncDgD4/f5gAvA8xVLt0rXuqAlycn7y7uknSz8PMJb5brT7+5LPLNy9jomI6McD5dLHe3WAXb168YgNXHobkz7/4mj17opOjRV1Vk3LCDFy7qi4l5pxyC30WYF8EF04W1rT7pm0zCDnP5AJ7NEEkJAaDLFklBcdZYgi+cAWPYGxAVSHinCJmwRDEMLVps+mGXpBcGbBmmgYuHaYhOnTOBxeCkVMzRmDilJOBIXPgiRlYzmZIAikLkcsGUSSpmbGJGgASxZi7fjAxS7lwQUQ9gsYhGQTPKcXAeHN72zS7R2fnR9OjpD0yV0VZhVB4KmJ/v9rXgWMSRDeZ+hjFOfQ+dHlIMTMoo6JDLhyiisRouWRXcNAk6F3KOY/sQIqEWJalAQKyKmfJRAaaJEU0AGQjTCmlYZ2afTc7VZi07U1Me3VOuQLxIvs+J4j90DaI2LedKilC3+1Ls0nlRK1gUkNQkBRj36mZ9yH1EYyKUCGXhh077LvIRWE2VnrlrGyG/dBPiirnzI6RSMTGLmAwJISUBsvJqyCialZTQkypB1RnGQhzZiIH44BmJHmEr/A4iRK5LHkcHw7sHLD33U4HKxDTaLf/jkV9mLffaz0H+ouOtBUAxNE39J1ypDCyfOzwFULSw3EsjCYpsJFwRIcB+v00LSrjaTOOOTdEzRHYHeZzBEACFQI0xIPzQ8HUEGl8hYoYIBGDjcYdHiudEdkMERlHxAchgIxmpbF9eaQ2jIYjAMbxO3zX10OETDAmnxBB9QDyZkZDQlGJBGQgbuTHjv4sMSIGdJIFVREYKYzvkYA59iNuyat4ZSRZujzjYQ7N+sXVZbu7uttM5vWT55/UVEoP62adJQZH/eZuUnLTNZq74FNJ5CBCSsOg49bDBa6Xk3Ky3PVDwc4H7te3+/u7u7ur+vT40x/93uLhh9um6+OQ9W7Iw2I567YNYGF+evHo0Xp1eXxyoeVZs94Pm1XfY5+SJ+5zvF1dL49nk8l8VkwpWxCqXIGhStmIeTadAegBSMMct30kWWHXWG76fSBS40KhNlcZLarloqonvgyuBFMmP4qLo+IoAjjytghNFZFgpGiZ2NgijGjEADTGwXCMSpkCsBHhQT0iUBl5WQfeEIIRmhKIMJBEsX10SrzbQ7O3fJT8+0688bu+9yiNLjQ4yD0Hf9mIFPofMmcIh5eN7Xl2UE7HFOdBhIT3V9Folxi5XO8ZpmaqIoZAjoGACA9k4VCKZkYWGeOWo6tASXBMNIIBI6upqJqqGYjJWBRugGpCxAJA5AxNJcmoJpsC4vgCAjQDGbnviKgyZodBRUVSygBmCCBCzDElzWM1eTYAB6hmSdQTowgRppzZOVAxhRTtkFBCpIPPS4iCMYIvhMiIQEf7Vx5x/mLMYYKu0HKamZw0IDuAXkCVWZiyJCBn4ztkzA4TpKRZyCGhGCKQGYqBgo3igkOPamTOLCByVkQDQxEAYY9ljYAgYiTeJKu6ugbyRB7BGAnZMwdEAKDxw0USh6C5H+lP5IOqICCiqgqxqh3yT2Zg2Zsj79GySIrIxgHclAoprW2tV82iYprFE5GgDgN45iFCDIZgoNmMvVdJ5B1YHgvLzJQYAYm+w2OZInlBAcSsWZQAIPZqxI6BSInQA6oC+eACl9kkUKQMhSNiBDQVB4xEBpRTBkAEkyTjnRINEJ1qZs+iikyqOSeULIckJRGNRk8lPBwFoJqaGmRJopRlzPF5j+zYeScWGb3730lFoAoKgIhGptkMjIGQLQmPACIUFUE2Swqqqe/As
sqAhKrKvjww6JjQgASsz5jEumzNYH2ynEEFENVMRRFMyYiAkBVlXLSmxoxiZogmamgjWxuRD3J1zgCZBMkoC0IWRhvVVlFQE3YOgHM2RiP2B042myEmyYYwRkUBTDRnzcSQVWNMocRkQmaSk0AGYOCAipYBgYhUTJkNMTrvAJGFGMiUMDuKg+TseJQ2kxEpipEba0nFLAM4ABSFYSBgrp2IEDjHkLteRXhaQj2HuuYkkExiy2RcYOGLclr1+xbBLKPkvp5OXO2zQRaTobdknFAGU+fdxPkqFLNZM2QTiPvtfDYJRbm+79toRSij4q7J3ns1DQHR1HIEI2Tkwk9mk6bpYxtDDSEOSjit+Gbd9gpZgUoO1TzpKg0RE6tIGdgibNv0N59/8cmHj46XZZtSN/SeOGfJ0gsgOV9NZ4uj5fHR0fLoaH60qOqSCE1yH4ehH3IacooyOjNVkTEUnpjRxi0IO2bnHRESjcK3sfMExkTOOQCMOTvvxn3CCCESFVBlckwoWUCzgIIpO2eAlpWJgBHFiIDYmRwONAAAEnR3O3g471M0pJtX75QGVFs3sSrD0fFJc38lXb90R5im58sffHzhfvHq6yFQXVWzqVttrp5dPL9vdgBEjmOyb26unz17PB36dtvfb3cWMBI8eXR2+ebyj37yI80NFmezJ7/z+tuXTbd7tXn3ww8/7K4b6XKGxfMf//Fts4JqKBeurqp+LaXCg+998s3Nzd99eXdxMnvz4m3NAd1kOZ8VDrbry/njpSAm66nEUPjC4+rdjacCvMsSV9f35P3x/ISyHJ8Wb1/fJ2PqgrIvptNWt5vNqirK45Ow237FvL1cv36H08XRSbYqhDKIQUx17etFnVNflVU033aNiImIJBGTtrkZ0qYf9kNsRCXvU9/D04++pwKPHs5/c7l6/uHj1N0eP3p08vRh7tO3X/1ydfmmKIuqqpt9w2FCWAIpEpRuimAiiSD4slDo6lnFPH3x5Vc//OGnMu2rQq9v14yBGberzX7YLo6PBh2s1bPz6WrXn5zPZ++m66ur45NzA0FHOQmKeu+GZlPAI/WUY2RDX3Llq5iiKwIDg+h2s0YgJK0Xi2xR0n5+WsNOq6rMUfqm4+CyaLvbLubzderEkveVSI79vvDFZFK2TesCGUA2K+opZB3aJpCbzxfSZvV2v+mOHyzfvHp5f315+ujh9tUGmYgpp1iWx08+/vjyL9+E0qtkXxTmoIvR1FTNl0FFyRRUy8oPUUDz7X3MdYnsauazk4d92psJEm7v13y8nM4rKDlJ2w7Wt7zb2fPHx6XPvojVbOqLum075w+PhMXiaL/eL5dLduH45Ojy9g6s/+rb1x9/8EPM+mA+y/vN4sHRpmvu3abLze1d++jJo/u4KqcnJ6eP3rx697a9KYt6t9503LR9Xm+GZtg/0+PTU399e/vww/MvX742g3ZIJ8vT0+ePvv6Hn227XE4cunx9+UqauN8l9n3et/c3d8vHTxdHk2lw4eJiu929/OK/0/kFMn3/+997+c0bh3T+7OF6rziZZijftrk6mu7uNr/46rehmL1++1XT3GJVLB8+/uTp90o/+9nnf19R+Zv/7c/PH5/P/LG3vLrftfBqasBVeL19dW/bxfm83w8ffvbx57/5db2c0MzhpmNKm919/fDR9NnDYmpdX7z89ueN759+8vBIPri7vsnJYFYmpe1uxZ4//cHv/te/+M8/OTlJu/zqi8s/+ekfuT794NNPr1b32/vbsqChxw8//mmJfvXq5fT8fHdz1TQ7NInZYqaT4/r6zbuz04fa5ejaIuTXr28Ws8WHP/rR29v99AG1m61ETLtY8aSe1P2mPTo9Y7fOmIb9IDGdf/j8Fy9+I7t0vjiJ1HHd+TxAyuzD4sEZYvHqxYvJRX3XrLzybHFOOYrp8emjZkgoSXbw4qtXD48fTupaciNJJ/Pys08+SP001PXPf/Fr5vDg/Oi2bT/8+AlzvXt3/U0cpouTR+enX/72m20AAr66XV88OVfxddHtNne+RqoHYpovXcqbUHNqYlFNmy4dn51hCPfrm0U5f/rsohn6euqnYa4p7XZt6Tj1rZfhpDpFw123Pz09evPm2gV3dnbybtuttis1OVos17e3xbR69vysbTpI6fZmAxDOT05AegTYNbuUjYr5rmvmRUlhWN2/DMGryzJIOamHdMBa39zee8+h9GZ6tLSCh0kdQlVqygqWs6ghU2bRJNl7V5f1kPpd21U8Pp3ou62Yqo77OcnCjkLw41gipikJERBmRwasXdfFrGmwaABFWQZfspQQJae2VxITdt45JQIwh+DAokRAI2Jkx8yqRplSjmTmCFKWLJDMxFBMDST4QhVUoinklKNEh8EITYARHYMQrNqu7dO0LufV0dHsqBnWhOgtxEHMOmRvClVRbJs2y1AUFWYZRS/QFIJLKTtfRsjA/mh26lBjTnVRMMGQBYBjUhNRicGxeSOUgYyYDdiAECQn1dxrzl2zQ3QpdszkGS21zU1ThJl3pUrsdluudLcTR+S5atp7IaJQxhRNBNw4Kw2GlaFmRR98lhyHFGOPDCL9MAxjVEUsF4WXXnKMIaDzU+aq2X6jAMY2dH1ZqwoQHQQcVTU1YyRFS43F1hhVE5McciNgIvmA70EZ4y2q2RRZDcEElZmjysjsEZOx6x3sIAaNRR9jh7GBvIebjJ6iQwyMcYymjSPJwW5B+N43NLryEUbbNR4wLgj2HSZ6VJBgPFQjIxtHz/cBNMKDoDkCYcBEDYndOG3jP87Z/H7wHmHDhkAmNipajIhjogZIZbQegWoGckQAmgyQCRHQTEaVavQ4EZCZIbGCBxAiAhuLfsCxNxMENhpPjhXRkEBV0TKYjkVRB0lAbZRrmRmZRMQMcUzIAIvKeIINB5+RaU6AqETDMOTUbzR+++LVbjAvdHY2n4a6ubpr7zflYu7ryeSk7DZba9q73T5lqQLySG6l5DEQkcW+mEzEkuQ89BaYDWO+3/e3V6j68JOPZh//wIfZ0O+662+3b2/WL99UrmzbGKNGyi7t4u6r45Nau0GxC4xNJ2xoWcXTkGW72Z7N6oCGCh5dcM4jFRxSBlQyVWINLuQUTaCuZrftftjuCfAE/My5ia8LpElZl74wg7IszVhFRxcbEZmKpESEKMJMZIJmjEg40rNGABaOgUdAA1NQhUNKlWwsPDsYdhBMRjLPCFwRNRrjHaPmyAaQLfeUnK53fH3LHz4fh9T3/pvxwrPvGNXvfUHwXgP6Dlf93hZk3/3hw1fw8LVRr4L330cPZjYAO6CmD2NBTlHUBCBwcMRIYKajykXkDBTtu7VgDD5HBQZFcejGn1m/exKoHFqsDgVsgABZFZHhfRGVHeyBMFoN1YyIx3VoaoBmqmYiOeecwYCYFE1F0WxQURVVYWYwOrgKVXX8mEbeuIqKEeCowgKPNwxDZnIeHAOP2apRKB5yjpKSJEH2LjAWU6xqBRjzdwbBWAfVsQRSNQJ5hGTj24mJQU2BkIgdAQGSmTkkI0BBAgIgZ4UDN2L4AUfKshoChBKZGFQ0
keRilNJNeUT3K5IxGMKB3UZIiuBJo4GaCjuHIOxGFROYnZoSKiOPWS0jb0KUzZjAEqEnVVNV8RAqa8yGwZOTzDAYGagiJkPNY8MjqI46PHNhMiYVwciNDUtghEikSmhsYogEbGjoQLP0IjFnsURMnslR5sBIOEa1HDGDugJFGIwAiSiMQhsiFaFSyaimTs0s5YPCGFyQnJmIEESAENU50bHSAJnQVIjD4a4ICHB4WIx4uSwZDTIlHFeboloc0YfwT1xFCna4hxIeajokKpJlEVQCNOddjgMoa0oW1USQgInBCESBDI0REcU4A/Yq+6RD1i5r0jGROp6jIIySsKKNnQViamgOjEbN9fAVQFM0VWJmh6CCWQhQBjEzAA+YD1S8kWMHiIaWEpEDBDUhG1mEQg6Y2ExUxXJEEBAlVUAlZVQwZTIPMaKwg+CgzClbNsjjbYaYx/gkAIDzhZoSORKVQYMrksKIl2IENlRARTzIXKJkFgqPSrYRqM1VhZiNorHzntHF3aYqaqomYD6m+65tvCfHAGRgXM5qGaJDxgQ5ZiByjlzhEwwqxoVPvQzbliygBb9ws2nBiJKzajctiRdcdJZyk105QIg21mOgIjrvdVCJggSuCEUoUWKKeVrU22ZgJYdKmZxyv81+AtOy2qqaaU6xV3NILLTZyOvXW8dUGhhoWQfybrdt2RUnJ4vFdLKczo4X8+l0WgQPIiImpkPX5xxzTDkl5xygqRkTMjliV1V11gyAZVmyY1AjciMQjtk7QlVBZDOjURMnGJtTVdX5oKJIbhyVHQVDEwDPrKLMfnxEEBMBigEi0Htb0TD06KeXv7lPwS/Oql3TlZ7QyunxGVj66t390bR48uhJf0vO7f7gD//kP/y//5dqVrqZWzUZB1Uu2qZrN2tTV5cnHz783ok7vr18izM5fTDrbtqbd2sBpu7r7WZFfn28mJPZ8aLYqAxtNOSsMj1dzCbeVfz125eQcxRr7/cnj86mp6f+pP7y/nZ5Ni9DmRKJpenkeMiCuitcWj5/erNaJeRJ5a9XV6cL77E+v3hw++rm+t02TIrN7WoxP5ssQ+V5db9iP0yqo/2+8xy67RYCD3vgwphxv71fns5dTW3fqZMub9frq8IcC+236dyfEfFm39out7sYynrAjMYx7QEzBuMEXdORYxN3/PjDyfxkv9vHtP/613/1u9+/mC7P3tytVPOLX/8ydt10PvHFLEcoi7KezhXMBTYY98cAiMQhKxT1pKrdZvv29OHc16SJ393d3d/t/KS+ubmtfVjANLapLrwNmZHqYjbs4unRyWp77YKzTnIahq4fuljW3Oyvd+uL+dkZugCqzrEQFiFopG7fOOdn9Xy32+7W63YYzi4udvdrTlATImFZhKGPMQmCN8Ou7QOGvtuEIiBTWUy6ZuMyMbEZDV0fCiwnk+l03re2Xt/nrNV8iaabmz51aVbNYswyQKByu9qGwnX7fr/uPvvhj377q1/sb29PTpebfYvJHKKaDTFqjqkbzIQ9eWb0AGaTumLv31xvgy8/v7yrK380C6vbu+99/KyLWfp4f7exPPjg1KyRbje0wbnN3aoIRRvFe65nB6y1B//g5AINnIahEe31Zvvu2Qc/bCwYupv7u32fZqmYFnw2f5AtZ7P7Kxk0HIUj6v358tR5arvGOb5+eyWOjk8fpE3eakqbuLpZP5o/Duuy5Hz0sLpZ3Xz71f1+v4rApZ822nPhfagF2sHP93n3dPGgzXh3dbN59c6jquFnH36vvV91PXzx4pKiFOxOZie3b778/NWrFvxkSif1sYKHKc8mfHZyRLJvhoFIoRn+/Nd/OT8//md/+ntvP3+x2qzwdGrgzs6W9enZ9uWXZC5ELM0xA2j+8te/YSCK/e2L68XiTL0bmr0qqnUvfvXF3erm4slFzP6LX77M/fpk8iDkxFQd17WbbH/91d/Muf7RH/5oe3NZVSc//t2f3m3fVRN+9dWXUpTNfu/ri6Ojk9e/+Sa3zdNHz/br1Hd+N+DJxRKnpZp1+60HDMQivSMjzc4HcCypv/D8/d/5w198/UWb8gdPTn/9t79eHs/4dLFuuj415xfLj56cvPnyalFNfv+z37l580b77vLN2z/813/cX90sJieXr++GLtx+sw9lcbvZruLAA+Imfe+j761u3zmWy9uvLi6e+cVZZ7OPP/z4zVff7Nv23d36h59+MgClLlqCDx4+zxLJ6aZZv/tiIOT5rEaz2Lc566SaPPjw6d3tLe7W631/t7q+WBTb29tiVlYVHx2f3vadVykCY8yTacm+cr7ep35+uvRKfWpi36Z256wgT0UZqqLounR8fLxp9tf3q3pSrO/3BQRM8u6rd9PZTGIamj5VMq8nu+1ucTJnx7uhT+3AXKwubyC1zx89/ujx02Zz8/T84vXrlNvBUAHk4ZNnP//N3/swcUVJ4SCYkmdj13ZZ+q13JUwK7CyK7lar4N3D8/MQAiLtNivPrqyLZAAKgQ8HtAjoXGEgBgrZTCU4R0hFVRRlURRVTBpTUlDLeYjRMyXHKWsUVXUytn4qiMRo2rUpgE5D8MxIrEAemHE8XwZEKooSi1LUIGU2UFVGGU/hI0IA7k09IZAx5CEnyYONBmHAFJOqMTKqqKQo8e31zacfPF3O5xCH+80NotZF7cGbdpCwgDrmTnMmxOlkZmLomCGbZg4+5/f8WgPnAxGOZV+i5p133rpucOjQqaU8RB2QfGDJgszsgnOVaMopxqFhdj4EzUZEknNKQgYmMMjOuUDkgTi2/bSqt9u1894MslpqWxXRHAMRoWGW3DUhFCA6DAM6lk7ykAGR2GVpVQGQkZyqiua+a4ENJ0tf1HU179o9kZOcYtcWISASsVMkRGfEWSWlrMNgeYjKjIDoRJPlROxVCRRGHLWh2Kj9jNXYo4MdRrjnwfWgKnSoJoOxbQwBxtzNSLGF9yXfSIxgh0fkAQw06i86JgTe2yEU4BBfGT01CIyH4dEMBMEZABAdMm6jt4JIQN/7KQAdH8waKmMQzhgReQRljBChERmD+J5tBIakSCTjcG5KhO/xxGPJlLELZqA5IqgZHOAp44H3OCwAHiwiakiCBGZ57LcHHDNroJoRyUa3yChnEYAZMKkkRGDkLAJIwMBjpg0AySEyEpkqgKCamSARgh+dM6ZG7Eo3abvcxXi/j8nqXrFG7Xea+zVP8OTspL+/7bt2yK5UGO43YFZ6XwbXrfdts4VFFfvOzJC94+SrUqJZzKCGTCXR9Oy06fPpg8ciaffqC9tuOe+mlMKyur3qtuthUDR2BUfax/vNfvrAYE61L6nrjiqfMUPlbNCYhtt3V+ezxTTUzsAROEIEC+CQCJAdgyGFUJopIi7KWelLNinRlchgGMricH0dtEUJzmUANbOUaPT7w5gUHDMncAATjYwtojEpOfJ3EQnHHTng6AhDOgh8hgxmZobskBHsII6oZgBEF4QoGdjQEwfnEuSoOYF3B3rVexHzHwkq9o//RXof3nx/+cN3F/EhfPkeCP1ecToop2DjT3ewr4Ed1NT3gTXJImO
mcjSMGBEc4jwjF4yJ1RTMVJQIEEjViAjGKRBMVUVNJI9RlVEqGoNYiEDsENSQVMfWRwGUMS3zne6lZshsQGCmIjmnnLOIIACxBzPJSQk1ZxtTSYiqOv4rQqpmlh0iCvI4Co0/H9pB2XUMSOB4dDmYGaigJjQDVSCHRYnmoAgQKmBPIIAkhsCU0NT5EbKNhMCsCsYApslyBiFGB6KSjN14ETKN9yvIKmDCBGIZx/Y307EDgtkrIqAHUGM260zVDFWUWJEIgZBHvxkaGqCoKRmIWs6mmg2MNJP3o4hM5Eb12ghVzBSAHLBH8qCJiCybitDUh7KGnHRaUIq57UwQB4WYNBnVwSqvnA3H2yqrypjLBQBAL5aQyLEHIAFAX7FlBjHi0VZqqKoW+75P+WBUU0S2ARUY2TEaSE4IPFrJxAwJyCEjjc1mAEAcVCRnMURFGp8tCMAAIsLo2TlTiymR2njHP2DyAJDIzJjpEPsd/4KDZJRTHtWJ0Xulkv9HqWjEdpmCHpoY1BRIRryUyOh4MzXTFFUERVBHAUjQkXeeiZkIhVxS2PXaZ+gyJLEsZKAGBCwpInokQENThRG85BwgIzAAqYgZAjogGW8Bo54KZpYUBMVQxOPouCJGRc3ZABAZDqHXwymJqjEhGZmRpVFRVaLRwYooSIYpZUBBBsmWwZwxqjNlxknOHeRM4EENHKFjYATPI8geETVnCMjAMggBcQLJGTEYJGSPzExBhiZgkRVgUA6VWoYeNaIdlREFdjsPzhc07Drn78L8FOd14Z0hpHYnsTcw1ejL0tchJ/Naa0w5RgTI0hfVTM1yRmKzlCQm1TyElV8u3WKK5g3KQHa09F3MfS+bXphTn4UdezN2DhU0RTIEgYRQzObuTnbtFrwTTEXwyB06gkypS5HA18REkjNIEkFzhuTLul61LSNW0wqHAdgQtKpLZjefFdNpVVeVD+WYW0a0HGNWGW+hBuodgxkxJRlPBCvyjtgFYEAi5xDJl34EWR/QeWDBl0RoCuzAEJDQsVNTNmQKAgkBCRkRxoNWY06qYOg5IKPkCMaO3Qi9GoNtAEDF5A//1f/pZ3/xnyWnFNNqdb+YFvNqvrtbF+S397dHk7MXX36z5OPvff/jX33+t8XE77arp88e/OztN2VYlMWyaTaTsgw4++1XlyU/Ppo8+me//0f/y3/8f5JnQO9n1fFk9sXP/vLHv/P09j6/ud7W7K395aNni7QZVpf7y69eHy2OPYepLysCwNAkKecnX37+iw+ffrC5hu7+5sH3H84KP3n4mB6eX0we9Wl3+e7zbthu2nh0fPL2ZtP2+fnzT9+9up9NTNxw/CgsHjx28/nrb96czU9mS3r14tv54pRcIz2bToSH1F+fLZ+Wxw9W9+9OHpzQo4fb2Cv1D58eNXGz2bWTiu9W/fHi4VG1GGLbNEM5KbqmdVhmTezrWVXsLq9y7FLs+l1Th5Lms69/+dv/87/6v5ZT3Hew3tz+8IcfffBker3+9ub6y83lFZlOJ7Oc1BexrOeB2ReTnFTNxOJ4QlFWNSM6h+CnXELqow/Bh7DbNtttS8Hf32xO5hOuiiHb3GN2kHrpm74ogyk9eDj/5sU3CgrSDf0WwYDQVyHuhrv768XJA++9ahIRUPPOsvehnJhpvagM3f3ddWpXprBczosiwLbrd/t6PpvOJ22bUtMMkNhV88W86far1W4xmSIpsFpW5wgIK+9zys36tgw8m89STl3fmOOimpw8Orm5ulkcH1+9fbfbbnxJeSez8qjxQxeb+vj8T//tv/n//of/lA33zTAkY/aAxpiHnJGAXSGiMUuOWQ2USLNN59X9dt8NmkC7lIjdVuGTZ09evrxZHC2+eXuJQxz6nsjdrqwolmrQD6oAu80ulIeNESufHC8K59arLageH83x7n4xm03OH1x//UVZV0uYrTYbgf7Jo6PmbjsoXK2vP/vgeyVX97ebUPnprN53W1/MTs8uXrz+5sFjfyTTaTHr2vvTB2c+87/+o3/+5vVvFpOwv9kP63RcLtXZ9ecvSqbTJ4/CYvb69ctwdPIv/6d/9/Uv/nLIGgrnMk0qt7+LMmQgpKLo++zRteu4Wv/d0w+e2bfx5e31g+PT7btv65PnwN2++bbdD30fzfR3f/wnf/Hf/uzN5atH+UG3mj5+/smFp1/9w+cfPT2Ku54FA5becUUeRe/ffFtT1edeVabzspQh7t4lFGaX7u8yWR5y6erYDtqsFzy/3/qPvv9Hr7/4W1B3c/nW2/TVq9cnxWI6q9gVl7fNhx+dFWfH2+sdYPvw+Y/eTb4+O/+gWa/Ppkdx2/bSDO0WHYs1+7UCFNf79YdPny8/WL69effRjz/ZvL1KTdvHIUXytp4o/P1f/O27Vl3Nt7o5Wp4iwNHiuJfbjz79pL3bry7tbPnBJE9kYx/PnrW2evTZJ/Gm4vW0Wcc6H//P//7//mf/v//HtEakqnj4wNapzLRev9ms1pP56b/54393+fYlmD1ePrr+/HoSllDTeT1FJQa76+5/8NFnEodXb+4v3zQff/xh8+66KKeLx0f9vsFuuHv5dY7h22++mC9nn3568V///C8zuOkHzy1PBxF2/vpqHfepvd8P266q3KwMU192bb66v1o+ftzHVBeYdfjos8fffntlkoMrwaCaTaazyfr+ehJ8HDoDZORyWlMnZfB2Or+92sBmMy1L9rzbNH3ORVXO5+Ht5f2js8erW7pbr8vp/PNf/O3z08eT6WzfbQh9KMuXb1+fLC/Wq+12s2J3eBZ477Jmchgqr2wD2Ppu1ay3s3l1PJ2X3nvPJjarJ2jkXcCahzjEGFNKohqcL4oKGENZpiQqGtCVlXdloOCHIZolQoMhpTQAwL6LxgFUsgCxMaFINk3dftAhOUUXUIM6BtPsHDOYSspZFMAXnkJAH1glq4gogHrHhKgavQEYekMkDJ5NMWdCMJWcczYzJMopsQOicH+3QiRHHjMOXZtTXzrnfdHHCLBG01lZ9HEwE++cJ0KkuuAmRmEgV2Lh4pAIsd/tA9NsUiPkPAYpFAgwuNBjIjRETCqaczIqAJIM3mPJJCrBBSJfFHVM2SQbC5LLaej7vYEhG2LebNd+LH4GCFwVRRn7VlJUIABzhgk9EaOl8ZA2iYTCD0lKXySJQ+zL2UwFDidVYAwwSE4pZRCFHFNXTOf1bLHbb/j/z96f/VqWZGl+2BrMbA9nuPO9Pnt4eHhEZmRkRg4VlVlZA6uLYDdEoRsSBQlUqwECepDQAPVASIAAvUtPEkRBgJqQHqQGmy9ii6LQJAs91dBVWUNWZWXlFKO7h8/ud773THswW2vpwfbxyCL4J8R+8Lgefu45+5xje7DPvu/3xdgLcLuqR7VzRQZjau6fITNNIFFSArJMBjEzSdEDIZGkHokRSQ0HtAOASgRgsOxYd8wm0gMqAqqm3C6fxSMiyrLRAEnJNNG1ZQKAYOAFmVlewqZ8D49DKMGytQ3MIM+sXme1TAF4oGgbAtCAGVYBfN0SDgamKSIBM5sqkDMAUDBcW35gaMAxFcwTZkAgMB
w0LEuJcjwCGHLjcSZGMBgAuQCDeQrAVEHRKLe1DWJEjhuIDnAiYiLK2lR+fhh0qmyoSnn2a6rDNB0UM+UaDIDXaOKhXU5VzBIBILmcimIjAQuhAIkcY2lsEOYikSkhNyizVTvxKUwmzXIOTje2xox8eby4PFqNNytytrg89+Qmo0nsskLQugKa84vQBxe4j4ARyumkN7ho1W9un85XcnEyHU9o9wBtZ3F23jY23vH1li5XfRRfOJckNdqNNsbjG9d0sSi3R/3pK0Ns2xTI0agwC0WYlEVBRhoVfcGEiMTA62Y9RAQiELWJdyM1ViFAYkpm4pCyYJBzi/kzzN8vouUWPFUEVEMmb2DDfBU5U2UzMB0IcCjPpvXoMQNABkBCw2wRyIkuGEjHbKqWuuzVA00YyLmAEtOycYpEjPl7zKpTllhyU9kA9qJhBpuNdcN7xTWb+m+ISoNINLDA1jqR2TrR+RoCNmDVB6lo8CUZDax4QjNCMETOpWRkCIM5Sg0w45HUwExBkqiZiRqoyjDtzTYQI8x7lEsI1Uwhh93MkigRIiFneYXYDLLdRkT6KCoqqkwgoggCgCqmIgbGhAasBiAKIABExEQkYkymaExZcjJkYmQ1QEN2HsgDEQBSLjVPIlEAiIsKLRARej98wJJUYlaayIwU1Ujz8Q1sjEmiwWAPUTNBdWiC63l9Vh5I0+CHTJ7ysgdEVUIGQyV2FAwZgQidR0x9L6Yi2if1DhyvzZGIaApIg7SR09ZGmtTy2goQIBuuE6yY0W8MRmpqwIAIhOQUUAGJg4EkDg6l50mlahgjJjEAC6iOkgfJZZFi4JwRGYCoMJOY52xUysYuBDR0FIBQURMkNUuxB1UGQueCcwwJTA2cCZgObGXK+l1mjYOyClM+aXvMSqXjgllNmVHUJAkYqIFDHuDNZo44iaqaiGR4Sw6NIgKg0aAeUT4yNOO4zAwwpcSMksf1L0tFqgaaEEAxX4o8AaqZmYjkOgYAAVVFRelT9quyY0VlzsoeoqI1ok3CVdI+WooSZTirg4IhA9twERHU7IjKLZqm2gO4dQ55sBLmy49ZdisrZH46MiINdCg04CGVDblVk7wRkzkkUetVIggAiBoQKfmsMJAZYTJHDKqYDEmtN8zuEiSRNFjJOH8IQEy5ozQLYsaKBZqYJIwzkZQq5qLyAARrepKJsKFpZCykiyjiCk591FmULeJxCeJT13e9imp7eoHEfnOPpyOfNiO0CNGJpWiAyhzAEZMnKeOqUYkQNXU9BVdUZbdokAGINKblyaJMDpTEOSOnBDAiH8rxxE2JurZPGg2tU/EEAsKVoxj7NuZVpqLC0Lk2JjYkJ5M6LJcXbK5wmmKX5qBmzBAcG5OYJZUyFG0vnaYekJBVQJOgc9WoZvZFUY0mE0Q2hBhTH5PEaJCvFuKdy2dQQPTOs/dFUULuxkAA5mzJRuKMrGbOa0YDw91InfO5eRLMKl+KiJhmLJ3zzpTFkoISEpDLpzLLRkQ1yUOI850EAMCNa28cPn327MXTb3z/Vz9+8ZO/9e/87Qc//Neql+98481XDy7fvHOwuT09ezK7c++95w9fjHYnbYKtna3Twzmip/FO16ywjy9fvbxxffSd9949Ojp83Jwex8+2N3bOTi5Xq3a0sXGu7d6tm2ezxeMXS5xc+c0P3l88/3zer54+fXrj1u5q1q9Stzn2T58/91RrqH5xeP/u7k0aFzvv3JzH1YPHz24ebPTt8ic//JPbN2789YefNizjTXd2vpqt2u16Olv00y2ar5ZF6Zrl7Kyd7R3sJoRX56c37944fvyEQzHaLesJGYSHnz49uHF3tFulOOkx9bLEsiYYnc6W1c640uL0+Cw5bRZzz+wcPXz1YPrV78h8maxXdq4M/bKzxFXBUWPqV6vVSVG71cJQ8ezk8r33vn/91q3U9tosp3u7sb78vT/7wfn5C+dd18vt2zeiEHPYu7qxnHeiwKZ9ir3Eogiq5og8sYmMx+VstmrPVgGddyVr+emHDzb2akSsy1COgi/58tVhYF+UHkvX9RGijMZVPZ5cObh5fPg4OEJUZASC1AlRWM7ni4vTjc0NIyCHHiGlHgBcCKZmZkU92uP92XJ5djZr2rh7sJeHp0iEQEVBUbiNLTYNk23t7ZydHnXaTarKJSZAiVHbHtSYwJHrVnPCNB6VSXHZLpLEcjwdTzbni86Ql11X1845WK4uqjH3vVyeXty4fuvG9VvtfLG7OZo56FW6rocQXODYsYIVRdX1LRkpQOF87FVSj2YOtSwcmorYx/dfQtjd2ditihaSJogAGgKtlrOzM5uWo8ePH+1f2QOw5XyZj4Lt3Z1V17etXC77qgzkqmrkelk9vv/h7OLV7s6VeXs+79PZ2SkXb5zNT3zhN3fqZjXraZWrUp+9eGnOxWjgyntffbea+GZ5fnZ4fzradKNw1izevXLl6NWL45PLBonG9fHh2a2DW1978843vnLvD//wn5Xgf/39ryxae3L/RycXxx0U+1u7TvqvvnOrWdLR2cvN6bShoomyupCdG9v7m/tv3Lnp0YPFr33lrc8ffO7GcONm9fDjJ+ens53dG3WtP/nR7/exHe/Wi+Xys0+erLr6rbtvlqPp44evtB6d2cWVze3ZxeHLJ69GW5ub25uXZycbVzcOz49s0yVqm+USC3fjzu3nH75w1WS6uVlzuVrOXr78eHOr3N7f/JOf/dEbG9t+RHASd/Ym4fqNn/7Zn/z6O989PHrx7d/84J/8k3/0a+//zu7BtseVzZ68MbEnP/831ofq+u0HDz5+9ztvz2edOPmNX/vWT//Vj8fjcbGztZpfzGfN5vbOxenJi2eff/vbX7Oqefr89L17b/bP5/OL5fb2rqv8w/ufv3FjP3ian5xggsn2ruvZ9SzCxWhrrO7KweT5o/izRy+//Y3vHDerK2/cIFs8f/GhKhQUTlarK1//ys8++8Gt3f1rt+908SFa+emzZ5MytO1ssVxevXHw8Pmr/Ts3/WJWIsdlvPnG7ZOTi8vzC1eWmwdluVsuT3Bj7+DR4auz46NvvP11k5Ve6q+8//5f/ewnq8vjrcn0rFm9PDsDhWpUqrPZ2eV0d6tRonLkp/WTF6cu2q+9/+ubm/XFYjaPbRcVgC+XUm9sap8uLy6nozoAfv7ocTNfhY3y1u03nzx4sHdjd7ls0dHZ2dxNSnLgxq6a1ouz2XhcPv7kwcHu/tlZW9fVqmm7vi2d39gev/v+dz4/ellvbVvv+1WrrRUF92ioELwvR4O3LvWS75ed567r57OVaXSB97d393d2gy+AMRS+qGpHPmrqV1pV1aJZKgARE7iUhJDbpjczQuq6nr1LSRkl6nAvl2JMsY99Zg4kIqLMkkRESau2nzdK4kj6isGMuhTRiNhpXps2JeLSF4G9AEhKpGqWuX+oCCEEiOYMGFVVnJEAlMEzOecEMakCMUYRQEqrVg3K4C7nK3ZuNps7NF+54EFFmlVHogVBYKirUVTsY5f9y85ToADghF0oQ2qiLwozqOoyu4hdWTpf1HU4OT4USM586
mMSyewkESnYsWkgVI2aiNm7UPoqMBMgpr6V2PddI7JaXlwAiA8c+wbRVGw278vRRDS0XTsUeAFyFkSYRC2lVIWKkNk54nVvFlHse0nC7EWS9J2prJaLFDsVF0UUqRxt+FDFvmfAvl3EblwUPqVEREhO1YAtSUypkxQVhNCbmmcyM5GIZERBVTg38YCppYwfEumJinyjw8q53ih7jACGbnAANNCBNpv9GbkJKUdrbJ2voTzjzf1xCgBIGYBtiCCS8pwAYOgkgy8QLwM72swI164gBLBh5goAgghEaslAmQl+KYoGOQikkifttnYdZa5xRvdmy7ap5iENeSIwtIwJISI5MyUA5HzXNqR+sh0KAUyVIFNrdYAN5UReJhMhEZCZZCoOWv60KHtIEGlgbAOq6JCjI1bLyRkxFcyACwTDLLAZZq2EXEIzKkt2jO68a0jbMhRboS6COkNqUzku9KK9aBYXF33T8nijHBV178vUtahd5aCPLOqpBQ8ol6uEvQ9jV1TatQZa+artbePaNUSpvA/F2KDnenR2Nk8oowrLULWtEmI12RKP6nB+PqMu1b7c3d7S07MpBu3jrY29/XqnRnaWGMnVLisdlFN9gKBgCkAAyRiBQJkQgUBzyg/RKIuJtmZlgRmiMmCeL+GQYclOGaUBAo2mRll3Acu0mwylMtV1wsXyV5rjRFmPy/kxBCNVQzL0mA1ehtoLkXgf2aJyqUUpEokIOR+1mWxta3PDMOOF16MZhpyZDlhrXPuOwHLYEtYDY3iXAxPstTA5eHgGB9P6d7O4YKZDdb3mznJANUVCzp8lWJJkpoJEgZEQzUQhw4PWECLLs3EdqEoECKCSUDKG21RFkqgky/h3cEAIRJrZZCpqmkSSJpGESApoBkkiqIlqlowdhewRMQM1yFOkfMJQM1PJVJ88QJIpsyMiJVgXzElWtVJKZppnlw7ZhoPfAEQ0ikrCBKZAaJYQPBNlqr8gCikAKSgaGJoydSYECYkZgMDy+zKzlFIC7YeoHeoQIEEGDUAO0WGm1DIQSdJeEwEou8zRMjMwQRhqGIE8eoKs3EhEVRQhM3QO2YEKAGY3VnZYGgBYthiZ5i4CQ2QGAuMKMYB6AFFTk6iGRibGQCSGQwaOyJMTHCaiOe6b566YjQz5qgOGkJgMzbx3ShiAcu0mAwOggCWxlBNJqp4witEwQrlXYyPVyIy5AYAywQcI1CCZZLA82Tr+ptkvSsgxxZy0HTK8eW8MjWhwegICAHFemQAbwpKWx+nfkIoggQECA5iCgmgLzEYgvTAyQr4SpJR6yu1ipmLGxBzYec/sQAz6hJ1p22MvcdWBShLJoUfV4WpkkgCRkI0UhnxmAlAcRjUONloVREaDnMAEQkAgQFMTa73jHPBiQswLBJbMhLJXLSWxpKZGqmBiCdGroCdwmJDEkE3BkhopOmcilDyaqqGaYoHCNoRcTQxIkrEoODRwgCBghsDM5JywukmBGGChpkaMyM4SSYdm6pBEIpIgcLfsjDx6sotWNyNU3hxij/1qVY4nqqrLeUfeTbZxMiGJ1otzANAbgEQhz+S9koaNTUwdzBeIIKpUlBX79vxMYwohiIh1rVyYn9RKdRdT30RXun5ckPcbk1FUFRUwoWx1j0JIEpOZBgejsZvN0VZYULVsOzEj5a5pkgkgFmWRUsyrRo4MBdvYO+bUSxJVTGVVcHCxi6xEyHU9qkejsqqd98ykoqpCmOH9CfOqHIGKERMAMTMSOXb5BwBgdgjoiPOlBQGROEeoGTmlaKIIpKqmCmzMDgHQORVdX6Wc44GRqCAp9SEEBlLtCBkJzditF9E++NWvPnn6Yro/QYkbVL169PDo6GR3sjd/NTs/TZeLtLrsZFkse19tTfavHRxeHlXbk4vT5f7B1djO5yfPNjfGh317zfGD4+ejclRI+eTxxbWD6Rv3rs7n7b/564/eePcWdDgZbX/7na89PlmdP7p48WJ+9ys3fXXt0cOL/e3Nw5dH9+69AY30bZrsjffe/d5f/8nP7xR7zZE8vP/Trb3wiwePx1OIvnul8zM62nvzrfP5qvG0c20fDLd3p+TS+dGsKsbbN6+3F7Oup1m7WszTo/mLzSpwBWDw9NXTa/tXCj8+Ozu+WOF2qDXU4uTmteuHD44i6q9++7f+4vd/t78UV/lqPJrsliL68rOzot6sqGyOnhMEV+nl+SsKRde247quJ3X7aLVdX+VSJm60uGi++c3fmWzuLhbLpCrqjcPjl8f729vTcbW/e2tjY3NxsRqNK0IkTABaurInZUloiEqCZqLehdj2qV02XTfe3dq7dfP480OmwtT62ETBy+VidnmRYjq/vPz6nW+enZycnF4GDl0Tbb+YHByYNMv5wrFTkK5ryArUEJx/8fCTzW9+y9fjvm/MtKrKthMUUABJvRmQL/f2RwTh8PgsxeOdne3t3Q3s+tj3IjDeHMXz1LaSuvn+ld3pdPvo6IVHKnwRY58kmkAoK01tistQFSoRGHwYjdmt2r5ftXVVzxftZDI9enW8Od0KVbGcr6aTcTufX1yc7O3ufuuD756fnxV1CUen5/OlNmai5B2h9ilFFSJfjEpNiZgt9RSIyVns26ZT1fG4imKf3v84+PFv/ur3Rzv7l6uLfpWsQ3Ju3neiVqDvUoxNTzzYiubLbrIxbRYrX1W+GLVds5i1mlbQx63t3VmcPT/7/Oq1t+rq1uPD00XT7RehKIvZ+cyAmIobV/f0EvoexqONpusul4tu1biOp5sbmmw2X/U9vHz6/Nq1208On9y4vnfYt5u4+b3v/Tae988fPLlx8B4VNL1e//znP1sto3cTg3ByIde3945O6fnR4e03bxw/eFUU/NaNG6fJNlx9dfPKw188Tkv+6pvf+ejnn23sbUGC+3/90IrA1XSWMC0jzRr1/nh+fLBz871f/ebzp0+rY7d/7+bRJ/Nlspt3bl48/jgBFPWkYOcCn5s9PzyOfXuosOpi16Tab3x4/ymrMlRVsVEX10rofu3730+heP7q4++/u/PP/sv/9Ne+/87uqDYT78KVyc7s2cPPf/ZXzezkt9774NmDZy9fHb1xZ/z5R5988N0PtHtS1FPcqG/ce7OsJ+ms+fzxq40qlOVObNI3f/M7P/nxTz2PHn70i9/8zQ/G3Z2Lk+W8aceu/PzTh9e2DzbfOPjr+w/fv/aNnbfeCNvjG7f2/uoHfwXFxv3797/19tvXRhuzRdPUPKrKn3zys2Xf3np772Tx+Znq0fPjd+/ePn3y7Oabt9r28OjJ0/pwVrtir9p89MmzxSrevb5//8n9a+99fa7Hgdple+5RH332HAQ3b+y1aps7W08fz3Z2Nl+8ODuP3Y1b14rR9sMHn1+9uz8Fd/zycLQBdV3d//TDsiDvawXZnoyv3jj47CePyrKuJhPtnLlqPp+X6k9OV5Otnf5s+cNf/MQslaPAjoMP/aJZXay6XopQkDozMjDksLV/PcV4dLQKYevw+XI0GVVlMb883pls7I7G83k/f3HRNl1VTu68+YbMo/RSTUbnfff2u19vLs+a
5QqcPz2doa+v39h/+ulTMxpPpk3bHlzdW86WqenyUeA9kWNEHJd1tNgsLwP7N+7d2duc1KOSi6BAQOQcWzIw9KHwBUeTlNSMihCapnHgAWLf9YzonV/OWtf5qq6ZMKYoXZeLk2Pqcto8SXI+mIqJNW07W6xWvYLBCFIRSkespgw+t+SICCEwOcl3dlGyjSHGNmcecu9DIIOkAlIUQQxAKYEyEzOZqikIKBOLad+l6XhEBhtTMlBRzUVpxCgSQy73MQuhUDND5eALcoApMJJjM1rFHpGLUPTNin0BRqha1+NQljFZ2ydmj9Z1bUeOVSWlqIiQrLE04qLpVs6VfTL2URmYnRgyevQVUQh+pBrBjZYXpyk1mrCHyORiMu4iqHgfkkQAiCmSc0kjGbpQiOYJUEJXmEqSzswIICb1LsQ+EVHXp7bruz62XW8LC+W461aFr4pqq1u9Ik3AcbVcuFBwKFQNkAE5xY5j17Vd9koAgOc8eVFEjLEjJsdBNCH7ITsjYqYIaJoMGImdc6phvfALSKhrLssAXMnSISB84ZzAwVCzXlnNE5GMYhhCOQaESMT5l1QTkvsC4JI9E5mjnSM7mNvKUAddIK8hkyqC8ypKsLY4ZdeR5okuDTXmOY6kCgOZi4GycYqzTJYnY0C83kHK3izMMpkRgiK5ISsBmYBra40t06wzr0kBdYgHoRkKAOXCdkAaCs4p83EGvrIh5CXnIWMBqCpoQNkkBaQiaAN3hjSaCImV7EwEDTZc6fq+AE5oLqIzGk8mpdeY4OyieXG06tWByAwWuxu1G29cIsfTuUezBCiQNEkSCCRoncSKg6ZWY+vHuHGwaXFF2jVzWfBlNHJlsff2O2rQH8+Wz4/np8cVQLxcJYvVhKodjm1Krdbt6MBwZ2dvtLlFYiNHKD2rc8QKSuRABFQNGQYcKagoZGkha4uI6BhgrY0gvJYBTA0NaTAhICCB5eL5zD82MFlXkWWB5pcEm5wUyRVPmb2Vm7zQ2YAgHVw/YKKa8m09eA/oLGWQircOdVLQtZuyf5CDkpllNCC3DIAQ1xnFYQcG99AveYdskFDz4jLAYI8gGHbW8kwaBsPUF6rQYIsbIO3DoZL9FzmZJgmAAXHQvwghM+GJiChFRVPtBQiMSFVVddCjLOfLDPI03BQMiRBNc45HFVLf57MHoCE6oqHcHCDXC4IpmKjGnARUNRQQIlBVNQUwZNZsH7K1ejbEQMUIVCgDwJA562lmZIoCRkNtEICIpiQpmWQ9DgfafT5yVQ0kS9sGnEukshZtQKpKkO2BX4jKTCQmQF4gOSRR9F+kYgUYk5oMXy2oKYOhsYKRGRsYoAGaMaDPSrGCGqACpgzLWavKZmhIlqFXquy9WZeHLojpQMvK8iYhOgOlrOFpb6IIDLoeBWZqaOCM0FDF1DgIAEBuqUdVJPaSzBAVCIgTCCCIWZYnGImMPXrOwg0iUkCIBBKgZFFkJiJV4Wx0MQBGUNXUE4mYMQ3eiCTJwAkiIqgkn7NWAEmBkBAYCRnNiEQUAdSUkDHbZUGJHYCsj77XIyqbYwY5FZEBbN1caMaqKJoA7G+yirJ9Ky9BgOSTqZpkR6tkd5ElA8tSmhEiMDERI7ERK1hv3Al1FledtlF6weFTF0Qe6NtgiIQ2rHKoZihXPnUQIUteo1B6HYjFrCEDIaikyEhEA2ebiIEdWBbiMoUNM0AMUMFEYkrSifXsCsIyx0NNEjBlv5mBSewZUSEiiHMB0RSUkJE9ImpqkRCRQJHNu7WLkjwbgCoWwUPsgJQdSRRVRVA0IADyrIAAhM6pZKpuZHJFKHGJ1IWosV+tAoBZy4FTF9UuyLsw3S2nm9T0/eUlALBziBBGBXinBtJp4DB2kyQ9Erepp+CrzUlqV6riXBFjTJJCTMJ9vbmZRFdn8+XlnIPrxqUC+XFVjAsUyU40Xzj2JMmIIBRYj92qUcfFso+ByTP0CIxopmypYGbALnZA6Igqx2RpVHIX8fRy2ccUCk/E5cSPx+V4MiqqAlEB1SCTsAyQRCSn61WFETk4550aFGXwwSMQETETAIqKc845MgDHrCZM6JwTSYDmHOfvPSWzvB5kUhRljD0SkuMkyaNzzFGEmFIEciEfJt4VmLs5DNbrEfD4o8cnhye7o71xuT31xwGLK9e+VhVb9x8+slhsjSqGNNqeXDanVYBXzx9p38e+s1UcYWoulwfF9tbWaHbRlsnhrGud9G3/zV/54Oc//qtxUXKP33rzzXpn3MA5zFf7t3E8mpSoPdv85cvrNw+W3Vj61bfu3jt+9qRbdmylXvTNalW3sUY+O25uf+trieJxM5Picut61cDZr/2dt598unz48nS7LlfzCyauilAFP95A6811XQnWzed33r7+4PPTAt1ydnnz9o2nj184Ky/PFueXx1vbewVMqvHk9MUlB/9q9cKku36w9Wd//p+T0ubGxDboop1dLhuNwNY9ffHJvd23C6r7ZQdeq9EGewiluUChqg+u3e3bLnVST3d+43e+Fuqim1+iyK0bW/tXNh98+me//lu/7iwuz2e+KIsiBOeWi6W26JlVNUnvPXlfoJmilVWlmqJoSewKKNkVG6OLi0uHdPX6Xovz5qKZt+2iUVk0vixOTi++8tVvTsYbs9lCJJWjcbNcjMYbRTG+bOJydgneAKFtuyKEyd5kdXH28OGn997/NgKJpb7rSdEHpwY9ejZsY29A+1f2Kl+cz5cXl+e+wMK5jY3RYraU2DkiRXGFm88X25vj3Y2dJjYBXRF87HsFM02atG9aJnLsumZVYvChqBVn80W95asqKKGv/XzWqBgjrWbLnd3xi+erxfxso6wOtvej6GSHwnTq/XnftYbaEffzJXnf9p2qkaF0naTouCDAyXQsSl3bNW2PBqQmXfvDv/zXIoIqDkBTqsZlUYW47Ou6qIK3KKvlAPTt2iYvUCwXC+m7vm8mo/LiPEUz4EDRXd2+Pbbi+Pz49rWDpt969vQ5s7Td5dbW1KLOFieioKu0aCN6/9b1W599cr+qKgrj1CyD4ypUllphO3n+8qsH90KUucaXH/753sb1qzc2Rhsbn/7iQbtIoa5d4LDAttWtqze6s7NzWbGjxard2d7YK8fff+8bnz9vUJqj4+Nb16+dnVpR09VV1Yb07OUpO2q7ZUz04ujl7/zat7Cw49PLnUk98Vjoqo7t/Z/+2Yb/jRG72emr5x8uHUhRl9RK7FdVWW9tbUxI+q6eLU43tkfzGbpQ7u9dm7143Lbnp+dHoy1ZHM4u0fbfeNOOn120rz54/5u37l07bZ4TbsPcfee93+6Wx6NvvhkxRlls7qrf3Njeq4+enD19/DxwOZru3H7rnfNnPzh78LxEe2P/aryQabWZXPzRH/2kWS2Cjwe7exfz1Xy5nI6KDc+LprUW54vmxpVbH3x9a/byZXO8OG9Xjz/7rCbytHj3ytWb07356eG8befLi9mL5+OqbOYd4+Hh04f7V9/78V893JtOHBZHL0+mU/7qnXd2ytH+m3e3Q8VzkCgjH96+8dbZs1cvnr/c2x6dH15
cv7F/7erOg0/ue5RA+tc/+vG0Hk2m7sbBzh7o848e9pddURbzi3a5atjp3TdvPfr0lZmWk/ridG4qfd+//OxFRXjx4tC3nSYwar9y99bR4yMloq5jJEMofdE1XRc7K9zOdHRxvpxOx9456Rc3rk6ePT8xI9YQuyRdWjbz97/29qMHTy9WZ+NRjZ3M58vpxrSh1jO384U5RIRV39zavVG0K1vNl6eXKcXRlZ3t7Um8OIkVTMYjid1ytWrbZOaaxobJI0CfhBXHdeia5vziHBVv3bq5M6ktxthFMohRRlUp4E2sCIGZm66p67Gq9F1M0htmv1BMUQmsR0KAMpZsEmNPRM77pGCWr2jKRLk0QwliTH3XSd86gzr47TIwY1Qh9Zpilg5UrCoLQkJE6bvc5q4pgqGKgKnLHayWAqGlrAIoSCIg1cRZ5wITSdNRuWhaQPJMcbV0hLFbhCJkA/iqWWqSBFay90wisa5CWZQMrG0CIPbE3kkCAVRANx617ZyLggpfVgU7x86js9imUITVCtiI2asZJnXEXdcZEYP43tOYfCiKqkQk54C8z3BWpIBIouarup7sTDb3l/Pz1XLRNrPY9Ukjk88TSMyucABVRSICREQmNlFfMGczFrIaAGjSCMgGGvum77rY9xKTBe6alfUthbIoK+edxL5vlkU96tq2ZE+ODUkNGCmJITpRSTEiICOaDtU47FiSIMhA5jBC8ISSpIWh2R0YnTISKaEoqhlZTvIAqioNQAVcT1jNQG2YA2eLzevbF8qzERgMA7j24AwTaUJehxKGxA1mzQny2j5otmkMLp7XchISYbJoxJBv38wMMoUGXgsNeTKMjEA4+FSySgX6BSEmS1HD1B2Q0IAMOYNGVQRUIKXsfEBAc2HIkA22jsH4NMig5AZtKONiBrI1IFEuYkMksUySye+XAElBEWGg1RABejMzI8OUQ1E59YcUkM0hlCGAJDLhInDsGk3a9YLu+csU1Y7ns0ZABDykvRF7pX7Vc10V0z1re2vOAIXZzRdS1dw1lsQcS2oX7GNVh9qxtOfQz9vlRVlvunHpyyl5W8xPeqDNva0SeW9nszmfx47EsKonxfam9n08PN3Y36c99ugAnXNKqafc4I6YTVLImWlCaKCqBIhEkL+B15qgKREDDYXahJRn9shDsbbhayFp0GoG4HM2TOSxkMcKvrbiDPlFg3Vb/TB6jfI4GYKRiEiGagRAYKQARp4YvAH78Vg2NvnNN7rtiYGyClp2lgm6odj9l4QqG34YEFyGONDYcxM2Ev7SIwahNI+QIb9ENLzbYUQPD3vdgCZJnPdZjRAzZhZVQCRiJsoHDiBBLtTTlFIyTt4XWUXK3j0d0hBkIMPOGxiYiCLmjrWkaiIpH8Te+Rwp5UHlNERM2U4k8lqhU7M+9cOgBmBHYIqUbQ5CwDRYpix3BxFZ/q3crSSmYAkRmB2AoSlls2fsNEUDIEJmp5SFaAKzLCkxBtBE5hWjgYhppvaswVU6HGSZeGUDoQ2AEiBTMDNDex2BVFSA3NoiakLArKxAnhjJZcnACFGJGC2JEYj2bKSZIUy4ZmERAhoxgQPwljowA5PM0spQC1NZnznQNJmknHFBBLOc/82BWh08axknZ2BIogmQxEDUACnzlo0ImJNKylB3AwEDUYQ48PKy3cEMiRyRgbJDMcvXbgPJzqEoamaYElLRd0skTCmCcuyTAYm0SM45QujRF+wdgc/p4VyR5oKXJPnyl9nhapbpUWRA7FTUMjkHyEAJKTdD5pMrMTIR5WMBQFVQABheq6hrVpGKZuF/7U6SpCBKTKAKkJ10gKZgICLOe++cC4EcISB2Sr1xtLRstUsWNQe3mDAfuTleOhjuiCSlDK4aBkzmWDkEZJFEYMZ5P9ZVsimhKRNlNhXmYDYO2W0i0lxbl5KqEpOqxtiriZgBBYnK1CM5UCMkFYG8lpIEAEiZgLLKnbPXhJgkgar3ZZLIzklSTAKJicEXntmLWKY1UTLSZGa+8JrEEkCOVqFAjrlKNAFE9C70Xc/E/dmqGleuCK6YkMR23vgSgjdLMcIZIxEV9XSEmmIDCFaMakQyS2U5iiTQthxCWkYzK9gjs3pfunG3XMWuq8YTcq7rOpO2uTwFwyI44tCt5HJ2AY67w/PN/c29nY26otViZqpF6btGU9c5SLXXkrXrVhVhco4dAQMpee/b1dKRZx/UmACYHIBmN6mpMoFKantiwgn7UFVGXIxKV4XcD8CEKaVk2TmJCOAcASI5AsdVCI4ZmYIPZkAZQqjETEBGgMxIwGZqJoNTDsyxiyk5ZqUBgiiasjrOTEjBRInQkcvs65h0HVFWQHCFT9HWVGvou448j8CfPXty++qVImw9Ozu94MtQTzd2rv/kL/70e9/5Zr116+h49dad3cMHny0NtyYFHLu46K7uHvSC/UV7++qbabk6uLIrcXmB8ovHD2hUXSy7EujKlf1PXhzpXG/dvPb5ebt369rjx5+O9ze5LI+Xxm7SCvzk/um9Gzc8xO6yjfO488bu5le2wxIvV7YxHf/z3/sv652Dt9/Z/vn9H7/z5kF3fvL8k+dj3D7Y2ytq+Ozj+8vVYmv76mzZXi4XrlkebG+fHR2dHl3uTDdevXzBwT18+JwZmW22Wt14542u6w6PTj2herh+442zly8C4cePHu7fvd01s6ezo2vXD44OX27FacXuzts3jj9/Vt/+Km1tdHHe9f3G1nYUQ02pjY7LrZ0bDotyo7/15luAlQIwcz9vRrv155/97OJ0vjvaVQD0VVHXjqhZ9ioWQgiBhfukSshFWaS+k5gcgRm1bWN1ob0heBL3xpXbNsWnLx6tdNm2PZL2q1b7tp6E2UV7/vL5zdvX68odvrhkpOt3dy4vmjdu74KaYzo+OnToqnHZLlOKPWP45BcfL5P/1Q++JY5jjLGNmlDEHDkOztBWixXWYWNnMtnZOjo+mV1cbo7qvvJlXaaUkqPVIgYuYupXTbu1u/fy+Yu+FxNzzFF7TUl7KYpRs+yqmkKo5xfz0QYA0mRUo9q0DBfLBgzbJo3GIfXt4ny5vbuhUS5Oj/euXn3//XfmP1imVl0odQubZikplsFXk8li1fYxAWOKaujKugBT71hVFqvGlBJoNSrLysW2a7smlFXFLvVLysHOpLHvZ9jEo9O9q3vlOoa5bJrTlyeb0xEDdLGN2uxONz9/fnKSZoUvOepmUV6evYAEG6ONLp5E3yVyoUC0NN6kz5/fv3PjG7HrUiLHcHl+piZYFo8Pj7zCwb29l4fn0vuvvPmVanNy+OLo8OxiZ2vjF/c/3ziIs3b5/V//oIfYnUNK+vT5K08UXPGb3/vNj37yw1/c/9htbH7y8atv3b29v7v96NHTWe+CD68u52FnV3j6g5/8RT0ZT7cmbnIJlu7du/P0k8fXcZLmsxePH/nJ1jt333/w0ceV4Whj25Xl48cf723uRui6ee9d6FgxRm2lE5rs3oyrY3G+kxeljspiPDs/np8/dSJ33nijb93Rs4+LzfropD1/2n/z3Xuv7t9HopOXTYE3Nj
fuoOe05LG/uns1rCAKnf3wv/qn3/6Nv/3q0aErtm5+5c7p9uHHHx6Oni/2d65iu3hxcp4m/mK+SpYUdbq5MZqM5/NZOdo6PulZyVUb4KiV8+WyuXx+BLgxKetxWfLVuqcCV13ft/PTF7U7/fjV2cPPPxrvHBTbe6tWdvauS795+94BqJ+tug/+1q8mbLnYvuzOd3avtxfNg8fn9964cfjkcddzo+Wnj47f+eqbL4+e7e3diNCOrnAPMD8+vX77Rqvp+cVyOp0mwctlu7e3Mzue7V7dXdTN5vb2rIMguFzOP/74ZY1+0dnmjV1cRoMEiKnXrStX4Hg+n1sVqODw/NVitorXbl5ZnF6Uo3LzYHuxaM6eLagIXZe6aKtlW4wIq5HPAAEAAElEQVS3mkaK0eT+o6N6PJ2ORs3FCp2fTqbjyfjw9HQZY5hOfFW0Xdrc3nry6nR7dxPZSIyLAEiF8/dfHu5ubB4fnjBZORpdnC2bpp0U7vDVaShH43HNzBFWiy4iuKII65Vk4zJEg+V8mYwC0aiu+q6rqoJ8iEl9CALE7NSSIJxdni+XS5UYCg9qhBb7FPvOJAXvU+rJOV8EQIl9l+/RVDWpiggyF6FQJed9F1PfdV3qRaUI3hFv1qFkRAci0iYQE08QnGdisRxzyIv01qU+9r2oMZEBEpIheI8g4gz7pJKSC0VMVpZVantECCE0KwGiPqoviqIsU99WVQVEXUoMxEVhMaZOkBED9cmmoQrV1Jiw70UTJAUiQFMxh+iqKnZsAMw8nm5YihIVAIjRwCRX9RIiqiMURjVl5JSki+QLp2Zd22jqB5eKAZMzJMu85aIEJlMNwbut3e3d/b5rkkTru8vZXGJnoClGM/TsbCjkoiQSRavSee8t9hqTd57J9amLbSzGxWqxIqLUdRKFmJG4ma+a6qwoPKF5X0iMBNCtloUvI1MxHpmJcyStakoIKL2goYgmzPBRsBQLJEFFi4EKHAqWTFQGZQSIGDJaNgRnwiqY0biYk1VryAp9EbzKSYE8r5JsLrLhzzwIDNZBs+zEyY3mrwNla0cPIGRm0IA4BQNClIES/cXDDFUSoOdMVRgSYmYImsNFQJQX3wckkyTKd+moCDasUAOA2vCGTDN3Zpgpaxr2gRGJc7hPUW1wghnmnh0AwIys0YyhXE+eYZ0kUsodaio0KBhZWBLMc78M37bhZWmAfKOp4pBHA0QFU0AGJISkYIE9oOtjBOeZiqnZZVyeLbuXFy0CNVKQc3VwnPoRcJx10CMhF5u7QH7xuAnUoPHGeNwmLJjbFS4EqoKY2dUV8Hhxcr6zc9WP9q2e1HsHnotHP/uz/viF5/GSp97XglDu7G1vHyByv+y6VeOSVeOpCAZfekDtE1lyoUQCkASqjIwEJgIAkMeArccNDKxWylMytEyPGeBUmrLlwNYxwF8eLbn/PGc3B51DJVOFMwHaXiOLVMFgmNwNgUfMU8s8+xtyY0gIOW6iJoiQgJ2AAIc2VO7GG83mtrrAEgHMHFDWWZJC7uQmzsN8rT1lxTBLVtkogYPgqYM0NLCNcvpMDem1ZSYLF5YzTIAAyPnZ1m9f1ZTNZUnCNEGOMeYKLByem5lj36TYN21rpmWIoSwplxTSIHIiIRmpCojkRkFVUckpMRVTBMvoVWImJjBTlUEsVjORGKOmlFQzaVxVTJXA1JQQyHx+IIKZKJOCgWacsQGgqSQiBypiairMzHn+owZikCsYUtIU0UxNgNwgFiKoCiOpAZIzMOAySUJUsf41hB9MGSlX44FGAKNc3gYmkggZkEwjEpgakFMTMxOLgBnZll9VCPNCRTRhQWPyqmKQAIQdyho0BsCiaWhfFHWESBkBjmCkAgoKYqrqQ4GEaJRSD+uD31RUJQfQLF/DkAEgf7+SIjmXTzQpJWOvmmOupGBALsO3McfBmHMPpqkCkQGpQDLjwerlDBDWMHhC5nXmSwcBXQMTmSbqU2y9LyQJAPV9H2NCdClFJENwamIKFFNZGLPXJNm/aWCYBWPIfPRMBIN8VBAiO046mM5UlZkIEZksH4x5wWGAcwMQsQuYlPS/FUADgKiGGW4khpb7EVUNFIhwYJurqajznpAwx+IEQQCErZfUmkbTZGAACqi564yGeBkz5aUeRDKHpoi5uJxMlZlTLzlZrArIBJLxTjkQm5XkrBawJaFAlhSZTAGZVZIaZlE5pWhmIkkRRFVTZk2ZKUkyYERHMaaMsSIEUMtGrnwOJAOU5ImSyrCmoUIMLld9aoIOwZsm61c9ERJwCCF2vaqZGbEXEyKWmJCYXNG3kV1IFh06QGUuTdt4vhIvoQpchAKA2Lply6FQSbGP9XTqx3UYFeS0b1uF3P0hqVloMhNBchx86jtC9MErua6JGJx3qKRMWgSKpoAp9alZxlBXzrsqlKuuj103OxTuVhu7E+d8s2jWbjSkwEXli4Jj7ElN+ziuXIrcLFsyLRybCmIMDhGx77pQeE1CwZV1kRC6tlezKoRRWVZFNZlOvHeIYKYpZcadppjW4j0SERL5EBxzCAURsnNEpAqEaADeMTODKWVKkeYWAGNkZhZNuUTEAJxztjYOqqoL3rKsChrFNN89m3ofwAxVxdRzWKf0hxupx0+P968fvPP1Wx/+/Ke3b9340z/6CUcop3XfnY9p9Xd+45vdRXN4/3405K6oRqUEc1x4DuOyqqaT46fnZREo8P3P7r95787udjW7nG1tlLdu3Tt/8vzo2Yvv3/vefF48Of35QmafPzu+bOfXr+z2q9U7d67DJ8+Z9DB1nSbp087W9nl/OK4rmZ83510QFzX9xZ/8CWN/dPkyPWvvvfuenp98+MMnIKGN84ePHn79nTub1biu1TvukiRIJs1oEixOCPH05NXWbj3Z2Hz+4PPx3hQBurYH6cYOqebNsVTTrdOXTyA1PLHZi6Mb/nYaMfcQpbt9/cbs+CzGSDA/2CmfPv3w6pWb0IYY+yTimKVtmxUAhbbTJDrZv7Yy31xcHlzZpoKLzXLZLv/8j39///qbN9+69slHH452JqnrL8/OpYshkK+LrmmK0qc2Ok+G5oInh0l6FfGF72Lng1NlVpQenjy+3/eLplsdHR5fubZbTuvo1I1d1RVHJ8/ffvfGeFo8ezxfzeOzR7R5cH2xWpZV5UPoVM9Oj7vYGoGCgMfpzuSnP/6jurQ7t950IXCpCtDHCCbYopi54ESS9HE03dzf322by9VqJmdxNB4huNF0nFSaZlXVxXw+895tbu9dnr+KqUeLqtH7AhwkE/SUQLvlPNSjmDofCnDQrFZUFkXhV8tZGUI13Vm1Sz/mi/nFxt704uz81bNnmzvbN65dIeaXZ2eEbjKetk0zX8XAbrOuUXTZxyapK309Cs70+OhkZ3erSxqjAmFKErvMMOXYK1WhLKvgOaU+mtab1fxsFTCMJ/ssTT4K5s3psmvjq5cH+3uTjSvnl+mzR7+4jGejTbd9Zev+Lz7qI127djA7So8//3zezQPa1d395aVfLi/6tNjfOrBeDOTqjWtHR8cSm/0r1RJXpUB7etqe6
WrWLCvd3nlvuo2e9eLJi/FO2rwzMlvGxaX0nQbc399evJw31k929nTe/PgHv/v2G18lJ/ePDjttb9y7TUnOTs6O58trt27ceOfK5ub088//Uqk7T+ITX9ndWRwfyauTCePG5nRxfLqxu7U0/eFf//7d/TfZiw/u7vtf+eSnPzpfPZle42I8/vTDj977yrvNiR0tL29fvfvHf/UHb105uH7tRqA9dr5LzXw139meNsvl4fErr35zd7Rza6vBs+5iWfhic+vgfDmjBk9nJ2+8/d7e7vVPf/rZ7tXdn/7sz9i7eqs+uHWl71Y7ezfcuPvspw9dwVtbI1uebEzqRbua7lTvfv3ugx9/tL+5p05fPT1++60bbV8cHl++dfuNs9OjJq7Ojy4A8drt/dX5qgh9bJfvvHf3Z5+9sjCuN+qzZy9Tt/j61755+/q3i53yxptXj44urx68/c9+93c/evIZ2wfY6pOnj+9t+uVs9fZb469/4/YnTx9E8k+eP/vmu/eK3YNnrz5/9703/+JHf1XUd966987HD5+33fl3f+vug58+nZ+eBd6Pgt1qefDWzX7RfO3dW48/fXJ2/OrarbfKYtTOL5vj2Y27+4zVi4fPZHvThfD82dO0XO7sTjdGG82s3ZiMUiOHL17evnv7/PDy+aNXzoeuHe/sTs9enp5Javt4cXG+c22fi5Id713dVpOTo1eTzUnXxqqmva2rr84f7V3bPjk9uTy9nG6MVKQaV9tbW83ZeZLoHAbnzGE9qo9fvkKAqqA2xbZbjLcnfTObbI4uXxyNJmH3yt7nHz3AsugTr2aLm2/ePn51TOQ3t2sY5heWYuxFulVXBrp544AB2uWSwFIU5xigNEW1RpNedM2qb+fzZXBcFi6ltGoaRhMEcsyMzlXBc76t9IGJM9NQHKN3ZYyRnVNF04QZoaIyKkIZfMFYMGjfgxogqImIOkAVCZ7zwn+KLRKLpFyizIhm4nzmAObFeKVcQqQmKYFg5bgBg5TQuVFZClqRwY2xr0IomAomAlSR2LYWU2CsQyiIJoVn09XiAoliL845DKigRGRmIbikIiKpTyGIRXGOmMjAYtt3bQ8p+cJpSqaWYiQCiaKSysIRSuxbxwaGqzaBWUop37KKiCTxngEwSbbloyhwcI7YlSUoTkabwfPpyeGiv2AE0JQ7tVCSR3SmwRFIil2rmhBZYoeU55upmV2MNyZmSS0haupaF8pmNpuMRyRYVSF2AGixWbTMwSEkJjJzlEBjimRJ0RjBJEWNBC7n3lPsiIKiqTINXh8DAjDIS9pgmWwC7Ag9gxCIAGTEiRHmrlp8fbtvg0Emc1Ky7IFD0fgazTOYIgDWATVDhWxkMDUcOultcE8AmEkWTsyAUBEAjHGdvFFWTirAPOBCMK/3DjrCEMxZ+zsYEdzgaSKyITNGAIC5hj7vQLZg5PQLoeZbbDMk5sAGRgSWBIbK+QGemtfkQSlngJByogpMFfJNYPYsIa4L2cDAiHltbRmsNGufDKEqZF0PDIhTEkKHjjPJidlHSaAU1EIo+xhRgUwxjByOfOoXs4tNEk8yqZnMdfO+tb49n+G0aMuF4Hj74N3Fi4/6blE4LowWyxZ7s2iEXFYg89llSkwwa1tfjTdujS9evaonV69evXs0m50+nVlBNOEmdpsHVT9frro0ZoeLVT/rkMuqHqVVw0XJnti8poh5nod5AqXDYDCjvEo/qBmIRFlhy7MdRBhME4iYJ9zDXNEQyVQyB90MIROpcfC7rQ1ctP5sIddXAa6lpEHFBMBcWcWwbuAbxi+SirFjUyNApHyLTlxXaboVNzatQOkXPvgM1U6mqAYEIAIDwQUAyUyGIY2DhWcw2mXC+jqxaXkljBBUcD30B63RDAHVNM8xQAdX3OsmQTXximakooCsYBoFnRNNBsrOG6iKiOROqr5tlm3TNI7LshpNRz4UiJxzf2syBhiCSEpJwVQkZa8cEpJzROSdGySswfJleW4bU68qYklV0TL6CUwkGw2Jc94wGz3AOTBNRs5ALHv1FIFBJEICJGLHAAAhnyYgScy+VNUkqadczcYgJmiDWU9Ah11CE0BDVnQGPp+yLIu7mc+NBmialAg0GbHLR2WuXMsP6Cy1sY+mAEYEhKIpeeJhnqcCoFE7ZY0pAhKwqWmWd5kIQVUjIqQkTM4A1SJIAiLLBiUEdKySGDGmyEggYpb1ekkaAczWXYqEqGaZPZ7leGSXcvUjZN54NAARNSYFQBXKHkZVYIwqCoqKBCh9ImIzRVDTaOy62KiBRSTn0CB4nx1j2ZImYAicc81InhyIGCCIJMmS2sDVRUl9Til67xE0hIGwrAJEbACKoACDls95xKLh4ADKJ08jAEBFRTOmHFyzPB0Xk0GIz/IQkq1LnwapaKmrQj1mDdpEIJmqyuAUzKAjEETEjHUmRJc9IYqWBBW067TrtG9BEoqhGhgQUA4tEuZOesuev6ytgSgCGBohgurgj8ynrGHgAQ/atgEaiOQTERFCVGbMhbCSelABSSZJNKmKmokmAxFJaoLeOXbaKwaf1U5SNUloSmAk8NrmS+zI0KICGSOBmmePoI4IxUiVi0CIFkV7QVFICGYqxgyWIuVTsypYZABL0QwCOVNxCKlrHKNKS95S05Jy1C5XwrnAsU0WY1nXMWo/W6Ehu1BVY88BTEAMnJOYyACZJQojUShUUr9cFXU9mo67ZZNNg0jofFEUvu96U2EX0FmKnZmOJ3W9GY6OTs6lwQKL4MqyijF6TqggSZ2zsq5ib8TWL3tP6BCrImQYGhLKehwhk4EhUVbWixDQqOukrMN0c1yP6/Go8syEAEimoqqmGQtviOSYmZ1zLoTCOe+DR0QiQiTOMjQiAjDREAJGYsy9m1mYJQAPsO6NzcsJqpiDzAL5efJSXC4VISDIJyXE4FnVCIg8Kw51gAe7O0zp04cPRtu7L04vkuOdzcnWza2f/ex5s5y5yeRo0b73rW+eHh5enL6iULxx48bp+Vw6wc2w/7W7cXJubRt1tXflyubORA22Nnc2R5PDjx4awmg0+vmHP+k72TvYuvLGQdNLqf3J06dV5T/59GfNeVdVo6+8c+fw+PD4+TPbXqKni8sjEZkdLzd2ptu3pqWUk1n5zV95ZzZbbU7Lh4+be3ffnV12Dz5+wKjnZxfjjbLpmouT08I7KoITSc2qrCpyPG7ayhXtbMFKXkwZ+z72y2a+6rY3d4qq7rsGoN26Mtm+Om1FXLvq55eTipuzM+36ft5O9q60izg/WeqkmKwWKFzU9XRjNL9cIWE1qRR8lWC5lMlGnazf3Kmc01DSqu/Pjo4mo7H2y88+/dlkY+w81ePtl22XTNG0Kj0TSNcTQhm8ZfMnsCqVVSEquS6hLLmo8PP7P3r8+cNijJer5FBGwV1cLDYn2+p4OT87SceXl0sRLgrXt/N41IOmzdEd5TCZTsaTDSY6PXoWm1UzuxxPxne/dktg8YN/+V83v/E7b959oyxr50JVhiiiop7AB+zapArNarGxNZ1u7p8dw+JiDqLIGjxPpmNCNe1MpFnM
t7d3p5tby/kFiIaCRRUAiNBs6KRU6VwokcyMKHAxKgvksgzSLqVri6JoF4vRqESD+Zn5ACnFazcOJtOSH9rYITlYtWFjHGLfqakPbiJxNvOiyRM5z7ffujFfrji4ZD0zpyR97EVkVPmuaeO8C4U3wtRJjJGcmkmM89Xho2o0+CnOjy7GG9Xx84uNSfnw/sXW7l7UGLyHtj1/9coSND2+OlkuLldvvn2Xzn3fzppZK+rYbYyqsSRYrmZg2MTZaDMgxKdPX5DnK1f3Dxfzmzu73J9eHD76+S/+qByNXz4//N73vvbws0dXr107PZ9vQPrJH//R7sHOfH7sVXwPXiAiP3j0ZDTdOpvPv/2dr5Ufffbq6eNUblDh9GI5rtqXx0fPzp6grq5tV+999yv/4r/5fYc+YFWN9w9ubaRVe3F2vFouDvZ37i/PNiucz6KfFk8ffdb2sQwa23h8ejKp/fLkGLS4eXfX1/Pvvv/Vs6dPT86ens/nIdSjjfF3br6/mi3jpFdNz+8/d67bPbh+80r9bPbwk09/TuXoG7/+rfnFMvhzpZM/+dFfFMQ/+PHye3/7Nz76+aOwtfk7f/tvP334/On9h7feuhN2rxIjO4CUTg+PF8uVH5VHD061MalTSjaqRseHl6tVM5lM5ucXItY03XQ8OTs5T23qOzGUclz89c8+Xbbp+u7O1bvv3Hdkqbv21u3f/71/+eada87i6fNH5y+f3jzYn44rSrYQ+eC3f8NreHX6bBLw5Oh0izeu3bpSN93R5z87OWuvXNkumW3V9hcnnz94tjEa1TxZvJDp9ApK+dY7bz57cbaxOV6dXZjByUkTrbh7787sZAmu3Nje7nsBgePT+Wiys7k19XV1+vKsWbS4OVGzvrPLsyUCXbl94+Si2zjYu1G4y5Pzy8PDUzEEd/NgV2erO7evm4Pz0/mFxdG4blM/2dq8dfvKs8+faT87evXZYjVbPT4L9Wj32n5ZlgoEUWYX5818WdXFlYPNdrlqmk76BlSR8OBg7/Jitri81ODJFU3bb0xqMJWmu3r9Stc27eVFjPr0wWcemMvRajZcC5qVIC0JVaJyqMlTa1El6XxZlAUAxL5LMfrS913q2jamqAAJqGlQJOsgmBtyfFEiIBOFMuTrl8jgvVdNhBBcyNOW3jR1UfrkgEajuqqCpggoSiBJTDUvzycgxzTwkk1MKapISmamkjtuEdTIcfb0Y8YPq+bb+HwPjwYhFEZoAKlLZFSWIbeGErNmQ7+KCopIcA4IiroAj73FtIwI3IuWdU3eA6D0OsAuAdATOcdMjILoREREY58IUSzHA1gkL4eLqVqSCOoYGWCVYl7sUZGu6wEsdhEBJcYMNHC+iCLsfRJD4txYA0ShCpPJGEtfaB3bNptqCNHAHDk1IyYKHBcae6tqJyYxJUCSlFSh75sudUkSEnZtcoCt2vLysqjHyMg+mKW+bdrFLHjHzruiSHlpEjBlJrkkQjC0PgkRWb5djoDoRRISw8AagjWzBXPaKd+8MDtmZyoZvApmhjoYhfCLnE2e2a7tHq/lnrXoM9Ao1vYZeK01IcBQDZynRvBLjo/8c55ZgwkiRh50ppNp2J610KsaEvIwDR/eRk7v6PCiGYbyepdAhscOSN3MRIJ8Vw9DEdIacUyZhyOWMzCSzSFgqmr2xRuX178FJmAGIGKQkcmIiDBQNbK9aDBOvQ5QDOrFsIMKkMuG7DXd2UxxeLyqDJKrmpgYMyBT6hI7nATa3Jiuej+7nLWrzlC5oPG42t/a6Km5OLpgcmE0rQqW0YYPHFcrBVOCLiYAOl+umgQ7ddHOexBzQWHWrRTGO/thewuF62pXD4rdK7vgHS1ilLxCb2ezWV2E0cHEK6KmwkHShoSGqZ8BZqMZAOaOLRu66jJhB2wohVvbP3DN5TFYu34G69YwtHJYT7IcCF+EBwlpcGkNqS3NH3Fa47RwyOHgQGVHHMjKmQk9iFJ5PThJRkqZKpCQD5Go3N7hapy9nLQeGIgEOdmWPWI27CVCntevx+P6S147o3KGaHDDQb4bAxsIJ5hZWLAWQA2zUwHBCCMP1wLDnGxiMILXvr3cmIckYkgkaqqggH2Upm3Pz05F0mg03pLdjY3NUDER5ewegliMYLkmEJIMKVEiJmJHTOyQHBEZEGVtzswUc7pOJIe51t9Gfk4EhAwyZwNCZAQ2o2zwyKQRU7A8AFzu1AI0JMh4L8p8pCywgOZJOSIygsu+vGynGcKvZgBKgA5LsJz0k7zunonmhJrTgEyDrG3ChASKgM6MNd96mpiApgQIyKhmKKyCiE4BEVkVlRgFCYaKODRm8IBKwKCM6nLxmQkioA5iApAhoVPMykV2ypGlLHmyiuR0ZZZ+gICA1UyHsxTno0MBkEkRs2aISAZmmMFqaIYGnLu5gZlEOGO5VRH9MKCUjEgUTdnQQHMOB1NSzoAi4LXha8h/IjlURsdJO8BYOOczmlyV2KGBaEp9NLGUzCx5R/kEaLlxUoGRNEt1A1Yu73NWklWISFRBeQj0IqHLF4ahEg0RsjPUDDwL9PDl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbl9uX25fbfsQ0mvb/79+44llWKo6pq2jRb9FETo1RIRTUqx3u+HAFVgliGkKKwZ1dUMUbpL2P3IqbzJCsiSH1CorpwKNasxMiJSi67Uray4sCkShFpe7JdhS3jAnwxv1j1ESFKOdm+ev3WpN4KRdgr4uWzT5f92WWzBISuaTnYZOTaRew7c57apk0S2atKnDfpvIlVPQplmFS1Q2qahr0C6mLZ9KJlXXZtigIpmSZLCogkXdSUBBER/dCVQZlohpg7JdTAyBuYCEJRlMiBIaN3Yhm8IJlTlK4urBx5FV4uBMwuL1ZtF4uC6pK7RqMCgIUiozap6a0uJ55KVRqVJQh3HSsVo9G4KKsQKkSHQAAsKl03Pz1+kVKzWHUxyaj0k1GBjMQcPBfEXW9NB77YxqKejDaDryd1HSiYKmmcHT7+4R/83uJ0OXL1dlm/dfvKzmhUehbEVdNppiYyeFRRczWPJxsbV/bOO/fx42c92fb+GF3pCuedrRbzUIyq6VbgkYGvqo1kRRiNiAI6bla9s5T9veQcoANAkN5iK6k3AnYFOU8AGdxoJqaGiJJEBNAXQKUrRsSeiJgw9VEkmYkaOOeISCSBialkGywiIXlAQhfUgJkBSMwAUmovuvNn7eyV9w7Roy+APCETewBmX6IfuXIbXRl8OSn0H/y9DwDgf/Uf/ffOz09HO6O9UfHxg+c3v3L7K9+5c/L0dFztP3xx9tV7984++XQR05Vv35q6sw9/8BFNuZnJ48dn061xP2/cGCd18fLVCXvqW+CyuH6wu1zGy3Y+mdT33t777EefL1dy82tvCnhaxJDgpz/5q/fu3ekv+3mf9t7YcnXVzuPlKl6/c+vJ5ye6OO+pHW+BLdpXSy2nkyefPz+4MtnYLWaLVbtafvs7b5wcxllSpNn
Jy+ff/+43H/7kvIvld37zb3Wr+Pv/9R/XwTfHbR/9xVnXxM6Ualdsbo7aeaNAGGpX19ub49np2WQy5hC8900X67rQfjW7mF/Mmqouqqr2PqRVZD95/1feo655dP/+k+Ojb37/ax9++OPlfBVTHG1NVFLf9oDucr5ij4HBT0e37hwsjs+jpcJEQIBRhauqRpYIGigszrqTRQ+icdUVjJOdDe85QCCunNM4Pz+7vCSM/8P/wa89ffjq8OyoTWLkd3fGLw/PEWGVdOdKXTo+PF2dzxe7O9tbW9uA9PLFi9Fosmj7aVlsb0xOXl5SvbGF4exy9d67d376k09vXTvopTg8uVzM5B/8L//D7/7Od4Or/z//z9+///TJr/9bH9x58/b339+LyyWQIfTtspvuTGNDq5VWVWgWzWSrGI1C38QQPBCKKIES8WLWtI34kiT5asSXl5Y0XdkP7WqlAKEMEoMjyd1ELjhNmhs7soXY7IveDVz7q3H9swIowLKREPn/8n/9k7/4y3/DcNG2q9zSUE2CghhGRyG1y1HN7Ony4sKRXFzMytJ551Z9GhV8cXqhZuO6/sq9G7e2R4B6enreGf7v/9E/BoC/+Dd/2C75stGrt9/Y3B2vlr2CqgiiSoopmWM5efHw8f2PkvQxprbpmPH5y5dX9ve8L8kTii5n85OL86IY9WpVwK6Rtl/Nz+fsi42tCYu8PDniQGXwTK7rejBFlT6Jxn7VdNWoMNHZxSLGyITJjMkZYlkUzpNIUgEiLLxXBU/Utc35xbyJ7WrZEnMSbbqUwz4c2LMTNcr8XzJTafs+qQGBZwqO6yoEJEL0yF0bzy7nXVQGDM6VZeU8L1bNYtUiW4xJRZEIiGJMIqIGtEYeGCJnKziQD94MYkqIpqKeuSo8GrdResXtg1u33/jqxs62L5xzRADI4B0BIhO53PvI7JgAcQjtDrZxMJWTo+O2b0MISAiCyDkCj4CMQ+cTOyIwSmouQN+nXsEHSP3q6Yuni36RRNkzmcYoCdCXfrVS8eP/2f/o39tzTpolqIGK9QmkWVxcFAEZYLVYooO26SnQ5sZYemuaPoIRW7dczlcrx1iEUFSjvouAWAV3cX7JDrtle9ml0aS+PLtMfSLUpmmWy64cha5NZrZaNcxkAKbA5DiAJTGEoqwndXl+sVi2XV27btmumo6DA8Rm2RMjE6ya3nne3Njc2d4wBRdCIPaeUx8x982CsaO+7XNbbhRR1ZT6mMREVVST9kmC46T2r/7yzwHg/a+/CT0uRDc2S184ydhFBDQBU0YwoBBczgqAQtsb+dB0SVRFJJpUdRlqxwV7xwFVUg/IKeGqTXHZdfNVbDo2y2w+dgSYS7CyE91y6bghUfAAucubGMAkEYLowOVLKYbAJfuUFJDIU4pJzMjEEBT7UaCv3X77f/4P/9d3r7xroq4qRYlJARQQVSWnTtQgpZR5rogUVYsypD6mJBJjqIOJIlJmpMamS0bPX16+OGx/+Ec//uM//VlLvHfr2mRvSuUkw4u7vjeVVb8y6IqKuYbt3XJnLN5OHc+c610ABSdMxgJA0zDZqjf2dvZ2xqP27Pj+01efPXz86Nnx/HIZ5yuJkvqIYKAKaATmEHLLkhgwoWMEVEm5ZxVFNCaNUQ2AGU0SoagKMgFwSiIiTKTJkPMBkwm9pmpimuu0ypIm07oKpal2UWLSpJB6aZvO8kcGoEOiBXLPlxoMzGRVW4enVM0AmDh3ZAMAATjPPjgf2BOZWRJLAlGHeFkIFIhSEiRWpJyukj4lUfK0sVGEKvDYsx/HRMk8sFMTAuhTEk2WIkiCFBnAgYJq7CUpAjMTGrMfFciMDISoBAZAxF0TJUoFlrr40V98DAD/6f/3v1IsADPdwtbA4yEPh+ur1BpLA69ZQK//Pf/z6xDZ60mHvUbKwJpuM0Rbvmii+pubffHfIWT0337e9QNep+/We7AmONnwy3/z+V+TOdZ05HWgbyBfwpoUY/kbtfw/Xwf/Xqfavvj78Nv5aXQoI1rTM3Mw779jFpYTIcNP6+Sc5ZxQfmGkTO3VGGPXLo6P/+KP//DkYlFWFbayuzO5cfPG3a+8Nd0c3b652zYRELxjR44ct11yHleXFw8+fQievvH1dy5PzkUFg6vq0kwkWkw9IzjngFjQVZO6IDCVlJIIqGLXRQFLim3UKjChkomhnV+sesOTsxmZAcNoPGo633edSoI1PFhSJxJVogGyK0M9KerNUNShqJk9M+eWuoE/Bbb+Hl4PE8P1RzT8sf6KBkQJKKKS9pRaaWaSopGHUFMYA/k1dCk3/WUYxjqBOSSqAAAMc2MVoKHD7n/zH/4vAOCTf/1/RgNARRDMTU2QU3uGayQTDr3wQwxu/bRfBOKGrOVwn6ewjsT90nC018eO5cK0ddyL6IvQJ6yjd8NefPH55NdeH5TDcYRf7NsacvZLBxwgDHkqMFCDXIk1JJNes71eH9e4fmMDd83Wf3v9GSKuWwuHd7N+b0P8D9es8QFZlMe5ESqCGimP5Z/96Ef/h3/6e7MOtAGQOFH6+9/7lf/oP/j7fXsJGoHRDMFI1y89jIv128Ghrc9MM4qKXr9vHIDfw8cC608JAV+fF17jzPLRBr90WhtOLJnwkunbONSxwzqaOXznw1BAFUV6PbqGHR0gWrkNEMDW/4E1cit/lkSZtT3EOl+fEF6fr15juRDI1Iwypsjs9S7n71HNMqRL7fXX8EvjLZ+zEE3t9TtEQlMwNbBI4Sv/9v8WXrOKipGY9JuTIDFWJYjj5SIW3tXkq7Li4JyDJrZIRUoICYmpnzXsSDWZdADJUodMZCbRll1kRvKksa2CJ7AUFTxRSmZOopbBjbzEbubKCTvvXFkU9WhjY1Rt7I63NBoslpfnx69ePruYH3cSy7JMojBys9n5tCgQfeotNb1aQpXUd5rUg2kfY+wToBg5H5L0fd/WVelir8kKIrZkgXowFgAEdQAFt31kYhNRASZ03imAiqgoe9dHRSAzYFNUVUvgmEhLTwyCkNizI7F2FYX6DpL4CNT3ichMoyn6AKjStyl16AARsQ6V5yRdx1SYmEZhLpzzFjVBQpC6rNgXqU++LHxA6OZ9b57FFXXuuo5NU03Hk3HoVr2KVNWojaliZ4qxTRpskZrJ1qQsfGzD7o7HhRaqm6X3KI6TcxRbcRR8HQSgXTZlGXrodjfqnd1JmNSjrSsNVLPVqQU5m51u0abzUBRBRPrYFH6TjRAxJSkodGJ9jCZSBI6rDok8shixc2DaqzkmQ2JXuFCopJz1VWUjExUOLriAXAMV4EoiJ5oyY0Fz4lI0QUJEiT2AEoIBMLKpmikzgyITOCIgZ5opcM6VVZFKiT2iOIfAnpCRggkRB2RygSiwAs3TkHG/87U9/nBVFr729u/+ne/84uHJR3/wl7hY7H7ju995/9tb2298fefgH/+//uOEH7+5M7p49Plku7Yuzp8cb47uff7wsJ6WI+5v39nYvTb+0V++mDfNaorjDf+93/76H/6LP3/54cm4cFHs3/p3f2P55OgP/os/XcXlvbdufe
z9I+12f3TutFmZPldSt9s2nbu3fl6nNXz5886tsupVVtdrEZ3IVMDU8vbl88Kef7izNwdnTl3/2rf+mjX//tH3186937D6+/8NyNV16aDFrtC57+7qOPfvMrL7+2Gqyf1xcp/vL/5H/8+//5//P5z712IuG3/tVv/cxLn7vz6M3PffGrr772l3/wzqNnX3p5//K12QK+++337z26+Pm/9vJ3fvudJ59AMav6dHHjlc9dfunq6Ucnt+M8nz9irhkmRVHNi2nClXcTT937H74XCNN6c3BUdCcXorjSeLi49MGHH3/u5WeXS3vwZDOvp8u2q6aT46NFcEFlcnKx+fDdH2Fu/Mw6BkzZ6dDlZNqt2vXxZJasmu5dOz95//HJvWtXXwCQk4f3DvYXqYvDycN02rqsQxIntH70OEecVpO2TYB4ugGOs9zjfqg0roOfdLkhz0XgTFCKff8P/ijcuOYuP79++FbhD95+545G/uDND3hq3lXNcljs7d+8eemHP/7433znR3uXroTD+dmD5k/v3nlhtfnD3/jNv//v/sKvvPr1f/4nP7Aq/82f/ZaYMhiqIbMpMod2s5nU063JeLt6qFu2AgEBliHsOjJGs7Ftw9agRFiV/lvf/ELKPWBenXU/fOtkcfjSq4fu7sML6oHn5Xc+fvhzs6nbbEKgg8Pw5k8+/uW//A1VoRI2m+X+Ddy/vJjO6XTZfO/tj3/qqy/uH4eqhD6qD/Pf/ZffdwJGzd5z87e/+ybpzBVuEFjsXzo5P93H+ZC7rJqH7BRDXRQVt43eub+MqfHs2dix7/qunhY5aTPIwiuI1lXRNJvt9IBYYiI09k5BxoWj0a6vBqKKTAYUqiIbTgfuNk3XdvPJpADqMlSLsOrktF/3axMDQHfR6OVLl24/fjKpKocOBIhJgVJKpS+TZtTcpViVhafZav0IiUAAweUck4mS1bPyzR/fO6orJlRCVRhS7vpV4YnJmIw9b1aNA0w5kXcANuS+T7H07Ng778c4mKmmlC1lH/wQh6bbkC+IXF3XVVFAlsq5wrusSQ0i6JASI7hxCOV8KKphGDy7PCQVNYA+95ZtbzLbNI0CMEEcBgADpJgGEclgURUM2UzUnHdpGEABVMEUDcdsgiPlQAgQNQagVk+JNcrA5NAM0FIazAzBcko5i4HmnMCwzzlaSi430ieX2hgh+CyZiC1rYATRF69d+9Jzz69OVqRGzGYZyAAYwLVtw4TOkSg451ZNe7Y83XTd+uyiPV+W3tAsBG+ijFQUjoiJWWNOGRXoo9u3ntWrk6ODIWvTrVxZCDlGL0rIJkKmltoO0X/xSz/96NZ7d27dOjq+NPWzhGjMKrnrlkl6KqZtGko3+ej+JzV7A5vU89rBnSefhLJqhySrtQr0/RqossEe5QsOFYVgooXznUARysxuenzz5KOPCgJCTikt6kXXrg0AgdC5fugLg5hbAEfgLBtRYAqb5SkQkyISs/e7Z0Hcz0RRK8ed5YBmMSN5rhzvQZ5xBF1FTYKjMBDYmHVWB2KigBjcoBkMcjSNFDAoi4EhqOWckqBjNyJX0cekri7Ipbp2fS85ZgHMsZ3QAXnfZ61d4QxFKkOCmGRYL4eLwpqPT06rSVF3NYZAAPW0zoLSKhQVFkXhnWHygQFsiEMcct92zfqCVRcH+0PfGWrwE8mp67u6qkCbrJ1CzpCDr6OKxn6zPi9DiJ017XBw/Jx46GUwVVHgqizqkK0D0Kqq2tX5xfnDV15/DQP1fWsKHEpmL5LRJDC5en509XkGsSzy5o9uXTxZJVKYeHJt0yakDJ13BTsrXRAVYHAu9Lkv3V6fmZFCse8P1IeqXz8YAmTLVTFn58iXRXCZKjhYeH9l9eGFg8YHQSQRyKJMZUJMOakaKGVVUgvssqoh5gxqSIxb4QzQEJwnZsdmHkaHIERRNTDiaCKkvgiAFM2alBVpXMUm2k1vBRQVibJpBholJ8k5jdhUYjT0npEAHUdASslh53PUmAScB3TTA+BJHozWm/W9u/16SWaWxRNns8Kz86Sqjr2Iibpi/7g6mGtsnZFzdVIKs4V4n8mZiDeDpJYbkU1OB9GAZCLnD7ulwqzPfuOO9oIrMAEhZ+VPJ8lP/UK79eWd0QZgy+h5KiX9/4suu8kYoiF9+t2n7iHEp5vdbnNs8h4TYVuJaUezHnEnALDlIo0S1c6XhDt56TMUpC1+Y6cU4ae0pE8LKba7+alj6rO7v2v+tp3/aFvg9fQd4/b9PY3VmQlaVkuAohI361s/fuvJnVuX9g9e/4VvXj08uvvx3YcX7c2XXjy+fhSQ203bdUMqiqw5pX5RL1xR7u/tadqcPj7f9A+A9Mrlo9l84Z07v1gul8v5rCZXkCNgZ0gh+K7d5L4DQO9JAbKqL6swW2S19mwZm3XTxz42IYTZ4V7pXbvZECp4jsmmk0XXnntX1dM5g5EBEWyabj6brDeNKpTT+XqVBpXgPSGH0vW9oimxN+HzkxUxOOeWF6tQlM0QpxUUdZFWjSuKqLLruh89RTrqP+OB3aJ2x1Nr+yKAsbtrJ/ONh5pw5Ozi0yM/fsq2E/0AeWyYUiMw4NHWgrglSu4GMNsCLVHVLBLNRERATVVHshYSAygigdouVgZjDivn0WpnzJ7ARDONihawaQYEzZmYn56LYgYjuXAMEo3SFSACARkAiart1EdHNMagdLSqjWV8aEykZgogtl1WIuatRASoI1eGWLfF8yw5IxICqiqyA0SR0Ta1vWSIEE0BkZ2DMT+1U/DG7atm2HmIRmuJqozLoqMrzEABRnRR2sbxmMZjrDomy1BVx+CYiMDoXTIFM0QedTFiRqBR/9IsBoqMgIZIgaFkYARTFdE8SIG4KA40gYqA27r4mUi3xXCAjAo0hsRUs3PezJBYwZiQDAhQchrb8MYKOSNApVG+lCyjv0ZAmN3o/cmiYIpMT5lYIgm28QJFYhoPjgiOL5GMRIBATGo6ap05x22wkgnRsmTULSxaRZCJEDOMWKydY3H8mHh3Z2EGgZQiuy1fmpxTyWqmmpHJ0GhcmmMyVQVVG9nVGHXwxECcUwJEZh4XwGFscgMDFQNj50dnGZoYKI2P7FHsJ3hK7HK7X1+6QvfKckjTMi8sd0lbH8y5i8Wzc4cNEhRBB4Eh50movU0TTNVQEcRgUBqkjOItAaonb6GqKjsEzgZTK/a4nMcsgAoQc8wxxZ6HZF0SG3JKg8S+2yTzbq8MySyUxIltWu8zV54W2bxAjtIlmg2ywqLePzgeUrPqO7Te+YhsFIBYcu6StuQKRSkcsJO267reUi9oIXVJ+/Af/Yf/29XpSd+tZofT+aX9x5+8fxgmzOX1K8++9dZ7BWTN3d/62a+/Gx/87vd+XBo1TeSAs4ODoTPMtO4y88QF3WzWzND17fm6F/GKlASdgnOcmt68ePZJZT49JKoZVbuuquq2j60IMQJiwTA/KtpkDMQgsYdsmGKR1clEYkoI1PXKUPqwz8W+I+9DbDeZPAqDo+DLA+dnfWd56LqmJcxur2CHq77PWSZDs+8qms04aOFw6Dr
JebNpU851FT46OX374cWqVysLizKvitIFyLntuzbGjA6JSFh6SSZMhQqWKMvmdFJO5q5s47BenqGbhMlldccSroR6Fo3JFUClSTboENZJNhk7U2XeZ85GKQ4tGKtMPV93dMVhZQqEbUpJJcXUEUMWQwNCNUoFEoIiITkGdgSmRCLjkwEIPTJmFEbA7MlVmKcezDSrtpbXOa8tZySWnFSzDy4D8DZXZKaa4tZu/eYfvd104Burn+Nf+POv/dp/9U9kT5rHMsPNyWkTKnjyYJlWw83Fi05K1fho012dL3wRPvjRByQT3axz8ifnlsVHphe/9vmP3vxIaXp4ePle+8kv//JP/dnv/2GpVb5oc9z7s9/4Qbrg2fVXKrxWXd2/8mx++1/882dhL9HJ//o/+mu//rvvPfP6F1+79vLtt3707f/2n+YEb/zo3S/98tenU7o/LP/rf/3mf/y//IcP3lt166Iob+j6Rz95+5M+2Uuvvvz2mz+++frz6+WjTaO51Cba2j//C7/yH95/eHrrB/8MB5etVdWz1dqXFWhO3WbYzNgs1KYcM0DsYzWBWMTDq+XzV595dPLWlfmBsTx6sizKNAxq+/TozoePmpPLl6exD35vUUNYd2sXXNpI6av54dxbWF20jx81CqGeXovJdWub1geTSXh4vvGtMambTiAKJVZ008sLs1AUnrVkAZFlmLrR9buODZc+BOhj7LO4ssJH3enFrWcOXwhHk+VZamOOGhgcJFy3zbyomxWvu+7y9f2br79y79HFT+4+ePELL6zX6x/8/v/ng/c+/sEz+NWf+qW3f3AyP3jx7HS1fzAf4wAghgAiUhUljojE7VhQn/5tZoSEu29sR83bUSyYASOAx+DMGccuXTme//2/ff0//t/9Sc431OTVm5c86V/5xssBrJjUj+9f9F39wgvXjhcT7f3/9b/4/Z/+2o29CX3ltef/8E9+XE4OUS796Z+dTQ/cF14pAsNsMfnwJxcH3PFwdvKgQ0UiIAem1g2b4CD2w6pblVUVSpck52Go6nlVcN8m9FW1cG1qS4bQp2f3bp6uz9OgLVvXx6wt7dCPjimLecfdZrCsFGjEfKOhmRIaIOaYRyl2aCygC6EY+vbl5575oz/7QbM5n86KKlVm4tlHSeZp7/L04rzj6FIfc1YiJylrVtFcVaUk8S4MYsN6FSN7poCSTbKKK4KD2a3bH06KmQEVBV6sV0SZgRAcgcupkRRNJTDHISVDUmPvhpRCCFmFjNVAQXKScdW9V9GMCjKtK+WQFVxRIrtxnqhqJroehlZVCcSkLMqu6+vSiTlk7wiKIogqIRmoIggoOspZyuBG9zIiihkgeGbKkZwjxZw0pzSW0AJRzlIUBREzOEYGJIXEJKEo+iE5LtixmpkrsyQyySIeyYBy1iwIjqKkbkjK0IGs8hqEiTwxm0QldmUYhv7mpRvf/Pw3Lp48brvh0vFRypEDSxedc6bKhKqShpxibLv+bL0ahq5vVrFfI6qIpZjqohjnV0VRELmsQICS1BdsSA9Pz27WReXrPCmzWD+AY9+nFbkyFAxiwVPb9kp+/+bnzgaFUBTTA419sVcaD7WfmARyZd+0TYHkLMah6xQrdWWAUGZTGww7V0M5CwhaJdPTi3VdQoW+ALg4O+mz1EVlRThdPg5V5ZUhOdO82iyBiBE3zcZ7x0xAXIYQUyyDV3XNoOuurapaspgKOfZuexUczxbzUDjTHM8vLYrVRUpgWrjJzf3qCuMkLc83lKrh/CJrrokc5/meLa6QEbuySIDe/OmTdepAewFLiCq5FzFkTyreITN47/skJeK6b23iSyQINCTlwM4DxItZuQBXJIE2pWwWnC903bYPtO0ak6yZeabden9vHtD7HJHDDMrTt97luwmcK/acKO8fHXFwOqSHD358vP98qMpkrXfZh1BUPCR1iA5siC2TAup8GpKmi/MnE+9n00rBJGs9X7jak09Dkxl85QMyGaskmUymGuPD+/euX3/WV6GLnQ8Blca6ZxVVkHoyyQBSLMi/9kKfh2T87q1Hva7Wj1NxWC2OgKcplkCiKSfNQB2RiUav1K/Pi/l0CMWFpfk0TCYH1eQC2xWmeemqqpgPRN2mBRUC43JSXX1pOGkkt6EIKakgguRsMGjnIDCgJGFE03FJnFRBYHRNGhEOmp1zhWM1KbxNiyKJUmbBlFST0SDwcAkp9/O9qjvdrFsdAIHBRMayFxVl5jHYQEiK3KuAbVfVwdDUiEDV6kkopiWwM9HNk7MGoCiCryZMoBi1O+/OV8OjOxgHNY1Z3bRgRhKNKauZZ1eUZVYu9/Zpby/Np4izQdSjK4pCyBkgMHpzDJijMQeTiuygBMZ759J3RXAmESw5k4DgwNCx9/PPKj1bOWdHJRq9RFtuK+wcPk9n1Z/mzZ6mz0Zo0figVEQC2ilBO3LQU8kIP7PlUSXYNl2N9s2dfWknMuzazXY/u82+bKNMsLUejFke/NSyND66t4LUbr8VYJwUbre3ZczQ+F5GVYB0S40xU4TRxGFbHo0ZgApYNkiqTR/P7tx6+P6baX368quvf/2nfyoOEtOm8PjKcy8VVMMA5syxGWjwOKsmbm/PQUhZHj16fLF6cLg4vLJ/GYmKELp1c9a37KqjgysivXeBnfYxAXkTKH2ZicAsFEzeZcGzzXBysty0q83jJw4lhLKuSPd8UQJqP71UFyH0MaqCGfgkJgikiDr0nfOFm9aOeTGdhOD2ZnWOElXKKmw2g0aZTidQ10NSRN304tghcdPkNusgtu7PFrN58JjVGAlUt+F+JBzbr4zGoz/ymhG2LWUANKaJwHDrD9qOv0ZnG9Ooxe5SgdsAIdjTEwBGgDiO3CFVBUMxHPvaUZIKoImAJlURSaqisGOXIxoAmexOeXoqFdlIJBzJ1aqgqmg8BujAVPNYpzbik5hI5KlwOLrlDG17tpnJOPEY3xcx23iuggHoeEoT8Xi6mSQhNTPHNAboREZJiMwUwTF7A0Bky2ksPCckQjJE0YwmBltbEI3DRDUDI7Sdx2/rsBsPPsI4nMGx12w0HCEAM5kZbVvRAMxEoqOtBrjNYG5NhYQAOsbWDMaf2sajzACQkLctKQaikbaeFwBiBUMkJGXFw8mUThoiIO/UqDB55YWbahHGeweaiiA7ACAkT5w1bQ+mIYB76jckcghompKoc97GN4AEZpoSs2fmkTMNQLv4wMhPMiYH22TZmEDMiON5RFtMNRGYoimTUx2BXA4RwYCJVDIABB+yZEl51H1o3IQZwOg3FXBuK15rJkRjZ2pqAkbsHIJKjqpKzqkqgIKZgozHENFtxRtC0Sw5sfNgaCbjp87Ipgqg45lJOMLmUZMAjMhz2OmAo6JOI+MbEdEEycH2dvdvSUUFKLDDuvSSyqG7cFCC9UjFdO8ZAEce46YFTIxepUwaEqACbfqUsm1SV4VpL+ZLP/Q95CGbGHrvJsyVknUaMSaPwI60B4Z6GJK05zGrWnZMogQ8IZpNZphyBkiL6VEV6jyYI8+oOXVxcyFGFXti2XQPArm6AATfi6
3X65yb4Bkh5D75mfOFRnl8vtyUdWjbzjIsJvXpWf7Lv/gfBO/INrU0/e2zS+LD4tK9O7f69vzjx6spMzTDXP2hOjd/9ft2b70+d8SOKfVGWRjdvKy7IQ5DF4d+s25FwPuA5FDNEYlmVMqDrFbJeyin5arZTIoSMVtuDC0nGXKuJzWhrZq+8D54gqwOoWsGAggAngsS2JyfiHOgAowhOGFObQxcVqESWU9CcKJe0fokqE27lhyHviuq2rICqwOPDffe1x5E25h40zW5XbWxFUefPFo/6NJ5zhm4RAWLsZVIfVUUBlq6SSiqKCbMZQU6rL13Hoxyc7xfgbOY0vFRWA/LxXRKdcF7l5KvRFXJk/NZk0CyvNa8RBDJicgDuCziOZN1qjgp99TVaEGVDTOiknMgzoWZZSNVE2/S5wE6QV/UquSBiCZqZECiYqqSMwOMojGqgCmi+VAikogCTuLArK1CLyaGYhJzTMasgDmnLbdsNz149aXjK889+xv/6PfP1vU//tXvUXXzk4dpMd9ruvVif3K+Xt++ffLapWfms8kbP/5Tq64fHE8iNs++fLW594AN9i/XvsQhE9NU1ifHJpuSZerK8vxnvnR4fvrO4fOLYakvHE7OHlN51hw/c+Po1c9tvvvGgzfeaPbt5a89VzD5Ev/Jr/5OsfdSPnvyve/+sD/pHp+mvrVM8af608f3bp/r+qtf/tI/+W9/5y98/a9Qvfyz7/53D9/97uLy0cvPfbVCf/dunUMoTYn7V169cvOFy3/87ndf/tZff/TO+xe3H+9PpjHmoU85DkGTYl9PAgCs1+3N/fKVF6/96b13njk6Om03Q5jsv3Dw9oM3wgyMYwRdHM32DvYvlhcXzcawn1W0Wm36LIe6Z5ouHUwp97AepI/I5FwqLDnrZ3W1WmYc4n4571ZdLxYgIGWuyk2jl6Z7E1f02QIV3UpmfmLNmbQXBQ5m2LY9+8rR9vZbTRY5db3Y8cGBa5r+rTvlYQ4Z2s2qPLranrZltSiK8tr1ZzaN0KXDWBVDLJ48XFeHxaNPHlFnJ8t4Yz57fPuTf33/H2N/8KP31//JD779f/pP/iFjcCNvzpB5i6Tc+m7NbAQ2wHYJYjuS1PERbwhkprhDJ24HGqBMlJWw4rOzs//Nr3wevT5M/sn5ckEUAtfOT+qyvOnu3Gs/uHOajtcs/hs/9eoLr4aA+bQ9+9I3nh2SA2gl199579bi6Mbl2vQCzm838dEHRA/PN/0kTNiFBJaiiko1qfo+Bl8QcSjqtllOq6kqiKTUd2Z+ddER8qZrnbNld5pFnPdKNJvPvUEX++3ACIAcA6MvC1e4JGN3rIyTBEQC0OBK5wKggcWABAYJ5O7Z/Ua7fZh7tINistk0nvHocP7o/LRfr+LGhq7zSEkSJvCOhRwDSBQzBREkbbpEyNWkiDFajGVZhiLkLGcPzgm9q8jEQvAx5aJ0qlyW5Wrdsg9mOYgNw1CWIYpW1bSPJ6ULQ8yG0gxD4ZwPoxuZIOUoEmMK7MpATewx+0A0n5aQ1DlWgpTjqEP3MTI6Axu6wTmoQpHiME5HJGcwdcz90Kc8rvCZZ2z7YT6dDTFhcKJSsEtZHTvPYyAcmbEInsBUBMAjYfr/cvVnsddl55kf9g5rrT2c4T9/c02sKrIoDiJFSlRTalGDW2y10bDTsdO2u+EkQBoBcuHkIkCAIAFy7ws3AsSAE8BA22inY8d2d6vVcqvdsgZKFJukxLlYJGv6quob/9MZ9rCG931zsc/5ikoVUIX66gz7nL32Pms96/c8j2QWIjSnjIU9okE2EzCIueMpMpmMEFW1mCgogAIIoZoKWwYpTLWVHBBKzuDYCJpF87lPv9ZvLi7PL09v3mYiUR23w5h653zMJohZpIxx2Hbr9TamQcZtuV6xFudRinjHnhnUqlAD+KJG3nExFVORNOrYDw2+f+O5l9SFMUEuWBOquLaep9SLasyQU64Y6nn90Y99cn1xfbXJrl0u5lUc+xfu3Ly6uhqi1E2VirDk1Xrj6kML9SoOOdu4fnyzvkFSj9nY8zBEQD9fHkopuWQmREcVVbnryWy2PNAYPfshDt6HifsD0LqpUxrZOQPNJSO6JCpmSDjFdaqC95SjwH6VOnPIJa2ebH1b/Mw5hGbpm7O2vtPgAdnSJHC3ij7Icskz8S7o4obyATMFIywZBV2Y1TBmjSWlYh6oZlIVlVnwRVRQSzEtBkV9WynRUBSZcnDOOUwJde0BydfgADk417SN47SNNYtQKVLEAVNr6cTHGZbg5OWf/YStFuPl9Zsf/Kg+Xf7svc+NBU0gbwbRePvWi45cSh0BOkfsochgwFVbQxHLJcUYmgpAVPNs1noHIKp9NPPz07lYKnGsmdvZPI2xWGLDtq0ty8P33r91fO/w9GiMG0fM3qehmKh5ZOc8eWaWnJDQNdW91z5WzdW7/MZbjx5eXj4e+GPYHQABAABJREFU1uDFHIewqMgc90gqiCWNqhb7lbP1tnsKN+6MDiReB7cN9PDg0D198mgVOXPj6lkVKsh96tZe4ibbtndOqGQJjomcWHLkFs1MIDNwEQdSilrOCkgMxgjEqKZMzIhs4LT4gLMavQOLoGzOEaNIKWAwjPo4yUVXDDWrAkId9jIHEbtpU3aaZKvptDYDImIAQiyqnsghVJVXxpxSGYv02WLBhXrwDaiHcX1+DlEACNvWM1nI4MAcScowRARtF21AjIP5RQtVECJzCGRsSEyIyIZqQlKIsQq+FEmAiX3Qkq+f5n5kJqqYA5V+S1dPrVnWi6PQ1s+0mp0ZbG/iwv0ynfZ7JZMK9CzSAj4suYcP7WfwTHPiD201u9/KvSqwR0x2Vegfkjr4oRVthwXtV7l7eWknGTwTlvbvuH8R3JlMEHAvG31Is+zPE5kB7fPNp7WT7TOX9irTxNgiKAHvI5pMd4tsE9OCOmZZPX707re/tbr/dtO4X/yVzx/efD6P+eL+Ux/s7Ozk9t17WQqjQpFFvQRQNfCORXQzrHJMxHB6cvtwvlCTfhjGbZ8ltweNJM5Zvfd93wdmBQDRWEa1XDQH78dksYtX69XTy1W3upq34e7p/Oz4UEquZ6FugkNyjg2Iick556jfjoYsRR27MY8GQIQ5iwGJFGKVlLpuFETNOY5lsWwAQY2SGnvuNjGLpqIpyZBNsoHh8ZKur+LTrgATEoIqEtiuOct27jEwsKnwzsAUEWhyiO1GxaTTTKd1dyJ2Y283JPd82O7k2D5YhaY4F1GdUsFUxYR0l3+tJqKSAEGkwL7P7sORDDuN06amwt2MyCbYDcBsmvkZ6DTmcKdVTSrPFHkDaEBAyFKKqjLtrpvJBfHseiKe6sxURWAKjKEp7ipP4qrtaZ9cCuKUOGOwJ4wmIQMQTMuETyGxqOyoqF0Eu8C+cw0RTRUMDUGmrGuk6TBUBPZGTQNApAk12qloEzI0sUtT6NEuBWj6REaIogLThiI+20g1KRnJTVgVMauamsLkpSOcVAhTAVRE3u3cI0oRRkQH5ADEyGkzM3a6+2U2FM1ETlSmO01GEykGq
AZEBKqqshtJaJKLoiGzIRBRzgmRiNAARIuAMYf9tT0lQOYpQGo3198ZIgHA7JkJ2UwBpy/NOc4lMTOSQ3ImBcBEJmIOy0SI7TErRDIzEdm5LY2m5cU06kULIhE7E0OALNlAdqPOJh58MjcgEBrsO+s0C2KRvNNdAQgZEEsugMTsTAXM1CQXoR1AxlKEPU+6p0gxU2ZnNvnjpuE6EWofUp87qQhD8M4biGQj17bLKqatlaFqaslU7ChoFugoiFoBgV79JqoZSUHEigiGsWTUYdxmYctWjAZxh+6wDrOCTtEIY9a42VxIHtg5UhpHHVJ0HrHEkouKc3UL6DITuxqo8dgSy5gjOs0palGRYgAb6Sp5BO08j4WcDWPqUhlLbrI6CLN2EUI5mkMZr5smAOCsCUOv21WP/Wz1xhvfuX5c1/rzv/iF2HVlswhue/Huu4Hr6+22OZq99LEXx9Wwutws2jtf/NhnvvK9rxjQydGBc0ElB+fSmFJ31RcrSRGcmHkg72Dot6GiLDn4Wl2FLhcYpcTGBU9DGteqit77QA0F0FTG0QFWNRFgP6SoxLNDiRJQFosKMY+bzWhQ+UAaE6FbxKo+NvNZUAWIQ8UuYLVNUcqQc5/ixvtq7K/z48F5ikXukbMmSI6b7nrDPlg6PfLvPNm8+f71xSY/WkmGEJpm6AeHIEUpeO9aM3GgzkxFc1+yikCWEgUdlQi+IqOLx5cwVDeOm4NZzHx+9NwnVyk6V6kVD4IoooPhEMt1cAY0J39QaGEUwEbhkiwWNO9uIjsFZkQpYynJFJkWRUcrK8nnKXXcHuWcRRdVMzOpVOIO0pQRUJyWSTw2AwYjQpFUcgYkNApcq2usmpeykVLK2DGvnDXKhOYFVEt2VJW82z3YdMPw4/cRq0cXat6dnZakaQ2royXdODu7MR6iXV+srwZ58qW/8fl//af3i6xy2t597pN/9sa19/PcD/c+8vKjhyuQg//wf/t/+8Yf/mMeN3dfuXN5/fSD+2lx2n7xy7/6u//gd4/r2d27z7/66u033vvhB9/6nTa6o+PTi/Lol//6v/Fnf/oH1dGh/LCuc3d7Nrt/fVHV7a/+1iffen3zzo9//LV//s9++Ze//Pi977xz/+3//X/0997+6g/uHg/vrvqP/9IvBXf0wos/++1vfONnfv03/XLVHGPXb//i+9+OfrCh+hf/8O8flaOT40UcxzFDmPkMkVxDDG1VXV0Py8PZ0G3eu//IM6cUYYSn92N7p1k/3VAze5SeLOYnXDG2Lp3HuBmCh8XJ8b2P3P2L73x/WfPV1eNqOWMTBHDEuU9CozPNo5REYQwWsZ0dHYRDWXVHPDs7On10cXX7sKXtelydex9K/7gtrshj1lIxoKViVnMlUDibpATQrvMmO4iIsu1jl5qmmte3Li/vH8yfPzx87sn4uhgcVDfGFQ8Axc8/8StfePy17yTjw2Z+eqseHw+X1+vr7snnv/TqxYV+8iMf+0x3e8axKuAcAhoogaGB4iT6f/jXFFqg007kbrIy+bTBDHWfczRZmUFyAdolTnzjh+9+9pXntAx/9vqDz3zq09/+zg+Kps9/8sWApABE7oPH21eeO2q9vnLv9p/8l3/4hY///HvvX735/vY3fvMTeVjduVmOT+bLuzdPjqtXTua/98++tT5/+yPPze6f58t154LDVDyHFAsADNtVVVMAAE1QDAHA+Yv15ayqhSS4oMiAXLGyD0+2j4/nx5Qxp+w5IGHas3VgoGqmgka7nxZTQJ584AYoIpJiyRrqeox6fHj4/oOn88WpQDlc3nq67v26nNw4yCWlUdFKBZyus7OqZmZPmEbPgYlcpVacadxuOu+ZijZUTbocOaydT2OJffJMmrRa1mc3z4b11abvvfeIbAoSkyOqHMtYJv1YAU1KN24BgcjNm6obBkeOnVfDrCIiJtA2deW8R0SAdtaGpgXmlK1hh8yAUFRzFiNIVOY1PltK4DTLNp52BrWoD84zdbEYoBQxcnVV5yymFlOum7qt3ADFE1XBMzOiiZDEwtM2NREiI6FzzkBEJRUxRSbLWpjDju4WBQSxLGpUUU4iKeY8iEApJWpXMwdisQLGoT4gz5rSz3/ykxDjauzvvvAc2JRALYRE4FEljv0wjoY25rjp+yw557y93pQhigntifBs2FRV287NTDRXxBlGR0QcRMRR1fcl9ZtZ3cyW8+voupSY/ZR8CcDZcjOri5XV5UU1a45P2zaOzUF979Wb77w1nD9dISOZR62ccSy98SZJLv344MnDeX1wOL8RfLNebdYgNw9uHcxgtb6+/dzd84crFO23cYh9u5ylgovFbHlydLF5X5KIZnJA6HKUEMhTKBARbEyZ1SGFnGPJo6PamcUYtSiAA7W8zyo6OjtgM6R6LOPVZkCB+YJmLjZOrjYjUoaUbLuqNPmWtOtmh+H0Thu5Yt+IkYpL2eb17Lo/H1cxDoMl9DX7uraSYyrOVykNhMjIFMCcVwojwGzWhqollUq2eehz7LEf/MGM60qRrWSyzI6xnnmCSvSohpuhurOoPvLCz9y989rZCx/nPL9+9+L47ivtzQVAwZgdokSqeK4ulziQmufGz2vkoCWTChUb1mvJ5inUoe22101dF1LN2dRiX5rjmxy8mnHxzrFZMZZFW4tkQLu6Pq9cfXLrLGpvakA+9RnJXOsZiQlEZBz6opmcd3VtaAfPf/wjY3GhCW88wutylVbkFpd565oaiItJLpnRV65Spxq3PmJ6+IS8V74u7Yg+RtdXrpb0QAZ0dpNcnbbblko3jgwBIYBy6kdXuyo0GUytIDvi4ozAQ5cMkZ33BqQqns3IHACgAhAzOLTWc3DsnVe1mCKI1IyYFYAKYM6WTYiMkJwny6JmdXC7TiRAAIwpmwEjOaZpoZpVPU5BJeQ9A6FnMoV+KK6AI0dGkEt/fgErtuIQfb08jChmBQDNITKHptI6lKzZO2UIy5AdWOyRcHZ8AIzedMrR9aGWUrwJFwYEBTJmZOA0Fuvb+ZybwAcuGVSoeXMJBbZZsFTP5J0pn/un1KBnawf4ECzCXV/YXtX5UOvZOSp+WjLaGbefvZY9U4uehWXDh49/phbhfhkLO7boL0NJezXpp562+889hfQMStq7x3TPKO1db7s33udj7yEDmCgqnd6LEMEYDFSFkABBTRS0mGWzNHQf/OTHD773TR7l3uHSz+rharTxERzOP/KRO1UVZotapAPRULUmFvstEyhC3yW1Ml8eEfLB6c2SUrfebMeuqrxmESAqoes3RfVgNg8u1HUNqDEnYlaDCupmsRylrJ+ed7E7Oj44ctbUfLhsvKdM5JBkkFgiIpZiBpiTpBSRSERVBRCzqKoZWEqqACkmQjVDUW3bqqQyFNmmTEYOMaesRITM5GqHp8etD3B81GoptdOnl7M/ff1RNmcy5ftM6blqALv0oUmD0MmORjtwG50hEuwhCJOpLWs39gynv2Hn8rTJdIZ/acyYGRSRSZkqJanzCjhlTJmKlGK7lvmpwI4QaOq72hvedrgZ7ieBRFSKICohOGZE
1DKFQyM+o9wMCVGlTM1WJopEhAy4U5FAdTd4YKLYp1llMTNi2u1N2i7y2lRhitMBUhM3JVgToyoRq5mJFJGJakJmdiylmBJMlBTqlDfESDKBQkUnnxQjqYFzvCssNJ2uApvEjUlwAwKcEoh26hwh7z6EKU19aUBopsXMFJjw2dVrO40V0Ah5UmbVJpcZsOOpWYKYpiuaiABo8sEBgU5uEgBidIG0QFUj9PD9H73z0tknp6xtYjcdj4oimhZBcmQGe65KTRGYCdQEmXYJ86bTlBvReKLMJkfhruxYDMB5p6KGburjUy1TDRwSTba5SetCIpq6CxDMhMmhkWoWyIjMCGL6bAo0JYUzUy6COwvgDrMEm3KmDMGAGcmjAQJPcGsp2fvKTA0B0UnJRDBFJolmRcHpCGACiLxIEVPndvgZswOAXdeOFHZ+Uv1UinOOyOU8BdWwIZqAQTGAqWxXdap9oOkIp6vA7W/6qCaOiR1BISAM7MGZFBE59ZytRKRxG7dD7ItKKdxn1UIAOKu8loTkpViKQymhCgFTWtaHlQUsRqzd+pLyAJJKKgCYYmYjiwbFShHJo1lxXHXXT+r2UJwXCNtUWDMaALORInmmENAAfbfdJMlq6zZUm25IadztaxeMWtp2TsxDWZ0cH19cle26n1XkjNvm4JM//wuvHN30Lb3w2oujxEw8GrVHd27cuXN5ddU0i7HIxbBqq4qVZXv9yeMb+e7Hf+8Hf3Hz9qkKjn2X3ZByLDB658hoNj8aY0YbmeH4ZEaMOYkajTkT8nxxUiTW1dyAzdeAbkwjAiwDCdDVqP2olk3jQFCbn82ObsLCOY1mxcDQkzPIqaiUcrXe9unwKDvfMKOjZpDBCGPpoqprHDHHoWgkLb0DK51QTJ/4yKcCKosNhP/gX/3Lk6Pl0vE2jkcnN590V6NyU5nHrJaR0AcEyuO4USlpjMy9csW+2fZrH6Ctq6ZyEvN6syXmBdL19fj8jRsL768uL7onD6mducCgmoZOIaskBGv9mcoWwDEeADmTLqZNLl0CqKF1NpS8AuAiSUoSZdBKNapE1S6VUdCBXgZXOxiEToHVwFQSaAQoKqNKRnKApMZIHqcbAYuUaKXkdKFiJr0WAbW+3xrXwTXeH4tG9t5zyDHvCTvYduNnXnx++/xwfxXvj5f/1r/5s9/+3e/M6/B3/sPf+M/+09//q1/80je/99U3Lq9ee/WGuIOwPKDtyrnw8P1rt7hz4/jklz93O64vT16oan/rjdf//O0nj1dDWZQQmpfPH/44BfqLr37/Y6/83OL4FWqfv+h+8vrbP3n03gef/pmPj7G6cVz+6J/99sPz/nH3YKYH23H43k/+yNv8pZfu/KP/+k9+40t/7/CE33n89m/c+PTh+bbVH/3+7/yOXuV//Z13XvzUx9zBUbXaPPnBnw358gffexd5mNd1GYcbJ7ek9zVV5fH2yTYuQxWTxTFzVbvq1NQ5KhLXtTFnUvHL2cl1NZCrkHOlvra5C/j87eOnTx6Oq+texpgSq85nzRj78/uPLeBR3Z4/eZJFC2fvOStpHINrdNPX5m+e3cXkwdLJ4mD75Il2W9Axj/GpXopoIXACrIZOPE43G905uwENobCYY3CGAiXFkezwzq1utSLnjAq55fvX2p48N6z7x+suUV375vp6XRqb3zh4vL360Z99pb2ILQjFKJ1brUZJND89/uZ3fuTrG/fulkPdfPnXPweWihWSiYy23Sxkt3E1zUXRYOKGpu1GnTYHdrNIADNFZJti5Cb8nYAYg/FnX3tRcn9yWN85Of6n/+obf+0LLznO3uv7V9vDkbounZwtb9yq3n3n4eW33/3Fn/vs/ffLWI7ms9M3Xr9+5eXq/XefHhzO7t1YdNvtO+9dtj7ffS68+973rIbQHoXgL9YXB9ySw+v15vbpzevukWkO7OMwomEeclPPiWy5WHSxE6w3Mc2xmnHwTAyOIdkoCpnms3q/OlAzFSWGFIcUkzmvpgBGjqWIqgJ6dAGdI8c+uJhiHVxT8zhul4c1M3bduBoHrFzXbdMqgTKHGQd3Pa7GFEOoBHJOYqoxJ9SMznHl0pBcFdBACLbd9dE85AIqJgDNoprPw9PVRb/emuLCz4ApMabYj2MGLFLKGPu2qcZUomjjnFlVVLMVYkDklAXAvGMHSI0jNIfgmQxh3jRRARQrHyRlRWFyhoRkBawot9V8061zSYhQUgIwJkzFHLOhecaSiycEojpUMtUxOASDqgoAMKZSioaKAXEcYlV7IkbnDJARchq5YkeVmAFg5YIBqWnl6yTDlIaQc0aiLLl2zorENGRJ0VLBglWzllEZzcBpQhMrnoDR4ec/9skXDm88fPRwebCYtWEYcx9T3dZdN6QiojGVlHJUgGHsx9inGEuOwETOM7KqEnMqSt45xwAFVAm0lMRMxKRKYkjkhzG++975Ky+cAbBra/RtGhN6MZWr7vxgcQbeeWVVYkWvWFWVjOnt774zpoxGdVUNsb+8/uDW2cvt4iYfzj/44CGk7QEzko/GEkI2reswpm1lcnZ2sFldVZ5SiRakcRWZVHWTwB4+PbcklXJwVXC1aq5CZVZi35OJqXj2qmUYt03VgrdcgBmc4wyQUyLGUO+yio5uHkpOw6bPI5iwV9hupcexmvf1jRqxBHMzZGyaKyvNwZxmlMwEeOxFFUSsJBj6IfWSY2QEcKBVtW3rLAI8mqHHyplHRyLFEKqmdgGr2cxX89aTt2rYsGgJ7hBnhzhfgBLKph/Gdd5qaFj6w9oOSv/y8x/79Cd/9aWP/zq7hYzZysAz46WMZQ2mLlTsuKhkVCZpD2aaUYoqWinJETiyUtZFOlctmtliO4zofLddB0egBQH9fB7aKpYxi7ZVG3yIQ/Temwmodf2KEG/eO4vaqeS6npUsIlLVlatrTRnQhr4n55xzLtToggE402Z5+85zUbvUzLY/ej/FzWWROunCap4vll7BVEyR3KHDQCYkNUdcLhclX/tgJldV0FCVbv3BEK+yP6NcVLq+2/Ra3PJArnpQLWU0e+pQPTrmkBzGlJzjqvEpKpqRGZKxo6mdRk0BCIHN1IpiYDCb3GSEBCQIKmom4JjUSuWcqZCa92xI5ECLGnNM5pwjj1ODURoGIlZARCqmYTJVAUkxTZKyaNGUyix4yWWzyopmxO3isApN1MiIYqAIMecK0SE5ARIsg9BBzZV3oBVaXq1qImwXiETBgU3rGwDEolyy9BbBIayu0/lDF2PVLlJbW0uSJcdYtmNVSPvh8aPhmayzX6fvlaCdXQd+WpjZO7Y+VHX2UO4zcAd2tM5eRZoUIsRnvrY9+7OThKY/tp8SpvbrUHjmNtpTT7gzgAPiPksIdmFGkyQBO4Mb7oCWCSjaiUP2l+ijnXiwP6T9+z2zZZg9Cy0iRDUTs2RaQAn14r2fvP3tr4+XFy7J2cnpoj2Atjk6O6ubxenNo9nhiYll0RTLfLnI1q+uL8HcrdMzAfYhuMqH2Yy5PD0/X19c1+iaw9nB4by73iB7Q2vmrXNecvaeYx6dQyZix+xmKeV
33n730eXTfrOpHV33Dz/2/C1zPEbpx6wC0A0m0x6YlZx98MBkjEBYioa6AgQQZeeQNAsAQBySoQXnwYwBun5Mw9gPaTmfVd7XwbEP3rvKsa/AV5D6MW+HVT8C2fWWmDkVM0RDMptiwT6UEafvdjcCJmuXAZoi8K4P3naQzs4nOE3EJmljf1r2YMtu3Y26154A1UxyLkaKIjoxM6qquwFMhEBm08p9yhxC/nCkTwrQbgt5clAx8wQioZmCEtAOR5qAKQADAmIAICTVKSR7L2farrt+0mMMQBWYuEiczEGIZEiqSmYK4JwzyUaokqdSeQBTE93JbkDEpkLsdtoQIE7UDyMaKqhjziUjMk38Du90z6JlZ/ebCCdFNWEkoOn+hqpiINOXD7C3ze2/iqnK3bQYTfwfmYmhTN+bqO4KxYikZCScWtiQeKo/m6giIhLRn5acp68RCIgZBAjQARKgD1wYBw0/ee8pYaU20C7FbPL+qyNnKkTESCIFCMmMkFVlp94RGSgTmxbZmcWKFgFTZp7gLCAFQUJSVVUlx2pKCBMBaQCiCqaEPHVXEjGA2hRBBQbAhgo4JU+jTh9TCwLS9H2qGjiA6fvZ01GqE+Uw+SsVFNEQrcg4nU5CFjVEBQSRDAQGOFFLQDrdjqYDZmZkUqOpX89EAGBquzdTICTPZmqIzE4VjUhKAoAdaZUyTePBQEFFxHaNhZN6C39JKiIiLQUcESH6UDQrgykoUmjmOa1j0kFKn6WgZ1c5pZqgK7kUHYqOEYxEAYZNHwJZVoTiEBsHgKqpx9yPcTPEgQEkC6o44JKgCA4Fx20OriayxYz72AvVDiwK5spVroppiMNWcgKAMvSCDCY5lnXejI5yFsNKEzir66Zx87qtForn6zzY6AoZzxoAEXQSZx956ZUXbh6ByXw2216NJSk6D9Ye333xxr2zH/3kvbadnz++WjbLe7dPu36YtQe/9eu/+jh3UCO6CsSGGAUlC/TjtqpqBESKhqU9bNu2LVl8EQKskqqC94zgcwSuK9ccGGjEa1csd5ttj9ttXQzX15CLnBzduvvCx1GKKrFs+tWTUYppe7g86fp1P1wo47bvci5NU4f5DCxlG4ArlREAGX3EVM9mTkkNlGTM+bDA3DVQBmqrZXPwm1/8zW+8+eZ33nqYBPiD85QSOtuAHB42dYVKXAAUJAMET4BSIBpBjhF56IfYb8E7V3JcNG3KPMj48efOumEYYlcsD9sHzfIjOa4NvZrkMhoYUVBtQHqEggWALOVoWKINuRSXW0crgJLSljERBlNX5DpLbLwOcR3TylMV6Ih5iVhhBkkrlXOzXssAYGJmxuAC+RmCIXiViS1UIARSs6xacuzLOAApE+U8cByq2pCCghNjYEdupxW99srNGte/+osv/P1/8KfLs9m3v/Lj2tfzI/8vfvuff/rlO+fvfO3nP3r2c7/yuW9+7ZtP3rs+Ojt66bOvpevzn9y/Olze++wXfkneen95Y/H1r/zO//xv/dLvffX105d+dnH7+XuvHc+X7l+9/+PF8jmq74WZu/HS0T//J98YVj969WduH56OH3l5sfBn1xc/+t4b6/6ynBzfvOob71393DzgyeBP/p2/93d5OCjYvXf53n/8//i/fu6TH3/ttU9dXH1wcbX52M++msldbe173//6Ky8sm+US9fL6cvvoitvF4uzO2eX7m+ur2ITFR17+xOUH77lQcc2BasQcu+uj49OD2Z3Hw6UmqPwpXM30kqrbi8996va33nrw6PyB46phIkDv8WB2cP5kJZwusjRtLc6tt4My1/NFVQXnUYd+2fjgofFetw7W4jePSRiLrNP93HU6iKu8qQJiQDMxVCYgjYoO1IohGRo6p8hCMDhAMsJSkiB7nrUrD7BsRqDZzeNxkPVa7h4dcA2rZAcnL5KrZ34ghHp+gqtH4yrSMLYVVajbp93R6fPX59fOVd1YqIzF0osvH6zWq+Ws8QTgbF9psZsr2mTV3s0tCT/c1yTT/cbk1M4OAKY4/UgS5DJtYGEV3P13L06PZn/yhz8IBx/dnFfL5sTpRfB0uMCzeXtYh16sdS5dLs6fYmL94z9/95c+/7JFQ+vdx9pPf/LFJHp60LzVbWpfvfryK//5j/+/H/nYC12+ev+t95A9UFKznMemqVTQs1OQ4D2Qk2gyGHOpF812XHktR/MT1s2yYUuDx4oRHXMzb45vnPzgB98FTfvfAhYTzz5i2dPgaGpTgJ4pAAEykYEMpYwKyFLUoxusWa2v0fIQu+XyyPswdtkKjH0B7/wixIfZAwCiIyw2Evp+3MzatglNyTlnVY1t3WLlb9589eTw8Idf/wsT2sYUZlU3bBm1pNyEehg79UbBr+JaDJuqYjWfA4GN41jQwHuHNvQDO6dGBphVDltfMTfBpyGnnIuAEXUx+yJVM6tDcN4RARkUk5JzE5zkIgB9GopIFcKEdTt2zvHVdptKYUcxlVA5M3BIRihqpci0EZ2L6IRBAsZSfMZqMrqDIUGRjEYuBIOJZbBi2WsFQEAUizjnFExzRCYBcT4YGCPFIQIBoEtFs0pmAqxJspgQ8KJd5iF//Parr9y4+977TxbtvHIzE4hDVITRNGaNJedSigACD9t1v+1zTFqSlERm3ldgqqih8os5OSWPDCDOIWMTtQf0CMAEXRJEH8hv193FRXf7uco3ZWNeHKckKG5ZnYLhOA6gABkMYNNFDp4M+i6vh/H47vzp5dOD5eL24WkFNeecB//CjU9dnT+F4bqdnwaPZRxPTk4fnz9OJoau79b1vDZRBV0etqvLc4tUEjVulmIh8lqgZEMsRSJSk0qqQ6UFsxVLwOA8t6gVmCaJQ86OuGgBMOd4HPNOKjpZbK5WDuqUpd+U1SbruujW2XgdrtjqodE0iyFvUEq1LZQdwNxTjalPaSOlh/VVjFF1G1NUQ63a0CHOD5YAOG77sulLAhWdamJUMYkFx03tlRi9Ez2Suh7i02p5OyzOUhpIYo59NvXzg3EYDqtyVusL9eknP/+bL37sy2YLZKwqEfWnz7NqYSV2znk202rm6qrKPQSiUTtXN8SONQfnsIznF+fL+bF5P0iazavt5aYKGBz0nXTb7ujWK2FelWGoOZBxHBIi1SGYZTWJ3ebG8Q0jA9UQKjMrcajrAMBljGUcFKGqXQjB+XmK0m3W/bAeYvSz9uZrH2VN8u0342z0fQlVfZX76IIIkg8KklLnybv2wOWo6wFiTuGwXpyNRtgs+/h2KaNDK/kKqV8uTnNKyUBHQn9iYba9WiFtc0mz2rOWIaOr5gpZs5AU70md4wmaBNRdM8EUiEFKmFnbqkqqmySFObN1fTSEBEKIjsEhOgL0JGoQAIOHShuDMU5NkSAIk72tmtVqAGJUTAwocBZhpromMek3WxQmQld5BfMOyZGv65wHAFApMRfyjGoB0EuyKNwXLIjekWDqeq8lx0SZDNrQnoFHYVbN2cxATYXRE6qKWIJ6uy5X1xW7el6lQEKghFpMU+nPL6WUvF/F4a6y/JnRx37aPPZsrUf7cGl4FkE0hf
dOaVW2t3HsUv72/8CferG9CrXbb/9QdtqDIriDT3ZrWHvWzGZ7gGgvCX34etNiaC9HAQBOT97/rO0SBnXype0XcX/pL0OgZ2ATIIAxmk2N5qaTHRiJy3b1vT/9nzbvvelimvvq1q0bZ/ee94vj26+9WrtQih2eHHbb7dzVi7YeQNMwjMPmdHmPXCjaDXEAw0bg8vKxyHYxO5hxm3P06lYXK+dIU/au6YeR5lQ3DKwkVNW1kRtiefvH77379hv90IGr22UYJN69sTg6mmcjEWUXJu1ORRUB1KaFes4Kkr13TJ7YqQg5JGZ2jjS5wMtmgQgaCxHMF7U5Xl+vVHS2aJ3DlJN3FSFojqVkyQUwPLpIm+i2fepSVggIeWKz0GwiHCakyHbBOUUtmxawDIYMfhI3ZWd63PkcdYr4MdQpJ9rAQCcOHCZtVxURQXfDaeKOJmJj6jsHJkCULAigE2AGO4FmqlRDMFUFQEAyk2kQqe5jrRFxCqzY5wzwLlkJTMoEm+OHPradgckAQKeoK6A9erRTx9CmAcfk1EynIDOYBAIHJiqZaI87AU/JS0hIhDplbyHjLv16km70me6GyCZFkADZDFUMcdfuRsQELKpAZmrMiIyqjACTWW/vuAREnHrudSdE7S62iUSahpOhEREqlZIJiaY2ejVEJzJthE0MGNJUJ4emJU9Hbyo2udV0ovf25wwBkTx7JnCOkMA5VPabfqiaWro4XbhSMhMzMQB470vJSqZq00ETuQmLVt1xWwZoMt11bKohIyIDlFKQgHgX2G2AxG4XRTRxUzsrIu1r7lC0TPZJVXPOAUAp6vxEOQGqTUlCk0BYRIiJ2YsoMhGwqUyI4nQ0CMjEqiKlsGMzJWScTAkTxGSyc8wpIKJqYceqaGpT1jvRjh5CZDRQyQDo2E1mQzQrOToX9uIUoKloZvbTDQHNmHiXoaSKqEjGwCqiYMgf3uV3UlHKicxyASyFALOlMQ2gmTApWa/jNl33sko6tK5qqpbQPekGnIoOAbPAGBOpQszjeLm1cXE8U1RjS6l0Yx7Hcduvu25bhypgaMIBEY7DFoECUYZ67Ag1WRkxMIqm9bZy3Mche84lr4dN8DN2AXyJaYoFMxe8oTh2k9rvHNeNtDNDd510qH2jObEBKgabOT788pf+HVdsu7XAbe7JuRpI/WG7WUF+Smh09/jgenXZZ6hpdn65qepZ1/We13/3N/5nr7///lfeeqtehrhaeZC2UfYBHJsqOAeqDj0oGteat4zUtI0I5rxhwsDOIAf0Oee5CwqxL9CvpV8JKwEjeZ6HZukpG5hEM/HErq6b5oCpMjOyfhhWEqUIR4hjKc4XV2fG7JjA+RC8OY+VH1YdoUPNy4o+ffeeI6HgFJBB7h0unywXzUvu6nKz7YerGLWoIlxdDlm0rpgIQuNy2QBo2wR0COhkUOep5OLJFXHDkCSlKlQlpu5yUOLHHzwZytXcyvz0TLHKVsg70WSq3hmRqc8GoDjkrOQb549tbNqqMqoVQykjQBa9Nmu9a02FuTYY6/rUuwPmQFQbBpWSZUVYVJOULZHmVDg0iB4pMNVMAXmm5oidWSE00UhuDikpmNqQh5WkwqaKzkLj6wP285QVgUF2Pwn3333y9uPVxz/xqS//8itca3f5sGrt5nP3Gnvpzfc2X/4bv/S1P/69y+220XT5/sMt87Bep/Xl2dkLP3lv9ed/+K1f+8ynfvSj7y1PX+yr/MOffOsLv3zvyab78z/55vPP39zG9NrJa2l8+o3Xv/WVP/n9ef3SR26+7Lv3uyebj/7Gvd/7/e++8YPvfu4XPhu7B9un758cvaCs/+b/4tf++//qf4xQXT558JMf/v6Xf/XF1fWSfvDw+r0fPnzLbp0c56vVcx8/e+Otpzaz/93/8d/97/7h//Nv/7X/5R989bc/+4mP/7f/+Z+ePL/87o/fuHlyU8ZkheLlJWbRqEGh2648Og4hol6mgdvqcNmo+LX07a1le1x/MKzDcUMMvm7ffvL4ycXl/GCRx+HJcDVr67GURX3SLg8PcpovW+2HYbV1qWweXtowssowpAYdGcfcgSiRGQojmbMCQh4JIGVhYmQkYPSEKFZEDaetHplwBTCzUntEqtX5zvD22c2/+O53W9/iTOfsDufzJoRVSdyP1mcOaRmodtSfv9uCVbgcChI3SeuLy/NPnNWPrh8jtWF5bIT377976Nrj0+XFenzh4MibI55uuLi7pe6qMxQIAKaKh2cQO6mqEZooAIkIEhBN+1wkaiBYcsmmpyeLWfCf+dzP/Lf/w4+yHv+//tFX/qO/+2kwOJrNi0jXDd9++4PPfPSji/qgax++9Dwe3Tj84s+13/reB6+9dIYEJhi341ur9bDN9+7d+If/4F+d3Lh7/8F7Wz2fVZTGcVkdeqoBcxEbcpdzmjcNACcd0VGCgZ2Rr6vGzavFZli1wWofopKUkvvctO1mU771+g+Gbjxc7PIpAIEDGxsHRkc67R5NzbFmiEhgRSTUTSxP2LsqNEllcbBAtDJsvQPSKg2bftBhGBtPhwczKfL2/QdFjBFyykKl8s5UWe3y6mLWzqrgfRXYfDNruhzv3L2zvbyI4zBzR32MqKAxZ41oaFZ88Jf9owN3uqgWA0rKAxo55JSGOjgUyONoCME5dj6rsvMLH45qDyKeSAmEYYgKBIzERIxYOw87M72NY8wxIpmUkg2utpuDdpZSZlNmLqpdP44pAaBDBrIUs01lq+odEQI45xhB1FLOogZEgOQcV8GnVKQUVDUpChUiOWZmMtOaAiMrwZCiV5dzdt4hGGMwFTBhgu3QJ8mjZGGLY1/U/PQYmtpDQuzTK3fuffSl5588fSJRTp879RXHPJDDUmS7jTlnRROzsR+K5NQNBGqqdV1pEseMYkS8ODmKeV0TjH3P7JCCaEZTE1SdlmW5bmsQNrO2mm1WI9rjOy/TojomdNvrseXgKatIqOpxiFJKtJJy8UZtCGY4n9dpiCeHBz4QOlfNvI7F5zblxIY3bhwrRhBBhmHYeOd91Q5Dj0UPwsFarlOJ6XIc89D4Vsacx3FMeljXZexNVGXaMbRccuvmCKOatrPZarOtfFNSklQQwTvGojIJRruaWwCAJw+fslpJkoc8drkUTTmTsSTbPInioa2q617Xay1GvhIPbrPqbVUoGhTUzqAfqfA4jDkJORg3o1MY339izmkuLCBJVASr3TYh5hwMvJaCJQ3auGbB83pWRMcxrZAsBJ9VmZvgoWF3O9BzC/r42S/cvvVFspZydsap2yCAD7U58sEhYh4jsnd1MEMgHlJPDhUxpuyRy1iGTRfCvEhRigTardaoBURzhhTT7PAWN3VfRgJom1pFkcBVCFbymLptd3x0Ro7EsnNEjLGPoa7JBUMtYwSmEIIPTEalj9v1atheo6P5vO3Hvr/ebh5txyfdc/V85hInzTHJCCmYaHAUoCCQiRFQi23lXS8lX26KhRnnOZLB0M2sr6uVyKZIyKDetwsO3bZP/hiP7pTtI3aZYI04Lg+XOcchb/IUNkHA5AKTZBMxA
xO1KWhIi/iKK+/NjIgcQdaiYigqioxUBSYzU8sizpExFUZo8Oy5gyXEiycx9W4csaj6ikwKg6lBSZZyIWAU8YQmImMGw0NfJVVByDktjubeMzhmR4SokqdeZ5WiKRloQZOUJYljz0R9d1nVdSDuLtYOHHFDlgRdYZAyOSBYyakJaiFyEHtYXwXzAFUSMMSxlHEYFsUslSKYx1j2poPd2neP2OzEmH06zE8rKvDTzq6dZLT3rOGzhfQO27G9P2yHH+3zh3b9ZrALdNnLT9OfP8tXNQTYuYMmgGQ6sqlhzXb4L8KHsNKHqdO7fY/p/+o++Ghf6IB7WQpw73bbGUV26/z9w6ZYbgMARgJ+9OO3vvtnv5uu1ou6XiyXN+/evvf8S6sBFjduO6g8usXCs8KNo5OAEOMGMauUu7efv1qtL64fnx4ulssloq3OL5goBOcI/Nn89s1X3vzRT4AAGUVLTqVqg1pRdU0VMPjLy+vvfv+Hj54+LGN0ZMzsFJ48uHzywQfP/8YvpBhdqJt5A+S1GHtC5JQSOzaaeCyrHA9jZHYiOm16qZpkoTqICiGzI79wCAoGOeeD+cKg5GIpFmaPhikLiK5WeR31g+vu4XmuKue8yznphJbtIDRCdmBg+mEatGoRyapZ1KFRUZs6akB3J0xUdwoQgOHOcwYTDbPLm9Jp7E2dU7gfIGqqZiJTZbhO5erwzOYGuB9peyhuGorPtE/YmxwBYKKKaNd3jkQihZ7V+e2yqwCJVSddxSa9ZqoehylherpudlLOTu1SBZ4W6EZ72xuqFjGZfFuTVUms4I7fAWYGFVUBUABTmcgUngQAZhJR0bxz0iEhokomdmi2CyQimAiiXV/YVISFaABTotpkUhMtTKwGO5GCyExUhXiP1SCqqJKB4eRSNUQAN4lLqkoT0VPKFAJkoJPXfl/rMYm6RuxU8x7jNzBQ223FIipocs5ZkRfuPWeaEHYNd8TT94+qsj9vxo5NwcB0B/4AmCExg4oUJkZiBCDyqlJEiQl3jZWgIqriXJhys4HIZEpTn+LeAQFFhIimPCYVRZpOijnvAUykACgyT4ozEavIDluceC1EEQEwI9xtvtKuD2669RkA4I5KM1IzIUITNMSp4K9IYSbbOfhYtewgRySRMnFGE1ikCIAmkokDoldABFBTBFRV5ysAUxVmJkLJMlWimZqYIdFkkoBJUPz/M6BlGYNRitEjkANN23G4cCiIo7FshyejXpHb+LxuzMPAbO3JvFqttU+5aC0FTdAhG5aYI+A4rLv2oNaxt2ze8fXYj72wNaVTHzyHOWluq7lK6bajDGAJzDWrTd8uqHHKpAQ6dnE0FBAmk7HLAlaE3DIVTTuHoraLKrByPVSNP1jWJV6zryQpAUjuNVHQg+7c/fIv/9aN41t5vD46PgkuMGlTN+vNNYjO/NwObq2e/mQxb5enR+++/9SHIHmE2gc/y0NE6p5fnv3Wzx3Nbx9+9c/++N33emT0xMakGmmBbVvFPtb1jImyFDJ1bNEEPVahqdxcSiGnEsd+zKKl72WMAEK5QC66PKiXpxzzuQ9VSWMfr9Sl0BxQ04amXlaE1wNQyWUoiSSbmztjQUfRtmbkqYp5W9Wtks6PWtFFXMdDzi/duGU9VKFCxGHMNw7nNw4Xj667MGtOZi066sbROcq5TPW1WIQQvQ91w0SyXMycYzyoSsnsQr8dSO35WydzR2UcXnnt1TaS9fHmzZvbromjzNAPxOAWjgJJKBzRhqSX3pEWVBiCW5BVJM3cH6PNDHGMY0VEMCupGEmRq0BSzCE0rjrG4AF1uoBVBpONQcoyAqioAz8ztyR34P0ceUYAht4hFxGmwMiIrL5xWKwMqNvcXZnkMiSNkURhdgCaqjAHP0femZJf+eTPPOV3kLdn1fbjP/PSP/3H779/vv3Sr/zK+Ji//cPffnDjQfdAoLr8uY/eefvNLl5Gp/jm/cc1HVC2B29+l3/+E3fvHv/Zt37wL37v9w5C8+jNb4/jhss4nl9+7vmPvPHN/6FoXCyPx0Ff/fhH//0v/xu//y//i5/77M9846vf7Kwd3PJH724ePRp9Bf/2b/3VD9765nf/5T/j63UpFA5Pfu7TR+8//EYZnt6+4Z68u6lOXvrEZ3/rc9T9l//N/31xctC69Hv/5Hdvt7f+s//0v76ut9uRfuGzX/z+dx9YPNyugudCRcfL8y4OHpujxel2GKrKEyB5l1OC4MrSb0Yd1v3ZyeElEDksChfn521FJuKXhyE0TvDlO4e+lCfnb8H1m11KuaStCBnAWFjNmzlABmJxU9eDqfG0D0hQwMwTMGUDVa3qxgFqQQUmZjFDZMd+yg0k8KAQhEQtsB+VCni09vG7TylqjlsMs8PjxcPr9c32bEBrEIPTse8cm1TVYk5x022ulIRL0UvtFmdHQ+7ao7bkLGPUeLmp3/vq11dvXz1tmvZvfeYXb87mla+QhNAMCQl23QdGaGQqCsbTppMWACPDaQdEVYFcyYl9kKLkBNnnMqqAd3y93XzwOL56++Bv/42Xf/Le0C5/9uH51e3TE1Go5jWLkJ+/f2n/+Pf/+D/4dz9TMH/2ztHmenV6FBSK900u8MHT7cnxoqh9/WtPvvvd88Xx/KLfbrrNbLZoqoPUd2YKGpvgyfg6YTEATSn3db2oQjg5OLu+umybmXgX0LCUNPQpjRyYQGvffOfRj9tQ3bn7wumNOfzxHwOATXtxWaxIiYnqmUkBg139Kzkgc+x8FRyiOR62q4N27mSIq6eL2imUxtujJ4+qahbQ2LTvYtPUNQdzNk1mTB053m46x647Hz3O05gJpK5c0sIVt63fPsl1qAEUPceUA9nRwbLvIzMmkXlzE5IuZrNi1x7rzToZatM2uevI0AXKRRE5ZgHj07Y9mIUKSko5jnmzGaMJEbNjb6ZgWgwNkDj2sQ7ekIQQEOu6kTFllfPr6+Wi1iTi+Hy1QQJiDs4RYSlQew9FtYhzvpi4Kd/JBKy0VTAoxXA5awDUEJ3zIRgiViEAE4JpyTp1FBsWETJeVAdj2hIYqk5hIWJFsmLwvQ3iTZIO/YAqFTpMUiRZ5UPVaC63jucfffXuxeoiZqnms/lifrl5mmLvOEiMaiJkKZWYc9Gcy4hgkguCePIzNwcT56GqgpGgCwGMqjkIKqBjAgNnHsgpmWFBz96FEs15V0QuL3twT27cKgftoTvwuTAUJSNmhzbmnFzVeMYsScBC6wWhS0N3EW/eOOQCScvosOLgvfFoRbdjXGfFZtasuwfL5UeoakpxgHa+GtvlocE6bq88I7s0O6r77cY39fnm8REvOATnAmIZSwyBx7Ihiyo6jps6UOPdOkVwjgDZFNWq4EVyjmVPI8DV5TqAGzZjEjAmoQJsKLkSFmEduLiq6wczrCtoFtjWOZfROTY105ByLqmkFBVzaF0cEgzIBmWIYlZ7X4oioSdikuBwNgvOpSVBNfZNWxl7Eojd9aBrbMY6aFWFmId6ganLbLKoYaay4OPXfvY3j09e0DQACgF6r4iurpyoskNVc44oNIw16CC5
Q9EwW2RVF1iyaCppHdvlgfOAFZcxYuorchza8/OLenE0P7mZoGhKHgKoSR5c5YgoDbEf+mC+ciFrMlV2ztQIkb0fhhFRAKlualAxpW039FeXoHx4etM1fNFt+s149c6Dy/vnN2cnd1+80zs8vOhnT6/e6eXSyiZB0zbo5iUNJF3WHhEdo8bRaw2JYPNEdeX9EXl/vZWwPIUhRFHSY4FDH6wTcg21fAj5uknvY3EmPCqIURE0QEUgEN1lhQAjpaJTTzGigVqOxTsMzKAqsZSoOeGuSYascqw4Yc3eOy4ees/Abr70iLw9L6Fi5oqdIXhIud+OJRCrM2MRdQYlaV+KA2ZmUwyVrxdVaOqiFpp5VwphDiFwscrJOPSaSorinCMktQxccCrTzjKWqKlkzFZrdKMAkYZghaMJ4IgCKKKJXFDoScciRlyZqKhkKYwkklBKGgVEh5j3OhHul8xTAxLsF9t7BWhak+5JItjrK9OSeKchwTPhZ7c63yEKz/SZn1qi7x6LezsaPLsicecH3ys6OGX5Tv9nzxBNEXJ7BuLZm0/GcNvpXX/J/bTHjz6sXcOdPIU7qQN3QU0f5hdODNqUX9Svtz/886/Fq3R6cPPW3Ttnt28dnZwWcfc+cnp4tAgqTXCBoVt36023WV+37ZxBQqDL66uCenZ0AArrq41IZnKOCcByMSvpzR+/o1FnBzPf8Aidr/y8bbzzRvjWOw++8903Lp4+ZE+uBghwerLoN/1P3nnz6996/fkbJ5KVeQouARAVNUhmqARgRSbtJuc8xqFq25QG50Psh6qutGhgJBBfB2YvEwQBjAjeAaJu15GM2AUpJUkax568h+bk/sMP+mwuIJBljUo6xbcTIhGoGO/qziaX0wTamH04qHYOQjQxE8CfzlNX+lBexA81xwmB2UdQ4U5FMt2btXan1SY4hqb3IGQi0km1sclwukeYpriB6eHPxCAAIpier5MHbg+mwcSjTXzTjloxpGdN84a7OKz9hTHlA+29V5PpbkoUAlQtZfKLETIaqpRJCCUkJJJSANB04lEQEB2xmsCOYEFCgp1I4RCs5OK9My3TF8ZIgGCihorIU8i1FNkVu6mw87skYwNTw8leZ5NsQ6ZqYLhz7amBmk7pUwo45S2aSEZ0zrEZgRGYlJyImHZx2mCyM6FNLXUAZJoVC0xYIu6UHXTAzITk0AjIeRo2m9tHtxnJpnMGu8PYdcepAaKpESMSShGVzM4hAE3ooAIZqoiVzM4R8U6Zkkm9AkBQACZve56Ip7J6QBNQnWqQJpsiMnEpmYlVQUoGBGI3RXqDARFNWhnuDLakplp0wjN3NzfZ5URNBj0iflbYKKoEKCZTfjkZIToxNRUD5N1CFaUIMcCUPg64GyQTtYmYcyYyZsdEBkjgwUw1h1CBGiLsOmpUEEnLRDlNH5PVwETRGRGZkeQyVT99KBXFYa3miH3JIwxbssFjFhm4UsAcWEQGlZWkK3MUVdjPARoT7bc98VKym7Unh7P5dhPX3ZWvMDS1gSEZgI4xF2PRMGOfUwas+2yzunbk4ritqqpqVJmRcB58qDWXEdBEMI8dhSZrYZS2ag2wgC7bIADVsiZPtQfkAii84K2USyKrXIkDJWkZgXjRLqQ/evmVT909PVtvnh7N6/miTkmAMVkxkJwVchmu18swF92uBqXZMkoah+vZ8YGfNU6oasjGcpjTvIcv/eyXx0/9NdcsfvTGD9999LAbn2zzNRcD15RogLaYHQMIUqlAY4fFSFCNYBh7YKjnvl+NWqzxbKzDaOgxZ42xj6trXwcpsVk0RTA4YRiyWi4DgszmVRxTHjCV4tEQOYTWTJBxSNuiEYqxeeYKzd84Onj14DBIGEtKkpGRHYOaZt4MBK4qKbrQVEalJFXzHhGcKsakwyjrbSbC9apzoKWkUFXVrCWA2KVtnxrHtw7quqRZ1SzPjripDuo7xtV228vsEB2pKrEHzUlKLAlAS5FsQ8WzmioRDxyQTDMwiZVBYSy2ybRSGVh8WzXEc6OA4BGQdQQTsAIkwMhUsWsQW4RWoXZugRyQKsdcTNCMEUxKsTx1WIMAiGeoQmiiSsm9c9Z3H5hebbdPm9mpW9xbHB9PV8Hji7598d7P/cbP/8k//d0HTy1VR3Jy+7/7ix/92itf/t/8vf/LVf8BHX3hO+98TXDrZpofXF9c0cm9F9Hx7AC6Mf/h1//EB440g6pe3nbbLg2FgU7Pbnzyj/7gn3/iE689evhgLM27q83/+rf+5v/5P/4/ufL25z97D9jfO5q5j796vYb2hfn1oN94/aFcXl4+vGaoXvrZ586ffgBD+uG3vpsHd+eFk6fdVQ9X6Q//5c+89Nwv/upvGK7/8Ct/Ub1w6JbhpddeWS/6fH19VXJcuPqgyjlRSJ965XMf/PBicXAmkjNm8wqBfNNcX60V8XqMbQwKrjlZ9I5z1puLdnz8pBo76rQMqeYWxnV/eY4xdmNszVCt8t5AQNQ7rwLTLdUBmgpOMW4IpRjZtIGCxK7sthrBGbExAYJKkWTAhoqmhIYmamg8Rfwzo6ViUWAxb4OnB915mKHjSrjtRgWwd994qz5eHh/XUFIZYXaw3AquStymITAHpapqHsbzF59/7eK99/tYQuWfXKycr5D56MguNu9JV//9f/zWp1997e986a95IjFQEeJp85H6WFAlVAhoWcQxGSIw7No8CMxQi7rAXUq/8/UfHhzRc/O7Lzy3aCoogxTg/+S//+r/6m9+8aNH5JbjrTuLYVh+6933Pvvyy3GQBw/O6+A20v3t/+AXFgusfNWQDV5ePjv9va9/uzm86bP7xc88d911N2+Fr95/46IMH7z/vuDg6pkI1Rh8o+v1NZCxd0PsKnYoSMie/MxXUYbN6roOTT+MFMU5V1E1xozkF8uZDt1mu7p745B8zX7qzgAAYOKcMzsCRAVzjAI09cWCFAMFUVMduowUkNiFOpaSt2XV54LDbO6XJ6ebIQVPocmOKK+2Z2cHb73+fgW1aqmCzyljgeBrx3yWCNiv+43mguSqBg6Wi9T150+ezhaz1ZNtHkrd1sgAyExMJDEXdk1VNTH2psUUTFXFkmYzSzkiOzMaTRezZs7+bDFTicOQgGidSicmoIERTSp2OVm0mCRoIlc557iqK3LOAbaM21g0K4E59uvNVgmSiCPPgLXz267zznnnR02hrsciMN2FiFLKokZozgCLODWRTMFlUDMlJkUgMC3F+Qpg4tJYrRA6EVVTds6QNKeSe+cNCJ4OV0/iChwXLUYgYt6z5oGYKm6ouHunp68+9/wwjv1qrJv27r3bsXTb7cqxH0rKKs5zHGIcx6HbaBnGLpYoqBhCyGMqCTy5edt4BpFsvumGnjAgQ5bITITonEuanJ+bQcnZwByhiRYBz9XVdT+WzfPPiatPB3RqXlPGkpmpbhtDAGIT16VSOe+QUbMjjJ2J5i6NK9Nl29w7Pm5s8fTxej0mcXVTHR0faZ8Hl0yFg/NX26vbt04/OH8kSZwHEalrZ7gOHqgJNrp1lxdtXTtf0pqQ6upwjJh
tQGPIthrWAOworLabKjgtWbJM9H/aL5LHUYeUU5/FcS+STZkwA5gW73FBVcnZKjZTcuJbZw06aAgpSemiZSQ/C4WjoRplZ8DKRQQV/URFq4W2DsFlHdgH56zyXsR1vdQoZmsUULUcDeIGJGKSUCNnQ8ymMOfmuDp47dVfO731KckFIaGlcSzeoQskaIqkuSior+pQ12UcUtz6UKOSKEwgpEoeu3WYtdQEwQxFTMUxlaTr1QW7ZnHrViIbu8GTJ+JhHOo2OE+G1g2bqqqr1keJZsrOITuV4ioeupUCVfNmWpehaBy6rjtvj2bOzwu7y75Larnru/fePgr43GvP3Xzt3qqYe7hatGRvPrlan1O9yGYh1G1VxxgdeQCt6la9X6+6gGgFmGZiB11u/OxIzEMpJJBL25dTy6Y6z/3DmixADIyGfsxRSzazFMtkv0VQYPLMEpOpMQLYbmVdBA2JM9XssYxsDEWrUElRAiXEUPlikk0dk3cOzKyDcbS0sBvPH8xn/eYqz5pm08dhFOYGo6ZcwActqgJZNCV1TMQuizVH83pRG2JSVEXtkwKgp8oFhST9YLGXsTS+HccE7JicKqUoYpL7TWByiIquIFWgteb89JxHIQvQzKgJ6DyoBzNfzfzhcZ+vyTtu62gRRNggp+IMQhVSyv6ZrvNMucEdv2PPpJZ99M9+VfxT//pQ39kt95/pN/vF/96n9lMgEuyr02wy9OzrRp89dA80PXPF4f4pkwtnZ3Ob3n5PIE0rvv0jcWdx25tsEPb88DN/2d5arnvRCJ+90IdJSjsJCZmp8lY3ePD8jY8+/0qmtjk+wbo5ms2PzpZuisbVeH29RqNZ09y4d6NtZ8O2C8H1eTxYLIZ1l1JMuYTaq9Hi+AxB+r4jJA/OKjcMQxaazZr50TwN6Yff/+H3fvD9882W2DuHqmKjuSb85K2H//pr3/vg4lLR2MPsYI4FgYuqgZFjYoCpat0HLwJIwTFR41UTAzIoNyxlgGIpSh2qlDsfmqLCLogqEhEZaK4aXxSQKJk4V46O2rfur7776DqaFsvOo2qZ8BBTZWTY6W2EYIhMxIqEqLvFMBKhQ9gVaExw0D66hWDqDgMy0GdY2t7tNfFjYGCEuzcABDACnDKDkJAAyBB2NepmhASARIS7mGPb9flNkb+T3dBgpx/sh+YkGk5BQUY6jWQDJWKRDLvAgimQaEcP2U53MjOdCBdTnTxmOysYIenUHVYmBQum1sTpAxIhuinCXa1MosYEkAAhE6sUhSlnHXVSwgiLFAMkxyZCTIBAjkVEd4VchDSlXCtM4TrOIQAokHOAppOutI8Nw0khAwMAkYLM06mB/afYuRpVYMqX2uXyTNeVIoHKJJQhMU+ex+kokEhVzfKk/O1k3ClDSq0kdewdAJoRYOrVxJu1RUBVEd20C40AooWBiHl6pqoYECIZiE1h5DmpIZg5H8wsl2w7Essc+0lGEi1ECARqRrivOSbSImaKE0IINukyZiqlAICqIrEZAYCKmhYkx0wl56kjGIoiOyRCMUSUUpwngh16JiIEZmhIBGhaCk5p4VNyARMqIKCYmSjug7cn/FEnkW4yCRKKlemQRYsCARkxI5ho8c7lnKc7ngGIKQEAo4qwc4AuSwGYLggi5gmeJGIRAVMkoF1O1k9JRSleqzn2ofQrKdfMY7EYc08RDLJ3Q99tkowKLGBifh3zsOn7gQsAxjVggFJbrrzjg3mbNDK5mIfKBWFdXV6XVHukUgaFEgtbqKNBidtx3Hh27YJjtLpuFrUi62YsyHN2Tc/n7D2aqmiBqpkvby59aLD1A5GJWu1hKDCMmNWTxz5H1hCoypRiEVfVF+fDq3fuffGXftXier5sK0dZzVjH2CNA01RRIRvefPX5J298g2y8c++5eJ0aMh6XWsziMMQMUHyYteS3T7fS5OViPmyvXrv7wkefe+FocfKTB+8yu7cevDOMm25cAw1p7LKNitgsDlJRxw40FhhjTI4NOeQoKYFNVlgh6eKT+48b7yDw/LDeqqikAlvOQ9rK2K09U8wlE2OLLdeAueSyHRM5B+6g0IKcZkbVokSa+rtn927PF2DiA5dsoWIA9Vzmc79YLjuBwMTe+dlMc75eXyYpJReH3sh8aMTAxIpMgXAEwmUTpRQmKshdKmVcvXDoZ6dLq91qc3lyehQqLv2D2UFbKJaMMSbn0XFtZqVssvZCTZItWgAuIFGkJ0dMfSorsG2C65i6tjl1tnDsyRFiiXHFBIijlF6kF8gEDVdnwLecO2DyYsDsSlHAJEAKk3YLiqOZsKkjD64CH0Q51C0QUOGcxpJ6AAU2lRxyGser6Sq4dTq//8EH/81v//7cVwez/gtf/tgffPPBu5tH33jwR9/61qPDs3sruPXzf+1vx/SmVn/x+HrztI+zk6MnT87PTm80Q7WOY1lLL+Otdhb7gRs/Oz3N0gzu9FNf/LU2wFLK6/fLq6+89i/+3/9JSKtA8/feOl9fpGwXv/br/94/+Z9+f+jjJz/2Mbm8f7BcPv3g2oXlE9Xm1mJ+VLuLbby8PHrl9p1Cj94fVhff/+zf+dKfvPX6m288+uXf/MyD+++HVjQ/vRvC0Qsn3/iTy5dfOrn/zsPrcXW0cPcf/0WESnke2GXzY9IRAgzmj4+FoBriYTNb3b9fOatUx8vu/r++0m5gQxWAKGOUbOpQ91NEI0AtIxgwAYhNTmr2TEhFNIMhajBgZEdkYKjA5LRkMgguSCnRMgEyKDIhgZiaI5pC05gzmig4FB84qpBD9Eq1DOtBPRcQj/rkwXU9c2FwcqXr9fXsoCZrtisbJAGVujkhRRIHBU+ao/P338bV4EOIUpxH0ZIkPnj4nl+dL5cnAefvPH30T//wj//6L/2iFw1NKKoAKEP+4Zsf/Pid+3/9S79IoIGdqPrgpOhkHQcjKfrbX/n6Fz710cWyvtXMbty8/e3X7x+fNOIVox7X7m998fM/fiPVtw4zX947FW/00ZdubyVef7C9d/vOO++t3/zg6vOvLrSkP/raB2987d3f/LW/8vWL73/iZ15ZLtp3H116gLaC4Mr66QeUtjePDx6t3w1t210NRa5CG4AZtBiYWSYL3TbPZ82iPZxVQbqRAVnYKavmoR86wJxy21b9OJgkNbx18+DJ9RZNttfjbmK0mxI823HZwcXT3tmkyiDRVLYrpvN52Ky7WMTN2r4v6ycjQZ9j3czZgK9XaxO+OO8//YlPvv79H9auEslINI5DVEWyqvHg3AzC6mosxJmpPZyXfnCg9WGlEUcZHWPd+HHoVEgNVJCMkqABz+vZttsQUBXqMQ4iFuqa2Cto7eyF06MFk4gkA6ur88326XYbs9QVj0VzyofztvZVExhBnXOM2PdRDbzzm74vZoi4nDcpR9FcVX6TclNVxI4UmCkELypjHj2TQxhyUoXakw9s4ExNDavKY0VI2Ia66yJ5F3MJYt5XAqiGU7caEAMyYCpWch6ijYrsyIOYA2TvVnmzzltjSCIASt4TgYFyVdU8r4AD8Edfei2lPhdBQjJ0UFLMTT1Tg3
EcFbHEDGqkhiJswYG6mVdVEH+1uawwHC8WIAlFWEQQkIFMTBVICQPCFOWIMFniRadJEzEogjKxry82V+Hx4zsvhaYpEk7iiiyPCB4hI2vX9SpoKuplOW8PZ7Mup/Pza3aWvE/d9tr3HBPIgPNZKZvN2IXxymcIPLverIv5+Y0br57OgnTLpr4GSKUw+HHbqWJFjhGAcNbMPJGWcV4tUxq1yMTjM3LSkZ0vSRk0kEcgVStFnA9asq/CdBVsNpHAqaqoFdNQOSQoUjgImbGmkhUN0cvBWVOfBgy4HXLJIg2go/ZAPbq6NM24uLpYl4vBcgYlEQmVAwNGABRxYXZyY3523LgK1SirxjHGTUlbzQpCVXVA6ofVZRsWVtw4rgzczHEoenb08s986rdc1crYgxmYOCbvnZmgTjkXWtUNstMySNyAqpJDshT7aS3SX11BpuawJZIxRka1WJD4arsNYXn2kRdHhBQHR857rym3Tc3B5RS7bls7z45TSYTg2LNzOZeSRzJz3oW6MSIVkTF162tROTm8gVWIpLHvc0rp6vrpn39jkcYXXr0TnjseFzMreAx1HTw6n+7rdy83MVlP6eBwDs2sCAlgAUBmPjgZxxRcU1WH15GDiyrO4SKJxpyHaxjX12V7TuXah6f+eLOYxdPTpiher62/jDlFYgQ1M0FHYmCEHEKKuUyxKGrEjMzRgJNelV4EdIrfQQS0EHwIlEGUd1fxOAogObTNVS+3F9fDyE7DAcW4RufFqBtKl6EvyGSiJVtu0IcpbZQUPGMToJ0pqAMkwbIdQAUq6lSrWdWcneimkdU2JZmsHUgISKKWx+SINEkC80dNCk77MYydXFwM65G4nd15DrxHj4iBJcMoWRgaMjYJpKmglCLGKIqaRYxpv2swiUJ7SWXPC+37HWxCPfZoDn4o/OBeHPpLehHsE6InOQifqTT7l3iGD+0euaNFflrB2XFMUzTw7qWmV3t2HLpzPdkz3xsi7hSf/SHtYZb9i+wfajur2TPJa49I7XK6dx9uijZRBDPzvjk6XNxZzI+Oj8Lx2dGtM+8qrwZ57HNZX10s6tnBfKmltC0entSbzWCE50+3ueShS5aiIzuYz+vlQRG+Pt/klI7OjuoKJY8llsXpsmmb683mL7753e9/74eUogXXztpUUl2TxtRv13/6h999/8G1JFQiZrp8cvEH/+KPj5ZL0UyB6tCKSBVc5YL3rmkaIFfVzdXled9du1AvDg5B0Xk+Pj7yjL6qlCsFxlyY2VRVShoVzETGojokicUWTbXeXkeFRyvqB1DaZx+bEbKB7AbKM1VOAQAJJsM34BSKRI7QO66ZHSAQ4mQtnE7Ks/qlXVDVFBg8yYk2jTLdY2+TFMg20T27sB1FBCJWZNq9NILta+535SRT5tH0arZ73/0BTGdcxXbHxjxtd4oKoeEu5GYHmBGRqQLqREAxsQmoybPrZR+wzmaTbKe6e5PpamE1VVFVZecBYEosMpsW/2hqKgoggDjFG01bZ2WaQyArKCCVnBy5UgR2Xw3tSsFwZ7vbfVuqYEhEYskUJ85lmvaZwsTRgxkx6eRxA1YthFPOAhK6yZ5Jz1g/MAQ2taninYl2s0VEsGnTAHcGPTMARUQinjrUpho1cmRFHfva1YwIhIaUgLPC0dEcQRh54gYBJtXP7TKvYBfzbGbkPEyF9SLMBAaGpKI7PYsJAcxUpUy+LWJnaiIFiSc5zKyYZmJCINVdkJmUTMQTerOLLEVWUJOMzM4HKaICRA5ACaerRnUaa4TeORGBHUFGDI5IcsmIHhGAJvckMrEBSBEiR4imGZkQyMBscjkAMJECainOO0OZwrdwijxkN8VmSMmIIDAhgUjEKlnFFMHUmP10vifCrohIyW7a7dAC5Aintj1AQt3fwvcGtPhUDYftSNqBrJFj1237futD6+uAuhn6HsFtRomBo1pAyJnJVQRa136zGYg052xgTU2t822FpjU5VCjzuh7FLFgqJatVAdCSQmGn7FVJKodHxzUjeoBNSqFirgMotVSpZYsZ3ezo4Fbl5x6jaPbeWVl579q54xLKkItoVVWGmraxSGLkymoqzfHyzuc++QvOlzzGys/Pr87v3j5q6nYEYgYkXD+5mHlPVq63F6ftrAK+eXjT4hYkxbzRiJZNxVfhICyXm0ebRTs7mi2uu2vmrqTR+vHekT+Yn91aHqOrZofh+vrp2z9+I1H/NA6PLlYzNAIc8lYt+jmN3VZ80x6LbkeRnsaMZhZh3Io/8JZy6gklJxmoMSQf+w4pmKmoGKgLXNeeS1ZAKOYdcymLZtmPa4ViQM5XB7PmbDaTMZqBryqjCa7E9WqzqCoArLyvm6rvOgPMkOcLVzSPaXQIVgoBoHMi6sghqHPEjO2sGochjqlP5jxVNs6Cv3nzIOeybBf95YaPaNh07GfadNScLOaHY9yoec19LONQLinMVIOhhbYxiwXXUApACRVL6SrCCg49zIlniqq6BS2AsZTBICuWbCNzQKgQ6sBzMzIEAgXL7BgIzIzJ737ksWZglV41i2SiAsxELtRLtLnxYKnLkkrsqkZUxO1+XaC/2MwsvP3Ok8O7J+dvXW/L+Wuv3O2i6eot8duqhjZvtunorT//wz//+p/cef6FQyYgSOQ36w2qxM0l+ZYcDtvt5uoyVMtFqA7m+I1//f958c7NVz/+iYc/vv57/97f/OPf/53tW9et4ydX6c7NW+f9gztnp69/7Rvjo0sSe/0r337hkP/W/+Hfvvzgf1yEk/nR8e/9k//it/7q5++FzRf+/c/+2R/84OKdzZe+8Jn7P/zmWRi++JmX8/nF97/5QHP3b/36z/3w6+++/idPjk796orPV48rh0ez+tbp8fpyaA7aq9zN3KzLxjfueDefm9vc/8F48QhXmytT6tM25oxESKEIKBAYE5laLhICTbd0VWMmJAjschFAnO5UyKSGUwIcM5lJESXk3bwNTEqaXNwikzOZJ/DSkzM1NGLvEEi0KKECKJIgkqljNEerfhuLokFgjJIQStP4pm7v3rzz1oPHR3cOnz69ImShLVYUZXT+UAVKzABDCy6sbRgyGSTJ2DgK2K23dVWVARJfg8bN0+pHSfNXy6985NPO+/vX20+8dpdYbp4eX15un1x1xwe1CxRHFZL79x8fLmbLo4YFLvv49Tffv/3SCy837s7BUZD64om+fT9/6gUGhtnM/+Inbj14b3j7/uO/8leeBwAz5CzvPLp8/dsP/op8+vJtqbB68wcfSMY/+t0fz+30z37/+ua9w8ODxZi3924uVpuuH+NiwV/4q5/7R//w627uQlMPYycqXqrSpYrcyeGNx9ePK27UAKm42iHBECMYGkhK62HozUnwlUS5eXBaNAfsm0X75Om4uVg7woO2Gfq4m9UrIFKRAoClFG+7IEMkJODdGoErXzdABLkkid32en50kIf8+Gq7bNsSZZM0duZMGDx59+jR+b2DAwIHwMyqqFkjcyVmamKavcejowVXgcjZOL7z7pttu6hqJ0sOXSm5R3PsfZc2i3BAkOuqKUVKt
tp5NHKOHFeiiQpnsVLk5sHieFnXnkvMRZVdeLy6utr0Dim0fLhs+268GtN6TBsoL7SHpqpqofIWEFCDY55K30xUhJlBzTOiWNt4FZEsgxgRi4EBNr5ebzfoK2Ziximr0sRSgdo7MxXDcSxNUymRc9wEF/stz2bO0ZQpqZKMBCExI7N6ZQcERR2Rql2N20fDqjghQpaCKiIjETA5B64qeuTbVz76MUm5u+6bpuG2Oj0+ZAebqy6r+doZUS4KxRgdgJ8vbuU4PHzy8ObJ6diXcZOwSKgDQgnOGq7Pr88NAkDx3imyIjOHkkdQrV0AwlLANHg/K5aAi3dYslgElvnm2p68e//kzjE6r2FWzGuaIifJhyBDZl+xCymOKlhXYT0OI5ZmccCGq8cPyUuYU8rKsT+qWigxxs1seXvWBKSZ9uM4yDb2xtIswjBaBqNpzq46n837vngOnjBlAUJmx+RStlnbjMMqaprXNRuaCSB0QwzkZnVdrFBFts8tEVUAY+emOiQGJPYOg1FOaShF0CiNualdFaBiygBtU40UC1moXUCrKk/FjZvkCGwUKin1RYtlLURAZK7Ctm5nB7Pm5MwsVOgbpXx5Pw4xFT+UMZAnxqim9VFPyxkHVvXObtbtEZ188Rf/5mx+kvOAFitPimzknXclJ2QEUOc8h6Bqk1KKMG0kJmRG1bi5lJLa+VloQ8pbBAvejV3abLdAzeHzz6dQxe2WFcFMUpnPW8kmUYZu0CLVshFVIgjeS9Y8jmLgXJCUna8MTHKUnFM/KFIzX4BzSfLQdaJKyVY//FHYxrt3byxvHJWq8otZSxz9UMOs0iPBPGziT1bXyYWnVz3OZoxa8lB5rVzl6iMjhqED4MqXPj1pXBsF16sMtJwFroc3ZfPmvOm8v3jxhdPQEmIKHK67ki0iqieAqVE7i/feSpmakYKjUnY+AZ5k0aQIWoXaaitYTE2LVYzgLNRe0cy5XEwB2IhVcCxXD7fL48qpisi8DXhdZsgxio3SEJspIrGrSMERigI5nM0qApNxFOLiOA/Ji2nOjp0xu9lS2zocHltz1Z1fhFRQIA4jGORp9ehZQLwj11RSOSxxuLwuV6u0zcFb2/U8r3LBnLFlBmPFmmZHClhoSgYWZ0jonUc1KWLVszrMZ+LPs3SevRS0M2ShwbTrvTN4Paus36EfP80X4V6WmVSXSQaaXvkZnbT3Ik16APzU0z8EnX7aevbha4M9e/7uIPCnuKEPj/0ZGkV7ZGjSCJ495qckrp2UZBPc8swStdv8NyTExvvP/8IvzVBCPasPDtVgu1lBSiKjr2bL+WlAGsYxjVHN9dt+HBJ5p2KLg3lTBbX2YNnGflxdXG6G6MQtDudjv0m9zJqwPJiNUb7yR1/97huvu5ockVIZ1pscMzB+cHn9xo/vP726GrLS5NpRVdER8L/63T8EQ+8coKsCay5142sXKu/3Ack0KTc4OUzU2sbXwTeVb9u2ni/qWX3jeHFyMA/Bz+aNny18u4hjd3G9RXbB8/mw7kd73NkmMZLRpBoaApLqHgKzZ51lCBP3Q8zm1IDY8fS7TUzAjIS0i47aReHhs7vxPl0KgJEBAHedY7Bnf3ZYGZpOgJEREzs0RZiMWUbkwGySnAxsSrraFZeB4TPRcHeu/9LI+ynD2UQJTSqJiunkOFNV2y+8VZWJdTdZLog0QR8IND0SkABURZlxl/tjViYCCwAQ2TkDkJIBDGlKCzIknDJuwLDsUnRQdSecgYHsSTciN9V4GUzhmzRV/SGhThQJEe4Na6UUA3CIZobkJq+eAQCR7VAfUBXcXXOTbuWnOM+p4g1hkl4MdvzUzrY5tWYAwq42EFBsStthFYFdSpEpGDMqoIqCggMCEzRFNFUVMPZOs3Sps2kY2JRPRGCGTLZrxRNiN+XKlZJhJxkroyOiokLsRLLt/IPGvG8X4Z2t1VVhSsOSksnhZCxABgQsORM6dN4AAMSQCE1UYApvIp4+/V7aJgBSU2JChSnIfboMAMGkIHswKSUhmapNbp/pS5QiSITEmtVAxQRxwtx2W7NEVERFdyFrU9kuMTNxTsn5Wk1KKewYeXetScnEJCaGtgtasTLlZpgKMT8b5aqGCOgcAk1a1S6oaD8j2klFF09+Ujsa+w37XNI6576IFgEETZnbuqTNkEfbdtkxVPMKPS3qSsFHzQg4b2e+qpzzOefWiQ9aUTLhyXs1X7QybNtZ3XeyXLRczRCYPKUIoTpgxxQ3TZ0g98v5LenDmNZduW7cWVPPC0q2LZInz6hYSnl69WR1uTmcYd1An0fg4FxoyDEAykg6qCHi0lIDevjFX/7N49nSZJjVVUzu3t2XyUopEREBRFK+cTDP6/PNk4dH7QmJnL/7dPbi8Ri14vbJ+WOvWIW6pM3FZWwPbzWzBgTffe+R1vbRl44fP7hOIllNR6tVZDTq6iXjJ567dXrj1ntPVsM9bZqm246Pntynw7Y4uTi/eHr+4HFTvf3Og81lpAo8UxFZjRJd8ayxRPbqK1ylLTMxOe8ZRFM/llwCAQEVS0TIBfKqq2v1hF71/0fVn/3ckqXnndg7rLVi2nt/05nz5JxVWXOxyKJIaqAFara7JdgtQW64IaDtto2GARtoA77xv+AL2xdtwDeSDKMBNdBqdEuyJooUKbJYM1lTZuVUOZ3xm/cUEWt439cXsfc5WZnIBM53vj1FrB2x1rOe5/esY1bAYbNtPd14dZ42ayMKTbAojfebzdZzc+u4A75g59XUu8oASswhNGDcejeO/XQLUbIiAkqhCq5yBFQK1nVXVbWsRu/kVz7/Ilg6X6VZ3ThHx7cWyz5W3WK73FbgqqY2rA0ETBwgqlaeE0y3AwHMQEYWPM2kDGYt+/ueFgAzD0kxIoySNyWtRHvTkchZFZAdYOf8icO5o0r3UjwAgYIUQyRmZwaItr/xk5oBGdooZaicSwmAW2iO2a1AVymOznn2tdgudCAhf+Fr9249qo/b+Z/+4HyFkM+vH350eff27OSFKjSbe233H/6Hf1yRu3Hj9c22BGe5H4/aw/Wg237jcw4N+sUMLCDwdjsYXASwRXVvec0/fetd3+D3f/jPrF9y0Xc/HVJjv+jz0csvf/M3vvTjP3276qCM+KU3X//w4Qff+sn7a3UPPvz5N25X//nf/lv/6ve+BU336J33xqWO6/jBWz9+9eTk//1f/8Nf/+037nf8sOJUmu/9yeWTJ+5seyMzV01Vs61Xq5SH4Rru3Hnxk58/Mqoen54Tz+DBY7u62KZIm1SH2mcEFTQgccxT/yKaAXpKqRgAOopQFAyYyTEiTR0a7DzxBIVB5wNIQVU0DLBjrymyd1ykqAjAruwpp0JAHkMgVoioqkmYlBAESAHRKBAIqELwzXw1LNnITJ2xhzCO0hy4Vz5/++H7Zx7ysLl46f589srR+G5enffHt4/X46UaD3EMxN7IV0xQ4liAmNUFQMriEHIegmkqqyBxfnRjffno4uLqC1/50uKkOWi7f/S73/9w9fAvvvEqO/rCN+798Z+884XPv/7iLfyj7//wzc99
abkdosDNO4fjaskEX/rcF84exaOIZdVfbuRv/NZXjzuZdQXQLi/X3ezwzgvzs2F1eDIDyE+fLo9OZrfXNb/yuXffjh+fXf/v/7dfPru+JKz+zn9ycLGN7/z4/Fvf+kl3/z968013cLshKuyhrt23vvUjiP12RVSVw8Xx+XhJoUIsY7y6GlFUQUvOpXLYNUElOqU+p64N2zyyV66wbpyknAqlHA9vzNu6IpZZ7a76IY1J4s5VNO0EEhKSqQAR89R4upsWIE7hdTFENuDAfpSLOSmAuqAuKAd0B/OUUn+2XTSImO+/fHw4w4OTZn3dSxEzqsNCCKDkIfZJJFQUnLvabN586VXkYbXZHrW34rD1s6o+DDXWJjGP+ejoeLPN3gVEQJBsMaWhWBYTj46JVKEoMNpB5QOTiOQxA1fnq/W6H0lxVtfzJjhGZM6VX6tm1Yv19qRt6yqwQ+FpxqBmgHVFlgwpxhjaZr3dgiFm1ZSc94AQfJCi3nuQ4r0zZgYMnodh9OwAREgzGIgoKEDpfJvNikTT4hzp/jYMRoQkqgQQ/K6rFczKMHJTDThc5E2CAoooIydx6Poirq4c+gpdy3z31o3g8Wq1betOlLiut5br4tbbdRXaOORhzAicx1QmRgJlsDyrF3Fj2+toCY67o7atMW87pqvtmYEJKLsQS2LyDGCaPXud+IimxJy1KBmBGggaeUAr7LmRrMuzLdP25r0VY4T54dAHTZjiFg0rF8QKEBbBlFPbhTe+9Mp7H33y1i/effnWnfbgqArhoJo5rpvkrrZ91sS+3WyvWfGwO+i3/cZyEegCzbDkFL0Pdb0YxZWso0YldKoxJfIujlsiVhnAtFijxE1oRQsRDH3PyBWzma7HbVV3RDoOu5rwqb88eOcYyNQZW1EDKOZ8c1DGaCYeQjIbojYZneV2djAL9ZgSEjuHoWu5sFVD1/HTsxWsLYEicSpSVQE8FQTvmYqk62vEOlMosVhU08qHEMIctRTLRUYboaxJVGtf6qrLffnqb/31V77wGzFlzTkQghYAMxVTNsOSC1cB2IOoDNs4DkTBVw2SmWQDgNhvzi7r7lZVVbHfhIqaulXDzbCMo7v3+dfM+WG9yZteBNuucQxaYOxjiamMOdR12WSxwiFI0ZyjZmjaGszEsOQc11sgyn3vq9l8fpDL2Kd+HHr0wbn20Xs/l/Mnb7x8Mr91RCeHs7svuraNOeVQFndODo4WmYMkJ3/2g4tar3y+GpbOXFlf1B7RsdZB2wUjxLgsxFV91McCQaxqrq5GC1H5gaUnN45rf9Rej1a5YEZkmJXZBS1rEjNFM2EE3i1uCxOgQhZVQRPVqfbSEbMrpkDQNA5Vc7IsSgbkA6KAo6rh7TpB0i64VPL2KqsZFpGUscM8wOp6m0ZygDXRMKbA7JjHlAGAmW1aapUERKJiaIgoWIZhma1azG6GowUeHJUCzlhy2p5fgGRAkywiBoZDGp0DAlZJaXmlpHpxJautYxqH9fXZk7pFg66mJiAMqptsoWk9og2ZM5ggqKIBk6sIUU32m2dT0ma6SNHeRLTrmNpbdnZ8a3su0cA+5YX7RfbkDfosKhie6zrPRRp45iLa/Wd7mvTzx03OhL3pY68X7HNquLN/PNeonr3Is5zZVOi0e+xupb17NO2qixCeU7vtMx6oXTIOEJ51nrLzoTtuOBNRTuniaukM2PTWjZuimiQPKTEqkjhs1utN3bRHR0eh8VokJZExX1/FNAxHxzePTnC9vBBMrHrzziGSvfX2u9/+wU+wH8jRcrUtOR7PAlAe0uZHP/3F2flqGBGYpoOmWlQACGvvDZiY2TsRSSpKILmMRW3bI067Prv6JUQkQxFxK0KAqgq7cA1yxYSmTRdms7aqu+7goDvoXnjlla9/5TXVoW67H/74w21GBXWIRZRw8o0YAU1Qnamu69lSGfaHcT96DBEcs3PEzJOnCCbFaHIoIUxjzQz3jrTdyCOGPV0ITJUQUfUzJXbI6MGZqJIjIgJiUJmGRtnhA/ZjfDesdnyZnRwIz7BIALbzzk7Gpb1QSiIT2WYan7QbRbswmxHi1E7FRFNj+v6NkYJOBhaRorAHHtmO0DWBeCYGM+zsSGqKoMquUjMAZXZqajZN/ICdA0BD2xV52QRdNgDUIvuV0E7V1bLjZO/je4RIiJNlCYDQtBC7HW3MAGHnENoF5HRnP5r6DadWQdwF656lDVEAJq8TIggUQjIAx2EaDkQOARTUEdP0FYQpoAeIwKRkCM6LJC3GBhfLU/YORwMwJp7QD1J23i7Hbnd0d1chInJWiqkKGhrI5I8D0FyQEJhUMpFHcJKLcw4mcWZy4BCDgZYyfU7HbqJHM7EAm2QjQsPpJwAoUogIiUENAaQUJBIzMPOuElSVQo4QQc0mjjkYOGoMy85xtZPjAQGI2HsWyYhEjKa7grxpPgYIzE7NCL0jzDkBQlEBRClJd+5NZGI0LTkrAE+5NTMQRZxq+zIAEnsVmdAKaoIAOx/WJOraNOd/fh3cs4rGM2ArZViv+5jSZttXPvRjqjfSzBZplfpVGTexH6H2QEquaVRQSVQzIjq/cBzEpG6DZFOL5GtX1aK1c634RdUNub9SYFc1vu4q1yYpaj1UlGUdTEXjoj6S4kUlUxPCzCRkWxdBpJq9L6ht54aNRumb1o8AEsd52w4xB4c5pyyiFgGVpZIyC/zy117/9XuLI0w9YTVkuXOjo+BjH1lxs+zbtkp9wjzk1enFo593oW27A2iPkwNl9mH+yutfXp1+tBxWmGgzplth3gRHLPdeff2tB+/96VufqNjJjdniaDGseyRu5gfs8fz03JJsnjwmo9rSHMqNRfNa+xJ7vzg62NxaHtz489/78fd/fPLxnx199P1339uOGciRdxmdY1BikUiGVRViip40pYgmVHkGjLGoKjisWs91sMzbKNt+W8QG0IyeR01exz5OanfKAsRlt8etpd/WKEnVgdV1VaQ0R4sxp80oyM47SuNIJsbISME1Cli5KseoAkCsWQ893zsKLx1WOuBmExGrKBuAAhC6rmvcbJmiDkPKV8osksbSs/dgVXCHvrpT8cJ7Vihk5lwtJRatQrihSmqQYTCLIMXKOKZVzGdGQ3AtxnkIR468gSjksYyEDojEREQQPYBHwKyK6BxPHYoZlJBa9YLaYAomY/ABw2GyakRGQzAGmLBtZfoWXG+H9957mrbpjNZP+k1zfJy3PaH7dJU/HvOCFdPV7PhQi/Sn2/pwfvXklJzWKHdu3lpfP23bo5yHHNNQYhUa57huZsVsceto7OXJ1aWqQK/zag6lUMsnL86XuYDhe08efHB1Eb3+ypdf7IfNxerhovlLr7z6zZ9f/MHpqkju3fzw8MbR9mLLndGt+q/8p3+7Lf13/l//9NNPNn/hf/qV+s3P/9v/9vc++OTq6WZ576svvH5z/h/++c9ObiywJlQpun3/04u2roeLKzeWQ09NobROgVwxpEgay2RIVLYMJirITIzFzLwTm+LZrMWIvNmE7KcCYtP1yMwTmSoDTk0RTm2qBkMBIRi
LYAiM3sgjMdroiEBU4jjFtV2NoiqkoowGDZGZDlmzk41BCm0p4/GiNXKz6F++OV/Jo7i8WMzd3eOby21/87BdnBy+zU/Z0WbbFzDwGIKPwxay91W7HgYDAGRDiCZsJQCE2q82G3COh9QPT8LBSX08f7DZfusnH54+7Bfz277q3nr65Pf/+Acn9259/NF1akNd3//i668/OH360hdf/L3f/eGnZ9cndZEQXnrjBPpy65WjeXPjo19cHS/S8YIQIEYJdf2z9z9qwknbVb94fNagHhwt3v7oYjumz79y37fbz3/zvrr+YO7nVe1I/9W331rV43/2X/yF733721dy/+vu1RdudobsoPrdP/7O4f3DKBcFuF/1R7PjIkkgsXdjGl1wjCCIbfDDuCG0zWbtgt+pd+QQbegzAI5lWBx1RfLVZfQU1EyNhm1K6ZlUBGbGTAAF6Nm+3TRFn2B90+ypIKFzruTSNh0BefRdaE4OuscPPp7NuhzHUvKwBVdzVDAvm7Q1Bi0gimRoqm3daVWG64t2vmCo7t88bHlz/vSTo8UJsQMN7WJWJQ2i4zYXtDFlIjA1sExoigaE6B2UUkrJKXviAqCozbxbr66QXFd3y2FYbcaaORz4w7ZZb/pNKcXg4HBee391PYwigpikpPWASmjaVFWb06YUNMs5z7vKgQYiIlKzqgpqKGBiRo7GHBHRjByCSZnClMwIAsREnj2xiFVVW4oxwcnBgYF67xOqmqY4OB+8bwwcEMUiWpJoYc++pt7607xMHkTFgStD9ByUmOrWN7NaqFZ46e7d2Wy2Wm2R0TfVsBlvztva4ZBS285BcLndgAtIrtBoCGSU4phL5qpO61SSq8B5YMkxqK7HuB7HrpqjEbmQRAiZ2eeSCU2sEKNiAeQQ/KgbT2igakIqgeeAboyqrnl8ttLw9OTwMBkm1zn046jOiJ1PWcBgamZeXm3ilr54983VcsygbtbRfP7oennzpA7NCQx8tn50++QAJIbg6rk7X2/Pt1IS3YDmZE6Ok1oBSIK6lexBG55J1rqqCVPGqX0EAvmxZJ1oUAJiEJpOiwHmnEtTVUlEs+ybcUC0EPghpbriuvJgRgREiEyFWZirgHXlNjr60CFx1QRgB6h1xWQWAqtiA5ZLSsveJ1sNpRgZIjuHCIwwm7c++BgLyHoWSqhqoOAOThRg7HsTK3EDlTETi2iMgsSIC6Qv3v7ib//635KUdNw2IZhCycLOsYFZYs+IFTUNqGgZTAYi9E0NgGhFhtEzDsM21M3B7ZtF0oTSkFKePnkyRH3hjS9Q666vz4dtX/uqrbu6ppxzv10CInmrffCVy2lIw8hmSOi8Z0fJJMVBSil9ZO985f3h3IyW4zbHkbyTME+Cw8Or4cmnr9xsb94/SEjN3Tt2cAiMHtXaSsYIyCc3Tr6m3EL88Sef/vjqgYlBNWMPJP3QrypsCzYIBSgotMvlkAZhuaLVJZx/kMLVwREdfXHWNlV0hcI8GxpwSWkzwmqVJYsqqBEAquioBZiLmAPeNXMDqZklYcKpEGhCX0sWExNFAydqm3V0FaEg5AJm7KhAdg5ThPFpZgFn/slVNiTmmgOW1G+HcVaHmCICgFoSa+cdOBaiLFobeMKSUxHNw2BSLMFUpIgG1o+y7MsmSsygCo4lT0VIk88CcxFdbXNKUBHlpKXkrAJU8jZvrpqaA6LEUsYh1O1KSyAoRYJa8JWlUVEVVKaZ4r4Tdu/wQdtRWXcuj/1e9HNu9N7lg89/uGMKTeIL7Jb6Ow7KL9k27LmbaG9iws/kdfbvxPbizmde/jNv85d8TPsyq2fsasS94rM3qHxGoXoWM/sla5EZ0C+l5mBSr3BX0bVr0jIzo6jOGYwpG1lXt7WvFXFMpRhsczo5nB11B2OfFjdP2qpKQ4pFzErTtAddnWOuAm7663W/bFzdHcyR+eOPH37v2995fH6aFE0sjhkdcNX89L3H73/4yXK9AUAGxACMVkoxBeccO5gqyafqchEF0ZjFOxIzIFQDJNKi03qVaIomfYbYvR0UzDkmJjRFIuoRToGZu0VXNbOfvvfJOx98cuel+1y3KRoboanITnPTnX5ogGwq+Dy4tzOimNqEa3HICEiwa50H2It8MPkYaNLjbC9FIhEBwp4cBAYAqhPEGmESSiaR0pCQvCGbAhEaTAVoMg20nZADE7x6j8cC24Osng235//YVJo2MWgMVFXVgMgAEHlyQk1YIkOkScQBJXKllJ0BCgGmi4lZKSMRgoECT/4OJAJEJhbJaoAmZobEpgVgItcoIJBjNREVBNP9AxFoAoJOb15KIWJDUC00JVWZkXY64tTfBTp9aCF2YDRhmJD24hhOsheoCSKrCiIBook8E43NlMntvk04VWvRhH/XKZy1QyuZmoJOkT+ZqNWishN5kcAUkMRUTACnxjZih7M21I43Y5JsY9Kc9cnlKdEOuKSiz7NsRLvDu5OLZbLLSSnT5wAEVfM+lJJU1DunYAZAjqejwN4hEZhlKZPTzpAUCjsnUhCAGMUAAXUqtGGcoEiOHQKIZCaC3UZNcbTTvohpYluJ5Gn3dRKaS4lI7ELQ3XfFRJSQduFmQJGEQGYZkMwmqY13I23nJjOTAmYCtPtqmalpcAFQCJ0WEZ2Kp21XOQdIhCKCyMwu5SGE2my6wqGagBkQPPtiTFwndgERS9ltG+ykoqvz0+BcH9PF5RAjSDYHvQDcvFXdvvcySxw3vu6AWzw8WORohwe3zdi3wSp2rvb1rCilYU104asBkEM1t7Lo6Ia6Q+FqsMG5Y9+lolSHzglxGdn1I5lxkzC1NKvwtknLfMmO2QNmiHlUCowiaXndn2+4a7uT2eKoT6vtoJWrejTq2nHANvjt9jJJQux4DGrhf/Kbv/3NVz9f8Imxzg4OBSI3aHmsamdFm85hCLXkB5/+6EYLTetu3rzx9OGKdBW62tBW261vmwhhNSyNKt90MS4XjRs26c5Ld24uqtNH51gaLpTWG4q9ZiTnciweRWsuUtQkONkOG6szZt2cbzfpKqcYo7/VHPyVb/zGb33lm4/7q2Lp3ffePV9ef/T0yfV6k7PGgUrRdZ9UctOyQ8lSiBxLQFasQhItW91GdW0bHHu2cdVnLW7WqcTX7t92kiE4cm4ck4giQVUHysrKXefMghUx07ZuSo6OKjI1wop4ZA9EABCIJGpSCaEJoSYyteKC02tJV5fbaz5ezA9P7khOiJZzRiTdbrSSxeFtDYc5JfI5pSHnS9QstnXNS0xHXXXHoAiCc5VaAaQaPJgxm1gsYiY5jac5P455mcuAzI5rB4dgHaibEHUiBQARB5ARFGGSioiMayRnWCMwUWAiYNO8VSMgKik7RGYNvgY+RIXgQ84DOrefF4Hj5r2fnc3a40zcHrzy8ueOn77/Vgh4cPTCew+W1dFRXH7UHHT9ZjuOg65guRqq2h2eVP3y7CBAIBvH5FIQLdEjessxy6aHutv2NvaDD+nw4AgomlLTwElo0Db5cv3W9y+GEvJY/ehHy7/85754/HXgyzQ+zLrNX3jzV37xk3
SFZLDMwAlJImzZpVQgyBTS3lMTCyQFArFz0hqEEmrZaVmHcokLRmceNiOZuRgBkyYRXCalFd9dvJDG6UTbzkMmqsJLDXDjpO1MUf/puvf/6N00PfX17+7t/91a/+8k8//u73r/aj/6M/+O3/6K//r7PmyAxoAGJ6nM6aeoiEjAI2twFKqN95+4Oi6gzq+vrDey8+fFG3YbFYKth6fe/QbwOSJR89OW1+8GIz9aluOOdSABenS3BD06qKi9Xpsxfva/EatKvjetneXG1QZHO7W7Qtoqe+xNjUNV7ubrvF4pXd2szgiGmYKXvmx/m2++zrRszZmIIWy2NBpLZe9OPY7zedtnVbSwwi0g9btMKmXdU6QVWTe1Pm/gt1AGsXdRC8vr6p48nkOU06bccyWqz5/P759y/fK+q1NLmfumVAoKzZtcR1w1NpXJ//+Ont5jaBjSnnwzgOOZvO2esAXjUBSDwGVUTkJnAgBwEQjF1DxggsczgVhWMY932og6kFhpN7q5z00WfrYRijmpm3XWwcukdPKhLIOOSyWLXq6uoMrmoSqunQI5EW1WKL5cqB1ZEIlUvX1P3tiE4901Z0k6fiSQysTJkdJAZunbgLVVDk6nzQUShUEmsSQWCk2LQnXcfTkNNATAbKQBLqnLyYmuUyKqmaJmYcbg/bl1ePlqvFAh0SAwj75e1zcQH2OV9vQGoFwcwLeBCJrgDIw7RvYh2wnTAZaso9zbOueRNMGAwN8jwfNc+mWQKaO6CYFjCuYpM1IYLZZD4SR9UB3aI0UzoINV5MDaQOwlggI8qD88fPXnxwM6VQCWAYx74SlFCXVAiRUTQrujJDSWNG5lDFGFh1PJTT2F5h7vuR3GnIVSoAnolKyZptf9sjhfqkgSkX881oKXtgIS+GnrIReEOcUx6TSghNszxOjEIILUeA/tBHrNKYGYAJlxROJIrJeMiWME1KIvNqKqI+frSqz6Sq6JDy4XY046sXm8m2ufSVEKXpbNG+0a0e1fd/7qt/5dFP/XIBJJ8EsARBCaHmPJaUJzNvV8tqtShMNAu1k099IoImNNvbfhppff+ERaZp8DTloY91k3N6+eL5anF//eCxBbScdJwi87jbPfvgg2X78P4bbx1KD2qLpkr9YbfftKcnq3vnUoeUxjT0sW5iFaDk5qwLXQ2COSNZNISshWLlXFiCEzkB2aT7F+wJEX1SU0cSU52mDFjVJ+fc1I6eNc+L4iox/OjZ2//kn6cPLnzyanW+N03kYVmvuxqtWNIqSnO6WL12Up8JMoBpZJEQwRIxmzpyM6TRkuyvbpFCtwxVwUPe/eRbT777Aez2fc2wXsinTpqXF1eeh81mHzjqxY3MTU+gqjaHFczRSGlM1CcUautqX4pb4cg6ZRYSXgRhLSYxxChJE5GHeZ0bqGorEaYQqInYBiQiBomcBcWoFDdzNVd3MEPD2Wqnc/+1BHe3KU1jJkPG6F6qBp2snxKYxRCdEVnRcogViwASm1kpmk3AAVS12JinPjGyKx9uD8+e3mrxUFEQJEjuYGMZ+v3YjynpXK2EwIIwqYWqcUvH3IIVoVCsCFokmNcOr8Qhv9ODAF/5g14JNX5nqvE79eQT8gnAMa6Fd/1iPq/6j8+NgISo4K/ARUeX0PGlj6LMceE674T924Gzjw/zTsc5ikDwyhl0/NNmt8sd3nqOQR2FqRkr8+qRdx6qu9+EV5al+UnvDDAICEfS9pFKMxO4Yb06uff44dN33mNH9WNF3PEVwQJLYMlamFh17rcGRpzLy2dYNRMiAAOYaR1DDBjnMaMag0QKiXxG76gbIwnNy2NjYUQCRCBXBbOCwCICRK2wm6lqiGwGTC5UdYtu0lzyiI6BJKNvt4f1oqtEONBsM1FVIKjamAfdXD1fnaxIiEPjiCwZ840oluoBQDtH6wnRgN0/QT45Dp05OoPz6HH32T5zNKLdSZB3diOHjxFW/kmzjJl9zHF6ZQC6k5uOP7dZsjGck1fuhk7H0zu7Pcjcjq/mDgg2xwnvEpQAr9bJM/mYiAnv8kF2NASZHVUad1ekubBEEcmO70OJeTbeAjgSA8DMHbajODVfKkROAKamZjaThpFYNdOMvJmjWz57Xo5iGQA64PwmzGcNaxZ4AACP4T4k87kUjk119p290pLgmA28MwAeU2x2pHGhE4qqApEDuDnhvEsox9kgAs5cRnQHcyjm6PPx353xGdPDwrMeSEjqRny8JtTNrTAJIs5PjuQo0FSiCTylVqSJdDDxhGEZgKiYHe1mDsWMCJFmbKVnTRLCqw+JEIGc7yx7fuxvM2IGRHBSVVUjnjnihMQsYf5SoNm4dtQR58zp7KCaE2BGhI7IHEwzEgvKcVzPEiRhUQcv7sfhcQQ/scxaGc5X+NEyN9uL3KwAzKWSNJscEcktOyELzq7HO28T2iz6zV9ianA8bYbEsxwJ5BKDuzvY3bkT8+I6A5oQiByQUI4BWCLPGYDMzQFY2MzM9N+WioiQRPNQ11axe0GJMeesmoDqMpWht6SYVJiVJ8ueZEwg0RGkqYEbFsfAZs6IY86MkHMSMPFcNA+H5zcXf7bZfFD4kKfBTEO1CmEBWQCZcdFVrwXwMe0rXmq+IfJpHIhpPwKDvLgZ/v7f/N/9xi/+7Pe//7sPz85jtWra9u333qs5WhkTYWz99GR9e3lTh+pwu2+a7vnFRYzcxGo/TGUaY+jQc6ziZroAL6JuJTuXuq2LWh7yNGgbgwTa97t7y8UuldvDvqZuFSsNuN3sVosqWb756N2WdBF8c/ssgzw4e/L0xTv3T19bNuvxdnCU3/gLv/Z02Pzg/XcCx6w8uGbzYjTlghW4GUdgBo5ctDQVl3lL+giSL02gaVLLoO4GqIjq7DXmAFlJs01MWwNXF3DaJbq9CbARft4gNZEW3a3ZMGY7eTpVzNN+FA5c1yUXLamuWjByRIAiIppKJeGw76sqtCEUTXnM5NhyGNWa5eLp5bbHdL3ffObe/b/7W3/tH//L/2ZX/PXX3vyZz33mrdee/OAb32qNnu9vw7r57nvPmJt7i7BoqxGsn8ab59frU7/38JRQtsmfPf/h608+XZ+epMPOpxyrBrDSYUwffmf1iA90LrI0L6lMjg4oEmp3Q8KZWCZYiLOWQyl70DzvlLqqQ+95D96hVoYVQS1Uct4TMIABMHhGsFR6Q3OHkjVKIKgFA5M4kWGGO6noR+98+OjNt372r/xyh7t3vnH59KZcbF5c3vaj4XmLocL6/up7b//BT771qV/93MPX7tHq3snyo121aH/4tFzcbAs8jsuzb/74/e1epyztOkqEtmpSGn1IDcOPf/g+oZysvBhurzcD1uuugzAeXjzf3VyuF+3+2ebJky//8Q9+t/70/SFcSeTN7fbmcorYjKPebsZh2G0uRqTODyQX+3sg0cO0n6RGZkbwmiWnKTKnBIlCT2Ff8qlUWEZ3nvYFTRWHqoqxYodnAI3GRIrjuKoefsbqtzRNhpOVcnoeg+7t9joNm5tn33I5yf1EvBDE3Ge1px7WSFKGEuJSoA9osWz85qn
ut2HQh/H83CX0ikYFLHsCJPApcDWgsntTRee4OUxhifHJ/eqNeyF2KEpjIq+Z66JGHKpQLRbnwjWyRA7jmMGAiKa0e/zG2fd/+L5IBHNLbkjFnAGJSEjQCwc+RuOZLLixaCqROSgC0DQVqWNUFDMi2O2nUIO7mmi14Pa8++DDC8F68mmX+/rRchgoG1dtM/QHDLTr93W1HrabuKi6xf3bq+ft2U/s9i9wexOv3948W//jf/n/+48+/+//5V//m99596OfeusBIoAfiYOmyuwkbNmBfHbFgoFr+KPf+VolebvfOulWD4dxt1q/UQe4Ody0bbM5XFlYSMUBXfNUV8GtRAklMRcUiFCXMhZ2ur25hsJIWgdGtUO/t5ShQGhb83L2+Hz3cmhiQNKz1fp8ef70xYefkIrmvx1XBQRkoPNknglVjQCRpalrKNnRN5thua5++vOfe/fdD4UqspTGvqrEC5NxUlPNp+t7U5/MbT8exJFEmjrmw6GYsLSUcwEwIOG4Wi3JbepzzoWNqjpWTW3J94dDUZuKxiDhZLE4X2+moa64a5puvSzFMErVtbnPdV2FKoypxKYy96zQVeJaFDTWEYBzP4lISnnY3oRQV1W3ub1pFzGl4kVD265eP81FpR/un61fXl6AecVCjq2EnCCaB9BpKpGQGPqUAiFWjSNj5YAkIaIEU3SdWCJSFKI+j7pst9PlZInZMCdEc+QojFrapibwIM2IjhKKmQBlQ3B5sDztuirrYcq7IGyI07SvqBIAL8XBBQO6TdMAYFb8+fvPT7l5sO5AjBzqurrabtVNsJiZQQ4kxZHRVVVmz7eTQmGMdVxkNeQpgxG60LzvXwwYLGMEAxYMyM2UikIGNyQ3Gms5H9OewADJHQgQGBmYKOaU1GzUAszqZqgopJaBxN3TNBHLpz71qe2P305lOhygEAf0tqOKAyfrupNh2ptrCFWaUilWL8CpmFEZPUdtF9Vh6GuQppA5HXSyCpSRGMehHHbjYlkBqSNPALfpsKQ6ECjpwcs4DqtqEV2nkgxgPOzmsR/aaszZXU1oHHsWDLGiADcXJU2Jm8kHyyNpqdSAyUPABw9Onry+Xj5cLxbSD7vby8uLi/56OzrI2I8MeBLbNx+ev3n25Mt/7i+8+bN/IXOE8SDgEqNNgzAiyDAdWGF1cipNl1SRuI7R8njY3gh2Yvzy8pKxXt976GCm2UvWnOa43EcvPorc3H/zNRUvOUHWiKRp/PDp+2jx/ptvFoG8H0+6ZWS+3mysQHu6xooMSi55fXqK6Ii62b9c33sgwQ20FsbkJU3ClblJ1TmwoViZcNqwE2OEktAJXM1sHLYl5Xh6n9qFIiCDmQsJ3fb529/94J/+bv5wrNv1fuG5qRDL8mwxVJLdRZ0X3f03Hk15x5Xncqj9FIrVVaUJqilYLmmX+5upvxnK5OKi7nVVcVvBdamlfOVzP/H1737v5dVm2I2nVfX5t9qbXb96dO+Dw3jLuEt5xifnlABBi0YWQsRiDqBGA3ixknNP1KCIDqmhQ4VkufTDiHNYyJyI1ZQcCQSYMnFSPYWa3YPQqq1wygg0jZiSZTcHFRYHJ6LiDpGqRmJkMOOC7gRATVOndFArrh6FTS0EFprXky5qRODuTSBlzmQBHBAW7WI8jNcvtk9DEMlX12PqTQGIwNDIEClkHTf7MWdVsBlm05ckwiE0ThgqGbIGYFWVgC0HJMOShfkTugnCnQvIZ+vNx8CgO03mKMQYmIKbQUFgBAYkRjSHOb8DdwLTx3KQmx8Xm68iaMfl0538creivbO1/s/+w49B13d6Frz6rdkHdJR75gXbqzKuu0UafILSe5Qn7shFd06S+c+7VSDAqyzUx28Jjx8OIZqa1e3i4ZM3Pnz7XXCatTVVJQREDBQQsKiCgzooOAIwEyLmUtTNAeaeMmKwee0+HzoaE83+mCmPLCQEiBBCkBBtdocxzYICEc0QmRDq2Z1QLNO8ejZyYkMLsW6bk367bWuqY9tW3ZgOJhGBxzw27UKREQsS1qEa8wAAoZKimPvEzUKqquQRgMEspCHikEXG0BiTmqMfNbbZZ6N+x5FmAtePcVDgADBrquTsQIB3WsdRn0M4QoWOMxM7VjIdQ21+jKY5EYEqIqo53AV8jhm12T+ENJPQ7U53uhtP8y8a3RHQ4dUZvzvDc6DJTR3NHVQNAFkInMFfEbXZTOdDJRY7KqBmjrPQgkigNo/sOejFJHc6DeKxFs1nchACuDkTA4C5IR19RoSIgKqFieefAB5rsEwLHoN7jARINFOW7jRdJzpa+swcEefed2ZWAwR2z6pZRGZXC6G4GTAhESE5ARKDu5nOQbw5ik7M5ubmRxGNhJgBHJlgLnvQBBzmIB44OgEBIVLRNIeyEONx4qlOiETAgm0VuMdS7PSsTh9tcvais5Ba5hY1ADcrIsGKEvFc1ybMTGKgrjZTpdGNkPwOK2ZqgOSmEthxZvYAzJqaAxiCIwOoFUBBBjwOYMxFwYFIXI0AiQXnjwLKjJRyADegEKCo+0wZ4xkahcgIMCciAcxhtqRloLnd0nCOvc2+HjcCYqQym474+LHD8VoCLWpW6qoOIWhRBM85CUc+ftPcGdzMABkRaW58s5zzFCQAOPEMTCUtCRlYKigGAFqUOLiZlYKI6gqzV+uTUlGsgkik2vu00dFRhRBT9pR5Gve5z7ngkJ0Fza3vc0uhFOuHXYw3YXFSxeA5IzARuSGYJ1PLUx5vdLrZHW4P48tt/zTpFliLJiBCKnnc1qE+jIcEqA1UQqSC1FbBr68+1JwBqjosfSq/8Kmf+LUvvfbhB98+P72Xlfc3uwXAvXv3wfNu3LZNODvvStqf3GtyynXb3mwO23E8X51wUEkTMrPQ4ZD3eTMO/Xq5nDabe4/O/uzdH9x/eHK9TYRhGMtyuUzTbuyHk/bk9dOmw6uXm35zscU29CXcXgwPT1sj9ueb85P7y+78Zb8fYFisq8kGKCwch91uGRd/+8//4j//V5d/9sGlFe4dS9GasYkM6FLXOY9ZfUxFGJ2c59siESLYUNy9ChHMczEkEiEWMStTLuoFhDOBEc60NHN05yZKiGFwv7euxyG7yWbUp9vRTQMSumM2L6o5VUm1WNO2BGXYFTafRtseEgDlolpUCEII5Gjm+eVtUght9aknr/3qT37uD//4XwUb/r2/8TcB4Pri+uVH0737p5QhX20O+8OTz7759GarXnPk+w/u31y8zD7d7G/iKtTUnK4fRNJ+u69IwJWJNWOIZ1X07f59v/nx4tySNiNhVjazmgI7AxCAEKp59jLmlMyyg9yx+5JCKuPe06TlmpkNIkAjsVJwICbSyLEAkhFjTz4JehUqxGCqfToQVUQiYjEeddM3H52WYYDd1Tfe/W7XnK1z+OjljQjVXr74qUdTv39x8+L+o8d+9uSdF+9fv+zfAvnip05+4zd+7v/xX357WfDlxf7t691eAnRIt+WkOWGyptJtTlfPr6vYtssFcBxKiFUTm2IJP/vpz+03y0+/8dl3374MUv7f/83/6z/433zhb/
2d//1//g//49c+dy9tXbgpWhZdmzxfXR3OHi2vrzZ/7nNf8KtdyIrDIFTzIhTU5E7mZbI6tiUPowR78Hjfps3huedh2a2q+pT9fLy9ChDWIbKPafyR4+X1pBxea+7/4q0ny8ZmdUR2FygEBm1rh5d53FIjUC8XD7+M++c+PFXoVbsQu65Z58M1Hn50LjhurpjojJanTVwA54ub4hnrqphyHQzdyaZxZzk3ErIbRaGi8bTDe2tZt7obz5rTfTLNZW+7qqpyKghUdbFACQZIEuq2qnvC5LpbdbGtaBizEc3AyFGtKBGURpAjM6ki7w5j24XiU3OM+rpmUytElSdIh6laBCeIVeXIxQyRp+Sw7e8vFrUXZk5MEqMUy7teqZApG5zUC01axcgSrjcXX/m5n/o33/7Wolv+xV//xduPbn9wS7/yK7+1rhZ/+t44vrz4/BvnBM4ILKLFJAjizK5AmHcQzDnGi4/g5e1YLz2qTn1ZtKt68bDqHpZ8mYbDohWhahrLarXykoZhu1jUXduYYRoGALi8fNY0fFJ1h3ECCASMzOdnq/d+/P75+b3FMjx/sQncrBYVcjWMFw1VKWUMbT+M4zTeTbDB3AIF9+NNyszm6RYcqYKg4AxGwhKpKBSF3aHknF9/ck/V+sGKDqers+1ml4qaO0sYUyk21bHmLIt6sTlsO9ery6u27pC9z2PnomBh1XT31pvDdhqHGELNwuhk6G7EZAaTlsM0Lbrm7PNvrj/zWtVW+z5FInfFGHKxvFAWNi+xYkZPRecp1eyI8uKuE1q2bGUa+6lfVNWYD+q5H8AJUtbgVNUx7w9t1ZSkTR0xl8HcimfUUFVdW0NxHnPNoQpU5cFKnpIC0mqxmLIBx+ZkAVbWDJv9DqAaeFjXy3d2Fz7sXccQRceE3LX1w5SuFlzVQPuUqHHzTCRVVwcOpryslvcXizJd73cHwJktQiJibgUSinmyomo+FdN+yh9+dI2lOj9dMXlJOVTVth/64SDCgaBkz6loEHdAEkcvYExYSnJ2JStmWImRmGUid3BicXctWFfLbFmAi2WzEZ2aqk15j8gxdDkXRlEaVUdAVVUhZmAzIBJCKpYAVKSyuQ+WQTWRcBVCsUJmn3r4+P0PfzyMQ7taqOqYJiZRdh+2VawUC7iKoLsfDge3GlkYsZuKM6UxJ5oit6kkJyq5oBdGr4h8Gg63UlfsaBQCNuGgxbMKuQRAi0Vny3pMxbbbO6w1ErbtME7ehSIkAMkYCUbVcWt8swngJWVzb0JFpZxy9ebpyRv3z3MMq7YBwx3ub7eboegh5brihVQPpH0Qzr/61b/yhV/481kaG0ZxL1mHXJBEDfvt1t0X9067+6cpF0UgBx3G0o9tc4rKH330o+70rG5Xzl5KSn3J49jWwSZ9+vyDrluev/Z6Cp6GRA4MdLi9ffHifaLw+HOfKlymQ1q2a0a6ublAwftP3uSuGq140lC3hsxexmG3WK7ANY97AIVSqqoKVYVVBMTswgxeejcrZcvYmZNOoxMQgWouaPH0lJeL0cyRsQApwe3w8rf/8OIPvh13caq6PjSDZ2NanJ3U61Xpkxevu3Wumt2wPW0xln1c1+BmSW9vL4lk2pfDtr+92jtg7pMbhEYW3ZqRgOGEwrC7KmN6Y3X+w8vDAUIAr90jVvfunTx6PX7pC2/93ve+88P3Xrhq4BlSC26gbmBq7rEOJRsCV3HpElGYi1DSIlNyUABXTXP7DMK8/tcxFWSpYhMjOYAaIjlLtWiHMYE4A6WpvFrzohsFhArrjgV8OqiZhqqKHAHAGFJJBFwUhYIDKYArlKSFMqIbAQuGgMw8V/qAmyd8+fSmvx1W6yc3m5wAmjaCuJZccs457Q9bDazACChBHEiJXdgQDDypIUdGEYpVDMJcgN086/SxJDIv5uEVjgeP8R17ZRRyc1A3dy25t7IrunMDkjZIF0JDFMHmFBIBoYM7AQDwHc3ouI8OTrOb9d9KkeHdTemoNX1CxLkDvMAr4elV8dnHv3JMN90pS3AMM9mrg7+j3rzyLr2yJr3iYR+VoFcBtzuJ4VUUDu7yVDBDT5jw9OzBTK0mQnNnJCZ0RyFRM3V1d8slBCZCZhlSUjNmgpkT6I5MiETC86EQoiDVTYNAuUwhhCbWKY8zokgAiCBIcCtzcKXMNeHzESEJsJtJEBQsIFSvzl//Yh7GaXw7557IQqXSRWQKJBTstr9edStGcbNScgBGdGkDG4rjzeZ2sTpHavPYkzeR0dMVqk3EmVtGZ7RjtstxXoI7GB6xRUc0z1GwMcOjH8jcdO4om4k/szJiZq8+5dmcdTdq7MjJOlq6ABHBcPaOHSUgIiYyOqIi79xmBHfDYn7iWcWaldBjO7jfaYgAPpeHHTHKiIToYHPhMhgTm5Z5yYFO87MVLQBAwvO+m+GsVR1BHkhsM+qIANQA0KyYOQcGn5NCVLSAzYNHj7GyeW5myiSz9uqu7k5M7oYoxLPPDMztDhvmAM5SmZbZCANmNr8F4iN6EnmmOqEDEs+qGhgAArPMBi5mmNLIIrPHEGi2A+IMu1BTYSEi0/mxR4g4s7g7czhip2Y0j+aZPW7zAJ+bAenjpl0z1KRNbPKk6A5gVcVIfnF79WT9GjOqmRqAGgubGzLlMprPPiBUVzweHc+YJzVVTSIBZrcOOAmVOZxIZO7Ic4kf6gzmJ0dkQCymxwvfHYlmVxQgEJGbAiIyzUK6qakVQjEzIOC5woyAiE3NXNEIkVhQVZGAOLgzoOvcTYYwU4SI2XJxwKJmgERk7gDETKrq7iQ8j+pis97qSMwkAIjIWhIe9zTmDxdKUTwOZ5pPh7kyC7g7Os5Bs1KOyqQZIhOyESARqM6k/P+ZVMR5nAAKgyjwPiUE1SyaMCcdx4KzF6WY+xRDjajqh8mGPg+8velU0Yr3hjjbOSxNh+3ts5I3rvuryxfZhsz7cerzoObuaH3ZBWTXDTFUVTDaJSXHpJRUaXHv9d1+EqxubuwBP/g//gf/ydP3/lXdUS3t7Xa7PFuq2+EwGI4Pzx9evrgoI1SMFgAA+sPl6fqsil22vN3vG+niGq8Oh+7kwWKc3tvfnlSrdtENWW/2+ZD3DEwAUz8c+k0xc+XDIcE0/o1f/bV/8s9+ezAab8qyqS6TDj0Cad1yiTFW3Am8vHxxQozBuZXVerXfH7ZXm7iqfusv/tbVf/Nf5ashY6U+IJkJCKH5FIXNyEEqZoDEiOYIRgHZyLFgwIrZNtOYPLeLNtR1SqMmj1U1jFM2YpLj92VNBh5a6mquxdad7qmkZERoYF7cgiBwaNo8TEiR20UbpKKg48hgXd29uLjSm/72Yi8safZpFrWiCniyXN07Wf7sV3/yV770mfd/8MPu4ePP3f/UOJaU9N75vW69BnJIw5v25PKwa0/a114/+/Cdp/3TSyv54f3TIPzuRy8Pu3QY+2TerWLfw7TfCagpVXGlNh1SrpuzcjhU8SqGMw1LMyJw0Oxz/lSCuSOo2aQ6mqqZRY4AgkhFFZxK0ZIPecxmgiZTWGHsquZEJDKJu5VyNewuc
9q6ju6T20QSEZqiuwpqHfb7Id1dDJq2L5fwxeH0dBjy7W7bLOO4Vy7w4qPr1ULunZ9NSt/4wQfqLNC9d+WVvdNS/ltfPmnwzfcvb//43Rtd8LqK1nEax5f7sW7rxenaT0o2DE0XpP3M5976xre+067bm9v+xbQJwv/t7/7ea2/ed5Sf/0u/8qff/9rvXb387E/90mde++w3v/f7B57265Jyvn//bF+ub63U3eLt/+n7elk4ARfMsZipEyl6NhMtyS2Z95Vc27hxBGzrUMXmTKHpNyONI8CUilGerEyUy/3V/RzrXTbig4CBo+SBVD31vZqEh+HxGkuetj3nnC/+hHWnqrx+A87fSnko0/PKN9O4taTgnYRVe/p5v/xR2bwI7K6c3QCMwIMgoE9TzxTHXJJ5s67WbRva5vLi5rNvPc5YE4AEBpM8FZLQNc2Q+pPTs0PRpCoMIKjkHAMJsOvjk7MffXSJ1AKgkgNCLlaRFOScB48UKXShPD49WdzvvvlH3zxfPjKibNnURayOVUrT4aD1qspeplSa2I27KWQch8P90/V+v4uxjRDz8z0rqrqBe1F05OiAfLZYjiVXZXXzo6tlWZ+E09/+p9/8pV/9lf/7f/Hf/R/+/r93/fLw9d/7xt/79Z9e1DMwCUxnt64h4Iy9nIs0DRwz/Yvf/i4xDP3lMO1FqKTE6JlGFO/adcCIugkS9+M+TdN62bGDagHyx2+dfPD+88fNox8/f1dPsXiuqa6k3fa7m+u0bE42++1JvVqfnZdYBabS78+WJ11XjS+vWGnMab1ezVcBEercZeEA5qZlvlsQ4IwmBAAmBi8l55IyYGUFwQCUhaA/TExRuX15vR+mAalSEScZJ7RkRM7IGQpHPl0t3/vxdSfrnDIplj6N41CtV+GkG/e7qWhgAafAtGrqd6/eS+pdE1M2JzTXjJKglEQoknMBB5wUEYNQMSvFAlJgVhwJkZGcKam7oqk7kDBIiBIWlbRSUUqppFJ11fX+cBqbMqVHD8+vn14RVl/63E9dvnh/348fvXjmpRl57BqGKlTdYn/bT7kg4QiWoxHmwaGAmxSuai9oEttOLjY33IUdlv1wqemgZWfQMDJwyDAtukWDPFkf2s5FgkSAgNJklIfL82VYQDqU4baOYcxa1CNHgqqUhBUBOLFPZerHHuv47OlV2aRPnZwtG0GfmtBlk5vdVS3Bk0JQRgSuGaqCxcB8JlYAIWFgNgBGNIRURiFBZNXiOSME4SqbTsUiKUsAp5QnKa5aACBw6MttkMbIwXMIEQxKAUDMZXQkkcrKxILZEhGqOhVjpjmUgOjotqyWn3nzi9/8wff3qSCFaX8A8KZe6HBLvIpt1DLlXAg4xMYdpzQ0ddeZ2CSLsHiZe/MyWA7AmAYbxgZZAMex9LtxtTgfdbJiVJCAfbTRStVW5ngwtqwlWUqpaY/gurrrMgMzK1tcNgSOTCUlQtWUOXuZsosHCkzU1dWyq5ogOqmEKu39+rJ/553r7SHnPkvGs7Y9DdWnTh9/9Rf+4pd+4TcTN5oTgzGhQomRwOiwPwy3m8dvfTqs20JoRFVTl5wE0F2GftreXjeLddV1wG6Q1YqrBaKSdbfb2VAefuGz1oRkRSJj9tSPzz76KA/jp7/wBapozGm1XrFiyfvt7ebk7EHTxgkSODXLBTsF9mGzm7YXZ2+8njHNwDIgQQzI4jIjdg3zyNoHwkI1FC+5uIGWhEzZY7V+Ak2TWUgwAtFYyrsfffg//P7mj34kuMauwTCVAM363IJrCHmC2qVdn2+n69iVRddVVfn6H//Bi932J//cX753flLHZtrr1bPD7mZXNMdWnL1anzg4CClkLQ4QhFctj5+5/3Cbdhe7ixxEGE7aAFoWITx6cP76w1/9V3/6o9/7+nfKmLKzqzmU4iDz9vSAiAVF0BnRMRVKBqA5J2d34KkkI1RTz47IDArKNqWmq0NOlkvRUghp1dT3T/L11i627h4CIpCpsYhUhJHrrippmpKmVAAZreR5J50QQ3RzBSfiYsXUGAiAi6I7JsjkUCMFhjpELKaKEuqkPtmqT51jNpjQgdzUreikqllLMs25RBFGMYBi4EpeDAmhOIJxECIKcysWwDSOH8e47pI/r9STY9BrDhE5APjMfC0lZT2Mw8v97lk/XuZcYnVaN+fd4lEblrXUItWc81FLDgURgCrh6ETH1qpZK7orBTuKNZ+wfhwLyI7C2yd+605FOi6TPkZg4ysFyI+S0Nwa7nf5nVeUJHwlDb2yuiAca+xhZubcaU1+ZBT5naFo1p9emaVmHQ3O7j32eWk+908hgtlchDTDVRBQKhGWXMohTbOnAJnQfM4IhSAE6IRVDFGwCmxFCZkYAIUAy9y97UBHAQQBQUIwBwcXicXUwBmouApDiCE5aoa4WC0evEnNsqkW036XDy/GkvN28/DB2q0MOnJcCq3KhIuqLqZmBQyERXMmDiFUsbjtb7p7jx2qccpNtDJNDNR6nW1h4HwsIZs/myP/+0ijQUIkmONXdwVp+PF5BEJ011d4KTyinuY80N1j5iTQx0KezYrOq/OEd0kyQEZinCNOjncU9vlkHiWn+Vfprljt+ASfUCTnOducBTtqXeaA6OY2t9IjzjxtRr6bMs3KgrsbAZmpI98ZndzsiIsmdARUd0LSMg94d2BCmMFFqABzbaH73IWH4HNg6k4hJfCZYD0rLzojrYqm+Z+A2WFueWWA4oBHhON8JJCJmIHMHJwRwEpG5LmDzsyZqOTMIugI7kysZnPP3ex1Clw52Jx3g7kDzudmBnU3JnYHzROCUaic6e66m7Uqg3nbaiYQIQCCADdVy651JYTk6JOWi82N+5PZmuczc1FNQQMHBCBiNyMUs3meDYZqbggM7swzxsgQUZjUfb6ykAjBNSfko8Jrpog8lyS6GbLMEHEEdzWWSpjNygyocgQCKCUx8owxMgdTnZHYc/Xh7EuaDY+OaG6koMdcHhOYuxMHcFQrWe0oWJsREiGZu4OazqAuQAAiQXAtiUXcDWxWYM2RCAUREEHd5sgkM5kBHccy+0wpMUUAL0okbuiuzGJ+zMC+IryLsLmal39LKhKRsAj9zqaE7oaFGJCY1AuoIWMgrpnGaUIOADDsNhLw9vbZ0A/7cROoilJJDBJrTGnc3RbdpnQ9jFcl7YqbaTIqaZ+HAou2TWTbNAqxTbmpSPAw5A+x7urlcnO4rcMiSo1cNfVjyPA3furnnn74BwV11XTDYdj3m269rqtGk8VYgWEbutcfvv7BB++UQ29YVqtlfzgUIwLQSceSx1TMLbjfvPjx2fqEouRD3l9fH1QRoqZsoHXknBKSdItFHSTfXl8+f/FrX/npb/3w3Ze3+/EwPjpbP7u+XnRt3uqhmnZ5352ctbDsx33FeHV7QaHulotKwZ2ixV/+wlf+q9/5Q9fURZEADiWGqqSiqhSYHYQcyJZdawWKQnEDZwfPOefJA3OMAuZpTIG5aqKrBsIMgMRYq6FzpKpiUZMy2Zhf7KaUcXBz5joGREs4EUkpBuDIcih9drwdkxcj
9KvbK1XMZTAoxd0BZsdidxrPzk7/wle/XE/pZ3/qyfvf/c43/ugHX/mFX1qtlulwCFKdnp/7pPvdoa3pc28+eqxnT58+5Sk8PD0frjYffviyjnx+vn7t4dl+c8iaS9/3qGwoUZquK6OO6dCdnpAqIpJq//JDKhoe2QYbRCLLTgSmludUJbkjh0rhAEws0SAAcoDgBFM/jUNf0jZNUxWXpFpVnULrXrtimvboIJUM05Sn7TRu0T0iAXHOO4wNpknugL5Viz/7lZ/93rffudiPauXFriw6YeDNLvng11sD1mbBA0zLRdM0DqUgdi3LVG67+yd0edCUbIzbnNtIAl4HYZQaG4vtg5NV2qenT9+7vnx/GlyZweOH73wAWdfdybP3p8Fu1w+lskNvhxc39Otf/ckPfvidQ//2F95YnobV6rwb4PDwXn0y1N//w+9xqWIj2hdVLZYJOIPZ5DFQUTOhsatu28Cniy6GdL2ZJvYqrN56ff/c081V1uT9ITYBEHUYAPdY3vZyszp/DXldwThpb5LMPHuHXGusF6fV8MGfTLtnNvXtvYcQlikHZI+rWPaTL04wOcEKwfPFj9rdRdQkockAhigi87cXgQagWMdpKEWBMHDbbPqx5IJ7bbrKSrGi9WJZVTFZImEqxFVd8kGCUGQqDRCj0XK90jwtm0hgABhiBVaIhWbjK6PEkHO2MQXz66fPqtVb3cmSI+tykfqrWAcNnmGyMYOFw6E3y6aW1KMEdEMDz1DLIk25XcShH13o3uPTzYvbZVfFhoe+sMHtfgKHzeF21UpTx9wfzpdnP/lLP/vm48/8/h/9zs984dOrYMtA/W6su2aOPzv5fO8nQgA0NzRHACr0nT/57nLRbHbZAWIVbm+uwSkPL+sQAfB2u1M1RCCM5oxc52k3DcnZiUK7iNvtVdMsOEQOAXvb3m5DhHIzDHb4ys//xMv3X+xKIlIcJHvulqvteCg6IQlOaKSv5t1MZO50t/l2N42e50Y2e7wJCCkoGEJpFnVRP/S9guasBtlZATBInRFIcNkuhm1ZNktnZarHMqqPWScFoor2t1shjBDYab1ap1JutvuqCsGMigmQlqEABKnbWBGVcZwsmxF0zaKkwiwUGRxRaMqJAEPFOWXLJZX5DshFFR0lhrnpFhkoEgIge86jggOUru1Gne6dnQVGz9rf9uM2t6u6kTpw2zT0xhuv73aHwzC6jtvdzcnp2cn99fbFLXPYlBtgOum6MvUILhYgWwyr0mdVwWo10PDuhz9I08EttU3HHpjVMbJhG9cl99nJnQNFDnWZIFId2vZk2dVjP+2vgTxpdgdUSJaTK4JPo4GqgzFb0/L1xfbwcvvGyb3XzrvN/qap6mkqu9wbihmGqnZNxdTcGaDYKFwjMwMTMHhmkKJpni0yMTgErsyNkd0olUOsKqFoQBI6MAzRYwzZs/ukbkECCxE0mg2RzCZGCRLmNlTXHARJOKVCGIVYyxS4zmUA4BgqgGyTtiife/3N5xfPexzrOuQ0gPnJ+nToDywcQmugBETmkLNbnnwIHNL+0C3DMpGSRxNBcrUpe13JDP4ct4fUNaGJKRecEiHKpJCTlZIQgtTi7EAeI9Cr1SeiYds0VR120wiYTUwim6qTu2XFFFgAncAjccs0bA+g5jfjVMqHN5f72z5PqeWanZdJHvDyl77857/8K3+1VE0ZMpqilZynqo55Kmnqi6f7n3mjOl1YIEUwITUlAy6wvdqkXLgK7dmaRQ6HHSEQAgq42dT34zg+/tRnPHDRZCkDc9oerp69GHfT62+9ScJm2jV11VTTZnfY7hfdg+Xpvf1woMBVbAQrm4ZpGtK4Wz94NNqgOoWqiXVTUgbX4kbUADLmPvfXoQ55LO4VGLglZwcJ2HSkNBwKCYNb7aG5vbn92p9+8NtfO3xw2/L5ANJDCg3Wq8bbgAwSQh4toG83L2MLq1Bb2v+f/rP/8++/+2ejw/SP/osAcLY6+dnPffkv/cLfPqPYkY2bvmnrumbGOO1vncr69FPDZlvKhEkqrj9173HfD7fXz7zw6f22EqYJ7WbbVc3f+cVfnPryb779vZzyPNVBNQIWIi3FHdChklr75FNCBWQvZaRIjuhF3dxUzdys1MJC2DaBvdDcQp2TV7g4P9Ugt7c3gMAIjVCxTIE4MtciIYCjTgCZCPloeTCFY2M0JlV1ZwZmAjUUclDTAgiVEBKhQs5qoOxQLGsd6zpY7HCxKH5bs5hqNjU0aau07ZsqesqxaU3NDESCxMjIVrKWHDAwMQEIUxqnqq6ChCKF6WM/hR/NOsebwBEIg0SIqnNdT1FNh3F7OLzc73+82fx4f3gGjsxNt3jQrT/TNk+Wi4eVOCCrT6o3lvvAsGjPoTqXUCM64fySr2qt4GPB586887GacMfThiOi6G5N/3FmyF+5kGZHwzHiM78Te6V+vfIoOXxsGTpidI7H8snXPR7LqxzdK+lofn5/5TxCxtWDB7HtJuuxAAB4MRImpKJq7sJExA6Qckk5A4IwkTmZo3ucZVkEJnC3SFgxMPhsTHI1oXkl78xcVdX8d2I6InpcZxeLCCMTI5aigREdhYnjsj17Ui9PkSFWVXzz0y/eGadhG4QO+3G1WMyoUCQGxkO/r6uuim3JGkWyDilP3IXV6b1hc3Xz8v3zRw8jkR4OaBqrbazDVVqpdKxFZt4KscGcgXJ3nVU2AjiC0OeipmN8D45rYwS4yxvOvCI69mnMJxD9LvQHju4KdxEyOsqO5sDGQiEgEjI7Mc7/zc+IRznx6FZCnk85wsdj5OhsejWY3IFmCwmqlWOIChFRHByBiKRoQSA1I6ajnc3KEW5DSCjz87g7sSDNmB/TokRw56+bS+5ZdeZGz51rx6AX3IlT6jZb7o6AZwAiMjBzp2M/opspz6nPOYoGAIjF8iy1wKwWEaAh3EX+zAoiARKgOELWPFuBgBgA3BQQ1NRpfoQTk2ohZHWdNSkiRgSzwqE6+mGI1AoiEaO5K2RAYpKcJsA5fuV2Zw1zmL9bAN2rWLkjIwJYVVfo5cXLa8uGAmrGIrNghjMSCxCRDFStHOOaszLDpDYXs8yNWY7oc6/rbLubTwgyqju5ukOQWEp2N2QiZkci5jnXRUBFs1qaHV6z+mwz1GnWhD/Wr49ZP0UjZEQsJaMDIhMJItrxOxXNDYlMFdAJad44FmYFACSbkdgsczqSGdx09i3Og5OYVQ1l7inzV7rbLFABQlENUrupqiFmcCOUWYoFpBlRhM6Ajm4ObqBIRECqxY1UMyL/W1KRqhLSNKZSPIbYVLXmIYNNZSyTIwYOkUG6qgbxEKDYPk9p+/JpjDeH3UWILQGFGGJV+aRpHBXGId0O00ao5KIp582gmlC93u5HDVNhbUII0qmaTeW8HYvpMI1RSNOkigu5z331D/76vwvbHxKWrl2VYhy7++ddUBm3e09pHNArEKbvvv/ddbsUZaRauNnvdiwl1u1i2Wz7KRAupLFx//Ly/UePH43DFoW0Ez9ZZWQpQ7foKi2bfmqiHMBAqFrc/6PvfO/zn34cF3l
RlCdNOP3U5x59avXk97/2Q3aNdVudLboFbQ7T6v69w83+6UfvvPnZz3ORNFmou5/7yi/fDP2//JNvEXgy9KJGU3JFZkQgtRG0FSURjG1RHPsDYXYvSJErEWKuhRgRAQ29mBA5MiO6cCo5Bg5o0UycwCRlA1B3DhTMFLWAm4KNwxC5YkdVAyAWRMeUipp6gcCBvay6alJxMERdLurf+Pmffuv8RMZx3YZn33v3+fOb/+Xf/XsfvP3hLm/rEGJVLRedhaGuVgSGbmTw5PyEgfd6ePzlz1x8eP3Bs+24K+tVWC6breK77320evDaa6896ofn3J5KFLT+MPQVgoMKV0UHGa5584Oz05NB7othMWAU1cEdAMN8bISRqXGITJRSskLkrZWGqAbcAyfHnjBCeebKbqcuXag6UhvHVHLlJm5GaJa3CoQhOIwRaeyPrqJs6ds//PHbL3dd04y7SSnEpgmNjGoMgavFdr/XrG9+8fHu+dX1xeVPfvFx2lw/fbafkivA5UcvH523Kbb99f7B/VVKh/ZkcXVxuPjxs6qiq8PLGBsctWvWoUqTlXa9+vSb51/711+Pp29KEw/XP/65n//8H/z2HxZq0zj9k9/5R09OK9za/fPXn//o5fXmZkka37v+s29dowbPOrhSVuDQtPU4jgLOjSBypnyFcFm3+/p0KTV3xFmrVavcZNnDIgR8NN7cdGctVbVRrfuD3b6M/DQKwMUGowxQH/xk+eDTTT1CTiGG7fZ2yjqSdYtFbNo0uNI2SDvmm6GqtYT45GcP/X4FMf3464v9UJfiiqNqEyoO7ATD5EEqQFPtp2ECp8WqNYEPLi8ff+6ttpRn77z32a/+tMd6m6xaNIFqG6qqCX06lJy85ACB2Yul9Wm7eb5rquawGe+d3K/jizGpU2BkKM4VZcehJAE0KzihdLUVufhwU3s1bTdNrCJiydPiZE1FffBJHZkcBZgMScuEroh42w+EVMAsD4SOED2XupZs5ndNHc9vXj5++AjHvN8OHFba73ebw9f+9W+/9fDRe+/86Rdfa//qL37mU2+d5r40iK53M815y8uciBzRoCDHr3/tQvsy6PNcxqqu665Ng5qlOvLt5TVQLuzA3Aop+dDvcqOEvmi7URMitm0cy4bVE/i66opp4AmhCbzY3m4vL56lqUQmDsxSLc6blJJbkSgGqtlNjrNsUzsm+4mMjiHpeapHRHe/AI4U63ocGB2E6XDos2Rpq5hjSmNsquJ57+5WKi9LyL1fkZxzE8eUs4Jh4x6SKpZRhACrqWQTPXtwb8i5H8ZIAS1psdWD+9vhhREhQCnZitYiwqymakgUmFCLETOAICoCEDCTAkMuioGCVClPYA7M4BgE3G0as8/4AESOkcciEuywFeIQ3bVMCWPbuDTd8mS1v043eX12Kk2bL660T/GgWS/ycqiRFu29D25yE8Kw3UYo905Otrt+2nFcVUS8G3Sb0uA3w/6GgIDIsTKuzX3ZngbPmHokDNwYVgyVQ7VYrM6Wq1gT66GUraOBSEoHcAcPBJLNhFnA85RymjLCNOb3f/DR693ZayddSYemqZ0X+9tnThBRQEvx5DrUYVUwKXiQyo0IndERKakrIVOYAbUErJ6KJnOYK1wFSbhzU/ec84CIMYJ6Aot1rIoNTBW7uWPKJQoJBy0GrkxUNLkpWEFiAlMdEKMDqiGCqBWe217AGf10tRB57b3nT7OOKqEKFoTr0OR+g7GOsUnDlHLvNRM7oE/jQESY8pfunb+93WEhM8ypeNZCquqRiFUP+6FtKxNCC4hgOtmQoajmQ9WuJIQ8OtfVHT4VwDFyE4w4wYm05hMIKZSpcJTW9ABALQoAejYhGvspGg6328yWGcc8gJWlMGVatavTWP+5L//aL/2tf8cXp/2wh5LrEPNhEnLLUIaUx7w8OV8+OjOEGGMaJyJoQ93vdx++9xFatVieZJzc/bDbMIVKYr/fEFOe0s3l9uT0Ub1cTJ4AIDCZlqfvfzBtptPVebPo+nFcLJfkZHka+sN00AdvvDb5mIudLB7oOOW0Hbbbupbl4kQqGPbXTVVjSsBbm/YkTVw+yMCelRUjx9KPVrJDBmRXpaoNi0VCmg4DB27UaLuxF1ff+5e/3//gpRw41g9VWkWERefLSCfrYuBWiOrQBZ9u883u9OzRd37wx/+3f/YPf7S93AODo7hLoIvt9l/86f/0L//0X59w9de+/Et/7Vf+ags4bXci0bOr4zhMBYssYt3WacD7tX96OLwkO2xvNml//mhZny4EuBwyY/93fvMXqlX9P/zxN7wfSzLLinwk68wlTZXkyZKTWnErDoBq4O45FUJnwJn5E5AsZ82YsiYi1WSWA9eTehV5UcneFZGTOweOi7qroqVcxpwz2IAlKQkigebMQfBIIgFUI0MtJlHQaZrUSpkLngQQ1CVEzQXYDUwkkpmXst9d96VUdRMDNiEGs+yBAIJ5Ovg47FkYyGPdxLYZnAhZ8pgPB2FiYkfvVgt3iFWdgJxFy10YeZZsZtLHnZnDAdS0WFG1onmaDuO43+6vN9uP9rsPh+GlaQ/AgloU+smb5rDr9xV3QKRlY/lC89BWnZ7m09NGODgi8h0F6c7qcScEHRfQd3ijT7qJ7vSdVyiWVzElOOZZ4BNhsaMjambiwtHYe3drOxKVPxFpw2PW6RWVyT/hcYE788kR3gR32hXCsTQKlquTxen5sNmRIyCKoANkMyRiwBiqVFLOpRRlYSQkRiFgcAJgQkIAQhGuAkfGKMQcimklMt+ai2VDquoK8ZjqQ3JVZRYmxruUFRGjmwgjh6JqxNXZI+pOSQKJADO37ePP/dQHP/iW2W5IhffDyek6jYPUkIlAAcsQupUDJRuQsG0XKR8Cx+akLbfX6eYyyKrfgGdrl+B08eTkwQUGAFBDmjvL7thAMIe4EA0QjrhfO1p54AgDQ2K4q4cDP+o6d6cdZ8z2cUyYH7NjsxllFjaOuiIRESAbOQuhiJPMTOvjMh8B3cGOZ+5utMyigx3jhngHbQQ4sqVVmWQWmvCoa+IsnswwMjejIwvriIuaR7UVAzoqO7P6chyjxxJ6KuZuQIQ0188fJVlzgMCsmo+sLNPjBWnzkJ3B3EYU3QuSIBKBmym4ITOYgaOZAiAdwUDkr5jNdxAxVQUmJEbAV223AIUwzla+o2UJkEngLlYJQDP8CQyJA6KAm5sL10fI9iyxmSEjk4BiHpOIGOksQKCDuQmKu5r7zAUnspzG00UbHF2E3Q0MEDbDjsJsbie4612ZT1ophV4NEUA3Y2JzmzVfMxMWpFkVx5xHJCpuAABqDkZH4XVG4xYzmBFRcwZxltEIAIjcgEjcCh2/A3gGGyChqYMbEgqzeWEUIE5pQiGbZSBEQlJLxBXM/rg5Box2pBABEYppUT1m8gCBmf2oe/v8rt3KPOx9lr7MieZcnTk4AJna7IJCIKFqlo8AnQhnopOZ4VxgN1u4oMztbEikqrN7D5BYKte7r8RXUpG5lVykqgrGwSDKcsQDuIswBHdgDHW7fBSoKabDdOuF1NSw6L
g53Hy4WKzcoOQUYotQAdJY0n44jGaVQEpWMho2lgsRQlFUkEqEwtTrZDm1zgjsXk9SVzV5tWjvLRdvfGb5Zt7/WCpvm9iPvt0N98+XI6d2VQ9Xu+Ll8ZP7h/3QH/q2aULd3N5cVyFcb2/jourabrfdh1gjWB6yUvJUYtMlGyWE0IbnN+Mv/OW//q1vflPLRxSBIZzUIQpe7EfkvNuP62b59tObcSqnp8snXf1vvv/j0zYgjb/xF3/uvfdfXlxdllX1cz//C2//GV88u6ib8OCNR7vdzaI9r6poyfJ2+stf/uUPPnr59otnlaAG8oCIAMTCsVJVUwAYx+ytjCxhyba9rijsdsUco0isQk5TYMklORAAm0IuI4ZQx0rAAknJPRKzVIKsQHmckNCNhkkNnSIFRi/ZnEIMBDAcUk5H0Nk0mbiWhGju2VjHt944/Yk333yjjr4/INqTL/zEf/8H32rax8LNyXnTijx6/LoRg+3V88miBsd+35+u1xxod7VZ1jUW704WzLQfbu1meLJcxbacnK6GMr64uT3v2vFQKrQmLlTqtL8BN5JIXJOW/uX3Kqqq85/blc4IIwKBISEAIzK6EdRJ56yLgxuAFp0AQbji+ky4cjfVbP1FKYB6KNKFuCJ3ictq8aiMsVi2tMnDrsXQtKcxSu63d5FkeH659wRDEoOpq2Nbsk2jAT88q8c+v9xdN520TcZpNx6GHlJ3k7pd/PB2f7oUWi13i5UXeO3srLv3+DOfenC1O/z33/j+m6+f7y4OZ/fX987P3n3+4S98+fNf/8OvV9wo4b5MX//ed7pHq6t0WQ4JSv8//u5vg8XmTGHi7TSsQ3u7T9/807e7uv7pn/vS9IK/+Q//RdqEJlQlZyLRitxpn42proX6w5gC5q7ZNHE4vUfcliGlksBBJx/zlqQqqUJYOqHE1X4ibx8s1hctZj0844CF+t1mR6svTLykUcGmlp2nXafP+8112z7kuG6wTPuDRi+4qysDNuX2dnQa9rK7faC5JWJp1V24AgF1wGLswA7q7gpVZC9+yGO1Wq0/95Y+fPjWvbO3//UfhbpO5MvTk4LkQOuz0+HmWYzBAbp6rVOpUKbiQWTe8wtBJs/np8tnz/dEFQAqWFHLpux51dxPaA6xWq5ur2/7m2G5EPR4uN5hS3WodDdZUTJzG4NECDSMiUNEMgETqacpuUNYNNVisX9527Ywbm+mPlcx5gJRAgmenqwPPmHgAEFF6q754v2Tb/2Pv3f15P5bP/Pa3q+vytk3nofXlvdbRbAjZAJZDAxxrlhQUxDif/xf/zOsDFI2CGQOJTAJQNkdBq5oLJlITk7ODre3DnyyXFYVaCmAeHZ+/8XlFRNXi/asXr28ufEh52moKl2erS7ef/bwyRlJoJDXq/a2h9sJnjxYv/fHf/Lm+rVD6s+erHPx9z+6uJuAOxgA03zX1FLm6bUfd/ZgRinMMGQwVqfI3G+uLGLhOlE2tWI45YzoBJxV9+NQyRIRGQHdyexktXQrSM7MCYEYXO3k5HwheP3yel44FIPVap01X+62joElKBhxIEQKUlSHnCNR4EgAxQ1sQjBG0ZLNvCAasjuQqzADYnYoWgJBmTIRMUkUBSAAcqKxjAaQchpzYvNAnIgN8dKGEqhZNOMwWbaHj55sry90HFPSpy8u760fqOppfXoYboAdHA5DMSgIY99f55G9Xo3TeHn7LARm95IMUlbirl6iFUIz06QQ6rZuV8WrwKuTZv1aV5nuDvtNLhMyuxNhZWgz/tLdSinZppLMTAstnn54cy+ePjlbAZsyK9Fmf1EYyDUSmhsTF41owWwMVa0lwfzQgEWnIAERHUmIptwDOTMV08DRZ48nQM4DABAbh2rKkyNrKe7uEB3JXMHUzJlF3YUICbLlUhwQRGIqqlYcNcZYVJmDeiI0ZjLPABiqChAg2/miHda67S+cYUgJNttlZwbeAHfSEAeJXHR0s3ku3IV60mkJi6bf5TyCAIAy+Tj1YEJCZrq53fCqhsjums1ZPHkKyAh02O+gbSXWKQ3T3bYB5BLQGwQGIGRzQSVFSm5ogNIQhjAjvAjdbZtL1oEJDn6Ida04dU2g4sFkReFnfuIX//rf/w8xdv3+AD4JuJUiIQSJw25joO39VX12asJqqHler7v1w8unH9RNUy+WqeQqhmEYQwjCIeeESGA+jP3q9Gx5fz2mNKbUtrWP+uKjjw7bzf3zJ/fun4/DsDhd1ovWspZpcLT1k1ONOo152S1t1PH6ZbOWB0/OUuohlClpt36ICIiqkAxEpFY3zUU4omCZDFGKDixEMahUFrqhcAEjqnjYvvs7f7T73g/Li43vaFGfG8XDMHAVtRU5XWBXGWJQLBos2VT2ZNPiJPze9//F//W/+y+fDQcScXfU7I7mpG4ECJC2PvzjP/kX37p58Z/8r/59PRykC3XVHvb9/vIDbClIG6AqU4vJTk6fTEip9zTdarJ+u3NvRWLZ9lKqv/bTX7283H/7e99gNwluWNyKk7iZ5zL1WSrxAMM4IYm5uc/JCDp2PzO4YVZ3hDKZ49i6MRKLSKgI4+Z6v7vtUd3NQ4jYMDa1I6ilPJXDTscJCLGO4qCOkE1nP4wWB0MCRKQ8KaNLQK9CVnctmhWZC7mE2meFnNkBs0J0LFqEuJiZJgStQyCyJsRbS8FbF+qWi8VqFeqmjZGFvd9rPzIAIwEzRSJHm7INUxSRuj7eC47Q4JnA4u4+81lSHqz0Y9r14367vxzG3WZ7s9tdpX7rlsGYMRhLyommi2nqd4crCYui2XRr5ZYBlu0ph7Zq7rFUVbUwc0IgZP+kHvTKV/TqLx+rR/Cq0/6o1Rw7wg2PubM7reGoPB0VgeOj8bhcvzMxzT999ZPZenL0qnzipV9lzI4H+Uo3usMhOSISk5mFGJenZy/ffZeZAFHdirkjsQgaTimZF0QPgZnIzdi9EvJiQTiGAOYKyMRVCEEQHZC5qiKjE2ApU12LhNoAXRUBkQkQolRzzxYRM6JpAis485IRpWqrxbms7jFHN5cZgg1I7cmbX/rKh3/2Rzrtppw2t7fr1dIDKkIdQ7G8399UsaY5wAckEDVljLI8OcvjmEuSepV8LKrp9ibGF6tluMEmY4w+BUI3JeAjoQoRZz63Gh6nQ69yXsfAIH6cBcSjQ+JOPpzTZ0cw+auAGgEczSMACExsYI581ErwSFwhxNnedBw2d8Pq7rzOPeWAQEe40Ks0G4Afn2cuBZqlCjM3IkGafUzuqoDgDkSkXuioq9qs0ZibE/gsSLiamxDbJ5KUsyPMXRlpVr+Q0N0NAGkuf0QrTsJzo/oxRTW/GXQSmfvUj65wc0d0RGKeQ4/A4I4GDm7EUTUhoAHeFZw54JzxOEbbiGSW02ZWtyPAnBo0RSI3A/DZFi9BzNWPsqDBXH2nBY5oN1JTA2DmiHWxbJqIAhGZ5fkj0jm8Nl+KREi46jpxAwl5KMuucTxs+lEByIxQwFytMDMAqCkRGxgTI1Ipykxqaq5MgWacEIG5upm5sQR3RSZQYAlmNpfTuRszzVxOZ
DZQM8XjfOYYGyUSAARkZspZEWxupstzwfaRsWVevLiiA8zbWa5MbKpqjshalO4sdSSiWgAQWSwXQ30VpjUrHORjOyOguRHO/ktnEXNFmDsKoWgOLCxoDqCAxO5QysRcq4PqRERI7LN2yOTmwlLMbI6bGaAcAd7uJiLqNqXpqB9+Uira3B4CkJuPSY1oKM/Ap0VsUELPmpKzQwFqF8u2an3bYA457RD7dBgI42bbFyBwDEWdsmpJkwLFMZkzQlYvKlKrJyjIQSCDJxyKEYYYeZf2nrRbNKPCSihyGHb6eF19/vOv52lcnjRg6uMU2nh1e11FSRpjkFXXHvpDzlmEqsiqE4B7ITOCCFf7g05Uhj5nvb8+v735aDhsS+69dNnp+sVm9fjzf+9v/YO/+Zu73dUP/vk/+ycXt/vNzaZhWcUw9H0tQATiHKvmcpu2aXj4+I33n958+MHbX/3F1vn2rS8+evf959/9k6/9/C//3A/eefdHP/ozwGZZ1TYeunuLflMOu929B6f/4G//L/4v/89/+GI8VF0w9IoluxYdCwCxAPFQSHJetNBWvulBQagNrghgeTpYmYahOPKYVaAWQEJhrIU4UAagyBWCIJihTkBFap3JExiZFNwhF7IQsLaEvR5cIStKDG6GnsxZYpOz+7T/zGtnv/Xnf4Os7F5ev/no8cNHj37na9+5ebn59b/+i1fXT1eL6my5XnbxcntNCJ6cK84lVxLJtRGWkyWSy/reeLk77Hbdqtm92P74R+8uTuKTJw8vtz5u9mOR9emaiUvJRQsxJC2i6GBQSl1Xfvv2enHq8Y1NAXc1nJBaRgNL6o6CAS3nDUNGm8Z+a2UCzYjmTgARyDkaWLayH3sLcijTLXjO2UwtlxExIBGhkHAxJq+4vcd3uwcYIQCcSPjSW6//8J23Aemm33f/f67+NNayLr3vw55hDXs459ypbo1vvXO/3c2e2JzEpiiRpilLNmVocBwlcRBFSQB9iJMAcYx8cYIgMJAvCRLBQWwZki1YESRZViDLkkiNJEU2RVFks9kD++1+56mqbtWdzrCntdbzPPmwz60mXagPB7dO7XvO3msP67/+/9+/Cj/0+kuPzp5KFQ5qrCHsLvuToxZ6JxLXQ2mafNSk777//gdrt+nDGw/d3SW/+/F3uh4hxYtnXS9Td3n17tPLi133wdXVOOmto/DSSy9++5vvpKQ1uTwNB43fbvsI7aI6ePTRufehcvDhW+/G6nisqYA8+eazd3/lPT73TnTCcUoCgMFzUUMpBK5LCrRYK+W21aMj5sqSHCxaGEpftnl3kTe75enrQafVKhex3PUVVZC2xNtS+qnroTOl6JYvS/36whvhFZEFk/78e8P6PYpL0GrqthApT5Md3Yt3Xkzpuh8zqFS2XV6+d9SVdppYA/roUDxiyZOLMbiYdUqlBzNPHMn3rlR3D93dQ2jjmIedDqujBUjn3LKUHL0Slt324tbR7bOzR44JOWZBjnEqpapjqOqxS7Fphm54+MKtT87OEcIkZgaeHYEz4ykRcFNA8rSLR7X2RmBDsRD9XDfCAAUQEJgUTUwcGeUiqCBiZmACZSrO2bAdLfBld37/4S3iutt1JweH01R0KlkwxuXF5vzAN9Qnqn3xxRkm1d9458Mf+fzLZ2+/t/Bnf+RHfnaHfds0vH/YVCuiQORZJQP4N//V02HXRT+BCBY39GuRogVCdbRdr+sqgnVYtPRDHktdx0lGDqyKptqPCSyimihuxuI4qhQfoo/ivb/3wv2Lp5dp6gMae2cGYMXbbUHabHTYQtONo4wOb6ph1VTEBSZ2InkuZwEw1Nn+imZGpqbKwAhMBFDDalltyrC7fhypWR0dPr08BwQjZue0gCpUVUXGYz9AAdnJ0enxxdX7i6qepoEIt10qqdy79SDyovSFgEwMs4alC4Ao5JiRFMBUTNWAwBBESnSBEJC5SAZTAgrkBx0RwfugkLQUB5YloRFTLEQOmZyAoZmxQjFBZXJOpokpQnBATI7Jhc3Qvfbi/fao3nbBC22nsl1vq+IWiwOH2A3dcLHebDsreVj3wBYig1o/jUoGWsAKcsC42lx+OFkCBRMzrjzWKuh50bg8TiOHiiEgL7JW4NrTk7u3gkNZ99vLufxXpJAoz9gC5iR5NoqrpALS5eny6fbQ4isnzWrFo+RM1fnm3COKmmMUyQRE5NhBtinJiNIiGBMQOyNxjCaWyo5CJQjsaG9uBgPgMXWubsmhaWZ0gORc7It4amJQsCSWs2VfLSz3KDg/l6cyOHKADkEIo0Bx3pvKvO6FimDK6IsUQPSOTM1QUMnEiMqtoxPBabvbMFIGGEpii6nfmWHTLLIORbNn5x3lgsOU0PDRo8vPvfz6r33na6NQllKSSE7EjilYKUYmu646OuxMppQsl+JxzEkMmSiJIkySM8iNty6lot3k47KtffBODHIStaiAM7aHi3dQSkZiAlMqzFxk8LEGVJel5hjJO4w/+Af+rT/+P/+zEzXDeudFl3XtPI3bLQHKmGQqiP7owYPMSIw6ChsFiv3l5bvvvXly92G9qK/XWxe8c4HZAdm03ZoUb+H86UUM4fDBMUS0BFUV8jhtz84/+O53XnzxjTsv3B5zbparumm1KAOM3RTb2lTHsY8+nj8+W9T1wfHKV8FwQhp9tQBsXYwofRo7IA3VIVQLFQuesQxpczETywgqFw/NebOSAdF5GMbhydnv/r1/uHvn8YnWdTjRJe9Ui+Pq5JZvm5EUmBEKKRI4Xy+KZS2Tsf7Wx7/z//wHf22NWoC4CJgSkoJNJSOAKKABkqr3v/321//CX/sv/hf/xv9kGjc1UvShEAZqoFiBDBl1K+C4Prp7KPTJ25fx8vLu/SUCuRiRA6o0rP/LP/av/79367c+fgxdX8BMVSjP9PUpTc4zEjVtnLKkBJoVRAiBmaQogAXHBlQARVAnNSfgTZTSkPLmultPeVO8upylYkZ0Qyo557KbNNnY5yKEBJCAA8RIBiBFUECSAICKESigYWDy4CuHRaOLVmRG5FHwUFduscq5OLEKXeVDJjRUclAUCBnQ+mkSkMkhHtV0uNRmMVDAysfovKdwehsRzQVmMgBm4pSpH3Znzy4+Wfdd/31h5ka6mafDpqKWTYYpXW83Z+vds/X6rB+2w7DLQ5ZkYIjiwHklpICIgwxDHq4VvIGJTmATIokgu3Mfzh1VhN77CvAm8bVvK8fnQo+BzWRt/H7y68Y+9HyGOVtQYP9P9num+d+fbD3nCu1JN7j/MeJzN+Fzmeq5hnAjK8yTcn2uINwgrudt3DhT5jm+gSO8//LLb/321wxmaAqHyouoiKWcwDTs7UFAiKYwZ6y8I8dECMhExE1TRwcGGohtttSAAhGHiMxiRmDM7J2bZ86ELKZlhhuDee8QQJBROYnz8bQ+eFGZvXOO2BEj85znCsuju6988eyd3ykwZaXL9frk1lIJhZwpaVKHUNdNStlKNhHHKGVkqkxg0nFxeFRQUs6VP8Ht7sQ9qep7zwqJUBFlJDWdlZO9J2ifv5nFlNkdRjDD
UvYGMdTnJq8ZzgxA8BzXQgo2U5Dm44m4ZzWZ6t7eBbZ/VtB9uGz/+0Fhjo7dKIT7wWFIM7PeZB+HfL6GDDNvBg1IVZl5rx0hIyIDzmWU7PzeQmVGyGbzoLop2ANS0dl4AggOEMAIZrOSgYFnrzYLPSh647EDnb8Fs4OZ/m4wv58AkdgBCaRSEhMxB5UZ9U0496PPUTGV2SaDoPMHUykz93r2kiCxqQCYkhEyIeUyInuz4tirCs4WJwMERjMyVt2Lq2oy70jRROiQUVUJbA5FzngpBDKVueee5w0SiWRmnp1HhGx7UI6BARZeNG3FmmrXb8d2Ea2krh8BnBZDjwhGSARkiKbq2KeSgHEGUxGQgu1hzwaAUCSjoXMedM66muYbYhggk5t5QMx84/cRwFnLE9hDrJBQbsxlRcDPFypGBhQzcOjHMhHPmUFEJLN9gZ2UQvuTnXW/4xi0IBgRGhAAljQR+ZvhSICKxKY3zkUAQCbDGfJupTgfmZwWme1xBN7MQISIDQFUAIFdQEQQJfRgqiqmgIgmOl9JGRGQTAsyqe6zEs4xEatm53jPg/q9UpEZGnGRkqdMxEigokiGYgjgGCSNqb+aAhZKzhVQl8HSlKeCY1ExVvJIQOjExEyIVEQqms9d9QyAI6KE4Ah1zBNTZEYpiYoEDzGGwOwJGqxb1x7SwQsn9wlVZcrFLi8ubx08CMvF42ePDk6P+r6zSarFYn35LDrf566fuuVi1VS14zhtN2maum57unrB8hRIyrAZdo8XB/WUqJhUVRgG+tyXfqxy0bfu8OiH/+yf/9KTjx//yi/9vY/efduHtKhC7q8sTxwqVrWCBM4hfuqVF4ar/PjdJye3FifHLqejZ4/Ovvetb7328mtR9P133s91f7Ssnj56fHx6O7pmGLdVqP/dn/lj//kv/AMZR1BTK3XFJRfvYwxLGXbBV9pNnMtggskPE0BwdURLyQVyVHW7HQMQMBmakAIyQZFEaADFsCAUVUsGI1K7uF26a7BctJBBLtKwZ3EePDhO2nHliGU2U0TPJjwNQsqff/31P/0zP/nhW28bwkHdvPbySz//D/7JS6989o//9Fd2u4v7dw9Xh0s0zjJWiMFFWxEFMtO+77ySIbvIU948fnb58md+rBj0Tz+ANqDC5noHQI6rRYxmuOm7tq6wiDK0q5WM5NgVKSZoE5Gb0se/sbr9CKp7u6k1YnQr5QMoYEhcBGXAvJ3S5ZQ2IqPZNA3XVTh1rlWgpKIiZOB8hS4WkDKuTScwnbqtqZXc5bwGLAYQqDFACo3Bvu/j1q3lo/fPMtrbjz8wb6hye7not92TD95j50uireA6pY3k+7d9GzypPnp8fbCy1+8ef/u9q3O8d3C4iDhFCzmnH/8DP/y1t34xJ6xOVimXPI3BDHaFs2432+98e5fGqYCtTtq87c6fnK9Om8//6A+/+dvv3rl399E7b/nl+IWf+OFf+Ie/0ZycPtT6k1/7np2Z8yxQmMERKZpgQaDgXRkKUshV2C3i1LSTMRlwGimVaezIg5KtXnwRrHLTZvvo3QpdwzXkqXSSZKQaiVyBeiyNwQGATtuzNkpFAThUBw8kp9Wdz3YTBPtgTNfzE856uykkvo1+82yx6446WVoEI1MQoeAKYvJMADJOSUQ9G5qplJI4R65v31k+vP/o0WW1XFZNM8Xq+nxz67W7uh1TNwbHB4dH6KE4MUCTbEgZKbaNaSIfAc1H3w+libQI3I0JmY0syYiqzrgfuxD9qIVITIVzGVKhtk0gAXEau4hR2WW0xkf0LJq9JwMSTYjkiQ1y9JyHQfJYtfHunZNGvZbp8mKLh8sAGmo3pBJETpYLP+oCaLzYvDtuT9pVt5uWgfvzZ527+JF/7Se/873f+R50f/QnfpqwgpmM6MkMikwiGBP/p//pX29P2ik/Hfo1ILFH5wMAJc2HR8ur9VPXuLqNeavLg5NtNzaVsaqr4rDb+Tx6xLZtc05jzndPjh49+kBMpkkunj5lrKo2COsoGYscVMGxNSmfLFc8ITg6uXf3G9/4Btj+XkDMgDRPC+bb6vOHdgOgeU2LEYmBWVVzUQycDOrYpH7LCMQ+hkiBJA3nm/Wd1TEVIUd5mtgRmHimVb1854OrWDdiNsfHzGFsm/OrSwV1gVHRSnLAfd8rEDB0eYzMDv1sWScgJgTFIkJEauLJiVmylM0ceTBy4JgJgAFETBwRCYopOy8qecpqyp6KKhLGWHVdX/mKkZyaV3UyHAfwaey7fliPCOqBp82wvHerK7vV4WEx3W4SCzCjgoux7XbXXT+iM86JAy7qxdPLT66v3kfMlQ+iAtw6pdb7KCZWqti4GFEdonexrQ8OjxdU5V0arqVkck4FkJwRqoCJmRYiKymZQUmDKMCU/ZhfOT05XPnduEXi7bATMUYL3jOolmRzJxkkx5WPjZqRoYE68oMkYgeE5BiJVYSB57CDaA6uca42JXasmj3FKY3GxSmyyDDtqhhSSR4diLERgAigwyDoEdkRGZJnJynvn/4VjVDVGGjm+Qb2pkUUHLGYNE0UFRNYhGadrgxzFeux7z1y9CFh55A5MLugouOYfKjMFAFSGT958uFn79/5lx+8zzEAk6W5J9SQ2VQ3687FJjoHaL0WwjAWqeNi7LZDGqqmQQO+YRWRKJKAiEwZjRiZSoBh5JzruvKIqtQEFuIxZUT2bSOWArK57NDIO+xzneOrr/7ET/yb/674mKepqZ1XJVLJg+aCht3lrkxj++B+RicGYOaYXEK97p988Pbd+/d52Yxlqg8aH+McUihpUoAmNpvzDSgsjw4sYIECpGxAWs7Pzm6fvHB4vJzKSOy4jmoEauNQtpt+5T2gHR+354+ftrVvDtrqcCXSF8n1QWsGJU95UrJJkdhHQYeAjqEMaysTqBaRulmKuckwZ2F2zOBS/9FX/9n7v/F1vMqnx6eni3tPn54beXSRncNIBugRHGId21wA0eUiZewA844v/+Kv/N1z0iJoM9fEANhm2AXwzMZBQqBiAPTVt75+0tz+M3/4j+dtd3DbU2yIDyH3ZqleLihoGXeDaXP4wsmDrly/OayvqgWPYxdqRrFpcx1i+2f/6P/w//SX/nP2yeUChOhMNINy9BGNxOa6aGI0Eaucs1IIgWt0ji1BKYoG0XsALF0279CjakqboQzFG0LRwM5E8piEYOonyoWR2KOiAUBOCYhzBgVjYkRwjkBN0dCAPC+O27CsXBOUvPYDTmnYZXSuqmMOMQfvmXnMntiZScmpK2MpmgqIdGgiOZcCscFFwJJQpjT0oYO2qWIdCmLyZKu2IAPzqqocq/eMawUuInk/L1CZ8yazzcHAFC2LDGnqx+5q82y9eXJ99WQaR0lZs5kSAHoiUxUQLSRgCEUhq037NRIVAyOb1nzp41PmiBSW5PdzyzmMZ7PycqPD3EBq4Hne7PdO8eEmm3YTBbtRk26EHHv+et7M7wuU0Y1X5UYtQgDdc2puzEK4h+Y89xnh8/c/l5j29g7E2VoUnH/5lVd/2VEZS6xrDJUZ5NyDSmAi4oBYpJA
CgDmC/QImQvB+1jXIoSdkQu98dHM3KxSF4BrnWC0Tz5AdFZUZjWMAjp0nDwhz19JswSHvfXUUl3fQcXDBAXoOoAgI7FkBFGx567bRZz/5ztc8UhX9tOtDFYtjj85RFMGuH4IPU85k1jRRIJD54sWmadM9q1YrHCwNAxVxcHYanfqjx2UFFhBGJAKdJZi5oHyPQeTnB9sMTMFo3uUGSEj7OfJ+CALe0IPmd++P+wyHNjA0NUUAVVWVG2yT4Z77a3ozKm4IXDf2MZyFtXnB+AZI/XsOLRga7mtgzUBUAZT2qo0Q8awzKSgSqADOU3zk+SMjguyh1Iizb87mjy0wyz+zHwdMNSOSwuzKEUS6kXNmQKTATGRCVFPHoUgB3JO3DGAuRJvjovO3KyZzxB5o9q6ZwY2iCUA0t8shoM0AnZlqLJaJZ37Nfi8C2r4xDZFdMNM5zwaoJrNcI/hcecN9ZhCBRObCdVTbv57VKQMg56SkPZx75kub4T60ZpG4YsxZANUTOnD9UNjXJSVTmd+tqjMmqkieo4cKAoilTOy9qYLpbFMiAlCYlzlFyrxfiVl0rgNTMyXHIjoHe1UzGBrNXh4zE0deAFUyADCzgRA7MBAR5tkCX2abp5kRErMTyUB7Pj2iqYFohjl7OFdGgInIjX/yOV7aVIpqIeK9FxKQkE1lnjjM4r2ZaRHTQi7M+29Oahqgqqiqcw4QVZUcg1rOBYGQyUSJSc2KJFMgZjVRAEISUWKeygQGRDD3oT3X0G8CaKBStO8lZ1dUXU0xRvSAwAgYmachqW2HXWps650zGCBtSh44EJg27FNKkkrKXWwPESznSa144n4sSL4wEkzoVKgYUGhqURYFAVWbKiYYOkRb3rlXucbTwRd/6GdeufWwpLUnf1jfyo2B6eXl09YzWamiX/ijj95/whXFRUyaJBfJQuhS2tU1LP0xFB4kSx4XGGTMadvRksXE1ZxN1t24OLzDVQ2zryzCgzeO/vSD02987Wt/5+/8HeaSFDe7geoKQ3ueRkv9SRvfePFoZ8P9hy+eXV6//e57rlph23z07scN+Vdff6CA337zTTB+6eHpxeXTo4M7Jjxa/uynP/Vz1z/6//vVX1q2DTIqinfz9VEBOaeio447FcXgK2cwdl29IMEMvs3kfUVoGKPbdclFZiZjU52QHTkukpgAwTClSNTi1ep00afd2cUwFNwhKHPDadQeQV2EKaMoEVPfF+eaNBRW+7mf/cN3lsuPP/owRr5/dPf0wb2/+89/5VOf+syd01sXl1eEtlpW0YVd15GUlW/GksgnBQeEsWqaGIfUFbO6OvztX/tnXW+vf+q13VH8lV/8lVePmvb4MJUxXa3r9kjUUWoISmAwHUvGRbsYp50ql1GDR0NX1Vz3HdOj6F/aahDpwQyxliQMUMZt1jKVLNDkFBC2MRxrGaa8BaOqugPQAhjQgsEbQnCtgaRhjTZoKTImMXFeZLrIgK4NCVOob86CNL76wsn52u2mbrfti6Ol99EcT3pYh60HZFonoRCK4TgM23zZWQ4lfPu9zVqiUNp2Z8e3XiHSXdp+8uTN40UKrj2/mqYuT9M61vW2K4dHtW+qvtuujtrr7bC52kApMfg8lscfvS2Sd09HHLJbLTTC4lA+fcLw0fXlox2NzUSlSA7kI3MSKymhuCkreh49n3vCO3cTIxQInlyVdtcfQCasW1scpOaEsqbrgaZSApcywth578m74knIQ7zdrF4Zc+fg6bKeQqiuN1OX07JduvZBIUchDdNWFH29jAf3NhpEJz+u+fos9C6WgIDsAzkiRC2pAEZfg+lcKUCOvIEwJeLly/dyqDZnWxrBOTQiV1dTKsgYvKsdb6939WErvq6Xt00wXV/6xQG7WnwgIJTiVECcd77k6XC12E475EgGYkVkYHRF0CbhQDWxjJPlrFmDC+aHoqWAeQQwUSyCFtARFBOrYsxZCSlyBraC3Phqd72tF4vNxW6xap999MH/5c/9B//oX/7CW9Ozmprg+fzZWbs4uH/rwbNH581xu6yPdpurTdfdvnv4/psfLu4effNb377nXrlVVbot2e98VQM5AxAraUpVbP/v/9lfruo6lXXKo3e+aPHeNxXvcu8QYvB1ICMpaJkAJAGPTd2SK6VAU8dVU+8+uWTjTDaO086YOVgZVK2pqzwkCrGA1D5sNtPhAtDZt37nu7fvHr3z+PGC48XZ2fHhSUo3T8qMjGwlz/MnKfJ710v36xKgKgUAkJyPmErxHNHBIi5AA7Nv60ahqKvv1HXLbpiGYjhZYeCpv1q2B3m6do6ZeFG5y+vr2Yd+7/bht77zu9kkCTJIXYXlYfXhx2cIWEoBMWRyjlSF1IAsayYXDZ2qIHA2EBRCZmQVYQI0MaAkmdkhO7PsGcWsKCQTqkhHYcbYxtT1ACA6SHLUxG3XrZZ82KzG6+1Hu64I6IQnx/cun31dRPpdCLHebK7quhHMgclsuL6+XFlDHBS0CiSCmCs6Olxv3yFixpJFtOTISggBwTPt8uTdUq1Scgj1reM77UFN2qdpY5bJ4ziNxRRUHPgkCpqC8wiMKqJ5LKVk3j7tHrQnd0/q9diFpskpaZ7Y5ooTNQJHAdATmENnAI7U+zpPAyEBcMRaVMn5bOYpiCZTEzUA9lwhEho6BskdIRsKE5cszvsiwq4WNERg5F3eRXRgSIiimZTIWEQI0Cwz+cA85Mk5p2rBO8lFZ5gmgCGpmhRQVDFFBFRtYrVYtLt+2w+JgTabcdEyg12n7vjkgNgMTHMG16Q8zivzXb87Prz15ddf+Nb7HxdDxwzAZZgY2Uyn7SjN0jcxJ6nRlTSm3liVk44yulA5xJzyzfSAGNET4VgMwlSyDSVoXkbG0jV1NBRvxUhCBHMU2lAdLJ3jod9Bttxp6rvTB2985U/+e83tO/24DeyapqGpT9eXmpUSaFEyqxZtdbhQyKzmKMCItu4fvfXm4clpPDrqp5GcY+dnS3ruOgTjotdXV9NuvHX7tDqoE2naDc67PI1Xn5w7weOTY2ZPjuq2AgIEGa63/XY8Pjxu6mpzcf7k7MzHZnH7EJ0rOpVuUwV2xqVkUislMyvFA6oqtIQguV/DNOrQUWxCe6JMoxQgz6I45u35x2/9i69ev/NOsLY9OunT9EH/qD048OGoJDPJnCd2PvgGkcqI2YQa1DQUmRKlv/SP/psn4yYJQp6HLCBikbJvpd67EFQMAMx5B4T/+Gv/8LMv3fnpH/jhlHfINJ5deY9beBYOXH1S1ZtxUYytbpdf+uDr4+Xjt9CG6tBh2cW4QMSx2x0cL//P/5s/93/9T/6CsaMxSTEVVaZc1JERo+wdD+bAVJUchaVfHrVNE1NX+k3fD8lUS4GmilnVI4GgTWCDCkEuhb14s1KSqIHg3PxrDERQijDRvGxLNHsUCjs20OgdEvmmqo4OfdP6tgUM1ow+Fw07YwjtghAXxC6XYTe4kmb4myJNUyZEEwDCJBmJRZNzk1mehqECxFQGoMm5AhBXDfVdPG6K85OmNKlOE7IPR0cB9z2ARYQBjGyGks
wBEzMtalMqOU3dbj0NfUqiSaHs85gZBFBBhIBAEAiAyMxAkqoRMhgp5Amu1vSh1+wRPLumPeDngsts/Nirtnu9YD9T+X7WbP/z35MVei7gfD+b9jxdNqfGnqtK39cBbuJqN3XkMwzl+xElw5s4HN74rGzvUbLvU65vokS2B48gcN2cNAe3j+7WHunp5UU/DiVPnj0TBodaMjPOBViOyTMzIajOZtzgnY8hOM/zfBUAyRw7jx5Mc8lIEMiJ6Rzm5Zkbw8zMRGSmgGjGAprMGNv28AHHBhAI0Dm358AwEDsCLKhAtLr7oORy9ta3HCMSDes16cJVNdUhq5ScYx0cujLlJLkUcx6ag0XoeLO9zER1aErWsZvIxF2cHxwliO3VQIgkJjAn1mdVY478wBwJ0jn3ciML7Xvekebxtj9Ee61oz5i+UXngJlcIpiIEiMD7ojkpJWfJGdBTyQpE+75zBSSdx8BeWNkfxHnJwUwB58vQDasIQUWJHSGUkkEJDIAMgAjRRIhIVNEMaF8tzsgAoGqGSs4hFCQEUyQyQ1MVUJj5w7MJiOaWdwcAKoZAs6Rz07k2qxjIyAaKwIyUS5rn+cw8K1tzae38AYiYiFUQyauKSpnlMUQytXktUGUe+2hqCMAzHgAM9p16AIiz6DYrXoYza2lOocm874k9oprO9+M9mno+F5Bp726aKVQz/jJnQADief5L5FWS7bcrM1FIoTTeL2NztdseHRxe5sTsNzu5PN8cn6zEMiHqzIRGmv/CjUGqlITIpvOXNTUgcIisqKoy45pUlRCI51YWVFMVJUAkQkPTrCrsHIABKIKf28f24UlVZhJRIhIQA0X0krNzHmdelTlTNTFQlJKYyfZgrvn/MiGKFKR5t5JKYWJAN+cERTOTR3I2E7J0tuOpmSCAGRHSHH+EmZ9PDlStFDVxgWfEDaIDQBUxsBme5djh/hpIgAQqczkbAuC8aIdoCI6cqiKTI1AEMPX8+7HWpiBT0kkdBgBxAN4hImYrxgwk5ApRiSFiGbabPlZOsUy5J1t4rFCZxIZBiIloYhRRyQYmyo5EDU2JjRxlEQcOBEXUkaJzdXVQRwpYmthyCRWsXr/7uQcHx8PuzBAc42boAMlIq4CHR6uuHyTb5XQ55l2s6/PthCCHy2MZUjflfhhj5UA60StXeFUtven11e6Fhw8urh7r0PuDRgFef+X2b/3az//IFz5D2JIngIKqPrZf/vJXHDa/+I/+2/o4VzVisS71VeQhYyf41nvPoqG7vvz0G/fffDM92uya5WGzWn7w/qOUx1de+zRm+fbvvjlouXXnREnzkBl5d33+k597o193/+Q7v1u1hA5KLuzLMPXDVDCzNyZEMRhzsSK+AiBdti3FRbE6Vu0wQcoGZVN0S5g9o2MgSKVANhE1UUVByNBPG2iKsVWAiFiQwMgAielgGa53ayqWRyEXKg7kg/P0lc9/fiW6fnTWHjbLdvnCK6989be+9pU/8JXbt9rd1TDtkudWrer77MgfLe88fXoGZAEpjVNOUlXLomqK5B2bfPG11771m7/4YBXi4a0f/+l/c/3JW2+997sPD5d9v0UOhweHqYwCHkIgrdMIjgmMY1OnvQM3lhzLmmDoj24tSpKk5z31wHecOzJzAorBeWwqv0qVk3xpOA7jU7SRgQ0LUQWASScFERU3S/uOfLOEbATi1IGts2x0fNL4hvGEeDGfBcvKHber9XZXV2HaScUuAC6ODz79qeP3Pni0G8bFMtRLNwxyvRYzHroxRh8WdTeKr4MO2Yy//ubjJPnewxefnl1+6cuf2k3j1Te3ow73Xj3eXI04YWHWUUtKvvU4lB6GMo1t8KZ69smlwfLk3u3adUPefudbXztWtN/86PE7XdBWCcGRqE6CpobIdah22xSadhtwu2zz6l4GQiQHGYcr6y9xGqv6hFYPHimQxpp1cfsuN95w4drj7aO3lh6mzWXKyYXIaJTOaPOJSAZPE/pw8AICWPeOTlfd7nvZBajYrCp1ezWK1tjoGK+eng62NBcB0zhgxWbZkACZyYshKJjonGBSsIyAJ4fu9ZfW43BaLfv15SossSuRnGtCqEOyTMm4arFeZrNYt6zFewbmLJCBDNl5n2DMQw4x5jQ8eHjrydWuABSk2YAPZGAyTSWAL0DT2C2XjUrOYx8cmErrIxupShOCmaY8qwlWkM27fujHLMWwXi6vhyQOUjGPnDK1Jy/85X/6Nzewo7hcj4WCIbthLJ9cXdR1M47idj1NI+Vp7GInylU8Q7xcf/jjt17+1W//+g+89Fki9XWMC7HdQFK+9dvfe+udd1988OoHj55pKQ4rJicwDGkQyC7UF+vNNKSaMF8nU98cLMmxSoJJiwCxv9z2AmWz3fkqHi7bNIyq5eTo6Nn11eHJ4Xa7U9NhN6mPXTdVVRyUJJHrFOrmlTfe4AqePH7cxv0toRRBVeeZmEQN2RnM5REA8zOwoZQ9VpEYkhiiI++nNBJ7A7vaXjMRM/VdWrS3q0BDt7PUkykYhkjtsj27Pif0yDymaV5qPzxYvX/2eEwZmE3EzFxV9WUYykDoeR/yNzETFbDsKIKnAiX4RU7imbs0uuABGFCYmYnHImIagy+SmNhEcxZwLuWMCATM7AEAMnr0oFMVAlhm0ypUANo2cTdN42V3cvdOsvRoc7a6c9ptd4LEMZR+GzAAGKJUEe4eHyIIsnmvkJK3xd3TWx9fv7MdnsZAYJyTePY5peBbJDeBVu0Rsyvs2dUH7a3Tgxq0K/1a+h2C9mliIkV1+1VYMqVSiiEUSbkUU3d5MTxc3r17vNhOYx4NCp1vd44hOJIsuKdqEAEKqEc3ls6Ty7kzVAFWEANhYjMLXMFNG8icOCHkLBPM2F8kQ1AszE4VY73abq9mV3UpyfnaUyADQKt800+9p+g4dNPWOQ9IBppUiL3YfiJwA3L0Y954V3kXHHE2VVUtqqpVFQ8PD0vJ3Tg0VXTeTzmZghi4ANWiiszsvJjMjVHGmAtcnG9DW//4p77wq7/1NR+5sBmQ5GyqhJZzXxtTSm0d1kmRpmJGngkdGqBzdNMDyIBWDGQyUU7ChDmxmgPvvIM8CHPIkIxZoSBraDksoiMIsSldPt9tt2m896kv3Xp4X2TrODHosEk45aY+1Aryrh913dxdJMkX588OSl0zOFddvHv28XvvSJL61q1uvfHR44zYNDOTwIYqm+359fX1anGHFmE3jsgYmFEl56Eft6vTg6N7J7u+W60W0zBoGkX02ccfbc43D199+PWv/tY0pcD+D/yRnwuLxZSHcadYNBPodiTP4IirVlUM2KbEMEFJMiUVo7CA0EzGuaCi90bcX7716//s6UdvTkOu2jb3fD0NglgxrfuO11JZWLQtOvL1gnxEIDPwwGAJMIc6/Nrb//Kr735vIpRiqHqTMbK5/wWRQObrDD2vJiKg1Lq/9E//9ms/8MXTSRs3xMrVy0OoVhYXkNaatTWEsUyhvv3Sj26pvt6+fUDreukBJudrMtbNk9tt/R/8mX/vP/5rfzU6ZTWHxgxqMpYUM
ND8pM5WEM3ART65f7K6f7c6PNKcLj78YPrgKRZEMSVDQnWYc1YTAE1JgKmYiqAUMRNGN9NDiMl7xAyOCUHMsApspkgU28bYfO3ZefQxNAsVT1CBuCzSSZFYiSN2rmHkYZD1hsd+nJIqJM1KTIBDKsxhSqWYMjtgRzmTGoxKsVaRsagNCIZ5HNPT83C60kXkxcpCDE2NSKJw684p/Pw/BwCRpEbO8b4u3kC1lFxEYCow5dIP/TAmEzRRmKegpqimoEzA89lOpCDAhIogZOAMXDYB6LebJwQ5VFXVrGK7IOAZTnyjDd3MfPdGkxsEtd14iL7vkHj+5/vs7Zv0yX6KA99/DbAv4Nq7Vua1kBv94fdvC26Sajc0IrvZ3I3R6UZOmH23NnMUQYGnZEe37uuwudhcjakjhki+ChFKBi2ICojRRzBlIkIjJmL0LoDjWAXvg5TikdUACD3xnCKZI75EnIsQEzvP7Gf4MSLbfJRM2Xlgn0UcVbG543ztAyOA9+yYFRAIiBkI1dQRIoIK3737BmU+/+gbBlLFKKb9lMSz886R76ctUVm1R6LZZmuqFPbUtsuplK67dlS1y5WWPmWwq4v26CTz0a5AZodQmNBuwDezv6VoERUxZVBG2JOX58SZ6T6gtm/H29umzJT2aCFTsLnP7ubYWNFiKioiOZeUS85AmSQBEhjDPipVDPmmKOsG+wswp9tprw3S82Nrsy/VTMzYeQBUBUNDIhOFWQKZjW9EJmWGB93oXWgquB8vc5M6GxLMeiiRSt5rR/sI3FwlhnOhlqqq2Vx8B4Bz49Us38z3dkJSkecXSABAwtldJZoBUOcjRTSLNUhIRKo6280QickVSchsKqZmtB/Yopk5IqJJAQIRmWtAikyGwH72uYBZQTMVZfaz5CoyI3W0aEJDRFIAUfHO4Q2ACdHmdpQ5czefbEgOEKWYCGTBql4W3XIES4m8XG2359eXh8f1npKJNidtxRTNZkGKyc0OYjAxMyZgdlIKkjcDNXFu7g5DUbE9fggRmRzvB9isbcF8NBGRVYWIiyqAeXZzntRM5lIVx872MBlAwFISIwHjjOJm5xBJVZCRyIFlAC3729yMdkIEwjk2NytTM146JwAzY0A0LWBENEusBqaIJCX5WAFZkcKMyEjABiCiM7kciZAcqjJyloRoe8YWmEjZa0RARrRXY00BrUieuWYCso/l7t12N1JRCA5BQ7F+MCNHAcmXYuKqIJOoTeyNQBCQuIrLNnoRKLWKWpWzpSERUnAxGU6TAhYFABc8x5QGcs5MYd4bgmQul1zAyICQi8Iu28nhgWIV4sEPfeknv/jiF6f81FeOONar+snFo7vHtzVLSYQUAdeOfeJ0fNpoCFZwfXFeUy4pe++jrpaL5uxqg3j/sy+89N23f5Vc7S2tN7tSysnpUT/0aQCsRuT+13/l53/q3/gfSxqVwFS0mA/hMz/4uXUZv/OtX8wXnDbrtJvapg6+dGOCZdut80dPt93w1k9+5Svdr3/t2cVVODzVEN979Hi97b70o1+yiN96+53HHz2p7sHxwQrJpSQ+uD/5x3/2akrffPK92TgsRbxSNu6LuBDa40PfDWPJw3Zqg4urSgRVfNPeccVVVT1mqqvNbvPBOD1zRIjFORAwBB6nAkakiOKy0EicNUPxAdGpGkFJhYnTZJ6rgokR0Wo0cWn8E//6zx7Vq052h6vqs599YxjyP/mtX5edNC5ePF7Xtbt7a+V9JYQ+wG59fXb12HlljuyZCjZNFNMQKkTYdNfZ8e2HD3/q3sO03lE1nDx48fiFU3/62ju//hsxL/LFbrMd4mq1rP0w5aNFDLHZXl+bJdeK4+K5VoXgvaSsY8r63dODe/7o3kdX3016MQx9pxVzxVA37YHzkQpoNNGk5NN4OaVr0bWjHonBvAiqTsmc6QyVK6lkc6VMIwMRVeQk57WobfPFfBb0u3HsdEw2aFodH2Ri58N6zF/9zlltuDxeoOUFWjclIsQ5xZ1tSq67yMjABKTu4gLPn463W+iv4zplcWIgPrp7d0/H7knRcbPZIeuf+Lkv/8Zvfq1dVcHH0Rid76zw2l9ut0rt0Z3Vs7Pta/deX/rp6dd/J6xrcZhSIgxUVVnEpASifijEzQ542za7ahWcr9j1w+DJqHTj1ZkzmTqpmzt1VYFTz3B9tTtsDsLiJGGoX/08q2HzIXYXi2p5fbG2aXIoWWTsegyL5s6rY/6GpqtcClPU5LU+rG/dHykKQvCQPn7nVol1QiuiCMETIRgUMFdVVS6qZRIxEomBJRvVLFWID1YT5eAJyGLVom/Z45QToIAm9j73OcQGuGIUSYJapr47OriVESKzI3Le1VU9DiXWfD3Zol4cLdrLjWQztaJmDOrmGlOFUaU6rpJadoApscWiAmSIDAIMKPsENxVUkUwO8/5BwpJ27Z3GdniwXGyfXl1vB4cwIrlY+5Xv86RTGlIhUMeUKZ8+WHzw5Dt/8HM//Pa7H6IVyfny46fOMY755YdLL/xa9Up6Sv/1X/2v/+Sf+LHj6vbR4ug/+o//wx/7ia9cXrxX1Zx6tSIzaBAc1E2zG7bJJmVNApJUgbpSwJkC+Kpy5HfnXQEiV1e1yyBDd+GJfI2JptVxu+7Wsa6TOBTx7aL2RM2y6/vK1evLhMG7SJfry123nq72QF/vKE8yh/PBBFXmApo5xg+AqsZEBoKUS9pOo/HiEGNkLHma1/FkM+ycwyRytnl8787J3Yd3H3/wfqyaMYnz9dHx0cVb35Nilso4TAA0ZbvbHl9fP/Po0HEakqqSUZoKIoNaIDeUrEBAIAZsDIAAzrErpTCSGgTyJiAo6BwZpZIQQcQUuEhh9IjOs84TUATJU0Fg5xyUAlljbEFRARm9oBry9W44WBwsmmV3uZUZaYLMxCzJsp4cnoz9NGzPStDj5dIjC/FkqVmejBfTrVuvYfCbq44cpFFRJfqAWRz5pmlVAEPF9aIA96b3jk7unxxXuivTFqF0UIoIijJ7b2BCqtkUiFi0qEiWosxpOy4EXrh1bDyIahXrdZcBuWjyQATEngEs5xEIipgjF31DhkXFyJPjUgqiJ4oiRVUUMxKT8wxuKqMyMxkYETJgbTQvMBIZYSlgE7ADrJ07JgAHSghZVcz5cKiqgkYhzuMGSEQyc/CEuSQiRI5z2AYpKPishYlEzWB0LqCSISyqBR7J+PgTUZmslJwXLYdQ9SJYZjcSYEnLplExZodKfafbfpcz/vDnPvfR048fr7fFLIuF4LPpdTf4ZcMe+zwJSBFRzKKSS1EteczP19CIYF5otSK5pLEUxQqkENCi8cHxOGUFNV+KHwsII1tOnrn2vl14oOUbn/7Cj/zUH0z9U7Ht4UHUvi/bYqOthXfriwLU3rtb1FTt+uJqe3E+PHl8ffb02SePTu49iFX11td/g9iY/OLwYMrZN3Wa8rTbTVO3u75+7Ys/eHj/JFtCUI+hjPnxhx+VnAJV3vnzx8+Yqfe7frebpmF5cOvohVde+sJqffHk5c99yRE7cEg6DlexPQCVdtEMm7XCpIIc
gvNACAZi4zj1GzRSNQ4xtgcZspbCro6a+k/e+c1/+jfGZ+ckSVwYhq4/M88tEpcqt7xqqvagPaTAXDUWIhqDgJEZ4jj06PyjzUd/85//g0ymRVAUkABRTVWVHd3EjmxPHjEDQCkioBzCZen+s7/+l/7DP/Hn3NDHZaWYSYjH1D16UnqLq3BQ+SeT4rKKL336yXevxqsLiN3hMZiWqm0EsfT9D7xy73/3P/2Tf+Gv/w0nPpaZd0Ls0NgQIOeiAABYDBEwA/NqqbHCuq5eeLDImi53DsgI1bNvq7Kdhou1oXpPAFBE5iVA0zn4YIgCxAQQKxcdIaN69ge1Q2EfcNEYM3tGQBEUBVaUbixKY5nIES8a7wD6XemmdHU5bdaglgWmLEmzCwE0I1fKrIwIzhANjTB5UykiYKMqEtS1M6OiJSXpzw0Gr12yqo4HDQtCymnYN6D1w5aBkZ2b+6iRVHJOUxqnYbsbu03udjKNaiwzpZhoXqWZfQVMSIAgiggKCmpQWFSRRVUBwTma+u1ue9EtL5fLU1/5OZBzYz75PoBozyS6CYDtmSbf13TwJvx18655/n+jMXzfpHTjNPq9CtPeSwLPf8/v/WPP3UN4s+m9SnUjYe23a4BIc6mRAIrqkw/fR+mnfj0Oo2NfVQtSQZJJkuQSQyBHTESAiEZIzrvgnGOniM55IkMHjrmAUYiBnZQRTdg5M0WE4GNJaqYMVFSYidnN1GRDLgqibFw1yzsclkjeEbNjx57ICJC8L2AG6rxDAlQALejx9sufSTmvn7zFLCNgcFKphsAlJRNLSaugIbjI3sS0CBOSw9b5aeiSZPCOoi9cIle2u1iF7P3Rlc4ngs319qY6Y6GRvAAbMpAzQDBEQ0dOZ6aRmc2XjH1CbL4I3Gh69txNZnM2exZjAcxMVEXmjnARlgKsJjBbJwAA9jlX3C9ZzPoa6H7AqSIayA3DFPfg6nk4iWbCoJZRZEZhi8gscJmgATtGEQGYiTyFHROhiBA6BUDD2TREqKaFeU6iGaETESBD4HklGJHQCuLeTWWmqnl2Qc39bmJK+/2iCG7+4iJ7W9vcRoJEOL9AUi0i+9OCiAFJVcTyfHLM0jwhGioAEQQw3KOeRIlo3ysHYFrISAFFi2OHSDYXvZshgWN3o/Yp0exPnHNeOLuOaEYjKSCyibDz87FUEzBzjg1dXVdVYCZu43LX98FpKmmypCLsWUGgGDtvZFLGwNEE1AqoqYljRkQpclMzRoQIxHOWbh46CDR/kpuMF5ihigQf1HAuJTCT+SsTspoRsaqYCc3+g31t63ysERRVy7yXTAsQ0dz1AQoGJYugzOR+Jixis/963g4qqBVVZcezoZKYAcBUEYDJzaNvrsDbi5Xsbnx2uo9qzi5YnSlWoFIQ0fYZPZw/6j4ZAEbEYKCqJnsnmHMOTUWEyOeScB4DBP99VhHOXElnvnIM1tbko5gqOFaTJLJYtiTtojqtFqeJXO4ec95FnsCCK9Ieri6unzgHppiKUfA8w9ItR/Y63zrEyAQKGIhz3pky4jSMzgKbB6NXX3kJ+uVrpy9cXX4cWqp9KGLb9a6iJk+ipg7o2aOz6Iycnp+fHR4deK7ylE+OjgipcAm1H/NuvV43WHlo3nvv201ExlQfhbKT3dUood6u08WT3cFxPcH6r/7l/8+TS/kf/On/ERahwAg490e88PClr31toflKkhGy5QylLAOTWtvUfZ8+XG/+5a/91pc//drvfPN71921r9vV4Z0P3noHIX36s2/4l156enG9266rumorD6bDNMDV0z/zMz/+4BuHv/zOm3maAHm3Hp3zOElW60OqKyaxyIGdYdLgKzGwsZPsYsMV05S7PA2eSBFiW1cV55RB1FS889ZnVXCOt11yDpwxEy6YJ5jb9LAfkmlyzOBYMvXX5c//r/7coe3e+d67B7dP2DUfvv/JO99787U3Pv+Zn/mMpm6ddgsfwCQPvbFnC1LQsVvUrXPcTVd3b52er6+9d+PQpzS1TZ0Ui8jqsCrOT2jPPv7k/qc/+/Jrt47c4fWH3/7mv/oq+hRz9svaQ0gDMJIp1dVhIV4dtU8evbtsDoGlyMSOJK316XYV5PX2+PH5o5Q+vN5SOPy0x2UuQwwJ0RGKaGHnY32gZcilz6UwB+e8j41kSFM/jTsA0TypOEWnyllGslLS1vnCPGEd57Ng1YTD1a2nF++W5D01l73maNqVYgYLdh77DiCVSdQJHiwDKp7vdrUuC/uSRm5CP0xlxMPbJ0+fngfDqoJk+NLD0999992PPnkqqj66dlmtN9e0645j25k/PDj6ZHt22tzalfWf+tl/+z/5i//f8XxDSz7owufz67/0t//6wXSQTYqKIRYpgqAJmhCsWMXhOtNVVdvJXaJKC2AugbltFwQ91g5G0JRtd3lUH4+bC1+GlqFpD7rUjdg7wJR67zOYTmUE71zLZUAOFcfA8Wh9+R0Y3ieD5tYLw9bq5rAs20KeYPJ5gMuzu6leZWzICxbHBFhQlIHm/nIVAREicoiOWURSqPjlO3D/ZBo6LERx4Wq/mYa6Pagj516cUHQNtQ4Qp3Gsg3MATdWmuBiGFCquGPthrFcHaT0Ci4J5F3PR06OD66snDiJ5X1SIDI10EhcwQd7lXRVCqAIiqRmAUzOR2SAspjqHXoRNGZnYe++Bm8C7q41Ppamr3fmTUNfrq7E6rK2qtjn5UVMuTqGuGxnT2O2qxSK58vBTr7/50aNpzOu0bo+qNJbNk/ODZfz1X/n1+y/e4u7q49/efe6LP/XsQs6vr/7a3/tbr77x+WyJyDxiAhG1ccoKpa1oLNv19gqAY3Cro9XlswuHedxe1o07vXf09Nm5c2E3DFw1LtaOcXN9lktWcgC59OQotE07jKOk/qStlcaDhpUzoOUsQNwE//GHT66v181i0a72z7wiioCqZjALQ2owr1/NS7FoBmpIxOPYZdmQa4bd9fW2P1y2VoohrhYtOQRQ5/yzy/XV5TM+WJqZahmm7TK0jy7OdnnyzmUV9oHQuU5+9LM/8hvf/JWrkqXkJniH3vt41Z9PIiaYpSAhaDaFgA4AmbyIomkMzsz6ND6/K5NzBpLKxORi9KrF+wrmIh8FU6nYIdJYLGnKYj5UsfFakkqJVZtlVFO0uk/C06jeEUDf7eqjlWopKatKs6pSEtXCxNFXIDHrQFx4GlH5sDlYHMQ3n363zx1a0Sk5QDQHxs5Hcz6EmmJTElJwq0W4fVQ3sM3dGkHGacylpCmx8zmrGXmOKqMWBY9pGkWklFKGbDt8/f4LzGM37hz5KY9ZEhIQeFQ1zaaGZJ49AFS+ViliY+0iGSgoADERmBUdENGTz4oOXdaSbfTspKR9+evc36vmY0B1uShoIppbDT1qFptoxmka5TIWBJNMTDN3NefBhaBiYM5UCVygesi9AagkJABgQMpask7EUnTm6VsAxtDcPjrupy5LauoAmkGYkG3KJUvdLLVYmZQpjiW39cJscIBT16FWX3j4uoxvn+2ur3d9qiKg1UcHmz4jAnmfbELni4gPrqiZCyW
lMpX5LBhzDkydJJJEaow49WsXncbqWnZV5YWLYfZL49rXqzBhGndXMOmdW/ey0cHxyQ9+8Yem9eMubSFdb9/T4en57uluGmBIEA7rN37yp93tOxgJ+um3/s4vv/07bz58sHrjM69+8af/4PLkOA89ADCxKZWSaBi1FOt3nFNw+NoP/MDq9E7SYql458bNNo2ZzK1Wh6Gt8pQYMMRKIR49OGIiRDf0w4RSH68Wd49MiR3kIlWsjZkRi8mceClWkNCmrgw742BpkqIuLnz0CmXMoyqwY1Z79Ju/9PWv/t3SDXGxUA27q+RoUVc+hANy1dHteyeLlY5MjgxBKwcOy5jQBWI0VBRMafoH/+off7TZTLPQa3ND9X6ZVebmX1De+x2UEBHBMZsamzKFb37w7neffPCHX/uMFMl9ykm1EiKOhxVFzw0vNQXTjfP3Xv/C5UdvnV99Ep0eHR0O2745XAFA/+zjL99f/pmf+tm/8o9/NaKhKoEwAGgxRSLWIkXEc8AMu4tduNiElbarlkB94BQdADarSgnHLAjG5MRkLkVCwFJkD5e9iVRpNgVzAahyYVnnZZMjVcF7H4rzooAATrX0GWQCVAiVMnGIaiIpwTCVq8up62y3Y7KpT4KsBt65eU6LzBoCh6hFpmnwBOMwGoP3UQGACSsPy4bUXNG66MV6IASEEUSmqUelylcp76cHfX9Vik2KHhGJHDKalCzduJ22l+P6EkoiUxPkfcwMtahjP68vFC0IxkQqKrM5Z26dBkJkBU4ZEUs3jFMa52nc7/Pp/D7bzk0U6blwdPP6xkKE8NwUZDdTot+n+cAN7xie/5dZcjJ83nT23Mr03DwEz/Ntz1Nv++3d2IoMEVRnM1ExMzNGuPzo7Xe//lWcOlQN7OtYT2VKeTKbAvu6cYCGaITABFWsAI2IQ4iOnAFGxz64OWs3B4SG3DNqdAFQkUnNTC2VbCIQUEDrqhIFkxKjQ8AiIWFdL2+FxYEIOnYEhIbzDJaYCZ1DRBQyB6KI5kMlWgTk/iufypqGq/djYGJIacLOCKCNrUKbR4UiVLGokWLSERl9jGhgE+apFMntIg5TKnp2cDwxk9LBtmBSVZF9tRyQqSIgAt/YwOZ9TWZz6aTsLwEgszRoAAjzFHo+JAoAZqKqhLTX8fZPK6aqIiJSoCSTjCUDMiDNE3ycO7/2PB69sWLNdWx6M472n2nmZtPvjarNvJibsWgiSA4AZ2OXzkgm3De+zbanm2AdIjrVYlb2iUedY2zzKEOcEeCqiLO0onvdB/dZObMbMvT+iQyBcS6mh9nBp4pAMy1on55EmO0z+7AmzAQBnVUmNESALIXmE2iPkZL9JwFEJFGdLTwKRvOXgr3NyRD3sompqhAQEpZSiJmIDU2lMOMcddsn0RTVChEjEpjOTV6zuoeEgAyIqtDElgyIqWGGImC43XXsnEgGns1TBQyYnKICERMT88yFKiXPhCciJkApeS4iNIQZLE3EOSXvPcBszOFZVTMA1QKIyKwCuP+JAhjMPrJSkBG+L40hEcG+k88AjJhRDYFm4xI7T25Ps56Pk0jxvipa8EbM1nlAz6YfVSmCRIBoYsReVdUEEQl5n4QzBdAZhESIpiaqYEaOiWYCNwkmNQSCosJEQCgiCIDEuCeC2X6EEt0IXvOHKbOFSREAkf97AbRm6ctEu2lHTI1Hoh1aDj6mNC58KBQCe4TFIj5YLR9kxIlJhvUu9+Sbo+PTVWwv1o/LVBRZwUydY1TICIjIhiCinoMYMoMBAYjzDkQXbVRjZ+H6yfp3Hv/u//7P/0eLZZOXaEUPloePn50fHNXjKLtxgDyuFoeFuVrUF9e75cGxaum3QxnzIkZzNG6Kd0hG5CQNCYVUdouVG6dx/eh9Ubxab8+ur6/Pnj18cM9VeLXWOy/c/i//4v/jy5/7kZcePjBTdgEJym535/jBq6+/8c7vnLnVajeMKU2qyVPd7yazKdSL4I/fv9jhOx/8+E9+7r/75X+RBjJw1eHti6v8ja+9/cbrt1++f2vdTU8fn905vc2ARFUq6kD/yFd+ctDqX333t8aS0XS3yTqAuZzGKz2o2mWcD6YZhXCQFAQGdhVZt+v7Jlrvx6zjwcHSewDNbeXnVTY0QVfMFJk8K3hGoq4fATMDcmA0QEYBb1xvp90y8B/9Qz+dzp9++/yd1194+fTk3nffem+X+pdf+dTJrbu7zTjt1lyEgZaHy2GcuvVIIG1Vc/BFDIqq2HV/jY5NZoujInsGcKlMu37Yjj5UgcqwOVsc3eWT6pUHf6ibLr/75rd2T59Jt7lz72FYrTTp6vhImUcZu01YxTvDcCWWGSX66BAN4PLsXSA5Xi62V+cPXT0O38p81fNqsMbBAmUt0nuPedqpZBNxfsnxlCkCxtgeUEgY1ia9jINlmYYdWI6eQa0Ofre7cJQ07ZP5JsPjR2/+xBfuvf14d73W2lBSJicPj5dPrzcnx6+cXbzPCuAQFHa7wYCz9m083aZN5fnkkPpOzp5cUbEU4TjGH/vJ17vLq1/87d998d7hkC0PAioMdhDq977zwfHR6vHFGUN+cHf15PH5C7dXaa3sqs0aXp3CZ28tfvGv/NXFeDop9dNIwddVGHLPkj05FMy5DHU1rI679hDEwZRqJC0DYpngurZUphSdd86PZx+7/hkOW9YM3HSX4+Frb1xa1nFcf/zuvZNjQzf2PdSn1d3T3aP3+k+eLk4qPDxyWiHezf0uJTXIateYxmE7uKrR3cUyw9Lq2qCk0TsGFCJ15CtqU97NkriqRiJTLn2hptLjYzs+LoVqV+36yeLCR7gmhUWcyhCr5ZTEtHAVDSDECDI5KNM0DXm3PDxN0+gBduNI3sAZSM6KPsZpGBaretXW18kmQzQ2KYYSA8eK6rb5wa98/hd/5Z/fPnzxavtsYQsQpKoeSyIKjgNjMpsQoK6rbiolZRZDkCJW+wVOnJHHPHKdmxXm0ueumJo5PD49XV9c1nVUUhbbPru88tIea85TbP3Z2ePD+w9jQ3kcBpiena13z66az07NQfP4vX/x+qd/7q/+rX909cGjL/xrn3n7vW9UoSrF2hh2kpHMinlXbYYLYQjMRJSHHF2MPm6vNlJ043a2GwoXj+iBx343eGbldtkOCUoSy2KV3w7SrJYPDurLT86Cj4vgP3ly4cJiuy2u5hdfPem6q8Vxk3bpk4/Pb6Si4pwnJgRih3NUHqCAGSEhoCDMqw0+UMrTcXurv7zyY+LAOY1QXAgx0kDkGfOyDoHDuEvN4qjv+8PV6W69FtKWm12/teiVXZrKq6+8dLwKwVG9aOs6pH6CLKGqnq5LMSNCk+yp8c7NfQ8AUiwBEFNEw1ySmiAhAAMzgImKo0iAYylNqNS0qKqJi84k51QILKsiQh0bASYi8iw4gqNJptoFLkjqm4Nbm815RdSnnC7PF8sVsZ/S1kmjCIU1NrUjchVuZDxZvTTsJsLm3qc/++4nbw7SFTNXLLqIQA6c1lovWk+uJPDgAV1bL27dPwhlK9Pk0TLibjc4z+QYBMSKaHHMc/vx/M
md958r3vfveP/o2/9Df21zsz6DZRh1It66iy214xBx8aRnY+9MNusvHGkpn1xfe//M7jB1fdppmtslaCxM5VzczMTTY5dpUiGKCIIHvnvOU0jXNMTVRNtZ3NSho51GrGKlzpbjcsjxeb7aPPffaXqQ4feOWjt19+Hdrj/ro8/uFXhstrLCVRNT89Ue/3A99bHm/Or5dnJ4Hy5t3vY5iXxZGbnaDk62ffCf7r1WkZ3Ulz9PIwXPuWA2UrZb68I3icjBGh8oMDMS3e19NdMLI4ol/8K3/rn/7ur33vzW9hYNFC9XxXXC81dt1L1/cf7u9fxtQWymJ+tv7Db799duc0hebX//DN12XWzhc3VF95/f2/9htfvXvjlQdvP73YlLo5ghC++d3u7O4LY6ahZDZgxTFHJO/rWQQvqujBcwUiJtiE2hAQyXwoxZAdgIsliZ60N17IXFvTFFFXhVQGkLw8nmu/Rzejlmq07vLSh9krx3cvuqfY4PxkPsYuAYhAEanRgZllMcfkgggiB+8Ck7Nh8NS081lJD4xVU6qWbQFDIA+83w5t27oKchqpcv5kXubz88bLnaN64ZxjdqGLIKCZUTB7poQGgFm0ZleFmREX2aeC6qpxt63YReHF/MjngkjsZ6KdSSrgq2YhRZP0RhDHLMYxkZtVl/vuJNSunqVEhhVVmIwrP1PcIDGYuKoKs8UQN+jpaBVa5pgycXXo0xFF1WL26HITGmlDTYYYwDUeY1Gy7bAtTmKfZk3liU1pG3NhcFUoUvKYJQk3gZtmt9t6x6G908WChFWoYulzNlIdZUAEBKx8TY5n7Y0XTm6++/QtIMiZVeTxo/tj7hLgqzc/sHn6Zh14vgj3n+2a+azxi2LGjpXMte3Vdry8Hk5vNkyz1aLpd8m3bnZ0rFV4cj1oUSRaztd+djzuL4rYrTs3kdWupECDVeUqeeXFe1/5/rePjhfo52nws7YGkScPnj1+chVC4FmVVac+nekuCHVVsjh2yBhTBIJiMsWlp62MqZnolLdHJHTsgA0wCSAiGldu5oCTDNfnF7Wri4mmbMi73W6xqHI2VQSRIlkYlZEMmLmPA5qxJwNThtBU3a4bRKzk4IIRAfiT1SKpnT99XMS48cBWt81UqavEBmhqmoonwRJJ4WS98FW92exEBRmcYwFRNWZWA0dtlN7AjCh4tpjaplEzLcLsPbEDvhGa2lfFli/cfenh+cWvfO43m+VslBTLEKraRIlJtDiPItCDnJzcWFdtLZqLPjq/yKqpH7QIOyekUW3yZZnmmj0ImCkQjnEkBQCQUiTr6fyExuRRry+vEciHEKUIGDkXgjdQRK82gJlnjyzTsB7URAeFTI5LiWgCkkDVprZYRO85oxpCKj1AKSCAaGimBZQYvZoWiQ60SHZMiL6QKZiZEHEuw1SqGphKSWSOEQjUk6NJiNEkJbP37DyoFEli4sxpEUMAlKLFITB5Bp786s+BFCIqMokDxMROy+iYRZKqTOAGRyE4LGIAJXAzjjH4ahjMe/FEDoBNNPdmQYqR45h7KRk9g2Mj3qe+4loKiBTPPmdkB4zcxfGth09efe3D3VAunz2k58ai4LwKDrtRkshgGUYAcgaf+8o3VzWfHp3dvqX7x+cGrp6dpDqwg1HiO2//QHIMVXty87avFvW8Xs3mrlkUrPbjUHLXVuFouXz08NFi1TCbkzikGNrGBWdWxtyPOxglu9rnTkuS7f4q1M452HW7etbMZo1abOaNZuddyCUp4PX55XqxrtqmXjf9vmuPWl/5LLFuamU3DkmG3HBY1L6//+a3PveruRvroxtXGZEzNSwm6Gh+vAyLWdRStvt0de5K9+ThI6xrnddbz8WRN2grPwvVYtYWiVzPXE0ABaRUVYWV46YlIS3sqYVx4NnKVTPZdlW1rIFSzIJKalXhe/PT82fPMgIzTKT8CcXB7KZxt6MJqkIEKAqIsGibJlDXDzVXDpwf0wvr2S/+az/3gY991LdVTsCsGXLRzMGhc5ML4WBGkHEcOigpd9FhNaE2Rhv7fSmjkWvbG3epltoB9kmud10/Hq/uPbl+uOryvbqUUohqQLDd5pMffm3YXf7uV7+iAuoYmEh4e7mNfZRcci6YjREAkVrvm1DQtPK0aNSzUyixmJgUnc0a01wvG15X1iIFC3U1KAKhmhFKEZUYXcrOsI/RVYwNG7oaw7gbJRdyDsWcI3IMpMycx4EcVfMZNi2d3BQfxl3ndJ9T5jDReJmqwKtlj0ij1imOTx/ROErOuR8cBdWSS8b6+dlyKjACIEQxEIFCJlo8E1hlCtMpxECnKDLopPKQAhiomPI0cwdDIyA0sckabIBFQQxUZALLKDKYkmFRRGNmBgUElEyGAEQqvZR3u91D54KqkANGI8TrTe19s9jeWS9fODq6O2/Xy9m8rmrnPPnJBYATLHaKeUzleQe94XCofh5rem45UgObavgUK4e/81u/9ea3vnl848hX1eX5ddNUQ85FdL6c3bl9q64bMzUB51xRM2BCFhFmILR5vVJDBVPkqVEglwEFkGzSRB0xEKkJADFTEQOjUWKZIDZqqKhEIcywOZUwB+YQvHPApEQT8ISYGJAZoK7oo8tXQEvRYmouVGb4vPjJ2CGxR0QRZSQmzpJLLqX0SBh8ADQimoIzNBkfiBAO0F72XlRMExGLGhDM5/Myjoj08u179++/+eCqu7N0jiyWqEkMqalaZMoax7QnJpEMhK4JUiyPGYDY8/HJSR4H72hBFvf3zXPBI8SKaEoPHdq8JvvMZICB55dqkvYmn6qYwlSgeQievRe6UpWikrWwIqnx5BibSFRopoA6RQ3fSycaPrfbHH6p6nMn2sEaIyrE9BwZhEBkKvDc0UOHplg0RDM4VKSpohkATVVvBiiqgMbEqkpTmG4SxdAAEYjQ8L0yOCZUQxVxbnKWPHdZqSGRAZgKMgOCiuJz89UkKakIIdr028wID7k9taIGCGiGxDyxuw/SgqmhAJgYghqyR0IQBTRDUJXpv+DgjNHpeUvkpo4xABPNAAeW8iEyBygmqEY8SV7K7kB3VitIrpQimkGZCELlDdQHRucAvWsoDpc//ZkPvvEkppJVCqJTEaCDz4qZEBkP6oyVUgCnto0CJs4xIYrIdF1FlBkRQESInXPOQNhNQc487WxVDQGRSXOeLGOTU3XSknSyJvFUMaZFZQJfIXuYepxUpitAzuWiRAimKjL1dRxsjwiGoCKAICIGwMymJibEbAg2ocTtUA9H7MzUplQdTBWBVLQQUhGd+OVg2QzUVLQwOzBQUyYyVdHsvDcwQp56VVULAU13NCKpipo4z6qCxA6dHFKZz6WiodvVzSp4tCiiQ99fxdQVtTj2ULL3VT9eW7nOsk9aogKngDx34IIORYvlbfZmUKFhLObM2+BmVu8H/bM/8fMffuX17eUTp7C8dbrPz5bHM5XeNyQ8U8j9uF+sl4TJGV1vr9q6QaVFU59vdmrQ1qv9sPWt7dI1gNeIuxRPj04YNKs452dhTi596UvffPDwcWQPBYOrmtXSNbOL7un
/+e/+nbd+74vffeeN//B/93/84m/+0oPf/erD7b7tEqK89dXPv/az/+G/9Rf+pl68+9Uv/mYd/PUwpDHN66rrBwN1jd/14R/903+Zhf/0zx436zNFU4Wr665eHn/i0z/zX/zzf5bBwLsUbea5qJZE9Wp9se3awPO24bgPAc9u3Pzqt79fV4sFAFb2+PL6ZDbvxxiKBA7Dtr+OG/Q+iMS+cqdwdblfn9xKwL7k+2+fU9dryjPnUxcLyqgFR2iYALBulqGdL7jeXO9+8S//tTMOQ9et1+uze3fu9h8sg3gPxbIUq0OTU07jEIJTcM3p/Etf+/yf+7N/bX91lYQ8luCq9fHJttt0krEgKJiaQNGSEInZp2FvTJ/4iT/12S/+7uZyu16dqQbXNL4OYN77GTrKAoQcfA2gWTI7bwBkRKFFgCJZtCgUtVKxA1cxe5paGYr4ZVM01y3G3eZzv/Ev0j5//Ec+c8TVR3+qfvVTP/b93/vV3cXF+dMrysMwju8+24h0d19Yv/v4/p2bi2o2i6UUzdshzhr/ymuvXjx5Z12I8W61nF9we70bDcGFBsOCYc6EJV1niUWMApUyTnfBqzdO/mf/y//JP/wH/5/vPHiUSSXmoaTZfJVBpHJPMP/DX/vHr967+foHVn1XBnYf+PiPfOrHfuKr3/3Gs+tnxzfu/cbnv7+o7PTo6N/6+L/e9a/82q/8t69/4qPn4/b+1lwJy/aIhqOGpcdrs0bjEILv91uBEQAIiuUR2E9sNFNw7ERUh+KYCJBKcSrOsqWdaZYtesmOCFIRKfIYIEdWQKy2k8l03O8eXLTEepXHR5BBizp1NRqgFhVl5wRUGM3QcU3qaqAGC2q2nGoqFVUSI5jlgg5InFVHdVZNINXJjGt/KWk77uo7L3DrXaga79l551pMBWWYrZd59yTmuRao2xpBnZETIRlFZs1qnXp1jibvBmjKu3Ne3KiXNxXYGDSXENLl1bPlfL3rBnSraU/gvQ9VYCelv2CPhqTmwFwpqDZ5NGPT+v0ugXNQ8LWXX/rm9++DaUxmRSmgDwBoGdWAUfjq6XYW1tD4cUxMNMboPLulF1YGhqTg3S4PnlcMxMbkxUqWhME5BnJmTBgRRM352lAjkfctI4o4Rui2Vx//yM8JlPl8fn2187Iedt8SitbUq3axuT5HwjH39cx/8IPv31zv9pt95sSSg6/2l7syDCCyue7dYh0avnnvxYc/+FZbVTUexfxIAaqWU5c9jJnMSLXEVAYjLAZ3b8xnmH749B3yBBrG2K2OayR49qCv57Nr3ES0GQIRKlIz84edl+Q49qvlyX7sQxWQCUwOrSrTTywDYp4C9oQERVJOxJVzyoxZEmc3pp1IadsFKbGktvJPurFdrtgn0cwhOOFA1LpGZJAsJZcA5gOplj6PUjh1iY1QtanrlMvYxSLmawnOmUNgl9Tq+RLQEXtHzOxMsoLMZqGP3cXlpasCFhiuNnVTF9Oj9ekw7tUQxVAVwQQHIo55ALYxF1B15AwgpjzViyyqJveJa3fj9ocXt1/+h7/091wIJeYoGZ0jpMo5KaUg9FGcn50sT87qxbwYmL39+HEpBR0N/Q4tgPPEDolEs+RSoQcAUCiqzgXvMZdMRNePdwHqdXDeZEbVg2ePbty4oZjA+QNuckrmKxN670xtBDAmDyKEFEt01UykgGVHFQKbGvtKDYslxGJEBwM5B6SiJRM6diFnx+xVEzvvkKIkVDRUACsCREhEE8CAEQEqEDMRH1zWnQve1Dy6wTJRQEM5LJOCgEDGjgg5W6m8NzMmMmU1VVUzH3hmzFE7zx4BUxkYmQhTiY6IOWRIjjwSqQE5XzQrphCsS6lqGo8wxL4fOuf8kAZHPKt53/eKMpahco2MpSj4qioyRQrMMahpLklNLVVxt8eUPv7J17/+5Xi12xwOB56qWT12iqoqFrN4QkH67vljS2UOT296PmP82Kc+8r6PvDC4dnXjVj9at+1zSUNM37v/w1DNZ6sjjOZ8i8FVs9nqaN3FcnV+vTg5jtmsG32E9fp0eXQa4z60riGXc1lUVDnMKiY6a5oJveGroMWGODj2qBEALi6uQ9ME74/my+AJCMEigIQ6+IogM3EZc6TgbSiUhq99/rc373y7IofLeV+bZVifrQ3dOGyaZVWt5sCsu6F0uzmkp2/dv7rYo6+UnF+v21Cv69miqY5mFakoL8EvFCHlVDN4F8aSvGuAHBSI+wIGhlYsOuesRis6dUcY8yzMf+JTn/nGb/xKIeiKgAJP8QRgLTr5kkGFjLxzRcVUlsv5vAo5jzWG1phS+dD77v38n/n5D73+CtZe2LeLlWVfsrjARFBUZejAAMF7jxK3zkQLB52DhBTT0HWC7PyiXi2zzxosSZ9S1BFCwSbbyzduPLx4/NY7z1ZuVS3Jzzxjm6QUzj/xidcvLje//8N3CDkRqAnEPu93qIjkEI0qBwhpKK6ufOUiMRiaqq9cXVVQFJFKLr4GXnle1hkKWGBEqmCE0hVFUVLEUaH3+/OhZMLgw7JNZsYm+8FErOsRESrPtUui1DbN8Vq48ssl1d6aueVc13U22ePYrlfsGAUwZsyFN/uy3Q7dhnJWkahC3oEqO+R2lpjPTtcA3waAJJkMzdTYgEjEgIxRyRCVDk4iVVMwNRB63raoBx8GECqSTaKfKkxZD1cMiqoUnU5LqtOJ2qajFk6neAF+Pn4HB8qlJAECKURoIlhAEZF5THkf47C5vnx6+WTWnq1Xx8t2HkIIIRADExE55+rga++8cxURM0/hkT9WA2zCiiAgoKIpTNIa/t7nP/fdb3xjvV6VokO8XK/ai4vLvu9W89np6Tp4IoPDKxAhYZ+HIY/B184RIjOwqRFXDr1IHmNkg3k1j+UiyhaLIVWlZDNidkhGZSxpnLplCdk5p8pJYl2fWLVQcMy+9lXlkRl9CIiEyIfTMoCASYlEQOycZz3wuU3VHCM7VisINAVzRAAN61CnMhBObmEznCquEIgcuaKZiWUCK5OhofMVo0MkFWNidCBit269sBvy+cWbq7YOFQAMhOg45NwbYMyRzPtQ77pNW7dNU0WTyrCkMQM61BQ7dSU0bdzmFKV64SOCDN4D6nvNYgfDDzy/YlNexg5aiBnQZJB5TiKfvusJ9wOaQQtIBuJJQZma8iZ284TZmejUB64LERL/Cay1TaAZlQkKfoDzEaDSdIo3EyHCya5FxO9pStP7ATBGp1N52QTJZoeAOr20AdgkYeIkQDA7VVHJAETMCDSVuJkZkZ+YbgAIesB+/XHeSrIiEPJknYJJLIGDbxMOaHcQyQQwAaEQAIGZyIoaTwA5Vstqk6CDpsDkRdQOLWCgooRuKkEGRBG1qUnNwEzUJmQyIuBUCzYZ+8xUtRyq66Zfqs4hI4oUM8m5M2PnvYEcYEwph+DHHJkRCBoXfuTVV0J4YzEPpKxMyDQ1ndkEe5rkxQNaHJEJDIoIAKoYHsYHQjB1i00OLJ00cTMhDGYyfZJIjCpg06iAptUy0dBVZLLSqwgSISEIiKh3bKAlJ+dYcmJmM1
ApRETEjsnUEIgIZPoXZFURzY7DpGg6YphIfWo0ubqKIAExqwihiSEAac7OuSnDTUSqxuQVxFQVEnMwEzNlcgRkYFNUj5kchemJDaCADoAQeaJiE07Qiec8cAMthVDfg/4fpKIyXu76K3Ssxcxw6K5T3rrgc9mBjZqJNKYS6wrGLIx1zTNHcwIKNQ1xWxMTF8NsqrM2VGBOHVrzoVc/86EPfFDK3leuourB+ZOmhrYNUfpQL3b9bhx6YrjsuppgVdfO+ZRTH8e2Pmp8LdKrxlJ2jhYq3oCahtEtxu3eckLTfTe2vn169ezbbz3COrRtnfeidVNS2cv2xds3/ou///fH8+t/93/8d9740tc++5u//ujNhxXjaQUffP3oYni6f/zD6uan/tpf/p9/58tfcXBZM0IVfAiuJCKfhxE95FD989/57T/4+pf/nX/vP3jpxQ/UVRDMkMT58G//1b/1S7/8D4GoxbrmnPcpxpIu0+n8pKR0cXF577jKcb/f+Zdu37zc9p7cPvfFaD8UT7BYLSBjN/ab62F1Sr7Gfb/dber1+uWc0yLUiNWr99734P7Fo2fPJspGwdLHfr1a1p4k6snqbPOkv/Hy2V/4zJ9fB06xOzpdV65S06ppZg31fVfGVM9bX7OqocOq8iK4u7z+yIc+eX29rQM0BLO23m3HyyGlEvuUZ00DZKJZcjQ1FfIel0er45OTr3zjK92+v33jbiEP5DDU7Gsij+Rocv26gAaqwoBEfhhzaIOCqJn3NbGIcFElQ7UsEp2vwaCUXGIchhQ8hYbH/bPf+pV/xPXyAx94f+up37SvfuTHvvnl35vN+/lJ/fjhfihjN4w/+P59cqZjvHGnXhzNrGkg+P3VoNGI2/3F41BzoEXglyteWrT5auaIx7x3XJOrCGaiqRv6dHU53QUXDy//0//kPz3fjFKF43V1sdkwmZAS6XyBTsRg3cxW2l+S46HoVcwf/sTdXh/tv/H4Q3derKTdXl++/6Mf+eLnf6OW8HOffm2/u8arzdM3r5euKmMxx2MZgYGUOMXMCJoMFa2UPLIROW+KKgrAik7VDExwesiqEE0mYQJ1Bh6AAMjYIyKjWiFiwajMbEqGeRQlRsJRNasykkNnogTToA5FVfnAN8FCppgsO4fe+WJJhZx35Gv0ZHW1Lb14SoZC1eJk2R43VIXTphbHyNZvOj4+CqFJeylCzsZq3lw9e3d2+yXvHRApYCzx/0fWnwXblmXnedho5pyr291p7jm3yZtNZWZlNagCUCYKPWiAMikQEAWTpmSGpDDDtCXaDjOCD7YcCvuBdjgUlh1m+NGWXiSTtEjRptkIBEkAJIBqQFSDQjWoqqzK/t6btznd7lYz5xxj+GHuWyDD+XYjzzmx9lp7rrXmP/7/+1sbDRGQR81UeyDA0GzHuJy3/eNNqGqP1UAeiMyS5MzkDKBplgBVzahZGuI8TUwoasE363Hf+JqyATFx1cyWw5iPz4+fPn0/uDbFdGvetYxTzkQshKqCCG3tSt9U3VTjdohP97gIKlNTzwTZgNA5H2i42qJoYDpyVTBLcVIRT4jsAiJmYVNSaZuqXS6erTfNbCk2okLwTNkqg5DS2dnHfDi6vnqKAE7t5upZzMkIZ0dLkbzbXqEOPjhS+N73vj5N8WR5C8SdHB0/fPDg9Px4cmncadN4WLq4i28/+I7laYhx+tDG7bpbzBazlfQ307CVMXVN+/Tx1Yuvn0Okm2tZeN5tx9a5/XZbUw1ACtpPkRC/98GzR+s+eozDjglxVHqeQHNMeZr6fiSm0tJKTIUIUNKkZoro1IyZ2RNnspic86BJxTT1p4vTB5eX53deePL0WtPU1B407W4uZycvoLMYe6rwtJ2Nm4lUgEgxV1WY4oTmgwsGvO8nyJqzaI5IwBXLkMEMJFZsy1kbc9qOE3hggEAsIoTIABHsand9ub2OU+w82JTTmNSmhOD3zgHKJHVogDAX038u1ABDMJWUJIWqQsHgwmazP5sfkWGLrvHtf/X3/zZ5BEcQYyAq0EoVI+8AYTI9ma1OZwufoki6Gfv9NDGxB6p8R+CTiarUVZVi75XYkSERIxCgqeaMWYdpurlev3H7HlECTZebdVV5DpzQwISANItznHJGmEo6nrhJJqaqOop5dOVvY44DU5OTGGQAgALpNKdJAlVggIoIHoHUxFABy49pcQoEF0qo3UTKAJfBqZJzYZr2CB4MgycFERMCGtOm9sdl2AaAIslAG+ecq3LOzjdqKpKAgCnElIgJkVAlTikwbscb8EYmYupdSHkCcD40kkbQDAeRjLJKcJVHExm5mOQ9myQEvLp80rWLBIqD5Diy887homlFzbGLmvZj39a1J5ZEqiyakw5VMwcUJtjs17Oh/tjHXvnq1/+wrAJ2znlGhjEnVa2bipAIbb2ZxkkuUnqg2pm+9ZXxlYvLj7z86urZxc12YiCzZORlGtfb9dP332zAheDqee2Ir4AJ2Dfzx9xcPr3KCiG0rvWuWzYNdrOKRgxt086b5ayrmgYsu6qaLecpZ6qb0DYMyVSHFMdx8O18MlhfjSo672okUMzAjP1u/3TX1G1dV+TdbDlbP33vD3/rnz3+/vdeeO3FJHazjd5Xq6MlmcVx7R0f3b6TkfOQ8m4MScdnV/uH19OeaVHPjo7a41uLUJ/UTelI4lChczFP6LwjYjCdsneVTqYxg3oAyjEhKiQ0JZkEVIb9ntpg6KbeXj97dWEhgzqPYEiimJVAgmMwHHNyTOg4inqy1ap9/f6Llw8eeeUmeE5y++jol/7kL7z00R+a0kSjNl1FwoqsOiAHF7ykhEqgCinHfrA8MHrZp2EdxyRcteTrup4h1wktmkCytDefA0aoPe3lst9evrQ8/vDB7s3d449/5rTndTevDCma1M7/wk/8/LtP/t7ToU9iWcQ5NDBXOyhTWWfOc/AVegLHqpDHxOaaQKNTTYnMqsbjDLRiQNAekwBknrzkQGRIRpAorQfY8Lhnrmu/asLxXBDHp1duNokMOk6EbpqyQRXOTt3ximddRT5nmaCM18GmieM46xa4mJFBUIVxG692tNuHZJYyuZBRmdEz+JQ1SybWpvHzk4OryMQADQ1VLSkYqMek5sEI0EANUREFzIAMqTi2ygAckQo5l9AQpNh0LBuCKqCqoh0yM2gIVuT7Qnq1UmBuYASKCDmZZSt3S2YkAGJf5BFVJEhRt4jjercj/yw8nYWqaaqag2NQBnXEi3bV1su2ms/mR3XThaopLuiyo4WiEBzQJCaWlcATffF3Pv+Nr37p/O5pxf7RB09V85Ptpq7o7Gh+tDruuhpQ8pR9qHJORF5EQLThloljnhw71Slr8uokRzMJXBlathE5eyIAzJazZEIPaCkPmiODN8kemYhAwAzr5tS3q0i+dt6z84SEDMSlrwgPbVzkmNVA1AoTptC8iZhMi3GAERAdITrmIh1kUbHsnCNiZCbTUuRUngtiqRR5l41tFkWgsvnkEs9TICQkMkj3X7oLlt558gDPmpkzHbbtnBTAQ+3JO0AyN69nOeVeJmCsa
6cxNeRzTJKT9z5O0tS8f/owQxvuvqpA7IjxQL23ojvggepTRI/nm/8yRjQo46zy38EsZqpZNKmMVtqvkEpX3sHUUXQnKBZpIHJFRvgjTerAJzIkBARkFklIWIwbdrA74Q+MaYUepKbIpR8NiIoCZ1QcbIhIVLrJoGiTyIYHSoyZgUmBCGlB1hQatFk2KbYjInpO/lFAKqW0Jf6FRHb4y+VYDlj3EqZT1QPMunxaAARidlkEoPRkgaqhJTgk/pCIBEysNH0DImTJjgMBC4qCWU6FkwP/6pl4fjAErGYmB6eSqjIRHM4DAFgSLcoEsjfLCkZY/NEZwNg7QHMhjEMkIM0Od9O/83OfPjsOWbVYe/gH9CYwTZGZi3MHwECVmIovEUrlhRWAkUFpr8fChCqCCJfOeiI0KN2vWt53i4KokskxGBY4VPlbZgZasnUMSAC52MjYu2mcXHBmqKqAJpJVS1GMQ0JTlUKUwtKBh4CgoCZabPKmSnTQ+0piVkwRHR7uVqyWDbTAlbKkckiqCiRmUqTHrELE5BANzExEmLkEVpJkECECETFTZl8MZYxOcioeuwOp6V+ViqbNhWdMKgrgQus4qiWEZHAzjNeeXMmA7geVOCnW0ViNADAFDKGehhGJchxNzWyO6LYjfvRjP/WzP/anLG4Idbk6ylHy7rqZ35qGYYrD6fK26Q1UGOqFJJpRuLl+HDr0HhUCWOp3gycaxh2oTcNOBWySdrnK6Oezdn31pA7IWtUz/42vfsfadj+m7bCvxcZhPHrp/I/dv/OVf/77cla98clPzhcv7C9vHr/5tReOj3a7WBGsn37YP7r+xhd/9VO//NridPmf/vX/4n/+H/30ya2X8TLuxmkc07KrXrxz/s6777qufnY9ppT/+v/xf/9/+Gv/WbM4zqPmhFUVPvMTP3ux63/nX/xTVI4Ws6qKVAh1FTbZNmLb7Fbz4+ub62LyGsftC+dnPLb99XYxX8Rkjx89CdWsmh1RBUOe0nYfSIZ+Or/1kQrs4mZ/+979n/mp9l/87heerXeIkJIumrbxFY6m2dd68pFX3vjo/Ttz7y6fXBy1S4w+jjlNWwxs3tkU58tFZpEo7DiEAAbjbpji/uzO+X67z/0WudrnIaps+6vzxSvK2wyaTVSVHagSMjwbHr5y65Vc+wEg+GOBSoh9Vfu6EyMEZPY5GzmP4JgUgCvXPLx6umxPgwtZp0NdJCGSw5zIzIEZRFECsK7tBgXHbhx7JA4eUo5f+LW/5dMv/ehP/iyx75Y/dfTKq7/z//3bY3997+X542ejpHHaXGPExero8mIIVS245nbZ5aN4I1m4O1s4b/unX+hOtq66o6Hro3KDySLG0XSqQ1hvewPxzzPJm150ZKoqcjjtJ1BirLK4PESEGBx3oXrr0WaYhiDQVu3XvvobH77z+bOz6pVz/IkfuXN6fPS733Zdu7wcHnWyffv7H2w24nd8khlypKxx3FceXM41kieQBGTA5CQB4AwRURnEDIHLexCiEhEzgR5uh2rIUOLEaEpAJZKqYGhMzBNomVMwGKiRs0yQwdSxI4CsbEqOi9LPxBYKnEMJ0SkwuANTkAIzOsd9nLhpBid2fj6/c+Ib753Pkm7yFJpQ+5D71IHe6hpbT2zIk6Yky9N2upLNxaNmdUZUO64EqAhACOrq1tgjTZIFiCCKrm9wurHdaLBZru5P7FJyaNSGFlU8qToASQXiWfkuVIsh7Wpl73226Ckbk4nmJEB5fjwnT1kwTjnU9upL97793gM0x+ARwDTlJOSbd97/4Oy0DZ6SCJFvqZYhloZiIxcJgJg5oOTKGDSTqxRyFCFCzdExk4JmSZoVCTQ2puyb7bBVQVZgcDi62/OPSpzaxqeIt1+Yv//0vXbZiGi/280a4hqOjk9u1jfX+41HrEKbBdKo17hf3r3jalxvb4Dk7GiFXXczXE1J0YXSINo0jWTo+z5KRMOmbrLE1clq2S2fff/ZcbXcXV6r2Wje1Q0SeAdNg5qQePbgu3txRFksT2E2czU7f3g3qtpZgklyDqGylHNSZjMBRCvjtjJEBlNFyjGhaN3MJKoDSFlM+Gq7E26mhJV3QMk5t1tvMRNmhIrn88V+PyZKrLbfrhdHt5UH0LEOVYzqNJBp7Qw99+Mgrt4Pg1e2lB0wkYxxrJm3260akPKinZsCmjoyIuu3uwfXz5AQDMRoNZ99uHvG5iXnq6fPbh+fhlCVtxrnnEhyjhEo5whieRzJuQoZgQTofHHSNXUF6OrwuXe/vkn71uE0RpUEgKFpsmYxk0zg3K2jk5OmojiY2XqatiLADg00iUoOlZ+STCZ5Gh0YE4lKlhzYExPGbJqJq6vLi6NmNvMEGkXFQF3bqlOx7NAzUMoTc41Ihuacj1NSHcCy40okAHo0lJQNDQQVDMA5RjBjZENGI081oVPMSAAw5Tyxo+K+z5oVgBBjjmZKhDlN7GaaBY1EcpZeQQ89GQC5vBNkYCTPHQAZmHdqIIxGyqqUNDI5EZk0et9oigbEJpCFghORih1AZHJKrDAiEmHjCPXQIutLpoMJENmxoSEpsetMclM7AWTyLbWWYZo0sRBbIy6m2HY1sAc0VNIRvHeQQNEQ8jAmx75yjSmpqgNLCI8vL+6dnf3YZz/ze1/7OgAExmk3Enrk0pKLDKoGjsE5y5D3CaPicLU/PplMaHd102+G5XyxPKrHfr9YwpP3L+qsXVtXjQ8sKhCqmUxRr7epn2aTJDPMMPV77qoBYJd0c91D026HTG1nFubzWeia1ofK19VyJd4RuMXRqjtZuVk1O7mlORPxYrl89uS6blx32vX7rWeeNXe0T3E3pe2Tt97/bdk/aKl641Ofevr0sVvUs7OjcLTyrto9W4/b69uvv87NvBepQuWmePHeB8++856OUC3n7Utn9emqrroaiDIAVlx3CqppTxIRWshiGUG8GJphvxua+dJA0RGqVq4eLq/yPpkPzgch4rCc1n3e+k+uXvj6/tFONAM7Ih/YTAFZ1HwVgHEQE8OXzm6/cHZru9k2zlXO9dfTR1555c/+e79ydu9kUgxUs7qcaJguuGp9PY8p6tijmUOSOGgUEowDJskSUYi1W4b2hDEnjGkaYcqK2TFWVsUxoUIfIzB6gztHt2T0Y//k6sl4C0xDz66VyfYmbj77kz/zk3/3N/4JVq1FUsp+HnzlwTlFoK5qGscchLyJwTBqPwZyeZoAVTErWdPOaFYLe9kIjtmJpF12rTZLN8SURx02pFeqvXpqsPJhMdNQgUE9m0FKUWyKSQF4PsNbS7hzzkdzEEvbCaYYFGuXtN/tLy9l7Lt2Do40iY1br4NBMtTsnHGrdZXyEIhmzBijxGkzDKldvnD/5fIsSCIEgAAEilpMEJDYgJEtG6JhcQ4djGEAygc/AVoxD6CaHfIWpUoNDrAVRQXQA1cVgcqmpBClxQoj6RATKjkONSgNS0wEYqBI6E3MUAFEJAqktNsg1wBMzpfOcjKr2NWhrauu65bzxXHXLmfzZVfPQlP7qiLnvOfCgbVDBxVCsK/+3u+9+/23
7t4+v7pe77c76yWmWFXu7NbqaNFWtTOJCFiFwES+qWKKAOCAO3+UzQzyrGqSDLXvpoRDjvNqAajAUfLkuZGUlQZCIHWqIjkZqPNOBUyh4qCKSZK6FpuTCZiInHfecXAOkRwHAFJAR47IYUELgTlnRCRZSqE7oBE5K1vjYuRAckwApmAhsCraITAlB4wyFsEAGMmIQMETJ0lqwASiwkjmFDxojoDcLhbTuA0Z795/cbcdH9xcvnY+b2cY05Am4RACwq6/qRo/q0LWDIr9NHjnvaNhvHLcztvVsN+N+aLt2vOT1eOLtyaamvs/aujQcfGimSkegL4AzwUJUH2OoQI10QJHBXuu/hU1BUxNsgoJsiIIoAIyYomV2eFLh4jkkLgYRgyAyJ4LSVAowmBmKkWp+YGKREBEJLlUrBkWslD5+QPuqMzXCq+HoRhtiBBJLYNpoUUXattBR9EMTMxcustM1UyZSE0RQKSYrcoOnwumqEC0ERGRoFCaAPhgmzNEfJ4kMiQuqT7JRSKR50nDw0oFRCo9G2oC2QA9O9WDZ4uATFXQ9CBwlDwgFg46IRbkOBxKUBDBkJ6js4nMVEWIuegfVCSJokkJqoqhHoScLL4+ULgd+wihnS+vv/XdH753rw11P5bzi6XPFdDAjNgRHE6CmZgqgANTx15NTI25MjURJWIrjMuSXCOfcwYwZldCfcXdyuSKZKQFayYCWNxqBd5UvjakksBQSRENyQFATslXtUkmLBdaDxFKtawZAJz3KsLsJWfVbERqRoXpCMRcgEd0YPlJcSEBEKKhgolMhkTsVXL5y+yQOOSUQEGzOF+ZCpgRHOrZ1MyxMzNHHtGlHImdqRBT6SWwAjNnKbgpRJb0/+cqqtkQckr7+dFy029VY8rPRCxDJt+wq8Es5+TRMts+pmkaqqryVb2PWSQpeVEJoSEBEm+jf/3ex3/mU5914zWSikI/Sdt0pycvAJoBnK5m43Az7DfL5WlTnT67fpbcsN7286pbT1NXz9ZZN/tpMWsM0NeL+WL57PJpRcFXzdKHPGwdG+aRJX3hi1+gihazNj26AUIH9iOf/eH23sn7X/nax9648+Ti5lf+R//bmquvfPnb2dcQYHZUo0zzk2PC9O2v/9q9z3zq7IWfA7C/8h/9tf/s//YfH6/uYbJ61iLhuw8eRuOLp70Z3jo7dZT+7j/8m3/+L/xPqrpVmJIlBvuFH//pePnsm1/9omRx7Ou6zSld31y9/sqnn37z929u+mFn5+fzy0dPfV0zu3XfI7BzrKZ1Pauaus95UJGBpfJM7fVmSrKtqsuuaW+dNP1ue143P/WJn/qbv/GrwcCVMcsUj45XaGdv3P5MV62349XuvccvvPByszrmwDklDh4g7/u+Dc2m3/raO8GYVFB85btZA30ah2G/2TYhJPBDHlzAk5OzIe3EUlaZxgQ5Vw1LTNOwq+fu059+7b/8u/9gfnw+O5oTUnAVcg3ogwvEQBQcKrLzPhAKCO6nzXc//OIn7v2Eq84PE498qBhAItVshKGaZ6fZgGTKtGdwzFXfD13lfA0i17/5j/9raNqPvPoJwlUj+Nmf/tO//c/+tvNcLZyMwceUcr/th6pq3vnw5qU3Tqd+37Rdg8fjGMes7HXRVrJ7VLc20PmF1tMIOaNzQOTEzFUBhmnYHFxF9z/10nvfebj58NJ7NGbE6v4Lp2PfP7655DbUTX19fXF3eXu2OuvXA3gjri83k68Chfx733hr3S/e2oB/Orw0v/f25369pZMpj7gfZg4W8/aDD5/UznVVRROv6gZUsSJvaoDJCxpKFhf8IU9bHuWEWSynzKWJw6DUJoJxuSmrAjOBGhkwkBlUhI6dmCKhDz4D5iigxQCOCBCcM4CctDQVawREICtV1cbkRLOauOBEFFRGFfV857MfgzaoKHrYr3sC7Lyvat96GieYbi4TmE62ftyDn1XLW7pHL5C8627dy+s9Vc3u6oaCC1UXszbN+ZAFYJIMztd1yzZcTcPF7dUtvXr7Yn1z6/U/FtFHyUwwTD0Akm8ApNxJGdRVluMAPEN1+xQhgSKbUtXM+tH6cZi3s+3lZjbvpnFYrWr3gMaoRZBXA1OqfBfd+NJLL33n+9/ipjITCDj1UztrppQIckC/HYfaL/I4tWF+vbma3zrNYICGXgVTysAQ0GEGaJsmDuM4xpCALaNq532Vqzt3Xt9uL1Gsq0Jo2s0wbvv9+ens4dNHJ2cv1pyGbCyIhkMaFXTeMpqAY3EVosvRPFS3z+5hhuv3L2IcZJK6cbPV7PJy23SzdJiH0N3zO08ePh7HSYLnegbeI1pVV4I59qMDdzw/X+8v+s3Wc/jg+iZSBEUmZK4QXco5x3R4JFRh2idVU1Vk54B/MEUBlZLZVrAyZSJfm6V+P+yHaGi+YldZynBc397v17XzUcb9blCDetFmBgJEKy/nLCF59Pupv5mGZRdUppRHQkmSnQ8EBDaaZTGNQ2KDaHHQoe9H5xkRAjGa06wE5ggJbUq9abp9fksJnrx/sdlNIPlkvgqe+2yb7dqzRwQAiTFzCIAKhgqQ8gSGXLcpT+M4Qs53b59i76iPq/PTaydvPni7WjhUsSyMlAGHnCxnNVDVZd2cLRbS7wVsL+liu4HgyJE3wphBDTSrJY9sOYfgyQCJLSURQTMG48rtpjSN0/3jY1ZJ4zTl7Bx2XUigIsY5CyAzmaEhKmomM8cgziElJaTGDIghp9E5QvMpCTrvmSRPxUmeJTnnBScjEVNiIB/UhBkdVSlFMHMctFAzTRgCgAcEtQSGjoOYUplDmokoO3C+Spp9aKc4gInkiV3H6ABFQY0U2RkkIgVgosDok2T2wSCzQ8uWbG+YRLL3TkxzngCUELJEUGN2wCwSATKzM0NT9uxjFiIk59IYRcQBqamvOfiw6/vgvWbJSbPCfD6btZzTOPZ757hpqgwavE+ikHLMCT0FDPtduqD1nRfvllUQcwqV3+3GmA2Up6TJRAXUiBHRExIGN1+ubt/56GcjuNOzs7uvLHK/A+xV87DdowVnTMJVNau7ehxGIHKNA+eJOed1pUKWG44zVgcck1Y+TjmGAEqpHzL0dTc/O647lk3/9CEh516vjT6McRonQhaJi6MlMV5frKkKJ/dvC8Dq1vm8Wzx7771486SCbZ42L3zy5X7ZDTKqRqO5WyyPz0/zen013pzePZufrMYxJkCQPDy56G92OfOAGu4c+fOjer5snQ+ixA7dnN1cbPKucaQp74iBg9tt+wzk6lDX3nBCR059vOm3u6eaJvTVOPQUSLHDJN/5Z791+c4f/tTd+6/q+devnj4a9+8N6yHlyMAqIOAcVS4sTxfnJ6fnvtbthm7ijGrb5fsvfuQX/9yfXZ6szHLlcd7M4qBixuoInWGFVIvuSNOUoomBEmIwUIFsDnxdR/W7aQd5IkyOKoJAwChxGnemSK4C5NB0TVNtd3G5WoWmubz8vqGczxLqztdzJac8vfLa7X8Xfv5v/No/7bolLVejs3bWcB3EGc86zRKzkGfb9S5FuF4rdqqmIq6hdlF
TwCRCyZGY9Xl9vUlx6FZNs/LoicxbD6DsvKuOOlhVvGr3UbOaGnpmH9g6Z87XZ6d064iWS8iptpz6nY0TjwP0N3Ho07ZvAvMw5N77qkEBzQYME0X0oannk06MQFkgC+Ro43bZLZrzFx6/+/7zrXDZICuZEqGBKioAEjJgwY0IGjCYKooBoDNyZTdDpohY9peKxUAEGZ93kVkh6+rhQQIHpYiAtGzDEK1gaQ1KwsYAuRhHEKDUnqOaaQYwg6yQUxJNIiOSMwE1Lj3mHhHhynnng6sqX1f1bL6cdaumW4Sq5ary3ntfVaEmYOeIAG7Wz56+/+7R0Vwj7De9ZE0g82V9fr46WS0kJS3XwnlDFsOcc1Jr6uA9T1mQfe28aAZTAzW0RTtjA7UkaQ+Gk+acx8ozAtQ+bPsNeDJBdE41IaGYAqKvWvanEBrHVnnvmRx7A2Ny7JyVsiciBcMyGjg4zY2cQ0RGJiIRUTF2wM5jgYWaaREhVFWknG9ipkMvHAkoABb4Y7ZkYIjoyRsqe09mChkdB6pU1ACcq2TShvwrr7zy9lvpWw8vP3638oq+qibZO2LnHaMfcoopdr7t/GKcRk8eSQEiuXq5POEREHS3HxuSzbM3rT2qz17MVhEiomIJURXd42AiKv4ZKJpkeWyVj3VQcYozTQ9pJzEhU4LiXJHC6HmeNiImQnRErhCLEfAHUtHBsIMIpd4BSA+18QSm/0pbXElPmhHknAioSCRFvSIsRCrD51Dj4oShIgAhIxV/0CGlCQejUKmBIzMpsGot7VeHujQs/yQ6KDUiucg1JWgGRKr5ABPSQwPJ4fwgAhUaNx66u0zVlBwjsVpRnkAlAaKYGIDmxEyF/k2FnQzwvDaudBA8VyU0E5KoETMhiOZylVSVEOl5TSEyAWCRlDRnRGBiM1NTx4zoAM0RBocxTeywj4M6ubXyfTIFYkQrl5uIELNkxvKqAI4AgOxgPMOcY3GhqVo5BwbiyGXNxTYkOZb0oYJQ+VBEACg/qEVEAEItNiVCAyAmlQxlRIYIoKXhLufMzICQcwy+kpxV1cAcMTn6wTfyYH1DKYdppYIXWUUUlKAcrSIaEJqWPASZZjNgZtEMCMRe7eAtUM0lPQam5LyBISMIZtXi50JiRk4axQQsIToDMzyg20SViUE1y+HBqZYI/6gH8CAVAcSYpyS7fhdVaddfRxvq5sxUszk0Nom+PrOMC87js2umLGnX99eGJjqN+wGZDV1lHLN+/BOf/dN//H8Qdw/M42K+UEAmp2knoApVN5vlPJhZSgJZL/uHy9OmIggjZYnjmKe0N8Ll6jiOGyNyyLthqEJnY95tk5NhuL6cH9Wntxa/9YXP7WXa9hqkl2EShF/4xV/81ne+/Z13f3f/4JJuLz/24kvT9km48/JHPvuj/+Ebf+W3/sHfcMir4/OHH77Jvga6+fbv/tftn7y9aF75zE/+Wz/5L9/+9lv/8PYrpxePYpqm+ew0jHEY1r7zvoZhct9/8P5/8zf+87/0H/7VcZ/GffLVbHV0/PpHXv/GH3yRfWu7fi+SJNk4PXjv6z/xQy995913H1yOI9JqcYs4pe2u3+2FyTdhGC/Nru7fr7cDvPdkcomvbvpFp6vjuWjc7R8rzFq/iPuUE7760Tc+c/2xL3/56xV6VDtdLu+e3PvJT//ikwfv32xvXn/5pev99c365uTWUd263SYjIJOOU9wPMMTx7PQusuS89xUnySnHHMdZN1utFrWjR0+eTZBn2rUYrvr3fTh25CbMyCSqCYB97UL1L770uaPj8yheCbgJvp4RBCRXRE8i55w3BCAXxcC8a44+86lfbsGR96rZgICAD3ljQPKaIyAhiQ9VGrXpVnG39oFD1ZoMahY6jtv0j//uf/kLv/hLf+ynf1amcPbKa7/yF//Xj9759r/84tevto+oXpwvT66uLsUr9vnq4ZNl62UYq0XLzu/HSaZY17XLE+yeVEFuH336UhqPE0JvSEkmtGzjIBafr4H49HITKn7x3tG7732ontfT1XA9to6bNhyt2kVzbtlv9/0Yx1XbbC56Rnt6sa5n+N3hUc7rH33th37iJ3+Ynj7bv7WYL+78+r/84taAqvDs+iqqUJZ+m478jF07DVtidC6UbgEDdgEFShMBOO9EVCUzk/cVIsaUSng+qxGgAjCBZEUGNEUiVBBRA1PJhABE2VAAfFVZTsRUsY8xE4H3VcKJ2YNYFHHMaJYko6pChsLmQATGAfOt11/R4+Woyrse9tujk6W3CQAp0+bdx7s0MrhxmEIbqG58E1xVJ9T67r1nj99F6lx328cLSSRDcs4m2Qr0CAQyec+WM5oxCZnBkGy8ePbwSeTrsxfut/Ozm6stI5sgI9ShZeQYs2YaRTx1LeeG6z4PFmPwTfSUJ3DOC46u9qFr05N11dTjlIKzW6eL3cMdIKUUc85MPE2ZKvfmw8ef+OEf+fyXvxyIuvPOLzDJ5DpnxMO4q2qPLFVgVeYUnAGQoikxEeAkCTiQd107G+I4pb6dHU0WnffW1hfX1z9652cbbtK0bpvZOG6aZfvowROG+nJtXffy3duvPnr3i622m8t+Pwx1s7i4uZIZmsmwXc/cK2984qPf/fZXk8LFzeUnXnxpiFezeeeP+Hp9mXdryKLTBKQYbpH3jy+vJwQxZjXXctU1mi14iEhJsxpOlhTVEKGmb7z3tmBQB1xVouz8gXpaVsE09MH7nJLP5uuCxhG00rRKpf6Uy0AMDA0Y0fLgnY8KbcW+rZ9dXMdBxt00Wy4UaAIAAku9QRVCN2z33kGKE5I7WcyebG/qELJCXc81ewBa1m6KCTEvm5YdXDBf7nY+OHaQ02QGKSbvg0bNcUDs2Ies9HSzNssOaFU3T64vb58uq2r24PH7bV1v9z1RXUuFEwOJsQvVPGmUbHkaCB1kFVUDSDn5ZtaF+bj3H3/hIzFKnDe/8aXfXJ6vdrvBLHliUI05GrkoKVTtvF6creY2Td7x1dDvpx4JTBJymHKuAapQZZDGVyqULKtiyhFBqhDQDFD7LJb13Xc/fHl5ctJWU78BBVMOjqFwTIhLtEPRCFWzGJoSqSEIMnvTUSyBKQJxMMOYMQKiAAE6gGyasqmr5gnMIzA5UGFwCprMQBkUEJ1HkJTQRjMQSEx+zLvgvMdW8mCGjl22bJZA0TIQqFgyAKLKOyQgSD1YNaaBCZg9WSYCgYkNLUewkA9FtgSIY+7rUA/TnjmwoRiVFg8ARbLAXrOV0lhCyJKQCYCTioOA7NFMcgp1pd6SYeUrdqGqKsta5HRTRINh2LLnpuW0NzFJJkZ+tCwS0agJobzf1z5M/Xjx+LKsAmaeUtrvB4nZASSZkDFGqV0gYmKasjo/u3v28nJxcvf+naOZBxB21bTdd6G2IIsFO3Wr1TzMOnTm6xa8B8N+2zvPi5ryNKbrNRGcrOr9Zppk39Y2a6psttvvLPaL+fFqZgF30k+rwLtdj5IDuTnKmCfL4tia9bPtpr+FjDtMT76H3q2J9wwG4MmqZVV5nC6uKMb6fHXy8ovQHkno8iZdPX
jc+nZxcpoZQ1dBP67ffSc+fSrj+Ozipjm9NTs/qVerxoU2VDViW63ipGm4zpp8XfsqoIjp2Gc2B1ZVkVmmMQ37ytfDoDhEEmQXEkASreuVDvKtL/6Lm+99c+YAE9/FcHtxP581T2C3xvxMhjGNKUM3mzkOyFlF14+e1X3/mgsLrpPXj73x8Rdevj/lPg09oK2HrYkikfdO+61NouoDUZ7EUjLErDBBzgnRvAtumHpTyRNWSN6xZBMdk0bL6NzcNSFJ732FFB0DYUcV13Q6PdHHl9+jZrx1O3ATC6cUwL32xmt/Ztz85h98a8hMhi45X9UWWDAwG2BKY58u1umql2uR2jB4C6QVWV0nRcwGmPaX+3Q9jNvBNGfhKTuagVGektWdq5tQrWpdzoScl2SaxzHFzT710VWVOz2hO7eBHSTFccr7re22ElNCSKgpK6qkPofFwvkqj2Ot4Jr55KuAoIlVdhwHiJPLZmJpzCTUndzeXd98+OCdwyZZ7eDpKHhhNRN4Di1SRSyJIDyYGdBK/w6WPcvB+GwopoaHgqmyCdbnniEGK1VQxVV02LjogXsCYAhIhR2MhqaYTRCYEKjEbIxKXA3sADYmM0sKWuITZRNW4htpGAtOibx/4th5X4W6cVXd1FUV5l23WB2dzI4X68uL733tzYq77f4Gsg77PSCenM1eevEUzUwzs+PgDC2gZwrr/ZbIatc612RJSOyCB0BTmUZx7IMLKBlgr2mouTbDUUfPtaeqj3uJ0VNAbqIM0xQJTYGQKYkxdy5UguCQHTkm8AeQOQMJkTMEBWHisvFmAFNTBWYAk4OT34rthrImOvwUEvPBQcWASEWbQyQojjHNYlk0IlLwAbSQHZ1qAjIwzVogVojIhcPctK1MgyG+8JHXvvHN7Ydbe2m1QpsMRueBqZqGiKSL2fFuv64omKrR1LVNjJpSVDQkj6JM2i2xy/TkwTe4adzyrmAhEVMxF8EhlQOHgFWpgxCx5/S7osEc8lVFJJIsmp2JSAR8roIgIDokRkImT0RE/nAasCCuD0DfYlkhRATOkpERgYpNg7CoDwfOcKmmMhEuxjrVAjmCg8up5PgPkbGD/eV5bMsOaczn4ldpWjt0lhMil0gUMROSaC5rTlQADmQr1WK8KqvMwMBUymVWE0QkdCZysLcgOHIqIjkjYin6QCRQU81ITkWICYHNikBhjopcYAXcfuDVkzMzBENAVVNTPHxUY2TVLEVDMzSAUlRfOEpWOjLoIIoxM1gm4lJkUf6vAjh2eZoYjdgZ0XqIr3cNoRow2CE0qlm0yDmmB/np+Tck50REqurYA6BKZg4qCqBGwOjgAPi2ItWBKTAzOzUg9JKlINdKiK+skZwPuOhDQ73KQQzPEdlTWYyA3rk0TUxcChbNQCQ6dkCsWUyRXTAVRHBEuSi2bIAHZxmWq09QKEtEjrn0zSkxq4JpVoFDNx4IAEjOzC5LqT0xBEWwUpcJQGqaJVkJmZoxsWhSzZCBmZEAgUWTITE701wsc8+VoudSkYpUTZjEiVLOOUtWsHJaq7AahrUj77kSGWLKx6v50N+MmgEGQBJJBEpZCQ2QX7732i/89M+N+w8l9UxVUh32Ux3g5Gw1btZ13ZjZuNsaQ910OSUGJEeX2y0Hbzm3oRJAABqnqZyEYdxT9kgkKRpOR7OmcyuE/PbDD7/1+Ons+AinjCl6z7/wUz/25je+/v6j9yfo75yfv/76q6+88pFmddYcn96bn87p1h9+7nPT9onut8O6F8juKL3/3V9vm/a1j/3y2f2f/Ut/6X/1j/7+6ee/9DdXrk4OBadn62sfwvnxSR7GtM9Yd+998M7vff43P/kjf8yHeop5v9+dnpzMZqvr/XUUFckEejRfXj29QCerivxR9+yifwr7O6cVTMkITKVdNAKuH7btrCLg83mTBwmzSkA2AwLKxfXj1XJ2+9Y0m8/TwGn3/s/+8MvvfOXbT/v40r3zT5y9fv/s9pMP35zy/oXj+SKE737/6c/8xM+C5b4fpv0g44QVro5ObXJp/czVIJrny6UVNZi4qmoi3m1v1toDqjOvivtp14SFKuRykwLNU0o5GWA/yv5hnM3aEEJGJq4NgyGHUAExADAHcs7AAJi9A80Ixuw8YcxZDcAUwQhRNBePIhIXcnvXLSdyu6xVN99vbwJ7MU3DkFMyU/bD5/7F37m6+vDHfuQnG19z5T75mc+e3n7ty7/9hfe//wdT3s+7eeUgyjRu1yzdYrlUokkSVU0GP+wH3Q1V6PP0/UU3XeunhasslpK0LVOkum0gHR4JX/7cN2uqlfTNNx9WlR8xa5808tDj8ax++tbTV+7fS2l6ctM3zNcXfVU12aYX7t+5XN/cfenek3eunr3/9S/1b83duV984u/8sy+tQxXzxJhVFAOnlCVbh7QbjQymJCkBgZYZRwGelTemfoyFZRfzoWMAEY2gYPVKIt0MjJ5P4UwJgRwhgogxEjOnnBWQUNjUBKJEBEPmmEZ2TKgESM6BKZgF57Ew6RDIU8ySiF7+73xqShld5t21DcNi2W4fPaqrsN3uZrNlAO1japetclger6akt+/f3a7HzdVgvnZde3R2fzv2SqxmEvuOVqkffFjGPKiOAspt6MfYOBg2o0roRwCqLI2Pv/vl9iOfrWYn1F+3wWPOQQE9Jul9ICTnW4DrzbhOjWNSoDg6GAh13G5JkvQ7h4iE+2Fom2ocUucJU0IXwKDyjkAk7jzh5aObD5x75e7ZltMYo4B4MxPL474ytiQqqsiha4b1Mx6yqngiiQIOLHMWRZVdvKbATTvLOTZVkCSp1665l5QuhwuiuLuKyDhs+qarBSZHnqD67gffuXt86rch9X0bpsGmbl4zmlluG5iuvv+9rz/TKdW1v/fSy+ubp5MM2+ubWbdwTmrm7nR5fX1z+/49wdkHV49WVe2CYwAxeet771xv1reOFpttf/LCix88e6t17unVBYG0bfjDt99BVyFSqAgdS5YUhwb1+cMVxv3QNbN2Ns9pEjUVIdQsmckBAPHzyRFQVgNyfd9nSeQrYD+Jbda7MaXahVm3VIXddo8Os4qrZ1i1SXI9q0kSIw8p9rYjFe9D1TT9dufRo+aYh6ziicmYxB3VdYw6pkSE2YhCAM1ZTVRd66KmGLOCiSWP7BX1Kh7ZDInSmI/rlYkGDpXzvKhRcZhS1MlwN8Zt62susB41NSVPdTcLrt1eTPc/cu/W2d03H77z+Td/f3G2moaeEdEoZctTJu8S0ASwmrVHbUNo/ThktJvNlhw7ZEZicpmiKRhS1MljBSpZBuc6xygimhNFwYbqgM+uNsez+fmiibmfYnTognN12yaEDIaSFdG5AAZZonddymbmCMlMc5oQzTsWMwJIWYkguCZJZGQyNCPHniBkBSRgcirG5Ak8UjZAY1TVrDm4AIoAjhlT3nqkAMRZ2dGUE2ESyYBcNnCOkRlzFhDP1iZNWSODgZjHCiwBCEAWzQZARMAUo3l2yJZyjyyOvCoG1xTrehI1U0Yr0YDynq+gqEBAjkLZLzofENBEfPAIS
syAIbTt7btnQ5zUxJHuNrv9Htq2C1WVNVtkhKoOdUx5mibBaTlfoHgVMHOGmlQJJCXV9XVZBVlzLlNWBE05VC6bBWYfOCU1oTuL0zc+8kMfuf3y/fPjs7POke7XGxeAZq1sVPvchWa+XK5WS3E+5ZHJKDS7zd5XM6w0x0kzYMjBhXGicZya1S239G7Zxf0gz67ccr48P+VbK4JMsypu+14SqApZRSocHWOKky9cSxE0ZDUy887MEhMyYtrE5nTRzjpazsJyJe2Ra4+O5ovp2YdpmE5futsdH/UEcT/IxSU+u4hPtzcXl4vj26tXXvRNy5MFJhRQpP24BzMklZRg0jxtfdDAjhG5nu1HqZzrIIz9CBtNW3XGiLrd3JgLppXs0ttf+ML1m9+8fTa3OKWUIBsD6DQx5tsOXiGvyMmBDNJP62ncg8GLQhU3x209TWM0+8Y///UNpM/+936hq33pWc85k++QarMp9z1opd6RkoFTBoGYkVIVGNh5rkhpsNC0IEiswJCm5Dm40JZaQtCcZKhaV6+cjE6up7i/mbchpLubyw/bqm+6WgFJWSqMwT72w6+8/d6jb71/4UKDBkmsni0NqXJIaQKZYr9PQ1K1FCOChVnlulrJ5X0PMU3DNO4jFe3YNI2jBaoXc6wdV+jq0CznykEMZIzUD7De6rPLfLM3A7WGq1ZFvUkIaFkhmQIrGVWgIEVQNmTQpOuLCsFnNUR0QN5Nsa80Yxy9Jo+oSTQpdXObL/rrhzbtDoIpgoCV2MshRlYwvCoGVCwYYGVjRs+tDCWGowSgRV8q5iTDg+wEaIeqbzSBQzAKtFRuFUmKDsijw0YSixmVD3AXLA4G01LcrqBqBwajqIFyFgNFRDAxxAPzRQUAHQogQkxCpAjJ+YiwnjdVFXb+tuZATzaXq9nx0dHdq6sbIrxZ39RNOD8/OT6ZAZBIDpXPhoyF1OfXNzfE3LUdACRNIqlqajOdxgEkO3aELsfJkzl2yAGMxTIiGeAwpgPOiZ1ZBhMCDFSpKpoD8hw69MzkgnPeOWJAdkQMhM45I3qe4HGEBgBMqGiIBIhZUrFruUIRJTRSFa1cFfNYHCqOffEJ55wKxqjYyICMkRENgZicoYKqajQTNERAR4GKPIIuxsE5h8jMGHe7VVu/8drrb3376xU0Z7Ma4xQl1g3v99um6fajhKqyHC1rAuCowTWaRkfcVLOrq0sjSwqM3OG0fvdry1drXZyJAYPiQbgBAzq40cprMMABia7wA6T1AWhUsF2lMEyEnAEoIpQmOCQuoTMmX0xAZVdfpCn8gQEHwMxElZ4LVGpWyD/EDvEQolLNCu55PrvoQkVGK7geLVqo6kGaEVFALCWABxASgoE9rx0kfC78qCZARYISR1JTNSkKbZm7qCoQShZ2DhG1AL0PeKLyi/IczFQ2X+UcZUMlpue2PlE0x05URRIh6WGh0mH5F0i4KvIBeFQOpiQhgKjIXkUXMxXRRMymWtjPpmpWcIaGiHrwiCkAMCOAlu8eGogps5Os5IkRUU2zOqScQbPpmKBhQBRRAGXHhVOFiGqZiYu5zAycIyBWM2LW0hbIpJjZkYomiQjEVALmBAjPIV9ajlbBiAmK7woOZx7BgEhNNVnhe5oBs0OwbEIISGwqpiogxF4ku+DABCAToZqaKnH5apVXHcumqggEOSXHDGiaE5Mv5w1VEA+6oZkaoKgAEZX8rioRSc7Mvlxh9l61VFAaMZJjLWQy4qxK5dVUU9ZECEQMiHaILCqU+hogJDoQ3PFfl4qyybAbEByDd4bz9mSIa0x7ByNLar2l7PbTDalxRXFMQDlNfZx6x07GzOiD4yHDqy9/5pd+/lfSfutAbh3fiimBYtt4R7Dd9fOm88E1wT1+Jtw2vqLG+c027Tf9brPjNgBhGtJstkK03bBdHC22/bprW0R3eXW9WizaBqTfYU6zk/DF33+zOz1mx3J1dev+8vzVl9/+1neePL0Stf/0//Qf/53/+3/z9Gp965X25M4ru300hZsePv2JP/+dL/0Xy/vNza7eDtk51NG9/fufP+lOm+Z2uNX9mT/3H9zsd8P1Nx4/ffjO060Cn7TNcRPEuT1OVaBpbH/tn/63d174yPmde8Wztzo+/aEf/vjvfumLwXkViEk3Y0zsLzb7WdDz00VOdn1109vy9vni8bMnJHZ9NRkCUH1zPXkgD3a0qJLRZiv73vZjXtYNIl58+O69ey9nafp9f3T3xT/3S7/8//wnv/Fv/vwvvzGfvfP4/Sb4e3fvnN8++Yf/+Dd/9rM/19WZmtCvN0fLZcqJKygNy7dvndysH61Wt8A5jRkR6qaOmh5++JCMCLHrjvr9qKKbceurAKZjjGoGgHGMU05+0SXAerYS55DZVY1rOkKPyMzeynSHCYEPN0kzRJYUyYrV1Tw7BQMRNAJ0CDlLBjDvmknEELjqugXEHdRVneNo2aqqHoa9ATQ12qRf/I2/9+F3v/6X/5f/SZ/Qgl+9dPcX/t0/8/bX7//Bb//Wsw/eqZZV2u9Pbh07oetHV2qrxbK52PfsgpnfbuR41XRdu33v92fNpjr/kZFnznulnF1Djcu6Lqvg7Ph4u90bu+PZTHbTejPeWjRQ+yb4m8vx5OzWww8uqKE7L9xqNe5u+nm3utyNTx5fDdP68Qc5T9tXT29/4vXjv/s3v/DWB/b25Rq9kyjTFAMzJiAm58jAtsPg1DBUgyQHwoSAZM91IGQsHlM1RcdoULyhyChmB6n3ANZTAEBHZR4Bhs95dIpJynQOU2Yw9k5VVXQ0BGKLExyGEWwiRSxXsUCkpoZoiGIcwcX9VX6yb2+tLMZhLbPlUU7W1HU3n3Psq/YkJ6nm1fLlN2LyoyXAy9rFPPQp5/nyaCQA54icyVi19c3Nw1DNzLGNws6Td5KYiWrSq6FfP5U85FC7zeOHs/OnEmYxDW3wKQ+SLhlqhkQas06hI4U0bfZ1s2izbvbbZbdIvuqn2M3rsZ+Wq+VFdUEAu/WAgbu2Do4TICOGqsp5ZBzvv/yxt975UDHcubt4/M63utPb+yHlSXwyyJF5Me1HcCwAKV91TRXqMIy4HwbX1ADKnjGjCSRJNaPGpJonc4HY4vTSR97ww42G2DZhv4m73W7hz8iha6p7t+4+ffvDBVmoFg8/eN9SrCo9XiyudrthPTSVUxXnQ3+9q7tGRrnOl3G/76rw8t3b7z+8Pl3dGzebXUoZxIbx8fayrSz1EbOww3EfaVLG2tXeJ7x4/KgD82KzeReHIU5p6rMnn6MwAqDmPDpHftbpc46jZlER7zBH0yxxnOquYWIkAiuONgTVEhP3IUgcK18rmCc0kTROrZtzXe9unrl6LugqxxUbYTtktTyEUE+5D67jnAuSq6Hc76/apsn7CQyjTZNkoAYMr/aPV4vj42bWU58kJoDlcrkbdkdNuFxvQ+CoEvsECsjU1O3cs2dnBNMQmbln/+z6UhH2w/Xt4yMK6isvhtMwBbcMvlIdFM2jz5q5DuMku2m4f//+nVdffQjDl599EGp3s9sFpjwO3gfL2QiUnaDN
uvmirVCSSNSA19tewUzEe4eqlqNHQDZBSxYdekRrQgVmjp0DDYyZqkR4+ew6XeWPvfyit37IGdEFx7UnIlAteXU1AzMyFe/9JD2gkc3ISOFAuUQkZTZAy2iGohkgOa5Vk4EqZOY25ck5TjohVaaJAQiNUA3ESCoXVLVgahEpuLZ03JlBjDtAIw4Sk6A4Lm/SkCAZqMmY44UBeAcxTuwYxAH6bNGxz2lgcgIJAD0HBxCBDJ3nxkwlq3duyjtH7JmzKaEi0nOUY0EjUWkbMTXyLQKIqa/qwm4kR/v9ROjayvfjOPXRVPf7MQHmpHU1UgjzxdJMwEiBh2l05NMwoQIq9Hn0TZszZsuVZ832fHiWUrR+ipYhOELnWHJU203R8Ryl++jrP/bqrbsv3Lrz4r0XMGSwuGzaPIwRICftumU9X63unPjgBCmkKqZs6puapRYBYam5cuBDmJSGZGTdnRfCSSOs6PZ6PbaL7vjunRwCSI+Oxn6oZzWo+K7VYeiOOxbdPrkZxqjE7TwQQb8ZoyT25Ig8Y6gbAb+8e8cvlu78uL7zQqSqmS/Tdnj24YNmdlQvlptd77p6bvLswycwJTPIkfz5SXW85Cq0vmpDhUgpKlMEE3J1NZvnnCUPwWB3sRa39BRW82b35MPLh4+8Nh5nsI3jMICXRJbU1eC++Vu/oe+/c2fe6ZREpllVKVtOqpoDkE1JUjTQZJZFTpFVgyIRo2GM274SmAcmrB785hfcxe6nf/HnBoJmFphCHuKuH5mpCq1kSHFSMXA0mligqmlbrvJuGq+2ntCiGSbJgsSubppQG2rKmTGi5qqG+exIaM+BAWjeuSiCzNjMP3gvXz/dgoznd6uq1dHiKCTUfOLTP/T2g9/ZmQ79XnPPS+ec9Ou9yKjTMG6HHFHVdJy8IWqN2SjHdLOVftrvJ0NGsDQlAlUTm9XeVdzVVRWCr8Q7ZBeAdtebdHGzeXKZd4Ol6OvatxUhVJIgJYvEU8zZkJ2rPR614gl2A+z3teNMitONIBv6SWAiGNECSYAJefKghFVyUZqqnx/vnn4I62c/qH5iR6AixSxQWuRFXVZyygioglCIvABYKqGKeUjUkiGZZUNSMGMEMVPFEngoAhKyqRrQwZZhUiq77blhpLBcvUdiBDIqTF8kBRAzBS0gWC2dPsoohsZi9DyfVDZWB9Y+lTpGU4TDHhyBQICRNCE7sizO6dm9Vz77w//Gb3/uS+8++NW07duuvv/S7a715BDVlvNFTtp4X1VV1vz45lEX5lU1q5tmN2yJOXBIMo15ZGQg9nWV4shOEYAdq3BK4yTR+05FkuTKN8qJCFJOFZOByxkMGDFUs1N1Htkxk/PEjpmZnX9OXC4KRZHVBA0AVA6FJSgmgIjMBEBokkcCD4oFJIIAjJQ1x5wKWIeZfuBnISQDKQqCiipmMWVkIyUkQLBSIg5kIGbinFfLiAamoa5M0+3z8zS98Z3v/2H7+p3T+Uqnm22/DXX38Prpi+f3Kg7CE4A3dMPYaygIoiGJNfMqmYDkoR88uVnq5YOv84sfg8U9ECAmA1TV51JMycTn4t9REdGspiL5QGQudWZY6swQiZAdsmdmcgGJiEvijAgdAuEP2ELFIfQ8gKZmSIhApmIAIuo5ZB2peFosl7ASURE88HkgSMxAilcIEACYHSKLRDNIWQCMwdkPDElgQKgiZsDkDrkzUDMp9i8quLwC8UYqog+AEDkBVRViB2YliIRQGDpiZgRcAOdgWgJ3VqBL7BCoeAazJMBydQHNuMRNRQCQyCExWM45MREiiQjCgWUNqgqHZjfEA4lMJAEAETKSwB9hiwq1HgEJqHi+VAQAnPeiioYqyXFVKvqcc0DofDAx5xyzInPS3AQ+SBgICIxGRAiGqiplQwT/SjGcKhIRkpQ3N83EhEyohCZmYoTl4AEUEdXQRAGo3DGISDUBkqpJVkJSleCrrAkPv3iQ0ZEQtchMWthPz+9jpDkjmlpCYCQylbLMTMUMiNhA2REgmwEYAYLjcOiVLAIXYs5yEGkBVTKAIdOhlQ0J2AMSqGpWBWH2QIe7uCShg+7o1AQL0suASsefHY72sPyRiFkkgwk+B139a1IRBgcjeN9UzdKbXW+v2ANBUhOmADlPMhFYGgdBGKe8223X+02KyRmDYFJrZ8uz2Z1/47N/qkUZQERlm3NO6WR+dLSc77fb6/Uwu9dpGvpkMUai4BxrxevdRWgYmHKyKerZ6vzk+LzfPW5PbUyjryg01eZGfLPoFufO1t0cWMfd5mLdpwG01mEx785un3z+K99MEc5feeUzP/XTTx4/vn179cf/9J9Z3n5NY/SQppRn89lrP/HH/8l/+9c/cn5W1YvtOCahnHPW3cWTr9959VO77Wq2eu3P/sp/8H/+3/z3j1+obx834zykabjp991swaD7G5nEYF7/vb////qf/eW/SuzEOQshYWVKIbj1TS9qyua6qnJ8M2zj5QbYZp273lyN48XRUX11+czpEVUVGnPT5Bw3+0mrbjFfnlQA2wkrnoarfhy8pIcPHnfLl+qqsaF/7aXb/4t//y/ozebdRxcxj6vFykH+/Je/8if+xJ/oZiHtBtsMAWsDqqo2NH6/22OK6lK3vMW+GcdEaqyQIW83N2jatTNVyKIKSoS+6aZpIjYgcwaeXcx2dHKyJ2QXqqZSc8E36CpGj+QM2AyzqQ8ewIpFyABBjZnYuSKCskNTUcmOWC2DGSAX5zIQ1BSmaSAk51peOHQ07K63ce/Nq1HWxAOg2mw1//6jN3//rTdfufdxFpQYXag+8ukfPb5193O/+Y/6Dx8slxh3Yx91vZneXl+8+MJZ1VbTPnVUnd7+6Jg2i6au8dF48R3EPTWvdOc/dJPcPhZTW1NWQWir/ma/76cXz1486dKfePVoSP0ffuP9zbQLobp62ld1Q6qv3Luzfvwweb9eX3NVxTFOka6uplnXvf3h8OWvfePb70zrXQ4z75nXQw9AScyyNrUjwDEZMJIjrWDK5A0gJWYANCAUMDQFokN7pwkrAjAQGJkSmYCKsmM0EMglzhEPtlwRYjHgw8QHgBnNQBSyAKExoBYwLJV+RCLMEUr1gYHuMZscsNp375x4n6pF1ZwtctJqscroTl54cX9zfXW13ozJ1/OuWlx88G49WyVxgq4JrCietCZfze8lZUgDg7ckOUamoDFzMOdw0gTKpkiqMPbjzfvMmRiHcZova8lxe319dAui4zhNhOac76dRklW2j3HyDaloSvnoZGX9jqfIQcdx55AVArjW8qiiTV1NKkBc1bRYdhe7rDHnHOvKI9CDp08M5Pr6kuu2hkpu9m3rYiA1QFdxtdRp7xxntaQyXzVjnKhytW8nA2p5s+vnWJlZBhiLJcfYMiLryh/xOk7TDpEGTFHk9p07m80eHBzNVu+//T2J2UyeXJBbnZ7NVx8++MNp6EPwUGuOKUlcnszRXJqSY65njQ8L0JyV2u7o0dNdG3xX8aquyVV5/8wHf3p6+8MP3uu61TTFLBaa2ag55/Hy+mrZrZJYH3W7yw+ffpgcNpU
zEfSUETwgEShi2f8DgIKJmJlL2ZChapridiYmzVlUiRCJRcTIoatcOxexFMWAQtMEizmOmuPJ0WwcxrprDXKaMKodL46vr/oUE4Uwxmmc9l1oHXOcoqaeXEi5VwMM5HwABARrq7lGi32e0th1rWXMQ24wOPCKPKRMhqjW+YBVbRnrtgFV54AFEADqwIQZMXEVnSsP8ZyyWT4/feHi6kNfV2NMhMScV4sVZPrW1978d/67//aj9Yf/76//3qxxlqLmGCeRrAIJFJz3o+TFcrZo6gCoWXY53Yx9VDU0T6Eu1V2gWTKoGpmn1tRyVs+IhMM4BSLw3sB9+OTpEXX3XzxjSdvt1swqDnXwwbkspirsQpkcppwARKXU+JLpmCXyYTxYQA5EpqXrFwwBBJRMM7HLJqhZdCCrAAHBlbkioBPtTQWASkFIlFgxi5oBI4ghkHOWlcwDknMuSmJgRE1pJKeqguSTTgpsyM41BqA6mmZjMiAC78hliwbIyDHugZwZGwSVngBVEhEpKKggoaFmkbITIiJGL2oiQqWrmDmlSUl9aCWNkiU0VNftmNJ6vSew45PF9dXa1Y0ZcuBh6L3hbrMTUfJV2y2laj2AZCGmwAjgETDmJGbsMTwfouVp9OIbDpOqIKQpMRsCkIXarz716id+6N6d2+e37r941wXIYkwck0myOAKSr1d+cX4SVrPyMm1kOYPkDEihYnU4jkPASsUk91jz7NVleOmOWpbYD9NNc3S8vHW3nXV9HNF830+qUDVz8uRmzegYyfKUYbX0gkYgbHEcM4aqZkTVKUYxN5u1t+82926Lq/3JiYSqqRpv+Wr9hKv6zisvh3kbTU3t6sGF9pYnXO+n7s7p4tU7Vrm6aQL5OEoVPLNzDji4yXA7ZYhiveQYg7XjmPO+363fh3FPCZLCOFwjCrJmA3RdyP7qu3/Al++eLD2aTqIOXBYxATHz6A5gWgQAbZgyJgNSSkoQYyLAxs2yZid2i6kFffZ7v/+dqn3xZ38mt+TqyrPGcUTgLCmmBNlyEgnOH62YXE2+v7ySUZg4CSiQI891Reyc80PcKyV01M28D55JgE3Fq6iKIlldoSkk49t3Pv7Ou990zjxvZuZt1mgdhiGuFsfzttnHaBW5mncXl3ZtmLKq5pgsgSsl6MRdGyintOnHmON+VANiloK7FQVGDr5bzNlXoZ6xo+C9iEnMMsT+4mZ48qy/uAExEAVyjnBKAjERgkRxpo6prluo3NA0UjtftRqCpok1BQJQQaRsslPIYKHysttUbKgax42o1Ue3dnU7rR/TEFeLFn6gRvjSDw0iKqVDQ9TQSoF6BgUroTMqrytmCpAMpGwGD0qO2YEvY6SqYOAJEZQQ1ICh4HIRDzmPwy84RsfsXamDRwCQYh4xVMCDcQRAFBAcAhGqAh98CkRQJmEliFa4MGVbjAYIKoCMOamvvOOKKGyvRtB9PcO/9/f//pd+958v5u3yrD27dVp5l5MEZFdX237qmpY5GORhWM/qppktU7aYEoGrXAWSskwk0rarGHPKGUGJDAAnyUmSdxQoqEJKGckV7kyWjGbMLosRsbmKQgfBI5F3ReUgYvd8R1f0IRJVx86RUxVGVCjGG8ym2ZSIqcB0QJ0LhVUMWDgsaESArmCsHTlUMVNAKm2nACgqTAVbc+iSf94ERVDYxJYPFVGFIqyK7LzzTLhbb++f3R1G+c6jDz52e1kr5ThWzp2fnFueetn7EAwMLCFBtlT5ahh2CMy+lpTr0FhUKLbBmyc97MNrDbiV2YEHbWIiBoV1Xa6timk2zYd7CR4awQCAmb0L3teh6pyvyVeOubiBiAnJASAgH4JgB740Fh7XYRUQqUrW7LiIFFRS0sWrA3hweYAVawwgkGk5W1S+eEwIxVyk6UAuPriSgIBUxMBUhZnLbv0HSbaSxoKDd8/UFOwgS8Ghlt5Mc3FWqSof+uO1IHEAkdipSfnKZE3lMADZiPRgv1Jih8BgKpJLdxyVvNXB6WSlJAsPqT0qsSkrZKvSe6qGRFqazqj8AAFAyqkIJgCKwOUwzCDrH3XSF3AWIhPCQclSzSLsy2I3SUkNTCDnFONYLIpS6uTNVE0tIxIRH0xLRGSWJTO4QiI3wkLbZHaqmk0QkNirZjNFdqAKSCICpkUvJOIsqVwzJEMiUAMyzy5rFIlEoUD9QdUM9eCG5JR65zwzJwFiVs0q4oJDc2ZY5EUxJcMyfAVEVSU6ANEBIOfs2RmYAR1yiCBYSEbllocAyGogkku5DhAZKDKpYQGki4qZFQaWmjh2ItlUkKCE3w64JDOi0ldAxCg5GZZbMBGymRT17Y+koilpCJVpFapjNJgkaG4V9kCeuHE0ptyb5Qz7m+vrmHWz3g9JkcM0xq5dsPN1c/pL/+ZfOJr5NPR1CG1XAVXgwjCMQ56a0NXzVVO3Ivnq4hka1qFTbq+2N6717NC5ymk9pp2DNqXdNPaoVrn6wcUl7vetNR34eLE23I557Fr84he+Ksi16L2TU0P/T3/vS5TrH//xn362fzZdfP2bF1fn916sTmbTdjOGq/npor/a66x6991/2R0dNc1ruvsOCV5cbLyzUOO3/uBry9PT2e1rF8LJ8b1//y/+1S989R9p6N98uAGuyFeVryFO6HxMyu3i+nr/4Xvv3rrzwrQbKHgSv70ZT5Zt6nS92QdwKrmq/SbbfspEmBGUeEwkGeomEDkBUoFhSoHhZDnfDuNkipMaNrfv3N2v0YZrs27fx8URMqPEYTc+aapznlXi+NMf+8xb73/zux98cDo7a4PXmIGs9rVn7qcdhzrlCY1CqFygiXQae8edQ6u8XlxeIuLR6hSRdvs+TX2UDOyBSAmIQQ3H/QAu+/mCFnOIQhQQvSPnuBJjM3JcOXKH6DnxYUpTQpblRqnZACRH/cGTJJuhEKGBGZQiQFGT4BwAZKTdlLFZeKAWKPU3VRUqcibRjELbVdv93/p//F/+8l/5ay+/8vGJTEC4qs4++srPzf/C5/8///j6+1+7/8rqnfcfXvVZm/ZmP71yNN/tr2PIHBYa3I1sWbuTs2rfX3EvTVVJ95HkuaIa6LAYKHDN8Mkf+dhP/MgPvfXPf30xzT788PrkrP2zv/xv/Vd/61fFuu1uqHz15NFTBgFU5/nsxXvrJ1fpygK5s9Xyi194+NY7KQkZekhxTJM3h46IydhAbRjzUd01TEFzeaQhuNA4z2wIyqSirMDeZTFg9MHDlIEoJ0UUdkTIJmYAKWXCQApAZo4QzMaJmaU8AbKJAlQeTJAJmLMZEMggoFAy2iBqouxcFmEwABBCY8dAFqdlVy3mHTa+m8/jpLuLa6oxwRg5sdd2tdqpNavTdrgKDTZdWF/2dTdPoR63H8qYuToKoduN22q20mlrDFGZzUBJU4IsxmYq3rWU+mFzE4ceJt5nePtqd3W9GZ989U/d+bijLJqCa2rfxXgZaudlcjBB0uOj46tEN2I1uyh5yjfb7aV3YbMdXV05sDo4h0IVAEGaJADmmJ0CM02ikEynfdcdabwySXdeOfvgwWXVzkBZUr
Ike1y72kE2zBIpTRaGKbmAAWjmfaaRGppDE/ve1YEqFIcWwaGbNtMbH/1xmx5YTkMUdTDE7TjVEw4Q53FPu6ub9ngxn1W762m/T1c5pCEsu+OMm4vdxbKb1wEs5ZSm4aafL+YSW6D5uLsax8FGXFWVb93ZSfvk6bPHF7vgutVyfj1eYUOzegk5e/ZvPnrwmfPbQL7ZdwzuyfZqKX63n4bGu8bvLtehCwIMAEQOzeI0TXrAWu/7YbU88V03Y5/GXiWxYxBVEFCgg/3tgK9QLSAjJbQ+3riw1JyDd4BaEfZ5YGSg0PnGGTUu3NjQD7ldnguMGFbRSDQGnjsNKMqMjQsJgSAM0140B+Ys2SBXwXnmOnv0vNttauduVct9vz2at+hkFmrylaqlKaUU27oyBCbeb3YpjVg5qHiIAlmDI5vEId9snypmTS72eyWomplEeve7b//4D/+cr/j3vvf11bze3GzJIhOrCBAqASCageMw91Wdgdk2KU85ScrM6EKFCeM4OTJ0Lkv2iIjMBXegAsQiEipvituk47S9f3R0t1qmfjPGHgGrOsgQkUiRkByRVzFCMEuH8EJJZZkZjOwqUANJDB7BZRmzCSKbkubRY0gCIbRJkmMutYZonGUAVCQHNsUshEzopzQGRwTOo6IJqIBKNoWy0UPWHDErIpXiXBRDI09NjDfEKEjOcU4JLLnQekc5kYBHc9kURBnZuypnZB/QNRZ7BGXvNQtRQCNANR2slJoQl+yKGQE5s4kJCzmSQTxqVMimWbNvvIqCWJxi0/ib9f566Le7aYxxSFmgDlylMVHwZqAiKUaqXFaxmNljVA11k7MQAIHmFON4eBbMfJ2msQEpd84M5n1TVd209R975Ud+9kc/sZo39z5yj3xVNg/TZheniBkgcxpxdbRob624CTKOGFMI9W6X0HGoPHowJkAzVmb/7PJmsThuXzqn1vtxaqzZjHJ8dmd++4wtVjIxNpPt29AS+e746HrYNIuVjXtmCFXjou2j1EctQB43I8ahNqNGkUL36gt8+7a13Xx5i5ddlmHWdOlqvX58effFl9rVarLIHNL1dvPgcnp88+D9D8ZxvHt6hGSOnUwiBGScIAMbes/sSC1UxFZtrq83bz+wMYEZmrWzylWBnc8yEiPVOMQxWj3tRB8+3H73aye1t2zZYu1rEVISYCARk6xZUKXsztRUQYFMDRKob4KmbIDO15ZHzNAgnBA//Ma3X/ypnzfElCAQgrg8EbksWPpnNICFfiejbPrBUScRMTBxaI8bARszJhHycbVyriMADSiiAiqWwCQF9FVbjzoE57O3yyvFOrz48qeutg8+uHx2vqR2YY3mpesSd7NqBfIkm0ISy2pmjsgiELiqqqZ9JCZkbWYkmgAsV7WDkFM2GzVHICEPPlTV8tgvVswNTpD3g9RZ0PbbfrzZjVfXEvssEQ3IsQYcQQMDonlTMSmhDiICdmoYMPgqkHrd7WV7nadMpAPlzOJDgzRBmoxwt08tuuy8Pz7mk5du3n6/0sm3bnU6/8EmWVQIyEARAY3MAIQUQZGNShQIQa3sTwxAS8AHzECwvBSqgYEhGaoaCgAiZAQwJUQERUBQodKzYYZA5Z3SETk0OtiLEAG41MIqGJAKHoq2i1gFxWB56F8oeBgRRUDFIlsd9oBIiGbeO+bAQF1Vd/Wya2fDpDE2v/fPv7JeXx8dLY6OOh+YHKBqVzez+Xwbh27uG8dZ4s3uat7Mu6Zbp8hVZSN1zdEU9zlJzrpojohdhMkhOPaIrJJjipLRcW2ql9vdanZSh3a3f0ZojqjPoyOf0AxcVS+prhGdY3LMTMTsEQlAAQ3pEApj9AggGgkZiRiUkQzQgAgcgKGZqBASEePhyUVJhYhUsoIiEBqkFFWz9x6JRBISUakfAlQTgoMrx8rJBlMzJhIRch7ARHK5CRyK351vuk7H4dXXXvvmt+K7zy5ePVvMlhXKRIxksN1s5lKN09R0XVdXUXrEqQoO1JkaCvbDzlkGAESdzRCGy/6tP+he+mHulklBi9QHelAAn6O0mEn1YB0q3zhFw2IIIiLfsG9CmKEPjvm5jaokGqkcOhhQIbwgIjD8EdZaEZH5OZDLNEsiIkQWzYwMyIeQpgGRKyZgKAMcKAxrtIOdJxdxEAHBTPJExM9xPwRAhHYolnFBcvpB52w5+8HVKU94+NZLsYAd/EQABnYQUOEgv9tzMhhgaf88SLqlu0w1M7MpgKmYEjlyvhSWIFJKqShcphmJiMmkaDpiYsCkORdtsTwBiycJwMprIWGhO9GBbVTEMCvnlkwVseTSixojZkLskFjKNS2ZKzHP3hAAQExICRGQGVDBFLQoocDsntu5CNE0JXSO2YEhIZfONTzoa0YHaVoQGZFNs1pGRDvgOBGBEKjgup+7foyRiSDnpKbEDBwOpY36nMFvSsRIjiioarasJe3FLKiEKAZQPuhzyFEJ0VrRAbMBZDMEUGZn+AP0Vem/g4Jlg0IxZw8GasLkAEyyFGg6+6okLc2EEI1I5AcAciTIUCzqmtF5IqeSiNgx55wBUYvhspikEIumz+z+NalIRNt57ap5cIzojh32u2GYqHI1k0+Sm7qbZBIFx4xGla+jJNPMTK1rYoQf+/HP3DtqEVO38Ntd6iRoAqp03T+bN2fkGBSy7C4eP85jns+OB91P437Ybps6dN3R/mYjALPORe3TOroImtLlsA5ICNOd42apcvn0A6p1VrdX6/31ILRsKCIhfu73v+KP23l7km8ir9//q//J//B3PvcHwvNvfv7Ra2f3ps37bfua9pfNpG9+7Vf/1J//i8sXP/70+1+63r63OjpZ7/ppGKGib3zldz7+6asQuubs5z7+Q5/95vfevdx/9ahrNGHb1MO4GeK+q1ZNqMdxoiS/9qv/7E//0r+9PDnpx+knfv7Hv/vmNzdPn5lSW/m6oiFiHwWZRFI/xrqrxLvNerq8Tl3bISWJo7N66jWjuMYog/d1zFvAcdje5EwGjffStLWmcbfdd7P29P79YQy7m+045X/w67/6+PLRp9740dc//kpd0eZ629T1lGSMI7I557u6uh425HmYphAqRAPJUxx2252qxSw4TSamObvgEUNUy5KncVQHqkDsuZ1JFXaJOFRIHhmZA5JzrqrrlQFmTZ6d5MggyCiiRAEOtzHQ4hX2VWGGAZFJAZ6Rmnn2aCAmIhkNgcAxN1XIgli3aeqhagRHAhrHqGppl5p6XkH+G//5//Xf+5/+726fnqIz0eQczVfLz/yJP/mdSp69/4cV4p1Vmypmy9dXlw4sTztejARW+5VJUBkYGXC6eO/zdtI3x69qRv+c7f6pF89+5Wc+882vfPu9t978c//jX3n6/h/u6v7qev3Bd77X0uqD9bBs2ru3T7rZjpTrineje/roURohJV0PevW9Z5f7RJWrswHBsFcCCxWDoXc+q7Rts0v9pz76mVfvzD546xvtrBoFHj3anJ3MQ1XFGPuYfKBgzodwdb1Nls/u3oqbvYLFSarKHa9mz55cEFEVwpSyC+TBj3HqY6wct
8dLURAFd+D2oQXSHE3NVWG93WfRqqPQ1OQw5+iQs6qompn3NExZgo+iOEmD9cm8rhkU6t1Avmo57HPKm+trFOtqjzLqBER2dH6+vr5ywPOm2a+3FGrftiVxHcc+xqlaOdIcunnUQk1cpCnHtA7OJDnnFgwugjbHJ/3lzYPr6eH3r7PkZhG4v3Y1ueABaTNce0crT1dvvZN0HOK0fOENd/ulx5e7vI3tYhHH7fJodfnkAnGGmof91nuaprEOTgklCrIyIrMjdJWHy+0NYUs0dI7SoHHKFjCWngDiFKfCgFYQF7BmPwt13Qbv0In1mz5k8QCKY9OEnDOaec+mRsT3bn9KYr/dbE/mqwkGX7na2oQxy3jv9ifHtHHdbEhpuoq6S1U12w5rV7dxEiA7Pr6VU8wKpMZEoakA5OrZxePL3d3l0f27LzztrxZN0yyr9bOrloPzOKp+8PDB6s4xAm72lw6kT+Pd8yOS3PjqxrZqcjybpWl6fH21/OT5vY/ee+fbDz9893ttWEACFDAmVYvxAHdXtSTiuwV3Oj1NTkwJOXgzy2DMBKJACArsHWRU0wIwmnXHKmZq5Hk7bDwtQBUNEHFIsW5Wu/E6NF2imGWfMWE128WYTY89pSmhgeho5tGQQR2S43ZWdZMOfZrYNTlrjpE4zNoZGnvR826pY6yCV9HN5ooLPofperNHseDIFOZNK4TzpgXRvaXZrAHaX15cL85OyEhFQhWc46PZrafPLk4XL7zxyY//nd/+tY0O/ZCncfKMqgIKzjtgjlnaKjTBt57TGPdRt32PgKTAxAG9QUKAmBVRCal2PoqO0+CIg/MA2ng/xSm4ikmDhDdunV4/uRh2m3HMTVPnKc7buvi2HPkpRiATEwRgIjEBU8YQ8+ScNy2z0KyaS2ULoQfknIW5QiJJmai2OIFBhqnydQLxvs2SCVBMA9cqapYDB9UCgzBDRSRyXYx7BGVmK/RZyarROTIofiMQzdmiM4/oEAOSmCKoRYlIbCZZjNGpZGYWkVIY58oUL4+AaChiB8O8gfMcFKLo5NDbAUudATJxMBNkBgrAvYOgWQGYGAlttmgT08V2t13vNusdOQ+AjmAYhpt+Olq0mhw7JqB+t27rVdvVg5l3bhjHod+V7ELlnarz4fAwaKp6Fpq55vUuZtN+AI9zkvmf+qk/9pkf+eRy5dt25mqfknjn0zTkpMHXMkRIcbZYHt09C7OQpkxqwfzNzajZta3zlU8azZTUHFdguV0uV/fOcFHlOFVom/W2qmdHt27RvJZ9DI0b1htVbeZt3XaZbRHmna+2j6XfpbZpmrZiI/Xo/n9c/VmwbVlWngmObs7V7OY099zW/XoT3gTRERBAiE4gEEkAJYQkUiiVUlUqSzJVNk9pVmb1Um9lVvWSli9lVWVlaaY0S5lSKFEvISQkQEFPNEBEEETj4b377U+z27XWnHOMUQ9zX4es6w9+7dq956yz91xrz/mP///+gED7sG/KxTa0zfEzz+Rrp82NG8fLhRZw1UU/bwDffved07Pbz37oxfWwUSTMZdis21m8GsdxSv3J0eLG9XbRuJsgCGGMLXTByBFgvb6AMft+o6vsDwdYj6AQmCGrCNpQTAyjZUgoRE1owml3Vd75+jeOuQNTAGRANTMvCEDAIujuBU2YzQ2J3EyAK26EAECNAB3AXMlrvMm6jna7y3c//+uv/tiPTPvcLqXrZnlvWfM+TVHqXGrab5JldyNoQmhaJE8Q1sLt6ayPkbO2OgZaY1DTAsWwABqCBzRBINcSJKArsDYBIVvuynFzY9hTHnfRmuNi99/b/Ztf/o/vrh/bjADZrZRigNzGFhhKmVg4NBElSNc2neiUA/Lo0QoGzcP791TdVZs2tsslHx8pB84Fy4TTqAMl9mk/ekmWBtMJ0QgFGTgwuNswWi5FjZwYsVhB9MBEDuxkiq7go+WhIKljdkaXGsLcUkmmhNBNSNOip5ObUvKCUWrIiJ6CKbT2/zw9m4G7elVeFDJKdQJRTb+D2QdZmvqfGViVkRzQAdTgg/Ym93oqcQdyrqwUgEMLOjmwAx/oBlS9A/gUdQ0Apmalok/sEIWpTiQzrhVZbghUxSGuhyU/2EZImJCEYyBhxHns0SRpMdTteiXBr99YHh3HQChMgaTvZoRhTNaEZtb1mnfTNC36sxi7sSRUFzSSqCXrNDKyxN5cU7pqmghGVgq555SYuY3s5sWn45Mjch/SlZZBGjYrAaMZIULTH2HbYRAGDCSEIMTVrMEsxFx7u6qBS0JEYAA0LwDGJBX0zkhqiQiZgoPV8ikHIEdhQSJhmUoRDpozMSIzUJ31HogzFaVMaIRoVio+F6tkb5VlXq/EGFEoau0mNy9uEhp1bbN+7JWXX3ut3F9dfejGoiHM42XbzZaLE0jW920qxXMJDOal9t200pRCuUwcoqFbTk1sEPL+yXtblsVzn7AwRyY0F0L3qos5ErormPHh1CFITCyomZCr64mJhKNwRA7CQnTIjMGhrP0ggRECIUM1bv2pABpV0QRALYsEq/dFlUkArCrdSPCBqImOzPUkhITqCgCCQkRIXEpCZgCgmjkicP8Aq+w1kWSmxORWVaYDtauYQlUlDgj5p7Rsr71hZoxIBGZY2+5rSguh4nUAgFlyyUiuWpjoAI7/E8Y2aynutVmi9pTVn8fBraJwKprbwYCwRiAOdB4tVZvCA/4GANGtAIDaQW4zMwAjOsRB3dQqn1UdkAxci7qbRAGCSqtXsFJUwZmIWyHB7TAeN3LQkrwOGJSIrZQaRSNCcPuTRjlErHVySO56qId38KJQbXGEqgVJmMRMCVE1IyGzmNfQWZXjDYmhptUQnnbQo8OBCWJWzJSJ6mJmIACocchaIQdYkyIHGJZ5FfGqyI+qKiyHkiUkMDX/QFiq2V43V3fzoojsrhKi44GDjx78ad8fPs3tV9uXmZUyIRESoCNTAPKqtCK6OdJhrQIAMEnRglQp8gr/f6yidt4VsAhmVpB9TGtjbGLLgFCMPLhxUQtxNg5DzhkpgGUhWhwvbMifePW7Pv3p799erKVB4rCcL4Tlwfr+3ePbN/tnTm699P6bb8yjnD9YCQXp6OHl42df/tDlarWcdfvdftgPqeST+QLNmmBW8PqNs3fffP3i6p2bN5bTZvXe+bv72Uzz1Dgc337lV373Sxjas6OTcTh/495bScvdO6d3593z19OHXvz05fn79x8+6Tu+fnb6zLd9fNwVdzs9HX/pH/zfv/cn/0YJd1dDc7y8brtzaomhfXSx2ak+OZ++/Ltf+CTfDXhrcf07fvpn/8u/9/cf76b7uk3jbmtg2/14upDA/Gjz5Ggx/8b7737syaNPv3Bre2XQHN156WMXj3+v5NJFWcypXK422+1gcOvGnf320W4cWwyNy2Y9qHnXpGdv3nr3vZU0Qp0MeR+ao8W832sxtah5FtqvvfFwcc2XbbtsENTU0bCEOB7faB+W1cV2WPbXPnTnuXT1pATykZPFkhOQtxw0562ZM2XLMZAAmiuTDnmnxUQaAx+nDKpTmpqul8BM
OU3JAc1AnSaKbTenRhAphIDEjt7EligUF6gxSs3ZsMrEbh4k1PVZw61IUu9Yrmg108Nzyc01ARgiowNz/VQb3SmIEGJBnM1PJ8BdKuZKyMBQXEPTBsDxcv/z/9///m/9nf/22Zc+NGQFc0K++aFbi+VP/NY/X73/2jeB0K2kKUeE+fK4Q4XVY6BAsyPzJmd0a5anqpfT9vI11/H4xnNHN27Xu+D0ejw+beYC53v7hc9+3i/e/tQrL5ad7a92TUvPdYvP/Oh3lHH48lc+N1me9qtn7975ylfPbYrjZA/O1/183i0Wm81uzENsQ7cIZTQOMq2TZTXGuIg02TSsctF23jXzXjQcz+ja8aKYHR0tHz24PFr0pDTvZ5a8QFm27XYsTRNSVBbsF4tuN4LlvmljY7HhKM1qGxINXRu6KCm5A3UxVOsxBTbNgSllU6P9MCz6fr7ohmFoZp1nH5KyoJZs6vPjowQKCF0MNmyhmBdrun4zAXd9XC7Gi4c3ZyfnD94NbVwcnZw/2jZ9o0PIxQAAmFHibNad31NpA419GcuQhiW3Dtr1J13brdI6dlMAY2jdQdAFhml3bu7kIYT5+4/e3YwZoZzevrE4O9mvL91RIndtaNyn88d/8Pu/vx4GFD959+q7/tNXz05nGbfjznLOoYkEbm6h7fN6X9SWfTfuh0zGscm+JqY85VzMAYIQtqHknL08ejA+s7iGijZMCiYSozTMYF5iG4bdICKWBmactc20TYvjVsW3uz3o5EJFM2b0gm2gvJPl6Qvj/l7bznOyknTRnprlTHR2+7kHj7/VND6U/Z0bpxfnl8e3Fk82u7ObLweed508fO1LbdNtphEBAoeuobaXEGhc+ydfvrVePfnWO1+7fnYTfPKUm4Y3221/NBt0nHVLnwBAVldXz925mYbVrdOjo5A3VxM55qzb/ThO+ezGya2zk/M3HxyRf9ufe+l3f+tbgWYgQQO5lbZrD5PkEELbKnoIEYnz3tqeSskhRjlsAMhVwQHdtO7azcEKiYBpyrnp+tjMQmibRksuBpaM+jgXS4MaA/ZtO+1KyiqqHfcRJKFKnEnOUEjLLsQ+aAkhpGmVgQiEzaGom09aZi2rlmA5T6jFNJUJVEvdUtQuEeza2dXmMQbKDF7GaVdabnppXCFPpZnNApAEGUtSzXG+bOeLq/fe/9v/zd/5lV/7F2vdF0YdJxY3cMuKCIVYyQ11NpNFYEvT5LpznNSiMBN50YxDrFhmrKkjzGqmFoyFUcc9AUBcdm1XptJOfnO2uLr/0Kw0sXMrYBgAI3MQzpMhAZorlBAYERELgBI2dXeOag4p15y5k5kyk6uzhJILADo4B1HPSEwUtCCH2TRdkYjZnskRCJ2CyJjWtf26FK1FGWbGZESAIO5qpkAwTZmfHiAS+Jj2HTdiQTDmMXsHpsbA4uTFJXCMlFOK4SjlnYMnVQQKIqrJPNedeA3KmmYmKWAEDo4NtwhiZiJNsYRAlf0JQAl2imPkPg2jSERyzVpZ3sgdNntoGZlbCMezxTgOgwwsuN2NIhhCzMlkw0GVkUgIiac0zNpZSoMDaM5aymHD8GmPVQABAABJREFUFKhpZtfatpvt274Zx253IR9/6cM/9WM/kDV3i7bpZgbc98FShjSRFXIcxwSIs+unzdmJU2L1hucXmz1CYFISwQANtZbHYC1nuX/17rU7N2fPnu6n7UICDPly++T6c88vnlmORTERYbwqidsQl7M479lyFEznVwTQL04Wx8dE4DE0fVfW56wouNiPtLx7o3vuLsznRzdvBmbd7tqOusj333x7Wg8vf/jbMycTmPez4fKch/WYNpf7i7Pnrl1/4S4tWjeL3IS+a9tuUjMolkZUh5IqMjtnirMebkRCF3aYtGm7aSoinkC7a6eTuHpv63j/y1/opowAxUoMwYEcSQHE2K0gmqkJi5UChm7AwF6PGWCMAl7PZK6GYObAhm7ZZgZv/uZvvvw9Pzx75hQoNRZ1uLJx3zVdoDBNa5sYoWUWbjsOcZxSXPSzsyO/0bKkSOpjpn2GYpaSZ2UUUNdcwN2VzM1tUAwhiGk+OuZSEk24kIV1i9e/8c2vffnhu1//4+TD2CAsemkMAzlw182LKbpPJbWLVrqWm9jNF9mpWCG12DQ0lnz/fNpuPWUoThRkscTTI170ZTv6sLecyHOOrH2gSB5QIk2rVEoSAmEBN08TMSErqofYgicBkJQQMrNIzubNfjsFzwiAZEDOiJhGmCbONu4hJ0fiIj62PHe2J4+6kromODXHy+N6F+RUi8RA7YNBPyCSA5mrOOOBdl0tFBXp8sGByGuEgSpJuApBWsu46sG0NrXWf0sFoJ6xCIkQpM7HDVzdCICpdldZLboCqpMcN3AERxXkA2nGD9E38EP1ux042SgUAIACI4AAaSqxPeqOjtMwrcfNC6986uLh/WGzmi2aGNi1zJaLvpuRQzY1LfO43I2bMV0eddfR2dyGaZg1x1FkmFIx7WeLaV/UUhRkpYZYCQb1nAZzRWd1GNK+awM1PI0DgMUYEbGoC7dJXZoFtQvgQOCMJMyVJYToTIRc0QJIhODMRKZWu5bMnShUqUetADOgVisVecUPeZU83B1BwVFQGYAEmEWdEKCaZYjIzcAP0eAabz5oCohMgm5WNwDV7uDgiuBQVFmq7wMj98V3s1nz6sc+9o0/+sN3ngwv3pz1c7cyGehs3m7T2Pez3fYqE85C8GIZ9gpTiP28b/Ow249T08/Or7YNhaOmWT95z5umvfNqaY9RWUTMyGr9OaF7IQAAFjUadwc4EUCVtpiqOQspcH1N6bDMuFabeWWv06HJq6bJiA6RfK62C3eRIFQFMnRzq0+ng6emJgMRvFRTBmA1abAjWDXkemUlO5OA+0HIrDkgVWFhYtXCFErJBy51Vc3BEapvhQ60dySzUhWmg3HJaxmZ1mXvVdgDcFNAoFpPpm4fFGAhIYCpEiCSoxt6MT1sKLCO9p9y6K1UHpMjCwGpFzclEq9V91qo9nxZeZrjQ3BVVQcixAOqiA++I8K6czz8rWrlqT4bdyOk+llATK6KBu6ELmoTO5Hh/XtPzs5eMM1eYQhIZkWtMJOpIhECuR6K4KFCi4AArJRUvUU1/uLgVelDQEJBYNXsYOjCzGpqauZaEUCIICQOCOAlKzFV584hBKcV2o/gxsRqZurMQXNCCkRgWhDrPwn+VCKrmbJ6haaKiFjDaaDoT2ni4ERoZq6FRUikwsGrM0tTRmZhUVUwf+rMCmbqakZ2cLUBIAWkwx1NSCWNWPtX3A7Qq4NieuBMmVrV80yfZm7q/8YpoQiy57wZh6s87dwcKDgxU0BkzYUBRTiPut0MeUxBuI9d4+Gjdz/5A9/xY2wgAn3fLZbznJQQPvriqzMJrL67uN+3LKLMTEz7YTw6uaUpRJTAOGvb7X6DhMM47laXnkcz++O33/zSt/5oM+wePXj84O2HjM16PbZtd+f549/83Oc3abp5+6ZO0+X5+YMH2xu37p7E08uLxyc3Z0e3nvnNP3j93h7feLJ++VOfXty
8MwzvNguc8ubVj/+Z5c0Py3xxcf+9Zz76qeXtuw/P1+OQkZviVEzW+/LNb/6e6peZHgONP/Znf/bh/fc6sdmcKGCczXaWry53wx4ePr4ylrfff+Pho7eWy+nGNczDennt+p0PvUoyW6/Hpm9mfdu2sisJ21YJ1ttdQFu0XcB2yOHeRR41XO3H7X53tOw07zabB/0S5ydN14aFnP0//i//6GpzOhDSDDiyqT16//G4WhHpYh5/+md+7of+/F8cIG32O8sWhMjH+axrpJOmc8DdbhhG7fplaDhN23HYDMOVu0pssufV7gkJe+iwmRnxkNI0TqYW23C1vYAg3emxxmgsRmyA9UHkwMUREae0M8zdbBGaJrYtx0gsxEFCwxxCiDE2MTSE7FZnw/UDCwDQn+LlnAAZazYKuXFkMPRsAtw2y6OTO/3i2Ik1Sr84co+b3RSZ5suOcPsvf+H/+c6br1mxmqlisNnx6Sd/8DPNzRsW4tFs3nET4mI1+oXGe5vx/sXV/fXVOJW2W1ATPXPDrex3dnX/6t67ZT/Uu+Af/9Jvfu6Pvvb8x1/8sb/4mTfP/fZ3/iW8+W1/dD999b2L7bheDat/++/+xe9//ivbPb77bnp8Gd5+Z9TSFHfpJPbzrj0CjLFpYmzEYRYbMofR5qFhCmE+3+R8crLoF2KKroGxn/Z4cnwDIQIgOjMFwYgABE4AXQzz+RyRGKjtYtc3RLybSgwdAjFSCBKaaAAojCJITIQSAzYCMXgIBQmYQpQobOYYOpCATEDkQPtkYyGgGLt+tljMZt286VpuiAgkjAZZYRgGgQJpa5t1j5SnTYwxZyNsY5g7SNsvZvMlMxmBtLNsFmdBOmnni+w6W15DCqGZjUXH8cK8gA8lJeROOEoXsbVuGccx6YT3nmy2Y1ZHQDy7fW2Y9uBATk0bAotZ+bXP/tuL3bQzv9qmd998t2y285aaRkw9j2UaBuZGvaQ05jR5LpAtIHYSHfxqM2pxJteS0jQRkRA4WFaDAOvN7njZkg6NlZkggmXNxTW7IeGYEyNhLpALgs+XR6vkg+NEtMsTILQhRLDTePTR2x8lexTj9ujmHPvYHZ2MHiE0ZSwlTdEYrb1587mubbsmHB23d5+97pvLd976+vnVfWXYD6Mlnc9Pbtz68OW2pAyRedysIo/9DF549uzu8zezAJJ086PQdcujZd+13PB2yEDd8vQmxjZGhjxud/nBxapdHhvGzT5DiK66eu/x7u3L4clufTEGbsC8FJv2YxP45q2nxwNzDiKBKBB3DbVhyLlYUVM/bF3M3Ikd0EWY2oZCoBAcStIpxJhLjhSJxUFzGUvR+fLocne1mUYHKU4KbVKQIMTc93G1uY8MqSSUYAzOYoyh7xPkbV7v0w7Jx3Ew9LbpQhBCYuLj5emY1RCz7qcyFTRqG+n7xAx91Igy65ouLvq+ibNFM++btovB89gvZymXcbtzK1H47OQEd76/3Pzd//rv/Jvf/cX3rh47wW43lJJVjUhCiCLSiLQEt4/7mWAx345psx/HcVfnVU1oDl0SQru0I4LAMk6jqaUpObgThsBd33Loxr3GJHdmJxFIYsPSYEAKEPvATSwA6saBi6u6E3NlcxigIxbPWiYDYyYmcFRCNgPhCObuZiWDKxEVTURo4BLEQOtcMXBnWvrYmiYCcC9mhiTAVOdpEXv2VlURDcEBVUsqeST0GAIzu9Y/mQTJCgRp0Y1ATbNaBsTi3khb+zqYaMor9amAAUsBBUYPCExNWJSsbegAXZiIiTAeeo4BTa2hpqRRiIiQOQB4lCiAkfqIIbAgqufCZlBSNB1XV6TWhUZQ9sOwWV2VkmLEEAXMotTgAWpK0zDkMQ3bkRx6abQYAwUmRtRpgqeKaUk47nOAZiknnS0/+conf/LHf5g7iV0bmpkqIUlSG3MuRdUtmWoXupsn8+duFcJxTERxmHw9TpNrc9RJLxQwl1Fi407b/Z5De3Tn1janJjak8OTx5ezs1vFzd1XEswmF3TRy38/ObpzcvE1NE5po7sV9MljeutNdO7Ouk1nXdMRk4jiWFO+ctC/cpBvL07tnIl6mPYl1fTttt+fn9z/8XZ+Qk7CdxqK+3455lVcPdm9+463QyvWXz+CIFEpKiiSWfUrqLM1s1rSCVhpppo1uV7pLOBLYUWMnbe5D6dokwfsO5pGvLfO8T/0S4uLxN77sV/dF1MwlhMORyApQIIqMAQ2xdh4jIZKQYEWjamEgAWRkQqofV+CMyMIhUOgkNJrf+MLnI7SlNIMGjyHEBjVr2gEEDrNmft24yxRznLe3bjQ3TtsF9brt9iu8eiy7LYwjG0VsGXuAQByBxcihYW4DNK3HxiRwG50zSiGf9OoqXVz0Fuf77qx7ZjY7g3k7MmJDsQmxadqmaSVKiPPFcnZyPDu91h4d59CWECeQwXizGq7uPxnOn+yenNs4CeP8eNkeH3nTgeaQd2V1tXn4ePXwYv3kKu32KSUdpzImUwBgq0pJzlBKdWAB4DTtc5kMLGnZjYMW8+w6jYEKwKQ6TpaoaTBEJy65jPuSBjXHlPKULacwXY2USmzESYDCdnu4C9RAFUpxK+BWQ0mH0ysegNFu9XPBayVVDd6Ygzqoo5MbuhEgGqARGbETG0g9zRMhEzBpBWQAMhy8GXXG7e6qWuq83v0AzqhFSgDVXlEdQ/URyY6BWJhEmMiFQBgjgRAGlhBD18RWpGVpOZ70xyfzo6vLCxT/+Cvfvrs836wex2ABQRyOj0+JozpOqhzkxvXbSoXAF/312fwaMLqVGGdOIZs7aYysnkffFrbYtBIbc0zjVPLUtX2U1o3UDIgW7fG43ep+AjcHL7mo0VgQ24V0SwpRhAOLMFXULJMIBa48XmIHdwRmYQzuZZ/P1XMd/qsVd4siQhA4MFUbC5qpuSGBWmGqwXMWCjXa4mDFsoOLRLdq/YBDS7ajmTKLiFR7gR4GvQBgQCASkFihGFpogxNQYFXNZk3XxRhmMX7slW9f7/n+ZV6nmIExRmsDtWE/rRgwoBAJCzaxITTCjDy1Hbd9626EjTmfLBcngcq91/Pjr5tPFtg4YAyxa5u+kxBCaIkq1EqQAgB/UAAGCFg5jMxwAF3jIS2F1bYGBMAIjMhYbTkHRnX9ZeYAgIS1vEy1FtWjuT11uSFU0QQM8NAWAYSVEmpWEL3Gy/4ENqwZDtAtBARiVC9qGRFUS01hH6JNB4QPAaJqBjBzU7PDjYJPPS71Kg6oJTSrFCd9yumqGdBaV6h+sEFVjYBUDYDMsf6g1W8CAOZ6+EQmOjyY1bKWomqOqqZaDvc+OIBXvLc+3Sva0yRchSW713scqnLk9e/joRBbTdULCwOSA2ou1YyEh2I6RMSi2jTdg4s1kqiVyjhXV8f6aiOSOKAfmjAOXjw100PuC9TVsNbSsoM5IBzw0lUVqlfrgAdabB1oERMRZdNiauAkVEtu/WBVBLODK+qgt9SKNDcSqa6jpy82qeVDY8cBOQQOoOb1aVpUiQNJbRUXN6usMUAkCQ5eckZ3IiFhDgEIrULXgLFStt
snhvyur3J6/8pb/+7p/G47f+YObl08+3bpG/dr8/O89gjGqEWQSAGREcV2LADMxIaOQInHmHBEoaRSV7xICR1007VRqIfMoy5A2DeQ4QzbnafD0Ysfet5dgLMI4Sh80AG8lrM9UwrRIhsBCk8eIMmbVuhk2/PjrrD49d4C6T351dmU0uV8gFhYqqSuV1iwCMVBSIVCUbiiDq1n4gCobICoxopoaqgLYNqJRZaKv3FfqJGjCSmqEBKTki78kRFKkCCMqA6pgVkMuQLIKE5FpMZNgazo5OVrvtRLvDoX/mvEwmbrWKfRwnzeTuq3f2r12dziY65v29A0NRzT6YAdWhUYOS4ytxW2aX0tDUTVW33diRsYcqUYQyIZKdrVaSxIdKUdmzS+SpTjgoqplUzgVPSRSRkfzQpyzQTidVVQEhqwSuAIG4TF9Y+NDldDbMJuWeSlvwAsI2cLR1fSACIUrRelQVgcyMkA2VCyu6RJgYyeyyjwm0FOPCFjijKlbkOmJCYAQDQsQsEQ22jiXAYlb6N+27gLgFjUHJqKGRAUI5TAigxkhmJlawwVzkYFPkEqlRYSp17Fv4EZYy5Ut+mZghOOf8zv5B343rk/eaoIqDirJi6nOZg107qQzHbuMZmro67zfe9+RCNW36PoGaD9wmvjg7YnIffenutx9feK1pHC2pgqSUpY8Co2gpuyRANMKieRGhpoSIY1KgyXx/Fy7NdcgIUPwgjKrMjKRgJFvijZWxuhw4UTPTAnMlRFG7zO6ZATC5rR4gamCEgIgiWzBzCRltbT4qCGSwbeqCS1sWQumuSAbERCoZYbs3IkdbZRDQSmYKoNRII237thi3AoeZUmkhBBUDFAU0IjbdmqFEdVuRBopWCrcAgbMIIRFCYS0T+fLb0KXUyIRbexQQACC4wqiXZHWonz09Oj86rZrJ6rxvp7Nvvf3wwbPTl188GHI3Rnhyftonef/+o/l+0+zVm+Hc0mC6J5pDqFfr9WJ/XiQMMyzlqGZmkgkREdiFIgSXY6Y5Oe8uH+qGRKUQzMQM1LEHg+2+zbQQygn50nJV5DOALT4cTJXZIcCWjlQM6aDlS7ZRQSpBMwIwZiZDkYjsRBS3hWIl7qgimYlKOAxAtUT2kBDMRAwMC04bdAtyUTOz4skgIJF4qU8RbHU6kpSsuAuREJFI1RRMEBmQil4seUs9NzAmNDCRVMhiBkaI2+COgBao2L8tFTEiIvuAJYPDSKT8N37lb9rmLGOqgsvRNtJXdR2adkjJ0sger+5eiasVoxdUdkwEB/uLk4tzwmwQZ9N2d3rwww9/eHP3ugzoA5+vTo5Pnu4cNJuksB5rV12ZV7E/uvfowYWJSTpdbTwSCnigUIUkowvh2dPTSt0bt2/efe55bKfr+4e3b1zv+wEngaqAQBQCeWxmUyN49bVXuK8//8npF3/p+vtf+mcvX70xXM2f+Gu/tBKlWTN//rWdF17dny/sx+996w/+xcHu7Ws//x8e9xfp+DA8dysbADggQkTvXXtzb3K1uv36x/oPH3vv1qfLqPDmx16bnizeefj9g/3q2Xv3/+A34uc/94vXb75++/rHTs6+Wzc1BgjEKY6pN+9ZxxhtYNwL3rez6BwfzMJ0kzRmE2DC777zjEadN7v13DFUL95549nJvaerE4HmfNMNUSdO1v2yaWwzLM/O5WB3cnzYpIg1EQJe3V//lV/8JbFhWnGOcv/JUWjbUWyyt+PrWTRCP/NVDchEwTEREaAr2DP2HsmZiSEDsWZT3DZgOXaiAGjOOUCUmIHQeU9MiOCrxgA1J3ZUWo+RSEXJeVPNBj/RwsutxqSUalvOiRBEJCYhU81KBrUjJMKqSWroytYpg4JKNLUUsyloViCQLAYExI6QKXDwAFpVpmKShELjp4PFofPHGjc6DM7VdT2L8WI6rVV1eX7+3o+/94mPh2//N78+DTd/+sWf4b3BzYHc9sUIDPt1zFnIgY0W6ur4/CJqvRyGedDZzrW7t+8+eHAIGmeL9mKpMcesQgC7+/MUczarqiaNw8VRbHcn08pRP1g/QlURw6yaDuuuJT/0HWMODA4QLIvYJc4peceEqHkkVsWhRJfVMjtGEzFFUKRsYgoCBEhmiMSoIoTI7DWVZwl64mSJySFgATw4tmwZiQpcDokRESTbJdVOgcwyApNjy1JAdkyczJxDAkQGEoya68l8rQPlEaEVBa6D9pbyhjUET3HoHQcV6IcIaZQhYqjHMesQ03hRVR6MzTTraGJMk82qq0VdysuLw93dnWHoSHDStuSp2Z9DUzF4BaiaOmacYA1ZVSKq1pMWFIchhAmpmmFPJH2GLlplOHajUM5DFsK+G2NSVzHXYVj3zjcDxGtXF4f3Ho8ii7aNALWfjrnnylkCBiWwuqqGrkfPdVXHUVTy6ekhe4fV4pUX7jx9+l6/OX/uxTdmuzdOz49OH9yDzej9ZDKdXb/pN5uuadv1xbKdT44PL8RoArg8fNJOWlezA+wvLm5O2q7fUDSNcX06zqdNvah9TeLGylfUBI8OUNDZul/XVc3qfTVdXiwvTlbZDIG9Z2f5/OTMiPYOdgFyNyxnre9ODvf3Zthr7OLOvHn39MEvfu6lH771MIbJcy8+v5KLN37qxfsfPppaHvvu/N1+/oLj/S3cHQECMwEQASjkGMW5tm7OTlc7u9RMW8S23/RdP9ZNyCkjgQuenQeALNEHj8YiKeeMyE07IfKWkgkC8GAyr0OOyftgak07GfuBPASPhOhDBSSOMnFNuKqqSlIKxPOmaVyAmsYxktLFsGy8j2kEgLEfyLn5dJqTSmjGzeZjr7xwZRG61dmEww/feTKd3wmT+uzR09R3JpJz3uIXiYr8nVXMbH161E5aSamu6ywoKavmFMdre5P9K9c+ePfBsl9JEi+gKTMQoDV1JUPan7afeeOVgFRKe5zzzgdEAEZA0GLsNokyKABCjcSem9gvCSzncZvUIARREwFFE0UBBmYoldCQtRMZJ81cIKto8JhSZK4QUTMEClkyGKgk9GggCgzkwSNGSnkkIIdhzF1pOCRAFUsSg6sU0BA+/5nPvbFZ/as/+NMYU9lawdZFYOUlqdAXHHHBCyNQKdNtG1+xJyqtiSMQl+ZnBc06eN+KmJo49mPcMHkzdWZoikAi2cy8q0vdIgKBITFny4SJQNEkafaODZC81ySI6LkRlZQzscs5EjMQAUEIXsFUc+N9GkYlQoQUx5wzGIiYeERiVRn7eHGyZFd755UMFJioClXfbRDZuS2lZbKzA+DXF2kc8iQ0ow3J3HRvz1LsN6luK0ubk0ePs+Y+p4vl8YMP34FptXPjxt27d1DH1PdXXnwhV/7+4yd9HOdXbyjGRP3i5uzlT71glUPyF89OTw7PvasXewtysOk6ZNYxv/WV7x8/OL998/adV1+IolQ7QNAhw3r48m//6TTs/uyv/KKyeGIGgyF3Z2snzcGta1FHy0IuyJg05mbE07fvP/nRe0S2+8LO5NZMQWSMHmg4PZ80wbdTBaZ2Mp+2Wcdh7GMcs5kYam98eP74T/9
k9fBJW6uUrISpqDUhAFDKkmKsQkU556x1XQu40cLSV69/7tO/+wf/rErn8xv7ZxfjMsWhy6JFhgQAEBUiBDAgNIG16H6oiEdJg6IoiqmQsYEAKjICIBmaikoEKC4AY66MHEIGIPZTU7WSxTZlQgMGVQUdxwiouy2ufvCVL7/31o1PfuqFn/65PG1lFFwldoGzr+sg3kGtaxkYCbO1oXYK1KU8bOodP7taV3ttDioe2U1YQncxEPsQyLup5J6U+tUS0ugIeVo1u0EX9di63F6/+zP/weroaHP47Wm9/sjVOo7126Qn62RKjqm2EulhQE6KSgho7IwIs0UwD9lM1EwEGUwdZujXISAbMDKSZoM4SMg+JU0OqmkLMVEceJ3SqHEZbRktM4hD5zAEnvhkibqIo/iqxjZ71TFl7dO4HoZsM6RxsbncGhhuJyjTLV/JAI3IJCuX4ImWtiYo7GQVyKaCAGhGIgalN8hsi0QBRYYSx0WG4o9AM2KiwFR7Yo9qmlQBVU0ZyjRFaAAinhhyJtOqqo6erly7uOLqqY5dd1bVOCbr15oGm7TTNz/+kZu3b40xyThO6uk4btiziu4t9rKpZBFTXzlAREeVr1C1rqYAMvYrJmrbRY6KBH3cGIBltYxNaFMeBWLlK3K0XK2YDUtw2OWL7ryuJhVVKQkYzeczXznR7Ml5XyEUUCObiRl6CmKioAACasxchsOSTSuSDZPDLZvGzJSYywhYmE0AZAgqxbmhiFSarT37lLOYXDbNYFELCErGBS8TQoBEIhkLELcccN6yrpFIwciACVWViGFrXUG1rWwhWn6dEl1TBCrcnEKj2mpIUDxJJVgDpkJMRXwENEJiqkoKz4UaERDz9Zs3H0l3vvzBbqOTqtKcPVvfCbkqiYhKSeDlUR06SUJBGJEYSSnGhMy1x/HwkNXfnNxaj222dTPJpLbpx24cNAsYZBErm/BsJeJa+usJMaWUk1zWOgAgZslEgMwGuO0LK/x5LATwkvLbEp1L65z8G8CQlSr5soYx4vKKDghgJQ0giMWxJQV5ZFbgR9uMkm2dX0jIIoKEBc4DqAgMQKLChIaYsygoky+49OL/ItzmQkFVNQORbh1OYAA5JzBUAyYyEWIk4iyCyNvWPCIsfWdqhVVS7ABZjYiISVWKDFHQOqIimsEMAYOnAufOOaGJ5wpyfvft9+vZfLNck3f3Hh2/c/+onu70KS37dVyLb/1m1ZmkB4+OJeFIYKNC1oCYEIZhjH2qajMiQBJJSEylvAxRJSEys5OcSxwWiWWbPgMoiHfcmtzgstTPYFtJR0SXLiG3DfyZqSkXVdFKhVxRlstfAiZCtMWq2KWxDgwYWTSbydZ0hEBMKlIMSgUXtYXKA2yRYQTEXIoAgMkK9YUKHknVDBkQkNjllBUNmS/TYwhAVm4RlwhRK9pTud4lM7No0fWosAUuxXojZlDNefS+ISMTLSiDLSPp/z+AZgqGlox8qAOOYq+9/pmPvPYpG46ryk2mbR5ENBJ6BtIk3nkQ7ZbLnb39o8PjWVMjB2YFD/1mBI3I5jCcrtc78ztX9l87Onw3BO3WJwfXqycP35cRdBheeP75ps4fHp70kpOzcRhTzuScxFS3jfOhyvXujdvny/XJ/Xu7i8Xkyq3NbLp35wDzsj97Z3r9JQOnZpINEWmx//P/4a+99+dvf+e33nr+c6+CaX//+Nm6eQxu+uJzR+enLlkax0XD83q49/7JR/7+/+Ssa+X2cyf/1T9vZnO4eps85zGzc+WOBu7g7us/d3x8CmAYEs3bbtrWr9w+uDY7/954fnyax9Mnb3/3a+vlr/6tnX/4j/7hf/6f/+M2VLVnsyiybuumYTrqO3HN+dpf29m7Oo2rvlerXXC9cjI7Xycx5LgZ9zw9Cc/ffmHn+btYhXf+7I/zptufVsvVyN5vNpL6syR1bjb7u/vTiV9TirlaD/Yrf+Vv5G5DTnb3Fk/ixvxivns9zKZSNeIrJu98jYiE7HxFW9cvMfrtzQOhcHNVFMkFZlExENqWobKCGoBjZmYkYsdjPxpEKCxGIsnmPEkuArQQITIjOwSCUlPCqCpkgZi3jZRkonEcukFGNkxdv1webbqL87PlZrWMcehW63EY1CSnBAaaBJDEBMSYCQBcCFWomF3b1s6z9+y9z3FEkEm7uHb1DteLarZ/cfgQMgybgVqH3roLe3b/4fcy/NW/9itptf7O177/C3/v77/42ty5r12+GCEYVbVfdx0hkrM3P/Xaj79379rOradPnybJf/79b7rKHGjfn0sGNOcCGemgmzHGdjKt0XQcjIInXuzp8kl35co0Oh4uVgev3majDMiI635k50cjdkEglsMAoqXXLMfM6JmAmC59owJg/TgwE4J4Js9kWUFFJHrPZuaDUzFiHmNCNO8YwG/x5IAsoNv6BldqEcq2iLfEe2FPAGToqWDw1DCrZiUxMlQFByQxm0XnnFqyuEGR1A1Ue/J1Rh3SKtDEeXexPp3Od/uhn+20wDFUVAVvWUDiuD7z9RUNwQzL93PKEMe0XuvmhGDUPGrGoR/qpsqCzoWcc5gvVsM4ayYCtXM1waBj17ST2SxYDld33njv27/XUrM7aTvu2zZ44InnnPKQrXG0MeGJ2w2hS8vXP/Xxt773Ts75wb17z9FNhkLYyaFhg2HIm7bZ2Wnri6NHznGMIzkYlqNTl9J48/lXMUaazRe3r731x9+AOMxmi3vvPNy70tWVr0OY39pfdvlilSyCGonZZG8v6ljtTACcb4Oe63rYLNqZQ/TEw2bdx4gUJjvN2A/YTClQt9xwQPRE5IG0Dn6zWu41EzM+e3jRzHCz6dWRa0IclVJWzc55YL+/WBw9Oo4xzSYhDRAmdRoG57xF14bpn3/92ZTchKtn54enF+cndZc3m2pMjXO7ewsYVk+Ots+CYvrLUUxAZYs69LXPxClnXW7aSV2F0A+DihCTmfRjcsyIkrJ41xBWOZ+WqkLK6gOmFFs3FYgIAo7z0BdbvCeOOQf2tXOO0AiUiBj6LgU3aeqZUsKc5pOGwCLojYPdZyfLOG72rlw97fPubO9k+RCEunVPvp60V37ms18Yjx9Zv7m6d/X9R0ePT/kzv/TxMT46PXkCqpKzqiiAZZEc0XnJ0XJaTA6wwZTHUc3M5TTkcZi04WBvX0V+8Nb91GX2ITSEAKLGLqjIOMSrk8UnXn6tcoSI7IJnL5rNSAEIG1QVzW3dDmNvjokRtj7zjGQpJTMDJVD0Pqj0YMbsTAA5OF8TMTL7BjULa51SASuSGTqmksrxDIRb17Njn9PIwaXYMwTCZCgkBT0+lm9vps7XZpnNI/tAkI2vzu/eZnf0bPnlb37LM5WxiB0VUkKBcTITMRTWqip4j57IOVYQR8SOvXeq4BxlkbbyniE4GpWTeM5IDIRccJoMpmCEwRCs4AHEcoHOIAoYgXgOtu39VUQFxmIqEMiiObCTKGrmPMY+pijb+gRER9TUPmdl54L3cYwIkFO+GDei5gi89xRzaD2zS/2ogJ7ZjAwdEsClq2j3yr5kUNT5Yt
fl4eQU/GT22t2Xnn14bzZZuIqX3enq9NBz07T+5Oiwnmgf+7Mf/BAPjwB49+qV6d2XhpjBeL64sruzGzcXuZHZ7l4zCaPp5mw4uvc0L+Ot1180wvLm7iif3//w8McftOg+9qk3XFuJdwY0bnpI+OTt+8vj5Uc//6af1X3KjKAy6iibs/X1m7eURSG7qkLitO5qqHmIh+8/kN5oWk12djQZM8U+enM0MFK1iWeTnV3nLaNtNiMk8WFGQnZx9uirX/7wS1+pxfngrIJcZhgVE1UGAAElRcccSpcyAYBFDM3TNHz9S//tYAPnRID9MCA5AMmSvHfouMgIplDmlww6vzJHRiJTLiWBQESMZFm2SYiiVWwLjBHZqwmY5WJz0Jy6I2av5sHEciRidj6DIfshj4QWSCYeZLi4/6UvPf7md+/+zM/uvP7Jmhk8+ikZa7depqPNrJmuV+eqVtUzAvSBqp0wudHiVHqXjEnHjCuRZWYLJqAq5HpJqd90TN57poZmB9cTrPK4nu4cbMTC1Tuf+OV/9Pbv/T/OHnw1DeuXd2dO3L2aTjeSzUSsDs6yZLHa+wSYVAmNTBGYVUv7kSfnLJAaZGcJkkUFASZgzIKCDoUQnERvFxHjZlgvUaqxgzigjV6iiQh4pNp5Ry4nGjJkFo2gFsckm87GKMnGtElJ9mbzyzeisu0ukHsqEF8wKy1loIqFkGqogGVBrUiFm4oEUGA4iApQwjlQBl6ln9CSiVBMBSA4IrYQgD2DYQUcVQzIiBwiApMVEG1Q8kL69MKu3fqpk4dvI643m34ce09NE8LFabe3t/fm5z652JmPMXlPFdfe+5S7Jswchn7sMlrlgzNwzAjb2qGspfg9197HpMO4STGqiQthjGkYRjJGJSYOIUiSnGPTsMdqtV5ucucdO9+aC32WfuwmzaJtKsFMSIE9YkmQKGFJi7ikCZF5229diop0i4LbCgnFdES0zdZwCSiBKaABEhHKllSCYFsfSjHBICpvif7FALJtuyoeFrZiIbIsiYhLAxeVzJRkQCwIU1UDK71whltGMgKgimw9Lli0EkK9/IMtuF2KAFj4zYRQeuVVBcmZQSH7lsoxRGTkmBOzY6yT9M2svf7iK08+HPvN49bHMZ6gS5Q9KgiN8/mioxS7xIZ1PQNMm34ZmEG1rhYgIqDBITgbj58uOEz3bp8MoXYpdcMk1Pkc1EZTi0Mse5rSjcWE2ay0hDlv4Dg027ob27q6sCzSACnn6JhsGxZjAkAr8KLCJyqJQQZQES0qA5gClD6yLQS6KDWIZJYBt+qdimJB7agyuQInLnKhFcYwom2pQ7ztpAcsfA/c1pnh9poEBCMTUdRth+Q2PLj9xohcwNxMbstrRjJRQSlaVMlhmWxtSlTShGaElE3QtvksRMKCOiJQtS3W1kBFx6Sm2XFAAgB21L71g29W9UTFLs42g9g33vowUXju6pXz9aqqQiCta0R1j4+OP9t89tH7T3Z3eb6YrjdDM1tE4xw3koRq0kLQQAfFwgiFnIWmJpa3/04ooqC4vWuRITGo/RuUMxcmLxoAA2kWYofb+r8ipRlYuRCEmPByUrYSZyMkclvUNIBqYiIEFLAssQgyBVBdhDPadg4hqCATAG3rBa0AiSCnWO52iAREACJyyTsXBTAkNMkFE4bos2Q0U5FtU8ClEEmIWgjCQJa1eJEM2DmnmgkJDKy0kRJLzkieEEwzIpKjnBIxIGJOW3jlv+UqYi44OslATHvT/Z/+Cz+7OX/S1oTAQx+ZfdfHCbnlajWkVE9m603fNs3JxfFkMfXkhiGncXNxcc5ULXb3To5PfY2pH69fff7Z6TMT6FZnDP366cn5k/uhmd842J9Pq/fuPTzZnAskT2hADXsDa2dtLzLmwVV8unwGXf8rP/ezi6o6fPjg5rVPDGej+Wbn9mvACCDIHkMlxFQ1z33uF955f3Xrszy/3X7/j3748MHy9hfflI7vP7g/X+yQM2qbTR4evPWjxQsftTfv2kl3np7euHl9AmM2NXZggoQqCo7Vql/7x//FBz/+3XuHPwwzlaafX5s+OLo3370RZrv333nvYL/psL/28sFXvv3tT/3SX33hpc+c3HtLhi5Dh4R9ymCVQN316f0PT85Olx9/8xYdHz87O5pV9WYtMaKxO1l2V3b9GtefuPPZe+8/PNgLTTP9zKc+cXj/xycXx5ZkOeQwmSaRh0+Xs+euw9HpwdXFtes3fvDuyWLS+ABq1XRaPzzdmJtcufniZLGIoIrkfUVEpeWPmbcVeVuBgLTc/JVVjRGRC5hGwQyIRbXscIo3kglNLafkgQy3LTi+8uUGImqgwKZxvRz6DhG71XKzWac49JsL05jGLo1pTHlzscwS+83Ssarp5mIlBsNmNYxjqEIaRSW307aQFOsmAGDTBFBzlYeUVcQ7N46jmY+DpJhWAEkkjT0Hl4boquA5qGgzm71y96MvPveaC0EBjy5OU4qAzlXu6OLoj7/+9Zu3P/nv/cP/6Ct//t5ydSGX44EPVFd17uVgb55SkpTf//CdusHRnt18aXF+uFxuMiFjFYb+QjQwOnKGzit49LIaNlCAdpmWh+vr16eH67NZO++sdsGHtZu2jGDHF2eeWRGrNmSk4Oo8Rk8EnlUSOjQ2LGwoQwR13otE9H6waE7VsppVGJJkXzlkYweghgSSEzkUSdyguGxZCD2SAiOCkrOsicEVTBoSGxmYFvabqZRqCjJTyIxgzogJCZ33SYlKg6ZamIaU2HIKRENK4p2oYmgqM0TMZj7sZK0ZB5XRyTp3S59yt75gXGfpzLs+Ze98ThIc9RJHs72duYy8M70RqqYfO6YQu6SayCiNmbJ6dIoKmLp0jE5VSBldXfVLizLdmV91iYZxNWnDxpFqEM5jjstxfT4ur9y4enzWYZaD2e7D9z9g0xTT9f3d8aIzlVBzUrWYqwCTqk7dRrQHpFFyv1xNq8qD0xFJasrmCLz09370nfluZZkIsDbwwV8s17tX9tV7Xzm2lLOgq1Lf53Gj2YD16o2by7OT1jXKoad6tV7teNdvxn4Y5ou2bQitUlDMeT5r0SESKSQlPD1de9ds4upgchCHNKQcJWegiW/j8vT2tRvvvv+93d09M7dZdpIlyXD7zkfuP3hoMQ1958At+3PANI5467kbb73zeH5wd6EWV/3+zvzi0VHlXDYlsAfvPShXAXk2QiNUQCCom+AbX9YvYTpR1X49VG1dEY5dj0AGLCpJcnk6Jk0IYMQcvJKxiKpO2mkcR9ExEK3WpyG0agYCmoXRTZr26Piwms6yZokDkg/sOktDTk3rNuuNRxpzNLbBRl/jfrM7mYZ1TVFWofKARBzOR3nh2ougNiDM2/mDx6cfPDx79dOfsXDy/ofvH6/Phpz6IRKbD1WS0QwcIjr2zSSNaZCMKHEUSQqU9ndn853Fo8fHq+WyrkvET+KYi68cwDzxtK4+++prTVNnVQZyoRQVu8KwVEllQhATU0UjAEySkCDLoJoNFZ0zJsFiXy8UArRsgmnMCTJ454Y8IlhWULOqarNKM
gxVm4bBMUFWgUhEBmoiqEbADr1oFjFAVNDgmxgTUDGrjwapZOOzqom6wHnMy/PVR1979cvf/BYRqinx9g0ZAZCY0QhRZYs49M7XLphlBrVs6Kq6qnOKhuyQRAyobL+JKCxVnbFX5eBSKoh0MiOmoCZZIjtHzqkmpmAi3nlHlAZMOvraS85JovOEZjFHM/EekYEMTYmIiQu0A4ZhAKIrV/aeHZ+MaQNZ1SAnURFVQHLrrp81AdTiENsk2cw5R8wpJ1GLqv/Wdh3ywOt1RxNPTrrl8uLo2Gk1LLvJzl4NcHp8Tja5fv1uFcLq4vjGrcb5EEfdbaXfdF3fHaf87OHxxVnXzmY7V66ue5YYb3/ktcXM5ZyG9frk8FAlzvYm1U6NDQ1jpEDnp8f3f/TW+fHJjRu3brzxqlVORQHVkRu6zbe/+f1qZ/7qz382kVnScvSXq4vFlT1qKFvMGRjRsgRqXcdvfeVrh0+ehUWzc3Mv5WznEXy32N/DKIFaDlXrm6RjTCkDivlAUx7S23/4e+9++bfrrHMKRiRgEpOYoVHwAVkFDEobjrEAe8CCzQHmnVvXzi4ePnr45GBnElwwNRUgpsm0zqPLIlSiK2wqilD6uV2/jGRsCRAACeTS8sBExmiaQUpRsxGRISUAINYcidkATRKZMDE5B4LgQFViGgBN40AGRJSToloV3B5oXj/74Hd+ffPl3/zYz/3y7KU3hx4AXOOq+bSRIVaVD5V3TcUV1XsVzRzNQqIycFBOva4zqmPnmTCNQ9bBMLlW67nfub6jXlWNoQlUKejcY5bUHDx37SO/fPzkvSxPSZaLGifij7OO0QyQ0YgxeC8KDlGQgEzFEBhIDTJ7CpUDUym5FUBAIMttaDyBcLIUciZSYkIYomaIsY6DbpYjETlmKE6eln1g7QdU0aJ3IwABOPNTL12VzjZMFSQ7f/DsckhWREAiLOaGwuhVw1w402gKDsioTDFQ8ji5wJvBwHLZKZqBaCFUAZohaJlMxczIgMgIhdHVzgfPLjhCUWGVWAAwAGzIyACUFYeUVqkNO6/ff/YI00nDAhIX09oBDV1/sHfl9Y9/4uD61TQOKFBPas2y7E4Xsx0F28iG0YJrZ5N5TL1KaXaDYRzRLEYhRDDrU2QiAvA+iMCm2xCTc05SzqaKGMi5AOT8ydG5qExmrWRFc2NMgrCzu1tRUMiohuQMWDUhg2NXFoxWWO0mhfgGW7YL2DZuBnRZdLVd62/HznJtbKuR1LKCMPlLXYmoQB5AEMGVbe4WN1K+stiTwABUlIjVwAwcOTXZklmA4JJDBHCpU1lJffK2HKn8MGWxJInYIVJpFwfLhVdVsmY/8WUIGCFDIcUAFgw2AYOJGYgKcwVgxthMWsl5Qnzj1keXT6bd6n7FiJabOgxp06e1twDepXxR++l5NyALB09mjGY2MKPlQhe2RU2npw+M2M1ubjIJUkXjZD7DjaNxBGJRkZwLP3w7AYNpygQYwHLfb11FdpnukVIDty2lKm6pcoB0S4chKzOWgWghAGJpqhIRAEUCQlbNZiZbAwsiODUVE0QsnxsgEKGBluwYEQGAWjmSwORM5RIzZGrqmETFzLhcqqCIqKqFVawFflTMabC1OpXdfpH/CmvIVFSUnSvthkScczYroQdQyIhU9CZELsJi4S6XVHih2YpocbugQbbSkaZmY06bOsxO1hdJra7Co0dPhpg+PL7YGNTNdEjjZrnxO+3+zkJ6cUB3rl9/5917XuqLJ4eBdnExGfsBXGibuhs2VTsrqDE0IMQieEnpBCi9ZuWOBUBchp2tD8hUtjYwKKwZA8TSNI+ICHIZ9itSixC5kqS0IhIhEPN2Y0GopiqZGC+FUrSSJURSyaWcsKTYsmipZzPNSI5dyBLLfwXajt7l6JsZM0tOBf1upQSgtAoSIXJOkdgZQhYpZkJVpRL5JNxqguX6KplEZoZQPG4AW7GqWEG00PHRI1E2zTkhE7FDKqakjFQysP+2q0hBRIJzPnC3Hv/O3/0fX78+k3HYP5j2682QR5YBSELlTWESfDMNvqrPT9faRUkKk9mg8da1O+dHpwp00UmXUkCNXX7y5F6YVdDabL7z53/yPY5D7aa3b92ZEJ6vTsY8JoIElnrRmDVH8MG5yuHQ9yMR9wPvNTv/4H/2jw7v31+4vZpmXX6Ei7lFB1whFPMKIOLYx83Dh+nh/WbsmuXwB7/9r5+b7h7++dd/8J37btN98e/99ZOLYRJ8RrzRLoYnj+LTt84en9y9/lmcTR7/8I9vfuGnxZIhlrucACqZXzS33/giT93wlUfnm+HGtFqAf/nWbr1a/O7Xnn7yM7/44IN37syrP/vyN/6srn7tH/zj//f/7X8N4+msmT96+Cyn4eHpKXMVKMQBz0He/vC48biYT19/8dZ3vn9/OJFNlwH01Rc/8fTo7W9+88+8uOtXb2WVZmdxg6/dsJl89wGciozWd9FYHj45vHPryvnQTzw6HttmqnGDmVZrxbC/e+16mMyy92rW+Co4XxBVTMREBqVKopzraIYAmEyJEUGRyBSYLGehIpGbmggSp76PMjJbHNM4jjoOKfUpDXkchq7POa3Xq5PHj4d+ncduHIdQhTQM3aZDVJDSD4Eqxp4lF5wcMIGaMTtVM8tVxTllMwh1QNBxjFVwJqhZosayilYRRBz6QURSHNCUCJ33psZ1jUx1qMGAiMnj2Pff+sZX33vvnTc/8tEbz72gPpyfHtaofpdPnq4PP3hvuOhfeOXzr3z07ve/9BvzK9s0Zgh+TGPfrWfTRRPCpsuS/bhZ1YhJRldhBc1q0J2DK72qizzx1SYur17fi31cD1kwB22yuLadnhyefurO337z1vq//rNfP3j1VtPt/srn/9Kf/et/2db1fruz7PrUxyuecBio8jokrXSMucS6hz6NA4wjhGAqhsHYBQVKIo2r2FWx69TM1z6pEFVVqFTGLELeA4F0m7qqVLLEjM4VL2zO4jmogaQcKDBRMZFKToCAZFz2KQVnTkRkAoVAVfqIiQHikEERzCuQY7MUmSo0cuQzYBZlNctaNyHmzGQwrNbdeb24bZNdlLPVxdJVe1jtVa7KQxTJvYBvJvUkxHjWkIO4hJDIeYi0Xo1XDnZ5iBxqcM5yNIPM0RNlSs28cQQ++FUv1f5d1vTsR192kEJFg6TNatA0Lvb3j/MmTHbPM0qiyofz077HYTZZUE1qBuBGMUTedOP+3my+d+Xi+Lziar1eA8GoqQo1uxZUprPdvZ0WTFZnq3qAcRhHgyvXF1U9WR+ulMYrN/acrwL689WZAbDzV2/eevLejwgCWtIuYZ/SpnfoDk9Ob+zeapp6f+fg5MkzNFqvBiVOfQ7C04PpGMeAgSqfk6z7tXfk64AxNYvqqN/s7+3rhek6bc5XtJFnF49T7/IC6spFsLOun125nqvgGt+vhrauZpOdR0/uVy3ffO5WL8ef+pnbP7qHx92wmFQxiYE3rlbng+MulVUhQKh8iolmE1AhNWfYLS+m08XN61cOnx4hu53dhcToGBOxilaVz951OddV
FcFykpSTWk7A6FjNPHDtXYprS8mzt5jrSdWNCYDSOBI5RGb2IoKos7bOGl0FxAnMoYWy961CxWqWpW4aVRi6uJhOV+uj6Xx+fr4mg53J9Uphc3HRVLN11LfuPTu4dXdnrzk+edb3KyQQyaJqoDomMAiuMmDnMYOux56MXOWryinAwZX9i/PNO997b1JNGGomDyCQc9X4HA0zeKIp8+c/+om2cgbqsCJkUOTKZ1MihyoAQKBQ4CrFiltXKWkgD6oEhIQpj+RQJZGgqjpHiGwSwRw7r8qkXseBEBvX5JwcQ8YkFsEYOAqiq13Xrat6frm/JctgRoqxciHlJJiAglkiqs3UNGXQKswQMKXsEMGIDKvad92AYGoIBowERCoGBCG4MmI4xypqBiEEJGCjwI4dex/sUkDw7CsPopmRUxoaP1dDs5QkOzP0wGyaBY2LV4HZIwCRlQ11TJ33PlsulFsTVFVPwdSJDsy+rEzRwLEruQsx6Po4nzfeu1HkaHmet2RIG/oBwKqKAXgymfbjUFV+AK1dNaqYmA9eTR17UwCEnHNdhXIVbFbnnmtGPnn85Pjevb3dm+QnCde3rt7cnJ5Nd6imG7efu/X06cO9/YOdK3swLB+8/b6A7E2rawyrftwMQ6yTw/7Ruz96+P4PuyR/9LU/+sgbL/zlv/wXd2aT7KezqzTdnQvq2Mvm7KINePHOg8c/vsdgn/zZz0yuLAwxx8jgMcvy0ZOz09Wbn/v03q0rImMgrMkfHp1oj5PbU/BqitxUYGojYMKn3/3h4btvh5awwYu8vuYPVAb2Luugkl3rjazPY9To25rEt6l/+o2vfOO3f4PX40IB0IklJFEg74PmBIZiAKaBQXIEIc8OTOIwhqbOiqqIk/lmGS3G48fx4Oqur9vF7s7Z4QkSucCUsdsMjOwrz2QmEQEmTXsw3zE0KjEBclmTIpoKmZmKA0MCMmBEMCUTM2e+Aix8FgGiQIyWEwgSqKEAZxXvHUhkJCbOgECut2RkYUJe2XWbb/13/2L+wvc+92v/I6qmF++/2wdpp7ODl18kBzH21ZXg5yFj7mMPgDiMsRs9eUNnZJIHMZEUOeBkt253D0aCVLucBgdx0rY5DmkzOFAME6Gwf/cjdz/58+/9+HefHr47UJPRkVMCGyP2OaOgJ3XsU06C6Nihs2xijtFxcmY1O1NvDtmYAMEBZMhCPDaovnFJPaMjTJDSqtuMGYw8tR7EhiGKCDsKVSAiJnAGUUYiMbSYxIj9ThsQuAnnp4ON8eTp6XaF7AjMTLbvBaUDl4xQEZQMyzkBaIoIBKiwHR1NU9GC1ESA1AzQZzN3WbUOhFaG2EL8JTNGcATOk/MECOCYjERjKVzTBFwpBiTOOTT+ztnTxz6ftaxs4EPwdejW8dqNa69/7LOTvZ1x6GvmMJu7QIlj3baVJyOEhItqN+Y8pg5MmJDZIeOoo/ONgd90Y85qgFmM0cmg637tgnfeO+SUhworIjSJMY3S94SWzJJSElUVYO/Q1X4Kmkwt+BqRgSADMXtGU0vFjpNViJ2hFOmPkEUFiaC0zF1CgrfCDBMxXsZNSsgLAICAyQC0eH+BiBS3DpUsoyOHQGp6aQopMUEzMyRGZAJDMFHFgpUCLE2dTLwlSyCoanGk/qSy3TneIs4Rmd2WpwuFTVMyywilwxu3nXcEW5UQkBiplGOKCQLitgsKsISqyDnHonEyW5g8vxxy1gu2s27cAFFw9enJo0k7n833xiFPdvcPj+9PQmhCI5rGOCKYr2pj18XRnM4gnz99b5p1Z/f5Rx0mGEwwhIrQeecNTURUMhmCZiQgQgzaTKaTwPV2a1B+MlBRwlLlBgCEoCpSnDZWgE8AaKUlHRTB+UqLMCHZCmIcyEyl2Dou2wJ/YtkzU0InkkotPSCZFpGOijJfFCQ1uewsAyTSYm7QrS+sxBJVpHg2wUo1mGx1RhWFkijHUplXRA7Jadu8zgUIW3IFggiElKVAbAutHgDoJ1CeIgRCOe7bj8tUkpg5romYjJCd2kiuctyenR5NFgeHjz7sx406fP/xs9s3X3VV3dZIuqNmfTIU9cQ5QR+7tOy/8NmXHh8+EMO2bjuNwfsoWRS882CYVRgyIjl2WQUAGPky7CqqsHVJABWy9eVnXpDPRRsFUVPNRIRcSCDK5IhckZFhmwZE1W3HHJKWZVs5nYvVzFQASzUUIHK5frVQovAyb83OxEQEEJFcOZUsq2FJpBXzmEkWRDKV4qQGK1M7qWYDdY713/SNGIG7xE8X17kzA4mpZANLj3bWbFYoW1J+HwAsl3+Ma++mYEZUCicuIbYGqpnZm/27rCIg886xcYzyuU988ea1RdJuOnFnZ2eeoFt108Xkyu7+2eqsbqZ1TadnR4udRXDEWAcXSPNiNmXvu2F1vtkcL09feP5GqCiNMQ2rqtahH370wSOLcf/KtYO9HXRV3637i6USD2LoK08wJgnQKKFYTimbkZnfqWd+1T167+FsNtk8vPfg/gd7L0yHlfjpFQADo23rH+q08V/9V/9Kfvzw4lkvt5vnX7y7OTydXfRXJs2Pf/P3b7bu4Kd+ejOt/UuvHeDs9OGzOku9Pj36jX924+pLB7c/ljbZIHNVy5gwsMboqhoYw2Tn7pu/MKyfPrz3g5tXrqWV7PTds83Fmx979YePn1zdvfGVL//JS3c+/v3vf+WFG9c+cefjP/7ub7FrZpVb5SGQhdpP6+ZiueklP35yvjNtb+zPDh93O83eUK3Yw2Tu7z34YHW+ljzu7+xxZVcWC53cfOeHxwZ248a+wYVGGEV6pafnnbjV825nHDa7811lr8ybztr57rU7r2CowDkDqIldET4BCam0WWORBEwVEBGj5HJ+VqjjZjl24/ri2TguY98PfT90axWN3WYUW5+f5zjUrY/DKCqxH8dNN5k3gNptOssa2iqNWW3r60PLRBJqYnKSNdS+FCuwYxEZx1w8tXHMGCNXbFsMNsUhqSq0lZFD58cIVd2QL+ZcUhDnXMpZJYJjR5BTRmMokraCoYUQzBQMnPdqFq37/T/8rb/wU1+4fe2Wy+N6qZbX0ymLxKPHb//mf/WfvfbxL95+6UWqh8tHgh5cXZzoMKt8Vq3m0xHz7u5Cx3G17tfL8ytV2J9c2ZxfMNb1YjJcnLY1DeszFP2Zz3/q0bOTdNqdnel+uEbz+i9+7mfPj7/5jft7T88vLh4dvr23s7s7BxNHfu/KPK4779yqS0Q0aUPdtsuLzaRpjWwjOF1UdRNq5pSEHQVUY2Biz4xEwU8cVwCY+2GzzHaADt04ZPbsmMezoV7M6jDt+qFbZl9zO21NyZM3I+e8KTIXDyS44JSS8ySWiL2obNlnaIZKAZnZ1EwSEBMjQYhZMpCorLsuorKNdYNxOHcKNkSEoAYpjjO0Lg6RrH7uhXMymLQsB9O9+cYKciL7uhIzYxY1B4yu2VwcZjB1HqliYOIqibD3QN40K2AILXI9am59MAPImQyEKprth/lsVnG3upiF2pbL+aQ1oGo6C/t+1fVNFjS8dvPmN9768f603vQ
XfRyavemkCkBWTyeNr84vljmL6gAEdesX+5P1s6h5EnN87dZrfX//7PB0db45uLIAdlnw/CLno8fsKu181bIpD+ux9TVCjiAXy6NuOLcDd7gAAQAASURBVJ3ArirdunPz/GJVu3pMeXd/f15XddVcnJ+CxiyZ2aUkgHnolt1Ks6AMSXtJmqbBwWDL5VnbNA+P7w1p1BFMFVEJcb6YnB1fKLnRIG4GRIg5aopHT+67pA1Rjun05IiYJ1M6fHY8vwKPjx7lOA9w/L/6j/9P/8n/4j9psNWR6ir0cbnY3UIcEYwZRNUMYlIVrifN06fPnAu7+7tnZ8vz84s61K13vgoppyjKPlAIhoqOISs7MmEAUDDnfeoTkCS1qm1zLmsuGVLX+Dk1VVQrVvx6MunWF+wwRwVQdg5NuvVFYLdaX8ymc9DRFHJOoaqH1dohS8Ku65z3EmESAqZVaMmAvvaNt5rm6vUX7uS0enb/3hD75bJLmohRszmAnMwQTNL5almHuvXBuZBi6vsOHTx+sJaE1+YzUEhJNeccs3NsYg6JyKaEf+GNN9rgMigqsaOcBZFzSr4KqKqqznkEimlkdN5VYxwdVtnEgwcBUGZnYxZmB2ZUQjYICjnnSG6CmYydSWb2oCC5R4IomckhWGEtJhgqnrJ3zGEYY+UqoCymxB4BRSnniASiERkBGdAQQwAmRTGrK28aDUEBfFvtVdDUzogQQs4KSt4gONc2fj30ppSSIID3PqYUHM8mbUoxeI8A3Wbd1tO6qRkMROqqIgRUcGisKTACOjU1A3LByBA8MIFlMwrM3bDxHJgCkSCC6Mi+UjNyXiWZCqKo5YCVQK58PYx9gRsx4qjJBwre5RzTkDfdUNcVjuyZAWEymfRDb4CmqSYDzZv1wBO4OrlqomDgXBj6ThWCd7mMNwAAsDx6Nq8nq6PRcn/j6q766nxcvfra7eOHJ3GIO7uzZm+nW522DbXTfUf10eExUrO4MkFIedMvD5eKtrO/W4VqOmnQadcP77794Z9/9Y9+/7d/+7W7d1658+onf/qT0kNMVQLcrV1+9OjJV7+5evjk9ivPP/fmi0MS4OSDJ4fjef/w3Ufs6lc+9urQbabT2gl1p+v1RXfr1i3zpqAApjmxAnW6uv/o7a98PQSiCekEZ1dnbuZjH+tZm7OOqQeIwKjGlt2E/NF3f/S9f/lPN4+e7RiRZ4zFoWRk4n2NYAyGgJKkkGtUrK5DKvkRIgJCQGBv5NerEYcUHI9JM8NsMnfX/Hq56i6WCOC8swyShRQQCGFs0KouO0FCk5S1NF6ZEeh22gQpHFZ0BIgE6NglKwGHSGSGkBUDghYUsnMCyqbj2DtQA5AUCY18ABVFG1NShcYh5aG7973f/L/+R/O9mx+7+5FbH31jcnuarBt618wrqLgbR1FlV7GhjOitzmPS1BGCoU52J6FdUMUYWAgKNMRRTVniOllGUy8qmtfVdEcao50XqvZqUz9d9blb98oGgNGkIW+i4ziOVKqmBEQUNaMxeee8bz0xoGSHCJpQjBFJ2WMwy1UItaMRsyWQmMyMmbwjI58NJEWywlgBUC2bdwR0GDCrSBdmM5g3BuTQVy4k8OO6C9uZsKzJS+4PNIMibGVuA7vUEi5X6gbbIncEA7StaCCmUOYkMLosZVQpZY5aUoUKSIAOsbTwojksNhVTw5hlJHSIaDImiTlPmK8Om1zlEeLggqIYGsfert184ZOf/TiicyShDnVVjUMfU2L002qSNY1jrF0tlrP2JA4MnfeI0HebrWA0dMSYReoqMLKm3I8b9uKrigByHIL3pluFGhT6bkBwBZQGBEgYqhDYe1JRZRcAUEEcew8ExbKz7X+BMo4SomoupdeOvEJJZiMAEDlVKUAfh+QoiGXc/g9bg1LRV7Vww9EMVUTL2O/QIaCBOOZtyRaamXERAMjlnIvrhMhxsRuowrZuV3VrMXKFtYxYPEVFFygjs1jhKhd+jWkRtohIxArtaHteXGa3XKGrlBEZqZwSiIjkkVlVSuNBNmHvXEDkHcRXzj+8h0ldrWRrZApqlnPMCqCm+WC+v9mszvtN66oKQ7JIRIk9O405otHepD49vm9d37gDqYIA+NpV3oaeDI1rJsS6CoTEnpkBwVxV+zABy5euIiiMYBUhYtFxK7ptLTlGuEX1AwkymampIjo0VDMkktK5hablM8SClC40YgYTAARkMQBkABNT0zKjGwCaSiksAxMAUJPyJ6KqpqUI0kwIWSRtQTylRZ5QLReLk21btPRSKgAw0JI5IoJtCVfB05cLFlS3bh1EMsAy4pWvg/LzbJMqYFv4UZEJlRRE8k+axlJOs6aVvhcZ1+Pq2fGxIL7zwdM27F/fvdbJoJL3d/e7bmiqdr3ZXJyvEaieU5iH3oR9/fb9J3/h47eHfB5T8uxFlBm3EHYwM5UiS1LR2oAYCVltWxavVvzghuwKSAvAdEvvZpFcsnVbsjgYAIiU4kcjJNGMiAClcQwMyuuxMfP2GJWLG1mt5PgUsKQWUUVM0aDgRNTMFE1zJkImLhYkIi6lZ4QgkqEg7HV74pmqlNS26aV7zwyVvZOcRTISbu+lCsDlrg2AmHMm4uI+K8oSFtYRUHmwInmiWkufHZcfmAyk/OQErnhY/x2pKCarAitoM9/7lb/1d/JwYTBGAwDnnZs3ziyddieOqparYeimYU6ZRXU6rb1zfT+SxPc//PGmP7772sf4AxhTXHWdjckRpdTlODTV/MVPfyTFleace2WpXD2D7jyPA4UaolrORI3oIIxNO2P27Ovv//Ctf/J//3/FQWRc/+BLX+rPhi/+D/49qjE0QVNmQFBKMTtH/Xr5/I1b5/XjxXOzyY7PyzPqLngFb9x49a1v33/yJ+/f+bmfHxGe+8KnH/+T/9rlOj0hf9TtHtzAUGuYw2bj5xMFY8eITBxkk6gNlgyxeuVjf/P6nU9d3Z8d/eihXDx7Ye/a5vDsxsG1L//xN//T/83/9rf/2W/Ehx/+4I9/+zOf/RTXR9/5/vfmu4shY51gWle7M46jDX3aXczPT4dJVW2GfPPG9df35u/df2Qp9+vx7DTuXltAhe+/973mtbR39WO3b91594N7wcedhUGE02EE9JLo+OjkznPza9f2T8/ixboHbq699Il2Z4+mUzVDQkeuaJMGyM4hkSIW7qBpKveCEosUSWm9+dGP/uzJvff61TqO65xjcF5ExAAda1JizGpgmhJqVgNLUY0wpjFnIWZ0bouiR0CEmBIlYCJFQTBkAAIRRbq8WRZ8P4CrPGQFIEBl4KqerNfniAxCBOA4iCozC1gbKgQwAcbgqpBT9lyBxYorQgaGdRxC8IRGwIrgKycq4FgJZ/tX3nv7XVj3L79w95Drs7NHOa3nOxMntnnywQMYb935uzfu3tp665IOXWIgNh6XI04DVdaPSRTjxnarHck9yXhxcWZ+qhqdz29+8uP3Hz44PH706PAY4cbq9EfN/OB//r/8333jS//0D77zf/zgh1/7a7/4c7/11ccXxt3pWdMEbngccs5JRJxjduhrVH
PEtnttDqJVoNdmV9uqljERuWTAgXHMQPwz+1fYVMa+qhsMbuzGq0h7ezfaCcSx278yWZ2vA/lbV26Nthn7lQ/VGCGCBIkVeRoH9jLZOehXEZFNhZBNksTOozKTgRAqEBE7zNkxecdkxuQzZEc+ohGBc8zeL5fnB551veR0XFebrnvkYTKeP+JqX/euwrqPerY5e9Zcux53rghXaknaHWh3JPaOHKEBovEWzkyZhLDrBl8FsbGaTCPG1K1BVGNmQyInGQM3qjxvbgBiHjpM1gaXbGWMVTvJcUPqDNEsA9eodrY574O/cf3qOMjTJyfdSnav7qOXaochkmsNc469COS9xWS57Asrbr7bzGaVVbQ5Ws73Dm6//vLxo/diXO7v7U4mLVc0blbO+3YxGXtSqXeuvDCOnVlq2gbZGI1VdExXd26fPHliqscXa8sNZlx2m73bO1Ul54+O525ybOPzz9969OShjpokeo9npyeumreNhyy56/YOrj3dbKJCG2q2VFN7sT713nNT9X1fsx82mzD1+3vzo+N1g7Y7a3TsbiyuPHj/sa/Cqh8lQU5pjZY9n57HNz65g/H8f/hrv/D/+S/+M+oykscQfKN99s+/dOtP4OsAsN1tAPgqyKprppV3/tbN60+fPXrh7ou7u4vVZrNeb4gmddNoTCIAETBrhtFA60kTu965qh87VNIETKwSyRjBgG1MgxvdLMz7IdaNNwJSgygoFEIlWXJS58B5RwAefUywXEHjfd1Uow5RIqjOFrur0yViAMsOg6ub2QSZ1qFdfPW7b41gr735klXxg/ffH2J/dHh8cbEODRFzVVegKKxD33nvG18RcBbJshERCjidzDVlUVDTjOK8dwCOAcjnJOyoJvzMax9dTCZRMwoS4Tj0DGTBGXgsEFZgUENAVPKOu6FDEiZzaAyoyCIRQAHUsjITZEVkETUC572qQPGoWx5z9OgUMrsaTEBQNZNHxhrNkzoCz1iJLBXUwELwKUUHDs2j1G7bYSOSB2ZHRKJABqKJnVcDVMtjQu+IaBykbipyPMZYu/Da3RdfeP7mH339D7ctHew1CxGromM3phgcOU8GSs67MPOevCNSYWJmh4ACQ11VgKlgEzy1kgc1CM4BAYiJQlJ0XBkgoCKZSgaDpAOzFxvRlJmJnJgomJkkieRKcw95zzFGQpAsiJgs1xxAYdLUdR3OlysDa6fNMMbCKahDFQCZwHIyNfbejIgcgZbmRyQuz4LZ3vzkwdOTR49vvnD7pB800MuvvLA+3ZyeXCwWV67cuHq2PjZIOwcHk6o6+uBEhub2C6+mCtvgvv+N79b1dHqwO98/6JanwYkMm6Mn48vPXwe+dXy4XF8s/+gPfv973/zj1lWvfuSjBwcHN3cmj3/w/vrhUWvuzU+/7tsazKMPJjkPKS43H759Xwzn+7PJvHVAw9ny/R+8deXgVgg+l4Jy50ggZKZ++d63vluZA4Sji+Wd23dchUMazDSmFFMSS+gc+4YG1IvV7/36/3P11o+n4CY8UUhqhmxITA4k55xTcWMwKig5cpqSEQuyswQiVQhZDMmN0V3Zu3v7+oN3fvwgBH/69LjZid7tLeY7aM4xnZ+cuSoYG5mqZOdB1V57/s3patgsT530ihbH5AIHCoqZTBQQgLICokMroSVgotJZTciEpSy61AdTGeEJzUih7FARAnmCTCKqSuyNBIwMxMyAXIzy/tsf8CC/8Nd/6XtHT9urNw52r9b7s1UePZmoWC/9ag1mSC7HVLW1nzg/r/2sZgcJABg8MQsHhNxrTjkNMbhWBSVn9IExV1faK5/5hJucn/633+s3G9OWgAJYbUaWLReCROUcEWCWiITewGMOFcdN55s6BA85M6JIAhRXGaGxrwRDlsER9X0/DJQlGDgiViAwcOxiGkDR1x7BGM0bVi6krJq1aqc4Dc3eJPeZHJD3FBabc+jO1ltXkQcwLDAXcIDGJrCFrMCWe4+ETIV8SKwWCsffyLQ0kIOaaVkbFEoiGjKVrkQxwMKrUXMADoiBCYiR1BQJnFpFHJUSBFE2nKq00g/d+lHrMTR+VrtxI7nXF1585aU3XnbOIyFvkbQSQiCELDCkIeahrReMPMTeeVe3e2kcCWzoR1NvxmPuiMihc4TOuOv7HBMT176SIWYC70JOYmakmtI4DD0TgHeErEiDpLqqau9I5LILnrdwLYIsmdmrKVNVemm3XNktu5qKY8jACC7nz8JQImQGsKRqhojEJgJFmilwY1N3yVclBPJkVlhEqKZMZABoRkCgaCrEbKoI6qhIc8W9ogZAxISoVGxHhAaFsQI/IVobAGDxmzjnRASBCsuGkBmpoE4LsqhUqgOUTjbVYq8oYFxVZMaSVTPF4rChElxnk+K2SM7x7MoC6c7mMeQepO88GJrzXDvL55tjA6kQm1D1Y29mVeXH1ImoClbcDgJjHohiFWDsjhY7IU1uDzxh1qBah4TEIYRQ+VBXpZCOmMAxEppxVf3EVgSXSPZy4rvSpUWGVvqtyBWIE4Cj8iEYiCYwRQImkiwFakSlhf2SX1M4gFAieFuE1FbGKMGlAhGm0idohrgNJdmWDAUIhLqtLy4i4/bkMbRLOjohiSoVWQQEAbPk4pC6zDOqlZbDrfiLRCil25oIUQs7S0S56BRanDJborkqmMFWy0AkZABIOZFzZkpgwXvIenJ0PI7d46dPu4yrIS0Hf+3Gq+VDJc9jzIh068qr319+fzbdOT8/G8fRt9Xx0Wlor3/5a1//zGs/NZstlv2xJVSFJOoACImIVMzMEEwlb3FKUlLMRszMLucMhGBiWj4cQ3bbTxWEmRAhp1SinXApwSKygZVDVtqBskgJoJkiU8kryiUqhEpy1KyQyrcl4EwBgSWPpkLO+eAgJWRPqKrinDcDBsomJiagpW7ILglxhGRbdxghMBGp5kKGQkIEBlDcqkeybSRBJOcRCNEBGFpi9qoJCVUNROGSlE9EhI3mAcGIXOmoQ2QTM4eXOu+/i7VmZO8RkvzKr/5N1I4ptk2tWff2rp+dPxUSx4zIVb1wVSDtquCJa0VUoPPl2lI6PjsSbz/9Mx//0h9/Mw1pdz61lEPFV3b2nx0+PljsE6LGAYFUEZmd48C8iqfzSQBfn8eNBD+dzKzTlCymQZy+un/3P/7f/8MhizLWV/be+OLn//Rf/lEze3UN3TBGh6HYZjgwMNbV5OVf+Ll/8V/+k//gb/77C17/8T/9frT4i/+H//PTb7/z4OGDG3fa5srB6eHjquLDw/uf/Ot/b5zKwx+8N3v+1c5cotzf//bB9V9Ec2bIBDmN5NiRmVMiVze7WRsI0+ravs0fu+7Eufr02eEbL7zwO7/zL6e7e+7kKdTVn33jq5/45Od/8PaJx3UIfazlfBhWcSz1nt0moublZtVWszHTZD5rXfv4yWFW4CaMhsPIj1O+vpZwuEpdDNxEl6OMo8T53u7heU8y3Lg6DZSW/XjU2e7113evvzJf7BmBGIIhIZWuMfaESAWTBohqGpxHAjVFtHGzibG3HDEND975+rDKwARsm
sEYBEzFCJACpBiL1TQN6pyTrH2MkBV9EFUQqCpvTLa1zBmQ96FSVbHI7JgIkcWUkQ1c1hKzpCSJkZBJREWNPUfNhkCe+3GoK5dznEymXIV1v6lrLFwKREJiZM6oAcmS+eCipGwpUOWcM81JtSIvAoAqKauC9+2y1/cerj76qS80x/c+/PBHy+UzyJlJnzz78Ld/95//kv2V7SZ5lYY07OzsN81CMd26Mz+5OFmeHlfVbmwIYobgN8P6YH9/vc79mKpJdXK6CX7n6vWZYSt2Mb8zkSr/l7/3nz559/u37+zf/sQL3/32d/tBq+kVP9s1GZJoCJWIsGczG/tUzyaSouTsPaup6P+Pqj8P2mzLzjqxNey9z/CO35Bz3sw71q3x3hpUQoBKlhAggRgFtLEYmqbddmBw2Mbujg47Orod4QHsP7odHe5u48BgHMZBGxBgJJCQQEOVpJKqVFJV6RZVd8458xvf6Qx777WW/9hvCvrPjMjIL7/3Pfucs571PL9Hx34IqCJW1Y7K9+pYcw5OXeV6Qa7Z1Uzo0AyZfAVzfyjiJvM5g4EeohMFGbuhnrTCBBlg1MNrc1+BD9NhG8lBljzsdtvYTQ6OF4fXhrQduoFUgHMeO8gppz6lXT9MJk3rHJuKxqwkiJrHQXbrtFqlMU6P/Hb7wfb83tVrr643j2fVUd3ONk+ftjPchVl95fWdchy2FjckfhxWiOCMUDRm4XaRk5Al2V4E0WYxp7ZOgHUzPbt4eHX54m7YAaihiifHPpuAgg/TceyXB9f6yzNL/WTWpCpIfeS13+4uCa2qWTFPW7zBizOP82Zx4la+rcwzMj25uPRE6EPTzsfT05rZkJomDOPYDRIFr9y+8vbXf+PjH3n12kdv/dY3T7aOAylWc54dJLnIg+XB3b57E50CIVkYh+28XYa2WV2epRSbhlRttT2/cXQzwWNXhdUueoGj5aGXVPsguxRHPe+3Y7JOUFRVtU9p6qr5/MpmOyQfo8V2Or/sh9U4tNXscHH73u4bLvBm2C78UsC6FHeXsewAVmdrFiXmJAwAZ8/ixXoESsOQRNFy9jXT8vD08uzkXsM6/5t/+yuOfDs/GnfGzk5OV/PD6YMPn+0FU8WcVLPGmBHwcrM9OlxMmvbw4PjDDz68eePWpG7QoB97RUB0zmN2wN7nHE0RrST50Tvf1rOHDx8czI9iMiPVgjCqJwjLUYakceLQZUAy50ByUgTN4oG9d0A27DpAIMS2boesmpEdVYHIQRXCeZbgKwx16iGjqDMX6IP37z86Pf/oJz997di/9Z1v5RxX47AZR19XYJmQwZg8SoqzxTLFmGImgJhiHMeDg/nR8fHJ08td19e1B5Ucs/NBEFXAEXvQmefvfv0TbaijJklK5FLOgE4BvassizlBB8gVAMecjSFCUlamsvEz0awCLnCMI7MXEELOFp13kmTSzvrNJriqH7eoRoih8uMQnWPnXRozOAquyUYGSkiipqY5R+89gBI4NVMTAhQjYKcq3nslRTEyTCJgisiOQXOSnF3wvvKiJpaP5s2glA0OF5PPv/mJo+nB6e6MmBg4JiW0aVMrgCIwE3FJiqCYTkLNTNnEqTryWdDAnEPyvO4207Yd80iGVeUyasBgJgSaZAxu9jyuLwXrH/xkiDtAqHwzxJ4YiFR0RKSskZFFDZGrEPoUk2QRif0YyBvC0WLWVnW3Hdo2jEnGMWl5BGUVId+4UDU5xZhi38e6CmY2DDtG8i4wg5nmvN8kjzo0V6e3Du48efisXly5c+eV1en5GPvjW9eXV4/OzlemduXacb+z80froZfFjaWbocbh0fuPGp4cv/jC8uaVzWYtcsHsABsYtlPvm7a9e+3lDIaaLs+f5m7bnTz62te+/Db5iycXvg5U8W987avu+Nry6g0/acFUc9xsL7COr3/qdQ4qfddtdmfPns4O5ge3j0dOeUxV22pSbwHPtg9//a202Ry8sHi8Op1dO8SjCdYIWTXnvu8MGLHJ0uSt/eY/+gcPv/nr001cuMoBKIgkISRF6lVGkeAdZRTVqqqGfkvkjHypYFJDZ2g5EYFlMAe+nm1FFsuDBpENmhDyOG5XK1RwxJNmSsd+1+3GXVciSoyq5n7wu37En1xqHNGKPmUlbcREEmMB+ZSkgCPCUjKkGY1MEvLexwDIZR5GNZAEOSNkMhFE5x04r5Kfb/TVSl91MkkIqJXXZorvnrz/1/+v/+c/9R/+R+N2PL/3oNpMU+VnB5PcD5JMc0JP3MLkyryeNdzWyhiLGpHVA4qR9FnVdMxD1zX1JCMopGY+Aw8RUr8zwMP26LPr7cFq128ULiQ1tQvEJgaMkIFM0hCJMKZMzGggasnIt15VRyFFImJyNQY1P8a8ceiVKI8II+cBdjvMELICq6Gk3GeLQkBASN4JgHNOVdUyoZDDIdhkUtd5N65WqRtC5Z1axaNb7ucCQyAERwxmqKCGVup6SmcqQKm7BQBHZAYK6ACzmkFRCoCIpTRpWaHdFpQ9mEG2whAxAkVAZ6CazbR4BNSE0Ry5bCln2GaXccZ8BLnXeDGt4yR4G7ONlJPcefm1j33q41ghoDlE79mTUxNidkwQBRgm1ZzAIeF0MiOCGAfVMUP2TQ0CEnPVNAiaxpxir3kUiWLKNCF0gpmRclZE7ccOVJDA+QAetmO0DBDCfDKt2DkDZiIKamRmjp2BoIkjRDRHzp6jhwCLOg1ACHstQLA0VZupKj4f3AuqRqCYO4yIATIiiCQiKskfUysyIvvSxU5FpGMuDNwSawMiX0zjCkYlFaaCRPtOAQSRPRKHmctxURMCImIrmKxSRVdSWISqmZ0jRBFTTUXIoILcNipMay7wGkACFFMuGJVSx25aENCiKsUGg+ScB1NFcMAG4K5dQ/AXD0CGrQsMkoZx5zQRVGreQIjJV3WnEvOADiyXc+/MJlkVUIPDJsBqfHjgrz0FHhUJgB3Npm0V6mrScgilvhkMkqqY7T+050ZrLLVx5Q9AzC7lDpARHZJTNEYsTWSApGpcAGFEhQmIhWeDxRJeyNSK6Irms0dQI5gkQC6+M0Msf1U0c1lg7M/ec9OQCRQho7CXiazwaLCQsxWJnmcG9/kzkawmCK4Ek5BIBWzPaAJEJCaRXGA9IhmBiyYlOcE+2FViUmBWnDVFByYAJCBFBSjvJECOixSaU2QmA3hycraOabvLq22+d9pPDm9x0+50M479fLng4By0J31HzaQNFHOcL6p+uxliffXGCwfH+StvffuHfvS7Vu+dpTHFGCvHSFywPIZGyEWi4YL4UTVEIBZTSREQnAuqpRqslM4WNSY75wpai5wr5X5mCmbMrIUGDlJccVmzGYoKogKAmhExECDstaRSbcbMiOacUxMEFFFAJedVsqoqKRGqplLugIhmInvQVdFmC5wN9lItKCA57001awbzQGwmTE61mNfK14WGe6iWagYzcl5EmBiRRDOAFjFrX5RGCIA5J2IPRIQmkogZFQ2MHIskIg/PdaJ/WyrCuO2/+7u++/Of/sL29EPv0Xse8nixegYYhWE2
XdZVdXGxqUI7m049EXnW3vfjbohxNp94xDGlb7/1LopbHrb9rkMKg+qzsycYsyZNqW/CJDhUR+ApDcNuvTalSTt58OycqCZ0XddVdeMPqt12/d1v/N7fefTakW9Hk15oUlU5zHwc7/3GN29+/2fZt2QlZbiXUQ1wxdM/8tf++s1Z9f7f//H2hRv1x948yYQv3f2Rv/SH3vnNb22ePBFBMwbV81/7crpaL4/mv/rzPx+aq5/8kR/cXj51CkmsFMESqOa8W+3axYEagHHdNqLZtXV786PP+pPrfS/3v4MdB8iPL97ZxnNeP/vOt+6PefLdv+t333vnq48ePZxN6rzNz04u5/Npiroad4dTuH3jKPdcSV9R/cprrwxxiIhtzJt+VMMh8wePz6SPd25fl8PaLkz0NKkSUtp1NdhROye7cjFcvfupz81mMwweHYNa2Ls9jbjIjQUfB1xYgoCahhzHlETSMHS7YRhMY+pXYJpzD4JJsyFm0TgkRPS+EhOVWIWgQN2ua0OILGNK89kUeb+pcKHSrEQ++CqlwRSYa5VxUs1Sksp7YFQREyNyDgWcGySBKVFAA+cUTAIB1y6BRVXy5GtW0VCxMISm5aqClCv2xC5KZId17VVyEmEfxLIPNRIF72KMlasICIEaH4bYgaZ2saDg19vhm++88/Krr9yg+PC9PsXx+Hq93sTN44df/Zl/WU6BZORQxVE63H7s06+sL89ef/F7/vFXf/Yjdw+HvB6Uow6s6HJrukVyY4RH9x47DBBos9kQDfMp7C7ze+Pjtq0ePnl83U+ypVc/dfT+l7cM0PhqiN2QepU0a1tLuF2NFQE3REJD1yNYs2hjH30z9w0hkUMCRUQ0REZDs+Cr1I/k0BA1qWWGUMhTNsRd5bzmJHHkwEqiOIyjOnSISiyAnC2aj9zWkMI0VFN/7fbdT2geIYV2xiQ0jGuE0sGJo2TyyqGSrGxotmXCOEYEhHEcLs8P7rx2fvmsOXyhnV3LtqwXB9gc7qTzc+7i5aDDYbsgMRCZhabbdRA8soOkw3Yzan84u2VpMOkr9mCikplcOznarrvgaldNs4jmiJKonoihjbHytSiSecQF0IZhK/0qQVAX1rsuCYKlqnUXlx3S7ODo+vvvvkfbx2kYQkMvvDB/98MzIZ+yhiZUHtw0IPBLL9z4jd/61vUrL4mlp+uUXX374y9fXIzn23F1Ia+8dG106zvXX7zcXB5cddLxdJ53m95sNKdXD5rYp9X502DTaB1iVqsd+plbojTNdCapG5ICxi6uKl8NF+lk9ayZLTIMy+XRar3iqvaOLHkzHnonVlGob10/Pn12Fri+e3yw3e0udx+Kpmm9DL4aNO76zjy8/LnXf/EXf/m1Gy9sLs8OF9M8pKxw5fjmvcf3lZnYfHC1C9tVGnrdnq/ag/mzx4P2YxWOhiEmIsEoxNW8CY6xj+UUEFPKyTvynpjQA5tITLFtp3Udnjx9/MLt2zNs17tt3/W1r7z3oSK1xOxMYo6pbPMbVwVmstGhJVCHlIYOva957sJUTBtsgsOx20RC593F2dni6tWosU/CVSsyEBIQVBW3scoiu213OJ9CsFGijpskI0H26iRlDBzmYXDw9Xcf3r7z2ssv33328H7u+t04PHp6Fi2JiYoqSFPR5vKSg88pxRSjSWBfBXe0nLPzH957wOg8secKKbNzgGwCrObUFtXkc598I7CoCCqXDlpNEtNY+Vo0eiZidq4qQQ/vfCwkAiJybl9IoQrmiFglo+MSBdeccY+u1CyCRs45RHNOTQd20I0jsgN0iFWSRGLOuywRiNtmOsSITGAFSxkAylyXFaQKk27YBc9gRuUlkxzsMdXAFMgAyMT06MaxkQK45fLKZ9/8xN1rx5uzM1Y5WizWcaAhoRE6dAAGUFWMZVWeEExd61wAZPZcBw5gmYEYSMQmzcIEAoWmamPOzjcAPAzns7rxzGojGCEE70JMScWMwVFj5sv7jJgWNwnxhI0c+dJZk6X0moAh1dNJCGG9WbNxJgnB7XaDmpnkjJQFprO5qmXJm90OwExlHAYmcgjsWbJoGllQTfR5xcFP/Ld/+7/3/X+IAe+8+HqC6uTxU+l64nB4a356sXXOz6vF6dPLmiuRAG2aX50YSb/baYQ7d16l6SRi1Kgv3/7I5dPT05OTebhy5c6RbyeSx83FGbXN0eQF9HT64N6denHywf1mIVQBoj15/97/72//7bqdf/Z3/K7rt25euXsFTZtF9dp3vRohy2DbzSUa3nntrrAAQtM2oEooIcf3v/yN0289dMvm6foitL49mnJwRKqI4Ag9eW439x9+86d+YvPtt+l0NYsycWiaerNZW5d1TAb1YGCCADnlAKyanXfFYZBViQmRFNkFypbAlNEn1uX1668sJXzlp3NMrgoIPG76cZcq56fzKbu6bZEELI1+Qmm8/NQnf+B3v/S7n375bznMKKpmxKCSUdGY2ZGaEZEpGIKoEACyF3QAyhwU1EwIURWTaRlhGM0kE6NjNAMgS5C8dzkLEQsSW9SUgMBXPuc+p+iYE+G9tz/4O//5X//BL/wxl+vm5pVw1KrFNEo9ndRHS5421aKlipGUHGtOqOp9IKDcjyrZYkIO5Hw1m4WmMoOuG4QwZ6XGNW5y/1vPfuVn/9nJKnepGpO4uholo6IPVdwOaKYSgTAboiMzYGZVlcjoHQCiENWUCKLLFRKAr3ybcvTsMNW7DQ67JAkV3DjuvAEryBAhGwIlEUjZqRQldDDLMnJweXp1veskrmTdIUACE1H2kOKeW1doJlz27wL7LixABSs8EECy59xjKoZxJFUpHWhgJApCYEYFF60IgCAGSgaAtAcGI5ITJCWfS9gJwICzWTSNFsYEKjVgm7tV3p01GDEJGDl2Y5Ybt29/+rvfpJokx8AheM45G4iqEJGm7NiV26/kXPlahdIQRZSAwUiTFY8hJkdgmFLFLCKBfDOrzRhBHDEggUmMI5oYOTBUI4MKOQJK3TSOODBqiuwqQDQT7zxxufHusdNmwOyKN654FgoAWE2L3R9LFz2hY29mqhkMFKnQpRFKhxqyc4Dg0TFhzkJgBkIGDpQ1m4J3lIwyQsHfuqJKaKkSxCwl40ZohQFgBkqGpRS8j5GJoDy5wEpE2lTMgAlNc+Enqer+V1PDEkpE3GcV95VM4Pa5nlyUktLABQAOSVQBzJErkR8shBcz01wcVWCixbQBMD1eknt1/RD603cZ+jooDFJzY5li3MXUIfrdmL3DWZiwT3nQrB0RBV/342BqwBBUd4/fOb7zxuM4bAVkHM3z1AfHDskxM3IpXsx7A6Pjf2MpKl8gmEoyEzUjciVPSEAEKnkkCp59AeIwoYiqESCZZtwXDkPJmhU9QosfBMlUiUA1lSCYqQGxad73RaMz3eON1UqFueyx48/zV8V7YqDFHVaOWiGLqQqSFd/Q3vJTPAQqIgKGosLssFiPRYof77mbDCVnAGRiAyo0oiyZ0Rmgqej+2i7WTt2vr3ICF4pWY6hqFogfP3q8HfuEtI359DJGDdcOjrpuyyT
Ou8X86MHjR3evv/Do5LE4GAEHy9gnx+Fi0Edf/+Kf+Qv/sy/9xN/91ltvH8yXm81qOwy+CmaExDmJY1cMUHuDUxFPZY8DL4h1ydlKrAWRyKkqQkmQIRFp4bIAmSox7aO1yGaasxLS3p2HxXkHokbEIhnBqBDRAQWUuGC8pGTTymcrKkyM5tQ0JylHhh0bYs6ZiYmwYNHLP8uABSaNiKammsQBkiMgVTUQMivFVMSMJioZ2Bf3GSJDsU6JOCIDJGAz2SePDZjJ1JBQpVBG9s5OsLz3sgEBKBf/mJb48L8lFQ39cNQ2P/B9P7BePULMYDz2ebfuyCcmmS8PLWmXd20V0mgmMF8u3n94Txxdu7YgR9t+6xx+35u/42f+1T9it0hDbF0zxHh0ZdGvTqo2jHE7axsH0PWRMIta7McsOSXdDZFdxVxt1x2TkudNF2d29ce+8MP/m7/4Z/6T/+x/3X7kIzKdRfDHd+9W82mzXPhZm8Wcc/sGTmYAUjWCwK49ffDu2YMnWwhv/uCPWHVI49k4a8K0lstVtVi0bh5l+v5bTya5uRzW/SvXpi++vFrgbHZdh3OY3IAiJ5MHTU8ff+2lxfeXbijIoA5dUwH4ava6m19eu8Vf+5lfvXJrenLeP3x2ubiRjpaLZ/fvaX82qf3nPvGJt999m/PoAMYhBfYG6fphfff64Te/+eACpZnPjuaLF2/cvn/yDG30wbUNxazrzen726d1NZKbBY+H8/p8NeyG3ZVFtZgcfey7vvD2o9Wtj3w2TKZUWOyGmpW8qSgT094mt3+E5xQRTEzGfid5zCn2mw0zjTmv15fj+oyqGlyfk/Vjdj5YyYKooDdTEKMhm5gI0pBktxn70Q6XXiAxEaMj5piSoGRLyFAxZ4kAisSKalB0ViBms6yYCcATBO8BMMUIjCGEcYi1D3UITEhgjlhFLWdGrIkqJlRnhpUnsVQzegBgNgazjJIqIkZVjciIRKJJTRx5zZCSIAI17vbLt775m9+oWF579fYM4a3Nr3b9WlW0327P9n0fjfcHs7ZWsXEzSQdPTrMi/enf/xfff//nw2x6mYdD16wutynvdmMUYIeoZs5nVtcPW88p75iCc95fdtivaEfcid24kWpvwzpyJcCASOWeguQ3cfzY9Ze/88FvvXj19fc+fACYXrhz44tf+pXjzyzd1CkSEqUhIbpQhyxj3g7DRqfzVlV95Ychi0pVz1cXl1Xd1nXN3mHG3UUPrqm9d0Zu6KqG28Pp5cWlRnWO0VcQlRUJUfKY0oiOsdzmSBzXzF4BAVzD6siyKDghUyQNnhGsnU2a6dRyHPokdCT1zQFS9q46mGz6XS1VRbUZUDPpx878hKDOKpmMcRYBkAFr5tFSXiXQUDGN0SxWFXWby+niiqqSC+vd0LSHKRtkkAAi4gCM2MCRd7txnU04eAMZh3EQnR4ep35r2VRkOl3efPn2L33rayHw5bqbz5pxk09POh0lD7GqW9kpz/F4eXh+eXl+dnn7+I4CHR00xHZ6/4NpLY3xdhg/9frL6/PNwdHxbrN69ujh8bWjw+PrZ2dPZy4MvQxm691uWA+mOKu1qWsFnE+X211X+7bbrLvt7srRVDcync68adqOVEnMY3Dj9euLk6e7ZjpJ20QEbVPJQAbGbM7RbrshgGEYvDOkSOKDn6Ws674jByA2w/rDb70/CS7ncdo2BrYbdt41/bBJOZrH29evPXnvqWOcNE20UdpEVVw9PZ9XE88TP2122+0w9JtuffXF66cPntw4Pt6/F6nUjS+jQPCucj7thqHfzecHk6aZTeYPHz08XB7Np9P1dpdyUmQTJA6S+vIsda6J4wgudLFPmoE5J8lZqrod4tA0Ybc7mdT1ziRnpcqZQeXC6fmFqAlhNWn61KmmJEPtqj4Pvg6YFBVFsiQRU4+AjrIYqIGn2cFCennr7XuTydVPvf7q7vzJk0ePMtnjk6dDjALZMWs2yUnZt2HSp5R1HPruyvExGA5xZODV5YbBV84juziOnsFVLmarfBX7btk2v+uNTylqlsxGQCQp5iTddrfd9MdHx0RgVDpLsqkpKvtaM6oSqmlWCoxMKSkiZEtZlAhME7BjHwwINKtl8i6JGXogSlFz7hWCd5WIOKqHnJvgc1QHRAyi2RRNS/NGB4CSiYzAwDMakFhy3oEhMYupagZCEEBkzeY4mElMERBXZ6vv/fx3Bz+ZHhwQkI4RFSb1ZNK2Q84cEJklDgBYVz44NjADVIW6qp1jsthwi6BMyOiYOeoQmEOoJYMZE7VIvYCapVBPkpiid8hQCpHMEKEOEwIQjYymcQTTugpmWQAg58CtAWQZnfMGCmhN267ONhl0TNkQkqkv7XMiRjBfNJvN0PcqBqhErCkmYg4e6jpUlS8oSYCyzMcUwQVfTsHf/Lt/5yd+/B995uOf/rP/o//V4uiF5toyjU17tIwxM9C1xeHjhw/buhqH3XnavfTaLUu6fbZeneyu3749ndR97jD10wlIzOt7qwbao9sHy1vL7TBC5MUhJ8yb3XrYdUOXoeuT4dELtxY3jwny1S6l3Thuurd/6ZffCTi/ujSpjpaLOguIkOOmnszmC/G22XVVXZsJmQ7rdX++G/ru9kdfee/RW2GO7VE7vdIkzev1bjqdu2mTCHe74f1f+dLuO28dJQTPl0M0AyBsqrrwBA0AMjQhDENWEWYKvhbNJuZDrWAEiBLNYBSlUDNk9mykjiBert742GeuTY8fnn4oWSFUvqnGmCLodgcoBmSMRt7FzfrG0e3/+C/+x6t/+WXXnVU+9EMEJvDE4AwUyWmMiGg5E7Ig7DuPFQyEHJkKA+Q8MjkihD21NZsq+cZABYFM0cwQkJ3lTGBZogNARkYcTbIYEaNhBVC3FTx9+tN/72/93j/xP7x17fqGMk5Duwz1YsZ1Q8EpGjDGIXGGNAwigmwYUVNmdsvDm123UhgBwQKMMYeDRTaUlFPvN2eXv/RP/tbF07cGTbuUxFSzRkvOeZUEgETgPIlhSqIIgVElc2kSdBQaUhZCySTERd/M2xEcTKPWu9Nh7DB1IwAqjEEBJGUxLq4DJDMjNd2NGSzMGiAU562uwLENMG6BrDGTNKSM5qowPax/e5lcclKIwAwgAkQlFrgffdXKHsv2Pwqx6HpmoEoAZsZGZogGjpGAmBlVGSgV2jGYqbAjNAFzOUcAEECHaGpjGmIS0LbxlcTdMK4rGAJB5YL0OaHefeXlT77xBgdAFMdEaAjGjokKfRmYEJDVzHtfNw0ippyTiA+BKeQcGW20xOV9UoGcq5wfxz6bYslvxGwZACRLzCkRcwYaurHbpDBrwnRCFj0hAzgkDC0CCEBwAUvhNtDzBivbx8oKtQQKMKYMtlAKj7AEZPbmHQVkIwBGNSXnsHzghKpqaIRKhoEMsra16y/PLp8+Wa0uNutdU9XT5fWrL78uwOTRiobnCjUKqVCTRaCIWASMCAAiAkDeOQPYh4qKuFOgu4RWsp1FR0AyRKTnuiHiXkc0MCAtDYeIpgWfBICkoshFKClt44aApJJNBIyJRH
LRPsyAHBM5wyRJHCM31fzG3VW/hn6nsGUfUkxpGIHZOU+mNVgS2g7DpHKe0cSQ2bEThUFyzhENdbfqH7yVsHrYRQI9eabLxcGVK7fadhaCRwICVMuCrGZI+ttHQEQAgJ9/cQKCRo4w5YhIhf1kpTreMoBo+ZQNsGA4npfPYUHhmDG5kudTTQamto87IUBhEhEhWKmDJAMrfwNw3zdUSgDMqMBsil+yuErgeUmdmuxLlAu9mYoIyaJCe+myUOGcAahIUSwLtUckFxB60f4QyUT2sUQkLVirEoXDvZagpXBClZwTU1UTFYTkiNM4PnrwaMiy2aV+I3Hk44NrcbvLcbs4XnR9XK3OJ2G67TZNE1KODMjOOc913bShos32n/6T/+cnX/vkxS4nTdvT1a22zTmlBM778r8UVQQhYhVBgr0pb8/oMUDG/WW1T/KZindO9r2bIKZUmGpmvNdopKjWhb3+XMEGtbzP3hoQkqrsxZbfZvUWlN0+syjAWC7sYqNDJFUh5kIOK1X0ogkAgFw5a4akls2U0QMgMasqairPAs1gaFlyIYUZQCkKIGJTLfp4+a2ZOOZERZlmJ1DUWC1u0XKTIXaSBsPSMQXFtFjaS4Bgn2D8t6Wi6fL4j//oH5kfH3f9OJlWqGCmvvahbknMMoFXNJgvD4dxXK3OGXQ5XYDhuB435+skfQj1L3/1F7yfsmtl2A427LrNYtKgVMQ6bZu6qWTMKj1VjsnYpd3lZtoeL4+OfvUbv1qF1NYVO1/P5xfb8S//6T/xD/9v//kf+L1/oB/dYdNe9H3lHEd79u699g/XxIFFC/OpJC0RFIjq6WShtPD+/rx68Y3PHbx6a7se2+PF5OAle/CU3DD3zfr00Us//IMPnj6780c+cfYzX/7ID/7hZ6dns5feHO5/bfXwvYOP3hC0FHMwcr56/N57L3/8hx0M5eNy7DIJANVs7WTy+qf+0D/8//yDF994faaTW+6j6379R3/0+/7p3/9XEKunF9vFnF947cXT3/iNa9cW2E4vz05eunvlIy8uf+XL749D6Em3904++mL4yKdek7f56b134wBsbthtkmcO7v6TC8CL5dXDttKLPMouX71y+/f80b/w01/5+htvfJ+vq6iJeX8/cp4NVC2jikZIkswspVFNJQ7DmMYkaRz63c55JpLd6hxUALAfVkM/Dv3onG9CtRvTrJkC9ITsqU42OHaGpCmbEhojFgeT79NgpkJIiM77FKWqfE7GZpUPvYzOVWzE3iFqlkRGaoDKAKaQGMkRgw/ZNEtG5uCcY2QgZMxZvHPouG4nSaGqGmDJIs201iGJ5RCqmLIPznuP4wDMPjgCiBlqX2nKCCCSQ2BFB851Kd57cr+u5f1vfxX7k0//ju8177/1tS/m3cXRteX5aldOQY795uSUJ/XiqPrIm69+58m7FftAHmNXOTty02kFMZ52Oja8AJrtdv3saEZEMQ4ByLuAhhpBO5gsJx2PYxonbcjr7esv3rGnsVnMdt3GOepSGvNgUcx02+2InDjQClGpqivQpoJm7DbrdVweXEULiE5yMrJ60pxdnFytj3Y2AAD5+sO332rqF9sK0TnBnE0YoZ3PwSgP20Tc1hMjGoV8Pet25xzUVQEA8hDJESLrmAgBg09JHDPvX1kMSED3CXY2kJRAR7KA7A29cyFa570JCEyYa6ymtZxsx8unV44/ulqfxrxBCM45FdxtL/y0TbYL2AAohVY1WO+Cx1Gy2cC62fWrGjHMr9D0Gud2XD1rmpkzODs/ndRNpGDOkqjJmJHZNVWw3GUFXEwWm/6yDpXllpxL4xaJso7fevBONfHV1Pvl4sG/vrc8nG42W81ysGxVfOwMBujGLvfJT3gH47Z/cm12vYG4erThedOPtnq8nXC8fmO53q4V1Pmpp+bs4sQFQGMHvkHzzDB1wziOsZu0YcjxcuyBwLNP1k8PrgrsLOZgKOPQd91sfhimVRuqJ4875Go+nZ5uNuMwNpNWDNfd9sqt64K9S7icTp9dPvN1hUYK2bSbTNs++m0/5EGm9ezRex8eXp8ySL/t60mFVC/n195/+G4UcEj9dgiVW20vqoqoxkVTheCfkWXtxq7fipnQ0e3Ji/P66NaNb1ysNn0qp6AbBvA4qgK6vt+uh/7O7ZvrzSWQxjzOppPJtH786MmtmzcW88Xp6SlCRjPvGM09L+dgswyaJeU2TNEZeYYoouI8E+aaQWLMqQ9uRphzHoBtOptMZ14yIIauWzW+YXaqwg7Hsa+CB3XB+xjzrJqmlAmwqlsCukwrCmG94W43+8Rn3kQa793/AALce/TodL1OkhUU1AgghGrXbRxb3YQUAwONQ4w5ixor5JTaumHnoSxmDBA8W9Q43jq++uYrH2FnkiMDm4HklOLQdb2YDUOfc3Ih5CiawRhLBQwjEKKJMToTIGFTQ0PTjJ6QQc2AGJ4vx+pQATIh+xDGfjBFpuCoQXCiGrxDqEzVcRDugDMiWgZDp6aePLpJTAlARctYTsBGQCpQGr5SHhWRnDcDMgRUdk5EERmdg4y3r702m/rd2PXdwBwmk9mY4sFiMea03ewAAMlbec1VE1Ui4hAQXfGxu5qJMevI7BSsrgOSiXWEnLXqY69orXNRMwKRM8hCYMA+6egsGIAhDdYTmxQnubBIQlJFM02klKHExNR7F0U0jip5N6brt67dv7/JYl0X6+BTHH3wqJajGFnuNEdZHs7qNhAimO6GwVdNyqlyjpgL6tUg5vQcZRoWJ8Y/883f+NJf/ot/5k/+6T/+Yz92fDzfagIVh/jed97t1mt74Uafulc/eieZnp+cbp91x4vr7WSy6dY+wLR223G89/aHbZhODufzW9OtjIJYV9NR0OLOm2FOXmy3G27dfmF20NbHi27XL2deh1E3fQi8uVxtV5vzi/P+4elXHp9QaG/evrm8fqte1A6oripChDFqJ9Oo24vt1du3Hjy8lzQ21Twip6wuVPPFUg2H2CXGajFdb7qqapeL5uzeo3oaNKtjLvMbgQEgKuKoPuek4H09DmNVOVUcx8TBeccgmTSLoWhWFR8cK0KS1cP7r/7AD/3xP/dX/h//9/89aRr7OI4ZiDIkHSIghkCM1sn2aHnjf/mX/9OjJ7v3vv6lK5KAieuwL9ZWRcQs0bEDwFKv44kUlIAlC7IzNYNswOSCATgkkwSmjllyBLViQ3I+OMcJsyoAcFR9Pr6CZHGEoOoQc1ZQDDWOo4r1P/fP/t7dT3zs8NXDsxivXplVzQQtmMU0DDAgJlHVEKqchZQBoK6nQBB11eWz4JfO+aGPvvLk3Jiiq2dxi2+//ZXDOddw/PT0w+wKBBkCIChmTWgIpIxEjo1ACYidSg6+Mh9yQHPAHhUzo3qDKiuaqdbd4OPA0rvUZzIWyVlyaUyXpMyMVAC6YGIogKNiJdmDkUGysLuE3YiRkZ2ghgbYRr+3YxU1qMCJARHNgM0MsNB8UUGyUdl7A5TinyJ8GJWUDgKA7m+ke+6GZ0IERhIxZlKFPZsWiZABWIxySmjmAcgENAYCYonxPI6d5
9LGyCmpR3/75Rc/9Zk3qmklOTGw8wymBOrYl5GafQAANWjrqgxGKY+k0gYnmk0zpDhKDKFCAkUzgyGNIjJKJGIZe0RiwxLEE9GYVRXPdpeV8y+8+tGHZyeUtfLA+9ayvaWfCb0P+6kfBMntff575ggRlM+MFExNwZSsVK9rSQRBeZ8HYGL4bVWe0HuH5esR8YYcI/a71A8PL89OT5712+04JgXcyHjyaJ0y3frIa+RZNYOW0m8zNDJLEp3b04JKlxkRmSkTgYGoEVMZZUsaSlUVgJnQlMvloEpIjoNaLgXKUqwZVnBEqKBIqDkXkDOAIkpRNBCRwAhJc0THnsUkmZlKRIKcS3IHWAVFMEvSlAV9mFRHN+Nlp4PFuHXk1CXkoDlK2s2bSSe8GWMtznkUiCn3QH7S1jr6PCpgYrZh/XQ2v5KHrcYRRePl+frZM3aBAyM5RgIsIaLibyqmIgAsUY1S+sKimQhUjamUjpU3ZSw+u/3JUStxAAAwtSJJFEpReXRqgcEU5hqRiOzJ0EURh9JTJsXah0hUGtyJsQgfariXeIiR9nidQpRSKf5hBCVAkYTkEJ1INDAigOJT0lKvpiJKTCVcBggqQuz3SCxAEQEGg31aCp5jmhBYIIOqFNoUWDElpZwd11BCUYYm+PTxyZDhcj3senl2FpNVVQhGvQ45m/Y5by9Xt67evexW88PDeLZOar6u6rYptpa2mY4i7z+9n/K4mE9wrYvNMK2ryjsTY6acBem5VoNYivzKE638n9UAQNkFQMxpZCzn1cxIzBiAyRXBy55XWyChijB5JmeWVc3AmB2CqSixLx8yF9C1CiIXza6ESQGBia1IP6AlyUnOGzIiqgqI7NsGSoIPAUoTWpFRAYkcPA96msreHaXK5NRUVJicPW9YM9NCsMbnJi8wE9xfBmCCRkyskvc0dsWiBINmYjKTwuUvHcH7SxeKUZD+O1LRK6998iOvfX59+dQxx8EkRa6YHTlikbgbtteXRyG0222XUz9fTKpJc3a6ITRG23bx+o2Do6PjL37l1269/OqwVayqEIxr3ux6RqqCM8S+lxgHsdSvNpJS3OxuXL1zkeHX3/pSPa3QII5D7MfZKx//9OdeWM715Ntv/b4/95e6TuJgfR/DwkDzdjuAn+WUnpvrioUPTQswPI9PTs4uL3Fx5fZ3vRl3HSuR2ukHJ5OrNz3F7t7Xj64t3xnOb33+E1pNXvrExy7v319Ol+9+7ZdvHk/J+5QGcASMWVNwOLt6C6zTlKhqCM1ACU3yeLCc2WYybybNLL3y0vLdD/71ZLIcAX/qS79wdOdQBzf0wZPdWB585OXXH95/mmJ3feJnhm9/52Q7IoIEj5WHDx48bCb+pVdedLK9/8HJrhtylC6OKYdBcHk0b5Criq4czQh5OVv8s5/7F9Mrrw0pUnCu7GOSAhg6kiwGMMTRTCSPknPKETSziSvbQgXv1GzUoa/TznnOOWulT/o+pUzOxTEj4DgmVGzrmkglJQTwwSH5cYg+OI4uaSRWZ2RAjgOYoVnjPAJ4x3EcAaQODgAZrHIhyeiIJSshV86L5qRGSGIqZiVE6JGDb0A2iuYdpZQdcenSDNMmiU7r2qsasQ+NE0VyloSJiFnJDBWQ2HGFRADsw9APjtEUSNHEXINx3EyP/KCbx0/fufjZ7nf+/j/5GTz86i/+E7I0f96KKRlIeDFvBcYHT4etuovh4vr1q6fbCKONmraessG1G0fdeRhy28XVjARM2ZmBCaGIWgaUnHKPNlFEN5Pd2T2bHCjFbChqRLCczIlQa33p5RvK6cq1K31cTyaeDE/OnsYcH54/XV6ZLxZT73gcoR+6qzemwxjnTfWNpw8/8eJHd2kL6lKUj772+mxq29X26MZys1vHlJksqzb1ZFI3p6fPJrOFm7brNAL7yRTBmSKKKJXdkWbc91SYrzyKASHs204RirSnBiqM5NCBEZNLgxC7oVtNIfJ0PiJHyaEsTZq6aueGJBbaK3dGFyxF0B3ipK6mBMQAWZPFQdOYyu2670CtqtrW00muJtUVf7DcPXlyk0KXDMFBgiZUfeqjpHrSbHeJNEJ27AJk05Q5D6Jydn7hEETRBJ5ttp/7fd/z4emDZ2dnkzhcf/XaetMPIx5ev7Z9tIrrQUS5rrfj6GfNiXTn67MXbt4MwbE3X/Nu28d1XC6uTY5unY9xFPvkZz5178NnTavYD7dv3v72d/71ar2ZLtvVZvTOhPRwPidm4iru+vlsGly4//TJ9Zt3dJecry83K9OenT85PasmrTfdnaxcaHU+zSlJ1qGLBnTleLpYNLtBJ1ydnq5GkWmoZk37zodvt42/3Ky6cRSUajoNrkkQ0XEW9VWYNLMnT953yTM5Zk1qQ1LfuqUPbTv78NFjGXF2NDs8vLq9OD+8Or29mN17cH7jhWtTzm9/4y0Zxms3l+UUqAioes8i4irnPGSNk8lEJA5ZELiuq8PDw6dPHx8cXZlM2mEcUFJOWVTIuRyTWiYOYqaW0WFMnRbeWR9dzaOkXewcBXRNHzOiTafLYd2bQRx2bBBjv5zNxy4JpLYNIBAxqSKhi9mSWup77zwQjTl7osl0Frz7zuPTG7devHqN33nnnV0aTi5XT56dgRmq1lVABUk5ibpQp9y1volxAEb0yOoq57uhB+Iq1FHG2AsTe+c1KqXxtZu33nztY5J0TCMCI2HfD5JLrFYN865bbzc+xauMzjEBooi4UGmBiZo471PWssuynJxzTCw51c10HCKRF1FJIsxgWXSQHFU0RzFUQlPJms1YVTsAiymKmUGIcXRQEwUEAZOcE6IwekJSIFACVCBj0JQH5/YQyrJjRwRFjRpVo0pCdNV0IUkutxu13NRVzqiSQbB2IQB6NGNiR2ToPTumbhjN0JELzJ4LAV8AqQq1ozDGXV23asLEHIKZF7ECsVIzBGVFE1PKjsmzY3KiKaaefN4vzZm0uBEkA7JjB2Cg2bFjZtnTZ1TiUCGvL1aMqKYxZ/GuxFpC4Kat+jExwNHVBYIultMcU98NapJNnGcAVMmAKCnj8/ozADDPveZpRdth99/8+H/zz3/lH/5P//xf/fTv/L65a9bjjlye3lhqxdeOXoijrs9Ph013ePVoOpkmG6umrSfV2YP7m8vNUXvIbd0cBWhARmuqadqMuR8tD5RjXF/mzdnNWzeu3H0BA2WBaT0zxNQPWNUQewVzoZ7P2APaKMPFxeNn5984/yo0+Opn3zh64ZWrL98NlZ/p7Kf+zt9/9fi4F4WbLTULdzDj1rm2NnHr1Ypr7NJOqBbS6saV1cNVdI36ILudAyMXACyPAxGJSu0naqDmPDAie89AwJ7imJ0xIImokXCoFAytBF+sFuke34PHTz/7kd/1zU9//5d+/eebMAbWaGVoEVTEIZnEl+5+7D/9K3/t1Wf27j///14jU+I+JUELhIyoxmq5zGFlC0oIBtnABAhdUCJgZ6D62ztPI1EzEU9kYMjGgNkQEbIIemADLe3UriLMkBIBjjkrgao5coRoWZnRBUfD5d/4z/7qJ370D3z3
j/15P6kvnl3knU6nlFKq6jr4tm6XWQaFQWx0lc8wkKNdd0bUVs1MAUDGnKKmDFyTuJO3v/3er//T6zfaR2cnz+J2RM5mWdQzIbGouECIBIzgSl+zCWtdN56rDOhDFWF0oQqgaNFEpeAk0eU+5mg55ZxiAAAzJiYi3cO9UXL25FRMTCYh2KjxopfG48wxMxkYk699Bs4koTHMUSSx4r8Zkgvbg8AhWkHSmEkBaYDt4f1gUGA3hAwIygpmimpSkJklmURYaMtERISWs0LpYseyTwdTg8IEVkEEKlgQMsSctEswmDlKgNYg8ksvvvjqpz5uniQLGiKhZgFQci5LwmJ80QyiRCTJwCAKjGOHTAaS4mCilQ8ECSWjuhQVFBqCcdyo5hBaTaPznhk3fWcGQlDVHFWa2h0fXG1auPjwvdfvfBxUNUld1YUAgqWPvBR0ExoQAZoZ7d0eVPhDZcBjgOfZFisoLmRCYjAk84TFrgDMzsDMUEFATcA8YVxtHn77LdmuGXHIue9HACTnZpNpGgUNzh+9c3jgJleulAo1M1RRdpAlE4AJKhTZTg2IkFAFHZtkZrZsYxyJyIhzFjUDAjNBE+9JoqhmzUpAooqIqqZgRYgH0dKIpqblggEzlVRatsyUmAGUEE3ULJtFlQgpS0xGmFLKORkgpFhYv4I2mplhVm2Cb7BxzlLuwJWcPGRjNQmOWq37LGRQM4MKEWUk9GTJq6hpdsi1o6PZcr154tEcE8MIGAlIFcsqwjJGkd/mtBTFYY/vQSJiLE1hiCYZAU2NmFVNQQGRStM5cekX29tZ0BVjmaEhsqkh78NbpWqs2Ez2Fp7yrQMQsVkuZp9SXUaFao1oIAAESFDsQLBPals5rsVNsy9AZyl8+XJUoTiDBJFU9oVopvt0VlEZzAyRRIpoUWzLxESiCghZ4r4qyxQIEYq8KHvnMpComKhaZMChH588eZqKWgPcRz28fm3Vn3unPjAqeg2z6UKTNSHEfgOo4zDSZII8AxUxRNDKu7jLftIgNZs8PDjbNLOqqh0gsCMAI2RDLJFFJCDCrAKGjFBkUABQExUhJENAxwCoSZnQkFVkz/rdW3IEgZzzxfy3x5kDiMpvyyhEvtCLVEuKEwFITZmdqWIJJpqW/gW057YmKzkbKvqp7b1dpVtSiBkJVfNzYJmpiXOeHZtZQdqrSUFq7wV2RVUtUqOZFo+R7SOTikhEpdEhYwmV8/6jYPaqKipAiEhZhZD2+r4Zc4kFkz1fG+yloh/7d/44YA5TF9B74qfd6cHsSr8bdnI5m8wPZ4ft8mB1uerjMA0eqFJzGILzjDIYSa/5G+9+53B5SBLTsEWRabuonRuHbjZpp1XVb9dn548LMI0J2LlPfPyNn/6Vn/vw/GkAanAChNpvb778ZrebvLI4fP+X/uXCsqWL+vZrnXPtjVmf19N4fnCwxKlHx/tgLDuAPQsePblMtM394+Tb6/XxrRHYV067PlQHkDk/2vzk3/n7P/Lv/+jRq6/vTvr3v/32QQjgw7Vrd9a5Xr5wc3d5bol8RaYKSST13dNHkCMXapfn2I1+UoFaHvvZwU3Xti50q4uzF6/dXKU8Gp6eD0cvXf3ar/6amx31PZw+WwcXbt8+OLk4O1we3f/wYtMLCts45CkPFgHDtz94/FLGg6O7Tx/vdrudq5rgaMzxdL0VTWpxd3nRTBur69Ohe7bd3nn9c+w06+gcZ01ogEASQVWQgSirRMw7SAmGKGlESd16myX36/X9Dz7YbjsBjUMi55LkQYa29UAuKxgBO8fMQx7ZKTM2dTXEMYQS0kVDiZapAnBAxGjE7JGJTFVLBlJ9FThwsdlClgwZg0dl1ejYpzwS+0CeEccsZYnpgi/09xC8mKIjEPR1lTQfzaehbTlqG6rSy3uxjsyubRemqFkYLXgH7OpQgQkCKTynVyKzdzuJnQ6v3L168v6H/TY2NdGYzy/u//g//n//0T/4V179zPCbv/z3SPaHYRygvTK7++InJ9deePS0e/DWWz/8x9587+s/94Xf+cY/+ulfnVxZVDeubZ4MXnC7OmmX+T/8j/6D77z7rZ//+S9yaKUIzxkqaIYtuF64sh3Jx15640bz3fnBwtabnHtyiIwgwC6giq99QYr5hqeLaYqJHX7v9/3O1jnFxN6QhQgcSCmKFU8yjBcnJxYMyfptR23ddW7I+F/+v/5P//6f/EsAKAZJI+VdEmBPu+ECYeuaWdUsNI3IFEGQiLxTM5QByQCdmRT6FO69rFgKMJkQTAnJFFIeiYAmbTYMdb1A4r5z0LRCIyJmkfEi5vVgveM8pjRfXrnMQGCSo3OhQGpBxLqR+97FvnI8gsJuQyqkhsCTxayeLfL61M3mnSG2Uy86diOFzIggpjFMqRp3a6tQ0YPD7eYSxoGdGy06DL6epW6YHS4frtbJw+ufePGLP/Nbs6MJeBbkk/PL64fzy7iJQ7+T/vjW0bNn66pdTGLsusTmkuQwdWmn2fLNV+4uXj5y0lePh835sOsuFrPlrJm/987bedzVkyAijATkwePl5XpSzWazaw6y5SHLpl9v3a02+XEylc36fDqfPXq6askNQzw4nk6Wk81OztcXXYwO0TFm0aqp+36ofHN+MQzmBY/XWzvPl76ejbkXTSKiKNO5v3//QxcgBI8Ixvr04mw6n+UIAGgmLNStenYSWh8RHHlnqJvUqUKu0xmtL9cLpZNvP3pmabZobYJR93wKBLJsnhyZkSqKjdu+akJdV8OQhn5g5qZpfUUXZxdXrx7HXvq+ZxeG9WVVtw5dFuGqZoDcD21oco5t2/SrC2YPSEmUOBhQxbXEKJZzpq7ftfUcRXyo0tihURYtL4lsFLxvwuT8cmWkJR5f+aofHjV120ts2uOLjbVh8dGPvrJePVhvV0+3q7PzC9OcY2binDIpgKoMAzsf2G02q5RT5WqP3kDzOBzOG/ZVHvux65pmoqNJ37fefebNz929fnvst9mycyGllFLee4tFcooXF+dnlyevvXLHcsTgzSCnRAaaFTirjQyIFEzFspSKDTUB0ZwHzBNLpsQI3mwAQlWpQk1IiEzeCQTlIDmiCwIgaFzVqpmMDbz3pCOkIbKUV1PKmkANDdGYsU15Sw5Kx8d+6ZYEAYg8ERnGlLaeq8r7wWjbDxA3bFlFjN0YowuunbdR+0kT1OqILo5jHRiJQCG40E6mhFYzV5ULnkNwCpFJkLGqGk8TciZCyWJG9VwxKDN7I8tsmSpuswlhrdhnTQYJNHv2AArZHFRAnPLOswcgtX1zAwAUcAeyc6Ha7Haq0jRBoUmgwGMIPjQhxkiMzNTUfhyTIwSj3WYwg3YyadoGRHzwoMDoU4xIyJ71Obw0yqBZdgZqSL7+5qPNX/3f/W/vXLv55374z37/H/ihxcE8obNmErfDbj2cPTx55fXXXetOTy/nsylmPn+6GdYQhgVDBbUc3jjcDsOsXUiXZYw5Jk+cUxzH/mh5cOcjr8B8BkSY1IWqlzTUkXLEXXdxuku4qWbTied+N6KBSb5yUA2b3eNf+NW3z7945+6
dad2A+aXA17/5Nan06udfm722bI/qs9Xm4bsPh23u+u2NV+9Us4abxgCHqDxfVrNjf3bO486yOkQwdM57BFUxywjqEBRxGPrp/DDmLmAmQu9YQMh7VeQCa/VekZ3HENjF7eYrP3fzpT/1H/xP/hfLf/HyL/+r//bBwweVD2hKlJF4cuX2F37Pn/h3/uj/ePmd83/9j//rxXbrA20oceVSMlV1Rg4UkJQRqDzNi2pURpY9bLYYLlSF9qXSiQCc84YZmMBIEV3TgJS2ZvKWk/QGbCEoefABQFLszYEnErMkgMijKGZFpUD6jZ/92SfvvPPdn/3koyfrnPCzP/iFo1fu+ADjsENHWcZENpu3OSXJBITTyfW8S8Nm5SYTF1hyZl+ntd7/9tfe+spPTTw9vP/eO/fvDeSjopJEJVXzZOTYB0LgrAAZWI2RVYmoIgrBQPsUPLpgSNmjRs0GBIpIRqRlFEVEQYmSANgZI3tNAxITOSaHLKUULGdV45HULTySkzhMqho4APvKM0EPSXQY5vPl8ym5AB6IUEt3EyOQQVIFQQAgK4iiYiUxMGRCxyhigJAUEYwQmJAImJHQmAgBDJEYM6gCEBmgeoKKHYJ6JGIoGssqy1osy4AOkQKBZzDIdnSwuPvii1h5I2V0jOAIxcwxUbHGqaJjAwEyciBplKTMvvEaZTRQz6KQh3637UYRA3BAlSrlmBQ1W3Y0AHOoFUyzmAFXbTNkA6O6CWr1B48eX51ctZgItKlq5xwVOAgRkStzYgH6lmHTDHD/eiUAhRiz54QA2J5GVPxGUtBFQEhlVYdGaoKMzJhzoVfK6vz0/MnTmlFUrFQNEJnBMHYI4D3HlO6/96367DG7ytABOhUJlcsqydAHV0I0BSUKoDlFQpMsBlpAJWaKCCoZAIiJuBCOTbMAO0MmZMJSCq+IoFlUsokSYYwZ0KTU4hgYgKqUnAvSXuhFUZME0snmQlYrTKaAgppR0PsiVNmk0doBETOLMZplhMY1i4og9WbJLJphEkXIjngUG6JwFTgEMBrHLXHdtCGO6sz1fYzd2Ruvfd97FwTSsQkjlkyGAACCKAAgqMvPFwdIZMVfjw72NVqoJmDF82Vmui8335voiniE+2oqpD2xWPd1ckQkWVTVSp73tzXZIjSWP1tJ/xSGuGLxASOoSrm4S79VKalXzUQIpRirVKPZcyAS7qd9BCsmw1ITuQ8zFmubme7jkQqly60w+8yYi4BFksUYSniNiIsUxcigxRBj+zihFVehAqklIKJnz571UcTcdtT7J+fGdRPc2KNkUNXddmDy6912euXw/PLyyuFkk4fa1QROAckFDj4NO19xU9f9bpe6Uc2vNt1qmw7mSpJzJEQPAM+NQaU5ngm5eKbALOfERMWkQ0Q5R2Y2JOe8mUpOzntTeT73aNEUCiG8tAIUoryIMHtQUxHAMigRMZnSPoVpIDkSEoJJzgrqXGUqoAIA5LwZgmXcC3CgKnvyPIDhnoZlqrrn4AATARQClaLtq9kIWdEAFMq1ilQcZEWjBiydeag5Iz2vZ2MGAGNXPH0lBWmmJpnJ7e9DCISoIsSU0uB9vb/e/m2p6PzkaTNp2sblpEAwrdq0TQbu+NqBBx+Yx81Kuu1yMvEe+25crda+Cd3lJg391SvLs92GwUuS06fPrl67CoNSVNA8DaFCuP/+u6q9n4TgWojSbXpH+NXf+uKjs+8sl1fGnTDIZNY+uhj/0v/8//gzP/FVOnn70dffq9vZfDH7B//8Z/77r30Ctu927zz88NfenUIzZJ1YcdCRmSKVFOWefVgt6vWj/pUvfDrZyGpV5UTS2+/eWxzXH7z9yF99pb31MtdisLmCkzsvLM6erdePPnTNsrs823XreXORgsua0QQR2sPb4CgPmVTLfRxNFcG3bU6cBP/Un/1PZs20qu5/9Wtf6cZx5hf9enX7pZvgG1N7fP/hrOXFQavnq3c+OO97mM4m07rRcZ0hMtBicXB2en4vyyc++bGXXnkhffjB6XkPSiJwvulYte8zOVVS51sfJi9eP/TdRtPoptPE3G3XopZT2qwu1hePLh4/2K3Ot6tVf7kbo3VdGqJalphUEURNDLJZkYGRkAjJ2+c+87Jlq2ufIwqgd0wemkmIcWTHgWrHbhz74F0Irqn8phuJnOaxcgQmjlgJiV2oapUIWYi4WOrqyrvggJxloRrAkIGyZCoqtGYG8o7JURqkahulNQCDmnPOsQOVo8VyOmtIeTJtV5crBBx6F8LMoR+Z2bsUR4feV7VnZ4BRh+B8EglNnVWAVCRRjGmrDI0YbLc9o9bzelw//Yl//F/88T/4B3X10lvfeqecgvm0ZrBf/Jdf+tjv+pF7D8/+3T//7339F/7hvXffe+WNN1776JvrGAXrz3/+4+u3v/LKD3z013/z6wG/HfLDKws+3yVHmAkRjACd5zzEUFXapyff+ODeSMN68cOvf1w2Z85T1v0+gIzJEARCqEyMDNoqMMHsMDhA4GA5E3OYM85rAJnwhNi+53t/wHIki6Hm67eOLItDqt3Rj37/v8cR9qPPdAJGQNguaoXRTOJqleyiv3zSTm9wXVvxIaToHIAIs9MRjQSw3EioPM5NTRUMSTRjEjRAI1Q0VYtxgtqki7x5Eob7ta97gJCH2t2o/fxyfS7CA1d9HP24mbIEM0ec8mgIzjkUlc2W+swxelCzOAydB8iyCt356b/+IsNsyBk4QPCjybxu8rjDqh1JIGegEdEbVwqsooPGZtpMjq/C6pknQBmvvnj1a/eeCsUPvnOvqQMk3Jxt0mY4vrGM2x1orCtyYsNlP7Mq7sSZ0yi9xd0uxrGHmI8Pj9vJ9ORi5Su3nE5O77+/XCzT5YCVel+tdvno6CaqnZ88Q87Xjya6y62fhAZzEmLsdsPh4mrcCYhCFsmRqD5YLvKu2+z6h/fOdMhqtt30k3aSLQPgresv9N0aKFFAUnV+vHF09PTRPXIAplU9QxEvgqJx05+enUyXh5NpNfZdE8L52enB/CDn1K23s6MlGm0uVzeuzs83ZylG4FK86Xd9WkymB8eLJx9+YJ6R/LbrehibunXV/lmQxxxcQER0ZITBtd5Xjx7de/mVV5qq0pg369VstmAOoWovzlch+HK2fd2WR5UmceWWrClrIgLNWoVqlOgCdbtd6yagiqgGSUGABRxR7V3l1quNAogKAhByH2PtvKn2uR9SVLTprEWDpP3Bcm7ZSGiz3Y2or7zyinfj+++9e7HbPn16rimjkSfHjjKiaQJQ551pMmJHgal2UG02u+BRPTV1w4y70WZNhao59RX7N1//6Ct3Xhh2URABOAvkpKIaRUx1u93t1uvZZLqaLEM9V+MYs2PwVQEQCrNzzmXt2DyQKSoW0ENxkTuvBuyrXBCG7IEJs4CS9+0wnvvabVZrVc1jrHxwjA5LngK9q4dxrBonDpgIUxXHvnKh7M3Kq+UgW3aMJmaZnTNJSMbsNCt7S7EHJ957NFYxF1xoq5y9R28peueCIoA5osr7xbRRzSjsQF2BjVtuqso584SkKbiJZy
KlrGkpvFYtZI6nNx26wzB7nur5rY3o2DDmMpA3PjxshhnZUhVokqFOGqbIq12vWd6xcJbSipEJIWUy1VVQ/bPkbe31tc392u1+tZ0yyWi7qKLz97VdVx/3jVNvX15UU0J0IWRgJwzbmEEFI3VNWspE3xEojzMOiYQhBIXmEbMRRPqn0bWkchqSTUph0KJQ9YMGAEL1aydgkENPvps9PZ3t69gzfAwQrkfqirVtUjtiSobhlQkD3kze1l085jqFI/ILGDatFQxd3jGTFwKHmMMbjvyI0SowGQUi6qxdBRzRAxVJUjINK42RrRlASvm6rvOhFEjsQBERmBpwAODMiFwL2oE6pmJMg5EzhQzN2WKQypi/Uc3a0k8/j67NCXMS9xaTap/25eEBLRiICEEVGcsKCzCKVBGEspZiVZclckzJ5ibMyKm1vxpqpVM7NPHS4EogUyZUAb82YWGmQDVXN2YgBjnpDbqZ5VRFSzIAoAqObABNBXAULwyLWbiVAuQwhguhWOCAE9blOPRgRkDswShItbDC0532UVD4HRLSu4qqo5OAvPvfTujoRmiZzBFFCFyQyIQ3GNoSmmRFiKeXYgVDc3QxKOEqrgbnUVTO32ps8K6iAgJBgk5ALDoDc3A7m0ixZHuN7claxIWNTaeX13eW0OaF50JxUd3tvf8poAzq63215DVc+W9WLJsQED061asr4kQGyqqg18fDir2NafvDy/9r7L1b39sFgqyN7R0Xp1WlJvdRiGEt1W+37y0bg9v7jXHorMb7Znbz55++mtb3E8+Nr9br2R2cHxKq5/9EnThODd5vs/aGWTV69eXff3/sYvt08O83oQIk1FYpQIr63EjgTIEghTGiNNroQsgF4SECEzmAlRACeMilCtZtAEsBRRkuecHLmRqvWhc/BcbPosGzNMfcjuVQg2bAXdCEgmgsoupzTBMSb47BSIYHB1M0UCAJzaZ2BaZRNHs11ND2pxdMQpbyQTaZXcXocy2MwJCNBth7eZsFpo48hgTpwVQwykhYE1DUAOQpg0d1uSCjigDmAJS2YUdGDzUkodWcfRiyE6YhREr3kA4wKYvY3N3e12drhATspMy2rxcL/em91tK66qxV57eb02Z83KTMRUciaQ4sjTlQZOIoSoGYCRiG1UrsQYMUQncASqRSJ79nFQy+BOVswInHFQtaGriXQigAsgOCNMhoIYmFBzGUOohUG9X7bRxpwVBKgwZAdAKqUgKpubGYKruwghEiMQFK5aCUiYAzPyCLgLmaBCGXdSEUuIxIw05FyK51QQyQCdMEqYuBsIGLk+XMwfP3zw9uM3q8WealF1MI+hns2WIrGdLyW0UwfemAtP9esOzGIctRTQlFwVfEydlTQMg0MRoqkBzxFME4EhCbAAMpFMZVOqOZf+JuVsA56fVZuOFCBnikWIQoimA4K7eeCAxOjOCDrlq8GZJuOAIpqCsmBgBtexlCiVW+VanIwopTwoGDNiINRRALWs++H8Ll32ld+msStAVFfuQSGg5G6sUKAiQ00hWl2PV3dNExkJork6EBBhIGa11nRWBWRmYUYEAyIydXePQSaiOzjVgSOYlS7kDgbPw3D0xlc2sycbJEZF663c1MLgCMhB6hiVe9JxbLSDk8/2F/vLem+QB8lXkAdPt1YjkLblsoaTBo2NptggoTGRghPCNC1PAGSduDzECMjESAHRgBE9m/WEAwUhridhl5GLKtZz5GjojBBI3A3RgXZsICKZRurIkFFNqiIHkJ8J+WhgxA4GqDOSCrAnAfbihZjA1LQAAGIpuAWZJa7AjbHiKgCyGaLExL4e5hUMyj1hDgshhTFnZ0ML6BaIAKZGHkbHqeaLSKaqmN1Wfco8utFEINZpjzoRiGnieyCKmTIJ7KYwnOgZgPh6WT+Fy9QmvQtgCnsys5lNaoKWiTOYJ/PIFP+wKefrShgQ0MzMbOcJgJ0tCadW1AlI/MUMCI5T3hNwJ4dpIQo7Vs6k+Ux5N0CziUDG7k7EuRQh5qn/DDmXgkQ6BUuJATBrISYDdisvXz0fhoTC19eb7JwLjkmx+GJBSEECFqZcwjyuLj5/X/tXD+ZVQzKO/Tj2SCAsRDAM482ga2lv7tZ7Am8uqRW6S/58sNtMTahCLnvL2Wa7Sc4X130twexahKoZD5aJ5pBt0awydrODZtN1bJEMt4MPHddNIdGJ+BM4AoK7vq53opwLM8OuD4qYacpbmhfi6QLNjo4TEgoAAQzEXSfTjk06DTEiEpqbA7mr0Rf2IjMHFxYRcAMi1FLwNe3NiQlBy4RJMgMgQNAyKem7zw0yorqjObgZkCBOtwibkOSOzsTqhSbkGjq4WsHXf9KmjKcjmmZ3Q4mTCvJabeQpQ4COiALoU5Yc4Yuk22upKBefzFbktJjtpVSqNhzP4eLi9Gc/+dOzx3eH9750yAsqJenIgasmruD4rSffuTr/ydXVs6YKdbsPnrwMljdSVRELYqoPZstlazasu81s8ejm9vYr731pzOM777x5evn5W19/99mrz2fL1dnl9Wfvf9yu2k+efirt/B//8z+qEB3KkMf3fv07A5SmwvWzn/+Lv///+b3//v/o5fl471tfhypAG9ev+re/+csf/Kt/tr59eXjv+OWrk8uT09XxvBt0sVcLyfFy+fz5xcWUq6fdNbu79Car2O4KBjacz/e9zCnnOeHpJx8dPV5V9+7v7T9YvPNLHuPREiBt7PpqOT+625yal6IKgTDq6HcKhlR1gwnZzempLN+YH+ybqt7epD4dvvGNTz74+PC9J9Vyvh6Gr771jQ//8Sff+OoRqD/98JlqfvB4/6efffjW/UNESOqVNE3TjGOeHmyEHqMMKd/1ed7Wm66XQJpdwMvrFRbvONWvhaC/pBPBFyaiXyTTXrurxqGEUJl6NWtCxAwEk0c8iKuGKBTC8mh//aOxGVcHRw/JeL3t7x8cAIFaMSjCMtH0ARRBWcESndyc/G///f+dFwemYgrmu7veTsiaWHaUs+8d7SPjbH4gGWOMXkbhUHJhguurq4nzFpraxtFMtaTbmzMBH0peHa26i+18tTg/v6YKui4BMM7ml+vNam//6uKcMcSmLmgsXDFDDAm4WHbU7AkcMYRQR/M0vUanL6826yQ5jgjNoj1SKYN36fbuZn3v8YN1V+7GZAFvi95u+u/+6u/9/d//R2Hb9dBThHsPHnTPX9UcKSioXl+eLGYHcd6cnpweLPcvz19VFkMjOhgSKBqCF9PpJh0mj6NNsxwicSmG093KFF5/TK0UzchMIXI0IiRAcLVhs767vvr4089Vp+cZoPB8NTs8Onj46N7DR48XB8dvPFigd2nsu74b+g0RmzuJOABNtxNgcKXXufRiNlEMcbfZgqqq+5RmE9IfayvjOBbdEwqx247HTRiMSrJgWDlA1u1Nx/OGmtk08OZSqEAdG/Oyue2WGEZ1j3NALiPWq71ntzerL39jvV0jMe/VsZ41Mj+7yrP5wxc/e/rNX/rOJo9nl2cOt/Pl4b0HB3/0yUc8r2/0qs93nsygPr3zL73z9U/
f//ne4VJm0A9DixhjRWx5jdstbTdjzpmRFDzWdS5ADm8eHMayfmO/CTF8/MlnB3R/OZ/ttfuxOrodrtoZP7i3f/LsZaxkvre8vU7ri8tFFe7WV7mU5fHekDA7D5tUz/2r3/722cuzx2897q6uY5S1OToFM+vGe8sWGcbNQDXyor4+v97eXHS3azP69JOnXryuGuPKKa7TnVQ8m68A9ez8ogzjXrscjU00rmZDyswEWmqgm5PL7WYLArP5fDsOsaoMtK2rIY1NvbcsWzdryOYrOb/e7q1mi+WDl0+fV6FezOO4HkSYmG83XR5R4kxK2UlFPM0Y02EehpTfWM1fvny+mc+W8/l6u91suqbSet4+fvLG2auTs1dnh/cfzGeL0t9N7dkGxRFRhAlDi9ltVykhIBUn2xrBttswVMMwIBoDVCGYKpIO46adV9vNNo86ny3Wm5u6CSDsgTd97+TSNn0aDu/fi5Xc3Wxn+/HyetOEdsg5BjGwjGXIKVKFRKUUIDRA7Udnc8JtN7qRq0uQieNXSmEhA/BioSJwzyVLCGPu1TVWMWVDCUiqClXburFmBbeh3wpXgmheBIMWQxJAF6YhpVw0RnbGrCWlMXDUVLQUxF41ARi459wrcNGiPjkp45i3VQiat2PashZw1FGjtP0wxFg55JQLCiUF8EIMQ74wUBYvCupFgHLuQxAkRWawUgyKKkBx9DLZ/HDitGgV6lx6IkfG4ja50iNVo/WHR/dPzi5msVJXJGWmCmpC4bh0dOYADjmpRHF189zOV2ksuYzEDuiDpli1yDHrltWZxA1CaMW1TCwqKoGplD6G2mzaTo9mA+JUuKOAZKoh1infgTMQGYGZGSKLqGoQAqBc8gQARUCRkJN1w1gMQ+SGqIzp8cNjBGhHLeq5aMppQbMyvRHss3nFAuvtJkYOdb3phtjsAmgjtCmGhL485jFftU2YzykgbgZQnCUuiiOCRAPuE450/n6/XfeNeR5Vs8UxJsL5/N52bU2I6+u7PRRwTgYXV91otO27fcgQOcybLBjm1aBrIiOJWdEfPrLrtH5x1uL8wVcf0Id/cvr0orrNmw/O519/M4M72Gw1kyqOlogpm5mphCqbMmCoaiBzNxZxg8llZaMZOEpMxYFa91DU0EGIx1zcDAXMirkDaMWBPTqyc3CwGFi7HgBVM4KqF+RKCX2Cb7EoMkswS2hGE9cadFqUT3sPJi6qSGRA06TDKAAA01MMCQBdp4cdgRmBgxaYsB8kYAUACUDNHcHADV1QW4kYGy9unvNmBHUgKqUgYHEibhxFwSXMy1BAPApoUpZYsxSwKoRe07yqlDE70qze3q2DSHLzbLPFQVNXhVJ3t7ZjO3h8b/BhTBmQAQMz1xQ9KKACupBMhzUhMrC6rRTc1AJzKcAVR2F10FGVdErxcGRQg2KjAsA0OKALJx0EPRqiWi4ZiQDJ1VAFs7tBISB0BDUfWJDNWNTAfQKYEU5sRyQEp1xKKtnL1JtDbkU1jahVixaI6wBNKDY6oVlhCeCehp3PWjjOZ+2ijqnkoR/HPqUhJ/Ci7qYsgUzbyA8ODt68d/zkyXtvPn63ms3cLaXR1YiCSOQY29nMkYlcs9Zuk1RERCHWhoJIriVrKaqqi1ySakECQSKgGKLvqoGMOTjsip9d1c205HHsOrVB+03Srkua3UXryApoubSB3V0BaaqvckMSAjRH2/k+PBCyk6OCKwv3XZdTms+PNttNQDAsgUIw8Zxhc1H6dd1U2cfb4aKEbYppqzkpjhSq2AQjzJu7zaaRqsuaK1utmv5mQKbZam5jJiER7LpRiBCdkOaIhwIVG2jhWE3wWnKc8McFjYTJARXZSASSaKExxthwFzbPmRZhfrQZMocVi3A4HUdd7R3fbbbG6GFGSOKxHQfyF3b3fMRPtH4n43wz0p0fIp3F9aez8bYFIhAAU1AhLK4MWNxxAoo7OmBgBiJE5hAQaCLYT7jjGBAtqw3EAlbclMibyFqKUSGiaRtKr+MwQMi7SMf0znL2AiSDR9UIlIlDRkeEQDgjqNzNQRFE+DVRXQlMRBkywK2hAM7Vg3kwgshsSmNo+uq4p7H0r1rKPqi5y6zmWYhMAVEIrBji5F9Fc3BEMydTh/zF9KRFdzxjotfmIHJCsJ3jBsCngzO87g2aJjEitKno3hSQAPj1RL5Da0xwmUkfRyIAQjNw9wmgAxPUxglx+o/p1UZiNRUKkyvZ3dBBTZGQiHed9qoTagQApqYsZtll1t0m+BECAaBaQSBHzCUhssKkGUHWzMRqgzu7ZkTyaYJzI2YD6LuByRWtsFzddI647XM/uhM1c+EqbPvBwON838fF+fPnYeiOFm3VOLj1o7kDGYKZmg+K15nuNtt9HN9s6OG8OlnrydYGoFkjhNT148naZoyezViSa5/627sLY2v2V6rgKWAlZRyWs0ZmizyWVEry8OmLi6+8u0RXAGASdQN1ZnI3daXJo4UIKGqGr1vpCJFY3M28ICEhFy1MNOVSAWVnvJg+idOpxYyApuvB3R1BLRPKFCS0Xd20gTPyjhFvluEXCHkiQndDJtOiXhxQWNSKubkXwGkRI9PXTwoUk7gVAHA0QrSitqNY0cSacfTJGjiJjDuLzJSdnKxiTuYFwIiDm6kpEWoZkCp4XSrxC6mIUL3kMiSpKgOMdRxtm/PGGDZdd/3xB2sISvGo3WOSbtuX4EM31svjlb/bp9HGEak11TRqty6cacwJ2R88fILs/dDVYUWBm1UoNvh6vYTF5cXm3jfu320u+hHr+sHY/ejRm4+7YVgu9zabfPT4+JOPn0XE+1Xo1h2I/fgP/1ASnbz/TJb3OVDKxWz84Oc/+o3f/Td/+M//m9/920efP/t5OMG/9Wt/+wf//L+er5rR0QnS3frxvdWLq7NumDoQwRGm98MRppUCE5FT3cSvfPfr+/eb9cXp8qB5/4efPD46+v6/+LNf++6vWtXCqKu9JVu6+uzPH7/91Z/9q5+8/dv/JpmPnRpFhEqAkbTbfKZ3L8owxFVgYQK6PX+119SLo2owHQffXF7u782k25BW6U6H0z4NN3tzCXA1q7MwtpFTQVYJVUglu+O8naWUGOl6uy2l7B/vj6m3KICWukLIU65xkl/+YuUZ/gJl/YV8hLt72F/4spxGCbGdzaWuqpYhaxAhYgARySHEUkopEGar4zfeTuCQ/Pb2xbxdbcdOiMGdiRnDOG6yw9APqn7Znf97/+F/QOQJv/Bx7spOJwDB9JMhbMvAZN315d3tsF8tm8MWsPRDqqrail6f36ADMk5+P2JkoeP7DwERXWNTnd6tSez85dO3v/nlyPTy/HRfl/vHe8PQM8Z2HkuxZtH0ZXTLgVjNOQYDYKgRubinIfuu9xEO9uaILXSdzeKrcb23rC5uNk2sn51fjtCH0LQr8kpOX24//eTz7PTo6Hh9N9QcTa+GzbCaLzcXd6OU2LTHy/vZbXO9yZ3OFm1oGxjYbM1Mk3sQDByVgCaU2FTlU7QgARbHqacQjZgmawMAgJAjFnOzouowQdimI2bAmhlBJm
uZOqaxfPTxs/c/eDrqvwwVVXU8Oly9+fDBm+++9/jhu7OG3FPXb1PJaRxKyTv0AIsDUuSp8WA6kjk6MCtYdu/6bS20HQa3uqr3CkctWgUBg5yyOY2bLlYSiK9v1/P5/aZdDaVDBjB0BXHKycTBkfffeRv39rMN2+24mC1L0SD1GhMu9ufHaoD1QsYO2+X+W1/69suLZ8PB4uHDJyd//ucz355dfbaaibNguVByFhnW2XHv+m4NrEO3YZXZrNl2XcMxQ59BE1cj4Gx/OV9V1xcXIVS3N+Ny/63/6b/zv/yzP/g/3pTtrUIusrnuhRp1f3VzsTrEd954+0c/+mx73q/uH3kgmsW37x3cXV6tNxuKBI7DXRLfCzFsx8FAYiWuqWCexVj1dvr+C8HUUHV1dbNYNe6Yb3rcNJqG7fWmaeeDp2ylCbPr801Yeb3kalEjMksx18ihqqqbzTjf3wcNh6v25uKsXsb+tl80sR86ElG00aykBNmIZOy9UHtv8db+qv3k809ypz3ianUPSzo7+0zd110m7ACGRE268TF56p3RvvBbV5WUnGIdwIyJNQ0lpftHByevnn3pS1+pqoAAwLC+uZov9u49enB1ef38s+ePHx9D5lI0bZNSYYJx2BBIyrmu2+b/z9V/xcyWr+l92Bv+Ya1V4Ys77+7d3adPnDkzZ84E0hwOwWDYMi0ZpA1INCDA8IWcZFGAYV8YMHzrewcINgwbkmCDgk3RoChKzGKa4XDynDk5dNp5f6m+qlrh/3+DL1btnkPti27s6i9UV9UK7/M+z+9pG60VMDSLdLu7arolOeFcm0uhzdBPgyGtj8+u3rxeLO8gTClxrVPXte6mBrlbTMMupbZWQ7QAruPUZVay1MYmZpW9Gk/TkFOaIHGIR2d3yWG3v8mRPDWY0WKdrDS5K2UMAeu4ocBu7HXA0BGFSEmtGGDgUNzJFFW1FgY+gFJQxcUjOnHDC1fNsQUdCcmxhhAdjBBVa8oRmQwcBQI0bgwOzISESsSBvQBCQGd3Dk4yuVYBQyCYdBcSWeEYW4SsWgJn5ujmiAzIbkDIiMGhMrm4ICVCAKC5a1XZOQRUr7USsUgPQEW0wRhTnGwMkdzADAIl9+rEXj2GxEDEoMLOselOtltJIYHVQO2oJca21MFqpdQgogLFfDT1faJlhS2gpLYpVWJqixnrNDeDEFCxPToCJg5cyuSgRA2CI8RSa2BERAdliAEDgDrOZcDurEwIACI1zH0loPiWOmimZj5NddkaEtzu+qGII7YxuOnZndVUVEXcLcU8TtLvp7bbr7p2MFocL6SOMaRalZAYiRClHgTT3DQTrqeN3A5Tt1i+c9p1Xdxte9D2cj+UWlIMiZlRZSh14DooWhhEmDG1OTYxLFo0u3P/7qtPP4vgq2YhWbxW7TcIfv7ufb7bitg0wquri2Lb1dkqAsdp7BaLGjF+9XH71Ueffucni/vv7p9+Mk4/Aq5myl0IkFgNGWIMKgpMhhAxEpC4gjoamtQUsmh1A4K3JnJCEVUlCgtO68i3VUYijCFXlxhz9RIAkZK7M3rVaqpG83rdORAxlVIDk6g6e8gRneZ1JficZUB4y+swdYxh7pk1B6IADrOzw9QRwESBCJgPk5OhuzMTEbrUOb4mboCz6R8AiYKDVmZUsTJOX7z/BBANkVNCwpADdZ0JBKs+bgOJmfE0MFSwQ16DUCFgVQgG6J6b1tQwhbxeLe6c3vzkJ2bSpBkeb03X1oI1eFq0bcO3Uwm5qQDccF4Fx8zkRYqahCYewkzsHDA0yIEZqOxrTAEZgR0BAiKqggFHwnHSiczUqoN5LcaB3ChScjRGlDKZBQDkkIsUKLNZGV2MA0RO5jZn2oZxDxpMQLUiUQzMnMZa1MGBRJ2QzSoAxByoibzMuEo5JwSvDBSyAYGWSapNU50O14KYc8pNbpuWcb2oZT/UotdDGXWeWzAC3D1df/ELX3z88MHp+YNmeZLb1lRS28EhKkHA5EjEgcgDc5kGcHU3xoBeGBQp+HxKQwyMTdugKxO6E0EIMSHP5YwHR8Nhtq6T1VrGkcETwn4Ey2lCC4mKV6CaKIp6RFREVQT3cGBwOKDNzdamGogDoshIqAhigOo2TMOyseDoVjDQVOdiAbexOtUC461st7C/nXoJvBdt0mJpKnIzjcOKFw9OjkUcOd+wbiY1IkBfdMm7CGYG3jYJ+cC7RcAFs5aiwIAhoPIcAfE5HO0EnkwXgGRcsYF3fmbXdWDEm+s8DWn72XEqC24LNUOtVTtuGgPMjU/DTTo6EylEwYFFrwLVND6F4aNGwqr5Sshf20kNw7QEZDBHYiZ0c8SIAEhuFgARgBh0PgARfS5lpzkEjIETeSb0yBAIDRE5hxDQ68EbA2icyC3OhN1DETg4BA7kJiEERGFDd4UQvaxcR4yMlMRLBOuAlgABsM41YQBO4CEwZA+RrFlSqSpoQSEP5hNOZJQAxX0fgiOtjj4Yx9soIwd++KUvyslK6yBDQXAGRopzpM4cgczQdRzJR4C/P49ORIQ0d0sBEqEZIJjKbCYBB1UJc1oPHInMjIjB4RAT+rws4cA5VjUhZkdyPWTKZpcHzoqUq79tsCZEBwLEeb30NlgGM6sYYF7vOhCD67z6QrD5rQGnzwMmosqIcx0YvE2VHFCthwoTR2AzICQ3rypMqKo+xw0O9QKHnIeBA0CKXMf9dr8bSqlS27S8ud7P3FxqO8g8FWvo6JSOf+sHv35c/Z3TtFfpi4B7VQsx6lSL45vtuBPYTbK06ZsPmi88Xk1e39wOmdBc5+YLTnRV6nK5vHO0fnn5hrsGMez2ReTa1ZsldXmpu117nK2YqtSpgqITX9zcfEGPUhNnKc3NEIkADeZ8FYqpf74Sx7emq8PXKLi7CjJHYjd1mGNIDACmlWaC29w9ZAbEM/hKrc42XyQCRzeXWjlw5CTy9rvmdJu5M729paHZOUsczc1NbO6n84rEM54RZnOICTMftCBEN9cDLCSYGyETEqgioqkiHvpEmYiQVCscGFhOjm/NZeYg8+fN3Dkkd3azEOK/IhX1fd/kplt2UxFTN1UpZdz1Y60pRrbxxSffunz52fvv/sz5+m6k4NVNyYmguffgw7Pb61c6XF1ffaw6xZbHcVANbVoYp4qD+tCXmzO606XGh/H+yV187e88/tIw1uD+8OG5wg2vKK9TtWjMD7/wQb+5uHfnOBGbGLqZDLvLy/Pje/dOH/7w6Ytz/dKkEoRye1IT74fx9uqFBfrV/+6f/dlv/hmsL17/nX928Wp//8mdcb//+V/84Fp23/ruy1KAaBZ8AZHMQNUYiZkII2L8yld/fry6+vTFi6OHd7D6V3/xw5ubq3vvnecuTgWnfg86cdtNjk+++M2bz36yePQkLjtxW919Uspg6qtlvriwozuPF++8t58gBR52r67efLbZP/3KN3/2xcc/PHt4JtN4dveBttw9frIr++3zq9vb/cVm39IyOm73PTMTwTDsU8oupg5Fam5y2+biuD5dP3/9IlIU8hh5EgCfddA/1oV+KoH204+8/
ffnLWnzmYYJiUOTUpsJjRkcgGOwomYeUqQQrEzr80U/3a5O1tfPXoYA6GBVTStGKjqAY0hRQThQFfm//Uf/4TTUqrMVbhaJ8KefIQDAYSsRvvTlL/2dv/Nf3j1/8m/+5b9cVXfXpVnkpmvqNC2O1wbuqu447ftuuUAMqWkqmCJIP9y8ue7H3cnpCSN5jIQWJtHt7vGDs4uboT1e9bv97AUtdYqY3AoRcaBA0R1qUSBIfDgYSMN0W1aC+/2bnAvFdnkWa1+Xpx0Cf/jw/d/63e+FdTpf5Yer9voH3/kf/dv/zt/+R7/zl/7iN/+Pf+3/tN9M24nefXTv577w8De/8wdAsTgtTu5/9slPREu3WnaL1bOPL9DdTANHNQGd8+fIgDNPjAO64zysmPscJxY4dE8A4dzIZGAecBaEwQxn0oOJSw3zXh4BBZaJAzNAJMZitrve/PbLq3/+O98FwtUyL5erd9+9++jRw5zj+uhOTgRsBFVlqjJNU0UgqQaIgpUzF9mxtc2y299sjxapFtgOGFK7G6+WyyZR2O0GbJaco6q0Teihpnax1b2g5dyakKjI0AOI19r30+r4KCyST2b7ipyiuRvuNv3R0fHRyen+zfNueVKxFpc9IT18pzle4bo92e7efP/bd+gx7aZh2F1fBJAQ10cNG9wOr168Io5lnGwot9d7d9hsBrEhZh77zdlZO+52wzAFQhXLy5MvvvuN7337d6ep91Kvb/eLxQKcYtfGaLa7XeU73/3979xej4tmlWIwhJNFunz9+s3r121sZcL1smsi1Gm6f3Jnu9tNrzaBYBxqv9mdPj5enLW315t7p/efP3226jKa3Tk5u7h8PdwM7aJpj9qxiEIZpz53a2B0VSuly6EM/dXV69OTEw5Zqrbd4uio2W8uvE8Xr6/fv3+vl+2mTNiQgTHHDLn4zP5QNQCqL178QGRnlYJGGSjkxfXNDXAWpZBiu2prHTDh2Dt4CMhepiJvIWbuxFiKpByl6mK1mEpNTWuK1zfb4+OlivT7en7n5OriarFan56dSrXtbs+G4JRSUlWKGRBLmZbL1X6/zSkVEzaiEEPbQEwhy8yA1qm6ewikDuqeUurHiTlNdsMcUm6G/QgiKQYP1uV82W+YQwhpHIamy0Md1aHWsmhadOhtv0hpGqdi0rVdk3LXnTLTNA3c0P52YLapjkgopu6EyLPp+ODZxjnvgTpVMA8hqktKqUwFFWLIQ73plidjVVPhEJCCuYN7TAEMxTWF3MS8GbdNzmqqRk3Oo+1m+Mt8cwmAYsWsmkwCAdiqFgRw9HHceqhtE1Kgvq+m1V3Gum3iotaa4/EoUxOXDmSiKTVTHUMkdzCpzOyGgWLAWGXPxCoCphyDGhNgwgaM1VzUkA61uyAFyNDRYd5RUaaYQyTUIiNwCjlPQwmorIrEYhpyQymQMkZEh9TkftqbCrq6HVy6gRhAY8xSFABDyIxMDsTBTJiiG47aBzQidifEwNTMVRzuPjtbi0wEAcHAmTAgsIMgoYPP3WZdewKEuYkpUTWdC19T5EAoxUAAg09j2fbjnbN4tM5d06yXeVKZpopOhFZxkklSCLubbcpp2B/g7jLd7i/H/eZ2d3OTMG4n3BfVESxQCI265SZWlSoKHChFqlMZSqnTIkYy8alfn3EZx6p2fHos1VAoMz//9NlRFCnVGsSOwwRYZdpM+81wdueh7bX2AyB2d4+nbRlKWX/pyZgXzZfe0U8/Wiz4djuO18UbYiZVMRV0EKmp7ayKuREShzBXFQEQEqkKhYAGxKxac5MXy2zkhiRlFC2sBSATEiCBAZqGkKcyEDGHgA7EsU4S3WUYU5sjRQ7sSkwcKIGquYKRzzepQA4ErszJAe3tdh1MCZgpiArhoRUd5yubq5kjICPOkAdwIw5VJyaamat+YG24w+w/sswcMZyf3y2GRTQxhUBIoDJ2vJqqxDZpHZEjBFADaFtatojQb2/DcmG1NpH7bWkWC+fYr5vmvXdOHpy/GXb9x59ydVIPywYAA0c3SXlF49R23atrlcJiKXYJOCEYhyimQOzFgiO6IRvlOAGZWV4SUvCAEEkcbBJ2nGFoiGBuOWPXUd2LESOQ2OxAqMgBkVIIYmZaidAZwjywuTKxWFFDA69Wq1KC2EQWMidyEwJh9FoLijMAuKUQHB3Y8zKl9ZJiiOARDNDFTdS9TMgkY/HxEDoQEUdPKYKph8CLZrEMOEyb/WBmEfioye/dvf/OvUd37tzLbRdTcgRAAp+PWafAIWcOiUNUre4WUlKx2Xwxw1DQAdxDbOeVu88nAITAnHKOsTVHx5mZMy863VxUycjVdQa+aDV0sqrozilKlZgYEUBF8GAcmI2tB9aK6+E98FkbwRTzXELFgdpuqaDElNNip9vAqd9PbUhWddIalG/KG205tYGROKSoTmKumHkRqW1Te7F51TbHRDE3oWS9HfZaKiOpKgKoGxE6UGBCteNlN06wA9oNJSZsE1WZbd2HiqNMaMWRQBd36oOfH5bntfrydJOH19P1x9Z/nJpF8mvDjtbn+1rFtwg1rc8sHwtRbI43233XoNWNXf3+Gt/k/gbL79Kum3o4xkW0SjYZCBKgOYAFBAUnREaYe5fgEFsCQpybvHFmFSkxsaOYFVRhTggcidgcTJAOOjVRQEAiMp9NLowUDQApOIGIMDqwxibUgSiEA1wc0RyD49KADSoSEDmZaWUMjtxbjdAsqy/NoN8ZR4mh16DAgSlyRAwjOoa8TBHK3glP7z3RRYemGQMYxpjcnCMnTsQhxFC1gANY/3ZSQQNz03l1ajqXmYI5wJw/ehuV+FzumfERBu4mxDSfBAlJTAnnTBnb4fZDD4O6w9yqBm9/xnwpJCSzA2x41mgcHNzAwUDNdX597e13merb92j+kW5ujuiAaj73IKm7is7QRtWKyABzVDgA4CTCxH7A8JCIEMHc1zGXbLuBmruUJvHtsH/+4morNhUchwrE5N7kRIxlGhNlu9p/63t/dD+Hk6PYLMLNdTXAWoUQHWkr+no/bEYzQJjKg9O2Xa3+3h9d7tG/eO/0iyt/drHZOs7nEuRmP8r17e7e6frm5mqiw8WH8EbU27sBKe13Y4rrcRzLVAJQaok5jkPtujijV2cc6wwkckIDRz7YJ9RltsTOgqYfzho4L+eU5gQaMkXxQxu9uTMHmEOFFOapej4jzcgXU6EQeV4UuBk4UpgpRWIyY+tmRdDNDHQGHcxoNQScC+xmgZuQ1RQBiD4v0UNXgQOU2uejbG68FRWaM4xEM0v+LXp21v4QKACoARzaZvFzXZfQnJhVnJgOguTnUpG5l2lMITIFFfNa666WvlQZaOmMRFqmzfPPfrTfLe88evh+e3qeUpqGqtogm+sSdIPSx2hvrm5v31zfOX1yfnzEXIWs7LRbnBYpSHBbt7n7cK/6la/84m9+9N0vP7l//fSyFqNm1awffuv3/+AbX12fNPTDj14tPnhsIhjJAHdvNr6fdjfPn+HvvdxuvyC/imD18ubd+++uCKbrq/c++PDbP/n40TuPP/79H/3C
17/+O//8X1bBYTfUUaab8Xx5skqvByYRVZ3HAaAAxMSApUpe8tHZyV/4M39Ss19fFkUbS3nx+s3XfuUX4jjYdOuWcmqw1tMPvjy9uXz06MOPvv+7R4/uK7dkBO2RO+doMXm7SC8+u3ny6AtE5NOwffP69U9e6aVub9t3nrzXv9k+fnLvx9/5/oc/94sfvfhxbhafvProzv3zz15sPIfUNiTY5GxoQx3X68U0DXWwEBJBmPodAIN5TmG/VXKIzCJmszvxLQ7tcz3Gf1ot8rdS9NsH5xweAOSUKXDMKcTgXskQFFwdHWIMxSQS9r01Ma4W7dXlfrFsNomQKMbYLroYQxmVAxNa3Y4q+Ht/8DufffpyTuK6HxyT8F97anPg0wEM/tpf/8/Oj5Zdbl48/cmdBw/apnHXOShbxm1AKqpSqzsjxkDN7cXrGOJgE4MM+416CTm8vrx98sHDd+7bsJlOTo42NzcAOedu2o8yTJlTii0RNTm5eS2TmwOFEIJWMznYrX/86bOVLP7dv/I/ffyFd//ff+P/8o9++x8/eP/9XT8W95bDpy9erBaLCereq0RdLsN/9v/9fzyv8k9//WoNudTSnK1vdYPw3n6zj90ihyCyHfT68YOf++Gzj2uaDA0Dg4DPeDxCnusPIVTxyEB4KFskJCYyMzWb28gc3NWA0GfoGjPObTBzmh8dHJiQEMQkUDBVd6sCSOiTA1FgXqIft9mRzfzm1eXNm4t/8et/gOghhZhbarr1yepo3Z2ftsero2XuFk3LEVOW0LHaJ2OxvUCIqR81xW43gdeob8Oy+3E0W7DFWmXXDxDi8uTMOUz72xwyuW37bco5kLjV3TCGtolugAyiWpzcXaHjHFVQbHuzz/fvhmi7un//F59859MXZ/eP33z00cnpnZPzh1gEhk3EIjfMdBTdbqdbjk1lCF2LKW9vbmMIjFSHqW0WABYLIck07hMv3ODmavfOl97/1V/5+ird/qX/3v/8f/1X/zd5/SHJNPS2WC13/dVXvvju88+eJW1OV8vNNJzm8PzVm9W9u1OpwC3FlGK8enUdUxxVnt1eJMdu2Uzj2KW0hlMrZdAJyBTK/nbbdW2K+eXlm6rl/OHdq4sr3YtQEpy++tX3L29LZVq3zfbyYrlYFfN2sQKCsYxiEhHMJRC2vrzdvQiwLPBSrXbQEqpMgxg1IfQyxpwjh/Ozo09ePweYrayMGGQsxK1CQjeo9sknbx4/4l/9c1/4W3/je0jL3X7qGuzwwKeYbzAAITZpmqZaJkDjEO7de/D00+erxYeL1VLk9uZ6f3r33rOPPjk5O7179/TpJ0/bFJrEAipSXc3UUbQMEyOVYUBnF1dVUIqhEe8RQM3FqhRdxJX0O+bIxIQABkwcU1KTFIK6OxhRqrWYCmGihKnJDtak2GaPbsTgroG51NFdbLKRdapDl/B0ua5VTG25XE3TTHdPpdY2Za0ld6mCgalqr9rMSBQ6UFVMvYacmVNgNrEunZBCMK+ixDQViSG1zXKqPSDk1Fr1YpMDohO7iqCjzMhPACZMZq5SQT1ROwyb3J2qKlFEcJERCRkjVC/7HsGmqqaeYjvVKYZogDkmMKUDPEHRsxu6VHNnZ0Z0K05OcJh/YEYFA5IjUqzjGDiSY6TgBF5A1UwtdbPIzIQs6H0dQ8op0HaoRhkjl2nsuvVYKjfZVcZpIgYiQoVpqikycQQjN3IXsT1hJspuBIgGBkSOHiMWGVWre6gusUmmgAwYrBQh1xnf4GrEwaGAY4hNnabIi2J7DICEzFGtmpm79f3OqkSc+9CVCdsmBGRT6466ah7dmXi1Om66LgdnhN22piYlxnHU9fERkNUiFJhNmLFbtPNRMNUtQd3fvG4Rg3vZT+3qdHnyYD9KbjjqRD6hT8POq84AF8st56ZldLYpyjC8/Cy882SUmDHvb2/3++3o4G2zPOkuLz+ro9qkddv7bgzQNeFomRb91C+7s4vL16FJCPXkJL8Z625Z7n79g/IP/6luq7dDVKYYTaZlswBFr5WJ0cDNHXQmdYqbmbqJmwaMDDhJcYvgimX0MgLpoFMGb1YJb1xGUwVWdVNQg5iYE5gyJ3NrQp76iZApElGogqgATm7sh1zPvJIPrkLgiDRXawMCzuXNHJzU3EDn7l4EMyDnwA5ubgiGHP1wKVFTc2TkAGAMQAdX0TynEhKjICCJ+/mde4aYAjHH0Cy9ogphNVfixZKwqYrd46SOxT2uOhdbOHikIJKbTBtZr9NUd3SWp7YbOJzcv3f92TPghBDz6ghSwqk0HSPFJvpRJBhVR17kdenAOYoXbGMAUORxLK4eCVQFc2pSqrVkdAAVd2UIKeEyBnUTo0AcQyR2qaZCCWNkdHYVBYZZyGCUWgB5tBpSYoYy1BgSISpaIEQKCnk0T9iAmrkpmhEDBi/FtTCCmKC5qoUUHBQT4yJT20QOASEhgAnWkQzIqlSV3TDeHLx1ZarTUPo8dplDwGKAXhcZcmj7vkDx89XqwZ17pyenXbs0AClDgIYwqJOrqEkTYmBkgsAEwFLV3WJMHH3WBZljKcYcKGbmaG4z2BpcwYEMQI04Q4izE8XVRSs64WzVMENUVVc1x5BTHPcjUJJSmAOqBw6jk6ICOQDGt0NgYAYwZK7TBIEiMyISJ9HSpAQy1rJVsSY1rKYyrHICUUNXsOPu5EK2hDRpQfIk4tMuh8YAAWJv4yg7amgar5ftoiUaAt5aKaZ3l+3udsCcyrAjcEA6qHUuEaEhvlURJVdGIEIAohCAzYNaIjfO6ehsjCHh6MFDy7Q4n7rgt0/3F5+tujHms8phZueVcYhNEKxhcWys8aQtLh6WdPK1/fgm6I99t4Hb7x/BF6MmssA0n4EP1V4IIGqzo4+ZZu8JMyIghghMGBj50GVYpiklBijulcLKROfMFM1vMyJhQECaY6R08O84ghGoGTsjBzd38hq953TsjGDuDHRgOB+HuHStBkjIxOwWDAAkkKMyTM7TFEcAJknN2NwbHRDZwKkhQa6GIpBjiszg7iJuAIl8TtESxZhiiIRERBRaQI7U/tQd0cyBQEDg2RkERsSEbK5E7KoH/dF9vpM6oBoO5rFZj3QEAgdCOlSjo4IduNSI7MAARoh/LD+ZAaGDzo0TBwq1o7kGnmFhgIfqVXlL3SVzU9EZsj93AhAGx1BqgbnDxmZlidzBPcxrfHNHUwBkApqtd6bmBvOXmRsoAsH8H20itDoMz5+/LsZTsd2gRKpm7WIZczKozGG1WP/gB99bOLYBNejz3SWhRwBj2gzj1VA2YxEHRyexR8fN3dPFf/Wj1y+mXIyfb68/XMEXzlcX22lbtVRJKLei25vxm6tH58f3b8seECnQNFb323UXMzvkZnurMWCXwjQOVi3mxbOrV8cnjyORExKQ++fxFjQTZCYic0cMczoH33b6zF3hCASOoKBWiZCCgatrnZtYQcXMiRDeYqIdAHFuEJ6vlHpIpCGZqCMEovm3IxCAmRZEZmZTYZobMmVmyrkpc5xffQebkzmgMO9j8K2ATgewlqkUn3n4gG8
7rHAOuqrPMhSBHTgwOIOxOLobmAMRiAAigJtWQEY0lfqvSEWBKXAYh4m4TtOUmAI5Y2jzogoWq9ZXB9tNr8fb68ubZ6vjO+vFO+fnj2IXDa07Ovr06R8+f/nSQdWNkNwZgGSSgoWIQTUF32/7brHOTZvunw1VsmlOZr7f7xENLp5dnq2O3jk7/eS3f/3BMqnc3lxu7/rPOlHoonSefOG2ePbi8nj98Nt/92++996Dixs73peh3w9Mq8Xx9dXQrOPdD++0K1x6ev5yW4tMou+89+6/+K3vmHuTEBzEOAUto+mg3aI9PqJHX7r39YfvX3//93/20Z9tn7xHqf2Djy+/ce+D2KYffevvfv3OY+paFaLcxC5/9JOPTv/8L3/09//DD+QXVRoOS0ws48RQb69eMNPYTzl1roWDxVZXJ4u+l/XdB5P3F/32aydf/6PvPXv057948/pq39/cPT/CZffDF9cVdH3a7a72/b6fpJhKIOqrMjEid223bXomqGaBqGlonNSKvo24H1Yt/rlj6JCY/JxMNJ+85liaH+Bc7gBgjm7Oc0ejGgamGOY5iWLwvRuiV5FxpFF8N+YUy1gYWMXbdmFq9JaRDAgO5f/zt/5mdQcxcHdEh7k5Y5ZI/Y+fDaC5B6Lbvjx5sn55cw2RX7x8LUJn9852t3skooRqI3LKiw6CaCBIdHR2JuPg6uYylenk8fHHL69vdv0TepyWq/0wPvzgyR/93rVplWliYDVFhjAPnAiEGGNQcUSOyNXHOeoJAI+fPL7+dPd//U/+z4t3Ftc7PXv4pevrXi0qw00N2+3waHl03i6//eqjcZoapvcen/y5r/7K3/xn/9XVFozS9c0omfX4neXxk7p/s99vfBh/9Rffv9iM7z5+/OLpC26z7HWOYAAQMxoIkiMAx+hmomZuSKRq85Yfid29miEBMam7IJobubLj7CJ1nDs+wQF0JrWbzZChg62SHAGsCoHD1M9aaUtICMuWmEjBzIZpP726vvzUREWZQ3TucrNcLO8+OgsZX71+s2zaW4fTVTttb8/vHe214P62WTURvcpWG12v7zhai6JjPTs76VZBGLoYQaujRYKWLSdGbIbbHYC7FjWdwWHdsr15fXVyuuj7fcqLlI9gTK4yTfVmqyfpqDx903FLecGLu7tPvjXVobIwLsfx2vp6s79958u//K2n30c1syFmCRBKPzlg2yx3m8vdflgcJQZY54a7ttXdn/mZL9xZb3/rX/7ex89/uDh/IsDd4miow+s3t+frcHF1dXN7gxQ2E1gMGOnuvdOh7vsyNG1adOv9ML3/5P2Pn/2QY5yGvlkcQbJpO2TKMeJ2HCknotwrP/jCOxfPX3uRIhZi04MNUh+d3nl+08ewGPtR+r6JuDpuFFbg7GNZLzup5XjVoeFuJ0Pfk5XLenG86F7fvm5SXjTHF9eXzSKGEHb9pu2WyybHGLZj/+r1ZpExhvjZzVarWdRpUFPJiwWjIliiFqH5F//kJ21YaMwZLn/+59d3jx9+5zd+DAAcgpnXIibmiDG1wzAhluXx4s50/vTZsweP7h+drK+vNjdX10fnd8a+d4Cjk7N+ez2UwuQcIyAtVicj3vpsH4QI4m2zLLsrIh76W4d5m+scM7AaKjcxx2aow9SPIYSYo7HopKTsBqmNIsXd2tzuh9ExRGJRIfCOAQXAqJY+cQSHNnZ9LfvdNqZcRx1GMa3mstsOy+UJEeqoMcUylshQawF3NYXAFLwWmcrYwGz1w5ybahUInVy8sDO4m2nbNbVqCBEYKOQ4Axcoc0T2GjmlkNSwohIYAjAzoBVRjMQxWx3Na4qJDV1EXJkr8lz7K8mwDCUtcmwSjRHdzJwoTzKl1NY6MStjKFLMGRSYmRTBXaw6iTqCB0UPHAzM3F2ICAHRAKsScjaAUsfcrAjYHQEgBEJxKaXpUhn7lNIgExPVSQg556haRQeGDAgKSoR1miKHFKOZ5bbTqaBZwMAMguzuotVMIydRKCaBg5mH0KkWM2FqrFbihJDA1R2ZmDA6SuBsUh2cgInE0QOnefsx982FyOZEBLVMizabGjg0MeWUt7u+n6oTWXXz6uBNG/sigcOd85PdviwXzQh1318jtUQx5bzd7tfrBcZg5XAt2O17AFgsqE0hwXJ99ojiGnFZaTuVUq2gWnVTo6ZdOgYkJJGAjmCyr06UUoYYuntnYXu1u7zsb248L0/Oz4G1W68wJxNx0c1+d+f+2rFcD9sQQmyiqQbyEHm8vmqaZrBxF7B7//7wyRuxMdSClJTQEYgRAzuho8+3cm5S1cyVOQBGDKRVQD3GJKKE4OZ1GE00dFE4D5NlDZkikAIYc1C0CorshGSuKhUmxEOrDoEpIKpKyq0RurkBMIcZuhE5zEiG0DCgu2qkoKKHIAYzIrojUiBQdzVwJAJXdAdg4uBaZzKHAvrMVRURc+QAACZV1V3BHGsRAy3jhNPQdp1PKkWhW+HJqSwXGDo+XYOaW+B1qv3OtVqMIBqbVHWEYkMEPloWq4iLuKLBYi2O1EbO7gSqBkqJpl1pm1xrCRFMpmASmNVijGlQIWRySoyT63IRIjirlRqRWA1zamNGImURJ3BmUQf20GQjNKIQI3oIYJKKjmrFC6BjQGdRI2c3CZG6NrZdUtXQgCsY0GClRUf06iAUEkcKVnwAThgCIxE6ThIBpgJ1hrgScopp1eGirQhuYgRaK4dgNZReYPLb661V0uGPDeDjVDabrbS0XOTAjGbsxpEsAQU+PT+6/+DRcnkcYzvWUqQiR05NZDJAgACzkOPsIOZOxMhtiIHxcAfogLFhsPoWJMSBIzC51QPJhgBQEfgwrZMzBlHEEBlyaAQTm2iX83hzRUREJG4u1WohCHOYBykQEyiICfjnzBcA8BQjolNArYXRq1iTU85QZQQmzplkAjUM3E9DpOzRjCyFbBxWy+Or25ujtASqRj7YWC1OFEJSEo+Zr273eZKmaXOIHCJNQzRniiNzYK6iQJgDinkOhIQUQkJQUUcCQjAHhMBIYMxhVOTFqXFCk7aOaFY1lvxuenAvhdaH6zre0uKUOEDI0KwKW2xWxZFCQAAtWjE3q69taY+wTvUPcLdZl0+WbKSFCN5CcpHAGTy4HyBiBw/UHCPFwIwcZo7vDNzBkJGVCSCwIVMICGNAJmLGUNwcGOYo7GEmJ3KoXgwIiMFDRFRiQR6DbtrT83KJYAjmbkZIYK3JWvhGUZkBgFANENAESkJKztlxwci+hfGFOm7gTgKSjFUnCA0BBwopctskZHJzMBQ5QH/mLjMlx4hzCaY5VPvjLfeshbuJmxOzqBDRjIOZA7ZIBGCI7G4+cw3dKDCYI/ps1lDzefBSlVlAg4MJpb6tPJbDPT3g/CoBkKoA2tvQ2YzDPpBnZ/rYWyYRgbupuvnbb8fZ4TuzkFV1xkrMHgJzfdvpbg5oMwMZ5ugcqdQDK/UwMxHiWzfefGDqwAyfvHr1+ma8Gbwqqc9pegrE0k/dIlFqvvdH32vN3n98b1+v97VIJWS+vN1tVC+2O3WXIimGhvzBcX7Y0O
sXr45SvHWf+nI92beqXpftk3W6kzED9CbLtt1V/PHzN8smnC6zu+12Y0osJhevFdxPHt53MA45E7kqgpnKIFwhBJ/AACnNqGZkcoB5whJVOLA8UE1nEdARDGxuGCckpogIiK7gb32dgsiEaLPShzNNKIiUz129YGpmZkoUiGg2ms2+CTNDPuSq3Q2AidlM8S1iykwOEaiZoD77gz4vsEJCnK/9YHaI+zqYuwH6PFeqCHEAN9HibnP7irmhzyrhwcIGjvOhPzcSihapBTG+LeD7KakIjIuIg7eUIha3UVEKVIcGiKQKuWFwdQXU3e348vJVF36yWhzntkuLdnt1fX3x2W6okcmttsAhxDbngjq57PtRWSfxuFpe1fpyGL7w6EHqFomjl/7m6sXx2VfqSM8+fZnXJ2+e7z767h/effeu1NNpmAK3stfts5vMi6/93Dd3r8uH3/j65tNPvv/X/39f+p/8e81J4qV/9PR3f/HPf/nbv/Hdux9+KTQnxfO9+w/66fX9dZyUc6ibvv8f/Bv/zS/+3Ff+7//xf7zZTDh6wPoX/o0/YU1+/dl20dK//j/8y5tvffSV9z/oMI/1OixWi7vvtOenXQwBJQVThjpUd7/95BUTQYtf+zP/Wt0ODBtcJehWhNPti2eM9dXzl0dH74bQBgaw8fLNs9Uq31xeLtfvfvzDP/r6r/5yX4fu+OjRV770w+/97vnD9Q9/8PLx6f39dkjtYnO5l7F8+Wtf+Ee/+ev3zx/OnxUOyVyrmIoH1O32dr1a3tSdhQCoc4nfgVA964Hw+fkNf5pJBG8vj/P53f2PvwcRkZmJMKVaBQHAD1WmFGj+Z2gSNyk38zLBF017dXO9au9t9teARVVMp8DxW9/9zvVNT4gG5m+5/2/bH39Kwzqoumim3TKfr5sPvvlBUXejtk2mGmMUca8iY8HEzDEGHvcTc0yB0Cdzvnn2atyWUjC1Te7tzdPn69h1XXdxfdHkFpT3u9sm5aYLMo2GkhLXMhHHgM1U3QClVDP9nE8RXR2mIaXb11t0WCzahFjQU0iiiug3/eXpyUlBf321fXx2/19+/5Nf/KVfurm5dsCxjpJjXOZ//If/+E/9yq/+0Xf+i0zx9UUKctQ/e7EFO1m3F5v9olmM+7JcZzNr28bGXmvtt715BA+BEc1x9qASw4yXBkME+inP5FxnqAiuc0jNCbCaMzgjINJsU4QDARLMlIhw5vGhwYzkYJqjbUToAJE5ORHhOiVAFAM0iGyy37z88bUhhBj2o+wV+t2w6Na7feUHTfESnLXKMOwevPPu+aMnY91F9tV6MfZ9lbGUqkW0eikSU6Oq46RWp+LT2fLesN2lGLumEbfiOto0TmJSlm17s7899gddajfjrRuO/VS1xtNm6/Xuh0/Gm++d+fGL64ujRx/86MdPj5tTe3X98vkfjnKz7ytIenD/zpc++MLF66tPnj+/HnepS2ycjzocFze3JZ+EJx++s7m8+Jf/4qaO/uPvPTdLxyd5czNwonXg3MGby5cP3z//9OWWU/vo4aN+c52Tc4owOiPs+t044SevLzHm5WoBFoZx8+j+WbTj3eXe3Yfq9+6u1ElUT9aNjctxLMgp5nx0un5479Gbn7yGmK3K7XXfLiIT3lxdN+slTEjOAahUH/oyDMOo+OTBw6ury6nW9Z3Fy2ef3WnO+uk2JWhz2pvlvHQgQizTYOiLo9Wb66uTowV3uewGdQAj1GjVaim5ax59+b2Xrz5pLAWG6ebpn/vlu//O/+zP/5d/6/tvzxMEBomJCNB82XU5p1evX0WiO3ePP/usf/X85b37946OV7vtvo79jFNdLNvr1x/nGDCimRpwSF1IMgx7dGMmQ6kygnmkNOiemaZhRAqIITQLGccIbZNigMCBx2FITTayYVQVMUCiTDS5i4OpS8BgrqUMy3aRA/X9bRdX1ECZBBx3/W1MWQ0A1QH2ux26igkG4kRmCqoR4+QYQprGPTqIlEANKVoRBgYxm1RBQpMRkB1DYLHIhAyorkTGEcyUkMyKucbABqLiy9VymG4n3RNTCCEQTDa5igNKrRwbIq5SYxvFzGxymQADQJ20SBW30kZsYnI30+pWq4g7Vp0wduZlxuNSYDUFiw7ilB0EXByUGRGoiqpWDJEpulqMDUAZ6w1y2zaLcewJKYZMAWoVplbFKRzgfTEuSikxZYHKlokCA05lRHBGRA85tAYO7EgKXpy9jPvcRmCoom3TiRZwZSapHrhBJwJFxEDYMDtFqwpqKk6YZJrfUjQDRndQ9TG4g4sbKM1ofyWOhKHI6GrMKGpi8wpD7p4cI/PVZjtORQDFLObgb8mh4NbvtyLVa7ta1JCwSC1Su0Uah33Knbmvj9bFdNxsu5TmY2DRxs1m264ZrV3ku82dd3Yjam+A0Wsdt8Nu3BSHJsYaYmTG1QrGCa2S1rM799ruiHwVObmrIiLh6fLk9avr1fnSiuyutiy0enAvNU1ztDp5eLr9+PluGkIJmcpimVHR9/Wz3/io+8L901+8Iyb5wwfbp69zANn04eGxm5hqDORaFZ0ViajKRIBo5ohalYI5IwAh0pyeUAdDgsjEwIGAU8TMGNGdKTiz+TTfdtJcmmPAHObNLRHsR20jo9P8Q9HN1ASR2BkcSqXAYFCsltRC07SmXsw5uigROTN6AYiqEtxmKYrAI0dEqgZmBgbEQExm5ghFJ8LgQIxgpoA287gooOvcRGNIaLXyWKfNTbj3ODx+UKhCRMvsuz1gLlMP0zaKt9S54Xi9TxFlVG1KPr0zbbXe9m08shAXDQ3LDtGdvMtRQUPgvhoiVDfIbUyO5BRixiWONYKiGVRAtwQWGSMYE+WY1FkxIHGcS4pCRQIkZlBn5ybP/UgRQdVUHQi5IZFJp8LI6F4ntRoQg0VERxRsOOVGyuiDQEwdInkdA5PNHSUUYrdgDKZqZhGhgBexOY5CziIW2ga7JTaJE1B0RJDq42D7m1I2lUZBa8Ec9FD0MfWFHd1qFRpVcgiRSQ3MxVXWbdt2KWTCCBxDl4OaxdAyBWLUeT5BBwgzsXtGHdh84+dAlNzMVNyqWXGfmAOHaBjm4i2EmdQhYDrvwR15poogoRMBx5hb8iwiUoam68YQOJCiE3pAI9eAwcQEjJ3Ig7omjgg2h04QzcAUJHJLjhGCRw8E1EScCjiZeUxJq5F5jknUONJ+um4D9mXqN9OSF6Ew8+mk0Maxn7YKoxSoU2k5OKVaYN1FqLsQwnGXiKBqmWvfA2JizughgAwltjFHCuboc5F3cFc9tKVBDGhAkFfAuak73b5Cm7Q52wd3JOzuVQk83oR9Jbyh/aVJ9sWyhkzLhQGg1owW0AP1isNG4rr9Zh5+mHGXmRHQDOc2dPWZpyNMOO975xGDAAITwdzqTW5zGHTeVkcnYShAtcLIoSWICIpgTBQMkIK6kDIzoxkCyRyzoQD+OXOFEBVi2Mf1YEcr2zh5BVB0As+o5yG8BB+QFap7TZgJPIdI1ZCTswlUxHJMGuubpE3lR2PMRjpvITCQyoAcQ24UWKuquauEn
JkJEaUqOKlWmn1V9PZ+CFFddX5dCM0MgcFRVYgYgV1tRprMlmg3R0BiNjMEVLM5t4RgfkAOH7rFANwATIUhuldwcCBQd4A54zZ7l2ZeqAHMTcjzzze1WTfyA2NI6HDux7m8GMzM1Q0Ig2g9LOpNZilqBreZOcz8Y5xvJ8xMwOhgYJnRx2Y2P+s5X2TFvQbUq93mxfXtbQFO7W7bO6XQLlNn6tYsm7DK3/vxpz4OXzi7pyCv+uu2bbsmP73Zvrzejg5VNbg3TE0Tx77v93I16KrlR+u82NSPVd5MPnr8yR5uZfzKKq6akKpOXnOCQulyvz9d5eP16nq7EfPFarntJ7q6zV2bF+uA7oSO2uVYq1Tw3V5Ol8ld3eaGeQQDn7cjNrulAB0QiHHWRNHVOHCkBKCIOGttyORWmImQDzlKpMAEiCbFXS04zXhycDelufAbCADUZJ6HdT78Oc2pG0JWq+5qPi9WAcgRDA8/w2aH0OGWxsH97WyNBOhgAoiESEQis54Hs+zOHGzeBLq7o4oAceBgs+Q0fwwOdyg0CwhqdYaRITGYfC4oHKSimBZFiquMpXotw3ClFapSdRncVGskZ+cqFeaLuft+/3q/fTWMIzEGbtandx6cf9On8Xr7YtzeSo69ilPt+y2Y1ToxslOkuHBvEenN1UXT5v2gJ3cfes7cxciaMt7sd+tH95s7R4rN6nQBTCHG5Spdl92ODO+cTC8+ffrb/+DRefvZRx+lr3/l+WevPvzyh6OUJ+88xtWKKHs8XXXnx92bf/O//6/9p3/9b/7Kn3j86Iu/9Oxp/gt/8df+6r/37/6dv/2P/t7f/a3Np8//F//+v////Nt//avvH8VyO11sv/lL3zhaH4+3VypXT772M2fNcS22378J6LuXT/N761pKt6Th8uXV80+8HZbvPDKbiH0aN2nVTX25vdg++uL7N09fdvfe95CKVK7qRTAha5Xri9/+B3/zL/5bf+HH//zbj7/4SxfXGxmpKo2j7Kf96rhFytxkdry8uIoUM8epTmAeQkgx9/0+JTxZd+Y+DoUQRSq7RY4u7gfF521H4pxxfmsp+lcRQW//fL5VmdTFCYmIVQoQmnlANDURFRFkVrXQNIaQcr7dbc3LXrbDOF5eXwzT/mi5NIMyjAD4z/7lb4r7zFmb/XuH33YIvx38RDO6jYik2pc/eGgyfvLRJ8vV9me/9LNqSBTXR4u9aqAY20YAq+osXXGK43578eazcPye1TKOk6l0Tdr4ftgXAWnyInTt6nR59fKGNcA0HB2fTFt1tEkmpDipo6q6lHFQMdMS8yF6c/3qZrngzbZvYhLV3TAimFkdtuMiZwC/Hvsff/yjL9y769vxajudPHznP/hrf2OssFw0aYXcxtVxO10+/6UP+eMf2ER6cnZcUdo2jv3+weOTizebly/2KSVmjm2Mqy4/WJyujr7/3Y8+fnm1DPm4bbGCTEXNGSqYO4K7MaADzYoxAKEfzuFzC5qp+awJAcos8x3og2Dg4m97T+cPCYC7YwxAMOeWgcjV6gwYJXAvrgBIRAAg5BCJAYFAUVAFBFRif7u7kQAa0zCJtKyQ4jJe497Iitrggk246ffYsJQeRNenx2Lh+nKzXsWr26uPby6X777DkXelDpMiFaOaF3F3venWbV/2J/ePJ9nn5qhtzrp8tl94LVujIBqNMS6aha5WFT4a9O7X//TF5R8hxevXr5uUx4jr9Wq33f7Rd78toufv3r/74MG3fv2f5LaZq9yJkYjPjo9fvHqz72WVu4qooHLl4zDt+/3pydHDk8c3r65O4vnTcS/T/jY/v7c+efrZ0zZ35pBayl0Lvi9aT9bdfrdvMPl+X950squqdRhqcdvd7mPXNYlud2NB9BTunB1fvN5Nt/L68qP99RCaxbJbkteqJYbETG3uduOQUnS0fT+sFu3JevX0R5/e3txLKe59dKd2sfKqZ+uTp6+e2nKBAKZKjGPfU8CbcXh+c0Pot5td27Vtl293/WLRooChCznDtLt4ZjuB0LgbyeKTj4b/6D/4x0aL+Shg5hB4vh2vosWkXXYnp2d9PyDx+fn55mb76sXF+d3T1XKx2+9R7Ppqv1jSYnnqug+JyygAqnUEwpCYIKC7VnHCmNvt/hY5EjEFCciTFJZgU3WiaXJw4xhy1xaZgCgwCZFDGErZl+2yW5ZhnL3i6JJjB4gxBkOrYBjISwGPFBkYAs6MRmfGMpmji4qacYwcs6oTB0NT05y4FAFBm6z040xzmzkdM0rFzWuFEA7kSHMwJ5UJAJgiuLu6kXMkamiovQdX0zlKBeYih15gDKiOIiZiwaHWwWmWhee2WjeRlBMSC1UTcSnmXqowBQTj6OgC4IETmLtJirzvb0JMroog8/wOVOfOqdmXTqDmCmjEkUMU9+XqqJbewdtmpYZmaDYlCgDAHNTqNBWI6IxoNk77RZdD5Biju4oKQHWbUm6YcZpqarODjGUbQkPBpE6iFQjU5xM/ITOqMkUzUDfwQoyJAgGLQQgJ2cnJLBqpmXOIMK/7TGLIqhNhMBXBMgeZkVhKAUJBc/D9OIr7vh9EraIFpsDY9wMRrJaL1eLkZnNrYCpyvdmfHndjP6BjDMHUxEXdtAoxuVopB7v1zeU1J16fdvsrm8bNtHkjtqCh5MCr6H3dRZmklGHAsWm4zTGnVQxBmTh2xydtXpwcPSzjEJ0FeBprnbS/3k6lnCwzADarRBGEaXm6GMvt+iitjldvLjaPHj389h885TYfxWU/+Pjx5vyrclsHXSRetVaL9ENyRUARYScACoFF1d1CYNBCzO6gqD5jNFxNVV2JIxOAAwU2nFckhWEmiEAtFQLgzNREIg5mFcxSygZYpSJCCuRugdlnQjQYEdos7QCYzQBNvjW/84t/8pbS7Xe+fdpfZcbAQVR0bpw67N4BD1kAVzM+XJtmVoKLlBmeizBXN4S55QdckdhU1FWtIrCFXCdISySQKhPMvIkcncFLxUlrv+tOcgjLzc3WBGKzkFHunt673Lw247Mnd59efdqktW8hNUF0sAIibNXKNLXECBgCpRiaptkhmhEhB8KCpjTjuRQBEwKhWQWPSZDmJuQcIzAjmgEg8lzvHYAMCYoHRxNVEbc6jgVFCRyqdU5u6gBVhJ0JkRwSBZgMA6pZdDAnEDAEVwlQ0KoLGAcOMSOKqrkTESB9HnwAsRgJzIkJ1NgoTOrFputdv6teyQZFRXNX1bct4QDmw34aRusyTmUE95STAiJiC96hgRRQU9EqlXOTYkYkmz9wqiIFFN2ZcgiY5ukJzMUFzACqA7rNPQAiqiji04icU8rMjiZ6uIM15IoAFBpEViCb7fCABqgz7F+RkYBQ3Q2d0MmdwVTr7JgEM0Cnw0wVEMTdY4gz3m+G0TYhi1bRSaUiGCqO29HZAXyc9st24TZxiKq6btr97SYw99O+eOgijVNRHD1q2+Uq5hKZjNDOjk7UtG07U0xWloGux+I+Q13AzTI5FEkxzXVZh/pzc8RDHxOaBmJAxCbtsQ46BsQQM0yFJR2j592FXzxr+m2r
ow3VZGIsedS4XIzLO8PJw6G7LyFxSIRSZKLouD4rN5sl8SIBuBo4Mbo4EdDbGAC5zTmZ+UUDBHUjDO7oBgRIxMSMWoxY0BAKwj7zaQUIqUlqBBCJkHUwREoxJqklILgJhUUGNqtzDEe9MiARF3ZYnm/1eiFbJlFgo4iG6HrssLSpADkSIyEguwZAQyBkiGniAtEbHFZph9uX+zELn2haegizS58RUI0gNiljchUxcwAQUUJkIjWZ16xMRJ/7KPwtyRAckR1tHmDm+4SZPTSPO3Od+Wz9MT+wpRHB9EAEI2SEAyrEZm8L8cH443bohvMZ7zoXp/nbpT66qR/cAD4PZQgeYPZA6ef4kbmKesYlA6D5DIOz+SmZCtAsQh3ST/OHkA5OJZ8ZSWaGSLNKSEDmauoI7FYdhaCUqf/2D55u9s2+EpTiDghGIAgYGDzYRx99unlz8zOn9z94/PgPf/z7TV70VX7y5vnNIO7uqiSSU0jM46a/G/GDde4iXo+lv+3vIpyexe/dTC973QldK3y7li+t/biJKuIGucWmWw63vZutUjuUYdiOKYUyyWazOUktU3DEEChwVLeq8OLZ8+MPHnIGB0OiQ/wMQLXOwuysgiEFBztEbhDNtFjvb2n4s8mLmdXUaf4yVVMAY0pAfBAAzYjIVNwNGWfhZ87emCtSPFzOcLYNoqgCoMHhTQcKqoUJCclnwWh2n6nSoTdi9vq5qMx6OpHPtQ3Eb4HZsysM3MwC00zPclPEAxDSTA7dmuCEjIRmbgCqlTkcEuRzsvKnpaK8WEMpLtVKv9vdTOOEZuO4N4yiAKCOViYGwuriFF0NtJraVKUJCTOGlrFb+OL4wfvvBiWc/Lp/jbrf7q8S+qJrNtfbGDMtG++n0y+2txcX61V3e/njob8+oXNKYYDxyZefvPqjHyXONjVQwn4zMpO77ra7L3/j185+7c/+03/yW/+t//Ff+jv/2//dk6PT3/7n//Df+it/6Y/+4X++fXGZqFXpFyy7evv8JZmv//x/+8+1q3a1XPzCL//s+f0vbPb7tErPPn76c+9/ePXk1V/53/8ffv2739p+dvn1u6cdnPzwe2/+xDf+JK/udsfxyS983TAv14t+V5rUjq9vfvT8N77xzlfGi6toCxynYC36ad320+3+5H4e9wMdw3Kx2Ng07XpCNRicgJBt6gPbj7/9/S9+/V/vb1+0hYaL8ul3Xp5/uNCpbF4/o74en52OoxwvlkChTRioLaNYjd1yJVK7blHdEY3QmWDd5tBwv61zJDUy16oBEQCqwew5hM/dRP+qpejtn7cIo1leAmibbKbEPJMtCGfQN4EJ6AFTahV0pGnn/Y24oiuUWquMtTZsRMBVFAFrLd//3o+Z2KrY25bGAxoJD2rRTz2Kbhja9PzNdpP81/70z7O3QARmUkadMjjG2AAyqJtpDIEZm3XnaHW3obi5fP2KU1gfnWyefbLIiOyL1UJuJyt6cty9/Mkn52ePpQxlLIvc5jCIg5qJFJTBpp6sUohS6jgeUKa3u54p9LWEJnJKlXiVF+PLTxrkhhE5ViKHaLfTw/XDv/fdb331+H3L6+XSx1JqP91pAl336Wb6e3/rb/zar3zjn/zm737y/FldHTljE02uhlVe3tYdIL14PagIxs0kEzhGQwxtQXrTTwQYiWNCpQgmLpoJwT1S0CpM81GORSQQi9l86gAzxgPdDuezPqCqISHN5DaaBWoQ9zk/DerzpcFVZ1YeEM4sNQNHAtNZqya3ma7HiGToznzbb8dMX328up7GxbJLDVOToU6MpdgYvUC/83G0fkcN4asr6wWPjlx4US1cE128vjNJ/OgHgYKM2mpYHR+F6xelTtPN5s43f2GoNbVNEfWyJTRRSdxU7NkDYbN79bRLTX8zmJV7733xTV/P7h7dvHm6XDSTe43Vp93udjw6iVXx5fPLq9fXJ6d3bRj6y+3yaCm6/wu/8Ms//MGPlsvVyYP17kbGXiBHTkGn8fTOaTR68fo5On7/B98FC/eOu6PV4vr6etl1U7UQo5hFhvVpc321QWvbkKn6k7sPri+fr1brmDJH229tuBl3Wz0+S8NukL42zG/qbXvWMJkXPz0/e3Vz3XbtNEzr1bLtFuPF6/3uBgOVCjLJvUfnu34Hpm1ubOiPzzpt4/72dhm4bLcTwbJZ6OTGPlVhRkpRAm77vRYgt5B5GCYEjBk4mKii6yrDg0enF282DdD+qieApllcvClh8jZcz0eBaHUzKarVQX0cBjg6Sjn2+6Hvh5Tinbsnn/3k1eZyQ3dOUpMTgE5exwnBAZ0AUkzDUAC0lpGJ0b3ImHNSNPESQ1AHMGFEYlKZiJvcrl++/Oy9k3fTFPv9btEth7JnCSq16tR2R+i+N2UOber24+iuhAzEVat7FSug5Wi5sjLKCAmjqksd3CHklnNyUwiBAasgOhPybAKfA7kOAGag7sW1TIhkBAhCIRh4SFmmyhQUkRndABFdjTmDCzE6UOQAb6G7HAJiSCFKEa3CgQkyuFcbAErgldaBIdYRIiXXidwjxmIVwOs0HC/DfnczlP3x6XG/Y0APgZmTqhKgO9jsMXdBwqI7pIqmphNHVEcXCyHPq96qJYCrFbUaYyYMYxmWq6Ni6ozuKFaZctWyWOSigkzEoYqk1Kh6DkF0ImSrQdSR2BExLRxqDgvmMOx3wYiUTfaLxVKkAjNDGmtNoamoHty8iLmAqJPVGkIa6pA5KwzurBpTIiSvrkRUZSSKYCgmDi4wGWTi7B5URuIQmB2RAxEnCnGqI6hPo1YTEQNwEDeYB3Bz9lrqyf12uYzVbHN1K3U06GJObUqX1zdtzouuK6WO/VhLjSF+Xr0wFWlzRvOhVK+j6U2EukIQ0auri6kOMu2hFAQu233oGm8THx+nJjungULkxtAjIVMIR6vtBY4waKuru2eI9eTeCcaEQjJMJr672McmIVPosgUqoJDi5mZaL09/9JOPf6Vp+zj1kStxl/z1T569+7XHTh5CiCFKrVZrYBJwAi4iNN+9at94dzDJcwgUELGKUuJ2sQqcmWLsFpQCuKkCRzYkMY8U3D1QEAJXkVoBwyG+CI7gVQ56BwEyOIOjCmJgZkMCMj5p991w2/G7v/zz8ht/4NNedBc4OrFbIWI1mAeLw05rnpjAGUHRzeTw2zghODmDGSHPKNCZB0xiYG6AkBaCIZkFrONu086dP0SYGUWs4qI5xknHYRMyY0cFK+egLM4ylWkabmOIBMJA08UmNLFsLedFjWwQBsFEgdBi4ti0ABKEvZgDQMjEWaUAYkBgMnu7sDP0lCMihciIQcE5JKt7rcJgkUDAsaqNoKNIMSllLidCAFefjS4AjoKMjmY+ViYjQgEPuZ37UOf+ZEUPCImjvIWaRAYKIFUmqQYQUmzcqziYpxTCsg0xRSfYTzLVcTPVUV2ZMCQMMfOu36lYDIflmRadW25HKTQhIAx9VUJC5BwXJ8eBQCatQ2UaU8qg5uRSdCrTnFo6FCe5mypBODTdyzy+ChEy0WGAcTOzOS5hMrkePhnmFpjRlYh
NlOaPDQK5zjOtTGKE7sgpc0pzJbm4g0ugzMQ+yy+Ac4gD0NUrE+HcXMQcCMEthFRrdUTmdtJy3B3tpeyn7bJbiCpABxgn2SdiRNYyBqzKDlzdfPAprAJCl0Uiu4mftWkaLxtMPu4oZwMfZYgtC+qi4d2oCOwEqioip8tuMveQQQVUUojVXdwMnRExBBVhB3PlpkEAR8Z8Esa2HaO//j6Pr1JRG3o3Y46EBBW7kGA3pPFVt7nYt8/s+P1y/O6UQPJQVNFZmdzVRRABMCBaIALQMEsbhxFibnZHJGJ0EWcKxJGJA/McJTVSAQmRHNSwMFPgpOhANjdAkUtCrAYAHlNWtUnmaymiicPszZnVqFoBJNjYnE/7j5MpIwFENWO31vwUaAOkwIwekFhHNSM3h0VBiE3qUSfoEkiT9s3wutuXiZtNm9mV0NDVYeRAENIcglSpKlXN1cC0Bp7rednd7a1g6uAhsJmbmqN+XkDPFMDVrCARgBPMRw06zEuWGRTk5pi4EdPAEd2rTeEAIT400MxBMToghwyBANxc598zW0iQ6O3ohA4wV3eZihHY2ziJz74UcEQkBJ3dT0DmDsDubuBO7IeegMP2fqYmq6nNK2IMpuZGDm5Q7aAZAjjO1Stgolg+eXF1vYujolQjJiAGIHUArbnLWuHFp2/u49Hj5fp3v/dbuYnrdvV7T5/dTGocxmlixMWyTcze779xZ/GkgUWg22pbToNjVlmSfHnF68A/2tW9xxvF726mLxOeL6KrTmWIxHu1WikCnR+fvrm5NlUEevnsNXJ8+OiBO7rhOIwKlFO8vdnVCdU1pEOPHxK7GcznAVMG58P7gjSbGh0BwKwSHfhdCORgbjPMyGnGgYPPl6fZO6kqh5+ATMhvu2YccG5VAwSYc52qBQADBweC2U+E4C4wt0MCuam5MSIhKTgxEbiaIjhRYGZVIQyHz6rP6tPMYqsISMTgThzB3V2Jw3zramoAyDT/zxLhTDhwpuBuQEzEbq6qCAdJ4Y+lIscQUlCoiJAXC8SVyzDVoWsWi9iAqetY1NVVi1S1Ok273U2MCdHFrIXldLMLcY0N1KoF02KZTBiIutOjsr+CiKnL+8vrWNttvHFKU7FmmSEsKa4C8nBxRQs1mSLDMjbLLo/bzd1H76qJI+luSKU+/+5HZWt/+Pd+8yiccbf84s/d/8Pf+Ec3Tz9F7T/50Q+uX76ZMN395jecY8ugN5frxx/+N/7Ur378o+/fbDdnx18tel2vf/zDb//28my793/g8sOffTj9ylcWOZ1+eP71D7/+1ePFmaFXc0aw/X66HrTj2nu7PJ0ursf99YMvPdjev/Pew/eqerNYXrx5en6ybLuFmOQ274eRb7a7/a492uckYoCA3fHDD3/lyE/Ocuerx/df3ey79frmzeWTn3/n0dnJZekF6OWnr770wfu/+4ffvXt0T6VWFSPHzGgyTGOz7LYvhsCpXSyvNxcMnGIqdZRaMuUCxdwBkQnVDpPPQQA9qN7wU6m0+W9vH3AAgBgCuCA6MkmdYWZzcTYic2ySmnNOHAI6l6IVxBIM07A6WbpbahpADYyGYXt5eX27IyYFn1VP/Cm5yn/qiRyeF3oKvOJ0/3gdLEotY78PHIBDHaeUAmUWMwfkmFRUVGtf6qj7zW1aFFGjJu/22zJ6v59O7ubFst2J3Fy9gS6q8TgMTeJSyq6vDTcTJ0HM6KbD4AKmSIgM9rYadr1ud309Oz0ix10/lhwn2UQOZlqquIGHMAHc3tw+uvfh//J/9Vf/xt//T263u3YZQE2men2zu+11vV7+9sc3Zf3y61//07/zvb/bLhtey5e+/s6bj69/5U/9zH/6ybOiVUyd0VTdmIiMQcRHEXcDIjNAIqeqCpFDcCfwzJyYGZzQzKVrspqiI6iEENBU3MiR3IPxXNiJhO4wm49E/SCNI85WRgdHwmp+oOsTixke2EfzI+RzrymiE1V3rwoIUi0w4u3lJ//5/6tpF4Ze1Pf7XsYJAadS1JXdSQTEPBAJmCMzG6JUN1SMEUP89HvfIoeZSfHcDE2FWLh772e/MRo6GwAFQnfYjjuRCuDkGCLv++Go7bR4G2JtyrNPPvry1x49BZH+Vq4H6nV9ssJj3g6jIpGLEp4fnW12U7tar85OUPCHP/hRnWwkff/9+99/85EDPjh9sN0NyhMEHPu67/dHx0f9rq+UaJWHKlXqYtlOu2m1Otntt7v9uGj4ZL2SaURER1CU9Z11jPny2aUZLNddjquL/W6z25dSFzmRh6r24HxpVW53u67t2pwf3Ll7efEczcq4a3MuXrv1SoS2+9t4lHcvnx0vT+7fOxv68mwcOQSplLssi9rXqgpmJuQ5p9Uq3w67Zy8vQrNEZldt2nYaRkT0ikqGBsvlop/049eTaaIIPom49bdDdoWYJz4cBUjoAJzZCVKXotbdftd03XK9moZxHCswnd07Hqfp4mpzcrImprRI2pdpqpHmQQaJiTmysJuJ1akUbmIpxdQwBhMRM3cQqZkzgotOBlPf94ImJkUKOMYQPWEMyUDUatuup2mqphTZHYoomCI5GBwtTncF+n2tQgQ+1P2yO3bAqloFCDXEMEzD3BQhxcycObhok5fjbgtI6m6lKG2dEQjcsbpEItEaQwJyZqy1QormFQnVlYkdvJqEENzI0QlMpKoCIoq6OTmiWjDFQAQGzIQ6gAuguTOlrugGUGvdmVYgQg4h5rG/tepuNPSjisx8RGIqqsTJzVJgxlB0cqImHc/95Q5TCkszMjEgV3RANzAkJjMGMpO2XXFuy37b5lgV3dOcLgLFzO1+GBaNg1tIYRInWoQEiOzGWqtOxcmmvpaqbQgxQMirGFi0JmRBrCauAlGN1MDImZhcXU1yzKruIaS0nD9haMAcgZsiU2IyncBkvjUxVQQAAg5RRKrOqiLM8+ThWoJoKlrratWkEKa+IEKTU6AwTRUJYo5VdOjLx5+9Wi7b1fHy+GQZYhaVEEI/SYiNG6iBGbRtund68vSzZyke2jCNwijgU5/zUZEsyl0L/c2rorSdhq3TWIEdEnObkyvCoDvf+0KaoyXHBCbD7QYglXEMrvv97nbzmtrmdRm//uH9681mfboEhNS1UOtwswshI/FUp4vry3sP7lOD+Sw9/oXHr8t18SEER/BaVQnKfhcZZkrC3ETTT7cLWht7UajiCQCcm2ZdTd10njsIyAGYSUXqWNARzbR6rRaJ0U1EKYWUkom4qtg4d724OzObJa0VVJFxnh1DSPOyhwDdsKDHHJ2wkqSTddPlYTuOi7u3NZ9gnyIXFfT5iwtjQiB1O9whzLfCOtMN1cGZGQFdjZGZWLWaCiGoz60whugIlgClTJibWoQEMHbFvY1hKuJaOyZgnrR0IW1vb5d3Tge3mINM01hGWGauYT9Mk/aLZYPO7h4WjQVHEzZvumVv8TCvYVWrOYaqFglTk7bSC3g1RTNmFDPCEDjMSFQAByInAkR1L7WAmA+qZM4wlkoGLFSH6goymRMZkZtGjg5GTKLGgQEJyUNsMUZ3UTMUj0bkaF
YxxBgWMz9mLj4u7jelZjcDKGLAgRAxBppnBebYZKMw9krFuFLtyQRiEwKncT+MVYp5joHe3qcFZiIiQlUV0ZDJwatDIHLCnJvYZgo079aH/ZYoAoLZgbTCIXJAB5hqUVVADiEwz3WT5GiiUsXcTUUOCBsIxAgIM3pD3RDRANHQAFyFSBHo0J5mZuqIRMQh6ORqM0gfUQEDMuGsSaK6UYwgMzMOEIgCI8C8fA+AAMBOhmAii+WCeVmlAGu7bpTN3QmNyNqUELmKFxnZwFUb9jbjdrxtYjNqbQlsKqEWc0GpOazH3YABDIMjc8K2YUFlE1BzAgq8JJyJn/ZT/vEZVUlAhE4w43YtMo21cm6lTK2Uc30dLi/t9iXpBJjVUfOyxlyRqI4AEHDK0bL2cf+c9XZ3+3Q4f39z9qhq1DIe5XPnDmjv6sQErj7TWtzn9qxDGxTR7G0BdyQ0V0AHAjhYXgg5UmyRjX1iahCCOHNELY7ohBaRMyQhR7RqpiCBuaIiQmTyOQDLEdEBJDsJhp7aGhcN9HM9CwEhYga9Z/q64DYwEBkaBLEEUAbkFEIyEgRGTYgcgh2lbenL7jLx6Qf7MElkYUs5AXlMOTAFJtVoOpUiZiZiMwgvojsl/qkFu4gCIgee+cJEbGZiUyB6+5EDd1FzpDA3rs8BtjkcambgBq7qcyRNEfFgAZ5pru5+qAECnyO+8HlIygFRbS4yJ52L7Y1m44+oIJKDz1Hg2ZEGB2uTz+qPmgKiqMwuKHDA2W0EZjb3r81TIQFgUXVzws8fVH9LRLWZ0iP+8vLy2VWpsasypkRjERFv20WOqRjsh/HixcW90P3Su++/vPxEAJjDH3zy2et+UjdU7cBWbViQd2zv3AnvdjW7gpkzbyO1KSXnqR+XBh+00Ab6SW8XBfca/vC6vlv1vWUic7N6tloVi7v92NfheHmsJiFwLXLz5hKl3rt3j0IQBI4cOJr4m4vL9z68X3Rymt+tEkIAcMQwv2bw1mxhAGhmYMyBMTjCnL1QL4x0aGaYe+tM6cDgm/sZfLbkOAI4qtUQwltUL5gpuKGDOzCxHcxfinPY0A9Tm2olDu6KCIQEfqhUw4PJEAHnjvsK/sca4ix4zXxaU4fZOubuaEzojqZ1tmQj88w1dndzRaeZXG4HCyogmKMxBtMK/zWpKOdUq6mLO6XcuiyM6ez8NMQVd627Wb83oGHa57od67iRKbbNDAuAkh/fe3x2cjYoSD9mhIoyDltGiUZ5udSynYog9KtuGbHVzTZQTgFvLi9//OL5+dkdgjjcbN679+7T739fNpum9wrFGl9/7dzdY8SHD48//i9+L9fFN7vu+nvf+eDJ/e///rfufONrP/r7v95vty3B0fLx6snqetgvV8fPPvnoK++djP3u7P6DH/zG907vpy997Uu/81189eKzBd/mqR/r5uzucJRenPln25vmy3/x337TPM7Hx+P1bbc6cZBxN6QuxogYbdptTu492P7wd5uThxKCALerYyJDnVLTQmhw2hMKQGxW5zliIFgfNTD1rpkpJ+pO3nuXQxy3erWRX/vv/AxeXj599tnX8p++un6ZGs/Jt9uh7LWW8ehkdfH80lyP2rb2I8Ncmhf6cWybuE4RCZqYaLG8KpqSu6IjIlNAElEkFPc/Fmfexhnd3wa/3mrkh4/ubENRmR8zkTl0FFICRBMDRERKEShCajwmKGVsV/G4a6ft/ujsaLMdMeLsTjTAH3/ysYLynDv7V/Son4IVzf4idAAy1wePj37h575qO0Rb5xWK17bLMYSQAqjEyCIlpiYwTqOEGIf9HjEAxfXx6tNPn62Oj0/PutdvLpiDm5dhcoPUpP62J851lNrv7t071TKm5ih1LY5uGpTMRFXLVKZSxq5bzq/M1eU2cDR0zM2yzS9325PTe262ub1hjGOZPCM7pES/84Pf+MGr3xt7TZnqpO56O06nZ8vze8ubyeJi9fu///Ef/sanZ+2RT3pbpx/79vbZdcyvP/zi49/73e8kWiAELWbgFNHcwSkGEJuvBMJOYMgIbAQHQIv07uDGDOgwmBM4AgRinqm0KbJBCkBIrjbvhWb36rwWUjfweXVzKJPxw5kHxeEQYqMZPujk87VhNowbIERiFzWAIi6KAra5eubFGCEEBnpLSp+lJT+E47wKIaojgiGgzQZXFRmLAzqQolMkndmVYFMol5dvwoPztoll8BQSgoBXJqU2C7qxFulv++3NtS+O15vN5Re/8O5nn768uty+/+HZZy9fOuXIbRm3U7HlOt5/fOfNqzdXVze12oLSxfX2nXc/DEPIp0fXt1cvrqYRLUa4vHi63Y/t6dHdh3ffPHs6gVCAOlk+6h6en1xcvj4+Wzz77FXbrvbTIO4Nhf1+zBHIa26a9fHR7dVVrcq8b9umVONEw3RDZAjo5hqoqqemhVJSDu9/5dG4Uyrh6cvnbeTqsmwWEGURk5qMxSTg5HRy9nB3calE1WCxTDJViq2jG6fL6w1MtVl0zUnb76cySiRO0DiwWmGi/W4E1dDGd7/8hW996yeLNpSdIHe4n6qUlNlcKDGgPT69K9dXcXFYouUUpnHgQKYamQOATP1oenR6QmZapUwjARwfL28ub/vN7uTkqBdpVslLnrbbJidAImYwCkhFJ2LIuVXxFJtBCpgy+jQOTWrLMIQcay2Ofnp2XxwBOHBHmCLbXJDDKQ77nimO0hOimwfGENupV+aYGtjcXDGAKey3+yYjB0otGw3EjGJDkUgIrq4aElMgDiSCTKRoCOo2BYpzfoZF1TRGVtNpGptFYxzGUlIMSCFzBDWoNWIGtDlzxsAqqmpMgZANDBESd6qObk2OoFXBpYgUC5T6YRcDAdnsEFb3+Zh3g2maBMpUx2HYhrwgxpTavt8h2ViGGBqkAKKREzk5TA611sKwUB0psFolmNxy4nb0Sq4x8FhKJCbyQXpEDrFBJEJWx2oDTrVpGgWRqYSUmDJ4SjlOwz4H3u9unNBQmZjayAHcCkBpI2mVUiZLeTJcdEvAlcqY8trKOCMt3CdTIg50gKcQmIubOhBYscnn+06TGBmIjTyGxqsQJESTWjiEmWtLhEzgrkShFkFDm3FRgRmNCGIkUzP3UZQJxyLm7u4MFNq4Gyu3kcYhEBMbiJW+phRNzYiG/b7W0jbN9ebWwPb7Q0Hybjecrdqjk5PbCyfoQjw6O4q0qp+9eL1ak0AbjxqYKk9KYl60KtZJN3YLAVmEMazP35kMhGixXAbGsrk5f/+Dpz1eXww28XjRN3GhMYBzcGopR4sL7oL4dr/vAkqMiyenH+qXNhcbXHe6Q0ypWTSwXFiMgbkOI+jIFDMtUJ3BwZ1TC6bECQDdKjE7IBGpCM3eeKTYJEdlDhSshhTaDPsaEIt6QEd3CklUQmA11FriDCUhRA6IAdkdyJAQDWlGcpABCZqLO8N0s1lvh4fnd4f99v7Xvrj/zm80HMF1nn7AwUWJGWd/OzgCiAnNqXUiRFIH8jlKCUIWAorO/Ah2UxUFsCLqAIyObHPrj9ZChGaSmlTHCSl6MEoOb
cRIrjBc7/n+EiIAeBlGWHTU5brVGNtahZZNj0qRrVY1qdMUiW0Yc25it6gCzXKx3eznHZgBjLXM11ZVEZ9SbAEygzsThmQwi6FmIqBKpWgp+7FCzOoAVWkqbg4IwCBaEBHRMBAiV625i02K4pBiKOoGnIwSRjEkNxUoqjEgUXD2UsXg/8/Vn/1slmXnndia9t7nnHf6ppgyI3KqMauKVWSRlChKpNSiLKndaFu2b7qttmGj4QsbMOwLX/QfYBiwb+wbGzAabcNtGG005KbdlgFZs5qkRFLiWEXWlFU5xRxffMM7nGHvvdbyxXmjRCuQeZEfvsiIeOMMez3reX4PzOzxoq6O4OjIoJahEiIFhEBhveTNikSkaNmVPBZESm2sVvM01iFnrRKCqWt503oTBB2RPMYmJuJIiDiZtbFZR+7a5enZvW69aZcnpVjJBchSTADOIkyBRd707rqaAlRGcxOS4IimpZrDPESYcYghNsyBWNBBtaoDUhAJRATHVqLibrVmN9eqjogOHCNQYABhQnQgQDdBCcSuFcka7FpOJHP+Enh+oMwOCgcCjkyEUitYyavlWqgUNQQHVBbMpbqp1SJN69Lud0O37uo0LdJ6Ww6kJdesDoe+jo5JGiughSPyRs7dY5NaoDqqdTHkaReggtdIHFGVMACcrxsiQCIlD8jVnZnMnIAMAFQdYA6ZUkBiZsSOLI2P6+4jyJNgqULKYAhTCn56UaShWqsW0NIPu6hth051OLWbzc1PmpsfXCzOtFo97Frt3+wVj64rAyRwQDScr01HgHmJpA5E5I7AwrGZSSh4rBwXBAeobALVKYKbCQnPSS5wtoqApthxY7U4igiZFWFSKG7VITixO1WDAuapfVXOg3mSgkgzD9p8Wls9V+oFHAkYHcSlhsoMEM2c580mKnjhWsKu5ULjC9+vSneayQnUtPCMU/fZygNCCZBLVbNpBhJVm4PeR7FovqrcZzg/ILjqSDPADdkATG0uiSNwBDJTQ3c3IkZHR891ZKLZPHX8dcHNlI8dZOBmSPPhHH3W0P1oLDqeqmcUuMPsB5lHfncEmOnUSrMGhDOkyI9+MMAZPKeOCLNJsR5b4AHMDQlN3cDddKZoz/2AAKA6MTLMNhYSQ9QygRW2+vzZzWGKUx1LLcjJDJiZmMxVyRnc9je/9LVvfvTD71XW07M73/n88W3NiggO543cWaSW4DSlvD980IV19Oq6jjJlN4LbXLpEmcNhMgN8h2AT6Ae35aXyrtJnB0Wf3lmn5Gh5WiypBLw99DwIgBeqFxcn29v9k+eXjvTOu/dwPspNmkK6unn1BXzEWFVtRrKjoXmdVTVzlxDm/CyCAxH9a9VIZyWNnBFRzYnQXOmnBjOzuZfBTefak1ltZw4+H0V4dsUCACMwggG4mzETqAIhApkqAIITzfYld5qr8eBIKEJEBCfAuWvtDXFpBtAwEfNMLgJjlpnKNP/GnNBtbpU99rqh0+wpYZY5sshHtLabm9tMlK9E+FNB4Y2raLbALLqSwdW8jGM/htg1ccWxCzF6GklomA7Xt0H3V123cIah75HjvfN333v0c4tF3PfbYXw5li0jjPue2saRyLuT5RfOLt65vN6dLMI3f+6X//Hf+bvPnv7Qrq6ev/jJTf9skb54uT/cv39/32/V6eEXHz5/ff3gfDHl7f5mZyfFIOltffnHH60Pq+dPP7++vLzzKz970q4iN7cfP/72X/tbnz/+g4WvLz/+7uXt5Qd/7W8+/dGPxV4PefrOb7/yvEpefvfX/9kX/uL/XBpav/fgUW0/PFvvD4e3HqSzf+cv33n/13D1FdkD7l/+8//T/+qbv/rfefgLv5YlUeA6vb65fPXi6eO33n6U9qTdRUDfPvuMi68+eNc5yOLUplz2e4tKInfffden2G7OUAIz94N2mxMXvHn16mR1Z/XWF779a/+tpmt3u127uXt7ey1BMuRtf9icrMjt/HRz2O1Vq7vGGIGoTLVbdjlnRoxJDn3PRwqfNint97lYiUJQj9ZCP/pBjsYi+Cmq6Kc60Swd4fykBHMAgFKqqc/ZM61m83XpLiGoe82lZAetWvqxL4tN2u1viGV/O5ycnnvVwOyGIMwIL56/UDui1X8qRh4fjgA4QynR0B2P3YL+6O6p9gdT+ke/9U9+9Zd/YR1Xecgu1oXWyrwy4lIUyQCRGNrEhlWIX372pO+HzcXp9e4QuzQftjRnIj05WU9I+bPX3qyRAFm7VYs5O1YkAjWWkJomZ6+zK++N0zSlJnZtnfJhd/vO/Xcf3fva48sfDyVvlitAmvopISeixbLZHaZS6txgoJOOJb/z/oPo04N3L/7gn//p0lrKjljfffRg+/r15HTzerw4vffq+Ys799Z//b/+7e/90efbvd7cFCYErGjgrrUioYBbjKxqrubqzsRM5j5zxxV8hsUO4DGQTrVmdyR1xYKgmAInIgFMMUQiqzkKgSqaMyDNLLtZP0Ign1kGyLOABMfLgJnLkaJHkViCZK+jKSHOFBsFdwdsoiQXmt8/7uhHZIXPqTecZWoicjdXyODEhKgIBOyIDGAE7qhBAhgGxKE/vP786XLVJFlOarqztluoeamqCNyE/e31crns8nh1+er548uU0i0by+Lk/Hy6nZq4QoSfXF/feXjBN9d3UuxfvgrjdP7WyZQNpjICvvXww8vPD7vdVWT75NOPzu+cRWAf/Xq6PW3Dy6eflT4L0rIJW9LXr56tvd7ebD1Qwjj1g7Nr9aaNdy7ubm9enW3OL2+u+8OLDsSrOQhgHYeJVPrDUA1CkmWMpvXLX3i3398sgK5e7oDg9asbZzq92Oz2/cnJupSsXsXF3WKsGHnKk2c97DK3AQKdXSzGXX3xctty3A97LdPJqlWtgpYSNIvw6YtdMfBhYvSUpKhV02mqT15c/YVf/YV/9v/9r05P2iYKjCUhRCFDcKxCemfVvbq6ur68PR6MAANznSp0PubSdAsJ/OrFc4kxhtQtWkOb+mm7GzZnm5vLm9vbHgksm0ioEoFJ3cxQSHIuZh4DB455zAhcrQA4CklKhJGjMnMQ6nf7k9O7/TQSeqWS80jg+0MvaaHVpnGMMSASBSK3WopDTXFJ3msdOQoxgHnts1ZR0MiBWdCRUph0CBJqmeY9W9WMjEhgs6zJLLE1ABJxFzTSrOTIR/KLkXiMkQBBPQgXq6EJJWcAMKvmCk4sQa0ShaolRLGqQYKVXOoUxQx6ijZHdxxRRBCBkA2zV3WdwWBY1QE8cWTAWoyTjiUTE7OUOgQJbipM6uZQHaHaJCGYzvT6gEhBupI1cXCymndRgnsJzOjVwQNHA6ylRFIdJnIJ3KmzIRsxBzZQYVa3qWYHr1q7puvH3DZLIh+HXZ6yWUVgRyAWAQ/C6KRDyWPJVhaLtpQJQWNMqmW2buU6AbHWquYibZ4mVHNANUCsPrvovQoKuRoY+KQ6HctlbJ6byM3MDN2YBAkcCdwkxvGw73fDolkEoTFXq3UynZfdzOQGXhUUdSzX/bRcLFZ3u1KU2VOb3G0mQy1Wi7Gfyn4i
hcDHE1HTNq6qE12sTjdvfWVPJxvfIenUvu4aTNz1k/lYgxqrQ67j6EZYMZtbnsaJYCjjols6S+1rm5YdJUYhdCJcXaxffPL47bfvMNj1dis27vcYGg6rEAOF3GIKtdQCFpeBA6Q2pDa2q9ZBuQsQ3MxDI1zJpupgkxpYJQhi8/wmpUyz0RyI3Y+FmAAGc8aYGWMyUUfSouJmRYlCmcaAECSqGyMjAwmb1pmhkFLbjz0LG5h7RQDzqhyM0JCBHJ1C4PEwbj963rV3eie8c1pjqLdbCYJEZggoLGRakQXATB2ZiAgcmLjUgjD/JxIgEZkrAngtcxhjbniY9xIcxK0KOLlDhRC7sOws4DzPDuMApUoTD4dbaZiDdczZ9vFsobk2i6WGqFXW6UK3XqYh3T1VKbaguORcNJt3QSBXLTmEgExIhA61FC/Fa15I6MeDALKxOhNLnbwCqjCbCoFp0TwFjljrsD+UXJFDybWC84wgDGzuECmKsHC1wgIhpgQI5kLQChIHNqqFKE+C4sWgakJBatxAgcxUhKNQRN71Q1ECxyHnBORALExCiBZjgEWSRSJTHyYsiB4x4KA1pZTHbI51UKjIwjGm4+ENmYklSIzSLVNqJQiAAGttyU6Wab1YN+2SMAjPXYuW80gsRGSqrsaRhAQI1UzNxmkgkuAViYGIOZgXZiY+xpqY0E3N53OIEItQnLnsDsVqrZprPR7YiFg4QhA19AkQQFJAQjRgMrdK8/8XEN0c2dwACIDcqjCDVwIPTI6uiE4+wQAWCHnc95EFHQMJkqXQ7go4wqSHjKWhBoA6lIo8ZMeIC0lgUJGTxMlGR9p0Gx9vhrpbNCsjJrU69LIkMPWqCJREstWEdt7FgJgNvHqFzEhVC5OAQ3WgmVECIExWNVT2XNJ0w/trLcbdojZQWEpag5A1qyodxoSueSq9UbN5i6bB6i7tP08+YX526pWGzyxPCgFMzBwR3OqsYeAcr0SqOLe/H+cIJAYyIyJkJXc8suc5BgZxQ2RzU0JHdiciZjVEMBSwUskEMDjSqJWIHZC0Es4fv5iNjuxAgCxkAAbCg276fEv5lhgBWYEdJEA9sfCqQmZUryRlNiKJM7m7D0TiaObo0CmhJY+1x+vvt/YzA51YEFC0krkBAyulsMg8aTMjRMc5WIEISPbGW+dHSOjcMmZIZK5uOpuLkY6INbMKxAaKhA42F1eZGyKSMAACoVUFFDdFACSad0R4DNzOv8q8ySf3OZtBasZEs3nEXOeGNTCtVudtrM20WnBEVC2AAjjjFGe+8qwwzAXZBgBzr5HOpjBwAzuG1NzVpllHqq5zPNMdgFgBXCu6oepPPnt+M8VsBKZCxICRhYQ5EjK8fP6Kent059Hzq6upiYtV+9GLF7uxFPQG4aLhe13ovDYilse16IrJTDnFNmLk2rqbekJrwFOKh8kalFW09hQ/OcBHg44VP9lZKcOH5x055Nt9d7LsTs4OfVHzw34Cu1l1nZndXu6eMT14cNGlhODNMkXuDofbkAIBgCPTEbE/WyfmDTcgEDEce+7B3RDn99GR3wVIZnVWGo+tQeBMDDbvwmcBjmbJbzaXmVu16l6DyMw/mq8qJJ6VSJj3OT9FSAEQiXo1V7e5NZJw9lfam43L8Xb1Wcg0mzUhdQDzShRmMwgiz9nEWQImCqp65JvPFiQiVUWCWQMlZjc0V6Iwi0ZvtAM4Qt6RSFJMXeIQSNpuc7c5edScvhVXG+AGZMFxFeIqps2iO9usT5M0y7YJBOfny3uP7nfLeOj3BbV4Hvrbcf+abcfeh0RlNzZyb5neuf/w1+5/8b8t3Zd/4a/9+yT+ve/97sXby9v9iy98+M3ty6eRIe/LOE7qnJrN/bsP6tXN7vll17GT71+/TNDXutV82Hzpi+HsYrjdxbTZXLxL42Hx+pNPfuP/vXv6yeWPnlIJb21W4/X26ol+46v/7u0TgM8P8MOn7esnNOwhrNcPv3769s/vnueVrNf3f3Fa/NxE66vL6fKf/tbT3/zdx7/9L22o+9sptYubZx+Pr549uPfg0NeL9997fPVUwR88+iA1DaNFCUlIFbeXL6QoAmpYthfvt+3F9skzmCZGO+xu+t3h9tWwvxkPOa9PN9Dop48/++DDnwULVy+e1rHeuf/AwooSNrL07NzG66FnYS0VkWKKNRdCaJvFsl2BYx6LWWEmmS2gAPYGm04A88rrjTDk/lNW0J/RjNydCOmNhJPaVK0cM+Rw1FKREJkYCdzc1W3ubRdgyOqhaUtxBhSU0merFQAdcDfs/2zi7F+rVTPkAOH4TCWaXW4R25/9mX/r3S998/zuF3/xZ3/h7tlFqVUCWzXi0HTdvFAi5Lk83sZc+v3tq8umWz578oIxYY51dM2GiqqulTabOynK7fXLtx4+GMu+XS2laW6ut106ub4dKvJUTVWLuVN0DBhTSvTm5EEseLoM9zeE9dn3fvB3y/RphCo4dYlWabXgRTHYT4MTUCKEEiJ267g5a8Y6vfP+/fOzs9NuExikA17Tld785b/5y+uQfMS2aQ0kl9p09OUvX3z9m3d/5ucvvvnnL979yvreo3jvEW82NUVjRsxAxQNCm0JkDnOyuZRpymVSchSmrgmJgcCFkUwDOmlGm3IZt2N/PYxPt/2T/fB0qp+P5cmkrw0OyANyZfGQUAKKgLACVXVVNzdTndch4MAAop4cEmIATwAJUdwZPYAJeM3qDgW0uCp6UVMAJwRGDwQBK5oyYhQO3C0DCmUkDYwi06QArOazaFXGOfRgdVI99OSFAmyniZKgxIAhiJAIhQ5gvYkXPNEipmm3XXXJprJeiq1432y+9LVfvr28wrzHwy4Ajoebq9cvp/2kRcc+11zLIb9/8eG/9bVfhXx7vqST1DXeLuL6cMiH/T5xuLu8Tz1ON9z5xQ8+uw7n67/0K3/h46efnp+cgqKTcyIttQtS+r4f98yomi9Wy4h45/wURG8rT5WMcKqZApNAaJOHcLJJOd9CnUo2zb5plst2JRJr1WVsBaANod+VmrlM7mZf+8KXQ53OT5axiYvUkFEeMBcHUHInwAp4qGruhz731XrEgTy07F5duFueOXlWDSKHm8Ozz17+e//B3369uyllMrDUJohhMn1wenKvS1AHDOHO/fvzXTD2Q2zi0BciGcfR3WKMJ+uzcT8cbrZBmIlTk7RqriV1CQGaFOqUp2kMIbpT4FCLlpqDiAjWPJS8I4R1e7HoVlVr4s7rbDsD5oAVBCTvt3k4MHOIzCLnp/eHQz8OY67WrdbqFQwI2WyG9Xm32mzWK1dDw5qNQbs2SCAHDKErRlOthNimRnWaxh6JDrtxGgzmi5yYJBKHJi0ZOTCRWwMRFMgocGAIbbMGBUYHU9UyU9oAoGip1UQCMokwQgliZj3Y5OrqXs2qVYDqNgFUgwpQokQ0AeQ8TeYuwoTE7mhVTcFNEANRqX0QCASMhC6aK+EM5waFAmBas5mZe7U6j1sEZDrWOrrXSfsKJYToblUncDDzUnMuk2OVYNV6FgwhAnku+1IGgKp1JMwO1QB
FAs49PI5NSNr3OlU0SiFwwKaL0obQwGqz6Fbr0K3SYpE6SY2YlxTJamZzrxVNwdSBmZogSyKJMSlaaBbgGKQxIEMkibkUBKy6NxjnPymi04xqmbcg5gA8/80ji5m5g1bVoik1w5gdcNl2TCwAOGNKgRFRQnj4zp31evHg4T2JIhJZRGJQt1oLBY6xCyKMvlil1WbBcnwX3G6naRLVrm3XiwgN1/X67unJh+89+taD1fkJhKUERKgBawSLHlcSF4nbLjZdaJpus6z5qpbrYX+LRSzLYVurBkANEXghN1cvIAMzksj69OTly2dNF5AJzVz1dLlMXdCSY5fSZkmLlsBSEkIYr285u1d3g5CiBCZBSU3XnZFJHf361a7sFTOyITkIgVk1PQ5+mucur3bMA6IxOjNwDMYMeFQGSi3oXqdJtRrRPE25QVGVEOazolplEUnJ1JAAJaI0KgE54QDl6Rau+7AIP9o9ie8+DMuluHsupDjbQ+YRaF5Z4FxqBeiuSAhEAHz8gtsxRMlENBvwEYjsuPlmV7ecHTilpblw06oZOhCCSCcUBAOTIEZ1nHJNq80ImImHPKYg5fq6tUQD1Osee/XsidxyH0TSYiGLTrrusJ3qWJl5GHp21rE2ofPi4iTFsCiauDVQhYyDxZhRb4bpeqj74pOUHKaBS+3cl7nEaQB0NgcTwk2Mb63D2+d0/8IvzuTBW3jnHmzObLOJF6fLB3dP759vNuHiLJyvYdE5BnUxFQRCcmcEkoiRFsuQkjJtF501sSKqMBKTI1JMLmIptuebdtlhMdiPXJSMaq65akaairNznorW6maqYH68C4JEAhFZdN1pai42F++d3/vg7p137919dOfOW6vVCaFXrQaK4EHYasnTOI19nQarvdlY85TrVGpWB8QZaOJVq9Zs1VwRMfKsogK5es3Vqmmtbkrg7Eo4295LzaNpMVM3RXBkkdhybDkklMghobvVgohBAiE7OAsjMrpXs2w2S2fgRm+mZwCs876ePIS4aLvAdMhjt9osl8sUIzkKSKlarZJQTG2QlGs1s6Kj1iImNpWAHrCGeuC6JRvcdlfjS1kFTgGFcxkCW0oECCyxaVKdW40cIoBVU7UQhETUwBwYw+zxQUJGbiSiA4FHaqb9qHlCy9RudPn2tHxw2NzNdx/mszu2uYDFCtvojCCYWtksmtAwLduyPpnO37lp2qljT4owJYPGiR0RiGf/hDuB05F56bPLaL7rAjOBEwCouzqSICI5MPM8SRAxUYq0QGfGueRs9mY4AIkklpSRDgQjI4SWiRGqmym6IiEnonmDApE4giLl3K5vZUnCYoP4CIwZ2CB1nloXwkgSlBgoBo/BOc7/mKJlVg0WwUNhxE7X3De7xziB1wYUec5zzfab2RJcy4wmQnDTarW4FfD6ZnCa1RucExg4zzNIAGx2LF53M3dmDDC/OH2m1cwvMjxa7c0QeTZownFVT/5mla5eZ4K2eXWrM41otuqrqpnOCc15028zUQfxGChznB0khIzHsQ8AwFTNrWhRr8yMXnFOwB0P37MyMTvxxNzRiZEJEAwB2NTmOhyw6jCx58vr3ct97Su5IjjnapMaCqUYll3K+35lMY7N/mZ48ez5arP64dOXT7cHCNwSvrdJX7yz3ARaN0ma9TDZ/UVaNizMpVgwbwmWgTddjACJcBNpFSkmRqITom+fh2+v8X4CJH5e8E+vhsmhiWHcHmoZHcrmpG0l7Lb7cepDorBIj5+9evH6prqGJKaac3avzD6vM93czYlohgHNQPH5CXOco/2I+0EHAnZDrXb8kAEZ+MjxeTPJzOLL7PpBRzCYyyhpro5AcYM3qevj98zqj3sFsPkrc4mKqiLy8f0IZFbfsK3nzA4RMsKxtwIBCOdAKCAQgYC9sR05zoFcQDCtZgUQcFYe58ok1aNiBX5MRILPxqhjm94bOUHebA+QkKx6jI2xqIZFWHJKbKjVJXXN6lQA8HAwRwxQyzRmbJvWKKpTnw/Vd7v9y8NweX31SoII1n3e3W3urbo7J+vTi83Zfm/NuG8qLVfUT7cV4jQ6Vhp7MCCzMvXjGOjOvbdfXX1/vNqtD/kf/fp/8dW/8he3+/H2xbPVncXbP/fV7zz/8Yd/62/90Wff9enqyXd/5/zuu4vNzW9+/1+ePXxvfbKGUk7evWv/4nbVNnT67fHel3cP/8a4/y/q9dQ//vjig3efPr3a3DvPKicPfvn1d/8u3nvn9rau1tyu6aPf+CcffvAtGhfAi2I7A+h312cd3rx6sdzcP/S2OLlAgNefv7x4931EL/tXn/zpv/iZv/7f57bZ7y435/fUvd9m1zCOr5FqGfL6Xjeq3nn3m5fjyap7+8UP/8WdiwWkZn1nvbk4WZxvdre340S3u/7e/c16vXn+7Mf33rovoQmxLUUjobshWtt1FZQpsITrm30KNJVSnCTEcShO3jSBsuYx/xmB5qe+nmNJ4k8lozlX6Qgzuyam5GpgbO5EWOatrHmt1eYWH+SqfthPbbvZ3x4CBSKQIMwyU1o4MDiR0KE/EAG6E5G9AWIdT4Y466cIQO5AgKD28z//dc/7P/rRD2nSr3/ta25D1zZNjGHZhCBQJrMp9z3HNsSIgvVQXM0x/uF3vjcctDlZ0DI0yy5OOvaHzfpUDwPUUkfFGrhLRaur1lFPVqeHYUot96VoKeO4n4oCRwWpxf1N08HYF0cZyqBVm64VOfNCo5ZhuF1TRMICw6GMUaJmjqTg3m+H7qQ9SfHxj6/8ZPXHP/idE1y9GsfFegHInz579q++88fo06O7y/sP1xMNqnb5cpzGYpTXK1qdtbcdyLt48TY9+872R783MDcOMI4VIo79tEhRh9pGev+dk0cfvPPxk9dafb873Ht4lm+HH3z/sxAWrs44ox/mXk8DBGEEtKJaDMxhVLp1MwNGDEzsHgIFwkTUcIiCqsrgM8nUTF0tsIBWZD8SkRicSY9vLEiJDE2rcxRQRSRnrOCqBjLjjQCIIXCvNRJPqNbEkbg6hnXXH8bQihCBltgFc0QDCHj35GQar9PNyz5XXp5ebXPm1uuwOVld375CiY8/+uGG603ju/62kLw69D/z4TeuXz+/2l7+k9/6jAMPpTark/1UFmd3d69ePHjrXs6DM987v6ivp4ftyT/8+//Z/Ttc0Z483kvLzy9fprRYruPk4Q9+/EMdSystQVgtV91y/fsf/ej07v3J6s0wNClIDA/vv/Pi2Q+7tt3fbttF5zH0h54DXd5epbbl7t7jH/9xF4K6dSlhZs2+2ix2w8tNampBxXp67w7ZxDFYgSmL53LQab1outCFECYYF1168tlnpJC15wBISAz9OE7DNA57XVCSMDHnXNV90YbrQ39gnAqwAwtPU31x+RqpphTaGDv2V5988tu7m5/5+ocvPn/pSH0pyC6SXry6uhOln4ZxHLu2ne+CqWjoIpBLE1KTdre3IYa4bLnI4Wa/vd3FtktN6LwZxym14XDYUVqmRVuHvSPkPLWYOAgie6GSlVmIeerHfnyNWNbLJk+TqdZSYmxKVd
vWNkJnBgRAWKnBRGN1N3kYzuiCjlbh5fArVSY0BWd4RKDFl8LoOVI003iWd3SM3KIbuD+Bh9EzAAoqi4F9AmNo1ahEAcgcA46t3dl0M+ODfzqYqdxmFGTVY8cjSt8yTrzbur/un44otOIQqN0lB45Ecenj1Xunvv1/4Ggo5vXm7UbBVR6+nms6/91g+w272emqc//J326mmtp1aAxvzLH//Li9/5nTKfuE1ofPXtX+fVxf6TTza0xWP40Xe+8//+J7/36x/85amxjz4/Xfzy03aG4dmRhdAViUGNiFQcIKx3lyEGqZVU8jTMp7sXH3/0a4ibs7Prl7dfe/L4cHj22a+e/s3/wd99/eYILb+6PkgxMxtLmUWyytnZeSBE1ya0q66bhgHciYCZiIAZbm+P1cHVSi4QUQF26344jV2XjgMa2Iuba2Iwc7pXhBy/Uoi+Alq/VY8WOGFx7/o2tqFKZXAvBQFNwRwCARG56kL5B3cMWGWqtdVSlwhAzTUQe615OhIqMSMvDykwOzRhs93IzZQiFawUA0JIqyAayly7VXc8nmoZjsMprFN33geG6+H0wY/e/3m5jfsQu9Xd7XDOq65pmxgZlSA0q47ajkJLLjrn/uHlNE3ENk7709S/8/43TsOkSPP++PrV9ZP3PtBxLyYhETVNiE3TBZFTrXeOGZ1rfUPcclgf7+6nB0tq/Tvf+9rHX7ysYzlNX0Yq6/PWAGrxx4/eWzXr6c2L1NLZw3dOJ9V5+vLZX8SUHjy6NJ8ehYtTpXbd9LuLQGG1TXd3Yzn57sH5N7/+7r/4lz++On+KHJv19ievjtD3ILy6Wh/zwI4+VNEZg4pIKeH61Zw2TbkuXtlri5swDEMpqBpIPWAjItAySjQoaGL3Yw+iwEU0RBYT5OBoAsboDRMRmTktVcoGkai6tzGOUpb9rEGKhEzOAbUYB3w7vRAEWK27m+M+kkCCXAERAnhDAUzJkEI81aoBgrmMlcsU1vHZ7dxytH1m7WOTjqcc193z1/v59tWTBw+fPnnwq/3de995cvvR9fHF/unT9w77gxVdxfAXn/3FxTvvqcOLN8/PHlxud3Z4M6Sdb1ccACHKgyeb+ZCjkQNWmd559KGX/d31vpSKznk8asPf/Uvf+Mkff7rbbtFk3W8/e3GIHpsuvPPk4WeffWkmzKDOzXpVEd+cjqnpL7/xFMbZaz6MeXe+0YK1TEi82nbM8Uy63YOzm2eDFZlO8MsvXv/gu9/87OPnrrKfp9r4d3/72x/94o3X44Pzc6NoExQbAVCy7Hbvf3z7nEZBwyLzzTyYwzTk/YQXJa8ajG08nY6rNmar3CYMEDjk0zS4cAoQAkK0Sl696TuPp35HcE1SSYZsLkWh37H5KQ+n/pynO6dV/90f/fof/uz3XWM5lKz2u//6L77x5NFP/uTjhDbMeWjSK2r+l/+rv/zP/sl/1b6U56/eDpJNwBbHuJkiAgbCXEcITCFqhZiiqDBRSGm3uSxlON0dOSYwG2/33XYn7HmMbgTuJtZ0u2k6gruaE3GpwsSYUpknV0ldO08nRAjs01xLLjev6Yy6Veq3FxdznQBsGo/mNM1T06b12W6eh3XoAiEg1pyJiCMgkbs/6HZtq13XAM5TvTu77Eq1eR7ahpuzVuqYVivVCm4B2QFzzl3TArpK6ds0TfsQmFMUdXVNTRrKTBADg7CYSCCac+ZmhRCYkcHzrGpqLoRgUqwoBHNjhsBg1QajTKwcUpaKRsFCgIZDcpljIEh8mkvCxkVDR0UhSy0yblabMp/Q29j0CibkUhWZVNwAOYJ4DSCGAhAUrIsp18y4wAg5pm48Hskg6xSJqmdiDClWHc0kxU1RCamtWk3FOEEIq/OHJqVr15SCHEeV2q95Ok5NitT0B7EYKDWxiBKhaA4hFqmLfd6tUCQxJ2N0coMmJbPacgcYRSyEUObi7oHjkHNo1sxcNeP98dUAgUCB2JzVDN3VNXBDCOoLg0NCiObOzOQRwImBAMBqdbu9y/vTVEvNtXKKc5mrlIajkttcRzUOofV+E8I8T1i4Filz6bqQ2t6d1pseA8cS61u79f5QtleYVW+v79BfhA73Zd7f3WXL2GI95dC2qe1CiCctd4dBqtSawSEQoWFqmxAiOPr+6OcpdDr5WE5HdBBXohhDoyJQrYhCoPN3H9/W0+W6/dnPfv6jh0/LYFDBEsY+2FgIZZRhk0jzHATnfYmrBqaqRcRmqrJOsZTj+W7LCdSEqWmazkZHtFryeLCma8yBUiijhLaLfU8UVCohRsYUQ64WmCigAtf7G6mGwI5ISGYGoGQAgO7iMdaOS+PN+oxmcKUYOwd1KBwoEmnOggNTzyHOjvBw7W3aYPP5z3467rEcmt27Z/U8DVc9Pjivz18RRRN3tcCsqqDOCGYmIhwiEAAoxVhVsRiIeC1RKFBbPa0fvg/9Km0uu9UqMvB+sE9fh8kAopaqgikCzrnMpdZs80BMwTS2CRtG9LwfTqNAG5wBW64DXFw+uoWsMYpqng+r7W5/Ojbt7iRaQphRExIRiy81g0gVIsa5zBTJC1BkRz3lsQ9Q60Rem8bHeSjKxYK4amAkIHRtqeYK7nKay1zEbJaqFKqpk3LDwB5TMHRFpsAAAcRV1QADsVOYAdCDFeiZIgC7Wp6r1yrldprEOaszLkkHqGociQMjkzEKRoSIhg4RwFNMFd2kiFS610uXO4wTgDkYuJnSgmFzJ1ooRtjElDis+nW7WlHbZpE8FVUoedYiOZ9i06l0qVkBcaKGOBIYkZlYdQdkBRNCjtGNmQNBcKK3Hg6PYTF/iBgieAqM5gEdAUx0gRZVqAKY1as4G6tkJwMI7ujLaPPeMLJAYTSgdiaENUP72uGEvtdC5qhCCEVqg+TuRAqlEHtPGInHPJhjogX3uaBpqEv95IUDckzFPZko8kXszhIep1ssGCyEQCZKTk5kAAjEkatAiEQmBBGJmFjsK2qyIi3jPUd3RADzJU8HjuaIwImWSBKqoyELUvZaHU28ihaDYjCrz0iV3GMT275HaIfJGdyAOaTt2dnjd5qUSmBSxqYFikgF7kG8i2OS7juVDcARmLv1mmNAR0KsIgqC6OaYRZUcCKk/r9M9H4JCBBfVAgRVMklKwGSOiHc6XUM2CNExgjUkTFapFiuplkeCX9d8IcLeYogVgCmSOYuTAMXmxuYpxUgeTVuBhoIaK8fsmhk6wEY0anzK7diEKSDEmCpEHVHEbAoYF7OUudO90OOIBPed8u6A90gVAHBYjs0ItNSig+tSCOOgb1EfjgCIqFoXogMhidYF3O732s19xOjeCELs97CIRbRCQHJVhIUwZACKSzoTXKwCoTsuSYHlP3WwJeyG4HivK8liZsGvAF3mRMFMFqY1OCxBNF30LzCrig3eDtN+kmKxVAF1dFLTVd8rBasyTrXMRfNU5+HqYvtnH/2qup33a0aikBK5VpsEXh2OZcoPCb7R4YqVOKaYInsinxSqo7sp+KR+Kn5C9CaOpQDBqksuQmhcldwNsRqqoJpV10yQCe60VOCcK3Y8IgM
AozeqzJ6aMA8VrUKIU1V1a9dJhhISOQALbKH+oIUm+Gcn++Rufj3K+2fdisM8zDFwv91+8ub6WOxru776cb1u9c1efvzHv/abv7m5eFDUeKmMA3QzJvYF6LJECOEtIpoCES+iISMZ6JJnBFt+X6a65NLI3JnCvX0M0U3B0BGI30YL73VHXLz5xGmxuDHR/cIkImdQtSoOHojzNB7r9UX/mJDc3F1p8d3aQjsiAjIFRCRkRHcHU0Wme8IVOhGB2tvO7HscNzPb2zDyV6yiGDgCuIMgJAJzVQImTt1qq5JVDARLmR2h+mFScyRzCExiBqDMS0kcLXzQJq56ToRCTOTO2Jub67gsjxDIiETFqUVnqXR29WAeH5R824Vwdr5dr1oRe3h2xaGNyYc8rGPj7LGN0/FlbGJqN5ji9XZLP/gPzp79NJ1drnFbw7mFKGSx2Yz7g+Tj2eP393ev3zlrcDy+2b84q9de4+F03I/H1fZiurmbfvn506/9JjXN6skP1483H//hn128ezUOefjyxeZ8eyg3g/v2yXmgAWJYPXrX15fm7FWbVfJqAACWgUBI1eTqcf+rf/Pjs6unsT3MQzZm8FoH/5//r//3Zw+evrq5O1vxy1993HZpn1EmQ4B13zXRmxjuTs/W/Sbb7EDEYZ4zclQyJDtNw83+9YNHTzbNDhxC5OEo4zQ2TRzHcb1uEeB0Gqt4YHIDM3/rIPavmtAWMRvfQvcBIDJ/+MGZaYkpEZMBE1qes6mKI4kIGBNNpVQtomJu0zBpVSNtu9VpyskAkLNWI69S2hDMEQBMlJAbDpEDEMDlRnI5Hk4P+wtCHFSbmGJwSvzgwfnx7i7lKZV9ByIjoGNMxGDEvD7fcmpqFTN3UETSUlwqoTfrLpuIKQc+P9/O+/n99z78xU//SFy1Izxrm4uzm+s3u1WPgY26tD6bfF5imI6zKdTyoolrsw2n7v5HpPryk49Or1qHRs0QpJTSbjardadV33z24kBgeWpW8cXrV1Wgj+1lv2LT5y9eNS0kwFVojne3Fxfn8yR3++PV44c3z14a4IsXr374ze9c30zFtAU88d1733j0/JcvpsO03vTjkKXqbGJiaDAdhdCqFCkGROs+YOR14juZx2ny7A+umkKWQoxOGRWMwZ0Ii1YwQ6BIpIYqAoYYERzUnAhoYe25OZiaOIAoMGAgbFN0LTWbsSK5mYNWjskdMFBEn+dTw6RZIQAjxhQWS6sTqjqC1VK35+ujzTZjnS2mJJEefPP96z/5aDy99paadTx/9ywhOuucT5OUL16X0uyw2f7GX/v+z//tn7np2dn6MO8fXD1GRxJ50nXB8dmvPm0t9j3vOL65vl5tL5wQpkkcOYUPHj2+3G5/8csvq1m76lhtGvP5g8vRrkObAqXjWJrkD6+uvCCyTXlqu4QcLq6uvvjy1d1pSOv+8unVNz9851/+7o/1MPbb7e78vJSaq6x2Ky3GHJjoansx3QwwlkfbVY4pNLY/3rzz9PHx9eu+b29O+7uPbpuDpTbYIIEiJrw73X79a999dfvq8xdfzlIvdmdffP6CYUIvbd81FIdSH+0urq8/7gKvgnfB99Nch4zgiBaZUIGaJIDzKNMkm/NtLSOzv7k5TdVCG60Ur2K5yklFbwO1itR1zel4/P1/+XvRLBdjjNNt5pSGqcKbHFHe34QHHz741bNP5NMXP3z0tb////3ll1/cs4q0VmKw+3g7IoK4p3bj7rVmt2W6SGriHDjytj3b7/cAWLJP+dCsd2bAAeZ5JiIhajbnaT7U6YBEbACmpjV1sWYdyjHapol9mWqZVItToul0MHgVr87b2O02u9uba0PgDjYXu2Bcx3HdxU3TH083m93WtJ1LadsopZrUlIKqqemDJ+eihZmHQxn2Q4pNu0pVVDyFEDhFyZVDCMS+dP855lIxJHMLgecyJwxmQJSM1VDcMTZNKYWa1gkwkJu6O9+/btHdQmBmFyf0yAHn8ZYac4RShGJEka7djbdHM3YnCMlBwS1QUhG3bDojGAdGYzMhhAdXl58++wQQIqEgBgwxRQBEInIUKxxDjFQyAFgikGJmmcFLHfsmGaBIMPYYU63FlLV4zcroAKBLv3JgAAiUTB0NY0qxCeIotbpLWvXrpp2K9etNrbPaEoCXTdepoBt0aQtaEClxXECgkVIuM4eIkXPJEbHWTJE5tiLVXFNoVERFwaGalpqZcK4FgR1A1KpJz5ExELJYJg4LrcaWI5r50ipM7q/f3N6cjqaOIempSi3MBKWYVASYhqzqfds0XeOAdZ6G4AR4OpyYohkfjmXtkQgispl5qfKWW2eSnn8+9DX4Q/K7m1aCgU/7N2XOyLjapLP+DIEqILi3TSxuK0huSO6oSCEA4Hrdb7p1Fnpzd/Q8g9xMw7xZbQ0gNs0C2mU3BGiljvtD2j3a7rrTcbAa84Tri3Y+DXk4ah7bNRexuO2AU0osFNBxlWIIQaRsdn2KsahYLcxBTWvNsWFVj20zDsNsBTGkJgV3r7VMk1tVzMCg4A4KCOK6dPFWNUJMIRKRGhgxIgE4MaO6AqSLx2G9kaJdim65zoWZVEzyyNwgakwxWEOOOFcruU3hOBYFqBbKnE+nm+Ofv1p/P03rLZ09iJ8/bwMRurlaXfCfCoTgwi1JKWogIGaElDR2zdnFZtW/c3m5Wm2wOgeElk217F/WMW/MQgaZKTRNiH2BasMIRROAe2p3qynPwDhUORwn74hWK3h8IYnSatUkkM362qq0CdoOYiK12LR4GjRnPZmUWVEVFK2ohZJVxec5GzOgF0OpArnGFvpVyF5jE6voIdd9xsNshnH2ud1dxJRElSKDKTmBLPkWQ1AGdDAijBEpIoGEEFOM7FSNwYGdiMXdTaWaYjCEMJj0zC2BquU5i1YVVVcgSl30al4CCXpEjAREhujqgSAa2pS5aVx0GYpb0a+6n8DdljiO+nJNBSKg5ZCnIMIcI3Ibm75p2q7DrqdaCXieNddcZPJqUXOtQwin2HYlpJR6QjdFU3dHii0xBWIIgR04hKVr3MARGR0i4z3AtZYoVeqsZAQBzJCommnOGBbjNEZOkodIrlrcFdCAAzMDgLuLKyATMpg2DmTzGZUJw4vAIy/kWCXwkKjkggFiIgIQyVYmYw9oQx0QQ0ZoY1PLlJC5upLmUlDalNpE1DWY7t7UYVqNXeQUwAJhcQf1JZgkeSKPogoO0zTXYH3ql/ohB1SRZU5My7j3Pq8EAIAUCFqGCqpgwoTZqkkpiNWWUlsDE1dzh8UnCOixCWGzGpGZqBEFQhGNzWr3+P1udyEGKaQUG4HgFtALEjqQuiRmuQ8pLL3gvNArnQmqm7rIKDYExJZTLuQIAjLp0LRJ1Z3AVIgs64kgosKCVCG1kabn9XVutCOONTPkgDOaJK+PvK7rtCt47jEgG/FkmYiDOZqAKGN3N885hRB6k9IBBw+u7m5OVrUiEzs0huRQ5/x+jCf0Gpt11WgBvBoAU4D7J225GJk7Ogg6LG3yi6n6fhUgqCoujeduRASO5oJIhOxgAE5A6npfkH
FvJdKl+8wNmUhc4D7Qd58IUjdYTJRvP7NbBTS8/4+Rid1AfaFWOwK/raxhhQpufm88scV55G+bixDRoC7fyUJBEisA92xER1pkWBUhVCDa7w/PX5+yhuWbRg7m2DZdBS6At6dRxdHMVHe77Zevr8exnO22m81KSlm3DVrd13y9H6dSHzO908KObdVxLehAUmdsWB05BHIrCrPQIDpiTQ0J4iTarVLRcn7WEkPbkqs5M6RILUEIIVI5zVvSxAAJZvLAGCJtWnrgQZhWyblAHVWVnJCczJQYEpCYnW26mvPjqgLVevzVkW6HamIfXm6+9c7lrm0+vj48vy13d5qcnsRIjcSU9renP/lX//rXfvsvnT14UlyrGTMAGjKjoxsDkllFBHRQW1xjDgBEZAsaiGxJjwHCQi4HN0ACk/uwoLmj8/35080MwdUcFzulG7gukvQSTzN7+7iaOQAxA7tXQ6SUwmV8FJgcwdwQEdDVBIgQCZwAfEFXmymoITkFhqWii5YcpgD4kjGH+1q9BXH8X3cVcQyxSUtOk5YEghRzB3TmSEBWT0wxcFVXlVkmBQ6AUKobkDssYhtHRKQGIVchDAGq1TFgoJAUAd1ZvG06pIF0zvM+UONCtQwynICdWJMfd0Q8zw/OH+U3t5uuLcc3yfM0DvnmLmyeDp9+2unYXDW3x/zO3/jb7eU6hs273/v1v/hnf1gnZYbVWdNDajwwrXQq17/45dO/IRqgffDhZvV4/OlPmio172sxvun5YuVTFoO4enjz+tXdm7uLr13Y+2fvf+9Hzz76o+N0e/XuU6mNfPEl3BTtCnFQXzYXclBCBud+e4XMEPzZp7949Ud/9uRv/Y+sv/JAfqxdWustXP7lX0tn/etf/uK77394eqNNopjANKww9n1LlBhTQ8EBI7OjO5KoNYFjXLVdF2zWXDWPJrUJzIHjkoBlGKe8XnfNqvvis3xxsT18eYP37H29lwu/SqC9dRWhw1KyC15PQz472wBGraYEMRKag3vNiqhWDcDLmANxACYjF18QBq5EgE2MtU4xYERKsUE1ZNI5c4yIeHa2VUfs482buwLcXZ6jc1WRaiZoGadTvTw7D5y2q807D8/H67Ec8rjXVdOvdmfdet2uNwGjVatThtCiehN7EujXXQOwXq2efXFgJxnn0PXTOO/v9t/74a9noabdPtw+/MWbPzjvOyiFR29tO9RiouIaGV0EwKfpBCQ23i2roFmtpagiiRSKMXBqYyh5rlKc8OzyTKtNcy0FE3Ns07rvXEulyAaPLx9+9LOfbc6uIgTJOu7venOYRHK5PH/67Fd/nGN+8+Lu0eNHf+d3/so/+Mk/fvLgned/8Wa7blWLztU5YGDiTMG6Te8CuQgynV+eWRmvp6kNjaP1u4QomHDOGiNRqhSchZ3cwBChmiUKRCGxGWKx2lAwk0AEAMyoBo7kRALLWwwA0IzEbJ0SAAG5Wm1ik6Jj4LlKzy24tLG90yFQMMJIZop9txKdjWjJx3rsbsrssWx3582MRVTIsdWKQpEcA2ySBP/ZT37+3/ydH47HoWvXF+9uTzK+vHv50Wc/e++7D5999BlxcfOmTRXg7nDsYtjfHR3ASfs2FdHd7qIW9oDElFgvz1Y+HP/sl1+YYU2Qsaxjsqb8D//mf+Mf//M/2q3OJIOEvo3xvXee/vQnH/VAVv1i185zJa8R7azdVakb8z/9F7+vx7xUSg/DFEErqLRRVGP16hWq90jUyHe+8/D1cX7/gyd//tNPbqBykBHowcNzN5ikREzH6/HhxYPR6ve+94Pnt8/Eh/Wmw9mL5dgxeLhc7V7vD451m/q7189T5Kn6MJUqcTyW4TAWNSgipn3Txiat+5SPk6GsdnTzauw3W3JmRRbMOZMZIYEaqiJmqURRUQoVdWV05GCi0KxS29EwH9fr7TzL9bM9z+3//f/6j//y935nuGu//e33fvrnPwMAqeKuZsaMS8bW1Tnc1ze4W52GEBMiS1FzrUJt33Hg1aZ78eVpHm7W63NLzVhzE1rJRwJvmvV0eJPS2kmbLhBYIOq71f5wqjq167N2t/IBzXNsUIY83T6fWkjnV/3uQtTu7u66mMZcCKgOPs1S6W4qVvMYU6yuoOF4O9Wc+76tZqVMm1VzOJ7aps1ZHejlq8MsuWlbmfTBg29hXKCSSIhEEEMQ8RhDlVqrmAAjmykZMbq5IWCMrbsAhxDBdAI3DkxA81wJ0B2lVike2U1NPLcBa65NZK2qXBhDxApuHIN4QOIYoqmaitUSqXFqUYUYRaFdnYuGAHA8vnIqjCy5uBEGRgxLeTOGqHWm1IoYEeeSIaAKGFGimEth8sTgzoTkkNS0CgDEEIkoAlRUR6RcRwoNYjQpIbRuwsDqEjC5uqkDeUQuCAaLId1Fq5ot6jMjVs2iU7zvekYwQ2UgnEtuYiJiF3IXBzXXEFgN1AycEMjdmBaKpy4HaWZiarTWpdzFTBAiEAdEwoiuojUyqaoUyVlNUdxBRVWQgCPPuS5Y1qwaU7x8eKaIq9QMp6HpOw5NGcuq647HYxn0dJKHF+fzdKAAtLC6AQDg7vWwavS5YNc2q94RyjTOqUlt0dXm0UNhCCtFOBSR2NjNNTjGjh0W1z82qY8ctpsd43pmmqD5xjeefPrTa8ZU5n3frnMewZSbgGHhxzKMliC0bXPxaHN6Pe/vpjrLfKxN6MVUAyHRasMyz9urR3PWECLJjAweQyDEgKwcI1vVwOCoJg4uABAixxRLqVDV5iy5lGHSaYqkYgBiOldTRCYGChTqkvlSMzR0RAr3vhQ1olCypu3OAElBT5mrgaILAJKLI1m1E9PKgSK3DAJi0MWpnuhy894Pvvn5L38px1rfHIdfPd/8Wrv55gf2xef1dItlDmbMEV2cXMmyOTDD+ZavHvG6j13XtquE5ATkplXGaQ4oUOd6KBGAivipeuwdwMHGYZ/HUUulUiMCxCDVB60FLMSuXJyFp+f2eJtXbeEUUtRi+fpl2LWYEqnbLMSRLQe05Z4WmraYMYVhOhFkiKgWxnl04FoLoxEgSCmmIJANZzcmnYVuPe3VjUCyRHCcJzMLMYrMhC5eueEAjRUDmdGXriEMhGi1p2R67/BV8+AciZhpuUsgogE4O0SmpoVa0UCdFINGD4EYrO+ZzMdDdo5hu4FVtFrZLJiAmKrF1KhkD3GqExmxgel9AA0WyqUveAsEI3UEWyoGEQCIqWnTYlyPIWFqIbbICQndoJS5lqmUorUGylLmEFibzmHxUXDTrRokdEYmRyAAdkIgYjZA5IBITI6IqOIcWoCaydStOqWIDiLCHBiMHRHUdWZSNEAzMILFz4FE6OgQwJbGD1RA8wZ160cPccaIMXrDJYIKILcW49EOHDVACVAbRfUKjAE7BwAt1SRgbD2UoxB6rwliICaV44axt+rHELEndLUaAqOqlIrM6BRCUkcHC4FCDEBsAC5mhAy09EcY1KXgmpABHXxxMSiR0v0R3kTMFgnDXUXFwFRV3IEMzACcw+wAsc0YsGtrkdVml2qpVZvt1fbBuxi7y
ImYqzvGnjiSorviEggFcERHICQHMPfKIW62GS0SoViF2UhRcYWETmDO7lO5WXU1DxPAKlITKFRXFgO3oseGV5jCi9N1JiB3UGXXJjQqCiB9HdZVLiSG4kakAdQ0kZNpUCUvQnxjMqcmxURm7NqGCMBGQQzBDF0DE7oLGMAcHNZzuQhxrnGlibUEjogNIhHS0jEH4ItxCh3AdBmuLzzhf7cI7pnXS4Ts/i/LJvlVcAO/+rB7YwgiuqrY2wr0xYi0UCAAkZDcdCkZ0iV0hmAGRIsFCdzMbPl3VF90QEP/yux0z6J1RwdaDClq951nCEjIdn9ZBVoIi27VFBwIGR3RBYBM/fmLwzyFcu+8i4buqCmm2WB/t1fRJsZhOK36zlRf3ezbtlv3fZMCo4+lvDmdjrWWUtcEm4CGXgIoIsdoJgkgGAFYy5gCZCHm1AVx94gwoEdmpyC1IoVqkB0RvEE/iAWkgIgCOw7baA1BcQoE6h4DH6asGrSoIUvWVYp9G3xWKA5GpUqIbRY5CyweTPxBsEfnTcvll3d1qvCz6+H5ae7BJ9WubTcpffrqZqr5A9uaxcuL3X7Mf/IH/+a3fvs3dg8f5wpIyBhNDQAIGREdGAEJ0QzdQVUckAMS0YJgIiTHe0FwCfuoCgJ+pVG+zRCaG7gpE97LN0zuCwWbzJaL/HJcBTN3NaAFte0hEt7TYBqzuoRGiRiJlv4zRFqcbhTZVN3vGd3g4GZv+Vv21b/AffhxySa95XV9JRUhkdp9TrOU7O6KhDEhtCDVzENahehMpCBDnqqqOYCqmajDW38W2yBAFJkjYxLro7FK2xDJLJJBakMtORBO/flVJFRRq+aG0zG7htifU7Mb39zsNu14+5pUbu4+3XVeX36+Pnt3/8nn733/aduCHue26xlS3b7frQDnLz763Zf2pp4K77979vrzL771rR9gBQsNNLyiyNOxjNPlBz86Xh/ys0/KfLx+bg8+/Prro509fRBZRPP7v/nOdD2lbzy9+ut/rV5+fdZNjY/KzXV9fXznatdvN192NByeBzY1ocBIgGGJSiunWJEK9F/89Ofy7Pmf/KN/ePHD37g73ASKX/utvzLf7VWrlenFZz/9Sz/6jcPd3eE0gXHfpvXD8zzN43DoUqOq0zBwiCmmPIxdas0NQa0WqSVSevHsWnKMkU/DxJHbJrhrNVtRBMMAiDESocrC6PzKPPT2L/dUwkVwBDRomNG4Te2bZ18+en/rgNOUSy61VOQI4LPULqUqxsTzLKaeS207dodaM0KdpsFrFRVRazmqWmDGGJ3AANO674oWt916g6t6Oo7Dfgohdv0KkJvVOq63t4dZKKq2L788tSGuzzc/3u83XZfLtDpf5bnGSG3fcKTZvO9WcbUNXb+92Pzqk19cXV26GaVmmKfGrOa579e73dnLNzcx+KfPP6ausRSHLP1qdUlPTm+K59M8jwIg1QBLiAheTe9/QpdX2+2Ds9bws1d3+6FSszpv8XD9PAQsmmse7l6ewLGjNZJ//cOL65fHLz9/xilenHefv3jRbS++fH378OFDA37/a++/fH692z5ESB9/9ClpCgy786txr//o9/7V3sq/ePnT6G1mn4ZaJg1t2vTr4fUUIrSJhmoOLFaKDF3U69PLdx58+wRwe8jbVRdjPx/rxIddaADRnM3qIoE1kVRNVGuVpRON3IgQ7S3m3AwYA0ertkptjDDkQuSMKFoTUZNCLcbISMTMCCqiibCqpRiQoInNNM8KJpoZzcQiBQqhEN4xUWzYV0gY3Obr6+tn10Vssz57cxg7YznY1dmTL16Xdbv74iefD/mTqwfrNfHhbmrf307HIfXBKIxWoaF+0waB4fWbzdnu60+//ezFn3UhGXi2Yk5NiA+unhDHT559GtteS5nG/PB8V8q0bTc//lc/0RqMm91lV27vyqgf//KZedt1u3feXf3Rv/39bXt5fHUzTBWomCmWcDwIxogpbC8fHK/v2o67TTuOU9+35DYVuXrw8MtPPjpO+cXdKVd98eXrh5cXx2l6/OThx8+PLw5DCuxdLNEfP30oswiUX10/j5FiWM0Op+GEzjLPzBoojuUUYlyvVjWPqYshhXZoD7fHVd9uFI8yxVXfblempZxO+7vpYn2eOL25PaxWXR4yRXPRLKOrOSqEiJgAzSk0TePgahndmvWqzjMGatpuX8vFWffke48/+cVt6MM8iAX6X/xv/pM///EvOe6Z7/0U6maEZqpitJR8uTq4O3IIih6IkEDViBecBAJBLoUqnl2cI9k43mFom7Yr0xhCKFX79cXp+FpqRXQn8ADFqhGuNptTnm/n2y41fdfPuZgak4vMtzdvisenDy+3O5OaXaFvwmrTTJGO+0Mu1RGOx8HNjCoYaFEH3w+zgU/TeBMQEVSHWjE1aSynaqVrZB6LVGLAmKL6MpAjdjdTp4TMRFBUOQYCq1liCIK45MndSGRqYmtOAORG4kChQVBXAoQQuEoG7sldrToqeoxhJRrWq26cbsgbDmRZBMDZpVYOIcRGRRU0EEfmuToZigqtVqdcY0gphFzVGR20ikRMDhBSb+DI3TTdrOJOHRvuBBFc2qYRKTGk+4ACYKnC3IjVyGgOQKBKrkoMxAxgotVB3JG4MVHAEFLIU1XHea7qskqrk/hchcCIoy1jM1CRgQgbSksm0QHmOhJSDKGqsqOjGRTVxRWPKqqqbk5kJvdFMMyhlNkB29CKzQgAZGBKFB2iuatUcEMHczWrTA0FZOfYJNbMgLnUWjISmHoRKTk3TWwDNTF0sd3vD/vT3PStipuolFoicxPldEyUrvfXOs+rbSsAsbmvCW9TIIaxyKR2miePrcWeEDpsqI4xRQixAK8DQEg9RbisQ855nouUfrNq2xS5BWgMWqVQS7wd3kCcynToEwGimqhYEFfzWqu73Zz2Z8NlUZxlBq7bizb2bTqF689vHh7fbdvu+subi023Px7Ou/fzfCcAIrVbNfNQmek0DpETodRaALjpulJzrRVcOSZEJtBxmKxUAqDAdS5ePFdRZuMA5kAEAmoFFSIHQERfICmAiGqOy52WQ7i6OpRKgVIkm2rwBIYIiYKFsOB3ShYN1obsdSwOqm65R+vw8QdPyl98SbXSbHrMx27DDx/HelyBoyKuthqQ21V38Wi9WmnqPTJQ1DpaLfV01CoLmBPFiYgCI7BowpCQFGDY3x58PIG5B7BaAEM6Wzn4TFjXq/lsU8823XqH252wZsoWcZrGUCFWtf3zi7MPqsxuoKDQJKyac25SdxgOXepKKeAgVRPD3d2dx+gKQGiEqkUNXcSKAfrtZOAYIk/V99JaF5ssUOaA7AqsGkJYsBSIZIGJKSTsQzCBPBczdTUttTaFmphLjrELnNSsSg20CCtkophCTE0lqAGCuJVM6EAQU9e0IYIHFCXoNmtWwhSMMSqzagSopRCzahEBV2GQFKNLfauXAsAStgAzv587LtELA1FvIiEQI6eQEIL7gvXFGJF6ZI6l5FqyirgpISKzqMkw3t+NOISuA6Kl9MANlhpEXholOTAG
JLzHwQYkJ7KGTEKsRry0uLfokHXMgwA6gSOou4u5gWpFRnMCq4SBgNwN3RQdncCICDogsHoxFXfMoQnJlfw4F4hskYzNVFdNiLOoMFPLRDUP4DhV49R1TdPKGJUe777285tPpfMOZ7u95bELYesq6oJgVRTRKZA5mEFo0qlkTgFcli4AQlJURvLl0rn0V7kBBgdzUwJkXMxRRaW4GxKiACFFwlyyiyAs2ZOlqgnAXdw4RIhBU3Tmbt26QrvdAnM8f8rbc6dAHNyMHZAYkZm4OgAQuLoBMC7SCTE5oLcNpri0I842ZRwwxASNlciuBbOanKAk+eK8eerVBGRJ1DGT1tokBsrP9fXrcCdMDNXMGDXUuJKwlvpAYyuWhN3RkKtUQmRzUgUphe3I9AZlm/pgilWWHIxjUDAjqLInLMGIIS3l9IloZXY5zyd3zBAjmSrHSIQhJgLikFzFFjQ0uLndG7jg378mOxEtyS9EMFuktIUMvcCYXHwpLCc3uy+ddmPmf08PADddHmZ4axNbzHsGCzuc1OqidzogIi0lf/f3NliatZY1uTiP3G35SDa97/dbvhwzM11QM6ZamJLdA3AWnQvUqpkERhH7xecvp8pFDYnAYZpqu0kpJXWfxjzdTW3TlNOpj0wIf/Hp54C026zbEEjxNOSpTnclI9HO7WuRtqTHqmctTbWuQ2wDs8qmaT6fh5ismhJTVnWHdeBapNUF8MxTFhOaRGdtNVcvFsHnPEdEFKgqxAxqtdZAac6CAZTMIScKCNGQgb1WYVNmINAYIyEEBJVKzI68Ytqg/uaWNoSfDPhskr37gD4XWRlYrU0I10NdHysAIear89U45H/7+z/+/m99/+Kdry9nMCRnCqLGS1kd3isghORAsCjqbsRspktsF2lJcMO9+IJfSYrgBgoLvhoAEYlNKwAsRXsOjmBM7EALXvre3UYIgLaQzBHRadnv3JeZrpv50tLGIYE7EpirS0EiJLxHkdA9+0xNFiGLOahUJEIAkZkowb9z1r2VimKM4EiBTJUC11rVljZBQxe1QoBgzkghrinutDkXRC2zlCJSRHOVeVlbVUSlTmqBy8AekQBLZA+Bm4BFRGBcJSt1nqfbJiSD3PbpqEceR9I2bp+umt22AR2Oqd9l0A8//NrNp5+sdl/ftquqVou0bZtPYyQdD186fHvev3l592p6tT9dful/mB89fjQNEihePnnQnPfT6ym/fDnk6psno+WXLz8q+e71p8+//x/+9etnq/7BAz0eTsO4e3B5/frNhz/6bz39jb/67PW4we63/sbvfPSPP3dscLA/+7Off/+/+7f6pw+4P8ciVgxiACJQsjnrOMQGX//8y4//0T/7ra9/77N/+aff/Su/QzKP4g++/f7f/7/8vf/J3/47d4fXx+GUx3meKlNsY+eCqWnUi9xWZgImNlLTcd67i1EE8KYJFCNHH7PsD7Lut9vt9vR6atp4OA6piQZ4OuUkQBTGoWzWq/1xMtGvtid4u5l9pRo5ABKqIzR9v70ah9v99ecP3//BfJpUSlVRB6jimslhHGarAokcycxBJVdHQxFYr+M8zm0T1cGqoyGqMaEjMkHOFRQCc9d1gxkjM1A5KGGIMdU6LxPcSVUr3F3P6Btze/HquuuSSkYJXUjdqmvbHsEZHR3APA9jYD8cDiGkNu1Ox/n8vC8+K/Dp7rhK/XTIr69fPn33fUdst2tDV50pmQ1H4EKAZKAqImPVfYSE6FbvawVO+5vxcO0K3Ke/9hvf//zzT/vNRdNenu8enu7ubk77H/3Wt8fjBLx6+cVP85u6odW3vvkhBC5Qb455vX709OJdMn/24rOb2aa5xBh/8Dt/6cf/+l95kUOZOEtwfvbmddxSH1uRMk4zkMeezXQajoQgquP+wA21q7jZ9jXvA/E75xeN1pnzo8vuuLdxL31KM9J207yAqSOqDhyCueLCFXAwBHdr79MlFDhUtWUbQUAxI3ByMS1nXatqmzaK+LK3ciAzJdCWm+C5CRHROUBgAjBVAUAhUpeAHgNKNim1AsS+qSCfvXl10Zz1bbtjfNzDp+ijlqYN3/r+N37xiz/fdd3Lz59P/eM+nR+P12Wkecp1po9/9mXXP3COtG4PN3dJgpws78c+bns6v777vAldG1bACSwzNqvV6tXNzeH2tqE4D1lFzs7P8liC83/8t/6n/59/8o/b1er29bWut7HdlOPQBQeZ97fT6LBen5OnAPDovas8ls06Pr9+psH7VfPB+08+/uw2ACC3ZpG9oHIKFQIOw1EAFMPNYbi8PDeG65s7AtCpoAIBuzjGNOc8TrriQOtwezo8uniyr/u5lP6sPd2OXRu6Po45n6+2Vxftl5/f9nEVJpqsTmMp2QxpfzgFoprH/a2fbSO6rter2HfT3djzFpLn+XC+6XhCFIQQDVAMHByJxM0Z2J1j1zQtB621AIYyKXThtb6oaeLHm74/Hw/Zzf6z/9fv0pHaVffgycWyCqQUyRMzYQVCXIBVJoawnE/xnnSqsvhj79MKbtM0x0BViqt16+TqViyFuD/dXFw9uHrwwScf/cm67QMGYSs5BwyRed33t/s9Fl+361XbT9MRxFJgq3L36nXbthfb86uH8YvPP+q7zopVm0JiMQkNBgzD6dC0YR4His4xyiSuip0XNYoUOZSTCNZqNaRoVJUztmAmMQYz58SqWquE2Ey5BAq1WNbS9R0zZytMwVwIwEEcITAHZmUkxBjaOU/MIU9D13WlDE61lNKEHlzcKjG7CkGsaNUVCNqmO04n5uBMGBqgAsgx0FiqU0AOKgpVYttx0/C2M4n7L4aeEwEAeqBYtQCHgAROhMZWAgdEBDAEKGWOkaoUYibiMY8cQ4gNlMLAyJQQi2aFkSAomgMyN2huVFIIJg5q6lprhugUAnpFd6qV9IQ+E7IppkiRAE0d1dBMlZgdUN0BlImNqaKCQ64ZMCEwWQCrwCiSAYAZzZQ41Kpq5lABXNSyF9XKHNQKEDuoIy7WfXFHcnJnCmpqKqIeEutxVgFT7VKcRAVc1CCEYa6RIPX97fEkqmqk4iRW67TbtDmPTddttitViBw9sJhNw/zvvTMVnK4ePAirK6E0VQZ3zXMidpCb/TH7JMjrzVnDDW8uHAHUmzw6WGQ2s8ChX50xro5aKhCk9ny3TpTalp0JiSlEdqxmMfGI9cG7DyjFVb8tp6ylTkW6qUFo8psKE7ft9vX0avPw7OY4xyY6eSlVs00303AzLOAkUEeKBIBKOplJicRVwaoDUwiponCk4XZfp9I0DUBJfUsxUTKppVhVNAYEAkFX0zZ06guvjDECAcpclHzz8PHUJFJDN5epnoY2nFWtTljRmCmXOYZG9oe6v7F5EOe2S6e9tobvvPONEDcvvnwR56mPMVxs4w9+EM/XG9eGO9ntJlKqnoc5lCxv3uAw43GIrl5zIkAMRSrHwO6S5xCAoXIVBABVnzJMWWcpAtx3bgLYHqvoupvb3t99LO9f4kV7ENM8kZgeTymFLTKYBoGKK9QNaeuMCSlQj6iTjtVLrUfKfbW
ltBDnLLV6WS4DKAYUQMc5ew0qZgABw1SkCerABszIANa1LYEjOXoldVDXUp3JYyhA3DBShSztkrcEaULrItgyUS3KxVQEBMnRHWqMHLsEqYkUo5u5eEBMqUzZEIli4BhciCmXqZo1XQsBZwN0lKkqAjDmmhuIITWiDuaTzJEQ3hoXTA0d3J0WrAmAOixxBlw66hBB1VSkVEQHRErRl2YcoJjaUutSzrQkekyrm0otbtp2XQgJOdACfiUAW2qATCVHUOTAru7RkX3xRhBT7GJPqkYmqBnJszE7d0DRLFIgV5cZjRHYHNA8IKAWx4CoBGigjgHY1MwNWeECEbNez3dh1QjmXbsZDE9SKmAuWYGSpg31JDKWPRbpvYtkI+INHFaoXdzcDa9DDLGJlw5W50hb9Zqa6JAkT0vFfLVMANltGeMVMasm1by6oBKTuYFDoGhmCz/c3taSgIMhOoKYRLIFQcII1d0dAlFiVndTUKS5igYSByQXlRYBGYXRm+TqtN5Ru27OH1O7bZqkovfxmCpLaU3EqICgb+8KxGaFHJwJ2x5jJEYTyTbNXshD42eoOOFoUBUckIZyd9E9FXX0GSoRRwSLQIY+xvnVcC0kQGxQQ6I053WxXT6dQ12rg5sgIJq4MgO7uairVsYXTXou/qA5TwbJKjsbOJgBu5GImWll9GAAZEvhmIMn1wsL7ZwZIppXDNXDpuk5BHJmRGNHUUO814lM7W0X+r+nmJo7gCsSIyHaYjIyMydmA1uCgmYGZkSECAa0AMHVDUzAiSkA2BLpM1MkemsuAXcUM0IEIDDFe0joIj+hLQvQjYDf1p35cu5CX6QrNHNzhXuTkdryCgMnZLd7Bg3cm3nRPZtXSvjpFy+Ok6qzARjymCdMyKlrUvPm9uZwN3Ztl+dp3bcR4Oe/+mgYh0cPzhNzlZrrNI0DuK3RdoHe6fAx1r5JLy2c9fEMqqsQx1xtqlURyHEWaThO2Wb3iO7uc7GuC6NIm5IUaftUDFLbig/M1Kaw6rpqdTCuyTv8/5H1J0+2Zdl5J7aavfdpbuPda+NFmxmRCSZ6FEASLEgsFkWUihIHMkkmMw2kiWb6F/SPaCKN1JnMJNFYakxWLBpJgQ0AAkggkW1kdK/33u+9p9l7r0aD4y9ISj6JwTN78dz93LP3+tb3/T7OY01tiw5C0G6a4WZ40He7gpBWs+ybJjBpFzCkYDk0jg7StykbWoK2a/ok7bj7jeM+iu6HemcKRKumMSl3k6y7JgV+dXG7ah5cXd4GtpNtfzuWv/yLn/xOc7R98F6RzIHM3N3UnJdW+xBMdUFTL2SfhZQOCKbKxIS0mBkJyUAR3c0AYEmELS2HiK6mapWW16EBIoYQVMVM7l1+iyNyuezhPdka7+NtDABg7mruRhQciAjdjBCYY5VCAKAC4AAB3NARENx18SSBVQdCTgRgJiE2bgjg38Ld37mK3N10nmYOrACmopoBqtcamJGCWeEQAFi0NqGltBVqsHNwVDEFqZpVq8hoUEVGW2o7pWYkM3STuch+1sgca17N5Wp3U6ep8A1NB+SjgPj04w+e/3ivtlLx2LY+5/50M/lU37x+tP7g7i//9OTR09omOdiwG2t2WAdzrADTaL630wdPwvq4EXr+4uLsvRNex0Hq0Xe/q8PnF68+H84vu7NPXp9fPl03N7F99smzt+fj0cPvrPtYxt20mx69d3K0PtnffDHdXkXcQIO4jZfjeHk1HbfNtH7v0X/5P3a8m2omCiGwivp9MYFmHw9Smrnfzjzsyvc++8HXf/nFJ0+PwTisjr//679z/fKHP/urH33/2fdm0wcfPD16dOTS72/GeRz2+93tYXd8vDW3YZ4otVSN7nHoisQxhTzuHSUGFvdJlIhXq+7mch86Ptqszi+u3j95X7Xte7p+fm6q72DW//9fS3TOAUlNVeur168//NUn1xef3129cermPAs4gYdA4zDEriv7wRRkrthUjKQqkROAiRXDAERAgAGBHcghRTEDQFUnIlVDJFdEjtM0rE+P97s6jfO0P/SbNN1en188356eNkdRGzh5+hReD9dXh9PjrmZZn2waJCsWjoJqLblA6Ax4vV6NY6YeUgzEmhIr1tSFlBonjUQnm7V9UT988uTLFy8RjB2mu0mrjPvbzaq5mLBJ693hxiE14WEudQHGLV+l1OOTzWE3zJfDX/7xn/aJ796cc78ZDjruhqkWhptcCsT90YPj891eXQu4+fT47GyV8PbiSwUnkSYGLr5y2n3zs5/efd2LYRuPH260Zpvr3cXdquUsc50LNdAfb+tQatakniKDoyq6yXrVYs66L9Stm/60CE5FYwsUMTTeNlhnH6YZEKoaEARmNDBXJkohVJN7ip0ZIgGWNnBgZCBALmYcsGOaq4bohFZzdXCIUEXQDRxRHVW22wZVYopMgITFjBEm1wy2ismsIpGhEXEEG6SmbcNJBWA/TQ8enD3/4twzHW+P3r65OqXt2zdvPzn9znGf+r65vHrVHdMHzx6/evGaWUvJH3302eX1m5JHhjJe7yljRHp4vF31acyH2HRorFm6Jkai+e7usDuk2LQpSs0K4ebmrj3qHnZHP/7xz+ZpfvjweKxsVkHmUqZx0K4NqUmX18Mn7/1KzXfj9VsOs1OtY57306ZdBecXv3zVt00D5OVg0LL5PI6ekAh2d7vU98P1Xd/1L86vu5a72NRsL18NkML6qK8HvTtUaunNm6t1CmfP1kerVb6727+9NkboME8DQUCk/f6QkK5watZdzXa3n9K6U4LQtrvdPE1VCYFsuMsnxw+bJlIMoYlt5+z5cH1o0oowdEebYZ/zrCnG4Kg1gzsTA5rkzI5WEABS03PpNQzH7+/+R/+Lx//sH/9J6o6e/+guFTtbNcPz7NRZqL/4/O29VFQrIOr9dWRpTCC87+TwKhJCQCQK4OC1ipuZyXJglaJIUb3s9mPf99wIordtm+eJuX3v6Xdvrp4bEMcYtSUCd45gm7YD82LTpj+24tDUwzAIZGd88fxr+vi7D/rV+08/fnv+3BwBMa2TB8h5NvTYJrUaYkLwucyAEJqQqyK6qjEbkFYVYqy1Nn2PdS4qwTsVRyYOScWAiGPjlpEIqgB6rdUEHFBVkdhcmcM9dRSCqqtpBCeOJkjcVqmicn85zYIAVQZEcaJcqnsD2hCwiIAui0Z0R07tVOc2BFUsSh1EBwEic2QKoDbm6fT4weF616Uml7HqgETL8t1rDomqZMTgbLVqw4HTwqkxreZ8IHC0CiaMrj4H4Ekzx+iqMfa1VAYgo+UuQoimQARm0DW9mDk6MUvNVWu1ak597KtIqVnV3S1GbEMqdSTkrEKIKZAvYXsjAvIlK8FoKlIzhnuV0RXVZcn/MZOaIHgI0QAJ42LkBvMsBZndlSkFpuUSxiGAg5ZCgGUaGQkYSi1EQAhihuBL6gEdAvphd0eU+tX60emDw/6ujSkycdurKKvnXDanG1MoRVJK83ivFlUpj4+3Hz59YOxWJyEs88yi01S8DjdXQzFu+y4gtKttxyjOERbsBLmCKJ
gT5ZlYtwHrbsgNPz46KvOUUxsCNU2jg9RZUGqgMO1uH7/3JE9ltVohQOrbMI3cMBWMScfDYKvWqy2exGk6tG27Ol7t88gJDiDbwG3XlVKU1N1rmQEUAzdtJ4chpEhMVYVDAAyh69qTI7264di5oeqyPjdiNicEYmIBY0zVFcWYEQm9KiGjaYis6p4daq5zoWJd0zGSuUzznPo1AAORmhOgCjL2J8cPRzAA6DdpLoKb9tGnT5uj7ur6brU+06637alS3d/cya6U/S7kkqrC/tALrBx9dyCRmmcBD82qcbE6k2tU9ZLdKzlGDmUcOYY6z023RmZoMKR29HaIbfPJs6MPnswPNrcoPs8BIRm10GADhnNMlCdJqRspHIaJkjkjxwZBa51n3berNd4RgLRNRMZSpE6HKl5Tm3VMHAkYDdjJCIw9j1OhgMTGER2lVEDv2sDmXrOCmUMt6kqgqq4VwGJ0ZAqcAgCRStaiKs4A9W4GjkpKkMio7VbIBAHbNhISIidAUXOHSOiBm7YBwkqGgEWcAVPTc9QQk5G7wrdvISZmCsps6CbF1YoZN4n5Wyc6AgItH1JZeKd4jwB2QDFWh+oySZ2yizK4gXJAd0wxmStxWBq+FrnDFyGaJ3cLTZtiQ4BL4szBFZABlo4h8woyOgcEQEJwIwBHNvaQEGpFcS1o4ujepYhFE7jWshgxxKsu2UlarFBiasx8j5WD5cACIkLV5LJWK4B5EGMJWEHn4Pui1nKCirWaeV0hHGFTBFpu2SqiZmCFVBxAZiYKU42aZOoROpPZg4lX8+oO6tWlhhCYYOmfZV6AH+6gRIGYTM1BF3MLmCMTOjBHcFVTM1qcBogUGEut5BSQ1BzM2sCiFRJplhjQEJiwIQocIvNsOCkCxeP12psNdpumXTtFRFatIcUQGjBlBs8KxIt25IAIoKYLCNcR4mo9+lK5bmZFUQMCSkm0JgqBI0ElKGKHnV623YeYSwQ2EfNIHApO5+XtyCOQokHDliStM55UPxaMXgzYMRqS2cTAqEsJk8xst6wXkDi0K5Om1oisbsDsbuoFzEwBdGngYUJ2QgckJARqAXtKYqLGlQhS4pQYeXHmLEYb8GXMdlNZTA//ntgF8C62c8+CWVxbS4OVurkbIfg9qGhJ5xiAqy+eGQJANyNCNXdfQpzA71qpF4+YLw4RRIMFcEPusICpAQCBDey+Y8jNkfw+KAS+RNIQFyS3mbnWRTwiZL0HFpG7AoKpA4ibtA1++c2LN9cVeSUqClby3DYRiB15mOvri/2mi3WcI0HXxh/99c/247Ra96sUrOZcS8/+KIIZzVWfVPkE9WHPNfHdXHqkhmBXVZQCEgKkEJAIhMx9k7r9lFMAAA8hEGBkHEqJTKBSSwVuvUk3k1wUvnA/tzS48a48RBDDMs1uMZPOkQap4FM2P+S4JpuKDSUjR1QeZ2tDAAJxqQaUGBD60LaY15E/PYErqV/NcJPVZO4jxRDQvYrOM3796vKT9852+yJVnz09nYb8p//yj//23//7iER9NLfAZAvb3gHcichMFrx1oCAquLxCfcmO4lK64qCA/i2wf7ERgZPa8vz40kYFiA5m5kRoDuj3uXZkXkR8N8X7tP89yQgQwI14QW4vLnhSrQDu94CggLyoQGruACiq4I5EFMhc3RCRFyCkmzESEpnK/6+rSNWAnFCtZHcjsFBmK3NyJQIyUyvupKoOWGtmzYqYmAmJAysgOzu41uReVXvSasaq6hhVLXkW1aqK4IbzbrgKJjF047h70Mh49RqLqMRVe0Z3tytXqdg8/OCa0w8+/bXXP/n5xx9/8uVP/6Jdw0iaUjPthu7hca1uYBVAwFHms09/RS5o+vJzEn/63ferjmU0ubp6/sN//pv/6B/tf3Lxyz/6q6HoJ//gb1wdrn7jv/jDIX5ne/ykWfeFDWpw7LvjZ/Xm0KTeY88I/+z/+U+fHj39ycXnf+9//j/4Z/+v//erDCvQ+c3dw9Pj/dWr1XufYEu1TD7vt/06NfWP/x//+6d8dNY3fthf//Dzx3/zYzp6qGNG7x8en/1f/viPf/fv/YNcRzJ6/+zJl1/9jGNEJVRIlBoO+3m+uLrt10fHxz2YVbEYCCpYrjLl7Xr99uX50/dPDTR1qVRhIkY4PVrtdjdN11ycnwMlYv5WEvoP5aL/KHeL4G6MjIqq7ZPvfv9wefP65S/ee/9XpNTQpsPtXbtai2gEq7Valdg2YCC1aJWQGM1SSgtoq0ziRawW0IrmFIL6TIEYKSSOEvIsdRZ0hvubUQFTspKIjrdbI8HgU72bhjeOur8Z2Pl2f3v06AiVpGoMybB1jJwiIdR5kFJOn25fvdpXt5Si1Nq08TCM7Wb78OjUIPbrI3XI07Rq2+Lerrfc3NzcvtHMcy4oGEJXClRHS9uqHrr7nxMzD8OsUonBMGRsHj95cnF5UUWhVlYf9rsmBirT3ThaVW49Z1k3eHd1k3NFotg3GHgapuCIgCer9eE2xyZQndXg/Y9OX735+tHTxjRPhCLl4fGmXXdXWRiUhYkpeFptOqB9H6JN1tOJTpxVuyZSpWG+W7ebakU91BJ3eUpxhYUDu5qaWSL05b4P7rTI0oqBzLXhCG7oiugJHRDVtGtaRnIHqUAxhBAwmmVLHEJIjBzIDVkFXAwdKUZEDYCO6O6lekSAQEOtoWksm420aXuZSts2N7tpP9QgqeJh3dMvf/nj7zz4eLrdT3az7nudbo7WxzdXr3WeXYWEPui6ry/fNtt20zUyjh4iOoHn/ThhSgIpBgxNSE28ePENGCd3E/XkouOzj9/7wQ+e1nL4h3/rt/+P/9s/6Zo4TDugut8NxqGN3VG/ynW2AL/z63/j3/ybv3j64Gkb+lCpgrfr7W53c0IhtaGNIbWJZrE6R+5yruIQPY1TnueyXaXjtj3t1l+8uMhsQyMxNKDl+7/6iLZgB6O3w9FZd9iPt1eH1AUTa4BabmerDXUa/fr2dt2edcRtgrvd5UeffXS4vgSj8XAYBll3W7GS+lZVmWy77VR8X+ZV197J3aTzB48/++lf/fUHT05uL26w79ulpUQEvILL4kpVrczsVREBGCUf7vL1s+/tvvfb8uK//mL8a+9C2ByA2b2WwFG69sOP3mOo7+RkJURQp3cIfAJ2EHAw8MDsZu5LeJo9GHMjVWJszKqoNW1z+fYVYUWbmRyRFovkOA3b0wch0osXX0dHdiLgnOemazab1WF/q4JVcoqRA7nofp445LHKxdVbhkePzx6dqbx4+Xnf9SngrIURhnHouyaGTjwDSRf7+ZBdvfFws7s9Pjo7DNeu0MY2MBXJWhQr7u9ujzbHosqIroJIpoogjGambZdyLoSgVUW9iSGEWEtd6HQEZpLRMWACB+LAlGo+JIIiFsPicK9tas2SGYI5O6iq5OxG81RCXNVqliUiDWWOjlILqIAZEbhTaqODTuO4Pe4ON4dAveZbd0OP3AY1ZUrVVWqNTQ/kzjzlsUmrqR6qj6t2M45362Y7l0Jh4yaanRnJFZ3JEnOcfSBwjq2KuolaXShmS
tVMAyUAYAIRixTFnThpncU1+OgmblAdAgZRqzIQGjjWklOMDuRgKiVgUiXEhNgYFLWMTEUzw/0JRchugEBmhoAEjBznMhKgmQZmAFYxJAycwMFrAU4LW8cBOEQRDxTdyB0QYhYRUURExJJzzrWY9NvVdruds6XA87jrmlTV5lyQMM+5TBkRply0CCHFGGq4vxF98MkHD49Pjx+cHOpUhn3HOVGzn+cpzyXPQ51y1kOZ7mY53eRVYubQNZ2BAgZxcFGus7oIc3O8ffH2fHW0nc8vPuu+9+DJdqpjKyYqWhXM0WzeC1Yarm43/dk8VUPpugA6o+X55qbc7kQpWdSDYA4tdVDHw92Y53nTp+1q62rzNKsKBxIRAGVktWXhbC4iZrXMaNA0cagZRFQExBkiO4E6FmNKAI2Vam6Bw1KlnEK8H/UpIREFC+vj1D6y6j45APTtVgo0HEPNnTHtTQ/7vok5iIweK0yz410JKXJCD0GOuseffHc85PLydXM40HwIq5Vc4/XLi/VUsUJ72ME8RfagijkbuA8DqmOeU2pcBp8GzXN1qwCxjbFpHVmqOzdGgH2fCWaiyYyadd0+yB+8V56c3ARUyYbUApKb7mcMHVOSPAJgxLYMswaqjAGDCcLsE12WMpbZtyfbPNfjdMTwOoYknuoQD7t9SRijxwhFs5k6siEDaOyiUogcXRHVmoYBwAjFKmJtY6OmuTpz8IAA4CpES92wGmBoGmKmEE2yVgPiaTAFCAEBiAnjUdNuGlxQZEBelU2IArmLSRNZ3YGDMwAHiIToJCgggQmrap7YkIihGkQ0JozBxRBBs4jIt60ohnBPOSQGUACsauRkAARL1svVYFmbLnEaX1QkYjABBKLg5syMHMwd3VUKMwM6x8QhLLyYhfoaiJcjZ0nxmBqCuhdAAwJ3BDNwY2IL4AAQteSpijkbuqsah+BFQ0BzQ4dlooaF5gJMTgDIWhyqeWOgBm6IBN5HzOASoYY4ahYwhTlyVM0IrZPHwHW+jjB3XUIbk+pRYN+sbq9zSH1V4YbXR6G8ukjzCjiu2lhAAlNdflaAGILUDKApxVmUAPoUmnDf2m6iyOzgYg4IzOF+r6tGCISMRA1AVXFUBGQkRAjMs2mgAGDqi2oWSAwNCNDZIbC4A0ftVtL13q3nuH5w9pQp1YpevYkpOwQ1cmMwZBJ1pMU5hk6ICIzkgBZiDQ01namiilj2yB6w4VWiVngnZHHBJcO4t2+AunU6YkWswCEo040ON753tCCZkSBbEt2K93luwYjdzAkdfGyhOJB7mF3nBl6R7qGL2hxD7KwGBCN0YEdyLEgB0QOYm4IHVkJSX0I3CGqQqAH16FRhqaprY0zs6La01ivi4uJyMzXVZWDyd8QuXxBFCEurOACaOROA672iRrzQiwHQ0c0KUSCkBTazRPTd0Q3BF50gACg6Ln1TRAEczRXvmxsAgQEQwHipMHe0xeW1BEGJ/R6LbIupBAHu5SJ0cL23nLi6K7irgYMRBjNAmNVqZHp1cfXqbXXo1S1XAUQKpGoRg5R5t5v6vrVa2obaJn359Ve3u/16u9n27TxOfRP7vjlr03h3fYtK5BuQbUPXLtfWjBRV7TBMoYmA0EeutXiFGBIzBubJqrXNRN4TW1F2A/O2acFqE9rBbAz9xcUOuMkQWMLknsVvgY+auLM5ICtYrXI1BOJeSiGO6s6xzWDY0FCmJoa7qTzexAUBi1aFMTvWCqbBhFjL08TdKvzytn6z11Ixmh0HDgQOcHeoLy7vYjwhl7dvrp48fTjM+f/zz//Ff/kP/8EkEmJwwMCsi6QLy+8X4N58vpiAAJBcHRxMF1sJm92vx95Jfr7AwNwBOQQIKsXBDB2AABHwvtwXF4qQ09JOiESLvccdaCkgpGXZR+BoBo6gKkSLTIVm9042ACAkouWdY8hsKov4SEgmlSjcG4fMAQyAvnUV0bshmcDNpNSS58Ot5tnLzFojap2uoM5W5nm4q/NkYgoAkWNid1EpboVBSDJrTmBsEkzJncCaGBqiVUirbrVdHW377abf9s2KU6cQp7EeDkNRi21Lgqp09uxh0x9WrW36cHHxKvUtaxEcJHG9eznvXsvdW7A5rNLm8cnBpDs+Moe0ehRjOj198uJnf/Go1aYxOOnH1Sozvfjm7dnDD7brByF0HJsp39zsr99e7PalYHf69vy8TPuu4WF/g0TT3d368TGYhWm8/Mt/d5LS+slje/jo8d/7O/xwlV993aAljtHrn/7j/3WsIkYe4tc//vEa0vTF62fHT4vlcBSOvnNGq5Zg7YTPX15sPzr72dc/Xj19vC+6n/j2+s7VQ7JHHz5wr66amjhPo5a56drrwyAG5iAiMcSubVNsYugePn7KKXTbzhEx8GE39pvO1BBw1fd319eRaB7zPFV3wndBs3//tQjU7/r5wJwRc6kSYLffp9St+1WMIZdhmgYFy1IM/DBM05DnXJZiRbDl0Y1OxCGOh0lF1cxRa5mR0ExNi5sAmIMS6jQfUpuA0BBF0An6457apIFj3x49fhpiDIi725vtSdo+PBZyY2i23Uzanz7oNmtVLeMoUhDMTYbb21Lr9e0+dKv26Ak1jZmXfY0BH79/Qql5e3XZd93524sF2ehZgWCW6e3N9aFAwTZDr3zM3bOw+ijH94bwsMbTb2W0ecwhhNXJqqY0KLy8HYSiipwcb7lPo2jbPXx09oE6d92q69rtZtu0q0qkhJwaAgqB3Gu14uxpu7V21Z9u07Z1GV5+9c3rN3dvL/dXN3o1ltXZ5vb2pUxXmxM7ez/EHmr2PJOprRPmeZyzmFNxscA1oHOI3XpGejWV6wI7t/jwSJddG7AtyWpHRFZRAgBTN0f34NBSaDlGosisqsyETLOZI4pqdTBiAVN0BI8xVAAjjIFqlRCCuvdpXUpl8OjkWYOBmE1FVSDFBERdH7ddCuhlPpjUaayHw0yxG6vMVa1pvnnxVgWpabk5XT38gPvOEV++udkd8jjBfpafn798+ORxctJDwSwc5MGjkxkEY9N0a+IutceO3d3dAAAONU87gnr24OjjX/30g+++D/Me9m/+/Gf/dkw5rtdGYRxrSN1qfaSmYPThex8fSvnF8188OTtSrRXD9Xg4Pjm7HfPJ6YPNZoUh3Oznm/00m84+3uVrpdp2EVJQxnaVVEokfHt9jWRKooixaQypQc6HA3ARnxw0NLxep7v9oVRY9w+AQmIYht2k+emz94zixd3d9VTiyYPbYagOTg5AjPTq5eu7u33bJCnF1Io6ABNQ2zautU/hMO5D04LbNFbycNodr7ltmAMul3QUyUBAISDyNFQwP9ogNl/9D/8n/ge/3b75ypM9TXba9Sdh1bSnG9zEG7QaV4fdvZ9CTJdCAxW9b8rExV/jeH8+OTI7gLhTCI7GkQ3UAZ24uh09PEtd3I+7w7QT19g3nCIRVCmYVk+ffcwhGqIzh6Yd5gk5rjbHoWunWooroIcUDCqSMeXD7eXl1fnlYdw8ePzwwdM5j4dxR8zM1PVrd86igOQCTKlrVjpDF1frbi1S
QgoYuUipUgHRREHvNzVgBgS11mUdWeusUsFAxBB5aVlGRjFXXzYzVKuZB+QITM6IzOYmmpmZQoztpnhdFuCOLp4BpEpxkBBJLcfAUmdl8xgMGIiZoqk0bQsMHj1bUVMid5SmYxV78uzjMU/dticmM4sxILGoOEBI0cxBsQsNOwWOgQMiK4hgLTAKqQcFNAVjbhZAJKIXGYnQrIqIuROH5WQI3CBCCIwUFO5N+KoAEEPsYlo7IHFExGXacWQzBMcitdQpkJtVMQdiB0PCyGlB22rNhJ44ORot3zUII6kWUSmS3VVNpU4BGcxNTU2qZEcTFTNVN0dzWBbHujiv51zE3QxKqWpKFBC85IJuZmJmHOKc5TBVE6ulSK7LlrnOc5dSYnbQzabv2yYxnR1vmGCzbZdPwdHjx+tHT64P5e4wm0HNRXPNudzux4vbw36aDnmu4Nnleti/vry6uLm92+8Pw7wbx7FM4J5CFPdZNbNgg9dv315++dXFF18Ph1GRi5oz8KqhPlKP2KC3ZKDVa7tJECl2fJhHQRO20GIusyWiGGpFQCp5bgJ1IXHEtOL+KEK0AgqBYtOmrrcADj7OQy5lrgXbwE0UtXEYA6JM2YGBI/ctrdeQQuwadosGEaBBQpOGiRfQq2QK4MGrZQgaOqLhIt1dtXevV/MYytSS2zjEIm3JfdF+N61uxvXV9Nnps6Om7aPrcFsuLvqGT5+d8Fm/q1aVTx89OV2ntL9ZB4N1P8eNQLOiozRZO2NbAhpz6CaR2rIdRdtGOGpw3ThUjDQDDEhjE4emORDfAPjJdlw147o5rLq7B8c3nz27+f7Hu+9/5+50tWMr4MG4Ee4wYJZVf8SB5lyywFzStJ8BANC6VRItOQ8mNh0OITbd6uh2t4seDndjEXNycAOrZlryXKap5iFA7SIySdNQjNhv2tOz7cmmPYpMpXIpVLPXiswWkmBARCRSl0ge3KJBqAJzDuJMzO2K1xtNa2qO1Xq3jXsbqWHF5NxiiBhCbJBTRTYKgkEpiAlQ4KbBNmoXahd90+JRL103pzSloF0nqcFVz8db6JNHMkRDqm4QMAR0LUzg5Mj3c4EZuCMT0eKXI4hMgI4IC76tVKlmTgiMgO6gYAaubgLLMhwACZmZEAMxEQeOISYOkQMzIS58VgBa6p2JERmRzMHcxaRKUROzpdcXAzFjiBzMdM7zbtzfjIcxZ0ZjBPAAHIiJiBxICe1+hqJlhjIwQAIExOou7gaABiQGjkAEBo5MisTYrvlkbau1d6fdBmFKDYMjBlb10/Y4TtZMtsGYwNerQDrBeJXcUugIHRgoIBPFENyhVkUMGIIDcgzLq1VFVA0AF6fosu27R4pSAGIOvEx34IouIDO7wtKSxQQIjh5j5MDEMYQ2QEoeVpy2sTlt0hb5yKnJdYPUAEZkCowBDSQGCAGXVb+HlRtHdzRFA8Tl58fvQnBLGgs1sHVdVgMEQ599LiQYYwyrJh1Rsx0pFMRCDl1baCz22qiqi6FXtGu4vaLrHIqTgtagoStpPds61xUYiNQKgFGlYtVgIWfds71e+y9W+eUW9x0Ez1vJLO4Q1NHQBGrx6kzOAQjQSkAwzejqUBCMTBcxlpHRHMVQjDkQByCCb3lD7nbfGAdwz2c0fzdCIaIt+7HFiwGEyODEnFRtcSDB4vUAc7Bl+DJXWPrUll50ooUkRMiLiONgy4djYYiC67v/Hb37qdvSBnjfj7iYjxZ9C8xMEfldtTm4my1/7WJtxnctE/claKCm5gKWmwC3t/uvnl879oZkquCQUitqAKGJ/TSWcRxAxYv27errV+evzy/PjtZPNn1P1kTYNLwlj1ZCwKp+FODDbViv0jXyRNEpIuLpUcduq8hNjClSy86oKYRS6iR1r5Yp7Ks5EKoNo9zOPiq+uMlvJrjNNmIcxIdSxCt7iSyeaDAT1hItBAzAgE1V7NrmZN25lFVjYKUn3yZPUZpNqBErGph2IXC1bRvdK5KvmtQRHwV+CPK9FTzpWcAz0q5KFgOAqnY71C9e3QzZqtKbV1dH260X+Vf/4o9iH0SrmZopuCL6O9M9IjLQvRdsiagtheNIsDxsiHQ/hiMtXfAOjkxL+FSkAoIjvsONoZsBLO0f4LBQJvHd79QAnInBcQmyObi5iwsFNvdvH28HX96p99oUgLrpoieCO4IjAZDex3EDEiET0TtK4f1R8M5VZGZSJPSJOYTVhgCY3LOD1oQrMHItbkwhGJHGpKGh0JAhQAUTAmwi1FqRlr63ojZbHgDNIRJGwc4xLa0JqBTbVqAoFAUttXbrbkZt205gxv7gMEsua4jtfp9fflXCo/TRH9Cf//nVy5+H/hC7Lh4fjSWnNloeTCCuT6/efJ2nadWl6fwcK6T3v5NQvviTn3zw8INNtl/8qz9aU7A6pXV6++ZKZnj+r/70wfe77/3+f2ZzdvRQgSe8/OLrT3/w4eGutv3mzU//YpPk6y8+57T66U+v6i/Pf/LN/+E3/lf/y/MyVYItd7/8p//kg//2fxeeHM2xWa3TN//n/1t9OTSb5ic//fxv/effaegwP399tH7yeho//u3v/av/6t88+/D97XYT0dePzr786osmIeQR3dRste3dyn4/ivmUp1rFl1oKYEQ/jMN6s/3lN8+7vtl0qQ4jNfHq6vZosxKzacoPH5xc3479auVU9e0tEZgupw3Au/8ALMlEBAA1D0hIKIAffvLhdHfdPvw4xa5tG62qksHNSs1z7VebPNXYLKAqcABTcEFfYmZItLgMMFgtHHh5SyGhm4IjmlvOFkRVAqW6r1AprXguigTnz18FXH32q++/ab68vhh+8ad/2WzWZRxXIU374ejZWkWZGNVrKX3fFHdinuuMwFAtNmEcb5rUYLGmayaRowZeHq7a9RlR3N3s2r7t+34aaplKyaVpe0x91ybXBBURghNtQdeoie+PBEc9OtlOd7e1MlGjtRbNZLZKcdI5RXavNb++rXW7Xh2vNle3N0jJHEKwlBI4nByv9rv9qmfrVl7t7vxCzEBls4Lru51W7KE/6k9+/uatBXnr13/jk4e7i+t1PLs4v+lkqxWr+ZOnDwC/XG82b54fGKxWVfFRICNgoHnIlijn0a3kooGSuxIzOy12QlAIHLKViAENiRgRHN3QAIGIETEErOLA0DZkblUhRkJAEDOrqWkmNY/YthgQmNnNzDCFBOaGrlaZ0InUjSjMVZzDYLqXmuJ6dfz4+u1V4tQ0qUAZy+wYQ0dNt86qEmizee/y4mrTn6qMTQxZgWPaTfky1w3Hm8uDFuvWZx//4LeeP/9JiF3TrpiiDjnnurvZqc+h4bmW1dm67Tc3Bxjuvnn5zauHj7ZHx6np10dn7Xx+4IaffPje5eXl8VFznndzLTf7abM+OT4Ost9fXdwdPXn4nQfvf/3Ly9evLrftOoV4exi7hruWrm8vzrZNcBZHdQwGiNQ23auvL5omzXlOfVJST703q6NtDCu+fH779OH7Z0dwGKY2wfooPTp7+sXnX9/tLqTujPRoe/z6xZfr0w5SfO+7Zzd3U9u
vj/p0M+xj2/BJO9cpv5kVwmHMBcERa66d4dHRWki7owYLjvNd7FJz3HBtLHgebxHmyFSVGckcI5DY8snHtI7KuldtHj35P/2TNzfnsEkfBjm9ud5Dr9uz9r1nH759fvmMToa3N+PV7n57gKBVmZhDuH9yTAEX/jEQgIGDLnWftnDvTGUpRkAkNHc0BNpsji/Pb8sk/SqK1C52ZZrVpN9sV6UQ74b9nom6flWltm3X0XoaLtqGmdlQt74C0EA4Hcbx4uaNNfzs6YOnHxnDm7cvTtZHSIjuQGnY76tK3/XDOPSxPz4+3c07RAf1dbudyOb5gGTm0q6203AHwAtbkO63JYgAAblKBUcRW5ooCaHKDBSIwBEYo4Agh/uz3EykpJhmMY7JWdxTgA1oHsecmjZQuocKBwYH8AxEABJDKOKwCFBuTA6sSBqQAWKpc+Akpm0XSq03+1rUTvrt1flrdJAqpkUckBJS8koCoEQmQSqaQZs2ImPbrAIG88pKhFCsiJECMK9zvo4cgJhQ41JIDUAU1KtJQUwLZ1nEEseYVlJK4MZNwIFQq07ihTk4OBKn2KlMRM4hlqJIJCKKbuAkxq6g4kroJDobmWkxRAFUn3nZu1JAagAdSNABkQUqh4Bo7spEoqACIQTgICpNbMhRtSACkGMKgi7uk2l0M9UYmZGYgMlCSiJCpqHrHjw9m3c5EHNkdE6p2Y85df1UyoPNgwipb3ugq3m4h7uvV0mtFrB5nlNCd5rLeHGYrw+5ocZR2i4SsUsVVxGbNEcrhAAcHHUdW8JOBGjVjNcXybhV+vjp+/vLKxWIsQV1xHiY5pTIzbFjI3nywZPbq9v3nz4rNUv1ftvbq/NQlIpgzuhWJ2IOAJjamL2K+TSLVlMrHFMKrCK1Cse43DFVxJe2sFzcQUDALK2aZt00J6tqlUwOc145pRB0LCrAyEqxVKHARsAUNTF3DYQU2tb61D79QGJXrt+sMEst7WodYizDhHVm98BFROe7ESmO/lbHvdZ5++wpe8DtQ/J03EUEnqViTem2v3v5df7mpZycZK7j/lVTPNQ7Vpe5SC3S9cphChR6Dqvg7v1cC4xKnNs4EN8GaFex5SjjPEcyijHRMNf9mvPJKhyvYhOGqfSKDKCkZR41EZko7AllyGOlNnAuh+Gk7ybdw/BCtytxkBLb9XYyGV3bbtOkk1o8oKnN6qWaFhUBBPOZahtSBVcEZgtmOs11nJBDGSsIyL1GoSE0Tl41g6lRCIGnKVdTU2CITUBghBAyEsfEqfNiTaNQBZUiuVeLMa42vTYMoIgeg5fx4MLgIXKjROIFyCBhk1IIrACVUI3BGncvdcYUxY0C1HFOGBE9RkQiQDUTAKxiIb1rRiYmYiIkIl44KLAgUIkRkRgXSJm4irr5/Z+b4mLDcEOAhfJCSMsVFCEsm3MmXiqZYVGfnO5BrkiAjuC2hIPwHv7hYETxnUMe3aBKGeZRUEFrQy2CuQqouSq4uQlDG4lBKiABouJ9foeA0G3Beyh4ICqmKcUmUecyYW1jg7ImDb1p4xQsCBF7j8wBqW2SzBB4VUb2og7k5IEaKjOKmhBEBEQwWRZ14iBuhEnUALCWSgiIwA5tYCZiYgTMVslgCaGYCbgTsxPYfbYJ76c3IlUxcwInMHJnMOLYcIx8TziaqwCghgDM7DhU33gcayDrV+nIMnLCJoUYYapiZVxL6fIuuSNzNXVbMDfg76ruFEBirF1yRiKY51y0wNLbRa4wH6fVON8u6SsDQVTUvcsdwBlwmkO9tvODXXKqQT0Ronl077EEmLUKYTA3K/vGS7CYnYamedXBVystsRfXaP6UMGYHCZjICWihKxArBAREUwBBECQQv09SOKKhK1R3YPfgSngPikMwVUMAUVPTZWhyA8RABMAhvRuSF1eR38/NuLiPHF21EpI5LMeog5spIRsoAgCSudECCF+aqBYNCOzeke2OSGLK90M5AKKZISLRQizCfx+CM1j+5b6UH94LVAiLA8QN3AkXhDGqm6qiA9yj3JkA1atbCQSHafjFixdSWycsJiZmhjIviVXaj/OQRd1J6/qofX13+fz87YPN9sFmY5LNpEmBwTaR92W6EacQOihspQA3nNx9X2dpYc461Uq0Ot/PTxJDoIpQRdZNmGuNyKIObrHUo3X/QuhG+axkB+KU3M1FIhHGhcEDzEEqDFUTNmUyROo9ycwV2xHDVGohW4Iv81CfrnGaS1S3FGotmFozAq3RM7JVlyYSIjRtg5AZ5bfO4l9cTleF95WzWiDcNISuhwleXt70fZMafv7VN2ePTi6ub378Z3/1G7/7n8xjBlDiRSA3dydHMEPGxYyBQA4KiGDu6iE0y35rqS1714Vn4AboSOEeUI2ESAhyb/MxdxdkZl74ZQCugAvXHMD9nZgrxGEJ7zIRuBE6LXTr5XlCJWQgWkzc4I7IgAhuTAGX9j9CcwMTc12+B1FFJFX7j6SiGGPkGFO7JOjcqsosJSf0wCwiSyTTzB246Y67ZhM4BIyu1VwNAdw5rKtMhB7a4zofiG+03pR5h2E1zcpxQ4FA1F3JEcSb1DSnD4a73bZ/SHF9eX579uTJ/uK861cZSrvp8rCr0rbvff/VKLl/z/uXsGqmgtSfPfrVj69++EO/HbTCw9/8e4cf/V/Tg/bo/ZOSa1h1ZMQzodJ3fv1v/9lf/Pn68dFqlZqXR3YrUNLJw8d3tzfTL37x63/vH8yA3DabB0d38+69X/vstuyBoO3SF3/2r3/w/e/ydz8drwacb379t3/r9S9/9Oary2+w6Y7WyMfXP/l5jI8e/d0/uPji5YdruPjzn8occNu9/+zjvBvWTbr961/0GyPAw8VXb775+Wff+zsh9Vrr+fNXUDw76O3YtG0IiYhFbb3dZLBSm4YgxBRiAoKqQojD4Y5NmX23v3n//We5lqVmkQkSE6iZSmr727e3hAjvjADfGoqWgrp3tXyIjkjLTt18vAmb2PSdUpyrMlNdmhdMa5Yp5KI1D6XbNodxp+BFJUupZQ7GTGCmRIBYgUBK4X4Ny99Ojsixb48enFaFdt0dbR7eXV+UXBHkaN2uGv76Zv/f+lu/9yd/+sdyOzzYdFfnr98/evLw8UZ28+aEHj07mac5KM0ZVi2Jqbo6WNXS9txvYnGeDuPxydnz8y+ySurD+Yu3BqndNtdvR2KnRKFLspubfguEJ6en0vaTuIgTo3tVzREdwejd9iAgmVi/6rLUFJvZZapTw11RI9OS96uuAUPi6MbzXLs+nN9dEVKMTRNCLWU63IBWsNpCECmMbkCHm2HYZQcPDeGc57o76UPgmEVpWMN0V2TcxAQGGIJm2+12faObrXaJb6+Gbr0KMR4OZQRYJaq7klo+OukOE3WVHDA2zVzG0LBIBXcCNK0B0M1ijFULYwBXBId3BeFmkji4kpm0KYlrkwKo8LLYA/CiJNZwCMSqhRMXz0CG4HWubJwgBG4kCAKY4SQ6YaaWp2HUOgR0ltlV3WrHCFrRWzONqZuz6mzzxW3bhQ8+fPbq5SVR0QAd48PHp7c//z
qmvtuGpk0vX/08OGxXbdtERcqo03wwnts2BTLG9OGHT16c3xzGcdP0oBL7br2dL74ZLl7lR/1xlTIdMjpdXV2RawM2355fjxOMWzTZro+m2/HLi5vbq33DTATXtzfglQj3d3a62TKUk9XR1X7fxLTfHxB4rmWY9yfb9/MwbU+OcA2vXuSjJC357c3lo+PjPM03V3fHxw9O1s3bq5u3lxdtH65uLo+6/mZ3AKf3jh9P11O79uO+7Xo6XN/e7rlt2lzqPM95mplh3aVhHILBKgUKnFTKfmjamFJ7dX1jKYKn2Qo3PGrJRdGMZPHBIpiqF1BSU3eNbgHo5lZaXU/1wzilFT0Yzm+s1N/6rU9++fL8+S8+D9Z1GwqxS8dHy6dARWIbKJDNhg7gruYhBL+3WC/7E/T7yk8wFURkYkJ1gBTiYT+ESBzTycnZME4KJNXKKO1qPc37Kta0HYPKXFWrq7l5zTXGfrs5GfZXzpJaFg2lQMmVQ1T3/e3teQzp2ZMHD5+C2e3dZWQWtRCgW7W1KDKxsqpU1369QvBSZ6lTwKZtepE5JDIVN6tSAdDdqtZIwVzJMOeZib5te1FTswLuZkrEDiSmQISMxkwx+owMCAaqhVGW2wEziwKnxpCqGodYysQYtc4AHtrOyrwcwBSiWqQYqs1Q8zIhBYjAkSgkTPmQ27MOZGi4ub2+jSGILh4zYuTAzTDN3bpnNEOAQKndHHYXiNVkphQNFuUumxkFNhcl29fbyMCRa82E4G6B2pIzIkSOVaugOkgbT92AkdVNVBCBmN28icdL+2cKMUshdIRaSzbXjoMvvWYBl4uIg0/lEENEBHRTN6aGrVE3B0yhIye16lbIDQEQ2U1Ei1khJHRMHEWViROvbqerrllHDLCYANzUjAM5e4xUCjSB0dwREbzWomKBCMGZ8Gi78s3mkEsEcsdcirne7u6sqMyV23RzuIFaX759tTTRLp+CYXcXeGawGDkF7rtmmA7ZMruVuTBAA4Tq4hkFGB3BdS5iyqkPTZQ6D7WIUwRBcQXsNpvf+BufXgwPpuvD7vp63W/6s4fBIbVtyROuNnelroAnsP1Uyyyz+HbblKvsI3Todrt/dvJkur2iTZOxQMOz1BgTYlSRjkMwKMWJg5ZKKn0f8mGfCJtmNRwyZytSotD2+Pjm+euW+w3tVpBnV90ezxrLVCW5IiMF7tcWAsYQiEO/4di1x0fQ9t3pVhKU6uX6rnNc21ApcH9iNWPbNqse0Q+7QTfBVwjNyt4/xRL7ImmV+qY/yLW9Rb0JHDq5Kdf7HKZ5ennVTOqO/eMP4s2b+uIbubmKsYNppjLrvCvE4exhDqk2fYowvHrDqy42HWR1pLpJ7YdnWdRzKmxdFxqq+XqMqw2tTvZTJbJ1v22aFgXnsbTr0zzv3Orx8em0u9AA2tAUuT15cHO4HXNO/CD1p4d8oGpymEw9nmxV23Vnz9/+FeF+nUQdoQmAkAgZGRCqLfVdNO1GKjJPNYYwmHo1ppSLuVmohW2OMQEhciDyAGRLobVZFYmUEBxVOaCKAAVmChyMKToAKLaYumSJIZIClVlYhUpBjRgbpkBh0SpRqaJadC9mzEQpZinBkEN3AM8hKREQBWKXgmToJSaCNshsHCLecwwA74tz0c3RARwJAZkQENTvAWcAai5ZpAqJQmAAdzNAcvMFtw/ubuJ47xxCWOJsC7eZFhzsst+gZd6Fb5fnC9bNiBiQFjgIArohBQqRAgM4NAEIBGH5c/dFrvJFJAIODO5EwdwAKvji3FFAMDBXNYCAJO+md7YawAN6ldse16CmpaTIzlRBGcHZlWqMgUMEd6Mweq1IzebId3unKOZI7CJg0MbtILsmRIo8F3MAMSPGFCCCsxuhqwkQ42IhAKoqYYHkOlRTM4/MC/AB0REwMFcwJmJHIGdO7kHEiJiY1TxxFLOpZEZrY+JpRISx+m2xWrE9e9YaApmQD1j72LWH81QHF4NABIZguHjKQJGYQ1Bk366lazMQehFXQ2du0ImpDc5HGiqf7WFWMkKoPs6GG5wFZbDbQ70d+SqGCS2DEVE0nVGVrAioEKpbxRrRG/Os4rG5buBVb9edqxcG0YSFwHBD2EidnREQGAQxKXcKljCZolshFkIWEMAATLrEQs0TgIMF9MW8toCqbYkCLV3kC+WIGXmpt/8PsdaORG66iJnEKFKZ4oK7Wp5eA6AFSeO+VKS/Q0/j4hAxMDXxpcsc0GEJ8qM73qOLlwotAHNbJnYDQIoqdWm2E63uhkjmS/jJ3Jea80VUQtW6OPkQ2d0MVLQCRr+/z9Qq81/+/Ispd7hgs8woEBmaOocAiEMe5zIG9DZgng9ffv31adf/ytP3bm9ucp3bJnRMD1Zd534+juDhCOmR57Mm3KmUKm2bsGuIJUUMKvusM8bJtTqAmquvAjpoBEputZaHfZT5EJrjokjEU7WGANBbSmIGxNVJ1E29pYAIm7aZpKwDMig4zWhSCjeJ+24yhLgqXdkH9KrBPQYIfUPM5iGxBMQAGAOaCLohYEuckq/I+YR+cQefDyrGh2prwhigiOwP8LMvz3/90/fbxLv92K3Xn//1l323/e6vfj/vh6XRfuGs+cKQdkIiNzEwWHxGCO4mkomW38vCvLbFFUjEBgYqTGwIgGAmeL+C/bbJXu8TwaZL5BAXNgS6ud5XVC4EdGTRSohLTSUAIrHf5w1Q1ZjDvWFkcaG5IvHSJIBEbgs9H91cRJfOS3wnJdxLRcQxpgZ9CfOSEzhHpOCWxS3XYupVS+AoRokjMyMiY4AA5sHQRQoytKGVpWu3XZlJJJCca85iBU2sOlFsmkac+tVKxgIALOq7XGY9DIf3v/dw98u/ahCbk9V8rY8fPiXx0G3PHh9dBSCAFEyHymkz5Uly7nNGg9Pv/spfYjz+6Dvpj3/pK76moenW3Tc/bC9/cfezf/7qF5//5q//Tx//4IOvptVvP3rwL/93/5uEenM7Pn1vO2S3ZPM0EmiI6o3Tfnfx5qKTIYbm5Dsf/It/8ke/99/7w8Ph5tPf/Buf/+m/wTF/+nd+4/b64rP/9G9P33x1/sN//eGvfHdNu9X2JELq19vz+TD+4uIRH09Xu9DXX/7LH25+7/urvrs9+Pc++e0f/vRHn/zqf3J59+ZQxm3T1Jyb/qRtGkqU8wxogejRdhuY8zyvV8cQMM91uz09f/Py6Ojh9eUwjFOp+fXbSwfoVv3d7i6khIiXd1cnCF2XPADIt5irewX823cdLm8/vPcrulkphblLLYdVA4yuBiLMoWm73c3UOIhIWAK4VULgKhaazr2k2KkcQghq0IRYqy4LKER0UQR2gJhWm22zH8ZHp4+yWDw+fvRBvL58CVpqxZynNtVf/9Xv/UTMcv38r35+ehYbsi+/+PK9j5989/2PxsMhhBUFOuxu590unqytah5KPtS0jjf7/ZNnHwkB1tCu1hzqxcvzZ9/7/s3br8a7cnzykClKLoQIKvWutGGT++YwZwrk44BmBDuvs1ad6ruacJM8TBG9aWLAOSXepjOpZKYcIEEKTeyZJ
Ns8zvF4LQZ9TMTJpI7TNZFNGRaa5/nN1Xeffc9gU5RomkLTZnMM7TRdyjT5xNvN9vX1dK7j+vRU8frRe48vfzEoGEdS1Sb243VGoVXsG27GLAFpxdyqKzRlxumuEEYzcwfREmJYkq2KWM0SABFlLck9BQb3JsUFTllFGBzEKZBJidSaSgIALQQemBDIzRgDugkgMSp4QAxt6/sdxs6KuWogRreE0HCYtcaUAIwNYpN2+yGGqDVP4yxmWjU4zfsp1wOip+5kHEdT2R/Kbr+l0BQZzcrT4+Ph7et5N4dNvz09rnXeNA2bg8jhbqgO8zCalsyK7ofb4ex0Gy32jNbhWG7/09//7T//2R/9w3/0h//4v/riePVEJavl/VxialZpczvd1qAe/PTRIxkKE1/cHro27W4m4NQwI5q6rY/XqhKKJ2pL9d00AIDWuW0jU3j5emdx/eEn33XSXOTT7dmAL98/2V4Pl+vNs5cXlyEEsR6bI2Nv+nZ3t9usWgmRUwihrQpMrhXGQ20AxmF39mCLzNMoLi7V6qRuVufZ5iIKnnDdrUvRJ2dnh8N82M0AXWr6jqlOc5kmWq0suCuCiWlGjIrFoEZuEmAF9ar1gF1IZdQo6yBx0kN3Qk+Om9eXLzbt0zCt1tttt1pdvroo8z2rSGutc5GizMHBwBzR76s9VQEZ3Jc7jJsBemBWEXdFQjPLOofYIGOtNa3bDAhoFabqerw+0gjTcOibrm2DVh8PB8Oyvx0wGcFhfbwSn8fDsKYQQpSs69g6gnicxunNq5dx2z/etk8+/Ahf4M3VOQISGQQKDCXX1PaS6zAeOk/9qq3DJPMciBwcA6o6kAODunLgWhxFIAYm0lpDilqVAyK6m6gKM4GBm5uJu5lBCNHMQB0BTQvGFTiE5aw1R8dcc0QgRmIMIYHLErmo2WNItbq715oRGkBCCo6E71yBVQtAZkJCVIfU9dhED358un1+eX5ysh2mPSIAhmrOjEICLBhUREPTqk0OGpu2SA24EsnEgSMvbWiBKVeNHBmmogMGlqqmSEGcjIGY2+qBA1idTTP4bNaDM8dWXQKRyoHIHElsATMGUUmB1L2KwjyjB3RRQQCgsLR1gJrWcmDA+2Jvg3meOW0M0UgMKhGgQ6lDpKRaiShwU6QSOHgJHKayB7SUknl0z4lcVExlAWZrKS61Zc9ZzNERi5qKVbUqgGzrVfpPf/d7r66zqR4ubnfDhCEGgBiiBxjVV6t+P+y8GihCQI73F6NSixtUlW0ITUxM0YwDLF22gkCETgwReZymYq7mHNBduY4MyTk4BhUoc3bRjx8/4EQH3U+8OjNobm5vf/ZLf3Ic0yo8OsPYnnh/vDmZ5uqcQ6DNmq+/efHoUQqB1qvt7mbSi4Fgvnt9952PPt3d7addbYK+/Orzo0ePbm8PXQxNSiYglahoJByJZJoIKDXtNOXq1m03WvIhvQ2AeZjsoLtx1zYPbF3XK9Biq6fH2nW8Wa8ePkJArTWSpdTUYimQiCE6OdGhPH38dPvog7uvfhYcShEGcebZNTWtP2Rqz+JpP4C9rSOHLla/eH13pAXnTBQ5JNW7MIjsB0703vtHOd9ef32ea1eqr2N78uBoPByilSYl1CrjEPuON81Y5rQ91tWKofc2xMnm3d5Wm7uYUkeCgzSh6buio/aM7cahidGJUV1qzdNQyzghbUs9IMFVnrTSfJfjaQ+I1+f7DUKE47JzDZNl2xw/yIdLmbPB8PPXf9rGoU43q9V2l2cyL6U6GEldXiaJ0ywyl4ozwCx5Kk2jzpiYxMQZnXEGT0boziopsAOQCNQlymYQQlZFd8LipqldAQVwB8IQY5MikQEahFBEKrm4m9YgGh0QrejYNsAxkmlKjQEuSBsHliyQR3YI0CE3IYEHYQLHjFpjjIDusiDPIUUOoYF3m2QiQsclNuPoYHBfsQyIzMVcHQ61TLWIqpVKSxk0oS2pmIUcjbjkc5wJmAgAl7cooZm7Gd7735cNuSIavMMlLOkbRCaMQPTt3ALgMYQYooOrOZrFFKBkdGcKZBqIXJY9PTkA0FIYRAi0tMOqG1BAcCRnAANW81JNICBWcyeilmKDsdpkjmnZgIboCMpe5cDUuGsfeQJD10iAJbMWQObYECNyIqmGom4NYghhQuAUMjOIqVmDZGJSLRI4QiAGEzeLzG6yoHOBkClUNzNLIdJC5AEgCgiEKj0zAjjE6lbV7r81AEJsmqZK9Vp7pzVMKzG4fXVVXu8Pb8snv7J98uGMYwjrVrCd37LsiQAAEzOoI6IsMy26C1gTrO8ldoDsKrlWRGhx4fGUrKbVj9tealO9OpjTWmh1p/sUaIC7SS+BKloFl0A9O0bnjWuUHboTd9XBmBVUxSHyofGLjvYcQRXQCM3JD5EG9Gi1iyB47z+L3IoqMRA3gD24oDprSZEMLUgVJvUVOrCZLtEsc0AkIl2Kw1QBgJiBKMQuxES8DMDvpCKEhRf9rX4KjoEiAJoKBSJaKoMRAXXBCPm7df19rb3fe0mQEBDc1BUAkHi5byPQPW4I3BaRAQgRzQRAARGBgNBMfanuMydYOiKc/F0JiTn4vSy19KD7fRgOTYXRq5cff/HNbgyxacXAwKqqubfUjPM+pY4jqahJjU0Tm/jDP/98xf0HZw9zHkeZGGmb2pMOe5TbaZ4dGsATnR83cLJud4epCxzcqmro6W4YT7tuxgjz+Hi7+vr2kPrVWHOkCFQJuHcs3GCRNkVmnF3m6mB4yCW2cVaLIU1zTZxcpGnjusFa8sSeS+aCwbFfha5KgJDcnw+36cFjDyZSJHQtGdV5dEAIGRiIA2IACkSo3FEHNvYhEPlNxlLshNPvnHHC+eeT7yuj1s8ebiKZis0KP/ny1cfvHT06XQOag//wz/56tdk8++BpmWaO6ODM0UwR7inT9+4hMDddfF/EYfHpUCB0JOZvX48LG0utwoIFW56xxRzEbAbuqpIRiQgXqr2bwD3tHwGBqFmq9wwVkZEY3dzuW42IAqDCwttyczCzZVpHs3sUOlFQEQpMxKZ673pbKrDoP5aKFlR8jKGaIQJT0NBgv/FsJQ9iwilGdGdMbUeBAQ0BgHF5NMO9cOqIHiCKh8oaQovapRiH4TaqILO6o2Mt4mYIyKk9XL85Xj9KbBliCGtqjpg6qx4kSJ6a1OSyrzSMwzc1qsZ0/PTx1eH17bx/+uA3B9cTjggwT9cffe+z25/+6Gx1dtmu1987/vG/+5Pxr/6Nd7a/Hbtnn9jJ+19ccfPZ75995yP4v/+TcvH5fHV39OCkOelLLgDcrVbDboxgZdx3a53G8+urXT74R48e2n537eer/87f+uhv/u23w/DZqil3/OrlN2dtUK6v/+ov3/7oX7+3/ZvHzx6Zp8ufvThZNV/+7C9vBv/1v/0HOyyj2auL4Q9+/794++abw+2bFPNUDpzo+Gidq8xmsYuuWqbh8ZNHMTYXlzfuhDEpgklFDofDDEbzODJ6k2JEvnx5cXb2YJpnMz+MUwypD2uqbmq1iON9veK7U/VbOxF8qx6Be6BAnEJ7
gpbmvbCrVAEmxlikktVSx6O4iYSmHmK4Or92JSLMZYqEiW3MlQI3bR+wMCISLrQc5Oi0ELkAQ+oAkH24uWvXp7WD/d2Ymub89Zv18fHNdL0+O/qD//7vf/PTb17+4uu2acbh9vTh6sPvvL8/3Oz3w+PTTkW7bhub3hGdw+rkmJtXZSp5LlMZ5iyxW+2G3aZrYrs5FANqAnvbdatudX11jc5u2K/W4+7F1d31zX7vNcM0yHyoOgJ7Ged59y6A5tD3vYtiSNWRIYKQu6Um7cbzhho0GufJTDnG/bxHJscQqaPYzkptE6qNbdNJzuL+6bP3X708nB+mry/u/vDv/udffvWvr69uQgwpEogPRdrU727rTiltGS5mn5CKhuA+Vevi7c2k2syju1NWaxN5Fs1OzgxcC4xarMM+8bLJV9UICABMFBf7gzITqSss2zxYmjIhIjmFgNylRg2IcKnRRPKw1HIsjwyFFBKBtiGag5bC5AyKgM7BAkvNRFRBq8rq5PhqGGQqQSSB7292IVCZJbYNJ3TFohK4xeL9iud5B+TtZnU5TiQGSH/4d//+H/+rfzGMfj3sf+u3P8t5nKZ6fHZSp9lNL3e3q/UGXbpN+LXf+uyv/92Pj4/7muvzr97GBh+fpV/7zU/+7R/928fH/Gf/zS/yrlu3mF2wjY/Xj9+8frubymef/uDlxUtIGFJ3OOxjVTBp221uC6egRZsm5ZLb2MQYLNYqs6lmzU48TMOq7w/D5VxuV+06l0NIcLo6unx59dHZ6d3tdbdOdze7eS9PHvdHD7rz/U11TwDr1JbbQxv46vqmb5oxF7CQuiZEHnLOFEfzPuDqaPXy67cUQup7HKaOu5yzF5myw2GKwYdxOJRphauSKWzjepMuL6+CWT4MOo2olQOJzpGjeYgUCNlAgKlWBccuBXIAkLTqLw67Rx+dfX148xsf/ObVFxapV4FpnJwJ6f4m5EgUlk5Wo8CwuKmXM4p4KdoAYjfVe8+zAzMAGbg7iNTYJBGfswavtZbYpc3J5qpeDHVHEYmDmBvGEPsYqoC1m17Fcp6MYXW0yiZjrTHGboVuOM9zLVOz4pLnN2+et+EDxM3TZ5+QwfXNuZuqAjKbC7o50dnDB7v9bUBcrbYpplqFIs/TRIFim8YZTcTMXR0ATUQcEKFqMVc3V1V1dQfAgI4qllIwWhaATFhNhAA4NLZwHQJpBcBgIE3qLE9uLmoYSKqZufuSz5ra2EduEZIBL/wHlYJECBFjjhSAwM3mMsfUApm6iOnh7k3ThVpmU+DQHqZDpFTKSOhusxZjj4B5LpmJHAQBJNcqhdkpEUMExCrFxYi95kLMTRer4pInZAymqlJUlUNIsdFaArcNN3e7qxSjuau7mCNiiBGAVJVDCkvGzyUwmmt17GJjkkVzAAJ1dwJ3jH3Xbg/DVQiCQG2zEmS9b+kCtULghATmhIGYFtAzMaNDkbLpj0qVJiZxq1XJjJkCRDVdSCBuuNy2q/hQyqrr3ZGBwC3FNhv/5JdvHz168OkPPvnX/+JPEvJunh9stynGYT91Tbh4cxECnx2fXJSbMub+qF8+BU0TldCyh+ixJa0wFY9dF0thVxIDolGkCpTic57QOIXABI7F1WrAvm3BSQQYcZxK+8HRbdayjrltV5tNfvWWity8fvH2819i33IKx/npdLU/3mzLq4Je5fnzsZb65tCcdOuHR7cXd7HDWvdahwTQBV+3GE/ee/Dwk6F1DmF1ery72SVMeX8XQ+PqMud+1WutagrR++Pt3W6qFLab3ub88u3r7cN1yDq8/70tR68aNkmphkgReLjZT6U4FBPVqWSEGCkEtGGwV+e3l7c3t4eosv3g49OPPxumc49xP0xKRBwU9XA3HKQkQDOX2YJ0ddLIHWHk0uhht225wKxzPeQDcaXoY96lR+9Vh2HQdHw8vN5P52+7gECY5zHpmZtn4YFa7/DB42MahnYFl6TBrWmbJqwj0bA/mFWISaHUctA2KdVhOKhzzapi9WBmJRtMd1drAUJsqmzawAGxTkUPLnd09dXdfrd/++/uxlfX052sy+phc/rxJ6Ec37wafRHezEMKVgEcHdlEm4jsPB1KUXcOUy1NiE0TiwiozQ5GoYKTIxqygEcOqUMzqA6ecYmXhOAYxFzFAxiRhaahLggikxooRWZBUY1IlQgZMKBWBaZKoFq7xApm7kI8iEwiYA6SGyCHAKrIqTUQqQ0yLfAKEzBSg8ApEkguqU/3UhEgIrm53KsPFIBsWSmaOdOoyqWUmmuZXAVVwQgW6uo9lAPx3QobgYFxYdiDg7qaLbO7EiES+uLqw3/vXmViJLqH9SzHk90HOTiE1HTdajsf9g5FbW7JQZQwqAkYkpGLYEAkVEXkFsDdlk4fd4zuBIBgSkQUsHKtQcAxEVdVAkYkhUwB4hILUUcKsxYgSU2TMOSCy1xzkEIh1ixYpGcEdQISIkeaa1ZTc5ymXQCqJROgqgGQugfiQJxCnEHxvpPLEdGASjViWjwmSGE5mdHBoKIDYqBFylNTVYNaAI3YHUQVEI2CgCkaoTcUcagPgx81fJunF8OXt4e7t1+/bD/9vXbzOO6f69tvOnRmqOZmGgB0GX0JDcDcPAY7Ws0BXcxzZskNubhyhIHnSmUIFpvHfffZ3c0vxQcl8MAT3DpcZ88N1ChMQiH0aAxODBo9RzQCFCvOPYaoMiGQxXDT+HUPGR1dG4AkKs4Txzumjj2SOTK4mSqip0hCIl4IBUFFR3BFPCCiWQncsewdGoSIyACN+730464ATkSOziEwNxgCxUSEhBzfBdDeuYsckYhYtBDSojIRsy6s4nuVxxZfP7gh3RfHAYDpIlsigKsLLwoRAAC5iZkykoO7yX2nFTgiOixGE0R3tXsrNCAjLU1YuGQVfYlcugDc3+EQyezex+MALtlBpmH6+vXr81vwtBZ3Bp/nHEJcoqeIbpBrzl69ie1m8+BP/+KPj1f9b376yeH6+vz2kkM86poHq8bGOw04GShC67X3QqHuzbNrFzs0x1qmHFXoIF5xXkXogpnlbMmZigkSWMkhRkfPWQJzdumYY8Cq3geu1dAxS00cEDVFMz3MGSJB1cqRY4jgMFgVq141MDzbdKXuybk3YSmlirlahWAEQGORkMLkmZ1CgOzFolcs2xDSwvVhh1p+bYt9G350XQ3w7X769PFm2B/y5NatXl4d8lA//Chut40K/tm//TOE33n64dMyz0TuLgBAhO+SiI5MrnavTuPSSEi8iOb3E/ki7yGHIKpI7GbgRkTwrh9N1RyRQ3BdRnd2NDNl5iWUy8T6bXKNGFzcFQCRUG15hOAdPL0yBbWFOgyLZ23hzaEhIHFgQBCRe78SACG62rc8628zyWiqRBjh3kEHxNU8cHQIQG4Q3MA4cuia1AdEpmUB664GhGAKhCKFwEyR4gZS67lywoQdEeRcoE5oCmYKWIpF4u7oFNtI23VztL7+8ptudXqxn9wwANbD5bg/reNd96gz2B8/Pa1ftXfnsH3/V+v6ydGv/uF39k3ZbmqEdNQI2vnLrx4
+/fSvn+/7FOzlq8POvvNbHx32N+unHx1/8vFhv9tgk2s+efLk7YuftRzLYTzcTY+fnNy9PMcA1MSu3/z0hz/64Ncec2rOd7Nj2hB99+z0J8P0+rCX9z989tl3y2Guz7958e/+YvX7v3P2t36vOT3+lH/z6qsvC++lO/GzPp609e2eu3AxXv715Zd/9+//zw4vX3g2jf7g2adIoJbRk04kTt4wBJ6GejjMPzh5Mo8viCLEKLkW9a7pz9++IupCiEM9xERzLZe761lMDSkwMgzz4fToaLNuTcGBYgy1mi9K9nKu3tcY/QeBNAdAdPSc8zDP02TtqkO01BC4EzGDoXiMnYogwrAfYDFUGnXNqpTKTZrysJgMaq0cXKQGpnvMrd7HLIlZTWPgueR+tVqv++nmICanq37armPjj55sX355mSh06/bXfu837i7vHNvv/NavPHj6IDsFjDFGyYVit6QvAU19Ch0A6tnRNhIddEcr3aT+6cfPXvzyl/Ji7jdHhpG4i23jZqlrKXmzjjdX1zus03xwUZAJ0CFuuV2lmPo1APwzAGibVKWaK0OQgnPJTZdmnZxX282pV4FiiXtpCamVaQ9QROeKypCmYqvmZDYotQJ5szke5jLWu80mhVs+PdoOj7ZZR5fKAazVq4ubo+4hY5h3NUnbH62v604VDflugEnh+gYeHq0YpWagkGTKU5aYmpLJ3YFRA7an29ZDOVSiqFPhSEwkIkikbo4gaGLA7oBeVQI3CshM5hIDz5KZI4RkKBwD+33m2QGEXF2asBYZsdb1qql1bAMGFFAh8FXbvN0PHXEVjSlaTNIBYhsYsu6bbq2uTU/FkJlKyUisToR4fXVZcwYFC41u6fQBPdic/dP/5p8Nd9ODR08//o33K5QAHt0vXrxpm25/t2u6diw5BN8eh75lNqmuq7Y96rvr/TAc/Oc/evveg2fHj04P522ZJ+qEAxvq9f7csdZit1cX5AXE81AT6HvvfzBO84sXzymRFi370nNbx2HCkbuuiJrCtvtoP14g182mG+72BsXAHh49ePv6ctW3atpvj/IsU54//c1f+8XPv2CKYvPbw/nHn3yk8wgqpeSz97Yvvtnd7TXPhRKePHh4/uYycElNsIrTzmcSiApNb+i78Xq/2z99cuxgzapxIEXY9Mld+023WZ3t95cn3BRVRW66/vLNnTBJdVIhbOFdqp3RVCsBeeTQRrfaJXrvwQeX8+XvfveTi3H88P3fvf58XDen426KqZ9yrYZZvnUVlTLvA9OUZYnlI5KKEdGCe3x32DkRMaGbLU21uNThmFs1JyRiVeu7JjChe+SACl3f61SlzhSa9dGpSYUC6zXv90OZS0qSYjw7O3379sKLpphMjRm7hpoV01iGw9WrLyF953tQ6ezR++12/c3XX0gu2+1Rsz4e50OpszcUQzfPQxO7Jmzn8RoJm9TkMmku7EAILroQAQiIgBB9QQu6ytJSAchuoCJMQQ1U1R0NEMxjYKvqBmqVyMDIHCIGBo0xDuOeMBKS1EruaKjFpAJR596gKjAgodTKyMtVb5KZOLrUSLFQkWKNEyrPB90ePRh2d02X5sMcNGKGgIG4DRSKFrMYopdamCg17f4wJI1GzEQRk7uKqpjh7DHFwFCqbLonu/11CuyqhIwYRDMSSK0ppLEUCK1lO15v9vWOWyR3soVn3YrOYOiAvnA9wUWqiiIqIGd35vvaIwBQVQ6tVan1gAEokQOamlilAKCKABxiNXHzQBswL2VkABEhx0XIE7O5VHDXeXb2yAGYqkgAcl8CZ4FjGg8jUECUwEGXLRlCNW+a2MR4dbUfD+XN2yuwUEom81pL04SuT/NhYnBiGsuMTGmV6N18YApZK4FKEa+pa2Lbso4QEkckKLWaTwAKWJ2YOsYAHsxcXa1lTMwNDUNxSOTNF798/bvf/ZSbyEmhgxfPLzVh//DB2+trb5kb07y/Pbfx6mDD/urLKax6nPj1j95QjttHp/OYy7Df316s15vp4H3IdY/bj5+sj9bNDFyrDftyc90ZOGofyasUA6SipcqcMQbumsP+NVmMEHdfPy+7fUypmQyKdiRWhuBpuNqpWBfaMtZ62NfdLjZYMI7jXWzbsF5fjbfitQ1AfcPIPt7lcju+/DmxWFrngi2zHnaBbdX3vRCpRof5duCiEXDaXzqTKorpneU6Tc6wG0fvbCabQ7Pp+nWCOKlFokcnADDtb8ZcdPbT0PC2mbuuHOE0FXQbpGgXkABjAHepSjExJycas6ZtA6uYi4JjIELiEBtX71brw3ST59q2rd5cB7rz4fnPP/+vQS2bXBwuodEKUyUEcEzGPT94eNw+7Q8JUurCSUwHqsMdBjQCStimLjaJoSJRkTzNUgUQoYkJAg42hxidQVVTYkJUg745UkJu2lkdEqVVA7M7szC4O6sxEZaKAKGRGBlXnaaoYmDipgzamrl7cAopFMkcAwaWwEZQQyQEl1LMFFShOFhgDYS1lkip5oKlRq2xW3loqiOJRmoiEgV3cGoWdspyGCxVyyCucenNAUXkxRMhqk7A4K4C4LWIVcWYkJEQfEEZYUDwxRsC94ReICRAF1Ww+8InMHTXxXmxpHgQCJgdmHHhKy+iFTgI3GcfIHLsmtVuykgxBg9al38xATtEEVGvRJWbljCqKiCoCaHZMqyZgCsRZpPBaUSNiWuFBQqERlKrkifkakIIrugAGKIQGQEgUmUyBERA7iiyJLeGgaWKWSuuAB5TKwDqhgSBbRYxtxhCDBiI0rvRcOFBBU4CS6G6Bw4IEAgQgIiqCYgAAgERB0QCqaay4PzBXKtBBGJiAIfFZAnkZg5G1kZmc560b3mDYZjGq8MX5xfPbzBaHj/rGUQFDImCE7oBOBG7KSGoeSWWpjVCQyi1Vskc3UQk6hxs4PmOpcLlpjmyzek0ImlhmNlnA02AvbVcAwGjEeBsNFeoKaRU2UTcSwRAjODoygPwTcCJyIGjYnRIEhwssw/Ri2FWQ4UI6kSE7jYSooI6I4KrFyLMLoTMGEAJXBEruikGjIZqCyXY1BDgvuIPGIkCEZLRskP69xOTLZEfU3Gme+Sw0zvgi6IvnjhEcF/8QfcuIr/HG913WtFSmnY/nzmY3zOhEBGcFqSXqd73D93v9t2tAihBIHKRsuDXzdxNEcndEJwBHNDAwV1UEQiR1KuBgs1V85v/L1V/1qtJlqXpYWvYe9vwTWf0IeaInCIrs0Y2ya7qLrLABpsULwQJ3bcSIP4FQTe60C/gvQBBhEhCEEBdNKWmQFECKLFZbHZVF1lDV1bOkTH6dOZvMrO99xp0YceT6juHwxHh/h37zGyv9b7Pc3v/4sUh9KcCaFYIoAvRPd3s75ddu1pt9vu9SlUjc/qbX/7kyWp50S+nYXc/3EFgQFp1Eb2mhpHizcPDefRPWus8MtgyplprWIR6HPsQgLhd9EPVwhTU7qcyRyNrzUicBQYKayZz4T4hQ6kCSNuqxKgE6DEhZtdMVUVag7PNSbtaVOIM8ZB1yBWJG4LU4oK81KqaYfbMSTYoXd8Ycg
JERgQGBMA3JSJ6JQjQxDxwmBBUZ1TUgpQZMZiJCiOhOCibtjCqnYHDgQh9N8oGXFV8eYhxiCI5k0BMg5zKMQMpiWhqHvY4rvDsmAxIFZiSgk5QroLmq1UFAvFQDNgXzJoiIyYcgemwKQgTgho5uaqbuBOwdHQLFKxMi01GcCgC17CVgsMeAAThZCRArvcji+oIUBDIgRw7s0Dro2cic1U0VGF0AO3G1Ct0JACtytesGAzA6H1WpwtfN+o9FE/E0bG4VGUclyYCklUQYX1BmUDGMx4zggmiinmLSBtQreQqTcneU6t8Pr8/X76KxKIa8OINWLAACjWXO1PtHcaBU3q6gXXZO7XZQ0lxnUHayVRugxkiyCmPtC0iSnpg3AHZTACMzVA6IiAmF1L00mJyeO7oRgRAgI7vyu5Xh5/j4QxAGYcHB7v+9K0eKoIplR1RYELjHWppEdyKTNQAbmbm4YHEmYulXXK/s8abuBcha6Zw1IVAIiI7nhQLnCptru6C2FBmpmsae8cKrMVRd/catoRv5AAyRogaIA8jvzMiKoq4MhOPrD3nMxXwADmyUAAiRncFT0BhIQgsNDmgbR1YNTj9yYDB1dF+QTOLiATXL74vkh3BqglMndQBW1IYiKmLZWZxFx0V9jPP6KROwPNCNxWcqlFzIsU0BAdSMKbmpu7vqORwP+4Aqy5aSB+ABpAzOARW1yJAIzeKiqNkQyN4Tlv4iES0jQXM0aApkLYkR3BgdGJHz29OrmNO6bNFNy9yYd4rBaYSun43E8TFGBc0I3kbbqu5hjMZlEq0MfMJgkDs93x68Px/eurlgrCjAHNlxTyK7izhzJUZqJGjK4g6iGFIOH29q2EVQrYSeCgVjVuq5Xa6CWMqfZc4zcWhj6t4alX+VhQI7n2w3v4pu3d+cX20ePLw/3JwebT/Vf/fkf3+0OuLTNLVMpPMQlH9Sid1D8ZYdpHF/cHz64iK3W7dVwMJNxuteyvbhgtLntc8LTWNYDE0SkQBycysPLC54DllrPur6oNdUUIjo2h8mtSYtIjKgMYv6mFDkPeQhi/PMD/HKiNxIEzVwYSIH/8OvX2/PNv/s7P0YZyzR1ZyuptUxjHJKhqzuY4/JphSX0+IBepgfaKJotd0RjIiT2xd5BD2kcB1d3WgZmRAc014Uc6oBErKYItAiORLS8ZkRkogT8oD8Sgi0g48XqRKqyGE3M3VQBDB6Swv99VlHK0RSbGjM7R28QuHd2JwpGtU0BQ20tEm7OLpmjuzMCgjNic0khllZcNJDMp/vD/esXL7548+Lz0kYgzMOj9eV5yqvQb7q4QjR3DTodr3+1yX/fMJ32+yHAd9ev19v+++9uFC2uQxSDUbdXF/vb3fnw9OaLm6fbqy+fv/i3//f/87s337338XuqutQILiUM28365ud/JJazR/TEs9qrb+diFz3v1FcJWVWlXjy9uN83nU0R3LG6QgirVfjlH/ybT3727/7Jn37+O/+T/9X1n39lR3n83tN08aFg/+ii//1/8v/+W08+/eLVv/nmv/59b8e7775/9pt/czX0f/Jf/se/8xs/e3v50WrzseNWpru7X32eq+ueYbex2baXj4fVR9vhKemK230XqB1254/Pv/rll07gHi/OL90aelkln++uVZQGRo9DWGOf2jxvz852N7fLBXaxOX9z/VZdV31/tz+BLs9dWK/Pqny3ZF7NHJbeswdv5YNNcYm/Ppzm8J0ga8qMtZVVf27uQ99NpcSUTtNx268dg5Tab1alVhVliqdp3JxTAGbuHMKqj6pTYIgUUdWqpdwd9rfEXJu3WegMhtXG1Pt1gmaE2K06Qzkcdx2fZTD2/uqyu76+G7rV1BaDRFCFbT+cjvuz8wszlSZtdioajHe3ozS76LrDbq+M37x8ud5c9mtsX7xVkdgPm83V/n63PTt7/4Nnf/B7L54+fdZOs/ZDa6IayaiPkRfCojmAzqejlPFBpEatp7uMFBJtL4bD/W622Mfw/Os78Pls9Uxb3E8zh3i2HUi5yqnW4zjLsB5CH+vddDrMmhIZumcmsGRxwHkPkdNxd/Iu9mfde0/f+/q7t+aIAXOgVc9392ohkykxPnkyQNszdTHhl693v/Nv/cY//72djUUAWijrrd6Nc/Q+IkZMs0w5ks+1i1HmOTAxB3RHcxFhR3cDRAefWxm6qOCiTYECkbqKaiBkCgqs6M09MocYSx2JGSlUsyqCROYyyX4IPAH31B2rdBz6GLrE87EOGaf7OfX8d/7OR48/fnL+6fu/99/8vt3e7ncnz+XHn7z37fX97tvD9mLQVv7lf/EF6XYdLwiGwCLSYn7y1Rc3cUi1SN+vP3j/7O/9b3/nn/xf/qvz1fby6uwvv79t0E+HsxefX//k07O/+XfP98VvX9jq4lGxb/MKckejIgK4QIfMaf3ieJ2UrBZCXMpwLs43Q3R0yts1gL+6PT17+v5uGsnrhx9e4mzTfnKUb375/frsar8/otv5sx+gz2dnV98//37lYTqOTeu82zFAdWtVVsPFRx8/+df/7A+1ZoP5/PwiZqs2O1iM1EBJqRwlIL99/SbF4XY/pm697Tf3u+fOkPqOVnx/2L2600+eXYAdYpe6jkUKgKcUJfez1ZhIESOnu93pgw/PAsdyuDdp3jR2vaoQYd7029UVAp1ub7UxeJcp7o+7ahbCMAQ6tttTPazPN0MMN3ffrM9WUqmafP79L5upO3WKtUwPm+Sug3exseUrMLsrOBIRLBXFS6TVHN5Nt8skhIiMZKYIbGYPlQ8IAOpKiej65vbR+Zk7dV0KGaVCzHH39sQheK9iak0DMa86QqMQyjSlFLfpnNVN2nyaOMaYI5ge72+uHj86u3x0ffO6SY0pnJ9f7g/3zPny8ft3d69i8tP1q7fewoc/3JxtJ7cy7lf9pnlCILMamREQkFSUMC7AgAWzaSAqC0uS3JU8OQAvE6uaJQEERUlEFAgauzpjkDbmnI2get2EXOfqTIzI1NkC7QRiDil2bbojMsNqYmM1RxVvIAUcU+xUJwcLQ6pVREVE1SAQgxqBOIg5iRhycjb1GiF0kQFU6pEpG4CDMaIq5JBFPTCP9cSQU0pqqt5QWIEiB4c5saMZUhan2ipUj+TSjsiBECNFM+j7c44balUNxcgAmnhrcxdia9Im12oaGBAJGU2QzIhEFLwFRzM3E2YCcFHRim62cB3nNm3CqpTCCPKAZnQmWuwEIq2BB8RxPKw3l0COgAIaAySIWca3hxOif/XNF/3qHGBarzfj6fjdm9tTnadiCCxqqc8ceD5Z8ZoSYgxI3vVprrUb+tlKTBD72O7bOD6EDmpVbUJobR4reDGfjmNg66zIOBqAqJhPOXDOCTgqhEjJ3GlR44EDk7Wl2QTRNTMAOEe3Hq3CNOF/8l/9ix8//uU//Pf+3odPf5A437287R+dt9MhzYf770e4uW+n+fr5dzLvgtRB8YTAbQrzscsDzBXHtkk/vOq3quNmvWpd5ymfDpOWkjnMh9GcCaOK9GdXuch0/+bTjz+1uQ5n24Tz6eV3bbJ
ffP9nx9Or2zrLaXSVjpqLBQJTX2p+pXokImCRxpEyDwfZA0ub786GvoyVOAAaY6gYxiKEjJgO5Q6lxiEAhuZ+Qnu7v019ZG+uphWBFowtNZWYmBEitbP1hpn0VPuuS4khWClQRV3JPZTaiDpGmlXdbDy0xtNyNvZRU8piYogxEKEv5YaIzmRgM4c0R0bm23neJHUIU2uGoKAx9qY6RO5yPJzG1pxCIuamNkvZggdKrskEDxVPhJte3UBJT20esI+IXk9EHjSiJaMwRu2sRQZkOk0j1hbAALkC3gusA/ek4+5+Kr4L6fnLt3h2dry5/v7b5y+fX1+/3nvR3iMaClhOKcaEOQtHCAEwkQRyZ68NBJfQDYEDG3OOMaZgrhFZ3UGlSNMFY6GVkZkDYzJzBqraHCxEoVpCiGmTkVKLQOtOAaQ9+KxVqqg7YiAGJMRk6M6WuugmxAnMpMyq1bQhB0fmEBEzgQfAoC5mSOSgSGwmqAYAS+UqIQOyuYGpuy3DqYkua20wA3MgR8RfOy/AF3cSoDvhMsguf8zRiZCXqgKgyGmAZQdGjOjgqgS2opl9xd0qh3F3+uH2SSt7VCH2UncBAyaQRbvywmFA5NSlCRxrc3SPwcAhJBRt4OxqgJg2p1ZQPAEyZHINwLM2IzdAMSNWRF+lXE4v2WadStsLiDrHNGQKCEpNTN1CiIQuUh++K3MxRcKUopmzaSY28wruZkyhqLWl7xYe+pAWIAktySV3BFA3QnIAV+sSdzGcGjRgZgjoTsgLYpQ55xjI9cG+4RQY3ETdzEVMPSIlQtUyQjxxro1WiEtfF5F5r4yeOPTgEV0CGBknSoEIkZZ/IbtHVRYgzETJvAGyujguotaC2TUnMkAA4uXqXuKMzEAW3NkgEoErAokbAJIBGhCQAosjmwZgEHXgnqmBKTqAuiMBYrPT2+curmboqibuoPPkrZqZuTooEhGg/PXeejNAJiJwF9NFFkUEWKrrl38GABiYu7sys1l7GJMczJQXi5wpAhiAWmViBFYVesh7+Ds6yAN6xtwR6KEua0mlqRESAjryouXBA5aKP3z63hdff2WTgpfNkNYRWFUdbneHUmQ53Qc0Ynt6ttams7R9U3e/6CLX+cl29Wo/fb0/EcVtCmmeSoOZGVsT9fNNxgizeM6rpnWGMBkgkouQgquPxrGLpDarDTnOVkfTgAqm6NZMFyaUinZpuJmcztbD+mw8VCc8TicAaMtXmThRbftXb66Z2R+wystx1hdg8/KW/BrbvoDs1WA/2fX1NESbTsePPn3/zz5/ScqtSu6Ig+IkuetUqzXtU5jKLE4co4nEEDKTVhWHWaTrByitmcxOGkKgQIpFHRADozO9bO7Kb0f9ZqLr4hTQ2jtCNkKh9E//6FeJ4R/83c/U6v1+lzm+ef32kx9/CPJO5DLzB3C/uhMstCGkd2AgBEemgEjLVQRqgE4cEcHNzcQd3A2dgMDBCXkxX6rpEkZbwEaIaAYLPGGZxBeTaaDFO2/gjUN8sNwhLjSoRT5WNQP/HwbQTA0pZM5qYsCKgG6MSiE4OHlorcU+D5t13qxjWHGIDlhba02QcTycAEXLOJ5ubt589/rVF7vTjkO4ePRpyJeczro4IJADcghEPrdJzHbjGNfr3f7myScfv/qLX/70p79xevX8jOi5hyZKmYUdt2fdRz+qv/z+Zvoq3Gn+8Adv75+vN1nMXd0dzUzn8XC8BQRt3sXw7JMPvjilsDnrUnpz9/r+xWvsL+eZXNPu5X136RyH2McytSY2z36Y57NukObfffXLD3/8yUEOl+8PxzKaAndrETzdvPn4s4/i02fDT967G99+/NnHbR8jXK08yfH47L3P3rziP/v5Vz/63fe6MOUYj+af/eC9+XBz8+blDwJVtQpa2gm1YZu0leOdgsd+A203no73u5cvD7v99jJ//8Wrx88ex5jHWs7PN3fjkRkctMzTZtiMt/NcYeg2Pk9IsF11N29vU5+7Ie33h4BsYkgQiAXN/dd9fMt954FP9FciOQAwnaY5hDiPNXEDDodxTwxNpJZax9N2eBQRpJbNpjerIYZ+nc21H3r2cDoefT2oVgq4iFPIAVkxEgZq02xgyBhTPu1O/ZBu7t9w4rlN86QcchObtXbr/nA8InI/bMbTXGuLfZ7udwhtLpkyUyAvgMRgi7/TQ8B5msU89/08gyf75s+/3t/tz58+PU2Utuu4CvV0/PLbb3aHXak6DDlkc9K0Oqcuq7s06TMtVUat1Hd1H7BepUzpfn8/XGzOt30b92r10dNVOZ5+/JOP/uhfHQ6jY4jowEiBMSBEQINYNLpA10WyRgOTkVHKnV/fXQfqnMJcJHWZGMrcXr98jc1qqV3Hf+Onj1+/vBOft+fdJvWHnb29L0OAr5/PEjGe93/69XXuuFTpclittNsqXo9gIa/yYTxIDJoV58aGIQYXUVMEE22EmKmr2h7QhgjLeiMggntgLq1EzibqLqKNOXBYUIwK6JMcgcyAVIhQI3MMeW6Fg1SrY5M4hObSd4mO5mrK3Ke0e3Nze7x7TPL1q9unsSs6tZC/eL0f5+YKNIEIfHDxt96+etVHf/Xq25T4N3/nSYDy+PFwmlph/uQ3P57wy3/+T//5pz/56O6r07FMl++tdvtyOu6evRee/SiW8e3Fo/defn/87s1LHrjKPB8g5xVgt5sbiMzji+KCEPucWpkJaKzj0D/WWlNP56uNNru+n+73e2D1OtrUXd8cnr+8fu/qcb89M/Oh75sDp3Cc9bvbFymxuU/jvFmthxhzwGp8ebE97urP//zr6YR5WD99fIVmb16/+uDDp1pa1w2n18chhYvz9c3d3UX3dKq+GjaTyq6M6/UmOjWRsU6S4PLDD/f1NElzRe+4lgoErc4GhN0wy+yIMSRHyV3Qqk7UdymtuIluuj738ezsrEtxPM6hR6CQIXZ9nmpRETc71hq73iz066en+frsahsoa0374/1EjXJ6cvXJ/vobfBdTDSmZuenioHZfpkF7t91Z4KRLtvuhTnHZetGiS9oCJzV7ADGaASIszBAK3ny3OxIwBdIHYCOpk4sRIYGfTifi7mxYWznGHBOhq4ta6AfS1lQcnROB0TSXV2+/f/LeR5ePnt3fvml1jrlLIU1T45CG1dn9/eu8CqfD/avn39qz91fdUObS6oRZkTQEFjNANYNIqeqERsvhQ6TSwpFEIGQErq0YtBi6BzqKmrkHQ0cxE4cqUp0iMZb5qOhItaoYmro4RGlTzn1pB+IkdSaAwAkR+66fa3HXB1M5kKOJKFE0tRCTEBkhIhMxU5ilALRIROApBHNQ83V3NtaZcIGkWjfk4zSGyEQRzWtrbg6kOUbXqrU0aF0fqjhgCJQBZiAs0kSVKHBItVofgqkQOAc4TZPiKYTc7ASEAYMrpxy0NRNMQ65lNLR+6K4P48XmTFtljoBeFicRkpmL+6ZfH6dD5BBCEHdzHHK/Gw+LlQnREbDMc8pdn/vSCnBMIcx1wkDqwCGJGTKoNGCKHHa7+z/84997/9n7q6E7u7jYdOdGjC
jHUnIXd4fZHbvAZ9uOUQ/347OLs6nq4XRwd09xFFqfP97t76W1J48eH+bdSC61vlsbgIBPh6PW6qViaf165XJCazCO5hFyFxM5AahVaxYIKBKF5sKcHFgdFYwJzIu7RBzccBobdh5CzFeb7Wfvv/XwH/3Rv/lg+MX/+Mc/fZY2K7lPTZ4lqfN4kmNI9Xj/cjsMbCLY4nn37TffB9b59HbzZP14WI3f/snt2y/K/sQpdmdXvL5Yc6KuCzmswkoJaynzUc67LOPsx4lPn/s4vXjzBnQux92rMt7t907N0NVcm6+YiEjBSFmrhEgRCJ0jB6GCIE1an4MEMOTqmrpsLqY21wljFwhD4A5B2pEJu/Xq/nCSWhq6RZqadTEEtlKBEDgxAJFBzrnPgfuhSGP3vBqqyu4wYSRQL01bkxjdzaTW5ayAFBGp73pUFWncByGkhiEsw41FZjBYbQc1dQrrzWa631sTIFGEwEFlXg/ru/vxftrlaBFNAB5KtUlVVAFT7lRqyp3UpoIAiJFVScCKSWAyahDQsASjhLFNp4miIAwWuQvToR5Hn4uZGjQ3glZKO82nqcqkh93UDtNLxPvXd+Pt/ddfPn97d3c/zTHFhEDgOcfcd+KsCIIgZlgLiyYiRDHy5tFCUmRG6GM2ndA1gLE2FZ3LDMCQBlEFLTG5ewVw7jvocOlkZIDtdlVOBZBqNQoBCIBQ330K5nlaTmptgQYvttIYIwcwS6kzV7VmBhYCLSUXISESIxCiqZqqgzU1RxeRpRWeEHPuzAQhui+AGEcEU1m26OaKyM1aWNrMiBARl/5od1N9V2br5v6uRVwXgYSQmiGGvLiODBEJQaWKABKwWwhFYZO20aYrXt/bcUZSCoweiKsJU8AQHaKReiaNzAEHca1TnwcIWV2a+UnBmq1hVdU6p9rcZaaBXJRc0DlygmhGlaKXU3Xl0wRYsI8Bg5nRJOJdAns4/ZqrmbmKIgAwRAYgU0EERBR1AHNRQojMiTkYMBHBAgogd2NaXgx80BCIaEnqmXNkYNiXWs0DIdhC56OOODGhNfJg0pBDCNGxuTszLXtHAgALnPqYSZpIOYR4HfPQCAzMOIApQQS1wgkgRyRyZQ+MwaEhBgd3UHRhK+hARu6OHBfXiMESQAdTQ9foAAa8mC4cF0NOA3cifJgrEA2NwBiMl/hXAwOjyEstmls0QqOlstXMgd0QRGudT/P9KymVyIm8NKnVHl46WzpPycwCEPK70/FDMtLNAJGYlnx0MzfEJeJs4I6IouXdAQsAyU0fHF5Lt4UvnmtBREZeIkgP0g8sHbL663aqh+IpRAD0B4yXPUQv3yFEEAEATVwVh/7svadP5usizd5/vGbHu8NxnFuIARLWOscc+kgMgB7UYVKZW7vs0sprJh9r/bO3d3vnR0MOogiYQmBGMAxEIYYyNadYahUzZwaKZt6nRClS5Vm0mgeHnoMQBg5WXcS2nFrTVexv59KaJMDbqcTVee43fV6RJ2nzxeVVu74Bt/3NnZYKqn/8y7+EQKb+12bOxW4IAA8YnQd30TuVbWnsaa0Nm1WdxsP+7tHlejusX71+q+TPHp0fp7LOUExNCqMThiKu6Ia0QPuBsIqAYzXtYxgPE+VBHcUxI6hIJEwh7Kv/ixsbBeqSiEWjJS+IZLbokdwg/ZM/+uLyvct/+2efktW5VIixiTRRohAZ1ZaSMiNkJEIDfbBMAhASsesyGzsiI5K7uauZLv2PxIv3mR6URUREhmX89gdBTZdk60K0X6xJbshIgGoOauZIxKYmUpGImX3BaTG5iyNwWCxy/31XUQisBkjEEJyAE6JWrbqEOcUQQ1ptL1K/jt0ZmYMTqJkoQgMTafsyn27fvHz74sub+5vmevnkh/3qvdxvibtAMSKq1qUeR6QGonh+VjrYnT7vYYBSpze3z/7dpwXYuaWUTjcHLba52o7mNL59c/P1/+Y/+Lf/7D/7fWNa5YyMYIaBdZ45JfS2vXps1rquy5m6YfC0nVhPN2/heHr+x39y/tlv3kyHv3H146t1+Nf/7L99/KOfXj59hK0xYj2O3eWj1MerR18e97tP/8bV7/3Z5/mzrUi5+/rlo4+qmr35+ufP3v8pu1xeXtx+9cLkaGufoNn+O50Px/0bhHpzffp3Pvh095/9x+fvDTmJ2R3G+w8+++j29f3760+73KELhTCXMSTsQjThbjP0XcwBX3zznUuVueewLlM19KFbr9abIi5Yx+k01WkYzus8pXN0wu12FTksD4mlq57AV6uV0x0gNF9Ss/AOpr/MvQ+kInywuz38cpPWat1exFJ1vRmmUkPqtJ5Wm346CTGGHKSKtXa4PwIkUEghmIrUljsMARtQXq0Ru+XTWo77ABA4J9ZSq5fmKYMANjq+2cehm/Z1mvzp42enWZ+994gII1EOiTC4QquVM9U2Zxq8SZ0bh7janAF1KSRCIZeU6Pl3r7fn54bR0FOO7B3R5unVx9Pum+vne8Nps1rVWS7XjwhTParm4jvrryKapRjQbdzv5uloqlZnedf3Ucd5NmnW1Vv0WcH6u/3cn8WO+8NO1MgEpCoHnmrJIaBDcFIEVfPGx2Ndbbunjz/49rvvUBxjGobhdJiYgrlgjJOW6VjOhq7bdMXtdrSff3OK5rXqOhXVsr64fLtv7On+1OIKGSIlOHuP9nKMKVo7cOE1z/0qbdcXclMq18cf9uVtPHzZogUgdjBxSSEuDtgA7BxAhQACUZHCgASOYMSxqK76zhyaNDDJgUQUwYkDEjFAjhERGWMEbK2mBFwLhF4qdylKm0GxzrA/2V31n/zg/eubl5vUr3r70WeffPlHL55snkx3d/fHO0Bij8dj2Z7lz7+506mSoCI1Cvtj+c2fPf3zz7/AlubGv/j2yx/9Tdq0829+Mc97baEpE4hBIl7z18+/+Z/9r3/y9O/97ne3/91Xf/B2vVl5rRSYOag5Is7W1GtEQoNM62YWcNisV1WBXKvQVKUPEaAzTZTs4vHmq69fSoUhbIHCaPeI0YVjTGy6AggxpH6Q0ogI3QX1MO26mLVR3+cyN9dUm3/3/PbJ+eZyfTEemgMdTuUHHz2q87SbD+99cHXz6noYztzMRKtgSnkuM1WtrWCiw/2JdOpWfZNGjMwcc1Spc2veDWfp7LC7Lc1Cx6B+fXtNyIiEzoHyxfaSeihNwE4x5X4b71/drrqsuPTWyMXF9vrupu9XQ8jXL19Gxm1Ot3f3Zr5vhVepp25/eOOm9V330+Zsa0uzCSEruRDAkjt7mJuW28ki3C7gfNPFvfjOK0y/3ozBMgOZCbgAxLOzs8P+EDhECpFJtTBFB+QYkFm1SmuBulrKEEOrgpEQQcVVgNxSF4nNTQCCK6rLzduXV0/eu7j64NWL77zJsB5iasdxPru6ajpztOl+3r+9AcAf/vBHeeiqV6YA4AAGLmiIDm6MHsm75ZmvJhzjklIhEDdjIHBeqisYU+RVmcfA8XTah5jnaQ6hd1UzoRCq1BjXLg2xMwCGKFbAkSinMEzKXewLStMSAjOiAuSwRleECGR1L
jGupFJQgFp6otmtiXlwA42cAQCI1ZtTFG/U5qVemQOBk5rlmFRBwMU1hJVIISQDbFqJM3oymB1VbK7FUB2cGhiQEZB6Aeai2kwChFZnhdKAA5GrJA4iYtpC2piSGSJQMx26/P2rFymuXRXADX2ax9WwlaqGbi4CcqyjgroaEokaGs61xHeDUGsNY4whVVFkdbMmJSC7Wpc7twbg6qJFI/NU5tRFQwAcXlwfq77ZblbXr79PMa26/KNPHk2lzEc9266n0/Hp+RqYADtQN9FxmgmoqetJMu1DoNPxtCP2YBS5HxK8epg9G7giGGhTtWORepsITvMEig5RpmpDjysistwHVWEHZBTQwL2nnooRmJuyAyJXwKINia3OOuMwrLtH67yOymcvvr/+/33+J4+NzlP/6aPLZ48vEPP5drMO3S1M/Xq7+/JQp3nVp7RdTUWG9XJIO5WdyJv75JkZrz//msK3CkYxcUoM5AxE2I7jdW1Q5rK/x2AEZHf7YbsaZ50LYO5IlNEMbEhdsnY6nbarjUE1nHPMgekwHokhhbhK/WE8EGNhOJTmSDlEbZ45RQYnBKYuMlkD4Cp+M56aVAZ3BLeWU+4iMURAC5ExgIGRMSHMtQVAFROwmaSqGwSytB/3ahAIrZlJY4QYGICI0UHdjawRKQcUsZQIA8+m6gQxpCFPjE6pqslcRWYGXw8RSatj6Dd381jdBUEUI/E4T4nYyBAM2JsaNAwxiXjggOboiGbSvBEIuBIoaiCMBMzuXgnSbKhK9WR2qvNu9PrukGeeEDtinGtTKtVqGdHkcJuO3Wo8lLu5HWcDDtVNEDZ5qRmGwKmYmZuYBlD3VqQRQYpbpMBIbACq7siobS7oNml1IEVuTlqKguZgzWofI/ZoPYXoGETNVcwipz6JOyci4mowz9Xm0/Is2N/fxxDBfCGxikpIKXadcAhMS/s3h8QmdZ4iIaeY0sAhAfECsnYwM21Saykuo6mauYEj1hiCKwIyPJRpPsRvANBVPaC/s3OAAxG7ybuV+yJzISK6yTu2AgLy4uJTphCzPWAYDGCRF4gxRBIG6jLvDzsjZSBph6FfE0fkDsUNi7MbkCDtyUPXI4bg2KQETQgMQuzcoc2GFLtoIVjaxNXu5o5SBvbIJEoETApqFJiaHUKgRvnm7u0jIa1GZq4SmcCQAgOwmqgpuQVOps3RkUFViVBbc5ecswPGJoioIYB7RFu8AASAhExhOUmqOdEDOAcdDdDJgbEQuuoFh423wBaBjQhdTWUdOZItECtwdYDi7oDFTFzZJYBTYPIc0hp0TlqaFI1Z0NWVQQ3IKFQcDLeRDz3PrMCMARm80dLJ643RwBQgIvKiYoETLUrK4tsFQvMEHB1cFwkMDdyQAJAR6KEwDB1YyRu6oC/VGUBgpggKpoSV3JkjGC7KUtNGMVeopYGX2a21Vp3QKYooAS/nI1ciJghkf7VddyICRFWlRY6kpUqYFt3TliQPLVkNXiDtS+RtiZc98D8AEZZvFsR00fXcDCmYCtK70rMH5PtCICZYQlKLWRvp4S83XxATjkDAIaz2p3p3Kn3U1brbZLrbTe6OzEA4lZKCM9K6z7W2UymTmpt9cLaxVqJrfxb/9VdvDpWYaAAykZN7HJKphJQP5l9M8082m6mWBT4ZQ1CAk/tJdSWKzap5MVSD0owyGrmrkboRGLujBlKskvu8P7Wqvup6a/Nxf7g4W9VaYo4pURtFQc305dsjhOguS4LvXevZr5uZHt6Uv/YDMvBZVT3PdR7OV+wtOXRcOWiCbMe67aPXYwhsMUSiRa4TcxXJzEXausvBQwgqZg3M1LUZgRN4TDm5W1NyFff9rCrGDG5ACGIOgEwE5oTgJoFIPf4n/+yXj55+8Dfe7+8P+5wGAOAQ0VFtWbayIwOYq+ByIRG/azRbbBG0tAw/GPaBTBUfEmG6nALMxZcoI/LCHUNb7JnGiwTpzkgG4KbMqCoITsTuQAhI7GZE7qbuuLy5buSOywfa/+pFficVqZo6mruZmhqTIzqSiygxrdbrxCH2K069ttaqhJjnNoMJUG3tsL97cXv7/Ob6+Wl/HC4/enL10dBfMvZLoxuhAQiROzoghkAohOKby/dGm+FGznFI/Wr39Z+f62735s/6UOZy03Quh6O6fPT3/55x2r5/9fSTi1+9ebl7+fzpb3w8zY1zQkQ9ze3ueX95NZZqjl3fH4/HeHGmJoB1c7U+TeNHTx9P37/Zvb37V//kH794ee3x8ebDMfddYH/1F3+yeXR5+6Z1l+/vy2k+HjdpQwHTeV+9lfHu8ScfXNw+xTC8+u51fvLeWT0e7u6hNb9//sU/+0NXmAi3H34UusNhf7r9xRc//cFv11Jur6/Pj3ep719c3/72v/eEqI7HOwqd+hxXXalTmU/3x+uy25fIavTjz370xRfffbe/+a0nn4xjPb94NMnsZLVoWkXqMyQKXRSy0OciRdzKVG5u7i/5cSvTZo2tFnI3t4Vm/SBLL263X9sbH3gisNyiwGHcTyYaCUOkGHlux0Acsx1ONQac20lNU5eq1jK2lGIpZb/fI1pTvbi40gUKB6DmdRJnclVOcRwP4qDupWlEyKtQ63E4G8TK+tFqaMFblWn35AdPxnGmEJxwbKOjYWBzX603hsRd1uNorsSIIZ5O0/n5apqnJroaMoDtd7eOcXeg2mpebe6vb9rhLg3D7nD/kx//+NWX37x883z75Gr7+PHMkDbhOI0gLgKtVjb1NrHqtHvbnW0fPgwE1Of5vq7SkInfHEfqN5n7Ouq3n5+OOzAIXeauy0ctYy1MBIHKVADYzPrUt8m/+fo5ubv7XJUgrjt+dP7em93N7XH88WdX6xzee//qX/7LP2dlLHi4qxdncVjF7dV2v79LwWprHSfsiAY/TtPRILHHAG9vT7HL8xTRwth02t3mDB3WXGsF4Ri1BloeKqYcAlIwEUIANEcw86aLAYRMVVXAAVzBg6kSIjERIFF0qzmvRBubsQsjErEZ7sfTakUxAzDj7EAQInPklDgEDtpevnnec10ZPTX/7s3rtY7H20Mro7Fs132OwzzW01QL6b//D559/gff8S5XgZvX8++3V0MXoxNVp/n004+evXd1fvjLu7fzoZhu1n1HuF7Rxx9tX784ffvL758/P73+4n7TnRFQzFHNSxlVBepxSF0j7jg3tel4qqJd7z0TTIeU4Gyzfnu3b6pPHp+9eXPbp/zLr7/vOtJSY5e+u/n+Jx9/vNvpYayJ+e72Zca0WZ+X2nLOOcS74zXHbQBeb9eI0l/kaHTcH2PwEM4YA8Zwv9/1m7VHLrXc78ZxKjFGcQnZwQwMVLUbeiktRhRt6NjpTKpHFUAngibTZpMQIiGMpXUcNPfoFjpEwlUaoMhcbTjrc16nrj/Nh1XXpW6YStPDHCJPMvkMidPZujvc3zEwGM1Ty2nVWhvn4qiIkHM436ynm6MHSJFKe0fsiomQW1NMD5WJKkaBlr0ZPGhGDw7gEFiXRZ/DsvcFJ+ZgKvBQjwwP0z+7kXEK/Wpos9RSOokpBjN3dAEH
ZkQchiyqp+MU1zkwE0BVi7HrQtrdvO76vrWjqqIDB07cnabD65dfP3v2k2eP3n/1+sWkJSXOMVRpl1cXd9c3q+2WQtkf9t99+82jR5cxJEV1A6KozhyySzWvhKRQzVoMkTCAP3jJS6u+DOPLhsUNgMwVSMVmDsGAMGQTMVADCZBdGhGpjNoqhY4cAybTxsyijcO61FG1cuBaRm3mKAGplZHZkAHNmUk9UMzRVa01FSdoUoyAMGhrCGpSCQNzMFcwHXJfdSSOAC5aEbCqOjm4Rwh9jEepSN5R2J+OfQeCbmTFKyY6jceUA1NCCwAN0YBoqrLpMiOpcYo5hNDmKXcdBQY0A23cKta5FdV2fziJSiSV2kTUCQN3oLA0uaScHUBawYVaC0jEKaSpjOCKKGbedVmkuSuH2KQwYiRSkZiiqLg1I4xobhCQVrlHREBdxSSOHrsQu5PNMJuXCUzXm95LmU7jat3dnsapamJqUyHmPjBS2K6Gm929qaVhe3l2frE5e3nzYjxM1j3MRmZOXWaprczmLiImajG7h9YqgasrmqOK1WrjlJc8Zkr9auXANfTulAC1aqslBQrBU+gg4HGaI/SxhdCxgjHQ5nw91fnaXVcA9eZXf/A5e/7o8gfHfQnvX+1mem/7pFtDsaNfnvOQbq6v13lrctp8/FGa8Hh9WK03w4r7SODJlfow3L9+rVCG9ao4yDgCWb9duTeZ2+WTR/N4GPrALuOpTqLVNFFoAlIrRrgvhyaKDNWrKlFiBegoiDQAb7VFStsYBZniQIRQLWOoc4s9Bg2nuSBbUwMCEyHEPmRFBgMTA/fEAcDRnSLNY3FjVe1iIF6mbR9y3B3nw2FC9sBI6ITAITBCl6O7i1bE1hrOqhhYUarXuXnCTg0FqQIFwjaPmXvmJGoEqKCIBRBKOyYa+khjQXEQg4ihqc7NCJ0zonpmQgADGmsZ0kBoqIaRkSwg9MACICDoMUCQWomiaRPHB/CKYxtnkhbDYpjDLschUu4TFGTQbrN282oQI3sXMSUiCAQUmUPo+q4jSJxESQCJM2hNxGikoDH1CMBg7oLSyLzVGmwGmQChNFEH59AMqxoQYQdpTXlDacONFObiRUlRLc2MYI0ih5QdSY20tvvrm+VTIK2CWUwRAUyaaTW1OtXcryEOC9MQAzkIMYYQOUQOmWJHHABwEXvElYgDoEsrrbbaAB2ZiGlpa1ogq0wEFBbnBi5OIiIHpwfvgD/gaMgQeSkgf1hIuJtJWHAySycSR2OAhxM3Oi6WVmpqiOSA5rFaui9T6lf5tAVEY/AAtRzAhWMXUlA1TLEBmNZ16CNldiA3l1LmY+wSQUO2qijWu4Z8cYEBq96ieUBmJJXqYK1VQtsOW0HCHlfN929OLAHUhhBT4KKyPGfdfl0kysRUVUyUU2LMKk7MbNAFnrSROYr1BsFUOQYCB2BidQCARIjuBKjggGDuYqjuiixqTzu+ZFgx9UDNMREyQCR0A2RMHABUwQ1RxIjQmpg2AnAgzpENbDehTNSaYKAU3AFjMhFe6OJIYgDeIiEtuUIDc2BiNiIz5iUdhr+eDRDITB4a5dWCelarBqKGS6EGODgsFU4E7t4Alw5wM6JmwkjgHgCWte/yiHIpCIYYnYKhE4OpNpFpQmhWSjW3pXyeaYFSMJAH4uVC+7Uq8euNOzGZASA0EyJ88M29U3h0GZAQVRSJEMhhcYItprkHGWipjgV4l+1BMFsACo5IgO7mDgoPfbIAgEsBmvmDJOX4a9uRuxmAB4bDNO7nOgTc5m4zdGrp+m7/+PLJ3fGuiyG4xRRrqaVVjqELHDsY0Iq0oUt/8uX1y4mJ4ibgmi1zPCo1kyZ1NoTUjwAnhEltkxEcRTWnNBGqAZh2udu7hUBd5FiQjNw8cUCisZTINJa6BBrM3DHEnEXVTnPmOB3Km7uXlxeXjGQMnOjm7s4AUH8NTln04Qc7Kfz3v5b/J3Bwp8CIXEbp164FapEDzJuzdcJh9/bmbGXaGsSECJxCE2WkiBgXN9cCkYSHGCwApdS5QkcYmNYxTqUC0VjElu0oLZVhju88ZO6GiIEW1g8Yp2Px/9c//W/D7370v/yH/4vUn5tPS9XdwyGcFzz/Oxj6Q2bUkRbH2HJ4p8XU+WA0A8TF++bvbntuzMHMHQwRH0DUCL7oo/RrdQkQ0QGR2FWWi23BuQMulyE/vJoI5kpMgLTw5fAdvfEd1hqcaGFMGIGC2FxOABC6nENerbbL7RykuCO5mil6U5uPx/3t21dvXn97t3vebZ9cffxbw/mzlFcJiMyZ2EAAlucBqpubIjgzBcJ+e34Y9z/78d//43/0fztfDS/+6E/s+duGBxHcfPoZ0Emu36xE51e3w9XZeBhff/N6nta7V3fv/eZnAsiMLtrP06+++P0f/PR3m52Df0/hYi63H3z4o3p9a0eDPpT5REml3P7xv/xlp5WI4vkqna/b2KLK9Zd/9skH/9MT4OWHPzzV67dffzvI4/33xycXjzkN8+k+5A/x6vya+6cfvH+1Xb/046/+0z/gOLT6vNmx4PnIc0nehgpZH18+on59cfVIa5jenkSwUZp1Hvf3LuoBY7eqquiIEEDYZ7h5dYOAT947G487rq1Hfn77cnPxuNZJ5xoDITq6zGUccr9ZD0x4t7dhvZYXt9Ms43EkU19jnWsgrAaEZPjOufeuEw/elS4+qEWIgBCYTmNJw1DqHDse52PioPNJ2okpzeMpriBgNx7GsTXzuL7c6nQKcSAAbfs6HQmGRCi14eAUggFAgNU6n14eWvWz8zM10KbH07zdDJtt/cV3L3/04x9+/+3NKqfHT56sV5vr2xK7zGEG15y7Jh5CVLHwEFwOKfa7+/3lszBsV1Knzfn67nDsUl+brIaz8Xiqx1am46MPt/fT7fX186fvf5SwULt+/fLLzWp1qvMqhGrTdP/59uwDx06P2Mabvt8kOZLrs482TR6S+Z98vHl9c1hz8So3I1ZO2203zoccgamj7NacAGopAU1ct+tNc1lv1gDu5EGCioPUuTYMpLUxuQq8ltvaat+tbt9Or8vhT3/++SqcZ4wJZJWHWuZnzx6d6rS93K679CrZhjl1ur5K+0O7Ky1vL2oVyB1TmCdl5Ji8TmBlSmscbyo4xcgxspghEnFUUwREJwIQEUQC5iaSIyMAETFwYBQDp6TlEJnNHhrOzWiBS4KhmDbT1J+VsSIFNa1Vuk5XCc0cgbq8AhvbWCKTzOXis8dff3/jv7j97Ecfv/7294lRveQQ+rh6dvn4m+kbcaY2//QpfTWVs+EyBn91d3j1ZvrJ++erVf6Lv7zRw/m//Mc33TnFrhuGEJyGjB3jfCrffn09XF7U/uLFn96keRDVal5dCW0zbA/jCTiKAzCc9IjIT588uT7uNNTzzcV4rOjAEJvoNnRvdneBre2Peho9b8Nq5ern8QrnfNy/LQ0S5vVq1Uqb2tHFnONh3KfVtjSu4lorx/xoff7ly2+25+sf//DTf/Ovf7/fnOWu70TXZ1uI2KY2n+T80ROK8vSjD+rcQNu261X
pcCjkPKz7BoYB0LTWViYR9hhxaruVDjEMjNgzWB21lVU/9H3Y3Z10hhT6dNVtn17Kqdxd3+aAsctOfW0jFVrn89nn8349jjd9fzbvsRUb1r0qrc4vrm+eVz26mVNWtdNuP8SwXmcifMd2h34YpIgDxEDSAB/ayX6doV+Mzyiq+JCNAgJGdwADVQBaahRMjQiXJP3igtXaAkeNqZS6VKSDCTrmyBQIwREgIhp402WH7yHFpk1dIPBw8UjrsevXh90NKGkTZI4xlVrur797/PQH7330wXfffceuqUtyVArdZnPRmlw+2fDN/vbmZrNZbbarqU6IoE1iCO6C5E0151XRkQjVBN1Moxsu3R3LNGlaUcjJGdNUdyEECEmkBQ4E4j7FwCoRHAIAWnXz6Mk9GCISGLiZmcxAJdJSBE1uyGHViiIvWyIi8hQCEWkBU4rcPQgrSFWamLkLOYhUprjAfYhT01NpRd1BqzHE3GsVNs+ZazlECsUaOlor1S0zk4Q2T9Qjdb0KuRE2FJ0pkMuyRPBN2qAVIoalrEggYBCxYiVzMmsEFjktnjK0QNQRhxSTtAkMRTGQqSoTm7Ym1QFiiK3OhAoAgGwmjGjGTRugmBmYxTg4GpqXZqZOZjFzUyNjUzNzTAxuOlps/G/91t/45uU3x8a397sEfnGxJgcALWOV2loTdzOF/djWXVanhJRiHynVNpsZB7rd7ZjC3ct9v+o350HKuwCaqBuYeIqpjSd0Z0QTdZPAhEiszqjz/jjfT7MjoaeUnHD7GLu8DQ5BSph3OFVsJXY9OGMy9vVl9wNUVJVaqlETrQl9c7G6fXu7O1T8wbNNxzGsTkgz640cPtmuvvnFN7dvDj/47GMpb8+36f6bL9LVTwcO08sX376+3Vw8e3tz15/FqSjHrVQgvUeqMZiZTGW0wGm1OZZ97nLqsozzkGF3fzjUCutgJ0BLKgUTqrprCwSQkUxzCgBQxdOqH+exQ2X3nLIBrDmcVLGNaDUQJ4LEnUCrWoywmSu4ag0cAqdaWyCLgVVN1EN0IipzgeqRqCEisgM/1DcDnEqdW4mcchcookoNzAhkaqIWiRCo785EYbSCKdsQTrj3BhGXLj1qDaiLQz/Upp5Z0WPMtbTMhBTBMhJrETJKbtpaAxOVQCSkwS0gMZI6mROFTsmQKwTv+iUvE1s1UsgeyIMwhrgyj+IlItg0SvNAmF1jphDjOMnQ5ZApJsRh4EB9SGmem0g+v4BVF71sLs/m/U1rU4eQYupjSshonCkThGqeLBCQQTDigD1ARVWrM8qJTCMAmhRp7lAVAUnNxURVA2FBT5dn8WzjXlYBnHScmtegDsLACTerjmJfjq0eT2U3y/hAbyzTBLlLKTKRM3NKS1O9qYIvsh6Bh5TPAIE5QMhETOgB37UTAaA5uROgm5qqu7lba42YVYDpYZf9rkocAWFZUC2LcXB/2FIshCMHAFA1QHWgJZWCQEtQWhHdESggZwWk5WH00KIF5MhgDABN96X6aki9Z06oJ3BXNs+EQoUqYNEQK0VCD0v1Ac8MTsFEISdMDBNNk4zOq9Cnu8Mh98PhOF7kzud7zgOyQmATMTOkUGvBJkPOl9swhoJIpi5VW1JAqrXlGN+VZrmZMkCgwDEAgpqmGKa5qXqOiRNLmwfmKw4rbUciAEAzBjTHB6eJKRIycZOWmAENIyf3LcETws7cHRs6Bk4pBAQxAQACXhrjPSAHNm2MjgSOSDE0JKdgnImz6MzoBNEBmNCX99hbIBJh9LMgLaAvjCJHCMjkzsYubcEdEqGjL8ksAnAiMEBEF48ckjZyepgJyM0cAdjRzYEeyrzZnWFpyEQ3YCc2ZwXW4M5A7AhuIAEN3BGaalAOSEhBtJmhABBzBAKHwASAbgauQFhqIXggUyxHYzdHBEa2RVx6SM4tB3+EpfTDEdD5wfSo7+qpAJAWZvkyPoEjY1hUHgAIxAtixh5AkIBAD58dBwdy0OV30gK9dqd3lVi+8JWInSEM+dmT89im3Wn6/np3dnV1GMc21UCYYm7SiCwHYoRVTvM4Bqmnuf7l28O3u9ZC/3hIME/oNPRpP8oQYi+yr2098G5q1yN2FmbRnahTGNRnoKvVxuaRglWzo1hAmJsM665VTP1qnMfH2+393W3us0ETxCJ2LCIIfdcVlX7oRD11ycVnnaQUJ/j8+Qtc/DB/pQgt+bN3qTOAh6DMX/MZOfhyN0pDEqBpLqlbT00d3UlmdVcSW0hoKOBq1gwlEDMHDiimhuAy1YY5zeaiNqk78STaJcOAoMvcYuCIy9v9sAWgB4Y8uC7exsRY6yfr/H/6P/7vfvzZ1ahW5YiuzGFRcNzAzNwNOaK5qhAHXKR1QFN3AEIyVaJ3xkhf4FROv84eGjIGB2AO+s4C5+BM/NBetNz5AJDIVMAcFz3dHJEJydzMDYwecm1ExEHEzBzRmcO7Ze+ix8Fy51VwWwzXWqdS9inBsMpnl482Z1fEGSiJwek0T6dxmk+tHMbp7m73/Yvnf/nFN3949OPm6Q+ffPj3Hl19tkrrBMwAgRFRAXSBdTm6mpiD2pJCtHKaqBkgo+dR/Pa0f333Mg4X3411/bPfvT17eo2a1v0v/vTnETZ58xT7fjffTdNOgJuTuXRo5c3XX/7xn23Of7w7lVO9JzbUMYz3Nh5P09hQrm9fH188pzffHF7/Ig/l7DJvnm0hRtMWM6rd912vRpZzqfXq8up4uPnZ3/7dF6/ur54+/e75r+o4v7nZPf3Rb9yN8PZW1pcf87CeygnatOovA/b7l7fj989/ut1+/f/9LzLiZ7/5k+NYu0189sHZ7v7N9nKQegDQfpW01el00iJlLJePLok9JQK31Sq9eP3akl+crV+8ukYPp9M8TW0Wq02OdweyFGO/2WzRTWpZ96FO8ziNZrA/zOM4m2iXM1GgEJrK4uPFvyZ4/nWFHN8R/UOgKuoYELiUihiAsFYxdVNJOQHHrt+20i42fdXxdNr3XUL0JjWFVEdypfX2HAHD0ImbmJrKcX9g0vOr7nD7ery/DYFC33HqlNIPPvis7zZdDh9+8p6RqSkDpIBuNUUOOZpDDIHcpZYUk4PNdeqHgVim03HIYX93X9Sub28RvdZxPF2DTU8fD1Xeci9PPn5moV4+ffTy9vXFs7OXr395PL0o483++VfPtqmfv1od/uIZff1+vt3I151+5fXLl1/8q1df/Ony2ry9vf2N3zr79CdbgAaMrerbF29QD//W3/vhNy/f2FLEoIbgm3WXI42nORKy8niQocepzZNWR/QQVmc9RVxi0qUUFT/cn3ZHc9qinTfL5r4e8jTX6nB9dzuOhzZPp/1EasdR07q7vduBtm3wgW2Vw6rLxG4gR5KRbdJqjq3lcUawLAWlIWF4ePogOdKyaCJw8Ic+ckMSAHcnxsihtja1CszqJmZNXd0BUEXcNKZMmJgjogeyHJiJc+4INIiboDkysYmbQiCfx+n6+v5Y4L7SH/z8F5Ch34RN12UK2uzm9joAzzut3P3i8+vHV++/eDMDB+9TdXx1X754vUtnQ5
fX330+3t+nyfwHnz7ZXob+PF2f5Obop9odT+n3/stfHr63wMFQKQCCdymiNBJdxQgyq7X1tt9uO7XTJkBPPk2jSmPk/d1JT1JHqe203fbiJeZgpq01dCi1vr57DWA5U2tzyimnxORIOHRJpZ6tuzpPFPLFox+I+5vX99SyjPzVV89Xmw0wA1HgVE8tKOqx3jy/Dw3evni5GvJ2fT6dTvN+mg/jfJrHuTSF/Vy/efE9NO9zn3LMIQTkdTzzCofdgZBiwGFI/dA3gWk2hMShS30/rLfTOM11DgFDH67vb8CFqT/rn9baxrKf22FYx1evXrVS1sMq9Svv+tf395vV6nJ7Nu1281iR6AdPrxhVx4nN+vywNshdT8SttofOIV04ZIsfFg3A3MR12d+KNjBTrbbg9IiX34sIzIQIywW4VA8zM5CHzGndh6GbaxNVtaVNQ1oTB2gLIBDAgJqUKoWZQwoCgJEFsIr3q/OYuzysAAkD9cOmNfnu+Rcc6dn7H85VpnmmgFYdPQMkprS5Or969vT25u50rMydOcScmlS1pstD25UpcIhmjsSG6qAU2YFEpWkDImR2C0TZRJaloLloE3ByQ1F1N3MxVVUjTu6CqE2qOTSpASNzNvcQMqC7SArZAIHI0AwMmdVc0dUFqJqV2uZaKrjWWt0t577UhswOFELXRNWkSVlGKQNfGn3RkyqYVmuVgWqtZq4ujDzkjTmBA5gTuLei5RjYEMyAmXKMm6Zo4G4iy9Sg5moMFpZIiBsgaGtzOa1z10rLMakaAMZEAcBNqs6ISkQcogHMdVYXJDTzwMkAOaSxTXGpPyd0YkMEYo5ZXWutqmre1EWBHBNxQGJAD2y1FpXmInNtv/rmy93pfshh2/dPHm3d7W4/7acm5ucXw9l2RQCOtjnrOLEBFLG74/Hl3c2xzH2fcuTM5Gybi6GY1nnGd4KptMaIgaNzNMDapDav1ZsAeAiUCKDNcwILoMyw0AFMTYoQxYTktV6//vb6+uXxsJtPh1rKXEvVIm2SNuaY+5hXoRs4t7Hcv74jw8Pt/Kd//s3PX7z9Zrz+Du5uN9N8Odqj+UhF1M428Ju/cWby4m//O7+dn12N7qfTSSTsDzphxotL7fFk+329qV5O7bTf31y//WYqd5jpoPNRpAir4Hx/lFMZT9PUmpAHZnJtMtc6ORB3PXdDdVyvzyImE+sIZZ6qe+A+QPDm2Rml4Tx2Nn9yfh4IKrSjTXtr93U+ajF2DOAERl7BIOKok7gQQgicukyB+i5vV6sYOUfqIhGYNJnqgkC11RCHDgMZM3LAptVcFVXRqjZTUHFTXeVAqJNqRY6p02br3PUUTZBjFIYZxbJKZyUK99SwNq/9MDjzXKW0WqvEd6fiHKijBGruy3nWmjQn5y7yakirFQR2ZiXPiaNJRgjoolXc5lbMEM2jeRBZB9/2uBqCB+3Os/fQbcP507N4se0+fI+urvL5+XBxoSkTdYlzx2HTxfMhb/s4xEjEaqhAQERAuGysEdFLtBqtBqlZSm4nLDdY7qGdaplPsxynWmpD9ByJg0X2zIjgilBSkGHVqDcMALkpKzAEDn2HOYo1dZ0Ox3l/4HfnA1pefKnqFmJcEmLLXqpJnUWKqAEjJcLkGEzNpWo5yHTr9V7LbR2vp+PNfLw77m/n01jL3Forc5lLGedSrImpgZubmtUqgMQcOCwd0r6cwM3fdag9mIuWn3dVMQczN9WltdkBgEgADPThXAQA7rTkd1wRkUNUppmaDJ3EPFsDoCp68mJdkAzCiCE5YsfcgyWt7lLkVH2afNzrW4t19OMBd7f6uoap8dHW7ZaPdN5xopQzozM7hRBDXOVuSHmdu6vLs7xZvbq7dXN3r+oGaGYI0KfO1R5OxLRwqfHd9221FQQgc3M1xEAdOSbgK6BHEJJDIEqMATUH6AJ05H2iPkJPsI4hB4yRCfAx+c+6+FEOm8g5xJxTDIwIhJRSyinllIiYOYCTK5iB6FLGgAgYOUAImDrqsvgJ5ZSBwdmAqgm84xo5PBrrmcKAoIulKRIFNAA1UEAGJKRg5giIbvBA1CVFEHBV6d2zeFBCJwUXEKOayKMjOBtgA7cH+o+hA3kAi+yR1bJ6MoTqKBAVQAoBAqCZGYCjI9uSV3QDW9zQQGikTUqtY6vHOu9Ox3GcTqd3dTfuS7kfLaVmpu7VXJZ9z7uzFaIjUwQ3cHMwf0CTL+wYW2KC7oBEDrY8ENXNXNXVwGyJEi2oalz+qC7T1rvT2/I3CKA/lKy5IgEzAdjF2XDes7tOVafqXR6MyQJRzkgBkN193feXZ6sPHl1Eb+dDJKB95V/d2olSDnjRJyJMKanjKK2qEkJCPQ8WI00ceOhCIGRWJKPQfOFnYRdZERqxGxBDM63qszWNPpaJAisSMjN5YGQCN6UAjngotV9vUl6fXV5aUzboUjydij8Yb/4HX/hXx9i/bi/C5QOOzQERIsEqhiGHq6vNWC2ndPVoazatc0pMpgqAUiUgx5AmsQbYHBBApZlDSh0AuuPcWg7UMSHSKDoXEXFxdyRdYNsEQGAICqBISkFj0hgLKI7H//BvP/6//p//Dx88y6dpFHUmDBRN3UzVxEwBATkAIhIsVkID+6u73IPKiMslYUsnygKPoaVRxhzB3M3cAReM0cP16Q+hIXWFBedmioQP/ZK0kLBNrSE6Ey1QswVbow9yGC7U0XdyHMBfBdCkqbq2yhFj3+Ww3qy2kckRpBQRaXOptTFZiNTqNJfy5u2LNzffq+hq9cH26Ser/iJAl+mB1I6IHFhV3disORAABgrgZGpm1hS7y6vdOE5l3J/KR5+eU9cG6V8+f31/mKYXOx1vP/idv/2DH/7wv/qP/59/83/0H37/8y+m0ykFf/n5z382/wNK2Stgrb/3T//Jp7/1H2j/VMsvMoyo11eb9Pb5N0//7m+n1TqfP3nv09+p92X8+qtVbl98e3/26W+sHr13up06YGwNqtbdIZ+tZK6RtwDD/c2fHm5vnr73dHv1KF9t59v9ej7c/sl/u3n8Qzrf5OFT6Ae7u9XD/euXt59+8uE69Kfj/RMP/+K//jcXT86++uM/W4Wo+/3u+xcX55vreZzu7+r9wVXarNnRZjCFm8N+quP2rD/etruXd+HQra4uiMPq/OJwfb3N/f3uzt1SyG9udlKdmTZn58fdCckRfD4dATB3wRA40jRP4zgu65dAvFDWzd+psPjrNNqDTI2Iph4y7cei1ZiJwFIEd0OwEIMzA8+iU7cachdzwnZ/PO5Djp0FPtbjs7P3XRAQxI3RobqLitqw2vp48/LV990mP/+Ll9St3/v4cQzx/u1d9vD18+8vVp+RhtNxijEz9+PxbWKdjvXZs2f73fM2i0VBoy5tztbn1/AqYMcpt9YGiHXy+/ubPp9LBzHG4/70+PH7ATnGcH+9Xz8aZJyM/fZ4Xefj5dWT5PrJ4+2ji+76+u76+Xevnj9vFSBFU3P12mA4W5NSq/t3n4L0i7/Yv361J1g/uji7f3tAiHcn+W9+7+dnw6NZMG87KVrdnj364Dh+G
RN99Ozy62/evPc4/d3f+eA//2d/ArbmnDrud/M4hGxqHJXRi2ggkQLVqEuBB9KjTUdTh8i8vVy/uXuZWnc8HQqQaRNKsR9gOsUEx+kWvC8F+i4OA0zq6349jYciwslWZ30rBjHoUTkQI4hCoGAGjuYISMsTZtGUUZrGGGYdz9MlEZ/msspMhMxBHZnY3TAwOhgYonHAuRQXY6acaG6TKpcZuU/c5W7TFzMxRaU2+e7N/r2PH1OG7bP3X7z9ZRvFA8lkVht6Q+TPPvn4V6fn//4//Pf+8X/0z86u8Dd+8vj6D+/YbXc/9jFXOV5+sMpdffTJDNfdm/0tP9oHCL2uQ8PieP/67nf+1t/BG//TP/++qKXAGNJmeyXT1CW/6C/vZDaglPoI0nXp+n5mIr9UMvTATmEsU5/yZtWJHgDbsOob4TTOIWG3iYxoambad/3r+/3l2RZDd7HaTPPY5by7vdeiAEyEp3rq0qYKXDy6qjJth/NXL14w0Xa9moo8eXL1zeffrM/Xx9F+87f/zts3N7/61VdP3n/68Q9+9PM//LOYIvV8fXN9drX+4Md/65f/3S/GIsTMFBCg77sqrcsx5EgZ7u92/dmGOB3H2dlDl/oh7Q+7Oh+6rldvpfmwOgva1j3P484NVv2Vqby9uV73F9cv78/Puzf7u/PLi7PYBxLP2l1eVM1n26vzi9imw8D97v6k+sAqWm029TSBGxGiAzMjuGojJKSH2pnlqb2YtB+M1Q8/5+Zmqg641KKZ+UNd7+JoFEEkDoS+dH8qBey6qGa0VDMTumOVVk2HwOBgqiklVVG3nHurMSKb7A2AXKTOwBhCDki3b757/OyDR0/e//77b9arfui7+Tg5YJ0bBRnWicBaq5tHF9P+4ATMHENSMbPirTgocGAkwqBWyB0aoJELuruRmiljB9W8ibMbWN+v20ET50KziCFnlUoUwcQYjCwS1zZzzIYEEJbTT9HJWF1oaexWWIzqtKx3xCpGptBRJITggcygqRFhawKm3iRwrlWcyX1JgVBrlQMnzmMt6qOZRUpNKxEDJbIc0QlB7Nj00LRLKSpwjFxpipkcbDwempCpQUAKAXxuWgIHpBZozTGVehriRlQJU9M5YMDlXY9UdFb3QGaihDFwh97UXcQAHTigGyG3Vn+949JWMQRDF3NVYwyIYA7WKgEAeuLYwAH5NE05smoZ8uCmkVTRhjjsbooTpjho01pLxBw4zu7Ho9KxPHu0lrkSOMZgbofDITJtt+vTVJzBYzhOZejib/3s09dvrj9+/6O/+PyLnchms4G3fyQ6AAEAAElEQVTnAABu5q0iqIg6kDtUABJHB+UFkQObdYfEtao7T8UCQOq7kNe1oU4uk7YWl4NDFkOkUkeTMSBHhwL3kVKKPSbGzfb+fi+nYyZu8/j6+rS/7cYn69/+2fvpuM+n8OgJ3RR8df/Lfv8kpXhUL+sSh/Cs+3H85e7u7dg5Hr/5ltuuu7y42Kzu73w+pSFZv3FtNXbGEVIXTAK1enbeySQNZQ5UwEMIyWIVDRCcygziRrPMKJFAeoxo7oxFayGPFLqO22w59ZuLixf76+P9HYKBwyhikRVdAdx1aeG0wIJKQNz1QtRUAL2ZhBSqeKugVRiN0dyMQw4xVmsOzjFWKQup3UyIWNEatC71AYgwCmBirK2AmJuQgpMGDiYutXU5cDYg3J5vDm4iCoGIEoUUY7wdT8ZAK7JG3twYUgxVi4ARBjVrDl0IbrIINU4hICozp+hl7sgRtGMSMdEGQEiLb5tkOnWAhEqk/aoLMWltgoTgnDmkkPpVd/WIIU83h/mwM0IXI4c+RMmpSulyCJQbhRCiiKHDwqU7turgOVAMZFbBJjN1P6nOCDxNpXloC2CVzMCtHRG9z3HReX0u4+EQ+r62YAdC6YnBog9nfVwhRGtu1aHO1ZbCxYfjgdo8YQxENI8TEnBA0GYNIUapJYRkbhBjSEk9tDpO88mtLeEioodQxTTNol5LMxVXV1MTUIdsTsERfDngEAV1Q6dIAZAIHInd3VEXncQXlyBHJDeTZaA1UzQAQGYqDxyQBL6gQBCRAMgQRQ0Wn7WJmXtmQu5i3gxbabcuIk7VCR0TdPM0IXR9vDA5WhuJu+AdQG2lMXmR0wl9BLd8dlQUk1nGY63Ptj/gcGaCqKZzKT4CRzHiCMxY1XLqJm+b7fZUVNlSjhxAxRwNkJjIXNyRY3DAeZqYOMUcY2SimCNMUloJaEOXbZovGT8d0mx6a24L+hsW5wsxIb5rOkbE3PQRwG918QeIa7SIlGKMgdTMTTFxCKzuYLZgBgMuFXXITotpIhCqA7i7Q0wbljFoyS4NgxGqERqiIVESO6seBUYCBTRc1C9HJ1ZQCpmqEjgAqTYiYMy+ZH+YVZQDJG9nhgf1GU0AAJ1MAzibMmZDUzRgjuZEKASN0RzRIAMEAjBXCoBasQABuweIYmhkhq5ohhYig+dIhozJUWoTg9ltcg0A4M7m8K7ow5cZxsFNgYCQlmo+dzdbXnhaWu/VKyIgsFojRF9CQkhLTR040IMrRonwnRvpgWm9FJMvdpXFSkfIi45GgObLgR9oYfuBM7GZE3Jrxc1WXfc3P/7w+Zs3p7mFHGNTcHfGs6sNNgWRdJ6lNGlVpaJbMfn+ePrTt2PNnZTqrod5WsSIqjoBTmLimhKhtEQ0up9EO3ZCJwcwBW+jYEZutQHDrH4eY6BGiMQo7qY6/P+5+pMnW7YsvQ9b3d7b3U8T3e1fk/nqZWVXBRQKKhgAEkWhQFAkIZNoFM0ISSZpopEGmuqfEScymWQGM8mMMkIgITQEQBAACwVkJbIq+5evf7eN5nTuvpu1lgYe96GKMbsRMYh7jvvxvb71fb8v9ftaiSg6R2JxjMinXKe7W1lf1VlvX11rrbe7HTAKBWU/lbqoYnqvU3ztH4J/y7b+Opd2n1FDQuoAEyA36IkatqnNvKIglMf9qmew5qaYUqDUytynpKUCibGcco4MSQgBplpjjIA+EyEJe1HzViuiGbhRWKKeyHgfKrw/3Rq4By1Xkb77zfP/2V/9nf/Jb/1GiFJAhROjEDKALxBPRECme+4V2D2j2hoQLfxhRCTH5dIiAAO4/x23hSX0VjEHIllwcEKICGoG7urGHN4KPgT3nWj0NsNm5qiquMgCvvQM8MKVv88QAGirgLIUUf4pqci1IFqXRAL33UpSctNcm6OX+bTYIlxdrUTz8Xj75vrF9d3r2G0fP34/xgcgUZZBRxsBMaOZgS23EBAG9YZ+r1egO6NQZJqdAsu6uwH4xnq1gfHw1f7d7/7GZ3882Ue/evNP/unjf+f76+8++86vP/7qR/9I5+vPPntO2/N895KtEgbmeDy8wLP+nd/9n++6cPvqi4S42x0opP7xGjYbevL06W//hU9enNZpS+A15+Pd+Pji0bDpHUJH0Rp++Bf+V62/xNMuih/vrqfj6ThO02mu2s0e2p2/vP1qvel+9Pf/27/8N5+SoGJsD7cb3IL5zYvX37zcvn791WGart45f7UxSlA/v66qsuo0pLRe
wbj/+A//pVd99epasV9fXYVhmPdHLUdQkJimNp1dxP0pH+6OV48uTmWsnu8OL5vOjurgZZoSSxdFtTatKYS7m0PXBXdgRNMGDLW2UtTM3ReB6C0bHr/OA38ty+LbNCaISFPwZX3RSp25ag4pWZvUNUVWVxEk8uN4kj4QEwWIER+GC7PSVEMnZS4MU9q82whBEjT//Fc//fyTn7SiP/1Xv+LYffDrTx5/89ez+Yzt4nx7HCcHev7izWrTX9+94Y6NfXu1PpWC2CBA6CMGN8XDdHDHEMNuv/vge9/tMJ6m41evv4x9rtCDUeq6sTWrI79p635IAWizOXv/6dXZw1paqXmd5JOf/uRH//pHbRpTerB6dFmKowRERc2lVBKwXCQt6Em4vUMtUPNaQZ7Xo1ZrapFX5SS1YfMGU83HAiS/+uiT2Ccl+vEvPj/frgLpr3722cPNFlCmuRXVdef/+//0P/9//91/+OarLwmLJN52URsSYbM5n6Y8+np9SdbUSp3b2XqoY16Hbev5toAWzzmvOA4xzXnMc1lvQ1jX45hTDrrPfOTOU7mb1aFOBoWEk5ZKAYU4kFTXqmYA7i7M4GStcgqKbI4xrNUciIIjqju0EKIWa+AhECzqIlrVkbB3DgXKUipOTg4eRLDjDGX2poQGIMJhJXMrzaodjx9+/xs/Kn9UjD2A9DE1KbenZw+HetqnMv+Lf/Dj3Z2eDemPf/lRijQATXelgm0frT95dfPswVa789//0aebbXr6zfmv/pUP/sv/1+frB1ejHfttvH3z+elLMDQOgUga0M3x8PjszG3c779ya+w4j/l4OsWOAKyZrldnmc3EiuXzs4t5mudagW2qtYOQorBwFBqGYco121hbXUG/HoZIIobXb95cXG0rqFVWnENHv3r+8TeePfEcsM7rzfbl82sjbK11m3VTN4Bf/eqT+ZRjt/rsk+tcpiB2FrcDr253b6SjrPW83+xuj+88G55cXX6ZeFbD1DnRfreHhWRIcLvfXT64WK3WxUFBUz+k0Jec5zKx1SGGGELfb5rnNAzHfAKCzcXF8ebEQAwtSgDI51dnzLyKEWvRqWLC/WnukNnr3YvPPymElSucxuPYrdJyF8QgMSUzQ0Zi8rLYhcTvUWggROa6PHfunz543wgLvpzw3e8VgWWrRu5GDuDOIZg7ujJTLTV1nWp1ZiJ2RwNTt8CBgN0xxlBVS5sZiNFRnRAdhVAip+N8ip2swrbUbAiE8TidXr14/ujRN54+fO/Vm8/6laQ+CPHrV/NwLoxkpVH0PE0sXFsVInVHQhFBRmiNENWbtcYSmEKrIBKr5mZKUbSpVkd2lti0MRoRloYxdBPeqTcJYcotSqilqToQqs1MZNhYgrlXLQTiqMyJAs6txhAbFAMkFaCFOZiIIiK7oSuBOyMweq6nFK9qkxC6U5kVaNVtss5uFQCDBHXNbRaRhVQRQ6eqQtDYcjs2BwSKHffrtTVQVQWbWjGFnCsJMTOCGaj54vhsiEIcUGJpNTkBx6rqrmal5GNMUVsrqut1jDEpemAu00QGzbSL1KzVWlJMBk5IdN+poMKxtSoigCZICgAUplIDUWASFjRVVaYQkFqrgZzMAcBadbBiJklu9tcvd9dd1/cpOcJmvT7spgqKhKkjUN/vp0Sgal5MQvjWe+/tD7cxCRGMcyGzFAMj//inn6SYfv7xp6VpZG71/lkwzxmdiDCgGAmRNAMiIieoDZ26Pj65upSuV4TTVI7HiZm0OZqtoxSCwqISzCuYqpaWJw/q6katWMM6tzAgUGXE9ZnURnX0OWspVuzw5lR3c3yze9Lxfr3b32IcLj/4M9+/e33z7oN3dSx53MFG7jis3kmbqxSFTnt49Xo/VT2dbusp7N9YdzV0a04BNI+MMSCo6Gm8XQemkOIw9EwlIpaR1Drq0Q1QGJnMMsZI0iEhp8NhJyIb6V1JYu/WGky1zllzRgzMgaxOkzA4OSM2WDoANDAjETDOrZBKU40MYKWO6spVmyEFdAToojA5AY2lza2RQ7GCqAE5srhjkAhs1Y0ZDAFicDcniDGV0ykQsSMAUh/n1tqAvJHu/XO3uj/OwlxPYxc6di1tNqFu2ExjAWoyQhRuTY0IKQHA4nVIwoGJlUSYQ+iITI2Yq0IKfWJDVCWsrbWGDTiXyuDoWsfGJDHy2dmGCIwgCntphADaVlFSP/D6Epu02DKfgDyEECi03Wm92o6jBnJyIAjgYlZArZaqJOJUG1ofi+aELNKjldZOfd9ZaUDuACEgMYlgTMgoFYwCuVMn0SPAXMEIkCQmYkbXMHDXWYg8aa5Np2PWuVk2vG/YgTLPoC4xgGvX90jsYKqqOlnN4DjpXWCGGMhXisFqqTWbVnNjYVrm7Xu4h0ggYG7N0NQWI6mpMyN6CIGIzIBMtAEhIfjyKLmn4i2joqmZAdzDX8zdtZo2NiezZktZp4MpAqqpEDnYUn9urTEGIrRWVuv1au66eCZmTNzQDRXJaytECN4ciJlzPYTgjOgOMfSlzQYTCRBhbafN2ePjaU8kppWxfvfRoy8+/R+ePf5L4J3pFEI0IKUw22gOyjJPE7QZwFA1uLu7tmaBYIm7gpPwsmkBJDMlQhYCRGZWrUwwxGCOphYjK/Ja4P1J95VGsCLBmcD1fuJDWKQKVuvdnsbwDtF7gglciIUIEVSVAJkFzGtrCg2diRgZiBDU7uPlhoYEjowYWKqAY0TrW8toE/q6enVGAyc2RwOWDJhBnNSsIgezCq6EgUKwSo7g5k5LXzgt5i8iNEJkQAGxtm12XnBiNyQDYhR2XZrw/G04y03BfbYKnAhdDSuHuVlir2qZaw6NiQArORJWwgaVRcEaRGaJvuB52cERSy3AjOhugOaBCOX+LliIe28HKHdwIV6OQEi0aBUOBq7mTkgIizdkURLIHHDJcQMCuFqBe7LM/W+AmwMQkHqj5SHjej/G3YtNSxANHdzh3rUE90AbZZYFHv/gYn226VozEcxTWUzBavW0P7naaTzudb7YrF+/um2Oz29OXx7mUV3IVhEFYW4aCaE1CRKIAoaOtLrnMTv3TXBqms0IAcHA2jYmVQD0tXBsFZzmWpnRtLk7MWlBRiLCXOvXryK6WinTcVwP5ylGlnj78u50KF0AZp9zvk953edX31q2/u0U+yeUo/sN5UIjt8ereDmEPtHcWjWDCR+cP31z/fwiSocG2Ay8eg0G1TSYqjUkdgQkZrDAbE27IEtvXRDKaMLQhWBzfbaWT077XCv54q7HAN4TdcJnfXhysfr2e4+/88E73/7Wew8errlLRUWRhMOS3FJzBFh40gvglxbyPJKa3ge+kJaWSQdH4vv3fgF1LXBQWHK4Sy4k+FI1rJUIF0WNmM1sQRaYLbDNhV7kZveBgKXbF5l9aSxm1tYcbQkzAiG4IRISv5WW/rSr6DTNKQ3dKsXUUQjE1Mpk8wSINk9uFcG6jjXXl88/f/nmdal+9eQ7w/Zxnwai4MjChq6mLuRwL7UtLrwKgACsXt0LYwCwalWm0hOdXtx+dfH5n/+9v9HKbj3
0/+af/dPv/qV3Lre/fP3VT4So3NZ/9v/9h+v18PmnH334vacXj959eb07Tte7zz87+8Y3POHHv/jk+3/tP8fHD6fd3SaakaNzm30Vw7y/i9uLsrr4xve+WT79WQreD910/hDTUzNhRgWUblh/8H1m9RdfBrwlHmcIfPnos+vbl67lkv7in/srf/e/+C/CeRjCdtzV1Sandfit//g//tHf+r+tcP3oGw+ffuM7L/7bn1597zfp17/95D/535w++jF8+svV6jI9/LXNd37r+Y8+erDqP/3hH27X2+PhFFaxTbV52TyQ2+uJ0T/5avfmWu1h/Pz65Xe+9f47j5/96sWXVuAw3g2bfiq1WTs7f3DaHRBJhJywqJpD6lIf014PJHRzd7w4F+IAUB2paSEkwLdWIvgT6bO3vr633/DjnEvVasoM5g1t2fyrqursuKoln/puuLupJJRr6aKUohGEyKfTifkMIpiBFuUYzCSw/eE//DvX14dxzKeWVt12rnq8u66FHr7zcN3z65fX0/7OWn305MFpf9hs1sOKD+PhfNvfPmdQWq2Hu7uaWzscTutVf3z5YjNsx+ubz9989V/+f/4f3/n2bz169K6sVk0thiBeLs6Gst9FTl89/3wu86c//4M2lblpf7YNpjHQfMJuuypVS1YgpICmjjF0MalZ6ldvmzVgrKGNjV1ERIvnUbU5dcB9P+uobgFhGGJuLlFy0bnowINpSEOftR4Oe0YFwG9940HH+se//49fffJpWHWIMNZpnXoD7Lu+Fks9XV7i8TSdpf6wm4877c6DanEBy9mzR77o+s3hzYEGsEarroU+N9NHiWQlX/1sttYLiGO8e33Xx2gZzc3AwEXAm1YCDMuITqimCB6IEJGYHJBZquYhCTk4aK2upYERMwtBLaWLjBgCDezQ1JIEjuHm5sXZ9lwNiuaOIgfuIozzTIzoLgy14XQ7gp62o37nvUc3r3cgvNuNuZbhLKTzvon/zb/xv/g7f+u//rWrZ8fjm812iNzSNt7Mu0A87qd0ufrFJ7fbfFitzqZxf7r1f/L3Pn764Fm8GN55wje3x215/OOPD5th/fp2B4YUg83ziXzO45BYxxJQvLUQg7fmQK1pzh4R9rd3MfUJ0qv9y9SHIfRTMnRcp977QQ3GqZ6mu4uzywplFbgfhqyGbS0tbcLDX736cRo203TYrh7WjNXm2sq66+fjCWsoWtU4N61j7fv+ydPzV3A7DOtW6qqH3Kw/k0fvyv5Qn33j8ssvblB8SB1kffHzz6fTxCw6twzQFGv1Lspqtbl5/VJ2YwxSlVJK2mxuk7dq1UIgtSpxnYbBi1XPNdcY1hUEh7Ra9dP+uRafvNSSu6Ffr7etmBB7tmka+0h9DPPtNN92Wp24nZ+t5npvtyZGYTZ1Vzf1f1vIcY/Ld3NFAHSn5eHv9/D8RYQmRF9qPN0AnHh5jC2cNAQHa2rmrqpVXaSZ16zWakCoqnGJ0II1rQoBEYIEQkR0EkKgaT46etel3k01IzBBVK3OmDiU+XR39/zi4eMCTz7/4ounD86pk4uH2+YzIl89eHRz/Wp7/sDZDUmQwKzViuiCAsgEwczcEQjUqgJFNoN94IEQzZoBoAg2z7kMSNBqa5OELsZEBoQhYvRmWgGJI/WmLcRu0jHIyiEAchc3p9OLPsVmhhBQg9tsbkKkrbCwW2vaFJBbA1WwVlshaKvYo1PWerSRpKdGpcxIszaikMxZ3YQAWcBMAKdp9ADzPBFF89YND8djQcNaJgLUqo6RpXNo4KZ5Zlg6sLUBmzmitCkTtqZzxD66N7NSTiLkiBWczRiWotmowBKoGQIKh5jnU4/igBKSmSm4Abov2jUAuZOrgbYmS0cLE5jHENCaWmUAs1bqTI7mFnlQqyl0ag0J1ZRA3KM3JuPjVM0sClXX1br//rOzz2/u2qmdTrZab46nwzzXPsDutOuTtFKS8OZsvT/O1RCYhn67P406zn0fzy5Wh31e7gIzQHd086qyBF6aAlIzFE7MIchq1DXAWhJ1PfI2q3uZaojrgIrGxkwSm+amxc20kJqeh55NEaG0mtud19q6oSVUUQSrU67H2c2IpWSrCpcPuvMw5crWjl/86rOz88etUAzDu998WGCuzvvjdSxaK8+Em1/79Xj57M0vPjoLw1laW76lmts4lQxp/UACmB/W/YBVx/2dFYNglJKqWisLTKzWSTBEaNsU3W0BBTTzBJBq1ari0nROTqW2RxdPsd7N0Jq5hIhugcJcswCqgzgzs6ODU0FHwLkV4dinpA4k6WLV94He7G6QUtN2yjl6UwVR6FNqWgG7mLo8Te5qlFlcROaaG3EctnS1zVPOt3fYoXoprQzDufVcoMnVGZ0Pu7NAOsj6vJ7GpBisCgMJceK+H9YPaL7lfJjKUbUpBSa3RuC8MJDRwSRSigygoK2X7tg0SQgOXtGR66TOySyDkyiAmrZ2P5yyWSn9KgFBT/J6nCjy2cUqDGl9dVXThghstZrySFQJ0A3T2SYK0LHnMpfdLZUZ3dA0KzBEdWMDA2i5SmBDdDIn9QDaXAK4U0rCQ1r4NqnDRDaXmVOiFFsxpDATu4tG5IGa6kVkt4JYtYCVHBRgnthROIDQ1+dFInBrKIP0Q+o3mscyj7XM0JYpRpEjGZAVA2PDEFYNM2gLLARuXgGRAjZdNgoNoKFp06yurtpM0cGa0tLhpcYhIoCwODUHAiQzs9ZaLeZLFIPcHN2QQMHMlE2tZUI3Le7ioAYGxLrQZJDMMGBEXwYhMbVNSEFtmjXotvghcI+GRhpSN5djl7bTXAYBAHXMxKhEZhVtIE+qljzQyaJaa9MqrSKWi5I7eRAOJa7W1bXakVlUwXRWx1mzBcekanNroRZUIyQEAiZyQ1UlXQJKSERAEIytqUNDwFKaOg1dzOO8PIx77k3rI+HfCMmOh9cGd6CFHJDYMTmQtq3IOeElhXeEe7MAFkQkdgxGDOjOABIigDO6ugiRupp5NV1IBU29GVHsKaaFuIMAGMRbX8fbLp44DhQwYwNGNkNnMXbs3CJKBahEstjs7lkrSya9Kd3naxa4mC5FSogEyD7XgegKuEUcA08IJqSo7IpkiCAA6EAIDZxCyN5EuDkWchIXp6I6kTdm8tY7oIkjETICBKIg0WuBhZRk1lSbtsiibkE4CAb3wAH5rZFiyWPcN5yhmS5qAQEBgpobKIAS0r3fA4GQ3ZdJf9HfRV0Xu4YbMIu7070IamYGDkCG4O6GQOioujSILJ4sAAACvN/GAdPSDXivpziYqyozBqLAwExk7AGFeZ7nQRgMvsynXGopdZ5KaYWDoPrTFMda3V2BaB3QPQQ5lhkjZNfaFBH6IEeEETBjpMAB/VgaIfTCpWSP7GZA1DDG6Haq0sWqtQG0kKoamGczRmoOwEzsMQQbNsdjjgNv1r3ciKt2qetJp1IYQf9thbe/7dtFgPsA358Cqix4c6YNw4NE60GMXFZ912gy/vLNLUhHxBsKQXzGpQ5MzcAcBVmcqiM4KqA7mLk2DSKlKSGVqXYcElNF/D//b/
+DXb376qvXaBCC9JvNKnXnF+sHDx5cnG9XqxCCOITW1NzUEEUYcGmvZ17iXaimS6h3wVQR0UKbBmYEAF8ylYAIBGhLDvEtrA2IFmsSLLHNt2lFYgIAsAWJDQTo7taqA4EDERKxWkNEQnKFpbYPcalaW1a5gkB6f2kRIaguRlg3t6/VgvubQYRiihICMlXzcspeSqttno51PoVO3NvhdHjx+ovXr768ePDBk6sPHbsoa6LoakGY3MydiIBcDe1rYjcn8GYAiIzcgYNZjUx1HrsUr6/H2Yanz771L//e3/qt777/O3/9r33y8ceffPTF43cezZG/mhqklBV3aWPf+rVvf/jw+Pf/ob95/vxnPzx7/Lh/cOlKw4N3ymknp5tydytF+7PVcW5gKETTKaPRXE4Sm/Q0HyZP/cPvfliIztZ93o2UkgCMd7eR24vPvhy6oR8uPnp18/SDd7/84nnq7bNP/6jf2Pf/ve//8b/6vAt9SFhryxLC2fqDX/+Nn/y9H1z+5m/d/Z3/O2bobg7Hl7unD2X/xc33fvt3//CL6c3uCMGevbv6+b/addvLEMVq3V8fTscmdZynCbBxMFlvXtcyXD3sVme7/clzSUSNgxXVXCq2nMvURgm0u9vnuQxDF6Lkms1ra63vN3d2YgzWsqo64WKGvo+euSP+qdDnWzgYEKAWbVnv7nZXl2tPwCGa5pyziCCYk3V9KAZXjz/M8xeXm/mXn/7oOx/++f0Ray5mvrnaptT3CXS26TS3ag54fr6tvhvLq2/9xvf/wT/4Sby4/PLVm2lUSkNYy3h7uDuMD5492e1umvlhzONpvrtpN29u99c3x/2xqr16dUAJD9/7xrDa5te3v/ZB98nPf/jHf/D3mh3/g9/73TyiHk9upUz7aZ6Ot/tPWKf9HiiWXADh2btPS2w9RApB8wnESfQ4HVy9zY0jazG/L9axWlvqhO6RTtAANldXhzc32pRDCMSpQwxUrAZhz/XBdouI16/3i/XzpBVQpnHuSrw9zBUYifPYfvWrnZY6np7LagC3yIIczWCasoAY4Z/7/ne++Pin+3mfa+5CUg83+8IYNmmFcorJDrsTlTpXmt5oQFqv0tklS4wvPt7fTbm1vs7OXp0dNgPG2G6nDjksErYtSw9TWPRjQsfq2gXJNQeWZlq1mZoIZC1DlxihOQoFM2cMQJ66bpobgExaCVTdvBpzd5gjUZzbPioJ+PWLWwYj8pyrzJyN0AWy/vBf/tHUpvUqnOb5/Seb42Faby5va/3jj7+M/3I9+vDVoT59fPXN773/D//2P3j84TdIbJrzLBrOu8shlrvD+xfb25v+Qh6U2W726Z0nqx/8+F/8xT/zG/56c70bSaqEJMJdH0eYDvNojusoPUkzermbzjbDeojHw6QUjmNbJUyRh46vn78RIpBYTRkdzKaSq6IbDJvVdnVOTKuwOludvbrepaF7/Oz9n/zsRz/5+IeR4vE0f/i9Dz/76XNEgqJkNOeZhYahf/V6H2N4dPHuL28/vnywam6hC7nOT3/tMnThxaev+5DevJ7nYoHQnU53p82DoaDW44ycSLBaLccxpdD3KSXZHY+RAjoyQkBmQIWW50kCr1ereTpdXTyMabXfH0j4fLMxlf1x9FlBeRpnUxi6dDjsEl/mUfevb1NMx9dvumHYnJ0hEUjEYwthfZj2EkPhVMq9VISIS5kCLYcrhKVW0R2JGVzvtzhuZkb3hTIuzGZ6H5/2t5CI5TjovgARlpIIFoHaVBUJpnEuqte7aejCWRTKWVKcDqVbpWbNICCCCLfmCLAYXmLfuWlFT6t0vDuqNTMyFwcIQ8dGh+PBgK4ePFKU3c2XQ7+OQ2hlQkBFyFZOU05DQGYz53s7NRi4ApQ6C5E2RQ/qZADFphAGM/PmgAzkjuiAJLS4rSj1xUppmmIqc3ZvIjEjuxMxtTqpSUCxmp3MQBqU2EdFc/DAnaqSGRGha9WGxGYeA1dFTsEIKUgX0iHvmULVGQkdMMZhN98MIcQYZ9fFtywcXJuauraeVkUrStiuhtPxxEhm2SXnhm7mBC4FiIE7I3PXIFy1zeUE0NREncAtxGStIMim346nO+k6C+huuYzEONU5YiKClhs7ByIBwii7nIG8VSVC1ZaCzPkQ44AGAQXAvTatNXCoCs6EiHMZJURmyq0tLyAhMQc1U4VZCyCyu6sHiQhUcimtbDdrItjvdhyiSBz6Htz2+9ODYXj4bPuDP/rqOB6bWQgs4GAYus6hvXpze7FZRZEy1lHncLZCSfkwdx3mCmm499YRAiK2XAkocMRg7lAMltzC2eYsdkO3uToxSQzaKktgUPQGnKw6kQaCfr0yn8HLEh8QB7Dm1gyaqgdmnU+uxpECkTG3pW3E0IiL2fmT4c//tW+887D7/Mf7T35yeLV7fnOcXtsAE7/7wbN4Hs/eeTw8MjlOXODzz9+w0e74i+Sz7w7Xb7743m98OB7frC8H3EnyMB2vZzixAU7ZSi1jC+uekWUY1CXEWMupQUxhVfMLg3pseVQSBugZQvAyg3kQjxIdmJxv51cFmzFY9cCB3NxqYGjeqjaRZGBqCkhR4jhmut98iqMOMd7t7g7oTjxNuYviwOoOzBI4l+rmEKC15l2kED2gRZMk0+1sDKcwrd/9VtbUXr3qDq9Ad6Fo7Ak3xMNqjkBdDZ2WIzhGIytWBQwDXJyvhosVXTwsxKdrng7jy0+OpojEQeDBo74RnA4zVMjFAoogMNKplYocQxJABCjFkbCqzFWrNm0g4N6sTC0CCsOsJeeYVr2Zz2NRCEjAMVIajIO3JQpTRXwJRxUAPts2ISL2w47oRCVbnmue1EWSK3CrCgu3FzEkyK2GQNglNOgCIni3XeEmgZkE0qreymZ90YBP2QwZURowg3cRQihuDYhMuC5F00CtZgd3YgcQup8LNts1IXKIvN5yf6HAyA3xYG321hAQ+Z7H2poqEMeeHAmDiRq6tRqFzZupO2gDVIfls2UZyxYMh9l9d1SU0PVOtdRcmCUwI6GE6A6q1V1VtTYjA3BgdHBTVAQM7qZKxEtpvJOqFmuECKYaiMFc3QMHbRURT9MI5trmmAIGKPME4qBgaCcbMeHBdtxJhvt2z+a5GSALNJon54AJz0JO+x2s1k9FQfW2rYKyf3V83QXAIEykXhxIhHMrDTJ1IY8ofcSRGX1hdQCgNuUF5YJLphtrq7QY8gKqGSITRTMH9T6GWiw3N1U0Wgk9MP1zQ3cH+ApsBu2YE+IZYq9hYIxqEaBHAkbk5HyfhFpq7GV5CAISoxCbKTvYwl0WUgV0BfXmpIjsAOoEBBgwDhwPZEdpFxg7woaAgMyOZkZOblHrGEXACVGWYcRUza25MSzfwgUQzEjuwAs3IwAF7BpdjOTVT8muQ8vMuqRxwBBd3EABnZozOAC2jkkBGAHJyW0mm8mQPbjH6kvLjzkoKlBVc1i8IWCAToyCAQE7gEaekNiNiJEJ/sSXubsbIxPx0hVOtHCLHHkBPqDDwgolALev+URmhnXJSy6mkAWMvByr+D4mCUvQ7L7uC
oDuKUiLTvQ1qOttsr/5QmqvWpa9HZPcl18BqvqSdasLBokDMkk3QBi9+XoT99OdVb3ou4dPz3762cusdGhugLVpCDE3AweRAHUaOorCBFTnaZO60iowEXJRQPDaFKOAeUCbXLOaozoqERkAEdPbv9mRRTiRtP2hX208iWI6ncYU5eJitb8dDdQEqXmtAEQLumuJX/1pn8N9NdPb1i53EAZ4r4NvnMW5lLQaWgMDDDHc3ry+vNySzW65NguCcysuFLpekZGs51C1JXYCNEBkMq1L7ZO7o3sgCs6ZYJviB+9847vf+KDrNxJkc3mhOSMbAip4q6rqRogYER3IiUjdwZcKv/tVKxOp3zeRAbiDm6oDENO9HImM5PcoIlMkBuJ7AxkukSFDJCA2X/oBgJfrZOngs/uI6KKyMZK5LlTNhfYgzAjQtCGCORKRW1uaBBdemAPY4uVakgFf5/2+loqCCIMhtDrXIB05zLW2mt0KsIHWN68+/+yrn8XNw2ff/C3BFZIgEKMHZmC6z1o6mTnzEtG811sJyEHZWzUlBHAIMQqZuazO4nh+8eDbv82Olw/OHz3q/pt//o//yn/4n9rHnwzvvbd+rJ+/OP2N/+x//fEvfvHO5gMchtcvKg69Ut3fvTpdX+/vDkM66zdrI5+O+8OLN+dGrcytZLOxv+gk0tmQZkTvu1rQjra/u4uBwGHcHQJR6tAqeoBpd9BjmXbad9O333v3zWdfxuP86T/+gwd/4XfLCR5u34H9RzCONh+BQj3a7vU4P/Hts3cPAp/Ph9/7j/6z22Kv/7t/9N6H6fEgLR/PV/3Di0cvXt8d717GSGXaT3e74TxOh9xmU9Dd8bC5OvvW42/C6z2neLXZXKXN6fial5vFjYjGlhkCIEWW7bD64vPPoqy0mVaVKAba9YjsgC31wsf74j4mMnNHXeJmi2/3T9xtiz6Oqp5EALCV0ncB1EqerRZzdYzsQt7q6UQxoutxHi/PzwY7e/PFgUKYytyveiu6P93aqg8hrc4ephBy0ZvX12R8Ohx+/5/8YaC+D/z0nW/Mc90XeCjsEboNv7l9/ennv/qOfFdLnqe8Pe/fe/cdq8fz9cCCQjIdbr740T897e7mm5sw8LgfSWIt9Rc/+AUEh7GSMBAMQwIKIpyGNaBIiCQ4j9N0nChKGDq3ejqcSkNKLEH6ris1x4ih76ZpGtLQbbsxH1q+Dx2QwXgcIUbporVCiVIMc/XxWJL41dXm9e3UpaHbXB5OI/Wpjzh0UnLdHY6z05KlV7VJiwFh35XWIFfrAgBF6berdZkzxv6Hf/TRKvDl+fnYlFLIpzqEyMzW1M0YFDKlTX/UadUP5Dzu8FfX9fxR9/pmGi7WtTbrJYDPfvr+b3/75cefAblXRUZY8DBkpgqmHNiIWmtO4gS+FEshCHIGBSR3Ul0waIYBcslr7BAbmArRcZ7Wm54F6jytOiITJxzniTl1YZPbAZElhDRAnRqgNK5hg8+++fjV3fH2Ta7uMPif/e53fv7jnwNiqePf/F/+u9oQrtDLvD/Un/34YzDowedczKKI9JtIlXc/Pzy5DKVZmDZ3eXzxenz46/af/M0/88nv3/34Dz9hCwjCQRwqkzqUlEJMMQTs18PhtH/ncQKdhth78rPtxX6cxwzE7s3mPIeYGuhpnAEagVoBwf72MFFYSSB3mKZ6h+OwCozl0zd/KJf0oHvns58/jwOdTrvhvIMmq/XqeKxCEFK8u30JQHUuu93hfH2Jte5ux812U+d6Okw2Zg6hNTvdTpIiqte5nJ+tiLjbpAcPLz7+7G6uE5MTMwKo63q7mefSAK531+88fpL64TgeWGI3rNEcMZxtz82kFDxbPSleQHFqp2y3l/zQqNzdfBkCuhchfPDg4Vevb6RiU2NGRz2ejjqpImxWay+23aysw9cvXqWgb9cGwkwLJO/r0oRFP1rKscBhcY3eQ4oc0GEpkPjaKExE8NZqC4B+D+1cNhgGbk0bI0mgRn6aR2+RH2zJ3bWiuwAaEKhhU0A7je3hg8vcTuNUCCUlmeeJkEOM6jWFMI/VnM0boQhKLWOedpcP10CPXn5x9/ByfX5+dff6Vms+W190Q29QAKiUnCTEEFWrG6lNQATcOThzAHNEN61gRoA1Z6G+alOcHQyJSTTno1PUbJbBudZ2bHoi2ZgXMyIKigt6E6Xrs4JIctOmEKIw2m56EzC4E1FAcGT2JTcOsjD/3B2d5lKMfM6zEQrfI6u7yAptVlMHBJ5bdtaIiUNU0hTDKY+leTu5IDF5yydCXPcPbvYv+5TQY2tqdQb32kxctDYP6L4UUbuaSQyqzQ3mdgBBICWD0motpQEd59P5ZnBncxPiWhsKzzUjYGQMgWqtRI4gQTp3UlMWZqBWJm8GDMRs99tCac0bZEQQSsvpwdzVKoVUtQlIU2NAbdashS7UDCnFBEE7lRS+3B8Hlu0qHiZref789qYRiUjN3oms+nCasp7w7u7u8uJczULkjSCCCdvQpW13lbp4fX3b3jpMy1wCBVNHhqYtpI6FOhIy92pzm8uxZaR4fsEaJEYym6YCqoRmTUlI2GMMszsRn+Y5uAiJxLgcjtzalFvy6LNauU0JQA3QwyBaDaJIN33zu/H88TqcP3zvL//65ffzpz97+dP/4fN8HKmkNz/crdebd6/LWe/rHkAbssUAx3pcP+zrPBOeTu2lr9udakzbMt15KPs6nvfnEa3OfGarfYpvTvslcqFqkQb0MuciwIDVWLGLRQuaHsE5cEOYtJgWuA9IAIr3ksxAmLNlcE1BmBAhKQCocWAJbOYmQE6mbRyBCJXmGMEMnIVNDYEYUVwDyCoOlLSqR6xRstBwsTameZqPh0MLeMzHuq9+2NPlO/HJE9kw+Vo8g3puhYn0NJXTxGEiT6Viy8pk5q2xhvV6dXHuq20XUx/t8O745pWTV0ddb7urqyGdhcNJDjfTboLWWJGrg7Nwn1Yp9SKMoanPeQKHUgouGxmmalhwST8ZhnDyJBgb+HFS9cAOLfU2bCuziteccx6blcQkgTEEBTQeXNg00yrlMuWS5wpN1es8OwJRY+MuNIBqINwrlEZBkpREXSdp2wV2AWuuGULh5JTGShPUYuRqqe9XfYhYWhlBS+M4WWgcyanm3OZSqzpw6PthfT8XDNsLlsBdh6EHjFbmMo/leMBWCZAZpetDfyGhR3PUVssI2txggbMEksSdoxUb1SrVqtVrLmBmVoicDFqtTUHdEFmrttaEBSiIBAQMIkEyLwUCprXVUs2cBCkIEqJaRaTmLtWsui+AF9N62McVdP22AqNZtVKtITMSjjUTolqLoa9em7amxWlujqateSCK1rKgpNB5y8yhFJXQEborgTnC+PBie3N995CfcjlzOA5nl7fz6yMe4Owq250WWm2Gu9OrM76MJMIq5ugicYAopSoYQQVo5s7o8DVTVpjBzdQc2KyRuzlxwAWiCGZAYGghJlPQUocukZfe8KHR47mk1K9T0KZshghqyoEXwy8QksgSCBRwgfvuGwVz9+CwnF4RUUKorRreA08Z
iUWQiImFxRsUA5COSSDfxv6SPTku7UWNiBQcEFUDYmJBN3c0QlSvSrak1BjQVTEQ4tJ6QQ52X7UQyMWhtDUJVuoACPm66TGAERI4WGOv5EyezNnAyYWAwkKPdmPgBh5M3TU6iEUFqdAUXaN5UGMSNTBrpuo1ghAhGgigEAUiV3+7Vn+7Yr9/FsMSvsJ7CBbaUmnvxAhqi5loKTM3IloSbkhgpveRQAdENjX6OiMIyyAPzRouvc++IMfAVJEciVQNAZd0m4O5OSKZNXBc9EZEVK0IAChOUEsmYCACgNB5M9daHz56RJszdjrux227uP7xxwWOtU6PrlZf7QoSnxqJQnFTBms6zTO5A2EDu4oYDSLllPAm5xVv7sqcUsexKWAfeMhGDToRwTkwn4oh8txKIwkxTEqORIZa68V6dTJV0NIPRf36er897+dpWq27aSxTLqsej/NyuFzksT8xub49dL5NAqIjYaCNwIeXPbtSt+n7raFrrVM7dYm9Fu6jIEOtzFwU3BHcQsSqM3Fgt1pz36UGDcBZSBmJxapSJEYLIYYYMohL585jhkBCGRg6AiQid0O2r0lhaAZL9yLqcnBWVWJiRHRHv68yo3vQ4+LlQFow0vYWKUyAC7tqSTIunCJY8lqL9nSPPXJdDktAREyoasS0bNYAwM2bG91fv67qCMDE91opEOLydykzg2OzBkgITOTu916TPyUVtVK6FMfTEZFrLt4qWK15JLT5dPzk1cd5nM4uPuy3T1NISKStgTUPYKrC0ZxqbYsVavHIBWFwqKqIBuoLIcq8mTG4AygwxxieffPDzdWj68+eXz48/1c/+KPv/e5fpWff+Ni7v/7v/zW/fkMf7168Om4v34kPYuhtd7r9s//eX//b/9c/MMVVJ1/87Mu7Q/kAzbXNhyMDIkBKsZiTQ9mfOk5WWlFt43x+fnGcypq2QxwC6HqVoCnUOt4cAslw+fB8tToefvTOrz3Z/f7P3+xuDrvrw+n2w78Utt9+//XuEBN9/Isf/Mav/Yd//MM/6tLZw/Tkox9/iV0v0xQ8at7ll4enQ8i7EU1uPnq9+eDXj7vTsNrMx5dmXuYyVBBv/TqGLlrjxxcPpzLbYfzOgwsl3427EWoj9I5P+8NxmiADQ3BwJODIHz3/TEJsTcvUhiHMY27Vzs43d4d5u+4BmpOzEAC00gjJ3jr3/kdfeD/N+bLyZWZCKlOJXQ8ATMLcmdY8jyF1Tj6XVpg//N6Hzy7s44+mYqe+22zP11HSeJgBDJgO+6mMt9PNS+g3D59uHv767/yt/+of/+Zv/lZPm8uL7Q/+0X/D1HFUfbFKYTjO09nF0wchplOW0/XpMP7kp69fv3ydRGMfWiveat9Jv1qDY4o9Al086sucYwCHMGzW61WfC5lVRFBHImjqp+ORAYUiMas5mJdZEfR0exfSlgUQW4+dsIC7qyLC6bSfZiylfC0V2dwEQ9eJRBlrlo6nsXaxQ9cUrcDspFXjcapT1Z60j5Jbxojvv3Px0VdvlIyXMHl1A3R1U3NDQfE6m2VXQrEuduAIpgVKCMhQMFJpatqO4zTOpUu9qtbiXaJOcp44+LpZm8aRqHXRcEW3NyfpUvN62L3MYwlOFELTgg4GaO4A5Gh+b6hncGSk+/JEc3VnCWbgQMKxtBkZGwCG4ETEsbTqIO4IDrXOwvfc3C7GWmtgz9NRrVoEcw2CUCvM8r1vPXA9vLz78sF7D9n6Vu3W4Qc/+IkdNSREIMrHT375/Gr7SIk+v96/991f1zplPV2dM8yk7PXlNB7HVbfa3e6Op3q9P64vhsc9zp/MP/mv9ac/C9dfeiBzq1oqs4+nY4+4QX283Xz68rPV2WPUqQtDrkAJ3G1/2gFInU0iUkgc4nE6rtM6CDfzy9Xl7W7f9fH8Mp3yFNzOzld1tuNhOlv1Tnh2vr05HIkgRpznsr+eXGm77Uo1dd2cD3k87PY3fVqt+24VI/XUdA4mrVidGyEIB77s6zTiaGC8Hc7ypALBzNvu+Prl3Xw4bDY9kXPf5VzZcD7kNpXETNIfj3PqWBBNoYtJtViZFTwOPXIopoTYSkHUIQz59IYjpMjIGIS2V+cvb96stjEkQ+fswlEC026/J0nTeJd3NaS+ky4SvQ3mAyMicTNraogIBG5ARG5vjULw9RZiQTMKgC07FSJaal/h3jy8dKcgGLnbUvmhpoAYQ3C3IFLNxWWaWs7F1JyIGHJRFkERcwW129ubB2dnABiE56mE0EcZNGcAITQkj33I8+KWpRj6UuvuZr8COl9f0OO+zMd5tvXZ+el4F4dtdWVidJcYl4BCtQrQUjfM09zAkATAzAoQpzCM022S3jku8brEchwLEjZyBScWYySqTo6MIUV05wCk7p6RIIRkzdTMHYS5lpmRVMFcRaI1A2y5TCkkQiHyWkutmaiz0hjRzeYpM5Eh9Glb6syOqtPYdkPcogcmyvOIjF13Nh+yi5nZMR+capChVg8xLCXUBHg8vU4Bm84MSE4MWh26tGrlVNucQrB7MVkxQLax5SO6KxuSjOPUhVBKUW8siRWaVXSVECZvJAiIpzKzJACoaiEEVS2tgJMj1OosPOZTROr7/jQeJUbVVl0BSDguJakYpdYZEbwW4KUrlhAlSNA6mysJmZOZ39zcnA0DB8/l2JMJwtBJoPD81RsiXIWY+iHgOI/zocxD6tj98cX5fqqrSF6amm764cWrGwrcRSll3m5X0zzdrw0c0CEGQcIoHQVuU0WEpt6QSlNoreodI7t7XEVYtEOyECVIcgctZqaEBERMBKau3EBjCszmpu2UczVh0DyXpTnHBZlQLUJ7NNhvvb/aeop2NZboHM+ftO//lh2f37z45JBrqBle/+iPf/vPPxu2q/XVwO+s8ghP6GJ8dbPqvHu27fswGXsrN7c/vRoeqeuwXoNSq23ous506GGwWHJOPTatN7vsjQYGNTU3EQ7suWZrVoErxcaerYHXsxDdSKRjbofpICAGGoWBiBCaKph3KRJHNfXWQoi8SjkXEjLzNKTDfOpWXTf0c+bAbehp6OThNx6c1v2IbUU+7XejNtmu+9VQIZiy7UcA9WmWGkG5vbneXjyNw9oBNKf5eNegzQW9OhlXH7HWJIotQNMgXRSOUS82V9v+DOOmcDuprfq4Wfd5rwB6HvFb765wbW/2NUrBEx5uPDRaoGlr4Q3j2WbNEEsrR6pokPr+MIIlbK25uRIIAiEQQ2nt9SlbEAp9y3q+6mLfH4vOZYYGnAuhd9JFQgUAIiSYi9U+wWaYbmDyMpY8zqpAQE0dWYKjuGZ0r02BgCNi7LTDKbCs17OQt9bM57mowlSxOk6IuboTqHsHaK0CI2BqaC2XApCVydUKRVjPbWwQkUNcre7HA0RhBPA6HQGwjlMddzVPrbW+Xw2rIa0vKG4cRFidmjYtZVS1mguIcEwKDZAJMRBmbVaLa1MHd13yOyLBvWm1qjWrEVEQJhKJcZmcYwgEzoRNW2211uawNCs7ESMhpa4ZuoEbAlDO6r153ksADBE9VS2OiogG3qxUHzf9dkY
v5vPxUMdRkBQXrkpATAQ9MUFRt9ktK3gEgQpMyGDVKga7PX0Vh6Ecy1z3ElCNAMI6bfrtpoyUa85z2XSb2DrUjFb7xLnOkEfTWZWGsGlgATAg28IpNmBE1QbuRIxIjATgImxadJ7IJaXBvWWtwkEEEaDqHAnVTNBTiujEzROHUicgroAibG5MpGAAEJjRPXBgQnMHpCDEiK6FEFHiIhcRc23V2j1pymnpSFGh1MA4JCLgbggwetlTODMGWHi8aEzoiA4MjtYUAzRTIUREDlHVnWhB+YDaAvm95wMB4NI42wfDgqBrx6QApTbhidSInXDJwBETKBqiLWcMBaaFwIBuGNUZSMEQQAEUmpPh/cbqLYIDARHZGQwZCcgQaIHDMAksyaBFpXD/mhuupv42XwZIAObo4A3uN/V0f8Q2hyUKZEpECy6agJapjEnMltUamjVzJ2ZEcvfFl730xgKAgYGpL5f6khG6H7F9kQMcHNzd23JDLDVqHIgcmlZbaOkIbobgl9soTFDKvKcYewThGKPluZY+rlptnRC6l6y5qvcRo1Sn3QwbBzNWsM06cS5gpReqbs6caztLvHYzp+qNCIoWQgdwdjc3BjJrxAmqdzGYtlDKSvAIqgCb82GaRnPA5km6Dprp/cES4S35+y3CGvw+gra8N0gESBuCb533Z30AbMRi4FktDp1P7f2r9SlnJACwdZ+mw8TOaoZBqjlK0AUIHcMCa2laRWJpDUCAiMzMPQWeGKe51QLIsFyvvKQEEVzVHVgWZQVUTZjcwXypn2d3R2RwtEUqWlhBSxARnIneRhoJ8R79uViDFhHKzBCpaV1GdXNDBDMTElscRHT/Ai1RxkWAuhefEYjZ3e4ZxbbY3GzJHBGhtrZc3vc8IzdiAkC7LxBYoGf/4wCagzdBqWVq2tA0oJK3529evn79gqi7ePYByyZ0Cd1clckdtbYjesa48gbmCGHNQgBKuGC3AEFMKwA6uqsTijq4OQGD6Xh7tImePHv88T/9H5LpL//44wff+ys+rP7i//R3mVfj6Y6ov3p82aqVfRUJZ48vzh9uP/zt3/nqky91bncvvrTVI2TzOr341Y8H8r4PZhpbgBaOr4tPOlfMAE/effbTf3II635Ynw+bs5Yr9OK1OidXNYP+/J01t/PPviKxSFBujnEdbnajXV1dXZ5dH8vFsycA1d58Ba++fPCdh3/48a9Wz95/8v7TH/7//p8XEnD8+eHzO7u7RncIUaR79OEHN+v1aco4Nq4tbFMLIpueqnZnfPvlvpQDiNcTR7ZS9+t12CS6PuT94ZjNWlVEHPr+dDo+enj1/OVzIXBm0yYI4JBzbe596lYV2QER8lzvIRvE4F/7ifDfyrH3SpHfY4yWSkbDbrVRRweKEtQriTRQJhKW6+MuXJ0VLWuBLz752eF482o6XdizTd91cbCqD87PpKN3njyuxy9e/+JNUfsnf/xvIp/9H/6P/5en734TqHz50Q9uPv6032xZWn6VjvsiAUP64jTd/etxRGzD5oykX68FDPso0EdwJ3TigEB1KtM8JY9thvmkabhcDdvXr6+BV6XU89WmWjPLfb++2Ay1Zh46p/Dg7NHd6+eIjVA250/m08nydLwbcRNMoKmFEAEJOKVuQKpdd/8SXT7Yttv97/7Zb52mN9vHj//ZD39C67NSvTQ8P4vf/62nn//yzS8+nbX62bDdbM/L6VWpOabtl6/eDCFOVjviIC27XZxdqc7zOOe5sqACt6II5f33L67Oz375+ZfQxdP1uFp1V+eX13c7BdrfTlf9o9TTdKoDy2E3p86+8c7Duxv97MtRiM62klJ01xT4bE19pGQrvSvrsLr1U9XGiLxYPoGAkNEDSjMHdQmo1ojIwNWNwIRSqWqmzVs1RXBwXHXRwAkQiQhY3VmituLEx9PYDZJhRLLUD+M4Pnp09eY4HksuJKGLdaw+prvbm5yOT997/8Xzz1er9WbTl/3JqiH7eKh/8M8+J7RNmL/49M2sroU3T7/57D35N3//X3n1q4vLKpLHsnl8VtXs87sVBYmxHk/ttv/BZyfVHh2BDRm69YrQUBceccuzhygI8HA4P4xTLlZP85C4mZFoikKo43zbzM+2nRPkWgBarkeWmsur9cN3kkWUBXu577shtxyAbq/Hvuc3r15x5E6sj3w6tDfP79599nQap+v9837Td8MQurjtEwf57KsvHz44S5QkdShU5iKUCOg4W82FKGSfqLei0/Xt7V/+7T/3L/7FH6ZVp6qq5ugCGDla8/VmOBwOTBZiFyICCfEQqEO0FMNxeiNWUDo3vOg21a4JS+zE2jhPjVYdgEwT+J7ZyUt2LXdT3W4vpikfy9T1nXWrd589+cUPfsRo8+GoZepXw/2HhBOzmBozETPkBdd4H0BDRFs6QxHdfNmoMLE1+9o8tBQvLFs4t6/JhISwANNRkJpbKxWJuiDvPL642e2JHERwbixMyouHVtEDEYOggxA0VZ1mHBJTD5ERHDGUcnCzEJJbsAKCgbta2jQfbuMZrtaM2M1jgaD96izEOE2TztqlrmoxAEYRCAAAyqSupjGGpgWRm05NCZ3IGfxtVa6pSCy1CbOgKTizG5bm6BCZWIu6KhObFmjmoQGANXNCN40RVSGEeDrOZIHQTWfB3hURDEAJgYTNhTmFLkHoJHmbJzfIZQZrgdjMA3UIYmokCCyM7I7NClcRDq0WIVlaicmtmZecAydATHG1K7fYMCBUn4E7IKhgILG0bNrAKEaszcCbO3hrxYAAGak1MzMRKlpdKzQDgJyreUNk1bqsnoOhqTvhXEsgTiFMqkDgrl2KmlupTkiEoUIhJHQJmGqptaAwuAOSsHBpFRGgKYVlPgBkQoDaajPYrs+I7dX+sF31K6HTpLtTJdDt0KPAVNS9BsYitEqBweeprIe+C2HKuaBt1yslODtfC+Ht3U69hY5T7O6vVqZalVM4O1sFxkhICQ/HXMCbupkSupaplllrck0SgwigRAnEBGTQFjwkh1qUXLTMMQVEQkKIAYMCsd8jJxyEgJAAHahLMjCs0W6e75497Aa5DCmdbl4OkdMlXSbRSi8ObsJpOJfvPfEN5UEqJty13cc7uK5td4zYz7fSWmWh1flTCg9keiH5AKdQrk9VcBqPijxsNtKql3nMY+o2ULgPME83qNayJo8JBLAZAAUC8tIYKRn3WupYTp1YSmJm0i3LyRqQsQV2QMdmplWFxSt7a8E5SJrNW+pg2+GT84v3n1llRLV8GOikK965QVqduJ2iGltIA1Fax04VI6NSu7474gxoNL/epeFLfvRuv7qo3Av1Y56az0w1YPBcCpQucBx6wXXO6oHWF+cundJ5iIP7rt+kFKlL3HWCBKHjEXC9Tk/PH7J+hd7anaGhSHKH1ZA2Ka27VCYXhGHozWtTNU3VoWXMU3XAClBzPjsfZCul5xKDqSKQhdCISKiVuWmVcdoiECsw1+ZgXFpziupWCapADTDZXJhmN10q3sFSIKUaWRDA0O/7wiiAxLGBUIgexrHlOuQxz8WMcFZjYQ5OkQ
pZjMlUP3h0Nzf/36rZY2Dnm7I6vt6fmITDdX22Wq5Vw3m77jkZnz2M0zn89103VCaZnn3WbkTI+Pe078cjDqxnY+AXSO/WJxnk/RyRKsJILdmLcd8Zn1QWm/Pyl3KV0srgQhzJeXl+g9JKzWZDMYlONcn+a7Y502ebM/nNr8xGmTd2/H/kIgFZoFNJoOyfPY3968TbdfFj0u5XCIu9nauZbH0yTeQVBgcMrGkSMFjQC5zxKupejTEgOLevWIsAxWkZwYG+BsLQIzDI+n6Rx2KrPVedxcIUd0uBv6FrPaHGCKYVqnU23TUs+aMlPAkBi7tG/VO2BCdBRO5N6lHfadcrbtMzzaOnejRBtP7jofTq/pGhIXRwHbjT0l3l2OIjTky3JcQMbM/bG0IDl4zHNs48icFzR3PC1znU71eG7TY5ewZxPqKAdB22NtYBqwABEIUBKJgZfJvMMuj+PRqBK6q0IxqNXQu66M26qtgpymubhiR0h5macl7bpxR9k3Q0pLf1qWp3mfLvKw62NETB0vi56naDVaFUctDZWR+hhG3WxVUsaOkRImAELCAHXQADP3Ri7AhBAWBWBRI5BVLBFevBmXczud59PkJsN4HYhACSEFpqoKNHS9Q1iNQlRTBxmXUt2hYeqC07ksixbmZNrQhFNvWjmgmUdgCyevCCHcG3BrHkhABIaMSEJkJhyXm36ZTizRC1Sb+W9LB12f+xEIyzKjVaQAyaVZWGFHkWwtojXmnsKbtVpnWxZB9OYBiAxEaB6ZxAIiSPpBumStsjNLn1IGCgiPpoAtZXGMyJS7DLoygsMQhJmJibq1axFAhGhWCD2lGLZ9ayrdpT0/U5mv+sKjJwFaA4uJyPzzIX04LodiNSUQ1nZmyOAEsZqREBCUYo49cU9AzagBNYRMsgAKRM9kEB5qBJizm5NfMmyCWsXqZNbFTLWhYh4ajMNmQ/FBoGOz6XRibNTtUuoqlspZDQcEJZfE6o4RDOFmGKslQhEJEYk4gFiSBSRYb50vDTQkUgPh3Iy56yp1i9cnq12wa0ppHD777LHz/lI+fd1Lmznr1e34/HBcysy94uKHcx0krcvE0zyzpO1mdKhqDqt/gLhoRYxoxeuZO+i6cV77KsAKAihMQvKisAxvhA7oGo5BSGSQ5kDUkRe9St5hc10oMarDUjumjHZM+ETdrwK/Q3giDAKuiyiyowFWwAAXhASIgeiEHuqQuXMUJy9UjVYs9oIKGBTuBqTYsQ9DOfzo4lWwO/2A4jX68c346cfHD6cWRAyqzQKBAQgptPEa8Yh//5AML+JzjCAidne3iowRGIiIFN4iXggyHsEs6EFAgSu0kSLg5dQU0dqyGpYj1g1cOAADuishuzsAIJGbIdFKPHbTgHB38JUm44RMEOoG63gizNbTFwAAeoSHYSAhBiKjmLUXdjI4AjJgEC2lBcAqvWLEjFSCHBBMt2A92TZLbYtG2e02BGaGagghDmxYh8wxawpkQ8fgnOZz2Q3d0gqRnJcCBM9alJO7Dx2v5tkzNCeZtPRJTJtKNFZOMSZ0B0MkEZY8KdTwj6cGwedj9XAm3rtNwAVgG0a1msFuu7s/LT0hFUuAKKlqCwRrBogMaqaJqLlj85Tzsc4j+qUMp7n1nJ+WJSiGTAnQq16nDs0NsFUYhr7MVYgAYK0UAlEAarPU5SB0BIPAAAEwU3wJRwIRAph6C3CiZOEvI+A1cgQYAOZtFaOtCjOLtgKlf5j6Oa5ZHkQkdnNEMm8vU6d4mSH+baNt/SUArAa8FwNaIL7kl2LlGAGR/zD+cXcIBFzDQ7zm4JgY3Ff+eoCvLIn/GGtt6m7KIr6mRSGEIDwAGUCQgglTGgGAUx+UG2UmWX03rbWqk7mScCIIwCwDIjIxRSA44MqTb4JtWuYyzwa1+NkjQjZCdrd//+bzz/Q8dZAEjbqOqPckbWldL/dfvWvL4eJq+/Gbd73wdrvTk443l29+9mV0vU8TAWz6rQ2vy70tx/vPfvwz2Q6nw1KWpbu69LNYLNtPBskjjkOJlK6ubj55nTYb6rIMQ06D9qml9Pw0j6G5k+VwDLB2nqY6G4ATL0+PU6K5tNoap9SNI4WPt28BuE9I4l51LtVIIFMtBU1AUtd3gExgJQJTh93QX93oaal12V3c1LKctQBma87RRYVN3tVab3bb1twoXY09I+MY43YbQYlwuOjO89T1fJ6Oc6MOumJt020/+/zzh8c9inzy+ZsPdw8XF7eGfP/0/OXv/WxnRZVyRvfatAJxK3Xs8pdf/CQPPRNjSksru5urpnhxdSubXSJ4fv8ebJaRylLzxqFNkuvHj+8Ph8Pu9mbx5Xe/fXeel8en4zKfHx5Pc7PTVLVqrR5Eta3QGDDAQDBzWy/6dcJOFEHrl9IqGIQIVF13bYQqHHhsOVF5vr87PqYkv/7wTTsrBJJ4m2dmQGubLg2Cu133xcWw6entmyELPT8cMSfJ2OoyDLXU+vDN/4uvNg/5DeqG+FVRhywpCwUIYTDwD/eDV5fD3KbUiUa0Wft+GDYj6fzlT99qKl9/dZ+gJwtrMPTyD//0LY/0l199uOjyRdfN+/2f/9kXV7ev/sUvnj/cHZdqXc5hLY4zo4ojFOUW7mbMh2VCXAAk9WxtROYP70+cup98/vn+7rdkNs/H2uqP/2C8vOnuvjl88Qdffvjd89PD8vx8/Dt/JP/4v/jkv/q//6YYPj7t33y+fWrn+sxMEA2CwHw1cREFIAFJAmIEDQdkUlck5iRmKoTgQQCEaB4sYuZ500MsCAxIALoZOkLox2H9gtGlQW5LWUBy7run5ynOxYp587xJ4+ZtzPlf/Nf/P9fUVKFoNR+j+/ZvHofb3e3lq2nS++fnv/cPf/ZXv3l///Tx5z+5Im/PD09vXn0+3x92fSqIGnJBdH6ePnl9MW62kiR70JCfn88C1FwNMQgtXIRNcRiHUBBJirUfun0JAq6K+8nF0tDlfpv2z4+3r97cvXuXmA00CW+G7X5/wg0capXMp1P53e++3Wz6n33x5ldff4uowBLkqtrxUFr86CdfNFteffp2+nA8zu0LxmWpnojda9XdeNWKOiAlGIaewvuBD+f69HzX9QMjttZabRGoLzAXKVVTv3nz5pP9cR8ODG6Zx5sL4H6yxy4PF1eIXm+3w3xaLjY3H+7vOIHDkvKWAiDB2KXS6MvPvnz6+DF3ebsdP9w9DPkybW4u+NSdFzk/0qAyJu6GeMJFy3C9oWPMJR4fi7fmkC6Gi/k4taV12xc1rLAghgi+SHvAEIJI1jVEAFDQS6SWABDM19zrCk0EQgIKdUOw1QeMq+BkvfcRiDAQHqayVBtyJ5mn03nYbMKbEJWlCsmQual3Q67NpnnZjDklsVoTrw8NEOGm7gEiYuYRBuEM5BgOzsxh3nWdW6u1kLBgpwCtVUReD15EmZBUK7J3YzqfTjltcurDMeUBGcuyDHko81yn1mrrdtv9/ojoXSdMPJ9naR2QoJiqgTsgrYiCAApHtUqcEMO1irDQMNWZHKHPBs7IQh0JsEsLB0ThRMStnEh
krtP2cihahotBOuFK/bid6xNQtLZQkAwDCEcKXUonHcK2THPfZ69FrVCSBkwwYGVwb1YwQAjn89PV9tPJvNUaWQhJIzx88YpsYWChTUu4GXgaulpmhlQVVWHIO2F8PB2NKRF3XRYiWHesxEGozcbNxiOWparqvJxbOHsc274xck79uAXQbx+/dwcROi0FnMiII+bDKSWRjqwpbweskJGYO8B2Pi+EwBxPhz3D9OWbz+b5g1UXoof9Y049PJXUiQGlxOdpVm0goZMsh9kUJKPOC2ncXPSbi+3q4LvbHw2YiM9TZVyqBqU0TwXwRUoNAG5YJjue7UHJEnZs6AeQ0b1Z4wk5IQrglchwfbnU6HIHsCxWA4ZZGWPMlDCqVUVqu/z/5+rPfm1b0ixP6OvMbM65mr33aW7r7hEeGREZEdlEKqtIKBCCqickQCUhIR4Q/xJ/AEgICR5AQggKlYRoJBIBlY0iMzLJjM7Dw/32955zdreaOaeZfQ0Pc1/PLJ7Ow9Je2mevZdPMxjfGb1C3fpnfn9vVIyL6JG924+/lmMVl5Oz9w1rfk/aY/f7pOTejm+E0L0vr83K66LKYDT6od86AXIgADBkSA4FHq/3+Yb1YelztTqiksa5NKC+9OSWLqOBlys/L9SpWA9bOu/KGPG3si+ZIMigyE9blZPO6PszX69pnHxuXMX/++rfyLT2gPmAtS90b5zRmUZpYi/k+8x/I9+9+GRc0dUyWBsozXt/fz6fPYrdTwZtdKpbf3L3OP/mpQ93V0a775XKehqW/W91iVQBlbG22OVzNcXU7z89zvS794TDc3Qw7nt4opqelH+X9+fz4fDpfzX2yK6/Q6sdAe2YN7y6EeIkqgNVOmDJKmu6OeSx4BQ3MZbSwrjX6NVNWTi483LwOQOIylZlv0KglaoUJ+graBQiCr1d6uHq0AUwwDSXtoIyJSqI8gDAKIRhu7Thpuzc6pBbUVSNi9iAIajN7gIW2k1lvyxkd8zhaFB4KEQBid4XQSBFc3DpZykK9q/aFY84U3eu86lLbJQzYwBq2GIEDWTb/C9FqIZjQnQBcSZgEqZoB2oBEnBHAwyN0qbMw1royMgeDj9sqyONEmJrHOB7Duq7VXblALmmLULj1COjWrNbaZmsAigovmj8xEEsSBiRiIGIncEQeIqEHJHB3N0EpE/ZATDl4K13rXXtvGJnTbszDgG6IzAC9d289LDy6CAghJU75BsZS7BnZSo6SIsCR+aVkmtJd7H4r031vHzw6iGOFqAgmPGQmRapMosEBkysuinBZUJZIxSMJGmC4IaO6kyQIRGsCFQ0ACfLeI7qsRj3nwTv6eu7aqWMqHyUMSYskzeOurddhl2AcGura/Ho5vdoloRcesQMK85ZDQQjEIIwtm/yCm6Tt+PzSXiQbaNLQqXii5+eHaV9ykutZw6g9PK9PZ7nujxOdHq4plxPC+8upra1GfbDlzc3O5sokY6Zm3tVPc/MAhyhDikXZcMhlQUBxNJDI2jHIwFZyQUIgJilITCwejgBoCq5qHSPAKUBdhhPIood1Hd5Im2g2a7G27AXMT4P/AuCXau8ptyBSBXYBGIEgRNEaMYAxYhiam1EUxASMAAQBhAquGICUtsQjoAEosQEPq/52ylPGmgiIX2roiYcUv/+q/HA5Nz+yWgKPgF6dkMShe9suJv4jTdrdt1pzd0fArfOPtsasTb/ZBJ+N1Rdu1vE3ffaI7oERwhkCw4O5IBghd7WAEBEGwK0UDTnANtgNE6p2QOzahBNGmCohI2I33yprEHHrX3MwQkGw2MJs4IC0eZfc+qYZYYC5wfa7moeHq7feeDcFhggJ4rXVCKCwA+F+LAlUUID0vK4JrSoNkRzR3J2pNlWHMF/WftiXpbehpAJkrR8O++fLclMwO18oAMOEHaJrr0QK/oRwJ0SmygAYO4HirVBu2lNh4rR4OwOfHLm1Y0m1mZhylmeAavbpkJZFa9C1RQRI1R6+QqQ8BIKqEWwuLUzMTKhqpMGchZysL3VFj0RDoCtRB1hb31L2RaAGrKFbdXsWarViiId6q9Z4E2lcK4EhvFDdXkQZepnIAwZTtnAERkIII6QIRKCIwK0wZtNPzJ0CgLZuMgffqvS2NmEPD1Mm2s5Z8WOf3ovXB3HLor4gi5Bi+zbAy+LYGtS2l7frYES4b9FRAvwNWhsdtlK3zZYXTARAwLjlAP5LUtE4jMyJCQM9zHtfkDAsgHgcjiIM4B5ECPTyLNPZOpqFddW56+KIGIyAjETh8FKWHO69txUjAqH1pbdlXc5zO+WRWm958vZ4/eaLL//g9/74/v6yu3lb+9x6q+uq6CGAoYMEwKjXpRgwsuIqd3n/k5/A65uuup5XmQ73zxfc3ez/8HY/UWNeuZc3bzmUk0h/A0zjftQOARw8HD4PMtQOIEMaht76/P6aSzoAsLdUgbwtp3NbFmZyhDobZWoaaRqZdkjCIsyEDMtpBYSECCxlX15sGdUQc4Bf50oA7ro7vJlXLcNYiGpbvZNhmNqUxmk4XJaZpvGnrz/91Q+/qBJAncfkNCYc92V3f/4asc1LryFrq2btON2W/etR9s+Xh8DWa63zSoYCauc6UE4ZJ+U6X4DCPYRhHOX0PF8vnsdCgGtdDodcdlO3xGnwuLDsSor7h/fn+/fr5fnhm2/qvIR5dzjcjd9/9wUn/uWvv/vq3XPj0mvtDmtVEomAQDJAxBBhyGQBQSk8gMC3xGYKGcgUGZEZEUKIgonJWUgQwwIxbo4DO4nQMI3T61eCcX54nobSm/Z1nfu1XtdtX8pZGHIZM6jOC2i11qO283GSCLDWmV94WwAQ1+v0ZjdNcL1v1hLA4GpkDoDmxsBBL66i5+fnw83x6d2ludy+2te5Pz+u0f0X3/zwW390U23WjuNhGKbp/t3Dblr+h//xf/iHv338q3/z3YdTv/v0cH//9PhwOsrtZ3/0d/78V18IhkX15QJhVXWuJ2AqA++P09Py7j/9Tz/75v39V1/Yd1+Z1X03xhT/4l98q40Q8WYqp2v85NPhl1987ev0/a/eWYXdNEgevni//m//8+8nv5mm4c2rfveRwDv5dlmTZSLcHhkOjgAIoWphDtk9HDAQgYgtgrZiXZLm1V0TpHBwjCRCwkjc57o6EIS77ctuDa/W8q40x0u9IlNdW4S7ETMdXh3ff/H9TqY//ZM/oaH9+V/+Kg9EMATFm106DPzJ7eHX353uH+vN69t/8A//4Otvv6bun37y6stfP95/tw548903798M/Hs/+a1v//TPhfM0pGG3/5tfX9jZenTrScjMcxaPSAyh9fXN9Pxh1grjKApVe4uwcBlzypg6MmDcvH79N3/xxb6lJPz04fEw7lyVIffan67PmPjx/oKU33y8R3+CbvOp/tVf/qrOy5B4P+YVWJKMefrweP3JkNblGcLU2u3bW6cOQiI47YfW1mnczesSEEYhA1+Wc7WWx4yYHWseqfXVtO9f7+krVrPoboZ//+/+8Yf7b7Trftx9+umnPzzPXA7z8jivS7otQaG1a+Te0rNdMJ
Ddj7uDrmvrNaVktYf6Q3+fCsSy6BI7zgPmpStjLPdPy7srj4VLojBearcor8c8po7w7v7y6jhe2kXOVavV2sZd+lEyjc191puGBwAhUzggsYMjMKBvFPTNggT4GzNsAIKHASAR44tLLcx9G1NsFAZAMI+66HeP189eH22pda07m5jIQUmYiBmxukIEcgwjr3U9loMDIGFtFRmRIOVcW3tptTUHg1yEmZyYiGzLiRNDuOvS3Uh2ROQegAQIvXdiFkhrvYbAfn9c177b5XleHQgAmcXCyjR1tfPpseym/TT02uqyYmgu9MVXf/bTn/whIrhaTgXCiBAkIwUS5LLXru4wlrxcr5e+AFhYiEuAae8AoL0KU69VJJspQZAgCVLHnJO7Qrhpbb1p7yVnIKzzyszjUNyUwJF9afMO8pYJZBFfjQwFeb4+5Jh6WLkp83wRStP+TVBRgBFDLcxt1SqSzFxADuN0WS55LOERl1k4A+acctc1p9StnZaq7sjYrYV7bZWY85DXec1pGm5ulzp798TE4+huhcXUWXCUIUmar8uQkIAIyc0QyUHcgjwkDZ+8fvurb/6K06jrMiTp0NUWDM9C87IednLYj23up8sjmInFcrlKwpSCkAL8uC9CNM/1el1u3hweni/QOQ/pkzevPty/D8D9YQ+Jf/3lu82qf3N3MxBZaymX7Frr+rys+zGj/LgKrA4ZzPD+bE01W3t9PAg4AjfvEFf1OAy3i6lwKWnf2wnTTfh50Vp9xoDRd2OiMWVC7BDuNZMTGxITpV1q0n7IzKIFvKW4zHaPAcvzgnaydX5oSxEpsdb5iQcphakHIWMOC4sex3wkPkyHYXdQPc25uF+qWVQ1rZYKUpaEApJDmhADoQwwgCzN9l2yQkJ3dIjorefgjmroXbWu8/n8rB0FC9eYEH5//7oOa+eYYZji/IaHBGlAH6fB9nGGc77B027oi2+FqSXxKtZbfXr3YfzsRqYMmd7ubn7+6Wvc5ebYcTQ+du1RWy7lcv/IHaFz5dUlLd6ljNV8dS/D7hMZ9nlKGCUfMQ2QgPN0eH13d75vUeeuG2W0Oz1X5pxcaW9RcsosabxtEMN4SGWXndBU1VPede/BQuCmqOFDTkyYEdNQ1rYwQGK3tmgPloMiLA3aMz19oIdnzxA5gU/UMTkgBQlmRgkgQ1vMkDdvEWh023RHAwQSxBSA7hS2qjV1RAmeZChMSSgRKmjvS++8tiAcJAiXdeHWwjt4b9cFtWttANTXVa0VGdzQjIqM3AcM62oCDgAYwkCCFGEB2jooCRETkulK0ZnYNjIHABAWYhAaS4EffdbajZl3hxtAWNfGiZmDk4hIRBB6aFuWtbVLrUu4EpN1ICJKzIk5MVAIBCK4A4k4g3kn2JK6hoCUBrBm4YQJwyAg1Nu69BbdkEKwz6BCzG6mAH1t0XtKknIqA7MQMASEGGQGysyJSNA3Dgazo0MQuNyKvJX2aNoohzCDUTQJIs41TFGy+6H1Vycuj1Gv588+/pkdP9XHL8cEK7d558/FqpC5skMhFnN0DAPjUJzXeHIIVyVPKdhUu+W5MUDLQwJbsJ2pX3W5UqzjbooOBAjaNkzkVrukuqV/hDBcdbtkmlsE6kbyEt4cB0Rs7igk46hORPn17vi8PgCm6TA4eYL2EWJ//8jHbO+XNdrTYm0HN6+n68O7hGTdC7G6Lt0YQLW+bCI5AeGQGUFGUA2PcCGcUm5QGSG8UoxCQ6OEjIHg277vXVyjW/IM4RDETMpuWRrsL6on15+XXY579PmM8TdoX9f+a7bKDKACINojiAMt2IMVwRAEPBwNwBAjPAeQCzqZUVAwJyRRc7eOkQzCgXrkZONunT+/vXVKsRXHQUAwIiPo52/l9rv+TgPlpYxBO3SNAKDApa8i6d8ZKhA3FCMyW4SHMxIgbZlLdyciQrQIZnY1BGaEqlU4/TgKVwiNCPPGJBvgBgIQyFQR46VH6+V+DxGgZkTsYVvhzIsegbHlBQLAATZ5Ln5TzAYYSJvs+KI7hNNLgM4xELYXgOq8IEbO2BQxYJkbh8PW26FKEZe1eYytG2SgnB/P18NuWq7m3SZOzXztJgKErmAo1Al7kHlzVFfLQABBRHXtanC8u/vwdL8v07osmBMiKUGFOAgF4LWDQgxCba1FuJsPSXPi3h2AhqGctVdvO4ZMro4YIIBKGDJ8cASSKfyqvs+JADpYIUrM16WVcaBAAmDAwuiqNXTHDOEhfG0XAAzKPWLIOTwCdEzj2hsxO1Eklsx1rYlIuzf1DfbkEY6gpokSEJkpEiORQyAS/FhyxkQA4KZEGBAB2xQ/mHmDT22SUHgQsSOFu1kXyRBhvpnRAhHDPMKIZXtjdyfm2B4YES/la1ulDDgCOnjEJr4AEG6Of0AgJHcPBMAtRsfb+T02uWY7xiMQvihKP2Kz8b8kFaVcRApRqKqHA7BZB/ckRXAUHgM7A4FrbyviFmer3pu32bUFYiCaKgITJSdHBKMWYBAKvbqtc1vqfJ3Pl97m8/yA7Ewh1qSk8/MDikuG03zNg7x+cxN6fi3a2vP87v0v/u2f/dbP/7hbg8x/8A/+a89t/p3/4O8/az8/zcdEsMvpdlcNhpvU3bqgYxAyeEkyBVIeorXuwMQASLUvGFByGcehtWW+v5gpAdRT7auCOSCGe04JJH/44bu827/55LPWZvWsCGUo3k3r5l10GQsQMIFtdyoUAB+nzCTu9To3QChlcoJpf3h+fmq9HY5vl+vystEG1eXM6rW2X65Pq1diatfLeLjrazdfv3745ngstq77vF9mL2lfkoBHx35aL81iN91I0NqvblH2d+NwM9enhNR1ff7wtS2/m0VYSrM43N0JUUATTCsbQ4pG63k5n57W+fp1fLn0x261rTpkMTDJ5a9/8cWH5/PqvUUj8ub0aOVyNgpEAQNmAN/iu0zIWL2nRJwFCNwhZ+RcMsW0z7vDuDHi52WdMnUzIFjXJoJE6BrCWBKoypvPfvLxT//2OOxL+rEuz9uXf/ln398/kHk0vSkyJuEI3vRRYQ+81DCENKTEkDJKqBHMrbvHbsTLdRmXqzZo/DsSnBjNIsBzKWaboQ8A4O5murvdrdf1eSHBYUXWRaXEd+8+/O2/+9nd7a7ODN3Pz5Vx/PZd+5//L/7ff/DbH//yr07PfXf45CiQ0NvT03d9/aG1OIxZdjyv59vb/dIUEUS4usV17Wr/9l//6meflP/Rf/dv/x/+T788vc82A0VZl1hXw8Doq2j79lfn/9Z/++/8n/+Pv8DoBdO165TL9ZK0SyOYr2tzbQ4QwqmwBxJZeNe+5RQguGkwkYYzEIVHgKkxM6hhQABBEG4SMjoCCLODRbNUStdIuUTAsjTvRoGJU+8nB97tpmr1dO0DSyqptzYWUNP/5H/8P/hf/0//csfjuenlYUkT/PSzVz/9yeHP/+yb49tX4914OrX3754uzxqNf/huXlZGL+daP//ZIa2ncVoTNeHUavvq2/r6djdInNd1jcCS9/vSuyngYZ8SxrKs5urIDuToTVtJg5kmxFB71
ssf/4O/c//1w3QjDJwoYfi6qms/7HcKa2AMw24v8v756Xy9Wm/gBKzVU1Wfkrx7vFftKTGJ7Qbz5Vsq+bBP356ePvuDv61g18sy7EtrwZEkuPdrVy/DLkOJ1rz5lHOfl+vTnOg2gMswWjhFCHN0H/eFSM06SbFI989zSrvazgTrRx9/dJ0vYdHcWr06pCKR98Xb1SFhknY5ke/z6KmUbr7bZe9weTqz5N/+7Y8e+v3HH796//U571KtUGsvGQrJdNy9vk3f/+K9KhOmRe3nf/uzb/766914BwFlml6EogAi3DrvSVhEeoWwHsjI7K7h245ItLnhAQjc3BDwpYMD4GVnQoTwLb3v26tbZ4d5IRpTul7am0NRlL7WxKWk3L1HADmgY6jnxNfrWngM3YbtjsI5lPilds20E/PmqgU3tYpISCMYeFPiSHl0s16XDJzS1E0jVFJxB2EEpl6xry00IqLkZH1252l3QKJa5zKVsg8UfPjhu7ubOwzURW3utOPz8xnfencFc+INXapIHOjuIWnkXHRea6vdZwjIw/F8fn/cHwHEojMPra1EFg7ort5pk5okL7CG42bpEpZSxnltpuHowegWunQlVAQkSFm2JKz63Bsghvce7GUsN+nVt/ff5AoJZa4mSVZ4Og4DOTJod73bHeZ1piBQvNoizMR0upwyU1t7BFpyC4+ubn1eV2JBgqXVfSqceKktwtMwBiQLQOHWOhcOBDcoOd/e7p6XRYhCtewzqa9rC4csRMPh/vH+br93gJz9/fL9tNuF4dzNgBLi4/X59c2dqt7dHCh8GA9zLPMyf/z2Rsy//PZrR8zjuF7mtvR8l8JtECqH/WF/VyI9XNqy1q++f++mZbf766++xyJVg1BuhpI4ressjK6WmSijya7Ny/P1tK2CdW313J4f21oDCJ4JglfupEjCaWQh7ExVPbV2YzKm46vVA2QV+64+/8rXR0/72N8NaJkQcAgCHgLc1C8B3e29z23Yv0ok5+UR45tdWsE4Un18uvbTRfb7MVEpxfTmgnU3clTMLFQyGjKPQkOwUzGglgcrU2una4spA2aCnKCMgGGIFwaaZLQ1DJgAKCChDxRqazAkGQoa2yoioJ0Rk/iQhiWHu0xpmsqwtOVmN90xfrg+s8eAOmYKjxQk4E0Y3hzf/PzVu8sH7LzBU7LguvbzV9/ffnqXd69XRqWWuBK4UFr3H10Xxgnt7GkUSc2vT321WZWng7IwuQGm3UcTlUE7OA+7AWTImcYgGAvocDOViFpbc22LNhfW3tMCk/RRMYGgMNMwHu8cywHuDkFmADRoh5RSSa/M1+7IMKIBLhVTHmSPqHO/eFdwz0wUvTmcFl8ucHkC7AmQNUkeJ0hFOJujGVqghTsEIKoraA817wsRMTG7BLEDESBA8uBIA7I4BA8OLAHgZr0tAu7uUc+GCVyQIvuJ5/f9evF16XMDyEEDUEaPBCvEUB0TU4oBkQEgScGw6HUkhDDXBkhda063RpwoBSJRhKq2a3ImQEE09eamhwTjDn4TQMvIEWGNRIYiwQ4QLByI5hTE4RDkLKNwr632XsmRyigllTETSbiR+ZakijByyijJFE3ppT/IIhxCAQOCQbWrgwMHAQBBxPWZWFMeVGnuEEqMQoApSRoyIoAAOkg3bmtCow10h8mjA5s7ICOA7Li8Tet9kS8jCCiRMXpGGCJ3F+89db/rJb+X4/mVLR+NeFPPWua3onZp1+Nrvvnp9E35vvIGFcoBK6AFJkQ3XiFxgPfeWURCKQ+Ht5/a5bJcvkuhBaCd4fXhQANcYnEYMA+YRKO5WTiUlN1ig6AQgrozUYQTobkb8haAMjSP8ABzNkIaU03cNEZs5Gt38OAV/dlOn97c3eTc+0J1SdDffXie605fTZynt29+cvn19612xoABTm3eDWMpRXH7v8HaewEk8onh2rW2SKhOK7ClXBTMonUhTAWAERlf+lTQrAF0DvJw4cwcYsZWO8KV8kKH705f/+wG4zb++vrhK06PLVqIgycIDhN0g1BAQzduRoAMpEaAgVv5ik1hDA4owaSwBRIdwNXVIlwwkAAo9/o5D2NOykTMsc1HX7Qg3uX8+2+H03fNIhmCaXeNplHN3TwQuxv9eEneiL8/FpZBBGKgaWVMiKwOtKk1rhHOyBAYAUw5NmkngpEjHBGYZbOlCLNtN3gE07Yl+tU6k4B6hCEAMWHA1psGG4zIAYnx5bavBIwIHs4ACGbeERiBA8jNN4ZyOCEiWiAgIqhZQDjGRrAOJEIJ97X1kkszR8ROYokeGz4uwY4fjYeUZVfYL+dBJoxYunvITdp/fT1R4YZ9DXegTb3YHQ/fN29JVuSrRQWuSidiUa2+rgHCGT0USYEqRhFGD2SylJRYoF9rRcyFQsNHFmJR9THRVoabHJdlpcwfaktSjswHIrVaIhJEBQfCRDDuxkvvwNxNzfpBUrirZAUoaENKl94Dqbr38B2QuwGye2hEM2fYAOYkSMIcdRs+Ujhs3OgI9EBzIBbcWuURCWn7uMy24xxuNh3fnnUQiNvzycPNIfylPYwxbPto44XtSeBASLDVDL9YkTYs+guKCIMiNunwR4vQxsiOl0b72IqJrSNygAMZoRC8FKLBC/V6M/ujO7xQz7eR7+bAg5co5r+TilgSESMCyUshXmtBCZA5HFRboMaLBcrVmrZnb2eKcKvugZLNSM1FJgN2V6Sw1l2r6+K2WD9Xm7uuS+iMC4yTR+/9sq7XMe+/fvfV0h6mUA777Z99/nxa5uvTv/jn/znUdb8/vn7zUzU4X9v+zasy3uQZv/r1r5u2AUklaW1QxnUxG8DA66URBaWsS13ciRMlRoe1VQzkzITILK22+Tov5ysE1GVRszFnU9jqupio9XntpqoP9/cV95mBmJtjrI0RtVZkGMa0RU7NwYncmIJVFcEJOnIAMVDmsg/k83wZxqTzvndNO+q1SozdsSsIFwa+aq0Q01T2aSQqiQw8wmGSm8PhZplrGuA47c/LWc08nCiVIgqeGW6OZTUHtA/nRzM7PV4Eabe7efrw/X5/FML3P3z4+ptv3ry5uZ6eSxm9wbyu+9c3CfP53TNEm/bFmk47YWhfff3rv/zyu1NX7d2Bg3CY6NWrwU6tP9dAB4ENuIWEhbh3g/BebX/IN3eDQ4z7RByIsFxbTrDfQauVCQIjsXqgawcOIXCHtcY821AoLPpKON68+em4XDrvUqay9ghRZJjP11sqhyGPCRMSg7u6EyRiIlY3jHhclBDGLJlx2A1M+5EQke3c+vM6pT1a4yyAnRAhwuzfy2ICPDxfhzz95PNP5i8ePnz7fn/zZm7Lq49eXWrX6j0qYmnVqzkTU6Rrj8j71t8PU5ahnCz9V//47/zFf/GPFwwVnK/LDrgkGvN0gSZMBG7NA/zN/qinNX30kV7h0zc3H37os8GYSAAv8/XVzatcxtaH58fr/+f/9t1h2t3e7K6nxoruvRSiwr1rr5GHw1dfXjkTS/K5egRRMBAgJk7VLABEJAi3PSw8iIBeuhJMtwAzbKfZMHd1Y8we
zCw93N0tQoB6QGZ5erikobz+7PYv/vkvEXbabXaXpcNaXfiX709RXv/Df/gf/V//9/+Pec619mE3mNr339+/+dndfX3AAQPwObzc7B/v63Vur47HzQD57Yfr3VD+2S++pzK0FjLkpv5Y9eO7ou16c3tsHQxo63I8Hnb1fGFAZvYe2lsizsNBpIRVYFb3N3fHxx+er48XrTWlKOPgiE/3c4B5vSBzMAODo+13ZciTTvT0+CQo/drHMXeAMNiS4df56hgf3j9jGhZhj9LWyjlKkaXWtfaRxE13qShqYOpa+3I1rYQRhih4uq4oEJKYIADDHLz/5ONPLqcfQnsaxv3u1nqb+1PKDA6tKSE79FTIgNZ5LjAdd7cP62nE0siH3bTP02W+pEHyze33777nBo50fP3RQ205yfn56bnNcjs+/7CkYYKBXh/Hy3x/vfS7u9vL9x8+/+zGw64P968Oh93N+PiofV1/XAe0oavCfOvLDEDc/IPhP5aDUWwVsu4BEbTdQAMIwDHAAdACwg2BYjtnBZgZEQYhEu2PYzxfc0nTlO4f2/H1jaoyb62gYKY5ccoEzsNQSPi6rpmxrX0YaBjysixm0c0QOZfi2pnAwwHCvbeGnNmVXt4w3KCv7ULeKe/AQdUdTDUQsUxja2rIEW3tNeVxWS9NO6dhPJC2ioRlV8Z1f57Pu8NtLsf3y6OElWn34fnbj3/607q21hox9LaWMqg6kBCxhq3era+HaVqWuek87g4GZg5M8jLGgUgpdTcpIzADwryeS8mn5+ebu9tam3UsaQd41ViRIeXicxynm+d+QeZeKwEBt7VdCifViKD9brp/erq7/VjJG8yDW6tuLmmU+8u7N8dPL+erghIEuZk2BFq7Jiki2d3KmC7nK0kCdwLOOSHA/cNpTKIRg0ygAMAIjBAQZN3rcrEhp0GG3S6gD7lYQ9NoraF3DCrj8NGnH/3w7dfuWogjiCJe305jEibhhHXtKOygaFCkhPdP7j7CaBSA6pd5EadxSt146daeny/X2cP7UsMcAJ/mZSr5ILIu9fruvqnf3BwvFzpMw/dP96fn89uPb94/L8j+O5+/1cva11aIhzG9vn31N19/GcAYDg7j/qX7qTlcFn+a/bnhCX1gMmnevYXmxIecGGNtV5BXkt8MwzEPR8bQy/ekAcBnfdd6PEY+jOmWZSqHEe9aNMOn6/Vp7XUoa5Ic5+chj5ZEVxHZL225tv7cVNVfH4a9RKK8QiEecvIs4hCccJBhNnJb9jc3eZ9bqxo55Xr7enw+4yiJzSDTQgFUKDGmofterTWg67o2heYbXoCIPAI6UEIgIEojAHa0jj3QyCwjJMTAdZ55JRxYuOQagNQS0dou7OjQeVemT4/2b997s4zZVBkFvfs8X755R7dl+Ond+6bfPZ/2TfbDzuoFe2OnsUzLrDmnxLgsV3dCa2amgMrp9f64A54oHOTSNMiFWbq9KsUzrEDuWhJYOHVdQevlnc1Ps2mRKXEmKsIpHKax7DQ42tL1arTILUoQlcQAvSegHQ0S4OGrrmbVzd3JSbRqAaxnBy8iwMkjMIA1jdNhBxTYtXB2Nwtq4O7h2lUX1Ia90Tp7KFAyzjjuaZiAMrmhA7FYcNNKgJvLn0nC1eqq7aKXE1KiwmatPn+fr89era2OeQ+S07jjPPbaNKBqMIl63xGp9TALYIfkaBaakjC5mmVkQE1cMMy0u17Z3V23a48juhsRN4sIOuyGl0tyYJjrMuchobv31dSURYZBLaxrqG8dBOAuJIDGmSUXHibIiTGg1wgz7+AMoRohxGEzEiFRYtRo7qZeCXOYuXsPcmBVB0Co5t17F8CGlDU4pZwLI2FQaHQEIEiB4GopkSAH2UttD1KAbdxZAkbUIfzNIN/Wbk6IggAaVCHcMZxST2NNt7IDVR6kiTkjSGIrg3q868B2+OhgfFldIQlmcm8kY8N1Zl3L0CCcPA+7PQ528fnhewYElsgCaXTg2XpSDcZL7QnGxXQamBHBQM0SSbcWtuFKwhkRQLVvQAFEAhAHACE3MoRIbEWe0KdXB1CXlX/+87/zV99/KXd3Ln4Gj9MTns8XbecWl7pcxC7v59bPw9prqwMPDI4QDILB7oy83WQxp4Rmg/DEOFGoWZ6SAwAmdEzkATUQFDkhMASYMphZJyCDpAGMDAFqwcS9GwIRQe0LlRSZ3ou+G+hZ3RklOmFI6wiAyA6oAIgvPhmIH9M9GEDMBBnDX9rpt88Ng0g9DCwggLIhFo/9uv70+NqDgiQiCODHkigiJAL/vTc3v/j+4b2LEhICcPRq1E3Vnch9+/MDbGCXl6hm/AgdRgLe0mFItIFjtv6G7fRCTO6GREAYurWev5SjWxgjmW/9rgBI8aIP0salBtpSSGEe7p1RAAJ4S/2DQ3jYxqgORACK0BejGYp7AEZAOLxk3yA2vHK8SAfmwAhEKSfCToxNdRz3Ndy6mrsFdIdzjfvzMhRKGU913QkuvWbGbi1RUoaVcYkeoebGiNUUXBCcKBZVlTzPnUMBwUjO1g05S+JKaAgQjlopWcARMHsA0qLmPC2uOawwR9COcd2qMcCY8bLUaRraGskj7dNlWRIkjGhmBp6FGL236hiO4eSr++KWkAbmDMCIMhRVvarrsuAYa1fLOUt2jUHGpV2GIZu5EBaRaJpSSoTXroQabq2tHnv36F1TKgFAzOEeAYy0+b8MX1BTsDGtEQAZgdwVCTdpxlx/E/4ionBQ7fHCw9qmsC8/bK4RjhsoHclDEQQA4ccWPCKKF6Y1IJFvAcPYlKAAf5n6/iaS5hBuiohEsi2r7RRq7kQMAeYuJO62nfLj38N1/egqSiycIsJbJ2JDFhkTCyIxcaBX7w5E3lu/tHqy5aEuz4mpdwVMnIoaEWQjtSRMZtp7u7q21mvtV7e5+lNwDyQplhERht57EOX9rrf1+/svj/fPrv100kz7mHicds6+uzmktAuQrnW+nM+ni2lPKTlgY6rtHIhOz9ZRLthVuSRkkSEB5iQoshPJaleESJKk0Pl8Wq5mpoSJib2rGqSEZWLgFIqY0pDH83qecknlNjyfnheHXiiGKedd1qV5SpSEiBNLuIfrOOy8Y3R1b2a6qvdlNUfwvs6X/f42D1mgAds6r2bcg019FB4O7Gp6XYn8dsjuNHI5XZ4B293+FXW+nD48n+9vdnvXWGBlxsOxJJTw8sO7e0ysAZfLOY/FWmuxjlPOo8ynU+vt19/+6uZ4l/aHus7H2x2kkKlMUykYH6VBxt3z8/Obz3i+rh/ef/fDhw9fvXt/er4004uGZorkYK0INcCnea0tmEIIzEmblpLCoXYDRFAgZlV/fJhZ8HK+IjkhIVDPASHmJszA4eE5EUT0rplJq3tjU1zdcBMxeofKKaUMI/fA3oIFxnGbtySBLDxNCUwzChCRZEIHJEHksAjvNRytuiZyGWSUUeGCKEun1etOtsyyJ8kRHuC/qQME47uPbz67uU3T/v/+9T/+o5//ffP5V198nRMcMpBYvhvwqvOjcRKe+HmOf/k3f/nRJ8cvHk92r99
eTr/6DH7nj3/2//y//MsBJucorwQizpcHM7VIJcunn35yPl3LTf7Z3/5b//pPf/X/+qfPQtPcA3dpaW2H9OrmNkF6flwwS+gwpNT6ch+zma6LZUqGmFKeq6FFTmncDWWQ3qBCCw+CkI1d7V0IN9i9mge82O0ZAPIGX4+thhOQcRtYQCBt/6C7AoSkCO83x1c/vDtTObTgoPTtaXGHcJPM89OShyLhZTfAtcJy/Yf/wT/6X/3P/rPdq7c3MoBwKnRzm7DUP/6Hn3zx1fftdvi3/+axpF1gpIG6Lp+/vj0vi60hOX/z/jJM2aDud/tlXtfeLzN0gzENvS7L0pDQAJdLZSBTdcNpGEaO2nvX2Ilel0Xynhh3N3d5d/udzX/4+z85n5/Ozyc2m0oyykAxt85MMV8Ty6v93bLM2pbdoVzXFhRPdbmVW+FEERjYqw37Arm4YV0ipTztpqWruQWom/ZYV13APJFVXy1MJDESEE/jeK4nQj4cjkufdV2BWc12Od8c9qf1HICmvtQ5Qte+Bk05WLu1bjkPSNnquk8SddHGTLheG+fMMlrIuD8cbu/ePX3wCnrlYXp79/bzdX16tafraX56fjre6C4LCxlPq+JynTMcv//wYBjVW+F09/r1F7/4FhKXIS1LfdkSGMG9JIntVsTMhADg6kBEtJ3UtnHZBnVECKDAbYyAuNW8RoS/zOPCMDAAkBAAXQMJXP3uOID75TIDUUnJPRDQ2lLKKImX1qwbhO+EQ3A1L3lqa+fA3hugIyR0FCJGZnKPjoDjuHt4+jBlDjNEcPMITUlg2IXF9fI8HfJYpqaBGLXOSQSRSsqXc5WEpoukPE1DX87InlLmJG2ZOcXrj9/+4i9+Ef44TpNRX84rVIbBe3MmVgSR7ARCpcNlXddhBDfgrWghyjBCXxUNsVV0tB6cMIlE2KqzmzKPKY3dlpQHJV/W6+3wafRVIM3zVesSbq7o0Zdeb44fffvND5jyRImZLUyEM+8i6tLr03VOsavnpytcX93d9tqGUrzydekf3709X87XeknD2My9NsDkGNPxAJ7Gkr759utyGDmVIDmtp8+OH/dYLtdnmYqq9d7HCMSobTVILImDZCgA7fp8uc3H1gwlleGmoTrUAL97fXMsw4fHpw/vv19rG4fp7ubNDw/vel2mIevae/jrNzeQ0zyvbnQoe9Zoqk543O0qugHc3I3rsrTrPCLOp2evbb+/HThyOA9jd/PAcbpZ+nPPaW0oQgA0lJLyqG6uvi4dNDLy9XJF7R999PH5NNfevv7w3gFLkeNu/N6ehmH4jbfOPCBlkKFZ3M++qELioeyqY1UIt85M2ScpN3lUhI6cypB8iHKg+ughszuaJuYsqdXazcKtRayY5tYl1hlZYjVwAG9rW1p/330VT6zipxtIpXy8Djc9T3cHmdCx32MsmadnkCQi5uull2BVxkhjpjjASOhNkViTUNmX4UDGEEzeXZvWmcNcr4Z5lFSYMjEPt4fjmAPrdV7aXFPw5HtdE8YU/e2nJR0xoJZUAtoJ+6Ut2a6HlI85h5MFX0+1J/Jp7M8rumk3KJIHMbXTD/f65jjsBaZarH8yLT5M6rqoKkIaEufkQNVgWXoz7u3KJbPiqze7w5DfFhrcv/nwfLnWaAeK3ViOW+Eo000SCotZq8YMEMQLuj0vixfjbPsJBrQSdfDqGrXqeb1cvF4EiXgkRO+7ccpwFNuxtWYW0DniwBkTnteFNWRd7Ay7TvOih900pxgPt368GfdjAEKrGcKooUeoq3v0pfbVdUVtyVpEKItxkiTYwjgKFUBCYoKSQQpKrytjeGj16L3ZfKG+eH+Y71ewDvO8XNdEk9rIMkI+UJ6kDI7JPYH2ZEZ6dVjE2YBbax0l5ZuAcAj3a3CkxD3EiVM4hSK4elMNJQx3CaCuPCRIGVgQXlxFpsDhIqR9RWttmXvrZiTyYxgZNsGRDruD+RTAXJKMgwdFbwwNMCyQAtysttU8DLWgRRkCxIAhOEiAS0dGe3F49IBuwYSkBACLqkhhoZIH4cQipbCwovVSpLeK2rg/Al1J3EN/BBhbBGy5HkRipiK8z/Ca+IdznyNnFAbWvqLbJJkNtEtTS4OtMrd9tmGqocWUp54t5NyPKUu6O/M3SrimtCZ0aor9QrhSsmjnqO4pK0g/2LxzPwZ91fRaC6JjQZowpDBjDFn2x7yuMwDnrXiRkIMCIBwdtpw0bd3VCTEwLEK7MUsYcGZzqquV28N1XjtEUrl+/yEPh+l49/FP7+b795PW5cMHTHG9rDYApJVBzg9zBKDhouuYKAJyEmFcTVU7JUglvWzb7mgGHYSIGZAFgFQNyBGRmIRlw/EKUbhh6BaEcQTkvF13kQQDQyEhIs8s/dkuX3W7rxToBADqCYNBIhIAEziRbVEbhjDHlzAlUCAIhQQGMFLS7Yaxgb2ICcIcnShIcoefUp6YXBJCIrCXeA3BhuJFhNsx/c4r+eGby9InXNeCkZBJoAi3iG74I7YONlaxu7+kydwcAcDhBTMciNuJx7cKc3dj2rzUAIFM4hYRAIQRQMgI6OYbogUAmWRjuwOCWQ93YTZ3odTdNt/QhpSgFzIxIQQjmxthMJLHJp4BAuELv+alTsRgCwxsObiQJBrWe0fEJFLP13fr+vHrWxGuy4IUTEAW7v5qLPscNwO8b21e7G53vL9cmgGQVeHniM8IwXXiBIApsCrkEAZ5qhc+3hBTTjx3m8NNvWBBSoACAGiWGRe1nIpac0JnDmBFqK2RwEBorTtSNQSipXYzv0mpORGBcHQKRaKIXNJc9ck6YgBxzmkBPAMCwGo9Zwlza5YZzNBjJcGztt2QL6ZpGrR29EjgtV+RXHK+XuYNyK8WzDZ3pcQGEIRdnSWrwjAImIf5pr+5u2EAApGEbzk+3nQ89wDYSmDF1JCAiN0DgZkw3APDw5kkwgGQSNw9torqzddDsqGnAwGdNyUowiN44085uJsRJzOPcOYUEWaKSES0ZYsRgYg2WvZGzrYNmg7kgREOERbb+mCz8O1L6z9alv59qQiRPNzcEdG6h4FrWETO7O7dtHsYYavL+fnB9cnqB+8z+ourzjCB05hLktS7a6zhVvtpXefzdV7aZRg40joUqbNHENUFUHblIKJrvagCwrysz+X46vDmc3JykHEccIzu2q4VUEFQxgEjpAwYwYxJEg7oyCXT9XoO991xn4eh90DKDgBeTdXUAb32C6PYRbvVYdxnOkSkaTfW6zWo7O5G7XO9tjCg4OpnNfO1OmQm3+8ykWCAu7brzMRM1nsfdhMidg2MaHWuS13nExC1tmSZwiPnbBGcUkCYtrqcta3zec5pN4675gbWu/a6NOJcl4WRAeSs1ZTTML4/P0PXqmoBcBulwHI5SRl6xavW86mezufTOodrSXgbo0RBbIpHB3paH7784YvDmxt2+nu/80eX989a6+Pzt+NhWK/fn86nxPb0fDk9PSlDrf279+dv7+cW3N1EwMDLCJg0cUiC1lQD0q4MNakZOuQsrmaBJJgEzSFlMdUI9CC1AMScE4AT4n
KxYScisrQGIJfm6DRbJovlAtoDhXPmUhjI2+Xx21//y5ub1/1wmwy793meH796z4EpUZgrdFc6lDQwXXoVTGDKiXLKqLGqSSLv4aQoaVWFVqd9ASfJO4Sb5iEIiOyBmy+C5WUxIJX5Wf7iiz//r//3/6OHh599uH4vQUykAX/xy28++vTjL38492ppSr3W/uTjjv7uP/pssnT614+w1t8+To+//OYEMKadd+DMvevdcXh8uh92Zb66a1xO17q6dvzTf/Y3Aww/+8mbX/zlt3knueDiDsgRCEzN3chQ9NRpSqKt5/1wXueAePVqxwRmzg7Wz5S7o4PvWcS8BSEEqfeIgGAiIoDNV4GIBg7Ipg4R2za7nU8wABzIA93dNOfkvYKpO5SM467kwpCgzh3N3324ZpfIIkyWhQmYU0S/G+gf/2/+l7nnBLmdOiea9rmpf3hwRrw8Xb/7LtBbjh077o/77x4fMMmHp5PWXqt9/fXDMOVmuj/ultrm2nZDXk1Z5OHp3KpCYBAxk6Mfj8W6zL1182baWjUikVSGYRom19bb9cPzKkN5ffv2V7/880Ma8oBdwczVkTwgeh4Kefru2/e7XT6dzq9e3R1HmpcFiRFxbctAcLPftdo5l3fz/PlHn60P5wDwkObtfL4/HG5up9u1VkJYdXVTyWR1ERJE6NrrgruU1Hq7nFTr0/2HQoA55yE/Xq5L70MeJA/qDRnLUGTrf2DZIIfPy1Ov9bbsRJJ6DNNNqwpg2tswZu/9cv9+ff+eUajsxkzny3cUvjzq6bTejK9KwHRzvF7Xd+f72+H27e0Bw0Xsdto9vj/1Mhxvx8NdyUW6+rJcf7wkB4t0bRCAzL5Zny029uZG1tuARA7OQIgUsKlGHNYBwPyFCLnJQxiEiFuwHyCYycFJaD+keq0p5WHQrlWQPIJyNoDugcBbj61H9KaJs2MYuoX31pk50AOdhN16uBHh9mha13qYbokjQq2pu6MjIaFIydHXOcIARmIRKgiBxIRSJsawZV0kQc7Fupu3DOIQd6/eni9Pgvzpxz/79stft2a7w754mU/91cef/PDhw93dK0ps1pdlUVUqmMuAwG41YQ5O67oM4hjRtd8c92udFw0AsohwE0IDAjPtChi1XdM47Hc7YKIxi4TaCu7C5BCE8cmrN62fPALVoEgzE0qEtK6LgRpFdNtTTgK9hXej7fPTykLzdWspZSYqeajzEkjdbBxkWdeH6+X25sZJHVPrcHc4tH699utlue7KEGaI0XrbzGWE5IBvPv7sL3/xZx+/+mh480ZhRQoLv79c2CEP9Onbz7/95utzv5jquBsKDxDx4f5dYkLmIWciIpTosbZ6OB4uzxckzgSAEu4AsR/Th4eHw/FQjoe6rvPT5dNPXn373Yfe17tpF7OtS93vx97bfHo618vx5vjJm8P9uw+X81OtWut8mHZra02VKebLguRZ0vzDhzAUsHWpJbN1f7h/zkniN30fCHevjhfwu6TPs83a1MDNJ7etdMR1RZ+HUfYpiIhyotqktzBPlPe7uxpyrV3bMmPLDTNxh7i29Xm+OqXea+1L2S/720Jky+U9ucyrPF18vtirTOvan/JS5MzD4e3ruxHW3C/u1UOZ+54RoAO5wg6BnMUJmxsQBsR+n5iyydCBES0Q197Me7M1i5E6SxamQ+Zd5pu723L3+TDuotdH/2FhEIZbQ+319S3fjfzqo5wmigZkq6/XeVnAWhGt63OvyLORpMg3zDoe0vzlcyoljVlh4+DzclrxwzO93V8Q7rkzrGYjRiSZAjwAkzALpSyuPl/0aT6N++Nhp7ubmxv0Xtvj9fnD44fneR0OUJjRSyB2WxllyGXMTIEwpItWLsyaeofF7LosZ0mvSr5F7n5BvnMjC9cwcw91r57QpsQT77i86XrS5Qoobl26JLeh3vW1i5eOfkEA6cPNcT9NngbFNFAwOARFr9ZnABMNnWty59airczBsfqyYC5lP8J6IhyrRWOMYAsjIAo0awkQ3Luq9ghgdgdt/ekRmrZlwSBTooQsCXOS3S5zCvMIYOAAs/WaKQkQMqtWQhbKhrlR0bCUB0Zrppx3CdjmJyJBGdTBRYC4tu5qR5HaVbsVJH1pCYdcEgFITt0ahFoooCdJZubgQSZEFp544lTIEfPEQ2YBcwME7A2QVJs1cwAg0t4RLdAILFPqqmEAknwDXFelCEJioUhCiGDetTMiAG2jRNrsNQhqrj2wNWpXae+nYZGsiA5qG8AVt/gNATJiIJEw52TLW+kL2VNl8EyBo3fxGE2TQfQgSV7UeKWcNUwT8oidAF0yyPgwsyMe4nyjJ+51yM5hepV0Q5GR2siSJKsR8X4cXv2HP/n7f/Lnv+jYz/2S/OgY3n1PVEredZsyBQyoHt1MLboRgDB3dyY0swAiYMKAzQsA5uGAiCxAjMSXy3U43h4Ou3o6hbTbV+OXX37z/t2Xu2/3+93Rn843rw5Pp6f7pc45e3e0GupGkpimAbW2GsiJeMAEFNVKlr7UlMR9I6QAOoSHO4T3YAoE8IZ5dG2aTCihAW79FtHDVvCGmAMbGoQDOroGNE+8HD9a83E96eXbS1+IxTu78mbwBzZIEP4bieal0PsF5Q0OEBEFYkQEDUcACPNQgCAUJjMjAnMgwH2nn+4+BhoMHV8q6GNTNSOCAgAYyH7+Vv7ky8ennkdCNCOEknmQYW6LgK3df/TWvRiLfoOR3gpf6ceYzjZsDgR1RURkcoiNiehugBukGBDA3QkRiYG289GWI4NAMldG2Uwlmyjlm20EApHMtj+AuzsheQTSC1/IQyMcEGNzdkNA+EvtLBI4IJJteboNukRsjuva0G1IvKg9nOqbQyIk90APDKrm6kEQyfQtUh3HeW2Z+NSsZImIq+GltcM0VF/RIKGEBycEcOSEXQekLZOWABOjuT3E7BlWa0dMQ8hsoSM58bWbRnTciEucwDOxUiRAIcjCbkICuyTXVldTZPyggSk5ycPaHDCVIbuamlEsEUtAAmGWxBRm4ZZKXtclp3zMh9PqSK6GrWoSIcIIiCBHep6XhGDhANtAdHNpbZm/aGsTlt77OI7q3bfdazvhwsv0FBEDwt1i01uYzQxoG5yGICMK4kYnQ3cl4u1HkDDcAAW3TmGkcCPEF2sYRLgTvGj4EeiwWfoNabNPbt9ttHDfnJtb/hABMTwcDBwMgHDLpuFmRAsPJwzcsO0vjn5zd+ANasf//wG0CA+H0I4YhIpeCd10Viyupm7N3IiX66n36/X6rs/frvOFgJiyMHMaODwqEZOb67p269f5CQmaqXqA52G3D03Rq+kyJK59zpLn9ToNd69fH/70X/zjj+Pmj37/jznJbjxez/N6XViYI6SUMh1quwAiBAolU12X1hgAKKKdrlXDUhnQMawwgK1ri8rUVMFdASOVEqGt2+7w8fHwURi0ti7rAojDND09nUri/eEGEFjGto1wzXtVQO+qEU5B67USI4ggRMrFYS0JlmqJBMK6GvHAJaXxyCFtvSzrvL997ciFR0og1Bz20+Lr+Wp+xujWd
G6ah2OET/mQtk4ZAQgaUqpz3e/22K2BPsxLQiQVXetV/XQ5o7uCUUnXGrP2p+8/YK1/6yefI8fpm2+5P95Zk+vT8vzu1+en83m5rE2pr7Wba1vmlEkyU8FfffNDSqklayXmpXJCT0Bq197IYiwgjMyRM15Oa6skSbpZERJOXXUc0gtRi6E39u4YBBZIiJBIrEdPjHNtrTVJzBvvPUgr2OJeERwJoHmvvd/elLXOjw9frdYerw/ZcX8z+BS9XjxAGDMLCUpiSsgiDN21Fqaw7mgJ/fYwEBM5rU7nS+3K756ffvrZ7W7H1+WSjjdIBUM33Z2IHOjHERoMd6+HV7/9/vv79rx+/vr2l3/yw9/6+U/PZQjKZu+Hsg9bhl0ZUupzB9Cqa3/o/+Ivvos2XOfOQwbiw20pd1O96CFzu57PgBE4ph3vGFrvzS6tvi43rcn1sc/wTBnRXTw9XSplqr2R9Nub/of/4Hc/uXv1v/vP/ot0cxTy6S6f1lO0Tja2ugin3vuQ5OO3080b+dOvT4I5hNzdw4mYSJoGukdESUm3HBGJWYiAayRBwvAAQQ7viBgRiRgAkDkqCCYCh4D39+effvzx37x775A++ejVd19UZl/W2hCdLJf08OEeGV6V4Xbc/dM//cvd3a51XJb2pkx/67df/eKvP/iVPlStsN8qglttS5sHKsc0Pp2fct49z/7mzavz5bqsxmDAeJ3XaVfyKFE9ENQNgMFjzGitjrJ/PF+amhkkUBABj95No93eyP35MWeazP/w7/3hv/6TfzKmXMZ0vT4zypSHIedLm1ssr49359UH43GaPuJX2vt6rQR4M0zQaptnmMZZKzNLKol3x9uP9GmJRfXcsMDAA3UIWpmdITORGROLJNHm4HbYHU/LZZrGZv35eQaK3bT7ob8HkJvjESJ2u0MoEubDdLjUDzkVCOpL44kz7Rlxrvf7YWIKZjJrlNBcCeVwfMVSlvpDu1wTS1j/6Ld2N8SPtf7089/55hd/fVpX9wVcqj1C9J98/mnotQeMEpm5t+X13Zuv352P84Gz9VrXbin/Zi8IQkRAU3N1QmIkp4Bw1a3SEIgQA803R7MiYgSQEJNEmIMR8panh22f2xSmiEAMhwB0VWv97jidz+shlyJpWVsSyiKtK4uYKTOpKUQ8n9aP7yZk5JyESRURgIi3W6ZHM3cRzoRJEB2WS50mDu9ZcjdCj7rOLJmzJLRaL0A+5DuKHFDrMpc8lJJVbbebWl3DkIy6Vxr6kFL1NYus1+Vwc/PZp5/9za/+4u0uj0OeBnj/3ffjmNvpcToeH06ny4fn129e5fTiN56X666gQFq7AqJ2VW29VsCNhml1vSRJSOLaMVMSvs51GEbtlllC13btvYcTqFsu0gPW6/nN/u3D6T6JOOJpaUj8en+j/T5CkWJKw7mdFCmnLJ5CY6lqGINI65rRESnnYt3WtrjZtN8TWFuvHrFG7PeH1h6v87Vgnsqu1gXNxzKOKS3a9+M4luPj9ansB5K0Vn18fv6D3/3d7778Fgu/+uhVWfvarbfoy0xBv/rqi+LYVt2/GhlJu25AqHEaalO0iMC823vrIw1gVhI9Pz5O+/0kdLg5PD+dW6xvX92dnp89QeueSzo/P6L3vcgo7ntxgePxFqzVD/e3+yM0eFo/DJkDuKRsgUjQrLpDqOUxz4rIKGFzq29ffYRDj16b2lDK+TS35SWGmZk60WEot30AsmWBnNNqXl2jhQMNRcAkl7cRo/XwuCx1Jr1gLBE28DG5pty1a4YhaW19qRit1QRCAdwNVkA9r8vTUFper4IFapmeW1RJQEuir87Pd6kcd7MtT57LfHrGzbsOa9c2yjSAMPTaFFOGYWrdNgjLNKScBkhTM+7uTuFWm1coblCJfELIyckux8PHn799K8c3AbSS74+7+VrpqqM3GuDNndwcUh7cbUaQ9bzEUtvjfWZprE7SO9oCBmuaBNzGG8lHqb1TBJcEHqpWhtwfTvR052yWx8vy5PlEQYf9gZjNBboJcyBY7ctJwcBwvt3TsJxozWvYh9OqTrvdXZ72JOhYL+7kzewkAMFCckzhRAa+Mvg07QIhKML9fD3t3HAixXbt89wvgpg1peCiKwE5Ow0V8wxsROCmEhGt6fKULIoNGLC2znl48/ZVP9y0NARlr0bavbdAMQSyFcNDV6oNDXLtOaLPa9erm4ZZD82cos88NEpNQRKncExB5ACQEB3BEptbI71YW5JHrR6RiHOAK0maiu0KTgWMB8DC1udz8i6JrXVDVmvNGgIRhkhySmGEoepAzBHsZhSo3ZMUzhLmzQxYGGuzqhq2rjEvrz5583JJBkBJiqgehDTtbiIQoJhBt6a65swWTiwOadjtqUzN3UNdu2C4Wa8tAiln8ECUgYp7B/EYC0gWxajd1MIVQtiYkJk5J6iBbhZMzMJcPMTcKWMqoL7WBVoP6dLm8ySn3TSLLcRAlAjBTYMcwpkJI8IMgwiwmyHEq4y6w+g1IkMwoQglUM3kyFQzQRoWvo0gAelxlUxlKPNlDYuYV37mzDDdlTNfr9BX7wIt4KNEheKBmRLU89yAponP//rXfzUcJcHOrd++uoOH98fd0NuqXSO0X5cUJkhNu5AEkBlYN9i6awndkYjUOrgBMQYQsSNXc3YH1SmnqM/j6/GLx/e76WZilrvX9u4RLh71fHp61lF+eJrvmyk6Ny3ImfgwpDCLUCd3TIsHm0eAIGjvskuOQYKAWIgIoaoNtkGPvYgARYcqoBYEFgxA7u6OZqYa7kAE6hAuMPSlIglDG4YF07s51u/bvChHwtBGAYxkCE4aGAgWYQEcHowIwYQgrhzgXCTw6F6oo5BCBIBunN4INyWQoAFBRo9PXSYsygxEAMibRLJxn5HQN7sNvTkMf/hb4/e/WlUB1AI0k2DgfuCVw+nfYa23ZnNw2/BZEU6UMBDCAhwJzTZWIyASemw0X0YiRLOtuQwCHF9QxC/pM9zSYi/d5rw5nhhJ3dABEc2NEF+iwg7mii8kYttI2x72okoBIKDHy0gPEMN1A2xvcHRGVu9mGkGmFgiJ4PO3r//q2++/O93vp49yKaLdIBwdM8/afzKMCL50WwGPmS9m4zAguCD2wJxFw1p4D5yEra0llVkbUm61qVuAIHFolMBzq7GfrHdEHEVqtYmTIZ9C3w7jpbbKSd0LYjePBI4BSO6aiEmgq4HrjkIT5GG8tjWCugYU9oAl0B2GxC0s3Pcopp0QMgEjkvA2/BHEsQN43A5sFhjYHIBERKC5mQunxFhY3YMAomqPSMLuHhHWK2JguIcjEAIRMoISprAOuPl9XiifiBTuAbhlZV90OveIurnrNz8mAIU7bKNWwDAP3NDj6IAR7upIvLn0ASE8aEMaxfadJAL2rVcZEAMiXFg2YdXdPF7A1kiMDoAb1twBNkuREiITb2M5xCASCGSCCIuAcMcfV8HL9cCtArDpalrDNbwDYgQs9ZpZADy0Xpelrtd5OTVbH54eqrYyZKg1akUSIXQ190g5QQ+A5JDDQ02m3e003E3Hj7RqGaHX
r07zN7PCQC48usZBzB4e/uC/8t/I5RAYl/Pi7iQkRQgQiDVMUjazWnuLSGhE6BRIUVsv43AYx1J22isZruvZ2pVS3u+PZoEU5rqfXj08vnt1e8sy9OW8rEuEcSLVjoFvXn1i7ojmsWhUYHAN7XrY7wCtVrOgnEoqydxRspAvS91NN5yGwyTelAiKlbauHqjB4VyGw/HuzbA/IMT8eDnd39++HY9v3hxfffrLf/WvAnopUtt1HAuIt77uSrZu7gYQw5QfT09SxqfrfGmNk4zjATity1ym0qJhgiL5dH8+mz637my3OY0lZfG/9Wr8bv6u91NLy1rP1uqvv/wAGcvhWFt/7osFmQCR1/OlWu0Mji0GWVwbYLSekQqDBJJkR742aK3T7NFJiKRIABXkWp0hzxdTDxHSMOvuDmpNhNi99TVMywDu0NwMyRwIfFlUnYgEPQSxuiNwOEjiTMwMgPH89CGXNDB3T+uzz5eVgN1Cvd8OY2FS8xb9IKmMeb5eU5I0DO4dSYTSOAzYaozZkZ3H5wvAD1fap3y3qF5SIndHNCIEQo+XIdrzuf36u3c/+/3Pus3TNO0wtdNsre9e74oe3n/7uDzV8TAiY1v6zcSJx7/+i4vbaBCU0+E4usWURYs9PFdRBnAGd5S3dx999d2HNrdgeP3xwOPpv/ff+Xv/5p98+Ju/eKdG5AbYb/eHcKDg21f78/tvHt9/9yf/9v97d3NDFMD5m28vCYuQL7U/PV9yPkzDNFd793Qe3xwCKBTdwyMAwgMYwMGZJMDNDRx8U8vdwygi1IwBAr3ayrB1WACAhGOohzoyMZGbr7U9XQM8WKIh5P24WiXQMiSrcJjKg0N1aKZ/8i+/YJlanVuLDfP55bfvgpMDnueFhuwW5mrqIjEInc6X3e1+RcArdjVC3w3FzYTkOA2JsVo/r5fdbs+Fhlwu55lCxsyPD6fnU2vdiJmYxiHfPz0FQgl4Pj97AEMsp+u/+tN/mVBqV1zabn94fLpAZtDOA08y/XCezWza784+N9JSiiwxlPG6nAvj69uDSx6GBBFLXZfWnk9laRfKeTwcqj4RiYcj6nV9RLkz0DSm2RQU2HC+LoD5MtfLuqShzMuahtw1pGRKiTI7WuZUTTV0aWcMjG4RtnbNfejrHByCpK0db473z6fdbkpcSEpox4Da1wq6aN0P0ygHcljWeRD+7sOvLu0CFJ+9/ejh3cMwDcGS0JTk8uEZUkDQ+Xoe72DcDX/2V3/5W5/esXVIMuymHw9G4G5ICBSbP9YhApyYEJyQww2QzDogbyFoACCE7exirj/mtAn+3UTPX2bBEFvZKIlIyudV85DculpDhNaVU1KPVU2KqEdv3Z2Qk4GyASMurRGREzgGglkEIDDTy/gPU63wxfdf/Df/0d/TU5WcoKFqkzRuZmESIe3myzzXVI6ExCIBpm32gFKKuQFQHgk7nS/nXKYy7JkYEkHE8fWrn9nPv/3+y2nc7/ZTrcvhbvrw7f18XYb9KFO59m6d0RxRM2UEiOjCKdBKycDQVUHQw10bcwnC1lsAEGe1GPIown29dqupNYQtM87A0tS7d+bEzI/nx0jETCLYA5+We/LGhOEQtacgRb1U12qjFHc93Byjteu8sGTzrr2P07QufRwG1/X5/OF4c9C2tu6X6wDgn779+MP7x3Cpqy7rPO33YUFCt8fD9doKJwIaS25VW12q5sPd3fnpvJ5qbT0PbN4TxbBLNQhqDBPnlC8PJ8j5eNzP59W7TQNdni9J5Hp9ilVzloHL4XD4rc9+1tb5hw/v3z9d1+7EOfmUsgOuKVMEj1NCPK3r8vC0yDB64vfnCwM09wFFBD++ef34cJrXy8/efHxe6+541OuMzDTkYb9Ts+Vyud1Pn78+vj8tS+vEcTjsbJlz5jIN8AUAQChQpNvd8SMwzJRIgNzVrC1MDtYZcJh2MKTKFeZvgF1tzYQDF2JsamuftaNwCk9LjaXpU50BcC/7V2PpWZ/wclnm8zyfokpoYewr4xVytTDtQTTQVZ4nXKVRJWm1plRy2S1awWntTWkO9QSpL7BoyZimsew4yggJkNGF3IibnofSMtTKeDZrXoecxsQjTzf73VBGEXaCtnqoQVNpBkp5fH1W6zUnKLZ2jHZ11WhCoF3X2p1Q01CIU5EIOUxFPufHb67X75qgJodhSPPVE0IBj3fPGtNsvet6YqMc13U+3h5yORpFJ8IpO0a3lWWCiNq1df3hw+MSvkqWw11K43A4aFuuywWZjhnRdG16BXCRgD5JTLm4OadMyABuAAAZgpbVHJelWaO0tgtCsrVpODNfVxNu1FcPdJRaKy4trQRr0lYxNMgZqWAKZXFhDSZ3d4RkJRunq1Y3x976fMXWY7GkAODoph0CuS8qQHQ6uyiWcxp2FEySVKFDinSYo6BkQLDepXVU86r9aqAIID0EOUqZEJM3hApbJBgVxNkNwihClm7g3JyFyE2TQ6ZtrN2BAMDCbEOHYDk4kkYHSRRY1yv2h82khm6kXdtLGBlYbEsSi0TrDgGBHgg8IHrhgUA90N2IvVtn9+BkAZgGUA9OwAXB0jhAzhrk1U0bZSKWbsZogBumYrVgxAyUeusSbMEQQcxM6NYl0TiNaZeoMDkYUGtoZ53q8/6jedo30zUMHBACA2mj6vF2AfIgJKAITgGWAt6OSq1d1vVUs/uIsROdYV1BcLUKGMEYFqTKFonJADlxXdtuN81WY4Y+zz7VSJ5y9tXINGHy7qEWw1jydAL//PPfrl/8+rJ2W+fMHuuJe12Xs3DqIYqpG6D2l6sjOZP0TTywnpPU1sBtA/0hswN7ABAjoruShkGHTH/9y7/+o7fjz//gt/7NL796+GKBTje74tAfHh5I6/trf3+dFwjxJhEjSyFJgClnc80lCVF49NaEWYgNAFAcXjR9QUhsQ0YmCACRhMDMDoToKoDo3RG7NwD1IISRMJmLeg2f1c8cbEaYYslrjPRubT90t8wYPSEJEgQZuGI4BocRQsBGlOZwIApAtDANB4BdCFhv5B037oUzMgGGgRM3h4T+kdnvpr2bBaaA2Kp3EBPivwvvgCOiAKQ/+Ojun3/xoVtikSTAhMHgagy4H/llFcCPtovwAENEjxcwIgC4G4FsJjbEjScd4b41TG2Woi1EthVTRbzwHiNsA9ls74+A7vZSaBUYCLRd7AHNFbeycwcgVO+yEeUjAIIJ3TZdYvMubWcyBCCPFxjrVtz2glpidkCSXAoK6e/99OPn61VVt3NZeDChITwY/PJp+XjaG0tVb9WzQ1UdxxRLzVRqbyJYL0sZb5/bSgmW3gBsdRhS4qoWSMRIgNp3ANSrIEGYujI5ejenJeLorh5EWCAIQZhr62YuicY0LF0TChOkkgicoi7ag3ht6ggDIAXMtTOHIbbed2VkxLm1knJbqwOWhCmVS2sd2Vs3axaJQZvCON34ON5fH16lXdNVKFG4IPfoCalrdNgEISTeTEBg5l17YrFN7wsP70SbqWeDn8cWWdzkIXMH38rsgpjNFcEZIQCY2cyYZasKZubNPgQ
B5gqbsAj48qnhRttyQNpcS9tjw3/EWZvbi6oZjkQemx6EZh4I/CMKHYCRKFSBtvBouL8EJwm5a6MNlgS40eJ+E0Z+kYrq9cktwtW1hXmAg0i3bmAVESDWdSEPYGu9PZ4u786tuZJHVIrVO/jx8OowDDf741AKOyfkrTglLJIMEjjQPu2hWif8xJprHNAjbKbp7g//7n9yXMfj69emV+2dmacyVqxpSG2tYOFNCQMJ8zAJE0vVcO1Y8m6Eg2rvtff6ELaKlGEYYcyAXvtiapKyu75/+AEsLVcvpbopuJahZJEl3Luenr5b6jLkgYECZFPVwuH5/NB7LWUSlgimJIzcqy+tfvTTz3dvX9vqIrund997myOMEw95aJ3rXCVNbvXp/h0RJh6PN6+u59pCH++/ilJUqV0fBUZJu/O8hAIPpN57q4AS0cowNXAkJuBCAxhfLnNmOs+9unIezvOsLInwI6FpIl5POfD68MO//PYXi2qSflpXTzQeSm29hulsVbsKmuG8OjRaV3Ml4N7Ua9Uskgg1cBzYe08oGjyfSUDMuaML0QsOmDAseoO6hBqqA3EgoTluO7W6GwKoDSPX3sxBciKW66wQosbuWrJvLFES0gggJMTWNGdZqnLiLBjWW9Vq3rv6iwQbjMSAWWhgAW3WrywAGwYloLY6DNJW1d48glimYbpe5ix8HNNe0vP2DA4k5nDz3ullEUBBmq+Ph9/76M/+7b8xTW9uM2eVnLy1NRBkV27E0OfCb37vs8dfPw7TMa7n128O3S7zfJkGvpyv1+vp1WefzIqwVoQxy6hql/YcsdJAQPTpq+M//Sf/7H/yH/+DX6bDbO+iwX6UPKab/f6rX78nluuiyscvv+u73Vu/dI5YZnUgnIpCw8g3h1fLCmncrzHPAMOQUZSFuSsAqTtidO/M2T0wgpiDQcOAKMIGGT36lp7WqCOKhQOxCa8BE5F5lySrqm+mZ4DT5drBX93SenpGw6pQFcDwZjycl2UjqB2OKQ/5X/3plwMisIDQtNtBrEi93I3SLQ3RF6WgRdFJGNxCj7s92ko0X6/nw75clg4YLpaIiN3dLLD26Oa9qRO0uoyvb+viHlASa8JaO1x0LymQx+Px4fF+l4dVu6Nlkfm0llKCIdIoxZq1tdfEOQGvzXbHm+syOwBQ3h1fWX+8f3r87KPXbm1ZO/Q6+J5I1mW9K8J+SRPMPYxWDCcBBKjdbnefXuezQ1htOQ19NQ/6vZ//7q9/+HIYh2ur026/I0ny/+Pqz3ps29IzPexrRjPnXE1E7H6fJjPJJJlZxapyWbIBCxZAG4bkBgIEoaQb2T/LP8B/wIAhyIAEwxAkdyWqKbJoFplJZp7Mc/Lss5vo1lqzG2N8jS9mbLLkuD4XO07MteYY3/e+zxM/fPgU+3jsDm5NVCogIQXux8tD5AyJjGp/GCKBLg0xPX92GOdzl/fDXuaqIPWw3w1djtHLchFdr1/fPHwsVuuLN8Orr69P5/G7Hx7W5ofr40PRdHXz/MWXy/rDnuzuNB77bFLyLnF3MDnvdum7d8u6HIYhc/SPH26fXgnMAMCEzVRK3TwObgBgm6TVNs/PFkADcjDcthBmSNsel7bOtm/4BTAwQ3oK6G5ZdY5xNX5/d/6D19fuVQ14GzypSbWytKsXPZn1Kc2z5pyIXMUix6WVvtsZWkQWl9Y0xgD+VNpGgsfT8ue//Os/+bf+CRHXVpkJxJ86Zbq0Wtxqn3e1KUhN/VFFzVoIEaRp08ip1NURu90BEKd1TLHv0lDkUa1x5q9/9tOQwne/+/bm+kgoP/z2t/Ni//IXv/jf/Lv/W9rr3f1tvwyvj1cUAFVLFQ60til7SDkC6LrOw/WRO2hTZYpEVt0YqZWC/OSbMLTD/jivZd8NzMEdVd1czsvly5dfATNk5sABYS2KpECVYlzXi4uHGJmYOEbkMo/hOqWwyDpbaX2fFR2RnGPqhhR4qWPVNXV75y5n5ZiWea6yRu4CQ5UppHCdr5yjre3Vs2cMvpYLh25e1hAjmqr4eDpRpJuXx3VZDIxien6Vzrd+/3i5tPLmeH19HEJOx6/e1LXt98Prfa+lUcCvf/LVXGs1f2iXPvVffPHq/f3l/f3Hh8uFHLvI5ma63p/foemr62uktCxraxaYmbmubamAEWsRdehTeJzKtKwhpd3uJoQ+5H3QVpbpH/zoy/fvb6nnqq2NS8ccAM8PJ6vlejc0DO/vPj3L+dnN4VLq04noMmeKVuvAdGSkxMXQQwhpwCLkQNn64SBDkjgDVtCZsKra7KaWHXJxBo4xBVJpujowcTDuPEcJxoj9sbtIPZ+oaI6c98DTrcLk2sgWM6cADCTuzqYOSP0QESJcGCyFpEylQETtEy7TWhU5cUgt5JwSsUPuumdXryh2WG4jPT7eP56K79L+vEQ3b2BMoRGMZem66uoOJiZVTNQJo4Tegp5r7VmIHJmx72MMoTQgrJUNvamh2q4fBg4oy7CLP/4nX52n73yxskqMFBKLCo4lZPHr9OnjdGQOqBidAZVbNGmtza1d1rrUpirNlwg8V1rByQX74TBcpdw38VrqPJ7H8wnRvU8HBvcmZAFDxCmgAVVgn+sKvg/cQ0MKWbQXyAyBnchawCwQxSSACGVBvgh0yqC1zhdpJpcJ64qLIqR1KaFLMWU1IAk2aQzmrSQViL0GwkiMpIhtlVZkPj1EgQ57qWtMcdPxolMQh3WuDKhF1tEcG7K4YUyQrzDeKPauxcu5nB95LaAMEE1BQ/DYd102g2TEHrlAjK5NrCm6g2FRDZCIUEw5MCAzKIpHqOACpojmJmtTI0YMT8QQRsccKKga4M5ciAzM2zqX0j+diLqsYg4KiqJa1wsouU0YBoqMaGDgChgcQDhAIGJgQ1vL0oqCRECIQ6C+h5QYPCZDbUgO7oxM0pwwMSuDY6gGGqiYi6sWSxoSKYiFDvrjsb/e9YcDAJqKqcF5lPK7Z9dLn0+AlcgJwmak3uwR5IjgiORWHRXcKZAzI3kEeN7hMUk4aa3ZVqOl5xIBAc2IMXoy1axKgOAh0KG0RwQXQMCQ8Ga6PNqbPOm9KWTKrAxT6OHr5KiVjBbC6Xfv/kWsFvo3xSPTuWmNSVk0pSDM7JRSBCiB49aKVG+2dbnQZ1m32c12LdwyCM0NndAlMiBBU52aYNj/V//8z1+8uhrePv/+uwertFruzL64TvPjejfKoxgGdjVVQPRAKK2CQkAObJFZwCN5ZASwEHJzIAru2syUICbsE8S0EYLI0GtrlkhhESxEGBwRRLSAVnZSdfaqMoreG1VjpthLUtvXhZY746YJTQIRGbujIzaCFQCBk2twB3oCHAIncyFiQShMO8O9A1FWVHECAwJCZEU3JuWgZi9L/X1PNzGM6M4BuD3dn8ERN7aMAjylcDDw66v+3/zy8FffzxyGxJhZ3cyJ6ipW2uecNWz4oc95EHTb5K5OCEzsvgnK3fDvDjlAiBtCCJ8KPgjgBORo2872SW/vqqYUIuAW+0DYCMfmam1jWDHwU9+fGdAdCJ7kVO6mjE8jLfItrE
Lu8oS6QUBgdwSv4rIFXKQqCFzGsjjJOL398ot1XGqpQLyZrLVaJW/IJ1Vy/vHV1Xr3u2N/9WldDWGSGoiW1maCoQsRE2NQtqIlhVirAWVAYDd1XVULxMwcTLsQF/FAZOAK3jOO4jFkYFQvmbCUSpmrWY8wBF4QAuNZfBRzwNnImjpza8ZumYnIIwIzilNmCIQa4qRO5B5IASOnSIi+FFkrY8ccwTXgWKupogXVtpql4xsrGLrgplUWQDSXHHpViU/DHUcHabLZ7t1h+xu4QSAGIHfb/sjmasBuSoBGn9V4RNvzsA3+ENE3DhAj/b2JDD8v+h15Q4Ns1TY2gycCuvvWYfxcadz8d4rIhOig6GSmDkoERIxOasocdAOyA23dEWtCRO6GSBtEHRwJ2fQJiO5mm8PsCdH1r4+KtM6qJiIcSEGZCQgixUhZEaUWBEa24BgYS7VV8uOkOhVroTM6Hoevn33x4nDscxcir6W5qrmTiYqYrgC+tmKcY07XcY96HWZxFsSbIoc/ePtv+Ol9a4vVCkilrjEQAdW55NiF1Dsyopa6MITAbF4TpRhDW6sjmBs5qkLXHzkmM6+1hECcB2DTJrXW3XBtHlU1RFpVQuRSa2mlLCsDxT4+3++sigr2uyMFYPbpMo1zSd0up67vB+BEgcGdgw6hC8xQWnJ8+PC91nVdLiFmJERQ9HY4ppzRlak6MYeIcUhxxfv7x11H6eY4DOn2d+syyf18z6lDpgqCYSufg5gY+WU8MwREXurK0lLKqjavtbLB2ubzkmR6lvl5bG+HPezxcV1++O15XOsc4sBeOQrhVLQqaWQJcXWttQVAcF0XH2fsou16UrEOQhEXcGIUkWlpMRASE6qrEhFF2g9pmeYm5gjawjyLCQJCDGSgKg4E/BSXRAdAZykYQgbk6WKruRupSk6hzzEFWFQ3JyUi5hSANHIY+oNxVgCG4GgANnR8ydxCy4H2SCjSioERRxdp6GZGIVsIkomPeU+Egkj7q8tpOY/WESYCgjqN83G+5/01SjLwvwPcMT991f6jf/j67u6Hn73Y/cvfZRmS4MJpGO/HTlqdlt1+eL3rP366vci5LpeuP/5wfny+z5FhXBQI7y6P4LJMD7txeB72k5ech/N4iUN4vFyWtbjBza5/Ew//7N/+n2WxP/761bfvbzHs7r/5tV6qtEKosoq5IsDprvz4sKPO1yY//urm+w8P63jp0F/cHGWZg5rOY6zrH//R8/P3j1DdxBGAAkEzMI8cm5GaAzoRi4qqxj5slSBzhW0qbZt5wRABCRQFNho5kVRRRDcLYZe5m8bHfXeISD+6vrq7/K4b8lrldnoAU1Ovq8Q+PtyOh5RVpKgRwsOnx/11LNXH6TEGJkZhbbMx0VWXvM19DPXy0Fo5hJj6fizlMtfccc9c69rWcH3YtQqHYf8wjS6SmIYclqnOY3FxJwwxqKuLuQElGteTtLZ6dBUwF0BEL02E8zxeZBqPadfFbnc8PpwmMkzou/3hcpoPN89uP3ySueyGnQADEhDcXF1/eH+Xu8HRxPRhFQwZcrw/n80tEddatVSjo1NwrTLX1PPQEYUs1A7Xh/vTaXccKHEq3DvAeYJFPBsSMsaQQhfiIksMIeegaIf9rut3tx8+DP2z0O37Q0eBT8vDebl9cfNlqwjmzFDK+Xx/q+A8pl1IB6yXx/N0Pt/fP+4OV8fnHSPvhu7x/vL93e/I58dpYYjXVwfXsJTiRmrQpL398mXaZcpMCDk9uZ/MDIlqXTFEIAQmDEwApmpuqhs5DwkZcGNfbskhQNxY10aEbkDEqrZ5GJ7sI7Bx/oAji9vjND2WRfwYIucYAUgXBcIQyRxUzAlTny/ztOHXzSVyyCEbgGzHXkRmdiAKgGBECC5V5pdXz5dxEpHAydURycyaSIzJ1Z02jhe3VgotTGyuy3jph720RjHE0Jf1XJlCSIf+6vb20/Hq+eHq2Txfap3d7fDsht+9++4333eZP3z48Ad/9I/CX/7mV3/969e/9/pHr19++PW7EfiQ924KCOoSYnAkVXW1Oq39sAtIau7WgKBLudWW8g5dtAk4oYNRFDGkVGuRuhKomweIz2+ef/vuN0AR1JsDqasKIDkDUUDCFHOta60z0fDFyx/fnr7H6EOKc3MPHGOYL2Puh7nIep6M2v4Ql7nN5zOZ530HkXLeqRmHJO616n5/6Pvdu8dvCTIgAjIH/L1XP/n+47vhuAPDcRyfXx3v7u9/9PbVOC7zvNRRE/Ifvvn6dhr/6c9+jrSeT5NJ1Y6l1to8xnR8dnP76ePtp4fQd6HL53G5/Oqby7wyEAXW5ojdIXVgkLo8LdNlKjkjAJwfxpyQCYecFilMyVHBcBh2HOpa27fvPiJSiPzh8dR1nEP49DCie9cgpHDY72LK29dlyt3QDwwxXUFkun52M//wfvsUrOvkHFpT85rMMyIwNbKEFEIy89R1nGLxi9UxMSDV1i4G1rxI66sf1AGAIvQ5eJPpOsauxtlatIqGIUUCamBIiApqPpfagJBQUdW8TYZSlkqPM/Yp7g9ZAaNDB5CctaIDnldkQNTitUIoObWek1PBPAy74+Hll1cvfjYw8zKUS9lHi48XGAWR11W9uYupGQfQtgKzA3CkvOviJYjUabwwo7MJwPObm74fWnW2wrlMunZMxp7JBsBkNYkdhlildsd0fNlN341ApGIiWlrDAHJZ8iqhS4saLguzxTAoUbbRVoXTGR8nbBaZAey4i/urHtByn6lP6qLLrEudLpfz5f50us25H56/hF2Xu6Rybu0kPna5cxUOTJzGeRGvgIE7iodjoJAMl1ZFmjH7tiVyNHc3q9r0fDcg+7gwpEg7N3VfmxrlLBS29aq2WldxVxDw2oYrgl0ysqaqTWNMpalWQ3Fh7/I+5VTaUuoCwMu0sOAKxmxuCgbUJbVCkZuf80EpDvP5weZTHac+dCHuqY9q1QAbJaOcEDoO6qDrbNCxeWKH0hjRPBPtnM1hNjUKiAogIrWCV8aQU6ji0YNg3oZVStinzik1AceAyABkoATWlnU+XZ7eBW5m0srallF1bbZ03LkbYSVIgYN4TbkLHRsGRxRvUlZzkzK3yxgBmBxydk5aW6ul20KoJhQCpYC5426gmLqAnpMQGmEzkXE5fXgotxMZItB+11/f9PHYrdbUFEPQ87re3T+Paz+MHFXaBvIQN0Ik2lYXIKZoxMzMbpGBNg4ORlN2TiHoYa+36zq3FDUfae86kZFBQRDz1rbSCK4GTtmpAhmEylJxH78WZvHJxKOuAyL58Xr4ch5P83pudNoPDbzu6Uo0YNB939M6xuApArSyg06npWU8xoiiCOzEJjUQq3szQwcOCAjqQIjIrEaIjIZuYoi1tVH9VIVDTEUffjOGH+oekBhTWSHhQ+Xz7TQJImEwi44dcUSI4G6WmJnI1VQEXIGJiWtryIaAppISKwISmjdwZQgO4OiMjAjI0KOJ26YMjy6kxXTFhlAubtW8MVuzU0gR42U4YInyUNaxIAMHV/YYiERMn4ApxMgRIngjpadSFTmgm
YGHgAidyg5AnWo0AY9A7AgOarCZ1wfH5xO9jteGmfropI5Ajk/ld30a4gAzuruDYXRdf/5Ff/dYLp4YjNkVnIBi2kJc2w3dN2SMuxGTqmzVTETccNNPuQ5AM0NEJm7attu8aCNEsWb+1DHabu2IbC6f5WVuJvh5miCqgKSuW6GJkdx1YxUTBvCnCMmGsUbeCMq4yaq2xDcAqCmiI4KqfPaqsSMQcy0zEahIEc4h17Lu952bR+b5UgKAEXCrewvPuyTSbs93X18N7jMgAwSRikzAqGBgHtGtFLM2RIioAtBMvDm7BzNFbupV9EDktUakJmqRRZV5I9DH6oaE0SREXjcUvfkux3UtxMZF9kOeVU10I5UhSWZyUQDAjYGA2oC8STOLMUzqFXCAGKAFlytiEQUgBtsxmQMqZKIQQnUlA+fruZ13CDlgVXbAgEGkIjoCOIKZI1MzVDGmp9y9uT6hFXBrcujfZcic2BAZSbQFIkBwB0QQq0+8cSbQ7Tys5vpUpXkynz0F8RHcTLdsPuDWEdsO0v5ERjcDgu35UTcHQ+SN9+RPcSMFsC2yhgjuSFtj8WlQSgD+9ANPobgNDebmCL5Fk/CzB/BpVFSrIG5fssEd97tnSAdOfYjDKmWZPnn9JG2c56rr0uZxOS9elGNAGBLxl89evsjxKiBpBeDoYi5lXYkMGdZ1neocu1QaOYXcdRAsD0MtI1H6/S9/apdb8gbm++N1WZW5CyGymoAThFY05kgIu27fqriaKZk3wG2c5v2wk1YjpRDTZtrJIQMaBmRGJdv1RzCSOh/2vYMjM6VkZSHmbn8EZOQgTt0hxNRL03F+DAFSt79Ke6SErs3aUsr11RVlTAla8+nSAtm6rtYkdx0G9wattbnMBCxmaGmbDOauq60u44QOZV50tXfvPqYeM+Tz7Ty3lq+S1wbuTB7Am4CJ5V1+tr9ptfbdgACBoLa1MjJ7Qb19uO/Qv7yKP32WZbo9f/wt8/rs+gDP+w/T8dSC7FihCmmVOvR5rOd5lRQjZY/BQ3Ax9QoKyERiIAZTc4/gql0K++seQN3UlRyAwQVh8mJkTFssjjySA6oKmm2mia2n4maERIgmLtUhBVkVkRzRzJlDVW1FnFgBMfLSSqAQEANYa3OiKwOqYA/TpR8SmLP4btfNtUFpxFENU85OKIQC1MdMHCrp5I22Bi1AbXXX7wF02Oe3L/L+amj6XW0iy1/uDi8YbpTQ4Km7qfq0PUDW8/38n/3n//zHb/cY8N2nuxevh/3AJN44ouO6roc+PdvRT75888233//kH7262advv//w/nz71fOX5jUFCuEqE376+M1Xr74qc7/Mc+RA5gyyO3Zu8Nt3H7/4w+Nffnr/F/+vb/7oxWH/Ztf9+Ce/+NtvP61A+65VGKcaAl8fd+P9hVE046lUV/QGjfjudO6jU+Ih9yPBX//q7usvr1InbRV5qm4jIW2f+I6jqjNh3WiRRMaOTFiJIIBARwMDi4uJAQAncgQxA3cCSsAIzcDXVp4Nu8f709Ad9hFaKdBx7sPj+fz29ZuPl4+HYW+Lzeta142oTSJweWzHZ8e5Lv0+l6WO58fDoR+NarHzZX5+DNqgaX52c/P9bz+SVgR0ophS6jvVEkNsqmor0QGRWhNmWsCZSJT7IT2Mj6QgrrVpH5IR9Gnv/aEuLWc6r483+8QxlKV585hiIc799XladUIxGs+XSKHf9+flktd96pJNLWC8TOu+R3AcJ0Xq3JBTKLUqpqvd1VQ151SXoubWrOuvc9wpllM5lTJ3gSQiM13Wy2Verq6Pc9HltFDVSymmjTgQA7jtjrulLmzkBjH2bbO4Qv744RTDIEgg7eG+hkBu9HL/tloU1at9N54fHCuHkPrBK704vD6dv4Mlvb89xdzPtyU/i72MbWKRUpb19as3D1NhhdOl2jrvrvrbu4sifvnTt+005cHrw2kVy+FzGdmfEKCOgEgqYk3BlAkBYmB0B3HbyswbQHF7EQE4IW8SWHJE981auA2JnnLRuGk+3FXXRcokpmaormqmBEBOa9UGlHMOrIbgYGDeBXaI7sAORYxyVtXEydUI0EWNnRGReF7bP/6H/8C8MRO6MgXCVGoFB/QU0YC1teqKKeaHh/dv3v5eaU+0Sg7hcrk/7J4P/fN5fSDCoT9eiV8u97njfe6ko48/3CVmN1rVvvv2bpovH/7f/+VPv/j6zRev/uKvfvlv/MPfv3l+9a/++s/+Sfc/CTlJaLVVxBRCV9YzOhsS6Nbpc6YoUpFATRVLjhw1uQZkXpeRfN8RtrrGGIy5iioFpAziTs5MtZbAiTDs82GqSwRW0Gbi7oLx9ds//O23vzReuYEmEFRdJTT1ZhXrbj886pxTxylhaX1MjgiuKVJASDk/Pq7M4frmOkF6vL1/fnN9mc5o2KeOTNfpnAy4akihuxp2iW++fAUGOfDLL18b+Kf39zFiR/jnf/1XzHDsdm1aQxemy+RIY3P/8CDTygQslaNGF1aMTMghhS4EYLRapVWhrIE4plBFxO3w4tnD3e1SlqWWlHJOV2W5C2Af3793YjCbVDiHFHCeW8aIq7Sm17uOmSiwmH1/d3vd55zivNR5KuhlqfXR/dP51314Whucp7HD0BR8iKIlh9ClEAyWWkOG6KhSoRWsIFoLVAoW0PuYqkIxdIwCNRBWV21rc3FrW3o9lOISz5M8VCkOs+NUlQFJBbvQAi5jG7hb3V3o6DF4dgu10ZroJg0xAa8tehRn5O40r1a04x7FOThRD+7g0UXYlXUyi0Ps+t3NtGrbSy1zxvhQi4BB6BSSQyRzNA2uhz4Jedv3p0WXas4pdeG6312lfZ92RrqD1O2NcS7rTJ0jtmAaSchWqyHGWIK/+umr7++XenZ34hhdhckZV59H4DCWktVIWsMpWYtXc5trnEY6z1fdDl1E5aoLCQGAS1H1lUOFFdfzpc3T6eNH8Xro9o4J8yAhoit5jbBzhaWt5NDa6sCEomBV52K7klOUCtSUfHGLiCnGYAzb15gKGFQDUmpt8tYApN8PWiXt9xVgWRQQ1jLN1cgQFNipPJyoBOqjcMfWbL1oqaSOCk1blzPnLjGUWqQULUWrFzJHR4PACKoQ3KolSlbu1e+8LL7WaCHh8bh/tiy/CxxcASmgEbkDWQRjUBRllF0Kg7tit3BeWyq+QOxCAjVrrYACb6MenxUMDajFTLu2MYEpOrAhhRzMmzkbc5mm6Bhi1M+KA6llM/yKioiKoSOwNxBzN/PMscOQqeuYeW1qIE5lGU/WVvTqxtZQULycjQKZmoM4phjMnTloYA/Rux5TwBQJmUE7Zs4Dmny6jEEw7/t8c0iHgzJz9GjpfLucvv1Aj++e/3jIQRnINWBwACMiMUNA5ti0qjUH4sDWjIGigrlQF8y4NKHolDXcVPHL/CiPFxnMCII0RYyMnRsR4oLS+jTs9lHudR4DhwMPWnOfrn6z/rUlBAoF3lv9dtEPaXim1ZmwH+I64ZFwXj6yjmQzYayrltVjJSh2k3YEYW2tAxI1M2cIKcSlzDkEdVDbsLOf
bUgOycnNnGAt1TCsDcZZPSoaMiGLdO47pd8/PntXTqdF3IKZEqA0i0wxoZg5QiRGRCYwAFMkDOphFYicBNyQiaOjI7GRCaIBqxrShuNBhKzqsdundNP8ZBWgKpUV1wkboq1qykgCAF2nySktHrWZVXFEcm/uIl4dwYMzU3YIZuQtmjBEgugAAmZaFVQxFXNiuAnUu1SEhqxuZBs3iBTIkUj0UPwNfbHjHxV1YiKd/GkoxACMuC2tzN0c0NUMCYhfXoWvXoU/f+8RmR2WUlMMXZeQ/p7eiLTRhUHVnlCNZluPAcEAnIg+T7ZUXMFRRREBkYlCa/UzvGZrhG01NNi4xQ5PswC3TbTjiM5EZgAGsimxCBkjwHZ2AAdnCqYCvoWctm4oAJKrIvMTEVnNdeNLIjoQYK0t5Sy5ceIP91NEOM/Tq1fXjJRSdj23ps2sj3zoKXLbdZwoHpnJS2mS+z5QAAcFJ06A2kCBsTRIgdDJEaqbOYhBMfUUKabmRhzYralxjBM6xUhPndi0GK7miTwhLqYx5MCwtWrNOKfUMR1Al1qB41RbQwRkd2kuQAHNCji474kyGLbaER26fp4bkg05gVZmjgoijeNQpPUpAOJcC2PuY+ivX3wYx+t+aOODuSJGoqhoZpI4SVlSjLOqAjsSWDNtMfbuT8hJB4+BP8MVAP3p1IvuW4zI1BGcKT4NlrbHEJkAFZy2AY3bU5oMXHQDg20VRXB3JgBi31JFbqb69NjYU/5oC/hvY0QAACc1RXxS5m3UczNn5g16Zb41eRActtmTP53SQVWf0Ea2zT/hfzAq6nc74hQDciA0Qt8hX+fhmihHPsPyQEMex1mrYkyQWbDmFNypKDw/7l/u9s933VPpwJy0gUskZYRm3vfcCFTHtRZVXeYezGNOtTkrWCOO7MghbRBK5sBNGiKl3MWYkMjMVGpplSi4e6ABQJlDH/dTeailhBgNvJYKIeR+ICCR1cwRY+pT8CZ13R8Pjm2aLghYW80xMnPKO6ZIgUyaiNe2ouqQEwVYp3Vda+DETHnX7wOVddVFcze42bBLzWfMmCgsywoOyzLWWnfDIfV9WVcyr2tBxs1rB6L9rv/iR29uf6DbU3UaRM18cTO3LubU5axqweMOk9W11dOiE4QwTpfT6T3E8jjdYk6n6awA2sBDGiv/5n58zvXlocuI5e68WMvD7nXYndw/LKoIOcTEWHQKqgEQUAMxBEUX5CzSzpcaQQ+74WEtjXzotmYQAtCy1gb4/LpDlalKKSANUhet1vM4hZSr6jYoV9tsW2Tum4TFHTa0CZibOqA9sdscAD3nbKKuioRdTAqgSGCaKbXailbKKXUx9wMYDMeh9sO4TNKEGCKjm/YxOSFyiBxWwMZJZd0Rz2vZ73qEPF6mxP7lm/3Pf/aTD/ff9cP1h/ffL/K7o38/FW45NzemKKJMTxG7Z29f/cWff3df/PkX/fp4HmI+PZ7mxa53x9hBBXCnJuwj/emff+/RPv7N754d/OWLq5urw2VUrZ4GzmEQS8+uv1jVzstUq3AlIgyE7K6tOMsP9/X69as3L1//e//ev3n95U9++O79X33368P1/ptv7rDtRN3cIoQXr6/uLz98f757kxw9dLlrTZjizU3//bvbVjTm7uThviFEpEAIbqI5pbWUJg2NYkzNpCkYOoGLNEZUKYSEhGJGYMkN1ZlJmoTkCVldIofFBAFCjK1qJnhcl6tXw2xO4yU7BE4te97383LJgaN57vrbxwcTzX1W5Waacn4Yl6KSuONsVzQcc+pD9+njRbU5EpA18B8eZ+Hgiwhgj2RN1lpDDtakrh4xnU4TMKl5yN20Lsc+E0JZ1xSCSO1CjIm7vH/Q802X13GZ11M6HIauswoY0Q3G0+nt6y/C8EKByzS7aj7gbt9vU/dht1MRYne3aZwhwPO3b3/3/ftlPsWcSi1kQSq+eHGTeA/yQFp0OQuELvW74c39w/2z6wHpcKotpm4mffPsej6foKmZxsBlXp89f/7t33yjCIebXdfnWnQTzXZd38axiqX9/vjsWb1c6lxffPkSOc2zOsamDkqcOaJraSAAHFTm49VBLS7zfJrvGvsxp+PVMLUVQ//87dfz+2/qaWla932opwsQzut8WiVK3V0du9yn3M3nis29yLMX1++/fx9yekqY6tP5VFRFBNkNHBHsSW2GsAk6ntLOvrWpN5OsgAEyAAFaU9leOVvqVlW2BKyqAlLs8PqYxLpilSOpKyARIzIttUQy9oIQtGnXd9aaAKIpBZCmxWuX9sxRWt08ok00Ajmiody8Ob54NqxVRTQRGQFvuGwMZqrSkFFb45AQ7frm8P2HX3391e/JeTUTcNgNQ62jYUhpWMt5qetw3DnL6eFDjLvU5/2+b+v8+quXd/PlL/7il8t68jp/uLn9Z3/0s9cv3n7z3Q9v375+8fLtw6ePz754xTlGiHWuaotKzakPKTbRuNvXKm4gRQJT5qxSuTuYLtVPue+1Obk4VHAVV1UptX315dfTNBIHZGrSQkobJU08eHNDra0GMnMf8tX3H75d6+l4TKZw//Ag5K14H7s3Xz4fL+vl8jAMcbjqxmnWpjHHfr+7jDObL1qaaZeCG4CbYUspttL6bmimkdPx0Lemh8NLIrg8nN5+/fru7jHkYTzPj5dFOKvWdH31MM1ibs3AeQUwpLoIpXA47Nb7+5y7FQMCnaf2en81sN2fHpyNAcb7hyF3+Tiga+gCp9haPV9GQzfA+8vjuqw5p7zbLdP6OH7oA5rLs5vru9OYd7kWctJWW4ocEgUOXMkAHhfdQXB39Lgobcu4++Xy5vnNuNbqGCiGTRQNQIHaKosaInZ9FmlMVojM0iI2mzapLs0COUSxPOCeOLJg9pZCH5ocIjDDKrW4SyMJbrbm2CUV8KZrydiHZr1R3w05Z/DVMxphK8oL6mqO3HXEANG8I8o5RTBs1qUuWlpF0MxBKTITGnu375wDkje1pnr76Ttt9WZ/XQIsj9N5XB+mskqcVm3KHjDl3WrhsfhgktDBtQtpYD3s+mVRq1ScU8pDv9/HHhrVVawiegQMGFPDGkMHqmWdI/PpUnhHemTqOezT+LCYAQVnRBUNReFxmmuqbpLCdRzWRYnEV1GpUiBAP/Qes4mUyFovi0m3UnuwGhPH1eXxUWQttcQArN6lZBwAu32fiMC9TfWsniYpbq3vE7npak1taSIHBvJgOIQIgEiRVUkloM+1TG3qaUAaAgMBtDKjisUu3Bwsd0SOuLapNLMK6E1lqtCcYtaRu12PHdQyzuNdXZbWageh2yfuonJYhXPqyrqq2LrUiwtHziGDcy1riIkdjUVwqa21skqx7vACuzS30RzAse87yTlyZpMq65DJCSLzEMNxiP0xXSaFgg5oEhFAcbuugIGxmdXFdK42EaaAh7acPWDCgBZMQNwxEnJACkAh5c4ATKvWp/tBqc2rUiBMPXEicJFCaK2uMTgCcdzF/kaQGljLOi0XWR7n8d7rnI2CJgIKFhEdQoiMTTXkfm0C0qo5xkRSAzhKSogUgNBImq+
jLSey1TxTpv5qKCbaTHOwsY0/fO/jr3/8Nvb9Sb2hABM7iLsjeGQCq5v/h4Gqg0NA8k15k9xLUydgzmKm4KHDcCVTbbomUCZnjEc0kFJ6S9WLHFh6uJze9fKYFd26BDrMl8dfvUtv9FxWibp2Ywprg29VRm1L9DqtoHWNgtBRXSYoY+pfhuGqyV3eRTNk8s295MwhkVRBx7lWRwK3phZSBgUmAlV3R1UAU1NRN/fmUhU8hdXrgh4D1+ZztQezd/NHQIWQwB3B2I0JCUxEkKk1oYCbkJsQiJw5OgVxM1WOycGbGhE1tcpGCUMgZGcOhmgiREAp5J1e8tzWkixhQ7ZOrYAXgMbbUImtcKhRJeJiMCmsxsTMJsS4RVS2vxoBi2F1FOPIKXgUU2VCVXOQEFAt2XwdBZwXwAZA7JG9NQFHA1agXuyl3lx3fyTxpSZR+AhkDATIZrR5NojZvACIA2+YcIQc0L867v/779aGJAl2N/n59ZBjGPo9fP4x2e73ZGZEtPXuAYBgS2oYAmwr502yZuBhG3iYibbPqWtCpA1M42BE6G5PgSR3IjYXBdv+LmabpJiaV2QG2GxwW7IECZ6qTKayCbY2u5kDMkd3dTcE2jRYRIQIKgURXbewkqqsIhq6tIquVdiIsJo/bZqL6drwSH4d6FL1w2yv9unYwyQ1JKoi7rxI6wnQSY0WhB2jqBbxEGICWMEyp0VbzsO4tpcB2CS4ulJIqdTa5xhUGSORb946tHoIYLIaaExhGPL3jW8Xuarzz3p8NTAl+zjDh2ZmwIHRfZtfdMQIrK0euhgBprWFViKRtbLr2KREiNkZ0YpWR1cxQOhSdPDL5R6HTwqt1pY5QER1AINaBdxVhNBdhR2aWpVKZE00mgGgqsawNchgGxQRkJp9ZgD5Zw8du7u7ISARu5uqEaG4PrXCEB3R1DdJnrttokff/GuI7uZmSFu/DOnvGUYICFuW7Skc9FQLwA1cBYjbnGhrv5n53yGQzAwwAOHG8tpUfYCwefSabuPpzUL4r42Kjlcv1KnrE4WozREIPZKvIBeTEWBsbSZGjLlKKxa470F5bR733Zdff3HdM3jrc445A2GtxJwU94q8NB2nUVs1ta6jWmoTbVrP6wPHSG4fHn53uP6TtgQACZtZB5RiCIFFVLWhATJShBQzcSZkbaW0ogpLvRdbY8jLInnYU2B1MNmkPJs7Iay1OcsG/e7yzjGCbtAvA6+mIGuJmR0cEciBIpnoslTm2A0ppI5jzH0CqWUtMfSurus6G1oru92uNWvSYkzEIbE7WOrzcDgETJEDEhBRLRe+vvnw4eO0TGr1yx9fXw/9/W/+8kc/Zw793ePjNJ1deVzKp/O8qjgUZJ/LLBjE/LKWikVJH29POYEDqLmU8rEymV0TDWs9Uv36wLvOrH7qcEI8QKZHYnTVul6HTnwxRwGdFzHx2tCNVPGYmF2IGwZN6JEgEN8/NjUO2AV3XbRpiSkFCEUcjGsLtPVuE6SQ69roCV9CrkJMMQXfQpCiCoaB1IyQwEHdHXBdNUVUc3SkTYhoEEIKsVNyJxCELqWhH8Axxz5z5q/n+pv3CUNiZPBxGSGnISaFBuSH7qbDQZcJIwFBCN73sU85UHm4/61WOF3Y225/oPH8Z3Hfkx5j2mNIYkAk26fgT//0z9KOH8b2L/7yw//0H//oIsvt/Xh9uNZ6McplLArMxIDOKSoJqbrRp4fHxEQ5XSxoyBHkdC6r6/6Yc5/rTCSIATmG0/nyhz959h/9+/+jb37z/sWz/S+//f4/+0//7IeP/3VrOBr9L//Df3B/9y/uLtAPGczHaUJD8/V/9yf/87/6829nJXPjlBTNypRsutpdK/vzV8PL5+Ev/tUMAuhISGK+XcrZKSBRREUzRDMP4IG5tZa2ZQtCNe8JgMiAOCQDctgKq+BMzSAEduTU95/Gb9+k3SrSXe3NoJzXMVoXjz//6ut/efmXrdaybGcYaGImxh08fzksKA3wPM7sTim8Ga6++dUPAWKXco7YHYbv7sYQQ7H2ozdvv//uAxDWYrna9c1wms7RiR2tKQU8HMKQHB36yOuyFZ+UvKYUJAYHO8RYlnsBO1537tXcmvk6I7Z0lffnUWprqdPnb1+WuZA3JGitEe2+fHv1za9+e/XyJl/tdFrntv7q/QdiwMw3b1/eXy4EKS46V5E0DX3cxf7dw7d9fl7ZQ0zNy1TNwIfd1e1S6Xp/brauIlXnxwI5MoW1tmJVqmNPqhY4TOPUd7vzuJKHm5dvwMr04YfxVPrcg4SlTSFxoHx+mB0t78L+Zr/L6Xx7mlol9oCuWl0Lh/T61fP3735Hff7Jm5/+zS9/Nd1+qnXNA131w/Pr3Q/vTgyKQWi3hxluLysAH/e7cZ57zinyp7u7fkiIT/i6GMJSGzEzIKhzDsxsYm5upkRPJWlHROQNSvZE3HMj2CQMstkqibZtiftTzxEcnBjNrK2yj2FBnyapzPsdDhmlNkFpaq7WRFMMnwPoVmqLCIzGHKIxuKkpM7oaUOBAQAgE47y8uL652ffzeOljqKWtWq7iFZApGDhwCKql7wYnc5cYQrmcl3Hd7Q5rnU3ExGMOZV3Y+113nOs81pJioP54ephSzuO0PL5/f/d4W+bH6yto0nK3//DD7X/yn/yn/85/+B/cPdJvvn/3+rq/fHyEW3r26pU0BUUzs2aCjZkv48OzfkBwVeUcwckEwGhaF4qEwiFkmRbqWNDFmrS6aFubHLvDw8O9qRFhq80iWnQQj9Kud88u9SFQKlXAWh6srIIY1lXdQdTTfkBWpG5e9TQ+Bu6alMeH+5vjsyKLNq1rM2057gIhEeyGPStUq69e7e8fzzHsj4fr24fbnLKJ5D51MULg65fPPn46FYnj3QxINzc3tdUyLSPMV0Pu+9hijGnwlNhjWxYEXOalZxpPj32/vxtnLXK5nBao5n6+rIdnB0+hgGcD7iMBT5PUKo4ccrq9/biWOVJaSuu7nhGO+xTYpwIe8dXLXS319fE6EDzcP6Su63J8uD29uN5fXXXvbiet9fEy7Yfh6mp//3DfpxhCera/unt4NMVV7Plht30KhuO14EyBvfdm9TB0gCoNR8DZ4bwUAmAV9lBtidg7hYZQXMjd11OfcQjkPqNrnwbvd2NZ+uGGgDt31+qQWfnZno/9FYjmkImLsK0mMAhfAakSeVUTBKbt6xIwOKE1dENVKyGG/RCsaGLvdl2OkBiHfYfYVlmWIpdL+V6+63KXGKZ1GtXvpjPEAUN2MVMnbDqfJHSHDKgVBE0kIliAkkSUm+gyymLWpAHmtcLpvM66KFYmYLEkmKSrM0gJzWgRiQEPX18/PqiN1dwxho5CXYW5ECAe0gpwErsm9Nota4lxZ1bdzjGGwG6itaxSZJ7aZDBvcETx7IKoFnxxXcA8UN/vMOxjK8wISBUCJbV1ch8EEGzhzEPcH3ZDII+JXbWaDNwHSuBtE55Eh6XI6qWazoFUxabK1tyFvTGIc3Qprs20ceB51mmt1jw6xS4u44KLuI5UxgBY3JViMXLgLickWs
YWHh4+OpxOnzFiuZ0xHf7x+nPToed70laKBFhmTZQfDx8PF+6wv30371fhE/6eGaLcs7wHuN1Pf3FpZd+P9nTX8fURASEb0PWt73LCyRAS09Eqc97bQnLRuvA7gHRLipmRGgIf1H/+T/AwD/s//Fn3Z9BCPz0q2TcFnXAQNR1XZjM0qb9XnHKKFuS9eRn4wKzMus2rSZqpmaepi5ehiEE4a7BTgiEbPIQJzDTwZPTWtr5aT/epopyyZHEeQO4RHmJzVNP1kSQZw6I03DNLR5AOZBNufdapC+YyZiAgzX2uax7B7q/UOd5lA7PQFdL12S3HFiEkHCYATCRwcjQgAMYOAOqWPuEEmYiBEhMLSVsczjOE4tiFIvOafUAVE4m/k01eOxjGMBYgRLCYWD2EkIxJEcJDgFZeTMKTEv5jUOYRQW6EiLdogvuQ6f+roB1DXCTdUiwEOY5GR4wyS03L9FHIZJmEUkp9QzZRHJkpYwJ4gJSTgPHXcCjCEEDGFVzbE2cwc1cA/1IAwIU8cARPdopZUpsLW5uTsheJgGOrGdHNwhWkF18DiOtXm01hYZBGBInQiFLWsMeCAgOiVarc6G9SY05jqP8wjokijlRMSARMSICMCBTEgo1HEGx6phy+Ji7mYBWJr2OTOFqoqwpNTUDofRtPW9XJx1QkBMkrKpjuPEqtNh+o//4x8AwP/2f/IfrAGo64/j4c3+WBRv7+4swWL0VubihmpQrQmzmKPD9fmQwCN0e7HaH0vu+2pAwTe3O0zkRG9u7rpezNRqNUfp+DA1cxj6BAGMhAjW/LxPT3K66tOq7zcXm9z3TLTOiRmbh7khwzg3ScndtHnTaKZB7BhTrRyREFkSIDpzEJ+t+tYqi+yPx+pBq8EgDsfp7GyFFlP14s2shUcPKEDnfYdACoCY9qWOddw97Etr4LEdhvP1dr8//vDL1wfVUGuKq8RJYJwrBKy2/XbT3dzshKPL8tGL56t+3XfGwrv7w/hw/M2rN5JzaVrHZiCb9erj5xc983Esr+6mo2qX0/ZsfdHz9UXvAPvduN+P+1lHNQA8W6WBuENeJS6j5k16frlu1RzAAO+O031taSWeoTLwWeoGGTrOQse5UOr6LleNxJh6CYAwmo467sY6Kk9lwBRTJdcn69SFXqz78/Ptfq5BsZvau/1MKau7A8zWyJUBw3C7Hu72c1XdbmS75tJac80iTaM1Y5FF6WUQauNcDacW+2NVha5L27P04bPzaRz7rpPcPdxNb14f7vazBQrFKsuK6en1th9W9w/zUXVWrXMJ9CGnq6uz1A19J+9u9qVpa5qyZILatMzNFFLuiHg8jrXq2Wb1/GJzvhqm+fjm5kEt3CzlvB261bonSXe7/eevbm5n69C63L3a3cB7rSLaQDUgRW0IBCKAiVCQJZGIByQGZHcABwTBANBqZN5KEyb1Omw6FlIIFkKOJKAWiYWEmJM5MKG3YmhIWk1BvLUGXWCQqGAlrJAWFzz1LFI9HFCQPIwCTKNqAyZkRoTEVExZcDF45+BliUQMJzJcWu/APJApgJ1xjugXG3v3YKSOMyNoCPFjlhXL7u0WkFDdESkI3JwYiYnAIaLPbA7WkBDdgRNZgEVgLGZ5AQiUqTUMz5moY+pyQgdtHoIVwN0TPNrJY0zLhsZEgIKdA1g4Ipp6IKVMQAEUiNhKAwSLcA9JgszNnISIUeGkiEQs0nOZHVIO137gY63uykFq3os4YphR5iB05BZuAAOTE5pZ6rioOqIBBkJiosQkYt6MPDgQwhFaVSEMAhTRQBCXgTOSjS4ArczEDB4pMTL2jOoQc+07JvB5mpA5OBpCQ3TiOpZVn6JqIDCSezhT4NLLDiCADIiOHiKsgerAKYFVRCdBllOQSgwo0EqgCEZ0IMzgYQDBCQNi1oZIkbh4mEas6IAuOC2zIIkAC4djQIQjQJxiM3zUB3ufh4f/Nht/RDNOwWLAY/iOcNKue4xbaTnce0QDlsgDEWhxKAsADAwA9CUixeWz4/RvePQzhPdQzd/ABPAEeZwgocWdAU4PWvDSwwqnYNd/G4k+ni/9NrAmRDqdIC5yMnGK0x/nCp7eukweBHBY9MjhPb50AmlOLz713MYp6I1Fcu2EKuH7n0sitFhCLxH2429PXb6ABBBACyCztIjj4r1+ip7xb16c5Q4QYgDQKS7/G1fu8atg4CncP50voiEwni4aIITH4klBy/O5zHoCZEJ/jyM+Rvqn1AwJwB9hvuU6Pd5diAAMZFpSnSVnI9AAPyWf77OWx2+/AHqnTmYIQAJ/rx29GHL+DlQUJzsEePyBgUG/8xWJ0TgCdXn9ai2uyKlrzVRbzmzmrbbUp9StknT9qktJRjUIah7HXQutwqdbZIsRA5CFS+LFih4I52oASIQQwUyS2DwCMCd2NwRy8JSREk+zLl6xpUZYcJIgtBqA4EjqMB3HlLNkcULm5E2JYVh1puqEYIS0OHKCLFqqQsIUAf2QwVUrIGIsGQ9TW9IRYhk4AFoEamgAQXRdntyPh9kjLABbEEIrBoDE3BwR/OJseH7RP9uuxTDc+7wWwnGab/Z3DJJJpvupHKarJ2dvb3SVcT9Nm82K0EDifNuJOIKNuz1AfPLh9V//+O7tbbnYZrDSzFCoTfM6rSICEFd9Vw/l3c1dHtLzD9af377adB1lmRog8dnZsN3Qqktu7K617tYX/XFXmnsrmoS11Wlfa23AhORjdegH6WWcK4TPhlhsnmoEjqrDWp692ERRVQ1oZ9vVw7upzpUYWnWgACcvjlqOx9IcTJVXePZ0UyJ2+/l4KGukfpshZ8ndX//819/66OrpRfchbg6ltgZNC3fQbVdv31bR8uLJ2qbp8iKVw+7mq9nuxhjy+YsngLK7vY3w1692su5XG6hex/spaXQ8VJN3Nw+fbJ+sLiJE9/v77cVmXzl33abLK4xDnZX1j/7gm9PLPVF+sr78tz/68dnzjyun39xXGHfXL67f3Oud7WTV19zdv7pfZgFLLBJai7wAMf7u8gO43BCIE8L+iJUvQABELG7yEUEQjgGLAnQwAEAQLCKcCHBSEViW7GW2BzP4aQ9BpEAMQsclQFhWjsd/fnetAogIIgo4SR7A4pYSvojNnzYzxPDls8L9tKedUAU+6dQHnBAeCHRAWTaFk0nCEuSgL3vYItJ6WnvgdFHwtBEtEA4RPQLvj+v56fWLj9Wi3nAqDJyQLwRfwH18v79g0CPmhQCIfgLMCMBP6+2CgeDjHggAYaetDtHht29/1PukZa30OAGCJxU1OgF/p31/2fj8fd0I4rQpnZRJTmA9nmD9Zeda9DJOmyYELfIsj3HBcnqLtAgAnoxukDwcHE/VHjidDz1u5MtHn644PqJdJ5wIiOkRFsITnHe6ChiIvnzoCedBJITFlXJ5gBhiMVcm9nhvVQn5LNBBmBZTS8TgjsEBPUwdkDSQpAsAD/PQBSdCWuRQHTCQ3c3N3TjULUIdbRHBCgii90+GIzYkQUlhgMosKdhORisAHi6Ebr+F7YIiwkONMAIXRZBFI/Z0CoEgJIA
oXUKiQAwkIARchFIQhZwBhIGXyAUcEYgc0YHssa5GCA6xTAJVRyRzZ5Kw1oJy7hDAzVhwQRI5CQuZelhr1dRazoLEiDgMxNLlDueq7rhkFEhoaMToCESBhMTIabHyBRbGANdAJzR0c3QPjcWhiYgjHDwIw8IC3BZR9uVBEKYF7OFAjuBFttIAAI3cJo8axOYIhghsHkxCjjtwdxeRCMewiAhgIEZJDo+K12hE6CgepLWhW5tHiEIAzESCILRslESyhDaUiWpB1SHlZgHQqaqjOwYIRrg6ELMFIoM59qvcDZIyGTkGYHWWIDII8ggWDgxmdguiAGQhFuYIyxSB2NwQDIWqecdcp9ZKcQtAVK3uHmEQ7jPNd3eMJp3U1lgImMxVp3Ka7/M8qj7pL374+uXtYR+IpbZxLIuEzTKFuU+pcQraivR9L4kJ4/5wnA/zcWxxLM+fnlP1iyHfPuwr4bDuRjNT35yt3GGqhoIEURwEAcG5EyByBzFY5dXZ+booiFPf90Go6kgMFrW1WhqJIKBwtjqvez4Uq606+JAkxooRBOjEKgTRiSspnXM/Ee6nOEzVmad5FpHoQFscxrpOKRz7LBwULZ5cb8dqkzZITF1ed0KBT4bVduiP40E6NDfp8MPLlRARhYwwNp2s1b0h0a4UKg7tzcV2uD2MBWid80r4+9/7dJrnIH73bl/VQ213e7e63g4JGYMgVGPaz89W6zBlgu2qzznhboLajqpvp5Ict6uOhjQzYKsCKzdbrfpp1nVKhlDMsUJITMUaI5pWdO6otHlujZkbsO/nCG/VXfFwmNBxjXAc57OUXCOML4Z01afD/RECKFObVQipw2pwdz9FeBTddKnLPM+1Y0mJauC7gyHFqk/jXJiYGVICQtCpEnBR28+xPyg4ppwisDT86m46WyeXfPt6t+o6Qg9wQwwkQ9qebxgEFabjhOvOPLjrVr0MSXqRLFCtKZpjQCeGMJamrQGCDOIYh8PEXb6+On9yfbYV1un45v4uBMyjy7nvui4v8IjWME80dGAahLbMghNUNDli4jLtITAsVj3nPik4J2AhByT0CF8S2qKmtfVZSJt0ydyIxcADott0KYlaA3ckoiQkWTXAAjy0FKDqYnNr5lUFUH2yChwiGOop5VMkwhiOizy+MGOEA4XDsZq2cFVBA4wuMzOlzEzAYV1mc1UJR6w1mod5EIIvOb4BRqC7NSNmYFKMnHC15jBoClYjENGDk1QHLf4YlBBzDB32GbVZCJhTKyEI7hFoiESBrRgRYVCdlQiQ0QElJ0FggggHjmJ6MEMwYGQIMocAGtYeUYv1QitJzm7HomPdz7DarEHwUJpFJIYgZEmnigohUCCGgQNi05aZgqLWJpSGdd/nDsW81nkuS3gjWVar/jiXRNwWAghFcUssRgBMqZOFblJqxSSrIaM6ECFTIDZzByNEM+dOAL3NjQMgJBvVQw1GANBWEBoTUuapzZByd7Hxw5gCE2jPEomm4lUdmRVAEnXrJBklpbBoCodDMYm+532ZDRGFd/O06gQQDD0PHTkKY1Q309zL3KxWI0QQrKrc57zKtdQyuTA0s9xxcwfCMhoLEiJq1LEgcITv51MI6E5CwUvw56cI+hQEA0IEETxSTx5rdo+4TZw4RwDv48H3geUjJPJYpFzQooilQIYIELRwVh6xofdsGnofoOKJsROPWf/73y9AzW/ruoQE4QiLGOkSghMiE/CS2AB6AGBogJ8ghxNwAe/PmXDhQ50YTwgLO+kUvfvj604Un3istP4NBOaUOMRv/3sxYluyjKUkHXHCcU5ZDzyiYBh0QkIgwk+i/KcrBHTKK4IfUZSlau6PFf8TbhHgj8DYQuuB05kvF/JUN4b3RebHWjG8/38BFlADETzLolTnAb4UEquBLtkC/g4W8ztFbXis+vpycD+d25JUBSxqdu+L3Eiy7A5LuRnUAB6ByYUEdpKdi3h/0vF43r+D3f2NR/d3bwi+T2jxBOoRBoV3wPh4TwmTLeCbkIU5GlJ0q07VAbgfNtdPLn718m0P1GqrahCYOnG1WhwJmJEIzQMQFlOGUo1lwR1BmLSpCBB4qa4BGggeqSMgRI/FNkK15U4G4TI7Cc1FIWHXS9HSkWwvB8lSGx7uZgY8P0tZiADuR50s3L0jTl1HPSBgzI6C2kwDOggiLKqSBKp5oAGQYAAhcaiZxdCxOQBiU7NZzWOeGydCADMTRmux2mRiDofnV5vnq3zdyzc+eDJNJQ+rruv2Dw+jzXHRzxNcymY/zur++v6eV6nO2hBdy5D8bNtdPklH1SL46v7d5Xr9dvcWOfUrOX/6/OoifvXLXwTg+qLfXm2O+1LK3J/n49sxcNgX/PEvbjPK/TSVw0yp+/DZ1f27tz2u9rV9/vkbC1YKBGutXp2vszOH9SlizZwTMJ092bSX++NBp4hdOXz04qKxbbMMeZir3+5nFF2vyTrcHabq0TviavDk3aZb98M0W0a/eHH1yy9vbh+qgTHLsdWm2uV0dgkvrp/vX716+vTq7f0IZfx7f/u7Nw93797dX1+eydp//as3+4epFJvDN+cXa4GPPryCfPR23Hxwef/w5ko2q8tzHVY/e3X34vnzMh78zFzieDzM+8Nv7g93F/Of/dEnb35198mTsxingvJfff6bbo5n33z68pevvv3B773o5YOrzf/7v/716my4fzgKYVX/9ZcvL6/Oqh72rXu7u0nHw81uOs6hJNiN50Pnff/bOUO0TBLCRSSSABdP2UfD42UBWwhAhABo7vFYJ3gk4CwUQEei8HA0BHb8LaMTTwnmssICEoIHQhA8rofLqoIYy2axIB9LweCkQ4lLYYNO2pn4fu+CiBNCjqdzgkdQafliCw0DEJlokV8lokeKDCxevnjavB45nh5AC0vmBPEsHB9E8FMZ4QTW82nSn46wOPqeigd+Ail+u1jGI/gVsZCJTrDYI2Tz+JffLt2no8XioAiEi3HTe/7Sic8bAfYI7ZygMIBlE4twXFxmFyA+/IQCvq9PPG4ocfqoBYpZ9tXTvr8gR75Q2sMD3jOeACCCljpHEGLg40EfL8QJDEMCCgukwAACNwQkJHc4gV8LuhcnD+QFU/PTA8MQTghM9IhwLeWgx7t+qrwAnhwBHq+dOyIjoXsQkpOFLVeGmE6bgQYkIcdYfFfMwyJco2OsZrf3e5dtJgmCYs0oXI0wwE5UL4sGiI5R1Iq7NUVwAGdhYgK3CGBZhFQDH9V7AYk7RAcEbmbgQcLh4GZIREigAYRazT0WGhQ+wnrvFYZZTtEQIjAD86IUix7Bgm6IhMQA4EsEhQGPV/dUbDvdyaVw6w6M4NFOfBnQqQQqJiAmQkEMAnIAYMYgZAC1Wg1YLJoaifBiuZV7lkw80/FYABAJKLGjBwVSOEaDgAA6eT0QsDMTS4QFBEcBVwg14YTI5oxIqYNW1DxmrY4pJ2YDFmBBh3BCR2VhIGtRc87CGVDRFYOEBTXCkJAWcA8NytwCeK5Y1B+vIwEnD0ROGJ4FhRHcAMWAtJqV4m1yq11O/ZCnppAlDR0mzn
0HAK7aOYA1RmMMg1Czpm7gNSKImgcnZyE3DwhKIANRh5zIQaNWFNMwbyEAFKzsRIQYyOQIYK7NvBa008I7z9WR1F0NiARZuAcBBg8vzgsOTDYMTIGpSwiYlFzdIHSyU3kA4OjY9cM/+8Fnb0uDnq7PV8e3JZC0lSycRMygM3h+tt52fTIf1S2lm3H8ap51X9yh69PDy7dD0MdPz5/I5hBxX8eN5FLII4hwvc7rIQDASJ88WddD2Y1+3E1PBvnwxaU1QJQkIMLcJUSExc5McDpU6bvDXC43W/QY0qBmVRsDNMK1MAHm3M/zCACj+ld3d+e9sEfP+cX5U7t5t75YvZnHg9cn683Nw76YTqYDc5hv1utMORmhUTlOCCFJAHDougx8tV3nTB+8uHhVxpk9h333xaZLMgz86t3005uH0YICSTglnMf65Nk6M745WCC9vNmXcf7VuzcEsVqvn11cPL3qY6pnXSr7idC///1nv3j9MB2DEw8bBm0QslmncxmSUFft8O4WEuWU7/Z1P9aOeZSwV2//6Jsvwqmjbo2d3h+4lT5nGfIPb+6mmdfX66nNq16wp2JKJNWwS+LVx10tU5SprrpMEe4O5t50srBNP5NfPN/sxzKbUc9l9PlQKsJRvVvJ5Vlvx+OTZ5vjpMfJa8DBnJKUqu/mybyer7uBobV5INterN1k96BKzGtZ94yONw+zNnq4bfsqz56m8yfnq47h3YNhKDPnfDA7v9ysIjTg8tnm9WFqiYZV7oDO18OTq+3t7qDhqUvFKueuS+lQ71Pfs9DDwygSw3rg1D17erXJvLu/v9/d3Y5zn6VjGTJdXK0ZIsxzn/rCCJYIs7DEqZniBBVZG7ESAXXSESMRqzklTCLqLYjN3aoiATKgBVuwOjcDdfRYX27T+eABJBRehUJyCkdHNouOJayFlnp8qDpyAistICjQXNUaE0/jMVtWUxREQm0qIpClNQVi95o7VoNekiu1mdA0PKJRqR7mjpHZJHOgk4ehqzmwJEYPz4LmDRwFEJ04CINbA2/eQl3IdJJuuDgXBTrOBTWoABolliDgHjA1wra0hjAQEFZQZMo91WZhFoHg7o4R2EnHKBKYkRJSmM/NADwoploboiGoxTaRcEC4q4KHuCPROB174pVwU4WAMs8hYQazOWQB5IQUEBZOAU0NXB1AjZqZImUmwcgi4SDBBCA5w7Cep1mrdmupszJKFrCmtbbAIEjhOB5qHiKRjtMsQEBoLJw6jGgKxWtFNfBM1FqDQDNDQu66IXfifbmrh0Nr3s7Ou3U3zKWE1lqhugFgmT2Dr3sS9PHudj1sz/LazGfX1/flfN2vO1ZsieFy3eGhHsOBtEv9qAUAS7WeOrBQUzcXWsAcnMYxSSpFbbFgYGaBViunkKzuc+pJa4XiBUM9XA09vCoD5tZNU0REC4j0mCSTobOfyk5LeIW/xRoIw3zhhCOCnzCgR4zhESNYyGKIGB4nhjfgKZgBAMQ4pRkLEoGxAB8R7zsR/NTPAIhL5XGp28LC2/B4HwYuBKZTtOmwdMc4QBAjBhACM6KdoCcMJKaFhrfUQihA3R0eExfHJZwChBOFPmJJCZZsacmUHuPgU0y6BHlwwh3e814e060IWJwdlqrvUpc/tR/AYzYE7+GcxxgXGGnpe3B7pAc+plVLzkMnej8iwOIKsVQbLcIR/TEVWa7S4qfw2Nbw/rb9FhR6TAaWfAwEUR4xQTXQE18HUC0LijCYd4IThAdqC0BkYjeHBZT1kzfBCfpaPv791fDf3uv3iY+1oIXIFoBELBwKqg4B7vH+3WB/gwf2CN894kOPv19ymL+RiT2W8vE9Kw0gPJAQzAIo+LQlcFrIOJFF3KFMLVPOXc8JyzT1q/V6c5U9xmahwUhqZg0wQoSFoTn4o4GsNfdTloceEU6qdZOTALh5J0CGptE01GwYkjUF1YBggyhWFQgpSzJzNS9TDcSpBDF2FkT05HqFLPNh2iQx95SiBaaUIeD/y9V//VqTbHli2HIRmbndcZ+tqmu7b9++Y3pm2GxSI5F6I/RXCnoQQIAvAgSBIjQQNZIgChjD5tju6bm2/OeO3SYzI5bRQ+Q+1aOqi6pbB+fbOzP2zoi1futn5lMVQiJoRz4GZpYy1QC3CHRHwjK5BWjAdtsPOXk1q84EDDGX6rMFkUMwEgGGexZhQgKoNdj1x7uLX73Y/vxHn7959eZvfv1d37+4fHXx8d27u8eTpzzPU8f973/7lc7mFcfZwOz2/tPNxfrNm/WQ6zifDqM+jtNTxOefffF0/3E10OUlMcL4+PUno2Gduq7fbDenpyN4udnQfP/xdDe75rnC9noVXMs4dcNqKvDtu3tA2X+z3wr3OR9drrfb94+Px8nYTm+v1heX3ZdfveuGjSC7xjowAUZ1A5S0etwrmG626enTx/769c2Lm9cvh/F4eNo/gWQFftqfqlswp26YU6ljTYSfPnzLmfMAP/3iF//2179eY/fVV18O3QoEyFbkcf/du0FoB/3a/PffH3/24s3Vy5t/88/+l8urK0YZq56SvLxe/f2f7v7w+3//+ZvLlfnv/9W/r5B/8ubt8XHCbvjij37ym/dP80Psbw+fv1z/7tffHB5K3l7sD+Xr28dp2m8YfT58/urtx9t8q/OL/uof/qOfvu77uy9/9/HTg6SVnmj/afrTX/7oP/6737gBF/vRj158+e9/5yBEHIJu/Wcvf/zlu399+/BRID0/LYwIizoJ2r4YS4LsImJt6EHbecK8cRXCHYEgrO3Y6o6BERAWBhDulM6oNzMguAfDwkYJD0JA4gCAUDiTbZb/ISw7WDSAOcwXHiMictsKzrsYAjUQJRp6AYCN/7TseOSuDbgH5AiDszx50URDg0MCiSOQiAIh3AmgvWYsUA76guyccRhCOtNmCAkDCBkBA5GQAyDC6DwPASR3P6P1bVfERea9nCDL2dK2rqa7Pe/hQc/MyKW5X7bA5d2Jo3kGLECCL+gORIQ3sVbD3s4THKJwCERmRHIP+lsg2zOf2Nph1mKwG/qFaM9SsfMaNhVaxHmPBQBCiCDAACJq5gXPDCfHljyKDORNDgbLnIZaBC0CeDgiejhCNNISAACxe3uzpZYAJCB/Zgx5O3LPyFY7yQgAkP0ZN4NAQCJ20wZELU+BtEhlqlqG1DMnC0BEVb4/7IunYVhj2DRWBguhBhwigFdFCJ3mqlAn82qIiLFI5JrGrjk3EAYTAaEHmMYylEJBJBEAZsKqHhAhIq0gQ45wClyQTQTE0ACDM+vNGz7avqyIYO7qIAIeLEKIwQ4OxCaSMFpAEjQadfhSXjNgOAEFIQVYBEWYqQNhhDmYubdHgyAwAqPhbhCBXc5aq2DUVuWYTTrnlB0cCaXre0m5l/E4lWKlBLeaQaJbCbAjOBHbbETccCzGYGE3JEmEQJjRCXCFlgHUrYAjOhJkiy48GIycEIQZiUnZUNwlAkJzASiUJFTFOTAxsSkQZ7dwDzcLDodk2FkgoEQEEwcRADoBAii4qxG4kEVILQWqonMoOUJhjcwoiVYr4oTMF
BV8Bh2HVDpA0+KgkXia62lWMDCWudbUYcLEEkEte145Ajxq0WmcICqSIwVyJgFHpZQCCwAgkBC7YQSHOiK6adswILzx6cwJBQMZqhEXIJjmQ58RuLI4ZyQWVCenWdEp67gkoE02Ph7kwatLHE+zu6tairher19eXWamzeryer39+P3X1NNhnt9/eDy679GPXvuBy1ED3Ka6d+j3T59v1ivuHz/dHzBqMEs6Ho7X2z4Tg3mXBfZBs7/arV90q348ANr12xdl1NWqb64sGkERnCgMO8njXKCqjhOApb5b5YTBbnZSFTMhzkwk0DmeHA9VuwyoxSiku+tTOZxGrWPq+NPtXVGrc11LsurOdJhKBrvu8lZWuN3Md5/WYqttRxrgmIPKSTddvw76fLV62fMFpse7p+4yveqILzLlLDl/+f7+k6I7B9lqK294q5BXw+zlYpzK4TQe9no8fPz9d3Az5KtV9+OXl/V4wsfj54nSm/7bu+P4eHj15iZ3sn+cU4oVp9l0y/LhOM117pgmg1J1ND5NfvWhvByyaUnrdJkIuJ/GwkrDhN8exnLSi9fD+FDdixJUQ8RuEKyHuTzZ/OTEPLPNSdI8u/iuTxTw8aAfjvVmxP08PUwziExqgKhzHTIi2ON+/5Ori+mwf3//aTtcEycvZVIsakkoMB1GVyzXa/rlT7a//fb+/X1/OBBlAQqi8CjrC+436XiyGUFJZreYxuvd6tN9acqZ1692pc5lqrnLiWA8lDnRZy92O+Qesaoe5hYHryvu1VBr0QKOwaarYdgM/TCsXr18+fTu/sPH++/efQyJCK4nvbnoLvohM0OEGZg6B/cscxgE9blbjoL2L1dFSjx0CGQQjZmLiLVWJyjViEE4VAuqoUPv3gFAYl711A3WcwHsMhOGo5jXQEEiDwy3cLfj03S4O+0fnUCECIAyBCOgby768WgsAGplhpgDk7i7oVmtETAdZyRIZiEiImhKZMLUCK/CXMw0AJgfx9qtCMiIKTMoIAKk4JTADTmlUIyWjRxETuBORKdxHjaMnRUwYJYugm0lyZKgUrCnIQwjAGrVQNBaSoCzVAIEdXFiDLOOxGvTx4abhwI4hBsiAFNRNbcgWIgNiAaobmAGlASCGHmpFKGlvIqQhrqHmvX9EIEOQerRBEWCEVDViEmtqhtJHqc5Mw050M3GwiJI0nXoBn0izh0xneZiaOqKHICQkSwcQOfZCoa69UMmo0o0F+sACc1qrWHYURCysJsHI3BCYgiMMnUprCoxFC3IlJhdKAis6Mmn4CEleng4bFNIl6tan3QtACVmQQ0NIgcqFA91LAxBpRtWIOXmenX3eICEjWfBSEIBZfJAnTznrGYEID1PpWKEqZqW1OM8j9QBr5gc/ABzIFBEBZEUkwlxeKwHKZUtAmyR3lD4UmsthdeCxTyPFJuUrBXfjXezQDxnMdCCp8S5YkVAOIsJEMIbu2Tp7hui1FIeFyJrICzj5UWRvgyKEc7zLmhdK55RkoClieGlJVi0WmfbIWQE81YzA9NzTCIQUTwz4BHw7FLRZimLigAXL5vwZebr57s6dyBLHfwM9cDZHgLOA1A/x0U+s93b6/0tEtO5vm/VQThTE2/AuTmKZQAe0WbFAUB0rmfbmiyvEeef4w/8oee1C2i1w7lBWNb7GexrkjYGTERJIALMHBEUmyUCBkIoOESXECxyYnOzBnVRkJ+VDWeOUZzbznZtzSECCJmoKUMaMYgaEcyh1iCiJkA9D42XV1p0Lm1Z2pfzb3PBftCYnUfW547rGUTC595rWbkgQAtnohLQnZfz7KSEERju3ZDVdBqPklfIybT2/frqerf/8KnvobbP31pDjK2BWu7PHQnMjRMNXZ6qzsWROSVab7JWnSZ1CFMQWahVmCjChSjACfk01kA6zYbgwtT17OAQeDzM6XpIKWyuBJCZDodxqhaIPacyaUuf5ZRO42wEqyHv+nQ8FTMw9/W283BX6xBNIxExwDyWxIjoalGrQUTXi0cEITdJMgAGmAUC9Oj/1d/97H/9D//ui6vL24/2OMnbn/zRKfhoU+o3292pjuPtH95dXVwj1PWG80kdbA7dbVerixVtun6oejAgmMdaAd69e9SJ2Ojh7jEC376+BoRSIks3nhyQhl4Sw8N7VcO8SY6QN/zw8LjedZer4dOtnqa5G3j7crNmUo35fjodS3K/2ax6gOk4n05j5u7lm/U82TzHYT9StZxAVtytk03jkNlAX7y+jn5Xy0xhtQYnyRfD/d4zu41jjnT4+MAiX3x2dXg4jGbGdbWWD/df/uT14MfRu8SMOffffvcpqn/x2dV//me/+snrl//X//Ffvv3jL37xD//i4/3h6lfT/Hjr+6cf/ejadruk9f3X73abq5//8S//8v/5L0brNpcXj09ohdJktx+//I9//WVVfLvrn+4P3aoX6Segi1cvpqf55vpawGud//Dt70uZfvTZ63U5yfFxz/mz16ur6/WdTfO96eT/4d/+/un+6frisuvSl19+v1t1gGDqkJxi/vX3/+724209FTlLbxZYe9l823+2XWOxY4NFgAQYjs80n/YEOnhAIPoZFH7+q2HZQgsd0yO4eXotvTo+0yKoAUZ01nYhINKZ8rDsd0zki0A52sHV2DPYVLY/7J/uQNFIlYu1TuDyXtE29ThTVmBhLzKEI3HbX7ztmwuLdEGp44cd7nnrX9hPjYTVsK1lsHE+TAC5QfdnydOyZov4dtmvl33qmSl7ZlI+79bP1/CfGuo9/wbAgsucBxrNL2UhqnI7XwwgGvu2HeGB2PTv1CYivmykEbacyvAMxFE0wBAXqdfCnAJ0QGpUKz8LyOCZnYTnEcsZtT/fLQRAOC7i9LPD0d86wv4WdEYAHs9/dKF0USM1PZ8B5/MUAH/wzVuqhWdy1pnGA94M8HxBuJYKBtSVmZsou7iH10Ah6r7/cP90nK8vLgTcbUYIIKiKguHQJjbmqqWqVnD1qNoMNptSOpbKKYCWM42QCciXu2iTMw8CocSEYGZmyICOoAjuAYjEHrZgt3EOtYZljc1gofIStiqFuD1N7q2OY/QISkSCsUjHF5Jd07YBcQCaOTIiQDWLACQBWoolDzev4TWcEClakUQIKIjMQljVq3tEDWdBJEUEFHJSEsq9SErzBKcaURs7LFRDKIBRrUpmA2eSQDBkXOK3A4k4sJUBGAhBakEIbhUa3uk1mCgxMDoBECKTkhs5JXCJYHYMYgYXVY8gzkkDDAITAUOMVoupFQ9p3y6HcDPkdgQ7ETCRuxdVMxWmyGTqItkpqiBk6leDU0JEtxp1EjBhYkcW5iRqXs0Ia04wh3sAMqi7V1OKLjEz9n3XIYcFaoBBNWscKwRHsJyYyYUFPIgzBYW2tHUMB8WYbQ4SCwWW9jMCBncMB/RSNYg0zCiYzBACzQXB2RQxWKflKfj9V++q8ehuHKU4qiJiBE+VHyc8TMfDt49QZg5NiSmBsytWc0sUVqHvOchkwHqKx7kOPL59efWP//wv/i//3382eo0c3CVhfvvq6mIlX3//8PBU
y1iznlYCN9v18VgEDqlfqSJnUvMsEuGlWnOeHRh7Y3fDxvAycwRg7FKm8KhaAobVDlGGMtuQ1WsiIoSn43S0OtWyXmUD81KTu2TOSNu+S46guJ/KWrrVRS9anybez7VPhGHCrFrV4qlOyB0ZhGOxoJQKUBa7WbMRaC6vr7vjXu5Ox6lWQtyt4DiPl5cIFQ+P6MYAi2roabJPx+NXt6frgV7thiHh/sPjVcrk+PB02K773bYjSfNYdqn7YhXFvM6V0G+6VJQOxTDl/Th/tsrr7aqgbi46QvQHgKDrlRye5tPT9GDarZJWxdSgXDxOo88KhtkYqwNhOKz7YQCMiOOkOusU8NXj7KqGTlgB3RGWxo6JPT/tkYAvNq8cbBh4RozZgYIZCDs0XUlPEX/1m/3tgQ57H4a+WAw994OsN+nj41jn8aKXPqVhnmzyY5kPY3SEu9zpqV5eeId5pjjNda52tekuh+Gz7QYmtaLzOIdZGjoPZkxCcf94NAoW6ZguL3e79erpaf/91394+rgvriZa3AQgMd/sLlar1ViNRYJwmqZxngggSXb1TAtGtPyL+56IJIsgazimVvg4Mnigk5s7ANZaSFUQEgZaBEm3zjVnZSKCGpGIEDFJAiAg5ABiIJ8VxrAiSMGMCM5mHBGuXrVVCRnnI8QEQkxEgVHnSghA4AhmaAAURhhCQAlqqQFkARTkgJC4RlBgqR6oIYEMnaDWQIgIYsmCQohuihgpGB2n2YLBIVQhd5EEFOpm0+noZFwRIDsSVqzmal6TQAUNtQBqDt3OEGB9LxJExBWgTMEYQE4MQgwBsxu5abghVoAIzAgpADwUOWUxQMEQgJTQZ63ubl49iJMCGLATOXJE5JRA3d3UfZq8Af1CaM5AYBCUBQGmOu/6AT0CaZqqSFqvBZM4iTe34pglU4RNc2XBnPg0wrHUhNinvlYAj0jBgqXYPM0AkJhMwdypQfjcsQgHep10mjjEgsHgNMnscbWh1MtYCiVJjOZm7nnoHp8OnYKE67xfrYYNkk21WEXNw6qfy6zg7rHOrDpOqBlzn6mSl1lTQkKZrQJSnTXCGjvXzTNJTlzMkaPrMg2YUjpOExQPiIJQwwFdGQijuxB0Z7IcoRCheNJyLsmavqdRUKAVkedqEeDcmCNhG+s18AifKScI50KwTTCbI8zCF38WDS2lM/7gUvGMXNAZQgJfMJHWRCxvQYjhGNAU54jPOAAuKnuE2kAE99T4D81imdGDHEEDuF2mL/U1MXq7smbbSo36A4TkHggLHgRnuUHzawZYhFz4DDzE2XWjQUHne4Xz9QG2FfMGHoXDMxMGn6v7c5Ha/FPbOi91pS/XeEamzljJws9a1goRiNAD0BvisVDD2qdisVCTzgylVmcvGkAkcghEzIyCwA2QYjZ0ax9ngAdokFUnIsnIADlB0TAgjwa/gS89BfrSESzfKAQMBBZp5lV0RmzczN0iMDwkIQSEuyO6exNqICJnIWII12qgZ0Tq/GVrXURAnIHA83ueAaSlscKFCQAYdJ6v06IFiVmXJhmBUu7QgFEUa7iCOsQoXTYiZBl2ly/f3Hz36ZNVB0JmVq/UfBcgEoqbu4OCE2BOlPtMTS3IqNXGqH1md++SpER6Kg1GNHONIIokCch1tpRTQFR1Zna3MBCmagBEZTbGYIae4Th7sKAyY4BH11FVqAqnqQZhHjpgmizmagDITGrOGTCcLFIWDEJ3Zqy1ujoRJyF3iPYUhyfG1OdqbupAcLVK//hXP/6v/rNf6Bi3+7HI7uXbzy1lv79Ls4dhNvr9t7fkc/KSmI+qk1Zjv7t/2Gxzl9E9DoWU8zwfBKc3V5uPH07dcHN3PCLDbivbK5mfpn7Vg6Snx1NHeHgcy6ynE+2G7cXrm+8eP4ynAzsicClVay3z1GPmLP26++bhFpwfjyN4bIbVx0+HzSrnjn/66ubpfi9DP6zS/d3D9eWljOOoetENB43UJ0j06vLi9mGmcV/cQlBLwUms6rE6cXJ3goIlYsa56mQ0kIRNxzKDdJd9nqv/0U8++/Bpehpte/l5kR993F/+D//9P6GU7/aHnDZ/9r/5rx/n8gnsj3/+JlT/43cfuy5l6y+6q3/6P/4vyfoSeLNed2nz7v3j/VePv/v9Nyui42z3p+OU4S/+4S/vHz/sXrzcfzrspHv4cHp1szuOoxC83uVO9CV7tXp5MRDi/XfveX/cps1K+HSc3lyuLzfD7959v7naxlwO44mDX257lf7pq2+NGYdhEIQnAABCZqKmmD0TQxc/X4oINF8es/DwxfelUUTCERbD6HhWTjXqSKPhWBhGYghwOhvRLEMDQG7dKp4JME0E1mie5w2VCPBMjUQ8s4ligeHx/JDb4q/jbX8z9+blFo1IGgsxtbEzAsgDOP7WfowAYAEQyABgEAS0cDdg6ciX/QIpwpfrW+x92sHADU8iasEJ52kBnKlQ5wMiFje8dlsL8WlRhQHC+UCB5Z8O7Q6xfVLPx+jy02ekvr2J+TmPggIW6yYIwvDG8YkFHYf24TUKzg84lIchnBGH5fbO2yngMggJ8HBozTuE4xmQQY+FgAQOhEje3PmWQqCZ+jSLo4A2imk5CBERC2XGzycsCS2AHlIsYmQAAGoORG0B8ZmfFYj0PKeBZ3Py5cylhmi1s6oBWYTkYe0ullPEsRZrXytDdw8zPOnp9lT6tA6A0/hEqEnIG8+JsNU6BlDcNaComoYDWK1IAGBBjgC4tLSL3hLAEditEhBhCnDTGSgACIi5WS2EURPSGwciUbCboYcHwlmaTecGDsDbOQrBjb2/rBW3Z4kRUmJODACMpOCEAESO5EKBy4eXWAwcPIA4MMDdABILRDgFgJVamJN0GUIJiNv6AyZJBRwcLbgWTQHBHqA5ZwwND8naZxguUEbfP5lVY2KkpkH0xoUhaEYhEhAeFd0SBnLinBRTKHqJ5lBUrJqbRUXKgYjMkZCyEZEzgQAKAVWnQEHHcOAgcWKEMEgIHVKi8KrKHBggEGEAgOrggEhCjd9GzXAzzIPOSlSMMPcSBRJr5m67lvXKJDORa0V3RhOwjNFJJoDwiswE4BHVVB2KhREGElkgk7FgEpCecVBt5llNcItGgKFnHtaibA2CAMbAQAy3AAdDyV2NRiqnQEEBYAZVYvAKFKQoQSk4Y6qBCsQEgYG5z1Ujzv74GVOm2IoU0yJwsdsoyuwGzF89jRP4DDWn0FNNWntjcj1YVYTN0ANSnWczJcH1Knf9cDT86uP+DaQ/+9NfvLt7/+HT/esXF6/Ww/h4PMawXq82myHV+nj3YSXy7YfDi4vL17kz89xlbDxEjMSsWlKWCE9GhAJIHkgogcJaCA2EISKQvakNKLokk3nOmdU4cF/0gFiJpv1MEqHtrHFwFes2uSdW7oaC8vXd/dsXuxe7PH3/xFV6kTaV7dbD3eNTt1uLJZ2n43HWgG8PdWD4yeUqk9/ef9x22zddx1veZJxUWUhrKerbLssLVCLzJNVWu27/ND2UOFSfS3zz/dMfvdh8frXuQi/74X52OmoYzRKz6jj
PXuw68wwgAKh63fdPOU+j3u5P9vpidzU8naZ5qk+nyTzWa37TdYL4bhyfRi3TXFWrYeYQLqAAkb2qAGRJRWuQd6ug8NR1lPjjYRqJq3kASE5ugU4IIUL9IJJkOtZJVcGKTjkzHsHCCRDNEYjYAhw83+7rYe7miYacpMsBnlZ8V+dxHw4ZQ7cJhz5ll4nTt4cynqbLdU8Tbq8ut1nCEUICZdTjbruJRGUaVyljGph2Mb6z6uBgDGOZkNN617+5ucLT+PR4uP3w6XSck6AIj5M9nqZg2CS5vtgKJ0LOfTIHAiwGBLhbdUezYb3ls43FGSoSJmYAsHBmBERTx9apqEMYAEzzlDSkHle9XKxTUQ1OGu5h6tFxIgAwcKdEpBjmhYUgpmk8Btj2ZhPzanYcpxNw6lZMqHAyciimJVuJEEiG7AbAMaRUXMd5diLmDBqluhXn8C4xJTSvm1WP7iyCjH0iNBu1pgGhMYkpUNxVO+yDGdQYualVqqtDEskGiki1GCHklKL4MZQpdV1arZiomBVGe9xXrWSTF0eRrNUkILmYewB6YUrsQpgo3FtInIO6gS/YDDATEZLBKpBU0YECHZyBEFwIUmIPrxgOFgKuoQRAkljAQc0a/RpVp9NogR5AhLnPUzFDzqmDUgFcw20yduwIIDEie7AQJckOfCwFgdQ9wgg0cYiAWQCmiAjjGpRX/axHrTqXkoVFIjQSUPVAirDIeQjqvFQLDIAiRIgZeUUyFXez0zjjbKvN9lSrVs2JTuMMJOQDhAmnWeeoFqDDSkjjsD/V4sMqE0gn3gmVgGOpyg4EIGizgkcQKxgBbzYrrRMgJpIh5/2pGOA42WbdMzMizfUkLIhxmPR0UEnYZQQNcy0IASGDDDummedRO+fj3dJht7p/0UItVgWtGm4VPzicFe0A0kwSzy05AkKgLdoycANvuS1I5/nes5TgP+3hz77LgeAR7o38soBNrWVdRGgRrd+ghQO/0JlaeewQzbU6ACKcHZoRUGtHDJoJhcsZzGlUH2qyKAAiFKLzhS02nd7SWiLOJCOPNk5d5rcLRtYK9kWqhtg67WYdtGAYDTuKsHPqCy490sLbWsp1gAaytCtuC9TI9Lg4Q8MCCmFD0WAZXrfCvb1vnJunOM9Yn9d/KYEX9IowmslFa0eaMpAhGJ9Tu1qqTov8gIYEBeFcnTCSUBbysHA3aPwxpGg+V637WO60WVZLEukySWKgCFcDd1WLQGvzUNMwCKHF/xmaKoSpz5mTaNWG2T8DQmeWQeMEwPluzzf6w+/98Mut9WUEj8YDWv7keZAMWmd3cMVip/BK5POpTPMo0ymtrzl1U4Hd+qIDBHVMYqoUiA5mDoGO7gYBCMJFLSfIjHWqVg2B2tB+PlUkyynVqoMgExU1x2BGV5gndVXXKqlvlpqSCB3MolSVQVLuT9M8DChh8356OljX9YllM+TTPOWBbz89gFPfJcMYNhxG4/0pPKr5sIrVwNVPr2/66ZGOx2Dhp2ORTG4aFilR8x7uh7wZUhmLaog0a7n6q198/l/8vZ/9+M3rMB7DIvfD7ubpNN3ff9f1wDZ+8+33tw/3n04PQHp6OPZXL6eUPswzgCkEYAU4jU8xDhzJ+gzXq86P/qa//t3HU7/1F6/7DPawf1gRHw91tnGdB6h4eKgBtFkNLzfrx8e7pDMBjlDc/OExwORHN/3nL17/ze++e3gY92NZr7YpEQWSxcVmezjNn26n607+t/+7X/1//udfT9Uvb7Zk0ilSwOnxSQQd+QTDb94/RCXqeHd99f2H96fRYj4Cx2l8ePXi7Zub3e3Dh82weri9dfXc8WcvNg8fj3OSyPK4P253w7vbu9OjDp083n/8/t3tt7+ryeYvXq2vVvSrm+Gf/h//91XSqRTe/aTvN93AQgkg/e7rr9G3Tye7+Ox1Uf3ym99fvXzJE//xL//4X//6t59fX8xVt1fbD4dpHg9f0Gcvbq6+//7u9nbPXfrqw/vdSrabdHf/oeQVRLm4sKfT8ffffb3OP7p4+fOrF5vbv/7X25V/9/ih26xfvP789O23P/+jN3Wuh7ncfnx0c0OwWni1Wioieo4jcMb2jYBzpAAulKAFg2jyM2vqGApjcMHw1mpBQETLK2jPIweQB5oTIlOgOxG1kUPTNjUxESB6s0tpUEOz6j3zfoAo3Bpq5IDLfrhIo9r+Ry2sygOaQz42LGNRcLmDm1uT7bZhAyBHc5hpB0lTv4X637LswTPsBWeLvobzIC6+1207oYXC6dy2w0aUgQBcVHjQTPsjsKmd2v87S6vOBwE18+w4+z1FQMOk2ql8BoXakR2wPLhnbXaDm5oZzUJKcoSFshIR3s6QJhlqvFFsg5hm6BPhzSyw4T3Nf+dcOAP4wj5qROBlkRZmCuASmYcOgI1bhkGIssiisDFsDBYxYpucNP1y+4XmbLVsymfYcEG2ntlJZ9QtANv7xjK6WWjEwS0OD+GZ1ttgySZQhIUJA7YAKfH/51qoXoIIDNUioRKQhX31h48kW+/gWE9eRyHoOmHi3A/hYO7grhalmpmWOpuCtUlYAFC4K7iLRBOdPY+azApIZ6rNRBsgzC0IiRgBmdjAbJ7pXI2BV8EgDD1Ds62PBm/CvsVc3q1iY/0AuAMiMCGGCSISdYQdY8EgxPBFQU+2MJUYpSkPApAwkCE8Uk5MjBE+GZiZz2YdBsgShLx4PEqSLlV1CA1VoEDXQAFz5xTEAAIokXq4uMT1VX/a1+OjGZAbUkizizJXsAAKwLQAugEGNUhUIMzCg9BZqBo758AIMmAMjiAFQSRBDiUHjiAPMsTAxBxtUioRAKEYiZCJBhbzWghCBJGjBoaHRiAyExI4QYsoi8XrABAIHIIYWKKAyWaF6wH7FQCUqtktilrUjLXvug5Jwy2aplAlCakjxzSVEsCJN8NgDhaQJSNlAAqrWgq4MnrjUYFw85JIARDGlMIcwAk5EA2gea5DEII7hEYgeBIMnxHM1a0qOiCHBIYqpiAAMEVANrHqMJvrYuh7mfO6Sy+23cN+mosFyL3Nxp764QV1+5gjD9989Y4p+j6lDDHWHL4i6cMBoTJN6im4c0uqZTQVf3/rJLQderjZSYC7/exHb9/f7Xf9QMFvr1b5Vfc0T39Zb9/Np6t5tcop29xJcoq5qkEIAogzSbFABGB0DbUKAILkpszhodVQWBQAAHvJsxXSSk5elRU+jhMyjhbbjjeQT0/3Ly+2b68ur+X622+/1aTrYctqRFnnEIutgqsLQLWZBk9OQ6KPo3ciaPnm5uLj48PdsQwgQ9Gf7vLP11fzoRafTRwCqxqI7Hb5eJrR52mMMpkpvtymy+SfvV2PyF9/d/AERdN+sm8Op13HOpTUZQEDmF/erN/N0/i4d4MX6wzbzi3m42xVN1lWlA3wsUz4EF2X1n0/TxqEn+6PuePXm82bl5d/8/X7p7keZ48SmHDWMQszEyKhwVxK9agJPh7Li00PAY5AvZRpXg0SRubRJ+qCheh6u/1w/DjN8zjWIzIMdHOzq7PujyO4A7DVGuGudbeitz
f970c9TdDlJAyE0RNx8ZX0pzJZVD3dve4us+ZtwgnssWowz4TbPvPQjcjH43yYxvUmr/rVRV6vXgyHTw+AGUn3pzkRPY2HWUuX14z85uaqmL3/+Pj06Xaaa7eSLDgdZ0p0mgslDKLqkJJEwDxrdGkq6qZWLXPqmGKcuJTuXBEtUFHKAomX8EoCj+roGIbGYkwAqjW5d4BIycL3ZeSMqSdMAoSJiIARU4AQcQOjkQFgBlJD065XFkiIAalnAQUoVnXVDRaebO6HiA5scnMKcw0zghrFEodiKIJCEJkQC/IqcWJmEo8efTqdMnHiqFFIFIhMsPpikjf0XU/9VBE4JIFwzDMEUZ+wCHhg3qwwTA2PD6BHJWbJKTFzT+5R5jJp8RPYnFxTkIDQANNSQ1q0U2murqwkQZlxEbKgV/AwBjFraVMMKAzAJExEwOFOQYk85R5yUq9Bc7Uaqix8NGDAzIxY2R0pyjyBg5q6MSIzJTexiGCm4Ewyz7NCINHjaOvkSJiZAmCcTcmJQoIMcbSy15MAMIaWiTgPeeW0LrNDlq5fj2VksgAFiaFnrVCsSi8o4gEVUcE4HMFrTDj0IALqriUhKMBsAiqbQr3xrrtST8xzLe4aRuY5Bx33plMtKQcQz56OJS47HiQbHEstwQlTOgVUi8wo6wGsALIVAEzFeda06jut9Wm04owsuzzo5LMiKlQN6aGWWg0yp+TaR0hOU2AIziXcQzNGivWuu789LsVfLCz1OEMlZ0J3wyrwuaYLAFqUUK0MxAWvWWoxiAhgsgjDMICGHgIAMhCQL8X88rOFB49LtejLZC4Qmh/nUiIuA9BzUdgubJk2t5f3YCBrLYqHCCA6A7mHhDcTaz1rENp0EhH4zKNsY+9z3wGNkm6L80xAAJ0hLVimds0tMhodqym/2uWxozoA2LlKDw/w57hjeB4eByG2vL+lOMUfVru9XKPx4/kDwjPqRM+IUgQEWAPVqPUVAAs5y898muVuF2+R9sOWAwMLWkTQABsEBEcQovCwgNroPRTNFFwDPKACucWOXYQTkBVHoCBsw1kMWByplnE/BgAJc9dx7jl15BTuSG5Ww93NzRTPsi8DgPDm78NJur7vcoKAwAY8n9lEsPh0/i320PKtaI3U8++dbwsC0AE6IWwj9ham2LC7xaoInh7ukbtaVBiFsOskDzhr+fDdb/r+evfitQCtV5vNbkvjvgYBIwYSoUb1QAYkaooT5oxpI9W9X+VQ2z/NRIlzHsP7nG/HCVWZuUIgQWIiwUnrXFNOPYnkrnPTqDZXDY/UiwtR6irR0KfVGvuUbquipFqomB1PxxnM1F++3HbM06TNLtbKvF3lqRhTDB1cbGWqBcLasLFicMejxXZI4uDAhrBZZ2E0xAIBTOVYfvp2/d/8l39+c3GZWR7fnaZYf/w0yZpyfwo7mu3pOH64e/j1t7dff7ytMP/pu/2f/uj6/unh3/3mt5MVD715fblZx/o6lToZQN9ffnZ18c1vfnd48qPDq5t+s51fvd2+//7p6aA1w9M4gw2HO5wLzXX9+Zs3b192dx/+sN6RVCHh+jiuNsMUcML0Ucvjx+PM600vP37Ff/WbD32ftt2WEGc9pr5fx/DNXfxP//z93WF2YeOc8qxj2V5kgCjum8vdb28f13nocgDN3+3hhBLrG8EAL90Ax2n47Uev3h9YDocTEueO7vZ7sHhzfXPw8vLl9d3Hu6FP6wvZ74+roX9z079/92HV91/+5rc3V/l/+uf/+umh9tcwH8u//PWXly/7zau3a5Dx+FAB//TH/6t/8y//8sPD7d2x3j+4SdnvHzeb4c3b1WcvX/z66++OcOpX6Wr90wOssYy86m/64a5Yt34tm92bP/r5brp9eryPUn/3bv+vfvfrX77+ozcv/2RYy+Px3asvrt5e9l/+7usJ4q/ffbNbfx54MentpNqvhj/76Rv7qz/Yxm626W9++y0AMC20z0XchADA3pyfF8y8NeoLdL0ovxgaGMEMHFiNkITCGqRPEYQihAIkyASIgLzkdtEicoNAAF4saXDhUbYTARq35TxaoGeWIMAyM8CznKhhQs1SmQjAwAnJwj3MAQLBoI3NvWE0EQBgjmCBTNzQ+SZt8nD4YQ8OxIbd+7JA0VhLLSMKz6o0XPZ3BHdXUApvfneBvAQx4LNDX0O5l4AGoHYSEQJhgAWYWztAoLFzoOm7sKmQ8Uz+WhCVhU50xoMWMsniHwcQRGHQ5GcEiLyQoRrvBlv6ekQVIGgUD6BlWgN+BpS4QTMO4M3eCBsD15fvQWPvNq4rUSx0WIQwACekZaMn9GjjtqZWi+Z9vhyPDYQKQAqPZldHuBjnnAE2QF6KjkXq2DjH1ELQlqONYQGrsGWsA5Evk5sz+rQcwE0BGXA+DOZSWVKElgmEJCLev3tXjyH9fAiFsEHICB0sZ/aiiQQNwbmaa7CGVQBD8DBzIwQ0CDAisJZlhqgABTG1eU8oQJCDqwUbCVU3WDhrFMiYkylGgGoBDAjzsADW87ysKdw8sElANeBsVSdInFN2gkTAKBiG5CKYe5QSxRBa3GFYDQfHtIjRGbHhJkAMOTHn5E4A1gvVcQxHYgAEa7xqQhIDZ0KSDtisWYSrIjikRAmMCFJH2KMJKBux9dd5d03lILcPcTi5okMAOfaJgSFAgUVBABs/AaoiYAWLrhOxAANMXTFobhgFa86Zu2Q9U6PqNPsqVhAV8S512TJHj7FFt1CjyCnWDDmaY5VMgJE6HSEgTACBkQO4VXmJHcPMIQIIvXllCfTYd5LzesvcMRPXCbB6nDBGitpx5gCgQCbkDtUpgLxh01ENKzAEG1NNmLuEXUci6GDggVFNQ8w9Irg9/SmJkiRKgRld0BnDPZwxSLgaubs6AA1dykCe0NRVQ20BeRdE0gGBEhCCeFSlBc/9wbDr85sbDo/5dNGvP56OT/V01NJt1vB0FLUXQ2IL2g0P4zhpVa29hTs4walU4hQQ2816xWszdSvdwP1Ft59mm2mu8zhO2/Xq9DQf9RN361O1ju0+IKludv32zeZf/eHd6Rb+/k8+H8d5G9512QyZcJUZrRIFr3u3gIjgqPOUJAQMzQ3BzSGJITqJhtcKUMOjADJCrFheMB8ZZiBUTkV+dfU5UaKj39mHFy8uh6Fm7vaFHk7Tl+9LBpS0QiyGIbkDAbXTIJLRj+PoxQBT7lYrGUetXz2Vk53++OVwc9m///RpfCp0mV+9udCwaoCrBFq04jF8sxa6wNXl+vbbe1J6k8kRu3XqkOOkZfYPD2OFstsyMX33cAudR+opxwRBAEyxvui36348Tn3XH2d14Uqs1V1o5jA37smI3t8/znc2TRUttv0w8eIJ4xYG1cwzMgBIJ2lIwDgzGcXd/ijbbRIGs3WX5mpeYx9KBo8PHwjBNDhnzMKZRpe51BI09N14GrkbEsEK+acvtp/e39Y5htxzslKrz5GZqctVcbRCghe7C0csSO+n/R7ynI0xfTyU1dVOLtan0Ccr78fyk35Llb9/PHR+yp46jZvLi9UQx48KaaUk1y+uy
mn+8PT46dN9mU2nGZlssmrWe+jsFg4szpJTyuuemRmpaE1WCGi2amoesCL5QcL9A6uInBgxApnDHZmqGRgIEiFpmGk4xskqQwhKHnrquGiQGQH1RFHGwIm4IxRzVahMinHS8ghlprTCvA4FrdaDI7o51nZ2zO5zlCkoMXeN5R0FUXKbQBkEhiNwBAIJ5swYqHMcT+MTYCcoQMep5AGRg5PMxTxgDhXAFVOYRihFQkVDiOocaa4O7IlBGMK8jA41yEmguRFb17lQBNJJYTqaFSJHDA8rrkoSScTBAq0JZKBGgkVHAYiIDtAS25EcAAiQMQSKojkFMnMOAgJ0y4KZ0AJDupSzT+MUE7qnCKzVVAnc3SBMmJ1BklhAEskpN2Ktaw2vY6NuuoEpZ0RuoXOVWZhJqwnHPM9TzGoTOgYRIRoU1dL3A7mHmAg9HT6uEhuYJJFeKCwLkQokdoRanRNkYYeKjhAJc7LqAsAB1Ss6MXH1ev94EJYuYSa4XOcZtDAEp8kMgtWUCMIVPTKgGj09TbXLFz2X4pTArVZAFCagCIdwYlplthJTKRR0PJZMIQiBzhhYK0fzssPccXVV9+QyHwKMIgH0CdAhI6oFRim1peu9/Hz3/l8dzxBGnIdqz1TtBd1oJWU04+QARghzpqU4Q0DEYEJZ5sMUEQTIHo3Kios/wpLL643eBrAANkvrH7TYMODz+wKEN3Oa5187k3oCllK4RSFjG2828wKAqsG0SOSaUeoyPm6za2iVOgEC+XLPS1Rv81RqE+Plxp9NNwLODq+A1FJZJfFiNQ4ALfoXo7Hj25K2mz2Te/CsF1tAODz/x9lyaLnTtoAt+QXghwXA85R7WenzkDYW9tNS/beq/VmsRUvtvrzjQmhagDiMwDbSBXA/D7IhgAAbCYuR24A+LKKxvwIqhhAkwmCsjRdlC6+njZoBz9oCAELKzDln4RQGs/p5FLYQ1dwBKc4DYAAASdKl3KWcE1upRdXMFouPs6fQWYexwEOt7zmDgctn8qz0IAQhyNC4BaQN5CS0Z84DgJbiXiklTllYuEuS5boTRPz08XY+Pe5evsxkfZfco3pD5iKqAYAQtQkvSyrqCQFmb9kzUK0XcYBalQlKVcmiTdjMLCRzmW00r0EQ7gqhxQDDKYKQgMGt7i7WfZdPx+KquBFFlwGvUv/wcQwID+8zO0ufunIcwyJnebh/uNntDvuqRfvtsOnz4eFYlGtAGEdgGXW9zT0CqwlE1dhtu5998bLa9OnTQUt9sRv+zh+9+eUvP9+kgY0kb/sV338apWNhKuOHh4cP/bYW1L/+3ZfvD8oXazySH5zH8uVvfjM/PcmK+026XMcqz3Z4uLm6sDqXx/rV+/txb5T6FHp9RTfX62++Le/ez7ttv+pjPB3XfabqH0eNfvDO3z19d3XZXb/c/OZ335+ONvQ7Avr0dMsvroeh39BqtZLx8dPpcLxZ22nUu1PpA/s+77b9Mcps+tvffP3m84sZdK62e7ULoflp7vpOp+PH+/fXm+v9fry+2r7arr+5H1nSxeXlN7/9XZd7ggTA+2mONMAE/Xq927DZ1AUYyfQ0KcDt4TCP8VCL7uf10P353//VH373N3/yn/3p/d3jYSM/+Tu/WH32D5Km/9f//f/8iGUGfLg/oX+6e3q8uFpl2v7+q/+AtOeiXVq92u2OD/t6spOWFQ1/+PXXl313dXn98tX297/9+u7h+MXli4uLfr5/ukzcb/uxPD3efbk/1r2VP/n5T45f/m7Dl29evpXp6fFxDLK87t59+1Qnhy4+u3k5dFcw1V7wzasvfvfN12unP/n8xf60f7x/ak9BA3AWGcjiNmOtkY4ABF8yYT0aQNwMUZbN0dsWF8INkl3g27blJiRB4IbsnDk4AY2hgovzEAbicw78gmw7BEUjmjbVW8MrloOq4cy++CUtU4Rwc3CE8IUbelZIgbdd2dtsAvlsnN/ixheQAjwCnEiaUq2djgHPIMZCcDzvNu1Om6DVAILOR9wiCSJyt3YmtOtvbxi4oD+IZ0ro2eA/kNwNIgI8mmPkcgFE2FTYTRHtCM8E20U4Cgu/EhwWVtLyOi3Jvh0V7hFOJO1oCFdCRiLw5XocHMIWFTaFgyNFQPN+XhS/AW0m4QtbCs/1HxJEi1kngAV1IuRYoC5r0x9zpYXnG4AMC/DfIDxouXuI7Yvm7s120ZocJ+JcRGDD6YARztJA/EHGR8/TEVrgp+XIbTMfo+dvBTxrIQEAqlrRCOM6cT3ND4+nMtbNdgibYw4Pr5aBE3MyhoAa5AKCzRrZ1MwCEQSiOKJrVUQIt8TkBKEOZCApwDSMARUMkDmvAcCBIpbvLRMzo7khAXBUMxLQsnwtGtksIiwMokWnNXkdUMvHCBCzDIIO5IgLdEepG5JNqSNOlpRMIQJack9i7iWLDDllESJQySwcBCA5FQePCoSdqqshGLoxITmcHQnZSVBEehSw8AhHV7SDGwcJQgcQSJSBHaQaRSS9uVy//Gx3/6nePj1N1R1FQ9EVrIZ7SkkgRURAT4xzTJyRgrNRzFQhJFIfNBAyZKEBUAIRoSbwHOTuho4RYtjVrost+wZ8iJiDnILCRiDg4Eyp6UUoU8o0zxrQpoISAUhUXVNqXlEQSbDrKCVmFg9kwWDQCJ28VrdZ5xHr1ItQrS18xj0EEQFrqwtrQLEuABxZg5U4pZ5wMOslomiYhlub6RGzQ/Pzd2AAC0PCAAlzrbKMCiGgOdKGGAEhkzOpewmda6mmKJgBIMCQWcgbGMwGbgFWyRMBiHB7CmyuIpTzUBSSYOeyk+gSIqJLli4DI6/pat3dPh4BIxkEsKq6uwNOQKPD++M4UA2f1qvMTpdXm1GFpjRHpEzzOJ8OcwfepX53cUEQDqm4rFayXudDrV/f31/3/ekQfcp3J7uL8uOXV3/69sXh4Z5zcKK5qhBzl0otQEZRVME1AgVRUTq1UmYtY60+ZuGL9UYAf5q2H8Zjb12a4YryJigLUKbcX81uRELSZ9RePSJO01yOx51QFyCZJa1rPXau18CnsWaRp/2Bw97s1vfH8TiNyqt3Rx+nEpmB2UrYeOrXuZoVU5h1nmjFuRT9eB9ljPnE04O+WsubbX+zW719dfXx7mG/L4eJPiru56MgjKd6fBwrxWqdx2nadWnIkgIodNX183FadVLm+WAO6E/q4DFrBQRnuytlP83q3qW0G7ppLmGeGBDRAAigo7haryQzZanVno4j9nLz9vJx1l2XtnlIBFM1iDCR/dOYERGc3LshH8cRImysOUgxuULHHRkl9x/d7D5/efP1x8MERhAdB4C7z+ipTKWEg5t0gpyQMmrKwK9W62/iXpx315uXl+txPE1cvE6vt+vrPoGVQxSujAKnEUQIJWeWi27VQ19Ps87jcX+YTqcuC6dAdFNj4MvLTQSuI560rrp8mSSpB9o4jtKz1tN+qmOt1RwAu5wjIMr8n0BFuYuIusQshHEIBRIQImkoBMiqN4hp9pQzo0PHFQMTBol7YCnYYkbBHMm0kEAiszqf9neoQViSKyAPzOKK
YQg+kZQK4+S1JmbKvdhspk4Sl5t1aSQGKwoKHonIokJmSDGdJteEzgFggF1OQ0fOpuGEgITFNJF1QCthLwaoCZIaFwWdOBEThQh0GJNWQiKj8AgnpDB07iD37KbjqcyThTFw0liAWpBw0AkMKBwtcyIgcCBkt/ASyFLRCCN1SEgc0kGyaoQQTC3lkAAYUYslYUHIwkrkgR7QSQcB5EpkZdaqzQYmPAxI3J2IIwMwh1CYV69MoOrUhE3CJKSEhR29ElHoyI4YJhSl6qSzoSNCEhIGUyRmi6pWJAn1tGUhLcdTCaNwLADg3gsretWSQFovh+GZs0RyJBboqyIZZSfgiWACwBBkPJaHy61IHxcdfrw9XG4vj56Omk+Ps1kkYUZKKavRqaqFd3nggFpdQvalZoYQ7fpsgG4t8oUl8XEqucd+gHGcVqkzn9w0E6IkE9bmTCZZT2meoUxiyC7MXLCzoRdiLRQkzAnCF6KpL0bNTdPUlGKw0FrOXBUIbxWpttrTkWkZczJgONhSSZ9RIABBWDRfcJ58LsANeusxzvDPAsnEUqU3Dk2j+pxLbjwLxwC8Ra1BRHPhaZKrQG+D30aMP0NQABDWeOfP1BxcxsHQJrKx/GvByCLOnhELPAMA4Y6L++nidYRMIEwtWy0AzJ2Q1C3csTkxNBDn2bln6QrwmezzLEJrkoKIaIE6hBTLOxHgc9vxfD9nCtTSiD3ThWCxcVjG2xgA4D8w6xdx4fLfS9OAS1Y1BJBFqEfL+ogIBpAAIghGc6AI8lBAC5gUiWLosE9o4CVgtucGaDF4ale1VPJhFArAgYAE4WGq7hpNBNGoUuHmiMRAmHLuhz6LkFtRq9Xi7J/xXPAvreszlnnuLQPOLeN53YEwMAggEUVAXeymCFvbdl5dFKbizAyIszk5afEudS/efmFK795/b+/f3VyvN30n1LoWNHfwIGqRCNQ4WpkpM5STcicnLRHo1UVSl2m1Hh5PRwICkXBHB2TRUDVIFGboBkCkY2WiCCP2vOpEbJXQximHFJP5CGsi1FlyGJr68j0sU324m5sFwii+Wl9OFdyj77rpWGuNxHwoTpT6hJ3Ei01uzD4MmU715UX3+fUu1GmuP7sYbm6GL170r6+vEnW56/r19rdfPv7h/YNjvH37apqPnz5++Pdf/s2R4XgoNAENaT4c4ul4cbnZvRw+/OX7i43gil++GLYDf/r425sXW5sL2ayj6vGSaDfBDHBih8f3fnxIvewYTvM4Xw5508V//P0D4iaLrVZ1SPj4Yf/p4WGacD9KTpen6Wm7Hi5XOauuUv393e3N1eb4eNhlenu5/auv9s5yKjGXg+u0ebnl7mK162M8acDh8VFGAxetZB4EtmJBGfxweF90xcNPLjd35bgb+tOJEveAkKK8ePVax+P0dK8il1frcixE1EnHyt99/LiV7TwzWs4+/O6vv9qtLzF/trr60d/9i5+9ffP24ZNfvkh/ePPF90+P28t1B6rHEc096NWu+/b9BxmAnMs0P+7HvutvXl3tUmLKF5v+8qo/7afjpwci7ERevd19uv3uN9///mL7auNs5fRwt99dbnaZ/8Vf/punTw+/evnzm/6ylHEjcJy9HscPd59Oevz8xZvri4ui1F/i/VdP8XEcklo9wHSYTk/z6fQMFTHSM9LaSCWA0awiGs8FARwcggOaE0w0vg0iEDG5CzrFwk31JiaF5q7bGAgBTEHoTRTzt2zvwBHpHLJJ5/25EU5bM/9sdYcErTkiau7r1BDrZraCYe74jOg0Nio04kyjPC6eSwhni2FEDadgQV7CAwIAFjXugk8DAJ5f9myy/58cHQjQjI4ayQUBGn3xLHT9QZKMy97VkP62y+MzaOS+jC3c3JQW6mvb+BxwYUadr3HZ4NthEmEAFNDsmtuGvDgPN2kXLho6CLMIYJLlOAwHbHShs08gRGBTkztQYEsCRVokiYC+cJ3O2rx2QoU1Jx5sNoOAtNz9Wca9jE/cok0ozls4UltvbObZ7fUI7TyyCsTGCDvzmBf8EBvo38JSl42fYplXYIQTcos4bR+YL17dcR7YLFObZwXadCqhIJCqdrcfj8fHaVh1pg7shICBpgZAPpUAJw5nCknoYe7R6NHQImE0lr8wAsxsEQJSNHtCs0X9h+Koc+KsTRRH6FYZHBmEwR0CHL24R2CoRy2G5C2rrAFGiBhuEEBMbh4RqArzTH1Cd5HMhIROSCw5Je866bJpAQIo1YQ5zFI/5LzKadsNqywsGJJxEe8zsZtCDcEOcR4nd6JwwrSQ2wKQKGoAgAgxmWGYQ1RyJysoil6M0QCCnDitgjzEFHwlTz/+8eZH9PLhyN9+X58OTwCYMxMBhVZVdgoSdMwWCawjFeiC+q30PUVXT6AHRM/UVZGC2pLkEcUsOSBCMEjyncAOcQjqkCS8hmv1GcIFk4cjY5MtWIAkgnAgCmDgLqi5hlIgAROvN9itV8OqS71EQDUdJz3sR/XiGBZkQQ7iwC1gBZ2YzBVNwQw0hJgBGDy0InOdy7DtMXwg7IBmq4hRbA4KCxcUbH0FotYKYeQzcACgkEWAmbYtA0mQmTgJI9YJVEudVS08ENDcLBqBGyVQWounLpiIMAkn4UPZt6dAeq41cspeDznHtsu1Kiez6q6mB6ckayB0vk49JDg9TAG0GbbmhSVN6I8lcpfWaYs85sQpkEYsp3m76lc7Upupk6HjqczsMR4EUtquV3cf9pzkmvFpKsjQD2z78f3tU83p3Vg+7qePH/d/fL2u04FEcqZhNZDIjPT9UWly8GCEYvMgLmLghuoEpiizhk0ThA+c126XGa9Xq4S02eSHuSoDhEP1MJzqNLsxangcTsdikJhXKbXtFJH6iGvmOedgMcbTeIgyd27OcDiWyem35QiI3mVM1A/Duo9prjYaaK5jQacIPs1Frc4m/dDfXA6vr3cG9uGpHCusdyuX8rZfv7h6++U33+wkPc58f5pZ6NVuKzPcvXuUQZ7GctHnjnHTcd8nSmiI8zgKJVT+8HTgdX4CUkpFq3uQTm9eb+72p6naVIxzEsJQ6zMIxek4EZIaxYwPt+Op6BigQ8qCY7Wuk2JmxSqE9HIayzSXfkiDUFH+dP8gQpNaTkwI4XbVX3774fBYqUCgGakP4pkRzJlEx3nDWQzN9JPZm4tuQDw9ja9x45iuunU5TbuL1fRU9Hi63Gyz6mbNXp0l1hnK7GPBmMfLbVeqP568lun4+MgVLvp8mrTnVKuFefPl3OTup5vLp+lA4FiqHvQknvr0/nj8NJ4ei1V3U4UafS7ZjXxhmC5QEficEzOxqhOTmeeUKaA6EYEAOoKbd0NyJCM+mRljhujAw0wICTkEUFCtYPIgn8qYyVZpmEtBAB8LIeS+F6B5Gj1iP03m4spmZMBmmjvKWbTNDgKQ0tDLSU9h6KbMjqSJEBlUm7YLs8Em5U6wuien7MBMh+qokILBKCpNBgbq5h4prI9KXefrDIS+f4qxOhj0hKskDj6H9xL1NLn68WEMwxQSJE4uWTy0mgEIRPOcZ3eKNnIjWBJ
YkcMDqIlnKSISIhOaahIBIrNGE4LcdwnCAE5z4R6TJHMPoiCuRYPQAtTBESCQOVlgINXwJvGo1QAiJQFwQCCiRj5OOSFG1QmjDCKAME5FMs0VwEFSCvSw6qbVPSVGwmBbbXJxnDlW285GpYKBcBxnZjYNyqHkSQYCmqth4sSJPNANg8SYQA7zQRAo0wniRMROK9NNjs7x7t0hoa86Un0U6lbD+nEPXnGuCmG7vgNzcaAa87EKIBQHxI4yuobGXCpGVFXgLqc0zjNlDzJOvJL08Lhfr1b9ppu1MEMkHscC6onQvCBLU8iIiNWRiTAzAExlorDttg86Tw/cW3XWuvFWs8FSFLbcEVjyhS1apaYBrm5tAkwoBNhKyFg8HXBxLl1IHi33tg0C+czcAThXy7B4KASEBzmcQ1YaawaBoLUIS8He6P24cNGXKfNCb0fwpnL35ztpCcitrqbnd2zNkJ+v4dwU0TleoxXPi8ypNSqx1MMAEUTNcrJxbsBaVX4GLeBZo8FtUHpWKGAruc+Y2vJi3l7dWzfR1qpdDyAtoFUrxWBZtBZEfdboLQEhS14x+PJkPtO3fmhl4vwLDQdc5sMBDmBtKOmeMZiZntlJ0TqmCDwrwRzUvBp2AkNCat7MTnEmX6HHD9JB81IKEZE5BLl7WDWtrtYkeIEtvoMAgJlS6oa+ExECUNWiWuoip1ikBs9fH4znH+EZIQqAs685nnkJQO3rG5ECrKFFuJh/nAFLqKp9n1fr3jwCpZdVqcXQA3h9eZ1vH/b7A0MIZysGwLjMrBEA1QIxMrOrZubwYOKxamAkkc1uPR3my2G1XQ2nw2nSgGbTFDHqtESxMIVbMKkGADlAs191R0HWWi+2q7v7GpSHdcrZ1byCDdtExutEhHp7716jVgCU01RZoEe6ub7cH48rSTViCr96sRLPPh76Dq+2VErcHXRfgVPXp+5F1wFE169zTtd993KTUn9huP3mu4e76aNC3m4HBr3si26kH9YaN7+9e4jKyL4/7vu+v3x5s11f/OGbu+8/3F9eXhy5ppzC7Udv/iSv5VT1eNjvhq3qcLt/6K7TRT90mW8/HWrpShq+uPns3dd/1ff98QTYraeZBqb94/FoJ9F8Gmd1vNnclFJGx5998ebx/jBP8DQ/nma7Cn7x8vX77z4d1G+uVtOhHt2++OL1WJ9GwyHx43Es07y+WJcZ3emiW5+mU5fWbz6/+vK776PAxDAqbVe7R8fvH+7v9tOQtsdp2vUZStmiHKpXkKejrrapTpqzpMR3d48DrZhTklWffJVXV5tVOL3/aK/f/LiOF0xXTl/9H/7b/+7Dwy2s8v2Tbmg6PN6+ff3mZPH77747jeoYu269W/UeMqx3X7z94sdXLz/dzevw12/o9tvv3394P8jFdnd1+vBhu83GWAj//C/+0aff/od/8+svU/aPn8ZplL57ebl6effh4DZfXA1jHavO2iVav/rFz/9LvdtPn456ZZTSH7576NI8/Lj/8O79wWLY9cuu1Uy9gM5bHQKAh8WZxdA4RW1zdFyMhXGJ2Go61AbzIzE1rBxbeHwDwiPwh+DGaPTABfwGbBFcZzFXw0Ac2m7PTeHeNmuPZV/zFoWGC/NnAYWsVfQLVQfczZsU+Bkieja492iSVEQBRAd0DARDIDzzE/H5ErFxL5fa8UxoRMAFyPGwthedhxpBLSZxuehFOrxc+7KLLds/LJ5xhk5mHgjuGuHgHriMw9pp2G7Yw2kRjCzAv51PVYCwCIMw8KAFBGlkpcWMrtlJLXLmgIUi5gQIKD+8CjbjqXMIWpzNbMC9OVc/n0xn9zda5jBN1A0EEW1uslgkLS49Hr4Mf1rBAXHexSkCCJmR4vzxh/vyITR47W8fbU0Zv/hZe5tLtIlIA7EwAhezwuUMbLwyWCqR88e5cJuW9qDOwcHjWB/30+Hp1OdB2MtswJA6JoDgqKYWjmSSAIPcDQMQudFmkBjAURg9QlXdw9wAhIKQHANs0USSSERorao0DBTUzGbYPDy8WG3f/MxIhAW9VLViBFJnp+AAEuYmOQpoOj7yAEQ0RzZQb1aMKCSdMCC7ahJiPPXJZ9AakFgQqct93+9W/eVqdZW7gTESOQmEGxMFamgFBIYuZVV1rVCB2Z3lvHBNKxgYBoTIiaLdJVE1HEIogCiInMjd1ZSCyDkig6wpS3/96mdvX18d745/+P4Ptw/fTHwcsmZBVyXOAEguAhShzENa9WHpUlyAArMBIEoAVmLDZpfqAUQ8KFNQFziYG7IhAbuEAYm4z9W9Wi0RBCIsOQs68GK0mtUZKaFkDUIRSV1eDd16m7r1kFeCiT3M6oke91r85I4+z2OUsW+k4xB1BQ3IEmHgBjUYRBgJSxIUCKOYAYAYOBln1bDA2c0YADERJ0lA6ADN09qsuFVydwBtm4waQApwNwViImAGDBlPZZ4BQpqBKC5+mmQ1lC0EhJlBUDqBKOioltLSFwShIz4+HaKqO2oWzhtlBBzJCwaoKWqAxThHZU2JOhE1RZSO+1L3nVdMuN0MET4Ib3NnGjcDAxkn+fQwRoBhqmwWfrx7ePXyGiD6m02GefeEhaPrrw/uh+PDqs+WqNToGZzm9eYlAKr51fbiVKfEUiKd5ohyGuu0Xney7u/2x53KAMgRzHSsRkme5rnPuE2y7jstflTLg8zFg7rEkIgpEbjPpc5aOVPnugKfXQ8K65COsMxzBpbEDF7MKkY/bB5P6GD9ONLQ70uNQWKiuUad6+kwnQ55YCTXPrEzpSEoQzlWEPhouu7TNtPuhh/iuHvxAjxTSifFI03jcb59uCNEZLvdnz48nhxh2/PrYfXTH7+tfiQSYcCIabZUcHfZp5yv1uuqev908lX/qZYuQU8yhqfEOWVTvuh2oScQnYECsd90I8Gak/o01qJEMM+p5FrVAevom5V8/ub6/d2nUJ7HMZCnY6lmATAZPdQRDcOiuHnEFJUIX11sVzn9m998aUGbdbKCiECCfab7x8l0doee2JUV4TROOcbdkMLzm+21QUaw3AeYebHterjZ7UadHsaZUqwSyoqV4f64B6A5xmqqTkMvsBpS8gg5dUaIaD4ep154nss8FzXrc0jqUPLxaBJx1HE0f5y0iMyKhpAS7C0Gop7xP4GKEEG1WhQSASJmXEi3hKHugYwgZEiOIO2YYkACBackyBTMMsUcSCQUws0eGLwIwRxMnB1Jli4ZEw+HcSqa0KhD6oc8FS0cZo5uHfeEMhA9ajlNNZQQQwREyGF2nYR6InSDxNwJuapDdCxBXE5WEcxYCMaJGBMhzNVQ2LUmkraH77Yblvlu/3CsiEYU1Cb8gZ4FGADVbQ6IFBBjrSyB4CwcTmGmBmDEIGDWCMut9W2CoAhj4kBQi4jIhAToiDkJArkjEatFK1USwWhWw7NVRNOqCjFbmdHCMRCRiYQ8TIQDHMFTl1rXSoi1BBMTi5kGQbOXQQc0W3Us4dWKWhiQmymSACdKRA5mi9txUJI8hXGW1XoAGYZV1+3Wx47vH0+oZkGOdFB34TWKOwZzREzha5Y+M6
FYiaJ1KiIYWahLpFmR0nrY4Fy/uR3V1vM0d1usVLveGeHicnt4Oj35WAvESVdJNsNwPBydKa16DS5mASCApnOdvM+JmAuieqXkmWKVRYtO1ZN0ZrifLciA8lhmZuLUqU2pj7iEJ/KqHlwpgWab0DskSj0iTMdKsnTJ6gHQfK2BWgxIq9zOpSgsaE8QL3UfAFqAtkGxtbSSpikIQUQECYSWsdUgEQcDhzPkdGa5NJ7Lkprc0uqt1XrPXPIIRORmu0jYRqEKDVd6/s1orsrLw80IGAvhCOAs518Y7e0n2Gglscwn9Yf6/oyetEKUaRkmN65Tm2fj0hvE2UvBWncCTazwPGSm8CBE4GbqvQA7fOYunSGlRaBnEUvRbD8opxbHo+UOoPVKtNCrsJ39jXvVZvO4iBQWWK49lg4YizH5Ar1Bm6U3KV8ANFIlUCBYgCGge2veWtOl4Rpgy3AeAqDUhqphYuwSeMBcfHaMczOBZysOCwD1UmdWBaRSzd1M1QGaBcriLYIAyCyp67sud4xQ5nme6jyrhy8I3RkUgoWvBHD+By4j5CVoZ2GEAUCz+kAgwiQkAKUGnO8lFvUHAECfu3DfP+5XqyHCCxwgEAmtlJTkxesXx+OxzNMw9EgQCo7BTN7iSTyccIqaCGsoBwGaeSChBRxn3Q6ZDPTkTKxVhVm6ZEWLWhIp1YC566VYBQggJCZTD6JZo+awUtbbXcF5BjNI4Sg5f/9pKjO8XhP6fHs3ZxkenhTIAZwd5lNB4S6RmjNTIuw4bTHN9SQCEfZ4OKGDcGbkFxcX//Ann8Px9tXV+vUFjaUOw9V3v/7w4eFwd4J+Ndz85NV2tZnqbGW6/fiRcip6+ru/fLP60H319buVpxdXn1399HOeh2p+VwI3Vz/++RfpRTcfn1KpWchg7jtwvtxtb+7LPmNstoBe3306mudTrUTd958+bS924fbuYVytuxc3mbfp/Ye7LWeo6LS+GtKL3e7X338Vgl+/28d0vNlcSpbj+HgsOnrpr64eno6XF1vr7XR/ezftt5f95y8vfJ5tOuUN517Wm92nb27n0xGBwqneFSM56fFq++Jm8+Ldx/3H0efJtrt1AM3kj9PpZ29//PWX36+2g6zWCkabQWd72M96/7COIctwmKeVlj//2d/59v23sFpfXF5dda/+0Z/9g1rpn/zz/9s//Sf/JwlJ1xvJxDm2lxefv725/7SvFeYSIEyAxEMt/Hp78/Of/JxgOx5pPXR11se7+HjHD4/5V7/4+TD04+N7XveH8jc77HGPqANxvf/47tXVZx8K/uqnv/z5Luu0nzDtdf7m4ZN2hLD64vrnp31Z8/z6Zf54OtyN4234L1+//f77rzshl7h4vVn2TuYlbQbAo+m1AgLdm1c/eFNRLbiP2uJLg41E1PYowiZxIlyy4BcuDf2Ajy8SIlwsfmPB65eXOZsmn2Hcxpw4UyyRETGoac78zHqNxY2OGmfEGzlnSShbRGexZAUsrsYYJLSkn7W6DRANm1aquVYznDk4i26O6KwFO4NjjdXqDhgeHoBMywihiXypsSzP5yoANjH3M5WqrUgzQgpr5w+4L3bWcNbrAS7r2/7IQqdtIrA22mlezst5Gx5ujfEEjeqCHqDRtEoR6I1pQ+BNNtb2S/OWBdZuzqJlPEQE4tlyzyGWdLI2a2mGys2RGiCQl2A4RHRwDOdGd4LzfrxQXZe//VnlHRHgQAyhALwku0WTsAEA+iIzb0hZY8nCWfoHhOiLs/gCRS7LBMsSL0TmBaE8L26DkBDPgmkAAJugmE8ne9wfV5seEQ2snTFqwY2zRgQEM6IpepeaCrNljjWSWiABMmIDCkEdAKlaCCF6ELWVazbxXgxO0xTE/ZCsFgqOhr8RpqZpV01JPJgZqlWIREBMaN5M0HUJ6EJt0xpugCxKkJDkxImQhDNSDnT3PPSlzEdOswGZcyBTd9ENN6m/SP0m5yQYSQLJG/3cg7ixzMyZIme20IioGKk9uA3GJAaRqrMTWVRKTB4lrCokkJAuS2u/KgYQ9Mhc0JiZOF2l5DFfr+af3nz+85/96A/ffPeXf/j1cfxt5ZLBvCobE7EiU08nzNtw0pEBiOeipywDIFYIIXNWNY/AcA+BEAQkQiICBceo7ZF2IMXsqMEQNiNoj9kwCEmASDiYhDNRB5wRk6SUug3nrl/tcl6vUseU0K3UmXXyEWeqeyvFC9US6EgMrpwlMKxERA0tEKDVVZ2FUY0QnVOwOJEFuKMHqdtxnuYwlEDwIHNEZ1I1chWAFAFgFZKqMncBREhBiYkkdWxhqsex1BkiyIq2va/txI4xOgpIuPaUmNHC+5SFobp1w9CegnGaOk7AHF6T8ORuYGUsxN53qSM8TSpZqvrtZCeNC4Zuqhzg4eM8Exo49Enj+AjEKjw7AmAm8+qHx1kPShnGMvJW8pqnCR5ZT+V4s9k+3h2mqoz03d3Hn/zy1U9/8kd1svmbb26G2K19m/TffvlvX/bbHhKfppeXLxCdIP/85av91D3UiVwJJffRlJJzmR3ZiE5TpQS47p2ZmTeXFyTd/TgpYG7COw8C04ggCohZtU4KNdxs756J324HMUA0C4SIi6F/mOvD+IQcvUjKa+rwyjyv8hezO9IHfro/2naQCL98uauH6WE/0crrXOreV5xXSVKG1Gtcang+5UAYHfLdRzqcks5jGKjVKmN1D7AaeD/a/qT7oler/Nn15XG8gwAGSsgdJAneCHLXr0noCb/7MM8VmLzPqU9ymHS9zttVt8sJwD/sxyA0jcOsRwCtqg6c4ma72z8eW5jihMru7777GFAl8GZgZjmMFg4p5dOoGLGiRX/ZYik54PXFRZ3LkNJ81soiUaF8HMscOJojpocZEngiHmR7GgOqb3pWtFJP6+1QyzRNdhrn3ZDITKs64PZigwPfllLDn7AgwTjb6VByNySWq+tN+Xg4TSVIuEun/T51SasCQAh9Oz++HTZPx0OXu64TYUwkMMUvb66rm0HsS91P5aTGQf2qg0+38AwVzeORCZkpwIoZJVEgsKbONyw6JF4THPd1rnGs83a3S0MfXp0JhdxKMFSvWCB1PaJoVQ5B7gDGVZcJE3ToXtydwjrmynE59K5+GouQU3IqMZuX2YIzhgaTm4WbIAB533OXoipgMLCoAyNiuIZbiVqRhEwdgImBhOdSPJAkzJ0wCByZtKokyB2IuGu1Q+k5B3k4VrOmSXeH2epaxR0nDQcHJgDDMK8eQWJBQUXNCTMyeTATN64TGBJiYHiYBQOGlgAtYQDYdRnRRcQdHIgAw8xca6kFY/Q5Z6ZwjRjLXAAk5Zy4E0ZhC4NQQFK1JDLPCuhWDQBr9TAgBmACDyJ0qAg+F1NEBChzxRCPKBbdsJahY7YMyoKA6EGri81unfuu31xfZ+mxxvxwa3lcD3UI3p9qaRmuzBAgCWupiugAK+GePAE81kOZVBCziLurOmbc7fIasD7V+9sJsCOB5JEF0C1s6oV8cKs4G4Bpqa6zeY0ZY7ehPrM7nKaSCBGEET1IwSdXdgAzJlNzBzcFg
mAGVe12Sdk2Ce00W2UM0nBMsL2CBDhOShlkC7M514hZhZiQTJ8FaO0hb+OsNpQDosV4oHkrMmFzEUAADLAl9B2sDfwaHQgAEQr4Enly5l3QefCLiKELkYTOpS+e56IRjSvUfr/9EURARhA+dx+ALpEQzCMitGnWWhXoi/GBAyJ4QxJsyXJp3UVgBCPBMu8NRLTzTBMXklL42dI1PNpkOQCwWW8FQMCiNKIFYWkc9+dpMSM6Eno4tNMbLKLZkIZHnPOFCMmX6SZa87qEBRrz5cab9u9cRXsAAqHTWUXXDD3Os9k2OV3Gy2387s9wiQeeHTXiXA77uZr+2+BRePNXwgo/JLiZN4uIlvVDz0wudZg0ArFjXHVL6EZoQ5ngjMiBqjmFuzXlXnOeNW8LjxANIgwI5CRJUp9ECBqcNJdaVSPOMcsBz9DfchvtJ7ioVRZO1tlyIgKQFvQKoaUQgKAxRDTtFQKerUwfjyciirmKEDQmg9kubTjlqi65G4Zey+nwdMrElZtsxRNThDsEB2KEMGRhM0PijChMTmTqRLTd9pfbq8f3J6za0EYLE0AzFQBQ1QAKyAg1DD26RExYPVDt1YuNOCRiEemZy3ikXjJLiA+DMKZBaTw5LdoHQG7KXd/vq3oEYt8ls3i0Kcw6ic1O1pv+8DDF7AnoBXT14bhd9YD9v/qrb7+7G9/vfzedbJzxzWevf3H1cr3OQOXuw6c4HVdd3lyth912nkqH2+t8enN5+aPPX9sqj+4UfnGV//F//fegTk9eWHWzGh6PD8pT2m4PXj7df8ybblMSgx0Pp4exXm5WNx0U3X++2328n+cZiNaUZCVOZXzddRm7NMj7u4eHfRlPezSHkDnsz/70i09f3a2z/N1Xu4f9aXWRx9Nkp/FYVQh+dN1zJghdh5YyBfhmtz5N893tPVFIpuM455zCVVLuV9ZB+P52HYGSDkqvN8PpNPWZ/vTnv/jyq99/8dnrWedEHofy4fuPMfpUprX0ZdS5Pl5crm5W/e+//JtaA3n9d37+XwxD/vKv/9n/8P/47z89PjDB5a67fHnx4faQk/ScBpAH1cShOqcO/+TN6/fvp1W6fLFZJQVwr15u3vSivB+PnutP/viP+pxLHfO6W1/wL764QeQf/eyXD7e3f/6f/wXA+M//xX+8zG+2bk+39xc3DDn+8Juv3GN78bKH1fjp/cfjp0NGD7h/OKZk0sV3H799fZkeH9UI8sDnBypa8lG4u4dhWPjZNwfOkVqtXde2mThQ68ma7phgCYA0N1xMrNuPPJCFCDCwmUY7QPNYxefAdVgc0yDCG+a0OCgDAgECtmkccNsTzzB6ALaY3mhXi9QKVEcEN1yCz9RhUSEjcAsaa8MGOhNUAx0ACSkoWtN/5rO039CzFKr9WuMynVMR2gKes9UimqLKcNFJ6TPyv8z2sWnAl6FBtCA0WOyew1vakiMAIQUE4SKipkWFC8saIDYvpOb93RAIX9YznrH7dqceEUD2vPeHRwQvTnVwDrMCQLQGYzRWEba4WIz28cHCSV3yHpbbCkJmRILlSgkW/Vc0hAgQIFos/bnw8DajggZqeQDgOe8pDBCimYvHWbAX5zsKRCQCg+ZnSM+HHi7Bp4vrEjS1Oy50Mz8T0iLA2sDmLDZHeNYiw3iw6eRz1a5L/UDIVGZzDVAE9WD0CCYPDlBQYY+gnIjFPQTJoXlMN09yoCRsZh5+jvxkDK1KxOHh51GIR5wOE3LkjObGIo7kDhTOQA5EiZhhCE5pFSamrNVSULgSOoOpmxkAhnmEGSMRY8o5E/cknUjHCUjUrUtdlzvhSRCcySwIMJEQcE59opSIGti1hOCGeZsWtVIfkRNhjWqKTaEKzpTMwQPNgYAjKhFGI2qrkYKPqpIyJyAM0NCa3BAVwpV4podidXPRXW7ywIcNv3p9/Q8+//wXf/1X/+9/99X/PM4n6WPgHhzAZ8kbpgLOkGk2EwrPaBEdMoeiaZA6KCKSkPrYAQ4B7DOgAGozhAozCkEEclWbJTwCFWbENWEGYhEGRkrMLAGELDklQupT13ESZmHGs2uiqZoVaH7i4YFYrGYERqqmjlhUww3MBQgUSTlhkDuHYygDrL2IQqCo2ziVyXQOT9zCR9yZXSCQymQQaGYFHdgAnVqtJszSESCFg+tcyjxVK7Zw81oWnqqbKdREfQALYyeYEd1JIsKgc5/PI9csmIUm177vCJGQbBzZaJqtsHlYEE1av/z08O7piF2XV3K56g8PB0oy5EyY0LyTGDq5P82Rk8p8tem3Cd9/tR/3Xk+Rcv/qjz/v38i722+22/VDRQf87vaDj3uo4zTin/zZ3xs3+k1XNlsZCl8+yjTXr+vYvegKmlS/Ap1G2KbBFNPwctX3u4s1TietAYwcZZqKgQVSn7NNJWf48ZsvLhlB9VhinOdqGo2mb2Gl5FqDCBLNMVsBU+gkbXMcio6lTjPvhPthPVk09hWboZpkKnM9lXL4WKeplPCL3fCj16/+6NX1pFzACxda8RAxz+UxKjGtO0wUWbIApDqjduTV9gd3H+RST+P0MHapbrt8V/R+LMeqEc7QIGl+GOfbufzuadwl/nzoL1f0atMNgGiYp7reddyn41w2qTupR3ioYeDaeMfpauAV91nwi8u1enz3/m6PcgjoWdydIjCKpPDZk0TXSS0lVISRwi+3FznjNO9rVXToggjxZy+u/n9U/devbUuW5ocNFzHNWmvb469Lc8tk+eomyGaLTXSzSYCiRDUBAWzoRQApCQII6k1/h/4HgY8CCbQMqRYJUuym2E20KZtZlZV5/b3Hb7PcNBExxtBDzLVv1XnJPPdss9acc0WM+Mbv+8ar3c0+qwRuRTqjDtrv3t2M2bMZWpnTFBoBitw2xQsK4SGjKoCZYuHODPq2LUTbaY5NxFag8LgdmxibSBSQQYz9ze443RVwl0Y2T4JP2Y4Fubzf3rl2NtlaAgANUz4MmRyc7PKyn1Le5aRMr8ehx7Cbh5bkoo9NaMSbEKR3M9Q+xCfrntdhuxtVF/Z0kYpuXt5FkSaiCFMoIXI9xcaWDXSa8s072B8mzNL2q7BqyZgYkVBIg9isZXvYh6YLIiVnNFXz4l6GcWW5ESl5ImzqakzCXqZVj5RzYQdwLZrNk/lgDBCmEdDNyRE9BIiBQIxP5YE6FzCI2jbiBbUYIM9GoJgNiFAQ85hXIoY4qoG5CJq7msVW+pb6Pmby4aCATS4GkEWwlnrFIWVdcXDDYZqCyFiKEDOSFUfkZXxIcUEqZhaRGYDNEYo7Mgm7liL1TFzXS4U51QFNiUQCk5sHCcLCydGYXFKZJ3cHaIPkaTJHVxXIkSUSonBSyNmAKAoDACGqep4zOypXO7tSZpHaYXEr2YGaGFSdSExxLpaVyjEnP67X3DSSXQG97fvN1dXl48vzzSa0PRYCg7Tqm9s4zimXXRNdBVkYEEOQaRqJLBDFGAJqMjukca+Hpm1pNvNiChTow4uNcKSC2SVIo04AngugUN/QIU2HUswsxrhq+uNuNiV3x0iAqFkN1E37EBlwmsto
iqZdZAFDLaRYMhUAIzGGunX3/Qp1FFMROcyQNaRMoYUMysREcLayzVlIZiV4PA/D0YYxW6H1pjudDtwdq0zA1UKAgHXiAhox4zKoBGofGJexUcu5wU69OjiB67qMlkEkx4cG5NK6W9gbBgR0OgWp1szFxfYFS8MRHYWwZmMiIhEBQjLlmvhL3iBBnW9S1QawGn+spkuupTmCMwKfAgnqMYQXKgVrj1hPiUffx2b4wwwgd8CaYox0Ap3ACZYA71PSKp6OFovfy2tKAiyzsSoOB1QlBnWwpTEPXtEeqoU7LOcoQqCHRm7VXaiqS/gQr1TTqWs8UP0DJ2JrUUoAAIAJtSZbAy5F81Lkn25ZLcBrxClAWmwk30ewOgCaWxXFvMaFojoW9QLugRqkVVPPAzAXXH76cmPq+Q0dvA53hoU/8DouDZEcHYhCDG0bBFFzyaWMKaWS7TR6u7rNlub0qabHWuAvU8/+kkJU3YZEcHpWCcHAA1MXqJj5kgXy/VU6DsfNxTof05uXr5tVf7bZ9G2T5oGt5ALgvt6sbt8NXeAQBUZzgNpa9sUi4UToBKNqFwQdGDkIT6oAcJzLbp6lSWLeCRbzcSo1iJTckVG9MkqnoUuEwORubSBHt5wNQwDLlvcjXGyCoU9zzskt08XV+jDb/f1ds4ohhinDPOVAy3SpELl6E1TVA0vLbR8TlDwpc5SIXUY4jgPh7bvhm5vPivukNLlD08CqWX3yZPVs8263Yx3m8bhqogcZk+ZRYxsb1J/8xk8EZByx6Vfry9KF1Z/99OfxDGdPSHMrlDWbHC/PVp99+922ZGcJLYOOc0qxWVHG4WiSck77cuz1wMMABVjX+v7u9npz1lI4bucJJinar9dmPOcMwEx4czffT8mGXHJarzfHoRhoWAszgJamCzF6LjC/HziXVOBwTF0g3EPTByP78Nn19n5b9uPMXmaIVgJ5mqzpkT3fb7cth67Q7v27dYgdta3gOB5lIjSI5nHWjy7PPtvft9KcxTBMBwFnlLGUris/+4P/7n/66b80lFXX8FlTON7vhosuPHm0akSO8/4IJd3v6/iEt9tDbPvf/OGPS8oXV/H+YGfSjIfDbty/vrm7vU+/8SufzvdflFT22/0w6mp37M7ST//8v+OrzRfvjw1tnjx69vzq6ZUUVfnp52++2t097nAVzn7j4vl+e9Q4BLN3N5MLQuDVxfr2m2/PBVpuD/Ouafubd0usNRKZ1SGJZDUZ2R5Cb6qjSx1U4aRv1MAadwJnQDSoAwqq16kafh2cCBFosRIBIhgQ1SMOeEGo+hQouNaTlxsSlgX8+/4TT2AG4K76sKG4L06TOlZgwaAWwBPcdXnxegqjXogZMHDHYs5I1bMKS/xbhWSo2rJwAUgraHQSuU485kNiHOCiXPnibyI+tR4cwJZIp3oFHwZi1oCZeoXqyHOssKrVkUeLrAReg5sWfdwfXg6dUqXcwWEhv5Co/muNm0ZCr1k3uDBipwFpS3cFicAqy4TFnevLfNje0a0SppXYtPLgpT5d70WzogdJzc3BibkCM4Cop10OTrBpXXhxMTjbcmFOezgtqCk5kta3YEZoi8gIDzsIIqCc5j/oUnR4RcTqHaxPzmLIWizeS4FRebRqY3QEInroo9zejOSBhJoQsD6iBCg1/det+tsBwYEQimtFvGJwrv0xJEQ0JGcCdRIhtyCQi/qSXKWIYFgVVXBDcMzZDQqKITUcWd2NgZjczRmokZphJCHgCoQahFCSevE0K+TsJYtWeMlQue7RHIjIGyJZ5lSgI0jkkhwBRQLWphcZoQsDIZAwCVGNdzilgTOAk2tOCZK6npxzgUzrbSBAJqy4rlpNYyIEQ0Kk2oAkzwDWliSMLKEhzD5PzqXk4uopKa2dYpCIfNa11AWkj56ePXn8dz++/dH/+E//0bfffQlxbggCmJXZiY0kAyEVhGOLhQCykYEbYyYoSARIoMRCQASRQNTZABwn8gKOau6WwY00OSijuBYDgOAFo5LE2IK0TuIOCAWNRYy9gKmXXAAY2XIpOrlNbtl1ppJJi5ZiRRVcGRVY3TMJuKu6ulL95Lm3IbiVROCIUEqMgaCoQUpjKZNZTsmDIAUwV82Z3Voiz1oyuEIRR7I+ehMDIQUiMveU53maplwyuBk51vVZTU3NVckd1bxMwjEyBUOUBq1wDOOMgkugr6HNeQaEYmbuHLglRS8Mnhx3w6zsQ8rjlMWoZN0OtkbEQCqcg4jZnAoappJD1w3oyez123crn4NhbmjqAX/t6ssr5Abogyff3e41YLKhhQyWpBHrYn5G8rgf7qdx3MJwHPdpYsczOMb5ou1WDW2BpvGw9ny5Oufh1rM2VLpC5+1mnCZFQ8Q2BiUc5vRs1RCYbbcHDCjNMOs0z6XMbZAoIKaqPpf5y7vxRrPmsgnt02Z1JpECuU2DmyFoydlmCZFZSKm3OI6WEQ/zfBzK69v59nC4zx5eDn/487dnZL/1w6fPPnk8hWHKExzzpl3t2zAc6bzTy1XzbqBI8uSyRYpTMhSRAMWH9XWzK7MZZNQ0zCiwaRqZylyKF+8CA9JBc9dIx9hG6gKetRQgTBAohHGGueSPr89vc7l/O2eMatOqi8/6NQGD4l1O2fTJ+fpa6NGHj3dFP39zt52sMJHQfprddc7FkVyMYtAgq0bOuvZ+P7RdBBQODYB2TA3ycZ5XTd8EL6XMw/7Hn/7kanP+s/dvRnRpqJUmJnIqKLBLU3I/56bQaKBFzSgehyE7F6RVgsfnm0z6cncLoOOcGG2zunKmEJq78TCb7rcTFnP0ddhcrfp3NrVdmC1sC0zT+GIll+v1brib1ZFhdBOFQt6etdlA1A7TFAPfpzHzWePUUhjudo3QOObRnNHOUgtMOk1/RSp69c0QvWF2FBDRJqKQhYaLWSk4DlYFkxAcwaIIBypzigEEYB6m++nQ9DGwCHFRczURKRlLwlRIGN0tp4yABopGZsSEjmqeVV2QuwAlWsqUCzBTzoWBIjsLdg3V5NS6mwoDEM5Ao0NEJmZBytnAgBCLIRGt+lYMQCGXggSRGdGL5064J4zghzHtJyOUNpCQ1wSw41zcMCBS0WK+aZrihijZF0O6liLMgJDdCR0IqEEQQK5VByChqwYmRNJ6AkNwMFRXwGFIXRdCYEGuSSeNoM+ObuwWHEmNCdlBHCIxGaScYyfTmKRrAkHJ6mpFNZdCSCGgmedSijkSqJUAQiWtJLITs+Rk7qAERTVnm3MOYmkoBrReSeya1Wa9unx89cGHMTAzr2JsWilFc1hZPutXq8MwoulKBMw0Z8q+IWbEUgo7BIpp1tFAsXGKIo6CSedN13RIJeshw/txhOCiCCKDlpwZRgen7FrQjNUlNRvTZPujcmhQYtO1h93WFDOAgQuGnkC1QFEvyby4B3MvisSAjMUsgUfMVDIYTmPOyUFdwCKGVNQBj2MKgnAsjMTUZAgp5QwB1A43y4fhe1nhpPcs9xRr+9UcoW76i7EHkZYoeFgChRYXwYNQgVVaBK39usoWLb8ITwf8Wp6bLeECBHU
gl6s5LSySFUUhbwgFqbrRmEAdiJCJa61caz4zN2AUqLKILmNZlsoUoRaPSxYp4oMeBADAcHI3eHVJ+PeFuYGjn5i5+h5PY+BOvFLVh6pMVNuGi5YDvIx/ebjAUC8H1WgeMDN3IWocVBURayxU5fLxNB7Gl3QhqDEE/BBZBAvxtFz7Gubl8KCa0Cngm+snGZbSGU/ZHgAn+uekkdXXUL/wwQByUpkevhAInRDMXB1HNxTsAvcNApi6l7KQP75EVtfhcoBAZk6nx6w+L+aOzCFGEQmIDKZWhnGY5lRTtGp0VT0WnvS7E8T18PzWa7IogU6LrlQfPwTE4lZj10Kg2XJKDg+2DgAASMfx5fv7w91e3dt194NPPhQr3aohUCsGRQMhFZjMQdEBiUCI1JUQiUmY1V3NALEUAHcFzUXVzByzw+1xXnW6arrtOFbXCgFcrvrduFdAVWiiBMacp8DoiG5mmoWjE5aihzwkdQptctrOPg8JEpH63aFMeXd+eYHm45THBCNSYUNCQu9aXnfNOGYtrmj9ytZXvDtMXdfvDmkdOwTr2hba9sZsMh8hhLWcx2ZFMCOuNmdti4fjMd9sAVQBx+ybNjBvvnl3t141Z9z/0Z+87FbrbnPx9uUXqy6t29g2tDtM2+noPbrl++Ne4nw/HIuRaEDmnCadjmoFs2gSUju4nl9fvt+Ps3obA6KFliSsXcSpazpMU3pyddk08Ivv3s2JQh/P1/20nc5plb0k9IbCbZqvri9aHG5u72MI27m0zjf3x6dXzxiMmuY3Pv3127s7eXZBzfzqq18MKc/JXw/7R0+vYhc46Pn1uj36d9+966L0bQgzfPjixZcv32/Wm+PueN0Rjemv/fZf+8M/+/lxvEeJX72/S2bk+f39XSBuWxRKN+9/+Z/9F59xLhzWjx+fz9l//v7N9cXm0fpRw8Xm4d3NbpuGY5Af/fjX8ptvAQVT2Jw9frebQgx96HeaunWnx+2rV58Dt48uP/jsqy97GqfxeH6xSfPh/PHlNo//5KdfUNO+Hqbf/uhHnz55fnt////+s19cP3/uQTiEX/+VH20wpu1+/+5w+ahThMPdcLW+7GL33auvPmhF3YaUNtdXjdBuyqcPEiCgaY2DM0QjKwv0YIZ1upUZMgK4o6ktUzH9YX2t0nANIXK3k9KDXl1ZUHNzqqhzghSNgLyacJn9YW1b4u+q10cXusgXzzI+5OstyzIaoqpCXVQWEQIqHbrY5x4W4jpmGYiWtXTZ95bZ7gC2QDpmQFV8MdWap1T5qdMSVPkaexB1fCFka360nXK/q9pToZ/FNaZevwr8NCRuYZT8JNDhafV1B0Q3A6puZUNaWEw4yVG2YD6Aql6Xw5q1pKCwjA+w042o0E29OAQ14Q+ApeJSjlZvVk20XvaNE8UEAEvokteriSe4CL5f1B2twk/1WlEdnXG6YvVGGiz3BREX+azCwbWWKAZoBoZcIS8zP01KqC7yOmav0kS4tJYWa5zV5wPrDFnAJf0NHMDNzb2iRrgIgLg8nacaBQA8BGh6jj25KxIKYjEHAiKqFwXcNRuQm6KqkiC7IpEzgwghuQOBQyVrRJgQURU1wwm8rSnf1VlvICJmOo2FBNfNioSBkSOCqqMDcX1giUlaYWYy6FcM5mlETZgn14I4ecmlJh0GYieswBmRAEs2cwYERvLY9EFK33SHsTTM7miLiVKJoDJFDurqlhVJVZPqlKE4ITCTIlomyAvQTQzmdfIaExdVM6h5knUHtOI+AyRC6tg6nhFamuFmmm7aFSNTBt/PBcYhMc9x/XgVLhpmbjpb/eqH/+qH/87v/9lf/OG//KP/+n54f95hI9l8rCOAzIA4RIKcinCN4ynmCoRqBcCEUAEzGHg2QwUzm1lTpFYVHE0ooCUzTZa1eERgkq4lkugkgGBWHSoIloEhTyUSYdFAIkCacyrDcNzmae8lRRGVxsUAfCzJ1DCYAGQtburmVoCdHQmJAnFBN8LCrOaGMOcZUxqncRgOLqam2ARiIWFwBGcHdwUrVsxn1zYyUBNCKxgBiECLako5pQxOCOjFwF3JVK1kI6uYorODuMOcArVBGMxnQwZ+QLaLGQOFIK6Wp5RVrWgg3B7Tu7m8vt8ZgxFcrNqQNak2q/Y45cu+SQZ5zO7lrIkE7uBC/frienO+uX17g5A+f/k6PO66j879g3gzDpx101P/mN8cdu15d3iznW72a6D4qP/u5Ss42OH9vivpGqUJ0l+e4XPClqKTlfmYHfswIotaLxGy4WxlLG9v36DbcTj0Z6suBkJu24YYtcB+OzRNh+wsTEARRA9p73q727+ZbLTy6jgezRnhcWMfvbjaRAYH03AzD7fjoWsjTUMTIfTsjAAKgW5u9iXlrm2IBwnWoWvRrHiT/Z/98s1Ho/31v/mkb0qZ8TAZGwXVT5+ER+etfcNE5cXTbhzhfhoGH3784sl4d88k3Spuh3wzzJOXIna17td9fHd3P5WsxQX0+SpctNI4nIn4bGTBCcwKUmiiOHMb+Ol61d7t79O86jtjQaGLrp1SOozz68O8m3IL+Pysffr4TEi+utm+nMbBbVIPzG2LkcjdiShPFhqBWa9WqwLgnhxADQ/jfHF+JjOg2jowmD959vRJG3/x6uuhJGll1hRo3cZ4fh4/f/0uuyHEAu1sRUsxBWAIHJvAh3mM1N7sdiQ4pBRXLQg9Wm/G/bFpOmC6vdkXQyveR5FI+92xtbzd3fXN6vLs4v1hUoKMZS34o2fnN8fp1eG4vuhTsVlZpyLggVgQUUsbpUAxCNSScDON80haYphmn++TIHQnzvpkQDsioDhq7XoNREGMBHMyAFZ1cAodccjQYoKEhZBQgUqG4WBDwr5rGBmX7UhZEQpqLgIs1DhYyda0kRxtNgd0F80ZnNogoEpoZy3lCQ9pFicRjORnfVQuMYKhJsWiKhJBArpGQnNOCRy8c2cwcSfC0IYgjObMMmUf3RlJkImBQuwigdtxP+WkQQlZGggN6WxDsbwKoqqgJNUkYoTmAYNaKeYmzBHNVa1IQ06ABCyA4MQIhlIrsLyA6Q4wq5l5Q1S/IAElo2DGLJULr223nIyB+obaQO4YpTHNvvBLPs9WAEzB3IsamJWixMRBTN2mAkyEBoxQ/VDEkxoDsUQ3GHM2s5QLQR0dUcpMFARmXYue17EfpuKMpqSzuRenUQGabv3k0XQzqaaicwAKSFj0rOnUNCGhBTSxQvk4SWhUuaBlwObyfHV9ST665HnOIM4tpmPRAtbyLpVsYRUYZDDmUW3OA2UVamMMxfSQx1FjwO7+MB7BCX3VRjBH1eJQFIpaE8kZBlVKjuISGQDIHZlmg30mVVwHjoLgcC6rueTZuSSegboYDwluXqXAaA0EAywnZB4X5MZPjEmtAXVJX3DCSk84/iWI5jQvbcksrWB3fQiWvuepeFwO9Yi1X73IDg7qgODMtV6GqvkYgMJCfNRjCxmPxcKShVQ5eZfFHocAiFx/m5stZqNapVQFaRl3hpV+OgEt4HQqeesbBz9V0fWMAg94zmJOqN/pS+
g6ozKM07KiF2jhwygMY+JScqWXJ/chBQVWViyFqxG7pgMRtAVQxDBkDrUt0lAV+Uo9Fqueog7e0PfHCtdM1RI8SSxVdALjYyzyuoLxbjYvDO5z77s7/wCz/zc5+b7m+DSe5EEMbo3nn9zu3rf+750y/8o3/6Dx99+Jv3H341i8RhxEUqth0OuA25q1udXBbVsImzvS3aC1x68uyGYTyuXgrd5QcP7ntfTbb2q2rCyCl30qXVbH55MV/PF6t107bRsXeACsl5yAzOsQsOQBlNlQVEFZkIFAxVoyE4px6ymgppduiyIpsbODepULpTgdQ164vjy4vTVb3Mas6yhgBt3WpW9oisYeAimKJA3xUZSOsERZbgulYHxEwD5HWozExMExgZZARQzUjmPAGLC+wLDCU5D67AgtEBMlBGU5N+fQ7BFcAs/fJIZMRXUtFk2xU3Ru8/OvaAS+1yFBFe5+7Wtb2FNFEahuA9W0Hr1SU4Na/k5ODGZGdYCkM2AiTnCdWSZgNRg+ZiSUiT6SClZjQYFkWVsqJG71xOeH003Z4wWbFs2vW8U6EmiwTK/cSiM3U0bxYMFqqCmAZMI8f7RcV13p4MG9UWsoUgolmbRqNSmVIWACUumcDcDGMLsSQmzQ4xp7SOerleDUp3sDNWTw+Pas4OXYhOs8HssvUpIbns+fDByd394s6NUFD1re/H3alzeTmtwmCYBKHVBr1Dg2hK1Nx6p4yniS/51tb47HCxtzX2FX9wfJbBBYegqQjexKFoGXzX1qtUV1SMPEdwHbij5rJwUFSDlcHB/o2PvvXW7HTBDs+PHj08P1u6NDBfsJ/P2maVXp2mX/jkdb5z8K9PFu99/3x5WbMLq9PGLud7w+HnPvX2zrWbp6s5oDm06Xqyiu3F5YnEZB5qadZdy4hnZ7MmJWUoqBgXtEqSID07O94eut1BAQilrybD6mxZR9PLVd1qvFg1jdOmMY703UfvB5My+PkiZoLhaNhkmc0vJaXhqIAobMCI7boLAw/OP5l3pshdGgVfUV7HDpzrgYPBOVUtStd2SZOsBe8/PWegQFaAFszjwu2Ni6ZNREWDdN50htWqiR5tWLpJWXZZuyzB8brukgExX7Qrt4Dl2azt2nc+nv/iz/3s4cnT3/jnv/HkfLVuVdnQxBNc35rEzKt6vbtdvf/HpaLBCBkEyZqu9ayOSTQr+q4F7AKKBTN3hRdqs5Y+VM5SSFAkSCDAhWMmJdVgjhI6h4qUJA4K7xmCRwbXb6koOuaAGZSIkHLsUAEZOrRGgCWriXUZ2E0GU6JQKATklNuubpOnsQuDcaGM7BlV5ouaiLucWrNkeVS6nNGhE1WJaeKDqSUTZ6hdpymSMBp2qlZYUXJg9mI5QfDIJskyIUTNCmgEYOQDF94LyngQSM0UCmaLOUpCxDIUINlB6G0mpBglsXNZ+tmjiuXAFBgpqzE0JozIvbGbsCiYvZXjkqsBoMfE5AIWnbYtiFruGIwNFVCtr9XtW6v67d360BEAEHEPF+jNL4CUVQWyioj1hWiUkhGSEXRmphZjRkTJpmqBsecgAKKKMnMS6IG1zfmsurEXChDLqbHBMNRNU1+cnJ4cXyznkdVVoQCX6wYBHGFMGRIWjF2kaNxGbSUBAjB5ELaUujYMhpUZYW5Tp+CNQys6CL6qgAuFCJJjGzUwloGL4CwmNZ1WFCMSuqpkNEhGypjAdRkIXaDszUy0l/9EuRFpNXlHJpollb4MTClL5Ys2ZQNlh977QMRXfgrVHlCpf4SChN6YbS8gkfbCPNTDazZdgki0YVRvRnx9EhA3BOXNNd02BTEGm09LQV+Ezq4s5yCbU1FfJgGbsi3YwC4QoO8j6dtwFTE4V1HxV/7Wr3761XtnXaRRXM+7z37xJ7fRvV7OyvJyd2fyZ+796HAyqQghpx/92Z+MzfL4q1+6+K0v3wp+WDghxjCia7uDj31i++2Pl+MhqnMMpOo0Zcur9RxW5/Fr3+APH/os69lKWqje3sW373Jza3l2UR2ewepy2aZBVe5e21sdn+XVRZ6OdTSu1+fQnBXR9rcPbt67U188uWxCkQNo60gIsGRiTMLQSjIKqbai2HbVsBGZvPrJycc/u9rbyYyV8yyYjTPBzkderR99OZ3NRuPRzTde41EwIk0ETWzX62ZcDfb2furP/8r6/W8+Pz4+mBTnJ/Hf/Pf+yuTaS2E0Olkehmp459VX//Jf+09/53f+RXd5iLYuHEoWBVMgA0IGE0UAkI39eVNRvfmsqWeNEAH2dx+0rL3VABTRAKW3nMFVjsQs9yP4/qkSiIoXRkltt8DAMHJmJp1yTw4h628ettExbTPD7y8bG5QE9ZYBVLB+iN/rVn8kOF6pli96iADAIQH2pFW7cgz8sVEzwE/8qc8u5u1owIvZ4tGHh5fr9fHFrGnWnoDW4L0bOV8GFFBkCA5N1MR8AbnNdaMJPSKvmxSbLjgHlNCRR4zZCgsFw5PDo2vT8c6wfH65OERI67wzKS+X6w/ef3Z6NJ+MqslOUXlzgRW5lSSeLhq5WDZNkxdZMWDJdnhRxwih8GbofdFGVVRiHVfE1M5m69hlz6GNqSzyKwc708nouw8udGVlCJ2mk+cLred7w+F63kojeRXBsSBIzoFoVBRtFFLXXuKz1tUz9F29d6vYOthDqr35uuksK4YqrSKlvDMyd53Xc6qG5brN2AB639ZJssTLmkEX9YwH1XBaxeWcshVFcTpvcturApAZV123tzu+fWsyNYDWnPD+ZDIrF0TFeHvShcHtl1+Ds+fH9z8sUBfNfDja0jS9mF3OT+eWQ+iSo/b5429liATu5vWDo5YOXr7bHM//4A++Jcly01oKPgwGftzW3cXho5hFYyxd+P4Hz6WtR35asWyVYXb4pD4+NwRyULCmun39YEc7O7tYz2vfNOZCGI88QNaYpQxhOFhdpDbHLq+iD7vDgSdezGbb2zvTqmraWFSjSTFYnZ2jytb+wdnp4XR7/9ru6PXXxs/vv2ewmnmYXZyWjgpPwOGDRx/sTry0ohTXKb4y3qlbmOzcfPTe4flJos5Kle/81te9aafrg2u79Uw8FatWcDK8dm3YXSbrcHu4H0o+SavhDu9MxtXO7ffPnr5773Pf++0/0NaXYZDGVTkgyW1s7cmT+XK1DgMqElXjDdZaRZnQ1PrOMwQDdpvuKqIrgBj0/5/2vhIwU+0LJ3sRVq+iQWaqCmKbNV2tX/hB0WjzGipS36RuCNabicBQBV6EStX64sX++zPbXPh7mXdDJwND6TWmK9urAaraZqsSJWIzJewlgx7IbWC44e/0saUXPsh+JQFD23hb+mFFv/r17tYr4brfFAWNe0ixbDY17NvYN5slARMd7N985+Nf/PjHPzOINnv0wePf+ycfrbb3n56OXB6iCxiES6yqwXQ326XODEdvTG/dWw8juAFhBYqcI3XZgJlYs/TrM1JQy32rPDBnUOiNTuy0h/WAsQtGzhRBDRTBkqEHYcvqfC5YuZ3dG06Z87qYd0IXZ2fDj9yWcl1eK+e52d57YzS9vlx
c5mx9MEYRDdm479YmIlTVvtoekcj61hEAUATqh3yEvPko8Sps1g8YVHv1vg81G/bVpT3fyTbxaETqBaAN2Lo/E6DqBoq3KZe/Mh7BZmightTPDFT7SLrSValnf7zYJJqvvGb9b12UHLzznkPBwVHwXJaFY3ZUIBW9opiyMGZkzSxZOMvKSNARonlFIHCgwAKUkYBQjfqkpXjCgGhZjZERzfmes0R9Uas5SITEvQ4nmql/r/rsZk/DkoiWjRmQ1ERUHCBZz1TyMQsSe++yUI/klGQeyaOaimRhx2IIiM4HRAuOHHgUQjYkUxQBvYp1d4ZIAZxgicLOmPrunk6TiGjKKQmagyTJwNCIAzNT5b2JBYcmKGrRVLqMjYFkX44zdsF5GdDe9uhgN97cuX33e3B2uXp4ckmOEFM3X75897aE7XK895d/9a+/++4rvsIYo/Psy2BRqqHXLu1OR1vTLT/9t/9/f7+uZxcfPvh+vVRwahddNaFiUtooNuuaSz8cLptWoj++vvvSZHJQr6Pz5cGtV1N0KcNovGOguV3HLMuzRVs3zXpdr+rUJE1qbAIGxNpz1tEgAxEwACFiVucIRB0aGYOpGidRFEg5owKSECJCMl1rmz1kYl23dV230gEqYzYw05hdgTmlvuEnRUJAAXHeEzrCQBriHLRAHPhElpMABGJ0LqECIfcgTwJSUUTxHl3BxEYFUSiUmJCcMRE6AAOIkgtPKTbqhuACqDh2aGnT/giQ06quF9en5WmyVd0Zp8HEHy8vqssTHolorDLUOXUZ08BwgK7l0S5eH+NqcXKxXMlSg+fVqmanu9NBrdogn61WAxe8UVGUnjwoYsbRcLC1VZ0+q0HxwFWKztewOxzQtn96eimiSa0onUBSAiqcCCag2OZb5eDOeLg/GQnELHwR43ldD8tqWDmO3Vrik9ZeqirWiGQFEgF0Yq1qI1YCOqKUsyAI+1btydHChbBeIQh1TVPuE6OUikMOK0XtbOA8IcfUGec0KCjAR1/afvL0IRtc36tWqUtsa/RNFgWrLzoQ9sOy7uJoOFjU+fnl3E932tV6HRclMwIxQ7ak7EhxUo0KpsE8Fc34k1/4sVuvHnzlq//4vUcPSuSf/cy7P/nxX5r99gfbuYFX9uAvfHFxt/o//R//N6vnq1tb2yPUbcWf//xPXGY6On1y//iDSekvnL937Waxe62cbtej7cvjM19xKBwaTnenUPG6uYySLhfrtl4gkAfizlLslA2TKLOrWLKeLlaPn54U17Ymu3uLy1Uzbx2iL8J525zPZlNHjWVgbKRLSM503ejBeDQpwng6vOi6k5N5J4ytyHqNnqcO33jpxvnFaqnQlJxbbepuqnpjWIzH/v7JKimBQJMzomygpWTmXMpShdA1KXi6s12NEBCs9E5St1UWA8VTJIchAaCBERNCalNBOGJqHJUhpCSz2Lpi9Oj57Oj0d78o6SNvHvwnf/Pf+a/+m/9e7NihMAdBvzXAi7NViJ01m2vPRipSyESk0AOuc+6gXVnbQrcmzMaiDs0hMHBniELWYgpiDDRCSlB3Mq6wLACyWaci4IJ4xjIUngxEfSeOiYhSNhVConFZiKSYIoLv2mweASwwQmmiiOiSksU8DDwqC87WAq0lzWKS0mKMKbNkMNGMpimGwDllU1h3ogSlC46p8MCkMXUaJZoimAdmpKw69kiBhiyFxySSCJTRsrAHUxMxZO+ZWBAVUXTkXWlQuqJJKZswmakDggzGxIQEhEkUHBu4OiYEZU+OsHQVM0IWn0OrglZkzQqbZHPKuSgDegZPjrwjRue1ZWIkSVpvQICqltWIEAgdGGHfwIkiqtofiNQjZpU+cyIqwoQqYCY9ZwWRX7SzGGQzNBQAImKy3DfQoRpTKEJWRWJXlVHNIbSXsyQSBgNPCHlRn8+evP9w1jSRTKuAHlVNUTUrmoVQSMqx7gw4S8b+nGq59OAZgwBhkFo9sICRAgduzcphJbHrwIaF7k99UyutY0EBwIXAFMgaQ9ZR5bJIyl1wBYBTBEnikT1jZxyYchRU1c5SFCUQg+AYiJxDEYhECu5sFcfDQdc1zheu9Np29sIgvgGJQi/NbH4Em2vBi5xR/+MNhHgT8wcCYAKmjevkqtSsPzP2Z3uAK1Bef+i2F61aBgAgogooummihT4rhNDDP+nK3tKbxDaBISQij538R3/9r9+9+9KM4HR9PpoWQ8l/5vOvz3ZmD373n8wvvm3wigNbxiVNh0MfRtpIfWH1fNfRNYZhUZ03bRpQcfvazpsvm+cyOFQUUSNTgC5ZYo/EWBajaVUsG/M+5U4unrk7N3HrztbL7x5Mq7Mffmu2Wqkfs7Ph4Ifd+mL69sfz5MCatj56FOcziwiLR9TJcDgOxtalEnKO2TtTpEVO5Rsf55s373/w3rAsns0WL3/kte0f+fR8Or1IEkybuvauHJXOdB3TabNqoY5+UrWr50WbO+SRFU6a0RiF5suzdfA6nIadWh5/77cPdvcnNw+2Dq7X9Vlz8fTlt94Iu9uf/9mfuHb3zre+9M8++Pq/ng7LR88ulilZB+zKTiMTKRGSimzcRPhCeQEjMCMQMCISAEOynhtiRkDSH9ypN+FvPGKba9vVfN1MDXgN5AgmwQqnGVU66Yxe2Megl3n6WbEaE27CI3DlMOivdpux8dXT+cdMAZubwBWEC8DoCmLdfzNIyPTiuQYA0JwHwwI8Vtvj1z72OgEcHR4vZicXZ2dlKcOt8taNvfGQV3U3n8994KzQtgnBXV6s54u0jiQRNWbvHTm3WLUGMBmG0lOWBMk6g6N5czHrtODnF6tRNSiD0yocnc6f17C1zh/bnpCu27pdt6kjW7bNWSPzLoOZH1AZiFujmNm4rTvvSBizsZh1besAEHNZhJIYiQLDoKLJtKzX7WwtBMVWsLppHjx4cm0w+sbh82s7OwW521N3umik4FUn7BjBvBnGBEKffPWdo5Pj+z+sv3X/7Is/8crebiW4IgZyhN6irpl9kpQom6eclMDdunX3g8MPqLL2crE7KGvJyUyRM3qqBpRssU4cSqeSmtzV6e6d6Zv3xgc7I2MOpidn3dmicf6YzUIoT1dw6+2333jlne89eGCLujHb25oOr919/9F3HFsxZvERknqC8WSyWLhrO1uHp7WG6vF7RxcfHnbLSx7w9PpUNDw+ne+ic6rdel74Ymu3Gg5utOuLRJV3w70teP7s6aquEWhv4vdvHjz8/vfZuXkh54cX6zVl48l0vK4XoyrEul22eava0nDdlfPQdePxaLleXWacDsc7NNkaj8ud7e8/u1g1sAXh5b2D88tYue0b18eDck9nz44/vH/44HmGerh3fXd67bI+Xi+lztlXVZT65v646brx/ktf/cMfUDGajoumycPt0fJi3Ua9Nrq9s00XR8+5GLauPjptwn4J1/Tw8tTHQXNkN7bC9u2d0s6r4Kxr5heH4+AefHgUwsQxdRF2dko1mc+0xunzo6O33z5wOT/67uPptWv9W8D9ct+Hp6zvNTcD3SRPrhByvTrcNw+CKSIooV
qPKoPe5oIAJlf19apoIKKqCoQGIGqbtPhmM+gpxwq971SBersQGBKibiYKeAUqtk07J1yB0cj+eGW7qr3IoCkAmqoAolxdt/vBByAicQ+3xk2xO15x9PtFCIlJe0MKbFD/utnAjLDPoJlD0gzIaAIUCEwYs6EAEROCs0lRvX77lZ/5/C/uT192LXcX8+vju2+9/bPu6Ls+NrpqLXZIaFkpa2pWo+2qO/qBr2daH+699Wq9WuOjE/nutzmzYaGAah1Rj2Q2zZH6kktAVem/STQE003bGDuRBAr9RqtAKJBz433A4MgTSOe59D6oGy6d52vXrl+7lofbzkMo1+PcXK9GP/WLv/wb//zv63njgDIIMRn3f8ObwrKeJc2EqtBj/hVARTZQRCQD7s8aV5KcbuqyepBQn2c0QN0okQbUd96Z9egje/ELrzxBV8qlXf2JewDRBo5kgJteg55b1APFtS/G63Nn/SalZn/E19rsJMzsmJ1zZVn5UBTBB+c9lUjBFAyQKYIyaI5KHVEkEiBDRFZPapA9oxGZC4ZqIADq+h4dAEBUdGKYDJm0D9mjMaJHY8usaCD5KmOHWUVMgUgdMahnAc2QDZHEVAVLKksOIgkQmJx3niCo5c4yMAiaYUrasTokZ8LGwXFBNPAEJrI5CRJ3AKV3iAZAbAam5h0R5yyMufTkMCtkYxSxJFnUACyl3DedMGBBWABoC1B5M8mNnjXJZWdN58AVbvD06cM7t65PpyiYupOZtHDz+stv/dK7r3/0HQiV2fx7X/ntet4Mp1MpJzfe/tj1lycsCw9DAWByue0chlx3nkksgelO5d56/dWRfuri+WFX12pZW1vHTmLk2HH2qdXYtetm2Y2GS1vtyGprcj2YhnL34598ByIfHz9ZzB81i2Wbc1vXy3m9mjXNOmkyMtCsxMQMTgDNWMwMFMxYkQyJ0HqCGQCRmYgoQWIlBADirAqgzmGradbmITlIsVm0IBh8IUxqTiSaAGQ0kJyMHZEiCWgSLsEFrymbogE4wGQ5gyMBSUjoXEAQJWBANBNiQmMDgYLJI5KpQ8G0ST+aZskG2UxB+9JiyuJzYjaOOWcL5DcnoqiqJpZb1HRtr9DkCP3sUmenZ8OiCEPMmLqkq5S4LCVjTNmV9OzyvBo6MkLJYhC4ANKctU4wk5QTLmJGkMm42NmZdDESEoRy1uWlpb1yhAkx6rViHDwd3Ju8cbBzOp+fzJvzmBrTmLT0NAg0BhyV1UFV7vjQLLvSlRQGTFqCc+zNdG9nK7bd4/N64NKtyoNaBitcGHSiYj06ZlK6CFaUwbU5WUyoLOrJt43d2BqNxm3TxVzBje1wNtdZ0kwaKhOW2/vXvnFSP226xeNZGXa8umfHLaNDR8OtYclt1MTqoiufHreLZGOmssQGuun2MOXVjb2t9SJaJ1lV2C5j64jHeXUP+OD58me+8Ivv/Nxfz6H7N19743/3d351UfP8tx8e3LtV/eDXm/e+WW1JQdshvfk//4k//3//r//brZUhz177yS88vgi18hdvf2Lhv7Tzzvi1P/M/e/Rs5Zr0+4/PHtZpfny5PbCXr90oRwP1BblwfTI9ms84157w9HRxsLvHBZl0RpYJU87UKhaeq0IqrrYmZ2eL/f2tQeHSoq1bw6FA8MZcFDodVJeraMQcgiU3GG0NURlEchSkMCyDN2rXA+dL4stGDzs7a1rnba8qWbkjOJzN37qz+/Le1pPTGYcgzlarWIVggoFoUgVIUgbeDcV1726HQJLqpMtFV7GzLu142t+enHTds1m7NlgnGXqejH1B7mLdnawjihaI864eDaqOnOb05d/95oOHT//Uz/7E3/qr/8E//H//g288+r4andcZUgeAMUpI8CekosHIAZJ0CURzhtVSV0vS5FAF1ASMgw4nLkfNHWNH0BiW4DxhSRhsUCJ5URXPAyNGo6wkWUYlOmICZvAeCMSYOKEgYauK7Lsutwm6TDEBWCxKFAZhU8vEPsauSd3QDZEZNHaGVhQLlVWMLKRAXTYfHDtf+KKkGtRSli4qIRV+wGSQIoC1gG3XDUtPhB4wx1iULgStPKfcA3+g1SwAFbuAfVqaUdkbFN4V4AgRJEUFFYxZmQgYsmqKQuQIMSdISWMSdeyMCudUrPTBoe9PmaHg1K69B8lZwBJkUykAxHlynhGKQBgYlSOrspIN0XmwmTQtZSBTQ0sI5iiLcC80MAkymSliBnNIGdGADLEVdGgEAkim/WEJkQGSaFJ2qD0ehwgQuCAiympKBC644JwPVvlBwclRu1hS19KkrGNzdn5xWXeLWS2Ic1MrbDCuHHkOgcCZZUUDYBeGTUxVcN5jbnLhA5dABOtZFnHELolixYOKI4CZGiVgWUZZd7CVpQDbGXlAqtvUGOYsFKAIkFI2siIEyahKYi6nWnJn3CFxVGTCQVWtlp0IrjIoOYvQ26hNkaOiEQJHgNHWUMTK8Rir4e4wwNeeAICa0camvcmY9eKQwR/zdwDgVQdw3yJ7dQ/faEM9lgF75anvmlGDF0KCgRLp1bV+cyQzUIOsKNb/5P6r9LkBA0TqW07AiLhvOoar1L4nN9Hutd3x/q2bl3F5j1+aPT+Cojiena3T4smj++35xdY9nr5R4nTfeR+7usrr2fvfhg/eu1aQdtG8v14MVqEorl1z4xGFQDlZFBLtuk5zxNjyxTM5euyfH8OibZu2KLlg6y4u9Wu/r0BwcOvMWTs73967tvPyO+Xi/MOz09XRucgPXvrMjTTeHd3cTbNLd7aE3UF3dIQZVquFWV2x63JqQ1W98lJ5+zbe/kh1485rP/HzzexInj28f/hs6+FzvldOtiaDIhgm4CK0x+tvfmX1jT/cnS2n14vausN/+n/b/sYIp9Prt++y+aXn7emE2w6K6e76wpoGnj2ZTnwpD+ePn05vvjSdbleYfZFbkN3rWx//3Kcmw1UBq+0by/vn9to7P/qxtz/75S//f3/9t/6JQ5fMFIEIWAF7WBUhMWqWPoShBkQASEaa+7yIqkMUM+rpooAGmA1EYVNxBIZ9cZqpZFwAeoKhpwBQeJVkGczsavzb8+hfhMtgU8O2uTMA4IYPqhsPwYuncfPPK3lr4zHa2A435FLDPzFEBgCAsuLlKs/qNBoVLqC2aWc63N+7c/PONpUynA4dBtS4qOudtJdVV+tWs8Y2F5PhddWT4+7saDVvdd0CeMlM3lGnJlm84LJJ7J0QlAV/5BOvXiwu5xfry8t5Wq8HASzQou2Ozi/3xhBTWrUtBDs5nc3EU+EtJRGpu7YKnkqXW8xmGiGRSFJkDOjaJnuHDjk4btsMhrGVDx+fO7aD/b1szenzi+WymQ4Gi1UOVnnm21uVMu5uD49ivHZzR7qGM6lP6zajg0fP3i+QC54Uo6nUVdjTgvJw7KxwDQGPpsvTebuClblMgBxS7j54/L1o7Xg8unf73rMnl+WghIJaoPlFI02OHRZU7F4brk/qec4v3bt59+7OZKQltK3llJpq32/vD9TFi3o9LsKnPvH29s29H37nq0dPHpYcGH2zatUdX
b8+nK/mW7e2jy7Ow1ZVR1uedW01OI55d393PNr/2m9+Q87nSAgVNl3sok12xpe5xg4k19ulHR6fbo87Z7xcNAu9uDjT2WxVVUXMcHG0vMyPx9ujtm2frBqsqsoN6rXLSr5yhup8URWTTuzw4jB0izev3yJpz934UVs+a/iV/b1VVy+a07o9urXzmqGGsX97/+3zZauxaU5ncb56cPHeaK+6ub8FasVed2t/57vfW15edMPt0St37z742iNLVX2+Qj8px5OikMuL9arppMPK+9Mn67OnsHd95+EPT33Bd25vd+Hix378zjd/73vldOuDJSkMPjxcGjGCKXRprdGUiz121bJpujrJ2WKxaIKnLhR+uLU13Pvwh9+DqlhuyjCBCQn7IsI+a2aGhuSuuEJXZVWIYKAmV8qsgm3k4F6uwSuPjhropiFM+4bSDRy7X+bJANEQadNfSptyBVABAQNQQiRV2MDUdCMZbUSrF3Q96JepTfdBzy3u/az9jBwJkcjghcgMm5WCej+TYm+a3AwlYJOAoo3nSDd5sg0GTaHnKgOaEjvadG8ye2y7OjhUBCMsfAhO79y6/VM/9osv7X+kaLDIQBI9k2vWub1AR84PBHOso4YQyiGQh2jpyeORKXTH68Mj/O7Xh5p1kSphFKTCW1ZUB0SUIwiiKVKv2ZFq3lRKAqopIhMxIDBRlsTgHbuYFYjRkzkC1Ga5KkdDAF53mocD/uI73WjoRttd1u0iyJNHF08e+q0bb27d++Z03+qjrNRIH/Y3IEOzqyAw9Ss8A242ByDE3rtMfTqv14w2aoz1Bi9AAqQeb0S4+XQNETYre2/+MoONIwmxhxJd+ZOuxJ1NwWX/RdTUNhAlgN5NJmY93NzwahvouTqwCd2D2tWcgpmJ0Hn2jrznEJxj7zgwOe9KABABJgYT0baQ5Nmr8+zMTFAyUUekTJB7AQ8R0aNmFODN01UIIhI6c4aJTZUSK24qPnp8FyFgRhMBVTE1NXbZnBCaZaTMhBkNDBOJ9L2+/VYLAqpI6Dy32bKpVKE1YrGM7JEUENEpB8AKDQmBQEX7slDqHybYnLRYMypo4V1w3qRVQADSnASyOZ+6tk0iOXsP5MliG8J4/8ZNn4ej1XG1PJXWIvh5YyApMJZFvbdbepzzPO7defnmzc82y+mbP/7zfm9UUGyWKyD5sT/3bns+X80uy62yXTb26EHrizTammxtr5fn1tSDwQSlkWokACkmH+uRtqV2r9+70zx9eF7rZaNFUSwuxa+6wbojr36am1FMi9lF0y5is5VXe8XWzcm9yeC6GwxIXTO/wLxM69gsYlyl3Kp0qknUQLJ4x0XhwIAdAQA7B+wkGTswMzLKaqYEpOglgxSEKGJqipY1K4iYkrZgpXSJIOcMOYrzoRy4rjVpFQi6bAZkBk4pA7AJF8aFthLZGSOBaTBnbZLUqTNi7HIXHBMoobAn1T4ChaaIHtAjEDrXf+SdQiMGCiCSDUWAMngwarSkJngmZVimus6bW7KY+u1ifngRiSmG5aUBKEFYrtb7o6EboJhH4NJyslSv2ihmhQFCNkxNZs3MbrQ9qoawni9VASI5c2bK3q0lXeTl9ni7rSWmDJ7nmD+yNxk2KeY8KAfrZum933PF9mhyrxy1JPOmWURVxrbVso5v7o9Gw2HdalkNuxiz1kUpZWq3J8Ou6ZiARoEQTps4yDZEyqkjBjWIUZNAMrUciTBnrUhPu/bGa9fnZ+1q3ap3N+9eK8vF/ccLcV7LODRAwQi0inm9Wg599fq14XtP5ysoW2GD7QLFOyiQlsfigGnM3nPXZY6RjQvnQuHr4wXJM9/GsRtiJ9W4sJyaTrYmwxKM33+y/+Y7/8Ff/RsTHacP/8f1Im2Nwp9OAzcdNk9Pvv53/vYnp+PpIHTry/Wv/bP1l379M1/8RFXdODqZN2/t/+h/+Fd//2g9yOH2cPt/8s7PHX74ne7/+ZWda3769kemP/LxZ03z/GC3PrmMftBlqxsAo5fuvHn28Ng1Z8z8kbs31ov1ohNA7ZJiNk/Y1tEDhjLElC8u18NyeHK6qJeNRpsvovcIwa1W6wCBcegtqxiJenIK0hrGzjKyMRGqI9oqqvFgUrfpwXndJUBFzEBA3lHsUgL3zYcnH71599bu1rPVOrVSFcXIB5R6GsJL21Xqsnbdjesjn8zqrouxCIUry8L7LmVHwtIWLt+4NnqwameqMVsn1Da5axPlJAlCEW7u7CeFVWwHBZn6r394/MHpr33+jbt/4Re+gL9d/+79sx75Yg6r3UkZ/iTWmhjUVNXWS8hLaGonrTo2ZuyiAgN6oBFBK6risiuYeg9OqNy6FnWUiLIg+EI4IwKoEiITJpFAJFGcJwZgAFQjVPYIRKEIrUQisqgK2GWLScBBlzWrMHkgaFDZMxisVi1KURJYl9RAkCZbW6t1jaY5iTPwjJakQnKoklPlnGdnlBRQXUG9X1pk4NgZoEKMkpIhmGRBAOpLPzZ7NeQcB74syLGhQ8wISFogmjljtJwR0LuQohaBHYERMiVTAyQPJIaUAEECO0NUzQU55yA7kNgJKBN2onVdV+UQUMwrMpLnAsqE1CYFISxKyInJKkJVy2QABkxEbFmQkAlZjBCISQHgyqnSexiiGDEiIhYOC8emnMhnUhE1RQFGA0YofEJEdq4qqCrQezceZUc5diY5ztcpp/WhdBIvF+vWuyKwZisNTcklVdXCESTLpp1a5ZkIPGLlCmwjB5ez1CtgpLYlMS0LEwCJUgxJTUCg62Kp4IBiLfMZI+j2flmU5EJuNRr3WW8mZ+t1XTjKmVa1JnWoiiIACpy9Y/KujimmBGBMyH09vCgBOu8KDqlVj64CnBR+d3t8OWuKUNy4ce3qkH111kbgjQkDEF90mxghUo8EfpE/uBKPNooSXNEaNvzrzQBw8x+DHrrZW5GybX4VqIn0F4ceBwGARlecbMIXeGMSUwbrMTlm6ME51SGZrk8sN5aTrpcD9AtJZ0dn4+VymsO0VX78iIZQtuewnNTHx/b8yD28v9N043JMgwpBOSVYz+vTc6xTa7Mhu7SuAxqrFbqC9VIPP1w+vD9oQlerC6M6dRihSUmXs+GA57PVtU/c9aPig3/11e4Hj3eubW8Ndsqb1bP3H5w/eljs7+D+NPJkwKOdu3e3f+TzIUcPfPrdH66OHg2Gk8Zjun5rfGO3zZia5410xHjv3U8e3NrT97+//O1vLJ4fAdCQbVGvclfTvCvVrZdNOw5S6Ngk3J+RPV1+59uaotudNjk5sXVyN1vYBtdlPRhsN9Pbu3u3/M6+C5Ugx3knoluBRtd3m8P9Zqm3701ufuzu+NabVtnnf+Knf/dLX0qaDDGDIDITiQFTDhUghGWKjIygm1sXASDpRmW0jRL44qO3DXFiExpA2ljNzABMlNfJPKFjHCiKmQkm2XSobdCzYITU46i1T7ph/5bbiyewV4k2jxC+6Djru3kAkeQqAukINx6lPyZWvpCLui6Fwm85TCkyczEpRtuFoGBXjAbOe4fGqsmKETXderWuAvsht741
iM4hYORsDmCx7BIqoKIao5OYN066ZEXlfukn3xZnPruD8e6j+8drKFZ1rLtUlNTUFzGUYBDYrdtlyQUnGICLuUldM9qrppMwi3z5bB0cg/V9VMrIZXBm1nbQNJ13LkcrvBsM/HrVloEPD09AVBGXi/V2WW5tj07Olz84PB4HuHdnb6usdNa5cdjb2fvwW09aEQCrs+mqu7u33a6XB/tVc7xw29dDcG2qy71BazGxxJR94O0qTNh/8L1TcrI3nRptLZpmseiGHLpWvA/E1EgKjIVnjXnb88uvH4RyvLvtzDcDz1b4rgj7uyN5dgZBlsvljTen9izm4/uzi6dnX3s0NudH4enZbGtUrp4fr6Ntb43Ojs4gYrg2XNXrLs4Jq+3Rzudfe+dLv/57fHY+nRbELCCkupKmdGV7Prs8XAVh31Lb5su8mC/qNnXj8VZKYgCXy/WNUbG6XFUhTne9y2mwvdNBUfH26r2TWKfhUHd2RnEmFmPFRslyFyVGpQSsXZYoxdmyyc2synzvxnU5W2oQzcOceN506zq/NNx59MPLk2Ubxlvr8/rr93/7Cz/9Mc169/puVz9P7erZU0odtW103naG/Mq96cMHT3PushL54Dyv123d5Is1BnWx1WYvvfH6+P3f/+b2sBrthEffj4vzFVY2Gbm9rUBF8fjRxRDLXOfFuk6Sd3bcMq7GBU5H1dPLdSj84QePMOp4XO3cuPat33kOPVbXgIl7AQdQFTer9ZURdPNGb9xCZoom1pPOem4RGhiZgQrqJuipAGJoagymJojEG9MQIm16zzdTAgOCzY0fe3PKhpi2CaLCH+03oKCERHgFMO7pxtBTb/r1BxAACXuCn4Eiuo3vBoD61BtiT0JiJNEMgP3KA8S9BgaqzGRG1CeiAOwK02MAfYYNHRZ++PLLP3Lrrc8MS3x0+GC+Or21t12l7rPvvnOrGlHXpjaCESOFGEN9aTkaYN20xo4HQ2+ZoYjRjAEz5CjQzQpzbmkZogtTUVNjMiHHoACmBADE2GcFsxDiBkRu/YCMrFeNjBDAUVAidaQmWBauHDRNBEtGbINCqgrefMNeeYW2dwfk02ptXcRQ6Gq5X40ipJu7ez/3Z/+tf/gP/rvjp+foGVDUhIgtp56AjqZoCKLIrFnBkPvA8lXa10wBCWzzLAAoIG8MQX24DGwDTNmQqAF0IyXp5jdCMN08mP3usuEg9h/ZVVcfQC9YqompbtQhBBPoHW1X6qBdocw3gKReMwIAMSGHzqN3TGAOmYAQXOCS0feoLiBmFkfKKBuUOIOIgSbnhFiQiAyUemamoRHjRiFVQxMgBEYTA0Ni9j3VMaltOmbFDE0lo6mIGiCIghmxQzBEp4KAgExdlqw5oWNE56l/YxGdMTORigiyZQMGU1IQsBRImYg2PKf+sBxN1SEAWDIlBM+ewDNTlkRICkBISTqHYICuHKxnyzabCEoibTOw/5Gf+nd+5i/9Z9Phzrj1Z3/vv7j8rb879aaj4tDc+JW7z88+qJsnw2p/6/bd1z/xc27nszt3fpSHo6bLLmh3cYnW4HLF65WedahODOuvf7v07432p3tvfO7BD97bv/cGPnj/8GvfJV7vffKL00989uzhD9r7Dxa/+c8mVfdz7755z/MfHj3+g+cXaJ4wVFxim4NkRApo7HPE04WDYug7hOBoMqwgUeMpuCLQwFtia9iidz5aKwoiWQWjZInRh1CWHrsciuB9cEQExIEZPYESYkSligKDNwsxSUrZTGPGwhshFwWEKmdlDOA0FB7BIHZoxuZVYkzdlWcTDEwMvCMjImZ2FYGXbAkYpfQeUm6gRO8JMXtPzOSdM0FVExMGQzYiIzCnGVUVVMBaFcZAlFNcQ06xjWhubtLm1rHvVCWDCPdvwflp1EuXlnyxbljz0JUg6rw5i6522bgYbPmCG9OVdIOhy00qtuCGK0spL57P3aAYT4tOMiIqscvqDJwDzFqZieR4No+dobliWIjyooVkHqWjjIvL5XhAqY0ELlCpuRswVwM3LQUcrSoFim22ro5DP8gZW6GWoBxujwsxwWoU2hTREQYX27SKqSAXiKJ2yGAELahn36gUgQLL3ZGbVANd1B7s2rSoSSdDOj1uHAy1Gj0+n1dlKaqelIEGgy3L+fqY68moERxMt8+OVqETMhkVFoqAHi4u17hqB+PhoPADc6t112Lcm06GWYlsxyGhHT1+vrszrFZ0s6A9oF/5t/7aW3/pb69/+MPHX/vq9emUBg7I3h3fuDy9WBbtq+98cnH2cH02B4mDamtwkp7991+9Tltvf/pTy9cn8997+GpapeftP/v29/7tv/G/vF2+9OCf/w+8t7xZ5kd+J+68siwq2sEmGqsISFHRZLr9yc994bvf/r3L0+MR2nBQxuXZiEnZ1FFPozDFtMwXq+apzm5ed8JYSyoK4BKCc20SzSAqy3YJZg4wrlpG9dujrcm0Zrt4vvZctE2DagGLZp0u1nUXc+F54CAE9gbLJotqEULwvO6Wt65N27ZeA7PoBORgMpgybhutlMtiUMZoGT1CGYIyUQFtjuMBD3yIAICsbDIIadE2ouumdQaahTOAYwekKYHZTLQDhnaJ4lTTb/3eyfe//7V33r777/3U5/67f/yvLfCadTAs89r+hFSkZrHL3TKvL1mbQjpDUTATUw7kmMFLm2Q8diHA4jKrAHoiKtU8erFIolgEluCUqOuSKTpEZBwwGViUnMWGnnNqmdkZYMZkmT0WY0DUNmmTeo6jB6AupazEZmp6sWpgLMNBGCS1JAOVYlDEtjUk1+ZtdU3XdGLoApYeyApC1IxmiByYaskEOCj8IBQSJeaGEFSsSYKMoXBZEhoEAALwgGqQU3YMBeIgUMmQulhHSDkz5KryiCaClowAickTFxwQcGtSoGrsOsspxgyFBwImC6FYrFviMHCYsxAOG50hNEiAjmbLpirjqGLIAspAwsgJyIcyZYCydEhtszKJPjC0AqhgGRgQSSUTGJAZoiHw5khBBIqAWQHYdSbEWOxMyr1JQUlXHcQsMdar1iPEbKKQ1Dh42h7juOLR0IpgwTMg1o5znRe5XjepS0LgyZvjuhNVmFSDZczQCLERWWKjgsMYfaGk5DKmtoNWJFNOoArgjQEVKKqix3JMvgKP0M0TZil88CnkThRdKxbPpRy220PUmJpIFqGJ1H9fLoiagdPUKRGzuOA5kyUTB4ygzlPSnFJmdaCWszJTYIySEG1Qhmt747hayrqzNk2mu5/42FsA/7g/NW9Iv70w1N+2rxJnANb33fLmTt4Tw8E29TK6+dEm4GNiYP1haHMWv5IN+vndFXm0d5lceYnsxa3/xcmvr0VjJCVEM0V2RsCUco1Qffqjb75U6en5g9vdx9JyOTYZ7gZ3cX751e+2j7/zUkyF6XqxUO2w+jBb5mXDi7ZSK6fDJtNwNMjrOq2WW+Xo4v77ebso37yXBwM0cyaVyPzpD+F8KWfzLRiOCC+aOiZBpq7Lo8Gwi2mZi/Xrrz+b3KqOzrZv2+Wyq6NHtqbUN37p89YW3/vK9z/
50ZfnfhQGW7AzvDQuM7gU6+Fg8tbbneNcci0FKalIOdwCHYlAE4vls9R85XdpDSNxeR3brEwWvFVFsXaw87mPwe5uJkdPvulmF5gw1bmgkTyOgQ2QIIt3VCjVOaaL42s/PtTBoG8udM5l4AL92cOH8fR8bMPcbRWDUbl1Z7i3/e1nj1/be+Vnf+Znf/s3f20uyOzM2MAcpGuVv7E/jKCPTuLJQhg9o4KxpCukyabEbjPotR4//8fzAnDFkNjEEQhBm4xItoXoHZY9XWIzSkZD1M0j1UcT+t/6CkMCmyaWPjUCV9a2K1fbRtHUq7q9/kJABIgogAgmL+DZV4rmeFyeHM9EpSwCARAFDsys5hDEIDnkgh0OC1RbOKW1rMRSEXhvdwJmw5B3PLx2a/Dd7z5/voiEHIhQoRwMdvcnz89nVeVv7ZdRl+u1hjLEVoelnwyK84uIYte3qrKQ+bolorbtnOcuNiLSda3ltiwxSXd50VobgikqZhUHGLwzpiYCIJTBMWJKGqoCEZokW5OKVFKy1KFH2KlGhUrKOu8ElKIxnMYur05Stkt5NV2TUEAn40FoO43q6li//fJ0a4ePj1ff+12Zt5mCHNwL05cm47t+UmpdLy7nq/p04UFDUWB2GcJwUHnQPEhU52WtHqzQcG1/Wlbu4MYOoQRfJKP55WmlqRB/fH58UQK6LcSmWS0vTxY3difZ5PEPDrtOiqJctW2qG93xO/du/vDhB1hWeweT8+eXyy4DeDAcMX3k7u7ipP1//F/+roNBcFXdRPZ2bX+Uc5wMBzthsFifjaGYTreaWA8HA3S47UZFtXu67NarTiC+cnv/9PElGzVrfnKxPjgYTarxk+dn093BICTSZAqLRYtN18TlTrF3vRxSGZx3q3V7Z/8mObcW551PRbFcrVPGQmHiygIwSxJMw+nw/uGD89zkweCsTiT5YP/uN3/wdH8S5rPLoY9vffLtH/zhKRgXVfjoGze/9o3vHj2+xAjX9/fPl5fvvvQSZPn2g6PCh/GIRmQ716tnjx6eP/P797bOjtZn6/uvvXzrENNMwJAPTxspuyi4tz0sx9vLp6eTwk8r6M4WO9v7p2frYelAdGviL8nqOj16cLg5GPUMBYKeAaxqSGSEsLF29JZd0CvlRQGS9kt+X4ikiKC9viO9hwgY0AxEeg3nKuOMZkT6wgbIgApGBqpyVcZpYH2nGm+2oyuRx0hNzZCIVI0QeqJ2z765as5CVYUrlcus70JH3dCurccpE6OZ8Qa1g5sIG6Jo38nYB7uIgZEUyByzZSNGUUDkPhonAH5Qfe5H/8LH3vgshOBKvn7j3np14ebn11i3lwKXR76oHOjWeLs9/DBcnNnJpdPYWeaDW/jKp+DwOH/rNwI4TyOGgHllOTEFVABfEBfqGIEtsaFsAMySAAKIIKpqJmbre8ZMN/XzyL3uJtnQGBglhIYrvTF2422b7CYlV7n6/EJdx7uD6rMfzcW4OZ1jvQ4FwWxuCX1M1Z2D87OLnWuvfuLeW1/72g8X898TJF034IoCDDGI4brrfCC0DAQ5aa/vb3BCfUIQrljOsOEZAaiZAIAqEPfyPkAfYTYAAEaGzb3ZXmiOvfa3mRn8UckFAPYGtT5wSD11cZMy3Gw/ulGU7Eo27GWmKyMSIL9IPzMBSmZjR+gIPbOjImAoKDBWCIxo2XK26FA8iWNv5BIlYRXLgALQG+6QAEStt0JLFkBnQAZ9VShsZEdDIuxlSUekqgbZjEQ2HjcDMCNUoqzQdSAdqFOHZqqcGRkNOxBEciZkiphVo3feABiiE0xixkHYA3nelIIKEhJ60I121neuAfWAQkyWFTCbOcc9DkIBQjnIbetdkbMOuEykouJ8uQD77J//93/l3/+bw3KXsvgOX7r7+YPdb7rT+xeXDfvRS2/++MHP/MpXHj/4kV/+qcnk3v7u3fOns/N1uz0dEdj89FSe/pAX53c9nfzLryCG4d7N6uA1eTzLTx/CwOm3nxSrs+7m9fHajQ5PL8+fP/vWk8vf+8ry8sFktPVxwsPLdixn8LjdKvcnlRwFPerariFyZVAEzdFyGLGSdRerJR3u3yxkddqNG1Zatkc60HI8GAHkrKYgWLuGY9IYVUXVzBGJgaiGsiAx1czc1+K5SMDBj0YeKoaCQSWoFlWOOXY5B/UdO++d8z5wiQlUMjopRgUxYGoTJMxNMiNLpqqYTRXJAROGgfdDdp6pBHUOESQ4I80GFiQqYwSvyTSUhZBjw6AqOZplQkQVUmMQRPMAhp1qAmIR0VxbMmeWxCJKl7tgmjOgguTcvwUBy9XaaQoVOALjzDmqgIynI2MXFTAJUGeAzvmIFlOsSi/om3WzPS2JdFAWeR0lGecCbF0Gq5MmBI0ymA7OmnmR07VRCSTr+bwAzrLuYtdEi2Q7xZ5hm2LyzINh2WQTE2+SOynM+UHQ3HoOk0E1bzsblOBpxRxBKHbaeM/Ourg1LtES1LJad5WvLpdrQPSaAkBAp0Tn6xhKItCKAhC1uRGTQVXNzyPQZJUaRnLAbUoppsLzeGskhpeprRerUYFVyteHePd1Nyp3zpdtbRqjJCQvHKgcIkwqzmtNg2B7Ls/SFg0nBW0Xw0984Se3fuTt9dnzw3/yldHJbLyy4clpPvzG5W6MX7h2dn13fdY9+af/4g6626+8+mxUPBvZ6O7t4x+0E7+NCxcAM7Txk6/wZHDynYe73/jOjm/b4d7b19+9/+S5fuNr213Tvnd58fQrHRLu33j106/Qy29+WLd1udvJquvaZ6d1KEdvfvJHgKy7OIX1evf6xdPjZ0dtc7joKu+GFYvpZOjXdTpcr88etRBYQLOaNtEkEgfpqM+sq1pMUpEfOeIYQdqnz4+qorg2HdVzcI7rOi7mjYVQEYHmPrK6NxmnJFHUxJT4eZPSyXI6KkKbxugnoneHFai1jXQtDAfOgwpI6V0U8ZVDBxUUknNABIFsQGQ7FZofcJueXCzXkaZl4QsTQK8AKus2EWIE67KEoiI1Mbp/qR/+xntvvzT7c598G4L8oz/4Vk61g/AnpCJJahlXC801OQMmDQESmgHFCOTQjGKGNrN3Vo0R0PVlibFWERh578CyatasjFowqwOAThOR5dx5Qs+uMWMHBdnY+ZhTRlIGcGBeISboxBLGOiMFNKN+cyXqstYpb4+KCZR1t1YPXLAjNuTYdayAQMQMzFl0Uo26LjLDuuuyUWJkhyUHRSqyT1h0mpPGZJp7Jl8yZgcMBEbSH+2sKDwAFI7RJOVcTAarxXrVxsK7LiobcAiT0WA8mUxG0+vXbzJhMRr6suq6TEQSJYnWbfv0wfO6mbXtgiq/XKwVS1+NytFU2KfVSdM1BaMLbtWsh+swrirvBwwAIAO07CCUPkJlzjtCLiNpqsqgSdquiYrZwDkfvCe94iWbsCMkJEU01SiKzpDFO9qd8q1rYcBOIddrbbt8Nkv1qjlZ52xJxLOWxMgeOKArgMhitihx3barpmk7MQLPTU4qwh6NeQUxAghgVZInyKAYiAqELJrEZdCUQjVYL7oQ2B
kbqisZRJUwaZ4EHqAks3WnXl3qwEwxMJKPq65ZS8ypYh/YX6xqppBUDKHwhCRUYkCWCjVSFnFIxtalRFkAzTtmY4fKBF7QTAcVDyowo5J96ULbrQeDsss42d6+sb21uipIphehnU0fCfIfk4pw0xDzokt4c3aBDWISDa23dVy5/kGhDw/0VwPt/d49ZHRzAXhh6+jPaj2/DDZcmd4eAldtKWS92wgI0rqWP/PLv/jv/vW/ORTh2TNc3pcf/uv9JpWrdoTz0Xp243wuaZ7RmrrbrUpZtNwg5g5Vy+AgmwAUo+oiZRQLHDRZaNbwje/m88e4txf27qxaWXQRa4HTFdUWo8xjFkAlLSehsGJUFLRy3e7w7s//0ll7uX/91mWMO29du/2JT52dPVkePoNb150ff/4jn+pOHw/C9OZLt9cimZyvBhJTMRznepmbevtgt2sUUjImkaxJifzI4XB7ZzbYlsUa60xIfjSUQsVDJvIv3S2/+OOwvSfzy/WzbzhpCh64osBMksQhmoeyJBIRTXE1d0cPy/bouZuCI0uxDEOHiO161Hx48uHXr9377N13PtOwiyUnkHv37toZ3Nu/88Mh5mU2xCFo0vwX/5P/8DMf+eINHv3n/8Wvvn3rzsG1rS9//3enW8iQK1/UOfVTYTKgvp+YNqmQPne2MRKBQZ8JQOovaWgIAjljDTAMVJKRAzNLGbL2zfYIKHjFRYfNvWDja6ONjglXeNle57wCimxCatADLQj7+jPo8y49OctePI8AAHBydipqajYcTouqanOKknJMxOh9UXCIBqJiZgSZIBPmmPOwrJwvkBmNL5HqxfLa/ta8my1WOee4vzUiQFuv90q3NfVvf+TmPMC3PzirnC8psJuUBAUvJxCdpdjmBBYKQnJCFKkDJ01MkoUMt8d7s4vVvCFXlSqmEbMYADjC4NEAIBuBeYcEKp0AUeqiWtoeDSxQ16TRoNouw/PLhfcIxi64dZuSCRpSdifPlh7l5s4gAZ92cwVadVJHO/r+82YlZREjh8nW1sXj5jvfenDwxvCNN3a7uN7aqsYH/kS65TkcL+uiGAdPDuNwEN6+u5+icwrBBR/QQAajYGizxUpId/cn5TCtmvO0Pp+Eba/Yki5WC3Fw//j51nqobVDlQVHqIvpRoModpcZPBqB+tm4bgrXGxeHpEGF3a/v9Pzh8/HyR1/zKmzdiW5+f151A2yo4qwIsZqfeqCBcr1tf2NtvHPzgh4/OV8nZVhToDAajAfvi7ss3T0/P1i1U49F5atfz5iNv3bk8vhwMQQhXrc3Wedt7T7y3N2kXa1HNUt66ec+nvO8sLs8ax9t7g7OTVUbb25+Mq4lYePb0FAIxX8yPH4s1Di1pvvbK6w+evbe7PSzLmAq9sxeW7cViNWvWZVEOnh2e+6LIXV6ukgyrUTly63hxthwmnRTdz/z0u/d/+LCJ65s3r63X8ezb9TTsW0y0X+y+VGw7t1qls3kTQT716Y9efPjh/DLduHb94flRM8vT0bSLWWLuEHJeluF6peXFaddYe7UXMPUvcF8cgSz9awcbpHMGEEBBUBU1FYDcs4X71HFv6ukzy0i9ExDVUIH1jxtNezOpIfMfvbP9EgGIRKDa8/6wv8VaTxW0TfMamqkB9HJyn3Iz2rzRmzIz2OTEXlhWjACBerFAr9LSG8lAN4JT39vwIrUGgEpATGRkXIB6t+pyMfBOxJuoqAh6x+Dwxo3d23d3hRZRYNtNOM1tdjqM8cYQ9ymGeiHPn7rUkeTReuFmi27e+eEgOue27ha3f0Ti97L8C9DOFQX4kblSYnRVgeRUVQCQSHNGx+QKFUYEcgYuQRZIHeWuh/CY9AFh6Ivh0Pr6dgBFZd9u3Sx/+c+utgbJgQ6m4+kEyfhsRrPHmuPhUrY8jLqlzp9Pr+3gy3dn3358/vgZHj7cu3Orfv+rmmD84MmBGyzQdq8NRoOpS06kamhUO704fbqaPx1UGkrJdU3AXWyIKgecxZDMVDZNaACmRpvi+L7UzDah9ys1sP8oe3QVKVIPPe9BUZt5gPaJMdp0IWzW9P6AoRtS1R/ZW5FYTU2u1EN8MWrojzM9M3vjKspdxgCSTdTAUBQYEUMwQwRkI2eIgBnUY6hYC/QOnShEMHPoCElVkAlAzBBZRcDIDE1R+smFUp+49BB6s7aQqCZCJIXcK00vBM7+b0gEsa+zEyQyMjBRIyIUVUQGRskJ0JAMkczQITGwByo8C6ICZYRe/O1/kgI4BADlnhUGpiZJxAGgKSNgjyNElySqWWQOzoV6XSXVtETL9clqNN4e3Xrz5/6nfwOk8LhmaIalhLvb9WQnPfj+fo7F/OTy136D//Qvv/un/nI+ePvhbFGf4rXxaDIWlcuuWwRd77z90cXT89l3vjYelt3Rscb1qqrKdz8CKOFyVj96WmiEBR0etYgql8vL7mS0fOwHxWrcdKv5qAU6e/7JncHZcv3Jg1vfOT4+G1fvr9OjVRNXuCqoS1yv0I/TznUicFRwuV1fLv/g4vLp06MfYHUTyylrOcE9xJDTcQwupowdaq+Fq4JlTQpiLFAUJTlQEeNQTatQhfHAF4GYMeVIgJI6Qc9kBKhAw8I5hA1RiH1yKtgygncFOsDSyKGC5twxO6zYj0e+qKrByPMAAEBIRYkJkhIiihRSIFlWYVcQCyJxKAIh5o5RYhIDIGQEUkMiVMuqSUREyZJhf6RCTgZo6lnNMgJalhw3AbSm7TSZiGDBKcqgYPOcJI8m06Tiy4EWIaYkCoH9slsVgVQkCdR13C9GYIDmkqbpcGKWF/V6jaklWWeLqFlzCMMUQ464VboG8rQk8hpzE8rCwM6bCzGPCk0bGQnJVVwMWEPhl5IbCGe1ecyY5i5UFHzAcuD8Ip8zknFpBqtYE6NXyCikuV4uagSRNGD0SXqgHTlUxKRYx2RidUzJ1Ls0HVenl/HsdFFOyt2hORAGcsCUxEQtaQepLHzJPD9biEEcrcrCuaK89sZL88vluo7bk8KWq/nT7nw9+j//vb8b9rrf/cf/7MH/67fq+2c/8Vf+jZ/+W3/txHLTXL70H/3KH/yv//OT3z364Pe+Pm9mL//qX3tAW3CyKA7PD6pBzFHLsLuzffrsuD6aj4eBELt6Tgjr4bD8wuefLmYfufnJZ//1/xVx1XU0KJ5VR1OTTsAh7fhIVdMMjx+tnz08mn7pZN+dvf2FtjxghK2trfXZiffDIrA/GLKlPccHl8frbn56dhmXs2fPnmWEKGsqXYR8ul4HHeYkFeGgqtbdGjkXzjEwoxORHCWKdU5PVsuzti4rLyiVyZtv3bv/4dNF0vNVOxqWoDRAoNIncou6ExVSbAWGQ++J2kaHwBPFl6ajIVibUhIrvN/dCQGk8MRVYZJD4Yz66LgEx4QcWEsK2TS3aeLCS2MeMh/Wuq47ASu9n7iwalZVcETUxVSURWZb1VEyjMoKFb//aP7+o7OPv37zV//iv/sP/+H/8Oh8+SekosVlalbQ1chGBlKMcFDSWiwb+pFjDrHLqqoZhA08O89RSYFTLZIVH
TKDGDiAKB1mrXAIgDmnBgwdCViH6DyiSjBVTQiKZGIoimJcePPsUpua6OYtYwTHLklGglA47CQuBZOPCVMExzAqx4hVXtdAUo2qtmkNQUBbyUaQUBsWIMhmU++8GTFLliZ3rYnzTnLqxDQbgJXAoBkQCnZNl0gJA3tAc8yV986dXCzaKNFhJwgZtwblzXu3P/qJtwejrTIE58qU1Q+KZh1L9kVRSnJ11/n5JYbJ4ZMHpz84Wi4vzbBtOzufD6dt6R2OhhwgxdzGqE2qywDGMvYWu5EvNamKZJWcDImKQUUaLCdt1BNkCU1Xk9NEWJLzjk36oaMaY1Yl7OurLUfthyMhkJ+UMBwYUbm3HZsuDIb18XFcpgwpJkgZcBlH7Msimih6Sm23Oj7tmlUXu0jQAQKAVL4YA1bwJMVW0Q2K6Aw5W1b1Rg4oY25iE5EMfckdJrcL3jFHJiHveRWTiG5VYeSkm69XuYCuQoRMOVpKTIDohmyNmrjVShyTmW9F1gkqF1pUW7eTYVVuOc7CgDZEU73scuFcSagpl4UHs+xc03XD0u2WZZOFFH3wwZcZdL5sUV0WOdid3Di4tZpfbl4GQr5CNTD1GHG4cmP0ru/+gNYTZ/rmqd6ygYB21ZC8mc/1rv/+5rBpurcX+YWrweLV+e0KIYP4R0mgq+xZbzJCM9sAKS3LL//cz/2n/6u/tYxqDry/JtY9/J0v7zZx8eRwqQlVWWw0ZEMZV5xyh7FT03HhjDhLdsOqKAeSciExdXXTpOBtMi6TJHt6Fg/nTfU8pewIiLkMnkuXLbcUIzseFu14TMu4bpduEKSrF+99s7wx1MmU33l1euPOubV+d+/m9t6gdCcns+Vqbaft4YPvffCvVh/7qZ+tbt1F56MI03B7Z+v84XfzoyeYnSsqHu07dmhRk1h9UsQHBx97DT9aSM7rxWo1rwmTSdy5e2vymZ+4LMuUIpT46rufWfzBb8U6SZLI4oYwyzGwU4xFhsrBcKf0q8v4r//J7sdb2n89o7O4dfHw23L0NVk8H+NWqIbDW9e4KNpVk8+eTsfDi+7sMz/+7vtfL9wheuQx5Nff/cSb73yiGw4fN7NP/9k//6nP/4W90e6X3vvWf/m//48nDClpNsq6sf4bbLgePXJexV64jHq3WG9g6yuMGEEMOzEAYMbSQYGmAB1AAygbiemqB42wVy37Xr4+odBbAK6AFS/4WaSb24gSECME7qkVPei+L/UDuYo8vnjszs7OB0i712947w1FJfdlM8SUciKgJCCSUlq39UW7XqbYFOXQA0inhpRzQjegkA/uXu/K8YcfHEPX7WyPX33l4Bs//P7N3dHr965/8OHjJyuoZfDq3e2iLFyqY7v227zPpaj0pgA/4LxszxbtosvmDMgSh4J8XfO6dutlJtKckguuKBlFRxVeG5fDQDvTUewaPxp1IssmRuBEqDFHBInJGFrAk0U3mzcByQU3GAzOji+6rOa58JxiJKdLcJNBKJUXSdQNv/7ebMhUgpu3q8//5CesKtJ8tbcaXr6ffv/Ro1B2r737Ui4wp2I6oYM7r3Bi1HZr5JqTeRHjmGW0vVWvO0PnfcXqutiOyjEGUGwuzs+iLXbu7b13fNE8aaLEvdvFtOTTI654/+nhfDDyj46eOwp3p1vL5enpw+X+1s5oVK1m8+VlaqMNxlONcnxuTx/PC+8HYz4+O7PcESEXVEx4OqkSyPHz+cCNRNYLw9Tq7IOT8WC/ZD04OIjder28RE0XFzNVUIRrt28dHZ60yovH3TCdx9hermPO0CRUii6Utw9uWCAbDUceRU2sI9KL5Ww0DM/PltVosGxcCaNq+7WyGtUpW9heLi8Xzz9YL2Ztm9ZRt/f59Ml8whOSi+PZ8WR3en6+vH+6bGToitARl/vl9njr9HC1yPjWvVeDdKGVNq0GngpvX/3yd7Ym5dNH55PdG8dLCtWkzTWDPbl4PqiKgEMxBzIsyT2+f8hii+WyTZl8SZAXa2EzIK6d3n5n5/lsNT+xrnUUNpNkZt5c100BQEENoS+36pVYNVMxBRNVMUtm0juGVB1THwzq94c/FkolBO1jRKqgfddEL94iYG9J7BEpxNbzBYF6RJ0iAiFsWtIQkXp+IUJvgzDAHi+tdpUzhU17myFekewVkMg2v9KICK/iTf1wrsc06ua/rtJwoKYGZMRSTXbf+fhPvfGJHy2x/ta3fueD7/5hbmbYkgUyVg58Z//a2ONkiK2mxemH2NU3KtvhtLU+5vOlX7VwcllmxnUNMZtI5UM8vyjK0k6e1B/+bnz4YBKCrRM0nbQnIJmDT5rJOfBBukzguCxRBBEImYiAEciBAyhHkCLkFrIgJkAFUVNDYlAxQnSODNWMOOmIdQdyF7E5D/Ekzp/Hs3OOabB/Y7x1vWizPXmwevB8fbBaFM8EPRzs7kxHhczXP/jhgKuXLVg5XQ6qz//kj0/HL+ESj46Wg3v3FrQ6fPrk61/50ry53+iyKFFaKT2nrKKb+DAhickLH4+ZMdKVnk8Gan37Gff/unEf9SbjTVOlaq8o2QttqUcYmQEgUW9h3pTabgw5vQEOEPtsPBgSXS39tkkobnjYileuIkcOkQFJFJE8GAuwZBPKjB0iaAZEJEOn5tS8AmeADNrbpJxlw6wEwIakRmSE2n/fLIJMjoxVEYHYgA0JKIEYM5gaqTMnisAKwCqQVQFAWA3U9XlKyQLAJsQgks2M0aPBRkbyaESiiqQE6JgBqEMgdigC7JgKAkfUU8E6RDNLACn1faUAXacevfPgDEUEFZ2iM8WmLtJq3+VRCCeHi/FgfGNnspqt7o2249efLmx2tvz+3Tv7oZrUl6fhxh35ZuUlbTMsHh52v/Vru0Ma375148aePH4avv/t2R/+uuKiuvsGu23cO4DzZ+gX1bW9o3/1e2XJ89lq/KOfH1L+/1P138G6pdl5H7bWesNOXz753NS3c5jpnoyZAQFwAAIQAwgw01DZpFQUBUpy0RRZtEtWWeXwh2xTtqk/ZIaiKamKJmmAosEAEQaIAQYYYBJmenqmc/fN95745W+nN6zlP/Z3GkLfqtO3q2+dc+7td/de77Oe5/fQel1dzM2kHzHR+9nxS7dOH9j6yfngqeF41Hv38WL32RcWHz60q4U03k/XSWluLuBjWPwBk57yfFXk315ePqj1QsVSx+XCqWF1Ua/urN+/eDS7++57pCHJL3N7OBy9nKa9cW+Q9obTy8HJk1PXXgiDtIGDeAYBMAp9EJWwsawKUv0EilynFjAE7yUwMIgiHyVEBlEAypBWrBUIMLJHQc2BFYAmMIlyTNFHbZPU6CgRtFZZbvK+1UlhC2KlEJljBCcxONcACEjUyBAFDelEJRZNRPTBpkoQ2thCdEgalQYAiexiEHSBUQIoYIpRAUjUHDqtUydCKSqO4FgoS7unYNLLNKmqDY/WtWe8LMEorVDm09VoYpQl4NCzNiUjzu8l1k7M8nyxl2LZy1TE86ptS7c7Hs6mmxHpQZ4BUm0ga3mv3wsV91XveHi8Xjwe91ICSbSASEQlzMix
Z5ONN1W7IkAi1CARVJKoKG1KVLoWU1LGemUQjAusqeUqpGCNlpaxDJUPoV65nsKe1mjl4WwWe30gqnxQUfsYRFHUQAjeYBlj2wSO4ljqql1Um3LdkEkZEi1hkOpN7QMo3zBytEZprRm4Cdi0VJZxOmswNmzs3bdP0zQdZUV9RoPC3Eh7tw6OzfoSHL/6/Avm5v0Xf+JnPveXfvqkrhsOpHF5dnHthdvyoLn+1LXL02XxuL6xu4d3Tqp//ZXgGqphc7/UbXZD98rTGR8MQIXgIrT1sG17H97158sP73/F2lalfZPk7dvv4vvv7BRxfvEI81FVxxZxMChkM38ui/jo1E3/uT+85iWF/i7L6NGmtYrGo2GIftTP08EhhUExOGJXvfDyaxcXF1W52ayXo/HofDabNeF80UQfyxAiSmgDJKRAgvgQ2AeJ7IZ5olNe1957YFCna74/2+wkyTAvDiZRkUw0XBv3L6abRhdr1yZZQlF2EkMac9Iq+gnh7rBnhIFFSGc5pSqkNmoAhehDmSWGSHXhAxKvTNIAhcCKRSJnqWWAPPpbg2TUg6Wzj5au8XDSeACjUxPqypDJ0LQcRcRarayIoETaBPqtdy6/fe+fXR/3/ujP/PBX3/z7vycV1WV0FQGRzUgT6hQ8hW6y6E8SYrO8DNJKU3KRgUl0YBFkiYKAhqKEzv4rqmViVgwKPQoqgSCiUDPGKI4ZDQihOAgqQa1U4EA2Y4gkkGrL6KyhxJNQJOHtTciHANEZnyWmccYHaLwoCBw3mbISMYooRZvgyCilkT03oa1iVGCBMCLUwVtUnr2TKCpGteXQOhEjpFF1r0dQJkk1BwHUzqGxWc1JY7QaJbFqUpsM095kOOop/ezztwZp4oMIxdJtoufQtkLKx4Ah+BZZY+PrTcurjTQMPrPEHKsIyuLOU0fPf/F73/p1A/dS3azB5dpuKI9J/6wNJvq0rvMk72WJTjIXlSjlW9eW6/GgL4lLGExiqvPWxcBRotIxitJaWxU7Iy9y4MgKgAg4smclSjk0AY1WSBaVItCSNt5ahxRIeYwaoalbxZxGDwitd1Xb1lXVBgcKs1RrpRyK7ulkLN7WEyurWioKqIwyicQK2kiBlWgORjMhiRGoY5Xn1ippXKDAjCIsClEjoQdfWVfpXOsisWWsvCIvEbbufvBNXAcACllhRbxhZmFAHRzVOsTWGwN53wSJLlDfWrcJEkkhinCMQQEYjT602msNRBAXy9nOcFcZPUizzOa1g1HRk2azWl5uByME1TkvqBvNqDP2yHba7wqOu4/S7Xj5I7awSDfY8VXsp6OSAm9/ze+hj7Y29KvUz0f/ArYX9qusEHWtxritTgEkAiJotY3pX/z3/qprKh9DrhOFyiWTo6dfqb7xbxPwBhG1AeLSR0CEGI0iJEFmdo1oQ4Oey0b55Fa+mrazB1on5qBQAmRIoSJRoRLtWysCJCbNQiAszCZUl2ThxsF+kdnStdNT72L/Rn+XrH/3vloN5DoNRrvVxplBTjGaGKEJu6M+9wcc3PrBw82iufO17+y+Ijs3d5F5c366Wl66+28VzA8/uJft7pvbnz/+gVfJL7BaTx+9k7TRjq6pg5uYJ4A8IOxLffrmG/0bh04UBKIAXDG2mVIjZZdaMXsQltwMamGjjatqHxkNZ4GTD+6Z5T/u7Q5Ds0HTt9PNer3CPEuO02wwvrdYtXlBVUwuz/279wabU1mGTw9weMbsSNXlT/7Ax/wwv6zXsVndfuXFoue8nj1/e/c//Wt/4+/913+r3yU2EISRBJkQkBjYb6EP6moYlyiCihRenRkhBungo4Gh8qIQLUGikEQicxORAQmFrjIhV6oOQmcQ2J4ZlI5EQdQxOmRrZxMEUAj2SkHqlrTc8SgE6KoK5yP6Vr9fjPJxMRybXlHXq9Z7ROAYg0RAaGsXghCG1i1Xi3nThuBC7qMYX/mGdQqkV+t62Cs4RjWPg0EmLV27tvfh/ceVcLq799Xv30GxoHq9nkpVVFK+d/eOypLxOB2lyfn5ykduGBvfNJUra9e6yBhZXJr0qqpdzBoX0GjDUaw1pIhDvD1KnjssJj2DAHXjYdALhKnBrJdXPm4qZpP6tmGDnXbb+NbmybqOrg0tlFQo7UFrTBAJVZapyCHE5iCzBcZEokewGlMTBgNScuZLfbS/c2N0/Su/+brqmeDpja8/vvH8QU76xm5vN7eXd5+cT+dtYbmNrYqQJJsYdJYXOxNUCTO4EDkIGV1V1WzmWokXzp1fhsKbi+XiuYMby/NK++HpeaWtRuWOnxojy/nm0dHuXsPkSr9YVEAwzBO/9jZwPzcXZ5uO9aE0FKnUDab9fN54p4xjYsQi2xlk/QhMDkHp0kdTxL1xAqvTxfncGO3BD/J8U8Y1Rxdcf5TIyhPx+cUqku+PevXCp46torKu61qnYOrS7+z2bJqul9VwmDa6baN/7sVn1n597fqhobRy1cbVTWgIE0G3LqsszcqSSZHn8PD+yd4kyTRxsAfZjbMHb/Vc73hnd3G2rNy0R/Zytu4nSe/WvtvMlQvn0/VsutQWRdQgT7nBo6Pxk7qcR5hk6ZdeffV3vvblvYMkNendt5s2ALRorVJD02zme+PB/l4+Y9esm7OTxd5ubnfUym8m1zN8ZM8a33pvzEdBYdlq+yIAEEEiS8froe5qzSK8LQHrgjwsHIUJoAt8dWAxvIKLdcLRdpsAV12F2w94pctQxxrs0NGdDnDlSgXEzvEDV1f6jsAjW7KNoICgcBRGoS1LaPt9CiBtkWVdvvoqt8pbtWlrXP3o+4nCiEJbdzeDEiIB4hefefrP/LE/FRtFsRy99KmP7+9+/fXfaRflk9nl808/f3zr6QQjtLNm3nIM13dHroT9/s5uQuk7c/d4qhulajaguUVkLQASwSoFhp88eFe9/InNM7f6+7b51ltJvQHfcgjIAcjGEFTWN2lGps9AoAEQiVJAACXgAwBKiKgziBmwSFujeOTQlb2B3/5hQVdUWq7l3h0FuybX1eNLX238+X2pm7qqd4zKJK4/vItnj1XDddixH/90hRrKtp2fycmjyX6yqcLnX/n4DzzzkhzfRg6ulJiWdizattf78Xm923v/8ElTfODnZVwvz5/0YkUSOUQgjduWr27dJFsbcef/gc563AUCQWIHP99WrcYuqXY1K+DWvNwF7DogUbdcQpCtkLRtXUX8aITo8pEC3H1ivDqY2xcL87Yz4SPXWxDwDEYUM8YAFGkrJkIbvUYCMCIIQhKBHYhXEpQC3fUFeo6gMIhIDEgGmCkwgWIWBUCCEJlRdcFGJYiCCKAAgDQQRwBE0EgcOQoIESkdgCUKKhVBRCRGRuwGsaiUISAUREYEjUCERshECJqIJFpMAASBFWmFUSuDDEZpEWYQgCCROTiMzjkJzjOjtmnESM5ZUixBGUyNqheb3YPhCzd/6Ac/88MZ7Dz57jcWD16Pjx6dh0eff+2pF1+dPHj7vVk9TersyQfvHr/wueyHxjFW4VtfM4+e7Pg2OT89/Yf/oPryb0+efaF967314hI
3mzjyvT/7dC+n6s1fak7ft7v7rAa3X7t1743vPXvz1Xvf+T7cv/R37ue7o8WTM9tfueAfnr4ngMM0hcvN/HKZt7xZvk0iN1955uTyoSposVj3rcqqGtRmf0jpJPmxW698e1p+9ezBCfE0gpuu7j549N79GuoIHqV2tHrSG29cqvaKm8bs5XiY9Q9E9X0bReYSGRUAo0KlFKEGNaBkR6tCqxzBRBcaBgm+tsCglFLoA0cfUClUVhCbyFpYo2KEGCJETokCxECiEpNhz3GMIRqljEkRdEZ5QkazQSBmVgzEwCCayMWWJEbv2YmyQF3Itx9JWKMRUCCJSAxBongClCiEqvYRRHfObg4szMBKAVrkDEUJawQgBC0foeLJwSBPdsY6QlhRXNaxamPfWo4GwJTrMhpJhxNoOTR+bPJUlNVWt2FS6PPzRbmoUVufpb0ss6KgXgGFYWoNQOGYGFMJ5BfDQSoYKJWyXIbWEhhr0qbeeI8UHXK02lrUbSzrwAFw5YMEL0lmE610ispoBlAQfAOgPMcKWiZs6kqjDNJMS1RWUmWKSq0hBkHSynnHipTBNkQvwVGMHBOrnA/KiUJsV04JJMihqtdOyjIorZAgsSDIpBBi1ITMbCgMUxzkSROAdVJXwVUxcpPlVs1n/WJvsTy/ZpaLe+8ejvo3fvi13c984fsX5+PBYK/gi9/4rbu/+usToX5v6A3iIGu+92auFd89wbqViE2iw+5w8NTu5vRBRrFZLBEJldW7g7SXrafwXu+5yR/51OC7v978+rfGB/0brzy/+vDD2eVl/6nDFSduUwZFSnDoU5m2R1EPXtpxe+ZiOvfr6ozhsDgI7SacPk6LbLOsRoPc5kql6UY0at47MuXqEqSdnl1AlANrn33p2cdPzpeVc4przwoZEGPglpmx+/OUy2XdKDycDFoHm0VtVOpJfChzhT5EMratLWLOrc8IdvK8h3a3n1zOVnHjUwUjTUZCojSjpEarVBsCDp7IkNKJVYAYgQVRozKUa60ItQJk5jZ4F7w2SaoVMwxA5UabnlQRVj5sWn2xaX0VekShKbVVlpAQXO27q2mSmLaJLOrB6fyXfvWb29tx97fIQBYFMEnIagItLiqJYBNjYiy0BaPWTRRU7FlnSkEkxLp2EggASaMi7WPACBYzBZCgYWHPESMigyaQEMWL0kprZOYA2EAkBA5OIwYQRCajLCnwHD0Bk7WERmphITXzfqJj3jfzTVsyiBdg0JqNMj5G4pgpQhAj3Apw0IYBRVjTqqrGvX7UMTpWCYEDHwO3QYv0jVYIUrdG28gQGjYK0l5/t5iMB+MbT982qYkUh/00zVJjs3JWQYjT6do1sHEh0dK2rfPIrjmdXaajnouhbevW+b2nbx0cHF673v/CF1775jePfuutN6aPHlobbj/3/I/8uz97/darn/niq//jP/p7y4ePtKWnX/3MyPsny1VVjL74o5/Rofz6V796TKFwERtXeU76vcIqB1FZvd7UIqG/ky/WEjmKQkTFEQB0lKCNJZYQ2giijTGIggETg8bUdZvSDmirjUJmo8UYAqtICCWGCOKDFpm2rYD4GFyMDJGsEYVJYrRRBK4ysvAckLTBoQZuosqS6B2RJgXCFLyqm+g0CnKksLuX6QAqQvTRB3Q+RkCbmMqr6TLEMgkN9PvioCVSSpNhBmHnnEb0UVxAJmDtx2PlNo5bqWoZDgot0bWNRGmCz7RSgrZL/LNFUG3wLgYXnTJoC2sSvVm5LEkno36WpmUVi7xYTcvrN2/ePt43zfr05KR7Cowm3dXdXKk2sWOTAnaN6VegaviI6tllFLqwwZVLCPkKRrkd5X7P4Q8f/XUFOIbfCwV03VaC3VAkwoTYdaIwYYjsmyAQU9Av7h/PH76T0DDJLSzLTcnOt2rch36OrfMuEkB0KBENCDB7Qc6UsgjBm14m+9cne7d5dr5eztAOuZ/gYISh5TI0qxVpDVmujIbQ9DQk1qY7R6fVVF97+vnPfj5JipNf+f/gxeWk12MgJHU02XlwssKsV2PdGx7ZXCHmvlpAU3FiaJAuytVwMnr1j/5hv6rXizo72mvapbIWjDJJQZI109nxzsH8fBpmv1W6mSkoneTj3WcJEzUaqNQwh5YBCbzOdl/89Nm9d/qjZb5/qzFmoMzqN35FzVcWJUGVBvFt7SEMdC4SjNWXTS3jXTvAtm3LJzN/etJDiGFtIe3pIoLEy8frs2/q5z8dld4dD+nJ6vzON+x8vnhr8xLpG8PebCNqlCaGN8yDfi8mxXzZBqHlao3GXr/92k/+2T//q//Dv+JVuW2MjdE7z0JoOhj2VuMJ5LUye7t7ZVOGZg0OQKmP4l/dbt8ztEG6eL5RkGqKVyXYsD1IVyazTkyErTloe7SuMOpXl5Crq8IWlYXCjITcpSUJZNt3+z89mJBkg93rN32EJkQRpVUSuBXmwIBEztfsHYOr6kXj6yjKR5iuXaLJ9nrjvb2qqvvaOm6XVb13ongQrAABAABJREFUYy/Rcnbn/OL08tbxXvlBs7msQ5uULaUj+9zzTyVuNj2dLRdNaPFkVd3oZTF40lSyXFauqVqKHW/MW600KofEErM08T56FwwZTYo49DSlRjvPk9EwLeyyaYzBxJC09dhYCM3JfK4zU6R51ldluRmNk4fnKyYYjnuYNb2dDEFIpJ43KjJZqKq4amKMBtpABBngKDOa3MdfPF7Pn7z86nP33n9w+WSFEsmDqxFGOun1np7s3fvGN9+8rNpVcIDOu+O94vhaf7g3Cak1WaI1CmDaTzOrfBNcaKE/nDe9ctFOH18cDsjyZneUuXdnss6J2xzh6U9fNyZunLfK7JiJUebx+WkdAhqwRiHAzm4yHPeVW82g7e9ku/v99XS6uz9KCAeHkwePl7X37y5OXz48VqLdsjLav3zjhZO7d8Th5cP5SnhvXGgyZVn39goffdJTjNrq6Ovac51kOTKlxo5HabNwCakBKRRUteQJWE3StpUTiGbdxCTt7Y57J8uL51+4WW4w0Yk4qVqf9P3Z/cfV2TovaH8vV6qtOZoc2s1GWSarr+2/8Prv3rErTAvTLE77Rj+ePl6eFLSWFoMl2k/zhx+8XVhz4/rO0odN45IWi4xqF/aHdLg3fjSrv/ze1/u7GRFW85VxlGX90WgwXWwCw2i0c7ncFLRsQVaLxa0bE9OPVVt9Ymdw+eF8GJIioSDJcM/Ch6vtk8DdlV4+euy21WYMANARh4Q7cgxv2eogIhBBGNBohSwMgIiMwh2JEfD32PNAMTCpbWAZGFHBtrGrkwuIOj13e/8FQGAF2/TpVdn5VdgNQISBY2cUgi7yA8Tb8E6H+O1agQBQ1EfrjKtorGz70rrQnaBwBEZQolAkiFKopOhZacrUjixlrt7bs4Oez0WK4vD60Y3DrFdMH92bnr5LUvayNJe4NxrIaukvL+TJY1NxrFmxYhcANUOLLNEHTQBBpVmajfps62Lni7UzzTe+akNI08Q1nizavABthQU8K5UwKUj7YK0wI3sgBkUYBUIAXwMD6ExCg6EB1y
ADaIWkJDJAQNKJ5/Jb34qno+zZm4hJ1j9oTmYq3z14+cbq4sScPwzvvtezSpJi8Ownqt6+NgGo5O8/bh891KN0vWpGz9zWO3tlhg4H2agXmnrXplxfLh/eTaZnn9PL1bD/pRc+/s2z+19HrJaPed10uKLILMzQ+bw6mhKAAETGzsJMW9l/myBUCACdvwxYISIxM25zU8ASO4vodmEgAgSy5VMTEQBvU1ty1aoWAVEIADrQ9VUnZpd/B+60w23+DIxSCAoZ2MfQeopOkCMER0qzEDFzFOQQIHhiTwRaAWHoCsYjA5MVAUHeutMgdOQuHQVBUDrliVgTkCIUICQGYlDSxSyJUJEynZrGGCJGp5VC4ggEoCF6YQEWRmZk6hZ7AgoVEmqhwGKUSYhAukozY4kESUswZIkSRRZEsQ8ighCDa6Nzvo7BcxfSFgHSOipjNTByy+HwlRd+5s/93I2Dzwz6Q4l8+EM/ThLV8vL8wzfTTC1pufuJTx186jOobPby2mqFEbLDgl595uzv/Pe9xwtauwQI3727eud90zgcGTPJ2sIszx9lt8Y43kfWenRUn8z8+cqm5v3vfHf3Y19s1jO/su0oAcvz+XQw7nHTsA/tYsUXRBA5UuVdsjeYpXBxdrqqmt1bO361KSNVNfdVQrNNtonPgB2MJsmt/a+dvPekbKSaPmrdyYWJHARZG27Cerr+TnKjPhwlZHYF7fXrtxIDp48enj18FGNkL0oR9bUeKnWooAeog6ga2hC9AFL0bQk+cESliXqWMmCtlFLMkaNnFzUGxsgRfGRRAhREmBSlqQHq2vYILbFKbGKQFSkU9t4BoXAACCFU4isAJhEFpKKKJUSjIbNWeY3go1cxqtBtcFGEMTBzMAyRQwwxEFJEFMURE01aRHMkQZCoQEhCdF1AHyZFnz1UFYz7ezdvmLfuXjZVtIyLy02+7NldE4Q9sJdGRFwJBQ0z6SH4uq56RT+rY+nipikH+zt+7fI8uaw2JtK+ttpBmqY6sQFimujpdGHATiRL2qgkMXrimIMHI24nST07q5GICERpHUhF0XnW3x31y8AQ2FcNI2sE1Mo1zUo8oMqSVBMLMIaw2lRGaCdVWYCzGjaMfasdt5m1ZPV5WfrIEphZxAfwXmnZG9LDh66vwZOqA01XTgMPU5P0KbOm3njUgMwGqHWusMYosL1ivomZUuNE7Q1sPZ/+J3/7fzfce3paJd/9nS/vrmd3Hi9uPPXcOMeD4wk8PJ//8q/G73zYv7Rmpz/5zAvpJ/b2+0+d/4tvjM9PNnceWlZOsPzYM6Of+3PlJNW/+Tr8v/9/sKqiC7Y/QNDtdJFa9fE/+1ObXX/4Mx/f9P/+6vX7utibufu3P/25EurB6BXz5MF69ah5cjqIZhEjfPoHr/3cv58Mkk/f+/b7v/mL7Wn51tn9HjduvlqScsJzo0yRHj7zVJaPHQTbKyTR6aZyjarLZnpRnUxPgmtNltlEr8u5aNWG4FwgRYnVlWNppJeaug7zOSsM/cIAyvl64aO/cXRDe1mU9fnaoY856d1+79qomC0Wd+8/SjAt0Oz2bErR+JCkqRdfR1YQLGKhEwWgSUUBhVogBAQyFsQLAxGnSpygI9NtlQipCjE4z4AZ+0luWqZTV/ZyMxXTOq4ZsVGJ4p7C1GBkiBJYK8ooIBDSbP77A2hRwCYqS5W1SblpkJWPKgAqxlGWQcvIAAwWSQHGOgoEUYqYIhBp07ggSKAttN4QaGEMPusV2HILXbkTAyoNAqK8h0jQYMyKRNiH2CqtAwMLE4nEMMhsw4CeCCIIE8TEWIskAMbqvSE2lQ++E9QUIKYmESJpS0JpG+8YjbLIEBgEKCh0GIwSSMlaYiLnOUk1uBh9VEIcYqoxCk52xoc74xvPPnt4fD1L8sgkANpAYrAt69XFenU5vZzPV7Uv1+Wzx0e3n7p1eH28mrpH9x7YyD2Fo/3rgOrgxm7QdP1gPzQAbK996Q//0E/8kYuTB9cn7f3vvFe4Mz1993MvXMv+2E9/98sfzNbxL/yFvz57/1/90//u79fQPvuZz2uii+HRg1/7ZzcGe/08HydycVn7Yd579uXvvXM5eWaArnRvfiXtR+VCVTXaaKM1orcGfGwMJUyEuqt2UIgRC9xI2yNp6irJBFCJq3xZtlVLRsUIkcQHjm3wESyhRAcoiUEUpYwJCE4QYkw0Nm0gZcAQkgEubQSpGlaU6yToWG1chIQFQuREK2FHzKESEuondtFGFLJWDRJbO1yumpSVVtitdBauymxfgYrRR2FAhanCgCjioiSYDgg2TYhA5ESxKhtRZEQgMUlVunUDHDUhjYq8qVcaUUiBMDiOErVRCnyRZC42wrha1Pvjvb3hSAtvqgpgGzr4yBoC2MGkgRE4Xt0Ounl+qxZ1v1Ao/j4m8HbG2zIot/7w/2m856PhbHvj72IJ28K6bRQNt4aR7hMKYvCBBweHr3zpJ7Gpf/KVm7tN7S7uPGlMPur1e5Pg9LrcFLoXn/pYlXwYzy7y4LW4XCO1zACoSEAMQKKpNxiZ/dtPLi+MUvHGCyHb591hcXikLh7g41O8f4eCixyir0aFTqCVdbtaTRurR7dvyWx2evc7e6oo5WIRWiDgxk8fPHTU6xub9jKlBFBicCZLOLpyswZfam0jYoVAo2zn2k4kMkFJBHucFbBzuTinzbpAiCAYN/tttazqiOP+rU9v2prTFEh8bEJ00blGqX5/tPv0M/7uO5ZncX6+OX3SC3M0WgtxGxRGCb4NAY24xtMwufYjXzq9/bl1QzQ/03df1/P3cDWXdYgRWCOY0Mznvvid7Hjg24I3IXn8er98YgW0C0WfBkO3O5aL0GzW83Z2mU1Q++Tm/u56MbNZalSomvL26HAEim1Sl1Gn5JJwa2ev3x+9/sb7eWIDoBFPHP/y3/hL+d6zg8FYZPl/+s/+140XLwYQoggA4VUKMUR2AEahQtSEljjGrlybPsJdyTbAKLA1GXRGBiAQFNj6j7Z4dAQRBowgxEBb7jYACFztnj/iqG+lIpVGFwgBY9AgHkECQ4AgoQ0lx00IdVOVITSublyIEiUdHyWDw95gXG6WfrUWCLPZlCHRQfl1rBzt749PV1Xrw5PLlZfY28lHhZX1+fv37lWlywZ21jZZkrjoJfo8typy3VQhNhOrKIALWNWx9I0gIzOBWAKwKIREuD/u742yxGhMbQ3aR6cHxvYSwOjXvrCD2XSjVEpgkKFp2tZx264x9/2eeu7FPlilrddamJTEYZGqwP70ZLN+wnGTxCAF2Ou7Y6vivI5vvndeKH36lbttC6GJAGRYQeR8POoP9s7O6tlJ7cjHnppuEMCsGvrc8aHjNlV505bSNmnej5JCFG6j25QmS87eu5xOV72xfmYkVVvvj29LHP7GvTteeTugw4PDJ4uq7rELLrHBRa8G5uBoxNHPp8vVak1RuxClDSKqanzZNDYfVpCJFSnbG/sHUacfPjFpftQ289JtTpcuzcSHXFEYjRMXwkyocq7Isp4ZDQ/sG3ff2j2Y3H3wYaoy6OlHi82tv
YHGqpfao/3R45NNG4KEMOzbRV0N+kVQeHGxLnqTp/Z2dkhFpujXsoEspkqr87P5nUcffuzT1x5/+OatnZsS200Vbt0a2lw/uXvy1A8cLJr1o7OLzXSNUsTR+PjVT95/+/UNu8ntpxylvRHNNn5S7LYn5zf7fQ/VweHorcsz7vX6x6NQXT64O9/bH+z0G7VZk3Klb4b7B4eHyQTtyZmbXW5q54ziW4d7yrRZqvfz3klkNLoR2awbdF4b3du30u8vH69HN/rwOwBbA4d0ReUhRLgShDx3ICHZFu+JxLiNFuGVf/QqXgrblwaCCETh7hEnpK7wEjrgEINghA7e0WWNtjllvCLO81UyGQGvGrK6Zxi7kAdJ6O7i3UKchblLPSN16bYrGYGu3CUMrESkK3Ckq9at7qWzzcN1USmODEQAICSo5OGjtzare+PdZxUZbH19OjdNeOnlZ/Mb1zyAa9vj3clx8eJ09iDP8yI2I6nTOF2+97qGvO4d6Vzr2cK6BQYPElGY0LCPJGo03yz+7n/VG+PaJDyPudfsY0sMJhGbBm1IpyAYPZG2ONxxWaGMFe8VRxKG0P3fVCBkEBlDgLaGVgFqCAFUFERBRi8QQCP3lmvr1nh6kdo06LTvm2jtlBtz67YPm555ERezHM38q1+xk8Sv3i92b7jzBbZNdbHKs8SdfCeaWTw4Sm6+Ska5chnZ9xLOcqreuffcqA3abppl7+i544Nbv/HGV97+3utWIYJIDILY5fsQCKQrwgMC4MhKqU7E6foOELv3AhERM4MgEF35RQMIkiB0uUgQUld9BgDUNfkidl113bERuQqcXemGAN2mAK4GErni3W0PGDFAFPGCBiVwlBjARyURFKBWWvsALNBE5VvkoCRuvUII4FthJm4DoBgkAsYIzFqQGKXjCgVgpZGAYnSiFSJqVBEJAQl0ojQAsUgkjcBETAQJG44U2FN3/2dpQ4gxAmoE1oTAZFGlyioiRQa0jhA5cFcw7CKgShUlNrAlg6C7mDaKMIdYV+y9q11diWBnJqy1Itc2Nk2V0n5Z/eAf/9M/+if+0qA4jstAGisG0eQDJP1J/rHPGanmZ2dmUPgoNst0pkLV1hLa3ev2h/eztEi/8uXwld8eS/CJmL2dchlnmfK39+W5g+S1T5fpoe5/Aki5GHsnl4v2wh/0J5/7vP3EF+Jyjqt5BmH1vbf5/Xezo75/sJ69ebdpQz/vC0OQJivI17P5RdmUEUgv6xpijFEwS2xmDROJaTewY5JeTT9187OrVB29sveV9+/8v77xtbirKM+AgwCWm3J+dmcIJs+eMb1DZWFM+2gUgJ9eXPimTfvKjig7SLAPDC7EoDgaH0IVGgc+OjKdzS3aZBhJG03iPftWMLax9hw8o1LWinFAIspoQ0iGVABCtBQR0VhlMBAScohKIvnWubW4JrIDiBqUC1ERbvO21vhWt5XVohV7qwwAMKDnEACRkX1UMRpGCeCjBA4hqkQbI0AYUUIkjEjMTIxRQKvtTDQocm6gdu0ywGLeDpI0Fx8CrsFXVRlLTPOk2myid8BWic4BjncnvPHTCjC398qGwZcRL6tqJNBPkgEPHGMUPjjcXV7OfIgtqXXpB/mAq3rUS4n0RoWNrCCBvrUaYdN6QWpESGmTqE2IdQRtktW81o1TpAprU2uX5coo5WLrfRNAmCHLjDHST3rrxUIwQY3DXjpwAJX2rR+mAUFvyuC9GOY8S8/dpvWh9RxFNVV8+/6Ma2kEKhUbH60iFWPl/N1p20t1z8JORiwQPYKhWthXNUfbONpnHM2qdF395f/9/yo/em2x4mePhqdR2pPNNRmZi1B/8/v5i9dPf+Vr+q13VUyTnd3DH/vc/Z6ZHU8CmPT5I//o3dH+RDd6FuLNP/XT5/tPn6/nw+svqYPvq837+TAR9u3l3FTrD3/x5yd/5t8VluXFNG5O91+6Nhscm+SLl/MHGUcop0bbPrEsp0mWDI7Gyx/7dx7V15M77/5wbrPD8UMRVmAwQ2QRPj6+Hl1UWZIWfSGlgerNZjOvcp0GG2eXFWuDmUoGCQdaN02/yObrkgAVUAjgXDBAWaJyoVEvd96BCrHlIs0Gg2EEriufREhFhco9vbdT1lVT1u8slgCcmaxnkjSKeIcKLRnnQsSYawWkTZcrQY4dKBqCSCCjAQExAel6j2KiwXkVAD1jBFGojUYmYNC1C7UHRXovM4HhQVvXgiKqr2z0zqDsD4oQ9bLyZRMZZZKaGMPvk4qKIlNWc4yMkmTWOW49I+ler+dDcFXtHBAjAAopQHFRQFMQUYkCRSaxWarJatNqX7Y2ocSoSDVh4MieABEVieMgCixoR7oiQZFCkwUBjGgUkRKU6INWopQoAU3KCyALoUFUjZME0aJVhivPdesrsEbLoG815YHQx0aY29AmWgXxhB0qkhv2wUejLbStAczzTAWxvaw/HI8ODnf3dousrxLbL2zPWpVmylpCVdWxrfnsycV8dnn28NF8Pq9Xy6yXWEWqbouD/aduTGyWVcEdfuL2pw4/oxS1S0cmoVTzuirvnS3Pp2eX0xsvXy+O93OTDk77l08WO3s7tz/38pvf+G29XKZt9fmPPf3Lv/wPbx4ubt3AR2+89d/8rb/2f/y//a3X6rb+1jqfOXKLeDQcD/OzR6d3H//uT/+FP7mKk/2bN9/7V5Pv/bf/3c2eTTKqmoAI4Fswmmt2WimjCSEgkjHaGuqlDUGCEmMkYde01XrdtL6unQAGZg9YC4BRVRTFXCChQKKUMchEGlBiMACZImvsE+fryD6yVkjKsiKbWNc6Typm1tWslO4TZQTG5LEKBFSz8q33pJLEZHmqvZi6HQbQhKRVYq3DaJWgGBL2vm1Ba62UABPGymOg6XkzyDLNThCVJ2Y/7vclUWUD8warBqInZFAEta8igM0TiqCtCQ0nxu7mcn1n5+S8NNoaJSHag93dG8f7RDxbrGeLrW6K2JW5AnepMZTQFY4Ao0AHUiTolsPAW2P4lYNjGxvrpo4tqOhqBMOPPj/gR1LRVZfvFjQjwrL1ichWYBIkIWHNP/2H/+if+oN/ZLqYHl/fP0rU5nI5X9TJ/o7K016Wtk3IdychbtTebh8+Ed5/K7z7Bj9+BGUoUt1wUCAQHIIGk15MWzVc2xsvqp294bUbdZaJpfXFjDeZ3d/JcSmrRbyosYohRlDaswkmHz/30qJRdvNk6BprVNMfa9DBuxWLtUqJbOrp4c4tleXoW3DeWipdaWOgptU2VzpxZFiJA8fBJDaNQTi40vvs+DCxqjl98NSzn+ZNRbFJlxWf4ex3v8M7w+L4GhUDJpNMdBAHJvfLypycVt/+bd+IFYitg1QLYWDjtY3WpIeHKhlJq3EzE7tROtsfjldGiv0bcbjj3qWq+TplxrABHwyG1OD6/hvqNx7tNKmcrqraMaBD4ujbeZMcFrsvHu2M90427WTzYb1+xGanKo9sUTTVanFWXZxc7oj7uR//7Bu/9VsfsLor4CZH/8Ff+Y8G/fyd73/3v/0H/4RAdvu9v/o3/1O2ShKDphWX
/fE/8Wf/0T/5ZxQ1A3d4EhAgYA3ETKEb2RUCslUYRXyXULgyCsGVPaFrO+q8a90pIgTFsAXyAlN3wAQids05yAIgomBrJtrSLKCDqgIAEIHCmOQ21rFpYxM2pDq5bgk05iBdHltRQpYAMRmOd66/UGLz5OKcVnOoqrJaB5F+P6lWZ8+/eOt0sXm4XBaZHD17+Oh8bUgf7Re+XZ49OfOhwcxOl2UE0phoK71xejZdVR64Zi3ae4ltDIwlaJv0FArGMioBwslkXLtonNJBgVhQOuulAELaYs9UKooBnvS+++HDVTsn1U801E1jdZKlSSNy4+mDbFfXjdPC3gdNbHsZYeLEmsQe9MbHz+j5A/8oTOtpqDlEHVuDAcQ3wizOh0Gue0PLTo72By3Q5vHi+RuHO5//zDdf/90HD89r1R8M83RgWgk+BoibW3vDerbKGdrFahNgb3fv7uOz5Unks/Uem6Qqfmj0qS+/8/X37jV1aNnrmoPi7M47FzvPXRs+c/DG117HVVXsJnRjZPJCYhxNTH0aJkmKOkxP/e5eMQxKZ5bR3tssk5j9wPFzi8dnVb1JBBazuY6Mhl5++ZNPHl2khdKNPHV798179+uoBvuDQZLNm+ZyuvFaZao4GI3HB4PLpix6mYo46h9UDTy8uCRjhFkZzntQWLtzsHPxZGZ0T9TA5vt1NdfC1w8OdWIe3ZtCqs8Wm7TYu7zUo51dIl6u/NnFZraS27eGh7efOl04xnygEH310qevn5ysPnzvrUGSZ7h7fPOpt377e0Wqelk2O1mXjy+fvTaalzVMFxrwcrk8bernDsY7RwemFxpYZcYd7vb2Dm+8/s7s/BLdpe8P+vtPZane9DO4eHC+NxpFprv35qt1m+m0P+z1RjWBZ5HFuiwrH+v20d22ewpCjNt4GHd18+w5RmDewodEJHahXoGu5ZBZoiBxFEQhQJZIhAAkzNt2TBGQeNVkiIEFriysXX+mIowcscPicSdP4dZt0uXUuvfLFbG6oxZtO9qEr7BKnWi8DZ11oJsumSpbiwoAbSVj7vD2LCKiOoo1b22wW+1pqz2oGAUNLNbnd+5++xbq24eHQ+tLqfYP90HX3i9ihMRmmclbacaHh4VN88uH8b3Xq7sf5BLrvC97Y+/ArBbEjfINKMWkIBIpiqtWIU0EZdUwOUwtcvRkGMlmOgA775ByUwwRcjC5DEZRpYBEVAgyC6MPwhHFgdYQPJAHQDAW2AMHCAy+haYErTuOjgqcrAU3a1Qb17SKAuSpuTjBZuOGJg4L6B9anSUPZyaCzW5sFuPik59dffjN+OTtpG1kUUmz1uf3ClfBeB9tUjdFuZ7T/bf9nXcbTdjfKYw9Hk+i6cUf+NGqbh6/9w576hzGcbt16gYG4a51QABFqPOTIaGgsKDaThZEREIiCECRGUUhgiDjlcmsw6N3J6TD+HSOUkLpzhlf1WRidwS3AiHB7xlWAeT3cjcAwF4YOTKjVggImlk8SvSiCQJqQMliRNd4HzkEH2ofXAgulHVsGxDGoG0UKkgpUBBEoRakiDEiCBAoCByARBHUHJAQjUJUFsGQIjHAAEhBJG6zeAIKtbIQLQsriIw6Nq67FaMYS4pEFIAhsFpZTagxMDKC48hIVhNARACjUw0aFGlFHHxQXoLyTaydX62c94iAEmNI0FpNpMlQ6eJn/uCf/MJP/VxaXGuamCNaYVAoAIFEae28B487h9eUomZTg1Ga0qZq7Sh3bWDM9ac/ZV97YbGf4ze+yWcLpVv33PXJn/yftU/d1pO+1STOUF0NpMT1ic79/muvxuEg+ewXY3+cibCwcMg/9rnxh29uHp9IfnZ4dGwCxE0zf/KYy+V4bzegX1T14XgyPV+FVPDaYHzzmYffPXcxBO+ySdI/HA/G/dXiskBtvG7eOHmF9j/VFI+a9smaS4rSR0F7Mi158cH+UdPbaxIzkH6SU7ZnbphJ2pRTnYLKFaQUQJjFkomNa+qNr4NI4gMi5VqsVgY9KuNjFWN0sWka17bsBMGz0jZ4tFYxoQXUhBhBERKBGKQQHSAo0qFtxDcxVK5ZxbAJ3qNCUToCEnQ91x3nSmHU1BAyJGiQGUELJxZN45wiVKLYB4jQNF4x+UhBjDLKasAomhSACCtBctHHCL7ZYq0vLy6v7++Px2Mpq9LHfmZ7E/P+h5chxMWTzSQbsnZtE5qA/X5aVQstcC0ZtOtyNauwbI56WZvpdRlUkMQmzjmDahkdJfoi1JSoxvlV66IGbYteYbhpGxIPsQlNmuSxly0X3thMUCKodWhd8JpMkhcSDTQrXzVNjJCbzKaFSUARIFvU0DiOcrIud3IBBpNh55N10Smts6Qd9DIVRFjt7Ax4VS1CXLm2CVw7rj2rxIDGRQPPHh6//ejRwgHEYIiYmUCAaMPROag9CoZCY5aauqy1TSyqiQvHy9XHC5sfXr/93Bfq4oYKs7f/zS/F1z8Ysx739eru+fr8q+nbh/ZxmbihFDZmasbkNJtBkhzupPDM6hf/pdn4RBfjTNW/+Rtj0GE3+slAfuDj6uK+q1hHRzHYIhsCDe48Snrj9itfm//8vzW93f2/+YN3Xn6eZ4fq678WHr1nJzf0ZO/o2rK5fNJsLrP+zaShp2jn4a/8QtnMy4PbeNwTroobN4rEqsg6+iK3GJEBYhCjIVVMudGmdxIbm8DB7R3McPZwgTNdV3VqgyZNDSdW7RRDQzpTql5uKteiIkyLcS/NsuJitmJBHfwo0b1xcThMN+vVvG3Yg0bqp7ZA6WGwGhVEJQpAlAAgGAjEmCniGFBrFAC0LA2SQYQoLZEmUCipSIhcaW2ja52YQKKVjSQ+BEblA5e1X7exKpcR5fquJaXP56GuqiZqm5iqWnv2pBJUOrEqALvW/z6piLqv07eI1FYBmRWwTaxGjhzKTRu81kSJJs/MQRQREgkIaahctTMpro0TbZP5ZVkyaQvWKDCwWJSRNBORCBJ6dokli6opvbRYN2IzyHMTRVzgfqICo/McOuU/V6km7cQKJgYUgAFlVGeoFqWRFLjgWx90TpNkkKARcWQMoziIbLrMs2RZIsARJbekxKRJb3f3cFJM9o+Oh5MdpawKLSEE8ezdbDXfOF6uy/l8eXExXSw2dVkKCgQxSAObTLKEvBzcmNw82r3/7gNRpo0m3RkKGywXF+/fX1Lv4OMfz0Pyb//7v0smpNd3qw/D+sG3PqHy+b12WNoH//rrZ9/+F4dH2XPXjl/4Uvbew289c+PpT/7Ia6/2q9fq73z7O2+/9x/+h7Ja3q7O/tzP/mxjnvn5d9+5/9Z3h7X35xUN8o996WcuVuHTX/yRp8v4zr/6+fH1YW/36MO3H/QuT42yNsu8jwYkBVM3rRnlHmLQpJMUGHzbbpij94uLRbUqQdC5KESRoJUYRVoGLWAU5dawiAZUACicG6M1J4lSiP0QmOi0jJAqATAiJkXHjEq1LgJgokghQuTlUrTTRgsaIUO5TjKNfY3V1NWXbUEogbNEc4iMwQiwc533ywu3KFojgSAxtSKWQmTSqp9owtiKzzP
SFtuGq1Y8hLzI3NIpwiiKgZiVULqu2LlEJcUnX/nYW299d1WzMd6i2hnluzv9GNvlenE6v2TeGk2jSFdVzNhN/xClM2aLQuwuAN1U1jW7dkALgqvuKdlKQNspDOHKC75VkegKYgRbTQlZtlcI3v4jQxcX2DKMUCI/Pd79qVc+Hz98q0frZv3gLCNVHOQ7R/n4AAtD3GpNPniNNsYQfbCafV2lqIoEnXMtMkbuIwCrtlWyfwQ3Xkhfej4Z7zA7iU6TPdjfof0hxHb1br753jeyIvSKnOtYMg5/4LP01FO2P8zKTXt+ml8/btcrOzrK8uz08twL7t88XF6u84PMscP5pU5Sdm3jQqJUau3sve+X5To/PoDrz4MyWmiLRoiCjFyFzXodCcOgXw0G5NjduxuXUcDF0nBzuTi/h+momOxCkeS2BabLb7zR3n83C6KEle21UTc6M/2sobz/zPPNeHeNmPWO5u/el8v7Qzvlxw9C9cuGVBXKuFr6R+/1PGyC7xtjJBQWbGYTBH+yiNFELxtPLUAUThKiyaS6ftR/+fNgB/rhSXVxt95U/YMbq8WM92+KzVDEVCt98W7v8p0fHLWfnqT/5m75QVOvTu+T7I0G9s//+R/98td+92d/+mf7B+NF3a4Xi2TQx5o+8akf/6f//Je4cijBUBdjZI1kcIsp8SwMrJEUgVbAAcI229IZzT5Ci3QJmO1SGD9Cm0Qh2noJVBdhQ/IMuqtzgi3apKtLUvBRjA0AoG3buqlFYlO3kaMSw+KRwJKtpWGpAdpES4yB2ffyPbT9+eJe1a7JN0kCdVspq0lZlZq93X2vYzE0ZZDRcX/Y07PV2jCfnzwRdkJKiJrgRgNLiEUiJHHjpRJoHOfahOCFJU3Not6kqbbkow8E3NZNajIIIRW2LPujfH/SZ1PH6NJE60LrSaaMb6kN0KzrVQjQL5SiSEYLs3cuHyXJOGnFkYGm9aSFY6QIuabAgIJaaWVx51aW9I/WJ222pERAKrV/uPf4wePp0nlgnSc+Qmb5mZsHAnK5XD14c6mU/cIXPm30Gx+uXJHT4X6eKBqNDmbz9Rvnswd3753O69qzQ0oQnh0l1ITUB6PUal3/w1//cqYoG2Z+3Vw/6OWjJDTsVovLd+Od33lzdjbVOl57ZS/b0Yu539kbJvuHMM7kcvbhOw9Jeq/efvbJ3RMfQlDN4bCX2369LDXEYYaoCglMmlqAxXp6ur44SIfGJI8eXBgPk9RI0+wMi9nZxehwJxtce3g2HxTpvdPTp28O5/Pm9HQxwGtsUjapFrTkD4vsqMhmq3qxmV872K9ndOF5WW04eBvUaCd7dHJS+pZsce2Za9//7punF/eOd9P1ajEp8mXNvbw3m5V3P1xSUhzs6MTpNjK2ZRH9XtQaNaD2Z6u+0gQwqxc3d4cT2NW23eFcqta1m2vjLD3oX1xcWivH+7mG8N5FffnYrc4XvlYcQWk7UnyUhaxXVL7pDYfrMj56PG/JpL10c9msqs3TtwrXVq7h2WoDJloUK8lWKuKICMwgHIWFkYNEQelEHIaPMl9dUVh3T6ePOHVdJ9P2fQAiwgpJOGIHqWGkTgJCZN4yqIUYFCkE2maQtsmiK5MHCkDsXhSdAVZ1bsDOrbRllm0Tcyxb9xEy0hUFm7YEsw5d3OkC0uGOu28RUV3hjbY0HOauOZEZOIoA1C7cefh2kQ+va5VMZ2PRO6PJSqtms1HKIKsQwTchSfKClbvz0Nx9xKcVj48DA1Zntqrp/ILagFHzVWNo8AEJAQUIEa0i4iACJCQSPIIFSlTWZzOg3iA4i6xISKHSxooLACRAYgxoYvbga1IedADlQQIEB8FBZsW3kBYcSmgqdB4dEzAIg0QbA6AD7zMCKNdqkODhQd0bx6Nhu7Mr/bTIc9XYRabi/g4skma1tKxxUcGqbOtv0dGhun7g6oTKoBelbpW7mKcB7f5ub6RjE6vGPrNz89Tc875CJoBIIACqy3sBYtzujJA7tYhQWIAQSSEiXcmJiBRZCCFud06d+auTnGirpXCnJHb9B6oLFW4PEHQaJUDXv4DYxdKkc65tTyoLAH2kFsXuVaK44egDpkHaUgQMJm0UUVGYQyQfVEQMjfObJrqG2zZsGq5DjCQaQBNrIkAtqJCCREBBhSKRIjIEIEASrQk0gEJRSIJdkWiHLo8oQoKiCBiRQBAIkZlQocLMEgfxIsiMni2SFiJRKJojWm21BgFGcE6Y2SEQihJFoCwgEmlSKBgAowi1jbgAbRuVgCYILROpJKGqgYObz33+j/1Fp/YenZeHGFJXnn/zNy6WF4ef/II5url2LG2bFIogxqYFRa2TuFnadq00pgB+wbycLwGaL/5E/qnX4pf/GcxOxs9kzZhxlK3KdU/ZIgPdnsWT91ZP3lM7z2SvvWh291ujKLTlvDRFCqGmzdSdnyUQoYflajroF/agOHj+BbJkkr7j4Ot2Z//p9PU7k4/try0Vk1vH16fn//IXD25Oxq+O711sqgEW1453svH88gIh2Rva/81f/uO/9va3/+Gvf68hK2IDCqZ6OqunqzvjRTuZHOrxjk1S1c97uGN74HmZFiYQI4OP1LQhNOIbr8lo1tpkQDl1BY2hiqF1dRs2lW/DetNEQFBANsEeiBIyyhjLwIFZFAF2tFUgwMASA7OrxVXg1hxKpLglZEGqdBI7DyYzAmIkaEOitRVUCsggoVKQgfgcFPiWIkaGGIUYWxZm5VlHVD5iguSd8yCKJIgLIQAoRNU9BJkh19Spsv2oyxXXHMpVWeRKnAZKQ5Aste0qKGtacL2Rqqfuex+8P7bjINisvBmRAFD07IF1slqts14+ANowb8q6AGWNgdAGVLM6egmjxM7LVaF6KSsjKrDOi8lmuXKKHXBQiTbpfp71i3R6OWWqN20jCn3dtiEWmY3MmOConwGAC7gOnIqNATHTe/uT6uwJkmp9VKB0DD5EZEisWpUtkCrLjTbAbSQNPkQVQUV1+/rew/lpVQYAVAhKK4qARFGEGeelIxJWGD0o0lneG278i4uLH789Pjq69upf/etPxrsXDy8OM1m9/uZgLa6W2eXKaJ1YlAdnRX5kMrsBvzvsz7/1naq+2P/Cs8Vzz5W/dTdfZlm2Q72+XU3td99q7943O0Kf/AOi9ynfD/VMvFcJiKVcQvN3/s8MYfXOu70qswDrX/315HM/4gb9MLqlFk5le7C8C9kI8/Vxv7f4/vczuFv9+j8+e/jwcjyO+X5IqlTpRCTUTfTeJoYJlMJAsqqXsZybamk1zheL/lgn+/nSuNo7zr1f+5pbSogUWMS9YW+YJUZlhHR0uFc1TZbk905P562breuN87uFvTnsDdNktlzWvpnXmyTLQIsSzFOt25Y8cIhoFCQYOSCCNQbQRYxV5zkOTiNpiCoGpdHaVJARokgIHEWAKCNQCTlh46Nz4h1ICIGVoEDPmgpiBKnbFjmMMjoakZr0LuZN5ZyLzrFQcARsxGw4DofF75OK0kxLjKml1gWtGFISjHlmgLgJ0oAICUPUKKixCpU2SVZkEEUn0O
Dj0wGQL1GmKAkCJbCsGPKittm9lsoRKdNUV482Ty//2f/o/V62vfWD/Sv/EvTt+Ob/2xv5EBldpfKXM3rirLPoRUVtT4hWKJJECnGr/uD/zg4pt/a/t138RBn5g8vDA92v+ln82MFVJFMGRjggBpKlj+zh964vu/y2zs9l9++53f/9tGXcoUyJAoGHGZI2ria7H/0CPvfWR1NHrpa6/9yhcpQW6zFDBMCI2Oimxh6nE3yyfvfO7LN69c/IbxV381vPxOPe0NYAnlFPTqJz95u4Fbb782OJxvoDFVZjC1beh3dy7/9b9yb3d98cBnJ/P08sv6T/4uh6mLQHV8WqT+F1+ygU+By5Fxr71V/7d/+MlPfDRr1lfesBYHpG7y1tHRq/9gOLAfHdjbv+XT3MNpgktYsstOfAzPvS/7c79HP/ktBy+8sXl0a/qV1/d/7Iev/Xef/Mw//YUNc+Hu+Omfu/72h7I8tE2KKcusSKxyZ0QndTdwzrAbZKVBnTTtabtAgSDIjJa4IHWAheWyciwp9iISh6WLGCBojHGeurbr8yyb9zINwRSuqgpRPKvr2SwMymw6bYd5acmwhiRgiDPEjaJIqau7RhhtDmhwVDpbFX1XHx1PZknAGQ2yPlpZW13p++6zb75w6/ChHSzXqwRJrHUsCWJEY3+9jblsCoDq8g/net8l7+VcgPwfrGaEy4M8IGJSxfPTldK7xbTlIf0caQEqABjFMg+tcQLUNDQwdmDRx3HGhmIUEYQEJAzJWijy8tFHN77lOzxl0DWGCImTj5wEDvcf/vSPpXfeIAJFTJISgmCybEAI0vJY5MQnFTKAbFA0qQTnXEwaEhAyGmKyMaDkO8NHP4Rraz60ZWmsYbZcbq6204mzAIQebbADc+GyvbBJD6fqY+5M9H07vR023xs5m09rJueKfHF2YgEBEo2KbLzC1Uj7Dk/3m5s38NXXht6nkLJqoMAEgtVqyKBFl2xK87Mqs7kt1VNSIY2xLKHaTF0rRMVoPYBKRtoEndaZ6UI/K8c2CfdRwWQBNGoiSRoCostdsQiBSLNhdfDWl7auXA1kakzF9mNBTYVgSJPgOVvK8CgzD97+HL33wy2Nm/J96X17cuE+2K5/84uD2DMDACdlE1Nx9KD9yr82H96OF64lzWuvxFajo7XtlSuP12+8YKwyJ02oQhoVDYoAIjgiTWAFDdmOqrOgUI0//e0fFWf3Hzz4xZ/+uQ9+8GPMqOo7hCKvfv/3/a5//E/+zdu3bm2VDMsixZJFobh0/TIBIyFoRCVFUSVQVgSgPgKxEoIBJUYAoKi69HATIIAlRBELiKAlsiLIeQAcAGC2OMqOte+PkogaQU2SxLpBTEYwNxVnDmLo8iwz6IJUMZnEhKW9+OTKzdf3j+4vohp0ZjGfWQtra1UM/WzRDEqXSJ11kFJmiSWmRTOftdbqK92iQB4ORtf2Lt99+87KOH/2iUtv3XpQkMsLm1KMgJlhY0xqYycwnbQnx72IWeYVmJCBTmdNCoGcERQw2oeQgh7ebc7230GUTsQHP1gpBKjvFSIZgzFE1QgCBKAEiEAqWcZJ4mQxPTk7KlWHVQF9n5pWgriMEC3nDgB8Hwixi8lW9uLumvX49NWLjOS9d9LlfWeSv3/7LvayMV4Hzu689tZXvvKrxyenwICGMFLo0i9/7X7bzb/1E++5tL1yvEiLNrQqRenmEmLQYWbyYeY7TwYgxyBaltinBSa5fHHvaEEW7LTxzqWNtYHEDti5YeYL23kvEh8eJI/c+BglcMC66d0jG85Z38bU9N1Zm2G2vmL3D+dlWb332rZ7esf7fl7X8ycviFACm49Wpk33xOW1s+lsMpnwoPjcl1/QPt6+dXtrlBWcTu7ep6AZuTyPgzxPYTaqTObizdt3LqyvAkZn3fraam4x+IVBAsUvvfTS8b0TC87lpiRh1SCxqtzW2t6ilaPubFFPxsNNHK/07XR+cjjrFoONofOm78NiOhmXzTiHs7qfHsswLyFCmLTDQfXaGzcQuSFZ9G0YbmQu1bPD1Ia68TFGEhzkGee87tzD7mx3b3Xe+lu3H9isUrAXrjz2tVe/ZixLII3vNvMVAVBUGZGI0nkvRxFg+bwQEEOgslwMvGureleeSUzvSukhqRKiZUZEWgYuznljQIhyfuEHVmVGQiJiwmXGPL2bVgVEVKKkaelSX34/XnIigRhJzjtCy/9NEUAlLQnFeM6E0SUcxhhK4JVhvLr9xCMfuLL3RMmDcQZh9cqTF555562X7nztcxuh/TpnV7z1AJyZBJrmdZicyPpgvHMhoLFnNt7B/OgenLU0TxBKcBmIAKmiAIgiKxMhKYggAQ9jntvVdfEJpUcNS86kRBBUIALfKwlaAsR3YTcESNBHICaXA9ilqEsBkA2IQAIABEYIooTgBJYIDLIAKpIIDaQAIZCxGgTYYY7SejZGA4ACJRZiYpdSRGdAE4AlsxpI4srQF6VfW09sh20dfurHs3Cf0jwdv2MyUw2s7qs/nUE2WHn2/Y1x9tLls/2bNJ0Ug4xNhrXDNqKCH64Uz37DYXNkBuvZ05Wd3uHr89i3TDgoigDUN3VnVEM3P7g7ffAmEqyVDGROtS9VEEgQEZHhXKlHSICGlRGQyDAxEgMyOQImCX0SAkpogFICkWXmLAGyYWMtEhk2KgIAjJpQmB0zEzOhPZ9J4vn1IElKkICg91F8n0DqrvNJVtc23KAQHyAkciRtIgOEqjGRsUDMlJNxiYgAQBRB3sWbo4pqVDTLeiIRLOHxsoRmIYm1VtWzEiRkpNKZnhNZg0SIlJRUEdQYAUkp+hijYEqaBER8HyWJLFlcBJA0KdRzj1ZHmxtbWzubi9P/+vf9bsPoF21uAfvW3n7r7HMvwSkXe1dnddsOFqcprLCJD89WLIZFKCgbZ3bSzcPATi1kjs10Xo2H608/dc+tZKO1eDp5+MLLRG507SKvVfMjmR2cIDcZD7ANs9k8xpCVxfTkxBWkEdAM7XBX5ifgG+A8L3JZtBopeDYu56wEg4xkw4KCAoES+DaAdVlZJNTx7gW0WZTGs5XeW2c3L1/U/bs3//4vZx98urfd5td/43ywjoH7/bsye7U7+myexby4JAKdn1XD4emkEzAnKI98z7fkH3xKdIWCodBijIkyS6QdzO51w61HzPpxfTLbv3178/FLs+ZkctZe/YavP/3xn9zeGQwubRx0ppLR4Ss3IGfATNbjxz71iauz2Y3s7Lv2Pv43/9aP3Zx0BllArSFjEIiMQWe5MqwhEgEZcoRVShVqDLIM37MqgxJASmoZQcEgBUiS+gRRBBOQShLjBFVUDSZjDZ6LKlNMnkEEhYk14bkzkBDlnLYfQZe8xhgSGmuMiQnAEJlMOFNAjRFNBDhvG9x4536FZezEMq0MixDD4WyBxHlmPYJY6lJShCLLh5x18yZXW1X5NPXrxVii9D6OBoTK0PWPbu2sluO+0Tdu3K6TNyVpjEVZ9pqQ4LRZ1CEaNEXOhiGLVGYutnIyOW27qKSGgVnH4zyk3lg1BtWWmUAGPGLujud
5krIwWcldan0IG6UrrFsZDUfkUvJ5xrPJoosRBuAMscPFdNJ2wafOFPnhpB1lOXkxPp6cnBQuH5TFtG4Y4cm1cWxgPu23tq/0XX7tg1dyivvvPBxQf/vezWxkBzujcljFDc7X3cndejxwt156pWL76DPPzd68ZzMEgzEkDN4hqpBoIlDrjDZz+dJXLo7FP7h58Nf/xuWjIo+D3uQaJGltCiPUKjlkUIighKyKqgkxKgQhZcltRFZA6D3ozFjMXJmgaeMc2JXGUVv37QKYj49u8jAzxhNRZuPFrJy98JXuo++PW3sHZw/WzUDdjAvZWC2e33B37/oQdKMcNErea1KwuYssbdceTTAreH1tvNifZhHmh0cVAYhy71fHruni3dnZymCMqtAlx9mk6SLGAAlUCcg33rEbG+zqU6PRQ1YNVzOLVWHXRxWV7nB2Nq876XRgB+2ss8iitLe+E1pIPVhbpugXfTAqEruJpPuztg1RGZA5gY6MtQ7WKncqzZDcc09cvP7gOir1HEV1a1gUWRYYHp7M5rVfbpF9iApKZGaLJnP5+trARkFhAWl8+5+MiuZdKHMGVocJOSWfCjZRQ56jpaRRKZGq+EiIWpVO+5BljMC9ckKSlCBA0ysoJbWlMxlBbiAgahIVNKbwPrAqEwIwE/qgKUgfJHrKmRDBB1QEr5JZdo69DyzYR5+z6xG8SPACqj5oURblSk4QXvnqg2d3Vn7T89/0icV7B5M3v/rf//f29YM9LkzTFRLXB0U9adOoyAq26OfzqbN5phQUFFILYJ95ulwbrg6yx3c3bv7FP/iV//PHxwi9chKDjn1AA5ZgsffEB5/5vr8E8e35Pq0Mnzvd2PYH9xjR92FYDn3jm8bnZD9gy7XP/+Kr/82fxaP6EvApZCkpO2bi4Bw8c+3p9z9z7+Xb8Z/9n5/+9mfhCz/xxl//R5t1gSZbH9lpwqe+51vC7/qBN0vLbC6Nd05/9B+kY18hFcb2R2f2bDLe29zcLeLV9ZVv+NDDX/yRdGOughkjO1sEqkqnKpjMarc6+NkGf/EX503coDWfaUSPfcztIHY4rXVNxj6pBYpE4lgUb7/95uV2Kqfxm7/pYytvVS//D//YvXH3xRf/frZ36Xf80OOP/P4/+9f+t79375//75s5FooxgUXXzgElDTK7SNGxMbZHlb2t4UnqOh+BucxLVR0OXWZJkdoQcAndtY4tFomaEDUBRBmV5SKAD31e0qULG2vG7O/fCcABuQ6wWpo8M4PSzXrhCIM8q9j42M+buokpxjQoeVxybvDhfMKp70IMxPPZfDVzTzxzdf3KjvTt3YMHiNj7SCDEZIncu2d9zhyxAZgALFnWS5PCOdn63RXuuxvlJSAPEHgZJzhX1BJiIlj+PV3GtkUBAUGBeGnCURVCRnAWi/Hjz33smz9sSj35+X+zerLvFh0lcUiIIAnYOGucqwbZ9pU+jfIQrLEhJSIxSNievvVv/hG//vqWUSWOIeVMETUJJAAmZkEUlL5XZ7rCmMKFxheEHAUALJHlzAdBqyEFsJmvss70RuZ2OKjWR7Pjh8lLUZZ9LYW2VTYsBzu3z5rR6kiHY9p3JAm76FTnX/n5VVt0uxdTuZ6tWMwJosZmgaDZeD1b3eoA06ypcIpOFoupKBnOFQ1AlopSdi/J5Qs4XIGm5qN76XA/9H75K7erZVcN6fKFzkA5GkfK+1ZMUeBsjienFKR4sE7HN7mLmc+a+hZaigqFtUQ+9V24/YZu74rL3erm7tMf0thSNhhvrNvJkVrV1GtEFDTMSATGgoHy1ivl134qXbpWj6/6wUhcBRyKW6+a2qtAUgwdgLEWxN26xfZfpu4b2t33mNGjTd3YMOHmSLszMIpBQHUplLPEESKQAihpckQYTV13YXNz5xs/ceW9H3HDcXR6tL//N//y//Tk41cvX31EoZtP6nrWbKyv/KW//Gdef/utv/Y//s8FOlURUQVQVWQUVQZeErt4mWcDVFleCVEQ0nK0SWgUUNUu3Xx0viUjBEZxTIhoQJXAi6Z3fR+z+aRvZohMhoXj2sY4q1aJyGJm7SpKQPLoxikpo0Mq8qIC6iYnBywxw344yBYtRBSTu4ApmpSSBAmCVJVFZotezlDivbsnEjmJpkixVm9lEWaLrl/PywLcwf3T1WIMi4UY6Iqoxnnfn8577QIxzU6aGDCm83luhkBB+0VQSEoQU7JMOUI1zGIfiCyApuBtPjg+jcjEjAahD0FT4hwymzVtiEklaUoioKbg42ndvf72ePBwb2ulIJicTfLMGYbhuNhYGc/r3pXZvbuH0xB6hIOZ38vIgi80fvxD13Yurn3+K2+vrez2R4dX3vO+8fqFX/jMF774xc+fnE0RISK0IWEEY/kk8i+8djhvm+/4xNO7G6Vomj5ogOxk0RtI46oYlFnK+JGr64ddrIr1+PBodbyytz3eXFs7Oz0RcDwa7m7tXVpZvX//5zCJNqEYr1LvNdWXr24lcLPTuqjcajVY21h95Orq0HB7Nrd9igInD+skR0W1FurFsKBqaFWrgZVHL69YQ75PvhdZKdOVjYfT1aOztXfu7B/cudPUM1CaqZzebaHHC1sb83ljKljd2jo5Pb32nmtns2Pu+66uk6ZRVgxNBN+u7m6kiPPQPjg6rrIizzizWio61MsXL+Yro4MHJ8N8tdgarV959I3rr7D2dYpsRitba9eu7d17+dXMu5xLCbi5unl48FbKDRk/Pzqsw9rBXb+1Ylnj9na5A9m89g+n0+RALfapBSIY2NzZ6enUFl1p2fvY+biyNYJIp2fNretvi3AXTJWx7/zyXcAKCKTnGkwlQiXS5fjfmCUfQxWIlABVdIm1hpQElIlEZTlcEhBmZDqf0iqpgCgRgBIyIhET2vPiGRMwk2FCIAQUYhUQEcR3sa/ICqSA9OuWzWVSA88zgcsYrAAJCJBBFVZQJdCIwIhLTjYiiRd/cfvK09e+bqC5JCEBtlmyw6c/tLY33Mivv/3I8WJNuI2RXQZJs6AwD2n/obtRxy5QQG4Ag8KyB50HYSBVTUmNBUmoSstNiYCyIzfgMgumh6QAor0n4wAJDIGoKiBZYANshVAkalQ2BApknQJhOQCTAxpAh2JAFZgBCNgAICAjGz3v3kXUBECwvGMiECUQTL5TY5YfkMSgCYGtYgJIAACGwQiIaJOSSJPn+fs+mFc6f+WlleEmGop7W/Jwjj6a6XEMideGsGEQJXXYPniQqnHwrblw2Q3X4v5RP+ug6TOlkpD2b9gv/rsV7NpyM9t+kvMtNGNZtBLaAEdcH8fJ7Wbey/wIZ8dQH0NWFvm4cHlsYFoWwIZMJkkIgcAsE220JFsBEhEyIxMugejERIwSNXpWJSFNiYAI1DDDkha07H0sfQdMZglYR1USIGAyhKTvlpEVRMUzGR/7pLEXmcSQlaurg3EnbQSGoevnkQTVi1nCp8goWrVO2QBZ0AQJQVU0gQCAJyBQi2IlBgHCdwuVTJRAlkBtJkqASYmEBJmIVTiz1iIkNaooIJqiqiRGzIESxy4RWiYTQ+
rqGlQSYGz7Plg72tu6ePXZ9z1XzWejd758+MUvPvcdz3a9ccdH3U//6OFLrzkYdk89OavW7tw7HvyWjzz6/ie2rozrX/3Kq//D393c3yfEs76udwbrv/e7nviO33LrhVfxq5/Hd26cffEzK+Nqc/gbb/3bfyVf+FX3cEF7G2HauSwbfsOz/WtvTG9OKmukJzBZDJAbO58uBte2uz7NmsWAwNqCXME2F2ckK7kYhaYxaqXrBHxWlkQOmEIQVKMqxjKQqEO1LFy5Rx8vCjt5eEeLkuRwq7jg3zg5m90/O5jGK5f48l5etbK4XZhO+uT1yDBTXi4S04XHRNzO82v58+/rrNFJYO05WpcNF03QUupZXY22r3z3d9148K9Whqtnpwd4aiZfuYcPP//0H/pd966+1vTHTT9befqJgzeOxmXVmB5dIZGdYdvB1z/7nFj7rc899e+OFocnQYkySy43S5OtI7QqmaFypeQsH2u6WNj6ZH40bdqkSgjIKqpJ6VwvCSoJVVGCAimaoCiCKoQMqICqKskgAJoogpCYCUBTCOehSgFRwCVfDiEpJNS4DCQkNSmB4XODAVtCZOcUQ5RzhunCy9r2WpL+jf39MJ9Cm5Lvq2ElPrLqSpFjCgJpvcoomsJrCpBIbaIhmND2OcOjWZFRtXvx8dPpcTebzj3OSYJRdOBFWvJrWyuMq2e3blqJmHweIIRImFWZbTlayzxHEZJEbd0PFHLFgeMSXUhpY1xuDkdNHVLAmGL0vouNT/0qD7azEhFd0tS2EkJXowk6JCqckyTaSq5ZNio2HL9xOvUGgkgfqRVoTr1bcYu0SIAhhub4WE7rD3zww9fe9ywEaLPieDq79+D4uQsbN6/vpzbC7Oj0F3/KnR3p8+9Ze+Z31tfWR5nbqYbps29aF3ARwJXZcAzzmU+CEZAJYxcCZCrNP/6R5h/9I5TwaFcJWG+T2hbU5i5T6lOQHoSTMCSRIBGosCEldcYaIDVqUWNjICdiNOy7RjvQNmZ5ECn6ZClQ00PfbzsAAQAASURBVHj40Nf7K1vDZhF+5qcwSJ6ppq7+xS9MX39x9ZPfOQzD3aeunknvH7l8+M7bmzvDl19956zNXOkopCg8mTWNtqrRChxOutHYru+EXYD2/pGdyWZVaUi7W+s3bt+usmFlMEVrLArWFZNV7LrorBEAy1YEs2wwqopbd+4o5KVxsRNoaXNrZ3J6P036Wd9GzS5uX9vZ2PzyS79myK6MB/UiWs6ynKNBCTBbzJH0eHo8V5qJAYTcMDkuM0oG+95Di/NWu+bg8d3dR4aDRd31ORWYX1nbujeZzuY9KllLzhpmAy1ITD5pAOiMItKgKkJIqIQh/iejIkXTJTQe+yPftV64ZBIgKHMy4gubxZYWTVxEZgSfOgeakxgMLtPcaMH26NQHZgALgFEJWYMEg0aDiGCkoEli0kQoKSGijxIFRBAiRBOGpS1zk2Kyho3FBEKsUcLqeDA7iyFGVWBAQmpO++Gl0c1bR9/9bd/ynXs9v3L2R3/fn/jpv/Fnjm6/kxvXzo82oF2FMMqJYmA0ix5iWzvfd1FSJyVqhOQBczCjJqJO6te//Nk/+w83XnytShQsJ9WoSXwAhe2VDez50qeeuv4zf/fFn/5/TvHCd3zqT13c3rx3ciCKnLBpQhZlVDhFkSSv/r1/tQoZQT4FSQhexYoguSAh3Ls1e3ijOK0XvzY5++f/jsz2I2M3GaY+qKuY5nL82o31F7+0JadYDRTG8cEhgkVnA+pqc3L65/549dTlfDw4fuRC/pv+q0euvefWjXtJUKIQG6zAOYhd4pxdxrmz0Ecmg0XL0LJRZzEAcwIIqUkd2dIxN10PyXLm3gez7r/94a/c+5Mb79199lMf/obf8z35+h9v/4//pZoe3frKvWfyK7/j237fn/u7/2uV257ma+sb0KESEaaIUNnsrF5gvu6YCexuNbjbPRS0CC6CaGxtXhCxYVayMSiQzFPfIpbDFU3kirL38axbjEra2SrHhd6/eY8Ai9wFR72X3otqEgxk4tbqgDvJLLV95wmSM8PKba8MbAROctzWmeNotRxkxsVre3s7W+ux68+a+Ul3CoTWWtVg2ZJqitER2QwiqPfp3YMRLBkOcC4rw3fF4ojvSpPx3B5FS1Eavku+FsVl2D8pICzLAEBIAkLLtDUTAe2sb6XOf89/9UeHhW+6+rHqu+lL/27x5a8WYmISBcqISsMQehA/zAcPbt5av7QlyzBOK/Hw3skv/JPh9dcHmaDXkAQJOohArMrBCxMxRpe52g78U8/Zb/w4bOyUbc+Te92XPq833qxqTyosGqMQGQ9U7l6AvFSl5KNEMtmqJHSrayvDwtY1qnbHR07ZjQahGJMpYO6Tb42hPejCz/9jGGB69MNr3/BdD+oAzuo8oIayWjmc1/n6uhm6cOMle3JjMACoU9+ErjsbPftefPSRdHk7jtcEC1xJkGftrGZdAHhbZjjI7GgcSRNjIiJki1H6makIeSMpa2mj9enuEXvLnFM6zchm6CAqBMm6zoi2XTPzabS2WU9PRUMWJnr9i+nwXiGoQipoLCmCqAavK5bgX/9f4CBb3fXVmg6Hre/43oPYkqooKhujJNFjFhJ+9Yv08PVyNJJipeqS1J2fzVLbZxIbTCmBgLIhhMSKoirIVhLGkBHGjcH46z5QffSTMFidTc9mp/OxlD/8e3/o5z/7E1ce2Wpaz0BZwTEEFv9t3/bxF77w85/56ZczIiaIossVsShEFUPnqGjBZSIO5dzeDbp0jQAoAjNZwigal/0qAQJxTMvSRiLoUorncQgAADTqJfV9R0x9SEFpMe3zYmZyMxiPjWPOkap8SS0lshi9pJmt69lRN6pWsywumqQJrEWD5vi0HVfZaDhuQ9d3fTNrNPVzjQC5qBBBXmQAFDRiborxwCxgPB4L88bmGmTH948PEXVYufunEz9PoGINS4TcWHVCAGEp+UHtg+SFhSTyLvvLh2itUeKUQhcUJIBonnFV2ZhCVebzRY3ITRvBoCFSkhQlt5wxS5TTpp/3/b2HB7lla+xgkGXOLuoQZJIXWbdYoIFcKfSxD51u5Y8//dhwOPrarfs/9vkbIWr31c/lFuSdg+M6TmaLpmk4NxwEQABISXwQARja6rU79eBX3/ymDz137dL269evhy5UmYrXvvUrg2J1YyVEHrhhpSN0Ya3IFOzDo0mZ2RjT7tawKuxLb30OAMuyika7Xh4cnAwLm1vDHtZVr+ytXH70gjUZGe7a4IYrF66svfrVr6lk5coqWYsREMH3FpzNx1UCDE1vrONMQaBuuxLxysZ4b7O8f3A47VYOH05c8JmpjNq11WF+5cLp4enhpAlg758chb6ZLXp2WWa06U4PJu3uaN1qbH3/xquvl3YgqhRD5rKkcoweYpcezGURT+fTakXDjZsu+LxwZsxbO+uzN14uj46vYLs6GB2d+vscmtNjm1UKoTJ84cqFV79wUFWjZ5/ahfb01vSoAxTlliArqmo0ns/mEoJNgdrFyFFV0ebG5v5J9/BMi5FJz
YxE3LCUgBGgJnj6ib13XnkbAAwsXQa0lCwtnwJRVUAR6bwNqksbPCGi6DmCHgQIgYGWajQBQFBLzEuqEJMiwjIcQqxyfrsnVIO0FBjCclCECAKARIgCwOcOBUCgZRGJgN69/KhIOgfrLaOw525ORAXRpJBAZSkeAkiKybCsFdWTe4+tcsaATfB123OeM5IVvmjXV4vFQG5r9M5xjJ2JgtOG59EmhIenDgnIAhdgLJCqSUm9EmlKyKSACoZANQoag4PKbu94MOoUrpl5Ydhs6OFZfnfhWkFgwCi0fFoCEAIyMAGjIAqAzXOxuWSMNmM0IAxIyAiGgQyct+oACEFFEBUJCFCW7HEPkpbTdHAMZFUNFmOIDWhU8UigkkSSoiABGmTKJfTV009MUu1u39nt5vHmjc7P1ncf9b5O0xMOHnrPLVhZDTmNNteEF5TZw/m83F7l9Sq+5PyhMHJKiKjZZK5f+NyokFFZSP5i9MvaopPQ69mi+dpLqzB3NLt3+37tF6YsDsXAat/maRaKo9mpT42AqKqcRyOQifC8/I7vpp+J2SDSUm1HSKQsmhDp3eIhIKPQuUhvOcckYmUWjMvtgRDCEpaOuMyiAkDSGAV86BW0C/1ZV1NeDcsda0V91GA1YxMwI43oFdKyIe24WGLNz89Py6u8BpGYIBkCSMtLQhaiKjomIyAIAoqiIqBRoiIwYWJWVRS2YFMQNmQAkoggKNMyDBViEgV0ZUqSMACRdbn6TjFlzu098cEPfcfvfs8TV26+8OLhV/71zulk/e5t87kfdQ9O5q++3X7tuhQb97dXy2/71vHz7/nI6igNVtRYKHn1Nz5x7frR9K/9zRzppsSLf+gHtn/gB7pqa23vA4Pf8p/t/92/ceXkqPv5n5r925+mu/cGw0G+uabMmrrbsXvq6W/fv34K7cPV8aA3lkRVtU9xdGGzpaRWSSQt5YkxaJMkgKnYx8gAIAIpquNUlVSOJXhDhqtc2fQhmfEQsqqdt4OVqvMyU189+axGb0KpE+frM2o36+vt5M5L7trrz/+mx9+5/sbK1i5YGxiqzQsoZaDcrQ/n9cyOi6P4QOaL3Y2rBMa4TBnKYRQ9NcPk59LW9fZ3f6KydOvv/W8bq/D+j7zn1S9c737q3+zsrFSDrbvvHNDJmSn9yiNXkZy3bXM4ZUdrO+vdg+l4bfDpDz3T1qe//LlbK6M1k+HRoiUA6ruMYJSZjbVhtbU53tnZkt6cHt9s2pMZiigwEZESJ1VVEREBIFJGREWBpOcwCEwigRnJMTEARQAgVgDUHlOPEA2giBKgKpICIkXUIBIBewUJCQgzxyhKipYIQM47xKBJVfS8kk+2PDmu7x8fHbk+Y5t8KNmFOhZEm1WR6mgDgGLWCoH0XdSA48FwDcyF7e1iUPnY++jvH57cm95JIFXlzCBDEw1ylDip275xTR+f3Lm0GZ1wTKh17cEZkXSnnSLA2OSXtldc7EySlZXhepWXLuMkZTVIIot6Nlukug0WyWQYfDsuXAzWJBOj+KQRqMozQW17j4jWGmDrmBSECocEG8Xo+cG63Ll1e9q0eYaFBcZmsfApre1ttbHB3K1srA13nxJ3Ic3fOnj57c3Ll576+vcZ8OP3PI2popVru6lduNcfvnh//K0fMZ0/fOkFf/+gfvFAI69fvdIYGxJIonw47BZ9aTkd3CJAkT7v2bUKzsQoYPoEosoEoh6ssQvGcOVRBym/81bee0YjkkRjih6NUUwawWAUH9gaUbOwZbO2NxwM+9tfWNO5XR15xmpc3Nl/Zf2Zb+9uvb5SZtgEw7Yqs3Hvyzv1rf/Pj/SKvzymwYcuXPjotWiwtLQ2rvYX3ntwRkN/5vvGUj4cjFKn8yZMoz3ebzjPWU03S1RKkXMTmwsXNsMirgzyaa+1AGX5xd3dw7N9ZrYGokQMfqB2BO5oOoPcDI0rzGgyDZzl9xYHPrWqfeiFA+/aja+99VKjcZCPEjO6Yt6mNmGI6my5srcR29OzdpozZpnJyMaYqqpA0gRCxoQYCbW3dLKYXdhx05NwsGivbK2t5XpCkLoYNFpLGSMRJWMUKQZBwgFTDjo5rZ0zhTO/rkY+HxUtFj7LSNmgZn1HnSIIZhYlwFaVt43eq8UH6hEdYxKyJDlpXoAFBEl5hMJqYVPyEAEj6LQTMgqkIkkUVRBR1FCvMaTISktYJCclBMMADGCUQIHJB7HOSkrENiUtcxJPGiABaApV5WKST3/dt36rPPnk2tln91/5pR/+01PeuPRbv/fxb/3Qytbw3r/8t6/9g39Snp6NhYLgwvsAYCDkYInZpcAABugxMG/9xb8wfm776Gs3V2vpIRA6RT8nMbsXoa6rumvryWrlv/Lv//naP/yZZ377b1i1F8uv3H/t77zmmojGcsSqtIiKFltPIXGOONdowC7RBUObKSmkVBgKt44ZWFUSuHuqi73t/vC4qmUUfeoUPIbPvjb/7PWtqkhejhMXcqxA0z5Yy8NsqPdae3KiW+3F/Oo7f/n/3rnzYMVlSfvhOJ83SZNyaNnBSVs3nEYljy6sY4On9x+Q6nB1GAz7iMRSjTgLWR3UuGQd31tE52jTDdY79wF1bn/11s/d+tDffh88edX/7GN8/d7wF7/w2f/i9zzy8ae+/ZGdt+MU3vPY8ULXHwY46pP44x66KmYu2z9ZrA5s082qQb46zLuAHqRjaImiioXYBo2JHBo1UPdTn2LOJUB2VDer4+GVdfueiwPf1HePTmezenW4qolS15MzXJk6hjb0BtRCco7qtm186r1Uw3zVZUMo8sJYR8X2Zh3T9GRSWLNTFF/39FM52jwhaRmhiLIYIoGxaJwkTEKjalS5eDSrJZy/HVLSczrFUl+8LAqovrvABVVMAKDKuiyawfIsBqJ87kfDd6dOKrqcOoHicnJEzuWHZ4e/63d+X16ixM7GRb427teH5YqlmRCBMjqAKG01KI1K8+CuuFyGFyDLe9/7k/t3fuTvbB6fDSFGVEB1xkRJKQVJ6hQNE5FqaeOFKxc++VsnOxfSxlZUNOJhZ8etPGKvv8yf/Yycteo7ZkvKpGSQy8EQq8qjsPjSkLNZbFtkDW0yavzD02o8hgc3zOFdaFpMXgkiRBXNM1rzff/aC+29V105GDzyPp9dxK1xyofMztc9Hx3gGy/S/m2XNARjcvv/5+q/oy3N0vJO8DV7788ce23c8BlpKzMry3vKF7YojChGAoFgBGpoqSXkBqaXTHePbLe6p0egGrVEC9RqBBJWwKKwVUCJ8pXl0lW6iIwMd+O64z+3zfvOH+cmvTT3vxsRd8U965z97b2f93l+jx9k4eH79Z7LHjGumoHpwvyWPzrIHSgPlpPluL8DRUZS0vEqs9obXlQAA0nrRVb2VXy1mDFSOrOJZ/YkDPvH5+df+iOtVjF1pjCFKbvZHNqWdnaYB5FL092mpz/RTW6Zq89udPh/wUFQEqkiCSAkAslhGWk5y7u7moTBxKCU2YCCjJKiIqYowmyTyn5FBw3hgWHbdYkFBTSp5ACRgARU4joRwoaSEibJGD0F
PX/f9td969IU0nY5mWHmmNTy7jvf9/VPfvXJs1fuc5k1hYsCra9feuZqlMIDls76GNY4XQBIoomAQI1BECVQAUzrtiaFdX07KogIIBk2KJoTiiif2t/IsCqAF+i8rE/0qqeCaTHOEbHUPEVwHaJCVXdd1THF+mgfUSnDrF8QU6/IYzIhSd3ND196eXq0WM0tAucFSxtjF4ILbGjRtuCbEDwBhRZElKxhJpNjSho9agpkMEcaWmCDrTZKRZs7LQvMTN8aSysLntQ0VexYhr2iMEYWjc2MErddTCJGhBkM08CwKc2qWUrg8xd2E/J8tZAmmowdISJ0MY5GxcV7dzYG2cFB9ewLd5eLxqARVYOIQF2Ihsn7GIFUNSY1EMIqrQhDFFmsuWkRFEDUZAaApsvmqzePjifPXj2pVo1i11nmXunuvnxiSrf2fYUQQ0xsOKWEAkgEBEFFoPzSk9NYXf+OD7337PjS8y89S0GzXjko8/Hu+MK5ndZrXalRf/byxnizdzBflGVZDsgS9E3bzF+CGO+5dN+yq5tuVdfNzmCsSRcn83vOjl77tofzQTkc9mOnXtiN+iHwzcmxGdvNbIMzVERrnTNqXeGTZWNARI1DawpHXd2anMrMVKuZCOwOzkTl+d6KXZ5ROTk6bkL0IYwG/YvbG9fuTOeLoxCCy3rF9rj14sqNK/deDtM7q7adzSZlbsZ7w8nxKs9Hm2c2DueTIt+SwjXHJ0JIFuvgxwVtn9/yAs+/+OK0Cu++dB7nJ0MzPFnk86DTumtiOxTNhuOry9BMJ5jxucL0Undjfpi6uhiPio0CDmrWDBInIFS479wGB10sV1lJt67uL1PGbNsEo7Pbq6OZWur1bNOh79qmPl0F6wb6deaFiNbtPETrgDIkEEJVRAKjKqgosk54kUFFRUJQwgSKQADrazgpEqydjIyKCMwGERENGQRhYmaDRIqYEOhUBVEBJQAGFuBXss1ErzSwKYAgCqiqGmLUtM5NKCaRJBoQ12VoawUnIaIak4BKt2l5lESdAUihWR6VMLaitDjpH83Gyy5DZmNEI5NA9OwbTAjRAmWgCtaBZXGgKICoQgCINsOoKAljAnaQZWAkFpoKzyNctMdm86zed7br8t5ukMMjmAigQ0qaEWX5OhcCKeKaxsPrcFJkIk0ClNAwSAQmgARMQOvLISMhiOo6eEcGVUjT6VulCgygREoQWkWj7BIBkKp4QoZYow9IBClBEgBRJhlv4IaNE9cctsVos19Buv1SnB7YojD9Mq5W2q6al6/G0QDHxWL/lqFQDAe6PAkHL8ryFotHZ+umygprbCbeE6pjic2ElIAY2TCkGLW9ebs9udVw04BbWneH4oE1yZPJ/UmtszZ4XyMqwXoutba4yTr7jkQKsFaKQIHWnz8mTUq8bqVHEGAkSSqESOvuvNPTDWhaIyCRKK17EggUQFJcY9MBoAs+Bk0hAfKi6+rQbexccFyi1A6M4aIOXZlJIGnVdEBBLCYypjTo1gH+hCISSAKqMmBKghxVPAlDIgIU9KpRKEkSEIzqo0TVKBCTdkGTICYhTGrZxOiZxRgTAZCMKlggURLHiUKMEdmE4LHMRboI6c1f9y1v+eAP+loK7M5uQyp8tpjJTbr6+NVuf0EhWgXdzc2Dj26+7W3Dy3sOJKCKEhJ6i/d+8Juf+tf/vEs8/qYP3fejf6MprQQsCqe8d/l7f+j4f/6f8yZZJZs7cDhbzU1KbreA5JZHBzvntw5eYB8odSmpKEjXpnK7LLvoFV3mmIzaXLsEKSDZoBbz3ECElOz2RnQmMisTQA7MJs8hIVonziUrWS9H7wtboOtBbxiPDrrjxmLfL6fDs5dxqAW2s8n0xd97Sfc2eXRfryg0H4uUEtlkRWqOen7Jk8leP2jP527gg2mVuuWyzy1OnyubVtu9Vdj04422Bw/8wDcsfvsT3u+Aid2NgwxczDJo3fKZl7bPlbMXnu42L5YPnk/kkO1guNPramfSwGw8cN8bYPimV53b/sxnPtFW12OIqLA7zB95+MruAw/W/Z1hWfYnd24fnyybbh3PDQhRBQRYhCWArjV3OqU4qEiKmlREGK0Qa1ZEtsAlEYYUk3RJkqhPIg4iQoQYCYRJYxSDJIQSJSKdYgBRiYBB7FoZWuv7SVQA6TSGyWiWi9lwWJqgq6oeAm/0c46pZN7eGDRNoBQzlxd5Wde1M7g1Gm5s7jDEW0eTuy/fbn1ou7A56KmNyWgVVlytPHI0PC7zXoxQg67qk/39rV5vUTcdkMutsPouVanrDfLOpLr2G6Pi3GhIDSbBVjVpunH7uPUCqG2ompgyZ6OqQHMeBpyMi6kVzHJX+egRKXlgQmRhTBgpKSHkxCCgMRLoYxcvveFV5ndv7VdLuT5fnSuzRHxnuqAiRY3E/cXVa5/+uV8embYc23NbF/rnB8uwGj/2ur1Xv3U0HB3/w/+2t39ybnd06z/+6t79W1vHN6GiMZ+ptPKrVtu5iREbyUZbhToI84gUjTTLxgHZAkMIyISEmBkwiTqhTjr18P1/ofzQB1AA/+N/SH/4aV50KAFVMmMkNoQ5+ghZcoMtEIlh0X7j+wc/+n9HoOKzD7b/+j/qYpJWVULZVUyf/GK/Z22eG9vvFhWSJdU+ljvRG+gG0R48e+tzN29llzieGz3yqod/64sf2728GZJ3g4y7ZMEl38SgFvNQUYq5Bhu13R7lKbZ1jCe1P7/RL5g2MA3V7C8SEE3DCZVZiHXZKxtus44eHO9sqLuzmAzLXHxXt0eAo4Wf1loPhz0POVG5t33BWKq6Cg12Xe27yBZDVBIDhmuAwtg2FXtnHtqfHxqmGNqhyzfKMqbO5BZSrAI3TSAsDuadEzNru6VPN6eLnRHff6E/Dd3dZQoRooJR34TgjB33sxL4fJEP2IFqYVmiR/Nfsor6OZUlO2NOjhuJoqCQBAhjk1KWLzud1RJjQAoq0TrrHBaGYtKjaXAZ90k3WIccB5xH5JmPiNiItik4wtxwSqqMiYFUDaJJaogdkHbCRMYgqELSfO2vVrSK6PIudNJETGSRWkx5z07uHFwqt9Jh8WN/559mn/q1P/rZn9SU7c8Xr/7BP7P9Td92MCifrcP2u777zJ0Z/8K/L4yFJC7KepfKiDtERUpqA6gB3aoW8bMne1ACZB0wK0y8ufTf//iF9/7g5Bd+/s7P/tM+x3qmIYW97Fza3cTFnSd/6190y86CBTG5obv1zAAMAggYKvKUm9QxEoEieaWohFL0yHJoMqtCXRf3QYsf+fH7fvzHFr//W1/+sR8fk0uCaHikxShpyQCZQNVIYVJKPcPea5lUKMsHm/xt33Fw772kZ+pPz/RPXixENcWsX2iUDNJ02c7f/MbsOx+rd8/g9tnwxU/B9S+6j7/gqlp4YNQkSVXVmcIlHyDRyfb2pf/331vNrt39Zz95T92Uef/8t33vme973z9//JPv0Fl29UZv/+RshvrHvxu/9Btvchm84eu/8Z//zBe+9Ox/+JE/94YyrhZtjcFjt9ErjcunbaglWHKDcpCWVUeyqNv
kzGEXM/IFQheani1SJGOI1GSEKjDM7G6ZURaqun7muZsxZePxVt/aSJoZVQcRkxINen0TfaYqbZN8Gg571tHZva29YlSY0reh82G8eSaKr21hUAo20kJW8k6v/8Xnv7LVL9Cg922SyGSWnZRmiJhVXRcjSjjlU5y21qyHlOsB5yuQonURK57+NYqqCDIhIp6ek2W9y5x2h5ySi3AtFxGpMkvdVK43eMcH3hn7HJZKxrXzpZgRGgfYGFQhMWRAMEIsY7t89tMnR1/bvLhTXHrd4tnnrv6nn7rPC4tHIkyioJ0kVLDIIpCpMoWQ5fY1byzf+c3LvXu9c2SYga1StVjWg8HG69/sn38yq29bdC7D1IpJkA5vpuWxy7LCRD2eLF++TsPt4UNXVoczlVK3z9O2p+Yo3nw63rmVRa+QyJIqRUldFAay4ItZNZzd0cMXg9r5KC/f/i3Z+bfIcsE3n9Gju+AF0SBELGjw0EP5lXvDsO8kZrm2h7d1tmirkO2cTZtnBqX1k3nWVthpntQEj1VMeSFQgFNPOWbObLhuvoqmX27uCOfd1i5PbsmNJ1A6kbatG9tOu6d/s7j3NZnbDtOVefnJjf1ndT7PQlJAYUBIRAQImiQhMHIISgmAHaHBKEjqkJxVMRh8UkVICYwh4igC3brrDhUFmSiJIquCxrgeYjs2sk4rpiQApOrU2EhpkI1f895V1iMU8ImYCrZtqMBoL+/Xx4er4U5vayvWjXhkCiZ1f/aD3/jxj3/ah7iWHiWtP5wIgqKqqIZofZdICqJg1tP5dWEksgKhQmHIAaioAESFmDQlEAQvENeGfwRmXl8yB6OsC930eBk7VMOUERcWkBE5kEhoDWFsq6hhPgtdG0Mni8V8erggLLs6bm0Oy6B3D6qUNIaOgdquI5TWN7lxZAwpGoshJCJSJYkJQX2bmike1LPNsn/m3Djrj5Z17QUFqVp1/QFvbWzPZouMSRiTqm+7jERTlxnu9WFnc8BITR0y4uWkWTXN2UsXf+DHfuJTv/fbH/voxzNjBr1cEUOdPAhbWrb40p3lSa9xzFtbw16/XE4aLhgjVrUHUGug17OA5EN0mYWEs7qzhpKKRoiaFNQSipeBw8g0abqPPf6cYqqTEsHesCxyM53XZEXEi5BzDgQcv+JOXBu/mBrFFsBw9vSNk6f+3e/8k4985Nb8n7XPvmizrJ9lVrSqm+ki9DLXK0p1etJUJrfRgE+60+sfzRYsQuwOT44SdJPZNCQuORfv7z0/ft1rr2ydHTR1qqskYvJ+0RsM5ou2Szza2Mt3NlRa30QGLQpWMRo1SXRZ4WPr2+S9kkBq2i5wjllCbRqP6jeKzCefunpQxu1e4Vy/asNi1bFmcPnyrJKjSXXn7pRFM+ZWW8VgAjka7GybNi6LHmQWmtWqb11/dGYxnyXrlvVqOBieu3x5fnRXWloenlwoxmXmVGsF7A92vfeT1bRPAjFwr59ys1o14v2jF7dec/7sc7ev7p3ZtJ25u1zKLGSkTax83QFgweXdu229bDPkIjhJORDmhLPFyo2LwmbdsjUJnE97m5zRqd2aCQhQARkZgYCQCQRUdN1Fta4RBCJMgutuQaR1peCppwdPCdZrEYgMEhoiw8rrggNkJD71KCEhrx1F62Sa6rpvUwGR1sVXAAhKQH86yxAVRFCVpLquTlcBRgUVAEgpJE0qHcJ6UremRSZF1AQKlMTYbAwB29CmLlm2MfrCd8Xx4eh4P1tNDMWYokSPUTkJOguCUCuoAAMYEEgisv61VRESqtI67QZIoAZiiuTjGNvdpv9IuXn2oZraNh1wzftf/vyleA9iIcjoglgUXgMqGUUQhQilWZHJAQS4o7wH7JQZ0QJbIFFkZCOSkF7pQGEDoCgeFSAl0VMslK5lI85QHaQAgJpUAYA4ISIZcrmQpdRhSACJpeuufiW0u3Z0ZjrqRI/TfJrF6AZlF4O0utbcpVqAdubYlq3N6CSz+eprLzbXrpv50hXDJBZFkyZJiYUwYbcKmDOOBya3CZo4bVQhVF1Tae2wc6bKsStsRFpUjV+GNpEgYfQiSoSK6/+XiBABmOBPa1YVAJFiCrRG7WsgjevXDkCnLXxwWowmyAxASFFSEoFTHUoERFX0T41pAAAQQEKSEKNAXPmq7G/0izxDlchIGSgGlTwDQp8iNgk0CZON2hkkBAMgIJ5QASSKTwqIDF7IiiKorMfJjChsWE9Z5Iq4tueZ03Z7VQFV30kKxCbFSAzGOI2CSBCVEyKwx4SIbI0za153cfbSA29/0zcbzTdHCSV88fFPuTv728Ivfen2CAtYqUPTG7o43tl6w1tsfxSrzhiwlpfLCg1pENgw/Stn7hw2D//lvzYDi0FMAkKsDk/u3FrefmFyz53DfkkE1MybbuWtgxTChXxD/ugPJvt3R1yyzcNyAZCMdUTcnawkLvubg2p6tLV3NhIGNOXWtl8B20JUk+8YHTinRJw57o/aNjA7MEaJ0PUk72tmXD9PkwOkTCVL2MO9S2brHMliY29zFeeIK0rJhDzO5cK7377APDSVmhwFYxfH1ofrt+58/vHc9iWL7nKER9w0K8uNM8WgNC8+ceO3PzG0/fIclA9cuTNpxzvnsrMPAD+uG6PdN55ZHNzN50f9ne2zV+6dXbud54BK3c1jHPbz3qAzPLt9kDXz1iVy/fd964ffkJ+R+cmXn/wyNM2G5bLMXv2qe6/c/1A8c16KUZge37555+Rg6ldeo6pAIlARUIEUNXTCyuQQTrmgBGhUUde3UTVcoCbiXMgGMgIpJU2xjUkYNJIaAFK0yAIJiSSqihhe11hSAg0p+AQ9a6Pv2Aaysj7ir0Xm9dfZ3X7HvLW9AUfTIzncLLK6ayjBYFAKcpu8JBC1h3dXW1v5aLt/a7b6yvWroWucIUVNFEOuIQtFBqDe123OOVJWhWYZJHZN23il4mi+3Du3p1JWTU0iaHWxaNRrr44bo15u7Mlxc3zctZ0iM1oskAzroussQWa5XxofglftEr60ql1yV0Y947g/zFKUECKzZdAiz4BVJHkVQEwEhgwRQFLrwKi+qt+/Xi2DBtvvo+r1k8PBuK+ENeOVVw8tmsnx8q3v/o5yY1i3S5RkTAont1fP/GFezQlpvmiGo025OSExe2/6hmufeZqt75YTp6iq/Y0hhklaTmKzMpZCjFwU0nUxRtVgewNIIrDOPXm2FkpbPnLfwsYYfPZ1j/ZXB/rZ50wKCKguUzApIahQaWNuDQHWqX7wwaOBrQ/377uyFVLbNzmYLHdmOpm4XDf/zDvDS5uTj31mlzR6nxJDUhviVgZDC1BLs9CNs5v1C0fF9kbfcdN4grjd3+pvlkeT49Go39V+MqtTNMbaPCdBzEuoFqFpRPOiWiVBGPZ6i0WXJ9GkGpN1uWOLQYxCr3Sly+dVmHQ6NIQRCPzWuFy0K0RMqepaY7EYD7Nb0xeW3dJSroS9nJCgtKZpVolS3utj1EF/3MZu++yINdTV3LKfrxprKbaASD51Ij52mLE7Waxq0QYpdOGoTg9fGr9GE95e3q
kCSkpJcjYD4EtFsWlsz3CKHVsXkhhkfMX9cCoVFYW1EE0ClSiKyMBMZMFYEtQqJANrnhyoICZFQaMSBJTyhReUNMykhZBTlGQpsREMhDUGQLSAbNUnMYbQWsjBJOWkVpQNKGESwKQ+SoyxyByAalQ1aIwpXZa6gP2ynq28v/s3f+jrhl+xRXzNi7/7lfDMV0Hy3EFXtZ//T7/3Xd/xwcHOGKXcuNntrdo7XpfQAbiCMENNIl5CFNNAEGAPbEAVwIEjsBVohi4HGeVF/sxBdftX0x/8ZpkkJCSyQ28++Wfe8Yb/6tuXL96Kv/j7u5BFoE5iVQwe+5n/yeTNE//gf6NnD1xQnzRFzVWMUWeFKNWdUCiUCAzGmDSneWvf8s4Pruxwc/fMKIu0kqpOpetZ0uTwMM3UplBizlywQ5vVMWgUIf7q4TNvuPJXuw99MF0Yygf65m88Sc88Y4GIrLBoLWFrdP9P/ETzpp2JlxOP/YfGeO49xUd+Bn7qcTmqOZbGcpbzdFmLkCO977H7V2f67vVv2ur/V4d/69+YdnHtqU+VF374A2++d/HEp97+N37w4H94wSyrFLv5Kj14tnfPoxeyWXZZ+Cf/1b//tf/178+f/RwJjMuerGJ/DMtOJPraV06UMTnhQsgLL6S1jFGCy50QxpQoESj6ph0PXFn0jLRdHZ/fn1SY54McHUVVSVBkFvMMSTpRYeOMZgatyQYj3twaj1ymMUE0EAwmMxpsYswtOzEhJwafMi4H5A5O7jz+7ONQSA6KEVWghmSz/jjfi1IpcAQ1p/3IIKrrGA7xn6JHARFEYF1hs9aORDHIugFw/Yfr0R4wQjrFngogIpKqEBAIqYtLpu/9q//g/e94rKmv6bLayIvFasKFgSwzxI6MiFcFQLXOMhnf+JLd5lFV//K/DYNBuHV0fxIOrRrykjIySUEhiqqmyMCg2JS9Mx/8892Dj842d6DXi8JOKYaYZa4YjkUCUuw9dJlODgUAKdnCxTphW9F8yorLG09Qc7S6+mLId0bNu6vrL1587JGmH5UFuhNeTsDXbk3NFogilsnHjgQIQQwjZBCiIRlNFvCx36Ty4+JjUZYAXYchMee57Qh75++tNXX1fDRwGLrezm525kIWIN/qTRufbY9heNQ+/0yY3NrMMa6q0BvjA28QYy0V3aqiEDkfYME5O5dMNZ3MahoMtiIHUNaYFMRxhCefMddvxK4zYnJE7FYpAQArKCEzA4iQqiMEBQ2tGq4KhmHhstx4p3Wtnigl8SE3TkRTiAoAMaJSCOIyB0ZOaeZIKXmgtU1EFdahE1AQg0qAiJihQ2LY3otn71fKxdfiExoAhOShrZp23m3EOJbamPMRVoiA4JSpXw4+9O1v/b3f+bwNjOuWI1FCBIUIBAI5oSVw64HzmmiyvmgSrHuSmTk3XPD6KiBdFBFUhSRrAhKCimViPp0kz6bHTeCjWWWszUvKRi7bKJ3LAQwTWlYDioqigjFy3WIQuyyzzU3tIO7pch6WJ7UbkCZiypfzhTUIpFlZMKGKJlVwKKYDQ8mDMZxZJ6JsbN2mXkgxBG1m0NtoqtD6IMQ2y1bLquekcDY4RgD2PstptNO7dGac6qbMetUqzbvKOkub5v4rr/3eH/1vJqt2tjje7RkyXLfdcNQf7w6AaboMQRU83z1Y9EdZBPKJIxAA9Yyy5cxCaHyW5VWIkNaYXi0dg6Ne0ZtNmzLjFIMBtblFcvW8LkuLIIQ6gvi+tz/8ne97z29+4su/8YnPAbCKSUHYGEYUw4wGoheNmlKMKqCHbbdNOOiPri/iX/zrP/ami7vve8vD07b60jP7h5MVHCxKW7zpwZ03P3T2+Gi5apcb2z22Opm0oZpj1yGTb2rLBig2bUV2nCRlfXrTO96c0nJWh74rnXFRMGlqm3kXOpOZLMt92xlS9J6ZIaBIl5tSFaxQ24XYhYRiTEQSSLHzKSUkhF7mCJIPFI31YtAgWta2yk336H3DvOcKN7r2/NHnJ8dVotHu7hNXX37wnvM72Hv8+S9gxmz58u6ltunapk0M+8+/nJf59nhjwNKlcHjjhglp2gVyzIW59ODZOLu7d6FPk9nB3X2gzNQpK+xWmd0+OR4E+Na3PPa5p770ledvZ+X44G53Jy18ibkPrLGDmDu69/KVZ75yNwvGQeFy0zJVztxzbvfuS0eMtj2cqYImgBi2R/p1b73vuavT070AgBAI0DDDK8AbPk0iAxMRg8J6xoAC60LMdU4NiAgAZc1dXzfCMxMTGAZDTGuE8Dq3hsQMQCKKiiQAp9nmNYF1fXOnNfhOFda35lduMWsLrOqaIo16mplQFE2nJZ66FkqCrnNsqqoKSQxpWJ3cPbrT38oMB2bILVcnh24+3zy6U85ObLtiZgWlhAwA3oMmsBYGPRAGQ5AAjYMUKCloNOwAECSBrF8DAzPkXI3AvumCvzg8KrReNnmuzfxkN2xe2XsgHSlaBkTMc8IkqqhhLc2hrl24BYBV3yJHsk6NBSKQBGTW4EBQRXZARhGQWYkQ0mlN6ZqCIwFFgQDS+vIJiICiQAxA4gMQI1vQQIQkBlIAEoipeP45vfsSnX8omTzlFwPcljDLyz7jErzHxjsU7hpo6zA5kRRjRl1MGrQgNpRbtmLRlRRXc/ZegrLpg5GgmrJSe/3SQEyxvTsrctMBOqPOmJBk4WNjXMyzkFhE6qYJISVAC4RkkkbDp/MoxUhIioAgIomIkDSmoJpUEqogEQESGRBJ69iOBgVVNAmEgVQEFUQUILGhBEklCiij1VeiN8fTpbNWkjYaW/F7wxFDMEyKhjMjyaoiQWsAU1QW9QEkptyQytpZJ7DWD0UUCNY6qlJCNEQKmkCQCRlRgdBAEkAilACQEoqyKoomAGSzLhREZocAmBQ0IoBRUkREzoCNiCpCUkr53u7ld3/wB/q7l7R0Lg+zqy/MPvWZ8/uLkQHSbDX3480SNTQasJC9Vz3mjQopWjaWQcCS8YsqPPuS2vy13/cd8No3eAZObVbkkLTYGPbOvmP87d/19F/7f1y5sOsNA4KzeQrimqybzaw1pS9sIiqAwbAqIKhg6CC13OU43NuThLqKnBiSQrPSruPRBm2cUyLIMwxeI4oiD8dEDARETsFoJGsxhJb7A9rYNUlFoxlvNrU3pm8GpmzmsbpL0wTLxe2b+9vvKvPLO0RVNuzh/Jaj1ezxa/7a0cZs6OdYr/zqTuN7Ld43cI0MZjfaj312cL3nyqEdu277Ym9rBaqDmG5o8+iPff9xub1zcnjnI/8a7zw7eOgd2ate04Y7cHzTedSXj2QQh/fu1qvq6IlnNi9drAd9kXG5sRX6+du+5bu6+dTEqnS56W+dNNCLxMumPjhYHR2H1qsiIYkKrrFrElGMqmqSpHEtiqYUmXTtVCNQIFTGZNQZRmZLKAhkKHoQwCAYouYIDlQhrXGiygQKuA4XA6ImFJGEIXp0QutHLyZCMYxJTldBXXeAZna8LNrqopNLW/2v3Akd46QD8v64anLjJrEuB3kYZM8uZzdmc0dmt+ccQuzCommTN
SvR7XKjqReE5BLsgh0krlqYdzp2peE8qHSxKYfjEx8qpdVknrPFTJeis5N6mJmN3IFEQlWLdlAKKBVc9t3x0XwE2GdXVzUZUxpaeT/IbFBZBEghqo9bWaGKqeu6FAwrkxZIZKwoiKoBSKIalSDdf3b75duT5ODY10ToeoXJsmUT55m9/J63DIdb/tozg/O9ANpNq3xrcHRwtBuqm7/2WxsHzejhN+73d+57+NWrX/9P/Szeuvq5VQ+3Lp+hk0lzewIAx8tZfnE32ztrJl159zBzxAwp1Eu/AhIAQx4ymwc2ZqMAiV0CBUP9nhrEyw/4h853Rz+z+eJLCElAEiIXhpzG0qYsN93KZkV89qm973pvujA0X32RMwdBjbMaYi8zTTu5NQrwbe86/wN/7uif/OTOjeN0EijPXek4NmK5S6HxXVZJyNxD589v55k31oAuDucWICaY1wFR9y4PD/eXhE7Et5RWwL3ReLHqYtJmFeo2pl5u2ORGEaTv8iCy0c+bRX12UNyzvSmLtAxN2SuUWVQFXem22m7ZASZRbWVrqw+mfv7gBc1NF6HnDGVZlg2bTgFYYtV2IeKcjO33tyCE3BTsCpBkoRZpY9ek1KiJm6NswLkm9bk5XLWQW0z4ws25VN253fzBMXdRFlUs2ZwdZCOyfYOURIDzzAEZgwiq5v+vAQ2RrLW+TVnuIEQmFBVgrCXkAZsmgDrSaJGZKQWJBjmj1PgmaO5s31r0XUroAVjRRHRJexkBG1QJyYsGQoohEIG1zJDYUNdGY2wLiAIWgYgcWbaconY+YqDSmBjC5jh7dr/6rq//v02f+r336vlrN6+fuT/uPsb1A2/38+fTnetbfQwn13/nb/3VN/+3PzwY9b/wT3669/iX7wcrQAEMSnKAGbglqGdZT+GQ3CrJGEwCDeAzMMMM1Xsfm5f+8HeOTyav7wXDAMkAWQx85bC6+Y//bQVxBC4CeEgRcPS+t5h3f3h5snz1P3jtwb/4qcUn/7ggUIIipWls2h3K7z9X7L56+dVb/fagIPTThhzsGXvrX/6d87ff8Yf/8CMPtNy3aAamcI696JWtO2ObNnvl7oPTz187d30S6k58yA0C+PuseeIXf+b+7/+RF3wIcr6ZyAO2DE3sqqY/dDSE7WF18NH/sDf8cNrdG48vLeZxcWt/HMqqBZM7jpRC7EKw975q+NbLR09+6uaf/P7W/RvVd37r9ge+8TD7Px72cPK1Z4dPPdf5Rkz433/t1771uz909Du/Wzx/JBEvx3jzd3/7U187+Ia/9jd+/t//yje+7X2/dOfLi3lT1Z2SGKqNtVnUummTQZPlfSrqrqp9sIQQlIiHeT5p584WyBjqZpCVGVlJKsbcvTO3eT9Dber2vrMXbrz80kZ/SzuNobMu99ELyLDIC6Gd7U02tvbdyXQZRZTNRt/lWa4hopD3XeEGIcFoI98ebMXl7FNPfsqUHCkL0RsqGr/sZQUkNGlqOM5Wc+xi0StPtSI9ndS94g069Qed0l/W9iI47TmOCkkFkeiVHvO1KUnwlcM+KAKIqGGa+/DuP/Pd73rda/Xk7mBkAkhs5zGEul25zjtVxyBKkQnYKAMkSUG6FAqyvD81B1MHlEiRMUpUxKgRCJAJJBGCZRYf4ML95q3fcJJpiN55D5xb61jZmSyicF3FZRe133nf2x75xVHyfnhhr4FYGNHlcf3slzfH2Wh78/w7v/+gmTnvv/rRn7/vm/9sqgfhmU/DyWHJJvpA1iXfwToxLnHtXvEhrWMBQITgsAomLARUNEoSTBGSJLCqZjK9TZcfSDE2TdMreuXumbZp/eTIi+/vXoxoeTziApFaB7Y62Pd1k198zBsWTlhmjBnZviuGJnZh0ZCD41sv59vbvLMn02Ncxth6tJgZ4sncxkZMllQxBCRMQoLCCrqe6IsHECJGlsWwsB/6cLhwWZpOZ0dpemyTys2XisM7ePsYE7ks85CIjHoxxigpMasm5dMZbwRRZAADKSYVQEIEYjIEICISU14Wb3hHs70BmKzjdScxETmXq+8g77/jnW/5g8/9MTX5mQvOaZ5iUOZUV1//njf97kc/m9QQKAGoyhrmKEoJIBJYREuYokY4pVcoKig4kIy4B5ABoJ428yXCgKAKQdfsdTVrhSueRm8Oj6cAzvRouFX0R6a30QNrUQHz3FqHIRqk3GWIRlMsRFiSWy2zVUhdqCcVFM3Kx+Q7vxIT20E/a7rQNhEsQ5IkElA8xu37YDQW64q7N2NbYdcItO2oyItNV4nX+erseLiOBPXH2SjvR26FlQwVDlPC4HE8HBZZfz6TDLLZKrRVmB8ej8b9937gte/4uq+78+QfVb5+95se/cObB7ODAx/D6ubR1rn8jW995Kmv3Dieaw5uczTujGuibJpoN2Z5puq7VBjfaNcjr2GIpl524zaY1JIhSByqSgNa4rpqKc+tZZfi+XMjNBJCLAkevnThw9/8jsPZ6qvPXVOyKUbG1O8XAOq7oMTIrAqKkAAwSgJEpNe/4cq9m6Mnby6fPplfvXXzqafrYlROKyG0m+OyWrYH1+/86tdunCzSeLu8cmmDIDglAC6H/d7Ijna2qlnDNj93YU+zoj2Z3PPwZVdKDJmxzuTDGLqsyNqurqsKvKK1CIA+kcGMnEoKUQQIMcQQ6y62oVVWA9yEdGZzp1pNsmJjUc2IqfZJJfqY2FrIbd14DME3MUp06uuqy3I6fx7e/87LX/jSi4cnN7fL/smdBrm758KZk8X8uAn7J6uCYLVs6qg7u7suc7dv372yy/PViSTdHo1vTetsWNaB5yfZ/NiZIfFshs54D/1ysFx1i7vN5a0z5tLOL/3J59xmORgMi81zk+lxhy6YeGZ3vJye1Ms2F7x99VbnERKVObVpefs4DEejq1dvc61Cslj53rCnDL2N0hTLo6MpvtIDiEivtImtv0WGtfhKuqb+EAqKgCLI6RNYQJUF03orWWtMikqIRAbW/WfEp8gZWi9JlTXVjuAVlTeikkpCYlpryaetVK/A80RpDR061Y2UcF1zBgAgKYJCUtF1A6eKSkDtQBEhnCL4QFVSp80nP/+L3X1vf/Dig0VeFqy0mA3v3O7NJ0VKGABCABBOkZKAAMgpRShlGYICCmrgtYEIDZyKC6qiwAbJAKRY5nJlK2yR28DooBQuucq6oM9fa++gbTcEQI1NmoARiVEMEAHSGk5ARGByjAFcsabXgmFlg+s2d7aKvMayKQgAwZpiQgBkxJyCo1AFIapFBcVEGBVjSCkSOlTWqIIRRRWioKIziATYwzTv3XgZbt1iYI+e6rkdbobaU6i48xQw1Cts29R2qOqSSIxI5AGpnwslnwRMrtYpAmokxASeOU+Fidv36M72av9p6xwAimhGkGLcHOCcYeG7Ju+1JW2Ptu6+dCiiioSAIpBSsmad5gKDqCqiwMCwRtWBqkhSWQefLYKqIBAqkGqSqACgSQWjJLZWRA2hCohCAtUY0DpCIlAijOl0L5hWfth3ItAR9/tbxjARJElM1loDZFAhhKhWrAkmM4QYVIUg
qIAGg5hQRKKqErBIiAoiBNARMgMABUA6zUqv18BpWQEbC7JehsmvoZAiiYARENIpjYuQREQFkiTSkBMAknF2e3v3ocfehm7QJkEKaXL9j/7lPx8fzHaDjnOrkuxmESCEUJfjjPzx9DO/2+NvxHObHZA0ccRMi8nyj/5w+se/H9vu7mKyU7eAiEa7eYOKCRXCbDm/W45LH6UTlShaGlFoFj52XRebjDJJqfJLErTGrVatKrPLwMjCR6a8QGv7m5QEfMsEmLE6GwDZGtHEwx6AESRGYTbiGyyZ854ERLIpCjuEDCEhC0PbUC1UuthmoTa22OwZAGkmNZ88+/nN8sHlyc3SER7vh7sHMOmFu8lKP9dev793d3niZv1SXXjqxennvuS/fHe8czFz/Ze//LX+2bP5ptPlrH3xqk8yhRhHu+029z90uXvyicHmZVc9kJldwPnk5l2znFmI4aVZbrOtoaUwH5RjcjFpzHvDh9/1rrQ8/uLHPjoY9sphCYyrybyZTpY3rsJiKV4JbEzrgkNkhBCDmlwVQTRosqoWAEEgCabICJISKKp4wlxTY4gQyKQEsVWNPoQYQ5IgqMIpI00QCZGJwbIq5sigmGJyTECgKIkSU0IVEbEIRKiv2IqS4Zbp5vFRA+3QmBvT1bTtauYLWyPTymZent3dOqmb8U7mCn3m9iTL+eyoN07SY749mZKR9ePyaDVLMRBhYOqko8SJXWtZLY4KrhfdfLHaHNl7drZuHs9rcULSSeiXLsbEDrdGA2k8D+ykqmLno0jndTpZnCwWmpXU+l5mN4eb02oxGvdLwxritGmfnh0Wls/3ew5xZNzYDeuuBu+tZatJCCUkFgCCRDgP6aXbRzeCstC86s70c2TNDUUjb/vg1w0L7iaHOcXbN14ebe0V/bL17cbG0Piws3mFyNZ7r8oM7z/15Dg22Pm02N955F2BKJKHoe77VfF9fxa/6X3GN/a5O/xTP413bksdlt623/Dt9Ng988nhbof1sy/SwSEkMDZnI37SlcaCGcUosdwp3vEd1eEv2fqQGVUJOBOgZLJodxJscZ9e9ez1uz/6o/lA65fubDXbkVgZTc7WULPRKx5+/VXKr3e+vO8+vLnw08Pe3rjMMpKuXnbMKlVsVijD7MbBYne8eburScxi3mQ9K5B5z4O8B21WYmzrbtjLzp8ZVwuvHsRHilGJKLOBCQ3yAId51kZftQGsSQZ7JtM6VW2X5wwBAyCXThBvhztBKyTABOO8vDjqXbtzDYlzNgh2Mx9LwCxjDyF3zjloMSUJzDmCGWS5+jbLCkTbUNcu7rZtt1GMcoEtys70Ri8fHsYoCQAQrTN1k+62od+ZjZ55JHBnHAMMnQFBBlUmJAYyuM5/K8ZXSL6nUhEJpEiN910IvcIGxSaiZgzZIKDGTHyXjEU0KFHy3GIhjUm27/KOrJgmwig7o166DobOCnUbpohdbRWszbrYCRIZSik0XjApgBqbFa4wiRlk4qsQqOcMGRBmkGiZSV3u8GQ+ffVb3vnXf/xv3v71X9l/YfHJz308xmwi7jV/94dvT3X3ztue+ulrtpPcYvncja/8lb9bSDrbcR82WmgY1FmnIQSQJcQpmDpJ7oiRpEsW2CMQYK/M2KfUVnPOLvy9v7X1PX8uO2zu/tgPrr78Qg6GAdSoKHHISsg6iDk5FliBf8sHv7Gb3kknUx0NLnzH9zz3+B+SBydpCfHsX/vR/Ae+u8rGuOpt/trvVb/0/+mOj0vjNGHZEX786Ruf/OpjnGnnlSUrnO866OXDH/ze8i33xd5GQYPB/X/EP/mvhkVRTcmRJu2SsZsvPvMn3/jY7oe/QW+1r4pg2FLhhlvDdjbNe7mZJvnIbzz9r35Vh/3hpS03KozU8eUbG9xvIyOKHfAyptuXyt3/+oe75Xs3fvrf6B/85uGnfv3Cn/2+t7zqQvfC3f4iffnP/KVHv3OnJ2qOR3t/4c+Ze9//sV/++Q981yNf+5lfyl6ev/3u5z75G2896/PNt+y9n81/Xq6qDltnyiGIgB3n+yufgEqLhYVOu6jJCAwyp5CCQK/oZ0URQZktaV4hrxZ1Rq6TDCNVi2net5PpIs97nNlqFgnZBsmsyTLe3NgYWBdabbruaDY7ns2jYWHYWq22XGkATdYmEQWk3F7ZGAS/7MK8jsvMmSLLk2QQ7QitgWBt1Lhs6iYDzssi/KkstI4cEMJpm9l6vLwumAIBPS2xQVzbOtbmESAARDktMF77p4FeKawhFGXJsvLOM1++8kP9+RHV9ZIpi6ASOgvQy60ltSqJQRkSijUGJQFzjMqkquAQOk0JUgLJs0yTSoiGSEXWWaOAiqWph9tdv2exYsqK3kYUQES2CCgE2sszIymV+XLU27j3IdkvaXqosgptOlzNh/c+XF68/+bnP7sxGF/9ylc3P/D2Fw8uDLfvq6bY5+nq7ktlQAhdjBGtYzYKCVLKyUQfmBCRgvcE0KVgnEFLYlQJQ+xyaynLsAOp50wd3Hlq8KZviP1SXaxbCQd3/WrCwG1lN8YZqHgf03xu29XhtQNY1jJfmJNZduGMh06iuryIKUhKEVX7Pdvf2NbcVfuIyqEVCJCRJ40gDqJFAhEUsY5AVTQaJkTRlNZnXkAE4oBQvP69cvmNhrM+rFSKpnyAqYeDF2X3BW7+BE+ipgQhKSoTE1FSAQxIACiaEgFaQA2JgWUdRFRFFWRkQ+RYbCZn76nG5xIhIJBxMQaJIYUgMdjcqG8Ju09+9svPvvzU3/6Jv0xFxAyTxHpVzbvZ+Qtb+7dWRojXzdeEBKQCQTVEIUPWoIKktK4IBkK0iE61h9CzxETE6FNcJmkSRFzP3fEVFtcagXt6K2617vV0fG6wsVs4w2isdXmROzQFYmYEDFlnSgWXRCKiaMy3fJdSCqFbNtPbh8VOn1+eHF6bxIU0rXRdSmokaZQUk0ZSSNhCbivdye1b3nGvyQd3DqoB2ptfu1vVcaMYl8NRs6qOT+4ipK4Ni+Uiz7PxIImkzOqgKNvCGufYK3fQ67uT4wMR+rqve/sb3/v+6vaT3d2Xt0ub4exbPvCdr9+wTz39+L7P7n/1G87sFHEy++A3ffjcmXuu3X7pF3755yqvflW/45HiB3/khydxk7c26tT+3kc/8x9+9jexhb0R9wrdHZZ/8//137ly686zz3zyV37x0TffO5kelZeuPH9r+ckvPt81kjBWUYj53KXNfe9/7nc+Pp/4Qv3AYgOkxkQVEFKmJCLRK0REWPOQiRkJXj6+8/ZXjyZVN6vbcjyaD4pA+sgjZ5pJ2ywWr35kawfhy88eM5tJqy997SAndU0Un4bbo9GooKQGcGdnM5iKi/lj91/I2WkIlgwkXC1r8KFbreo4921zZuuCs3kSz0TWmgASE1e+SoLjwVARfRN7+ZAsLabHeV5MqjmoR4153hM0ErrOr9rO28QYIwDEGI0FtG7hQzHIj5Jvlgc+TLBc3nxpdtjygPuBzOse3Dk6OU7sOupEIlk9Ox6MtoYksnnpjEnHg41yc3tnuj/fG2++fGex8jB6/eb
gDWdvvnhdpke9IqtO6lA4MfmwOMuF+YOnvzjaGFJJ9aqZ3X65i+ojxGCPqI5CwBkRnUyON/KLKcng7Hg2b4cSN7M0oRCzPHg/2tn0KtOmgfObj7zu8t0nnu8W9KcOUz7VjXBdcwCIoELr/rBTcgwCAREZEVVVgphAkUWB9ZQQjADCSMREay/SmpOGup6er6G/EteST1Tg03zb6f6iSoBExKpKoGtfIcJ6MkEKSigIa3+TrG2v6xLPpGvTYGAQREmaGIIKiohoQgMJzGx6+0tPfULqxdndK1eg69++2jucDXxASLj+iKpgipgUcH1JN2ozyUpKkbsIXQcKwAxKIKigSAhsERUEgBWxseBWH/3j3LnxzljylozX1Ke7Yw57MQFBi0yCrCmRAoJFUNUkbIU5sUWXGecADbBVpDVZB0654QggGAWQ1r4hggQKIoycAAQkASCiSZQ6zsE6s6zdutPBWo1IKYJGVcCsQPCqgmURfYup4RxBS5ie2NnCUQcgaT5TZ4CQ2fgmqiRyrMLed+iIyIBy4QwhEiujdM1KMSMmJCUESAEiWrZNxj6zvZ1LdjIHOIl1SoADoknX6jBl1t08mFGbexz087KqvUoSEAGFdVMYEygkQFE1IIQMp4l5SCBREogwYpRAZNeSWpC4NuYgSlLx0WdrqKFIlJAkEQIag8TEiJLWjXuvrALXia3q1cbWxih3BEqgllnRMtskiQmKQpAbLyFH7aJXgqh+/Z4okqwPRqoAUTCKEgIaiIgoKSAjCAkjiAJaRCQmUVQxSMyGRdfvtBKiGmBjABkYYggiBkgAEAgkBUgxCBiLOZmzVx66721vn4HNBvlhdff5j/yzC0+9vNMKt8kbyDPbKwrBxCkrHS5eOFz9ws9u90Dtm+LeJZj61Wf+uHvuieZLX/EHh4xx8rnPjz/7xxvvfKf08ygAQKGL6dOfePHf/OwlQz5G7yxZrlNQYF+3Ngqiel874xjJx8ahNapsrXW2ic3GxW3u9U0dOSXoavAdNIa2NsVllIB7A4gRsgxChC5yXioayKxYg9aQMVz0yGCCRgNEUWdKBS42eyl1BJulKw3cOT552VoYOJNevL4/m442Nus2L7P7Br3Xpk7MvbGX23Z/1Sy6S9mF8Jm79oWPdXerHm1vPPDWVE8jwfnde/2XnqciQCPLmzfP720NMo9x1ss29X3fTm99i2ny6S9/Znp3cfkNr9298qr25f2TF1/gA8hGo7Twi+NlW7XnH3tDcWY7mHxzY/SWb/xQ5+HWE19sVsm4RmPysxOo6lh56ZJxliSyQBIhJCQDaBSsKK8VG0JhiApJUstImTOCwZgkUDsACgECoCp0rfgOYguxA5QgyqrOKNHa5g/MLIAOLQIBJQE1WYHWsrVkEdfCqUgIMb4yPKvbqhHaj00/J7Z8UrUN4irKjbvH9/QG953dXTapSNjdnR+3q+1oKp9K28XaeweP3X/xS8eTg6qDqj7T34DSxU68qqItS/YprlqsRFtfF6Rd0y1gNiyz+3eKvZ4zRIvVyjqusSuI/LKOUbXuNErTJDJYL5umE07OC1YacshuHR+p95uF2xj2r714MzhsFZoQF8fz0tLQurKKFnTAZojMqoLeOoOoVddUbVigfuHmsVVKPrWZaRW4i0ftYjToP3blYTic3Hjh+L4hQlEPz2A0aTWd5YbD55/NjlsGWjz91XR8zPVSQvBRGbwUT3mJUJS77/wO3TLtWx5YpSP1XTBVRlUmSQ2a1z545if+cr3N29LaVZKXDo9//ePwuSfYV1mPJ7/085vz5caF/qo7TisXPvGZvKmMYWAltRETDgYy6KcgEBs4CRjl7FEeTVfCGKzRkHxTA1hKTfvifvHlJ88/9tYudqA+xulgO09x6YiKskiqbgC792zp/dvHQS88cGXrq0+e3GpY8fxw5BVKx31nT47aWdulhLtbezFVtnVF0KYLPXVAWIVQZKZuYzSgREW/385XTbdCxcLZjUGurdR1VZRF2zYx4Gj7opRmMj2gGpASSeyXfWthVi/J2dUilK5IkQi4ahplrFKVFxkAUhRDvusmnA0RIQkSq5osZXlueyx1pjYsw7RaWDGbNp7bHB03kZSOiA5CSEd+1xnjacNmzICkGDUzDggVCVE1xSBqiPWVuptTqajpUh1aQAQgVUBWA8COKc98CGDBADgwBKAhESKpFGXRpZgpwCqI8LHv8gRja6ahtUSspuDMaydJSNAI+jpCFEdc5E6Dp5AsECkoSp+RmQ1DFEkxZs6kOoqPMQU3PPOu9/z5O8997emP//o9JMN7Nw4X9Kbv+dBXb9xZul7xwCN7jz6w8ezzRZLcMbZSuNxlWScAnIU2dZED4AKa2yD2/KtQZGP6QiG+cLbPZRcDZkwIrGEF+fYP/bX6G79vlW2U5f7t6cHQslGOCF0Ua8kTCitF7CQFkL3e1vwu0fXl4ezl6dhd0qnXVCRdaTfNikvf9ZeOyzG3zXB/cuPX/8/B0WyQGe7EFXny3lg7tBpAoHC1b9o6YEKLqf3Eb23Y13Pmnvvl3wnPHVz0OQzK5UnVCIFll5tBpLffuDv/X36qXdk4GmcD16QUVKfVcZkyx8N7Hjh/rp1rveKb8+rZOwPOnfba3KFzql0K3mos9m+Eq09TK1fcPdwe3nMcDv7uL7pA4kyWZa9DyP/g6qqqH+Zs8v/8cNowG9kuvuoHL/7FvZOP/OsLq2Zsxx3o+WN0Znvnygfe/Ld+Mm0Nf+6/+/DLR5+FXQKJXQDjUuyW5AhrMQqSonNQMLg8W7R1XhQBcNE1iAnBggJl2PlqmJukslzNc+tWvs6dJZsZMoOeKy1r17VJl8uQIIbk2amgxBSWsTGxKhxtj3pto13VDW2Pu6pqFs9cvxaVMsCYvEGOocstJg2L1bzfH3LeizEBcq93mkDDdahf1wrQaWOsvlJzRoj6ytWBQQ0iASCAJAVARjylTJzKTEpIqMAsQmLUndw4+NhvfvQD3/TOxfQoSURrTVbEusNQi0S2pum8WiTAlMQgKAkYFEiW058aZgXZIxtWg0gqAEREKUUD1LVdf/tyDICMBixEIFKT5RITIMS29jGRr45X+73XPnYSS4ypLHPJCG1qxbh8z597XZM9d//m5vOf/Giz+MxwsHHp3MPzxz8Z4qT0kSiPisw2yRrbhKCgImhAWRESIRpHSSiJSBLjGAwRmCSKiRDAZsYYb+4+tfz9/y1789ennXNdctE3bT3f3js7LDdTOxevUPn6xp3y8AQW8wyi+lA9+fH++e+pO9WIq/kKjah4Y3tOU+aXOHnSP/u4m81xfXMCBeY2RU0ioMxWUyeCiGAtK8A6mgVRyfG6cZmZYp53XcMyh3benBzo7r2z1XJQT9mH0Cajazg5gaiSqiabK22JKVnA4hLCKqQQcqZOIYkCMSFS0lM4rrO8u5mu3OuzPsl6GmqynLuWRJK0ASSF1XJ1fEd8Mc7tJz/5yW/6pnei4dS0bT0/un27Z9iuJ8IAxpwO8NeapgCIqiXKCPw6YwBgQHNDJbMBiAKKQoI+qY+SkhKIYV
KFTjXqaZMfvHI/OHfPYDAuy80BkiM0YPvGZIYtYwZClCTjHAIKgDXWWk4qnSZWryLO4WC719/NcMQYmuNbq9j6LLfaRWbS06yIsHC2ML0te2Y8RJ+4aM9tUtFx2B21R1Vb1eBCQpmtasOGyDBbBOkNzc7OOCzrHtFSlTLV+arowhb1XvvOd33ob/7EuQtnKeqLH/93KTwLQxhOcPX0H7nZ8btfe9997//zKe02s/3snpOjZ6/1l7fe9cjoXf/oe7/yyS8+8cnPfOg733hms5zczqTdPLux8Re+883/9ff89T/4ld966jO/Mprtv+f9jz7y6osvncAj73/Tw+P41U/81gOv2n71X/hzd1Yb3788/Jt/828vjsP2cNR5bhYzE1Ns6L69zfe96YFbk9UfPnXzYInIuFq0ohYMpxgkimO0IJKUNPUctlh86ku3V8t2kNntgeuZfDH1vUXMghZdikerucNxv6gmoa3re65sRcDJzRPK88aAr9uq8Sp6Y7ryHVw8a9/92gfKYZEVeYqqCZ3jJjYx1G1db23sdm0TO58762OThCQqEvdMwWRVIUCKVDu0Enk82qibatjfbKtV27QK4hOkEHtZaQkAOAKsVi0g393ff/bOzVurbnjhTG979MCO3dooau1W0ppef6XVqx++t8m68w+e4Xm9fWmvvjuzKvdevHDnYLm5uXP1pWulAzcYH6xkVcVb047K3oWz+eHx3Wp077m3vXM53rnz+JfAhF5Ptne3r2xvff7qk5nDWetDfzCwslw2Wa/sWTPxre+43+855gziMBss59PU4mGKzurZUa9dNSWbg2ljE6U8JY2bedEcVk9+puq3MB72AI7/L5PpKyX3ommdBmOkVxoOgJEipjX2WpKCAqqus2qEtF7na4zwGmtNSKhIRGsRCNcoO9V18lkECClqIlm3XZ3+3LojYU0gQoB1W/ra7IqwnmqArm2Gp1Ca9Q62/jd2DZ5Mr2RGVVAIAZKAmIynzckzz31efHtRYHt5PJBohERVJBCBIKgCnTKYEjjWvKcmR0wQPNC6zm+9Z65LhtbzlPWghNANEM/sujfB/kSvdeCDGW4Ea8mUkCUgVAKJQYEIiYBfeUECKGRMIqNCKSV2rGzBWiALSJqikq5ZzsKEzEoAaFCCIikZQFG0lCImUJGWIJ1/hDMOq2cgVsoM5QgWq0wFmVQVOVcFyLNOvYjhIoudybwFNNh5SB0Y4pCgC5C7qI11ZVBSAETLRru2Y1ImJIxMFhElJEuZIAkw5T1CXouMKYG1/UWAohy7i1eyl2+4pvVNLLNBE+Nqf1VnHQ9MudnLTCZEhMY6ihJjiOuTeYhp/WYYQmBGUEYCUZGUNKYYVYMiGgB5ZXQgqkScUhRVIUVQkAhIqEpoEoAxCGgAGBRVNMUAr/Ap2k5FQpYNc8oRYkweA5GzxALIbEiiWGOjBuucS9EyiUMBEmRQjKJCwGoABIKiKiOTAEhUSWTsaZ8skjIJIRuTVEHYrmsXYtQkklhBRAIRMqloBETriASSaOjalFJKwZLG2g965c5G7+I9VybLVba16aslPfHF8nce31vIRuBZxN573njpjY8895+/5A73edUpEs+DrV669ZF/vvmt76bXvi+8eBI++8fL6883d2foDGbY68LJb/xC3tfiTW/iLDeY8pPbT/+Tf7S7DIDcdkmY0YBlXoXQtywpZi5PmDgzqoSGgnhr2BBq6zeHbsiQ6iV7AiUIAUabPByo61MvJyAlg0UJzkCOhEYyB0QIlouhhKQSIbaihGXB+cBhxuy0WSEZlURFT4JAGo3Gl6Iv7t8+U7eqWxeGe/c3S8sszrQe931zzKxmg0clkqF0ArIveX4x0cAnUWDLGUquh8HYrJ00ru4nszLhpFkSt9vcnaRU9/pu9P4HutuLyfIIOm/Pjgu5N3cwP+kwxeao7tLq2ic/d8/2tt3rJ+Thxs6b3vMNcT6f3Xk+rFa2cLiYGvFM5AkjirIyQAZABIkYyQjQOlKrIBq9hWBQUAXXIGom1piETKwJUEVFFINniTE2HLsEooDCGkGZkYgMsTIDIACxWmUyzqB1wgbYEqJqVFFNKYSY9JRhOsiZUzo7zpzGnWE2W1Yo6qLmaqqm3p8couB4UFYdqMByWQ97Ga463/m8KOfLOPRMJhsNXY/Msgr9MgdHGJG8KPNKKGoSkd1e6ZNYhuPJvNfPSVQZt3vFYlWPctfLXAKDGOfzRWJqfChtFimCUUYMIHneO7N55oXr1wd5Np/PVHzRz5qmspCIkA0z46KrJk2DudEqZEj9vGxTMywzC1T7NrBGxjPb/dW0NsbkmW187DMN+uWw1ztT9l9+4dqZe86/9o17f/Bv/gOG0D83dm0XXnhJ//NneyufjYZpMrfHS1aiolBGY/LgF1b80BB96nfC/Ga5fLO95+zFey6H6/v1rRtNBVj07IXNZM1qOlk1x9qZ3nbPvefN8vJNWkHUdkdj+uXfSFiVNG87tJCD5YRJQxebeTYYaGRNJSAH6aDrGA2MNxOl6Jq0nIeuJYehiyB6MR/W//vPWv+TTpocNyg6cX1rU3s095YX910++/2vvnLP9lNfeKa3vzQI8bjNW3VWL+wOn7s1P575mdaLWde0MXMm4+Uw1/a4UqLcmrKXJU1l8GxtCFKvqqzIl/NpYcyozOZBYtDVfMmdjHtlFTuQmCe7nfUO/MImKJyNTF0tO1uXXjw8bAijSsQErGDYhwCJEO0o6wOAM1k0edcs++U2QW4yAGtSSmhs0P5quTJt2y9MiJIIMWGvl99zprRHK59oJbGOcdqFAeZnsj6ntUyPwBBV8LSuCVWEzNq9S/+lVOTFoo0+oOWoQAhl4ZQpxcgARV4iBgnJWCKyMcUesQaITbvBvSa0AbSTSqxBqyOTE9CirgQVQL2PCREVm5a7OglBs2oGORSDzABIirmlHrvQdr2iqFNWN601Ji+xQGym7d//7/5etpNLDbpY5Xn21DOTb/ixH7n8Xd8239o8mhzvPHa5l+Yv/vf/C1QeIRWGiVl6bnq86CsXee+4bW+BTw88+k0/9T8VF15lbx9/7Hvfl6+8iFYxWbYGLcZAxvlH39D7kb94NC4dxK29IW0PT64vAMQaIqKV70KeR4elcC9RWyfX3+J7HjwsoeONvQcedON5rRiFCEvd2BzYs3L41f3f/OXpL/x+7+AYgbsOyHDdNV2ZLQAsGohB0Igai4yIutT+p19KX70qQq/rpJ0hODOLbfHqe5Yx+f1l3rU9ZU0J47jouWBcJ47Pn9+//8q5/+Yv+VtHk088Nd+/PkCwxoArs3zgI1qQxMpElgQQLahpZ/XP/3Q+BX89Fpo1izTCXPvQChY9CuzbBrS/MZNiOM+bu3fe3YtH3/9DZ86/frv/0J07fyRvv3Lpuz989V99JC7IHj6FLzx+54vwPfd9/S89/cx0Jy1pkXq9ug2ZY8DQxdbkmccwGG4MSlq1k16RJfTLEJYN9l1vpze6cXArYtzKi03bP1xMyLlkQHw0rIOcbGoGECHEJmiVCNC4vBj0Td7rRcWq6
aRZ9AeGqEAOZNQ4GgyLouTpYvnScoIWo8ZVJURU2FETqpA0cb8Wdhza1GKyWa84lYrwlQnd2mvxpx0zrwzZ1oAJRmBCc2rNOIXgIQIpmtMeNEJQCySYchtf+03fvHfPa77wq7/6W7/2s0PTnt2z5JrB1q5R3KBo2hOvXsFlhlUiEzrK1K+ihVDmDMaR9U2AKDlzkmRiQEzMCiEhOWDrDCBwktYUPWKbubKrfO1PyBkIgUiCw+libpTjZLl8+c7mIx/I7bDhozR9lvt0xmH1pY9BPeujMfdsoIFzuz25NS+yRfe1qyNjIUFAEKkgJTYWVZJGkJadiRiwMOtcEwElCWg5wyylpD5KULaWYxDU1EZiAiTroXzqKZmd9F7/tmz7Xj88A+ORpCYe3EJNkg2L3l7P9dqjE1tXbBLE0D3xR3jlAXf20aQmiZQGsJ241dJNbsBLT2Yn+2Y6l6ahzBpK0XeSJLfGmUAJGBPa9YFZAYABRRIahwiaAuK6pjktrz5H974GxsOKVlUzHPa29vytxZ/8tmvbPMQQNSV1lkEiESQM1daefeNj+KrXARh+4Zq8+LwevSyzKSQgRKOaEjASE0kS6YI1Gbie45xNHoxViSrkjNFgLWW+rar9qWt8P5rDFMqMgnTddIZsyqFtX2z8srUCyqhyqlyuSUOMJACdgCFlRquKqqJoiYwqQiIyAtpGaEQ60KDAqiVBwaCKC4WE68AapnR6MNo6M8wya7IcuHDsyA4xEYkFYiBFzgQQGLOswNQiiI8ri029qGLUar4SayK0hmBrq9ctmm6aogeA1HXJAqASJTTIq9tRG+hv6nhzfumc7u2dvf4n88Nbi1in7QGiwy6FrGd7zjCbjcFgejjpWdkb2txu1VUlXL/20Ycv7V3ioK9/9xvLe14XB2emdbfav3bhwtasvfTQe77rud//mB6/jGV/d/M+f1LV8xdzDTjcvPy2B22oV3efjqvZleG9r/6etx/M91/8crN5/q15BfDU47MXnjObG+/fvfd7/td/dPiffwFOVs/+yk8PX/XqbqFHv/HpQdfLdjauf+7kend0dmPwt7/5PU984fOs+Qsvt5jar3/vI9V84UbDt3/g0Riad7z6rNgLN45vfPSPHr9+t1lGgRhNniEKhhAFcsua0o3by80Lwze85vLLR7MbdxY20evvv3j9pFVpLmyWDz927mA22ziX9Sbp9rQ+mSwWVSCR3Jm2qWw/G25lEpSSLXr5t3zTWwcbuUhMnSfOAIEghdioSMll12mWmbZpkyYFj0ggzAn7+aDxDWg17A2soEoIqa26iGhCatrUJU1Itu2WGaGPCuBjK130/SJrPb507dbHPvNcZ4r68Zue/f3nRo/cv+uEH33rlS9fu3Nhe7PK/TyFtvWDfhHqRcR26VN944WdbOMLn/14/+zYufKF567tXrhvdPbMMjsx/W0b6/rlWxYzNx4PL13c3C7jtWcPXriDdvXEnSeOVpOyLIbbm8d1l1n76KMXbt+6WZR5D92q04M7q16mzoAGHRd87uyFW8fTbh7rudSr9vyFixv355PJSQPduf64m3V5Qmp5Ppe6mp3uBXTax7IWjUiB1t1RiGs7GCAoCjHL2j7BqlEY1m3LiMqkSkivTBpeMQMJvCLvAICCKiEQqEoioDUROgEQaNKIaxONEoLKK7HTdRJsXWMlgAoMogiKyGtyvZ4CsDWJaFLBJNIpAmoAMZIUFSSJQBJBC3w43b9n82LeQb9tjA/i+U/lLARhOG1ZAyAwOZicRSmqkkNHIAFEQQSZXultJ6C17UnTqorHC5GxMQ44sYnQgF0rXhJVGzRrCr9RVQAGsgCECAwIMaC1SICWgBElQUQkA4yKVomAeV3qBkQISTUhApwiu4NwjqoECUiKVFP9kq8bDDMnLhEF8coMQpA60g4gIXi1FqwVaIyzkLvkHeUFbu1AakEi1N36QEASMSRDJoakyGpyN84ldhKTdl7VGDBsSIEZER1rZ1MnWVlC6KDWtKgH58+J+mbS6+2c6e7esYrBQP/SA6/eu8gkn93/mtneG23eP95zduP6tWtPxSCklJIogEEVgaQKhjlFg6wKhKQqmvR0SCUiiKgYVfEViD6cekcVU5AuJraIlJIgoagaZmIXISGb5CPh6V6waLsxZMPhIHN5aO8GyojzpvPsSD0TMRCqoCoQYuFc66ISRgAwpGBSAkILKRoSRU2a1s2wtJa2UIwFIhYidDZYjozIhoAYQRKiqE0hdjVCEkFQAVASFIAkKqBJokBKELsYmxBYIy7bqdVVNclWq9GZfjo6Wv6fP/t6wUFul6PszAe/be8Hvift7l5+6+ET/+M/LO8+XVWtb3xZunD1QP/4c+7F/fr6fn3nCA0RaZI4LDPfxeaLX/j8ydUHfvzv9M7caycH7a/8+961k4SZj9Ib9L0kv+rsAAemjFVVZhlESp1nA4oSbeYGPakbQTMaDWM7q48WxdaujrbTxlarXBR9BEbAED2XJQ3HQJAIKDcAqDEZazQQxIgIpj8UAjCOR0XdtTbL2CAGTjEmUXJOYx47azfuhWXB/aERm8pNNWMuQ5JWEHi8U2wMu6Pr2eZmPFjSsuHBHmdWy0EyDkBgmWFqRDDrj7Hz/dFwNVtkC735K/9++8d+CJPH5W1CSnCmfPDN5gphezj9xJd7ZdkufBObuY33vvHNvbv7J8f7d69dq1/62ubeZSUHbM5cvvTe7/zw7/7Cvz188RlaAs7n7CtDViUSIluyqCFSRGTrwFkybo26XtdLogoyMCEzE4E1Zi3FQ2gZohJJTKTCGhyGRKIxRTSdAAE4BEqCnNaPNsMIyEkdu0zIapYHU4CuRfEImGKK0Z/mDULbMjKGQGSmVZdEMyYD2rdU+3DzaELG3Gqreet7zvYG+e6oLBEKyGNILnYPDHtNW2/0C0y8OygkpbaTRLiofOe77cJNfDssSh9ErTSpMz27Ak6p1SYMsmQJkvDJvNoYbah27JIts/OmSCmsJu2sjmJswZQSXL1zPUafk+HSGOoGm/1qX4LodFb1ymzULxvQFgNmHBGWXZj7uevZhbQZ2Y5AU7JELsvAEQBFxKrrio1+C9B08OxTT+XGXZtef62Mw0n18qe/sH15sHX2Ynj6Wr+u28OZTD21KRdHNvNVQNWO2+yB+7H0TGW8ebTVMv3217D/ZF038XheJO/6w4Bh+fkn5N/90sbXv3V/OcHRblKw+aD2KY/gUBNo2igyd4bBc7tMoUWLGDOuqBCDc48LwZV2pqMiDwU2SKE/8hd2Oa/5+kvj8U53uO9nS3RDGZYGM1svM66iTy1H46ykZKk8zjP55m+v3/hQlarYOzpztj+5fXJ8PMv7JWq7f+z3V3D1uBpYsEjCKgaWdbU53Krbapz1F9WyMGXeM45s6xWN9AfbbdcZ4tziogq90vbJtF1DEaerWVEUw2G/r/nR8e3tndE9Zy587frzWWHP7t6DUN458W3RW6xmYgg1OdE8y1PnSSJFEiRAieIc70lyzKSYmBCihhDKLKtaO5/Hrqr7ATbHxgDd9N30
zlE/Gx5NVx5ge9jLibgNrKIApJhxRoAxBUdrVqCAXWuvEpL+F1IRk4IKW1KCMs+WXdXLCohY+2Ct64EgBI8qAKoUE3UexpBZUhSHViHEiLrS1vewqcKYs64QjCk3rB0l75tVaoMScu0TG3EZlqCl46bxKDa3HEOCNhSAjtR18+z45L7BdkHmwpe/8HJz1Ost/s5PvO/jP/kH7334Yjc7CT7ZZsFNOrh18AAWy7lsgwWSjripa+06Y4wKzNrqa1B/8z/7u6MPfve02FzOUldN984P5NoKA1gAEZ/qrkt+Avrgn//h67HrL9uNYbH4T794+6tHYyyRISRJWmf9ss4GSk2zajFgBF3W9WU7GEt+4ewIbr/0+b/3j5c15mwcpnI6/ZP33DdI3RBgE4xyvyFFxkXbybmHxn/1Lxe7g+oP/nj+e79tV7OeQ4Oo3hgks0S3gq5tcNzLMttG1be8Y+dv/5Vioz/7d78s/+mXtO0iJE+iktRLYDL3PnLv//j/PeDq6GjhX3vN/OQ/LBcvagrL5coNe8TkfaeqwQMTQBAqijyKPj/pD0oacJh40UQ5eZPYZsu6W9RVttH6zcGDH/wrh5/8DD9f4/HkngFMX37q1jT1sGpeevHmz32kWB72+sNLw/b4137ykfd/2/Tw6UdN/sz+3SuX3dWuNRycY6UuL01jcTAobW46qZnZSwwBWA0rq68XqXFWAUwgM4ldC8mogsa9rZHUPqWu7uq7Nyu1qIokVJR9bVfWsWFGZfTi0AbRlLr66G4C0yu3il7ZhHR3cuKcC+QxBAuQYorQtEGaJGshB9lELIte1rT6yhT59Jqg655dXLdcrU1G60I0tEyEus4WiGJCAEWLwAhECKJJ1ygSJIXMwvvf8+a3ftcPpFS85cH7f/4f/92PfvT/+Hv/5J8u5rMgom1rJ/uwmGS9IgKAoIlJUozgdDTqLl4cv/l96hxfe7r56hPm8KRAdUAkmm31tJel6arzAgopBEvArszJ9BFa75kB8v5qVSfsps2qv1GWWT9X7mcLSh6+8jFMyd66wymFGlfanLMKn/8NgFQWed1pbzCmfmEVunaBINEwJki+g0TAikwgAgkb68q3vru+fG8WPdw51MltWN51SGG+YKuZmhgFLBOyJGE2qBI7JZXcCN++GY9vw/ZW783fMtu5F/fOaN3e+P2f6T/wBnf+1cdPfWZTg4JgEANyHrv2D396+9LDne1Bb5xnNhzfiZMFNEtYnECIgMC5FQNKjpEwxZQSG2AWdhgMczG2dQyzGSVBWPNdGQShS2CAIgyuvwx/8CueY7mzNaIifOrTsyeeHFQAbDviFAIiqiFVDBK7C/e6b/+h6uI9wW6UqkyXzeZle+OzzZc+hyvPKqII6XTAb1gwKs6ndPBS1h/p7qVIDhmTREcuyzIRAC3GG8PV0QuP7I5WRX3+wl5zvMhLXi3r2Wzx+a9cu3tYM7MKJBAQRQRCdQajqlcAxS5qxpgzhaheFRSFsUnqoyhpp+oBPCECjplzTBZBDVhREginn/jTLy9SnnI0AA0ABAAUDCJKbKxhhIAkgJXIsq2aanUSpQpeBY3JU4K2Wy6GuR1c6Emdjq6ugo8AYkBIgdYFSuhU6fBu2rlizmyF1e3pZN5/4emj2cQT0Pb9Zw9v36DN0o1MaYhXS6q6i32vcjJYSNZsvf1bv+9N3/E9ulrtOvf0Zz95/sGHb/t8uerObQ5wOZjcnTEODz7zxLDuFl4feN+fb8MYNvr9cmnDwZJNed+D7dG0x3l1stu7MnTlxhnuNasVHs+rP/45+6XPDBYLsgC7Z5tb9w+rxnsdqD957tfTpDo/OLP70P0nzWL+zBfe+M3fsbx+bYcI2Cg1H/jwI01YLZubew/2Lj6yG9uv1NVUlneffnFiehvved3GYyfmmasv4WDzxRt1xMIDGmfZsCMh5jszv/Hy9Liq73no8j2X9rTtbrz08lvf8Oq3Pbp5+87+IOvnPdNlVQUyMgNWGpVuOOyVo/HZ+87tnDl3cmuyqpajsshsCklSEKOCEoMPqn6dXLHWRdWQopeYQspzKoqyaaOhbLKYNV1VFD2FFGPMXdZ13oF1rlhUcyZmZ6zJYruIoW6Tbm2Mg/rVagriSYutYmBtJkpOKdV862r18tVrtsjf/OaNb3rrIxrg0196ptwYh2RfvnmCjhC0GLjz5zfurg579yGfiY9/9en773t01tKdyZQzFyBcfNWlej8/mU5vP//82Te9btXWD77lNdNuOZ3M8l5vaDfv3J3YqqauOwltvC0W493DaZYXADa2Ie8PBMK0W/Qpuzs/mTde0DiXD3dGhyeNzeKgLLe2h6nGK/fufenJF6uOtrZGCBFgDrAOZ67zmYKnTlLCNSwMcb0IkXE95dF1nzOf2nvW02hGJCRBgbW4BKe8eV0T8XSN2VkLSYqqa8TwOsaFsq7bjAkUkfTUVkp66hiS9W6DimtIK4ggIoIgEAIoclqTXNAqgFIUTKhhHX4TiQLAwOwDGLjw0Nvf9c0/duHLX0hf+i1oQGOgzKW1hUoBYwQlsBlkmQ53o+1xswJIaDghkAAEv/Y1ARtA1TW8DRNkYDK2sZVkU+iUhSCSMRECKEEkdQQEyIUQQAKVCGRUBQVBGMhgSIgJDAEzsAFkWPe4IRJRkgTGrHlMoElFgUnXvGpIEgKDaoiIwmDg4GZmA2ABikQUpeVyJDFA14KxSggRILVorSPF5RzbVgTU5tBnTLlIh2gNEzCQr1UVkNk6VBIflATJqCPmDMAqGjSafMLgDSkXVhwrE6Jw4YrJXbmhMtpKbqyD83QyIex0c+PM+75Nztw33jz3qJOgeDRNx7NZOXjuzt1rbT0FIACOURIIAxpCNSQCIpogKJEiiWoSEVBETAIJ1BKiJpDEiAJAxJJUIACCJI1IuM5DIq/x5wgpxbimub+yGxjXG3OWhehZbPKqRgRilDaGYF12OkxTMpxZoqyUGFJKmCxb6ywQqmqHhhUtBNIUFKOQoDMOyYqIxrh2GkVKZExAYGOydUQzJlAlQyn6FEGBQAIBx5CQSTUhAXJKsQuh6xrPEiPKdS/bt2dvu8Dl/tUn/+W/2vjarQ22N3x49J/+rf6HvrtNhpPkjwxf9/d//NM/9IO9mTeOakOGublzFKuqXa3AoCdqQQFoOW8oat4b8X7kq7cHh9Mv/ouPDG5NNsFWDDbPu9oz6jjjWLXBN2dHw2qxKHfOgMlDbEPwveFYOeOMAkGt4oQTFCHb1mKzQ8Je4WPSuir6YzMYS1lo2ROJMXbGZYCcxDMzKvpqacshZKX4RjmJSVQMYzISEhngnqFIQRnLAeV5pNaUo+A7gwzEGrwlycuMuuBjo6EqwUIlbDZhEJQ0QiKNqVrZMrOocV6jsSqaiNiV2faGSOitpv7ai2a8S35pii2wZw6nzgtRlw+KbU6IluvprGnkmHX80CVTz93x4fSJL5vd+8t7XiWFVeBi5+KDX/ehkzv7y/2rmxAL4OhjBtSosgCxUVCTFcm4REaJVSBJTDFIChkqKTrjrLWM68engkTUCBo
kREgJFVBw/aBmwASKAFHAAEZRBTEpWmZDoijARhE9c2ISJkIBiRI6iT76rm2a9Rqo28DW5KaUFGerqmmTs7bF1CatRMjZReNN543yZuH2Mkd1TJK4zILqyseDu3NkvDlZbhT98bC/qJaUZcPxwKBc2Rt/4to1U2SUEqO5enT9wvZek6JmpnOmglRJ2uY8deBbDli13WxjPFy10sspJe0XRdHnm6t6uFlWVdW3bntn1PlQzbud7f7zLx3bPEOJm8PhcFjmGYuC96FqY9f4dZrGGNtFmLWNKm72i4Gxvg6XNjeL3M0WVey5Ngh1KYX/H1f/HW17dt11ojOstX5hp5PPjXVD5VIFSaVoCWPL2dgYOUHjR2hyA8aE0aRmAD2A9+AB/bp5A5qmnzEY3NhgG7DBSbZkWZIllaoUK1fduvnek3f+pbXmnO+PfcpAn3/OGOfsMc4Ze/9+v7XWnN/5+djFp965f7z3zINXZkeLD33kd1771f+Un+O8t7Ec7fQe9ft3Pj1MVZw3nHvgmHlMy8Wof7ZZLDj0m9xlm5uDluRk4luIVYqRlH3Ttr5f5pD2fv3Tb15/8bE//EP+4pnWdOPyzuEDa+3LRy5Vbb3wW7voMVXLGOdIyMjApApGPhQ9aKfcTkK7hBgMEgTiw6NsYo7RNZrIMzSWNQLNcroshqP+WucwTcZJMhhcHKQ6Vg3OtzbPvP/Zw9kxjPfaO/ss9bntK/3oz527kGXN88+/GZNmOdd1xXmmol3bhZC/dXicEaRoZRYkwWTcQuBQlim1LnhOCZATUbm2HlWwi3uToytbl5A1iQPKqo76RWkNzGKzOTqzuT48PDi2eDIaDF2vapqpd1kSmXWzWlwGPnOB2IlqF6ssjNgy5pAspRhz9iYIwCaCJJ10GaCBnt1cHy/quksQAzTQVtDPsiHnpTeKkJIEYkSQJILkmYkJ0aF1YIZMTZcS/LelIgBQS+yzxtFEud/f6We9ejzvAfcw1NoZuTynVqEWwuicOWqCaBs1tSkmMXDEGUWQtfXsZLoo+gONhM7T0OOs7uqFqbYiiBjYOedi0lq7fjnsbw4EtBisc5ea/Ukn6ZH3X/n23/F9X/7PX6qe3//lf/lv/MFkY6t0w8KdLHM8+NKP/9iLn/rVb/j937H1zvfNTqbXfu5nArQJCIw6xZxYESUmBDLHH/zoH97+6B+csFZt6nOvu/nm7M07vQZdkUV2SSlomzlHqb31cz9Kh++Wbn79q68c/cZzZ9R3IIoEZUiqaxuPnL/y7S++9ONe6sJB4Upvy9v/2//cv7h7tLj/xc8/P5yRA9cIRkKnPFA3AAvEkPnWUIksc5pxe25341u/rS6z7a/7Fvimr//MX/lhmMaEahYhZCdV0zND51NtSJpf7BU/8I0nu2XtwsbTH7r2z/73ziCseQ2ByLfTiDyvjr46qO7YRl4+tn3x6bPd3e+894//YbGsB4O+64cUJc8dSJJFlwQIiMQhwKDwGLtWEvY95UWn6sHmB3O8cjH8qb/UXrn68IOPQv/izg99/2t/9+9cvfaZk/qa/N6v6z399Qd//R/T9cP29sFWj9Myzk5uHzvtT966O7m5MxocHR6+43e+2+cPf+xjP7WtKfdh2dQgkT27ALEWMFLCk2rB4kqHo57rZTSua0UOhasbyYKjQEKYOKVMjo7nRBCZjSAZZM5aqFAsiwzC2LCz4PqjpUaJ0i1jb1D2+hvkipPJ+N7RZBHrRWpRk6PATOS6QMFiaE3Gy3aUMzvfJg3Ib08cICCa/RfnPZoRAgIQIhPi28GiFYgIzMxQQd3p7JqtXimrfBKR77uLj75rMpsuuhuP9osf/GM/8snPfuoIsnz3LDaL2KZu/w7OTpxJxxzyPnWsIrNE5eUnL/zO/653/rIQ+IefWPa32+c+LrMTpwk58PoW715e2J6fHi8XdUCXwKzXO957o319Ix9mIV/PB4Pp4s4b9+aPXXjMjg4Lztubrx28+Fw4uAd1i5AKhk7BBJwTiEIgjGZdWyLp4kg4QPCcYxeTNBWTY2NT0JQ0mVNnGPKNC9mD708XL5WBuvW7BVedI0yYXn9d77wGx/cw1dZ1aNGxA6cgq7Y6AiUQC0mz+/e7n//J4c56/53PEPUeWcvr+TFe+/TZooZmIUxIBIIJXVi09NrLWVkkI+lap0QGGhM5BGQysISKSmRsZm3y3juf43AtPvnb0sOPScrgc5/g+S966wS5A0U1Qsa32Z9hbvLcW0MEozuLaR28G2gwVS44agrem2pqxTGrz4w3sKOc+5z1STve2bVhFkMXbrzVzu8FB2goK4mPAiiQGR7PnV137NL6JfC9VGYukCVJYEaEIUN0R2/eH1L7nod333HlTGp5Nh1/6lNf+uLXXkvJVFBFFVQBxIBMckcMQAAApACgCkyBwDEgWgKYCnkwFpHVYCRCDmxgAYFPbWiwGnFZwdffDlDAYNALjsk7cV4gkIEjIOZkykyCVWxaQEgppq6uFsuuXkSIIcvaqnXBI9og1/HByfhWc+927IzElJmZqPBZ2yRgbJOZtM7DxnB96NFj8dlfvH/n2oLBO0nN4b31gbtx487Vs72Lpa+r1kV79NLO1u7OyD+A7dbFxz5YT2Xxysero/2dItv/cp8feGLzbK5Rs+W8nS6n00l/c+nq2UYWdHKPmvtrS5y88iWEE8vX2gAphozC+tXHLO+ZNvWs4s1Q7uyOhu+b3H9+tLUhrch4Fr70ZhcbtzFYe+pSTXu7l9cn909evfsFk9SMX5PR4e7mQxqb9TO7Zz/w/u2Hz+zd+erO8GrYfnLQ69eLl4uM5vndWHa9rcu9jTOG+L1h3tYnr73afPml25//6isny6hI4lzWG4w2N+4eHGxsb33dN37AqLj28mvJ4Ggy/tjzh1evXNo7OOHjZW+09vi7d4Zn14tEWrXFaNRwOBmf1M0d1yql6fqoL0KdxXLQT63hSs7XxpCVk3o8yMqmFsU06q0liaggHUvUCqv+YAAIzveiCCM754qQpSQptYFz9r3UVk3Ves67RXc4Pe46DiZZkVX10qO899mnfu6zL87HtRpm4FTAVDvTz3/2JZ0Nv/f7PjLa2nn9+q2Te/ebEI/nEhyaptdvLUqaXtwa7HXEZ3fb0cbXvvjG2fWeNzg3WjvZP54ezdXz9ZffOLTs3PnNl/fj5td/y/zFL7z+pbcqLVIqwjITqaouZszmy8EoryGi4vb6yEE01K3NksWbGKyHZafLDnpNR8LVVKYHywu8btq+tPeWLxgy3yKd3d0CuAsAzEzECIhv6y/NEuPq2W+MCIQRjdCAUI3QlJFWricRMzVb4ZnBCAxWFRAwUKXVnYdG6GxlPvuttQcMCVb6MkQ0UEVYwa8JVxJpAnIKgKBoaKYIvMoY6Uo9dhqGXSWYkNGpAUFu0IEtFZgQiQCR2cCVbS/f/uhH/8IZ2NAiR0lkykygwiAYkVVABMCgCLpzrhvsqCFbhKQgp5oxZAeQQBnQAyYEhFWJICCQeQGTzkpWwtRJFpwSQKvExA7FubTKaREArPTvYATIHtAjEZ
TZ7futtmrked3rJKGSXuggwht64o+qfOPvLG7VcATIPJd+zl7rDe2XUnze7snhnla1vdsD/dn6ZlbNVhZ5hDUtfBIss6g+6cuV405/Jxv+ipyybzfPd4t2pxa60bg5Kz+/MJcoo2r5KZNrODk72ivz5yA1v0Bhtnp8Xx3Z07Oca27tw/EemqkAFggFEF0RKxgRViWpKwMCQAVmQmMoCCaAwRKN5n3iEqkQISIdznBhtFIV1tHyQENEiryBZlxbQmARBmAyIIKCgMBpgMIhkwCACgqIh4364IALryAcF9YMjqNaSqaPC+TF0ltwZTqJaVz/ocUr/bzZqFNKHIfeI5gQIwkUNCZE4CtH4loqHFDMOSUqsG1SNbSM0yk0AWJSlFIdLWQXb24eNnf6843WeEcHKi0HbaGe4dYFW5w4Ms8229MGqxVqwtal8iYFQ0JnFUBTIIYNAYgcw4oxxM2VNyEhoUgWZmUcHlyfWDGFM4bVq13jlPNmcwdpCJ1hSZU0BrCQpeoZRBgCNGxETu8kY4PQrDwcLZOMiKrdK2J+5on7kV5vvKCmFFAiQGXuksEBWYlawUBGtjQy5M9kNqqdM1o7EujnH3Oodg0LCIqqCoxhZogZKwjnB8p957lpxxp8pmOdF7L+NoED/wod6HPl596pdl+ya3TYK2+5aHZ8PhPO8otnRqqJwIyAJaa1lDbltenJjqdJibrfG5j77/Yy9cfX4Z626vl5WjRWNv39vbP9qNMaJaQGERUDTWEBGRkjKhgTclfLD6C1mZ1FEVQZBWPj9QRmWDGEWY05sjZBQkBSBjRLVVYQQRiCkiKUdWwswYRTAmY2E0aEFQgTW2CqrsjcvAWVMAONDVTlyQ1aBNERhV2aDxAEbVoCZlIDKWHCEwgUEAFWYSQlBOQkIpJhLBJNLCIOuX5NO15/DmS3d/9Tf55ZvuJI5YWXUybwp07b0dsHx0ePL4R/6gzFSPr7tu3rZ5/eIr917eL2yneuz8+T//3649+eRye/LcT/14t6xo58Ac16NT5WiUHd7czdADsxn0JPjoDJZdcqUujGFtT6qiQwROBGxRMnjbKbIMAoBH1+we+eEoaVTo+9HZVB1qTBZKk3dbEdk4pWs90HlvsBHmEVidLvOOjYXRvAstJo2UBGNAjZhb5aic0ORqQY1N1pIzZtyFmMCgKTxLQ6BELMAq0dhcOYoxYAwTAauioRVQgQyRFRZh4giiSJkXBWUgg6Ji80zaAEhUlJBa4EgYXbJh1oJ6CB4kQnecxFujuDyhoO3ezIFXF1ubUBbzozrdvpORrWPbGRZhOT9zflB0zKKtXJmHpvImuU5fM/Cb3VG2FWcnM+ZiPKimdhg7i+m0F9LuvYNW5AiiJ2MFCQACI4i34AEyZ9UKSiIiIFJrmUhltXyLhIbRsCEDElcRO7QIqyMtIkBUNmSNsWi8ZDlbj76wxoGCI0KEFO63inrdvJPn0zbcmy1TgiC64PbCcNjLsiy3wowQDJrITABJNDOGCFDBWMjRKMedOlWqFUu/k2Nq1wYd59z20Ww6bUXFW5ORVFEOpnNlrEI97mSjtQ4kHXVLYbWZD23Td0VH0iuv7GZZNuyXpaVed+v1e4dR6cy407ZJkTp5Plm0dcOIjEjGkHEGqnaU+QHavbv3Op0OGUrC6KgVrVLVKXJDlADI6tnzWwQ0dnRl2J9VbWzbPtkrm6efvXFjGdogmIRn85v/73/0D77ze//wYx/6xJ9+z0d3tm9f+uB7LubyxZ/7zVuvbpskpzPbA1C0arxGObtY7vzPf7X3yJn92zubT1x54pFvL69867Xf+Oeqs1BXzjsPrb36+Rf/zh9ozjx49SvPvesdj1z+v/0IPPQu+Nw3bvzrn5K45KQ+Hwy2tqo2tNDtZHR4ewe3To/e+fFqeHY2XudHPoRrp3Z/9h9uYG3QigA7QBSOgiqhbTXGFFWRiFBUCNREcHOe6YkNXStubkdrH/jWNzJ+5c61UwM/OZpXgqP+8NT61vbO7dz42DaTo6W0DChZaWfLJRIWhfOFRcTCmKP9to3SLvU4NINellrt9Yo6CXMarvujgwNmGow7CZ2QKC+T45NFw1QqODSCzjLpwcm0TbR57tx0flKq4SaOe2Nj/WxeFd2i03HHk0VzOPXg1zI3XdRlZkKKw1P9venS5S7PcaPrUt5vT2rLcHm8cX3veK+qmzyN1jqw4NDKLPLLhwddR5M6WYMbpRdjOCVu2wwMoiHMAJgwkjEpKv+fsNYn03o4yCmnlMyyaXKyhBhRxFPZ9+EwcoVd74kIEdRnKFo4k6VEbJtWICoKzrdnZbe/ub7WO3N697Ubqa5G3WJtmG1sbL139MiX/ru/ab/+mX5bwULaxMuYHJkysw5N5rtd4KYimp6+sJ16y2u3/+W/HR4sBCACGWBx1H3yff3Hryw//ZXJa7f7CImUF9EmFQfJQoC2RXT9wvoSF2RTcCDL41ltYWalPXN2eKY3vXYNq0WnJ1//+X/18b/3T3KG/edfXU7nXjUUPjpLLmND3Chab51RIBUKkjJjkPLN84+eDLr04AjHNL13qM/d6bbHyK1debIdrMYiu4cH5PLLp7d8ffLCS18/lRF28+OD2oNB670jyrPYNsptE1pUd/zqnr6+XXpbWmuFsl631iYVBrIMAZq6bWvOc2oZPDlOC3UszgoruSy1FUDsZDSeHVz7f/2Z2Zl83BSP9DoSl1mnV5Z+e1Lt/85X3/ro49MTmO1Vo7IhK/3m6Cs/9uPvydoz73h3uHW4leUcly7PGCmBWlVvLQizsul7swxsjEpCMrVA8c73uz/zV+ZntrxJGBKdfrB/ZQ2uHQMhJAUDkBECASvkCAidkVvf6NDb1ov6oi3l7uHR5W//xLkzDzzz2rP3fuvnul0xhODKkPCgaU+0PWFJiGe2zi3bu5klsNq16NjWVdPD8nS3aKQa9MamjtV8EWvZfPCBfLDZnmy3YaIUYuKsKDWzdVsf1SZzmTFxkLkuIAGud4YoVgkQk0liEjcAsU22oDfr1iCwKlzcX0RDQERdxUWG7o+/CAAAV+51gvtII4ukoITAoArQKqciO33l7Mb4zLOfuwfWhVZaTbkFJAELBgyHZNB5oz7VBcVAwhwTOWNb+9pXFq9dHba1oBUSj0oWASU4MpZSK6Askrv1x5SGppr1+h1AwfF6ceaMdDqx37XVrI2c+T5I7h794OTl64O4z4dHmafUVLXUUgyqw0Oan8jxUgej3NtUn2TjPByKirBBRKeGksHccPvaU7B1WbPzkA6NOq+NO9zOq31LSz28mxVuuth5cMNClyh1yEBKSMEU0tS3vl7svswnO7KYc2FHNjXPfMHeuVZmGebWTaMsToCEEdAgKzCoISYRP9/2soC1brz8Qel4UzeTL/zq+RLkG8+XiYkkSASX5euDKja2UzAYTg0ktIZS4M5brtSDnnbEfeeHYNjLt5+DyWu5CVg1YAlcBhJSCgiIbZK2yvpDHPXxLY917Y2dl76wvvVHirUx3L11/PRn/MNPZO+9GJOU9+6Yr/2uP6zk3oF
NRoQsqPHIRtABV8mYpuO6lPVk/dTy8PZg3Nm/fqf/wBNdcV6xzbyrUVPkAMaCOESPopwEAC1GtSteY+FHZ89MjXXOn7p4ZTAczeaT6d7i5rO34iwXAbKwyvodQGkpgkaWqVBGapEKZxJzEmUGEVZYXXSBRRBBV6BrAIuqiBbBEiVJIkDm/lOwNiqV4XheQ6uZz7NhtzNaJwq5gUaEvIWsV2mGRhW4v3l23epsNhcNbTOp2ny8tjY7rgizIu84h8jpbW/5tnFdDPcWrjnRxXazf2Rx2Xl4PLl5b7Z9Q29vdjZHvbCbd3NOaBwQiiOJy5npDJzzkBsBQSPQCDFAaAEFfAFZD9kxue4HvuPgQz+kp3z3D3w7/MO/Bi8+C20NHqFXAllAB1bWC4RP/lT6d9XlAsnnYPI+KqgD3wlR7XATgChYaBK0C4B0VuvDn/sLMPaEh7ADZvJc+9QXz4wyIIXZ7/OkNSZpYawfYEbz688ND96obLmxlQZPfkj0HW65s/2lr9HBvfHI+17jJpX82s/5wg6/7QcezMa9JZo2l/rAuezo937v/PnNfC9MyiVeHMeSUpZDQpKIoBaVrYDWMREjAmHe66a2xYBV04SUlGzZze5Mjn/pN790xm08NlorjFgb18YDWPfY6fXWepP9Q6raVN+1DmLotCfLMvcoWpQdJ5DP9vzBvXhvh9YegI2zTWJLmHe6mcuojQFgMq85hul8uphX1XQym81S26J63xuioUFZVkGbEPJOidzO782SJk+Ol01PzWBrYx0G37y388zPf7HYhI996G2j02uOMDmwCqImNdLrrzUNFLZz++VXR4O16uBAY1of9C5t9a5fvQWaMQg3+uAjl16NL1e1M2U5qxtt2iypHk3OX9p89dbdfNC5fO4ctrCI7c7RoSFXjrYGW4Ojk/kwL20BbZv6/UEzT1p0Y1z4tdy7Dnuta/3i88/haPDhT3wfLO5Wd7bh2vGbQwpVVCIkREBNqIAooFF4hRxSESOAqsLJECoyEQIaMBYNWjAMsuomqQgAiDIgCIKCAJpVYQYFAIgAVTgJKygIE2EUQJEVeYYAAC2zEBKvFqBVEYyqohKqIgEBqaxu/aSgSAiqCEhkEck6A46KPLPGoGpczPyigoOq3pv1TGYy33BEYpFoYtAqICIoS4jYtvHk2I7zzvr4kMXmDgUksOMkJ1NyBmOU5cL3Ou3pTB44q4tJHubxxdfsVCxY4YjOOBaAgmwX0SM5AQBjxKx6UhEIARRsAeBT5txgPQalGFCEq9qDwMldosYMT+v6A5KPYnVM6KRttVrYdkFxJik5cqlp0We262XRSMvUNqhkTAGMQfPa6mKxpyYz5UabUmyOaTEDC5S5tJIhrLaTwagKpoiRqU4YGAjIZ+iMWvAXT6svwFJbNxQLcZkar8YSpxVlmozzcamvPe0ph8XteHTbFl3NumXPVUeHKiGO16eDTXPqPOzf9p1cQyumxGxkIxsrpkCxkBnb1A05nzvDi2naudG5/FaX2Wm0fvTww4/1MTedficzvbrJzpw+eOHV5167+nwd9kUjgCFVBUUyjoAgEFklIrQALECkKsoGvRKQMWisKrIk0hUqOwZW5/L7URELtykROiBVVaBWhQHE2sgsQIlVwTgjRMaQu49VB7GkqBEEyXlvSlRnXQHKKUXQ1YYrsygroNJKeg5AaIyirkTlDtVal4iTirOeBUDZOcsJszzTmGgmvmnOF/5KG2787P9XXrrqD+uhyaZ1aAmThVG3DG2UmxN1Ou7Ql//pX39f73966Vf/t61HHrv4g3+3Of605OVyMBp94jsf/t4/UpnMZN0/9Pf/9uTe7cXXv377Nz/7UFhOJ3PyrgAT6xa7KNbkvZEk4FldFGWwqdP1tlfGtk2LOiEhpcQRjRElICp7oyhaIDSzIwIgqIwnqlqOFT18ynzkiqx1/b3rYJ1unNLeWn1y5Bdzq0nCzJjEoGAMkgUkshbAgCAYj4bVauLGuhLKDA5maCzlRYoINWOsaUBgUWMEUCTSFUODBI1FVYhsVtpEVbI5Oa9akSNk5piQATQh5kiGkoAygKIgLGqyxpmcNTfFGizuwsm+0R6kpW1P8ghElJ3dKrE/a5O2ex4sH02GWw9E163T8Zkz6/dOpmXH1UwGJUfNlI23rYgbnultXTo5uj44/5blwd3rv/RrevsgO15eyguo0+OPnB48+MAvf/Gp6XGNzKpkVuw3FGuQFO/b+xQZDaFTA6JCxqpRWPEXhVWSGAPWK4AAiQoBKwojKRIZ4w2qw8zanCyBWsAkAd7s1mXeZc6c6Q3d9t1W0vYi7k35uf3pqbJ49NTGuMy7jmRVkfQWm9A0wTCSpyDYkLl7cnII0eWZT+HUsD89Psjzctjv7c6rtmpYGFkzk7WkiiyWIsj+wazWcFLVxtH62mgyaboeLp0aAkdepMI4D2oM3t2eIBujWFKnbieLtmmrRM4FYSLDTew5eOj0RqdpHtxaa5Y67BVIeLyYlWW5aIMJ6GPsdwfRWSA0STJ1x4fTU6eGj144d/327XzYNRXX7awKsYkgqseL6cNX1ncPJj/987/y6HDrj37sWz/+kW+fnGznp8u3/Pjf+N/+nz/mTyY5+ZBJQns4RxMZPJmKps8erW2Njl7aXn/bQxOJ0Vd1PTcesQAOqedp/uzVLe5cePTKSSd7/msvX7jZ6tOfWizeyG2Z9T3H+eG9RcBm+MH3xyaePnPh3ksvX/vJ//vi9PDcj/yd9JZHu9/xXcfXthe/93OlNT538yY4EASQNoJJIqzGAmoMDQKGlhe3D/2LL51/8mMZp8Wdo92Dpbfrn/vqS4OCuA6dbo5qYx0PD3fn1azn+2cGGxULGAQwme20MTYhZV0jmU+LWbDxzKafLMNJxDZCC1nm8PBkajMTQHeraArEhlg0hLbltlcWmluuqA2qTUgxAKayYxTwaBpjPcso9DbKZWWqSqgJwH52LCepSoE9ZiFAlVoFrFvRHIC5s9bJM5NFmJ5Uy0nMxdWzeuBiLzf7qhUbCmASG6JFy9PQlB4VlLyjMi8rLjllmWVgQQ7gCDWmYKxRwsj8X0RFmS/qAApaJ7FgUckZBCIW9d6BVzKQkjKGTtchJI6MQswaoywXQROQt5KkOlra051z58996MHhzW++utw/eenlyR/50Z89+fUX9n/jk5thkZU9LmDCdj7uXzx1av/Vq6O26aAIQt+l9hd/coxST4821TXgCbwBIIP43sc2//r/dEjGDB+e/C//K+7tdjNSdQhWvd1dLqphJ3g+F0PeLDLInHGp9ItZw7Zou6fPfeS/66/VO/s/ZWG/RF1+4Ve//okv9jzEw2lhKZqsQYhggoIlp94IAlgCUQDVSGo9qVazN+oW2v75C1c+Yuaws7jGSazzopBSRCBEIEznTfaFv/UT3/0nv+vZX/y1+qnrhzPvi06zmUWfu1oQDKaIIIYotA06Y5yBRnKhwbibIoRpENLo0UhLCFaTCZFMctySM8vqSDqKRanRLENAQavYBOlZ+oA188NpSnWeWzMYNnV06921b7
0Qzp5a9mnruz5+OJ9Ac0/axbCf9ybLnZ/4yfWPvj/dmrd7+5nLEmBiRSQAZU2mR6FeEKuxYMnQSpMgVsjDaBxCimFRdAtzdi1tnLdX5yAE9+vxAJZABRKMtvLwpX+3duvT7e3rXDXp+/+SffTtC13XNv+WjSsPPdz9B//in0RjKvVA5ng2b1MCIChpWs8QwCmFpERZzw17JRCgTXGU2XG51jYnzssCeLx5ugqLe3dvgTGO8tJTFGnr1iK0y2AKyh25FIlcvUzdXlm6TkyB0LicqkUtzNZSCOk/T5JlRcZDwBXTFBRXawiABsEQIq2govdfrwQASoirXYAV2QgAUcDaIiEfpKxFmIdIyGvdQkE5JUJEBYNIqETY7r/Rmd1Orh/ID3LqzHbqF17uh6ACquxRQNoccxFMQcEaX2QSGuO9Qq4tC9QqFrr97OKVqvDM0Qq4zsDUbWyTQ4oLni0W4ysPN/XzWZzawqYONudOSw1yeHN8fj1Okk92eTw3p/q47uHwjkdSTgi5AAiZ4dG18Ls/0+ECFzsZcXt4qzncN2GZrw+O3tjLvuWPPf7f/p3U7VtjmTVFziTh7i13+LLefpra2o5gtkfLRbQ+ZQryxq1sow+lBXWMqElVV/mcRcKUDLAno0RTffUZLPrV4ANLbbfOX4wvP9vPcg6zEERUQNo2svFOKYpYQEPQeiWidvb6F+nGI+nslZN8MNu6+GCW4PPPWHHGWyAWq2CTNhVRpslnuQuT/er21/D8qfS2xzffMaxjp563ndks04qxc8JDX1X9aQ4791KtQKQJyFjWWpyJwsYY760n47WdPPeVMLDeiXR97/TFyb27wxIWL79SJlYWJAvCSCworEAGLEDihGTRqy8dF1YzLxwDJJd1TZY57nRP2/HlzH7DVlVd5rmIppQ8mZzQMCcGQUiAaJBBrCNSEyWyigiskAUG2K5a6aqigiLOeftmBqqAej8pAmbllEaDLhL4LA/oLNncxDA7MoSFLcAb2ymrquUEWTbIMwwskHg5Oez6brQ+c5ktB7s7J9Lt9LKsPjjho5f3nn3+7OaWpGPTajyZLZfkk+sZyt94I3t5YepDQ3kbUmatIKaYPOVaR2mTzfsJERwZR4oTlBq01ayD/QEomq6Z77646/OTcHABtQh2y3UgApQlZBmYAljAA4CFwljXBQjQCCiASWAcGPHlsK4rVwitmMBK4LuosHH7m7AbwfSgBnBFZtu4V7sig25jTBPbKjamHI2gXY5Rqv/4T6X/YO8d716sXX5+2u2fe//+d37k8vTF7PjW4mjH10ddafPJZPnzP9/dHJluF5swOWnSuHP6yQ9ITfzss73prB7QAWlT9sremTg5qWv2ENmIaBMZI1Iy2Io4Ie87asmvmY4pXvvtz9+4vn1O+puZ2Yiz6b0lO+WgLMZvZkexHa1vNrMZYuogZX7L6pAXdS+DcrFcfOOrafdefTRnEPcobqyd3gtRM6rnVa11VS3rpokp1YtF0qCI3FaW0BaFczYGQUAF18uwUw4PJifSLo0347KEZL0QWm2Ze9Z+9IHLV/f3rt+b/NK/f7oc6wc/dPGBt571DidHqV+c7Tx2Zlr25wdVTgWNTwWrs4O9ZlK/uL+YH0uRaddlb3/kyu3XbhrMqnkjbcwBDIdePw/c7h/sDgviCCPS2WS/3z81vNg7OFxog9A03dJsH92BwhAS+Wy4NohNYyANMi1z3DuY9Lu9d7/33a/d3Tt+487+a8/77puPga6YQChJDQIRcIoAlgFEBJMAMyUlJWAwQAZRVUkACY0SCxDCCnetCgICq26fAgIpgIoooOpKg6kEuGIJMydUBoHVdxtEYAEASwbJrlhFKiArzK/ifQGDruRqJLoyIa7qsKTCgGiNEaCjvW2DwWtRRcp8B3Re16HfLbVqEyAaAyDIEUA8STXZk1nfeGuLDNc6MMhmsZasa9tGdw75ZOGAqFOANs5aDBPX62tE3j5yx0dycuCPGozG5pmIAhNlBdBAxaoqYlAISYAoJ0OoFsiyEmQ9s7nV5Bisc7XK3i4iucFanM5cmjX7r5j5sWmaVM4pc9Tro1GtK5gcJa5d3tWiY/Iu11WCBq1o1RhAVIKUuA3twZFvH1grSppXvt5tDw/d7ISMDxCRGQBEIkqU+yxrwRhQkKwTtToa4niE1qV24Ub9RM4gGDIGC8NRFjPQKGGpBBxVRQ0kfuZTdOdltzbMHn5s1iv85jhmdb2/z3Wg2bw+fKU4ObGMqtaVzna6i+Pj7rhs37jON+65PJeWi27Ripqo4eC4evU5c+YdKWwV672N/lohoSXNyJGBRhddGx68cPp4//b24kRRFIBIrF05xFUAkopFYwBQkYiM8YBobOFMaSgjcEhWQFmAGQDI2DflrwAxBAtqsjymRGQQiEXbJC1HjAHIGGeVBXPrFRDUkE0qMSYwiYQNolElAGvoPukRTUptVGGIAGwQCMkIGjCkmJKQN8aahAoqSKACiEjGrIjOqqxoFCQ3Pmc9683WZP7Gr/xKN7ZcJUkQNEIGrbDzpoptFiFVIc/twmD/7MPl2z/6vltXX/nZfwg35rtPfbW7tbX2PT8AH/z4US1ZHwNLXQyu21MP/Nc/9I4/9L31T//T4//0mUFi5yhWUaxTi21oMirJWpeVOISY1bW02SBbH3Tm05mKaGiJysz6OrRa5Bo5YmU9YZozJdfvWEQuID0yoHdtYuFM3m1PlvKOD7TnzpuDPXj6ad2dGohpOoVeRmWpmKlE4oQQVwkg0MrDa1QU1GlUzFRc1oLJcoxtxJIACcVKElBUoVXxkIiVI1ijRGRoxeDkFFQTR7bGkc9UjXG5StIY1DolEABjuyCJ5gsxgXs5rm9SnQESzDI43Jajbe8HTYttiMYW48zdvbfz0OPvfvrW/pWtRxbTpc10f1bZbnG0nCXLm+eG+VbXFi4fj2vsoduQVFd3vzFavNy7Xme//+X+rKqrVE1mT6wP//j3fE925YE7uzu//vnnPOEKqphYADSZaE1EIGvIgAMg0fv/M1EVCcnQ6oEwNgNDqqu6JYkwUFKNiiAgAGIlOKVck1eyYKyBWlLiN1tFZeGsi4FHzs+qxhgXlQLpG/P5jcUsV704HlzojzYK4yJaZ4yjJBJabttY2WS8yVLaLDMBk+bTB0+tNRxPZjODsHpfAvOkbpglpkSATQ3rRemRrJqqSW2T2mXT6domhZN5NW+kyKSqmuOjdDyvnTOIePvw2IFYQwpQ1Y0CDAycH+UPbQ0cYe47PG+GvR5ZmNU1C4pQLy/qul3r9lG8ISjKQVKZzQ+6RWnBFk3dt1mL0Jh0/Y17jVqbeUM6XO+bIi8DVPvVrXr7J/75z+3d+a4Hzw3Wm87Gef+JP/dDz/zEPypi/dGPv/Xzn38Gs36DuCOxk0FOsgiLtdOjvW9+brp9Ox4eOUIgF1vGpLXyoPTL67eXa+ff/m3/zeDo+NZ//Ml0sNP1BAaisERO8wCSDj71FSc0KTNXFFuF3Tw4ufojf2nre/+YOff+ySuvlq5oU7LgxSBbIhKua4ksgsZbV
Q4rmqBzxNXyC58bVvvFxUumgrc/8sTo1PrN3/m3Gxs9w0kSmpoOjg+Lwl06tTGbtPuTo6JbOm+XVaXLRWoECA8PUlam9WGnbeuJxETSoghBtVhSBmTAF7k6agOfWtto+cTEhElEnQWazud1TAAYa27bmHtYTGb1YcCmYyFlHWpO2rKTgRMUmlaNpNWftFXC3Lu6qjxSmWPWd5QjIMRlza0aplkSVTnb7xqtt8ZUT3NsDYaIYMmQIxWBZdOKxVpleneve+ZUz5GqiiEA08QAIaKSCCQRfdPZdD8qCq0QpSjIDTp0Tc3gMXPYtLJUJPFiolMlq6WRFGJhbMNuOmdMCRSB1Gdova4XUi6r137188PzY5otI7jNyx/cuPLoQ3/ioW8+9fP26lFbNXtqz/2JP/j4j/6wH6wd/Op/ev3Hfiw7Pq4TROtgtiRlAhfBBiSLSoq32T75d//H/a0BO3/hB74vPzw8+smfythacsHgi037jr/7P6x/1wde+epz1f/wDzptyyxBEYqCciqtvRLS/Jd/dn6+Z+7V3LLxUIh3uzPlSA6TtwkJyRJaL6oCK9awAVQkbygYBGeTys7yqJ9t1S/cXRx9udk/6MxPkI0QIikSIlAbAkUsyc2+cOtXP/cP1yCd6o5Dt3SXLrz1jz9ZXDpHB+Hpf/5rsrMv7SEVVmxukIzR3GWFyfYOlutnzo5yN51VrQTTTh2lrCxYfWOo8B0W9NQTDigOQCwYturItuCzjY2OB7NcLo+qal671ohx99iO3/XwAdozjzyy81v//1G59GRMmxdqGXKzDPT00+NsKEIEzmeGmiAcvSUQXYRgPv4+9/AVeGW7/tIzeZtQtfBwcPWl8qkv6Ld/Pw/9iWlwGU9LCeohRPAWBAH4vszXYMr64ewH/c1d83yTxkkf/UC7fkoP8LQbTJ79xcXRa2eIzlwePPdGXDJngDah4ciuLvxQi6xZBkMugWUlstaSvTg4N6/3U82DopxVi26v3+1vHs72Z8fHuYJFLHIX25BbI2SAoJ85RzDybjJnS06iLCV0Mtu2sYpVTAoiBuzm1jrAXbiPooD7Z5oVzhrRIJJBADCrQa0Crk7kK7WxCKzmNgora/LqpG5V0vGu8EMncwvDsXWxMBkv5yKMCpQIQcEAEBnAuH+vfvGL5WMfh7zTvvZceuq3N4FqsE1udeDKstVZ5JAMZ8iIxibWouwlQGl2dW5Yijb67uUHlmUnH3VlOoPArj8IVXRZbtSYg/386B6dvlS851vaz/+yQ515z+/4OCwiV+xGyZt5U5M8+AQ/+qgfnln+5k/3lwfWECPFxOi8RtGDPUpO6wl3jclU777aR2+bdtDn6chQdnkwm5e9aYBSYpLptr/7LOxd5aNDUhLgSFIUGXBQFcpzJqdMcTkxAEReOCEIYWBYDQMsg6FFwHAEr34te/S9abAmTLxYwGIibeOdBSxSVFRLiVOcglE0VlNSYUDuNYeLT/2HrY9968nWxfLiqcbNCwQUBYtCMtsYwtlH0tUbdmfiBdFloEitdtYukW6GCNPrtzcunfch7j61zdsv9x57f3rqM9VXfql0rSQLKLYwrQTFuKTI3Q2qZOBVHUyydfPRPzR867t3nv7de81R993vNcXG8unfUK3IkCY0YHJnAtb1+XF95nR6405/PiOFskM1EhNlmi/39gcX39Xm3YjUKpIrjNLxtCrzta2tQdOmRRXImjoBGiGEwhhWDCpBcBm5a403CggJICGY1X6ZikeyCAZJAQksWeMJRNUCIUJ6cynZ5y4u2HtrrW0TZ91hs6yXyzmHporTbu90z9hmeYDBIbpqGWOg2ILE0Ml8ZqoqxLaqc4+lL8CWRFztvxCaxWgM0bTSMa3r2vXNxdWbA+ec6ZiKzLR1PhNDNs8UwftM2kiAKIzSQjWhpNQvwXUAHRABFUJdrgUJXIK7v/Yfav/g+OOXN4GLwCAW7GoHCEFa6PUAAchAFAACcQACxoMSeAPtBJzNscMnFXZ6ag32PMQATFAIaFSXc6cjJqu23jZ89G3wytdBj9PxyfLsE3H80OTma5udA1u0xd25ne6HOzfsA98K50fBlRsXHymevxlf23bDU8XGW9tnP++x7Iy6i9kSFjH0u/MnP8ofeuKbPjPYv/D+b02/9Av2+F482u9sXY65sSB5t5NgOW3bhjOr9tLmqZPJbl10O5vD/Rt3s1m9/dSLi4MJ7tXnso5Y006WbHFERl/fbuFeCiJlKRzjuTFtXLAtnjITTd8o+6eMmtneNjX1GTHTk0ygmGE9uToxeCtcPl11RWI42NnrdsedQda2TYqBLDKgs94QxpQ0JBDM84LYLk+moOG0L7aPdxwICXvCqg1JTJMkcej07VlTcKkWy8nh/Au/vv3iN2598P3n1y+eJ5LZvIWBcSnvjAZuPJq9gojLdjqPSNMagdC66huvf/Ndb33wzt3D02N/5+Zhtzu4dH7r3skBln1fjPrZYOvU6ePj/RAP9w9mZtoUZb6c7RnqqvNJmzLf6GMeqjirZg5FOGjSEOncmdHkqD7a228P9o/m+2995Mr17Tv3HwNRREUgEBBQMGCA5D4oVjUlFEFBYAEWoNXnABGAQTWIq4RWAIIhTohABEiaAAhAlTABJVaQ1SjCrIYsoKtJhSbR1V0OVuZyAWFBZWtQgRRXnHoFUjSgAgrAykbwTZ+jrhbgFHH12sRyvH/3qc/84kc+9j3qy4hBUgQvgefeZuQ6EpcWlOoKmKGOfPdefumhqsCpCGe+6JWSed825fZ12ts2zC1m2agPlWDVQDzxbZu9ejf0nDYNSiIx6vJo0A26ak1qRVR9p8OcLEfmhL4LiU2MKkJgmCQ6TOvdppuTkp21dqeBVCNpAtFyVFx6H08P3O4NE68a55Ix0O9FzV3mqVtGm7Fz1MmCY64isVjjMSZJEQkpc53FsT79tEGCkwPg5EDA5eIQXFAWQEOkytEYhCRGklOWpMCseVfXx3L6lCISjLltNLDNHHQyDlO2RN1STg6RGQRTK9ZaAsQm0t1btp1rbwzDC0ssQ7R0Zs2I98fXT6Vm794dt2hFiVV4cexHT4g3/fHa3aNJxgRNMGidCLI6TtnNN9Lv/gc6OzSPPepMrzvo26LTNsxGNVbeL8kuXN/6fp6WERAzg4CCZNAAK5MqaiQEQ26FViRXoivFZESlcR0CQjEMMaWWVXnVdgMAgDYkUlUBbx0CQJQknBCapBxVgW3k3GDiVDhnrRG7oiEaozFXjaFFyMmStaionDSyiHGNhBY4cPJggZnArdpEBiyAISBLsNq/tAIMJIkdGVD2zgJkVYxdn3en83NzHrNpJxNr0UBW5ZCaNqzO/yHWIYqajnNqzXGBW9/7ie00t0/9XnHr4PWX/wWsjdv19cGj79jtn87J9osMI4DKw4884rRdzpvOx78jXL2ert9MiV23yMoC2sZFyZzQcAiFOWzj4J2P1gmK4QZUU3n2K97igIu2FkcWSNt5LYhUOjLaLk/6GyPk2FLkraG+/5FZkjEDdCQ7PdQLg4oV17vwwDjtHnjNbXfA
hBBaxIRISApESCqpRUEgdeVAUwI0WIxgOTdKBXno5uhzYWQOxjmbFdq0pLxay5LUogiYlTjRokHhpEjWZSwRyaEwp7T65ATjQUGTkCh60qgo4I20zUzQoVjUGvMCyJt8C3xhTVge3e30tvYOd3Iwt19++fxw3c3v5daQHyyqg96oS3ljbFWuUbE1wm5HMFffg+5pWzVb/dP1K7/DV6vzo470L4XXdyFlp7bOt1P6xpefeu7FW2BdknSfSa3ojEkrsCiRqqKyAhoEJSBAILPSEjsDaAkQAK0g3h/Oqoq0iBZURBMZB0TEjZUiQ1VOJOA0hjejIgfYLztN3b7r4vm7z36TBckYdBSAG4Xo3EuL6uXD6eVR74wvzm30c4spMgpQJ9ue1SHKxUF/OlkU6DrotorO9cPDfuny8frt4wlZG4M0gUedziLOlXAeWp/TsLfWByLWhLY/yE6fGcyqZmeyNLn3zsQWZnVAb+o2OUshpjJ3bWBB7Rg4v9Z94sygH2TUyU+WLQr2ux0mO102aOygLD1BiE1BhlhjG0VVfELEbpFXEJbLlGlu2d2cTalvazV5bqPIsmoveLtz515ZdrtdI8jUo0+/8vxTz9H5zbULlyfv/fBjw8cevfuVF2+8fDKk7uHJSdbrxMzuTpYZImf9t25s3H75izBrbEJnkst8iqSSGEECQqiwuvPST/7NMDvOSa3NYh2SVs4WpXENNCpiKCUghU7n3KWd629sDEYPE81/5T/Owi9sajImAplULQwCeQuKoklCJOshCXPjvJnV0QDZJOnGZGf/hfAePdSUP/y2g91r8eDGWy4Mrz53p9cb+m522JlOanNhPJi0OyCwrGLU2Ok6QJtSsIbqwFXD456Qs9w0g7GvYqyOmwJ9Nal9ob4bMpdBVVd4HNt24MeeYhJb4GDZLJwhBSVJeQZU2ND4JjUcTNMEYdtWMCQKTRNaTSE679DoNDWdTtY0DXW5zO2ZjbJaBgEufF4nWMZUJXDdXOo4iRUodikrI28CROW89L0in4dF4fNFjWyxybLlycJxMIVVjgLGkVERUUAyrXAT40kd/ouoSAV85r0xrkStm8wZBJpNg1qzhFR0cXy+M92fEGhdB1KDYpYxJUVHFJeNI3JRUk6zO7sOl/1aFq8cze7un3vXuT+xmXc++zc+9c8++e7onvied37z97bXvv3ben/uj79cnkotDj/4JL/ns/C7n3aADBBVMyABXQH5RKAGd/4Hvv9w8/xea3wy49jG2zu5Nc6Z5TIcI13+q3+l/5d+eL/B4t0PYP4zrpp7ayqQFBjr6GzjbbTVSYx20Oksm0I4ioh1DiwktAwmEjYSjBVrHa+m7GSBFUWsIUCtJTTS1ChZXV0Ynlre20lHx1meEbm6btToSmTljCN1hs06Ibiu5ebwBKXf/e4f/gvTd5+6tjeteEYffNw+d7t+PQnNUG3bxtK7xDE47J7rpBKkLMr+1sDYkztPO09IKj4VRWym264YuG4WY4LQSlutVL1VK498/59r8Dg8+7mwmKGwcwSAc0kPfNsfuF10eih7n/5K+9xz872TYddm3oVZLAoP7QKmM7PmXJHLctmE6J1BTtI0ZKF+8PHsL/ytxca4e3e3ufVX85u3ILaguM7VnZ//F6e3zvJbLp20e/mdG+mF130lYBysaJwMEBI4DyxL680n/nS7f6PeWhvmr0J79cE0v/flz33hZ34GtdN008MX1g60gaZpFVpmSVhajwkIQFML3HqQtl7OTOuLzjLEpCfKDaI9iaHCdGZrzHF2tH29Z61VezSfo7NIFsiINIM8L4hVZVanSR2ImBRNWQKBs84LbK13ZilEQYb7IivEFfwBEMCsCNaGVlAiwvttIVBlBZb7rGtjEBRgNdtVMEC8mvVYqKeHsQ6N22y3Hh9d2rzU6Zx84ZOmniEAARLBCtgICDnL9JnPO0ndQWf+2iu+XtaNznxWfPDD8eL5Rk7svdfkxdezw2UBSGgaptRAm5FxKcuiLTdDqmq1ZVZgEhQVDdLWoa3zopdLW7/xlbGEavdue+Xbj8tL53U+b6WMRb2o19fX5rsvlWXp3/a+UAza8amsKOXKW/jq5wwHUuvIcEJUUA5ovbqcG+ZqZp2Hbq+yYVZ6PxwU+zsv//RfPvvwqa0/+CNNfl7u3Qknh37r4dYN5eCm7t91uQstWKbYVs4bxgisiKzWSlBEY+h+yoYr6KUqozVJe/duz3/xH526+FC6t2t2dh256G3giCtLDqAm9pAkztWSikBkNYTW9Y724Xd+MQ3K7iPv4L0T5GGSoDEuCqze8pF4+gqFrxXHT5WELIxFPrj88DKRBme4zec3zUsvyisvbIDhG19t/vnXipNZwQqJyDos7LRpwuPvyt79UT77uOmtwed+d/nZf1sRjn7orxyffTQV/c539OfTw2kOOjvoHu+7pjWaqVLgkFuKnax93yfmlz6+tF+R539juNFITjQPGkTm9fLZLwwefcuCt4J3oiIxAkhq52dH/fc+cParz90ERCVjEBqVrrMGlUSZIbEy6DJxIAZC70iSrNLPzBhD6OyKALaSKKkoIqFBVAD75vQAVDu9LgMxIwhNjieL0HpsraW+W6uDVhwQNUbGVJdlp8jzpprUdSsS6p0bxWCzKDqYZG102Y5Oh5ObYX5wsHPYt53C521h+T3ffXi4XOPKHtyQeuZIXEEqyLwaYGhq69x55UgogALA1OlBakCUWYw3QCgEionIQ8THxme2//cfL/9dOJrB+QfGcOoMsAAKcAA0EGsAC6kF48ABrKDnJgK1UMzAR0g19h603UEMgOqttpASNC1YhUyhm28fzsPD333hD/7Rr37ml96x9pDc/FJ96lL6jh/l8QX37O+Gq79hFztM7G1tXv9anN7dODesmhB35+VsR2eLOJjP9a5fcGq5me7bjjcGjoE7T/7A4dDPKuTDOnvupfy13e4purI+OpnsQ//w4GjPb2x1zp6ebG5dfOuHl5/69Gf+1t8bD3tH0/mpi2fCtLINu0ZO90YGrMuKRiJ3y0LVqgx9N04XSf2AfTpc8NHtJ37oL9BRA9/41wYWCk07WfR7vThfqLOqEB0um+pwb951+4OHLs1t1uv1kF2KUTS1YUkOY5SYAMCQwVg1yMyMTEkTZ2RCSu1y4dUA2ZZbJJ01VbJ5JCzKElU7FurZoUp7sTPeW+jkdvvZ6aJ/7trD7zf98/lanofXmxjQrJVnHn3s3v7+/s49MG7cy1W5DZz7LMNB7ng5X5RlzxmzmLQd6myMHzo3Oruze3N/e1es9i+cu3twb5Q5Y+yF82tCEhXPF8NZDKGuHXeyNrnMDrbWDg6PpovpcZx2/VqjXPQtwtL3XHdYrB4CUgARERFUWK164QpszaiCKaGiMoKa+zZMQQKyYIwQiViyCJhQGUCUFVSUAXX1AQKr68yqiKyAqoTECok5iRCCogoIEa6E9ygKIoDInFbxEQKIJoH7hGsAAFyVlQAQLJKoKq9eioBAxjDIjevPPvzQu9cvPxLVFOON/NFHw/Nf0VpAEiQxoBAZUgK
A/GS3ufGqXHlAtkaVt2iLclrLSy+E66+7eYsbG3FzFHsOb5yU0zkmi0UXDo9c5ROI5E6sjYBmOAxlAUWGkU3qaARilCRkO4kyglqTSJuwNJh3cGuNxyMgsAYwqna7dLwI0wkha2xaqSzWCgvhCpICi8FuTAX1+smsSX+Lzp+vjMZe2VnOdbJQTSkGDS1kSkI024ftWxAV6hoMQolALmUON3usRtEIWUILzJAYhRERNUpvnTfP6vooOpP1x9zUlpyzmpaz1CQquqogJwcwOUERIDDeCqckCimBCOzvyPTAlQWtbRhgKgpnnBxtHxwdF8uGhNFa5hD2twFS22izTG603gMb9k+4aUAhNY2xJm/bcPUF3Nfq4BpsXtZHH6dzVzqdTuTqZDrZPbp3a3e3inOw6H2OqpkjgZRwVX42yqKoxjtrLKEhY5AsGudd7mxBNkMBRiGyIsLMMYnR+0sHbQyoNkZhp85aBYgiyWCIHCMzpxyRDIARkjyJA0ZjMyIIbVIRRyQMTQLARAioxKANhya1bBhQNTECKnMSQQVjDaFRBRQgRADwxiVJcL/XTYY1iZSZzyft6YCbDLBsIkdaH9QxtnlWdvP6ZJ7mUVicIyEgrw23O1V635lBeuELL/37//SWopd6dLxcdgv68s/82Lv+3r9SgPn+VAKvra+lyI5gUbf1qUfuve+7z5VfrJ57IU+tRjLKRT8H9QfH887/5U90n/y+/MJG5wQW1/Ze/tl//LZ+z+mMVCmmVEebFybvJADqjupFZTzGli2YhOTPnGnGPZN77wRm+0DrqQ5oklBsrZSjnswMOZtSTS53NtNmCYzgOwIqrGgsomLbaB1EAIyh9TVoG+DagBUisZoYMLKJDVUVYavGQ96NHNAX6ijE1oInFEUweaEagCFJIEVWMWQV37yAqVEgTaohkAXwxKpQDs3mI3D7Bah3oSRuC2s82ozcmvreeLPz0t7TaxdG9+7trW2dwhwls02NLkTvU3fsE0Qh9aON0DbkIyqBtPVykdp5ZKmk8WUvlej663W3/Ddf+epTB9fuzGdBCRNbNED3O5MIKyqcECTRWpUc5KBkiIiMrkD/ZpWbrzhxK3wECWhitzrCk1ogYhFHSLF2Gml12OVG0v1LsjduOQvW2rXeeLMY7jZNYC3RKocQGVF9bopODmudV7ZPZqjj3OSJut3ime39/cyiMWXenUMAQGPt3aNpE6WP6h12vI8WQeDCcMCMDsF5v6yaTreommpzbbg8mIQwvTDq1LOpMo4y77oGkgTQKlCz6gEiiKUqae5ps5O/db17qsh6it75asmJ7fq42+n4SRNDRJRkvWtAAEGE57HORNoU1akmIWNjCqUrNsajW7MDW2Z+rdhKGCbzJgiQr2cQa5nMF71R76iarI83Kxs+9J73n+XsledfzcxxDLU/vfHyyeyBzfVxSssQhNk5S2jnNf/eV14AJ8Z6QQosnBoAL5CE1SK6DCClND9xhhMASCRmYTAQFqEhh+TU5NgKmq3e3aM3NsaYSxPbquCQIyhqirwqbwAyN1FUkgo5L+qsyZRjSILGGlEUMZ0cHE2O9w/U1tvLf/nFX9gPaffopAlmf/tIcn/h3Jn5gu8sJ/lG6aSczeqUEK1po7SS2hDQOiTe3jvpdOz6oIxRjGI/cxrYOmMthkDT2BpOWR5BQkxLAXZYHB/vLqV1HTvodQ93tj1l1RxEuqJtZkAMWnQquqxS3rUpVaOxYUmupLLro9W6otRoItitggYQ1YpjPW2auWiWReS8k50sl2SoNOSMDJA7o848BKaYdbymOMwtIaBKPuqXmObLYAmN1QTRWBMktTEFgy1q8vBfREW9bkFERe7qJqGl2PL9YXSAKqW8LGYNG0MEQGrCEpukapCsDTH60nnCZT3P+8OPf+cPXPvtr51ZT+PYrvW2Lr7rfZ/7iU8+se4/QIW2i3u3Xr/84cf2N05b54bdIlqDUJ1++C3y27+ZxAYVyWxksUkNooIQyrZWcb1f9tbX2rqU7HN/8S8Ov/LVc6hVbFM3t+9+5IE//6e3kxLnazzerg1FS0ZMjNqwBwSGGgOOskS+JEOFZQBRqEClVTXqvQ1qlQwaK+jQlcZ4BbWiMdTiMBpYDAcPPPlDppTZa9dnL72my0VRFClGxkBoBI2QECkAYoLQBEBoVDxlWOaHVdi+d1x860gcFcPx/N7m4XM3TWhKSJIitmoza8DNVbIzA+gObtw+6ELTp0jec4JYR+p1l1mupWsRHKmiIxFgBJTMofbg87d/r/f2BzgL733P245fuLa4u5/lLnP2+uc/deZbftDP9ovnrttGjPepbrPCSC5aOEveCiwPTnwvI7UJkBv24qksA8f+o++DC6dqBTh9efSn/kz9k/8gbwTBozVbByfN3/+bPLYFBnNUl/MAjPCfJXWk4C3EBJxSdYR8XL1tffbO/3p55/byB7+n17BG/sST78E/8zf/+b/75f5wt4MzX1yTpaCCAIGaotOpYmwkzWOtofbGh8jMDQEvos2dIwJO4gfD3sYWtk11OKHYSORlG0ipFRDQtfXhkDAzMJu3IVGyoBImS2ljONsbrvtCa7HqlyBB2snuAbz5hYBEaBBW2zqEiHS/P7QaJ4sgi7AKrFpEgHpfqXA/bAJEVUXF2Xx2dDJz4LujTeitRdK809FqZg0A8KrJpsAJjUXq1HXz1c9hDjl0DORtd9B5x9s7T7wfR2OQxp19iEcv8CsvNdeuemkgWQWE9cv5xcdSaCyF2cGdt7zrY6IETS0g7Aw5MllGKfLRDbh3swy8vPlSPnp0a3Quv/fVsyizX/snm/nWuFTOVLbWZnmWb41irALH9ff8wXi84/Zeg0TaKhlkbZzRUO2jyRKC77pZEBhv+CtvT7PD6rd+YX7j2uOjcX33aPbvftqun5XmMBufnixZ60M+3pftfUmoNheArOgwWYqCHEwEADXOAbJKQEaLmkDAGRAUBRA0AQa3D+SNHUsG8iLxElanTqMgARRAggCo8SAMktR4BqCEJAwp9hZt2P8SsprIaq2x7KXC3ZfS3g1649VuIbxkYohpntAmI+RsZtRP76Q3XpWbuw4INeZ1IMMtAIKlzOqZLffoO/zj35lGmy4fCVPxyMXmGcvFUNcv+9F608w7a6OMtDPou1bq2zv2oMaEKOIKE23d7Xf1ha91DmK8/vzRonb/1V+uw8Hyy7/iFycl2c78dvvcb/Q+8if3o9bL2ezunb27NxpMd27cHhTuwka/37azeS3OAtm6aRnBCRSGUDGqKksUMIQEWiABqkHMrQECBUUAAUAiIEoslFbXYlB80/2ElGIkl7VNlWWeJEqqmhhcLo7QG8vCKSYEOxqNEsc2VEqa9Toh1NO9ST+iN7N+p5+i6ZWNJlo7c2k8OBevH9RH89nu7p65vvGed9f9zYPnX/TEuSfvIC6FWaxH0SSULPUUKaToyyL1TkNWYH3oQmNKK20F6MA69C7GmlLQ9rjPQnPYOgXw0GXAAqoFAANZ0AyAQBikBY3AAtaANcpwPN4Swp7LvLHLad31xpTjdH
gMzGAd5Agq0B0fnXqs/53fUU3dbP/VK++/nGaXQmnWP/Lhw3zDcfJ7X9FXXkjAMWITTzqq+eTF+rmT4fpWy1nW78NG3yzi8ugwP71Bw7XpjVvKNs5qK5h++xc3Hrk49La4c7f+7Bc7yyreiSc70zj00EuXNsfXXr22d+Mkf/L7tn/zhfTz/+YJ35ksbc9uuH0+NeorBSgI83yyP6XK9EvTxJgQE3O7mGYMFDEuZh3mUb938plfyKbTvk7FYIzBKlBsTAZLr/sttFXI83zNiA+LDSf1WkcX9bDrDo6WR9tHQqKGq7oCIEicO98pM1CKgTNnyXMr7dqoW9Xa723uTubtorG58d0SCfIUex1qlzo7CS7R29ZOVVXrC7/E7s7Rcn8239/56vBK76Mfeezy2x+YHvcOj6XfHa1/5A/40Ds4vHtc1ZoYE3BFt67uV8LzwOfOX0qNGfd6y3oq9fIA3vAZl7mfxnZ7ehRcnWzvzuGxt+aoqru9fuksxYCMF86evnbrjaXE6XFsYmxSnXmvTqbtcW+QVRP4/Ge/0lsfvxkVKSIJCCGoMgEqKPNKVCYIKiwgCCoWEJWsyQAIlAiJFAkQVqtiIIggqBEJVmNtkFXnjUhXHxew2kQDSZoSABLJKjIHtCsGMRLdb7sqqIAIghVEFjVAoLKCXSuCgiCiIAqgASEkBdQYWaMx1M3H/X4/KwrKi7Q8DoNRNtqwYaYhegRICt6CVYhqq6l59cWOiDRnSqt4MoMbO273sHROS08DG0aYNgv0532ItH0EUcgY9DZGZpdnW0Pf7Wi/n4w3nVJiMqngw5mJERMaUTQCbU1ElOXctKY3YFswGJQoy2WsE3V6PJ2QBguJqx1SbuYnYAAMiLNkLZOohrCYqMswM9jNxQp651g0VqZJqIlJVKMokQSACjCBjWAVjIp4SRYkByIQ1sjKapQxMDGBRdg8jQ+9MxUZQfSaLCiiAWZQvQ8tzLxakPGmnR/rdFtjQm4TIBPZjlVlbNk3wSznaXdPSAANC1hmREuoiIjIqCGe3KHpkWAPFstiozM/2On2y1lbeQLNXcOrxTE2yyjbB+R7JtYqkdCrs6JytFzuz5cB2JWuzDMjACpJWWKjIAYIIaoEAFZaObItWYfGW5MTZRadohhro5ICqrByetOPDPNlxXluyKsmVlSQIGnRcBOiMFtCVRYC61UMrRx9LNQysyRNUWJil0JiAGMNiKRlaBsMrUorzBJLNpIYxVuwBh2KSgjGWcotYhJmQDBKomqQgAwjJcSs7A7ntZu+UZ8kX+ZB5Oj4pCid7ZbTk4qXgkyZNyEmsjBvQ69rHh2Pq5/+Z4e///mHKavn2iIbb2DB/Tdu8je+3P3wR0O/MNbFw3u7L381x7i/u7jwoW974Id+oLf7wYOf+DG89jqHBCDRkJx9oHz3R8vv+cPNqGAXqarcxeLdf+YH00/8NlVzsJDnWfB5Y3NGdECS2mzQC6GFPHNMjkN44659Yc09eBHaPVAHoQvqvG95eQg7tzC0WhegHZMhKwBlzEurrE1Qa1CJIIfYWGiESMkCkBhLFqhpYNbSbNeXuR+MwSBM55AErEmzE9tJvjSalpgX3nlCgBhiSMZZoaRGyTpgYw0gILcVkpDJ1HQhLVQqWxTaLLBljHPMBjw4a7Pb4D1o4vmRVo3U8xy5nu4p4Ki31S7A+c5kcgI57i33vHfO5c3yeOC6QMblAzVDlytYSbGSLLMbD4XrL/LsVdssjw+Os2gb1vxs162XxzvXi8zzollZH0VFFQXEWuPt6n8owEpTKQGQDFhUQ9aStYpASAoChESoCMpKqz0+BAASYAUgYU/RYiOsiVPgGFjqcN+AZp11zqWYHMNH3/n233jhuSqJIciIkhFv+MH1XqpSIeEuh2vzxRb7LhkzaXbbVIWoot9YbPeMPTcoPCEbwASSIqR0ftTbbZrlIm4N+3ePT4KAARh08sI6qRO7BKF66OzGWs+/9vq9Trd7amt0e+f47Om1LDM7z7yOsMqfbeSkSheGxaOjzsNrPWzYk1lGnUY2hKNutmjrllMIgQgrrZNCbk1m7YIhoJDB/ePjXuYoK2ZtiEnd0m6tD7Z3QnVUL+b1MC9t7o52Zg9ceOCFqwfq/ebGqfmJ7M1ndr7IH5tnZrkxWN5+4+oP/OEnv/jrv7uY2Nn2vUuGIlqXZ9NFZaxR0ECoQgLREOXOIpjYtuTIOMOSmIUgGQzJkLM2peCtBTURlNsq9xkJLJa1ffeHh9/9J8PzT5185pNSs3OmFSZLSqSqxhOo4ZAQkjFeCYETR+GVbxSS9xaD4dA2SeoNbQp6/5/9wS/emc2uPd1yhJgWHNRrCIHqxXQ6zTu2rdPGoH+QapPniybF2OaZcYWxRTZbxNZ4TYhtamPTWetLPu12SSEPNVDMigyn1XQ2bS+f6TcVu9yN++uv395mcqHVEGaOislMAYsQQpLUyTNvnQBUgSfzepznWU5l3ySysWlSBYzQ8S7ve26VFXzpa3aTWZ1jVuTYKoCGJSjk5gi4qRdr1pSoAgvn7YJhGTHVcUBWQvBkcs+UezWUe9u0QS02SRYh1jGxt0sJTft/ahWxOBSuakjsiWyGjYgyKEqRWZuSBzWIKSVtxYpXMgkSq5aFI5F20pDSj/61f7zRe/Dt4+/4F//0R05lkycef3ztoUunNrvupLVGQ4PxtYndfWn56a/On/rXa+9/R7/sxdf2jj/13Ckpp6lS40JMaNA5a42jEJKmjs3P9TebO5Pwta988af+waW706E3ZdFriWcPXHr8//PXdkOKO5O16f7tf/i/lPNg0a02hDJ00kTT70wK6X/wPeOzj89+63f4cI88KrlovVoSImUBQ6XPhEUieu/V2jbNxIIrUaIqUiY+Xbt6cvhKvTs1tWauCE1UZYYaMScikZX3XGVlyjLkXA4pAvDpgf2Nv/23H5i87+x3fmJcPnrYdnd2jwYWkyhluQWSaBjcB374Tx+/97HULfo7+/zs3Rv//hcuOO+sZdOBrQeOfRONrJeD+c07/aLEdk55aYnR1tXi+OzFU+1bzyb6wI3PfrML5IoCM5NbZ3fuzv/2P6Gm4aAmCBnkXJduAWuDeSMmYilc9Bw5DLWa/jqyULUEo96Yu7/z6+e+92PN2YcmVXPpwUer8bhYBEALAFmE7GgBBy04lPkSLEBmQRlEQBkQgBCMAXL5vD761C+s//k/HkPeq7eG8645mWru9r/+6uYf6n/PD/713/ncv7z60n/KCxpaU89j7h0j9Z2WBS6raK2SUO4NCklbDbI8tbFwtpNnx4LFcE2cv3Pt9cWsBg5l4REpBu51suCp4xwJnEyWIWiVwiKEwltISWI7VU0umMBNvZhzSyBZvL97YxBXqiVEtLQaWqyMxKAKLJpERZVl9T7f3wRQFVBCQAWUlepXBYiWbVNV9dn1jZrjMswaSpkxKQBkAJZWymVCVMJEkClCVCViomU0fOby+fd9F22dEiQJjdihPtSPo7WmOqa9exkVyyhu4xxnZbOY2MVyo1NWb7zQ4WY6vWn7p2i8mQw5h
3axPf30zxX7J5rswGP13GeLQLYOauxaqODgDvScjkam/whk6yo2U6UI3N1oHnySlkfZ/AgzA2Q5AYOYLBNmFm1bKdHK9ot0/NoGm5K85izOeurhcpbJnLWJi6kz3ZM710rnuaIoSoUig+1aV3RsZqVC4WCzDCygNQYzqYIIK64m9IQiCiqiBh0aw4YE0BJFSYqiSEBGFQA8IxoyyIzGRxUrxJxAAARBDYQWEVhRohjQgox/6XUKCgjRIFiHqHmu7bWnjLlIJ8fy2nPFndfgpIrqOAghKoEYhwTKFjpde+UdeOZd3F2PYEJVlYOeWXdLq0ulfkIM4oxp6kYS87LW3R1oEahQTkTpcFiuf98fjds38TNfyl+5NuhCk2W7bZX3Bm2OBqBpxKk0X/tss3N90hsd+fXrz13fO5jQ6Mwb+8f9br417q+F5sjiwawGJJNTAoyNNIkTkjFGVVCVCAyiQVSFlfNbRHU1dEOIIqhKAo7wPlb2zS9jnfVuWTUANJ8c75wcHYf21MaWQbXOpLAUIFUCJBYRZhU0mVtUdQiNzWV7+96DFy/G2Bo3irOFCSId1xmc6XbnmvawgTX1g+HlsPWe2H25m2N9MuMm5sY5stZ65pasTUqYdzDPG9Bkhk0dC1PazCACCqP1KgqhNQKKpbn8eHa2mGYL964LJ+Xp/o2beTPBsADXBXQQBWSV/nRBEdC2rbv94f/r6M9+f2jhjdePx7dfGh3fmj71ya6PfjyEgx0ossnBYliOljHDzimNdf/eZ/Arn4aLj/GD3w/v+bZnb9+4cGFQP/P77vqLhbM0uHjn5t64W6TlAS1nRXcI4wfaM2/fV+umkz6/MXxrxhkdHrTVg2+xXT+UBuomPv1Z+9WTo8mu7/SLtNEETnZtOr5AH/qW69tXzw6ce9+lS+sXN84M8p3fPFrQmXe+79O75h0f+YjuvpKe/0qWQV7I/t3XH+zlISyyNjs+ava57GwU3TjnaTvujSiEYaYaqrFZkjlRk2DQBenAclm39RL4VdflT/xhvHqzuHF1YyiDCxuT6XJnOV3PivnxzGWYO7MaXbrMGptl1sWGc5d74xfTkyYtiSTrlNOqLmyWdfJTxndLN5vur5WZaFi0YT6pnGSkEIJhHmQ0Ce1yJtVakVXil0c6Xyw//eqXnnjPrStvf8eF8dvCFGm49bb3/YGnv/zM7aNnzq5xLhoiLpazOoEr8uk8NbHenu1trvXGGwNIScDMwnJZ12Xu+r0tAnM4my1ndSvkqejYjvDkeH5kj7ZVgoQYa9rod5LGU2f7r9/avXfUnN86m2PP+4Dtm4GpoioQ0IpGJyuOnSEVUQaSleoACMEAEKwqRoC40k3BfQgREiKtHAgrh5QgmdVupIKIIBAQgAInAVJjaPXZgaAJ7reYEEhVhXmFFkGAVfPizR6s3mfSEIgIIgGiqIiCQVwlwKagvNN7++Mfefuj7+xnQ1VuozrKpBxlZ640d38/dzk4kkWTbItWDQMF8LOJvvxieu2ljIPM28z1VQ0VXoZ5hJbaOU8VqHSXr6RF5KpSMMZ5Oj3QjWHsepOX4AtQw9ZjVqSY+US6YAclLypwjFHAgrAYa6SuoWlzVq4mUtVxBr67CeucQLU9YUjekekOlAQwCWhIrSH0RW4MiI/JcRPr1trMaL13rwNWpEVQVQYmaRvWYIyQodQGAyhJwTvqZqGNZLwwIDM3jXGESRWptSVdfpBPb0q9sPXcYYLZEc8qQya0rXHWjjqhqVlat9bH5SDue4gJUNHZuPKlCiiItInIWDKSIiMYARQSVFZERmQ2mIrde/H3fq3Xu9xODvypU+7jb8Pl8uiLvzWuJoYjA3ESa4kROqWPFJ3XiMLAbUqMpuh2il6HMzRZ7uskbWCAGIPzjpmFWwRk5hQZCFe9CkJCIlFGDoIAIrGtY2pTjED4n3MiAGBNVdtmGZmETdtagyIROdmYVpJYUVZrDEDmLXKLRJAMofL9XBOJJUqdQnA+VxDGyBQFEmtAQFWISTwaUABlMiQCnKJEVWSQuGIVoYACgxArgrVYBz9ZFqKd3DAkMuoIuWXAJu/m86g2qbTVqGuxBUfOAqxNZfFLnx9JDa5LSBbJeVsWtid893/9Z1eMuCc+lHwxeeXZa5/+hYfe+6G3vvc77kxEYwNV2714Lt68mpVZbKSCAt/z4eK7PxEHhYTAgfKCwEP9pWeLrTHUHkCD+OrhJ8wHP6bX7/Hvfs4cV84a8iYph5QscrYI5pmX4cYuUIS1dfELil/v9hLvb+Mb25DWALpsCYuCwEUJ1O2AtpzEmFxbhnqGKapWmnXQZYLARtE5N+5CYkgMSNASYAvWQK8LgM53uG1N6bluZV6bzIFtxaPt5GJRkVBk1ehVSoBIHqRlSbWmQKiErJCBLUGazFBazNAwgIIHFev6QwyTNjEiGVExNnedw6M9M+rlw/z2wR3fyTAIkXSKwpPpdPt5McCogMs4P0J3OmYbfPrja9/3liX/neVLr+9OpuX4/OjdH+0+8eCj58+9/8lv+2d//+9+4+rrSQwSWoDS2twat/qxmYHMikKEBhHQeUsAqkAAfF9ULIYsIigir075cN9vjGoNrjRzjYgk0iZWQbVO3Or9e0Fo67VRngK3yqd81jFu0rQcYuEcGAMUu7Z8ZXIsi6ZTFlWd1s6Prr5+vee7lGPG0DSRyGbOGkvHs4XJnLFWVIuiWENqk+aeOdYggkSsaohIFAlEwuWtrcL7G3vTZWWKgRPUfu5T3ThbnBn3ThaBOZUIa/18q1dc6neGaCgwiariURtsZk73uySMBCmqNS7GNgkLUBWiJWpErDWeYNAtmhgz9YhCFshaLzQuvDbtEunBSxu7t47suLhx9+sXNvvTNsznh2tZ57gODuilr13DB/HSB3v7rfn64Yvx4VN7t3f/yHd+R3zqme1v3CsNb57JlLGZVzGEclSqQmBoUS2gyR0ZTKoEYLxlSSpGBQUVDEVlJDVgXOmTJoyJwJxUUKZOezTrkAMjUUSIhJVZCCXFiiSoCEgSJUBUFXS2lcDM1hC3CZFoXBx1srPf+1+tf/jbstPjo5ufOpndu/zQ1v61gzxz3a4LlQwK97YrW6/e2en4wnpXlLkQFkVxPEvWeGTNM5NSEKlHwwGlEKMuJ3VpkBx551PVtMuFZTvKcysCDRYmQ7GLxaIRdpmJ9VIhyygbZTbPsnmzTN5zhDZKU6eW2VqUxAnhpE7GUuYdV42I1g3WANxGToI+KdHGoGcaKXOyLjtYzKatzJLkHVfHNM5o4JBYC9/fr+skrJQt2jjo+8CpdVA5WVZhd9b2CheqiJ7I2cqkinmZQnjzdnw/KvLeo1KMbZNqMJ5V1ZBxxlrq5cZpsKrWUdVQbMWRIBELg6rJCofiOh6ncOtzXzVrN7/8hd/6wP/jz6asfOLBt29/7lf7kypEamLIrD+exHZyUmAhz+zCa5OgYhoct0bJWNMtC5OHGEK0RDGxAyzIBLQv/NJ/eOTt715++Zkre21flAEmrPW4d+aH/8r2pcdODlpz2L78L/9N+vX/tOms
t3bJQcFYpGj9G1X7lh//n+HbP5b1Tk/v3gtvvJAjkarkbkWkNmhFHDdkfUcp85Q3qRUhIURLVrRU4w6W1b3f1zTrdPpsnSAgEjmDYmXlZQWxIbEYj4VaG23CLFPisjCo/m0b53f+7ev4+V984ySW7DZnrXXZfFmhMxk5G6riwoae8ft+Gdtm/fLZ9dOP0a178y/9jo2ty4tOt2O5aqpdnG93gfK8x5ClKqbIJLZj16bPPX/6ww/EwRjFgQBZB0QExvuMd2pDlnJVrGfY6vnRme/+wPDRx+Yv350//cbB888Pjo+LyAatsCC6yOSbCE172sFrf+O/P/Nvf2N33JmG5NceDDe2PQchpG4XEKDxECNlGaQGYgRrwVrgBEogBA0Dmg7K7X/294uTL1949D2LX3ra7dcMBRiX3Ysv/sU/2v/WD3/rhz9Wvu1jX97+vGvlJE7U+SaELonT1kLrUBxlA1MaEoV2LR9oYdsY2iaJkbVTp2xOt+/eSm0C1YbUo+tkruuNK7Pc+GreaMzImUU1jWghQkpsUevqpBwRO2pDbDWWZGx2fwGNDLrV+ArQ0Ep+BqDKoorIqqwifN8qrqJIKCqAFglElQjlP+NQFcDCrXs3B2UWm8la3hmsDeoY7SpHkpUGBAhVSRkwkHeZSRBYU9vNO299bxqdMcajz50piWN0HTsa9SO3v/3z9qSSVlhlmGXCsT3YWdfh5Gu/Xr+ysT/beeQP/zl1rmlrjIlfftZdve1YG6DERkMrjDGzSAZRsLTazVtbtLOKs2BgkGYngC2M14p3vtfpPn/t1z0H4WgiqrUq7I01oMyCKK5l27AYj9CI8QktKpFlUrGGtDpJphlunsJlXErb63bBO5+SxmSWrUkibbTOoBOyzFGNLUxuqKk5KoIqJMXVO2DIeCElgxyjEhpjEAFiQgUiC0QJQEJrVQE1IiYUa0hJmAkA1RiFVQsfk3iiTFnEQCIEUonBGMqV2y/8kn9jwMczPw/Iomw5iEEQBFZUMU7AOGu7jic7u2+8cfbR96nzoJkBO7uzOztp9Uxmil7VBD/IMUI2MJIgW9yd1RMi00q1HDvzF390//xb6cFp346nv/vLRqChSAIqTFpAC2mpxriyiu7a7TPdO1lnrZN3u+PRl7a3K/W6TJu5WdQhiRIZb8mwLhkFoVKOqizRIjkEZSYAIDKECsAsTBhElXkF17Vm1fUnZiQEeRNlikQck7eWDHGw1WIewb9+d99JOtfrkgaTZd3RaPP0RmSAxGjwYPsEOE33TkCarVMbjtNbrzzQTMVGHvYG07u7bXPg7r1cH9zuDIfV1z+Twm51a2fNGDSZy1PpW5gvkXIC74xFSZSixgYATOZJ1JH1mZNACcAWpIqAFjggWbAdyXrLjXcWH3nXnpubGroLi7MjqBLUDN6CIhgCAA7eFOvgTMaz9cnV+NqreTKXpqovf9XufKPvUpicAJbWZfv9C5t/6n9Ov/vTnbvX3UufcftTvPcyhLy9dzvj6+l97zx/ds1Pt93+M70zY6ibOeSX/9KP3H75+QKL2etfGuXN4VGNj32A1s/0Z3vpG78e6ts63Oi97z2d9YfkxS+m3/tN03hMJsGF/uX38Xq5WLSTpXTe84nRw1uL9f7l9JYiLtBfDq/dsl/8lHn+d06fPrMLne/8mR8/eO2lS4vx9dm9C0+89+Z099IPPJB3YHntePLp33ng3ZfPP/7kJKejL/9GNzuo7949Nyq73BajolnuQgiIDWqDmrK8n5QXZy+++3/8cbl4eXTztZN//Hd5Prlz/ZZ56G2dtQzaRFj1Oy60YVm1DM4Px+AosoJDa4t6OVXVzGeqkhddcpk3ro5L7Xm1g471YT5nCWKkWkLXa+C2kvD0/tVLF+ijTz5y9+b/wdV/Rlu7ZXd94AxrrSftcOKb3/veHOpWupVUkqoQSqWIJGTZbUEDLYxhIMCAjd1jdNM0bTdgPPCwaTDJbcAmCEmUJEoBqaSSUJUqqNKtuhVufnM4+eyzwxPWWnPO/rDfkk2Psb/tfT6cMZ7nWc+c8z9/v7037s0u1jv3DkCQ2z588bf33njx8C1vuf2Od72rbLh87tLmzndVX5ou730+PrinhifHnRpXyMIDaK4q5xs+S8sK4Oj4pKicSIqK+wdtcI0bjbangg4Cy40Hd4PDzZ0NBSyK0d7hgZGHio9Ph8P2mJJ79NJTmxuTV199xdqu677xRsQEyFnVUAWMkNbdIjM1VQIAMELwiAzm1rRpUzMVNERmRFk7DgiNkMzQBC0jEYEyAJow0fqd2AyJTBWYAY1EAQ0dMqChkakBkSIggpryN4KrhmYIGRXXA3MDBENANDAEIjQkA0VPO0888Y5v+YENP6W60VAhW+672hsDmUTr5uAB0KzE/uqVHi2/fm9Lo0dDiZgASMvNTSsaCQEvT7WpyBehG8qCU+wBOe/sro5nRV2FKxeHCxt5c0SFt6yE5IqJIGDhyUapT65faTIwU3HOETovQ4eOXEFuduxe7dxwhvUYmq2+m3MRbDpVLRyjtqtQWl4epi5ys8khqiRUVBtS7mPhF4ToC2eDnx3J/CwIWkoO0daJSefEVIz99hXozzB30Ebi5Aa0Zcd+RIEpZ0hJkiZf0rNvHXY2SFrqe5eSdkskQjVztRqQgqoRYz9k6yGerqjNHoM6z0VF/Zl1vYkgOyCL2dbXTgYNZTAzAsCEppoHhcLTkOnrr0BxXGkm6/27nj298ujGuW341Q/T3bsYI5IzRybYr9qibs661M7PmGgeuyEODDYuw6hC7o09LFbdMg0mapYQjYA0ZwPOag48u8owIHn2AZHMRHIvKYqkFAczcQS+KH5vF7nvh+DMxNgXRKwCIlEls2nKgug0WzZLmKMfnJA3z75JkivPwC5lQ5G1e0vEREXJENUko0QwUsEAAY3JANQ0pWwKjAgmJERqashsZmboiQwoMG1YaO/spZjAl8ucxAExnj9/4eDBoW2Pyrc+1x0vuq+/XMRcZvDOiUAwdM5HNWULjR96U0MbtPF8ebna/9lfGJ2hTjf7+7eeuvIdV977B5aFllNXTRp47Uu2d09XXSo4GQ+7Fza/+3vTtOE01FUTRQA5MN/6hX95rV3CRgMO5MIj5Y/+x/L0M/U7Z2nvzfLlOxKzdELTrXZ5MN0cU2A5MFaA8QScp9TD9c9DnTkDUAmuINY0O/JcsW9yn7ipjJjILEZKokOLPiB4yIjQWeyQp6hqGM2URiWQU2ZUjTmyAZpgGbJDIUcj57JHkXhyEjanYpGcU80PaWeDsnNiLXHJdS3dwCRIzoYOYo/MtlrxpDSv6gJvXYL9V9ecViiCn4xVIJ/Nadxs7jx1e5b96EIx9s45ApaYHXDTlCktgSSpOWikO0ztndHGKtvWwG7B416q+b4U/sKVb/mWr6balxvLFp54/m3/zf/4N/7in/vJ+/uLVYdoykyeiQkINKBJHkRBCQv27NjiitijscNg6848kaqpiRIyeTXzPnjHJqoqDtVALKYhx87SMHQD4zLFtvs93Y2knE4PT2PKflwVIp6Ri3C
ybK3Cxx85V1ejIR6PvdtydnJ2iKvpI6NJa66fzZumSoKbZdipClRUpTbhoB1zBauBDeqoG5PKO+QAauoQK19sjifH89Oi5N1xdWf/CMyRC5HsKzfvXxlPynF189ZRQW53XEwZLjblpY3KpTwpvBckNC5wmVE9Fd41jKp5sVzVvvKBuk6RoBWbD0k0G3Pb99HYs6tB58enJ0Mu69B3q5ODk8B0Nu/DtFkMqw99+/t/8Td+h4t6Z7qV9g8vbExfv31E2ZwvHnl8/NRbfFWcHc3i7HB5ceca7Nb35re3rkxHq2z9anBmUckVGGkVs3PsnRviIGiFWekrGYacBiNm58TUsbOsAE5ScoCqmpTZAMmagPNXPrNsj6vDB77vsmDM4quwzmgAiqYskpADWMrDCgHBgELg1IHDumj6YdmPwrzZ3v3Ob9/4rh9akQ7Hy13wu+SOrx/LgF2vVWGMef/saEjQEMW+uzt0Dtx2VT04ngE+DPMdHx1XVVmWpQ5S+9Blq6ysfDg5O643SF3OqStgRIDIXpWlj1d2d2ZDaoIvXd7emSL4+WHrC6dFLr2b+upwf54LrFxhQ6wmDlysGjpbDmllzjQBSlJGIQNUHnrzAV2BAw07ZTW0nQtydctvdHKaOBLUTYl9XLTZoyPNmwhF6R7k3DlryVq1ZYq+G0aVWwGekg4m3EVKftENiWDd9f/3WkXrBfqU1cQbYgjIgRXRJBdqnGjVJSNcZWBCMsgpZ8tF4Tmp9ClQFUL42M/+4teb1eVL5cd+7s2/9Y/+7vTB6vOfeslHLQCMbBXydFTIPI9RAwaa5aIuBh8WO1txuhnbVbN3vdHsIItyBi8glUFhcv7mq93f/L82B6dN0MFs2fddpP2zxfL60cX3SJPbiQzdF39rUvnCFV2OVvo8DKAxS66uPeXe9743sYiH9ye1Zx8UlZnzwxQ3M3nAWqjMzW6rZbKld9llx8ENqxWDL52XiFhsh9F41Q+MqIJCUJZlSug9ORNK4pEyr323SdXyYAh5GMzUpsXoMp+DW73vOzLIaNmyZLOUKGBRu+JydTA/Pnrp4IPf9tbTL3zuzX/3+sFvf3obLUW1PJc3P+s4TkqMrTiaRFwqAZeVdUqEhfMbN/dWf/Nvx9O0W55XKiM6D05WEVDqzXHu87DqY1H4D71v+id+qB1f0WSpfHLy3Ifw5c+f/q2/XYmYCKlAMZaAQ5yHqmDMV26dnPzlv3T1R39s/vIrcLiHoRxWiX3ISQMRQILYgcP1FBRUISOQAzTICERgABmfRe7/ySfO2t8qAUPRDEzmqBT3pCwXv/aLb/zmx7/7//Wnvr63cX15Z1ozFe541ncp8ULKFJra65CoWzDauY2tcbmxN5sFc0DV5Nx4GnB+tBeHvq5LUmwHrH3YGhUbDUfTs9VqOWh2YZWHhORd4WMCY1GBlLY9H3eLEqlH8URl6f8PqSJ0azsyGCKsq2o1y6KqqrZGiOLvIY1sPUA2XMuQFWw9sEAABry/f3jt6lO1gs+tnHVOcjZYv+qTQzMzA9AE5jKvw1jJlFZydu7yMxEqrxoeJkLY+WrZI597D5778uLBl+vJxtAep9snw4O9Kml781YZeDhclRhPX3n53GPPptgNr7w4/+Wf3uySqqlkEzQEIDIR6CI1vty50HLVnn9SH3kaA4gD5VzXtRHOqdx+1w+kN77k9q8zZBARWa86CBo4AAVCH7KKiKiK5k5zsmzssEUlJhFJhmVTpC5XkzIPsWkqyNEzaRaoPV7c2Pdu613fo6NNmM/t9qvw6mdCAEJSIMsiZOQcKUgWLD14ABdSEgYDJQMko3UY2QjQkRkhaoYcLdYenFnAUsAPKSFjRuGigpQ1CxCKgYGBAq1hPrlszjIu981Q1SSKWOQ1HITEOxu61pxzVYB02nXT6i3PZQPVgR2WYLY4I/ZWsi/Bo0nXM4FD0vnJ6u4NINdHsUKGpx+fPvu+2EKa8PBd3zs8+Kz7yt2GNb/yq0VT8PwQBrCMORkg1Y76k7QtXVMWs6JA3wXPTT1mtrYfDs66ThywSFYgJDBao01UB4MMBAZoxgRoSoRqoAQCDy9aQiABRhtEHCEzwzew1kmEAMkHM81I5y5cjYeL1WqxU0FBVpVNJ0Tq5rMZAHVxntNgQ4tRaqXc6sXy0jO753eOF8sHJxdKGzOmNADXaWuraaguaRLni6/99qZy0YzC6PKxf+w03irhy9uVgoJmwJQpe0B1gWHotX2N8zJhz9Mdv3UxD0o5mwikHosStceY6/0vDp+5WYXAVPhVhMxgAdAgroAN1jSNMIVJgOUMdLX5hV+Dlz/Vt0fl7g4MOXYDPPIklKM8s7afteN3Hd9bjU7vOJzTwb1457gggNwXZdCP/AP36Q97Mwg6rsOgbq+duPNX7MYXdtNegif1rX/w5U//zKOV6W/9d9SM+uJc99QffGN1dPFic3iyrO7cfuTwLO0dpsjUbEZSzFEH4v5ksnhQfPyl4RcPzpV+XDR9e1ZceVs6oPmtl2FapaFoDrvDf/zXwvHN7vTgsenjh6ut7e/5Y5+f3X70/EXw6dpx8iev3X/x1y9++x8ZfeiPlX4333ttMv9K/p1fy4HvwZZduuhPrm+dnYxrRwF4vP3YD/2hbnT++m/9En3yX15Y7bnR5tMf/MH708eevjSRo2UrFyaXy3Z6PyxnKeau2snOyFN7esYq2Cq5wtfluXPn7926VZdN8GXB4y6jhlxN9AxudH2H3NTj4FS7lC9vbHb9amd7Q2w7wlFSWCy6uvAry7vbW4vFKsvwhS9+5eDwzccef27j3LuvPfre9269/cP/8JP9aRyPy7oGAG2m8Jb3XBvaYXZ4nGs76dqxL6AMrvboNKecl8Pu7ra5tDptN8ZFXYeZBw4YPB3une6GpiqrAfjBbMXjycXtyep4/8quHM/2RiO/ysPG1uhwrweAgp0aMlGC7NYKUDBZP73JkWZEZAKGdcTO1hkWsgSKgKwIgKwAogjgHnJpARCdIwDNgAxIpkBIYopAkNWECAmBTFR0zTgCAxA0XL/zgiqYma7f3NbP8zU2D/UbCA8zhIcfx7Z5fvdt7/r2TT9lw265okqcw7pwDWrZ93h65HwJRtLH/rHHV9/5w/OmwI//xvjLn+XjOWGQ6U7e9LaxqeisrrrCETBl5MFgb+7Q9eziC+91T1zDswenN18vykCuMADzZMgQCACgCF1C8TaymIceQw0ecjSSwY0aBSVyMJ/bWUsmuiIb1JHLXQugwozFGBL0fUu+YV/EDB6MFGzoU170ZaXnNmGrxn6Zb7+MR/e9eSMWI8wdUkY0YDZFDFXORKEUzBgFohIJs0KaW5cZJKUBQ2M+wPmrPZVBchm8oyKd7vWrhdvcptEu+cJyQswyDMRgy5Myzoe2Tb4w7xEMVTiqGLqqMAPCKPhQ9dFLz+TMAIgIlRxlYbcSbGdYSFGivHF2luLqbd+eXONCE5gIFRhzFs0ZneY4AKlY265kf3a6f3rUDbOQ++DIcZGQyPmccgKKqiTCAGiGRKAmSp4LYm+w5hmbAqaU09BlhS
RqSI7YO/69YJGto7mWUBCUzUzSgOzXsWs2B4RsAMAiVgYyTU6tYK+W1UiAcxZwRqQEgERJh5gyqVXoRdCpQ6Q+JVZ1qpJBAFzwmkjYXOGMScxUFTxrjORYk4akYVTHqjgk1z92buvRq93Nu8tIcPHiYz/5R/ndL+Cie/2//1v5E1/QFqhwOafVqi8coHOdWZKozjmP2kfpbeSndnTSf/5TxbueeuSd73hw4+C0j2/s3bry2DV8Yz9/4rdGh6dlUVdNGVdDp12hnoeUUm8YXBHybLn/z/7BZWaoCgCDK1fct/5+efRp49ptOnr721evPig1ao7BSZhU2YMQhfEYypEk4C4DEAjAynQh1JiWPY4BCgNViz1xAWZp6LgYQQbDiA6BnSKhd8qAfgTeWe6kW7FvJCcjM8JMiBsbUTJYZsfmAhDGxcKBsCe3PVVSzQk6U0lYjS2UwGyY0QyA0zCAru3gEb1jI1MALjTlDCva2I6jbVxepnoSZ2+EKDktAGR1shdKv1isoi2+9uWXNzafLsju7x/tbG21Q6od5NylvBxzRFsCaLV5RSxJfyR8Lg12/aaGE7cYyv7wRA32v/T1J9/3VnW7zea1/+q/+Et/7a/81zb0TKEuqXBAqDlHUkFEA3PAqkmEARE5sPeimVyhQOg8cgAEJix9BYxCxgRMqCkaSKacMUVIQ+yH3M/alOwbEwGADmF1siR0ndkbr9zoJCPgIIlLqKbYLs7uL/tm4ovCRU2Xrpy/f9L3vdQljYinatfOjzbqMvYpDnJho7q3ivOcgSgOSc1WXayq5mQZY5LS8UbgUQCJi3Ej58/RajFf9KuLu5dVrB+Ew+ioy8PBbFpWlxq3XdaXJo3FRIZKBRlKFldgK2khkRnHVeEDH8+Wzjc5WRN86ZmQbFghhyEP6kjBqUi76Bd9nwDPgHQ2e2J7a9w0p7PFud2NL929d3l0+aOf+awfF0VRHB4vyZX39meTSb1Mevdo/vnD+++69MLhLBcbo6ffunXz1btXntpdHrWws5E3Lj/z/NNnd+4/+NxXV/cfXNwZj5OtTlqHgOQVyTFnDehD2TQR/bJtPRGhA0xo4NgzUUqCQBrVlFVts/Zy5zWP2dB3ouhczIMDQ/Drqg2QAJ0qGbCpGICqIAgTmrRU2OnV6YUf/4n28vSnfuOX3/v00888du3Ztz759kcf/dq9m+UEm2loh7Ro56M6tEMa1U2KQp69D/M+RQFyNGrq1WpRliUQq9B8pqc5AaJgjrEvmzpGLIqiQkCAlHpRHBxtVFXt9c7xfFRVMXZR0DFh8Iuc54usGOYpBwnt6mzcNOzMQOMS5id5aDMV1aMXrs2G49UwHxW+LjfO2lbTMAxmkhcxURnLqlySMwO1vFm60bQ4PTiZcEmDIBGoFIgByJehM1pEAeQYk2dH4LucXpudTZqyZpY+g5kDzFn+/1tFKSZLguDq4ElScEgEIhKY25nEwaVceg9U6GQSNBMM2bJ676vCg1p/1PsR+4uc2mHcuu8Gf/9v/wM5HJ6/tVcVvhhsUHVAfe42S4pdFCsY4ajScz/xY5d/4D+YuUru3+v/3381vv4KmQL7JODBKRCh7dTBDg9l6PMyIuSStPCU+u70f/mf3J1PXX7muXz9uDw+rLDqTVzjuigWyDFkU3DZb1FRUzHaODidlUDEhh4NYBiSI++LRn2Tdi8U7/vAxXNX+k98LN16FZKA5aCAWXpZpSIMrJoxhHEZSAyGnAZdYShliAQMhBgqUrKUyTmkKBp9QQBKyDG3lhPnVHjIWYi84+BD4wGGtEyFXv7+9w3PXrg0X37+7/zM6JX54s3DHcbCQ2Y1TsjZMIEfcVEHqmNcMYNK8uyAQJM4buxQgzpx/RB7CCET+Mpr1iRm7MpiOl/IU+/8UL7wllmfgq5GFzchVT6/t/XM0UJdxDxIPEQqsXJi5oh8V+1+7HP5058fSaqgQPJQwyBqYInIswccIAMAgyWICmrA8jD3WTnoAMQPyZzVje/Rw8pS9j5Z9AU59a61R2woXvnCB9727q9+dWn7J0LAVcEBq2a0aI8tqneuLMvFHDJMF4OAL30KQlw0jS/96s09loQDZoLJ5tbuuJnUrEEO7x+fLCNREa0/y4MSgaEBDlGZvSvc4WweISEXdcmMFNM3TAfMHsnxmk4EWcSQ1ntnoqYPWzsAD5HWqAYCqIYKhvbwtR4MiQBUGW02X7RGrEn6lBSH1eARhRDYDJECgikjsGpUGIRQgSQRHM1vf2Xn0cfZGyByKNAbGlW+SQU1O09I+HpZe5Tj4d7phDxJRhkg+ik7bPv+C7+yhOPFnTeqw8PdPmbFCIgekgh5ArIc1RMl0klRtmf75p6FsgisGFfeoQEiItehLXabx74V999gG4zBQilRmEwlrRcfkgmQMDMaYkp+7aaQNfwbvTAMmfIhqxEXsUtxYsXFbWnqcPnR1hSeeXr63HszNRjbdnYCG81odjvff8O8ByUWZAUG4qBG0ELilNkVAAC9YCDwHswZoKGoaYEOOkmFT+/5AD39XHv7TfnSv9ucKyGyC4IZvcuayaMmJXLknIqYZihYshFRVvPmMkDWjARgioQAugYyePImA/RJh4HHbvvaW9qsXKB5UsDVnTfIBvJmGNkzMekwgPNOFu296wUhZMWynL7vQwwhWCtlIe7xne/90/e+9H/fAHZ3bms3FAopQw4gCokogRUKy4OFXqs/8Af/gze+9NpvfuJz3LWXN89tjCYPlmmRMiMlARYtPLHnkIWR1CDmLEBMkEwR1q09NQNdr/QjqD2EhyYzNkBJ/A1WURILAGenSxeYinprPLZo73vukRHqeOexanrFTGbHr3fx+OjoeHZ0uDVuavL92erRi1euPvqktC7MhrCYvbC1E/duw9nB4sHesjAeV2XtuwQl4Ii8gMr8ZHZwMkwubX/g+1jecvbaF8tZX1tHzrGrDJ2oOMjQ9+3QSuHTrGv8KpQO3TqxUSAz5h7UB1qEWyfjooDCwwDQZRgSZIU6wLgEX9oyYSzhwX04twlPPtV/4Yt+slE+/yQc7CtnrLZytOA6CEUc71743j+4f+fVs67Qs/DoH/2rw7HhF34p3L4DUYiCHZ0FUg+5le7uI++Z/OX/rtys5CP/s//yL6yWN3d/9C+W3/6fVFXV/vzfb/Keu+r9pbenc0nKfKFP5Z0305290Te9J154cr6SYufp2YOXi8M3ZXkP9o69uDI3FE2g9z7Iya3U+63dXSsSoW4Uw9lX3yiH44IMhrk7/B27uvGWb31Hx7p5uFp95d9udItL06b7J39ye8R5GSbVZeZFQXbCZf2H/zI+/oJfvLn4uX/u975qy/1oeOvXfurenc52geuNjR//zmE23Lh1JJ/96G6ZSq5R9aBh/8Lz9fjq7c/8euRDv7Mjm2M0O3lwnWNmN7Eh7t+/Pq6nQz/keGbM5ApMSxNkz0GLoihW3UCgG9MagSXky6Ptm7fPwuTye7757Xdfv3F6fJqD+cmwNaHZ4VDulke5bW9+5fRrr4zrj1S+AuvL0QY6j/X8yoXRZGvz7
OAODvHcdIoTd3jU9zlm71c2KauNPj/Y2t066xZJMrRyFvsbbxyU1UbX5iD9I1fOrw7nmnKv0oucn2yb9NMxX9hp7t3dI8XRVr15aQJfPQKAkp0BipkzzCBrRu86zEOIiM5AHWEgBFVGNsP1KieBmgoQA4IqGJIpARghETBYIDNyFSArCACYKooBAjogNVzLzgCJWdbpVEYBJbD12UqEakQPRcu43ipdP7QRgXBdkgOoIQEX7tqj769ppzuZFZS7vsXJeFQ1bpHP1ZK/+jm4+TrFCOQ4OF9yTZbQ1ecu1nWT9o5xGtpn3wmPXxiWswaEQDCK9YkQLbMjZEerYSE1t1VZ4W7R3KvLKvqQEFwoQAyScWDzAYqquHJBv/glB0hxUACTAfNafu1kMXeeXZiIeQRkSdCtXDvPAuZLyNFkEFQ33ox5gAyUDaAD76swsXaZX/6aq5gP38g371VdjblUdjZkZ3EtMjWCrOSwAufTMPDmtivHraAczxrp7OzYRMAxqJr3lZd8chbGdV2FODuy/lDPjrhT2CjElDjJ4izlDN585br9W3T7Bg8tg6VhgBK8CaKxkYhmNCMgdENWX3pvNix7YDYRIzADJc5kaGixw6w+eHr5ZZ8wTc9NipARjSjmxKGQpJYEQDPZvFsenx0+ODmer+aS2pw7CCFTya5ylTP1MlDORmLMQAyGlIFCMTU/ViwYXB6SUTIAjTENvSKBguPCB++c+71WURYlFI+mlkhT4QI7ToYm6g1NM2j2zptRFhoEKufA+pTVObU0mKijQGgMqIYCYErkHENCkYAeVNgQCUgt9R0RkXOWQNUJioqaY0FjApFclIEEmlGB82HeWbVzYbYx/u6/9t8WOzvti59943/4u0paPfW0bZ1zE/vW/+xPfPL6myidK3wegC23MTvCVsGyjmsYjz30Wbyrz5dehsVLX66ddqet3rm5XO6987t/dH79xuju63L3us5npDp0S3bm5weju/v82M68xIROY6r7+OCjPzuRwo1DajuiET3ydhiNTWSY982jbw9vO1y99kqd+/nZEQPUl6/GaSkBcdPHhD546jO2S6w9bY9gSFTVsSpVwYeASbAuAY14ROgNByzW41jVdu6mUyg2h2w2qDfPoQGqse0REgihKbEXTa4qkkS0TIzes6WE5IxAzbgcETnsB0iqaQWI651+FEUzYyd55Xww9NbNkQg04WC0WhA7jGqrnk0CGCazznPlNs9tHx/ux1XKi3nlwvJwuThdKmBmDA6Wq8GXoZluImQYjh2L8JaKka1GcmO485XNwsu5nWb8yEmk/ubr1XB1delgev6ir5vn3vc9f+yPvP7TP/Ph+Sopra9dwfUSpAoiZoeOCJVKIrQMxkwhp6jAAiFDyuydL4E0OFJMAMmkV23VZBhWUbo+5W4Y+igrMQORb6ze3Ll/JFalSMfd2YyhxVAFu7RTToJ4J12bLUtuoRwVsbfj0xV6N619jvHahc0KDEy9p9kiV6E8XXQiMgnlfN6WQD64elqSD7f2D/3Y+8CP7O440nun+1cvjLUfrt89Go037x8dJkNARqA8xM06PLE1ubZRBZECM3hWI1USAGVQsmVMyz5PiqYBS32qQ3WcQXJygJA1MO+WIVEGrs8iLiUPAiCYlX3tl108Pjm7MpmuRF8/PLh07dq7n3widfPtSaOajnPmAvvVQMjLtiOH063m9tnyp37ly4XDZRyeuzJU3POGHC/6+21Cdd39s7e+8L5HHnn8+ldeWdx5M56c1lsh91IGPlsMOerGxkaztXNyul+4oiJvOYkMlsUhIFOy/HAUw5ZBEyCqEERTFTBwDhgcYYoJRRhRVRFdhkyQkQtDzKLIwUGypKeLNl7bfd9f+xuvt4t2md+6dfnZR542n5Umf+a/+uv/7F/981/7lQ+7Egl9VY45FK6SzlKPmofsQ+VL73IAtMWqdcwq4thVRThqWzCHaqs+OROGUG2M5rM9RBzXVU4y9pX1oRMaXdgdhtVg0g+pG/J0Mi4mNajuVOcOj4+WwxIc7lwYpZgKFkTiSLhCSpSje3N1r6q1LsbLeZ4tVyKZmMsiVJ5mXTsTKC3LoC1zTsYc25hGVqDwzng89LEKgdD6mLSLm5Xf9H7HIja+9kTOjSVL1ez3ea6ZlR/bniRJy2W7tTWBO6f/e6uICdCxqjkmM4opswAQDoOmiFFMxMrKFSUhAagQWFkEibkHHAWH3m7dP7zw2BP/5V/9C1/5f/4TOz6++YtfvrO/PwbaAWSQHV8yUeyFPLvCpYFvIr3w//iL1ff+8Fm9dTq3gkp/cVO+riYGAQkpDckzEHMgXh6f+lAWZeh67HOiDDu+nDw4tX/xsUX5W8T+/M4V1HLZLtvcLQ08IimgL/De/fQrv3D12afPbs6PXnnNgAcEJTRgz47MZQXcqM997wv9e58Nm5PhTq03WjYlQ8ceQBaOwgef8+d2/Sx1v/M1kmRmzI7MIVfGiETIrFmcgib1PmTkUNYRMxoruIzZl6TRojP0Lq1EgRmKtFy6AG0v+6+0WzinV1+tPruHM9spdod4JklzjBQcuca7RhJZQoEBUWLWYjSKbQdmMmRiBizQhyElAQWvQBoCWycaO0gWQnUJhi/8lb/65PxP7Xznt7UuH+wfnD93qT29kbIphDZ23qPERIGSAoImUcehyKlsExQBJENOmgYKQdFJ17EpBQ8xQuwf7rcbghDkBBdG3Wbhbz9wC/KuEDXzTkGJCUAd+4SoIqUn77O8dPPp53/k5PYXz8mhlVJ5R+Pah0ldY86DudwLVyHEPjeTsarf3L345vHdEfPiZNafHteOq8Dj0cg1092tyf3j+6fH7TILVg4Vg3cBGEjT0JWeKnUqkKNs7+4eLg+yaY7GjJa/0TcldEi0ntauEz9qSVXUZK0fhodQl2+QIhBgzboGhwiEqKBgCGhghGQRIPqshFm6xSIPCRSdqhk4NRQkZiJcFwYG5JKbFmWZh/aVT4Z3fTDTJCN7hwCWYhdM8uL+ra9+crcsk4ilPqholGHVVYWTtEKDsuCxreRTvzlVJFElTriW9xiuEYBELgTJ0TsaTm5XzmCYLQ9uu8lmU/ouxl4qJw4KA46n+we7FHIeMpIgIWGSjOiIjYBSUnIuZ3BIZqICyA4dgyMysqEfNzua52VBEbF+5Hz72GV+y1O2eSVce5zne25jK9WVAfTDXKY1jJ+9/5tuu6xz1wMLIgI4UMtBu296N7zl3fLJz+Gbb4wCAS4FLEtbTqquXYbxxCsClTB1Shae/8F49VG5/MGdt3yz/btfwusvF9YrQAYSxUxMZZklMwC5AOhFEpmKZACLmo1xXbGpApkZuyzZGSMiU7YUidV3R2m1J7Bl7FyYalK3jE3leXcnD0POhj4EH9wQu699xq1WxmSeu6rYfup9CZzz1s73feGtW7rageXSUVLMAwBjBEiMmS33uTaoEI57ybl5x/Pve/EzLxWQT2ZHWfNWUy6HeVYJzCKasiEiAdpa1+1wENW14QxRTNe1rIExoqmZrdNxa0KBKfxeGxRy7k0hiaoW
krVGvLS1Mcawdf7c6OpTXWv9fNYUNUq7WU22ntwOStbxxtV3pCK43XO4TBW7w698du+rn/7gE0/df+W0n3dd4RgTxOhRCYzNgXOudAX1KHevf+nDR9Xjl89/84VHt2df+9gYOsgAAAiWIxXbj2WvQx0OVt2Od1s5Y9sWzVixEHPOBlCCYQDIID10ADyCMAEByEugjRfT5eq7/6waP37y5vDxvz8aP3r0YG/62IUVh7H4NDeoxmaZurnOFhw8b+y2914P9fn6T/z0sounMZ3d+/QO+7BdwfEZZOW60dybGWxc2H7/j7XN48e33zhfv7298P5zTz4L+bo/vNHtPFK+/dl848vp8G78+sc2L//++5//zC6cclze7duBr+zuvG3EL9HxZ3asW+U+PPJcaFq+cd+Wq7BTgbTaFNFovF30i1NqZcilf/bxky9+6lJj4CpbdVvjEj71q4df/fnmybcvrh+cf8+3dXduhTbh5mZGKa9d0pVYl8QsOM83fjsdf+7kK5/eOl4UZZFSWUPeuXNn+9xXtt/9+xdP/EhbXzSbFcc/Nz2+vslpNZCrGrh27ZFn33pv73j5uy+aL0+9Li9un3vnkx5bVVkttbTal+7w6EbVjJuqNiHMaSO41A+AUhSFCjBLl7rJdLS7UV2QGkHdtJSSpO8ubvi3Pvrkl+5df/PolJhT9vlYmrrcqEqXl6OouppTMb4z65n75iLX06KPQ1GEYlTP5kOaLcHME4MIDO1ZXBFTUxbdataEcrQ7Ojw6nYzG08l4lYYm5D6t7h0dSi6SSDkuts5NvbWV8ddfuT+0mpFUpKkeJkwLIjUUVUbySGKiiEwGwCLrswwcApgwMwKqERIqGAIZIKgJmBGgEaFHcIoEKkwloK6NCYC0NpkBSzIAMxJFNUBS/IZ4E8zW+FUUBQNTBgQTWMt8DIgIANezjYcV/nqQQYDMhNzN53k+I0jFxqgZT+qy3Cy4+MonZl/5zfDG8fi4Yxe0H6wuSVN1+6vdYH6xROFiVOGG4zpECJg1xT6QETD7goCUTbOKZN/UDNlMSZILZW5XyMZFhUSi5l0whr5rc+nj0I5ycqFUA6VoThCcgqGldHYW6mCK2dDVY1kpiZgREljstV2iRsLctZFdMElgFPsYqgCKRZ/t330c4uAgNuNtIi99omCaI4Ayl6YQecxPP2qOIffcMweep5J3LxT1HbzzOkrUIeeBfXDElvYO8itfDSW48+c0xzSsLHd+a8e2mj53nDpkAYl5eSx7D/LNl/F47gUIsiOmzOAZJh7Udxj81Ufd9gUopu2t1+T+10YYy7LOCDl2KgMZ5iEigyCCSFbxyUJQu/6qK29b7CGrIatJHCI5BGIKlQCrppS7Ni76fil50JzEjAgLx6KZNXrIuPaHC6CZC+R9IC4NfRRlTCLi12pMyUyqOXsOa08HsWP/sFdkTIKYwCpGT4ygzoDJITjIwmZg4ICGLJmtizk4VkwGlI2TWSd9AFcRQbY1OLBE742SmIk4Cp5YZd0w7YHIwGKKrKyQwJmai5GMSQkSJAIIddku/Xu+6z9991/6kDy4PZ+6cvOySd586lEu9ehotfPoO+4Nq0hu/OR747kL1b3boMqkdVW0HeZsCRDJcsqr+WLS1OJw3g2pCs1ObbO9PrcVQTHb737zl5vTY33zK3rnbkBSAkULTnaObz34v/1I9cw1fsu7ivf9cFWeu/V3//IVnIATqComxHqkbkzZqsLPu7lduIDvf+Hs4I3tBY2h7s56KhqaTCx1OigquabIhrSzpQyG5EKhiIolOpW0lk2rOUIAkwSpM0YoGL2yH5sBLM/QNVQ0EM90WIJLVJaAAcCRZF3MEAw0EpdAaElVEZDBMzsHqQVNKtl8MEPLgogmpkkYSHOkghyzpkEEKba+rKF0psYFS+pQhUsfz2Z+XCBvoHYptikBYajrctKls5jG2+N7B3uj6agY43hSkHY+hbxQ3M1xcWpcgJ/64F1/+8W/8WfjPcL6A+/6zh976etv1tPC3ztwxTpZxllQl/ret3zzp6qPvjI7XZgoAps4NBMgQiaEwYIjQmpVa+8Dgc/elwWwV1QMTj0kTwQgmgZtvVcHHWCnmnpZtXFY9iJZ+yjRAMw0P7wLln3aOH9l//7dDvT8tYvnxmF+eP/CyE8KW3bduCrP7eycHN09O5orARfOsu7W43FtO02V2+F40S2GKAkSprKkPGhveVQWsR0QqHLoTEtyHvmZx86tZnN0aBjO7z7xYO9N4WlRjfrFWW6H7Ul9fqe8UG5cbCqOUsbExJqQgJynDJDFADHHXiVXBBsFbxbV0XzeAR3ntB2wITURpzbyRUuGxBQ4reaGhoDK2OXYyzCZNmdd78g9dnG7YpmvlqRGYKeL4c5iGBVe+4QUL13cnS/aUlKcw0uffSBmWPqXXzy5uOWmzf7jF7dH5bhf5Rv/9vPnm0tbFyaXv+mD0/e954v/9hfrg+NsSw/iCqgIHa2k052N8vj+KYKhU3QeEBWptQiinj3BQ06ZIphJKJjNqYKKASRAZCIkr2AJs0lSiQxmWVCBiIbVccraNE0eO37+mZcPDg/v3b164eI7ft8LyKPD5WxvuRhL+djFp5+4fOl0OLm7t+QqLM6WKRuRNGD1pGq7ZUJ0Rgi40dSrbgWofTcsl10250CZzDsCwbYdOjlhIkA0b5t1VXo/dr7xhY84dk5SBrVuiIwdelBJbWxr56VB5wvCzEl88kOnq1Vv0auI9wBmi6UIhSSWB61CSF0qShSJjXOFh9KBIrWrjojIGBR8qOuqSroKDuLQBdJgMG6CDAMiBdNRXQpiIry8NbpzZ/BAyFh6WrWpi5FdODru/v0FNIaqQssgkpNKKIJBnvdZIkMENKrGoWxYSJarQVsJjiXDEFVEorauqN//PX+gvHbxLW99T/PMv/6V334TGDefOr+5tbPbdtOjUzpa8ABeELIZ5L6aPvrn/kL4wR8Zpo1THUu/xambzVeCSD6roZkjl0U5uG5IVQiDxD6BY65DZQQ0pIaDJEAArAuvaegMoXSjhny0fpH62Fh5SXjvv/7w5tWdxf29ySxLUXYcFQioYMeYceji/ORo562X4ZmRMm6/57Gbv/EbXlAjKNrQyCN/6kPu2z5IvhofH3/pU59xgwfypSsp+6FHMc+uMIqUxMwxlCBQeCFSGAaCAM1Y2SE5V83b7jgUYwRfIAbErmvrZlzkfP8Xfn350VHRd94HV5TZei6IoXLmLZGu4oCpqC9lMWcCrL4mcOBCQMnOOSMuJ1PJgrnvJNP2lTD2J4s92qhHrqaTM5eMHD4y15t//Z8PH/m19/+9vzE8cnmREs8OKlfhIrngGZzDyEaIkonzIJX3vgbICg5BDaqKVkQCqhlUCBUEVI1KgJzBO1ire5FOts6P/vOfjL/8z+K//nRtLmbUQCjqCAgUNYsLEamP2RI9+Nz1Kz+2+Sf/oz//cz//F7bLsGqzJhe4sFALgBDkttcoCWLdbBLT8fJwtFk0kyLJsFisJpPpucm0Zr/y/o2j+4dHp855DbjStDVqgve
iMeeERWFR1k+cZKGkhogrDiQZ1Bw/zFN4R2yIiCIqAAqQTc1MEVTh4QI/rs1nBgAGprZeRAMDQ3v4vaqCARmg5q4fxpub6BNHyygE5IEppeCZEyA5UxU29phElWxIOWDRPnh9/tJHt77phzoiMbU4pOVSlovVV18szqIqhVB0bY9DIqOqaFQTu5Bi17iS1zW2ITtQAoyZ0ZELSVRFmRwSqygJpbajatz47TNxxuVqWCxDGG+/1Y7vbh7dtAdvuntfC2HDfMrzjnMGNUcMqBbTuhubuui4yCrsHYCuJbdOlVLyVRjigptaK05bj9AL3zF5x3OCaIOlJuTiiiu8npwWVLnTZTnfWxzfpXEQvWByD0TJBaICynx65WLzw/9Zh5vlGyfFgzs4GYEfL4opPfEEXr6cm8Ic+X6ge3cAz4qnn+wcB57hJKTLzxdXLurHf73/7CfCcuYEQRDVKabgiZkkCypaZIzmPCkqGAOgMwMFE1HNTEFzVgMzQmJkBDKY37KDN9rNZwNORFu7fxMPH5SMWO+4aovSXIceQKvVg9kn/21YJhGxQOW156g6R0N2GEZ+lPXs+NMf21AkBUsoxgqKSGyaJRM6RnTOWGB12ltCQzeq69Xp8qAfYpZxXVzoy8PVKiIaYDQyVWegYIGxdOxQotoga+sSAqhfcwUQDXSdmFgvtYjKmnCyvguuv37j8sVdxGJ5dqYCGL0YbO5cCduP5npaFrlu3NRN9x807M7AQ8DSuZGvmij9ccwSh6HQV/r84o1bH9vP71utnqmLQIiZakfoCFJHZWmBGAZz9bjEZzd3D9qz9OCl/TTacZKYiW04XXqmqhnlnYuzEvawTdOCZv3mIGUoYYjoAjEZOUSD2AMqsIMQYL0CygFCtRS9+of+0rDz3KYO+LV/NJ02x3mn+MB3d8OKxpeWJw9g9enRzhi7JZzcgrQCyf7BEX/spzWoPvK+ZjzGND9fc/3e74FhP19/rbtxt8zmJXUHD4oh9r/69+ruxfNPPK/T7Tff90Pp2Yvu9U8cvb63+/xb9/0z4+VidP0r9Ov/AvsHlw+XlvbvrujxP/W3HiC0K9Ev/PPL/QKA+mWmYrMaF7AJQ3DR9VC4o+1RHO2Wbx5vBSax4fGnXnvsucm3fsfiw/+ADo58YPAe7h2PXjvLd1K9tdt/14/Mvu8Sli7lBMv5po5OvvDlcv/1jdjyyQP85EcrjTsBUof9cWYErnASh/SxXz/9zV/BAIoltP7R7UL6Zbta9q505y8M1544PD2dbl5612PPnZ7t33P15ju/c/ttF+D2q/dfebmelqpgpvVkjKTiFRAkLRIkNSPKUQxDCFIBIlA4Wy4JkLvh3MTOn5u+ef2uEs/u3Qyrs/OFP10mFOwGO416NjtrmFxNoLicLc49su1DUe6U9VazWMU+qQgpBHYMInU5lrw6mx3nOCh6FS6Fu/kqZzt/4YoInRydThrNq7bvenYAAJvoaVyczha7Ux7MlsPqsccu3D0+ZfKBH5YHgQkABUFMFVEUgFDRBE2BYZ0IAVuDghFJYR1eVSJSAEETVUUyIngobSZCQC7WSgQgA80AtpYj0PosQUECRFpnk9ahcQBTWN+rziybAgKBEZIDlTWzfg3eJgQEBgMDIwCQnGO6+ervXtjYuvTY46EOvqld8LQ4ppe+hF++vpFGYGGV1U1Kecvb6R3fBFsV759i6OFitoM5HpzUk9t+ejlrcBZZFUREBiGGukapyATQulv3efeCQ2fm83IRghdRVSUmYFBzxqEITUmRTKHtAJALXB8WAGY5OyKLMckSinGan4ElVMCyMDaQDCIOjQ1TH73DuFhAXDFo6lpGIghVds5tZelBfc7inAcEB8aaATJkoGaUH30kE1Lqxu7ckDKUG4kKvnudTw/t7JRDDTkzZgBG0dHiAO69ovP91SoXI4dYSagtpdQtHAfpFxyPw9nBcOeNDRugLiGpxL5oKi6ruJzp+avluz6oPcLmrr/6eF9Mi6dP+OM/Aw9eMkFfVJiW8ey0YLWYRASZU1YkjlFBEmpLfS9ixEWWAdEIjADTIHK6cv2globlWVwthm6JKi6QmKbUppSyeRB2COv1xZRE0UwhrFcfTVVEVckgMzIig0kWEiMyNXHBJ13HDNYJU0FkW7s7EJkdORIFAKuYyIABSMgB98SAmDUW2gNxUowqhAUYpSwheMckpjlHZ0pgzLUoAAiDGrF6b6qWRTWpqqhgAkxOkcQ7ITDObYeFK97y7u994Vt+sCwdbT/mnJMWCgzXP/2Z/bt7MN5Kxfa0GnPjMeXL7/jm+aduGmBSWXmKwY1GXCquZp21kn1A7xlpubcoNpvoerfmUpUFDB0c3I97exONfYA0pK7rRuNKE2LGrSHql9/kr9+Z/dSHh43z18bnMjMS9IvWjxQDKuKwf1LUlSWUc+P89mfL5Xfbv/kllr5sKiSvQ3CMlFxZjXSxcqEwzxoqogKWZzDbK+uxFd4AcA3V73skR+BAg+lgSKpqZe0SwcmMnPIWgwkYoxmQmSQZBkRUEKKQztqwWQO5lAdXlqAiSUATKhKgxER10II1ELlGVys2RmQShS4itqRqVDouIQKYYSiIXT8/9NOxXXok36+CnEC7NOhkNbC6IlA5Ha1Sf7x/vDhaMAQSm4xHXded3xpHXXSnXblYlDuXuz76IGLJpTAdX3zQn159+wuv3D29/srrrsghFOOdSbU5xsKjWV1XNsRLIczKkFM8E+1VnalmYwJHBACiAGgBTC0X6JyBI8OSDFwIRVnXHLwXGPqEknJaxWHBNPQpxnaRwDRbTpSTJrWHJcG6OK7D7cN7mNp3Xrv8wvuf/Z9/8xNx1d89Xm5vFInEDHvpNupigpV3NhoFbdMGBxiSX0RATsADUTVCjzjveg6+UApFiNm6nM9tNKVzdGjQZlm1ksSy7U63b5zcPzieX928MEn91rTZ3N7YLsLORklD9ACDGKJzFLJqcN45VDSQnBFXoqpYe1cQdLkbTA+71BZukxEB2ZEaDipRxJsFtO2xnwuuBM8Wc2iqth080P2lnt+abo/HKQ/PPbo5tHHvtM1AcUBgJsXgXbda+ZwJCACAPSOIoqouWzs5nT9ydfsn/tyPPrjp/85f+f+++qnXv//HPzidbroSf99P/KSf7b3+xS+//OJX8v7BGCWa8HxVJtWAmlTa1IzHXGRJ2XEtqVdU1RyCy2qWIfeJiZKtaxkgMx2iM8J1oWYqquD9+ukXkznHlhNkjGfd/Er97H/4Hfs726N2ldwkh7IXo4bOX2gm5Si9890vffGXj9488hUaWTaYD90osEdCwRSzkV+vnM+XHTs2TKFm63Q8Cs7R7GxGjovAiG45yEZVGuJqNTRF5ZKg9efObdzav71IQwbd2Ghyhw5YovbdAH4oXGhc2Rv0g2hSRMgEVmJRFTSIig1JUuEVbDwOl6cNDUlzSFkWfRpyRi4tooEisHdUerI8dDn3Lqrmmmg6GS3nM1MgsirwWrURKocu3Dpc9DE1IZCtasaro5IV9oGjZo/532sVCbhOAFQQoRiFVZ
w37mn/i4f+jsvDroj/vNjdcIgWXVTly+1mdSIkv1NDz/he4rv+WO6iLaIIksSVIxaizobE9nuzjoBexsnpl+Do6yqqmuvmOFUmIGBEcWNXDaeOBC5xyoM0Wprmj3F229X0x6dn8Jor3tSZtlTbvob5wGdMJSrSp0WWyrrp19/cu/c/PtV6vVHCBlzhFYEkARSQ2QUwECyQwBGTToPQmpGIio1iADowiTqGjmMpSkqCBM91FFAAYQNXfkjcvIjvv9ifU9VCMiJzuaSgadNRmqWAQ0SJgARMkkVVZ1RLkhQxA5CFKIYK1LBrquVq9ADgETs8bELAoQY0rCLCAoBLoKkTlims52r/ZOPYS+wP5aMdkoUqrmlSXIDbagSkQGU+wc+uFwsDrYzzHbmy6sSAgMQNZqbi23rUvgrIUmmpGpvZTvPeOg6K4eyKIx6jyRrJazl16nmA23TruD/TL3dlBWCNDr27ZLVe2ADUJnzYU/818syGjL5Wjinni/fuO3cHGUZxad4SKLqsX8TvjV/00f+Gj/u//U0p8Km2u93Tu63HcY4ehOePOt/NSF/tYD4cGn51oM1s8sdhfjpz7QJpa2cxlxm4yhrGdXR0tPK1pO4dqrig5F0iKZrVOGMrA5cFJmtaInQ9kCKSuksEAgQdiATTFxdKMhSKdtiy5jRHXWEoEYCIljbcsBjJy1YwgJmoXJnQjT5khDEE1GIybVZoXSgTFAALGVkGITeb6gUOHqeHnrdR3YwXD0wm9fH+7099vpj/yVH/76p37r68+8uaztirUWkyuM+s6twbnz2/Wt2/m509zM42IvywKtOeY+kR2e2VkWu1lJx7uprRlE8oKcgWKQtU138PzLfuLXn7yI/TWX2cH25N4sZhbPnpocvGNCy0a0WXUZl0RZFOPyksxAKCOTOWON81HBIqpBkOQooQGTQlSDkqIpjI3SBBFGPMkdAQA4PF6EEJXDY9vbkJZVrJOoGlCRwuS4aDfGPavJRclLH2KKSn10bKWw1qCaZdjKi7VxkbH2fG4BjaNZ02Q9WrO9u3urGcvu8XJ9rdyc9Bw4Td2siQVHQpMAolLhzLIOpCbPqIkRLbVdCAJF7lFlMDbLoLOEd2JyPjuFsZ+ZJCBggnIlUV0+wyQiy9XqTJFvFYZjzI3Petms6T5/89YRQlHatg6sHDUORr1ekR/Om8AcFo23djDIuq5p246ddc6UeV6tUtNJbDpJjKreu8gamJu9OaqK0at3j//NJ3/lhz76I1ceOqt1rKbTdNiMTe46DXU9Hg1z7B/PZ26SCbMxVkFB78c2nvhr5f4GICqQngSFigISqFqDkuLauDy1MwzT1Q9/4FJOqdfPgqSDvYM7b+jZzcnTVx7ffOjRu3v3Tp/fUoVYH5ON1Wo5PHNuUPVfevH5T33l1Rc0RY/QtiuJsROrWFgqDBkWMbi9Nnzn7r3hcA3IziHElBwZcNZxvLhdNBVVrF0bTwEdt7DklAysD/I2cSuSmuaVl2++drB//vxmagLF2Arfm3exDa7MCel4vrh86oxLJorU1apeMSKgtcZS7DgmdgXZiItlk4/6JrFNvAyh189DF7cmJWoTutpmPRYJoFvra4bw8O5uWLXRmAPWsAqne6ZpmgfWxyQsoT6TmcF4cPtwhULLOrUDOpitVKHpmgg4D7HlcGZzZAHAEBqYLaqjxWqyNoZ7u/+HVAQI/YGrm7iIKXDb9660SArkxFtzxgz7YHqZs0o349IPyr7PJthNxF5/9s3/7Md7w15v+foXn/vX/zS+cfhDP/PLn/lH//2jP/onnvrBb39tsTzXX9v9G38nPvM1m6I1GFUxiTdoDSZJJNaSSRLUqVqL0eeOWIAD56hGdfncC+bTv/bR7R29dkDGITpWAOOMuoIMKjBKazCgYZufWDsy7yWEnsuSMw0xlRkocJCO43h2/PJP/5XRhx6xG2ff16NOFJCU0aBGaDnHV/be2f6u75APv2f+yrfOjArpWiWT9wfaBEt2OOijlgS+qYMoVDme/Ws/evehyfYTZ7bq+Z3f+vog62c+D7GhBKnrkJjIkCu0boGjZejahnwPjabUOAK2WVY6Fg4x9tfWZ2z8zrkGnEpTbJyONrfW+LX+di8evPp7ZmsAqGTRrw01iqoJVSqKxJBMigbRkgd1vg2mbQmBKAnqrKmLDdd2gRJkWda1UVJyxUCCWpNLbD0YQivSGrWmzEJYzHB1vEaS94ezRBrKnELXamST+9gGK2SVTWbmSTO2PrT9/hi6/PJTT81/8zfB2RNevQCCJRF1YADVAp5EhuHJngu3YdCDj7zP/+k/7oZ5hSn/jc91n/qsOWbYa5749j/3+1/8cs/Gat6snTkdkCLR/nR1XKcO9fDo+MOPvUcK/61b7xAyKWVZPq8aJXaSECFK0wbMChtEnLGxDXlunXVt7aSFqotiwQwhL0hjZyx14X40LBIREKjKSVojwruuyPvi0bvvyrvf7utLigBA94PJVUFUBFQBDCEaYuYTden11189e+G067liVFw7Orjy9FMv7n/pRvC0co+c3oK9d6I47sQR2aCQABo0QKJAHjilnAhECQAEwWIyqYsRmV3niYiMZ7SWEFkIjAFWSWIARSZxTs/9ru1/XSNnllzdQT5swBqfG+tkdWyNS21rndXcCnXoPUQ1ZZ9DIGecR1qBsioLeqyzZB//9rptjWmL4QSKrKmG7fmPpRsvhdhpZOcKaBvlFvMMY5vVJSRbLd+xT7+PB2uAIspysmTBkezJ/+5SaE2IsHe3uP0Kz3fl5nVf9kLdWhCtj1VjyLdx/Ty6QWoaBLGTMjSR7cbo8Q+mayvKpDAJVzMXZ7w4sqMta6K2jUqBeU6aoCPNMGydo9PnbZ6bEHhxj6saBzuJCstMKlLP7XDbXnmfYgFvv949//ks1lGDOmA0hACEmlgYnXUMETMSbGQV8yqCmqaF6OGEVgUARh3F6Eq3K5Le99Toz/zZxdYVtLlJ1cbO+Po/+8WsIjF2uRQiWJv02oPlnU/+43U4HJ1+b7WoPvnLv3Ln2juQj6tkk6QiszaBQTQIXeJFG1sxCCAqKnzyWt3nagEwK4EmONmbvM/BB1LUE3cDgCohIYIxFEFBFAxCCwDQsza3lkQip+7VL3B7qzhzoT/cTFUvRISsLHo9n/V9UjRmVXd1mhXDgpa6vHur3n0zrg4Ny1gQk3aLuovV0GG1e512Z5tZdvTyF4qLT0wGp7LqmBJPemPOBhCb7lu/Vy4h0rhae222tvnA2iNbpt8fTzgIunq918u76VG7amu7SqnseZf1TOBSKUjEjDJvvCtlPs2G5EfmMEzDg4+mUZo4Tgf37Kw7vPmmq4/PnF0bDWv48jdufOV342Q0+eDHwM6KtPvi1Vcu/eX/uj1s9n79F4rnvrXetW2k7ngxfs/5/LFzx+Tf7E/O/fCPr6z2VrD45X9fzpd5wLiE/sYa5pIaqfavJ2n58feWa9vhxkG6/qpoPfyeH1o9/dCqTd2Mqnu3B9tb1BtkpRdOysqxBUVVBoWUBElZWQhUQSODMILmw1F
cab+weW+t6xLmqiqMGjV1XZ35wKkNXWq7qWiZ+VETpiKJIA+BUuhiOFKVdmWcgvM9Mh0a4cnZSLBGdPPw9vpf/afGXmp+6xfr13+zn6X8aArgzNqpdj5PZUmA3LaJNXPGhIQxne1k/2/8v87ZrOeGXIHtj6Tn3WT4mTdfWr90ZvTOl1/4/b0Hss1v/tLzp0bGY5qu81HQm3tH6+NiZZaNwaPmuHXRZUu/A4u5Hs2gMtGW6aELw7Wt8uPve+zw1gHX+Nznrw1KM1S7vLfaOLd15dITbx/cW8tHg60H33Phgbdf+OLh4V3rfFd10dq1gcvAzDEVm3R5tHl4p05OrQ+TQRklDnEQQSJDEptiHPdG3YoXbSCCQeHmy9oZZcZeXwfjYmzKm68cAQBagwJwQhkzcJJFdgLcJFUBPaFJv5uTKSqiqCoswgjASoiAVsgQgui7l999DUoVQQVQOIGigBKwJSJlFDUnKGwAASFVImAFVsET26CoAcCTWxtANRpQAFEVBFUySTTGjow3KCExROMoL/o961wu5vabr9Pd6yM01WTE9eD45lSlF185KB/cjxcP9fyjprcOTaTEgRW9AwsKwApJGVnQWKS03Bz6j337tVv7IyfDtVNMeawYuxCXbT7IwGhbrYq2jdNpVmnhbVBjySbLyXEShg4LYz2RNBx5YQclQISY2pe/Thcfnk42+8P1sY/dM7/lUijLbHnm4s4HPt7+6v8vr+eUGGzBZGNIVsFYwykpa6g6nzlrXNO0yMrMxhtCIIOkqm0kluCy4Uf/2OzU5e3z6/rMr5nEhghYhdVZmi+Ow/yG/2N/6sL7r0x/+zM3nn2uPLxTGsezVfzCp7s3Xis/9L7sqQ+uALTIbBvgzefS898YTDnFjBB85pvEZIyCAFhzZze++DI8+kTq2QWH4caohS6X+ez6O0U0aG0iIgAg8Vu9MBqJCCBno4E086DINcc29jIXl91g+7TbON0EKBElCrioSu2qeeutl7/5ytdv3Lkl0palr8MJFIsFULouEyZIBgyoOoNiVUCctYU11lCeOSEBFVUmQGcJpXFolVzXtuldhynlBAAs0WTZIO+N8n7f5oVCBqrSKRFHUUYAr9CyiiHITAIAkRQ1JlGLYqxBRGMNADFiAg6Js6IUUVQBTaSoCEQn1G1EAJTEjFzXd9751pFr1ncea4/HaxcfSShYnLI4bqc3PIBhzlCINAgAKCE5htTEUdkj8d10FttV5kYmqk2p6+pB6blpSVRbWULpP/7dWz/904qiv//Fxb/453kSm2Kog+1MSm3bdRo08zZMpy4b2fEwHE1FNYVWFWUyhsE2uMKPR0omrTK7Pqhvvp1MPvyRPzHzHkeT/O1n0+tv9Rdfq176Fu2cLi9eMmYOixXsHwDk+dlz0AbYvWqO9jLu+fc9nV14eNGEPLGGRsu+I4Ljg0CgvRIlg9tH8ZUbbnsbNkdoPBhLKUFTQ8Noc0OaYlRhJIc2t9ZB25KzUDgg1VYhK6GLcDyF/hrH6AZrYKyu5hyizUtFxH5mM1y+/Xbh+5CXgllaLgSMxs4qQxK0CHlOXaSoMSXbc6pBi+Qno2Z/b3b9WO5kx3SPUG68ebxQ/tarN7/jR3/wma+95nO7ofm9RTKIVqXfyxbz+drFArsKYLpa3MMRZWvnOnXgvBv2Qauu82W/nLWNGmzJcRPPFcXGmc2jW3earps8YYanNluO3fEUJPNcgSw0BJsECYMA2ihkmAyAF5M53wPrgYx6Z5FOAotRoiURBDAexTKIaWuEliBDCSoE7yqmC1ZyRlfxwrm8WhwgysYgbzmg4sDS5fHW8d3jQe7Z27Zhb0xs0iAz/TJvQirIcidrWa/0gIosXC/a3qDXRt2dV8zN2cwOC2u8zUTne0unAIRN0w7W106tr794/W4nrp1W6GybuCZlwaoOxuXctl6lb3UrN7Z0d2ahzvOEis45EABUdF0CGyNCHDlqMOxVcnvRPFindWO2N7I7TfPi0dERYt2JBY2BFYQSRA/zVW0tluPB8fGShTlEZWWAXi+3CsfHTcuQ9zxKiiHlpfVllisuF11UAQafCXT03CtXt8uXPvDYo5nFVRf9cFwfhHq1v3FxvL979/Rg53ClaNyJrRaRQJXwZPIOCkpoFEUAEJTQgGoSRkQVTGrairfXJ9dnx9Oj0Mn8tz75UrOoHri0ff7Sxvd/7MHRzPLdVXtrV1fLgd1xXB/v3by3vzj/yFOjtbJa7e7Ges/q6QubL71823bsyAVVmxkVjYGrpiGkvdVi2B8dzpbW23m9BDRlkbdB9o5Wu3em6+vlsPDHi+XZtbLXs69X06WwoFGgxWpxef1yr789r98ZNynWiTRzQFW9UtTQhNKbheq9xfG278eWjVovtiw8awptW/qMmdsu5WjGk43jtkWGxSoEZ1vbK1y2aKokEDlh2wZNUU2p7AC8o16ZrYSTNZSKvS7R8erCRr/pZLGsgXSncL1Rv4tBVOfL5UNXtptFNc7zZPCokUUb3HG1PnRdmxBFDR1Pq2kV/5CrCBJGYY1iMh8FbhzX62W2OS45d9Ku+mRJYpMCGtMbDGrHCHa9WpwydKE/Gr3+4qmy+/Q//l9Oz6fv+8m/8Psv7GV/5i/ceOiR/fps/9w4pdV8ttpkOcHOA6Kiicoi6MghqwVNuR4W4oqMaj/EOLBmdZyUyZCM7iz3/td/Pe6VybkghoHQUgeqqfUu06TgTKOw9K5DKjQNRJ1RIg/O1AbcqQ3t5829QzuvoYMSTLl7jJ/98oqcgX5mbJOCsIJhNJyzrj73yTvHV4+utsNs2LQVobDEFOoMHQCwJkV1hm0uNaQ9P9o59UDPLa/+xqfb514cZEVoJEZJTcxyZ10WQgsJjERr0KLlNpT5CChruhUhGePAZ6DJKGvH1SLBkAbT41u/+Omz3/v05uPf1STrm4O9z3/j+S/+p/Usm1x6sJsdGqNNiAbQNMth28HRygEaJag4QYdoyDuLGSGkIF2xHt73XenjDy3bFp99efD6t4YatEXBJKIxrAwwC4OIQwMMqQ1SUvvk+ff//b8X8gH+7nOv/Q9//yGTMjJdjCLiM48M2ERBHp05v9jbp+4Auho63+wdxDZxRLWAlhSBIxtAsABKIIyKYAhCBIDQz/Cv/cXuo9/f9YY1Yxj2J9+L8bOf7LHCq9+YPPbBNuvXTeAQ59MpRHGAmHi5XLapmS2qL33rhbWt9ScfuVjPp/fu7sdFTFG5FWtRSPKiSApqUbqQ2Yw7AoPHxzX6TDri4NRgkyT1wJO1g1zpvghEACe6j5wkDZ9Qq9/dLHt33+z/9IUAgIIKdLLXfJLviqzCetJCKIgSQWRV1HtxuXtweHn9ClBaNcvtzLRledxSm+WPf/uT8OL84NnDDeOSvItBEjAEiKjMzlMnbAlJ4aRrQAVRNdiJYY0kltVkAmJQAAGBAESCkqCKatuhVt67tKqDcuwVOOpRMjZk3NSW1HkP/VHcOlN7W5aFq+e83OsNy7BckbQWauOpm4WkqGcuwOU/yiZ3nNq6MlluGLfOnFZQ5QQAsV
phYlROka1TSHOvzD5PHMqiz03KCw/iWLp4onOwuGQSg6Ucbt2Yfv7Xx+trBagkxMwbx9wsoG/cxz5WD86UjfEhmXEJPrcm94GrEBSwN5zM9+5lPa8xRzfoehe4AwNTPVEpucUmcDmBRz8YfB9DbVBMZnoPPy2up+jVICAX5QAJlLC7+DifeyhbK/l3XoyRjaBYSMCIgIiU0LCy07g90nqeHbcYSaTzuWsgGmNQGIM4I8bBUR6Lv/jT8r6Px8G2Z1dgXmGZf99PtJ/6fT48yFHEY5fArZrLa/CoTXd/8eefPf75F2pYjU+3bshZ//B4FgxWYgMzqnK9zKwx3kDHAgosRhHkJHHvvntIWJnhJLP+hLN10uoiqLdEhKAST6L9lAGViMi8q352FBowGfWYwsEtVwp383k2cpNL7CZ+kkHPABJAIsSsN/DDUW9tJL2qTO3bbz3fVxakjY21EKtkzHLWQD0tTbfVM3dfe/viqXO9Lm6d7UdbdO0M2abFqldNi/nMsc+d1Ec3U6F7B1qun039rEvzQur5fO/WfPXq2/MKcs5oFPjp7VMDXkm1cgpCQj1DxianxYUJDMb1+GK987Q1i+Mvfm5HbbT2scun4epvL7/05vzaS5u9wYUf+7NfS6fa9TNf/sf/y8dG5578iR9aLUP5/O/AS89NOMU6RSjMxk7lLNpF6OOZH/krr944Go8mWN97cOcM7H2FFIvcOkN1F/NLl26iG//AT31192BjZ2ekr5zfPXY0DacfOmy79dhOLoy4m/Q2thog4URojMMUOpEYQ4iclAnIKCIYb1wZeIEWspEVBVuOR961TWe6Bp1p6yqk0HGXIGnbWuAsc6lSAhujM6ZkEcZymXr7B3etHJ7evOzLEdUH2pow3XOD7XbayBJp+J587T35116jb/6D9aM3PVioDZQKq2W3W4GzziOgNa6f+pvqMrn9DmnmQc6aXFdJLcbAUsS0XMU2PHDh8umPfHB+9Y1Nkw6if8hAT+Gh913+uZffnJLMTfzGi2+sP+jOXsjBYeDsjVfla2/idFm28/T0k8Pv+ejp6Z1uEcurb4d2IbHiqsOdcrw2nvR7w/lieefW3Ww4XN++1B8Pnv3aZ25de83kmUHTtKk/7JVO7u3fmkP3yKUN6qZ2jNmZ7ccePP3Os68p4/b26M2r+456q4BdTOdOlQe7sx4aRjk+muW5y3v2eFYbiNuTzaI/BngHAATQEMEJHuz+e6X3dSFUFFVEFlVVh8iakiY0qJwQEYBORCWDqqkz1oiCogoyCVgyRCYmRkUAUiJRsAiEYAA9EaIQkAIl0JMbRE7wRCKIoEisgoAAQqCEYJAAT5KFRUVBVCF1sVXpAJGrZIdrA9Mv7RBsxufPrrRpLJ563wfvmbBsadO6gXHkBmubE26SnrvYTd8pSBFQlNq2MZYQSdokHZAXzoAffmxPB6PmekkLkBr75wSzYjICNKZ0IHVM4BcrZlB1nBBVE1Jden5gw50/E45qureb7twtADNjoWmABQz1s7h8+SsXH3ts+pVfTNM9c3xERR981jv/1L1at0qMt/dcuSYug5Q0xhQ74pRCUGed80rEzFlmFQhiEgySkgiSSmYp6+fQ1fDq769f+/zqd6s++cQQESQlsAjC/RIWrz6z+uT/uva+D9qtvH2n0pCqpVIKtNqXVU1XtpvmfVXZw6TmlRfyb3zVHk5RPFkAibGurbMqgqIAiNXSv/xNHKy5K2chz2rRgK3sXneAklQzFBFWThCzrVNxZzuGkACiDdC1bG0+6PfXcjyeJSt+fbhsghvT7OBA3GDUL9p6efv6jb2jmwerarS9vqjjvK6TSFIFVVb0aFkZGEqTWW88JbWm40h5IeTIebQZgiCqSgzSKrnMmFYktS0L/IHROoFm3jl0Jstd3ssot+CIIXVJlAWMMgdm9NagNShoSFWSYJcwUKZOQQCNASLMDLAaBERBOnlSUSGgOFE9qZpE+MS6Z4kYjSKV3pSmRjiGtABh9YXZfoiGO/HeqwYpKwY4bxBiYnXOWaOoEFahDYGdFqcHaW9VeMR5sJFzRavJFzZV0ViMKFsf/Z5dt6WudB/Ix5/6VXz7FgmTI+dMTTHfKWDZpnZFbeDSdKHLmENU62wUxnLASiIQuwQWsT+gT/zwrLu7fvo9s8nOUtY3P/7otJH2m+9kWRycGs4C0eCBe1/+hVMf/244OgbtADy3yWTRxJVdGHjLSQiw2rYPnVY/DHVlDis4ukePXKmrpV0eOQlusgG2n1Y1DnrcNMxqQMAbQFA0mOXovYaojNwIJQZpadADZw0Voa2tMdAfJzRckHLMUpNWu67ohzoiFmE2zzPTe/BMEKOSG5KUpSqkQTaAtuJuASec4cQcpyIxhsrGittZS/1u1CufurJz7sqtL/7+wRs3N8+fni5Xv/17L+w8sfXYd15+8Zk7dpY2SLaGenar7G1qF1tmF48Wk0eenFfHLCmFudgiQfDjQTn23TEPRr7XRUBatqk37MUQ9vduj0dZqy7vD9oYlotpbzAGH66/9s17N17LCwgdti0H1Y4jFlayMiQkb0WRAK05eRCtMxYRyHqRCADoPAcQTYAD5gCQVK2A7bo/gLv7hrnw5XS1mM4WJi9KwjXrKpZbbeqgHY6y3CGhOmdCl6wziSMQorMK2J/0xFAlECM7i1QWxwB3VO/WzArjnqkW/MgDO/vzatW1w57PQNFSI/H6/sG4LEUTeLO+OTw6niVCp7I9KpeR/aBoYgRLjWQecHtYLJXvNqHy3ncxNO1oVLZdt9kf7M3mtrRVgCmry/xsWVNIZrGcciSDo8I50NyCH5VkDBjTdUFF88I2bZNiV+RZ2feLlYQAB4tqs+hhBOFE4L3LmDUJLqvoEEU4zzNlrquuS1BOJofQpp43beyt9Tni8milKTRgR5vD1XJJDslTAiUEVaWTlM//U2IQIqkKICkoEKIxKorGANogtr++/fCg+Nw7X/pPn/7W93z0Q+95a/fyMH//xR06lBvXj3qz7u7q9va5K/O37nSzu8nG3vqVZW33d2+Zupvn9mhgzPFy5F2Zu9IWR/t74Gwvc12IJwO5OgGxlqX3zgTxZIwz5DMTRe/Mmw6035f+oOhUzmyMVhTfmM6PuxRjKoN9cufc57/x1e3NQW5kvD44PGinVe0MdSGVmXGWVnW3fzjPxtjPB+tZfzGdDUyeZ+XRdDpjPli1zruzRY8i9kGHZXEEcU72oOkGxmwYSpJOb4wO5kdk7ELTztagPpoNi8yO6Y39g8gu2DTulWLt3a4DhQ4gcy4ve0lrMHjjqFpYgauHm6XfthTa5DLvkFJMbehOiInHoesMZZn/Q1JRObSqftPa2aqrViE4cxA5BT09MOPcE2HXRGssN7U3vsI0RHrv5sYF4ElWvvHzP3f9V0o4mBqCbPMJP5g8/OPfOy9OhTszs7x79Wf/5tbVl0plVAIDhKAKKEosBixxWgIOf/QTD/3EnySEW7/xlXuf/UK7WHgio+ocZZGsgBdT9LOlcBI0PZNQrUTlwBGrxOV3f9f6D//gLFS3/
+0v89VrAxFNyoBpvJVd+mhxatP4b/DLL5Nhg2oA61kypXOlQc0RwBIaK9aZ3Nls0eBzr/TSsPDUtCyoZS8PnBxlk51LB9Ut7EKzqkjZOzeaHt34H/9711tt7afTsL5sGwBC7bIBmjJjNkBlz/SaRZ1MokxTit5x8oHyvopyaK1VaWtUzYq8Jna5GRVm/3j6+Z/7d/XP/cbW5MGtXu/smQfP41kGrKYLamrMyaOPqxVARYo+N12MmBEiYNMZJQXDJ02hanH5LP7pH7h7JhVbE/vRK9O/81qxv/QGQjsjTUSWIOPQEAEoAYHxsBoN3/O3/+aCclqE/uPve/qv//XDf/OzPjS+yECAYwRR9FiarH35zeH3/mgz0aNO/XBCa9tZhkZQCJICGTIGUCCKEKghUgYEA5nTEKvLTw5++D+vfe5XzbBn9piWlcAiDhh2f/9zWz/1/1x7zwdnX/i9AsgZ77Je4AZExyN/b7FghINFO5ej82Z06YHzhP724ayf9equRnV5kbnChrpxZEeD3tD2O+jqukYAElLBwrguCqGtG5YR2sL6zLxrEEIUvE8cOlF6UN+tmlThxGf0B+nHCAqAYokIUVQcGlUQ1Xfxw2gNoaroSYmPgcyLr791/vK3NbHqFvd2b1wver3Dw+P1U6doXF346IMHrxxKpc4YgfvdASIinUQ5qCF2ZESFTnoRVjSoGFUTGQuQTmRHQyhgCAAAWVQBxKom7laLWAzm+Tm4dLF46BE/OgfPf07uvJSt9SxCVuSNXbOPfEe58wDlGbet6Rb13buwd82kfUpvaduSYhJnJ5egG1nqaCC+P5CYdddfXf7WP1xfHhLZCGQdqSTjfVSJVpwPPL/tFRZvf7Ud5mk5a+az8c7ZZMvyyqO8vg0uxVgpuqaeZ9O7rotoS0mLcDwlggwdVW1oj1M/Ayr4xrX9W8+d/vCHaLRFSriYddevmekMCiiRXb9I2udF1BbZ9ikTTSQhEFoxELKePXNZioJAKdVJoh1OwGbSBQZRZjLkDAooO89Y8gMPt7MDjyGj7ISGYATVYA4m1d3izJXeT/3V9upL1b//+WGsrcF4AhdBzoAsCRX2uJ+t/dW/fnDxY6bcsIkdWKO2wKLxZy/84I9c/fo/J5IGWIawjwAN8Vxj0Cj5yhc3O1qkJjUxARh0QeMiBEwJJYkAJzbKSuitSYmBUPSkswRFUAIROcnmI0RVPel0rTOMGIW9JXgXY4REoJB5C7MGALiJBlGcA4Dh0KtUxowhV/SAvSzvl0AWnC9GvWI88qN+agHqFR/f0f1bZ9Z7iLq3d1RVVdutlFDSKtZHBjqXu8uPnrPcjsZptbgucQHLZWxdNjlXFN7NFrw8YKx6opJkdPHh6eajMuSzg/71z3/hrWu3DyPcbROwlDbXZV1Z7KXIXRw+cB7P71QQZFAMHvw+8MX0+mJw9rKfTM6S7pW42hOztUXOHfzir50+teV3vd8s4Eb94EVarM/f/5f/5L3/6W/vXXt78shVvPumrapVtGbc77//MdoaLA4O2nuv6eJOsfnvnzz34dVg4B85K3tn9r4620QnecHN/Gjm6MHzmz/0xw7ZPvzoE67I+9fu5u3h3TvXCi2y2svemxWthsV610TVQFkpqWKNqorkiCU3FhRRQVBTbEHUSgeE6JwEFjVdSjZ3XbM0oaPUWol5zixoUq9ZzmMwBnZEe9Zupk5y76q6btPBuE9c5e3esuWj9vD25taH+/2LxjhqA2Tl4MELa/svr37p/zNJEQc9WCuB8HBeZxcfKY6XvH8NFks/6KVVGvYKgCpA9GihYQBFY2KskmgTsTfIqJHJm29Xv/KL2d58kHTWdQ+u93JI6dXDD3S5xCqO3N4yvPPGdL8u5lO+tfQHt2M9NWXhhpCe3Ji0B3PVbLT54N5cj5Z1P++t3NHaxrYrfMTACGjLhx54WGD50tefuXXnTkSzvjYoqChL8D6EcBSo88bc3luuFUUlqtP6uWffLFpTHdPtOyEfTYIkW+KpyfCB024+Y0DTtDpaz2uOd+aL3qjPAAdHVdir798Fxoogmvt7XwqChMKJUBERSE5OV0FRQwCChIlZVARQlQHFGVJN1pzYTpEQUAVRARKgIRQFAAJFcaBO1aM4RH+STa0oIgnQIkRUUBCkpAaQThz4SASopGrJkCqCojXKJ4KwcUBJI4ZGQTNycbaA9RCrBieG1tcgf7QLzWvTu357q1wfbPgyvHXXDTOaT7vUzdpmzaTQRio9gbKBFbOzxuUZcCTupE52Pstef2316qd1fch+oKf33dntaLa1HCiXkGfSLNObLzsNwpAUea2ffehDvYcf0rOnU7+XacgP7k7/w6/Ht68NgNE6wUiFA6tlddz87ieHzQpSB0S6XAJVhy98avbwe6mrzg43ZbkSYQSiFKHpICVHJrQBLSGROpskh3yQTyYVgN/cyjc23fpgdXwn3rre372VjvdgDjlRkzgKUK5u7BFsSsbnfrC7cr/xTPjc12MbPRuNBOj6/SJWrVut8Btfw9wVDzzCy44/9ym4d5eDdGxM4TRCNh6GVUXGhJQcGWOBj/d9Wc6FPeJsf7/fWw4O96atiBhBIquOoBLwp3b89oOJJOtWbhnqxR4b5F4vH/Rm82ptI793fGRiEAN1tfD5IHALJvi++jTZME9lsZ0dtUeruVoloJQiGCMEmXXeeWOcsNosQ+cNEZnc2NyXfUPOoCKmFBu1nl0WzP+fq/+M0jU97zrR67ru8KQ3Va6d9+7u3TlIaqkVWrKEEw6yDbYPQeAh2IRhiAeYMz6YNecYGGBgYAADNsZmHIUxxrYkS05KrditzrvD7u6dQ+V64xPvcF3zoXbD4dSqD1W1qlatd633ee7nvu7///dTxE0MrbAI3zlJTpJEaaWMJptRkhAfPVlS27iI0YtDF6OSDsATApKABlLM4Jxzjg1ipjExNlVkFWgCLZJr0cASHZPVOgNAlsiRGSjESARZlhKDIbW0vvHk4/fZySvl9JrgYH1RZhsb2fJ6OTpZQcZhVlcVaoWgiDlEFhEScrO6C2r58Q+tf+gj+OYzk2ees2E7SxQ3rvNMCGA4tTGFWYpzh52L2LShB5ggKwV1JTtt2f+rf1l/5/e0l/cOf/YnNhcT1XpBatouITAoaU/PFtP4+ouDD5xtywb7IQCYY+9a/dg/1kme9Ky6tZf2ivTMX5uOjuHXPgn92/KdH9069piqqvbmflouoGfhxPlYeLn2CqqYj0aymIarlwZG4YWXYVmrjQKu7fpbtzQvVspxuPACTB0UNnSlXl7qkAA1t6VCBYBSTmGYo9FRM+R5W4c8s1h56AA8QudVapRNQtdhYhE0JQm6KYS5SQZgc+pKv1gkSS6zGSZR2VRPZm73Znr2BGmjFxU0tSIlqMQLt7UgpSbxB2XT7ouxu9sLtqj7eOX6q1HbYyeO5yeW86r5wEces+e1eas4Pu4V+5DG9uRJu7qc0GqSrA8B7cGty0v35zYmfjxJjzsiZhbbL7JlDWOc3Jr3eymIzNpuMa+HSeLabmW9p73dunGwfu9d
o42la6+/tn392vb1q9P9Q9DgGQIYz+iCEpeZtEBINStgFf1RMpSOxJREwoxaZZE5ADiR1nvn0q5NvM9DwMCo7Z1N8t7t3eVB/+yxFU2OFTSd67xrnfMkITNv1tPVLA+9tNdFbJqBVSpLWi8N4bQNBdE6hIxs10ZANkwNuxvzdgI+6atywlUXs8xcuHlARh00IQA+sD50i6asOq2AQxCF/WTQNGhQ1d7Flk9uruxVs6gpT5O2DgzBcZQQijzpSF/rGqvTjZXl8XSWpCZG18sTjRgUgGvbtk5ADfq29iE3xJF7hiKItsb72MuTzgUhDAiDftq0oSHXtDDrFlpJYk1iLDCY3MAizKf1QkCnpLQJne9QEHUv0RwUoA8Yi9we7uyMt/ZPrWzsXdtdXllRiHVb20Yho+M4m0+G6apDPIJ/HG28jiK1InLkQ0Pho7wsAAIqjg4lpolGa7KeXaHByXPH9xeLcxv9//WHvyfGeuvqAffT5ZWTSSKULUYrp6U93C73+xtDOn6ibGLv9FJ5EM+deaL99DYdlrFyxerAM7oQQ8fWoSAkKtVakVYzVzFiHQOjMQiu6xDRAexO616mlzW7rqtqrxBXlH10fW0CMJlMnnzHfbPpjk5IALM0r2s/WFl69uY2ZDogtDH6jrROCHVqsyzVMYQs17GLVYQ56+12pqw+KEsEuXe4qrmNwQ97vRSVsXZ/2iwNi8HQ3ty/dXxt6fKkcSjWSEmxXjT9dFCkllsp8nTUG87H01qxYLSZqtq23K8MquCD09Bo3VTRG5V0PMp0MSx6mU1Qs0gkLBvemjSzslEB/7tR0WxRry6vDIrMajuVMgauu3bvMHrXSkE9hSlQYU16YrMJcfeFS3/j7/6Ts9v4jZ//Z3HSUITcmQhZ0c8+/e9+Rr712x/80EO9/Ka9+PlLP/GPimeupl4pSpgjszCJJtICHEWJkNZ07kTxA390eu5Bqyn7vtXa0vx3P7fUHnLkykcPnOqsawIGQEKrbYyRSJRV4ImQYzE680f+h9k77o/sTkGy+5M/HXd3rVbaainH+OLvqet9PZ7rIADatcEowZ7WiQ2hZSFNqEURIHjvPamIcTovEiBxfau5a7mqilRbysCPFUdEJGvbrskSvY6Jev1AGUfZUicRQCeZHM737LrtuD2xdLKbc/TshysHa9xfhaIKUhI2NYVOB/LeMaNgIkZ3zumUuB7fPti7Vc0kWzYND3y7sXxauBVwse40gDgvqLx4rRUoRJW1wALBiyNkTAwyirKRvZaYEC5uX8Wbr129tHXP+5+Qawdq1sQKfGR08W3esiFikM5oigEgTYr77m1Q56Gd7x3OilX9rofkpXvrL325YAQyIIIxAInSOPn876RPvDs88t6211O13/rEb9yFGlCANITIwqQAAYg0BCdGo9ZQd0AKC5XDXM/2KOu3VRm6TEy6+/I12IeCTDPftvXh6fOPvfKV30t7SWq0goxDaF1XdwubcJJJ9ECeJ7cP9Nra+iCZzKCNkmqyilKlQ4gSJZBkRlnEpgtNzb2hbX2DKipL4BGjsIhEtZi50VLx9jToCDh09Pm2Bu2/fXkUigOBt7894lmLEKEmEkABYYHId/6OAAEhRmYWAqIo4/lke685+87NvWtXTPA5+XWC8fOvnPvDT05fO9g8sbH/2iQBQUEFGAUVaoSIGhUpAGyjMgQGWCSigAAqOsJVCEoAEcQjZ3LKwgAiUYAUIJIChFgPi+Q7/7psrDrx0bV5QkZKk2S2KCKodO2s6y9BYqDfa7ROj98V1s7n/l1w+4WwuGkLAgrkQVfb7uv/DI+dC+cfh7ihxjN+6l+lV18AwGB6IQYAQVCBgyCxixy9STIj5dLOpe5Tl9G3SzrjCFH0/PhJ9e4/aO8634Q25BnND9vbWwahGR9Eccaq2AbjGFKE1c2k2ESRGuTYO5+UdOjrBgSavd0869E0qw72NaoYRGe2m75pmqnWJjIH1rgoO9emS0NwNTVz3S1HTGNVu2qudBpjFwOATlmUBh2jRB8iIUhoJ4fNrF0mFRRo0tIxKlAZgm91n/Tjj9/qHR89thJ+79eKW3WikTwYQIpAgbnQN48lJ3/4bx1sPJT0Vpr5LMuS6CPlxoryrc4ffS/3f6prQpaA30x2oq72VTNuU3bdxujquL3cVEVGEDyRdq7rWte1nUFMkAgIIRIgR/DCAEcQlKPjmDvvUGAABAIUjgQIyMpQphEVWWsJJaBKjQ4hMIgg8NtSzCTTltJ506oecFQxWIDcJIPeygrnA5sabbUyyuSZFkiqSm/d2P/6J+DmdaNGNh/VxlkS7xoArhYLRDZZn8ACx82NlbbdS3oasNe2nUlXsjhoytliUWYhyQcntdYqTsWNQz2ZHW5Tcmqvo9ev709844HOrQytUBW6BFRZHqwvLWdLK/Vgk0/cC6NiTryf5tHCPe86Xt94I7/+ypXPfu7U8tpkacM+/sTNw93eseFs0d3a6k5DwE99sqwPj/0//8zW4Bx/15+cvHm1UGub7ygOXr10bWf/xL3nYZT56a2epgSPDVfOw1cvh72nalXG++6t3vun6j/+/775+d8ZLK6mmyP92BPZ+feF1y7cxdf3X3tW9wbpYQUp43ue1JvrQ7MAmLjpxJ64uwPW1pJm79vgaqRcQocuJNZgkPJgW6hmDBFV0uu7iKCN0gkmKUffNQ3E2FULcJ1ClxvevrXXVIEBx9PbK8fuJ3vkfw+umVX1lAxP9nYHUEQ1O9y6eWL9PqVSDl63jfGtLmfTL79RZGbw7idgOnV7u9NaHb7jw+0H3qdDceqzn0inWwlEMKyXDIwvdG7fQOa4p7UVzwwebQQl3VJ+leTx935Lb//W7M1rXdOtLGexxr1m/MhHPgTFfeHpZ2+8+NXOxVjAbK138aK5eblrPWsA5aWtyn4KVy8cHLur3zh/9Wtfy3tpoeObL76+sbR8uH0rgbXSR89xNMqkm1669vJ4f9+mRuXFyuhEvdi5vbgxSqEoitFqX2cUKr/St0XnsyU7K5u1peFuOXddXBot7Y8PNvJiSePNG4eoFIlOLB3bPHVj7xbEbrNvwqwrlFGD5E0AAOCj9pmARGGOR9miI1pdEAZgZDnqzDAAMwPwkXAQSQMCoyCKJtB0lEACfUS3RlQIiKA1sQQQJEXIkAAmpI5SQkBIqDjwnRVFQAMwKkASACGtUCESgRitNeIR6dpoBTEAIgdBABCOLCSiCEIQS32djlQxKDKVr6/WswUhRKNcTKYqGd5zSieEoWlM3v8Tf9T90q/Ai69iF4Mgnb+7OHtf++qrcLAjLIDKqDC7dTVbPpGs3FOOb+okgt2uYaGrCfZXKc0VdebiG/MvfKVfErKU6Pvf/h750IfiYFDOZ+zHRV5M8yL9/o+2n/9s+8JLGXhSFD2DQiGxVksblTbROySUIEM/Gy2xP3UMxMC8jmUdWYCjiaKEY4ipUSFN3fpm/u4P6pMPcb7U6jjaXILUxKaexqgT1YvjnV/7mZUd6N7aZYdgtA/MXsBwmpAmxUF0kg7IuLLODXboVZbUTVM1SsV
YZAne3C4//nFcHrHXpvOA0iqmh+5P7j3trt6kKzdjWRJ6NBAgUmCudvPDa8z9ePPN/u71kRxUL1zSYmKiag5WgaCwBbO22szGNMixmbSH265sQzZKltek1xOrnVIuU/21tMQ6GxSmp5EcQ20SAfS8KA/2DuazynsJHBBEEWkD2qg0sVYbA4a0QqtCjEiaVJrbXqIzrXSEgKBQWBSKNgKxdV6CAwGj7tgwBYTo6BRfAkUfQ8dOsWbPgULHnn3w4ipuWqzIgpIAMcSoOCJFtEi5JiU+URZFLIpFNKQINKFCBOGIAsCeQJBIWwMQQxMzVOuDwcPnzpzo8aKc3Li5pWmEbdktXIyS9FZK0gLad74YFPv7HSBEEQDpfDCgp6Ie++if6s6e5dXVLCC1n19dTcdv7jQL7wkTEYK4OurB1tV47Y1880yu2o76IhQiJql2bdt/3/dMlx5pn8yzY6fkta+F//gfMq3cVrAYEkMAkjaL6nd+Q8+cfegh+/BDVdBO8hKyQZaTVLx3uYFTPt3Mv+vPuv0byfMvjC493UsGTf1Ws7uf6sxPDtvlAGcf0PVO5ufQz9GJVQBlBc+9QrqE40uwiAYM/M5X9HxPuw5swbWHxLhyoWwKAtg0wAKYSASK5NlrnfuIOiuCbwwKZAkAgFagHVCrCwIUnjjMhhCdNLVIglSI1pEZ0xQWE1jUOusQJRn0pJorSwSKuUGTiNJk0yAo1gg4T2NOEPJ+oZvEhqaeFSu97L6Vr3zj0ovXLvbu3xzkrTQ1LGZnhtl6PxuM6Nhj/talay2o1ZXVrvT5cC20XpFW1GoKvq1AWMByYpMlmxyi1qSYJSNgHVqOjISqN0zSpV6YlG997blqf3LxK18vlvoK4eZurXuFU0nt2CQDzPticlKWVAKgAUhQBRZrDJFCFEEVhT1g5cLCSReo7cAH7YPxwhHI3dEAwlKeHBtkPc3X9w4ax5WPLTAwlDFwxqTp9nzhFRzLtHBg1G3lpqFLFDDA0BZZaiFCU3cKpWWphavYVj7onu31srZqmNR43prMaKu8Eq9IBJeKDDliRr1R4RpetKUWdIDDYVHVdWbQowwKlQgsXOeFl3pp9NFGBq0myCMFS8u9qq6JcGnQq2rnurafkvOxb3Cln1zZbxUTEc0XbWGTrmFmnsUGBaq6SYt0elgC0WDYm81rjOw9Z9aw9/tlM+j3IkdE5UI0xkTkKHHQ65VV2zXee8/MiVFSlnkPb71yqX8ubJw6Jl0Xm7rIsoQMxziPITmxGo3cIXrgnRkRHFFK+Ch1iCCCR24JAYkRiUIXm6rpFVmfBjee/8bmQL11sPPMC8/df+zEcKW3cWp13prB2qk4m0e72N16wXYVNa66ycc2sgOu7frS1y586b88/Y02pcx5Jt6azrU1WiljIERnTeI6D8yKGQW896jUyqAHJPvjrm0ja7VTeztrk4HJjRmlmULOCBKNFpTW0Lf06nzWJpoZnEcjZn9/f5SaiXdvBwhAeb+W6ZNr+e3JYlb5jWEfgprXbfBSdUwx6FTv1GWh080kwQgcUae2bjuVqalr2Cib5yA2i1JgXBzOFMYiUU3ZGq+z0OZ5VtdlYfXU+0nl+n2TMLd110tz14ZF2ZoiN0o3XQu9dG2YZ71MK51oU1YtgiDRpHYhxNSk/92oqJel9aJDFzXJyjCLPsi8bFHGsw4bHCSUsrvvnoEOrPYnj47WblzeWTvxQbe23u5dNb3UjjKthzGV5vbr/Zeo/Rysr9Lz//ynR292hU8k0V3wuTYxOAYWBhbQiljEa1r+pg+GzXNsEzFkj62d/qEfvDa51f76FyUSk4qCidIGFLBoRUyY9QaL2QwJhaFzkm6u0OaGzxEs9r/7PWb3rd2f+MVEaXSoYtRVhfuzghHQoko8GSAKwTWu6SVZ40sA38/7zZyT3HhPJCqxSsS70FGEhIwjqAB0Ty3GY4UoIh2LGgwaH/uiVrLVZlGiSRbltIucr6XDe9biQ6fdVjt/7lqK1ASR9z72wI/+sS5xSyGtP3tx7xOfU5PbuQFttVa67ViZQqFF4igu6euN8+cCh3XbSyHDQXQ81xh144m12CRqpUMXGMmkJhmE6oC7FgOIciY1BEZEcdmolESAbk+bn//MB779fv7iM/DmzSQiixbHibIhdNo7QBFECcygggOHanDfA13X0fwwCWRPrsS8WBv9Tze//tWe0hhAW4MSITEg7WpTXvvxH9v8i3956X3vhxeeO7VzTQcBUiEqYxRChCgQGRIE0hIEMEKiAQBcl7x5cfwXfih78kP04IPNieODk/fMLl7MHKTEXPvxi7/7yLs+8CI2hvM0hjSGynfBNaFxed8uFZrbViEx45Xbu8dW+8fW8+sHTT4wEsmkunMBFXjvwOh5N66ic1bSpSE2PJ1PQXxWpNHxoLCL0Lm5q982oB0hKO7Mr+HIV3NEIJI7WaM7syQ56hfAEQGGkPDOhzAw3wFRHEGZmAWPYh1H2w4PX/nqJ/N7/qg9tR79fH7pcmjR1NmXfv6le1aG431q/ZEkDqzmwGJA/zdcNhOEAIY8cYoAIsgiR1pJYCIRYRINICwORVCYogBrEfJR93oGRoArS6GXRca6mrCB/t2P2bse9hefRvE1YZJlsWttstnv9aPJTC+Jrui2+qZpHTKCEUyxo8Wr3+if/4CM7hGiuHdx9sqn16zBkKJKDIhSIBLRd1qnCqxvOrDgJIio3FoWJO+icJL1Yfe6/P7HcWVFz8f5iZO12FGy5mHVXb3cGwyarlFamKPSfXXq7uqta8C3KBnZe9/j88KHSIldevie3Rd/dZjE2BLYwjdlF70qVrTosKhCy5GSnBNdO8RKykuLpz5lHv2wPnU2aDa9PhISoEpsiECklbaCUVktQWmWydc+22dSEpgIQ8c9Va9TvjbQuzvQeVgbwsqK0Kx/72m+cRi7wEGRRWIOKRx88EODP/kjU1NEB366lRbLoDPlA3eNAHKSqOVzkdK0a3oVJF5fmYTxYXXv8cE733Pfj/z66wfYU8jzKsTIpKNzsXNBgUiUgIIQYmRA9DHwHSAuovCRNOKOrE8LKHV0CAxKkTY20SSQGFIaFCEGhuDAe4kRBTDe2R6QBI04SrIups086Z+6R2/ek64sYW8koKMwuRYU9DGxO7en3/ice+6N1abpMS7CTjVse8eWWi2Y6256oMUr4hRsEIghTib7sZvNDg/6w0oVq2CXZX4w6HZkuh9LCaYnS2aRFrQxMCsb59dXy27vNz71bAdueXVlQ7yb1THKxtLydH9WuXKe93RPuoHlNBuo3qBn2o0zPrcHly81n3lJb99eKvT1m1d773wCTt2N9x6XDz9247BbPXZGPX/Zffrj+SRe/Ae/OPr//oT9A+9+50ftzR/7q+VWsx/hoZ/8l7y8LKCnz33e71bF7suwvad92ezDsh1O9y7fXinz7/ghe+we/I1/oc8m5tFzFBeLT31aVTdGSpscWlcfdl3vBz5UWzefVqt5j/rDZPMEEAlzjEKgtemppK9ReHZ4+9UL48Pb7awejdbWz51lKct6C5
Qhm9nRurKpC5yiFg/lwpWL5ublGxCoKxWprL+6oUGqOulZxaJE/GI2VZZd51PddzPY2ZqefeTbBsuDpp5zbEw7lcbn2GjraeNYWyzZmZ/Sy+oj39d75L2jYWFevZRfetZWc7DCUaabq/abPhjffCm5+Jq1BEwRA6bQll2ZLelv/pEnPvyHrv7cPxxdu8TRiU1nZdeMfbbaf2H32v7pbOlPP/mxd35s9sxv/fw//5S7UVZSaEc2CqKQEhFoIly+XZWOH7r3/BrN3v+OR776zJdWeplvuqbofMQEEY3VGb3+xoXtnT1WhD1aXss0Vhrb5YLW1oa7h/MIpp8OurLbn81zcn3oVtP+kw+e/IUXnlKSbO1PcmuIQ1vjrOUKWFOZF9mt7UtLo0z1e2eOb+zHvb2d3XMPnju6Co4iecgAAsJIR2cIzIJyZBlEAQUSmQEJARhBABSiACsSIpUQG2AFIkQCQsR41Bm+s5iI0qBENAkRaECFQACkVDgqr2mCKCIUEZWgCFltWCSSQgAtQgAJgSbQpBWgElRGIaoQGEDaGAMoYahCHOi0f+x8b3PFpwy+ChK1LuaH24Oe1iaZLw7iYppnpzHIfsv0rvf0smH8e39fNyUXBT/5Yb9yAg/3YW9Ho4qgBJQsujDetWvrLO3u9ZujUbe++dhk95pc36l3d6CcZ+Om3ylE8jqGk6Pko999gIRImjDtD6wxrW+6YW/4xz8yeeN5Pek06RAZOUJPGEAl1jUtAZNOMGJ661b7iV8bFH3YL8kF7lqdJkBC0LICXyR09ox59EPZu5+MK8c6pmBQDFcSIXibqURlsakWpRQffNIMv3n27382vrVTVX7YS3T0WkPXlpIPMFFMxgl2TP2hJRAXWQrjQQcnrvbGiTXWj6s0EmemXllZ+a7vDA88xig0uOQWXbrYDY2TCEEAFUrr29/+L7afRDZYdU3VGEYnxAhaU/ABI/ie5qWVumnR12rvMpXjpN+fVp02wziZYODZzDlPPTvq2ACIBgYOs/H41q2bMfi8DTe3J/v7M3dUPhQwKVmtrFFaG22TRBVEihOtFAKQQluo1KDR2jihCMoqihJYOPpWIzIhAdm3SwdGK0MkUYQFJLL4INaFwByD+NKVELATP/ddB16HkFIkTXXnnbciRCIaKFGpxpTEkmij+oZyFKVRE0TPrI5AjhwxegVCSFrBStY7c+zMwPty56Cb1qfX1yfl/s23njv7wXuNtRNXzybzPDpttO33IKnKcq6UTrUxiXKVY9Wrle2tZIpOrrzryelL36gmE5MrCz1glqoOAZRXfmfMO9vdtct07aJeHNi8qOtWKzp590NmmGeZEal7D543K2n93GeTLGHS/tY4HI5Nomxi5eb+YvJb2c51m5NdP+EHBRWb9c7+YOu5oZ+DOeYzakeDwZ//Efj7vwfPXTIXDngOyfmHwC3w+Su0c8m+50nqP8G7h7QoQUcoMkAFLOhzmBAEBcUQQgu+Ap2AzYhbRFId46IC04HVIAwZUWpBKwsJcKq7RoSPoBY4tNHoKs/zk6vu4EDvH1gNpJHbWuYT1CBxJui4aW3Ri1Wg6hBBN4s2TzIIzo8P7OY6aAUE0dUktmMHupAgbZzj0jFdt6oKMDlIBtRhc/GFV8thwu88/fhHv2f93Obeles6aT7ysYde+09fXvLZVl0vrW+ePn8yBGnqLLDXTVCqK04My0UFTYumFVKU29HJtds3dln3lccE3VIqSd4vtxwx7m+Vx+7KdV0Wk+354Rwwf+z8w2/tH7S+kf6gLpYjpVSYSBqTFJRBbUQnpAySQlICIqxEVODICADYhdBFcZ6bLngPrScXVGQTRJjunA+PimStn1+/uXVt0SCppWGf2CcsD66uXtufz6q6Z5Qry5btaj8Nwp1Si5Z1xI1hMjJI3DFLkoCPsr2ovYFMK1I4dS5R6ju/5ckLb1wg1d9ftI1z7PC6Pzy5uawhcMDWhYHNbhzudhFcFKWwCX59bZCWrqd1Wde9fhK8lbrrZXYRWgihqlo0tq8xdWF1ONhbzELkGYS5c70in9WuZ6123DNktJ67yAyEmjS1PjBEYRwOCh+RgGeLhclTFKEASqnQcRRRxmhtbEIRUIRCgOA6BbqpvTVKCJz3EoUBK2i/48PvYOvb9lDalWbRoMOkl29fOVw+NqSRHd69EbBVSslRWQPwDhPwaHYEcBToPWIoRAEQVgSE4JqmVwxPn1i7ceENs2Zf2xr/xFNf+Ysf+rYHF7ixodfyQTvbqg/2I7uT547Hrf3t8W6xnn/tpU//7Ne/vjO/3Uvyw/1qaWM4m9cuuC7EtZURkZRtp0gVSY8lMrNgTFLT19miasumAQjBRQLIGPJ+0mp1exFOjTICATQAbn9e73nX1/rqzsFbO+Oqr9px2RO+79SJtukShNRoHyRNcyuxp/kDj5z/xpWtqfikw9D4qm36vV7OckyN9t0i0bjSG4auwbzHDtfX1l++ft0pZM0OSLKeNUk9n909OFYupkmgYjC6Xe4FUQnZUyvLntrazdeGo5eu1alVTedXlkcRqlFvsNuM13r5pO4GK3Rus4euzdIVYxOOEj0brbhzeUYuBud92cb/blREhFpT3bQsfjjooRbWFEIgoqaLWhQbtbVfHV8erA6Sg52rF8fh+AeOP/C//K3n/tGPpjcm+zemy+urmPJjd2+skLv0d/75DYT1tOjbNGAI4lFj4IAQlRACICChIEI0ZE6fH5fcJS4muu101js1euJDs9/4UkYaiChGLh2j1bmJEkKHqOpUGOqowArppnVue7ddG4ypFcOmWLN5QQ6EoyYNIsAaQcXWqzQSQcCgUPqEoW6BpOZJGM9t7GXF0EAgEaU0oIigMkn0dnrmoRP/4w/e/PIXli68aQ8PSAFr3SoojPW7M5d2CKGdtyZBvZxJkTeczNvRyrlHkld/G5ptzLpkWek1u7U3mU/nm/ceHz76gPv61MRaGJhQGxN9o4liU0MKymYrvYElPNbr7d7Yrye1BgKThMhaKU2MURBR5QkrgNTbCDWL7g19U6vEiWtibEwiXRvFS5Klyc40/NZzguCnixgiCAIrD5GIKDgOHmxmjBbfWZO5fGjWzjchjRbJqOn+DAqUnWnSX4qTBboARoPSR1BtjPosxvZf/Ev8mZ9qtnb7QKjICUUgZDaIoAkCSYgoAbWBFPy0NABAAKKWLx/Am78KQ920oVvJRjuYN9ZmyUrmbj/71eM/8IML5eq6qaZzEzuDzhoqsv6iazsXwaq69Qb14bxdXe5pYxqoCSiSBLcIoUOQPDHMdRccaALEuesMqSRJkDgwCwlTTEzCnTRz9/YZGjAfhYqQEO6QKUT+630L/v/nSEeBSSAEugO+frt9poAQATFEDkeCGmEiDBDr+uAzn/zP59///nRiH149ofHQ+jA8ddflndlzr92+fzWh2PURjTWxcxI9gKCixnmLaPHo3AKjQoUCEuGoVxq8qEiUABFwQGCOkSQSEUgga6OyUWq+eXHx1L9Yed93it20/WV6+PtnixoLDys3dFcVD3ywtf3URgsde
nAmrSOnyWB47J6ZgG48+UB5Suv3cYY4eLArOyDob/RocwC3ZlFIs6Y73CZBbiECo0LCKIxoGbHxHZNCo4GDDXMlEb2H8byHinfeNHbF96VtFwZ1qD1niV22qm0mh7sJn5BbX3NXt83x03DCqzOPByZfKq7niTK+bW1eGJN7joo7rUAaR8zc1LqfBAlJlgXfxHoOF56e7uwmD7wrf+ybuj5JbATE5gOtlYAEVyOCaIUYs+mt3a//phWFWgCkIag+/AH5to/URZI9+zvymd9VzIli1Y6bG7sjhtABE0BkEZgyjP7wX1gMTiSx0W4ffUOcMGdCAMKNd402oO3aux8d//bTfgqzlyoe9O5618rp072fef712yab1JFFus4jUmTxPvrAzIIALOCZI4sTxiPqOSAKKEQEIQIiEBalFRojQAwEWhttNIFVmFglEgg4IfExaOKEABlUrnZmAABGAFxnTdKi6Z+6l07cTcOVAAa6iDoqhSyS2r7u7Py5C+3nn+4dsknzadPdLsdtf9JsXZ0vN+np1Lcth6CsZYuklRfb+S5TfYsgXqtAvVw1cQvV1KxYtKotpSyruDLS6dJ6f/PVp752azrem2md995z74O0fallxCQrI9oU+8PlaMyNvcXaA8fWNzbHn/vUS5/5tXf+mb+w/MQ3x6pcPn/35z/xpRNnNtOEeu2kuv7qNZudevRddFLt7+28+elP3Ts+JBXu63Hz6X8qK+vN7s3RrSu7O9XJv/r/mq+cngtt7+2tnHrf2t1reODKG9eknKZAFJ3rmmF+D5pEx8Nw+8ps2k+PkV8e2ve8s3x90ksH27ca79Cef6zsn2ZVRN1xsolatY4Fo0k1csTEiPe+q9ty2iy2583O6Phmdt+IOLIK7WRBJDazJJ6bkhGaepEZ0y66q69fmc1bo5cw7fdWhpGkAvHQsXc9syTSNc2UVLM8LMrJobVtbdT5dz8KWTorD4lR2ZU0LYam4tsX5nYzv+vJaze3NhcTfeIBu3R89eaF6j//Jr/8mq07CDU43n3gXf7H/+G0WA6f/91Hrv+YKSPEWky8XeiT3/uDvYODnRc/cevCJ+f7E14/aaSl2SxzcTkBveZmcDg187hx/tXt8d33fPP3fqz3tZ/+j9G1SKQQQoxaEQimQ/vuDz1639nN7VuTtXPFly+82qCKCVatP7U26BV6ule9deOmLtLZorz37F03dveXl7J6ttipbuQZDgfJtOnaTvuIbr+lhtqqNn3b00unjj/0k7/8lZYT2y/2ytrF6uF3nDw8qGddVEn3Te87/42vvpr3LbJGnb3wxjYHVGmvWvi3jw0YBTnIkaIAgY7SeiKCKAYVoiCCIRQRIRKkKIqIEVGRKIKEiFApIiTgO040AQQkFYGIwChSgiRAgAoRWJCQ6Mg6BQKAElEQBQVQkQbUAAhKoYABMCCGyBIY0gpIk1KIACqoGMEFF2NkkBgCt77UmVZJAspZVbiyFNHLmycTckaxiy5NhulwDVTaY9P5aVvuQl2tJFZObMjSmoDqD3PXtNop0cpHZzh0F18I5cbgzF0uPREG6vXLz/fRrYqW+VQtoqkZQxSCMjg8fU+TGqlb6DoFLk7KQCJtU/mKred+GsYtxogclY9ucpMCB+cU6RCCgMKImkMeOhg3sQykUDyDcsQ+WD8/t2m/41sHj3+kjcveFEE5Dk1iBt7V0XVaIOnZIB3FKrbzcPva5GvPQH2oFCtNjoUcRIXFE++xjzxWjsfMMtmfr51bGy8OPKvhPff4rJcUib56tf79r8KNbWk7CoKATtTgO79n9q73T0XXN6+l5fjud9/d7l+MbYjMXsQrBYw0qeNhGUWh1jHETiAQOmYA0AYkCIi4w1tSDZLlYRq3p5cu2LWl0cpSuk2XL12ZLmJtZb43PZn3etkIOXbeLabVfDppys72h027u71/2MUQKYKAUmSQNCmNhIAMBKRJK1RKSOWolBBJ0JpcbFEp57qEUKIggERApRVphYrozr6AWUQIQELsWiclm8hiHANIRO/AC2EFbhEaMN5a9s6VDVdOLTqnwaBSDBhEmNkAGEpQlATRwEqiEAtD5yJ7h64LrSNkjUpbU2htMahMxYScChCdC3zpraeWHviQEre/+8b+bL6eaIrgKqezPmFLwKFuWXRCVOS2ZyI4wmxzb3I5OL2cZROW6vRZ40r15kXtEJwvdg/z65fmzz6t9rbrnVmrLBrTdnVs23jjohmd7JuQyLj96qf5/T9QfeQH1Wz38Kf+Xv93fjezqBPlygr3FjZp67Dr7js2+NaP6WyYbL2+9R/+/vEPfBs0M16eqXveeenWbLQ/WG2abm/bLi3Pr7w+2FjRw4Eepb7emymLmyeW7urJxdfROZjtw+opMAqsYQ0UGcopQACOEJzEAGkGSknVoQgYAHLALeQjUAKQyWyKmUJtMc2itEF3+skn9anz3uaq6/Qrz7fPfj4NmkwB2dGDIvkQtIiUcwkISQ4bx7vZxEznOJ+LAJetaEODDDFGwhB0ytCNtyXOOuyn3vN0v7y5XS+pt/ze1sn+d/7jn0pPPOiiSald7Py2X7yaHjfLx4vmQB78Uz8ej/vJ1htuez9bWRquu5heGI9fXXnHufnOvjrc15ujGLxzDtIsBEFt7aBofXCxdRTTYVLtdxwVg3EHZbe3v7Qy9L3s8q1rlevUqL956sxWSeUiWG2stiJkSGttEBWiEiBmIEIf2UtggMCRATof6xCrsu3qzle+qXxg6CIAEr/NnvBeDubNOEpr0zwzC46h8UlmHMKJ9aXcmFbCYd1aY+rIdVsnWS8jWEoT7QOgdoQdSyVBNDqjbKJ7qL3E+eG8svZXX3i6r5ONtd7aco8MXrk1rQMelC1w0EoN+n2ntMoSP6uG/R4IB++mpW8XTV9jD0KIGIB0r7i9aMQHRZgkSVTqpmMVZeFg4pG7stcz/V5PK2t6mFlb+gaAOialUAMs2o405IkCUq1jItQIHDhJUqu15DgamnLRtDH0h1lwcbaoOAppGOaGQJJBxoG7xiOqiGS10akKAmhSAVcG5xFfeP710yfOHi7cDJjX0+T8KDk+KGOjFSISIQng2/EiEmAgBUfcBELmo6VRQGFwwQffdnG0vLm2Mnjw/F3PvPRSb3W5G6b/4jNPH5+aP/EH7n38HfexVYkdgE7ntP7q/uEvfen3LsXD/aZeXR4UaT4rQ69X1PMaFJJSMfJwuDxvuoodRvHTGSBkxrbeQ0QFflbVTYgCIBLPr44e7mWLycREtVjEnSjHBmk/zfqqv9XGkv09JzYuvXqbiUlhy3GvbjfLOi+yIk9qkKbxPoYUwz2njl0bz96cTrI06/V6NtMH0/3ltD+EZDhKp7e3lrNsTdvxvJlbBai39q/V0gqo4bDXzF3tfK+nh2lfWVhdym/Pxm1FSaaHvXQ2a0OCi4ZJ5zfGDYto4Sw1wTkEmJVliMF5HOgE6tjO43IvG099El0vNQEBldKJOZiXDXswJk8sjP9/RkWLuks0+rbTKS7axnUeDfa19l3sWiYgNOrq7V21CBvGZVZ+4K98
jyh77sy73vzkmcXVm4UGS9XN29O+IzfoMugvEWEVSwx5obUHbRRJEH+Ed0RG9iJGlLgIk1lGGZhEFwkYGznhGqL3sUW0koEulO58NHlWW6XV0C+mhgIaJBbyzAcHe7/9+xsPvBO50tNGdhrFGHwAA2iJfeQuKguUScDakXItZjbVIgiJf/TxR/76D7amPfjyxWu/+turcZJAcF5EWCstPvYeuWf4t/7K9XXcuP/b4Jd2Dv/jM6uDY0onuOCg1PIwY+ZgKOklXRcUqsy53s7e9TeetY9WuWkVi+/qwxcvnCqb5VHhtXctubVl78oksEbrW68NSlGIStyU0iSJIs2iiyB1V9nUMAtR5mOEBFicApTOMwAlRhS4bmEQi35eNV3SW22pbXuttF2/lTQRUCEQUgCzAO9FR0VaUGsf0EW2tsfBg1FHDl+NAtKY+nD6pc9lo3erx84tmkotFSLSPv2VrC4VO2WTSEhMGJAjAEQOXgdWZVScCjGDIKFGMAwgHjINaQaxBlTgwxTI/4nvSzeXx//qN840DXgBm/ip5MHoBWUGQCvvotGyuPD8XYu3Hnnnw7vPjfOlTaXSqrrhoXMYOt+UC+9UpgyYFJMChLo01WmWlPMmRGliXC5y11WoSUjVXXReMEmdQPCgUBNGRowcy7I2JiGiIk/+WwHtzsyI72Bf7hil4L/Oku6Y0I5SRUe0F6WOwkeRJbJE4aPj5SNDOctRPQGQSCCCIhZfXrt5oeH3PfIo5yhNee/ZYTmgq/uhOL18MJt4UOTBWsmTzFUu6WmDQioLGB1HE5mYxSArVncm70x0lNz0zEDAKJpFWOKRh1lIuoc+KOkqv/msevP1yfXX0lNPrL7/e1tzDAZ9pxu98Zia3WjrmeS5lkSuXQvlLd64lzYfqOfd+MLX8nqhPKcKXTPx09dXcIBvPpPf9/CBWtp25vgH/iR/4Rd0CRLILzoAEEJtrJCJqJA9aWIhIOuDN1aLBhTtXKsIQRbIWiVZrHzSaj+Zkq+UTuPegUkTivlux9n3/9n9pZMnsqFe/BdYTLY+/kvDJyfZo0/iaF2Qk9GIDo0WDl0poUSVcct+OidjbZGAcdG3HWMQ0ZmBatdwDLFd4CB/9DFfJJTq6EMMHaCyad9H3zSdcrPxb/2ztbCIAYNJiEitry5/yx9dHLvf6gG87/7utTfj1YtLrOunv5jfmosDJq1MAtiKxJBnkG1kqm/AY2/ZH1z38+305HJD6HwT49Q1c/E7JR6KAaeLcRk5pgliGeiZm1w7w8FHCVpR10Un6CLfkeod2ZMiMIAAAYo6Gs4JKKKj0WRkUEheCFkpJGMT0IjMVmlNwCzOewmdQTYK8sIokBDlbekNsIuKQ912XX+Ay4PB5jGVE4jOjPUu+OpQ93PfzOvxdPtLn126th+5N25xYlP1yCOT3a268QQ955MYJkYAW8YOwVpre1bPbCyR0S8m7WyMyXWl6gCcDQtOUQpOtE2KwkQ1vTInPqltURQ1s37+qy+fYA+1uK5ZOr56st8v0BPC8krevPLMwYtfWVy6cdpv3P7JX5v95lPkXDLqvfevfLdaX7/xG1++8dUXzNSdeeK9/hd+uTt84/gHHnjwyXPf+OKuGZwtD/nUYaSXnlnqZ5KQPZEeXH0l3/r2peNrtneKW+tuv1Xd+FyehyQtBFseSFv2lna+IL/6S7uf+LmH1u6Gc2df/9qXl7/3+/03fYv/tm/d8rgI06Sus2mQ3lL0YX1llPuM25qUchxi49h7a1Fi9HVTH2zpVE7cd18MykmIxNx2Jk0kdigefSSsFbks+On24uD6WMFanlO6tlbGdiGc9awCVIaWhsdms12EQ3FTq+xiOo+dT2xv4+Rj6cpKu3tLlSX7DFlhOGy2biq1lr7/h+cbSxv3P5B+9UvDvVe6z/x6/eyXVgcJM0QR1SsWteCT3z3PThD3zq7e5503YCC3s0ceOfmX/uZ8a7v7B38u32ubodEf/aj9gR+B8c7sp3883W3TAvYPp2TgxPCtMyGMF6ZLzH0PfevZ01+4fLDXLtg5YAGJqLQM1gfvfPfdt964XbZhdrhQKunQ3NrdXuuvhLI+mLWi88lu21s1reRvTStObLSDTBzporeMhD5GnVROBdUcBgxhOeu3LnvmZfzc+FnUaeODtAuV4bGlzB3Ot24cYL42bfErb9w+dXpzXrWbJ44fHDTdYt46SGK4Md09ugqiDwpImIEFRQgZJCpEEdYIhIxyR+1LiIQYhQFRoSICrQngzlGBOmILKT46IUECQBXhaPvChhQBARACiBBziCAsQgIxCLMwQGAGUiwIAIxCIATKoEqV0YRWkUJSSIa0ImIEEK8YIkRhw8A6M3U9vf7mi3f3v1kPjM0GKvgOJRAMlpela3tY9EnJ9jXc3l2uF+rKeP/3Xx412qGk5x/g/lLc3Q+t720st9uLAEp0gijGcbi5A4PB8Pyp3QwlS6rrt/jmbrooUzY2I66jRvIQbL/f3rptDBGg8p3uWplPYywJam7n0NaxDUSKUmIB41sJUQlyVyvA6LwxKRJE56V2Ok9iFKMSRODo56t3rf6lH10cH9UmlXnbHO4RB2MstEFm+8RBk4KFjrFBEgo86K18/dlr57LMA6JIaDw4bpN89Pi3hdOnc8M4GvZtFgh6bt5HAZtaJybVeuNMzxSHX/gy7u277QMTGIs+n3xoDv0CeOgiTifz8U2ZBGlREFmTi0IaRdgSMEIrwUkkQlEUSBAkMmgFFliuvVqcqcYXp2F/321NYKcYDvr1YdXuLhymjVGdjt1iR0wqQtpQ5VoOnmO4fvPWmzdvNbELLIIkoAUxAHkGZLbCghwwSGTxQAgaI3EIwWNKAbwDiDEwKkUpgFIagHRiBlopljuLgUINQkQgMYbYTWNckDYscvQ6FESREsCjkii6C8Hx/lTGLdVsMlRaJZ2ighJiMqREmDk4CQqBiUPoAqNj4q7RXhQAxk4JpMqCuNZNOhx49kunTsbZ4QBdGQ/f+OIvFFbtvvkNo7WrODRRla0Fc2plqSprZNQsFIUW84PnvnT89PGIy7u3m2zSuvpgof3aD31rcnx98g//XrJ7uLI88Ic75tbKYD5vJpUEARD2DSay2x/kJ8+orsq2L1Xf+I1uu1r+4Y+1S0rs2v1/4e9OXvu6317QtE6X+2nOVNf1sy8WezcOP/t0P9Hd3uK4J7hwCbrXp/sf73/HtxfL55aXjzWvXcg2B5Dm+WpWH2zlmmR3X19/rTh1rrXDWKzxytRIACDoJbGuOTpaPRM5Ea51GmA8hSocCUrAIiYGiKRr28ffoe6/X6Y1vfqqQcLEY1fGRYAil4zw7nvCsZOSaoaAKcEj7yQCeO7r4BbCHoKII5X1IB5CNSfW2Ix9s01Lq9yXxIwUBCQLnWfXINaUZVYE2/2cQltNA3mJIV3GpWJw+eDGyh988H1/+n+Ny/c7SVxVB+DlEx/umQfag09G942IVdI/1noZZJM3Xvvs8XN3t35hO6dtD+KAF5XmTvmF7g995Sign/iEEY2DHii0i8ZZS/0VW2uYS2v
y3LRqdnM8lsVicWiLZGfnEDHv0ChJohevROsUiIWDGCUGUAEqAlGBRRgF0EVpIzceqyZ0jXeNbxvXBgyBfQykdRvuNNBOnd989vnLCx+ixhhiYjWEuNOonZsHozyJIdrE9vp5J3DYcN3qxLsMIxkpkrycugqlATXz7CA0zOR4Id0oz//It3/TZ196bb/ybh4PuoY4GGWy1CgiSsx02kHj546vH4xzZTsv1rM2IAxNy/1hfo7oAx/8A59+4/k3bhxG9B3odlElidUKCUQrYKRri5kGtJo8qqbr8oT6ebY0KibbNShVty0jpUrVPhQqAW1D5MazCATnlVXa6CwxXdW1nZRtjCy68xTYh5gmNiAcTsulPD8+WLq+txcYfCNpglXb5r1UJxkiTq5fSZ98f2vxxJmzbYsbj51Sq4khEzjMuBPEBPRRjEihwB1N9hH3g48SRgKECAwQQgDA4ELXtE3Tra7nQPjEB97B3d5r86ZDuOtddy+Pxec0ne1HnOcrxa9/9blPPPvsYjG1JoGVQcJ6MZeym58+tryaFTd29o6fWH3z5i6SLsvZqJ95YqVpumgBVVXVnXgtpEGYmBR3UUSrG7PJt5zoL+lMKbVV4qSCtV5alZzksLfo9mL75tZuv5evEuyrmCV2VnfXdw7ObS5rJQDYGxZuXN13+sTm8srvv3qhcRIx3g4LATl5/BQzXJ3fTmx616nz6/ng8Nb81Im7q5xv7u0oCKZvm7KxodbaHC6agTF9YyEukLmwtkWYaw8FqKimresQOpSdtun1Mt24IrX93B5UlW8wBChbn2dqeTBwTcBBElBBFwkwJXTBB4A2ij+S8Br671JFw8IoA7ZfTBdzZCky42IYFKYFbiUKSMtRJems6rKkzRJd75d2ZfmV518JAIO+TVtprVsMIBUq22lfaTCGtPFd5zkaYtLIIWrLiBTaePSUhUQp8+z3f6f/+Ld0y33vGIH6s62nfuE/3CU6Algy3IrtJXux7R5/4tif/9jh7zwdfuW/DLo2AYyCoqCvuPvdT1a33lrbXG5jM335RhIVaxGNLjhFaHKDTIzhQIk8cHZkj5Vfe2MY6+G5lfP/y994ddUfpGr53Pm7hv1b/+Rfr1lCgeiiUYa8lJrK5eImT9r5OJst1tZOsiMSya0Cj76LstIPq9J1sV+mbeuiZ1TpPaNz4ILp5WjCoJeGyfzKv/3F9T/0kduStMmaS0Y9kwm0IpD2jJemTGfcP4FqKTEU2yob9G0MviuB0SqLACjMwqJtFBINaBRmttFk7j7bXLmhdq8NB322y5PkbPGOx7avP2PeelmqVpOgBK1IkKkoJLooTXRtiEhKop9ba4UxsEgMOkabYk/X/pUvTW5/del7vjc9fy4zbuszn+df+MUTnCuw3gck4OAUAGkGEtIAjoGBBCIiahW96CggDInUdy3n7/l292v/KZnXwKpJl+xf/dmDNe3davVP/0kBlmMQRKMUCUQW0YhAOdn1nRn83L/+jvsf+dkXvp4YvbTRe/bC/mFdl6EDAEEhhl4/D6ECYYkoTewxuxgINaPWZLQ2JKFtnUIkzf0MEaGtGmAXkG2eKYUBvZMYYtT+Dtb6qGSACOpO6QuPpkeEwEeWs/+KuL6TNBIi1Hj0eyhHr0IIgY9CcyFKjCx3AP8AjB6ANGhN8fDwjVffGD18etmMXr++M5tsf/TD73/q0mcJuYswr3ARwtpQrW6ud/UCmdLjj7x0+flzBSUQCVgYA4gQKiEiElZCwBHo6F+JA9CIyMIk7CFTd39HLUvuFgzis7bd4otfv3rhK37jvuTUu9be8SG0q/s3nzIxWGvAU/nKy/HwgjpbqvRBKBftq8/1ukaR5YjkFrTzhhp79+o3tpeP9//Kz87V6n563ggNrSdkSQgVRfZRhEWRySEskH3snJg+pTmHGo4cMZiKItKRxYvzAFYxSzPvCGBlncoJYFsu/BYsn3zwexDc13/tl+5z9cpawntTu/taecGnj31ze+OaGu9ZFHEONYAhRiNKUZKLsBjRmqXwURH7BQVUYlJtqcKtL/xybHbT+96nNk8FCMisxHdTVvVBfuNZ/+KnzGsv9wC9Uuwjss+wFR28Ix9qk22Y4ank818cxP8Zv/SMHoM2yitWGFFh9AC2T5RJcGiEnaPLz7z48Z89tjlaObm+cfLElae+sr/voJ/YMm1Tu9tQnUXCuvP05dcmV8skKEHi4DgweYYuMh+ZzETw7dWUiI7MZkfWPgHwDIhgrM1TI4EBwGa5QfAxaoVpqrJEa0XOeRRJ84xiOBKhucgRhOOdN3ea6hSMEhUHS4y6qeYZGGWKSAEkao1Kh4hxuLkeH34kbO+jFHPIu/seKU8tXbp9Na0Xy92wPNgf5kU/7WGURCUOwSFwCDu3L1ud63S4cvx0XZUxUL9YrtOiUWHp7Np0d8aHBzdfuDzlNV5a4USPDztum7XUOWqW8n5ikLoaQURDOR9Lv7UwqSazNC+oN1zSZrQ0uHFpO26cu716X3r3yeTJOT9VKe6dHTy8+/XXNjv2T19+ft+nf/rHs/c+kB8c9N/4cvVvPn776mFVTx767nfApr39mY+f2bDwytPNWzermzePP3h3nY2EZVG25dK9Z3/oLzdf+1R35avL999fcS8clOvnTuSLm+ObX076m1uXrp/5g9+3e/t6cfstfPPrCcHyiVV3e0JW63vu74qCB0NKsKtriJ1iZ5UGJGV6HFpuOkLfjRfIYlLFoFzbua7RKQFIuT/vakbs22FmRoXM2zzJMfpYtQZwMbmlhPqjoWtcQiY2U1d3K8XdoYPFratmemBE22SkyllSXU3rXVd39uX/tNSeCDYp9i/YJFhpQjGATtX7t3USMEvNsWNJt9i89ZXt3/6tg8/8xtnGAyWT5Nits+/dvdaYvWo1UekS0CDZLn2YiN/ec2XTBUqLYZH6RMvhK3tf/uJfqiNfKaXD7M/97f/5hZ/+8ZlwZlQwSpPiEFXp3vz8S4lIUzWhYrapITq5upoTrY1y21Dv+LnvuOude7de/+rFVzrn055FGzCipWTYt7tb2wctah+t10s2q1gGSb5okQ11MRBKLzEm0Z1ijl01DqNscCuAeDPIR6P+Uuy2D28edl4SJEwTv5ijfruGCQAsR9YLFCAEElSICIpIFAqBKFIiQkQKiYXxSA/xtpJMEb29ZDAf3ZoRWRiRFBIIa1AGNAAiKTlCYYMKkRlExRijxCBO2EkIiKSBFCGIBrJEqegENIpoIaM0AmhARSoSBmERJIVKIzIIMkh865Uv3b59yS7bYrTRIzp+8m67srJX1tz4UeyS6Ja1DvvXsumkfPP55HqIYro8WXriA/PlE+nycpxuh72tuDcHDAyRI2ultYv1q281s7G5/67swXs4mWmZjfJ+c7BgFh2jAtGG+8OsbCbBKbRpNhrFie/E8bJAuedeuwiTeY7WsegoJIKhI1BCCkFxCFrp2DSAOsRWJSqq1inrap9r0H0p7js57y07L3H3ylJSOIF82KtnM4qg0Ak7ESknNWjWS6uUj0L28IPf9x2TT382Ja
YUSNmmDoO77mr6a+OD2ibl6tCIyHxnl3e2x29dHNzzYHLfQ21ZNteum0Spx++tn67S1uKiRVfp17/Se6Hqbl2lnd28rcuD2lRRUAIJGiCALgRF2IqgkuDZazDqqOCOnkGEOgBwEl/bS9wsA03BZZx3u/HW/GA8D2VnnEDHEiBcfeXZ3tlm+dhZm+RWUdu5l1+5dGU8n40bBEoVOhAAIQHxwXNACRiUAvECTFYRKkIOgWPnfTdZlAydNjrLMo1WG9N4p22S2MyCaA0B3h4VidJRS2SUGDk0oQtJAoSRotVMru1rG0hEIQblHc1nam+BhxFTAwm6yBIVOo1CqnUNQNDiNZD3nVUEHAUQwFLkGDwIk4QYAuZGa8XAMVWD46dHq6fV4vD2C0/3/LR962nKimQyRwi9PN/emyXWFMvLXFbATphyAvZdgGbvua8c+8AHwvLS0vLq3vxwvQAw+ZzWRsMH48Z5nszKemGKpD64BsGhlqQghRg6PkiyB370/zw8fr+uQ/2N1xYvXMnf/f4uN4IcU2yX08OMTmwOVfBN2+aJjqHpG4TDenmvAt8KI2wcB+zJzsUzSQdvfBE3diNKeW6zy/xw2sZpnSoEm4fpzOxeTdY17b0OOxuK1mI2UChASiVWWc1tB5FVbCE4IIDgQZHUFer8Dk3z7F3yrd9f9obU+D4quPUW7ExAd6rpYtXx8bP08CMhtyoGDA2hCZSp84/XVy6l+28ROGDCuuN2Dlwit2hz9hDLKrM9YgTxaCn6BbADyhFQmjl0MZZjMhrJZUVuE9u4eRz4emX9ru/9M14vt9O6GGogb4oiRL3YiqrOItq733l894XPhV5/7+p/2ljNACpg7mpHiQGb6d46JRTDjMD4uiJrSCXluEl6KaVCKFoky7W2XNWla3Rp1LDoy2Iyn8yiw8PZLOkP6nLmKIUEXOtYS9DogmhrlAKdkjJEKmhlgohgDCJtgIaxCaH10bvgO+9DZGaOEQSEI/Od6s2Fi9d8BFGY5Lqatha1QhPR2FRNqlZ8zAlNrmPjoldC0niXpMoHKIPjwB3ILHZj5zv2lNgYJDLv+ja+cWn7sLK2cC05QxSZmItRb3Y4L0xqTF6sZs77knnh0WZYt10eDGEyn/n5zcV6Pnzizzzx2TeuLQ5uSAZqMNBFmiSqabpQx1Sn1pLLkpG2TVlPF64wKgYPiZ7UZYjMHK1RQUBlRgVuWzevG2PNsN/TEsUHFGSBybRuKw9WKQXakBYQhWliXdshqX4vjywH47nVVh2JO4XzPFFKN2WjkV555o27Nu8a3p2u9QFTUksWDNatM5bIs7GpMACDIPJRpIgUw9GWC1kiAB4Ri1hY7tCMJHauGc/t2bQOsjJYP3Hs1Gb9xvzG1nf9oVPmZJFrfm7r8idfvbLH873x1CqrlnJfhVWFvTQtm6Y/KqzQ9mSGqbk9GedZ0vnGEhR5kmcmy5Oq8kh6US3Kts2yZHd31rRtYAQWVBTAzrzaGFoXomXoAngFB4v58WK1cb5if1Dh1uFsda3vq7pvNbCddPW7RnevNNO9gxk17ljaP5kPnnnr8t7CSZEX/Xw9oW4xp37PJnKiGJQdevGTarFhhoXpXZ5c9bFum2q1n24MMiMkytaxaTpfY1jt98UTsrQuSqrMKI/ec+fX1vu7s0liTGENh6493Dc+dZ2rWsgTk2QqxG4+nw37g8PDQ03DopcaykEAWOqqa1zIbbbg9r8qDu6MitquJaeGwx6n6awLVdVpjc5HTWA1+RDLukuMabjZ3Zp+33c//pl/+8//h7/5Y8GnX3n68rE9CCkfIrzvx/76iMzFf/sLxdVxVzkicFoaw8NCla3YbJA2h8iRFIIYFghABsm9/tbs87/Hxz8W8zW3KBe/+ZurN/eMFyACBwRq23crf+evzv/At45PH1urePKZX8eGAXTdBVFkDKg6hGfeqBL0GHpJzlHEKEZGQFTaUo4B5jY//jd/eOlPf79iPf/Xvzr7P/4x2IP5+LVw4q5EQn9ppbc2yhItHJUySWaAUSU8ffPaOYe9TW12bHetTT21XUClidkg+Qx/m+R7/sHfPTWdHvy9f0N1yzWrfpoNR9F3HCQezLUJAwX7v/nU5GBr+PC95h0f1iujaao5Wuk6r8I80/zeD7WuN9gbu9sXTblIcAASrR6Cg9DUJB1CkIRCCCJIEkEwug4fPBH+xPtN+6H9/+0n4/ZWz4hqF4S85PepXChOFGpbJK3rOAoTaiQVSYFKQYOP4py2tgmBSIkXspbbVsBlhbV7bfuLn1CrI294ePNWH4eRg5DRRyebRKBjBCrrZti3KBgXnRB6iVqACDUH0DA5vZ7/659bQLTz2/LxzyCGY3r3jd/6Z/JDf6544K4QAJAls15B6522Cg21AeKUU22WfTL/jS89+nfeM1g+c1BO59ProZlIiIB60Msnbqqt1hSt0eNpmSCsrAxHS4O2rRqPmnRsGmsVkXXNzNiMQ9TMCiImaTWH4AKASnIcDlUbfBt5Npn/d7mho/qZ3PGgHYWHjmrLzHfwRYCABISoUBBB3ZkFHT2mk1Z0dEwMCCx8Z3svwAjaKEGIzDG6w/3b1w8G/XMb45oub934z7/82TONJJH7jNZjw7FbFkj625f3876+6/H7T9+/dvCF38kECEEYEAgEjm60dyJdACB8xFQVUYKoIJrI2E7b6aty9rvWP/IDi6cO9fXbaaajdLL7lju4JroMaZLbFdt75/jSxX651aumvhk3177QpizenoCxRM8OQAxpo0KAtrValm5dbn/zJ9a/62+5/vm4+mS8/vvsRCV9IqUCYtUAOaBEhYgoKRrvHCoSH+hICKcFSMB1CjgwA+pIjXBnjAphjolxk3GmN3sVZV/5fV5c+ODQ1vOEQpGrnbh9uWh8V9buYDeLk+CCuKDBqOhinCMewcuDazzX3I6O4ff9SH7fse4bX5h//ql+OZVYruTL1Td+vbz6nFs6xqAVkW0Ok9mM9m6k5aQP3rMJEJUBcmwiw61d/41PqW8575OV0vvRAk0N5eeeVl4kt2BYK+xcJCfogZ2LSnsix5DU8oX/7T/m+3oc2u3qzSx98+7NQTVmc0wucpkeW9lHee9Hn7h0ce+TT795cUySpqHuOh88gwuRBaKA4B2vngIgRAaIb5fRSFFAEAGjNGmtExNENHGiSRNoCNb4XkapQU0eEKwKfSXxaHQpSNYIy3ReSrgTqVNWAYDrTL52mjY3ISkMOj8vWxMoM1m/12CkVDdW83DF66rfX0mHK/zIOfPg6Y14Y/t3fvdYcXxW7pN3ufJExMY2bV15n8W2GPQiJtjbqCBlMj7fgBP31kUS8/QAWa3H6uKFZnWsl09dF9MnKMrKDOGBxzfT+mCxfdgPJneYRqePpA4BEJtgZLIoV/KldnzYLA5XHzmbfOxDh8fvFpMtqdi+9Ezcf3XrV/6lrqhsDuONuLS+BNSRX+Dkonr+N8fZwX3/6m9upUsHq8elas7B8a1/+Ddg7/YgWRd7ps3vyd5x386VS0maG336pae+ujY7KM6+t/eOb3JNbKfV4
OSD1a/++/aVr2tlV3ysXrsgLfVs4xZdiLFMKUuL+nCnPb5sH3zQPv4RRzpgIPRNVbrKGSmaduLaWkB8XcV5o40hZTACzwNHioEaX6Oy/dUs+IxtPj6YF4O+F4plpTtPqJIkdV3HjimENNcNuLXVc9nweDNurI+jfMjzKtTTwewVs/Vq6kAfsv/Cb8YvgxqcnkvIv/n9cj/VivrtKKqQqRI7TrvIv/jzzc9Pz9bcT4xnPBzl9Ke+Lz/3GG7XJzGkjz7avfZCVmSDC18/vPC11Nfn8gwz6wMniQDp6GyuUSXhfD/bofTsw/f/3L/4J9/2l/6WbxBZdYGtpqJYGnfJo489eG5J//rP/Koq+mJiv5/Vk65s3Km1cyfOnZ0eLvRy33QuydP15X5cLPoWKRu42djabHx7vpbDMB1UFXtJ9uY8mZa91UwyPxiMOoeenM7k3odPXn75ymOPvGPy8k3rOrdbXtypxFWhC8lgMN45yIaDtY2RZbgOMwAgEYWEAAYFATTCUT4ISSlC4agJkZAFjn5No0YEQCIEQSAQIgWIR+YDpY7WDCAMR6VmdUchbkCABVEEYiQWYhARZmaBlqMHYCFGQlDMoIxRgAYgUXQUaEIREIajIZSwHM2QhTCCNdozhRDJQnRdfXArdro93JsrA13bd6dsrtz+pGfJd4upm3eHk7PnHp5cutY0Owezrjiz3hWrlGbRoHn0IUNle+M2OsAIFLWJhjsZCqW3x/XeJDz/SlKFNGrqYhIlSLSKhIGUbvd2ZbbdDDK7tLIzvwazQ8JDPbQ02UknB10LCgkIOAAdhWMgQowSgBSSBCKqTd8Pz+Z9qyS0QspD3LmKsGjDZa2D6hgCUJ4nS1mEltE1i6nmgFGsRdeOlcm01r4wbUbx7ndU7W8rDxhBMWuNZnlUDk9nm8X6hr341EtFX3qr52aHc/9W2xZq6cPvacsr7sZeY2bFI0/g5Un7xk0rpNs4/rVPaA5UdkTasWhSqFAlitBxAIySat14DwiB2SBoBPZHr5FijKyVF0pRUeu7y53KoXHQxVg57iItWIcUGXBM4fRjx/e6STy8URi9f83vlO1rV24uAtukl2a2qhY+hsgBNRAjHsnzNHYgAVpmyguNzIBeQmwWZevaxndoqa+sxsyS8sEb1AQKUVlt2m6Bd7YFYERZMBCBA3kP0nowTZeQNz4GTBCaI59xCBQMdBQXgmVIKRr2CoNOrSLUITB1AsYFYXY+imIMoPGIzQ6MHBWwgIgoROM9zdsOMpOQXdo8aQepzY+du298+8JbiZUhFbNo28YsbNI//w63edfm4++AK29c/+wXu0nTKN9LtUZoti5Xt16m1XPT+lLTzWdRXM/ms13bPbay8Zh7+ZmlUdb5ilbuwfOPVV/7WrxxI5UQARmy4EYY0aX5klbb6erm/+Ov1hqN0rENpmo3l1YyG+fseyv51muvnzxxKly/HYJXHXdzL6mqy4kcXhs9eBYOblRPv2H4+qJTa3/+f+Kza7v/+/9nVDoCALJaARyO4a03DAQeX6Oz74X105474z1IAoIUInQdVCV0FSgFvoRIlGjILKhERGh92aTsTSCIdHYZDpijJxTgKIamaPomx7K2gwGRrXcPVL4WRNmH3hOeL211FbAFnlPZiV9E9JyvaGttsoQYpHXQVJBalQAkaTQF+hJ9g4wqSaCbYjP3beAkW4x6oz/8rQ+/672NWk7AKzcjV1tpILQqNfr4xij94I2Xf2F7fOXUt/7hyq1n+XtyLYeXL9119i5YTm+9eq1391nje1Sz7fW9U+AMhujqVtuei5zqQiVYl461CISNjZXSJT4kXiwkcbSiG09n1lffmE9nroyip4dTEo0m2iSgsjZmykaLoFmTVl5FQQWIXfARVPDcdc4773zrfeuiD+wiexahIwHX0e64aYskvWc0HK0WzXo3nlRN103aWivbAthe2pGcHhRZVry1v4iBWx86gYNmQXmS2cw5P2naeddpo9hD1zoolBO6fdBsrK40ESbQHubmkQcevPXm9YHRx4bZ7uE8C2poBXU8/+Dd24v45sW31lILISa93o19+Nmf+NXetRd+8if+4UOPPnL63Q8MHzn2my+8eL2ufKMQk8HaQJGJCG1d4SiVAL1hLyeY19VBU/et9U0XSIDUsFe0ndMctVXeEQfp6oZRjNFRofOeiYLCRClrFJJkVtdNZ7ViRd4LRhGWBkIA6Pc0MFZ10+sXIQqC+MjXrxzcemvn/nMnYobpxrB2HQaAKBqsb7wii6SJdAR+G/0K/7W5gUdYa2BEQEEEjD4orVBh9K5XkBgVleRrxx+976GHPpyd2Ei9i4NTKy8dHi5EWTtQ1LBgZBwWue4gtBIDujwriv71w5uHXXV+c0W06SAMB8ViujCpWUxrhcidyxn6WdFJXOulLrddF7abRog0q6ff2v7w6dFSQewbZr0/n99/ZqNzrt/PsjpUrQshJCodUpjNyiQxmNiXb1xdL5IHVlcXe7PT/WE/TQ/mDZkkT2xsWmWKw6Y7aKcDVZy7554Lb9zKNbxr89RALb2w9Ro39dCQzrLUQ6qisEyq+Xp/U5Q+WBz0k2hQiZKq7myq5lPPrRPw83ovV3h8OKgO5+TbU5vr43lLKWiMZeNCFFRqEgUbN6+ESRURBVVPqbaLIbCJsgFqZW0t0/Ty9Zv/bVTkvFijr+2UZdeRNbnRGGNTtpZB26T1zvlIEcuSv+9b/vgf+xM/8g9+9C+aN54f9QabwE9+5wO//cmX7vmub147/c04v46hGvYpC6ryvDMszv7tv9gON6ApcGfifuWfJpMd8CKECCCRAXCEces//5JRzfE/9KevffHLV/6Pn7zLmFRBZJQoDlzvo9+e/vDHSkpoPmuvv2KLtItsrCijnIggksHEGOGANtGF7hrvvUMLNs9ipERlAIB9O3jvt9xMcyfNsT/+KPxUq+b1YHv/NNY7Fy6Qw71n3rCURBDvo1YUOJjEbEo7/7H/cXTa3Hzx5tIs9WhcYLCUGMgUXV2Vb/vF//TU7Wfziy88EMvlgSUMgR0sdnLVWm1aGyBGsvr46Ji8Og2vfXH+X74QRC8PBj4xZJbKztPGg63P3aVXqyuvrOZDDSpMW+aW0n7bOp1S6EplRdsCwbIPJrU2TQHC1qSchZ7qFclD7wk7XzC93rqJi1e+khdRqwQpDQFiYM8MHLFZRNdoIvQBvOSFaVkLR6tFaYz+6HHWIBBFVIF0jDTzoKJuHTsfJQopk2iQABirRObrfeM36q2t1EVlFDBrpVAEBQAkglv64T97fbiOdcL5xgYmGQG0Xf/nfub4H/nOl77w79dT8A46gJpZKUQNAJFRROsInEVJghlFs16cZtjbrtxkNk0NCcOk8YO8Z6wty3lEiYJNlNb75aXlxTiZVZVJMw5tkipNCZNtXTBKuq4mnag0SdDoSjFz7VgL9FJjoiRGAczu3KOOCrMiIIJ4h1ctAHAnCId3Wmlvax01gRIAwSjio8jRMA0EgTiKFz4iQiIc9YVAkdKKggQiwOiuvvFmYdWpM8f6lXG3r5xY
K/Th9dOpvr2zKEa6SpW69x53ZT8m3m9u1n69XHvZ7W1BCIYAWfhooKXhqDNHiMJCAIgURSSIECBSAjx/9pdhsd36JGm3VWrrxYS4TY0quuC+8IswGOSrx6U+KLYvx/2d2mrgoGKZbF0FMZE7Jf+y784AAQAASURBVBgic2SMEFAMJIpMiqC+8CvTG0/TyUfU3lskLKg5HqkgJQqDSljl0XfclQhRtJZmom2Kus9RIAYQQKlZhNIlH6OElsBBx6GuNdlieQPT5HzP1L/7c1nYCidOGxjWt1OVI7tKDm9j5KSdOZklWosy0QN2nXQ1+E5IgXbpQFHtumLFn//unSJL3vcAdj382q8mrcNuknkIl3Zi6GJghYlxTaoVErGwF4kIghC7YEgMEgQOn/2t0Ym7w+n7u+eeG3/+qV4AIIHNvEtsTT6wx0PIa1YBwmzan98Mg97h/pXJJ3//dB2qAFliSh+MtVuHfuXEcrLqti81Vw7ixvtPpetLcnMx7y9Pxs3CeRciKYUgAhHuvAER7nQh78D+8KjizaINJVYnhqzSIQJHTjM76lkVOx9cbin4YDT6owMQJ0qRd+wieyEfxAfXhKARDN7JUywWIU911u8vJtPiYNLUNQyNgC0Gfd3PGCWoREHSBNTLa7ByWkxmRmbW7KSgzbqe+3rr1m0xTVLkMXSg9KyqvUp1NpTaIqWoe7p/YmklV2W9c1BNJs2xE6da25WLuLh2K0y7wanTVd6/+vzreenv6fXO3bV09/kzDCd6j+EQpdd08fWXeGuqdJpaGxhjak+954Pzxly68Mw733NPown1yrSUUc/tPPWse3WvT4TieeORk3/sw/uf+/U++e3//H9tvnr3a7/wbzC3j/ztf7x137cccPvlbzz9nrNn9m5c6+3t9OsaVZUNncTtw2rtzMf+VPn6y3sXr58+fw7qdDabNjfdtcnB+rGNYzrri08FjAeDRT3zj/y1H61/+//yL7+SDnu9XorU06Osd88apHO5/Mxi+6A2kp09qfOVrNBAfr6/l6SalPbOG00EENuWJUAEEO0dB6fAGkrS6BiIi2IAoRuYvpiAEKzNeoPiYLxlyInW88U2QKphtVoY8pwqHZuZdMtLa/fY2y8UnYAjzUZnGuJ8T2VrP/LXZ4ur4RsfXymSrvGH97/nYPzyye2DZNLmDeR6FCNI62Kqoi3g5sXlS7/bPv+c2FGcKz8r2lr6BEs9YrSLg1pp3YoL2hfLvd4ZbT12tS8V9DeWbr70+Wja96xlz+/KokOdUJHqfj85/eDd1+aTWxeur6yteVSSs3Q4G+/yibs2Nk9Mt/djbFfWi0E/qwM1EzFe7+B4v9u959wSAp1YSQqcJmlxeXscSY/6cXMt74w7c/expeWNp57fUk1zdkW9/vINE3tPPfuW0VmiaDZvJ13cGFnUdjxuz51cT4161+PvRGlefO4GAKSKNJICUMhKwGhSCMB81CcTIgBAFEWoBESAjn5+dLkKAxIzi9JEBuXITQAALKCYWVAAkBkQGEQFEWQQoRhCjDGKhMhepGOJhIxH8UEUEC1IiCIxsgdUIbBVBOAJSOlEJEZmJr7Dz2NEpQg4dg4gKhEMMOhnJBm3MjvoTt118q77z7avPT/fubWY7o6Gm/tF4ZZXOrOdLyk0UFcN9pdc66Gf+81jstyHcakjQYgSGAVjG3TAHiHPOgqRlLBAlIAKAwdCRS7Ovv48TK4lj5xMHzgDi3b/hdeKUGWXlssXr8oN36fUewZDEgMBiCBFRhSJIIwoyJCq932HPPFNknd7t2/ZY2eUi4t/94+KakKHe4N2u9F5WU29NtJD5E5hwCSBJoTI6KK1Nslt27azrslXlhUyMCgEFkAXMMLeWy+dK4r96N547eby8on1tdP1fOf4xtJsYODgtnvzlcn4cp6AWX9QpWthtLEIignbBjB6CN4ERQoFJCSBEmmlQw2KIQRQEJUGDyoIHt2xCUIXhCwKQgsMWlXMFpOubmLgINAonovMGRqFZLHxwQ052UyT9VNGr1y+vfXWhdf3arVw0Du+aU0a4u6iw65lQEBhiaIQkCR0HTBaDS74HACVikqJggDKMzCIQUJmjAhCCFERgHTio2MDiJ17m94YEQgUk0ILwimzq1y38D52oq1L1UR1vQHkRUxQIRt2TkLQNmpg1FrIIOMRELsTpgDgvXQ+A62ASCfKECIaJiPGAERgZg6Ru9Z3s7BUPCL99VZrQi6Ws8G62j2cb43TenB2+d2Pn338w2WyPJG4+tA5femlyY3tcTmFzjGT61ySNrvPf3XzgT8Q9t4ia1vPJuruy593ezVfep4Qo3jfRcdF9r4/crhzMDq4rp1gxH6zmP/0X17/9nNbW1y/efnuH/3J/UWZDUcQoKj2y9/853ngyfLZ4k/+haof4MWnxreuFQf7ca9xXiS3mNKiLPPxvnNz62ORGWljz5b7v/Vv+h/+rl6WJZ2DuuvqOlnvcxAatxAqbBzcfgE2lhwVRi/BfApWIHWgckiGoDNAFpWir9lV4gTXNujEJjTXzbXPZ3d/iFHh+grcfy8VBextQSwD1MOHH3bgjbC0C1CY5WkwzIJ08pzf3pOtm1juQTsFjGicAgZf+WpOmkBrnfaAMxBm50hliFqQiIzvOhYxxMFXMUbn4ry/XjXFs5/4rYGN73v4XBc0myLr5+MbcWn9HixOLnhj/R3fRNVzatSFa7dWNk5J0k9p2PViuP2Kzci7YNdOePSx2olhoIY2h5HJ0Xh0wjHGKnKMsXbNyfXlEBC60Fdxd2vLc9MfjZLgujYuLxU3r+6qxBowjfe+i9p71JlqXZKkiQ/aGFRaEaFJEQQVATKFqDlEYAJRWquQMABHRgEkiK45ugq6yOfObDx0ondr57CX4dkTGxJ4Ou8cw/V5eTB12ze2/+b3/cDBdOvpX/1cb5hmaeKapmSVWVOjn3ZlQ6yLxOiMKY0iFbthPmCwqcJ7Thc//3/+q4U6t5KqT//Uz3794x9/97tOv+fbvuX0sbu/8ruf3jhz5p0f/ejzV+txedBUN37lf/93XpVffuF3T/f6bldWzvzg6buewFrcydUf/rG7D69e/NQv/9I3nnpGD/Bgb76+lv/5v/znf/oXPh5P3vP5115vxLBBbbXzvLJWBMBp6dvWcRBF5LooUQKGIk+CF2P01u5+nud5kUkEjswAXe1RsCpd5KN1DgWQAboogFDVMUtMFHTe16VjBkqTfJhcu3HL9N7XLRaRIULY3FztapmOZ6OlIRKQJiBAIAGRI3r+2/GhI+ACAxASoCCBMiZAIE02t4ez/Xy0rMo5Mjz06Jlzo8wm8uKFqy9+6StX5jsqtNODOcYQu9gr8uWeue/k2RcvXWpaP56OF+1MFN61sXp2aemtrf2esj46Yem6OO/8cn+QpljXTeNi3frGxV4/zYytOMybEDEuMnuj8zZLlrLMOLo9q5dnrTgnzIUxGhGWkkVoj632R4mlVBYBrhzuF9lmTxmbF48/dN83Lr+hC7sxyJvaxeD2ymkDrovadcmNazOq4eRgsFhMXrr5xhS
6SlwyMJnSPZMO02yyKBXOI8+bCBDD9qTuZzhYzhsTu0SmXUxQG+Watu3rIXtJrU7sIDH5ME/Kw71BYmatj4hCauw5KZLCO+9jvaimgK222hqdqFEqdwGjMqPN4adeePm/jYoWC0csjABaSwSxGKNEduvrS4IGDcG06aXmYGdyz3d/z0TFM8Nm75nfz2w+3t09OLv0xPd+sIqSXrn82id+YWNnkbdgKD1M1aN/468V3/fHJEs6r9qtdv7Fn1Pz3X6KrUNgiAQemYUGvtn/pV+88KlPJeP2fm2CD55EKWUULSCO3vdEtbz8f1P130G2pdd1J7j3/swx16fPfN6VNyigquAdARCOBAh6URQpcnrUUoykHjW7J0KhaDMaqVsuumW7qRE1akokm04gARIg4R0LVUB5lHtVz5v0mTevO+4ze88f9xWjGZGRERmZkRlx855zvr32Wr+lq2bQXj3aHh/tlgs6987NifYuOG200zUzYm4bqcGgaEM2jZ0Fy4yTMUWgW8PX/9m/PPur/9XBymJ17kR63wPw6t7Bv/912Rj0R001LMgH9KAIPHgmBRAbhTEivnZ7cpm7lLKgdz5pJWIxpai1M0XY/OqX7vnAev+Db4evvuhev248Ut6CSQnhqAQPAKQTV7i8paSJBnRLCIjAVVwV2rhulvvDy9XmE8ci5raNpLwVqCpwRYMuoohu6W6LQyUiSqMHHRJjOznG0kwm2cVr7XvO6dWTolqz0SH6ijpWp1qcByXSBI6kSCUcQzWzAoqEFYriifKxk8XKJdGDKxV4YeV9g0QhcGKVTkmqGcZaGu/qBhRGL4igU5gkefFLPy8f+ZGlV2+O/z9/nadThVYihcCJIQQE5Dq1rdMPtpRCrJL77i+8ZDYHshu79ehf/MbSKzcZAYAQqWOUNAEbgkQa9AUGQEkTxZZGT/3Zo0uPv3hjqyOtXrd3ON0RllEx3Vhc1aR1p3U0qweDPkuYuWAno4Vud+JVGXyn18acx6NKBHxAhUoUeAM2F69Za1JAk1lJkVOLNtXtPAPYeTNadodPRH/e4AhyR9QGuMMymqeBkICACJCARYCB56YjAEQUEEZ2cU6unEOFAEkLAgsbbaJ30fNsVj178UpcXlw/swpdemlvfyPiIE9iVtYVDG/Vp++L53/0h5/+1h+/8ux3Lnz4k70H7nJ/essq7X0gRYQQRZAFSOb9kmp+y0UmiSgCgkGs5rhweOi+8nuGUBNymoICnZvIjQFIJKKvwuYNf/MaeU+R3LQmg1YIpwHR6xyFlNbkfFDWRgT2iDFYRaaXtG5dl53t3OpYTjBdYIBYzxjYoPGuIhTmAKiCBDIKQgPRI3hhAkBUHQldjghiFXtytbHoHSgWjt6jUWR9OctaEe1yDE6l5Xh/r31yMVlpScVQ3pRYhaYEa6nxJBFiqSEDYzh6RGFCUjpjsSppdF0PTP7A47Nv/++DqDKlXC1ZBAUgSqF4NBwRIvhIjCQAhFFAY+0ZoqQK8j1u/sU/V10rt1zLqyZElSF6san4nm1325PRoQ+iNSxUsP1P/7p628pLf/zS6g62G+HEbEKNS3glVN1My3m6fXlk9II/qMuXrhS9rVOtxaOdo3GtK44sAIBBGBGjyDxxhohzdgkRgtaKyBiVGA0YjVZEELxvp2lKJOATYU1B6RBiZOFpxRIBUaFWZcVNlKJm5z0JAQhq+r+8uUFHHafs6mlR3Dzc3M/ve0g/uN7tJ8BVrEAYBqvLCJ2kcOWsCZSyxHK0feHRB7//xJ9a6p068UivS2U6y1piFXmWdpbPPGZpry4PO21JW8loeHvz0pU8MQJ5c3Rj2rxW+iGpzkrS0ev5Z79x/Zbf01U0GLMe1ZPDV596Jsuof/bkHuNEqc4gr3cOXfBl8GSQJZ0dzcYZv+ef/rd6eVVeePJbv/Wbp+5/BOMWP/d8luXiq7Guy1ZA12bV4b09U2wXw9mJzvG1Rdr5k+926FhW3/zw8fNtzDcG2H705NVvPD+6fggqrnUWi2+9jIczHt1a0pm6mjTpSmvhvF47cebeh/PRtc1f/5XuK5dSbxllVJftY8du3PyeWlyU8/clMrv+2uvp4obqrTqXYXquGRU2Oaa5tDE7uH0whWiXepi2xocHpNi7GgNwEAIwynAtjJZ0EqoGXB2sg9pFmUkr39m5eeHYw0p7bW0MrpxNFBSKtQ9tbVaXl09SqWQ4wSlrN6V6P6tTf/hkPtwBV0FNortoWeri6MzK/t3rMGwGT/dmT3ylSnP4ub/f6r0d/uDX4JUtaEpIkpBIkrXShMiJ/+zXcl1gK4Whr/ZmTQyxa3Qirg5CgEFsGybSuFUdVs1973j4+sXrgw6dO3v/a88c3nrhu53V5FMfuHDzT19vSlPUsXDsmd/5/gdefPVye9DZfOX1p1+9eOb+syl6yrPprBqNq1aqKdf7e1uunFHQIdif+LGf/89f/w3E5OZeE2Z1q7uWJb3rV4aCmsCd3lggiKvHT9y4ub/jDx59/PRj5zYm119//oUrotuRpALXW7CzCvMUIEsODsv1Y6ff9853ZkpVs1uj0Z3xoGWURq1AjNIKiUDojteURJgFosh8PteAwoyASChADICgECVKjAGAogIWVHcqEd7MigYUDcAMIBIjSOQQgw8xirgoAuSYAygPCCRaG0VKARpUSghEGvaCQUScQIqUKO3ivJwNWHEjQZAFGElIYQRhAs8cGwdH4za5wzJcePj9q8dPtiikx1ZH+1cpWdbnH9pMzNIDF9LLr4etkqazTFGttMrz+uY1uz/O0rbPXHAuMGsEjgpFAxAHFmBUyBwFQWmA+V8HRBfzWvxLe82VvdkXngOGRTI481GPO46TqBWIozk6mRiAI+h5lBoJAIgBADjvhS6pFFpnVl2WNyV277orPHtb71ThyS/jxkkoCifCggoD+5Bo3QRXFwWriLGJwjHvZJ2Bu3518oX/s+MBGgBWHGNqoV9Mt/7d38seuaffIj2CV5560e3vZtL0u1lVlPvf/YNsoZVsLIV+N+T9+sWXhLn0EAOnqKIQKazBUxJDJumpNbW2wAlNtg7VtODRTNc874KPLirNoDEIxChotNLoAYBgREFrjCqUIumCTljLngeiaR1BoXFw8NKlpbfYosPp2dNraPNDdTQclhB2tjdHk1FVliJBhOcv2rx2AJEARSNaZQmNkG1YmAl0GkMhPqSJaRttQUcOUZoIASVVQJUrQNDH8OZxSKJwYJ8oUCoqiCawKiWNtuLgW1xmAh1KFDAzMHsOHpqInFijjE5somO0KBoURnQxUhRgqKPXoEis5pAiK9IQOYICMkKxZhZNTROPDkZLDtqpjWkybty0dBzi7u6s99APveMX/x9ZfzCq4QTUVod48nj7nW/de+0pCqqsgkGVgcKrh8Nf/734tW9QAQlpPRH37MXp9b22n5BQWUQ2nupSt9e65x7y3/hDgcSmbWHtvvl0uPK9fndDFpYg+MHqiQCxnnn43hftxeerYn/wk3/54Mw5gczel7azJ/zXv+DYNQEp0eKAJz6jiXUkvqjFmXPHfc
nF1wfzlVNC5CYjWQdhzIKK1EFDLCtc29bl2W07WNGrpJ49Ch/UrvXL/85wTjXpKqulaCNlFenMNalWKUjTUUI69AUhUzth1llQKAQLoOUkbGEBCZLcYMgc48bT72H/nYvc3b7nVXrl/71B81p9UMqREiE7KBqCSMrrx2MImsOO6M62GlDW2tbYiiAJEJBRGs9uCJAdAxKCDSChUxjJ1bOFgs7d8c7OWJSfbfK/bZSDUSB/Tc63pTEgxysiGC9rbemxiJqdXNejTeXktNppTiGjHVGekVV0z/9OPKzqXNOWo1R4MhoLZKcfQUo42MXISiPvPUV+jBh50yqjusa+rmWQxVFBWcQ0M6icbweNhPrFYp1oWQVtqm4iqAaBQ6x8ZoiWFSVHsPfRv+/D+vV9fO/vI/ubscaKq9IJICV0pgiBFiJJsEYEwMRGJmQGFfIwpGQPFIhMYGL0okz9IwHaMSSKzynqytJwwJMSEoMpYjuFLilKWlIEGUKkokdqIMAcSg/fyJ20d7G3X/ku9Pxd1dj+pEGZmOt377t5JrN7VNJMYYop4VgiBAYMg1GSAh5wU0oQ/MKIAzbjQyQIygFMCtfM3Y83u/+0Mb18vf+OPf/ie/dPfuYLDUnXeBNeg0zYLbSbIkeokE0ypgwGbDxhiYo8kMMLoibvfHU2uUUmVR2DRVRjUi1kGKYcVV1VfjEijLsizPO91cynpUFN9MFYFGIABEIERmZgEUmU1GEEAjKAWzHlkABBSjkUAEsQocgXyAwBKjiAApJmStUGtCuvUvA8Jso4x0a38hwMBEQNEzC8js5MeCiMwCRLObhZ2RGF1dBN6duvlcrzSNCbI3Dv/pjz5+6uGH3vPu9y/3Vnbf+MaN558GRCB9xcNDDzz08mBb+nGwNoBBGQMpTdFrpTg3UbuoAC1otz7lJ55YeOt91eU3ACgg+Iq5afcO7XeHT1iW6hOfXfDDypURQBCUgCuDIcQOxEMtvzZqKlUPtvzrn5rUSm9esBpRSyCnOsa1VWg2JgdP8JG7dNLJNt6QV17MpkMLXpIEBCOoYBOwbLIaKgc2D04pweDBa++RVcuMFtJ0+ejoynpijPMUgqhURx8tB0GBCBzirIUREZlFBRLm1NLm8knzV35u+43Xqiu/bdyeiSVkMC6ZgYxSqhJ7OW6ef5VrCIaSZrO2vKb98Pa5k295M15chXMXxpulIWAI86mta2h39c5861nI/u4L65dcnJQ609hJJVqpIkiQWeJMISpNnm+F05BippUGUCXUQaKXygmSircUZpJphFunfQqRZ+Uxz6wUxYCCwIjCiLNsGgoSAjOJkPpm7RFlxmMXEBFwIAiQABiFTDLTDgFgBAAUhFnRUgAkRgDmbzoxAVFcCAqRgwCjVham42+989FPPP+FrYmDTDUslZNKmcSzRjSWtGLI0Ey2N0MoWt08bbYhS9hwEBGPNmm4EKvKUaxdOSyLaKk1qou80cFEpk418s60P1aSZzZRUmkGDnVqDSAFgiAsIba7vZGvPENkmAxHSdYYN9TD3/6dubIXzmyd/c3/6cb1mwXEjT5PEVt5493f8wO3v+XhUYDTW9c2/+Q/qEmZp7YeVOtb13tibjz+THChRKFuOpdhY7z1+i/81L43Ha5EXy69f+EVi71q0z/zi/9qAXy1N8lsb2Hfvt2NvXwuOc1V9eUvNdgfSZumm8qer7Ru2hStRvGhX45dmZkGK1GEROi9c8jWKvKlrypqNIemXnj3+17eliM6xXGxWdf4rX97OC/V039yZy/pu+n09gfGZ88vTWuNghJISx3Ko71s+PjvT1/97JG0CY0WNFUsgkoo1a1srgcJTDc2PLlY1T5kVdJZimH6J7+8/fE/PtxZDDqVwALeh5qAIiniIMDG6IAAatZH5iBRoigFEAOR1jZlXwWTYadTTCbgRTiozAYVfc2TGsp5rxZx+Y6TF598PR3XMXr2nGVJ4eq0mRc1ewaNUFfTNNGxlX3mxfV1NokxnTSJsTbaIIL4sHV1kPrr03qydqP4nh/5e9N6YPJGqBmt3bheGGV8nuZHDg8L35zPhoMNiYFDnFR7JlEqmiRJLMDYh+G03J8Z7Wsf9OKBXuGc3+E6xfbygq7Hkyt7zU7L5L2829u4esP5YDJxMVScDKaqkS/Wk0LxrZEp4TejpcwshCTRe0IEBSwMolAhKYTZo4oUGYVBRdJy6y4cmSPEiKBQRYlRJArPvL9AChCYQYRCiFE5Q98kSQmI974KheNJ5YOgRWaMpAmBlbKk9CzxRIg6MaQ1z7BJKIiRFAvJLNhUhci1N2QYgoBWaBk1K/QgAFgjbo5GBxt+3EW73L7z6ENrF79RurK/sZb0etxO9iKlhw7PnzwmW/3RE0/dqK4tf/BvxIUjkyJGxsSYuWPHSXAy9VGROL/34lPVuWEvy0oObNko9BqSg0c9JKPxZHmuF8au8hxstm/p8M5wkqYt5dxw7eqRI6e213Zr5vz0o9k78uHgt/PdcdiadFOi5x8vX3tCSrFTTnLtrp9JE7k6GaY3nUaMXoUqpqkSzxDAvXFx67FPtd/yTtXIxoNd6A9p4vK5HuTpYDoCRM0KK0m8K7a3DYXdK2/4spiO1oJj8LUO48FosHLPOypKdbs3rb1O88AoOteZ8eOhnxQSfHPxWOSynG7XzlEzRcrU5mTz669yzS5BfWxu8eH78P57sT1fTKrMqt5dJ7LGeKeuevPdUtLgudgbT65evPnq03509W/+wDufePyLZ15xe1t+OBGjVVRBQUCKYo1j8Dq/OvSTXKLKnfMYmYAEMMaZI0FFEoi31J+OI0uk2aUNok50VGiaOZqcUSPpKIxkRRAU6iRKLKKrI/soNIOyBx9Agf5mGVkihxBcGV1dlz5YazSLYjGazGyr4CMLOxHR6ENkjAosIIPWBgABsizVArGuQghR0CltEQDFaLC5VkaFUqxCCiHGgASEihECSwiRWDSCqxmD2r65t3Zt9cRdDtkjKhCYO3XnzcVn6tUh37x87qO/e/zDj8bllt5Ym9u/UG6Mps63Dfm9KaWcHMhP/q0fGmxPr3/iL7rDQctKrAJWmFgoyhqSHECIPQEED4aRqrhv4/L1f/cPD//0z5ilA8X6KEFdnD2XHT3MxbR6/EvX//jfHhpOREBQTcZlpkg1k/bcPkexbrVwfytm81yH8ub1bOpVnjFl0mxnvWa4dI0qB6ECqcHzQA7yj/6a+t6fWfujX6DXX1jQUeUISQK7BXgFLkAoNCCU66LWENl7kQA2U+BriAVk8UaWHPn1P6laxwuu6345/67vNfe8c+9X3t+rNl2o2ZekCYOkOrSR02k/5N10+R0gFYSiriYCSsXMLp42R9+MF79mR4P45CfKz2x2m222BqKIYZtR7arQwKrZ5Pv/RvOuH2q89undi+NEIdoUIvjasXfguQVm9zpsbE7UModGv66ULKqF5flid+oubEwONtqnFnzg3eHmHQ/uP/9a1WjOnzm/HbaGD761U6zGZjI/8rL84ENyeRo5QVAco0bwextf/sP/trk5qWoJHkA
hCYMAKpqFN3m2dlWIRMAQb8FVZPYSRgAUMITWaBFmIB+jY4mWQmAA9po8QxUlBIggwoIEEJz65sD09Su7D333u372R/9a88KN1esvLadza9ZtQnqoN4dbE014+NTth247OowFBddp9kZXt4fVWE/3mpNJa/kO/O7byvOv+Y0bdr6pG6qe7B3sLmyd+8Zg9Mq+tz2UBotpc5Is6XJOzl2xG9v9nT5Ma5hOakhxoYfv/db+kXZB+fqnPwXF6spt9x5bOYqPvHPvqU+FqghIzCyumkYO7/0b1X0Hd9NQff6LB+rXwVV5NRp/8qPrX/3souY9rPHY7Yd/9p8Oj2Q7X39s+Md/MDe3cKiZtq7f0M99dePCukzCNOH2gyfl5LHs4WOPntr/uc+98Mql6e2n33Ryc+uFM5cUqV6nszGFIrDSqBR6zwBsjJ6WIRiVZQYwFlVopCq3hllYZK8Mg3rwZ5957K//4Ec6XSsUy0mpjfaVV0TaagXU3xknWYKkYh0JEIWCl1Z3/vCxQ6vXtg4cOnjs2Om82YjA9bReWprbWr0pPFUeVy+c/8pX/+L4PXdCSx/uLvX7G1OIV3b7BcDWpDZKl7vDSjRX1VyL2Ic8sdFJq9HsFztJQj7SzqjkwI3UNFLrAOZ67QCxl6V71zdMYhBlYa49Ho4U2CQ3rgiTejw/3x5VbrHXma/B+8iOs6ZtG1rottJW5/qNzYPd5r5Wtr0z2ByO263c5Olu5ETLkWZ7fq59/uxqjeFgq/mW21eevXgjtbbR7I6mxfmd/psayZtvWxmMqyuurARvbo8O7u/tizrJGpmt9ypfbU/SVt4iKafOGp3mdljVFy/1A5AhPSkqm1ljLXqPPi7P93YrF11Mm2ZIuuDQ7c5tb6x18nwwmt7Y2EmbS0bD3ScWzl24KT6UgYEEHXgBk6UTF5qZycD8pVFRiGhRD4ppDAwIpHEW3nMcdgeDt737TR954G1P/fcnVjrwtX/z9+YHqytGQ0kEbJBHWzvd3DaSNG/ZaX+qGySRWWAcgmLQgASsjYSyTA0Fq2TqCYUUCYExKj72lElfGgaRqurVDKEmhY7FECkEitEonK/r1f/7l1ofeG/r/reWg0k652Vta2LSiMAQTKKFFBACRwIQpRxDVfvUUmxl7sDtiMtLuY43R5Prm8ZPbGAIFSQ2vv1bkzsP1s99xW7tWYmCCa3vhs1tKZ32MW80yKgYnEobHAl8rSF2EROrYlWGVKWdFpc+hhoISZQyAFK3XT3+0jPR/NbR+0/j159bqkYyHoUo1O42mnYY/TTl3pGlLOnI+TNY940jDB2nbUgMQ1BCpi7rYZU2kgjSbi6IyhTN13O3yfm/iFNPxgoR1gEiE7BEhroWdkKsCLxzqFBJZEarjDBFL4pYAuuIHCaQuHHHBgPNISbitAHvHAe2CqNzZY5bDzxsTz1gHvtTs7tpLDOQFQLngWJzytNf+9mFN5+AI7fD3R+MOmcL1EjyPI4e/2xWOoCA1hAr8AFUBFVDIynLSYYQAIFJPCmlZ2kdIfSKNM9OuTNjGoHxNpXPf/zTpx59z0//vV944cLFyoqLQKw4iq+9QcgUBR8rZhclxlCFIkqsa5dnJrVWMAiZSRnyXFchBqxzzKP3SiepMeOqHk/KSmg0qZO02L/cmmtZ1vzN6wEKM9Ct+zbiDEYBIIICREgE37TgIAggUmTwt+jU4HnWCEQQQUFkIBAmCIEBAQlIzwgyKCg0QxPNYkoSWYQRhG8NAWZ9Qk0zzgWyYAzR5gkHNRgHrYOveTCeao1G2dE0PPGVp154+vn3v+3N9x2760CCX/zK82mUgQt/8dwr7//xnxxs+nhpfObTvycueEe83Drx6P07F66oy9c6tY+IadDhxQ1AW12/BGOsR9qiCuu1XN3JDt9dSwndjh/UZYb5Ox6Uoi6eOqP9hEOc5mDventx7vO5dx0j/U98OY3UjeJLaXX0toTG8Tua73134+63SeeYS5qR01QNko1zm3/4W80zT5MLsQy4uBLuuWPz+rWDzRTryERgjCsd5InTIU11PdzLM6jdVhYnXNU4mZrEQnQEgBzIwKyMDjNIUGQVNEVS5PsZ5R/+3sHBpWzpAz7UxbOfwb0tvTuVBKYlaiICSQlJohCAMdMYBxKq+bT18ANw7P7uoYfPrP3GqF/mhDyRETs40viLTP/3a6O1aeU4JYI2BIgyGDmbKFSgkRgQApAhANAkpNFqNAQpgK+xLIIwIqJREpGBMBIUkXNSGjhEiQAKkBShQhITFLOGUHkUUAiEAoCehViMhoU0G8dYe888y8HJDJ4VWDygIhAlWWrrKEqAiF2UyIIaUUBYQEQRcmTAW5JjAIjMHBmtBmWiZ0Bw/ZvFE392f75w1UBtRINv6UyIImGoYkc4VdKFul697rXO5pqSWVGsQRSQBNSWWFg8slO+Mplq1lVqlR5s+xjq3JiVuTm1Mm+zbG+47QaDlm26qB0zGyvM1V5FUZUOlM7I0+Jd951MWj2b7Y7qK1845y8+lytsYg1g02bW9tQxpuyPHv+1X8mOznUP3n1iybbW1rJGFgHOb/UzUd1GgzNgoeDYD7waekJcjOPi0lVfoAiSJUoNhNiIaDQmjSYjrm0PME006OnOyIC1mS1imVvJlhb6NzfamY0uRBe0EdtMvPfsghbUwGmWG+VjNUy0qjpZ8u63Ntv5Gds68JFvqb7C01fPTWTx8MGF3NzA472ta65YuWfux/7N3p/95/qxP6HChYnLUqWjk+tn06pcPnCAXXWl0Wi+/83q4kZz5FI3LdZuwlJz7b6T5oNvraZVvlksN3qX//3PLKHbd+xgNQbxlsvaJg0Wihy0VkEAoqfoASIgICpSIi4iCRgQKaOi4XhkjMmaqUo7kxhUG+tYM0eDBNFALX7kJXMba5dab7tj78yaWi/y3I4ntWml/WJqdVZX4Guohj5tJ69t8I1IKqW5xN5754mNSXXuzE2S2Gyke7vD06cOnLr/of/0W3988s73EezaVmgfPUhZwyykrWa6evbS/ffte/BEY2+3T5NJ1a+5093eHXuBiFaxGUdutVoUXAz54oGF118775mtltTYjMrJ9oW5xc69p/aLMhtbe7XChUZnIrR0dPHFN16jCGsbQ4NuoZuTMgA7AJAYA4AsPPsQBGYGwZkACgS04QgEQMwoyBwhRIlAQijAzAGFSAkgAwSOLCyzm+WMM4OKQDSRoI7AEVihIHgBkIgxSh1CCCwco8As/TULYxpkwgiogUDbhLRRCo1SSFELg0iQSJqQFBBEYB+c1IgITEhATKiURiEGKX2Y63bHwV/vbx851N6crM0dWjz/2sXy3Ov7jx7rdQ+W5KIItfVkvWiSPvv5m2ef+Y13/Nj39e64bZykqJRoiKhUYk0jy4u1J//bx+drNaq9Q3EudiKggIw3W3kn5rlKLIyK2pFpNidloTS4agrBo6edaxck+igGksY0pkv3v3X03BWFWEyBCvR1JAQVQU1D1d9SCg2BMgoIFYlHKGNtE0iIypLPfeyPHj26QsdPB62Gg13PUtSonG
rZ1IjUl8/tXHr5ytPPF6sbCSJLnR1c2QlRTLq3sbVy1+mTb3kr57203fDMpK1SKgRG4ei8G43GW2tWJZXeLoer9c6WmjtiG/PjiTR3hm57x8Uw9+gdK9//HUW2NI1W6lDXEUhNYl5tx2L7ijt11meL3kK9N6iKTc9ufv+hp974xu6Edte4KiAaPYmhZURiKEAkVdGqyqityNORUyYNPHvD+wjCAEEAAIPMbBpyi1WHoEKYASKBrKiEAUQZ1qbwXkPUJCSiIJJWtZiILoQaACMoIELUiU2S5Nb1oK6cUdaF4APXjK4WS5hqNIlWwkYbX3vFoDA6mR2gEFEhiCFCloY1ubIJEWpV1EUNqo4EiVKksA5KEYGyAqF0hDoQBCVBAZGwsCELwrWPAgoorX24dPHanatvrPTySSE6a84fPJguzuHORFW89sUv0+DVZo52ErrOc4w6V4mEZLFhMnBTt7s+0affeuQjc9d/59elcF3b9CWg40azOXau0UpiqCCIq5wwJil2XMZfffXa+Z+K+5cNNzK0SabHD92XH72t/7FfX3aRUsC0We1Omo2mj3Xiw3h1Le1pn7j06H31+rA5GqQbBQy0QROhxvEuuC2tBFKMRUSbUAuWHn2gPHlYJwv2od91rzx7+df/2UoebRWQkIoSbALeg9LANVIQRmFWlEARAIPkuHH7O1b+6a/63hH2XkuFDSGLIT2E9769eukz2oXoylBMMoNodUvi6PyTrff81UDaxShkxOQxCCOHpl76wHfF1a9NNzeSRpI0W4Godp5QFGqSOu2mm2Zx5fv/sT/y/nILJ9vfaGQQa4koFL02kiV25UDy/Jkz7eOXTt/5Jlm92NgdtpP50Zceu/C1/7HkaG7fobUg5d6uq0MnybEu/MZw/fm1eqsxuvTqcx/7l2960/Hd81v7jx72T744vT5K8Ul13wehkTJz/9LZ57/wlTKygGgFjEAETIg8o0NAFKiFgFnxrRpZZA7MjpmRQNgiWkILopUixBogKBGkYIVnKjQAxxwZKsdCIKRBg/0m0PeOleOnDx5effqZI5U/dfcchdbp951a29nc+ItP77124bY7H9AnDu+8cqlwQ7OzanR7/My5wc3VvKj2H2hWeReSBopvajW9tO4NJI1czS9kq+unzZJ86YVqexdu7iaokp2d/q//ZtfovA6x001IFaSKu5fVm+8bed9qz9/1zreIGk9k7urza/jS8w2uEAOH6MaFSjG4kuLw6EOPTiwvpx/sf+2rJijTaVgP2mFBpvOdH8g+/G271CwUNN78UGdrHa5uD89u1PUW2mz/3Q+Vu5M2q2rbT29eHJ27AHcdPLl0sLjvSJl1fuj73vXiR36a62I6nGhW7JlIt1qNEHhSlgolM9hqZFNfa6KmTbNEB1Bl7WrGGOumTZ77+rMrhw9+6LvfEjzXlbAX9n5+/yJp46upbSRaESkQwVA7k6aHjhzd3Lg52B1055qodCIoTK5yCKosy/PnLt5xx21zc+1L17Y3NkbQLA4vNF67ec65OKwLNNr7qt3Sa2t7LngBWeh1ZjsdQ5op1PWUOMYQSUCsdirWCQjHRmoTqzc3d5pL+08dPbC2uV0Uo2YmmqTXzY3Re24KDIk1oSoRledgM6uTLO/1fNlf3RtnZdmwum2swXhh2G8vdKbj6vjCynD9ap42pMQOtZIkGTNcWNs5eXDujgMLl1cHkFpndKHTl69tv+uuk7atV3cCoHrgjhWKsdFtJ1kSr+1sTSa5Tjo2yxLbtWka9KlD+16/foONHQ7qVMWgDYm+fG14ZL4JrFo2v7E5IY699mIyl75yaWNvuGGMUzqpgObnDw1K5LK6sdpf6PWKwrHz7RzIy/YgIsBo7FqO3vnoyc9f3vn/R0UssayqRstqw8EziJSlN5qyxIw2R4/c/d7Tb/q+T/3Wqx/50Ld85Tf+lyTNYIzj0ndz1ZCyt9jgGhyTG3pEUDb4umZRKtMSWLEiEtMmiDLac+Jiao0i4ijOYcpOkajgtIsxggcEohC8iCalQmBFpDRxHZcunZHt68XHPmdBuFFVm6NulsfgPTNGSRJix1FAaaPTRETzuECErE2tG0/zf/yRrh/zHuqrkyQ1KBFcVYHPHv3ItZ6rhjVWL61QxKQZqJ3oVDbXdYzCGJxLs9yDJqWtQSWVKAqVT3u92iTbm4NGMwHllQQIyMGDhCRRx2vyH/2E+VMOztm8G5ttFo4s44nbe/R9o+9+ZCdb8LuDA5s35/emaTcPIR+qpeSDH/AyGn3+M/uzgTVeOPSUgy/81+LiY3vnL+8HwoQiQyhixEgBNSqUQBYEQftQO09Gzd6fjChKR5GZ2ReEEVEb48jv3bW4+M/+3niiq9//czhzLh2PpKwNsmLkKk5aS4f+6s/XEMvHPxljMMwkBG5WiCII1Li5B4Ovw/KN6WTZHXtzSBTUUD/2WGO3n1ACSerroIGACVrUT8F+4G4KzZ1PPt32rEkRUfC11gZEEAVv5YkABAEAYgAjZYP2Hzz9yhNfeeSnf2zr4jNJqkgTAaQmAW2MSdKsETx7VzdSO63qYlppa7LUAktV1s5Fk6jMKOYQXfQxgheFSjOS1nkztakqQhwNSy5qExth4rLU3hqYihASgGicUbaFRRhAvkkvIhSFMyUaIVGIEgV8hHoGxBMBFkKcZYJg5jpjZAAkAgAVgIEBZQa7uCXRQQKe/SChIhRNxIK3LhczYJICIfA+MLPSFDhOgkxKT1YjVwoAYyhHxec+84WvNbJ3vOme3uHby42Njb2N8xfWu5/+hFk8etfK/Z39venqaH1E9uSp5AM/rE7fuPQb/8L7PXbBVbG9crjgrBp7mcToxVJsM1ZffWMhFgMOg7Ob20OI7/iW1Ue+y1Qcb370yEtfswZMG0pTJ95LRW4aM4QYSRnNXG0DLv3k363veYfftyKmASAhVgFwAlmx/NDoLUN46Yl5qZKKR6ORzB/2C92rn79xME90Jyv3xpJwNa2bWjGmBBg398gonScuGgtNLhi0aEJIMIATFbEGHUABKDIYNCmzN5/1fvIf9w89kNgFJ5D9tR/ofvtbz37s37f/4utJCWih9CwkJYWGhlxhWdfdHFtzadGo2q3Ws69f1tAy3QNnB+sicvKOhWnb/s7ra7sdFVp59CGLaJgDhCBoEqJEGS0WsXJMiIBAKKQwSzBRKAFCwVWJgMpYUhgQIQjWQTwzEoEwImgAECYg9qwU6Vn1EyFVVHkRJACeRd5mt9YIsZcngylPqyAo38zBsSIiARSJUWofk8QawgjsXCgDR4mAM6wRgsAMVCQIt6r5QABSV94mGpF8jIgxd5XVWpu0TjKOaPLG5uamTcEaSiRQAu25LF3omJVTU8LKjVutBgcXfQSxMYqgVkhbU3np2mDoRetmG1XD614jW5ybtwk4V+2OJrpNNmkOtid1xf0o0c410RJzplq5alOnd/87HnnuiSeKtSe3xmOkLFcm63Zsj6d14kZmrx9DFU0OmYZOj910o3xtc8PidMp7E6gOrdDf/I5RZW+cefbQy6/ToG61m1Ud2
wm5cWUN5aizpaYmGRfeZMa54CgGq42FgsVBTDBMJiOryBOxkkgYyPUnV1mzn44VWokkRlc+aDJEBpVoweB5bTyh247201bv3ocHaWNhtHp0su3/++/h2RcOd6xMr+/8w++Mk935dhJimu7E8MnfiU99siFs8pYLJUKkGADq2NpfhHTYnG/+o18q2sB//pg88bnF3EOYrvb2pX/j70C7eWB9c/JH/6La2z2ptPNQrJXK2rRtAMvIJccKUBAlIEdKKQqAFyUxBoiiUwNKgqqhgaXy9dS1IdRVRbtj0ZD2skg6eij7hYWk09BAigeh76vlB06u+nZ17dl9EPJu4iCChsjOoEKJqdGbe/KlzbgBqTIMwQ+nRavTXTl6YLq9yxyybmPiqq3B8J4Tp1766mcPH57Hbhx6bnXmE7DDazd3v/aslAOoS22TNujB2DGyMZRbWzuMTubzdKm9xHlWDjZuXL/R7TZ87Re72eZgtH9lfrgz2hmV83PtYlottyz6oiy8q4sr1waL7XYsnQfuLXWHewNz644MmsysGx0lAgCDAAsA34qU1gEIOQAxE4tChSwcUZijoFIkCEGCAEcBZmaUWbJUEaIQz0SiChkAQUUAHyJBEEAiywrQKEMkABBAE1AUZgZgkCgxCgAoUkiIbEgpjRBZJAJLDKIEtEKYbd8B2HmlFCWIwJEdcxDkyD6zSVlA2ZaptmubG+ONK81Ul8g9NNPrV/z4Uqfd7LYPqRPHN25c1dOw1E52bux+9V/+hyPfct+R7/2+keoQo0ooaWbl7vXLn/rjRlCIOk1QnEsJM8HgeeO5r3T/yk+KbU8nVeGkkzcqH70buIjddnfqnRMJ46G4Ym5uwU37qsKWc6NpzU6cE1IUGWnW4mXQhORBKYoxsomhZ+1ti/W4qNdGPKrFQyPAa//lV5ff9d75u+6abGwdPHSE9lari6Nyd3vjG+dGr10OW0GXsJBTYFHzWtrTpNGaO3H8wKlTYhM7txhTPejvdOfmfIjj8V6MojQpZbxKmvtX9na3qtHaQisj2B8bLdGxJeNzX/70aFq1Dy8f+PbvmPYOTiYhTMfKl02jMUixObj5/PlBf21ne2/fmx5pnrw9Inb2rzQ6XZWaSxfOji48X9YTazCxCEF0gplNK2LY30uaZjSlahAUKfFeWAQpMntmEeAYJfIsQoY8Q/wisMQQSaNoiqVrGN0iMQLCAMJ1XUwnQ+BgNJnEGAASttqEGKOIMYlCtEr/f1hrAuEYkISBvfegBHVmDSkUQ6SFwSjl2SgqOURk1KRQQBhjyLRpK9PSqZqFQx3WISBaYVYEhlAiAiATRVIzaYhQZIzaWBSIkYKIAwwBwtRhUFcvrn71f3zqvrXrS8eO2s6hJLp2p7FauqbTaQju+dUsEVdHdoiBU4QEWTUSaufDm5eT51657R3fWi3ec4r+2trnvlLvORlMIYS85FwTVw4IRRsxwsxlhKqGFNpLNyvZuNHotHevT+XYocXv/CAup6GZUTEaV1FklJrgy2ASoUpTyb6oM9BVfT4fubQsYFICMiSErgAXQZt+TO3Rw54S66oG9HH/UkmcpBm2j7tHT9zWXNl99nezxdbk859rXLqOXEKWzTR0lCTAbIUheHACVg3z3uLP/OFu0tJ7NU9HjflUKWD0ojU9+jdXv/YXJ3LR6HVUCAJ1CL5W/StqdCXYjBFNBA0CZz8bLj1Z7V2za7vGUlVhEI4A1hgbglKkCDFL9+bnD//Qr5Zzd3npME7T6ZpKjATURN5Fm6deaRBzbLnjz325Jdfrm8Nw+Q1l59SNi8cS315qu0L2d7JXNrcW5xdto3Pm/M32wZOjdPndv/hvrz/2ic7772701PBrFy499WTv5k64fOUbL//SI790uz52khGyXiNv5bvrfe+iMAFBjBwBA0tECFEYsEIwhAASOc4C1opQIypFwKhYNIJG0SxEs74+CpADQKOFwUVOQZCw0pERakEBMXwLa/32E4dPVJy9slp55oPLHKGIk+K5l2F9vDJ/Ig9QlnX0lWHnr11ZvbgLq3JUZbpGf2W8Hjf42P7Db39ksDmUpWPEU7++M7pwg/aGbnstlFU9nWibi2c7J4v1OPqqv8cYIFvOm3PN6cbl9OOv9i680d532qPgQ0cbty/rpTsG1VOiMpNlpvZSi7DrZumVT/9B9wFGle0+/vyhQyvlxi4aq0IlysrCPvXAw1usmIP2Qlur5dlXVGka73qI3fTS88+sLO9r3HFsGOpeY37y7FMtFr2XiKGH3v/QG/3hpeeuNUiBBJsbbShGzYjCrIlaSdJsGC04mdT7Oq2yrgKH+V5zq19UdWDPWcP2erZeLy8+99yZRRpUPm13XnnlpWPLS6duu7N34ODBQ/sEkT2z91prYzRwsFqH0vc3BvtW9oeaEbV3vhgVjWYOJKwhb3eGo3HN5h/8459N2+0Lzz179pU3VFOGo+l0NBmPy0RTPam0ApuaUNWmnQiaqnYIMCrrIAJAc732+ta20UnD2HI8tLpZjcJie66YlqwkULQ2GU1C8DIta4lliBw1XFvfThkyrSIAakEFa/0ddFUjTXwZ5zK6ff/ibjkccahK19Z6Y2vNBpFQt7vd0aheH01MQ0/LGNZ27zi2lCb27E7/0FL39KGVyc2NcjJK8mT95rDdyJaMVh7Xrm3Xzi0kxppOgRzK6MZl2krm8nRrMNwaj1mpMvil+Q4HGg5KEI4RimF148aWIpW2kr1xMeGpQmnk+mC3C3vu0vVhp32AMCqBkwsra9trOnBDG9KQZFTVtavBpqabGj/xfylV5LyXKjabGSIgcSNLjFHKBeW5O5ecu3j+nQ+b9cHm8v0PYhNH01IQQg69e+ZsnMarQ6WT4IVUojVNp854QIxgUJPSGqCOk4nvfOh9FtTql5400xICqUCgyLnaGuLAwiKkQx2VViYx6AVI2ICwVDUrIm2Zh2MzBo2iBq5tbaWojFoDUADtRJMBAphNJn2tI2OE4GMSYnz6dSsSJSE0dRXQgMkkBg9bN3tH7izvfFTlx8fPf63FmqcDVdXeVYKaAW3eBAiabAyEYIzOhKISFSdxL4Tjf/9H3NzcU//pP9xW7CLUIFFrFYTEk6WmEpCEnSLwqJXSwoih9IND95wcJKmYY+qZFXp5l1JU6+sJ9Zfu+a7+3afd2+f3PvNRfPwMllUSsTHf5Os7XZ0wAEw8aUUMRABKaaOiuElwlReL2Ow2QwCKrChCXQMpYcchztomKIqJB63m3E/9bH3sHlNh9iNLcPmi/9NPVGfPpr7EyhtLvem6+4Ofne5sLq3t6IDAChQCRNAEEIEBUMNYAHfrM48n7gdrn7b7g93/65+vlBSEI4PRFgKDoZvl5NCv/8r44TdlMS0u/3Tx7LlmZLIxGJgG31EIog0a9iwMaBAseI5XA5f33n/v//V7l3/1N5Jm8uKzz2ZZyp412SzPq5p7jTnUOsSKSGmrdIQUDHPkIBJZgNt5asgYraNz2tiag5PQUCZEVioKSppaqWLMY7eVaWOIcXF5/pt5CoCZ9QwBGAQxzpKuiEpE
K9QKQRHRjF4EzOiiuCBB4JsoSCEQoJkTWQRJAABFKSAiAEEk+abZXYEQieCMxqdmqaKIoAABAYk4BEZBoohSMDcIgUOMPPNbIaIPQQF4AQxslQoRy4n72OPPZibZ1+t0Fg42Thx8/LXh9ugrjcXnP/LWuxpXcPzSejKaVHvD5tLCuLs/G5WJjLkr8Oi+9lveN/7dNV1cbqZUTwLpJNR69Ymb0SgeCiH4yXZjvlGGeOqH/9obLzzRraClWi2TTIIjRpXODE4EoFxm1LveWz/4HaG7PyBHFzD6qJTJGkpDCXrxnrfvDJ0acSowvb4Rk8eWv/PdxUNv2XnlcvvOu6adVuvue+2w2P21f34AJWs2J7uThtiimEattTEspTYa66rM7bWVQ3jPae109vKFzvWbXWU813uH5ts/+nO7Kw9VPtGijSEB69p33vlDv3Dm8W9vTqD2kUGhVWWMHIVA2MJuXW3tQTicXvjqs5OlE69ceOVd3YNVksPBuY9t9sGjzOWp5zwItqnyIQQcAZGgaHIxkkLSqGeJM8LMkgUQ5hDAFywVCINHKHwklFwRIRoAiRJRKhZBZZQi4RAiCCkBgSjCFCUnNBZrIQ8gIqRBGCLTdulbEZyLAhAJNSlmUYAyC6kBBsGaJUEQAE1KWSIIjjkgKEXCohCZOYqAIoAIAMxRoU4sRGb2QQBJxYhYbA9s1lB+qpgpyRrLc8pKe76ZLi9O51pVahqHFjcLsY00qcZlXZq8a9JGLBzXZRr45o21p167Mk6SCcNctxlEu6HL8sxRHIOQNqCTcemGI1S03Dx+amFh5cDRu9KN86tPfckEYl+vXj730qtn2uLnuVAGSesq8t7utGE7977n/Wsvn0+TcVnF1a1dD4FSU2I22K16y71mT09SPvzWt574zu9hTHT5/ud+5ifVnk+VOO37ZZUqmZI3neaUqJmQINdF7UM1TWPVTNPl/a1HHl685039P/5UPPOyRpWmzXU3xIVOemS5On34eO/k5KN/CGsTDSYl7SNrQmMsU5xOg1tY9isLB77lO/I0NFs2vPJq/MwnTclqHFUIUwMEpgsobGACiJXdOh9eOXM0BakpcKWIBYJO7Cilub/z868/+7L75H/r/clvTsIaX/rG0X3H6ytrY47d7/7QFczhN/9L/rXHD1Q+MNa25bonG7ynimG1NzImotI6zVHq6CqL7JVmjipvCXrlJhJBaeOk7CfanT7WffiOuTAqn/mG7o95WIITN4mYw/xio2+qyV5MapCAVpuOjzc/9dhbf/B7Ll66MT23BilFlmY2r6OEeo812bbdjOalcXAdk/tqZxzjtc35ZY6hOn7ncn93tDUt13dHWm0oCLUWuzAfUix3R7mT6+fWXvj6c/31UfBRRJJmVhRlq5OMq2JhsaMAXKyPLcxnpnV0eeXs5Ruj8fZyM23nDT2HKorqLm5c2lYq9VYXO31w3qam0WqUsaBEyvE0E6qKaZqaalqNJ1W7eat6Y7UlAUAMMcyW0yHGKBBmdDAADgyIt74PIsAexCAqEYgsipgBwowpw0AAigRhtjdQETmwYwnKAPvIgALoYwQ0SKIJVYLRcZJQaij4AEARSCEpIGSBIBw5YkxIG0DFIAIxCAkowVsiNq0EBYEASIAYmCByFI0KOWomJiowbJf9vcmkl4QstatlubLYC4Wqdoc4KvvDG432mumY+UcfvO1QsvfY82Raa2uj61/8xvZrF489+tZjDz4a53vVoBp/7bHqCy/0rPYstTBZG50vHCcCYa3uP/7Z9oe/e6rRdho2z1wxDpV0unO1K0xqpqPBvv0LNjsAIGYaF7BY//JjvOugAGV0HTkgRoiZUTUyEWdae4jaYjh+OP/Q++Nth1pR9Lm17d/+QzsoUo+jVb75B1+8Zr9oUxgRlBXUHlIB5SEviFApQCyUc2HX+XqRH/iRH6gwK4Y+S/3G9SuN/UcT2yj3RiLe1U5ACeqgSCcJieGI7bxFCFNwQTOPRpNnn1u7fFWn2HvXXVeTBo58w5eLeUaj8frF8xiU3a4mV/dqJ+s3LkdpHDt8xMx3pkzYntsaTzv33fXm0yce/4+/G7b3KISGEm1oWPv2Xbep205lc/NyZWPHXZsUZVk7nSbMECKjgHBUDBwjBEYWZpQwa6wjaDWTIjCLBrKkCClw5OgRvHO1sIeIyFWapYISBYlB60SbZJZnleD/v3sBioTAlfeVDyQUODKd5KNYAAEAAElEQVTEOgRSGINDBpA4O6+gwsCMFFGhiKTWaEHFBKgCaySDGCCKiMTAiEaTkohKU0h0dCyISlsgRgRiII3BA5JiiZ7FEIvQ+fNXp1V15717i/snczZRQRU1wLTspWnTU88go9e9RoNSX41cdK2PfMvKB99Lf/IHBusQC7u4JO95/7G3vLNj5Pof/dn24y/3ymkeIwRy0ducBBg0Bh80KUIib1NrVaNhjjUmd5/wnWTwZ//96GAycVq95d3dU0c3nnxSbmyZ3c0YRCurSMLAG9enygEwqKm3fiMUnbe8TZ96OH/HB0FSOXK0sbhgN9c2/48PLZoq7aWQQCDACtDIwvGVPvVa3/MP/Of/BC6cNdUIdRorRkIUAIVgDSADJY3lB2XStIlyzpmko6yerG6QsWkT1eE7T/z4L07+w083M6Mi+CAAZBsZTdfj03+QPPRjYdivr75Gu7t84cmwdiXXkRpNEM7aeVWL+AgqKK10FqHX3Kiayz/8X9z+045RY7PZG44GZ+zYY9Bo0FgTQAU71+l1iqtnkl1hULWTaJvzd57ywys+wMkjt1/bqrqqu9wMxhqv/EaspwEn/Zv3nNo79IH7NpeO7m6txpUH1bceWRi9cu3Kb2pQZFIGYlSqcbAuay4rhRg1hcjAwshBbvk9GNDJzB8DltTshWwQU0MRIArYRCOCJSIGBDBGs3AEYgEgIq0UAwIISWI4ilQCHnVqb62Qr1+9+R1vu//IfScf/9xT7cO3NXDJlSKVWZrfJ5PqwouvHrzvjsYjd0yG23lb1ree2DeowmBsE+sbc713frj93ru5a+ZtGpWupwP32SfC1y/sP7QyvnxeIaWNFqWJIcWuKCej0O103/N+3O5X11/C8RSGTC7O4VHZNlxWpt6dPveJuu/mkiBRgTIgkKQ5ROS6OGyw/2t/1FlaOKAbWIYyqnpck5PheCxzh7uH7ohqaoMkdbSXrsvmSBqLC9/93VfaeNs//jsv/PJvLPR3wvb5ZtpJmGhShnOr8drmVl0vv+/Ni/cvv/d73vrpj/+F7XWwrKGs2UNdOw5MQDG1waNWSQzIEcsYnSiOUtfOpCp619/eu+PY4Ufe/vZ8aV8xLk6cXDm4sG97ffXFV199x9K8SLDaBIbIIAgiwIEFsNnqcfDKKMIYWYyxvYUGl348mty8duW+hx7sj+tDK6cPHlra2h6ePH70+trNq+vXyGoP0M4yivUYZVrWwNGAGMw9RAacREmNNgIuAkbKbYMFOEiv0UFmJlW7YJWqq7qZmE63W9RxNJ6MyrqRN920SlNSlddad5utfjEeTesWqRADS8g
oJ+R7Th/rjwab42HPtNqtdGNrZ2O6lyh7fP98brGqXN7u1boCkiHBudX+obmurXw2mvZXL3zLW+6dTPDMtfVTxw/vbm2eu7C+Mt+59+C+6+sb01guLTSuDEKaWC7dnbcfRu9euXSj3c0LH1EwKGpqu+77CvXN9dGRg70ZMNEmtqojE3CouqYhDj03Uyt+UkfnbAqvXLy8f6GTmnQ4rlxgyokBFQspNQlsEvuXRkVImOaJTY226D2QhcQqLjhU3mZmsLVXj8oj+5fHPErvPnL+81cfOL6v3zr0Pf/Hr0zOPvPUz/3TO/fVUkGoxBa+m6YsQogsJA5ZQoZwrXuy9Xd/ccr1YHtHP/VCRxiQUGsCEzhyhZ5FtJhE+1mDX4FNVF16FhHAWkCFQIQ2Z/ERAEQJJQAxQh2stVjWzEEfOFCx127PavHoo9eh9kLGkDYoSBTYM0dkYU3LaT76rV+Zf/HO7ZtbrejTwfZ0PLVRYj3V2sQIQOg4GBTxJXAqpCQEoRhqx6B3arh29WrypoeO/qOfC//6fzVSkzKeAUn5KhAKpIZnOAJPMq2MVbmRfa8+s/UTL+GxA+n+E3B2q9wR0pXyfpHGr/6rnz31e7+/++CbpneevPaOi/fazHzpZff4JxMZoQeIgpF8YEo0CrELyJFT7B9ZwOWj1YWrajjJwDgfhZg8RwgaiYGNQpGIqD1g7/0frk48Uo848ZNhtpi9/XhSFeWNS2p3SoGBKPOSvfB62wRgAkfAAsqDUoKMEIEFGEEjcJi7+Fz9r75/+dQd3/j9P73PN0ErRRiAIToVKUQ39763FSceqWAfez935+md594Ikf2Ut+452Xj7/VtPPd64OmggEwpEZkGpQt2yzQ++z7/vHZ/82J9+48YL7+x+wFvQZGMltmkkMgpYRYVnH8C5GCASUqqVCwKKBCV6rxEtgQ++qMsszVJtjFWz831Z+8icW22N6ai8RRbKaBO9tz289RQgMAsi8MxEBiIwWy2LItSEpBAJiRBEWARAQuSZeQoRUG4hmmQWKJIZ/RpQoSLQBCLCAjDTmrMISYyRAElIAwCD4xmhZqY6F5Zb5JnI6DxrJABlFfrIUURpRRxnIjYAioAQhYQBsAzhylZfLK7F8M43PfDOuTdtDDY/9+JWWK8PrZyUiXrps1++/1u+bf5tP3hz9c+aydkyDo4cPLFlV/YO3dG6tpWjKyRWc1007cnFC20vGpQKUn/9xdOvf3V9Y+PC4882TL7TD2Yzc199BhzUKAokKp5WTiFvNcPJD32Pb/ZcNTEkoCwRISWxdBIEUzMqhhs7bNmIlsSa8OLrG+de33/PgenQDDp39++5Y9w9WI8uU8nOl9ACheKLaaJdEaNoYKmCWOJwfarnv++Xh70DqVmcNJ9wf/TLTbO5OZc1fvDHR8ce4NhIVS3M6AiFGexALZX7FmF7GzXOLmCImgyPkbeD2vFhnKSTVbX36s37vnP+b73nntvzfWd2z/756nZtlNseNjKtotrZLjBR5cy8hgoIYmBhiDECotWkFIKAFSBm7yQyAIO2iFFilJmrpvAgEBOFRmsUCMIAMy43G0JhUcCIFAVRkwiIDwKAJJEhAAiIVgpBKh9EBAl9FE1kAAjEMQuCUsQMDDj1oZVYREitQURf+cAcmAkBbskIhb9Z8UdQLBSDR4ukgiiJIBElaYkxVZrloxLn7z7t2xZTLb7a8DGYRtQ2pa6ESYKNiS98tDprFKLBQJYnFNylJy7ulS5p9poKxTnMWotHD7YyLn05DiwRIG9B1na80tZdP5zsnP/S9Y/95+50tLy8b1rjJLiTf/U77njw7fPQfOl///nxtU2bJgVzs9udt80zj73cazTvevu9l67e0McX+NjRhQfupoX9WZ3K5z6fF4PrZy6tf+nzNy89NezvLOZqP2fJ/nmp60nd13ceG+ybW9PcSJYPblWLcezWN1USL7j86D/7B3td8/JOffTQqSYxUz1vlHdcl356973Nv/1XNspYG4S9KVjbjKGLOkzLLKVqUue9eTFh8XjP1bu7L71SPPlJVJEbzaSQXMiTRUxiVCRAt94KCHVUBMpSQhiLqJWJTOw9kEiskmp64WP/Ss+fuOddt9c3nqvdWB0+XCwc1o1jexu7Zjssrn1h7vLTGTJ49ix4eL+9483F5de261y+7yfs8X18dSP+2X87Kq8l5CrPAIaQI3hAr4RL55U2dWrC3Sc7P/U/FaaNbnB92jtkXbZ5rnjpTJqic9wfFo3bWuR18dowF10OHIlNLhdv/M6n6z3vvN0rNButAIz37DkAXhyG5ypKl5rAoUE4qcE5HE5CUbHql4Oh390bHziQ2SzDVrF4anlta63dNm0V0iZ+4bEv9AeQZ6luUjEpa4ic6LHjgKoS9JOKDOatrq/CpbXrlMvSoQNHlk9dunR+qZmBkqIsDp5auXx9XZmwOylW9i/v7y3cvH4taaXt3OIIqxCX9rUQZc+5yG40irOnwChLgrfqDBhARCSyoDBwZBTmGGftNCIVQQgASCIHQGIAYRBCUZolAjACMDJKjMIgLoCIooiIoFHTrT8jlkGDaIyQqAQzVVFNDMhlZCFFyEwAzBwDAgIonr3wEZQCpVAJB8OsRGJkEPDEqLNZtzDEoAyCguAjRAkxTkOpNFWyQwEmXichZUr3zR9oHD9M0zC30Dj/9eeuDLeXuktFmDgl3XsfTXuops83irG/Or1+8wvVV59O5prUSa+9fKUNurZQxqC0it63SISQa8kZ+h/7nVbXt97y7WNrJzIuqxK9r3wdQ2Afbn/zI6OdrUlVNDst1d/ae/aru09fzWtCg0WMoAURSEEJgRWQgPc+ISi13fdt71o9fteowpW0251rjQMZBpoI1JAZJVWUIQBCDpAKSACFgAROR4cgMegGskGzeLCKTWku5jCarl9hF6WIVXBYlxwqNLYxP0eIdVVrrSJie3mh2hsMdkfT4Bfmu7qYrr7yqhHM59Pm8f1jHWIxDDubwVV6urf72pnhtbLeDb7wqEwnT8uRT+cXR+nu9rW1LqkgTVYHwCzNHXtgbeNz3dxG50ipg3ecSO6+p+zuH4193lqY7wxc7cFCFDEISOSCUAQUxCDiozBwQBRQiCyMgMgRGFErF2Ido2WPRjt2UpcxeokzZJACLwSSaGu1QqIQagZlbCpy61sQY1SkBDjGADECxRDZMwdNARCZNUqaqqAFWSKICIQQLerZUlOTiARSxJG9AAC6EGpCTyogRESNIMKIGBUwkbIpEgNHBsAIEiNHVkBaGUGElIZ7VXtaD/b88p2Hwnx7dOZVu9hUYahiGV0MeSKLefvhe3WNmy98PUl54d3vuZH3ivvvvq0dWFM5qGy3wyap8m7246dPfM/upX//f8Jzr3QVEEiovbI6KiRQrvSBAxDWVS27JXcSTiZQnj+I1XBauJVHWj/0C5sNm731w/bSy9v/7ldpc5ucUJ7GwicaFUo5Ge+FEL7jHYd/4b+OzQoH3hgNuo3OtPDCVnWP7vvI90EcowFGUBEy3nzjv/7SgTtOzv34L5cTgVPfNv
3zX7df+G/tcmxTC0BSBFGIEEAxkJgrT49+8wfz97+99Y4fr0Cx58ZcN3LUmdkblvH427IT98P2G35cSiRmJnRaO3j6o/Glp3mCGQk4p3mqVIms6t1dZcB7D6QbvRx5Ci2GHrnTJ5bf9s/qzmknScKVjWX9/MfMuExt7hEFRSVqWpRgOe72ey0syhrrvl1arla6u+Or28VuT9XV4A1dqffe/1dW33j6/O7l+Wx+MBodWJ7rDyf9teewf6G7stye9DdWp0IL1dZrc23l0GhFdeSotdm/YLot2diLkYEElYIZpwtJK0IUz2AQLaIFyBQJEBhkAO+CzBoLCpXSPjDOgOpCDABILDKbpJNCq1QIURuMzMFHrU2eJLOn4Md+/PuK2tyI6X3/8H9KjWw8+2LaaR5678npN875cjp3fO7C2sVs+k6dtMOBU0e/Z+nCL//HZe+EdezuW37k4Z3M72wP2rlpgPRffqW3fjUNo51XrilxqpmFOmKN3tf53D5QvJktdlfuDYMn2uNRKmZx3zG/t1WPS9Np5gu5G+81VXTjUpQPPghFDp6rItE6eiXse2hgo3QZhdK5yKhjmmJbeOo362tXRinOLywoX9tx31Bc+sB71lP9xqRMfTE/L4s3d91uPbpxIWk187m008787trqZx8fTSbm0Yfnm/uWDu/fY7N1czKtgFAySxEEEH1kYxR7mlYVKVmc61DgCJCkKrHQaaXTveGHv/chFyDJ7cMP3rl78Vp09cqRA4WWQHp9rb+0vBgZGo0GEXgfAUmQm3MdTRCCVGXgyHmuhdnXRd4yS0uLw8H2xura8dMLimC+rft1mO9kzz2zxRqbqXrw+Ik//4svNfJOISIESqmimoIIA7oIc51e2NvVEHxdJkTTqqoRmo00sBNR+xbnBuOCMKt8JcPCRwYQsmq1v3Oo12sYwG8qfRKUZm7a3Wx7MBJSZVEenFsohi54zlV2qE3GJuOcJ7VebDeiw91hf//KoXwy7e+UWUMzqBBlr3IHDi65QbG3MZns+t1pXNq378Unnl3IU9vIJolqHpw/2NGre3uVyCg4ceMjvZ44fuPquuVkrtmmybQMbjQZeMyyvFGWrlYyQRkEP40EdbQw3r/SFsjjpLoZeVhmnSxRdQCEQZBoqfBeOwaJIGB92G/1Urc16Hv24cLF1b80KrJKW2s1SNrJJ5PKB1/XQbG0OqnuZsM6LCx1zNY1u3Ytw+7GFI5+8Ife867vvb655wftKwtH3v+r/1rtO1a+cn7nf/s7unZkEUVhDUZRrnHC4fhP/MDWvK6T7NDf+1H/8gs8Bp1Q6QMqrYmYAxBGQK6jVtpXIbMmOtGKQEHpGRKcirMisYpGp440zbWTVntwZa2V1uTjLOMqiwfLbkNdfFb6faUpikIiMhJD5QV8ZBQwiWUQEgQvjbX16vL1eZsSO45VZg0zUGKioLYUBQOAKCQUNMAgQdhz3VxsAzWta+0q7rV9ebHfCB4DEDIpHRWppvHsMTNIYhG5BmYgrT1YjpBdKf3lK95f1JKKNkUsDLHO6MBedfmn/r5535vn3v7tPjbM3tTfPKfLNYVgMh0qQKVjCIysYyBB53m40G783P+q77uv+8Lzo1/5t7RHua1rqkREeeFvxpWBRRMgM+gOpkcT2msttifTwjV7yb6e9iN2JYoKHpUgAYEkAAhRboWJUG4tLjXAjMZZMQSVPHam+uyr9zVyqEtQIFaJIFkNhnjq8t6CT5pSTsFmdGoeElYsQ6WP/PN/7h+8y+x+P/zBx7d/5xMLUiMBaQMslqn80tN3vunR48dvIy5fvHFzk7n0stxqQASbJEBgIY6rSpiNtVHY155EOaYQsWVMp50hRUXGB9GNticiEE1JGQrS2hgdqyrE6EPMkvTQvgPdztwLr7+8t75xq3UgovH/140HAYbZvgwUoTaKCIhAAEXEi3iOcRbfiMIIKAKkiBAQcNakJlQaFQriDHwRREAQZzeMQIikIqNmQRaDKIizI5QIq5nGYdZQE2aRGGJkVoRGqzoEYQkMAKAUCciMqKQ0KaBZLKmowtXr/RvrXznSbZw6cejEnfc0F1eGVy/3dzd47eZTZ7/0wPt/4J7v/sjap3by3em5//Hlu3/unere+3h3vbx0zT5yd/Kd36/biXvy85d/93PLLtpA7VKd/8U/ogh2EqcePGnYMna3KAN4IiSjXGwYzRimmSnNnBVJrLUK2XuIZA0EghBDjdRshLSb4FCCQASyKssG9fTptfkMzv7mLy/8w5/JbtPFZ3+/4QOSLYso1iqJukK0ZlKCQrXdbZy8//aVs+eu//L/Miz3kt6BHoeDR8xYNeODf80d/QDFHINHjsCcZLl33jH2+8VA76vcblvIlZIZjCQjE0VRP19YNe7azWE7kbtOtiYXr+0B/u+vf+brk9D3SpxMK5gMHAAg6VhBACAAItZEMdzSUadKaWIECYFrBxIkBBRhRRCVoMREa2KJjCwSRIRpFhEDwMBAeGtiqZQKLIoECbVCFsi18SyzzmMVJIqwxJldjxBEpJWZ6H2aWIkRAjhBhcgItYiPogBzhNIHYWLECBBYNECMgiikZqPP2UMAIXjByHVItPLMFUdnAK0F0HUd0+XloU5FJZikKs1J6aUjx/bWrmy/+Px4Y8f0eqqTzN9+3xSoZk+Jdoh5p10ZqlCVY2+1xFht8GiSb7bvuJsWDwLERGlfu2Rviturg6vP8t52RnKo1xUr5bDeas7PP/Lw86XxL16KazcXlg4cOn5XLJSM1hYOdsLF7ZNLhws/vnz2RuP4icMHF/3Bo1uDGlbPXzzzWvbCmSzE8bhupmZwaXsRTb5XTb0bTNN8Lrn77//d7nd9+MzrF++aP3yKsqd+/kdHN4atREYUvvvXfunisdNCyd33N9JR0GcvVdcvaye5psl4dKq7rxh3cXc43yrWP/eJ27TEuXyy6Za67VECh9/2oTit119/fnxzg/u1JWlIAwT1GJWoWkJQEtGhJiRQMQIp5qgIECHOHIkEEbwAsgQlqJC0TvfdWHc3hiHW4oKV6G6M62y7u++2/bGY/o/f1NOtxPC0hHShlwLXWxfVeBt83X3gIfVXPxBaRTU9PWiZ4s/+t2R3TzdsFB1qjxLE14GiamgvzJG6i13k4fbXnx/uXjzYmm/c/tBoc2nnyhthBGGoq13f7dULDxy+fH53vsBmN/caE53GCvrDMu0krC1DMt2bJBIDZr7Ve6OGC1tTpQI5V8agkSD4qqjSdnN7ewQCMeLuYNzb88K2reX81u5CZ3l5X+fatdXhhAObyod66o0lk6pyGlBobqFVihw5cuCN7Y0XN2/OZ7oDuYuu086H003dTIbj0iiZ+KIaFUVwoaiizYfepEU6Lc2kjhX7qgr9cpI3lrY3NgIiMjRbCUABANYkwAKAHgABgUNkIBaMjHE2J2IQiFFkxnUkII2EQqARFAIQkCIKwmSUQETCGCWKRJ65qwRQsSCCJkVRmJSaxQYSSiwkUViblGNUGkACcyQkZkalQABAIkjFkCqDTAiRGLwTCBIjsxBIBIrME
QlnH5oQAwlJCMxQuxBRxEMMIc+z3aqM/cGh5YM2BNXIqNsoGmnnLe9oFNXyQleNz22vP5udvgdOJfe844S++OobX3y26OONcVS7O6Rlob1QRNEnFu595yN7Fy5cff71anukkbS2xgoU/vof/J69tj7/5veY/YfTVitYsHk2LaLidFy6YPKk2XHA7sxr6//9C+0aXCREBIVCTJETvPUtVgoUIbLYkRt8+oudk280fDW+uF1uTpONSU3gDKimLiNANCAMKBKRCShlD8LIlQHU0EqwiPLQO2+/WU1f+n/+ZbPVCcO9lXvuWrjjruHWdnJw/7guUZHNm0X0ZVm2Op269shhOhqO1zetRd3O+udeca+em17eiVUIGtZeuYRLE05Srge7G5vl6lZbtTTAyNQyZ6isTIzxytrexz4q+2Fy7mIVkiKYePfp/MA999z1kNm+uNffTdrNuaNLzeP3msN3csCkrtJGmqjVRBtCciECitagGWogHzkysiCzRBYEEGDSJEbhLAKtVJjFWqsJsIYQUKEyBi1RkgphBBGJgg4VBkHvPemsKGr1TUoLM8cYgg8+xBAjsiuqKtVAzmurNDBpUoYSozJhIa5ikCgsQAFc5UNOijQwe+QKoAJhIi/sWVgrz4BMAlKLBBI2iglIWRImpUGAKGJwCoAQp841TJot5qqVxDQfNlK/kMqxOX7W5w3SpdO5Hsxnhz78AXXyrt78/IE3LZ359KfGV870Vj6At92289rjS+0SbWoxI6MFvckMnbjtwV/5t9d+4kem52+mCTFoNw1JQwUtupGRi4qxLGqcONyrGuE8Dj6KqzfDWrn0zoN1t2ETTLrLzaW3tfbZyz//y3iz7ybBuwBl1BiK7nz77/5M/gN/s8p6KgTB2G73Em2im1ScTUFxaba/+rGT7/45n1rYG179+L9RqMv9b2kQqDZDfnzhb//ClEL56Y8m1YQ0YNYAHwEjxAqIoSza556uB29oOpGcetNOmCwuLZ5/8vOHTx9Pkvms29LzDXd5rE1SMYPRXkQxw+5E4XUlyhcTY9OIAa2JHEgxokFibRjBAVLd2pe874fV3R+ukyVUmkKN1oZSq+ENwUiklEAMgUXylsF24DhRWUUCN3el9/YP9r7tB3am6crDf330zO9de/LPlnq9jZd/+1DafZ3HfW746M6df32O4cvnducnWwvjayt+AK/vDG7EudSH/qhA2T3/em/fAZBYD0bgAUFmxbEgtwr+CpAQNIpRYkiTSKYoMdYxgCAgZlozMyL6GAApMUQgXiQChAg+MiCJoDAaq7UiqzWgCICxUgHSN09EZtRfXDjePn2cu61cY/f44f75NxpY27aeXKvLqVnqHKKbN3opbF5cdVvFkXtua2wM6q3NenB19ZP/qfGWNx+Z68jmZPOTX1wYMw02ubc09853Ty6/quNYBooDATVc4wA8eP/RR96+vnp95d33UMuFs1dGW6u6qpNGYzIdRK95OhTwBkl8NKCr0ZhypTpWILKgUQa0jbX3cc9FcEbbzLi6NLmYnRvjL/z5/o98/4TmoOUgV3vbl0GGrpUclHYjqd3exuCZV5qdVtpoQKPVL6oiDjHJj51eaR4//NgXn7ztvkMHvu/tf/zJZ2M7m1SFYHRCzEoRjad1nlgWmVauk1mJoBIzGBdpoglZAdrErN/cSZrt8YCe/Oobp450bMvv1UXBug6wsNCtERFVUYQkNTFIVdfGakI12NnrNjp52vAxRgQfYuBw4+LlUPl2vnDyRKOx1OQ4vXHmyvnX3vji11/ZG5WoxOI0jPv7l/ftDhk15M0kEV+U9WKnPRgWANQvx2mSQaxQkY8OFSqbkFVBFCD5umDkvapI08RFnoyKXq+lTNjXbY/2xtMKOjZ13hXslxYXjYLrG7uUqDxL20nj2P79GOtWo+mHY4mQaVuWTqk4kpJs1moseNGD3UEsHVk1KaZpK1vfGHT3dVvLvWNzj4yKamf35rG5Aw+dOHj95mYINB75azd2DxxcUCO3uTOqJnx0ceHhB+9+/uxZYzKtVH84rXxtgl9Y7O1t1q6og6vmO1nlyva+TBtg5Zf3N7ybcqxIp51GHmNsZIlOJAuq084npdfB9XKzONeeluz2XHRVz3hgnrAMh/VfGhXVnqc70xBE6anSqt2xAAE0HjrW3vJ06fr11Wuvl/2rO9/4yvbq3lt+9Mdq2n7+f/zE6bmFj/7Hx77vv3z60r63NrQ4XJsOJzoCAlBkEkMxFF6ctWGnXp5b6XOVHzqxwVbHyBK1IudDBBSFYrQgaRKpY8NqYCYApAgEjiEqBFIM4uvgxae3PzI8dGqK1XRznE+mCTDXlWnmuzKS+x8dVFsw2GtYwMjeRQpCpOoYUCsUBERi0FZHF41Ek5DULsQYESKDJiOCAMgcABRpDShEBMYKaM8kqKronSru/lv/6OXperuSsL21YFOIhoPnumZrQKskSTwLiUaDMUFHFVlmx66OASTJk5axo93ApAhsRK8iWDanVifyXz9T/+6nTwQPmTYlqEY7Bu+rECJrYtHIMRhgRRoEjbINteyv9KunH7fzGS0cq8+dZT80JgKpWDOjjq42QARRI1z7/MdWfvKn9jpml6PJs3J9VT75Kd13ik0khdZE55FpFpIHiTArwVMEEOAIygAKIIJSEAm0TimCMORq1t1iEQQCEZvQ3lNPdM8+T8dvd+BHa/3IEBCgjTC/OPRKNffRh77FlHuTP/1quldDZCElFc0Pp+u/8C/ONPmOf/I3P/PVb6xfH7VFlQ67WSIide0TpWJZRw7WahCMFDUqiWS0SSz54FqZrUsXRLQxChEio48pJHWUgKBIQl3XdZgU7nV/tfIXt/rbvrjVSdYIBlARsYiwzJS0hEgghlAjEAEBCkIUiLPAJCCIzFRmIiDCCIQw+5YB4a1iGt5yoNMMFKIBbs1HQRGLRVEKIc4EzOT41q+QW+K0iCCKwUNEAK1IKVSgYhQSCALyzSUgAkQRrTFLjA+SgXaRMcbVzb21/vCJV9840W3ffnRl/50riXcHQ7n7xvN/uDa6bSlvuH3Fhevm3/xcL/cPrHSfeGln7aY51VoZNSy+68ObX7+8cnM16Y+t1drDdOKEVZqazKAb7CZdhFTVERUTOVCBnYXGwrzzRlVO0sQjeO+UGNAhsDOpsTy59mf/+lBH1cOqYCPMHIMSK2Uk8W+Kfu0X/9WggnkNKoERO318vxxd8Gurze3dDBljqVty4F1vufnUM70bo149mDNS7l5udrWZn98aONU9XReA5Z5N0yS1EGLwzvlSyHQzPxhWLUATQWGYlrXJgVvm2H3vWd730JnPfXycFLZLZ5lv7vFrj31jYu0oKmEItY+AQCAEiBgcC4JWOOubkKbUkgZUIuLFeeaAzjMKBRYU8JEhgFZoJCoAQgoACCDMtyKDAhwlRmYQozRHFkCJYDVmgEjAAAohRjBaAUKEEASExRAkyjgRjpxqrQRAKQYRJuGoAJAQhKNznGirdBTkKCy3lGlRoiKKgEgAwAAAJCa1LExGc4CoW3O3f3DUTcbDvf4b
r3XSMHfwpD18cK+/Nrx+o93sdReWaX1Cl6/rjdXu2KndEXRaTi+6djtZng8CtfNUKecw1xoQSFgLEUEoi2deenZqsZVmh5Wxo0oVoavwtmNLeykX/QlUtWK0i0vv/Zlf2DYu2Rvafv/QbbcVnR5OQ1VeB9fvn12HjWpvtT9cbLSPn9jfPHr+sa/sbH18ujcM/Umz0dy3NJcspRMT+/2y3nP7982nuhUAsWcPvOnUuNylJ35fPv253b792s4wZS0ME4fUkOnXPp6fOfjqUy8fP3VbfO3Sxte/cbTXYRYz12rMufrVz1392iePZWkcj/NB2G23zP7b4V0n+UDWO0jPTndfeupL7330bfOb1ejxl60AS1REECBEBoWsBEhc9AoJSAQlkgCyMCtAJB0BhBkJ0ergvQokUZOb5siiRKGIiGGg0cgNX1QQ5xSKCSUlq40WmrkjrWB2x6raJQjF1Vfpid+Nc3gzO3rqA9+286Xfn58MDFQhllFYggYQslqRQqC2letf+WIrvPHwXQ+Z22T49c/ffOFzjWNHDx4/vv7ViyklnfnWcHOUu+bSQw9d+fPnD7VNNfFBpJv3uscPFOUgTl1qDXOsBeosf2LDP1Zl3VOnwvoV573J7WTi6kkRp5UZjwml0Wy2KFlsdUf9vem4uIB10uxhhGKveu4rLyuhdk4iwWYaAFKBRrO9ON/tj7Yl8I3VTUKhDFs9mxUuA9rZ66u0tNm8afUGO+uurkfTqmVTlcnaqNrauH7ywSOJOTmebkVwJgGbUBHFawOE1mJvPgfYAwACBSAAoEmHyIikiWbPiwCwsITILBKFkBSSMUoDacSZfhuBAUCYFRKgBBBBiMxBWASQb31dyCSotFZGc2BGx4hCRFYBkaAPHmLQSILIAlGQRTShJgRhTagJVGCFAIGRET2B1xKiAHjnIjJmHpTy0SfNFBEFFRqIpZtx8iBGiV5rpTEe3L/v9kOHDzU7Xa1H0wlZaCRKJW03Hiw2lt7YrA+faE5Ue3s4yu+5p3UgWxi6a89fw/50Lk92+uKadu5t95en7rEHjqwc2n/ldz5mK051DJFUqW0Ioz/9i70v/EVYSNOV4y7vHHj4gSrd195/RLUywSQ3sPPk42/833/ULtFHooho0HEQLYqABPSsIRJACSiEnMC/uFY/s2YIOhpKB6hADLChWoIx6ESUACEopWLkQKIUKEVaGJEUqPmm2Xz9JgmerGp/o8/CRf/J7a89GTtZevRwlbSbBw/jqXsq5YVE1TS99MbuhdeqjS2cxv1LPY28/srV0Y1pKKjZy9zI3/jiN6CJ+sCcbehD+zvf9p1/5dUvPzftTA6fvu3ihfNc1x2tWqiGT52XJdVs2H4RCy+tQTxwR7py4radmyc2nx/P7T9w7P47Bqo9HBYqyRsEG/0t72oAQIWtxJKSUIXSOxGOMlPxoQjTrRc4oiYxZKy2SIG9FvLTiYosNcUoxpLSmdVRJPoYIgIqXSMKi4veu5qipCpNrPnm2iAKQIgusJ+to+q6mkLd1GJVkiRJmtmoiYwqYiyjA0TPUQXQyrAPlZtBiDUaQ4aUIMWoRQkjiBDHWDkgDSxWKyfAiI6dmYGtgUGchhhqJ14kSMkuNUqlvHRi32BU2qk9euJ06HyV+r72SEudQ9/1/uUf+KHCm9FoZ9w7mB2+bXr25QPHFuorN69+9ckcDnXu3MfTykthGk1ptqsYvfDi+9+9euF30YPWkhgShLIKxlCqAEO0VpPVxqrJ1ljJDZvCwlK72l1PuJhKsj2OPmn2Tr318E/9xNb/+Sv5tAIEJVpE8aHbmt/6t3cromqEvu7Ot+J4Mr38msQddfhdkvQaraMsqvrqZ+HUW84985npqy/fcd+3tx/93mkxSjIzjdE1U/WhHy5feNmunweKgE5EUCOIAmXAR1CcDHfLP/x35du/Pf/Of3RjY3r49u+CG58z136DdwfxjdfBRaekrELWTEIMbuCVAtUiiLVppBKAdOpd8L62eYMxSVu5lGNo0LR9HL/zx+Pt73XUpSgKvZBQXcv2ml99JlXoqhqVRQCWaFPlVGEOdPjsoB9h/w/9z3zvu8p0LiN0eEAe+Ba88jRStdCFDk+6Qd9cX0uspERe+CbD4iMPfPkbb3woX7Aba62JSclUpS2H43rnBlEJIRlfulkMC4gRkRDFImpAUsQipDA6FiJPIIwJESBapQiUm53eaSZaVojIISIRcpTAmkgIGAi1AtTWWBTIskRpqsoSNSpA801i13Of+cy73/ttdPux4Y6j+Tbki9QeMEa16Lu3z+2nxuDM2clzXxitrUdvm/P7ob1vMJjEOriqyM6e74US0oYfyv4+tRIcYnald3zvwXdtnn319t3dzM7z/LxiV954JWyEzsZ66+qlTfDLi/vabTvdHkSMQCbNUz8ZYV0zoU6MWD3zF2HTbpTXwYARyMZ1psHkVHuvrIYY2BMGcsHlhv1TXxquX+q854cLSvxOnEhn/763NFw0L3x5+szjreevtjtLdaOhF2ijlfpGK472zLRqWNOkfH7h0Ojq5pUnt+/L9+87vNg6dOjVc2dDUfVaOYVY1zGG2qamEdVcllCM1aTEqCGYwwfb/d1dI3Zzs/zgw0duXl1tNlt+zBvr219/6mtz+w7ffvQIVwWSNBvNaeEAiGmmmiZAaHTy3e3NuX0HtVH9vdHVy9fm0/jiU1/3rDPTbMzlr1y6fKDVzC2vrV/p793MW7i3M9Kp3tgaCLMm3wDItegAI1egmU+MinVQjp0IAjofa+dZ2EZWYiMjSNjdLQJQy5pQ+4YyC/uXpvW0nrphxbFmjaQTMknCYqeurqdFHbwxanN7mC4a0oI6NQ2Kw1EUub62OZmOJxALHYHdgf0L5y5e6+Upcx0DI1FVh6Xl3s3d3YX5k/v2L37mTz9pbbzZH58+fuDhh257/vWbVR3PXr4JFPYvNKIvE5g7Nd87d/7SxtrA2LydxlBNp0VtUt2wNua6Dr7RSI4c6tZVGdjrxGLCC3O91ctbGXWa7azZNjLol+NxluqF+a6rhYMuA0wn8ea4qmpeCLJA1k04I2Sr2Km/NCqqKmGh6bRmAa1IIVstNgeKYjzsrq1Ph6M3v/2DF9+4vFvgO77jI3dUz/7Oj//XuUPw0MHO8XsfHS027Pobq5//zdYEAFATGqu1TcTVxqokwUu//m/6Zz8OplcNqgOYB11L9AQASkUXlFbBeyXKoBaIoiGQYiYLICGkxjpB9kwqGg31ZFqN9th2xSTQXIOir6hWDSt1XZ19ofXwm+097/EbE9m5LN4ZTBgU2oQ4IBkTIURHFmvFkCexKiAwG8WaQSuxBAIRBJUGpQi10tookioCawDtq9q0UyVOT3ev/tG/PPzom4427uWldjGaGgeJImssawXAEIJWllyoJ5IcObjVO2gfOMoT5KmTqzvDa1er6a5qZ7q9AKM9YudHIw0JMyI3U1QQKgASo9iTEiBllQRfF6iRCCCweI9iujWEJ59SVdx77RvL3/fX4f3fY/78sav/z68fHG6Cr4AJdIoKOXoQYYiN4Ub
/U38491M/usMtoWF64Q3/hcfbMYXoAJEioVJoo3iPzGAFVJQoCAgCQBoiAEcwBMJgEDgCAFiEGEEBWZWKxCooAYjQnhZb//k/t3/qn6qTy3uXbuJEkTDWxFNMCGOMvHKo909+qlpdrT97Nhc1rdk7b/MGqWig6TsLz339SfZRSBfTeqGVxyiMwAQ1eDJaEIxWSmdEJgWMguxrYqgrqF20RltSihSKMIJRGjUFlsp5hZDmZjSJW6NJ9J4FbSOBUQEA1iojCmGW+AAAZhAForVSGpUCrVEEGJBZfGAfhIFm62VGRAQikpk3DWHGtZ4NnQQYhAhEIQgKIShQAYkRUuJczVKwRAFZoBJgEQRBBJiJl2U24yRgYREUVEREgiAYIQBKZKSZwk0EuI6RbhE1IiMIKPHgXP36ePv1td1eRncdWDyx1Gk2cX7//LXJsAToQPvll6sTi+mLG6Nt15peGI1/57fu+dBbJz7p2Z7t7qEeVsbxWEcjLOJ08MbdeefBm6+vBg+NhKAOweHEgze4GdJDzbagZiZArTUCaTCkMKnRr3/uj3f+y5+dbEFyqHNjK6oIeWrrgYciEiZ1iEkdqURnQKXKH58vfuDDcPp4zvXuRz9b//lXliyEKcjQJbFZjPuVE5PpEGmsQa5yv+D8Wj+/jVlD8LHCiphr5wihHg3rrUsw+n+p+u8o3bKrvBeec66ww5sqp5PP6ZyT1N2KLSEJgQQIDMKAEbZ8beOPaxvjALYx97uXa3xtnOEzTiLJNgiBhAIKCOXuVud4cg516lR66807rDDn98d7mjt8/qkxzqix662x19615rOe5/dM9jz2R2HBmk6uO/fc7w4c/NRr584+9d97ve6Y1eU+enE94T1lfQUUAwtzFCEILMBiNJIGnnrkRRKt04QMoVUUfHQ1OI8hQvAowpEFQYwCQgiMHpiQEEV4SrwSEaojggizRGQiIGRCCSIBCUUkoBIRBEZQSteBiVEBKkSlMUSe1p0DgmPWgLlNxrWLSCQkMWhSCBAjOx+FABgFp1wtEASFFCMT4U3WFoBCYBYSxsgGmGrPl59yDdNYu2v53je3slheOQmDqy2u9822x1tXR8dfk9TYqm4klpozw7KwEetLl32znZKhvIEKUWWr+w5v9M9qZeo6kEksSKhiJMAKVK8v7BJKFTWbnaymEnO0tjkYlugobuy+/O9/bTY1h2d5RsL1M1d2B0Xp2AUAjhVAvjh7bf/cvX//Z5vp/v3J7JmTT/GN4YzS+dzc4srMFvnDH37HY4+9tX+pXz1/5vQffpG0LRabD/zCT45Wmi+duLH+x3/6ruSWxN2wuqVyG9r60s6uVdn1T7+w7+CNx1S+/XtfaRu7b3HfmGrK1LVR3x9uLH3gexfjwd5v/nqzts12e8gEywdv/0s/8cX/8kv62ROP/q2faRz7q+ZiH7tnHHmrUoV62pRurI4SULEIJ5nlGBWwgAgQe68JUSlAICJmFCLUCWAFHDmgTqyyaVUUFlWoa7IKYgDEbGmBUY2Jrx+5Pf6lv9vO0p3f+If7xrU4VJ35+dDa/dXf7Sypu9t5z/9GvrlLoBxykhpSKkYBosjsyspmRmVqgU1zZ/L8t5478l0fHp97Hne3hjeKRq5wJCGRUpUQ8fQ3Tt//03+31+etV443I7gYC60WH7q3e/X86NyO1KVJAOfyP70wOGMW9aHVkVdpNrPXGyqIRFJVkVG7eqKNymzaTvMmm1FZjTZKY9Kl+SYlDUlkPBnbzOzsjTpzWbPZvmVlaefG9b2iHFZAKuagxYW8qQNTovLz62ebebPZSDrz7VNXN/NmHnxphBc6zf6kzlVqoEwUXjr7wr233DXqV2U9qWOVEV3fuKFzpRo6EdjeHb7xFCAQMQsCESkMkZAQIkqEyBI4hmlkczqLCCEZIo1gFBESABPpaSuUoIiIk0BKacboA0z/D0CIUREpImVANIYgMTAFVhqjJgooTiFFQJxmJwCBMCJpMqC0UhoFUJgAOETFAGAcs7BExoiGtBAEQIy112CCVkSRSabfAQJGUzUoF5c6R47eu9xc2tvanOmomYVObVUr08PtvU6jOaiqtX0P/9a/+pd/9aM/Oze3dH7jqpldxfnk4e96/87Vi2sqK/fcld1dP7/QnfSt0dmhI/d+/xOXPvvUpB8MkBqLJpMmmrcrGnh38WRCcOHz36kMiLHztx+YPdC+sb5Tvbi+MlSV0qyjKBwnqv3AnY3bV4qdne0XTmfjOvORWLGXiCoSaAFACYgxilgUEAG0AdAzs1hFWgEHRoGEjCCLZ6ixoTQhxZGvKyl9QAAI4GtQCepCJxLixrg+eUJSmKTPTma/Sp1cgLeHRbEz1CwGiR30cTNUIThItGFPZY8dcsLoSJlkdi9iB25/7tTG9Utnu8Pq4OpCVZRVHVoWXIjO+2QmKyks3XX7odUj6cHDsLzvGuP8Y+95/J63+nIS2tJITHejEl9NBgOQ6CFSohMUi6ARJSH0EKN3wD5GIEYSEmAUpZXJNBqFiJpIYaIweu+MtkBEoiggEAmID64OTpDRpgQmhAgipIzz3pCNf+4whcgswXuJHGMEgMAqRDGZslpNRVIg0kAJSkOpEIICgMjgPRMHJUBKWyAgQkCN5BkBBUztogZtiHyMgIo9R2bUmhCjSIQYQhWjk0jiJBQcHdpcD4tqdzS4sbe5b/5QY+KysSzmuTNFbFDj7kfm3/3DxexBrDlvdJK0zVe3jn/8k6NXLhpjdc1nvvjl2ynLbz22t7fXWl5rtRfKYZlnOj16y+wdtw2ubvHENYG0VjlGhnJY18SYJakIRoOi80kxUWnTtrK9p15s2F9tvvcdM/c94jHuolKPPeEf+szo26fmmiqiqotKZ3kc14zkWNBB5fv1idf1xoX1099ZeByaR9/Er7/W7vZ7X/w35H5i7tHvu/v9P2KaedCIdS+SQqNBNMJcWpXKO9AEeVZbSMFBzRAZ0EDpwEkWzw+/8+lq+c4DT7zfdYe6t12/8hRvlUl0kGbeYYK6Hrjp/QItPCgoUWAAMICgUmRM4tkDC/gECVxjMfuxXx0fvBctggsIQGkWJpX2Eq+/Vl88nscaTVoGshmmxtT1yCoH40yizW5/yL71gzGVaINKM0gyWH10Jf4V/s7vdesNmpvbvd7j5sygP1qaywa9btJc/Nal8xe21+8CfWxSLs5nEGqlrI2+uLHJk7Fqq7zjI4Y00S5giKgJE62UQmEGFJ0aJ2AAQBtAFcmAkJAKiEKkRQyCxiggghhiUESRhJWWaeIxMWlq8tQorbXWhlRidRAogyhzk9Iiw5KHuzNzTTYJQsQsNbcc7Mwd3PnM19LdYdLbbZ2/0WYT3ZptzsSyKra285mFYm7RPHFk7dFbq2++Pvzqi5ltmcSORnulhaW33R9uP3DvD/5k/NgvGiKeXSrIBdtePfqm0aWTyeJK8+5bdiRbf+YbcwvLaV1NdgZAVbPVDKnxDkUgjPZ0AlL5Pdtc/Wv/GA6sIanex3+nfPIbrYRiHdN2DhxlWIPS9ZiTZpIDuFcv1Cd/lXRm9n
fuuf2x3qc+mf3BuHnxWsOFybhGa33pJ2sL+//R36hXFhdmZquN3Rd+9/Me5e1/+yObL720/mu/e3StOd+YfORDP/y733zhd//HJ1HYh+B9BEIIgQzs1kVKOkltmunU2oFDZTLQ6uz564vffjIVmV2e7+w7+vlXn93o7k5Gsf2+d5XjMrh61BvnjXyjuzO7uJhlNjK6wo3H1UsnTryttVTH/vWtnZ7rSuJOXDyRpx1CXRflfJLlCV/Zu3TqwpnJaMhohqWbeF+Uk0fuPHb8/IaqYz0JgoDQQCQSVuwT3WCld8ZlmukkscF5lCDBW21IKQqqpYxIyBs5MhvCqBByEyOqpBmAJwjE7CelETHG2MSo3PrxRApnmatQXe2OXOU7uuPDOGmlMeEbhUtK66ps4KqFpblRNaxFVtaWRoMiohw9eudb3/Xoa8+cUBZEqZ1+zec2j62urs7MDIvyRr8YjcVSmOxNVlqLfjy8dO367PxcEaESaeRZ5WBQOVdWJbjFA1mrlYALWmEnyRoL6c7O4NKpHQNZqoMEv7dTJAqyRlMzu8pf2KxGDo2iqmSjMRFKDC22rGFPRvNw4gz+L1JRYmhUBEBUiDbVIeLSbCO30NsZcJoHsAWmaw99/6f+7T/90C/+8mjY+sanvrRaQ30B9u2Dvf/6MzQ7eepjnz88gfnEuGCV4sCRPSAaAtGV3GLQffNKXZ9TMZp2LjTtKo5IhFajQh2ZEBVRzBtbmaY776L5A+rS2fTKRZpMCBXHGD2Q0WlLuxuvanc5UHvOUyub8TvrZq6NVM1LsfPZ/zx3z7uMTgATkAKtVY0mEJAviRU7Pc6Xs7c86BYXxWl+6kv51iWTkAhGEEYmBGQUAm11RIrk0SgiFevKAKFx0QdWpCBZ3t3pfONP5cLLYc+3IqNWDgQQSJMi5QvXmu8U48qsLV9rLi399I9fSaor17oP33WvXBtu/JtfuUsPiX1zRg8HpS8niQajtIoswKQErJFpqolFghdNKECiQojKILPo1ALYclzbpN1dTte+++fj4Tv7Oo3veHcDQ/kv/3mbXQwxcs3MwCEwaMIFE9x//EX3zJ/MzmZXXvnWAZcmqg1Qi7LMQWJUyECem6QC11UJRxf1wmE+ddJM6ulMAloBEQBBHQARFAJHEAYi4QjMBAyBAIzyavnlk/2//hdxNrFXi07aJK18g2iyy4VhXfSv9YwfUM2kVUTCBNk5UNA6sHTnO99h3vOW13/9dxIkYdbGVJ4VQGrNWEQbI0hRxHnXzNpWGR8CEAijUYnEQCbRVgUOgYWABSE1SoEAojU2+NqFoAkUMhmsK0B7UzfFqT0IIIowsExtrkRIgAQIOB3pIktkCPENxMiUTgRwk1B0014EmkhYiAhQUFAQAaeuImJhQkEAERAgBEAkUqg4UhSFIIARQBEkShNHjkICEpkQhYVBkHA68RMKAYO6CdEQIu/jNNoQJd7MO0gkxGmiLQruFPz10ztPn9uZy6XTSQ8cPhIWFvfS5rg72CuACtGQZqnZ+vp3rrz29GAUFsfUT+GJv/69C488fOHpV7ef/k6609eaLwqsLXfgtc0Zq3Lw5cQZBkntoAm9fq/YW88XDkekiCQMEhyA0olazqpP/9//5w/MrGUL/sK4Fxdnxzt1XU2WZ4z3fjKB6YmmNCiyOIG4vDB39yMb1g5BVn70w25nc3T5+kzO28++mo5qbFinaFzXaBW5WGx2OVHrz3/hgXe9feQNJ1JHiiEKs2YPo43NU9+6vDfss1UL86xymmu+em595+SFlg+Zw8V2Pi7jboijWo0ZgRFFYgyMCBqn+p9CZAEQEGLQlGiVaUwVIEgIoapkMo4B0AdmxmlDNqGIgEYhFECMgAACKFNCeogSOAq/IdUwFnVUhEqTkDBRiJEAGbB0AUQJQ0KSkGKOIiIInjmCiCATmdQaoNwYdhEREmtABEFC5CjAyIikFVhBB8iREUHfjEzeTGHGCN6xpqmDkLJEq/KaLUrXu0A2rVITuiWAZuBhKx9XAYxJAMoYe7tD3QpJS3VyiNTPxuN4sucxVTO5XZi3/YKK4I0zRsUQas9YSWqpabEJQHXIm4nzUqAdF0EtLu3uDHb9eA6wSZJuXhNXXepPfOnIgJufyW85kmCzvnGlHBfXfXPt8Y8YWPFXuq99+XeTnc2FmdxXMtoZTKDp33zgi69dwGdOrNVp49JWrIc7A57s9l/9ysvzT7zvzqPvePznn5g9u/5nP/v/uf/Awl43bij76P/1y9++9OrghZf3pcno1VOzAImicVWH+5YGIV6rfOvBH127893Jxd2iBBRi5zXw8Nzz61/Ut/rJUuuQfOEEb27unThTlq6VtbxjRoEYSFGcNnuyoAhSNCw0tZ8pTaQIcXpBhQIiYowQAnCMUQKjkQgRGhY0mwTFOPJhMhgnmqjZiEXF/a3cdUdnX80ufrtEg2Bx2IdqsjQ7Ayiwfl17UK3OqPDa6ohQ1l5pSixTrDjDqKuqDGChu9U99pG/OVy9U63cUYdxc77hygpndGM+FVO3W61Br9jauWjufXDwysszOUUCJ6F7ad16FUqANNnx6oXLxbfGWh/o3P3A0d3r4wN3HU1fy3ZubBejrsTYaqWRoSz9qF8e2NcZbO2oPMsz3ejkadbIW529neu1o+ikneaJSSdVGI3qYlQPJ+O1xbm81bl4/vpsOx9OijS1m+s9SubKEJOAve0h1tXIlY0ka9s8NTCpu0Xpas8xOBi7q/bCqCwjiSbu741SnUxcfd+Dd29cOlMMi5vjAcuU4iUswKKVYlZKCaIHiRJijMwsIKI15cYkSJaUwelJNKOAEhBEBoocUelpbBkgAkxzo9MqHjXtOteEGhEQGJkkxOA0GIjeiARmiZF5+uQaZbSA0tYSKQUKAgAwREAvGASY2UMVYgRiRVSy8iIGBSQKSiqgSGkVYwRgiVGQAGHtljs7+471d0cnTp9bPHRvK3IsPGVZp2mVONNqHVj74MK3v/bqS099/4d+SmYPXNvdjplsbO1UEbdqXFg5dMvR2ybdXvQ8KGOFreW3PpF0++Nvvd6uI6I4Hy0oBYYmIROwxs5KjLViFYsXLnWfZhLIEDABAV9n4hJtHrjPvufdm81cDC29fW/0nWd2v/nUTIiWFCDWProIyhCRcOQIwCAiCm5G+NBrCIG1mdKWGSiimvbXCRN6QRaIVgGzoNjc1CGAiugkY5UHIc845tjbc3G3ipBqooioMTFU17XSgIJkqWTXaCSCHAWihtW7HnAPPgwmHZT42jOfq3b7yuTjC+dsgtxUY3DNBdPZv0pHV/OVQwfve3Ni27rZqRCY3fzqrYPtPez1Yj2cbA/667tpu9GZaVWxRGOMJCARQ7DaEjEbdhQrBBRgFgQkRYaIDBmltNJTBAsja6FY+RpKq1WirUWMMURkjlMmdgURkDiyWECMMdUaYgz+JtbaB0+itabKR0YQwogECAnp1BjSOoZYxxiMTEs5lWBkAAHnY82+BkmAW0krBgeIRmnWgl44ig9So1S1V0rX0WnSRLZ2ATQECZErwsAQf
eTgow8Qg4zHNWi6sT3Wp87nczMtWOD1Gy5MYsqUtw9/z7tlcaUohOpgtUGV6kbTlzwqRpGjiFNFvfPqV1cWm2puVbXnvCtnNOLl86deePXwR36mpbKX//Nv1lfOd0ZVKly0mrN/7S8MxsXk0vVOEYbru3uu7OzbNx5Vy2m+2knVs8/3T34rffxNcOzB8vb3ZHfdPnn0CXt5t39lq6mqdotG55/f+/1fzN7+9uTeD0CS8u7FFlnaf/Rob9eef7366h/y2VOJKmcmF6tnPnXosQ/vugmA2euO9zcTFUyjmujNizv/9VfV+ct5w3iw/Oh30yNvGVw+ZY4/ry6fS1wFVsAH4Gp5cLn+xM/H535VB8WXXsmWmtAyMmm60nMV2YsxyuRKA4gEtMb7WI9qsqSIhaMFMiAhulhTPaiLZru1cKcPkSQmhEQqTFh5TtxW8fLvZewBqRKMhiJEzSFpECiEGavunQ+N5e7INSX6ulKqMoR+MoagtZkfbBxXS2Z1f3ZiMBqHKi10qPjK1a2ZrL3/0G1bF84s5wkMyvGwaBCZNtmWZkOjcdkd+6zZrvpDQjRaawQQBgBEZJ52wqBWxKiDYBQlAhIhAHFAoyyCJ0Ifo4AOEpEwSVQUbLRbmGdWk9E6S7QgKZ1AiErrOoRQeMGbFQf3PfB4S7dHZ0/Vx46ybjmBsi79q9fLl8+VZzZIWwqxDFhHl8HQO9debqH0u4Od/N73fSedT+9/eGFUNdf7OCnABa3c9c/+xsJ4vffM+VndEIn1jetGj7TjyfFhhJjvW3Szy+nBO/HooW7Zh3PnzevruXeYAsYUHEsduFI1M7Wb7cz0nn8y3dwf66AHXcxSEWmktq5qlQBzFJC8nTMqCR7EkiPLtb/WlUaZicbNbS5rMzdf10HqOlvKC13o9kw/m7tSNpoL7ZUHH9r87H/Y1y5yThtYZejq4fC3fv1Xv+cv/9wff+pL5bifGJVmRoiYOQrEKNZIp6k5uplmcq27Z8gAwoWNLuLp++/Zd/bE5qeffe7yZhdZV53szPrk0MG5mrmoJ1kIECMNRqlPbJK6qkKUuVbrk5/42G133HH2ev/63taF468MdkYKh3//n/yfP/WjP6VN/fk/+MrpcydRVKNtnauyhjEmz4mGAwcxtjIzjDFwSLJk4kptTI66qEMdPShwIVhrogiQKmoXULTSKSUN1aplUtTVZm83TVseuJGqfbOzkWm7Kq4PCnF+KdfH9q2evrA+USIjp0jPd5qjyWRYlOs3dleWVg8fPvrsmZNzBxYxVFlVv/3Nj37+a09Kg2aMsZ3OYLeXV74eju84estd99726d/7o92d4cGVhQrqOMGiDGdOb9xxrHPowJLOe/fctnLxws787Fy7kRYTT82stuxq8RBW52d6w+qWpUUNdZ5hM6NWarrd0jEurzVOnzub2dn+AJPcLK/o9d2dfbPzDatKr5JERBg4Nhis4GorzVBapJe0pRhqz8xBKZPn/6tUtDOcaDDGYmLNaOQndZUbOnrPkX53N2lktjm8fO3CO5p3Bzd84uHHXlq//vrTF456W4P0L4/dr3/CAjzczqlSEqJKgDSqiFEiCLoKCZRFQceZMSbTPgaVp1LGxKSB2ddeCVmjueba6vSxR2be/Ta8816am08vXhj+5m+q11/VgVkIFXoWREmQ1c4OYV+xZiCllB9WYJDSdL+j+ulnUDW1Npy3go/oBkxMDOAktJcWP/Izw4cOBquozlutmeqPfycxIhEBCDWjq6UKwgooj2nOizq2ErdXJMMJjyqIEpm59FZYWUgmMRzfsmnKxIzIomIMmgMxCEFRDaEjatEVZ589/4fDAx/+seV7j47HW3b/zMM/95H+v/5neT8M97pZs+mhxikzPkZmDsEZ0gxoU42iY8kIKkyJuYqiBEC5Cf0LFd2+0njbrX0eBecpVbCQth69v2poLJQ1FAMjTRtnBRitExvQ/9l3dBJvyRPwBUQPIGCNCkBAIHEsofkD31vv39+7fGXlx38UZveps8/xf/8EnboKwwE4gRgBAYyCGCHEKV4FBMFH1ASkhQUTDSBQUTMo1/OWVHCVSckM3M5v/vvZv/XDftnKxkb9jRfy41dM4KKuORU7G5uPHH7x/Hpncf+n/+BbehI1sNLapIYFkMGmBFWYdgD7IJlpuhrIch08KGg006ryhQ8C2LT5uIiIMcsNCwVkJ6CV1UkSCCW6qhy2Uptqg5iU8SbKdJqhAwSWabgMEElNVZZpoTgAM7jAdZhiAkRQEG+GFRBBKUKYXkECC4HIFAZOxCKEwMKIQKQQpw3AxDEGRIPAIjDFGAkIwLQKTStQhBEIAT0AESEAT93n0yyDQmAUAUEAUFMQtnch3pwGQBhwCrsWnGpbBBQh1hF2hrE3dtc3X2u0svvufujRhx7xVffGta2t69dCNWlZ3dtxBlK0esc5fm1r0V9e23/fm3/+e/3Jza31rXc8PLtay/HP/2pd+EMP3+Hm3cnzF+aXGksri4Oz1+trVzrLR4iELIhDFAWoeudPfPyf/NS9ZvbEDX8khdvf8cCNi7ubjUayun8Xqb3UVKD8bm985cbM8gKMC90rt1+5eOkf/sPmHSuik+GEu6+s9wdSSgAZNIGT3FQgVpNiNoBJQ40lXr92JfvGZ5J9B0WZKnpXldWof/3qlWvnz4xGo21INlXcq0O/3sWy3243kSx2coP2ws7O+rgq/NQqQBwjgwQGeuOu0jTcLgII1mCWa4OiOJLAuIjekwuqjsjCIQpOWzkEImCMiACkEEGm6UQCsGa6GREQYEERICSFMFWNomcgEWFDoAQ868AoIsiCiEQARNMQCQMSIBMJYclsACIzIyCIQgTAqegjAJ5F0XRJgKKp1htJBPH/dRVNqSusFZHygWMQjSiiEsF6MMaxAk8eZJwntDA3ciTFKKlHAJQttlgRi+9fv5YqQDDakpY0rofKJCsRVwmHRqtMs5ioakBigsDBka5V4+JW+egHfwgPL+xRCLmdT/KF4d6Nb3/VX7g86Q9ddHYlb8wdSVcO2SO3t+96sCrLyXNfri6f+uD3//jJF7927snfh1NXl4KWWLHRcwfnF/cv7y3c8ujP/o3Trz6784UvxbMn9bBenjd1Lob1uf/0afzC17N2upXm58/vven2/f2tnapXI8DOK88//L63xr/8oTz6V//eLy48f4kGfbXWWfm+785GceGrXzv3+f988Uu/lpdqrdGpS4qRMXK6tbfxPz4zb2DkpeQNBOkYm+gEPRmFmgBZDHJwrLVSMQCD1IEBkYElCCgBDCyGQJMG9kgSJYBjCd6AEqLonMQ6a9px4TlT48w07r9nYWZ146mvNXvXOs22HZ7c+ec/sYS02OkMML/UWV26777sxNl44YTuebQAiwvQ7OCwdOUuC6hMoQKVovfBr63x/kPV7mauCzXeC+efSlSvry6aWSx1Satx6ZaD3XO7wZHh0LL1jee+cvsP/836/vt7L7+iTe66TDtbKvBgLw5zdcLh6chLh48NWJ17/qLV+W7SWb31wTqejUGr4cC7KICqYUoD
2VxaD9zOrkfRXqDRaYnJn/z6y3EcW828MihC1qrdyZi02b/v0EZ3NCftaPTI823HHlnfXA8hMjgvdGO3DLEIMdrEOKQdGWbz+6IbJEqhxmYDl5vt8WSg28nK/rntzV3lMEvylJq7uxsqEWMsgAMAXztDigGFp5liIJweZpEinBa1BAbkqDQZwukorohAJHAgAmbPAFGISbwgamVUxiGQYODIIgIkoG+2HoAgYqITIvBRCWOITEgwTToL+ak6YazSKjVWKaMRNQDG4ANTDNFzDAzMkTGyYgSOCpghIkchYmUARYJnFIoCQYCsrhXl2szMHdzauKZHZFSyuXV5YfW+xCblZKQqj4onofaq+b0f/blf++W/u9X7j6se24v7tMru+q5jm6ZtWlkRpXtpU6p6dml+ZqVVFtWeheUf+/4dPwnPn1MBooK6dAb0lNI3CdLQpBBClMiohBRRzYzMdUZh39LRD7x90lke6MBQIFpeWV76gQ/Vh5bPfeYLC4UzgVUtWiEzIjOhoAUHMFGRASACJBIyymcazBIDAQhXpVIArCC1ktiyX9YjJyQqJyaKAr4QjKIVGIwAYBQAAwg7BtbAmqNWpSWhOm2SNrpmrovaWKxD2V6eJeRsIZ3dl0rbbkERaLT6wK362OEkWcDQk52Te/1xETJanVt48M3tO+/lpGmbcwCaTUocneM4CePCm7ThyvqrX/k2i9z74H0a0iRBbVR0opFMmiCRFQWZ8hWUrqoEWYgFNJImLQIoABwEQClFMt0ZQHAOWIE4ZdMIEry/WcknimKMPEp1qkExBgMqchSyb3jriPkmmWsql2WaZlKaBp1DDKQwsHjECrjiOooEoMgojCIqRuVdSLxPM+O9CxIBFSqtNRGLCxyZQ6gQBQUCC6EKAAGFQBGhEAAG0IJaiY/BR0SlSF+9cDU3MNq/OF+VzRkb9wpj07qu5tKWVlo3DQU33Nk68/QzNrPbPa+AEqtnE8ubm4OTL82+74PDYpCVsVUPr37xc1nzYP7YW8c2ffOxO5//3/+G2b1RF8XMh95rf/jvrVkrm+d5HDszq0aqtrLjP/rM9je/2Mln61ii862zO9tP/l7r/jO80mltXFxZnCm9K3qDNEWIIXzrKzi4Nl4vcPmIM3vl5o1qcl2ZsLhXJTvrFkagBPtj9eLzu//lH9m3v5/uvn9OL/LWjqlG1dOfH37jc+bUpVRbB6LvOGze8S7Yd6s9fFe8/5386jd7X/9cY/e60QmWDOSSchtuXA6u1LMN2JqAVo6UXZv3ewXv1QrFTWrbVKikLOpEJ0ql5MEgotUgEdPc5JZ9HSGri6wNDaNrrUUbWxeliKjJKX7xv6nzXyPnxRAKG0CoPaYMaQTU0O1CFuda3eLsc8l9j8c8jXWw2yfg4tf48sXUp2sH33m2vtRuVE3vr1wfxdrSGEr0Mrtr0qKG5nbF5e6kU8axG3dNnE0SEkJfl6N6cXmp6m6Xk6CmMUsWppu9MTzdVAsAMpAOAD7KlJehlfYhEslUsg+ANWoAAaRGnuettmpkRiOxEGEIEGsh0N45RuDIDPyGkaJ16flTM+Puvr9ysG4BloPBM98IL51MTm7RUEBXyYzZOLJ0x1/58f7p0+blC/UrZ6C/l7MfvfDN5Z/+G+H2hfTeOf9bX3Ffe7Je302PzK2BhE/9aXOijCfbsSricL0LWWaaJgy362tn1J+9vj2o5289uHhgttlZqNkG8eXWbpakPBwnykgVJqNxujK3us8Uly8Mn3wuM9aVIw61JqUQBcMOxyS3iSMTIAqHGJUmBs8Mqg7VZKKN0blNZ5t7vTpaM9tqe1eYs1e6v/u7sx/9ST+7mlaTyearraL/1d/6+Fv+4k+09i2t3nfHvvbi+Y9/0tfl//Zj3/1r//V3Y50aYzhwYE4ULc92QlHUZVAsri47adLdGzUbHTOTXN6bjE5c9THs9EZWqyyH7taN4y89c9stPxH8TiOxCiITMvB4UkBR6BjbLXv9ymWAyWDvyrXTZ9c3e7s3dkNko5PzF6/9yec/E8e9K+vnG51GAF/XFQpnNlEMncTWZTk/02JXD0qI2jpXJoaUYJ6Y2vN4XDEEVJRZTJJEWwyhJqClvFnUfrfojYJrNuzM/BIg9YcDI9lWv5jUtc5U05IyGQRVBJpfmWtavrE1XGqnR5YWx+MxG7s5qg8dyjZ6mwN2MKnr0WQ5nVm/dnlSDWw2c3XjajNvtefm69LdeXDtA+99y2987JPVpG7kqUmoqu1wNJnNbd6AfM7uToY3Rv3XrhtD8paHbn/mlVf3JjVqHA3LccEqwWswSvLUu8moDo25bDSpiokvJ7RXh7yJVs3NLa+e721lGBtNMwMNn+QX1ocpdbr97rHl1oPtrI1EdcgNWkUxsjLgWQ9Y0FKiYojhf5GKtFEo2sWAlcsTZWzqQnj97FVEp+pqJsvijYuH3vn2WWhd/pPfb3K92o+LKs9iTRDnIGkDN2ICM6bXG3kfhYRIhbK21moi7z1qjSkzxaAEAH1VAUdBQsTEIjCjALXadOzW8MR3Je96R2g1x6NxXF1pvfc9k0vnzaBvMAIZEaAoyGwUKgRg71j5qJTWJBCGNWlpKMHcRKtQJ9ztS1mzL8jmCpzHzdjc6mbpVhHmvIVRP+sshzhh59gFdAERKE9R55G104vJg3dNji15D/jai/UzL3SUGKDogECF4IE0cozlBFOIxGQMRYhlqY3SQlJUWptyz91737uuvfS8rr/I+5Zab77/5O6FI+1O1V6by8X1a+JIAsAYY00AUZOyKShARvastECKQKCIwAMakpoBiREx1CwchqEczJqmTQDNzsBuXdz51MdbhYOgAjOxRBZIlKCIMAtiQJNnEEqY1JBacDcrthQiTLtR0ibe8yODpRmz8EqZLRST1B56sPVzq/D8a8Vv/Od8MgY7rTQkIAXa3KwIi4gMEBFIMNHAUZxDQE2aBDmSIiW1JKQWn3zBnXuBOpnaxaRS1WZhbEY2gI6wkMO7ngjHBp1H7z7+67+UsYuUeRZUpEhVZWkTlSitglRhSluISgGRSrQGAGSIIYTISumyGkNkQXBe8jSXEHNtlLJRKS8SokekEES8T5QCuekqmpo0YuQogggEAMKESk0bHBEA0MdYB/bT+XxqF0ERBoVAiGpaXSbTNjQhvHmULCJEiCAwPakGmUKOBCJNE0ciwkyKtCAJy01dSphFEwEysyApEUCJItMLTZ0gogj55g+BEAWm4GwQnmpdNP1mmv5SwCzACmkK2I6AUcQNi6e+8+RLr9Jtd9/+wB2PgMk3d9fr4EdjsqQndZUkNDlzqXX16relfteb32S31PFzx5fDre+446HXW1nRst9c33NRXcuSJW1vcX695q99/tM/cPju1r61ajjI82aS5r2LF/7F//Zjt5rGjSSzzdSf252H4WYxevBv//DuTPtaBaOsAx72z7Sr5741unZ1cO7SftOS3RgubA6Pb44LmJTgEygzdBoTYKcRhbP5hs6SItKJQe1Fj0BfGsIffuz3lUUiAWYSttpGlRRFKCP1oqoZt4d
lQNIushtzjJvCdRXF+xgxMhgCmvaMEWlDBMiI09kGALSlLFVpQomB6OqyCizkA9ZBXOSIwnHqSBMSERCQ6Q2CKDjNFZppdTYLESKiVexEIgBOK/RYOAISRAEStgkJYPBemAREIyvEGDHKTelwSsWyBCCsQHmOiiAB8AGrCNOQTORgjdZTfUlRjFFuqkkCIoSIbyTzI0eZymSEGgEQYxSlEoUmMSoIsHCpk9cb87f+wEfmD625577W+8oft4wl5RlU8KDQ1pWX4BA4SwlEsD+a73TuabZfG1Yq2qzVZhilqaq8dwFcUTVcmG2lN7bO84yhg6t5NrNokrjZv3q1W3RrCqLnF5PbjtKhu5uQla9958q3PlvtbM8CL2bJi//uV+eMugVVPpM02qmERjEpJjs9Y7vHltNXf/VvSz89qsCsLe+E7f6wdlU9O99aXTSNeuCvbE6cbZlm91K3t7t32z0rUJd7f/C7e3/2+xdm5/Z/8AMPfOijF3p/cEj74Hv9P/6yvrKbT8p7bIJeVQFDpVwdbZakRppazTufMCnCzE19hgwiKMDekZVEIzMAoESGIMCIBrUxDKC0FRESTwgQYwRAY6ZmEGAWQFGI2ihhIr/Tn8T3/fS3GvmdH3znjsq73/7K3OLpdBDBJQZlTQgAu3tQPfzu+R/6SO/KqercxmInndgCVg4Xi0f0zk6So1IaXTUZhbyjHVbN+5aa73rPJF9b//IXF47OpNtx9Prrmy+/trR/Du448szxzc7i/MJ9bxld+1yeB05DyjAa7F5+8nOjGLK5lh9RlqRbW7tKKW5nu0hnKt/D5GiW7l3bntT1zOrStVPlZBzTXO87fPDy6fMSHSKptj5439H5LCl3+fDM/MWzO+RkVuexNxjt9FGlIQhw6O0NOu1crC1K5yR2Ws2GSe3sQkbJaDQcjYcxBA8Mwsaq6AIJ55oaFocjd7R1eO7IwulrpyiHWE2Ort6y0+2NivHoxmBno3vw4MF6Um13NwwmWSNtL7eunh4DAPs6kgZSyEBEgIBEIlPRXgCmXg6c1h1oo5WxoPSU5wIikSMLB+EIoKxWSmmjgYEFAgZCIWatNAAwB8VKTZF2oAgJFToGBogRGCDESIQ6iCAZra2iVGlkQBQUCS4Qx+hCCJGApn8TDGFgZmatQAG5GMWLr73oSGr6uyhg5iguRBiPW66wZK6Pt+5+/N6Tl060NudX55eR1WQ4JA5JZrrrZ5fbje97/1/cu3DyxIkz/VfW10K48sILb/rBD6d3dIbi09kVH4rN8bBFLjdmPPFlqOYee9uzz517/P79u5e3cITl0BkCpUlZKmoXEcFARPJOICIicm77K4sHfvAH3G3HJoN+FRyyadpmJDVC8g88tHr0wPBr3xhsrHeCbzhxXWcrVKQmGKqVTji8MnA832rt7I2X7jqU7lv0wgqVaDXq7rWbxtVM7SYptVz7099+dTjuPfiuRyuFAtK/0S17Ew++qn2SUH9ru6GiEc0TJ8js62DS3SQ/+ODtSZpNyjom7Pd6i0sd1cj7RWmzrJXKuVfPx9c3uTnfXty/eMs9AkpljVsP0Kufv6CoXl5YOfaWN0G6EqnRyNqhjqKU1WCThDRPJoP5pcaNKxvnT50rI6Z5opQaDQfd4aionKs9Wh2FlAAKWKOtRgPBQARFU6y1FmGJkYlAlFIBBIADszBrUIQgmrzzYMhoI4hELJ5jrAMPFTaYrABHYZt0iP684oARSEQICUA0Ssa+rZKUSJNyzCGEwLHw3isUZCISICBiBo5Yly5TFDG6IKXzNUskSo3RQkoic6wkemJFkUSQUoXTECfATces0dZIErHyWgECRh+ijxjo0qkLo93uYmYOt9UtRxf8zmTn9NOdB95Ci4tgTCwrtX5944WzGcJEeQWkCU2s86EbPf1i2Vyztz2my/ELn/qdvXPr7/qVnxzMNEuNamE1fd/940/dUGM59ND9A9PxW5fTqlfhHM/OG5WG0Sj0j88cmBsceWT28Ycmz/7pzqlLvjvOnv9Oc1+Gpdvp+eyxJ6rNy3tnXupY1nWtX3u+98IZe/ttSz/y4ROtO+fe/dd6G+uDb34q23561uhqUGvbyAXhm1/QF75FKwupnYMbezIYpYNSDbvRBVLaZ8rccScs7BOiOlH68H3m8C2dO+/a+2//It/dzZUAEohAI9HtHEKAYZCEzOxMaKSAuqELGE7AIkiAFLLDB2FjOwsMSQbsINYAoexX2coyuiqdxSorpNilRpOZi1FFCjLehXN/XD77J0lVl0EpBcGHNCVjLTRsf6+bvvlRc+yuwXc+rV9+vnzmXGvz/fltj4Qbp1T39ZnL5wPOmMV7Aq1INdD+/EysWoShkgxNqtOq5wapbMPMYsN0u5tHU6UrkfmZ/Q/dN6idyRrzC+3JcKKshYKnW+MoAAyRWSNEZoUCU+ctoYD+87dzZEbEAMgx+igRpWYATcDQJGN1ypGqGIvKaxCNOk0SF2ofIhuMCKRvuoru/dBfev1/fi5cfPnCL/3qoUOHJ2cvNPq7cYyyJ62sY5zvTpx/5/teyWbaB46lF3eyynUandlOsv7qevLU89AJu3/67fypq3PSaKysEAH3epiCffzB0erCJKid16619cGl2blr166kD90zGa+7y/19R1Zk43J15eTYJOUAOkf2N2baVTEIaqKzVjrXTnjGla4uJ76qmnNtpaWu9rTW5KQq6p2jRw/+xj8vB3uX/9Vv7L92UaNVlNTVCFkIUSJiK5+5647Tx0/RaJxn2qbKBRawtx64c/uFE43q3wy2Rugibe4t7E+P/Nw/gFse/8CP/Z3jr56or7z2+IP3nvzy1x5505EHjux79doeCgpL8EGBGUyK3KrCxeXF2d3eyCZmebFdRFfVNUY9HIe0nWS5sQIqxpZVr730zPnLZ1ury29609tjf9jr9Y7eerTdVrWLRb+/ee3q1c2rRTl8+luv7fUndYgRgDQ2NK5kdM+iv1L0Bg1dOrfbn5gkXWw3Ekq29nqs0WkEQy5GEF8EXpjNuQ79UTnXShE5UUCoyarSuUwpCKplMxKGEEx0hNoQGQSOQYBvW5sZjVxR1f3J5ND8vK5cA60KmOiYtWe3J72M/a1rq3kref7U8dmVFWd11xfZbKKtSRs2t3nDZFe7vaWVpb3K5SrLyEjghXzuHY8//P/829+tRUjbqoy+VPVg1EAiiIGjDpChPpbNtIJOU3X16vpoHEIJ6EKD0CupA+wO6vk8SxEh8Ewj7+726iA189JcOpPjBOnK9qWWtWud9mh7YhI73JvQqDjYyI8uzKTECRHX0di0dgEEY0BRqJCbiZqwYJREvaERTb9kifaeESHVxmo9Kiub5P1xwVQf2zfTaunzF17/s9bxGjqXn3kxxcmaNQ3xFMHVMILCQ9MT4qQAqxKdlLEGie3MoiBEHxIZMkjEvGGYY3ScpAqDhMASiZi0ghAh5Hnyvve7t73bzc8licmzhp6d0QCtr3+teOU1FYooAFFpBNDkY2AQ4CiAoMnHqCFmmWLP2G6K83FQm5ZWSKAT4UiEyJXq3e
bKuoypbteJqytRI7OSuEJONDwSwXgukIg7At2KcmqgqDb5vMYLHkso1y9MpNZuY8Ja2km/tqtBSDM2wIUhOyWYgp3r7bnH73m2h/Em3n/u/86wcxI7G9+x+G3a/qqQ3TtYToTBQbfD+jtRMwPjh46qkwetZVlwZbeXXraDxT7Tzq3vt96cT901cOX7oWhlENrrIdMOjyoJiOLt586okqjKRTSlyVdtWBWynOj3dzmEtA11nOnJcURAFF0TnjnE3EkRwAK6rXlAATmKSUy+I6BIOcGZMMgyHAFHz0YoCobaVNZPqlABA7m+eZYA8VEiqxNZwxtZkLkoKqQ8mtdixwSpaP74jOnFifjVtZXxrd42Ryy2UbmHXaYd2pirohKWSIFa2d7LzmEe657eeeW8amnVVFvwuWgcs2xrBcTqiS6U386tU+orZQ9UZ14XolZ0JhfmQ1GpcT9SahgG98e+/r3rJ/uxn+yq+emN4xkJhcO6mcEEGqbTp6yzvf9Mu/MLVZmsbl8WW69sVwtKOaJQBkpySUWyVMjV9xrB//cvv4M0tke97ToNvWjZGakrEkmrxBypSizFa+9Zs6f/n92lt5aLO78xOfMq2FhMkW2c6eaQ6Ljo3Gzh9789kf/+vTcVy//ODwxpNtnCw5PtiZ3bo13Tusin5RWjfI3XDcLuVsDFCElZXNLudXb+zlnfxkt6t189pHH3rx+aebxqMx81pWOmXZz7jLe8P5wXhecXXqnhNHN/cZYum4l1ny7Wq3uLBc1DNpmrAfwsS3We72D+tqmpq+HY6qSd2W3XyC0iHuLfd2qkaMPHnn9j1n1ueHVTNpyZggqMBaYHetc3DYALqjo+laZmOKKcp2HdaLXqdwh/tHxcry3s7u0SieWD51YnPjyu6lWduGFHrLA0pUVc2Z02srG4M6tbPZfLnfb4O6XvdgfmSZSsgQMjaJjXv2+ctqjc1c3bQEMq9mgjKe1stLywSNog66vTNba09fvTVKsrxS3HNqMDrcP5wlKsnC7DUPbCmCnyXryBicVG0OXBo2mRk2ISls123RXellPJuOvVYrJ9bu3LiLeZ5xs7bcP5z6xIA2AwlL3axfuNIV3bIAY49G48lkFto0mdQs3oIM59W8paywOciAzfo9py/e2j9sQjVtZ2ny56KiAAqasoFt2vbGzeZw1IBmTUhZbkLSBMhJHJoxgPZ7vfOneGP+H//Nb/+t7/mh269cjcPnyhau/6d/+9Y3vHGr3JB6GjbPHT3x4iAz3LHNpCWTucykpK6g0ARfz4scUAARwAIYBi9AVtqEqCmpgDKgy9kyN19+plw/X2xs+M2NEJOC1rN5d3CifNs7/Y2PZWpZkgRPwLTAGHFCCwLedJTUyjwGBAEMrMY430YgDyU0rFHVsuM2JYkIJqWIxrI1SRKjEdX4wPmNb//2aRX73/XdZ7/9g1d+5XfrT39lXZPOPWhCTQnVOCcekFFQkJCdJXaqGrmj92zMVpcw2s7uXTMaW5en8TQrjYJPXkUxxIiOijQPH/+jtQpR/Pw//0a3y8v9HqY8JpZQI3CABIoWFYMiERiWNz4C3/yuWZFLG7idJ9VJ3az31wcnziZD0KaFlgUIkyqCKCYwhIZTElg0OBemeVFIKlHPvf0Ds1SE/RmbVORLtkb4g/82uPgEFgkCwIJhQqhJEBmQwAdg6azY2Rc+VDaHtp+Vg5yHFXiFKMPXvnHlf/iRenht/ksfxWevy/bUcn9S1cGaIAgRx/vV1uvu2Tuj157cuSvlz/6Tn/7ic4+fOb+eAhEqWk4hqkSRgKiWAVUYE4KUvaXxtAqIlfedHI02ydejoFFCXmSlc6gIzBKDZUKEBBp9TexiGwBTL3PzWubzxvZ6pDKdzfJubtk5d6wDjCIpQVysywFUVVJKhA6AQQ2BiopqEk0L6zzoog9ChIZIgXShGJPjwgehggIBqCZmSqoiuIgPFoUgOiYbqQIIHM8xgYKIagAEFYUFvwJFUNECGFAiBBWDyIisizyCFED+jDGwWKMAAqKoLjI1w5BEYhSvgKiEx6SkBaCZJC2IYcfjUbCos+LC3F6wKRxZgpRS8DKeBy+KiGZxdIhJlJCdNZl17BgUy6wktq4oFu0iYzMEXAj1FlERIhlmY6wCMjEZi8TEhMiIymxFgYgEUESREJDRKDO1IRBbCoJsRMAnn2JICiGCapKkhjEzZA1EFUeIcCwAaprQBmUDZU6WEADaJtZeQxQEAkwLFugixFZSUGFBSKKMiACUEEgiINCimsyEiwbBcfEKFAFfLfC/yjMHWNDYFgkRvYqaAgQVAgSEY5faor2nrz5edTGkiAtA04KGznicUSohGV7whpgxAURRRhIFAvU+Zs44ZhVxhq0AEDBxExIgCHESDVEN0eLgCV/FagGISKsRjDLT9ri6cumFx97/np3W1sunstMm23wlZ0+Oc2hj4zOIzgbZf6nmeZ5xRNO0yTo7bgKm5GIo2JUM1odZhAff/dbPPbOHb/2G+z/4jpuf+w35yie6htWR0Tiv0+b9bzhs6mx4ZfS5D8O01YS8NrCPvod7G8uXLi5f/vDOc1f89jzXImlmGtEkcmpQn3vdYZAlu7zZL/wQ5ah7buM101duG8nr4MtBfhjnUzBWY0c8hSyzYgsFCBOZl0WHG2Oz1WvtRN73NuqvrN+6ePLkufI1r9+pXT3vBLWGjl869RIMiFHNlEQMghBPzPq8e2Z9K19baq9dfKKPsh/CEOFeBoeMqMDRt4nZcs7BmvL824a2ys7eik9fsgZhOe+99uHD2/P1TpZvrtiNtXRlYpxZXjJm/3D4+Zfo9KPVIUDNV7/8BV+suLxTTSZMZJp2+qUnYbWc7W5jmcfTJ+6unp+nUGewf3CpPNHZE2tv3Hjs1Pr+l/5kvb+yWhR3vrZbFlzYAtTmpjwaz+eGgO1oVhcQXZ8r0SAamsqpGNeZ5b0v3NydOraOS4r3rXT2jqajFCOBR6IolkkAlnr9rZXBwc2Ls72by72uydzt4eGkjkagNDaz2fKJte3R+PBouzTc1C069QmnflrX40ntXVlcvr3bubF232mq5tP102sr93SDv7Z1vn7ju0+/7sIHvvapZ29fffnype1rkzEQxnS7W8D5RzfLdTPYcjb4KqZh1XKIRebKjSInJeWyq85ZIUxOhnuzaj4vGMZH09thSK2b7TXz3aqbd0Oo96fj1bUlh0JIeVnMZ+3xVRCFBMQgACaMgkSL9BaJFu/tmjRRYiKA0mWxTciCTqIaUFEiFcSoMWmGnBGxKisQGTEExG1qFBdQN2QEiQmRVJOPqkl8G2NiS8DEDlEVYpSoKSwIHaiE0MY2gXHGaggJYoIQJAEXCbRjLQqQQzECRlOMvm41ROdsbGIQMSZTgkSUAAFooVZwgCyoAgYwJCGCJIkISmNjSkukS2HG1fz85ubNq5dWKTmrr3n9+UcfvPDcxYuXJ6G3dNL69mtfffqeTnbP6fNkwrPPvzCf0WC5431KyZ5dX117+ILJ5hfuOTWrZM8xdze3j4agebufUuWBswge2EWeP3X
52XF94JjyTiYiApDZxeC5IURAMEzIpFEIMc8zK5Akcp7n1gEgEjKbqMHYjJiTCKQ6c1lW9hYwPBEBpdzmCgHVKCITK0mU46RQ2ZLLjEAaV94LAvkU2JmkEH2svXDGzBACYEYmc92sMzyaN6nVLE/Ik7YhZ7SQmY9BMSRFZo3huPwrCQlBpJ5WCLnLbGglQCoyowDex6ghUQTCgOqbWHkfPLHXvMdFr8AYmmldLToerS0NI7FRdDli1LaJkVDZICUWktaTgrTiJZnMuG4WiTBRSokVjQAl9SFGOv6Xh0QLve6rF4J6FBIIIpm1jk3uMq/JpFg40ZQW/WhrLPLx0ycpMaVukVmjMSkzikKKQgjq42Q4t2i0lT45YGQ0bZQUJSQAxGo4K2Tn3OmVzLAY52GBMLWWMTOAGpOgh6BMk/kh23RqY6Nw664X6rSfNCVIsR1CaoRgY7VoJuLHuzerWyvcy1wvxHHwcYD53pdeHr94J0y23Wtel507lRu68A1vf/zyb/RLe+rUuhxOq1eODp79YreXt7EGyx0qGFIDbFRKw1nWXe+USfzWmls/f3Kcre0dNVk3P+G6zx7trTD5FATEFDahSU2okgxMJimGimISYTVEDrRu6sUduQgyGF8Hay0xKgEbQnJoWVIsHOdgQDG3PKljhNhbzapQpxZjaMuOayx0MlEAjsYnaVqvlOZr3XN/8X2zWjHK4IPfNJy2nXoUr+1n4/HNS5+/77u+t3PP23f+939VvXylV3R6Wx0YTZr7Hl76qz8x2+i1X/w4nLq3tkV199bSWl6cW7n7W3vZmVmOHGOopuFw1Jbn3nDy23784Df++blVtzQ5GE7aYLQigrNn4xv+YudtH5CUn+7cmR98oZsvnbpwdrq3y01qfbN/4+Xx8KbtIGMxOWqyYvPc/WdVitjivSfWNzvZLd9qNEzMREpoXU7GAls12QJzqymQCiGSgiZBSYSCSJFsIBOIkioqkKhoVOYQJSUxAMTWmpJApW1CE4MB55ABO8nUgSVZi9I30HNQZgCvjt6004Nmtp/1yhMn+t1zp8yuTPcOOYWiuxx9lbKY90u/1sUTS3P1YWO1wknRtuCcRG9ynM5x5Z3v0fUOHY7l6ZczF8nEOkq37PpJbfs5FR1bT9FHP+X5m7+p+7e/bXcJvEB6+kvNZy93RCAj23WuFko0m/mN7//H1zOs9rfz/Sr7+C+3X36+Z0uxqEht07jcRRFVyJ0ri1yAidbE1362AxhtkUOm3kd1uSm6pp1nKKPZ4bXxFItcpmO+8gzEGOYesBiPm66l1TNLYVKNlNN9b3u2buez2UZf+689O/rak0s5PrLu1pgOrt8qmaNP4xDbFCfT+crKkukU25MZppE4vLW3m1B3xvOH2tQr86zo+NYPMtPPTC8zGgJTyF0oVwbTWM3ELxVmKjOpaycRAs5mBLHcXD1z+coL8wg5EaJkTiWl3DnwGmI4v963VTPd3V4x5FudI0/HicjFOAcL7ChUfrw3KdmKyu7RlGM0s6YweBSDOGwlLXcLR6bolioSs1xafOXK1ZdvXXWOV8uiZ0xqql7Zc8zeh4P5xOV53URRtUws0M8cId2ZDPeH4/uzk6vrK0ezan1teWd7NwavpSXSlW4nNyyVmgQppat3t4Hk9HrPStzd3bl8a7foLuWWTi6tloV74uXb+3OxZVZNG0ySNN134cGXb16DIIZoVPnnd/be/egpnTVdl93em5ApHJWrg5XbYaR1qqbVmbX+qRNr/W5GKZad7uVbR8/tTMZVzSpLDpdye6rjVjrZqOkHlIPpfHtSI+AS6JnN1R7I0c7hfNr+uagoLnxhCRFMjAiNVW055w7zvE6xSaiomnr9/Od+6Rde9/437D/94ve//03ni83Xfu+PPHX9p2fbHu40X/uZH7ab59/46HtOVUcVAivkjLCUU0vgW9e3tyU78yPfY+/ZaPd3bn7lUnz5xn1hZK2BAAiMC29ShBgiCDkVuLU3/uf/1p48T4++Lv/LfyOd6NWdWVixI+PW3vqO+a99JCO0ma0n3mYIBitp6pWyv7aidaTZhIJQaZ3a1CaVRCzMOMMm9h2s93h5w2kPbtyl8RFJQiARTU3kIpNZJf1i/qbXH/m0Wc3p3s2JO3X2//sPrlT/sHr8Umkh+agi7FCO5VCKCtEHQ6QKpihmWHTOvyW77+zS5lJ9+flw8Xr0Mdg9GY84NBCTCKagpBpAyXtWB4nz5JoGqSyg6OnkEJMSGWtZJHKMxjAhzzt28J1/cZblAmxUY4gp2c6kkt/5jfbDf7g8mUAQBXJIqGQQQAm8RFFyiIRKGkMgw6QCoiBAxAdPfKH7ne8bbyzxYIDj2nz09yb/7f8cEECIgArEoAkMoY8gAgagyyGJJdedoP+1P3KnTsLdFsQAcQOw9qP/6PDBR23/m9ce+9Eb73qz1diCzolDon5hOrmbetxtzYeeuRS2VvxuuPXyxfWVvh0U+3crQCbVnFGUogRrDBJJQhaEppXpJKYgCIiGmDLLwdchKiNqkjyH+XSe9YrlvPA+JB8NUEE5JADgkKSto/qU2SyCRQSbG0RNbVu9yngPKYGyqgKKKgpiYlZEJmAEFJFXdVjh1SePdYFlPyYpq5KIHrviASSBJmBHhCiiBIuZs2MPepJ0bFHHBbJYF7NmC4U8vPpDFi0UJWQmAM0UhEhFUcEQeDlmXBPRYnzsVYwzgoIkWRh0wjFkClTlGGOtr2ZKCz4OLX7TMT0HQYmYEBVJDRhnMkOsKKLzNnnR8Cq0mxEBwDAiqrPGOkfOOXK5zY1zWae7mEGzzhGxLmRnAMQGEJmZiIkMMdOrnSNAWkCxFRCRRCHGyGwUlJREhY3zMemiNsS0YKxr0rCgjIt2HSArABgFQ6SqoUkiEsJiEQEMGJLEIBIRZSF9kwSQVB0pARKi4OLpwiTkBaxBVSEAIBIhBUVF1GP6bZJEi9fvOHg5flc4Du4UFqhtOCagH38dERcAK9VjQ1yStOgQ0YJrhHDMvwYAhZSOEUiIkNKi87WYqV8cGUZRQiAkJW1FCaREtMwEEkUVYcm5BNrGFA14kSgQRAyAtQzHiyS4uXtElM1TONtz59c2Hnr7w0cHV1YePLWzc2P68uVzeT2zGhIVxBqRkZdWB0W/Pz6s6ymiAaegHlGwCoFMEVqJiiZR7lgCbm4Nste+Ef2RvvR419uiUzaQMClzMWuNO6jqj/1678bNoqB2dSn/hreNypP9F/4EP/G7sHPYKZex7I6SMd3CpMa2vmgl+/R/WzZ3w8sv7V+8tTSp1u+973B3lvX6pjQ2ZDvV7MqFjbf9i1/Y+cQnw6/8ypozBuNOnhcf/Ab3jocmz79yz+7eS594tvvDf3PwA39Ln7q0dv2f+E/8Qf74V5Yee1sa3L9k+6ndDyJAkY3MNAVrVKsSIcsSsEpWGFieDefjesrTVH/5a+vv/k5bbifDEkzWLbh0LTc2xhIhXLo2+rl/2nl4fWs5h253Pqx00MH7T03qSW9e7V7c3pgflKvc1FD0C2qa9PHPL701Tm/sQkhrG/1sc2
3+Ynd483DQcejIdfPD3f1zF05sb7djs3bybe9JN250N3JIcTistl737tLP6nkDlR5tj67PDgmKpU4eg001HE2OOgVzkK3l4si3VWjzQX7jdn2kFtr25MrSTSi/ejgeGSeGfOuJ4v7Qz+apjhCFXJHlGRtjY90M8vjaLTva3XXdzqROV3YOudtNCjkhIZbWiUhnYLFYOrh9Z2Olzxk47d+4edsBLBW5R5gdTG9+7RW8O+737N7ssPbD8Wz/wXPrWO89/dJk/XXnTj9279F/+Kj4CbAdjWaTg/T8l8aUx7VTWbkaT15YXjlRNE1oYms6qWOyMNbhtb3clsZQttnbWC2zk8X0aLK2tObExRlsdpbrjdWbt6aSFd0OCgAzxDb6CiwdL5ITKKtIQkCUCIhgURWYBRY9Tj7WHTIhsRoyGSgaNglSbFtgjqBgxBoyEgnJkmG2AhxiZADLTlDYGEKNKpmhheegCRGDpCQkaMlqSMIURYMXdGiY1DlBRgBrMkiRIZKRgIJsDRRRCRQVDEJK4lPj1ftYt9IKK0ASFjh2zlpKTFlRhFSn1scU2Bkim1JKGg07PJ5+1ZSSJcuGLr2yu3/whf7yysZG98E3PsL2XLButDcSu/XYN7727L335vPdEycGn/vYn0rdXnjw3OrZ+/velNS4IhvujnPmLLOd3mA3hbQ8MG6g81bIFd11E+PsYH50MCL1dRhidy5xWihllixbWchNLRMggho2CmKNAUEqyDjHgIyAphBNmcuSJGdcUmXOiAwTiwREyFxOllGA2SCqKGRZJoIpagoR0RkkNQ6INaQkiSkYS51eijSb1qPKh7pR9RgjErAKJZFKQk5MOVV1jSrNtFrKOrnrTetpaTrz8dHBwTQlZWZRAYhtDEkyZFVEm3EVPLeLEmwymbN5ISERYpJkHJnMgKOpj5O6SQmMul5WlMYmzGIM6gVFtfXCxKUFJUDMbJa5FJIGEQLO0IKEIBpVUHmh3yPRgqjSaBRVMXlxxjYxskEkQwoSjiumGZnFqHtKGiBlzjljCMkBdthGAorQAkQBQiJEw9yyIIgtnClcmZs6NpFhoa92TJqirwIYsAlVPaNYMHWUCDTX5CyRzaekh9Obp9ZX2xhmwoIl0yCxNoqm6BrtpNmYHIAfz2qdNCsnl1YzTBpk3jTOGmeUVWIazaPjbLB+5vV3XvqKp7DZhdVTa7s35ymISybMq5d/7483X37uxGMXektLyyfW7D1nL88b5Dpf6Y52xlmnGE7qrMikCep8S6b2GivPJaw9tL4/vtNxdqOPFz/6m2vfeKqzsVnvTsZPfQT6UlOkCjM2znsEcGhTkoTiY8vGsWXmAKSNRL/ZhXu2xlf3uQ4yrftsYsDZtFnp5gnBawpRoTAm+jLnpmpnoPUS+CCDnMLcL5V5r8wP5y10yqapUJE4aQm7Bi78wPff94G/CifOZZM5jg+hLDubzq0tw7sBICznGKa+qU33r/x4/dxzna0L8PxzcPNJeOTB8MCbm9Hcnnhk7ev+0mT7CI4mrW3Htw7c6gNiV30ofDOrYmXI6v7B9NITS109evKp6ZGYTv/w8LD/xgvZa7/dP/ieKuXo4/Lp+1xnNL/1UtKGXLlcdo9u33rl5otXD7e/6Q0f/PLTL8dmNmM9+eB6kWcq7fpW//3vfuPn/ugzK90+EVljDBOTIcps1uOio6llCIIVAkIUVEjBk1oQRV7ca4m1hggJEQElaRvFMrrWGwtkDHNWdApPWJiIYJ1lSFJmWLQ2KpRW+pmxoSmssa/Wil556crZ977jzuW7zRe+1G+FogrBUd3M2z1bm95SFmTC08v141dyoPXU7XXug/WBjnZNnFUH+2fe+o7ysUddAULDkdz04x2spwSFKOehAS5FrGLmbJJ8jHItXwbsgAC0S+vJk+3kCR0YAtcwMs7j0RN/cvLbf7DcWsl5MNybF/lAAQMmNGQdAXhBMp0CIlR1wJWB5t0AMx30QY3OK+OT1CFhGQE0UdAIaHTncKksabpPF++SLTCXnc5W5y/9teGf/DpMjrLRaK4uO3tfdiqfx7bprD704z/W/dCvvfDElQJgZaO3O57kmQmxVUMqNFjtEztDMKuavMhb42dtM+gNNlxxeqv/+a8+NVhefcfrHrl+63rZza5e2w6EsRXyctqtHk0PyBGq5p2smk4JyBEZMoUtJ8NpSNoanaJ6MEWBw7Ya1kFLaqvWAQJCXU3KznKv15mMjuZt083z1X5ZJ19pWu7lJWLwaRojcJbl9urtvfWtdWfriHhzOj9njCMyVpVk/USnSvbK89uC1hgTWg9Mp/pdaaHs9rsrq8/duRMUXZaNprNpLTk4SWANDEN64E33pjpFgs1Tqy+9eHmtKGx0nTyzhARu+85wkNn1jdVJ3Vpjs6glkUNbN2SzlUlDfnd2f3fr7sEUC1OC2l7pLU9Gs4m2N0fbayv5oGJ2ZrdKu5P5dC5+GhDVqu2l+IZzJ3vd7J6yY88Zoyk2s7aVo4MwGjfT2XBuglHoGG4iBKRDL/v1aLXK2ioRYJ2SMiFifTR/ZMtunRy8OE0zARj936IitmINMIkgJIVJ22pqB3lBqJa4UQltMqyiNHfxJ//hT3zqD37pD/+3j6mEh+wSnXqgaa/xaD449HDr0hN/ePUtb3xsqHB6q6x2JgaBuaQgN6Le++GPjHrr8+XVoZ8OfrQ5U92efO8PtHsN+eSr1hEbSwSigEoEaCHgIHm4/ArcugY3bst7vqPzze+se5ELv/2FZ1ZNCQlB1GUZxNT4ducdb934Vz8z8XX9+WeKj/7mqovw4l0Ek1kAYPRNirF597vlx/9ydeZeylfwqUv8H3+dR4dlbCQhc0fapJlQZhpDy+/9jubuJT68medvqTFrgu2/713t81fLoDmyD5RSJIxKGWAiQCA2TKmNKrLkQnjiU/4FM7pwtnzve+KD96tBu7enH/kk3B5RDMxGEDWJSNI2kkVkSKEiR1JhCBGrigWjABOrb9mgKAQjzamTbmM9Nt4QcIouy2ydwhNPH/yHX9kqB7AAKjgXQzCgIAIaoSQjCqipqsFmRLaNkgFyjIAMrKt3Xqx+9ee3vuuv6/Lm+MO/efDb/2190IFQQ70wZSkQQlJgAlCgdrI+sN/6HdM/+sTKrZlrEZ69AcCLNCUbhPGlz+o3PXZtOlyBbPNH/trVX/p3q84u97KdYciN9lboyu36iVduP/AD3/Cpuy/9i3/xiz/xvT+4atfcPCEda+uRIEiMAooL1BKISK87MOi8TzFEwmid8W10JvMaomoSyRT63eU2eMocSEIVUjVEZZZP08RmDlVTjLPYZDEPUXtlh1Cmvmn9cW66oMsQEyAwoDFkiIlQAZKAoiZRQUwgcLzgV4XFlIEgLqpIiy1JejVTAhFIAkSLOBFUAQUSHkvQj/vcisiw2CxdhEcACCL0qkxrAUUCAEOYKcRFpwkXjRMAXHBu1BAuXGyyyINUCI4DIgEI8mckIoWkxzkUvvqHH4+80WLejRUXPjc27Bw6RlIJrTReay+KyBYZQRIKgIgyoUY1xpRla
YpSIxg2Li+yTulczszGOGaDzIhAzAsUDxEhErAxxhwPsjEvDmoRv9Gi/xJaRWS2qNC2lUGriRCA2SpSFABZBGYkAkGhBhUVZ8kYUoUkkATaIFGUmWKUplkUrJQAHC0Mb6qAqiiASYBAmRb4BiDklBKiWEIUWXRMCBbnBLya9nFUJSRUpf9bRoQKx8IzePUll//rXFM9risBACgI6KvfAQnUAKgIEbGhRd608LAZIgAUEQCMSRe0qkVNjEEza1DEJwgqhoAAUozOsGNeBEoJMOFCBY4hiagQgqj5v85ehE7hcmvf/Oi51cJJ0X36hZf7RzcSz1ZPrU0Ko1snhTqzg53zg6Vw/e50OJvFZiF+c0RtHWIQD5jbMoBFDRBSVpQs4ZXPfaWzVmx/5ld7D61uhnnlkwSZNZ6tmK2Njc3l9s4zze2rGFvl0m+ccoMT13/7l9fDwemsFFv6OtWtwjKsnM3D9jTntETN9Df//RJFaxFToa6zd2uXjK1DQy0ZJuxlfN9bx3rP/JXdpWqakZ+WvPqP//ud9QsxjQZHX4TLLzxohrOP/a90+b8Mv7gNHXDdnoNp9dHf6Z5/QKd7YhGQk0hLMvq6t/T/wjv3//gP3SvX7Cw1OdG5JbM8LU70w9XZiZP9yR/99uClJ6YvPL2c20SmqdvkG5AFtMotO+7XDdzd33lq2OWsPL12aGznzKn56O7Eu7XzF4Yf/93oDIrZmzSpbUxb+aNrvfd/Q7FaXH7l87DzfGd94Lac63TmDfv7T2+cfR3v3ODpdDj3K60PzWxnp7lnqYwB8MqLh9O5VDWIqYcxYmZ7xTwSszus69uVYNsuO3vpzngeMBWDa7emd73pd93pjU6F2VfvjHeRvCKLQoyCGnJdXe12wM19rBQJBDE+fE93q9TbL16cTKWe062DyqMrkFZ6XZaEjdTj4FZyFZukeOD+R1e4t314DZ1rQ5AoSpyiApi9nakEeujBMwc3Z3WsGZHLydKp4mA6qYZ+fnv0xvdstrMT1y4dbDbu2ou7QVI7x1uvNHpLr7906MDbLOMyG86OVosBqdnbPmKdMlDku9TF8w+uMMOJsxsrJ1eOpgfjqZ9FjH2Qtl6x9vBo1OlmkmII0rTHHsAkEokkJkEgw0RICgoRhQyQCjKZJCAxMjsCIMqMNa1vBJNBVVDH4DEhGxU2NifCCCCgyugbAUCGhRkVRSGQpBQ9ShvFiBpAI4AhJaJ5G8gcI+7JkhoSVVIwiqCoISUAYDRsFNAQRcEoAigikSWmJG0VUVzwoqqiWlgTU6qktkVpDIHh0CKxiSlKipadERZRQLNwNjKRSELBJuj+9ujg9sF+zw66a2941zs6PWdTx6xtTvKVvd12xZSz1nK/f/bBC0W/56MklHEVHZA6Z3qFV5yOR7Yotvd2fBoh+CLnHrsSqMw6myf7s/G16eTQWc5M7pAIEzMTADIaJlrsESGJRiaLxiChsY6BRKJ1ThWQCdArMYGiMdZkIEk0Otc1tiRTMFKWlU0zQ1WyNvlAlnMyoAYgoWpMohoQqdMdcBAVNwmwt79bJ9vUnpJam6ESigFIzmiOWCDX3oc2LRUrFizEWBaZpqYe1e3+ONVAYkKUIBQSEIJzZIEVtcgdCqJixsYQGNIEOG9jllk2Yh3HEGdHTTX3qWFyjfqCskzYZP1OHAUJQRF8TC1ykedJgAwba7BBRcBFcdhw8F5IRROoWDGSRJRMAgnJWMMAmsAC2UUnViX64z4FiZKKiMYkbDgoLobYrKEuWkkQRRIIGuTjAXphVgKk3EZrxZisUGmDtAlhYUmlJqUYUwkUGEvDFiBESQqOCRUMavBpNpvtayTXBZuRaMUpcyVzAYzJJ6HQhEloZjnhcHe7ly2v9s50pA/skhwm9UiafBWTJDCHWpx48wdweHD36PKMiu6Z8+ngoJnMGJwTs/309b1L2w88sOWWV8uyk2x243Cet82cE2HyXZ1i2zbtOLqYQjejpYzirL5966i4cP7Cd/zwk7/2/1tpDuef/KV7vv6tt4aj0eT61/8v/+wLtxqu0+6nPnXmxWc7c68xAJBq1VpIZZsXrhmFvABdXTnxoz89PvfwRhUnH/m17T/8WCbKiEWeRcSudd7Xzhq7XGYURsO531y7uX56+V1vN4OV9tpLxc3b9bWr3bI99fpH4ML923/w+25cVW3cXd985J/9Yvboe1rMNUTuD1y3BAmQOi2VdZh1O/00n5Gxkid64xsHb/5GaBCczO58dm/v5U3jdbztxjf0Tz/UOdyePfN4hKLem6wuLQ/3bh79158/eUrW46HfuxHH9fTxZ90gDe9O3NYGbJ2ISxeK7/7vqHyYyA6PdlfXN1zH+rbbgPO+5WTa4fSTH/vQjWtPH1WzD3/0s2urKyeLcvfo4PoXPnXzxnYq7ed+df7ysy+vLnUgoIpqUrZsM5eXS6ZcDZRxmKd2RGRYIcYkqm3kJgJHQkSnJFAC1sBGMImKIQNCEaD1ArPWWA8U2FhncjJKiGScVdYUSwuilDE6Sz2bOU3HqxGA+MKVolxzf/Th8oWv6rCdh+7FztLgx76/dHH0ocdh9+YqpBCNrfIw2ldbhdU+UGadNaksCP3uncnH/lMrIzfN1mNfE1G+lJog1ZApprryldFQgWkJ2/4zf3zrB/4Uzqwc7u3d+0rbtUshxYSprRuWiA421jv2j/4rzr44b9q61dVg226vHo1UomFkYyyn2KajJi5j1enlTRrHYQWcpOzCYKs9uF0MxyYT1BoiESCavOOKNxztVz/243rj+kp0Ure3yXT/9k9dW+nSV6A3anpdt8TZy1/4xL1v/0bX3I47d+8Od+6+eNVb1z+1fG13bxIStTHMq3NnTwwnszqE9f7K7sGdpq0D+H6enz+9Ppm1mabr126dP3d6f95+6ukXkgbajQiQk2ubZqnv5uO92ATDPGkTWzSm04awtLZBrZnOYl5XrsC+M8AqKK954OyVq9cUWyQ+sdVJEubifWYsZvW8bYK0VYzabnUsHFQKlKXEXbc/Hi8VvcNJbBAGva4F4wWiaIqQjaqOqO32hDVk06eu3W18ZSxhTu28zgeD0rosNxvrvQiNF5857nfcwUTQ8sjSm15zX72zbw9bHNcAto6RTb2+spqnmHPBkBpNcx/K5cL7GAyL4c7K2nD36PCoOX9iaVS30xpWC3v+wla3Y6/dmJkC3vDA2tde3O11O6bs5ppXPji2LnfdwkpPN0+sHh4c5AQY4qnN1XOrG7Hx0xBHo1kb0uGw6nfy2bwBtGvrK488uPm55758otPbG82cgU5uh1UbhA8bZbKhTU2UE2vZ2vrqK69se8juXh9O7oyy5aU/1yqSlFQ1eEXDzhIaBs6SJIeUZVnTpGiUmVPSuYdLu9t/52/843/13T/6+H/7+PU1mHXW6zN+SS5V23B6a9B95ztH66cv/MAPweb69F/+THHtOmdulqezP/I9264XyhXyYauLcOuFT/7Ej77x4sQoBMOGWdQErxYFco4JEqpDhSTABI2Hp/40XLtir34pO7/WW+uNP/UpF0A0RWFj
CTM5srj69//23vI5MzzkM/f2/sq3Q3UI7ktw8YhCTE1gMZVxSz/807de9wjO4/p4Mv3q48uk2Zlzceeua5MCcWFFBI0UsYLP/WE2HvH9GzgZ5x0KIbqts7rccRrbgwazEpNPQSFUikDMoKQpEsTU1FZFD33W69fza0vf+h3NqotF6VZP4Jee1p1X2CZQQEspipBgF8UkRpR5DVMBkGStdRa8ahsTJk0i7IxxtXr7+kfgzJaKsKE2JYTQfOETzW//yoksA0kKqowIRAqAAjYCBx+TQwtE3CnUa0qaZ1ZSC4ygCpJQtfMnX6m++FTb1KVv83wJhmPIFBjIWghRYiC0AAyhaTqY/+iPzt73nd3+vTs/9/MnSGHgIBgAgeixheG/+3fn/+I7hg+8+WjJDP7eD+BXPz185qWVQ7Vg6llqeVZ17GhYfMs7vnP56J7/7f/4+R/8Oz/w5rd94J//Dz/bG+RNK20djHVRBFLSpMF7BUZCEQixbtqaCCzTdDI3pkOmADQ++sw4L0jQOmdmPkgCQldkdjwfKykSOmerpiWgMuuOZ20qnVDwkgwTIr96FSiqEoJldIjOUu7YAoBqEBEFUW1SCou4QAFQ04Ijs1DQAOgCa02gqEQQlYIACCBDNMSkgAuQpMox13qRIhxnRIvtYFUFpEXbZDGQhkiiaggMMiJgFFAkZNZEkgQWfMRjhjIiJoAECEp6HGQAAAgAI4rq8QyTHkva8DiYWsCOFxkNACISqSG2VGTGEKCKKMxD9Au5IC2w8cfRAwAwMzPbLMs6XRCgBCbLbJaZPHMuN+wM8wJURLQYumLiRYOHiAwRLGqFrx4CAgAjJF3UcZDZiSgAqmioJgDJOvY+CiRmRlzwnzGK1gFChCwugNsAgDEJ44IXAkmx9WqURNSyEqglePWhqoIeVAEMgiLyq5Y6VQJUICVUQSEgMiYBQIIkx7NmixEyhYVNjJBRj6lP8Gdp0cKBttjyQpQ/m/xKSRagqQXinACZFtDqBQIbECnpotyGispISQUQCVEkMVJGaECNKiA4ggAQFKKoBR0gOMIFgpuVnEFRJAIJuigxhRDp1cyqWxaDEt782ntywNl89sKLF21ZtLvVysnSk42rKybPbtdp6fyjB4XTe891ox6+cGX21MWe5WUALm3bthiB1GSZbWPSrt3xsgRaLuV7s/rcOx8s3/n67cc/Xkauo2+SAGnav4uP/wZffRZ2DkPLNatPZu8Tf3IB2uATFdauutHBHO9Z5vu2Dl54ZhBi22iQZJERISVSAoGU5wSUQmwdswPYCKPmC789evkL98xGg63O/q7Xk2f8xmPZZG/9hc/Vv/dZsIWvrAWYP7HdfWxr/sH3Dl++Ve0erD7w5vTiy/V4bgBYBESak+tn/h8/s7+0fOq+N89+8af16pH0TnTNqszv+uFRp+JEKDt7eDDcMLWHmDBlhZtPZsVybtcGO3uzte7AT+q4HWxjJdPp3pEt8uHv/e7ma87srj9q73nLnZUvdbvY8/Hw5etLyxn5+vDuNr13eTToTx8fbvQzWjrVfXC9a7tXP/PK8htWh931u/tf3jrzGmg2Sukdjm+tbPSmR7Hs22p/Ntwd2l4WPBjgKNCqGc+kGY2Ohg0MeilzT+7PKMvmVF4ftnm+HLOsKszT4+mV0XhiXBJFTWW3SAxV4+9O/SDCO1574XByeGdW7x3Nq0ndydTMpJ2EKhXzKpLNYkptIMXUzRkSLq+sv+nd73/i+c93bKlttlc3zbxXrJ6/99Hlnf3d+f6R87Myd20rB3vjLx9VEWhlsxgNZ3inmUm1ejbPrQ76LfjWrvfsMMSpPPiGFVWzv9fubFcZFjBDP2+CYpG7ze7Zngz29m4P8iWDtmMKtEoW8YAPpkfbV195rmtPbfUv3HcSq/nWg1svPP5CFbT13jlX9ovRaFaUDqAGgBCT0iKgJyYAECEMUUiBUAgABGihhUpJidFYRFdYF8UnYyUF1GQBRdXaLCIKYFD1EmKMQGiQiTChkmFAiil4jbVPISSjJDGJaITFGU4oKapYy50yI2cFhSSiakoJhZgsiWIAlYiYmFgJ0UlKEOdtXafUGkxZiiAxMSqSTdFriswxeBWBiISAogqIOR7PuaZX1Z6Ki1+VACAoENBwFj/6ex9/+mtfuf+ee5Z7y2uveWxztddZXotNdenuXqPtA/edXz97741btyNG3/bGR7OD/bu3D/ebpm6q8NADb1/ubW3H5sSZVZ+aLC9Ngs3lc5df+pPabptlo0wWaLE3ZY1RAGIgIgRlMoBoyBljCIyoWHKMrOycyVICw0DkmFhViZxjB+SNsXxsUkJrnXNZ3c4USFQ5cyiEbBQ42iSoGoItS/AKamxEwvLW/nw6JQEmcESgQtELSSQDmbWsGr23hhuNoQ0I4MqsCk2kFGKNbCVK8BKDIrmMWJMkFc5sikl8co6YMDfMRqKvVQwCRUnGuYAY2zQb+5goBoBa9vfrrDOAPLfGUgPV4Sg2bbdvFFBIkmBSTaCCQLjYEUpJEzAJqQKi4yopJWgbrwoMGGMwjETcLbIitzEE7z3oqwNoCqRAi/MgpbaNkDvCRXVNMuKOtYopODKIPqm0ygZsRlnPcWHBoBGLpAhKSj4qyEICYtoYMAGAIDGgGMQuGS+SWVaRiDCpG2l8kXecAJo5AERjiftsyDgrkxjmh5O2WlriQz9xxjkqMlqTNgiyMYJxGOoggDG1M1sMTr7W9teevfK1+zeK848+xKsHh8/vJa/GdMcHzUuT7bw47GwU7LjVRtey/oOnlM3a0kr35CBo1dR+tDuaPXk5m9VFALcTTUEvPX/z4W/9B9MP//LVrzxx5fBa/sCj/TNfd7t8w8m3r7Ck+97w2hf/5nedKXupiYdgB9/w/oe/6wMh6zhnhl/72vjF59ZPbOb3vKbqbWAJ57/jb7a1P/r0J4ixYNPM6jxXx5gEw34yORZZLwzW3vcTf383L4qVTffmr8ujzD72+5PP/f6y9qrBGf66r59+/I+b5fvf/E//j/Dgg1FtgRATkst15ODaS27t3rpYs2UpMcTty7uXnzn/9d8jmQlR4lJB7/xAd+u0XV9OaXl5aXjrq5/Zvz3p5zZNZ2GeVu69l4zjK9f1c0/rplbYJhP4xGn7wKPDrs3vTztH862H75tP91y+efvZJ4q1e5c6PozvCpPN1s8/8DYsBsGb51585td/679srPaOhn7a7vZ7ZjMPTBRiYnLDUVQf7ELRCImIEySTmawsXXfV9DYRMpnsU6zYGfFATMo2qq3VkDoQXlhBoiRRJgYriEAJOIqGhJygrduMW0XO2OUujxoAkJSdtSW3JmNDakBIU5axf9UDmF/+8pMf+q9biZY6LLWOi+wN//PPyBtPO03n3vetze98aP83f6OcpMB1TGydkflQ1UhU5wxoLKqJuaYDo9gqpRTEE5M2Ffo5ZBm4DmZ90eSldZIGhO7aXrx087RSiT3BLMVkDZI1LNE4QtNbq3n/V57qnz1NhcdOlm9siWhq5ggETVQKabmvH/wruy9+fumlS6VFDA2F1nV
bqdpaU8yNgFhDEhIE9ZigKLNG0ud3OGHi2YTarb/7XYevPbnaWd2rq3qv7hWGoV797f9Sv/wReziFdnowU1peP6yrpfWTO+O5y2xhqVVpfCAlCMSptFySUeuyNoiPvg31hXOnBs498fKNpo7gMGrSFMquoyytncgJsU0RmUEUKWlUUWWLDaq3xrvi9IX7Xvza9fVBPmtb1rScmaqqkFxb0ww12DBvogG2OVSp7ZVuhrpfN6dL+/D9y09cOqjmstTl1QKWVgd+fpBiytB1cutAgEzR6+2OqtOZXVnumoH5va++1HrpWOdZukXRKfsZ2bqJy8t9o3D77gGSbds0DLPclXUTc4id0S7Ws9Mba01Mu9N2PbNSR01kIGvrkGfs8v7NvVtlJ3OZ3T6aRsH2YBxjOn9i6fS5pRsv3e2W+I6H102SZ6/don7vaDq9entsgSj6e0+u3dobqmJM6gEPdo9Ont3UBBBo62QfgjdR947GuwfTrFOAwlxCCzBuQlvVQetr06MvXX2x1yuKmDCpCtQpdpCKvnHGCtF47ldcNyS4uz/KmO4ejR69cGY292Mf/lxUZJxJHpAViUWAEhrklKROEqIXRRGIIIBaOE6Cn3jqyS9+8923fee3/+r/5+fe8f3f+G3f9j3P/O8/o5eO7g7H5fJS8dY3vxQOv+7Esj196vDZ62e/+4ea3/zl0VOXVt/4vPX76fGP3P21fw+X45sbsB4iO7QYJGRAxjAIIqYFL9ersgKnBTOWy9EhfPjD/cI10S8lhswgGWTS5IOksLHGy2ftXnVif7/+6vNuI5epjb01OKvu6m1DCaqYG5IbL29u0fhD/6n97Ke7dXCv/Xrzrg8efOK3ujevmBgXwzvQJEw6/s3fWv7L34kPPjq7ctWdPTsZzgUEO0vNzqHpll5S03qjBBKJQAUgpeBry5wIs36/gJmwNSQwGsn6xt50toxLZQgQEhOnkARBJJClyAqWUkimcCJCBBCqGIMTQSDjMg8tCEpKjURT9usIbYKglJmiN5mlx7+Ih0MUbttIhsAniN7aDGILlKq33w9vf0Pzhefd05fyeY1kTQJIoIhRiQGOJd2tlBBLtpATcIKUQBBIQQKAECP4BEDgePb/p+q/oy3bsvJOcJq11jbnnHuuC2+ef/lsekeSpLAChJNwEioQDUgCSVVyrZaqq1qqaokhpK6SQJiSUKlkEQIhrABBQpLeZ76XL583YV746+89Zpu11pyz/9g3kmLEPxEjRsQ925y91/rm9/2+83X1xieaFiePvntvGuqd2UpVQNMDE6iB8L0AV/7CD1z4iZ+5fOYNN2T82cvdn1hdr1ga08k9oy9cu7n2pntvf+D1xfMf+PwznzqYxHd/04/85E/8BxmXReFj35okQe+IU1ZTQ0T03Papm8+O2kWfk4BKhpI9o6VmURa+LILzziQ3uZn6lRXvomGTes1WhCIJFOz3jmaouDGZNlnKcemcEZB2Uk9KXxXH3wJGBvKonrD2zIiMSoCiFmXgEw0VyIOnSIc+rGPmaTQiRGakgXWNogoKjABi7EBNUUyPK9EBBxcLABriUFJvdtxjBkNZPSDg8RkA88yDsOSJMyoDARihMcDQw844mIsGeQLJYOhbQxv8IsdhprvZMyQEAgQc0nBDQgoVdMC6MoJnAoelh5INRRSs7wfkNxUORbNjEsUEmLOhAasG730oyBVFKCAKOcfOh1CGsgqucOwBkYhoWMczE/FA60AcHFR83CiPhAimNmzGGIaeC2cpOmYFaLvOjEPhYlIjygBf6hYbLooCZh1wTUA44MdB8/AoITHMYEhDKtDcsWSnHsEGBxjAkCwjNMXj9noxNMjsaSilO64nG5jvqMfEn+Mk4HBWDf/QDgQAxnR8dcDMEGgY1wPdJUof/59E+KV/SIhZBwOaDqJkAjAzIjCTIc/mEB0hmQ5nM4sCIJoMhw5qfcqFw+BITZHIzKJKQSyE0dDAAEHvztDWV8en1+vt7RmgOoY6hMhSrY3HJ0906CFgOSlHU2IeSXBuWs44HJWT1ccfC3f2b73/o5u5gZgqzxh7k8QmvbrMvEwpOwtTbi4/n+GIEhN4RKwDm8sau5u//eFCsytLLdzJx9+0pd363uGS9h/87j+/87EvLOY7V5yv3vG+s08+0H3205gZkVNKiCRgPoScbaimQwD2QWLOQp74wRD6rT1KfUtV4So+2Jdf+LH+6U9XM9gIk7hkYmaXo8FW9XD55T92+NbZBPembXfnr/zpij2RsqFFbZgKCbdf3drY3SsO/Khahfmh/PTf5vnlE4divkbwowK0y33tqPAJdJEaKLAs8WZ/tPat7+sbSa9dkVs7w2PBTBc5Fbev0+I1Lp6h7WsX7bC7s9V3oe9yBp2ulqD59tVL1aNvTotm63Bv49wmHPT1lMZT6vq9FasOrx+ce/zWW8bx6Nd+Qm5d4bmA8OF+PGgjY5g3XrNxzGNn3c5CAJKQC+HGopu10Ad3pGnG8WDsJiGkBFvLtkmy7Qo3CmOH3WIZU5szdokchlmrn/jccwo6M0hGRT1qG9zu5mc3x7WVfU7OxOesnWTT2Dtv7uJDq0fLK2ur9Tj7+WxRFMXJE2dCvXppb/fUqentw535Mi37XI1HEsEgdb3cud2BEOn4hY9F9j3K3sVz4zOn65MPlo8/trG/s+wOZnu7R+fvqSnj7ZsH0+kYNOWUHce+R4Hm5MXp7du73SKdeaA6fbqKBr3hwVYahyLvy9Wdwxc/uzPeHHE4qELhVsL5MxdjK9dv7uUkhTteEcUoyZQQ/FB1SizDA8oGhcHUTECBHTEDAIEDBCByAAACkIYqVgAgICBMqoDQqoiZJyIwQEBGQwUTNYkxgaE/1qYZHIiiZFU0Ey3HBVUBCodkYNbGGIB8KCGRCOQsiCY5ac6+CDx09eWcFr20aJFEI7FHEAeAMQZHkk26GDj0Q3WkSM7gEZVkcBMhWgRQwKhqokggWTwzuSEvzbdvH2xf3ysnK9/xyFuD94d7Nz7x/g899YnPKqSf/Xf/cfPkqZ2DGVA2UBF44PzZEyfrsqr9dPzUs5976fatOc+/82u/+eRkOo9bT1167t9efnl9gl/7Fe/OCdWUkBXQMSMDI3rvB18XAmUTdmHI4xuYc8zEMGR2Sck5T4EoqEQzA1NE9K4IRQlKwAHJqVnw9eDHLItKooiBsSu4MFTByMhWMSgsFjFFibkHBhRFE6Yh7g2Dnzipy9lS23sqkqSYJJnFwyOBLkyKer04SkswoWyo5tgBQMyqaEJqYNkyq4t9AtRq5IfFRVGwC1TVwUzArCh8YiE2FE2LxsWOi2LWpb6JlgQDJc1AllV7UVRDZmbSnFRyTKIKQENpKqipxJx7Sz0YIHoHpujYeyqYAzOA5Zy/VLHhCdmQCRmUzApEAkuSnDGIMkJFyL5YWuTAmDVi9p6Dg3oFRzVylMKRlqytJAQTFDNGtJQdgAdEHWzZw4gICkZUNbOcRBmr4Au01M1cUYnsZa+Koyg+0LS2gwgYCh5Ni2oCQNk7DsUq19A0e2idQOIguelDDX
F5e+nSydUH7zsP3d7Te74bF2WuQiMptp2ruFWLfWxuRTcKoxNra1/+rie/9Tu4LNFcajsJGMbl8nD3qR/7SXz6C6mPq+X48MZ2++LnkTZbmlT3rTi/BILNB554+cVnVh8b+WXz4X/+Ty+M68NE/fn7T3/rN42/4qt3gFOfF7F1b/7KE489tvyD3zh67tP+vX/ytdtbp6fugT/3J19+6VO2kK5NqyvFwXxhAiubNj9qVIIvyN28Nv/i75Vv/dq9w13HlFvdeMtb7dWP4/6r08NifWPti3MZPfIonjodY06LxlelGVgWTPPX/tO/OPWm906+60dmEUhS2t8+c2LTMkaI+3uH4411VinOnI/Ows6l/Q/9R9jeL7pERBpjvVEfNUeuOTxT82ptdVUf7DfzFTz5td+S3/gtu4eyiXju8tPLK79P+69f+cC/3rzw5cVaeefGy9O1NajWbty8AcUKLftOmpef/9zD952/cnNr3uZlh5oiBqCcyawsUhl8MopJFZAAQaUuQzALGFyYZD/NUCIvhzknAvjgFCCbpJSdaE+FioEFdXWm5dAHg0PaFiCpQCfMyLQcsQ8+IPsC2dAsg3N+VFW+bz2rNxim81+qrXWLdM6NfDy0HPykltOn5b5TLeaYuyPZm7zrofrl+/mLl/p2mT2mvHTE2kQycLgqSXiWiZi9p5z65kjYsgiiMJKlTiSRF8REhdMWYsxkrvZTCh4SgqllVc3og2bsU4xdLsiduGft6GC7wNKfWDsC6XJTquQuF74oQBfaV08+WT64qTv/B8ySYA6jglPfKR3+se/c8bcnz37m5LXbVVGoEjHHtiWXJ1M2rDFT8vXisbccFmHn6nNrp8/7rUQFsOQNk/jaYqWabt8+1Fw2I1q799SJC2c+8bnPevKUEzlsuu5o2TPi/uEN1XhqdWReZd77gLPFwTd+2Tf8+vs/aA6nk4CF29mfp9504rYOlmUB73jkoStXbwqCIAZmMygdMUID0ixmjtzNxZ2CnDQ9S6rZvX5z75777r+1uxski/VilREY0v7ywLkQHFufCi4XEZbbzTLqxmR6+6i579RGNBCPim4JKIcLYt/0ghgL9hsnNqLKpdfvFIRnT03bo/bC2VOnzp1+9cUroSgqKgpfxLbngGVND57ZiPO2azBU8NjFzaO9nZPTzfHaiU8988pBlzuLGNzG+traeDKfL0aj8qDvysJJzAvsp+NxTpQ6CTmfmYSda/s+0+n1MRLuLrsj7y7vz9e9t2zBcRt7AGCBZdeONkZqkMHFg07UOdXFogeTnXmDhdcCONCJyfjS9jYEt79oyDIDTOpiKUzm5vtzZ7A+ritHK9Pinotrt7eO5r2sOKq8mzfpsOuL9Tomu7G1f+Lk2vzW9h+VihhD6fqUJWsCAEVgJEVVUxNEDUwpaeoysSjQouS/9o9+9N/82K/+qb/2d+Ly2R////7Mt58Z9cv9O/vw3q99bP7l7z56/Uq/+2Kx+1xVA7znGxcf/82VS680f+37gZw71Auty1LmymUvXCISaYQqlKnLSOhdADQVkSzoqFf1RJQUlIA8tFCah4DAXqL6kgCIC7eaRvyffm/aHjTPPO1f38mTwh7Y0Pc81L75rfHf/Ifx8hAcOufjv/yJ8v2ny+s3yg5gNGmvvJZEy73bPnWWKbfmCgZ2ZuSP9tPudV39qsNRUdejbm+55ifjC/fPnrnugMCgCCMRcsaWl96SGZhjHKwliKZK/ZHFvPeBD5SPfRfVI1vm/vVXir0jh4kpKDhPCkopZ8gKQuxQmVKvIOqAnKO+T4kKc15Shx4KFxavXNq8se9XRugPm4994MX/6788dGc+NgYAMgExXyCogDcIvnn3m8r/11+bn784/sbt5hd+Pv7mh6qdA8+iSS0rORJR5wxAwQFABgRwBJChYDABVNBjKQEKBxEky+a739GW45GPeP/64//0R6/+0F9dGXqcBKAnUAdA993K+3/5fzz5FffZiSf/xOZpvbIVSzwo4GCkN958YeMHf/iv/5X13/23/2rj4cfSiYfufeSPvXrtZ2BxcBI3yHA8qpZddogOUI/3vYKgRVWI5F6SAfQpU+XKgCo2LkOfkqkYsSsqJdPUDy12aDKpQtMldD436Ipyu5kXRJUP0oEU5cbGdO/wIN3NCznGAjEwB8JiMPAYZrWkFgWSaNYh7KMI4BCzAZGpmg25IgE2QzJyqAaINCSaQBTEGAwGLjaiqTESmMpxyz3e/WUDUluPo2lwbPsZ2tURdZB4GAO5LmdTTGaEpGaIQz0amALSsYAkx8YcQwAGy6aIiHaMWdahimuQjgANAIEAjJk8M3tyBRXBexRCTNmiDL4rzaLk0JAHxpGoFkYVM5oWoWQ/qqsaXM4qjr3zIRQlkw+hQCQcmNZAfAxTRiI+RvAc62bHyg0QIBqaIyZVAyQXXAZquzargvExZhyYiTMCECoY4pCyM0VQACY0OIYI6XE3mRlA0uMSOzbMpASAaERECMMK1gAEZNgVkEFWVjQH4IdhKNzl0ZIN1CQemNaENmxZTI8zK4gCRoAENuwaBxSWKqApGupgDgKzobLVD2cFRG3YkRqiDJ/aTA0UUA1MjkHoQ4aOABweVxQP0ltQEEC9i0nqRBRBRAkAh3I+0+OvmJkNO4nBVcRKGdm77YODUydXT2yceH3vaIRuQSV7Lh0EQEIcr067LiJaNBmdXxUmeeK+J973jvT007c++HFYNDxvUVib3qOrR7Xroa4Ycp+efj4999x6cDSqDvaW5SQER7OlGLhGnC3lobc+AtzPPv65R89XfO8J2L7U3Hjp4OTmme//H6aPvW3vw7/oDtUFzwVTSVkVFPukwXlFkSxA6utKkiYBsJDnAECOqr5l5z0eqnzklVMwhkg9+D6rI9JkjFq88mz+d38v6m1aX7n+sadX206yx0BEiCW7eQzPvXIWG/no71T7Mx9oTI63XuUYzRXC3MccSkeBE+amb+Sh++onHm6uXNu59JoFukOi56frj7x98Z9/a8wEve53efXbvt5uXm2f+/yJlTL+zgc9916OUtbJ2Yu5SfO4nIusLtrm+Ve57yarwXu/uN01LRHKbPvW2nzj5HhtuTNP/VF/0ExG/qCTybjuZi30GZ2V5Hrtg1NPLCYLwy3Fa410tZuhubrYRsR6NF1dV3A7t3balAldhNR1fVk5hKSdxQZNUB1ktCWDKSiTq9z6tNTD2YMPnSwIFeqXbt/JWQtEJqyrslv265uTt7z7kU9euWatkPKilWp9rV5fm23faq9fFxBsu7VJPUvRhTCuqGmWkLXwBSH3naYFJubYwWtLfe6F7VMvL++9Z/yed90LJ6cHp9L27Z3yidW1+zc//fydqN3Ic4fSNDHE/s7sENH5E+7iu0/XuPjcR14RVzx4YfPm9YX3xXQaFkvZHK3vHu10B8laPZBFKHhSFbxSdcvj6E1OwsAGRkNw1DJ56gERQQfnrio6MjURcQSddIbsHMFdon3W6IeqdgcRsDPrVHpTBc0CqOocErPBYNtVVPOgmhWUyDGwZRErEAAIGeqgR
DJ4xBjMbWrEQkckAxAztFPatQ/McDJa7ITqHCmYIHsF7yo2weVEQAYNcO/IEDMCiwVHpUQ0zWDRDAEdABIMgXaNmhCxDZozBdIiVnWYDFYjQMQ0RNGJkwMHPhAjMpHCXuX23mQjMGFBh8ETBcJ5UNQFVRBN1CGiYwNQgGGYDb5jVFMBs4NWDmBpAMlVVO41Qwvn7Ro+949zlB/cOOnTe7V287KvpqicONGbo++MsfRm8mREH5iAqZVUyKjtFyDEns2gUEnbOlVU5BnWuGllmxELUEIg4FPfc7y+dzfdc1NWhNMt4cjh7/dWwPFkcHHPMtM6EUE2LXqgRyKtYBDZXjV3I6xdmCyHGonBqMF/GwKyeQTMJQ3Aq2qSkyIAAziA2hJgsJ8lhVBwtV2E6albHwM55bLNW25P1elGkNZoaW58y25AY1j7UR5cebUeloFjfudjmRTc+f+7FGh56z9uoXd++dujG41tH+NiXvGOyMX76Y79RBn3okYdf/40PerM6t3p01D798fscnFm9ARfuu2e3Xr5xvBFt1cbsQfp1EyO4qoPsLBOjmrUGCcExYdaQlfoOu9j0kpXAh2TQSZy3nZblkUkrcKfwJ6UrN6f3PvDAG7cPj+/c3PAKoA+/8/E3rt9ZHh7fPtzv21y9cj3G7KsgV2VUVBWjxFRNO1WtSmpiXh6uynG1u719slrfODhsU+qieOeXfRxPq93NSRewj0yFm7ryOB1PCw7Bur5frlpfBPa+xFxxuW6OU8xZsQg+IGOWgonIgLBPOZS+l34xbyssmy6qh9Xxoq6qS7s7gdztg7kC5S4Z4ngc9spQlMVyvl40N8ajScEjMepPFnETD45bX0zM01d+ydueevKpl669fvHyhUcef+tLL966dufk2q3ffNODF1a9NCDXblzfPrPbzNZ904xLp46aZSotx3VjIDtbk+P+6LhZd2Cjyt+8fnv4FrR9YmAy62NKXW/ZNGtUXCtl1cI7RBUwMTBAZ4ZZ2LEYqpqpObSs0msG58Q0pRhT6nvNaiGAD1xVmExz34AShYDKkhOIoHLsNUkSMVUjQ0aSKKKiGcgrBRxPiiamvstqkmOSYbsHBEBJFNBQFRGReLBJKpKJZrUhJ02EqlI6hwSIrgwu5+zYiUHKuSrZsqQkngeXomZRNSDEpBrV+l4cAhGSKapkQ0JXeMyqBCiqgzcFEUV1MD86RlWjYeJx2opCqIaENjiP8BS7RsxEjpxDYs/uFPbsHJEHVeLT1szgSiMjA8d+6AJg5BBqEXEefFEgBDGNScl5U2EERTs63j9enaxic3I826j249nzXpjUpRhNzTkvmIjYuVIERDMRAjrn/HhjYzxd0K07fdubqCZhQMtmlAdXp3ceAmaRvssx55yB+NS5i0DqBDyKiZppUkR0RBSYPUMGyTn3xkhQAwMFAlKMKTdtH/ssQj6UJBB7cUWoxjVEdYUH7khJTUWQXMGOXQjS9zn1hIQEBC7GPudMZqRCDFnVAOV0kEUAQICWlZBg+GhgoDbiF6Wi4IP3AUzOjTdEZL6YQ26UurIw9NiqiGhZVM4BmBalcxnUoPZF7tQQm77dqpmcmaT1vEdEM2tVS/al84GZLZfOERCYZhVDGOyymg0APCJmFQHJBgyMKtmKwoGiGt8+PHnswXPf8Xu+5mff92Tf9mgiEROts8SUVu3CH77xoieaVJO9y/dtX7gAjIjouVDzknM1KrPknHtxNRdTX0361Fu3RFYAiX0zGpc5NtJ1dHKwOrgzPnfPved3DpeH58+ce/65V579/Ic+/4sf2duoLhe1lpM7t+7E2M8yTsb14uRg+dKLC9zc+sZv+12Xxk//p7+/cZlwL+wfHUU3Pnv/o+PtmlJCqs8+/JVvfPgjWygF85ScNzlp80Lw5Bjuo53u1gvr/c9ff/3FB770re/59m/m8/X7n/qgLpqRY1fREbXb7zj/6Nd96Z3jw837VqGgC/c95M+ML/2eR+Ctz77yMz9z/NTz1EYr3DLwsWl368bn/t5fu/zbfscT7/3OzfP3vPbUJ1cf/1i1WFcg5WS8mKxuzk9G5x+D+x8Lk/N7j6duVE19U/TBbZypt0fLfnb9teef+fwnty9tHR29fv9jO9u7G8fr7vNfeOnK61f7tl2cHJegzaLru94D5SzBESAwApjFlEFSQdTHNlsHyMCeyIuhsnlkAu3VFCWljKo5ycg70YhSZhTNhlnJRCUpsUIERRUCGArLoqaIkM0MtGUqChfIFyjKTIIQuHDOlUQOVJJ1MTKCAfWZjk56RiI+dExhLOzGzODYO+dJTllF093x6JF7r/6CzK4dv/PSg2n5QvGpV+6NsEkuWs6oseulmUNZK4Kpm/Sb5Ws+XIXDj/2aJCz6JqeeghXiObgMEEpf7Ey73G4frdMnX+8drK9cgaJyokLOFQ5CWajmWe+zz90KS/LsHFMXT14Z0cP/4Iebo5OTv/OPdnbD9rt2/ePnte3gky/Of/19G/VE/EYuagqLuFpg7qVZb4X89L/9qZ377t2+uYDJntfkESrMN//H/689+dDGfHV+Y6M5kJ4VRUNRNCdHNjIuHIKdXS/0n/083ToKMyAqn/pn/3TvO//IN/y+h//BT/+ojcaPvf2ej/zcLxthjuYrTmZk+vCli5ulv3W4nvX5/PnNxaI9Wq5KX5QMqLY52jKlLibPiL0ptJ5c069Hk/E48HLRcEHjcRGTKGI5nh6d9Du+3iknV26+zgRdVqi8qHv6tWemO5dWba4mvJ6tzm7udofMmgqnbZKCq2pUUxZWZcau7dnhBpfToiqYmnVbOT5ZnlDmZHq0XlMxKkxqxYyhkUYEBN3VwztWTe7cnl+6vHf+3NbB0VyV7ixm2awTudG19djbMnWClKBPCSFt1hNwEGM7qaomabfuEQ0LGk9CwaFdC4dyvLfn+65igzPb2rbKAgrz4+WDZ84vm/mkrGPWPqtZCszBhU2j/WU/c3wjwmZvVYqxy4Xz0zL4lIrgcquQy1UDLeukKAuGkvD28XKRMjPXBQDEYuwmdeHaNK3Ht5dtn+CerQl5UqKUzCwDsSOHfVtXrpqGgwWcIMxma+71f5KKZsseESry42pDuqWs9EZ3EAgKRgRVlBCsHCEWaOIsRzBzDkVQ1NAIC/roky+8+S1Hf/zv/plv+4s/+qEPvO+Jt1Wzf/NT9zz73+c/8/MPfdc7mw8/1z358lae6jm54+rVSTSHvOy2Nja8AxjZudzDf/jp9LP/aDypN37gB4+f+B1HFTUCu6Xd/6f/wh1/8XCyd+FHHu9/7cN6Z8aqkBIYaQZidVUFkqCk62fP3HzP28czLT78Ww/mEyDjcDdjhQHIgUbICRCACSQDCDACG5QMYCBK2QC1CCK5BzV0BQKKKjsPoGaKaFa6vqir+x8ot+r5G69uXb48PzgYjTA1DSKTdBo7V46isQzeAEDLBlFcaVJtRe+L9RIwO7T00V+p763hoYfsjcOjD31448Xnd9QjB6SGPFlKaFlTpjEc3Ve7b/xarnZOPvF0+foVp6jHRl2ELMwOxGTdIRd
UbeS61iYG7WBgkHsEVvAMBqDZBDHwqdDBDHkg/SJ4A8xQIqCAKOTBSiPgDPoIhiAGzoHHU9uWGmQDZnZUFAA5LY906+u/5KkPfBoA7r1Q75yrt2n6Kx+8ujqzffCZW1dmqZpCsXHGWnrlxVeuHB+Mp3uvvnJnfrQ6OFxsbk8zUuv8nOt7H3m0GtOvvO99W+O9LKKqwNB168AuhIAmblzdOD7RnJGCWC4KToJdil3KnoNnbttU1sHAHHOflYiaqNIm57HvGkJwZYxJ1GwW+0nt6lH4olQ0EIXFIJuJgaqpnprPBt1hOE8P0Gke4mg8kHVggEmLmWYYoFoDEmAw7Ay74ayn0cS74Gozuvsn71KrB11vEFIGLcXMxCAPrmwwBR2MIELmAImGQSs5lqDATE7NVMGADICg8IiGaMBivVkEGFrbCFEHg4kZwfCn0FQRrQjgzbJimywOFWxgScGUTC2p8lCJZqaAgzGobxMCOh9C4XP0TbtMMZVF6ZhUBkONIqCjU4g1Ag/z4IF0oTBUtCkOe7pTbI+CQdK86k5cERAwaa+mhI6cI82gVpDz5AaNzczMhgn24DA6jV0MTCUbInBqcFo3P2TACMHuakQ0uAqIwBGKqZoakqGeorCNQIWAhzNPNvCIDsk7YzNHRkyiaEkQwfFg4AJiZPKi0oMO+3WzQZaHUyvT6cQdAIyIHA+hOpC72bDhclIDBjQbInGoAGQoMLySIeaIoiYGjkgNPJOaOQA0FUFRAEUj8wSeEBDlFLiLQwcQQAaAy2++7/53vru1GsuwtbGTzTcxOXYO+l73BWKotnJ0liMCCXh0lQJFAe8gWxIECi5rLMKGgUcYoxLRCAhSH6uicKjSp7Y3Q2dbF/PWmeSolUTvWZJrDp/8rB3cthevlF0XYnJRUXw0CF6Q7MP/6f87KdLFe7yuwABS25dTz0bLvufAOfeiuSyK1tSCz86h49Q147qG4Jftmp2b3v9oEzhNOmt0tlxWwQGhuWo+79pujTjqY1eRoLemldvWb/2//upirHF5rX/9hfWNV776O3/vy899dqOEw/WxHPbrLj76wMXFG083H/mNw5s3dgtOffzYh5/eDdUzn/iE05QTH3zhqCzd9Y/9sj2tYaUNjeZtAoQYrdWs7Lo+C7qsSAkkQQIEQi9mCblXWfWhIPC2ijkSHXPKjg5acuVUzp2tppu46qcWTw6bT794dVIU9UZZ726Ynn3l9tHN/TZ3oKH0QYUse+qSoWGOsQ/eObd/+8QzBhBwUIxGLsq1W7frUZg1rREnAUXJJlbQ7aat+1zUI0WaL5vEXgH3lx0hclEhUVatXPHA+ft/69ZNhwSE29MRxJxz2+cUPHc5ZUxFUbddm1BRQQvXSlKAoDpC37f99mao6xIyJNTtzXp2sJx3FiZhMY/3bpw7liM3Cg9feOzKzVdaVzSSm3lLL1zf2nz4kXrzZLacz/qt8WixWLlQPn31oM1w8eK0nEyc93Xtey3KMSB55HXfS495WjGCOXJt3xeV25pW/bI5dRURIlCKCYmUSUmROQMYUhJNKESKjDoEcskSKWEGJEAUzYN1gh2DQtbc9qnpYp8VyMUIShlQgZQdsGMAkCyeKKMwQdJExkm076UIFQCJZSMAspQFsl904nxBDBJz1syBEcSSIhgagujQGaAqBpSGeJcqATGRqXoi75yq4tDQntUxx6YzNUWrqqAgYkrIQ7C3j+IcDzC9wIQGYkAIpAopkg/gXPBoOaeBsYcDZk2JiZlNdEgIqxoxGZiKcqCicGUoUpZm3YnoUFEUnGd2RG64X7J3NpSNMZkN7din0GtgZHbsClNjdI5QzJidSK9RHQE659gRDwkVlLQ+OLo5a5dGWFXl/uG1++5/ZOqnNtxQLXc5BR8QiMmBmoEy+Rw7DiULiGGzalIfCdETOaCsGEWiaRUYTUVNEJSUPYoKOiVDRMrJwNQYQ80GkDvDZKLCA806myYFY8/UrPpQ+RrBBRezhXLcrJty5J3zFnMjRIhkQKq5SZiNkAiRvKeydMF1SREYLDs05xAkG0BOqmKI6JiHSUrMOtR0goCoqRkQGsKwzTjd5ZyeDkAFkKkMlRGqIUSX17bMvRUIzkw0IQvmxKkTGoeyYMyrNVEANAFuclSw3GbsK2dWMOkw/CHOgAJaOTa1rKoAq7ZLCN5R5Ykx9H3EbBVgBOhxIDYSMbKhmXojy7Z/Z1Zu7P6ur/3yz37mKUCgQD64+bwRog4N+yZ37VEZmr5Z5X60sVsVdeEdeed9KSI+jMyYCl/W09SN+9xD6vu4aFdLKsL8eH/drVQtUqirajY/Xq9mphmq8QNPnN9et22THdNjD7/p+deffODc5Wo8vfLK/jhwfY5HO+d2Nnde/PzVS9O37lfhK778TbL5nluvx7Iax7ZlN44xz5LuNwk2povbB8GzOsyxr9E6h4tVc+v67Z2zW5VxeTw/+i+//MknP6iRu2sLUdSoOApXL2898Rf/ytL0l//pT3zbuW1fXRifubRsw9UTt3H2mx75c+/8wJ/4fefHum7am6+cjB+/5+v/l/8tPvnZJ77yy575pf9rqx7DYvb291zwb/+KV3/rY681V3Q3vyEn2xe2btx8dW/a7z3yxFvuf/hikZ/7zJPh7Lkb1692TdvMVxk3yuqhYrz3uVdPDvZfeeGFV66/eqNiLgGtjY619IzqU1YuvSEaoIg4JjNLGTKqmQIwsklMDh37wBB6VVNtDZLmPmePMB5gY8AEmHPftAlyz0gevCdH6BksW46dcB85R5DsUIEITcpQODOKrWIyk0BWBSg8D2XBRJYGTJh3ORuqzmfrPjYIOormpkZDHyK5lE4F0/mNqyfXDrfvv88vZu2/+7nd5StH/+XXpgs36/NBTscXt8+/6c1Xn/tcWN7ZLjcgVwTmEjhbg3RALLWu2gRg5oNY9uyp8HFjtx+hYrbbre+1CC7HdSb2xDlGNNRkfnOkJs4FtRwXPSisx3Tpr/z4tce+Yn3z1nb90+Ojo/nf+6vVu3b3rx1vfGE1Gu/2Ua1p8PA1gkzepEPMrq7Lr3Kj2z/y4+xxWo56kYTkDM+ANR9/uSqojx53RsvVgWLvcwTq0yILoSYjoABaBhctYRid2R41n/7QT/3LH7vu6sk73/LUpz/10rVrodwglyMRFMUo2rnR5JUbr9YF3rc5LsxiBleNui7GmDa53KhG+/ND9t5EN8ejCLlNa+fr5RoJ86KZTR1s1uPj1JqhZd6stu7b3Gj7k+lG0TarsvDH7er8dMN1GHu2hEd3tKaNrWrj0BY5Nrubk4P5+qBLAOjNYt/XIZzd3mu1HQWftOcM81aidRfOjtfrtFz3u9PpskkMIF1SIDVdt33wboHx+vwkFzyZ1u2qJ0Bfh8bSdBq6JEVZhapcrlbjagIA5agOhBG07Xsw62PqFYBkWtXLptvcrLZ3J/PjWFZVu4qjsq48tuWk3tq8vX9UVX7v7Pa8ax2VTK5PaaEyXzf3Tqereb+9Ub3j/N4nZ7PFKgKhuHz/+Y3bh0swyCZ9l/fGIwd41DTrHJNzSa
wBkKrwRCMfcreqHa9O+sZjt46WVm1WX9m0xAlWRVWAJ+1BxJxj7yozKx1ujUmytj7exdbdlYrQgqPSZTy+s87JRFXJekKzxKhndstlzMXJbFIXh31blC4YxF4CcWdKamAcnT79+YPwc7/6fX/2e//KD//0n/+mb/vdf+b7fu13f3zxd38Bf+3jVz/3ufeAO4L04Pd/Fy+6y+/7+I0Xr5coR6tmfHEyTQ14BlVb4jmOt370/+je86FHfuT/s/yBPz37pV/aPXufwMJthVnhzj+821295YODwpt6E7F1i46hiYvL91/6xz85fuLN0y7mX/yPy7/1Y5M+Q5sgpbut2KeBDVAFGs7iqt7ROAABeG+NYUcZsvPoiLRRS70iqCIQE4ilpCY0CtUk3HrjSrHB5YWNudLGV7yrXRxV0z3ou+bFZ0Mscqfe+2zIwKYdE9HmNK4PRWKeXgjhANIaPFWH8+4f/2vw4nvcgopoCMsLqJgFREI1NW0unT3zD38iPfQYN0zf+Aa88rn+yefjG0fhE8+ExaG1zTCBA7KIzBvnc5fC8U0TwWkN2kNUYANmiEMLFEKKkAw8QxRIBi5BiTmL8wYZwAEway+EABmggFP/g2QwAFYonBfrj/ui8mANoN0+MfnyRyc//hN7//k/f+Gf/Iv9a7PfeXzjky9/7vb85vmHLhwc3e4z5VWanjt7fm/rt178XH1hql07moxfvnFyIlm6flqUs9nxvF1tbxZvufS2Nz/2jlv7R1haVXCX9Hi+RGRSzbEfT+pLO3tt2xy3aT5fbU1GWc2xr5lNDUzHVVl5Tn0UiQ6w8pUPvo26jtHYULRtMhIKIzEUHCmdGk2zGJupDcwFkLvl5QZDbgCHCP+gQtgpaWgovBkO/cZ3ecNDaAgBTRAcgJmo4F06NpABG9ppVewXbR5INLBybAizDVs4syjoEJHIIYBB0oFVhBnMD+AkAx1UFjAwIKaYRAAzIAMwABMgQBgsSShJYCifMEIZesJMASiTZdQCrXJQGc1W2CcWACNxOKTk4LQkz06hQ2SIhOxdVZZm7IPvUpxsjKVb527pN0bMBEQIBCqOw5AEQ0DC09a4gcg0GK9Ah/JcsIG9k1ERT+aHpOYdiRqzS5aYmIi8C53FajweT7cAXzvlEg0dZojDUwyBs8FEo6BESARZDIb5v4GjQa9DBpThPGSmAIKn8CggQCBkiip3Yd6KiAbYGaphAHOMDACOB+kvOJfNIhAPmCEzT0CE5ggFzCCdVkkDMyKe0pbuWtWGGNpAtsaB5a9gYKiidJpTw7v480GBBCLMp9AkxCHheAoqMlAgBAVTZEFTMAJitWyqgApGCPmuoQ0Aftfv/c6tex5vVtcxmwP1zpbL/YJNrGn6fVdwjM6MR5Nd004NskUmZB8sZ09FUUxEzPsJ2RYIWkbQPGT4giscYjDtxUhd1KRY5Ux9D0Y+5mKxbOyer4CLTd54dRWXI13Nr93ERWurVe5ijskH14l1K+VWdsdltVF3OUKSXHBR+9QJEJmX6P0rs27b6m2AejLKjpAhjIJk6VfHReFXx7ONeirOdX2Oq4zMqKiRDMSRB+9TXhcVYtYrv/iTN7UdtyfV/u1JP7965YV43GjWvGg3RlWR8q1PvbyJ4jm4JuXCRQUDXvWSEBNCVkcdeMhODDuRBBmzoosikakzyKRZoBNTV6iC9uoRnUmEqNGKEPpQ9ohJ4Uh1vpa+HuPWVrh4/sZi6QSWr187OWrCRqUdPPbg5SceufjqlSuvv/LqbB3bdRxXJU/HB6t1oLJpumI0KmMejyv2YWt7o6gmImGzDLldNylubW1Bm0fTqRvjrZOT5XLty7A1HhWTzbP33s9utFFNX/3cs2bttfnLTZuDQ1U0QgD25NTycb/+2HMf2BjV0vLtpp3HbDE1lsejenNULterxKNFlwB4GuqH9i48+cqL6AMoLNuU2s40V2MPKW0U1eHJYn/VTorQKKT1+uzG+Mrt11FhXF+6cXx8ZXb10Qfvma3ag6P22VevOSkvX6ofe8ujL732/BNPPPjU8y9ubUyOru17B/VmUPEvXr0+cgSI9+yd/fjL12eLZuyKd77tLa+8/vq6k8ujrbo4RsaT2Xq6MQZoAICV0bBwvs/ZALPqgNZQRAUcDu3OUIbFZWCOoQGKGVg2BnPgsmjK1sR+0SdRy2pmuU/mMuUIPgAFRW/BsVpy7ArCKIKB+l6iWUaEnLjgsg6qktqejYyxzyaoOaskQEVGMwJXcm5zFssCqhgYDSyD2N2aRwWjoTGeABA8D+qPIJqpShYABAIFZOcx9lmG0khziDjkiBE8Dt5IC74AJ1Nfr1OOKSdRtbvwvsEISqBmzEqOQM0xmsCQb7pb+zkoWwOqH4DJmEWVmYmYEByzITJ5Isfs8zCxM0UiHG6EoEwG5AgcEwNkAxDFa2+8RhAeeuCtrgiSkpqhYc5x3cyBIJQFEx4e7N+5c726+EgWATM1NRPnRpoHty8wh5zXamBAMVnbpa5tOKAO5ZQgZISGCtCrRrUsYoxQYOEYM7gQNBsSYc7QKjoiZFBURAFznkWySkQFMla1nA2INARzZdv02iUyqMoQgt/Znca2723dIWcyJhQzZDbBBFCyE+JMVARuVxFjTn00JiLXpkSIBorIxEZmAOAZYYiNG6gqIw/FoWp22qNw9y4EAA7JmzomNfTsuJws1kuCAN5h0HFAUOhTzGmVWRLKNFgIo5iy8yEBEmomcYSggEPsGrBkJISsOrTRZYEmpmQmhokxizkGVAsMruCAsI65Q0p0t30WqQgeyAA5Rdm/dWP7nH7529/69MtX122r5gKFpElyxK6vC685LW/fWszXo629vTNnJ1ubRVXVo5rIZ8iEHtgh0Wg8KUy1jyE1VRXbbjG2sQu7Zj07KSqMJyez5Wy5XC10sbs3mVyquzvLrO4T1z7Zs3ad3ntmxzZOjiGP6rMf/cDTD375Z/buuW/Wnzz+HX/2yWtXt2f7b37n7zYIzL4TmWxWKTZoMxi7jYenGxcvffqzV4uTNCp8lVU0PvuFNx5+/MytO3fSSr3ya0/eiQm3t6tiZ7QZ6Mqxftsf/4EceTQ5//Dl+wuoty89HsM4C144t7ley6c/8vHbh30ESIA5Yr7y6of+xd/OF942OvPg8eEsLp/fPbOTw5QeeNPq8LgK6eVPf/b+b/4D9/3uP/2oFWnR33j9uY//+r8viXeq8zUfXXrs8dvHR+lkPN088/nnXnlj/xOv3jiG2SoYVBnqAiHmUeFKD8RQOYpZo1gCS1GQ2QhyBkVuRBNwzhocAREjcFLKmYCSmABkEQE0gw54FAoBB5BSzqYClp1jJmQkBCNEELOsOhioGUQUKReBEI29p7KEqCDgmbxjIFDCnCCpdSoRIYk4x5lckyT3cjhfZAwjMIAChJjQFad1N9jB8RrrnM7HdvHPfjJr2ibkER4w7Pyx7/jy7/+zsRrBjevv+8t/Ory6b6syGmjQ4JlYo+bYRXOhKHzsM2ZlVqPi1d49/pf+vBvD6qPPHv+nny+6znKbyeeOOGW3hXNdJ
leAqznnwjOPi7KglrIdpXMnI30VcJZpyZPFun3laC/4ikoVNgDpzFuvueOqpAB9Cn0C7XQMFY9dAutQQZRNmTnPJVOfQnytKu7/3/7U3qOPFWny2b//D6s7B3C82Ls8jct2cXueC3BjeW19dN+3/w5cj/uXry19/Y6v+/qXP//LAQNkoSydZOTwzifeXpa5D7Dq0qXNSVp1ZUGiERyWXJtSQp6Mtn3lOk0H87UfOd48c+f4RC1XSJPdMylB6tvYdYWbto3ce2Z3d1y8drjvxuX8YL413d2gYtuN215z7M6Mp1Xgg5PlyWLdiCZDWDVRBFB6ES7KmCEBBczb02Ja8HIe110Uyd6zQ7/qmr5DLEKWRMyqAJYR0FdlykmDPzhszm3snT13lvKqT3LtxmE2KYkUsG+y2npzMlWFpu0plMxF7LuN6WbTrhXS9uYImtZDcf+Zi4eHt6+vDjYqvzk5R0UBgkmgrDanW7ZM0OZ2ZlSCnyhu1rvzeHIyb1exJ/IXt3dWs/beM5tfMnVfiAezLhnzfpuNeaXGWc+4cUHILm4KlKPyOPONdZxbMoPKIKS2YK59tU7x9nxO5OpQPXDpkshR0/cOEQ2YcBgFIbMqiWjsJLdxMqry7rTr/2dWkYLrM8Q2JSR1TkUMqEvC3gHK7KQvCocdQJcRoSw9GoCoRGUzOT3qBg756fd9Ql6/+d7v/FMvLL5w9B8/8l0//td/9f/8q+trL08gT8a7NtkCNy5e+/Ttqy/d+3f+5Oc/8SLNRjqm6Qc+lA8SCxA4WNN5tp3f+mD3XR/Zvm9j+9xu9zP//Py3/44JeH9zdeMjz+xlD4UzU3ZAqiAJUOC+vfwd39re/7B2lBuHD7756uW3nb16fW91HQoAyIAOJIMqEIL3oAbk+nrbvu6b1l/6lkXw586M9faSnrqy/OQHto7f8BKp8ABOUwbHzGQxKZmrCi3r5c55+vY/2O1uuqNXT155ceOxx65/5De2tza7Gy+fKXrsly5FBxXE7DemUHhZdxA41CFKS2OW20LZKCZQLMXKjKAeQIFFQcmxEUrfF+SYOY/C+Ld/U7P3IHRGq+Orz3yoOlrvbVwsdu7vnnoGXCZnUAL0Yt2yxDbd6ZA81CV2EXhQJxSygRAYwVD+5DwYgGdIQ1YKVliNf+ivXnnj+llYLp951U/qrXddPvjk09vXDml/idNwK2D1+G58+fropdkoCBQFRQH1EGwx1nP/6l/9h5cO3wVb973r6z/Y/qPL77n84//2//5Mjpvn6o88+eRO5c6dO+vVNUfzbM1LN14+6NdnyZ9cPz5fTCabctJHq7gO2C9P2gZfh6c2io1UV6Kpi9E5qwCbYK7m3Y2tmJVNJ2UxO2kppdR3VekK7/uur4vSO1e4QJq7hsrarfrctWl93EaCmNUXDACuYGUQs1BS4bsvbo10sL4YqA20h2EiBzRYbwahZ7DS2BdL1e8qRTwc3hUBQAYVAgc9xojRlGhwuiCIqQAI2ml4aJAjTjWCUwj76YbbABGIRAwJMygiOGaEgViMJTMiD8YwBXBM5cCkAGUCOXW+qKjxaf0YOoShpRgVB60mmxHgcPENWOXKgxs6AxFVYMAODDIMoQ0vHwGYyRFLVlTznjcmI005eKe9Ft4bTZrYTiWFUCE6BM6SAdEQCXiIngEOrxvR8JQnRAQAp5XMYKaWctc0q9pNHJtIOlWBwIhITcj5PmvsIupd5ek0WIdg6ngopEcAIz3tC3PIw+dDg54ESISqKqd9cYAIjhFMgZCYkikhkFFWsGynzw7GSCI6VGkpYCb0hp4JwUDMsvWiBhQYGZSRDBRNCUwACE+rBVWUCR3TXarowBwa3mUQAWAb4qOMaERD+nGY0ttQRk3oAZhIYZgQA6sxnW761YwBkImRwCDb6UgZEUAG3DegAQHq3fFB5WB2+wpx4mqMHFZtQ1zGvBJsySECaFZEEEUCr5JsSI+jIw6IlWlVcqj9riSXMBoqIgVf5pTMJIq1fZfa3ogdBu9ZW0xi0qei60dmx3dmq/bIjLpyc42F3eOxbYKl9mhOlk+OGx/H++tOCvOZ0jq1y7hdVjUCrFvHVo0gn3QAII7W0mwiNFm9RxNDx00jcQmuAMew7JY5ZofA7HPOuc9V5RxA18WuhxwhOatKs2ufnSI2XZb5khV6WYyYIMWowDlDBrLkCwJL5LDL0ooBoSt42acVABfOsVrK1mtVBQKMIoaioAZsCr1oBkyAMQkCegQTyZqQFAMf5jh34UarPY+PwmTv0qVqu24sf/pTnyIwvRank8oT7u1sPHTxwdTE93/gwzdvnyC5IWf0wAPnQnDvPv+EZJOcJKYNx96VYTJSkflsjRja5UnKfV1QOr6dOrm0M0XvR76wi5Z63dna5nJy50QW6zvP7H/atW3fzPp+XXqkoQdSBk1AWNUUnbCadG0sPWeTMPIbBZjlk9UCkroqZBUG7frVy/tX6lHZZAE2z6FkLslXddjZ3nz1yu1VKxhckzS4vF0XpYmrXV3VVYkv37oG0R1fPWa0sk1lKDpsr91a3Di6seGKT3z8mYR25+ZJjYiWX3zx+oWdXV3GlfdR5fiZN5h5GgoCfub515A0eH7t5SuOsFds27y9VQ3fghGXTsgxQMCVNlNQEJNsznsDULShJiwO4j8zMmW03tQxIJkIRE2gaMZdTkmzmSGoGZiaAuYEfYoVOF22OC7r8SSLpmguBC4ROAKqaUIEdKSMxuzQUxJC05i7LmlWFHNgSRIjCbEaxJgNyaEDVWTkQdoWHXRgAiO8u9re9RV69sMSw8wppi4lZ4jEWaWogsUEZlmzAiNxn/KIiABSjIYYgu9iCo4EoGsTINGwnCIwoSJ4xyln50kFk4gNsVsAFU2qORqoSRIDBAdooFkHIcl7jzRw204Ph8xESMgeAAb3FhGfsvxO76RoJs65uqpjmxyaJCFmEFORGDsg41CW5ZQrmM8Pbu6/dvHsZcWxSlJRIiUiJI9ASJA1e18YgolYTqltYtOiCiGIiiEjg+WESMOaiCISUQV6Vg6E5MhL1KRk4ADRcswm2Pc6RKxNTQ0L5webEhmQg+moZqLgHWs9a2ejgifjUHiOHZSVXzctE4fKo2MkIRv6ITLErIQZQXJuF50pmmVgRbPAiIZ62p4KA5JveHa4y6dTPa0DNRsehqEoCgCYCJBE8qlnWpNFCa4QlaooEqYSFVWbmBd6HMJGi0UoKssrQUFClkxR2XG7FAaPpqpCjjALqSqQOMpmogBEKaekBGyLLgfEjSJQztXIGcG6NzEgJJCMaiKAqilJ34Cn9QJubZ+7/9zO+as3riUT1d6iULZxOSkYsjR53Vt/spjN8+Gt2eZmGNWT7b2intSjCYcyFME5FjMGHo23kLeamHi1QtpEOs7dST3VGBfGTotQ+B0qwvEa5n1MWikAeS9tXC765597ni27wjcgf+HP/vFfeP9Pffl3/sXPf+4Tj1zcWad+fuWlk2Jv4nZrLk/mNybVGvJ+c/vp6sxONR5/Nvu3/u2/ET/35Af/6c88VAk2Xdvy0y8cqJFF
GQWisSfDFpMhhyrcN10995M/NMsno83zx8u4eOgrp+e+pi27YmNUmbQ3nv+1n/zL05ykchiYzvIf+/P/66f+x39/+hP/9cI3/a6v+DN/v3nhF1/73MeDe/Dg8J6zX/c7wnj6VV+/pCTrWPcS28XJxa2N5Us5Ea1YfU6379y5dXv/k+//rSuvvny4nMeuI2M2wWzBNICUFVceyJGBsSP2CEm8qTky0WSWifqkJpizIIHZaY7VOfVsaM7EhmZpNTNFYiTAPiWRvo3ZEg7M1gGnmnUgfwFlQzUgMkMkLItAOBylUA1ZrURwjJ4dOTIk713qG8lJh+EloLKLYhItz1d9zmekL4pp8BNHinhqrtvqbldTWdrCsY2Zh6z+/nLefNWXPvR933NQOO0bt1W9/fv+2DN/4YdGyQW3yaSxX2VJnkMgjMlMjIGqugDDVdTHv/UPHO+ejwTF11xcfeBjcOsGZyk2R6kDyHhl1V36/j/h3/k2W8db//6/zD711DmmiLJD3Y2/8xfW+fvPkj9bTbpibBnrbGBBCIgVUVXM0aSsfRtnVrjMHXjPBaBRNqeW0Q8lLmTe1VxB3+xT8fD/9e/n9407ql78wkvv/rG//pkf+eFL80U0m3URipI2i1dnt0d/7g++8eB9dz55feeRN73p/ou/8eFfuP3GF1xwCCaaHNOI3e5o+uEvfLSTNK6CpgSZFs1K1k0Rimk1ti73sT+YNxyJEDe3t9ZxNZstodeNqsa+3w2j43QSKGAoVJzzfnMy+tz1l2EEq/2TLL7v+8locvvw5IGzl9MqUdMcnrS9si+8L33h3Na4nB0cemZXFKt1G7wzwPmyC+i8Yc65rIugednH1w4WXStFXXcp16UnUBPTpJOqjgxZsgiNxqPzZ7Yi9OvlYpHlC1dvoneiOhrXzf5+WYwMaLluRTR165WuTWUdc+V55IOs2hJp/2D+xq0jyRHAjkBmrX3Jl52DLpOhuRLzuDwj1xeHTrWkUDIt46rpurZrgfnWYumdf8uZPQf2wNa4T+laWBlAG2Pseyn0/u0xz3Pf54poXLqRB8oc1fVrbUC7NrdI5Hh/uTro4nEXkWDp+9XN9UNnJhMOqe+iQuFIHZLzTE7ZE5uKjgx60JpMnP1PUlHX9ASyPGlU2QgNpJ6UYN5SAoW+FyYsmXioyXLgmEXQe2edYbIs2USMKHt44dVrq3/z97/9u7+X3vGmn7/+zAflYMtWf+g73vT0R79wZmtTfvW/3PjYa/sIlx556MLld9dXbtTTrf1f+sWtULORkQc1yETJxrcztCvIJ/Shv7X69/8k7+42144fWFeRihzFVdgtuqoqwBmYvLr5wIPf+yPLvq0ShR66Vz/44I/+JVpY/NO/PzQNgIIIIA+GANCBF8yF97C1ofc+Nrr3Qjny9mbGd6g8dG/86Z/wGvMyU/BG3voIkhAE1TAUKWXamu584zc2264MX36feVgcPfANX7Y6XvrPfPrkxz+yLZmzaN8H9NC1wOAqXJelpONxjnB8DUUAQLJyWYD1p44n5+2Uv+uRPXPPCtBnKTTc/0AqKo4pT0bT3/Ntev3W+of/d3rmc/WdE3AjyABrAUREhr73t9Z+Zxccg2RoBKwHIMgKTKdCSEpQeBgV0LRQFBAJ5u342752+c1/+OzmpIon1ZwgEEzL6XclPrnTHl9/8cVn97706+PlzZ1rzy3//I+2zz6rq1RC0E7Wh/30B/7X2dm3PrgTd/u1f/SdD37tV/2XFz+7uP/c9uX7X33hmS95y5vyojm7s3XztVuj6VTyejU7mtTlKMBstnrTE0989Mnn0JeHx+tCVluTer3WZasQZ6tV44ki4u5GebYOz756MJlOvVG3zm3TlQXvTKv5SmJOgDZfd+S4KIkA14t1WTr2zIgbdVX7qumOfHAWQHIqApw5Vx6rkOrjb9roFvFg/1Q3VbVh36QKoiZ6l/8JMMyTT4/ngKqmBnaXXY0DaoEQlMCMmGhIswmaomRABBqSUIamjDo4VU6BR3j6+6cOlNNH4JTrY3qqdyAiM4LhUNZFCJ6R0AQw5xyI2ZFCFoKsYGBElBR6xd4U0IJjBEsiiqeDYhksTqcxNwDlDMCgtccyOMkWk6jRwLsc1Ae1U0uVAZ4KYgYGWm1OJ2e32HHwoSgDAJRFPe/W3XIxqkfKCAYMHhC+KNAMubPTdxR0IDkBKKI3MEZQRRU9OjyseYyI2YTZqQqiESMiSlJkr9qLLAEVh9TIYCxSAwQZ3CyDljfQXIfp+in7mhnumpiG/fNgIQIlICBUNERzZo4QRck4qQy6mZgB6kDEVuImaeVYWpkELAvnyAy1HQ6GYsExIygM1XIyHG8cQFYTJDTjQR86HepCEhvoTUwIpxsjYgQEI6JTWxOgGKCdYpcQgICyyjAKhtMiv1MW1XCBgpkHJAATUzyFeoMYmQ064vAtWM2Ptzb2zp7bW0aQ1HnicrSZezo6vj6qQ+FHSkgYA3lQCR6y+eA3HY0JR1W9a1ZgP7d+FbMgMlEAQwNTTaIpNU3ObU4QytB2UaPmLlLECdDRG7eODm4ujm4sVnfKjTFtTNa4rgvCke+NU8FgIjsbzhRi6nvplbplOzsq7/QYj1ZFJCcyjjkoBAPnNY8gJIAMwayoISdZK2AJwUNZhPUyTjfKHLOB69adq/lolUsPUGFCysH3RM0yltiHqt6/tXTe1UVhoVocHwVHCXDemebhQI4paVISw5OsLhAkiQQNoRESEjNTcAsBx9znXHiWJBoFDHpV8j6bGJJayqKqxGRKpg7bUbhTji5+yTuuHjbf9GXv+fX3/9LNT97yhNynlHM5DhHEIz523wPTevQLn3zy5HBtQnVBDuHsZLq6djRl6k8SmZ45s7OOa0366u3F4XKZUq9KyJBSl3Kq67GAqZkPLht1qeeSROxlea1FJVfcd/n8XsVcFlfXOQ3Qm6wISAaMoCJgQIQpW1FgNtgsi87hKqcCMPYpZSnQjRx1jQYEMtvaqbNgc3zStD2WOnaVGs7Wcf/gFiiUjoMLxjpbLQNWWXJVFFtb4ZXrL680jTanLAlSnhTejyUoHZ7kbhVF+xBGnROeVpbVxK16k/2jBy9snizTnaZ1IfCoOtjvcs4FQRjxYdM+cGYXYzs7akRhebIevgVnJzsBPauigflNQoSsDKCEaigEjjBF7XI2gmS5k9xbAounwVE1QDQyk6Q5o5wS7YfWLjNTocTMjqeT2tUFV2MTYBQgUpEyEKQ+QQ+qgTwCqWYk9MGpgqWEQhKFVMEPqynGmCQLE2cxMUEmB2SSzRBBHfFQwoA2LP5gooMrFNQIYFR670IHxkhdl8hoAO147ywrEyZRB1Yy8+kajm0fe1NGEgURYSJAzpJUAQmICQmSCTjrcirYO88GaAIAaAYmqiYDBhDUQCxFQ3LsA5MjHE6OQAhETOwJkAkMhDkQEiIyMSEzOQAgYkM0ERF1rkCHyBxGE7Sc+wxAKjnFFZIHqEajUV1cO7p55ej8fRu7DwGz8yHqOufOYWDygmY5OeeSRCafVbJlcoSEhESEgQmJYsS
BSsDeW8qpt75DZoPaQiAFAc3BBQOMMeeoaI4HrrcoGZ1Wf6IZKXssx04hg/pQ1c16hjza3N2eTss+piKAQ6v86dmb3GAHY8hqvUFOhQsmgOCYfderiBpBVRIziiqIDUOmKCJZhnsukTGQqCAPoKKhy8EQ8Yu2Imb2A7vA1DlIkIuS26QmjnhSe8yxFzWSdgTJ2YoxKZGyOnSEsDku48lxjxV2bnNazVdrFiy8l2xEqIjLnJxRCNQDkHHO4pEFoBMtkwQAMAjsJ0FiVpYB82cxqwu+65XUikCLkxkVN85vX0R39slXrrlVq50WKuW03i6rRLCOvRGu29hJXs+XwOTH10fbe/XW9sbu3nhrYwRjV5TEHhAMfSiLYrJRb52Z3zk8uXUT00xELjx08WC9mh3OpuNCgecn6/X6JKf51oTHBUewHDOYEjqF+PrxS27+3Od+9v/9e//gn4TJhf7i+dX192vzqZPbzWv7J8cHt3em20Z+66G3v+ud777+6//5/qgXL7xz+sjXXv6KP/gbP/CdF6Z51NHVg67twINpRZ30wLhZ8fzmul8U080yza0KOyilI37ht37xuc98IG9NH/2qr+8PDq59/sOPPLaVl8Wjb794cw6Tx996/NYve8jR8z/9c1vntsHyfLl2W5uXH/8qv94N0wsZgcut1HRxvtD1nd2qKcCmO9vH6+6Tn33KCG7ur5a3DyuGdd9DAq8DqFLRoy+8qbgAoWBAylkcc0BypwMtzkmDYUTMMZfDZkTNVM3QyDqxKNkBKCANKxWoAyyJCRQUuz6KmoiZAwD2xIRM5MyUzZhRUlQBAwJgNT9GFwB7sN4xCOecTcyRFuSMMKfGiQQAEFPMIQQUQ/arpJKk79eYdG8brbAcNfWnrKLlR37ZVorHxwm8OjJXKud+Y/PSH//edrIJmkPBRVFtvftrJt/75579pz9XmIB1DBkaqRizSDEtwdfCBNqjSuVl8eKHp9/6xB32RuO9t7ytf/bJqvLrNoOFOC6nX/tl29/6rW0ZSPXRex76zR/8If+5Z6edaT2p6s3NeqNiWLVN32TvHXlPBWvfp6hU+eCIbL1K63hm3J/bSVo0r+9vpaW20bnCFaUHV3jrVqLmMHYOkSH1t25uPHaxrezer7xky6OH3vVmuPpKOj6559LZw8NGp9vywIPuPe8a++nevZv/5p//yte+97d/8OUPz1X7jJNJiaIbCI+c3Wni/olIRLp5tHxiPKkLWh3NPPocpU/OY+77tvYw8mWXZL4/44phlseecd13fdyXI/QOI7dZ0OdH7724zge9a6blZDZfRmfH3TwDWrLDwxsxtluTEQsK2miEtw57Ajk+ajZK30sqiLbGxEbLlBXDuk3TEELwbc5NyloVb9xuR8w9xGbVFKHc2RgfHx9nBfAUQnBltThZbYf6/nPj5brxG/WNawfldNy0bdfFPsVzZzYnW1tXrtwuXegopdRnsWpUzVcxTEvEXFfl/GiWU0I0CL6oivliNb91+8zLL73l0ceaKJLBhGpfb/pSNU3KogaOfV9U5bgpFppawWtHyy0IMXAt8sDOtJvF3vswBL+8QzBgWre6sqJP7eYEHOmFYG0nB+tu4oI6H2rObexy70ahIGjalCS/cDuW4PfK0T2bxcWNQvrOWc5qopk9lY6orGZNS1EWR8v/SSqqnItRS3bgnYG5soDAlq1Zd2DgyGexnNWyJNE+IzOMpl4iCkrKEQhVVRWRi47k2s3Df/wTP/HH/saPPfDI9Jt+6n//yb/0N2986llW/Bs/9DcPXrzm3lKIO/nsjdHG1c/Qb/x0/cAjG/dtz15qS8jgwaXh0OQ0A88V0Afnw+uNXnmDgKAiRCEwyMkzgApk6fv27Df94Rv7/RTXFRh8/sOv/ud/t/WDf2RW4711HVYNZAMVI0QOoBmGQyQptEfwwf+Io644/8c6R1Z4N6nj+QdHO+dgOWMEo5H2S23Xfdd7hygKq0Uoy/jGZ+MXPkS/7Rta53qFfjbv42rv/AX/2MNxVNFsiQjoHYBp19Du7vHly9M/8Vde/fl/cu9Hf6Ncr8Ekx+hKnygxmOYeyQEoOg+mlloARk0wzAMprPevp7jsqHSRCXjDQnr9pfJgBsDQZwhj0AxdBO8AGQoHXQsggAa9gGTwDN6Bp9OGdmLwHpIAACwaqCuYuLg4Ilcdz5Y7pSs8ofPQT6yV1mse1Y+9/cKdZ54PL796/L7/Fj/1dOlDQayNeq9SwvFjb3761djuX33HE2ev/tq/+9ef/xQ9sjc6M7YY/5ff9XVhUr/47NUmQ6Mw2d587trr2xsb0zPF6vYio71684ZzIWdEhUlV5ihFWTYpzg9XfSc4quap/5KLF5rrB2PHabEMpc+i9aggb3tbI3dCy16bto9ZTSEu1iF4yeoMtydBYk4xjcblZDxexBbNXMAz2+6r3r37gU+8FkUfuW/62acW7KovSkWmqGLZ9FQTAUBEVc2ACKhgPDiLcDiog6qhgCGaKgMPqScgUDRTyKhgZAqWwYaRrpzqAXhXpPh/3Epw+g+dlqOfPmJqyBCQPGJgFlE1IzVmRlXHRICemYyMzJjUYDgfEIJT6BWSoQIkU0KTwbNjJsPsBcwxoAMzEzVE8ggBADPmDKd1QEMtlxozMqLeNe3IoLgCKsDW+Z2Nc9voAqAvy1GOHQCVoV6t1+MuFpMxIYnYQCcaMgc2NBEiwd3qleE/A0ADVQFwvj7RHEs/UtCUDIkNlMgxEjAheBFtT45Xs+NBjFEDtbvYH6AvIpwRcMjoARiTek+ah5I7NBto22iqNMCggDLAKVObjdEISUSzgprcYlMAAQAASURBVBGDnSKmRQ3ACAf2O7ZJPQNlQMbgMQAHULFT7EMWcUR8Sma6GwhDQiRRATBCYh48W6BqWS0QOjODgdZoOMReEOTUVqVDd9oXi9XADJEYQcyGvImZDVBJPa1fs6FMD5nttDANmIAUDEHvBtDqjbNhfGa1dtnxxtZmyrnt+yi2uXGZcZFtPR7tkBVIlFPnHFXFVh9LwLErzoFWKsnUem3VeUzmCZNI2zQqmQyEPflCNWZ0CWTdi62E75wc3Xl9dnB1vTo5mR+V0zGxsYslooiIZEMRB2qqTM4zg6esuYvV1hj3qv0Tu+VHebWqokJKKPHM1iQ2PczyhQ0/mtTAfOPW8b1nxzpaVx7ySTsNFU1Gi5KTExdVJ6QAroBg2Dctjko0SqvEu5s5W+5FLu4mibc77bpub2cCeUizckpGntVhB1mIPGH0WjmQmMqqmKcMCEmgKIJ3lFMmBVeGZJFK7rvIjlulLucuCZZspuAgel9VO6+uVicZZli9551f9l9/9dc46qc/8TEPWG6MEanPSoDdOkOH737bE32Tfua//4IPzhWezJVlqDz7IuzVVTAxBOHyudduXL990It0QBlUULemVcp9ZPV11ZimlNlRYOv7Xk2oSaNQFM6fPX/u27/l2576+C9duHTxP/3Cr3dZyCEhCpof/EtmmsF5FsIeAASm47p2fGs118rXZa1NDM5JykfHjZEvAmzWdWo1q5
wdVVse68kIeutS2t7bXMh63fQBOXW9r3g6qos6FFh06/TqG0fHTexYR64G6k/WzfHqZJLH43JkjL4sN0Nx+84qk1w8c4FTf3z7ZH+ZSgRNOir93kZ11MTXrl6/tL0RynD92lGjHAk3Nrduvnzg0QA5tadh5GmoSypKIjXUHBkNhRxz0myISRXMzBERITs1iaZJ00mzWqa2gabJPSAmyERYFE7IskCfMgLQABczYSdlUY2qcmMy7pthNcAYBRUdBAdahFJFyAxzMlFTUUQx0ywmimDIIABqwGrknGbUmAmRGRFMVXnQnw14AF0DMAIaFs4N+WhybIhZsssAGuvgl000wjbLdl3GnAAwMAd2fer6pIkx+JC62MXcGZjIqAjOOaeqOZspE6oiIcWUfYmjSVDILoH2pzA4NcWhLdTMTkO1+EU7qGM3SKuqwsCIg+mRCGnYHTEzETF5sQhEp4viQNsZRg2Smd2lh94cqloYISkgpiShGhkFhrGTgrTc2rmwXJ5cufP6o2cuBpzkpvPeDfdicmzZiNhEHLEaErERhoLTGkTUewwes4lj7aIQsAJkTaIelcw0NiaSyBmQE0RTlAS5V0A1Ru+AVM0EgdRMzdRgXITgXYGc2tQuege+qKroXE8IoJO6OLi1dAIImGOWLKpCbOhIVbsmFbV64slklNctrBMCq6ImyQjOeyTJfdThLoGIRCp6Wrehg5V3oPwhEN41FwEAeGQ0MiADi32KOcccvXdVVYKEDH5rdPbEbs7bfWJxLCm2Yqaai1CiSS1Z+3A8A1jFaV2ww4JQVAAQDDqxgQDKRNkUiAqPOYmKIehKrPZcOygLN2I0iZ1kx5RFFI2FCueMVEAYaHF0TBjObZ0NGESXRmyOvcNRQc0aVknbnBkxp4xIOUq7Xi1mi2SvTHe2pufObu+dO3/+8niyzb5ABUSUThyUW9XW4fLlF68/18tivbgX6+3NyT0o0mq/d+ESwmuU2XGOYAldMr9RMoCK2guvvDbaOcfN7Jf/9d8UUCHcGPumSWE8nZ7Z3rw4Pn/ukRPdPZg+cLxz7wO/7ct/9m/9jQe+4fd3j793vo35voe7G1diu5zuhUpIcyLGIiOBVTshbO/c/7ZHn/3k0w9cOuOrTa03Hnz0XCryrddfK+PBnWdPnviS30bxsXt/z7cfHO+XO4XtZ6ouvPzC7TOTS7/7h//6Se5otRg/9hWb1VfdWo82di6sO2Eucp/B+UDuxpUvfP6lp24dLN+4vbh9eHg4X1VeNorgupa898mGoWkSE0MDykZMaExdljK4oiokZwMkYh8cmGbmvu8BaFQUTFaRIFjO1gp0piKKDGIRyCt7yZlNPKKwtAIUJWVDJM+n0TMx8M6DC5qzYgbLyINDmh0FU9ScjduUNFlC5wjAsyc/SSJswuSYO++pj4BMSYE4CNICoUuimisfy/WqNlBj50+9/vnla7BIRUnrvkMqjEBIl9anK9fuu2fbheDGUxE9mYntPprqCbTUt7k0xyl3630KnPuMGMGNNkaFYZpe2n754x8efeC+S7/9m9P81vX3/9etlNC7wLhcp6UvHvrmb943WhyuS+bp9vgt3//dh9/3A6PMAXIOQJUauthhkmiWigKb2EBpvpAy5G7RNVbFr/7q7T/1J+6c2ai8xn/y0+tf/ZWp8wEUY9ImqgsOkiGUEwakBzfCiz/0J2lH58Vo76vfzHsPrD71DLTKwS/WoX/gwure3Xjp4snN19/4Fz/X7Ls/9Ee/87NXPnHn5uujrW1XsQCM6moL5cyk+swLLy0tUyhGm5u+KCnGoqiSw2kVoNc6BPbikA/XzXFzfO/m5XVu773vwu07twqHTcZllDP1dL2WcV3X6LFdrJcHY+dO7pwEQHAlWsjs6ombN+udnc3puDo6WBIRFjzZHFmm+Xw2rYvZcubrTRW3NZ3Omn1Aa1rLVTkeuXmznnVNWY5HwTkDMSPnRKR0xaarT9rF5niyaNZVVTrjvc0NYGhmq3IyfX0535mMxp7W83Y8qTfKMvYJjOZNX2yE3EYD6Jq+cG40cuNAx0erUVFVoVznfGe5gpSInWb47Oeff+DSA6EqFeLGuIhRtmTUdS0D5wzOl6awt70hs8WxJCjd7WZd0cbJ9cWl85tTcVdbmzMUwYFqv4pFhnvPnHnh8LhnfydGsnRps75ILo9CJ7RcRuiyEGFZxFW0rM65LLAWbExnTXelTWcW7nxdnJ+Wo8AEoJFFsgc/9DDv7k7hjRv/j1RUT0aVIo00C44n1aqLEHi1as1RjkqEMclyFYeITlkVaikULiJU4KNoAdQ2UUzBVAx75/tV9y9/9Ef/8Hd8y8U/+C3f8zf/xvt/+l+++ou/+be+9y/+vZ//1dtP3vYP7/mtxQNF9dQP/+Tsg8898J535PIY2kPnFAllrVmTL0NjibxZTF6QCIAFRk5BsQMg7xxCTOCAzYnUm9M6pRmkZnH0qcN6dulStQOQXQttBkbwQwd1hsFVMPSg9QjXF/ziG1iO+3GlPKZFDItVIS00LRpKXIC0YB0RaihD6SBG8Dlcv3rnn//dS/eezw+9rXXgHrhkevnwtZc2D/c9dKQZiFSVqoqiAMDk3W+/c25r9KVfhh/8bzCaQhJWBUwqaiyusKw9sBGr5UxJyQIPLocCii6t/tO/ne45ePwJufX0nV/5he5TV8+/PAOqTzNQnMHhUHMLfQRHwABtBDHwHkI1xJoADAoEVfABtjZTyl4FDpeABszzj31+40OfvfiV74Q6zJrlJr7+1C/9j9qFx37/D8L7/sfRT/7Y/P23/QbQEnaLsm8tFEgVdV0/uw8ufv3vvfeV/OA7v/Wv/dHHP/fcyw+/61x974VL910IsFEQzLp+4/xZZlayo9QVyxSbPh5rLyHQ5uGNZeodAUy5PjvaXXfLCGiAqBkr1zuYhtGZsPNaOpmU4+mkeOPGPpAbFbbKcdGaFn65Wk02qnpUtlGWTaNJTLlLvaGNqpIqk4JGXLnEbxzMqlFxrZm9uqx7o0AWe+0TTDe3AFqAU9SCImQ9TewPuyYFQwQFJURCEzU11cGmctqXNcTIxGCoBgZmAm8GpqfAI2REvXuwt0Ew4WEbf2rnsNO97gDRAgAUUgQjR0jm0BgB0IjMEZIZgxHzQNwiBE+WDYzJAAkEDEXUDNQsGyQFGi6DAeGjqgaARmDEpxRtAwAkT0RIWa3X09dDAKKGRKe/QgO2wkTEhhYbs8m4nownkB2RR3bIzoDqqortom0bN6rZIRMioGpm9nf9WKfIb7v70gGcqSCoiLQ5Nd2q8CWiimQiGohDw0AZiMArmFWjsqwmS13Z3fZlIjbT08awwUasZgACYIAZKQAE5iHvN8T9DL4IEge0YdCOiMaMTAyIIIPrCO30QgEbWuTAsokDSkgKKBkFdGpYEAWCPgsMeJDBfwQmoA4Rhn66rECkgKbAZKdqDSIgMBgSEgEaiVkeZCs67YpDIlNgRENTAzXNegrPOmV4AAzFbYSDrDlcekRgTANb0hRIVAiBiIbDGEAEgL0LDyat6sodz05Wy1XXz
CBITt6RxtwZdyoLUG9cCtagveRNsnNjv+v9RpRuuTwg66piUvjNXqIgOY59uwYRR7ZcrVw1BcDVcp1yTL2k+ezaR385x1m5SbThpltb4DkTtN18VHtkzqIKqAocuAgeCU0kFMSI0jaikYrC7YzStH756rxw46L0rxytN3053Zi8dLw4vJ6bvnXFmA+h5FGN/YafnK8vfdM3vbd2sVkrNRGdknPSdioZuy6atss1ZJMYm65pFgtX8WLdtEVexv52xctVkpIdsy9p3fUe2JD6DBVoQVgTi4pLsM7EiDiUO6uSDaXgoBgglDQpknIbU+rXPMUc2AdZsv/8QWxXMSY2QWvX7//VX63JJ7TgSkGLWeuSi1EQBa7Hb3/3l1nTfPTJTxWBFTAlGY/LM/ecPTqevz6b3enbacEv3TxG8AFZLEHJQtamnMxi14CqoWFKCigiloFjpkHLFlOJCHF7efj+9/1cXLdPPv9yToPcCn0WNsxD6DadJiEFoFPrUg5kFZlEEXJH7ToYaFSHPC6LJFbXbnNzK0ZddqvgcFQFYFcURM0aUzbsLp6brDrNbdv2PXtaNrQSLIFH5Dcnk50LOy9cu7pmaJququsE4bDLTR8nblJtbJdtPwlVEWPsFg/dv91eWza97Pc2tfyWCztMa+BJ27Yn3frc2TobzrI2XffIww997qUXCXk6rvZvrQCg8t4pIzGqeu8ZUUnbKEkMUbNJVumTeSYfPBM55tLVIz9qs5zw4ni1ME1JMjtMkokETQ1RzTIaeSiLwKUPhNqmHjvJlJCamMGMMyXAnAcDI6WoljIxqERVyIYmyYwAMJs6dL4MzJw6EVBfhJwyIkqSYbX1TJ4ZTQFMDNizBwOJxQC3NlVEAsxJEwg5VlB0pI4iE6O3nEU1ahqiJWLUKCnwImdlUlFK6tE0q5kN1akIoKoU2HusgmPvFqu+7VSyKOrdddbstLqRTn8FTSQxEJIzJOeYeNCVYLhnoRkiOQ54Gj8LCMjskDwZALNaYsQsCoq+mBoSIvngTRvJUSF3cT0JNCo3QwjT8c7mdOPO4et7x/edP/OEowIsmphiQkRiYHaOWUQh9ZZVYsopx5SRvQvBe1guV6s2IRGRZs1GqGRqSkCaBABUKOesoIyYMyEERVMWLpCMNCIoMlHOisRJzCdbrhoJgJ4LhSR9DDDacl5iv05ElppIBalTHDkvnoAga1bQiG1rftOXJe7tVjn33Vq1R0FCjwqa0MCxJbHTmcwwHhkGYagyqERIcArMo9OfgveOyYmhZQGRuM7dOuGUUAT6SAUH1s2t883Ezxe32nbtA4j20Vrgwju3Pmxqc80SgvlFzqPCs2AXzTGoofdMAKaqhDGaGrAhAvemYKrkxAiBm5SHuyITIGImzgSE4BwoQsoJgBwWs9u3CeBt9557+rmDg9bOjEoBgZzYDAgygHdQKqYsAKbEfd+D6vL27eXx0cnmjfbocGv73Ob23mS8wUCGGNUd7S+OTk6Wy7aJuY83pxt9fenB0WhDZAl9d2a60bT9bHG7BYMwckiArCDe86Jt1km9kWaWbKNxvd9IPTnTI+Wwc/Zt7x2P3721c6Zj2j84Xpxsoq//4Q/+8a/47h95z7f9kch1J8XOfSHX1jRGjltgPO7GJJOLZ+Etbx9//Vc/8KVfPcpHh9cOb6eT681tT+UT3/LVxSLHVI0fe+/G9Pil128j7E6wvuexM/P91bguwHODG2G8jRzUSIuRZVo3WY1C6TWbJGq78Fu/+Zuf/NjHWilXvbV9LEvnS8xtmhTsREQtMJIrOoNOJRr1GYIDRlcwOXZIhC4oGAXPZgAIllUJ1TLEAgUcI+BqHRGNEb0KG2TLjhySYXAOmBmUqRfxQNHAEAqiwACWyBfmOaIhGpmoRM/AhKLGCBWLmkbBqJAsG5oP3pEzM2KUpJIsKWY19ZQM0Myh66I1gMlwIcrrZNDtgAeENp8Kpul4uT2qW5RyVGCkrm2pT2cc3fmX/+zaR8697Q98D2zv7Bf+8Pbh6Hh1ZrIJB/vUibJ1W3v2rm/cfOu981tvLD7/Sn2wdrEfb0/na5q05dX/8/9+6HNXT1547sxymdifrLpQEfmA5Yg3HjHvig3ncm5zO378oXKD8ThT4Hpjul6uyJW8MfLScSeQ0sG0dl/3Hnnl5uT6LZ7U+Hu+5YE/9d1HLCWZRNU3XhmdrAKwNrEclX5zcqdLklMNWXoTApqXF7Bevj7fcEq3P7OPL053zti5s3Jy8sbzV/fe+977vvltb3zo/Xsz6Fbuia//njTZ+/iHfwmQ+pgT6mKx2iuLB+49V216DaJzcRq3J5Np6Y9ms1Vu1znuTkbzk3y07N1IRJJzfH5j99Lm7q3FvqQ5cDxZpuloZ9FBH32X+MLZi9uF2z9+AxBQhCWXwdd+I8fUo+TSFtHm88VOm7CnPslsrcteOFC9OV5qDpOtxToJyuzksCydUyp9qEIAszIUY/Rni3pRp+W6W3WGCcHhncVsRDIqXPBVkrUJVuXowYfuXedVqKo3bs7MoMv9osntOtZbm5tndz/55BeaZAl0d3uEnnidXMbtSf3aSfN69/+n6k/jdduysk5wNHPO1bzNbk9/77n9jf4GAQR9BQoijShYIqhpWqiYiVUpaZaWJWqWYlMiaYKaJGBbiiCkKIpAKiBNEA0BRMSN9vb33Hv6Zrdvt9aazRijPqx9ItPz5ezfb5+z373fPddacz7jef7P4GJ55vK5xWq9f24nAx6vVmoQaq9RXr32+jve9ZyknAtkpabdAqXcpdiDYxDAknArVKKKUE6L3e83T16aC8nVna3Dm0e31stHLs+RqCnYVNxOqqs2vduv7yaNBrCIkzacCyEWniiQwWmUra0ZVcOwGSSVsy0NQdE8SL7R8e3VZnLotys+31bnd+YmpUZ2IawX63XK/4WraFilyaRRsJzzeiUxF1+YcwwoHBhU69pHBET0gKnopGZXua4fyIfgmZQ0uKEUzapi7DwEsNXm53/836T++Hd+zdd947f9/g+u3OS15//i737uT3/791a7T15++tI//XN/8ynASbMNu9PpXrv6jcNlzJu+zAk8Ux4SNT47QoeWwBUIwdYs8V2PNuswfPbBLgyAAqauxvv/5keufOvX3HLaaTX9xj/31q//47hZnf6D75m/egjZQetsdICPh8Hx1IcGzBAcv/ySfPJj9AVfjlaaT3+6/Psfhns3QVA60ZSVlRxLAUIqRZ0hLLrg9ZHXPnnzW987/eZvmX3Dt5TLz02refrV96/+tx++sBoADLzXpFQSCII3WtydtDDMtMRNZSsgMU8qgkSAaESOK+MgWgg9kgA8jMioQoK920fwl78XEkHsH6sYogdtQRGYwASyABRAAiBQhSxADjxCGNvKHto4hgxZAQo4sZNT3p5CXcGyg66H1p+r9fQv/pHjt+9c/Bv/oJ9/vs2+eO+Lntn/9R+D7/kz8IGfaT9z+Paq3RynyaQC8r2WfkgTRgpgAjfe/4Fp2vp7f+eb76/e+Pzf88zeY0/dOZWm3YLiT1YLbqbAuZlNYLF+
9Y1XPv8Lnzn85DKuG/J1d7zaJN3fnsV1ubo9fWr//Mu3h1yGk+XGQKuaiPTJnTYe3b9770CnWy7ipKmymmguKn2mVcxCvE5aYtlvqyk394+WPlTmeNlFUakc1ZU5xCb4C9vzVewqau9cEzJXBTk5iCV54upzriIwKPqQVTSasR+ypc+AoIAGJgpKnxtjWhmtsKjsGAjBgIAR1UjLOK07K7oi0XGKRyPQBsfc9pl7aQSv4+eEC0YCBCJzBGAiSk7pcxjQh7lukLGGbFzfhKJn5AYBAAKgMaoEgKgqiKA6eprIwJCAePyZUEe+D4EZDNn6pDJScgCA0MxoNKizI/xcvRh4xKYK86r1yBTqksUH72ikDBqHcLxZ1ts7HsfQAJAijGognrXK2ec2rUByJs1ZSnmxPkFz7GuVaGqIJCJMjMFAsYwbXOcuPfros2952703bjERwBhnM2IcfVKf0/zGejcEGKvpPY84Vx0MFcDAxmn8yKEqqkTEAKDgmZKqKhCxqpzpWmON2dm5B8SUCAwoGUCBwFY5aogALIqVIoFRVRh13ITL2GB3ZmWCEclRRAgJCMmUEEUNmZCIRBhQwcbFc8auJrJRmASjMTdJOEKs9azIZqRm2RjKJ2IxsIfvj4hlPDMaIAGMOiYAAHSroiCHDw5W6weIm1D5nXOPuXq7osMYN6n0ORcEa+pp4EBQWbFQQXCWuwU59sKepoizbH5j3XqdGtQJQlvVN6+9CPNJt0GJNqQUPJ9ev3f08Y/eeenlc09tSTPJqMVX7HwuOaPTqIbROUQC57iqHbvA3hMKcoBJVQbvwros4da95WpT9R0t+8HiigmFc+Wn95ar9VBysYZcjFp7ht1mZfbhF45+71/4+vNu1U73d2aTgXA1FCKU9Sb2XS6yOj25/cad7uDe4YObpT7p+9MNSVSgibvRdckYBKxgUBmS1QqOURiUuJccieqmWolK4Hbi+36op7V4F5qW/NxPZtXufqmnhWp0DtNmyxaf+NgnD4c8LPM6dVkJU+fMppUvAEYhm2UiUADE9TrnmMnM1+2XfMmXbV248PM//e82J+utWRPXKWZLuj544TUngkCrPj0gVW8guZMSAqY8xGLA6DxVgXIxNRPNzOQ8ZlEEMwFmDs5VznWn/eGNo4vzfSOVBM75YTNUFSNAVVUpFQAaUV8xlYzoCevg5iFUldsaghJnKDUROV2f9rO6FpGTdXK1PnP5QnuKJ6eL3vQ0DRVCHuJQ1k1Vn6xytly0r4IzNDVh7y9dusDJ7t4/unXrelAxw6ceu3rv3t2YhqZp6tahlNu37xRJHOTWrRPv/O529c6r0+t3D28cdGvUz75534Fs77SXzm/fvHdcgUfIweD1N29feOvbtOTZJBDL2SR5SD44VYkpOuc6UQbOpr1I0bzuN70UYJy0oRavnRJwW4XgfY1+fzrfm81iit0wJEmnqe+ixJLFAIlUMzqqK/YEJdpySKbEzqsjgIIGpiWroXNqIGxFDUWcqBQxGanO6B2P9yhy5IJHF6gkcwaiLCRFiXgsOwuOHKIVG/luNNIa2QUwMkMAK0oIUqyoOQBGEJCmrgtqyrE2FDVVYLOcMiJ3RVPKg6mIBcYiWvoBADxxgZFDQFmVxEyoX4uhpoySgJDHSQoBKqqIMD7UJBAdW9V4ds57b4CO/XiYJDqb4TA5MyUct6xKyACAREQIgoReNedSDCzFIQ7rum6JAiAxOBMswkSBEDxXFdTEs8pNc7x9+8bLu9tPTnw99Mu2nXnXjBcaMYEZM4Oa86xquRQ1I9E0JEmakuainsFVJIjoQypqcdQXCUCkgBQhciKFmWnEYatZFA6Vc6hZgQgQVCwPpTf0bbszaXPUVEQRrB/iUgzLatMrojlXkLBm79ukeRqqssn9kE1gWKdJ7bn1YVb7BQwb8eBQCphDMysCgI4J0WTckBCiQsk6zj/GUORYgGAjdXEcX4kiGxOnIVouJWUwzBl54hA457LcbKqtrZ3pPA/HwkUglwjZPCIhG9eMyhx0uYwxpq3G7VUBTBCQz+ZNIIY5yxi/H1ICgBFbE3MxY0PwaB6tDlw5EkVTPNuoEQIgAMeYwYoRnx4fT6b29gvnI55OK2idQolxGGrmpWEUcc6bgmRV1ZGRDIDSC+LiYPOZY/9qPd927NtQS8xhMuuTxNixJxns5HizOu37o5Mre9u7ezOKWDWwKSKFAbAJ9bxtksR2tlfVba56JBDT6XljYg6zPtnWI4/PG3jz5Y+98Z9/5tzl5Xu/4TtSf7J1bk/K5uqXfQm9/MKbH/4XN974N4+949GLj3/e8eGLb33fE0dH5ZU35K1f/weevXDpUz/xg9zsDFfe8drdQjptZDhYvfjkF169+do19bu3Fu32UO1ML77wG5+BQVrDqmn0EPsYa95CCIpzN9mHUAGQJnNYA4snVoA0RLISQrh4cYIFq1AtTtPQKTmQnIMP27N6GhANnLNUJBdlRBMQUzNSwpxKqB0ynYHFiJk0AOY81jzQ6GgjJCBQQ3KMJo6gUUIiDx6JxJSYPWPt2DGZgIEzJ4jCJGplUtfgA/pA5AAiggA6QQUDqiqHVCQWVceixBwqIIpZxAUwErUiaK4CBMABVVGhqisOfoilCEYkAMcCjbJLpaQUzx4FsDWbbNY5BnNeKx1HrTitJnzS98+/8qlX/5Z/71fsfdsffuTJZ+LNu6eLoxoKIx6tB/eVv+udf/uvHdHynA4XF+uDf/qj5TMf1IvzbmevbOP5JaSPvlCf3jcwqxxiIML5LHRp8cYv/sL5b/nKMA9aVMpw++XX0mmZqbeY7XTjlXQzJIhc0Ifp6XqYfOv/sP4jvzNsuuU/+pHh9vFb/9R3XjPZHJw+us/62Zf7l9+o1IMPrrJuU+7stW//wR9e3r32/F//H8+fPpjVlaw2F+ZzNwm+CgMP7vy5ktLde8eTMFz6vIslHxz87C/Ax37dXX1ctq8+8bVf/UPf99dfuHu9C9gS+sY1pW2gemLv0VeuX88r3a/runaT4NabjXhY5UTOHy+XYnXl8crlx169dh0V2rZ57e7tjGUydTuznap0aIKWJ0092d0apLt9uNqsV5cf3cpJNqsEUgHYelgNWnZ2zg2lYXYTV5UhxVUR06rGDPmRnb3DxSZpqRt6sDnx3rdVw1hrl7YaXsTuZLVsQ1UbZ1f2L1/47M0DcgZkpqosT1w5v96k7ba5+Ojl1cGyjysCCcEtbq7Ota1anrRNWud1t3zxM0so2lY4nc3KYoBBW3SPXt6rvb18cKyiEPjOyXFD7t6D02KqWcSAPFihl1578/O/5Muk70CiJzYk8a6zDdaTENxmkBTLkHMwLskON31X5VMY9ibVUzR798W9R098Od5o7fdnU7YiOkwD7pK/JykKniRlzI55XtfzFti7bbPOCoka5cykVswADZ0JgBFBVj0ttiz4xnJDD05aT4F9IPQA7uEj+iHWWsp6laxoEVttBgghbXqOidnqinJW761unJygS66ouKpipum06XsFglkbzu20R6eb5SalWEAUzITc8Sr+7I/
/0hufvv3tf+5PPPedX7e68/buh3/2R/799+effPBn//J/8+Xv+7x//6lrX3yhxf/8iWev7vfa52/53ftfdvnlH/rRx46gXaCUotkVBe8CWU4Iy/f8rsv/7AchG//Yfzr8W39+nxXEIPPO9U8v//73XPl/fd+Sa1dN9sjx+/85/cx/aKQCcmA8jlLO+syZQBBUwGcA5uWDo3/8A42A3rix/Bc/vLccoAxQ+dE2EJANnHOAhpqTjSdyIziBR2kbfvzXjv7hz4Tz+7R/rrp71IJCLOCcFXOeIQ9AHqZ48MLzs/sHc8Ew2wGpIAoxKFIgBlUYIgAAG6MCOagYShkj8DYYhgA9QOfAeYgMUc5kIAAYvRUKkARAoGlgWkHpQUfTgIdBoHKgZexAgkBADOhwVRAS0ACqwA6Sg7VNUtz+zLWP/bGvf+5/e3+pq215+doP//ezm1wPglAbQPa0cgXAbO4ne836jUVo/RNf8c2b6/z//N5v33nPJM9303z/2mHZ27+AOn/z7r1S1ue13avq2ld3kp5v59O2bl2ICSfMkmFrd/f4dPPIfLvmfGvzYDKbnN6JVFcDECv70j+zu7ta9MfGLeH9xXHtyEBD0Lde2Lr5YGi9cVMfLOOKKMzcEzvzprLDxUaAI7g+FhO7f9TP22p3MmEFV2jIZXVfoumjT28xNSaBh7PQgZ6RYqQYmoKdOd/NVMnAI3qEcQBnNPaYnakcpigIhGwFkeyMTKxgNqKj0QzFjBGIaTyxj6W/ZoZjQGpUpM6eTYYw7uKBEJjMO0RAAY4KbmwhA0NiBDIzACpgKmY4ClRATCYW0bKNVehgAAxgRIpAeFYNP1aSOIRRmBhr6h2iKohiVx4qOEg2Nh2NwM+H1pWzCJmqD36ytxMmU8mMzgUXlABSZkZ2kzKsu9Vitn9hhPsAsoESnilthoAKhDzmpwiLIWS1LnWg4nn81oyZRXQsewYkZCxSCHyM2mWtoEFCtbEzHmyEYYxSzOjcsocRK6QRSKVITMgIaiZIYgxnqcMxpEYAiGZMKKKiY7PZ+HvBs4618VelZ6RQNfWIBTEbbKISyrx2HmEwy4bsUBHAzDEnMUPOKoDokEDPPGyErKZ6pm8pAAmYogIQw3hKhDQ2BY1q4bhCYBTBPoeotrPv7+EPMzY0jSYjAABCUSsIY51bGVFcpPZwYxS1ZjdrWx/CtOTl/PwVN31M1n3ZDBWd8yGwdq72JKkOvqQEtNIhqu+b5orlKB7RNV23GcpxQd2etrDp6hhvvfzGG2/cuvrFb8cZG2VPVroVbxZ3Xns9Gt3bZNv0j1zaR0NyXjWZSW+qlibeeRbviYMXG+m6DojYT7GuZ00b3fGTl5sXrmGDHjRHM1UFxq3ZNh7dRUGHUKTUdWiaMAyRW1/KyWuv/Mq7vux3TCZbk/19z+izEWh3uvGlRJGeqlYr2Np5kGKkJKCWDLMxYsFs7BAsqfSi7Oq1KSQDgATAgMUTTqtirpBtGHl3EvbOSbs73bvEk/PTre2k5Mhv784M40c/+uFPvPCZo+PjvqBkAYWWbBhKARuQrJhzPpsMQ2knIdS+l0Ec7e9uf/mXv+/0ePmffuZnbBgQ3PFpf4YHVpMiXPsCWWLSUahWLEVpVnkfikVmQAXIymagIEXRo3c0rjzH1FRVl7T0BkZNqH7nl3/Bj/3sz+RNHJIQmJmiaBo6Nc2MoFp577hih73G7XZiyXK27XZ60nV1cFe2Zyfr9exSo4AV43qQ24eHab2+eml+tByUKQJ0m6FxOG9adLzarJqWxWRrsnVue/v+sl8N9sbd052KF93SOWImcHzn9Khqa4hSIwLp/dXpdDrrAF+9d/TkhSu5cH8aWcuuC/00WoPIsOnL4v7J+Wz7s0tPXbz0wisfN88Pjlde8lsfe2SZ8/HpGavIQBkNQD2YylCSZFRyIUscbDhMy2UsTOAHrhxPfOWIcyrUWxWC91w5t7uzZbizTqnZrFRErQym5pDI1XVgsjJIIVY3PgR0E4fxNmiqaKTE6hywWUqYrBQAcYRkoMQARp4QHDZNpWKWk0kBNc0COuaMgJk9gUMCFSYSVdDRbGghsCcwM8+MUD5XvViKZjBjCKY26gVmMRbnWG206aCkImKOSA20aHmo/iDBGIk1MMdEjsSw9FKSEjMpjtZRUzFiwjEdgiaCAMFx7WBS18SBXQ3MZoWBzlo6R24SEqEDdGjKzKPV15EDALNR3fFoYKQxL0recNsCmgGpmfOOMnnvybF3wVHtm53Z/HyzeHN9eu/+zZcv77/du5aBGcmxP6MUagmhaqrsvXPsGFFzVtASgRSzGBqaqkjhAESOQ4FWJWVRJcAiUs4mSjbOlxRMxLzzCuQBFaBklWKBPTKXnBuk2ni97kosvvJ1TfF4LZ6Ig3IBj6GtLMdhyCVGX899W5V4IiA1u9xLyUTTrWZfh+G0mDCMAXn0LmTNgMgOVVSBRtDi2DRqWcfuDnSEhAA4ynAAZzWlWQo5zpK954JY7+zihBV9Xg8lb6QMbiYXtuo7R6soJYsxNjk5TIP2EouFhkvizWCrWFyG1pHzfozfm4IoRZUkI/TprDtWgYphH7VT8yB7gdrgDcERaNSKqACQFB9IARUoFytmMUcti6uXHr2XCqbeGbSN08zHXc4yUgE9GXpAMWA1zxAcUuVDXYnkodscLjZdhrppgmexu4bEAYcisZRiJsSHaVjcur2zCIw62ar9tEKqAxIUdUQUJoS1iGNqK+eHNPhQi7G4rXoyO3xw+mA44KRTkjc/9q8uNOnCk+9uH336+U//FjLh9qxBnO5fpu29O/G4ufrkneMsHbRPvN0uvmdowhvLoQX8mt/1J++uFrc//gsvf+TD23v5LU88Lou0feld9Wx/fXjz2t1X9y49tnN+zkDIs5irye455OCbhkKjLhBTyVJXZIhURkOOaB8V8eT0+M3PPo+aLu5MN/1iAMoKiyG1KGHbOTYT8QRGAIzk0AkxoAAYsSKUokXNWEEF0DGyITnvsZSRtswGhDwUFaRsoEgAFpwTIEJ26MZePwELoZ1NJlZsPaTaCVmGkkLlyXkLHl1w4IASIJJvOKCZgp+bcukXRVJRNSgYWsMquJF5RiUlhaqoTylkrRiNCBS9jkNToQheXLO91VJT9XkoKuuhP3MVpegrHx3nUiQO5HwCFYugyQ9wyzZvee8XLOZ2forrg1e4P/SFEYl2Jntf/u6hzVnWoObPzR/9w3/843/pQ5CofueX3Fumz9u9dO9f/gAS0FgVpaZd6qyrit35sR8qtz/0xDf+0WO2+bM7rseVm3kiwlzSUE2mUrrWh7yJ2vVtyvE33n/ui5+995u/uv/ybyVs+9ce1Be2J63Lz//6wT/66ebUkrCiVSop5PPf/AV3z3fNM0+843/6H+7/zb+Ly+iKpm6NTg51OXvm0u6733XvY5+h3ffuv++pw0LVI0/f++UP/74/9l/98N/6wfWVL3zz7kc2fNtv1ZpyzLnyznOofXXt3tHu9v7nt1s3VoeX9ndWqxiaarGKO7P9wYY8rPf2tzZp+drRnW
OIrVZTEMMUc2zcFqrlgtE253dmj+34E42DQF5LCPVi0adinpt+I0i5Id+61vW5zmogzay6c7pwk3pE7NXgThaHKOF0udxumiY0ADwUIx2o5JzLqugaKJSSYuzTemvG73xy/uaN21u7e91gkGFgCI2DmC82GCo7t9WcbtbrYX20OJ2e25m5EKr2yC2LmRjMtuenJyfn96dHx4utxl3en5O5l6/fXnbZu2BguR8K6f7O9uFqrZpcCFXlzHHu00svffwtjz62PolK0PcbAwl1O2Sz2lCsqrGqJyB2slq1NQ2a3jhNrxyvb4f1uy6ce/bcjGfVq3dP13k9nwb0sDle13M+1wQtIn0clFI3QNbZZJpjCVlB5KlJW6p2iO7GwYNNSWaiqmyGntkMqBQDJQDGtZoMpQmu8RTsv5SKVpvESI4IUNhKTlly0qKe2TOWKEOf+8o5j64QO8qpSAEfOEVQkHZa7U7CbOrvHK4Oj3oTy1kFzdV+Uez5l165+33/8Jl3f94f/bN/wvSRq6vrv/QDP/CD3/eTT3/l/+VPv/D63dv3XvqmP9AcHbmnH4U/87df3t/b++a/Jv/hH9/+83/zClodU8IgSdgyol3+pj96t5ut/fbkm/4g/sT/FK/fropAsdYj/PMfPfx3PxUe38uT5vTOna2lTFcA1JgVBFARQlYFIgRVIIaxfTMVMNp76Vr+7j/ntUwNQBQilJzBeWDSknkkGQsAqKJRcKAGxUANmPZoB047SPcgCzgERFBBRCgKHsEyPPLk/jf/V6t6sjOJggkcQrSxgQlKHsdKYAZsAEkRzJACSjLQwh5BBIKDFqBkaBBIQQugH60vwA7YAxAwPzQVB7ACKUPNwAAWAQt4Ai9QBCLAFGDuIW+gDTADGAQaD8fqFaA0X7DcrP/gVy4V3B14KkBytVVQzFzNBBJbW3SDJ4mdNu/+8r7eevHVNz/y2b/zxNc8u97NWys6PYLL+xMQOzaZXbrghllTfNmse5U7t+5fubw3HMWqaruyYUm+bpAm85p26uriXnj19sF2szWrwhoFglUOH2128mr9xv2jFWqXBi3Q97F19ux2G9ge26umO82dw+Mhyu11qjPMBnnisR28Ntw6GcC88x4drIeURVeriEp9zoYWRZNl33LX9U3lhuV6vApSUVUo8rCjCmgM7ROOEUYDIDHLY5X7mbEGzQABQFDBkM7YNKOVS0eEJ6IBGtpZ7sxGdck+pwydQaxH3eTMmI/jsc1MnWNmUNHRiJ9Ua8DA5EARMIumsZcNzPGYTyIFEIMCGAGJkXAE14CefeOACEqGDMQwwqrV0MyYlRFKsZTR0BkokfHI+z7jRoAaFD3rhiAiUGDnmrqu2MOInZTCIeCYAjOetTur9fHOfKsKNRIYqiM3GnnG6SUijJ0ragUQRbSLfSzZhRqkqKmYqQESE5FYMiuEzjm2bIsHB/dvX//Ep3/bmFDOHFl81vICZ0kHQDwDQCsQgIKgEY5xwhFUZDDmBHVU1Wh0DiGaQ1LTktWxozEZCSgP8eNw9s7D51IVCIpAYhiLDknHkw2aioz99gxiFXNRyKOoB4CIMkKvzrKNBgiKSGioikRICIpnpreH4iQhjrExA0Q0MlI9a6YbM5F2pgjCSMQAPasIeghkB3gIcCs6lhCfHQ/OX3lUtGHW5dFBgzuT2bkIHrjjtk7JoToxBFHVhBFyjmDF8XQoA0DphtNu0236OAybrXYnZxu6Pi9O3/zsq7/207/y6Due232nUEubVbS+93FY3b9/vDiop4yTxrXNSR/VCoGAGTlm9FWz5T02tTNXjIW4Aj8vIgR1MQYEbtrZnjqD3/74GyztvK0eDJ0gbpJcO3zArpKSzNASaM7723NW5xq/X29uvfqJ5vd+lQIlduRcTYqa/Va9HqJ1ZWvuu5V21jumnLEIFQqrYp6wnmyfrJellCRoRjWz2tiCjUWkqew9X/2ue4fL5XFxVYCquvKWJ5AnbnYB0G8Gii4wlNbH05tvfOLjv/3Sqy9qEc2YBrMsqmdmwqadJNW+JDYxBPRcipY+GcJkvvPOd31+3mye/9iHi+aMGkuez9oU07rPBOoqLqAxFSCYTUKKOYsFZisZvCMtDinGpMQi2jaBmcxEC0gsChQ85c1gaqWXnXk12eJf+cj7NxIzJCCtvDPQ3fPbQ9R2b2sjcOHc9oMbt1IX1aRyPGsqQ3NEwiWUIKBH634o4gm7lFMqSASoJ8Piadec26oP1n1FhK1vHc3berUZJlXr6vD01bcfPzg+XXM32GK9qRxhtAvn5303OMR7p+t2awqOUi6L49N2VmuG48VmttO+87l3LO4ttme+rb31sD2Z5GB3F+uDo83WdphNq4Pj9QPsb92/U3tZdtFVzZ2T06259+w36fRMNFE9XW+CIynlZHEsRJNZkDgcrtfrlNYxFjQF7GKESFWInmniXAXs1Pvomqoq6CZ1U1eTC77ySKu4PDg97iEjmfNI4Nad9DFmhuBdRZ4EHSMDpDMGPDoElpJTtmIlKSohnbVjAhs7IjPdDFZAxTQLMaMjIBAlyRIcTIIjg1xQDJAZAAggIJAqIRmQjlK6js2KZ9p3FFXOVXAEVoqYQs5aAMgxEZIQkRVVUmAEdiyqxBSz6ChMiiGIq7x3HIuOESdEco6zCCOOejgxKagLNpuGih2VTCLOVQBuNJwyc9H8UJS3cf4BJoTEyAaCpEZAgMhOTFULMxaxtpnmHGNMjGwGqmBIYjAUvbR3kahyCKo4a7fRKObNaXcbTiYX5+daJAUtkpndWVcFIJGrm3q6sz3bnq4X61JUi6V8pk45h96haFbJSGhOS5IiyoyuciA5MJmY2Xh/R0LQaHmM7ymMu4CYs0KuHKz61SauuwLz+TY3nhtbb7ott23GRXvEgjLsNMFOZH0ip93GVZ5cEBWRbAVWm75tQmjbthlW607VxGgcBSkSj7sVRkDKRQzNMYqaczwC7bxHxwSI8rANM6XiAxkSMEbT4szNa6l9VVcByaKkov3J2vpSanVW1cwZCoJSibKJ2qF5H2qcIm8WKSiBEZIzGJtVrRTJCmpgaowPYeVgsZRxr90PllArwhAgIHrHhJBLicVykogKhBUTIDnmlHWz6e7cu3l5Nh1OtfIwlGQIQpaSeqB+GBjBIbWMk8bnkhm4ric+1FJivxxyLAO65elmrBtFInSsBiPtr5hlgMa5VdFJ7XtzJTtH2DRNVTXg61BVzoei5is2ERdCLoqgENdDv/QGmsywAaDLlx+9+cpHDu5+ui9KvfMecbZNVNaL1eJgM+G6tL5bx8rN9688cf1TL7eXLjz9pV8pqRzf/vitV17eHH768fc8028Wz3/kzf3dR22QPp1M9/bDblU1O9TMiOut849S1SoQItYhpKIFyFS9cwY2FhGlmEGKQ7bcffz9v3zrk78GJ8cXd7aGzhZ3F50aeGeMiBajqCohx6RMjIjeh14KAGbRwiiGQyrkgDQ7Io+eFLIUBiqCMRZVMKAsImCA7BxEFTMiJFMgUgYchiEzIcHYa8ZghmqqgdkQVLVygcgRg
DpCdKIGTL7elsn5uFz5uIZyVr4iisijRR4IhBkAuYvYK0UjRPUIBkaGPtS1yCBUN/PJfEdj1zRhmXI3nE3PnAcRITPnWBMrATtfYtLUJ6NHv+h9ef/ZdjoLqX/9P/7cU0KOUTyEZx9zb338ZL2MecixI+Xhxr1Zc7F743r89z+5s3PuxuKo0sKVS13Ki6UWRAcYkEJ4hDl94vlXP/UGQkXP7i5vXz8XSKIUVS2S07DqVst0sD+ttszXDcbPfLD/6y9Nr91utmfTdrj/V77D0KogPpftNAGqLCgwJpWNb65+3he9Rj5KqS9fGuoZgKccO6vemOOlr3/fiy+9uP/xF8jvNX/gO06+4uLRzdu+5xP46GvXytPPfWX1+NMf+IWfeP3Bq7bVlkFLKoWhdOaDDKT7sPPc5SdONmmzsbzRduKHlLMZWnn0woVhA8NyOdvfBao0NFpXXmWr9Zt17xW36tZo8vSVy0N3sjk6DnuzvoIkQohDtsDet2E+nS2XhyXGCOQSpyGtdS1JDElRh66f+RbApk2Tp8WJALrVkKOBQzi/VXNlq66bzVo73Ww1s6J6sowb7WbT+cw33fJUDQ+POyv5qQvnrt+5e+Xyhc707vGyS/n8xYvJ4dCtTxbrva36yjNXP/Lhz57frh7Z3/KAq5NN5dshDZRlNm2r2JNaGcp0u/bI2ZWt7elwGomdGWpRFfvMiy+dq+bDemOoTaiCm56mfnurvv7gtb4bWqwcudpVgFbiEDdSRMC5B6n86mt3ry3X54kHs6lhlDLJw2R/MsiwDZgETs3SkBx4ExCF4OqS4mqZpY9gYCazZhosDiWWUpgwimlWMCQa8zJoCMRUUMEHMP0vpKIQPCjVPiQrm80qFbERzZGlrGOokINduuK7+/GoSzVUxYCIiKHPJSkwUxyiq/z+uWkBzrGs14NkG/unN5IevPHaGy9c8/Ppt/1ff387e9tj1z/76Z/7rX/9c7/8VX/yTv34I4/8v7/r1t/9Hy/cXT5Zn/qwZ8cn+BVff+Wf7Kfv+O9JnHdOCznEISroGluYtD1vteGZx4YbdytVM0ImELd/X2B518hPBQAhFu/QMzHgqBABiYAa8Ij0RSgGzgEplOLLmEwxINLaqQI6AlJyjIhWhNiATAEAszkCUUQCG8AMSEAEiCwLehor6jVlAgdg8I639V/43hSHxb/54FaKMBwDIxGCq8AykEAYT6cCHEhYOkMyBlA0dCOmY6kzc+ZlYewRqIIIo0wBMQKNgOUCZJALdD1MaphNIHewW8PuVuqEMriLfAKTsGJ//XpoFfIAGvvg/LuePPnY6+eEIWawClIVDm1LaeJ96QeaAQS4c6IcgikA++lju7tQHS77ra//jht3PvQLL33q2pysooyTztIcw8kq3Xxw1x1UV+c7z158hDa26tKiW7548/UvuLQVXEU780ef2L+yd+HkzeMbD04ff2T7zpsPQthjqxaLTQI7zSkC7FQcmJDt1uJwPp1AsHUE1wY1u5vsfr9axfTu/UvvfNveY6t0/ih98trJnVUJE37bux6b3Ft98vpBLpAiEHBRVTJEVVTnXZdLO/eTLb53sJju7jpQgBMA6LOAjQEuJiJAw7HjysbrBA1BRuTNGOnHM66zjG5oQ7MzdOTIv7GzanU0ACAgDypmAiZkZ2xHQwBDRRiDQKCgaDAyOgmB0WjEAwGjjhEkQ0bP5IkMMRsNZhkBFAKgB0SkBJhMs6gZKYKr3Sg2mZy1jokpERkqMhgBGigRAniy4B0UyEVVwREQgidUgGSKSKYmhgIApoCoSEhgjgxREXxwWSKCMSFzXXICVV9XYJOD4/tXrzxjIKPVfUSaIiIz4cPGNzRUVVHZrE4c1sxOVBCcWmYkGIEV5IzNTHXsVAsuBAdIqAQiNnoeRtb0GagTz9xECIhkZxRqygbOTM5G04RoIqCjkWvsmCargzNENcTxmKXgEZUQkETNAMjOeKwAIOMrAyOBKHTJHOTdSe2wxFREqAqBCUgzPVwR4xkNH/rL9Cw5BnrmgMQRWIsm4+dGr5EqKpgnIxqDBGCGBIZgY1RwRDqdtTARGiIhqaobe50BCQFUefwAQc3oc8WwAEzYD6WLq+22bQlLwvXmKG1uN37R94tZg5N222BDYFZ6gpGfK5m6srw1DDmnpGJBXaupP7l/9/i0O9pc++hnb968f/UdpM7WcU1o0uV0sPj4h357iGItThkZSQkNDAUka3AVIZBlRL/KyxCqOkyIa6IGQRlrVSgmQFRP9z1Vly4fvPjZRRy4qtiQUionJyePPnKu6/uYNWdlQjFD0/XpZrutbrz8xqufefH8ZWp2LgJiLhrYowPVpEUFlD2TZz9tQtpJxQqmSI1DCj6YLY1ZCjBCEihjlaChFd0/t9fsXW3gyILtnr/o6qbZ2UlZxTcAbrYf2km7Pnz9lVdf+O0PfGi17FWt8gF96GIGzwG4rqrY5yq0Q7fORWMqwbvxFO6Qrly9+uwz70lp/cobn1yvlj3QZlMcezWsm3bQrh9iIIainhiJp2F/GY+zRTXoe4OSPaAgcvBMXAGE2ndDX1RaTzvzxpDKpm9R9nZnnr2gPPHcJa6rrcZ2n5tNJ9XskQtVO8+KxdLieHHnztHrN+/XreVBSRGIl5uhFhR2y/VgjN57RV0NvSSoJxPv/XodHXhN9sard9/67ON3VtdSDq6qNjkOq82Vrflqk4dMtx48cIIllvlssr/l7p+ul0PUZakLchPm23Nzrh9y7Xi+syXmsIY+Jejz5ihv+rw9rxzDg35xZ324vbOzpY0xbNJg7LJCymlRYGfuw/Zsse5WDe3MJqgqdHYd9DkxOABa9MtlyUOWpQ0plV5kk0oGKyZ+vCEzpawiYAWtrbyzrHmIss62nZSkn07D9tbcJ+/a9nC91NKvN4NqVkMAh0TrVMijd0wy2lEtiXoSFssxWlQRVkMYlQZnrhlNGYoKpctFUQ2Q0TtmZskFswZ2FdiI8lcEKcqEahgYK0+eiYDUqKgUgbHnrSiAgqoxk2Tpi1QMgUjBslpWdZ5VjGj8VzZG+bMaIWURQzRCM0VC752qDkNKSRE5ZmVCoLFxAMctGwbvAzx99dL2xBHK8mRpqlXduOCNyI28v9GKZETkERnJxliwIZqhFBHFEGoD0CIqMvRDTlFFNt16vvsoAJCBqJlIKnk9rH01B8OS1Tmu6q1Js5djTOW0uOM3HzzY2vsqHbsmx+AbsIkgWsV+21ct8XwSFv1QRNkjEosIoGYjJHbeVaagPpM5N7qg2JFrfVCVPncxGhExoKohgqg4T5X3ecjmlKcVeJOWhmW/KbJ/7mqYNoLRlyiCRRJ5rHyoHG039d1lgd71qXjDalYp5aEU5zilUuUcUJvKrQCLAigZABESkIicjUIAmAmBUM2kjHFqcs47AgNklHJ2FWzWnXMpTGoyEymLfpBpDS44cp54a8vHFPvDsl6X44NVu1PLtDXORTsEYSmmPMQsYsy+aWzOdS0EguS5iIlYNGUMPAYImQEEiTAXZiqpjLv1WPBgsK6kS41HIkRkRIcm
gKcJpqiudjmXAIIIKRXYaOOq+da0QanFNpt+mVJhGqRMiFvPqcjUu4aLQwRXczUJzsesW83sNC2yqjIBIDsEKWgIigyISFmNSvGBPFHVePY+J4XgM7hAvk8yrRnJeRcCETorJQGBI0ipeCyE1rSTGIWCRx9MtHhnmnjmTNFIgAKW4pogrtqQJ1eJm/UdfcMf/r+9/pkPPv6ur/rAf/yXy0/8+9VJv73TFnSZfDu56KvdOGTfbIM109n5+blL9dbcN2fpSzBgRCBkBjAQFTArRcmBlGRgRLw6PP3oh371+vMfSQ/uwLDGyp1r60e2y2un63WGdRJQc56yaFEl4iwlpZKBGIzMHDEhZSAtSAAVBykSIQZ2ORcRyAWycjQtxYArFTW00VtExN6AUZHVzKTk48E2B8fn2mF3MiHLJiWXAg4rJmZ0qoxZNSMqBC/q/ORcPb8i5HXQs+kROVDUomgJzXIpyUSsLHLeZEvqu2JDKozmmJuWRSVlcQDBCNWjr/vciWvDVAAOxi1RjgYkJpnRKXpVYUdt4PVi073ygn/+1xef7j75cz/12EFyfa2I6xCnv+Nr3NufOtlszk22a6ro3vL+b3/c7t+zbqj8Bo8HXS14q0qLTTNp1OVeoaCz05N6e6/daaW3aVY7OdbVnb2JozzEIXNVu9qtUj589tLsO/7AwdHR3X/48ztvHu6/9erO+77m8Pg/DmAVVpPiZFPamkQHBRUZFIqh5+DaqnrjR38qnd9744XPPHX+0uO5WRw8mD22f/Ebv+XO7Py5L3mm+bc/XH/o9t3VIZTVL//iR7/6y97VYP3inft08eqDO/fCpF/CAW97x9DWrpcStSSUg5Tcbrh5clyibk9dQT5/df/+6cmJSRR1QF3SlIcr5y5nc9u+FI3dINOmvnPvaGdrf6+dh4Igw8np6WYYQlMNXTruNiZZyuR0HduGfB067ULLU+fSkM6f337pzs1z7W5jAsaVOUHe8tX+1vT+0ckM83TSnPbFgksOc0pt0yBDjKKSG6XaV0lTZzSggtGdk+V0VisgIB13XSL2jb94abLs12tVROfApk21ltTHYoqvvXp7OptsNVQLrpans3mzAeo3w76V+XZrB8WAJ9OqacN6nUuMfVy5JiA4iTkO8ZH97XoyPTy8vzufdhkXOU3YCaQcdcvvFT1Z65BK78EIdOJCvcUP1mtBrGvHU3cg5XTY1HU4SuaNJ8VfnlSTAJtVjL04g+mkVsFuE9eyEAPFkGqHwQjl9HQVJTMxnBnOg5SoAo6p9j5jEbNioqKG2K06q/1/IRXVtUelfrPuUjTTEFDUQIgdE9v+uXmG9WTXnj2/f+fB9WoamokjZ8gYKs+deVdNfTFHWUvlra2rhul0OfSbiEiaNBPutfgbP/mvmk3/u/7of/d85K/4tq/+2jfjr/69v/rg0mPf/SM/9Ik3PkE/+a+X3/+Xz//5/+/Stf/4Ix/56kcu7z8xmb+4CeJBFZRVpL56aeLT+vTmzt318SdenghA8FZMxRgYvAcpyB40DYh+q2IQSQMCUcngCchUlOoGDIuocw4QQTJgAVBwDEglJvAOHFrJRIioQA6ZQQQIyLGZFckueLDxECqAAoEBHfQGqoAMjFQ5EIWGT27eWATQ6aRbxq2qgcQQGAAAGQLAlNfbjqaX5SjOTu+BCQcPQGhEYJBSKvH4S94x/a4/0JehfPjFyT/+T3NG8BU4hm4J7CEVqAIQARs4B1hBzlADvG3/9lNbF//Qt2L1pHv1drz9nzff8N1ufrX/G98ZfvMDkCAnPv1f/m3/Vc+1H/8F+Et/Da4fwrKHpg6VD4VKjBkorjduxjDhlQ5BYX2s7/mWb4WbRxdvfOqnf+6v/iIfdlfO1bv7djDwUM5N263tLe2HHeG+S76W04Oj9VFfs9BUJrPJ1HACmLI+fnm/7zZLW51/cvLqZ2+6gjErs6tDdbA47fpIylSHonjt8LQwOTUy25oyV/7e/XUnCmARm/c/f+/lls5P+OojW83jO8dDVBhcE97xxOzo4MH9ZY5IFILGJFm89yGQI2tacpVy0JPN+tH5xWrqH06SzUb870NyEJxFrEecC6jqWCGCCGSGQAI2JhppNLCoPYxAnYWURi8He/TBiARo3JvRQ5aynnmKHvpURpsIoKHgWEUPqgBIjkGNEBHRISJjAVDTqBoNMwESCqiOM1tDAQNARkCnsymzsyhMGUpSVHCAI37oLDBghAAM1jIwohkSkKIimGNgFEarHKpqLmDGgqhKZYxfGXhmF7wAEGCopmfb+rNaGgCQqp2s16eL9WJnOsMzzrQaKIwRvNF4pQIqImXVdY5qh05NAExUmJmAzezsLWXUbFJEwerd9sm9Z28d3j28d0/xzI5jdja+BFQVBaTRVINgeFaQpiCYzZiB4KG3CIAMRRDUiMwxtJ570ajmGCuApIIGDhn+T31hCIBqo2v/LF+hRkAG1Kn6lJ1DRlRgMWBARhIthkgPO4kRUe3h1xt/EwZiOCpHo08MCHHMU4/5RwOzkQY16megMIYJcLSkwfhfztYTqKkjUjOHY1fz2VcVAxElQsL/I/14cu8OUDWp6PjO3bVK1c7qEAIv+nQIJGBQcZsSZM0qfU6lDtOqqkLIQzxk59WVGTezre3bL37iwf2b65Tb7csX3/KWxS89f+uNe4/0aYidj8anJ89/+APHy8XW+Xm778IkIEjOwqTe+dpVvnHkmAOQrwBqNpZCaurcgIAENSMwh1gG8g1M2qe+CI/l+vIox9NlP8Qq0LxtUlZQNDPvqQphE5PG7D2QQT8IYkzd8dHhyaXLl8wbqAASM7squFL7up5szZrZtMvd6clJQfTBVzUPsU8iYOg9OYUomnIJzI7JgK489uggjtr5VuuVWBx2osVgUrFpWS9uvPCpF669/MLh4SpHq+btcNg5z6a0iYWJJ5UD54fcr46OQQUQgiPnEDIEZzvzyRe8823DYvPBD30o28o5lkHq2udii/WGiRBoZ9bEONC4YBAWq0MEbRo2gawKCNNJlWMKvlKgHFOf0nTabM/aWTO/dvNu1jKbVe/7kndgSVE4b7t0brZe99uPPTGdTsvp6vbp8cn1B6s+rQ+OVofHU3brxfrCdK5mllNTVSplMeSd7W2oSERVYxyGWe03KaOWejIdPYpt3SAVdrhVtTcXyZsEROfECMBSC8EbzOfbj5y7/PxLH0teawB1vDudPrl7+aOvvVTqSkqagC+r9RNPPHayjNcP7rchTEKoAy483To83WkrKOrRT2B6kjYNwZC102gx1p5nbZ1zSV0MNU/n4fDeTW7GDhAAgJO+N8C4yiIx5qJI3RABQAjIozNjI48cVIuJZwCQtm18hT5YXMXlYhmgstmwPZ0PMQYIjnhnNgdHQ/R9FjMLwXRTQAskULbiyVdBihUAct6QEcGssHOihizGEGpygULjwVCGpEUMyWxctsRAkAuLmqobMWRGMWUgdjTSYGA0JhUDtCwFihoCMiGK8Uj/V1BTBQDEiokQwQQUcMw2mko5uxMzkZrkkkdNB4hAz/yczjn
2UMSMoag5RgSTUsSUmAjJsVNBApqE2knucx+ltE0TYyZD9gGVDBVJDQQIgXgs6AJiIjIkBANwiKaj8UDVFIc+SsndZuHDLKeurqbeebEOUAlzXc9C1XJUQkIfGKv5fO/k8N7q/snFC4s4bO7e+vSjj3y+CIgZCDjvkmQFrjlc3t492t9twnDQy+3bImlU2ouolJidcwAgXdaMzggJs6gBgOpgPTnzDSlJEXM8VosKIVpRpRIq3N/baeezxenx6enKJSspb5anzTSoFOerksswZBghAsinXRYDY7OilkRSNg/I6CuPBmk1FCkypDqYImWxlIwM6ezJSeN60YeBZUREwpE/PH4MAC6cXQUxD2q+bASLqFouZ+GiopAAiTwEy0pDxNzDQodiGuZOS5k2NSSfsqFZduzI1Uhe2J85fBVBGa1iNz76PKKJZBVmYGYiUIOYk2YFs8EABDYOiaGtyDEmg00sgxESOTYC0iE5x0VUu4ihc/M5V56jB1PJOdQVKVSIDggIRDKCm1V1p5CL1OQRaBQJxczMiNBEGRHUwFDNGM0ROiZHzMymyEaI6Jy3JODKZDIBRTxjMBkROfaKCgwukBmjqUqup9tgOuYSxTTUgRG1mJoKSD0ZgwOuoJtMZ4YV+eNf/sW/O0dccXjiPV+azPaucuVcirJdTbGZbs92RbFpdxBp98J5Awp1TTzKOIbEo93ZDMSUCXNWyRkMpSQi0pTffPO1X/71/zRLp7tOvScr0lZ+fxpuLYAQ2NOyj1vIADYkGYqxaRYQkECsiCpaBA01F3XkDNCjoKkzBMMkpUulF1vmIurqQKpipkkKACYRQkC0rJhyGYpGgXWO3ZBO1stgUAdfBe+ASA2siPSgBUgN1IGvCKHEtL4zpI0MHUFSpEEADaMOFiOrqqqQdjkuh+F0KOuEyVAUmcDA/No5DqAwm9Q8LFcPVq5t2LvY55QeeuuM/LSV0nOx2GUOTb/ZeNQ0cdmVur92/C//1smDfr/aKb3rzFeVo+2tC09+oeSWD09wne3w1dNPvnb4gV/fllLPmv2rl09uHFPjCaBhLMPmvvWzb/iG+txl/OCvV+tkZG5SmRKHKlhOKiKGgLGPoalWWZ78Pd8Qn/2y8zs78pYv/dD3/tV3X95ZvvzblUWHLsUiiKFyhJTExi5sARhK8Z4u7c3aV67ZK6+/c96evno7d11Xw+rRAI9QKJvDf/Uj7Uc+cHBt+MI/9V1vzup3Du29//BT8Y3lF1999pUP/+qHPvrRJ3afe/7OSa78pYs7i1snaShXLl1cPDgYNkksF6SD/iRZCwKT2k/ruvbOB6wUsKgvfjqdvfbgulqpmUpXouj+zlZdtQb1cXdShq6eWtESnA3LzZRD8B49rdn6VIorbevjaTbPRHj3+H7UTEI1QIqdCVbeb2JX9S6EysQb0KR2CpkJtGQoenLaoblhVZzAatmV2Puqqet6y4WXr93ZqvYqz71snnvHY6kv3aKfVvT67UMxdFwdHhxfCOfW66hCqy6ac4a63AzLvlzc36oErx2etL5++e7xIwxf++63Lk7KZ67d8FMMdehXaxFAQZXEZDvz+rm3XPnQZ9+88+D21/2OL2o7wl6rEIgnQxpEhUPVrdfzpplwu+yGuqoE81auFpsEUuratYGqelJQ10VOSpxWdHx8fL52RWExZO8oqlWOla1uOIs8kLJ3fq/fLLrFqvK0Pds24Vfv3DTnCXvvORDYeAcyMwMyIMbgHSGk8tBbN/4lWa1YjHkzZGZ0SFYsKyS0nSmEVjvB37h25/ELF69caDxBslKFypTI1LPLKc+2qqVqGoqmPJ+7ra0mpixC5Dh2lrOtTWnZ/9K/+tcf+IX3P7Ubnvsrf6r+/Mnpi6+8/3/58b99+83v/pH/38H8iZNPfuRH3/E7vvU733rp899lV7798nf92YM/+30p87xmU2oB4O983+z3f8Xm47959OFX9l4/Qo9iwrXPBjwJ46E69b02dOxYmR5rlR2uj4Z2fw7rgdCIHEQFIOcYFJWwxBTaAJ5siFi1rgoKACliMXKMmoFlHNiDoRVTQh8qEAUFcA58A5ZgyICK4GAcSxYBy+ARrKSXPnhuMqx8M0WW5cDYAnsoBIPAudny930dfue3G8/wP/9a/Mv/XQUB2EERyGObug6z5uKf+cvyjreGHN0zX2efvgMf/iQogq8AHRCDIyAPnqDbQNOCqyDoieSdP/Pdl3/PN+coYT2Bu//utV/8xWd+9/fc3jRua3crKexMKJbp/jOnzXzyxe+RZ7f58C6sERBjNh2kubRTDu5MpvB6kvB0272y1mLow+nP/RKUcFvLCxnwCz6vsy6eejvp5opSFZ5kBNmt68l0/8KkXpyciLlVinfvHx0Pw6xtXvvkm1ttde31m1VFBYbNMvpAsYNr9w73K3/5/E65laYENfMWVXXdfvJ4wb6qgLrOXJcnUzcpbr2K5rQ4NXAng7tzFFdpcfXyZH9v69Wbh28c9lvQv/ftV27c6z955zAKKNv2JEgxZpy2VTX3AF0A51OobDfLarwKsirDqOKd+TpGsjWdMaRh3GMxEdmo6QCd+YmMDIxRAceEAIIZKjIhGiOQE++BHGQ1zqBqpmNYCs9QRvg5PBHgmGQa3S6ExGP0yhwjjlG4sUoMQUxj0QxUABIYIRVniKhFk0AxIEAIVGpAz1jMOQRCyCPXTNHGmq8z/w2iNd47dFFNxy52BEDxgceKdgIIDsWsCBhTAYhJa+c0jSk4b1mYR7ohEDM7UhFkJAzz2d7x4d2tplJiAEVgT9Xo4RnFHUAVtD6l2A+1rz3hIBmMDIwcMqCpoeHI9DEyyYMBeeesk9ZNR5HFiMb2ZRvlNgVEEAWkEXyhYxewGchI9lEQMEEjQFFlQOdITINn50DUspigBcKKMUUAZUQGUMKRKa5nPG4YPWJACCoChIZgYpjzHvvK4ToLnpHKiZHBgGwsjKYRjm0jnZFGzJXR2SocsVZnBzwyYhjXiykCEhcEJgJVBRoNcGgApqMSiXgGm8Cx8WZUhBCLnKWlEJCZRrHQHvJKJu16s3xwdHgMOZFHTlvM1fHRq51sqkCVusSxSAbyJFXg1lENxiUryFCT1DWe3Lt3/fDuzWvXjpd3964+qhWUuccGV/Fo3voq5cXp8Wc++uEbr78SZpXfDTRBbj1YIsTxDArBQQDyARBFJlRvMQvyoJCkFCIqEEGNiZmpFOhTadv5Y2958vSw1+Xq2ms3IWUmDky7O1v3jo5GLhcaZBON4Krq7v1DJ8cWpy+/9Mr+9nkjwrF0Uq1yXJyvvAPVtnELB2rCwRlZhIKOgLyBGELMUMyqOohYAUCm3atX2ovn16fLpqLgqhS1mnvIw/3rn3r+45+4d/taYNf1RZBSslXqPNpqnVXUxMBUsp10ixSjq4OooaAiJgUheNvbHv2y9zy3vLW4fu1mU1M/uG5IJWrlERRAMZnWAR37TKWLaeIrMQsVrVMygzoEp2gAWc3V9dZ8erJZqddJ28Su3HpwIrZAheDdSujHf+1T262PXZme3zpJmUF8w/
+jkxAQGCOraEBEDGZknVsCW2CkhEhgkIGA2gEhljMyIiImscGcvWmSxn4lX1jJmZDDITcoiCqMw2M3jl3p1v/MN/5j3v/6ijTFFx9ZvcF+RC2/ibr780u3c7VcuBxVQ1w4Y4TceDQZi2RoQyc1xFLrFLVZGZxe7+KO8dHSxrCF7wgYdODYaD337m00+/72u+4V3v/PKLzzUhJotV2zVGgcRJan2riEmAVQDIoiHLIaTkgwo2XYcAuaEuCiGtZCUhpUgkgtYaC2iYATV0dca5Jlx2HtkYVAJJqoZZRIgJlZouIANbNNZaQyo4qWOGSM4CKECKXZIoyfsoHTEYNgkEmcvRetk/ZTjLd480zJtm2e+7vOQmhuODSaC88tgXXDdIIP3h/TyFSGw6vwRhJAw66ve7rgtT/5M/9dNf/953fOQPvp9G+d3nXzcGQutHVbyQnbz+r/7BpXsvPfBtf67z3e3JkRoBg4HVjGW0w5VOr0W5dm/v7W97wvXzzb/4nYf701N+/oXP/8b73n/u4N//+MFP/bibx41iRGVx8eyZe7evLrtjq07DsuiVwsV+sXzyj/2pUw+9/wv/5HdaI8dlkT88SAfj+u4+LdLB8XJjMBhslm6tXB5O+sdH+tKzx6+92T+7cQj5m/WC3/30k1//kWx97UuvfPnnfvyHT06Ox6Ny0cF82fqkhi1aJkeL2WRzY7y5NroxWZTWtt4PXc9kRYzqHN25szdwLooIw361nLXtMOqlzbNrhvem86OuXi/yejYzTJEASabT9uxmeTI9PjqpFMyw7bKY6spDgYZjbg0oe4Fu2ahPA2Jr3NlB8dLtux54njBU87YKWd/tWLt2bmdnvXzm1RucO1+nDMk6Nqo5kwHYWe8d3ap642K9zJeTxXLZTbqoRFxFleC99Iwbro9JeH9v7+HtB5uqlpgy63pMO+M8oxQh2kwJAwI4gaS8vlbM65Nxf2TVtPOu8Wk6n0FKy9A+9PBZpaqNfmujqPcbAqqbOs8shkDIRsKa6WlHF9c3e+trV964KQ4t2I31UTHIv/Lm7Y2t9bMPXbr2xc8N1nqjvqvnDTXt+c316eE0WXMqW59HFK/T6bwoySCy4apdHE1al+frG+sxQNN2s9DtLqaFxPm0G+Vlr2eWKZb9/NLO2uRk2eYGAJBpfbS2e3zCWRniKkGHIaXC2lFhy0EWug4iKcJ80YCAosYQ7H0B2ldHRTGF0tLWsKcqzJx8VASm3FkiSaAYk1Qh5tYwMQaqFkKetemSU1O4STNnN+iZLElTeZqehLIPPkkMoZdzbGPmMu1JF9PSd030KXdDRGJtQhARIFIVa613AVYA/yghYfC+7NsWMCxmm3P+7u/8I2++8dYLx294Iw+8+/LvfvlzWcJv/bZvvLQ92H397hffuPO3//x3v3Nn9B1/6TvtR4c//49+/Vu+72/GP/s98JW35r/1FZuVd3/5TTupnvrOP5H92Q9+7sd/svnu/7n9nbeepx+hJx9/+ts/dum7/szsrth7U/y92+utBefS2TPZD/yjo9H2WphNb+8VGyNHPr55Nb3wxvEnn9nZv5OhoBqICA2sXb83/dEf3nIWfAs2QEzQE3jwjF3baZ55pljbgZMFuBwEAC2QAgCMGIKHkwWsg7/4kTx/L/zKTxSn8zvz4zM/8g/333wZy4fO7M8u1LPer38Bzz8Fz78BaznMupJ7kGsC4Qzh7duTV9uQYL23CeWoW3Z2t+mkOVVshc92p3msdxfLwy8dNfUS6psDuvBX/u67vvlPDS+Vf+Ibv+6Ltw/GZRmcOTiMm7dP2uPFoO8++EefuHp977lXdps6Flm5mKdIaZwXp08P9+8eR4TDe7MiZzRRogKpT22hGcQEXciyUpPO24SsdmxdPw9z32NrkTWEbEDbD2zM6/mljfOTK4cFylBg0CtvXj9q40ls63e87bHJ8WR7czD0dPncyLftlLrydNZ03caoXx65ybx74IGHdg9m1aJ69KFLN47uZcXGKzduSeZsz02ntWNjLC3aFHxXFoYkZKoUU7nCNNjoegUmadoOWSlwmM16zgHfvwpG57PC2YIhEVhIliMCNEtk59gxuwSiRuN9R70KEDgL1rCwcUmrkDygoiRBArAIhtAwgTXWYW6ZmUTj/fRuSIaIEBKIREwRZRVjQQGmlXuNAEWUFaIoJhABRlCEiBBIkyiDJgC7guMoIKCgCSA5rBIxAKiGUKKusr4rBzIREiIjigIiOkZSxRRsRmgt2DgY9KeT2rEF0NZ7AOqCMABS3BwNBv3BmbMP5mSJxLdtt5yxim/FAMR6fnj75cVwcH68A6BiMLKAxCEAIzJh0iQICPDWzdd7nG9ubCMDMijQaG27Wk7v3L558fS5xidiEkAi0hQJVIEQgdlE32lIGAUDsLPCsn3+4rSaXbnyRuya/tapU5vb0+oWqUGAqNoBqqJFsURJIiMmxRVsHFZ4IFVcpc1VATF9lTGEq0IZUlJEBmSCFb9IARC7pLmoMYQUVQwRiiSBFWFo9cICr/p+IqICoAnuw0FBUVXbqG0EJWASAEyAMSoCBgUKEQBFNK2kAUoASrSiPAGBgohhsrzqNwIAoAohId+X8imAQ2QCQqSkyyAMDEQC4uP9BypJogBJQFdi3RS/mjC1iJA722HrOZbjURcYjKaIbA0gqkrp3Klhfz6b7Gyun7p4iUxJbPMcjYXpQffqWzdu7Z2osIk4yGxeZF1IVecNQ+6IVJb1wpZF55MQH50sl9NmZ9NEDD6Q+hADxgSM1CaNvgNjxae8KE72929fuxWasFJxhaQCKkQalTCBSsbcqVTLymX5om0WSUXBrBDxSVMSm2cCqmSmSTpha8gQCoiqEoIk37VVvzdQlxMaQhUxw6Kg0cA3zWRQxthWM5uKcnEy74Jvu4aUjMna2DlrnM2qumGTocbOCxN1rS+KMscs+NaxMUxJMcaYYkTKg0I+6j/68DvWd84e7V+dH++Swfm0UVFkYwwQQEg+coGkQhpCcmRH5XhWzRERFVhABEJMxByjghUkPZkdb/WHvtPJvIqinHEGmCFnZEJIBOisrbyPKTqDSIpETOTbRqOIogrkzEyw9N180RjO7+0vM8cuswrJRwkWl8tqFPI8Z+soK/KmCykFBe0XLgJXy7ohHYx6QJh8dEwhpl7flYNy4aUZytrZ7JH3XnTnNjpIKrGaLzrfSuY8awOztk0ns4oaWpwsXMEMuPvy62xM8jJow72mY6opdGcubh5c24fYpTbsXj/62MefqhYHz98+Gpfl/r2FcP7Qex556drd3maJluresDH40usvtbMmJzsqx3fjYrhRrr/j0fWdS8e36iuvP6/LmXSVT/dTRWVuVLBX5gkAVJPoeNR3jD0ml6GEFLpQkN0Y9e8sZkgIBLnLFZUN9hiaNi5OlgyYJJFzorBcChFnuVVFAFNLEIL78/+6IyASiFGAogh0XlMVapFukcZrfdvLBNQQUmZjlNX0M6lISJggihJT28XSuTIrF/Mli/EhcW7YWpGgBjJnrSjGlNB2IlFgflyN+wUiInC
ZlRsDYdRO48KHTkAAjGNEIgVRjSGioZQUBVEJOsUuLhdVvj4wOaLQoD84OZiHOgqqCkBSUQhJDHNoGwcCmct6uc2tNXmSBJzssG/LPC857NWhaQSBCNjlaChCShra5cL2cktsDBlrCZGAozTWlIqEIGwckdEoAoF0lQyNzDkAJAHKs1F2dnM8+NRP/bvrL71EYh/40B+QXk4kbZcUlG1mTD7kkp3dCA/qw/jw47O9Ky+//uqzu/f2TNG/8er+eGc9s3zr9uJXXj668+xbDoZlf3g6O/76D1+af+lV03eTw6UKz5uEBosRU6QUUpZxSD50nWEr1seOOgUElyQQAQDn1taScptEIwmTX6YYu0WDAGxYFbHAsxfXcsPrO5vkYX44Pd5bDtaHgbLWd3leIvNsuXdSnZS9vg3qJVrk5LIOpHMO87LIMA8dJDYWFDqEuKxjAhaB0AQl7juXEsQUrcGiQEGJikmYAUDu3wtiFMRkjAGiuvWKCa36FOZNygRRCUlzizGgy9zqDZDVMSOmEDvChGQm9VKdDchc5LYspotqDW2GcPdwUlU+ia1DSipgOesVte8sywce3BrIYpiFae2DZByd0ZgQFrVHtL0sa7s4YGfd2tFyzx+dhKPqsGrdxQfOXzy/vtG//tZrTbO0BTMDDoZy4VLIx9naaRBv46Q7nMnyqNk/5oQxqkEk9nbNZDkXqgpURY8KCphAUDoQ6rFtBEiiRWZAkYiCLIkRqpQkpL5bOXKRCEgUCAVovLWd2PgkjGwNEVGUqABlMdDkkYCNXfk5FISYVx3/r7bm2TADEiMwkbUOBIiYgYiYyCBSjpBAO1/fO767vfPAqTOXLWeKgAiqwExJYogJRfdv3vriJ38ZYzMY945u7ZNAezTbWCs2BoMAWkJQcG/tz8H0hzsbljKnOVE4Xt7jbMilXbv4YJ/Qofn0b336z3/Dh7ZHH/iVz32ZnKWEbfQSY9e2zpgmBeucZXSWQNSQQUHHYlU6xKqLOVpQ5SQmJ2UGiYjIbAwRm9UhlBz3u5jqCB2QA7QIGVEdoyFGBQgxoQcmBHLOqmppTRQZ9VztMTGvVDQICST5FBIICCKCCrDjRWgmy93R+mh8eTTO16a7+77tPDWpaVOHWJqUBARNWFJaZIPR6ipY1jWi6WIqcitdy6SQoiETA3zuS69cf+3KRz/wjvd843t/+3dfvnHl+CE2mLUb2+eP3nzlQjN98+aVubU0tm0XBwbW8uzqa9fW7xwPPnD5iX6R777mpge3XrsZ9g7gm//gN/y9v3Tt9nG6dbe/neUbZZvyqk5v3r5JO1uX/87/GHbOvvb/+NvzOycn4/Sun/v+eZMvXrp5a6xP/vnvfvI7v/369b33f+PHn/t7/6S9szvOTWfhpK6X02bcc3MK9+7uu0cfPAj59WLja7/jY8PLDxwfHX7q5/7tV37nc1mos7JXVW1SSQoaBQvYW0ze9uDpJJoZbS0wQs70wKn1HrlXr++ZDG0Xt/ulDwEyu7Y1WDRNbLT1evv4sGfN2WG/1+JmUSj4IjOm7O2fnKyX9tKpzU9/5dUk+XueuNBfHrPjRNCzReeDcaZFmIZojBHmNqTNYe/Uxsad8t5e06EFMfra1RvvfvKxMnPG0IWttWXQK3u74631164eGkHxqa27wXpvb/9k0TYpdCd7R/1eAYpsnBcJTUsG1tZKoXT29PZ0ergzGjnKOhuAqAvCROtYYLuYHcwEUtfEtksamqEtIMw3Mntu1Nvdb+8dz8GZIFBkeGptCE2MsxmNabmIDHg0WeaFYTeKoZXUXhj2Hjk9vnMwi/ODyWT3FGbGc6VuUjeddIVh75svPv/Cxsb20XyGEXKBxbw9MEenxgNOsJG5vqHD4+OdwWiw1ru0HQfW+BDqrp4t6jbRyVEThNAgGqNAO+eG4rWOsSWeLzu7bFXEWrJs5osOtcvEObClzann+2nEonXbgk/LozYEmc1D9OJy03aty3DUy1bL1v9jVJSiWnSOXd1UQTUFIZOLYudBlaJEZyyoWiZSWPrQxkBonUOBKILDfEiCazarkRgUDUeESbOEYCQiIQzywYrgGX1Ihie11E0cDvrOGgbNcrdsO5GUGeO9F4WQks0MMC3bKIKB8fX9xV5z9n1f9+7nvvDJyfSwcpUbn1sv1l5788bGoHz6D37wsY/Ic7/6uRuvfP5v/eO/+cGP/pGH/86/f+mlL/Te9VT72uH2+ugr+yeS9y6fP+th87/8ix/7hj/+R2/+bvvwd3xn/ch460Pv+LXv+TMo3SdfeePdb/vo+Wnfvroc56N8MCpG79j7yosb2z0fzvr0QO0X6ckPwAcQP3q7/ut/3B1OkQgyBkrk2x4ouC3QGroG2q568nT7jofdrvinP+aaiuevQuuBGBwDE0iCHndWM4dzkvAX/2pql+PP/CRIt3vjxH30fesn0/Nvvj75v/9VOAgYjN6NOBrB3QN1KD6ooUNNNLajb3773Q8/dTZPt3/tv11+55PNL3wp0549rn3j1Ua+4F6N8+kFfEk3/8R3/s1Hn/5ad+mxX/3FH/snf/kfVvN44czG9rC3t6i/6Wsf87PmzUV9a+lfu7oL3myNx3YHYhtHzqxtlSWZqq6Pdmeu6CU2wKYFmc79dn9QFnHkyrbxHfim8b0SxqNh7TsrEKaddJESokXKzWgjn3bLwrpqNslN6hmsp8vDo8U8xOOjiWX+witXGp/Kg6PJYT3oF2Tw3qwmR5obuHnCUTI23f6UVTe3hnf29zlzRTn4mnc9dXAy2z0+tC4/nM9tYYlhvFMe7Lfeg0fsumiR0Zijum1SGhiz0aPhWhlZj6ZTHQ2Go3J1FdiSrBMiiAm8qjcIGVogNqoUY/KxVe9FhVcaNABFICbMmHLCQWbaEJchxgSg4BBJNEfMrS2stYYb0FowaQAASxZVk0hCTQoiSgqISe53nSiJolFLZFUlCiqhggVFBA+KKy0WAAOCgAFkQk0QAAg5xOgMMJPcJ++AJlTllcLGkBKApPuqEYtGBcWDyQgBRZMCW84RRDQ4Y0BSrzQGJSe3vjUerZ8bjMdMAiTGFlujU/vNnRRaNjZoMzl4/WppB/k7t4fbLDiwztlcE0RVwAioRHiwPFh2i0fOPwxMSkDEokpkTp2+dPPWWyeL47JcE1Fd4XPud/w1iYAoM5Ok+eFxJkWe9/JhGdt26AY2mRjtwd6x7yKDUVFRAURGAkQAIQDLzAgxaRLR1eANVqGh1TdhVMH76Oj7lbIVS1oFFRTpPqAcDSbkAMlaIAsSRIAAgBF4xTjB1eBOURVp9R/dL0esaEii2LTSdpJlhm1QlhXjQ5PGtOInrZDbKxkVrmBITMAIFoEAM0a7el1U2eAKgy2qMYgkYVXHyICQNAgoAKM6o6IYk0ZERVQkERVVBFKA9FVABbFRlfVx4dvaIGZZub9/pOIAVBOqSu64ABLvLXNvOCqGazFhDEoYfbWcn9y7c/taG6Rw5vR4sFb2QoQ7J7NOWVVRCCX5JL7zxrKxFqPcuHXveAr9DffA5QdT1GGR5QXPF0ttIgP1i8
Ey6C9/8ovTtk5BLRIkWZlb2JKuEGMYQ4woKqSZM1G1jupVDSMlVVGk1fN1FDTL2jMTg/ou8gpWrqpVIIyPPfg4oYtgkpK1BFHIZeQydoUrc50wEitZKgpNjbM9CTVRars2cdaG0M9d3QSNyUt0VqxwaKLvtCgcawqtRyJrMLdZ6DTPew88ePnS+fVXXn72zt3bbdsKYz4Y5kUZVOrlTCWCEAAk0EjIzknC6bL2KDuntk6OJt77vLAkEAWMc8BYeVFJ9XKafHTOCaIqKXNE0iwfD3N/fNx5kaT9fhl91+u5EELd1ACIZAprGAlUQhdyRgYlQB8UmZzFrgmRYTAolrVeuXEyZiLDbeeXPmyOskXd+hAAUs4IquiliVETklrlLAJoRpXtzrz73NsfP+2X3aTq9idLD4mwq0LwAWIbfNWpQSHEDq1l5zh6SWTLzI23Cuq0pljY5anSbJ7ZuPNG7mvqlu5qPf+T5848ki699MbVtkUYuMNrk7u3D8N6TgXGFKbTWV6YoqeZ1VFvkI66qLKs7eSFt67XL8KyGecmu9A7uNmUha0PTwCgcNahQRUwmDkrxIgmQzQxgJeMHRtiohBjUzWMZjwcNTFFn1LQRR06j4AGOI56Dq1rg4iKJJ+5PFZeFEixa0PKqWo6sujYEpCkGLrkfUpNDJ2SJIxCrOOCjXMxARNGiBqDhBiCoslIAgVVgigKhJCEkZNCbg0oxhh7jleWTmDoZY6z/KRpOWlsUyttJdDrlT3rNC8y0DZ1pTVHy7ZG1URRIaE4wyqSUlJFyzYlsZa7GJ0IkB+sZaGzJ10bSb0KWDZAFrgNXVRMKTFg2/peSkxk2KEKE/aKfDAYqCvIonXHqZ75EHNTZJYdUD/LJAXClCSiSFFkqxwFAliTMa2EvcyGCSnGuPqgZAJjs9WHrSFGYDCkbuvrvuVbn/3NX3r5+WcuPvW1DSBpzFzu8nx14VtbghJLzi6zg+23v//ycHO9/tVfnMyOilyP9o9Dq72sGK2tP3fjeDgu79w9fvyB6lIg63rHJy0WxbJp1Rnbs2VPtQoYtKrmZZYXpiSCoeHZtKGgltrCssnsRn84mTdNB9NGM4amWkTfplZiA0VREPDWqK/ctVVj8nL32uH+zYkPScHWVcLpogMZDZIrSsh5CGsxxozVoEDw9SIKMg+yRFDYfqxbFEXWvCyWTbfwHWNJhizZ+2VHpKBic+4bBwGEabFM6KPj+5tkARTFpvZEqAARkhImpqBMgn3rMqshJQ2JmdrQiooEj+xKVxCItXmPeOJnRZ4xMTLNF0tji9hKfbSsl3XjO2MoShTVnE1MullkWy6NbZt8PJxJdHkTeNkmIIgptsFnvbxDJaDSWceucwOGfRN8d3I4y93aaHN4/qEd6yYHd2NcrG+MaGMtFaMqosYpd8sCF7E9rA4n3Ty0Cw1qytJwod2iLtWYxHnUGCGpRJEoyYBBFRG6fzaCxEgiYo0dZEZ8XTI4w5KSAqSUVicV4+jU6e3BaOByR4jGcEqJeYVkNjEFhKSi1hQETISI7KxbyU2RjTUOiUSACXVVfVc1bJhZ9auFeRVEJojT+Z17u2+9+51fbyhTUpJVSFpiSElRNNs/mrzxxj3r+eSkXkybqJgYNMsh7+0vq87h0uu8qky/iJsjeOzhbHNjeXx4dOve9pkdO86H/bFfzH7vyvU700UV/c//1jNPv+PJr/3oH37mKy/PZoeOMPg2JRFUJCNAgOqIVmsHIi0sm5BARAmrkJAwU1jNfBwTRgFWNuhDVDW1D0oOlEKMFimDyCpRNCZRSKTAgFEiIgFoimwNpuDzLEsAgnbu1bERSSBRkg8SvSQmlIQMZBz7UDuWgjIqi97FjXLAV1+80tUNhGid6aIQYNN0p89s7d68d3DncHUV5BajaJazzTjFVLU+JXGCRZYtq3Clbd74tS8PR+7Rdzz89Affd/Xzvzs/mJ+p9XI3/o1/+r0YzMWL27fuzI9r6Qz5BFk/f/Vn/tUH5I8183uHz7558Pr+yeHcBvmt11/mzz/e++gnhlStXz5z8vrhvWh3PvzuojB7s+UpHXWv39u5dGrrwdOTsi++GzvzT//p//T3f+AX3uifujYZXZu1LmKzsAe7i3Kr3+V4AjAnO7PaNnL+4gU6fXn7wcdHF87vL2/89Pf/L68/9zz5QGyTcV3rF/OGMlMUbnN97e50jtbMZi0HbY9PTJb7LuQma3ya1/MuiROWzNVN62MqsjzWUZvQtaHptCx7MfozpzbCxBw31XB9nYMcHc9AwBXuy1duYl5cWFtnaZZNW2RZv58pQkCcx66KscyLnImc3Y2ebG93ujhzemd5Z7/VhI4nko7r5tRoXFo7b8K57S3V7t6s5jyvwmo9iz7EbK2kpZeUSldEMMfT5c545Bi7phoN+haxzy5MF34WxsNi4pvK+8iUKFsfD6wxPnHdgMvzqp5Ztsxg2FqTE7oX3pr6NnlNeekkagRU5K6JvsM8z23B8+U87+dB0/7iBA1tj9cl6vX9JqqxCtCl9XGmKncOFpMZXOptRMBbd29snzpTzecc0uFhBYKGs6N5Ox7mpeGrR3eO520/y42GW6/c9V66eb21vd4fWrbGmOzUToEE1Hd7R9M2pYUPFqJxrln6GEQyo2irTlLbGua9yXyQZUUPq4OJc+705nZW9o/mx8cnB5Ojia4sTYyIWLgsM3pqc8OV5uXrt/6PUdFw0Hd5QcRZXja1jxJiUmVou44FjCUylANZIgTyAVWRDCcJndcMxfW466KIt2BAYlQb67RRrEmmbbNEB8fLRZnlNoq1UQl9Ih+TzVYcFG07EoA8s3nWa1s/nc8JoQmiqtZQnpkY0vG8+d5/9I/f9ujjf/t/+EtPvrO5ff21l67fNr0m5f7LL8zfvHnj/DsfG3/9u//It3/jL/zCL/3Df/a/nj11+a/9qb/y8b/xQyef+cwrR//p8mPnqjYWQZ79mV/OU/XW534+zU5eA3nq//bds5+fXl6848qv45/94f+04Hz76Vv/+VPf/61/7/uWP/pDN9/30KlxIdmy/9735u/4P73w6f/0yHf9Lf3AN+2cPbcQ61vnWoNOU8EiIdoS3vP+9Fs/z6dyeGjn2bz/oQ/8wTf+8T988Fv+dlfV5s3nnSs0aVIyMcJO+bzMnvrZH4eb1299/ouy2WzeuPr/o+o/w3XbsvsucIQ55wpv2vHkcHOVKqoUSsFllyW53WDJmMYStqERNm2gTWMD7adNcEPTjd1AG/zYJpkHcNtghJBtJIRkZVmhSqFy3SrdfM6998Sd9xvXWjOMMfrDe8pP++v+svd+955rzTnm///7pZPzKx/42HM3ru9NDp/82P+6+n//z+M1l0TWCF4dSRxyTfXeiDebDeP6atgv5b3/14806zA92NuFDbz+s/R0gOs3/K3dtx49OcmQR2H/j//gzX/8B7/tg59Kp+//2b/8F/7r/+UXDs7KS1fau1euNM1seTl88807Ez/78c+/erlRpGqpHTJXijcOrz6av3uw0xzsNV9586SHajbbm3f5uEt1Fz3zu
o97Wk1H1dAXRWoPd0Oo4sV8kwsixqFkMU0ogrnIZLfO5B8+Xc8ODmTbeB+PHp4tzy7jqGmKpSVpPF/WlYPKa0NLkpRljQBFORUtpSEYNX6Rhv3RdJ3S0eUw2RkpdRX2d/b2Xrx65f1Hj66ExjEuYjo/2cxG454HNknLoSjaULLBJpcF+9O+nBfZG433A61SpqF/9kqoGUkVbSjFFITAWgJnZkKgEmOKkLNtj9cC5tFUoWRDk9r7mnFGfsejEGZ7Zk5jhYqIiQBU1Ippj1ohG6FqUcSh6HanC0TbGHbOqmYAVjFVTiWJM9ICHpAMwIwNSAGQCAAVHIDftocIFKAQqToC216vFKAMos9YN+Yd1gHRtkFpZGIjo8CgYil7MMcOnTeEJDYYdAB1qNqmChyms4kx084NP73mUL3jvems1/784kTSICxRtQzz19/8zS6uPvLBb3/52o22ahCNQQmzGJjSZlg8fPTgxdsvjupWEIkIDRnRyChUd+/c/fo7X33p1ofraqLPQOECBIjPdkgGMFg+PX86GkaHt583kqoZgd+/uNTF0C1WsXVEW5UmACI6MAQFMDUIvB2WGaEZo6IhMJoBbaX2uBWwEYIyIMD28IVA2y8yAyFkAAQshpsMex6qAEW+QTH6BsvaTI2YGYEB9BlCWwzNwHALlsKYIYurGaoWqLMiCkZg+A9Z22pKiABGaEQEYEzkCByhGShAEUVUQqStkR7x2W8igOwdgTPsVBZSMhBvmesGjKSASawoeKJApt8otm1XwaaXyaQSU1QcjaqqroahgKEjrILLOZFjFY1qWDftzh5BVdU1elqezzeLiy987XfeePceeqqr4I3qUGWJXHkzQ4fFyEocT1piyIVIzXvfFV6c5nQS37l3z2Wtx2FckUC+emXn6v7e73zx/hv3j+bLLhuiwyTqijaes5QKfCBEUFRAZHK0TmK5ZM2pqBHmrC0TO85IxGBIXRYWUwd9StuUflUxOwoBywDf/InvIPbExEBb2QqAiUC/SWhIhNNZc352oqqXFyvIKIkW69XOzriY7k1rADi6jEjYepeyjuvRcpOYuXjsYg5kCALqBIXr5uWPfeLa1Wv33nx1OT+pAm4ioveT2a7zk1ntz32IyzmmnIsUhT4mT9VeXVWeGzdh8pNpvepx2WVDMFAUUMXKszEv+8gKlrOa5T662o+n7WXXr0qaTluftO9Xysx1sxm6yrmmcUUQDWLKAhoceXSTtl2uNh5LzdxUbpM6ZGJCyVABEbp1zuNxZWJt465dG48WDlz9+OlpQPbO74TJ0/UCwTbrGCahj1mWcXpjMp7tKs3iMD9fzMsQgXR9fBqLhhkbAKdcjStqA2cdN60H5DHVo8mwjnSZWsjJ0viwPbp38d6Z9DjaKHSal1p+4R/8/Pd98kO3rj33aJOs9SdPLsc7oQF46ebseBgo+sXiwgq4FEt6GDKlixRl5ERzFNKShqFfddXYJ3g2ML0ymxIAq6ph8JxyQcIyFFBBVdNoCggj9BCqYBHJOZSu11wG1MImRJ6BDMiHuk1BqSBxAyYFFjkWVa2agM4VQV9zL2Kas+Y0JE0q2YqQQ15FicuB283YM6IJbvtfpEAKWgAMINuz2IKoxpSzalEi2qLX0NAzmErJhpUnxnLQ+oqoXyVSKBFXOda1QrGGvTcLlW9cWMU0lDIfUi8Qi4FtBepWTA0howG75qCtR26zilBAh8wuhEBSSuVZU/GOjC2JViEwQ0qSU+qHTRWqzdCNxuMquIhayIWdG2N2cT0vCLEkIkeuLmoxJSRiX5uClQKe66ouJQGAAhA5VTUyxe0VAnnvEIicByN2vogVKVHN71w7eO5Dn/vlX/3Fn/i5T37/9wVfUAldWwQYSMWYuG6aXHLwVACvP/+JP/onX/rxv/lXargIZ/2ql6PlInbJ1bXfrZ67NqNWf/ut47TO1SiwIoWgYquLfnEhomU8boicD3WNoVtvSoa2DtWemzV+cd53g17EZRZxI06b3leMjEquFFhsOg7h+s3xld3qydEgSOSq9++flIi52GSnqUg1deNpKzlLbUqBG45xnboFSK5D1Xdl1k5mzZWuDJaEtBpVVe3FGra2RFjGwTAKaqmEt+DAUPksgxKExqujiQ8mikUANgDQdxGcEaIabvo8DMUqXxGaIrKv2xFSrlCQMFqp6pqYkigZUNEaqeIKJFkuRszeD91aVqvxbNcpuskE+zWpbmIpgM77HEtDeNBUVx0OQ79Kibg9XkemYkbttA1YefYLwzrLxDUxl+VwpibsHOTSd/358bEbPbh7666781LY3W0qD1yKdFD6tF5vTuZNkNT0uZytVmIdqTlB6otgNk7slOJ6cAaVUQFSEAMy9GZF1IQwGzBTYGRyGVQlj5m0aMWcSgHErKII6EiRJru77LwW8FVQtaxSYROqgAAuUEyxctXWvwGIRIzkaWvuQAIkRCJiZsfsDQxU2QETGOCWbE0EpgqonuoPPv+dNY8FjJG3+t0t13E5v/zJH/8JGroXr75SZm1XE028QrhYldlsh5q6SIJpWC/ioDD2/uDG1c7V1Oxcro+fnM1rX925fqX1o4vV8DtvP9gMvSA+OFk29x7f/uAHvunjH3j79S4fH+8E10MBQCYWA1Hsi3gkLUUVEYERSi7gWbZYb3YGoOSKmWjxgRICt9WQIbtmSAKialQxjTyYWs6C5KOKIwBmdq6ogmpMiQhJUYRVxBnM6gaMUi5Zcz9shLgYajY2BQdFpJ21W/hA27pqCtnvXX4h+8ImFFPuTDJhD7pYb+KQNvNnA1MgVlEQWK8Hx+SZfIXsoKmBvE9JlO3dp8v7T77wuRF+6IN3/w//4h+kJ4+73/ja6tFpG+je2489497e1NQt17Emxgern/kP/r87rHkjVI0gcd+lEaT8zircGZrhwTuff9fb+JP/3p+rP3mN1K5USMt+fe+rn/nFn/7d3/yx3/j13/n9Nye7L33zv/CH/+hs/8pHru6c6/J5r7/8//kPZotLu7n3+vlFDmWVYO3rD//u7/r23/N7/P5k3q++/uZbP/4f/qf5YjHpbFJXytU6yrL0jODrqgA47zfrjTdrJ6Ohy4fjtlsMgasrk1EVfEN+M/SuCetSlvNOimWwYbVx0rDpuAoHV/eCgGZ5erE+XfbM/uRUG84nF4sPf+SVL73/xoWVvcnOlbHfbFbYlWQ+iDEKgmUldujNzIQFapSq8n46qWfjbz4YvfnwNMU+R33/4fEIaO/wCgoxWeuavZH4nfrx08t+0BduHY6q8OVXX79z4+a1/emDJyfVdPTSzdmwLiZ6dXKoDoc+jqZ17V0/X1bEGvOwjIVIPe3vNOOAD5d5Y7hadLVz0zAi0XW0k0ybbtlWpISjur5yZf+19x57pf1xm6J24iRnyVZVvnhabnIBmMyqjcnZWjSXq7OAoJVBifHh2TK0VTMdPz5fieMXXn7x/HTdzfvZqG0bvykEVg6utqvcJ8eP4mawUiBplh5tU0ide+3xhT6hUe0DFoUCTIkgVPV0t7l265qtu9bVe1e176OAGFO/
jn2POWNV6WTWvvji4fHj89VyePRkXhi7uHAed6/snJ+fZ6MQAjJ5F/Z2Jss0NJT+kVTRVuZyOU+xpMCMaGqK5Gkr/C4KVshASiEiB7ANVShaShFR5xep8tyi7dQBC/So1/d343rea9+OqcuRuQrOZ09MqkZQBJFSTt6YPAzDUDUTAAak2e5ob3f03oOjJEmLpqJSkkOqvK9I7731+v/xz/6Z524d/ol/+p9+4eWPvPn53zpZXY5H+6+98ZUvvvqlPJ0+d+3Kp7/7ez9++Ke71fFf+x/+dT78CzSe6Ke+++n63Y++vPv25177nv/o3/zCn/8bslgIZILul//qXxk3Vw7HN+/cfOU//6e///qN8eaXf/bmx+8Of+z7fvmdL33oyUfp859px97phP/YD9Hf+PPv/nt/5OvzcvDKD33L45MAY6sq8n6zWmgjcvoe7I4WVw72vu027ET/la9e/JXPfnixvvd3/1LzXf/MyEo4cDYwrwyyQt9c+31/9N2d3Te+/oXhbv27DvPs579YXb8D984Of+S/K//Ff3DtydJXuwLcjOA8rs/udC/+0Pd/9T/9O59I7ApGRy//i/+nkz//11/s9/3+VXi4gipA3U4//NxnTl+3j93d/Z5/4vmPfveLf+D7TnH48r2H//qf/QP333pNon3btevXnm9XT1dA1fHlMmZ+952Tn/jqfRQioBv7Ixeqr775KFT1++f9bg0By3vdJkoA733Im5NVABI1ZBojLc8ugtRiSMCrLJd5vu+d1zxtXL+JbdtqMY9OWUd1SEV9HUYj16iL55vjk/PT+cVkvJtyPI+byaQCBjS4ONlc3R/Xk1CMbo3Gi0XXmhFgHeD69R1Ff/bo7ODKhDkIKjC+dPvmu2+9P5tN95rx7d29yWh6fHbeeznaLN6f91EUt/wWhw6fTQY2qXRPzx/oxbTxO5P6Wj9sV8FyEb3nxptnLCronAKYlpLVZYu9DhsDISIwMEQjwm0sQ8WKWSouMLSBfU2yjSGz16ikIGZbPqKwMoOBiGhUKSpIjlRVkIjV0MSgoGQjBuesYVBQRjQwBkBQQFAkAzUiRkI1Fqs8kANEZMWsiIqejFAQjQCIibdeNoSa0ROQMyYu2ZKBZwqEhIS+cugBUAxjjlFkAOJQO6Xa1wEcW4DQZNOuv9yp67oMol3duJ3ZZJ3jYthojoZWipy8f7+789EUu6ixGdXArIqhqk5OT54c33vpzkujagxIW62Y2paqQcbeudGLtz/46PG7d2++wr5CM2QyBTUTVQWMRYY+rucrS6s8FFcDhzoESFrSVtScxVTNxAwcMaqYacWsW4AUmHOsCiIqANuNGiKW7Y+CyARgsJ3XIJACEhiaObDGkxHhYCJCBug4Z3AETEWUtyMmMUWGillV0YiIismW6A0KgiaqgcCQRLFP0BqhszCG3KsWJDIGMwUwILTtc3ZLLgcAAjAFATBUUDRUT2wASQQBgkNFKAbOEZoxoIl2WQYF57aUbTQABSPAgOAdQlEEIFUkBITtxJQRzWBntjNfLL3js5OFFSQKUqKZBiYCGY2CSNKum5DtNY1n6pbLQLIu3Vv3X1MrlSdFmHdp3Z8myYnZmbIwkc3Gk2ISgl+nFFOa7lXj6RjYjk/XfYbca8jxEmKfy7uPFn13f7HZ5CLOg4oGdIIKWzccM5iiIYgS0lAkmvbFihRERLAAWDPttDUjHK9iNkWAKjAZRJWtaM95zkkgEJO7fut6xd5y4tAioarlUoYupm4ATVXtoMTYr7v15eX8aEjrSVM3NU8nk5jFWXbM3bDZnVXzfqgCN2Fro2cDKzk6R8xYBTd0iZSu3bi1v3/t/NGDzfw8pm7ZZQN25Nk5X9fDsClZrhzsLRbnfZ+sT7v1yFRbb7Nps9r0q8Vq2XfkvVNyYNcPJyUNl+tYUhFiT+gdkiEDKAIjphSHmEpfwt5kb2eCTruYo8reznTousl47Li+uLhER5shIVejOhQpzFSHkPq+j93OtaY7X2mh3CcVVdDp2M9XG8NA6O89mO/Oxg252ajNqRTUy7hw3lLKoFiHCoiCh6v7Ta0yP+oXj+NwsTp67enqskBSIWdajCgEGqqS0yUC1NUcHYoIM0/rJi66esQF9T3HsS8xeTQWU2Nq6/Zivrq2v3f72pW3X33n7HJtsWyy4mJ45/I+X92jg9H+6Eof14opDh03VO/5spa8zl2KFhHZWcBNikDPCmjBzCGTYRbTUlCEmDwyMThvKRZCnIxaKxocp/WQ+iH1vRQrCR2xmRFoE7glK0MXfKWAZZCu69MgxOQcg9uCZIQcq8KQohmCKCgiADOQmQGg4rDqQ3C+9SUbGDl24INoZgITayuHKsOQCkkUE9Xas4mJFGG/7IYxU9V4T0gI3rGZFlUIPPQ59z0pug5b7yYjX4da++Sda8gpWsWb8z7NkwjiNnsOoAAYi2CDoFB6zYusQyH0vgpKsIrDYOodjSZtZbJcJ2IoMYuXVHrvZ2pSeWbRYbUe7R9GwHbMuaTN6kIQsOahpNPz4zpUB4cyHl31FbInIHTOK6D3rZowAhIXzSJCzOSc974KgcgZkAkYAHtkIgsesX3pW373k+PlvVe/PDmcV+BuvfzN11789giopqCAxKaqCr5ym01C8rGrPvSt31/vhJv7tz77Y3//H3z+599fXKob9yvzq+VZzqQ0duwBzk4uR+NGsvbzLKqhcsH7ljxHKxiLahcTAMZOunlO66IKzjlzMK2dk0yaxodjZI1rqXZ33CjMboXL8/nl8mJy/bqFTDXHlMGjdzIaVV3sKBc0kMuFCvnApIOlhctejRvzk6g7Ma8vzjYE052d3dleS/08bUig9Z5STmhNHVSLikpOzBh8iKUQbutRVo+qCsM3zsgoZSvxIDRTBACVnH3gqnLZMkOpnRtcKbEEw1LMNzVikKIAiJ4uNmuloMCeOa43KpaIpodXq/agAovOL59eIOiIDLXMiMu6W2RYptyjA6+ZfY04GTWELKZiGJraK2BxORcinlT+cDrpZJENmHQ1fzqkTse7kiYpb+YPn6TlPJ4dYV6xn9/54AE25EZl/QZwz74ih5pLtoQbMYDMhlmUmVMZDLmgRUiEFrwvCh4dEAAgmqYcwTs1ZbQYo2P8xp7BApMjakMgwJKLqLEjBwEIixiBNNyAr70LTAxkBMTsxJTJsaNtk121EATn3bbzLiWpc0SEBIYKWwoiAQDu7RwSV4pChgTP9KvM2C+Xv/mTf+fV//5HRgjva3np+et1Xs1mo6GHvZ3xpi9uNwy9Hq3n55S+7Tu+yea8zKPDvRvsghRoqhog1O3e2dn862++s1x3vtbakSWYr8+ux4PD/dHjuFGHpkYOQa1o8Z7AMBZD9kwIqlLUoQVCA/FE7KgULYDFrBQJzpUSNQT0TSopoWZQTzAirMhqVgEVUI+azRiJEUCVRb1zqMUpS9Q+qxF7FwKZmhTLzFgHj6rMTrJahqryCColhsq9+NIsYj/CTc+8Px0vz3NRQwdklvPQ1rvnx9164Sk+ozfu7dfoq36VlqvBRJuKq9prgWG
Ti9oQEwG0CuPal6i/9eX7X7z3Xzw3bj5R7V19bqzrXCdB1VGNA3AZjx89OCFwN6dtsvL0rKucG+007Zhm++50ePTG3/kvwpPH1w4PS07lLKubPTo+P43d3Vl1Npr9nn/138Z7x9c+WD7/Iz+1XPzYzt60XfxnXYU/+5lfna/mu+O9RVWPr90IL7+8/+KdTz33AuxOl3Hzhbff/MyP/ur52+8FLS/s7B4LNrujLqX1usfajcfVZt0bgHO85Rms+hhX1ir6nfba1f3lcj1qQ4WI6zhCWmtWi7uzdkiw7NaHk1FadyZ67eCwXy9n4xFW/HS5VMcu1IiaUn9rd7S8uLi8GJpq9MHrN4+e3mOAndGI2RFkNemLtru3Li8em0NNxRM1GZ8cXz5/5Qb03aidvHKLYr85uuzm8+VJ5dtm5LSiAaQHzXJnv96lg3ePFs7pcrm6e/XOpGqGy8Wt3Z3LbgNZrZc+iWMwh32X2mu7m02qfFPUPELJJdWNY9gbhX7dHx2vLi47Moyga0kpRmVfjWqIxTlfVzxu/eXZaUN2db/JJd17fNkLZLHYpaHkpRR1vF4PQ85ZxSgc7Ox7LBeP3v/ujz6nxOdLt8oyPz7zdbUm3YGqRtzZ28spF1RhqKsKJZ1tokWca5yOQgX55s2Dt75+dtbFunHTOhhAHfCVG1colkfni0fLfi39ZjNcHG+8AiDtHuyY2GRUAUJDfOPF61kNYulivvfw+NGTpwd7e7P90TsPjxBNFfZ3rlnqu6RDnxXgfLU+X69BSvONiOk3CmgiAjGLFi2BXZLiAtfoUCgWE0RUzpIUoa0degCjKClU6AtscsrEy+VQE1yb1a6qQMRs8A0uFpvZeK/y4Xw+9NiD2TS0IrjS7aajTA92FquNCprkZuTXm77vra3H8IwnYrjlxDGwZBXz3lGw95+e/oW/+l996MW7P/C7vvXRycWvf+FLOzO3d/XGg9cefumXv/DF3/itl1966RPf88/JR+nf/9s/9X//M//KBz/xf3n1Nz47XGm+fLT+cDu+eOH26Rvvj8dhf3r4/pOj73zu7muvvXP7q9XNh+/r+3Kj2T99Y/EzP/GLz//hP/7SJX7+Z37u7vTG4194P1/+P0cH37RrT27e9t/+f/7B1/7U3/loIN6I9MDcuiCb9eXJa7918Id+6J3ukftbP/tdv+/7Fl/8HNC4tXXvfF2HBD5Ma1hlgACv3P6F/+kv/VN/5QePP/qR4eFB++69zX/z49Xetf7hSbNo6R6TH8VRzAXyHpz/M7/38p/6g2+98PLt/ZfO/9RfvI7NbiqLn/yFK7kGGGCX4Nqtk3d+W0t4O7Xf/T//ncsbz42agzcfPP7eP/kvPH7n9b6niQ9UT1vP/Wl5f90NvXaUm1F1sVgu1hkVgSwBPj1b0KSWUTtPAOu8N5mGUH3l4VFvblTH8+WaKtcibpUCJRU/ctWs6QcZ+erhozk6nst6gsAaHGPKqe/6HNU3btPF2f6eLJZTgrTpPdF8UYJO90eT1Xq1LO7KbG+eYolpXIebV0d3Xrg+nC+FiHZmB7PJo8fzLsZhPWxiRwR9l2NKA9qgeXjnwSRU80U/DMO6pYK6u1/dGtejOWPXr4Z+FdPZOjbMitR1uSJqPWctGNyij6tN9+j9s+0qGBYFPPkgRgVRDMGKOWMz0wQpYolQTJGBzZAJ2QBZGU2pKKAaIZJRLuA8oUHJCgZFimPvvOs1EkI2YzU1RTM0sQyQFdQJginkBCoICqSkIIEpG0KRgPgNpBICgkNWRHJmZmQWAgGAQ2SlXIwVkFjUclEE8w4YEZjYY6jQkTmHCMBopfAgOHbj0DQ8mjoWETElR76LqZTivTORkl1ThfUmjZpR0zYldqLdyfKi5FUzGoGZk6Qxog7eM5CltHn48K1b+/V+cwWTmq9OF+vz+ftFuttXX2j9eKuZJ2Yzo2eaeTMwdmE03b+pcnT+3pX9G5Ubo6qqgQITqwk7llxYaX52FJwDTyBlefaYdE0lJrWiwo6QwIESCm6bYczeqIA5JGA22NrlWKQggBkBqIIJqABsvfFEpAqIUDncjhhEiyqAgUcUwCzcxTIdOXZFUgF15FhVniWkzLSoMYBBFsUthMAAv5EeB7Ohy7DbeKamxg0lQGRCREtFCbd6azDb7k4BAA0NgWIRQzKEypHbitcMiLiYDklFDVAqpmxQDNL2e6oqKDMbIiGqmol6ZuTtaIz1WZ0RAKAJOK01UF+w20QOGFoXUjLkQkgApgjzLMGkcqFpq5Q3i5Pj3Md6ZL/yaz9/dPwEQbrIOcJIy6QOuYhn5KpyFmLXUe09IhbzgK52o4o95ovlAFq4dr6qL8/nu7NmvUoMEKUoGniLKqoq2ZjJO0cEjpAQ0aAKPmbpTVISMdNizOiYa4a9UevIMfGmxVXOJoqIpZQtrktUyQSIBXG1jtd2RqNRm5JQUB9ISkEBjfLk6ePNors8O99cLtMmVRTu3H7x6mGO2br1Rb+ZezIFTpBDHRaXG0UCtbZxi3VPTAJYVLzzatT3vfPu+bsv3HrlE2mTzx8fL+bry3WvTKGpHGK/XA05T3b2xqXt0hB8ULGSkkki4JIGlCbmzbKP4NgzTMfuyrjmUuq2HaNbaDnZJGYiwpiyczRGf3B4Y7G6zL6ouNN5H2Pem7TOUSfA7JpAQ1+WqxMEI4VpXQHCxWoxbevQhPmqY0Z22HqH4yr1cNknRjTUtg0NuuVG6pq73hbLuCj9eBSo4dPVxltBQyQkxLSOPpRm4g5bujbhDsPKzTgt7AJDV5WipQCTAwPnGA2dUXAUqiqX3HhXhwALs6640hrmvmgza0VyjtJOw2y3uTLbvTw+fvONe3vj3bjqQLwVy1Er787ny3xvMdqtP/TdHxiclp3JSocYezA3c1V/vs5n64oCh+piud6pxzMHr7/5GABIjAHR0BUDQDZEoKyCZIRAuO2vYt+V5dFci8w3m4IA7MlTAa0qNpBYCsdUtyNSEJDVZpG6AgjYOI9GBF0/FEmKIzNTQUCCQpoUjcgMFESBinWdNjNANXROijlGJq7YXPBcad+X3A+eQoqpcl5BKwRD8w7NYRIbTGpAR65oWXcl+LYf+l6tILTssJQAhKLamyeXi6EIMhHaGJ1WmGXoRXMqjKyERERs4wlPHa9XCQbrN5mYzITFiBEAq8aLinc8ndaaEgCZg/V8nseVoQflGHlivhJ2OaFgPd5Ni+piWJ3PLyrFtpoySikZiJFrcCGMJtuTjCM2k1ySgSAYk/O+ouCRPXENYIyohACiOSMjEiYAaUaf+id/sLJ4/vj01S/89g/+idmV2x8mbtg74y3Tz1yFyqWdhdjnJHB45+XHp4/7+0ff+wf/yT/8J374C1/5rX/3v/zLqWu6mLsuj2fNbHfU9X0zrpgxrdSXYBkBeX4c9w+bAlpM6sqFpkIHfSeX8zUrVY45hGxSYmRXJUEFx3X7dP62bw+u3x1JuhiPeH9nsh6GlNL01m636fI6h2k4my+Cx/l67VyYtjOQJMVqIvYHJhA32QDON+vL7i3vefdw35EOsg
FJlsSL7vjqXBMTm0LlfImiCq4Jlsyr92CeNGphLcz0D88FDreNgaLkFJ2YY/B11frgEQqqxSGmmLTEmirJuRqNyIjUqmrsxaUkYOTIIYBjkhL71WoYTxNX48Mbo/2DTO+X83OfElmhRMCUhEQD+LBMMBqFaRO8uVndnq5Xh+2Md8fzxWLa7Fi5BEdNxdcOrj7ddKyqZsPi8slrX7j+sU/uElyen1++/npZDVYicZ4eVGpwMcwL6qBhQgyGgholsXlR2uTBIwZgB4TMWZSdL0VqAmSr2RFiQds6B5yvBgd9ElWpHGXRpCa4JQkIeQFTRqrYIyAAqeUipWlqzJyGngm2LlhGR7wV7ppDduwKIBrhNvKq6r3f4uS2VgvbXjqpmvNqRkDO19tEsSHloow4xMik681ycXzygRfvrh8/lWE4efDuMMTZbu2GPPETH6r92e2vrd9f+Jk2TZo9t065vn73+eefA1j85v2vjF0+uHX7YlifxvXF5WI68jF3aZPGo2a+Wr32+msffP7WwWh02g/LTQreMQKpNYgEVszKs9ocFFFCDGYFoCgkUQDrizESK7TOQSkODEpCUOcoZ0QrFQlqEgEBUhREckyM7NFtIZ5oJmp9loo5ivq6EgPIWVVAcsUWah6iDYTFeZ6FpqmLpXpE+7MQV8ejndHJ/UspzunQd91o1Kz6wTPuhzEs0lDBo/dWu83udhXEVILVAeDm7qSuq7PLOSUQ1cFEMyFKTimQL4aTJgSrumU6i/FvPf7KtRt7H9rZedFgd8Ouxg7j8aBlVFdN9WQYKiv8/F4M1fx005RCscT9289de65HEfZO0i/+xb/4Ay//h7OD0fjwFlwc37zx8gc+/V2v/zd//VG/uTwfLnphXL32W7+RRcH5vdt3b37Xp2PVHnzw5eXl/I1H7/3SL/zk8eP36zBqM7z8/A3Uad+vnvaLzUaXaUWeejXrU61QktYN7Yzra4ezo/MFNx6ZjaDkvOgiCqYCD+br3VH9rc9fv7FYP7g8XSQx9rOd8WadK8bJqL0yHa06qMbTN++9u0zD9WvXDg4m/Vl/dW831uWXXn1tr53tOTg+fn+56qbj8SrL1Lu1ZFJWCxcpD0gaS02kahXBpt+I5NGk7ZYrpNGVq1Pwi3ORktPR46fXn3sujHcWpZufx/MnayQbgYSCqw4vY3m6unjpYNx18WzZ+7oaVxVxPjo9v3147crhPiXcLBNx7X0uZNoG26l3p9Np5VDL6UXfTCZdt2o9E2hbMwSeDxtBMCRy7ny+IiSnBoxvPj6C0EqWunXBw+l8c3B48PZ7p92QJ4FmDV99YXL8YO7d+IMvPrcpeu/oLNQN9P31mzvnm6Hrh6PT5DbFQo9q3/rB55526fV3nkgngwBr2BnPbLNMYA8fnvcpjScNop3G4oOvCQ1zQQR2VahGlVMtkmJWQu9Plkvp8+UpV0zEnHIaJO827dHRoh63k3qyWm4udEUkwC6LXZ6sfJZG5GA6FUgBKXGVB4sx/iOjInLEPsimC1VFSLWvkpZ1vzS1IoZC6LFxjEw1+VLyZbdCB7uT1gZYLNfFcx3IUNapa93Iop6v1pOAk7rt+yRK5PgyDm1bV4rsAVISgFT6yu9I14O4rs/a9YiQc17hpk/R1EBVEQmQmQHBTEUEEergVPXr7947vjj94X/uh28N67fffOveyau7V69Nrs++fv/JV+8//B9+6h945+7e3flz/+af/kM/+MPf8j2///OLkyv/yp/65cv56Xe8fOf7vj0/eeetL3/t+rcevPQdd5ruaXr6zscnzauP5zfv7h4M8a2/9hf7j378Z3/q7W/94AcP/6WPP/3Z+2/8yk/98F//j375L/0bH/6rf/21uzc/8/u+vfqt+y9vhAF4FPoWDm/c/p0vvVHPnu9fuGs/+sv6hSft1T0Io8cnj1/5ge/88o/b7QGbyrWWZ1jATr9n50b/n/2ND92ZvfP3fmL+tIRHF10fTnbh6rftNd988N6Xfu25H/pd9//mb6TbB8/9uX/nF99+9NFwozzSOwDgGZ8/mH37t63feHVcw5ePvx6+/dbZ7/30p//An5mUq+u90d/4e3/zv/uRv3H6cLHngEPtySs7RTpbdVXjJDllcOPJZNYeH80dqlOogl9uEtd0MGuy5afDuiaU1fDO/LIryQVuyWV26AKiWZZgGgJNRo6hjD0OcdBipuVgt04lFnWmAKgakwmaupXo024Izi3OFx4oAA+LOB2Ndpu636z3RqO43liWkaPb+7O9/YPzTs/nctr1jAwPNiLCAFwjeLx289p77x6RGAdk41VKvqo1WCrW1H4R5XKxvrY7qkRu15XUQVlWks1kPh96BnZ+GctGDdmA0TmHbND1ANCvQQ0TgzmoaucqJs0kKr1pwhItFQQHtI32mBhxYUB2IEbf0IsVATPMigwYEWrvRlXIpfRaCgo78EggJlFNWZOlgs7ICoCKIW3frAQkSYxo3W9Va8YBgFJTO3DoDY24gCpJMXPEZiDGpiaKIZAzFUMG5qIAhFtuL5ICMiGaMfIz2DLopK7H9Ziq0I5GlUMaEqM6dnlThiKDKz1AJzoUbZqdBtjhWHgS+8ViddkPF7p0cbOMRTZxUAQlBTMehtPFUeFPLVd49PjV03gcAQ7G+x/+wEfHzchUQYyQTAERgbZ8iS3xWUkrnh4Om/Vi/nh/csNcpWAqtt1Oian3nErOQ5/myxB2cpbzJ0dDvyEwAxAjNNj+ORB0yw9CUAEgBAKFDKZoWzaUEpoZCCGIChLTtpemAoTBUwiuMgMCItICfRJQ2lbADDiZJRXnUYoVhGJGQKZQAEtWQmRUQrCtOwxARdkRmDKxCWoGzYhotcMq0FDAzBCV0WCbpELaErIRyMBUAEHNUAzQkajmIluAEnkWgSSoBrWnwG6T8jKX3rYJcEQjMdhSmACg8mymROyYRMXU9BvWGwSb1vX8bJG6YTyejac7K151aVU3aFZKkWyQuq5RObixB4qnT560db+3O/3x/+3HPvOFL0cF59BK8YFnOyNSAw5V4I2aquzNxswKSI6QgHJJq3U/xibn1LRh2W/6TZKix+cDGgFa1pQlO4cK4JgQIBl4ACL0TKCmxQxhnVKnIqaBCRmawKhQEaNY8Lw3GZ+XWBIiUEqSUybvtjl+URO14Im9E1DfVD44MTWJVRWy6IP3Xv+5X/pfvvUTnwbCajzZb8J4Ojo6OV2tVuu+v1jMU9yglk3pmf1qHY2DIsRoXor3/ubhQTQ9Pb0AgFChRJ1Oxi/cvdPNz44ePeyGs0GTmk1qN5k0fSeDlBJj6bqRd1FdO92fu1UpJmLFlEwenJ0oqiOYjKssIFJiynujZmccQhvKsqMum5iopiyCPBnVWpIj7vve+SqLAbs+W8olgltveovZuX/4n2mOuI+R2UeAUoprXRUoeO5X3WoTJZNHBjFyYX5RLGkxcN60SCnmCIvYMORJaHdG7ZPTI48VEavgclHE6QvcfO31x/VzLRzUVbV7+Uuvjd0ESJCsqhgRCcrBbNoNK4fWNCiFyNHQ9/W4mu6O5
xcbrsh5jV3nzZq2IUTdRD/StcJlGXbvHoKzCtk7KAqIoIUh2/A4vvqTX6sm1c3v+na/B0OTL6VbOio7dXQ7pSCJguNSCodneQpJgkYqKibeOSbcQsSSaBFFxeKpZ+w3w2adSirAzjlStdqhACCqmAJg7NNoPFERFfUCiIE9GWGOUQqoMkIoSZrGm4EYqYJkYzAtwuwwECMYgxHmlMFAAbNBHcI0NEDICgR53heRhKbBjJgAdDJtB4jqEAEdUSIw0AAAAqBlyGWDgK6qqkqH2DhXYtaYCyozgRiggsOq9hNlcHy22iRkMYhFzADJRkC6KfOjVSm+rhpDICurVeeISi6mPBlPVkPKJgoUi7Vg56sVP9Gd3Z1QTerpCMl3QyypBytKvQRcLvrlRg6a5srB7vVrL+7s7zfTaahHbdt67z27rUJTlYyZHDpgIgqhBkJyFQFtCXHbIzS7oKBiUMxUMAhcPbh+BunP/ht/+sH58Ve/9uXbL3xgOjtEppLFzIoWy8Kk3fysXywdeVqLd/jrn/v5TdX/ztvH1ewwptK2lWefUl6sel/x3k5LhIunGyveipW+VCMtNihAFXhnFJYbUYGhy56ZiLyvBHKfom18MrhyuKuLPK7Di1eu+End+A6NY8F+Zc3YXbl5/WyxMsgxxuOTiFlXGnf2Zu3OblRRZy5UAKhsiDlJyiY+bBmC4F2/Mx6t18sCVLbddxHNErOIQjL0ao1vdMhJCwRfUsw4NHUQka7fPDsXwPbjVucYTAOS57qqRozAEksZyIc+FlUN7GLMNAwNiGUl9k1wl8crK4RG47ZmkLp1826ek/Wreqep12utJ5Px9OD06IJK2R+FcRNU7GIdq6ZCZyN2I+btcHFzudhWr6jINLQ2FG9gYDlK49pAbj2kqnLecTp6EsNXLuer08ePutWG2LGjbGKuSgpFcL7S9SqF3NSMgRGAwcyLFVViV1TVspht33aesGEHWYhKG0KXU2AmRyaaASoOWnJgApSYxJCQDAGKZERLfUIzUFQVMMNShtWQletKqwDo0BGLAjkEIDTdts6dc1toJYiUkh07RkB8FqwGNM+eHAEYM5upwvZ2hMEAEYpmRRxief/+Q8Z86+7Vs9Rtlvnk6BQJuvmiFK1FkP16cZqSXcj0H/tD/+z7996eXqlGk718fPT6F3+jyTTeb268cOVpad54PB/vTss6wmpQFSuFkRbz9Ze/ft8VSFEQnANoHW0hjWYm9o08kaEjaBDFqAPcmPYi6NijZoAqBCZmMymyWfVNaJrQWOwDaBvUCjCgZg3sE0DFgQG0iCcHAClLJASFTMmYUDUwRM3eewJrGlcHjrkMAFHUVVXqo8cyaXHSShlW50fxvffOFIKVpm5H664zsGLqHUM3nDyM/cr4G4fk0/MOTSUVrphoOa3r/enuYn2aka7uXnnv6P2qropAb5z6nHNqg0eiwxvX4jh8bYhfz/Ef++ZPfPTO5MGvfm2wAk3dixy2YTMvjSnlYXrgTt45vvmhT/6xf/svW2r+63/vj8d+PvG1cPfFv/affPhTnzx87qOf+dWffu2tr5yczzfLoT7crW7vPD3elLb+9Ke+NZT86pe+2rfjn/jbPxGhDKkrfV83fn8yuT1uR1Xzyjc99wuf/c3Hj87RuYxQCoxDGI/bLi0rzwClbtyVnTEiPD45W/apqnw7aefHi+OzjUdtq9rMDYYnvfzqm+8HkrtXD9rIX3nw7v547Mj2ZrOi9NqjE0ODi8X+zqgeKrnsujJvg1Om156csh/d2TucYHn35DRUTU9SwE7WXSgyZh8QjxdPZ9ywmQNqgueQN6vNyZN3d27fnLZhyIoQpuPdNNrk1J8sFjv9MJ3t+qqZNEG9F7Kr7Wx9ubl969rFvXersbt7984Xv/jFu1dvwGT61ptvZODdqzezlPefPrmxs4t1+87T84MdvhLqq7O9buhv3W5snRf9ULI1rWt9dXM66dc674Z1EYXSVHzr6v7J2bkjHO80J6fLzZAU3UYyOis5baKUGLv1pnHMTbU3aTarVTxf+5JB8VKdRtioXwxybXfn2nS3X5+EQVM2BfKCe7PZ60/OFzE303p3t73/4CJucheHD905vP/ag6oded+shnz9yviFq7Ojx6c7iPPLVVONXnnhJj88eXQ8N7Yq8GRcr1PpU19XHovkHEd1s1oPq1IeP5oDUDCpnbx48+Ybb9/z3ne5uFDVzbRgyjGerZednH/8lRe+9u4ZO2hH/vTy/29UdHWvHaIFqLpcEHRcheNNBiBRdVQBUEFoaxeHbrVKApRKNKJH8/kEwu39K6eLy4Px9P3TS6xG2RKV0s4m7M2yFpNstkoRPCU0dUqgUoHiFtUpYiJGfdSYclUTAZWcEYAIHfuSn+HiBKwJXs2GmFERwID4eL35C//lX/vHf+/v+v5/6Z/96R/7yXvvPpg2lQthsxlSzirD2+903ujHfvRv/e3/4X/8+Ld+8p//Z//gzsdeuvvKx9YP3pKXpz/w7/yrf/Hf+g/hzd958RV388YLP/eTX3/pA7cwdtcODvO7x/yLb92x5uz10+6//epVmXxn2befub/zpv/1H/436j/5Q3/kP/2vH/xrf2H9s7+yHypUJ0vULr0A7YP/6q/Vf/Zfm3z7p/Pr8+rmNB49Pphz+Qe/cM0fYoc7e1dsebY4v5itqynvhh978/z+veeLZsDdnec6hmZcDQ8v331wMrv1IrxTXRvd3A345Y9+6uaN9hP/1/94/FgVKhgZXEmvfe4nmj/6qXt7Bx/6U//uq/cudl44+PGv//Zf/Y//jCrH+/Mr4xm0GrOVbCKWVZgtE0GCbJKxHJ1dpm4zbkO3yf1QmD0YFaX5UFSlpDTabfdvTt57sNEq7EzCXqiGFAeUG4d7j49ONMOVyaia+uPz5W41uVgMRqYIs3FNpuPx5NHxSWirph7FZRm14wiDRJtNg+Y4kM2Xw+WiP9i70m/iC8/deuvoNATc9IsPfuDlmwcH7zx+slmn2EsOvuTSDeXZkzpKb/lo0TeADTvyvma47PuLfhE3pfFe1kPjdUT83v2z64fjvfHo4nLD4DXlqvbPHbZsdrZYH16ddF1MYusksSvDszImWCFRloxGQSJwAHYJFSCaZC4CSYs6JFICdEDMXADUSnDoPXtDD2RIW1MnEbkQnPeKZmAxJiFAIgItYKCUB4wDFEUB06LbWhIQqCqBeselWA9IjtgpeGvqIBVsjR2AyETOMQuUgmY+FQVRZxgq8hxU1MxScWhAqqbA5FTIgKyoCKpmM/OoU+8Dt+zHwI4qH9eCBkmgmBXTlEUULOcupX2Dqe6q6njv+RFmbKZVPFtdHK1Xy03J22mZiqIk31Lun7z29m9RClDmh7euPvf8S4Ga2rlSoudgWKlu6+oIQNtpEW5RP0jONTtXbl5ePFls5s14B9kDqJasInnTWyyeadVfPn3//o3djw5DP93baUfNYrk2ITXLYowWHBCSmQKqGBUAAmBGRqe9IJEU9UjAZCai6ogMqag6g9p7RGLGUWA1Ww6DAZpxMWFHYkpEDqEY
xKRti+I0RSH0qihixFCKBo8+eFFhRgDDbQcQUJLRNiOUoRt00gYtWo98N5gYIBJ7lKKApEbbcCU8o1sCGm4raWSghoOpI2LEHIsZFMMtVUHJosggBoRkgAaGCAi4FbuZqVlREFHM6ggM7B8KknfGYzEXQReD1JVFKdGSsSkoAIoZGThTVEsDINLeDrPT197+rd/4wm/0yRfFHJWR2NmWT1TXDZNpH9UMjUwNwZyrVqmzokMqN2c75LML7vzkVAqlYlnK1b39blgjmqqZARM9i/ER1rUvqVjKdRXYw1rLSnMSA7AC1jDXzEbgna/rxju3GZIVBZXKsQiyr7Z/RAyEimKAhFVFUMrmYrM7M9MiQphyzDkKv/D8d9y68/J6M3dn7cnTs9Inds4xEIlido2LXexi9IxELABMru8SBFXQzWbtvTvcnRTJl+vh+u2Xv+lj3xHSMCxPNpcXj07PEgJWPiIGKVxRbaSs3oaS1FCzeaBmPKFu06kmCDzEyOQqYsNqsJzFsCueZLWY101d+brysRgWVfOUkXoQHdal2Gg8NjCh3JeiRKPZOK1SStFyBkNmLEVEVAuG2m9RhWLQtv7qzvj48hyc6wfwAJPxJPYDoUtDkSToaLNKo3Fw7NFgtewRgcQWNtTNtHLVerFBtqzufKVvPZh/6Luvry0+vX+0OT3dfWG6flgyQxYlRO+xaaujbkEAleeL+cp5cOD8yGMw81gmVcJ05crB2cU8dzmpuQSrTT/E84OD6WffeP0P/b7xxz7+3Ge/cORrXnUJTE0RCjA6Ej+c5Hd+9suzl67tfcud63evbrpVW1mYeUiYJWfZNKNwfTT6+t97GwByKmaopkggUkQQyIlpVmUCICi1o/1mvriMSRzViIoE2UpJYihEOERBT01d9UiCOl8kGJynKg+xWE6DlgzOEzD6ujLIwFoGYUBCRgBwRjUE72om5mcpQ2PIRQSIzSZVnYo6xqptvXG/WI4rhyWlmDj4NcXpyCs5UKiCy2Z9KcE7j5yjsfMN+ayyikPjHIe6Ck3uBjFrK6dC65jSIBpo6IZi1jjPzhcztQxAYHhxloda2cbBtZO6ucyXQ59MMBcrxWK2quRsMgw5xVLVQQD6OCxzVRdUK3401ho7GqBJ/dCdXJ48XS1Pu75hDmox1GHvVjUdceWZAiCpqLLh1tuI6DiwcwiGAIDsiEzMSBWQzHgLf9lWghRbHIW+f+Nzv/Zbv/Cz6+VGNscvfeRb/bU74+sHMmQCR4RglGPOZTg7PT568NaTe/eLmiQxtq+89t6j1WUn6ho3lORDG5No0a7EpvFsdHK6VOFc1Nhcq/vX60I2authMxydrlbrUhQImQjFdBVXFrS51hqia3yYtNxU4KGZuVs3a4nDg/fyk6MyrJBJzxer9fllXm1cdppYxdBVeVlgGn2toa76TQ8GnqTZYfT+6bJrp3XT2pWDOi+6vr+IMfbq1BlZyatUhDKCMpjDtmomzY4DulitujwAYV03PkAuxeiZJpyQUspm4BFzFjBDEzJ1aN1mCWp91GE1hIrBIQKVbJrVyKydqNSrxXEpxsGJikGWEmMpQHx0fLaJsn/7pqV6Z7J/T22HeGevzV1n7PysMSJ2viJfA+Yh9WJX9vbn87lpaipswmSpMhmNY+oZoA7VwXS8istQ1VxXq03/4K0342qDgACqUozITUPYbwuWFHO/HEqqo2bMDN4RMQAgByoEwOywFAHmGAczqHwI7Jj8OqdsgmSg2REFBUVkBPAsW6NoyWpgAIKIput13yeNqQBHLoZQVOXhW28PtHf75WvI1JgBIgEhoJmxY4NC3AbiXCLatq9ecozBBXS+5Ow9eu8QcKs+RTBTdd4piIEBQs4ZVFan8/fffO/tX/uNePTek4dvfOhjHx0dfPgzv/755flRn4bYbXYs1ePZ/ffuu9HB/uHzHVa7L74826mDjmV19t5r70bK/dnwys5efzI8fO8hbxa7Ywqt71fZBT4ctfdPLwdzqCgFW8eGlkxbBsdbdiIUEXpWnLciikDj0GyGPhnkZFXtsmlBTVCyaiyKQF4yihJFx4aWiQyRvfGQISoFBM/oGAjtGf8R0Axjzg7EOwqEEWggH0aNbyof2Gn2qHWKBubR56LLVYcy1JV/9+kaZRQaFIduFNJ6kwQGB03gksvQm1HI33DCMjIAjndGfc45xrn2F8ODF27uO4PL/un+bh1jGQzWSZwpWx6jOajJQ1Fdb8qq17/96udo8m2//wf+ic/+wq+cXi4uLs+kHo8oXB6vw271id/9of/dP/8HJ+HaX/73f/h01U322g3Zvbj54It7X3ty/tb/+JMn538zVdoz9YZS10/O+7GyHNaj2finf+nX6pFvd/a/9vX3UIVK2qvq6cFeZT4lrcx//etvff61r0chqh0iOueGrh9Ndzebzgc63N1drZceaWdUHV+ueoEhghVJZbM3bZy5OzcO7z94HLy98NzBk+NlMlpFWj+6rF34xHN3Auij49VyXhYCxq7rhqbCGnkT415VHew26vEzb7+fAUZUz9fdhcaS8NZsp7R2drTs+7I/m1ben/XrPuLuOPC2ECqySbnP9juPn06r+srBlSSmWXJE4jZKZ2hHR0+q/d0wmcCm6iQeHrTvHy2jxVAuX7o5vVx275+8+4FXPnR+enn//v3BXN+LQQ9luLW/E0ajp6u5jopUIbPrF5vdptqppgG5X5wM8+LNjMIl4GqjQn53b5c5nc8XT5Zz1TSu63U/2Ig3lvpsg+DzV3bbtl2vV+Sqy3V/0XWe+clgUGQm8tyVVnu3TtLFErNlzkrV6fycJb84moXKnUkaBff4eJ2Q1DsOsDzrqPZptRkGezzvJweH1uts2kxG2WnJZ8trrkpRTucxVOXk7Gld03TcnueurSoDh5DAynqTGflgWu0e7rz15DgVA2cA1ve9eXj76EE7HSNz6WI2ef3Jg2/7wK0Vx8uNehkvLlZsidiF+h8toG2W63HbqMdlb+t+WHQxg47ryjX1prdOCpAkRAwUy6YetbVCLIXMR4NB497utMRybTJxgGq2Mw5RuxJNNTfeRSlVcFXlE+lF6nZmTaj8kFIVqsfnSzUzVGYzM0HMpTijKri+FC2FidBMigBYRNq6ipwHMZSobe0D2q/9ym8+vlj+U3/kj3z1M5/77c99TmEgBhVT70VQGE9XC+3zL/3Mj3/2V37qpY997M//S3/m7q2PnB2//6V75y/+gX/8IM/fe+Ot95aLs/H4VqB41q9Ol7PD8fWdnTFXbj1AjuHu7rg7w/GTb/l9n9z/xV9/8P/4S83yYftoxVL1KwMWwiya2vHerY2mr1/s3f5w97kfAT2s9sPharX+W3/3euWiyPztJzuMDJjOLrwO+uDs1sFOjmvDyrfSiKR3j6eXM682vvucfu71XSlwGV6Bg5c3/fG/9qc79gc/cP3tZnz1X/i37u5/AK7uN5vlxbl9/jd/+W//3340blaTpr17+Fz1sr3x8CGyy3FAQvKISIbgm+AAoBgILS6SDbC3u2MujkakWho2Z7q4WJPjvZ3
6yo6rR1C8Hl4ZyyptegW0bOlodcZOSdE57OPgQnCtSytLguxwvYltRRfL5Xrd77mw17QDzK1sXOkqL6PJOKc4HtdA1HrvAWr2aZPyZphWkw/cuVl5enpyvF72gDyUlITYaLHuEHTcVJs0bJLs+ZbRzs4udnd3EkjtXXAVud5Eluuhk75v/LVpA6KTUTM6qFf9qlglgH1f+thHMO0TZmlBb1xp+mhDoteOLgCgqhxmlaRkYKJZUQEQgAvHKIIISKBiYECIBloKIoMVIGQmBDUENUUwJqocNQEdWlQbSikIQObYkEStACAJQlFUBFA2QwIAY0bnIDASmAkCUwbLAt4jOgSnVDOYKRMQmxGICpiAQkCPVQUUHBIi5pxFiHlLuTFU2YKBFExMtGQpQDCqaG/kBs2lKJr1MZIjVEZ2SM6R9qkImCj1qJvlogxDXY/G0z3WbsKH1nlFd355Bn7lMgqotwIElQeiYb05n7hrn/r27/XODJBdpWV7w5M262WownjUEnncjjFs+9GRmAKax3rc7L769mdfeP4jVdVKBtUszFxXqmV6/eBysRAqEbD31dVv+sjz99/52pe/lKMQb8cZZqLgENDAI4GBmEN2nlWxrnxWM8RQwbQdXa6XMJghiWGFjGBXZlXOJQ3ZKQyaA1uUUhCD92qylc0DoCqakqiRA3YCyqYG29EUMSE4epbjUQAzIUQEEpAsQsRm1vd5XIDBQkB2AgkQkMwATdTI8FnaZ0vDxm1UylSNRACMvMtgxWwLlQBDRuyydDlHM0Cs8FmSyAzUEIm2CHUVVSMFYAQzY0JyzwpoMeeYRIsAN9yMMohCcgEBgBm9IqGNiOKQpqPpjWvXruyP333w7n//E3+3U1ZAQkOEirgFsq6XqjJgA2mIGbnxIYMMkuMwmKoUWQ/Da+89qls/iPadChgiTqqqT11KCVQno+ARPXEVXO4jeI+I4LDyXqQQU8nRwHjLJDN0hCJSOzdpXEVouYgJlhzQGMA5VwicQ0NerbumCt4xOrR+GB1ebadTAeDKMWAphT298k0vXr99a7PaoOB4NEt7OvRd8M2onXQlTSajoV+rg3HlVEwEDNSkODBiQoPFerMzm/arNSGyVAcHL1mm86dH85NH56cnouZaz63fGTUT12RhJXe5PCrIbdMgI3C9idFQas/dRvtcclHw1GuOcakOR6NQBjnebK42Ve3dxWqJpjkqIu6Pqj4XjXmVeoc8GdfFStHiDClJWSlHrdEwOEJdd0Pdhsm4AnLMvFkN125fWayWrPLue8exWKiw5kZS6TexZFXpkcgz18FlTbeem5FUy3m3XvUliVjRDTDjwGJZ0ib5ljnR/c8dhWVkc0/fPsvFKsfDunDtUSENNgy2WvdEntH6obCjqnbTvcmdu1eIw6JLY4bdcfXq59/e9KWt/OHO3tnxaUCug2sdWOSz+5fffOvul379qWuY2JxzUkwGUdUhR1dsWtnefHP845+5fvuqxM1pHAj8cp0VyuTGuBk3K/eNnPX2yc+0RctvH51o5smBh4SlOOpNLhar5aobNZStOOCsUlRFZftwQMuSQDUC8sj5CEaGZlr6ZAqMDsRM1UriGqraCSiBoaiKIIGb+Kb2rWPIUkoiYjQig6EfEKoEmYlBLZWInqu6tr44ImSsPCtATqZOGUE0A6EjzRAl05Ckcp41i5QupbYdd33nmzapSslVRd4FGeKqjylTikly4TrY9v1mUFRQaUg03q9JyZmt0jzFTc2VoeWUa0fe4TBEYeLKk4iJpL4c3NxRxIvl+oXbB5f9gGyxlk7Pjk8fnJ8+cr52tRjoeri8d/+rm8XZlSvX9/ev3j68Hvgqe48YHDtBQEKwsg3KbiG1UpTcFtyn2yKtQLFs5Hjmq9Xp5f/2t//y/tX9P/Fv/9k0dL/+0z/2k3/n7//eP/aBM7+pvYxmYwcoqT+6/9Unj++fHZ9BkTuvvIDAq/l5r/0H+YXLL73hsKiTtm7TkD0BBS9ZSpfPF9lRlawg2Wjkpvtcj9xqnReL3K1LjsrswZTRqZaU+lA7bvHK/iitclkP3sQ5p956wPPOzh+ul8cS19IPwMFmzp0vl2VZgqOAoSComRa5eHp07eruuK1lo0NKoWJEbALe3Bk5UMywOuu0lD6mrEXr0WR6mJYrAKwqxy6fXV420Bp7kbI7vSKFCS8KZIIcEAjI+2cFNGQOTTX0sZgSWYrJzMipJ0eSVWy52jhDZcspgVo9qtgEnK/qajjdbIahmIynVSylIdLBbCBFiSk6wzKpXRUfPzrXNDiHNTpf+155UPCIkypYUclJkdYmaXlGIlYKbfRgd7pcduN2h9REerTN/rh+cL4yg5wLsQ2pFxBmxoII4AL6FlyTY1kth5UmrwAKVBRYFUwBja14QBMTNULMpXggx1gTipQo1on2ogQQGhdLjqaEHqSQc6A69AMRkKGoGqJnHEqMJQ2pVC0CaUm9dJfnR+987s34B/Z+SEpfVQErZFcRoJkF5wmMtgwiDoymqiJGVIAQCAnNIKeUDGrnAvNWm4rbl7tYURWTTDLAxZPXfulXH37pC2N3eePKWJdPTwf9wAvPf/no3RHo1f1ac2pYQ1UBmKRzmz9auVAfeifpzTe/hlX2Tuv26v71O7q8f3GxPHAA4BwwABWQZVxd228uOllFMwURyGajhvbHrqilaKzg2G13mFQQTJMUKMNO5QbVgiamSWzVx630Zd5nR8xMfVcAdItbQi3Oo0gR4WLsAHPOjpGYBbRIFgMg8ugc+5xAIa1Lgdr7nTE1kyRRNAkqmpoZO7pY2MPjxa2r7fMvzR4/eXSwc5C5HlLZbysGrB0Wyoc707Oz3uwZxv/ZKkCKMfXD0LTBEQFQU/n1JqqhFsNAOzvjNO9iLCoFHa6zVqPp08unQcwBX51UfbG/98Uv/9yXPv/Hv+fT3zr56PLhu7Myvnx48bt/6Hvbj3wwN7Pf/KXf+Pznfvb89AKq+t7xckhWVwBlrd72WpebkXoiwLKJvg6zqu6krPok6fLadLSzG0pDlcreXj0e7z96cBJqt7joHpytRxV1ppvOHEMVnORiKtf3ZpLTtK2Wq269WTmkGzduPnz6uEulqurGOdA8IrrRuiK4ubwIjrWkA6Y18eWyr8ZVXeHp4+WYaObdc1cPFl3uFssehGut2vD09PzabFJjGbfw9qNjUJmOApXSd5uq8Qf748v1Yr0qoBRG1TJGV1IX47QdeQD2biNJ+iigBqYA5+vFwf5+TayGICW4Kuzudf1qGLrNcrW7f22znj49e0yWWw6T3b2cuzo4LuCsWfTp8cWZ5FRSqauq8ZJZPvaJ5379C19bZnAtQQUAeRRwtjNtGNfdernsRnVbAO/cvrJZr+Jg6BjTRqRUCnmIo+B9QIt5NMLpzA3e7j9ZTcPBct3FriDzfLXyjEnydKeqR5Ojy3ngEJerZMhE4KSqXLfq929ehQ175Kx5nYfpbI+a6IGjSOqkaDm8Po5pY0KPzy73nb8+nvbDuh2FANV83gmwMbxw9+aTozMM4DwP8zUk7SXloF1OUr
IjJiwfeOH2Ww9Pz5arCoMHcURkaEU3y4R1GI+qWpIHM8273h50KYR65jzVtjetYiqHB+0/MiqqvS9Jl/PNJgIaFbOmcqTgHNQtCMVrV2ZPL1YOUDko0N393fcenxNBgTTv85DrMeHYsUFugm8qtxhkOaRuyNwgJGmB1AzNGh+CD1aymdaeNhcJUgA0z5gFyACIQs3X9ncen54CO82Gz+7RDczINDTMyKvV4Igqxp12fH65fPtrr//MRv/kv/4vzw6v/fLP/4xxGlPoYxqG4jxbAlBh4C7DF3/7Sz/8lX/5o6+88p0f/vinP/W/T/7Dv3P53u/9A9/fr08mn3jnu17YvfjNV++9fV8bfW++OH3w5O7BQelQfueeGyf7/K9988e+vb917XDgs7//2Y9853c9/moejXb94WT59L327k73ZCU+7Y1XD3/ux/emqT9/P3Bl12ZOLFzb6U97WAzSuvE33TnfdG5H6pn0PKSWcpcpa7/ON19+AVqfvnh/+I2vnxydCc3Ho9t2fa++88HdW9+8973fBR/88Muz/XlMDx99+X/9q//Ra189qlcp7EyvXLbLYhM3zsu40F7ZqHLWuygaABgUCFDNFDQJeUdqahSCr2rXl5WB3dmfXSyyFSzF1Mrezu7Dp+dOCi5TiGWdI3rw5JvQOsx9Kecx1iPnnAkWITGyyXjUxW60M+VRzcvgwKWuIyyscNi2694encRg2I78KnXgwAeYHEznsW+bsLc7JrX1ZtN32RyvTI67ITOSGDlFooTqgnfIznFO3c7B1BPlIToXGjcuEDcm4H0xW/Ti/VCNwuPTFXoc+m483XOVP0kXqfSFOVBglHHjZ1XNli3JdhW0LUvUghCHwogILMVLISxFZQvTUSMz/4weowCqViFWBmQqyEMpggBmI+8qZw5ztLhC14MIqiFu1RQCVNByUVVTUEJzgbyD4Nk79FseslkpGM2kqCPMojoYiLIJoRoZVx5ARSGLMqEn3zoX1NBEFbOUlLewL0OgLCZFKSqTU4NURMw8GlginpfYmcR6MiJ2FOphc17SUDusXUOc57FHAwQqVob1YBlZpKkD80QYSinjnYNMq76fp1LUhMGkpL6ne48efPzFqzEOORdsAyU0tTisluvzi/XjD97+CLej7XzCALaiRkAkBSAAGp8cL+dPZk8zXn/uCmoqsJLAQqqVnxzcJXl3WK1cVulMZPzScx9793e+XmQwAAQQQDF0iM4ZojgC58ATZijJETnXZ8vOuZGLXljByJWC2Uo1oabNOOpl4/za55LrGhqCk430BWvwqFZKYUZPqEApEbPWtcVgqc9g7IjEhBnJQdKy/fwRUKRs3SWIAGiMWMxSKpKdq8gHcxWkrCzPevlJCoAZ4jOvGpjZFlJknsEhqqGZiZmaMiITVg7FaJ6UgdS2UAMABEMUMwNTLduqPxgAg4kVQEbkLc0bAACGWBy5nLLzlWPnyEAHZslZIaOIouPLxer29YPv+57vPJzNHt5/8D/95E8PaqmAU1UTZvYOdkYOiZLBat0Twqhq66olDp40geRcQu3Ph36RykaFchKFypNlHY99cC6JeM+q2ng38wxZAiGOWiPui+i2bCiSUiwiJMrEhBi8r5lEtXIcCM2kqsNyHbshRwMRvL4z6+LQDX1o3JW9ETu/nvcO6OpsTEK+nbiqgi0YQhWB2FXNCBVUrbjNsJyvRlVVmumc5o6DZz8IBORBsQiKgoiyg+k4KOmoaUJTCUGo6rzSV178tudvftPF0bvH95+kvMqS6toVg6rI1MrN2fSth2cRHGE9FGxdW7sRsru9q4uzU6wJh9LnnLSQIZqVlEtGJTI1D4gpL6wcrVbo6oqgqVhThj62kyZWwTGuumUd3P541DZ17IfFuktJDYiZFFwzmTaj9s6NqwVSzuXWrXB0Pt/MOxQtCaVg0a1xC8FUUjYDQGOiEksYVYTVk6dnm3WvWbdcZjMjwlISIfqAnilna1x79G5PAiVWbAZq07FHFvW1Z7fqohHs7o9Fk5hMDqbP392r6oajLuYbdfTk7cf350kGiyJU61l/7sg5xH459KbzZF+YH3364x/+3k98+Osnrw1WT3Ynj4/OIpQazdcsre3emVTMu7mllWwuY9VUdd325dKE+6M+0SDwDGttpiLJlJiIGVQMSIlcEhWwAQ2UN9GSQkY+X/YuENO2D1gccwjOMwoYELNjU1A01RJTjDmX7emFEBDRUz2tKEAycQ3mUiSY84FGDitfoghsD7aoxLaFxgAq6rIM+zs7zoCHMmE/JNEcUsTKuSEXRmx82JgyQ1ZxdTur8ej8rMGpMW5MyAoAhaodFCrPp91aciIAWS2v7u/t7086y2ebbIoCDFEUTERLAQMwEmCn3tVVVQYrsWiihBpj0qLkCAjZuWvXdk/mi/UqV+jHk1ER5+tqVE3mm3V9/WDVLU+O57FclM2iYawp5bLwrmZQyfro4WUpizIsdyqajcd12EEAROfYkBDU5RzVBBDFjImeZS0RiFQMUF1DVZH0Sz//I6fvP/70p3/g9rd8stmdpLzYfesr+0+H4eGjTQhrWszP3YP33nToKqruvvit3/TRkYlMru4vF/NYlhufL1+/19+/V/rSD6lRYCJkG1fjYdNv+gjiiL1JqSs/Cgii77137l0jQiDEzITgqyDb0i35esLXb+zNTzZlUYrR5rQfm6MKg4aLJ/n8EXDypcvjaZjt1pJKw60EKwKqGodS2IAcpJA3enRyJuCCd2mTYxpq77yaA11vNhqsGTVKaIRWSlwsci7eEFGLynRUIWPqBqCwkWNUmtbVar2SkkLlhhiLPDsXhCZ4DABpKCWJZkuKrDlHKRYzZCUAh9JWbVdEcgFwsc/NaJJRj4/eH0pmT7mkquKGuYuqEYyxcVXp08XTk51pD916THY4HudB41DEUah8MKmKmOoabSC8IBih7k/atFHxVSc51G4oCyFJQz+pmhEbe0pIbajI15t53Gj2BmBaBWqnwc+w8mmxXlcCwyo7rEVtyCIRx5WzlAWLI4+ODGEz9MDsCCUnDAEI+yEXRgB0gIbgmMRA2GWClJVMXHDOjEoJRMVQimw2fc45paigKKJFT95/ev7waCzN2eOHLsx4vjzc5RaZQIBYpTCxiSEaIgQXiqpzIVSuiCAyk5NckHC9Xtd1XVdNVVWqpRQFgH69cUy59OvLJ29+4ctnb7/x3I3Jk9NHZnU377rujXEYX6XNarAqtKGtun7lQiEo++Ds3bdGN5+raHdYzOfHT5zzDx5dfsvv+fD5Wf+1z3yxjv1yUwyqvdkoDAlJRfPh/tVld1mxZFOHIADttH3hlVvvPznPF0vtcnBkBEwMgYfBgKzXMq0DmxtylGJCbMx9EUnFGYrBolfv0TMqASHWwYsBmnrjATCJEpkCRMmmnE3JhWyYDUiq2CkRRlUHPXSo3jwrS0eeqgBHZ93xRX+xiovLuDvZ99UVRw8q5+eLtajbCGUoCDpxvlusW4A+Rwaq2/rZIdnZzs29CqgfUuxjykWjplKcZ0K+XG4aBVOgbBV5KdoDH
J0dM5SGGETBjFUQQofVX/+5396r8rXr+3Vw1Tg+ee1XL1777PWbLz1/cHj3U89975/85M//1GfsfIPD8OLLH3jrtd9RZxe1JaGM7nI5bACqKJv1qlT0u77rQ1fa9qOvvPDG11796Z//zDS0zjT2cVQ380V/frnucukFm5ZNjB3d3N05u7jscrlcbQ72pkPUknWVNzeneyeL5VnMkjnlXlJuCK7U00++8OLr94+OVpvVOgWPpyeX08r7nea425z0Qo0/X62v3bruHDYONaZMgCGAQN7E6dX9wvyrbz5oFGvmItKvhsbXo1AdDcPFchNCRSqYyvXxSMHU8zoODaINBUHHdXu2nLfeE9OTJ+e7YXznypVs0ElGUwA/aceXl6cn9x5eo2rcTsZtOwypafy08U/mQ+5t4rkWnQ8XbYOrJNPZRAn6y4sPvHD79OmT+XK9HPxoN/TcT68f2mU6OGgIoqtteqU5DOAqsripLe5fHxVR0dInBUAHrpp4V7OcKhtB4YbklcPdHY9DTEYkEoOjUDH2JXfRSo9AR+vLCmjncNrncjDaffPhMVC9mm+6vFn13Qu3r12ZXj++XF29Oo1DN540x8cdlLDHbnpw5Y0nJ209duCi4qqXKMUg3r5zGDddv47e5xz0LCZMEriZhWKEojar6iUAOj6ctIbSTtxsPbaCDYtkIwAgyoKbPqssR94PfXIhXBytd0a7qzhs4nB1VudVcQz33zn6R0ZF5Kgk63MxDoQ0LOK1aQsW1+sIPlTmynpokMUSI/dxcMijUd3FDGjMMBRxdTXv4rimXUAB6HNKWcFXxWg6qtUUHI1IS9fFbpiym0sJCtlTjkaOvWMsqlEE0VrOGnd22/Umr3I0QFVw3iGa5ITF0Nt0UoMIRUOQ2tc+8Gtv3fub/+3/9Af/mT/KAD/1sz/ddwvvfQiOmWIqhgSAWAyMYslfePONtx+9/9/+Lz867/vRrKYEH/rER3bv7l87b3w1uvZP/J7f/b2//46v9uNT6/oHr7/riJcn9120H/3sa7mCg/F+PL346i9/9sY1XqfoV2dtNT976+LOC7P3TxfvH/3cnb/87/zqV9/6yK1r7avvPvn7/2B+9Pij33RIV/bfe/XrH3h+7/6Tr9Gtna6U/WrHDxqsfvPRe3svHBztDo+rp3e+47vX3/HxL3SPf/8f/8/f/+q7h7/v+4G4kH90cvFo+eBH/5M/9+7Z+++/ef9mzQ1XCk17dccHQielz6dnlzenVVE8X3QFjT03yI4gRwFRcIyE4NAFUrVUyvnl6XRWZ4GUYZUGQ6fEBXC806yz1qPxNag2Xbx2cND3nbCM0SRLjMIUln3EgLOA69VQh6rrU85l3FYhMHjWYN5bXOemCiPvQI18/f7FpmU6w65fd1dv3Kgn7VrKoksH+wfrHrtV3N2frWB5Pp8nBEBzgExaIRtCtxmYnCNenJ0f7LTBOTEzZkU8X5/ltJHgx7NmfT4oUkd6KXES6knrK489yGYzOOZrs2nxtCry/M3JZtmvNrJYDuv+WdCUCoCBCBCwqnBREyi5oG2F4ttxJeZnWCEgJCYGwKiqBM4EabsMSaQMyWKiXvPgM6N5loSQVRXQtmIsFUbzziqPVc0Ot8g+BIVthkRJt/sAKWaKOQN6Vzrwjsgj9AUZ2LNn9uQZwXKJaqKSxVDFgCxnLYBGRUyyBGGhLKBFVAGJMBZa9paGorYcTc897yu6InmIvRG1vq0q8wybfmMqhsAeQUpaLxjrIcXVplvGzhrHVuG5OWM1JpSSStG5m7ZPjt7+ipTnn3vu7tW7lZ/Ol5eINtu7fXj1zpgDAKoqE6uBgRIQmCGhIc8vzn795/7umOqjYfHlL/+D2c7tyc4V2Nk9vHs1WJ9XtlnFxw8fVof3Lobw8J334tG7zlch5lQKAiCzGeaCzkNVg3fKSLkU18D0Bq+WA+cgJll1IbEaQ7Rsre5eI5imUauTGZ68nnWhe2FWNIoAqDEgmShYYCI0VQUlBJ+LVhKDhxgN9JnOmk09OlGToohbRhQDIJgB2rPJN0BJ1vc48oScwwhjhyK09a7h1kwAvK2MbZFDjLANOtCWXgDGZgDmGU3Fk5OiWQshA9g2MURICIQosAVlf8OpBqaE8GzoKQKM37hDMzVl59DQAQybFWoBFRUpSOwCERDzJz/8wZBivnj8K7/9uXeOL7vCANvk2pb+hwk0bWIGTEA7o9GobgFcEev7HhGc4eV6tYg9VpRNpQARNs7VjtvACJZjbh1x8A5NS5mEsK3hOWT2uC5qZqqqRRvvAmHJoAZOxTEwKmpJEREomy1TKgAIiAZ9TI65bXxT07ittcBHPn6nZgkyRGu0yDYBRkg+eNHifEi5mBK7SsuARRnIVB2QZw9iIKYCjExEBtJUfhR4f9ZEif0QiwME89598lPf8/xL3/K1V7929vD9uB7qWTOetKtczCxFmadhXE52rlbLnmpp2U88VipKpWctY9S69iWGTUyIQGBApEWCc1osl1I7TsmWyQqFwO7m7uzo5KkPY/JhE0W9Z/JYMTVVEVnFHIeSBIEcINR1453fxGFYDW+89rZR3t2dXXvhhXv3Hw6bQojMzKAmSu4ZAZiZVJ/p+QDtxVfueG/dsivJEFCsjNq6pGJmyDQeN2VIqtrW/vDqwdCvFhcbI/B1sKwvPH/lrbfvZaiSiA8EKDasx5N2Nt55cLz86tEjENUhcQjmQIec87MBaOzyuKqmTdsNse/j/WVfnB8C/S7Kf/iPfpf+zOXJ147nyz5l1QzD/4+q/4q1bsvy+7Axxkwr7HTSl7+bq6tvVXWlZnd1Jps0qVZbJkSTMCRasg3DtmQ9GAYcZMOw/CjApgwDEmT4gQ8iaNKCaLgJkh3EVnWzqzpVrrq3qm6+97tfOnmnlWYYY/jh3KLA143zsHH2mmutOeb///vloqDzW/Mh6xA71ZTHGGpbJF/32zFlEJgtZy7YYRhuVkFBRQOgMqVsEAU0BFtyHqasRCPJ/J7XhrS1SdUZo4RTYiRCYxUwFyECMarW5gLAmnKCIFLYVSSFOBVyEIJFK76yQOJA1VkVIgcWkSwZZisCBQsrWDAGVYoHBNSx78VXdZrm1gdAYvDG9WxSzoXMIEwkkBI4a0IIvhpQUykrfyjFsklTSY5cESmok5QyZimMpaiUmbG7NB749tZ8vo27iKhUbop4KTHKJ3PmxaIK5PqpjPsBhpSjRGZDBiyq6H43HJ8sTEktgtTBMoKCM7S6ffTow6e35jN3cVpA4uYCgxr2q9krd+79DElc90/Pnr3hHR0cnxzdf7FtjqKzuziGdlEJc47OBWZ21rOYkpnwxoCCzMVgAQQQtOhJ4PE7b/zR1//BZ770c7/xm/9z51bRhO2uV4tf+Y2/0r//4Te/+k+nb4af/sJrr33m81/8yl+dNUeaRZAsMBBPhYHrMsKffet7v/f1P+/HmPdd7fy945OnF0+cDWOeiiZwChadR2uqknmMikkRWiILoPOm2o07JcqaYlFL1s/CbBlk0m6bZCBB2Vz1vqaKfHvgt+9f3b516+Lp
pfNIjaLNuU9GxFgrhQGYjIYK28qqZjbZBOJJyOiNmLgoTkPJsYANzhFgZVBUU56mrk+sdoY1OFwtF0O3Y8aS8tXZpkFb1d5VjhIkgXXJcRL6SfVGnIsMYgCN5qn44CfVUiQzQ2ZPBFDunswHjs6oADqyaHw7W677/vLy2kjlrE376H3DMcUpFlZh8RZQxajmNAWnBmE/TnPjC6sLZjGrYRoqArIhchpKYuBYIDG1pvKAOTIDd9PkFFtH8+AzzOeh30YpU5rVVQ+sjkQwVB6MtHNnqzQMOywEPaRrlUSgzIVBdEppjlA1VpRjEQYtiF0pFWGjGMcEBAKCzuQsIhoTWGYktx7jJMSsAdQQFlEQsYgCUBSEpeTMKeZxQksgNDs8mGJXuuHpo/fqB59vsBljklhAsvU1OY+O0BJaYxQULZEQkSqKsECxzhMYADDGCHApk4ICUhyTNegQYup///d+9/0ff3/WG5C83VwdNH7Ko53P2rmrcwmNmUCGwpsxWp6Ovf3SZ37mzUcXp2//UNaXU9qYpr772vGzN9f3X3zl6MXX44SnP/6xNbmPZTNwl6bKUgtUV/Z6u+XMmsWCOgBnCL0roSp1VWRnDakIChACggSCoJRYU4xN8E1wKqoKUypQYEpcEXkCBSZnDYKIGm+9h1IYvBuLsWgyAjlLIDlnIrLWTYpFABTjGLMyS3HBxXGEcdf3u3bmHcrVdh8TP7vqt11UY4cpDXG4Wm8Wq9Xq9lE+u9xshl0uVd2ABXQ0xhhc1VSSCv/LVJEITLuBDRHCraNZO5tv12tlzMyKpfI2p5SZ24oa57LolLQSefGVl58+fjKpTLlIVk9ijGGGVB987e1tAjxu8He+9W1wflG/QdNUreyte7cpQgWlCuGP33zLJVWU4SIDkVIamXeoLx8fvHS0ev1nXlu1TWT8s3ef/ME33l2PDhJ2F3s19MqDI8dciK1hATJoQ+1WwRUStgRKE0fr8eZ+/vLBsQP3/tm5CdXicLG5OHdoMuPHV/HZ135YN6HydLgIi3l7cXV1cND226H2drvP9QIPF8sPLi5feXBy6/ZyM06bKTFqJdLOV+M69gaGjG1wP/+5uz98+1k2EAzuhnFSzWRjLFZ1NXPLk+bJ6Q6NMwYcmjROnFOoZlgALJYpq5qnF1fEEuo2hMCVub7cestJceh29e5ieTS7vZglP267blPSkJilnLSrQhJZ9lg6YmTuN8O9Zv7lL3z+7//2b6OtQoWoksZITvqK9SBcXO2t0MGtdivJCGlyzhlNiKq1r8bIqgAEU+FuKAw09smRrQqExr1/eh6cc85drbvZspk3zbIKV9c7QgozV81t6crlvgO11qST5WyO1bNdd3zgX75zZ4zl48cXV/u0dbaGfDw/CJZygd06oSUpmi3sOB14u3QuRu5ZPrrcBcR55bfDtDqcXV3us/X1wtvtXlRSKqxoyAbjDxfLs+f9+jLObSigDfkIUxeHUIU2NImLCovA3duHI2sG9JZwKJut7jxW4LpxbIz/V0ZFuz51mwmNHRMbI0dH9bI1XbZKdLmO88pVqdxuQrOsLq/H0172mYtCAlquFsNunUSvyogGlu1CDTzqN0PKUynGByUzxSJjOjlaEeVUuCv5wf1lpj5UPmcdcwEFfwOZtRDmVXu7RpSF8b4CQ6VkHDOkDGx0Pp/VddjtekWdYmnR/dpXfuE7H77fQ+xw/Ma333LLr//Sr37+L5T03X/x1d26C94W0ZvNm4ACCCEUAVAtJGx0edBYwmGK7731w+47IzRQORqK/c/+zn+ybOpf+Pwrv/rX/tbLr3zl7suf/ZSfFotV+x/UsD0rev7db34t3F/ev3dfErrurOZdOHvz+ZN4cOe15V/815727rN/5a+7eX203X3rBX35C58dbz34wY+efer/8r9fPZw//qf//PYv/RuL5uAw7P6jv/Hf+e99+dfv/pt/9eHf/lu8Hq6irF+7tztd9/vH/wQ3300//vDvfffNN79hTbYX3cLQwlRhtvipuw8dwdiVnHg/DRRjwlxIBnF9lCEXyeIDsUERTTkbh6JoG2OBMAoRyRBBFOrAIrO2HYuGqt2dDVZRnDm+exi7bWKddqlyvs9i6lkIsl7vbObG10M2BiBYcg7SULiwKvZRXI0Hyt1+d+fwcKH++snl0XKW0yQGm3k9Gx2Tng6lH/WFh4fWmXFKUvx21IvtvoZqvOxPt7vaWxS0wAgwxUSOrA1EZKzzzg+pTxmRZOAc0KiaQSIBSdYpKRhThLskkmgL6ZY1t9olTPvFrMJSKrRX171HrHJ10cd9TlmA5ZNNsowFBaFIyWIQRJhv7H8qrKqKCKiKIMgIIFBbRBAwVAiFIAsDCBJZNAQqzMBYGEAEbGZgsYZVGSExFxIb1FYQgvOIiMYAkYIIiWhmiMqJWeSGW4gIIETABDeUUwMUUCiFCoJTDyCFECAmiXwjnlbmwqVgJhIRABHkG+o0gIpkNKJYsu3PeYZq7RS6S1fXUB1g1YgzozBAOajrF+qjoVTPtptgaJtiNK61UFCz8D4O18PQj1PXRc4IGRFAiSl4gxwCGxuN9102l6fbcbxkKPOD+Woxs5aMsgrgDY5Rb9xcpADCKlr2m50ATvTxNPUZp8tn72yfLjDMcfOlZnkr7vbdfrrevPn42Vvsmt31kLp9sGiM1M5MmQHAkgUUQ+grrCtEdDkjzNSv/FEVDgptx9JvU3DWGouUQg3uEOuThsfYqTQnENdm2CXNGoVKUQSNysYAGFJFZBYFMr5wKgVsAOc1qaqiCjDf/G5QbtA8BpVIlJ2xICiK8InlEaeolZB1WFW4RwYxDECkQGLRFFaEG+PHTUURALWoEqKzKKJGEckiARhyhlD1TuX7VCZWMMigReXGnoKIclOBAUUEUTCEN35UBeCfsIrkJvMguly1zjZnV+shFUkFEEUVBBLR51//zOuf++Kcrx99/P6b778v1hgQBEqZCUBAp8JP1p1HG4JrZ/WqXUxDCd5VdRAooGXKsh36WFjJKIJxxjoHjqxgTqoiuYAPNnEiIkYMoU4l51hAi0G1hRGxrv0IwKporRgtIlm1KDhjBJABADSmcj3lDACIUXibpwZs7QwUnLkGUe/a8KlXT+4/nL31aMycrbVJsrUGAYXFGvTB4mCUCJ3zdXCVNR7rRdiKN1VlYqsIecw9yyAgwgch+MrlzMEZaut2Vj288/rrX/71/fpCy2WW3UV/3YSKDMEgxpoxy/mmIO5ferjcleT8/Kg6ilPnjInDgMRVZfZDt+n2YFRUd1MMPoC3ag0orppZhVhyLlOqKp+krPO4nB/kgrtxAINRmZGsNRBC6sdpiJzZeZ9y9Ab3Y496U1PEzExt9VO/8EsfvPFdgYJWrTU5F1cZBONcNaXpBvOsXARwiNA23vtF7K9LYePDNCYKvpBBgyo8paQDWAFCzFzOzs9KKsRovdt1UYho1rz4qVsfPNvOqqbro4r1zk59urq4SlFBgBSg6OK4XZ3Mn1yec47MTIRk/TZhSVMqeRenwgw1bq8uP3i2e32sf+rhy3/
2w6uLruQMhoz1ergIqzvHw7YXwCHk1x+8Nnbd2x88y6DV3FfOX8ehAYv+JzVMYc4ioohGFKIIiXAWYUAkqSpAozEaZAwqiQlJDWYRYxAABcDVxs2Dn9UsNz6OOD8M1cwLg2eRVIxCycUjlbHYxpCxzGoUnSESiX0hRFUcmQsX21rRnMsk7EgdottPAuvBts4yCrMWPVrMDWopbMFnlL5AXRE7wwbJ+EU4Gsp+M2wZxHlPaPI4ZuUpFwGqnB2LkDUbhmkzEvO9o8NPEVwO8bSbumHMLACgBgSAQfCmTAuShtGLKYVB9Oa24p1FkKt1t+kGVc1FydCm39079NM03L53wmN/vXleOFvS2ezB3ZOfXrUPwLWexnZ5S8pG9hfzugpt20ncX60Zm+XyJKXiHAWHSE5KKWkAUesqsoaFFUGKBtuQyLPHH/3om38etxf/9r/7v24P7/lgbhyUiyaASacfnrUtfennH7z+K//G/P4LYb7KYiQCVkiAhiTLNOxTHNw3/pvvfuOH35jPsO+RwJTIz6/OtcAwRWudgvoqzL2b+okAOBcFKoKpUIPGBNimXh2iAQF1AJxKQ81+Hfd5j9oYi4jSD0PRagKo57U5MNs8DO2+OWgXR/W43o37MiY1RrDW2ttbdbPte4ljVbvV0fLs7Nq0Hi0iJOssoMtkCso0KCVRzaGxfZFYoKbaWysAw9j3JetUpokNg7FmSmk2C6UfC8M+QqfsNdt/uU1uQh3CvD56/vysXJ8XRVYFLZoLlWLmVTWvcoXjIIUzGjelfNgemTC7eOdxHNQiI5fFvMKiBspQUrxJY2txJFZ87KaYUrBkHexjbkNwloIjFOMUCkhlbB+jzWLEQCm+FgeGwBHimKJDmM0cx3hnsTpo+z5uLUhAXVWuqfz1dQze10fBzh1R9IaGommtkpoYNWNmKVkwCjKocbnxtqhuYlGiEQTULL1zRJHVgAU0CTMoDow1IQP0nFUtqAKiiFpjrJFUGBBu4qUpRVEWUDAGyFftrdd++tP5jQ8vP/5w8eKtg7bKQIrSdZvaN81ifuLuqhRvrCEPhMYYEHaGRDJPnLIgkPPBWQcAcSguWFehNaKg27Pn7/7we9//nd/2pNXBinVf9vtuvD5e2urWbHd2Kd1+bpGdplQUtKlnOfLzxz+WPM3tPOeFu1y7uwPYgai88PqXPvMrP/v4x48r0t0w5QKciAdZLP2Br+69ePjj956nLIWVVAeQpvW5FDXazJodgQAoiyUcYqytrQ0mUEYcikymLCvX1nVkSWMU0AKQREDEOwwWHLFz5gbDpCqKxMzGAXpLDkHYgBWmIkkBWFjRClFiVQBOExbOk8ldpBCmGDdjzIVvqmqoIrYc3Q++FXB09869y6eX3X6wxoe6RmfUGAANtjEQgTOXT1ZBv4sTcQguiVxc7wGvkIWQjMPjW7ODuu33Q+ONMrtg19spJ2ka8/j04wkgKWrWYCixAOcStVx1gTiotK51q1kUrSrXCVQuXDzfdVP51L3DB0dHw8QlFkGIJbaVv//Srdlhu7h1cuL9cUO7bry+vvhHf/i9J48vHFAwTgwyi0rZbrZf+PSrjx4/N6BKkIoEYyCXKH0R8MEZxXFMWDTY9vbxvR+887YI8JhH3hChwerOcvHo4oKtDUN8Zd6OXX+xEyY83e5yNovlPCF0/XRNkYHeP991Q7l9sqj3/fWmv7VoJ8tqzbOzS19VZ7t0cTnOfBNa+KXP/9Qfvfn+ps+58FHrHh4uGsLH5+sxSU3uZF77WO4fHR4eHGwn9qE62+4OT5rTfTzb9ynL3UN+cHsJgkiQCtfLar0bTx89Kd3BbNU+u957qqYCpq0zkSwX7334qI/5wb1V8k3syq279379137pjUdvDgWwsBJKoZMHh1G4OZmVA50ihclc7bq6sjWFpprFbqhdtb6+XO+HIUkhs88TAikKAodgxFIEvBrG7HDuhITrKpSsQz8q5triweG8L2KwWk/rpvLTwLvNfrVa7nfTmEI7hsUyvPP0jGfVPNgpl9BU7z/fpVharC+mzjn74PbyuZSSgIU/9+qLP3j3o2UdHvc9km0PV97yzJvX7x2+e7bWpJOSiiCYnPPycH68aAyDiPXqDJmJykG7mmwvqkrO2ypK16ciAqFkQbh3//bFs6t54bZpkM2saW1wYBx8fP3fjoqGKamazIqoTW1PloE8X11fq6lC7RRwHHlOpVnQzBiPWdWoparyGWJlwTFEUWscljKkFKy95i6DJjZRKKi3ASOWadfBpDNrpyEF73LJWll/5MYojKZ0eXZYhaWvGt1d7g2Y2mrdlFvHx0+f9VlgyMMXPvug3+H1xaYoAhnw9O6zD37mCw9/9w+/acguGv3h1/7FZx7efWV1NPvST//5t984u+jBGSBQkZvEhwAgoQGMkZu6IYP7/eCsBzDLsPzKz31+GLbbq00zw+vL3cUHj/5v/9H/+TomE4Jg/tQXP/fTD7748Pbr84d3D2Yv/vLn/uK+jGFW//3/8rf+3j/4u//53/vPH7nzr/3jf3b76X/1//07/7HJNtw7/PjH5//u/+4/vDz+lc+9+sLv/tf/6f/p//Uff/qV47tm9fy/+YNH6+eQ4ur4xffONtXwPv0n//fnl/uQ87TeQ+JsuDHkVYwLn64xoKfVgYLhQsxQRDJrMaABE1goAs6Zyr5wcshdBHJHJ0f7qVsu6mGKCZy1WHmDBpR50bT7fck6rWZVKTr1zIiqYo0sZmTVQMBxuy5jZBDHLFlL7U93u+PDpm1CYGzbxbvPLp2RVCSSIoJFgFzmTVUrHIQQ192sNdJhTNon5pS1NtPQv/ry3R999EHTNPPliRq52k2g7Mk9v15X8+XFVZf3qW28pGnqk0Xrgg0IJcWYcu0DiaSxt0aa2hhLFhwJJBmWdSii5MI0JZAys+iCr8Fvd/15N+66cdkGn8mTd1YWC4PGnvXxqhsiqq9cjZ/MTXMukE3OqoKsTBYVkQwpKCmwKN70LuAmI6zEYgBAkR2JMpJRZ6yCgopoGZlAiAABC7CzqiSqWiSLIAo6e0PCRmBNLKUI8yeVIgBl+MRRpgp0o3wHZWZCNECaVRmcJ6MisYixIpQzTxGmIohkUJHgxvEEqiLwCQzREKgigEMpDKlgYUykYRqauUdhN1uJGL99GrmvAiFw46rlrPaI+34fp2m/j8ux3/dX693T9eVH2+unaXsZxy3GbMAlFgo4pVLVQcYuh6vlYnnY3o27dT8m0QLcocDiYG6dBbL6L2E8gDcVP0RCgJO7L5689sr15Qf9eAaVWGOtDjKdnr3ziNXturjrE4t2+6wqQI4KD4Urh0wgQFIAVAxKbUGUCwKY3Jy09QvL2Ry6R7uuTzAxCZB1qnJ4u2YcQhAQJmOIdHmM085eD5kTGnJSEiGyigApM3mLlrAoABMrR7AOvIOcWQoZNKTCzABoVYlASlYARVS96U7dtNJARG+8z2rUWwmBSgFFRYM39llU1KIKYAx+8jMKEiEisoIiOIMEqIgpl1FkEczK2f2k24yxSLkxAKCSwk1kTQVuvo
scMtlPmyjENy+fD62S+87B5fjLe3mxdXQ8yeB7nlcXnkJh3qltHkrIfV/OmJrzfbBESHVI7aupt6G+xy1tTGlJziAOJozFwYsSZh6WWaiikag7Oeqz6ySnMfoDh0NtA5KhoZOd35JjBpFBuqedl3HrCM27a1bwZ5czi8/ByX3rQ+7w5rtCIaAbWpXcdSWWuFILM6uxkmkjRvKxmzEam9Q/RjUbiPjhM2jU9CKZVNnCI5RRyFCpKynaPt8ogAaGxOGe9PK4swcyqZAGbzmSGj5Mj4IY6HTZ+5fOsbb6/mDRT1datQrTcoeJTyR1Pc6lRd3faf/+gX06tPzldOb4bK+vNV0zncDHl93T1qiMQezZZl7E8uFj9/vf2DD57+5tP69vX1ch7O3z/Xo+Xm0D342lspTv2hf/j0OL64O279goz3dLebZrM2hCpttpaonVtPZeE5xZ3j9O4H85/cVFfr9KtPHj8iXD97CeRYKI86TRls2MbhdnwVlnUFTVWD9BiC66YIaGLJeH8Aa6xFjDlXvk6CJrgqUF21V8+vR5ZJtbLq63lJOzDYDdGHqpC52aVEUj1e/ezLK/929Z3f+drnH11aV9u68s6kWOZthVW17w/HTTMHqBr76OvvZzCXH39i+12dXOr6kmHKk7OlcnYoCMG6yvXjdHJ+9uRr33xzNzbHec4Ha/G7P/nFth8cBWtVYvHBMZT7IL4AKOFs5tv2qxomJxRO7509+pu/8Rv/3Z//caisAZGiwpJzMcb200DGDIVv0nSxXN3sts6Z59d3TajUWxe8yckH78nGorZtqS9taC9vr2OWksUiBm+Mo2kfjTG7w/hgMa8VG6Gbm2F5tOr6AwgIa7fpGqC3H5//cP8peeMMzYz58vI5q5IBZCMZ9/tClT5975RTeTnuTaB5437jvUfru/6TV93YpdDW/ZQA+XhRf3m4WhgfsLVM11drVD0+qtFAVEtAv/3t958/W7eenAub9f7oaBFhmLpMVt9/9+EnX75oG+IiINjlCESOsHV+6UJTNVET5WzBGCQkYlWGwgwD4PW0M2RmlctpmM8JXR1yvx72FuT0wUVKacp5UkngVkfH/+vf/1uJy/dvN9/77M2TkwuZ4skyzJYn20M2WABg5DSOSQG7KBkxSXn3Yfvl52XWnL14ecsFRi2HfQ+q33j3w1e7g8Hw+PzBy+sXo0AuRdKElU9FE9i7fjibn1oFMrA75Lu+86a5fLM7XjaPHs9L1oNqICiIhjAD5zJZEyiLBhyFldkaa61xKko4TqMxAhZdoM2QmmA9+dV8xsD7IfWltJUlVmuh8vUXr+4Q1AndXO+a2tUN3B76J7PT/e6QkLUJv7i6kXn95L23P//0S+vq42o27svbp4+4xJJZJOdc0BuooSS+6eLrV7v3Luarxj9drb7/8vWmy45QyYxj1lIYsZk5dbBw5J3rux6cbQhnQUxVLl9cLU/av/Nf/YM//LM/l/34H4yK0CJ50gJT5pwV0cUszQKBZWB50/XM+LWzCjmNU6xmfkxlOfeCOaD95uMHpzXsh/Vpc7S564+qoyfn75vmGMiN/QACMpZXr589eNi8efmjH//437kmhICtlVLSctUuj6qx48hFi/3i8m5eN5IUFOeNz7l03XTenqXIKhZBN9vduMUhA1kfvGMdI2QbzBhTHHacIjq4vtwu62Z/2PYRTk9OabIl4tXlenY0O3+8QtZxGpvKepzJ4c1P/upPHj169+jp42mYdq/eGKzeefDetx4f/exnP3BAl1fr2pnTo4uhn7znLHsCVQBrq4UzZFzd1HHUt5+8G2qXBUgNiN2XYRRmoy82rxerJ8cLd/PmTc5IVZi1R90gCGqtJWBniQURwRovIhOLFK68JyVAUFFjTVU5NdaxKRpjjMahojTBBjGmnalHBDh/sPrJyz1lKMF2MTUenpz401X4+Rc3Lzpl1VAjKZSSvTOqEsS2qmVKxthImJ1ULUw6hsqNOTuEk2Ccl6MTd7x8/Oznz7vrbbAYmJxRGbKxhix2I5vKJxaThVCNwcYbVK6Cawoqq1GdiibR5SI45/Zdz+wDmrrGucU//cFfjElEcJqStw4RCMQ7/+bNzalxmqxgWbbtYewMm6lMZcqLymjKFnLJ01FdK6hztrLuZrOfLxY5lfN53U+DtXQyn223KQRbhPvYz5ZNHcxu36tAtz8wKCc24HxVX77eB//LVXBPh+Zyn+ohQlG2hIbgPm+YVXPUzFgKK6ITULofDSEIZGZRAQJEICPi0Bg1KMGQIbUVOW9JRUQsIIgiEiHofXpIFBkNgyiwilrKCEqAIihIYFGFEByqs+AJQe87bwT3ZB9DFoBQonJkLKoe4d7Xdp8tEgFUBEAQ8aROpSSO0RqwrjWVt8Z55awgSTNLKQgZMRMUIUuGOVujXEouaOCrLhcgKCret6cQ7L3RTYBFRaFCDKjWKCgWYK/qQQkQWB2SKBpEFiJAE1mTGDC10SDGEggQEXiTRYgUIOu4za6Qslrj/MIaj+SCVUuQsxZAYCnZCvhCTktPlFFYcxdVFAlYFQDuMYfwy26e3DeyAJDBCimKAbUgM2eBOU2csxhriwqAsKCoqIgBrcgZ1RhZiRQtqUiW+v53RgtSBYixkBKrGSdFZGfRG2AtlhwxIEDMcI8pAxZSQwDMwnp/rqkIqIJMiMJIaFQQ1ToAuR/+ALCACqpJuQhaBBCU+6RQLIqIyoKIhGgNWEOqCqyIYPC+R3L/t2CRAOB/DlMAQExlbnzsp1nVeDahnuW0sZDb1hWnKiWnsiBo29AhXHZTSswGGuNCsJFziTIDOWlDRkwWBNDP6zxlBS65gLAo5KJC9yJBUC6geLPtpLHWgHNOEKeUppgys0UQgioQqOz7/oOLxzm/QQQRtgbUmVzUe+vIemOyCKCWmAza5XzRD9Om2w3jhKx1MARUMoNKTKkKnggzCxNYh6QQAtlSyJjXz+6y1cvNLqA5qf3TU/vOE92tv/xX3/3rj19evv/0a8swW6+vhqE/WSxr35K6OA7CYAwhl4erZoZdmYbbA7Abj7bb5cnD2E0ckcSgFlJ1Hov2P/yzf3Nx+vSw7t46Dv/p7/3Hn97+43V3KMSucrPZzArwzV2+vhumzjtbV5VmqOt6tzvIVEoUIiwmbfqoX17Pl8259f3nz9+8vjVVePvdBynG2MXLy8vaeFfA1t4bQ6DGmjROAoxGj45nBCgWFWTXj+5J+3f/87/NP33xZ599gvelRAVglSxiqIASGwAlUWHBnEuRcDJbLtpXd3u5L+kiPVytKhbf7z54dDo/ecLV4k9++FdF+27YmsLzyi6sHUu2jhyQJ+Ju+PrTB19ex+3d9sPzFQA1x20dqLzaEbiPPvrMSlnWFYHuhunh0Wwbh1wEyWLhypvW2cj5ULCfCoE93AytAd6OcDkpoZKpCeqKFOWHn//iN3/7tz44f+v29SfBVWZRDbshOGudm7pBCxtjfeVK+WpkKqjGmb5LjIKijoyMHHfZVXa92xuPOSlPCUSNhVCYrMXalsxkzbAfCBXBSJRpGFENl0I
IhuwkLESKcp9rdM6I4hQTZbZQsgLHg20q9LRoF2gsqCIklHKz6cZyIHIGqQq+oKa8D6YQuDQcGNBYw2yQXGE0ZBTQBgdFd8OBCmoWZ/xiUcW7yQFIKRqT846QvfXBUGJGgOCrWT37+MWbnMrc8NffevzFzf6zm72gIUQRKfc4fAUiULz/TgUE6KcirKhatcGD474E61+8vLq8uT5bHv+9X3vvqKr76bC7WR8n/p2LY1K8eHBig50OY+tcijxNObHJOZFxuy4NY/S+iZM5qttnfGONMehZIcfkEAwwMasIOvDOF05tXZUpIyqC2MaLqK9o5UMJJrTVyclKFaRwt997pKqtd7uOgDCLMRRCSMxtQCjytYfv/OX6e965OE5VBcFWEXRZNwRGlRNi4axkp6J5onkzv7zcNU015rHywbnw8ubAbmZsPdxtQlU1M7Mv47yClvI29odunM3mi6aOffGAQxeNqcZpMg6xV9eQtZLSZK3JYoDIthBDTOP+6PTItVUaRuvJWIrjYAm1YIw5BO8rn2IRKFIErXG1q448p8mOxTN4IJk4p3IYUhqTJWeVxt3ERWwFUfLDh6uSiPfje48udkO33+43V+uvNsm5xGEY9ntvwFnizJzh6vV6Ej57wrvt9c2rm6bJt9f7tr0wNpAQgPWBNnddYXTN/Oa2W9VNpYepmKZxnlA4SqD5ss7DYeiysEFHFnTYZYMEFDgmEIfinKuzctGxG03UFHxVmJtZfTj0Dy5Ob6822UiGWBrqdDevGw8kVE6eXPzw5ZsxL8ZtKQtLt2sMfhlo3x9OVrOYofRjZVBQYkwpY1H0zgx5urd/3PSpCmYZDAiSmpjUBBtZomIBbUhqModSBsFcStJCAKRaBBwZZhZQ/goQCSJijXVNC+hsNkUEIqehPP/yrqzKt9tHyxCWNQ7d5usfPvj8C3n1xScPYiwxp+4K42GcNp4TgWlrM2vNyGW/kwmwOZpf3eywT4vVcrPe/lff+SDuvzh7tHKKVWP20+SNLbkfun4aYtfLboQvtrItoJBLQeOSt1PRfHbUlJKpS3A3vPjpc1Z6ffWmQNlqfnXofuXJo08/f+kWwaBZNbOCxtQFujeh26H1Yn2fJ7Hu7tDNqiqnFKwVUmspeM+ZExdOcSqpnlWK2nMMrZt6tqJgipb4d//O3/ijP/srT25I03LlTB36Xdntd5DtzzbX3/mtX/v8xy+0kqQ8U2eFjAvswtGioTIgmrZ2xhWuKzpqSDdh6XuT4z4RSBNwGIEMSMqGHKJhW41sdjE9fHpyYt2LL794c71D5wOQRUCC1hPHCaRUwUOW2UzPTwLhV8cGcexOl8tffffDP/rhj/ZT8csWpGjOMaa7/cYbd9o0l130SpUJ1plhTMzknRXhzCWXQoIBQEqKasJymcf95e1tShmELBAppFSkaAYQwsV8PgNqEB+t5p3CJ3ebk9qDweao3o8TRrh5teGkmgp6M+U4W7Zt45PA6fHpcMiJc4o5b0dNvKzx4ePVuI6Xl/vbdRcTGGcnyQk4WAARmbiaV9MUp2k4dFmIWuAFhX4cX9/FEuOqndd16GKk1RytGERhSZlfvbieF2rIxa7fcS6OCvO8Cq2zOfcxAgZb22DIF4QYJ7LGWBh1utwPG5ZVPZuHMA6iFqWoNegyGKXcTfPadzGdPVh8+Wz9H339OxdPF//X/9d/f8dAyd1cHt5f1jLubbOovE1lioIpqzXGWhqGsRu6ty7euvrZC93E5aqNs/pmdxBmFJeZZ6G+u/tyvpyth/1uGvcppyE5oKnrMwMZms2WhISk/eFQuerE1SdHs+qidVqCg23JgG7gTKjVPfqehTSDNUWL9Q4tiaqoEhlJ2SOeNNVjB21tP9+sa3fkAr252y+WgQuDKmjxCJAT5/jyZt+gZaGr1y+bo5NK6EE4yoMa5+92mwfnq/VN12227zxYfePJk59++ebt0/NJ8812jZW7WMyb2q7Xu+2UYi5gwRqqjttOYXOznbSc1xVPwsbEIsO2dwKNkfPj43XJZczTVGoyiFoHb5huNc8ujvZj/y/+5f9E4HPf/wejIiJwCFF0kNLHEmxt2+Aaeu/p0V988WpjCzj8LKqJbMmk7VTyeNrWXik7TamLhnQs17vRhZO6fXtx/Na2TBMoGK2NfvSzHw3jdurd9//6+4bcKOKN3Wd1VSCG3aZLIxDrMGWVIg6MN13feU9vPXpwc315tbmJIz19epp0YgeZdTar5+1st16HmRyfnPUHztOQh6E2SMXOl7NpFDJ+WZ1dPPrg+ubzLg7GuaZqZovWWhf25s2bK2tptmh263T96jnw/njRtufh1edXt4c17m7eungyxHTbd5d3RYcpOLZQ3ExU8/Xu2lTLumpy5cf9fjr031x82xkxlZGYgm+7AbEkobLvDkfHaimP3SEiLgxVvsm26Ye7WWWsRVY15AidtyHFLJymNHlrHCGDsqiCiiFEqrB4h1xMNwzPX6xPZqePVqvQ1AOV8dDXLHPMXdFo8lvvHD2emQL5xc12SmVeB9vQ3XoQ5Idny8vNdLcfHVnvnEVb1ZCEw8w8OAt910uWZXCOjHCC7H7287tZk1wCm6mxgSkdxjJVMqvMW4+WQ5ze9LEhF0du6wqLNsZ561LMWVVAr9aby3G64btvvfthswg//Xydi4rkwtK3DAAny+V6iEOMtaWLxfLNYVMSf/yzLz5YHtezozzFxfHynZX//MW1vR9sEU65ZE1K5qhtWMpu6MVXaCADh9blXCQrCu7HDBY32/XD5XmWVLLuOBZQQpj6aEMVU3r48OE+HdSbLPzV9kAZFLWIJjVqUMEgWQPGoCHUIsqQCzPTfRijAGqSYr6aPAiIkNxXndQCOFQiIQJUawwqACuQEQVk9mgE7n9UQBBsYYGM94JiFQULQAqIKqoi920hY1AAGBWAGAnkl6wVUUAgpMaoqiZFViUA+grNo4iEqKhq6V7QjpOaqwRHEq2jwmIUknW7rF1i5VxZC0aHgcfEQDALZBCK0AGwL4KAxhOoiqpBMERKdM8XziyqZBQQVQ2qIafs7mdUSEX1HqyLYJhBDaIlASkgmUELSpJ7stLAWRHu9X/GWDJowHAGyVQihbZGNA7VUGWVs+bCBT0OOFGADKTspJCSIqE6FcMIyLaQV3HZeAADRCoIKCAdYjQc0RkKaJihZCVFMnRf10IkBUCiJCoKBvPZotpO6SpHK7a9TwQhRCZhIasumCTMDAbNmMQQEAE5KVksMCiCQkrSEAVv08CkAArEyoVBDBFmVVbE+/9qQIqKama2KE2wyDSOyVtHaPuYmLCpLObMWQSwqNr7MR4ooCkiBgAMiCoiAAncR+YAvuKyiyp85TcCgKKaRX0VAGF9ONSgfZkYUlbvg6VCFt00sUw89mlKMjK0IUTUaLSPSgqZ5GEdqqp+tdsCGRXn62qKRUGcIQcIZKeUQEATm+DB2u2U540NlqaU9uM9Er3MajekzKUAkCIMsdweNqLCWUXAOKcWpeTa2m7sR5GiaAmJxJFzBqfYSUwWSUBQlUs5XjZEtO+kCj
bFaCrMwqwcqlAUtusdKfRdXwjmlYMp/ycfvvUEB+Ht/+1/+MfP3uSjZn50dv6j7/3b/Wb7+NHFs8vrWTs/Oe32Y0mJm3m7WjZvnr189ckXMJUOsveUJjEgtbXni+XMucqVzOAckkLJ/fWrV90h6wfnv/Ht92ZkD15E7axuxrv1zJDEKH0i8EqYJAGzUe9bVyAboWAMIaYxA+p4N4h0zpLm3F9H3Tc++LPl0dPvfHM7KULZbQ43V+uzNiSZmtaJYC6sYwEViZpSrmaLs4vHuJP+5tY4icUkkVLE+/t+rcxmwVfVeju4VVVZH4cknFLWrhuRNU3R2BAqb4SPKhyHA4i9e7NZnZ/+3d98/+bu5uPPd51oFhPB9dPYpyzgmpz/znfeq6fD0eJouJkCtvuNtq2vvNU45ijRWTK460fCdH5e/8bXz/7FX3wcszHBAmNJ0sfh4qQtu6lDB+RSNt6aknPOGUWBS1hUZw9nW8CP726++73v/YP//d+/+kf/9y+2fb8fIXFG5xlzSqRIzI2vyIf7VVC1VUrFBZuzWCIGE5pAhMt5SJ2UKRtrilJOnGJGQAcUjIXCLBwqR2AtmcKqagwaFQZjDFAgYNQmOCKKzGBMLqLKiIhoi6ogchYE6U20hlTYxAIq1pGSY5EI1HW9ghKYMm6tdSgFOGtdFTCipqrbYWLEihM0ri25K1OeWWtBxn6cOVcZs+n2s6rOykPKFpWMegsKBArbYTTOtK7+4Lz53Q8/8J+9+vxqR8GIsuq9WRHvWX73NWdWvfd79n0elUCLAeNRWIHFAIXdOKUyjpO3ZNW3q/mpsrIURsxjAtGhj6UUXzuygOCiaE5pFJ6UCOX69uq4dpJktmw4Z0NUV65wnrpxKhlDBZpjzn0cGI1lLblUletzGbvpbHUavNjK7Xd3gWwaGckEa+LQV844QwimlOJDmfapAXhwdv780+dV8YduSAc4f7TilJgwT1xQxbpDHGsf2qopSccxl2lTUgYKUnLKstuPVLlt7LIW9NxzRylY0qDimAL5GtWI7bsDFerH1FYVignWAiGjH5N0u3ExryMNo04sgGoMmNrPWAxGYLVD4oWhMuVhP1qzrNnAFAszEbjWHiCXQFnLUR4rV2HhkpmZhQHJjF1UoWKklASo1Yzm8/n5bHV2tpRIZ+9Uh6vbfbcPyt3VV5V86yFUgQzdnzsFT01tAc327gA65nHbtFPbOGJtPZ6ez8qY66bcXQ+v3ggkEyOOjR7P/cVR6Cd1vuJymK8ej8OutaYXUSJVWFg7NzT1XFestjjjHOVDmoaSGLFu7BjZWA9NM0iz2w0PV8v1Zk3kmY3zbY+zSpIDV1s3lduc7QdHZ36dnu/7atG89+7FszevD31ezE988N22lyooSWEby+SM9wRcUlGehuJdtRep0UAyINkrNnWzHoZRtOOSshxZ9EXBGDQYQQYRUkJh4eyMcYCg6JASC6FR1ly4ImwWs4BzJCqbRIC7uyFtc7AvY49H33zbGdPUIZDdX1+e12G4fPlkjn3P3d1YWWdYTRlXMquPZnY3ONX1duw60xr6cvPmrQfzh21RswCiUnQ3SXO8bCUOu+tZkODtj37+8pM7enHQUcERokIDMEYOHqYC5O3Dt5/qiJ/87PXIzG08P744fdnfXj+/yWf/q//st//pn3/3g/Pz41l5+72LH/zRX56fPP3Www//1Q8+MU3QMnnCxpmizCCzOkQuKCSRVW09a5Rj43wRTGmU/mAEfWVBZd9P88p/9uzybLXcr/dIfHwcgvdX19E5P016iHDY8//2H/wv/uWf/vliUc0gGBewntfnD2aIt88/Mg6lyPbFs9P333336Rme2Z/+9Iu6WYAtngykgjn1qkx4GMqilQftDNbdh/Mnmxcv+3j98uq6CQEsWERCbGf10bymUrp08Ib2sfimqhf1bP6V4uBk2fyt3/6dU1+t92thuLvZAvHFrKlDo1iMd2PSytt3Th9eX72auWY2X23GBM713a5yJiUtUmDKVV0fr46vNuvSD3kqaB2XYgwuV+3yqLm8uk1Fg7XWyIOTWRrKbkyvtzsQmLdNAVh3AyCEYOvK3APjYj+tlnVbe1/smHkYp3GMVVMjZxJwwUtXfJKb7dR3kwM7q0wRDpZawsYRT/mt+YqnHAxe7+KQHBrNKS2app7XycHLTddnORrM+28/fH3XjSnXZB4fzzdDFJbzquru9merozrYbUre2kywKfHY1sZS5pImnTkSUFUUcrvcH6YppnIW2gWoicNh23VSisvvzZddVd6sN2+2h8Ito7n99PkDs/ib7z76p3/8h1jZKtGyMks1v/nB29frz3eFx5ILizHNoqnLmBizMb6/VnNkYs/X66HKqRuGBPnD84erWeva+R//+AfDwL/69ffJlqvDFhwZdM6YGEeO02K13PQxY3LBgo1nq2Mf/N1ufVQ1/ZbX06iiJ40FThgEiAChkKhKzQTARGqMBaRcMgtYZ63I3NiqGw/T7uHq6Kjyz9fji5vtb599GLshS7neDUX56w8fT5j3ONbVQnxt23lSyIc4p2rQfLPrpA5NJ6bA9qZLoTFsHq+Odpvbm23/tQ/f+dn69nU//Mrp0SjTchlOQnXIst9MBADG4GL+6d3+b3zrrWFMN11etBVM+rW3z2uEZ+s77krKcCjRWiCDFmHo437IttIIsF1PXnor8h+MiiTLFLP3trWV4LSqvCB8dLj7eD9lgVljtJQta65nZMOh21tXwqzq9n0zWx7PFtWq3XbdrD1dnLxbzc82Q5cMNMtFM6/+7f/7H766fnV0XH3jW+9a3wJqMGRqe3vXN3UVWY3BLMzGoDAX6FL35GLVoEHjcupyLrmIswFtcgiPHixvt5lZhn5tbaot7u9utgOsp/zW6mG3vyWU9dTLKLEoSLx8/cnVm88N6qPzk+PFovGLMaVZMxd9vR0OWQsY8/bj04kPI/MWeJvuSPIQ6fYwlKxSTGXxru/nMz1tG9tyANcs3tlLDAEiD8vz0zP7e1V7fNcNNqOBRfCLIAPuexI+Xa3qGtra9ONWBZlSwfqkcmWyrElTNoaEi3cmxlFAjxZ11ojKKZe6WpCx95u7ksQAZNCkknMyxUIUROinzsyCq5wt8vTt081uZGfrxtwN436cdiNbO4/9OF+66qhijIAdQjl2vrY2CXm0IdD6sG58lePAOSHqad2Mk2yHzNlW1MYk1vq2qVLMWYQR1j3vh+nx2ep8WY9pb4FTMMrZgUFhSWXqx9a6FIf9es++cqSVh8M+Dn1Z1lWf4WrXJ0wlwfnSHbWVs0oKh35HClVwahefXq2/c3GyXCxeXH8xXzSk7AyQt1PKEFyofCnl0E3ztkFI3nnNU99tD6xvn57VdXWYBidBUN99/Pa+OwQX7voBrUWQtjIVuTHzxdkcddpeXROaqql+magQVERVQQQVJCRUJAQFZtWiOSszKijcj3EUCisjCygaIFQkJYOAQgQCimgATVa2SoikSqQAoM7ZXJKqFmZB1IKlqArxPVRG9R60nBNkRmBw98oqAAG4752BikFiRHuPJ
BYx1iiARTDK9w52VbmPFeEve2iISoAGEFijSkSxRXLagyNj3QA0RUmleKfkCEFNLl44oCEBRDJAAUQJxRkFBUVmvX+OVBUuwgAMhEgIJRAK6CAYFA0oIlgVA8CiIooAzMrGZNSJhQEr1BoU7i8ZFZEEFUkN0lfmekBk4BFiJ36BrjYAhgBJmRQ1m2nQMqEUFDAGEA2JKBKrZdcoepWgbgFQg6mADIAFJmAG7nTalnyLyBZBa7HdRpWVEETEWcNSVEFECBAUCuuY82rmxv2UFcYEzqK1qIoKtkgGEms0KQOgBRTGGMF5UmRBIWegYGHlwsaae4QSqxohIJOLOBWLaACJVVCV7tPHRu/BIojeICNaVS4akzKpAanJZoyMaACA+R6SrgAKWESNkiJY+0sXHioB3Cv+ALDwv6+gWWvGFHciR02tSlKSQ0O20lzQCYKmGLWIDdU277MxxpkCOMTMgFGpInuITJpOyJzOKjHm5hBjKmMGFLGgogKGUNUYYAFnDJCbzetFY8bDPhaZJnWNV7LW1lRUSi4sZGwR3na9AwIitfYwTgYtAXIqhlAIwVhGMGRmzfFm3AzlPmaklSVlDUgOUFiOZ02og6MjNfji8jYrMyiSfbPtS8qKyAl8ht989/Hc0I+/vJo2m62KWejFQ7OYrZ9dvYpg8tWr01mdMn3+5S/UNoDVyLzZ716+/LhTWD04qm3lErRHy6EDE1PjlsF4hoGN2SVg5eOlDzkaXb94vfnoy5+FJsysWS1WocE54M2Xr6eUVsfzbhhAGUph1GE4MIMKGFADUDlrEbgURGNUp5hJTe1b7rXvptvXz9vTqhi3fHT04MMH7bsncnPYbm/FSzpMBPeneVRyDkTvv336W3/z923P//oPv1vITLmgIR/s/TmSDWbM2bX+7/zH3/nuz366nZKIsRTWQ46xa3zdGclkJoEX68PJW/OTx02SwVeVln5z/dl600NiBTOCKVyMc0TmsI5PZ7Oq6GZMUT0zWOeBAgT35nZvjatqYhfuhttqFc6qBWr1//nuR8Y0y4aEIBVBNBOay+0wq4IzlqzrNANLTMkSggiREdHdLhWAg7r/6Xs/+f3f/e0/+OBrd3/8x9a21rpIdBgGQEPWnp0cxRRT+ipuLZybNlRVfb05TFk4q3O2OV5s+l1RTEWmmIwxCIogxoUiBtEkpijFWnKoU4y5iCXnwDowjijlkoUVNd+j9FVKKTFlZRU0ScDc3yOYOOUu9SZY49Sj5hhhcACADo03mTnmQhwNMmBxBlDLer9zlrhM89Y745bNk8Y2kNK4P0jJ1ts61HEqhzFvpsk56xwpa+0CIC5rWiBwLgZNN2ZAQ0QvNsP00Ud50tN52EwypqJEhlAEGIQF7k+YEVFBDaF3ti/lZtSU88mimjsyBpXvO3dGwdlQF4aeRacYvEkCIkpELAUIDl2Phpq62g69AtSzRTubeWtj7matTzrKMJKjKSUmQVG0tq0CAvUpMhIiRYCpj2PkKu5WbbU4WkZFQuNt3YSZUVwtZ5vuZkx7W/kkOhaxhMVQP0YFjZyfb6+fvvWWi9Zura+ssVSkFOBdHE0IiuRd8NZbshQ4soKBZub2sZsvfWNdjtEKrWwIdb3t+iSTip1Tu74+rLMIK1jfsxogC2qIWMETWqJJuBQ1TE5JIpBIKMmRd2hNBAFNYyITkA0KSOQyZqvese3HfU5JPaGlujbkzGhJIQHmtvbCpnAmY2RMYAnFCGosDARHR9WTt08fn50cbvflTdf69vkXn0xTvt31Jnj8ypEBKZUSszP24ePzv/7F57WpUy6umC5l58yyqU6OF1qGk/mqdagpyhSnMuWDbDo5bR9m37SzBkv81d98+sM//uhUfF1XRLyszdR3wXrBzKCEDKDbboJgFLOrCaRIUiPeIL118viLqzvr/N0onbH1vLnuDhd1qPximnYrf+LoKOU7JJqYyXlGvXh0dLN/9eB08Y13H5a4C8aU2u+jXQ/VZt0piKNCWAhxjPlu3z29OJIirkIh48mBtzdDrC0GLlkmFR5L8YTB2xmZpqkS624YCYo1NsbiEL21CApFau9EqctFRA2AihJa4yok08yX681NislC4Up/8fFHm/WGgls+ePDTn/9wvYFhe+dGPrZ8HIbSv+lTbGbzo7q9OxwOQ46QL47CFPMumJte+sazw//mP/k1O91UR7Ulv+8S2ABQxxjHnq7vps8vu5+9gcuJizU6Fee8M+gRE0tVBV+HnKNzuL2+YStDKTfbgxlyXPdQudd3++Xp2cnZAwzmkMYXn76p+Ojh8Vv/8ovLUdhMY/DGWBpiOVrWILGbplD5o+OF9c2aZX52dHj5MqZifNvp8Ae/+qtf/uxHuzdRnAOkfiqvnt8FA+sp+8YNxVtbI/SH/eScZ+P/+Ec//Y3/+r/88MGT9WEaynS6bCMOKd8ecl4ch5Oj5eXr7bTemfpN1bTz6ujJk29stn2iftm6abu/3UlBEHQArg0zHxPk9Q++91fbOK2W1cX5xVFncOhrD4C4WtZ1VfWbrq7qwxiXi6aqjNqClblfBd60MNI//96fHjRmYc2FUWLmkxDq4BOjo4IAdzevVlWIXf9odfK6f54zUGUzqhHUAvO2rb3jaSxj5Mih8sPEdeVCME3ru673ZNCppDxJMidHE/Pt7aYguobGPAn4MbJx1FT2uutDW2mRi+Pz9WbjQxNLt+u7lPSoah9fHI/cI0m9aMj6/SHvpwTF+GCAZZz6lZ8B2ukwGvSDga4bi6Smbed1s8vD2ar1zlzebBOodTbnfLXevv/oZN5gbRthRutvh7xNaWHcew8e6jBasqva9SklzpGh45QUR6vEFBNZMt5SnsaB8hi10qqZ4KgJRXlqbDdFRS1Zn5w+Llm2KXeMatBh87/5T//+j18//+LNXTTY51Q37nLMf/X65bcen3V3nZExp0x1tIZY037op5l3R83jty52m6vlcdj0UzfK8enFf/Trv/tP/9k/efrtJ32UfjQ5yWFz9+rm9ujhuaCE0IxDZ40Z+z6Nw0DhoNy2iy8PXXc1jil/sU0+l0Xjz9tqVlsY1KIzAo2v9jztsk4cZw6PQ2BOAFQKsxhj0BofYzlyIQiyD8M0onBT2U3fJdbaV2nSVTufDuP1MFwcuROnV+s3AyAafOvB07tX187jYhnQ+2Vdx5SZIWUlr0nz6nilvi2aS84T0Fh0108jgJ3kyfnJg6q+Xe/3qaRQ42L185fbE+vmKrBLp+fzUeT1rj9MmmMpZJJoZrBk9gLekjFEFuI4pqKLZW1+WTj4alRUV00bmrlmNUgzr6ou4MvdNIEYi42LXz+v3OLory7jIcfq2M9t6Kc4u1gdLS9igc3zq2BPLh6/a9uVqLHI9qS+vP7yoz/6Xrq5IaC+i19+cb0vOXgLoCXn+XxuHVYeifDuzQ5ycMagqrAdMo5J6lp17JvGtk3YbaKUuO0SGeqGZIMJDRzPwhQji2oRm6JzM9/mw3DgRE6xbl0/jteba2+xmc/R+amoGVSKt75um5NdzGPifhqrfvOrH7y/3u+//Pyz
Me4tSkR7NWRWOr04KWNxoGj49W5HCRd1rWRN2/bQgQ2rk3ffPvqV2+12GRaqrsu5jDuJvXAmkFJKAB23d1piKpDWrHpz+vbbSZgyO4NggECQ0Bibc5eBhIWlTLlzpr2XNwZXIYohiWlaby+XfnkyP17NF3eb2+poFupmKLLZjuvNIXgbvF6u918+u/ZmlhIUGDRP1T61jZ4f2cXqqD9sd8NEllrr+64gw1k9f7xYHDYbZ1yU6ZDSqzsu0fBUbDCN0920dwrLuYvRnc7qdRzJYD+luq2+8eTRo5P6ky/vXt5ymvT8eBF8wyGrsBRxs+bzq/3ZeXU0dzRCYQsMpMICPJSE5vV29I5mjrppsgCshpykBD/74vLB6ek7b5/P2uYwRUtYhGezJovr+yiCueiY+1BXAnC739jg+1RAdZ/yw9XKeGeApq6LmA9Dd7w4aXJOIGpNFOiGnJQe+WoapuPZcj/Gi0en8MmnAMB8P10RJVQAC3AfFxHVVFQLlAKsIApw7yRHFVS0gCiIQARIKARGkVCNMIKgskMG1SIEaAwg3M/ZLbBoEc1qSgHOKAwCqijOAKApTKpQRAEEUYnQEBQAJhT9qhAHoERgAQkRAIooKBCi+SXFmFWQEPF+3gXu/lWDGkAEzCydqFYGEMs0EZKyIkMqsE6FSBxgBRBAUi7WOGRtAFpLRQRVCwDj/eEygEgGI4rM4IzWHgNCFMgKKhBAPaEqsDACGCQBFUMd6wCAigL3cSdKgiA4MYtAEbHWoCoXYTRTzAF97sHMSNQoOTSWGaAUYJQJeQ/akcuWrCKBKBgCdAaaYldoZyANacPkASw4BCGwHjQDOQgNHGrlqXDRGKVMymJR6P4ylShnJQBDyIql6K7P3uFJ42/3hYnunwxRQVCjIloljxZBcyGwwMgAkAEtTSqWwFhKCaakwYIaFRYEYtUiAEiqRUENAIoqYimshGTucdU4ZgQAawwJAUBBVLSFRbUkVLR4XzG7d1sziAKKEggKEah+RekEBSJj7uuEoEUAv9oejDEB2QHLiSMqlHIpoCLZGnsYowdRheVs9qYfBoAo4AljylwYydiiSkUEthMnHc9bf75obtaTFAbBEBpP5jB2qOqDF5FwPzq9fwVsQAGdr9GMiYF0l0YuBQCQnHdWGWPmuq2ctd2+I2NVgQgW3hui/ZTAmEPMjTH7NCTVUiT4EGM2xtXBzWurivWiTX3Ok0TkalE9fvqWMeXy6g4NerW7mELtEdRl/frZjPP66O2zf/39Z11NHz49fmdpP/rRxzeH2DMoy4PTo1N7+Oajt+8OvZkhzMKr6+uNjMWZ3/3md/70e//uxPu6Mb72zrrV/Nz7WnRbCA4pemuPT+bT9mCxvHl52HFualoATgQQZh//4iM7RTS0n4ZuHLylqvLLUN/t75iFyEnRMTJnFebKUzurS4y2cqmoJcxJiqirqzhq4Wmdrm4ub+tleO/rH7y/+vDNq9dx28duz5mnlN3cO4S2Pfrse7/4/IcfPf/iuijfKzUJURWcpcQAxl/fbY8un/8vf/v99TT8yV99acgR+UOKDyv/rYdvf//z5/MW3bK+5aG1/mhZdR0+u1yDlAz45Mm5oP/566tcSjeJGlvb8NbR05PFgx892768uVo29fHF0W5MbGmUPHA6PnvowKgx3/i9d69/dPPRpxvipp3P87hXBAJMRQeFwaB1NUiZWXdyNluPw8iJCAHUKszn8yll8q6eh/Xd7l/++V/8/d//9qdf/PTZmoM210DdOJGqqkxxqipbyleroOSMSMHJ2apiY9dd52bhUMTN58NhmJA6Jq+wqqnxfgIYh8KpiKCxwVphLkhIZHIRS5ILs5CITiLY2ALFoiHQLArBAguhQwUR5cJaxCBmziBshBbHi3HEvs8pFeusd4mA1RAaZGPBuFSyMJp7v6W1xlVL1xQQTj3kohxZ5G4YnUzTBGNKAxRgUEGHcta2XAozO0fOGgNYLPpgGcCA2e7H2vlfeed0mmLMEmMmwJQVQEUUUUFpigmQuJQqUBJ1BqzRIpIyEKt0Q9NUi/kM0r1NNMcppWEcUJAsEE7TSGi8t5HLxGBjyVKquq3r1jg/DQdvzDT0qDBOqTtIIRi7KdQ+GDMzBJy4FDI0a4/O6vmX5VJGdgicSj7Eq2HLKXu0FokBldBptFbqipD8pDnmjKBFWaKCw4HiOm9mwa7O2pRYSfw8eEOZ0FgzTlPhYkEUMYto7drV8fXlm1K4BqNql/OTbd8hm/62t+w9eM9utxs4SizialMAssfKu7mb+0njugNRJOIY62V19mDxxW0apnHmKmcdZNFkjK9S2ZALDlvRyGPpQRttik7j1EkRQMsZlUFYMhSzcM0qgKeUShlFORN5N/PFmClldHjy5OTD9x7bFPfrw5e/eEGDrGbzqfT97WTbyvuQJT+4OP7o5RYAjppjCA4SxjSAKiLGGKf1qASmPV7OTm4/u/YNhWbe7abdTvJ+evroaGG1OQ1nR0c3t9P17f5I9MH8wdsXK2fQqZvGgsBqKUrZjp1vqhAceZyFMHJJyv2gC4JgKk9+5MNm2liTPLrpLh/41j5+Uld+Qk3TaJQrzjmvjREwzgYbhx4kOWs//J0Pqnd+pU39q89uj+fN1evdq+tplBqmZEGNkz5P3pg+5YQycZ43fhhzlmIcLqxT5xuPVmnfT2hM7aohJkNYVIdSUJUAamM92axKCKD3J3P4lTgXNHjLiFVVGWeRrLF10zjkT/OUTG3rxpNwLumHf/3T/vt//eCiauuqGoevLardyy83u23spiJmyMaHTMHuNjtX+2bRPl6txk+veynXJL//radPZ7xb74q0RtJ236t65m6M481N//Iqvhp0l52SOAPNzJBIpeAJlqvKBDeO+e33H3ETXvb7IfVvPzoqWPru8GDur7vUR3p5s33y3ltH1TRTuL3s8/Dwn//Jl1cpshbIMIm4JoxZfSycSxz43aeP3ONHv/Ht79zeXh4vQjk5ubq6fbYfL+ZP/4v//O/9w9ev+8N2tK4nWJzOUFLqosXqkNK0SXcyBjHBUiwAJGHV/OTlxxfnx262nTifzOhyPTx7dnO6nDtN+4NUzqcir19cutlsefbw8VvfFn1e9KrEniOPQ5xYowAhqRoC+eGnP19P6ZChT3a7vuv23byuV8eLMU8hIDkv1haIdRXQ2+Dk7OER2K/uBR++d3bXX96NYy+lHyY0Toy5G6KAnLmaiwKacZyO60aYhj7PKH/r4fkvLq/Got4hxNI4WxmHQG9u78DZoiCpgIXmqF3Mqv3+UGJGshOWGVaP5qsvb/be2lHxwMqcl3VVQMgTI+6H2E/ZOkcmvN5OSO7F1Q6xtG316984bZz99MurfcrA+v57VT+ODGQXdaQ0KIycXOXHMZOShXrVnK+nu2ydRVc3zfX6cBjG9x588NnVy6lAHHPThArgracX20MvjVVHY2ZkNhYU6HXfX0zxg+NVGTpXhdrbmowyG1FBKcoWHQs6coexD/MWwJYiS6JHizkLP9/uXuSOvHIv25u77bCvQ7uqjypb3mz
Gv/c3/q6plz+5/vzRk+Xdbhw6jqIn89mnl+tu6OZN+9bTh+ub7a7v1iMOu26kVNjdHuK8Pvr0849dwH47aTVL2f3ks58//uDt4rCq9PHpo3lbvbgalm273+wJVVMxgmQMJ27a+Zd3/WLuQ2OnaagauzhqLfnp0LWrmR0ljTw3XpCAARlcQc14KJQMbXbpvKmWplVI4NAgVpUTGs/Ajph+8vxNO2uIaOzH3XbbzqrdvvfWGMTIOBVxrIfY1dVM0WWBzTCyM5yk8cF6m8ecpsIqX756/d77j3xDy4twyOObu/Vv/Np7f/JvP34j2/OLJy/v1tbZz652WmRu4RvvHV92ZbsrjTO1hebcP7lYfvxq/3o9Hbro66Za1vv1moIFhSKaDqPT8o0H58plkmSRFbKx1X8wKlpU/uHxPEzd1X796MHZ3XaIyqiaVfoE4zAcE751Ik+Oze0E12PKPR9Vs0W9eHR6cnO9cW65WC6tAR47ZQytefX8849++uPDbmgsgVJd2d2mny1qX0PXTxLFOb+s5iUeRhZHGHN0oTFK5Gjf91VtKw/ztlkfhu4wOkegPE0RlMrEcRzDyTyNCgJ+1vg4nczCfv+iEHGWojn3yRqbc7Y5N6E9qWdY2DmsyLngrIPHjx4l7Q+H/XJhd5vrV8/UWHo0M8d1uz6sn5wv+oiHMRWIUfPXHl90d7s8Kme0J/XtbXfWLE9mTewhXve36U1BH+rAhVBjGgfJUwg1+XoY9odugiGGEGzQlFTT0O9ugnfAxYA0xsq9kgqk8p4UnfEA0FRHYNBYVQYRJgM5FUDz4PyRBbc48Wi0kmVR2N6sp53wkN9+62J32F3fbATweHU0TooQJTMY7FM59DmlMN8fZmSpqSculSUuyXpatXXJTNYDpso2255zIUCsG2JJhy5WQU5OqgWBKxWDWe+FSW+2Q+r1ZLGM69wNWCacRhVWRQaHd+v9vG2qYGcNvrVoVirDvodCY067kmtPWjAqTiqVtSfLpnW0OlnerMd1P8Zc6rb69MtXj48vgrjb3fbxycno6oGTFFWBKWYh49rqan2zWizAhlGKMwaK9IfuVcpARlOZ1Y2xeLpcDdPImZUYja2bygE7Ma8vb1DUoFGAu83+l6kiRaIiSsFCYhQAEWQAQSkorACA9wwPAkAlB9aBeLWoqIIIIqhihEEBUQiBDKEgsEPyRhTuGTSiwAyZoWQtrJpBMyICohqS4NQC5qKZkZEIgBAQFETRYFZVJFJEUEtg8SsKDQsW0XsoD8G9gl2JfoksAjCg5r5LJSqoKIIKI/IcTW2pKDECIvKUEM2oYj0Ea0ELQHHGqYInxKKGAAicARYRBSBgUWZlhTGLQ/CkNSGIpgxRUQE8Aqrck4GIkACLQAZg1CJgAECUQKakoyAhMiiAWjRalAgUccpsiiqwFHKj+FKsIBoEUU45pjxseFobHIwoGDBACqJkgKyGpS3zYmeolUBtENUiGgUWQVFjAD0ggakgZjUJOBZwgGviWzbF8D38gxDv+1ugZEhFNvtkCKySADCCiui9SxoV2SAIGZACqoqEyCqgSBgsSRZVQLgnlSsZEBIUAgVCuseRJ9By/9QpUpjJAmoxgJxBUIlIWEhRDbIiAYLqzFpG2BdhBEsIqKhgAYuIESAlEOByP54GpHvW1FeorK+eb+/P0DwBUATdTFMFPk8pBBOscdbllGIsjswoZT0OoBgsOiMqqMY4Y+4DbtZQSZwJbw5x298yq79/17UIMBEWZmEAFkIQKTkWFaDjUM99isAM1oCoci7M7J0rhQlIQWaNN0aHHIFAGRQQcn774YNpzC8OQ1HxpCSsHA1JHQyiOIRK7OPTi5gO05S03MeyTNO0QA5t66s8teN6uy+sR009lGxRyZs//fmnTgtYNJbeOV/91gfvpaHrnq8PUbCivsCLdbfF29qmo7Aq3cgYx8P++etnC3/8g5/85TQOPevt9e1xc1bNV/1uUzuYV8RFHLrWVm9eb+dkFo+PFyernz17db1fDzl7yqamX//Gt794+YmWOO1j1ThF6FLOWsCAByCjrq2mfeJcPNrUFeQpGLUBcylJNBdRI5U1CiYz0KTUZU948/EzOTqcf/2dk6dmZs1HHz9PN7c2Dxz5r/78R1P/A84CpEXumfQUcw7WSmQgcsaeNvPXr97kfv0rXz/63W8dPXsxXd+N5OxNPx0/Xn7trcdXl9dsSrT+56/jZmfA0PsPLzZvrg3Y3WHstJf/P1f/9TPblmV3YnPO5bYL+9njzz3XZebNzMosw6oiu4pWVAPsbgIS0BAEChD0Jv01etOL0BIkARL0olZLIpvdTbKKySpWZqXP6/Ka48/5fNjtlptTD98tClQ8RyAisNfaa6+xxvgNyD6lwtgA0sz0Zbr85z/9cu3z4ngyceX55Xo6nYr3juTe4YxzJoDTk9Ozq/DLL88KPFAKrdZJZMyhrJxhBZEzwb5tj+eTPMRUAkjWAhRzBep4MT28e3S92Zqq9hc3k1L99PNP/9E/+P6f/O3f3f/rX9yct6ap4xAqS6iQAYDMfxBM910oR8UmuULPazM1rm4cCwXhMafd4Esk8TkqZM1NWUQPSXNKIiKYEUWZwsUcgNhDbiqlBOl262JUWRkk2O1aa7Uxph9GRRoEOd62ChAqUAycGQJvV60igzAYAgFJMXGMSitlVfKBDKcQcpZEyJ6zhTuHM0MaQYUwcu9JULJoS1HS4MfeR49QWpOTCMpu37nbm8s39/BMoMYxlqVBgdI6Tpx8qDRVhiKxsICjzPk2EqsVcGEICdBIzlabeT0NOWZEa4thyIkjJ+W7pBiHsEqSUSNhzjlqkK71DKKd2o5jN8bIWJZ6tpgujg5zlOBD341xaLebbUGFH+O6j+hUtpS7sUasFroiqwutNSqOu9WlA1jObb9tQZnbvgBUMeeILBlJULOkdvBDANTGI6ArnFF5yMDpYN7M6knIMIRoNILiXbeZTZvCFPshVKCQNQNq1CIMpELO7XrtRIzQsNpZXVV22m77mFNZOZFMSKSqDJsEKWulHEKKu+1meXioAWaTyW4XDUhSVJbWWWXGdFqVamZX1y1aPYaRAPuxFwWC2ac2cbYGlDIadCettkp20aBWLCyQAhca8qqzzhV2HlPOAMWs3oXsgbd5fPxw8eBgUrl69+LN2dsrYSq0LVEVmk4XB7PKfP7ylaj4+Mnx3/R8QEW6YwKRcgpUga20sQSGSZTfDdd2IKp3bU8jc9+fniyfvHN3Pi048aodLYvD7GWYHC2MhW89vrverCApn5lDNKWBFEoSLMzBcUMxQAROuTK664c+y470pIRJWeTMrizIwMRjpXOSqyzYdemgLEGKPozG2uAHjSCuKkqb2HcKXVlbG3PXd/twHfn1+VWEwgeujZpo0+33SqFWIJR7Tm927TKYyqnalcYoRMo52Syc2WoliDHnxJlAptOq7ROhGmMgQVRIBoc4WtSTstDWbPqemREweO+ModwXNBUGyEyMhKS0SiD9GCtDnL2kjnOu5ydxy41Tmzcvoe1ViVqkVBTDuIoopM
aMNZAPaTMMp7PJfohPr1bfOZmq4doWLqvK+7HdbPZ7n6FMFttEl23fZhKJJYBmVIiIMHYDVe503mhToKGDidlenn3yxdNJXXgfsxIQXE5MRpXBTYw90NokXLfDYTO9vNh9fHFWzCqtBHMi4JTJOJVzBAFnDSD03fDqy0+r1K7f7nnw3/vw/fLcX69vXv/mv/v+owdhld60482wr6bueKpeXw4Q/HxeDBkca4M4nxQ9+8lhUyysIW+O3aOje2dXW7/ZVY3c17Wz5c0uNEXRZQgqhyGkYdNnyrGwiScKUmY3cfZa5xiAWFm95fjzF88vxrELacw49OPNfm+yhAGwJQKYNOXBfDLE0F3srNUcfGn1YmEnlbmdBW/eXCDjfNlU5POldFm0ppSg93yd+0Lr0pXHzcw5J4SGUYk8XDQ3VzfnIQArIFU4ox3kmIQp9AmS2AJdYzVkHkeDeHhn+ep6O7VFPeQJynnIRivXWNvlmHja1Jvtqu0H7Yp5UwXEYUzapk0XppUusyxmBWlaXe1f9b7rWJVWUFTm7eWVrWpm1kkCp7sHs931ngUC55kt+rDtQ48GnShKPCV9NF/mfl9V5dD5yaRwwnHfPXx0eDmEN293J8eLpi6ub1YlCCPuOL/t+yfHxzYlq6lwShjDEDgn0kobNKCJc2ZfFCrltB29QJ5PG05+Nw7P19urMhuSSpvKEuShUNVhpQbOiOW3ju7/6x//yzFn9oPL6f5B023bOPbHVQlhfDvG4HhBrgEVfTZOM1I/CsYU+mHV+qvYWVdOyS1c0VjchvT667Oj6fzeXePHzo9jHnPOGR2JADAYZR4cPfjs/PlhZVESjXzQ1MvGfvnsvM1w93g+n0+nihcajMaYYAhpG1qOcWIrskLWrHTohPMYFIJRRNr03udMUJoX59cBdWOL4MeytgIcYgSAdgiF0UVhMKiU8qxwypBzllBWu10JcDqfHU1nr95enm/WXhg0ZpDdfpjWxdsvr41SLhX9WbskWWilMcwbPSnsxbrbBwmiw9nekuZ+0FWjXTlK+OJyt48eSEtlB873prPYjV32QqCR9n1QpX57s5uVCkGcNSmmHMb/2FVkIbetcXTvzmHbd0MKN6OPIWqFI9IQ9ccX49t89eTe0bsLXek8BOWkKotT5tmsNnpaurpox3FaN5O6/qu//rPPXn0eh2i0I0CO7PcpMlqDjIAsSlKhtUJpI0gAzlwak30OQLbEzHIwb04P569evmr3Y7/3RVVsfNCoTGGdApQS0i2ogm9udkopxVCUpvOxmU76XR6GxDlAlsI4DXrYDXWh67oqJzOV2RhtxNRmso1r7+Po5bObs2ZSNBNTae1md9LIJvHElKvrloPZF8PxwXyW7drS05fP70+Prt9cHi+tK45dsVDGSEbOQEqjJGW8jPvovW+7opnNDg6zBbq8zikgwr7fPXCnmPWwH4WZGYggxtFnKrFgoBiTcVZrBZgRBJRm5gyYMpBWaAwC9sP+YDkdvI5drGB6cfXWlup832Xm5mTS3+yXs/ochrb1CpQxlEQC25sBfZZ789rmYcygrEtqmNR2PilSDpzy+fW+bsqcpDEqIJRWQxydxo/eO8ytn1pnpPn6vL11VTCaq3Xu+x2PfrLQyilHiAo5hnHfQiYjerPztam+e/+h9CkPUtiqdMg5HTYH51fbcUxNbRvC2uSTw8nyaBGHYOr67XWrCK43u5/9+umTk2mZ4uF02RK+Pt9NmpnLMKS4jkyWaiy1wn2bREHwvimNK9R+6G1dZg27tlelqTQLJVsSJFGi/N6zj64qx8zHB8vtboss3fYbcJfSyFkKTZ4ZhREUCEpWOYOwfCP0gKC6dRwJOVAKNDGCKAYWIJGUQbIWJhIgQCASYkYiFC2sOKFCRkpAUSSwypkosSQhjYqyMaQJFGMGTLe6AyFqQPgmT6YRNbMWVogAkBGYVGRMAAmBAEUBZb6tgWUAECFCFlGEiILIpPCWfcSJSWHnx4kpWGTvQ6nV8cT1t0z9BLs01igRQYswS4hsUMcgDDjeVmoJEClE0SQEiMhGfRN4SwBZkBgyQCs4INjbZBoTCSQBn3O81bUQDNHtkZ0AsDAQakUsAreGOhAS0UTBZ2QZep6mRjOJAEKWlMYu+y3pVoHXpFkpQYMgKkvOiMygADlkjaApGySLGpKKCMACmlGlgjIBkAawgA4KA76JQ6a0JkQlCBkzEQMiMgAqzjBGIsKcmREEmRxYpfZDJlAkt8FFyCpLZn0b+hLkDB5ACQGgoASBFGJt1ZgyQgIxwASMDJwRmMAIpZSFdBJARiIE5CycgW2pKIslURkAwGmqLaWAnmNGtCgpc2S+hREBgEbMpG7HNClSCCxARDlGEVBEkv8G6JsZgJll14WIsGgmYQjgaBd774MF6mLqh55RKyNDzlkwZVakBSAxG6ONJlSKyYzMMcZpWQpKSOxDUgk4J0WEhNqYMHhXlAjsnLna9A1xAtz7VFtVaNN7IS3WqJAlJeEsCZQFlWLKLD4xIE6sYmJD4pSKQJqkssUYRgAg1N6HaVX+3vsffPrqTeB8Z7KMOe/6XVFUgfWyXF7drDhxHlLptEGcNrUbeswp5LxJQaHKQ2xDun52ubq4qazcO15859Hx07PzyxQtqco2XQ86DLt+i+2WpPzW4skf/cE/+N//i/+rVroq3dDuJQ55SBgVos1JUFlnVPShvje/MyvX2xuri5N5PSAg45077+77/uL6TZf59OTo4FQSydXNjmMcwtjoElFGzylFLkQVSrKQswkBGaRLGcA6Y8rsU44p3UZIiHA2LSZl3d7sh1X79OIi5PzBo4dPDk7m73749qvPdrsupTXsBlI6i6SQUCnQYrXS2kROSquYYxg5le6Lq9jSel7qo6bsdjFk6Uf+8a9+8wffefLwo3c++fzrRqsh2FdXfZvTxXb34WT2+PT+L1493eVxTHE2qTNi2ntdYKh4NebNkLj0AdRHH753dfF61e5XO386m12cny+m0ymWf/3Tl8Sla2y3a6EPgKn1sW5UjSaETCLJJytYNsXF1Y0utUGSlAprF9NDye7+oydjzNL7VVxf77t//s9/9j/5Ox8umxcqPK9VczCvxhD6zJVPlIcw/IcHozJmVEaljmPsJrW1hcScrHPTog44WkhRYPCYssyVqmrtfczDGEbOCbU2KXLilEUcqGXpUs5O2TEzkV23bbBZQEGWED0xgKQsOWYvAAotpywoKWQFasijQCBEBmEWQlDKiHDOAgjsAwmBAGQCSAWZyhlKTMCQs1EqM/mUupzGFObOWCIfc6mMGNq1g88YKU9L64zu+p6QlFIKiDOMvQebOYNXxCwpJhJBIJKECFaRsTbFSEolER8kReQcd8NOExGRoSxKF9beqmD9MPhhtIXmnGNKzCwQMmDnk+9z8DEDNPPJCHBc10nEKBYGzqkdBkZkkkEyWBxTyqCm06LWSpAzsABZY3KU/b7vyNyE0QDuW296n7VWmGtnTg5LKIvrXRxGzmjFGqMUpyghYsIZKVvqZVF4pVfRq8IQqSg4r+eYqV0Hh
abbDBmgqG3j6oABDcyqwu96D5RQhLNAqus5X10DwDBkzRy577P3kCMNuiiPT+p+k+dmYUfp9/vZcXX46N7mzaXBKEkc2M157+aNaEgJQoQUlEKIKZuJA22BKEWPTmUehki6Ucv5YkuXYcgqKY7CgbUyCnNhVMqZmNbrtbfAzj14fPC3nxyV2936q6uvPj7rB58VgRJGPr47BY5FNT6/2e4kJKd2IDPzN/uCuaNr0geFPeTmc6RGVRMNRvWb3hgzq9LQA4sxmUBoUZccebv1kuDyaovz4mh2kJhUoVa79vhggX58e7YRVytXBpHohRNOa31QY862uwmTqj5YGMs3SeBZ68UHQKOtHcbR1ZZmh4njmHXwedEcjn5DmKx2RrErTAbpho55Zxe1nhzcffhwt766eHNxsQkXex+iEiPGklUkyROJtXroe+9zzrDtc026bohzLJyJKUEMwhpBURJSoABHRmv04CFmnb23mr45/shZk9HKgHI+DO0QsTSBkymL0lltCgZFWpmiUDEIpJxDztxpO3gwMX7nzvJO6e5ZkKZWNzcTpUaAdsgxYYW8LLW24BEef+futJr8+NdfHd47mRxUn56dIxa//+Rbu1/9qxfnvakx+HY+m6tSPn+7Xu3lusM9oU9sjEo+pijkTGRWiA5BAmWQurYa1HorYy6cGW92rZ7oTdc/XhzPp+6myznH9598+PTzz+eT43nhvn77G0EVY0bhkLJBTD5Ya0IQQ3R4NNMh3Lx6ATfrf/yHf/zJJ7/eD/EkdqlMf/C97x7ghq5Xp6r/z/70wz3ye/eW0wr+23/16189vSRbvtoEDKly6uQuFZPqgw8ef/X1SxPcNvVvLNYPHr3c/eadk+Pu5aWPw8MHx8FTHuS4Tn7MEGGEePnq89PZghTce3L88ovXxiJ6qNHcXx4icbvtEbFpimPrhpCv92mfYhvigPlkVk1nzbSAfddbjSWpZHF+MrfWDv03NeGRAYaUtRDx/cX8+XpLWjPyYjZJXdi0vSBZVyoUEJWT7HxKN91RXe9CCpmnpTtsihDT1eVuSAKEZVMUGo8PF/uuTz5mhqt1SxnuVcUPPzx+9uzm0f1l9OnVzaayJgOcXa4rQ7UuhiHveAgxcczEfHdZvffwIOy89+P55VYATWWziEYoG1U4yQjTqpGcX+/aMSPuR1SUUiyMvnM4+/ry4vbosCxMSrkucWG5PijaFjoFjybl5vXmnXfuZUc/e3ppwc6GiIBaK8xiFe4gvF5df6bd7949wRwsGUYNZEEn0oSYClUOfkRLWudNux1CfDSdUYoictn1UOlZYTmH9W5/b7bctddDOC9NubvYfv8Hf2fFm1zR/cXd10/P9723hgq0+8v1yV1992Dx1dX+bNtd9qtvzWfLw/mzy+vdatQNPjqeTQ6LHWfQphujZn80acK+u9m3FkvyOvXj2e4KUTBLVVlTaQLFeSSCi/0ZqRQH35gJJbjcrTYrdzhdnlgQI59/+vJ3T46XB7NdNxTOhJRIWTJm0w8a6cn8zjhcM6icoCwrZwQpa6OV0+epvwa6CTne7MKQnNKI0neDRXM8my2bZtVuc05NVd05OHp7vS6UWKsvb3qtrUeJjWpVZKvGIcU+zA5mT6/W75jFCFgACZm2S99/53Hcxt0Qx6wowmI2HXajsbj3/bygJ6fTPhA4t17tR+61K9a7ISDWVQEwWswpScxZjF5U5by26/Xm7r3Hn75+A4pTyqmP/7FUNK0Uq/Pr7vpiNE6vNn1R6Sf3TjchDNvBlLVWdtPCEI6V6rr1elocnR4+dnbid3u0GjX1vdem2bf5l5//5MXrZ4hmMXWUpdB2l4Y+QgZqN30ISEY0QiLYbNqQk0FTTRdhnRAk+Eg6L2fucGLX691q3ZG1urAhc9v6+byp6ma3HQpyV9ebUCvlqM987+6hjn075HbbTw4KRuw4kWBTFymBKmqtrbEGmcQP07JgEJ/TyXJxdXO56feqst3AY06rTXSKF5PJ4Hc+DScnd46wvun85dv9zVU3ndf3v/0wzTtJvnJ0eX7z4XsfKNIp55zz6HvOues3SH3XbS5XmxD9fVMvj9/tAZr68u3b16YotC0CqMI4UtpoDMw5DNaUWjtlLACyhshCzOMQJk1RTFzfBa1s4YoQsgLISVxR9SN7z2VRa6xPDyfapIvLi+XRfAhDqUy72XLMViOxMGBMjAiEagxx1Q7jMDRNId5PpnY6taMf9+0w9KF2TUlV169JictpZuzJ0aQojfSq3dLZkF+fn5naRZEx5KIwiSMBiMGoKGYeQiyaQ6WRgBpXLG1RZyPWxS6NfR6l/Gp1VpyYw3LynbvvHNgVXp93Kd/41G6SBp/Or5u6qqtqOi2H3TAKfHVz/kd/+N22u1YqP7g3a5y+2nXzk9luCMOqDV2Y1c57n33QhSMkztD5GJl9NzhnjTGt9+MYOaemqWw0kAGVyojMXDrXjWNOUpUG/iaNyQLGGkoRQBCAAFGQRRiFEEDd6jYiSrQF0MIkCHjr5CGGnABZiVDKpIGAExEhZ0RAFo6MIqJFAITg1saRWG5ZNIgkOQOyBTSEGlVWgnJL7WGFAMBZAG6f1SUBQAYQQC8YBRSCSCKliEEyCAsAigARMoCA6FtdByDLLf8KUEAj5syDx0tOgDgmkMSmIlTKAfYhAaHHrBRGkcAAqCJDzOgZhLRCBGYWQERUSAoKrZRGYBHBPqYAkIQZEIDkVqcSiIQGBEUis0FUIAqQBL4h6wAjIjArIiNCghE5IxIiiyBgZjBZSRAIwggp4bDD7gb8ml3IBoQEmG9dXwCgIsAYRcWMBgkly20+ImsFSBQlobBCJgXEgAgJARSoGoQAliwDUiDQXDRspywKMNLQZQmQByURFCCRKEdArBVPRPmRJQGLgCYCZAIWQARmueVaZUGjiFBYbpM03xi/RJhREkMKYBg4soAQCrFkoSwYEZVWMUWtiBRqEs1iiRDBc04ZDaIhFQWyZEIkEhEghaABBEEysoCAJoQszJhu2Xf61kUCAzAAZGZSqAx4iQjQpw45+SGJJiUQRG5SSkCzyWSzXpMmHzNnEGGt0VlDgClBAuCcnVGWzcy6y30bWLS1lEWEraIYBQisM0hQWJ1yGrwsGhP7qEkTIhIpIkFIIqgkhVw6QyKKkBRFz0IEIApAY17MC9wIoqqtEgYOjIoSAIuzxeTzN2/WfYvKYLOoOcznbtOOqoQo+6rEsqBcu92Qep8fHd79Ww9Ozp6//Plvv9oJRUSFGhz6IZ7vWFCu2R92w7unp1nOIwcBXPmWkMVFZxN5/733Pwo4dFZTFt337xVQz4vMeHj3brWYw8XblHKXcjbmnLjQ8OB3vl2O9Orty74bi/lhynl1cbnfruqZA3IBhmY5vXcwvby+ubj008cnD949yahB8XZ3s77YXz1fyYg4ZjtKVSgnXBV6TJkRWx8FFQCZSs0OqunkwO98vxtohKkzz3/x1c5/6VHIwJ/8vb8/jF/JxT7JmIVuJXFBTJkZgrVaIEMWIckca2vaTrp9cryTDJhAo8qEX79++8H9
47uHx+thGFJ2VbVt21XPP9+tvrzpLREHqevac4qIY5Z+6ztmtM65nELsJbRDN7K/9rv3v/fdp198Uc6q73/v4V/86K8WdKe+u4jSdfuIWEYwdaW5Q28kRc4IBHq/G7kCa50AoJLJfPrO6XtkrFgN2IT9BTAICCj186fP/4t//Pu/870Pr9Y3L7ph0kz7FGZNeVI3V1c36psHItBKhcDtNihFg2cRLoroXOkzt+1gFWROQIioclI3ey/WN7UplBbujTGZIykoCsuBS9LdLhpjdWXTuBfjiaJBfXpw//z8BQsyglIEia11fIt0ZwIRYwkIU45GkS60seCjCAsBZMYQkjaESGVZjH0QZqPz4WyCyWfvQ7aUpTFFBNmFcRxjhttEKlhtxswiaKxtxwgISaKGEWLymUnTdFKyRCHYdmMWxaiIKINSwgSgADVRBsQESrvI3I+ZAbNWTNwl0Dk7K5VBZcA6tVwsNxcXbMUYfX6z9QmGLCEDKBpDJEJbWKOlcJZTBJaCsFtvdeVSStcXlxnkYD4RnxdNEbKknJFwVphK68Ko7OPovXDQpPyY9pkHy9WsHHIPiJlz68fGGglx7AYH2mgZSSWRECJIUlkUYcj5JtBl189OZmgsxzjEMOTb5RL7NkkOh7UuHY67XjEM3E0OK7/vb67XfZKqcihw0+5BX9rSra86n+PxopSkgo+T6aSKJpOkbU+jaHac1TikNzcrN01NaVXGOKakSJO9uVgXC10QRS9MUFiNiiVjhKwKyjk1mipXXm5adKbdhEk9a7nd9UGTdqXJJBl5yHyz7yjnwfeLO3c+uHMaOv/mZ6+vnl/kQXwA0Bq+OR7gHNiWetO2bddVxkhVkyq3+/3tLNiNGYtJG3xe746OnSYZ4+g990E3kZ/+9s13fuf7n3365rvvnLZDu18JpzyZllVZQo55tBdv1xMyTJSTyj4CaJCIwKQQBD2oZjZtmqbtgtJomkJQDYzL5SHq+ka3itNqs7ZSzOp6YPti1Q5jB66ybrLer++42OiAgMpqZ/R+38+Lsh/y9MMPF48+2F09f/brT8dUtx67gbWtjAafsqQwjhGJjNboys0wTI2YonAgkDnk2OXknDEWrFFWu37AxJmJpqKZYTMMY4a709ngtymmduhNWXmWTYxemZxE26pFMqVKKZXaVlWdGbRh0OLDmECM1sYoUS4RNzXNbXpcSrHanb9ol3Xzvd99/1c/udnvx33Ih6X64L7W7H/71er4zjuz4/t33MUJKG4v/8mf3lv92Rdff/E5bdLVymO3iTFMJvPpZBr2b8de+6yqxmEXDUoElTIwYlPZWleLupyX5R6GRWO3q9WvrlY5Z2WMNbq2NnSDjL019djtvHMXl6/3oZuo+s3ZDRROxjyrm65vgSUhYs7Zg1b68ft3SqD/5Hsf/Nu//mna3/zsr/5i14Vo01dfX2dr+oPrr79+cZjzH3/0gLu+mTYXF1d/9ulTdlVZ1pvNOCNVNkoXajP4GOnqZ789KKpm6hLmbR/83jbF/bPV1ZjEGOwTw8jt1ct3JvfqSXG93vca9vvLvYT1AG92Q5nz/XsnZ1++sU5pByl7Y+R4Nut2aZZVI0p0io0FUQigEC3Cq9cX21WbskTKunbV8ZwNbG42t7PAzYrl0cGvP/2kWUwW5YRYX653LUDbe0V4uJhGTlfD0MV4Z7EoC9p1oR3D0WzaRxCgGEJiHAV3wOIMZK6raUGw74Zd13PO08mk0NqP/aofP3l7bp0dhn5s/Z3Tg7ftvu2iICUmzllpNYwROT8+mhZKJ1KvXq27IZaWbi3vZV3t2h2FZEhdvrmZFeUwDFYbDklbx0CMQJLnxl2uV533ZPWksk1RPn99/c7dg++/d1RODuTVuWw2cdcdF44y/vZ8XVYTo1XWcrxcxhiIqHL6IsZhlI/PLr794P4S2CKSMYiGCNsYksiQOCtJuUNRLLoApDELcq/l7biPjZpb3W1lYk0CMXU9xiCEI6nZcvoXn//65Gh6XFgzn15p3sR0XM8ePJ6gJAGZz8p1SFLJm6Fdnef5ZFrJfJ+6A6efnT2vGt13o3HKUnHvyYNf/eqvB6R6Xt8/uHPTdtW0Oro31a+KL29WFhQa06duBJHMAGreVCfN4uXZWU7eTSYj5zyySrrfRzjS2QMm7PuRSAprUhSjCyT9tu0OF5Na6ykertstETBkbfUQ47Pzs4shGKMVISIVymiRg6rsx0jAYRwqqw/MJGY+X+/7MY7RZ0VKoOv9uWrPgl9vWxvYAUwnFRIoZ1ar7awplpWxx7NnF1elm1SN/fK3z9TR9E3bV1oNY1CgrSoZyQOKptdXN2ihsKWPWQsaS+j9GuKTd5aXl1ul3C4EYZKcHpweXd1sbruWmGU6r95ebf5/UtG+H7wp37TD2mc1SukmHeNZKJSuIekcbpuY0quX54t51ZiTg+Xh5GDue1Z1Vc0nGXIhcrXtfv2bX15t3tQVqqRSziVRXda7MaElJRBj4piXVS0qbkM0kXzwi0YXxuraArCr1befHBQ63LT7T7+40sZpcP0YmQXZ7PvQw+Wju3cOqmMQM8qwHTpTTJ6/7U8XhTHu/r2jq127222BgVnaIVhj9mMUTU09XW9j4Tw7E3IehmDRHh7ML8d972OKCTgxyqzQ2XAUtNVcW9tvIwGMPZPC86vuJvd37t799Iuffuvu/YN6pmFWuBpRJxly7FabzW6/BpO6trvZ72pbPPv6zb0PW4N6Xi/bZh+FQaFWEgYvopEVCGtKRrmUGTUopQABFVpDiNj7tBvWVVETYvJJoy60G/OotBKkuqlTSCGO1cSuVu1u3Y4errbr2fzudeyMdY6zyZAFyWoBHPo0qQqxZKCwZXV2czlfTJW2r16/ZdHImpC853lVGPHffnBKIqTdi/P49nLMgCGmoJwmBcJo9G4MVvPBSdMNKWqEiLO6qgunJZrSXfb9vXnBfr+oNGopDo9+/puv7eEk6jE7+Mvnn2SGIfWTyeTe0d0Q+hevNpxVHNJmd4VWv/N4/uJq2GzDixfPCWhoe0RsYwySCltqn47qYnFnvl5tjhbHW9fftC0pHWLO7JPC2aTQoC1q4Kw0IemdjwYMR5k1rq7rg/nh+ep6N462LLXF9fqbvo+6LDXp0tp1GxKmnJEBRFhQlAKDAgSCggSoQAAUA6MAQBQlmVIiEhLIiAzASn8TQsPbFBiQACbODIKCmbMCsPZWG5AMwkAIkAAMUmRh+eYMt7CKUBAwAXDGIGhAKRJEiUThNjaHYBE0At56x4kyCwLeihQEqAD0bfs13Fax3Uo4IgBjpgjIhKUyGeGyHwFkzCSkmUVQsnCBAqQyswaOhAExI5AISyYgEIGMBJAAIgOKpMTImIQZgYlAAJGYxVnKIsxCGhVqziIsgpQAFMItKkopMoQauSY0AK0IMTCAZ1RIwigj+y6rUlJIrc9hx3mvcsgxJdK3kEwxADklAZ09ZhCjxaBkDVFBdEImKU63h/GOgBCYQTEQKKWIIWUQXUJ5CLJX3CKWaXKPzBylVBDAdJxCHraZR8KEKTMWACz
ZMwChR2QFgnzbK6YAsnAGuFUesxASgxCiQRDmFEWRSigKMQGgQhRAAFDAlCxBjJwZfRYGUFrQkLbIkplFaWOIURAyZAuaCFIMhCOgQwJ1627CBIAgBjABW6XwdiQz5MwEggoB4T+8FKFRYLXeDCOjNKowBlSmxlb92IpWygtrfTXsvcgtJwv1NwjkSWmJYe8DszROHzT2uGks6tW4U0mSpFJRqa3ROmhuA6dbUBHzcumOJq5ANox+n4xWYcg5CRLEMTApa3VZlATZZ9mPMYsIQWIQa0NIPgkwoFV9kIkrypp6ZqP10XJRotrvrxCFiL8+e147vHdyEjn7vu+ha5SRjEQiLBHsm5tuOe2PD+/8ST1/2l5+/vJNaNsYkr6lgiHdrMfLm3G7t995vDw+Ovzy6Rvt1Jt9W2stEYo917n+889elIoKTVOtJovDzXW3KBMKCbOIRJ9YYYr89NlzTPfqu/fPnm8UVQ/eu/vo/QcvfvMbkX4xa/Z9++VvvjaQyVCMwVhTKnrx01cvPn4DZT55cro4qO+/u/zeD+5vb4b9Zty8XK8v9nVSW98TaBBlSZQxIMJRxtXYX73c7gfvAQfwKpPWxNEKAMm/+7P/9p/8p3/60+1qs2pzFFNoAUIlmRMqUBZEhJDFaEHJGEC0jykQlK5sGrPphoLUGOJvX1zcO5wv7kyH64Fz1siQoA2y9cPJbP7H73/06bMvWk5E2UytANSNExHKKaK8/zvfO9vvn95c6GX55uXnVo2nx/MfP//6qsc7TeVv1r1JdeEInGRsjL0znb29OGvKqkucIzupUoY+AQv0mY+q5Xxx0o5d8OBz7Hfj1fUuRKZCrXc3f/GTX/yjP/r2O1991p/vv2w7EO6HEetyeThpxwywAoBxDJCJE7BI4OQmFdalj7zZd33fS4wsLEwkrAQjc04CSA7ZWpdvzaf0DbUIMsUgMfB+v0Id5/NmXi4029X2nBRzNpK5HwIqMBoZlEGdIChFgJhFjHKkKQZWShHiINGQiObFokFQYYykNKgQxJ8eNQcHE45RiTbi+jh4CD5lkMzMgdTZECwRJJ9AFCgj4nMmpcaQFYpVNAgFgc12nJWkUlJKOecGn3zKQkRGjzFLQs5ilDjCZV1MjQbejzH6cTRaT50tFUynZYzShxhD//YmNk29Wt3cbPdeYO9jYmIBzpyZZ5X1kI5P5rPZbNx0CvOYQ9UY0ggJs9Gv9/vVBg5syd7PZvXCVPt9q4S7faeaRjvnlJ2Utt3tTpezR0W1t/R6c+EsVNb5lA/nTrHex3S0KCVLhmz09Grf+5xJGxDpu9QPvG4zqnyxH+fzojCQ2A85WmuTIGfWCKP3Spd1U+zjYJzxIw6MDMYoiAliDFpLj9u7j0+wlBfnN/sQxTMpncdUOCsK/JDaVVIYs0RE2O18aVU9nbhJ2b/eKMmJo7VUilLE3kDO2BSuawMzDNF3wKCsj4ZSEXc9adBFPnl83w8vTQUxCqAELcVhXZzMsoXKudPFHaPs+Zcv19d9v+pJbM7IIMKAKGFMhxPnEHmEi7a/uu5Qq7rJud2F0d+uBQcnU05DplTVNZ7OLy6GuqmvXm6C1F++3v6d3//w3/zi7OMv337//QfLg/lkPmvscnPVh4Gn9jAOcHByrwGZHU4vz7dKgCVoFRTG9aotJzWLcoUzApJU0VRh6CnDEAgIuxwmy/m+3WDTtF08aIwWmVWkSK0Gz6qSmKG0CnkYR2VhCKGYLskUJ3cet3J89vRlWK11NG0XM2hbuCwO8jgxOqQ85JyzJGFIGTMaBgtCiibN5Hq9vx78gggZE0KUTEZjJD+GmDMAdfvgQc5hRyjZZ07AfQgiqBB8P9G2Js1DPJ4u7y3nY85jF9kYOy2GFH0IkcgVTUqs9GRW4B9/9+S+ba9++/SAGn/Z0h3z5def3rm7fPW077u8TVzIPEnnh/QX//4pH164TO0nN0cTGoerR1Xx4unbfDO0GYKEGGP/8mo6637n2w+fX3T+2TYn8DGDxsRirWZjGAkcualG8BLyvcfv/uSnv1qveiQaR1+QPtWwmFUvX908fLi888F7v7i4+refXtRuUtjJv//NjwO6HAVjOJ5VbxO3XVhMSudMC3jTwx+8/+BHf/HXP3hy+LNPXm5GaAE9Ufv6urT63VP39//43c/+1Y8qNdsW9l/+1ZvRBzJqP2737TBZlodH7nRRPXt9FTsTgTOiKqKSvZ3k779z+md/+RelPUSHdx4/3m83oqA51H/3T/7+9Zf9obUz+/roO+9f3Tn/8V99cb1O601aLtxqew1kD2aLYdvHsS+KgqIrxN63ulkWL/ZrmuvNLuQsx8cHRVN9/dnz4NmVOnAyTVnfXzDtpicHt7NgOw59d83OrBNvNvv7i/ruwb2L9fbFvt0zxpgXy0ZiULa4ajcElDOezOqbm32JJIIp8ejzy20PjMQJRfq2BafjGDKRtsr7NKRoC7VNnLtgcrbISHRQNdur82TUpHLMWZfNZrefziczZ5TibZ92rR9T1oYwCDp953i56YIrValpUpTXby6fPLn31YvX79+79+jg8KzthpExcRyiOz6+uL6OqDEpHrENcXa4jLa+6HHWd3f0RCjElN65c/i67fo+G+uAZNMObXumU0Ky+/1OMgORB/7N2fkfPrybh0EBKkXGoDV1amlMCdGJqoY0tj5UQE5ZUfLbt682OVDQ28g5JEjcDt06jyFI3+3/Rz/444hMOu1WnGkc9pH36mQ+Oaz087P10XL+/M0mFrpHLkvwQW/ankt3VB+eHp72+9V+tR+3PSAMYzxd4s3N2xvfbWKOo/ze9//4xaefVhgty8TV754qP467bigRVWECSz/kxPxmvT6cH8yribL1vm2JaPRpOZs/Or0/9OeT2ipQWaTbj4UzINAOyUGakiFIu7zWjTKKvDfrfXd74q9ZJEvE7AAL4HcfPRj7fiN7Za1TcD3GTgRFxn4/hMzMiKycq0sXIYL4b71zp7vc9F1f18V+SAi8342EqtR5Wpm6KC7WV6ez5eRAbY2U1mGQutAn8+pw1qz6/mxM7EMQ9jlFyHcXRy1vNGHoQ4v8er0DBFPoIae+Gw8ru9n2B0dl6lkbxajQ0H/kKvJ+7Pfj6Aej9dh5P4K1Ng1XKWtjCibglKxiDGNtlofHR5r02I9ZFAP6yIlD7LdPf/tb316WFkulUkoYs7LUR380b96sd0PMBYFF8ePgnLYZC2Oc0aUQdGMJ+mQxcbPJdEmff351tY3KOFSqb/tbOAgTF5pSN5YYg+8PZpPz68EK8TgWyhQJ67pqh/76bAWARinSpBSBwj50oWsB0uls4eNI1CiWuihAmTotDva785ut5GwVCoIjWK32VWVr7fpVG1oprRuHXhu9mFm/eTPWzWl1J27C8dGSU2rbcRx3LGMeN+MwKkkKTFJuNp3mIcVh3F6eF7LnEI4PD84ur9OYCNmVro19ymPiZAm0soC57/tpPYOcc4ZNF/qxV1rt2/2kbk71XYVakoyd9zzWrrkF3EYB78fYDjnz4cF8MTu+Odu+8RejoTAyJc
WQAaUiLQKuIOcMIWqNo++bqi4MDf1YF6Wy5XY/dGOyMd89sN/54N563b+93O+GvZCjCvvWE+QZqYLNzg9aQaV1ZZASaxCIIkOYNXUNmEPY98mze36xnSHs9zd3Txb73K1hly1Ppjp12x88eueTV+fvfnD3l+eXYX8DLLrBw9lsf92WjTleloeT8s3VUCv17OXzP/jo8fnFG9+i1WpRl+fnN1o7TTSE4Iw+aKzJsB9aQI4APjEgpSEVziQ/KsRo1MF0ctW3WgFnoYRDHL7aPR9zFiRO2WSqZxW8BgCgBMAcRSjJbWs9EwAiISsUwlvqDAjcenbwlkkEAsKUM2oGlEzqG+yLJRIQQgJClqwBiREFNGrJpEBCYk0iSpJCTpBvO84yJGCNRIQKRDQoLQoABJBYMgJjBkhw64ViQgIABFEECkBEGASQEATxbwJoCIQggAgALBkosQgiM4uIViiZWbD1aenooHK9j2POIYkwAEEETElIZSLRhBrEhwQIAmgUYk4sQEicJDKQIhTJSQyRJiIQ0ir7LJlLg0pxozWBWmfJkpWA+qakXZhFEzEQilDmeYUFCQvaKIiQEJMAIWaRMETuPfcIxGnv89Zzh5QZGUUYhOgbwQyQRfqMo2DCvANZAswgZaAShEAUgIIAwAJRIMttLAwJKUlGA2aJVRRpwdVULlM2kKygBVNJimKWAMwqAgOIAAlIC8OKFSC3rBkTIIuI0G1zvQCTwDfMaQBhvr0qKQoJkgASKA0Zb1HdyEgJsTBoIXPKJYhCJTmTSIUGGDRSSAwMguAFWp9rrQtSWngkHVAks0VkQRGIOeMtcElEBCQzABkkROTEQAD5G7lIWFAg52S01kqF0VeWQCSn4HNg5mnhLvsBLSlEAnEgCmRmzJBCHsc+ZGf1pND3jubZBwJpfQ98WxOImOHxyVGfhygQt61EAasNQGOVA7FGA8RCGcmCgq4wIURENESVQkghs3iWFBMoUAiSuSADIqUtlrbMAkVdSE4hemA5WM4O5vXbV69zSgqZIPmd95U9v7yCqLQSV5rM8WYzhpBCCpqlv7reTey9+6cmq999cE/a7lm/50oNgX1kA2gBtNV+HF6+OA+Jimq6G/aAShAsqcPD5dG9uxeffaKYNeTSSKGx1jb7uNtsYsjCUlhkgRyi78Znz5532c/K04NqrmT48ic/Onv63IDq+ti2rcoAoNhnq6xTLoU0ERe7JN5sfrV+0501Cz07qYuFW96tj9+vFk/c+uv9zavORVGs59PGD0Eh2NJk5rYNLOQq5RN7Zo45BQZEjFDW9ud/8QtIpIkCZERRCKiQADWpHBMDoBJUqBH7YVBZAJHI+MzaiHFqsxksaXZ8uV4fuUPLuNr3VmQICRmtdZt9f76H/+V/9r9++eKv35x9PepuF9pkcHvdkve//5/8cK/i5dmrokzduNMIjbNvX21fXWzfWZ7Ycd/nUZIqjeMoOpPTMvTr+bT0ovoxY8jfffwHP339k5iSIXNSTJZkxmHDCKawBiQmLyzOGeVUAvzq+eu/9we/e2d6/OKqm+mMTgsqi/FocRAAn37xEr5BejNLNkrXlpwjV5qUB4gRYpbEShnOjMwiQhkwcRxjQnBlYY01pU4ph5TymIdddKgRpahVOdGmKFKf27Bq016s5cgKOMTMGQitUQoIYxDvfUFGiDIKAWoCEC4LNW/q2cQO47CczttNvBly2wVSqIt8fFwLxNGPM13kIXD0rWcEU2oznzbnfU9EKTPnTNbMytqPgwHJnA1R5WyKMfqktCKS0hbKcBhzSIDf9Cfw4COjsoamTX18eHR3eVQiYRzitF3vdvuuqwrDMWgQR8gaNGMWsYikKCu1YY5JsiFggCyI0DhtWY6Wk+Om0ijKyK7345Ct0tFzzlgXzSykkOTK901hd0Pob9HaXV+S6oOvtGTJ15tuXruC1FTkh++8M8ijn/zq1/v97mhq7hxNblZjZd3MUedzGnMfevZBCSmjlIJOoilNTTmHBILWwGLWXGz7stSosVQ6+KQVzauyDzFKsJXM5tPLm3a3HwigKA0QgCoO5g2yNxLef/Jwtji+fv6iC4MAau1u1ntVF5hVJEUalciksWXIOY0312OWWVlX0o2uUbXWjm0kvrm5Lo3xviNFDCAxKlC10ZOiGdtstMqCfoxfv37bcuoQbG2KQk4f1g/fu9NfJc9MQpuL7W6z7TadJCqtQYaRE4Ao0koRiEwsNq5IXXy97ZNx1cSWs7JNo9A3YeTc7w8mZj+Gt8/P+jTkDF3nDyZFN4rRtsk+92lSFBldUS7OLofN699evbgslD585/Tu6UkWj0Udx1i7uu86Jn1wctIOaYJTQdnsh2ZZTAo7tG3FdHpwcHFxs2jKumyeXW85tNHH9X7Qot5erqKO9dEdAmddEYSKqiaJwlC5AiEpq+2s9n22ZMPm/NOPfzpf3v0v/+F/8X/4r/7P2NSBg0geh1YpVKpwVgEr0qrNvinLUuk3my0ptQvrWmGpnTPOWLVpg7KoCH0fQgxVY0Ni0lmE10M7m07QK4ixQFkUprQ6Dt29k0XtKrT1uG/BeoOlRFW4EjgbV0yOZr/6uXfaPZ6ah3drx36+2e22qyJZKEpdb5an6u79g7sPPvzRmzNWEEb58rPV/ASTY/bx7cUNkP6f/u0f8rD6Hz75NInuoGwOm/31rhV2gLD3AOl77981tipM8/xyH4Y1aK1RJ6KsVUpsQmhXuTiqT58c/vrl0y/P1la7ArgBCmMcVv64xPceLQccnl+vHx8//Nkm/97vfOu98vDP//JfThZlGKVbjYe2PkD7w3fvv12//MFHTz5/vdnv27/+xW8mRj76Ox/95ovfzqYT0ipM6rxLhxrsdfeLzXq1x0+fv2rF9KyzQNcGAZ5PHSg1Br7ZjafLRR/ylkVbk1La+9Fp/eb59p6Zr1bb5aOjNIS78ylyWi6Kj558742r337yZ3q3ufrsl89f7d9959iW+xc9dW3rwBzpinb+/qx8/NG3fv70Rfbp4QL/0bdO/8Vf/mwyXfS9nxCg0xXlL377dL3tlTKKtK1xflijjNXEDiF94ypCYIwP7h7ufVhHv0ueIdeFPYhV9iNo2MbAgjJ4jDEZzGNG4cooYkSA5bxqfbTIopA5s/Dgg1aSJIFV87IUgNW2Q40epLJ23w8ns0Xi+MWrp5bREGJIpVZDCLWxGNN2DOMYCmeUQsf58d35duuvu81qt9/uRuKcQA0tDR7HkUtTpJSnTXXZtvPGcYxDNtOmkhVRBs30YD7f9ruTR6c/++LNdx6elj4aK1xZKpcn8+anz89GchlwOcPddljv49RZpel63wHRrHEDjNfdxtMdo8AavN1nGK2cyr7vXF2PCJthN3i/mCyKwnx1db6DBKUBvM0fyDD2ZlrFLBkpefre3Xf+xY//h1yY+bTux15Q3l0edmNvJ3T//uLqqhvGKKVdt+1kNj1ZLp+/ON8N7dvV6l2889H777x5/goSltMySSid3fYbLzGk8M7Rvc315ZdvXk5qfTKrlTWny0nSWndjKm0H+XaLkRIuX
TGmZLQzhT2dHF5dXI7b/nc//OCLm9cvrt7Uyr17ODutKpuIUygLrQtbOOs0IelbTBMoxZSYyCr97tHd4eztvveKc0U6c9z3rYp8Mpm0o9/t+z7nLUhlVA7BGWWpqIv6ersz4CHxsXX/4Psf/bsf/3I3+M0QfcxAUjYFg1xtdz5zXbta27ou7p8uz76+PC7L+wdTa/Wk0qtt6/vBGFVNsWtBE5jJxPdeWwjjmHNa1lMLRFZdt30fBazZ+1QAD5z7nEyg0ij3/ycVpZyE6J37i4vVmHzqQkKEZjLftt0wDFZjXZv3nzxutAE0ri5EsGmavvVgCCd2/+bq/OzrlPaGQs4qDyhD5ph9BkOaRQi4dLpUEP3Ydzn6zMLlxDoBBTSZlyXZj54cf3H+8ie/vOr2iQm1gyf3Dj/96jyyELG1WltdWg0xD2nXFNOH9w+fvTgrnJ5N5h88Pv6rn/9603UaTRbUmmZNFbMETiBBjOy6i4OpSRFWK1TGpZhQMvv8+N7dg8UkplgZHSXXTTX4nHOkGBLQDtuDRbM4Onj5+pwh8zC+eL4D4qK0y0mTimLX+m6/T9xZ5UWFvu8w2hSx73pX1IePTxKO0+NJu19JYknJWoPAkmJdlevL9aSuY2i9HhikUGq9u0IIk9mCSNfN0vt4sDxs6iaEsW7s+fmGOc/nUxItqLKPRiRBro5qyfrnP3/+vjv46Aff/+9/+uOmPLJK9T4OKRijLTqndVISctZKWESAOXO/gx4SGG0QdFFMSL7/4Ttvz15//Pl1F5MAYGnFgB/TzsRZqe4tlhOqXr5uU+QGcVnU3baPKGMUnWRxWJVItp693nI39OT9cjE9qBuDdrfeD/3AmtxONVBLNxYM11db0+dhXAOY1ktttIJkFVKfOfk6pYC4XqftBV9cblUlzXyBQ6pcnQjRx5vLPQoEXldAGDwarAobsk6ZSZhjlCiRhYE2nY+JHdHJ8XK932mtFIjkaEq33nrcJ9t/s0kOXdKoQkrMrEAEWBOiBgYgQb5tHgeAW0sOo2LNomLMIMQAALkwpBQhMwMCioZvZJBbq09mVt9YfQQEFCAJKcAR8yi38SuRJErrxCyE6vbjLKQwsSAAsRgWQMyCtxdSCBFFkSAw3vaIpwS3Xo+/meEawaAIiCAkRC8gACwizHjrIgZITAjQ53ykySibIMQhoKhbBHViVsykkQUkg8VbsQkIhQg4A7OIQBRgllKT0SAMwmKVQobSqMpogzCfNtP55HC2/GK1+fzFc0PImW9dNiBCiAxIBCWCRREmEdBIt4V0DJJv2yaU+E1nVGAC6BkjUEhZyGeGb2QzVkAiAJlVBCEdt5ocjoIoiRyLyqbMKJATCEG6DRwioLCWwPJNS5wU4g69nSprSFUYDSAhZM7MhcPIAgAkIAqEgTNIA0UNoZLhMkKnZQRhuhXoFGG+BUQBAkKWjEROk09MGQkREIGEFBMJCKqkXFacoY/BFkpThsz6m3gecM63ZedGaSQZATxgzNkSlVpXKSAwCyIozgwgDMgA6jZ6KCxCqAkzIAKIMKG5bca7HTAa67LAHDllJCyNrY0KIWZCZ1GzEqWDdYMGJZiGWFiznNZD71NMWutCmWnhKms0AJD2YwKW0hpvQBlXZ+niOHJAq8CAJAEE50ypad/1KSrFsSrMft+jckIKWJwma6gu1DjEb8Y1ASkMLFbT0Hl9MAk5VwpnSp+33bRSiyL/4z/8/our/lcvn6Uoi0mdOIy9d7Uu6nLj+6IoyqpsptNhv960PaE9PFrstuuCaHV5fqVs2g+zY/fufH5/Ur5t989eXxOJVpBC5JiSpsuRZd0eF/qk1IdHdrvppQ8Dbt9ev0QVEbVgFqvebjZzvbS8bxxOTpv8RsMoPEardFWqbmiz8Mm3fqdO7eef/eT61SunzeX1qhtT4gyAVivJQiJj9jkwJ14ezpM2VrtmBn2/3bwet1+uyomaH9uD+5PH375z9+7i+tWwPe82QyvC1hifIoNkQ5PaDj4Hn2K6Ba6JMWQK1dTl1fXWFQ5AtCJQmHMWxm8aG4WTJBFQipBw4sqQglHKaRU5d+MoGUtXaILS2cVkPqEmpe5OPdM6n11vBslVBYTVpy8/O3/z2b3a/v2/9Xv37p6eXV1u0xjf1cW0/qtff+rN2LZDUr6pzNiGds1pNDgWH73/7se/+ZUtTRrc/fnJ+c0VGVWbKai+H/autM6xkOviDrQszXR/vSmbYjqZuspBDuUE4zik3NZTt+96nREYfvP01Sefvvj2u08u05433fNNz7YoF9Pz1Wboh9tZQFqSgFaQJFUgi7rc94MWtxs5+mwEEG7dh4ozIwgJkFaJhUl8DDF4V5vNZltSpQoFArrAxWkDOm3SkHej0jqTWh5MrILrt2sfBVkb1Jzy4PcffHj3eHnQ78MvP/myqGpEjFEAcl2Y0oCCWGizutqEVgBIkhDlu0czR8RCRTnpQmpKC5sBklo00z7325RmhSqtG1LMlkKiId5K/EoTVtbcW059yDrtt/2grYldYoWzpkGRxlZVYQpnmrpEXZTWFnU1Duy7uFntnPCwb3e7rTHojE5y24bRE6gMPJ1M66oCTR89fmTJnd1c70ZPhFVptaKDg6Yi4xAhJc4ZMnBig44D+DHkLHfmk0bj4FMC9DGS5L7zQLhcVhqkHeI4CAr3Ia45PDo8OCrt8xdfMelGaVXWKgTZDwUkp20KEVi0Vrt2VKTHBOtdGMah1GVIoe3bsjRaqQD+1brVjU6VjpEnChUQKdpzYCsP7h7YZib14vX2M9GkhJtSecgxgUYStn4d0vZSh3y8mLZGbTvvfUZl2z4h5qTYOtNM3cRRt43M1G3aNsXFowfQVDc3fuKK65selcyqZsgJUECBSlwpawAa4AJD5wdBGYZAFZhl8/h7H+Ruf1jiwsrzL17uv7zod1kUZYFh1Q+7qMQJAoJEgQRChESAkp0Czbzd7TjmYHKxqNiqGwiAXP8Nq6jbbBbFUexyOw5jSTlDHvaaSEk+qieNS4+Wkrf86U8+Dt2uLN2dyfSjD+7oqowWmiNFg1+v+2GDlO1223tKwWeyFUnabLflcnr8+KEVvBn7Iq7megYl6ZJ04YYUKcYKoCRdFNPttt203VxtkVQzadQ41o6NQgjYzCc+bcqJFWIf+nRzVpXmTl2dn11+/vTVjlJNxlmObU8CkIVUzpKLouhC6kM6nE5vui5ZBKNEgJIkSDD24CkwhoGFk+8Dar3ZDMGnMYQQk60Kv9+XGY5mtirMneNZ6r1UsziMq3bdTBsC3bV+cefd42bWi+oU7Xno2+HhwemhMr/3oKY8Nk6lNoY8/+2Lp672H96vT06gmcD5+hUq2/fbupzlpSzfn975W4d//qOnpKqPX+52RH/4d//Wx6tX7OWf/MmjV8+uJmXzajd+68HD55+/ut53v/3saj6ZPLm3qBrz7uNm38Uvnq3WnScPmmg+L45mZVXqBw8Wu9fbIelMsu97dCYlKqqyqHlh5aSWR6bEo+nzf/WXzc3s1f63f+cP3v83v72cWPnuu7Pv3Zv85JNX
/+wfPT57ZZJyZ5sVunrNyqv6L/+733zr7v3fPLs8uHt0NL/39vUn/4t/8jvd5fr/+ZcvyJiT2YF0w+Gd8nI71GZ2eudgVnC/HbCwm+tVn8I4jIfHx7q0+6vt9rxT1qpZwj7xEDZXoirXrZKr3IP5Eb+U//r/9v94cAf+V//0n/78k4+f/vynD8vWukRtd/doNp9Mvn6++vbdwyJ2TVrPYWhm6t1Z8869w5ODw9debVabeubqmWWVzy9XMWTldIrZLitVqvpo9vr15zW7b2KYfT94Pp3NmqZ8drYdhrQHnBlzNCtzL1lg1Y4gOJsUy2Xz6dW+MGrd9qlwNRpnnSEAlgJ5SCKC1hTGalAiSqGo/Zg4BwJ0pGPqHfLf+vbdn39+VhROIwHqrh8KYwtjlBEDuXSu5/y286iNLpQaYd/FdowAphtiYraYT+8cXF/2VdPMquri/GwzdH7X3j2ePtusjaiidmi5HwarzWlRPFlO3kLnt+uphqHr7bQZV9voeR/Gr9fX9bLOSR/Oi4MpfdoOxeEUlKmrYp/82Gf0mWO8uNncXF01VZWGvqwbBoUMxthJXe1TSGBolClZzXy527xp12fDOCsmd6blclJ/+exyiFSzSdat+vCn3//20K9D3z+68+Dter32own4w3fv7p/+drXfTibNxKA7mp1xaKpy2Ifn23MgiEiZ4JOnz0GlR8fLR3zy1eXNoau+8+SdX3z+MZeFI/UP//YffPrLX86mtirsm8uV0mq/bk+Wk3tHy3XfxmG33fdTU7m6oYQxhdVqx/sWkczY//6HT0qMz/c3UhUvLrvLEIog3z44Op1YSGwMFAokYiBMDDnHHsM4jCmTBp6hngcpTHk8a/Zt1/n48nxwgKeVaIRaqx4zpawQVWG9j0WBOXmrEAK/c3TwwaOjP//RXz9bbVIGFgk5AYoY3agyp3x2vb9Tqs3WG+smZfPBYXx83EBmH+Fy7S/WY5eEVU4lIrNKUA/m4eL+T579WgAP6gkiHi5LyXg9jixstYqARweLi3ZtXaEBEfLQ/8dYax8hJSZIPmdyVJLLSW7WG000a4pvf/ikrCtnCxmC1o6jysD7zmvEKOn87cvLty8gbr/94d0vftNz7y2hbly3Z+cKW9jnqysDFgEjkLYOGIiUpIiCBmB3tX1yeqeaVv/u869fXV+TsR7dvLFPnhztNm3MnFFFn7XWYx9Jc7vpQgi+FKPd/dPjqqpudv7Pf/npfhgloyaVxlhPKxGlFSTh6WyS+i70Y9dtFUfKZBv2ffB+ZJ2Lwh5UE+UMAUUSAqlL8H1Xz6t1N/7q2YWdzn738dGbr18mFCATmQRxMT9Me+67GwG7aS+cpqZRgIoQmYNkGfY389nyWx98EMZut1pfXV5llJQZmDlTHtk4bCYTTUSqAaUQJXKczkofCI0hAOOscSaNaRxHyakd9sqyxcrZKSrcbttJaRHlcr8qZ8vckdPVarM9eTjJw6DLPglWVispATSI7kMOnBLJMEbnzHI59T77IaeUuzY9bqpH9yYV1W9evt7cbO/dPZgv7evz68Q8LYtu1ykELGhQgXOn9bhsysfzeYxwturAaTezOIzzylGK18Pwpt2QVq4oJZHOpsjm7OxqeVDvE6+6fD0OX65HUnC/mv/Tv/3k1fmr3eAHyWPfqym898Fhv84X563npKqmC9lVE2LgnLZdt19tBGg6KSurq+Vy2/eJc7Tqu99+/Ob85rrzY2anVTv6QPmoriEjECjFE+diTOuu00Y1tZERSkz7ob9l39vim6YDEpHMyKCRQDEi3gIuACiLAOgkIhlQxBAqEM7MjMwgIqREoygCEgCgxCB4SxoSdVvdDAiAiZnSrWHp9vkNoghkvC2dTwKGICbWigCQBHKSyJRJiEQpIi0xSZbbTjO51Q0EGVAiMAAoJKsgJAEEFkG5bfxEBuTMQSQhRf6buqtbfJDcvpMJMWZZjamwWDnMrLqQM9PtL88sKnMWtGRDSCwAiFngFj4tgMJCtzhqZkKVWfItxkjEOKWMNoxX6xGq+YGZnC6MH/ub9cqHRACRMTMKAhMqAkOKERVQTBIQgkAEYfjGHsUZxpYVZTSYoxgNUlDqBRkyiIyZNAQNShBuq+dBJIGgSnsgTThjVowWDAFkCAyo4TaMdnu1iQEJGBEMyERSECQUp1mhQkWcFagojJIzMWkSBQCcGFkLOiADWUE8SxaMiiIiwiIZCRQS5CSiBDUhSYZMiLekLBFWCnTBAEAEBoVBYgKl9RAFkIwREcpZiHBMACiWUZNkZM9EhCTYCy+tWpCSIXYixqAi9Eluq/0U0e0XZUHOgKRyZi3gAMx/6LwBsEaTiEYsEDln7YA0Zp8A7bRoKlWs2t6BFFpHENFKaT0m7mIaEhw3hcnCUQbA0EppsHa2ULoL0ccMButaI0kp6vB4Bkpdpl0f0ghUPTyGdgOU7j8icfTqKZFx3cqj0ZPKeB/HlDIAkY45EWECFJAcmRSt29FrNcTsM05teWfeHEzdX/32+dOLXdT6vW89+fCde3/xr//tEHNdl+s0vP+de8+fn0fSl/vVVAEa0pa6cSecGGAgenZ19eHd0/OLC0Y0hT2dHx3Nj5++eq2cbPfbYTeidSPUB0fv3beeu20lHDRebNpm2kRNx0fHeUvd+opYWVv3fkRnoCiMU4LCLMYqSpz79dQWf/jD77959cVPP/5MYFNoGoLPt9qDIkUohFkg5mysFSO6sKwFIE2b2dD7MeQ8Som16fTN58PmRfcl7kyjj+5PH/69R9NZdXO1vn49Xp1tKTFA9OOoBLVVklNKohQ45A8e3WGm2MdRBIkWB4Uo3m6HlEUpYaMUC2VAARIEUWNiYBQRzyFLqpv5sPezQhlWDZdLM4uboe4jFzKkxCBV7QqnhnGcHLsY8svR/99/8rOTun7v6MFydjRE+9vNWs9m++7KLuydw3Kz3rRvujSWieGdh0cXN29v+n5qDxj1Td+LQQTahp1SiUnFmCd1FYK73l2qgIBxNqnqWT09WkZMirkd95989nUOgoqSF/E+IUA1/8lF+6f/4I/oNz8+nbmz/fB2O/ZMBGjUN2sBsxSlcdr5mCwEp0QS77sWWQxiDp4BhUEBokLJkmMSQBAcx6CMQsnWoWtcjsTACnF2WKuaX59dUVHYpiDS86LJIXkCHyF5kZQ8Kq3T975z99GDw12bl0fT3//Db3/yyXMG0oaK0lgL23bjisN97xEogISUJaflxDptbrbRaQ1CQxf8EDjlibFbv8uaEwdFubFASu3CiKj2wVMSRWpWOBQZPYPIoi4OpzUJKSTSeDibZoTaaR9CCDwgheS30huzDyFJzEYrJuwheIXepy7G2ulZZULMCBQSZx885KaeNWXx/QcP3z2ei8Wxa+vKMphujApVZXU3jm23r6q6qaYg4MhkjEozcnBOK0XO1W9uVizApJxRIspLDiADp+i9qQq0+sVqXR7QOIycMSdGa5vFMuUh5X5MFPq86kVPbLImDuPFruWiHDIJonJqcTKdHxc5Q9/nkkwirA+asjLrVRt3aV7Q4PO
srGPMN2+vzvev2Pv5xNWgC2cD5uFyu03drJwq1eTkQ46L08N6rq8+exo6zQiIUjic3Ju0Q4ySdwNvdoETsEfJyd7sjhfzilTa7DRgHCQZ7VHPZpPr3RXHoNGA6C7kwffdfvRDbCbl7GF9/PC0zHz9Zv/1+Y2AMKCrU1Hp2LezycQrMloVoAcfU8wAZJTOkH3KSbJbVjip+srkFHNtmsNJHEehVEfO198UfXDK3MPXn1/2B5qX7sG7D599/DMXYt/KKtp/+ezq7XY4npkce3LDyePl9mJ4c7EqJw4b3vmdinzv6OTo+FBGsx1Fc2KXDx/cPX+zBqPxcFEv56++/BozLKcH291e18uR+fr6AhGNpV2QZGjQiaZa90ayMlLYaEscLs5//eDOfbK0bseDRVOUta8W3PnDw4XTPj9rM8ddXIMyo/fA2doCmVL0aQg5phHGdvTroe8jAtE4jLzvp1YHgkSyWcemLjOqITBDNghdm4wtMUsObBALztQG17jCUs7hZtsi6nk5SyG40tRTS9rdnT5cdYm9D2HU2hYpvXrz/J2jo8fTKvYboGxd/enT86t+yIWdOtOhXq13XdfdfXz/4bcP3lx1Z5uLv/vwW7/5+ItlZd5f2Pe/d///dP3xJ7/+0fce0D/+1ns/+fc/788vHxwVv/vDB29vxnEVjn5w8vZqG8cUQzo/P9+m9PB4dmfWPDo+nJeH/+5nv7jqe63k/Hz3d/702198cnYxsCjdaAopi9b1rBpzfNGmUbPbrt97OK+H3X/65EHa7X788evf/c//6HA7e9S/+f37ar/bP7h39N/86PNNO5SlHRNGlVmb82H4wZ/+j9sv/ur8fF2JXH/8+bsm+ze/2W8gs1BdmZN7k3GzadeQWTt6c3YZqsIw5/2Q2uRKKRSkdhMHxYyoAFBu+vGoLgypy+uWmpQgqSD/mz/5k+9Mj/X/7v/4aog/+qvDg7sPv/9Hd3R/df7m54cgfR+ux/OH75xUTbF51b99dhWVVnfuffQP/94////8v688E9G8cvWsSVbpRQPOTSBXyhrnBp+1M22/req6u/wGTKGKsrA6sFxe3dRWV9p02z6WmlVeVm7iCtNHY8xEZFjHccyTStmJA+bHD49/8emz+bSaFfpodvJ217/a7gNDaQR4yD7VRa0x+jDOyuk4pqaaSkxfv9kOEiXkaTV71bVH88VR2ew3+wf3D2sLXz196xWVVuUhdF18OJ9DZInj9588+PTrF3fmk3G3s0nfrPeLZvZqfWmcaSbV21ebJ08md9jPq+nXF+tnq5WtrEN68Ojo12/Oeu9RR/F5c7PZop42jUJ99bbN2h0tpy5J3+0vAnz4waOz9e7Fm+t915eF4ZhDDEgoSB3EclEMq1HGoJyb1lMUEkkGuPe5MKUfBwF62223CqipjCu0qN11e286/71vv/PVly/quoQ2Hzbm64sX5JCMuEKl4G4k/Deff2xTMFkOxvaH33nn58/f7PsQmdOYtSgl1EyIS1VW5c2+O57P5q54WNUypG59M4S8j5GUoPDPvvh6dnrAzItFvRt8VPjmZo8kpcM71fykWLy82iqWt0NbNlVXGLLYaAdt+3h28Oe/+tU+JnJWtNal6zR9HfNxVRaIb1ZrhZgkz+YzyZIzZ0mosFJa4mBIf/fxO19fXo0xKYtPFvW6BWXoqKyasphO7eubDVxeXO08o55Mq8xpMrGoXb9Pl93w9uMvBx+FVFHQvh1IISORIaVVRACUy+3eCW533bsPT+ezOJLyQs/eXlpleh+oLvYhQcZaqwzDLl9/ebMGLSYSRhnTMGY8O99lZSalHYcUQl/b2duUnDKGhJD22+E/korWq94Wrhv2wTMZnRMyKDT69MH9hw/vNtZCjmkMRVmWZTN0npR1RYEwvr5486tPP9GQK5vXF+v7Ryd4fdP7uB2TcU6MiQAM5DOzzwHQGsPASgkLrlo/L/T9x/e1KX7+5cuL7SaxJpTSFlaXry/2LCn4aKyrKicIKVNVThfLu6/OL4Gp7UIjtIv7q/O1lyBKF4XlqGd2iiAhRB+HQZBspbHS1lbFUhS9Xe2LoRfhWV23g/c5AzOpWBgTEikC4Rg8eIvnPQTWZ9udVR9868P3P/76tS5NVrbbx5zLSbXc7baq1CSm9SGnNGRfmvJgVmRT3Hv0zrQ6bXftuLq+2m1vpdUBJUff+jCvipxHY52hzGC34zApKtScNRkscpY4JgJNAERKaVBFGXNaTMu2j0IxJZxUlUje9aGYTIbWmw4XVF28frX44P1vv/tgP8Q+IpJGY6bFQe/7KO0QElolgpmp9Nj1EZKaGfdkOfvBB3e/+vqz8+2b6XxZ1Ufr0XebkQmMMxNXFEfTN7uhFXq2Wk/H+GReGUlK4iqlgMBAGWBeOqPIGVqvu5TDoihmTaV6npdzRZPLrk/MKQsh7jNn0krwq20Lz96U4B8cFyjp/PV4vU+XZ/3VLlxcjVrZyPGm2y2Omh/Mv/fzr56SzWw4M3Sht6AXVZWlONtux8gHk8m3Ht+/2nW/eXnmLBFZFh5ICqOAMWUASQIkElLGTTdmRYvDZby4yoKjT6DsN966lJRojUJE+dbjI5KEUgaG20ZyUYAowoKMkpmZgQEziiKyCgxizMAZMhARRclGITAgAiFkAESVEUEQGBQCZxEBYeEMGRABQ0xWiSAwQ07CgkwoCJVThliMMEDIcgsAUoS3XOVICUmzEN6GqBIbQINMCgEpAwbEhOCFkW8pRQgACCAMAIICChABgXG9987RRMlJ47btuGPpI6NSAIo0s8AgORDeGqWQUMOt6wqsxoxcKFIIOSelQIASZKUoUO7SyKCStm/3w3i1PzqYYV1y76rKsvcYJQJowpBZa3K32GVBD7BnCMCcRSNoAAJhhrbF5LFsNLlIJUhMlDAEEiZ1+6dEEjIgMrIiFCC47faSrGxyNRp9y1/TOSOLAkpIiRkTYyEEJEiMwFaDMVmDEaTEibKoJEpIMhCSQgFA5gwEpE2mjJhlIhYAPTBnZAURhQEIswiIkEURNoZQo2FIObMQEDABGzA1xiS1NjFGTYoFRCildJv9IWQ0KmUWVENODVGtKWUWyAgIwn0Sh1hbnDktIWhSTpFAGrIIAgOKgKBGZk1KC2qEQiFyBlCgCSACQIhpyP5wUkRAH3LOnDihJiC6xWE3pc0oZCgwoNGj0a9X+8Siq2Iym8i4F1EAoAvlc+IQN2nQZBa1W4/+puNZU5osQ5cMGkygswbWN6veIE8X5cZfOofvfzB7fR46yiA2su7DEKM4IpI0joEMEaJR4FO2zoyMfetZgMe8OJxd3HRnq10XArqirKuI9OWr88iuD8Nu7KGE1zd70oaBBh+KplKuQROH1iMq64qbNqy6m4BpUrq+yy9enrmZnc/cuw+Pcx5mtRpn/mpUf/iDvzchc9d0vt+s9m+fr7pcaLWckDNjP6QhNEWl3aSuTpuiGoY+JR63HQfhBBlz5vjg7ul//j/7Zz/95ccvvvyUcz9w2naD78eimFmJHkAQfJKE2CzqELItdCbcQSrQtqm/uj
oR+jT5PKfPTg/uZ611wvf9Ounz892zs5PK3m4fpWg9IaSyU3Z6+1KavagaLo8wePH6avOoCkCLKIQZxUDkVyTsOuJcGTvRIABDjEkIhOTvfGvpuWeyH6dtDtdjOZKNtcbl/OduvQt9EZ3JtNRZGwmkxqU1UKoSjRZIbFZNn2Q+SE8OFH77/56jxnuxlHlvT06jIPYfWbFWhXuioK/PzJ0zz2xweTSV3dXq9fra4YTUI8nL33O5/+wb//2X84VJCy3D/dZx8TQ0jw5PWtJGkhXne3oLjpx9R2Y2JEU5TF/nQyO6mJuI/D6fFJu2lEGIDttEwZu7aNmgttZvNJqZEFj09VYtaUYmCJEoXdtL5arlxpFWVT03XbnX/+HEk3Y0jn14gUcuh91JXLORqUorSe84Nv7O3vTcGa+cFxbOvLsxfV5OurgffeGq2dhpQlhmag4/leycpxLDgVh/PYdMvtdjqpzm4uUwpWGztVIYbVDvYntcrROtWMXQbgFFh0PSs9Y9Nylet3Tu4tr5dW6TEkB7Rfl4/2J07D9fW2j3la2vfuzx7ONQ79j7/7sdKFz/jxO6eN5M9eXqy7FoU4Ziv80f2jAqhgYGtGzqYuN1e7ssIEaBHSruGcCwu20MKgAR8cHqxD/upmnaKa1mVVqEfHi83gQasc882232Terbqui5xYgIvSCYMfM2mlnd103QMsHj98+OVlU+/V3cq34/D48XHTLqFNdYbKTY2iYTfYjDmkjx4dN6NnVrFP7P30xD58dPjzV2fD0MPi6MnyRlXON+vt7raYTc5ub7XGw/n+/uLePzz/LTMT0c12+8fvP/71Lz7b6jgavev5YDaNPi9mswR5zOn68ma7bd+5f3K6OPr0g4/mSBDS1WqJLDrH4+MZaqdXbvQsiECqntdJaHu9NnXRbLvFgwMNRUrp6eVF/Uu6vzjYBLmI3eDTi7OLaWHnClabAcDsdiN73g19vzc/ONrfrqOzehhDoxSQXm66wjjKTYr04uwGYDItXaE15KhRzU7qJg40Yv9mM6HZsOoMGLOoJ5PZK9wBMwoBCkdxVcUKOUVTaWfUarczigwpDnlvPgGp+27smmFvWoHk68vrojKAanE4JVA5qcmsvt5sPrq3+L3vvPvrz18lgyfTcrseY5RffvbUETpnlcDR3j63u64ZY8qZ6ZOPHqs03m7X480ymzpkQMT/ZFWEbMykKOYKtP/q75/tVks1bN959/jg/mx10+zt1W07ROF6WgRk3w+YmFG00cnnnEVyDjk7p7XWGqHvw3TqQuaMmYiZYoacynz6/snTr147pVnnJoyjT8bpyczGJPN9u386v77YWcB6VsUorjAhJwScLibTeXn15iaGcHx/sd525+dvDCNm6ZsumfzF8+dd05wezo/eOT2MaVzuvE+GVH2v/Oi7722v29X5el67XbsdupRiVIWWCC9e3tT1DNJAPI4pBIXDGAyaMSatSU90J3loRhLuuiApE8lmNxilUFFhbBgCoADIth3ztiGkWVU23TCqYADbQRLRcnP7rQ8fP/jg/k//9G+Wl7eLaTGG4aNvfXB1vUIfIfLepCJIfdelxKkUBtREZeWs08aq0EdjTD2vfOS+88LY7PpqSsFnQmWc5Rjn85qQ0pAwZeIsALPpLPQDatFOFYsjSiIx3PmCmDmjIFAUgSwGlTJqOnNDd5Ga4eKs7dsxhwjKKaQYRh+0KU1W0LRhGEOlrR8yJYkC66Gd1JYwHOxNG/S3QwMkaRjJ5f2H1b0/fGQPD9rfDocOr24bZWdDSJnswOCX3jkAq3dRFBRtGLs+Vs4FH6iG08fHb37xQqXISZTVfhiDD7OCPnjniPZKVua9eu/dz85XXQeAKUfnXMip770ITKauGYZR0hWkvN6pTTNmDgIppeW6Nc4AZ1QqMPftMJ3VYvXovRZo/dfXg0IZIWJUY8hGKSRkYoUowgIREtxtEwBFUDgxAokwImv6OkOusATMmlAAWEQhAyCSIAApyMycGe+cZYikkJEFGEiAWDHz164zZADhO9ObIKBRpI0yFkmDABNSEg7CKWZgSRnRg7IshGAoM5NSugTfJoAkyFFI4l3bGmUGQp1ZFAIihhw5S8gcUwo5Z4Fwl5EkgIR4F7UtLJlRRClKICzMWQgxYw4xK00AklIixZyYEH1KChVpZErMgIJaEylUBChiDVWlG3ddXTrtKI65G8c+paKqgABAck4ZRBJkJSiAgMZivqv8VaxLGPKwSnTwzvEHg/3sF79alt3sdz76vz36yb/8X/6N98Mo4DIU+6AMJAZUEjqACAbFVXlxaKZHFikYGzMIAGQGo4G1pABNAz5DcowOsAQgUAUkAGNBFyACkgAVWAvCQARKYDah8j6g2O3tEDqICTKwMAtDStBtoQtpasPewuYsIlQYe3yyOJnS2aQb3wzbm5EBzMSBorH1maEoQVAQkEARIYAICyrimEAYBBSSYqYotqRxGMQoAeDEgAIkRBRZBAFBlCbkyASoxVTUB4+E1irUMJsbNcmzRTE9soak0tPh6msbZuD48vK86brV0n/1Yvn6zY3iOCnp/VPeP4jLnpotzw7mRw/ff/b56uJZX4noqWpD9OOYGRF0lgQZtVYpS/jH5QVp1kaj5tXVVb8bFdrD0/3lauy3XeH0F/6F+DGGaI32vlHWvvOdb//y7GbX5+VNk0wSlJ+fb5745nXTfOf7j//5f/2Hk8XeX/67vztbrhWBIdFTsCWEHJSwAhaC+rBMkjZN23WjAGnLrKH346Le78ewebn2AxKhSGROu21PyJzCbheKwprCmtq0YYidtwBKKWYotKunta74028/+p/+3//iV3/12fvvgR3D3/7NF9dnz0LXkaZlk4YQelQhQpP7sQ2qspz9VIcHp6U9QFtp65mNEAkZkcQSExmFBByFFAEkrbLVqJ2r50UYImfRVhcEtcLpxPhxUFpliaQEYiYGpxQT5AxEFGISEYVEGpgzcOaUEuK26+69u5iqkgFfveiysDIgIiAASHd/OVmS0RoACkPWmLI0RqCclZWzdsFHe+WH3/5Q2/ri1WV7vakKUz+ann3x4v/zb37qtzuPNg0xSvzdf/KN7W2T+vHFxWq53b18fs7KOFds1q1jGbpekbGVMWUJqdWaQJPnHQuT1gySUq4KkwwBQuKcALTVyuphNxqgBIlJJofz3bh+8OC+csWQ+cmvPvfrNvkRSRmj1UQfnB6UZc2pT12DQE0KfdvlmFLyYRzuqgOYMwJbQ2RU1jCvZtvbse/912cD0Z69tThEv41DMdnf7Nbr1ZaySkiA2DUBEJGIfBZGRPKdT332nKcnarNrum6Zm8hDKOvaxxRCyvnrRgKRbK0OMQNAWbkUYozJOtf2vrvt9vZn2/Q89l4RIoiA9HFk5ODHFJiJlVJKQUopZ1AIoLAfhpzEoAKrBsqKRRRWdZEJTZaUWBj2FtNd5yVlU5L3iYFSzkVtlCGOqapN3/WzukDUEtgUetUNs6owk5IQGeLByR44E6+2Q58yxzAkqYxAdmSOprVFHbOEmJklisw03tu
f7U1UZWnb9u+8f2z7us3no8QcPEAeJdFUFfNifTG8fnXR9t2QODNcbC68C9fb3qcoSSIHzuHxNx+eHp88+/I8r1pO2VoiyRzFaQUhZmCtEATdxCSVdushdoIFiEBM2fiYRp7V5ThGdPjV69ddO8QuTopybP18z833y6GX1XKjSXUwZM45pboqFZERUgqWmy0i5MQx5XHcGWO1rQebskER56bl0+Xt+sn25HgReh9jJGUKV6csRishaocxQeyaYJ0jlUGyLakdWzRF4rxe9lrZybwyzrY7r0kXNU0P9Pa2mS9cN3hrUheHNnAG0SWllRfJXBd9iD6Obdu7olJKde0IGVChcykDAQ2RGkg+h1w6c/dESpy/NsAnYAAijZE5pcy5rKwQscoMkAOHrwvQACbzdd8eT+y9ubl3snhw/+DqyauFyrEbF8fFxO31y+bN682kgGoytRaKqUz291jb4IFjnB4ejLs1imTO1mhG8qOPPqvCffOHn17cdu+xI6u2XVpuezObdQBPn78aUl61w8VqWzB/8P59Evzkm+8WL3V+cSsBHOTXf//brCx6rwWUDm+6m1ltNqHdNePQpbQZlm3aMitCksyhzYkbzwhq223vrP0hYgqZZ4VBUILTgoLvbOmqSn315X9MwSc/OqMyU5T86vUVZ6gms8EHSkkgT0unQEREELsubcbUAh+krcRxj1AKssRaQ+AIciflM2NIPStTud6PAryNzYj+5atmvqhUUXZd2Fy/Wl5d708LrdXQ9in7w/tl4hzGWJZljjJVNRsQyYYUEta1DSnLmG1phz4YhcMYtdFWM6dx6DEOYVa5yZ51Rh4c1ENoJjN89vTL2CWnFUtmyMroyXQybIeqrBcHs74PcRQyWGqz2m5OJ+WTV6so6ujBwfDyAqy6G9N+l0AnbcdNEwQoxdQM2dg2C/sgzHDwge3i5pfP+2IyVYY2m/71q6XVmoztQyhriyhtNyiD3RBcbZyyBUjOYBU2u02/2wEIg2x3raQEwJI5xBBjdtYUlev7FnMws6ofw141DV2MKPcenNR1OXTD7apRSnVt32zbyLmwhgH6MWQB0jqlxChB2ETimFiJqYxWzGP75S8vim99lKwd4u5wVkTz9dlAsg7grdI5BWetILy8vT0s7e16i5W2k5qgVIkTYrvrgVFi5pg0agJo+6Hxqa4Ko1Tf9RyDBkbmJDir6Xz5fESmRGQhxzjmrIBevBmKWfXw/jwGP6nw+7//MKyDRj2IWV5tQ8pSOUUAOcmQtFGlkr2qePj4GJPcXK+6JnR+DMx1QTmHcUzAhkLo2x3q40RQ25IBHaCxVREp5SCK6+kk6KSNGf3YbHepsDe7Xfw6ewH6JEkJEwiknPjevfrbv/ewBDzkw5sR/OgJBklpf2/vw289lmHoOwxZcoSpdnHXJ0FTFcvz26Iqp062u3EZti4Xj987XezvGdBPv3peOSdGzWa1rnXbhu1NZ/f3znYXqIuDWm+GcX7vqCnj7P1jCl2/63bbXXa46xJnKGeuUDpy6Hfdr1dfPX/18vho/4//s+9VRJ+9ONPIHLkfR3FZa9qflG7squrgp3/3mwTmvXff8b6pSBTEOPQoVNTms6dvHpweDjHWBcY4vtmuMMnNwTT4XJTO1E4v14bUwOGy2wUETTqBJMlaI2buU/7gux/fTnLUzg9RawCDOUXW/MmP38dKbm9ueNN3Y7z/6P44jrFX20zMokiyZIZczxy61PtECrIG33mIUURpVKxUDDFnUUpVtQPNRAr64EcinU8f7DVx+MFPvvuzv/mH9cVw4+xxVVpjrdUQxAAk4iR+vfR783pxOIdDd/lmk0IqncqtDzBGzctmACDfNdLzdP6PG9O3vOUtb3nLW97ylre85S1vectb3vKWt7zlLW95y1ve8pa3vOUtb3nLW97ylre85S1v+U/4vwAXh2gItVZatQAAAABJRU5ErkJggg==\n" + }, + "metadata": {}, + "execution_count": 6 + } + ] + }, + { + "cell_type": "code", + "source": [ + "" + ], + "metadata": { + "id": "92QkRfm0e6K0" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/stable-diffusion/scripts/sample_diffusion.py b/stable-diffusion/scripts/sample_diffusion.py new file mode 100644 index 0000000..876fe3c --- /dev/null +++ b/stable-diffusion/scripts/sample_diffusion.py @@ -0,0 +1,313 @@ +import argparse, os, sys, glob, datetime, yaml +import torch +import time +import numpy as np +from tqdm import trange + +from omegaconf import OmegaConf +from PIL import Image + +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.util import instantiate_from_config + +rescale = lambda x: (x + 1.) / 2. + +def custom_to_pil(x): + x = x.detach().cpu() + x = torch.clamp(x, -1., 1.) + x = (x + 1.) / 2. 
+ x = x.permute(1, 2, 0).numpy() + x = (255 * x).astype(np.uint8) + x = Image.fromarray(x) + if not x.mode == "RGB": + x = x.convert("RGB") + return x + + +def custom_to_np(x): + # saves the batch in adm style as in https://github.com/openai/guided-diffusion/blob/main/scripts/image_sample.py + sample = x.detach().cpu() + sample = ((sample + 1) * 127.5).clamp(0, 255).to(torch.uint8) + sample = sample.permute(0, 2, 3, 1) + sample = sample.contiguous() + return sample + + +def logs2pil(logs, keys=["sample"]): + imgs = dict() + for k in logs: + try: + if len(logs[k].shape) == 4: + img = custom_to_pil(logs[k][0, ...]) + elif len(logs[k].shape) == 3: + img = custom_to_pil(logs[k]) + else: + print(f"Unknown format for key {k}. ") + img = None + except: + img = None + imgs[k] = img + return imgs + + +@torch.no_grad() +def convsample(model, shape, return_intermediates=True, + verbose=True, + make_prog_row=False): + + + if not make_prog_row: + return model.p_sample_loop(None, shape, + return_intermediates=return_intermediates, verbose=verbose) + else: + return model.progressive_denoising( + None, shape, verbose=True + ) + + +@torch.no_grad() +def convsample_ddim(model, steps, shape, eta=1.0 + ): + ddim = DDIMSampler(model) + bs = shape[0] + shape = shape[1:] + samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, eta=eta, verbose=False,) + return samples, intermediates + + +@torch.no_grad() +def make_convolutional_sample(model, batch_size, vanilla=False, custom_steps=None, eta=1.0,): + + + log = dict() + + shape = [batch_size, + model.model.diffusion_model.in_channels, + model.model.diffusion_model.image_size, + model.model.diffusion_model.image_size] + + with model.ema_scope("Plotting"): + t0 = time.time() + if vanilla: + sample, progrow = convsample(model, shape, + make_prog_row=True) + else: + sample, intermediates = convsample_ddim(model, steps=custom_steps, shape=shape, + eta=eta) + + t1 = time.time() + + x_sample = model.decode_first_stage(sample) + + log["sample"] = x_sample + log["time"] = t1 - t0 + log['throughput'] = sample.shape[0] / (t1 - t0) + print(f'Throughput for this batch: {log["throughput"]}') + return log + +def run(model, logdir, batch_size=50, vanilla=False, custom_steps=None, eta=None, n_samples=50000, nplog=None): + if vanilla: + print(f'Using Vanilla DDPM sampling with {model.num_timesteps} sampling steps.') + else: + print(f'Using DDIM sampling with {custom_steps} sampling steps and eta={eta}') + + + tstart = time.time() + n_saved = len(glob.glob(os.path.join(logdir,'*.png')))-1 + # path = logdir + if model.cond_stage_model is None: + all_images = [] + + print(f"Running unconditional sampling for {n_samples} samples") + for _ in trange(n_samples // batch_size, desc="Sampling Batches (unconditional)"): + logs = make_convolutional_sample(model, batch_size=batch_size, + vanilla=vanilla, custom_steps=custom_steps, + eta=eta) + n_saved = save_logs(logs, logdir, n_saved=n_saved, key="sample") + all_images.extend([custom_to_np(logs["sample"])]) + if n_saved >= n_samples: + print(f'Finish after generating {n_saved} samples') + break + all_img = np.concatenate(all_images, axis=0) + all_img = all_img[:n_samples] + shape_str = "x".join([str(x) for x in all_img.shape]) + nppath = os.path.join(nplog, f"{shape_str}-samples.npz") + np.savez(nppath, all_img) + + else: + raise NotImplementedError('Currently only sampling for unconditional models supported.') + + print(f"sampling of {n_saved} images finished in 
{(time.time() - tstart) / 60.:.2f} minutes.") + + +def save_logs(logs, path, n_saved=0, key="sample", np_path=None): + for k in logs: + if k == key: + batch = logs[key] + if np_path is None: + for x in batch: + img = custom_to_pil(x) + imgpath = os.path.join(path, f"{key}_{n_saved:06}.png") + img.save(imgpath) + n_saved += 1 + else: + npbatch = custom_to_np(batch) + shape_str = "x".join([str(x) for x in npbatch.shape]) + nppath = os.path.join(np_path, f"{n_saved}-{shape_str}-samples.npz") + np.savez(nppath, npbatch) + n_saved += npbatch.shape[0] + return n_saved + + +def get_parser(): + parser = argparse.ArgumentParser() + parser.add_argument( + "-r", + "--resume", + type=str, + nargs="?", + help="load from logdir or checkpoint in logdir", + ) + parser.add_argument( + "-n", + "--n_samples", + type=int, + nargs="?", + help="number of samples to draw", + default=50000 + ) + parser.add_argument( + "-e", + "--eta", + type=float, + nargs="?", + help="eta for ddim sampling (0.0 yields deterministic sampling)", + default=1.0 + ) + parser.add_argument( + "-v", + "--vanilla_sample", + default=False, + action='store_true', + help="vanilla sampling (default option is DDIM sampling)?", + ) + parser.add_argument( + "-l", + "--logdir", + type=str, + nargs="?", + help="extra logdir", + default="none" + ) + parser.add_argument( + "-c", + "--custom_steps", + type=int, + nargs="?", + help="number of steps for ddim and fastdpm sampling", + default=50 + ) + parser.add_argument( + "--batch_size", + type=int, + nargs="?", + help="the bs", + default=10 + ) + return parser + + +def load_model_from_config(config, sd): + model = instantiate_from_config(config) + model.load_state_dict(sd,strict=False) + model.cuda() + model.eval() + return model + + +def load_model(config, ckpt, gpu, eval_mode): + if ckpt: + print(f"Loading model from {ckpt}") + pl_sd = torch.load(ckpt, map_location="cpu") + global_step = pl_sd["global_step"] + else: + pl_sd = {"state_dict": None} + global_step = None + model = load_model_from_config(config.model, + pl_sd["state_dict"]) + + return model, global_step + + +if __name__ == "__main__": + now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + sys.path.append(os.getcwd()) + command = " ".join(sys.argv) + + parser = get_parser() + opt, unknown = parser.parse_known_args() + ckpt = None + + if not os.path.exists(opt.resume): + raise ValueError("Cannot find {}".format(opt.resume)) + if os.path.isfile(opt.resume): + # paths = opt.resume.split("/") + try: + logdir = '/'.join(opt.resume.split('/')[:-1]) + # idx = len(paths)-paths[::-1].index("logs")+1 + print(f'Logdir is {logdir}') + except ValueError: + paths = opt.resume.split("/") + idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt + logdir = "/".join(paths[:idx]) + ckpt = opt.resume + else: + assert os.path.isdir(opt.resume), f"{opt.resume} is not a directory" + logdir = opt.resume.rstrip("/") + ckpt = os.path.join(logdir, "model.ckpt") + + base_configs = sorted(glob.glob(os.path.join(logdir, "config.yaml"))) + opt.base = base_configs + + configs = [OmegaConf.load(cfg) for cfg in opt.base] + cli = OmegaConf.from_dotlist(unknown) + config = OmegaConf.merge(*configs, cli) + + gpu = True + eval_mode = True + + if opt.logdir != "none": + locallog = logdir.split(os.sep)[-1] + if locallog == "": locallog = logdir.split(os.sep)[-2] + print(f"Switching logdir from '{logdir}' to '{os.path.join(opt.logdir, locallog)}'") + logdir = os.path.join(opt.logdir, locallog) + + print(config) + + model, global_step = load_model(config, ckpt, 
gpu, eval_mode) + print(f"global step: {global_step}") + print(75 * "=") + print("logging to:") + logdir = os.path.join(logdir, "samples", f"{global_step:08}", now) + imglogdir = os.path.join(logdir, "img") + numpylogdir = os.path.join(logdir, "numpy") + + os.makedirs(imglogdir) + os.makedirs(numpylogdir) + print(logdir) + print(75 * "=") + + # write config out + sampling_file = os.path.join(logdir, "sampling_config.yaml") + sampling_conf = vars(opt) + + with open(sampling_file, 'w') as f: + yaml.dump(sampling_conf, f, default_flow_style=False) + print(sampling_conf) + + + run(model, imglogdir, eta=opt.eta, + vanilla=opt.vanilla_sample, n_samples=opt.n_samples, custom_steps=opt.custom_steps, + batch_size=opt.batch_size, nplog=numpylogdir) + + print("done.") diff --git a/stable-diffusion/scripts/tests/test_watermark.py b/stable-diffusion/scripts/tests/test_watermark.py new file mode 100644 index 0000000..f93f8a6 --- /dev/null +++ b/stable-diffusion/scripts/tests/test_watermark.py @@ -0,0 +1,18 @@ +import cv2 +import fire +from imwatermark import WatermarkDecoder + + +def testit(img_path): + bgr = cv2.imread(img_path) + decoder = WatermarkDecoder('bytes', 136) + watermark = decoder.decode(bgr, 'dwtDct') + try: + dec = watermark.decode('utf-8') + except: + dec = "null" + print(dec) + + +if __name__ == "__main__": + fire.Fire(testit) \ No newline at end of file diff --git a/stable-diffusion/scripts/train_searcher.py b/stable-diffusion/scripts/train_searcher.py new file mode 100644 index 0000000..1e79048 --- /dev/null +++ b/stable-diffusion/scripts/train_searcher.py @@ -0,0 +1,147 @@ +import os, sys +import numpy as np +import scann +import argparse +import glob +from multiprocessing import cpu_count +from tqdm import tqdm + +from ldm.util import parallel_data_prefetch + + +def search_bruteforce(searcher): + return searcher.score_brute_force().build() + + +def search_partioned_ah(searcher, dims_per_block, aiq_threshold, reorder_k, + partioning_trainsize, num_leaves, num_leaves_to_search): + return searcher.tree(num_leaves=num_leaves, + num_leaves_to_search=num_leaves_to_search, + training_sample_size=partioning_trainsize). 
\ + score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder(reorder_k).build() + + +def search_ah(searcher, dims_per_block, aiq_threshold, reorder_k): + return searcher.score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder( + reorder_k).build() + +def load_datapool(dpath): + + + def load_single_file(saved_embeddings): + compressed = np.load(saved_embeddings) + database = {key: compressed[key] for key in compressed.files} + return database + + def load_multi_files(data_archive): + database = {key: [] for key in data_archive[0].files} + for d in tqdm(data_archive, desc=f'Loading datapool from {len(data_archive)} individual files.'): + for key in d.files: + database[key].append(d[key]) + + return database + + print(f'Load saved patch embedding from "{dpath}"') + file_content = glob.glob(os.path.join(dpath, '*.npz')) + + if len(file_content) == 1: + data_pool = load_single_file(file_content[0]) + elif len(file_content) > 1: + data = [np.load(f) for f in file_content] + prefetched_data = parallel_data_prefetch(load_multi_files, data, + n_proc=min(len(data), cpu_count()), target_data_type='dict') + + data_pool = {key: np.concatenate([od[key] for od in prefetched_data], axis=1)[0] for key in prefetched_data[0].keys()} + else: + raise ValueError(f'No npz-files in specified path "{dpath}" is this directory existing?') + + print(f'Finished loading of retrieval database of length {data_pool["embedding"].shape[0]}.') + return data_pool + + +def train_searcher(opt, + metric='dot_product', + partioning_trainsize=None, + reorder_k=None, + # todo tune + aiq_thld=0.2, + dims_per_block=2, + num_leaves=None, + num_leaves_to_search=None,): + + data_pool = load_datapool(opt.database) + k = opt.knn + + if not reorder_k: + reorder_k = 2 * k + + # normalize + # embeddings = + searcher = scann.scann_ops_pybind.builder(data_pool['embedding'] / np.linalg.norm(data_pool['embedding'], axis=1)[:, np.newaxis], k, metric) + pool_size = data_pool['embedding'].shape[0] + + print(*(['#'] * 100)) + print('Initializing scaNN searcher with the following values:') + print(f'k: {k}') + print(f'metric: {metric}') + print(f'reorder_k: {reorder_k}') + print(f'anisotropic_quantization_threshold: {aiq_thld}') + print(f'dims_per_block: {dims_per_block}') + print(*(['#'] * 100)) + print('Start training searcher....') + print(f'N samples in pool is {pool_size}') + + # this reflects the recommended design choices proposed at + # https://github.com/google-research/google-research/blob/aca5f2e44e301af172590bb8e65711f0c9ee0cfd/scann/docs/algorithms.md + if pool_size < 2e4: + print('Using brute force search.') + searcher = search_bruteforce(searcher) + elif 2e4 <= pool_size and pool_size < 1e5: + print('Using asymmetric hashing search and reordering.') + searcher = search_ah(searcher, dims_per_block, aiq_thld, reorder_k) + else: + print('Using using partioning, asymmetric hashing search and reordering.') + + if not partioning_trainsize: + partioning_trainsize = data_pool['embedding'].shape[0] // 10 + if not num_leaves: + num_leaves = int(np.sqrt(pool_size)) + + if not num_leaves_to_search: + num_leaves_to_search = max(num_leaves // 20, 1) + + print('Partitioning params:') + print(f'num_leaves: {num_leaves}') + print(f'num_leaves_to_search: {num_leaves_to_search}') + # self.searcher = self.search_ah(searcher, dims_per_block, aiq_thld, reorder_k) + searcher = search_partioned_ah(searcher, dims_per_block, aiq_thld, reorder_k, + 
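To make the pool-size heuristic above easier to follow, here is a condensed, self-contained sketch of the same selection logic on a synthetic pool of normalized embeddings. The pool size, dimensionality and the final `search_batched` query are assumptions for illustration; the builder calls mirror the ones used in this script.

```python
import numpy as np
import scann

emb = np.random.randn(50_000, 768).astype(np.float32)          # synthetic datapool
emb /= np.linalg.norm(emb, axis=1, keepdims=True)               # normalize as above

k = 20
builder = scann.scann_ops_pybind.builder(emb, k, 'dot_product')
pool_size = emb.shape[0]

if pool_size < 2e4:        # small pool: exact brute-force scoring
    searcher = builder.score_brute_force().build()
elif pool_size < 1e5:      # medium pool: asymmetric hashing + reordering
    searcher = builder.score_ah(2, anisotropic_quantization_threshold=0.2) \
                      .reorder(2 * k).build()
else:                      # large pool: tree partitioning + AH + reordering
    num_leaves = int(np.sqrt(pool_size))
    searcher = builder.tree(num_leaves=num_leaves,
                            num_leaves_to_search=max(num_leaves // 20, 1),
                            training_sample_size=pool_size // 10) \
                      .score_ah(2, anisotropic_quantization_threshold=0.2) \
                      .reorder(2 * k).build()

neighbors, distances = searcher.search_batched(emb[:4])         # query a few rows
```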
partioning_trainsize, num_leaves, num_leaves_to_search) + + print('Finish training searcher') + searcher_savedir = opt.target_path + os.makedirs(searcher_savedir, exist_ok=True) + searcher.serialize(searcher_savedir) + print(f'Saved trained searcher under "{searcher_savedir}"') + +if __name__ == '__main__': + sys.path.append(os.getcwd()) + parser = argparse.ArgumentParser() + parser.add_argument('--database', + '-d', + default='data/rdm/retrieval_databases/openimages', + type=str, + help='path to folder containing the clip feature of the database') + parser.add_argument('--target_path', + '-t', + default='data/rdm/searchers/openimages', + type=str, + help='path to the target folder where the searcher shall be stored.') + parser.add_argument('--knn', + '-k', + default=20, + type=int, + help='number of nearest neighbors, for which the searcher shall be optimized') + + opt, _ = parser.parse_known_args() + + train_searcher(opt,) \ No newline at end of file diff --git a/stable-diffusion/scripts/txt2img.py b/stable-diffusion/scripts/txt2img.py new file mode 100644 index 0000000..bc38640 --- /dev/null +++ b/stable-diffusion/scripts/txt2img.py @@ -0,0 +1,352 @@ +import argparse, os, sys, glob +import cv2 +import torch +import numpy as np +from omegaconf import OmegaConf +from PIL import Image +from tqdm import tqdm, trange +from imwatermark import WatermarkEncoder +from itertools import islice +from einops import rearrange +from torchvision.utils import make_grid +import time +from pytorch_lightning import seed_everything +from torch import autocast +from contextlib import contextmanager, nullcontext + +from ldm.util import instantiate_from_config +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.models.diffusion.plms import PLMSSampler +from ldm.models.diffusion.dpm_solver import DPMSolverSampler + +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from transformers import AutoFeatureExtractor + + +# load safety model +safety_model_id = "CompVis/stable-diffusion-safety-checker" +safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id) +safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id) + + +def chunk(it, size): + it = iter(it) + return iter(lambda: tuple(islice(it, size)), ()) + + +def numpy_to_pil(images): + """ + Convert a numpy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] 
+ images = (images * 255).round().astype("uint8") + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + +def load_model_from_config(config, ckpt, verbose=False): + print(f"Loading model from {ckpt}") + pl_sd = torch.load(ckpt, map_location="cpu") + if "global_step" in pl_sd: + print(f"Global Step: {pl_sd['global_step']}") + sd = pl_sd["state_dict"] + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + if len(m) > 0 and verbose: + print("missing keys:") + print(m) + if len(u) > 0 and verbose: + print("unexpected keys:") + print(u) + + model.cuda() + model.eval() + return model + + +def put_watermark(img, wm_encoder=None): + if wm_encoder is not None: + img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) + img = wm_encoder.encode(img, 'dwtDct') + img = Image.fromarray(img[:, :, ::-1]) + return img + + +def load_replacement(x): + try: + hwc = x.shape + y = Image.open("assets/rick.jpeg").convert("RGB").resize((hwc[1], hwc[0])) + y = (np.array(y)/255.0).astype(x.dtype) + assert y.shape == x.shape + return y + except Exception: + return x + + +def check_safety(x_image): + safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt") + x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values) + assert x_checked_image.shape[0] == len(has_nsfw_concept) + for i in range(len(has_nsfw_concept)): + if has_nsfw_concept[i]: + x_checked_image[i] = load_replacement(x_checked_image[i]) + return x_checked_image, has_nsfw_concept + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--prompt", + type=str, + nargs="?", + default="a painting of a virus monster playing guitar", + help="the prompt to render" + ) + parser.add_argument( + "--outdir", + type=str, + nargs="?", + help="dir to write results to", + default="outputs/txt2img-samples" + ) + parser.add_argument( + "--skip_grid", + action='store_true', + help="do not save a grid, only individual samples. Helpful when evaluating lots of samples", + ) + parser.add_argument( + "--skip_save", + action='store_true', + help="do not save individual samples. For speed measurements.", + ) + parser.add_argument( + "--ddim_steps", + type=int, + default=50, + help="number of ddim sampling steps", + ) + parser.add_argument( + "--plms", + action='store_true', + help="use plms sampling", + ) + parser.add_argument( + "--dpm_solver", + action='store_true', + help="use dpm_solver sampling", + ) + parser.add_argument( + "--laion400m", + action='store_true', + help="uses the LAION400M model", + ) + parser.add_argument( + "--fixed_code", + action='store_true', + help="if enabled, uses the same starting code across samples ", + ) + parser.add_argument( + "--ddim_eta", + type=float, + default=0.0, + help="ddim eta (eta=0.0 corresponds to deterministic sampling", + ) + parser.add_argument( + "--n_iter", + type=int, + default=2, + help="sample this often", + ) + parser.add_argument( + "--H", + type=int, + default=512, + help="image height, in pixel space", + ) + parser.add_argument( + "--W", + type=int, + default=512, + help="image width, in pixel space", + ) + parser.add_argument( + "--C", + type=int, + default=4, + help="latent channels", + ) + parser.add_argument( + "--f", + type=int, + default=8, + help="downsampling factor", + ) + parser.add_argument( + "--n_samples", + type=int, + default=3, + help="how many samples to produce for each given prompt. A.k.a. 
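As a quick sanity check of the invisible watermark wired up here, the following sketch embeds the payload with `put_watermark` and recovers it the same way `test_watermark.py` does. The file paths are placeholders; 136 bits corresponds to the 17-byte `"StableDiffusionV1"` string.

```python
import cv2
from PIL import Image
from imwatermark import WatermarkEncoder, WatermarkDecoder

wm = "StableDiffusionV1"
wm_encoder = WatermarkEncoder()
wm_encoder.set_watermark('bytes', wm.encode('utf-8'))

img = Image.open("sample.png").convert("RGB")         # placeholder input image
img = put_watermark(img, wm_encoder)                  # embed the watermark (RGB PIL in/out)
img.save("sample_wm.png")

bgr = cv2.imread("sample_wm.png")
decoder = WatermarkDecoder('bytes', 136)              # 17 bytes * 8 = 136 bits
print(decoder.decode(bgr, 'dwtDct').decode('utf-8'))  # expected: StableDiffusionV1
```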
batch size", + ) + parser.add_argument( + "--n_rows", + type=int, + default=0, + help="rows in the grid (default: n_samples)", + ) + parser.add_argument( + "--scale", + type=float, + default=7.5, + help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", + ) + parser.add_argument( + "--from-file", + type=str, + help="if specified, load prompts from this file", + ) + parser.add_argument( + "--config", + type=str, + default="configs/stable-diffusion/v1-inference.yaml", + help="path to config which constructs model", + ) + parser.add_argument( + "--ckpt", + type=str, + default="models/ldm/stable-diffusion-v1/model.ckpt", + help="path to checkpoint of model", + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="the seed (for reproducible sampling)", + ) + parser.add_argument( + "--precision", + type=str, + help="evaluate at this precision", + choices=["full", "autocast"], + default="autocast" + ) + opt = parser.parse_args() + + if opt.laion400m: + print("Falling back to LAION 400M model...") + opt.config = "configs/latent-diffusion/txt2img-1p4B-eval.yaml" + opt.ckpt = "models/ldm/text2img-large/model.ckpt" + opt.outdir = "outputs/txt2img-samples-laion400m" + + seed_everything(opt.seed) + + config = OmegaConf.load(f"{opt.config}") + model = load_model_from_config(config, f"{opt.ckpt}") + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + model = model.to(device) + + if opt.dpm_solver: + sampler = DPMSolverSampler(model) + elif opt.plms: + sampler = PLMSSampler(model) + else: + sampler = DDIMSampler(model) + + os.makedirs(opt.outdir, exist_ok=True) + outpath = opt.outdir + + print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...") + wm = "StableDiffusionV1" + wm_encoder = WatermarkEncoder() + wm_encoder.set_watermark('bytes', wm.encode('utf-8')) + + batch_size = opt.n_samples + n_rows = opt.n_rows if opt.n_rows > 0 else batch_size + if not opt.from_file: + prompt = opt.prompt + assert prompt is not None + data = [batch_size * [prompt]] + + else: + print(f"reading prompts from {opt.from_file}") + with open(opt.from_file, "r") as f: + data = f.read().splitlines() + data = list(chunk(data, batch_size)) + + sample_path = os.path.join(outpath, "samples") + os.makedirs(sample_path, exist_ok=True) + base_count = len(os.listdir(sample_path)) + grid_count = len(os.listdir(outpath)) - 1 + + start_code = None + if opt.fixed_code: + start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) + + precision_scope = autocast if opt.precision=="autocast" else nullcontext + with torch.no_grad(): + with precision_scope("cuda"): + with model.ema_scope(): + tic = time.time() + all_samples = list() + for n in trange(opt.n_iter, desc="Sampling"): + for prompts in tqdm(data, desc="data"): + uc = None + if opt.scale != 1.0: + uc = model.get_learned_conditioning(batch_size * [""]) + if isinstance(prompts, tuple): + prompts = list(prompts) + c = model.get_learned_conditioning(prompts) + shape = [opt.C, opt.H // opt.f, opt.W // opt.f] + samples_ddim, _ = sampler.sample(S=opt.ddim_steps, + conditioning=c, + batch_size=opt.n_samples, + shape=shape, + verbose=False, + unconditional_guidance_scale=opt.scale, + unconditional_conditioning=uc, + eta=opt.ddim_eta, + x_T=start_code) + + x_samples_ddim = model.decode_first_stage(samples_ddim) + x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 
2.0, min=0.0, max=1.0) + x_samples_ddim = x_samples_ddim.cpu().permute(0, 2, 3, 1).numpy() + + x_checked_image, has_nsfw_concept = check_safety(x_samples_ddim) + + x_checked_image_torch = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2) + + if not opt.skip_save: + for x_sample in x_checked_image_torch: + x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') + img = Image.fromarray(x_sample.astype(np.uint8)) + img = put_watermark(img, wm_encoder) + img.save(os.path.join(sample_path, f"{base_count:05}.png")) + base_count += 1 + + if not opt.skip_grid: + all_samples.append(x_checked_image_torch) + + if not opt.skip_grid: + # additionally, save as grid + grid = torch.stack(all_samples, 0) + grid = rearrange(grid, 'n b c h w -> (n b) c h w') + grid = make_grid(grid, nrow=n_rows) + + # to image + grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() + img = Image.fromarray(grid.astype(np.uint8)) + img = put_watermark(img, wm_encoder) + img.save(os.path.join(outpath, f'grid-{grid_count:04}.png')) + grid_count += 1 + + toc = time.time() + + print(f"Your samples are ready and waiting for you here: \n{outpath} \n" + f" \nEnjoy.") + + +if __name__ == "__main__": + main() diff --git a/stable-diffusion/scripts/txt2realistic_human.py b/stable-diffusion/scripts/txt2realistic_human.py new file mode 100644 index 0000000..437c065 --- /dev/null +++ b/stable-diffusion/scripts/txt2realistic_human.py @@ -0,0 +1,347 @@ +import argparse, os, sys, glob +import cv2 +import torch +import numpy as np +from omegaconf import OmegaConf +from PIL import Image +from tqdm import tqdm, trange +from itertools import islice +from einops import rearrange +from torchvision.utils import make_grid +import time +from pytorch_lightning import seed_everything +from torch import autocast +from contextlib import contextmanager, nullcontext + +from ldm.util import instantiate_from_config +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.models.diffusion.plms import PLMSSampler +from ldm.models.diffusion.dpm_solver import DPMSolverSampler + +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from transformers import AutoFeatureExtractor + + +# load safety model +safety_model_id = "CompVis/stable-diffusion-safety-checker" +safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id) +safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id) + + +def chunk(it, size): + it = iter(it) + return iter(lambda: tuple(islice(it, size)), ()) + + +def numpy_to_pil(images): + """ + Convert a numpy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] 
+ images = (images * 255).round().astype("uint8") + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + +def load_model_from_config(config, ckpt, verbose=False): + print(f"Loading model from {ckpt}") + pl_sd = torch.load(ckpt, map_location="cpu") + if "global_step" in pl_sd: + print(f"Global Step: {pl_sd['global_step']}") + sd = pl_sd["state_dict"] + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + if len(m) > 0 and verbose: + print("missing keys:") + print(m) + if len(u) > 0 and verbose: + print("unexpected keys:") + print(u) + + model.cuda() + model.eval() + return model + + +def put_watermark(img, wm_encoder=None): + if wm_encoder is not None: + img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) + img = wm_encoder.encode(img, 'dwtDct') + img = Image.fromarray(img[:, :, ::-1]) + return img + + +def load_replacement(x): + try: + hwc = x.shape + y = Image.open("assets/rick.jpeg").convert("RGB").resize((hwc[1], hwc[0])) + y = (np.array(y)/255.0).astype(x.dtype) + assert y.shape == x.shape + return y + except Exception: + return x + + +def check_safety(x_image): + safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt") + x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values) + assert x_checked_image.shape[0] == len(has_nsfw_concept) + for i in range(len(has_nsfw_concept)): + if has_nsfw_concept[i]: + x_checked_image[i] = load_replacement(x_checked_image[i]) + return x_checked_image, has_nsfw_concept + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--prompt", + type=str, + nargs="?", + default="a painting of a virus monster playing guitar", + help="the prompt to render" + ) + parser.add_argument( + "--outdir", + type=str, + nargs="?", + help="dir to write results to", + default="outputs/txt2img-samples" + ) + parser.add_argument( + "--skip_grid", + action='store_true', + help="do not save a grid, only individual samples. Helpful when evaluating lots of samples", + ) + parser.add_argument( + "--skip_save", + action='store_true', + help="do not save individual samples. For speed measurements.", + ) + parser.add_argument( + "--ddim_steps", + type=int, + default=50, + help="number of ddim sampling steps", + ) + parser.add_argument( + "--plms", + action='store_true', + help="use plms sampling", + ) + parser.add_argument( + "--dpm_solver", + action='store_true', + help="use dpm_solver sampling", + ) + parser.add_argument( + "--laion400m", + action='store_true', + help="uses the LAION400M model", + ) + parser.add_argument( + "--fixed_code", + action='store_true', + help="if enabled, uses the same starting code across samples ", + ) + parser.add_argument( + "--ddim_eta", + type=float, + default=0.0, + help="ddim eta (eta=0.0 corresponds to deterministic sampling", + ) + parser.add_argument( + "--n_iter", + type=int, + default=1, + help="sample this often", + ) + parser.add_argument( + "--H", + type=int, + default=512, + help="image height, in pixel space", + ) + parser.add_argument( + "--W", + type=int, + default=512, + help="image width, in pixel space", + ) + parser.add_argument( + "--C", + type=int, + default=4, + help="latent channels", + ) + parser.add_argument( + "--f", + type=int, + default=8, + help="downsampling factor", + ) + parser.add_argument( + "--n_samples", + type=int, + default=3, + help="how many samples to produce for each given prompt. A.k.a. 
batch size", + ) + parser.add_argument( + "--n_rows", + type=int, + default=0, + help="rows in the grid (default: n_samples)", + ) + parser.add_argument( + "--scale", + type=float, + default=7.5, + help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", + ) + parser.add_argument( + "--from-file", + type=str, + help="if specified, load prompts from this file", + ) + parser.add_argument( + "--config", + type=str, + default="configs/stable-diffusion/v1-inference.yaml", + help="path to config which constructs model", + ) + parser.add_argument( + "--ckpt", + type=str, + default="models/ldm/stable-diffusion-v1/model.ckpt", + help="path to checkpoint of model", + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="the seed (for reproducible sampling)", + ) + parser.add_argument( + "--precision", + type=str, + help="evaluate at this precision", + choices=["full", "autocast"], + default="autocast" + ) + opt = parser.parse_args() + + if opt.laion400m: + print("Falling back to LAION 400M model...") + opt.config = "configs/latent-diffusion/txt2img-1p4B-eval.yaml" + opt.ckpt = "models/ldm/text2img-large/model.ckpt" + opt.outdir = "outputs/txt2img-samples-laion400m" + + seed_everything(opt.seed) + + config = OmegaConf.load(f"{opt.config}") + model = load_model_from_config(config, f"{opt.ckpt}") + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + model = model.to(device) + + if opt.dpm_solver: + sampler = DPMSolverSampler(model) + elif opt.plms: + sampler = PLMSSampler(model) + else: + sampler = DDIMSampler(model) + + os.makedirs(opt.outdir, exist_ok=True) + outpath = opt.outdir + + print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...") + + + batch_size = opt.n_samples + n_rows = opt.n_rows if opt.n_rows > 0 else batch_size + if not opt.from_file: + prompt = opt.prompt + assert prompt is not None + data = [batch_size * [prompt]] + + else: + print(f"reading prompts from {opt.from_file}") + with open(opt.from_file, "r") as f: + data = f.read().splitlines() + data = list(chunk(data, batch_size)) + + sample_path = os.path.join(outpath, "samples") + os.makedirs(sample_path, exist_ok=True) + base_count = len(os.listdir(sample_path)) + grid_count = len(os.listdir(outpath)) - 1 + + start_code = None + if opt.fixed_code: + start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) + + precision_scope = autocast if opt.precision=="autocast" else nullcontext + with torch.no_grad(): + with precision_scope("cuda"): + with model.ema_scope(): + tic = time.time() + all_samples = list() + for n in trange(opt.n_iter, desc="Sampling"): + for prompts in tqdm(data, desc="data"): + uc = None + if opt.scale != 1.0: + uc = model.get_learned_conditioning(batch_size * [""]) + if isinstance(prompts, tuple): + prompts = list(prompts) + c = model.get_learned_conditioning(prompts) + shape = [opt.C, opt.H // opt.f, opt.W // opt.f] + samples_ddim, _ = sampler.sample(S=opt.ddim_steps, + conditioning=c, + batch_size=opt.n_samples, + shape=shape, + verbose=False, + unconditional_guidance_scale=opt.scale, + unconditional_conditioning=uc, + eta=opt.ddim_eta, + x_T=start_code) + + x_samples_ddim = model.decode_first_stage(samples_ddim) + x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) + x_samples_ddim = x_samples_ddim.cpu().permute(0, 2, 3, 1).numpy() + + x_checked_image, 
has_nsfw_concept = check_safety(x_samples_ddim) + + x_checked_image_torch = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2) + + if not opt.skip_save: + for x_sample in x_checked_image_torch: + x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') + img = Image.fromarray(x_sample.astype(np.uint8)) + img.save(os.path.join(sample_path, f"{base_count:05}.png")) + base_count += 1 + + if not opt.skip_grid: + all_samples.append(x_checked_image_torch) + + if not opt.skip_grid: + # additionally, save as grid + grid = torch.stack(all_samples, 0) + grid = rearrange(grid, 'n b c h w -> (n b) c h w') + grid = make_grid(grid, nrow=n_rows) + + # to image + grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() + img = Image.fromarray(grid.astype(np.uint8)) + img.save(os.path.join(outpath, f'grid-{grid_count:04}.png')) + grid_count += 1 + + toc = time.time() + + print(f"Your samples are ready and waiting for you here: \n{outpath} \n" + f" \nEnjoy.") + + +if __name__ == "__main__": + main() diff --git a/stable-diffusion/setup.py b/stable-diffusion/setup.py new file mode 100644 index 0000000..a24d541 --- /dev/null +++ b/stable-diffusion/setup.py @@ -0,0 +1,13 @@ +from setuptools import setup, find_packages + +setup( + name='latent-diffusion', + version='0.0.1', + description='', + packages=find_packages(), + install_requires=[ + 'torch', + 'numpy', + 'tqdm', + ], +) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/.gitignore b/stable-dreamfusion-3DPortrait/.gitignore new file mode 100644 index 0000000..4684816 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/.gitignore @@ -0,0 +1,40 @@ +__pycache__/ +build/ +*.egg-info/ +*.so +venv_*/ + +tmp* +# data/ +ldm/data/ +data2 +scripts2 +trial*/ +.vs/ + +TOKEN +*.ckpt + +densegridencoder +tets/256_tets.npz + +.vscode/launch.json + +data2 +data/car* +data/chair* +data/warrior* +data/wd* +data/space* +data/corgi* +data/turtle* + +# Only keep the original image, not the automatically-generated depth, normals, rgba +data/baby_phoenix_on_ice_* +data/bollywood_actress_* +data/beach_house_1_* +data/beach_house_2_* +data/mona_lisa_* +data/futuristic_car_* +data/church_ruins_* + diff --git a/stable-dreamfusion-3DPortrait/LICENSE b/stable-dreamfusion-3DPortrait/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
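The `activation.py` file added just below defines `trunc_exp`, an exponential whose backward pass clamps the input at 15 so gradients stay finite for large density activations. A minimal sanity check of that behaviour (assuming the module is importable as `activation`):

```python
import torch
from activation import trunc_exp   # assumed import path

x = torch.tensor([1.0, 20.0], requires_grad=True)
y = trunc_exp(x)
y.sum().backward()

print(y)       # matches torch.exp(x): [e^1, e^20]
print(x.grad)  # gradient of the x=20 entry is clamped to e^15 instead of e^20
```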
diff --git a/stable-dreamfusion-3DPortrait/activation.py b/stable-dreamfusion-3DPortrait/activation.py new file mode 100644 index 0000000..e6cba6a --- /dev/null +++ b/stable-dreamfusion-3DPortrait/activation.py @@ -0,0 +1,21 @@ +import torch +from torch.autograd import Function +from torch.cuda.amp import custom_bwd, custom_fwd + +class _trunc_exp(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float) + def forward(ctx, x): + ctx.save_for_backward(x) + return torch.exp(x) + + @staticmethod + @custom_bwd + def backward(ctx, g): + x = ctx.saved_tensors[0] + return g * torch.exp(x.clamp(max=15)) + +trunc_exp = _trunc_exp.apply + +def biased_softplus(x, bias=0): + return torch.nn.functional.softplus(x - bias) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/assets/advanced.md b/stable-dreamfusion-3DPortrait/assets/advanced.md new file mode 100644 index 0000000..c9432c0 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/assets/advanced.md @@ -0,0 +1,85 @@ + +# Code organization & Advanced tips + +This is a simple description of the most important implementation details. +If you are interested in improving this repo, this might be a starting point. +Any contribution would be greatly appreciated! + +* The SDS loss is located at `./guidance/sd_utils.py > StableDiffusion > train_step`: +```python +## 1. we need to interpolate the NeRF rendering to 512x512, to feed it to SD's VAE. +pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) +## 2. image (512x512) --- VAE --> latents (64x64), this is SD's difference from Imagen. +latents = self.encode_imgs(pred_rgb_512) +... # timestep sampling, noise adding and UNet noise predicting +## 3. the SDS loss +w = (1 - self.alphas[t]) +grad = w * (noise_pred - noise) +# since UNet part is ignored and cannot simply audodiff, we have two ways to set the grad: +# 3.1. call backward and set the grad now (need to retain graph since we will call a second backward for the other losses later) +latents.backward(gradient=grad, retain_graph=True) +return 0 # dummy loss + +# 3.2. use a custom function to set a hook in backward, so we only call backward once (credits to @elliottzheng) +class SpecifyGradient(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward(ctx, input_tensor, gt_grad): + ctx.save_for_backward(gt_grad) + # we return a dummy value 1, which will be scaled by amp's scaler so we get the scale in backward. + return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype) + + @staticmethod + @custom_bwd + def backward(ctx, grad_scale): + gt_grad, = ctx.saved_tensors + gt_grad = gt_grad * grad_scale + return gt_grad, None + +loss = SpecifyGradient.apply(latents, grad) +return loss # functional loss + +# 3.3. reparameterization (credits to @Xallt) +# d(loss)/d(latents) = grad, since grad is already detached, it's this simple. +loss = (grad * latents).sum() +return loss + +# 3.4. reparameterization (credits to threestudio) +# this is the same as 3.3, but the loss value only reflects the magnitude of grad, which is more informative. +targets = (latents - grad).detach() +loss = 0.5 * F.mse_loss(latents, targets, reduction='sum') +return loss +``` +* Other regularizations are in `./nerf/utils.py > Trainer > train_step`. + * The generation seems quite sensitive to regularizations on weights_sum (alphas for each ray). 
The original opacity loss tends to make NeRF disappear (zero density everywhere), so we use an entropy loss to replace it for now (encourages alpha to be either 0 or 1). +* NeRF Rendering core function: `./nerf/renderer.py > NeRFRenderer > run & run_cuda`. +* Shading & normal evaluation: `./nerf/network*.py > NeRFNetwork > forward`. + * light direction: current implementation use a plane light source, instead of a point light source. +* View-dependent prompting: `./nerf/provider.py > get_view_direction`. + * use `--angle_overhead, --angle_front` to set the border. +* Network backbone (`./nerf/network*.py`) can be chosen by the `--backbone` option. +* Spatial density bias (density blob): `./nerf/network*.py > NeRFNetwork > density_blob`. + + +# Debugging + +`debugpy-run` is a convenient way to remotely debug this project. Simply replace a command like this one: + +```bash +python main.py --text "a hamburger" --workspace trial -O --vram_O +``` + +... with: + +```bash +debugpy-run main.py -- --text "a hamburger" --workspace trial -O --vram_O +``` + +For more details: https://github.com/bulletmark/debugpy-run + +# Axes and directions of polar, azimuth, etc. in NeRF and Zero123 + +NeRF_Zero123 + +This code refers to theta for polar, phi for azimuth. + diff --git a/stable-dreamfusion-3DPortrait/assets/update_logs.md b/stable-dreamfusion-3DPortrait/assets/update_logs.md new file mode 100644 index 0000000..b1c2e2c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/assets/update_logs.md @@ -0,0 +1,39 @@ +### 2023.4.19 +* Fix depth supervision, migrate depth estimation model to omnidata. +* Add normal supervision (also by omnidata). + +https://user-images.githubusercontent.com/25863658/232403294-b77409bf-ddc7-4bb8-af32-ee0cc123825a.mp4 + +### 2023.4.7 +Improvement on mesh quality & DMTet finetuning support. + +https://user-images.githubusercontent.com/25863658/230535363-298c960e-bf9c-4906-8b96-cd60edcb24dd.mp4 + +### 2023.3.30 +* adopt ideas from [Fantasia3D](https://fantasia3d.github.io/) to concatenate normal and mask as the latent code in a warm up stage, which shows faster convergence of shape. + +https://user-images.githubusercontent.com/25863658/230535373-6ee28f16-bb21-4ec4-bc86-d46597361a04.mp4 + +### 2023.1.30 +* Use an MLP to predict the surface normals as in Magic3D to avoid finite difference / second order gradient, generation quality is greatly improved. +* More efficient two-pass raymarching in training inspired by nerfacc. + +https://user-images.githubusercontent.com/25863658/215996308-9fd959f5-b5c7-4a8e-a241-0fe63ec86a4a.mp4 + +### 2022.12.3 +* Support Stable-diffusion 2.0 base. + +### 2022.11.15 +* Add the vanilla backbone that is pure-pytorch. + +### 2022.10.9 +* The shading (partially) starts to work, at least it won't make scene empty. For some prompts, it shows better results (less severe Janus problem). The textureless rendering mode is still disabled. +* Enable shading by default (--latent_iter_ratio 1000). + +### 2022.10.5 +* Basic reproduction finished. +* Non --cuda_ray, --tcnn are not working, need to fix. +* Shading is not working, disabled in utils.py for now. Surface normals are bad. 
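The entropy regularisation on `weights_sum` mentioned in `advanced.md` above (and in the changelog entry that follows) is not spelled out in this diff; a rough sketch of such a binary-entropy term, which pushes per-ray opacity towards 0 or 1, could look like the following. This is an illustration, not the exact term in `./nerf/utils.py`.

```python
import torch

def alpha_entropy_loss(weights_sum, eps=1e-6):
    # Binary entropy of the accumulated per-ray opacity: low when alpha is
    # close to 0 or 1, high when it hovers in between.
    a = weights_sum.clamp(eps, 1.0 - eps)
    return (-a * torch.log(a) - (1.0 - a) * torch.log(1.0 - a)).mean()
```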
+* Use an entropy loss to regularize weights_sum (alpha), the original L2 reg always leads to degenerated geometry... + +https://user-images.githubusercontent.com/25863658/194241493-f3e68f78-aefe-479e-a4a8-001424a61b37.mp4 diff --git a/stable-dreamfusion-3DPortrait/config/anya.csv b/stable-dreamfusion-3DPortrait/config/anya.csv new file mode 100644 index 0000000..4509748 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/config/anya.csv @@ -0,0 +1,3 @@ +zero123_weight, radius, polar, azimuth, image +1, 3, 90, 0, data/anya_front_rgba.png +1, 3, 90, 180, data/anya_back_rgba.png \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/config/car.csv b/stable-dreamfusion-3DPortrait/config/car.csv new file mode 100644 index 0000000..c014cdc --- /dev/null +++ b/stable-dreamfusion-3DPortrait/config/car.csv @@ -0,0 +1,5 @@ +zero123_weight, radius, polar, azimuth, image +4, 3.2, 90, 0, data/car_left_rgba.png +1, 3, 90, 90, data/car_front_rgba.png +4, 3.2, 90, 180, data/car_right_rgba.png +1, 3, 90, -90, data/car_back_rgba.png \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/config/corgi.csv b/stable-dreamfusion-3DPortrait/config/corgi.csv new file mode 100644 index 0000000..bb1234a --- /dev/null +++ b/stable-dreamfusion-3DPortrait/config/corgi.csv @@ -0,0 +1,2 @@ +zero123_weight, radius, polar, azimuth, image +1, 3.2, 90, 0, data/corgi_puppy_sitting_looking_up_rgba.png \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/data/anya_back.webp b/stable-dreamfusion-3DPortrait/data/anya_back.webp new file mode 100644 index 0000000..827bb96 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_back.webp differ diff --git a/stable-dreamfusion-3DPortrait/data/anya_back_depth.png b/stable-dreamfusion-3DPortrait/data/anya_back_depth.png new file mode 100644 index 0000000..3fece86 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_back_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/anya_back_normal.png b/stable-dreamfusion-3DPortrait/data/anya_back_normal.png new file mode 100644 index 0000000..f8550bf Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_back_normal.png differ diff --git a/stable-dreamfusion-3DPortrait/data/anya_back_rgba.png b/stable-dreamfusion-3DPortrait/data/anya_back_rgba.png new file mode 100644 index 0000000..d583853 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_back_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/data/anya_front.jpg b/stable-dreamfusion-3DPortrait/data/anya_front.jpg new file mode 100644 index 0000000..588c72d Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_front.jpg differ diff --git a/stable-dreamfusion-3DPortrait/data/anya_front.png b/stable-dreamfusion-3DPortrait/data/anya_front.png new file mode 100644 index 0000000..276bd41 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_front.png differ diff --git a/stable-dreamfusion-3DPortrait/data/anya_front_depth.png b/stable-dreamfusion-3DPortrait/data/anya_front_depth.png new file mode 100644 index 0000000..a98cc40 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_front_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/anya_front_normal.png b/stable-dreamfusion-3DPortrait/data/anya_front_normal.png new file mode 100644 index 0000000..fedf7f7 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_front_normal.png differ diff --git 
a/stable-dreamfusion-3DPortrait/data/anya_front_rgba.png b/stable-dreamfusion-3DPortrait/data/anya_front_rgba.png new file mode 100644 index 0000000..089499e Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/anya_front_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/data/baby_phoenix_on_ice.png b/stable-dreamfusion-3DPortrait/data/baby_phoenix_on_ice.png new file mode 100644 index 0000000..02a15cf Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/baby_phoenix_on_ice.png differ diff --git a/stable-dreamfusion-3DPortrait/data/beach_house_1.png b/stable-dreamfusion-3DPortrait/data/beach_house_1.png new file mode 100644 index 0000000..cfde250 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/beach_house_1.png differ diff --git a/stable-dreamfusion-3DPortrait/data/beach_house_2.png b/stable-dreamfusion-3DPortrait/data/beach_house_2.png new file mode 100644 index 0000000..5a33e50 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/beach_house_2.png differ diff --git a/stable-dreamfusion-3DPortrait/data/bollywood_actress.png b/stable-dreamfusion-3DPortrait/data/bollywood_actress.png new file mode 100644 index 0000000..4316be3 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/bollywood_actress.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cactus.png b/stable-dreamfusion-3DPortrait/data/cactus.png new file mode 100644 index 0000000..1f89ba8 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cactus.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cactus_depth.png b/stable-dreamfusion-3DPortrait/data/cactus_depth.png new file mode 100644 index 0000000..f086e99 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cactus_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cactus_normal.png b/stable-dreamfusion-3DPortrait/data/cactus_normal.png new file mode 100644 index 0000000..f420869 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cactus_normal.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cactus_rgba.png b/stable-dreamfusion-3DPortrait/data/cactus_rgba.png new file mode 100644 index 0000000..1936f75 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cactus_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cake.png b/stable-dreamfusion-3DPortrait/data/cake.png new file mode 100644 index 0000000..dcfba04 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cake.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cake_depth.png b/stable-dreamfusion-3DPortrait/data/cake_depth.png new file mode 100644 index 0000000..ded7595 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cake_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cake_normal.png b/stable-dreamfusion-3DPortrait/data/cake_normal.png new file mode 100644 index 0000000..c7b99b2 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cake_normal.png differ diff --git a/stable-dreamfusion-3DPortrait/data/cake_rgba.png b/stable-dreamfusion-3DPortrait/data/cake_rgba.png new file mode 100644 index 0000000..f0ae0b0 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/cake_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/data/catstatue.png b/stable-dreamfusion-3DPortrait/data/catstatue.png new file mode 100644 index 0000000..7f58741 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/catstatue.png differ diff --git a/stable-dreamfusion-3DPortrait/data/catstatue_depth.png 
b/stable-dreamfusion-3DPortrait/data/catstatue_depth.png new file mode 100644 index 0000000..a22c328 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/catstatue_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/catstatue_normal.png b/stable-dreamfusion-3DPortrait/data/catstatue_normal.png new file mode 100644 index 0000000..3baf000 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/catstatue_normal.png differ diff --git a/stable-dreamfusion-3DPortrait/data/catstatue_rgba.png b/stable-dreamfusion-3DPortrait/data/catstatue_rgba.png new file mode 100644 index 0000000..3b44eb5 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/catstatue_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/data/church_ruins.png b/stable-dreamfusion-3DPortrait/data/church_ruins.png new file mode 100644 index 0000000..951eccf Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/church_ruins.png differ diff --git a/stable-dreamfusion-3DPortrait/data/firekeeper.jpg b/stable-dreamfusion-3DPortrait/data/firekeeper.jpg new file mode 100644 index 0000000..9e57d14 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/firekeeper.jpg differ diff --git a/stable-dreamfusion-3DPortrait/data/firekeeper_depth.png b/stable-dreamfusion-3DPortrait/data/firekeeper_depth.png new file mode 100644 index 0000000..7d56a1f Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/firekeeper_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/firekeeper_normal.png b/stable-dreamfusion-3DPortrait/data/firekeeper_normal.png new file mode 100644 index 0000000..614f8ac Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/firekeeper_normal.png differ diff --git a/stable-dreamfusion-3DPortrait/data/firekeeper_rgba.png b/stable-dreamfusion-3DPortrait/data/firekeeper_rgba.png new file mode 100644 index 0000000..73430de Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/firekeeper_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/data/futuristic_car.png b/stable-dreamfusion-3DPortrait/data/futuristic_car.png new file mode 100644 index 0000000..0cfc78f Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/futuristic_car.png differ diff --git a/stable-dreamfusion-3DPortrait/data/hamburger.png b/stable-dreamfusion-3DPortrait/data/hamburger.png new file mode 100644 index 0000000..2dc1268 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/hamburger.png differ diff --git a/stable-dreamfusion-3DPortrait/data/hamburger_depth.png b/stable-dreamfusion-3DPortrait/data/hamburger_depth.png new file mode 100644 index 0000000..f76c80c Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/hamburger_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/hamburger_normal.png b/stable-dreamfusion-3DPortrait/data/hamburger_normal.png new file mode 100644 index 0000000..26f0835 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/hamburger_normal.png differ diff --git a/stable-dreamfusion-3DPortrait/data/hamburger_rgba.png b/stable-dreamfusion-3DPortrait/data/hamburger_rgba.png new file mode 100644 index 0000000..7cd36c3 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/hamburger_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/data/mona_lisa.png b/stable-dreamfusion-3DPortrait/data/mona_lisa.png new file mode 100644 index 0000000..51f8371 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/mona_lisa.png differ diff --git 
a/stable-dreamfusion-3DPortrait/data/teddy.png b/stable-dreamfusion-3DPortrait/data/teddy.png new file mode 100644 index 0000000..7bb3a96 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/teddy.png differ diff --git a/stable-dreamfusion-3DPortrait/data/teddy_depth.png b/stable-dreamfusion-3DPortrait/data/teddy_depth.png new file mode 100644 index 0000000..70a35b0 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/teddy_depth.png differ diff --git a/stable-dreamfusion-3DPortrait/data/teddy_normal.png b/stable-dreamfusion-3DPortrait/data/teddy_normal.png new file mode 100644 index 0000000..75d08b2 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/teddy_normal.png differ diff --git a/stable-dreamfusion-3DPortrait/data/teddy_rgba.png b/stable-dreamfusion-3DPortrait/data/teddy_rgba.png new file mode 100644 index 0000000..d3dbf5f Binary files /dev/null and b/stable-dreamfusion-3DPortrait/data/teddy_rgba.png differ diff --git a/stable-dreamfusion-3DPortrait/docker/Dockerfile b/stable-dreamfusion-3DPortrait/docker/Dockerfile new file mode 100644 index 0000000..47fd296 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/docker/Dockerfile @@ -0,0 +1,53 @@ +FROM nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04 + +# Remove any third-party apt sources to avoid issues with expiring keys. +RUN rm -f /etc/apt/sources.list.d/*.list + +RUN apt-get update + +RUN DEBIAN_FRONTEND=noninteractive TZ=Europe/MADRID apt-get install -y tzdata + +# Install some basic utilities +RUN apt-get install -y \ + curl \ + ca-certificates \ + sudo \ + git \ + bzip2 \ + libx11-6 \ + python3 \ + python3-pip \ + libglfw3-dev \ + libgles2-mesa-dev \ + libglib2.0-0 \ + && rm -rf /var/lib/apt/lists/* + + +# Create a working directory +RUN mkdir /app +WORKDIR /app + +RUN cd /app +RUN git clone https://github.com/ashawkey/stable-dreamfusion.git + + +RUN pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116 + +WORKDIR /app/stable-dreamfusion + +RUN pip3 install -r requirements.txt +RUN pip3 install git+https://github.com/NVlabs/nvdiffrast/ + +# Needs nvidia runtime, if you have "No CUDA runtime is found" error: https://stackoverflow.com/questions/59691207/docker-build-with-nvidia-runtime, first answer +RUN pip3 install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch + +RUN pip3 install git+https://github.com/openai/CLIP.git +RUN bash scripts/install_ext.sh + + + + + +# Set the default command to python3 +#CMD ["python3"] + diff --git a/stable-dreamfusion-3DPortrait/docker/README.md b/stable-dreamfusion-3DPortrait/docker/README.md new file mode 100644 index 0000000..2fe00e4 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/docker/README.md @@ -0,0 +1,80 @@ +### Docker installation + +## Build image +To build the docker image on your own machine, which may take 15-30 mins: +``` +docker build -t stable-dreamfusion:latest . +``` + +If you have the error **No CUDA runtime is found** when building the wheels for tiny-cuda-nn you need to setup the nvidia-runtime for docker. 
+``` +sudo apt-get install nvidia-container-runtime +``` +Then edit `/etc/docker/daemon.json` and add the default-runtime: +``` +{ + "runtimes": { + "nvidia": { + "path": "nvidia-container-runtime", + "runtimeArgs": [] + } + }, + "default-runtime": "nvidia" +} +``` +And restart docker: +``` +sudo systemctl restart docker +``` +Now you can build tiny-cuda-nn inside docker. + +## Download image +To download the image (~6GB) instead: +``` +docker pull supercabb/stable-dreamfusion:3080_0.0.1 +docker tag supercabb/stable-dreamfusion:3080_0.0.1 stable-dreamfusion +``` + +## Use image + +You can launch an interactive shell inside the container: + +``` +docker run --gpus all -it --rm -v $(cd ~ && pwd):/mnt stable-dreamfusion /bin/bash +``` +From this shell, all the code in the repo should work. + +To run any single command `` inside the docker container: +``` +docker run --gpus all -it --rm -v $(cd ~ && pwd):/mnt stable-dreamfusion /bin/bash -c "" +``` +To train: +``` +export TOKEN="#HUGGING FACE ACCESS TOKEN#" +docker run --gpus all -it --rm -v $(cd ~ && pwd):/mnt stable-dreamfusion /bin/bash -c "echo ${TOKEN} > TOKEN \ +&& python3 main.py --text \"a hamburger\" --workspace trial -O" + +``` +Run test without gui: +``` +export PATH_TO_WORKSPACE="#PATH_TO_WORKSPACE#" +docker run --gpus all -it --rm -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:ro -v $(cd ~ && pwd):/mnt \ +-v $(cd ${PATH_TO_WORKSPACE} && pwd):/app/stable-dreamfusion/trial stable-dreamfusion /bin/bash -c "python3 \ +main.py --workspace trial -O --test" +``` +Run test with gui: +``` +export PATH_TO_WORKSPACE="#PATH_TO_WORKSPACE#" +xhost + +docker run --gpus all -it --rm -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:ro -v $(cd ~ && pwd):/mnt \ +-v $(cd ${PATH_TO_WORKSPACE} && pwd):/app/stable-dreamfusion/trial stable-dreamfusion /bin/bash -c "python3 \ +main.py --workspace trial -O --test --gui" +xhost - +``` + + + + + + + diff --git a/stable-dreamfusion-3DPortrait/dpt.py b/stable-dreamfusion-3DPortrait/dpt.py new file mode 100644 index 0000000..8cc0479 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/dpt.py @@ -0,0 +1,924 @@ +import math +import types + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import timm + +class BaseModel(torch.nn.Module): + def load(self, path): + """Load model from file. + Args: + path (str): file path + """ + parameters = torch.load(path, map_location=torch.device('cpu')) + + if "optimizer" in parameters: + parameters = parameters["model"] + + self.load_state_dict(parameters) + + +def unflatten_with_named_tensor(input, dim, sizes): + """Workaround for unflattening with named tensor.""" + # tracer acts up with unflatten. 
See https://github.com/pytorch/pytorch/issues/49538 + new_shape = list(input.shape)[:dim] + list(sizes) + list(input.shape)[dim+1:] + return input.view(*new_shape) + +class Slice(nn.Module): + def __init__(self, start_index=1): + super(Slice, self).__init__() + self.start_index = start_index + + def forward(self, x): + return x[:, self.start_index :] + + +class AddReadout(nn.Module): + def __init__(self, start_index=1): + super(AddReadout, self).__init__() + self.start_index = start_index + + def forward(self, x): + if self.start_index == 2: + readout = (x[:, 0] + x[:, 1]) / 2 + else: + readout = x[:, 0] + return x[:, self.start_index :] + readout.unsqueeze(1) + + +class ProjectReadout(nn.Module): + def __init__(self, in_features, start_index=1): + super(ProjectReadout, self).__init__() + self.start_index = start_index + + self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU()) + + def forward(self, x): + readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :]) + features = torch.cat((x[:, self.start_index :], readout), -1) + + return self.project(features) + + +class Transpose(nn.Module): + def __init__(self, dim0, dim1): + super(Transpose, self).__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x): + x = x.transpose(self.dim0, self.dim1) + return x + + +def forward_vit(pretrained, x): + b, c, h, w = x.shape + + glob = pretrained.model.forward_flex(x) + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + layer_4 = pretrained.activations["4"] + + layer_1 = pretrained.act_postprocess1[0:2](layer_1) + layer_2 = pretrained.act_postprocess2[0:2](layer_2) + layer_3 = pretrained.act_postprocess3[0:2](layer_3) + layer_4 = pretrained.act_postprocess4[0:2](layer_4) + + + unflattened_dim = 2 + unflattened_size = ( + int(torch.div(h, pretrained.model.patch_size[1], rounding_mode='floor')), + int(torch.div(w, pretrained.model.patch_size[0], rounding_mode='floor')), + ) + unflatten = nn.Sequential(nn.Unflatten(unflattened_dim, unflattened_size)) + + + if layer_1.ndim == 3: + layer_1 = unflatten(layer_1) + if layer_2.ndim == 3: + layer_2 = unflatten(layer_2) + if layer_3.ndim == 3: + layer_3 = unflatten_with_named_tensor(layer_3, unflattened_dim, unflattened_size) + if layer_4.ndim == 3: + layer_4 = unflatten_with_named_tensor(layer_4, unflattened_dim, unflattened_size) + + layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1) + layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2) + layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3) + layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4) + + return layer_1, layer_2, layer_3, layer_4 + + +def _resize_pos_embed(self, posemb, gs_h, gs_w): + posemb_tok, posemb_grid = ( + posemb[:, : self.start_index], + posemb[0, self.start_index :], + ) + + gs_old = int(math.sqrt(posemb_grid.shape[0])) + + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear") + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1) + + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + + return posemb + + +def forward_flex(self, x): + b, c, h, w = x.shape + + pos_embed = self._resize_pos_embed( + self.pos_embed, torch.div(h, self.patch_size[1], 
rounding_mode='floor'), torch.div(w, self.patch_size[0], rounding_mode='floor') + ) + + B = x.shape[0] + + if hasattr(self.patch_embed, "backbone"): + x = self.patch_embed.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + x = self.patch_embed.proj(x).flatten(2).transpose(1, 2) + + if getattr(self, "dist_token", None) is not None: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + else: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + + return x + + +activations = {} + + +def get_activation(name): + def hook(model, input, output): + activations[name] = output + + return hook + + +def get_readout_oper(vit_features, features, use_readout, start_index=1): + if use_readout == "ignore": + readout_oper = [Slice(start_index)] * len(features) + elif use_readout == "add": + readout_oper = [AddReadout(start_index)] * len(features) + elif use_readout == "project": + readout_oper = [ + ProjectReadout(vit_features, start_index) for out_feat in features + ] + else: + assert ( + False + ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'" + + return readout_oper + + +def _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[2, 5, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) + + # 32, 48, 136, 384 + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + 
out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_large_patch16_384", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[256, 512, 1024, 1024], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model( + "vit_deit_base_distilled_patch16_384", pretrained=pretrained + ) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + hooks=hooks, + use_readout=use_readout, + start_index=2, + ) + + +def _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=[0, 1, 8, 11], + vit_features=768, + use_vit_only=False, + use_readout="ignore", + start_index=1, +): + pretrained = nn.Module() + + pretrained.model = model + + if use_vit_only == True: + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + else: + pretrained.model.patch_embed.backbone.stages[0].register_forward_hook( + get_activation("1") + ) + pretrained.model.patch_embed.backbone.stages[1].register_forward_hook( + get_activation("2") + ) + + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) + + if use_vit_only == True: + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + 
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + else: + pretrained.act_postprocess1 = nn.Sequential( + nn.Identity(), nn.Identity(), nn.Identity() + ) + pretrained.act_postprocess2 = nn.Sequential( + nn.Identity(), nn.Identity(), nn.Identity() + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitb_rn50_384( + pretrained, use_readout="ignore", hooks=None, use_vit_only=False +): + model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained) + + hooks = [0, 1, 8, 11] if hooks == None else hooks + return _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) + +def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",): + if backbone == "vitl16_384": + pretrained = _make_pretrained_vitl16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # ViT-L/16 - 85.0% Top1 (backbone) + elif backbone == "vitb_rn50_384": + pretrained = _make_pretrained_vitb_rn50_384( + use_pretrained, + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) + scratch = _make_scratch( + [256, 512, 768, 768], features, groups=groups, expand=expand + ) # ViT-H/16 - 85.0% Top1 (backbone) + elif backbone == "vitb16_384": + pretrained = _make_pretrained_vitb16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # ViT-B/16 - 84.6% Top1 (backbone) + elif backbone == "resnext101_wsl": + pretrained = _make_pretrained_resnext101_wsl(use_pretrained) + scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3 + elif backbone == "efficientnet_lite3": + pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, 
exportable=exportable) + scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3 + else: + print(f"Backbone '{backbone}' not implemented") + assert False + + return pretrained, scratch + + +def _make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + out_shape4 = out_shape + if expand==True: + out_shape1 = out_shape + out_shape2 = out_shape*2 + out_shape3 = out_shape*4 + out_shape4 = out_shape*8 + + scratch.layer1_rn = nn.Conv2d( + in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer2_rn = nn.Conv2d( + in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer3_rn = nn.Conv2d( + in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer4_rn = nn.Conv2d( + in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + + return scratch + + +def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False): + efficientnet = torch.hub.load( + "rwightman/gen-efficientnet-pytorch", + "tf_efficientnet_lite3", + pretrained=use_pretrained, + exportable=exportable + ) + return _make_efficientnet_backbone(efficientnet) + + +def _make_efficientnet_backbone(effnet): + pretrained = nn.Module() + + pretrained.layer1 = nn.Sequential( + effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2] + ) + pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) + pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) + pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) + + return pretrained + + +def _make_resnet_backbone(resnet): + pretrained = nn.Module() + pretrained.layer1 = nn.Sequential( + resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1 + ) + + pretrained.layer2 = resnet.layer2 + pretrained.layer3 = resnet.layer3 + pretrained.layer4 = resnet.layer4 + + return pretrained + + +def _make_pretrained_resnext101_wsl(use_pretrained): + resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl") + return _make_resnet_backbone(resnet) + + + +class Interpolate(nn.Module): + """Interpolation module. + """ + + def __init__(self, scale_factor, mode, align_corners=False): + """Init. + Args: + scale_factor (float): scaling + mode (str): interpolation mode + """ + super(Interpolate, self).__init__() + + self.interp = nn.functional.interpolate + self.scale_factor = scale_factor + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + """Forward pass. + Args: + x (tensor): input + Returns: + tensor: interpolated data + """ + + x = self.interp( + x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners + ) + + return x + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features): + """Init. + Args: + features (int): number of features + """ + super().__init__() + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Forward pass. 
+ Args: + x (tensor): input + Returns: + tensor: output + """ + out = self.relu(x) + out = self.conv1(out) + out = self.relu(out) + out = self.conv2(out) + + return out + x + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features): + """Init. + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.resConfUnit1 = ResidualConvUnit(features) + self.resConfUnit2 = ResidualConvUnit(features) + + def forward(self, *xs): + """Forward pass. + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + output += self.resConfUnit1(xs[1]) + + output = self.resConfUnit2(output) + + output = nn.functional.interpolate( + output, scale_factor=2, mode="bilinear", align_corners=True + ) + + return output + + + + +class ResidualConvUnit_custom(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features, activation, bn): + """Init. + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups=1 + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + if self.bn==True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + Args: + x (tensor): input + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn==True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn==True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + + # return out + x + + +class FeatureFusionBlock_custom(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True): + """Init. + Args: + features (int): number of features + """ + super(FeatureFusionBlock_custom, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + + self.groups=1 + + self.expand = expand + out_features = features + if self.expand==True: + out_features = features//2 + + self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) + + self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, *xs): + """Forward pass. 
+ Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + # output += res + + output = self.resConfUnit2(output) + + output = nn.functional.interpolate( + output, scale_factor=2, mode="bilinear", align_corners=self.align_corners + ) + + output = self.out_conv(output) + + return output + + + +def _make_fusion_block(features, use_bn): + return FeatureFusionBlock_custom( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + ) + + +class DPT(BaseModel): + def __init__( + self, + head, + features=256, + backbone="vitb_rn50_384", + readout="project", + channels_last=False, + use_bn=False, + ): + + super(DPT, self).__init__() + + self.channels_last = channels_last + + hooks = { + "vitb_rn50_384": [0, 1, 8, 11], + "vitb16_384": [2, 5, 8, 11], + "vitl16_384": [5, 11, 17, 23], + } + + # Instantiate backbone and reassemble blocks + self.pretrained, self.scratch = _make_encoder( + backbone, + features, + True, # Set to true of you want to train from scratch, uses ImageNet weights + groups=1, + expand=False, + exportable=False, + hooks=hooks[backbone], + use_readout=readout, + ) + + self.scratch.refinenet1 = _make_fusion_block(features, use_bn) + self.scratch.refinenet2 = _make_fusion_block(features, use_bn) + self.scratch.refinenet3 = _make_fusion_block(features, use_bn) + self.scratch.refinenet4 = _make_fusion_block(features, use_bn) + + self.scratch.output_conv = head + + + def forward(self, x): + if self.channels_last == True: + x.contiguous(memory_format=torch.channels_last) + + layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return out + +class DPTDepthModel(DPT): + def __init__(self, path=None, non_negative=True, num_channels=1, **kwargs): + features = kwargs["features"] if "features" in kwargs else 256 + + head = nn.Sequential( + nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(32, num_channels, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + nn.Identity(), + ) + + super().__init__(head, **kwargs) + + if path is not None: + self.load(path) + + def forward(self, x): + return super().forward(x).squeeze(dim=1) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/encoding.py b/stable-dreamfusion-3DPortrait/encoding.py new file mode 100644 index 0000000..7edd096 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/encoding.py @@ -0,0 +1,89 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class FreqEncoder_torch(nn.Module): + def __init__(self, input_dim, max_freq_log2, N_freqs, + log_sampling=True, include_input=True, + periodic_fns=(torch.sin, torch.cos)): + + super().__init__() + + self.input_dim = input_dim + self.include_input = include_input + self.periodic_fns = periodic_fns + self.N_freqs = N_freqs + + self.output_dim = 0 + if 
self.include_input: + self.output_dim += self.input_dim + + self.output_dim += self.input_dim * N_freqs * len(self.periodic_fns) + + if log_sampling: + self.freq_bands = 2 ** torch.linspace(0, max_freq_log2, N_freqs) + else: + self.freq_bands = torch.linspace(2 ** 0, 2 ** max_freq_log2, N_freqs) + + self.freq_bands = self.freq_bands.numpy().tolist() + + def forward(self, input, max_level=None, **kwargs): + + if max_level is None: + max_level = self.N_freqs + else: + max_level = int(max_level * self.N_freqs) + + out = [] + if self.include_input: + out.append(input) + + for i in range(max_level): + freq = self.freq_bands[i] + for p_fn in self.periodic_fns: + out.append(p_fn(input * freq)) + + # append 0 + if self.N_freqs - max_level > 0: + out.append(torch.zeros(*input.shape[:-1], (self.N_freqs - max_level) * 2 * input.shape[-1], device=input.device, dtype=input.dtype)) + + out = torch.cat(out, dim=-1) + + return out + +def get_encoder(encoding, input_dim=3, + multires=6, + degree=4, + num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False, interpolation='linear', + **kwargs): + + if encoding == 'None': + return lambda x, **kwargs: x, input_dim + + elif encoding == 'frequency_torch': + encoder = FreqEncoder_torch(input_dim=input_dim, max_freq_log2=multires-1, N_freqs=multires, log_sampling=True) + + elif encoding == 'frequency': # CUDA implementation, faster than torch. + from freqencoder import FreqEncoder + encoder = FreqEncoder(input_dim=input_dim, degree=multires) + + elif encoding == 'sphere_harmonics': + from shencoder import SHEncoder + encoder = SHEncoder(input_dim=input_dim, degree=degree) + + elif encoding == 'hashgrid': + from gridencoder import GridEncoder + encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners, interpolation=interpolation) + + elif encoding == 'tiledgrid': + from gridencoder import GridEncoder + encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners, interpolation=interpolation) + + elif encoding == 'hashgrid_taichi': + from taichi_modules.hash_encoder import HashEncoderTaichi + encoder = HashEncoderTaichi(batch_size=4096) #TODO: hard encoded batch size + + else: + raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]') + + return encoder, encoder.output_dim \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/evaluation/Prompt.py b/stable-dreamfusion-3DPortrait/evaluation/Prompt.py new file mode 100644 index 0000000..53603db --- /dev/null +++ b/stable-dreamfusion-3DPortrait/evaluation/Prompt.py @@ -0,0 +1,91 @@ +import textwrap +from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForTokenClassification +from transformers import pipeline +import argparse +import sys +import warnings +warnings.filterwarnings("ignore", category=UserWarning) + + +#python Prompt.py --text "a dog is in front of a rabbit" --model vlt5 + + +if __name__ == '__main__': + + # Mimic the calling part of the main, using + parser = argparse.ArgumentParser() + parser.add_argument('--text', default="", type=str, help="text prompt") + 
#parser.add_argument('--workspace', default="trial", type=str, help="workspace") + parser.add_argument('--model', default='vlt5', type=str, help="model choices - vlt5, bert, XLNet") + + opt = parser.parse_args() + + if opt.model == "vlt5": + tokenizer = AutoTokenizer.from_pretrained("Voicelab/vlt5-base-keywords") + model = AutoModelForSeq2SeqLM.from_pretrained("Voicelab/vlt5-base-keywords") + + task_prefix = "Keywords: " + inputs = [ + opt.text + ] + + for sample in inputs: + input_sequences = [task_prefix + sample] + input_ids = tokenizer( + input_sequences, return_tensors="pt", truncation=True + ).input_ids + output = model.generate(input_ids, no_repeat_ngram_size=3, num_beams=4) + output_text = tokenizer.decode(output[0], skip_special_tokens=True) + #print(sample, "\n --->", output_text) + + elif opt.model == "bert": + tokenizer = AutoTokenizer.from_pretrained("yanekyuk/bert-uncased-keyword-extractor") + model = AutoModelForTokenClassification.from_pretrained("yanekyuk/bert-uncased-keyword-extractor") + + text = opt.text + input_ids = tokenizer.encode(text, add_special_tokens=True, return_tensors="pt") + + # Classify tokens + outputs = model(input_ids) + predictions = outputs.logits.detach().numpy()[0] + labels = predictions.argmax(axis=1) + labels = labels[1:-1] + + print(labels) + tokens = tokenizer.convert_ids_to_tokens(input_ids[0]) + tokens = tokens[1:-1] + output_tokens = [tokens[i] for i in range(len(tokens)) if labels[i] != 0] + output_text = tokenizer.convert_tokens_to_string(output_tokens) + + #print(output_text) + + + elif opt.model == "XLNet": + tokenizer = AutoTokenizer.from_pretrained("jasminejwebb/KeywordIdentifier") + model = AutoModelForTokenClassification.from_pretrained("jasminejwebb/KeywordIdentifier") + + text = opt.text + input_ids = tokenizer.encode(text, add_special_tokens=True, return_tensors="pt") + + # Classify tokens + outputs = model(input_ids) + predictions = outputs.logits.detach().numpy()[0] + labels = predictions.argmax(axis=1) + labels = labels[1:-1] + + print(labels) + tokens = tokenizer.convert_ids_to_tokens(input_ids[0]) + tokens = tokens[1:-1] + output_tokens = [tokens[i] for i in range(len(tokens)) if labels[i] != 0] + output_text = tokenizer.convert_tokens_to_string(output_tokens) + + #print(output_text) + +wrapped_text = textwrap.fill(output_text, width=50) + + +print('+' + '-'*52 + '+') +for line in wrapped_text.split('\n'): + print('| {} |'.format(line.ljust(50))) +print('+' + '-'*52 + '+') +#print(result) diff --git a/stable-dreamfusion-3DPortrait/evaluation/mesh_to_video.py b/stable-dreamfusion-3DPortrait/evaluation/mesh_to_video.py new file mode 100644 index 0000000..5810320 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/evaluation/mesh_to_video.py @@ -0,0 +1,87 @@ +import os +import numpy as np +import trimesh +import argparse +from pathlib import Path +from tqdm import tqdm +import pyvista as pv + +def render_video(anim_mesh): + center = anim_mesh.center_mass + plotter = pv.Plotter(off_screen=True) + plotter.add_mesh(anim_mesh) + + radius = 10 + n_frames = 360 + angle_step = 2 * np.pi / n_frames + for i in tqdm(range(n_frames)): + camera_pos = [center[0] + radius * np.cos(i*angle_step),center[1] + radius *np.sin(i*angle_step),center[2]] + plotter.camera_position = (camera_pos, center, (0, 0, 1)) + plotter.show(screenshot=f'frame_{i}.png', auto_close=False) + plotter.close() + os.system('ffmpeg -r 30 -f image2 -s 1920x1080 -i "result/frame_%d.png" -vcodec libx264 -crf 25 -pix_fmt yuv420p result/output.mp4') + + + +def 
generate_mesh(obj1,obj2,transform_vector): + + # Read 2 objects + filename1 = obj1 # Central Object + filename2 = obj2 # Surrounding Object + mesh1 = trimesh.load_mesh(filename1) + mesh2 = trimesh.load_mesh(filename2) + + extents1 = mesh1.extents + extents2 = mesh1.extents + + radius1 = sum(extents1) / 3.0 + radius2 = sum(extents2) / 3.0 + + center1 = mesh1.center_mass + center2 = mesh2.center_mass + + # Move + T1 = -center1 + new =[] + for i in transform_vector: + try: + new.append(float(i))*radius1 + except: + pass + transform_vector = new + print(T1, transform_vector, radius1) + T2 = -center2 + transform_vector + + # Transform + mesh1.apply_translation(T1) + mesh2.apply_translation(T2) + + # merge mesh + merged_mesh = trimesh.util.concatenate((mesh1, mesh2)) + + # save mesh + merged_mesh.export('merged_mesh.obj') + print("----> merge mesh done") + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Generate rotating mesh animation.') + parser.add_argument('--center_obj', type=str, help='Input OBJ1 file.') + parser.add_argument('--surround_obj', type=str, help='Input OBJ2 file.') + parser.add_argument('--transform_vector', help='Transform_vector.') + parser.add_argument('--output_file', type=str, default="result/Demo.mp4", help='Output MP4 file.') + parser.add_argument('--num_frames', type=int, default=100, help='Number of frames to render.') + args = parser.parse_args() + + #mesh = obj.Obj("wr.obj") + generate_mesh(args.center_obj,args.surround_obj,args.transform_vector) + + input_file = Path("merged_mesh.obj") + output_file = Path(args.output_file) + + out_dir = output_file.parent.joinpath('frames') + out_dir.mkdir(parents=True, exist_ok=True) + + anim_mesh = trimesh.load_mesh(str(input_file)) + + render_video(anim_mesh) + diff --git a/stable-dreamfusion-3DPortrait/evaluation/r_precision.py b/stable-dreamfusion-3DPortrait/evaluation/r_precision.py new file mode 100644 index 0000000..d2177ed --- /dev/null +++ b/stable-dreamfusion-3DPortrait/evaluation/r_precision.py @@ -0,0 +1,30 @@ +from sentence_transformers import SentenceTransformer, util +from PIL import Image +import argparse +import sys + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--text', default="", type=str, help="text prompt") + parser.add_argument('--workspace', default="trial", type=str, help="text prompt") + parser.add_argument('--latest', default='ep0001', type=str, help="which epoch result you want to use for image path") + parser.add_argument('--mode', default='rgb', type=str, help="mode of result, color(rgb) or textureless()") + parser.add_argument('--clip', default="clip-ViT-B-32", type=str, help="CLIP model to encode the img and prompt") + + opt = parser.parse_args() + + #Load CLIP model + model = SentenceTransformer(f'{opt.clip}') + + #Encode an image: + img_emb = model.encode(Image.open(f'../results/{opt.workspace}/validation/df_{opt.latest}_0005_{opt.mode}.png')) + + #Encode text descriptions + text_emb = model.encode([f'{opt.text}']) + + #Compute cosine similarities + cos_scores = util.cos_sim(img_emb, text_emb) + print("The final CLIP R-Precision is:", cos_scores[0][0].cpu().numpy()) + diff --git a/stable-dreamfusion-3DPortrait/evaluation/readme.md b/stable-dreamfusion-3DPortrait/evaluation/readme.md new file mode 100644 index 0000000..b54557c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/evaluation/readme.md @@ -0,0 +1,36 @@ +### Improvement: + +- Usage + + - r_precision.py
+    For computing CLIP R-Precision of a rendered result against its text prompt.
+    --text is the text prompt, following the original stable-dreamfusion usage.
+    --workspace is the workspace folder created for each prompt fed into stable-dreamfusion.
+    --latest selects which checkpoint's validation output to use; stable-dreamfusion saves results every epoch, so this is normally ep0100 unless training was stopped early or extended further.
+    --mode selects rgb or depth, corresponding to the color and textureless renders compared in Figure 5 (qualitative comparison with baselines) of the original paper.
+    --clip selects the CLIP model used for scoring: clip-ViT-B-32, CLIP B/16, or CLIP L/14, as in the original paper (see the note below on the assumed names of the last two).
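+    Note: r_precision.py passes --clip straight to sentence_transformers.SentenceTransformer, so besides the clip-ViT-B-32 call shown below, the B/16 and L/14 variants presumably correspond to the model names clip-ViT-B-16 and clip-ViT-L-14 (an assumption, not something the script enforces; verify against the sentence-transformers model list). A minimal sketch:
+
+    ```bash
+    # Assumed sentence-transformers identifiers for the ViT-B/16 and ViT-L/14 CLIP variants.
+    python r_precision.py --text "matte painting of a castle made of cheesecake surrounded by a moat made of ice cream" --workspace ../castle --latest ep0100 --mode rgb --clip clip-ViT-B-16
+    python r_precision.py --text "matte painting of a castle made of cheesecake surrounded by a moat made of ice cream" --workspace ../castle --latest ep0100 --mode rgb --clip clip-ViT-L-14
+    ```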
+
+    ```bash
+    python r_precision.py --text "matte painting of a castle made of cheesecake surrounded by a moat made of ice cream" --workspace ../castle --latest ep0100 --mode rgb --clip clip-ViT-B-32
+    ```
+
+  - Prompt.py (model name case sensitive)
+    For separating keywords from the text prompt.

+    --text is the text prompt, following the original stable-dreamfusion usage.
+    --model chooses the pretrained keyword-extraction model (vlt5, bert, or XLNet).
+
+    ```bash
+    python Prompt.py --text "a dog is in front of a rabbit" --model vlt5
+    python Prompt.py --text "a dog is in front of a rabbit" --model bert
+    python Prompt.py --text "a dog is in front of a rabbit" --model XLNet
+    ```
+
+
+  - mesh_to_video.py
+    --center_obj is the OBJ mesh of the central object.
+    --surround_obj is the OBJ mesh of the surrounding object, which is offset from the central object by the transform vector.
+    --transform_vector is the x y z translation vector applied to the surrounding object.
+ + ```bash + python mesh_to_video.py --center_obj 'mesh_whiterabbit/mesh.obj' --surround_obj 'mesh_snake/mesh.obj' --transform_vector [1,0,0] + ``` diff --git a/stable-dreamfusion-3DPortrait/fit_latent_trigrid.py b/stable-dreamfusion-3DPortrait/fit_latent_trigrid.py new file mode 100644 index 0000000..1a35273 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/fit_latent_trigrid.py @@ -0,0 +1,467 @@ +import os + +import torch +import argparse +import pandas as pd +import sys + +from nerf.provider import NeRFDataset +from nerf.trigrid_utils import * + +if __name__ == '__main__': + # See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre + class LoadFromFile(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + with values as f: + # parse arguments in the file and store them in the target namespace + parser.parse_args(f.read().split(), namespace) + + + parser = argparse.ArgumentParser() + parser.add_argument('--file', type=open, action=LoadFromFile, help="specify a file filled with more arguments") + parser.add_argument('--text', default=None, help="text prompt") + parser.add_argument('--negative', default='', type=str, help="negative text prompt") + parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray") + parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla") + parser.add_argument('--test', action='store_true', help="test mode") + parser.add_argument('--six_views', action='store_true', help="six_views mode: save the images of the six views") + parser.add_argument('--eval_interval', type=int, default=1, help="evaluate on the valid set every interval epochs") + parser.add_argument('--test_interval', type=int, default=5, help="test on the test set every interval epochs") + parser.add_argument('--workspace', type=str, default='workspace') + parser.add_argument('--seed', default=None) + + parser.add_argument('--image', default=None, help="image prompt") + parser.add_argument('--image_config', default=None, help="image config csv") + + parser.add_argument('--known_view_interval', type=int, default=4, + help="train default view with RGB loss every & iters, only valid if --image is not None.") + + parser.add_argument('--IF', action='store_true', + help="experimental: use DeepFloyd IF as the guidance model for nerf stage") + + parser.add_argument('--guidance', type=str, nargs='*', default=['SD'], help='guidance model') + parser.add_argument('--guidance_scale', type=float, default=100, + help="diffusion model classifier-free guidance scale") + + parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture") + parser.add_argument('--mcubes_resolution', type=int, default=256, help="mcubes resolution for extracting mesh") + parser.add_argument('--decimate_target', type=int, default=5e4, help="target face number for mesh decimation") + + parser.add_argument('--dmtet', action='store_true', help="use dmtet finetuning") + parser.add_argument('--tet_grid_size', type=int, default=128, help="tet grid size") + parser.add_argument('--init_with', type=str, default='', help="ckpt to init dmtet") + parser.add_argument('--lock_geo', action='store_true', help="disable dmtet to learn geometry") + + ## Perp-Neg options + parser.add_argument('--perpneg', action='store_true', help="use perp_neg") + parser.add_argument('--negative_w', type=float, default=-2, + help="The scale of 
the weights of negative prompts. A larger value will help to avoid the Janus problem, but may cause flat faces. Vary between 0 to -4, depending on the prompt") + parser.add_argument('--front_decay_factor', type=float, default=2, help="decay factor for the front prompt") + parser.add_argument('--side_decay_factor', type=float, default=10, help="decay factor for the side prompt") + + ## Trigrid options + parser.add_argument('--trigrid_path', type=str, default='', help="path to trigrid") + parser.add_argument('--trigrid_decoder_ckpt', type=str, default='', help="path to trigrid decoder ckpt") + parser.add_argument('--train_decoder', action='store_true', help="train trigrid decoder") + parser.add_argument('--learnable_bg', action='store_true', help="Learnable background") + parser.add_argument('--noise_bg', action='store_true', help="use noise background") + + ### training options + parser.add_argument('--iters', type=int, default=10000, help="training iters") + parser.add_argument('--lr', type=float, default=1e-3, help="max learning rate") + parser.add_argument('--ckpt', type=str, default='latest', + help="possible options are ['latest', 'scratch', 'best', 'latest_model']") + parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") + parser.add_argument('--taichi_ray', action='store_true', help="use taichi raymarching") + parser.add_argument('--max_steps', type=int, default=1024, + help="max num steps sampled per ray (only valid when using --cuda_ray)") + parser.add_argument('--num_steps', type=int, default=64, + help="num steps sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--upsample_steps', type=int, default=32, + help="num steps up-sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--update_extra_interval', type=int, default=16, + help="iter interval to update extra status (only valid when using --cuda_ray)") + parser.add_argument('--max_ray_batch', type=int, default=4096, + help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)") + parser.add_argument('--latent_iter_ratio', type=float, default=0.2, + help="training iters that only use albedo shading") + parser.add_argument('--albedo_iter_ratio', type=float, default=0, + help="training iters that only use albedo shading") + parser.add_argument('--min_ambient_ratio', type=float, default=0.1, + help="minimum ambient ratio to use in lambertian shading") + parser.add_argument('--textureless_ratio', type=float, default=0.2, help="ratio of textureless shading") + parser.add_argument('--jitter_pose', action='store_true', help="add jitters to the randomly sampled camera poses") + parser.add_argument('--jitter_center', type=float, default=0.2, + help="amount of jitter to add to sampled camera pose's center (camera location)") + parser.add_argument('--jitter_target', type=float, default=0.2, + help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')") + parser.add_argument('--jitter_up', type=float, default=0.02, + help="amount of jitter to add to sampled camera pose's up-axis (i.e. 
'camera roll')") + parser.add_argument('--uniform_sphere_rate', type=float, default=0, + help="likelihood of sampling camera location uniformly on the sphere surface area") + parser.add_argument('--grad_clip', type=float, default=-1, + help="clip grad of all grad to this limit, negative value disables it") + parser.add_argument('--grad_clip_rgb', type=float, default=-1, + help="clip grad of rgb space grad to this limit, negative value disables it") + # model options + parser.add_argument('--bg_radius', type=float, default=3.0, + help="if positive, use a background model at sphere(bg_radius)") + parser.add_argument('--density_activation', type=str, default='exp', choices=['softplus', 'exp'], + help="density activation function") + parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied") + parser.add_argument('--blob_density', type=float, default=5, help="max (center) density for the density blob") + parser.add_argument('--blob_radius', type=float, default=0.2, help="control the radius for the density blob") + # network backbone + parser.add_argument('--backbone', type=str, default='trigrid', choices=['trigrid'], help="nerf backbone") + parser.add_argument('--optim', type=str, default='adan', choices=['adan', 'adam'], help="optimizer") + parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], + help="stable diffusion version") + parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key") + # try this if CUDA OOM + parser.add_argument('--fp16', action='store_true', help="use float16 for training") + parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + # rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled. + parser.add_argument('--w', type=int, default=64, help="render width for NeRF in training") + parser.add_argument('--h', type=int, default=64, help="render height for NeRF in training") + parser.add_argument('--known_view_scale', type=float, default=1.5, + help="multiply --h/w by this for known view rendering") + parser.add_argument('--known_view_noise_scale', type=float, default=2e-3, + help="random camera noise added to rays_o and rays_d") + parser.add_argument('--dmtet_reso_scale', type=float, default=8, help="multiply --h/w by this for dmtet finetuning") + parser.add_argument('--batch_size', type=int, default=1, help="images to render per batch using NeRF") + + ### dataset options + parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box(-bound, bound)") + parser.add_argument('--dt_gamma', type=float, default=0, + help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") + parser.add_argument('--min_near', type=float, default=0.01, help="minimum near distance for camera") + + parser.add_argument('--radius_range', type=float, nargs='*', default=[2.7, 2.71], + help="training camera radius range") + parser.add_argument('--theta_range', type=float, nargs='*', default=[60, 105], + help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.") + parser.add_argument('--phi_range', type=float, nargs='*', default=[-180, 180], + help="training camera range along the azimuth angles (i.e. left and right). 
See advanced.md for details.") + parser.add_argument('--fovy_range', type=float, nargs='*', default=[11.5, 21], help="training camera fovy range") + + parser.add_argument('--default_radius', type=float, default=2.7, help="radius for the default view") + parser.add_argument('--default_polar', type=float, default=90, help="polar for the default view") + parser.add_argument('--default_azimuth', type=float, default=0, help="azimuth for the default view") + parser.add_argument('--default_fovy', type=float, default=12., help="fovy for the default view") + + parser.add_argument('--progressive_view', action='store_true', + help="progressively expand view sampling range from default to full") + parser.add_argument('--progressive_view_init_ratio', type=float, default=0.2, + help="initial ratio of final range, used for progressive_view") + + parser.add_argument('--progressive_level', action='store_true', + help="progressively increase gridencoder's max_level") + + parser.add_argument('--angle_overhead', type=float, default=30, help="[0, angle_overhead] is the overhead region") + parser.add_argument('--angle_front', type=float, default=60, + help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.") + parser.add_argument('--t_range', type=float, nargs='+', default=[0.02, 0.98], + help="stable diffusion time steps range") + parser.add_argument('--dont_override_stuff', action='store_true', help="Don't override t_range, etc.") + + ### regularizations + parser.add_argument('--lambda_entropy', type=float, default=1e-3, help="loss scale for alpha entropy") + parser.add_argument('--lambda_opacity', type=float, default=0, help="loss scale for alpha value") + parser.add_argument('--lambda_orient', type=float, default=1e-2, help="loss scale for orientation") + parser.add_argument('--lambda_tv', type=float, default=0, help="loss scale for total variation") + parser.add_argument('--lambda_wd', type=float, default=0, help="loss scale") + + parser.add_argument('--lambda_mesh_normal', type=float, default=0.5, help="loss scale for mesh normal smoothness") + parser.add_argument('--lambda_mesh_laplacian', type=float, default=0.5, help="loss scale for mesh laplacian") + + parser.add_argument('--lambda_guidance', type=float, default=1, help="loss scale for SDS") + parser.add_argument('--lambda_rgb', type=float, default=1000, help="loss scale for RGB") + parser.add_argument('--lambda_mask', type=float, default=500, help="loss scale for mask (alpha)") + parser.add_argument('--lambda_normal', type=float, default=0, help="loss scale for normal map") + parser.add_argument('--lambda_depth', type=float, default=10, help="loss scale for relative depth") + parser.add_argument('--lambda_2d_normal_smooth', type=float, default=0, + help="loss scale for 2D normal image smoothness") + parser.add_argument('--lambda_3d_normal_smooth', type=float, default=0, + help="loss scale for 3D normal image smoothness") + + ### debugging options + parser.add_argument('--save_guidance', action='store_true', + help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. 
Useful for debugging, but VERY SLOW and takes lots of memory!") + parser.add_argument('--save_guidance_interval', type=int, default=10, help="save guidance every X step") + + ### GUI options + parser.add_argument('--gui', action='store_true', help="start a GUI") + parser.add_argument('--W', type=int, default=800, help="GUI width") + parser.add_argument('--H', type=int, default=800, help="GUI height") + parser.add_argument('--radius', type=float, default=5, help="default GUI camera radius from center") + parser.add_argument('--fovy', type=float, default=20, help="default GUI camera fovy") + parser.add_argument('--light_theta', type=float, default=60, + help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]") + parser.add_argument('--light_phi', type=float, default=0, help="default GUI light direction in [0, 360), azimuth") + parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") + + parser.add_argument('--zero123_config', type=str, + default='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', + help="config file for zero123") + parser.add_argument('--zero123_ckpt', type=str, default='pretrained/zero123/zero123-xl.ckpt', + help="ckpt for zero123") + parser.add_argument('--zero123_grad_scale', type=str, default='angle', + help="whether to scale the gradients based on 'angle' or 'None'") + + parser.add_argument('--dataset_size_train', type=int, default=100, + help="Length of train dataset i.e. # of iterations per epoch") + parser.add_argument('--dataset_size_valid', type=int, default=8, + help="# of frames to render in the turntable video in validation") + parser.add_argument('--dataset_size_test', type=int, default=100, + help="# of frames to render in the turntable video at test time") + + parser.add_argument('--exp_start_iter', type=int, default=None, + help="start iter # for experiment, to calculate progressive_view and progressive_level") + parser.add_argument('--exp_end_iter', type=int, default=None, + help="end iter # for experiment, to calculate progressive_view and progressive_level") + + opt = parser.parse_args() + if opt.O: + raise NotImplementedError + opt.fp16 = True + opt.cuda_ray = True + + elif opt.O2: + raise NotImplementedError + opt.fp16 = True + opt.backbone = 'vanilla' + opt.progressive_level = True + + if opt.IF: + if 'SD' in opt.guidance: + opt.guidance.remove('SD') + opt.guidance.append('IF') + opt.latent_iter_ratio = 0 # must not do as_latent + + opt.images, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.zero123_ws = [], [], [], [], [] + opt.default_zero123_w = 1 + + opt.exp_start_iter = opt.exp_start_iter or 0 + opt.exp_end_iter = opt.exp_end_iter or opt.iters + + # parameters for image-conditioned generation + if opt.image is not None or opt.image_config is not None: + + if opt.text is None: + # use zero123 guidance model when only providing image + opt.guidance = ['zero123'] + if not opt.dont_override_stuff: + opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov + opt.guidance_scale = 5 + opt.lambda_3d_normal_smooth = 10 + else: + # use stable-diffusion when providing both text and image + opt.guidance = ['SD', 'clip'] + + if not opt.dont_override_stuff: + opt.guidance_scale = 10 + opt.t_range = [0.2, 0.6] + opt.known_view_interval = 2 + opt.lambda_3d_normal_smooth = 20 + opt.bg_radius = -1 + + # smoothness + opt.lambda_entropy = 1 + opt.lambda_orient = 1 + + # latent warmup is not needed + opt.latent_iter_ratio = 0 + if not 
opt.dont_override_stuff: + opt.albedo_iter_ratio = 0 + + # make shape init more stable + opt.progressive_view = True + opt.progressive_level = True + + if opt.image is not None: + opt.images += [opt.image] + opt.ref_radii += [opt.default_radius] + opt.ref_polars += [opt.default_polar] + opt.ref_azimuths += [opt.default_azimuth] + opt.zero123_ws += [opt.default_zero123_w] + + if opt.image_config is not None: + # for multiview (zero123) + conf = pd.read_csv(opt.image_config, skipinitialspace=True) + opt.images += list(conf.image) + opt.ref_radii += list(conf.radius) + opt.ref_polars += list(conf.polar) + opt.ref_azimuths += list(conf.azimuth) + opt.zero123_ws += list(conf.zero123_weight) + if opt.image is None: + opt.default_radius = opt.ref_radii[0] + opt.default_polar = opt.ref_polars[0] + opt.default_azimuth = opt.ref_azimuths[0] + opt.default_zero123_w = opt.zero123_ws[0] + + # reset to None + if len(opt.images) == 0: + opt.images = None + + if opt.learnable_bg: + assert opt.bg_radius > max( + opt.radius_range), f"bg_radius must be larger than max(radius_range) = {max(opt.radius_range)}" + assert opt.noise_bg is False + + if opt.noise_bg: + assert opt.learnable_bg is False + + assert opt.latent_iter_ratio == 1.0, "latent_iter_ratio must be 1.0 for now" + # default parameters for finetuning + if opt.dmtet: + + opt.h = int(opt.h * opt.dmtet_reso_scale) + opt.w = int(opt.w * opt.dmtet_reso_scale) + opt.known_view_scale = 1 + + if not opt.dont_override_stuff: + opt.t_range = [0.02, 0.50] # ref: magic3D + + if opt.images is not None: + + opt.lambda_normal = 0 + opt.lambda_depth = 0 + + if opt.text is not None and not opt.dont_override_stuff: + opt.t_range = [0.20, 0.50] + + # assume finetuning + opt.latent_iter_ratio = 0 + opt.albedo_iter_ratio = 0 + opt.progressive_view = False + # opt.progressive_level = False + os.makedirs(opt.workspace, exist_ok=True) + # record full range for progressive view expansion + if opt.progressive_view: + if not opt.dont_override_stuff: + # disable as they disturb progressive view + opt.jitter_pose = False + + opt.uniform_sphere_rate = 0 + # back up full range + opt.full_radius_range = opt.radius_range + opt.full_theta_range = opt.theta_range + opt.full_phi_range = opt.phi_range + opt.full_fovy_range = opt.fovy_range + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + from nerf.network_trigrid import NeRFNetwork + from nerf.network_trigrid_latent import LatentNeRFNetwork + + model = LatentNeRFNetwork( + opt=opt, + device=device + ) + teacher_model = NeRFNetwork( + opt=opt, + device=device + ) + # load + print(f'loading trigrid_renderer from {opt.trigrid_decoder_ckpt}...') + ckpt = torch.load(opt.trigrid_decoder_ckpt, map_location=lambda storage, loc: storage) + # ckpt = {'params': params, 'state_dict': ckpt} + state_dict = ckpt['state_dict'] + state_dict_wo_torgb = {} + for k, v in state_dict.items(): + if 'torgb' not in k: + state_dict_wo_torgb[k] = v + + model.model.load_state_dict(state_dict_wo_torgb, strict=False) + teacher_model.model.load_state_dict(state_dict) + # + model.model.rendering_kwargs['depth_resolution'] = 48 + model.model.rendering_kwargs['depth_resolution_importance'] = 48 + model.model.rendering_kwargs['ray_start'] = 2.35 + + teacher_model.model.rendering_kwargs['depth_resolution'] = 48 + teacher_model.model.rendering_kwargs['depth_resolution_importance'] = 48 + teacher_model.model.rendering_kwargs['ray_start'] = 2.35 + # + # load plane from pkl + print(f'loading trigrid from {opt.trigrid_path}...') + import pickle + 
+ with open(opt.trigrid_path, 'rb') as f: + data = pickle.load(f) + trigrid = data['trigrids'] + ws = data['ws'] + model.load_state_dict( + { + 'trigrid': trigrid, + 'ws': ws, + }, strict=False + ) + teacher_model.load_state_dict( + { + 'trigrid': trigrid, + 'ws': ws, + }, strict=False + ) + # print(f'loading encoder from {opt.encoder_ckpt}...') + # encoder_ckpt = torch.load('./pretrained/encoder_sd1.5.pt', map_location=lambda storage, loc: storage) + # model.latent_net.load_state_dict(encoder_ckpt, strict=False) + + print('save trigrid to workspace...') + shutil.copy(opt.trigrid_path, os.path.join(opt.workspace, 'trigrid.pkl')) + + train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, + size=opt.dataset_size_train * opt.batch_size, teacher_H=opt.h * 8, + teacher_W=opt.w * 8).dataloader() + + if opt.optim == 'adan': + from optimizer import Adan + + # Adan usually requires a larger LR + optimizer = lambda model: Adan(model.get_params(5 * opt.lr), eps=1e-8, + weight_decay=2e-5, max_grad_norm=5.0, + foreach=False) + else: # adam + optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr), + betas=(0.9, 0.99), eps=1e-15) + + if opt.backbone == 'vanilla': + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, + lambda iter: 0.1 ** min(iter / opt.iters, 1)) + else: + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed + # scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1)) + + guidance = nn.ModuleDict() + from guidance.sd_utils import StableDiffusion + + guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range) + + trainer = TrigridTrainer(' '.join(sys.argv), 'trigrid', opt, model, teacher_model, guidance, device=device, + workspace=opt.workspace, + optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, + use_checkpoint=opt.ckpt, scheduler_update_every_step=True) + + trainer.default_view_data = train_loader._data.get_default_view_data() + + valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, + size=opt.dataset_size_valid, teacher_H=opt.h * 8, teacher_W=opt.w * 8).dataloader( + batch_size=1) + test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test, + teacher_H=opt.h * 8, teacher_W=opt.w * 8).dataloader( + batch_size=1) + + # # test output + # trainer.test(test_loader, save_path=os.path.join(opt.workspace, 'latent_trigrid_fit_initiation')) + + # TO BE DEBUGGED + + max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32) + trainer.train(train_loader, valid_loader, test_loader, max_epoch) + + + + + + + diff --git a/stable-dreamfusion-3DPortrait/freqencoder/__init__.py b/stable-dreamfusion-3DPortrait/freqencoder/__init__.py new file mode 100644 index 0000000..69ec49c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/freqencoder/__init__.py @@ -0,0 +1 @@ +from .freq import FreqEncoder \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/freqencoder/backend.py b/stable-dreamfusion-3DPortrait/freqencoder/backend.py new file mode 100644 index 0000000..fa0e820 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/freqencoder/backend.py @@ -0,0 +1,42 @@ +import os +from torch.utils.cpp_extension import load + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', 
'-U__CUDA_NO_HALF2_OPERATORS__', + '-use_fast_math' +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + + # If cl.exe is not on path, try to find it. + if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +_backend = load(name='_freqencoder', + extra_cflags=c_flags, + extra_cuda_cflags=nvcc_flags, + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'freqencoder.cu', + 'bindings.cpp', + ]], + ) + +__all__ = ['_backend'] \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/freqencoder/freq.py b/stable-dreamfusion-3DPortrait/freqencoder/freq.py new file mode 100644 index 0000000..5cba1e6 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/freqencoder/freq.py @@ -0,0 +1,77 @@ +import numpy as np + +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.cuda.amp import custom_bwd, custom_fwd + +try: + import _freqencoder as _backend +except ImportError: + from .backend import _backend + + +class _freq_encoder(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) # force float32 for better precision + def forward(ctx, inputs, degree, output_dim): + # inputs: [B, input_dim], float + # RETURN: [B, F], float + + if not inputs.is_cuda: inputs = inputs.cuda() + inputs = inputs.contiguous() + + B, input_dim = inputs.shape # batch size, coord dim + + outputs = torch.empty(B, output_dim, dtype=inputs.dtype, device=inputs.device) + + _backend.freq_encode_forward(inputs, B, input_dim, degree, output_dim, outputs) + + ctx.save_for_backward(inputs, outputs) + ctx.dims = [B, input_dim, degree, output_dim] + + return outputs + + @staticmethod + #@once_differentiable + @custom_bwd + def backward(ctx, grad): + # grad: [B, C * C] + + grad = grad.contiguous() + inputs, outputs = ctx.saved_tensors + B, input_dim, degree, output_dim = ctx.dims + + grad_inputs = torch.zeros_like(inputs) + _backend.freq_encode_backward(grad, outputs, B, input_dim, degree, output_dim, grad_inputs) + + return grad_inputs, None, None + + +freq_encode = _freq_encoder.apply + + +class FreqEncoder(nn.Module): + def __init__(self, input_dim=3, degree=4): + super().__init__() + + self.input_dim = input_dim + self.degree = degree + self.output_dim = input_dim + input_dim * 2 * degree + + def __repr__(self): + return f"FreqEncoder: input_dim={self.input_dim} degree={self.degree} output_dim={self.output_dim}" + + def forward(self, inputs, **kwargs): + # inputs: [..., input_dim] + # return: [..., ] + + prefix_shape = list(inputs.shape[:-1]) + inputs = inputs.reshape(-1, self.input_dim) + + outputs = freq_encode(inputs, self.degree, self.output_dim) + + outputs = outputs.reshape(prefix_shape + [self.output_dim]) + + return outputs \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/freqencoder/setup.py b/stable-dreamfusion-3DPortrait/freqencoder/setup.py new file mode 100644 index 0000000..ea64112 
--- /dev/null +++ b/stable-dreamfusion-3DPortrait/freqencoder/setup.py @@ -0,0 +1,52 @@ +import os +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', + '-use_fast_math' +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + + # If cl.exe is not on path, try to find it. + if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +setup( + name='freqencoder', # package name, import this to use python API + ext_modules=[ + CUDAExtension( + name='_freqencoder', # extension name, import this to use CUDA API + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'freqencoder.cu', + 'bindings.cpp', + ]], + extra_compile_args={ + 'cxx': c_flags, + 'nvcc': nvcc_flags, + } + ), + ], + cmdclass={ + 'build_ext': BuildExtension, + } +) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/freqencoder/src/bindings.cpp b/stable-dreamfusion-3DPortrait/freqencoder/src/bindings.cpp new file mode 100644 index 0000000..bb5f285 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/freqencoder/src/bindings.cpp @@ -0,0 +1,8 @@ +#include + +#include "freqencoder.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("freq_encode_forward", &freq_encode_forward, "freq encode forward (CUDA)"); + m.def("freq_encode_backward", &freq_encode_backward, "freq encode backward (CUDA)"); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/freqencoder/src/freqencoder.cu b/stable-dreamfusion-3DPortrait/freqencoder/src/freqencoder.cu new file mode 100644 index 0000000..072da74 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/freqencoder/src/freqencoder.cu @@ -0,0 +1,129 @@ +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include + + +#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor") +#define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor") +#define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor") + +inline constexpr __device__ float PI() { return 3.141592653589793f; } + +template +__host__ __device__ T div_round_up(T val, T divisor) { + return (val + divisor - 1) / divisor; +} + +// inputs: [B, D] +// outputs: [B, C], C = D + D * deg * 2 +__global__ void kernel_freq( + const float * __restrict__ inputs, + uint32_t B, uint32_t D, uint32_t deg, uint32_t C, + float * outputs +) { + // parallel on per-element + const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x; + if (t >= B * C) 
return; + + // get index + const uint32_t b = t / C; + const uint32_t c = t - b * C; // t % C; + + // locate + inputs += b * D; + outputs += t; + + // write self + if (c < D) { + outputs[0] = inputs[c]; + // write freq + } else { + const uint32_t col = c / D - 1; + const uint32_t d = c % D; + const uint32_t freq = col / 2; + const float phase_shift = (col % 2) * (PI() / 2); + outputs[0] = __sinf(scalbnf(inputs[d], freq) + phase_shift); + } +} + +// grad: [B, C], C = D + D * deg * 2 +// outputs: [B, C] +// grad_inputs: [B, D] +__global__ void kernel_freq_backward( + const float * __restrict__ grad, + const float * __restrict__ outputs, + uint32_t B, uint32_t D, uint32_t deg, uint32_t C, + float * grad_inputs +) { + // parallel on per-element + const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x; + if (t >= B * D) return; + + const uint32_t b = t / D; + const uint32_t d = t - b * D; // t % D; + + // locate + grad += b * C; + outputs += b * C; + grad_inputs += t; + + // register + float result = grad[d]; + grad += D; + outputs += D; + + for (uint32_t f = 0; f < deg; f++) { + result += scalbnf(1.0f, f) * (grad[d] * outputs[D + d] - grad[D + d] * outputs[d]); + grad += 2 * D; + outputs += 2 * D; + } + + // write + grad_inputs[0] = result; +} + + +void freq_encode_forward(at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t deg, const uint32_t C, at::Tensor outputs) { + CHECK_CUDA(inputs); + CHECK_CUDA(outputs); + + CHECK_CONTIGUOUS(inputs); + CHECK_CONTIGUOUS(outputs); + + CHECK_IS_FLOATING(inputs); + CHECK_IS_FLOATING(outputs); + + static constexpr uint32_t N_THREADS = 128; + + kernel_freq<<>>(inputs.data_ptr(), B, D, deg, C, outputs.data_ptr()); +} + + +void freq_encode_backward(at::Tensor grad, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t deg, const uint32_t C, at::Tensor grad_inputs) { + CHECK_CUDA(grad); + CHECK_CUDA(outputs); + CHECK_CUDA(grad_inputs); + + CHECK_CONTIGUOUS(grad); + CHECK_CONTIGUOUS(outputs); + CHECK_CONTIGUOUS(grad_inputs); + + CHECK_IS_FLOATING(grad); + CHECK_IS_FLOATING(outputs); + CHECK_IS_FLOATING(grad_inputs); + + static constexpr uint32_t N_THREADS = 128; + + kernel_freq_backward<<>>(grad.data_ptr(), outputs.data_ptr(), B, D, deg, C, grad_inputs.data_ptr()); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/freqencoder/src/freqencoder.h b/stable-dreamfusion-3DPortrait/freqencoder/src/freqencoder.h new file mode 100644 index 0000000..34f28c7 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/freqencoder/src/freqencoder.h @@ -0,0 +1,10 @@ +# pragma once + +#include +#include + +// _backend.freq_encode_forward(inputs, B, input_dim, degree, output_dim, outputs) +void freq_encode_forward(at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t deg, const uint32_t C, at::Tensor outputs); + +// _backend.freq_encode_backward(grad, outputs, B, input_dim, degree, output_dim, grad_inputs) +void freq_encode_backward(at::Tensor grad, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t deg, const uint32_t C, at::Tensor grad_inputs); \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/gridencoder/__init__.py b/stable-dreamfusion-3DPortrait/gridencoder/__init__.py new file mode 100644 index 0000000..f1476ce --- /dev/null +++ b/stable-dreamfusion-3DPortrait/gridencoder/__init__.py @@ -0,0 +1 @@ +from .grid import GridEncoder \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/gridencoder/backend.py 
b/stable-dreamfusion-3DPortrait/gridencoder/backend.py new file mode 100644 index 0000000..b403f34 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/gridencoder/backend.py @@ -0,0 +1,40 @@ +import os +from torch.utils.cpp_extension import load + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + # If cl.exe is not on path, try to find it. + if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +_backend = load(name='_grid_encoder', + extra_cflags=c_flags, + extra_cuda_cflags=nvcc_flags, + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'gridencoder.cu', + 'bindings.cpp', + ]], + ) + +__all__ = ['_backend'] \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/gridencoder/grid.py b/stable-dreamfusion-3DPortrait/gridencoder/grid.py new file mode 100644 index 0000000..3f91daf --- /dev/null +++ b/stable-dreamfusion-3DPortrait/gridencoder/grid.py @@ -0,0 +1,206 @@ +import math +import numpy as np + +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.cuda.amp import custom_bwd, custom_fwd + +try: + import _gridencoder as _backend +except ImportError: + from .backend import _backend + +_gridtype_to_id = { + 'hash': 0, + 'tiled': 1, +} + +_interp_to_id = { + 'linear': 0, + 'smoothstep': 1, +} + +class _grid_encode(Function): + @staticmethod + @custom_fwd + def forward(ctx, inputs, embeddings, offsets, per_level_scale, base_resolution, calc_grad_inputs=False, gridtype=0, align_corners=False, interpolation=0, max_level=None): + # inputs: [B, D], float in [0, 1] + # embeddings: [sO, C], float + # offsets: [L + 1], int + # RETURN: [B, F], float + + inputs = inputs.contiguous() + + B, D = inputs.shape # batch size, coord dim + L = offsets.shape[0] - 1 # level + C = embeddings.shape[1] # embedding dim for each level + S = np.log2(per_level_scale) # resolution multiplier at each level, apply log2 for later CUDA exp2f + H = base_resolution # base resolution + + max_level = L if max_level is None else max(min(int(math.ceil(max_level * L)), L), 1) + + # manually handle autocast (only use half precision embeddings, inputs must be float for enough precision) + # if C % 2 != 0, force float, since half for atomicAdd is very slow. 
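+        # Example with illustrative numbers: L=16 levels and C=2 channels per level
+        # turn a [B, 3] input into a [B, 32] feature after the permute/reshape below.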
+ if torch.is_autocast_enabled() and C % 2 == 0: + embeddings = embeddings.to(torch.half) + + # L first, optimize cache for cuda kernel, but needs an extra permute later + outputs = torch.empty(L, B, C, device=inputs.device, dtype=embeddings.dtype) + + # zero init if we only calculate partial levels + if max_level < L: outputs.zero_() + + if calc_grad_inputs: + dy_dx = torch.empty(B, L * D * C, device=inputs.device, dtype=embeddings.dtype) + if max_level < L: dy_dx.zero_() + else: + dy_dx = None + + _backend.grid_encode_forward(inputs, embeddings, offsets, outputs, B, D, C, L, max_level, S, H, dy_dx, gridtype, align_corners, interpolation) + + # permute back to [B, L * C] + outputs = outputs.permute(1, 0, 2).reshape(B, L * C) + + ctx.save_for_backward(inputs, embeddings, offsets, dy_dx) + ctx.dims = [B, D, C, L, S, H, gridtype, interpolation, max_level] + ctx.align_corners = align_corners + + return outputs + + @staticmethod + #@once_differentiable + @custom_bwd + def backward(ctx, grad): + + inputs, embeddings, offsets, dy_dx = ctx.saved_tensors + B, D, C, L, S, H, gridtype, interpolation, max_level = ctx.dims + align_corners = ctx.align_corners + + # grad: [B, L * C] --> [L, B, C] + grad = grad.view(B, L, C).permute(1, 0, 2).contiguous() + + grad_embeddings = torch.zeros_like(embeddings) + + if dy_dx is not None: + grad_inputs = torch.zeros_like(inputs, dtype=embeddings.dtype) + else: + grad_inputs = None + + _backend.grid_encode_backward(grad, inputs, embeddings, offsets, grad_embeddings, B, D, C, L, max_level, S, H, dy_dx, grad_inputs, gridtype, align_corners, interpolation) + + if dy_dx is not None: + grad_inputs = grad_inputs.to(inputs.dtype) + + return grad_inputs, grad_embeddings, None, None, None, None, None, None, None, None + + + +grid_encode = _grid_encode.apply + + +class GridEncoder(nn.Module): + def __init__(self, input_dim=3, num_levels=16, level_dim=2, per_level_scale=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=None, gridtype='hash', align_corners=False, interpolation='linear'): + super().__init__() + + # the finest resolution desired at the last level, if provided, overridee per_level_scale + if desired_resolution is not None: + per_level_scale = np.exp2(np.log2(desired_resolution / base_resolution) / (num_levels - 1)) + + self.input_dim = input_dim # coord dims, 2 or 3 + self.num_levels = num_levels # num levels, each level multiply resolution by 2 + self.level_dim = level_dim # encode channels per level + self.per_level_scale = per_level_scale # multiply resolution by this scale at each level. 
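+        # Each level stores at most 2**log2_hashmap_size entries: for the default
+        # 'hash' grid type, coarse levels that fit in a dense grid are indexed
+        # directly, while finer levels fall back to hash indexing.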
+ self.log2_hashmap_size = log2_hashmap_size + self.base_resolution = base_resolution + self.output_dim = num_levels * level_dim + self.gridtype = gridtype + self.gridtype_id = _gridtype_to_id[gridtype] # "tiled" or "hash" + self.interpolation = interpolation + self.interp_id = _interp_to_id[interpolation] # "linear" or "smoothstep" + self.align_corners = align_corners + + # allocate parameters + offsets = [] + offset = 0 + self.max_params = 2 ** log2_hashmap_size + for i in range(num_levels): + resolution = int(np.ceil(base_resolution * per_level_scale ** i)) + params_in_level = min(self.max_params, (resolution) ** input_dim) # limit max number + params_in_level = int(np.ceil(params_in_level / 8) * 8) # make divisible + offsets.append(offset) + offset += params_in_level + offsets.append(offset) + offsets = torch.from_numpy(np.array(offsets, dtype=np.int32)) + self.register_buffer('offsets', offsets) + + self.n_params = offsets[-1] * level_dim + + # parameters + self.embeddings = nn.Parameter(torch.empty(offset, level_dim)) + + self.reset_parameters() + + def reset_parameters(self): + std = 1e-4 + self.embeddings.data.uniform_(-std, std) + + def __repr__(self): + return f"GridEncoder: input_dim={self.input_dim} num_levels={self.num_levels} level_dim={self.level_dim} resolution={self.base_resolution} -> {int(round(self.base_resolution * self.per_level_scale ** (self.num_levels - 1)))} per_level_scale={self.per_level_scale:.4f} params={tuple(self.embeddings.shape)} gridtype={self.gridtype} align_corners={self.align_corners} interpolation={self.interpolation}" + + def forward(self, inputs, bound=1, max_level=None): + # inputs: [..., input_dim], normalized real world positions in [-bound, bound] + # max_level: only calculate first max_level levels (None will use all levels) + # return: [..., num_levels * level_dim] + + inputs = (inputs + bound) / (2 * bound) # map to [0, 1] + + #print('inputs', inputs.shape, inputs.dtype, inputs.min().item(), inputs.max().item()) + + prefix_shape = list(inputs.shape[:-1]) + inputs = inputs.view(-1, self.input_dim) + + outputs = grid_encode(inputs, self.embeddings, self.offsets, self.per_level_scale, self.base_resolution, inputs.requires_grad, self.gridtype_id, self.align_corners, self.interp_id, max_level) + outputs = outputs.view(prefix_shape + [self.output_dim]) + + #print('outputs', outputs.shape, outputs.dtype, outputs.min().item(), outputs.max().item()) + + return outputs + + # always run in float precision! + @torch.cuda.amp.autocast(enabled=False) + def grad_total_variation(self, weight=1e-7, inputs=None, bound=1, B=1000000): + # inputs: [..., input_dim], float in [-b, b], location to calculate TV loss. 
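+        # Usage sketch: call between loss.backward() and optimizer.step(), e.g.
+        #   loss.backward(); encoder.grad_total_variation(weight=1e-7); optimizer.step()
+        # The TV gradient is accumulated directly into self.embeddings.grad by the
+        # CUDA kernel, so no extra loss term is returned from this method.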
+ + D = self.input_dim + C = self.embeddings.shape[1] # embedding dim for each level + L = self.offsets.shape[0] - 1 # level + S = np.log2(self.per_level_scale) # resolution multiplier at each level, apply log2 for later CUDA exp2f + H = self.base_resolution # base resolution + + if inputs is None: + # randomized in [0, 1] + inputs = torch.rand(B, self.input_dim, device=self.embeddings.device) + else: + inputs = (inputs + bound) / (2 * bound) # map to [0, 1] + inputs = inputs.view(-1, self.input_dim) + B = inputs.shape[0] + + if self.embeddings.grad is None: + raise ValueError('grad is None, should be called after loss.backward() and before optimizer.step()!') + + _backend.grad_total_variation(inputs, self.embeddings, self.embeddings.grad, self.offsets, weight, B, D, C, L, S, H, self.gridtype_id, self.align_corners) + + @torch.cuda.amp.autocast(enabled=False) + def grad_weight_decay(self, weight=0.1): + # level-wise meaned weight decay (ref: zip-nerf) + + B = self.embeddings.shape[0] # size of embedding + C = self.embeddings.shape[1] # embedding dim for each level + L = self.offsets.shape[0] - 1 # level + + if self.embeddings.grad is None: + raise ValueError('grad is None, should be called after loss.backward() and before optimizer.step()!') + + _backend.grad_weight_decay(self.embeddings, self.embeddings.grad, self.offsets, weight, B, C, L) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/gridencoder/setup.py b/stable-dreamfusion-3DPortrait/gridencoder/setup.py new file mode 100644 index 0000000..a91b0c1 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/gridencoder/setup.py @@ -0,0 +1,51 @@ +import os +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + + # If cl.exe is not on path, try to find it. 
+ if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +setup( + name='gridencoder', # package name, import this to use python API + ext_modules=[ + CUDAExtension( + name='_gridencoder', # extension name, import this to use CUDA API + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'gridencoder.cu', + 'bindings.cpp', + ]], + extra_compile_args={ + 'cxx': c_flags, + 'nvcc': nvcc_flags, + } + ), + ], + cmdclass={ + 'build_ext': BuildExtension, + } +) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/gridencoder/src/bindings.cpp b/stable-dreamfusion-3DPortrait/gridencoder/src/bindings.cpp new file mode 100644 index 0000000..fc3dd5e --- /dev/null +++ b/stable-dreamfusion-3DPortrait/gridencoder/src/bindings.cpp @@ -0,0 +1,10 @@ +#include + +#include "gridencoder.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("grid_encode_forward", &grid_encode_forward, "grid_encode_forward (CUDA)"); + m.def("grid_encode_backward", &grid_encode_backward, "grid_encode_backward (CUDA)"); + m.def("grad_total_variation", &grad_total_variation, "grad_total_variation (CUDA)"); + m.def("grad_weight_decay", &grad_weight_decay, "grad_weight_decay (CUDA)"); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/gridencoder/src/gridencoder.cu b/stable-dreamfusion-3DPortrait/gridencoder/src/gridencoder.cu new file mode 100644 index 0000000..93f5b80 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/gridencoder/src/gridencoder.cu @@ -0,0 +1,713 @@ +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + + +#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor") +#define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor") +#define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor") + + +// just for compatability of half precision in AT_DISPATCH_FLOATING_TYPES_AND_HALF... program will never reach here! + __device__ inline at::Half atomicAdd(at::Half *address, at::Half val) { + // requires CUDA >= 10 and ARCH >= 70 + // this is very slow compared to float or __half2, never use it. 
+ //return atomicAdd(reinterpret_cast<__half*>(address), val); +} + + +template +__host__ __device__ inline T div_round_up(T val, T divisor) { + return (val + divisor - 1) / divisor; +} + +template +__device__ inline T smoothstep(T val) { + return val*val*(3.0f - 2.0f * val); +} + +template +__device__ inline T smoothstep_derivative(T val) { + return 6*val*(1.0f - val); +} + + +template +__device__ uint32_t fast_hash(const uint32_t pos_grid[D]) { + + // coherent type of hashing + constexpr uint32_t primes[7] = { 1u, 2654435761u, 805459861u, 3674653429u, 2097192037u, 1434869437u, 2165219737u }; + + uint32_t result = 0; + #pragma unroll + for (uint32_t i = 0; i < D; ++i) { + result ^= pos_grid[i] * primes[i]; + } + + return result; +} + + +template +__device__ uint32_t get_grid_index(const uint32_t gridtype, const uint32_t ch, const uint32_t hashmap_size, const uint32_t resolution, const uint32_t pos_grid[D]) { + uint32_t stride = 1; + uint32_t index = 0; + + #pragma unroll + for (uint32_t d = 0; d < D && stride <= hashmap_size; d++) { + index += pos_grid[d] * stride; + stride *= resolution; + } + + // NOTE: for NeRF, the hash is in fact not necessary. Check https://github.com/NVlabs/instant-ngp/issues/97. + // gridtype: 0 == hash, 1 == tiled + if (gridtype == 0 && stride > hashmap_size) { + index = fast_hash(pos_grid); + } + + return (index % hashmap_size) * C + ch; +} + + +template +__global__ void kernel_grid( + const float * __restrict__ inputs, + const scalar_t * __restrict__ grid, + const int * __restrict__ offsets, + scalar_t * __restrict__ outputs, + const uint32_t B, const uint32_t L, const float S, const uint32_t H, + scalar_t * __restrict__ dy_dx, + const uint32_t gridtype, + const bool align_corners, + const uint32_t interp +) { + const uint32_t b = blockIdx.x * blockDim.x + threadIdx.x; + + if (b >= B) return; + + const uint32_t level = blockIdx.y; + + // locate + grid += (uint32_t)offsets[level] * C; + inputs += b * D; + outputs += level * B * C + b * C; + + // check input range (should be in [0, 1]) + bool flag_oob = false; + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + if (inputs[d] < 0 || inputs[d] > 1) { + flag_oob = true; + } + } + // if input out of bound, just set output to 0 + if (flag_oob) { + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + outputs[ch] = 0; + } + if (dy_dx) { + dy_dx += b * D * L * C + level * D * C; // B L D C + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + dy_dx[d * C + ch] = 0; + } + } + } + return; + } + + const uint32_t hashmap_size = offsets[level + 1] - offsets[level]; + const uint32_t resolution = (uint32_t)ceil(exp2f(level * S) * H); + + // calculate coordinate (always use float for precision!) 
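+    // Both branches below map the continuous input in [0, 1] onto this level's lattice:
+    // 'pos_grid' holds the integer corner of the enclosing cell and 'pos' the fractional
+    // offset inside it, which drives the 2^D-corner interpolation further down.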
+ float pos[D]; + float pos_deriv[D]; + uint32_t pos_grid[D]; + + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + + // align_corners + if (align_corners) { + pos[d] = inputs[d] * (float)(resolution - 1); // [0, resolution - 1] + pos_grid[d] = min((uint32_t)floorf(pos[d]), resolution - 2); // left-top corner, [0, resolution - 2] + } else { + pos[d] = fminf(fmaxf(inputs[d] * (float)resolution - 0.5f, 0.0f), (float)(resolution - 1)); // [-0.5, resolution-0.5] --> [0, resolution - 1] + pos_grid[d] = (uint32_t)floorf(pos[d]); // left-top corner, [0, resolution - 1] + } + pos[d] -= (float)pos_grid[d]; + + // smoothstep instead of linear + if (interp == 1) { + pos_deriv[d] = smoothstep_derivative(pos[d]); + pos[d] = smoothstep(pos[d]); + } else { + pos_deriv[d] = 1.0f; + } + } + + // verification of alignment + // if (level == L - 1 && b < 4) { + // printf("[b=%d, l=%d] pos=(%f, %f)+(%d, %d)\n", b, level, pos[0], pos[1], pos_grid[0], pos_grid[1]); + // } + + // interpolate + scalar_t results[C] = {0}; // temp results in register + + #pragma unroll + for (uint32_t idx = 0; idx < (1 << D); idx++) { + float w = 1; + uint32_t pos_grid_local[D]; + + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + if ((idx & (1 << d)) == 0) { + w *= 1 - pos[d]; + pos_grid_local[d] = pos_grid[d]; + } else { + w *= pos[d]; + pos_grid_local[d] = min(pos_grid[d] + 1, resolution - 1); + } + } + + uint32_t index = get_grid_index(gridtype, 0, hashmap_size, resolution, pos_grid_local); + + // writing to register (fast) + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + results[ch] += w * grid[index + ch]; + } + + //printf("[b=%d, l=%d] int %d, idx %d, w %f, val %f\n", b, level, idx, index, w, grid[index]); + } + + // writing to global memory (slow) + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + outputs[ch] = results[ch]; + } + + // prepare dy_dx + // differentiable (soft) indexing: https://discuss.pytorch.org/t/differentiable-indexing/17647/9 + if (dy_dx) { + + dy_dx += b * D * L * C + level * D * C; // B L D C + + #pragma unroll + for (uint32_t gd = 0; gd < D; gd++) { + + scalar_t results_grad[C] = {0}; + + #pragma unroll + for (uint32_t idx = 0; idx < (1 << (D - 1)); idx++) { + float w = (float)(align_corners ? resolution - 1 : resolution); + uint32_t pos_grid_local[D]; + + #pragma unroll + for (uint32_t nd = 0; nd < D - 1; nd++) { + const uint32_t d = (nd >= gd) ? 
(nd + 1) : nd; + + if ((idx & (1 << nd)) == 0) { + w *= 1 - pos[d]; + pos_grid_local[d] = pos_grid[d]; + } else { + w *= pos[d]; + pos_grid_local[d] = min(pos_grid[d] + 1, resolution - 1); + } + } + + pos_grid_local[gd] = pos_grid[gd]; + uint32_t index_left = get_grid_index(gridtype, 0, hashmap_size, resolution, pos_grid_local); + pos_grid_local[gd] = min(pos_grid[gd] + 1, resolution - 1); + uint32_t index_right = get_grid_index(gridtype, 0, hashmap_size, resolution, pos_grid_local); + + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + results_grad[ch] += w * (grid[index_right + ch] - grid[index_left + ch]) * pos_deriv[gd]; + } + } + + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + dy_dx[gd * C + ch] = results_grad[ch]; + } + } + } +} + + +template +__global__ void kernel_grid_backward( + const scalar_t * __restrict__ grad, + const float * __restrict__ inputs, + const scalar_t * __restrict__ grid, + const int * __restrict__ offsets, + scalar_t * __restrict__ grad_grid, + const uint32_t B, const uint32_t L, const float S, const uint32_t H, + const uint32_t gridtype, + const bool align_corners, + const uint32_t interp +) { + const uint32_t b = (blockIdx.x * blockDim.x + threadIdx.x) * N_C / C; + if (b >= B) return; + + const uint32_t level = blockIdx.y; + const uint32_t ch = (blockIdx.x * blockDim.x + threadIdx.x) * N_C - b * C; + + // locate + grad_grid += offsets[level] * C; + inputs += b * D; + grad += level * B * C + b * C + ch; // L, B, C + + const uint32_t hashmap_size = offsets[level + 1] - offsets[level]; + const uint32_t resolution = (uint32_t)ceil(exp2f(level * S) * H); + + // check input range (should be in [0, 1]) + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + if (inputs[d] < 0 || inputs[d] > 1) { + return; // grad is init as 0, so we simply return. 
+ } + } + + // calculate coordinate + float pos[D]; + uint32_t pos_grid[D]; + + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + // align_corners + if (align_corners) { + pos[d] = inputs[d] * (float)(resolution - 1); // [0, resolution - 1] + pos_grid[d] = min((uint32_t)floorf(pos[d]), resolution - 2); // left-top corner, [0, resolution - 2] + } else { + pos[d] = fminf(fmaxf(inputs[d] * (float)resolution - 0.5f, 0.0f), (float)(resolution - 1)); // [-0.5, resolution-0.5] --> [0, resolution - 1] + pos_grid[d] = (uint32_t)floorf(pos[d]); // left-top corner, [0, resolution - 1] + } + pos[d] -= (float)pos_grid[d]; + // smoothstep instead of linear + if (interp == 1) { + pos[d] = smoothstep(pos[d]); + } + } + + scalar_t grad_cur[N_C] = {0}; // fetch to register + #pragma unroll + for (uint32_t c = 0; c < N_C; c++) { + grad_cur[c] = grad[c]; + } + + // interpolate + #pragma unroll + for (uint32_t idx = 0; idx < (1 << D); idx++) { + float w = 1; + uint32_t pos_grid_local[D]; + + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + if ((idx & (1 << d)) == 0) { + w *= 1 - pos[d]; + pos_grid_local[d] = pos_grid[d]; + } else { + w *= pos[d]; + pos_grid_local[d] = min(pos_grid[d] + 1, resolution - 1); + } + } + + uint32_t index = get_grid_index(gridtype, ch, hashmap_size, resolution, pos_grid_local); + + // atomicAdd for __half is slow (especially for large values), so we use __half2 if N_C % 2 == 0 + // TODO: use float which is better than __half, if N_C % 2 != 0 + if (std::is_same::value && N_C % 2 == 0) { + #pragma unroll + for (uint32_t c = 0; c < N_C; c += 2) { + // process two __half at once (by interpreting as a __half2) + __half2 v = {(__half)(w * grad_cur[c]), (__half)(w * grad_cur[c + 1])}; + atomicAdd((__half2*)&grad_grid[index + c], v); + } + // float, or __half when N_C % 2 != 0 (which means C == 1) + } else { + #pragma unroll + for (uint32_t c = 0; c < N_C; c++) { + atomicAdd(&grad_grid[index + c], w * grad_cur[c]); + } + } + } +} + + +template +__global__ void kernel_input_backward( + const scalar_t * __restrict__ grad, + const scalar_t * __restrict__ dy_dx, + scalar_t * __restrict__ grad_inputs, + uint32_t B, uint32_t L +) { + const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x; + if (t >= B * D) return; + + const uint32_t b = t / D; + const uint32_t d = t - b * D; + + dy_dx += b * L * D * C; + + scalar_t result = 0; + + # pragma unroll + for (int l = 0; l < L; l++) { + # pragma unroll + for (int ch = 0; ch < C; ch++) { + result += grad[l * B * C + b * C + ch] * dy_dx[l * D * C + d * C + ch]; + } + } + + grad_inputs[t] = result; +} + + +template +void kernel_grid_wrapper(const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *outputs, const uint32_t B, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, scalar_t *dy_dx, const uint32_t gridtype, const bool align_corners, const uint32_t interp) { + static constexpr uint32_t N_THREAD = 512; + const dim3 blocks_hashgrid = { div_round_up(B, N_THREAD), max_level, 1 }; + switch (C) { + case 1: kernel_grid<<>>(inputs, embeddings, offsets, outputs, B, L, S, H, dy_dx, gridtype, align_corners, interp); break; + case 2: kernel_grid<<>>(inputs, embeddings, offsets, outputs, B, L, S, H, dy_dx, gridtype, align_corners, interp); break; + case 4: kernel_grid<<>>(inputs, embeddings, offsets, outputs, B, L, S, H, dy_dx, gridtype, align_corners, interp); break; + case 8: kernel_grid<<>>(inputs, embeddings, offsets, outputs, B, L, S, H, dy_dx, gridtype, align_corners, interp); 
break; + case 16: kernel_grid<<>>(inputs, embeddings, offsets, outputs, B, L, S, H, dy_dx, gridtype, align_corners, interp); break; + case 32: kernel_grid<<>>(inputs, embeddings, offsets, outputs, B, L, S, H, dy_dx, gridtype, align_corners, interp); break; + default: throw std::runtime_error{"GridEncoding: C must be 1, 2, 4, 8, 16 or 32."}; + } +} + +// inputs: [B, D], float, in [0, 1] +// embeddings: [sO, C], float +// offsets: [L + 1], uint32_t +// outputs: [L, B, C], float (L first, so only one level of hashmap needs to fit into cache at a time.) +// H: base resolution +// dy_dx: [B, L * D * C] +template +void grid_encode_forward_cuda(const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *outputs, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, scalar_t *dy_dx, const uint32_t gridtype, const bool align_corners, const uint32_t interp) { + switch (D) { + case 2: kernel_grid_wrapper(inputs, embeddings, offsets, outputs, B, C, L, max_level, S, H, dy_dx, gridtype, align_corners, interp); break; + case 3: kernel_grid_wrapper(inputs, embeddings, offsets, outputs, B, C, L, max_level, S, H, dy_dx, gridtype, align_corners, interp); break; + case 4: kernel_grid_wrapper(inputs, embeddings, offsets, outputs, B, C, L, max_level, S, H, dy_dx, gridtype, align_corners, interp); break; + case 5: kernel_grid_wrapper(inputs, embeddings, offsets, outputs, B, C, L, max_level, S, H, dy_dx, gridtype, align_corners, interp); break; + default: throw std::runtime_error{"GridEncoding: D must be 2, 3, 4 or 5."}; + } +} + +template +void kernel_grid_backward_wrapper(const scalar_t *grad, const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *grad_embeddings, const uint32_t B, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, scalar_t *dy_dx, scalar_t *grad_inputs, const uint32_t gridtype, const bool align_corners, const uint32_t interp) { + static constexpr uint32_t N_THREAD = 256; + const uint32_t N_C = std::min(2u, C); // n_features_per_thread + const dim3 blocks_hashgrid = { div_round_up(B * C / N_C, N_THREAD), max_level, 1 }; + switch (C) { + case 1: + kernel_grid_backward<<>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners, interp); + if (dy_dx) kernel_input_backward<<>>(grad, dy_dx, grad_inputs, B, L); + break; + case 2: + kernel_grid_backward<<>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners, interp); + if (dy_dx) kernel_input_backward<<>>(grad, dy_dx, grad_inputs, B, L); + break; + case 4: + kernel_grid_backward<<>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners, interp); + if (dy_dx) kernel_input_backward<<>>(grad, dy_dx, grad_inputs, B, L); + break; + case 8: + kernel_grid_backward<<>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners, interp); + if (dy_dx) kernel_input_backward<<>>(grad, dy_dx, grad_inputs, B, L); + break; + case 16: + kernel_grid_backward<<>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners, interp); + if (dy_dx) kernel_input_backward<<>>(grad, dy_dx, grad_inputs, B, L); + break; + case 32: + kernel_grid_backward<<>>(grad, inputs, embeddings, offsets, grad_embeddings, B, L, S, H, gridtype, align_corners, interp); + if (dy_dx) kernel_input_backward<<>>(grad, dy_dx, grad_inputs, B, L); + break; + default: throw 
std::runtime_error{"GridEncoding: C must be 1, 2, 4, 8, 16 or 32."}; + } +} + + +// grad: [L, B, C], float +// inputs: [B, D], float, in [0, 1] +// embeddings: [sO, C], float +// offsets: [L + 1], uint32_t +// grad_embeddings: [sO, C] +// H: base resolution +template +void grid_encode_backward_cuda(const scalar_t *grad, const float *inputs, const scalar_t *embeddings, const int *offsets, scalar_t *grad_embeddings, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, scalar_t *dy_dx, scalar_t *grad_inputs, const uint32_t gridtype, const bool align_corners, const uint32_t interp) { + switch (D) { + case 2: kernel_grid_backward_wrapper(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, max_level, S, H, dy_dx, grad_inputs, gridtype, align_corners, interp); break; + case 3: kernel_grid_backward_wrapper(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, max_level, S, H, dy_dx, grad_inputs, gridtype, align_corners, interp); break; + case 4: kernel_grid_backward_wrapper(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, max_level, S, H, dy_dx, grad_inputs, gridtype, align_corners, interp); break; + case 5: kernel_grid_backward_wrapper(grad, inputs, embeddings, offsets, grad_embeddings, B, C, L, max_level, S, H, dy_dx, grad_inputs, gridtype, align_corners, interp); break; + default: throw std::runtime_error{"GridEncoding: D must be 2, 3, 4 or 5."}; + } +} + + + +void grid_encode_forward(const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, at::optional dy_dx, const uint32_t gridtype, const bool align_corners, const uint32_t interp) { + CHECK_CUDA(inputs); + CHECK_CUDA(embeddings); + CHECK_CUDA(offsets); + CHECK_CUDA(outputs); + // CHECK_CUDA(dy_dx); + + CHECK_CONTIGUOUS(inputs); + CHECK_CONTIGUOUS(embeddings); + CHECK_CONTIGUOUS(offsets); + CHECK_CONTIGUOUS(outputs); + // CHECK_CONTIGUOUS(dy_dx); + + CHECK_IS_FLOATING(inputs); + CHECK_IS_FLOATING(embeddings); + CHECK_IS_INT(offsets); + CHECK_IS_FLOATING(outputs); + // CHECK_IS_FLOATING(dy_dx); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + embeddings.scalar_type(), "grid_encode_forward", ([&] { + grid_encode_forward_cuda(inputs.data_ptr(), embeddings.data_ptr(), offsets.data_ptr(), outputs.data_ptr(), B, D, C, L, max_level, S, H, dy_dx.has_value() ? 
dy_dx.value().data_ptr() : nullptr, gridtype, align_corners, interp); + })); +} + +void grid_encode_backward(const at::Tensor grad, const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor grad_embeddings, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, const at::optional dy_dx, at::optional grad_inputs, const uint32_t gridtype, const bool align_corners, const uint32_t interp) { + CHECK_CUDA(grad); + CHECK_CUDA(inputs); + CHECK_CUDA(embeddings); + CHECK_CUDA(offsets); + CHECK_CUDA(grad_embeddings); + // CHECK_CUDA(dy_dx); + // CHECK_CUDA(grad_inputs); + + CHECK_CONTIGUOUS(grad); + CHECK_CONTIGUOUS(inputs); + CHECK_CONTIGUOUS(embeddings); + CHECK_CONTIGUOUS(offsets); + CHECK_CONTIGUOUS(grad_embeddings); + // CHECK_CONTIGUOUS(dy_dx); + // CHECK_CONTIGUOUS(grad_inputs); + + CHECK_IS_FLOATING(grad); + CHECK_IS_FLOATING(inputs); + CHECK_IS_FLOATING(embeddings); + CHECK_IS_INT(offsets); + CHECK_IS_FLOATING(grad_embeddings); + // CHECK_IS_FLOATING(dy_dx); + // CHECK_IS_FLOATING(grad_inputs); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "grid_encode_backward", ([&] { + grid_encode_backward_cuda(grad.data_ptr(), inputs.data_ptr(), embeddings.data_ptr(), offsets.data_ptr(), grad_embeddings.data_ptr(), B, D, C, L, max_level, S, H, dy_dx.has_value() ? dy_dx.value().data_ptr() : nullptr, grad_inputs.has_value() ? grad_inputs.value().data_ptr() : nullptr, gridtype, align_corners, interp); + })); + +} + + +template +__global__ void kernel_grad_tv( + const scalar_t * __restrict__ inputs, + const scalar_t * __restrict__ grid, + scalar_t * __restrict__ grad, + const int * __restrict__ offsets, + const float weight, + const uint32_t B, const uint32_t L, const float S, const uint32_t H, + const uint32_t gridtype, + const bool align_corners +) { + const uint32_t b = blockIdx.x * blockDim.x + threadIdx.x; + + if (b >= B) return; + + const uint32_t level = blockIdx.y; + + // locate + inputs += b * D; + grid += (uint32_t)offsets[level] * C; + grad += (uint32_t)offsets[level] * C; + + // check input range (should be in [0, 1]) + bool flag_oob = false; + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + if (inputs[d] < 0 || inputs[d] > 1) { + flag_oob = true; + } + } + + // if input out of bound, do nothing + if (flag_oob) return; + + const uint32_t hashmap_size = offsets[level + 1] - offsets[level]; + const uint32_t resolution = (uint32_t)ceil(exp2f(level * S) * H); + + // calculate coordinate + float pos[D]; + uint32_t pos_grid[D]; // [0, resolution] + + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + // align_corners + if (align_corners) { + pos[d] = inputs[d] * (float)(resolution - 1); // [0, resolution - 1] + pos_grid[d] = min((uint32_t)floorf(pos[d]), resolution - 2); // left-top corner, [0, resolution - 2] + } else { + pos[d] = fminf(fmaxf(inputs[d] * (float)resolution - 0.5f, 0.0f), (float)(resolution - 1)); // [-0.5, resolution-0.5] --> [0, resolution - 1] + pos_grid[d] = (uint32_t)floorf(pos[d]); // left-top corner, [0, resolution - 1] + } + } + + //printf("[b=%d, l=%d] pos=(%f, %f)+(%d, %d)\n", b, level, pos[0], pos[1], pos_grid[0], pos_grid[1]); + + // total variation on pos_grid + scalar_t results[C] = {0}; // temp results in register + scalar_t idelta[C] = {0}; + + uint32_t index = get_grid_index(gridtype, 0, hashmap_size, resolution, pos_grid); + + scalar_t w = weight / (2 * D); + + #pragma unroll + for (uint32_t d = 0; d < D; d++) { + + uint32_t cur_d = 
pos_grid[d]; + scalar_t grad_val; + + // right side + if (cur_d < resolution) { + pos_grid[d] = cur_d + 1; + uint32_t index_right = get_grid_index(gridtype, 0, hashmap_size, resolution, pos_grid); + + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + grad_val = (grid[index + ch] - grid[index_right + ch]); + results[ch] += grad_val; + idelta[ch] += grad_val * grad_val; + } + } + + // left side + if (cur_d > 0) { + pos_grid[d] = cur_d - 1; + uint32_t index_left = get_grid_index(gridtype, 0, hashmap_size, resolution, pos_grid); + + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + grad_val = (grid[index + ch] - grid[index_left + ch]); + results[ch] += grad_val; + idelta[ch] += grad_val * grad_val; + } + } + + // reset + pos_grid[d] = cur_d; + } + + // writing to global memory (slow) + #pragma unroll + for (uint32_t ch = 0; ch < C; ch++) { + // index may collide, so use atomic! + atomicAdd(&grad[index + ch], w * results[ch] * rsqrtf(idelta[ch] + 1e-9f)); + } + +} + + +template +void kernel_grad_tv_wrapper(const scalar_t *inputs, const scalar_t *embeddings, scalar_t *grad, const int *offsets, const float weight, const uint32_t B, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const uint32_t gridtype, const bool align_corners) { + static constexpr uint32_t N_THREAD = 512; + const dim3 blocks_hashgrid = { div_round_up(B, N_THREAD), L, 1 }; + switch (C) { + case 1: kernel_grad_tv<<>>(inputs, embeddings, grad, offsets, weight, B, L, S, H, gridtype, align_corners); break; + case 2: kernel_grad_tv<<>>(inputs, embeddings, grad, offsets, weight, B, L, S, H, gridtype, align_corners); break; + case 4: kernel_grad_tv<<>>(inputs, embeddings, grad, offsets, weight, B, L, S, H, gridtype, align_corners); break; + case 8: kernel_grad_tv<<>>(inputs, embeddings, grad, offsets, weight, B, L, S, H, gridtype, align_corners); break; + case 16: kernel_grad_tv<<>>(inputs, embeddings, grad, offsets, weight, B, L, S, H, gridtype, align_corners); break; + case 32: kernel_grad_tv<<>>(inputs, embeddings, grad, offsets, weight, B, L, S, H, gridtype, align_corners); break; + default: throw std::runtime_error{"GridEncoding: C must be 1, 2, 4, 8, 16 or 32."}; + } +} + + +template +void grad_total_variation_cuda(const scalar_t *inputs, const scalar_t *embeddings, scalar_t *grad, const int *offsets, const float weight, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const uint32_t gridtype, const bool align_corners) { + switch (D) { + case 2: kernel_grad_tv_wrapper(inputs, embeddings, grad, offsets, weight, B, C, L, S, H, gridtype, align_corners); break; + case 3: kernel_grad_tv_wrapper(inputs, embeddings, grad, offsets, weight, B, C, L, S, H, gridtype, align_corners); break; + case 4: kernel_grad_tv_wrapper(inputs, embeddings, grad, offsets, weight, B, C, L, S, H, gridtype, align_corners); break; + case 5: kernel_grad_tv_wrapper(inputs, embeddings, grad, offsets, weight, B, C, L, S, H, gridtype, align_corners); break; + default: throw std::runtime_error{"GridEncoding: D must be 2, 3, 4, or 5."}; + } +} + + +void grad_total_variation(const at::Tensor inputs, const at::Tensor embeddings, at::Tensor grad, const at::Tensor offsets, const float weight, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const uint32_t gridtype, const bool align_corners) { + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + embeddings.scalar_type(), "grad_total_variation", ([&] { + 
grad_total_variation_cuda(inputs.data_ptr(), embeddings.data_ptr(), grad.data_ptr(), offsets.data_ptr(), weight, B, D, C, L, S, H, gridtype, align_corners); + })); +} + +template +__global__ void kernel_grad_wd( + const scalar_t * __restrict__ grid, + scalar_t * __restrict__ grad, + const int * __restrict__ offsets, + const float weight, + const uint32_t B, const uint32_t L, const uint32_t C +) { + const uint32_t b = blockIdx.x * blockDim.x + threadIdx.x; + + if (b >= B * C) return; + + // locate + grid += b; + grad += b; + + // decide in which level is this thread... + uint32_t level = 0; + const uint32_t n = b / C; + // binary search b in offsets + uint32_t l = 0, r = L; + while (l < r) { + uint32_t m = (l + r) / 2; + if (offsets[m] <= n) { + level = m; + l = m + 1; + } else { + r = m; + } + } + + const uint32_t hashmap_size = offsets[level + 1] - offsets[level]; + grad[0] += 2 * weight * grid[0] / hashmap_size; +} + +void grad_weight_decay(const at::Tensor embeddings, at::Tensor grad, const at::Tensor offsets, const float weight, const uint32_t B, const uint32_t C, const uint32_t L) { + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + embeddings.scalar_type(), "grad_weight_decay", ([&] { + static constexpr uint32_t N_THREAD = 1024; + const dim3 blocks_hashgrid = { div_round_up(B * C, N_THREAD), 1, 1 }; + kernel_grad_wd<<>>(embeddings.data_ptr(), grad.data_ptr(), offsets.data_ptr(), weight, B, L, C); + })); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/gridencoder/src/gridencoder.h b/stable-dreamfusion-3DPortrait/gridencoder/src/gridencoder.h new file mode 100644 index 0000000..3df2e08 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/gridencoder/src/gridencoder.h @@ -0,0 +1,18 @@ +#ifndef _HASH_ENCODE_H +#define _HASH_ENCODE_H + +#include +#include + +// inputs: [B, D], float, in [0, 1] +// embeddings: [sO, C], float +// offsets: [L + 1], uint32_t +// outputs: [B, L * C], float +// H: base resolution +void grid_encode_forward(const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, at::optional dy_dx, const uint32_t gridtype, const bool align_corners, const uint32_t interp); +void grid_encode_backward(const at::Tensor grad, const at::Tensor inputs, const at::Tensor embeddings, const at::Tensor offsets, at::Tensor grad_embeddings, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const uint32_t max_level, const float S, const uint32_t H, const at::optional dy_dx, at::optional grad_inputs, const uint32_t gridtype, const bool align_corners, const uint32_t interp); + +void grad_total_variation(const at::Tensor inputs, const at::Tensor embeddings, at::Tensor grad, const at::Tensor offsets, const float weight, const uint32_t B, const uint32_t D, const uint32_t C, const uint32_t L, const float S, const uint32_t H, const uint32_t gridtype, const bool align_corners); +void grad_weight_decay(const at::Tensor embeddings, at::Tensor grad, const at::Tensor offsets, const float weight, const uint32_t B, const uint32_t C, const uint32_t L); + +#endif \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/guidance/clip_utils.py b/stable-dreamfusion-3DPortrait/guidance/clip_utils.py new file mode 100644 index 0000000..f36295d --- /dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/clip_utils.py @@ -0,0 +1,54 @@ +import torch +import torch.nn as nn + +import 
torchvision.transforms as T +import torchvision.transforms.functional as TF + +import clip + +class CLIP(nn.Module): + def __init__(self, device, **kwargs): + super().__init__() + + self.device = device + self.clip_model, self.clip_preprocess = clip.load("ViT-B/16", device=self.device, jit=False) + + self.aug = T.Compose([ + T.Resize((224, 224)), + T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + def get_text_embeds(self, prompt, **kwargs): + + text = clip.tokenize(prompt).to(self.device) + text_z = self.clip_model.encode_text(text) + text_z = text_z / text_z.norm(dim=-1, keepdim=True) + + return text_z + + def get_img_embeds(self, image, **kwargs): + + image_z = self.clip_model.encode_image(self.aug(image)) + image_z = image_z / image_z.norm(dim=-1, keepdim=True) + + return image_z + + + def train_step(self, clip_z, pred_rgb, grad_scale=10, **kwargs): + """ + Args: + grad_scale: scalar or 1-tensor of size [B], i.e. 1 grad_scale per batch item. + """ + # TODO: resize the image from NeRF-rendered resolution (e.g. 128x128) to what CLIP expects (512x512), to prevent Pytorch warning about `antialias=None`. + image_z = self.clip_model.encode_image(self.aug(pred_rgb)) + image_z = image_z / image_z.norm(dim=-1, keepdim=True) # normalize features + + loss = 0 + if 'image' in clip_z: + loss -= ((image_z * clip_z['image']).sum(-1) * grad_scale).mean() + + if 'text' in clip_z: + loss -= ((image_z * clip_z['text']).sum(-1) * grad_scale).mean() + + return loss + diff --git a/stable-dreamfusion-3DPortrait/guidance/if_utils.py b/stable-dreamfusion-3DPortrait/guidance/if_utils.py new file mode 100644 index 0000000..c610b74 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/if_utils.py @@ -0,0 +1,234 @@ +from transformers import logging +from diffusers import IFPipeline, DDPMScheduler + +# suppress partial model loading warning +logging.set_verbosity_error() + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torch.cuda.amp import custom_bwd, custom_fwd +from .perpneg_utils import weighted_perpendicular_aggregator + + +def seed_everything(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + #torch.backends.cudnn.deterministic = True + #torch.backends.cudnn.benchmark = True + + +class IF(nn.Module): + def __init__(self, device, vram_O, t_range=[0.02, 0.98]): + super().__init__() + + self.device = device + + print(f'[INFO] loading DeepFloyd IF-I-XL...') + + model_key = "DeepFloyd/IF-I-XL-v1.0" + + is_torch2 = torch.__version__[0] == '2' + + # Create model + pipe = IFPipeline.from_pretrained(model_key, variant="fp16", torch_dtype=torch.float16) + if not is_torch2: + pipe.enable_xformers_memory_efficient_attention() + + if vram_O: + pipe.unet.to(memory_format=torch.channels_last) + pipe.enable_attention_slicing(1) + pipe.enable_model_cpu_offload() + else: + pipe.to(device) + + self.unet = pipe.unet + self.tokenizer = pipe.tokenizer + self.text_encoder = pipe.text_encoder + self.unet = pipe.unet + self.scheduler = pipe.scheduler + + self.pipe = pipe + + self.num_train_timesteps = self.scheduler.config.num_train_timesteps + self.min_step = int(self.num_train_timesteps * t_range[0]) + self.max_step = int(self.num_train_timesteps * t_range[1]) + self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + print(f'[INFO] loaded DeepFloyd IF-I-XL!') + + @torch.no_grad() + def get_text_embeds(self, prompt): + # prompt: [str] + + # TODO: should I add the preprocessing at 
https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py#LL486C10-L486C28 + prompt = self.pipe._text_preprocessing(prompt, clean_caption=False) + inputs = self.tokenizer(prompt, padding='max_length', max_length=77, truncation=True, add_special_tokens=True, return_tensors='pt') + embeddings = self.text_encoder(inputs.input_ids.to(self.device))[0] + + return embeddings + + + def train_step(self, text_embeddings, pred_rgb, guidance_scale=100, grad_scale=1): + + # [0, 1] to [-1, 1] and make sure shape is [64, 64] + images = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (images.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! + with torch.no_grad(): + # add noise + noise = torch.randn_like(images) + images_noisy = self.scheduler.add_noise(images, noise, t) + + # pred noise + model_input = torch.cat([images_noisy] * 2) + model_input = self.scheduler.scale_model_input(model_input, t) + tt = torch.cat([t] * 2) + noise_pred = self.unet(model_input, tt, encoder_hidden_states=text_embeddings).sample + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # TODO: how to use the variance here? + # noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + targets = (images - grad).detach() + loss = 0.5 * F.mse_loss(images.float(), targets, reduction='sum') / images.shape[0] + + return loss + + def train_step_perpneg(self, text_embeddings, weights, pred_rgb, guidance_scale=100, grad_scale=1): + + B = pred_rgb.shape[0] + K = (text_embeddings.shape[0] // B) - 1 # maximum number of prompts + + # [0, 1] to [-1, 1] and make sure shape is [64, 64] + images = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (images.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! 
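+ # Commentary (added note, not from the original DeepFloyd IF code): text_embeddings
+ # stacks B unconditional embeddings followed by K blocks of B prompt-conditioned
+ # embeddings, so the pixel-space UNet below runs once on (1 + K) * B noisy copies
+ # and is split back into unconditional / conditional predictions afterwards.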
+ with torch.no_grad(): + # add noise + noise = torch.randn_like(images) + images_noisy = self.scheduler.add_noise(images, noise, t) + + # pred noise + model_input = torch.cat([images_noisy] * (1 + K)) + model_input = self.scheduler.scale_model_input(model_input, t) + tt = torch.cat([t] * (1 + K)) + unet_output = self.unet(model_input, tt, encoder_hidden_states=text_embeddings).sample + noise_pred_uncond, noise_pred_text = unet_output[:B], unet_output[B:] + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + delta_noise_preds = noise_pred_text - noise_pred_uncond.repeat(K, 1, 1, 1) + noise_pred = noise_pred_uncond + guidance_scale * weighted_perpendicular_aggregator(delta_noise_preds, weights, B) + + + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + targets = (images - grad).detach() + loss = 0.5 * F.mse_loss(images.float(), targets, reduction='sum') / images.shape[0] + + return loss + + @torch.no_grad() + def produce_imgs(self, text_embeddings, height=64, width=64, num_inference_steps=50, guidance_scale=7.5): + + images = torch.randn((1, 3, height, width), device=text_embeddings.device, dtype=text_embeddings.dtype) + images = images * self.scheduler.init_noise_sigma + + self.scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(self.scheduler.timesteps): + # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. + model_input = torch.cat([images] * 2) + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet(model_input, t, encoder_hidden_states=text_embeddings).sample + + # perform guidance + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + images = self.scheduler.step(noise_pred, t, images).prev_sample + + images = (images + 1) / 2 + + return images + + + def prompt_to_img(self, prompts, negative_prompts='', height=512, width=512, num_inference_steps=50, guidance_scale=7.5, latents=None): + + if isinstance(prompts, str): + prompts = [prompts] + + if isinstance(negative_prompts, str): + negative_prompts = [negative_prompts] + + # Prompts -> text embeds + pos_embeds = self.get_text_embeds(prompts) # [1, 77, 768] + neg_embeds = self.get_text_embeds(negative_prompts) + text_embeds = torch.cat([neg_embeds, pos_embeds], dim=0) # [2, 77, 768] + + # Text embeds -> img + imgs = self.produce_imgs(text_embeds, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) # [1, 4, 64, 64] + + # Img to Numpy + imgs = imgs.detach().cpu().permute(0, 2, 3, 1).numpy() + imgs = (imgs * 255).round().astype('uint8') + + return imgs + + +if __name__ == '__main__': + + import argparse + import matplotlib.pyplot as plt + + parser = argparse.ArgumentParser() + parser.add_argument('prompt', type=str) + parser.add_argument('--negative', default='', type=str) + 
parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + parser.add_argument('-H', type=int, default=64) + parser.add_argument('-W', type=int, default=64) + parser.add_argument('--seed', type=int, default=0) + parser.add_argument('--steps', type=int, default=50) + opt = parser.parse_args() + + seed_everything(opt.seed) + + device = torch.device('cuda') + + sd = IF(device, opt.vram_O) + + imgs = sd.prompt_to_img(opt.prompt, opt.negative, opt.H, opt.W, opt.steps) + + # visualize image + plt.imshow(imgs[0]) + plt.show() + + + + diff --git a/stable-dreamfusion-3DPortrait/guidance/optimizer.py b/stable-dreamfusion-3DPortrait/guidance/optimizer.py new file mode 100644 index 0000000..f5bb64f --- /dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/optimizer.py @@ -0,0 +1,325 @@ +# Copyright 2022 Garena Online Private Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List + +import torch +from torch import Tensor +from torch.optim.optimizer import Optimizer + + +class Adan(Optimizer): + """ + Implements a pytorch variant of Adan + Adan was proposed in + Adan: Adaptive Nesterov Momentum Algorithm for + Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022. + https://arxiv.org/abs/2208.06677 + Arguments: + params (iterable): iterable of parameters to optimize or + dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float, flot], optional): coefficients used for + first- and second-order moments. (default: (0.98, 0.92, 0.99)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): decoupled weight decay + (L2 penalty) (default: 0) + max_grad_norm (float, optional): value used to clip + global grad norm (default: 0.0 no clip) + no_prox (bool): how to perform the decoupled weight decay + (default: False) + foreach (bool): if True would use torch._foreach implementation. + It's faster but uses slightly more memory. 
(default: True) + """ + def __init__(self, + params, + lr=1e-3, + betas=(0.98, 0.92, 0.99), + eps=1e-8, + weight_decay=0.0, + max_grad_norm=0.0, + no_prox=False, + foreach: bool = True): + if not 0.0 <= max_grad_norm: + raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm)) + if not 0.0 <= lr: + raise ValueError('Invalid learning rate: {}'.format(lr)) + if not 0.0 <= eps: + raise ValueError('Invalid epsilon value: {}'.format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError('Invalid beta parameter at index 0: {}'.format( + betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError('Invalid beta parameter at index 1: {}'.format( + betas[1])) + if not 0.0 <= betas[2] < 1.0: + raise ValueError('Invalid beta parameter at index 2: {}'.format( + betas[2])) + defaults = dict(lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm, + no_prox=no_prox, + foreach=foreach) + super().__init__(params, defaults) + + def __setstate__(self, state): + super(Adan, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('no_prox', False) + + @torch.no_grad() + def restart_opt(self): + for group in self.param_groups: + group['step'] = 0 + for p in group['params']: + if p.requires_grad: + state = self.state[p] + # State initialization + + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + # Exponential moving average of gradient difference + state['exp_avg_diff'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step.""" + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if self.defaults['max_grad_norm'] > 0: + device = self.param_groups[0]['params'][0].device + global_grad_norm = torch.zeros(1, device=device) + + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], + device=device) + for group in self.param_groups: + + for p in group['params']: + if p.grad is not None: + grad = p.grad + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + + clip_global_grad_norm = torch.clamp( + max_grad_norm / (global_grad_norm + group['eps']), + max=1.0).item() + else: + clip_global_grad_norm = 1.0 + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + exp_avg_diffs = [] + neg_pre_grads = [] + + beta1, beta2, beta3 = group['betas'] + # assume same step across group now to simplify things + # per parameter step can be easily support + # by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + bias_correction1 = 1.0 - beta1**group['step'] + bias_correction2 = 1.0 - beta2**group['step'] + bias_correction3 = 1.0 - beta3**group['step'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + grads.append(p.grad) + + state = self.state[p] + if len(state) == 0: + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + state['exp_avg_diff'] = torch.zeros_like(p) + + if 'neg_pre_grad' not in state or group['step'] == 1: + state['neg_pre_grad'] = p.grad.clone().mul_( + -clip_global_grad_norm) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + exp_avg_diffs.append(state['exp_avg_diff']) + neg_pre_grads.append(state['neg_pre_grad']) + + kwargs = dict( + 
params=params_with_grad, + grads=grads, + exp_avgs=exp_avgs, + exp_avg_sqs=exp_avg_sqs, + exp_avg_diffs=exp_avg_diffs, + neg_pre_grads=neg_pre_grads, + beta1=beta1, + beta2=beta2, + beta3=beta3, + bias_correction1=bias_correction1, + bias_correction2=bias_correction2, + bias_correction3_sqrt=math.sqrt(bias_correction3), + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + no_prox=group['no_prox'], + clip_global_grad_norm=clip_global_grad_norm, + ) + + if group['foreach']: + _multi_tensor_adan(**kwargs) + else: + _single_tensor_adan(**kwargs) + + return loss + + +def _single_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + exp_avg_diff = exp_avg_diffs[i] + neg_grad_or_diff = neg_pre_grads[i] + + grad.mul_(clip_global_grad_norm) + + # for memory saving, we use `neg_grad_or_diff` + # to get some temp variable in a inplace way + neg_grad_or_diff.add_(grad) + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t + exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, + alpha=1 - beta2) # diff_t + + neg_grad_or_diff.mul_(beta2).add_(grad) + exp_avg_sq.mul_(beta3).addcmul_(neg_grad_or_diff, + neg_grad_or_diff, + value=1 - beta3) # n_t + + denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps) + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + param.mul_(1 - lr * weight_decay) + param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + else: + param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + param.div_(1 + lr * weight_decay) + + neg_grad_or_diff.zero_().add_(grad, alpha=-1.0) + + +def _multi_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + if len(params) == 0: + return + + torch._foreach_mul_(grads, clip_global_grad_norm) + + # for memory saving, we use `neg_pre_grads` + # to get some temp variable in a inplace way + torch._foreach_add_(neg_pre_grads, grads) + + torch._foreach_mul_(exp_avgs, beta1) + torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t + + torch._foreach_mul_(exp_avg_diffs, beta2) + torch._foreach_add_(exp_avg_diffs, neg_pre_grads, + alpha=1 - beta2) # diff_t + + torch._foreach_mul_(neg_pre_grads, beta2) + torch._foreach_add_(neg_pre_grads, grads) + torch._foreach_mul_(exp_avg_sqs, beta3) + torch._foreach_addcmul_(exp_avg_sqs, + neg_pre_grads, + neg_pre_grads, + value=1 - beta3) # n_t + + denom = torch._foreach_sqrt(exp_avg_sqs) + torch._foreach_div_(denom, bias_correction3_sqrt) + torch._foreach_add_(denom, eps) + + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + 
torch._foreach_mul_(params, 1 - lr * weight_decay) + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, + exp_avg_diffs, + denom, + value=-step_size_diff) + else: + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, + exp_avg_diffs, + denom, + value=-step_size_diff) + torch._foreach_div_(params, 1 + lr * weight_decay) + torch._foreach_zero_(neg_pre_grads) + torch._foreach_add_(neg_pre_grads, grads, alpha=-1.0) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/guidance/perpneg_utils.py b/stable-dreamfusion-3DPortrait/guidance/perpneg_utils.py new file mode 100644 index 0000000..0dd5ff5 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/perpneg_utils.py @@ -0,0 +1,48 @@ +import torch + +# Please refer to the https://perp-neg.github.io/ for details about the paper and algorithm +def get_perpendicular_component(x, y): + assert x.shape == y.shape + return x - ((torch.mul(x, y).sum())/max(torch.norm(y)**2, 1e-6)) * y + + +def batch_get_perpendicular_component(x, y): + assert x.shape == y.shape + result = [] + for i in range(x.shape[0]): + result.append(get_perpendicular_component(x[i], y[i])) + return torch.stack(result) + + +def weighted_perpendicular_aggregator(delta_noise_preds, weights, batch_size): + """ + Notes: + - weights: an array with the weights for combining the noise predictions + - delta_noise_preds: [B x K, 4, 64, 64], K = max_prompts_per_dir + """ + delta_noise_preds = delta_noise_preds.split(batch_size, dim=0) # K x [B, 4, 64, 64] + weights = weights.split(batch_size, dim=0) # K x [B] + # print(f"{weights[0].shape = } {weights = }") + + assert torch.all(weights[0] == 1.0) + + main_positive = delta_noise_preds[0] # [B, 4, 64, 64] + + accumulated_output = torch.zeros_like(main_positive) + for i, complementary_noise_pred in enumerate(delta_noise_preds[1:], start=1): + # print(f"\n{i = }, {weights[i] = }, {weights[i].shape = }\n") + + idx_non_zero = torch.abs(weights[i]) > 1e-4 + + # print(f"{idx_non_zero.shape = }, {idx_non_zero = }") + # print(f"{weights[i][idx_non_zero].shape = }, {weights[i][idx_non_zero] = }") + # print(f"{complementary_noise_pred.shape = }, {complementary_noise_pred[idx_non_zero].shape = }") + # print(f"{main_positive.shape = }, {main_positive[idx_non_zero].shape = }") + if sum(idx_non_zero) == 0: + continue + accumulated_output[idx_non_zero] += weights[i][idx_non_zero].reshape(-1, 1, 1, 1) * batch_get_perpendicular_component(complementary_noise_pred[idx_non_zero], main_positive[idx_non_zero]) + + assert accumulated_output.shape == main_positive.shape, f"{accumulated_output.shape = }, {main_positive.shape = }" + + + return accumulated_output + main_positive \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/guidance/sd_utils.py b/stable-dreamfusion-3DPortrait/guidance/sd_utils.py new file mode 100644 index 0000000..48200fa --- /dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/sd_utils.py @@ -0,0 +1,513 @@ +from transformers import CLIPTextModel, CLIPTokenizer, logging +from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler, DDIMScheduler, StableDiffusionPipeline,AutoencoderTiny +from diffusers.utils.import_utils import is_xformers_available +from os.path import isfile +from pathlib import Path + +# suppress partial model loading warning +logging.set_verbosity_error() + +import torch +import torch.nn as nn +import torch.nn.functional as F 
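+# Note (added commentary): this module wraps latent Stable Diffusion as a guidance model
+# for score distillation — rendered portraits are encoded to VAE latents, noised, and the
+# UNet's noise prediction is converted into a gradient that steers the renderer toward the prompt.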
+from torchvision.utils import save_image + +from torch.cuda.amp import custom_bwd, custom_fwd +try: + from .perpneg_utils import weighted_perpendicular_aggregator +except: + from perpneg_utils import weighted_perpendicular_aggregator + + +def seed_everything(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + #torch.backends.cudnn.deterministic = True + #torch.backends.cudnn.benchmark = True + +class StableDiffusion(nn.Module): + def __init__(self, device, fp16, vram_O, sd_version='2.1', hf_key=None, t_range=[0.02, 0.98],): + super().__init__() + + self.device = device + self.sd_version = sd_version + + print(f'[INFO] loading stable diffusion...') + + if hf_key is not None: + print(f'[INFO] using hugging face custom model key: {hf_key}') + model_key = hf_key + elif self.sd_version == '2.1': + model_key = "stabilityai/stable-diffusion-2-1-base" + elif self.sd_version == '2.0': + model_key = "stabilityai/stable-diffusion-2-base" + elif self.sd_version == '1.5': + model_key = "runwayml/stable-diffusion-v1-5" + else: + raise ValueError(f'Stable-diffusion version {self.sd_version} not supported.') + + self.precision_t = torch.float16 if fp16 else torch.float32 + + # Create model + pipe = StableDiffusionPipeline.from_pretrained(model_key, torch_dtype=self.precision_t) + + if vram_O: + pipe.enable_sequential_cpu_offload() + pipe.enable_vae_slicing() + pipe.unet.to(memory_format=torch.channels_last) + pipe.enable_attention_slicing(1) + # pipe.enable_model_cpu_offload() + else: + pipe.to(device) + + self.vae = pipe.vae + #self.vae = AutoencoderKL.from_pretrained('F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion/pretrained/vae-ft-mse-840000-ema-pruned', torch_dtype=self.precision_t).to(self.device) + self.tokenizer = pipe.tokenizer + self.text_encoder = pipe.text_encoder + self.unet = pipe.unet + + self.scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler", torch_dtype=self.precision_t) + + del pipe + + self.num_train_timesteps = self.scheduler.config.num_train_timesteps + self.min_step = int(self.num_train_timesteps * t_range[0]) + self.max_step = int(self.num_train_timesteps * t_range[1]) + self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + print(f'[INFO] loaded stable diffusion!') + + @torch.no_grad() + def get_text_embeds(self, prompt): + # prompt: [str] + + inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt') + embeddings = self.text_encoder(inputs.input_ids.to(self.device))[0] + + return embeddings + + + def train_step(self, text_embeddings, pred_rgb, guidance_scale=100, as_latent=False, grad_scale=1, + save_guidance_path:Path=None): + + if as_latent: + latents = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + + # feature_image + (1 - weights_samples) * bcg_image + else: + # interp to 512x512 to be fed into vae. + pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! + latents = self.encode_imgs(pred_rgb_512) + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (latents.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! 
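+ # Commentary (added): the block below implements score distillation sampling (SDS).
+ # With classifier-free guidance, grad = grad_scale * w(t) * (eps_hat - eps); writing the
+ # loss as 0.5 * ||latents - (latents - grad).detach()||^2 / B makes its gradient w.r.t.
+ # the latents equal to grad (up to the 1/B batch normalisation), which backprop then
+ # carries through the differentiable VAE encoding into the rendered image.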
+ with torch.no_grad(): + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) + tt = torch.cat([t] * 2) + noise_pred = self.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) + noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + + + # import kiui + # latents_tmp = torch.randn((1, 4, 64, 64), device=self.device) + # latents_tmp = latents_tmp.detach() + # kiui.lo(latents_tmp) + # self.scheduler.set_timesteps(30) + # for i, t in enumerate(self.scheduler.timesteps): + # latent_model_input = torch.cat([latents_tmp] * 3) + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + # noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + # noise_pred = noise_pred_uncond + 10 * (noise_pred_pos - noise_pred_uncond) + # latents_tmp = self.scheduler.step(noise_pred, t, latents_tmp)['prev_sample'] + # imgs = self.decode_latents(latents_tmp) + # kiui.vis.plot_image(imgs) + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + + + if save_guidance_path: + with torch.no_grad(): + if as_latent: + pred_rgb_512 = self.decode_latents(latents) + # + # # visualize predicted denoised image + # # The following block of code is equivalent to `predict_start_from_noise`... + # # see zero123_utils.py's version for a simpler implementation. + # alphas = self.scheduler.alphas.to(latents) + # total_timesteps = self.max_step - self.min_step + 1 + # index = total_timesteps - t.to(latents.device) - 1 + # b = len(noise_pred) + # a_t = alphas[index].reshape(b, 1, 1, 1).to(self.device) + # sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + # sqrt_one_minus_at = sqrt_one_minus_alphas[index].reshape((b, 1, 1, 1)).to(self.device) + # pred_x0 = (latents_noisy - sqrt_one_minus_at * noise_pred) / a_t.sqrt() # current prediction for x_0 + # result_hopefully_less_noisy_image = self.decode_latents(pred_x0.to(latents.type(self.precision_t))) + # + # # visualize noisier image + # result_noisier_image = self.decode_latents(latents_noisy.to(pred_x0).type(self.precision_t)) + # + # # TODO: also denoise all-the-way + # # all 3 input images are [1, 3, H, W], e.g. 
[1, 3, 512, 512] + # # print(F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False).shape, pred_rgb_512.shape) + # viz_images = torch.cat([pred_rgb_512, result_noisier_image, result_hopefully_less_noisy_image], dim=0) + # save_image(viz_images, save_guidance_path) + + guidance_eval_utils = { + "use_perp_neg": False, + "neg_guidance_weights": None, + "text_embeddings": text_embeddings, + "t_orig": t, + "latents_noisy": latents_noisy, + "noise_pred": noise_pred, + "guidance_scale": guidance_scale, + "return_imgs_final": False, + } + + guidance_eval_out = self.guidance_eval(**guidance_eval_utils) + # decode_latents(latents_1step).permute(0, 2, 3, 1) + # "imgs_noisy": imgs_noisy, + # "imgs_1step": imgs_1step, + # "imgs_1orig": imgs_1orig, + # "imgs_final": imgs_final, + viz_images = [pred_rgb_512] + for k in guidance_eval_out: + if k.startswith("imgs_"): + viz_images.append(guidance_eval_out[k]) + viz_images = torch.cat(viz_images, dim=0) + + save_image(viz_images, save_guidance_path) + + + + + + + targets = (latents - grad).detach() + loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] + + return loss + + @torch.no_grad() + def get_noise_pred( + self, + latents_noisy, + t, + text_embeddings, + use_perp_neg=False, + neg_guidance_weights=None, + guidance_scale=100.0, + ): + batch_size = latents_noisy.shape[0] + + if use_perp_neg: + raise NotImplementedError + else: + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2, dim=0) + noise_pred = self.unet( + latent_model_input, + torch.cat([t.reshape(1)] * 2).to(self.device), + encoder_hidden_states=text_embeddings, + ).sample + + # perform guidance (high scale from paper!) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_text + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + return noise_pred + + + @torch.no_grad() + def guidance_eval( + self, + t_orig, + text_embeddings, + latents_noisy, + noise_pred, + use_perp_neg=False, + neg_guidance_weights=None, + guidance_scale=100.0, + return_imgs_final=False, + ): + # use only 50 timesteps, and find nearest of those to t + self.scheduler.set_timesteps(50) + self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device) + max_items_eval = 4 + bs = ( + min(max_items_eval, latents_noisy.shape[0]) + if max_items_eval > 0 + else latents_noisy.shape[0] + ) # batch size + large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[:bs].unsqueeze( + -1) # sized [bs,50] > [bs,1] + idxs = torch.min(large_enough_idxs, dim=1)[1] + t = self.scheduler.timesteps_gpu[idxs] + + fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy()) + imgs_noisy = self.decode_latents(latents_noisy[:bs]) + + # get prev latent + latents_1step = [] + pred_1orig = [] + for b in range(bs): + step_output = self.scheduler.step( + noise_pred[b: b + 1], t[b], latents_noisy[b: b + 1], eta=1 + ) + latents_1step.append(step_output["prev_sample"]) + pred_1orig.append(step_output["pred_original_sample"]) + latents_1step = torch.cat(latents_1step) + pred_1orig = torch.cat(pred_1orig) + imgs_1step = self.decode_latents(latents_1step) + imgs_1orig = self.decode_latents(pred_1orig) + + res = { + "bs": bs, + "noise_levels": fracs, + "imgs_noisy": imgs_noisy, + "imgs_1step": imgs_1step, + "imgs_1orig": imgs_1orig, + + } + if return_imgs_final: + latents_final = [] + for b, i in enumerate(idxs): + latents = latents_1step[b: b + 1] + text_emb = ( + text_embeddings[ + [b, b + len(idxs), b + 2 * 
len(idxs), b + 3 * len(idxs)], ... + ] + if use_perp_neg + else text_embeddings[[b, b + len(idxs)], ...] + ) + neg_guid = neg_guidance_weights[b: b + 1] if use_perp_neg else None + for t in self.scheduler.timesteps[i + 1:]: + # pred noise + # noise_pred = self.get_noise_pred( + # latents, t, text_emb, use_perp_neg, neg_guid,guidance_scale = guidance_scale + # ) + + latent_model_input = torch.cat([latents] * 2, dim=0) + noise_pred = self.unet( + latent_model_input, + torch.cat([t.reshape(1)] * 2).to(self.device), + encoder_hidden_states=text_emb, + ).sample + + # perform guidance (high scale from paper!) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_text + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + + # get prev latent + latents = self.scheduler.step(noise_pred, t, latents, eta=1)[ + "prev_sample" + ] + latents_final.append(latents) + + latents_final = torch.cat(latents_final) + imgs_final = self.decode_latents(latents_final) + + res["imgs_final"] = imgs_final + + return res + def train_step_perpneg(self, text_embeddings, weights, pred_rgb, guidance_scale=100, as_latent=False, grad_scale=1, + save_guidance_path:Path=None): + + B = pred_rgb.shape[0] + K = (text_embeddings.shape[0] // B) - 1 # maximum number of prompts + + if as_latent: + latents = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + else: + # interp to 512x512 to be fed into vae. + pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! + latents = self.encode_imgs(pred_rgb_512) + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (latents.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * (1 + K)) + tt = torch.cat([t] * (1 + K)) + unet_output = self.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) 
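+ # Commentary (added): unet_output holds B unconditional predictions followed by B*K
+ # prompt-conditioned ones. Perp-Neg keeps the main positive delta and, for every auxiliary
+ # prompt, only the component of its delta that is perpendicular to the main one, combined
+ # with the per-prompt weights inside weighted_perpendicular_aggregator.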
+ noise_pred_uncond, noise_pred_text = unet_output[:B], unet_output[B:] + delta_noise_preds = noise_pred_text - noise_pred_uncond.repeat(K, 1, 1, 1) + noise_pred = noise_pred_uncond + guidance_scale * weighted_perpendicular_aggregator(delta_noise_preds, weights, B) + + # import kiui + # latents_tmp = torch.randn((1, 4, 64, 64), device=self.device) + # latents_tmp = latents_tmp.detach() + # kiui.lo(latents_tmp) + # self.scheduler.set_timesteps(30) + # for i, t in enumerate(self.scheduler.timesteps): + # latent_model_input = torch.cat([latents_tmp] * 3) + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + # noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + # noise_pred = noise_pred_uncond + 10 * (noise_pred_pos - noise_pred_uncond) + # latents_tmp = self.scheduler.step(noise_pred, t, latents_tmp)['prev_sample'] + # imgs = self.decode_latents(latents_tmp) + # kiui.vis.plot_image(imgs) + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + if save_guidance_path: + with torch.no_grad(): + if as_latent: + pred_rgb_512 = self.decode_latents(latents) + + # visualize predicted denoised image + # The following block of code is equivalent to `predict_start_from_noise`... + # see zero123_utils.py's version for a simpler implementation. + alphas = self.alphas.to(latents) + total_timesteps = self.max_step - self.min_step + 1 + index = total_timesteps - t.to(latents.device) - 1 + b = len(noise_pred) + a_t = alphas[index].reshape(b,1,1,1).to(self.device) + sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + sqrt_one_minus_at = sqrt_one_minus_alphas[index].reshape((b,1,1,1)).to(self.device) + pred_x0 = (latents_noisy - sqrt_one_minus_at * noise_pred) / a_t.sqrt() # current prediction for x_0 + result_hopefully_less_noisy_image = self.decode_latents(pred_x0.to(latents.type(self.precision_t))) + + # visualize noisier image + result_noisier_image = self.decode_latents(latents_noisy.to(pred_x0).type(self.precision_t)) + + + + # all 3 input images are [1, 3, H, W], e.g. [1, 3, 512, 512] + viz_images = torch.cat([pred_rgb_512, result_noisier_image, result_hopefully_less_noisy_image],dim=0) + save_image(viz_images, save_guidance_path) + + targets = (latents - grad).detach() + loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] + + return loss + + + @torch.no_grad() + def produce_latents(self, text_embeddings, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, latents=None): + + if latents is None: + latents = torch.randn((text_embeddings.shape[0] // 2, self.unet.in_channels, height // 8, width // 8), device=self.device) + + self.scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(self.scheduler.timesteps): + # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
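+ # Commentary (added): classifier-free guidance — the latents are duplicated so a single
+ # UNet pass yields both the unconditional and the text-conditioned prediction
+ # (text_embeddings is the [negative, positive] concatenation built in prompt_to_img).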
+ latent_model_input = torch.cat([latents] * 2) + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + + # perform guidance + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents)['prev_sample'] + + return latents + + def decode_latents(self, latents): + + latents = 1 / self.vae.config.scaling_factor * latents + + imgs = self.vae.decode(latents).sample + imgs = (imgs / 2 + 0.5).clamp(0, 1) + + return imgs + + def encode_imgs(self, imgs): + # imgs: [B, 3, H, W] + + imgs = 2 * imgs - 1 + + posterior = self.vae.encode(imgs).latent_dist + latents = posterior.sample() * self.vae.config.scaling_factor + + return latents + + def prompt_to_img(self, prompts, negative_prompts='', height=512, width=512, num_inference_steps=50, guidance_scale=7.5, latents=None): + + if isinstance(prompts, str): + prompts = [prompts] + + if isinstance(negative_prompts, str): + negative_prompts = [negative_prompts] + + # Prompts -> text embeds + pos_embeds = self.get_text_embeds(prompts) # [1, 77, 768] + neg_embeds = self.get_text_embeds(negative_prompts) + text_embeds = torch.cat([neg_embeds, pos_embeds], dim=0) # [2, 77, 768] + + # Text embeds -> img latents + latents = self.produce_latents(text_embeds, height=height, width=width, latents=latents, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) # [1, 4, 64, 64] + + # Img latents -> imgs + imgs = self.decode_latents(latents) # [1, 3, 512, 512] + + # Img to Numpy + imgs = imgs.detach().cpu().permute(0, 2, 3, 1).numpy() + imgs = (imgs * 255).round().astype('uint8') + + return imgs + + +if __name__ == '__main__': + + import argparse + import matplotlib.pyplot as plt + + parser = argparse.ArgumentParser() + parser.add_argument('prompt', type=str) + parser.add_argument('--negative', default='', type=str) + parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version") + parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key") + parser.add_argument('--fp16', action='store_true', help="use float16 for training") + parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + parser.add_argument('-H', type=int, default=512) + parser.add_argument('-W', type=int, default=512) + parser.add_argument('--seed', type=int, default=2) + parser.add_argument('--steps', type=int, default=50) + opt = parser.parse_args() + + seed_everything(opt.seed) + + device = torch.device('cuda') + + sd = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key) + + imgs = sd.prompt_to_img(opt.prompt, opt.negative, opt.H, opt.W, opt.steps) + + # visualize image + plt.imshow(imgs[0]) + plt.show() + + +# python guidance/sd_utils.py "upper body photo of caucasian man in black clothes, night city street, bokeh" --hf_key pretrained/SG161222Realistic_Vision_V5.1_noVAE -H 512 -W 512 --seed 42 diff --git a/stable-dreamfusion-3DPortrait/guidance/sdedit.py b/stable-dreamfusion-3DPortrait/guidance/sdedit.py new file mode 100644 index 0000000..6c4b28f --- /dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/sdedit.py @@ -0,0 +1,605 @@ +from transformers import CLIPTextModel, CLIPTokenizer, logging +from diffusers import AutoencoderKL, UNet2DConditionModel, 
PNDMScheduler, DDIMScheduler, StableDiffusionPipeline,AutoencoderTiny +import numpy as np +from pathlib import Path +import glob +import os +# suppress partial model loading warning +logging.set_verbosity_error() + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.utils import save_image + +import tqdm +try: + from .perpneg_utils import weighted_perpendicular_aggregator +except: + from perpneg_utils import weighted_perpendicular_aggregator + + +def seed_everything(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + #torch.backends.cudnn.deterministic = True + #torch.backends.cudnn.benchmark = True + +class StableDiffusion(nn.Module): + def __init__(self, device, fp16, vram_O, sd_version='2.1', hf_key=None, t_range=[0.02, 0.98],): + super().__init__() + + self.device = device + self.sd_version = sd_version + + print(f'[INFO] loading stable diffusion...') + + if hf_key is not None: + print(f'[INFO] using hugging face custom model key: {hf_key}') + model_key = hf_key + elif self.sd_version == '2.1': + model_key = "stabilityai/stable-diffusion-2-1-base" + elif self.sd_version == '2.0': + model_key = "stabilityai/stable-diffusion-2-base" + elif self.sd_version == '1.5': + model_key = "runwayml/stable-diffusion-v1-5" + else: + raise ValueError(f'Stable-diffusion version {self.sd_version} not supported.') + + self.precision_t = torch.float16 if fp16 else torch.float32 + + # Create model + pipe = StableDiffusionPipeline.from_pretrained(model_key, torch_dtype=self.precision_t) + + if vram_O: + pipe.enable_sequential_cpu_offload() + pipe.enable_vae_slicing() + pipe.unet.to(memory_format=torch.channels_last) + pipe.enable_attention_slicing(1) + # pipe.enable_model_cpu_offload() + else: + pipe.to(device) + + self.vae = pipe.vae + #self.vae = AutoencoderKL.from_pretrained('F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion/pretrained/vae-ft-mse-840000-ema-pruned', torch_dtype=self.precision_t).to(self.device) + self.tokenizer = pipe.tokenizer + self.text_encoder = pipe.text_encoder + self.unet = pipe.unet + + self.scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler", torch_dtype=self.precision_t) + + del pipe + + self.num_train_timesteps = self.scheduler.config.num_train_timesteps + self.min_step = int(self.num_train_timesteps * t_range[0]) + self.max_step = int(self.num_train_timesteps * t_range[1]) + self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + print(f'[INFO] loaded stable diffusion!') + + @torch.no_grad() + def get_text_embeds(self, prompt): + # prompt: [str] + + inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt') + embeddings = self.text_encoder(inputs.input_ids.to(self.device))[0] + + return embeddings + + + def train_step(self, text_embeddings, pred_rgb, guidance_scale=100, as_latent=False, grad_scale=1, + save_guidance_path:Path=None): + + if as_latent: + latents = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + + # feature_image + (1 - weights_samples) * bcg_image + else: + # interp to 512x512 to be fed into vae. + pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! 
+ latents = self.encode_imgs(pred_rgb_512) + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (latents.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! + with torch.no_grad(): + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) + tt = torch.cat([t] * 2) + noise_pred = self.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) + noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + + + # import kiui + # latents_tmp = torch.randn((1, 4, 64, 64), device=self.device) + # latents_tmp = latents_tmp.detach() + # kiui.lo(latents_tmp) + # self.scheduler.set_timesteps(30) + # for i, t in enumerate(self.scheduler.timesteps): + # latent_model_input = torch.cat([latents_tmp] * 3) + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + # noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + # noise_pred = noise_pred_uncond + 10 * (noise_pred_pos - noise_pred_uncond) + # latents_tmp = self.scheduler.step(noise_pred, t, latents_tmp)['prev_sample'] + # imgs = self.decode_latents(latents_tmp) + # kiui.vis.plot_image(imgs) + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + + + if save_guidance_path: + with torch.no_grad(): + if as_latent: + pred_rgb_512 = self.decode_latents(latents) + # + # # visualize predicted denoised image + # # The following block of code is equivalent to `predict_start_from_noise`... + # # see zero123_utils.py's version for a simpler implementation. + # alphas = self.scheduler.alphas.to(latents) + # total_timesteps = self.max_step - self.min_step + 1 + # index = total_timesteps - t.to(latents.device) - 1 + # b = len(noise_pred) + # a_t = alphas[index].reshape(b, 1, 1, 1).to(self.device) + # sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + # sqrt_one_minus_at = sqrt_one_minus_alphas[index].reshape((b, 1, 1, 1)).to(self.device) + # pred_x0 = (latents_noisy - sqrt_one_minus_at * noise_pred) / a_t.sqrt() # current prediction for x_0 + # result_hopefully_less_noisy_image = self.decode_latents(pred_x0.to(latents.type(self.precision_t))) + # + # # visualize noisier image + # result_noisier_image = self.decode_latents(latents_noisy.to(pred_x0).type(self.precision_t)) + # + # # TODO: also denoise all-the-way + # # all 3 input images are [1, 3, H, W], e.g. 
[1, 3, 512, 512] + # # print(F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False).shape, pred_rgb_512.shape) + # viz_images = torch.cat([pred_rgb_512, result_noisier_image, result_hopefully_less_noisy_image], dim=0) + # save_image(viz_images, save_guidance_path) + + guidance_eval_utils = { + "use_perp_neg": False, + "neg_guidance_weights": None, + "text_embeddings": text_embeddings, + "t_orig": t, + "latents_noisy": latents_noisy, + "noise_pred": noise_pred, + "guidance_scale": guidance_scale, + "return_imgs_final": False, + } + + guidance_eval_out = self.guidance_eval(**guidance_eval_utils) + # decode_latents(latents_1step).permute(0, 2, 3, 1) + # "imgs_noisy": imgs_noisy, + # "imgs_1step": imgs_1step, + # "imgs_1orig": imgs_1orig, + # "imgs_final": imgs_final, + viz_images = [pred_rgb_512] + for k in guidance_eval_out: + if k.startswith("imgs_"): + viz_images.append(guidance_eval_out[k]) + viz_images = torch.cat(viz_images, dim=0) + + save_image(viz_images, save_guidance_path) + + + + + + + targets = (latents - grad).detach() + loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] + + return loss + + @torch.no_grad() + def get_noise_pred( + self, + latents_noisy, + t, + text_embeddings, + use_perp_neg=False, + neg_guidance_weights=None, + guidance_scale=100.0, + ): + batch_size = latents_noisy.shape[0] + + if use_perp_neg: + raise NotImplementedError + else: + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2, dim=0) + noise_pred = self.unet( + latent_model_input, + torch.cat([t.reshape(1)] * 2).to(self.device), + encoder_hidden_states=text_embeddings, + ).sample + + # perform guidance (high scale from paper!) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_text + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + return noise_pred + + + @torch.no_grad() + def guidance_eval( + self, + t_orig, + text_embeddings, + latents_noisy, + noise_pred, + use_perp_neg=False, + neg_guidance_weights=None, + guidance_scale=100.0, + return_imgs_final=False, + ): + # use only 50 timesteps, and find nearest of those to t + self.scheduler.set_timesteps(50) + self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device) + max_items_eval = 4 + bs = ( + min(max_items_eval, latents_noisy.shape[0]) + if max_items_eval > 0 + else latents_noisy.shape[0] + ) # batch size + large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[:bs].unsqueeze( + -1) # sized [bs,50] > [bs,1] + idxs = torch.min(large_enough_idxs, dim=1)[1] + t = self.scheduler.timesteps_gpu[idxs] + + fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy()) + imgs_noisy = self.decode_latents(latents_noisy[:bs]) + + # get prev latent + latents_1step = [] + pred_1orig = [] + for b in range(bs): + step_output = self.scheduler.step( + noise_pred[b: b + 1], t[b], latents_noisy[b: b + 1], eta=1 + ) + latents_1step.append(step_output["prev_sample"]) + pred_1orig.append(step_output["pred_original_sample"]) + latents_1step = torch.cat(latents_1step) + pred_1orig = torch.cat(pred_1orig) + imgs_1step = self.decode_latents(latents_1step) + imgs_1orig = self.decode_latents(pred_1orig) + + res = { + "bs": bs, + "noise_levels": fracs, + "imgs_noisy": imgs_noisy, + "imgs_1step": imgs_1step, + "imgs_1orig": imgs_1orig, + + } + if return_imgs_final: + latents_final = [] + for b, i in enumerate(idxs): + latents = latents_1step[b: b + 1] + text_emb = ( + text_embeddings[ + [b, b + len(idxs), b + 2 * 
len(idxs), b + 3 * len(idxs)], ... + ] + if use_perp_neg + else text_embeddings[[b, b + len(idxs)], ...] + ) + neg_guid = neg_guidance_weights[b: b + 1] if use_perp_neg else None + for t in self.scheduler.timesteps[i + 1:]: + # pred noise + # noise_pred = self.get_noise_pred( + # latents, t, text_emb, use_perp_neg, neg_guid,guidance_scale = guidance_scale + # ) + + latent_model_input = torch.cat([latents] * 2, dim=0) + noise_pred = self.unet( + latent_model_input, + torch.cat([t.reshape(1)] * 2).to(self.device), + encoder_hidden_states=text_emb, + ).sample + + # perform guidance (high scale from paper!) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_text + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + + # get prev latent + latents = self.scheduler.step(noise_pred, t, latents, eta=1)[ + "prev_sample" + ] + latents_final.append(latents) + + latents_final = torch.cat(latents_final) + imgs_final = self.decode_latents(latents_final) + + res["imgs_final"] = imgs_final + + return res + def train_step_perpneg(self, text_embeddings, weights, pred_rgb, guidance_scale=100, as_latent=False, grad_scale=1, + save_guidance_path:Path=None): + + B = pred_rgb.shape[0] + K = (text_embeddings.shape[0] // B) - 1 # maximum number of prompts + + if as_latent: + latents = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + else: + # interp to 512x512 to be fed into vae. + pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! + latents = self.encode_imgs(pred_rgb_512) + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (latents.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * (1 + K)) + tt = torch.cat([t] * (1 + K)) + unet_output = self.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) 
+ noise_pred_uncond, noise_pred_text = unet_output[:B], unet_output[B:] + delta_noise_preds = noise_pred_text - noise_pred_uncond.repeat(K, 1, 1, 1) + noise_pred = noise_pred_uncond + guidance_scale * weighted_perpendicular_aggregator(delta_noise_preds, weights, B) + + # import kiui + # latents_tmp = torch.randn((1, 4, 64, 64), device=self.device) + # latents_tmp = latents_tmp.detach() + # kiui.lo(latents_tmp) + # self.scheduler.set_timesteps(30) + # for i, t in enumerate(self.scheduler.timesteps): + # latent_model_input = torch.cat([latents_tmp] * 3) + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + # noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + # noise_pred = noise_pred_uncond + 10 * (noise_pred_pos - noise_pred_uncond) + # latents_tmp = self.scheduler.step(noise_pred, t, latents_tmp)['prev_sample'] + # imgs = self.decode_latents(latents_tmp) + # kiui.vis.plot_image(imgs) + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + if save_guidance_path: + with torch.no_grad(): + if as_latent: + pred_rgb_512 = self.decode_latents(latents) + + # visualize predicted denoised image + # The following block of code is equivalent to `predict_start_from_noise`... + # see zero123_utils.py's version for a simpler implementation. + alphas = self.alphas.to(latents) + total_timesteps = self.max_step - self.min_step + 1 + index = total_timesteps - t.to(latents.device) - 1 + b = len(noise_pred) + a_t = alphas[index].reshape(b,1,1,1).to(self.device) + sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + sqrt_one_minus_at = sqrt_one_minus_alphas[index].reshape((b,1,1,1)).to(self.device) + pred_x0 = (latents_noisy - sqrt_one_minus_at * noise_pred) / a_t.sqrt() # current prediction for x_0 + result_hopefully_less_noisy_image = self.decode_latents(pred_x0.to(latents.type(self.precision_t))) + + # visualize noisier image + result_noisier_image = self.decode_latents(latents_noisy.to(pred_x0).type(self.precision_t)) + + + + # all 3 input images are [1, 3, H, W], e.g. [1, 3, 512, 512] + viz_images = torch.cat([pred_rgb_512, result_noisier_image, result_hopefully_less_noisy_image],dim=0) + save_image(viz_images, save_guidance_path) + + targets = (latents - grad).detach() + loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] + + return loss + + + @torch.no_grad() + def produce_latents(self, text_embeddings, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, latents=None): + + if latents is None: + latents = torch.randn((text_embeddings.shape[0] // 2, self.unet.in_channels, height // 8, width // 8), device=self.device) + + self.scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(self.scheduler.timesteps): + # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
+ latent_model_input = torch.cat([latents] * 2) + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + + # perform guidance + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents)['prev_sample'] + + return latents + + + + def decode_latents(self, latents): + + latents = 1 / self.vae.config.scaling_factor * latents + + imgs = self.vae.decode(latents).sample + imgs = (imgs / 2 + 0.5).clamp(0, 1) + + return imgs + + def encode_imgs(self, imgs): + # imgs: [B, 3, H, W] + + imgs = 2 * imgs - 1 + + posterior = self.vae.encode(imgs).latent_dist + latents = posterior.sample() * self.vae.config.scaling_factor + + return latents + + def prompt_to_img(self, prompts, negative_prompts='', height=512, width=512, num_inference_steps=50, guidance_scale=7.5, latents=None): + + if isinstance(prompts, str): + prompts = [prompts] + + if isinstance(negative_prompts, str): + negative_prompts = [negative_prompts] + + # Prompts -> text embeds + pos_embeds = self.get_text_embeds(prompts) # [1, 77, 768] + neg_embeds = self.get_text_embeds(negative_prompts) + text_embeds = torch.cat([neg_embeds, pos_embeds], dim=0) # [2, 77, 768] + + # Text embeds -> img latents + latents = self.produce_latents(text_embeds, height=height, width=width, latents=latents, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) # [1, 4, 64, 64] + + # Img latents -> imgs + imgs = self.decode_latents(latents) # [1, 3, 512, 512] + + # Img to Numpy + imgs = imgs.detach().cpu().permute(0, 2, 3, 1).numpy() + imgs = (imgs * 255).round().astype('uint8') + + return imgs + + + def denoise_latents(self, text_embeddings, start_t,num_inference_steps=50, guidance_scale=7.5, latents=None): + + + self.scheduler.set_timesteps(num_inference_steps) + for t in tqdm.tqdm(self.scheduler.timesteps): + if t>start_t: + continue + # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
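+ # Commentary (added): timesteps above start_t were skipped by the `continue` above, so
+ # denoising resumes from the partially noised latent instead of from pure noise — the
+ # SDEdit-style refinement used by sdedit() below.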
+ latent_model_input = torch.cat([latents] * 2) + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + + # perform guidance + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents)['prev_sample'] + + return latents + + + def sdedit(self, data_dir, height=512, width=512, num_inference_steps=50,test_data_dir = None, guidance_scale=7.5): + + + noise_level = 200 + res_dir = data_dir + origin_data_dir = os.path.join(res_dir, 'data') + if not os.path.exists(origin_data_dir): + print('no data dir') + return + + update_data_dir = os.path.join(res_dir, 'update_data') + os.makedirs(update_data_dir, exist_ok=True) + + if len(glob.glob(origin_data_dir + '/*.png')) == len(glob.glob(update_data_dir + '/*.png')): + print('already done') + return + print('gen data for ', res_dir) + + name = os.path.basename(res_dir) + + prompt_path = os.path.join(test_data_dir, f'{name}/prompt.txt') + if os.path.exists(prompt_path): + with open(prompt_path, 'r') as f: + prompts = f.read().strip() + else: + raise ValueError('prompt.txt not exists') + + if isinstance(prompts, str): + prompts = [prompts] + # Prompts -> text embeds + pos_embeds = self.get_text_embeds(prompts) # [1, 77, 768] + neg_embeds = self.get_text_embeds('worst quality, low quality, jpeg artifacts, blurry') + text_embeds = torch.cat([neg_embeds, pos_embeds], dim=0) # [2, 77, 768] + + for image_path in glob.glob(origin_data_dir + '/*.png'): + image = PIL.Image.open(image_path).convert('RGB') + image = np.array(image) + + origin_img = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).float().to(self.device) # --> 0,1 + origin_img = origin_img / 255.0 + + latents = self.encode_imgs(origin_img) + + t = torch.tensor([noise_level], dtype=torch.long, + device=self.device) + + # predict the noise residual with unet, NO grad! 
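+ # Commentary (added): SDEdit refinement of one rendered view — the image is encoded to
+ # latents, noise is added up to a fixed level (noise_level = 200 on the scheduler's
+ # timestep scale), and the result is then denoised with text guidance so the output stays
+ # close to the input view while picking up detail from the diffusion prior.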
+ with torch.no_grad(): + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + + latents = self.denoise_latents(text_embeds, noise_level, num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, latents=latents_noisy) + + # Img latents -> imgs + img = self.decode_latents(latents) # [1, 3, 512, 512] + # Img to Numpy + img = img.detach().cpu().permute(0, 2, 3, 1).numpy() + img = (img * 255).round().astype('uint8')[0] + + PIL.Image.fromarray(img).save(os.path.join(update_data_dir, os.path.basename(image_path))) + + + +if __name__ == '__main__': + + import argparse + import matplotlib.pyplot as plt + import PIL + + parser = argparse.ArgumentParser() + parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version") + parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key") + + + parser.add_argument('--data_dir', type=str,help='Network pickle filename', required=True) + parser.add_argument('--test_data_dir', type=str,help='test_data_dir', required=True) + + + parser.add_argument('--fp16', action='store_true', help="use float16 for training") + parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + parser.add_argument('-H', type=int, default=512) + parser.add_argument('-W', type=int, default=512) + parser.add_argument('--seed', type=int, default=2) + parser.add_argument('--steps', type=int, default=50) + opt = parser.parse_args() + + seed_everything(opt.seed) + + device = torch.device('cuda') + + + + sd = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key) + + imgs = sd.sdedit(opt.data_dir,opt.H, opt.W, opt.steps,opt.test_data_dir) + + + +# \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/guidance/test_taesd.py b/stable-dreamfusion-3DPortrait/guidance/test_taesd.py new file mode 100644 index 0000000..7e2aef9 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/test_taesd.py @@ -0,0 +1,626 @@ +from transformers import CLIPTextModel, CLIPTokenizer, logging +from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler, DDIMScheduler, StableDiffusionPipeline,AutoencoderTiny +from diffusers.utils.import_utils import is_xformers_available +from os.path import isfile +from pathlib import Path + +# suppress partial model loading warning +logging.set_verbosity_error() + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.utils import save_image + +from torch.cuda.amp import custom_bwd, custom_fwd +try: + from .perpneg_utils import weighted_perpendicular_aggregator +except: + from perpneg_utils import weighted_perpendicular_aggregator +def seed_everything(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + #torch.backends.cudnn.deterministic = True + #torch.backends.cudnn.benchmark = True + + + +from dataclasses import dataclass +from diffusers.utils.outputs import BaseOutput +from diffusers.utils.accelerate_utils import apply_forward_hook +from diffusers.models.vae import DecoderOutput,DecoderTiny, EncoderTiny +from typing import Tuple, Union + +@dataclass +class TaesdOutput(BaseOutput): + """ + Output of AutoencoderTiny encoding method. + + Args: + latents (`torch.Tensor`): Encoded outputs of the `Encoder`. 
+ + """ + + latents: torch.Tensor + + +class Taesd(nn.Module): + def __init__(self,device, fp16 + ): + super().__init__() + self.device = device + self.precision_t = torch.float16 if fp16 else torch.float32 + #if init_ckpt + + in_channels = 3 + out_channels = 3 + encoder_block_out_channels: Tuple[int] = (64, 64, 64, 64) + decoder_block_out_channels: Tuple[int] = (64, 64, 64, 64) + act_fn: str = "relu" + latent_channels: int = 4 + upsampling_scaling_factor: int = 2 + num_encoder_blocks: Tuple[int] = (1, 3, 3, 3) + num_decoder_blocks: Tuple[int] = (3, 3, 3, 1) + latent_magnitude: int = 3 + latent_shift: float = 0.5 + force_upcast: float = False + scaling_factor: float = 1.0 + + + self.decoder = DecoderTiny( + in_channels=latent_channels, + out_channels=out_channels, + num_blocks=num_decoder_blocks, + block_out_channels=decoder_block_out_channels, + upsampling_scaling_factor=upsampling_scaling_factor, + act_fn=act_fn, + ).to(device).to(self.precision_t) + self.encoder = EncoderTiny( + in_channels=in_channels, + out_channels=latent_channels, + num_blocks=num_encoder_blocks, + block_out_channels=encoder_block_out_channels, + act_fn=act_fn, + ).to(device).to(self.precision_t) + + print("Loading pretrained model") + vae = AutoencoderTiny.from_pretrained("pretrained/taesd", torch_dtype=self.precision_t).to(device) + self.decoder.load_state_dict(vae.decoder.state_dict()) + self.encoder.load_state_dict(vae.encoder.state_dict()) + del vae + + + self.latent_magnitude = latent_magnitude + self.latent_shift = latent_shift + self.scaling_factor = scaling_factor + + + self.use_slicing = False + self.use_tiling = False + + # only relevant if vae tiling is enabled + self.spatial_scale_factor = 2 ** out_channels + self.tile_overlap_factor = 0.125 + self.tile_sample_min_size = 512 + self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor + + + + def _tiled_encode(self, x: torch.FloatTensor) -> torch.FloatTensor: + r"""Encode a batch of images using a tiled encoder. + + When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several + steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the + tiles overlap and are blended together to form a smooth output. + + Args: + x (`torch.FloatTensor`): Input batch of images. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.autoencoder_tiny.AutoencoderTinyOutput`] instead of a plain tuple. + + Returns: + [`~models.autoencoder_tiny.AutoencoderTinyOutput`] or `tuple`: + If return_dict is True, a [`~models.autoencoder_tiny.AutoencoderTinyOutput`] is returned, otherwise a + plain `tuple` is returned. 
+ """ + # scale of encoder output relative to input + sf = self.spatial_scale_factor + tile_size = self.tile_sample_min_size + + # number of pixels to blend and to traverse between tile + blend_size = int(tile_size * self.tile_overlap_factor) + traverse_size = tile_size - blend_size + + # tiles index (up/left) + ti = range(0, x.shape[-2], traverse_size) + tj = range(0, x.shape[-1], traverse_size) + + # mask for blending + blend_masks = torch.stack( + torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij") + ) + blend_masks = blend_masks.clamp(0, 1).to(x.device) + + # output array + out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device) + for i in ti: + for j in tj: + tile_in = x[..., i : i + tile_size, j : j + tile_size] + # tile result + tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf] + tile = self.encoder(tile_in) + h, w = tile.shape[-2], tile.shape[-1] + # blend tile result into output + blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0] + blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1] + blend_mask = blend_mask_i * blend_mask_j + tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w] + tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out) + return out + + def _tiled_decode(self, x: torch.FloatTensor) -> torch.FloatTensor: + r"""Encode a batch of images using a tiled encoder. + + When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several + steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the + tiles overlap and are blended together to form a smooth output. + + Args: + x (`torch.FloatTensor`): Input batch of images. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.autoencoder_tiny.AutoencoderTinyOutput`] instead of a plain tuple. + + Returns: + [`~models.vae.DecoderOutput`] or `tuple`: + If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is + returned. 
+ """ + # scale of decoder output relative to input + sf = self.spatial_scale_factor + tile_size = self.tile_latent_min_size + + # number of pixels to blend and to traverse between tiles + blend_size = int(tile_size * self.tile_overlap_factor) + traverse_size = tile_size - blend_size + + # tiles index (up/left) + ti = range(0, x.shape[-2], traverse_size) + tj = range(0, x.shape[-1], traverse_size) + + # mask for blending + blend_masks = torch.stack( + torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij") + ) + blend_masks = blend_masks.clamp(0, 1).to(x.device) + + # output array + out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device) + for i in ti: + for j in tj: + tile_in = x[..., i : i + tile_size, j : j + tile_size] + # tile result + tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf] + tile = self.decoder(tile_in) + h, w = tile.shape[-2], tile.shape[-1] + # blend tile result into output + blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0] + blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1] + blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w] + tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out) + return out + + + @apply_forward_hook + def encode( + self, x: torch.FloatTensor, return_dict: bool = True + ) -> Union[TaesdOutput, Tuple[torch.FloatTensor]]: + if self.use_slicing and x.shape[0] > 1: + output = [self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x) for x_slice in x.split(1)] + output = torch.cat(output) + else: + output = self._tiled_encode(x) if self.use_tiling else self.encoder(x) + + if not return_dict: + return (output,) + + return TaesdOutput(latents=output) + + @apply_forward_hook + def decode(self, x: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, Tuple[torch.FloatTensor]]: + print('decode in Taesd') + if self.use_slicing and x.shape[0] > 1: + output = [self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x) for x_slice in x.split(1)] + output = torch.cat(output) + else: + output = self._tiled_decode(x) if self.use_tiling else self.decoder(x) + + if not return_dict: + return (output,) + + return DecoderOutput(sample=output) + + + def decode_latents(self, latents): + ''' + + :param latents: [B, 4, H, W], + :return: imgs [B, 3, H, W], in [0, 1] + ''' + latents = 1 / self.scaling_factor * latents + + imgs = self.decode(latents).sample + + + + + imgs = (imgs / 2 + 0.5).clamp(0, 1) + + return imgs + + def encode_imgs(self, imgs): + ''' + + :param imgs: [B, 3, H, W], in [0, 1] + :return: latents: [B, 4, H, W], + ''' + + + imgs = 2 * imgs - 1 # to [-1, 1] + + posterior = self.encode(imgs).latent_dist + + latents = posterior.sample() * self.scaling_factor + + return latents + + +class StableDiffusion(nn.Module): + def __init__(self, device, fp16, vram_O, sd_version='2.1', hf_key=None, t_range=[0.02, 0.98]): + super().__init__() + + self.device = device + self.sd_version = sd_version + + print(f'[INFO] loading stable diffusion...') + + if hf_key is not None: + print(f'[INFO] using hugging face custom model key: {hf_key}') + model_key = hf_key + elif self.sd_version == '2.1': + model_key = "stabilityai/stable-diffusion-2-1-base" + elif self.sd_version == '2.0': + model_key = "stabilityai/stable-diffusion-2-base" + elif self.sd_version == '1.5': + model_key = "runwayml/stable-diffusion-v1-5" + else: + raise ValueError(f'Stable-diffusion version {self.sd_version} not 
supported.') + + self.precision_t = torch.float16 if fp16 else torch.float32 + + # Create model + pipe = StableDiffusionPipeline.from_pretrained(model_key, torch_dtype=self.precision_t) + + if vram_O: + pipe.enable_sequential_cpu_offload() + pipe.enable_vae_slicing() + pipe.unet.to(memory_format=torch.channels_last) + pipe.enable_attention_slicing(1) + # pipe.enable_model_cpu_offload() + else: + pipe.to(device) + + self.taesd = Taesd(device, fp16) + self.vae = AutoencoderKL.from_pretrained('./pretrained/vae-ft-mse-840000-ema-pruned', torch_dtype=self.precision_t).to(self.device) + + + + self.tokenizer = pipe.tokenizer + self.text_encoder = pipe.text_encoder + self.unet = pipe.unet + + self.scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler", torch_dtype=self.precision_t) + + del pipe + + self.num_train_timesteps = self.scheduler.config.num_train_timesteps + self.min_step = int(self.num_train_timesteps * t_range[0]) + self.max_step = int(self.num_train_timesteps * t_range[1]) + self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + print(f'[INFO] loaded stable diffusion!') + + @torch.no_grad() + def get_text_embeds(self, prompt): + # prompt: [str] + + inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt') + embeddings = self.text_encoder(inputs.input_ids.to(self.device))[0] + + return embeddings + + + def train_step(self, text_embeddings, pred_rgb, guidance_scale=100, as_latent=False, grad_scale=1, + save_guidance_path:Path=None): + + + if as_latent: + latents = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + + # feature_image + (1 - weights_samples) * bcg_image + else: + # interp to 512x512 to be fed into vae. + pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! + latents = self.encode_imgs(pred_rgb_512) + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (latents.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) + tt = torch.cat([t] * 2) + noise_pred = self.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) 
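+ # Classifier-free guidance: text_embeddings stacks [uncond, cond] along the
+ # batch dimension, so chunk(2) recovers the two predictions and
+ # noise_pred = uncond + s * (cond - uncond). SDS uses a much larger scale
+ # (guidance_scale defaults to 100 here) than ordinary sampling (~7.5).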
+ noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + + + # import kiui + # latents_tmp = torch.randn((1, 4, 64, 64), device=self.device) + # latents_tmp = latents_tmp.detach() + # kiui.lo(latents_tmp) + # self.scheduler.set_timesteps(30) + # for i, t in enumerate(self.scheduler.timesteps): + # latent_model_input = torch.cat([latents_tmp] * 3) + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + # noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + # noise_pred = noise_pred_uncond + 10 * (noise_pred_pos - noise_pred_uncond) + # latents_tmp = self.scheduler.step(noise_pred, t, latents_tmp)['prev_sample'] + # imgs = self.decode_latents(latents_tmp) + # kiui.vis.plot_image(imgs) + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + if save_guidance_path: + with torch.no_grad(): + if as_latent: + pred_rgb_512 = self.decode_latents(latents) + + # visualize predicted denoised image + # The following block of code is equivalent to `predict_start_from_noise`... + # see zero123_utils.py's version for a simpler implementation. + alphas = self.scheduler.alphas.to(latents) + total_timesteps = self.max_step - self.min_step + 1 + index = total_timesteps - t.to(latents.device) - 1 + b = len(noise_pred) + a_t = alphas[index].reshape(b,1,1,1).to(self.device) + sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + sqrt_one_minus_at = sqrt_one_minus_alphas[index].reshape((b,1,1,1)).to(self.device) + pred_x0 = (latents_noisy - sqrt_one_minus_at * noise_pred) / a_t.sqrt() # current prediction for x_0 + result_hopefully_less_noisy_image = self.decode_latents(pred_x0.to(latents.type(self.precision_t))) + + # visualize noisier image + result_noisier_image = self.decode_latents(latents_noisy.to(pred_x0).type(self.precision_t)) + + # TODO: also denoise all-the-way + + # all 3 input images are [1, 3, H, W], e.g. [1, 3, 512, 512] + #print(F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False).shape, pred_rgb_512.shape) + viz_images = torch.cat([pred_rgb_512, result_noisier_image, result_hopefully_less_noisy_image],dim=0) + save_image(viz_images, save_guidance_path) + + targets = (latents - grad).detach() + loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] + + return loss + + + def train_step_perpneg(self, text_embeddings, weights, pred_rgb, guidance_scale=100, as_latent=False, grad_scale=1, + save_guidance_path:Path=None): + + + B = pred_rgb.shape[0] + K = (text_embeddings.shape[0] // B) - 1 # maximum number of prompts + + if as_latent: + latents = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + else: + # interp to 512x512 to be fed into vae. + pred_rgb_512 = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! + latents = self.encode_imgs(pred_rgb_512) + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(self.min_step, self.max_step + 1, (latents.shape[0],), dtype=torch.long, device=self.device) + + # predict the noise residual with unet, NO grad! 
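+ # Perp-Neg batching: text_embeddings holds one unconditional embedding plus
+ # K prompt embeddings per image (K = text_embeddings.shape[0] // B - 1), so
+ # the noisy latents and timesteps are tiled (1 + K) times below and the
+ # per-prompt deltas are combined by weighted_perpendicular_aggregator
+ # according to `weights`.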
+ with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * (1 + K)) + tt = torch.cat([t] * (1 + K)) + unet_output = self.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) + noise_pred_uncond, noise_pred_text = unet_output[:B], unet_output[B:] + delta_noise_preds = noise_pred_text - noise_pred_uncond.repeat(K, 1, 1, 1) + noise_pred = noise_pred_uncond + guidance_scale * weighted_perpendicular_aggregator(delta_noise_preds, weights, B) + + # import kiui + # latents_tmp = torch.randn((1, 4, 64, 64), device=self.device) + # latents_tmp = latents_tmp.detach() + # kiui.lo(latents_tmp) + # self.scheduler.set_timesteps(30) + # for i, t in enumerate(self.scheduler.timesteps): + # latent_model_input = torch.cat([latents_tmp] * 3) + # noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + # noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + # noise_pred = noise_pred_uncond + 10 * (noise_pred_pos - noise_pred_uncond) + # latents_tmp = self.scheduler.step(noise_pred, t, latents_tmp)['prev_sample'] + # imgs = self.decode_latents(latents_tmp) + # kiui.vis.plot_image(imgs) + + # w(t), sigma_t^2 + w = (1 - self.alphas[t]) + grad = grad_scale * w[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + if save_guidance_path: + with torch.no_grad(): + if as_latent: + pred_rgb_512 = self.decode_latents(latents) + + # visualize predicted denoised image + # The following block of code is equivalent to `predict_start_from_noise`... + # see zero123_utils.py's version for a simpler implementation. + alphas = self.scheduler.alphas.to(latents) + total_timesteps = self.max_step - self.min_step + 1 + index = total_timesteps - t.to(latents.device) - 1 + b = len(noise_pred) + a_t = alphas[index].reshape(b,1,1,1).to(self.device) + sqrt_one_minus_alphas = torch.sqrt(1 - alphas) + sqrt_one_minus_at = sqrt_one_minus_alphas[index].reshape((b,1,1,1)).to(self.device) + pred_x0 = (latents_noisy - sqrt_one_minus_at * noise_pred) / a_t.sqrt() # current prediction for x_0 + result_hopefully_less_noisy_image = self.decode_latents(pred_x0.to(latents.type(self.precision_t))) + + # visualize noisier image + result_noisier_image = self.decode_latents(latents_noisy.to(pred_x0).type(self.precision_t)) + + + + # all 3 input images are [1, 3, H, W], e.g. [1, 3, 512, 512] + viz_images = torch.cat([pred_rgb_512, result_noisier_image, result_hopefully_less_noisy_image],dim=0) + save_image(viz_images, save_guidance_path) + + targets = (latents - grad).detach() + loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] + + return loss + + + @torch.no_grad() + def produce_latents(self, text_embeddings, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, latents=None): + + if latents is None: + latents = torch.randn((text_embeddings.shape[0] // 2, self.unet.in_channels, height // 8, width // 8), device=self.device) + + self.scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(self.scheduler.timesteps): + # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
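+ # The two copies are scored against the [negative, positive] embeddings
+ # assembled by the caller; the latents start at 1/8 of the requested
+ # resolution with self.unet.in_channels (4) channels, matching the VAE's
+ # downsampling factor.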
+ latent_model_input = torch.cat([latents] * 2) + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)['sample'] + + # perform guidance + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents)['prev_sample'] + + return latents + + def decode_latents(self, latents): + + latents = 1 / self.vae.scaling_factor * latents + + imgs = self.vae.decode(latents).sample + + + + + imgs = (imgs / 2 + 0.5).clamp(0, 1) + + return imgs + + def encode_imgs(self, imgs): + # imgs: [B, 3, H, W] + + imgs = 2 * imgs - 1 + + posterior = self.vae.encode(imgs).latent_dist + + latents = posterior.sample() * self.vae.scaling_factor + + return latents + + def prompt_to_img(self, prompts, negative_prompts='', height=512, width=512, num_inference_steps=50, guidance_scale=7.5, latents=None): + + if isinstance(prompts, str): + prompts = [prompts] + + if isinstance(negative_prompts, str): + negative_prompts = [negative_prompts] + + # Prompts -> text embeds + pos_embeds = self.get_text_embeds(prompts) # [1, 77, 768] + neg_embeds = self.get_text_embeds(negative_prompts) + text_embeds = torch.cat([neg_embeds, pos_embeds], dim=0) # [2, 77, 768] + + # Text embeds -> img latents + latents = self.produce_latents(text_embeds, height=height, width=width, latents=latents, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) # [1, 4, 64, 64] + + # Img latents -> imgs + imgs = self.decode_latents(latents) # [1, 3, 512, 512] + + + imgs2 = self.taesd.decode_latents(latents) + + imgs = torch.cat([imgs,imgs2],dim=2) + # Img to Numpy + imgs = imgs.detach().cpu().permute(0, 2, 3, 1).numpy() + imgs = (imgs * 255).round().astype('uint8') + + + + + + return imgs + + +if __name__ == '__main__': + + import argparse + import matplotlib.pyplot as plt + + parser = argparse.ArgumentParser() + parser.add_argument('prompt', type=str) + parser.add_argument('--negative', default='', type=str) + parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version") + parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key") + parser.add_argument('--use_tiny_vae', action='store_true', help="use tiny vae") + parser.add_argument('--fp16', action='store_true', help="use float16 for training") + parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + parser.add_argument('-H', type=int, default=512) + parser.add_argument('-W', type=int, default=512) + parser.add_argument('--seed', type=int, default=2) + parser.add_argument('--steps', type=int, default=50) + opt = parser.parse_args() + + seed_everything(opt.seed) + + device = torch.device('cuda') + + sd = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key) + + imgs = sd.prompt_to_img(opt.prompt, opt.negative, opt.H, opt.W, opt.steps) + + # visualize image + plt.imshow(imgs[0]) + plt.show() + + +# python guidance/test_taesd.py "upper body photo of caucasian man in black clothes, night city street, bokeh" --hf_key pretrained/SG161222Realistic_Vision_V5.1_noVAE -H 512 -W 512 --seed 42 --use_tiny_vae + diff --git a/stable-dreamfusion-3DPortrait/guidance/zero123_utils.py b/stable-dreamfusion-3DPortrait/guidance/zero123_utils.py new file mode 100644 index 0000000..cc41161 --- 
/dev/null +++ b/stable-dreamfusion-3DPortrait/guidance/zero123_utils.py @@ -0,0 +1,320 @@ +import math +import numpy as np +from omegaconf import OmegaConf +from pathlib import Path + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd +from torchvision.utils import save_image + +from diffusers import DDIMScheduler + +import sys +from os import path +sys.path.append(path.dirname(path.dirname(path.abspath(__file__)))) + +from ldm.util import instantiate_from_config + + +# load model +def load_model_from_config(config, ckpt, device, vram_O=False, verbose=False): + + pl_sd = torch.load(ckpt, map_location='cpu') + + if 'global_step' in pl_sd and verbose: + print(f'[INFO] Global Step: {pl_sd["global_step"]}') + + sd = pl_sd['state_dict'] + + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + + if len(m) > 0 and verbose: + print('[INFO] missing keys: \n', m) + if len(u) > 0 and verbose: + print('[INFO] unexpected keys: \n', u) + + # manually load ema and delete it to save GPU memory + if model.use_ema: + if verbose: + print('[INFO] loading EMA...') + model.model_ema.copy_to(model.model) + del model.model_ema + + if vram_O: + # we don't need decoder + del model.first_stage_model.decoder + + torch.cuda.empty_cache() + + model.eval().to(device) + + return model + +class Zero123(nn.Module): + def __init__(self, device, fp16, + config='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', + ckpt='./pretrained/zero123/zero123-xl.ckpt', vram_O=False, t_range=[0.02, 0.98], opt=None): + super().__init__() + + self.device = device + self.fp16 = fp16 + self.vram_O = vram_O + self.t_range = t_range + self.opt = opt + + self.config = OmegaConf.load(config) + # TODO: seems it cannot load into fp16... + self.model = load_model_from_config(self.config, ckpt, device=self.device, vram_O=vram_O) + + # timesteps: use diffuser for convenience... hope it's alright. 
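+ # A diffusers DDIMScheduler is rebuilt from the LDM config (linear_start /
+ # linear_end as beta_start / beta_end, scaled_linear schedule) so that
+ # add_noise, step and alphas_cumprod can be reused; min_step / max_step
+ # bound the timesteps sampled for SDS.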
+ self.num_train_timesteps = self.config.model.params.timesteps + + self.scheduler = DDIMScheduler( + self.num_train_timesteps, + self.config.model.params.linear_start, + self.config.model.params.linear_end, + beta_schedule='scaled_linear', + clip_sample=False, + set_alpha_to_one=False, + steps_offset=1, + ) + + self.min_step = int(self.num_train_timesteps * t_range[0]) + self.max_step = int(self.num_train_timesteps * t_range[1]) + self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + @torch.no_grad() + def get_img_embeds(self, x): + # x: image tensor [B, 3, 256, 256] in [0, 1] + x = x * 2 - 1 + c = [self.model.get_learned_conditioning(xx.unsqueeze(0)) for xx in x] #.tile(n_samples, 1, 1) + v = [self.model.encode_first_stage(xx.unsqueeze(0)).mode() for xx in x] + return c, v + + def angle_between(self, sph_v1, sph_v2): + def sph2cart(sv): + r, theta, phi = sv[0], sv[1], sv[2] + return torch.tensor([r * torch.sin(theta) * torch.cos(phi), r * torch.sin(theta) * torch.sin(phi), r * torch.cos(theta)]) + def unit_vector(v): + return v / torch.linalg.norm(v) + def angle_between_2_sph(sv1, sv2): + v1, v2 = sph2cart(sv1), sph2cart(sv2) + v1_u, v2_u = unit_vector(v1), unit_vector(v2) + return torch.arccos(torch.clip(torch.dot(v1_u, v2_u), -1.0, 1.0)) + angles = torch.empty(len(sph_v1), len(sph_v2)) + for i, sv1 in enumerate(sph_v1): + for j, sv2 in enumerate(sph_v2): + angles[i][j] = angle_between_2_sph(sv1, sv2) + return angles + + def train_step(self, embeddings, pred_rgb, polar, azimuth, radius, guidance_scale=3, as_latent=False, grad_scale=1, save_guidance_path:Path=None): + # pred_rgb: tensor [1, 3, H, W] in [0, 1] + + # adjust SDS scale based on how far the novel view is from the known view + ref_radii = embeddings['ref_radii'] + ref_polars = embeddings['ref_polars'] + ref_azimuths = embeddings['ref_azimuths'] + v1 = torch.stack([radius + ref_radii[0], torch.deg2rad(polar + ref_polars[0]), torch.deg2rad(azimuth + ref_azimuths[0])], dim=-1) # polar,azimuth,radius are all actually delta wrt default + v2 = torch.stack([torch.tensor(ref_radii), torch.deg2rad(torch.tensor(ref_polars)), torch.deg2rad(torch.tensor(ref_azimuths))], dim=-1) + angles = torch.rad2deg(self.angle_between(v1, v2)).to(self.device) + if self.opt.zero123_grad_scale == 'angle': + grad_scale = (angles.min(dim=1)[0] / (180/len(ref_azimuths))) * grad_scale # rethink 180/len(ref_azimuths) # claforte: try inverting grad_scale or just fixing it to 1.0 + elif self.opt.zero123_grad_scale == 'None': + grad_scale = 1.0 # claforte: I think this might converge faster...? 
+ else: + assert False, f'Unrecognized `zero123_grad_scale`: {self.opt.zero123_grad_scale}' + + if as_latent: + latents = F.interpolate(pred_rgb, (32, 32), mode='bilinear', align_corners=False) * 2 - 1 + else: + pred_rgb_256 = F.interpolate(pred_rgb, (256, 256), mode='bilinear', align_corners=False) + latents = self.encode_imgs(pred_rgb_256) + + t = torch.randint(self.min_step, self.max_step + 1, (latents.shape[0],), dtype=torch.long, device=self.device) + + # Set weights acc to closeness in angle + if len(ref_azimuths) > 1: + inv_angles = 1/angles + inv_angles[inv_angles > 100] = 100 + inv_angles /= inv_angles.max(dim=-1, keepdim=True)[0] + inv_angles[inv_angles < 0.1] = 0 + else: + inv_angles = torch.tensor([1.]).to(self.device) + + # Multiply closeness-weight by user-given weights + zero123_ws = torch.tensor(embeddings['zero123_ws'])[None, :].to(self.device) * inv_angles + zero123_ws /= zero123_ws.max(dim=-1, keepdim=True)[0] + zero123_ws[zero123_ws < 0.1] = 0 + + with torch.no_grad(): + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + + x_in = torch.cat([latents_noisy] * 2) + t_in = torch.cat([t] * 2) + + noise_preds = [] + # Loop through each ref image + for (zero123_w, c_crossattn, c_concat, ref_polar, ref_azimuth, ref_radius) in zip(zero123_ws.T, + embeddings['c_crossattn'], embeddings['c_concat'], + ref_polars, ref_azimuths, ref_radii): + # polar,azimuth,radius are all actually delta wrt default + p = polar + ref_polars[0] - ref_polar + a = azimuth + ref_azimuths[0] - ref_azimuth + a[a > 180] -= 360 # range in [-180, 180] + r = radius + ref_radii[0] - ref_radius + # T = torch.tensor([math.radians(p), math.sin(math.radians(-a)), math.cos(math.radians(a)), r]) + # T = T[None, None, :].to(self.device) + T = torch.stack([torch.deg2rad(p), torch.sin(torch.deg2rad(-a)), torch.cos(torch.deg2rad(a)), r], dim=-1)[:, None, :] + cond = {} + clip_emb = self.model.cc_projection(torch.cat([c_crossattn.repeat(len(T), 1, 1), T], dim=-1)) + cond['c_crossattn'] = [torch.cat([torch.zeros_like(clip_emb).to(self.device), clip_emb], dim=0)] + cond['c_concat'] = [torch.cat([torch.zeros_like(c_concat).repeat(len(T), 1, 1, 1).to(self.device), c_concat.repeat(len(T), 1, 1, 1)], dim=0)] + noise_pred = self.model.apply_model(x_in, t_in, cond) + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond) + noise_preds.append(zero123_w[:, None, None, None] * noise_pred) + + noise_pred = torch.stack(noise_preds).sum(dim=0) / zero123_ws.sum(dim=-1)[:, None, None, None] + + w = (1 - self.alphas[t]) + grad = (grad_scale * w)[:, None, None, None] * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + # import kiui + # if not as_latent: + # kiui.vis.plot_image(pred_rgb_256) + # kiui.vis.plot_matrix(latents) + # kiui.vis.plot_matrix(grad) + + # import kiui + # latents = torch.randn((1, 4, 32, 32), device=self.device) + # kiui.lo(latents) + # self.scheduler.set_timesteps(30) + # with torch.no_grad(): + # for i, t in enumerate(self.scheduler.timesteps): + # x_in = torch.cat([latents] * 2) + # t_in = torch.cat([t.view(1)] * 2).to(self.device) + + # noise_pred = self.model.apply_model(x_in, t_in, cond) + # noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + # noise_pred = noise_pred_uncond + 3 * (noise_pred_cond - noise_pred_uncond) + + # latents = self.scheduler.step(noise_pred, t, latents)['prev_sample'] + # imgs = self.decode_latents(latents) + # print(polar, azimuth, 
radius) + # kiui.vis.plot_image(pred_rgb_256, imgs) + + if save_guidance_path: + with torch.no_grad(): + if as_latent: + pred_rgb_256 = self.decode_latents(latents) # claforte: test! + + # visualize predicted denoised image + result_hopefully_less_noisy_image = self.decode_latents(self.model.predict_start_from_noise(latents_noisy, t, noise_pred)) + + # visualize noisier image + result_noisier_image = self.decode_latents(latents_noisy) + + # TODO: also denoise all-the-way + + # all 3 input images are [1, 3, H, W], e.g. [1, 3, 512, 512] + viz_images = torch.cat([pred_rgb_256, result_noisier_image, result_hopefully_less_noisy_image],dim=-1) + save_image(viz_images, save_guidance_path) + + targets = (latents - grad).detach() + loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] + + return loss + + # verification + @torch.no_grad() + def __call__(self, + image, # image tensor [1, 3, H, W] in [0, 1] + polar=0, azimuth=0, radius=0, # new view params + scale=3, ddim_steps=50, ddim_eta=1, h=256, w=256, # diffusion params + c_crossattn=None, c_concat=None, post_process=True, + ): + + if c_crossattn is None: + embeddings = self.get_img_embeds(image) + + T = torch.tensor([math.radians(polar), math.sin(math.radians(azimuth)), math.cos(math.radians(azimuth)), radius]) + T = T[None, None, :].to(self.device) + + cond = {} + clip_emb = self.model.cc_projection(torch.cat([embeddings['c_crossattn'] if c_crossattn is None else c_crossattn, T], dim=-1)) + cond['c_crossattn'] = [torch.cat([torch.zeros_like(clip_emb).to(self.device), clip_emb], dim=0)] + cond['c_concat'] = [torch.cat([torch.zeros_like(embeddings['c_concat']).to(self.device), embeddings['c_concat']], dim=0)] if c_concat is None else [torch.cat([torch.zeros_like(c_concat).to(self.device), c_concat], dim=0)] + + # produce latents loop + latents = torch.randn((1, 4, h // 8, w // 8), device=self.device) + self.scheduler.set_timesteps(ddim_steps) + + for i, t in enumerate(self.scheduler.timesteps): + x_in = torch.cat([latents] * 2) + t_in = torch.cat([t.view(1)] * 2).to(self.device) + + noise_pred = self.model.apply_model(x_in, t_in, cond) + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + scale * (noise_pred_cond - noise_pred_uncond) + + latents = self.scheduler.step(noise_pred, t, latents, eta=ddim_eta)['prev_sample'] + + imgs = self.decode_latents(latents) + imgs = imgs.cpu().numpy().transpose(0, 2, 3, 1) if post_process else imgs + + return imgs + + def decode_latents(self, latents): + # zs: [B, 4, 32, 32] Latent space image + # with self.model.ema_scope(): + imgs = self.model.decode_first_stage(latents) + imgs = (imgs / 2 + 0.5).clamp(0, 1) + + return imgs # [B, 3, 256, 256] RGB space image + + def encode_imgs(self, imgs): + # imgs: [B, 3, 256, 256] RGB space image + # with self.model.ema_scope(): + imgs = imgs * 2 - 1 + latents = torch.cat([self.model.get_first_stage_encoding(self.model.encode_first_stage(img.unsqueeze(0))) for img in imgs], dim=0) + return latents # [B, 4, 32, 32] Latent space image + + +if __name__ == '__main__': + import cv2 + import argparse + import numpy as np + import matplotlib.pyplot as plt + + parser = argparse.ArgumentParser() + + parser.add_argument('input', type=str) + parser.add_argument('--fp16', action='store_true', help="use float16 for training") # no use now, can only run in fp32 + + parser.add_argument('--polar', type=float, default=0, help='delta polar angle in [-90, 90]') + parser.add_argument('--azimuth', type=float, default=0, 
help='delta azimuth angle in [-180, 180]') + parser.add_argument('--radius', type=float, default=0, help='delta camera radius multiplier in [-0.5, 0.5]') + + opt = parser.parse_args() + + device = torch.device('cuda') + + print(f'[INFO] loading image from {opt.input} ...') + image = cv2.imread(opt.input, cv2.IMREAD_UNCHANGED) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA) + image = image.astype(np.float32) / 255.0 + image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).contiguous().to(device) + + print(f'[INFO] loading model ...') + zero123 = Zero123(device, opt.fp16, opt=opt) + + print(f'[INFO] running model ...') + outputs = zero123(image, polar=opt.polar, azimuth=opt.azimuth, radius=opt.radius) + plt.imshow(outputs[0]) + plt.show() \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/extras.py b/stable-dreamfusion-3DPortrait/ldm/extras.py new file mode 100644 index 0000000..62e654b --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/extras.py @@ -0,0 +1,77 @@ +from pathlib import Path +from omegaconf import OmegaConf +import torch +from ldm.util import instantiate_from_config +import logging +from contextlib import contextmanager + +from contextlib import contextmanager +import logging + +@contextmanager +def all_logging_disabled(highest_level=logging.CRITICAL): + """ + A context manager that will prevent any logging messages + triggered during the body from being processed. + + :param highest_level: the maximum logging level in use. + This would only need to be changed if a custom level greater than CRITICAL + is defined. + + https://gist.github.com/simon-weber/7853144 + """ + # two kind-of hacks here: + # * can't get the highest logging level in effect => delegate to the user + # * can't get the current module-level override => use an undocumented + # (but non-private!) 
interface + + previous_level = logging.root.manager.disable + + logging.disable(highest_level) + + try: + yield + finally: + logging.disable(previous_level) + +def load_training_dir(train_dir, device, epoch="last"): + """Load a checkpoint and config from training directory""" + train_dir = Path(train_dir) + ckpt = list(train_dir.rglob(f"*{epoch}.ckpt")) + assert len(ckpt) == 1, f"found {len(ckpt)} matching ckpt files" + config = list(train_dir.rglob(f"*-project.yaml")) + assert len(ckpt) > 0, f"didn't find any config in {train_dir}" + if len(config) > 1: + print(f"found {len(config)} matching config files") + config = sorted(config)[-1] + print(f"selecting {config}") + else: + config = config[0] + + + config = OmegaConf.load(config) + return load_model_from_config(config, ckpt[0], device) + +def load_model_from_config(config, ckpt, device="cpu", verbose=False): + """Loads a model from config and a ckpt + if config is a path will use omegaconf to load + """ + if isinstance(config, (str, Path)): + config = OmegaConf.load(config) + + with all_logging_disabled(): + print(f"Loading model from {ckpt}") + pl_sd = torch.load(ckpt, map_location="cpu") + global_step = pl_sd["global_step"] + sd = pl_sd["state_dict"] + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + if len(m) > 0 and verbose: + print("missing keys:") + print(m) + if len(u) > 0 and verbose: + print("unexpected keys:") + model.to(device) + model.eval() + model.cond_stage_model.device = device + return model \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/guidance.py b/stable-dreamfusion-3DPortrait/ldm/guidance.py new file mode 100644 index 0000000..53d1a2a --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/guidance.py @@ -0,0 +1,96 @@ +from typing import List, Tuple +from scipy import interpolate +import numpy as np +import torch +import matplotlib.pyplot as plt +from IPython.display import clear_output +import abc + + +class GuideModel(torch.nn.Module, abc.ABC): + def __init__(self) -> None: + super().__init__() + + @abc.abstractmethod + def preprocess(self, x_img): + pass + + @abc.abstractmethod + def compute_loss(self, inp): + pass + + +class Guider(torch.nn.Module): + def __init__(self, sampler, guide_model, scale=1.0, verbose=False): + """Apply classifier guidance + + Specify a guidance scale as either a scalar + Or a schedule as a list of tuples t = 0->1 and scale, e.g. 
+ [(0, 10), (0.5, 20), (1, 50)] + """ + super().__init__() + self.sampler = sampler + self.index = 0 + self.show = verbose + self.guide_model = guide_model + self.history = [] + + if isinstance(scale, (Tuple, List)): + times = np.array([x[0] for x in scale]) + values = np.array([x[1] for x in scale]) + self.scale_schedule = {"times": times, "values": values} + else: + self.scale_schedule = float(scale) + + self.ddim_timesteps = sampler.ddim_timesteps + self.ddpm_num_timesteps = sampler.ddpm_num_timesteps + + + def get_scales(self): + if isinstance(self.scale_schedule, float): + return len(self.ddim_timesteps)*[self.scale_schedule] + + interpolater = interpolate.interp1d(self.scale_schedule["times"], self.scale_schedule["values"]) + fractional_steps = np.array(self.ddim_timesteps)/self.ddpm_num_timesteps + return interpolater(fractional_steps) + + def modify_score(self, model, e_t, x, t, c): + + # TODO look up index by t + scale = self.get_scales()[self.index] + + if (scale == 0): + return e_t + + sqrt_1ma = self.sampler.ddim_sqrt_one_minus_alphas[self.index].to(x.device) + with torch.enable_grad(): + x_in = x.detach().requires_grad_(True) + pred_x0 = model.predict_start_from_noise(x_in, t=t, noise=e_t) + x_img = model.first_stage_model.decode((1/0.18215)*pred_x0) + + inp = self.guide_model.preprocess(x_img) + loss = self.guide_model.compute_loss(inp) + grads = torch.autograd.grad(loss.sum(), x_in)[0] + correction = grads * scale + + if self.show: + clear_output(wait=True) + print(loss.item(), scale, correction.abs().max().item(), e_t.abs().max().item()) + self.history.append([loss.item(), scale, correction.min().item(), correction.max().item()]) + plt.imshow((inp[0].detach().permute(1,2,0).clamp(-1,1).cpu()+1)/2) + plt.axis('off') + plt.show() + plt.imshow(correction[0][0].detach().cpu()) + plt.axis('off') + plt.show() + + + e_t_mod = e_t - sqrt_1ma*correction + if self.show: + fig, axs = plt.subplots(1, 3) + axs[0].imshow(e_t[0][0].detach().cpu(), vmin=-2, vmax=+2) + axs[1].imshow(e_t_mod[0][0].detach().cpu(), vmin=-2, vmax=+2) + axs[2].imshow(correction[0][0].detach().cpu(), vmin=-2, vmax=+2) + plt.show() + self.index += 1 + return e_t_mod \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/lr_scheduler.py b/stable-dreamfusion-3DPortrait/ldm/lr_scheduler.py new file mode 100644 index 0000000..be39da9 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/lr_scheduler.py @@ -0,0 +1,98 @@ +import numpy as np + + +class LambdaWarmUpCosineScheduler: + """ + note: use with a base_lr of 1.0 + """ + def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): + self.lr_warm_up_steps = warm_up_steps + self.lr_start = lr_start + self.lr_min = lr_min + self.lr_max = lr_max + self.lr_max_decay_steps = max_decay_steps + self.last_lr = 0. 
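+ # schedule() returns an LR multiplier (hence the base_lr of 1.0 noted above):
+ # linear warm-up from lr_start to lr_max over warm_up_steps, then cosine
+ # decay towards lr_min until max_decay_steps.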
+ self.verbosity_interval = verbosity_interval + + def schedule(self, n, **kwargs): + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") + if n < self.lr_warm_up_steps: + lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start + self.last_lr = lr + return lr + else: + t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) + t = min(t, 1.0) + lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( + 1 + np.cos(t * np.pi)) + self.last_lr = lr + return lr + + def __call__(self, n, **kwargs): + return self.schedule(n,**kwargs) + + +class LambdaWarmUpCosineScheduler2: + """ + supports repeated iterations, configurable via lists + note: use with a base_lr of 1.0. + """ + def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): + assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) + self.lr_warm_up_steps = warm_up_steps + self.f_start = f_start + self.f_min = f_min + self.f_max = f_max + self.cycle_lengths = cycle_lengths + self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) + self.last_f = 0. + self.verbosity_interval = verbosity_interval + + def find_in_interval(self, n): + interval = 0 + for cl in self.cum_cycles[1:]: + if n <= cl: + return interval + interval += 1 + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) + t = min(t, 1.0) + f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( + 1 + np.cos(t * np.pi)) + self.last_f = f + return f + + def __call__(self, n, **kwargs): + return self.schedule(n, **kwargs) + + +class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) + self.last_f = f + return f + diff --git a/stable-dreamfusion-3DPortrait/ldm/models/autoencoder.py b/stable-dreamfusion-3DPortrait/ldm/models/autoencoder.py new file mode 100644 index 0000000..6a9c4f4 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/models/autoencoder.py @@ -0,0 +1,443 @@ +import torch +import pytorch_lightning as pl +import torch.nn.functional as F +from contextlib import contextmanager + +from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer + +from ldm.modules.diffusionmodules.model import Encoder, Decoder +from ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +from ldm.util import instantiate_from_config + + +class 
VQModel(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + n_embed, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + batch_resize_range=None, + scheduler_config=None, + lr_g_factor=1.0, + remap=None, + sane_index_shape=False, # tell vector quantizer to return indices as bhw + use_ema=False + ): + super().__init__() + self.embed_dim = embed_dim + self.n_embed = n_embed + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, + remap=remap, + sane_index_shape=sane_index_shape) + self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + self.batch_resize_range = batch_resize_range + if self.batch_resize_range is not None: + print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + self.scheduler_config = scheduler_config + self.lr_g_factor = lr_g_factor + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + print(f"Unexpected Keys: {unexpected}") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info + + def encode_to_prequant(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, quant): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + def decode_code(self, code_b): + quant_b = self.quantize.embed_code(code_b) + dec = self.decode(quant_b) + return dec + + def forward(self, input, return_pred_indices=False): + quant, diff, (_,_,ind) = self.encode(input) + dec = self.decode(quant) + if return_pred_indices: + return dec, diff, ind + return dec, diff + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + if self.batch_resize_range is not None: + lower_size = self.batch_resize_range[0] + upper_size = self.batch_resize_range[1] + 
if self.global_step <= 4: + # do the first few batches with max size to avoid later oom + new_resize = upper_size + else: + new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) + if new_resize != x.shape[2]: + x = F.interpolate(x, size=new_resize, mode="bicubic") + x = x.detach() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + # https://github.com/pytorch/pytorch/issues/37142 + # try not to fool the heuristics + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + + if optimizer_idx == 0: + # autoencode + aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train", + predicted_indices=ind) + + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, suffix=""): + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + + discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] + self.log(f"val{suffix}/rec_loss", rec_loss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + self.log(f"val{suffix}/aeloss", aeloss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + if version.parse(pl.__version__) >= version.parse('1.4.0'): + del log_dict_ae[f"val{suffix}/rec_loss"] + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr_d = self.learning_rate + lr_g = self.lr_g_factor*self.learning_rate + print("lr_d", lr_d) + print("lr_g", lr_g) + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quantize.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr_g, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr_d, betas=(0.5, 0.9)) + + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + { + 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + ] + return [opt_ae, opt_disc], scheduler + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = 
x.to(self.device) + if only_inputs: + log["inputs"] = x + return log + xrec, _ = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["inputs"] = x + log["reconstructions"] = xrec + if plot_ema: + with self.ema_scope(): + xrec_ema, _ = self(x) + if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + log["reconstructions_ema"] = xrec_ema + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. + return x + + +class VQModelInterface(VQModel): + def __init__(self, embed_dim, *args, **kwargs): + super().__init__(embed_dim=embed_dim, *args, **kwargs) + self.embed_dim = embed_dim + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, h, force_not_quantize=False): + # also go through quantization layer + if not force_not_quantize: + quant, emb_loss, info = self.quantize(h) + else: + quant = h + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + +class AutoencoderKL(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + ): + super().__init__() + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f"Restored from {path}") + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + 
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + + self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return discloss + + def validation_step(self, batch, batch_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, + last_layer=self.get_last_layer(), split="val") + + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, + last_layer=self.get_last_layer(), split="val") + + self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr, betas=(0.5, 0.9)) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + log["inputs"] = x + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
+ return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x diff --git a/stable-dreamfusion-3DPortrait/ldm/models/diffusion/__init__.py b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-dreamfusion-3DPortrait/ldm/models/diffusion/classifier.py b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/classifier.py new file mode 100644 index 0000000..67e98b9 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/classifier.py @@ -0,0 +1,267 @@ +import os +import torch +import pytorch_lightning as pl +from omegaconf import OmegaConf +from torch.nn import functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from copy import deepcopy +from einops import rearrange +from glob import glob +from natsort import natsorted + +from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel +from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config + +__models__ = { + 'class_label': EncoderUNetModel, + 'segmentation': UNetModel +} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class NoisyLatentImageClassifier(pl.LightningModule): + + def __init__(self, + diffusion_path, + num_classes, + ckpt_path=None, + pool='attention', + label_key=None, + diffusion_ckpt_path=None, + scheduler_config=None, + weight_decay=1.e-2, + log_steps=10, + monitor='val/loss', + *args, + **kwargs): + super().__init__(*args, **kwargs) + self.num_classes = num_classes + # get latest config of diffusion model + diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] + self.diffusion_config = OmegaConf.load(diffusion_config).model + self.diffusion_config.params.ckpt_path = diffusion_ckpt_path + self.load_diffusion() + + self.monitor = monitor + self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 + self.log_time_interval = self.diffusion_model.num_timesteps // log_steps + self.log_steps = log_steps + + self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ + else self.diffusion_model.cond_stage_key + + assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' + + if self.label_key not in __models__: + raise NotImplementedError() + + self.load_classifier(ckpt_path, pool) + + self.scheduler_config = scheduler_config + self.use_scheduler = self.scheduler_config is not None + self.weight_decay = weight_decay + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + 
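+ # load_state_dict(strict=False) returns (missing_keys, unexpected_keys),
+ # which are reported below instead of raising.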
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def load_diffusion(self): + model = instantiate_from_config(self.diffusion_config) + self.diffusion_model = model.eval() + self.diffusion_model.train = disabled_train + for param in self.diffusion_model.parameters(): + param.requires_grad = False + + def load_classifier(self, ckpt_path, pool): + model_config = deepcopy(self.diffusion_config.params.unet_config.params) + model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels + model_config.out_channels = self.num_classes + if self.label_key == 'class_label': + model_config.pool = pool + + self.model = __models__[self.label_key](**model_config) + if ckpt_path is not None: + print('#####################################################################') + print(f'load from ckpt "{ckpt_path}"') + print('#####################################################################') + self.init_from_ckpt(ckpt_path) + + @torch.no_grad() + def get_x_noisy(self, x, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x)) + continuous_sqrt_alpha_cumprod = None + if self.diffusion_model.use_continuous_noise: + continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) + # todo: make sure t+1 is correct here + + return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, + continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) + + def forward(self, x_noisy, t, *args, **kwargs): + return self.model(x_noisy, t) + + @torch.no_grad() + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + @torch.no_grad() + def get_conditioning(self, batch, k=None): + if k is None: + k = self.label_key + assert k is not None, 'Needs to provide label key' + + targets = batch[k].to(self.device) + + if self.label_key == 'segmentation': + targets = rearrange(targets, 'b h w c -> b c h w') + for down in range(self.numd): + h, w = targets.shape[-2:] + targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') + + # targets = rearrange(targets,'b c h w -> b h w c') + + return targets + + def compute_top_k(self, logits, labels, k, reduction="mean"): + _, top_ks = torch.topk(logits, k, dim=1) + if reduction == "mean": + return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() + elif reduction == "none": + return (top_ks == labels[:, None]).float().sum(dim=-1) + + def on_train_epoch_start(self): + # save some memory + self.diffusion_model.model.to('cpu') + + @torch.no_grad() + def write_logs(self, loss, logits, targets): + log_prefix = 'train' if self.training else 'val' + log = {} + log[f"{log_prefix}/loss"] = loss.mean() + log[f"{log_prefix}/acc@1"] = self.compute_top_k( + logits, targets, k=1, reduction="mean" + ) + log[f"{log_prefix}/acc@5"] = self.compute_top_k( + logits, targets, k=5, reduction="mean" + ) + + self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) + self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) + self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) + + def 
shared_step(self, batch, t=None): + x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) + targets = self.get_conditioning(batch) + if targets.dim() == 4: + targets = targets.argmax(dim=1) + if t is None: + t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() + else: + t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() + x_noisy = self.get_x_noisy(x, t) + logits = self(x_noisy, t) + + loss = F.cross_entropy(logits, targets, reduction='none') + + self.write_logs(loss.detach(), logits.detach(), targets.detach()) + + loss = loss.mean() + return loss, logits, x_noisy, targets + + def training_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + return loss + + def reset_noise_accs(self): + self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in + range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} + + def on_validation_start(self): + self.reset_noise_accs() + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + + for t in self.noisy_acc: + _, logits, _, targets = self.shared_step(batch, t) + self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) + self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) + + return loss + + def configure_optimizers(self): + optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) + + if self.use_scheduler: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [optimizer], scheduler + + return optimizer + + @torch.no_grad() + def log_images(self, batch, N=8, *args, **kwargs): + log = dict() + x = self.get_input(batch, self.diffusion_model.first_stage_key) + log['inputs'] = x + + y = self.get_conditioning(batch) + + if self.label_key == 'class_label': + y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['labels'] = y + + if ismap(y): + log['labels'] = self.diffusion_model.to_rgb(y) + + for step in range(self.log_steps): + current_time = step * self.log_time_interval + + _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) + + log[f'inputs@t{current_time}'] = x_noisy + + pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) + pred = rearrange(pred, 'b h w c -> b c h w') + + log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) + + for key in log: + log[key] = log[key][:N] + + return log diff --git a/stable-dreamfusion-3DPortrait/ldm/models/diffusion/ddim.py b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/ddim.py new file mode 100644 index 0000000..0683d16 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/ddim.py @@ -0,0 +1,328 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial +from einops import rearrange + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor +from ldm.models.diffusion.sampling_util import renorm_thresholding, norm_thresholding, spatial_norm_thresholding + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = 
model.num_timesteps + self.schedule = schedule + + def to(self, device): + """Same as to in torch module + Don't really underestand why this isn't a module in the first place""" + for k, v in self.__dict__.items(): + if isinstance(v, torch.Tensor): + new_v = getattr(self, k).to(device) + setattr(self, k, new_v) + + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ dynamic_threshold=None, + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + ctmp = conditioning[list(conditioning.keys())[0]] + while isinstance(ctmp, list): ctmp = ctmp[0] + cbs = ctmp.shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + # print(f'Data shape for DDIM sampling is {size}, eta {eta}') + + samples, intermediates = self.ddim_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, + t_start=-1): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + timesteps = timesteps[:t_start] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + # print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold) + img, pred_x0 = outs + if callback: + img = callback(i, img, pred_x0) + if img_callback: + img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, + dynamic_threshold=None): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + if isinstance(c, dict): + assert isinstance(unconditional_conditioning, dict) + c_in = dict() + for k in c: + if isinstance(c[k], list): + c_in[k] = [torch.cat([ + unconditional_conditioning[k][i], + c[k][i]]) for i in range(len(c[k]))] + else: + c_in[k] = torch.cat([ + unconditional_conditioning[k], + c[k]]) + else: + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + + print(t, sqrt_one_minus_at, a_t) + + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + + if dynamic_threshold is not None: + pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) + + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, + unconditional_guidance_scale=1.0, unconditional_conditioning=None): + num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] + + assert t_enc <= num_reference_steps + num_steps = t_enc + + if use_original_steps: + alphas_next = self.alphas_cumprod[:num_steps] + alphas = self.alphas_cumprod_prev[:num_steps] + else: + alphas_next = self.ddim_alphas[:num_steps] + alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) + + x_next = x0 + intermediates = [] + inter_steps = [] + for i in tqdm(range(num_steps), desc='Encoding Image'): + t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) + if unconditional_guidance_scale == 1.: + noise_pred = self.model.apply_model(x_next, t, c) + else: + assert unconditional_conditioning is not None + e_t_uncond, noise_pred = torch.chunk( + self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), + torch.cat((unconditional_conditioning, c))), 2) + noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) + + xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next + weighted_noise_pred = alphas_next[i].sqrt() * ( + (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred + x_next = xt_weighted + weighted_noise_pred + if return_intermediates and i % ( + num_steps // return_intermediates) == 0 and i < num_steps - 1: + intermediates.append(x_next) + inter_steps.append(i) + elif return_intermediates and i >= num_steps - 2: + intermediates.append(x_next) + inter_steps.append(i) + + out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} + if return_intermediates: + out.update({'intermediates': intermediates}) + return x_next, out + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) + + @torch.no_grad() + def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, + use_original_steps=False): + + timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + # print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) + x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, 
use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + return x_dec \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/models/diffusion/ddpm.py b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/ddpm.py new file mode 100644 index 0000000..3fcb7ad --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/ddpm.py @@ -0,0 +1,1994 @@ +""" +wild mixture of +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +-- merci +""" + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager, nullcontext +from functools import partial +import itertools +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.rank_zero import rank_zero_only +from omegaconf import ListConfig + +from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution +from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.modules.attention import CrossAttention + + +__conditioning_keys__ = {'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y'} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__(self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0., + v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1., + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0., + make_it_fit=False, + ucg_training=None, + ): + super().__init__() + assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' + self.parameterization = parameterization + print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = 
log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + self.make_it_fit = make_it_fit + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) + + self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, + linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + self.ucg_training = ucg_training or dict() + if self.ucg_training: + self.ucg_prng = np.random.RandomState() + + def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( + 1. - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. 
- alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + if self.parameterization == "eps": + lvlb_weights = self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) + elif self.parameterization == "x0": + lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) + else: + raise NotImplementedError("mu not supported") + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + @torch.no_grad() + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + + if self.make_it_fit: + n_params = len([name for name, _ in + itertools.chain(self.named_parameters(), + self.named_buffers())]) + for name, param in tqdm( + itertools.chain(self.named_parameters(), + self.named_buffers()), + desc="Fitting old weights to new weights", + total=n_params + ): + if not name in sd: + continue + old_shape = sd[name].shape + new_shape = param.shape + assert len(old_shape)==len(new_shape) + if len(new_shape) > 2: + # we only modify first two axes + assert new_shape[2:] == old_shape[2:] + # assumes first axis corresponds to output dim + if not new_shape == old_shape: + new_param = param.clone() + old_param = sd[name] + if len(new_shape) == 1: + for i in range(new_param.shape[0]): + new_param[i] = old_param[i % old_shape[0]] + elif len(new_shape) >= 2: + for i in range(new_param.shape[0]): + for j in range(new_param.shape[1]): + new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] + + n_used_old = torch.ones(old_shape[1]) + for j in range(new_param.shape[1]): + n_used_old[j % old_shape[1]] += 1 + n_used_new = torch.zeros(new_shape[1]) + for j in range(new_param.shape[1]): + n_used_new[j] = n_used_old[j % old_shape[1]] + + n_used_new = n_used_new[None, :] + while len(n_used_new.shape) < len(new_shape): + n_used_new = n_used_new.unsqueeze(-1) + new_param /= n_used_new + + sd[name] = new_param + + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
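+        In closed form this is q(x_t | x_0) = N(x_t; sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I),
+        so the mean, variance and log-variance below are read directly from the precomputed
+        cumulative-product buffers registered in register_schedule.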
+ """ + mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1., 1.) + + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop((batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction='none') + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + 
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + else: + raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + for k in self.ucg_training: + p = self.ucg_training[k]["p"] + val = self.ucg_training[k]["val"] + if val is None: + val = "" + for i in range(len(batch[k])): + if self.ucg_prng.choice(2, p=[1-p, p]): + batch[k][i] = val + + loss, loss_dict = self.shared_step(batch) + + self.log_dict(loss_dict, prog_bar=True, + logger=True, on_step=True, on_epoch=True) + + self.log("global_step", self.global_step, + prog_bar=True, logger=True, on_step=True, on_epoch=False) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + 
noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + def __init__(self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + unet_trainable=True, + *args, **kwargs): + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__': + conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", []) + super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.unet_trainable = unet_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + + # construct linear projection layer for concatenating image CLIP embedding and RT + self.cc_projection = nn.Linear(772, 768) + nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) + nn.init.zeros_(list(self.cc_projection.parameters())[1]) + self.cc_projection.requires_grad_(True) + + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + + def make_cond_schedule(self, ): + self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) + ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() + self.cond_ids[:self.num_timesteps_cond] = ids + + @rank_zero_only + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: + assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, 
self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer('scale_factor', 1. / z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule(self, + given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + else: + assert config != '__is_first_stage__' + assert config != '__is_unconditional__' + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append(self.decode_first_stage(zd.to(self.device), + force_not_quantize=force_no_decoder_quantization)) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + wtith min distance = 0 at border and max dist = 
0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip(L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"]) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, padding=0, + stride=(stride[0] * uf, stride[1] * uf)) + fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) + + elif df > 1 and uf == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, padding=0, + stride=(stride[0] // df, stride[1] // df)) + fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + + @torch.no_grad() + def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, + cond_key=None, return_original_cond=False, bs=None, uncond=0.05): + x = super().get_input(batch, k) + T = 
batch['T'].to(memory_format=torch.contiguous_format).float() + + if bs is not None: + x = x[:bs] + T = T[:bs].to(self.device) + + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + cond_key = cond_key or self.cond_stage_key + xc = super().get_input(batch, cond_key).to(self.device) + if bs is not None: + xc = xc[:bs] + cond = {} + + # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. + random = torch.rand(x.size(0), device=x.device) + prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") + input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1") + null_prompt = self.get_learned_conditioning([""]) + + # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] + # print('=========== xc shape ===========', xc.shape) + with torch.enable_grad(): + clip_emb = self.get_learned_conditioning(xc).detach() + null_prompt = self.get_learned_conditioning([""]).detach() + cond["c_crossattn"] = [self.cc_projection(torch.cat([torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :]], dim=-1))] + cond["c_concat"] = [input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()] + out = [z, cond] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_original_cond: + out.append(xc) + return out + + # @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. 
reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + # @torch.no_grad() # wasted two hours to find this bug... why no grad here! + def encode_first_stage(self, x): + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + df = self.split_input_params["vqf"] + self.split_input_params['original_image_size'] = x.shape[-2:] + bs, nc, h, w = x.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) + z = unfold(x) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) + o = o * weighting + + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization + return decoded + + else: + return self.first_stage_model.encode(x) + else: + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + # if self.cond_stage_trainable: + # c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset + def rescale_bbox(bbox): + x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) + y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) + w = min(bbox[2] / crop_coordinates[2], 1 - x0) + h = min(bbox[3] / crop_coordinates[3], 1 - y0) + return x0, y0, w, h + + return [rescale_bbox(b) for b in bboxes] + + def apply_model(self, x_noisy, t, cond, return_ids=False): + + if isinstance(cond, dict): + # hybrid case, cond is exptected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' + cond = {key: cond} + + if hasattr(self, "split_input_params"): + assert len(cond) == 1 # todo can only deal with one conditioning atm + assert not return_ids + ks = self.split_input_params["ks"] # 
eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + + h, w = x_noisy.shape[-2:] + + fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) + + z = unfold(x_noisy) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] + + if self.cond_stage_key in ["image", "LR_image", "segmentation", + 'bbox_img'] and self.model.conditioning_key: # todo check for completeness + c_key = next(iter(cond.keys())) # get key + c = next(iter(cond.values())) # get value + assert (len(c) == 1) # todo extend to list with more than one elem + c = c[0] # get element + + c = unfold(c) + c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] + + elif self.cond_stage_key == 'coordinates_bbox': + assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' + + # assuming padding of unfold is always 0 and its dilation is always 1 + n_patches_per_row = int((w - ks[0]) / stride[0] + 1) + full_img_h, full_img_w = self.split_input_params['original_image_size'] + # as we are operating on latents, we need the factor from the original image size to the + # spatial latent size to properly rescale the crops for regenerating the bbox annotations + num_downs = self.first_stage_model.encoder.num_resolutions - 1 + rescale_latent = 2 ** (num_downs) + + # get top left postions of patches as conforming for the bbbox tokenizer, therefore we + # need to rescale the tl patch coordinates to be in between (0,1) + tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, + rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) + for patch_nr in range(z.shape[-1])] + + # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) + patch_limits = [(x_tl, y_tl, + rescale_latent * ks[0] / full_img_w, + rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] + # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] + + # tokenize crop coordinates for the bounding boxes of the respective patches + patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) + for bbox in patch_limits] # list of length l with tensors of shape (1, 2) + # cut tknzd crop position from conditioning + assert isinstance(cond, dict), 'cond must be dict to be fed into model' + cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + + adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) + adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + adapted_cond = self.get_learned_conditioning(adapted_cond) + adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) + + cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + + else: + cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient + + # apply model by loop over crops + output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] + assert not isinstance(output_list[0], + tuple) # todo cant deal with multiple model outputs check this never happens + + o = torch.stack(output_list, axis=-1) + o = o * weighting + # Reverse reshape to img shape + o = 
o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + x_recon = fold(o) / normalization + + else: + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == "x0": + target = x_start + elif self.parameterization == "eps": + target = noise + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) + loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += (self.original_elbo_weight * loss_vlb) + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, + return_x0=False, score_corrector=None, corrector_kwargs=None): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1., 1.) 
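+        # quantize_denoised is only meaningful for VQ-regularized first stages: it snaps the
+        # predicted x0 onto the learned codebook before the posterior parameters are computed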
+ if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, + return_codebook_ids=False, quantize_denoised=False, return_x0=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) + if return_x0: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, + img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., + score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, + log_every_t=None): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', + total=timesteps) if verbose else reversed( + range(0, timesteps)) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, return_x0=True, + temperature=temperature[i], noise_dropout=noise_dropout, + 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: callback(i) + if img_callback: img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop(self, cond, shape, return_intermediates=False, + x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, start_T=None, + log_every_t=None): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( + range(0, timesteps)) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: callback(i) + if img_callback: img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, + verbose=True, timesteps=None, quantize_denoised=False, + mask=None, x0=None, shape=None,**kwargs): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + return self.p_sample_loop(cond, + shape, + return_intermediates=return_intermediates, x_T=x_T, + verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, + mask=mask, x0=x0) + + @torch.no_grad() + def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, + shape, cond, verbose=False, **kwargs) + + else: + samples, intermediates = self.sample(cond=cond, batch_size=batch_size, + return_intermediates=True, **kwargs) + + return samples, intermediates + + @torch.no_grad() + def get_unconditional_conditioning(self, batch_size, null_label=None, image_size=512): + if null_label is not None: + xc = null_label + if isinstance(xc, ListConfig): + xc = list(xc) + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + if hasattr(xc, "to"): + xc = xc.to(self.device) + c = self.get_learned_conditioning(xc) + 
else: + # todo: get null label from cond_stage_model + raise NotImplementedError() + c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) + cond = {} + cond["c_crossattn"] = [c] + cond["c_concat"] = [torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(self.device)] + return cond + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( + self.first_stage_model, IdentityFirstStage): + # also display when quantizing x0 while sampling + with ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta, + quantize_denoised=True) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if unconditional_guidance_scale > 1.0: + uc = 
self.get_unconditional_conditioning(N, unconditional_guidance_label, image_size=x.shape[-1]) + # uc = torch.zeros_like(c) + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. + mask = mask[:, None, ...] + with ema_scope("Plotting Inpaint"): + + samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + mask = 1. - mask + with ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = [] + if self.unet_trainable == "attn": + print("Training only unet attention layers") + for n, m in self.model.named_modules(): + if isinstance(m, CrossAttention) and n.endswith('attn2'): + params.extend(m.parameters()) + if self.unet_trainable == "conv_in": + print("Training only unet input conv layers") + params = list(self.model.diffusion_model.input_blocks[0][0].parameters()) + elif self.unet_trainable is True or self.unet_trainable == "all": + print("Training the full unet") + params = list(self.model.parameters()) + else: + raise ValueError(f"Unrecognised setting for unet_trainable: {self.unet_trainable}") + + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + + if self.cc_projection is not None: + params = params + list(self.cc_projection.parameters()) + print('========== optimizing for cc projection weight ==========') + + opt = torch.optim.AdamW([{"params": self.model.parameters(), "lr": lr}, + {"params": self.cc_projection.parameters(), "lr": 10. 
* lr}], lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. + return x + + +class DiffusionWrapper(pl.LightningModule): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm'] + + def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None): + if self.conditioning_key is None: + out = self.diffusion_model(x, t) + elif self.conditioning_key == 'concat': + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == 'crossattn': + # c_crossattn dimension: torch.Size([8, 1, 768]) 1 + # cc dimension: torch.Size([8, 1, 768] + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == 'hybrid': + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == 'hybrid-adm': + assert c_adm is not None + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc, y=c_adm) + elif self.conditioning_key == 'adm': + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class LatentUpscaleDiffusion(LatentDiffusion): + def __init__(self, *args, low_scale_config, low_scale_key="LR", **kwargs): + super().__init__(*args, **kwargs) + # assumes that neither the cond_stage nor the low_scale_model contain trainable params + assert not self.cond_stage_trainable + self.instantiate_low_stage(low_scale_config) + self.low_scale_key = low_scale_key + + def instantiate_low_stage(self, config): + model = instantiate_from_config(config) + self.low_scale_model = model.eval() + self.low_scale_model.train = disabled_train + for param in self.low_scale_model.parameters(): + param.requires_grad = False + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): + if not log_mode: + z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) + else: + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + x_low = batch[self.low_scale_key][:bs] + x_low = rearrange(x_low, 'b h w c -> b c h w') + x_low = x_low.to(memory_format=torch.contiguous_format).float() + zx, noise_level = self.low_scale_model(x_low) + all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level} + #import pudb; pu.db + if log_mode: + # TODO: maybe disable if too expensive + interpretability = False + if interpretability: + zx = zx[:, :, ::2, ::2] + x_low_rec = self.low_scale_model.decode(zx) + return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level + return z, all_conds + + @torch.no_grad() + def 
log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, + unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N, + log_mode=True) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + log["x_lr"] = x_low + log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) + # TODO explore better "unconditional" choices for the other keys + # maybe guide away from empty text label and highest noise level and maximally degraded zx? + uc = dict() + for k in c: + if k == "c_crossattn": + assert isinstance(c[k], list) and len(c[k]) == 1 + uc[k] = [uc_tmp] + elif k == "c_adm": # todo: only run with text-based guidance? 
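+                    # the "unconditional" noise-level embedding is taken to be the low-scale
+                    # model's maximum augmentation noise level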
+ assert isinstance(c[k], torch.Tensor) + uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level + elif isinstance(c[k], list): + uc[k] = [c[k][i] for i in range(len(c[k]))] + else: + uc[k] = c[k] + + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + + if plot_progressive_rows: + with ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + return log + + +class LatentInpaintDiffusion(LatentDiffusion): + """ + can either run as pure inpainting model (only concat mode) or with mixed conditionings, + e.g. mask as concat and text via cross-attn. + To disable finetuning mode, set finetune_keys to None + """ + def __init__(self, + finetune_keys=("model.diffusion_model.input_blocks.0.0.weight", + "model_ema.diffusion_modelinput_blocks00weight" + ), + concat_keys=("mask", "masked_image"), + masked_image_key="masked_image", + keep_finetune_dims=4, # if model was trained without concat mode before and we would like to keep these channels + c_concat_log_start=None, # to log reconstruction of c_concat codes + c_concat_log_end=None, + *args, **kwargs + ): + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", list()) + super().__init__(*args, **kwargs) + self.masked_image_key = masked_image_key + assert self.masked_image_key in concat_keys + self.finetune_keys = finetune_keys + self.concat_keys = concat_keys + self.keep_dims = keep_finetune_dims + self.c_concat_log_start = c_concat_log_start + self.c_concat_log_end = c_concat_log_end + if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint' + if exists(ckpt_path): + self.init_from_ckpt(ckpt_path, ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + + # make it explicit, finetune by including extra input channels + if exists(self.finetune_keys) and k in self.finetune_keys: + new_entry = None + for name, param in self.named_parameters(): + if name in self.finetune_keys: + print(f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only") + new_entry = torch.zeros_like(param) # zero init + assert exists(new_entry), 'did not find matching parameter to modify' + new_entry[:, :self.keep_dims, ...] 
= sd[k] + sd[k] = new_entry + + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): + # note: restricted to non-trainable encoders currently + assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting' + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + + assert exists(self.concat_keys) + c_cat = list() + for ck in self.concat_keys: + cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + bchw = z.shape + if ck != self.masked_image_key: + cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) + else: + cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True) + c_cat, c = c["c_concat"][0], c["c_crossattn"][0] + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if not (self.c_concat_log_start is None and self.c_concat_log_end is None): + log["c_concat_decoded"] = self.decode_first_stage(c_cat[:,self.c_concat_log_start:self.c_concat_log_end]) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + 
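+            # flatten the (n_log_steps, n_row) stack into a single batch dimension so that
+            # make_grid lays out one forward-diffusion (noising) trajectory per grid row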
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label) + uc_cat = c_cat + uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc_full, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + + log["masked_image"] = rearrange(batch["masked_image"], + 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() + return log + + +class Layout2ImgDiffusion(LatentDiffusion): + # TODO: move all layout-specific hacks to this class + def __init__(self, cond_stage_key, *args, **kwargs): + assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' + super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) + + def log_images(self, batch, N=8, *args, **kwargs): + logs = super().log_images(batch=batch, N=N, *args, **kwargs) + + key = 'train' if self.training else 'validation' + dset = self.trainer.datamodule.datasets[key] + mapper = dset.conditional_builders[self.cond_stage_key] + + bbox_imgs = [] + map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) + for tknzd_bbox in batch[self.cond_stage_key][:N]: + bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) + bbox_imgs.append(bboximg) + + cond_img = torch.stack(bbox_imgs, dim=0) + logs['bbox_image'] = cond_img + return logs + + +class SimpleUpscaleDiffusion(LatentDiffusion): + def __init__(self, *args, low_scale_key="LR", **kwargs): + super().__init__(*args, **kwargs) + # assumes that neither the cond_stage nor the low_scale_model contain trainable params + assert not self.cond_stage_trainable + self.low_scale_key = low_scale_key + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): + if not log_mode: + z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) + else: + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + x_low = batch[self.low_scale_key][:bs] + x_low = rearrange(x_low, 'b h w c -> b c h w') + x_low = x_low.to(memory_format=torch.contiguous_format).float() + + encoder_posterior = self.encode_first_stage(x_low) + zx = self.get_first_stage_encoding(encoder_posterior).detach() + all_conds = {"c_concat": [zx], "c_crossattn": [c]} + + if log_mode: + # TODO: maybe disable if too expensive + interpretability = False + if interpretability: + zx = 
zx[:, :, ::2, ::2] + return z, all_conds, x, xrec, xc, x_low + return z, all_conds + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, + unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc, x_low = self.get_input(batch, self.first_stage_key, bs=N, log_mode=True) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + log["x_lr"] = x_low + + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + + if unconditional_guidance_scale > 1.0: + uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) + uc = dict() + for k in c: + if k == "c_crossattn": + assert isinstance(c[k], list) and len(c[k]) == 1 + uc[k] = [uc_tmp] + elif isinstance(c[k], list): + uc[k] = [c[k][i] for i in range(len(c[k]))] + else: + uc[k] = c[k] + + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + return log + +class MultiCatFrameDiffusion(LatentDiffusion): + def __init__(self, *args, low_scale_key="LR", **kwargs): + super().__init__(*args, **kwargs) + # assumes that neither the cond_stage nor the low_scale_model contain trainable params + assert not self.cond_stage_trainable + self.low_scale_key = low_scale_key + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): + n = 2 + if not log_mode: + z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) + else: + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + cat_conds = batch[self.low_scale_key][:bs] + cats = [] + for i in range(n): + x_low = cat_conds[:,:,:,3*i:3*(i+1)] + x_low = rearrange(x_low, 'b h w c -> b c h w') + x_low = x_low.to(memory_format=torch.contiguous_format).float() + encoder_posterior = self.encode_first_stage(x_low) + zx = self.get_first_stage_encoding(encoder_posterior).detach() + cats.append(zx) + + all_conds = {"c_concat": [torch.cat(cats, dim=1)], 
"c_crossattn": [c]} + + if log_mode: + # TODO: maybe disable if too expensive + interpretability = False + if interpretability: + zx = zx[:, :, ::2, ::2] + return z, all_conds, x, xrec, xc, x_low + return z, all_conds + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, + unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc, x_low = self.get_input(batch, self.first_stage_key, bs=N, log_mode=True) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + log["x_lr"] = x_low + + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + + if unconditional_guidance_scale > 1.0: + uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) + uc = dict() + for k in c: + if k == "c_crossattn": + assert isinstance(c[k], list) and len(c[k]) == 1 + uc[k] = [uc_tmp] + elif isinstance(c[k], list): + uc[k] = [c[k][i] for i in range(len(c[k]))] + else: + uc[k] = c[k] + + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + return log diff --git a/stable-dreamfusion-3DPortrait/ldm/models/diffusion/plms.py b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/plms.py new file mode 100644 index 0000000..080edee --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/plms.py @@ -0,0 +1,259 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like +from ldm.models.diffusion.sampling_util import norm_thresholding + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + 
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
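+               # dynamic_threshold: if set, clamp the predicted x0 by its norm via
+               # ldm.models.diffusion.sampling_util.norm_thresholding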
+ dynamic_threshold=None, + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + ctmp = conditioning[list(conditioning.keys())[0]] + while isinstance(ctmp, list): ctmp = ctmp[0] + cbs = ctmp.shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, + dynamic_threshold=None): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next, + dynamic_threshold=dynamic_threshold) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, + dynamic_threshold=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + if isinstance(c, dict): + assert isinstance(unconditional_conditioning, dict) + c_in = dict() + for k in c: + if isinstance(c[k], list): + c_in[k] = [torch.cat([ + unconditional_conditioning[k][i], + c[k][i]]) for i in range(len(c[k]))] + else: + c_in[k] = torch.cat([ + unconditional_conditioning[k], + c[k]]) + else: + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + if dynamic_threshold is not None: + pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t
+            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
+            if noise_dropout > 0.:
+                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
+            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
+            return x_prev, pred_x0
+
+        e_t = get_model_output(x, t)
+        if len(old_eps) == 0:
+            # Pseudo Improved Euler (2nd order)
+            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
+            e_t_next = get_model_output(x_prev, t_next)
+            e_t_prime = (e_t + e_t_next) / 2
+        elif len(old_eps) == 1:
+            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (3 * e_t - old_eps[-1]) / 2
+        elif len(old_eps) == 2:
+            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
+        elif len(old_eps) >= 3:
+            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
+
+        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
+
+        return x_prev, pred_x0, e_t
diff --git a/stable-dreamfusion-3DPortrait/ldm/models/diffusion/sampling_util.py b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/sampling_util.py
new file mode 100644
index 0000000..a0ae00f
--- /dev/null
+++ b/stable-dreamfusion-3DPortrait/ldm/models/diffusion/sampling_util.py
@@ -0,0 +1,51 @@
+import torch
+import numpy as np
+from einops import rearrange
+
+
+def append_dims(x, target_dims):
+    """Appends dimensions to the end of a tensor until it has target_dims dimensions.
+    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
+    dims_to_append = target_dims - x.ndim
+    if dims_to_append < 0:
+        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
+    return x[(...,) + (None,) * dims_to_append]
+
+
+def renorm_thresholding(x0, value):
+    # renorm
+    pred_max = x0.max()
+    pred_min = x0.min()
+    pred_x0 = (x0 - pred_min) / (pred_max - pred_min) # 0 ... 1
+    pred_x0 = 2 * pred_x0 - 1. # -1 ... 1
+
+    s = torch.quantile(
+        rearrange(pred_x0, 'b ... -> b (...)').abs(),
+        value,
+        dim=-1
+    )
+    s.clamp_(min=1.0)
+    s = s.view(-1, *((1,) * (pred_x0.ndim - 1)))
+
+    # clip by threshold
+    # pred_x0 = pred_x0.clamp(-s, s) / s # needs newer pytorch # TODO bring back to pure-gpu with min/max
+
+    # temporary hack: numpy on cpu
+    pred_x0 = np.clip(pred_x0.cpu().numpy(), -s.cpu().numpy(), s.cpu().numpy()) / s.cpu().numpy()
+    pred_x0 = torch.tensor(pred_x0).to(x0.device)  # module-level helper, so use the input tensor's device
+
+    # renorm back to original range
+    pred_x0 = (pred_x0 + 1.) / 2. # 0 ... 
1 + pred_x0 = (pred_max - pred_min) * pred_x0 + pred_min # orig range + return pred_x0 + + +def norm_thresholding(x0, value): + s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) + return x0 * (value / s) + + +def spatial_norm_thresholding(x0, value): + # b c h w + s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) + return x0 * (value / s) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/attention.py b/stable-dreamfusion-3DPortrait/ldm/modules/attention.py new file mode 100644 index 0000000..124effb --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/attention.py @@ -0,0 +1,266 @@ +from inspect import isfunction +import math +import torch +import torch.nn.functional as F +from torch import nn, einsum +from einops import rearrange, repeat + +from ldm.modules.diffusionmodules.util import checkpoint + + +def exists(val): + return val is not None + + +def uniq(arr): + return{el: True for el in arr}.keys() + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def max_neg_value(t): + return -torch.finfo(t.dtype).max + + +def init_(tensor): + dim = tensor.shape[-1] + std = 1 / math.sqrt(dim) + tensor.uniform_(-std, std) + return tensor + + +# feedforward +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. 
+ """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def Normalize(in_channels): + return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + + +class LinearAttention(nn.Module): + def __init__(self, dim, heads=4, dim_head=32): + super().__init__() + self.heads = heads + hidden_dim = dim_head * heads + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) + self.to_out = nn.Conv2d(hidden_dim, dim, 1) + + def forward(self, x): + b, c, h, w = x.shape + qkv = self.to_qkv(x) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) + k = k.softmax(dim=-1) + context = torch.einsum('bhdn,bhen->bhde', k, v) + out = torch.einsum('bhde,bhdn->bhen', context, q) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + return self.to_out(out) + + +class SpatialSelfAttention(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = rearrange(q, 'b c h w -> b (h w) c') + k = rearrange(k, 'b c h w -> b c (h w)') + w_ = torch.einsum('bij,bjk->bik', q, k) + + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = rearrange(v, 'b c h w -> b c (h w)') + w_ = rearrange(w_, 'b i j -> b j i') + h_ = torch.einsum('bij,bjk->bik', v, w_) + h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) + h_ = self.proj_out(h_) + + return x+h_ + + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head ** -0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential( + nn.Linear(inner_dim, query_dim), + nn.Dropout(dropout) + ) + + def forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if exists(mask): + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + + +class BasicTransformerBlock(nn.Module): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, + disable_self_attn=False): + super().__init__() + self.disable_self_attn = disable_self_attn + self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, + context_dim=context_dim if self.disable_self_attn else None) # is a self-attention if not self.disable_self_attn + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, + heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint + + def forward(self, x, context=None): + return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) + + def _forward(self, x, context=None): + x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x + + +class SpatialTransformer(nn.Module): + """ + Transformer block for image-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. + Finally, reshape to image + """ + def __init__(self, in_channels, n_heads, d_head, + depth=1, dropout=0., context_dim=None, + disable_self_attn=False): + super().__init__() + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) + + self.transformer_blocks = nn.ModuleList( + [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim, + disable_self_attn=disable_self_attn) + for d in range(depth)] + ) + + self.proj_out = zero_module(nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0)) + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + x = self.proj_in(x) + x = rearrange(x, 'b c h w -> b (h w) c').contiguous() + for block in self.transformer_blocks: + x = block(x, context=context) + x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous() + x = self.proj_out(x) + return x + x_in diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/__init__.py b/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/model.py b/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/model.py new file mode 100644 index 0000000..533e589 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/model.py @@ -0,0 +1,835 @@ +# pytorch_diffusion + derived encoder decoder +import math +import torch +import torch.nn as nn +import numpy as np +from einops import rearrange + +from ldm.util import instantiate_from_config +from ldm.modules.attention import 
LinearAttention + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: + From Fairseq. + Build sinusoidal embeddings. + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". + """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0,1,0,0)) + return emb + + +def nonlinearity(x): + # swish + return x*torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=0) + + def forward(self, x): + if self.with_conv: + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, + dropout, temb_channels=512): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, + out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + else: + self.nin_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x+h + + +class LinAttnBlock(LinearAttention): + """to match AttnBlock usage""" + def __init__(self, in_channels): + 
super().__init__(dim=in_channels, heads=1, dim_head=in_channels) + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = q.reshape(b,c,h*w) + q = q.permute(0,2,1) # b,hw,c + k = k.reshape(b,c,h*w) # b,c,hw + w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b,c,h*w) + w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) + h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_ = h_.reshape(b,c,h,w) + + h_ = self.proj_out(h_) + + return x+h_ + + +def make_attn(in_channels, attn_type="vanilla"): + assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' + print(f"making attention of type '{attn_type}' with {in_channels} in_channels") + if attn_type == "vanilla": + return AttnBlock(in_channels) + elif attn_type == "none": + return nn.Identity(in_channels) + else: + return LinAttnBlock(in_channels) + + +class Model(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch*4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList([ + torch.nn.Linear(self.ch, + self.temb_ch), + torch.nn.Linear(self.temb_ch, + self.temb_ch), + ]) + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + 
out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + skip_in = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + if i_block == self.num_res_blocks: + skip_in = ch*in_ch_mult[i_level] + block.append(ResnetBlock(in_channels=block_in+skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x, t=None, context=None): + #assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", + **ignore_kwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if 
curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + 2*z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, + attn_type="vanilla", **ignorekwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,)+tuple(ch_mult) + block_in = ch*ch_mult[self.num_resolutions-1] + curr_res = resolution // 2**(self.num_resolutions-1) + self.z_shape = (1,z_channels,curr_res,curr_res) + print("Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape))) + + # z to block_in + self.conv_in = torch.nn.Conv2d(z_channels, + block_in, + kernel_size=3, + stride=1, + padding=1) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # 
prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, z): + #assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + if self.tanh_out: + h = torch.tanh(h) + return h + + +class SimpleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__() + self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock(in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + nn.Conv2d(2*in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True)]) + # end + self.norm_out = Normalize(in_channels) + self.conv_out = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + for i, layer in enumerate(self.model): + if i in [1,2,3]: + x = layer(x, None) + else: + x = layer(x) + + h = self.norm_out(x) + h = nonlinearity(h) + x = self.conv_out(h) + return x + + +class UpsampleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, + ch_mult=(2,2), dropout=0.0): + super().__init__() + # upsampling + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = in_channels + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.res_blocks = nn.ModuleList() + self.upsample_blocks = nn.ModuleList() + for i_level in range(self.num_resolutions): + res_block = [] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + res_block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + self.res_blocks.append(nn.ModuleList(res_block)) + if i_level != self.num_resolutions - 1: + self.upsample_blocks.append(Upsample(block_in, True)) + curr_res = curr_res * 2 + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # upsampling + h = x + for k, i_level in enumerate(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.res_blocks[i_level][i_block](h, None) + if i_level != self.num_resolutions - 1: + h = self.upsample_blocks[k](h) + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class LatentRescaler(nn.Module): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): + super().__init__() + # residual block, interpolate, residual block + self.factor = factor + self.conv_in = 
nn.Conv2d(in_channels, + mid_channels, + kernel_size=3, + stride=1, + padding=1) + self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + self.attn = AttnBlock(mid_channels) + self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + + self.conv_out = nn.Conv2d(mid_channels, + out_channels, + kernel_size=1, + ) + + def forward(self, x): + x = self.conv_in(x) + for block in self.res_block1: + x = block(x, None) + x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) + x = self.attn(x) + for block in self.res_block2: + x = block(x, None) + x = self.conv_out(x) + return x + + +class MergedRescaleEncoder(nn.Module): + def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, + ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + intermediate_chn = ch * ch_mult[-1] + self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, + z_channels=intermediate_chn, double_z=False, resolution=resolution, + attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, + out_ch=None) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, + mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) + + def forward(self, x): + x = self.encoder(x) + x = self.rescaler(x) + return x + + +class MergedRescaleDecoder(nn.Module): + def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), + dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + tmp_chn = z_channels*ch_mult[-1] + self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, + resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, + ch_mult=ch_mult, resolution=resolution, ch=ch) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, + out_channels=tmp_chn, depth=rescale_module_depth) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Upsampler(nn.Module): + def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): + super().__init__() + assert out_size >= in_size + num_blocks = int(np.log2(out_size//in_size))+1 + factor_up = 1.+ (out_size % in_size) + print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") + self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, + out_channels=in_channels) + self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, + attn_resolutions=[], in_channels=None, ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)]) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Resize(nn.Module): + def __init__(self, in_channels=None, learned=False, mode="bilinear"): + super().__init__() + self.with_conv = learned + self.mode = mode + if self.with_conv: + print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") + raise 
NotImplementedError() + assert in_channels is not None + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=4, + stride=2, + padding=1) + + def forward(self, x, scale_factor=1.0): + if scale_factor==1.0: + return x + else: + x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor) + return x + +class FirstStagePostProcessor(nn.Module): + + def __init__(self, ch_mult:list, in_channels, + pretrained_model:nn.Module=None, + reshape=False, + n_channels=None, + dropout=0., + pretrained_config=None): + super().__init__() + if pretrained_config is None: + assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.pretrained_model = pretrained_model + else: + assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.instantiate_pretrained(pretrained_config) + + self.do_reshape = reshape + + if n_channels is None: + n_channels = self.pretrained_model.encoder.ch + + self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) + self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, + stride=1,padding=1) + + blocks = [] + downs = [] + ch_in = n_channels + for m in ch_mult: + blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) + ch_in = m * n_channels + downs.append(Downsample(ch_in, with_conv=False)) + + self.model = nn.ModuleList(blocks) + self.downsampler = nn.ModuleList(downs) + + + def instantiate_pretrained(self, config): + model = instantiate_from_config(config) + self.pretrained_model = model.eval() + # self.pretrained_model.train = False + for param in self.pretrained_model.parameters(): + param.requires_grad = False + + + @torch.no_grad() + def encode_with_pretrained(self,x): + c = self.pretrained_model.encode(x) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + return c + + def forward(self,x): + z_fs = self.encode_with_pretrained(x) + z = self.proj_norm(z_fs) + z = self.proj(z) + z = nonlinearity(z) + + for submodel, downmodel in zip(self.model,self.downsampler): + z = submodel(z,temb=None) + z = downmodel(z) + + if self.do_reshape: + z = rearrange(z,'b c h w -> b (h w) c') + return z + diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/openaimodel.py b/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/openaimodel.py new file mode 100644 index 0000000..09f0ae1 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/openaimodel.py @@ -0,0 +1,996 @@ +from abc import abstractmethod +from functools import partial +import math +from typing import Iterable + +import numpy as np +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from ldm.modules.diffusionmodules.util import ( + checkpoint, + conv_nd, + linear, + avg_pool_nd, + zero_module, + normalization, + timestep_embedding, +) +from ldm.modules.attention import SpatialTransformer +from ldm.util import exists + + +# dummy replace +def convert_module_to_f16(x): + pass + +def convert_module_to_f32(x): + pass + + +## go +class AttentionPool2d(nn.Module): + """ + Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py + """ + + def __init__( + self, + spacial_dim: int, + embed_dim: int, + num_heads_channels: int, + output_dim: int = None, + ): + super().__init__() + self.positional_embedding = 
nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) + self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) + self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) + self.num_heads = embed_dim // num_heads_channels + self.attention = QKVAttention(self.num_heads) + + def forward(self, x): + b, c, *_spatial = x.shape + x = x.reshape(b, c, -1) # NC(HW) + x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) + x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) + x = self.qkv_proj(x) + x = self.attention(x) + x = self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, SpatialTransformer): + x = layer(x, context) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + +class TransposedUpsample(nn.Module): + 'Learned 2x upsampling without padding' + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) + + def forward(self,x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. 
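For orientation, a small shape sketch in plain torch (an illustration, not the repo's classes) of what the 2-D non-convolutional up/downsampling paths in this file compute; the convolutional variants keep the same 2x spatial scaling but add learned filtering.

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 32, 32)
up = F.interpolate(x, scale_factor=2, mode="nearest")  # (1, 64, 64, 64), as in Upsample
down = F.avg_pool2d(x, kernel_size=2, stride=2)        # (1, 64, 16, 16), as in Downsample(use_conv=False)
print(up.shape, down.shape)
```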
+ """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. 
+ """ + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. + """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + use_new_attention_order=False, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels) + self.qkv = conv_nd(1, channels, channels * 3, 1) + if use_new_attention_order: + # split qkv before split heads + self.attention = QKVAttention(self.num_heads) + else: + # split heads before split qkv + self.attention = QKVAttentionLegacy(self.num_heads) + + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x): + return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + #return pt_checkpoint(self._forward, x) # pytorch + + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial ** 2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. 
+ """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + disable_self_attentions=None, + num_attention_blocks=None + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' + from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + if isinstance(num_res_blocks, int): + self.num_res_blocks = len(channel_mult) * [num_res_blocks] + else: + if len(num_res_blocks) != len(channel_mult): + raise ValueError("provide num_res_blocks either as an int (globally constant) or " + "as a list/tuple (per-level) with the same length as channel_mult") + self.num_res_blocks = num_res_blocks + #self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: + # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not + assert len(disable_self_attentions) == len(channel_mult) + if num_attention_blocks is not None: + assert len(num_attention_blocks) == len(self.num_res_blocks) + assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) + print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" + f"This option has LESS priority than attention_resolutions {attention_resolutions}, " + f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " + f"attention will still not be set.") # todo: convert to warning + + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, 
+ ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(self.num_res_blocks[level] + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or i < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa + ) + ) + if level and i == self.num_res_blocks[level]: + out_ch = ch + layers.append( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. 
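An illustrative forward call, shapes only, assuming the hypothetical `unet` sketched after the class docstring above: the model takes a latent batch, one integer timestep per sample, and an optional cross-attention context, and returns a tensor of the same shape as the input when `out_channels == in_channels`.

```python
import torch

x = torch.randn(2, 4, 64, 64)        # latent batch
t = torch.randint(0, 1000, (2,))     # one diffusion timestep per sample
context = torch.randn(2, 77, 768)    # e.g. per-token conditioning embeddings
eps = unet(x, timesteps=t, context=context)
assert eps.shape == x.shape
```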
+ """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape == (x.shape[0],) + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) + + +class EncoderUNetModel(nn.Module): + """ + The half UNet model with attention and timestep embedding. + For usage, see UNet. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + pool="adaptive", + *args, + **kwargs + ): + super().__init__() + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + 
dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + self.pool = pool + if pool == "adaptive": + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + nn.AdaptiveAvgPool2d((1, 1)), + zero_module(conv_nd(dims, ch, out_channels, 1)), + nn.Flatten(), + ) + elif pool == "attention": + assert num_head_channels != -1 + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + AttentionPool2d( + (image_size // ds), ch, num_head_channels, out_channels + ), + ) + elif pool == "spatial": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + nn.ReLU(), + nn.Linear(2048, self.out_channels), + ) + elif pool == "spatial_v2": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + normalization(2048), + nn.SiLU(), + nn.Linear(2048, self.out_channels), + ) + else: + raise NotImplementedError(f"Unexpected {pool} pooling") + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x, timesteps): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :return: an [N x K] Tensor of outputs. + """ + emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) + + results = [] + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = self.middle_block(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = th.cat(results, axis=-1) + return self.out(h) + else: + h = h.type(x.dtype) + return self.out(h) + diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/util.py b/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/util.py new file mode 100644 index 0000000..a952e6c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/diffusionmodules/util.py @@ -0,0 +1,267 @@ +# adopted from +# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# and +# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +# and +# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py +# +# thanks! 
+ + +import os +import math +import torch +import torch.nn as nn +import numpy as np +from einops import repeat + +from ldm.util import instantiate_from_config + + +def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if schedule == "linear": + betas = ( + torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 + ) + + elif schedule == "cosine": + timesteps = ( + torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s + ) + alphas = timesteps / (1 + cosine_s) * np.pi / 2 + alphas = torch.cos(alphas).pow(2) + alphas = alphas / alphas[0] + betas = 1 - alphas[1:] / alphas[:-1] + betas = np.clip(betas, a_min=0, a_max=0.999) + + elif schedule == "sqrt_linear": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) + elif schedule == "sqrt": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 + else: + raise ValueError(f"schedule '{schedule}' unknown.") + return betas.numpy() + + +def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): + if ddim_discr_method == 'uniform': + c = num_ddpm_timesteps // num_ddim_timesteps + ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + elif ddim_discr_method == 'quad': + ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) + else: + raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') + + # assert ddim_timesteps.shape[0] == num_ddim_timesteps + # add one to get the final alpha values right (the ones from first scale to data during sampling) + steps_out = ddim_timesteps + 1 + if verbose: + print(f'Selected timesteps for ddim sampler: {steps_out}') + return steps_out + + +def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): + # select alphas for computing the variance schedule + alphas = alphacums[ddim_timesteps] + alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) + + # according the the formula provided in https://arxiv.org/abs/2010.02502 + sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) + if verbose: + print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') + print(f'For the chosen value of eta, which is {eta}, ' + f'this results in the following sigma_t schedule for ddim sampler {sigmas}') + return sigmas, alphas, alphas_prev + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. 
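`timestep_embedding` above produces the standard sinusoidal embedding: the first half of the output holds cosines and the second half sines of the timestep scaled by geometrically spaced frequencies. A short usage sketch, assuming the function above is in scope:

```python
# Usage sketch for timestep_embedding (defined above): one sinusoidal vector
# per batch element, later passed through the UNet's time_embed MLP.
import torch

t = torch.randint(0, 1000, (8,))        # a batch of 8 diffusion timesteps
emb = timestep_embedding(t, dim=320)    # [8, 320]; first 160 dims are cosines
print(emb.shape, bool(emb.abs().max() <= 1.0))
```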
+ """ + return GroupNorm32(32, channels) + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +class HybridConditioner(nn.Module): + + def __init__(self, c_concat_config, c_crossattn_config): + super().__init__() + self.concat_conditioner = instantiate_from_config(c_concat_config) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) + + def forward(self, c_concat, c_crossattn): + c_concat = self.concat_conditioner(c_concat) + c_crossattn = self.crossattn_conditioner(c_crossattn) + return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/distributions/__init__.py b/stable-dreamfusion-3DPortrait/ldm/modules/distributions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/distributions/distributions.py b/stable-dreamfusion-3DPortrait/ldm/modules/distributions/distributions.py new file mode 100644 index 0000000..f2b8ef9 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / 
other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/ema.py b/stable-dreamfusion-3DPortrait/ldm/modules/ema.py new file mode 100644 index 0000000..c8c75af --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/ema.py @@ -0,0 +1,76 @@ +import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates + else torch.tensor(-1,dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + #remove as '.'-character is not allowed in buffers + s_name = name.replace('.','') + self.m_name2s_name.update({name:s_name}) + self.register_buffer(s_name,p.clone().detach().data) + + self.collected_params = [] + + def forward(self,model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name + + def store(self, parameters): + """ + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. 
+ """ + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """ + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. After validation (or model saving), use this to + restore the former parameters. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. + """ + for c_param, param in zip(self.collected_params, parameters): + param.data.copy_(c_param.data) diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/encoders/__init__.py b/stable-dreamfusion-3DPortrait/ldm/modules/encoders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/encoders/modules.py b/stable-dreamfusion-3DPortrait/ldm/modules/encoders/modules.py new file mode 100644 index 0000000..b1afccf --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/encoders/modules.py @@ -0,0 +1,550 @@ +import torch +import torch.nn as nn +import numpy as np +from functools import partial +import kornia + +from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test +from ldm.util import default +import clip + + +class AbstractEncoder(nn.Module): + def __init__(self): + super().__init__() + + def encode(self, *args, **kwargs): + raise NotImplementedError + +class IdentityEncoder(AbstractEncoder): + + def encode(self, x): + return x + +class FaceClipEncoder(AbstractEncoder): + def __init__(self, augment=True, retreival_key=None): + super().__init__() + self.encoder = FrozenCLIPImageEmbedder() + self.augment = augment + self.retreival_key = retreival_key + + def forward(self, img): + encodings = [] + with torch.no_grad(): + x_offset = 125 + if self.retreival_key: + # Assumes retrieved image are packed into the second half of channels + face = img[:,3:,190:440,x_offset:(512-x_offset)] + other = img[:,:3,...].clone() + else: + face = img[:,:,190:440,x_offset:(512-x_offset)] + other = img.clone() + + if self.augment: + face = K.RandomHorizontalFlip()(face) + + other[:,:,190:440,x_offset:(512-x_offset)] *= 0 + encodings = [ + self.encoder.encode(face), + self.encoder.encode(other), + ] + + return torch.cat(encodings, dim=1) + + def encode(self, img): + if isinstance(img, list): + # Uncondition + return torch.zeros((1, 2, 768), device=self.encoder.model.visual.conv1.weight.device) + + return self(img) + +class FaceIdClipEncoder(AbstractEncoder): + def __init__(self): + super().__init__() + self.encoder = FrozenCLIPImageEmbedder() + for p in self.encoder.parameters(): + p.requires_grad = False + self.id = FrozenFaceEncoder("/home/jpinkney/code/stable-diffusion/model_ir_se50.pth", augment=True) + + def forward(self, img): + encodings = [] + with torch.no_grad(): + face = kornia.geometry.resize(img, (256, 256), + interpolation='bilinear', align_corners=True) + + other = img.clone() + other[:,:,184:452,122:396] *= 0 + encodings = [ + self.id.encode(face), + self.encoder.encode(other), + ] + + return torch.cat(encodings, dim=1) + + def encode(self, img): + if isinstance(img, list): + # Uncondition + return torch.zeros((1, 2, 768), device=self.encoder.model.visual.conv1.weight.device) + + return self(img) + +class ClassEmbedder(nn.Module): + def __init__(self, embed_dim, n_classes=1000, key='class'): + super().__init__() + 
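`LitEma` above keeps an exponential moving average of every trainable parameter in shadow buffers; calling the module with the live model after each optimizer step updates the shadows, and `store`/`copy_to`/`restore` let you evaluate with the averaged weights without losing the live ones. A minimal usage sketch on a toy model (the fake "optimizer update" is only a stand-in):

```python
# Minimal LitEma usage sketch on a toy model.
import torch
import torch.nn as nn

model = nn.Linear(4, 4)
ema = LitEma(model, decay=0.999)

with torch.no_grad():                      # stand-in for one optimizer step
    for p in model.parameters():
        p.add_(0.01 * torch.randn_like(p))
ema(model)                                 # update the shadow (EMA) weights

ema.store(model.parameters())              # stash the live weights
ema.copy_to(model)                         # load EMA weights for evaluation
# ... run validation here ...
ema.restore(model.parameters())            # put the live weights back
```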
self.key = key + self.embedding = nn.Embedding(n_classes, embed_dim) + + def forward(self, batch, key=None): + if key is None: + key = self.key + # this is for use in crossattn + c = batch[key][:, None] + c = self.embedding(c) + return c + + +class TransformerEmbedder(AbstractEncoder): + """Some transformer encoder layers""" + def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): + super().__init__() + self.device = device + self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, + attn_layers=Encoder(dim=n_embed, depth=n_layer)) + + def forward(self, tokens): + tokens = tokens.to(self.device) # meh + z = self.transformer(tokens, return_embeddings=True) + return z + + def encode(self, x): + return self(x) + + +class BERTTokenizer(AbstractEncoder): + """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)""" + def __init__(self, device="cuda", vq_interface=True, max_length=77): + super().__init__() + from transformers import BertTokenizerFast # TODO: add to reuquirements + self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") + self.device = device + self.vq_interface = vq_interface + self.max_length = max_length + + def forward(self, text): + batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"].to(self.device) + return tokens + + @torch.no_grad() + def encode(self, text): + tokens = self(text) + if not self.vq_interface: + return tokens + return None, None, [None, None, tokens] + + def decode(self, text): + return text + + +class BERTEmbedder(AbstractEncoder): + """Uses the BERT tokenizr model and add some transformer encoder layers""" + def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, + device="cuda",use_tokenizer=True, embedding_dropout=0.0): + super().__init__() + self.use_tknz_fn = use_tokenizer + if self.use_tknz_fn: + self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) + self.device = device + self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, + attn_layers=Encoder(dim=n_embed, depth=n_layer), + emb_dropout=embedding_dropout) + + def forward(self, text): + if self.use_tknz_fn: + tokens = self.tknz_fn(text)#.to(self.device) + else: + tokens = text + z = self.transformer(tokens, return_embeddings=True) + return z + + def encode(self, text): + # output of length 77 + return self(text) + + +from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class FrozenT5Embedder(AbstractEncoder): + """Uses the T5 transformer encoder for text""" + def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl + super().__init__() + self.tokenizer = T5Tokenizer.from_pretrained(version) + self.transformer = T5EncoderModel.from_pretrained(version) + self.device = device + self.max_length = max_length # TODO: typical value? 
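`ClassEmbedder` above turns integer class labels into a single conditioning token that can be fed to cross-attention. A quick sketch with hypothetical embedding size and class count:

```python
# ClassEmbedder sketch: one learned token per class label (sizes are illustrative).
import torch

embedder = ClassEmbedder(embed_dim=512, n_classes=1000, key='class')
batch = {'class': torch.randint(0, 1000, (8,))}
c = embedder(batch)   # [8, 1, 512]: a one-token conditioning sequence per sample
print(c.shape)
```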
+ self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + #self.train = disabled_train + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer(input_ids=tokens) + + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + +from ldm.thirdp.psp.id_loss import IDFeatures +import kornia.augmentation as K + +class FrozenFaceEncoder(AbstractEncoder): + def __init__(self, model_path, augment=False): + super().__init__() + self.loss_fn = IDFeatures(model_path) + # face encoder is frozen + for p in self.loss_fn.parameters(): + p.requires_grad = False + # Mapper is trainable + self.mapper = torch.nn.Linear(512, 768) + p = 0.25 + if augment: + self.augment = K.AugmentationSequential( + K.RandomHorizontalFlip(p=0.5), + K.RandomEqualize(p=p), + # K.RandomPlanckianJitter(p=p), + # K.RandomPlasmaBrightness(p=p), + # K.RandomPlasmaContrast(p=p), + # K.ColorJiggle(0.02, 0.2, 0.2, p=p), + ) + else: + self.augment = False + + def forward(self, img): + if isinstance(img, list): + # Uncondition + return torch.zeros((1, 1, 768), device=self.mapper.weight.device) + + if self.augment is not None: + # Transforms require 0-1 + img = self.augment((img + 1)/2) + img = 2*img - 1 + + feat = self.loss_fn(img, crop=True) + feat = self.mapper(feat.unsqueeze(1)) + return feat + + def encode(self, img): + return self(img) + +class FrozenCLIPEmbedder(AbstractEncoder): + """Uses the CLIP transformer encoder for text (from huggingface)""" + def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): # clip-vit-base-patch32 + super().__init__() + self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + self.device = device + self.max_length = max_length # TODO: typical value? + self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + #self.train = disabled_train + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer(input_ids=tokens) + + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + +import torch.nn.functional as F +from transformers import CLIPVisionModel +class ClipImageProjector(AbstractEncoder): + """ + Uses the CLIP image encoder. + """ + def __init__(self, version="openai/clip-vit-large-patch14", max_length=77): # clip-vit-base-patch32 + super().__init__() + self.model = CLIPVisionModel.from_pretrained(version) + self.model.train() + self.max_length = max_length # TODO: typical value? 
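`FrozenCLIPEmbedder` above is the usual text conditioner: prompts are padded or truncated to 77 tokens and the CLIP text transformer's last hidden state becomes the cross-attention context. A usage sketch, assuming a CUDA device and that the Hugging Face weights for `openai/clip-vit-large-patch14` can be loaded:

```python
# FrozenCLIPEmbedder usage sketch (requires the pretrained weights to be available).
import torch

text_encoder = FrozenCLIPEmbedder(version="openai/clip-vit-large-patch14",
                                  device="cuda", max_length=77).cuda()
with torch.no_grad():
    z = text_encoder.encode(["a 3D portrait of a person, studio lighting"])
print(z.shape)   # expected [1, 77, 768]
```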
+ self.antialias = True + self.mapper = torch.nn.Linear(1024, 768) + self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) + self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) + null_cond = self.get_null_cond(version, max_length) + self.register_buffer('null_cond', null_cond) + + @torch.no_grad() + def get_null_cond(self, version, max_length): + device = self.mean.device + embedder = FrozenCLIPEmbedder(version=version, device=device, max_length=max_length) + null_cond = embedder([""]) + return null_cond + + def preprocess(self, x): + # Expects inputs in the range -1, 1 + x = kornia.geometry.resize(x, (224, 224), + interpolation='bicubic',align_corners=True, + antialias=self.antialias) + x = (x + 1.) / 2. + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def forward(self, x): + if isinstance(x, list): + return self.null_cond + # x is assumed to be in range [-1,1] + x = self.preprocess(x) + outputs = self.model(pixel_values=x) + last_hidden_state = outputs.last_hidden_state + last_hidden_state = self.mapper(last_hidden_state) + return F.pad(last_hidden_state, [0,0, 0,self.max_length-last_hidden_state.shape[1], 0,0]) + + def encode(self, im): + return self(im) + +class ProjectedFrozenCLIPEmbedder(AbstractEncoder): + def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): # clip-vit-base-patch32 + super().__init__() + self.embedder = FrozenCLIPEmbedder(version=version, device=device, max_length=max_length) + self.projection = torch.nn.Linear(768, 768) + + def forward(self, text): + z = self.embedder(text) + return self.projection(z) + + def encode(self, text): + return self(text) + +class FrozenCLIPImageEmbedder(AbstractEncoder): + """ + Uses the CLIP image encoder. + Not actually frozen... If you want that set cond_stage_trainable=False in cfg + """ + def __init__( + self, + model='ViT-L/14', + jit=False, + device='cpu', + antialias=False, + ): + super().__init__() + self.model, _ = clip.load(name=model, device=device, jit=jit) + # We don't use the text part so delete it + del self.model.transformer + self.antialias = antialias + self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) + self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) + + def preprocess(self, x): + # Expects inputs in the range -1, 1 + x = kornia.geometry.resize(x, (224, 224), + interpolation='bicubic',align_corners=True, + antialias=self.antialias) + x = (x + 1.) / 2. + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def forward(self, x): + # x is assumed to be in range [-1,1] + if isinstance(x, list): + # [""] denotes condition dropout for ucg + device = self.model.visual.conv1.weight.device + return torch.zeros(1, 768, device=device) + return self.model.encode_image(self.preprocess(x)).float() + + def encode(self, im): + return self(im).unsqueeze(1) + +from torchvision import transforms +import random + +class FrozenCLIPImageMutliEmbedder(AbstractEncoder): + """ + Uses the CLIP image encoder. + Not actually frozen... 
If you want that set cond_stage_trainable=False in cfg + """ + def __init__( + self, + model='ViT-L/14', + jit=False, + device='cpu', + antialias=True, + max_crops=5, + ): + super().__init__() + self.model, _ = clip.load(name=model, device=device, jit=jit) + # We don't use the text part so delete it + del self.model.transformer + self.antialias = antialias + self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) + self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) + self.max_crops = max_crops + + def preprocess(self, x): + + # Expects inputs in the range -1, 1 + randcrop = transforms.RandomResizedCrop(224, scale=(0.085, 1.0), ratio=(1,1)) + max_crops = self.max_crops + patches = [] + crops = [randcrop(x) for _ in range(max_crops)] + patches.extend(crops) + x = torch.cat(patches, dim=0) + x = (x + 1.) / 2. + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def forward(self, x): + # x is assumed to be in range [-1,1] + if isinstance(x, list): + # [""] denotes condition dropout for ucg + device = self.model.visual.conv1.weight.device + return torch.zeros(1, self.max_crops, 768, device=device) + batch_tokens = [] + for im in x: + patches = self.preprocess(im.unsqueeze(0)) + tokens = self.model.encode_image(patches).float() + for t in tokens: + if random.random() < 0.1: + t *= 0 + batch_tokens.append(tokens.unsqueeze(0)) + + return torch.cat(batch_tokens, dim=0) + + def encode(self, im): + return self(im) + +class SpatialRescaler(nn.Module): + def __init__(self, + n_stages=1, + method='bilinear', + multiplier=0.5, + in_channels=3, + out_channels=None, + bias=False): + super().__init__() + self.n_stages = n_stages + assert self.n_stages >= 0 + assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] + self.multiplier = multiplier + self.interpolator = partial(torch.nn.functional.interpolate, mode=method) + self.remap_output = out_channels is not None + if self.remap_output: + print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') + self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) + + def forward(self,x): + for stage in range(self.n_stages): + x = self.interpolator(x, scale_factor=self.multiplier) + + + if self.remap_output: + x = self.channel_mapper(x) + return x + + def encode(self, x): + return self(x) + + +from ldm.util import instantiate_from_config +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like + + +class LowScaleEncoder(nn.Module): + def __init__(self, model_config, linear_start, linear_end, timesteps=1000, max_noise_level=250, output_size=64, + scale_factor=1.0): + super().__init__() + self.max_noise_level = max_noise_level + self.model = instantiate_from_config(model_config) + self.augmentation_schedule = self.register_schedule(timesteps=timesteps, linear_start=linear_start, + linear_end=linear_end) + self.out_size = output_size + self.scale_factor = scale_factor + + def register_schedule(self, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. 
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def forward(self, x): + z = self.model.encode(x).sample() + z = z * self.scale_factor + noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long() + z = self.q_sample(z, noise_level) + if self.out_size is not None: + z = torch.nn.functional.interpolate(z, size=self.out_size, mode="nearest") # TODO: experiment with mode + # z = z.repeat_interleave(2, -2).repeat_interleave(2, -1) + return z, noise_level + + def decode(self, z): + z = z / self.scale_factor + return self.model.decode(z) + + +if __name__ == "__main__": + from ldm.util import count_params + sentences = ["a hedgehog drinking a whiskey", "der mond ist aufgegangen", "Ein Satz mit vielen Sonderzeichen: äöü ß ?! 
: 'xx-y/@s'"] + model = FrozenT5Embedder(version="google/t5-v1_1-xl").cuda() + count_params(model, True) + z = model(sentences) + print(z.shape) + + model = FrozenCLIPEmbedder().cuda() + count_params(model, True) + z = model(sentences) + print(z.shape) + + print("done.") diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/adm_evaluator.py b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/adm_evaluator.py new file mode 100644 index 0000000..508cddf --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/adm_evaluator.py @@ -0,0 +1,676 @@ +import argparse +import io +import os +import random +import warnings +import zipfile +from abc import ABC, abstractmethod +from contextlib import contextmanager +from functools import partial +from multiprocessing import cpu_count +from multiprocessing.pool import ThreadPool +from typing import Iterable, Optional, Tuple +import yaml + +import numpy as np +import requests +import tensorflow.compat.v1 as tf +from scipy import linalg +from tqdm.auto import tqdm + +INCEPTION_V3_URL = "https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/classify_image_graph_def.pb" +INCEPTION_V3_PATH = "classify_image_graph_def.pb" + +FID_POOL_NAME = "pool_3:0" +FID_SPATIAL_NAME = "mixed_6/conv:0" + +REQUIREMENTS = f"This script has the following requirements: \n" \ + 'tensorflow-gpu>=2.0' + "\n" + 'scipy' + "\n" + "requests" + "\n" + "tqdm" + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--ref_batch", help="path to reference batch npz file") + parser.add_argument("--sample_batch", help="path to sample batch npz file") + args = parser.parse_args() + + config = tf.ConfigProto( + allow_soft_placement=True # allows DecodeJpeg to run on CPU in Inception graph + ) + config.gpu_options.allow_growth = True + evaluator = Evaluator(tf.Session(config=config)) + + print("warming up TensorFlow...") + # This will cause TF to print a bunch of verbose stuff now rather + # than after the next print(), to help prevent confusion. 
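The evaluator below reads reference and sample batches from `.npz` files and reports IS, FID, sFID, precision and recall. It looks up an array named `arr_0` holding NHWC images in [0, 255], so a batch file could be prepared along these lines (the file name and the random images are placeholders):

```python
# Hypothetical preparation of a sample batch for the ADM evaluator below.
import numpy as np

images = (np.random.rand(64, 256, 256, 3) * 255).astype(np.uint8)  # NHWC, [0, 255]
np.savez("sample_batch.npz", arr_0=images)
```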
+ evaluator.warmup() + + print("computing reference batch activations...") + ref_acts = evaluator.read_activations(args.ref_batch) + print("computing/reading reference batch statistics...") + ref_stats, ref_stats_spatial = evaluator.read_statistics(args.ref_batch, ref_acts) + + print("computing sample batch activations...") + sample_acts = evaluator.read_activations(args.sample_batch) + print("computing/reading sample batch statistics...") + sample_stats, sample_stats_spatial = evaluator.read_statistics(args.sample_batch, sample_acts) + + print("Computing evaluations...") + is_ = evaluator.compute_inception_score(sample_acts[0]) + print("Inception Score:", is_) + fid = sample_stats.frechet_distance(ref_stats) + print("FID:", fid) + sfid = sample_stats_spatial.frechet_distance(ref_stats_spatial) + print("sFID:", sfid) + prec, recall = evaluator.compute_prec_recall(ref_acts[0], sample_acts[0]) + print("Precision:", prec) + print("Recall:", recall) + + savepath = '/'.join(args.sample_batch.split('/')[:-1]) + results_file = os.path.join(savepath,'evaluation_metrics.yaml') + print(f'Saving evaluation results to "{results_file}"') + + results = { + 'IS': is_, + 'FID': fid, + 'sFID': sfid, + 'Precision:':prec, + 'Recall': recall + } + + with open(results_file, 'w') as f: + yaml.dump(results, f, default_flow_style=False) + +class InvalidFIDException(Exception): + pass + + +class FIDStatistics: + def __init__(self, mu: np.ndarray, sigma: np.ndarray): + self.mu = mu + self.sigma = sigma + + def frechet_distance(self, other, eps=1e-6): + """ + Compute the Frechet distance between two sets of statistics. + """ + # https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L132 + mu1, sigma1 = self.mu, self.sigma + mu2, sigma2 = other.mu, other.sigma + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert ( + mu1.shape == mu2.shape + ), f"Training and test mean vectors have different lengths: {mu1.shape}, {mu2.shape}" + assert ( + sigma1.shape == sigma2.shape + ), f"Training and test covariances have different dimensions: {sigma1.shape}, {sigma2.shape}" + + diff = mu1 - mu2 + + # product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ( + "fid calculation produces singular product; adding %s to diagonal of cov estimates" + % eps + ) + warnings.warn(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError("Imaginary component {}".format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean + + +class Evaluator: + def __init__( + self, + session, + batch_size=64, + softmax_batch_size=512, + ): + self.sess = session + self.batch_size = batch_size + self.softmax_batch_size = softmax_batch_size + self.manifold_estimator = ManifoldEstimator(session) + with self.sess.graph.as_default(): + self.image_input = tf.placeholder(tf.float32, shape=[None, None, None, 3]) + self.softmax_input = tf.placeholder(tf.float32, shape=[None, 2048]) + self.pool_features, self.spatial_features = _create_feature_graph(self.image_input) + 
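`FIDStatistics.frechet_distance` above evaluates the standard FID formula, ||mu_1 - mu_2||^2 + Tr(Sigma_1 + Sigma_2 - 2 (Sigma_1 Sigma_2)^{1/2}). A tiny numeric sketch on synthetic activations (4-dimensional here instead of the real 2048-dimensional Inception features):

```python
# Frechet distance between two synthetic activation sets using FIDStatistics (above).
import numpy as np

rng = np.random.default_rng(0)
acts_ref = rng.normal(size=(500, 4))
acts_gen = rng.normal(loc=0.1, size=(500, 4))

stats_ref = FIDStatistics(acts_ref.mean(axis=0), np.cov(acts_ref, rowvar=False))
stats_gen = FIDStatistics(acts_gen.mean(axis=0), np.cov(acts_gen, rowvar=False))
print(stats_gen.frechet_distance(stats_ref))   # a small positive number
```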
self.softmax = _create_softmax_graph(self.softmax_input) + + def warmup(self): + self.compute_activations(np.zeros([1, 8, 64, 64, 3])) + + def read_activations(self, npz_path: str) -> Tuple[np.ndarray, np.ndarray]: + with open_npz_array(npz_path, "arr_0") as reader: + return self.compute_activations(reader.read_batches(self.batch_size)) + + def compute_activations(self, batches: Iterable[np.ndarray],silent=False) -> Tuple[np.ndarray, np.ndarray]: + """ + Compute image features for downstream evals. + + :param batches: a iterator over NHWC numpy arrays in [0, 255]. + :return: a tuple of numpy arrays of shape [N x X], where X is a feature + dimension. The tuple is (pool_3, spatial). + """ + preds = [] + spatial_preds = [] + it = batches if silent else tqdm(batches) + for batch in it: + batch = batch.astype(np.float32) + pred, spatial_pred = self.sess.run( + [self.pool_features, self.spatial_features], {self.image_input: batch} + ) + preds.append(pred.reshape([pred.shape[0], -1])) + spatial_preds.append(spatial_pred.reshape([spatial_pred.shape[0], -1])) + return ( + np.concatenate(preds, axis=0), + np.concatenate(spatial_preds, axis=0), + ) + + def read_statistics( + self, npz_path: str, activations: Tuple[np.ndarray, np.ndarray] + ) -> Tuple[FIDStatistics, FIDStatistics]: + obj = np.load(npz_path) + if "mu" in list(obj.keys()): + return FIDStatistics(obj["mu"], obj["sigma"]), FIDStatistics( + obj["mu_s"], obj["sigma_s"] + ) + return tuple(self.compute_statistics(x) for x in activations) + + def compute_statistics(self, activations: np.ndarray) -> FIDStatistics: + mu = np.mean(activations, axis=0) + sigma = np.cov(activations, rowvar=False) + return FIDStatistics(mu, sigma) + + def compute_inception_score(self, activations: np.ndarray, split_size: int = 5000) -> float: + softmax_out = [] + for i in range(0, len(activations), self.softmax_batch_size): + acts = activations[i : i + self.softmax_batch_size] + softmax_out.append(self.sess.run(self.softmax, feed_dict={self.softmax_input: acts})) + preds = np.concatenate(softmax_out, axis=0) + # https://github.com/openai/improved-gan/blob/4f5d1ec5c16a7eceb206f42bfc652693601e1d5c/inception_score/model.py#L46 + scores = [] + for i in range(0, len(preds), split_size): + part = preds[i : i + split_size] + kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0))) + kl = np.mean(np.sum(kl, 1)) + scores.append(np.exp(kl)) + return float(np.mean(scores)) + + def compute_prec_recall( + self, activations_ref: np.ndarray, activations_sample: np.ndarray + ) -> Tuple[float, float]: + radii_1 = self.manifold_estimator.manifold_radii(activations_ref) + radii_2 = self.manifold_estimator.manifold_radii(activations_sample) + pr = self.manifold_estimator.evaluate_pr( + activations_ref, radii_1, activations_sample, radii_2 + ) + return (float(pr[0][0]), float(pr[1][0])) + + +class ManifoldEstimator: + """ + A helper for comparing manifolds of feature vectors. + + Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L57 + """ + + def __init__( + self, + session, + row_batch_size=10000, + col_batch_size=10000, + nhood_sizes=(3,), + clamp_to_percentile=None, + eps=1e-5, + ): + """ + Estimate the manifold of given feature vectors. + + :param session: the TensorFlow session. 
+ :param row_batch_size: row batch size to compute pairwise distances + (parameter to trade-off between memory usage and performance). + :param col_batch_size: column batch size to compute pairwise distances. + :param nhood_sizes: number of neighbors used to estimate the manifold. + :param clamp_to_percentile: prune hyperspheres that have radius larger than + the given percentile. + :param eps: small number for numerical stability. + """ + self.distance_block = DistanceBlock(session) + self.row_batch_size = row_batch_size + self.col_batch_size = col_batch_size + self.nhood_sizes = nhood_sizes + self.num_nhoods = len(nhood_sizes) + self.clamp_to_percentile = clamp_to_percentile + self.eps = eps + + def warmup(self): + feats, radii = ( + np.zeros([1, 2048], dtype=np.float32), + np.zeros([1, 1], dtype=np.float32), + ) + self.evaluate_pr(feats, radii, feats, radii) + + def manifold_radii(self, features: np.ndarray) -> np.ndarray: + num_images = len(features) + + # Estimate manifold of features by calculating distances to k-NN of each sample. + radii = np.zeros([num_images, self.num_nhoods], dtype=np.float32) + distance_batch = np.zeros([self.row_batch_size, num_images], dtype=np.float32) + seq = np.arange(max(self.nhood_sizes) + 1, dtype=np.int32) + + for begin1 in range(0, num_images, self.row_batch_size): + end1 = min(begin1 + self.row_batch_size, num_images) + row_batch = features[begin1:end1] + + for begin2 in range(0, num_images, self.col_batch_size): + end2 = min(begin2 + self.col_batch_size, num_images) + col_batch = features[begin2:end2] + + # Compute distances between batches. + distance_batch[ + 0 : end1 - begin1, begin2:end2 + ] = self.distance_block.pairwise_distances(row_batch, col_batch) + + # Find the k-nearest neighbor from the current batch. + radii[begin1:end1, :] = np.concatenate( + [ + x[:, self.nhood_sizes] + for x in _numpy_partition(distance_batch[0 : end1 - begin1, :], seq, axis=1) + ], + axis=0, + ) + + if self.clamp_to_percentile is not None: + max_distances = np.percentile(radii, self.clamp_to_percentile, axis=0) + radii[radii > max_distances] = 0 + return radii + + def evaluate(self, features: np.ndarray, radii: np.ndarray, eval_features: np.ndarray): + """ + Evaluate if new feature vectors are at the manifold. + """ + num_eval_images = eval_features.shape[0] + num_ref_images = radii.shape[0] + distance_batch = np.zeros([self.row_batch_size, num_ref_images], dtype=np.float32) + batch_predictions = np.zeros([num_eval_images, self.num_nhoods], dtype=np.int32) + max_realism_score = np.zeros([num_eval_images], dtype=np.float32) + nearest_indices = np.zeros([num_eval_images], dtype=np.int32) + + for begin1 in range(0, num_eval_images, self.row_batch_size): + end1 = min(begin1 + self.row_batch_size, num_eval_images) + feature_batch = eval_features[begin1:end1] + + for begin2 in range(0, num_ref_images, self.col_batch_size): + end2 = min(begin2 + self.col_batch_size, num_ref_images) + ref_batch = features[begin2:end2] + + distance_batch[ + 0 : end1 - begin1, begin2:end2 + ] = self.distance_block.pairwise_distances(feature_batch, ref_batch) + + # From the minibatch of new feature vectors, determine if they are in the estimated manifold. + # If a feature vector is inside a hypersphere of some reference sample, then + # the new sample lies at the estimated manifold. + # The radii of the hyperspheres are determined from distances of neighborhood size k. 
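`ManifoldEstimator` above follows the improved precision/recall recipe: every feature vector gets a hypersphere whose radius is the distance to its k-th nearest neighbour, and a point from the other set counts as covered if it falls inside at least one such sphere. A toy NumPy version of the same idea (brute-force distances, no batching):

```python
# Toy precision/recall in the spirit of ManifoldEstimator (no batching, tiny data).
import numpy as np

def knn_radii(feats, k=3):
    d = np.linalg.norm(feats[:, None] - feats[None, :], axis=-1)
    return np.sort(d, axis=1)[:, k]   # distance to the k-th neighbour (index 0 is self)

def coverage(ref, ref_radii, other):
    d = np.linalg.norm(other[:, None] - ref[None, :], axis=-1)   # [len(other), len(ref)]
    return float(np.mean(np.any(d <= ref_radii[None, :], axis=1)))

rng = np.random.default_rng(0)
real = rng.normal(size=(200, 8))
fake = rng.normal(size=(200, 8))
precision = coverage(real, knn_radii(real), fake)   # fake samples inside the real manifold
recall = coverage(fake, knn_radii(fake), real)      # real samples inside the fake manifold
print(precision, recall)
```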
+ samples_in_manifold = distance_batch[0 : end1 - begin1, :, None] <= radii + batch_predictions[begin1:end1] = np.any(samples_in_manifold, axis=1).astype(np.int32) + + max_realism_score[begin1:end1] = np.max( + radii[:, 0] / (distance_batch[0 : end1 - begin1, :] + self.eps), axis=1 + ) + nearest_indices[begin1:end1] = np.argmin(distance_batch[0 : end1 - begin1, :], axis=1) + + return { + "fraction": float(np.mean(batch_predictions)), + "batch_predictions": batch_predictions, + "max_realisim_score": max_realism_score, + "nearest_indices": nearest_indices, + } + + def evaluate_pr( + self, + features_1: np.ndarray, + radii_1: np.ndarray, + features_2: np.ndarray, + radii_2: np.ndarray, + ) -> Tuple[np.ndarray, np.ndarray]: + """ + Evaluate precision and recall efficiently. + + :param features_1: [N1 x D] feature vectors for reference batch. + :param radii_1: [N1 x K1] radii for reference vectors. + :param features_2: [N2 x D] feature vectors for the other batch. + :param radii_2: [N x K2] radii for other vectors. + :return: a tuple of arrays for (precision, recall): + - precision: an np.ndarray of length K1 + - recall: an np.ndarray of length K2 + """ + features_1_status = np.zeros([len(features_1), radii_2.shape[1]], dtype=np.bool) + features_2_status = np.zeros([len(features_2), radii_1.shape[1]], dtype=np.bool) + for begin_1 in range(0, len(features_1), self.row_batch_size): + end_1 = begin_1 + self.row_batch_size + batch_1 = features_1[begin_1:end_1] + for begin_2 in range(0, len(features_2), self.col_batch_size): + end_2 = begin_2 + self.col_batch_size + batch_2 = features_2[begin_2:end_2] + batch_1_in, batch_2_in = self.distance_block.less_thans( + batch_1, radii_1[begin_1:end_1], batch_2, radii_2[begin_2:end_2] + ) + features_1_status[begin_1:end_1] |= batch_1_in + features_2_status[begin_2:end_2] |= batch_2_in + return ( + np.mean(features_2_status.astype(np.float64), axis=0), + np.mean(features_1_status.astype(np.float64), axis=0), + ) + + +class DistanceBlock: + """ + Calculate pairwise distances between vectors. + + Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L34 + """ + + def __init__(self, session): + self.session = session + + # Initialize TF graph to calculate pairwise distances. + with session.graph.as_default(): + self._features_batch1 = tf.placeholder(tf.float32, shape=[None, None]) + self._features_batch2 = tf.placeholder(tf.float32, shape=[None, None]) + distance_block_16 = _batch_pairwise_distances( + tf.cast(self._features_batch1, tf.float16), + tf.cast(self._features_batch2, tf.float16), + ) + self.distance_block = tf.cond( + tf.reduce_all(tf.math.is_finite(distance_block_16)), + lambda: tf.cast(distance_block_16, tf.float32), + lambda: _batch_pairwise_distances(self._features_batch1, self._features_batch2), + ) + + # Extra logic for less thans. + self._radii1 = tf.placeholder(tf.float32, shape=[None, None]) + self._radii2 = tf.placeholder(tf.float32, shape=[None, None]) + dist32 = tf.cast(self.distance_block, tf.float32)[..., None] + self._batch_1_in = tf.math.reduce_any(dist32 <= self._radii2, axis=1) + self._batch_2_in = tf.math.reduce_any(dist32 <= self._radii1[:, None], axis=0) + + def pairwise_distances(self, U, V): + """ + Evaluate pairwise distances between two batches of feature vectors. 
+ """ + return self.session.run( + self.distance_block, + feed_dict={self._features_batch1: U, self._features_batch2: V}, + ) + + def less_thans(self, batch_1, radii_1, batch_2, radii_2): + return self.session.run( + [self._batch_1_in, self._batch_2_in], + feed_dict={ + self._features_batch1: batch_1, + self._features_batch2: batch_2, + self._radii1: radii_1, + self._radii2: radii_2, + }, + ) + + +def _batch_pairwise_distances(U, V): + """ + Compute pairwise distances between two batches of feature vectors. + """ + with tf.variable_scope("pairwise_dist_block"): + # Squared norms of each row in U and V. + norm_u = tf.reduce_sum(tf.square(U), 1) + norm_v = tf.reduce_sum(tf.square(V), 1) + + # norm_u as a column and norm_v as a row vectors. + norm_u = tf.reshape(norm_u, [-1, 1]) + norm_v = tf.reshape(norm_v, [1, -1]) + + # Pairwise squared Euclidean distances. + D = tf.maximum(norm_u - 2 * tf.matmul(U, V, False, True) + norm_v, 0.0) + + return D + + +class NpzArrayReader(ABC): + @abstractmethod + def read_batch(self, batch_size: int) -> Optional[np.ndarray]: + pass + + @abstractmethod + def remaining(self) -> int: + pass + + def read_batches(self, batch_size: int) -> Iterable[np.ndarray]: + def gen_fn(): + while True: + batch = self.read_batch(batch_size) + if batch is None: + break + yield batch + + rem = self.remaining() + num_batches = rem // batch_size + int(rem % batch_size != 0) + return BatchIterator(gen_fn, num_batches) + + +class BatchIterator: + def __init__(self, gen_fn, length): + self.gen_fn = gen_fn + self.length = length + + def __len__(self): + return self.length + + def __iter__(self): + return self.gen_fn() + + +class StreamingNpzArrayReader(NpzArrayReader): + def __init__(self, arr_f, shape, dtype): + self.arr_f = arr_f + self.shape = shape + self.dtype = dtype + self.idx = 0 + + def read_batch(self, batch_size: int) -> Optional[np.ndarray]: + if self.idx >= self.shape[0]: + return None + + bs = min(batch_size, self.shape[0] - self.idx) + self.idx += bs + + if self.dtype.itemsize == 0: + return np.ndarray([bs, *self.shape[1:]], dtype=self.dtype) + + read_count = bs * np.prod(self.shape[1:]) + read_size = int(read_count * self.dtype.itemsize) + data = _read_bytes(self.arr_f, read_size, "array data") + return np.frombuffer(data, dtype=self.dtype).reshape([bs, *self.shape[1:]]) + + def remaining(self) -> int: + return max(0, self.shape[0] - self.idx) + + +class MemoryNpzArrayReader(NpzArrayReader): + def __init__(self, arr): + self.arr = arr + self.idx = 0 + + @classmethod + def load(cls, path: str, arr_name: str): + with open(path, "rb") as f: + arr = np.load(f)[arr_name] + return cls(arr) + + def read_batch(self, batch_size: int) -> Optional[np.ndarray]: + if self.idx >= self.arr.shape[0]: + return None + + res = self.arr[self.idx : self.idx + batch_size] + self.idx += batch_size + return res + + def remaining(self) -> int: + return max(0, self.arr.shape[0] - self.idx) + + +@contextmanager +def open_npz_array(path: str, arr_name: str) -> NpzArrayReader: + with _open_npy_file(path, arr_name) as arr_f: + version = np.lib.format.read_magic(arr_f) + if version == (1, 0): + header = np.lib.format.read_array_header_1_0(arr_f) + elif version == (2, 0): + header = np.lib.format.read_array_header_2_0(arr_f) + else: + yield MemoryNpzArrayReader.load(path, arr_name) + return + shape, fortran, dtype = header + if fortran or dtype.hasobject: + yield MemoryNpzArrayReader.load(path, arr_name) + else: + yield StreamingNpzArrayReader(arr_f, shape, dtype) + + +def _read_bytes(fp, size, 
error_template="ran out of data"): + """ + Copied from: https://github.com/numpy/numpy/blob/fb215c76967739268de71aa4bda55dd1b062bc2e/numpy/lib/format.py#L788-L886 + + Read from file-like object until size bytes are read. + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + """ + data = bytes() + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except io.BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data + + +@contextmanager +def _open_npy_file(path: str, arr_name: str): + with open(path, "rb") as f: + with zipfile.ZipFile(f, "r") as zip_f: + if f"{arr_name}.npy" not in zip_f.namelist(): + raise ValueError(f"missing {arr_name} in npz file") + with zip_f.open(f"{arr_name}.npy", "r") as arr_f: + yield arr_f + + +def _download_inception_model(): + if os.path.exists(INCEPTION_V3_PATH): + return + print("downloading InceptionV3 model...") + with requests.get(INCEPTION_V3_URL, stream=True) as r: + r.raise_for_status() + tmp_path = INCEPTION_V3_PATH + ".tmp" + with open(tmp_path, "wb") as f: + for chunk in tqdm(r.iter_content(chunk_size=8192)): + f.write(chunk) + os.rename(tmp_path, INCEPTION_V3_PATH) + + +def _create_feature_graph(input_batch): + _download_inception_model() + prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}" + with open(INCEPTION_V3_PATH, "rb") as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + pool3, spatial = tf.import_graph_def( + graph_def, + input_map={f"ExpandDims:0": input_batch}, + return_elements=[FID_POOL_NAME, FID_SPATIAL_NAME], + name=prefix, + ) + _update_shapes(pool3) + spatial = spatial[..., :7] + return pool3, spatial + + +def _create_softmax_graph(input_batch): + _download_inception_model() + prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}" + with open(INCEPTION_V3_PATH, "rb") as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + (matmul,) = tf.import_graph_def( + graph_def, return_elements=[f"softmax/logits/MatMul"], name=prefix + ) + w = matmul.inputs[1] + logits = tf.matmul(input_batch, w) + return tf.nn.softmax(logits) + + +def _update_shapes(pool3): + # https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L50-L63 + ops = pool3.graph.get_operations() + for op in ops: + for o in op.outputs: + shape = o.get_shape() + if shape._dims is not None: # pylint: disable=protected-access + # shape = [s.value for s in shape] TF 1.x + shape = [s for s in shape] # TF 2.x + new_shape = [] + for j, s in enumerate(shape): + if s == 1 and j == 0: + new_shape.append(None) + else: + new_shape.append(s) + o.__dict__["_shape_val"] = tf.TensorShape(new_shape) + return pool3 + + +def _numpy_partition(arr, kth, **kwargs): + num_workers = min(cpu_count(), len(arr)) + chunk_size = len(arr) // num_workers + extra = len(arr) % num_workers + + start_idx = 0 + batches = [] + for i in range(num_workers): + size = chunk_size + 
(1 if i < extra else 0) + batches.append(arr[start_idx : start_idx + size]) + start_idx += size + + with ThreadPool(num_workers) as pool: + return list(pool.map(partial(np.partition, kth=kth, **kwargs), batches)) + + +if __name__ == "__main__": + print(REQUIREMENTS) + main() diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/evaluate_perceptualsim.py b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/evaluate_perceptualsim.py new file mode 100644 index 0000000..c85fef9 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/evaluate_perceptualsim.py @@ -0,0 +1,630 @@ +import argparse +import glob +import os +from tqdm import tqdm +from collections import namedtuple + +import numpy as np +import torch +import torchvision.transforms as transforms +from torchvision import models +from PIL import Image + +from ldm.modules.evaluate.ssim import ssim + + +transform = transforms.Compose([transforms.ToTensor()]) + +def normalize_tensor(in_feat, eps=1e-10): + norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1)).view( + in_feat.size()[0], 1, in_feat.size()[2], in_feat.size()[3] + ) + return in_feat / (norm_factor.expand_as(in_feat) + eps) + + +def cos_sim(in0, in1): + in0_norm = normalize_tensor(in0) + in1_norm = normalize_tensor(in1) + N = in0.size()[0] + X = in0.size()[2] + Y = in0.size()[3] + + return torch.mean( + torch.mean( + torch.sum(in0_norm * in1_norm, dim=1).view(N, 1, X, Y), dim=2 + ).view(N, 1, 1, Y), + dim=3, + ).view(N) + + +class squeezenet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(squeezenet, self).__init__() + pretrained_features = models.squeezenet1_1( + pretrained=pretrained + ).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.slice6 = torch.nn.Sequential() + self.slice7 = torch.nn.Sequential() + self.N_slices = 7 + for x in range(2): + self.slice1.add_module(str(x), pretrained_features[x]) + for x in range(2, 5): + self.slice2.add_module(str(x), pretrained_features[x]) + for x in range(5, 8): + self.slice3.add_module(str(x), pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), pretrained_features[x]) + for x in range(10, 11): + self.slice5.add_module(str(x), pretrained_features[x]) + for x in range(11, 12): + self.slice6.add_module(str(x), pretrained_features[x]) + for x in range(12, 13): + self.slice7.add_module(str(x), pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = self.slice5(h) + h_relu5 = h + h = self.slice6(h) + h_relu6 = h + h = self.slice7(h) + h_relu7 = h + vgg_outputs = namedtuple( + "SqueezeOutputs", + ["relu1", "relu2", "relu3", "relu4", "relu5", "relu6", "relu7"], + ) + out = vgg_outputs( + h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7 + ) + + return out + + +class alexnet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(alexnet, self).__init__() + alexnet_pretrained_features = models.alexnet( + pretrained=pretrained + ).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + 
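`cos_sim` above reduces two feature maps to one number per image: the channel-wise cosine similarity averaged over all spatial positions. `PNet` further below sums `1 - cos_sim` across layers to get the perceptual distance. A quick sanity check, assuming `cos_sim` and `normalize_tensor` are in scope:

```python
# Sanity check for cos_sim: positive multiples of the same feature map give similarity ~1.
import torch

f0 = torch.rand(2, 16, 8, 8) + 0.1
f1 = 3.0 * f0
print(cos_sim(f0, f1))   # tensor close to [1., 1.]
```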
self.N_slices = 5 + for x in range(2): + self.slice1.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(2, 5): + self.slice2.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(5, 8): + self.slice3.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(10, 12): + self.slice5.add_module(str(x), alexnet_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = self.slice5(h) + h_relu5 = h + alexnet_outputs = namedtuple( + "AlexnetOutputs", ["relu1", "relu2", "relu3", "relu4", "relu5"] + ) + out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) + + return out + + +class vgg16(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(vgg16, self).__init__() + vgg_pretrained_features = models.vgg16(pretrained=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.N_slices = 5 + for x in range(4): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(4, 9): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(9, 16): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(16, 23): + self.slice4.add_module(str(x), vgg_pretrained_features[x]) + for x in range(23, 30): + self.slice5.add_module(str(x), vgg_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1_2 = h + h = self.slice2(h) + h_relu2_2 = h + h = self.slice3(h) + h_relu3_3 = h + h = self.slice4(h) + h_relu4_3 = h + h = self.slice5(h) + h_relu5_3 = h + vgg_outputs = namedtuple( + "VggOutputs", + ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"], + ) + out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) + + return out + + +class resnet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True, num=18): + super(resnet, self).__init__() + if num == 18: + self.net = models.resnet18(pretrained=pretrained) + elif num == 34: + self.net = models.resnet34(pretrained=pretrained) + elif num == 50: + self.net = models.resnet50(pretrained=pretrained) + elif num == 101: + self.net = models.resnet101(pretrained=pretrained) + elif num == 152: + self.net = models.resnet152(pretrained=pretrained) + self.N_slices = 5 + + self.conv1 = self.net.conv1 + self.bn1 = self.net.bn1 + self.relu = self.net.relu + self.maxpool = self.net.maxpool + self.layer1 = self.net.layer1 + self.layer2 = self.net.layer2 + self.layer3 = self.net.layer3 + self.layer4 = self.net.layer4 + + def forward(self, X): + h = self.conv1(X) + h = self.bn1(h) + h = self.relu(h) + h_relu1 = h + h = self.maxpool(h) + h = self.layer1(h) + h_conv2 = h + h = self.layer2(h) + h_conv3 = h + h = self.layer3(h) + h_conv4 = h + h = self.layer4(h) + h_conv5 = h + + outputs = namedtuple( + "Outputs", ["relu1", "conv2", "conv3", "conv4", "conv5"] + ) + out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) + + return out + +# Off-the-shelf deep network +class PNet(torch.nn.Module): + """Pre-trained network with all 
channels equally weighted by default""" + + def __init__(self, pnet_type="vgg", pnet_rand=False, use_gpu=True): + super(PNet, self).__init__() + + self.use_gpu = use_gpu + + self.pnet_type = pnet_type + self.pnet_rand = pnet_rand + + self.shift = torch.Tensor([-0.030, -0.088, -0.188]).view(1, 3, 1, 1) + self.scale = torch.Tensor([0.458, 0.448, 0.450]).view(1, 3, 1, 1) + + if self.pnet_type in ["vgg", "vgg16"]: + self.net = vgg16(pretrained=not self.pnet_rand, requires_grad=False) + elif self.pnet_type == "alex": + self.net = alexnet( + pretrained=not self.pnet_rand, requires_grad=False + ) + elif self.pnet_type[:-2] == "resnet": + self.net = resnet( + pretrained=not self.pnet_rand, + requires_grad=False, + num=int(self.pnet_type[-2:]), + ) + elif self.pnet_type == "squeeze": + self.net = squeezenet( + pretrained=not self.pnet_rand, requires_grad=False + ) + + self.L = self.net.N_slices + + if use_gpu: + self.net.cuda() + self.shift = self.shift.cuda() + self.scale = self.scale.cuda() + + def forward(self, in0, in1, retPerLayer=False): + in0_sc = (in0 - self.shift.expand_as(in0)) / self.scale.expand_as(in0) + in1_sc = (in1 - self.shift.expand_as(in0)) / self.scale.expand_as(in0) + + outs0 = self.net.forward(in0_sc) + outs1 = self.net.forward(in1_sc) + + if retPerLayer: + all_scores = [] + for (kk, out0) in enumerate(outs0): + cur_score = 1.0 - cos_sim(outs0[kk], outs1[kk]) + if kk == 0: + val = 1.0 * cur_score + else: + val = val + cur_score + if retPerLayer: + all_scores += [cur_score] + + if retPerLayer: + return (val, all_scores) + else: + return val + + + + +# The SSIM metric +def ssim_metric(img1, img2, mask=None): + return ssim(img1, img2, mask=mask, size_average=False) + + +# The PSNR metric +def psnr(img1, img2, mask=None,reshape=False): + b = img1.size(0) + if not (mask is None): + b = img1.size(0) + mse_err = (img1 - img2).pow(2) * mask + if reshape: + mse_err = mse_err.reshape(b, -1).sum(dim=1) / ( + 3 * mask.reshape(b, -1).sum(dim=1).clamp(min=1) + ) + else: + mse_err = mse_err.view(b, -1).sum(dim=1) / ( + 3 * mask.view(b, -1).sum(dim=1).clamp(min=1) + ) + else: + if reshape: + mse_err = (img1 - img2).pow(2).reshape(b, -1).mean(dim=1) + else: + mse_err = (img1 - img2).pow(2).view(b, -1).mean(dim=1) + + psnr = 10 * (1 / mse_err).log10() + return psnr + + +# The perceptual similarity metric +def perceptual_sim(img1, img2, vgg16): + # First extract features + dist = vgg16(img1 * 2 - 1, img2 * 2 - 1) + + return dist + +def load_img(img_name, size=None): + try: + img = Image.open(img_name) + + if type(size) == int: + img = img.resize((size, size)) + elif size is not None: + img = img.resize((size[1], size[0])) + + img = transform(img).cuda() + img = img.unsqueeze(0) + except Exception as e: + print("Failed at loading %s " % img_name) + print(e) + img = torch.zeros(1, 3, 256, 256).cuda() + raise + return img + + +def compute_perceptual_similarity(folder, pred_img, tgt_img, take_every_other): + + # Load VGG16 for feature similarity + vgg16 = PNet().to("cuda") + vgg16.eval() + vgg16.cuda() + + values_percsim = [] + values_ssim = [] + values_psnr = [] + folders = os.listdir(folder) + for i, f in tqdm(enumerate(sorted(folders))): + pred_imgs = glob.glob(folder + f + "/" + pred_img) + tgt_imgs = glob.glob(folder + f + "/" + tgt_img) + assert len(tgt_imgs) == 1 + + perc_sim = 10000 + ssim_sim = -10 + psnr_sim = -10 + for p_img in pred_imgs: + t_img = load_img(tgt_imgs[0]) + p_img = load_img(p_img, size=t_img.shape[2:]) + t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item() + perc_sim = 
min(perc_sim, t_perc_sim) + + ssim_sim = max(ssim_sim, ssim_metric(p_img, t_img).item()) + psnr_sim = max(psnr_sim, psnr(p_img, t_img).item()) + + values_percsim += [perc_sim] + values_ssim += [ssim_sim] + values_psnr += [psnr_sim] + + if take_every_other: + n_valuespercsim = [] + n_valuesssim = [] + n_valuespsnr = [] + for i in range(0, len(values_percsim) // 2): + n_valuespercsim += [ + min(values_percsim[2 * i], values_percsim[2 * i + 1]) + ] + n_valuespsnr += [max(values_psnr[2 * i], values_psnr[2 * i + 1])] + n_valuesssim += [max(values_ssim[2 * i], values_ssim[2 * i + 1])] + + values_percsim = n_valuespercsim + values_ssim = n_valuesssim + values_psnr = n_valuespsnr + + avg_percsim = np.mean(np.array(values_percsim)) + std_percsim = np.std(np.array(values_percsim)) + + avg_psnr = np.mean(np.array(values_psnr)) + std_psnr = np.std(np.array(values_psnr)) + + avg_ssim = np.mean(np.array(values_ssim)) + std_ssim = np.std(np.array(values_ssim)) + + return { + "Perceptual similarity": (avg_percsim, std_percsim), + "PSNR": (avg_psnr, std_psnr), + "SSIM": (avg_ssim, std_ssim), + } + + +def compute_perceptual_similarity_from_list(pred_imgs_list, tgt_imgs_list, + take_every_other, + simple_format=True): + + # Load VGG16 for feature similarity + vgg16 = PNet().to("cuda") + vgg16.eval() + vgg16.cuda() + + values_percsim = [] + values_ssim = [] + values_psnr = [] + equal_count = 0 + ambig_count = 0 + for i, tgt_img in enumerate(tqdm(tgt_imgs_list)): + pred_imgs = pred_imgs_list[i] + tgt_imgs = [tgt_img] + assert len(tgt_imgs) == 1 + + if type(pred_imgs) != list: + pred_imgs = [pred_imgs] + + perc_sim = 10000 + ssim_sim = -10 + psnr_sim = -10 + assert len(pred_imgs)>0 + for p_img in pred_imgs: + t_img = load_img(tgt_imgs[0]) + p_img = load_img(p_img, size=t_img.shape[2:]) + t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item() + perc_sim = min(perc_sim, t_perc_sim) + + ssim_sim = max(ssim_sim, ssim_metric(p_img, t_img).item()) + psnr_sim = max(psnr_sim, psnr(p_img, t_img).item()) + + values_percsim += [perc_sim] + values_ssim += [ssim_sim] + if psnr_sim != np.float("inf"): + values_psnr += [psnr_sim] + else: + if torch.allclose(p_img, t_img): + equal_count += 1 + print("{} equal src and wrp images.".format(equal_count)) + else: + ambig_count += 1 + print("{} ambiguous src and wrp images.".format(ambig_count)) + + if take_every_other: + n_valuespercsim = [] + n_valuesssim = [] + n_valuespsnr = [] + for i in range(0, len(values_percsim) // 2): + n_valuespercsim += [ + min(values_percsim[2 * i], values_percsim[2 * i + 1]) + ] + n_valuespsnr += [max(values_psnr[2 * i], values_psnr[2 * i + 1])] + n_valuesssim += [max(values_ssim[2 * i], values_ssim[2 * i + 1])] + + values_percsim = n_valuespercsim + values_ssim = n_valuesssim + values_psnr = n_valuespsnr + + avg_percsim = np.mean(np.array(values_percsim)) + std_percsim = np.std(np.array(values_percsim)) + + avg_psnr = np.mean(np.array(values_psnr)) + std_psnr = np.std(np.array(values_psnr)) + + avg_ssim = np.mean(np.array(values_ssim)) + std_ssim = np.std(np.array(values_ssim)) + + if simple_format: + # just to make yaml formatting readable + return { + "Perceptual similarity": [float(avg_percsim), float(std_percsim)], + "PSNR": [float(avg_psnr), float(std_psnr)], + "SSIM": [float(avg_ssim), float(std_ssim)], + } + else: + return { + "Perceptual similarity": (avg_percsim, std_percsim), + "PSNR": (avg_psnr, std_psnr), + "SSIM": (avg_ssim, std_ssim), + } + + +def compute_perceptual_similarity_from_list_topk(pred_imgs_list, tgt_imgs_list, + 
take_every_other, resize=False): + + # Load VGG16 for feature similarity + vgg16 = PNet().to("cuda") + vgg16.eval() + vgg16.cuda() + + values_percsim = [] + values_ssim = [] + values_psnr = [] + individual_percsim = [] + individual_ssim = [] + individual_psnr = [] + for i, tgt_img in enumerate(tqdm(tgt_imgs_list)): + pred_imgs = pred_imgs_list[i] + tgt_imgs = [tgt_img] + assert len(tgt_imgs) == 1 + + if type(pred_imgs) != list: + assert False + pred_imgs = [pred_imgs] + + perc_sim = 10000 + ssim_sim = -10 + psnr_sim = -10 + sample_percsim = list() + sample_ssim = list() + sample_psnr = list() + for p_img in pred_imgs: + if resize: + t_img = load_img(tgt_imgs[0], size=(256,256)) + else: + t_img = load_img(tgt_imgs[0]) + p_img = load_img(p_img, size=t_img.shape[2:]) + + t_perc_sim = perceptual_sim(p_img, t_img, vgg16).item() + sample_percsim.append(t_perc_sim) + perc_sim = min(perc_sim, t_perc_sim) + + t_ssim = ssim_metric(p_img, t_img).item() + sample_ssim.append(t_ssim) + ssim_sim = max(ssim_sim, t_ssim) + + t_psnr = psnr(p_img, t_img).item() + sample_psnr.append(t_psnr) + psnr_sim = max(psnr_sim, t_psnr) + + values_percsim += [perc_sim] + values_ssim += [ssim_sim] + values_psnr += [psnr_sim] + individual_percsim.append(sample_percsim) + individual_ssim.append(sample_ssim) + individual_psnr.append(sample_psnr) + + if take_every_other: + assert False, "Do this later, after specifying topk to get proper results" + n_valuespercsim = [] + n_valuesssim = [] + n_valuespsnr = [] + for i in range(0, len(values_percsim) // 2): + n_valuespercsim += [ + min(values_percsim[2 * i], values_percsim[2 * i + 1]) + ] + n_valuespsnr += [max(values_psnr[2 * i], values_psnr[2 * i + 1])] + n_valuesssim += [max(values_ssim[2 * i], values_ssim[2 * i + 1])] + + values_percsim = n_valuespercsim + values_ssim = n_valuesssim + values_psnr = n_valuespsnr + + avg_percsim = np.mean(np.array(values_percsim)) + std_percsim = np.std(np.array(values_percsim)) + + avg_psnr = np.mean(np.array(values_psnr)) + std_psnr = np.std(np.array(values_psnr)) + + avg_ssim = np.mean(np.array(values_ssim)) + std_ssim = np.std(np.array(values_ssim)) + + individual_percsim = np.array(individual_percsim) + individual_psnr = np.array(individual_psnr) + individual_ssim = np.array(individual_ssim) + + return { + "avg_of_best": { + "Perceptual similarity": [float(avg_percsim), float(std_percsim)], + "PSNR": [float(avg_psnr), float(std_psnr)], + "SSIM": [float(avg_ssim), float(std_ssim)], + }, + "individual": { + "PSIM": individual_percsim, + "PSNR": individual_psnr, + "SSIM": individual_ssim, + } + } + + +if __name__ == "__main__": + args = argparse.ArgumentParser() + args.add_argument("--folder", type=str, default="") + args.add_argument("--pred_image", type=str, default="") + args.add_argument("--target_image", type=str, default="") + args.add_argument("--take_every_other", action="store_true", default=False) + args.add_argument("--output_file", type=str, default="") + + opts = args.parse_args() + + folder = opts.folder + pred_img = opts.pred_image + tgt_img = opts.target_image + + results = compute_perceptual_similarity( + folder, pred_img, tgt_img, opts.take_every_other + ) + + f = open(opts.output_file, 'w') + for key in results: + print("%s for %s: \n" % (key, opts.folder)) + print( + "\t {:0.4f} | {:0.4f} \n".format(results[key][0], results[key][1]) + ) + + f.write("%s for %s: \n" % (key, opts.folder)) + f.write( + "\t {:0.4f} | {:0.4f} \n".format(results[key][0], results[key][1]) + ) + + f.close() diff --git 
a/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/frechet_video_distance.py b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/frechet_video_distance.py new file mode 100644 index 0000000..d9e13c4 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/frechet_video_distance.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# Copyright 2022 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python2, python3 +"""Minimal Reference implementation for the Frechet Video Distance (FVD). + +FVD is a metric for the quality of video generation models. It is inspired by +the FID (Frechet Inception Distance) used for images, but uses a different +embedding to be better suitable for videos. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import six +import tensorflow.compat.v1 as tf +import tensorflow_gan as tfgan +import tensorflow_hub as hub + + +def preprocess(videos, target_resolution): + """Runs some preprocessing on the videos for I3D model. + + Args: + videos: [batch_size, num_frames, height, width, depth] The videos to be + preprocessed. We don't care about the specific dtype of the videos, it can + be anything that tf.image.resize_bilinear accepts. Values are expected to + be in the range 0-255. + target_resolution: (width, height): target video resolution + + Returns: + videos: [batch_size, num_frames, height, width, depth] + """ + videos_shape = list(videos.shape) + all_frames = tf.reshape(videos, [-1] + videos_shape[-3:]) + resized_videos = tf.image.resize_bilinear(all_frames, size=target_resolution) + target_shape = [videos_shape[0], -1] + list(target_resolution) + [3] + output_videos = tf.reshape(resized_videos, target_shape) + scaled_videos = 2. * tf.cast(output_videos, tf.float32) / 255. - 1 + return scaled_videos + + +def _is_in_graph(tensor_name): + """Checks whether a given tensor does exists in the graph.""" + try: + tf.get_default_graph().get_tensor_by_name(tensor_name) + except KeyError: + return False + return True + + +def create_id3_embedding(videos,warmup=False,batch_size=16): + """Embeds the given videos using the Inflated 3D Convolution ne twork. + + Downloads the graph of the I3D from tf.hub and adds it to the graph on the + first call. + + Args: + videos: [batch_size, num_frames, height=224, width=224, depth=3]. + Expected range is [-1, 1]. + + Returns: + embedding: [batch_size, embedding_size]. embedding_size depends + on the model used. + + Raises: + ValueError: when a provided embedding_layer is not supported. + """ + + # batch_size = 16 + module_spec = "https://tfhub.dev/deepmind/i3d-kinetics-400/1" + + + # Making sure that we import the graph separately for + # each different input video tensor. 
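+  # Added clarifying comment: the tensor's unique name is baked into module_name /
+  # module_scope below, and _is_in_graph() is checked before constructing the
+  # hub.Module, so the I3D graph is only imported once per distinct input placeholder.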
+ module_name = "fvd_kinetics-400_id3_module_" + six.ensure_str( + videos.name).replace(":", "_") + + + + assert_ops = [ + tf.Assert( + tf.reduce_max(videos) <= 1.001, + ["max value in frame is > 1", videos]), + tf.Assert( + tf.reduce_min(videos) >= -1.001, + ["min value in frame is < -1", videos]), + tf.assert_equal( + tf.shape(videos)[0], + batch_size, ["invalid frame batch size: ", + tf.shape(videos)], + summarize=6), + ] + with tf.control_dependencies(assert_ops): + videos = tf.identity(videos) + + module_scope = "%s_apply_default/" % module_name + + # To check whether the module has already been loaded into the graph, we look + # for a given tensor name. If this tensor name exists, we assume the function + # has been called before and the graph was imported. Otherwise we import it. + # Note: in theory, the tensor could exist, but have wrong shapes. + # This will happen if create_id3_embedding is called with a frames_placehoder + # of wrong size/batch size, because even though that will throw a tf.Assert + # on graph-execution time, it will insert the tensor (with wrong shape) into + # the graph. This is why we need the following assert. + if warmup: + video_batch_size = int(videos.shape[0]) + assert video_batch_size in [batch_size, -1, None], f"Invalid batch size {video_batch_size}" + tensor_name = module_scope + "RGB/inception_i3d/Mean:0" + if not _is_in_graph(tensor_name): + i3d_model = hub.Module(module_spec, name=module_name) + i3d_model(videos) + + # gets the kinetics-i3d-400-logits layer + tensor_name = module_scope + "RGB/inception_i3d/Mean:0" + tensor = tf.get_default_graph().get_tensor_by_name(tensor_name) + return tensor + + +def calculate_fvd(real_activations, + generated_activations): + """Returns a list of ops that compute metrics as funcs of activations. + + Args: + real_activations: [num_samples, embedding_size] + generated_activations: [num_samples, embedding_size] + + Returns: + A scalar that contains the requested FVD. 
+ """ + return tfgan.eval.frechet_classifier_distance_from_activations( + real_activations, generated_activations) diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/ssim.py b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/ssim.py new file mode 100644 index 0000000..4e8883c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/ssim.py @@ -0,0 +1,124 @@ +# MIT Licence + +# Methods to predict the SSIM, taken from +# https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py + +from math import exp + +import torch +import torch.nn.functional as F +from torch.autograd import Variable + +def gaussian(window_size, sigma): + gauss = torch.Tensor( + [ + exp(-((x - window_size // 2) ** 2) / float(2 * sigma ** 2)) + for x in range(window_size) + ] + ) + return gauss / gauss.sum() + + +def create_window(window_size, channel): + _1D_window = gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) + window = Variable( + _2D_window.expand(channel, 1, window_size, window_size).contiguous() + ) + return window + + +def _ssim( + img1, img2, window, window_size, channel, mask=None, size_average=True +): + mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) + mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) + mu1_mu2 = mu1 * mu2 + + sigma1_sq = ( + F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) + - mu1_sq + ) + sigma2_sq = ( + F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) + - mu2_sq + ) + sigma12 = ( + F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) + - mu1_mu2 + ) + + C1 = (0.01) ** 2 + C2 = (0.03) ** 2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ( + (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2) + ) + + if not (mask is None): + b = mask.size(0) + ssim_map = ssim_map.mean(dim=1, keepdim=True) * mask + ssim_map = ssim_map.view(b, -1).sum(dim=1) / mask.view(b, -1).sum( + dim=1 + ).clamp(min=1) + return ssim_map + + import pdb + + pdb.set_trace + + if size_average: + return ssim_map.mean() + else: + return ssim_map.mean(1).mean(1).mean(1) + + +class SSIM(torch.nn.Module): + def __init__(self, window_size=11, size_average=True): + super(SSIM, self).__init__() + self.window_size = window_size + self.size_average = size_average + self.channel = 1 + self.window = create_window(window_size, self.channel) + + def forward(self, img1, img2, mask=None): + (_, channel, _, _) = img1.size() + + if ( + channel == self.channel + and self.window.data.type() == img1.data.type() + ): + window = self.window + else: + window = create_window(self.window_size, channel) + + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + + self.window = window + self.channel = channel + + return _ssim( + img1, + img2, + window, + self.window_size, + channel, + mask, + self.size_average, + ) + + +def ssim(img1, img2, window_size=11, mask=None, size_average=True): + (_, channel, _, _) = img1.size() + window = create_window(window_size, channel) + + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + + return _ssim(img1, img2, window, window_size, channel, mask, size_average) diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/torch_frechet_video_distance.py 
b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/torch_frechet_video_distance.py new file mode 100644 index 0000000..04856b8 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/evaluate/torch_frechet_video_distance.py @@ -0,0 +1,294 @@ +# based on https://github.com/universome/fvd-comparison/blob/master/compare_models.py; huge thanks! +import os +import numpy as np +import io +import re +import requests +import html +import hashlib +import urllib +import urllib.request +import scipy.linalg +import multiprocessing as mp +import glob + + +from tqdm import tqdm +from typing import Any, List, Tuple, Union, Dict, Callable + +from torchvision.io import read_video +import torch; torch.set_grad_enabled(False) +from einops import rearrange + +from nitro.util import isvideo + +def compute_frechet_distance(mu_sample,sigma_sample,mu_ref,sigma_ref) -> float: + print('Calculate frechet distance...') + m = np.square(mu_sample - mu_ref).sum() + s, _ = scipy.linalg.sqrtm(np.dot(sigma_sample, sigma_ref), disp=False) # pylint: disable=no-member + fid = np.real(m + np.trace(sigma_sample + sigma_ref - s * 2)) + + return float(fid) + + +def compute_stats(feats: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + mu = feats.mean(axis=0) # [d] + sigma = np.cov(feats, rowvar=False) # [d, d] + + return mu, sigma + + +def open_url(url: str, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False) -> Any: + """Download the given URL and return a binary-mode file object to access the data.""" + assert num_attempts >= 1 + + # Doesn't look like an URL scheme so interpret it as a local filename. + if not re.match('^[a-z]+://', url): + return url if return_filename else open(url, "rb") + + # Handle file URLs. This code handles unusual file:// patterns that + # arise on Windows: + # + # file:///c:/foo.txt + # + # which would translate to a local '/c:/foo.txt' filename that's + # invalid. Drop the forward slash for such pathnames. + # + # If you touch this code path, you should test it on both Linux and + # Windows. + # + # Some internet resources suggest using urllib.request.url2pathname() but + # but that converts forward slashes to backslashes and this causes + # its own set of problems. + if url.startswith('file://'): + filename = urllib.parse.urlparse(url).path + if re.match(r'^/[a-zA-Z]:', filename): + filename = filename[1:] + return filename if return_filename else open(filename, "rb") + + url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() + + # Download. + url_name = None + url_data = None + with requests.Session() as session: + if verbose: + print("Downloading %s ..." 
% url, end="", flush=True) + for attempts_left in reversed(range(num_attempts)): + try: + with session.get(url) as res: + res.raise_for_status() + if len(res.content) == 0: + raise IOError("No data received") + + if len(res.content) < 8192: + content_str = res.content.decode("utf-8") + if "download_warning" in res.headers.get("Set-Cookie", ""): + links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] + if len(links) == 1: + url = requests.compat.urljoin(url, links[0]) + raise IOError("Google Drive virus checker nag") + if "Google Drive - Quota exceeded" in content_str: + raise IOError("Google Drive download quota exceeded -- please try again later") + + match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) + url_name = match[1] if match else url + url_data = res.content + if verbose: + print(" done") + break + except KeyboardInterrupt: + raise + except: + if not attempts_left: + if verbose: + print(" failed") + raise + if verbose: + print(".", end="", flush=True) + + # Return data as file object. + assert not return_filename + return io.BytesIO(url_data) + +def load_video(ip): + vid, *_ = read_video(ip) + vid = rearrange(vid, 't h w c -> t c h w').to(torch.uint8) + return vid + +def get_data_from_str(input_str,nprc = None): + assert os.path.isdir(input_str), f'Specified input folder "{input_str}" is not a directory' + vid_filelist = glob.glob(os.path.join(input_str,'*.mp4')) + print(f'Found {len(vid_filelist)} videos in dir {input_str}') + + if nprc is None: + try: + nprc = mp.cpu_count() + except NotImplementedError: + print('WARNING: cpu_count() not avlailable, using only 1 cpu for video loading') + nprc = 1 + + pool = mp.Pool(processes=nprc) + + vids = [] + for v in tqdm(pool.imap_unordered(load_video,vid_filelist),total=len(vid_filelist),desc='Loading videos...'): + vids.append(v) + + + vids = torch.stack(vids,dim=0).float() + + return vids + +def get_stats(stats): + assert os.path.isfile(stats) and stats.endswith('.npz'), f'no stats found under {stats}' + + print(f'Using precomputed statistics under {stats}') + stats = np.load(stats) + stats = {key: stats[key] for key in stats.files} + + return stats + + + + +@torch.no_grad() +def compute_fvd(ref_input, sample_input, bs=32, + ref_stats=None, + sample_stats=None, + nprc_load=None): + + + + calc_stats = ref_stats is None or sample_stats is None + + if calc_stats: + + only_ref = sample_stats is not None + only_sample = ref_stats is not None + + + if isinstance(ref_input,str) and not only_sample: + ref_input = get_data_from_str(ref_input,nprc_load) + + if isinstance(sample_input, str) and not only_ref: + sample_input = get_data_from_str(sample_input, nprc_load) + + stats = compute_statistics(sample_input,ref_input, + device='cuda' if torch.cuda.is_available() else 'cpu', + bs=bs, + only_ref=only_ref, + only_sample=only_sample) + + if only_ref: + stats.update(get_stats(sample_stats)) + elif only_sample: + stats.update(get_stats(ref_stats)) + + + + else: + stats = get_stats(sample_stats) + stats.update(get_stats(ref_stats)) + + fvd = compute_frechet_distance(**stats) + + return {'FVD' : fvd,} + + +@torch.no_grad() +def compute_statistics(videos_fake, videos_real, device: str='cuda', bs=32, only_ref=False,only_sample=False) -> Dict: + detector_url = 'https://www.dropbox.com/s/ge9e5ujwgetktms/i3d_torchscript.pt?dl=1' + detector_kwargs = dict(rescale=True, resize=True, return_features=True) # Return raw features before 
the softmax layer.
+
+    with open_url(detector_url, verbose=False) as f:
+        detector = torch.jit.load(f).eval().to(device)
+
+
+    assert not (only_sample and only_ref), 'only_ref and only_sample arguments are mutually exclusive'
+
+    ref_embed, sample_embed = [], []
+
+    info = f'Computing I3D activations for FVD score with batch size {bs}'
+
+    if only_ref:
+
+        if not isvideo(videos_real):
+            # if not a video, we assume numpy arrays of shape (n_vids, t, h, w, c) in range [0, 255]
+            videos_real = torch.from_numpy(videos_real).permute(0, 4, 1, 2, 3).float()
+            print(videos_real.shape)
+
+        if videos_real.shape[0] % bs == 0:
+            n_secs = videos_real.shape[0] // bs
+        else:
+            n_secs = videos_real.shape[0] // bs + 1
+
+        videos_real = torch.tensor_split(videos_real, n_secs, dim=0)
+
+        for ref_v in tqdm(videos_real, total=len(videos_real), desc=info):
+            feats_ref = detector(ref_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
+            ref_embed.append(feats_ref)
+
+    elif only_sample:
+
+        if not isvideo(videos_fake):
+            # if not a video, we assume numpy arrays of shape (n_vids, t, h, w, c) in range [0, 255]
+            videos_fake = torch.from_numpy(videos_fake).permute(0, 4, 1, 2, 3).float()
+            print(videos_fake.shape)
+
+        if videos_fake.shape[0] % bs == 0:
+            n_secs = videos_fake.shape[0] // bs
+        else:
+            n_secs = videos_fake.shape[0] // bs + 1
+
+        videos_fake = torch.tensor_split(videos_fake, n_secs, dim=0)
+
+        for sample_v in tqdm(videos_fake, total=len(videos_fake), desc=info):
+            feats_sample = detector(sample_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
+            sample_embed.append(feats_sample)
+
+    else:
+
+        if not isvideo(videos_real):
+            # if not a video, we assume numpy arrays of shape (n_vids, t, h, w, c) in range [0, 255]
+            videos_real = torch.from_numpy(videos_real).permute(0, 4, 1, 2, 3).float()
+
+        if not isvideo(videos_fake):
+            videos_fake = torch.from_numpy(videos_fake).permute(0, 4, 1, 2, 3).float()
+
+        if videos_fake.shape[0] % bs == 0:
+            n_secs = videos_fake.shape[0] // bs
+        else:
+            n_secs = videos_fake.shape[0] // bs + 1
+
+        videos_real = torch.tensor_split(videos_real, n_secs, dim=0)
+        videos_fake = torch.tensor_split(videos_fake, n_secs, dim=0)
+
+        for ref_v, sample_v in tqdm(zip(videos_real, videos_fake), total=len(videos_fake), desc=info):
+            # print(ref_v.shape)
+            # ref_v = torch.nn.functional.interpolate(ref_v, size=(sample_v.shape[2], 256, 256), mode='trilinear', align_corners=False)
+            # sample_v = torch.nn.functional.interpolate(sample_v, size=(sample_v.shape[2], 256, 256), mode='trilinear', align_corners=False)
+
+            feats_sample = detector(sample_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
+            feats_ref = detector(ref_v.to(device).contiguous(), **detector_kwargs).cpu().numpy()
+            sample_embed.append(feats_sample)
+            ref_embed.append(feats_ref)
+
+    out = dict()
+    if len(sample_embed) > 0:
+        sample_embed = np.concatenate(sample_embed, axis=0)
+        mu_sample, sigma_sample = compute_stats(sample_embed)
+        out.update({'mu_sample': mu_sample,
+                    'sigma_sample': sigma_sample})
+
+    if len(ref_embed) > 0:
+        ref_embed = np.concatenate(ref_embed, axis=0)
+        mu_ref, sigma_ref = compute_stats(ref_embed)
+        out.update({'mu_ref': mu_ref,
+                    'sigma_ref': sigma_ref})
+
+    return out
diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/__init__.py b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/__init__.py
new file mode 100644
index 0000000..7836cad
--- /dev/null
+++ b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/__init__.py
@@ -0,0 +1,2 @@ +from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr +from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/bsrgan.py b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/bsrgan.py new file mode 100644 index 0000000..32ef561 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/bsrgan.py @@ -0,0 +1,730 @@ +# -*- coding: utf-8 -*- +""" +# -------------------------------------------- +# Super-Resolution +# -------------------------------------------- +# +# Kai Zhang (cskaizhang@gmail.com) +# https://github.com/cszn +# From 2019/03--2021/08 +# -------------------------------------------- +""" + +import numpy as np +import cv2 +import torch + +from functools import partial +import random +from scipy import ndimage +import scipy +import scipy.stats as ss +from scipy.interpolate import interp2d +from scipy.linalg import orth +import albumentations + +import ldm.modules.image_degradation.utils_image as util + + +def modcrop_np(img, sf): + ''' + Args: + img: numpy image, WxH or WxHxC + sf: scale factor + Return: + cropped image + ''' + w, h = img.shape[:2] + im = np.copy(img) + return im[:w - w % sf, :h - h % sf, ...] + + +""" +# -------------------------------------------- +# anisotropic Gaussian kernels +# -------------------------------------------- +""" + + +def analytic_kernel(k): + """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" + k_size = k.shape[0] + # Calculate the big kernels size + big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) + # Loop over the small kernel to fill the big one + for r in range(k_size): + for c in range(k_size): + big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k + # Crop the edges of the big kernel to ignore very small values and increase run time of SR + crop = k_size // 2 + cropped_big_k = big_k[crop:-crop, crop:-crop] + # Normalize to 1 + return cropped_big_k / cropped_big_k.sum() + + +def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): + """ generate an anisotropic Gaussian kernel + Args: + ksize : e.g., 15, kernel size + theta : [0, pi], rotation angle range + l1 : [0.1,50], scaling of eigenvalues + l2 : [0.1,l1], scaling of eigenvalues + If l1 = l2, will get an isotropic Gaussian kernel. 
+ Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = 
[(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = sigma + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) + arg = -(x * x + y * y) / (2 * std * std) + h = np.exp(arg) + h[h < scipy.finfo(float).eps * h.max()] = 0 + sumh = h.sum() + if sumh != 0: + h = h / sumh + return h + + +def fspecial_laplacian(alpha): + alpha = max([0, min([alpha, 1])]) + h1 = alpha / (alpha + 1) + h2 = (1 - alpha) / (alpha + 1) + h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] + h = np.array(h) + return h + + +def fspecial(filter_type, *args, **kwargs): + ''' + python code from: + https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py + ''' + if filter_type == 'gaussian': + return fspecial_gaussian(*args, **kwargs) + if filter_type == 'laplacian': + return fspecial_laplacian(*args, **kwargs) + + +""" +# -------------------------------------------- +# degradation models +# -------------------------------------------- +""" + + +def bicubic_degradation(x, sf=3): + ''' + Args: + x: HxWxC image, [0, 1] + sf: down-scale factor + Return: + bicubicly downsampled LR image + ''' + x = util.imresize_np(x, scale=1 / sf) + return x + + +def srmd_degradation(x, k, sf=3): + ''' blur + bicubic downsampling + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2018learning, + title={Learning a single convolutional super-resolution network for multiple degradations}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={3262--3271}, + year={2018} + } + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' + x = bicubic_degradation(x, sf=sf) + return x + + +def dpsr_degradation(x, k, sf=3): + ''' bicubic downsampling + blur + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2019deep, + title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={1671--1681}, + year={2019} + } + ''' + x = bicubic_degradation(x, sf=sf) + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + return x + + +def classical_degradation(x, k, sf=3): + ''' blur + downsampling + Args: + x: HxWxC image, [0, 1]/[0, 255] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) + st = 0 + return x[st::sf, st::sf, ...] + + +def add_sharpening(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. borrowed from real-ESRGAN + Input image: I; Blurry image: B. + 1. K = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * K + (1 - Mask) * I + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. 
+ threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. +# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. 
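+    # Added clarifying comment: vals ~ 10**U(2, 4) acts as a photon-count scale; the
+    # image is scaled up, Poisson-sampled, then scaled back down, so larger vals
+    # yields weaker shot noise.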
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. + noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(30, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] 
# nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + elif i == 1: + image = add_blur(image, sf=sf) + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] 
# nearest downsampling + image = np.clip(image, 0.0, 1.0) + + elif i == 3: + # downsample3 + image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + image = np.clip(image, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + image = add_JPEG_noise(image) + + # elif i == 6: + # # add processed camera sensor noise + # if random.random() < isp_prob and isp_model is not None: + # with torch.no_grad(): + # img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + image = add_JPEG_noise(image) + image = util.single2uint(image) + example = {"image":image} + return example + + +# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... +def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): + """ + This is an extended degradation model by combining + the degradation models of BSRGAN and Real-ESRGAN + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + use_shuffle: the degradation shuffle + use_sharp: sharpening the img + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + if use_sharp: + img = add_sharpening(img) + hq = img.copy() + + if random.random() < shuffle_prob: + shuffle_order = random.sample(range(13), 13) + else: + shuffle_order = list(range(13)) + # local shuffle for noise, JPEG is always the last one + shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) + shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) + + poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 + + for i in shuffle_order: + if i == 0: + img = add_blur(img, sf=sf) + elif i == 1: + img = add_resize(img, sf=sf) + elif i == 2: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 3: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 4: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 5: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + elif i == 6: + img = add_JPEG_noise(img) + elif i == 7: + img = add_blur(img, sf=sf) + elif i == 8: + img = add_resize(img, sf=sf) + elif i == 9: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 10: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 11: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 12: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + else: + print('check the shuffle!') + + # resize to desired size + img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), + interpolation=random.choice([1, 2, 3])) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf, lq_patchsize) + 
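+    # Added clarifying comment: per the docstring above, img is the degraded
+    # low-quality patch (lq_patchsize x lq_patchsize) and hq is the matching
+    # high-quality patch that is sf times larger.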
+    return img, hq
+
+
+if __name__ == '__main__':
+    print("hey")
+    img = util.imread_uint('utils/test.png', 3)
+    print(img)
+    img = util.uint2single(img)
+    print(img)
+    img = img[:448, :448]
+    h = img.shape[0] // 4
+    print("resizing to", h)
+    sf = 4
+    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+    img_hq = img  # the clean (cropped) image serves as the high-quality reference
+    for i in range(20):
+        print(i)
+        # degradation_bsrgan_variant returns {"image": uint8 LR image}; unpack it and
+        # convert back to a [0, 1] float image so the prints and conversions below work
+        img_lq = util.uint2single(deg_fn(img)["image"])
+        print(img_lq)
+        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
+        print(img_lq.shape)
+        print("bicubic", img_lq_bicubic.shape)
+        print(img_hq.shape)
+        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                interpolation=0)
+        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                        interpolation=0)
+        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+        util.imsave(img_concat, str(i) + '.png')
+
+
diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/bsrgan_light.py b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/bsrgan_light.py
new file mode 100644
index 0000000..dfa7606
--- /dev/null
+++ b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/bsrgan_light.py
@@ -0,0 +1,650 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import cv2
+import torch
+
+from functools import partial
+import random
+from scipy import ndimage
+import scipy
+import scipy.stats as ss
+from scipy.interpolate import interp2d
+from scipy.linalg import orth
+import albumentations
+
+import ldm.modules.image_degradation.utils_image as util
+
+"""
+# --------------------------------------------
+# Super-Resolution
+# --------------------------------------------
+#
+# Kai Zhang (cskaizhang@gmail.com)
+# https://github.com/cszn
+# From 2019/03--2021/08
+# --------------------------------------------
+"""
+
+
+def modcrop_np(img, sf):
+    '''
+    Args:
+        img: numpy image, WxH or WxHxC
+        sf: scale factor
+    Return:
+        cropped image
+    '''
+    w, h = img.shape[:2]
+    im = np.copy(img)
+    return im[:w - w % sf, :h - h % sf, ...]
+
+
+"""
+# --------------------------------------------
+# anisotropic Gaussian kernels
+# --------------------------------------------
+"""
+
+
+def analytic_kernel(k):
+    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
+    k_size = k.shape[0]
+    # Calculate the big kernels size
+    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
+    # Loop over the small kernel to fill the big one
+    for r in range(k_size):
+        for c in range(k_size):
+            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
+    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
+    crop = k_size // 2
+    cropped_big_k = big_k[crop:-crop, crop:-crop]
+    # Normalize to 1
+    return cropped_big_k / cropped_big_k.sum()
+
+
+def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
+    """ generate an anisotropic Gaussian kernel
+    Args:
+        ksize : e.g., 15, kernel size
+        theta : [0, pi], rotation angle range
+        l1    : [0.1,50], scaling of eigenvalues
+        l2    : [0.1,l1], scaling of eigenvalues
+        If l1 = l2, will get an isotropic Gaussian kernel.
+ Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = 
[(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = sigma + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) + arg = -(x * x + y * y) / (2 * std * std) + h = np.exp(arg) + h[h < scipy.finfo(float).eps * h.max()] = 0 + sumh = h.sum() + if sumh != 0: + h = h / sumh + return h + + +def fspecial_laplacian(alpha): + alpha = max([0, min([alpha, 1])]) + h1 = alpha / (alpha + 1) + h2 = (1 - alpha) / (alpha + 1) + h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] + h = np.array(h) + return h + + +def fspecial(filter_type, *args, **kwargs): + ''' + python code from: + https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py + ''' + if filter_type == 'gaussian': + return fspecial_gaussian(*args, **kwargs) + if filter_type == 'laplacian': + return fspecial_laplacian(*args, **kwargs) + + +""" +# -------------------------------------------- +# degradation models +# -------------------------------------------- +""" + + +def bicubic_degradation(x, sf=3): + ''' + Args: + x: HxWxC image, [0, 1] + sf: down-scale factor + Return: + bicubicly downsampled LR image + ''' + x = util.imresize_np(x, scale=1 / sf) + return x + + +def srmd_degradation(x, k, sf=3): + ''' blur + bicubic downsampling + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2018learning, + title={Learning a single convolutional super-resolution network for multiple degradations}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={3262--3271}, + year={2018} + } + ''' + x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' + x = bicubic_degradation(x, sf=sf) + return x + + +def dpsr_degradation(x, k, sf=3): + ''' bicubic downsampling + blur + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2019deep, + title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={1671--1681}, + year={2019} + } + ''' + x = bicubic_degradation(x, sf=sf) + x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + return x + + +def classical_degradation(x, k, sf=3): + ''' blur + downsampling + Args: + x: HxWxC image, [0, 1]/[0, 255] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + ''' + x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) + st = 0 + return x[st::sf, st::sf, ...] + + +def add_sharpening(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. borrowed from real-ESRGAN + Input image: I; Blurry image: B. + 1. K = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * K + (1 - Mask) * I + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. 
+ threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + + wd2 = wd2/4 + wd = wd/4 + + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) + img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. +# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. 
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. + noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(80, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] 
# nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + # elif i == 1: + # image = add_blur(image, sf=sf) + + if i == 0: + pass + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.8: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] 
# nearest downsampling + + image = np.clip(image, 0.0, 1.0) + + elif i == 3: + # downsample3 + image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + image = np.clip(image, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + image = add_JPEG_noise(image) + # + # elif i == 6: + # # add processed camera sensor noise + # if random.random() < isp_prob and isp_model is not None: + # with torch.no_grad(): + # img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + image = add_JPEG_noise(image) + image = util.single2uint(image) + example = {"image": image} + return example + + + + +if __name__ == '__main__': + print("hey") + img = util.imread_uint('utils/test.png', 3) + img = img[:448, :448] + h = img.shape[0] // 4 + print("resizing to", h) + sf = 4 + deg_fn = partial(degradation_bsrgan_variant, sf=sf) + for i in range(20): + print(i) + img_hq = img + img_lq = deg_fn(img)["image"] + img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) + print(img_lq) + img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] + print(img_lq.shape) + print("bicubic", img_lq_bicubic.shape) + print(img_hq.shape) + lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), + (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) + util.imsave(img_concat, str(i) + '.png') diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/utils/test.png b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/utils/test.png new file mode 100644 index 0000000..4249b43 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/utils/test.png differ diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/utils_image.py b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/utils_image.py new file mode 100644 index 0000000..0175f15 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/image_degradation/utils_image.py @@ -0,0 +1,916 @@ +import os +import math +import random +import numpy as np +import torch +import cv2 +from torchvision.utils import make_grid +from datetime import datetime +#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py + + +os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" + + +''' +# -------------------------------------------- +# Kai Zhang (github: https://github.com/cszn) +# 03/Mar/2019 +# -------------------------------------------- +# https://github.com/twhui/SRGAN-pyTorch +# https://github.com/xinntao/BasicSR +# -------------------------------------------- +''' + + +IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif'] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def get_timestamp(): + return datetime.now().strftime('%y%m%d-%H%M%S') + + +def imshow(x, title=None, cbar=False, 
figsize=None): + plt.figure(figsize=figsize) + plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray') + if title: + plt.title(title) + if cbar: + plt.colorbar() + plt.show() + + +def surf(Z, cmap='rainbow', figsize=None): + plt.figure(figsize=figsize) + ax3 = plt.axes(projection='3d') + + w, h = Z.shape[:2] + xx = np.arange(0,w,1) + yy = np.arange(0,h,1) + X, Y = np.meshgrid(xx, yy) + ax3.plot_surface(X,Y,Z,cmap=cmap) + #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap) + plt.show() + + +''' +# -------------------------------------------- +# get image pathes +# -------------------------------------------- +''' + + +def get_image_paths(dataroot): + paths = None # return None if dataroot is None + if dataroot is not None: + paths = sorted(_get_paths_from_images(dataroot)) + return paths + + +def _get_paths_from_images(path): + assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) + images = [] + for dirpath, _, fnames in sorted(os.walk(path)): + for fname in sorted(fnames): + if is_image_file(fname): + img_path = os.path.join(dirpath, fname) + images.append(img_path) + assert images, '{:s} has no valid image file'.format(path) + return images + + +''' +# -------------------------------------------- +# split large images into small images +# -------------------------------------------- +''' + + +def patches_from_image(img, p_size=512, p_overlap=64, p_max=800): + w, h = img.shape[:2] + patches = [] + if w > p_max and h > p_max: + w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int)) + h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int)) + w1.append(w-p_size) + h1.append(h-p_size) +# print(w1) +# print(h1) + for i in w1: + for j in h1: + patches.append(img[i:i+p_size, j:j+p_size,:]) + else: + patches.append(img) + + return patches + + +def imssave(imgs, img_path): + """ + imgs: list, N images of size WxHxC + """ + img_name, ext = os.path.splitext(os.path.basename(img_path)) + + for i, img in enumerate(imgs): + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png') + cv2.imwrite(new_path, img) + + +def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000): + """ + split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size), + and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max) + will be splitted. + Args: + original_dataroot: + taget_dataroot: + p_size: size of small images + p_overlap: patch size in training is a good choice + p_max: images with smaller size than (p_max)x(p_max) keep unchanged. + """ + paths = get_image_paths(original_dataroot) + for img_path in paths: + # img_name, ext = os.path.splitext(os.path.basename(img_path)) + img = imread_uint(img_path, n_channels=n_channels) + patches = patches_from_image(img, p_size, p_overlap, p_max) + imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path))) + #if original_dataroot == taget_dataroot: + #del img_path + +''' +# -------------------------------------------- +# makedir +# -------------------------------------------- +''' + + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + + +def mkdirs(paths): + if isinstance(paths, str): + mkdir(paths) + else: + for path in paths: + mkdir(path) + + +def mkdir_and_rename(path): + if os.path.exists(path): + new_name = path + '_archived_' + get_timestamp() + print('Path already exists. 
Rename it to [{:s}]'.format(new_name)) + os.rename(path, new_name) + os.makedirs(path) + + +''' +# -------------------------------------------- +# read image from path +# opencv is fast, but read BGR numpy image +# -------------------------------------------- +''' + + +# -------------------------------------------- +# get uint8 image of size HxWxn_channles (RGB) +# -------------------------------------------- +def imread_uint(path, n_channels=3): + # input: path + # output: HxWx3(RGB or GGG), or HxWx1 (G) + if n_channels == 1: + img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE + img = np.expand_dims(img, axis=2) # HxWx1 + elif n_channels == 3: + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G + if img.ndim == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG + else: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB + return img + + +# -------------------------------------------- +# matlab's imwrite +# -------------------------------------------- +def imsave(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + +def imwrite(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + + + +# -------------------------------------------- +# get single image of size HxWxn_channles (BGR) +# -------------------------------------------- +def read_img(path): + # read image by cv2 + # return: Numpy float32, HWC, BGR, [0,1] + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE + img = img.astype(np.float32) / 255. + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + # some images have 4 channels + if img.shape[2] > 3: + img = img[:, :, :3] + return img + + +''' +# -------------------------------------------- +# image format conversion +# -------------------------------------------- +# numpy(single) <---> numpy(unit) +# numpy(single) <---> tensor +# numpy(unit) <---> tensor +# -------------------------------------------- +''' + + +# -------------------------------------------- +# numpy(single) [0, 1] <---> numpy(unit) +# -------------------------------------------- + + +def uint2single(img): + + return np.float32(img/255.) + + +def single2uint(img): + + return np.uint8((img.clip(0, 1)*255.).round()) + + +def uint162single(img): + + return np.float32(img/65535.) + + +def single2uint16(img): + + return np.uint16((img.clip(0, 1)*65535.).round()) + + +# -------------------------------------------- +# numpy(unit) (HxWxC or HxW) <---> tensor +# -------------------------------------------- + + +# convert uint to 4-dimensional torch tensor +def uint2tensor4(img): + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0) + + +# convert uint to 3-dimensional torch tensor +def uint2tensor3(img): + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.) 
+ + +# convert 2/3/4-dimensional torch tensor to uint +def tensor2uint(img): + img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + return np.uint8((img*255.0).round()) + + +# -------------------------------------------- +# numpy(single) (HxWxC) <---> tensor +# -------------------------------------------- + + +# convert single (HxWxC) to 3-dimensional torch tensor +def single2tensor3(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float() + + +# convert single (HxWxC) to 4-dimensional torch tensor +def single2tensor4(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0) + + +# convert torch tensor to single +def tensor2single(img): + img = img.data.squeeze().float().cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + + return img + +# convert torch tensor to single +def tensor2single3(img): + img = img.data.squeeze().float().cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + elif img.ndim == 2: + img = np.expand_dims(img, axis=2) + return img + + +def single2tensor5(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0) + + +def single32tensor5(img): + return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0) + + +def single42tensor4(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float() + + +# from skimage.io import imread, imsave +def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): + ''' + Converts a torch Tensor into an image Numpy array of BGR channel order + Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order + Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) + ''' + tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp + tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] + n_dim = tensor.dim() + if n_dim == 4: + n_img = len(tensor) + img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() + img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR + elif n_dim == 3: + img_np = tensor.numpy() + img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR + elif n_dim == 2: + img_np = tensor.numpy() + else: + raise TypeError( + 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) + if out_type == np.uint8: + img_np = (img_np * 255.0).round() + # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. + return img_np.astype(out_type) + + +''' +# -------------------------------------------- +# Augmentation, flipe and/or rotate +# -------------------------------------------- +# The following two are enough. 
+# (1) augmet_img: numpy image of WxHxC or WxH +# (2) augment_img_tensor4: tensor image 1xCxWxH +# -------------------------------------------- +''' + + +def augment_img(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + if mode == 0: + return img + elif mode == 1: + return np.flipud(np.rot90(img)) + elif mode == 2: + return np.flipud(img) + elif mode == 3: + return np.rot90(img, k=3) + elif mode == 4: + return np.flipud(np.rot90(img, k=2)) + elif mode == 5: + return np.rot90(img) + elif mode == 6: + return np.rot90(img, k=2) + elif mode == 7: + return np.flipud(np.rot90(img, k=3)) + + +def augment_img_tensor4(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + if mode == 0: + return img + elif mode == 1: + return img.rot90(1, [2, 3]).flip([2]) + elif mode == 2: + return img.flip([2]) + elif mode == 3: + return img.rot90(3, [2, 3]) + elif mode == 4: + return img.rot90(2, [2, 3]).flip([2]) + elif mode == 5: + return img.rot90(1, [2, 3]) + elif mode == 6: + return img.rot90(2, [2, 3]) + elif mode == 7: + return img.rot90(3, [2, 3]).flip([2]) + + +def augment_img_tensor(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + img_size = img.size() + img_np = img.data.cpu().numpy() + if len(img_size) == 3: + img_np = np.transpose(img_np, (1, 2, 0)) + elif len(img_size) == 4: + img_np = np.transpose(img_np, (2, 3, 1, 0)) + img_np = augment_img(img_np, mode=mode) + img_tensor = torch.from_numpy(np.ascontiguousarray(img_np)) + if len(img_size) == 3: + img_tensor = img_tensor.permute(2, 0, 1) + elif len(img_size) == 4: + img_tensor = img_tensor.permute(3, 2, 0, 1) + + return img_tensor.type_as(img) + + +def augment_img_np3(img, mode=0): + if mode == 0: + return img + elif mode == 1: + return img.transpose(1, 0, 2) + elif mode == 2: + return img[::-1, :, :] + elif mode == 3: + img = img[::-1, :, :] + img = img.transpose(1, 0, 2) + return img + elif mode == 4: + return img[:, ::-1, :] + elif mode == 5: + img = img[:, ::-1, :] + img = img.transpose(1, 0, 2) + return img + elif mode == 6: + img = img[:, ::-1, :] + img = img[::-1, :, :] + return img + elif mode == 7: + img = img[:, ::-1, :] + img = img[::-1, :, :] + img = img.transpose(1, 0, 2) + return img + + +def augment_imgs(img_list, hflip=True, rot=True): + # horizontal flip OR rotate + hflip = hflip and random.random() < 0.5 + vflip = rot and random.random() < 0.5 + rot90 = rot and random.random() < 0.5 + + def _augment(img): + if hflip: + img = img[:, ::-1, :] + if vflip: + img = img[::-1, :, :] + if rot90: + img = img.transpose(1, 0, 2) + return img + + return [_augment(img) for img in img_list] + + +''' +# -------------------------------------------- +# modcrop and shave +# -------------------------------------------- +''' + + +def modcrop(img_in, scale): + # img_in: Numpy, HWC or HW + img = np.copy(img_in) + if img.ndim == 2: + H, W = img.shape + H_r, W_r = H % scale, W % scale + img = img[:H - H_r, :W - W_r] + elif img.ndim == 3: + H, W, C = img.shape + H_r, W_r = H % scale, W % scale + img = img[:H - H_r, :W - W_r, :] + else: + raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim)) + return img + + +def shave(img_in, border=0): + # img_in: Numpy, HWC or HW + img = np.copy(img_in) + h, w = img.shape[:2] + img = img[border:h-border, border:w-border] + return img + + +''' +# -------------------------------------------- +# 
image processing process on numpy image +# channel_convert(in_c, tar_type, img_list): +# rgb2ycbcr(img, only_y=True): +# bgr2ycbcr(img, only_y=True): +# ycbcr2rgb(img): +# -------------------------------------------- +''' + + +def rgb2ycbcr(img, only_y=True): + '''same as matlab rgb2ycbcr + only_y: only return Y channel + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + if only_y: + rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0 + else: + rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], + [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. + return rlt.astype(in_img_type) + + +def ycbcr2rgb(img): + '''same as matlab ycbcr2rgb + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], + [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. + return rlt.astype(in_img_type) + + +def bgr2ycbcr(img, only_y=True): + '''bgr version of rgb2ycbcr + only_y: only return Y channel + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + if only_y: + rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0 + else: + rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], + [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. 
+ return rlt.astype(in_img_type) + + +def channel_convert(in_c, tar_type, img_list): + # conversion among BGR, gray and y + if in_c == 3 and tar_type == 'gray': # BGR to gray + gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list] + return [np.expand_dims(img, axis=2) for img in gray_list] + elif in_c == 3 and tar_type == 'y': # BGR to y + y_list = [bgr2ycbcr(img, only_y=True) for img in img_list] + return [np.expand_dims(img, axis=2) for img in y_list] + elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR + return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list] + else: + return img_list + + +''' +# -------------------------------------------- +# metric, PSNR and SSIM +# -------------------------------------------- +''' + + +# -------------------------------------------- +# PSNR +# -------------------------------------------- +def calculate_psnr(img1, img2, border=0): + # img1 and img2 have range [0, 255] + #img1 = img1.squeeze() + #img2 = img2.squeeze() + if not img1.shape == img2.shape: + raise ValueError('Input images must have the same dimensions.') + h, w = img1.shape[:2] + img1 = img1[border:h-border, border:w-border] + img2 = img2[border:h-border, border:w-border] + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + mse = np.mean((img1 - img2)**2) + if mse == 0: + return float('inf') + return 20 * math.log10(255.0 / math.sqrt(mse)) + + +# -------------------------------------------- +# SSIM +# -------------------------------------------- +def calculate_ssim(img1, img2, border=0): + '''calculate SSIM + the same outputs as MATLAB's + img1, img2: [0, 255] + ''' + #img1 = img1.squeeze() + #img2 = img2.squeeze() + if not img1.shape == img2.shape: + raise ValueError('Input images must have the same dimensions.') + h, w = img1.shape[:2] + img1 = img1[border:h-border, border:w-border] + img2 = img2[border:h-border, border:w-border] + + if img1.ndim == 2: + return ssim(img1, img2) + elif img1.ndim == 3: + if img1.shape[2] == 3: + ssims = [] + for i in range(3): + ssims.append(ssim(img1[:,:,i], img2[:,:,i])) + return np.array(ssims).mean() + elif img1.shape[2] == 1: + return ssim(np.squeeze(img1), np.squeeze(img2)) + else: + raise ValueError('Wrong input image dimensions.') + + +def ssim(img1, img2): + C1 = (0.01 * 255)**2 + C2 = (0.03 * 255)**2 + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + kernel = cv2.getGaussianKernel(11, 1.5) + window = np.outer(kernel, kernel.transpose()) + + mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid + mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] + mu1_sq = mu1**2 + mu2_sq = mu2**2 + mu1_mu2 = mu1 * mu2 + sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq + sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq + sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * + (sigma1_sq + sigma2_sq + C2)) + return ssim_map.mean() + + +''' +# -------------------------------------------- +# matlab's bicubic imresize (numpy and torch) [0, 1] +# -------------------------------------------- +''' + + +# matlab 'imresize' function, now only support 'bicubic' +def cubic(x): + absx = torch.abs(x) + absx2 = absx**2 + absx3 = absx**3 + return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \ + (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx)) + + +def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, 
antialiasing): + if (scale < 1) and (antialiasing): + # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width + kernel_width = kernel_width / scale + + # Output-space coordinates + x = torch.linspace(1, out_length, out_length) + + # Input-space coordinates. Calculate the inverse mapping such that 0.5 + # in output space maps to 0.5 in input space, and 0.5+scale in output + # space maps to 1.5 in input space. + u = x / scale + 0.5 * (1 - 1 / scale) + + # What is the left-most pixel that can be involved in the computation? + left = torch.floor(u - kernel_width / 2) + + # What is the maximum number of pixels that can be involved in the + # computation? Note: it's OK to use an extra pixel here; if the + # corresponding weights are all zero, it will be eliminated at the end + # of this function. + P = math.ceil(kernel_width) + 2 + + # The indices of the input pixels involved in computing the k-th output + # pixel are in row k of the indices matrix. + indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view( + 1, P).expand(out_length, P) + + # The weights used to compute the k-th output pixel are in row k of the + # weights matrix. + distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices + # apply cubic kernel + if (scale < 1) and (antialiasing): + weights = scale * cubic(distance_to_center * scale) + else: + weights = cubic(distance_to_center) + # Normalize the weights matrix so that each row sums to 1. + weights_sum = torch.sum(weights, 1).view(out_length, 1) + weights = weights / weights_sum.expand(out_length, P) + + # If a column in weights is all zero, get rid of it. only consider the first and last column. + weights_zero_tmp = torch.sum((weights == 0), 0) + if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): + indices = indices.narrow(1, 1, P - 2) + weights = weights.narrow(1, 1, P - 2) + if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): + indices = indices.narrow(1, 0, P - 2) + weights = weights.narrow(1, 0, P - 2) + weights = weights.contiguous() + indices = indices.contiguous() + sym_len_s = -indices.min() + 1 + sym_len_e = indices.max() - in_length + indices = indices + sym_len_s - 1 + return weights, indices, int(sym_len_s), int(sym_len_e) + + +# -------------------------------------------- +# imresize for tensor image [0, 1] +# -------------------------------------------- +def imresize(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: pytorch tensor, CHW or HW [0,1] + # output: CHW or HW [0,1] w/o round + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(0) + in_C, in_H, in_W = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W) + img_aug.narrow(1, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:, :sym_len_Hs, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[:, -sym_len_He:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(in_C, out_H, in_W) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We) + out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :, :sym_len_Ws] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, :, -sym_len_We:] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(in_C, out_H, out_W) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + return out_2 + + +# -------------------------------------------- +# imresize for numpy image [0, 1] +# -------------------------------------------- +def imresize_np(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: Numpy, HWC or HW [0,1] + # output: HWC or HW [0,1] w/o round + img = torch.from_numpy(img) + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(2) + + in_H, in_W, in_C = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) + img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:sym_len_Hs, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[-sym_len_He:, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(out_H, in_W, in_C) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) + out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :sym_len_Ws, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, -sym_len_We:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(out_H, out_W, in_C) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + + return out_2.numpy() + + +if __name__ == '__main__': + print('---') +# img = imread_uint('test.bmp', 3) +# img = uint2single(img) +# img_bicubic = imresize_np(img, 1/4) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/losses/__init__.py b/stable-dreamfusion-3DPortrait/ldm/modules/losses/__init__.py new file mode 100644 index 0000000..876d7c5 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/losses/__init__.py @@ -0,0 +1 @@ +from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/losses/contperceptual.py b/stable-dreamfusion-3DPortrait/ldm/modules/losses/contperceptual.py new file mode 100644 index 0000000..672c1e3 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/losses/contperceptual.py @@ -0,0 +1,111 @@ +import torch +import torch.nn as nn + +from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
+ + +class LPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_loss="hinge"): + + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + self.kl_weight = kl_weight + self.pixel_weight = pixelloss_weight + self.perceptual_loss = LPIPS().eval() + self.perceptual_weight = perceptual_weight + # output log variance + self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm + ).apply(weights_init) + self.discriminator_iter_start = disc_start + self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, inputs, reconstructions, posteriors, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", + weights=None): + rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + + nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar + weighted_nll_loss = nll_loss + if weights is not None: + weighted_nll_loss = weights*nll_loss + weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] + nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + kl_loss = posteriors.kl() + kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + if self.disc_factor > 0.0: + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + else: + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), + "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): 
torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log + diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/losses/vqperceptual.py b/stable-dreamfusion-3DPortrait/ldm/modules/losses/vqperceptual.py new file mode 100644 index 0000000..f699817 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/losses/vqperceptual.py @@ -0,0 +1,167 @@ +import torch +from torch import nn +import torch.nn.functional as F +from einops import repeat + +from taming.modules.discriminator.model import NLayerDiscriminator, weights_init +from taming.modules.losses.lpips import LPIPS +from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss + + +def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights): + assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0] + loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3]) + loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3]) + loss_real = (weights * loss_real).sum() / weights.sum() + loss_fake = (weights * loss_fake).sum() / weights.sum() + d_loss = 0.5 * (loss_real + loss_fake) + return d_loss + +def adopt_weight(weight, global_step, threshold=0, value=0.): + if global_step < threshold: + weight = value + return weight + + +def measure_perplexity(predicted_indices, n_embed): + # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py + # eval cluster perplexity. 
when perplexity == num_embeddings then all clusters are used exactly equally + encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) + avg_probs = encodings.mean(0) + perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() + cluster_use = torch.sum(avg_probs > 0) + return perplexity, cluster_use + +def l1(x, y): + return torch.abs(x-y) + + +def l2(x, y): + return torch.pow((x-y), 2) + + +class VQLPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips", + pixel_loss="l1"): + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + assert perceptual_loss in ["lpips", "clips", "dists"] + assert pixel_loss in ["l1", "l2"] + self.codebook_weight = codebook_weight + self.pixel_weight = pixelloss_weight + if perceptual_loss == "lpips": + print(f"{self.__class__.__name__}: Running with LPIPS.") + self.perceptual_loss = LPIPS().eval() + else: + raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<") + self.perceptual_weight = perceptual_weight + + if pixel_loss == "l1": + self.pixel_loss = l1 + else: + self.pixel_loss = l2 + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm, + ndf=disc_ndf + ).apply(weights_init) + self.discriminator_iter_start = disc_start + if disc_loss == "hinge": + self.disc_loss = hinge_d_loss + elif disc_loss == "vanilla": + self.disc_loss = vanilla_d_loss + else: + raise ValueError(f"Unknown GAN loss '{disc_loss}'.") + print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + self.n_classes = n_classes + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", predicted_indices=None): + if not exists(codebook_loss): + codebook_loss = torch.tensor([0.]).to(inputs.device) + #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + else: + p_loss = torch.tensor([0.0]) + + nll_loss = rec_loss + #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + nll_loss = torch.mean(nll_loss) + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + 
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/quant_loss".format(split): codebook_loss.detach().mean(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/p_loss".format(split): p_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + if predicted_indices is not None: + assert self.n_classes is not None + with torch.no_grad(): + perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes) + log[f"{split}/perplexity"] = perplexity + log[f"{split}/cluster_usage"] = cluster_usage + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log diff --git a/stable-dreamfusion-3DPortrait/ldm/modules/x_transformer.py b/stable-dreamfusion-3DPortrait/ldm/modules/x_transformer.py new file mode 100644 index 0000000..5fc15bf --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/modules/x_transformer.py @@ -0,0 +1,641 @@ +"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" +import torch +from torch import nn, einsum +import torch.nn.functional as F +from functools import partial +from inspect import isfunction +from collections import namedtuple +from einops import rearrange, repeat, reduce + +# constants + +DEFAULT_DIM_HEAD = 64 + +Intermediates = namedtuple('Intermediates', [ + 'pre_softmax_attn', + 'post_softmax_attn' +]) + +LayerIntermediates = namedtuple('Intermediates', [ + 'hiddens', + 'attn_intermediates' +]) + + +class AbsolutePositionalEmbedding(nn.Module): + def __init__(self, dim, max_seq_len): + super().__init__() + self.emb = nn.Embedding(max_seq_len, dim) + self.init_() + + def init_(self): + nn.init.normal_(self.emb.weight, std=0.02) + + def forward(self, x): + n = torch.arange(x.shape[1], device=x.device) + return self.emb(n)[None, :, :] + + +class FixedPositionalEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer('inv_freq', inv_freq) + + def forward(self, x, seq_dim=1, offset=0): + t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset + sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) + emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) + return emb[None, :, :] + + +# helpers + +def exists(val): + return val is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def always(val): + def inner(*args, **kwargs): + return val + return inner + + +def not_equals(val): + def inner(x): + return x != val + return inner + + +def equals(val): + def inner(x): + return x == val + return inner + + +def max_neg_value(tensor): + return -torch.finfo(tensor.dtype).max + + +# keyword argument helpers + +def pick_and_pop(keys, d): + values = list(map(lambda key: d.pop(key), keys)) + return dict(zip(keys, values)) + + +def group_dict_by_key(cond, d): + return_val = [dict(), dict()] + for key in d.keys(): + match = bool(cond(key)) + ind = int(not match) + return_val[ind][key] = d[key] + return (*return_val,) + + +def string_begins_with(prefix, str): + return str.startswith(prefix) + + +def group_by_key_prefix(prefix, d): + return group_dict_by_key(partial(string_begins_with, prefix), d) + + +def groupby_prefix_and_trim(prefix, d): + kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) + kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) + return kwargs_without_prefix, kwargs + + +# classes +class Scale(nn.Module): + def __init__(self, value, fn): + super().__init__() + self.value = value + self.fn = fn + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.value, *rest) + + +class Rezero(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + self.g = nn.Parameter(torch.zeros(1)) + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.g, *rest) + + +class ScaleNorm(nn.Module): + def __init__(self, dim, eps=1e-5): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(1)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class RMSNorm(nn.Module): + def __init__(self, dim, eps=1e-8): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class Residual(nn.Module): + def forward(self, x, residual): + return x + residual + + +class GRUGating(nn.Module): + def __init__(self, dim): + super().__init__() + self.gru = nn.GRUCell(dim, dim) + + def forward(self, x, residual): + gated_output = self.gru( + rearrange(x, 'b n d -> (b n) d'), + rearrange(residual, 'b n d -> (b n) d') + ) + + return gated_output.reshape_as(x) + + +# feedforward + +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + 
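+        # Input projection: with glu=True, GEGLU projects to 2 * inner_dim and gates one half with GELU of the other
+        # (x, gate = proj(x).chunk(2, dim=-1); out = x * gelu(gate)); otherwise a plain Linear followed by GELU is used.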
project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +# attention. +class Attention(nn.Module): + def __init__( + self, + dim, + dim_head=DEFAULT_DIM_HEAD, + heads=8, + causal=False, + mask=None, + talking_heads=False, + sparse_topk=None, + use_entmax15=False, + num_mem_kv=0, + dropout=0., + on_attn=False + ): + super().__init__() + if use_entmax15: + raise NotImplementedError("Check out entmax activation instead of softmax activation!") + self.scale = dim_head ** -0.5 + self.heads = heads + self.causal = causal + self.mask = mask + + inner_dim = dim_head * heads + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_k = nn.Linear(dim, inner_dim, bias=False) + self.to_v = nn.Linear(dim, inner_dim, bias=False) + self.dropout = nn.Dropout(dropout) + + # talking heads + self.talking_heads = talking_heads + if talking_heads: + self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + + # explicit topk sparse attention + self.sparse_topk = sparse_topk + + # entmax + #self.attn_fn = entmax15 if use_entmax15 else F.softmax + self.attn_fn = F.softmax + + # add memory key / values + self.num_mem_kv = num_mem_kv + if num_mem_kv > 0: + self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + + # attention on attention + self.attn_on_attn = on_attn + self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + rel_pos=None, + sinusoidal_emb=None, + prev_attn=None, + mem=None + ): + b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device + kv_input = default(context, x) + + q_input = x + k_input = kv_input + v_input = kv_input + + if exists(mem): + k_input = torch.cat((mem, k_input), dim=-2) + v_input = torch.cat((mem, v_input), dim=-2) + + if exists(sinusoidal_emb): + # in shortformer, the query would start at a position offset depending on the past cached memory + offset = k_input.shape[-2] - q_input.shape[-2] + q_input = q_input + sinusoidal_emb(q_input, offset=offset) + k_input = k_input + sinusoidal_emb(k_input) + + q = self.to_q(q_input) + k = self.to_k(k_input) + v = self.to_v(v_input) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) + + input_mask = None + if any(map(exists, (mask, context_mask))): + q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) + k_mask = q_mask if not exists(context) else context_mask + k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) + q_mask = rearrange(q_mask, 'b i -> b () i ()') + k_mask = rearrange(k_mask, 'b j -> b () () j') + input_mask = q_mask * k_mask + + if self.num_mem_kv > 0: + mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) + k = torch.cat((mem_k, k), dim=-2) + v = torch.cat((mem_v, v), dim=-2) + if exists(input_mask): + input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) + + dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale + mask_value = max_neg_value(dots) + + if exists(prev_attn): + dots = dots + prev_attn + + pre_softmax_attn = dots + + if talking_heads: + dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() + + if exists(rel_pos): + dots = rel_pos(dots) + + if exists(input_mask): + dots.masked_fill_(~input_mask, mask_value) + del input_mask + + if self.causal: + i, j = dots.shape[-2:] + r = torch.arange(i, device=device) + mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') + mask = F.pad(mask, (j - i, 0), value=False) + dots.masked_fill_(mask, mask_value) + del mask + + if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: + top, _ = dots.topk(self.sparse_topk, dim=-1) + vk = top[..., -1].unsqueeze(-1).expand_as(dots) + mask = dots < vk + dots.masked_fill_(mask, mask_value) + del mask + + attn = self.attn_fn(dots, dim=-1) + post_softmax_attn = attn + + attn = self.dropout(attn) + + if talking_heads: + attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + out = rearrange(out, 'b h n d -> b n (h d)') + + intermediates = Intermediates( + pre_softmax_attn=pre_softmax_attn, + post_softmax_attn=post_softmax_attn + ) + + return self.to_out(out), intermediates + + +class AttentionLayers(nn.Module): + def __init__( + self, + dim, + depth, + heads=8, + causal=False, + cross_attend=False, + only_cross=False, + use_scalenorm=False, + use_rmsnorm=False, + use_rezero=False, + rel_pos_num_buckets=32, + rel_pos_max_distance=128, + position_infused_attn=False, + custom_layers=None, + sandwich_coef=None, + par_ratio=None, + residual_attn=False, + cross_residual_attn=False, + macaron=False, + pre_norm=True, + gate_residual=False, + **kwargs + ): + super().__init__() + ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) + attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) + + dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) + + self.dim = dim + self.depth = depth + self.layers = nn.ModuleList([]) + + self.has_pos_emb = position_infused_attn + self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None + self.rotary_pos_emb = always(None) + + assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' + self.rel_pos = None + + self.pre_norm = pre_norm + + self.residual_attn = residual_attn + self.cross_residual_attn = cross_residual_attn + + norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm + norm_class = RMSNorm if use_rmsnorm else norm_class + norm_fn = partial(norm_class, dim) + + norm_fn = nn.Identity if use_rezero else norm_fn + branch_fn = Rezero if use_rezero else None + + if cross_attend and not only_cross: + default_block = ('a', 'c', 'f') + elif cross_attend and only_cross: + default_block = ('c', 'f') + else: + default_block = ('a', 'f') + + if macaron: + default_block = ('f',) + default_block + + if exists(custom_layers): + layer_types = custom_layers + elif exists(par_ratio): + par_depth = depth * len(default_block) + assert 1 < par_ratio <= par_depth, 'par ratio out of range' + default_block = tuple(filter(not_equals('f'), default_block)) + par_attn = par_depth // par_ratio + depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper + par_width = (depth_cut + depth_cut // par_attn) // par_attn + assert len(default_block) <= par_width, 'default block is too large for par_ratio' + par_block = default_block + ('f',) * (par_width - len(default_block)) + par_head = par_block * par_attn + layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) + elif exists(sandwich_coef): + assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' + layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef + else: + layer_types = default_block * depth + + self.layer_types = layer_types + self.num_attn_layers = len(list(filter(equals('a'), layer_types))) + + for layer_type in self.layer_types: + if layer_type == 'a': + layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) + elif layer_type == 'c': + layer = Attention(dim, heads=heads, **attn_kwargs) + elif layer_type == 'f': + layer = FeedForward(dim, **ff_kwargs) + layer = layer if not macaron else Scale(0.5, layer) + else: + raise Exception(f'invalid layer type {layer_type}') + + if isinstance(layer, Attention) and exists(branch_fn): + layer = branch_fn(layer) + + if gate_residual: + residual_fn = GRUGating(dim) + else: + residual_fn = Residual() + + self.layers.append(nn.ModuleList([ + norm_fn(), + layer, + residual_fn + ])) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + mems=None, + return_hiddens=False + ): + hiddens = [] + intermediates = [] + prev_attn = None + prev_cross_attn = None + + mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers + + for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): + is_last = ind == (len(self.layers) - 1) + + if layer_type == 'a': + hiddens.append(x) + layer_mem = mems.pop(0) + + residual = x + + if self.pre_norm: + x = norm(x) + + if layer_type == 'a': + out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, + prev_attn=prev_attn, mem=layer_mem) + elif layer_type == 'c': + out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) + elif layer_type == 'f': + out = block(x) + + x = residual_fn(out, residual) + + if layer_type in ('a', 'c'): + intermediates.append(inter) + + if layer_type == 'a' and self.residual_attn: + prev_attn = inter.pre_softmax_attn + elif layer_type == 'c' and self.cross_residual_attn: + prev_cross_attn = inter.pre_softmax_attn + + if not self.pre_norm and not is_last: + x = norm(x) + + if return_hiddens: + intermediates = LayerIntermediates( + hiddens=hiddens, + attn_intermediates=intermediates + ) + + return x, intermediates + + return x + + +class Encoder(AttentionLayers): + def __init__(self, **kwargs): + assert 'causal' not in kwargs, 'cannot set causality on encoder' + super().__init__(causal=False, **kwargs) + + + +class TransformerWrapper(nn.Module): + def __init__( + self, + *, + num_tokens, + max_seq_len, + attn_layers, + emb_dim=None, + max_mem_len=0., + emb_dropout=0., + num_memory_tokens=None, + tie_embedding=False, + use_pos_emb=True + ): + super().__init__() + assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' + + dim = attn_layers.dim + emb_dim = default(emb_dim, dim) + + self.max_seq_len = max_seq_len + self.max_mem_len = max_mem_len + self.num_tokens = num_tokens + + self.token_emb = nn.Embedding(num_tokens, emb_dim) + self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( + use_pos_emb and not attn_layers.has_pos_emb) else always(0) + self.emb_dropout = nn.Dropout(emb_dropout) + + self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() + self.attn_layers = attn_layers + self.norm = nn.LayerNorm(dim) + + self.init_() + + self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() + + # memory tokens (like [cls]) from Memory Transformers paper + num_memory_tokens = default(num_memory_tokens, 0) + self.num_memory_tokens = num_memory_tokens + if num_memory_tokens > 0: + self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) + + # let funnel encoder know number of memory tokens, if specified + if hasattr(attn_layers, 'num_memory_tokens'): + attn_layers.num_memory_tokens = num_memory_tokens + + def init_(self): + nn.init.normal_(self.token_emb.weight, std=0.02) + + def forward( + self, + x, + return_embeddings=False, + mask=None, + return_mems=False, + return_attn=False, + mems=None, + **kwargs + ): + b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens + x = self.token_emb(x) + x += self.pos_emb(x) + x = self.emb_dropout(x) + + x = self.project_emb(x) + + if num_mem > 0: + mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) + x = torch.cat((mem, x), dim=1) + + # auto-handle masking after appending memory tokens + if exists(mask): + mask = F.pad(mask, (num_mem, 0), value=True) + + x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) + x = self.norm(x) + + mem, x = x[:, :num_mem], x[:, num_mem:] + + out = self.to_logits(x) if not return_embeddings else x + + if return_mems: + hiddens = intermediates.hiddens + new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens + new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) + return out, new_mems + + if return_attn: + attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) + return out, attn_maps + + return out + diff --git a/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/helpers.py b/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/helpers.py new file mode 100644 index 0000000..983baaa --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/helpers.py @@ -0,0 +1,121 @@ +# https://github.com/eladrich/pixel2style2pixel + +from collections import namedtuple +import torch +from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module + +""" +ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) +""" + + +class Flatten(Module): + def forward(self, input): + return input.view(input.size(0), -1) + + +def l2_norm(input, axis=1): + norm = torch.norm(input, 2, axis, True) + output = torch.div(input, norm) + return output + + +class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): + """ A named tuple describing a ResNet block. 
""" + + +def get_block(in_channel, depth, num_units, stride=2): + return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] + + +def get_blocks(num_layers): + if num_layers == 50: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=4), + get_block(in_channel=128, depth=256, num_units=14), + get_block(in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 100: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=13), + get_block(in_channel=128, depth=256, num_units=30), + get_block(in_channel=256, depth=512, num_units=3) + ] + elif num_layers == 152: + blocks = [ + get_block(in_channel=64, depth=64, num_units=3), + get_block(in_channel=64, depth=128, num_units=8), + get_block(in_channel=128, depth=256, num_units=36), + get_block(in_channel=256, depth=512, num_units=3) + ] + else: + raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers)) + return blocks + + +class SEModule(Module): + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.avg_pool = AdaptiveAvgPool2d(1) + self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) + self.relu = ReLU(inplace=True) + self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) + self.sigmoid = Sigmoid() + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class bottleneck_IR(Module): + def __init__(self, in_channel, depth, stride): + super(bottleneck_IR, self).__init__() + if in_channel == depth: + self.shortcut_layer = MaxPool2d(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2d(in_channel, depth, (1, 1), stride, bias=False), + BatchNorm2d(depth) + ) + self.res_layer = Sequential( + BatchNorm2d(in_channel), + Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), + Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) + ) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + return res + shortcut + + +class bottleneck_IR_SE(Module): + def __init__(self, in_channel, depth, stride): + super(bottleneck_IR_SE, self).__init__() + if in_channel == depth: + self.shortcut_layer = MaxPool2d(1, stride) + else: + self.shortcut_layer = Sequential( + Conv2d(in_channel, depth, (1, 1), stride, bias=False), + BatchNorm2d(depth) + ) + self.res_layer = Sequential( + BatchNorm2d(in_channel), + Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), + PReLU(depth), + Conv2d(depth, depth, (3, 3), stride, 1, bias=False), + BatchNorm2d(depth), + SEModule(depth, 16) + ) + + def forward(self, x): + shortcut = self.shortcut_layer(x) + res = self.res_layer(x) + return res + shortcut \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/id_loss.py b/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/id_loss.py new file mode 100644 index 0000000..e08ee09 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/id_loss.py @@ -0,0 +1,23 @@ +# https://github.com/eladrich/pixel2style2pixel +import torch +from torch import nn +from ldm.thirdp.psp.model_irse import Backbone + + +class IDFeatures(nn.Module): + def __init__(self, model_path): + super(IDFeatures, self).__init__() + 
print('Loading ResNet ArcFace') + self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se') + self.facenet.load_state_dict(torch.load(model_path, map_location="cpu")) + self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) + self.facenet.eval() + + def forward(self, x, crop=False): + # Not sure of the image range here + if crop: + x = torch.nn.functional.interpolate(x, (256, 256), mode="area") + x = x[:, :, 35:223, 32:220] + x = self.face_pool(x) + x_feats = self.facenet(x) + return x_feats diff --git a/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/model_irse.py b/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/model_irse.py new file mode 100644 index 0000000..21cedd2 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/thirdp/psp/model_irse.py @@ -0,0 +1,86 @@ +# https://github.com/eladrich/pixel2style2pixel + +from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module +from ldm.thirdp.psp.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm + +""" +Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) +""" + + +class Backbone(Module): + def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True): + super(Backbone, self).__init__() + assert input_size in [112, 224], "input_size should be 112 or 224" + assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" + assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" + blocks = get_blocks(num_layers) + if mode == 'ir': + unit_module = bottleneck_IR + elif mode == 'ir_se': + unit_module = bottleneck_IR_SE + self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), + BatchNorm2d(64), + PReLU(64)) + if input_size == 112: + self.output_layer = Sequential(BatchNorm2d(512), + Dropout(drop_ratio), + Flatten(), + Linear(512 * 7 * 7, 512), + BatchNorm1d(512, affine=affine)) + else: + self.output_layer = Sequential(BatchNorm2d(512), + Dropout(drop_ratio), + Flatten(), + Linear(512 * 14 * 14, 512), + BatchNorm1d(512, affine=affine)) + + modules = [] + for block in blocks: + for bottleneck in block: + modules.append(unit_module(bottleneck.in_channel, + bottleneck.depth, + bottleneck.stride)) + self.body = Sequential(*modules) + + def forward(self, x): + x = self.input_layer(x) + x = self.body(x) + x = self.output_layer(x) + return l2_norm(x) + + +def IR_50(input_size): + """Constructs a ir-50 model.""" + model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False) + return model + + +def IR_101(input_size): + """Constructs a ir-101 model.""" + model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False) + return model + + +def IR_152(input_size): + """Constructs a ir-152 model.""" + model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False) + return model + + +def IR_SE_50(input_size): + """Constructs a ir_se-50 model.""" + model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False) + return model + + +def IR_SE_101(input_size): + """Constructs a ir_se-101 model.""" + model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False) + return model + + +def IR_SE_152(input_size): + """Constructs a ir_se-152 model.""" + model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False) + return model \ No newline at 
end of file diff --git a/stable-dreamfusion-3DPortrait/ldm/util.py b/stable-dreamfusion-3DPortrait/ldm/util.py new file mode 100644 index 0000000..7dcad70 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/ldm/util.py @@ -0,0 +1,227 @@ +import importlib + +import torchvision +import torch +from torch import optim +import numpy as np + +from inspect import isfunction +from PIL import Image, ImageDraw, ImageFont + +import os +import numpy as np +import matplotlib.pyplot as plt +from PIL import Image +import torch +import time +import cv2 + +import PIL + +def pil_rectangle_crop(im): + width, height = im.size # Get dimensions + + if width <= height: + left = 0 + right = width + top = (height - width)/2 + bottom = (height + width)/2 + else: + + top = 0 + bottom = height + left = (width - height) / 2 + right = (width + height) / 2 + + # Crop the center of the image + im = im.crop((left, top, right, bottom)) + return im + + +def log_txt_as_img(wh, xc, size=10): + # wh a tuple of (width, height) + # xc a list of captions to plot + b = len(xc) + txts = list() + for bi in range(b): + txt = Image.new("RGB", wh, color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) + nc = int(40 * (wh[0] / 256)) + lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) + + try: + draw.text((0, 0), lines, fill="black", font=font) + except UnicodeEncodeError: + print("Cant encode string for logging. Skipping.") + + txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 + txts.append(txt) + txts = np.stack(txts) + txts = torch.tensor(txts) + return txts + + +def ismap(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] > 3) + + +def isimage(x): + if not isinstance(x,torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def mean_flat(tensor): + """ + https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 + Take the mean over all non-batch dimensions.
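+    Equivalent to tensor.reshape(tensor.shape[0], -1).mean(dim=1) for a batched tensor.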
+ """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def count_params(model, verbose=False): + total_params = sum(p.numel() for p in model.parameters()) + if verbose: + print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") + return total_params + + +def instantiate_from_config(config): + if not "target" in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict())) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +class AdamWwithEMAandWings(optim.Optimizer): + # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298 + def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: check hyperparameters before using + weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999, # ema decay to match previous code + ema_power=1., param_names=()): + """AdamW that saves EMA versions of the parameters.""" + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= ema_decay <= 1.0: + raise ValueError("Invalid ema_decay value: {}".format(ema_decay)) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay, + ema_power=ema_power, param_names=param_names) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + ema_params_with_grad = [] + state_sums = [] + max_exp_avg_sqs = [] + state_steps = [] + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + ema_decay = group['ema_decay'] + ema_power = group['ema_power'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError('AdamW does not support sparse gradients') + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. 
values + state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of parameter values + state['param_exp_avg'] = p.detach().float().clone() + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + ema_params_with_grad.append(state['param_exp_avg']) + + if amsgrad: + max_exp_avg_sqs.append(state['max_exp_avg_sq']) + + # update the steps for each param group update + state['step'] += 1 + # record the step after step update + state_steps.append(state['step']) + + optim._functional.adamw(params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + maximize=False) + + cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power) + for param, ema_param in zip(params_with_grad, ema_params_with_grad): + ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay) + + return loss \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/main.py b/stable-dreamfusion-3DPortrait/main.py new file mode 100644 index 0000000..9d1a9bf --- /dev/null +++ b/stable-dreamfusion-3DPortrait/main.py @@ -0,0 +1,413 @@ +import torch +import argparse +import pandas as pd +import sys + +from nerf.provider import NeRFDataset +from nerf.utils import * + +# torch.autograd.set_detect_anomaly(True) + +if __name__ == '__main__': + # See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre + class LoadFromFile (argparse.Action): + def __call__ (self, parser, namespace, values, option_string = None): + with values as f: + # parse arguments in the file and store them in the target namespace + parser.parse_args(f.read().split(), namespace) + + parser = argparse.ArgumentParser() + parser.add_argument('--file', type=open, action=LoadFromFile, help="specify a file filled with more arguments") + parser.add_argument('--text', default=None, help="text prompt") + parser.add_argument('--negative', default='', type=str, help="negative text prompt") + parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray") + parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla") + parser.add_argument('--test', action='store_true', help="test mode") + parser.add_argument('--six_views', action='store_true', help="six_views mode: save the images of the six views") + parser.add_argument('--eval_interval', type=int, default=1, help="evaluate on the valid set every interval epochs") + parser.add_argument('--test_interval', type=int, default=100, help="test on the test set every interval epochs") + parser.add_argument('--workspace', type=str, default='workspace') + parser.add_argument('--seed', default=None) + + parser.add_argument('--image', default=None, help="image prompt") + parser.add_argument('--image_config', default=None, help="image config csv") + + parser.add_argument('--known_view_interval', type=int, default=4, help="train default view with RGB loss every & iters, only valid if --image is not None.") + + parser.add_argument('--IF', action='store_true', help="experimental: use DeepFloyd IF as the guidance model for nerf stage") + + parser.add_argument('--guidance', type=str, nargs='*', default=['SD'], help='guidance model') + parser.add_argument('--guidance_scale', type=float, default=100, help="diffusion model 
classifier-free guidance scale") + + parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture") + parser.add_argument('--mcubes_resolution', type=int, default=256, help="mcubes resolution for extracting mesh") + parser.add_argument('--decimate_target', type=int, default=5e4, help="target face number for mesh decimation") + + parser.add_argument('--dmtet', action='store_true', help="use dmtet finetuning") + parser.add_argument('--tet_grid_size', type=int, default=128, help="tet grid size") + parser.add_argument('--init_with', type=str, default='', help="ckpt to init dmtet") + parser.add_argument('--lock_geo', action='store_true', help="disable dmtet to learn geometry") + + ## Perp-Neg options + parser.add_argument('--perpneg', action='store_true', help="use perp_neg") + parser.add_argument('--negative_w', type=float, default=-2, help="The scale of the weights of negative prompts. A larger value will help to avoid the Janus problem, but may cause flat faces. Vary between 0 to -4, depending on the prompt") + parser.add_argument('--front_decay_factor', type=float, default=2, help="decay factor for the front prompt") + parser.add_argument('--side_decay_factor', type=float, default=10, help="decay factor for the side prompt") + + ### training options + parser.add_argument('--iters', type=int, default=10000, help="training iters") + parser.add_argument('--lr', type=float, default=1e-3, help="max learning rate") + parser.add_argument('--ckpt', type=str, default='latest', help="possible options are ['latest', 'scratch', 'best', 'latest_model']") + parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") + parser.add_argument('--taichi_ray', action='store_true', help="use taichi raymarching") + parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)") + parser.add_argument('--num_steps', type=int, default=64, help="num steps sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--upsample_steps', type=int, default=32, help="num steps up-sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") + parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)") + parser.add_argument('--latent_iter_ratio', type=float, default=0.2, help="training iters that only use albedo shading") + parser.add_argument('--albedo_iter_ratio', type=float, default=0, help="training iters that only use albedo shading") + parser.add_argument('--min_ambient_ratio', type=float, default=0.1, help="minimum ambient ratio to use in lambertian shading") + parser.add_argument('--textureless_ratio', type=float, default=0.2, help="ratio of textureless shading") + parser.add_argument('--jitter_pose', action='store_true', help="add jitters to the randomly sampled camera poses") + parser.add_argument('--jitter_center', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's center (camera location)") + parser.add_argument('--jitter_target', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')") + parser.add_argument('--jitter_up', type=float, default=0.02, help="amount of jitter to add to sampled camera pose's up-axis (i.e. 
'camera roll')") + parser.add_argument('--uniform_sphere_rate', type=float, default=0, help="likelihood of sampling camera location uniformly on the sphere surface area") + parser.add_argument('--grad_clip', type=float, default=-1, help="clip grad of all grad to this limit, negative value disables it") + parser.add_argument('--grad_clip_rgb', type=float, default=-1, help="clip grad of rgb space grad to this limit, negative value disables it") + # model options + parser.add_argument('--bg_radius', type=float, default=1.4, help="if positive, use a background model at sphere(bg_radius)") + parser.add_argument('--density_activation', type=str, default='exp', choices=['softplus', 'exp'], help="density activation function") + parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied") + parser.add_argument('--blob_density', type=float, default=5, help="max (center) density for the density blob") + parser.add_argument('--blob_radius', type=float, default=0.2, help="control the radius for the density blob") + # network backbone + parser.add_argument('--backbone', type=str, default='grid', choices=['grid_tcnn', 'grid', 'vanilla', 'grid_taichi'], help="nerf backbone") + parser.add_argument('--optim', type=str, default='adan', choices=['adan', 'adam'], help="optimizer") + parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version") + parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key") + # try this if CUDA OOM + parser.add_argument('--fp16', action='store_true', help="use float16 for training") + parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + # rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled. + parser.add_argument('--w', type=int, default=64, help="render width for NeRF in training") + parser.add_argument('--h', type=int, default=64, help="render height for NeRF in training") + parser.add_argument('--known_view_scale', type=float, default=1.5, help="multiply --h/w by this for known view rendering") + parser.add_argument('--known_view_noise_scale', type=float, default=2e-3, help="random camera noise added to rays_o and rays_d") + parser.add_argument('--dmtet_reso_scale', type=float, default=8, help="multiply --h/w by this for dmtet finetuning") + parser.add_argument('--batch_size', type=int, default=1, help="images to render per batch using NeRF") + + ### dataset options + parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box(-bound, bound)") + parser.add_argument('--dt_gamma', type=float, default=0, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") + parser.add_argument('--min_near', type=float, default=0.01, help="minimum near distance for camera") + + parser.add_argument('--radius_range', type=float, nargs='*', default=[3.0, 3.5], help="training camera radius range") + parser.add_argument('--theta_range', type=float, nargs='*', default=[45, 105], help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.") + parser.add_argument('--phi_range', type=float, nargs='*', default=[-180, 180], help="training camera range along the azimuth angles (i.e. left and right). 
See advanced.md for details.") + parser.add_argument('--fovy_range', type=float, nargs='*', default=[10, 30], help="training camera fovy range") + + parser.add_argument('--default_radius', type=float, default=3.2, help="radius for the default view") + parser.add_argument('--default_polar', type=float, default=90, help="polar for the default view") + parser.add_argument('--default_azimuth', type=float, default=0, help="azimuth for the default view") + parser.add_argument('--default_fovy', type=float, default=20, help="fovy for the default view") + + parser.add_argument('--progressive_view', action='store_true', help="progressively expand view sampling range from default to full") + parser.add_argument('--progressive_view_init_ratio', type=float, default=0.2, help="initial ratio of final range, used for progressive_view") + + parser.add_argument('--progressive_level', action='store_true', help="progressively increase gridencoder's max_level") + + parser.add_argument('--angle_overhead', type=float, default=30, help="[0, angle_overhead] is the overhead region") + parser.add_argument('--angle_front', type=float, default=60, help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.") + parser.add_argument('--t_range', type=float, nargs='*', default=[0.02, 0.98], help="stable diffusion time steps range") + parser.add_argument('--dont_override_stuff',action='store_true', help="Don't override t_range, etc.") + + + ### regularizations + parser.add_argument('--lambda_entropy', type=float, default=1e-3, help="loss scale for alpha entropy") + parser.add_argument('--lambda_opacity', type=float, default=0, help="loss scale for alpha value") + parser.add_argument('--lambda_orient', type=float, default=1e-2, help="loss scale for orientation") + parser.add_argument('--lambda_tv', type=float, default=0, help="loss scale for total variation") + parser.add_argument('--lambda_wd', type=float, default=0, help="loss scale") + + parser.add_argument('--lambda_mesh_normal', type=float, default=0.5, help="loss scale for mesh normal smoothness") + parser.add_argument('--lambda_mesh_laplacian', type=float, default=0.5, help="loss scale for mesh laplacian") + + parser.add_argument('--lambda_guidance', type=float, default=1, help="loss scale for SDS") + parser.add_argument('--lambda_rgb', type=float, default=1000, help="loss scale for RGB") + parser.add_argument('--lambda_mask', type=float, default=500, help="loss scale for mask (alpha)") + parser.add_argument('--lambda_normal', type=float, default=0, help="loss scale for normal map") + parser.add_argument('--lambda_depth', type=float, default=10, help="loss scale for relative depth") + parser.add_argument('--lambda_2d_normal_smooth', type=float, default=0, help="loss scale for 2D normal image smoothness") + parser.add_argument('--lambda_3d_normal_smooth', type=float, default=0, help="loss scale for 3D normal image smoothness") + + ### debugging options + parser.add_argument('--save_guidance', action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. 
Useful for debugging, but VERY SLOW and takes lots of memory!") + parser.add_argument('--save_guidance_interval', type=int, default=10, help="save guidance every X step") + + ### GUI options + parser.add_argument('--gui', action='store_true', help="start a GUI") + parser.add_argument('--W', type=int, default=800, help="GUI width") + parser.add_argument('--H', type=int, default=800, help="GUI height") + parser.add_argument('--radius', type=float, default=5, help="default GUI camera radius from center") + parser.add_argument('--fovy', type=float, default=20, help="default GUI camera fovy") + parser.add_argument('--light_theta', type=float, default=60, help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]") + parser.add_argument('--light_phi', type=float, default=0, help="default GUI light direction in [0, 360), azimuth") + parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") + + parser.add_argument('--zero123_config', type=str, default='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', help="config file for zero123") + parser.add_argument('--zero123_ckpt', type=str, default='pretrained/zero123/zero123-xl.ckpt', help="ckpt for zero123") + parser.add_argument('--zero123_grad_scale', type=str, default='angle', help="whether to scale the gradients based on 'angle' or 'None'") + + parser.add_argument('--dataset_size_train', type=int, default=100, help="Length of train dataset i.e. # of iterations per epoch") + parser.add_argument('--dataset_size_valid', type=int, default=8, help="# of frames to render in the turntable video in validation") + parser.add_argument('--dataset_size_test', type=int, default=100, help="# of frames to render in the turntable video at test time") + + parser.add_argument('--exp_start_iter', type=int, default=None, help="start iter # for experiment, to calculate progressive_view and progressive_level") + parser.add_argument('--exp_end_iter', type=int, default=None, help="end iter # for experiment, to calculate progressive_view and progressive_level") + + opt = parser.parse_args() + + if opt.O: + opt.fp16 = True + opt.cuda_ray = True + + elif opt.O2: + opt.fp16 = True + opt.backbone = 'vanilla' + opt.progressive_level = True + + if opt.IF: + if 'SD' in opt.guidance: + opt.guidance.remove('SD') + opt.guidance.append('IF') + opt.latent_iter_ratio = 0 # must not do as_latent + + opt.images, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.zero123_ws = [], [], [], [], [] + opt.default_zero123_w = 1 + + opt.exp_start_iter = opt.exp_start_iter or 0 + opt.exp_end_iter = opt.exp_end_iter or opt.iters + + # parameters for image-conditioned generation + if opt.image is not None or opt.image_config is not None: + + if opt.text is None: + # use zero123 guidance model when only providing image + opt.guidance = ['zero123'] + if not opt.dont_override_stuff: + opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov + opt.guidance_scale = 5 + opt.lambda_3d_normal_smooth = 10 + else: + # use stable-diffusion when providing both text and image + opt.guidance = ['SD', 'clip'] + + if not opt.dont_override_stuff: + opt.guidance_scale = 10 + opt.t_range = [0.2, 0.6] + opt.known_view_interval = 2 + opt.lambda_3d_normal_smooth = 20 + opt.bg_radius = -1 + + # smoothness + opt.lambda_entropy = 1 + opt.lambda_orient = 1 + + # latent warmup is not needed + opt.latent_iter_ratio = 0 + if not opt.dont_override_stuff: + opt.albedo_iter_ratio = 0 + + # make shape init 
more stable + opt.progressive_view = True + opt.progressive_level = True + + if opt.image is not None: + opt.images += [opt.image] + opt.ref_radii += [opt.default_radius] + opt.ref_polars += [opt.default_polar] + opt.ref_azimuths += [opt.default_azimuth] + opt.zero123_ws += [opt.default_zero123_w] + + if opt.image_config is not None: + # for multiview (zero123) + conf = pd.read_csv(opt.image_config, skipinitialspace=True) + opt.images += list(conf.image) + opt.ref_radii += list(conf.radius) + opt.ref_polars += list(conf.polar) + opt.ref_azimuths += list(conf.azimuth) + opt.zero123_ws += list(conf.zero123_weight) + if opt.image is None: + opt.default_radius = opt.ref_radii[0] + opt.default_polar = opt.ref_polars[0] + opt.default_azimuth = opt.ref_azimuths[0] + opt.default_zero123_w = opt.zero123_ws[0] + + # reset to None + if len(opt.images) == 0: + opt.images = None + + # default parameters for finetuning + if opt.dmtet: + + opt.h = int(opt.h * opt.dmtet_reso_scale) + opt.w = int(opt.w * opt.dmtet_reso_scale) + opt.known_view_scale = 1 + + if not opt.dont_override_stuff: + opt.t_range = [0.02, 0.50] # ref: magic3D + + if opt.images is not None: + + opt.lambda_normal = 0 + opt.lambda_depth = 0 + + if opt.text is not None and not opt.dont_override_stuff: + opt.t_range = [0.20, 0.50] + + # assume finetuning + opt.latent_iter_ratio = 0 + opt.albedo_iter_ratio = 0 + opt.progressive_view = False + # opt.progressive_level = False + + # record full range for progressive view expansion + if opt.progressive_view: + if not opt.dont_override_stuff: + # disable as they disturb progressive view + opt.jitter_pose = False + + opt.uniform_sphere_rate = 0 + # back up full range + opt.full_radius_range = opt.radius_range + opt.full_theta_range = opt.theta_range + opt.full_phi_range = opt.phi_range + opt.full_fovy_range = opt.fovy_range + + if opt.backbone == 'vanilla': + from nerf.network import NeRFNetwork + elif opt.backbone == 'grid': + from nerf.network_grid import NeRFNetwork + elif opt.backbone == 'grid_tcnn': + from nerf.network_grid_tcnn import NeRFNetwork + elif opt.backbone == 'grid_taichi': + opt.cuda_ray = False + opt.taichi_ray = True + import taichi as ti + from nerf.network_grid_taichi import NeRFNetwork + taichi_half2_opt = True + taichi_init_args = {"arch": ti.cuda, "device_memory_GB": 4.0} + if taichi_half2_opt: + taichi_init_args["half2_vectorization"] = True + ti.init(**taichi_init_args) + else: + raise NotImplementedError(f'--backbone {opt.backbone} is not implemented!') + + print(opt) + + if opt.seed is not None: + seed_everything(int(opt.seed)) + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + model = NeRFNetwork(opt).to(device) + + if opt.dmtet and opt.init_with != '': + if opt.init_with.endswith('.pth'): + # load pretrained weights to init dmtet + state_dict = torch.load(opt.init_with, map_location=device) + model.load_state_dict(state_dict['model'], strict=False) + if opt.cuda_ray: + model.mean_density = state_dict['mean_density'] + model.init_tet() + else: + # assume a mesh to init dmtet (experimental, not working well now!) 
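+            # Load the mesh with trimesh (materials skipped, no processing) and initialize the
+            # tetrahedral grid directly from its geometry via model.init_tet(mesh=mesh).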
+ import trimesh + mesh = trimesh.load(opt.init_with, force='mesh', skip_material=True, process=False) + model.init_tet(mesh=mesh) + + print(model) + + if opt.six_views: + guidance = None # no need to load guidance model at test + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt) + + test_loader = NeRFDataset(opt, device=device, type='six_views', H=opt.H, W=opt.W, size=6).dataloader(batch_size=1) + trainer.test(test_loader, write_video=False) + + if opt.save_mesh: + trainer.save_mesh() + + elif opt.test: + guidance = None # no need to load guidance model at test + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt) + + if opt.gui: + from nerf.gui import NeRFGUI + gui = NeRFGUI(opt, trainer) + gui.render() + + else: + test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1) + trainer.test(test_loader) + + if opt.save_mesh: + trainer.save_mesh() + + else: + + train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, size=opt.dataset_size_train * opt.batch_size).dataloader() + + if opt.optim == 'adan': + from optimizer import Adan + # Adan usually requires a larger LR + optimizer = lambda model: Adan(model.get_params(5 * opt.lr), eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, foreach=False) + else: # adam + optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr), betas=(0.9, 0.99), eps=1e-15) + + if opt.backbone == 'vanilla': + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1)) + else: + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed + # scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1)) + + guidance = nn.ModuleDict() + + if 'SD' in opt.guidance: + from guidance.sd_utils import StableDiffusion + guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range) + + if 'IF' in opt.guidance: + from guidance.if_utils import IF + guidance['IF'] = IF(device, opt.vram_O, opt.t_range) + + if 'zero123' in opt.guidance: + from guidance.zero123_utils import Zero123 + guidance['zero123'] = Zero123(device=device, fp16=opt.fp16, config=opt.zero123_config, ckpt=opt.zero123_ckpt, vram_O=opt.vram_O, t_range=opt.t_range, opt=opt) + + if 'clip' in opt.guidance: + from guidance.clip_utils import CLIP + guidance['clip'] = CLIP(device) + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, scheduler_update_every_step=True) + + trainer.default_view_data = train_loader._data.get_default_view_data() + + if opt.gui: + from nerf.gui import NeRFGUI + gui = NeRFGUI(opt, trainer, train_loader) + gui.render() + + else: + valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, size=opt.dataset_size_valid).dataloader(batch_size=1) + test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1) + + max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32) + trainer.train(train_loader, valid_loader, test_loader, max_epoch) + + if opt.save_mesh: + trainer.save_mesh() diff 
--git a/stable-dreamfusion-3DPortrait/main_3DPortraitGAN.py b/stable-dreamfusion-3DPortrait/main_3DPortraitGAN.py new file mode 100644 index 0000000..15a2359 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/main_3DPortraitGAN.py @@ -0,0 +1,474 @@ +import os + +import torch +import argparse +import pandas as pd +import sys + +from nerf.provider import NeRFDataset +from nerf.utils import * + + +if __name__ == '__main__': + # See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre + class LoadFromFile (argparse.Action): + def __call__ (self, parser, namespace, values, option_string = None): + with values as f: + # parse arguments in the file and store them in the target namespace + parser.parse_args(f.read().split(), namespace) + + parser = argparse.ArgumentParser() + parser.add_argument('--file', type=open, action=LoadFromFile, help="specify a file filled with more arguments") + parser.add_argument('--text', default=None, help="text prompt") + parser.add_argument('--negative', default='', type=str, help="negative text prompt") + parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray") + parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla") + parser.add_argument('--test', action='store_true', help="test mode") + parser.add_argument('--six_views', action='store_true', help="six_views mode: save the images of the six views") + parser.add_argument('--eval_interval', type=int, default=1, help="evaluate on the valid set every interval epochs") + parser.add_argument('--test_interval', type=int, default=10, help="test on the test set every interval epochs") + parser.add_argument('--workspace', type=str, default='workspace') + parser.add_argument('--seed', default=None) + + parser.add_argument('--image', default=None, help="image prompt") + parser.add_argument('--image_config', default=None, help="image config csv") + + parser.add_argument('--known_view_interval', type=int, default=4, help="train default view with RGB loss every & iters, only valid if --image is not None.") + + parser.add_argument('--IF', action='store_true', help="experimental: use DeepFloyd IF as the guidance model for nerf stage") + + parser.add_argument('--guidance', type=str, nargs='*', default=['SD'], help='guidance model') + parser.add_argument('--guidance_scale', type=float, default=100, help="diffusion model classifier-free guidance scale") + + parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture") + parser.add_argument('--mcubes_resolution', type=int, default=256, help="mcubes resolution for extracting mesh") + parser.add_argument('--decimate_target', type=int, default=5e4, help="target face number for mesh decimation") + + parser.add_argument('--dmtet', action='store_true', help="use dmtet finetuning") + parser.add_argument('--tet_grid_size', type=int, default=128, help="tet grid size") + parser.add_argument('--init_with', type=str, default='', help="ckpt to init dmtet") + parser.add_argument('--lock_geo', action='store_true', help="disable dmtet to learn geometry") + + ## Perp-Neg options + parser.add_argument('--perpneg', action='store_true', help="use perp_neg") + parser.add_argument('--negative_w', type=float, default=-2, help="The scale of the weights of negative prompts. A larger value will help to avoid the Janus problem, but may cause flat faces. 
Vary between 0 to -4, depending on the prompt") + parser.add_argument('--front_decay_factor', type=float, default=2, help="decay factor for the front prompt") + parser.add_argument('--side_decay_factor', type=float, default=10, help="decay factor for the side prompt") + + + ## Trigrid options + parser.add_argument('--trigrid_path', type=str, default='', help="path to trigrid") + parser.add_argument('--trigrid_decoder_ckpt', type=str, default='', help="path to trigrid decoder ckpt") + parser.add_argument('--train_decoder', action='store_true', help="train trigrid decoder") + parser.add_argument('--learnable_bg', action='store_true', help="Learnable background") + parser.add_argument('--trigrid_lr_ratio',type=float, nargs='+', default=[100,100,100,100,20,20,20], help="stable diffusion time steps range") + parser.add_argument('--scheduler_annealing', action='store_true', help="annealing scheduler") + + ### training options + parser.add_argument('--iters', type=int, default=10000, help="training iters") + parser.add_argument('--lr', type=float, default=1e-3, help="max learning rate") + parser.add_argument('--ckpt', type=str, default='latest', help="possible options are ['latest', 'scratch', 'best', 'latest_model']") + parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") + parser.add_argument('--taichi_ray', action='store_true', help="use taichi raymarching") + parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)") + parser.add_argument('--num_steps', type=int, default=64, help="num steps sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--upsample_steps', type=int, default=32, help="num steps up-sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") + parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)") + parser.add_argument('--latent_iter_ratio', type=float, default=0.2, help="training iters that only use albedo shading") + parser.add_argument('--albedo_iter_ratio', type=float, default=0, help="training iters that only use albedo shading") + parser.add_argument('--min_ambient_ratio', type=float, default=0.1, help="minimum ambient ratio to use in lambertian shading") + parser.add_argument('--textureless_ratio', type=float, default=0.2, help="ratio of textureless shading") + parser.add_argument('--jitter_pose', action='store_true', help="add jitters to the randomly sampled camera poses") + parser.add_argument('--jitter_center', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's center (camera location)") + parser.add_argument('--jitter_target', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')") + parser.add_argument('--jitter_up', type=float, default=0.02, help="amount of jitter to add to sampled camera pose's up-axis (i.e. 
'camera roll')") + parser.add_argument('--uniform_sphere_rate', type=float, default=0, help="likelihood of sampling camera location uniformly on the sphere surface area") + parser.add_argument('--grad_clip', type=float, default=-1, help="clip grad of all grad to this limit, negative value disables it") + parser.add_argument('--grad_clip_rgb', type=float, default=-1, help="clip grad of rgb space grad to this limit, negative value disables it") + # model options + parser.add_argument('--bg_radius', type=float, default=3.0, help="if positive, use a background model at sphere(bg_radius)") + parser.add_argument('--density_activation', type=str, default='exp', choices=['softplus', 'exp'], help="density activation function") + parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied") + parser.add_argument('--blob_density', type=float, default=5, help="max (center) density for the density blob") + parser.add_argument('--blob_radius', type=float, default=0.2, help="control the radius for the density blob") + # network backbone + parser.add_argument('--optim', type=str, default='adan', choices=['adan', 'adam'], help="optimizer") + parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version") + parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key") + # try this if CUDA OOM + parser.add_argument('--fp16', action='store_true', help="use float16 for training") + parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + # rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled. + parser.add_argument('--w', type=int, default=64, help="render width for NeRF in training") + parser.add_argument('--h', type=int, default=64, help="render height for NeRF in training") + parser.add_argument('--known_view_scale', type=float, default=1.5, help="multiply --h/w by this for known view rendering") + parser.add_argument('--known_view_noise_scale', type=float, default=2e-3, help="random camera noise added to rays_o and rays_d") + parser.add_argument('--dmtet_reso_scale', type=float, default=8, help="multiply --h/w by this for dmtet finetuning") + parser.add_argument('--batch_size', type=int, default=1, help="images to render per batch using NeRF") + + ### dataset options + parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box(-bound, bound)") + parser.add_argument('--dt_gamma', type=float, default=0, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") + parser.add_argument('--min_near', type=float, default=0.01, help="minimum near distance for camera") + + parser.add_argument('--radius_range', type=float, nargs='*', default=[2.6, 2.8], help="training camera radius range") + parser.add_argument('--theta_range', type=float, nargs='*', default=[60, 105], help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.") + parser.add_argument('--phi_range', type=float, nargs='*', default=[-180, 180], help="training camera range along the azimuth angles (i.e. left and right). 
See advanced.md for details.") + parser.add_argument('--fovy_range', type=float, nargs='*', default=[11, 13], help="training camera fovy range") + + parser.add_argument('--default_radius', type=float, default=2.7, help="radius for the default view") + parser.add_argument('--default_polar', type=float, default=90, help="polar for the default view") + parser.add_argument('--default_azimuth', type=float, default=0, help="azimuth for the default view") + parser.add_argument('--default_fovy', type=float, default=12., help="fovy for the default view") + + parser.add_argument('--progressive_view', action='store_true', help="progressively expand view sampling range from default to full") + parser.add_argument('--progressive_view_init_ratio', type=float, default=0.2, help="initial ratio of final range, used for progressive_view") + + parser.add_argument('--progressive_level', action='store_true', help="progressively increase gridencoder's max_level") + + parser.add_argument('--angle_overhead', type=float, default=30, help="[0, angle_overhead] is the overhead region") + parser.add_argument('--angle_front', type=float, default=60, help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.") + parser.add_argument('--t_range', type=float, nargs='+', default=[0.02, 0.98], help="stable diffusion time steps range") + parser.add_argument('--dont_override_stuff',action='store_true', help="Don't override t_range, etc.") + + + ### regularizations + parser.add_argument('--lambda_entropy', type=float, default=1e-3, help="loss scale for alpha entropy") + parser.add_argument('--lambda_opacity', type=float, default=0, help="loss scale for alpha value") + parser.add_argument('--lambda_orient', type=float, default=1e-2, help="loss scale for orientation") + parser.add_argument('--lambda_tv', type=float, default=0, help="loss scale for total variation") + parser.add_argument('--lambda_wd', type=float, default=0, help="loss scale") + + parser.add_argument('--lambda_mesh_normal', type=float, default=0.5, help="loss scale for mesh normal smoothness") + parser.add_argument('--lambda_mesh_laplacian', type=float, default=0.5, help="loss scale for mesh laplacian") + + parser.add_argument('--lambda_guidance', type=float, default=1, help="loss scale for SDS") + parser.add_argument('--lambda_rgb', type=float, default=1000, help="loss scale for RGB") + parser.add_argument('--lambda_mask', type=float, default=500, help="loss scale for mask (alpha)") + parser.add_argument('--lambda_normal', type=float, default=0, help="loss scale for normal map") + parser.add_argument('--lambda_depth', type=float, default=10, help="loss scale for relative depth") + parser.add_argument('--lambda_2d_normal_smooth', type=float, default=0, help="loss scale for 2D normal image smoothness") + parser.add_argument('--lambda_3d_normal_smooth', type=float, default=0, help="loss scale for 3D normal image smoothness") + + ### debugging options + parser.add_argument('--save_guidance', action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. 
Useful for debugging, but VERY SLOW and takes lots of memory!") + parser.add_argument('--save_guidance_interval', type=int, default=10, help="save guidance every X step") + + ### GUI options + parser.add_argument('--gui', action='store_true', help="start a GUI") + parser.add_argument('--W', type=int, default=800, help="GUI width") + parser.add_argument('--H', type=int, default=800, help="GUI height") + parser.add_argument('--radius', type=float, default=5, help="default GUI camera radius from center") + parser.add_argument('--fovy', type=float, default=20, help="default GUI camera fovy") + parser.add_argument('--light_theta', type=float, default=60, help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]") + parser.add_argument('--light_phi', type=float, default=0, help="default GUI light direction in [0, 360), azimuth") + parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") + + parser.add_argument('--zero123_config', type=str, default='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', help="config file for zero123") + parser.add_argument('--zero123_ckpt', type=str, default='pretrained/zero123/zero123-xl.ckpt', help="ckpt for zero123") + parser.add_argument('--zero123_grad_scale', type=str, default='angle', help="whether to scale the gradients based on 'angle' or 'None'") + + parser.add_argument('--dataset_size_train', type=int, default=100, help="Length of train dataset i.e. # of iterations per epoch") + parser.add_argument('--dataset_size_valid', type=int, default=8, help="# of frames to render in the turntable video in validation") + parser.add_argument('--dataset_size_test', type=int, default=100, help="# of frames to render in the turntable video at test time") + + parser.add_argument('--exp_start_iter', type=int, default=None, help="start iter # for experiment, to calculate progressive_view and progressive_level") + parser.add_argument('--exp_end_iter', type=int, default=None, help="end iter # for experiment, to calculate progressive_view and progressive_level") + + opt = parser.parse_args() + if opt.O: + raise NotImplementedError + opt.fp16 = True + opt.cuda_ray = True + + elif opt.O2: + raise NotImplementedError + opt.fp16 = True + opt.progressive_level = True + + if opt.IF: + if 'SD' in opt.guidance: + opt.guidance.remove('SD') + opt.guidance.append('IF') + opt.latent_iter_ratio = 0 # must not do as_latent + + opt.images, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.zero123_ws = [], [], [], [], [] + opt.default_zero123_w = 1 + + opt.exp_start_iter = opt.exp_start_iter or 0 + opt.exp_end_iter = opt.exp_end_iter or opt.iters + + # parameters for image-conditioned generation + if opt.image is not None or opt.image_config is not None: + + if opt.text is None: + # use zero123 guidance model when only providing image + opt.guidance = ['zero123'] + if not opt.dont_override_stuff: + opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov + opt.guidance_scale = 5 + opt.lambda_3d_normal_smooth = 10 + else: + # use stable-diffusion when providing both text and image + opt.guidance = ['SD', 'clip'] + + if not opt.dont_override_stuff: + opt.guidance_scale = 10 + opt.t_range = [0.2, 0.6] + opt.known_view_interval = 2 + opt.lambda_3d_normal_smooth = 20 + opt.bg_radius = -1 + + # smoothness + opt.lambda_entropy = 1 + opt.lambda_orient = 1 + + # latent warmup is not needed + opt.latent_iter_ratio = 0 + if not opt.dont_override_stuff: + opt.albedo_iter_ratio = 
0 + + # make shape init more stable + opt.progressive_view = True + opt.progressive_level = True + + if opt.image is not None: + opt.images += [opt.image] + opt.ref_radii += [opt.default_radius] + opt.ref_polars += [opt.default_polar] + opt.ref_azimuths += [opt.default_azimuth] + opt.zero123_ws += [opt.default_zero123_w] + + if opt.image_config is not None: + # for multiview (zero123) + conf = pd.read_csv(opt.image_config, skipinitialspace=True) + opt.images += list(conf.image) + opt.ref_radii += list(conf.radius) + opt.ref_polars += list(conf.polar) + opt.ref_azimuths += list(conf.azimuth) + opt.zero123_ws += list(conf.zero123_weight) + if opt.image is None: + opt.default_radius = opt.ref_radii[0] + opt.default_polar = opt.ref_polars[0] + opt.default_azimuth = opt.ref_azimuths[0] + opt.default_zero123_w = opt.zero123_ws[0] + + # reset to None + if len(opt.images) == 0: + opt.images = None + + + if opt.learnable_bg: + assert opt.bg_radius> max(opt.radius_range), f"bg_radius must be larger than max(radius_range) = {max(opt.radius_range)}" + + + # default parameters for finetuning + if opt.dmtet: + + opt.h = int(opt.h * opt.dmtet_reso_scale) + opt.w = int(opt.w * opt.dmtet_reso_scale) + opt.known_view_scale = 1 + + if not opt.dont_override_stuff: + opt.t_range = [0.02, 0.50] # ref: magic3D + + if opt.images is not None: + + opt.lambda_normal = 0 + opt.lambda_depth = 0 + + if opt.text is not None and not opt.dont_override_stuff: + opt.t_range = [0.20, 0.50] + + # assume finetuning + opt.latent_iter_ratio = 0 + opt.albedo_iter_ratio = 0 + opt.progressive_view = False + # opt.progressive_level = False + os.makedirs(opt.workspace, exist_ok=True) + # record full range for progressive view expansion + if opt.progressive_view: + if not opt.dont_override_stuff: + # disable as they disturb progressive view + opt.jitter_pose = False + + opt.uniform_sphere_rate = 0 + # back up full range + opt.full_radius_range = opt.radius_range + opt.full_theta_range = opt.theta_range + opt.full_phi_range = opt.phi_range + opt.full_fovy_range = opt.fovy_range + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + from nerf.network_trigrid_heirarchy import NeRFNetwork + + # load plane from pkl + if os.path.isfile(opt.trigrid_path): + print(f'loading plane from {opt.trigrid_path}...') + import pickle + threeDRepresentation = {} + threeDRepresentation_shapes = {} + with open(opt.trigrid_path, 'rb') as f: + data = pickle.load(f) + ws = data['ws'] + threeDRepresentation_shapes['ws'] = ws.shape + for key in data: + if 'trigrids' in key: + threeDRepresentation[key] = data[key].view(1, 3, -1, data[key].shape[-2], data[key].shape[-1]) + + threeDRepresentation_shapes[key] =threeDRepresentation[key].shape + + + print('save trigrid to workspace...') + shutil.copy(opt.trigrid_path, os.path.join(opt.workspace, 'trigrid.pkl')) + + + + model = NeRFNetwork( + opt=opt, + device=device, + trigrid_shapes=threeDRepresentation_shapes + ) + + # load + if os.path.isfile(opt.trigrid_decoder_ckpt): + print(f'loading trigrid_renderer from {opt.trigrid_decoder_ckpt}...') + ckpt = torch.load(opt.trigrid_decoder_ckpt, map_location=lambda storage, loc: storage) + # ckpt = {'params': params, 'state_dict': ckpt} + state_dict = ckpt['state_dict'] + params = ckpt['params'] + print(f'loading params: {params}') + + model.model.load_state_dict(state_dict) + # + model.model.rendering_kwargs['depth_resolution'] = 48 + model.model.rendering_kwargs['depth_resolution_importance'] = 48 + # + model.model.rendering_kwargs['ray_start'] = 2.35 
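+        # Rendering overrides: 48 coarse and 48 importance samples per ray,
+        # and ray marching starts at t = 2.35, presumably to skip the empty
+        # space between the camera (default radius 2.7) and the subject.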
+ # + # load plane from pkl + if os.path.isfile(opt.trigrid_path): + model.load_state_dict( + threeDRepresentation, strict=False + ) + + print(opt) + + if opt.seed is not None: + seed_everything(int(opt.seed)) + + # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + # + + + + + print(model) + + if opt.six_views: + raise NotImplementedError(f'--six_views {opt.six_views} is not implemented!') + guidance = None # no need to load guidance model at test + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt) + + test_loader = NeRFDataset(opt, device=device, type='six_views', H=opt.H, W=opt.W, size=6).dataloader(batch_size=1) + trainer.test(test_loader, write_video=False) + + if opt.save_mesh: + trainer.save_mesh() + + elif opt.test: + raise NotImplementedError(f'--test {opt.test} is not implemented!') + guidance = None # no need to load guidance model at test + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt) + + if opt.gui: + from nerf.gui import NeRFGUI + gui = NeRFGUI(opt, trainer) + gui.render() + + else: + test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1) + trainer.test(test_loader) + + if opt.save_mesh: + trainer.save_mesh() + + else: + + train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, size=opt.dataset_size_train * opt.batch_size).dataloader() + # data = { + # 'H': self.H, + # 'W': self.W, + # 'rays_o': rays['rays_o'], + # 'rays_d': rays['rays_d'], + # 'dir': dirs, + # 'mvp': mvp, + # 'polar': delta_polar, + # 'azimuth': delta_azimuth, + # 'radius': delta_radius, + # } + if opt.optim == 'adan': + from optimizer import Adan + + # Adan usually requires a larger LR + optimizer = lambda model: Adan(model.get_params(5 * opt.lr,trigrid_lr_ratio = opt.trigrid_lr_ratio), eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, + foreach=False) + else: # adam + optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr,trigrid_lr_ratio = opt.trigrid_lr_ratio), betas=(0.9, 0.99), eps=1e-15) + + if opt.scheduler_annealing: + print('=============== use scheduler: annealing') + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, + lambda iter: 0.1 ** min(iter / opt.iters, 1)) + else: + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed + # scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1)) + + guidance = nn.ModuleDict() + + if 'SD' in opt.guidance: + from guidance.sd_utils import StableDiffusion + guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range) + + if 'IF' in opt.guidance: + from guidance.if_utils import IF + guidance['IF'] = IF(device, opt.vram_O, opt.t_range) + + if 'zero123' in opt.guidance: + from guidance.zero123_utils import Zero123 + guidance['zero123'] = Zero123(device=device, fp16=opt.fp16, config=opt.zero123_config, ckpt=opt.zero123_ckpt, vram_O=opt.vram_O, t_range=opt.t_range, opt=opt) + + if 'clip' in opt.guidance: + from guidance.clip_utils import CLIP + guidance['clip'] = CLIP(device) + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, + optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, 
scheduler_update_every_step=True) + + trainer.default_view_data = train_loader._data.get_default_view_data() + + + + + if opt.gui: + from nerf.gui import NeRFGUI + gui = NeRFGUI(opt, trainer, train_loader) + gui.render() + + else: + valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, size=opt.dataset_size_valid).dataloader(batch_size=1) + test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1) + + # # test output + # trainer.test(test_loader,save_path = os.path.join(opt.workspace, 'initiation')) + + # TO BE DEBUGGED + + max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32) + trainer.train(train_loader, valid_loader, test_loader, max_epoch) + + if opt.save_mesh: + trainer.save_mesh() diff --git a/stable-dreamfusion-3DPortrait/main_3DPortraitGAN_cam.py b/stable-dreamfusion-3DPortrait/main_3DPortraitGAN_cam.py new file mode 100644 index 0000000..e5d0671 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/main_3DPortraitGAN_cam.py @@ -0,0 +1,480 @@ +import os + +import torch +import argparse +import pandas as pd +import sys + +from nerf.provider_3DPortraitGAN import NeRFDataset +from nerf.utils import * + + +if __name__ == '__main__': + # See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre + class LoadFromFile (argparse.Action): + def __call__ (self, parser, namespace, values, option_string = None): + with values as f: + # parse arguments in the file and store them in the target namespace + parser.parse_args(f.read().split(), namespace) + + parser = argparse.ArgumentParser() + parser.add_argument('--file', type=open, action=LoadFromFile, help="specify a file filled with more arguments") + parser.add_argument('--text', default=None, help="text prompt") + parser.add_argument('--negative', default='', type=str, help="negative text prompt") + parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray") + parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla") + parser.add_argument('--test', action='store_true', help="test mode") + parser.add_argument('--six_views', action='store_true', help="six_views mode: save the images of the six views") + parser.add_argument('--eval_interval', type=int, default=1, help="evaluate on the valid set every interval epochs") + parser.add_argument('--test_interval', type=int, default=10, help="test on the test set every interval epochs") + parser.add_argument('--workspace', type=str, default='workspace') + parser.add_argument('--seed', default=None) + + parser.add_argument('--image', default=None, help="image prompt") + parser.add_argument('--image_config', default=None, help="image config csv") + + parser.add_argument('--known_view_interval', type=int, default=4, help="train default view with RGB loss every & iters, only valid if --image is not None.") + + parser.add_argument('--IF', action='store_true', help="experimental: use DeepFloyd IF as the guidance model for nerf stage") + + parser.add_argument('--guidance', type=str, nargs='*', default=['SD'], help='guidance model') + parser.add_argument('--guidance_scale', type=float, default=100, help="diffusion model classifier-free guidance scale") + + parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture") + parser.add_argument('--mcubes_resolution', type=int, default=256, help="mcubes resolution 
for extracting mesh") + parser.add_argument('--decimate_target', type=int, default=5e4, help="target face number for mesh decimation") + + parser.add_argument('--dmtet', action='store_true', help="use dmtet finetuning") + parser.add_argument('--tet_grid_size', type=int, default=128, help="tet grid size") + parser.add_argument('--init_with', type=str, default='', help="ckpt to init dmtet") + parser.add_argument('--lock_geo', action='store_true', help="disable dmtet to learn geometry") + + ## Perp-Neg options + parser.add_argument('--perpneg', action='store_true', help="use perp_neg") + parser.add_argument('--negative_w', type=float, default=-2, help="The scale of the weights of negative prompts. A larger value will help to avoid the Janus problem, but may cause flat faces. Vary between 0 to -4, depending on the prompt") + parser.add_argument('--front_decay_factor', type=float, default=2, help="decay factor for the front prompt") + parser.add_argument('--side_decay_factor', type=float, default=10, help="decay factor for the side prompt") + + + ## Trigrid options + parser.add_argument('--trigrid_path', type=str, default='', help="path to trigrid") + parser.add_argument('--trigrid_decoder_ckpt', type=str, default='', help="path to trigrid decoder ckpt") + parser.add_argument('--train_decoder', action='store_true', help="train trigrid decoder") + parser.add_argument('--learnable_bg', action='store_true', help="Learnable background") + parser.add_argument('--trigrid_lr_ratio', type=float, nargs='+', default=[100,100,100,100,100,100,100,40], help="stable diffusion time steps range") + parser.add_argument('--scheduler_annealing', action='store_true', help="annealing scheduler") + parser.add_argument('--use_body_pose', action='store_true', help="use_body_pose") + + ### training options + parser.add_argument('--iters', type=int, default=10000, help="training iters") + parser.add_argument('--lr', type=float, default=1e-3, help="max learning rate") + parser.add_argument('--ckpt', type=str, default='latest', help="possible options are ['latest', 'scratch', 'best', 'latest_model']") + parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") + parser.add_argument('--taichi_ray', action='store_true', help="use taichi raymarching") + parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)") + parser.add_argument('--num_steps', type=int, default=64, help="num steps sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--upsample_steps', type=int, default=32, help="num steps up-sampled per ray (only valid when not using --cuda_ray)") + parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") + parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)") + parser.add_argument('--latent_iter_ratio', type=float, default=0.2, help="training iters that only use albedo shading") + parser.add_argument('--albedo_iter_ratio', type=float, default=0, help="training iters that only use albedo shading") + parser.add_argument('--min_ambient_ratio', type=float, default=0.1, help="minimum ambient ratio to use in lambertian shading") + parser.add_argument('--textureless_ratio', type=float, default=0.2, help="ratio of textureless shading") + parser.add_argument('--jitter_pose', action='store_true', 
help="add jitters to the randomly sampled camera poses") + parser.add_argument('--jitter_center', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's center (camera location)") + parser.add_argument('--jitter_target', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')") + parser.add_argument('--jitter_up', type=float, default=0.02, help="amount of jitter to add to sampled camera pose's up-axis (i.e. 'camera roll')") + parser.add_argument('--uniform_sphere_rate', type=float, default=0, help="likelihood of sampling camera location uniformly on the sphere surface area") + parser.add_argument('--grad_clip', type=float, default=-1, help="clip grad of all grad to this limit, negative value disables it") + parser.add_argument('--grad_clip_rgb', type=float, default=-1, help="clip grad of rgb space grad to this limit, negative value disables it") + # model options + parser.add_argument('--bg_radius', type=float, default=3.0, help="if positive, use a background model at sphere(bg_radius)") + parser.add_argument('--density_activation', type=str, default='exp', choices=['softplus', 'exp'], help="density activation function") + parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied") + parser.add_argument('--blob_density', type=float, default=5, help="max (center) density for the density blob") + parser.add_argument('--blob_radius', type=float, default=0.2, help="control the radius for the density blob") + # network backbone + parser.add_argument('--optim', type=str, default='adan', choices=['adan', 'adam'], help="optimizer") + parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version") + parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key") + # try this if CUDA OOM + parser.add_argument('--fp16', action='store_true', help="use float16 for training") + parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage") + # rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled. + parser.add_argument('--w', type=int, default=64, help="render width for NeRF in training") + parser.add_argument('--h', type=int, default=64, help="render height for NeRF in training") + parser.add_argument('--known_view_scale', type=float, default=1.5, help="multiply --h/w by this for known view rendering") + parser.add_argument('--known_view_noise_scale', type=float, default=2e-3, help="random camera noise added to rays_o and rays_d") + parser.add_argument('--dmtet_reso_scale', type=float, default=8, help="multiply --h/w by this for dmtet finetuning") + parser.add_argument('--batch_size', type=int, default=1, help="images to render per batch using NeRF") + + ### dataset options + parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box(-bound, bound)") + parser.add_argument('--dt_gamma', type=float, default=0, help="dt_gamma (>=0) for adaptive ray marching. 
set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") + parser.add_argument('--min_near', type=float, default=0.01, help="minimum near distance for camera") + + parser.add_argument('--radius_range', type=float, nargs='*', default=[2.7, 2.7], + help="training camera radius range") + parser.add_argument('--theta_range', type=float, nargs='*', default=[60, 105], + help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.") + parser.add_argument('--phi_range', type=float, nargs='*', default=[-180, 180], + help="training camera range along the azimuth angles (i.e. left and right). See advanced.md for details.") + parser.add_argument('--fovy_range', type=float, nargs='*', default=[12.447863, 12.447864], + help="training camera fovy range") + + parser.add_argument('--default_radius', type=float, default=2.7, help="radius for the default view") + parser.add_argument('--default_polar', type=float, default=90, help="polar for the default view") + parser.add_argument('--default_azimuth', type=float, default=0, help="azimuth for the default view") + parser.add_argument('--default_fovy', type=float, default=12.447863, help="fovy for the default view") + + parser.add_argument('--progressive_view', action='store_true', help="progressively expand view sampling range from default to full") + parser.add_argument('--progressive_view_init_ratio', type=float, default=0.2, help="initial ratio of final range, used for progressive_view") + + parser.add_argument('--progressive_level', action='store_true', help="progressively increase gridencoder's max_level") + + parser.add_argument('--angle_overhead', type=float, default=30, help="[0, angle_overhead] is the overhead region") + parser.add_argument('--angle_front', type=float, default=60, help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.") + parser.add_argument('--t_range', type=float, nargs='+', default=[0.02, 0.98], help="stable diffusion time steps range") + parser.add_argument('--dont_override_stuff',action='store_true', help="Don't override t_range, etc.") + + + ### regularizations + parser.add_argument('--lambda_entropy', type=float, default=1e-3, help="loss scale for alpha entropy") + parser.add_argument('--lambda_opacity', type=float, default=0, help="loss scale for alpha value") + parser.add_argument('--lambda_orient', type=float, default=1e-2, help="loss scale for orientation") + parser.add_argument('--lambda_tv', type=float, default=0, help="loss scale for total variation") + parser.add_argument('--lambda_wd', type=float, default=0, help="loss scale") + + parser.add_argument('--lambda_mesh_normal', type=float, default=0.5, help="loss scale for mesh normal smoothness") + parser.add_argument('--lambda_mesh_laplacian', type=float, default=0.5, help="loss scale for mesh laplacian") + + parser.add_argument('--lambda_guidance', type=float, default=1, help="loss scale for SDS") + parser.add_argument('--lambda_rgb', type=float, default=1000, help="loss scale for RGB") + parser.add_argument('--lambda_mask', type=float, default=500, help="loss scale for mask (alpha)") + parser.add_argument('--lambda_normal', type=float, default=0, help="loss scale for normal map") + parser.add_argument('--lambda_depth', type=float, default=10, help="loss scale for relative depth") + parser.add_argument('--lambda_2d_normal_smooth', type=float, default=0, help="loss scale for 2D normal image smoothness") + 
parser.add_argument('--lambda_3d_normal_smooth', type=float, default=0, help="loss scale for 3D normal image smoothness") + + ### debugging options + parser.add_argument('--save_guidance', action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. Useful for debugging, but VERY SLOW and takes lots of memory!") + parser.add_argument('--save_guidance_interval', type=int, default=10, help="save guidance every X step") + + ### GUI options + parser.add_argument('--gui', action='store_true', help="start a GUI") + parser.add_argument('--W', type=int, default=800, help="GUI width") + parser.add_argument('--H', type=int, default=800, help="GUI height") + parser.add_argument('--radius', type=float, default=5, help="default GUI camera radius from center") + parser.add_argument('--fovy', type=float, default=20, help="default GUI camera fovy") + parser.add_argument('--light_theta', type=float, default=60, help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]") + parser.add_argument('--light_phi', type=float, default=0, help="default GUI light direction in [0, 360), azimuth") + parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") + + parser.add_argument('--zero123_config', type=str, default='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', help="config file for zero123") + parser.add_argument('--zero123_ckpt', type=str, default='pretrained/zero123/zero123-xl.ckpt', help="ckpt for zero123") + parser.add_argument('--zero123_grad_scale', type=str, default='angle', help="whether to scale the gradients based on 'angle' or 'None'") + + parser.add_argument('--dataset_size_train', type=int, default=100, help="Length of train dataset i.e. 
# of iterations per epoch") + parser.add_argument('--dataset_size_valid', type=int, default=8, help="# of frames to render in the turntable video in validation") + parser.add_argument('--dataset_size_test', type=int, default=100, help="# of frames to render in the turntable video at test time") + + parser.add_argument('--exp_start_iter', type=int, default=None, help="start iter # for experiment, to calculate progressive_view and progressive_level") + parser.add_argument('--exp_end_iter', type=int, default=None, help="end iter # for experiment, to calculate progressive_view and progressive_level") + + opt = parser.parse_args() + if opt.O: + raise NotImplementedError + opt.fp16 = True + opt.cuda_ray = True + + elif opt.O2: + raise NotImplementedError + opt.fp16 = True + opt.progressive_level = True + + if opt.IF: + if 'SD' in opt.guidance: + opt.guidance.remove('SD') + opt.guidance.append('IF') + opt.latent_iter_ratio = 0 # must not do as_latent + + opt.images, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.zero123_ws = [], [], [], [], [] + opt.default_zero123_w = 1 + + opt.exp_start_iter = opt.exp_start_iter or 0 + opt.exp_end_iter = opt.exp_end_iter or opt.iters + + # parameters for image-conditioned generation + if opt.image is not None or opt.image_config is not None: + + if opt.text is None: + # use zero123 guidance model when only providing image + opt.guidance = ['zero123'] + if not opt.dont_override_stuff: + opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov + opt.guidance_scale = 5 + opt.lambda_3d_normal_smooth = 10 + else: + # use stable-diffusion when providing both text and image + opt.guidance = ['SD', 'clip'] + + if not opt.dont_override_stuff: + opt.guidance_scale = 10 + opt.t_range = [0.2, 0.6] + opt.known_view_interval = 2 + opt.lambda_3d_normal_smooth = 20 + opt.bg_radius = -1 + + # smoothness + opt.lambda_entropy = 1 + opt.lambda_orient = 1 + + # latent warmup is not needed + opt.latent_iter_ratio = 0 + if not opt.dont_override_stuff: + opt.albedo_iter_ratio = 0 + + # make shape init more stable + opt.progressive_view = True + opt.progressive_level = True + + if opt.image is not None: + opt.images += [opt.image] + opt.ref_radii += [opt.default_radius] + opt.ref_polars += [opt.default_polar] + opt.ref_azimuths += [opt.default_azimuth] + opt.zero123_ws += [opt.default_zero123_w] + + if opt.image_config is not None: + # for multiview (zero123) + conf = pd.read_csv(opt.image_config, skipinitialspace=True) + opt.images += list(conf.image) + opt.ref_radii += list(conf.radius) + opt.ref_polars += list(conf.polar) + opt.ref_azimuths += list(conf.azimuth) + opt.zero123_ws += list(conf.zero123_weight) + if opt.image is None: + opt.default_radius = opt.ref_radii[0] + opt.default_polar = opt.ref_polars[0] + opt.default_azimuth = opt.ref_azimuths[0] + opt.default_zero123_w = opt.zero123_ws[0] + + # reset to None + if len(opt.images) == 0: + opt.images = None + + + if opt.learnable_bg: + assert opt.bg_radius> max(opt.radius_range), f"bg_radius must be larger than max(radius_range) = {max(opt.radius_range)}" + + + # default parameters for finetuning + if opt.dmtet: + + opt.h = int(opt.h * opt.dmtet_reso_scale) + opt.w = int(opt.w * opt.dmtet_reso_scale) + opt.known_view_scale = 1 + + if not opt.dont_override_stuff: + opt.t_range = [0.02, 0.50] # ref: magic3D + + if opt.images is not None: + + opt.lambda_normal = 0 + opt.lambda_depth = 0 + + if opt.text is not None and not opt.dont_override_stuff: + opt.t_range = [0.20, 0.50] + + 
# assume finetuning + opt.latent_iter_ratio = 0 + opt.albedo_iter_ratio = 0 + opt.progressive_view = False + # opt.progressive_level = False + os.makedirs(opt.workspace, exist_ok=True) + # record full range for progressive view expansion + if opt.progressive_view: + if not opt.dont_override_stuff: + # disable as they disturb progressive view + opt.jitter_pose = False + + opt.uniform_sphere_rate = 0 + # back up full range + opt.full_radius_range = opt.radius_range + opt.full_theta_range = opt.theta_range + opt.full_phi_range = opt.phi_range + opt.full_fovy_range = opt.fovy_range + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + from nerf.network_trigrid_heirarchy import NeRFNetwork + + # load plane from pkl + if os.path.isfile(opt.trigrid_path): + print(f'loading plane from {opt.trigrid_path}...') + import pickle + threeDRepresentation = {} + threeDRepresentation_shapes = {} + with open(opt.trigrid_path, 'rb') as f: + data = pickle.load(f) + ws = data['ws'] + threeDRepresentation['ws'] = ws + threeDRepresentation_shapes['ws'] = ws.shape + for key in data: + if 'trigrids' in key: + threeDRepresentation[key] = data[key]#.view(1, 3, -1, data[key].shape[-2], data[key].shape[-1]) + + threeDRepresentation_shapes[key] =threeDRepresentation[key].shape + + + print('save trigrid to workspace...') + shutil.copy(opt.trigrid_path, os.path.join(opt.workspace, 'trigrid.pkl')) + + + + model = NeRFNetwork( + opt=opt, + device=device, + trigrid_shapes=threeDRepresentation_shapes + ) + + # load + if os.path.isfile(opt.trigrid_decoder_ckpt): + print(f'loading trigrid_renderer from {opt.trigrid_decoder_ckpt}...') + ckpt = torch.load(opt.trigrid_decoder_ckpt, map_location=lambda storage, loc: storage) + # ckpt = {'params': params, 'state_dict': ckpt} + state_dict = ckpt['state_dict'] + params = ckpt['params'] + print(f'loading params: {params}') + + model.model.load_state_dict(state_dict, strict=True) + # + model.model.rendering_kwargs['depth_resolution'] = 48 + model.model.rendering_kwargs['depth_resolution_importance'] = 48 + # + model.model.rendering_kwargs['ray_start'] = 2.35 + # + # load plane from pkl + if os.path.isfile(opt.trigrid_path): + model.load_state_dict( + threeDRepresentation, strict=False + ) + + print(opt) + + if opt.seed is not None: + seed_everything(int(opt.seed)) + + # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + # + + + + + print(model) + + if opt.six_views: + raise NotImplementedError(f'--six_views {opt.six_views} is not implemented!') + guidance = None # no need to load guidance model at test + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt) + + test_loader = NeRFDataset(opt, device=device, type='six_views', H=opt.H, W=opt.W, size=6).dataloader(batch_size=1) + trainer.test(test_loader, write_video=False) + + if opt.save_mesh: + trainer.save_mesh() + + elif opt.test: + raise NotImplementedError(f'--test {opt.test} is not implemented!') + guidance = None # no need to load guidance model at test + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt) + + if opt.gui: + from nerf.gui import NeRFGUI + gui = NeRFGUI(opt, trainer) + gui.render() + + else: + test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1) + trainer.test(test_loader) + + if opt.save_mesh: + 
trainer.save_mesh() + + else: + + train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, size=opt.dataset_size_train * opt.batch_size).dataloader() + # data = { + # 'H': self.H, + # 'W': self.W, + # 'rays_o': rays['rays_o'], + # 'rays_d': rays['rays_d'], + # 'dir': dirs, + # 'mvp': mvp, + # 'polar': delta_polar, + # 'azimuth': delta_azimuth, + # 'radius': delta_radius, + # } + if opt.optim == 'adan': + from optimizer import Adan + + # Adan usually requires a larger LR + optimizer = lambda model: Adan(model.get_params(5 * opt.lr,trigrid_lr_ratio = opt.trigrid_lr_ratio), eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, + foreach=False) + else: # adam + optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr,trigrid_lr_ratio = opt.trigrid_lr_ratio), betas=(0.9, 0.99), eps=1e-15) + + if opt.scheduler_annealing: + print('=============== use scheduler: annealing') + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, + lambda iter: 0.1 ** min(iter / opt.iters, 1)) + else: + scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed + # scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1)) + + guidance = nn.ModuleDict() + + if 'SD' in opt.guidance: + from guidance.sd_utils import StableDiffusion + guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range) + + if 'IF' in opt.guidance: + from guidance.if_utils import IF + guidance['IF'] = IF(device, opt.vram_O, opt.t_range) + + if 'zero123' in opt.guidance: + from guidance.zero123_utils import Zero123 + guidance['zero123'] = Zero123(device=device, fp16=opt.fp16, config=opt.zero123_config, ckpt=opt.zero123_ckpt, vram_O=opt.vram_O, t_range=opt.t_range, opt=opt) + + if 'clip' in opt.guidance: + from guidance.clip_utils import CLIP + guidance['clip'] = CLIP(device) + + trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, + optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, scheduler_update_every_step=True) + + trainer.default_view_data = train_loader._data.get_default_view_data() + + + + + if opt.gui: + from nerf.gui import NeRFGUI + gui = NeRFGUI(opt, trainer, train_loader) + gui.render() + + else: + valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, size=opt.dataset_size_valid).dataloader(batch_size=1) + test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1) + + # # test output + # trainer.test(test_loader,save_path = os.path.join(opt.workspace, 'initiation')) + + # TO BE DEBUGGED + + max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32) + trainer.train(train_loader, valid_loader, test_loader, max_epoch) + + if opt.save_mesh: + trainer.save_mesh() diff --git a/stable-dreamfusion-3DPortrait/meshutils.py b/stable-dreamfusion-3DPortrait/meshutils.py new file mode 100644 index 0000000..4d1c53d --- /dev/null +++ b/stable-dreamfusion-3DPortrait/meshutils.py @@ -0,0 +1,117 @@ +import numpy as np +import pymeshlab as pml + +def poisson_mesh_reconstruction(points, normals=None): + # points/normals: [N, 3] np.ndarray + + import open3d as o3d + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points) + + # outlier removal + pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=10) + + # normals + if normals is None: + 
pcd.estimate_normals() + else: + pcd.normals = o3d.utility.Vector3dVector(normals[ind]) + + # visualize + o3d.visualization.draw_geometries([pcd], point_show_normal=False) + + mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9) + vertices_to_remove = densities < np.quantile(densities, 0.1) + mesh.remove_vertices_by_mask(vertices_to_remove) + + # visualize + o3d.visualization.draw_geometries([mesh]) + + vertices = np.asarray(mesh.vertices) + triangles = np.asarray(mesh.triangles) + + print(f'[INFO] poisson mesh reconstruction: {points.shape} --> {vertices.shape} / {triangles.shape}') + + return vertices, triangles + + +def decimate_mesh(verts, faces, target, backend='pymeshlab', remesh=False, optimalplacement=True): + # optimalplacement: default is True, but for flat mesh must turn False to prevent spike artifect. + + _ori_vert_shape = verts.shape + _ori_face_shape = faces.shape + + if backend == 'pyfqmr': + import pyfqmr + solver = pyfqmr.Simplify() + solver.setMesh(verts, faces) + solver.simplify_mesh(target_count=target, preserve_border=False, verbose=False) + verts, faces, normals = solver.getMesh() + else: + + m = pml.Mesh(verts, faces) + ms = pml.MeshSet() + ms.add_mesh(m, 'mesh') # will copy! + + # filters + # ms.meshing_decimation_clustering(threshold=pml.Percentage(1)) + ms.meshing_decimation_quadric_edge_collapse(targetfacenum=int(target), optimalplacement=optimalplacement) + + if remesh: + # ms.apply_coord_taubin_smoothing() + ms.meshing_isotropic_explicit_remeshing(iterations=3, targetlen=pml.Percentage(1)) + + # extract mesh + m = ms.current_mesh() + verts = m.vertex_matrix() + faces = m.face_matrix() + + print(f'[INFO] mesh decimation: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}') + + return verts, faces + + +def clean_mesh(verts, faces, v_pct=1, min_f=8, min_d=5, repair=True, remesh=True, remesh_size=0.01): + # verts: [N, 3] + # faces: [N, 3] + + _ori_vert_shape = verts.shape + _ori_face_shape = faces.shape + + m = pml.Mesh(verts, faces) + ms = pml.MeshSet() + ms.add_mesh(m, 'mesh') # will copy! 
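+    # The filters below run in sequence on the MeshSet copy: drop vertices not
+    # referenced by any face, merge near-duplicate vertices (threshold relative
+    # to the bounding-box diagonal), remove duplicate and zero-area faces,
+    # discard small disconnected components (by diameter and by face count),
+    # optionally repair non-manifold edges and vertices, and finally remesh to
+    # the requested edge length.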
+ + # filters + ms.meshing_remove_unreferenced_vertices() # verts not refed by any faces + + if v_pct > 0: + ms.meshing_merge_close_vertices(threshold=pml.Percentage(v_pct)) # 1/10000 of bounding box diagonal + + ms.meshing_remove_duplicate_faces() # faces defined by the same verts + ms.meshing_remove_null_faces() # faces with area == 0 + + if min_d > 0: + ms.meshing_remove_connected_component_by_diameter(mincomponentdiag=pml.Percentage(min_d)) + + if min_f > 0: + ms.meshing_remove_connected_component_by_face_number(mincomponentsize=min_f) + + if repair: + # ms.meshing_remove_t_vertices(method=0, threshold=40, repeat=True) + ms.meshing_repair_non_manifold_edges(method=0) + ms.meshing_repair_non_manifold_vertices(vertdispratio=0) + + if remesh: + # ms.apply_coord_taubin_smoothing() + ms.meshing_isotropic_explicit_remeshing(iterations=3, targetlen=pml.AbsoluteValue(remesh_size)) + + # extract mesh + m = ms.current_mesh() + verts = m.vertex_matrix() + faces = m.face_matrix() + + print(f'[INFO] mesh cleaning: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}') + + return verts, faces \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/dnnlib/__init__.py b/stable-dreamfusion-3DPortrait/nerf/dnnlib/__init__.py new file mode 100644 index 0000000..dd91ed1 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/dnnlib/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +from .util import EasyDict, make_cache_dir_path diff --git a/stable-dreamfusion-3DPortrait/nerf/dnnlib/util.py b/stable-dreamfusion-3DPortrait/nerf/dnnlib/util.py new file mode 100644 index 0000000..80b67c4 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/dnnlib/util.py @@ -0,0 +1,493 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +"""Miscellaneous utility classes and functions.""" + +import ctypes +import fnmatch +import importlib +import inspect +import numpy as np +import os +import shutil +import sys +import types +import io +import pickle +import re +import requests +import html +import hashlib +import glob +import tempfile +import urllib +import urllib.request +import uuid + +from distutils.util import strtobool +from typing import Any, List, Tuple, Union + + +# Util classes +# ------------------------------------------------------------------------------------------ + + +class EasyDict(dict): + """Convenience class that behaves like a dict but allows access with the attribute syntax.""" + + def __getattr__(self, name: str) -> Any: + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __setattr__(self, name: str, value: Any) -> None: + self[name] = value + + def __delattr__(self, name: str) -> None: + del self[name] + + +class Logger(object): + """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" + + def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): + self.file = None + + if file_name is not None: + self.file = open(file_name, file_mode) + + self.should_flush = should_flush + self.stdout = sys.stdout + self.stderr = sys.stderr + + sys.stdout = self + sys.stderr = self + + def __enter__(self) -> "Logger": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self.close() + + def write(self, text: Union[str, bytes]) -> None: + """Write text to stdout (and a file) and optionally flush.""" + if isinstance(text, bytes): + text = text.decode() + if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash + return + + if self.file is not None: + self.file.write(text) + + self.stdout.write(text) + + if self.should_flush: + self.flush() + + def flush(self) -> None: + """Flush written text to both stdout and a file, if open.""" + if self.file is not None: + self.file.flush() + + self.stdout.flush() + + def close(self) -> None: + """Flush, close possible files, and remove stdout/stderr mirroring.""" + self.flush() + + # if using multiple loggers, prevent closing in wrong order + if sys.stdout is self: + sys.stdout = self.stdout + if sys.stderr is self: + sys.stderr = self.stderr + + if self.file is not None: + self.file.close() + self.file = None + + +# Cache directories +# ------------------------------------------------------------------------------------------ + +_dnnlib_cache_dir = None + +def set_cache_dir(path: str) -> None: + global _dnnlib_cache_dir + _dnnlib_cache_dir = path + +def make_cache_dir_path(*paths: str) -> str: + if _dnnlib_cache_dir is not None: + return os.path.join(_dnnlib_cache_dir, *paths) + if 'DNNLIB_CACHE_DIR' in os.environ: + return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths) + if 'HOME' in os.environ: + return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths) + if 'USERPROFILE' in os.environ: + return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths) + return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths) + +# Small util functions +# ------------------------------------------------------------------------------------------ + + +def format_time(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + 
if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) + else: + return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) + + +def format_time_brief(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60) + else: + return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24) + + +def ask_yes_no(question: str) -> bool: + """Ask the user the question until the user inputs a valid answer.""" + while True: + try: + print("{0} [y/n]".format(question)) + return strtobool(input().lower()) + except ValueError: + pass + + +def tuple_product(t: Tuple) -> Any: + """Calculate the product of the tuple elements.""" + result = 1 + + for v in t: + result *= v + + return result + + +_str_to_ctype = { + "uint8": ctypes.c_ubyte, + "uint16": ctypes.c_uint16, + "uint32": ctypes.c_uint32, + "uint64": ctypes.c_uint64, + "int8": ctypes.c_byte, + "int16": ctypes.c_int16, + "int32": ctypes.c_int32, + "int64": ctypes.c_int64, + "float32": ctypes.c_float, + "float64": ctypes.c_double +} + + +def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: + """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" + type_str = None + + if isinstance(type_obj, str): + type_str = type_obj + elif hasattr(type_obj, "__name__"): + type_str = type_obj.__name__ + elif hasattr(type_obj, "name"): + type_str = type_obj.name + else: + raise RuntimeError("Cannot infer type name from input") + + assert type_str in _str_to_ctype.keys() + + my_dtype = np.dtype(type_str) + my_ctype = _str_to_ctype[type_str] + + assert my_dtype.itemsize == ctypes.sizeof(my_ctype) + + return my_dtype, my_ctype + + +def is_pickleable(obj: Any) -> bool: + try: + with io.BytesIO() as stream: + pickle.dump(obj, stream) + return True + except: + return False + + +# Functionality to import modules/objects by name, and call functions by name +# ------------------------------------------------------------------------------------------ + +def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: + """Searches for the underlying module behind the name to some python object. + Returns the module and the object name (original name with module part removed).""" + + # allow convenience shorthands, substitute them by full names + obj_name = re.sub("^np.", "numpy.", obj_name) + obj_name = re.sub("^tf.", "tensorflow.", obj_name) + + # list alternatives for (module_name, local_obj_name) + parts = obj_name.split(".") + name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] + + # try each alternative in turn + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + return module, local_obj_name + except: + pass + + # maybe some of the modules themselves contain errors? 
+ for module_name, _local_obj_name in name_pairs: + try: + importlib.import_module(module_name) # may raise ImportError + except ImportError: + if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): + raise + + # maybe the requested attribute is missing? + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + except ImportError: + pass + + # we are out of luck, but we have no idea why + raise ImportError(obj_name) + + +def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: + """Traverses the object name and returns the last (rightmost) python object.""" + if obj_name == '': + return module + obj = module + for part in obj_name.split("."): + obj = getattr(obj, part) + return obj + + +def get_obj_by_name(name: str) -> Any: + """Finds the python object with the given name.""" + module, obj_name = get_module_from_obj_name(name) + return get_obj_from_module(module, obj_name) + + +def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: + """Finds the python object with the given name and calls it as a function.""" + assert func_name is not None + func_obj = get_obj_by_name(func_name) + assert callable(func_obj) + return func_obj(*args, **kwargs) + + +def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any: + """Finds the python class with the given name and constructs it with the given arguments.""" + return call_func_by_name(*args, func_name=class_name, **kwargs) + + +def get_module_dir_by_obj_name(obj_name: str) -> str: + """Get the directory path of the module containing the given object name.""" + module, _ = get_module_from_obj_name(obj_name) + return os.path.dirname(inspect.getfile(module)) + + +def is_top_level_function(obj: Any) -> bool: + """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" + return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ + + +def get_top_level_function_name(obj: Any) -> str: + """Return the fully-qualified name of a top-level function.""" + assert is_top_level_function(obj) + module = obj.__module__ + if module == '__main__': + module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0] + return module + "." + obj.__name__ + + +# File system helpers +# ------------------------------------------------------------------------------------------ + +def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: + """List all files recursively in a given directory while ignoring given file and directory names. 
+ Returns list of tuples containing both absolute and relative paths.""" + assert os.path.isdir(dir_path) + base_name = os.path.basename(os.path.normpath(dir_path)) + + if ignores is None: + ignores = [] + + result = [] + + for root, dirs, files in os.walk(dir_path, topdown=True): + for ignore_ in ignores: + dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] + + # dirs need to be edited in-place + for d in dirs_to_remove: + dirs.remove(d) + + files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] + + absolute_paths = [os.path.join(root, f) for f in files] + relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] + + if add_base_to_relative: + relative_paths = [os.path.join(base_name, p) for p in relative_paths] + + assert len(absolute_paths) == len(relative_paths) + result += zip(absolute_paths, relative_paths) + + return result + + +def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: + """Takes in a list of tuples of (src, dst) paths and copies files. + Will create all necessary directories.""" + for file in files: + target_dir_name = os.path.dirname(file[1]) + + # will create all intermediate-level directories + if not os.path.exists(target_dir_name): + os.makedirs(target_dir_name) + + shutil.copyfile(file[0], file[1]) + + +# URL helpers +# ------------------------------------------------------------------------------------------ + +def is_url(obj: Any, allow_file_urls: bool = False) -> bool: + """Determine whether the given object is a valid URL string.""" + if not isinstance(obj, str) or not "://" in obj: + return False + if allow_file_urls and obj.startswith('file://'): + return True + try: + res = requests.compat.urlparse(obj) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + except: + return False + return True + + +def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any: + """Download the given URL and return a binary-mode file object to access the data.""" + assert num_attempts >= 1 + assert not (return_filename and (not cache)) + + # Doesn't look like an URL scheme so interpret it as a local filename. + if not re.match('^[a-z]+://', url): + return url if return_filename else open(url, "rb") + + # Handle file URLs. This code handles unusual file:// patterns that + # arise on Windows: + # + # file:///c:/foo.txt + # + # which would translate to a local '/c:/foo.txt' filename that's + # invalid. Drop the forward slash for such pathnames. + # + # If you touch this code path, you should test it on both Linux and + # Windows. + # + # Some internet resources suggest using urllib.request.url2pathname() but + # but that converts forward slashes to backslashes and this causes + # its own set of problems. + if url.startswith('file://'): + filename = urllib.parse.urlparse(url).path + if re.match(r'^/[a-zA-Z]:', filename): + filename = filename[1:] + return filename if return_filename else open(filename, "rb") + + assert is_url(url) + + # Lookup from cache. 
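+    # The download cache is keyed by the MD5 hash of the URL; if exactly one
+    # cached file matches, it is returned directly and no request is made.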
+ if cache_dir is None: + cache_dir = make_cache_dir_path('downloads') + + url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() + if cache: + cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) + if len(cache_files) == 1: + filename = cache_files[0] + return filename if return_filename else open(filename, "rb") + + # Download. + url_name = None + url_data = None + with requests.Session() as session: + if verbose: + print("Downloading %s ..." % url, end="", flush=True) + for attempts_left in reversed(range(num_attempts)): + try: + with session.get(url) as res: + res.raise_for_status() + if len(res.content) == 0: + raise IOError("No data received") + + if len(res.content) < 8192: + content_str = res.content.decode("utf-8") + if "download_warning" in res.headers.get("Set-Cookie", ""): + links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] + if len(links) == 1: + url = requests.compat.urljoin(url, links[0]) + raise IOError("Google Drive virus checker nag") + if "Google Drive - Quota exceeded" in content_str: + raise IOError("Google Drive download quota exceeded -- please try again later") + + match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) + url_name = match[1] if match else url + url_data = res.content + if verbose: + print(" done") + break + except KeyboardInterrupt: + raise + except: + if not attempts_left: + if verbose: + print(" failed") + raise + if verbose: + print(".", end="", flush=True) + + # Save to cache. + if cache: + safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) + cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) + temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) + os.makedirs(cache_dir, exist_ok=True) + with open(temp_file, "wb") as f: + f.write(url_data) + os.replace(temp_file, cache_file) # atomic + if return_filename: + return cache_file + + # Return data as file object. + assert not return_filename + return io.BytesIO(url_data) diff --git a/stable-dreamfusion-3DPortrait/nerf/gui.py b/stable-dreamfusion-3DPortrait/nerf/gui.py new file mode 100644 index 0000000..65faa5c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/gui.py @@ -0,0 +1,485 @@ +import math +import torch +import numpy as np +import dearpygui.dearpygui as dpg +from scipy.spatial.transform import Rotation as R + +from nerf.utils import * + + +class OrbitCamera: + def __init__(self, W, H, r=2, fovy=60): + self.W = W + self.H = H + self.radius = r # camera distance from center + self.fovy = fovy # in degree + self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point + self.rot = R.from_matrix(np.eye(3)) + self.up = np.array([0, 1, 0], dtype=np.float32) # need to be normalized! 
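+        # Near/far clip planes; these only feed the `mvp` projection matrix below.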
+ self.near = 0.001 + self.far = 1000 + + # pose + @property + def pose(self): + # first move camera to radius + res = np.eye(4, dtype=np.float32) + res[2, 3] = self.radius + # rotate + rot = np.eye(4, dtype=np.float32) + rot[:3, :3] = self.rot.as_matrix() + res = rot @ res + # translate + res[:3, 3] -= self.center + return res + + # intrinsics + @property + def intrinsics(self): + focal = self.H / (2 * np.tan(np.deg2rad(self.fovy) / 2)) + return np.array([focal, focal, self.W // 2, self.H // 2]) + + @property + def mvp(self): + focal = self.H / (2 * np.tan(np.deg2rad(self.fovy) / 2)) + projection = np.array([ + [2*focal/self.W, 0, 0, 0], + [0, -2*focal/self.H, 0, 0], + [0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)], + [0, 0, -1, 0] + ], dtype=np.float32) + + return projection @ np.linalg.inv(self.pose) # [4, 4] + + def orbit(self, dx, dy): + # rotate along camera up/side axis! + side = self.rot.as_matrix()[:3, 0] # why this is side --> ? # already normalized. + rotvec_x = self.up * np.deg2rad(-0.1 * dx) + rotvec_y = side * np.deg2rad(-0.1 * dy) + self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot + + def scale(self, delta): + self.radius *= 1.1 ** (-delta) + + def pan(self, dx, dy, dz=0): + # pan in camera coordinate system (careful on the sensitivity!) + self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, -dy, dz]) + + +class NeRFGUI: + def __init__(self, opt, trainer, loader=None, debug=True): + self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters. + self.W = opt.W + self.H = opt.H + self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy) + self.debug = debug + self.bg_color = torch.ones(3, dtype=torch.float32) # default white bg + self.training = False + self.step = 0 # training step + + self.trainer = trainer + self.loader = loader + self.render_buffer = np.zeros((self.W, self.H, 3), dtype=np.float32) + self.need_update = True # camera moved, should reset accumulation + self.spp = 1 # sample per pixel + self.light_dir = np.array([opt.light_theta, opt.light_phi]) + self.ambient_ratio = 1.0 + self.mode = 'image' # choose from ['image', 'depth'] + self.shading = 'albedo' + + self.dynamic_resolution = True if not self.opt.dmtet else False + self.downscale = 1 + self.train_steps = 16 + + dpg.create_context() + self.register_dpg() + self.test_step() + + + def __del__(self): + dpg.destroy_context() + + + def train_step(self): + + starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True) + starter.record() + + outputs = self.trainer.train_gui(self.loader, step=self.train_steps) + + ender.record() + torch.cuda.synchronize() + t = starter.elapsed_time(ender) + + self.step += self.train_steps + self.need_update = True + + dpg.set_value("_log_train_time", f'{t:.4f}ms ({int(1000/t)} FPS)') + dpg.set_value("_log_train_log", f'step = {self.step: 5d} (+{self.train_steps: 2d}), loss = {outputs["loss"]:.4f}, lr = {outputs["lr"]:.5f}') + + # dynamic train steps + # max allowed train time per-frame is 500 ms + full_t = t / self.train_steps * 16 + train_steps = min(16, max(4, int(16 * 500 / full_t))) + if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8: + self.train_steps = train_steps + + + def prepare_buffer(self, outputs): + if self.mode == 'image': + return outputs['image'].astype(np.float32) + else: + depth = outputs['depth'].astype(np.float32) + depth = (depth - depth.min()) / (depth.max() - 
depth.min() + 1e-6) + return np.expand_dims(depth, -1).repeat(3, -1) + + + def test_step(self): + + if self.need_update or self.spp < self.opt.max_spp: + + starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True) + starter.record() + + outputs = self.trainer.test_gui(self.cam.pose, self.cam.intrinsics, self.cam.mvp, self.W, self.H, self.bg_color, self.spp, self.downscale, self.light_dir, self.ambient_ratio, self.shading) + + ender.record() + torch.cuda.synchronize() + t = starter.elapsed_time(ender) + + # update dynamic resolution + if self.dynamic_resolution: + # max allowed infer time per-frame is 200 ms + full_t = t / (self.downscale ** 2) + downscale = min(1, max(1/4, math.sqrt(200 / full_t))) + if downscale > self.downscale * 1.2 or downscale < self.downscale * 0.8: + self.downscale = downscale + + if self.need_update: + self.render_buffer = self.prepare_buffer(outputs) + self.spp = 1 + self.need_update = False + else: + self.render_buffer = (self.render_buffer * self.spp + self.prepare_buffer(outputs)) / (self.spp + 1) + self.spp += 1 + + dpg.set_value("_log_infer_time", f'{t:.4f}ms ({int(1000/t)} FPS)') + dpg.set_value("_log_resolution", f'{int(self.downscale * self.W)}x{int(self.downscale * self.H)}') + dpg.set_value("_log_spp", self.spp) + dpg.set_value("_texture", self.render_buffer) + + + def register_dpg(self): + + ### register texture + + with dpg.texture_registry(show=False): + dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag="_texture") + + ### register window + + # the rendered image, as the primary window + with dpg.window(tag="_primary_window", width=self.W, height=self.H): + + # add the texture + dpg.add_image("_texture") + + dpg.set_primary_window("_primary_window", True) + + # control window + with dpg.window(label="Control", tag="_control_window", width=400, height=300): + + # text prompt + if self.opt.text is not None: + dpg.add_text("text: " + self.opt.text, tag="_log_prompt_text") + + if self.opt.negative != '': + dpg.add_text("negative text: " + self.opt.negative, tag="_log_prompt_negative_text") + + # button theme + with dpg.theme() as theme_button: + with dpg.theme_component(dpg.mvButton): + dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18)) + dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47)) + dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83)) + dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5) + dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3) + + # time + if not self.opt.test: + with dpg.group(horizontal=True): + dpg.add_text("Train time: ") + dpg.add_text("no data", tag="_log_train_time") + + with dpg.group(horizontal=True): + dpg.add_text("Infer time: ") + dpg.add_text("no data", tag="_log_infer_time") + + with dpg.group(horizontal=True): + dpg.add_text("SPP: ") + dpg.add_text("1", tag="_log_spp") + + # train button + if not self.opt.test: + with dpg.collapsing_header(label="Train", default_open=True): + with dpg.group(horizontal=True): + dpg.add_text("Train: ") + + def callback_train(sender, app_data): + if self.training: + self.training = False + dpg.configure_item("_button_train", label="start") + else: + self.training = True + dpg.configure_item("_button_train", label="stop") + + dpg.add_button(label="start", tag="_button_train", callback=callback_train) + dpg.bind_item_theme("_button_train", theme_button) + + def callback_reset(sender, app_data): + @torch.no_grad() + def weight_reset(m: nn.Module): + reset_parameters = getattr(m, 
"reset_parameters", None) + if callable(reset_parameters): + m.reset_parameters() + self.trainer.model.apply(fn=weight_reset) + self.trainer.model.reset_extra_state() # for cuda_ray density_grid and step_counter + self.need_update = True + + dpg.add_button(label="reset", tag="_button_reset", callback=callback_reset) + dpg.bind_item_theme("_button_reset", theme_button) + + + with dpg.group(horizontal=True): + dpg.add_text("Checkpoint: ") + + def callback_save(sender, app_data): + self.trainer.save_checkpoint(full=True, best=False) + dpg.set_value("_log_ckpt", "saved " + os.path.basename(self.trainer.stats["checkpoints"][-1])) + self.trainer.epoch += 1 # use epoch to indicate different calls. + + dpg.add_button(label="save", tag="_button_save", callback=callback_save) + dpg.bind_item_theme("_button_save", theme_button) + + dpg.add_text("", tag="_log_ckpt") + + # save mesh + with dpg.group(horizontal=True): + dpg.add_text("Marching Cubes: ") + + def callback_mesh(sender, app_data): + self.trainer.save_mesh() + dpg.set_value("_log_mesh", "saved " + f'{self.trainer.name}_{self.trainer.epoch}.ply') + self.trainer.epoch += 1 # use epoch to indicate different calls. + + dpg.add_button(label="mesh", tag="_button_mesh", callback=callback_mesh) + dpg.bind_item_theme("_button_mesh", theme_button) + + dpg.add_text("", tag="_log_mesh") + + with dpg.group(horizontal=True): + dpg.add_text("", tag="_log_train_log") + + + # rendering options + with dpg.collapsing_header(label="Options", default_open=True): + + # dynamic rendering resolution + with dpg.group(horizontal=True): + + def callback_set_dynamic_resolution(sender, app_data): + if self.dynamic_resolution: + self.dynamic_resolution = False + self.downscale = 1 + else: + self.dynamic_resolution = True + self.need_update = True + + dpg.add_checkbox(label="dynamic resolution", default_value=self.dynamic_resolution, callback=callback_set_dynamic_resolution) + dpg.add_text(f"{self.W}x{self.H}", tag="_log_resolution") + + # mode combo + def callback_change_mode(sender, app_data): + self.mode = app_data + self.need_update = True + + dpg.add_combo(('image', 'depth'), label='mode', default_value=self.mode, callback=callback_change_mode) + + # bg_color picker + def callback_change_bg(sender, app_data): + self.bg_color = torch.tensor(app_data[:3], dtype=torch.float32) # only need RGB in [0, 1] + self.need_update = True + + dpg.add_color_edit((255, 255, 255), label="Background Color", width=200, tag="_color_editor", no_alpha=True, callback=callback_change_bg) + + # fov slider + def callback_set_fovy(sender, app_data): + self.cam.fovy = app_data + self.need_update = True + + dpg.add_slider_int(label="FoV (vertical)", min_value=1, max_value=120, format="%d deg", default_value=self.cam.fovy, callback=callback_set_fovy) + + # dt_gamma slider + def callback_set_dt_gamma(sender, app_data): + self.opt.dt_gamma = app_data + self.need_update = True + + dpg.add_slider_float(label="dt_gamma", min_value=0, max_value=0.1, format="%.5f", default_value=self.opt.dt_gamma, callback=callback_set_dt_gamma) + + # max_steps slider + def callback_set_max_steps(sender, app_data): + self.opt.max_steps = app_data + self.need_update = True + + dpg.add_slider_int(label="max steps", min_value=1, max_value=1024, format="%d", default_value=self.opt.max_steps, callback=callback_set_max_steps) + + # aabb slider + def callback_set_aabb(sender, app_data, user_data): + # user_data is the dimension for aabb (xmin, ymin, zmin, xmax, ymax, zmax) + self.trainer.model.aabb_infer[user_data] = app_data + 
+ # also change train aabb ? [better not...] + #self.trainer.model.aabb_train[user_data] = app_data + + self.need_update = True + + dpg.add_separator() + dpg.add_text("Axis-aligned bounding box:") + + with dpg.group(horizontal=True): + dpg.add_slider_float(label="x", width=150, min_value=-self.opt.bound, max_value=0, format="%.2f", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=0) + dpg.add_slider_float(label="", width=150, min_value=0, max_value=self.opt.bound, format="%.2f", default_value=self.opt.bound, callback=callback_set_aabb, user_data=3) + + with dpg.group(horizontal=True): + dpg.add_slider_float(label="y", width=150, min_value=-self.opt.bound, max_value=0, format="%.2f", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=1) + dpg.add_slider_float(label="", width=150, min_value=0, max_value=self.opt.bound, format="%.2f", default_value=self.opt.bound, callback=callback_set_aabb, user_data=4) + + with dpg.group(horizontal=True): + dpg.add_slider_float(label="z", width=150, min_value=-self.opt.bound, max_value=0, format="%.2f", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=2) + dpg.add_slider_float(label="", width=150, min_value=0, max_value=self.opt.bound, format="%.2f", default_value=self.opt.bound, callback=callback_set_aabb, user_data=5) + + # light dir + def callback_set_light_dir(sender, app_data, user_data): + self.light_dir[user_data] = app_data + self.need_update = True + + dpg.add_separator() + dpg.add_text("Plane Light Direction:") + + with dpg.group(horizontal=True): + dpg.add_slider_float(label="theta", min_value=0, max_value=180, format="%.2f", default_value=self.opt.light_theta, callback=callback_set_light_dir, user_data=0) + + with dpg.group(horizontal=True): + dpg.add_slider_float(label="phi", min_value=0, max_value=360, format="%.2f", default_value=self.opt.light_phi, callback=callback_set_light_dir, user_data=1) + + # ambient ratio + def callback_set_abm_ratio(sender, app_data): + self.ambient_ratio = app_data + self.need_update = True + + dpg.add_slider_float(label="ambient", min_value=0, max_value=1.0, format="%.5f", default_value=self.ambient_ratio, callback=callback_set_abm_ratio) + + # shading mode + def callback_change_shading(sender, app_data): + self.shading = app_data + self.need_update = True + + dpg.add_combo(('albedo', 'lambertian', 'textureless', 'normal'), label='shading', default_value=self.shading, callback=callback_change_shading) + + + # debug info + if self.debug: + with dpg.collapsing_header(label="Debug"): + # pose + dpg.add_separator() + dpg.add_text("Camera Pose:") + dpg.add_text(str(self.cam.pose), tag="_log_pose") + + + ### register camera handler + + def callback_camera_drag_rotate(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + dx = app_data[1] + dy = app_data[2] + + self.cam.orbit(dx, dy) + self.need_update = True + + if self.debug: + dpg.set_value("_log_pose", str(self.cam.pose)) + + + def callback_camera_wheel_scale(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + delta = app_data + + self.cam.scale(delta) + self.need_update = True + + if self.debug: + dpg.set_value("_log_pose", str(self.cam.pose)) + + + def callback_camera_drag_pan(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + dx = app_data[1] + dy = app_data[2] + + self.cam.pan(dx, dy) + self.need_update = True + + if self.debug: + dpg.set_value("_log_pose", str(self.cam.pose)) + + + with dpg.handler_registry(): + 
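+            # Global mouse bindings: left-drag orbits the camera, the wheel zooms
+            # (scales the orbit radius), and right-drag pans; each dispatches to
+            # the callbacks defined above and marks the view for re-rendering.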
dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=callback_camera_drag_rotate) + dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale) + dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Right, callback=callback_camera_drag_pan) + + + dpg.create_viewport(title='torch-ngp', width=self.W, height=self.H, resizable=False) + + # TODO: seems dearpygui doesn't support resizing texture... + # def callback_resize(sender, app_data): + # self.W = app_data[0] + # self.H = app_data[1] + # # how to reload texture ??? + + # dpg.set_viewport_resize_callback(callback_resize) + + ### global theme + with dpg.theme() as theme_no_padding: + with dpg.theme_component(dpg.mvAll): + # set all padding to 0 to avoid scroll bar + dpg.add_theme_style(dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core) + dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core) + dpg.add_theme_style(dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core) + + dpg.bind_item_theme("_primary_window", theme_no_padding) + + dpg.setup_dearpygui() + + #dpg.show_metrics() + + dpg.show_viewport() + + + def render(self): + + while dpg.is_dearpygui_running(): + # update texture every frame + if self.training: + self.train_step() + self.test_step() + dpg.render_dearpygui_frame() \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/network.py b/stable-dreamfusion-3DPortrait/nerf/network.py new file mode 100644 index 0000000..aceea26 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/network.py @@ -0,0 +1,241 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from activation import trunc_exp +from .renderer import NeRFRenderer + +import numpy as np +from encoding import get_encoder + +from .utils import safe_normalize + +# TODO: not sure about the details... 
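+# Hedged usage sketch of the building blocks defined below (the shapes and the
+# `in_dim` placeholder are illustrative only, not part of the original file):
+#
+#   enc = encoder(x, ...)                   # x: [N, 3] -> enc: [N, in_dim] frequency features
+#   mlp = MLP(in_dim, 4, dim_hidden=64, num_layers=5, block=ResBlock)
+#   h = mlp(enc)                            # [N, 4]
+#   # h[..., 0] -> density (after activation + density blob), h[..., 1:] -> albedo (after sigmoid)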
+class ResBlock(nn.Module): + def __init__(self, dim_in, dim_out, bias=True): + super().__init__() + self.dim_in = dim_in + self.dim_out = dim_out + + self.dense = nn.Linear(self.dim_in, self.dim_out, bias=bias) + self.norm = nn.LayerNorm(self.dim_out) + self.activation = nn.SiLU(inplace=True) + + if self.dim_in != self.dim_out: + self.skip = nn.Linear(self.dim_in, self.dim_out, bias=False) + else: + self.skip = None + + def forward(self, x): + # x: [B, C] + identity = x + + out = self.dense(x) + out = self.norm(out) + + if self.skip is not None: + identity = self.skip(identity) + + out += identity + out = self.activation(out) + + return out + +class BasicBlock(nn.Module): + def __init__(self, dim_in, dim_out, bias=True): + super().__init__() + self.dim_in = dim_in + self.dim_out = dim_out + + self.dense = nn.Linear(self.dim_in, self.dim_out, bias=bias) + self.activation = nn.ReLU(inplace=True) + + def forward(self, x): + # x: [B, C] + + out = self.dense(x) + out = self.activation(out) + + return out + +class MLP(nn.Module): + def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True, block=BasicBlock): + super().__init__() + self.dim_in = dim_in + self.dim_out = dim_out + self.dim_hidden = dim_hidden + self.num_layers = num_layers + + net = [] + for l in range(num_layers): + if l == 0: + net.append(BasicBlock(self.dim_in, self.dim_hidden, bias=bias)) + elif l != num_layers - 1: + net.append(block(self.dim_hidden, self.dim_hidden, bias=bias)) + else: + net.append(nn.Linear(self.dim_hidden, self.dim_out, bias=bias)) + + self.net = nn.ModuleList(net) + + + def forward(self, x): + + for l in range(self.num_layers): + x = self.net[l](x) + + return x + + +class NeRFNetwork(NeRFRenderer): + def __init__(self, + opt, + num_layers=5, # 5 in paper + hidden_dim=64, # 128 in paper + num_layers_bg=2, # 3 in paper + hidden_dim_bg=32, # 64 in paper + encoding='frequency_torch', # pure pytorch + ): + + super().__init__(opt) + + self.num_layers = num_layers + self.hidden_dim = hidden_dim + self.encoder, self.in_dim = get_encoder(encoding, input_dim=3, multires=12) + self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True, block=ResBlock) + + self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else F.softplus + + # background network + if self.opt.bg_radius > 0: + self.num_layers_bg = num_layers_bg + self.hidden_dim_bg = hidden_dim_bg + self.encoder_bg, self.in_dim_bg = get_encoder(encoding, input_dim=3, multires=4) + self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) + + else: + self.bg_net = None + + def common_forward(self, x): + # x: [N, 3], in [-bound, bound] + + # sigma + enc = self.encoder(x, bound=self.bound, max_level=self.max_level) + + h = self.sigma_net(enc) + + sigma = self.density_activation(h[..., 0] + self.density_blob(x)) + albedo = torch.sigmoid(h[..., 1:]) + + return sigma, albedo + + # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 + def finite_difference_normal(self, x, epsilon=1e-2): + # x: [N, 3] + dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dy_neg, _ = self.common_forward((x + 
torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + + normal = torch.stack([ + 0.5 * (dx_pos - dx_neg) / epsilon, + 0.5 * (dy_pos - dy_neg) / epsilon, + 0.5 * (dz_pos - dz_neg) / epsilon + ], dim=-1) + + return -normal + + def normal(self, x): + + with torch.enable_grad(): + with torch.cuda.amp.autocast(enabled=False): + x.requires_grad_(True) + sigma, albedo = self.common_forward(x) + # query gradient + normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3] + + # normal = self.finite_difference_normal(x) + normal = safe_normalize(normal) + normal = torch.nan_to_num(normal) + + return normal + + def forward(self, x, d, l=None, ratio=1, shading='albedo'): + # x: [N, 3], in [-bound, bound] + # d: [N, 3], view direction, nomalized in [-1, 1] + # l: [3], plane light direction, nomalized in [-1, 1] + # ratio: scalar, ambient ratio, 1 == no shading (albedo only), 0 == only shading (textureless) + + if shading == 'albedo': + # no need to query normal + sigma, color = self.common_forward(x) + normal = None + + else: + # query normal + + # sigma, albedo = self.common_forward(x) + # normal = self.normal(x) + + with torch.enable_grad(): + with torch.cuda.amp.autocast(enabled=False): + x.requires_grad_(True) + sigma, albedo = self.common_forward(x) + # query gradient + normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3] + normal = safe_normalize(normal) + normal = torch.nan_to_num(normal) + + # lambertian shading + lambertian = ratio + (1 - ratio) * (normal * l).sum(-1).clamp(min=0) # [N,] + + if shading == 'textureless': + color = lambertian.unsqueeze(-1).repeat(1, 3) + elif shading == 'normal': + color = (normal + 1) / 2 + else: # 'lambertian' + color = albedo * lambertian.unsqueeze(-1) + + return sigma, color, normal + + + def density(self, x): + # x: [N, 3], in [-bound, bound] + + sigma, albedo = self.common_forward(x) + + return { + 'sigma': sigma, + 'albedo': albedo, + } + + + def background(self, d): + + h = self.encoder_bg(d) # [N, C] + + h = self.bg_net(h) + + # sigmoid activation for rgb + rgbs = torch.sigmoid(h) + + return rgbs + + # optimizer utils + def get_params(self, lr): + + params = [ + # {'params': self.encoder.parameters(), 'lr': lr * 10}, + {'params': self.sigma_net.parameters(), 'lr': lr}, + ] + + if self.opt.bg_radius > 0: + # params.append({'params': self.encoder_bg.parameters(), 'lr': lr * 10}) + params.append({'params': self.bg_net.parameters(), 'lr': lr}) + + if self.opt.dmtet and not self.opt.lock_geo: + params.append({'params': self.sdf, 'lr': lr}) + params.append({'params': self.deform, 'lr': lr}) + + return params \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/network_grid.py b/stable-dreamfusion-3DPortrait/nerf/network_grid.py new file mode 100644 index 0000000..c308f3d --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/network_grid.py @@ -0,0 +1,172 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from activation import trunc_exp, biased_softplus +from .renderer import NeRFRenderer + +import numpy as np +from encoding import get_encoder + +from .utils import safe_normalize + +class MLP(nn.Module): + def __init__(self, dim_in, dim_out, dim_hidden, num_layers, 
bias=True): + super().__init__() + self.dim_in = dim_in + self.dim_out = dim_out + self.dim_hidden = dim_hidden + self.num_layers = num_layers + + net = [] + for l in range(num_layers): + net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) + + self.net = nn.ModuleList(net) + + def forward(self, x): + for l in range(self.num_layers): + x = self.net[l](x) + if l != self.num_layers - 1: + x = F.relu(x, inplace=True) + return x + + +class NeRFNetwork(NeRFRenderer): + def __init__(self, + opt, + num_layers=3, + hidden_dim=64, + num_layers_bg=2, + hidden_dim_bg=32, + ): + + super().__init__(opt) + + self.num_layers = num_layers + self.hidden_dim = hidden_dim + + self.encoder, self.in_dim = get_encoder('hashgrid', input_dim=3, log2_hashmap_size=19, desired_resolution=2048 * self.bound, interpolation='smoothstep') + + self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) + # self.normal_net = MLP(self.in_dim, 3, hidden_dim, num_layers, bias=True) + + self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus + + # background network + if self.opt.bg_radius > 0: + self.num_layers_bg = num_layers_bg + self.hidden_dim_bg = hidden_dim_bg + + # use a very simple network to avoid it learning the prompt... + self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) + self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) + + else: + self.bg_net = None + + def common_forward(self, x): + + # sigma + enc = self.encoder(x, bound=self.bound, max_level=self.max_level) + + h = self.sigma_net(enc) + + sigma = self.density_activation(h[..., 0] + self.density_blob(x)) + albedo = torch.sigmoid(h[..., 1:]) + + return sigma, albedo + + # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 + def finite_difference_normal(self, x, epsilon=1e-2): + # x: [N, 3] + dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + + normal = torch.stack([ + 0.5 * (dx_pos - dx_neg) / epsilon, + 0.5 * (dy_pos - dy_neg) / epsilon, + 0.5 * (dz_pos - dz_neg) / epsilon + ], dim=-1) + + return -normal + + def normal(self, x): + normal = self.finite_difference_normal(x) + normal = safe_normalize(normal) + normal = torch.nan_to_num(normal) + return normal + + def forward(self, x, d, l=None, ratio=1, shading='albedo'): + # x: [N, 3], in [-bound, bound] + # d: [N, 3], view direction, nomalized in [-1, 1] + # l: [3], plane light direction, nomalized in [-1, 1] + # ratio: scalar, ambient ratio, 1 == no shading (albedo only), 0 == only shading (textureless) + + sigma, albedo = self.common_forward(x) + + if shading == 'albedo': + normal = None + color = albedo + + else: # lambertian shading 
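+            # Shaded branches: estimate a surface normal from finite differences on
+            # the density field, then blend an ambient term with a clamped diffuse
+            # (Lambertian) term using `ratio`.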
+ + # normal = self.normal_net(enc) + normal = self.normal(x) + + lambertian = ratio + (1 - ratio) * (normal * l).sum(-1).clamp(min=0) # [N,] + + if shading == 'textureless': + color = lambertian.unsqueeze(-1).repeat(1, 3) + elif shading == 'normal': + color = (normal + 1) / 2 + else: # 'lambertian' + color = albedo * lambertian.unsqueeze(-1) + + return sigma, color, normal + + + def density(self, x): + # x: [N, 3], in [-bound, bound] + + sigma, albedo = self.common_forward(x) + + return { + 'sigma': sigma, + 'albedo': albedo, + } + + + def background(self, d): + + h = self.encoder_bg(d) # [N, C] + + h = self.bg_net(h) + + # sigmoid activation for rgb + rgbs = torch.sigmoid(h) + + return rgbs + + # optimizer utils + def get_params(self, lr): + + params = [ + {'params': self.encoder.parameters(), 'lr': lr * 10}, + {'params': self.sigma_net.parameters(), 'lr': lr}, + # {'params': self.normal_net.parameters(), 'lr': lr}, + ] + + if self.opt.bg_radius > 0: + # params.append({'params': self.encoder_bg.parameters(), 'lr': lr * 10}) + params.append({'params': self.bg_net.parameters(), 'lr': lr}) + + if self.opt.dmtet and not self.opt.lock_geo: + params.append({'params': self.sdf, 'lr': lr}) + params.append({'params': self.deform, 'lr': lr}) + + return params \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/network_grid_taichi.py b/stable-dreamfusion-3DPortrait/nerf/network_grid_taichi.py new file mode 100644 index 0000000..8fa2efd --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/network_grid_taichi.py @@ -0,0 +1,170 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from activation import trunc_exp +from .renderer import NeRFRenderer + +import numpy as np +from encoding import get_encoder + +from .utils import safe_normalize + +class MLP(nn.Module): + def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): + super().__init__() + self.dim_in = dim_in + self.dim_out = dim_out + self.dim_hidden = dim_hidden + self.num_layers = num_layers + + net = [] + for l in range(num_layers): + net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) + + self.net = nn.ModuleList(net) + + def forward(self, x): + for l in range(self.num_layers): + x = self.net[l](x) + if l != self.num_layers - 1: + x = F.relu(x, inplace=True) + return x + + +class NeRFNetwork(NeRFRenderer): + def __init__(self, + opt, + num_layers=2, + hidden_dim=32, + num_layers_bg=2, + hidden_dim_bg=16, + ): + + super().__init__(opt) + + self.num_layers = num_layers + self.hidden_dim = hidden_dim + + self.encoder, self.in_dim = get_encoder('hashgrid_taichi', input_dim=3, log2_hashmap_size=19, desired_resolution=2048 * self.bound, interpolation='smoothstep') + + self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) + # self.normal_net = MLP(self.in_dim, 3, hidden_dim, num_layers, bias=True) + + self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else F.softplus + + # background network + if self.opt.bg_radius > 0: + self.num_layers_bg = num_layers_bg + self.hidden_dim_bg = hidden_dim_bg + # use a very simple network to avoid it learning the prompt... 
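+            # A low-order frequency encoding (multires=4) keeps this background
+            # branch deliberately low-capacity.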
+ self.encoder_bg, self.in_dim_bg = get_encoder('frequency_torch', input_dim=3, multires=4) # TODO: freq encoder can be replaced by a Taichi implementation + self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) + + else: + self.bg_net = None + + def common_forward(self, x): + + # sigma + enc = self.encoder(x, bound=self.bound) + + h = self.sigma_net(enc) + + sigma = self.density_activation(h[..., 0] + self.density_blob(x)) + albedo = torch.sigmoid(h[..., 1:]) + + return sigma, albedo + + # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 + def finite_difference_normal(self, x, epsilon=1e-2): + # x: [N, 3] + dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + + normal = torch.stack([ + 0.5 * (dx_pos - dx_neg) / epsilon, + 0.5 * (dy_pos - dy_neg) / epsilon, + 0.5 * (dz_pos - dz_neg) / epsilon + ], dim=-1) + + return -normal + + def normal(self, x): + normal = self.finite_difference_normal(x) + normal = safe_normalize(normal) + normal = torch.nan_to_num(normal) + return normal + + def forward(self, x, d, l=None, ratio=1, shading='albedo'): + # x: [N, 3], in [-bound, bound] + # d: [N, 3], view direction, nomalized in [-1, 1] + # l: [3], plane light direction, nomalized in [-1, 1] + # ratio: scalar, ambient ratio, 1 == no shading (albedo only), 0 == only shading (textureless) + + sigma, albedo = self.common_forward(x) + + if shading == 'albedo': + normal = None + color = albedo + + else: # lambertian shading + # normal = self.normal_net(enc) + normal = self.normal(x) + + lambertian = ratio + (1 - ratio) * (normal * l).sum(-1).clamp(min=0) # [N,] + + if shading == 'textureless': + color = lambertian.unsqueeze(-1).repeat(1, 3) + elif shading == 'normal': + color = (normal + 1) / 2 + else: # 'lambertian' + color = albedo * lambertian.unsqueeze(-1) + + return sigma, color, normal + + + def density(self, x): + # x: [N, 3], in [-bound, bound] + + sigma, albedo = self.common_forward(x) + + return { + 'sigma': sigma, + 'albedo': albedo, + } + + + def background(self, d): + + h = self.encoder_bg(d) # [N, C] + + h = self.bg_net(h) + + # sigmoid activation for rgb + rgbs = torch.sigmoid(h) + + return rgbs + + # optimizer utils + def get_params(self, lr): + + params = [ + {'params': self.encoder.parameters(), 'lr': lr * 10}, + {'params': self.sigma_net.parameters(), 'lr': lr}, + # {'params': self.normal_net.parameters(), 'lr': lr}, + ] + + if self.opt.bg_radius > 0: + # params.append({'params': self.encoder_bg.parameters(), 'lr': lr * 10}) + params.append({'params': self.bg_net.parameters(), 'lr': lr}) + + if self.opt.dmtet and not self.opt.lock_geo: + params.append({'params': self.sdf, 'lr': lr}) + params.append({'params': self.deform, 'lr': lr}) + + return params \ No newline at end of file diff 
--git a/stable-dreamfusion-3DPortrait/nerf/network_grid_tcnn.py b/stable-dreamfusion-3DPortrait/nerf/network_grid_tcnn.py new file mode 100644 index 0000000..e270789 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/network_grid_tcnn.py @@ -0,0 +1,178 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from activation import trunc_exp, biased_softplus +from .renderer import NeRFRenderer + +import numpy as np +from encoding import get_encoder + +from .utils import safe_normalize + +import tinycudann as tcnn + +class MLP(nn.Module): + def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): + super().__init__() + self.dim_in = dim_in + self.dim_out = dim_out + self.dim_hidden = dim_hidden + self.num_layers = num_layers + + net = [] + for l in range(num_layers): + net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) + + self.net = nn.ModuleList(net) + + def forward(self, x): + for l in range(self.num_layers): + x = self.net[l](x) + if l != self.num_layers - 1: + x = F.relu(x, inplace=True) + return x + + +class NeRFNetwork(NeRFRenderer): + def __init__(self, + opt, + num_layers=3, + hidden_dim=64, + num_layers_bg=2, + hidden_dim_bg=32, + ): + + super().__init__(opt) + + self.num_layers = num_layers + self.hidden_dim = hidden_dim + + self.encoder = tcnn.Encoding( + n_input_dims=3, + encoding_config={ + "otype": "HashGrid", + "n_levels": 16, + "n_features_per_level": 2, + "log2_hashmap_size": 19, + "base_resolution": 16, + "interpolation": "Smoothstep", + "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), + }, + dtype=torch.float32, # ENHANCE: default float16 seems unstable... + ) + self.in_dim = self.encoder.n_output_dims + # use torch MLP, as tcnn MLP doesn't impl second-order derivative + self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) + + self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus + + # background network + if self.opt.bg_radius > 0: + self.num_layers_bg = num_layers_bg + self.hidden_dim_bg = hidden_dim_bg + + # use a very simple network to avoid it learning the prompt... 
+ self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) + self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) + + else: + self.bg_net = None + + def common_forward(self, x): + + # sigma + enc = self.encoder((x + self.bound) / (2 * self.bound)).float() + h = self.sigma_net(enc) + + sigma = self.density_activation(h[..., 0] + self.density_blob(x)) + albedo = torch.sigmoid(h[..., 1:]) + + return sigma, albedo + + def normal(self, x): + + with torch.enable_grad(): + with torch.cuda.amp.autocast(enabled=False): + x.requires_grad_(True) + sigma, albedo = self.common_forward(x) + # query gradient + normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3] + + # normal = self.finite_difference_normal(x) + normal = safe_normalize(normal) + normal = torch.nan_to_num(normal) + + return normal + + def forward(self, x, d, l=None, ratio=1, shading='albedo'): + # x: [N, 3], in [-bound, bound] + # d: [N, 3], view direction, nomalized in [-1, 1] + # l: [3], plane light direction, nomalized in [-1, 1] + # ratio: scalar, ambient ratio, 1 == no shading (albedo only), 0 == only shading (textureless) + + + if shading == 'albedo': + sigma, albedo = self.common_forward(x) + normal = None + color = albedo + + else: # lambertian shading + with torch.enable_grad(): + with torch.cuda.amp.autocast(enabled=False): + x.requires_grad_(True) + sigma, albedo = self.common_forward(x) + normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3] + normal = safe_normalize(normal) + normal = torch.nan_to_num(normal) + + lambertian = ratio + (1 - ratio) * (normal * l).sum(-1).clamp(min=0) # [N,] + + if shading == 'textureless': + color = lambertian.unsqueeze(-1).repeat(1, 3) + elif shading == 'normal': + color = (normal + 1) / 2 + else: # 'lambertian' + color = albedo * lambertian.unsqueeze(-1) + + return sigma, color, normal + + + def density(self, x): + # x: [N, 3], in [-bound, bound] + + sigma, albedo = self.common_forward(x) + + return { + 'sigma': sigma, + 'albedo': albedo, + } + + + def background(self, d): + + h = self.encoder_bg(d) # [N, C] + + h = self.bg_net(h) + + # sigmoid activation for rgb + rgbs = torch.sigmoid(h) + + return rgbs + + # optimizer utils + def get_params(self, lr): + + params = [ + {'params': self.encoder.parameters(), 'lr': lr * 10}, + {'params': self.sigma_net.parameters(), 'lr': lr}, + ] + + if self.opt.bg_radius > 0: + params.append({'params': self.bg_net.parameters(), 'lr': lr}) + + if self.opt.dmtet and not self.opt.lock_geo: + params.append({'params': self.sdf, 'lr': lr}) + params.append({'params': self.deform, 'lr': lr}) + + return params \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/network_trigrid_heirarchy.py b/stable-dreamfusion-3DPortrait/nerf/network_trigrid_heirarchy.py new file mode 100644 index 0000000..b582fe8 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/network_trigrid_heirarchy.py @@ -0,0 +1,413 @@ +from .trigrid_rendering.neural_render import NeuralRender as TrigridNeRFRenderer +from .renderer import NeRFRenderer + +import torch +import torch.nn as nn +import torch.nn.functional as F +from encoding import get_encoder + +from .utils import safe_normalize + +import numpy as np +class MLP(nn.Module): + def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): + super().__init__() + self.dim_in = dim_in + self.dim_out = dim_out + self.dim_hidden = dim_hidden + self.num_layers = num_layers + + net = [] + for l in 
range(num_layers): + net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, + self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) + + self.net = nn.ModuleList(net) + + def forward(self, x): + for l in range(self.num_layers): + x = self.net[l](x) + if l != self.num_layers - 1: + x = F.relu(x, inplace=True) + return x + + +class NeRFNetwork(NeRFRenderer): + def __init__(self, + opt, + device, + trigrid_shapes, + num_layers_bg=2, + hidden_dim_bg=32, + ): + super().__init__(opt) + + + self.triplane_names = {} + for k in trigrid_shapes: + self.register_parameter(k, torch.nn.Parameter(torch.randn(trigrid_shapes[k]))) + + if k.startswith('trigrid'): + res = int(k.split('_')[1]) + self.triplane_names[res] = k + + # sort the triplane names by resolution + self.triplane_names = {k: self.triplane_names[k] for k in sorted(self.triplane_names.keys())} + + params = {'z_dim': 512, 'c_dim': 25, 'w_dim': 512, 'img_resolution': 512, 'img_channels': 3, + 'rendering_kwargs': {'image_resolution': 512, 'disparity_space_sampling': False, + 'clamp_mode': 'softplus', + 'superresolution_module': 'training.superresolution.SuperresolutionHybrid8XDC', + 'c_gen_conditioning_zero': False, 'gpc_reg_prob': 0.7, + 'decoder_activation': 'none', 'use_torgb_raw': True, + 'use_background': True, 'triplane_depth': 3, 'c_scale': 1.0, + 'superresolution_noise_mode': 'none', 'density_reg': 0.0, + 'density_reg_p_dist': 0.004, 'reg_type': 'l1', 'decoder_lr_mul': 1.0, + 'sr_antialias': True, 'radius_scale': 0.7, + 'depth_resolution': 48, 'depth_resolution_importance': 48, + 'ray_start': 2.3850000000000002, 'ray_end': 3.12, + 'box_warp': 0.7, 'density_noise': 0.0}, + 'batch_size': 1, 'thickness': 0.25,"apply_deformation": self.opt.use_body_pose, + } + self.model = TrigridNeRFRenderer(**params).to(device) + # self.trigrid_4 = torch.nn.Parameter(torch.randn([1, 3, 16 * 3 , 4, 4])) + # self.trigrid_8 = torch.nn.Parameter(torch.randn([1, 3, 16 * 3 , 8, 8])) + # self.trigrid_16=torch.nn.Parameter(torch.randn([1, 3, 16 * 3 , 16, 16])) + # self.trigrid_32 =torch.nn.Parameter(torch.randn([1, 3, 16* 3, 32, 32])) + # self.trigrid_64 =torch.nn.Parameter(torch.randn([1, 3, 16* 3, 64, 64])) + # self.trigrid_128 =torch.nn.Parameter(torch.randn([1,3, 16* 3, 128, 128])) + # self.trigrid_256 = torch.nn.Parameter(torch.randn([1, 3, 32*3, 256, 256])) + + # self.ws = torch.nn.Parameter(torch.randn([1, 14, 512])) + + # background network + if self.opt.bg_radius > 0: + + self.num_layers_bg = num_layers_bg + self.hidden_dim_bg = hidden_dim_bg + + # use a very simple network to avoid it learning the prompt... 
+ self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) + self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) + + else: + assert self.opt.learnable_bg == False + self.bg_net = None + + + + self.train_decoder = opt.train_decoder + + def common_forward(self, x): + + # # sigma + # enc = self.encoder(x, bound=self.bound, max_level=self.max_level) + # + # h = self.sigma_net(enc) + # + # sigma = self.density_activation(h[..., 0] + self.density_blob(x)) + # albedo = torch.sigmoid(h[..., 1:]) + + + # return sigma, albedo + + # TODO + raise NotImplementedError + + # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192 + def finite_difference_normal(self, x, epsilon=1e-2): + # x: [N, 3] + # dx_pos, _ = self.common_forward( + # (x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + # dx_neg, _ = self.common_forward( + # (x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + # dy_pos, _ = self.common_forward( + # (x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + # dy_neg, _ = self.common_forward( + # (x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound)) + # dz_pos, _ = self.common_forward( + # (x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + # dz_neg, _ = self.common_forward( + # (x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound)) + # + # normal = torch.stack([ + # 0.5 * (dx_pos - dx_neg) / epsilon, + # 0.5 * (dy_pos - dy_neg) / epsilon, + # 0.5 * (dz_pos - dz_neg) / epsilon + # ], dim=-1) + + # return -normal + + # TODO + raise NotImplementedError + + def normal(self, x): + # normal = self.finite_difference_normal(x) + # normal = safe_normalize(normal) + # normal = torch.nan_to_num(normal) + + # return normal + + # TODO + raise NotImplementedError + + def forward(self, x, d, l=None, ratio=1, shading='albedo'): + ''' + x: [N, 3], in [-bound, bound] + d: [N, 3], view direction, nomalized in [-1, 1] + l: [3], plane light direction, nomalized in [-1, 1] + ratio: scalar, ambient ratio, 1 == no shading (albedo only), 0 == only shading (textureless) + ''' + + # sigma, albedo = self.common_forward(x) + # + # if shading == 'albedo': + # normal = None + # color = albedo + # + # else: # lambertian shading + # + # # normal = self.normal_net(enc) + # normal = self.normal(x) + # + # lambertian = ratio + (1 - ratio) * (normal * l).sum(-1).clamp(min=0) # [N,] + # + # if shading == 'textureless': + # color = lambertian.unsqueeze(-1).repeat(1, 3) + # elif shading == 'normal': + # color = (normal + 1) / 2 + # else: # 'lambertian' + # color = albedo * lambertian.unsqueeze(-1) + + # return sigma, color, normal + + # TODO + raise NotImplementedError + + def density(self, x): + # x: [N, 3], in [-bound, bound] + + # sigma, albedo = self.common_forward(x) + + + + # return { + # 'sigma': sigma, + # 'albedo': albedo, + # } + + # TODO + raise NotImplementedError + + def background(self, d): + + h = self.encoder_bg(d) # [N, C] + + h = self.bg_net(h) + + # sigmoid activation for rgb + rgbs = torch.sigmoid(h) + + return rgbs + + # optimizer utils + def get_params(self, lr,trigrid_lr_ratio): + params =[] + assert len(trigrid_lr_ratio) == len(self.triplane_names) + resolutions = list(self.triplane_names.keys()) + for i in 
range(len(trigrid_lr_ratio)): + print(f'{self.triplane_names[resolutions[i]]} lr: {lr*trigrid_lr_ratio[i]}') + + params.append({'params': getattr(self, self.triplane_names[resolutions[i]]), 'lr': lr*trigrid_lr_ratio[i]}) + + # params.append({'params': self.ws, 'lr': lr*0.1}) + + if self.train_decoder: + params.append({'params': self.model.parameters(lr), 'lr': lr}) + + + return params + + @torch.no_grad() + def export_mesh(self, path, resolution=None, decimate_target=-1, S=128): + raise NotImplementedError + + + + + def render(self, rays_o, rays_d, poses, h, w, staged=False, max_ray_batch=4096, bg_color = None,bg_rays_o=None,bg_rays_d=None, **kwargs): + cam2world_pose = poses.clone() + cam2world_pose[:, :3, :3] = cam2world_pose[:, :3, :3] * -1 + cam2world_pose[:, 0, 1] *= -1 + cam2world_pose[:, 0, 2] *= -1 + cam2world_pose[:, 1, 0] *= -1 + cam2world_pose[:, 2, 0] *= -1 + cam2world_pose[:, 0, 3] *= -1 + + intrinsics = [6.510416666666667, + 0.0, + 0.5, + 0.0, + 6.510416666666667, + 0.5, + 0.0, + 0.0, + 1.0] + intrinsics = torch.tensor(intrinsics).to(cam2world_pose.device) + camera_params = torch.cat([cam2world_pose.reshape(1, 16), intrinsics.reshape(1, 9)], 1) + + # rays_o, rays_d: [B, N, 3] + # return: pred_rgb: [B, N, 3] + #B, N = rays_o.shape[:2] + H = h + W = w + + if self.opt.learnable_bg: + assert bg_color is None, 'bg_color should be None when learnable_bg is True' + bg_color = self.background(rays_d.contiguous().view(-1, 3)) # [BHW, 3] + # from [BHW, 3] to [B, H, W, 3] + bg_color = bg_color.view(-1, H, W, 3).clamp(0, 1.0) + + + device = rays_o.device + N, M, _ = rays_o.shape + + planes = {} + for res in self.triplane_names: + planes[res] = getattr(self, self.triplane_names[res]) + + if self.opt.use_body_pose: + # apply_def=apply_def, ws=None, pose_params=pose_params + pose_params = self.model.sample_pose_params(camera_params) + apply_def = True + else: + pose_params = None + apply_def = False + + + + if staged: + + + depth = torch.empty((N,M,1), device=device) + image = torch.empty((N,M, 32), device=device) + weights_sum = torch.empty((N,M,1), device=device) + + for b in range(N): + head = 0 + while head < M: + tail = min(head + max_ray_batch, M) + + render_output = self.model.renderer(planes, self.model.decoder, rays_o[b:b + 1, head:tail], + rays_d[b:b + 1, head:tail], self.model.rendering_kwargs, apply_def=apply_def, ws=None, pose_params=pose_params) # channels last + # {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights.sum(2)} + feature_samples = render_output['rgb_final'] # max_ray_batch,32 + depth_samples = render_output['depth_final'] # 1, max_ray_batch + weights_samples = render_output['weights'] # 1, max_ray_batch, depth + + weights_sum_samples = weights_samples.sum(2) # 1, max_ray_batch,1 + + + + depth[b:b + 1, head:tail] = depth_samples + weights_sum[b:b + 1, head:tail] = weights_sum_samples + image[b:b + 1, head:tail] = feature_samples + head += max_ray_batch + + feature_samples = image + depth_samples = depth + weights_sum_samples = weights_sum + + feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous() + depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W) + weights_sum_samples = weights_sum_samples.permute(0, 2, 1).reshape(N, 1, H, W) + + # Run superresolution to get final image + if self.model.decoder.activation == "sigmoid": + assert self.model.decoder.out_channels == 3 + feature_image = feature_image * 2 - 1 # Scale to (-1, 1), taken from ray marcher + + # Generate Raw image + if 
self.model.torgb: + rgb_image = self.model.torgb(feature_image, self.ws[:, -1], fused_modconv=False) + rgb_image = rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + + weights_sum_samples = weights_sum_samples * (1 + 2 * 0.001) - 0.001 + + # from [B,C,H,W] to [B, H, W, C] + rgb_image = (rgb_image.permute(0, 2, 3, 1) * 0.5 + 0.5).clamp(0, 1.0).contiguous() + depth_image = depth_image.permute(0, 2, 3, 1).contiguous().squeeze(-1) # [B, H, W] + weights_sum_samples = weights_sum_samples.permute(0, 2, 3, 1).contiguous().squeeze(-1) # [B, H, W] + + + + if bg_color is not None and self.opt.learnable_bg: + assert bg_color.shape == rgb_image.shape, f'bg_color.shape {bg_color.shape} should be equal to rgb_image.shape {rgb_image.shape}' + rgb_image = rgb_image + (1 - weights_sum_samples).unsqueeze(-1) * bg_color + + + return {'image': rgb_image, 'depth': depth_image, + "weights_sum": weights_sum_samples} + + + else: + + + # Create triplanes by running StyleGAN backbone + + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + + #self.trigrid.register_hook(lambda grad: print(grad,grad.abs().sum(), grad.abs().max(),grad.abs().min())) + # Perform volume rendering + render_output = self.model.renderer(planes, self.model.decoder, rays_o, + rays_d, self.model.rendering_kwargs, apply_def=apply_def, ws=None, pose_params=pose_params) # channels last + + + # {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights.sum(2)} + feature_samples = render_output['rgb_final'] + depth_samples = render_output['depth_final'] + weights_samples = render_output['weights'] # 1, max_ray_batch, depth,1 + weights_sum_samples = weights_samples.sum(2) # 1, max_ray_batch,1 + + # Reshape into 'raw' neural-rendered image + + feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous() + depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W) + weights_sum_samples = weights_sum_samples.permute(0, 2, 1).reshape(N, 1, H, W) + depth = weights_samples.shape[-2] + weights_samples = weights_samples.squeeze(-1).permute(0, 2, 1).reshape(N,depth, H, W) + + # Run superresolution to get final image + if self.model.decoder.activation == "sigmoid": + assert self.model.decoder.out_channels == 3 + feature_image = feature_image * 2 - 1 # Scale to (-1, 1), taken from ray marcher + feature_image.register_hook(lambda x:print(f'in sigmoid, feature_image.grad = {x}')) + + # Generate Raw image + if self.model.torgb: + rgb_image = self.model.torgb(feature_image, self.ws[:, -1], fused_modconv=False) + + rgb_image = rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + + weights_sum_samples = weights_sum_samples * (1 + 2 * 0.001) - 0.001 + + + # from [B,C,H,W] to [B, H, W, C] + rgb_image = (rgb_image.permute(0, 2, 3, 1) * 0.5 + 0.5).clamp(0, 1.0).contiguous() + depth_image = depth_image.permute(0, 2, 3, 1).contiguous().squeeze(-1) + weights_sum_samples = weights_sum_samples.permute(0, 2, 3, 1).contiguous().squeeze(-1) + weights_samples = weights_samples.permute(0, 2, 3, 1).contiguous() # B, H, W, D + + + if bg_color is not None and self.opt.learnable_bg: + assert bg_color.shape == rgb_image.shape, f'bg_color.shape {bg_color.shape} should be equal to rgb_image.shape {rgb_image.shape}' + rgb_image = rgb_image + (1 - weights_sum_samples).unsqueeze(-1) * bg_color + + return {'image': rgb_image, 'depth': depth_image,"weights":weights_samples, "weights_sum": 
weights_sum_samples} + + + #results = self.run(rays_o, rays_d, **kwargs) + + + #return results \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/provider.py b/stable-dreamfusion-3DPortrait/nerf/provider.py new file mode 100644 index 0000000..764efcb --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/provider.py @@ -0,0 +1,357 @@ +import os +import cv2 +import glob +import json +import tqdm +import random +import numpy as np +from scipy.spatial.transform import Slerp, Rotation + +import trimesh + +import torch +import torch.nn.functional as F +from torch.utils.data import DataLoader + +from .utils import get_rays, safe_normalize + +DIR_COLORS = np.array([ + [255, 0, 0, 255], # front + [0, 255, 0, 255], # side + [0, 0, 255, 255], # back + [255, 255, 0, 255], # side + [255, 0, 255, 255], # overhead + [0, 255, 255, 255], # bottom +], dtype=np.uint8) + +def visualize_poses(poses, dirs, size=0.1): + # poses: [B, 4, 4], dirs: [B] + + axes = trimesh.creation.axis(axis_length=4) + sphere = trimesh.creation.icosphere(radius=1) + objects = [axes, sphere] + + for pose, dir in zip(poses, dirs): + # a camera is visualized with 8 line segments. + pos = pose[:3, 3] + a = pos + size * pose[:3, 0] + size * pose[:3, 1] - size * pose[:3, 2] + b = pos - size * pose[:3, 0] + size * pose[:3, 1] - size * pose[:3, 2] + c = pos - size * pose[:3, 0] - size * pose[:3, 1] - size * pose[:3, 2] + d = pos + size * pose[:3, 0] - size * pose[:3, 1] - size * pose[:3, 2] + + segs = np.array([[pos, a], [pos, b], [pos, c], [pos, d], [a, b], [b, c], [c, d], [d, a]]) + segs = trimesh.load_path(segs) + + # different color for different dirs + segs.colors = DIR_COLORS[[dir]].repeat(len(segs.entities), 0) + + objects.append(segs) + + trimesh.Scene(objects).show() + +def get_view_direction(thetas, phis, overhead, front): + # phis: [B,]; thetas: [B,] + # front = 0 [-front/2, front/2) + # side (cam left) = 1 [front/2, 180-front/2) + # back = 2 [180-front/2, 180+front/2) + # side (cam right) = 3 [180+front/2, 360-front/2) + # top = 4 [0, overhead] + # bottom = 5 [180-overhead, 180] + res = torch.zeros(thetas.shape[0], dtype=torch.long) + # first determine by phis + phis = phis % (2 * np.pi) + res[(phis < front / 2) | (phis >= 2 * np.pi - front / 2)] = 0 + res[(phis >= front / 2) & (phis < np.pi - front / 2)] = 1 + res[(phis >= np.pi - front / 2) & (phis < np.pi + front / 2)] = 2 + res[(phis >= np.pi + front / 2) & (phis < 2 * np.pi - front / 2)] = 3 + # override by thetas + res[thetas <= overhead] = 4 + res[thetas >= (np.pi - overhead)] = 5 + return res + + +def rand_poses(size, device, opt, radius_range=[1, 1.5], theta_range=[0, 120], phi_range=[0, 360], return_dirs=False, angle_overhead=30, angle_front=60, uniform_sphere_rate=0.5): + ''' generate random poses from an orbit camera + Args: + size: batch size of generated poses. + device: where to allocate the output. 
+ radius: camera radius + theta_range: [min, max], should be in [0, pi] + phi_range: [min, max], should be in [0, 2 * pi] + Return: + poses: [size, 4, 4] + ''' + + theta_range = np.array(theta_range) / 180 * np.pi + phi_range = np.array(phi_range) / 180 * np.pi + angle_overhead = angle_overhead / 180 * np.pi + angle_front = angle_front / 180 * np.pi + + radius = torch.rand(size, device=device) * (radius_range[1] - radius_range[0]) + radius_range[0] + + if random.random() < uniform_sphere_rate: + unit_centers = F.normalize( + torch.stack([ + torch.randn(size, device=device), + torch.abs(torch.randn(size, device=device)), + torch.randn(size, device=device), + ], dim=-1), p=2, dim=1 + ) + thetas = torch.acos(unit_centers[:,1]) + phis = torch.atan2(unit_centers[:,0], unit_centers[:,2]) + phis[phis < 0] += 2 * np.pi + centers = unit_centers * radius.unsqueeze(-1) + else: + thetas = torch.rand(size, device=device) * (theta_range[1] - theta_range[0]) + theta_range[0] + phis = torch.rand(size, device=device) * (phi_range[1] - phi_range[0]) + phi_range[0] + phis[phis < 0] += 2 * np.pi + + centers = torch.stack([ + radius * torch.sin(thetas) * torch.sin(phis), + radius * torch.cos(thetas), + radius * torch.sin(thetas) * torch.cos(phis), + ], dim=-1) # [B, 3] + + targets = 0 + + # jitters + if opt.jitter_pose: + jit_center = opt.jitter_center # 0.015 # was 0.2 + jit_target = opt.jitter_target + centers += torch.rand_like(centers) * jit_center - jit_center/2.0 + targets += torch.randn_like(centers) * jit_target + + # lookat + forward_vector = safe_normalize(centers - targets) + up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(size, 1) + right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1)) + + if opt.jitter_pose: + up_noise = torch.randn_like(up_vector) * opt.jitter_up + else: + up_noise = 0 + + up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1) + up_noise) + + poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(size, 1, 1) + poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1) + poses[:, :3, 3] = centers + + if return_dirs: + dirs = get_view_direction(thetas, phis, angle_overhead, angle_front) + else: + dirs = None + + # back to degree + thetas = thetas / np.pi * 180 + phis = phis / np.pi * 180 + + return poses, dirs, thetas, phis, radius + + +def circle_poses(device, radius=torch.tensor([3.2]), theta=torch.tensor([60]), phi=torch.tensor([0]), return_dirs=False, angle_overhead=30, angle_front=60): + + theta = theta / 180 * np.pi + phi = phi / 180 * np.pi + angle_overhead = angle_overhead / 180 * np.pi + angle_front = angle_front / 180 * np.pi + + centers = torch.stack([ + radius * torch.sin(theta) * torch.sin(phi), + radius * torch.cos(theta), + radius * torch.sin(theta) * torch.cos(phi), + ], dim=-1) # [B, 3] + + # lookat + forward_vector = safe_normalize(centers) + up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(len(centers), 1) + right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1)) + up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1)) + + poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(len(centers), 1, 1) + poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1) + poses[:, :3, 3] = centers + + if return_dirs: + dirs = get_view_direction(theta, phi, angle_overhead, angle_front) + else: + dirs = None + + return poses, dirs + + +class NeRFDataset: + 
def __init__(self, opt, device, type='train', H=256, W=256, size=100, teacher_H = None, teacher_W = None): + super().__init__() + + self.opt = opt + self.device = device + self.type = type # train, val, test + + self.H = H + self.W = W + self.size = size + + self.teacher_H = teacher_H + self.teacher_W = teacher_W + + self.training = self.type in ['train', 'all'] + + self.cx = self.H / 2 + self.cy = self.W / 2 + + self.near = self.opt.min_near + self.far = 1000 # infinite + + # [debug] visualize poses + # poses, dirs, _, _, _ = rand_poses(100, self.device, opt, radius_range=self.opt.radius_range, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, jitter=self.opt.jitter_pose, uniform_sphere_rate=1) + # visualize_poses(poses.detach().cpu().numpy(), dirs.detach().cpu().numpy()) + + def get_default_view_data(self): + + H = int(self.opt.known_view_scale * self.H) + W = int(self.opt.known_view_scale * self.W) + cx = H / 2 + cy = W / 2 + + radii = torch.FloatTensor(self.opt.ref_radii).to(self.device) + thetas = torch.FloatTensor(self.opt.ref_polars).to(self.device) + phis = torch.FloatTensor(self.opt.ref_azimuths).to(self.device) + poses, dirs = circle_poses(self.device, radius=radii, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front) + fov = self.opt.default_fovy + focal = H / (2 * np.tan(np.deg2rad(fov) / 2)) + intrinsics = np.array([focal, focal, cx, cy]) + + projection = torch.tensor([ + [2*focal/W, 0, 0, 0], + [0, -2*focal/H, 0, 0], + [0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)], + [0, 0, -1, 0] + ], dtype=torch.float32, device=self.device).unsqueeze(0).repeat(len(radii), 1, 1) + + mvp = projection @ torch.inverse(poses) # [B, 4, 4] + + # sample a low-resolution but full image + rays = get_rays(poses, intrinsics, H, W, -1) + + if self.teacher_W is not None: + teacher_H =int(self.opt.known_view_scale * self.teacher_H) + teacher_W = int(self.opt.known_view_scale * self.teacher_W) + + teacher_cx = teacher_H / 2 + teacher_cy = teacher_W / 2 + + teacher_focal = teacher_H / (2 * np.tan(np.deg2rad(fov) / 2)) + teacher_intrinsics = np.array([teacher_focal, teacher_focal, teacher_cx, teacher_cy]) + + teacher_rays = get_rays(poses, teacher_intrinsics, teacher_H, teacher_W, -1) + + data = { + 'H': H, + 'W': W, + 'rays_o': rays['rays_o'], + 'rays_d': rays['rays_d'], + 'dir': dirs, + 'mvp': mvp, + 'polar': self.opt.ref_polars, + 'azimuth': self.opt.ref_azimuths, + 'radius': self.opt.ref_radii, + + 'teacher_H': self.teacher_W if self.teacher_W is not None else None, + 'teacher_W': self.teacher_W if self.teacher_W is not None else None, + 'teacher_rays_o': teacher_rays['rays_o'] if self.teacher_W is not None else None, + 'teacher_rays_d': teacher_rays['rays_d'] if self.teacher_W is not None else None, + } + + return data + + def collate(self, index): + + B = len(index) + + if self.training: + # random pose on the fly + poses, dirs, thetas, phis, radius = rand_poses(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate) + + # random focal + fov = random.random() * (self.opt.fovy_range[1] - self.opt.fovy_range[0]) + self.opt.fovy_range[0] + + elif self.type == 'six_views': + # six views + thetas_six = [90, 90, 90, 90, 1e-3, 179.999] + phis_six = [ 0, 90, 180, 
-90, 0, 0] + thetas = torch.FloatTensor([thetas_six[index[0]]]).to(self.device) + phis = torch.FloatTensor([phis_six[index[0]]]).to(self.device) + radius = torch.FloatTensor([self.opt.default_radius]).to(self.device) + poses, dirs = circle_poses(self.device, radius=radius, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front) + + # fixed focal + fov = self.opt.default_fovy + + else: + # circle pose + thetas = torch.FloatTensor([self.opt.default_polar]).to(self.device) + phis = torch.FloatTensor([(index[0] / self.size) * 360]).to(self.device) + radius = torch.FloatTensor([self.opt.default_radius]).to(self.device) + poses, dirs = circle_poses(self.device, radius=radius, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front) + + # fixed focal + fov = self.opt.default_fovy + + focal = self.H / (2 * np.tan(np.deg2rad(fov) / 2)) + intrinsics = np.array([focal, focal, self.cx, self.cy]) + + projection = torch.tensor([ + [2*focal/self.W, 0, 0, 0], + [0, -2*focal/self.H, 0, 0], + [0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)], + [0, 0, -1, 0] + ], dtype=torch.float32, device=self.device).unsqueeze(0) + + mvp = projection @ torch.inverse(poses) # [1, 4, 4] + + # sample a low-resolution but full image + rays = get_rays(poses, intrinsics, self.H, self.W, -1) + + # delta polar/azimuth/radius to default view + delta_polar = thetas - self.opt.default_polar + delta_azimuth = phis - self.opt.default_azimuth + delta_azimuth[delta_azimuth > 180] -= 360 # range in [-180, 180] + delta_radius = radius - self.opt.default_radius + + if self.teacher_W is not None: + teacher_H = self.teacher_H + teacher_W = self.teacher_W + + teacher_cx = teacher_H / 2 + teacher_cy = teacher_W / 2 + + teacher_focal = teacher_H / (2 * np.tan(np.deg2rad(fov) / 2)) + teacher_intrinsics = np.array([teacher_focal, teacher_focal, teacher_cx, teacher_cy]) + + teacher_rays = get_rays(poses, teacher_intrinsics, teacher_H, teacher_W, -1) + + data = { + 'H': self.H, + 'W': self.W, + 'rays_o': rays['rays_o'], + 'rays_d': rays['rays_d'], + 'dir': dirs, + 'mvp': mvp, + 'polar': delta_polar, + 'azimuth': delta_azimuth, + 'radius': delta_radius, + + 'teacher_H': teacher_H if self.teacher_W is not None else None, + 'teacher_W': teacher_W if self.teacher_W is not None else None, + 'teacher_rays_o': teacher_rays['rays_o'] if self.teacher_W is not None else None, + 'teacher_rays_d': teacher_rays['rays_d'] if self.teacher_W is not None else None, + } + + return data + + def dataloader(self, batch_size=None): + batch_size = batch_size or self.opt.batch_size + loader = DataLoader(list(range(self.size)), batch_size=batch_size, collate_fn=self.collate, shuffle=self.training, num_workers=0) + loader._data = self + return loader \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/provider_3DPortraitGAN.py b/stable-dreamfusion-3DPortrait/nerf/provider_3DPortraitGAN.py new file mode 100644 index 0000000..647c6af --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/provider_3DPortraitGAN.py @@ -0,0 +1,332 @@ +import os +import cv2 +import glob +import json +import tqdm +import random +import numpy as np +from scipy.spatial.transform import Slerp, Rotation + +import trimesh + +import torch +import torch.nn.functional as F +from torch.utils.data import DataLoader + +from .utils import get_rays, safe_normalize + +DIR_COLORS = np.array([ + [255, 0, 0, 255], # front + 
[0, 255, 0, 255], # side + [0, 0, 255, 255], # back + [255, 255, 0, 255], # side + [255, 0, 255, 255], # overhead + [0, 255, 255, 255], # bottom +], dtype=np.uint8) + +def visualize_poses(poses, dirs, size=0.1): + # poses: [B, 4, 4], dirs: [B] + + axes = trimesh.creation.axis(axis_length=4) + sphere = trimesh.creation.icosphere(radius=1) + objects = [axes, sphere] + + for pose, dir in zip(poses, dirs): + # a camera is visualized with 8 line segments. + pos = pose[:3, 3] + a = pos + size * pose[:3, 0] + size * pose[:3, 1] - size * pose[:3, 2] + b = pos - size * pose[:3, 0] + size * pose[:3, 1] - size * pose[:3, 2] + c = pos - size * pose[:3, 0] - size * pose[:3, 1] - size * pose[:3, 2] + d = pos + size * pose[:3, 0] - size * pose[:3, 1] - size * pose[:3, 2] + + segs = np.array([[pos, a], [pos, b], [pos, c], [pos, d], [a, b], [b, c], [c, d], [d, a]]) + segs = trimesh.load_path(segs) + + # different color for different dirs + segs.colors = DIR_COLORS[[dir]].repeat(len(segs.entities), 0) + + objects.append(segs) + + trimesh.Scene(objects).show() + +def get_view_direction(thetas, phis, overhead, front): + # phis: [B,]; thetas: [B,] + # front = 0 [-front/2, front/2) + # side (cam left) = 1 [front/2, 180-front/2) + # back = 2 [180-front/2, 180+front/2) + # side (cam right) = 3 [180+front/2, 360-front/2) + # top = 4 [0, overhead] + # bottom = 5 [180-overhead, 180] + res = torch.zeros(thetas.shape[0], dtype=torch.long) + # first determine by phis + phis = phis % (2 * np.pi) + res[(phis < front / 2) | (phis >= 2 * np.pi - front / 2)] = 0 + res[(phis >= front / 2) & (phis < np.pi - front / 2)] = 1 + res[(phis >= np.pi - front / 2) & (phis < np.pi + front / 2)] = 2 + res[(phis >= np.pi + front / 2) & (phis < 2 * np.pi - front / 2)] = 3 + # override by thetas + res[thetas <= overhead] = 4 + res[thetas >= (np.pi - overhead)] = 5 + return res + + +def rand_poses(size, device, opt, radius_range=[1, 1.5], theta_range=[0, 120], phi_range=[0, 360], return_dirs=False, angle_overhead=30, angle_front=60, uniform_sphere_rate=0.5,pivot = None): + ''' generate random poses from an orbit camera + Args: + size: batch size of generated poses. + device: where to allocate the output. 
+ radius: camera radius + theta_range: [min, max], should be in [0, pi] + phi_range: [min, max], should be in [0, 2 * pi] + Return: + poses: [size, 4, 4] + ''' + assert pivot is not None + + theta_range = np.array(theta_range) / 180 * np.pi + phi_range = np.array(phi_range) / 180 * np.pi + angle_overhead = angle_overhead / 180 * np.pi + angle_front = angle_front / 180 * np.pi + + radius = torch.rand(size, device=device) * (radius_range[1] - radius_range[0]) + radius_range[0] + + if random.random() < uniform_sphere_rate: + unit_centers = F.normalize( + torch.stack([ + torch.randn(size, device=device), + torch.abs(torch.randn(size, device=device)), + torch.randn(size, device=device), + ], dim=-1), p=2, dim=1 + ) + thetas = torch.acos(unit_centers[:,1]) + phis = torch.atan2(unit_centers[:,0], unit_centers[:,2]) + phis[phis < 0] += 2 * np.pi + centers = unit_centers * radius.unsqueeze(-1) + else: + thetas = torch.rand(size, device=device) * (theta_range[1] - theta_range[0]) + theta_range[0] + phis = torch.rand(size, device=device) * (phi_range[1] - phi_range[0]) + phi_range[0] + phis[phis < 0] += 2 * np.pi + + centers = torch.stack([ + radius * torch.sin(thetas) * torch.sin(phis), + radius * torch.cos(thetas), + radius * torch.sin(thetas) * torch.cos(phis), + ], dim=-1) # [B, 3] + + targets = pivot + + # jitters + if opt.jitter_pose: + jit_center = opt.jitter_center # 0.015 # was 0.2 + jit_target = opt.jitter_target + centers += torch.rand_like(centers) * jit_center - jit_center/2.0 + targets += torch.randn_like(centers) * jit_target + + # lookat + forward_vector = safe_normalize(centers - targets) + up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(size, 1) + right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1)) + + if opt.jitter_pose: + up_noise = torch.randn_like(up_vector) * opt.jitter_up + else: + up_noise = 0 + + up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1) + up_noise) + + poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(size, 1, 1) + poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1) + poses[:, :3, 3] = centers + + if return_dirs: + dirs = get_view_direction(thetas, phis, angle_overhead, angle_front) + else: + dirs = None + + # back to degree + thetas = thetas / np.pi * 180 + phis = phis / np.pi * 180 + + return poses, dirs, thetas, phis, radius + + +def circle_poses(device, radius=torch.tensor([3.2]), theta=torch.tensor([60]), phi=torch.tensor([0]), return_dirs=False, angle_overhead=30, angle_front=60,pivot= None): + assert pivot is not None + + theta = theta / 180 * np.pi + phi = phi / 180 * np.pi + angle_overhead = angle_overhead / 180 * np.pi + angle_front = angle_front / 180 * np.pi + centers = torch.stack([ + radius * torch.sin(theta) * torch.sin(phi), + radius * torch.cos(theta), + radius * torch.sin(theta) * torch.cos(phi), + ], dim=-1) # [B, 3] + + targets = pivot + # lookat + forward_vector = safe_normalize(centers - targets) + up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(len(centers), 1) + right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1)) + up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1)) + + poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(len(centers), 1, 1) + poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1) + poses[:, :3, 3] = centers + + if return_dirs: + dirs = get_view_direction(theta, phi, 
angle_overhead, angle_front) + else: + dirs = None + + return poses, dirs + + +class NeRFDataset: + def __init__(self, opt, device, type='train', H=256, W=256, size=100): + super().__init__() + + self.opt = opt + self.device = device + self.type = type # train, val, test + + self.H = H + self.W = W + self.size = size + + self.training = self.type in ['train', 'all'] + + self.cx = self.H / 2 + self.cy = self.W / 2 + + self.near = self.opt.min_near + self.far = 1000 # infinite + + self.cam_pivot = torch.tensor([0, 0.0649, 0], device=device).view(1, 3) + + # [debug] visualize poses + # poses, dirs, _, _, _ = rand_poses(100, self.device, opt, radius_range=self.opt.radius_range, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, jitter=self.opt.jitter_pose, uniform_sphere_rate=1) + # visualize_poses(poses.detach().cpu().numpy(), dirs.detach().cpu().numpy()) + + def get_default_view_data(self): + + H = int(self.opt.known_view_scale * self.H) + W = int(self.opt.known_view_scale * self.W) + cx = H / 2 + cy = W / 2 + + radii = torch.FloatTensor([self.opt.default_radius]).to(self.device) + thetas = torch.FloatTensor(self.opt.ref_polars).to(self.device) + phis = torch.FloatTensor(self.opt.ref_azimuths).to(self.device) + + + poses, dirs = circle_poses(self.device, radius=radii, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, pivot = self.cam_pivot) + fov = self.opt.default_fovy + focal = H * float(1 / (np.tan(fov * 3.14159 / 360) * 1.414)) # H / (2 * np.tan(np.deg2rad(fov) / 2)) + + intrinsics = np.array([focal, focal, cx, cy]) + + projection = torch.tensor([ + [2*focal/W, 0, 0, 0], + [0, -2*focal/H, 0, 0], + [0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)], + [0, 0, -1, 0] + ], dtype=torch.float32, device=self.device).unsqueeze(0).repeat(len(radii), 1, 1) + + mvp = projection @ torch.inverse(poses) # [B, 4, 4] + + # sample a low-resolution but full image + rays = get_rays(poses, intrinsics, H, W, -1) + + data = { + 'H': H, + 'W': W, + 'rays_o': rays['rays_o'], + 'rays_d': rays['rays_d'], + 'dir': dirs, + 'mvp': mvp, + 'polar': self.opt.ref_polars, + 'azimuth': self.opt.ref_azimuths, + 'radius': self.opt.ref_radii, + } + + return data + + def collate(self, index): + + B = len(index) + + if self.training: + # random pose on the fly + poses, dirs, thetas, phis, radius = rand_poses(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, + phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, + uniform_sphere_rate=self.opt.uniform_sphere_rate, pivot = self.cam_pivot) + + # random focal + fov = random.random() * (self.opt.fovy_range[1] - self.opt.fovy_range[0]) + self.opt.fovy_range[0] + + elif self.type == 'six_views': + # six views + thetas_six = [90, 90, 90, 90, 1e-3, 179.999] + phis_six = [ 0, 90, 180, -90, 0, 0] + thetas = torch.FloatTensor([thetas_six[index[0]]]).to(self.device) + phis = torch.FloatTensor([phis_six[index[0]]]).to(self.device) + radius = torch.FloatTensor([self.opt.default_radius]).to(self.device) + poses, dirs = circle_poses(self.device, radius=radius, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, pivot = self.cam_pivot) + + # fixed focal + fov = self.opt.default_fovy + + else: + # circle pose + thetas = torch.FloatTensor([self.opt.default_polar]).to(self.device) + phis = 
torch.FloatTensor([(index[0] / self.size) * 360]).to(self.device) + radius = torch.FloatTensor([self.opt.default_radius]).to(self.device) + + poses, dirs = circle_poses(self.device, radius=radius, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, pivot = self.cam_pivot) + + # fixed focal + fov = self.opt.default_fovy + + focal = self.H * float(1 / (np.tan(fov * 3.14159 / 360) * 1.414)) #self.H / (2 * np.tan(np.deg2rad(fov) / 2)) + + intrinsics = np.array([focal, focal, self.cx, self.cy]) + + projection = torch.tensor([ + [2*focal/self.W, 0, 0, 0], + [0, -2*focal/self.H, 0, 0], + [0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)], + [0, 0, -1, 0] + ], dtype=torch.float32, device=self.device).unsqueeze(0) + + mvp = projection @ torch.inverse(poses) # [1, 4, 4] + + # sample a low-resolution but full image + rays = get_rays(poses, intrinsics, self.H, self.W, -1) + + # delta polar/azimuth/radius to default view + delta_polar = thetas - self.opt.default_polar + delta_azimuth = phis - self.opt.default_azimuth + delta_azimuth[delta_azimuth > 180] -= 360 # range in [-180, 180] + delta_radius = radius - self.opt.default_radius + + data = { + 'H': self.H, + 'W': self.W, + 'rays_o': rays['rays_o'], + 'rays_d': rays['rays_d'], + 'dir': dirs, + 'mvp': mvp, + 'poses': poses, + 'polar': delta_polar, + 'azimuth': delta_azimuth, + 'radius': delta_radius, + } + + return data + + def dataloader(self, batch_size=None): + batch_size = batch_size or self.opt.batch_size + loader = DataLoader(list(range(self.size)), batch_size=batch_size, collate_fn=self.collate, shuffle=self.training, num_workers=0) + loader._data = self + return loader \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/renderer.py b/stable-dreamfusion-3DPortrait/nerf/renderer.py new file mode 100644 index 0000000..d141ae0 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/renderer.py @@ -0,0 +1,1190 @@ +import os +import math +import cv2 +import trimesh +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import nvdiffrast.torch as dr + +import mcubes +import raymarching +from meshutils import decimate_mesh, clean_mesh, poisson_mesh_reconstruction +from .utils import custom_meshgrid, safe_normalize + + +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # bins: [B, T], old_z_vals + # weights: [B, T - 1], bin weights. + # return: [B, n_samples], new_z_vals + + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / torch.sum(weights, -1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = torch.linspace(0. + 0.5 / n_samples, 1. 
- 0.5 / n_samples, steps=n_samples).to(weights.device) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = torch.rand(list(cdf.shape[:-1]) + [n_samples]).to(weights.device) + + # Invert CDF + u = u.contiguous() + inds = torch.searchsorted(cdf, u, right=True) + below = torch.max(torch.zeros_like(inds - 1), inds - 1) + above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) + inds_g = torch.stack([below, above], -1) # (B, n_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + +@torch.cuda.amp.autocast(enabled=False) +def near_far_from_bound(rays_o, rays_d, bound, type='cube', min_near=0.05): + # rays: [B, N, 3], [B, N, 3] + # bound: int, radius for ball or half-edge-length for cube + # return near [B, N, 1], far [B, N, 1] + + radius = rays_o.norm(dim=-1, keepdim=True) + + if type == 'sphere': + near = radius - bound # [B, N, 1] + far = radius + bound + + elif type == 'cube': + tmin = (-bound - rays_o) / (rays_d + 1e-15) # [B, N, 3] + tmax = (bound - rays_o) / (rays_d + 1e-15) + near = torch.where(tmin < tmax, tmin, tmax).max(dim=-1, keepdim=True)[0] + far = torch.where(tmin > tmax, tmin, tmax).min(dim=-1, keepdim=True)[0] + # if far < near, means no intersection, set both near and far to inf (1e9 here) + mask = far < near + near[mask] = 1e9 + far[mask] = 1e9 + # restrict near to a minimal value + near = torch.clamp(near, min=min_near) + + return near, far + + +def plot_pointcloud(pc, color=None): + # pc: [N, 3] + # color: [N, 3/4] + print('[visualize points]', pc.shape, pc.dtype, pc.min(0), pc.max(0)) + pc = trimesh.PointCloud(pc, color) + # axis + axes = trimesh.creation.axis(axis_length=4) + # sphere + sphere = trimesh.creation.icosphere(radius=1) + trimesh.Scene([pc, axes, sphere]).show() + + +class DMTet(): + def __init__(self, device): + self.device = device + self.triangle_table = torch.tensor([ + [-1, -1, -1, -1, -1, -1], + [ 1, 0, 2, -1, -1, -1], + [ 4, 0, 3, -1, -1, -1], + [ 1, 4, 2, 1, 3, 4], + [ 3, 1, 5, -1, -1, -1], + [ 2, 3, 0, 2, 5, 3], + [ 1, 4, 0, 1, 5, 4], + [ 4, 2, 5, -1, -1, -1], + [ 4, 5, 2, -1, -1, -1], + [ 4, 1, 0, 4, 5, 1], + [ 3, 2, 0, 3, 5, 2], + [ 1, 3, 5, -1, -1, -1], + [ 4, 1, 2, 4, 3, 1], + [ 3, 0, 4, -1, -1, -1], + [ 2, 0, 1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1] + ], dtype=torch.long, device=device) + self.num_triangles_table = torch.tensor([0,1,1,2,1,2,2,1,1,2,2,1,2,1,1,0], dtype=torch.long, device=device) + self.base_tet_edges = torch.tensor([0,1,0,2,0,3,1,2,1,3,2,3], dtype=torch.long, device=device) + + def sort_edges(self, edges_ex2): + with torch.no_grad(): + order = (edges_ex2[:,0] > edges_ex2[:,1]).long() + order = order.unsqueeze(dim=1) + + a = torch.gather(input=edges_ex2, index=order, dim=1) + b = torch.gather(input=edges_ex2, index=1-order, dim=1) + + return torch.stack([a, b],-1) + + def __call__(self, pos_nx3, sdf_n, tet_fx4): + # pos_nx3: [N, 3] + # sdf_n: [N] + # tet_fx4: [F, 4] + + with torch.no_grad(): + occ_n = sdf_n > 0 + occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1,4) + occ_sum = torch.sum(occ_fx4, -1) # [F,] + valid_tets = (occ_sum>0) & (occ_sum<4) + occ_sum = occ_sum[valid_tets] + + # find all vertices + 
all_edges = tet_fx4[valid_tets][:,self.base_tet_edges].reshape(-1,2) + all_edges = self.sort_edges(all_edges) + unique_edges, idx_map = torch.unique(all_edges,dim=0, return_inverse=True) + + unique_edges = unique_edges.long() + mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1,2).sum(-1) == 1 + mapping = torch.ones((unique_edges.shape[0]), dtype=torch.long, device=self.device) * -1 + mapping[mask_edges] = torch.arange(mask_edges.sum(), dtype=torch.long,device=self.device) + idx_map = mapping[idx_map] # map edges to verts + + interp_v = unique_edges[mask_edges] + + edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1,2,3) + edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1,2,1) + edges_to_interp_sdf[:,-1] *= -1 + + denominator = edges_to_interp_sdf.sum(1,keepdim = True) + + edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1])/denominator + verts = (edges_to_interp * edges_to_interp_sdf).sum(1) + + idx_map = idx_map.reshape(-1,6) + + v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=self.device)) + tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1) + num_triangles = self.num_triangles_table[tetindex] + + # Generate triangle indices + faces = torch.cat(( + torch.gather(input=idx_map[num_triangles == 1], dim=1, index=self.triangle_table[tetindex[num_triangles == 1]][:, :3]).reshape(-1,3), + torch.gather(input=idx_map[num_triangles == 2], dim=1, index=self.triangle_table[tetindex[num_triangles == 2]][:, :6]).reshape(-1,3), + ), dim=0) + + return verts, faces + +def compute_edge_to_face_mapping(attr_idx): + with torch.no_grad(): + # Get unique edges + # Create all edges, packed by triangle + all_edges = torch.cat(( + torch.stack((attr_idx[:, 0], attr_idx[:, 1]), dim=-1), + torch.stack((attr_idx[:, 1], attr_idx[:, 2]), dim=-1), + torch.stack((attr_idx[:, 2], attr_idx[:, 0]), dim=-1), + ), dim=-1).view(-1, 2) + + # Swap edge order so min index is always first + order = (all_edges[:, 0] > all_edges[:, 1]).long().unsqueeze(dim=1) + sorted_edges = torch.cat(( + torch.gather(all_edges, 1, order), + torch.gather(all_edges, 1, 1 - order) + ), dim=-1) + + # Elliminate duplicates and return inverse mapping + unique_edges, idx_map = torch.unique(sorted_edges, dim=0, return_inverse=True) + + tris = torch.arange(attr_idx.shape[0]).repeat_interleave(3).cuda() + + tris_per_edge = torch.zeros((unique_edges.shape[0], 2), dtype=torch.int64).cuda() + + # Compute edge to face table + mask0 = order[:,0] == 0 + mask1 = order[:,0] == 1 + tris_per_edge[idx_map[mask0], 0] = tris[mask0] + tris_per_edge[idx_map[mask1], 1] = tris[mask1] + + return tris_per_edge + +@torch.cuda.amp.autocast(enabled=False) +def normal_consistency(face_normals, t_pos_idx): + + tris_per_edge = compute_edge_to_face_mapping(t_pos_idx) + + # Fetch normals for both faces sharind an edge + n0 = face_normals[tris_per_edge[:, 0], :] + n1 = face_normals[tris_per_edge[:, 1], :] + + # Compute error metric based on normal difference + term = torch.clamp(torch.sum(n0 * n1, -1, keepdim=True), min=-1.0, max=1.0) + term = (1.0 - term) + + return torch.mean(torch.abs(term)) + + +def laplacian_uniform(verts, faces): + + V = verts.shape[0] + F = faces.shape[0] + + # Neighbor indices + ii = faces[:, [1, 2, 0]].flatten() + jj = faces[:, [2, 0, 1]].flatten() + adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(dim=1) + adj_values = torch.ones(adj.shape[1], device=verts.device, dtype=torch.float) + + # Diagonal indices + diag_idx = adj[0] + + # Build the sparse matrix + idx = torch.cat((adj, 
torch.stack((diag_idx, diag_idx), dim=0)), dim=1) + values = torch.cat((-adj_values, adj_values)) + + # The coalesce operation sums the duplicate indices, resulting in the + # correct diagonal + return torch.sparse_coo_tensor(idx, values, (V,V)).coalesce() + + +@torch.cuda.amp.autocast(enabled=False) +def laplacian_smooth_loss(verts, faces): + with torch.no_grad(): + L = laplacian_uniform(verts, faces.long()) + loss = L.mm(verts) + loss = loss.norm(dim=1) + loss = loss.mean() + return loss + + +class NeRFRenderer(nn.Module): + def __init__(self, opt): + super().__init__() + + self.opt = opt + self.bound = opt.bound + self.cascade = 1 + math.ceil(math.log2(opt.bound)) + self.grid_size = 128 + self.max_level = None + self.dmtet = opt.dmtet + self.cuda_ray = opt.cuda_ray + self.taichi_ray = opt.taichi_ray + self.min_near = opt.min_near + self.density_thresh = opt.density_thresh + + # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax) + # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing. + aabb_train = torch.FloatTensor([-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound]) + aabb_infer = aabb_train.clone() + self.register_buffer('aabb_train', aabb_train) + self.register_buffer('aabb_infer', aabb_infer) + + self.glctx = None + + # extra state for cuda raymarching + if self.cuda_ray: + # density grid + density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H] + density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8] + self.register_buffer('density_grid', density_grid) + self.register_buffer('density_bitfield', density_bitfield) + self.mean_density = 0 + self.iter_density = 0 + + if self.dmtet: + # load dmtet vertices + tets = np.load('tets/{}_tets.npz'.format(self.opt.tet_grid_size)) + self.verts = - torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * 2 # covers [-1, 1] + self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda') + self.tet_scale = torch.tensor([1, 1, 1], dtype=torch.float32, device='cuda') + self.dmtet_model = DMTet('cuda') + + # vert sdf and deform + sdf = torch.nn.Parameter(torch.zeros_like(self.verts[..., 0]), requires_grad=True) + self.register_parameter('sdf', sdf) + deform = torch.nn.Parameter(torch.zeros_like(self.verts), requires_grad=True) + self.register_parameter('deform', deform) + + edges = torch.tensor([0,1, 0,2, 0,3, 1,2, 1,3, 2,3], dtype=torch.long, device="cuda") # six edges for each tetrahedron. 
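+        # Gather the 6 edges of every tetrahedron, sort each vertex pair so (i, j)
+        # always has i < j, and drop duplicates so edges shared between neighbouring
+        # tets appear only once in the unique undirected edge set (self.all_edges).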
+ all_edges = self.indices[:,edges].reshape(-1,2) # [M * 6, 2] + all_edges_sorted = torch.sort(all_edges, dim=1)[0] + self.all_edges = torch.unique(all_edges_sorted, dim=0) + + if self.opt.h <= 2048 and self.opt.w <= 2048: + self.glctx = dr.RasterizeCudaContext() + else: + self.glctx = dr.RasterizeGLContext() + + if self.taichi_ray: + from einops import rearrange + from taichi_modules import RayMarcherTaichi + from taichi_modules import VolumeRendererTaichi + from taichi_modules import RayAABBIntersector as RayAABBIntersectorTaichi + from taichi_modules import raymarching_test as raymarching_test_taichi + from taichi_modules import composite_test as composite_test_fw + from taichi_modules import packbits as packbits_taichi + self.rearrange = rearrange + self.packbits_taichi = packbits_taichi + self.ray_aabb_intersector = RayAABBIntersectorTaichi + self.raymarching_test_taichi = raymarching_test_taichi + self.composite_test_fw = composite_test_fw + self.ray_marching = RayMarcherTaichi(batch_size=4096) # TODO: hard encoded batch size + self.volume_render = VolumeRendererTaichi(batch_size=4096) # TODO: hard encoded batch size + # density grid + density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H] + density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8] + self.register_buffer('density_grid', density_grid) + self.register_buffer('density_bitfield', density_bitfield) + self.mean_density = 0 + self.iter_density = 0 + + @torch.no_grad() + def density_blob(self, x): + # x: [B, N, 3] + + d = (x ** 2).sum(-1) + + if self.opt.density_activation == 'exp': + g = self.opt.blob_density * torch.exp(- d / (2 * self.opt.blob_radius ** 2)) + else: + g = self.opt.blob_density * (1 - torch.sqrt(d) / self.opt.blob_radius) + + return g + + def forward(self, x, d): + raise NotImplementedError() + + def density(self, x): + raise NotImplementedError() + + def reset_extra_state(self): + if not (self.cuda_ray or self.taichi_ray): + return + # density grid + self.density_grid.zero_() + self.mean_density = 0 + self.iter_density = 0 + + @torch.no_grad() + def export_mesh(self, path, resolution=None, decimate_target=-1, S=128): + + if self.opt.dmtet: + + sdf = self.sdf + deform = torch.tanh(self.deform) / self.opt.tet_grid_size + + vertices, triangles = self.dmtet_model(self.verts + deform, sdf, self.indices) + + vertices = vertices.detach().cpu().numpy() + triangles = triangles.detach().cpu().numpy() + + else: + + if resolution is None: + resolution = self.grid_size + + if self.cuda_ray: + density_thresh = min(self.mean_density, self.density_thresh) \ + if np.greater(self.mean_density, 0) else self.density_thresh + else: + density_thresh = self.density_thresh + + # TODO: use a larger thresh to extract a surface mesh from the density field, but this value is very empirical... 
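+            # Evaluate the density field on a resolution^3 grid over [-1, 1]^3 in
+            # S x S x S chunks to keep peak memory bounded, then run marching cubes on
+            # the assembled sigma volume with the activation-dependent threshold chosen below.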
+ if self.opt.density_activation == 'softplus': + density_thresh = density_thresh * 25 + + sigmas = np.zeros([resolution, resolution, resolution], dtype=np.float32) + + # query + X = torch.linspace(-1, 1, resolution).split(S) + Y = torch.linspace(-1, 1, resolution).split(S) + Z = torch.linspace(-1, 1, resolution).split(S) + + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = custom_meshgrid(xs, ys, zs) + pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3] + val = self.density(pts.to(self.aabb_train.device)) + sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z] + + print(f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})') + + vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh) + vertices = vertices / (resolution - 1.0) * 2 - 1 + + # clean + vertices = vertices.astype(np.float32) + triangles = triangles.astype(np.int32) + vertices, triangles = clean_mesh(vertices, triangles, remesh=True, remesh_size=0.01) + + # decimation + if decimate_target > 0 and triangles.shape[0] > decimate_target: + vertices, triangles = decimate_mesh(vertices, triangles, decimate_target) + + v = torch.from_numpy(vertices).contiguous().float().to(self.aabb_train.device) + f = torch.from_numpy(triangles).contiguous().int().to(self.aabb_train.device) + + # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault... + # mesh.export(os.path.join(path, f'mesh.ply')) + + def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''): + # v, f: torch Tensor + device = v.device + v_np = v.cpu().numpy() # [N, 3] + f_np = f.cpu().numpy() # [M, 3] + + print(f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}') + + # unwrap uvs + import xatlas + import nvdiffrast.torch as dr + from sklearn.neighbors import NearestNeighbors + from scipy.ndimage import binary_dilation, binary_erosion + + atlas = xatlas.Atlas() + atlas.add_mesh(v_np, f_np) + chart_options = xatlas.ChartOptions() + chart_options.max_iterations = 4 # for faster unwrap... 
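+            # Once the atlas is generated below, the texture is baked by rasterising the
+            # mesh in UV space, querying the density network for albedo at the interpolated
+            # 3D positions, and inpainting texels outside the rasterised mask via a
+            # nearest-neighbour search before the atlas image is written to disk.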
+ atlas.generate(chart_options=chart_options) + vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2] + + # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2] + + vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device) + ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device) + + # render uv maps + uv = vt * 2.0 - 1.0 # uvs to range [-1, 1] + uv = torch.cat((uv, torch.zeros_like(uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4] + + if ssaa > 1: + h = int(h0 * ssaa) + w = int(w0 * ssaa) + else: + h, w = h0, w0 + + if self.glctx is None: + if h <= 2048 and w <= 2048: + self.glctx = dr.RasterizeCudaContext() + else: + self.glctx = dr.RasterizeGLContext() + + rast, _ = dr.rasterize(self.glctx, uv.unsqueeze(0), ft, (h, w)) # [1, h, w, 4] + xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3] + mask, _ = dr.interpolate(torch.ones_like(v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1] + + # masked query + xyzs = xyzs.view(-1, 3) + mask = (mask > 0).view(-1) + + feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32) + + if mask.any(): + xyzs = xyzs[mask] # [M, 3] + + # batched inference to avoid OOM + all_feats = [] + head = 0 + while head < xyzs.shape[0]: + tail = min(head + 640000, xyzs.shape[0]) + results_ = self.density(xyzs[head:tail]) + all_feats.append(results_['albedo'].float()) + head += 640000 + + feats[mask] = torch.cat(all_feats, dim=0) + + feats = feats.view(h, w, -1) + mask = mask.view(h, w) + + # quantize [0.0, 1.0] to [0, 255] + feats = feats.cpu().numpy() + feats = (feats * 255).astype(np.uint8) + + ### NN search as an antialiasing ... + mask = mask.cpu().numpy() + + inpaint_region = binary_dilation(mask, iterations=3) + inpaint_region[mask] = 0 + + search_region = mask.copy() + not_search_region = binary_erosion(search_region, iterations=2) + search_region[not_search_region] = 0 + + search_coords = np.stack(np.nonzero(search_region), axis=-1) + inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1) + + knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords) + _, indices = knn.kneighbors(inpaint_coords) + + feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)] + + feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR) + + # do ssaa after the NN search, in numpy + if ssaa > 1: + feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR) + + cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats) + + # save obj (v, vt, f /) + obj_file = os.path.join(path, f'{name}mesh.obj') + mtl_file = os.path.join(path, f'{name}mesh.mtl') + + print(f'[INFO] writing obj mesh to {obj_file}') + with open(obj_file, "w") as fp: + fp.write(f'mtllib {name}mesh.mtl \n') + + print(f'[INFO] writing vertices {v_np.shape}') + for v in v_np: + fp.write(f'v {v[0]} {v[1]} {v[2]} \n') + + print(f'[INFO] writing vertices texture coords {vt_np.shape}') + for v in vt_np: + fp.write(f'vt {v[0]} {1 - v[1]} \n') + + print(f'[INFO] writing faces {f_np.shape}') + fp.write(f'usemtl mat0 \n') + for i in range(len(f_np)): + fp.write(f"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \n") + + with open(mtl_file, "w") as fp: + fp.write(f'newmtl mat0 \n') + fp.write(f'Ka 1.000000 1.000000 1.000000 \n') + fp.write(f'Kd 1.000000 1.000000 1.000000 \n') + fp.write(f'Ks 0.000000 0.000000 0.000000 \n') + fp.write(f'Tr 1.000000 \n') + fp.write(f'illum 1 \n') + fp.write(f'Ns 0.000000 \n') + fp.write(f'map_Kd {name}albedo.png \n') + + 
_export(v, f) + + def run(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs): + # rays_o, rays_d: [B, N, 3] + # bg_color: [BN, 3] in range [0, 1] + # return: image: [B, N, 3], depth: [B, N] + + prefix = rays_o.shape[:-1] + rays_o = rays_o.contiguous().view(-1, 3) + rays_d = rays_d.contiguous().view(-1, 3) + + N = rays_o.shape[0] # N = B * N, in fact + device = rays_o.device + + results = {} + + # choose aabb + aabb = self.aabb_train if self.training else self.aabb_infer + + # sample steps + # nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near) + # nears.unsqueeze_(-1) + # fars.unsqueeze_(-1) + nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near) + + # random sample light_d if not provided + if light_d is None: + # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face) + light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3] + + #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}') + + z_vals = torch.linspace(0.0, 1.0, self.opt.num_steps, device=device).unsqueeze(0) # [1, T] + z_vals = z_vals.expand((N, self.opt.num_steps)) # [N, T] + z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars] + + # perturb z_vals + sample_dist = (fars - nears) / self.opt.num_steps + if perturb: + z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist + #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs. + + # generate xyzs + xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3] + xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip. + + #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy()) + + # query SDF and RGB + density_outputs = self.density(xyzs.reshape(-1, 3)) + + #sigmas = density_outputs['sigma'].view(N, self.opt.num_steps) # [N, T] + for k, v in density_outputs.items(): + density_outputs[k] = v.view(N, self.opt.num_steps, -1) + + # upsample z_vals (nerf-like) + if self.opt.upsample_steps > 0: + with torch.no_grad(): + + deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1] + deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1) + + alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T] + alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1] + weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T] + + # sample new z_vals + z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1] + new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], self.opt.upsample_steps, det=not self.training).detach() # [N, t] + + new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3] + new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip. 
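+                # Standard NeRF hierarchical sampling: the new depths were importance-sampled
+                # from the coarse weights via inverse-CDF sampling (sample_pdf above), and after
+                # evaluation they are merged with the uniform samples and re-sorted by depth
+                # before compositing.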
+ + # only forward new points to save computation + new_density_outputs = self.density(new_xyzs.reshape(-1, 3)) + #new_sigmas = new_density_outputs['sigma'].view(N, self.opt.upsample_steps) # [N, t] + for k, v in new_density_outputs.items(): + new_density_outputs[k] = v.view(N, self.opt.upsample_steps, -1) + + # re-order + z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t] + z_vals, z_index = torch.sort(z_vals, dim=1) + + xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3] + xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs)) + + for k in density_outputs: + tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1) + density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output)) + + deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1] + deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1) + alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t] + alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1] + weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t] + + dirs = rays_d.view(-1, 1, 3).expand_as(xyzs) + light_d = light_d.view(-1, 1, 3).expand_as(xyzs) + for k, v in density_outputs.items(): + density_outputs[k] = v.view(-1, v.shape[-1]) + + dirs = safe_normalize(dirs) + sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d.reshape(-1, 3), ratio=ambient_ratio, shading=shading) + rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3] + if normals is not None: + normals = normals.view(N, -1, 3) + + # calculate weight_sum (mask) + weights_sum = weights.sum(dim=-1) # [N] + + # calculate depth + depth = torch.sum(weights * z_vals, dim=-1) + + # calculate color + image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1] + + # mix background color + if bg_color is None: + if self.opt.bg_radius > 0: + # use the bg model to calculate bg_color + bg_color = self.background(rays_d) # [N, 3] + else: + bg_color = 1 + + image = image + (1 - weights_sum).unsqueeze(-1) * bg_color + + image = image.view(*prefix, 3) + depth = depth.view(*prefix) + weights_sum = weights_sum.reshape(*prefix) + + if self.training: + if self.opt.lambda_orient > 0 and normals is not None: + # orientation loss + loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2 + results['loss_orient'] = loss_orient.sum(-1).mean() + + if self.opt.lambda_3d_normal_smooth > 0 and normals is not None: + normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2) + results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean() + + if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None: + normal_image = torch.sum(weights.unsqueeze(-1) * (normals + 1) / 2, dim=-2) # [N, 3], in [0, 1] + results['normal_image'] = normal_image + + results['image'] = image + results['depth'] = depth + results['weights'] = weights + results['weights_sum'] = weights_sum + + return results + + + def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, **kwargs): + # rays_o, rays_d: [B, N, 3] + # return: image: [B, N, 3], depth: [B, N] + + prefix = rays_o.shape[:-1] + rays_o = rays_o.contiguous().view(-1, 3) + rays_d = rays_d.contiguous().view(-1, 3) + + N = rays_o.shape[0] # B * N, in fact + device = rays_o.device + + # pre-calculate near far + 
nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer) + + # random sample light_d if not provided + if light_d is None: + # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face) + light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3] + + results = {} + + if self.training: + xyzs, dirs, ts, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps) + dirs = safe_normalize(dirs) + + if light_d.shape[0] > 1: + flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long() + light_d = light_d[flatten_rays] + + sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading) + weights, weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, ts, rays, T_thresh, binarize) + + # normals related regularizations + if self.opt.lambda_orient > 0 and normals is not None: + # orientation loss + loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2 + results['loss_orient'] = loss_orient.mean() + + if self.opt.lambda_3d_normal_smooth > 0 and normals is not None: + normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2) + results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean() + + if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None: + _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize) + results['normal_image'] = normal_image + + # weights normalization + results['weights'] = weights + + else: + + # allocate outputs + dtype = torch.float32 + + weights_sum = torch.zeros(N, dtype=dtype, device=device) + depth = torch.zeros(N, dtype=dtype, device=device) + image = torch.zeros(N, 3, dtype=dtype, device=device) + + n_alive = N + rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N] + rays_t = nears.clone() # [N] + + step = 0 + + while step < self.opt.max_steps: # hard coded max step + + # count alive rays + n_alive = rays_alive.shape[0] + + # exit loop + if n_alive <= 0: + break + + # decide compact_steps + n_step = max(min(N // n_alive, 8), 1) + + xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps) + dirs = safe_normalize(dirs) + sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading) + raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize) + + rays_alive = rays_alive[rays_alive >= 0] + #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}') + + step += n_step + + # mix background color + if bg_color is None: + if self.opt.bg_radius > 0: + # use the bg model to calculate bg_color + bg_color = self.background(rays_d) # [N, 3] + else: + bg_color = 1 + + image = image + (1 - weights_sum).unsqueeze(-1) * bg_color + image = image.view(*prefix, 3) + + depth = depth.view(*prefix) + + weights_sum = weights_sum.reshape(*prefix) + + results['image'] = image + results['depth'] = depth + results['weights_sum'] = weights_sum + + return results + + @torch.no_grad() + def init_tet(self, mesh=None): + + if mesh is not None: + # 
normalize mesh + scale = 0.8 / np.array(mesh.bounds[1] - mesh.bounds[0]).max() + center = np.array(mesh.bounds[1] + mesh.bounds[0]) / 2 + mesh.vertices = (mesh.vertices - center) * scale + + # init scale + # self.tet_scale = torch.from_numpy(np.abs(mesh.vertices).max(axis=0) + 1e-1).to(self.verts.dtype).cuda() + self.tet_scale = torch.from_numpy(np.array([np.abs(mesh.vertices).max()]) + 1e-1).to(self.verts.dtype).cuda() + self.verts = self.verts * self.tet_scale + + # init sdf + import cubvh + BVH = cubvh.cuBVH(mesh.vertices, mesh.faces) + sdf, _, _ = BVH.signed_distance(self.verts, return_uvw=False, mode='watertight') + sdf *= -10 # INNER is POSITIVE, also make it stronger + self.sdf.data += sdf.to(self.sdf.data.dtype).clamp(-1, 1) + + else: + + if self.cuda_ray: + density_thresh = min(self.mean_density, self.density_thresh) + else: + density_thresh = self.density_thresh + + if self.opt.density_activation == 'softplus': + density_thresh = density_thresh * 25 + + # init scale + sigma = self.density(self.verts)['sigma'] # verts covers [-1, 1] now + mask = sigma > density_thresh + valid_verts = self.verts[mask] + self.tet_scale = valid_verts.abs().amax(dim=0) + 1e-1 + self.verts = self.verts * self.tet_scale + + # init sigma + sigma = self.density(self.verts)['sigma'] # new verts + self.sdf.data += (sigma - density_thresh).clamp(-1, 1) + + print(f'[INFO] init dmtet: scale = {self.tet_scale}') + + + def run_dmtet(self, rays_o, rays_d, mvp, h, w, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, **kwargs): + # mvp: [B, 4, 4] + + device = mvp.device + campos = rays_o[:, 0, :] # only need one ray per batch + + # random sample light_d if not provided + if light_d is None: + # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face) + light_d = safe_normalize(campos + torch.randn_like(campos)).view(-1, 1, 1, 3) # [B, 1, 1, 3] + + results = {} + + # get mesh + sdf = self.sdf + deform = torch.tanh(self.deform) / self.opt.tet_grid_size + + verts, faces = self.dmtet_model(self.verts + deform, sdf, self.indices) + + # get normals + i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2] + v0, v1, v2 = verts[i0, :], verts[i1, :], verts[i2, :] + + faces = faces.int() + + face_normals = torch.cross(v1 - v0, v2 - v0) + face_normals = safe_normalize(face_normals) + + vn = torch.zeros_like(verts) + vn.scatter_add_(0, i0[:, None].repeat(1,3), face_normals) + vn.scatter_add_(0, i1[:, None].repeat(1,3), face_normals) + vn.scatter_add_(0, i2[:, None].repeat(1,3), face_normals) + + vn = torch.where(torch.sum(vn * vn, -1, keepdim=True) > 1e-20, vn, torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device)) + + # rasterization + verts_clip = torch.bmm(F.pad(verts, pad=(0, 1), mode='constant', value=1.0).unsqueeze(0).repeat(mvp.shape[0], 1, 1), + mvp.permute(0,2,1)).float() # [B, N, 4] + rast, rast_db = dr.rasterize(self.glctx, verts_clip, faces, (h, w)) + + alpha = (rast[..., 3:] > 0).float() + xyzs, _ = dr.interpolate(verts.unsqueeze(0), rast, faces) # [B, H, W, 3] + normal, _ = dr.interpolate(vn.unsqueeze(0).contiguous(), rast, faces) + normal = safe_normalize(normal) + + xyzs = xyzs.view(-1, 3) + mask = (rast[..., 3:] > 0).view(-1).detach() + + # do the lighting here since we have normal from mesh now. 
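The per-vertex normals used by `run_dmtet` above are obtained by scatter-adding each (area-weighted) face normal onto its three vertices and normalizing the sums. A minimal standalone sketch of that accumulation; the helper is hypothetical and not part of the file.

```python
import torch
import torch.nn.functional as F

def vertex_normals(verts, faces):
    # verts: [V, 3] float, faces: [F, 3] long
    i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]
    v0, v1, v2 = verts[i0], verts[i1], verts[i2]
    face_n = torch.cross(v1 - v0, v2 - v0, dim=-1)      # area-weighted face normals
    vn = torch.zeros_like(verts)
    vn.scatter_add_(0, i0[:, None].repeat(1, 3), face_n)
    vn.scatter_add_(0, i1[:, None].repeat(1, 3), face_n)
    vn.scatter_add_(0, i2[:, None].repeat(1, 3), face_n)
    return F.normalize(vn, dim=-1)

verts = torch.tensor([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
faces = torch.tensor([[0, 1, 2], [0, 3, 1]])
print(vertex_normals(verts, faces))
```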
+ albedo = torch.zeros_like(xyzs, dtype=torch.float32) + if mask.any(): + masked_albedo = self.density(xyzs[mask])['albedo'] + albedo[mask] = masked_albedo.float() + albedo = albedo.view(-1, h, w, 3) + + # these two modes lead to no parameters to optimize if using --lock_geo. + if self.opt.lock_geo and shading in ['textureless', 'normal']: + shading = 'lambertian' + + if shading == 'albedo': + color = albedo + elif shading == 'textureless': + lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0) + color = lambertian.unsqueeze(-1).repeat(1, 1, 1, 3) + elif shading == 'normal': + color = (normal + 1) / 2 + else: # 'lambertian' + lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0) + color = albedo * lambertian.unsqueeze(-1) + + color = dr.antialias(color, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3] + alpha = dr.antialias(alpha, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 1] + + # mix background color + if bg_color is None: + if self.opt.bg_radius > 0: + # use the bg model to calculate bg_color + bg_color = self.background(rays_d) # [N, 3] + else: + bg_color = 1 + + if torch.is_tensor(bg_color) and len(bg_color.shape) > 1: + bg_color = bg_color.view(-1, h, w, 3) + + depth = rast[:, :, :, [2]] # [B, H, W] + color = color + (1 - alpha) * bg_color + + results['depth'] = depth + results['image'] = color + results['weights_sum'] = alpha.squeeze(-1) + + if self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0: + normal_image = dr.antialias((normal + 1) / 2, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3] + results['normal_image'] = normal_image + + # regularizations + if self.training: + if self.opt.lambda_mesh_normal > 0: + results['normal_loss'] = normal_consistency(face_normals, faces) + if self.opt.lambda_mesh_laplacian > 0: + results['lap_loss'] = laplacian_smooth_loss(verts, faces) + + return results + + def run_taichi(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, **kwargs): + # rays_o, rays_d: [B, N, 3], assumes B == 1 + # return: image: [B, N, 3], depth: [B, N] + + prefix = rays_o.shape[:-1] + rays_o = rays_o.contiguous().view(-1, 3) + rays_d = rays_d.contiguous().view(-1, 3) + + N = rays_o.shape[0] # N = B * N, in fact + device = rays_o.device + + # pre-calculate near far + exp_step_factor = kwargs.get('exp_step_factor', 0.) + MAX_SAMPLES = 1024 + NEAR_DISTANCE = 0.01 + center = torch.zeros(1, 3) + half_size = torch.ones(1, 3) + _, hits_t, _ = self.ray_aabb_intersector.apply(rays_o, rays_d, center, half_size, 1) + hits_t[(hits_t[:, 0, 0] >= 0) & (hits_t[:, 0, 0] < NEAR_DISTANCE), 0, 0] = NEAR_DISTANCE + + # TODO: should sample different light_d for each batch... but taichi end doesn't have a flatten_ray implemented currently... 
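The shading branch of `run_dmtet` blends an ambient term with a clamped Lambertian term controlled by `ambient_ratio`. Below is a compact sketch of the same four modes written as a hypothetical standalone helper; it assumes normals and light directions are already unit length.

```python
import torch
import torch.nn.functional as F

def shade(albedo, normal, light_d, ambient_ratio=1.0, mode='lambertian'):
    # albedo, normal, light_d: [..., 3]
    lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).clamp(min=0)
    if mode == 'albedo':
        return albedo
    if mode == 'textureless':
        return lambertian.unsqueeze(-1).expand_as(albedo)
    if mode == 'normal':
        return (normal + 1) / 2
    return albedo * lambertian.unsqueeze(-1)            # 'lambertian'

color = shade(torch.rand(4, 3),
              F.normalize(torch.randn(4, 3), dim=-1),
              torch.tensor([0., 0., 1.]).expand(4, 3),
              ambient_ratio=0.3)
```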
+ # random sample light_d if not provided + if light_d is None: + # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face) + light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float)) + light_d = safe_normalize(light_d) + + results = {} + + if self.training: + rays_a, xyzs, dirs, deltas, ts, _ = self.ray_marching(rays_o, rays_d, hits_t[:, 0], self.density_bitfield, self.cascade, self.bound, exp_step_factor, self.grid_size, MAX_SAMPLES) + dirs = safe_normalize(dirs) + # plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy()) + sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading) + _, weights_sum, depth, image, weights = self.volume_render(sigmas, rgbs, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4)) + + # normals related regularizations + if self.opt.lambda_orient > 0 and normals is not None: + # orientation loss + loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2 + results['loss_orient'] = loss_orient.mean() + + if self.opt.lambda_3d_normal_smooth > 0 and normals is not None: + normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2) + results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean() + + if (self.opt.lambda_2d_normal_smooth > 0 or self.opt.lambda_normal > 0) and normals is not None: + _, _, _, normal_image, _ = self.volume_render(sigmas.detach(), (normals + 1) / 2, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4)) + results['normal_image'] = normal_image + + # weights normalization + results['weights'] = weights + + else: + + # allocate outputs + dtype = torch.float32 + + weights_sum = torch.zeros(N, dtype=dtype, device=device) + depth = torch.zeros(N, dtype=dtype, device=device) + image = torch.zeros(N, 3, dtype=dtype, device=device) + + n_alive = N + rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N] + rays_t = hits_t[:, 0, 0] + step = 0 + + min_samples = 1 if exp_step_factor == 0 else 4 + + while step < self.opt.max_steps: # hard coded max step + + # count alive rays + n_alive = rays_alive.shape[0] + + # exit loop + if n_alive <= 0: + break + + # decide compact_steps + # n_step = max(min(N // n_alive, 8), 1) + n_step = max(min(N // n_alive, 64), min_samples) + + xyzs, dirs, deltas, ts, N_eff_samples = \ + self.raymarching_test_taichi(rays_o, rays_d, hits_t[:, 0], rays_alive, + self.density_bitfield, self.cascade, + self.bound, exp_step_factor, + self.grid_size, MAX_SAMPLES, n_step) + + xyzs = self.rearrange(xyzs, 'n1 n2 c -> (n1 n2) c') + dirs = self.rearrange(dirs, 'n1 n2 c -> (n1 n2) c') + dirs = safe_normalize(dirs) + valid_mask = ~torch.all(dirs == 0, dim=1) + if valid_mask.sum() == 0: + break + + sigmas = torch.zeros(len(xyzs), device=device) + rgbs = torch.zeros(len(xyzs), 3, device=device) + normals = torch.zeros(len(xyzs), 3, device=device) + + sigmas[valid_mask], _rgbs, normals = self(xyzs[valid_mask], dirs[valid_mask], light_d, ratio=ambient_ratio, shading=shading) + rgbs[valid_mask] = _rgbs.float() + sigmas = self.rearrange(sigmas, '(n1 n2) -> n1 n2', n2=n_step) + rgbs = self.rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=n_step) + if normals is not None: + normals = self.rearrange(normals, '(n1 n2) c -> n1 n2 c', n2=n_step) + + self.composite_test_fw(sigmas, rgbs, deltas, ts, hits_t[:,0], rays_alive, + kwargs.get('T_threshold', 1e-4), N_eff_samples, + weights_sum, depth, image) + + rays_alive = rays_alive[rays_alive >= 0] + + step += n_step + + # mix background color + if bg_color is None: + if 
self.opt.bg_radius > 0: + # use the bg model to calculate bg_color + bg_color = self.background(rays_d) # [N, 3] + else: + bg_color = 1 + + image = image + self.rearrange(1 - weights_sum, 'n -> n 1') * bg_color + image = image.view(*prefix, 3) + + depth = depth.view(*prefix) + + weights_sum = weights_sum.reshape(*prefix) + + results['image'] = image + results['depth'] = depth + results['weights_sum'] = weights_sum + + return results + + + @torch.no_grad() + def update_extra_state(self, decay=0.95, S=128): + # call before each epoch to update extra states. + + if not (self.cuda_ray or self.taichi_ray): + return + + ### update density grid + tmp_grid = - torch.ones_like(self.density_grid) + + X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S) + Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S) + Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S) + + for xs in X: + for ys in Y: + for zs in Z: + + # construct points + xx, yy, zz = custom_meshgrid(xs, ys, zs) + coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128) + indices = raymarching.morton3D(coords).long() # [N] + xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1] + + # cascading + for cas in range(self.cascade): + bound = min(2 ** cas, self.bound) + half_grid_size = bound / self.grid_size + # scale to current cascade's resolution + cas_xyzs = xyzs * (bound - half_grid_size) + # add noise in [-hgs, hgs] + cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size + # query density + sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach() + # assign + tmp_grid[cas, indices] = sigmas + # ema update + valid_mask = self.density_grid >= 0 + self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask]) + self.mean_density = torch.mean(self.density_grid[valid_mask]).item() + self.iter_density += 1 + + # convert to bitfield + density_thresh = min(self.mean_density, self.density_thresh) + if self.cuda_ray: + self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield) + elif self.taichi_ray: + self.packbits_taichi(self.density_grid.reshape(-1).contiguous(), density_thresh, self.density_bitfield) + + # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f}') + + + def render(self, rays_o, rays_d, mvp, h, w, staged=False, max_ray_batch=4096, **kwargs): + # rays_o, rays_d: [B, N, 3] + # return: pred_rgb: [B, N, 3] + B, N = rays_o.shape[:2] + device = rays_o.device + + if self.dmtet: + results = self.run_dmtet(rays_o, rays_d, mvp, h, w, **kwargs) + elif self.cuda_ray: + results = self.run_cuda(rays_o, rays_d, **kwargs) + elif self.taichi_ray: + results = self.run_taichi(rays_o, rays_d, **kwargs) + else: + if staged: + depth = torch.empty((B, N), device=device) + image = torch.empty((B, N, 3), device=device) + weights_sum = torch.empty((B, N), device=device) + + for b in range(B): + head = 0 + while head < N: + tail = min(head + max_ray_batch, N) + results_ = self.run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs) + depth[b:b+1, head:tail] = results_['depth'] + weights_sum[b:b+1, head:tail] = results_['weights_sum'] + image[b:b+1, head:tail] = results_['image'] + head += 
max_ray_batch + + results = {} + results['depth'] = depth + results['image'] = image + results['weights_sum'] = weights_sum + + else: + results = self.run(rays_o, rays_d, **kwargs) + + return results diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/__init__.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# empty diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/custom_ops.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/custom_ops.py new file mode 100644 index 0000000..ed2524f --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/custom_ops.py @@ -0,0 +1,159 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import glob +import hashlib +import importlib +import os +import re +import shutil +import uuid + +import torch +import torch.utils.cpp_extension +from torch.utils.file_baton import FileBaton + +#---------------------------------------------------------------------------- +# Global options. + +verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full' + +#---------------------------------------------------------------------------- +# Internal helper funcs. + +def _find_compiler_bindir(): + patterns = [ + 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64', + 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin', + ] + for pattern in patterns: + matches = sorted(glob.glob(pattern)) + if len(matches): + return matches[-1] + return None + +#---------------------------------------------------------------------------- + +def _get_mangled_gpu_name(): + name = torch.cuda.get_device_name().lower() + out = [] + for c in name: + if re.match('[a-z0-9_-]+', c): + out.append(c) + else: + out.append('-') + return ''.join(out) + +#---------------------------------------------------------------------------- +# Main entry point for compiling and loading C++/CUDA plugins. 
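The pure-PyTorch path of `render()` above evaluates rays in chunks of `max_ray_batch` and writes each chunk into preallocated buffers, which bounds peak memory at the cost of a Python loop. A minimal sketch of that staging pattern; the function name and the dummy `run_fn` are assumptions for illustration only.

```python
import torch

def render_staged(run_fn, rays_o, rays_d, max_ray_batch=4096):
    # rays_o, rays_d: [B, N, 3]; run_fn returns a dict with an 'image' entry of shape [b, n, 3]
    B, N = rays_o.shape[:2]
    image = torch.empty(B, N, 3, device=rays_o.device)
    for b in range(B):
        head = 0
        while head < N:
            tail = min(head + max_ray_batch, N)
            image[b:b+1, head:tail] = run_fn(rays_o[b:b+1, head:tail],
                                             rays_d[b:b+1, head:tail])['image']
            head = tail
    return image

# dummy run_fn just to show the call pattern
out = render_staged(lambda o, d: {'image': torch.zeros(*o.shape[:2], 3)},
                    torch.rand(1, 10000, 3), torch.rand(1, 10000, 3))
```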
+ +_cached_plugins = dict() + +def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs): + assert verbosity in ['none', 'brief', 'full'] + if headers is None: + headers = [] + if source_dir is not None: + sources = [os.path.join(source_dir, fname) for fname in sources] + headers = [os.path.join(source_dir, fname) for fname in headers] + + # Already cached? + if module_name in _cached_plugins: + return _cached_plugins[module_name] + + # Print status. + if verbosity == 'full': + print(f'Setting up PyTorch plugin "{module_name}"...') + elif verbosity == 'brief': + print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True) + verbose_build = (verbosity == 'full') + + # Compile and load. + try: # pylint: disable=too-many-nested-blocks + # Make sure we can find the necessary compiler binaries. + if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0: + compiler_bindir = _find_compiler_bindir() + if compiler_bindir is None: + raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".') + os.environ['PATH'] += ';' + compiler_bindir + + # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either + # break the build or unnecessarily restrict what's available to nvcc. + # Unset it to let nvcc decide based on what's available on the + # machine. + os.environ['TORCH_CUDA_ARCH_LIST'] = '' + + # Incremental build md5sum trickery. Copies all the input source files + # into a cached build directory under a combined md5 digest of the input + # source files. Copying is done only if the combined digest has changed. + # This keeps input file timestamps and filenames the same as in previous + # extension builds, allowing for fast incremental rebuilds. + # + # This optimization is done only in case all the source files reside in + # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR + # environment variable is set (we take this as a signal that the user + # actually cares about this.) + # + # EDIT: We now do it regardless of TORCH_EXTENSIOS_DIR, in order to work + # around the *.cu dependency bug in ninja config. + # + all_source_files = sorted(sources + headers) + all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files) + if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ): + + # Compute combined hash digest for all source files. + hash_md5 = hashlib.md5() + for src in all_source_files: + with open(src, 'rb') as f: + hash_md5.update(f.read()) + + # Select cached build directory name. + source_digest = hash_md5.hexdigest() + build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access + cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}') + + if not os.path.isdir(cached_build_dir): + tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}' + os.makedirs(tmpdir) + for src in all_source_files: + shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src))) + try: + os.replace(tmpdir, cached_build_dir) # atomic + except OSError: + # source directory already exists, delete tmpdir and its contents. + shutil.rmtree(tmpdir) + if not os.path.isdir(cached_build_dir): raise + + # Compile. 
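The incremental-build logic described in the comments above keys the extension build directory on an MD5 digest of all source files plus a mangled GPU name, so unchanged sources reuse a previous build. A reduced sketch of just the digest-to-directory mapping, illustrative only and not repository code:

```python
import hashlib
import os

def cached_build_dir(build_top_dir, source_files, gpu_name):
    # Hash the full contents of every source file; the digest (plus the GPU name)
    # selects a per-configuration build directory that can be reused across runs.
    digest = hashlib.md5()
    for src in sorted(source_files):
        with open(src, 'rb') as f:
            digest.update(f.read())
    return os.path.join(build_top_dir, f'{digest.hexdigest()}-{gpu_name}')

# e.g. cached_build_dir('/tmp/torch_extensions/bias_act_plugin',
#                       ['bias_act.cpp', 'bias_act.cu', 'bias_act.h'], 'nvidia-a100')
```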
+ cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources] + torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir, + verbose=verbose_build, sources=cached_sources, **build_kwargs) + else: + torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs) + + # Load. + module = importlib.import_module(module_name) + + except: + if verbosity == 'brief': + print('Failed!') + raise + + # Print status and add to cache dict. + if verbosity == 'full': + print(f'Done setting up PyTorch plugin "{module_name}".') + elif verbosity == 'brief': + print('Done.') + _cached_plugins[module_name] = module + return module + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/misc.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/misc.py new file mode 100644 index 0000000..c0ae67e --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/misc.py @@ -0,0 +1,270 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import re +import contextlib +import numpy as np +import torch +import warnings +from nerf import dnnlib + +#---------------------------------------------------------------------------- +# Cached construction of constant tensors. Avoids CPU=>GPU copy when the +# same constant is used multiple times. + +_constant_cache = dict() + +def constant(value, shape=None, dtype=None, device=None, memory_format=None): + value = np.asarray(value) + if shape is not None: + shape = tuple(shape) + if dtype is None: + dtype = torch.get_default_dtype() + if device is None: + device = torch.device('cpu') + if memory_format is None: + memory_format = torch.contiguous_format + + key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format) + tensor = _constant_cache.get(key, None) + if tensor is None: + tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) + if shape is not None: + tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) + tensor = tensor.contiguous(memory_format=memory_format) + _constant_cache[key] = tensor + return tensor + +#---------------------------------------------------------------------------- +# Replace NaN/Inf with specified numerical values. + +try: + nan_to_num = torch.nan_to_num # 1.8.0a0 +except AttributeError: + def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin + assert isinstance(input, torch.Tensor) + if posinf is None: + posinf = torch.finfo(input.dtype).max + if neginf is None: + neginf = torch.finfo(input.dtype).min + assert nan == 0 + return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out) + +#---------------------------------------------------------------------------- +# Symbolic assert. 
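A usage sketch for the `constant()` cache defined above; the import path is an assumption based on this repository layout. Identical arguments hit the cache and return the very same tensor object, which avoids repeated CPU-to-GPU copies when the same constant is requested many times.

```python
import torch
from nerf.torch_utils import misc   # assumed import path

a = misc.constant([1.0, 2.0, 3.0], device=torch.device('cpu'))
b = misc.constant([1.0, 2.0, 3.0], device=torch.device('cpu'))
assert a is b   # same cached object
# in practice device=torch.device('cuda') is the interesting case: the upload happens once
```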
+ +try: + symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access +except AttributeError: + symbolic_assert = torch.Assert # 1.7.0 + +#---------------------------------------------------------------------------- +# Context manager to temporarily suppress known warnings in torch.jit.trace(). +# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672 + +@contextlib.contextmanager +def suppress_tracer_warnings(): + flt = ('ignore', None, torch.jit.TracerWarning, None, 0) + warnings.filters.insert(0, flt) + yield + warnings.filters.remove(flt) + +#---------------------------------------------------------------------------- +# Assert that the shape of a tensor matches the given list of integers. +# None indicates that the size of a dimension is allowed to vary. +# Performs symbolic assertion when used in torch.jit.trace(). + +def assert_shape(tensor, ref_shape): + if tensor.ndim != len(ref_shape): + raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}') + for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)): + if ref_size is None: + pass + elif isinstance(ref_size, torch.Tensor): + with suppress_tracer_warnings(): # as_tensor results are registered as constants + symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}') + elif isinstance(size, torch.Tensor): + with suppress_tracer_warnings(): # as_tensor results are registered as constants + symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}') + elif size != ref_size: + raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}') + +#---------------------------------------------------------------------------- +# Function decorator that calls torch.autograd.profiler.record_function(). + +def profiled_function(fn): + def decorator(*args, **kwargs): + with torch.autograd.profiler.record_function(fn.__name__): + return fn(*args, **kwargs) + decorator.__name__ = fn.__name__ + return decorator + +#---------------------------------------------------------------------------- +# Sampler for torch.utils.data.DataLoader that loops over the dataset +# indefinitely, shuffling items as it goes. + +class InfiniteSampler(torch.utils.data.Sampler): + def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5): + assert len(dataset) > 0 + assert num_replicas > 0 + assert 0 <= rank < num_replicas + assert 0 <= window_size <= 1 + super().__init__(dataset) + self.dataset = dataset + self.rank = rank + self.num_replicas = num_replicas + self.shuffle = shuffle + self.seed = seed + self.window_size = window_size + + def __iter__(self): + order = np.arange(len(self.dataset)) + rnd = None + window = 0 + if self.shuffle: + rnd = np.random.RandomState(self.seed) + rnd.shuffle(order) + window = int(np.rint(order.size * self.window_size)) + + idx = 0 + while True: + i = idx % order.size + if idx % self.num_replicas == self.rank: + yield order[i] + if window >= 2: + j = (i - rnd.randint(window)) % order.size + order[i], order[j] = order[j], order[i] + idx += 1 + +#---------------------------------------------------------------------------- +# Utilities for operating with torch.nn.Module parameters and buffers. 
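`InfiniteSampler` above yields indices forever, so it pairs with a manually stepped `DataLoader` iterator rather than per-epoch iteration. A small usage sketch with an assumed import path:

```python
import torch
from nerf.torch_utils.misc import InfiniteSampler   # assumed import path

dataset = torch.utils.data.TensorDataset(torch.arange(10).float())
sampler = InfiniteSampler(dataset, rank=0, num_replicas=1, shuffle=True, seed=0)
loader = iter(torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=4))
for _ in range(3):
    batch, = next(loader)   # never raises StopIteration; the training code decides when to stop
```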
+ +def params_and_buffers(module): + assert isinstance(module, torch.nn.Module) + return list(module.parameters()) + list(module.buffers()) + +def named_params_and_buffers(module): + assert isinstance(module, torch.nn.Module) + return list(module.named_parameters()) + list(module.named_buffers()) + +def copy_params_and_buffers(src_module, dst_module, require_all=False): + assert isinstance(src_module, torch.nn.Module) + assert isinstance(dst_module, torch.nn.Module) + src_tensors = dict(named_params_and_buffers(src_module)) + for name, tensor in named_params_and_buffers(dst_module): + assert (name in src_tensors) or (not require_all) + if name in src_tensors: + tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad) + else: + print(f'{name} is not in src_module, init it using random valua!') + +#---------------------------------------------------------------------------- +# Context manager for easily enabling/disabling DistributedDataParallel +# synchronization. + +@contextlib.contextmanager +def ddp_sync(module, sync): + assert isinstance(module, torch.nn.Module) + if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel): + yield + else: + with module.no_sync(): + yield + +#---------------------------------------------------------------------------- +# Check DistributedDataParallel consistency across processes. + +def check_ddp_consistency(module, ignore_regex=None): + assert isinstance(module, torch.nn.Module) + for name, tensor in named_params_and_buffers(module): + fullname = type(module).__name__ + '.' + name + if ignore_regex is not None and re.fullmatch(ignore_regex, fullname): + continue + tensor = tensor.detach() + if tensor.is_floating_point(): + tensor = nan_to_num(tensor) + other = tensor.clone() + torch.distributed.broadcast(tensor=other, src=0) + assert (tensor == other).all(), fullname + +#---------------------------------------------------------------------------- +# Print summary table of module hierarchy. + +def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True): + assert isinstance(module, torch.nn.Module) + assert not isinstance(module, torch.jit.ScriptModule) + assert isinstance(inputs, (tuple, list)) + + # Register hooks. + entries = [] + nesting = [0] + def pre_hook(_mod, _inputs): + nesting[0] += 1 + def post_hook(mod, _inputs, outputs): + nesting[0] -= 1 + if nesting[0] <= max_nesting: + outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs] + outputs = [t for t in outputs if isinstance(t, torch.Tensor)] + entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs)) + hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()] + hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()] + + # Run module. + outputs = module(*inputs) + for hook in hooks: + hook.remove() + + # Identify unique outputs, parameters, and buffers. + tensors_seen = set() + for e in entries: + e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen] + e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen] + e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen] + tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs} + + # Filter out redundant entries. + if skip_redundant: + entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)] + + # Construct table. 
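`copy_params_and_buffers()` above matches parameters and buffers by name, so the destination module only needs the same names, not the same object; with `require_all=False`, names missing from the source are skipped. A usage sketch (import path assumed; wrapped in `no_grad` so the in-place copy onto leaf parameters is allowed):

```python
import torch
from nerf.torch_utils import misc   # assumed import path

src = torch.nn.Linear(4, 4)
dst = torch.nn.Linear(4, 4)
with torch.no_grad():
    misc.copy_params_and_buffers(src, dst, require_all=True)
assert torch.equal(src.weight, dst.weight)
```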
+ rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']] + rows += [['---'] * len(rows[0])] + param_total = 0 + buffer_total = 0 + submodule_names = {mod: name for name, mod in module.named_modules()} + for e in entries: + name = '' if e.mod is module else submodule_names[e.mod] + param_size = sum(t.numel() for t in e.unique_params) + buffer_size = sum(t.numel() for t in e.unique_buffers) + output_shapes = [str(list(t.shape)) for t in e.outputs] + output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs] + rows += [[ + name + (':0' if len(e.outputs) >= 2 else ''), + str(param_size) if param_size else '-', + str(buffer_size) if buffer_size else '-', + (output_shapes + ['-'])[0], + (output_dtypes + ['-'])[0], + ]] + for idx in range(1, len(e.outputs)): + rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]] + param_total += param_size + buffer_total += buffer_size + rows += [['---'] * len(rows[0])] + rows += [['Total', str(param_total), str(buffer_total), '-', '-']] + + # Print table. + widths = [max(len(cell) for cell in column) for column in zip(*rows)] + print() + for row in rows: + print(' '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths))) + print() + return outputs + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/__init__.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# empty diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.cpp b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.cpp new file mode 100644 index 0000000..ee6f6d0 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.cpp @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. 
+ */ + +#include +#include +#include +#include "bias_act.h" + +//------------------------------------------------------------------------ + +static bool has_same_layout(torch::Tensor x, torch::Tensor y) +{ + if (x.dim() != y.dim()) + return false; + for (int64_t i = 0; i < x.dim(); i++) + { + if (x.size(i) != y.size(i)) + return false; + if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) + return false; + } + return true; +} + +//------------------------------------------------------------------------ + +static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp) +{ + // Validate arguments. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); + TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); + TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); + TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); + TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); + TORCH_CHECK(b.dim() == 1, "b must have rank 1"); + TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); + TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); + TORCH_CHECK(grad >= 0, "grad must be non-negative"); + + // Validate layout. + TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); + TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); + TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); + TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); + TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); + + // Create output tensor. + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + torch::Tensor y = torch::empty_like(x); + TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); + + // Initialize CUDA kernel parameters. + bias_act_kernel_params p; + p.x = x.data_ptr(); + p.b = (b.numel()) ? b.data_ptr() : NULL; + p.xref = (xref.numel()) ? xref.data_ptr() : NULL; + p.yref = (yref.numel()) ? yref.data_ptr() : NULL; + p.dy = (dy.numel()) ? dy.data_ptr() : NULL; + p.y = y.data_ptr(); + p.grad = grad; + p.act = act; + p.alpha = alpha; + p.gain = gain; + p.clamp = clamp; + p.sizeX = (int)x.numel(); + p.sizeB = (int)b.numel(); + p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; + + // Choose CUDA kernel. + void* kernel; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] + { + kernel = choose_bias_act_kernel(p); + }); + TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); + + // Launch CUDA kernel. 
+ p.loopX = 4; + int blockSize = 4 * 32; + int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; + void* args[] = {&p}; + AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); + return y; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("bias_act", &bias_act); +} + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.cu b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.cu new file mode 100644 index 0000000..71ca390 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.cu @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include "bias_act.h" + +//------------------------------------------------------------------------ +// Helpers. + +template struct InternalType; +template <> struct InternalType { typedef double scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; + +//------------------------------------------------------------------------ +// CUDA kernel. + +template +__global__ void bias_act_kernel(bias_act_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + int G = p.grad; + scalar_t alpha = (scalar_t)p.alpha; + scalar_t gain = (scalar_t)p.gain; + scalar_t clamp = (scalar_t)p.clamp; + scalar_t one = (scalar_t)1; + scalar_t two = (scalar_t)2; + scalar_t expRange = (scalar_t)80; + scalar_t halfExpRange = (scalar_t)40; + scalar_t seluScale = (scalar_t)1.0507009873554804934193349852946; + scalar_t seluAlpha = (scalar_t)1.6732632423543772848170429916717; + + // Loop over elements. + int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x; + for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x) + { + // Load. + scalar_t x = (scalar_t)((const T*)p.x)[xi]; + scalar_t b = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0; + scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0; + scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0; + scalar_t dy = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one; + scalar_t yy = (gain != 0) ? yref / gain : 0; + scalar_t y = 0; + + // Apply bias. + ((G == 0) ? x : xref) += b; + + // linear + if (A == 1) + { + if (G == 0) y = x; + if (G == 1) y = x; + } + + // relu + if (A == 2) + { + if (G == 0) y = (x > 0) ? x : 0; + if (G == 1) y = (yy > 0) ? x : 0; + } + + // lrelu + if (A == 3) + { + if (G == 0) y = (x > 0) ? x : x * alpha; + if (G == 1) y = (yy > 0) ? x : x * alpha; + } + + // tanh + if (A == 4) + { + if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? 
one : (c - d) / (c + d); } + if (G == 1) y = x * (one - yy * yy); + if (G == 2) y = x * (one - yy * yy) * (-two * yy); + } + + // sigmoid + if (A == 5) + { + if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one); + if (G == 1) y = x * yy * (one - yy); + if (G == 2) y = x * yy * (one - yy) * (one - two * yy); + } + + // elu + if (A == 6) + { + if (G == 0) y = (x >= 0) ? x : exp(x) - one; + if (G == 1) y = (yy >= 0) ? x : x * (yy + one); + if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one); + } + + // selu + if (A == 7) + { + if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one); + if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha); + if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha); + } + + // softplus + if (A == 8) + { + if (G == 0) y = (x > expRange) ? x : log(exp(x) + one); + if (G == 1) y = x * (one - exp(-yy)); + if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); } + } + + // swish + if (A == 9) + { + if (G == 0) + y = (x < -expRange) ? 0 : x / (exp(-x) + one); + else + { + scalar_t c = exp(xref); + scalar_t d = c + one; + if (G == 1) + y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d); + else + y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d); + yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain; + } + } + + // Apply gain. + y *= gain * dy; + + // Clamp. + if (clamp >= 0) + { + if (G == 0) + y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp; + else + y = (yref > -clamp & yref < clamp) ? y : 0; + } + + // Store. + ((T*)p.y)[xi] = (T)y; + } +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template void* choose_bias_act_kernel(const bias_act_kernel_params& p) +{ + if (p.act == 1) return (void*)bias_act_kernel; + if (p.act == 2) return (void*)bias_act_kernel; + if (p.act == 3) return (void*)bias_act_kernel; + if (p.act == 4) return (void*)bias_act_kernel; + if (p.act == 5) return (void*)bias_act_kernel; + if (p.act == 6) return (void*)bias_act_kernel; + if (p.act == 7) return (void*)bias_act_kernel; + if (p.act == 8) return (void*)bias_act_kernel; + if (p.act == 9) return (void*)bias_act_kernel; + return NULL; +} + +//------------------------------------------------------------------------ +// Template specializations. + +template void* choose_bias_act_kernel (const bias_act_kernel_params& p); +template void* choose_bias_act_kernel (const bias_act_kernel_params& p); +template void* choose_bias_act_kernel (const bias_act_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.h b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.h new file mode 100644 index 0000000..8994bfb --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. 
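The gradient branches in the kernel above express derivatives through the saved output `y`; for sigmoid, for example, dy/dx = y(1 - y) and the second-order branch carries the extra factor (1 - 2y). A quick PyTorch check of the first-order identity, purely illustrative:

```python
import torch

x = torch.randn(5, requires_grad=True)
y = torch.sigmoid(x)
y.sum().backward()
manual = y.detach() * (1 - y.detach())   # d(sigmoid)/dx expressed through the output y
assert torch.allclose(x.grad, manual)
```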
+ */ + +//------------------------------------------------------------------------ +// CUDA kernel parameters. + +struct bias_act_kernel_params +{ + const void* x; // [sizeX] + const void* b; // [sizeB] or NULL + const void* xref; // [sizeX] or NULL + const void* yref; // [sizeX] or NULL + const void* dy; // [sizeX] or NULL + void* y; // [sizeX] + + int grad; + int act; + float alpha; + float gain; + float clamp; + + int sizeX; + int sizeB; + int stepB; + int loopX; +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template void* choose_bias_act_kernel(const bias_act_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.py new file mode 100644 index 0000000..b4028ad --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/bias_act.py @@ -0,0 +1,211 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Custom PyTorch ops for efficient bias and activation.""" + +import os +import numpy as np +import torch +from nerf import dnnlib + +from .. import custom_ops +from .. import misc + +#---------------------------------------------------------------------------- + +activation_funcs = { + 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), + 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), + 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), + 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), + 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), + 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), + 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), + 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), + 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), +} + +#---------------------------------------------------------------------------- + +_plugin = None +_null_tensor = torch.empty([0]) + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='bias_act_plugin', + sources=['bias_act.cpp', 'bias_act.cu'], + headers=['bias_act.h'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], 
+ ) + return True + +#---------------------------------------------------------------------------- + +def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'): + r"""Fused bias and activation function. + + Adds bias `b` to activation tensor `x`, evaluates activation function `act`, + and scales the result by `gain`. Each of the steps is optional. In most cases, + the fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports first and second order gradients, + but not third order gradients. + + Args: + x: Input activation tensor. Can be of any shape. + b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type + as `x`. The shape must be known, and it must match the dimension of `x` + corresponding to `dim`. + dim: The dimension in `x` corresponding to the elements of `b`. + The value of `dim` is ignored if `b` is not specified. + act: Name of the activation function to evaluate, or `"linear"` to disable. + Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. + See `activation_funcs` for a full list. `None` is not allowed. + alpha: Shape parameter for the activation function, or `None` to use the default. + gain: Scaling factor for the output tensor, or `None` to use default. + See `activation_funcs` for the default scaling of each activation function. + If unsure, consider specifying 1. + clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable + the clamping (default). + impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). + + Returns: + Tensor of the same shape and datatype as `x`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b) + return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None): + """Slow reference implementation of `bias_act()` using standard TensorFlow ops. + """ + assert isinstance(x, torch.Tensor) + assert clamp is None or clamp >= 0 + spec = activation_funcs[act] + alpha = float(alpha if alpha is not None else spec.def_alpha) + gain = float(gain if gain is not None else spec.def_gain) + clamp = float(clamp if clamp is not None else -1) + + # Add bias. + if b is not None: + assert isinstance(b, torch.Tensor) and b.ndim == 1 + assert 0 <= dim < x.ndim + assert b.shape[0] == x.shape[dim] + x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) + + # Evaluate activation function. + alpha = float(alpha) + x = spec.func(x, alpha=alpha) + + # Scale by gain. + gain = float(gain) + if gain != 1: + x = x * gain + + # Clamp. + if clamp >= 0: + x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type + return x + +#---------------------------------------------------------------------------- + +_bias_act_cuda_cache = dict() + +def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None): + """Fast CUDA implementation of `bias_act()` using custom ops. + """ + # Parse arguments. 
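A usage sketch for `bias_act()` as documented above; the import path is assumed from this repository layout, and `impl='ref'` keeps the example on the pure-PyTorch path so no CUDA extension needs to be compiled.

```python
import torch
from nerf.torch_utils.ops import bias_act   # assumed import path

x = torch.randn(2, 8, 16, 16)     # NCHW activations
b = torch.zeros(8)                # one bias per channel (dim=1)
y = bias_act.bias_act(x, b, dim=1, act='lrelu', gain=1.0, clamp=256, impl='ref')
print(y.shape)                    # torch.Size([2, 8, 16, 16])
```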
+ assert clamp is None or clamp >= 0 + spec = activation_funcs[act] + alpha = float(alpha if alpha is not None else spec.def_alpha) + gain = float(gain if gain is not None else spec.def_gain) + clamp = float(clamp if clamp is not None else -1) + + # Lookup from cache. + key = (dim, act, alpha, gain, clamp) + if key in _bias_act_cuda_cache: + return _bias_act_cuda_cache[key] + + # Forward op. + class BiasActCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, b): # pylint: disable=arguments-differ + ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format + x = x.contiguous(memory_format=ctx.memory_format) + b = b.contiguous() if b is not None else _null_tensor + y = x + if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor: + y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp) + ctx.save_for_backward( + x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, + b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, + y if 'y' in spec.ref else _null_tensor) + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + dy = dy.contiguous(memory_format=ctx.memory_format) + x, b, y = ctx.saved_tensors + dx = None + db = None + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + dx = dy + if act != 'linear' or gain != 1 or clamp >= 0: + dx = BiasActCudaGrad.apply(dy, x, b, y) + + if ctx.needs_input_grad[1]: + db = dx.sum([i for i in range(dx.ndim) if i != dim]) + + return dx, db + + # Backward op. + class BiasActCudaGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ + ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format + dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp) + ctx.save_for_backward( + dy if spec.has_2nd_grad else _null_tensor, + x, b, y) + return dx + + @staticmethod + def backward(ctx, d_dx): # pylint: disable=arguments-differ + d_dx = d_dx.contiguous(memory_format=ctx.memory_format) + dy, x, b, y = ctx.saved_tensors + d_dy = None + d_x = None + d_b = None + d_y = None + + if ctx.needs_input_grad[0]: + d_dy = BiasActCudaGrad.apply(d_dx, x, b, y) + + if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]): + d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp) + + if spec.has_2nd_grad and ctx.needs_input_grad[2]: + d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim]) + + return d_dy, d_x, d_b, d_y + + # Add to cache. + _bias_act_cuda_cache[key] = BiasActCuda + return BiasActCuda + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/conv2d_gradfix.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/conv2d_gradfix.py new file mode 100644 index 0000000..9a177cc --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/conv2d_gradfix.py @@ -0,0 +1,199 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Custom replacement for `torch.nn.functional.conv2d` that supports +arbitrarily high order gradients with zero performance penalty.""" + +import contextlib +import torch + +# pylint: disable=redefined-builtin +# pylint: disable=arguments-differ +# pylint: disable=protected-access + +#---------------------------------------------------------------------------- + +enabled = False # Enable the custom op by setting this to true. +weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights. + +@contextlib.contextmanager +def no_weight_gradients(disable=True): + global weight_gradients_disabled + old = weight_gradients_disabled + if disable: + weight_gradients_disabled = True + yield + weight_gradients_disabled = old + +#---------------------------------------------------------------------------- + +def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + if _should_use_custom_op(input): + return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) + return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) + +def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): + if _should_use_custom_op(input): + return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) + return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) + +#---------------------------------------------------------------------------- + +def _should_use_custom_op(input): + assert isinstance(input, torch.Tensor) + if (not enabled) or (not torch.backends.cudnn.enabled): + return False + if input.device.type != 'cuda': + return False + return True + +def _tuple_of_ints(xs, ndim): + xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim + assert len(xs) == ndim + assert all(isinstance(x, int) for x in xs) + return xs + +#---------------------------------------------------------------------------- + +_conv2d_gradfix_cache = dict() +_null_tensor = torch.empty([0]) + +def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): + # Parse arguments. + ndim = 2 + weight_shape = tuple(weight_shape) + stride = _tuple_of_ints(stride, ndim) + padding = _tuple_of_ints(padding, ndim) + output_padding = _tuple_of_ints(output_padding, ndim) + dilation = _tuple_of_ints(dilation, ndim) + + # Lookup from cache. + key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) + if key in _conv2d_gradfix_cache: + return _conv2d_gradfix_cache[key] + + # Validate arguments. 
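A usage sketch for the wrappers above (import path assumed). Enabling the custom op allows second-order gradients through `conv2d`, which gradient-penalty style regularizers rely on; on CPU, or with the op disabled, the calls transparently fall back to the standard PyTorch functions, which is what this example exercises.

```python
import torch
from nerf.torch_utils.ops import conv2d_gradfix   # assumed import path

conv2d_gradfix.enabled = True
x = torch.randn(1, 3, 8, 8, requires_grad=True)
w = torch.randn(4, 3, 3, 3, requires_grad=True)
y = conv2d_gradfix.conv2d(x, w, padding=1)
grad_x, = torch.autograd.grad(y.sum(), x, create_graph=True)
penalty = grad_x.square().sum()
penalty.backward()                # second-order backward through the convolution
```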
+ assert groups >= 1 + assert len(weight_shape) == ndim + 2 + assert all(stride[i] >= 1 for i in range(ndim)) + assert all(padding[i] >= 0 for i in range(ndim)) + assert all(dilation[i] >= 0 for i in range(ndim)) + if not transpose: + assert all(output_padding[i] == 0 for i in range(ndim)) + else: # transpose + assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim)) + + # Helpers. + common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) + def calc_output_padding(input_shape, output_shape): + if transpose: + return [0, 0] + return [ + input_shape[i + 2] + - (output_shape[i + 2] - 1) * stride[i] + - (1 - 2 * padding[i]) + - dilation[i] * (weight_shape[i + 2] - 1) + for i in range(ndim) + ] + + # Forward & backward. + class Conv2d(torch.autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias): + assert weight.shape == weight_shape + ctx.save_for_backward( + input if weight.requires_grad else _null_tensor, + weight if input.requires_grad else _null_tensor, + ) + ctx.input_shape = input.shape + + # Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere). + if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0): + a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1]) + b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1) + c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2) + c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1) + c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3) + return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) + + # General case => cuDNN. + if transpose: + return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) + return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + input_shape = ctx.input_shape + grad_input = None + grad_weight = None + grad_bias = None + + if ctx.needs_input_grad[0]: + p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape) + op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) + grad_input = op.apply(grad_output, weight, None) + assert grad_input.shape == input_shape + + if ctx.needs_input_grad[1] and not weight_gradients_disabled: + grad_weight = Conv2dGradWeight.apply(grad_output, input, weight) + assert grad_weight.shape == weight_shape + + if ctx.needs_input_grad[2]: + grad_bias = grad_output.sum([0, 2, 3]) + + return grad_input, grad_weight, grad_bias + + # Gradient with respect to the weights. + class Conv2dGradWeight(torch.autograd.Function): + @staticmethod + def forward(ctx, grad_output, input, weight): + ctx.save_for_backward( + grad_output if input.requires_grad else _null_tensor, + input if grad_output.requires_grad else _null_tensor, + ) + ctx.grad_output_shape = grad_output.shape + ctx.input_shape = input.shape + + # Simple 1x1 convolution => cuBLAS (on both Volta and Ampere). 
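            # Shape walk-through for the 1x1 fast path below (descriptive note, non-transposed
            # case): grad_output [N, Cout, H, W] is reshaped to a = [g, Cout/g, N*H*W] and
            # input [N, Cin, H, W] to b = [g, Cin/g, N*H*W]; a @ b^T then gives
            # [g, Cout/g, Cin/g], which reshapes to the weight gradient [Cout, Cin/g, 1, 1].
            # In other words, for a 1x1 kernel the weight gradient is a single batched matmul
            # that sums outer products over all batch and spatial positions.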
+ if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0): + a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) + b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) + c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape) + return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) + + # General case => cuDNN. + return torch.ops.aten.convolution_backward(grad_output=grad_output, input=input, weight=weight, bias_sizes=None, stride=stride, padding=padding, dilation=dilation, transposed=transpose, output_padding=output_padding, groups=groups, output_mask=[False, True, False])[1] + + + @staticmethod + def backward(ctx, grad2_grad_weight): + grad_output, input = ctx.saved_tensors + grad_output_shape = ctx.grad_output_shape + input_shape = ctx.input_shape + grad2_grad_output = None + grad2_input = None + + if ctx.needs_input_grad[0]: + grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) + assert grad2_grad_output.shape == grad_output_shape + + if ctx.needs_input_grad[1]: + p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape) + op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) + grad2_input = op.apply(grad_output, grad2_grad_weight, None) + assert grad2_input.shape == input_shape + + return grad2_grad_output, grad2_input + + _conv2d_gradfix_cache[key] = Conv2d + return Conv2d + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/conv2d_resample.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/conv2d_resample.py new file mode 100644 index 0000000..d46f4dd --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/conv2d_resample.py @@ -0,0 +1,145 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""2D convolution with optional up/downsampling.""" + +import torch + +from .. import misc +from . import conv2d_gradfix +from . import upfirdn2d +from .upfirdn2d import _parse_padding +from .upfirdn2d import _get_filter_size + +#---------------------------------------------------------------------------- + +def _get_weight_shape(w): + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + shape = [int(sz) for sz in w.shape] + misc.assert_shape(w, shape) + return shape + +#---------------------------------------------------------------------------- + +def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True): + """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations. + """ + _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w) + + # Flip weight if requested. + # Note: conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False). 
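    # Descriptive note: true convolution equals correlation with a spatially flipped
    # kernel, which is exactly what the flip below implements for flip_weight=False.
    # A tiny hypothetical check (names and shapes are illustrative only):
    #     import torch, torch.nn.functional as F
    #     x = torch.randn(1, 1, 5, 5); w = torch.randn(1, 1, 3, 3)
    #     corr = F.conv2d(x, w, padding=1)                 # correlation (PyTorch default)
    #     conv = F.conv2d(x, w.flip([2, 3]), padding=1)    # true convolution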
+ if not flip_weight and (kw > 1 or kh > 1): + w = w.flip([2, 3]) + + # Execute using conv2d_gradfix. + op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d + return op(x, w, stride=stride, padding=padding, groups=groups) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False): + r"""2D convolution with optional up/downsampling. + + Padding is performed only once at the beginning, not between the operations. + + Args: + x: Input tensor of shape + `[batch_size, in_channels, in_height, in_width]`. + w: Weight tensor of shape + `[out_channels, in_channels//groups, kernel_height, kernel_width]`. + f: Low-pass filter for up/downsampling. Must be prepared beforehand by + calling upfirdn2d.setup_filter(). None = identity (default). + up: Integer upsampling factor (default: 1). + down: Integer downsampling factor (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + groups: Split input channels into N groups (default: 1). + flip_weight: False = convolution, True = correlation (default: True). + flip_filter: False = convolution, True = correlation (default: False). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + # Validate arguments. + assert isinstance(x, torch.Tensor) and (x.ndim == 4) + assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype) + assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32) + assert isinstance(up, int) and (up >= 1) + assert isinstance(down, int) and (down >= 1) + assert isinstance(groups, int) and (groups >= 1) + out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w) + fw, fh = _get_filter_size(f) + px0, px1, py0, py1 = _parse_padding(padding) + + # Adjust padding to account for up/downsampling. + if up > 1: + px0 += (fw + up - 1) // 2 + px1 += (fw - up) // 2 + py0 += (fh + up - 1) // 2 + py1 += (fh - up) // 2 + if down > 1: + px0 += (fw - down + 1) // 2 + px1 += (fw - down) // 2 + py0 += (fh - down + 1) // 2 + py1 += (fh - down) // 2 + + # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve. + if kw == 1 and kh == 1 and (down > 1 and up == 1): + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + return x + + # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample. + if kw == 1 and kh == 1 and (up > 1 and down == 1): + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + return x + + # Fast path: downsampling only => use strided convolution. + if down > 1 and up == 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight) + return x + + # Fast path: upsampling with optional downsampling => use transpose strided convolution. 
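    # Hypothetical call sketch for conv2d_resample(), with the padding adjustment above made
    # concrete (import path, shapes and filter taps are assumptions, not from this diff):
    #     from torch_utils.ops import upfirdn2d, conv2d_resample
    #     f = upfirdn2d.setup_filter([1, 3, 3, 1], device='cuda')   # 4-tap low-pass, fw = fh = 4
    #     x = torch.randn(2, 16, 32, 32, device='cuda')
    #     w = torch.randn(32, 16, 3, 3, device='cuda')
    #     y = conv2d_resample.conv2d_resample(x, w, f=f, up=2, padding=1)   # -> [2, 32, 64, 64]
    # For this call the adjustment above gives px0 = 1 + (4 + 2 - 1) // 2 = 3 and
    # px1 = 1 + (4 - 2) // 2 = 2 (likewise for py0/py1), i.e. the extra padding that keeps the
    # upsampled image aligned with the support of the low-pass filter.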
+ if up > 1: + if groups == 1: + w = w.transpose(0, 1) + else: + w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw) + w = w.transpose(1, 2) + w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw) + px0 -= kw - 1 + px1 -= kw - up + py0 -= kh - 1 + py1 -= kh - up + pxt = max(min(-px0, -px1), 0) + pyt = max(min(-py0, -py1), 0) + x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight)) + x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter) + if down > 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + + # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d. + if up == 1 and down == 1: + if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0: + return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight) + + # Fallback: Generic reference implementation. + x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter) + x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) + if down > 1: + x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter) + return x + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.cpp b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.cpp new file mode 100644 index 0000000..4f55466 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.cpp @@ -0,0 +1,304 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include +#include +#include "filtered_lrelu.h" + +//------------------------------------------------------------------------ + +static std::tuple filtered_lrelu( + torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, + int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) +{ + // Set CUDA device. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + + // Validate arguments. 
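    // Summary note on the fused op implemented below (derived from the code; descriptive,
    // not authoritative): the bias b is added to x, the result is upsampled by `up` with
    // filter fu, scaled by `gain`, passed through a leaky ReLU with the given `slope` and
    // optionally clamped to +-`clamp`, then downsampled by `down` with filter fd. The
    // optional sign tensor (si in, so out) stores 2 bits per activation recording whether
    // the slope and/or the clamp fired, so a later pass can replay the same decisions
    // without keeping the full-resolution intermediate activations.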
+ TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); + TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); + TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); + TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); + TORCH_CHECK(x.numel() > 0, "x is empty"); + TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); + TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); + TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); + TORCH_CHECK(fu.numel() > 0, "fu is empty"); + TORCH_CHECK(fd.numel() > 0, "fd is empty"); + TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); + TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); + + // Figure out how much shared memory is available on the device. + int maxSharedBytes = 0; + AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); + int sharedKB = maxSharedBytes >> 10; + + // Populate enough launch parameters to check if a CUDA kernel exists. + filtered_lrelu_kernel_params p; + p.up = up; + p.down = down; + p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. + p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); + filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); + if (!test_spec.exec) + { + // No kernel found - return empty tensors and indicate missing kernel with return code of -1. + return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); + } + + // Input/output element size. + int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; + + // Input sizes. + int64_t xw = (int)x.size(3); + int64_t xh = (int)x.size(2); + int64_t fut_w = (int)fu.size(-1) - 1; + int64_t fut_h = (int)fu.size(0) - 1; + int64_t fdt_w = (int)fd.size(-1) - 1; + int64_t fdt_h = (int)fd.size(0) - 1; + + // Logical size of upsampled buffer. + int64_t cw = xw * up + (px0 + px1) - fut_w; + int64_t ch = xh * up + (py0 + py1) - fut_h; + TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); + TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); + + // Compute output size and allocate. + int64_t yw = (cw - fdt_w + (down - 1)) / down; + int64_t yh = (ch - fdt_h + (down - 1)) / down; + TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); + TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); + torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); + + // Allocate sign tensor. + torch::Tensor so; + torch::Tensor s = si; + bool readSigns = !!s.numel(); + int64_t sw_active = 0; // Active width of sign tensor. + if (writeSigns) + { + sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. + int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. + int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. 
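        // Illustrative numbers for the sizing above (values chosen only as an example): with
        // yw = 100, down = 2 and a 4-tap fd (fdt_w = 3), sw_active = 200 - 1 + 3 = 202,
        // which rounds up to sw = 208; since each sign uses 2 bits (4 signs per byte),
        // the allocated row width below is sw >> 2 = 52 bytes.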
+ TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); + s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); + } + else if (readSigns) + sw_active = s.size(3) << 2; + + // Validate sign tensor if in use. + if (readSigns || writeSigns) + { + TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); + TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); + TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); + TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); + TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); + TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); + } + + // Populate rest of CUDA kernel parameters. + p.x = x.data_ptr(); + p.y = y.data_ptr(); + p.b = b.data_ptr(); + p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; + p.fu = fu.data_ptr(); + p.fd = fd.data_ptr(); + p.pad0 = make_int2(px0, py0); + p.gain = gain; + p.slope = slope; + p.clamp = clamp; + p.flip = (flip_filters) ? 1 : 0; + p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); + p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. + p.sOfs = make_int2(sx, sy); + p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. + + // x, y, b strides are in bytes. + p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); + p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); + p.bStride = sz * b.stride(0); + + // fu, fd strides are in elements. + p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); + p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); + + // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. + bool index64b = false; + if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; + if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; + if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; + if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; + if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; + if (s.numel() > INT_MAX) index64b = true; + + // Choose CUDA kernel. + filtered_lrelu_kernel_spec spec = { 0 }; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] + { + if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. + { + // Choose kernel based on index type, datatype and sign read/write modes. 
+ if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); + } + }); + TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. + + // Launch CUDA kernel. + void* args[] = {&p}; + int bx = spec.numWarps * 32; + int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; + int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; + int gz = p.yShape.z * p.yShape.w; + + // Repeat multiple horizontal tiles in a CTA? + if (spec.xrep) + { + p.tilesXrep = spec.xrep; + p.tilesXdim = gx; + + gx = (gx + p.tilesXrep - 1) / p.tilesXrep; + std::swap(gx, gy); + } + else + { + p.tilesXrep = 0; + p.tilesXdim = 0; + } + + // Launch filter setup kernel. + AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); + + // Copy kernels to constant memory. + if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); + + // Set cache and shared memory configurations for main kernel. + AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); + if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? + AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); + AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); + + // Launch main kernel. + const int maxSubGz = 65535; // CUDA maximum for block z dimension. + for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. + { + p.blockZofs = zofs; + int subGz = std::min(maxSubGz, gz - zofs); + AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); + } + + // Done. + return std::make_tuple(y, so, 0); +} + +//------------------------------------------------------------------------ + +static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) +{ + // Set CUDA device. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + + // Validate arguments. + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); + TORCH_CHECK(x.numel() > 0, "x is empty"); + TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); + + // Output signs if we don't have sign input. 
+ torch::Tensor so; + torch::Tensor s = si; + bool readSigns = !!s.numel(); + if (writeSigns) + { + int64_t sw = x.size(3); + sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. + s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); + } + + // Validate sign tensor if in use. + if (readSigns || writeSigns) + { + TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); + TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); + TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); + TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); + TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); + TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); + } + + // Initialize CUDA kernel parameters. + filtered_lrelu_act_kernel_params p; + p.x = x.data_ptr(); + p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; + p.gain = gain; + p.slope = slope; + p.clamp = clamp; + p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); + p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. + p.sOfs = make_int2(sx, sy); + + // Choose CUDA kernel. + void* func = 0; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] + { + if (writeSigns) + func = choose_filtered_lrelu_act_kernel(); + else if (readSigns) + func = choose_filtered_lrelu_act_kernel(); + else + func = choose_filtered_lrelu_act_kernel(); + }); + TORCH_CHECK(func, "internal error - CUDA kernel not found"); + + // Launch CUDA kernel. + void* args[] = {&p}; + int bx = 128; // 4 warps per block. + + // Logical size of launch = writeSigns ? p.s : p.x + uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; + uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; + uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. + gx = (gx - 1) / bx + 1; + + // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. + const uint32_t gmax = 65535; + gy = std::min(gy, gmax); + gz = std::min(gz, gmax); + + // Launch. + AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); + return so; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. + m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. +} + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.cu b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.cu new file mode 100644 index 0000000..aaac954 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.cu @@ -0,0 +1,1288 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ */
+
+#include <c10/util/Half.h>
+#include "filtered_lrelu.h"
+#include <cstdint>
+
+//------------------------------------------------------------------------
+// Helpers.
+
+enum // Filter modes.
+{
+    MODE_SUSD = 0,  // Separable upsampling, separable downsampling.
+    MODE_FUSD = 1,  // Full upsampling, separable downsampling.
+    MODE_SUFD = 2,  // Separable upsampling, full downsampling.
+    MODE_FUFD = 3,  // Full upsampling, full downsampling.
+};
+
+template <class T> struct InternalType;
+template <> struct InternalType<double>
+{
+    typedef double scalar_t; typedef double2 vec2_t; typedef double4 vec4_t;
+    __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_double2(0, 0); }
+    __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_double4(0, 0, 0, 0); }
+    __device__ __forceinline__ static double clamp(double x, double c) { return fmin(fmax(x, -c), c); }
+};
+template <> struct InternalType<float>
+{
+    typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t;
+    __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); }
+    __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); }
+    __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); }
+};
+template <> struct InternalType<c10::Half>
+{
+    typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t;
+    __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); }
+    __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); }
+    __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); }
+};
+
+#define MIN(A, B)       ((A) < (B) ? (A) : (B))
+#define MAX(A, B)       ((A) > (B) ? (A) : (B))
+#define CEIL_DIV(A, B)  (((B)==1) ? (A) : \
+                         ((B)==2) ? ((int)((A)+1) >> 1) : \
+                         ((B)==4) ? ((int)((A)+3) >> 2) : \
+                         (((A) + ((A) > 0 ? (B) - 1 : 0)) / (B)))
+
+// This works only up to blocks of size 256 x 256 and for all N that are powers of two.
+template <int N> __device__ __forceinline__ void fast_div_mod(int& x, int& y, unsigned int i)
+{
+    if ((N & (N-1)) && N <= 256)
+        y = (i * ((1<<24)/N + 1)) >> 24; // Assumes N <= 256, i < N*256.
+    else
+        y = i/N;
+
+    x = i - y*N;
+}
+
+// Type cast stride before reading it.
+template <class T> __device__ __forceinline__ T get_stride(const int64_t& x)
+{
+    return *reinterpret_cast<const T*>(&x);
+}
+
+//------------------------------------------------------------------------
+// Filters, setup kernel, copying function.
+
+#define MAX_FILTER_SIZE 32
+
+// Combined up/down filter buffers so that transfer can be done with one copy.
+__device__ float g_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in global memory, written by setup kernel.
+__device__ __constant__ float c_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in constant memory, read by main kernel.
+
+// Accessors to combined buffers to index up/down filters individually.
+#define c_fu (c_fbuf) +#define c_fd (c_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) +#define g_fu (g_fbuf) +#define g_fd (g_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) + +// Set up filters into global memory buffer. +static __global__ void setup_filters_kernel(filtered_lrelu_kernel_params p) +{ + for (int idx = threadIdx.x; idx < MAX_FILTER_SIZE * MAX_FILTER_SIZE; idx += blockDim.x) + { + int x, y; + fast_div_mod(x, y, idx); + + int fu_x = p.flip ? x : (p.fuShape.x - 1 - x); + int fu_y = p.flip ? y : (p.fuShape.y - 1 - y); + if (p.fuShape.y > 0) + g_fu[idx] = (x >= p.fuShape.x || y >= p.fuShape.y) ? 0.0f : p.fu[fu_x * p.fuStride.x + fu_y * p.fuStride.y]; + else + g_fu[idx] = (x >= p.fuShape.x || y > 0) ? 0.0f : p.fu[fu_x * p.fuStride.x]; + + int fd_x = p.flip ? x : (p.fdShape.x - 1 - x); + int fd_y = p.flip ? y : (p.fdShape.y - 1 - y); + if (p.fdShape.y > 0) + g_fd[idx] = (x >= p.fdShape.x || y >= p.fdShape.y) ? 0.0f : p.fd[fd_x * p.fdStride.x + fd_y * p.fdStride.y]; + else + g_fd[idx] = (x >= p.fdShape.x || y > 0) ? 0.0f : p.fd[fd_x * p.fdStride.x]; + } +} + +// Host function to copy filters written by setup kernel into constant buffer for main kernel. +template static cudaError_t copy_filters(cudaStream_t stream) +{ + void* src = 0; + cudaError_t err = cudaGetSymbolAddress(&src, g_fbuf); + if (err) return err; + return cudaMemcpyToSymbolAsync(c_fbuf, src, 2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE * sizeof(float), 0, cudaMemcpyDeviceToDevice, stream); +} + +//------------------------------------------------------------------------ +// Coordinate spaces: +// - Relative to input tensor: inX, inY, tileInX, tileInY +// - Relative to input tile: relInX, relInY, tileInW, tileInH +// - Relative to upsampled tile: relUpX, relUpY, tileUpW, tileUpH +// - Relative to output tile: relOutX, relOutY, tileOutW, tileOutH +// - Relative to output tensor: outX, outY, tileOutX, tileOutY +// +// Relationships between coordinate spaces: +// - inX = tileInX + relInX +// - inY = tileInY + relInY +// - relUpX = relInX * up + phaseInX +// - relUpY = relInY * up + phaseInY +// - relUpX = relOutX * down +// - relUpY = relOutY * down +// - outX = tileOutX + relOutX +// - outY = tileOutY + relOutY + +extern __shared__ char s_buf_raw[]; // When sharedKB <= 48, allocate shared memory statically inside the kernel, otherwise use the externally allocated shared memory buffer. + +template +static __global__ void filtered_lrelu_kernel(filtered_lrelu_kernel_params p) +{ + // Check that we don't try to support non-existing filter modes. 
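// Illustrative instance of the coordinate relations listed above (numbers chosen only for
// exposition): with up = 2, down = 2 and phaseInX = 1, input sample relInX = 2 lands at
// upsampled column relUpX = 2 * 2 + 1 = 5, while output sample relOutX = 3 reads upsampled
// column relUpX = 3 * 2 = 6; absolute positions then follow from inX = tileInX + relInX and
// outX = tileOutX + relOutX.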
+ static_assert(up == 1 || up == 2 || up == 4, "only up=1, up=2, up=4 scales supported"); + static_assert(down == 1 || down == 2 || down == 4, "only down=1, down=2, down=4 scales supported"); + static_assert(fuSize >= up, "upsampling filter size must be at least upsampling factor"); + static_assert(fdSize >= down, "downsampling filter size must be at least downsampling factor"); + static_assert(fuSize % up == 0, "upsampling filter size must be divisible with upsampling factor"); + static_assert(fdSize % down == 0, "downsampling filter size must be divisible with downsampling factor"); + static_assert(fuSize <= MAX_FILTER_SIZE && fdSize <= MAX_FILTER_SIZE, "filter size greater than MAX_FILTER_SIZE"); + static_assert(up != 1 || (fuSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "up=1 supported only for 1x1 full filters"); + static_assert(down != 1 || (fdSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "down=1 supported only for 1x1 full filters"); + static_assert(!(up == 4 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "full filters not supported for up=4"); + static_assert(!(down == 4 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "full filters not supported for down=4"); + + // Static definitions. + typedef typename InternalType::scalar_t scalar_t; + typedef typename InternalType::vec2_t vec2_t; + typedef typename InternalType::vec4_t vec4_t; + const int tileUpW = (tileOutW * down + (fdSize - 1) - (down - 1) + 3) & ~3; // Upsampled tile width, rounded up to multiple of 4. + const int tileUpH = tileOutH * down + (fdSize - 1) - (down - 1); // Upsampled tile height. + const int tileInW = CEIL_DIV(tileUpW + (fuSize - 1), up); // Input tile width. + const int tileInH = CEIL_DIV(tileUpH + (fuSize - 1), up); // Input tile height. + const int tileUpH_up = CEIL_DIV(tileUpH, up) * up; // Upsampled tile height rounded up to a multiple of up. + const int tileInH_up = CEIL_DIV(tileUpH_up + (fuSize - 1), up); // For allocations only, to avoid shared memory read overruns with up=2 and up=4. + + // Merge 1x1 downsampling into last upsampling step for upf1 and ups2. + const bool downInline = (down == 1) && ((up == 1 && filterMode == MODE_FUFD) || (up == 2 && filterMode == MODE_SUFD)); + + // Sizes of logical buffers. + const int szIn = tileInH_up * tileInW; + const int szUpX = tileInH_up * tileUpW; + const int szUpXY = downInline ? 0 : (tileUpH * tileUpW); + const int szDownX = tileUpH * tileOutW; + + // Sizes for shared memory arrays. + const int s_buf0_size_base = + (filterMode == MODE_SUSD) ? MAX(szIn, szUpXY) : + (filterMode == MODE_FUSD) ? MAX(szIn, szDownX) : + (filterMode == MODE_SUFD) ? MAX(szIn, szUpXY) : + (filterMode == MODE_FUFD) ? szIn : + -1; + const int s_buf1_size_base = + (filterMode == MODE_SUSD) ? MAX(szUpX, szDownX) : + (filterMode == MODE_FUSD) ? szUpXY : + (filterMode == MODE_SUFD) ? szUpX : + (filterMode == MODE_FUFD) ? szUpXY : + -1; + + // Ensure U128 alignment. + const int s_buf0_size = (s_buf0_size_base + 3) & ~3; + const int s_buf1_size = (s_buf1_size_base + 3) & ~3; + + // Check at compile time that we don't use too much shared memory. + static_assert((s_buf0_size + s_buf1_size) * sizeof(scalar_t) <= (sharedKB << 10), "shared memory overflow"); + + // Declare shared memory arrays. + scalar_t* s_buf0; + scalar_t* s_buf1; + if (sharedKB <= 48) + { + // Allocate shared memory arrays here. + __shared__ scalar_t s_buf0_st[(sharedKB > 48) ? 
(1<<24) : (s_buf0_size + s_buf1_size)]; // Prevent launching if this isn't optimized away when unused. + s_buf0 = s_buf0_st; + s_buf1 = s_buf0 + s_buf0_size; + } + else + { + // Use the dynamically allocated shared memory array. + s_buf0 = (scalar_t*)s_buf_raw; + s_buf1 = s_buf0 + s_buf0_size; + } + + // Pointers to the buffers. + scalar_t* s_tileIn; // Input tile: [relInX * tileInH + relInY] + scalar_t* s_tileUpX; // After horizontal upsampling: [relInY * tileUpW + relUpX] + scalar_t* s_tileUpXY; // After upsampling: [relUpY * tileUpW + relUpX] + scalar_t* s_tileDownX; // After horizontal downsampling: [relUpY * tileOutW + relOutX] + if (filterMode == MODE_SUSD) + { + s_tileIn = s_buf0; + s_tileUpX = s_buf1; + s_tileUpXY = s_buf0; + s_tileDownX = s_buf1; + } + else if (filterMode == MODE_FUSD) + { + s_tileIn = s_buf0; + s_tileUpXY = s_buf1; + s_tileDownX = s_buf0; + } + else if (filterMode == MODE_SUFD) + { + s_tileIn = s_buf0; + s_tileUpX = s_buf1; + s_tileUpXY = s_buf0; + } + else if (filterMode == MODE_FUFD) + { + s_tileIn = s_buf0; + s_tileUpXY = s_buf1; + } + + // Allow large grids in z direction via per-launch offset. + int channelIdx = blockIdx.z + p.blockZofs; + int batchIdx = channelIdx / p.yShape.z; + channelIdx -= batchIdx * p.yShape.z; + + // Offset to output feature map. In bytes. + index_t mapOfsOut = channelIdx * get_stride(p.yStride.z) + batchIdx * get_stride(p.yStride.w); + + // Sign shift amount. + uint32_t signXo = ((threadIdx.x + p.sOfs.x) << 1) & 6; + + // Inner tile loop. + #pragma unroll 1 + for (int tileIdx = 0; !enableXrep || (tileIdx < MIN(p.tilesXrep, p.tilesXdim - p.tilesXrep * blockIdx.y)); tileIdx++) + { + // Locate output tile. + int tileX = enableXrep ? blockIdx.y * p.tilesXrep + tileIdx : blockIdx.x; + int tileOutX = tileX * tileOutW; + int tileOutY = (enableXrep ? blockIdx.x : blockIdx.y) * tileOutH; + + // Locate input tile. + int tmpX = tileOutX * down - p.pad0.x; + int tmpY = tileOutY * down - p.pad0.y; + int tileInX = CEIL_DIV(tmpX, up); + int tileInY = CEIL_DIV(tmpY, up); + const int phaseInX = tileInX * up - tmpX; + const int phaseInY = tileInY * up - tmpY; + + // Extra sync if input and output buffers are the same and we are not on first tile. + if (enableXrep && tileIdx > 0 && (filterMode == MODE_FUSD || (filterMode == MODE_SUFD && !downInline) || (filterMode == MODE_FUFD && downInline))) + __syncthreads(); + + // Load input tile & apply bias. Unrolled. + scalar_t b = (scalar_t)*(const T*)((const char*)p.b + (channelIdx * get_stride(p.bStride))); + index_t mapOfsIn = channelIdx * get_stride(p.xStride.z) + batchIdx * get_stride(p.xStride.w); + int idx = threadIdx.x; + const int loopCountIN = CEIL_DIV(tileInW * tileInH, threadsPerBlock); + #pragma unroll + for (int loop = 0; loop < loopCountIN; loop++) + { + int relInX, relInY; + fast_div_mod(relInX, relInY, idx); + int inX = tileInX + relInX; + int inY = tileInY + relInY; + scalar_t v = 0; + + if ((uint32_t)inX < p.xShape.x && (uint32_t)inY < p.xShape.y) + v = (scalar_t)*((const T*)((const char*)p.x + (inX * get_stride(p.xStride.x) + inY * get_stride(p.xStride.y) + mapOfsIn))) + b; + + bool skip = (loop == loopCountIN-1) && (idx >= tileInW * tileInH); + if (!skip) + s_tileIn[idx] = v; + + idx += threadsPerBlock; + } + + if (filterMode == MODE_SUSD || filterMode == MODE_SUFD) // Separable upsampling filter. + { + // Horizontal upsampling. 
+ __syncthreads(); + if (up == 4) + { + for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) + { + int relUpX0, relInY; + fast_div_mod(relUpX0, relInY, idx); + int relInX0 = relUpX0 / up; + int src0 = relInX0 + tileInW * relInY; + int dst = relInY * tileUpW + relUpX0; + vec4_t v = InternalType::zero_vec4(); + scalar_t a = s_tileIn[src0]; + if (phaseInX == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.y += a * (scalar_t)c_fu[step * up + 3]; + v.z += a * (scalar_t)c_fu[step * up + 2]; + v.w += a * (scalar_t)c_fu[step * up + 1]; + } + } + else if (phaseInX == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.z += a * (scalar_t)c_fu[step * up + 3]; + v.w += a * (scalar_t)c_fu[step * up + 2]; + } + } + else if (phaseInX == 2) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 2]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + v.z += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.w += a * (scalar_t)c_fu[step * up + 3]; + } + } + else // (phaseInX == 3) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 3]; + v.y += a * (scalar_t)c_fu[step * up + 2]; + v.z += a * (scalar_t)c_fu[step * up + 1]; + v.w += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + } + } + s_tileUpX[dst+0] = v.x; + s_tileUpX[dst+1] = v.y; + s_tileUpX[dst+2] = v.z; + s_tileUpX[dst+3] = v.w; + } + } + else if (up == 2) + { + bool p0 = (phaseInX == 0); + for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) + { + int relUpX0, relInY; + fast_div_mod(relUpX0, relInY, idx); + int relInX0 = relUpX0 / up; + int src0 = relInX0 + tileInW * relInY; + int dst = relInY * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + scalar_t a = s_tileIn[src0]; + if (p0) // (phaseInX == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + } + } + else // (phaseInX == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileIn[src0 + step + 1]; + } + } + s_tileUpX[dst+0] = v.x; + s_tileUpX[dst+1] = v.y; + } + } + + // Vertical upsampling & nonlinearity. + + __syncthreads(); + int groupMask = 15 << ((threadIdx.x & 31) & ~3); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. + int sShapeMaxY = MIN(p.sShape.y, tileOutY * down + tileUpH); // Avoid out-of-tile sign writes. + if (up == 4) + { + minY -= 3; // Adjust according to block height. 
+ for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) + { + int relUpX, relInY0; + fast_div_mod(relUpX, relInY0, idx); + int relUpY0 = relInY0 * up; + int src0 = relInY0 * tileUpW + relUpX; + int dst = relUpY0 * tileUpW + relUpX; + vec4_t v = InternalType::zero_vec4(); + + scalar_t a = s_tileUpX[src0]; + if (phaseInY == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.y += a * (scalar_t)c_fu[step * up + 3]; + v.z += a * (scalar_t)c_fu[step * up + 2]; + v.w += a * (scalar_t)c_fu[step * up + 1]; + } + } + else if (phaseInY == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.z += a * (scalar_t)c_fu[step * up + 3]; + v.w += a * (scalar_t)c_fu[step * up + 2]; + } + } + else if (phaseInY == 2) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 2]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + v.z += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.w += a * (scalar_t)c_fu[step * up + 3]; + } + } + else // (phaseInY == 3) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 3]; + v.y += a * (scalar_t)c_fu[step * up + 2]; + v.z += a * (scalar_t)c_fu[step * up + 1]; + v.w += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + } + } + + int x = tileOutX * down + relUpX; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + index_t si1 = si0 + p.sShape.x; + index_t si2 = si0 + p.sShape.x * 2; + index_t si3 = si0 + p.sShape.x * 3; + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + v.z *= (scalar_t)((float)up * (float)up * p.gain); + v.w *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + int sz = __float_as_uint(v.z) >> 31 << 16; + int sw = __float_as_uint(v.w) >> 31 << 24; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (sz) v.z *= p.slope; + if (sw) v.w *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType::clamp(v.z, p.clamp); } + if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType::clamp(v.w, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + // Combine signs. + uint32_t s = sx + sy + sw + sz; + s <<= (signX & 3) << 1; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. 
+ if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } + if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + int sz = __float_as_uint(v.z) >> 31 << 16; + int sw = __float_as_uint(v.w) >> 31 << 24; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (sz) v.z *= p.slope; + if (sw) v.w *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType::clamp(v.z, p.clamp); } + if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType::clamp(v.w, p.clamp); } + + // Combine signs. + uint32_t s = sx + sy + sw + sz; + s <<= (signX & 3) << 1; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } + if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + } + } + else if (signRead) // Read signs and apply. + { + if ((uint32_t)signXb < p.swLimit) + { + int ss = (signX & 3) << 1; + if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> ss; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } + if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> ss; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } + if ((uint32_t)(signY + 2) < p.sShape.y) { int s = p.s[si2] >> ss; if (s & 1) v.z *= p.slope; if (s & 2) v.z = 0.f; } + if ((uint32_t)(signY + 3) < p.sShape.y) { int s = p.s[si3] >> ss; if (s & 1) v.w *= p.slope; if (s & 2) v.w = 0.f; } + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + + s_tileUpXY[dst + 0 * tileUpW] = v.x; + if (relUpY0 + 1 < tileUpH) s_tileUpXY[dst + 1 * tileUpW] = v.y; + if (relUpY0 + 2 < tileUpH) s_tileUpXY[dst + 2 * tileUpW] = v.z; + if (relUpY0 + 3 < tileUpH) s_tileUpXY[dst + 3 * tileUpW] = v.w; + } + } + else if (up == 2) + { + minY -= 1; // Adjust according to block height. 
+ for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) + { + int relUpX, relInY0; + fast_div_mod(relUpX, relInY0, idx); + int relUpY0 = relInY0 * up; + int src0 = relInY0 * tileUpW + relUpX; + int dst = relUpY0 * tileUpW + relUpX; + vec2_t v = InternalType::zero_vec2(); + + scalar_t a = s_tileUpX[src0]; + if (phaseInY == 0) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + v.y += a * (scalar_t)c_fu[step * up + 1]; + } + } + else // (phaseInY == 1) + { + #pragma unroll + for (int step = 0; step < fuSize / up; step++) + { + v.x += a * (scalar_t)c_fu[step * up + 1]; + v.y += a * (scalar_t)c_fu[step * up + 0]; + a = s_tileUpX[src0 + (step + 1) * tileUpW]; + } + } + + int x = tileOutX * down + relUpX; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + index_t si1 = si0 + p.sShape.x; + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + // Combine signs. + int s = sx + sy; + s <<= signXo; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31 << 0; + int sy = __float_as_uint(v.y) >> 31 << 8; + if (sx) v.x *= p.slope; + if (sy) v.y *= p.slope; + if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType::clamp(v.x, p.clamp); } + if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType::clamp(v.y, p.clamp); } + + // Combine signs. + int s = sx + sy; + s <<= signXo; + s |= __shfl_xor_sync(groupMask, s, 1); + s |= __shfl_xor_sync(groupMask, s, 2); + + // Write signs. + if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } + if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + } + } + } + else if (signRead) // Read signs and apply. + { + if ((uint32_t)signXb < p.swLimit) + { + if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> signXo; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } + if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> signXo; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + } + + if (!downInline) + { + // Write into temporary buffer. 
+ s_tileUpXY[dst] = v.x; + if (relUpY0 < tileUpH - 1) + s_tileUpXY[dst + tileUpW] = v.y; + } + else + { + // Write directly into output buffer. + if ((uint32_t)x < p.yShape.x) + { + int ymax = MIN(p.yShape.y, tileUpH + tileOutY * down); + index_t ofs = x * get_stride(p.yStride.x) + y * get_stride(p.yStride.y) + mapOfsOut; + if ((uint32_t)y + 0 < p.yShape.y) *((T*)((char*)p.y + ofs)) = (T)(v.x * (scalar_t)c_fd[0]); + if ((uint32_t)y + 1 < ymax) *((T*)((char*)p.y + ofs + get_stride(p.yStride.y))) = (T)(v.y * (scalar_t)c_fd[0]); + } + } + } + } + } + else if (filterMode == MODE_FUSD || filterMode == MODE_FUFD) + { + // Full upsampling filter. + + if (up == 2) + { + // 2 x 2-wide. + __syncthreads(); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH + p.sOfs.y : 0; // Skip already written signs. + for (int idx = threadIdx.x * 4; idx < tileUpW * tileUpH; idx += blockDim.x * 4) + { + int relUpX0, relUpY0; + fast_div_mod(relUpX0, relUpY0, idx); + int relInX0 = CEIL_DIV(relUpX0 - phaseInX, up); + int relInY0 = CEIL_DIV(relUpY0 - phaseInY, up); + int src0 = relInX0 + tileInW * relInY0; + int tap0y = (relInY0 * up + phaseInY - relUpY0); + + #define X_LOOP(TAPY, PX) \ + for (int sx = 0; sx < fuSize / up; sx++) \ + { \ + v.x += a * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ + v.z += b * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 0) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ + v.y += a * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ + v.w += b * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 1) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ + } + + vec4_t v = InternalType::zero_vec4(); + if (tap0y == 0 && phaseInX == 0) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(0, 0) } + if (tap0y == 0 && phaseInX == 1) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(0, 1) } + if (tap0y == 1 && phaseInX == 0) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(1, 0) } + if (tap0y == 1 && phaseInX == 1) + #pragma unroll + for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; + #pragma unroll + X_LOOP(1, 1) } + + #undef X_LOOP + + int x = tileOutX * down + relUpX0; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + + v.x *= (scalar_t)((float)up * (float)up * p.gain); + v.y *= (scalar_t)((float)up * (float)up * p.gain); + v.z *= (scalar_t)((float)up * (float)up * p.gain); + v.w *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write signs. 
+ int sx = __float_as_uint(v.x) >> 31; + int sy = __float_as_uint(v.y) >> 31; + int sz = __float_as_uint(v.z) >> 31; + int sw = __float_as_uint(v.w) >> 31; + if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType::clamp(v.x, p.clamp); } + if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType::clamp(v.y, p.clamp); } + if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType::clamp(v.z, p.clamp); } + if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType::clamp(v.w, p.clamp); } + + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); + } + } + else + { + // Determine and write signs. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + int sx = __float_as_uint(v.x) >> 31; + int sy = __float_as_uint(v.y) >> 31; + int sz = __float_as_uint(v.z) >> 31; + int sw = __float_as_uint(v.w) >> 31; + if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType::clamp(v.x, p.clamp); } + if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType::clamp(v.y, p.clamp); } + if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType::clamp(v.z, p.clamp); } + if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType::clamp(v.w, p.clamp); } + + p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); + } + else + { + // Just compute the values. + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + } + } + else if (signRead) // Read sign and apply. + { + if ((uint32_t)signY < p.sShape.y) + { + int s = 0; + if ((uint32_t)signXb < p.swLimit) s = p.s[si]; + if ((uint32_t)signXb + 1 < p.swLimit) s |= p.s[si + 1] << 8; + s >>= (signX & 3) << 1; + if (s & 0x01) v.x *= p.slope; if (s & 0x02) v.x = 0.f; + if (s & 0x04) v.y *= p.slope; if (s & 0x08) v.y = 0.f; + if (s & 0x10) v.z *= p.slope; if (s & 0x20) v.z = 0.f; + if (s & 0x40) v.w *= p.slope; if (s & 0x80) v.w = 0.f; + } + } + else // Forward pass with no sign write. + { + if (v.x < 0.f) v.x *= p.slope; v.x = InternalType::clamp(v.x, p.clamp); + if (v.y < 0.f) v.y *= p.slope; v.y = InternalType::clamp(v.y, p.clamp); + if (v.z < 0.f) v.z *= p.slope; v.z = InternalType::clamp(v.z, p.clamp); + if (v.w < 0.f) v.w *= p.slope; v.w = InternalType::clamp(v.w, p.clamp); + } + + s_tileUpXY[idx + 0] = v.x; + s_tileUpXY[idx + 1] = v.y; + s_tileUpXY[idx + 2] = v.z; + s_tileUpXY[idx + 3] = v.w; + } + } + else if (up == 1) + { + __syncthreads(); + uint32_t groupMask = 15 << ((threadIdx.x & 31) & ~3); + int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. + for (int idx = threadIdx.x; idx < tileUpW * tileUpH; idx += blockDim.x) + { + int relUpX0, relUpY0; + fast_div_mod(relUpX0, relUpY0, idx); + scalar_t v = s_tileIn[idx] * (scalar_t)c_fu[0]; // 1x1 filter. 
+ + int x = tileOutX * down + relUpX0; + int y = tileOutY * down + relUpY0; + int signX = x + p.sOfs.x; + int signY = y + p.sOfs.y; + int signZ = blockIdx.z + p.blockZofs; + int signXb = signX >> 2; + index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); + v *= (scalar_t)((float)up * (float)up * p.gain); + + if (signWrite) + { + if (!enableWriteSkip) + { + // Determine and write sign. + uint32_t s = 0; + uint32_t signXbit = (1u << signXo); + if (v < 0.f) + { + s = signXbit; + v *= p.slope; + } + if (fabsf(v) > p.clamp) + { + s = signXbit * 2; + v = InternalType::clamp(v, p.clamp); + } + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. + s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. + p.s[si] = s; // Write. + } + } + else + { + // Determine and write sign. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) + { + uint32_t s = 0; + uint32_t signXbit = (1u << signXo); + if (v < 0.f) + { + s = signXbit; + v *= p.slope; + } + if (fabsf(v) > p.clamp) + { + s = signXbit * 2; + v = InternalType::clamp(v, p.clamp); + } + s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. + s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. + p.s[si] = s; // Write. + } + else + { + // Just compute the value. + if (v < 0.f) v *= p.slope; + v = InternalType::clamp(v, p.clamp); + } + } + } + else if (signRead) + { + // Read sign and apply if within sign tensor bounds. + if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y) + { + int s = p.s[si]; + s >>= signXo; + if (s & 1) v *= p.slope; + if (s & 2) v = 0.f; + } + } + else // Forward pass with no sign write. + { + if (v < 0.f) v *= p.slope; + v = InternalType::clamp(v, p.clamp); + } + + if (!downInline) // Write into temporary buffer. + s_tileUpXY[idx] = v; + else if ((uint32_t)x < p.yShape.x && (uint32_t)y < p.yShape.y) // Write directly into output buffer + *((T*)((char*)p.y + (x * get_stride(p.yStride.x) + y * get_stride(p.yStride.y) + mapOfsOut))) = (T)(v * (scalar_t)c_fd[0]); + } + } + } + + // Downsampling. + if (filterMode == MODE_SUSD || filterMode == MODE_FUSD) + { + // Horizontal downsampling. + __syncthreads(); + if (down == 4 && tileOutW % 4 == 0) + { + // Calculate 4 pixels at a time. + for (int idx = threadIdx.x * 4; idx < tileOutW * tileUpH; idx += blockDim.x * 4) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src0 = relUpY * tileUpW + relUpX0; + vec4_t v = InternalType::zero_vec4(); + #pragma unroll + for (int step = 0; step < fdSize; step++) + { + v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; + v.y += s_tileUpXY[src0 + 4 + step] * (scalar_t)c_fd[step]; + v.z += s_tileUpXY[src0 + 8 + step] * (scalar_t)c_fd[step]; + v.w += s_tileUpXY[src0 + 12 + step] * (scalar_t)c_fd[step]; + } + s_tileDownX[idx+0] = v.x; + s_tileDownX[idx+1] = v.y; + s_tileDownX[idx+2] = v.z; + s_tileDownX[idx+3] = v.w; + } + } + else if ((down == 2 || down == 4) && (tileOutW % 2 == 0)) + { + // Calculate 2 pixels at a time. 
+ for (int idx = threadIdx.x * 2; idx < tileOutW * tileUpH; idx += blockDim.x * 2) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src0 = relUpY * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + #pragma unroll + for (int step = 0; step < fdSize; step++) + { + v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; + v.y += s_tileUpXY[src0 + down + step] * (scalar_t)c_fd[step]; + } + s_tileDownX[idx+0] = v.x; + s_tileDownX[idx+1] = v.y; + } + } + else + { + // Calculate 1 pixel at a time. + for (int idx = threadIdx.x; idx < tileOutW * tileUpH; idx += blockDim.x) + { + int relOutX0, relUpY; + fast_div_mod(relOutX0, relUpY, idx); + int relUpX0 = relOutX0 * down; + int src = relUpY * tileUpW + relUpX0; + scalar_t v = 0.f; + #pragma unroll + for (int step = 0; step < fdSize; step++) + v += s_tileUpXY[src + step] * (scalar_t)c_fd[step]; + s_tileDownX[idx] = v; + } + } + + // Vertical downsampling & store output tile. + __syncthreads(); + for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) + { + int relOutX, relOutY0; + fast_div_mod(relOutX, relOutY0, idx); + int relUpY0 = relOutY0 * down; + int src0 = relUpY0 * tileOutW + relOutX; + scalar_t v = 0; + #pragma unroll + for (int step = 0; step < fdSize; step++) + v += s_tileDownX[src0 + step * tileOutW] * (scalar_t)c_fd[step]; + + int outX = tileOutX + relOutX; + int outY = tileOutY + relOutY0; + + if (outX < p.yShape.x & outY < p.yShape.y) + *((T*)((char*)p.y + (outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut))) = (T)v; + } + } + else if (filterMode == MODE_SUFD || filterMode == MODE_FUFD) + { + // Full downsampling filter. + if (down == 2) + { + // 2-wide. + __syncthreads(); + for (int idx = threadIdx.x * 2; idx < tileOutW * tileOutH; idx += blockDim.x * 2) + { + int relOutX0, relOutY0; + fast_div_mod(relOutX0, relOutY0, idx); + int relUpX0 = relOutX0 * down; + int relUpY0 = relOutY0 * down; + int src0 = relUpY0 * tileUpW + relUpX0; + vec2_t v = InternalType::zero_vec2(); + #pragma unroll + for (int sy = 0; sy < fdSize; sy++) + #pragma unroll + for (int sx = 0; sx < fdSize; sx++) + { + v.x += s_tileUpXY[src0 + 0 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; + v.y += s_tileUpXY[src0 + 2 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; + } + + int outX = tileOutX + relOutX0; + int outY = tileOutY + relOutY0; + if ((uint32_t)outY < p.yShape.y) + { + index_t ofs = outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut; + if (outX + 0 < p.yShape.x) *((T*)((char*)p.y + ofs)) = (T)v.x; + if (outX + 1 < p.yShape.x) *((T*)((char*)p.y + ofs + get_stride(p.yStride.x))) = (T)v.y; + } + } + } + else if (down == 1 && !downInline) + { + // Thread per pixel. + __syncthreads(); + for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) + { + int relOutX0, relOutY0; + fast_div_mod(relOutX0, relOutY0, idx); + scalar_t v = s_tileUpXY[idx] * (scalar_t)c_fd[0]; // 1x1 filter. + + int outX = tileOutX + relOutX0; + int outY = tileOutY + relOutY0; + if ((uint32_t)outX < p.yShape.x && (uint32_t)outY < p.yShape.y) + *((T*)((char*)p.y + (outX * get_stride(p.yStride.x) + outY * get_stride(p.yStride.y) + mapOfsOut))) = (T)v; + } + } + } + + if (!enableXrep) + break; + } +} + +//------------------------------------------------------------------------ +// Compute activation function and signs for upsampled data tensor, modifying data tensor in-place. 
Used for accelerating the generic variant. +// Sign tensor is known to be contiguous, and p.x and p.s have the same z, w dimensions. 64-bit indexing is always used. + +template +static __global__ void filtered_lrelu_act_kernel(filtered_lrelu_act_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + + // Indexing. + int32_t x = threadIdx.x + blockIdx.x * blockDim.x; + int32_t ymax = signWrite ? p.sShape.y : p.xShape.y; + int32_t qmax = p.xShape.z * p.xShape.w; // Combined minibatch*channel maximum index. + + // Loop to accommodate oversized tensors. + for (int32_t q = blockIdx.z; q < qmax; q += gridDim.z) + for (int32_t y = blockIdx.y; y < ymax; y += gridDim.y) + { + // Extract z and w (channel, minibatch index). + int32_t w = q / p.xShape.z; + int32_t z = q - w * p.xShape.z; + + // Choose behavior based on sign read/write mode. + if (signWrite) + { + // Process value if in p.x. + uint32_t s = 0; + if (x < p.xShape.x && y < p.xShape.y) + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + + // Gain, LReLU, clamp. + v *= p.gain; + if (v < 0.f) + { + v *= p.slope; + s = 1; // Sign. + } + if (fabsf(v) > p.clamp) + { + v = InternalType::clamp(v, p.clamp); + s = 2; // Clamp. + } + + *pv = (T)v; // Write value. + } + + // Coalesce into threads 0 and 16 of warp. + uint32_t m = (threadIdx.x & 16) ? 0xffff0000u : 0x0000ffffu; + s <<= ((threadIdx.x & 15) << 1); // Shift into place. + s |= __shfl_xor_sync(m, s, 1); // Distribute. + s |= __shfl_xor_sync(m, s, 2); + s |= __shfl_xor_sync(m, s, 4); + s |= __shfl_xor_sync(m, s, 8); + + // Write signs if leader and in p.s. + if (!(threadIdx.x & 15) && x < p.sShape.x) // y is always in. + { + uint64_t is = x + p.sShape.x * (y + (int64_t)p.sShape.y * q); // Contiguous. + ((uint32_t*)p.s)[is >> 4] = s; + } + } + else if (signRead) + { + // Process value if in p.x. + if (x < p.xShape.x) // y is always in. + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + v *= p.gain; + + // Apply sign buffer offset. + uint32_t sx = x + p.sOfs.x; + uint32_t sy = y + p.sOfs.y; + + // Read and apply signs if we land inside valid region of sign buffer. + if (sx < p.sShape.x && sy < p.sShape.y) + { + uint64_t is = (sx >> 2) + (p.sShape.x >> 2) * (sy + (uint64_t)p.sShape.y * q); // Contiguous. + unsigned char s = p.s[is]; + s >>= (sx & 3) << 1; // Shift into place. + if (s & 1) // Sign? + v *= p.slope; + if (s & 2) // Clamp? + v = 0.f; + } + + *pv = (T)v; // Write value. + } + } + else + { + // Forward pass with no sign write. Process value if in p.x. + if (x < p.xShape.x) // y is always in. + { + int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; + T* pv = ((T*)p.x) + ix; + scalar_t v = (scalar_t)(*pv); + v *= p.gain; + if (v < 0.f) + v *= p.slope; + if (fabsf(v) > p.clamp) + v = InternalType::clamp(v, p.clamp); + *pv = (T)v; // Write value. + } + } + } +} + +template void* choose_filtered_lrelu_act_kernel(void) +{ + return (void*)filtered_lrelu_act_kernel; +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB) +{ + filtered_lrelu_kernel_spec s = { 0 }; + + // Return the first matching kernel. 
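+    // Each CASE entry pairs one (up, down, filter size, mode) combination with hand-tuned tile dimensions, warp count and x-repeat values; the first entry whose conditions match p determines the launch configuration.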
+#define CASE(SH, U, FU, D, FD, MODE, TW, TH, W, XR, WS) \ + if (sharedKB >= SH) \ + if ((p.fuShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_SUFD)) || (p.fuShape.y > 0 && (MODE == MODE_FUSD || MODE == MODE_FUFD))) \ + if ((p.fdShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_FUSD)) || (p.fdShape.y > 0 && (MODE == MODE_SUFD || MODE == MODE_FUFD))) \ + if (p.up == U && p.fuShape.x <= FU && p.fuShape.y <= FU && p.down == D && p.fdShape.x <= FD && p.fdShape.y <= FD) \ + { \ + static_assert((D*TW % 4) == 0, "down * tileWidth must be divisible by 4"); \ + static_assert(FU % U == 0, "upscaling filter size must be multiple of upscaling factor"); \ + static_assert(FD % D == 0, "downscaling filter size must be multiple of downscaling factor"); \ + s.setup = (void*)setup_filters_kernel; \ + s.exec = (void*)filtered_lrelu_kernel; \ + s.tileOut = make_int2(TW, TH); \ + s.numWarps = W; \ + s.xrep = XR; \ + s.dynamicSharedKB = (SH == 48) ? 0 : SH; \ + return s; \ + } + + // Launch parameters for various kernel specializations. + // Small filters must be listed before large filters, otherwise the kernel for larger filter will always match first. + // Kernels that use more shared memory must be listed before those that use less, for the same reason. + + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/1,1, /*mode*/MODE_FUFD, /*tw,th,warps,xrep,wskip*/64, 178, 32, 0, 0) // 1t-upf1-downf1 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/152, 95, 16, 0, 0) // 4t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 22, 16, 0, 0) // 4t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 29, 16, 11, 0) // 4t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/60, 28, 16, 0, 0) // 4t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 28, 16, 0, 0) // 4t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 31, 16, 11, 0) // 4t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 36, 16, 0, 0) // 4t-ups4-downf2 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 22, 16, 12, 0) // 4t-ups2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/29, 15, 16, 0, 0) // 4t-upf2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/96, 150, 28, 0, 0) // 6t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 35, 24, 0, 0) // 6t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 16, 10, 0) // 6t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/58, 28, 24, 8, 0) // 6t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/52, 28, 16, 0, 0) // 6t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 51, 16, 5, 0) // 6t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 56, 16, 6, 0) // 
6t-ups4-downf2 + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 18, 16, 12, 0) // 6t-ups2-downs4 + CASE(/*sharedKB*/96, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 31, 32, 6, 0) // 6t-upf2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 13, 24, 0, 0) // 6t-upf2-downs4 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/148, 89, 24, 0, 0) // 8t-ups2-downf1 + CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 31, 16, 5, 0) // 8t-upf1-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 41, 16, 9, 0) // 8t-ups2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 26, 24, 0, 0) // 8t-upf2-downs2 + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 40, 16, 0, 0) // 8t-ups2-downf2 + CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 24, 5, 0) // 8t-ups4-downs2 + CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 50, 16, 0, 0) // 8t-ups4-downf2 + CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/24, 24, 32, 12, 1) // 8t-ups2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 13, 16, 10, 1) // 8t-ups2-downs4 + CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 28, 28, 4, 0) // 8t-upf2-downs4 96kB + CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 10, 24, 0, 0) // 8t-upf2-downs4 + + #undef CASE + return s; // No kernel found. +} + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.h b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.h new file mode 100644 index 0000000..f2bfd1d --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include + +//------------------------------------------------------------------------ +// CUDA kernel parameters. + +struct filtered_lrelu_kernel_params +{ + // These parameters decide which kernel to use. + int up; // upsampling ratio (1, 2, 4) + int down; // downsampling ratio (1, 2, 4) + int2 fuShape; // [size, 1] | [size, size] + int2 fdShape; // [size, 1] | [size, size] + + int _dummy; // Alignment. + + // Rest of the parameters. + const void* x; // Input tensor. + void* y; // Output tensor. + const void* b; // Bias tensor. + unsigned char* s; // Sign tensor in/out. NULL if unused. 
+ const float* fu; // Upsampling filter. + const float* fd; // Downsampling filter. + + int2 pad0; // Left/top padding. + float gain; // Additional gain factor. + float slope; // Leaky ReLU slope on negative side. + float clamp; // Clamp after nonlinearity. + int flip; // Filter kernel flip for gradient computation. + + int tilesXdim; // Original number of horizontal output tiles. + int tilesXrep; // Number of horizontal tiles per CTA. + int blockZofs; // Block z offset to support large minibatch, channel dimensions. + + int4 xShape; // [width, height, channel, batch] + int4 yShape; // [width, height, channel, batch] + int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused. + int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. + int swLimit; // Active width of sign tensor in bytes. + + longlong4 xStride; // Strides of all tensors except signs, same component order as shapes. + longlong4 yStride; // + int64_t bStride; // + longlong3 fuStride; // + longlong3 fdStride; // +}; + +struct filtered_lrelu_act_kernel_params +{ + void* x; // Input/output, modified in-place. + unsigned char* s; // Sign tensor in/out. NULL if unused. + + float gain; // Additional gain factor. + float slope; // Leaky ReLU slope on negative side. + float clamp; // Clamp after nonlinearity. + + int4 xShape; // [width, height, channel, batch] + longlong4 xStride; // Input/output tensor strides, same order as in shape. + int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused. + int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. +}; + +//------------------------------------------------------------------------ +// CUDA kernel specialization. + +struct filtered_lrelu_kernel_spec +{ + void* setup; // Function for filter kernel setup. + void* exec; // Function for main operation. + int2 tileOut; // Width/height of launch tile. + int numWarps; // Number of warps per thread block, determines launch block size. + int xrep; // For processing multiple horizontal tiles per thread block. + int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants. +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template void* choose_filtered_lrelu_act_kernel(void); +template cudaError_t copy_filters(cudaStream_t stream); + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.py new file mode 100644 index 0000000..2047b7e --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu.py @@ -0,0 +1,276 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import os +import numpy as np +import torch +import warnings + +from .. import custom_ops +from .. 
import misc
+from . import upfirdn2d
+from . import bias_act
+
+#----------------------------------------------------------------------------
+
+_plugin = None
+
+def _init():
+ global _plugin
+ if _plugin is None:
+ _plugin = custom_ops.get_plugin(
+ module_name='filtered_lrelu_plugin',
+ sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
+ headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
+ source_dir=os.path.dirname(__file__),
+ extra_cuda_cflags=['--use_fast_math'],
+ )
+ return True
+
+def _get_filter_size(f):
+ if f is None:
+ return 1, 1
+ assert isinstance(f, torch.Tensor)
+ assert 1 <= f.ndim <= 2
+ return f.shape[-1], f.shape[0] # width, height
+
+def _parse_padding(padding):
+ if isinstance(padding, int):
+ padding = [padding, padding]
+ assert isinstance(padding, (list, tuple))
+ assert all(isinstance(x, (int, np.integer)) for x in padding)
+ padding = [int(x) for x in padding]
+ if len(padding) == 2:
+ px, py = padding
+ padding = [px, px, py, py]
+ px0, px1, py0, py1 = padding
+ return px0, px1, py0, py1
+
+#----------------------------------------------------------------------------
+
+def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
+ r"""Filtered leaky ReLU for a batch of 2D images.
+
+ Performs the following sequence of operations for each channel:
+
+ 1. Add channel-specific bias if provided (`b`).
+
+ 2. Upsample the image by inserting N-1 zeros after each pixel (`up`).
+
+ 3. Pad the image with the specified number of zeros on each side (`padding`).
+ Negative padding corresponds to cropping the image.
+
+ 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it
+ so that the footprint of all output pixels lies within the input image.
+
+ 5. Multiply each value by the provided gain factor (`gain`).
+
+ 6. Apply leaky ReLU activation function to each value.
+
+ 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.
+
+ 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking
+ it so that the footprint of all output pixels lies within the input image.
+
+ 9. Downsample the image by keeping every Nth pixel (`down`).
+
+ The fused op is considerably more efficient than performing the same calculation
+ using standard PyTorch ops. It supports gradients of arbitrary order.
+
+ Args:
+ x: Float32/float16/float64 input tensor of the shape
+ `[batch_size, num_channels, in_height, in_width]`.
+ fu: Float32 upsampling FIR filter of the shape
+ `[filter_height, filter_width]` (non-separable),
+ `[filter_taps]` (separable), or
+ `None` (identity).
+ fd: Float32 downsampling FIR filter of the shape
+ `[filter_height, filter_width]` (non-separable),
+ `[filter_taps]` (separable), or
+ `None` (identity).
+ b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
+ as `x`. The length of the vector must match the channel dimension of `x`.
+ up: Integer upsampling factor (default: 1).
+ down: Integer downsampling factor (default: 1).
+ padding: Padding with respect to the upsampled image. Can be a single number
+ or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
+ (default: 0).
+ gain: Overall scaling factor for signal magnitude (default: sqrt(2)).
+ slope: Slope on the negative side of leaky ReLU (default: 0.2).
+ clamp: Maximum magnitude for leaky ReLU output (default: None).
+ flip_filter: False = convolution, True = correlation (default: False).
+ impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
+
+ Returns:
+ Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
+ """
+ assert isinstance(x, torch.Tensor)
+ assert impl in ['ref', 'cuda']
+ if impl == 'cuda' and x.device.type == 'cuda' and _init():
+ return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
+ return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
+
+#----------------------------------------------------------------------------
+
+@misc.profiled_function
+def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
+ """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using
+ existing `upfirdn2d()` and `bias_act()` ops.
+ """
+ assert isinstance(x, torch.Tensor) and x.ndim == 4
+ fu_w, fu_h = _get_filter_size(fu)
+ fd_w, fd_h = _get_filter_size(fd)
+ if b is not None:
+ assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
+ misc.assert_shape(b, [x.shape[1]])
+ assert isinstance(up, int) and up >= 1
+ assert isinstance(down, int) and down >= 1
+ px0, px1, py0, py1 = _parse_padding(padding)
+ assert gain == float(gain) and gain > 0
+ assert slope == float(slope) and slope >= 0
+ assert clamp is None or (clamp == float(clamp) and clamp >= 0)
+
+ # Calculate output size.
+ batch_size, channels, in_h, in_w = x.shape
+ in_dtype = x.dtype
+ out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down
+ out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down
+
+ # Compute using existing ops.
+ x = bias_act.bias_act(x=x, b=b) # Apply bias.
+ x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
+ x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp.
+ x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample.
+
+ # Check output shape & dtype.
+ misc.assert_shape(x, [batch_size, channels, out_h, out_w])
+ assert x.dtype == in_dtype
+ return x
+
+#----------------------------------------------------------------------------
+
+_filtered_lrelu_cuda_cache = dict()
+
+def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
+ """Fast CUDA implementation of `filtered_lrelu()` using custom ops.
+ """
+ assert isinstance(up, int) and up >= 1
+ assert isinstance(down, int) and down >= 1
+ px0, px1, py0, py1 = _parse_padding(padding)
+ assert gain == float(gain) and gain > 0
+ gain = float(gain)
+ assert slope == float(slope) and slope >= 0
+ slope = float(slope)
+ assert clamp is None or (clamp == float(clamp) and clamp >= 0)
+ clamp = float(clamp if clamp is not None else 'inf')
+
+ # Lookup from cache.
+ key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
+ if key in _filtered_lrelu_cuda_cache:
+ return _filtered_lrelu_cuda_cache[key]
+
+ # Forward op.
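+ # The class below is what _filtered_lrelu_cuda() returns; the public filtered_lrelu() wrapper
+ # applies it as _filtered_lrelu_cuda(...).apply(x, fu, fd, b, None, 0, 0), where the trailing
+ # (None, 0, 0) are the optional pre-computed sign tensor and its x/y offsets used by the backward pass.
+ # A minimal usage sketch of the wrapper (assuming the usual setup_filter() helper from upfirdn2d;
+ # that helper is not part of this file):
+ #   x = torch.randn([1, 3, 64, 64], device='cuda')
+ #   f = upfirdn2d.setup_filter([1, 3, 3, 1], device=x.device)
+ #   y = filtered_lrelu(x, fu=f, fd=f, up=2, down=2, padding=3)  # -> [1, 3, 64, 64]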
+ class FilteredLReluCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + + # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable). + if fu is None: + fu = torch.ones([1, 1], dtype=torch.float32, device=x.device) + if fd is None: + fd = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert 1 <= fu.ndim <= 2 + assert 1 <= fd.ndim <= 2 + + # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1. + if up == 1 and fu.ndim == 1 and fu.shape[0] == 1: + fu = fu.square()[None] + if down == 1 and fd.ndim == 1 and fd.shape[0] == 1: + fd = fd.square()[None] + + # Missing sign input tensor. + if si is None: + si = torch.empty([0]) + + # Missing bias tensor. + if b is None: + b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device) + + # Construct internal sign tensor only if gradients are needed. + write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad) + + # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout. + strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1] + if any(a < b for a, b in zip(strides[:-1], strides[1:])): + warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning) + + # Call C++/Cuda plugin if datatype is supported. + if x.dtype in [torch.float16, torch.float32]: + if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device): + warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning) + y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs) + else: + return_code = -1 + + # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because + # only the bit-packed sign tensor is retained for gradient computation. + if return_code < 0: + warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning) + + y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias. + y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. + so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place. + y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample. + + # Prepare for gradient computation. 
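+            # Only the two filters and the bit-packed sign tensor are saved for backward; the input x itself is not retained, which keeps the memory footprint of the backward pass small.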
+ ctx.save_for_backward(fu, fd, (si if si.numel() else so)) + ctx.x_shape = x.shape + ctx.y_shape = y.shape + ctx.s_ofs = sx, sy + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + fu, fd, si = ctx.saved_tensors + _, _, xh, xw = ctx.x_shape + _, _, yh, yw = ctx.y_shape + sx, sy = ctx.s_ofs + dx = None # 0 + dfu = None; assert not ctx.needs_input_grad[1] + dfd = None; assert not ctx.needs_input_grad[2] + db = None # 3 + dsi = None; assert not ctx.needs_input_grad[4] + dsx = None; assert not ctx.needs_input_grad[5] + dsy = None; assert not ctx.needs_input_grad[6] + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]: + pp = [ + (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0, + xw * up - yw * down + px0 - (up - 1), + (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0, + xh * up - yh * down + py0 - (up - 1), + ] + gg = gain * (up ** 2) / (down ** 2) + ff = (not flip_filter) + sx = sx - (fu.shape[-1] - 1) + px0 + sy = sy - (fu.shape[0] - 1) + py0 + dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy) + + if ctx.needs_input_grad[3]: + db = dx.sum([0, 2, 3]) + + return dx, dfu, dfd, db, dsi, dsx, dsy + + # Add to cache. + _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda + return FilteredLReluCuda + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_ns.cu b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_ns.cu new file mode 100644 index 0000000..8a3eae4 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_ns.cu @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for no signs mode (no gradients required). + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. 
+template cudaError_t copy_filters(cudaStream_t stream); diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_rd.cu b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_rd.cu new file mode 100644 index 0000000..3cd43ec --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_rd.cu @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for sign read mode. + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. +template cudaError_t copy_filters(cudaStream_t stream); diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_wr.cu b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_wr.cu new file mode 100644 index 0000000..bc2fa06 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/filtered_lrelu_wr.cu @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include "filtered_lrelu.cu" + +// Template/kernel specializations for sign write mode. + +// Full op, 32-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Full op, 64-bit indexing. +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); +template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); + +// Activation/signs only for generic variant. 64-bit indexing. 
+template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); +template void* choose_filtered_lrelu_act_kernel(void); + +// Copy filters to constant memory. +template cudaError_t copy_filters(cudaStream_t stream); diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/fma.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/fma.py new file mode 100644 index 0000000..5458116 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/fma.py @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`.""" + +import torch + +#---------------------------------------------------------------------------- + +def fma(a, b, c): # => a * b + c + return _FusedMultiplyAdd.apply(a, b, c) + +#---------------------------------------------------------------------------- + +class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c + @staticmethod + def forward(ctx, a, b, c): # pylint: disable=arguments-differ + out = torch.addcmul(c, a, b) + ctx.save_for_backward(a, b) + ctx.c_shape = c.shape + return out + + @staticmethod + def backward(ctx, dout): # pylint: disable=arguments-differ + a, b = ctx.saved_tensors + c_shape = ctx.c_shape + da = None + db = None + dc = None + + if ctx.needs_input_grad[0]: + da = _unbroadcast(dout * b, a.shape) + + if ctx.needs_input_grad[1]: + db = _unbroadcast(dout * a, b.shape) + + if ctx.needs_input_grad[2]: + dc = _unbroadcast(dout, c_shape) + + return da, db, dc + +#---------------------------------------------------------------------------- + +def _unbroadcast(x, shape): + extra_dims = x.ndim - len(shape) + assert extra_dims >= 0 + dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] + if len(dim): + x = x.sum(dim=dim, keepdim=True) + if extra_dims: + x = x.reshape(-1, *x.shape[extra_dims+1:]) + assert x.shape == shape + return x + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/grid_sample_gradfix.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/grid_sample_gradfix.py new file mode 100644 index 0000000..35d9472 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/grid_sample_gradfix.py @@ -0,0 +1,79 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +"""Custom replacement for `torch.nn.functional.grid_sample` that +supports arbitrarily high order gradients between the input and output. +Only works on 2D images and assumes +`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" + +import torch + +# pylint: disable=redefined-builtin +# pylint: disable=arguments-differ +# pylint: disable=protected-access + +#---------------------------------------------------------------------------- + +enabled = False # Enable the custom op by setting this to true. + +#---------------------------------------------------------------------------- + +def grid_sample(input, grid): + if _should_use_custom_op(): + return _GridSample2dForward.apply(input, grid) + return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) + +#---------------------------------------------------------------------------- + +def _should_use_custom_op(): + return enabled + +#---------------------------------------------------------------------------- + +class _GridSample2dForward(torch.autograd.Function): + @staticmethod + def forward(ctx, input, grid): + assert input.ndim == 4 + assert grid.ndim == 4 + output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) + ctx.save_for_backward(input, grid) + return output + + @staticmethod + def backward(ctx, grad_output): + input, grid = ctx.saved_tensors + grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) + return grad_input, grad_grid + +#---------------------------------------------------------------------------- + +class _GridSample2dBackward(torch.autograd.Function): + @staticmethod + def forward(ctx, grad_output, input, grid): + op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') + grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) + ctx.save_for_backward(grid) + return grad_input, grad_grid + + @staticmethod + def backward(ctx, grad2_grad_input, grad2_grad_grid): + _ = grad2_grad_grid # unused + grid, = ctx.saved_tensors + grad2_grad_output = None + grad2_input = None + grad2_grid = None + + if ctx.needs_input_grad[0]: + grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) + + assert not ctx.needs_input_grad[2] + return grad2_grad_output, grad2_input, grad2_grid + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.cpp b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.cpp new file mode 100644 index 0000000..c1769c3 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.cpp @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. 
+ */ + +#include +#include +#include +#include "upfirdn2d.h" + +//------------------------------------------------------------------------ + +static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain) +{ + // Validate arguments. + TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); + TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x"); + TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32"); + TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); + TORCH_CHECK(f.numel() <= INT_MAX, "f is too large"); + TORCH_CHECK(x.numel() > 0, "x has zero size"); + TORCH_CHECK(f.numel() > 0, "f has zero size"); + TORCH_CHECK(x.dim() == 4, "x must be rank 4"); + TORCH_CHECK(f.dim() == 2, "f must be rank 2"); + TORCH_CHECK((x.size(0)-1)*x.stride(0) + (x.size(1)-1)*x.stride(1) + (x.size(2)-1)*x.stride(2) + (x.size(3)-1)*x.stride(3) <= INT_MAX, "x memory footprint is too large"); + TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1"); + TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1"); + TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1"); + + // Create output tensor. + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); + int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx; + int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy; + TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1"); + torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format()); + TORCH_CHECK(y.numel() <= INT_MAX, "output is too large"); + TORCH_CHECK((y.size(0)-1)*y.stride(0) + (y.size(1)-1)*y.stride(1) + (y.size(2)-1)*y.stride(2) + (y.size(3)-1)*y.stride(3) <= INT_MAX, "output memory footprint is too large"); + + // Initialize CUDA kernel parameters. + upfirdn2d_kernel_params p; + p.x = x.data_ptr(); + p.f = f.data_ptr(); + p.y = y.data_ptr(); + p.up = make_int2(upx, upy); + p.down = make_int2(downx, downy); + p.pad0 = make_int2(padx0, pady0); + p.flip = (flip) ? 1 : 0; + p.gain = gain; + p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); + p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0)); + p.filterSize = make_int2((int)f.size(1), (int)f.size(0)); + p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0)); + p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); + p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0)); + p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z; + p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1; + + // Choose CUDA kernel. + upfirdn2d_kernel_spec spec; + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] + { + spec = choose_upfirdn2d_kernel(p); + }); + + // Set looping options. + p.loopMajor = (p.sizeMajor - 1) / 16384 + 1; + p.loopMinor = spec.loopMinor; + p.loopX = spec.loopX; + p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1; + p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1; + + // Compute grid size. 
+ dim3 blockSize, gridSize; + if (spec.tileOutW < 0) // large + { + blockSize = dim3(4, 32, 1); + gridSize = dim3( + ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor, + (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, + p.launchMajor); + } + else // small + { + blockSize = dim3(256, 1, 1); + gridSize = dim3( + ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor, + (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, + p.launchMajor); + } + + // Launch CUDA kernel. + void* args[] = {&p}; + AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); + return y; +} + +//------------------------------------------------------------------------ + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("upfirdn2d", &upfirdn2d); +} + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.cu b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.cu new file mode 100644 index 0000000..7d182d7 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.cu @@ -0,0 +1,388 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include +#include "upfirdn2d.h" + +//------------------------------------------------------------------------ +// Helpers. + +template struct InternalType; +template <> struct InternalType { typedef double scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; +template <> struct InternalType { typedef float scalar_t; }; + +static __device__ __forceinline__ int floor_div(int a, int b) +{ + int t = 1 - a / b; + return (a + t * b) / b - t; +} + +//------------------------------------------------------------------------ +// Generic CUDA implementation for large filters. + +template static __global__ void upfirdn2d_kernel_large(upfirdn2d_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + + // Calculate thread index. + int minorBase = blockIdx.x * blockDim.x + threadIdx.x; + int outY = minorBase / p.launchMinor; + minorBase -= outY * p.launchMinor; + int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y; + int majorBase = blockIdx.z * p.loopMajor; + if (outXBase >= p.outSize.x | outY >= p.outSize.y | majorBase >= p.sizeMajor) + return; + + // Setup Y receptive field. + int midY = outY * p.down.y + p.up.y - 1 - p.pad0.y; + int inY = min(max(floor_div(midY, p.up.y), 0), p.inSize.y); + int h = min(max(floor_div(midY + p.filterSize.y, p.up.y), 0), p.inSize.y) - inY; + int filterY = midY + p.filterSize.y - (inY + 1) * p.up.y; + if (p.flip) + filterY = p.filterSize.y - 1 - filterY; + + // Loop over major, minor, and X. 
+ for (int majorIdx = 0, major = majorBase; majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) + for (int minorIdx = 0, minor = minorBase; minorIdx < p.loopMinor & minor < p.sizeMinor; minorIdx++, minor += p.launchMinor) + { + int nc = major * p.sizeMinor + minor; + int n = nc / p.inSize.z; + int c = nc - n * p.inSize.z; + for (int loopX = 0, outX = outXBase; loopX < p.loopX & outX < p.outSize.x; loopX++, outX += blockDim.y) + { + // Setup X receptive field. + int midX = outX * p.down.x + p.up.x - 1 - p.pad0.x; + int inX = min(max(floor_div(midX, p.up.x), 0), p.inSize.x); + int w = min(max(floor_div(midX + p.filterSize.x, p.up.x), 0), p.inSize.x) - inX; + int filterX = midX + p.filterSize.x - (inX + 1) * p.up.x; + if (p.flip) + filterX = p.filterSize.x - 1 - filterX; + + // Initialize pointers. + const T* xp = &((const T*)p.x)[inX * p.inStride.x + inY * p.inStride.y + c * p.inStride.z + n * p.inStride.w]; + const float* fp = &p.f[filterX * p.filterStride.x + filterY * p.filterStride.y]; + int filterStepX = ((p.flip) ? p.up.x : -p.up.x) * p.filterStride.x; + int filterStepY = ((p.flip) ? p.up.y : -p.up.y) * p.filterStride.y; + + // Inner loop. + scalar_t v = 0; + for (int y = 0; y < h; y++) + { + for (int x = 0; x < w; x++) + { + v += (scalar_t)(*xp) * (scalar_t)(*fp); + xp += p.inStride.x; + fp += filterStepX; + } + xp += p.inStride.y - w * p.inStride.x; + fp += filterStepY - w * filterStepX; + } + + // Store result. + v *= p.gain; + ((T*)p.y)[outX * p.outStride.x + outY * p.outStride.y + c * p.outStride.z + n * p.outStride.w] = (T)v; + } + } +} + +//------------------------------------------------------------------------ +// Specialized CUDA implementation for small filters. + +template +static __global__ void upfirdn2d_kernel_small(upfirdn2d_kernel_params p) +{ + typedef typename InternalType::scalar_t scalar_t; + const int tileInW = ((tileOutW - 1) * downx + filterW - 1) / upx + 1; + const int tileInH = ((tileOutH - 1) * downy + filterH - 1) / upy + 1; + __shared__ volatile scalar_t sf[filterH][filterW]; + __shared__ volatile scalar_t sx[tileInH][tileInW][loopMinor]; + + // Calculate tile index. + int minorBase = blockIdx.x; + int tileOutY = minorBase / p.launchMinor; + minorBase -= tileOutY * p.launchMinor; + minorBase *= loopMinor; + tileOutY *= tileOutH; + int tileOutXBase = blockIdx.y * p.loopX * tileOutW; + int majorBase = blockIdx.z * p.loopMajor; + if (tileOutXBase >= p.outSize.x | tileOutY >= p.outSize.y | majorBase >= p.sizeMajor) + return; + + // Load filter (flipped). + for (int tapIdx = threadIdx.x; tapIdx < filterH * filterW; tapIdx += blockDim.x) + { + int fy = tapIdx / filterW; + int fx = tapIdx - fy * filterW; + scalar_t v = 0; + if (fx < p.filterSize.x & fy < p.filterSize.y) + { + int ffx = (p.flip) ? fx : p.filterSize.x - 1 - fx; + int ffy = (p.flip) ? fy : p.filterSize.y - 1 - fy; + v = (scalar_t)p.f[ffx * p.filterStride.x + ffy * p.filterStride.y]; + } + sf[fy][fx] = v; + } + + // Loop over major and X. + for (int majorIdx = 0, major = majorBase; majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) + { + int baseNC = major * p.sizeMinor + minorBase; + int n = baseNC / p.inSize.z; + int baseC = baseNC - n * p.inSize.z; + for (int loopX = 0, tileOutX = tileOutXBase; loopX < p.loopX & tileOutX < p.outSize.x; loopX++, tileOutX += tileOutW) + { + // Load input pixels. 
+ int tileMidX = tileOutX * downx + upx - 1 - p.pad0.x; + int tileMidY = tileOutY * downy + upy - 1 - p.pad0.y; + int tileInX = floor_div(tileMidX, upx); + int tileInY = floor_div(tileMidY, upy); + __syncthreads(); + for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW * loopMinor; inIdx += blockDim.x) + { + int relC = inIdx; + int relInX = relC / loopMinor; + int relInY = relInX / tileInW; + relC -= relInX * loopMinor; + relInX -= relInY * tileInW; + int c = baseC + relC; + int inX = tileInX + relInX; + int inY = tileInY + relInY; + scalar_t v = 0; + if (inX >= 0 & inY >= 0 & inX < p.inSize.x & inY < p.inSize.y & c < p.inSize.z) + v = (scalar_t)((const T*)p.x)[inX * p.inStride.x + inY * p.inStride.y + c * p.inStride.z + n * p.inStride.w]; + sx[relInY][relInX][relC] = v; + } + + // Loop over output pixels. + __syncthreads(); + for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW * loopMinor; outIdx += blockDim.x) + { + int relC = outIdx; + int relOutX = relC / loopMinor; + int relOutY = relOutX / tileOutW; + relC -= relOutX * loopMinor; + relOutX -= relOutY * tileOutW; + int c = baseC + relC; + int outX = tileOutX + relOutX; + int outY = tileOutY + relOutY; + + // Setup receptive field. + int midX = tileMidX + relOutX * downx; + int midY = tileMidY + relOutY * downy; + int inX = floor_div(midX, upx); + int inY = floor_div(midY, upy); + int relInX = inX - tileInX; + int relInY = inY - tileInY; + int filterX = (inX + 1) * upx - midX - 1; // flipped + int filterY = (inY + 1) * upy - midY - 1; // flipped + + // Inner loop. + if (outX < p.outSize.x & outY < p.outSize.y & c < p.outSize.z) + { + scalar_t v = 0; + #pragma unroll + for (int y = 0; y < filterH / upy; y++) + #pragma unroll + for (int x = 0; x < filterW / upx; x++) + v += sx[relInY + y][relInX + x][relC] * sf[filterY + y * upy][filterX + x * upx]; + v *= p.gain; + ((T*)p.y)[outX * p.outStride.x + outY * p.outStride.y + c * p.outStride.z + n * p.outStride.w] = (T)v; + } + } + } + } +} + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p) +{ + int s = p.inStride.z, fx = p.filterSize.x, fy = p.filterSize.y; + upfirdn2d_kernel_spec spec = {(void*)upfirdn2d_kernel_large, -1,-1,1, 4}; // contiguous + if (s == 1) spec = {(void*)upfirdn2d_kernel_large, -1,-1,4, 1}; // channels_last + + // No up/downsampling. 
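+    // Specialized small-filter kernels below override the generic large-filter choice whenever the filter footprint fits the listed bounds; contiguous (s != 1) and channels_last (s == 1) layouts get separate tile shapes.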
+ if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 7 && fy <= 7 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 5 && fy <= 5 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 3 && fy <= 3 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 24 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 7 && fy <= 7 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 5 && fy <= 5 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 3 && fy <= 3 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 24 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 16 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 8 && fy <= 1 ) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 2x upsampling. 
+ if (p.up.x == 2 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + if (s != 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 64,16,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + if (s == 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 16,16,8, 1}; + } + if (p.up.x == 2 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + } + if (p.up.x == 1 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 2x downsampling. 
+ if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 2) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 16,16,1, 1}; + if (s == 1 && fx <= 16 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 16,16,1, 1}; + if (s == 1 && fx <= 8 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 6 && fy <= 6 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 4 && fy <= 4 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + if (s == 1 && fx <= 2 && fy <= 2 ) spec = {(void*)upfirdn2d_kernel_small, 8,8,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + if (s != 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + if (s != 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,8,1, 1}; + // channels_last + if (s == 1 && fx <= 24 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + if (s == 1 && fx <= 16 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + if (s == 1 && fx <= 8 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 64,1,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 2) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + if (s != 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 32,16,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 24) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + if (s == 1 && fx <= 1 && fy <= 16) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + if (s == 1 && fx <= 1 && fy <= 8 ) spec = {(void*)upfirdn2d_kernel_small, 1,64,8, 1}; + } + + // 4x upsampling. 
+ if (p.up.x == 4 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + if (s != 1 && fx <= 32 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 64,32,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s == 1 && fx <= 32 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + } + if (p.up.x == 4 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + if (s != 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,8,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + if (s == 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 128,1,16, 1}; + } + if (p.up.x == 1 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + if (s != 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,32,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + if (s == 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 1,128,16, 1}; + } + + // 4x downsampling (inefficient). + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 4 && p.down.y == 1) + { + // contiguous + if (s != 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 48 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,1,8, 1}; + if (s == 1 && fx <= 32 && fy <= 1) spec = {(void*)upfirdn2d_kernel_small, 32,1,8, 1}; + } + if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 4) + { + // contiguous + if (s != 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + if (s != 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 32,8,1, 1}; + // channels_last + if (s == 1 && fx <= 1 && fy <= 48) spec = {(void*)upfirdn2d_kernel_small, 1,32,8, 1}; + if (s == 1 && fx <= 1 && fy <= 32) spec = {(void*)upfirdn2d_kernel_small, 1,32,8, 1}; + } + return spec; +} + +//------------------------------------------------------------------------ +// Template specializations. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel (const upfirdn2d_kernel_params& p); +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel (const upfirdn2d_kernel_params& p); +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.h b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.h new file mode 100644 index 0000000..d5de893 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * + * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual + * property and proprietary rights in and to this material, related + * documentation and any modifications thereto. 
Any use, reproduction, + * disclosure or distribution of this material and related documentation + * without an express license agreement from NVIDIA CORPORATION or + * its affiliates is strictly prohibited. + */ + +#include + +//------------------------------------------------------------------------ +// CUDA kernel parameters. + +struct upfirdn2d_kernel_params +{ + const void* x; + const float* f; + void* y; + + int2 up; + int2 down; + int2 pad0; + int flip; + float gain; + + int4 inSize; // [width, height, channel, batch] + int4 inStride; + int2 filterSize; // [width, height] + int2 filterStride; + int4 outSize; // [width, height, channel, batch] + int4 outStride; + int sizeMinor; + int sizeMajor; + + int loopMinor; + int loopMajor; + int loopX; + int launchMinor; + int launchMajor; +}; + +//------------------------------------------------------------------------ +// CUDA kernel specialization. + +struct upfirdn2d_kernel_spec +{ + void* kernel; + int tileOutW; + int tileOutH; + int loopMinor; + int loopX; +}; + +//------------------------------------------------------------------------ +// CUDA kernel selection. + +template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); + +//------------------------------------------------------------------------ diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.py new file mode 100644 index 0000000..5d63471 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/ops/upfirdn2d.py @@ -0,0 +1,391 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Custom PyTorch ops for efficient resampling of 2D images.""" + +import os +import numpy as np +import torch + +from .. import custom_ops +from .. import misc +from . 
import conv2d_gradfix + +#---------------------------------------------------------------------------- + +_plugin = None + +def _init(): + global _plugin + if _plugin is None: + _plugin = custom_ops.get_plugin( + module_name='upfirdn2d_plugin', + sources=['upfirdn2d.cpp', 'upfirdn2d.cu'], + headers=['upfirdn2d.h'], + source_dir=os.path.dirname(__file__), + extra_cuda_cflags=['--use_fast_math'], + ) + return True + +def _parse_scaling(scaling): + if isinstance(scaling, int): + scaling = [scaling, scaling] + assert isinstance(scaling, (list, tuple)) + assert all(isinstance(x, int) for x in scaling) + sx, sy = scaling + assert sx >= 1 and sy >= 1 + return sx, sy + +def _parse_padding(padding): + if isinstance(padding, int): + padding = [padding, padding] + assert isinstance(padding, (list, tuple)) + assert all(isinstance(x, int) for x in padding) + if len(padding) == 2: + padx, pady = padding + padding = [padx, padx, pady, pady] + padx0, padx1, pady0, pady1 = padding + return padx0, padx1, pady0, pady1 + +def _get_filter_size(f): + if f is None: + return 1, 1 + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + fw = f.shape[-1] + fh = f.shape[0] + with misc.suppress_tracer_warnings(): + fw = int(fw) + fh = int(fh) + misc.assert_shape(f, [fh, fw][:f.ndim]) + assert fw >= 1 and fh >= 1 + return fw, fh + +#---------------------------------------------------------------------------- + +def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): + r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. + + Args: + f: Torch tensor, numpy array, or python list of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), + `[]` (impulse), or + `None` (identity). + device: Result device (default: cpu). + normalize: Normalize the filter so that it retains the magnitude + for constant input signal (DC)? (default: True). + flip_filter: Flip the filter? (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + separable: Return a separable filter? (default: select automatically). + + Returns: + Float32 tensor of the shape + `[filter_height, filter_width]` (non-separable) or + `[filter_taps]` (separable). + """ + # Validate. + if f is None: + f = 1 + f = torch.as_tensor(f, dtype=torch.float32) + assert f.ndim in [0, 1, 2] + assert f.numel() > 0 + if f.ndim == 0: + f = f[np.newaxis] + + # Separable? + if separable is None: + separable = (f.ndim == 1 and f.numel() >= 8) + if f.ndim == 1 and not separable: + f = f.ger(f) + assert f.ndim == (1 if separable else 2) + + # Apply normalize, flip, gain, and device. + if normalize: + f /= f.sum() + if flip_filter: + f = f.flip(list(range(f.ndim))) + f = f * (gain ** (f.ndim / 2)) + f = f.to(device=device) + return f + +#---------------------------------------------------------------------------- + +def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Pad, upsample, filter, and downsample a batch of 2D images. + + Performs the following sequence of operations for each channel: + + 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). + + 2. Pad the image with the specified number of zeros on each side (`padding`). + Negative padding corresponds to cropping the image. + + 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it + so that the footprint of all output pixels lies within the input image. + + 4. Downsample the image by keeping every Nth pixel (`down`). 
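As an illustrative aside to the four steps listed above, the sketch below exercises the reference path (`impl='ref'`), which sidesteps the compiled CUDA extension; the `nerf.torch_utils.ops` import path is assumed from where this file lands in the diff, and the filter taps are arbitrary:

```python
import torch
# Import path assumed from this file's location in the repository.
from nerf.torch_utils.ops import upfirdn2d

x = torch.randn(2, 3, 16, 16)               # [batch, channels, height, width]
f = upfirdn2d.setup_filter([1, 3, 3, 1])    # 4-tap low-pass, expanded to 4x4 and normalized

# 2x upsampling: insert zeros, pad, filter (gain=4 compensates for the inserted zeros).
up = upfirdn2d.upsample2d(x, f, up=2, impl='ref')        # -> [2, 3, 32, 32]

# 2x downsampling: pad, filter, keep every 2nd pixel.
down = upfirdn2d.downsample2d(x, f, down=2, impl='ref')  # -> [2, 3, 8, 8]

# upsample2d() is upfirdn2d() with the padding and gain already worked out:
up_direct = upfirdn2d.upfirdn2d(x, f, up=2, padding=[2, 1, 2, 1], gain=4, impl='ref')
assert torch.allclose(up, up_direct)
```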
+ + This sequence of operations bears close resemblance to scipy.signal.upfirdn(). + The fused op is considerably more efficient than performing the same calculation + using standard PyTorch ops. It supports gradients of arbitrary order. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the upsampled image. Can be a single number + or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + assert isinstance(x, torch.Tensor) + assert impl in ['ref', 'cuda'] + if impl == 'cuda' and x.device.type == 'cuda' and _init(): + return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) + return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): + """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. + """ + # Validate arguments. + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + assert f.dtype == torch.float32 and not f.requires_grad + batch_size, num_channels, in_height, in_width = x.shape + upx, upy = _parse_scaling(up) + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + + # Check that upsampled buffer is not smaller than the filter. + upW = in_width * upx + padx0 + padx1 + upH = in_height * upy + pady0 + pady1 + assert upW >= f.shape[-1] and upH >= f.shape[0] + + # Upsample by inserting zeros. + x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) + x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) + x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) + + # Pad or crop. + x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) + x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] + + # Setup filter. + f = f * (gain ** (f.ndim / 2)) + f = f.to(x.dtype) + if not flip_filter: + f = f.flip(list(range(f.ndim))) + + # Convolve with the filter. + f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) + if f.ndim == 4: + x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) + else: + x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) + x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) + + # Downsample by throwing away pixels. 
+ x = x[:, :, ::downy, ::downx] + return x + +#---------------------------------------------------------------------------- + +_upfirdn2d_cuda_cache = dict() + +def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): + """Fast CUDA implementation of `upfirdn2d()` using custom ops. + """ + # Parse arguments. + upx, upy = _parse_scaling(up) + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + + # Lookup from cache. + key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) + if key in _upfirdn2d_cuda_cache: + return _upfirdn2d_cuda_cache[key] + + # Forward op. + class Upfirdn2dCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, f): # pylint: disable=arguments-differ + assert isinstance(x, torch.Tensor) and x.ndim == 4 + if f is None: + f = torch.ones([1, 1], dtype=torch.float32, device=x.device) + if f.ndim == 1 and f.shape[0] == 1: + f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1. + assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] + y = x + if f.ndim == 2: + y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) + else: + y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0) + y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain) + ctx.save_for_backward(f) + ctx.x_shape = x.shape + return y + + @staticmethod + def backward(ctx, dy): # pylint: disable=arguments-differ + f, = ctx.saved_tensors + _, _, ih, iw = ctx.x_shape + _, _, oh, ow = dy.shape + fw, fh = _get_filter_size(f) + p = [ + fw - padx0 - 1, + iw * upx - ow * downx + padx0 - upx + 1, + fh - pady0 - 1, + ih * upy - oh * downy + pady0 - upy + 1, + ] + dx = None + df = None + + if ctx.needs_input_grad[0]: + dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) + + assert not ctx.needs_input_grad[1] + return dx, df + + # Add to cache. + _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda + return Upfirdn2dCuda + +#---------------------------------------------------------------------------- + +def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Filter a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape matches the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + padding: Padding with respect to the output. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
+ """ + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + fw // 2, + padx1 + (fw - 1) // 2, + pady0 + fh // 2, + pady1 + (fh - 1) // 2, + ] + return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) + +#---------------------------------------------------------------------------- + +def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Upsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a multiple of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + up: Integer upsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the output. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. + """ + upx, upy = _parse_scaling(up) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw + upx - 1) // 2, + padx1 + (fw - upx) // 2, + pady0 + (fh + upy - 1) // 2, + pady1 + (fh - upy) // 2, + ] + return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) + +#---------------------------------------------------------------------------- + +def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): + r"""Downsample a batch of 2D images using the given 2D FIR filter. + + By default, the result is padded so that its shape is a fraction of the input. + User-specified padding is applied on top of that, with negative values + indicating cropping. Pixels outside the image are assumed to be zero. + + Args: + x: Float32/float64/float16 input tensor of the shape + `[batch_size, num_channels, in_height, in_width]`. + f: Float32 FIR filter of the shape + `[filter_height, filter_width]` (non-separable), + `[filter_taps]` (separable), or + `None` (identity). + down: Integer downsampling factor. Can be a single int or a list/tuple + `[x, y]` (default: 1). + padding: Padding with respect to the input. Can be a single number or a + list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` + (default: 0). + flip_filter: False = convolution, True = correlation (default: False). + gain: Overall scaling factor for signal magnitude (default: 1). + impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). + + Returns: + Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
+ """ + downx, downy = _parse_scaling(down) + padx0, padx1, pady0, pady1 = _parse_padding(padding) + fw, fh = _get_filter_size(f) + p = [ + padx0 + (fw - downx + 1) // 2, + padx1 + (fw - downx) // 2, + pady0 + (fh - downy + 1) // 2, + pady1 + (fh - downy) // 2, + ] + return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/persistence.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/persistence.py new file mode 100644 index 0000000..9e110d6 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/persistence.py @@ -0,0 +1,255 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Facilities for pickling Python code alongside other data. + +The pickled code is automatically imported into a separate Python module +during unpickling. This way, any previously exported pickles will remain +usable even if the original code is no longer available, or if the current +version of the code is not consistent with what was originally pickled.""" + +import sys +import pickle +import io +import inspect +import copy +import uuid +import types +# how to import dnnlib + +from nerf import dnnlib + +#---------------------------------------------------------------------------- + +_version = 6 # internal version number +_decorators = set() # {decorator_class, ...} +_import_hooks = [] # [hook_function, ...] +_module_to_src_dict = dict() # {module: src, ...} +_src_to_module_dict = dict() # {src: module, ...} + +#---------------------------------------------------------------------------- + +def persistent_class(orig_class): + r"""Class decorator that extends a given class to save its source code + when pickled. + + Example: + + from torch_utils import persistence + + @persistence.persistent_class + class MyNetwork(torch.nn.Module): + def __init__(self, num_inputs, num_outputs): + super().__init__() + self.fc = MyLayer(num_inputs, num_outputs) + ... + + @persistence.persistent_class + class MyLayer(torch.nn.Module): + ... + + When pickled, any instance of `MyNetwork` and `MyLayer` will save its + source code alongside other internal state (e.g., parameters, buffers, + and submodules). This way, any previously exported pickle will remain + usable even if the class definitions have been modified or are no + longer available. + + The decorator saves the source code of the entire Python module + containing the decorated class. It does *not* save the source code of + any imported modules. Thus, the imported modules must be available + during unpickling, also including `torch_utils.persistence` itself. + + It is ok to call functions defined in the same module from the + decorated class. However, if the decorated class depends on other + classes defined in the same module, they must be decorated as well. + This is illustrated in the above example in the case of `MyLayer`. 
+ + It is also possible to employ the decorator just-in-time before + calling the constructor. For example: + + cls = MyLayer + if want_to_make_it_persistent: + cls = persistence.persistent_class(cls) + layer = cls(num_inputs, num_outputs) + + As an additional feature, the decorator also keeps track of the + arguments that were used to construct each instance of the decorated + class. The arguments can be queried via `obj.init_args` and + `obj.init_kwargs`, and they are automatically pickled alongside other + object state. A typical use case is to first unpickle a previous + instance of a persistent class, and then upgrade it to use the latest + version of the source code: + + with open('old_pickle.pkl', 'rb') as f: + old_net = pickle.load(f) + new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs) + misc.copy_params_and_buffers(old_net, new_net, require_all=True) + """ + assert isinstance(orig_class, type) + if is_persistent(orig_class): + return orig_class + + assert orig_class.__module__ in sys.modules + orig_module = sys.modules[orig_class.__module__] + orig_module_src = _module_to_src(orig_module) + + class Decorator(orig_class): + _orig_module_src = orig_module_src + _orig_class_name = orig_class.__name__ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._init_args = copy.deepcopy(args) + self._init_kwargs = copy.deepcopy(kwargs) + assert orig_class.__name__ in orig_module.__dict__ + _check_pickleable(self.__reduce__()) + + @property + def init_args(self): + return copy.deepcopy(self._init_args) + + @property + def init_kwargs(self): + return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs)) + + def __reduce__(self): + fields = list(super().__reduce__()) + fields += [None] * max(3 - len(fields), 0) + if fields[0] is not _reconstruct_persistent_obj: + meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2]) + fields[0] = _reconstruct_persistent_obj # reconstruct func + fields[1] = (meta,) # reconstruct args + fields[2] = None # state dict + return tuple(fields) + + Decorator.__name__ = orig_class.__name__ + _decorators.add(Decorator) + return Decorator + +#---------------------------------------------------------------------------- + +def is_persistent(obj): + r"""Test whether the given object or class is persistent, i.e., + whether it will save its source code when pickled. + """ + try: + if obj in _decorators: + return True + except TypeError: + pass + return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck + +#---------------------------------------------------------------------------- + +def import_hook(hook): + r"""Register an import hook that is called whenever a persistent object + is being unpickled. A typical use case is to patch the pickled source + code to avoid errors and inconsistencies when the API of some imported + module has changed. + + The hook should have the following signature: + + hook(meta) -> modified meta + + `meta` is an instance of `dnnlib.EasyDict` with the following fields: + + type: Type of the persistent object, e.g. `'class'`. + version: Internal version number of `torch_utils.persistence`. + module_src Original source code of the Python module. + class_name: Class name in the original Python module. + state: Internal state of the object. + + Example: + + @persistence.import_hook + def wreck_my_network(meta): + if meta.class_name == 'MyNetwork': + print('MyNetwork is being imported. 
I will wreck it!') + meta.module_src = meta.module_src.replace("True", "False") + return meta + """ + assert callable(hook) + _import_hooks.append(hook) + +#---------------------------------------------------------------------------- + +def _reconstruct_persistent_obj(meta): + r"""Hook that is called internally by the `pickle` module to unpickle + a persistent object. + """ + meta = dnnlib.EasyDict(meta) + meta.state = dnnlib.EasyDict(meta.state) + for hook in _import_hooks: + meta = hook(meta) + assert meta is not None + + assert meta.version == _version + module = _src_to_module(meta.module_src) + + assert meta.type == 'class' + orig_class = module.__dict__[meta.class_name] + decorator_class = persistent_class(orig_class) + obj = decorator_class.__new__(decorator_class) + + setstate = getattr(obj, '__setstate__', None) + if callable(setstate): + setstate(meta.state) # pylint: disable=not-callable + else: + obj.__dict__.update(meta.state) + return obj + +#---------------------------------------------------------------------------- + +def _module_to_src(module): + r"""Query the source code of a given Python module. + """ + src = _module_to_src_dict.get(module, None) + if src is None: + src = inspect.getsource(module) + _module_to_src_dict[module] = src + _src_to_module_dict[src] = module + return src + +def _src_to_module(src): + r"""Get or create a Python module for the given source code. + """ + module = _src_to_module_dict.get(src, None) + if module is None: + module_name = "_imported_module_" + uuid.uuid4().hex + module = types.ModuleType(module_name) + sys.modules[module_name] = module + _module_to_src_dict[module] = src + _src_to_module_dict[src] = module + exec(src, module.__dict__) # pylint: disable=exec-used + return module + +#---------------------------------------------------------------------------- + +def _check_pickleable(obj): + r"""Check that the given object is pickleable, raising an exception if + it is not. This function is expected to be considerably more efficient + than actually pickling the object. + """ + def recurse(obj): + if isinstance(obj, (list, tuple, set)): + return [recurse(x) for x in obj] + if isinstance(obj, dict): + return [[recurse(x), recurse(y)] for x, y in obj.items()] + if isinstance(obj, (str, int, float, bool, bytes, bytearray)): + return None # Python primitive types are pickleable. + if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']: + return None # NumPy arrays and PyTorch tensors are pickleable. + if is_persistent(obj): + return None # Persistent objects are pickleable, by virtue of the constructor check. + return obj + with io.BytesIO() as f: + pickle.dump(recurse(obj), f) + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/torch_utils/training_stats.py b/stable-dreamfusion-3DPortrait/nerf/torch_utils/training_stats.py new file mode 100644 index 0000000..636dd7f --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/torch_utils/training_stats.py @@ -0,0 +1,270 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Facilities for reporting and collecting training statistics across +multiple processes and devices. The interface is designed to minimize +synchronization overhead as well as the amount of boilerplate in user +code.""" + +import re +import numpy as np +import torch +import dnnlib + +from . import misc + +#---------------------------------------------------------------------------- + +_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares] +_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction. +_counter_dtype = torch.float64 # Data type to use for the internal counters. +_rank = 0 # Rank of the current process. +_sync_device = None # Device to use for multiprocess communication. None = single-process. +_sync_called = False # Has _sync() been called yet? +_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor +_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor + +#---------------------------------------------------------------------------- + +def init_multiprocessing(rank, sync_device): + r"""Initializes `torch_utils.training_stats` for collecting statistics + across multiple processes. + + This function must be called after + `torch.distributed.init_process_group()` and before `Collector.update()`. + The call is not necessary if multi-process collection is not needed. + + Args: + rank: Rank of the current process. + sync_device: PyTorch device to use for inter-process + communication, or None to disable multi-process + collection. Typically `torch.device('cuda', rank)`. + """ + global _rank, _sync_device + assert not _sync_called + _rank = rank + _sync_device = sync_device + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def report(name, value): + r"""Broadcasts the given set of scalars to all interested instances of + `Collector`, across device and process boundaries. + + This function is expected to be extremely cheap and can be safely + called from anywhere in the training loop, loss function, or inside a + `torch.nn.Module`. + + Warning: The current implementation expects the set of unique names to + be consistent across processes. Please make sure that `report()` is + called at least once for each unique name by each process, and in the + same order. If a given process has no scalars to broadcast, it can do + `report(name, [])` (empty list). + + Args: + name: Arbitrary string specifying the name of the statistic. + Averages are accumulated separately for each unique name. + value: Arbitrary set of scalars. Can be a list, tuple, + NumPy array, PyTorch tensor, or Python scalar. + + Returns: + The same `value` that was passed in. 
+ """ + if name not in _counters: + _counters[name] = dict() + + elems = torch.as_tensor(value) + if elems.numel() == 0: + return value + + elems = elems.detach().flatten().to(_reduce_dtype) + moments = torch.stack([ + torch.ones_like(elems).sum(), + elems.sum(), + elems.square().sum(), + ]) + assert moments.ndim == 1 and moments.shape[0] == _num_moments + moments = moments.to(_counter_dtype) + + device = moments.device + if device not in _counters[name]: + _counters[name][device] = torch.zeros_like(moments) + _counters[name][device].add_(moments) + return value + +#---------------------------------------------------------------------------- + +def report0(name, value): + r"""Broadcasts the given set of scalars by the first process (`rank = 0`), + but ignores any scalars provided by the other processes. + See `report()` for further details. + """ + report(name, value if _rank == 0 else []) + return value + +#---------------------------------------------------------------------------- + +class Collector: + r"""Collects the scalars broadcasted by `report()` and `report0()` and + computes their long-term averages (mean and standard deviation) over + user-defined periods of time. + + The averages are first collected into internal counters that are not + directly visible to the user. They are then copied to the user-visible + state as a result of calling `update()` and can then be queried using + `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the + internal counters for the next round, so that the user-visible state + effectively reflects averages collected between the last two calls to + `update()`. + + Args: + regex: Regular expression defining which statistics to + collect. The default is to collect everything. + keep_previous: Whether to retain the previous averages if no + scalars were collected on a given round + (default: True). + """ + def __init__(self, regex='.*', keep_previous=True): + self._regex = re.compile(regex) + self._keep_previous = keep_previous + self._cumulative = dict() + self._moments = dict() + self.update() + self._moments.clear() + + def names(self): + r"""Returns the names of all statistics broadcasted so far that + match the regular expression specified at construction time. + """ + return [name for name in _counters if self._regex.fullmatch(name)] + + def update(self): + r"""Copies current values of the internal counters to the + user-visible state and resets them for the next round. + + If `keep_previous=True` was specified at construction time, the + operation is skipped for statistics that have received no scalars + since the last update, retaining their previous averages. + + This method performs a number of GPU-to-CPU transfers and one + `torch.distributed.all_reduce()`. It is intended to be called + periodically in the main training loop, typically once every + N training steps. + """ + if not self._keep_previous: + self._moments.clear() + for name, cumulative in _sync(self.names()): + if name not in self._cumulative: + self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + delta = cumulative - self._cumulative[name] + self._cumulative[name].copy_(cumulative) + if float(delta[0]) != 0: + self._moments[name] = delta + + def _get_delta(self, name): + r"""Returns the raw moments that were accumulated for the given + statistic between the last two calls to `update()`, or zero if + no scalars were collected. 
+ """ + assert self._regex.fullmatch(name) + if name not in self._moments: + self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + return self._moments[name] + + def num(self, name): + r"""Returns the number of scalars that were accumulated for the given + statistic between the last two calls to `update()`, or zero if + no scalars were collected. + """ + delta = self._get_delta(name) + return int(delta[0]) + + def mean(self, name): + r"""Returns the mean of the scalars that were accumulated for the + given statistic between the last two calls to `update()`, or NaN if + no scalars were collected. + """ + delta = self._get_delta(name) + if int(delta[0]) == 0: + return float('nan') + return float(delta[1] / delta[0]) + + def std(self, name): + r"""Returns the standard deviation of the scalars that were + accumulated for the given statistic between the last two calls to + `update()`, or NaN if no scalars were collected. + """ + delta = self._get_delta(name) + if int(delta[0]) == 0 or not np.isfinite(float(delta[1])): + return float('nan') + if int(delta[0]) == 1: + return float(0) + mean = float(delta[1] / delta[0]) + raw_var = float(delta[2] / delta[0]) + return np.sqrt(max(raw_var - np.square(mean), 0)) + + def as_dict(self): + r"""Returns the averages accumulated between the last two calls to + `update()` as an `dnnlib.EasyDict`. The contents are as follows: + + dnnlib.EasyDict( + NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT), + ... + ) + """ + stats = dnnlib.EasyDict() + for name in self.names(): + stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name)) + return stats + + def __getitem__(self, name): + r"""Convenience getter. + `collector[name]` is a synonym for `collector.mean(name)`. + """ + return self.mean(name) + +#---------------------------------------------------------------------------- + +def _sync(names): + r"""Synchronize the global cumulative counters across devices and + processes. Called internally by `Collector.update()`. + """ + if len(names) == 0: + return [] + global _sync_called + _sync_called = True + + # Collect deltas within current rank. + deltas = [] + device = _sync_device if _sync_device is not None else torch.device('cpu') + for name in names: + delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device) + for counter in _counters[name].values(): + delta.add_(counter.to(device)) + counter.copy_(torch.zeros_like(counter)) + deltas.append(delta) + deltas = torch.stack(deltas) + + # Sum deltas across ranks. + if _sync_device is not None: + torch.distributed.all_reduce(deltas) + + # Update cumulative values. + deltas = deltas.cpu() + for idx, name in enumerate(names): + if name not in _cumulative: + _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) + _cumulative[name].add_(deltas[idx]) + + # Return name-value pairs. + return [(name, _cumulative[name]) for name in names] + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/__init__.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/__init__.py new file mode 100644 index 0000000..dfebd04 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# empty diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/aligned_smpl.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/aligned_smpl.py new file mode 100644 index 0000000..33b7f7e --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/aligned_smpl.py @@ -0,0 +1,446 @@ + +import os.path as osp + +import numpy as np +import torch +from nerf.torch_utils import misc + +import trimesh +import pickle + + +import os +# os.environ["PYOPENGL_PLATFORM"] = "egl" +# os.environ["PYOPENGL_PLATFORM"] = "osmesa" +import pyrender + +class AlignedSMPL(torch.nn.Module): + def __init__(self, model,batch_size): + super().__init__() + self.batch_size = batch_size + smpl_joint_regressor = torch.from_numpy( + np.load('transfer_data/smpl_joint_regressor.npy')).float().cuda().contiguous() + self.register_buffer('smpl_joint_regressor', smpl_joint_regressor) + + self.model = model + faces = torch.from_numpy(self.model.faces.astype(np.int32)).cuda().long().contiguous() + self.register_buffer('faces', faces) + + + def set_model(self, model): + self.model = model + def set_batch_size(self, batch_size): + self.batch_size = batch_size + + def get_align_coordinate(self, vertices): + # 30 x 6890 + batch_size = vertices.shape[0] + smpl_joints = torch.bmm(self.smpl_joint_regressor[None, :, :].repeat(batch_size, 1, 1), vertices) + align_joint_coordinate = smpl_joints[:,12, None, :] # Neck + return align_joint_coordinate + + def render_mesh(self, img, mesh, face, cam_param, color=(1.0, 1.0, 0.9, 1.0), cam_pose=None): + # mesh + mesh = trimesh.Trimesh(mesh, face) + rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0]) + mesh.apply_transform(rot) + material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=color) + mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=False) + scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3)) + scene.add(mesh, 'mesh') + + focal, princpt = cam_param['focal'], cam_param['princpt'] + camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1]) + + if cam_pose is not None: + scene.add(camera, pose=cam_pose) + else: + scene.add(camera) + # scene.add(camera) + # print('camera pose in scene ', scene.get_pose(scene._main_camera_node)) + # renderer + renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0) + + # light + light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=0.8) + # light_pose = np.eye(4) + # light_pose[:3, 3] = np.array([0, -1, 1]) + # scene.add(light, pose=light_pose) + # light_pose[:3, 3] = np.array([0, 1, 1]) + # scene.add(light, pose=light_pose) + # light_pose[:3, 3] = np.array([1, 1, 2]) + # scene.add(light, pose=light_pose) + + light_pose = np.eye(4) + light_pose[:3, 3] = np.array([0, 0, -1]) + scene.add(light, pose=light_pose) + + scene.add(light, pose=cam_pose) + scene.add(light, pose=cam_pose) + scene.add(light, pose=cam_pose) + light_pose[:3, 3] = np.array([1, 1, -4]) + 
scene.add(light, pose=light_pose) + light_pose[:3, 3] = np.array([-1, 0, -1]) + scene.add(light, pose=light_pose) + light_pose[:3, 3] = np.array([0.2469, 1.8828, -2.4473]) + scene.add(light, pose=light_pose) + + # render + rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA) + rgb = rgb[:, :, :3].astype(np.float32) + valid_mask = (depth > 0)[:, :, None] + + # save to image + img = rgb * valid_mask + img * (1 - valid_mask) + return img.astype(np.uint8) + + def render_depth(self, img, mesh, face, cam_param, color=(1.0, 1.0, 0.9, 1.0), cam_pose=None): + # mesh + mesh = trimesh.Trimesh(mesh, face) + rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0]) + mesh.apply_transform(rot) + material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=color) + mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=False) + scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3)) + scene.add(mesh, 'mesh') + + focal, princpt = cam_param['focal'], cam_param['princpt'] + camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1]) + + if cam_pose is not None: + scene.add(camera, pose=cam_pose) + else: + scene.add(camera) + # scene.add(camera) + # print('camera pose in scene ', scene.get_pose(scene._main_camera_node)) + # renderer + renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0) + + # render + rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA) + #rgb = rgb[:, :, :3].astype(np.float32) + valid_mask = (depth > 0)[:, :, None] + + # save to image + depth = depth * valid_mask + img * (1 - valid_mask) + return depth.astype(np.uint8) + + + def get_projected_vertex(self, mesh, world2screen_matrix): + # mesh = np.concatenate([mesh, np.ones((mesh.shape[0], 1))], axis=1) # N x 4 + mesh = torch.cat([mesh, torch.ones((mesh.shape[0], 1)).to(mesh.device)], dim=1) # N x 4 + points_image = world2screen_matrix @ mesh.T # 4,N + points_image = points_image[:3, :] # 3,N + + points_on_input_image = points_image / points_image[2, :] + points_on_input_image = points_on_input_image[:2, :].T # 30,2 + + return points_on_input_image + + + def generate_shaped_smpl(self, betas, scale, transl): + if betas is not None: + raise NotImplementedError + else: + betas = None + if scale is not None: + raise NotImplementedError + misc.assert_shape(scale, [self.batch_size, 1]) + else: + scale = torch.ones([self.batch_size, 1]).to(self.model.shapedirs.device) + if transl is not None: + raise NotImplementedError + misc.assert_shape(transl, [self.batch_size, 3]) + else: + transl = torch.zeros([self.batch_size, 3]).to(self.model.shapedirs.device) + + # body_pose_fill = torch.zeros((self.batch_size, 23, 3)).to(self.model.shapedirs.device) + # # 15 16 for shoulder, we hope the Hands naturally sagging + # body_pose_fill[:, 15, :] = torch.tensor([0.0, 0.0, -np.pi / 2]).to(self.model.shapedirs.device) + + # body_pose_fill[:, 16, :] = torch.tensor([0.0, 0.0, np.pi / 2]).to(self.model.shapedirs.device) + # body_pose_fill = body_pose_fill.reshape(self.batch_size, -1) + # apply beta, alignment, translation and scale + shaed_output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=None, + return_shaped=False + ) + vertices_no_pose = shaed_output.vertices + joints_no_pose = shaed_output.joints + + + align_joint_coordinate = self.get_align_coordinate(vertices_no_pose) # B,1,3 + vertices_no_pose -= 
align_joint_coordinate + joints_no_pose -= align_joint_coordinate + + vertices_no_pose += transl.view(self.batch_size, 1, 3) + joints_no_pose += transl.view(self.batch_size, 1, 3) + + vertices_no_pose *= scale.view(self.batch_size, 1, 1) + joints_no_pose *= scale.view(self.batch_size, 1, 1) + + nose_2d = joints_no_pose[:,86:90,:] # B, 4, 3 + eye_right_2d = joints_no_pose[:,95: 101,:] # B, 6, 3 + eye_left_2d = joints_no_pose[:,101: 107,:] # B, 6, 3 + + # points_3d = np.concatenate([nose_2d, eye_right_2d, eye_left_2d], axis=0) # 16 + face_points = torch.cat([nose_2d, eye_right_2d, eye_left_2d], dim=1) # B, 16, 3 + + #transformation_matrix = self.compute_transformation_matrix(face_points) + + res = { + 'vertices': vertices_no_pose, + 'align_joint_coordinate': align_joint_coordinate, + 'face_points': face_points, + } + return res + + def generate_posed_smpl(self, betas, scale, transl, body_pose, align_joint_coordinate): + batch_size = body_pose.shape[0] + if betas is not None: + raise NotImplementedError + else: + betas = None + if scale is not None: + raise NotImplementedError + misc.assert_shape(scale, [self.batch_size, 1]) + else: + scale = torch.ones([self.batch_size, 1]).to(self.model.shapedirs.device) + if transl is not None: + raise NotImplementedError + misc.assert_shape(transl, [self.batch_size, 3]) + else: + transl = torch.zeros([self.batch_size, 3]).to(self.model.shapedirs.device) + misc.assert_shape(body_pose, [self.batch_size, 6]) + + # apply beta, alignment, translation and scale + + # apply beta, pose, alignment, translation and scale + # mask pose except 11 and 14 + body_pose_fill = torch.zeros((self.batch_size, 23, 3)).to(self.model.shapedirs.device) + body_pose_fill[:, 11, :] = body_pose[:, :3] + body_pose_fill[:, 14, :] = body_pose[:, 3:] + + # # 15 16 for shoulder, we hope the Hands naturally sagging + # body_pose_fill[:, 15, :] = torch.tensor([0.0, 0.0, -np.pi / 2]).to(self.model.shapedirs.device) + # body_pose_fill[:, 16, :] = torch.tensor([0.0, 0.0, np.pi / 2]).to(self.model.shapedirs.device) + + + body_pose_fill = body_pose_fill.reshape(self.batch_size, -1) + + output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=body_pose_fill, + return_shaped=True + ) + vertices = output.vertices + joints = output.joints + + # align vertices and joints + vertices -= align_joint_coordinate + joints -= align_joint_coordinate + + # additional translation + vertices += transl.view(self.batch_size, 1, 3) + joints += transl.view(self.batch_size, 1, 3) + + # additional scale + vertices *= scale.view(self.batch_size, 1, 1) + joints *= scale.view(self.batch_size, 1, 1) + + nose_2d = joints[:, 86:90, :] # B, 4, 3 + eye_right_2d = joints[:, 95: 101, :] # B, 6, 3 + eye_left_2d = joints[:, 101: 107, :] # B, 6, 3 + + # points_3d = np.concatenate([nose_2d, eye_right_2d, eye_left_2d], axis=0) # 16 + face_points = torch.cat([nose_2d, eye_right_2d, eye_left_2d], dim=1) # B, 16, 3 + + res = { + 'vertices': vertices, + 'face_points': face_points + } + + return res + + + + def get_depth(self,vert, resolution=256, cameras=None): + + faces = self.model.faces + # compute the transformation matrix with eg3d + intrisics_standard_dict = {"focal": [5000.0 / 1024 * resolution / 0.75, 5000.0 / 1024 * resolution / 0.75], + "princpt": [resolution / 2, resolution / 2]} + # intrisics_standard = np.array( [[5000.0, 0.0, resolution/2, 0.0], [0.0, 5000.0, resolution/2.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) + # normalized_transformation_in_realworld = 
np.array(render_kwargs['world2camera_matrix']) + R = np.eye(3) + angle = np.pi + R[1, 1] = np.cos(angle) + R[1, 2] = -np.sin(angle) + R[2, 1] = np.sin(angle) + R[2, 2] = np.cos(angle) + + R = torch.from_numpy(R).float().to(self.model.shapedirs.device).unsqueeze(0).repeat(self.batch_size, 1, + 1) # self.batch_size x 3 x 3 + + vertices_pyrender = torch.matmul(vert, R) # 1 x 6890 x 3 + # normalized_camerapose_in_pyrender = np.array(render_kwargs['normalized_camerapose_in_pyrender']) + + # color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + images = [] + for i in range(self.batch_size): + camera_pose = cameras[i, :16].reshape(4, 4) + + camerapose_in_pyrender = np.linalg.inv(camera_pose) + camerapose_in_pyrender[[1, 2]] *= -1 + camerapose_in_pyrender = np.linalg.inv(camerapose_in_pyrender) + + # print(vertices_pyrender.shape, vertices_pyrender[i].shape,camerapose_in_pyrender.shape) + image_camera_rotate = self.render_depth(np.ones((resolution, resolution, 3)) * 255, + vertices_pyrender[i].detach().cpu().numpy(), faces, + intrisics_standard_dict, + color=(0.4, 0.5, 0.9, 1.0), + cam_pose=camerapose_in_pyrender) + + image_camera_rotate = image_camera_rotate[None, :, :, :] # 1 x 256 x 256 x 3 + image_camera_rotate = np.transpose(image_camera_rotate, (0, 3, 1, 2)) # 1 x 3 x 256 x 256 + images.append(image_camera_rotate) + + images = np.concatenate(images, axis=0) + return images + # + def get_visualization(self, shape_pose_params, resolution=256, cameras=None): + # apply beta, alignment, translation and scale + if 'betas' in shape_pose_params: + raise NotImplementedError + betas = shape_pose_params['betas'] + misc.assert_shape(betas, [self.batch_size, self.num_betas]) + else: + betas = None + # scale = shape_pose_params['scale'] + # transl = shape_pose_params['transl'] + if 'scale' in shape_pose_params: + raise NotImplementedError + scale = shape_pose_params['scale'] + misc.assert_shape(scale, [self.batch_size, 1]) + else: + scale = torch.ones([self.batch_size, 1]).to(self.model.shapedirs.device) + if 'transl' in shape_pose_params: + raise NotImplementedError + transl = shape_pose_params['transl'] + misc.assert_shape(transl, [self.batch_size, 3]) + else: + transl = torch.zeros([self.batch_size, 3]).to(self.model.shapedirs.device) + + + body_pose = shape_pose_params['pose'] + + + misc.assert_shape(scale, [self.batch_size, 1]) + misc.assert_shape(transl, [self.batch_size, 3]) + misc.assert_shape(body_pose, [self.batch_size, 6]) + + cameras = cameras.detach().cpu().numpy() # N, 25 + + shaed_output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=None, + return_shaped=False + ) + vertices_no_pose = shaed_output.vertices + faces = self.model.faces + + align_joint_coordinate = self.get_align_coordinate(vertices_no_pose) + vertices_no_pose = vertices_no_pose + vertices_no_pose -= align_joint_coordinate + + vertices_no_pose += transl.view(self.batch_size, 1, 3) + vertices_no_pose *= scale.view(self.batch_size, 1, 1) + + # apply beta, pose, alignment, translation and scale + # mask pose except 11 and 14 + body_pose_fill = torch.zeros((self.batch_size, 23, 3)).to(self.model.shapedirs.device) + body_pose_fill[:, 11, :] = body_pose[:, :3] + body_pose_fill[:, 14, :] = body_pose[:, 3:] + + # # 15 16 for shoulder, we hope the Hands naturally sagging + # body_pose_fill[:, 15, :] = torch.tensor([0.0, 0.0, -np.pi / 2]).to(self.model.shapedirs.device) + # body_pose_fill[:, 16, :] = torch.tensor([0.0, 0.0, np.pi / 2]).to(self.model.shapedirs.device) + + + + body_pose_fill = 
body_pose_fill.reshape(self.batch_size, -1) + + output = self.model(betas=betas, + expression=None, + return_verts=True, + body_pose=body_pose_fill, + return_shaped=True + ) + vertices = output.vertices + joints = output.joints + + # align vertices and joints + vertices -= align_joint_coordinate + joints -= align_joint_coordinate + + # additional translation + vertices += transl.view(self.batch_size, 1, 3) + joints += transl.view(self.batch_size, 1, 3) + + # additional scale + vertices *= scale.view(self.batch_size, 1, 1) + joints *= scale.view(self.batch_size, 1, 1) + + # print(vertices[:,0].min(),vertices[:,0].max(),vertices[:,0].max() - vertices[:,0].min()) + # print(vertices[:,1].min(),vertices[:,1].max(),vertices[:,1].max() - vertices[:,1].min()) + # print(vertices[:,2].min(),vertices[:,2].max(),vertices[:,2].max() - vertices[:,2].min()) + + # nose_2d = joints[86:90] # 4 + # eye_right_2d = joints[95: 101] # 6 + # eye_left_2d = joints[101: 107] # 6 + + #points_3d = np.concatenate([nose_2d, eye_right_2d, eye_left_2d], axis=0) # 16 + #points_3d = torch.cat([nose_2d, eye_right_2d, eye_left_2d], dim=0) # 16 + + # compute the transformation matrix with eg3d + intrisics_standard_dict = {"focal": [5000.0/1024*resolution/0.75, 5000.0/1024*resolution/0.75], "princpt": [resolution/2, resolution/2]} + # intrisics_standard = np.array( [[5000.0, 0.0, resolution/2, 0.0], [0.0, 5000.0, resolution/2.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) + # normalized_transformation_in_realworld = np.array(render_kwargs['world2camera_matrix']) + R = np.eye(3) + angle = np.pi + R[1, 1] = np.cos(angle) + R[1, 2] = -np.sin(angle) + R[2, 1] = np.sin(angle) + R[2, 2] = np.cos(angle) + + R = torch.from_numpy(R).float().to(self.model.shapedirs.device).unsqueeze(0).repeat(self.batch_size, 1, 1) # self.batch_size x 3 x 3 + + vertices_pyrender = torch.matmul(vertices, R) # 1 x 6890 x 3 + #normalized_camerapose_in_pyrender = np.array(render_kwargs['normalized_camerapose_in_pyrender']) + + # color = colorsys.hsv_to_rgb(np.random.rand(), 0.5, 1.0) + images = [] + for i in range(self.batch_size): + camera_pose = cameras[i,:16].reshape(4,4) + + camerapose_in_pyrender = np.linalg.inv(camera_pose) + camerapose_in_pyrender[[1,2]] *= -1 + camerapose_in_pyrender = np.linalg.inv(camerapose_in_pyrender) + + #print(vertices_pyrender.shape, vertices_pyrender[i].shape,camerapose_in_pyrender.shape) + image_camera_rotate = self.render_mesh(np.ones((resolution, resolution, 3)) * 255, + vertices_pyrender[i].detach().cpu().numpy(), faces, + intrisics_standard_dict, + color=(0.4, 0.5, 0.9, 1.0), + cam_pose=camerapose_in_pyrender) + + image_camera_rotate = image_camera_rotate[None, :, :, :] # 1 x 256 x 256 x 3 + image_camera_rotate = np.transpose(image_camera_rotate, (0, 3, 1, 2)) # 1 x 3 x 256 x 256 + images.append(image_camera_rotate) + + images = np.concatenate(images, axis=0) + return images diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/augment.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/augment.py new file mode 100644 index 0000000..7b00a4a --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/augment.py @@ -0,0 +1,441 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. 
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Augmentation pipeline from the paper +"Training Generative Adversarial Networks with Limited Data". +Matches the original implementation by Karras et al. at +https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py""" + +import numpy as np +import scipy.signal +import torch +from torch_utils import persistence +from torch_utils import misc +from torch_utils.ops import upfirdn2d +from torch_utils.ops import grid_sample_gradfix +from torch_utils.ops import conv2d_gradfix + +#---------------------------------------------------------------------------- +# Coefficients of various wavelet decomposition low-pass filters. + +wavelets = { + 'haar': [0.7071067811865476, 0.7071067811865476], + 'db1': [0.7071067811865476, 0.7071067811865476], + 'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025], + 'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569], + 'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523], + 'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125], + 'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017], + 'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236], + 'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161], + 'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025], + 'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569], + 'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427], + 'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728], + 'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148], + 'sym7': [0.002681814568257878, 
-0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255], + 'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609], +} + +#---------------------------------------------------------------------------- +# Helpers for constructing transformation matrices. + +def matrix(*rows, device=None): + assert all(len(row) == len(rows[0]) for row in rows) + elems = [x for row in rows for x in row] + ref = [x for x in elems if isinstance(x, torch.Tensor)] + if len(ref) == 0: + return misc.constant(np.asarray(rows), device=device) + assert device is None or device == ref[0].device + elems = [x if isinstance(x, torch.Tensor) else misc.constant(x, shape=ref[0].shape, device=ref[0].device) for x in elems] + return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1)) + +def translate2d(tx, ty, **kwargs): + return matrix( + [1, 0, tx], + [0, 1, ty], + [0, 0, 1], + **kwargs) + +def translate3d(tx, ty, tz, **kwargs): + return matrix( + [1, 0, 0, tx], + [0, 1, 0, ty], + [0, 0, 1, tz], + [0, 0, 0, 1], + **kwargs) + +def scale2d(sx, sy, **kwargs): + return matrix( + [sx, 0, 0], + [0, sy, 0], + [0, 0, 1], + **kwargs) + +def scale3d(sx, sy, sz, **kwargs): + return matrix( + [sx, 0, 0, 0], + [0, sy, 0, 0], + [0, 0, sz, 0], + [0, 0, 0, 1], + **kwargs) + +def rotate2d(theta, **kwargs): + return matrix( + [torch.cos(theta), torch.sin(-theta), 0], + [torch.sin(theta), torch.cos(theta), 0], + [0, 0, 1], + **kwargs) + +def rotate3d(v, theta, **kwargs): + vx = v[..., 0]; vy = v[..., 1]; vz = v[..., 2] + s = torch.sin(theta); c = torch.cos(theta); cc = 1 - c + return matrix( + [vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0], + [vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0], + [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0], + [0, 0, 0, 1], + **kwargs) + +def translate2d_inv(tx, ty, **kwargs): + return translate2d(-tx, -ty, **kwargs) + +def scale2d_inv(sx, sy, **kwargs): + return scale2d(1 / sx, 1 / sy, **kwargs) + +def rotate2d_inv(theta, **kwargs): + return rotate2d(-theta, **kwargs) + +#---------------------------------------------------------------------------- +# Versatile image augmentation pipeline from the paper +# "Training Generative Adversarial Networks with Limited Data". +# +# All augmentations are disabled by default; individual augmentations can +# be enabled by setting their probability multipliers to 1. + +@persistence.persistent_class +class AugmentPipe(torch.nn.Module): + def __init__(self, + xflip=0, rotate90=0, xint=0, xint_max=0.125, + scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125, + brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1, + imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1, + noise=0, cutout=0, noise_std=0.1, cutout_size=0.5, + ): + super().__init__() + self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability. + + # Pixel blitting. + self.xflip = float(xflip) # Probability multiplier for x-flip. 
+ self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations. + self.xint = float(xint) # Probability multiplier for integer translation. + self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions. + + # General geometric transformations. + self.scale = float(scale) # Probability multiplier for isotropic scaling. + self.rotate = float(rotate) # Probability multiplier for arbitrary rotation. + self.aniso = float(aniso) # Probability multiplier for anisotropic scaling. + self.xfrac = float(xfrac) # Probability multiplier for fractional translation. + self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling. + self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle. + self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling. + self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions. + + # Color transformations. + self.brightness = float(brightness) # Probability multiplier for brightness. + self.contrast = float(contrast) # Probability multiplier for contrast. + self.lumaflip = float(lumaflip) # Probability multiplier for luma flip. + self.hue = float(hue) # Probability multiplier for hue rotation. + self.saturation = float(saturation) # Probability multiplier for saturation. + self.brightness_std = float(brightness_std) # Standard deviation of brightness. + self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast. + self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle. + self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation. + + # Image-space filtering. + self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering. + self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands. + self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification. + + # Image-space corruptions. + self.noise = float(noise) # Probability multiplier for additive RGB noise. + self.cutout = float(cutout) # Probability multiplier for cutout. + self.noise_std = float(noise_std) # Standard deviation of additive RGB noise. + self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions. + + # Setup orthogonal lowpass filter for geometric augmentations. + self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6'])) + + # Construct filter bank for image-space filtering. 
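+        # Note on the construction below: Hz_lo2 / Hz_hi2 are zero-phase half-band low/high-pass
+        # filters derived from the 'sym2' wavelet. Each loop iteration zero-interleaves the existing
+        # rows, convolves them with the low-pass, and adds the high-pass into row i, so the rows of
+        # Hz_fbank form (roughly) an octave filter bank: row 0 keeps the lowest frequencies and
+        # rows 1-3 cover successively higher bands, which are amplified individually in forward().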
+ Hz_lo = np.asarray(wavelets['sym2']) # H(z) + Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z) + Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2 + Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2 + Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i) + for i in range(1, Hz_fbank.shape[0]): + Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1] + Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2]) + Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2 + self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32)) + + def forward(self, images, debug_percentile=None): + assert isinstance(images, torch.Tensor) and images.ndim == 4 + batch_size, num_channels, height, width = images.shape + device = images.device + if debug_percentile is not None: + debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device) + + # ------------------------------------- + # Select parameters for pixel blitting. + # ------------------------------------- + + # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in + I_3 = torch.eye(3, device=device) + G_inv = I_3 + + # Apply x-flip with probability (xflip * strength). + if self.xflip > 0: + i = torch.floor(torch.rand([batch_size], device=device) * 2) + i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i)) + if debug_percentile is not None: + i = torch.full_like(i, torch.floor(debug_percentile * 2)) + G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1) + + # Apply 90 degree rotations with probability (rotate90 * strength). + if self.rotate90 > 0: + i = torch.floor(torch.rand([batch_size], device=device) * 4) + i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i)) + if debug_percentile is not None: + i = torch.full_like(i, torch.floor(debug_percentile * 4)) + G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i) + + # Apply integer translation with probability (xint * strength). + if self.xint > 0: + t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max + t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t)) + if debug_percentile is not None: + t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max) + G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height)) + + # -------------------------------------------------------- + # Select parameters for general geometric transformations. + # -------------------------------------------------------- + + # Apply isotropic scaling with probability (scale * strength). + if self.scale > 0: + s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std) + s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s)) + if debug_percentile is not None: + s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std)) + G_inv = G_inv @ scale2d_inv(s, s) + + # Apply pre-rotation with probability p_rot. 
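+        # The rotation is split into a pre-rotation (here) and a post-rotation (after the anisotropic
+        # scaling below). Each fires independently with probability p_rot, chosen so that the chance of
+        # applying at least one rotation equals rotate * p: solving 1 - (1 - p_rot)^2 = rotate * p gives
+        # p_rot = 1 - sqrt(1 - rotate * p). For example, rotate * p = 0.5 yields p_rot ~= 0.293.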
+ p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p + if self.rotate > 0: + theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max + theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta)) + if debug_percentile is not None: + theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max) + G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling. + + # Apply anisotropic scaling with probability (aniso * strength). + if self.aniso > 0: + s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std) + s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s)) + if debug_percentile is not None: + s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std)) + G_inv = G_inv @ scale2d_inv(s, 1 / s) + + # Apply post-rotation with probability p_rot. + if self.rotate > 0: + theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max + theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta)) + if debug_percentile is not None: + theta = torch.zeros_like(theta) + G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling. + + # Apply fractional translation with probability (xfrac * strength). + if self.xfrac > 0: + t = torch.randn([batch_size, 2], device=device) * self.xfrac_std + t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t)) + if debug_percentile is not None: + t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std) + G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height) + + # ---------------------------------- + # Execute geometric transformations. + # ---------------------------------- + + # Execute if the transform is not identity. + if G_inv is not I_3: + + # Calculate padding. + cx = (width - 1) / 2 + cy = (height - 1) / 2 + cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz] + cp = G_inv @ cp.t() # [batch, xyz, idx] + Hz_pad = self.Hz_geom.shape[0] // 4 + margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx] + margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1] + margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device) + margin = margin.max(misc.constant([0, 0] * 2, device=device)) + margin = margin.min(misc.constant([width-1, height-1] * 2, device=device)) + mx0, my0, mx1, my1 = margin.ceil().to(torch.int32) + + # Pad image and adjust origin. + images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect') + G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv + + # Upsample. + images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2) + G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device) + G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device) + + # Execute transformation. + shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2] + G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device) + grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False) + images = grid_sample_gradfix.grid_sample(images, grid) + + # Downsample and crop. 
+ images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True) + + # -------------------------------------------- + # Select parameters for color transformations. + # -------------------------------------------- + + # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out + I_4 = torch.eye(4, device=device) + C = I_4 + + # Apply brightness with probability (brightness * strength). + if self.brightness > 0: + b = torch.randn([batch_size], device=device) * self.brightness_std + b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b)) + if debug_percentile is not None: + b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std) + C = translate3d(b, b, b) @ C + + # Apply contrast with probability (contrast * strength). + if self.contrast > 0: + c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std) + c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c)) + if debug_percentile is not None: + c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std)) + C = scale3d(c, c, c) @ C + + # Apply luma flip with probability (lumaflip * strength). + v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis. + if self.lumaflip > 0: + i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2) + i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i)) + if debug_percentile is not None: + i = torch.full_like(i, torch.floor(debug_percentile * 2)) + C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection. + + # Apply hue rotation with probability (hue * strength). + if self.hue > 0 and num_channels > 1: + theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max + theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta)) + if debug_percentile is not None: + theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max) + C = rotate3d(v, theta) @ C # Rotate around v. + + # Apply saturation with probability (saturation * strength). + if self.saturation > 0 and num_channels > 1: + s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std) + s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s)) + if debug_percentile is not None: + s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std)) + C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C + + # ------------------------------ + # Execute color transformations. + # ------------------------------ + + # Execute if the transform is not identity. + if C is not I_4: + images = images.reshape([batch_size, num_channels, height * width]) + if num_channels == 3: + images = C[:, :3, :3] @ images + C[:, :3, 3:] + elif num_channels == 1: + C = C[:, :3, :].mean(dim=1, keepdims=True) + images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:] + elif num_channels == 6: + images[:, :3] = C[:, :3, :3] @ images[:, :3] + C[:, :3, 3:] + images[:, 3:] = C[:, :3, :3] @ images[:, 3:] + C[:, :3, 3:] + else: + raise ValueError('Image must be RGB (3 channels) or L (1 channel)') + images = images.reshape([batch_size, num_channels, height, width]) + + # ---------------------- + # Image-space filtering. 
+ # ---------------------- + + if self.imgfilter > 0: + num_bands = self.Hz_fbank.shape[0] + assert len(self.imgfilter_bands) == num_bands + expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f). + + # Apply amplification for each band with probability (imgfilter * strength * band_strength). + g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity). + for i, band_strength in enumerate(self.imgfilter_bands): + t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std) + t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i)) + if debug_percentile is not None: + t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i) + t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector. + t[:, i] = t_i # Replace i'th element. + t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power. + g = g * t # Accumulate into global gain. + + # Construct combined amplification filter. + Hz_prime = g @ self.Hz_fbank # [batch, tap] + Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap] + Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap] + + # Apply filter. + p = self.Hz_fbank.shape[1] // 2 + images = images.reshape([1, batch_size * num_channels, height, width]) + images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect') + images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels) + images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels) + images = images.reshape([batch_size, num_channels, height, width]) + + # ------------------------ + # Image-space corruptions. + # ------------------------ + + # Apply additive RGB noise with probability (noise * strength). + if self.noise > 0: + sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std + sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma)) + if debug_percentile is not None: + sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std) + images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma + + # Apply cutout with probability (cutout * strength). 
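+        # Cutout zeroes a single axis-aligned rectangle per image: its side length is cutout_size
+        # times the image size, its center is sampled uniformly over the image, and the resulting
+        # mask is broadcast across all channels of that sample.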
+ if self.cutout > 0: + size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device) + size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size)) + center = torch.rand([batch_size, 2, 1, 1, 1], device=device) + if debug_percentile is not None: + size = torch.full_like(size, self.cutout_size) + center = torch.full_like(center, debug_percentile) + coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1]) + coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1]) + mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2) + mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2) + mask = torch.logical_or(mask_x, mask_y).to(torch.float32) + images = images * mask + + return images + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/crosssection_utils.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/crosssection_utils.py new file mode 100644 index 0000000..72d49f2 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/crosssection_utils.py @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import torch + +def sample_cross_section(G, ws, resolution=256, w=1.2): + axis=0 + A, B = torch.meshgrid(torch.linspace(w/2, -w/2, resolution, device=ws.device), torch.linspace(-w/2, w/2, resolution, device=ws.device), indexing='ij') + A, B = A.reshape(-1, 1), B.reshape(-1, 1) + C = torch.zeros_like(A) + coordinates = [A, B] + coordinates.insert(axis, C) + coordinates = torch.cat(coordinates, dim=-1).expand(ws.shape[0], -1, -1) + + sigma = G.sample_mixed(coordinates, torch.randn_like(coordinates), ws)['sigma'] + return sigma.reshape(-1, 1, resolution, resolution) + +# if __name__ == '__main__': +# sample_crossection(None) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/dataset.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/dataset.py new file mode 100644 index 0000000..4c348ca --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/dataset.py @@ -0,0 +1,565 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +"""Streaming images and labels from datasets created with dataset_tool.py.""" + +import os +import numpy as np +import zipfile +import PIL.Image +import json +import torch +import dnnlib +try: + import pyspng +except ImportError: + pyspng = None + +#---------------------------------------------------------------------------- + +def matrix2angle(R): + """ + https://github.com/sizhean/panohead + compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf + refined by: https://stackoverflow.com/questions/43364900/rotation-matrix-to-euler-angles-with-opencv + todo: check and debug + Args: + R: (3,3). rotation matrix + Returns: + x: yaw + y: pitch + z: roll + """ + if R[2, 0] > 0.998: + z = 0 + x = np.pi / 2 + y = z + atan2(-R[0, 1], -R[0, 2]) + elif R[2, 0] < -0.998: + z = 0 + x = -np.pi / 2 + y = -z + atan2(R[0, 1], R[0, 2]) + else: + x = asin(R[2, 0]) + y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x)) + z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x)) + + if abs(y) > np.pi/2: + if x > 0: + x = np.pi - x + else: + x = -np.pi - x + y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x)) + z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x)) + return x, y, z + + +def get_poseangle(eg3dparams): + ''' + https://github.com/sizhean/panohead + ''' + convert = np.array([ + [1, 0, 0, 0], + [0, -1, 0, 0], + [0, 0, -1, 0], + [0, 0, 0, 1], + ]).astype(np.float32) + + entry_cam = np.array([float(p) for p in eg3dparams][:16]).reshape((4,4)) + + world2cam = np.linalg.inv(entry_cam@convert) + pose = matrix2angle(world2cam[:3,:3]) + angle = [p * 180 / np.pi for p in pose] + + return angle + + + +class Dataset(torch.utils.data.Dataset): + def __init__(self, + name, # Name of the dataset. + raw_shape, # Shape of the raw image data (NCHW). + max_size = None, # Artificially limit the size of the dataset. None = no limit. Applied before xflip. + use_labels = False, # Enable conditioning labels? False = label dimension is zero. + xflip = False, # Artificially double the size of the dataset via x-flips. Applied after max_size. + random_seed = 0, # Random seed to use when applying max_size. + rebal_raw_idx = None, # Rebalance the dataset by sampling from the raw_idx list + data_rebalance=False, # Rebalance the dataset by sampling from the raw_idx list + ): + self._name = name + self._raw_shape = list(raw_shape) + self._use_labels = use_labels + self._raw_labels = None + self._raw_poses = None + self._label_shape = None + self._pose_shape = None + + + if data_rebalance: + raise NotImplementedError + assert rebal_raw_idx is not None, "rebal_raw_idx must be provided if data_rebalance is True" + self._raw_idx = rebal_raw_idx + else: + self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64) + + + self._raw_idx = self._filter_samples() + + # Apply max_size. + if (max_size is not None) and (self._raw_idx.size > max_size): + raise NotImplementedError + np.random.RandomState(random_seed).shuffle(self._raw_idx) + self._raw_idx = np.sort(self._raw_idx[:max_size]) + + # Apply xflip. 
+ self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8) + if xflip: + self._raw_idx = np.tile(self._raw_idx, 2) + self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)]) + + def _filter_samples(self): # to be overridden by subclass + raise NotImplementedError + + + def _get_raw_labels(self): + if self._raw_labels is None: + self._raw_labels,self._raw_poses = self._load_raw_labels() if self._use_labels else None + + if self._raw_labels is None: + raise Exception("_raw_labels is None") + self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32) + + assert isinstance(self._raw_labels, np.ndarray) + assert self._raw_labels.shape[0] == self._raw_shape[0] + assert self._raw_labels.dtype in [np.float32, np.int64] + if self._raw_labels.dtype == np.int64: + assert self._raw_labels.ndim == 1 + assert np.all(self._raw_labels >= 0) + self._raw_labels_std = self._raw_labels.std(0) + + + if self._raw_poses is None: + raise Exception("_raw_poses is None") + self._raw_poses = np.zeros([self._raw_poses[0], 0], dtype=np.float32) + + assert isinstance(self._raw_poses, np.ndarray) + assert self._raw_poses.shape[0] == self._raw_shape[0] + assert self._raw_poses.dtype in [np.float32, np.int64] + if self._raw_poses.dtype == np.int64: + assert self._raw_poses.ndim == 1 + assert np.all(self._raw_poses >= 0) + self._raw_poses_std = self._raw_poses.std(0) + + return self._raw_labels + + def _get_raw_poses(self): + if self._raw_poses is None: + _ = self._get_raw_labels() + #raise Exception("please run _get_raw_labels first") + + return self._raw_poses + + + def close(self): # to be overridden by subclass + pass + + def _load_raw_image(self, raw_idx): # to be overridden by subclass + raise NotImplementedError + + def _load_raw_labels(self): # to be overridden by subclass + raise NotImplementedError + + + def __getstate__(self): + return dict(self.__dict__, _raw_labels=None, _raw_poses=None) + + def __del__(self): + try: + self.close() + except: + pass + + def __len__(self): + return self._raw_idx.size + + + + + def __getitem__(self, idx): + + + label = self.get_label(idx) + pose = self.get_coarse_pose(idx) + + # image = self._load_raw_image(self._raw_idx[idx]) + # assert isinstance(image, np.ndarray) + # assert list(image.shape) == self.image_shape + # assert image.dtype == np.uint8 + # if self._xflip[idx]: + # assert image.ndim == 3 # CHW + # image = image[:, :, ::-1] + # # # flip label + # # label = self.flip_yaw(label) + # # # flip pose + # # pose[[1, 2, 4, 5]] *= -1 + + image = self.get_image(idx) + + + return image, label,pose + + def flip_yaw(self, c): + pose_matrix = c.copy() + flipped = pose_matrix[:16].reshape(4,4) + flipped[0, 1] *= -1 + flipped[0, 2] *= -1 + flipped[1, 0] *= -1 + flipped[2, 0] *= -1 + flipped[0, 3] *= -1 + + flipped = flipped.reshape(16) + pose_matrix[:16] = flipped + + return pose_matrix + + def get_image(self, idx): + image = self._load_raw_image(self._raw_idx[idx]) + assert isinstance(image, np.ndarray) + assert list(image.shape) == self.image_shape + assert image.dtype == np.uint8 + if self._xflip[idx]: + assert image.ndim == 3 # CHW + image = image[:, :, ::-1] + + return image.copy() + + + def get_label(self, idx): + label = self._get_raw_labels()[self._raw_idx[idx]].copy() + if label.dtype == np.int64: + onehot = np.zeros(self.label_shape, dtype=np.float32) + onehot[label] = 1 + label = onehot + + if self._xflip[idx]: + assert label.shape == (25,) + label[[1, 2, 3, 4, 8]] *= -1 + + return label + + def get_coarse_pose(self, idx): + pose = 
self._get_raw_poses()[self._raw_idx[idx]].copy() + if pose.dtype == np.int64: + raise TypeError("pose should be float32") + onehot = np.zeros(self.pose_shape, dtype=np.float32) + onehot[pose] = 1 + pose = onehot + + if self._xflip[idx]: + pose_flip = pose.copy() + pose_flip[[1, 2, 4, 5]] *= -1 + + return pose_flip + + else: + return pose + + + + def get_details(self, idx): + d = dnnlib.EasyDict() + d.raw_idx = int(self._raw_idx[idx]) + d.xflip = (int(self._xflip[idx]) != 0) + d.raw_label = self._get_raw_labels()[d.raw_idx].copy() + # d.pose = self.get_coarse_pose(idx).copy() + + return d + + def get_label_std(self): + return self._raw_labels_std + + @property + def name(self): + return self._name + + @property + def image_shape(self): + return list(self._raw_shape[1:]) + + @property + def num_channels(self): + assert len(self.image_shape) == 3 # CHW + return self.image_shape[0] + + @property + def resolution(self): + assert len(self.image_shape) == 3 # CHW + assert self.image_shape[1] == self.image_shape[2] + return self.image_shape[1] + + @property + def label_shape(self): + if self._label_shape is None: + raw_labels = self._get_raw_labels() + if raw_labels.dtype == np.int64: + self._label_shape = [int(np.max(raw_labels)) + 1] + else: + self._label_shape = raw_labels.shape[1:] + return list(self._label_shape) + + @property + def pose_shape(self): + if self._pose_shape is None: + self._get_raw_labels() + if self._raw_poses.dtype == np.int64: + self._pose_shape = [int(np.max(self._raw_poses)) + 1] + else: + self._pose_shape = self._raw_poses.shape[1:] + return list(self._pose_shape) + + + @property + def label_dim(self): + assert len(self.label_shape) == 1 + return self.label_shape[0] + + @property + def has_labels(self): + return any(x != 0 for x in self.label_shape) + + @property + def has_onehot_labels(self): + return self._get_raw_labels().dtype == np.int64 + +#---------------------------------------------------------------------------- + +class ImageFolderDataset(Dataset): + def __init__(self, + path, # Path to directory or zip. + back_repeat = None, + resolution = None, # Ensure specific resolution, None = highest available. + data_rebalance_idx_file = None, + **super_kwargs, # Additional arguments for the Dataset base class. 
+ ): + self.min_yaw = 0 + self.max_yaw = 180 + self.max_pitch = 90 + self.back_repeat = 1 if back_repeat is None else back_repeat + self._path = path + self._zipfile = None + + if os.path.isdir(self._path): + raise NotImplementedError('Does not support directories yet') + self._type = 'dir' + self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files} + elif self._file_ext(self._path) == '.zip': + self._type = 'zip' + self._all_fnames = set(self._get_zipfile().namelist()) + else: + raise IOError('Path must point to a directory or zip') + + if data_rebalance_idx_file is not None: + raise NotImplementedError('data_rebalance is not implemented yet') + rebal_idx_list_path =data_rebalance_idx_file + #print('load rebal_idx_list from ',rebal_idx_list_path) + with open(rebal_idx_list_path, 'r') as f: + rebal_raw_idx = json.load(f) + rebal_raw_idx = np.array(rebal_raw_idx) + else: + rebal_raw_idx = None + + + PIL.Image.init() + self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION) + if len(self._image_fnames) == 0: + raise IOError('No image files found in the specified path') + + name = os.path.splitext(os.path.basename(self._path))[0] + raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape) + if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution): + raise IOError('Image files do not match the specified resolution') + super().__init__(name=name, raw_shape=raw_shape, rebal_raw_idx = rebal_raw_idx,**super_kwargs) + + + def _filter_samples(self): + if self.back_repeat>1: + raw_labels = self._get_raw_labels()[self._raw_idx] + label_list = [] + for entry in raw_labels: + label_list.append(get_poseangle(entry)) + poses = np.array(label_list) + # find [min_yaw, max_yaw] boolean + valid = (np.abs(poses[:,0])>=self.min_yaw) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + # find back boolean: [max(90, self.min_yaw), max_yaw] + back_valid = (np.abs(poses[:,0])>= max(90, self.min_yaw)) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + if not np.all(valid): + print(f"filtering samples by pose: ratio = {valid.sum()}/{len(self._raw_idx)}") + # boolean to index + valid_idx = self._raw_idx[valid] + back_idx = self._raw_idx[back_valid] + front_idx = np.array(list(set(valid_idx) - set(back_idx))) + + front_num = valid.sum()-len(back_idx) + front_back_ratio_min = front_num/2/len(back_idx) + print(f"if back num be the half of front num, at least repeat ({int(front_back_ratio_min)}) times.") + back_repeat = max(int(front_num/2/len(back_idx)), self.back_repeat) + + + + + # TODO: support the repeat times < 1 + # repeat [max(90, self.min_yaw), max_yaw] for multiple times + back_repeat_idx = np.tile(back_idx, back_repeat) + # merge front index and repeated back index + new_idx = np.concatenate((front_idx, back_repeat_idx)) + print(f"Repeat {len(back_idx)} back images till abs({self.max_yaw}) degree {back_repeat} times") + return new_idx + else: + return self._raw_idx + @staticmethod + def _file_ext(fname): + return os.path.splitext(fname)[1].lower() + + def _get_zipfile(self): + assert self._type == 'zip' + if self._zipfile is None: + self._zipfile = zipfile.ZipFile(self._path) + return self._zipfile + + def _open_file(self, fname): + if self._type == 'dir': + return open(os.path.join(self._path, fname), 'rb') + if self._type == 'zip': + return 
self._get_zipfile().open(fname, 'r') + return None + + def close(self): + try: + if self._zipfile is not None: + self._zipfile.close() + finally: + self._zipfile = None + + def __getstate__(self): + return dict(super().__getstate__(), _zipfile=None) + + def _load_raw_image(self, raw_idx): + fname = self._image_fnames[raw_idx] + with self._open_file(fname) as f: + if pyspng is not None and self._file_ext(fname) == '.png': + image = pyspng.load(f.read()) + else: + image = np.array(PIL.Image.open(f)) + if image.ndim == 2: + image = image[:, :, np.newaxis] # HW => HWC + image = image.transpose(2, 0, 1) # HWC => CHW + return image + + def _load_raw_labels(self): + fname = 'dataset.json' + if fname not in self._all_fnames: + return None + with self._open_file(fname) as f: + labels = json.load(f)['labels'] + if labels is None: + return None + labels = dict(labels) + labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames] + labels = np.array(labels) + labels = np.squeeze(labels) + #print('labels shape', labels.shape) # N, 31 + labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim]) + + poses = labels[:,25:] + labels = labels[:,:25] + + # print('labels shape', labels.shape) # N, 25 + # print('poses shape', poses.shape) # N, 6 + + return labels, poses + + +#---------------------------------------------------------------------------- + + +class MaskLabeledDataset(ImageFolderDataset): + + def __init__(self, + img_path, # Path to directory or zip. + seg_path, # Path to directory or zip. + back_repeat = None, + **super_kwargs, # Additional arguments for the Dataset base class. + ): + self.min_yaw = 0 + self.max_yaw = 180 + self.max_pitch = 90 + self.back_repeat = 1 if back_repeat is None else back_repeat + super().__init__(path=img_path, back_repeat = None,**super_kwargs) + + self._seg_dataset = ImageFolderDataset(seg_path, **super_kwargs) + + # Build the mapping from seg fname to seg raw index + seg_dict = {os.path.splitext(fname)[0]: idx for idx, fname in enumerate(self._seg_dataset._image_fnames)} + + # Build the mapping from index to seg raw index + self._seg_raw_idx = [] + for raw_idx in self._raw_idx: + fname = self._image_fnames[raw_idx] + key = os.path.splitext(fname)[0] + self._seg_raw_idx.append(seg_dict[key]) + self._seg_raw_idx = np.array(self._seg_raw_idx) + + def _filter_samples(self): + if self.back_repeat>1: + raw_labels = self._get_raw_labels()[self._raw_idx] + label_list = [] + for entry in raw_labels: + label_list.append(get_poseangle(entry)) + poses = np.array(label_list) + # find [min_yaw, max_yaw] boolean + valid = (np.abs(poses[:,0])>=self.min_yaw) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + # find back boolean: [max(90, self.min_yaw), max_yaw] + back_valid = (np.abs(poses[:,0])>= max(90, self.min_yaw)) & (np.abs(poses[:,0])<=self.max_yaw) & (np.abs(poses[:,1])<=self.max_pitch) + if not np.all(valid): + print(f"filtering samples by pose: ratio = {valid.sum()}/{len(self._raw_idx)}") + # boolean to index + valid_idx = self._raw_idx[valid] + back_idx = self._raw_idx[back_valid] + front_idx = np.array(list(set(valid_idx) - set(back_idx))) + + front_num = valid.sum()-len(back_idx) + front_back_ratio_min = front_num/2/len(back_idx) + print(f"if back num be the half of front num, at least repeat ({int(front_back_ratio_min)}) times.") + back_repeat = max(int(front_num/2/len(back_idx)), self.back_repeat) + + + + + # TODO: support the repeat times < 1 + # repeat [max(90, self.min_yaw), max_yaw] for multiple times + 
back_repeat_idx = np.tile(back_idx, back_repeat) + # merge front index and repeated back index + new_idx = np.concatenate((front_idx, back_repeat_idx)) + print(f"Repeat {len(back_idx)} back images till abs({self.max_yaw}) degree {back_repeat} times") + return new_idx + else: + return self._raw_idx + + + + def __getitem__(self, idx): + # already flipped in the ImageFolderDataset + image = self.get_image(idx) + mask = self._seg_dataset.get_image(idx) + label = self.get_label(idx) + pose = self.get_coarse_pose(idx) + + + return image.copy(), mask.copy(), label,pose + diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/dual_discriminator.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/dual_discriminator.py new file mode 100644 index 0000000..403c2f6 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/dual_discriminator.py @@ -0,0 +1,502 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Discriminator architectures from the paper +"Efficient Geometry-aware 3D Generative Adversarial Networks".""" + +import numpy as np +import torch +from torch_utils import persistence +from torch_utils.ops import upfirdn2d +from training.networks_stylegan2 import DiscriminatorBlock, MappingNetwork, DiscriminatorEpilogue + + +@persistence.persistent_class +class SingleDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + sr_upsample_factor=1, # Ignored for SingleDiscriminator + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. + epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. 
+ ): + super().__init__() + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + + def forward(self, img, c, update_emas=False, **block_kwargs): + img = img['image'] + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +# ---------------------------------------------------------------------------- + +def filtered_resizing(image_orig_tensor, size, f, filter_mode='antialiased'): + if filter_mode == 'antialiased': + ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False, antialias=True) + elif filter_mode == 'classic': + ada_filtered_64 = upfirdn2d.upsample2d(image_orig_tensor, f, up=2) + ada_filtered_64 = torch.nn.functional.interpolate(ada_filtered_64, size=(size * 2 + 2, size * 2 + 2), + mode='bilinear', align_corners=False) + ada_filtered_64 = upfirdn2d.downsample2d(ada_filtered_64, f, down=2, flip_filter=True, padding=-1) + elif filter_mode == 'none': + ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False) + elif type(filter_mode) == float: + assert 0 < filter_mode < 1 + + filtered = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False, antialias=True) + aliased = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', + align_corners=False, antialias=False) + ada_filtered_64 = (1 - filter_mode) * aliased + (filter_mode) * filtered + + return ada_filtered_64 + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DualDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. 
+ channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + disc_c_noise=0, # Corrupt camera parameters with X std dev of noise before disc. pose conditioning. + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. + epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. + ): + super().__init__() + img_channels *= 2 + + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1, 3, 3, 1])) + self.disc_c_noise = disc_c_noise + + def forward(self, img, c, update_emas=False, **block_kwargs): + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + img = torch.cat([img['image'], image_raw], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + if self.disc_c_noise > 0: c += torch.randn_like(c) * c.std(0) * self.disc_c_noise + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DummyDualDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. 
+ cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. + epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. + ): + super().__init__() + img_channels *= 2 + + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1, 3, 3, 1])) + + self.raw_fade = 1 + + def forward(self, img, c, update_emas=False, **block_kwargs): + self.raw_fade = max(0, self.raw_fade - 1 / (500000 / 32)) + + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], + f=self.resample_filter) * self.raw_fade + img = torch.cat([img['image'], image_raw], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +# ---------------------------------------------------------------------------- +from training.networks_stylegan2 import FullyConnectedLayer + + +class PoseShapeAwareDualDiscriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + seg_channels, # Number of input color channels. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim=None, # Dimensionality of mapped conditioning label, None = default. + disc_c_noise=0, # Corrupt camera parameters with X std dev of noise before disc. pose conditioning. + explicitly_symmetry=False, + block_kwargs={}, # Arguments for DiscriminatorBlock. + mapping_kwargs={}, # Arguments for MappingNetwork. 
+ epilogue_kwargs={}, # Arguments for DiscriminatorEpilogue. + ): + super().__init__() + img_channels = img_channels * 2 + seg_channels + self.camera_param_dim = c_dim + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + self.pose_branch = DPoseBranch(num_betas=10, in_channel=channels_dict[4]*4*4) + self.c_dim += self.pose_branch.output_dim + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if self.c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, + **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if self.c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=self.c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, + **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, + **common_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1, 3, 3, 1])) + self.disc_c_noise = disc_c_noise + + self.explicitly_symmetry = explicitly_symmetry + + def flip_yaw(self, matrix): + flipped_matrix = matrix.clone() + flipped = flipped_matrix[:, :16].reshape(-1, 4, 4) + flipped[:, 0, 1] *= -1 + flipped[:, 0, 2] *= -1 + flipped[:, 1, 0] *= -1 + flipped[:, 2, 0] *= -1 + flipped[:, 0, 3] *= -1 + + flipped = flipped.reshape(-1, 16) + flipped_matrix[:, :16] = flipped.clone() + + return flipped_matrix + + def predict_pose(self, img, c,update_emas=False, **block_kwargs): + + + if self.explicitly_symmetry: + theta = torch.atan2(c[:, [11]], c[:, [3]]) # math.atan2(z, x) + is_left = (theta >= -np.pi / 2) & (theta <= np.pi / 2) + + img_flip = torch.flip(img['image'], dims=[3]) + img_flip_raw = torch.flip(img['image_raw'], dims=[3]) + seg_flip = torch.flip(img['image_mask'], dims=[3]) + + is_left_img = is_left.unsqueeze(2).unsqueeze(3) + input_img = torch.where(is_left_img, img_flip, img['image']) # if left, flip image + misc.assert_shape(input_img, img_flip.shape ) + + is_left_img_raw = is_left.unsqueeze(2).unsqueeze(3) + input_img_raw = torch.where(is_left_img_raw, img_flip_raw, img['image_raw']) # if left, flip image_raw + misc.assert_shape(input_img_raw, img_flip_raw.shape ) + + is_left_seg = is_left.unsqueeze(2).unsqueeze(3) + input_seg = torch.where(is_left_seg, seg_flip, img['image_mask']) # if left, flip seg + misc.assert_shape(input_seg, seg_flip.shape ) + + img = {'image': input_img, 'image_raw': input_img_raw, 'image_mask': input_seg} + + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + seg = filtered_resizing(img['image_mask'], size=img['image'].shape[-1], f=self.resample_filter) + seg = 2 * seg - 1 # normalize to [-1,1] + img = torch.cat([img['image'], image_raw, seg], 1) + + _ = update_emas # 
unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + + pose_branch_input_feature = self.b4.get_flatten_x(x, img) + pose_params = self.pose_branch(pose_branch_input_feature, c) + + flip_pose_params = pose_params.clone() + flip_pose_params[:, [1, 2, 4, 5]] *= -1 # flip y and z axis angles + + pose_params = torch.where(is_left, flip_pose_params, pose_params) + + + else: + raise NotImplementedError + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + seg = filtered_resizing(img['image_mask'], size=img['image'].shape[-1], f=self.resample_filter) + seg = 2 * seg - 1 # normalize to [-1,1] + img = torch.cat([img['image'], image_raw, seg], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + + pose_branch_input_feature = self.b4.get_flatten_x(x, img) + pose_params = self.pose_branch(pose_branch_input_feature, c) + + + return pose_params,pose_branch_input_feature + + def forward(self, img, c, gt_pose = None, update_emas=False, **block_kwargs): + + if self.explicitly_symmetry: + + pose_params,_ = self.predict_pose(img, c, update_emas, **block_kwargs) + + image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) + seg = filtered_resizing(img['image_mask'], size=img['image'].shape[-1], f=self.resample_filter) + seg = 2 * seg - 1 # normalize to [-1,1] + img = torch.cat([img['image'], image_raw, seg], 1) + + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + + pose_branch_input_feature = self.b4.get_flatten_x(x, img) + + else: + raise NotImplementedError + pose_params, pose_branch_input_feature = self.predict_pose(img, c, update_emas, **block_kwargs) + + if gt_pose is not None: + #raise NotImplementedError + c = torch.cat([c, gt_pose], dim=1) + else: + pose_label = pose_params.detach() # detach + c = torch.cat([c, pose_label], dim=1) + + cmap = None + if self.c_dim > 0: + if self.disc_c_noise > 0: c += torch.randn_like(c) * c.std(0) * self.disc_c_noise + cmap = self.mapping(None, c) + # x = self.b4(x, img, cmap) + x = self.b4(flatten_x=pose_branch_input_feature, cmap=cmap) + return x, pose_params + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + + +from torch_utils import misc + + +class DPoseBranch(torch.nn.Module): + def __init__(self, num_betas, in_channel): + super().__init__() + self.num_betas = num_betas + hidden_dim = 64 + self.in_channel = in_channel + # + # predict_betas = predict_transl = predict_scale = False + # predict_pose = True + + out_dim = 6 + + # if predict_betas: + # out_dim += num_betas + # if predict_transl: + # out_dim += 3 + # if predict_scale: + # out_dim += 1 + # if predict_pose: + # out_dim += 6 + + self.in_channel += 25 # c dim + + self.output_dim = out_dim + self.net = torch.nn.Sequential( + # linear + # FullyConnectedLayer(self.in_channel, hidden_dim), + # torch.nn.LeakyReLU(0.2), + # FullyConnectedLayer(hidden_dim, self.output_dim) # betas, scale, transl, rots of neck and head + FullyConnectedLayer(self.in_channel, 2048, activation='lrelu'), + FullyConnectedLayer(2048, 512, activation='lrelu'), + FullyConnectedLayer(512, 128, activation='lrelu'), + FullyConnectedLayer(128, 32, activation='lrelu'), + 
FullyConnectedLayer(32, self.output_dim) + ) + + + def forward(self, feature, camera_parameters): + # misc.assert_shape(feature, [None, self.in_channel]) + # misc.assert_shape(camera_parameters, [None, 25]) + feature = torch.cat([feature, camera_parameters], dim=1) + + pose = self.net(feature) # (B, num_betas + 1 + 3 + 6) + + return pose \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/loss.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/loss.py new file mode 100644 index 0000000..9a9cfaa --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/loss.py @@ -0,0 +1,562 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Loss functions.""" + +import numpy as np +import torch +from torch_utils import training_stats +from torch_utils.ops import conv2d_gradfix +from torch_utils.ops import upfirdn2d +from training.dual_discriminator import filtered_resizing +from torch_utils import misc +import copy + + +# ---------------------------------------------------------------------------- + +class Loss: + def accumulate_gradients(self, phase, real_img, real_seg, real_c, real_pose, gen_z, gen_c, gen_pose,gain, cur_nimg, + cur_nimg_start): # to be overridden by subclass + raise NotImplementedError() + + +# ---------------------------------------------------------------------------- + +class StyleGAN2Loss(Loss): + def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, r1_gamma_seg=1000,style_mixing_prob=0, pl_weight=0, + density_noise_fade_kimg=0, + pl_batch_shrink=2, pl_decay=0.01, + pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0, r1_gamma_init=0, r1_gamma_fade_kimg=0, + neural_rendering_resolution_initial=64, neural_rendering_resolution_final=None, + neural_rendering_resolution_fade_kimg=0, + gpc_reg_fade_kimg=1000, gpc_reg_prob=None, dual_discrimination=False, filter_mode='antialiased', + thickness=None, + pose_loss_weight = None, input_pose_params_reg_loss_weight = None,input_pose_params_reg_loss_kimg = None, + rank=None,bcg_reg_prob=0 + ): + super().__init__() + self.device = device + self.G = G + self.D = D + self.augment_pipe = augment_pipe + self.r1_gamma = r1_gamma + self.r1_gamma_seg = r1_gamma_seg + self.style_mixing_prob = style_mixing_prob + self.pl_weight = pl_weight + self.pl_batch_shrink = pl_batch_shrink + self.pl_decay = pl_decay + self.pl_no_weight_grad = pl_no_weight_grad + self.pl_mean = torch.zeros([], device=device) + self.blur_init_sigma = blur_init_sigma + self.blur_fade_kimg = blur_fade_kimg + self.r1_gamma_init = r1_gamma_init + self.r1_gamma_fade_kimg = r1_gamma_fade_kimg + self.neural_rendering_resolution_initial = neural_rendering_resolution_initial + self.neural_rendering_resolution_final = neural_rendering_resolution_final + self.neural_rendering_resolution_fade_kimg = neural_rendering_resolution_fade_kimg + self.density_noise_fade_kimg = density_noise_fade_kimg + self.gpc_reg_fade_kimg = gpc_reg_fade_kimg + self.gpc_reg_prob = gpc_reg_prob + self.dual_discrimination = 
dual_discrimination + self.filter_mode = filter_mode + self.resample_filter = upfirdn2d.setup_filter([1, 3, 3, 1], device=device) + self.blur_raw_target = True + self.bcg_reg_prob = bcg_reg_prob + assert self.gpc_reg_prob is None or (0 <= self.gpc_reg_prob <= 1) + + + self.thickness = thickness + self.pose_loss_weight = pose_loss_weight + self.input_pose_params_reg_loss_weight = input_pose_params_reg_loss_weight + self.input_pose_params_reg_loss_kimg = input_pose_params_reg_loss_kimg + + + # for snap + self.swapping_prob = None + self.neural_rendering_resolution = None + self.blur_sigma = None + + + self.rank = rank + + def run_G(self, z, c, pose_params, swapping_prob, neural_rendering_resolution, update_emas=False): + if swapping_prob is not None: + c_swapped = torch.roll(c.clone(), 1, 0) + p_swapped = torch.roll(pose_params.clone(), 1, 0) + rand_ = torch.rand((c.shape[0], 1), device=c.device) + c_gen_conditioning = torch.where(rand_ < swapping_prob, c_swapped, c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, pose_params) + else: + c_gen_conditioning = torch.zeros_like(c) + pose_params_conditioning = torch.zeros([c.shape[0],6]).to(c.device) + + ws = self.G.mapping(z, c_gen_conditioning, pose_params_conditioning,update_emas=update_emas) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c,pose_params, update_emas=False)[:, cutoff:] + + if self.bcg_reg_prob > 0: + ws_swapped = torch.roll(ws.clone(), 1, 0) + ws_bcg = torch.where(torch.rand((ws.shape[0], 1, 1), device=ws.device) < self.bcg_reg_prob, ws_swapped, ws) + else: + ws_bcg = None + + + gen_output = self.G.synthesis(ws, c, neural_rendering_resolution=neural_rendering_resolution, + update_emas=update_emas, + apply_def=True, pose_params=pose_params,ws_bcg = ws_bcg + ) + return gen_output, ws + + + + def run_D(self, img, c, gt_pose=None, blur_sigma=0, blur_sigma_raw=0, update_emas=False): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div( + blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + + if self.augment_pipe is not None: + raise NotImplementedError + augmented_pair = self.augment_pipe(torch.cat([img['image'], + torch.nn.functional.interpolate(img['image_raw'], + size=img['image'].shape[2:], + mode='bilinear', + antialias=True)], + dim=1)) + img['image'] = augmented_pair[:, :img['image'].shape[1]] + img['image_raw'] = torch.nn.functional.interpolate(augmented_pair[:, img['image'].shape[1]:], + size=img['image_raw'].shape[2:], mode='bilinear', + antialias=True) + + logits, pose = self.D(img, c, gt_pose=gt_pose, update_emas=update_emas) + return logits, pose + + def run_D_pose_prediction(self, img, c, blur_sigma=0): + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + with torch.autograd.profiler.record_function('blur'): + f = torch.arange(-blur_size, blur_size + 1, device=img['image'].device).div( + blur_sigma).square().neg().exp2() + img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum()) + + if self.augment_pipe is not None: + augmented_pair = self.augment_pipe(torch.cat([img['image'], + 
torch.nn.functional.interpolate(img['image_raw'], + size=img['image'].shape[2:], + mode='bilinear', + antialias=True)], + dim=1)) + img['image'] = augmented_pair[:, :img['image'].shape[1]] + img['image_raw'] = torch.nn.functional.interpolate(augmented_pair[:, img['image'].shape[1]:], + size=img['image_raw'].shape[2:], mode='bilinear', + antialias=True) + + pose, _ = self.D.predict_pose(img, c) + return pose + + def get_pose_params_D(self, real_img, real_seg, real_c, cur_nimg): + blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), + 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0 + r1_gamma = self.r1_gamma + + alpha = min(cur_nimg / (self.gpc_reg_fade_kimg * 1e3), 1) if self.gpc_reg_fade_kimg > 0 else 1 + swapping_prob = (1 - alpha) * 1 + alpha * self.gpc_reg_prob if self.gpc_reg_prob is not None else None + + if not isinstance(real_img,dict): + if self.neural_rendering_resolution_final is not None: + alpha = min(cur_nimg / (self.neural_rendering_resolution_fade_kimg * 1e3), 1) + neural_rendering_resolution = int(np.rint(self.neural_rendering_resolution_initial * ( + 1 - alpha) + self.neural_rendering_resolution_final * alpha)) + else: + neural_rendering_resolution = self.neural_rendering_resolution_initial + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + real_seg_raw = filtered_resizing(real_seg, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + if self.blur_raw_target: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw, 'image_mask': real_seg_raw} + + else: + assert 'image_raw' in real_img.keys(), 'image_raw is not in real_img.keys()' + assert 'image' in real_img.keys(), 'image is not in real_img.keys()' + + + # get pose_params from real image + real_img_tmp_image = real_img['image'].detach().requires_grad_(True) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(True) + real_img_tmp_image_mask = real_img['image_mask'].detach().requires_grad_(True) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw, 'image_mask': real_img_tmp_image_mask} + + predicted_real_pose = self.run_D_pose_prediction(real_img_tmp, real_c, blur_sigma=blur_sigma) + return predicted_real_pose + + def get_pose_params_G(self,z,c): + predicted_pose = self.G.get_pose_params(z,c) + return predicted_pose + + def accumulate_gradients(self, phase, real_img, real_seg, real_c, real_pose, + gen_z, gen_c,gen_pose, + gain, cur_nimg, cur_nimg_start): + assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth'] + if self.G.rendering_kwargs.get('density_reg', 0) == 0: + phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase) + if self.r1_gamma == 0: + phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase) + blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), + 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0 + self.blur_sigma = blur_sigma + r1_gamma = self.r1_gamma + self.G.rendering_kwargs["density_noise"] = max(1 - cur_nimg / (self.density_noise_fade_kimg * 1e3), + 0) if self.density_noise_fade_kimg > 0 else 0 + + alpha = min(cur_nimg / (self.gpc_reg_fade_kimg * 1e3), 1) if self.gpc_reg_fade_kimg > 0 else 1 + swapping_prob = (1 - alpha) * 1 + alpha 
* self.gpc_reg_prob if self.gpc_reg_prob is not None else None + self.swapping_prob = swapping_prob + + if self.neural_rendering_resolution_final is not None: + alpha = min((cur_nimg-cur_nimg_start) / (self.neural_rendering_resolution_fade_kimg * 1e3), 1) + neural_rendering_resolution = int(np.rint(self.neural_rendering_resolution_initial * ( + 1 - alpha) + self.neural_rendering_resolution_final * alpha)) + else: + neural_rendering_resolution = self.neural_rendering_resolution_initial + + self.neural_rendering_resolution = neural_rendering_resolution + + real_img_raw = filtered_resizing(real_img, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + real_seg_raw = filtered_resizing(real_seg, size=neural_rendering_resolution, f=self.resample_filter, + filter_mode=self.filter_mode) + + + if self.blur_raw_target: + blur_size = np.floor(blur_sigma * 3) + if blur_size > 0: + f = torch.arange(-blur_size, blur_size + 1, device=real_img_raw.device).div( + blur_sigma).square().neg().exp2() + real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum()) + + real_img = {'image': real_img, 'image_raw': real_img_raw, 'image_mask': real_seg_raw} + + + input_pose_params = self.get_pose_params_G(gen_z,gen_c) + + + for i in range(input_pose_params.shape[1]): + training_stats.report('pose_scale/input_pose_params_{}'.format(i), + (input_pose_params[:, i]).abs().mean() / np.pi * 180) + + + # Gmain: Maximize logits for generated images. + if phase in ['Gmain', 'Gboth']: + with torch.autograd.profiler.record_function('Gmain_forward'): + gen_img, _gen_ws = self.run_G(gen_z, gen_c, input_pose_params, swapping_prob=swapping_prob, + neural_rendering_resolution=neural_rendering_resolution) + + + gen_logits, predict_gen_pose = self.run_D(gen_img, gen_c, gt_pose=None, blur_sigma=blur_sigma) + training_stats.report('Loss/scores/fake_posed', gen_logits) + training_stats.report('Loss/signs/fake_posed', gen_logits.sign()) + loss_Gmain = torch.nn.functional.softplus(-gen_logits) + + # Lpreg: regularization of the pose parameters predicted for the generator input + if self.input_pose_params_reg_loss_weight>0 and cur_nimg<(self.input_pose_params_reg_loss_kimg+200) * 1e3: + ... + + # Density regularization (conditional header reconstructed from the 'monotonic-detach' / 'monotonic-fixed' variants below) + if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs[ + 'reg_type'] == 'l1': + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, input_pose_params,update_emas=False)[:, cutoff:] + initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1 + perturbed_coordinates = initial_coordinates + torch.randn_like(initial_coordinates) * 
self.G.rendering_kwargs['density_reg_p_dist'] + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + TVloss = torch.nn.functional.l1_loss(sigma_initial, sigma_perturbed) * self.G.rendering_kwargs[ + 'density_reg'] + training_stats.report('Loss/G_reg/TVloss_L1', TVloss) + TVloss.mul(gain).backward() + + # Alternative density regularization + if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs[ + 'reg_type'] == 'monotonic-detach': + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + + initial_coordinates = torch.rand((ws.shape[0], 2000, 3), device=ws.device) * 2 - 1 # Front + + perturbed_coordinates = initial_coordinates + torch.tensor([0, 0, -1], device=ws.device) * (1/256) * self.G.rendering_kwargs['box_warp'] # Behind + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + monotonic_loss = torch.relu(sigma_initial.detach() - sigma_perturbed).mean() * 10 + monotonic_loss.mul(gain).backward() + + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + ws = self.G.mapping(gen_z, c_gen_conditioning,pose_params_conditioning, update_emas=False) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, input_pose_params,update_emas=False)[:, cutoff:] + initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1 + perturbed_coordinates = initial_coordinates + torch.randn_like(initial_coordinates) * (1/256) * self.G.rendering_kwargs['box_warp'] + all_coordinates = 
torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + TVloss = torch.nn.functional.l1_loss(sigma_initial, sigma_perturbed) * self.G.rendering_kwargs[ + 'density_reg'] + training_stats.report('Loss/G_reg/TVloss_monotonic-detach', TVloss) + TVloss.mul(gain).backward() + + # Alternative density regularization + if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs[ + 'reg_type'] == 'monotonic-fixed': + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + + initial_coordinates = torch.rand((ws.shape[0], 2000, 3), device=ws.device) * 2 - 1 # Front + + perturbed_coordinates = initial_coordinates + torch.tensor([0, 0, -1], device=ws.device) * (1/256) * self.G.rendering_kwargs['box_warp'] # Behind + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + monotonic_loss = torch.relu(sigma_initial - sigma_perturbed).mean() * 10 + monotonic_loss.mul(gain).backward() + + if swapping_prob is not None: + # c_swapped = torch.roll(gen_c.clone(), 1, 0) + # c_gen_conditioning = torch.where(torch.rand([], device=gen_c.device) < swapping_prob, c_swapped, gen_c) + c_swapped = torch.roll(gen_c.clone(), 1, 0) + p_swapped = torch.roll(input_pose_params.clone(), 1, 0) + rand_ = torch.rand([], device=gen_c.device) + c_gen_conditioning = torch.where( rand_< swapping_prob, c_swapped, gen_c) + pose_params_conditioning = torch.where(rand_ < swapping_prob, p_swapped, input_pose_params) + else: + c_gen_conditioning = torch.zeros_like(gen_c) + pose_params_conditioning = torch.zeros([gen_c.shape[0],6]).to(gen_c.device) + + + ws = self.G.mapping(gen_z, c_gen_conditioning, pose_params_conditioning,update_emas=False) + if self.style_mixing_prob > 0: + with torch.autograd.profiler.record_function('style_mixing'): + cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1]) + cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, + torch.full_like(cutoff, ws.shape[1])) + ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, input_pose_params,update_emas=False)[:, cutoff:] + initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1 + perturbed_coordinates = initial_coordinates + torch.randn_like(initial_coordinates) * (1/256) * self.G.rendering_kwargs['box_warp'] + all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1) + 
sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(all_coordinates), ws, update_emas=False)[ + 'sigma'] + sigma_initial = sigma[:, :sigma.shape[1] // 2] + sigma_perturbed = sigma[:, sigma.shape[1] // 2:] + + TVloss = torch.nn.functional.l1_loss(sigma_initial, sigma_perturbed) * self.G.rendering_kwargs[ + 'density_reg'] + training_stats.report('Loss/G_reg/TVloss_monotonic-fixed', TVloss) + TVloss.mul(gain).backward() + + # Dmain: Minimize logits for generated images. + loss_Dgen = 0 + if phase in ['Dmain', 'Dboth']: + with torch.autograd.profiler.record_function('Dgen_forward'): + + gen_img, _gen_ws = self.run_G(gen_z, gen_c, input_pose_params, swapping_prob=swapping_prob, + neural_rendering_resolution=neural_rendering_resolution, update_emas=True) + gen_logits, predict_gen_pose = self.run_D(gen_img, gen_c, gt_pose=None, blur_sigma=blur_sigma, + update_emas=True) + + training_stats.report('Loss/scores/fake', gen_logits) + training_stats.report('Loss/signs/fake', gen_logits.sign()) + loss_Dgen = torch.nn.functional.softplus( gen_logits) # -log (1 - sigmoid(gen_logits)) = log (1 + exp(gen_logits)) = softplus(gen_logits) + + pose_param_loss = (predict_gen_pose - input_pose_params).square().sum([1]) * self.pose_loss_weight + training_stats.report('Loss/D/Poseloss', pose_param_loss) + + for i in range(predict_gen_pose.shape[1]): + training_stats.report('Loss_pose/fake_{}'.format(i), + (predict_gen_pose[:, i] - input_pose_params[:, i]).abs().mean() / np.pi * 180) + training_stats.report('pose_scale/fake_{}'.format(i), + (predict_gen_pose[:, i]).abs().mean() / np.pi * 180) + + + + + with torch.autograd.profiler.record_function('Dgen_backward'): + (loss_Dgen + pose_param_loss).mean().mul(gain).backward() + + + # Dmain: Maximize logits for real images. + # Dr1: Apply R1 regularization. 
+ if phase in ['Dmain', 'Dreg', 'Dboth']: + name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1' + with torch.autograd.profiler.record_function(name + '_forward'): + real_img_tmp_image = real_img['image'].detach().requires_grad_(phase in ['Dreg', 'Dboth']) + real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(phase in ['Dreg', 'Dboth']) + real_img_tmp_image_mask = real_img['image_mask'].detach().requires_grad_(phase in ['Dreg', 'Dboth']) + real_img_tmp = {'image': real_img_tmp_image, 'image_raw': real_img_tmp_image_raw, 'image_mask': real_img_tmp_image_mask} + + real_logits, predicted_real_pose = self.run_D(real_img_tmp, real_c, + gt_pose=None, + blur_sigma=blur_sigma) + + training_stats.report('Loss/scores/real', real_logits) + training_stats.report('Loss/signs/real', real_logits.sign()) + + + for i in range(predicted_real_pose.shape[1]): + training_stats.report('Loss_pose/real_{}'.format(i), ( + predicted_real_pose[:, i] - real_pose[:, i]).abs().mean() / np.pi * 180) + training_stats.report('pose_scale/real_{}'.format(i), + (predicted_real_pose[:, i]).abs().mean() / np.pi * 180) + + + loss_Dreal = 0 + if phase in ['Dmain', 'Dboth']: + loss_Dreal = torch.nn.functional.softplus( + -real_logits) # - log sigmoid(real_logits) = log (1 + exp(-real_logits)) = softplus(-real_logits) + training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal) + training_stats.report('Loss/D/loss_gen', loss_Dgen) + training_stats.report('Loss/D/loss_real', loss_Dreal) + + + # + + loss_Dr1 = 0 + if phase in ['Dreg', 'Dboth']: + if self.dual_discrimination: + with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients(): + r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], + inputs=[real_img_tmp['image'], real_img_tmp['image_raw'], real_img_tmp['image_mask']], + create_graph=True, only_inputs=True) + r1_grads_image = r1_grads[0] + r1_grads_image_raw = r1_grads[1] + r1_grads_image_mask = r1_grads[2] + r1_penalty = r1_grads_image.square().sum([1,2,3]) + r1_grads_image_raw.square().sum([1,2,3]) + r1_penalty_seg = r1_grads_image_mask.square().sum([1, 2, 3]) + else: # single discrimination + with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients(): + r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp['image'], real_img_tmp['image_mask']], + create_graph=True, only_inputs=True) + r1_grads_image = r1_grads[0] + r1_grads_image_mask = r1_grads[1] + r1_penalty = r1_grads_image.square().sum([1, 2, 3]) + r1_penalty_seg = r1_grads_image_mask.square().sum([1, 2, 3]) + loss_Dr1 = r1_penalty * (self.r1_gamma / 2) + r1_penalty_seg * (self.r1_gamma_seg / 2) + training_stats.report('Loss/r1_penalty', r1_penalty) + training_stats.report('Loss/r1_penalty_seg', r1_penalty_seg) + training_stats.report('Loss/D/reg', loss_Dr1) + + + with torch.autograd.profiler.record_function(name + '_backward'): + (loss_Dreal + loss_Dr1).mean().mul(gain).backward() + +# ---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/networks_stylegan2.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/networks_stylegan2.py new file mode 100644 index 0000000..c3189f5 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/networks_stylegan2.py @@ -0,0 +1,1131 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Network architectures from the paper +"Analyzing and Improving the Image Quality of StyleGAN". +Matches the original implementation of configs E-F by Karras et al. at +https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py""" + + +import numpy as np +import torch +from nerf.torch_utils import misc +from nerf.torch_utils import persistence +from nerf.torch_utils.ops import conv2d_resample +from nerf.torch_utils.ops import upfirdn2d +from nerf.torch_utils.ops import bias_act +from nerf.torch_utils.ops import fma + + + +# ---------------------------------------------------------------------------- + +@misc.profiled_function +def normalize_2nd_moment(x, dim=1, eps=1e-8): + return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt() + + +# ---------------------------------------------------------------------------- + +@misc.profiled_function +def modulated_conv2d( + x, # Input tensor of shape [batch_size, in_channels, in_height, in_width]. + weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width]. + styles, # Modulation coefficients of shape [batch_size, in_channels]. + noise=None, # Optional noise tensor to add to the output activations. + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + padding=0, # Padding with respect to the upsampled image. + resample_filter=None, + # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter(). + demodulate=True, # Apply weight demodulation? + flip_weight=True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d). + fused_modconv=True, # Perform modulation, convolution, and demodulation as a single fused operation? +): + batch_size = x.shape[0] + out_channels, in_channels, kh, kw = weight.shape + misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk] + misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] + misc.assert_shape(styles, [batch_size, in_channels]) # [NI] + + # Pre-normalize inputs to avoid FP16 overflow. + if x.dtype == torch.float16 and demodulate: + weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1, 2, 3], + keepdim=True)) # max_Ikk + styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I + + # Calculate per-sample weights and demodulation coefficients. + w = None + dcoefs = None + if demodulate or fused_modconv: + w = weight.unsqueeze(0) # [NOIkk] + w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk] + if demodulate: + dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO] + if demodulate and fused_modconv: + w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk] + + # Execute by scaling the activations before and after the convolution. 
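+ # i.e. scale the input by the per-sample styles, run an ordinary convolution with the shared weight, then apply the
+ # demodulation coefficients (and optional noise, via a fused multiply-add) to the output, rather than baking the
+ # modulation into per-sample weights as the fused grouped-convolution path further below does.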
+ if not fused_modconv: + x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1) + x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, + padding=padding, flip_weight=flip_weight) + if demodulate and noise is not None: + x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)) + elif demodulate: + x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1) + elif noise is not None: + x = x.add_(noise.to(x.dtype)) + return x + + # Execute as one fused op using grouped convolution. + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + batch_size = int(batch_size) + misc.assert_shape(x, [batch_size, in_channels, None, None]) + x = x.reshape(1, -1, *x.shape[2:]) + w = w.reshape(-1, in_channels, kh, kw) + x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, + groups=batch_size, flip_weight=flip_weight) + x = x.reshape(batch_size, -1, *x.shape[2:]) + if noise is not None: + x = x.add_(noise) + return x + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class FullyConnectedLayer(torch.nn.Module): + def __init__(self, + in_features, # Number of input features. + out_features, # Number of output features. + bias=True, # Apply additive bias before the activation function? + activation='linear', # Activation function: 'relu', 'lrelu', etc. + lr_multiplier=1, # Learning rate multiplier. + bias_init=0, # Initial value for the additive bias. + ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.activation = activation + self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier) + self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None + self.weight_gain = lr_multiplier / np.sqrt(in_features) + self.bias_gain = lr_multiplier + + def forward(self, x): + w = self.weight.to(x.dtype) * self.weight_gain + b = self.bias + if b is not None: + b = b.to(x.dtype) + if self.bias_gain != 1: + b = b * self.bias_gain + + if self.activation == 'linear' and b is not None: + x = torch.addmm(b.unsqueeze(0), x, w.t()) + else: + x = x.matmul(w.t()) + x = bias_act.bias_act(x, b, act=self.activation) + return x + + def extra_repr(self): + return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class Conv2dLayer(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. + kernel_size, # Width and height of the convolution kernel. + bias=True, # Apply additive bias before the activation function? + activation='linear', # Activation function: 'relu', 'lrelu', etc. + up=1, # Integer upsampling factor. + down=1, # Integer downsampling factor. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output to +-X, None = disable clamping. + channels_last=False, # Expect the input to have memory_format=channels_last? + trainable=True, # Update the weights of this layer during training? 
+ ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.activation = activation + self.up = up + self.down = down + self.conv_clamp = conv_clamp + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.padding = kernel_size // 2 + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) + self.act_gain = bias_act.activation_funcs[activation].def_gain + + memory_format = torch.channels_last if channels_last else torch.contiguous_format + weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format) + bias = torch.zeros([out_channels]) if bias else None + if trainable: + self.weight = torch.nn.Parameter(weight) + self.bias = torch.nn.Parameter(bias) if bias is not None else None + else: + self.register_buffer('weight', weight) + if bias is not None: + self.register_buffer('bias', bias) + else: + self.bias = None + + def forward(self, x, gain=1): + w = self.weight * self.weight_gain + b = self.bias.to(x.dtype) if self.bias is not None else None + flip_weight = (self.up == 1) # slightly faster + x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, + padding=self.padding, flip_weight=flip_weight) + + act_gain = self.act_gain * gain + act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None + x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp) + return x + + def extra_repr(self): + return ' '.join([ + f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},', + f'up={self.up}, down={self.down}']) + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class MappingNetwork(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality, 0 = no latent. + c_dim, # Conditioning label (C) dimensionality, 0 = no label. + w_dim, # Intermediate latent (W) dimensionality. + num_ws, # Number of intermediate latents to output, None = do not broadcast. + num_layers=8, # Number of mapping layers. + embed_features=None, # Label embedding dimensionality, None = same as w_dim. + layer_features=None, # Number of intermediate features in the mapping layers, None = same as w_dim. + activation='lrelu', # Activation function: 'relu', 'lrelu', etc. + lr_multiplier=0.01, # Learning rate multiplier for the mapping layers. + w_avg_beta=0.998, # Decay for tracking the moving average of W during training, None = do not track. 
+ ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.num_ws = num_ws + self.num_layers = num_layers + self.w_avg_beta = w_avg_beta + + if embed_features is None: + embed_features = w_dim + if c_dim == 0: + embed_features = 0 + if layer_features is None: + layer_features = w_dim + features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim] + + if c_dim > 0: + self.embed = FullyConnectedLayer(c_dim, embed_features) + for idx in range(num_layers): + in_features = features_list[idx] + out_features = features_list[idx + 1] + layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier) + setattr(self, f'fc{idx}', layer) + + if num_ws is not None and w_avg_beta is not None: + self.register_buffer('w_avg', torch.zeros([w_dim])) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): + # Embed, normalize, and concat inputs. + x = None + with torch.autograd.profiler.record_function('input'): + if self.z_dim > 0: + misc.assert_shape(z, [None, self.z_dim]) + x = normalize_2nd_moment(z.to(torch.float32)) + if self.c_dim > 0: + misc.assert_shape(c, [None, self.c_dim]) + y = normalize_2nd_moment(self.embed(c.to(torch.float32))) + x = torch.cat([x, y], dim=1) if x is not None else y + + # Main layers. + for idx in range(self.num_layers): + layer = getattr(self, f'fc{idx}') + x = layer(x) + + # Update moving average of W. + if update_emas and self.w_avg_beta is not None: + with torch.autograd.profiler.record_function('update_w_avg'): + self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) + + # Broadcast. + if self.num_ws is not None: + with torch.autograd.profiler.record_function('broadcast'): + x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) + + # Apply truncation. + if truncation_psi != 1: + with torch.autograd.profiler.record_function('truncate'): + assert self.w_avg_beta is not None + if self.num_ws is None or truncation_cutoff is None: + x = self.w_avg.lerp(x, truncation_psi) + else: + x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) + return x + + def extra_repr(self): + return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisLayer(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this layer. + kernel_size=3, # Convolution kernel size. + up=1, # Integer upsampling factor. + use_noise=True, # Enable noise input? + activation='lrelu', # Activation function: 'relu', 'lrelu', etc. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + channels_last=False, # Use channels_last format for the weights? 
+ roll_out=None, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.w_dim = w_dim + self.resolution = resolution + self.up = up + self.use_noise = use_noise + self.activation = activation + self.conv_clamp = conv_clamp + self.roll_out = roll_out + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.padding = kernel_size // 2 + self.act_gain = bias_act.activation_funcs[activation].def_gain + + affine_scale = 1 + if self.roll_out in ['b', 'a']: + affine_scale = 9 + elif self.roll_out in ['s']: + affine_scale = 3 + self.affine = FullyConnectedLayer(w_dim, in_channels * affine_scale, bias_init=1) + memory_format = torch.channels_last if channels_last else torch.contiguous_format + self.weight = torch.nn.Parameter(torch.randn( + [out_channels, in_channels * (1, 3)[self.roll_out in ['b', 'a']], + kernel_size, kernel_size]).to(memory_format=memory_format)) + if use_noise: + self.register_buffer('noise_const', torch.randn([resolution, resolution * (1, 3)[self.roll_out == 'w']])) + self.noise_strength = torch.nn.Parameter(torch.zeros([])) + self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + + def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1, **_): + assert noise_mode in ['random', 'const', 'none'] + # noise_mode = 'const' + in_resolution = self.resolution // self.up + misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution * (1, 3)[self.roll_out == 'w']]) + styles = self.affine(w) + if self.roll_out in ['b', 'a', 's']: + styles = styles.view(styles.shape[0], 3, styles.shape[1] // 3).view(styles.shape[0] * 3, + styles.shape[1] // 3) + if self.roll_out in ['b', 'a', ]: + x = aware3d_att(x) if self.roll_out == 'a' else aware3d(x) + noise = None + if self.use_noise and noise_mode == 'random': + noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution * (1, 3)[self.roll_out == 'w']], + device=x.device) * self.noise_strength + if self.use_noise and noise_mode == 'const': + noise = self.noise_const * self.noise_strength + + flip_weight = (self.up == 1) # slightly faster + x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up, + padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, + fused_modconv=fused_modconv) + + act_gain = self.act_gain * gain + act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None + x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp) + return x + + def extra_repr(self): + return ' '.join([ + f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},', + f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}']) + + +def aware3d(x): + if isinstance(x, list): + x_xy, x_yz, x_zx = x + B, _, H, W = x_xy.shape + B *= 3 + else: + x_ = x.view(-1, 3, x.shape[1], x.shape[2], x.shape[3]) + x_xy, x_yz, x_zx = x_[:, 0], x_[:, 1], x_[:, 2] + B, _, H, W = x.shape + x_zy = x_yz.permute(0, 1, 3, 2) + x_xz = x_zx.permute(0, 1, 3, 2) + x_yx = x_xy.permute(0, 1, 3, 2) + + x_zy_pz = x_zy.mean(dim=-1, keepdim=True).repeat(1, 1, 1, x_xy.shape[-1]) + x_xz_pz = x_xz.mean(dim=-2, keepdim=True).repeat(1, 1, x_xy.shape[-2], 1) + x_xy_ = torch.cat([x_xy, x_zy_pz, x_xz_pz], 1) + + x_yx_px = x_yx.mean(dim=-2, keepdim=True).repeat(1, 1, x_yz.shape[-2], 1) + x_xz_px = x_xz.mean(dim=-1, keepdim=True).repeat(1, 1, 1, x_yz.shape[-1]) + x_yz_ = torch.cat([x_yx_px, x_yz, x_xz_px], 1) + 
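+ # zx plane: as above, the xy and yz features are mean-pooled along the y axis (the axis the zx plane lacks) and
+ # concatenated with x_zx, so every plane receives a pooled summary of the other two.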
+ x_yx_py = x_yx.mean(dim=-1, keepdim=True).repeat(1, 1, 1, x_zx.shape[-1]) + x_zy_py = x_zy.mean(dim=-2, keepdim=True).repeat(1, 1, x_zx.shape[-2], 1) + x_zx_ = torch.cat([x_yx_py, x_zy_py, x_zx], 1) + + x = torch.cat([x_xy_[:, None], x_yz_[:, None], x_zx_[:, None]], 1).view(B, -1, H, W) + return x + + +def aware3d_att(x): + x_ = x.view(-1, 3, x.shape[1], x.shape[2], x.shape[3]) + x_cyx, x_czy, x_cxz = x_[:, 0], x_[:, 1], x_[:, 2] + + x_yxc = x_cyx.permute(0, 2, 3, 1) + x_ycz = x_czy.permute(0, 3, 1, 2) + x_yzc = x_czy.permute(0, 3, 2, 1) + x_yxz = torch.einsum('byxc,bycz->byxz', x_yxc, x_ycz) + x_yxz = torch.softmax(x_yxz, dim=-1) + x_cyx_f_czy = torch.einsum('byxz,byzc->byxc', x_yxz, x_yzc).permute(0, 3, 1, 2) + x_xyc = x_cyx.permute(0, 3, 2, 1) + x_xcz = x_cxz.permute(0, 2, 1, 3) + x_xzc = x_cxz.permute(0, 2, 3, 1) + x_xyz = torch.einsum('bxyc,bxcz->bxyz', x_xyc, x_xcz) + x_xyz = torch.softmax(x_xyz, dim=-1) + x_cyx_f_cxz = torch.einsum('bxyz,bxzc->bxyc', x_xyz, x_xzc).permute(0, 3, 2, 1) + x_cyx_ = torch.cat([x_cyx, x_cyx_f_czy, x_cyx_f_cxz], 1) + + x_zyc = x_czy.permute(0, 2, 3, 1) + x_zcx = x_cxz.permute(0, 3, 1, 2) + x_zxc = x_cxz.permute(0, 3, 2, 1) + x_zyx = torch.einsum('bzyc,bzcx->bzyx', x_zyc, x_zcx) + x_zyx = torch.softmax(x_zyx, dim=-1) + x_czy_f_cxz = torch.einsum('bzyx,bzxc->bzyc', x_zyx, x_zxc).permute(0, 3, 1, 2) + x_ycx = x_cyx.permute(0, 2, 1, 3) + x_yzx = torch.einsum('byzc,bycx->byzx', x_yzc, x_ycx) + x_yzx = torch.softmax(x_yzx, dim=-1) + x_czy_f_cyx = torch.einsum('byzx,byxc->byzc', x_yzx, x_yxc).permute(0, 3, 2, 1) + x_czy_ = torch.cat([x_czy, x_czy_f_cxz, x_czy_f_cyx], 1) + + x_xcy = x_cyx.permute(0, 3, 1, 2) + x_xzy = torch.einsum('bxzc,bxcy->bxzy', x_xzc, x_xcy) + x_xzy = torch.softmax(x_xzy, dim=-1) + x_cxz_f_cyx = torch.einsum('bxzy,bxyc->bxzc', x_xzy, x_xyc).permute(0, 3, 1, 2) + x_zcy = x_czy.permute(0, 2, 1, 3) + x_zxy = torch.einsum('bzxc,bzcy->bzxy', x_zxc, x_zcy) + x_zxy = torch.softmax(x_zxy, dim=-1) + x_cxz_f_czy = torch.einsum('bzxy,bzyc->bzxc', x_zxy, x_zyc).permute(0, 3, 2, 1) + x_cxz_ = torch.cat([x_cxz, x_cxz_f_cyx, x_cxz_f_czy], 1) + + x = torch.cat([x_cyx_[:, None], x_czy_[:, None], x_cxz_[:, None]], 1).view(x.shape[0], -1, x.shape[2], x.shape[3]) + return x + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class ToRGBLayer(torch.nn.Module): + def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False, + roll_out=None): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.w_dim = w_dim + self.conv_clamp = conv_clamp + self.roll_out = roll_out + + affine_scale = 1 + if self.roll_out in ['b', 'a']: + affine_scale = 9 + elif self.roll_out in ['s']: + affine_scale = 3 + self.affine = FullyConnectedLayer(w_dim, in_channels * affine_scale, bias_init=1) + memory_format = torch.channels_last if channels_last else torch.contiguous_format + self.weight = torch.nn.Parameter(torch.randn( + [out_channels, in_channels * (1, 3)[self.roll_out in ['b', 'a']], + kernel_size, kernel_size]).to(memory_format=memory_format)) + self.bias = torch.nn.Parameter(torch.zeros([out_channels])) + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) + + def forward(self, x, w, fused_modconv=True): + styles = self.affine(w) * self.weight_gain + if self.roll_out in ['b', 'a', 's']: + styles = styles.view(styles.shape[0], 3, styles.shape[1] // 3).view(styles.shape[0] * 3, + styles.shape[1] // 3) + if 
self.roll_out in ['b', 'a', ]: + x = aware3d_att(x) if self.roll_out == 'a' else aware3d(x) + x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv) + x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp) + return x + + def extra_repr(self): + return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisBlock(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels, 0 = first block. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this block. + img_channels, # Number of output color channels. + is_last, # Is this the last block? + up=2, + architecture='skip', # Architecture: 'orig', 'skip', 'resnet'. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=256, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16=False, # Use FP16 for this block? + fp16_channels_last=False, # Use channels-last memory format with FP16? + fused_modconv_default=True, + # Default value of fused_modconv. 'inference_only' = True for inference, False for training. + roll_out=None, + **layer_kwargs, # Arguments for SynthesisLayer. + ): + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.w_dim = w_dim + self.resolution = resolution + self.img_channels = img_channels + self.is_last = is_last + self.up = up + self.roll_out = roll_out + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = (use_fp16 and fp16_channels_last) + self.fused_modconv_default = fused_modconv_default + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.num_conv = 0 + self.num_torgb = 0 + + if in_channels == 0: + self.const = torch.nn.Parameter(torch.randn([(1, 3)[self.roll_out in ['b', 'a']], out_channels, resolution, + resolution * (1, 3)[self.roll_out == 'w']])) + + if in_channels != 0: + self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=self.up, + roll_out=roll_out, + resample_filter=resample_filter, conv_clamp=conv_clamp, + channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, roll_out=roll_out, + conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + if is_last or architecture == 'skip': + self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, + conv_clamp=conv_clamp, channels_last=self.channels_last, roll_out=self.roll_out) + self.num_torgb += 1 + + if in_channels != 0 and architecture == 'resnet': + self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, + resample_filter=resample_filter, channels_last=self.channels_last) + + def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs): + _ = update_emas # unused + misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim]) + w_iter = iter(ws.unbind(dim=1)) + if ws.device.type != 'cuda': + force_fp32 = True + dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + memory_format = torch.channels_last if self.channels_last and not force_fp32 
else torch.contiguous_format + if fused_modconv is None: + fused_modconv = self.fused_modconv_default + if fused_modconv == 'inference_only': + fused_modconv = (not self.training) + + # Input. + if self.in_channels == 0: + x = self.const.to(dtype=dtype, memory_format=memory_format) + x = x.repeat([ws.shape[0], 1, 1, 1]) + else: + misc.assert_shape(x, [None, self.in_channels, self.resolution // self.up, + self.resolution // self.up * (1, 3)[self.roll_out == 'w']]) + x = x.to(dtype=dtype, memory_format=memory_format) + + # Main layers. + if self.in_channels == 0: + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + elif self.architecture == 'resnet': + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) + x = y.add_(x) + else: + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + + # ToRGB. + if img is not None and self.up > 1: + misc.assert_shape(img, [None, self.img_channels, self.resolution // self.up, + self.resolution // self.up * (1, 3)[self.roll_out == 'w']]) + img = upfirdn2d.upsample2d(img, self.resample_filter) + if self.is_last or self.architecture == 'skip': + y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) + y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) + img = img.add_(y) if img is not None else y + + assert x.dtype == dtype + assert img is None or img.dtype == torch.float32 + return x, img + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class Hierarchy3DAwareSynthesisNetwork(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base=32768, # Overall multiplier for the number of channels. + channel_max=512, # Maximum number of channels in any layer. + num_fp16_res=4, # Use FP16 for the N highest resolutions. + **block_kwargs, # Arguments for SynthesisBlock. 
+ ): + + aware3d_att=False + aware3d_res = [4,8,16,32,64,128,256] + add_block = 0 + + assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 + super().__init__() + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.num_fp16_res = num_fp16_res + self.add_block = add_block + self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)] + # channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + + self.num_ws = 0 + for res in self.block_resolutions: + in_channels = img_channels if res > 4 else 0 + out_channels = img_channels + use_fp16 = (res >= fp16_resolution) + is_last = (res == self.img_resolution) and self.add_block == 0 + block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, + img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs) + self.num_ws += block.num_conv + if is_last: + self.num_ws += block.num_torgb + setattr(self, f'b{res}', block) + if res in aware3d_res: + block3d = Aware3DBlock(img_channels, res, w_dim, aware3d_att, + block_kwargs.copy()) + setattr(self, f'b3d{res}', block3d) + + + def forward(self, ws, **block_kwargs): + block_ws = [] + with torch.autograd.profiler.record_function('split_ws'): + misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) + ws = ws.to(torch.float32) + w_idx = 0 + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) + w_idx += block.num_conv + + x = img = img3d = None + feature_maps = {} + last_has_block3d = False + for res, cur_ws in zip(self.block_resolutions, block_ws): + block = getattr(self, f'b{res}') + block3d = getattr(self, f'b3d{res}', None) + if last_has_block3d and block3d is None: + assert NotImplementedError + img = img3d.view(-1, 3, img3d.shape[-3], img.shape[-2], img.shape[-1]).view(img.shape) + x, img = block(x, img, cur_ws, **block_kwargs) + if block3d is not None: + last_has_block3d = True + img3d = block3d(img3d, img, cur_ws, block_kwargs) + if isinstance(img3d, list): + assert NotImplementedError + else: + feature_maps[res] = img3d + else: + assert NotImplementedError + + return feature_maps + + def extra_repr(self): + return ' '.join([ + f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', + f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', + f'num_fp16_res={self.num_fp16_res:d}']) + + +@persistence.persistent_class +class SR3DBlock(torch.nn.Module): + def __init__(self, img_channels, img_resolution, w_dim, block_kwargs): + super().__init__() + block_kwargs['roll_out'] = 's' + self.block2 = SynthesisBlock(img_channels // 3, img_channels // 3, w_dim=w_dim, resolution=img_resolution * 2, + up=2, + img_channels=32, is_last=True, use_fp16=False, **block_kwargs) + self.block3 = SynthesisBlock(img_channels // 3, img_channels // 3, w_dim=w_dim, resolution=img_resolution * 2, + up=1, + img_channels=32, is_last=True, use_fp16=False, **block_kwargs) + + def forward(self, img, ws): + ws = ws[:, -1:, :].repeat(1, 3, 1) + img = img.view(img.shape[0], 3, -1, img.shape[-2], img.shape[-1]).view(img.shape[0] * 3, -1, img.shape[-2], + img.shape[-1]) + x, img2 = self.block2(img, None, ws) + x, img3 = self.block3(img2, None, ws) + img2 = img2.view(-1, 3, img2.shape[-3], img2.shape[-2], img2.shape[-1]).view(-1, 3 * 
img2.shape[-3], + img2.shape[-2], img2.shape[-1]) + img3 = img3.view(-1, 3, img3.shape[-3], img3.shape[-2], img.shape[-1]).view(-1, 3 * img3.shape[-3], + img3.shape[-2], img3.shape[-1]) + + return [img2, img3] + + +# ---------------------------------------------------------------------------- +@persistence.persistent_class +class Aware3DBlock(torch.nn.Module): + + def __init__(self, img_channels, img_resolution, w_dim, aware3d_att, block_kwargs): + super().__init__() + block_kwargs['roll_out'] = ('b', 'a')[aware3d_att] + up = 2 + self.block = SynthesisBlock(img_channels // 3, img_channels // 3, w_dim=w_dim, resolution=img_resolution * up, + up=up, + img_channels=img_channels // 3, is_last=True, use_fp16=False, **block_kwargs) + + def forward(self, x, img, ws, block_kwargs): + img = img.view(img.shape[0], 3, -1, img.shape[-2], img.shape[-1]).view(img.shape[0] * 3, -1, img.shape[-2], + img.shape[-1]) + if x is not None: + img = img + x + + ws = ws[:, -1:, :].repeat(1, 3, 1) + _, img = self.block(img, None, ws, **block_kwargs) + return img + + +@persistence.persistent_class +class Hierarchy3DAwareGenerator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + mapping_kwargs={}, # Arguments for MappingNetwork. + **synthesis_kwargs, # Arguments for SynthesisNetwork. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.synthesis = Hierarchy3DAwareSynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, + **synthesis_kwargs) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, + update_emas=update_emas) + img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + return img + + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisNetwork(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base = 32768, # Overall multiplier for the number of channels. + channel_max = 512, # Maximum number of channels in any layer. + num_fp16_res = 4, # Use FP16 for the N highest resolutions. + **block_kwargs, # Arguments for SynthesisBlock. 
+ ): + assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 + super().__init__() + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.num_fp16_res = num_fp16_res + self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + self.num_ws = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res // 2] if res > 4 else 0 + out_channels = channels_dict[res] + use_fp16 = (res >= fp16_resolution) + is_last = (res == self.img_resolution) + block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, + img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs) + self.num_ws += block.num_conv + if is_last: + self.num_ws += block.num_torgb + setattr(self, f'b{res}', block) + + def forward(self, ws, **block_kwargs): + block_ws = [] + with torch.autograd.profiler.record_function('split_ws'): + misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) + ws = ws.to(torch.float32) + w_idx = 0 + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) + w_idx += block.num_conv + + x = img = None + for res, cur_ws in zip(self.block_resolutions, block_ws): + block = getattr(self, f'b{res}') + x, img = block(x, img, cur_ws, **block_kwargs) + return img + + def extra_repr(self): + return ' '.join([ + f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', + f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', + f'num_fp16_res={self.num_fp16_res:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class Generator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + mapping_kwargs = {}, # Arguments for MappingNetwork. + **synthesis_kwargs, # Arguments for SynthesisNetwork. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + return img +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DiscriminatorBlock(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels, 0 = first block. + tmp_channels, # Number of intermediate channels. + out_channels, # Number of output channels. + resolution, # Resolution of this block. + img_channels, # Number of input color channels. 
+ first_layer_idx, # Index of the first layer. + architecture='resnet', # Architecture: 'orig', 'skip', 'resnet'. + activation='lrelu', # Activation function: 'relu', 'lrelu', etc. + resample_filter=[1, 3, 3, 1], # Low-pass filter to apply when resampling activations. + conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16=False, # Use FP16 for this block? + fp16_channels_last=False, # Use channels-last memory format with FP16? + freeze_layers=0, # Freeze-D: Number of layers to freeze. + ): + assert in_channels in [0, tmp_channels] + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.resolution = resolution + self.img_channels = img_channels + self.first_layer_idx = first_layer_idx + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = (use_fp16 and fp16_channels_last) + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + + self.num_layers = 0 + + def trainable_gen(): + while True: + layer_idx = self.first_layer_idx + self.num_layers + trainable = (layer_idx >= freeze_layers) + self.num_layers += 1 + yield trainable + + trainable_iter = trainable_gen() + + if in_channels == 0 or architecture == 'skip': + self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation, + trainable=next(trainable_iter), conv_clamp=conv_clamp, + channels_last=self.channels_last) + + self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation, + trainable=next(trainable_iter), conv_clamp=conv_clamp, + channels_last=self.channels_last) + + self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2, + trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, + channels_last=self.channels_last) + + if architecture == 'resnet': + self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2, + trainable=next(trainable_iter), resample_filter=resample_filter, + channels_last=self.channels_last) + + def forward(self, x, img, force_fp32=False): + if (x if x is not None else img).device.type != 'cuda': + force_fp32 = True + dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format + + # Input. + if x is not None: + misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) + x = x.to(dtype=dtype, memory_format=memory_format) + + # FromRGB. + if self.in_channels == 0 or self.architecture == 'skip': + misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) + img = img.to(dtype=dtype, memory_format=memory_format) + y = self.fromrgb(img) + x = x + y if x is not None else y + img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None + + # Main layers. 
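+ # In the 'resnet' path below, both the skip branch and the residual branch are
+ # scaled by sqrt(0.5) before being summed, so the block output keeps roughly
+ # unit variance (the standard StyleGAN2 residual scaling).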
+ if self.architecture == 'resnet': + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x) + x = self.conv1(x, gain=np.sqrt(0.5)) + x = y.add_(x) + else: + x = self.conv0(x) + x = self.conv1(x) + + assert x.dtype == dtype + return x, img + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + + + +#---------------------------------------------------------------------------- + + +@persistence.persistent_class +class MinibatchStdLayer(torch.nn.Module): + def __init__(self, group_size, num_channels=1): + super().__init__() + self.group_size = group_size + self.num_channels = num_channels + + def forward(self, x): + N, C, H, W = x.shape + with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants + G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N + F = self.num_channels + c = C // F + + y = x.reshape(G, -1, F, c, H, + W) # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c. + y = y - y.mean(dim=0) # [GnFcHW] Subtract mean over group. + y = y.square().mean(dim=0) # [nFcHW] Calc variance over group. + y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group. + y = y.mean(dim=[2, 3, 4]) # [nF] Take average over channels and pixels. + y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions. + y = y.repeat(G, 1, H, W) # [NFHW] Replicate over group and pixels. + x = torch.cat([x, y], dim=1) # [NCHW] Append to input as new channels. + return x + + def extra_repr(self): + return f'group_size={self.group_size}, num_channels={self.num_channels:d}' + + +# ---------------------------------------------------------------------------- + +@persistence.persistent_class +class DiscriminatorEpilogue(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels. + cmap_dim, # Dimensionality of mapped conditioning label, 0 = no label. + resolution, # Resolution of this block. + img_channels, # Number of input color channels. + architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. + mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, None = entire minibatch. + mbstd_num_channels = 1, # Number of features for the minibatch standard deviation layer, 0 = disable. + activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. + conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. + ): + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.cmap_dim = cmap_dim + self.resolution = resolution + self.img_channels = img_channels + self.architecture = architecture + + if architecture == 'skip': + self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation) + self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None + self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp) + self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation) + self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim) + + + def get_flatten_x(self, x, img, force_fp32=False): + misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW] + _ = force_fp32 # unused + dtype = torch.float32 + memory_format = torch.contiguous_format + + # FromRGB. 
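+ # The epilogue always runs in FP32 (force_fp32 is ignored above); with the 'skip'
+ # architecture the raw image is folded back in through a 1x1 fromrgb convolution
+ # before the minibatch-stddev layer.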
+ x = x.to(dtype=dtype, memory_format=memory_format) + if self.architecture == 'skip': + misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) + img = img.to(dtype=dtype, memory_format=memory_format) + x = x + self.fromrgb(img) + + # Main layers. + if self.mbstd is not None: + x = self.mbstd(x) + x = self.conv(x) + + flatten_x = x.flatten(1) + + return flatten_x + + def forward(self, flatten_x, cmap, force_fp32=False): + # misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW] + # _ = force_fp32 # unused + # dtype = torch.float32 + # memory_format = torch.contiguous_format + # + # # FromRGB. + # x = x.to(dtype=dtype, memory_format=memory_format) + # if self.architecture == 'skip': + # misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) + # img = img.to(dtype=dtype, memory_format=memory_format) + # x = x + self.fromrgb(img) + # + # # Main layers. + # if self.mbstd is not None: + # x = self.mbstd(x) + # x = self.conv(x) + + misc.assert_shape(flatten_x, [None, self.in_channels * self.resolution * self.resolution]) + dtype = torch.float32 + + x = self.fc(flatten_x) + x = self.out(x) + + # Conditioning. + if self.cmap_dim > 0: + misc.assert_shape(cmap, [None, self.cmap_dim]) + x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) + + assert x.dtype == dtype + return x + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class Discriminator(torch.nn.Module): + def __init__(self, + c_dim, # Conditioning label (C) dimensionality. + img_resolution, # Input resolution. + img_channels, # Number of input color channels. + architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. + channel_base = 32768, # Overall multiplier for the number of channels. + channel_max = 512, # Maximum number of channels in any layer. + num_fp16_res = 4, # Use FP16 for the N highest resolutions. + conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. + cmap_dim = None, # Dimensionality of mapped conditioning label, None = default. + block_kwargs = {}, # Arguments for DiscriminatorBlock. + mapping_kwargs = {}, # Arguments for MappingNetwork. + epilogue_kwargs = {}, # Arguments for DiscriminatorEpilogue. 
+ ): + super().__init__() + self.c_dim = c_dim + self.img_resolution = img_resolution + self.img_resolution_log2 = int(np.log2(img_resolution)) + self.img_channels = img_channels + self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] + channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} + fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) + + if cmap_dim is None: + cmap_dim = channels_dict[4] + if c_dim == 0: + cmap_dim = 0 + + common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) + cur_layer_idx = 0 + for res in self.block_resolutions: + in_channels = channels_dict[res] if res < img_resolution else 0 + tmp_channels = channels_dict[res] + out_channels = channels_dict[res // 2] + use_fp16 = (res >= fp16_resolution) + block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, + first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs) + setattr(self, f'b{res}', block) + cur_layer_idx += block.num_layers + if c_dim > 0: + self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs) + self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs) + + def forward(self, img, c, update_emas=False, **block_kwargs): + _ = update_emas # unused + x = None + for res in self.block_resolutions: + block = getattr(self, f'b{res}') + x, img = block(x, img, **block_kwargs) + + cmap = None + if self.c_dim > 0: + cmap = self.mapping(None, c) + x = self.b4(x, img, cmap) + return x + + def extra_repr(self): + return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/networks_stylegan3.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/networks_stylegan3.py new file mode 100644 index 0000000..40e5508 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/networks_stylegan3.py @@ -0,0 +1,517 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +"""Generator architecture from the paper +"Alias-Free Generative Adversarial Networks".""" + +import numpy as np +import scipy.signal +import scipy.optimize +import torch +from torch_utils import misc +from torch_utils import persistence +from torch_utils.ops import conv2d_gradfix +from torch_utils.ops import filtered_lrelu +from torch_utils.ops import bias_act + +#---------------------------------------------------------------------------- + +@misc.profiled_function +def modulated_conv2d( + x, # Input tensor: [batch_size, in_channels, in_height, in_width] + w, # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width] + s, # Style tensor: [batch_size, in_channels] + demodulate = True, # Apply weight demodulation? + padding = 0, # Padding: int or [padH, padW] + input_gain = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels] +): + with misc.suppress_tracer_warnings(): # this value will be treated as a constant + batch_size = int(x.shape[0]) + out_channels, in_channels, kh, kw = w.shape + misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk] + misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] + misc.assert_shape(s, [batch_size, in_channels]) # [NI] + + # Pre-normalize inputs. + if demodulate: + w = w * w.square().mean([1,2,3], keepdim=True).rsqrt() + s = s * s.square().mean().rsqrt() + + # Modulate weights. + w = w.unsqueeze(0) # [NOIkk] + w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Demodulate weights. + if demodulate: + dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO] + w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Apply input scaling. + if input_gain is not None: + input_gain = input_gain.expand(batch_size, in_channels) # [NI] + w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] + + # Execute as one fused op using grouped convolution. + x = x.reshape(1, -1, *x.shape[2:]) + w = w.reshape(-1, in_channels, kh, kw) + x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size) + x = x.reshape(batch_size, -1, *x.shape[2:]) + return x + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class FullyConnectedLayer(torch.nn.Module): + def __init__(self, + in_features, # Number of input features. + out_features, # Number of output features. + activation = 'linear', # Activation function: 'relu', 'lrelu', etc. + bias = True, # Apply additive bias before the activation function? + lr_multiplier = 1, # Learning rate multiplier. + weight_init = 1, # Initial standard deviation of the weight tensor. + bias_init = 0, # Initial value of the additive bias. 
+ ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.activation = activation + self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) * (weight_init / lr_multiplier)) + bias_init = np.broadcast_to(np.asarray(bias_init, dtype=np.float32), [out_features]) + self.bias = torch.nn.Parameter(torch.from_numpy(bias_init / lr_multiplier)) if bias else None + self.weight_gain = lr_multiplier / np.sqrt(in_features) + self.bias_gain = lr_multiplier + + def forward(self, x): + w = self.weight.to(x.dtype) * self.weight_gain + b = self.bias + if b is not None: + b = b.to(x.dtype) + if self.bias_gain != 1: + b = b * self.bias_gain + if self.activation == 'linear' and b is not None: + x = torch.addmm(b.unsqueeze(0), x, w.t()) + else: + x = x.matmul(w.t()) + x = bias_act.bias_act(x, b, act=self.activation) + return x + + def extra_repr(self): + return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class MappingNetwork(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality, 0 = no labels. + w_dim, # Intermediate latent (W) dimensionality. + num_ws, # Number of intermediate latents to output. + num_layers = 2, # Number of mapping layers. + lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers. + w_avg_beta = 0.998, # Decay for tracking the moving average of W during training. + ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.num_ws = num_ws + self.num_layers = num_layers + self.w_avg_beta = w_avg_beta + + # Construct layers. + self.embed = FullyConnectedLayer(self.c_dim, self.w_dim) if self.c_dim > 0 else None + features = [self.z_dim + (self.w_dim if self.c_dim > 0 else 0)] + [self.w_dim] * self.num_layers + for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]): + layer = FullyConnectedLayer(in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier) + setattr(self, f'fc{idx}', layer) + self.register_buffer('w_avg', torch.zeros([w_dim])) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): + misc.assert_shape(z, [None, self.z_dim]) + if truncation_cutoff is None: + truncation_cutoff = self.num_ws + + # Embed, normalize, and concatenate inputs. + x = z.to(torch.float32) + x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt() + if self.c_dim > 0: + misc.assert_shape(c, [None, self.c_dim]) + y = self.embed(c.to(torch.float32)) + y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt() + x = torch.cat([x, y], dim=1) if x is not None else y + + # Execute layers. + for idx in range(self.num_layers): + x = getattr(self, f'fc{idx}')(x) + + # Update moving average of W. + if update_emas: + self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) + + # Broadcast and apply truncation. 
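+ # Truncation trick: w is blended toward the running average w_avg,
+ # i.e. w' = w_avg + psi * (w - w_avg), applied only to the first
+ # `truncation_cutoff` of the num_ws broadcast copies.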
+ x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) + if truncation_psi != 1: + x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) + return x + + def extra_repr(self): + return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisInput(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + channels, # Number of output channels. + size, # Output spatial size: int or [width, height]. + sampling_rate, # Output sampling rate. + bandwidth, # Output bandwidth. + ): + super().__init__() + self.w_dim = w_dim + self.channels = channels + self.size = np.broadcast_to(np.asarray(size), [2]) + self.sampling_rate = sampling_rate + self.bandwidth = bandwidth + + # Draw random frequencies from uniform 2D disc. + freqs = torch.randn([self.channels, 2]) + radii = freqs.square().sum(dim=1, keepdim=True).sqrt() + freqs /= radii * radii.square().exp().pow(0.25) + freqs *= bandwidth + phases = torch.rand([self.channels]) - 0.5 + + # Setup parameters and buffers. + self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels])) + self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0]) + self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image. + self.register_buffer('freqs', freqs) + self.register_buffer('phases', phases) + + def forward(self, w): + # Introduce batch dimension. + transforms = self.transform.unsqueeze(0) # [batch, row, col] + freqs = self.freqs.unsqueeze(0) # [batch, channel, xy] + phases = self.phases.unsqueeze(0) # [batch, channel] + + # Apply learned transformation. + t = self.affine(w) # t = (r_c, r_s, t_x, t_y) + t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y) + m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image. + m_r[:, 0, 0] = t[:, 0] # r'_c + m_r[:, 0, 1] = -t[:, 1] # r'_s + m_r[:, 1, 0] = t[:, 1] # r'_s + m_r[:, 1, 1] = t[:, 0] # r'_c + m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image. + m_t[:, 0, 2] = -t[:, 2] # t'_x + m_t[:, 1, 2] = -t[:, 3] # t'_y + transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform. + + # Transform frequencies. + phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2) + freqs = freqs @ transforms[:, :2, :2] + + # Dampen out-of-band frequencies that may occur due to the user-specified transform. + amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1) + + # Construct sampling grid. + theta = torch.eye(2, 3, device=w.device) + theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate + theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate + grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False) + + # Compute Fourier features. + x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel] + x = x + phases.unsqueeze(1).unsqueeze(2) + x = torch.sin(x * (np.pi * 2)) + x = x * amplitudes.unsqueeze(1).unsqueeze(2) + + # Apply trainable mapping. + weight = self.weight / np.sqrt(self.channels) + x = x @ weight.t() + + # Ensure correct shape. 
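+ # The final output is an NCHW grid of transformed Fourier features, one per output channel.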
+ x = x.permute(0, 3, 1, 2) # [batch, channel, height, width] + misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])]) + return x + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},', + f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisLayer(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + is_torgb, # Is this the final ToRGB layer? + is_critically_sampled, # Does this layer use critical sampling? + use_fp16, # Does this layer use FP16? + + # Input & output specifications. + in_channels, # Number of input channels. + out_channels, # Number of output channels. + in_size, # Input spatial size: int or [width, height]. + out_size, # Output spatial size: int or [width, height]. + in_sampling_rate, # Input sampling rate (s). + out_sampling_rate, # Output sampling rate (s). + in_cutoff, # Input cutoff frequency (f_c). + out_cutoff, # Output cutoff frequency (f_c). + in_half_width, # Input transition band half-width (f_h). + out_half_width, # Output Transition band half-width (f_h). + + # Hyperparameters. + conv_kernel = 3, # Convolution kernel size. Ignored for final the ToRGB layer. + filter_size = 6, # Low-pass filter size relative to the lower resolution when up/downsampling. + lrelu_upsampling = 2, # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer. + use_radial_filters = False, # Use radially symmetric downsampling filter? Ignored for critically sampled layers. + conv_clamp = 256, # Clamp the output to [-X, +X], None = disable clamping. + magnitude_ema_beta = 0.999, # Decay rate for the moving average of input magnitudes. + ): + super().__init__() + self.w_dim = w_dim + self.is_torgb = is_torgb + self.is_critically_sampled = is_critically_sampled + self.use_fp16 = use_fp16 + self.in_channels = in_channels + self.out_channels = out_channels + self.in_size = np.broadcast_to(np.asarray(in_size), [2]) + self.out_size = np.broadcast_to(np.asarray(out_size), [2]) + self.in_sampling_rate = in_sampling_rate + self.out_sampling_rate = out_sampling_rate + self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling) + self.in_cutoff = in_cutoff + self.out_cutoff = out_cutoff + self.in_half_width = in_half_width + self.out_half_width = out_half_width + self.conv_kernel = 1 if is_torgb else conv_kernel + self.conv_clamp = conv_clamp + self.magnitude_ema_beta = magnitude_ema_beta + + # Setup parameters and buffers. + self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1) + self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel])) + self.bias = torch.nn.Parameter(torch.zeros([self.out_channels])) + self.register_buffer('magnitude_ema', torch.ones([])) + + # Design upsampling filter. + self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate)) + assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate + self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1 + self.register_buffer('up_filter', self.design_lowpass_filter( + numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate)) + + # Design downsampling filter. 
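+ # Mirror of the upsampling filter above: the signal is low-pass filtered at the
+ # output cutoff before being decimated from tmp_sampling_rate back down to
+ # out_sampling_rate.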
+ self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate)) + assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate + self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1 + self.down_radial = use_radial_filters and not self.is_critically_sampled + self.register_buffer('down_filter', self.design_lowpass_filter( + numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial)) + + # Compute padding. + pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling. + pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling. + pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters. + pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3). + pad_hi = pad_total - pad_lo + self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])] + + def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False): + assert noise_mode in ['random', 'const', 'none'] # unused + misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])]) + misc.assert_shape(w, [x.shape[0], self.w_dim]) + + # Track input magnitude. + if update_emas: + with torch.autograd.profiler.record_function('update_magnitude_ema'): + magnitude_cur = x.detach().to(torch.float32).square().mean() + self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta)) + input_gain = self.magnitude_ema.rsqrt() + + # Execute affine layer. + styles = self.affine(w) + if self.is_torgb: + weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2)) + styles = styles * weight_gain + + # Execute modulated conv2d. + dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32 + x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles, + padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain) + + # Execute bias, filtered leaky ReLU, and clamping. + gain = 1 if self.is_torgb else np.sqrt(2) + slope = 1 if self.is_torgb else 0.2 + x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype), + up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp) + + # Ensure correct shape and dtype. + misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])]) + assert x.dtype == dtype + return x + + @staticmethod + def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False): + assert numtaps >= 1 + + # Identity filter. + if numtaps == 1: + return None + + # Separable Kaiser low-pass filter. + if not radial: + f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs) + return torch.as_tensor(f, dtype=torch.float32) + + # Radially symmetric jinc-based filter. 
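+ # 2-D analogue of the sinc filter: a first-order Bessel (jinc) kernel, windowed
+ # with a separable Kaiser window and normalized to unit DC gain.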
+ x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs + r = np.hypot(*np.meshgrid(x, x)) + f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r) + beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2))) + w = np.kaiser(numtaps, beta) + f *= np.outer(w, w) + f /= np.sum(f) + return torch.as_tensor(f, dtype=torch.float32) + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},', + f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},', + f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},', + f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},', + f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},', + f'in_size={list(self.in_size)}, out_size={list(self.out_size)},', + f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisNetwork(torch.nn.Module): + def __init__(self, + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output image resolution. + img_channels, # Number of color channels. + channel_base = 32768, # Overall multiplier for the number of channels. + channel_max = 512, # Maximum number of channels in any layer. + num_layers = 14, # Total number of layers, excluding Fourier features and ToRGB. + num_critical = 2, # Number of critically sampled layers at the end. + first_cutoff = 2, # Cutoff frequency of the first layer (f_{c,0}). + first_stopband = 2**2.1, # Minimum stopband of the first layer (f_{t,0}). + last_stopband_rel = 2**0.3, # Minimum stopband of the last layer, expressed relative to the cutoff. + margin_size = 10, # Number of additional pixels outside the image. + output_scale = 0.25, # Scale factor for the output image. + num_fp16_res = 4, # Use FP16 for the N highest resolutions. + **layer_kwargs, # Arguments for SynthesisLayer. + ): + super().__init__() + self.w_dim = w_dim + self.num_ws = num_layers + 2 + self.img_resolution = img_resolution + self.img_channels = img_channels + self.num_layers = num_layers + self.num_critical = num_critical + self.margin_size = margin_size + self.output_scale = output_scale + self.num_fp16_res = num_fp16_res + + # Geometric progression of layer cutoffs and min. stopbands. + last_cutoff = self.img_resolution / 2 # f_{c,N} + last_stopband = last_cutoff * last_stopband_rel # f_{t,N} + exponents = np.minimum(np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1) + cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents # f_c[i] + stopbands = first_stopband * (last_stopband / first_stopband) ** exponents # f_t[i] + + # Compute remaining layer parameters. + sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i] + half_widths = np.maximum(stopbands, sampling_rates / 2) - cutoffs # f_h[i] + sizes = sampling_rates + self.margin_size * 2 + sizes[-2:] = self.img_resolution + channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max)) + channels[-1] = self.img_channels + + # Construct layers. 
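+ # One SynthesisInput (Fourier features) followed by num_layers + 1 SynthesisLayer
+ # modules; the final layer acts as ToRGB. Layers are registered under names of the
+ # form L{idx}_{out_size}_{out_channels}.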
+ self.input = SynthesisInput( + w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]), + sampling_rate=sampling_rates[0], bandwidth=cutoffs[0]) + self.layer_names = [] + for idx in range(self.num_layers + 1): + prev = max(idx - 1, 0) + is_torgb = (idx == self.num_layers) + is_critically_sampled = (idx >= self.num_layers - self.num_critical) + use_fp16 = (sampling_rates[idx] * (2 ** self.num_fp16_res) > self.img_resolution) + layer = SynthesisLayer( + w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16, + in_channels=int(channels[prev]), out_channels= int(channels[idx]), + in_size=int(sizes[prev]), out_size=int(sizes[idx]), + in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]), + in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx], + in_half_width=half_widths[prev], out_half_width=half_widths[idx], + **layer_kwargs) + name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}' + setattr(self, name, layer) + self.layer_names.append(name) + + def forward(self, ws, **layer_kwargs): + misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) + ws = ws.to(torch.float32).unbind(dim=1) + + # Execute layers. + x = self.input(ws[0]) + for name, w in zip(self.layer_names, ws[1:]): + x = getattr(self, name)(x, w, **layer_kwargs) + if self.output_scale != 1: + x = x * self.output_scale + + # Ensure correct shape and dtype. + misc.assert_shape(x, [None, self.img_channels, self.img_resolution, self.img_resolution]) + x = x.to(torch.float32) + return x + + def extra_repr(self): + return '\n'.join([ + f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', + f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', + f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},', + f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}']) + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class Generator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + mapping_kwargs = {}, # Arguments for MappingNetwork. + **synthesis_kwargs, # Arguments for SynthesisNetwork. 
+ ): + super().__init__() + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) + self.num_ws = self.synthesis.num_ws + self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): + ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) + img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + return img + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/neural_render.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/neural_render.py new file mode 100644 index 0000000..7b28da5 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/neural_render.py @@ -0,0 +1,175 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import math +import torch +from nerf.torch_utils import persistence +from nerf.trigrid_rendering.networks_stylegan2 import ToRGBLayer, FullyConnectedLayer + +from nerf.trigrid_rendering.volumetric_rendering.renderer import ImportanceRenderer +from nerf.trigrid_rendering.volumetric_rendering.ray_sampler import RaySampler +import numpy as np + +@persistence.persistent_class +class NeuralRender(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + + img_resolution, # Output resolution. + img_channels, # Number of output color channels. 
+ rendering_kwargs={}, + batch_size=1, + thickness=0.05, + apply_deformation = False + ): + super().__init__() + + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + + self.trigrid_channel = 12 + self.decode_channel = 32 + + self.batch_size = batch_size + self.renderer = ImportanceRenderer(w_dim=w_dim, num_ws=14, batch_size=self.batch_size, thickness=thickness, + box_warp=rendering_kwargs['box_warp'],apply_deformation = apply_deformation) # disable deformation for now + self.ray_sampler = RaySampler() + + self.decoder = OSGDecoder(self.trigrid_channel, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), + 'decoder_output_dim': self.decode_channel, + 'decoder_activation': rendering_kwargs['decoder_activation']}) + + self.torgb = ToRGBLayer(self.decode_channel, 3, w_dim) + + self.rendering_kwargs = rendering_kwargs + self.neural_rendering_resolution = 64 + + self.pose_branch = GPoseBranch(z_dim=z_dim, c_dim=c_dim) + + self.avg_c = torch.tensor([[1.0000e+00, 1.0505e-09, 4.3685e-08, -1.1805e-07, 0.0000e+00, + -9.9951e-01, 2.4033e-02, -1.1805e-07, 4.3714e-08, -2.4033e-02, + -9.9951e-01, 2.6992e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 1.0000e+00, 6.5104e+00, 0.0000e+00, 5.0000e-01, 0.0000e+00, + 6.5104e+00, 5.0000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+00]]).float().cuda() + def flip_yaw(self, matrix): + flipped_matrix = matrix.clone() + flipped = flipped_matrix[:, :16].reshape(-1, 4, 4) + flipped[:, 0, 1] *= -1 + flipped[:, 0, 2] *= -1 + flipped[:, 1, 0] *= -1 + flipped[:, 2, 0] *= -1 + flipped[:, 0, 3] *= -1 + + flipped = flipped.reshape(-1, 16) + flipped_matrix[:, :16] = flipped.clone() + + return flipped_matrix + def sample_pose_params(self, c): + assert len(c.shape) == 2 and c.shape[1] == 25 + # randomly sample z from Gaussian distribution + z = torch.randn(c.shape[0], self.z_dim).to(c.device) + + theta = torch.atan2(c[:, [11]], c[:, [3]]) # math.atan2(z, x) + is_left = (theta >= -np.pi / 2) & (theta <= np.pi / 2) + + flip_c = self.flip_yaw(c) + input_c = torch.where(is_left, flip_c, c) # if left, flip c + + pose_params = self.pose_branch(z, input_c) + + flip_pose_params = pose_params.clone() + flip_pose_params[:, [1, 2, 4, 5]] *= -1 # flip y and z axis angles + + pose_params = torch.where(is_left, flip_pose_params, pose_params) # if left, flip back pose_params + + return pose_params + + +class OSGDecoder(torch.nn.Module): + def __init__(self, n_features, options): + super().__init__() + self.hidden_dim = 32 + + self.net = torch.nn.Sequential( + FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']), + torch.nn.Softplus(), + FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], + lr_multiplier=options['decoder_lr_mul']) + ) + self.activation = options['decoder_activation'] + + + + + def forward(self, sampled_features, ray_directions): + # Aggregate features + sampled_features = sampled_features.mean(1) + x = sampled_features + + N, M, C = x.shape + x = x.view(N * M, C) + + x = self.net(x) + x = x.view(N, M, -1) + rgb = x[..., 1:] + sigma = x[..., 0:1] + + + if self.activation == "sigmoid": + # Original EG3D + rgb = torch.sigmoid(rgb) * (1 + 2 * 0.001) - 0.001 + elif self.activation == "lrelu": + # StyleGAN2-style, use with toRGB + rgb = torch.nn.functional.leaky_relu(rgb, 0.2, inplace=True) * math.sqrt(2) + return {'rgb': rgb, 'sigma': sigma} + +import numpy as np +class GPoseBranch(torch.nn.Module): + def __init__(self, z_dim, c_dim): + 
super().__init__() + hidden_dim = 64 + self.in_channel = z_dim + c_dim + # + # predict_betas = predict_transl = predict_scale = False + # predict_pose = True + + out_dim = 6 + + # if predict_betas: + # out_dim += num_betas + # if predict_transl: + # out_dim += 3 + # if predict_scale: + # out_dim += 1 + # if predict_pose: + # out_dim += 6 + + self.output_dim = out_dim + self.net = torch.nn.Sequential( + FullyConnectedLayer(self.in_channel, 128, activation='lrelu'), + FullyConnectedLayer(128, 32, activation='lrelu'), + FullyConnectedLayer(32, self.output_dim) + ) + + + def forward(self, z, c): + # misc.assert_shape(feature, [None, self.in_channel]) + # misc.assert_shape(camera_parameters, [None, 25]) + feature = torch.cat([z, c], dim=1) + + pose = self.net(feature) # (B, num_betas + 1 + 3 + 6) + + + return pose \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/smpl_triplane.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/smpl_triplane.py new file mode 100644 index 0000000..e162a0a --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/smpl_triplane.py @@ -0,0 +1,367 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import math +import torch +from torch_utils import persistence +from training.networks_stylegan2 import ToRGBLayer, SynthesisNetwork + +from training.networks_stylegan2 import Hierarchy3DAwareGenerator as StyleGAN2Backbone +from training.volumetric_rendering.renderer import ImportanceRenderer +from training.volumetric_rendering.ray_sampler import RaySampler +import dnnlib +import numpy as np + +@persistence.persistent_class +class TriPlaneGenerator(torch.nn.Module): + def __init__(self, + z_dim, # Input latent (Z) dimensionality. + c_dim, # Conditioning label (C) dimensionality. + w_dim, # Intermediate latent (W) dimensionality. + img_resolution, # Output resolution. + img_channels, # Number of output color channels. + sr_num_fp16_res=0, + mapping_kwargs={}, # Arguments for MappingNetwork. + rendering_kwargs={}, + sr_kwargs={}, + batch_size=1, + explicitly_symmetry=False, + thickness=0.05, + **synthesis_kwargs, # Arguments for SynthesisNetwork. 
+ ): + super().__init__() + bcg_synthesis_kwargs = synthesis_kwargs.copy() + bcg_synthesis_kwargs["channel_base"] = 32768 + bcg_synthesis_kwargs["channel_max"] = 512 + + self.z_dim = z_dim + self.c_dim = c_dim + self.w_dim = w_dim + self.img_resolution = img_resolution + self.img_channels = img_channels + + self.trigrid_channel = 12 + self.decode_channel = 32 + + self.batch_size = batch_size + self.renderer = ImportanceRenderer(w_dim = w_dim, num_ws = 14, batch_size = self.batch_size,thickness =thickness,box_warp = rendering_kwargs['box_warp']) + self.ray_sampler = RaySampler() + # self.backbone = StyleGAN2Backbone(z_dim, c_dim+6, w_dim, img_resolution=512, img_channels=self.trigrid_channel*3*rendering_kwargs['triplane_depth'], mapping_kwargs=mapping_kwargs, **synthesis_kwargs) + self.backbone = StyleGAN2Backbone(z_dim, c_dim + 6, w_dim, img_resolution=256, + img_channels=self.trigrid_channel * 3 * rendering_kwargs['triplane_depth'], + mapping_kwargs=mapping_kwargs, roll_out=None, + **synthesis_kwargs) # forbid roll_out in main G + + self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=self.decode_channel, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs) + self.decoder = OSGDecoder(self.trigrid_channel, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), + 'decoder_output_dim': self.decode_channel, + 'decoder_activation': rendering_kwargs['decoder_activation']}) + + self.torgb = ToRGBLayer(self.decode_channel, 3, w_dim) if rendering_kwargs.get('use_torgb_raw', False) else None + + self.bcg_synthesis = SynthesisNetwork(w_dim, img_resolution=self.superresolution.input_resolution, + img_channels=self.decode_channel, + **bcg_synthesis_kwargs) if rendering_kwargs.get('use_background', + False) else None + + self.pose_branch = GPoseBranch(z_dim=z_dim, c_dim=c_dim) + self.neural_rendering_resolution = 64 + self.rendering_kwargs = rendering_kwargs + + self._last_planes = None + + self.explicitly_symmetry = explicitly_symmetry + + self.avg_c = torch.tensor([[1.0000e+00, 1.0505e-09, 4.3685e-08, -1.1805e-07, 0.0000e+00, + -9.9951e-01, 2.4033e-02, -1.1805e-07, 4.3714e-08, -2.4033e-02, + -9.9951e-01, 2.6992e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 1.0000e+00, 6.5104e+00, 0.0000e+00, 5.0000e-01, 0.0000e+00, + 6.5104e+00, 5.0000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+00]]).float().cuda() + + self.plane_shapes = {} + + planes = self.backbone.synthesis(torch.zeros(4, self.backbone.synthesis.num_ws, w_dim), update_emas=False, + **synthesis_kwargs) + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + for res_k in planes: + # b, c, H,W + # planes[res_k] = planes[res_k].view(len(planes[res_k]), 3, -1, planes[res_k].shape[-2], planes[res_k].shape[-1]) + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + if res_k not in self.plane_shapes: + self.plane_shapes[res_k] = planes[res_k].shape + + def flip_yaw(self, matrix): + flipped_matrix = matrix.clone() + flipped = flipped_matrix[:, :16].reshape(-1, 4, 4) + flipped[:, 0, 1] *= -1 + flipped[:, 0, 2] *= -1 + flipped[:, 1, 0] *= -1 + flipped[:, 2, 0] *= -1 + flipped[:, 0, 3] *= -1 + + flipped = flipped.reshape(-1, 16) + flipped_matrix[:, :16] = flipped.clone() + + return flipped_matrix + + def get_pose_params(self, z, c): + if 
self.explicitly_symmetry: + # check if c is a left face + theta = torch.atan2(c[:, [11]], c[:, [3]]) # math.atan2(z, x) + is_left = (theta >= -np.pi / 2) & (theta <= np.pi / 2) + + flip_c = self.flip_yaw(c) + input_c = torch.where(is_left, flip_c, c) # if left, flip c + + pose_params = self.pose_branch(z, input_c) + + flip_pose_params = pose_params.clone() + flip_pose_params[:, [1, 2, 4, 5]] *= -1 # flip y and z axis angles + + pose_params = torch.where(is_left, flip_pose_params, pose_params) # if left, flip back pose_params + + return pose_params + else: + raise NotImplementedError + return self.pose_branch(z, c) + + def set_batch_size(self, batch_size): + self.renderer.set_batch_size(batch_size) + + def render_meshes(self, shape_pose_params, resolution, cameras): + + return self.renderer.render_meshes(shape_pose_params, resolution, cameras) + + def mapping(self, z, c, p, truncation_psi=1, truncation_cutoff=None, update_emas=False): + if self.rendering_kwargs['c_gen_conditioning_zero']: + raise NotImplementedError + p = torch.zeros([c.shape[0], 6]).to(c.device) + c = self.avg_c.repeat(c.shape[0], 1).to(c.device) + c = torch.cat([c, p], dim=1) + + else: + + if p is None: + p = torch.zeros([c.shape[0], 6]).to(c.device) + c = torch.cat([c, p], dim=1) + return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), truncation_psi=truncation_psi, + truncation_cutoff=truncation_cutoff, update_emas=update_emas) + + def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, + use_cached_backbone=False, + apply_def=False, pose_params=None, ws_bcg=None, + **synthesis_kwargs): + cam2world_matrix = c[:, :16].view(-1, 4, 4) + intrinsics = c[:, 16:25].view(-1, 3, 3) + + if neural_rendering_resolution is None: + neural_rendering_resolution = self.neural_rendering_resolution + else: + self.neural_rendering_resolution = neural_rendering_resolution + + # Create a batch of rays for volume rendering + ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution) + + # Create triplanes by running StyleGAN backbone + N, M, _ = ray_origins.shape + if use_cached_backbone and self._last_planes is not None: + planes = self._last_planes + else: + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + + if cache_backbone: + self._last_planes = planes + + # Reshape output into three D*32-channel planes, where D=self.rendering_kwargs['triplane_depth'], defines the depth of the tri-grid + for res_k in planes: + # b, c, H,W + # planes[res_k] = planes[res_k].view(len(planes[res_k]), 3, -1, planes[res_k].shape[-2], planes[res_k].shape[-1]) + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + # Perform volume rendering + render_output = self.renderer(planes, self.decoder, ray_origins, + ray_directions, self.rendering_kwargs, apply_def=apply_def, ws=ws, + pose_params=pose_params) # channels last + # {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights.sum(2)} + feature_samples = render_output['rgb_final'] + depth_samples = render_output['depth_final'] + weights_samples = render_output['weights'] + + # Reshape into 'raw' neural-rendered image + H = W = self.neural_rendering_resolution + feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous() + depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W) + weights_samples = 
weights_samples.permute(0, 2, 1).reshape(N, 1, H, W) + + # Run superresolution to get final image + if self.decoder.activation == "sigmoid": + feature_image = feature_image * 2 - 1 # Scale to (-1, 1), taken from ray marcher + # Generate Background + if self.bcg_synthesis: + ws_bcg = ws[:, :self.bcg_synthesis.num_ws] if ws_bcg is None else ws_bcg[:, :self.bcg_synthesis.num_ws] + if ws_bcg.size(1) < self.bcg_synthesis.num_ws: + ws_bcg = torch.cat([ws_bcg, ws_bcg[:, -1:].repeat(1, self.bcg_synthesis.num_ws - ws_bcg.size(1), 1)], 1) + bcg_image = self.bcg_synthesis(ws_bcg, update_emas=update_emas, **synthesis_kwargs) + bcg_image = torch.nn.functional.interpolate(bcg_image, size=feature_image.shape[2:], + mode='bilinear', align_corners=False, + antialias=self.rendering_kwargs['sr_antialias']) + feature_image = feature_image + (1 - weights_samples) * bcg_image + + # Generate Raw image + if self.torgb: + rgb_image = self.torgb(feature_image, ws[:, -1], fused_modconv=False) + rgb_image = rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + + bcg_rgb_image = self.torgb(bcg_image, ws_bcg[:, -1], fused_modconv=False) + bcg_rgb_image = bcg_rgb_image.to(dtype=torch.float32, memory_format=torch.contiguous_format) + else: + rgb_image = feature_image[:, :3] + bcg_rgb_image = bcg_image[:, :3] + # Run superresolution to get final image + sr_image = self.superresolution(rgb_image, feature_image, ws, + noise_mode=self.rendering_kwargs['superresolution_noise_mode'], + **{k: synthesis_kwargs[k] for k in synthesis_kwargs.keys() if + k != 'noise_mode'}) + + mask_image = weights_samples * (1 + 2 * 0.001) - 0.001 + + return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image, "image_mask": mask_image, + "image_background": bcg_rgb_image} + + def sample_ws(self, coordinates, directions, ws, update_emas=False, **synthesis_kwargs): + # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes. + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + def sample(self, coordinates, directions, z, c, p, truncation_psi=1, truncation_cutoff=None, update_emas=False, + **synthesis_kwargs): + # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes. 
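+        # sample() first maps (z, c, p) to ws, then evaluates the tri-grid exactly like synthesis()
+        # but at caller-supplied 3D coordinates instead of along camera rays; sample_ws() and
+        # sample_mixed() above do the same starting directly from ws. All three are mostly used
+        # for shape extraction (e.g. running marching cubes over a sampled density grid).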
+ ws = self.mapping(z, c, p, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, + update_emas=update_emas) + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, + **synthesis_kwargs): + # Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z' + planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) + # planes = planes.view(len(planes), 3, 32 * self.rendering_kwargs['triplane_depth'], planes.shape[-2], + # planes.shape[-1]) + for res_k in planes: + # b, c, H,W + planes[res_k] = planes[res_k].view(len(planes[res_k]) // 3, 3, planes[res_k].shape[-3], + planes[res_k].shape[-2], planes[res_k].shape[-1]) + return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs) + + def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, + update_emas=False, cache_backbone=False, use_cached_backbone=False, + apply_def=False, pose_params=None, + **synthesis_kwargs): + # Render a batch of generated images. + ws = self.mapping(z, c, pose_params, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, + update_emas=update_emas) + # TODO + return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, + cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, + apply_def=apply_def, pose_params=pose_params, + **synthesis_kwargs) + + +from training.networks_stylegan2 import FullyConnectedLayer + + +class OSGDecoder(torch.nn.Module): + def __init__(self, n_features, options): + super().__init__() + self.hidden_dim = 32 + + self.net = torch.nn.Sequential( + FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']), + torch.nn.Softplus(), + FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], + lr_multiplier=options['decoder_lr_mul']) + ) + self.activation = options['decoder_activation'] + + def forward(self, sampled_features, ray_directions): + # Aggregate features + sampled_features = sampled_features.mean(1) + x = sampled_features + + N, M, C = x.shape + x = x.view(N * M, C) + + x = self.net(x) + x = x.view(N, M, -1) + rgb = x[..., 1:] + sigma = x[..., 0:1] + if self.activation == "sigmoid": + # Original EG3D + rgb = torch.sigmoid(rgb) * (1 + 2 * 0.001) - 0.001 + elif self.activation == "lrelu": + # StyleGAN2-style, use with toRGB + rgb = torch.nn.functional.leaky_relu(rgb, 0.2, inplace=True) * math.sqrt(2) + return {'rgb': rgb, 'sigma': sigma} + + +import numpy as np + + +class GPoseBranch(torch.nn.Module): + def __init__(self, z_dim, c_dim): + super().__init__() + hidden_dim = 64 + self.in_channel = z_dim + c_dim + # + # predict_betas = predict_transl = predict_scale = False + # predict_pose = True + + out_dim = 6 + + # if predict_betas: + # out_dim += num_betas + # if predict_transl: + # out_dim += 3 + # if predict_scale: + # out_dim += 1 + # if predict_pose: + # out_dim += 6 + + self.output_dim 
= out_dim + self.net = torch.nn.Sequential( + FullyConnectedLayer(self.in_channel, 128, activation='lrelu'), + FullyConnectedLayer(128, 32, activation='lrelu'), + FullyConnectedLayer(32, self.output_dim) + ) + + def forward(self, z, c): + # misc.assert_shape(feature, [None, self.in_channel]) + # misc.assert_shape(camera_parameters, [None, 25]) + feature = torch.cat([z, c], dim=1) + + pose = self.net(feature) # (B, num_betas + 1 + 3 + 6) + + return pose \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/superresolution.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/superresolution.py new file mode 100644 index 0000000..43321df --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/superresolution.py @@ -0,0 +1,292 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +"""Superresolution network architectures from the paper +"Efficient Geometry-aware 3D Generative Adversarial Networks".""" + +import torch +from training.networks_stylegan2 import Conv2dLayer, SynthesisLayer, ToRGBLayer +from torch_utils.ops import upfirdn2d +from torch_utils import persistence +from torch_utils import misc + +from training.networks_stylegan2 import SynthesisBlock +import numpy as np +from training.networks_stylegan3 import SynthesisLayer as AFSynthesisLayer + + +#---------------------------------------------------------------------------- + +# for 512x512 generation +@persistence.persistent_class +class SuperresolutionHybrid8X(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 512 + + use_fp16 = sr_num_fp16_res > 0 + self.input_resolution = 128 + self.sr_antialias = sr_antialias + self.block0 = SynthesisBlock(channels, 128, w_dim=512, resolution=256, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=512, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] != self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- + +# for 256x256 generation +@persistence.persistent_class +class 
SuperresolutionHybrid4X(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 256 + use_fp16 = sr_num_fp16_res > 0 + self.sr_antialias = sr_antialias + self.input_resolution = 128 + self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] < self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- + +# for 128 x 128 generation +@persistence.persistent_class +class SuperresolutionHybrid2X(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 128 + + use_fp16 = sr_num_fp16_res > 0 + self.input_resolution = 64 + self.sr_antialias = sr_antialias + self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=64, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=128, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] != self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- + +# TODO: Delete (here for backwards compatibility with old 256x256 models) +@persistence.persistent_class +class SuperresolutionHybridDeepfp32(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 256 + use_fp16 = sr_num_fp16_res > 0 + + self.input_resolution = 128 + self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128, + img_channels=3, is_last=False, 
use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1])) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] < self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- + +@persistence.persistent_class +class SynthesisBlockNoUp(torch.nn.Module): + def __init__(self, + in_channels, # Number of input channels, 0 = first block. + out_channels, # Number of output channels. + w_dim, # Intermediate latent (W) dimensionality. + resolution, # Resolution of this block. + img_channels, # Number of output color channels. + is_last, # Is this the last block? + architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'. + resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. + conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. + use_fp16 = False, # Use FP16 for this block? + fp16_channels_last = False, # Use channels-last memory format with FP16? + fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training. + **layer_kwargs, # Arguments for SynthesisLayer. 
+ ): + assert architecture in ['orig', 'skip', 'resnet'] + super().__init__() + self.in_channels = in_channels + self.w_dim = w_dim + self.resolution = resolution + self.img_channels = img_channels + self.is_last = is_last + self.architecture = architecture + self.use_fp16 = use_fp16 + self.channels_last = (use_fp16 and fp16_channels_last) + self.fused_modconv_default = fused_modconv_default + self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) + self.num_conv = 0 + self.num_torgb = 0 + + if in_channels == 0: + self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution])) + + if in_channels != 0: + self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, + conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, + conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) + self.num_conv += 1 + + if is_last or architecture == 'skip': + self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, + conv_clamp=conv_clamp, channels_last=self.channels_last) + self.num_torgb += 1 + + if in_channels != 0 and architecture == 'resnet': + self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, + resample_filter=resample_filter, channels_last=self.channels_last) + + def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs): + _ = update_emas # unused + misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim]) + w_iter = iter(ws.unbind(dim=1)) + if ws.device.type != 'cuda': + force_fp32 = True + dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 + memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format + if fused_modconv is None: + fused_modconv = self.fused_modconv_default + if fused_modconv == 'inference_only': + fused_modconv = (not self.training) + + # Input. + if self.in_channels == 0: + x = self.const.to(dtype=dtype, memory_format=memory_format) + x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1]) + else: + misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) + x = x.to(dtype=dtype, memory_format=memory_format) + + # Main layers. + if self.in_channels == 0: + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + elif self.architecture == 'resnet': + y = self.skip(x, gain=np.sqrt(0.5)) + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) + x = y.add_(x) + else: + x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) + + # ToRGB. 
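+        # Unlike the standard SynthesisBlock, this "NoUp" variant keeps the skip image at the
+        # block's own resolution: the usual 2x upsample of `img` is disabled (see the commented-out
+        # lines below) and the new ToRGB contribution is simply accumulated onto the incoming image.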
+ # if img is not None: + # misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2]) + # img = upfirdn2d.upsample2d(img, self.resample_filter) + if self.is_last or self.architecture == 'skip': + y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) + y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) + img = img.add_(y) if img is not None else y + + assert x.dtype == dtype + assert img is None or img.dtype == torch.float32 + return x, img + + def extra_repr(self): + return f'resolution={self.resolution:d}, architecture={self.architecture:s}' + + +#---------------------------------------------------------------------------- + +# for 512x512 generation +@persistence.persistent_class +class SuperresolutionHybrid8XDC(torch.nn.Module): + def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, + num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE + **block_kwargs): + super().__init__() + assert img_resolution == 512 + + use_fp16 = sr_num_fp16_res > 0 + self.input_resolution = 128 + self.sr_antialias = sr_antialias + self.block0 = SynthesisBlock(channels, 256, w_dim=512, resolution=256, + img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + self.block1 = SynthesisBlock(256, 128, w_dim=512, resolution=512, + img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs) + + def forward(self, rgb, x, ws, **block_kwargs): + ws = ws[:, -1:, :].repeat(1, 3, 1) + + if x.shape[-1] != self.input_resolution: + x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution), + mode='bilinear', align_corners=False, antialias=self.sr_antialias) + + x, rgb = self.block0(x, rgb, ws, **block_kwargs) + x, rgb = self.block1(x, rgb, ws, **block_kwargs) + return rgb + +#---------------------------------------------------------------------------- \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/training_loop.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/training_loop.py new file mode 100644 index 0000000..681de57 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/training_loop.py @@ -0,0 +1,714 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
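The superresolution variants above all share one contract: take the raw neural-rendered RGB and the feature image, resize them to `input_resolution` if necessary, and run two `w`-modulated synthesis blocks that upsample to the final resolution while accumulating a skip RGB image. The sketch below mirrors that interface with plain PyTorch ops so the tensor flow is easy to follow; it is an illustration only, not the repository's implementation (the toy convolution/upsampling layers and the 32-channel, 128-to-512 shapes are assumptions modeled on `SuperresolutionHybrid8XDC`).

```python
import torch
import torch.nn.functional as F

class ToySuperresolution(torch.nn.Module):
    """Toy stand-in for SuperresolutionHybrid8XDC: 128x128 features -> 512x512 RGB.

    The real module uses two StyleGAN2 SynthesisBlocks modulated by ws; plain
    convolutions are used here purely to illustrate shapes and data flow.
    """
    def __init__(self, feat_channels=32, input_resolution=128):
        super().__init__()
        self.input_resolution = input_resolution
        self.conv0 = torch.nn.Conv2d(feat_channels, 64, 3, padding=1)
        self.conv1 = torch.nn.Conv2d(64, 3, 3, padding=1)

    def forward(self, rgb, x, ws):
        # Resize both the raw RGB and the feature image to the working resolution.
        if x.shape[-1] != self.input_resolution:
            size = (self.input_resolution, self.input_resolution)
            x = F.interpolate(x, size=size, mode='bilinear', align_corners=False)
            rgb = F.interpolate(rgb, size=size, mode='bilinear', align_corners=False)
        # Two upsampling stages: 128 -> 256 -> 512 (w-modulated blocks in the real code).
        x = F.interpolate(F.relu(self.conv0(x)), scale_factor=2, mode='bilinear', align_corners=False)
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
        # Accumulate onto the (upsampled) skip RGB image, as the 'skip' architecture does.
        rgb_up = F.interpolate(rgb, size=x.shape[-2:], mode='bilinear', align_corners=False)
        return rgb_up + self.conv1(x)

rgb = torch.randn(2, 3, 64, 64)    # raw neural-rendered RGB (image_raw)
feat = torch.randn(2, 32, 64, 64)  # raw feature image (decode_channel = 32)
ws = torch.randn(2, 14, 512)       # latents; unused in this toy version
print(ToySuperresolution()(rgb, feat, ws).shape)  # torch.Size([2, 3, 512, 512])
```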
+ +"""Main training loop.""" + +import os +import random +import time +import copy +import json +import pickle +import psutil +import PIL.Image +import numpy as np +import torch +import dnnlib +from torch_utils import misc +from torch_utils import training_stats +from torch_utils.ops import conv2d_gradfix +from torch_utils.ops import grid_sample_gradfix + +import legacy +from metrics import metric_main,metric_utils +from camera_utils import LookAtPoseSampler +from training.crosssection_utils import sample_cross_section + +#---------------------------------------------------------------------------- + +def setup_snapshot_image_grid(training_set, random_seed=0): + rnd = np.random.RandomState(random_seed) + h = int(7680 * (training_set.image_shape[2]/512)) + w = int(4320 * (training_set.image_shape[2] / 512)) + gh = np.clip(h // training_set.image_shape[2], 7, 8) + gw = np.clip(w // training_set.image_shape[1], 4, 4) + + # No labels => show random subset of training samples. + # if not training_set.has_labels: + # all_indices = list(range(len(training_set))) + # rnd.shuffle(all_indices) + # grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)] + + # else: + # # Group training samples by label. + # label_groups = dict() # label => [idx, ...] + # for idx in range(len(training_set)): + # label = tuple(training_set.get_details(idx).raw_label.flat[::-1]) + # if label not in label_groups: + # label_groups[label] = [] + # label_groups[label].append(idx) + + # # Reorder. + # label_order = list(label_groups.keys()) + # rnd.shuffle(label_order) + # for label in label_order: + # rnd.shuffle(label_groups[label]) + + # # Organize into grid. + # grid_indices = [] + # for y in range(gh): + # label = label_order[y % len(label_order)] + # indices = label_groups[label] + # grid_indices += [indices[x % len(indices)] for x in range(gw)] + # label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))] + label_groups = dict() # label => [idx, ...] + for idx in range(len(training_set)): + label = tuple(training_set.get_details(idx).raw_label.flat[::-1]) + if label not in label_groups: + label_groups[label] = [] + label_groups[label].append(idx) + + # Reorder. + label_order = list(label_groups.keys()) + rnd.shuffle(label_order) + for label in label_order: + rnd.shuffle(label_groups[label]) + + # Organize into grid. + grid_indices = [] + for y in range(gh): + for x in range(gw//2): + label = label_order[(y + x*gh) % len(label_order)] + indices = list(set(label_groups[label])) + #grid_indices += [indices[x % len(indices)] for x in range(2)] + grid_indices += [indices[0], (indices[0]+ len(training_set)//2)%len(training_set) ] + label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))] + + + # Load data. 
+ images, segs, labels, poses = zip(*[training_set[i] for i in grid_indices]) + return (gw, gh), np.stack(images),np.stack(segs), np.stack(labels), np.stack(poses) + +#---------------------------------------------------------------------------- + +def save_image_grid(img, fname, drange, grid_size): + lo, hi = drange + img = np.asarray(img, dtype=np.float32) + img = (img - lo) * (255 / (hi - lo)) + img = np.rint(img).clip(0, 255).astype(np.uint8) + + gw, gh = grid_size + _N, C, H, W = img.shape + img = img.reshape([gh, gw, C, H, W]) + img = img.transpose(0, 3, 1, 4, 2) + img = img.reshape([gh * H, gw * W, C]) + + assert C in [1, 3] + if C == 1: + PIL.Image.fromarray(img[:, :, 0], 'L').save(fname) + if C == 3: + PIL.Image.fromarray(img, 'RGB').save(fname) + +#---------------------------------------------------------------------------- + +def training_loop( + run_dir = '.', # Output directory. + training_set_kwargs = {}, # Options for training set. + data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader. + G_kwargs = {}, # Options for generator network. + D_kwargs = {}, # Options for discriminator network. + G_opt_kwargs = {}, # Options for generator optimizer. + D_opt_kwargs = {}, # Options for discriminator optimizer. + augment_kwargs = None, # Options for augmentation pipeline. None = disable. + loss_kwargs = {}, # Options for loss function. + metrics = [], # Metrics to evaluate during training. + random_seed = 0, # Global random seed. + num_gpus = 1, # Number of GPUs participating in the training. + rank = 0, # Rank of the current process in [0, num_gpus[. + batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus. + batch_gpu = 4, # Number of samples processed at a time by one GPU. + ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights. + ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup. + G_reg_interval = None, # How often to perform regularization for G? None = disable lazy regularization. + D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization. + augment_p = 0, # Initial value of augmentation probability. + ada_target = None, # ADA target value. None = fixed p. + ada_interval = 4, # How often to perform ADA adjustment? + ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit. + total_kimg = 25000, # Total length of the training, measured in thousands of real images. + kimg_per_tick = 4, # Progress snapshot interval. + image_snapshot_ticks = 50, # How often to save image snapshots? None = disable. + network_snapshot_ticks = 50, # How often to save network snapshots? None = disable. + resume_pkl = None, # Network pickle to resume training from. + resume_kimg = 0, # First kimg to report when resuming training. + cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark? + abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks. + progress_fn = None, # Callback function for updating training progress. Called for all ranks. + train_g_pose_branch = None, + metric_pose_sample_mode = None, +): + print('Random seed: %d' % random_seed) + # Initialize. 
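+    # Each rank seeds NumPy and PyTorch with random_seed * num_gpus + rank, giving every process
+    # a distinct but reproducible random stream.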
+ start_time = time.time() + device = torch.device('cuda', rank) + np.random.seed(random_seed * num_gpus + rank) + torch.cuda.set_device(device) + torch.manual_seed(random_seed * num_gpus + rank) + torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed. + torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy. + torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy. + torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False # Improves numerical accuracy. + conv2d_gradfix.enabled = True # Improves training speed. # TODO: ENABLE + grid_sample_gradfix.enabled = False # Avoids errors with the augmentation pipe. + + # Load training set. + if rank == 0: + print('Loading training set...') + training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset + training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed) + training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs)) + if rank == 0: + print() + print('Num images: ', len(training_set)) + print('Image shape:', training_set.image_shape) + print('Label shape:', training_set.label_shape) + print('Pose shape:', training_set.pose_shape) + print() + print('>>>>>>>>>>>>>>> image_snapshot_ticks:', image_snapshot_ticks) + print('>>>>>>>>>>>>>>> network_snapshot_ticks:', network_snapshot_ticks) + + # Construct networks. + if rank == 0: + print('Constructing networks...') + common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels) + G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module + G.register_buffer('dataset_label_std', torch.tensor(training_set.get_label_std()).to(device)) + D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module + G_ema = copy.deepcopy(G).eval() + D_ema = copy.deepcopy(D).eval() + + # Resume from existing pickle. + if (resume_pkl is not None) and (rank == 0): + print(f'Resuming from "{resume_pkl}"') + with dnnlib.util.open_url(resume_pkl) as f: + resume_data = legacy.load_network_pkl(f) + for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]: + misc.copy_params_and_buffers(resume_data[name], module, require_all=False) + + if 'D_ema' in resume_data: + print(f'copy params of D_ema of "{resume_pkl} to D_ema') + misc.copy_params_and_buffers(resume_data['D_ema'], D_ema, require_all=False) + else: + print(f'copy params of D of "{resume_pkl} to D_ema') + misc.copy_params_and_buffers(resume_data['D'], D_ema, require_all=False) + + # Print network summary tables. + if rank == 0: + z = torch.empty([batch_gpu, G.z_dim], device=device) + c = torch.empty([batch_gpu, G.c_dim], device=device) + p = torch.empty([batch_gpu, 6], device=device) + img = misc.print_module_summary(G, [z, c, ]) + misc.print_module_summary(D, [img, c ]) + + print('plane_shapes:') + for res_k in G.plane_shapes: + print(res_k, G.plane_shapes[res_k]) + # Setup augmentation. 
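+    # The ADA augmentation pipe is only built when augment_kwargs is provided and either a fixed
+    # probability (augment_p > 0) or an adaptive target (ada_target) is requested; the adaptive
+    # case additionally tracks the sign of the discriminator outputs on real images.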
+ if rank == 0: + print('Setting up augmentation...') + augment_pipe = None + ada_stats = None + if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None): + augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module + augment_pipe.p.copy_(torch.as_tensor(augment_p)) + if ada_target is not None: + ada_stats = training_stats.Collector(regex='Loss/signs/real') + + # Distribute across GPUs. + if rank == 0: + print(f'Distributing across {num_gpus} GPUs...') + for module in [G, D, G_ema,D_ema, augment_pipe]: + if module is not None: + for param in misc.params_and_buffers(module): + if param.numel() > 0 and num_gpus > 1: + torch.distributed.broadcast(param, src=0) + + # Setup training phases. + if rank == 0: + print('Setting up training phases...') + loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe,rank = rank,**loss_kwargs) # subclass of training.loss.Loss + phases = [] + for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]: + params_list = [] + params_name_list = [] + for p_name, p in module.named_parameters(): + if name == 'G': + if 'aligned_SMPL' not in p_name: + if not train_g_pose_branch: + if 'pose_branch' not in p_name: + params_list.append(p) + params_name_list.append(p_name) + else: + params_list.append(p) + params_name_list.append(p_name) + else: + params_list.append(p) + params_name_list.append(p_name) + + + + if rank ==0: + print(f'params_name_list of {name}:',params_name_list) + + if reg_interval is None: + opt = dnnlib.util.construct_class_by_name(params=params_list, **opt_kwargs) # subclass of torch.optim.Optimizer + phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)] + + + else: # Lazy regularization. + mb_ratio = reg_interval / (reg_interval + 1) + opt_kwargs = dnnlib.EasyDict(opt_kwargs) + opt_kwargs.lr = opt_kwargs.lr * mb_ratio + opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas] + opt = dnnlib.util.construct_class_by_name(params=params_list, **opt_kwargs) # subclass of torch.optim.Optimizer + phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)] + phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)] + + + + for phase in phases: + phase.start_event = None + phase.end_event = None + if rank == 0: + phase.start_event = torch.cuda.Event(enable_timing=True) + phase.end_event = torch.cuda.Event(enable_timing=True) + print('phase: ',phase.name) + + # Export sample images. 
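+    # The snapshot grid saves the real images, their segmentation masks, and coarse-pose mesh
+    # renderings; latent codes are drawn in identical consecutive pairs, matching the paired real
+    # images chosen in setup_snapshot_image_grid().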
+ grid_size = None + grid_z = None + grid_c = None + if rank == 0: + print('Exporting sample images...') + grid_size, images,segs, labels,poses = setup_snapshot_image_grid(training_set=training_set,random_seed=random.randint(0, 1000000)) + save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size) + save_image_grid(segs, os.path.join(run_dir, 'segs.jpg'), drange=[0, 255], grid_size=grid_size) + grid_images = (torch.from_numpy(images).to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu) + grid_segs = (torch.from_numpy(segs).to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu) + + #grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu) + + if G.rendering_kwargs['c_gen_conditioning_zero']: + raise NotImplementedError + grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu) + else: + #raise NotImplementedError + grid_z = [] + for i in range(labels.shape[0]//2): + sample_z = torch.randn([1, G.z_dim], device=device) + grid_z.append(sample_z) + grid_z.append(sample_z) + grid_z = torch.cat(grid_z,dim=0).split(batch_gpu) + + + grid_c = torch.from_numpy(labels).to(device).split(batch_gpu) + grid_poses = torch.from_numpy(poses).to(device).split(batch_gpu) + + real_shape_real_pose = [] + for real_pose, c in zip(grid_poses, grid_c): + real_shape_pose_param = {'pose': real_pose} + real_shape_real_pose.append( + G_ema.render_meshes(real_shape_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + real_shape_real_pose = np.concatenate(real_shape_real_pose, axis=0) + save_image_grid(real_shape_real_pose, + os.path.join(run_dir, f'mesh_coarse_real_pose.png'), + drange=[0, 255], grid_size=grid_size) + #exit() + + # Initialize logs. + if rank == 0: + print('Initializing logs...') + stats_collector = training_stats.Collector(regex='.*') + stats_metrics = dict() + stats_jsonl = None + stats_tfevents = None + if rank == 0: + stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt') + try: + import torch.utils.tensorboard as tensorboard + stats_tfevents = tensorboard.SummaryWriter(run_dir) + except ImportError as err: + print('Skipping tfevents export:', err) + + # Train. + if rank == 0: + print(f'Training for {total_kimg} kimg...') + print() + cur_nimg = resume_kimg * 1000 + cur_tick = 0 + tick_start_nimg = cur_nimg + tick_start_time = time.time() + maintenance_time = tick_start_time - start_time + batch_idx = 0 + if progress_fn is not None: + progress_fn(0, total_kimg) + + + + while True: + # Fetch training data. 
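+        # Real images, masks, cameras and coarse poses come from the data loader; for the fake
+        # branch a fresh z is sampled per phase, while the conditioning camera and coarse pose are
+        # re-sampled from random dataset entries so generated and real conditions follow the same
+        # distribution.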
+ with torch.autograd.profiler.record_function('data_fetch'): + + phase_real_img, phase_real_seg, phase_real_c, phase_real_pose = next(training_set_iterator) + + + phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu) + phase_real_seg = (phase_real_seg.to(device).to(torch.float32) / 255.0).split(batch_gpu) + phase_real_c = phase_real_c.to(device).split(batch_gpu) + phase_real_pose = phase_real_pose.to(device).split(batch_gpu) + + all_gen_z = torch.randn([len(phases) * (batch_size // num_gpus), G.z_dim], device=device) # 4 * 8 + all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split((batch_size // num_gpus))] + + random_idx = [np.random.randint(len(training_set)) for _ in range(len(phases) * (batch_size // num_gpus))] + + + all_gen_c = [training_set.get_label(gen_idx) for gen_idx in random_idx] + all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device) + all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split((batch_size // num_gpus))] + + + all_gen_pose = [training_set.get_coarse_pose(gen_idx) for gen_idx in random_idx] + all_gen_pose = torch.from_numpy(np.stack(all_gen_pose)).pin_memory().to(device) + all_gen_pose = [phase_gen_pose.split(batch_gpu) for phase_gen_pose in all_gen_pose.split((batch_size // num_gpus))] + + assert len(phases) == len(all_gen_z) == len(all_gen_c) ==len(all_gen_pose) + # Execute training phases. + for phase, phase_gen_z,phase_gen_c,phase_gen_pose in zip(phases, all_gen_z,all_gen_c,all_gen_pose): # 4 + if batch_idx % phase.interval != 0: + continue + + + if phase.start_event is not None: + phase.start_event.record(torch.cuda.current_stream(device)) + + # Accumulate gradients. + phase.opt.zero_grad(set_to_none=True) + phase.module.requires_grad_(True) + for real_img, real_seg, real_c,real_pose, gen_z,gen_c,gen_pose in \ + zip(phase_real_img, phase_real_seg, phase_real_c, phase_real_pose, phase_gen_z,phase_gen_c,phase_gen_pose): + + loss.accumulate_gradients(phase=phase.name, real_img=real_img,real_seg = real_seg, real_c=real_c,real_pose = real_pose, + gen_z=gen_z,gen_c = gen_c, gen_pose = gen_pose, + + gain=phase.interval, cur_nimg=cur_nimg,cur_nimg_start = resume_kimg * 1000) + phase.module.requires_grad_(False) + + # Update weights. + with torch.autograd.profiler.record_function(phase.name + '_opt'): + + params = [param for param in phase.module.parameters() if param.numel() > 0 and param.grad is not None] + if len(params) > 0: + flat = torch.cat([param.grad.flatten() for param in params]) + if num_gpus > 1: + torch.distributed.all_reduce(flat) + flat /= num_gpus + misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat) + grads = flat.split([param.numel() for param in params]) + for param, grad in zip(params, grads): + param.grad = grad.reshape(param.shape) + phase.opt.step() + + + + # Phase done. + if phase.end_event is not None: + phase.end_event.record(torch.cuda.current_stream(device)) + + # Update G_ema. 
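+        # Both G and D keep EMA copies. The decay is derived from a half-life expressed in images:
+        # ema_beta = 0.5 ** (batch_size / ema_nimg), with ema_nimg optionally ramped up early in
+        # training; buffers are copied verbatim rather than averaged.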
+ with torch.autograd.profiler.record_function('Gema'): + ema_nimg = ema_kimg * 1000 + if ema_rampup is not None: + ema_nimg = min(ema_nimg, cur_nimg * ema_rampup) + ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8)) + for p_ema, p in zip(G_ema.parameters(), G.parameters()): + p_ema.copy_(p.lerp(p_ema, ema_beta)) + for b_ema, b in zip(G_ema.buffers(), G.buffers()): + b_ema.copy_(b) + G_ema.neural_rendering_resolution = G.neural_rendering_resolution + G_ema.rendering_kwargs = G.rendering_kwargs.copy() + + with torch.autograd.profiler.record_function('Dema'): + ema_nimg = ema_kimg * 1000 + if ema_rampup is not None: + ema_nimg = min(ema_nimg, cur_nimg * ema_rampup) + ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8)) + for p_ema, p in zip(D_ema.parameters(), D.parameters()): + p_ema.copy_(p.lerp(p_ema, ema_beta)) + for b_ema, b in zip(D_ema.buffers(), D.buffers()): + b_ema.copy_(b) + + + # Update state. + cur_nimg += batch_size + batch_idx += 1 + + # Execute ADA heuristic. + if (ada_stats is not None) and (batch_idx % ada_interval == 0): + ada_stats.update() + adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000) + augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device))) + + # Perform maintenance tasks once per tick. + done = (cur_nimg >= total_kimg * 1000) + if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000): + continue + + # Print status line, accumulating the same information in training_stats. + tick_end_time = time.time() + fields = [] + fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"] + fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"] + fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"] + fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"] + fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"] + fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"] + fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"] + fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"] + fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"] + torch.cuda.reset_peak_memory_stats() + fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"] + + if loss.swapping_prob is not None: + fields += [f"swap prob {training_stats.report0('Progress/swap_prob', float(loss.swapping_prob)):.3f}"] + if loss.neural_rendering_resolution is not None: + fields += [f"render_res {training_stats.report0('Progress/rendering_res', float(loss.neural_rendering_resolution)):.3f}"] + # if loss.noise_alpha is not None: + # fields += [f"noise_alpha {training_stats.report0('Progress/noise_alpha', float(loss.noise_alpha)):.3f}"] + # if loss.noise_scale is not None: + # fields += [f"noise_scale {training_stats.report0('Progress/noise_scale', float(loss.noise_scale)):.3f}"] + + # if loss.predict_label_alpha is not None: + # fields += [f"predict_label_alpha 
{training_stats.report0('Progress/predict_label_alpha', float(loss.predict_label_alpha)):.3f}"] + + training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60)) + training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60)) + if rank == 0: + print(' '.join(fields)) + + # Check for abort. + if (not done) and (abort_fn is not None) and abort_fn(): + done = True + if rank == 0: + print() + print('Aborting...') + + + + + if (rank == 0) and ((image_snapshot_ticks is not None) and (done or (cur_tick % image_snapshot_ticks == 0) ) ): # or (cur_tick<50 and cur_tick % 5 == 0 ) ) # (cur_tick!=0) and + print('gen images...') + with torch.no_grad(): + predicted_real_pose_params_D = [] + for vis_real_img,vis_real_seg, vis_c in zip(grid_images,grid_segs, grid_c): + pose_param = loss.get_pose_params_D(vis_real_img,vis_real_seg, vis_c, cur_nimg) + predicted_real_pose_params_D.append(pose_param) + + predicted_fake_pose_params_G = [] + for vis_z, vis_c in zip(grid_z, grid_c): + pose_param = loss.get_pose_params_G(vis_z, vis_c) + predicted_fake_pose_params_G.append(pose_param) + + + real_pose_mesh = [] + for predicted_real_pose, c in zip(predicted_real_pose_params_D, grid_c): + real_pose_param = {'pose': predicted_real_pose} + real_pose_mesh.append( + G_ema.render_meshes(real_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + real_pose_mesh = np.concatenate(real_pose_mesh, axis=0) + save_image_grid(real_pose_mesh, + os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_mesh_real_pose_D.png'), + drange=[0, 255], grid_size=grid_size) + + + snap_pose = predicted_fake_pose_params_G + cond_c = torch.tensor([[ 1.0000e+00, 1.0505e-09, 4.3685e-08, -1.1805e-07, 0.0000e+00, + -9.9951e-01, 2.4033e-02, -1.1805e-07, 4.3714e-08, -2.4033e-02, + -9.9951e-01, 2.6992e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 1.0000e+00, 6.5104e+00, 0.0000e+00, 5.0000e-01, 0.0000e+00, + 6.5104e+00, 5.0000e-01, 0.0000e+00, 0.0000e+00, 1.0000e+00]]).float().to(device) + + + #out = [G_ema(z=z, c=c, noise_mode='const',apply_def = True, pose_params = pose) for z, c, pose in zip(grid_z, grid_c, snap_pose)] + grid_ws = [G_ema.mapping(z, cond_c.expand(z.shape[0], -1),None) for z in grid_z] + out =[G_ema.synthesis(ws, c=c, noise_mode='const',apply_def = True, pose_params = pose) for ws, c,pose in zip(grid_ws, grid_c,snap_pose)] + images = torch.cat([o['image'].cpu() for o in out]).numpy() + #print('images range: ',np.max(images),np.min(images)) + images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy() + images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy() + images_alpha = torch.cat([o['image_mask'].cpu() for o in out]).numpy() + #background_raw = torch.cat([o['image_background'].cpu() for o in out]).numpy() + save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_0.png'), drange=[-1,1], grid_size=grid_size) + save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_2_raw.png'), drange=[-1,1], grid_size=grid_size) + save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_4_depth.png'), drange=[images_depth.min(), images_depth.max()], grid_size=grid_size) + save_image_grid(images_alpha, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_4_alpha.jpg'), drange=[0, 1], grid_size=grid_size) + #save_image_grid(background_raw, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_4_background.jpg'), drange=[-1, 1], grid_size=grid_size) + with torch.no_grad(): + predicted_fake_pose_params_D = [] 
+ for o,vis_c,vis_pose in zip(out,grid_c,snap_pose): + pose_param = loss.get_pose_params_D(o['image'],o['image_mask'],vis_c, cur_nimg) + predicted_fake_pose_params_D.append(pose_param) + + fake_pose_mesh = [] + for predicted_fake_pose, c in zip(predicted_fake_pose_params_D, grid_c): + fake_pose_param = {'pose': predicted_fake_pose} + fake_pose_mesh.append( + G_ema.render_meshes(fake_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + fake_pose_mesh = np.concatenate(fake_pose_mesh, axis=0) + save_image_grid(fake_pose_mesh, + os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_mesh_fake_pose_D.png'), + drange=[0, 255], grid_size=grid_size) + + input_pose_mesh = [] + for input_pose, c in zip(predicted_fake_pose_params_G, grid_c): + input_pose_param = {'pose': input_pose} + input_pose_mesh.append( + G_ema.render_meshes(input_pose_param, resolution=training_set.image_shape[2], cameras=c) + ) + input_pose_mesh = np.concatenate(input_pose_mesh, axis=0) + save_image_grid(input_pose_mesh, + os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_mesh_input_pose_G.png'), + drange=[0, 255], grid_size=grid_size) + + + + + # no_pose_out = [G_ema(z=z, c=c, noise_mode='const', apply_def=False, pose_params=None) for z, c in zip(grid_z, grid_c)] + no_pose_out =[G_ema.synthesis(ws, c=c, noise_mode='const',apply_def = False, pose_params = None) for ws, c in zip(grid_ws, grid_c)] + images = torch.cat([o['image'].cpu() for o in no_pose_out]).numpy() + images_raw = torch.cat([o['image_raw'].cpu() for o in no_pose_out]).numpy() + images_depth = -torch.cat([o['image_depth'].cpu() for o in no_pose_out]).numpy() + save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_1_no_pose.png'), drange=[-1, 1], + grid_size=grid_size) + save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_3_no_pose_raw.png'), drange=[-1, 1], + grid_size=grid_size) + save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg // 1000:06d}_5_no_pose_depth.png'), + drange=[images_depth.min(), images_depth.max()], grid_size=grid_size) + + + + # if (loss.fronzen_D is not None) and ((network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0)): + # if rank ==0 : + # print('update loss.fronzen_D...') + # misc.copy_params_and_buffers(D, loss.fronzen_D, require_all=True) + # Save network snapshot. + snapshot_pkl = None + snapshot_data = None + if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0): + snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs)) + for name, module in [('G', G), ('D', D), ('G_ema', G_ema), ('D_ema', D_ema), ('augment_pipe', augment_pipe)]: + if module is not None: + if num_gpus > 1: + misc.check_ddp_consistency(module, ignore_regex=r'.*\.[^.]+_(avg|ema)') + module = copy.deepcopy(module).eval().requires_grad_(False).cpu() + snapshot_data[name] = module + del module # conserve memory + snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl') + if rank == 0: + with open(snapshot_pkl, 'wb') as f: + pickle.dump(snapshot_data, f) + + pose_predict_kwargs = { + 'blur_sigma' : loss.blur_sigma, + 'neural_rendering_resolution': loss.neural_rendering_resolution, + 'resample_filter': loss.resample_filter.cpu().numpy().tolist(), + 'filter_mode': loss.filter_mode + } + with open(os.path.join(run_dir, f'pose_predict_kwargs-{cur_nimg//1000:06d}.json'), 'wt') as f: + json.dump(pose_predict_kwargs, f, indent=2) + + + # Evaluate metrics. 
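+        # Metrics are computed on the G_ema snapshot. When metric_pose_sample_mode == 'D_predict',
+        # the discriminator snapshot and the current blur/rendering settings are passed along so
+        # the metric code can predict poses for generated samples the same way the loss does.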
+ if (cur_tick!=0) and (snapshot_data is not None) and (len(metrics) > 0): + if rank == 0: + print(run_dir) + print('Evaluating metrics...') + for metric in metrics: + progress = metric_utils.ProgressMonitor(verbose=True) + # result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'], + # dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, + # rank=rank, device=device, progress=progress + # ) + result_dict = metric_main.calc_metric(metric=metric, + G=snapshot_data['G_ema'], + dataset_kwargs=training_set_kwargs, + num_gpus=num_gpus, + rank=rank, + device=device, + metric_pose_sample_mode = metric_pose_sample_mode, + progress=progress, + D = snapshot_data['D'] if metric_pose_sample_mode == 'D_predict' else None, + pose_predict_kwargs = { + 'blur_sigma' : loss.blur_sigma, + 'neural_rendering_resolution': loss.neural_rendering_resolution, + 'resample_filter': loss.resample_filter, + 'filter_mode': loss.filter_mode + } if metric_pose_sample_mode == 'D_predict' else None + ) + + if rank == 0: + metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl) + stats_metrics.update(result_dict.results) + del snapshot_data # conserve memory + + # Collect statistics. + for phase in phases: + value = [] + if (phase.start_event is not None) and (phase.end_event is not None): + phase.end_event.synchronize() + value = phase.start_event.elapsed_time(phase.end_event) + training_stats.report0('Timing/' + phase.name, value) + stats_collector.update() + stats_dict = stats_collector.as_dict() + + # Update logs. + timestamp = time.time() + if stats_jsonl is not None: + fields = dict(stats_dict, timestamp=timestamp) + stats_jsonl.write(json.dumps(fields) + '\n') + stats_jsonl.flush() + if stats_tfevents is not None: + global_step = int(cur_nimg / 1e3) + walltime = timestamp - start_time + for name, value in stats_dict.items(): + stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime) + for name, value in stats_metrics.items(): + stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime) + stats_tfevents.flush() + if progress_fn is not None: + progress_fn(cur_nimg // 1000, total_kimg) + + # Update state. + cur_tick += 1 + tick_start_nimg = cur_nimg + tick_start_time = time.time() + maintenance_time = tick_start_time - tick_end_time + if done: + break + + # Done. + if rank == 0: + print() + print('Exiting...') + +#---------------------------------------------------------------------------- diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/__init__.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/__init__.py new file mode 100644 index 0000000..daba665 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
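Both the generator and the discriminator keep exponential moving averages in the training loop above, with the averaging half-life given in thousands of images and optionally ramped up at the start of training. The self-contained snippet below restates that update rule; the function name and the toy `Linear` module are placeholders for illustration, not part of the codebase.

```python
import copy
import torch

def update_ema(module_ema, module, cur_nimg, batch_size, ema_kimg=10, ema_rampup=0.05):
    """Blend `module` into `module_ema` the same way the training loop does."""
    ema_nimg = ema_kimg * 1000
    if ema_rampup is not None:
        # Shorten the averaging window early in training.
        ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
    ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
    with torch.no_grad():
        for p_ema, p in zip(module_ema.parameters(), module.parameters()):
            p_ema.copy_(p.lerp(p_ema, ema_beta))   # p_ema = beta * p_ema + (1 - beta) * p
        for b_ema, b in zip(module_ema.buffers(), module.buffers()):
            b_ema.copy_(b)                         # buffers are copied verbatim

net = torch.nn.Linear(4, 4)                        # placeholder module for demonstration
net_ema = copy.deepcopy(net).eval()
update_ema(net_ema, net, cur_nimg=32_000, batch_size=32)
```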
+ +# empty \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/math_utils.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/math_utils.py new file mode 100644 index 0000000..4cf9d2b --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/math_utils.py @@ -0,0 +1,118 @@ +# MIT License + +# Copyright (c) 2022 Petr Kellnhofer + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch + +def transform_vectors(matrix: torch.Tensor, vectors4: torch.Tensor) -> torch.Tensor: + """ + Left-multiplies MxM @ NxM. Returns NxM. + """ + res = torch.matmul(vectors4, matrix.T) + return res + + +def normalize_vecs(vectors: torch.Tensor) -> torch.Tensor: + """ + Normalize vector lengths. + """ + return vectors / (torch.norm(vectors, dim=-1, keepdim=True)) + +def torch_dot(x: torch.Tensor, y: torch.Tensor): + """ + Dot product of two tensors. + """ + return (x * y).sum(-1) + + +def get_ray_limits_box(rays_o: torch.Tensor, rays_d: torch.Tensor, box_side_length): + """ + Author: Petr Kellnhofer + Intersects rays with the [-1, 1] NDC volume. + Returns min and max distance of entry. + Returns -1 for no intersection. + https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection + """ + o_shape = rays_o.shape + rays_o = rays_o.detach().reshape(-1, 3) + rays_d = rays_d.detach().reshape(-1, 3) + + + bb_min = [-1*(box_side_length/2), -1*(box_side_length/2), -1*(box_side_length/2)] + bb_max = [1*(box_side_length/2), 1*(box_side_length/2), 1*(box_side_length/2)] + bounds = torch.tensor([bb_min, bb_max], dtype=rays_o.dtype, device=rays_o.device) + is_valid = torch.ones(rays_o.shape[:-1], dtype=bool, device=rays_o.device) + + # Precompute inverse for stability. + invdir = 1 / rays_d + sign = (invdir < 0).long() + + # Intersect with YZ plane. + tmin = (bounds.index_select(0, sign[..., 0])[..., 0] - rays_o[..., 0]) * invdir[..., 0] + tmax = (bounds.index_select(0, 1 - sign[..., 0])[..., 0] - rays_o[..., 0]) * invdir[..., 0] + + # Intersect with XZ plane. + tymin = (bounds.index_select(0, sign[..., 1])[..., 1] - rays_o[..., 1]) * invdir[..., 1] + tymax = (bounds.index_select(0, 1 - sign[..., 1])[..., 1] - rays_o[..., 1]) * invdir[..., 1] + + # Resolve parallel rays. + is_valid[torch.logical_or(tmin > tymax, tymin > tmax)] = False + + # Use the shortest intersection. 
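+    # Standard slab test: the entry distance is the maximum of the per-axis entry distances and
+    # the exit distance is the minimum of the per-axis exit distances; a ray misses the box when
+    # that interval becomes empty.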
+ tmin = torch.max(tmin, tymin) + tmax = torch.min(tmax, tymax) + + # Intersect with XY plane. + tzmin = (bounds.index_select(0, sign[..., 2])[..., 2] - rays_o[..., 2]) * invdir[..., 2] + tzmax = (bounds.index_select(0, 1 - sign[..., 2])[..., 2] - rays_o[..., 2]) * invdir[..., 2] + + # Resolve parallel rays. + is_valid[torch.logical_or(tmin > tzmax, tzmin > tmax)] = False + + # Use the shortest intersection. + tmin = torch.max(tmin, tzmin) + tmax = torch.min(tmax, tzmax) + + # Mark invalid. + tmin[torch.logical_not(is_valid)] = -1 + tmax[torch.logical_not(is_valid)] = -2 + + return tmin.reshape(*o_shape[:-1], 1), tmax.reshape(*o_shape[:-1], 1) + + +def linspace(start: torch.Tensor, stop: torch.Tensor, num: int): + """ + Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to end, inclusive. + Replicates but the multi-dimensional bahaviour of numpy.linspace in PyTorch. + """ + # create a tensor of 'num' steps from 0 to 1 + steps = torch.arange(num, dtype=torch.float32, device=start.device) / (num - 1) + + # reshape the 'steps' tensor to [-1, *([1]*start.ndim)] to allow for broadcastings + # - using 'steps.reshape([-1, *([1]*start.ndim)])' would be nice here but torchscript + # "cannot statically infer the expected size of a list in this contex", hence the code below + for i in range(start.ndim): + steps = steps.unsqueeze(-1) + + # the output starts at 'start' and increments until 'stop' in each dimension + out = start[None] + steps * (stop - start)[None] + + return out diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/ray_marcher.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/ray_marcher.py new file mode 100644 index 0000000..3c2d1ee --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/ray_marcher.py @@ -0,0 +1,60 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +The ray marcher takes the raw output of the implicit representation and uses the volume rendering equation to produce composited colors and depths. +Based off of the implementation in MipNeRF (this one doesn't do any cone tracing though!) +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + +class MipRayMarcher2(nn.Module): + def __init__(self): + super().__init__() + + def run_forward(self, colors, densities, depths, rendering_options): + deltas = depths[:, :, 1:] - depths[:, :, :-1] + colors_mid = (colors[:, :, :-1] + colors[:, :, 1:]) / 2 + densities_mid = (densities[:, :, :-1] + densities[:, :, 1:]) / 2 + depths_mid = (depths[:, :, :-1] + depths[:, :, 1:]) / 2 + + + if rendering_options['clamp_mode'] == 'softplus': + densities_mid = F.softplus(densities_mid - 1) # activation bias of -1 makes things initialize better + else: + assert False, "MipRayMarcher only supports `clamp_mode`=`softplus`!" 
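+        # Discrete volume rendering: alpha_i = 1 - exp(-sigma_i * delta_i), and each sample's
+        # weight is alpha_i times the accumulated transmittance prod_{j<i}(1 - alpha_j); colors,
+        # depths and the total alpha mask are then weight-averaged along the ray.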
+ + density_delta = densities_mid * deltas + + alpha = 1 - torch.exp(-density_delta) + + alpha_shifted = torch.cat([torch.ones_like(alpha[:, :, :1]), 1-alpha + 1e-10], -2) + weights = alpha * torch.cumprod(alpha_shifted, -2)[:, :, :-1] + + composite_rgb = torch.sum(weights * colors_mid, -2) + weight_total = weights.sum(2) + composite_depth = torch.sum(weights * depths_mid, -2) / weight_total + + # clip the composite to min/max range of depths + composite_depth = torch.nan_to_num(composite_depth, float('inf')) + composite_depth = torch.clamp(composite_depth, torch.min(depths), torch.max(depths)) + + if rendering_options.get('white_back', False): + composite_rgb = composite_rgb + 1 - weight_total + + return composite_rgb, composite_depth, weights + + + def forward(self, colors, densities, depths, rendering_options): + composite_rgb, composite_depth, weights = self.run_forward(colors, densities, depths, rendering_options) + + return composite_rgb, composite_depth, weights \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/ray_sampler.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/ray_sampler.py new file mode 100644 index 0000000..00dd07b --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/ray_sampler.py @@ -0,0 +1,63 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +The ray sampler is a module that takes in camera matrices and resolution and batches of rays. +Expects cam2world matrices that use the OpenCV camera coordinate system conventions. +""" + +import torch + +class RaySampler(torch.nn.Module): + def __init__(self): + super().__init__() + self.ray_origins_h, self.ray_directions, self.depths, self.image_coords, self.rendering_options = None, None, None, None, None + + + def forward(self, cam2world_matrix, intrinsics, resolution): + """ + Create batches of rays and return origins and directions. 
+ + cam2world_matrix: (N, 4, 4) + intrinsics: (N, 3, 3) + resolution: int + + ray_origins: (N, M, 3) + ray_dirs: (N, M, 2) + """ + N, M = cam2world_matrix.shape[0], resolution**2 + cam_locs_world = cam2world_matrix[:, :3, 3] + fx = intrinsics[:, 0, 0] + fy = intrinsics[:, 1, 1] + cx = intrinsics[:, 0, 2] + cy = intrinsics[:, 1, 2] + sk = intrinsics[:, 0, 1] + + uv = torch.stack(torch.meshgrid(torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), indexing='ij')) * (1./resolution) + (0.5/resolution) + uv = uv.flip(0).reshape(2, -1).transpose(1, 0) + uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1) + + x_cam = uv[:, :, 0].view(N, -1) + y_cam = uv[:, :, 1].view(N, -1) + z_cam = torch.ones((N, M), device=cam2world_matrix.device) + + x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y_cam/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam + y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam + + cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1) + + world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3] + + ray_dirs = world_rel_points - cam_locs_world[:, None, :] + ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2) + + ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1) + + return ray_origins, ray_dirs \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/renderer.py b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/renderer.py new file mode 100644 index 0000000..a925cf4 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/trigrid_rendering/volumetric_rendering/renderer.py @@ -0,0 +1,609 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +""" +The renderer is a module that takes in rays, decides where to sample along each +ray, and computes pixel colors using the volume rendering equation. +""" + +import math +import torch +from nerf.torch_utils import misc +from nerf.trigrid_rendering.volumetric_rendering.ray_marcher import MipRayMarcher2 +from nerf.trigrid_rendering.volumetric_rendering import math_utils +# from training.aligned_smplx import AlignedSMPLX + +#from training.aligned_smpl import AlignedSMPL +import smplx +from kaolin.ops.mesh import index_vertices_by_faces +from kaolin.metrics.trianglemesh import point_to_mesh_distance + +from nerf.trigrid_rendering.aligned_smpl import AlignedSMPL +import trimesh + + + + +# def generate_planes(): +# """ +# Defines planes by the three vectors that form the "axes" of the +# plane. Should work with arbitrary number of planes and planes of +# arbitrary orientation. 
+# """ +# return torch.tensor([[[1, 0, 0], +# [0, 1, 0], +# [0, 0, 1]], +# [[1, 0, 0], +# [0, 0, 1], +# [0, 1, 0]], +# [[0, 0, 1], +# [1, 0, 0], +# [0, 1, 0]]], dtype=torch.float32) + +# correct tri-planes, see https://github.com/NVlabs/eg3d/issues/67 +def generate_planes(): + """ + Defines planes by the three vectors that form the "axes" of the + plane. Should work with arbitrary number of planes and planes of + arbitrary orientation. + """ + return torch.tensor([[[1, 0, 0], + [0, 1, 0], + [0, 0, 1]], + [[1, 0, 0], + [0, 0, 1], + [0, 1, 0]], + [[0, 1, 0], + [0, 0, 1], + [1, 0, 0]]], dtype=torch.float32) + +def project_onto_planes(planes, coordinates): + """ + Does a projection of a 3D point onto a batch of 2D planes, + returning 2D plane coordinates. + + Takes plane axes of shape n_planes, 3, 3 + # Takes coordinates of shape N, M, 3 + # returns projections of shape N*n_planes, M, 2 + """ + N, M, C = coordinates.shape + n_planes, _, _ = planes.shape + coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3) + inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3) + projections = torch.bmm(coordinates, inv_planes) + return projections + +def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None, triplane_depth=1,render_high_freq = True): + assert padding_mode == 'zeros' + output_features = None + + + _, M, _ = coordinates.shape + coordinates = (2 / box_warp) * coordinates # TODO: add specific box bounds + projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1).unsqueeze(2) # (N x n_planes) x 1 x 1 x M x 3 + for res_k in plane_features: + plane_feature = plane_features[res_k] + N, n_planes, CD, H, W = plane_feature.shape + # _, M, _ = coordinates.shape + C, D = CD // triplane_depth, triplane_depth + plane_feature = plane_feature.view(N * n_planes, C, D, H, W) + + # coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds + + # projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1).unsqueeze(2) # (N x n_planes) x 1 x 1 x M x 3 + output_feature = torch.nn.functional.grid_sample(plane_feature, projected_coordinates.float(), mode=mode, + padding_mode=padding_mode, align_corners=False).permute(0, + 4, + 3, + 2, + 1).reshape(N, n_planes, M, C) + if output_features is None: + output_features = output_feature + else: + output_features += output_feature + + output_features /= len(plane_features) + + return output_features + +def sample_from_3dgrid(grid, coordinates): + """ + Expects coordinates in shape (batch_size, num_points_per_batch, 3) + Expects grid in shape (1, channels, H, W, D) + (Also works if grid has batch size) + Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels) + """ + batch_size, n_coords, n_dims = coordinates.shape + sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1), + coordinates.reshape(batch_size, 1, 1, -1, n_dims), + mode='bilinear', padding_mode='zeros', align_corners=False) + N, C, H, W, D = sampled_features.shape + sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C) + return sampled_features + +def triplane_crop_mask(xyz_unformatted, thresh, boxwarp, allow_bottom=True): + # bw,tc = boxwarp, thresh + bw = boxwarp + tc = boxwarp * thresh + device = xyz_unformatted.device + # xyz = 0.5 * (xyz_unformatted+1) * 
torch.tensor([-1,1,-1]).to(device)[None,None,:] + xyz = (xyz_unformatted) * torch.tensor([-1,1,-1]).to(device)[None,None,:] + ans = (xyz[:,:,[0,2]].abs() <= (bw/2-tc)).all(dim=-1,keepdim=True) + if allow_bottom: + ans = ans | ( + (xyz[:,:,1:2] <= -(bw/2-tc)) & + (xyz[:,:,[0,2]].abs() <= (bw/2-tc)).all(dim=-1,keepdim=True) + ) + return ~ans +def cull_clouds_mask(denities, thresh): + denities = torch.nn.functional.softplus(denities - 1) # activation bias of -1 makes things initialize better + alpha = 1 - torch.exp(-denities) + return alpha < thresh + + + +class ImportanceRenderer(torch.nn.Module): + def __init__(self, w_dim, num_ws,batch_size,thickness,box_warp,apply_deformation = True): + super().__init__() + self.ray_marcher = MipRayMarcher2() + self.plane_axes = generate_planes() + self.batch_size = batch_size + self.num_betas = 10 + self.apply_deformation = apply_deformation + if apply_deformation: + body_model_smpl = smplx.create('./smplx_models', + model_type='smpl', + gender='neutral', + use_compressed=False, + use_face_contour=True, + num_betas=self.num_betas, + num_expression_coeffs=10, + ext='npz', + batch_size = batch_size + ).cuda() + self.aligned_SMPL = AlignedSMPL(model=body_model_smpl,batch_size=batch_size) + + + + shaped_smpl_data = self.aligned_SMPL.generate_shaped_smpl( + betas=None, + scale=None, # shape_params['scale'], + transl=None, # shape_params['transl'] + ) + shaped_smpl = shaped_smpl_data['vertices'].detach().contiguous() + align_points = shaped_smpl_data['align_joint_coordinate'].detach().contiguous() + + self.register_buffer('shaped_smpl', shaped_smpl) + self.register_buffer('align_points', align_points) + + # shaped_smpl [B,N,3] + # filter points that outside box + box_side_length = box_warp + # shaped_smpl: B,N,3 + point_mask = shaped_smpl[0:1,:,0] > -box_side_length/2 # 1,N + point_mask = point_mask & (shaped_smpl[0:1,:,0] < box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,1] > -box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,1] < box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,2] > -box_side_length/2) + point_mask = point_mask & (shaped_smpl[0:1,:,2] < box_side_length/2) + point_mask = point_mask.squeeze(0).cuda() # N + + faces = self.aligned_SMPL.faces # [20908, 3] + face_mask = torch.ones(faces.shape[0],dtype=torch.bool).cuda() # [20908] + for i in range(faces.shape[0]): + face_mask[i] = point_mask[faces[i,0]] and point_mask[faces[i,1]] and point_mask[faces[i,2]] + self.register_buffer('face_mask', face_mask) + + self.thickness = thickness + + # shaped_smpl [B,N,3] + # filter points that not on the head + # shaped_smpl: B,N,3 + + # + # point_mask = shaped_smpl[0:1, :, 1] > 0 # 1,N + + point_mask = shaped_smpl[0:1, :, 1] > 0.06 # 1,N + point_mask = point_mask & (shaped_smpl[0:1, :, 2] < -0.0) + + point_mask = point_mask.squeeze(0).cuda() # N + + faces = self.aligned_SMPL.faces # [20908, 3] + head_face_mask = torch.ones(faces.shape[0], dtype=torch.bool).cuda() # [20908] + for i in range(faces.shape[0]): + head_face_mask[i] = point_mask[faces[i, 0]] and point_mask[faces[i, 1]] and point_mask[faces[i, 2]] + self.register_buffer('head_face_mask', head_face_mask) + + self.back_head_depth = None + # + # print('head_face_mask shape:',head_face_mask.shape) + + + def set_batch_size(self,batch_size): + self.batch_size = batch_size + body_model_smpl = smplx.create('./smplx_models', + model_type='smpl', + gender='neutral', + use_compressed=False, + use_face_contour=True, + num_betas=self.num_betas, + 
num_expression_coeffs=10, + ext='npz', + batch_size=batch_size + ).to(self.aligned_SMPL.model.shapedirs.device) + self.aligned_SMPL.set_model(body_model_smpl) + self.aligned_SMPL.set_batch_size(batch_size) + shaped_smpl_data = self.aligned_SMPL.generate_shaped_smpl( + betas=None, + scale=None, # shape_params['scale'], + transl=None, # shape_params['transl'] + ) + shaped_smpl = shaped_smpl_data['vertices'].detach().contiguous() + align_points = shaped_smpl_data['align_joint_coordinate'].detach().contiguous() + self.register_buffer('shaped_smpl', shaped_smpl) + self.register_buffer('align_points', align_points) + + + def render_meshes(self, shape_pose_params,resolution,cameras): + images = self.aligned_SMPL.get_visualization(shape_pose_params, resolution, cameras) + return images + + + def get_deformed_coordinate(self, ws, pose_params, original_coordinate): + + + posed_smpl = self.aligned_SMPL.generate_posed_smpl(betas=None, + body_pose=pose_params, + scale=None, # shape_params['scale'], + transl=None, # shape_params['transl'], + align_joint_coordinate=self.align_points)['vertices'] + # misc.assert_shape(posed_smpl, [None, 10475, 3]) + + + mode = 'kaolin' + if mode == 'pytorch3d': + raise NotImplementedError + import pytorch3d.ops + #raise NotImplementedError + with torch.no_grad(): + + smpl_def_on_mesh = self.shaped_smpl - posed_smpl # [B, , 3] + + # find the nearest face in posed_smpl for each vertex in original_coordinate + knn_res = pytorch3d.ops.knn_points(p1=original_coordinate, p2=posed_smpl, K=1) + distance = knn_res[0] # [B, N, 1] + p1_index = knn_res[1].repeat(1, 1, 3) # [B, N, 3] + misc.assert_shape(p1_index, [original_coordinate.shape[0], original_coordinate.shape[1],3]) + + + DistToMesh = distance.squeeze(-1) # [B, N] + + SmplDef = smpl_def_on_mesh.gather(1, p1_index) # [B, N, 3] + mask = DistToMesh < self.thickness# [B, N] + + + scale = 5. 
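+ # NOTE: the mesh-carried offset below is attenuated exponentially with the point's distance to the
+ # SMPL surface and suppressed to zero outside the `thickness` band, so only points close to the
+ # body are deformed.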
+ SmplDef1 = SmplDef / torch.exp(DistToMesh.unsqueeze(-1) * scale) # [B, N, 3] + + scale = DistToMesh.unsqueeze(-1) / (self.thickness * 2) * 20 + SmplDef2 = torch.zeros_like(SmplDef).to(SmplDef.device) + + SmplDef = torch.where(mask.unsqueeze(-1), SmplDef1, SmplDef2) # [B, N, 3] + elif mode == 'kaolin': + faces = self.aligned_SMPL.faces.clone() # [20908, 3] + faces = faces[self.face_mask, :] + # find the nearest face in shaped_smplx for each vertex in original_coordinate + vertex_faces = posed_smpl.clone() # [B, 6085, 3] + + with torch.no_grad(): + face_vertices = index_vertices_by_faces(vertex_faces, faces) + distance, index, dist_type = point_to_mesh_distance(original_coordinate, face_vertices) # B, N + distance = torch.sqrt(distance) # [B, N, 1] + selected_posed_smpl_vertices = [] + selected_shaped_smpl_vertices = [] + + for i in range(original_coordinate.shape[0]): + selected_face = faces[index[i]] + selected_posed_smpl_vertices.append(index_vertices_by_faces(posed_smpl[i:i + 1], + selected_face)) # [1, N, 3, 3] + selected_shaped_smpl_vertices.append(index_vertices_by_faces(self.shaped_smpl[i:i + 1], + selected_face)) # [1, N, 3, 3] + + selected_posed_smpl_vertices = torch.cat(selected_posed_smpl_vertices, dim=0) # [B, N, 3, 3] + selected_shaped_smpl_vertices = torch.cat(selected_shaped_smpl_vertices, dim=0) # [B, N, 3, 3] + + y_axes = torch.cross(selected_posed_smpl_vertices[:, :, 1, :] - selected_posed_smpl_vertices[:, :, 0, :], + selected_posed_smpl_vertices[:, :, 2, :] - selected_posed_smpl_vertices[:, :, 0, + :]) # [B, N, 3] + y_axes = y_axes / torch.norm(y_axes, dim=2, keepdim=True) # [B, N, 3] + + x_axes = selected_posed_smpl_vertices[:, :, 1, :] - selected_posed_smpl_vertices[:, :, 0, :] # [B, N, 3] + x_axes = x_axes / torch.norm(x_axes, dim=2, keepdim=True) # [B, N, 3] + + z_axes = torch.cross(x_axes, y_axes) # [B, N, 3] + + posed_smpl_coordinate = torch.stack( + [torch.sum((original_coordinate - selected_posed_smpl_vertices[:, :, 0, :]) * x_axes, dim=2), + torch.sum((original_coordinate - selected_posed_smpl_vertices[:, :, 0, :]) * y_axes, dim=2), + torch.sum((original_coordinate - selected_posed_smpl_vertices[:, :, 0, :]) * z_axes, dim=2)], + dim=2) # [B, N, 3] + del x_axes, y_axes, z_axes + y_axes = torch.cross(selected_shaped_smpl_vertices[:, :, 1, :] - selected_shaped_smpl_vertices[:, :, 0, :], + selected_shaped_smpl_vertices[:, :, 2, :] - selected_shaped_smpl_vertices[:, :, 0, :]) + y_axes = y_axes / torch.norm(y_axes, dim=2, keepdim=True) + + x_axes = selected_shaped_smpl_vertices[:, :, 1, :] - selected_shaped_smpl_vertices[:, :, 0, :] + x_axes = x_axes / torch.norm(x_axes, dim=2, keepdim=True) + + z_axes = torch.cross(x_axes, y_axes) + + new_coordinate = posed_smpl_coordinate[:, :, 0:1] * x_axes + \ + posed_smpl_coordinate[:, :, 1:2] * y_axes + \ + posed_smpl_coordinate[:, :, 2:3] * z_axes + \ + selected_shaped_smpl_vertices[:, :, 0, :] # [B, N, 3] + + SmplDef = new_coordinate - original_coordinate # [B, N, 3] + + DistToMesh = distance.unsqueeze(-1) # [B, N, 1] + + mask = DistToMesh < self.thickness # [B, N,1] + + SmplDef2 = torch.zeros_like(SmplDef).to(SmplDef.device) + SmplDef = torch.where(mask, SmplDef, SmplDef2) # [B, N, 3] + + else: + raise NotImplementedError + + original_coordinate = original_coordinate + SmplDef + return original_coordinate + + def forward(self, planes, decoder, ray_origins, ray_directions, rendering_options, apply_def = False, ws = None, pose_params = None, triplane_crop=0.1, cull_clouds=None, binarize_clouds=None ): + _ = ws + if apply_def: 
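+ # NOTE: when deformation is enabled the SMPL body pose must be supplied; the ray samples generated
+ # below are warped from posed space back to the canonical shaped SMPL space by
+ # get_deformed_coordinate() before the tri-grid features are queried.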
+ assert pose_params is not None + else: + assert pose_params is None + + self.plane_axes = self.plane_axes.to(ray_origins.device) + + # check if grad = 0 + + if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto': + ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp']) + is_ray_valid = ray_end > ray_start + if torch.any(is_ray_valid).item(): + ray_start[~is_ray_valid] = ray_start[is_ray_valid].min() + ray_end[~is_ray_valid] = ray_start[is_ray_valid].max() + depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling']) + else: + # Create stratified depth samples + depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling']) + + batch_size, num_rays, samples_per_ray, _ = depths_coarse.shape + + # Coarse Pass + sample_coordinates = (ray_origins.unsqueeze(-2) + depths_coarse * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3) + sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3) + # deform the sample_coordinates + if apply_def: + sample_coordinates = self.get_deformed_coordinate(None, pose_params, sample_coordinates) + + + out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options) + colors_coarse = out['rgb'] + densities_coarse = out['sigma'] + + xyz_coarse = out['xyz'] + + if triplane_crop: + # print(xyz_fine.amin(dim=(0,1))) + # print(xyz_fine.amax(dim=(0,1))) + cropmask = triplane_crop_mask(xyz_coarse, triplane_crop, rendering_options['box_warp']) + densities_coarse[cropmask] = -1e3 + if binarize_clouds: + ccmask = cull_clouds_mask(densities_coarse, binarize_clouds) + densities_coarse[ccmask] = -1e3 + densities_coarse[~ccmask] = 1e3 + elif cull_clouds: + ccmask = cull_clouds_mask(densities_coarse, cull_clouds) + densities_coarse[ccmask] = -1e3 + + colors_coarse = colors_coarse.reshape(batch_size, num_rays, samples_per_ray, colors_coarse.shape[-1]) + densities_coarse = densities_coarse.reshape(batch_size, num_rays, samples_per_ray, 1) + xyz_coarse = xyz_coarse.reshape(batch_size, num_rays, samples_per_ray, xyz_coarse.shape[-1]) + + # Fine Pass + N_importance = rendering_options['depth_resolution_importance'] + if N_importance > 0: + _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options) + + depths_fine = self.sample_importance(depths_coarse, weights, N_importance) + + sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, N_importance, -1).reshape(batch_size, -1, 3) + sample_coordinates = (ray_origins.unsqueeze(-2) + depths_fine * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3) + # deform the sample_coordinates + if apply_def: + sample_coordinates = self.get_deformed_coordinate(None, pose_params, sample_coordinates) + + out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options) + colors_fine = out['rgb'] + densities_fine = out['sigma'] + xyz_fine = out['xyz'] + if triplane_crop: + # print(xyz_fine.amin(dim=(0,1))) + # print(xyz_fine.amax(dim=(0,1))) + cropmask = triplane_crop_mask(xyz_fine, triplane_crop, rendering_options['box_warp']) + densities_fine[cropmask] = -1e3 + if binarize_clouds: + ccmask = cull_clouds_mask(densities_fine, binarize_clouds) + 
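+ # NOTE: +/-1e3 saturates the softplus density activation in the ray marcher, so culled samples
+ # become fully transparent (alpha ~ 0) and the remaining samples fully opaque (alpha ~ 1),
+ # effectively binarizing the fine-pass densities.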
densities_fine[ccmask] = -1e3 + densities_fine[~ccmask] = 1e3 + elif cull_clouds: + ccmask = cull_clouds_mask(densities_fine, cull_clouds) + densities_fine[ccmask] = -1e3 + xyz_fine = xyz_fine.reshape(batch_size, num_rays, N_importance, xyz_fine.shape[-1]) + colors_fine = colors_fine.reshape(batch_size, num_rays, N_importance, colors_fine.shape[-1]) + densities_fine = densities_fine.reshape(batch_size, num_rays, N_importance, 1) + + # all_depths, all_colors, all_densities = self.unify_samples(depths_coarse, colors_coarse, densities_coarse, + # depths_fine, colors_fine, densities_fine) + all_depths, all_colors, all_densities, all_xyz = self.unify_samples( + depths_coarse, colors_coarse, densities_coarse, xyz_coarse, + depths_fine, colors_fine, densities_fine, xyz_fine, + ) + + # Aggregate + # rgb_final, depth_final, weights = self.ray_marcher(all_colors, all_densities, all_depths, rendering_options) + + all_colors_ = torch.cat([all_colors, all_xyz], dim=-1) + rgb_final_, depth_final, weights = self.ray_marcher(all_colors_, all_densities, all_depths, rendering_options) + rgb_final = rgb_final_[...,:-3] + xyz_final = rgb_final_[...,-3:] + else: + # rgb_final, depth_final, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options) + colors_coarse_ = torch.cat([colors_coarse, xyz_coarse], dim=-1) + rgb_final_, depth_final, weights = self.ray_marcher(colors_coarse_, densities_coarse, depths_coarse, rendering_options) + rgb_final = rgb_final_[...,:-3] + xyz_final = rgb_final_[...,-3:] + + + output = {'rgb_final': rgb_final, 'depth_final': depth_final, 'weights': weights} + + return output + + def run_model(self, planes, decoder, sample_coordinates, sample_directions, options): + self.plane_axes = self.plane_axes.to(planes[list(planes.keys())[0]].device) + sampled_features = sample_from_planes(self.plane_axes, planes, sample_coordinates, padding_mode='zeros', + box_warp=options['box_warp'], triplane_depth=options['triplane_depth']) + + out = decoder(sampled_features, sample_directions) + + if options.get('density_noise', 0) > 0: + out['sigma'] += torch.randn_like(out['sigma']) * options['density_noise'] + out['xyz'] = sample_coordinates#.permute(0,2,1)[...,None] + return out + + def sort_samples(self, all_depths, all_colors, all_densities): + _, indices = torch.sort(all_depths, dim=-2) + all_depths = torch.gather(all_depths, -2, indices) + all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1])) + all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1)) + return all_depths, all_colors, all_densities + + # def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2): + # all_depths = torch.cat([depths1, depths2], dim = -2) + # all_colors = torch.cat([colors1, colors2], dim = -2) + # all_densities = torch.cat([densities1, densities2], dim = -2) + + # _, indices = torch.sort(all_depths, dim=-2) + # all_depths = torch.gather(all_depths, -2, indices) + # all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1])) + # all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1)) + + # return all_depths, all_colors, all_densities + def unify_samples(self, depths1, colors1, densities1, xyz1, depths2, colors2, densities2, xyz2): + all_depths = torch.cat([depths1, depths2], dim = -2) + all_colors = torch.cat([colors1, colors2], dim = -2) + all_xyz = torch.cat([xyz1, xyz2], dim = -2) + all_densities = torch.cat([densities1, densities2], dim = -2) 
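+ # NOTE: coarse and fine samples were concatenated along the per-ray sample dimension; the
+ # sort/gather below re-orders every ray's samples by depth so the ray marcher can composite them
+ # front to back.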
+ + _, indices = torch.sort(all_depths, dim=-2) + all_depths = torch.gather(all_depths, -2, indices) + all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1])) + all_xyz = torch.gather(all_xyz, -2, indices.expand(-1, -1, -1, all_xyz.shape[-1])) + all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1)) + + return all_depths, all_colors, all_densities, all_xyz + + def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False): + """ + Return depths of approximately uniformly spaced samples along rays. + """ + N, M, _ = ray_origins.shape + if disparity_space_sampling: + depths_coarse = torch.linspace(0, + 1, + depth_resolution, + device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1) + depth_delta = 1/(depth_resolution - 1) + depths_coarse += torch.rand_like(depths_coarse) * depth_delta + depths_coarse = 1./(1./ray_start * (1. - depths_coarse) + 1./ray_end * depths_coarse) + else: + if type(ray_start) == torch.Tensor: + depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3) + depth_delta = (ray_end - ray_start) / (depth_resolution - 1) + depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None] + else: + depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1) + depth_delta = (ray_end - ray_start)/(depth_resolution - 1) + depths_coarse += torch.rand_like(depths_coarse) * depth_delta + + return depths_coarse + + def sample_importance(self, z_vals, weights, N_importance): + """ + Return depths of importance sampled points along rays. See NeRF importance sampling for more. + """ + with torch.no_grad(): + batch_size, num_rays, samples_per_ray, _ = z_vals.shape + + z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray) + weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher + + # smooth weights + weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1) + weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze() + weights = weights + 0.01 + + z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:]) + importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1], + N_importance).detach().reshape(batch_size, num_rays, N_importance, 1) + return importance_z_vals + + def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5): + """ + Sample @N_importance samples from @bins with distribution defined by @weights. + Inputs: + bins: (N_rays, N_samples_+1) where N_samples_ is "the number of coarse samples per ray - 2" + weights: (N_rays, N_samples_) + N_importance: the number of samples to draw from the distribution + det: deterministic or not + eps: a small number to prevent division by zero + Outputs: + samples: the sampled samples + """ + N_rays, N_samples_ = weights.shape + weights = weights + eps # prevent division by zero (don't do inplace op!) 
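+ # NOTE: inverse-transform sampling — the smoothed weights are normalized into a PDF, integrated
+ # into a CDF, and the uniform draws u are mapped through the inverse CDF by locating the enclosing
+ # bin with searchsorted and interpolating linearly inside it.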
+ pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)
+ cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function
+ cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1)
+ # padded to 0~1 inclusive
+
+ if det:
+ u = torch.linspace(0, 1, N_importance, device=bins.device)
+ u = u.expand(N_rays, N_importance)
+ else:
+ u = torch.rand(N_rays, N_importance, device=bins.device)
+ u = u.contiguous()
+
+ inds = torch.searchsorted(cdf, u, right=True)
+ below = torch.clamp_min(inds-1, 0)
+ above = torch.clamp_max(inds, N_samples_)
+
+ inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)
+ cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)
+ bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)
+
+ denom = cdf_g[...,1]-cdf_g[...,0]
+ denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled anyway, therefore any value for it is fine (set to 1 here)
+ samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])
+ return samples
+
+ if azimuth_val >= -90 and azimuth_val < 90:
+ if azimuth_val >= 0:
+ r = 1 - azimuth_val / 90
+ else:
+ r = 1 + azimuth_val / 90
+ start_z = embeddings['front']
+ end_z = embeddings['side']
+ # if random.random() < 0.3:
+ # r = r + random.gauss(0, 0.08)
+ pos_z = r * start_z + (1 - r) * end_z
+ text_z = torch.cat([pos_z, embeddings['front'], embeddings['side']], dim=0)
+ if r > 0.8:
+ front_neg_w = 0.0
+ else:
+ front_neg_w = math.exp(-r * opt.front_decay_factor) * opt.negative_w
+ if r < 0.2:
+ side_neg_w = 0.0
+ else:
+ side_neg_w = math.exp(-(1 - r) * opt.side_decay_factor) * opt.negative_w
+
+ weights = torch.tensor([1.0, front_neg_w, side_neg_w])
+ else:
+ if azimuth_val >= 0:
+ r = 1 - (azimuth_val - 90) / 90
+ else:
+ r = 1 + (azimuth_val + 90) / 90
+ start_z = embeddings['side']
+ end_z = embeddings['back']
+ # if random.random() < 0.3:
+ # r = r + random.gauss(0, 0.08)
+ pos_z = r * start_z + (1 - r) * end_z
+ text_z = torch.cat([pos_z, embeddings['side'], embeddings['front']], dim=0)
+ front_neg_w = opt.negative_w
+ if r > 0.8:
+ side_neg_w = 0.0
+ else:
+ side_neg_w = math.exp(-r * opt.side_decay_factor) * opt.negative_w / 2
+
+ weights = torch.tensor([1.0, side_neg_w, front_neg_w])
+ return text_z, weights.to(text_z.device)
+
+
+def custom_meshgrid(*args):
+ # ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid
+ if pver.parse(torch.__version__) < pver.parse('1.10'):
+ return torch.meshgrid(*args)
+ else:
+ return torch.meshgrid(*args, indexing='ij')
+
+
+def safe_normalize(x, eps=1e-20):
+ return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))
+
+
+@torch.cuda.amp.autocast(enabled=False)
+def get_rays(poses, intrinsics, H, W, N=-1, error_map=None):
+ ''' get rays
+ Args:
+ poses: [B, 4, 4], cam2world
+ intrinsics: [4]
+ H, W, N: int
+ error_map: [B, 128 * 128], sample probability based on training error
+ Returns:
+ rays_o, rays_d: [B, N, 3]
+ inds: [B, N]
+ '''
+
+ device = poses.device
+ B = poses.shape[0]
+ fx, fy, cx, cy = intrinsics
+
+ i, j = custom_meshgrid(torch.linspace(0, W - 1, W, device=device), torch.linspace(0, H - 1, H, device=device))
+ i = i.t().reshape([1, H * W]).expand([B, H * W]) + 0.5
+ j = j.t().reshape([1, H * W]).expand([B, H * W]) + 0.5
+
+ results = {}
+
+ if N > 0:
+ N = min(N, H * W)
+
+ if error_map is None:
+ inds = torch.randint(0, H * W, size=[N], device=device) # may duplicate
+ inds = inds.expand([B, N])
+ else:
+
+ # weighted sample on a low-reso grid
+ inds_coarse = torch.multinomial(error_map.to(device), N, replacement=False) # [B, N], but in [0, 128*128)
+
# map to the original resolution with random perturb. + inds_x, inds_y = inds_coarse // 128, inds_coarse % 128 # `//` will throw a warning in torch 1.10... anyway. + sx, sy = H / 128, W / 128 + inds_x = (inds_x * sx + torch.rand(B, N, device=device) * sx).long().clamp(max=H - 1) + inds_y = (inds_y * sy + torch.rand(B, N, device=device) * sy).long().clamp(max=W - 1) + inds = inds_x * W + inds_y + + results['inds_coarse'] = inds_coarse # need this when updating error_map + + i = torch.gather(i, -1, inds) + j = torch.gather(j, -1, inds) + + results['inds'] = inds + + else: + inds = torch.arange(H * W, device=device).expand([B, H * W]) + + zs = - torch.ones_like(i) + xs = - (i - cx) / fx * zs + ys = (j - cy) / fy * zs + directions = torch.stack((xs, ys, zs), dim=-1) + # directions = safe_normalize(directions) + rays_d = directions @ poses[:, :3, :3].transpose(-1, -2) # (B, N, 3) + + rays_o = poses[..., :3, 3] # [B, 3] + rays_o = rays_o[..., None, :].expand_as(rays_d) # [B, N, 3] + + results['rays_o'] = rays_o + results['rays_d'] = rays_d + + return results + + +def seed_everything(seed): + random.seed(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + # torch.backends.cudnn.deterministic = True + # torch.backends.cudnn.benchmark = True + + +@torch.jit.script +def linear_to_srgb(x): + return torch.where(x < 0.0031308, 12.92 * x, 1.055 * x ** 0.41666 - 0.055) + + +@torch.jit.script +def srgb_to_linear(x): + return torch.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4) + + +class TrigridTrainer(object): + def __init__(self, + argv, # command line args + name, # name of this experiment + opt, # extra conf + model, # network + teacher_model, + guidance, # guidance network + criterion=None, # loss function, if None, assume inline implementation in train_step + optimizer=None, # optimizer + ema_decay=None, # if use EMA, set the decay + lr_scheduler=None, # scheduler + metrics=[], + # metrics for evaluation, if None, use val_loss to measure performance, else use the first metric. + local_rank=0, # which GPU am I + world_size=1, # total num of GPUs + device=None, # device to use, usually setting to None is OK. 
(auto choose device) + mute=False, # whether to mute all print + fp16=False, # amp optimize level + max_keep_ckpt=2, # max num of saved ckpts in disk + workspace='workspace', # workspace to save logs & ckpts + best_mode='min', # the smaller/larger result, the better + use_loss_as_metric=True, # use loss as the first metric + report_metric_at_train=False, # also report metrics at training + use_checkpoint="latest", # which ckpt to use at init time + use_tensorboardX=True, # whether to use tensorboard for logging + scheduler_update_every_step=False, # whether to call scheduler.step() after every train step + ): + + self.argv = argv + self.name = name + self.opt = opt + self.mute = mute + self.metrics = metrics + self.local_rank = local_rank + self.world_size = world_size + self.workspace = workspace + self.ema_decay = ema_decay + self.fp16 = fp16 + self.best_mode = best_mode + self.use_loss_as_metric = use_loss_as_metric + self.report_metric_at_train = report_metric_at_train + self.max_keep_ckpt = max_keep_ckpt + self.use_checkpoint = use_checkpoint + self.use_tensorboardX = use_tensorboardX + self.time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") + self.scheduler_update_every_step = scheduler_update_every_step + self.device = device if device is not None else torch.device( + f'cuda:{local_rank}' if torch.cuda.is_available() else 'cpu') + self.console = Console() + + self.as_latent = True + self.vgg16 = None + model.to(self.device) + teacher_model.to(self.device) + if self.world_size > 1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) + + teacher_model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(teacher_model) + teacher_model = torch.nn.parallel.DistributedDataParallel(teacher_model, device_ids=[local_rank]) + self.model = model + self.teacher_model = teacher_model + + # guide model + self.guidance = guidance + self.embeddings = {} + + # text prompt / images + if self.guidance is not None: + for key in self.guidance: + for p in self.guidance[key].parameters(): + p.requires_grad = False + self.embeddings[key] = {} + self.prepare_embeddings() + + if isinstance(criterion, nn.Module): + criterion.to(self.device) + self.criterion = criterion + + if self.opt.images is not None: + self.pearson = PearsonCorrCoef().to(self.device) + + if optimizer is None: + self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, weight_decay=5e-4) # naive adam + else: + self.optimizer = optimizer(self.model) + + if lr_scheduler is None: + self.lr_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda epoch: 1) # fake scheduler + else: + self.lr_scheduler = lr_scheduler(self.optimizer) + + if ema_decay is not None: + self.ema = ExponentialMovingAverage(self.model.parameters(), decay=ema_decay) + else: + self.ema = None + + self.scaler = torch.cuda.amp.GradScaler(enabled=self.fp16) + + # variable init + self.total_train_t = 0 + self.epoch = 0 + self.global_step = 0 + self.local_step = 0 + self.stats = { + "loss": [], + "valid_loss": [], + "results": [], # metrics[0], or valid_loss + "checkpoints": [], # record path of saved ckpt, to automatically remove old ckpt + "best_result": None, + } + + # auto fix + if len(metrics) == 0 or self.use_loss_as_metric: + self.best_mode = 'min' + + # workspace prepare + self.log_ptr = None + if self.workspace is not None: + os.makedirs(self.workspace, exist_ok=True) + self.log_path = os.path.join(workspace, f"log_{self.name}.txt") + self.log_ptr = 
open(self.log_path, "a+") + + self.ckpt_path = os.path.join(self.workspace, 'latent_trigrid_fit_checkpoints') + self.best_path = f"{self.ckpt_path}/{self.name}.pth" + os.makedirs(self.ckpt_path, exist_ok=True) + + # Save a copy of image_config in the experiment workspace + if opt.image_config is not None: + shutil.copyfile(opt.image_config, os.path.join(self.workspace, os.path.basename(opt.image_config))) + + # Save a copy of images in the experiment workspace + if opt.images is not None: + for image_file in opt.images: + shutil.copyfile(image_file, os.path.join(self.workspace, os.path.basename(image_file))) + + self.log(f'[INFO] Cmdline: {self.argv}') + self.log(f'[INFO] opt: {self.opt}') + self.log( + f'[INFO] Trainer: {self.name} | {self.time_stamp} | {self.device} | {"fp16" if self.fp16 else "fp32"} | {self.workspace}') + self.log(f'[INFO] #parameters: {sum([p.numel() for p in model.parameters() if p.requires_grad])}') + + if self.workspace is not None: + if self.use_checkpoint == "scratch": + self.log("[INFO] Training from scratch ...") + elif self.use_checkpoint == "latest": + self.log("[INFO] Loading latest checkpoint ...") + self.load_checkpoint() + elif self.use_checkpoint == "latest_model": + self.log("[INFO] Loading latest checkpoint (model only)...") + self.load_checkpoint(model_only=True) + elif self.use_checkpoint == "best": + if os.path.exists(self.best_path): + self.log("[INFO] Loading best checkpoint ...") + self.load_checkpoint(self.best_path) + else: + self.log(f"[INFO] {self.best_path} not found, loading latest ...") + self.load_checkpoint() + else: # path to ckpt + self.log(f"[INFO] Loading {self.use_checkpoint} ...") + self.load_checkpoint(self.use_checkpoint) + + # calculate the text embs. + @torch.no_grad() + def prepare_embeddings(self): + + # text embeddings (stable-diffusion) + if self.opt.text is not None: + + if 'SD' in self.guidance: + self.embeddings['SD']['default'] = self.guidance['SD'].get_text_embeds([self.opt.text]) + self.embeddings['SD']['uncond'] = self.guidance['SD'].get_text_embeds([self.opt.negative]) + + for d in ['front', 'side', 'back']: + self.embeddings['SD'][d] = self.guidance['SD'].get_text_embeds([f"{self.opt.text}, {d} view"]) + + if 'IF' in self.guidance: + self.embeddings['IF']['default'] = self.guidance['IF'].get_text_embeds([self.opt.text]) + self.embeddings['IF']['uncond'] = self.guidance['IF'].get_text_embeds([self.opt.negative]) + + for d in ['front', 'side', 'back']: + self.embeddings['IF'][d] = self.guidance['IF'].get_text_embeds([f"{self.opt.text}, {d} view"]) + + if 'clip' in self.guidance: + self.embeddings['clip']['text'] = self.guidance['clip'].get_text_embeds(self.opt.text) + + if self.opt.images is not None: + + h = int(self.opt.known_view_scale * self.opt.h) + w = int(self.opt.known_view_scale * self.opt.w) + + # load processed image + for image in self.opt.images: + assert image.endswith( + '_rgba.png') # the rest of this code assumes that the _rgba image has been passed. 
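+ # NOTE: each image prompt is expected as a preprocessed RGBA file; companion *_depth.png and
+ # *_normal.png maps with the same stem are loaded further below for the RGBD/normal supervision.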
+ rgbas = [cv2.cvtColor(cv2.imread(image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGRA2RGBA) for image in + self.opt.images] + rgba_hw = np.stack( + [cv2.resize(rgba, (w, h), interpolation=cv2.INTER_AREA).astype(np.float32) / 255 for rgba in rgbas]) + rgb_hw = rgba_hw[..., :3] * rgba_hw[..., 3:] + (1 - rgba_hw[..., 3:]) + self.rgb = torch.from_numpy(rgb_hw).permute(0, 3, 1, 2).contiguous().to(self.device) + self.mask = torch.from_numpy(rgba_hw[..., 3] > 0.5).to(self.device) + print(f'[INFO] dataset: load image prompt {self.opt.images} {self.rgb.shape}') + + # load depth + depth_paths = [image.replace('_rgba.png', '_depth.png') for image in self.opt.images] + depths = [cv2.imread(depth_path, cv2.IMREAD_UNCHANGED) for depth_path in depth_paths] + depth = np.stack([cv2.resize(depth, (w, h), interpolation=cv2.INTER_AREA) for depth in depths]) + self.depth = torch.from_numpy(depth.astype(np.float32) / 255).to( + self.device) # TODO: this should be mapped to FP16 + print(f'[INFO] dataset: load depth prompt {depth_paths} {self.depth.shape}') + + # load normal # TODO: don't load if normal loss is 0 + normal_paths = [image.replace('_rgba.png', '_normal.png') for image in self.opt.images] + normals = [cv2.imread(normal_path, cv2.IMREAD_UNCHANGED) for normal_path in normal_paths] + normal = np.stack([cv2.resize(normal, (w, h), interpolation=cv2.INTER_AREA) for normal in normals]) + self.normal = torch.from_numpy(normal.astype(np.float32) / 255).to(self.device) + print(f'[INFO] dataset: load normal prompt {normal_paths} {self.normal.shape}') + + # encode embeddings for zero123 + if 'zero123' in self.guidance: + rgba_256 = np.stack( + [cv2.resize(rgba, (256, 256), interpolation=cv2.INTER_AREA).astype(np.float32) / 255 for rgba in + rgbas]) + rgbs_256 = rgba_256[..., :3] * rgba_256[..., 3:] + (1 - rgba_256[..., 3:]) + rgb_256 = torch.from_numpy(rgbs_256).permute(0, 3, 1, 2).contiguous().to(self.device) + guidance_embeds = self.guidance['zero123'].get_img_embeds(rgb_256) + self.embeddings['zero123']['default'] = { + 'zero123_ws': self.opt.zero123_ws, + 'c_crossattn': guidance_embeds[0], + 'c_concat': guidance_embeds[1], + 'ref_polars': self.opt.ref_polars, + 'ref_azimuths': self.opt.ref_azimuths, + 'ref_radii': self.opt.ref_radii, + } + + if 'clip' in self.guidance: + self.embeddings['clip']['image'] = self.guidance['clip'].get_img_embeds(self.rgb) + + def __del__(self): + if self.log_ptr: + self.log_ptr.close() + + def log(self, *args, **kwargs): + if self.local_rank == 0: + if not self.mute: + # print(*args) + self.console.print(*args, **kwargs) + if self.log_ptr: + print(*args, file=self.log_ptr) + self.log_ptr.flush() # write immediately to file + + ### ------------------------------ + + def train_step(self, data, save_guidance_path: Path = None): + """ + Args: + save_guidance_path: an image that combines the NeRF render, the added latent noise, + the denoised result and optionally the fully-denoised image. + """ + + # perform RGBD loss instead of SDS if is image-conditioned + do_rgbd_loss = self.opt.images is not None and \ + (self.global_step % self.opt.known_view_interval == 0) + + # override random camera with fixed known camera + if do_rgbd_loss: + data = self.default_view_data + + # experiment iterations ratio + # i.e. what proportion of this experiment have we completed (in terms of iterations) so far? 
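+ # e.g. (illustrative values) with exp_start_iter=0, exp_end_iter=10000 and global_step=2500,
+ # exp_iter_ratio evaluates to 0.25, i.e. a quarter of the scheduled iterations are done.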
+ exp_iter_ratio = (self.global_step - self.opt.exp_start_iter) / ( + self.opt.exp_end_iter - self.opt.exp_start_iter) + + # progressively relaxing view range + if self.opt.progressive_view: + r = min(1.0, self.opt.progressive_view_init_ratio + 2.0 * exp_iter_ratio) + self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r, + self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r] + self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r, + self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r] + self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r, + self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r] + self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r, + self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r] + + # progressively increase max_level + if self.opt.progressive_level: + self.model.max_level = min(1.0, 0.25 + 2.0 * exp_iter_ratio) + + rays_o = data['rays_o'] # [B, N, 3] + rays_d = data['rays_d'] # [B, N, 3] + mvp = data['mvp'] # [B, 4, 4] + + B, N = rays_o.shape[:2] + H, W = data['H'], data['W'] + + teacher_rays_o = data['teacher_rays_o'] # [B, N, 3] + teacher_rays_d = data['teacher_rays_d'] # [B, N, 3] + teacher_H = data['teacher_H'] + teacher_W = data['teacher_W'] + + # When ref_data has B images > opt.batch_size + if B > self.opt.batch_size: + # choose batch_size images out of those B images + choice = torch.randperm(B)[:self.opt.batch_size] + B = self.opt.batch_size + rays_o = rays_o[choice] + rays_d = rays_d[choice] + mvp = mvp[choice] + + if do_rgbd_loss: + ambient_ratio = 1.0 + shading = 'lambertian' # use lambertian instead of albedo to get normal + binarize = False + bg_color = torch.rand((B * N, 3), device=rays_o.device) + + # add camera noise to avoid grid-like artifact + if self.opt.known_view_noise_scale > 0: + noise_scale = self.opt.known_view_noise_scale # * (1 - self.global_step / self.opt.iters) + rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale + rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale + + elif exp_iter_ratio <= self.opt.latent_iter_ratio: + ambient_ratio = 1.0 + shading = 'normal' + binarize = False + bg_color = None + + else: + if exp_iter_ratio <= self.opt.albedo_iter_ratio: + ambient_ratio = 1.0 + shading = 'albedo' + else: + # random shading + ambient_ratio = self.opt.min_ambient_ratio + (1.0 - self.opt.min_ambient_ratio) * random.random() + rand = random.random() + if rand >= (1.0 - self.opt.textureless_ratio): + shading = 'textureless' + else: + shading = 'lambertian' + + # random weights binarization (like mobile-nerf) [NOT WORKING NOW] + # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters) + # binarize = random.random() < binarize_thresh + binarize = False + + # random background + rand = random.random() + # if self.opt.bg_radius > 0 and rand > 0.5: + if self.opt.learnable_bg: + bg_color = None # use bg_net + elif self.opt.noise_bg: + # B, 3, H, W + # bg_color = torch.randn(B, 3, H, W).to(self.device) + # bg_color = bg_color * + # self.guidance['SD']. 
+ raise NotImplementedError + else: + bg_color = torch.rand(3).to(self.device) # single color random bg + + outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True, bg_color=bg_color, + ambient_ratio=ambient_ratio, shading=shading, binarize=binarize, as_latent=True) + + if self.as_latent: + # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D) + pred_latent = outputs['image'].reshape(B, H, W, 4).permute(0, 3, 1, 2).contiguous() # [B, 4, H, W] + pred_rgb = self.guidance['SD'].decode_latents(pred_latent).permute(0, 2, 3, 1).contiguous() # [B, H, W, 3] + else: + raise NotImplementedError + + pred_depth = outputs['depth'].squeeze(-1) # .reshape(B, H, W) + + with torch.no_grad(): + teacher_output = self.teacher_model.render(teacher_rays_o, teacher_rays_d, mvp, teacher_H, teacher_W, + staged=True, perturb=True, bg_color=bg_color, + ambient_ratio=ambient_ratio, shading=shading, binarize=binarize, + as_latent=False) + + teacher_rgb = teacher_output['image'] + teacher_rgb = teacher_rgb # .reshape(B, H, W, 3) + + teacher_latent = self.guidance['SD'].encode_imgs( + teacher_rgb.permute(0, 3, 1, 2).contiguous()) # [B, 4, H, W] + + teacher_depth = teacher_output['depth'].squeeze(-1) # [B, 1, H, W] + teacher_depth = F.interpolate(teacher_depth.unsqueeze(1), size=pred_depth.shape[1:3], + mode='nearest').squeeze(1) # [B, H, W] + + assert teacher_latent.shape == pred_latent.shape, f"teacher_latent.shape {teacher_latent.shape} != pred_rgb.shape {pred_latent.shape}" + assert teacher_depth.shape == pred_depth.shape, f"teacher_depth.shape {teacher_depth.shape} != pred_depth.shape {pred_depth.shape}" + + loss = 0 + losses = {} + #print(pred_latent.min(), pred_latent.max(), teacher_latent.min(), teacher_latent.max()) + latent_mse_loss = F.mse_loss(pred_latent, teacher_latent) + loss = loss + latent_mse_loss + losses['latent_mse_loss'] = latent_mse_loss + + rgb_mse_loss = F.mse_loss(pred_rgb, teacher_rgb)*20 + loss = loss + rgb_mse_loss + losses['rgb_mse_loss'] = rgb_mse_loss + + # rgb_perceptual_loss = self.perceptual_loss(pred_rgb, teacher_rgb) + # loss = loss + rgb_perceptual_loss + # losses['rgb_perceptual_loss'] = rgb_perceptual_loss + + # depth_mse_loss = F.mse_loss(pred_depth, teacher_depth) + # loss = loss + depth_mse_loss + # losses['depth_mse_loss'] = depth_mse_loss + + return pred_latent, pred_depth, teacher_rgb, teacher_depth, loss, losses + + def perceptual_loss(self, synth_images, target): + ''' + + :param synth_images: [0, 1] , [B, 3, H, W] + :param target: [0, 1] , [B, 3, H, W] + :return: + ''' + synth_images = synth_images.permute(0, 3, 1, 2).contiguous() + target = target.permute(0, 3, 1, 2).contiguous() + + if self.vgg16 is None: + url = './pretrained/vgg16.pt' + with open(url, 'rb') as f: + self.vgg16 = torch.jit.load(f).eval().to(self.device) + + target_images = target * 255 # [-1, 1] -> [0, 255] + if target_images.shape[2] > 256: + target_images = F.interpolate(target_images, size=(256, 256), mode='area') + target_features = self.vgg16(target_images, resize_images=False, return_lpips=True) + + synth_images = synth_images * 255 # [-1, 1] -> [0, 255] + if synth_images.shape[2] > 256: + synth_images = F.interpolate(synth_images, size=(256, 256), mode='area') + + # Features for synth images. + synth_features = self.vgg16(synth_images, resize_images=False, return_lpips=True) + dist = (target_features - synth_features).square().sum() * 0.1 + + return dist + + def post_train_step(self): + + # unscale grad before modifying it! 
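+ # NOTE: under AMP the stored gradients are pre-multiplied by the loss scale, so they are unscaled
+ # first to make the value-clipping threshold below act on the true gradient magnitudes.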
+ # ref: https://pytorch.org/docs/stable/notes/amp_examples.html#gradient-clipping + self.scaler.unscale_(self.optimizer) + + # clip grad + if self.opt.grad_clip >= 0: + torch.nn.utils.clip_grad_value_(self.model.parameters(), self.opt.grad_clip) + + if not self.opt.dmtet and self.opt.backbone == 'grid': + + if self.opt.lambda_tv > 0: + lambda_tv = min(1.0, self.global_step / (0.5 * self.opt.iters)) * self.opt.lambda_tv + self.model.encoder.grad_total_variation(lambda_tv, None, self.model.bound) + if self.opt.lambda_wd > 0: + self.model.encoder.grad_weight_decay(self.opt.lambda_wd) + + def eval_step(self, data): + + rays_o = data['rays_o'] # [B, N, 3] + rays_d = data['rays_d'] # [B, N, 3] + mvp = data['mvp'] + + B, N = rays_o.shape[:2] + H, W = data['H'], data['W'] + + teacher_rays_o = data['teacher_rays_o'] # [B, N, 3] + teacher_rays_d = data['teacher_rays_d'] # [B, N, 3] + teacher_H = data['teacher_H'] + teacher_W = data['teacher_W'] + + shading = data['shading'] if 'shading' in data else 'albedo' + ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0 + light_d = data['light_d'] if 'light_d' in data else None + + outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=False, light_d=light_d, + ambient_ratio=ambient_ratio, shading=shading, bg_color=None, as_latent=True) + + if self.as_latent: # always True + # from B, H, W, C to B, C, H, W + pred_rgb = self.guidance['SD'].decode_latents(outputs['image'].permute(0, 3, 1, 2).contiguous()).permute(0, + 2, + 3, + 1).contiguous() + else: + pred_rgb = outputs['image'] + + pred_rgb = pred_rgb # .reshape(B, H, W, 3) + pred_depth = outputs['depth'].squeeze(-1) # .reshape(B, H, W) + + with torch.no_grad(): + teacher_output = self.teacher_model.render(teacher_rays_o, teacher_rays_d, mvp, teacher_H, teacher_W, + staged=True, perturb=False, light_d=light_d, + ambient_ratio=ambient_ratio, shading=shading, bg_color=None, + as_latent=False) + + teacher_rgb = teacher_output['image'] + teacher_rgb = teacher_rgb # .reshape(B, H, W, 3) + + teacher_depth = teacher_output['depth'].squeeze(-1) # [B, 1, H, W] + teacher_depth = F.interpolate(teacher_depth.unsqueeze(1), size=pred_depth.shape[1:3], mode='nearest').squeeze( + 1) # [B, H, W] + + assert teacher_rgb.shape == pred_rgb.shape, f"teacher_rgb.shape {teacher_rgb.shape} != pred_rgb.shape {pred_rgb.shape}" + assert teacher_depth.shape == pred_depth.shape, f"teacher_depth.shape {teacher_depth.shape} != pred_depth.shape {pred_depth.shape}" + + # dummy + loss = torch.zeros([1], device=pred_rgb.device, dtype=pred_rgb.dtype) + + return pred_rgb, pred_depth, teacher_rgb, teacher_depth, loss + + def test_step(self, data, bg_color=None, perturb=False): + rays_o = data['rays_o'] # [B, N, 3] + rays_d = data['rays_d'] # [B, N, 3] + mvp = data['mvp'] + + B, N = rays_o.shape[:2] + H, W = data['H'], data['W'] + + teacher_rays_o = data['teacher_rays_o'] # [B, N, 3] + teacher_rays_d = data['teacher_rays_d'] # [B, N, 3] + teacher_H = data['teacher_H'] + teacher_W = data['teacher_W'] + + if bg_color is not None: + bg_color = bg_color.to(rays_o.device) + + shading = data['shading'] if 'shading' in data else 'albedo' + ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0 + light_d = data['light_d'] if 'light_d' in data else None + + outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=perturb, light_d=light_d, + ambient_ratio=ambient_ratio, shading=shading, bg_color=bg_color, as_latent=True) + 
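+ # NOTE: the student renders a 4-channel latent image (as_latent=True) that is decoded to RGB
+ # through the Stable Diffusion VAE, while the teacher below renders RGB directly at its own
+ # (teacher_H x teacher_W) resolution for comparison.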
+ if self.as_latent: # always True + # from B, H, W, C to B, C, H, W + pred_rgb = self.guidance['SD'].decode_latents(outputs['image'].permute(0, 3, 1, 2).contiguous()).permute(0, + 2, + 3, + 1).contiguous() + else: + pred_rgb = outputs['image'] + + pred_rgb = pred_rgb # .reshape(B, H, W, 3) + pred_depth = outputs['depth'].squeeze(-1) # .reshape(B, H, W) + + with torch.no_grad(): + teacher_output = self.teacher_model.render(teacher_rays_o, teacher_rays_d, mvp, teacher_H, teacher_W, + staged=True, perturb=perturb, light_d=light_d, + ambient_ratio=ambient_ratio, shading=shading, bg_color=bg_color, + as_latent=False) + + teacher_rgb = teacher_output['image'] + teacher_rgb = teacher_rgb # .reshape(B, H, W, 3) + + teacher_depth = teacher_output['depth'].squeeze(-1) + teacher_depth = F.interpolate(teacher_depth.unsqueeze(1), size=pred_depth.shape[1:3], mode='nearest').squeeze( + 1) # [B, H, W] + + assert teacher_rgb.shape == pred_rgb.shape, f"teacher_rgb.shape {teacher_rgb.shape} != pred_rgb.shape {pred_rgb.shape}" + assert teacher_depth.shape == pred_depth.shape, f"teacher_depth.shape {teacher_depth.shape} != pred_depth.shape {pred_depth.shape}" + + return pred_rgb, pred_depth, teacher_rgb, teacher_depth + + def save_mesh(self, loader=None, save_path=None): + + if save_path is None: + save_path = os.path.join(self.workspace, 'mesh') + + self.log(f"==> Saving mesh to {save_path}") + + os.makedirs(save_path, exist_ok=True) + + self.model.export_mesh(save_path, resolution=self.opt.mcubes_resolution, + decimate_target=self.opt.decimate_target) + + self.log(f"==> Finished saving mesh.") + + ### ------------------------------ + + def train(self, train_loader, valid_loader, test_loader, max_epochs): + + if self.use_tensorboardX and self.local_rank == 0: + self.writer = tensorboardX.SummaryWriter(os.path.join(self.workspace, "latent_trigrid_fit_run", self.name)) + + start_t = time.time() + self.evaluate_one_epoch(valid_loader) + for epoch in range(self.epoch + 1, max_epochs + 1): + self.epoch = epoch + + self.train_one_epoch(train_loader, max_epochs) + + if self.workspace is not None and self.local_rank == 0: + self.save_checkpoint(full=True, best=False) + + if self.epoch % self.opt.eval_interval == 0: + self.evaluate_one_epoch(valid_loader) + self.save_checkpoint(full=False, best=True) + + if self.epoch % self.opt.test_interval == 0 or self.epoch == max_epochs: + self.test(test_loader) + + end_t = time.time() + + self.total_train_t = end_t - start_t + self.total_train_t + + self.log(f"[INFO] training takes {(self.total_train_t) / 60:.4f} minutes.") + + if self.use_tensorboardX and self.local_rank == 0: + self.writer.close() + + def evaluate(self, loader, name=None): + self.use_tensorboardX, use_tensorboardX = False, self.use_tensorboardX + self.evaluate_one_epoch(loader, name) + self.use_tensorboardX = use_tensorboardX + + def test(self, loader, save_path=None, name=None, write_video=True): + + if save_path is None: + save_path = os.path.join(self.workspace, 'latent_trigrid_fit_results') + + if name is None: + name = f'{self.name}_ep{self.epoch:04d}' + + os.makedirs(save_path, exist_ok=True) + + self.log(f"==> Start Test, save results to {save_path}") + + pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, + bar_format='{percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') + self.model.eval() + self.teacher_model.eval() + + if write_video: + all_preds = [] + all_preds_depth = [] + + with torch.no_grad(): + + for i, data in enumerate(loader): + + with 
torch.cuda.amp.autocast(enabled=self.fp16): + preds, preds_depth, teacher_rgb, teacher_depth = self.test_step(data) + + pred = preds[0].detach().cpu().numpy() + pred = (pred * 255).astype(np.uint8) + + pred_depth = preds_depth[0].detach().cpu().numpy() + pred_depth = (pred_depth - pred_depth.min()) / (pred_depth.max() - pred_depth.min() + 1e-6) + pred_depth = (pred_depth * 255).astype(np.uint8) + + teacher_rgb = teacher_rgb[0].detach().cpu().numpy() + teacher_rgb = (teacher_rgb * 255).astype(np.uint8) + + teacher_depth = teacher_depth[0].detach().cpu().numpy() + teacher_depth = (teacher_depth - teacher_depth.min()) / ( + teacher_depth.max() - teacher_depth.min() + 1e-6) + teacher_depth = (teacher_depth * 255).astype(np.uint8) + + pred = np.concatenate([pred, teacher_rgb], axis=1) + pred_depth = np.concatenate([pred_depth, teacher_depth], axis=1) + + if write_video: + all_preds.append(pred) + all_preds_depth.append(pred_depth) + else: + cv2.imwrite(os.path.join(save_path, f'{name}_{i:04d}_rgb.png'), + cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)) + cv2.imwrite(os.path.join(save_path, f'{name}_{i:04d}_depth.png'), pred_depth) + + pbar.update(loader.batch_size) + + if write_video: + all_preds = np.stack(all_preds, axis=0) + all_preds_depth = np.stack(all_preds_depth, axis=0) + print('save video...', os.path.join(save_path, f'{name}_rgb.mp4'), + os.path.join(save_path, f'{name}_depth.mp4')) + imageio.mimwrite(os.path.join(save_path, f'{name}_rgb.mp4'), all_preds, fps=25, quality=8, + macro_block_size=1) + imageio.mimwrite(os.path.join(save_path, f'{name}_depth.mp4'), all_preds_depth, fps=25, quality=8, + macro_block_size=1) + + self.log(f"==> Finished Test.") + + def train_one_epoch(self, loader, max_epochs): + self.log( + f"==> [{time.strftime('%Y-%m-%d_%H-%M-%S')}] Start Training {self.workspace} Epoch {self.epoch}/{max_epochs}, lr={self.optimizer.param_groups[0]['lr']:.6f} ...") + + total_loss = 0 + total_latent_mse_loss = 0 + total_rgb_mse_loss = 0 + # total_rgb_perceptual_loss = 0 + # total_depth_mse_loss = 0 + + if self.local_rank == 0 and self.report_metric_at_train: + for metric in self.metrics: + metric.clear() + + self.model.train() + + # distributedSampler: must call set_epoch() to shuffle indices across multiple epochs + # ref: https://pytorch.org/docs/stable/data.html + if self.world_size > 1: + loader.sampler.set_epoch(self.epoch) + + if self.local_rank == 0: + pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, + bar_format='{desc}: {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') + + self.local_step = 0 + + if self.opt.save_guidance: + save_guidance_folder = Path(self.workspace) / 'guidance' + save_guidance_folder.mkdir(parents=True, exist_ok=True) + + for data in loader: + + # update grid every 16 steps + if ( + self.model.cuda_ray or self.model.taichi_ray) and self.global_step % self.opt.update_extra_interval == 0: + with torch.cuda.amp.autocast(enabled=self.fp16): + self.model.update_extra_state() + + self.local_step += 1 + self.global_step += 1 + + self.optimizer.zero_grad() + + with torch.cuda.amp.autocast(enabled=self.fp16): + if self.opt.save_guidance and (self.global_step % self.opt.save_guidance_interval == 0): + save_guidance_path = save_guidance_folder / f'step_{self.global_step:07d}.png' + else: + save_guidance_path = None + pred_rgbs, pred_depths, teacher_rgbs, teacher_depths, loss, losses = self.train_step(data, + save_guidance_path=save_guidance_path) + + # hooked grad clipping for RGB 
space + if self.opt.grad_clip_rgb >= 0: + def _hook(grad): + if self.opt.fp16: + # correctly handle the scale + grad_scale = self.scaler._get_scale_async() + return grad.clamp(grad_scale * -self.opt.grad_clip_rgb, grad_scale * self.opt.grad_clip_rgb) + else: + return grad.clamp(-self.opt.grad_clip_rgb, self.opt.grad_clip_rgb) + + pred_rgbs.register_hook(_hook) + # pred_rgbs.retain_grad() + + self.scaler.scale(loss).backward() + + self.post_train_step() + self.scaler.step(self.optimizer) + self.scaler.update() + + if self.scheduler_update_every_step: + self.lr_scheduler.step() + + loss_val = loss.item() + total_loss += loss_val + total_latent_mse_loss += losses['latent_mse_loss'].item() + total_rgb_mse_loss += losses['rgb_mse_loss'].item() + # total_rgb_perceptual_loss += losses['rgb_perceptual_loss'].item() + # total_depth_mse_loss += losses['depth_mse_loss'].item() + + if self.local_rank == 0: + # if self.report_metric_at_train: + # for metric in self.metrics: + # metric.update(preds, truths) + + if self.use_tensorboardX: + self.writer.add_scalar("train/loss", loss_val, self.global_step) + self.writer.add_scalar("train/lr", self.optimizer.param_groups[0]['lr'], self.global_step) + + if self.scheduler_update_every_step: + pbar.set_description( + f"loss={loss_val:.4f} ({total_loss / self.local_step:.4f}), " + f"latent_mse_loss={losses['latent_mse_loss'].item():.4f} ({total_latent_mse_loss / self.local_step:.4f}), " + f"rgb_mse_loss={losses['rgb_mse_loss'].item():.4f} ({total_rgb_mse_loss / self.local_step:.4f}), " + # f"rgb_perceptual_loss={losses['rgb_perceptual_loss'].item():.4f} ({total_rgb_perceptual_loss / self.local_step:.4f}), " + # f"depth_mse_loss={losses['depth_mse_loss'].item():.4f} ({total_depth_mse_loss / self.local_step:.4f}), " + f"lr={self.optimizer.param_groups[0]['lr']:.6f}") + else: + pbar.set_description(f"loss={loss_val:.4f} ({total_loss / self.local_step:.4f})") + pbar.update(loader.batch_size) + + if self.ema is not None: + self.ema.update() + + average_loss = total_loss / self.local_step + self.stats["loss"].append(average_loss) + + if self.local_rank == 0: + pbar.close() + if self.report_metric_at_train: + for metric in self.metrics: + self.log(metric.report(), style="red") + if self.use_tensorboardX: + metric.write(self.writer, self.epoch, prefix="train") + metric.clear() + + if not self.scheduler_update_every_step: + if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): + self.lr_scheduler.step(average_loss) + else: + self.lr_scheduler.step() + + cpu_mem, gpu_mem = get_CPU_mem(), get_GPU_mem()[0] + self.log( + f"==> [{time.strftime('%Y-%m-%d_%H-%M-%S')}] Finished Epoch {self.epoch}/{max_epochs}. 
CPU={cpu_mem:.1f}GB, GPU={gpu_mem:.1f}GB.") + + def evaluate_one_epoch(self, loader, name=None): + self.log(f"++> Evaluate {self.workspace} at epoch {self.epoch} ...") + + if name is None: + name = f'{self.name}_ep{self.epoch:04d}' + + total_loss = 0 + if self.local_rank == 0: + for metric in self.metrics: + metric.clear() + + self.model.eval() + + if self.ema is not None: + self.ema.store() + self.ema.copy_to() + + if self.local_rank == 0: + pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, + bar_format='{desc}: {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') + + with torch.no_grad(): + self.local_step = 0 + + for data in loader: + self.local_step += 1 + + with torch.cuda.amp.autocast(enabled=self.fp16): + preds, preds_depth, teacher_rgb, teacher_depth, loss = self.eval_step(data) + + # all_gather/reduce the statistics (NCCL only support all_*) + if self.world_size > 1: + dist.all_reduce(loss, op=dist.ReduceOp.SUM) + loss = loss / self.world_size + + preds_list = [torch.zeros_like(preds).to(self.device) for _ in + range(self.world_size)] # [[B, ...], [B, ...], ...] + dist.all_gather(preds_list, preds) + preds = torch.cat(preds_list, dim=0) + + preds_depth_list = [torch.zeros_like(preds_depth).to(self.device) for _ in + range(self.world_size)] # [[B, ...], [B, ...], ...] + dist.all_gather(preds_depth_list, preds_depth) + preds_depth = torch.cat(preds_depth_list, dim=0) + + loss_val = loss.item() + total_loss += loss_val + + # only rank = 0 will perform evaluation. + if self.local_rank == 0: + # save image + save_path = os.path.join(self.workspace, 'latent_trigrid_fit_validation', + f'{name}_{self.local_step:04d}_rgb.png') + save_path_depth = os.path.join(self.workspace, 'latent_trigrid_fit_validation', + f'{name}_{self.local_step:04d}_depth.png') + + # self.log(f"==> Saving validation image to {save_path}") + os.makedirs(os.path.dirname(save_path), exist_ok=True) + + pred = preds[0].detach().cpu().numpy() + pred = (pred * 255).astype(np.uint8) + + pred_depth = preds_depth[0].detach().cpu().numpy() + pred_depth = (pred_depth - pred_depth.min()) / (pred_depth.max() - pred_depth.min() + 1e-6) + pred_depth = (pred_depth * 255).astype(np.uint8) + + teacher_rgb = teacher_rgb[0].detach().cpu().numpy() + teacher_rgb = (teacher_rgb * 255).astype(np.uint8) + + teacher_depth = teacher_depth[0].detach().cpu().numpy() + teacher_depth = (teacher_depth - teacher_depth.min()) / ( + teacher_depth.max() - teacher_depth.min() + 1e-6) + teacher_depth = (teacher_depth * 255).astype(np.uint8) + + pred = np.concatenate((pred, teacher_rgb), axis=1) + pred_depth = np.concatenate((pred_depth, teacher_depth), axis=1) + + cv2.imwrite(save_path, cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)) + cv2.imwrite(save_path_depth, pred_depth) + + pbar.set_description(f"loss={loss_val:.4f} ({total_loss / self.local_step:.4f})") + pbar.update(loader.batch_size) + + average_loss = total_loss / self.local_step + self.stats["valid_loss"].append(average_loss) + + if self.local_rank == 0: + pbar.close() + if not self.use_loss_as_metric and len(self.metrics) > 0: + result = self.metrics[0].measure() + self.stats["results"].append( + result if self.best_mode == 'min' else - result) # if max mode, use -result + else: + self.stats["results"].append(average_loss) # if no metric, choose best by min loss + + for metric in self.metrics: + self.log(metric.report(), style="blue") + if self.use_tensorboardX: + metric.write(self.writer, self.epoch, prefix="evaluate") + metric.clear() + + if self.ema is not None: + 
self.ema.restore() + + self.log(f"++> Evaluate epoch {self.epoch} Finished.") + + def save_checkpoint(self, name=None, full=False, best=False): + + if name is None: + name = f'{self.name}_ep{self.epoch:04d}' + + state = { + 'epoch': self.epoch, + 'global_step': self.global_step, + 'stats': self.stats, + } + + if self.model.cuda_ray: + state['mean_density'] = self.model.mean_density + + if self.opt.dmtet: + state['tet_scale'] = self.model.tet_scale.cpu().numpy() + + if full: + state['optimizer'] = self.optimizer.state_dict() + state['lr_scheduler'] = self.lr_scheduler.state_dict() + state['scaler'] = self.scaler.state_dict() + if self.ema is not None: + state['ema'] = self.ema.state_dict() + + if not best: + + state['model'] = self.model.state_dict() + + file_path = f"{name}.pth" + + self.stats["checkpoints"].append(file_path) + + if len(self.stats["checkpoints"]) > self.max_keep_ckpt: + old_ckpt = os.path.join(self.ckpt_path, self.stats["checkpoints"].pop(0)) + if os.path.exists(old_ckpt): + os.remove(old_ckpt) + + torch.save(state, os.path.join(self.ckpt_path, file_path)) + + else: + if len(self.stats["results"]) > 0: + # always save best since loss cannot reflect performance. + if True: + # self.log(f"[INFO] New best result: {self.stats['best_result']} --> {self.stats['results'][-1]}") + # self.stats["best_result"] = self.stats["results"][-1] + + # save ema results + if self.ema is not None: + self.ema.store() + self.ema.copy_to() + + state['model'] = self.model.state_dict() + + if self.ema is not None: + self.ema.restore() + + torch.save(state, self.best_path) + else: + self.log(f"[WARN] no evaluated results found, skip saving best checkpoint.") + + def load_checkpoint(self, checkpoint=None, model_only=False): + if checkpoint is None: + checkpoint_list = sorted(glob.glob(f'{self.ckpt_path}/*.pth')) + if checkpoint_list: + checkpoint = checkpoint_list[-1] + self.log(f"[INFO] Latest checkpoint is {checkpoint}") + else: + self.log("[WARN] No checkpoint found, model randomly initialized.") + return + + checkpoint_dict = torch.load(checkpoint, map_location=self.device) + + if 'model' not in checkpoint_dict: + self.model.load_state_dict(checkpoint_dict) + self.log("[INFO] loaded model.") + return + + missing_keys, unexpected_keys = self.model.load_state_dict(checkpoint_dict['model'], strict=False) + self.log("[INFO] loaded model.") + if len(missing_keys) > 0: + self.log(f"[WARN] missing keys: {missing_keys}") + if len(unexpected_keys) > 0: + self.log(f"[WARN] unexpected keys: {unexpected_keys}") + + if self.ema is not None and 'ema' in checkpoint_dict: + try: + self.ema.load_state_dict(checkpoint_dict['ema']) + self.log("[INFO] loaded EMA.") + except: + self.log("[WARN] failed to loaded EMA.") + + if self.model.cuda_ray: + if 'mean_density' in checkpoint_dict: + self.model.mean_density = checkpoint_dict['mean_density'] + + if self.opt.dmtet: + if 'tet_scale' in checkpoint_dict: + new_scale = torch.from_numpy(checkpoint_dict['tet_scale']).to(self.device) + self.model.verts *= new_scale / self.model.tet_scale + self.model.tet_scale = new_scale + + if model_only: + return + + self.stats = checkpoint_dict['stats'] + self.epoch = checkpoint_dict['epoch'] + self.global_step = checkpoint_dict['global_step'] + self.log(f"[INFO] load at epoch {self.epoch}, global step {self.global_step}") + + if self.optimizer and 'optimizer' in checkpoint_dict: + try: + self.optimizer.load_state_dict(checkpoint_dict['optimizer']) + self.log("[INFO] loaded optimizer.") + except: + self.log("[WARN] Failed to load 
optimizer.") + + if self.lr_scheduler and 'lr_scheduler' in checkpoint_dict: + try: + self.lr_scheduler.load_state_dict(checkpoint_dict['lr_scheduler']) + self.log("[INFO] loaded scheduler.") + except: + self.log("[WARN] Failed to load scheduler.") + + if self.scaler and 'scaler' in checkpoint_dict: + try: + self.scaler.load_state_dict(checkpoint_dict['scaler']) + self.log("[INFO] loaded scaler.") + except: + self.log("[WARN] Failed to load scaler.") + + +def get_CPU_mem(): + return psutil.Process(os.getpid()).memory_info().rss / 1024 ** 3 + + +def get_GPU_mem(): + num = torch.cuda.device_count() + mem, mems = 0, [] + for i in range(num): + mem_free, mem_total = torch.cuda.mem_get_info(i) + mems.append(int(((mem_total - mem_free) / 1024 ** 3) * 1000) / 1000) + mem += mems[-1] + return mem, mems \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/nerf/utils.py b/stable-dreamfusion-3DPortrait/nerf/utils.py new file mode 100644 index 0000000..0c0bd02 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/nerf/utils.py @@ -0,0 +1,1353 @@ +import os +import gc +import glob +import tqdm +import math +import imageio +import psutil +from pathlib import Path +import random +import shutil +import warnings +import tensorboardX + +import numpy as np + +import time + +import cv2 +import matplotlib.pyplot as plt + +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.distributed as dist +import torchvision.transforms.functional as TF +from torchmetrics import PearsonCorrCoef + +from rich.console import Console +from torch_ema import ExponentialMovingAverage + +from packaging import version as pver + +def adjust_text_embeddings(embeddings, azimuth, opt): + text_z_list = [] + weights_list = [] + K = 0 + for b in range(azimuth.shape[0]): + text_z_, weights_ = get_pos_neg_text_embeddings(embeddings, azimuth[b], opt) + K = max(K, weights_.shape[0]) + text_z_list.append(text_z_) + weights_list.append(weights_) + + # Interleave text_embeddings from different dirs to form a batch + text_embeddings = [] + for i in range(K): + for text_z in text_z_list: + # if uneven length, pad with the first embedding + text_embeddings.append(text_z[i] if i < len(text_z) else text_z[0]) + text_embeddings = torch.stack(text_embeddings, dim=0) # [B * K, 77, 768] + + # Interleave weights from different dirs to form a batch + weights = [] + for i in range(K): + for weights_ in weights_list: + weights.append(weights_[i] if i < len(weights_) else torch.zeros_like(weights_[0])) + weights = torch.stack(weights, dim=0) # [B * K] + return text_embeddings, weights + +def get_pos_neg_text_embeddings(embeddings, azimuth_val, opt): + if azimuth_val >= -90 and azimuth_val < 90: + if azimuth_val >= 0: + r = 1 - azimuth_val / 90 + else: + r = 1 + azimuth_val / 90 + start_z = embeddings['front'] + end_z = embeddings['side'] + # if random.random() < 0.3: + # r = r + random.gauss(0, 0.08) + pos_z = r * start_z + (1 - r) * end_z + text_z = torch.cat([pos_z, embeddings['front'], embeddings['side']], dim=0) + if r > 0.8: + front_neg_w = 0.0 + else: + front_neg_w = math.exp(-r * opt.front_decay_factor) * opt.negative_w + if r < 0.2: + side_neg_w = 0.0 + else: + side_neg_w = math.exp(-(1-r) * opt.side_decay_factor) * opt.negative_w + + weights = torch.tensor([1.0, front_neg_w, side_neg_w]) + else: + if azimuth_val >= 0: + r = 1 - (azimuth_val - 90) / 90 + else: + r = 1 + (azimuth_val + 90) / 90 + start_z = embeddings['side'] + end_z = embeddings['back'] + # if random.random() < 
0.3: + # r = r + random.gauss(0, 0.08) + pos_z = r * start_z + (1 - r) * end_z + text_z = torch.cat([pos_z, embeddings['side'], embeddings['front']], dim=0) + front_neg_w = opt.negative_w + if r > 0.8: + side_neg_w = 0.0 + else: + side_neg_w = math.exp(-r * opt.side_decay_factor) * opt.negative_w / 2 + + weights = torch.tensor([1.0, side_neg_w, front_neg_w]) + return text_z, weights.to(text_z.device) + +def custom_meshgrid(*args): + # ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid + if pver.parse(torch.__version__) < pver.parse('1.10'): + return torch.meshgrid(*args) + else: + return torch.meshgrid(*args, indexing='ij') + +def safe_normalize(x, eps=1e-20): + return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps)) + +@torch.cuda.amp.autocast(enabled=False) +def get_rays(poses, intrinsics, H, W, N=-1, error_map=None): + ''' get rays + Args: + poses: [B, 4, 4], cam2world + intrinsics: [4] + H, W, N: int + error_map: [B, 128 * 128], sample probability based on training error + Returns: + rays_o, rays_d: [B, N, 3] + inds: [B, N] + ''' + + device = poses.device + B = poses.shape[0] + fx, fy, cx, cy = intrinsics + + i, j = custom_meshgrid(torch.linspace(0, W-1, W, device=device), torch.linspace(0, H-1, H, device=device)) + i = i.t().reshape([1, H*W]).expand([B, H*W]) + 0.5 + j = j.t().reshape([1, H*W]).expand([B, H*W]) + 0.5 + + results = {} + + if N > 0: + N = min(N, H*W) + + if error_map is None: + inds = torch.randint(0, H*W, size=[N], device=device) # may duplicate + inds = inds.expand([B, N]) + else: + + # weighted sample on a low-reso grid + inds_coarse = torch.multinomial(error_map.to(device), N, replacement=False) # [B, N], but in [0, 128*128) + + # map to the original resolution with random perturb. + inds_x, inds_y = inds_coarse // 128, inds_coarse % 128 # `//` will throw a warning in torch 1.10... anyway. 
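+            # the error map is kept on a fixed 128x128 grid; the lines below scale the coarse indices up to
+            # the render resolution and jitter within each coarse cell so every pixel can still be sampled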
+ sx, sy = H / 128, W / 128 + inds_x = (inds_x * sx + torch.rand(B, N, device=device) * sx).long().clamp(max=H - 1) + inds_y = (inds_y * sy + torch.rand(B, N, device=device) * sy).long().clamp(max=W - 1) + inds = inds_x * W + inds_y + + results['inds_coarse'] = inds_coarse # need this when updating error_map + + i = torch.gather(i, -1, inds) + j = torch.gather(j, -1, inds) + + results['inds'] = inds + + else: + inds = torch.arange(H*W, device=device).expand([B, H*W]) + + zs = - torch.ones_like(i) + xs = - (i - cx) / fx * zs + ys = (j - cy) / fy * zs + directions = torch.stack((xs, ys, zs), dim=-1) + # directions = safe_normalize(directions) + rays_d = directions @ poses[:, :3, :3].transpose(-1, -2) # (B, N, 3) + + rays_o = poses[..., :3, 3] # [B, 3] + rays_o = rays_o[..., None, :].expand_as(rays_d) # [B, N, 3] + + results['rays_o'] = rays_o + results['rays_d'] = rays_d + + return results + + +def seed_everything(seed): + random.seed(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + #torch.backends.cudnn.deterministic = True + #torch.backends.cudnn.benchmark = True + + +@torch.jit.script +def linear_to_srgb(x): + return torch.where(x < 0.0031308, 12.92 * x, 1.055 * x ** 0.41666 - 0.055) + + +@torch.jit.script +def srgb_to_linear(x): + return torch.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4) + + +class Trainer(object): + def __init__(self, + argv, # command line args + name, # name of this experiment + opt, # extra conf + model, # network + guidance, # guidance network + criterion=None, # loss function, if None, assume inline implementation in train_step + optimizer=None, # optimizer + ema_decay=None, # if use EMA, set the decay + lr_scheduler=None, # scheduler + metrics=[], # metrics for evaluation, if None, use val_loss to measure performance, else use the first metric. + local_rank=0, # which GPU am I + world_size=1, # total num of GPUs + device=None, # device to use, usually setting to None is OK. 
(auto choose device) + mute=False, # whether to mute all print + fp16=False, # amp optimize level + max_keep_ckpt=2, # max num of saved ckpts in disk + workspace='workspace', # workspace to save logs & ckpts + best_mode='min', # the smaller/larger result, the better + use_loss_as_metric=True, # use loss as the first metric + report_metric_at_train=False, # also report metrics at training + use_checkpoint="latest", # which ckpt to use at init time + use_tensorboardX=True, # whether to use tensorboard for logging + scheduler_update_every_step=False, # whether to call scheduler.step() after every train step + ): + + self.argv = argv + self.name = name + self.opt = opt + self.mute = mute + self.metrics = metrics + self.local_rank = local_rank + self.world_size = world_size + self.workspace = workspace + self.ema_decay = ema_decay + self.fp16 = fp16 + self.best_mode = best_mode + self.use_loss_as_metric = use_loss_as_metric + self.report_metric_at_train = report_metric_at_train + self.max_keep_ckpt = max_keep_ckpt + self.use_checkpoint = use_checkpoint + self.use_tensorboardX = use_tensorboardX + self.time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") + self.scheduler_update_every_step = scheduler_update_every_step + self.device = device if device is not None else torch.device(f'cuda:{local_rank}' if torch.cuda.is_available() else 'cpu') + self.console = Console() + + model.to(self.device) + if self.world_size > 1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank]) + self.model = model + + # guide model + self.guidance = guidance + self.embeddings = {} + + # text prompt / images + if self.guidance is not None: + for key in self.guidance: + for p in self.guidance[key].parameters(): + p.requires_grad = False + self.embeddings[key] = {} + self.prepare_embeddings() + + if isinstance(criterion, nn.Module): + criterion.to(self.device) + self.criterion = criterion + + if self.opt.images is not None: + self.pearson = PearsonCorrCoef().to(self.device) + + if optimizer is None: + self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, weight_decay=5e-4) # naive adam + else: + self.optimizer = optimizer(self.model) + + if lr_scheduler is None: + self.lr_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda epoch: 1) # fake scheduler + else: + self.lr_scheduler = lr_scheduler(self.optimizer) + + if ema_decay is not None: + self.ema = ExponentialMovingAverage(self.model.parameters(), decay=ema_decay) + else: + self.ema = None + + self.scaler = torch.cuda.amp.GradScaler(enabled=self.fp16) + + # variable init + self.total_train_t = 0 + self.epoch = 0 + self.global_step = 0 + self.local_step = 0 + self.stats = { + "loss": [], + "valid_loss": [], + "results": [], # metrics[0], or valid_loss + "checkpoints": [], # record path of saved ckpt, to automatically remove old ckpt + "best_result": None, + } + + # auto fix + if len(metrics) == 0 or self.use_loss_as_metric: + self.best_mode = 'min' + + # workspace prepare + self.log_ptr = None + if self.workspace is not None: + os.makedirs(self.workspace, exist_ok=True) + self.log_path = os.path.join(workspace, f"log_{self.name}.txt") + self.log_ptr = open(self.log_path, "a+") + + self.ckpt_path = os.path.join(self.workspace, 'checkpoints') + self.best_path = f"{self.ckpt_path}/{self.name}.pth" + os.makedirs(self.ckpt_path, exist_ok=True) + + # Save a copy of image_config in the experiment workspace + if opt.image_config is not None: + 
shutil.copyfile(opt.image_config, os.path.join(self.workspace, os.path.basename(opt.image_config))) + + # Save a copy of images in the experiment workspace + if opt.images is not None: + for image_file in opt.images: + shutil.copyfile(image_file, os.path.join(self.workspace, os.path.basename(image_file))) + + self.log(f'[INFO] Cmdline: {self.argv}') + self.log(f'[INFO] opt: {self.opt}') + self.log(f'[INFO] Trainer: {self.name} | {self.time_stamp} | {self.device} | {"fp16" if self.fp16 else "fp32"} | {self.workspace}') + self.log(f'[INFO] #parameters: {sum([p.numel() for p in model.parameters() if p.requires_grad])}') + + if self.workspace is not None: + if self.use_checkpoint == "scratch": + self.log("[INFO] Training from scratch ...") + elif self.use_checkpoint == "latest": + self.log("[INFO] Loading latest checkpoint ...") + self.load_checkpoint() + elif self.use_checkpoint == "latest_model": + self.log("[INFO] Loading latest checkpoint (model only)...") + self.load_checkpoint(model_only=True) + elif self.use_checkpoint == "best": + if os.path.exists(self.best_path): + self.log("[INFO] Loading best checkpoint ...") + self.load_checkpoint(self.best_path) + else: + self.log(f"[INFO] {self.best_path} not found, loading latest ...") + self.load_checkpoint() + else: # path to ckpt + self.log(f"[INFO] Loading {self.use_checkpoint} ...") + self.load_checkpoint(self.use_checkpoint) + + # calculate the text embs. + @torch.no_grad() + def prepare_embeddings(self): + + # text embeddings (stable-diffusion) + if self.opt.text is not None: + + if 'SD' in self.guidance: + self.embeddings['SD']['default'] = self.guidance['SD'].get_text_embeds([self.opt.text]) + self.embeddings['SD']['uncond'] = self.guidance['SD'].get_text_embeds([self.opt.negative]) + + for d in ['front', 'side', 'back']: + self.embeddings['SD'][d] = self.guidance['SD'].get_text_embeds([f"{d} view {self.opt.text}"]) + + print('direction: ', d, 'prompt: ', f"{d} view {self.opt.text}") + + if 'IF' in self.guidance: + self.embeddings['IF']['default'] = self.guidance['IF'].get_text_embeds([self.opt.text]) + self.embeddings['IF']['uncond'] = self.guidance['IF'].get_text_embeds([self.opt.negative]) + + for d in ['front', 'side', 'back']: + self.embeddings['IF'][d] = self.guidance['IF'].get_text_embeds([f"{d} view {self.opt.text}"]) + + if 'clip' in self.guidance: + self.embeddings['clip']['text'] = self.guidance['clip'].get_text_embeds(self.opt.text) + + if self.opt.images is not None: + + h = int(self.opt.known_view_scale * self.opt.h) + w = int(self.opt.known_view_scale * self.opt.w) + + # load processed image + for image in self.opt.images: + assert image.endswith('_rgba.png') # the rest of this code assumes that the _rgba image has been passed. 
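+            # each RGBA reference is resized to the known-view resolution and composited over a white
+            # background (rgb * alpha + (1 - alpha)); its alpha channel also provides the foreground mask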
+ rgbas = [cv2.cvtColor(cv2.imread(image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGRA2RGBA) for image in self.opt.images] + rgba_hw = np.stack([cv2.resize(rgba, (w, h), interpolation=cv2.INTER_AREA).astype(np.float32) / 255 for rgba in rgbas]) + rgb_hw = rgba_hw[..., :3] * rgba_hw[..., 3:] + (1 - rgba_hw[..., 3:]) + self.rgb = torch.from_numpy(rgb_hw).permute(0,3,1,2).contiguous().to(self.device) + self.mask = torch.from_numpy(rgba_hw[..., 3] > 0.5).to(self.device) + print(f'[INFO] dataset: load image prompt {self.opt.images} {self.rgb.shape}') + + # load depth + depth_paths = [image.replace('_rgba.png', '_depth.png') for image in self.opt.images] + depths = [cv2.imread(depth_path, cv2.IMREAD_UNCHANGED) for depth_path in depth_paths] + depth = np.stack([cv2.resize(depth, (w, h), interpolation=cv2.INTER_AREA) for depth in depths]) + self.depth = torch.from_numpy(depth.astype(np.float32) / 255).to(self.device) # TODO: this should be mapped to FP16 + print(f'[INFO] dataset: load depth prompt {depth_paths} {self.depth.shape}') + + # load normal # TODO: don't load if normal loss is 0 + normal_paths = [image.replace('_rgba.png', '_normal.png') for image in self.opt.images] + normals = [cv2.imread(normal_path, cv2.IMREAD_UNCHANGED) for normal_path in normal_paths] + normal = np.stack([cv2.resize(normal, (w, h), interpolation=cv2.INTER_AREA) for normal in normals]) + self.normal = torch.from_numpy(normal.astype(np.float32) / 255).to(self.device) + print(f'[INFO] dataset: load normal prompt {normal_paths} {self.normal.shape}') + + # encode embeddings for zero123 + if 'zero123' in self.guidance: + rgba_256 = np.stack([cv2.resize(rgba, (256, 256), interpolation=cv2.INTER_AREA).astype(np.float32) / 255 for rgba in rgbas]) + rgbs_256 = rgba_256[..., :3] * rgba_256[..., 3:] + (1 - rgba_256[..., 3:]) + rgb_256 = torch.from_numpy(rgbs_256).permute(0,3,1,2).contiguous().to(self.device) + guidance_embeds = self.guidance['zero123'].get_img_embeds(rgb_256) + self.embeddings['zero123']['default'] = { + 'zero123_ws' : self.opt.zero123_ws, + 'c_crossattn' : guidance_embeds[0], + 'c_concat' : guidance_embeds[1], + 'ref_polars' : self.opt.ref_polars, + 'ref_azimuths' : self.opt.ref_azimuths, + 'ref_radii' : self.opt.ref_radii, + } + + if 'clip' in self.guidance: + self.embeddings['clip']['image'] = self.guidance['clip'].get_img_embeds(self.rgb) + + + def __del__(self): + if self.log_ptr: + self.log_ptr.close() + + + def log(self, *args, **kwargs): + if self.local_rank == 0: + if not self.mute: + #print(*args) + self.console.print(*args, **kwargs) + if self.log_ptr: + print(*args, file=self.log_ptr) + self.log_ptr.flush() # write immediately to file + + ### ------------------------------ + + def train_step(self, data, save_guidance_path:Path=None): + """ + Args: + save_guidance_path: an image that combines the NeRF render, the added latent noise, + the denoised result and optionally the fully-denoised image. + """ + + # perform RGBD loss instead of SDS if is image-conditioned + do_rgbd_loss = self.opt.images is not None and \ + (self.global_step % self.opt.known_view_interval == 0) + + # override random camera with fixed known camera + if do_rgbd_loss: + data = self.default_view_data + + # experiment iterations ratio + # i.e. what proportion of this experiment have we completed (in terms of iterations) so far? 
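+        # exp_iter_ratio runs from 0 to 1 over the experiment; progressive_view below linearly widens the
+        # camera ranges from the default view towards the full ranges, and progressive_level raises the model's max_level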
+ exp_iter_ratio = (self.global_step - self.opt.exp_start_iter) / (self.opt.exp_end_iter - self.opt.exp_start_iter) + + # progressively relaxing view range + if self.opt.progressive_view: + r = min(1.0, self.opt.progressive_view_init_ratio + 2.0*exp_iter_ratio) + self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r, + self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r] + self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r, + self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r] + self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r, + self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r] + self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r, + self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r] + + # progressively increase max_level + if self.opt.progressive_level: + self.model.max_level = min(1.0, 0.25 + 2.0*exp_iter_ratio) + + rays_o = data['rays_o'] # [B, N, 3] + rays_d = data['rays_d'] # [B, N, 3] + mvp = data['mvp'] # [B, 4, 4] + poses = data['poses'] # [B, 4, 4] + + B, N = rays_o.shape[:2] + H, W = data['H'], data['W'] + + # When ref_data has B images > opt.batch_size + if B > self.opt.batch_size: + # choose batch_size images out of those B images + choice = torch.randperm(B)[:self.opt.batch_size] + B = self.opt.batch_size + rays_o = rays_o[choice] + rays_d = rays_d[choice] + mvp = mvp[choice] + + if do_rgbd_loss: + ambient_ratio = 1.0 + shading = 'lambertian' # use lambertian instead of albedo to get normal + as_latent = False + binarize = False + bg_color = torch.rand((B * N, 3), device=rays_o.device) + + # add camera noise to avoid grid-like artifact + if self.opt.known_view_noise_scale > 0: + noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters) + rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale + rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale + + elif exp_iter_ratio <= self.opt.latent_iter_ratio: + ambient_ratio = 1.0 + shading = 'normal' + as_latent = True + binarize = False + bg_color = None + + else: + if exp_iter_ratio <= self.opt.albedo_iter_ratio: + ambient_ratio = 1.0 + shading = 'albedo' + else: + # random shading + ambient_ratio = self.opt.min_ambient_ratio + (1.0-self.opt.min_ambient_ratio) * random.random() + rand = random.random() + if rand >= (1.0 - self.opt.textureless_ratio): + shading = 'textureless' + else: + shading = 'lambertian' + + as_latent = False + + # random weights binarization (like mobile-nerf) [NOT WORKING NOW] + # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters) + # binarize = random.random() < binarize_thresh + binarize = False + + # random background + rand = random.random() + #if self.opt.bg_radius > 0 and rand > 0.5: + if self.opt.learnable_bg: + bg_color = None # use bg_net + else: + bg_color = torch.rand(3).to(self.device) # single color random bg + + outputs = self.model.render(rays_o, rays_d, poses, H, W, staged=False, perturb=True, bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize) + pred_depth = outputs['depth'].reshape(B, 1, H, W) + pred_mask = outputs['weights_sum'].reshape(B, 1, H, W) + if 'normal_image' in outputs: + pred_normal = outputs['normal_image'].reshape(B, H, W, 3) + + if as_latent: + # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D) + pred_rgb = 
torch.cat([outputs['image'], outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, H, W, 4).permute(0, 3, 1, 2).contiguous() # [B, 4, H, W] + else: + pred_rgb = outputs['image'].reshape(B, H, W, 3).permute(0, 3, 1, 2).contiguous() # [B, 3, H, W] + + + # known view loss + if do_rgbd_loss: + gt_mask = self.mask # [B, H, W] + gt_rgb = self.rgb # [B, 3, H, W] + gt_normal = self.normal # [B, H, W, 3] + gt_depth = self.depth # [B, H, W] + + if len(gt_rgb) > self.opt.batch_size: + gt_mask = gt_mask[choice] + gt_rgb = gt_rgb[choice] + gt_normal = gt_normal[choice] + gt_depth = gt_depth[choice] + + # color loss + gt_rgb = gt_rgb * gt_mask[:, None].float() + bg_color.reshape(B, H, W, 3).permute(0,3,1,2).contiguous() * (1 - gt_mask[:, None].float()) + loss = self.opt.lambda_rgb * F.mse_loss(pred_rgb, gt_rgb) + + # mask loss + loss = loss + self.opt.lambda_mask * F.mse_loss(pred_mask[:, 0], gt_mask.float()) + + # normal loss + if self.opt.lambda_normal > 0 and 'normal_image' in outputs: + valid_gt_normal = 1 - 2 * gt_normal[gt_mask] # [B, 3] + valid_pred_normal = 2 * pred_normal[gt_mask] - 1 # [B, 3] + + lambda_normal = self.opt.lambda_normal * min(1, self.global_step / self.opt.iters) + loss = loss + lambda_normal * (1 - F.cosine_similarity(valid_pred_normal, valid_gt_normal).mean()) + + # relative depth loss + if self.opt.lambda_depth > 0: + valid_gt_depth = gt_depth[gt_mask] # [B,] + valid_pred_depth = pred_depth[:, 0][gt_mask] # [B,] + lambda_depth = self.opt.lambda_depth * min(1, self.global_step / self.opt.iters) + loss = loss + lambda_depth * (1 - self.pearson(valid_pred_depth, valid_gt_depth)) + + # # scale-invariant + # with torch.no_grad(): + # A = torch.cat([valid_gt_depth, torch.ones_like(valid_gt_depth)], dim=-1) # [B, 2] + # X = torch.linalg.lstsq(A, valid_pred_depth).solution # [2, 1] + # valid_gt_depth = A @ X # [B, 1] + # lambda_depth = self.opt.lambda_depth #* min(1, self.global_step / self.opt.iters) + # loss = loss + lambda_depth * F.mse_loss(valid_pred_depth, valid_gt_depth) + + # novel view loss + else: + + loss = 0 + + if 'SD' in self.guidance: + # interpolate text_z + azimuth = data['azimuth'] # [-180, 180] + + # ENHANCE: remove loop to handle batch size > 1 + text_z = [self.embeddings['SD']['uncond']] * azimuth.shape[0] + if self.opt.perpneg: + + text_z_comp, weights = adjust_text_embeddings(self.embeddings['SD'], azimuth, self.opt) + text_z.append(text_z_comp) + + else: + for b in range(azimuth.shape[0]): + if azimuth[b] >= -90 and azimuth[b] < 90: + if azimuth[b] >= 0: + r = 1 - azimuth[b] / 90 + else: + r = 1 + azimuth[b] / 90 + start_z = self.embeddings['SD']['front'] + end_z = self.embeddings['SD']['side'] + else: + if azimuth[b] >= 0: + r = 1 - (azimuth[b] - 90) / 90 + else: + r = 1 + (azimuth[b] + 90) / 90 + start_z = self.embeddings['SD']['side'] + end_z = self.embeddings['SD']['back'] + text_z.append(r * start_z + (1 - r) * end_z) + + text_z = torch.cat(text_z, dim=0) + if self.opt.perpneg: + loss = loss + self.guidance['SD'].train_step_perpneg(text_z, weights, pred_rgb, as_latent=as_latent, guidance_scale=self.opt.guidance_scale, grad_scale=self.opt.lambda_guidance, + save_guidance_path=save_guidance_path,) + else: + loss = loss + self.guidance['SD'].train_step(text_z, pred_rgb, as_latent=as_latent, guidance_scale=self.opt.guidance_scale, grad_scale=self.opt.lambda_guidance, + save_guidance_path=save_guidance_path) + + if 'IF' in self.guidance: + # interpolate text_z + azimuth = data['azimuth'] # [-180, 180] + + # ENHANCE: remove loop to handle batch size > 1 + 
text_z = [self.embeddings['IF']['uncond']] * azimuth.shape[0] + if self.opt.perpneg: + text_z_comp, weights = adjust_text_embeddings(self.embeddings['IF'], azimuth, self.opt) + text_z.append(text_z_comp) + else: + for b in range(azimuth.shape[0]): + if azimuth[b] >= -90 and azimuth[b] < 90: + if azimuth[b] >= 0: + r = 1 - azimuth[b] / 90 + else: + r = 1 + azimuth[b] / 90 + start_z = self.embeddings['IF']['front'] + end_z = self.embeddings['IF']['side'] + else: + if azimuth[b] >= 0: + r = 1 - (azimuth[b] - 90) / 90 + else: + r = 1 + (azimuth[b] + 90) / 90 + start_z = self.embeddings['IF']['side'] + end_z = self.embeddings['IF']['back'] + text_z.append(r * start_z + (1 - r) * end_z) + + text_z = torch.cat(text_z, dim=0) + + if self.opt.perpneg: + loss = loss + self.guidance['IF'].train_step_perpneg(text_z, weights, pred_rgb, guidance_scale=self.opt.guidance_scale, grad_scale=self.opt.lambda_guidance) + else: + loss = loss + self.guidance['IF'].train_step(text_z, pred_rgb, guidance_scale=self.opt.guidance_scale, grad_scale=self.opt.lambda_guidance) + + if 'zero123' in self.guidance: + + polar = data['polar'] + azimuth = data['azimuth'] + radius = data['radius'] + + loss = loss + self.guidance['zero123'].train_step(self.embeddings['zero123']['default'], pred_rgb, polar, azimuth, radius, guidance_scale=self.opt.guidance_scale, + as_latent=as_latent, grad_scale=self.opt.lambda_guidance, save_guidance_path=save_guidance_path) + + if 'clip' in self.guidance: + + # empirical, far view should apply smaller CLIP loss + lambda_guidance = 10 * (1 - abs(azimuth) / 180) * self.opt.lambda_guidance + + loss = loss + self.guidance['clip'].train_step(self.embeddings['clip'], pred_rgb, grad_scale=lambda_guidance) + + # regularizations + if not self.opt.dmtet: + + if self.opt.lambda_opacity > 0: + loss_opacity = (outputs['weights_sum'] ** 2).mean() + loss = loss + self.opt.lambda_opacity * loss_opacity + + if self.opt.lambda_entropy > 0: + alphas = outputs['weights'].clamp(1e-5, 1 - 1e-5) + # alphas = alphas ** 2 # skewed entropy, favors 0 over 1 + loss_entropy = (- alphas * torch.log2(alphas) - (1 - alphas) * torch.log2(1 - alphas)).mean() + lambda_entropy = self.opt.lambda_entropy * min(1, 2 * self.global_step / self.opt.iters) + loss = loss + lambda_entropy * loss_entropy + + if self.opt.lambda_2d_normal_smooth > 0 and 'normal_image' in outputs: + # pred_vals = outputs['normal_image'].reshape(B, H, W, 3).permute(0, 3, 1, 2).contiguous() + # smoothed_vals = TF.gaussian_blur(pred_vals.detach(), kernel_size=9) + # loss_smooth = F.mse_loss(pred_vals, smoothed_vals) + # total-variation + loss_smooth = (pred_normal[:, 1:, :, :] - pred_normal[:, :-1, :, :]).square().mean() + \ + (pred_normal[:, :, 1:, :] - pred_normal[:, :, :-1, :]).square().mean() + loss = loss + self.opt.lambda_2d_normal_smooth * loss_smooth + + if self.opt.lambda_orient > 0 and 'loss_orient' in outputs: + loss_orient = outputs['loss_orient'] + loss = loss + self.opt.lambda_orient * loss_orient + + if self.opt.lambda_3d_normal_smooth > 0 and 'loss_normal_perturb' in outputs: + loss_normal_perturb = outputs['loss_normal_perturb'] + loss = loss + self.opt.lambda_3d_normal_smooth * loss_normal_perturb + + else: + + if self.opt.lambda_mesh_normal > 0: + loss = loss + self.opt.lambda_mesh_normal * outputs['normal_loss'] + + if self.opt.lambda_mesh_laplacian > 0: + loss = loss + self.opt.lambda_mesh_laplacian * outputs['lap_loss'] + + return pred_rgb, pred_depth, loss + + def post_train_step(self): + + # unscale grad before modifying it! 
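+        # under AMP the gradients are still multiplied by the loss scale at this point, so they must be
+        # unscaled in place before value-based clipping; scaler.step() later detects this and skips its own unscale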
+ # ref: https://pytorch.org/docs/stable/notes/amp_examples.html#gradient-clipping + self.scaler.unscale_(self.optimizer) + + # clip grad + if self.opt.grad_clip >= 0: + torch.nn.utils.clip_grad_value_(self.model.parameters(), self.opt.grad_clip) + + # if not self.opt.dmtet and self.opt.backbone == 'grid': + # + # if self.opt.lambda_tv > 0: + # lambda_tv = min(1.0, self.global_step / (0.5 * self.opt.iters)) * self.opt.lambda_tv + # self.model.encoder.grad_total_variation(lambda_tv, None, self.model.bound) + # if self.opt.lambda_wd > 0: + # self.model.encoder.grad_weight_decay(self.opt.lambda_wd) + + def eval_step(self, data): + + rays_o = data['rays_o'] # [B, N, 3] + rays_d = data['rays_d'] # [B, N, 3] + poses = data['poses'] + + B, N = rays_o.shape[:2] + H, W = data['H'], data['W'] + + shading = data['shading'] if 'shading' in data else 'albedo' + ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0 + light_d = data['light_d'] if 'light_d' in data else None + + outputs = self.model.render(rays_o, rays_d, poses, H, W, staged=True, perturb=False, bg_color=None, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading) + pred_rgb = outputs['image'].reshape(B, H, W, 3) + pred_depth = outputs['depth'].reshape(B, H, W) + + # dummy + loss = torch.zeros([1], device=pred_rgb.device, dtype=pred_rgb.dtype) + + return pred_rgb, pred_depth, loss + + def test_step(self, data, bg_color=None, perturb=False): + rays_o = data['rays_o'] # [B, N, 3] + rays_d = data['rays_d'] # [B, N, 3] + poses = data['poses'] + + B, N = rays_o.shape[:2] + H, W = data['H'], data['W'] + + if bg_color is not None: + bg_color = bg_color.to(rays_o.device) + + shading = data['shading'] if 'shading' in data else 'albedo' + ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0 + light_d = data['light_d'] if 'light_d' in data else None + + outputs = self.model.render(rays_o, rays_d, poses, H, W, staged=True, perturb=perturb, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading, bg_color=bg_color) + + pred_rgb = outputs['image'].reshape(B, H, W, 3) + pred_depth = outputs['depth'].reshape(B, H, W) + + return pred_rgb, pred_depth, None + + def save_mesh(self, loader=None, save_path=None): + + if save_path is None: + save_path = os.path.join(self.workspace, 'mesh') + + self.log(f"==> Saving mesh to {save_path}") + + os.makedirs(save_path, exist_ok=True) + + self.model.export_mesh(save_path, resolution=self.opt.mcubes_resolution, decimate_target=self.opt.decimate_target) + + self.log(f"==> Finished saving mesh.") + + ### ------------------------------ + + def train(self, train_loader, valid_loader, test_loader, max_epochs): + + if self.use_tensorboardX and self.local_rank == 0: + self.writer = tensorboardX.SummaryWriter(os.path.join(self.workspace, "run", self.name)) + + start_t = time.time() + self.evaluate_one_epoch(valid_loader) + for epoch in range(self.epoch+1, max_epochs+1): + self.epoch = epoch + + self.train_one_epoch(train_loader, max_epochs) + + if self.workspace is not None and self.local_rank == 0: + self.save_checkpoint(full=True, best=False) + + if self.epoch % self.opt.eval_interval == 0: + self.evaluate_one_epoch(valid_loader) + self.save_checkpoint(full=False, best=True) + + if self.epoch % self.opt.test_interval == 0 or self.epoch == max_epochs: + self.test(test_loader) + + end_t = time.time() + + self.total_train_t = end_t - start_t + self.total_train_t + + self.log(f"[INFO] training takes {(self.total_train_t)/ 
60:.4f} minutes.") + + if self.use_tensorboardX and self.local_rank == 0: + self.writer.close() + + def evaluate(self, loader, name=None): + self.use_tensorboardX, use_tensorboardX = False, self.use_tensorboardX + self.evaluate_one_epoch(loader, name) + self.use_tensorboardX = use_tensorboardX + + def test(self, loader, save_path=None, name=None, write_video=True): + + if save_path is None: + save_path = os.path.join(self.workspace, 'results') + + if name is None: + name = f'{self.name}_ep{self.epoch:04d}' + + os.makedirs(save_path, exist_ok=True) + + self.log(f"==> Start Test, save results to {save_path}") + + pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, bar_format='{percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') + self.model.eval() + + if write_video: + all_preds = [] + all_preds_depth = [] + + with torch.no_grad(): + + for i, data in enumerate(loader): + + with torch.cuda.amp.autocast(enabled=self.fp16): + preds, preds_depth, _ = self.test_step(data) + + pred = preds[0].detach().cpu().numpy() + pred = (pred * 255).astype(np.uint8) + + pred_depth = preds_depth[0].detach().cpu().numpy() + pred_depth = (pred_depth - pred_depth.min()) / (pred_depth.max() - pred_depth.min() + 1e-6) + pred_depth = (pred_depth * 255).astype(np.uint8) + + if write_video: + all_preds.append(pred) + all_preds_depth.append(pred_depth) + else: + cv2.imwrite(os.path.join(save_path, f'{name}_{i:04d}_rgb.png'), cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)) + cv2.imwrite(os.path.join(save_path, f'{name}_{i:04d}_depth.png'), pred_depth) + + pbar.update(loader.batch_size) + + if write_video: + all_preds = np.stack(all_preds, axis=0) + all_preds_depth = np.stack(all_preds_depth, axis=0) + print('save video...', os.path.join(save_path, f'{name}_rgb.mp4'), os.path.join(save_path, f'{name}_depth.mp4')) + imageio.mimwrite(os.path.join(save_path, f'{name}_rgb.mp4'), all_preds, fps=25, quality=8, macro_block_size=1) + imageio.mimwrite(os.path.join(save_path, f'{name}_depth.mp4'), all_preds_depth, fps=25, quality=8, macro_block_size=1) + + self.log(f"==> Finished Test.") + + # [GUI] train text step. 
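+    # runs a fixed number of optimization steps per GUI frame, recycling the dataloader when it is
+    # exhausted, and returns the average loss and current learning rate for display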
+ def train_gui(self, train_loader, step=16): + + self.model.train() + + total_loss = torch.tensor([0], dtype=torch.float32, device=self.device) + + loader = iter(train_loader) + + for _ in range(step): + + # mimic an infinite loop dataloader (in case the total dataset is smaller than step) + try: + data = next(loader) + except StopIteration: + loader = iter(train_loader) + data = next(loader) + + # update grid every 16 steps + if self.model.cuda_ray and self.global_step % self.opt.update_extra_interval == 0: + with torch.cuda.amp.autocast(enabled=self.fp16): + self.model.update_extra_state() + + self.global_step += 1 + + self.optimizer.zero_grad() + + with torch.cuda.amp.autocast(enabled=self.fp16): + pred_rgbs, pred_depths, loss = self.train_step(data) + + self.scaler.scale(loss).backward() + self.post_train_step() + self.scaler.step(self.optimizer) + self.scaler.update() + + if self.scheduler_update_every_step: + self.lr_scheduler.step() + + total_loss += loss.detach() + + if self.ema is not None: + self.ema.update() + + average_loss = total_loss.item() / step + + if not self.scheduler_update_every_step: + if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): + self.lr_scheduler.step(average_loss) + else: + self.lr_scheduler.step() + + outputs = { + 'loss': average_loss, + 'lr': self.optimizer.param_groups[0]['lr'], + } + + return outputs + + + # [GUI] test on a single image + def test_gui(self, pose, intrinsics, mvp, W, H, bg_color=None, spp=1, downscale=1, light_d=None, ambient_ratio=1.0, shading='albedo'): + + # render resolution (may need downscale to for better frame rate) + rH = int(H * downscale) + rW = int(W * downscale) + intrinsics = intrinsics * downscale + + pose = torch.from_numpy(pose).unsqueeze(0).to(self.device) + mvp = torch.from_numpy(mvp).unsqueeze(0).to(self.device) + + rays = get_rays(pose, intrinsics, rH, rW, -1) + + # from degree theta/phi to 3D normalized vec + light_d = np.deg2rad(light_d) + light_d = np.array([ + np.sin(light_d[0]) * np.sin(light_d[1]), + np.cos(light_d[0]), + np.sin(light_d[0]) * np.cos(light_d[1]), + ], dtype=np.float32) + light_d = torch.from_numpy(light_d).to(self.device) + + data = { + 'rays_o': rays['rays_o'], + 'rays_d': rays['rays_d'], + 'mvp': mvp, + 'H': rH, + 'W': rW, + 'light_d': light_d, + 'ambient_ratio': ambient_ratio, + 'shading': shading, + } + + self.model.eval() + + if self.ema is not None: + self.ema.store() + self.ema.copy_to() + + with torch.no_grad(): + with torch.cuda.amp.autocast(enabled=self.fp16): + # here spp is used as perturb random seed! + preds, preds_depth, _ = self.test_step(data, bg_color=bg_color, perturb=False if spp == 1 else spp) + + if self.ema is not None: + self.ema.restore() + + # interpolation to the original resolution + if downscale != 1: + # have to permute twice with torch... 
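+            # F.interpolate expects channels-first tensors, so go HWC -> CHW, resize, then back to HWC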
+ preds = F.interpolate(preds.permute(0, 3, 1, 2), size=(H, W), mode='nearest').permute(0, 2, 3, 1).contiguous() + preds_depth = F.interpolate(preds_depth.unsqueeze(1), size=(H, W), mode='nearest').squeeze(1) + + outputs = { + 'image': preds[0].detach().cpu().numpy(), + 'depth': preds_depth[0].detach().cpu().numpy(), + } + + return outputs + + def train_one_epoch(self, loader, max_epochs): + self.log(f"==> [{time.strftime('%Y-%m-%d_%H-%M-%S')}] Start Training {self.workspace} Epoch {self.epoch}/{max_epochs}, lr={self.optimizer.param_groups[0]['lr']:.6f} ...") + + total_loss = 0 + if self.local_rank == 0 and self.report_metric_at_train: + for metric in self.metrics: + metric.clear() + + self.model.train() + + # distributedSampler: must call set_epoch() to shuffle indices across multiple epochs + # ref: https://pytorch.org/docs/stable/data.html + if self.world_size > 1: + loader.sampler.set_epoch(self.epoch) + + if self.local_rank == 0: + pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, bar_format='{desc}: {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') + + self.local_step = 0 + + if self.opt.save_guidance: + save_guidance_folder = Path(self.workspace) / 'guidance' + save_guidance_folder.mkdir(parents=True, exist_ok=True) + + for data in loader: + + # update grid every 16 steps + if (self.model.cuda_ray or self.model.taichi_ray) and self.global_step % self.opt.update_extra_interval == 0: + with torch.cuda.amp.autocast(enabled=self.fp16): + self.model.update_extra_state() + + self.local_step += 1 + self.global_step += 1 + + self.optimizer.zero_grad() + + with torch.cuda.amp.autocast(enabled=self.fp16): + if self.opt.save_guidance and (self.global_step % self.opt.save_guidance_interval == 0 or self.global_step == 1): + save_guidance_path = save_guidance_folder / f'step_{self.global_step:07d}.png' + else: + save_guidance_path = None + pred_rgbs, pred_depths, loss = self.train_step(data, save_guidance_path=save_guidance_path) + + # hooked grad clipping for RGB space + if self.opt.grad_clip_rgb >= 0: + def _hook(grad): + if self.opt.fp16: + # correctly handle the scale + grad_scale = self.scaler._get_scale_async() + return grad.clamp(grad_scale * -self.opt.grad_clip_rgb, grad_scale * self.opt.grad_clip_rgb) + else: + return grad.clamp(-self.opt.grad_clip_rgb, self.opt.grad_clip_rgb) + pred_rgbs.register_hook(_hook) + # pred_rgbs.retain_grad() + + self.scaler.scale(loss).backward() + + self.post_train_step() + self.scaler.step(self.optimizer) + self.scaler.update() + + if self.scheduler_update_every_step: + self.lr_scheduler.step() + + loss_val = loss.item() + total_loss += loss_val + + if self.local_rank == 0: + # if self.report_metric_at_train: + # for metric in self.metrics: + # metric.update(preds, truths) + + if self.use_tensorboardX: + self.writer.add_scalar("train/loss", loss_val, self.global_step) + self.writer.add_scalar("train/lr", self.optimizer.param_groups[0]['lr'], self.global_step) + + if self.scheduler_update_every_step: + pbar.set_description(f"loss={loss_val:.4f} ({total_loss/self.local_step:.4f}), lr={self.optimizer.param_groups[0]['lr']:.6f}") + else: + pbar.set_description(f"loss={loss_val:.4f} ({total_loss/self.local_step:.4f})") + pbar.update(loader.batch_size) + + if self.ema is not None: + self.ema.update() + + average_loss = total_loss / self.local_step + self.stats["loss"].append(average_loss) + + if self.local_rank == 0: + pbar.close() + if self.report_metric_at_train: + for metric 
in self.metrics: + self.log(metric.report(), style="red") + if self.use_tensorboardX: + metric.write(self.writer, self.epoch, prefix="train") + metric.clear() + + if not self.scheduler_update_every_step: + if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): + self.lr_scheduler.step(average_loss) + else: + self.lr_scheduler.step() + + cpu_mem, gpu_mem = get_CPU_mem(), get_GPU_mem()[0] + self.log(f"==> [{time.strftime('%Y-%m-%d_%H-%M-%S')}] Finished Epoch {self.epoch}/{max_epochs}. CPU={cpu_mem:.1f}GB, GPU={gpu_mem:.1f}GB.") + + + def evaluate_one_epoch(self, loader, name=None): + self.log(f"++> Evaluate {self.workspace} at epoch {self.epoch} ...") + + if name is None: + name = f'{self.name}_ep{self.epoch:04d}' + + total_loss = 0 + if self.local_rank == 0: + for metric in self.metrics: + metric.clear() + + self.model.eval() + + if self.ema is not None: + self.ema.store() + self.ema.copy_to() + + if self.local_rank == 0: + pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, bar_format='{desc}: {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]') + + with torch.no_grad(): + self.local_step = 0 + + for data in loader: + self.local_step += 1 + + with torch.cuda.amp.autocast(enabled=self.fp16): + preds, preds_depth, loss = self.eval_step(data) + + # all_gather/reduce the statistics (NCCL only support all_*) + if self.world_size > 1: + dist.all_reduce(loss, op=dist.ReduceOp.SUM) + loss = loss / self.world_size + + preds_list = [torch.zeros_like(preds).to(self.device) for _ in range(self.world_size)] # [[B, ...], [B, ...], ...] + dist.all_gather(preds_list, preds) + preds = torch.cat(preds_list, dim=0) + + preds_depth_list = [torch.zeros_like(preds_depth).to(self.device) for _ in range(self.world_size)] # [[B, ...], [B, ...], ...] + dist.all_gather(preds_depth_list, preds_depth) + preds_depth = torch.cat(preds_depth_list, dim=0) + + loss_val = loss.item() + total_loss += loss_val + + # only rank = 0 will perform evaluation. 
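+                # when running distributed, the all_gather above has collected predictions from every rank;
+                # only rank 0 writes the validation images to disk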
+ if self.local_rank == 0: + + # save image + save_path = os.path.join(self.workspace, 'validation', f'{name}_rgb_{self.local_step:04d}.png') + save_path_depth = os.path.join(self.workspace, 'validation', f'{name}_depth_{self.local_step:04d}.png') + + #self.log(f"==> Saving validation image to {save_path}") + os.makedirs(os.path.dirname(save_path), exist_ok=True) + + pred = preds[0].detach().cpu().numpy() + pred = (pred * 255).astype(np.uint8) + + pred_depth = preds_depth[0].detach().cpu().numpy() + pred_depth = (pred_depth - pred_depth.min()) / (pred_depth.max() - pred_depth.min() + 1e-6) + pred_depth = (pred_depth * 255).astype(np.uint8) + + cv2.imwrite(save_path, cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)) + cv2.imwrite(save_path_depth, pred_depth) + + pbar.set_description(f"loss={loss_val:.4f} ({total_loss/self.local_step:.4f})") + pbar.update(loader.batch_size) + + + average_loss = total_loss / self.local_step + self.stats["valid_loss"].append(average_loss) + + if self.local_rank == 0: + pbar.close() + if not self.use_loss_as_metric and len(self.metrics) > 0: + result = self.metrics[0].measure() + self.stats["results"].append(result if self.best_mode == 'min' else - result) # if max mode, use -result + else: + self.stats["results"].append(average_loss) # if no metric, choose best by min loss + + for metric in self.metrics: + self.log(metric.report(), style="blue") + if self.use_tensorboardX: + metric.write(self.writer, self.epoch, prefix="evaluate") + metric.clear() + + if self.ema is not None: + self.ema.restore() + + self.log(f"++> Evaluate epoch {self.epoch} Finished.") + + def save_checkpoint(self, name=None, full=False, best=False): + + if name is None: + name = f'{self.name}_ep{self.epoch:04d}' + + state = { + 'epoch': self.epoch, + 'global_step': self.global_step, + 'stats': self.stats, + } + + if self.model.cuda_ray: + state['mean_density'] = self.model.mean_density + + if self.opt.dmtet: + state['tet_scale'] = self.model.tet_scale.cpu().numpy() + + if full: + state['optimizer'] = self.optimizer.state_dict() + state['lr_scheduler'] = self.lr_scheduler.state_dict() + state['scaler'] = self.scaler.state_dict() + if self.ema is not None: + state['ema'] = self.ema.state_dict() + + if not best: + + state['model'] = self.model.state_dict() + + file_path = f"{name}.pth" + + self.stats["checkpoints"].append(file_path) + + if len(self.stats["checkpoints"]) > self.max_keep_ckpt: + old_ckpt = os.path.join(self.ckpt_path, self.stats["checkpoints"].pop(0)) + if os.path.exists(old_ckpt): + os.remove(old_ckpt) + + torch.save(state, os.path.join(self.ckpt_path, file_path)) + + else: + if len(self.stats["results"]) > 0: + # always save best since loss cannot reflect performance. 
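+                # the best-vs-previous comparison is bypassed on purpose (the guidance loss is not a reliable
+                # quality signal), so the latest weights (EMA if enabled) are always written to best_path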
+ if True: + # self.log(f"[INFO] New best result: {self.stats['best_result']} --> {self.stats['results'][-1]}") + # self.stats["best_result"] = self.stats["results"][-1] + + # save ema results + if self.ema is not None: + self.ema.store() + self.ema.copy_to() + + state['model'] = self.model.state_dict() + + if self.ema is not None: + self.ema.restore() + + torch.save(state, self.best_path) + else: + self.log(f"[WARN] no evaluated results found, skip saving best checkpoint.") + + def load_checkpoint(self, checkpoint=None, model_only=False): + if checkpoint is None: + checkpoint_list = sorted(glob.glob(f'{self.ckpt_path}/*.pth')) + if checkpoint_list: + checkpoint = checkpoint_list[-1] + self.log(f"[INFO] Latest checkpoint is {checkpoint}") + else: + self.log("[WARN] No checkpoint found, model randomly initialized.") + return + + checkpoint_dict = torch.load(checkpoint, map_location=self.device) + + if 'model' not in checkpoint_dict: + self.model.load_state_dict(checkpoint_dict) + self.log("[INFO] loaded model.") + return + + missing_keys, unexpected_keys = self.model.load_state_dict(checkpoint_dict['model'], strict=False) + self.log("[INFO] loaded model.") + if len(missing_keys) > 0: + self.log(f"[WARN] missing keys: {missing_keys}") + if len(unexpected_keys) > 0: + self.log(f"[WARN] unexpected keys: {unexpected_keys}") + + if self.ema is not None and 'ema' in checkpoint_dict: + try: + self.ema.load_state_dict(checkpoint_dict['ema']) + self.log("[INFO] loaded EMA.") + except: + self.log("[WARN] failed to loaded EMA.") + + if self.model.cuda_ray: + if 'mean_density' in checkpoint_dict: + self.model.mean_density = checkpoint_dict['mean_density'] + + if self.opt.dmtet: + if 'tet_scale' in checkpoint_dict: + new_scale = torch.from_numpy(checkpoint_dict['tet_scale']).to(self.device) + self.model.verts *= new_scale / self.model.tet_scale + self.model.tet_scale = new_scale + + if model_only: + return + + self.stats = checkpoint_dict['stats'] + self.epoch = checkpoint_dict['epoch'] + self.global_step = checkpoint_dict['global_step'] + self.log(f"[INFO] load at epoch {self.epoch}, global step {self.global_step}") + + if self.optimizer and 'optimizer' in checkpoint_dict: + try: + self.optimizer.load_state_dict(checkpoint_dict['optimizer']) + self.log("[INFO] loaded optimizer.") + except: + self.log("[WARN] Failed to load optimizer.") + + if self.lr_scheduler and 'lr_scheduler' in checkpoint_dict: + try: + self.lr_scheduler.load_state_dict(checkpoint_dict['lr_scheduler']) + self.log("[INFO] loaded scheduler.") + except: + self.log("[WARN] Failed to load scheduler.") + + if self.scaler and 'scaler' in checkpoint_dict: + try: + self.scaler.load_state_dict(checkpoint_dict['scaler']) + self.log("[INFO] loaded scaler.") + except: + self.log("[WARN] Failed to load scaler.") + + +def get_CPU_mem(): + return psutil.Process(os.getpid()).memory_info().rss /1024**3 + + +def get_GPU_mem(): + num = torch.cuda.device_count() + mem, mems = 0, [] + for i in range(num): + mem_free, mem_total = torch.cuda.mem_get_info(i) + mems.append(int(((mem_total - mem_free)/1024**3)*1000)/1000) + mem += mems[-1] + return mem, mems diff --git a/stable-dreamfusion-3DPortrait/optimizer.py b/stable-dreamfusion-3DPortrait/optimizer.py new file mode 100644 index 0000000..f5bb64f --- /dev/null +++ b/stable-dreamfusion-3DPortrait/optimizer.py @@ -0,0 +1,325 @@ +# Copyright 2022 Garena Online Private Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List + +import torch +from torch import Tensor +from torch.optim.optimizer import Optimizer + + +class Adan(Optimizer): + """ + Implements a pytorch variant of Adan + Adan was proposed in + Adan: Adaptive Nesterov Momentum Algorithm for + Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022. + https://arxiv.org/abs/2208.06677 + Arguments: + params (iterable): iterable of parameters to optimize or + dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float, flot], optional): coefficients used for + first- and second-order moments. (default: (0.98, 0.92, 0.99)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): decoupled weight decay + (L2 penalty) (default: 0) + max_grad_norm (float, optional): value used to clip + global grad norm (default: 0.0 no clip) + no_prox (bool): how to perform the decoupled weight decay + (default: False) + foreach (bool): if True would use torch._foreach implementation. + It's faster but uses slightly more memory. (default: True) + """ + def __init__(self, + params, + lr=1e-3, + betas=(0.98, 0.92, 0.99), + eps=1e-8, + weight_decay=0.0, + max_grad_norm=0.0, + no_prox=False, + foreach: bool = True): + if not 0.0 <= max_grad_norm: + raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm)) + if not 0.0 <= lr: + raise ValueError('Invalid learning rate: {}'.format(lr)) + if not 0.0 <= eps: + raise ValueError('Invalid epsilon value: {}'.format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError('Invalid beta parameter at index 0: {}'.format( + betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError('Invalid beta parameter at index 1: {}'.format( + betas[1])) + if not 0.0 <= betas[2] < 1.0: + raise ValueError('Invalid beta parameter at index 2: {}'.format( + betas[2])) + defaults = dict(lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm, + no_prox=no_prox, + foreach=foreach) + super().__init__(params, defaults) + + def __setstate__(self, state): + super(Adan, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('no_prox', False) + + @torch.no_grad() + def restart_opt(self): + for group in self.param_groups: + group['step'] = 0 + for p in group['params']: + if p.requires_grad: + state = self.state[p] + # State initialization + + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + # Exponential moving average of gradient difference + state['exp_avg_diff'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step.""" + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if self.defaults['max_grad_norm'] > 0: + device = 
self.param_groups[0]['params'][0].device + global_grad_norm = torch.zeros(1, device=device) + + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], + device=device) + for group in self.param_groups: + + for p in group['params']: + if p.grad is not None: + grad = p.grad + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + + clip_global_grad_norm = torch.clamp( + max_grad_norm / (global_grad_norm + group['eps']), + max=1.0).item() + else: + clip_global_grad_norm = 1.0 + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + exp_avg_diffs = [] + neg_pre_grads = [] + + beta1, beta2, beta3 = group['betas'] + # assume same step across group now to simplify things + # per parameter step can be easily support + # by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + bias_correction1 = 1.0 - beta1**group['step'] + bias_correction2 = 1.0 - beta2**group['step'] + bias_correction3 = 1.0 - beta3**group['step'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + grads.append(p.grad) + + state = self.state[p] + if len(state) == 0: + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + state['exp_avg_diff'] = torch.zeros_like(p) + + if 'neg_pre_grad' not in state or group['step'] == 1: + state['neg_pre_grad'] = p.grad.clone().mul_( + -clip_global_grad_norm) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + exp_avg_diffs.append(state['exp_avg_diff']) + neg_pre_grads.append(state['neg_pre_grad']) + + kwargs = dict( + params=params_with_grad, + grads=grads, + exp_avgs=exp_avgs, + exp_avg_sqs=exp_avg_sqs, + exp_avg_diffs=exp_avg_diffs, + neg_pre_grads=neg_pre_grads, + beta1=beta1, + beta2=beta2, + beta3=beta3, + bias_correction1=bias_correction1, + bias_correction2=bias_correction2, + bias_correction3_sqrt=math.sqrt(bias_correction3), + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + no_prox=group['no_prox'], + clip_global_grad_norm=clip_global_grad_norm, + ) + + if group['foreach']: + _multi_tensor_adan(**kwargs) + else: + _single_tensor_adan(**kwargs) + + return loss + + +def _single_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + exp_avg_diff = exp_avg_diffs[i] + neg_grad_or_diff = neg_pre_grads[i] + + grad.mul_(clip_global_grad_norm) + + # for memory saving, we use `neg_grad_or_diff` + # to get some temp variable in a inplace way + neg_grad_or_diff.add_(grad) + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t + exp_avg_diff.mul_(beta2).add_(neg_grad_or_diff, + alpha=1 - beta2) # diff_t + + neg_grad_or_diff.mul_(beta2).add_(grad) + exp_avg_sq.mul_(beta3).addcmul_(neg_grad_or_diff, + neg_grad_or_diff, + value=1 - beta3) # n_t + + denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps) + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + param.mul_(1 - lr * weight_decay) 
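+ # no_prox: AdamW-style decoupled decay applied to the parameter *before* the update,
+ #   p <- p*(1 - lr*wd) - step_size * m/denom - step_size_diff * diff/denom;
+ # the else-branch below uses the proximal form and divides by (1 + lr*wd) *after* the update.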
+ param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + else: + param.addcdiv_(exp_avg, denom, value=-step_size) + param.addcdiv_(exp_avg_diff, denom, value=-step_size_diff) + param.div_(1 + lr * weight_decay) + + neg_grad_or_diff.zero_().add_(grad, alpha=-1.0) + + +def _multi_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + neg_pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + if len(params) == 0: + return + + torch._foreach_mul_(grads, clip_global_grad_norm) + + # for memory saving, we use `neg_pre_grads` + # to get some temp variable in a inplace way + torch._foreach_add_(neg_pre_grads, grads) + + torch._foreach_mul_(exp_avgs, beta1) + torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t + + torch._foreach_mul_(exp_avg_diffs, beta2) + torch._foreach_add_(exp_avg_diffs, neg_pre_grads, + alpha=1 - beta2) # diff_t + + torch._foreach_mul_(neg_pre_grads, beta2) + torch._foreach_add_(neg_pre_grads, grads) + torch._foreach_mul_(exp_avg_sqs, beta3) + torch._foreach_addcmul_(exp_avg_sqs, + neg_pre_grads, + neg_pre_grads, + value=1 - beta3) # n_t + + denom = torch._foreach_sqrt(exp_avg_sqs) + torch._foreach_div_(denom, bias_correction3_sqrt) + torch._foreach_add_(denom, eps) + + step_size_diff = lr * beta2 / bias_correction2 + step_size = lr / bias_correction1 + + if no_prox: + torch._foreach_mul_(params, 1 - lr * weight_decay) + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, + exp_avg_diffs, + denom, + value=-step_size_diff) + else: + torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size) + torch._foreach_addcdiv_(params, + exp_avg_diffs, + denom, + value=-step_size_diff) + torch._foreach_div_(params, 1 + lr * weight_decay) + torch._foreach_zero_(neg_pre_grads) + torch._foreach_add_(neg_pre_grads, grads, alpha=-1.0) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/portrait3d_main.py b/stable-dreamfusion-3DPortrait/portrait3d_main.py new file mode 100644 index 0000000..7248be9 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/portrait3d_main.py @@ -0,0 +1,72 @@ +import os + +import glob +import random +import argparse +# +parser = argparse.ArgumentParser() +parser.add_argument('--trigrid_decoder_ckpt', type=str) +parser.add_argument('--inversion_name', type=str) +parser.add_argument('--network_path', type=str) +parser.add_argument('--test_data_dir', type=str,default='../test_data') +parser.add_argument('--df_ckpt', type=str,default='SG161222/Realistic_Vision_V5.1_noVAE') + +opt = parser.parse_args() +trigrid_decoder_ckpt = opt.trigrid_decoder_ckpt +inversion_name = opt.inversion_name +network_path = opt.network_path +test_data_dir = opt.test_data_dir +df_ckpt = opt.df_ckpt + +exp_name = 'text_to_3dportrait' + +# the current file's path +root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +print(root) + +todo = glob.glob(os.path.join(test_data_dir, '*/prompt.txt')) +for prompt_file in todo: + + with open(prompt_file, 'r') as f: + prompt = f.read() + + prompt = prompt.replace('\n', '') + + dir_ = os.path.dirname(prompt_file) + name = dir_.split('/')[-1].split('\\')[-1] + + if 
os.path.exists(f'output/{exp_name}/{name}/results_final/small_pose_final.mp4'): + continue + trigrid_list = glob.glob(f'{dir_}/samples_new_crop/{inversion_name}/*/inversion_trigrid.pkl') + if len(trigrid_list) == 0: + continue + inversion_trigrid = trigrid_list[0] + + + # change dir + os.chdir(os.path.join(root, 'stable-dreamfusion-3DPortrait')) + cmd = f'python main_3DPortraitGAN_cam.py --workspace output/{exp_name}/{name} --latent_iter_ratio 0 --trigrid_lr_ratio 200 200 200 200 200 40 20 --t_range 0.02 0.4 --vram_O --w 128 --h 128 --H 512 --W 512 --iters 2000 --text "{prompt}" --hf_key {df_ckpt} --trigrid_path {inversion_trigrid} --trigrid_decoder_ckpt {trigrid_decoder_ckpt}' + print(cmd) + os.system(cmd) + + os.chdir(os.path.join(root, '3DPortraitGAN_pyramid')) + cmd = f'python gen_quality_improve_data_from_triplane.py --data_dir={root}/stable-dreamfusion-3DPortrait/output/{exp_name}/{name} --grid=1x1 --network={network_path}' + print(cmd) + os.system(cmd) + + os.chdir(os.path.join(root, 'stable-dreamfusion-3DPortrait')) + cmd = f'python guidance/sdedit.py --data_dir {root}/stable-dreamfusion-3DPortrait/output/{exp_name}/{name} --hf_key {df_ckpt} -H 512 -W 512 --seed 42 --test_data_dir={test_data_dir}' + print(cmd) + os.system(cmd) + + os.chdir(os.path.join(root, '3DPortraitGAN_pyramid')) + cmd = f'python optimize_trigrid.py --data_dir={root}/stable-dreamfusion-3DPortrait/output/{exp_name}/{name} --grid=1x1 --network={network_path}' + print(cmd) + os.system(cmd) + + os.chdir(os.path.join(root, '3DPortraitGAN_pyramid')) + cmd = f'python gen_videos_shapes_from_optimized_triplane.py --data_dir={root}/stable-dreamfusion-3DPortrait/output/{exp_name}/{name} --grid=1x1 --network={network_path}' + print(cmd) + os.system(cmd) + + diff --git a/stable-dreamfusion-3DPortrait/preprocess_image.py b/stable-dreamfusion-3DPortrait/preprocess_image.py new file mode 100644 index 0000000..f7937b2 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/preprocess_image.py @@ -0,0 +1,203 @@ +import os +import sys +import cv2 +import argparse +import numpy as np +import matplotlib.pyplot as plt + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import transforms +from PIL import Image + +class BackgroundRemoval(): + def __init__(self, device='cuda'): + + from carvekit.api.high import HiInterface + self.interface = HiInterface( + object_type="object", # Can be "object" or "hairs-like". + batch_size_seg=5, + batch_size_matting=1, + device=device, + seg_mask_size=640, # Use 640 for Tracer B7 and 320 for U2Net + matting_mask_size=2048, + trimap_prob_threshold=231, + trimap_dilation=30, + trimap_erosion_iters=5, + fp16=True, + ) + + @torch.no_grad() + def __call__(self, image): + # image: [H, W, 3] array in [0, 255]. 
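+ # (carvekit's HiInterface returns an RGBA PIL image; its alpha channel is the
+ # predicted foreground matte, which the preprocessing script below thresholds
+ # to obtain the foreground mask.)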
+ image = Image.fromarray(image) + + image = self.interface([image])[0] + image = np.array(image) + + return image + +class BLIP2(): + def __init__(self, device='cuda'): + self.device = device + from transformers import AutoProcessor, Blip2ForConditionalGeneration + self.processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") + self.model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16).to(device) + + @torch.no_grad() + def __call__(self, image): + image = Image.fromarray(image) + inputs = self.processor(image, return_tensors="pt").to(self.device, torch.float16) + + generated_ids = self.model.generate(**inputs, max_new_tokens=20) + generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() + + return generated_text + + +class DPT(): + def __init__(self, task='depth', device='cuda'): + + self.task = task + self.device = device + + from dpt import DPTDepthModel + + if task == 'depth': + path = 'pretrained/omnidata/omnidata_dpt_depth_v2.ckpt' + self.model = DPTDepthModel(backbone='vitb_rn50_384') + self.aug = transforms.Compose([ + transforms.Resize((384, 384)), + transforms.ToTensor(), + transforms.Normalize(mean=0.5, std=0.5) + ]) + + else: # normal + path = 'pretrained/omnidata/omnidata_dpt_normal_v2.ckpt' + self.model = DPTDepthModel(backbone='vitb_rn50_384', num_channels=3) + self.aug = transforms.Compose([ + transforms.Resize((384, 384)), + transforms.ToTensor() + ]) + + # load model + checkpoint = torch.load(path, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = {} + for k, v in checkpoint['state_dict'].items(): + state_dict[k[6:]] = v + else: + state_dict = checkpoint + self.model.load_state_dict(state_dict) + self.model.eval().to(device) + + + @torch.no_grad() + def __call__(self, image): + # image: np.ndarray, uint8, [H, W, 3] + H, W = image.shape[:2] + image = Image.fromarray(image) + + image = self.aug(image).unsqueeze(0).to(self.device) + + if self.task == 'depth': + depth = self.model(image).clamp(0, 1) + depth = F.interpolate(depth.unsqueeze(1), size=(H, W), mode='bicubic', align_corners=False) + depth = depth.squeeze(1).cpu().numpy() + return depth + else: + normal = self.model(image).clamp(0, 1) + normal = F.interpolate(normal, size=(H, W), mode='bicubic', align_corners=False) + normal = normal.cpu().numpy() + return normal + + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('path', type=str, help="path to image (png, jpeg, etc.)") + parser.add_argument('--size', default=256, type=int, help="output resolution") + parser.add_argument('--border_ratio', default=0.2, type=float, help="output border ratio") + parser.add_argument('--recenter', type=bool, default=True, help="recenter, potentially not helpful for multiview zero123") + parser.add_argument('--dont_recenter', dest='recenter', action='store_false') + opt = parser.parse_args() + + out_dir = os.path.dirname(opt.path) + out_rgba = os.path.join(out_dir, os.path.basename(opt.path).split('.')[0] + '_rgba.png') + out_depth = os.path.join(out_dir, os.path.basename(opt.path).split('.')[0] + '_depth.png') + out_normal = os.path.join(out_dir, os.path.basename(opt.path).split('.')[0] + '_normal.png') + out_caption = os.path.join(out_dir, os.path.basename(opt.path).split('.')[0] + '_caption.txt') + + # load image + print(f'[INFO] loading image...') + image = cv2.imread(opt.path, cv2.IMREAD_UNCHANGED) + if image.shape[-1] == 4: + image = cv2.cvtColor(image, 
cv2.COLOR_BGRA2RGB) + else: + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + # carve background + print(f'[INFO] background removal...') + carved_image = BackgroundRemoval()(image) # [H, W, 4] + mask = carved_image[..., -1] > 0 + + # predict depth + print(f'[INFO] depth estimation...') + dpt_depth_model = DPT(task='depth') + depth = dpt_depth_model(image)[0] + depth[mask] = (depth[mask] - depth[mask].min()) / (depth[mask].max() - depth[mask].min() + 1e-9) + depth[~mask] = 0 + depth = (depth * 255).astype(np.uint8) + del dpt_depth_model + + # predict normal + print(f'[INFO] normal estimation...') + dpt_normal_model = DPT(task='normal') + normal = dpt_normal_model(image)[0] + normal = (normal * 255).astype(np.uint8).transpose(1, 2, 0) + normal[~mask] = 0 + del dpt_normal_model + + # recenter + if opt.recenter: + print(f'[INFO] recenter...') + final_rgba = np.zeros((opt.size, opt.size, 4), dtype=np.uint8) + final_depth = np.zeros((opt.size, opt.size), dtype=np.uint8) + final_normal = np.zeros((opt.size, opt.size, 3), dtype=np.uint8) + + coords = np.nonzero(mask) + x_min, x_max = coords[0].min(), coords[0].max() + y_min, y_max = coords[1].min(), coords[1].max() + h = x_max - x_min + w = y_max - y_min + desired_size = int(opt.size * (1 - opt.border_ratio)) + scale = desired_size / max(h, w) + h2 = int(h * scale) + w2 = int(w * scale) + x2_min = (opt.size - h2) // 2 + x2_max = x2_min + h2 + y2_min = (opt.size - w2) // 2 + y2_max = y2_min + w2 + final_rgba[x2_min:x2_max, y2_min:y2_max] = cv2.resize(carved_image[x_min:x_max, y_min:y_max], (w2, h2), interpolation=cv2.INTER_AREA) + final_depth[x2_min:x2_max, y2_min:y2_max] = cv2.resize(depth[x_min:x_max, y_min:y_max], (w2, h2), interpolation=cv2.INTER_AREA) + final_normal[x2_min:x2_max, y2_min:y2_max] = cv2.resize(normal[x_min:x_max, y_min:y_max], (w2, h2), interpolation=cv2.INTER_AREA) + + else: + final_rgba = carved_image + final_depth = depth + final_normal = normal + + # write output + cv2.imwrite(out_rgba, cv2.cvtColor(final_rgba, cv2.COLOR_RGBA2BGRA)) + cv2.imwrite(out_depth, final_depth) + cv2.imwrite(out_normal, final_normal) + + # predict caption (it's too slow... 
use your brain instead) + # print(f'[INFO] captioning...') + # blip2 = BLIP2() + # caption = blip2(image) + # with open(out_caption, 'w') as f: + # f.write(caption) + diff --git a/stable-dreamfusion-3DPortrait/quality_improve.py b/stable-dreamfusion-3DPortrait/quality_improve.py new file mode 100644 index 0000000..5be2e46 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/quality_improve.py @@ -0,0 +1,27 @@ +# 190,140,22,129,104,113,133,164,15,31,72,135,83,149,85,169 +for name in [91,111,96,53,143]: + + print('cd F:/high_quality_3DPortraitGAN/exp/3DPortraitGAN-hierarchy-v2') + print('activate 3dportraitgan') + + cmd = f'python gen_quality_improve_data_from_triplane.py --data_dir=F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion-hierarchy-v2/output/better_direction_prompt/{name} --grid=1x1 --network=F:/high_quality_3DPortraitGAN/exp/3DPortraitGAN-hierarchy-v2/models/model.pkl' + + print(cmd) + + + print('cd F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion-hierarchy-v2') + print('activate ldm_3dgan_kaolin') + + cmd = f'python guidance/sdedit.py --data_dir F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion-hierarchy-v2/output/better_direction_prompt/{name} --hf_key F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion/pretrained/SG161222Realistic_Vision_V5.1_noVAE -H 512 -W 512 --seed 42' + + print(cmd) + + + print('cd F:/high_quality_3DPortraitGAN/exp/3DPortraitGAN-hierarchy-v2') + print('activate 3dportraitgan') + cmd = f'python optimize_trigrid.py --data_dir=F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion-hierarchy-v2/output/better_direction_prompt/{name} --grid=1x1 --network=F:/high_quality_3DPortraitGAN/exp/3DPortraitGAN-hierarchy-v2/models/model.pkl' + print(cmd) + + + cmd = f'python gen_videos_shapes_from_optimized_triplane.py --data_dir=F:/high_quality_3DPortraitGAN/exp/stable-dreamfusion-hierarchy-v2/output/better_direction_prompt/{name} --grid=1x1 --network=F:/high_quality_3DPortraitGAN/exp/3DPortraitGAN-hierarchy-v2/models/model.pkl' + print(cmd) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/raymarching/__init__.py b/stable-dreamfusion-3DPortrait/raymarching/__init__.py new file mode 100644 index 0000000..26d3cc6 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/raymarching/__init__.py @@ -0,0 +1 @@ +from .raymarching import * \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/raymarching/backend.py b/stable-dreamfusion-3DPortrait/raymarching/backend.py new file mode 100644 index 0000000..7cc0d76 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/raymarching/backend.py @@ -0,0 +1,41 @@ +import os +from torch.utils.cpp_extension import load + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + + # If cl.exe is not on path, try to find it. 
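+ # (appending the located MSVC Hostx64/x64 bin directory to PATH lets
+ # torch.utils.cpp_extension.load() invoke cl.exe when JIT-compiling the CUDA sources below.)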
+ if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +_backend = load(name='_raymarching', + extra_cflags=c_flags, + extra_cuda_cflags=nvcc_flags, + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'raymarching.cu', + 'bindings.cpp', + ]], + ) + +__all__ = ['_backend'] \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/raymarching/raymarching.py b/stable-dreamfusion-3DPortrait/raymarching/raymarching.py new file mode 100644 index 0000000..760d730 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/raymarching/raymarching.py @@ -0,0 +1,398 @@ +import numpy as np +import time + +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.cuda.amp import custom_bwd, custom_fwd + +# lazy building: +# `import raymarching` will not immediately build the extension, only if you actually call any functions. + +BACKEND = None + +def get_backend(): + global BACKEND + + if BACKEND is None: + try: + import _raymarching as _backend + except ImportError: + from .backend import _backend + + BACKEND = _backend + + return BACKEND + +# ---------------------------------------- +# utils +# ---------------------------------------- + +class _near_far_from_aabb(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, rays_o, rays_d, aabb, min_near=0.2): + ''' near_far_from_aabb, CUDA implementation + Calculate rays' intersection time (near and far) with aabb + Args: + rays_o: float, [N, 3] + rays_d: float, [N, 3] + aabb: float, [6], (xmin, ymin, zmin, xmax, ymax, zmax) + min_near: float, scalar + Returns: + nears: float, [N] + fars: float, [N] + ''' + if not rays_o.is_cuda: rays_o = rays_o.cuda() + if not rays_d.is_cuda: rays_d = rays_d.cuda() + + rays_o = rays_o.contiguous().view(-1, 3) + rays_d = rays_d.contiguous().view(-1, 3) + + N = rays_o.shape[0] # num rays + + nears = torch.empty(N, dtype=rays_o.dtype, device=rays_o.device) + fars = torch.empty(N, dtype=rays_o.dtype, device=rays_o.device) + + get_backend().near_far_from_aabb(rays_o, rays_d, aabb, N, min_near, nears, fars) + + return nears, fars + +near_far_from_aabb = _near_far_from_aabb.apply + + +class _sph_from_ray(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, rays_o, rays_d, radius): + ''' sph_from_ray, CUDA implementation + get spherical coordinate on the background sphere from rays. + Assume rays_o are inside the Sphere(radius). + Args: + rays_o: [N, 3] + rays_d: [N, 3] + radius: scalar, float + Return: + coords: [N, 2], in [-1, 1], theta and phi on a sphere. (further-surface) + ''' + if not rays_o.is_cuda: rays_o = rays_o.cuda() + if not rays_d.is_cuda: rays_d = rays_d.cuda() + + rays_o = rays_o.contiguous().view(-1, 3) + rays_d = rays_d.contiguous().view(-1, 3) + + N = rays_o.shape[0] # num rays + + coords = torch.empty(N, 2, dtype=rays_o.dtype, device=rays_o.device) + + get_backend().sph_from_ray(rays_o, rays_d, radius, N, coords) + + return coords + +sph_from_ray = _sph_from_ray.apply + + +class _morton3D(Function): + @staticmethod + def forward(ctx, coords): + ''' morton3D, CUDA implementation + Args: + coords: [N, 3], int32, in [0, 128) (for some reason there is no uint32 tensor in torch...) + TODO: check if the coord range is valid! 
(current 128 is safe) + Returns: + indices: [N], int32, in [0, 128^3) + + ''' + if not coords.is_cuda: coords = coords.cuda() + + N = coords.shape[0] + + indices = torch.empty(N, dtype=torch.int32, device=coords.device) + + get_backend().morton3D(coords.int(), N, indices) + + return indices + +morton3D = _morton3D.apply + +class _morton3D_invert(Function): + @staticmethod + def forward(ctx, indices): + ''' morton3D_invert, CUDA implementation + Args: + indices: [N], int32, in [0, 128^3) + Returns: + coords: [N, 3], int32, in [0, 128) + + ''' + if not indices.is_cuda: indices = indices.cuda() + + N = indices.shape[0] + + coords = torch.empty(N, 3, dtype=torch.int32, device=indices.device) + + get_backend().morton3D_invert(indices.int(), N, coords) + + return coords + +morton3D_invert = _morton3D_invert.apply + + +class _packbits(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, grid, thresh, bitfield=None): + ''' packbits, CUDA implementation + Pack up the density grid into a bit field to accelerate ray marching. + Args: + grid: float, [C, H * H * H], assume H % 2 == 0 + thresh: float, threshold + Returns: + bitfield: uint8, [C, H * H * H / 8] + ''' + if not grid.is_cuda: grid = grid.cuda() + grid = grid.contiguous() + + C = grid.shape[0] + H3 = grid.shape[1] + N = C * H3 // 8 + + if bitfield is None: + bitfield = torch.empty(N, dtype=torch.uint8, device=grid.device) + + get_backend().packbits(grid, N, thresh, bitfield) + + return bitfield + +packbits = _packbits.apply + + +class _flatten_rays(Function): + @staticmethod + def forward(ctx, rays, M): + ''' flatten rays + Args: + rays: [N, 2], all rays' (point_offset, point_count), + M: scalar, int, count of points (we cannot get this info from rays unfortunately...) + Returns: + res: [M], flattened ray index. + ''' + if not rays.is_cuda: rays = rays.cuda() + rays = rays.contiguous() + + N = rays.shape[0] + + res = torch.zeros(M, dtype=torch.int, device=rays.device) + + get_backend().flatten_rays(rays, N, M, res) + + return res + +flatten_rays = _flatten_rays.apply + +# ---------------------------------------- +# train functions +# ---------------------------------------- + +class _march_rays_train(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, rays_o, rays_d, bound, density_bitfield, C, H, nears, fars, perturb=False, dt_gamma=0, max_steps=1024, contract=False): + ''' march rays to generate points (forward only) + Args: + rays_o/d: float, [N, 3] + bound: float, scalar + density_bitfield: uint8: [CHHH // 8] + C: int + H: int + nears/fars: float, [N] + step_counter: int32, (2), used to count the actual number of generated points. + mean_count: int32, estimated mean steps to accelerate training. (but will randomly drop rays if the actual point count exceeded this threshold.) + perturb: bool + align: int, pad output so its size is dividable by align, set to -1 to disable. + force_all_rays: bool, ignore step_counter and mean_count, always calculate all rays. Useful if rendering the whole image, instead of some rays. + dt_gamma: float, called cone_angle in instant-ngp, exponentially accelerate ray marching if > 0. (very significant effect, but generally lead to worse performance) + max_steps: int, max number of sampled points along each ray, also affect min_stepsize. + Returns: + xyzs: float, [M, 3], all generated points' coords. (all rays concated, need to use `rays` to extract points belonging to each ray) + dirs: float, [M, 3], all generated points' view dirs. 
+ ts: float, [M, 2], all generated points' ts. + rays: int32, [N, 2], all rays' (point_offset, point_count), e.g., xyzs[rays[i, 0]:(rays[i, 0] + rays[i, 1])] --> points belonging to rays[i, 0] + ''' + + if not rays_o.is_cuda: rays_o = rays_o.cuda() + if not rays_d.is_cuda: rays_d = rays_d.cuda() + if not density_bitfield.is_cuda: density_bitfield = density_bitfield.cuda() + + rays_o = rays_o.float().contiguous().view(-1, 3) + rays_d = rays_d.float().contiguous().view(-1, 3) + density_bitfield = density_bitfield.contiguous() + + N = rays_o.shape[0] # num rays + + step_counter = torch.zeros(1, dtype=torch.int32, device=rays_o.device) # point counter, ray counter + + if perturb: + noises = torch.rand(N, dtype=rays_o.dtype, device=rays_o.device) + else: + noises = torch.zeros(N, dtype=rays_o.dtype, device=rays_o.device) + + # first pass: write rays, get total number of points M to render + rays = torch.empty(N, 2, dtype=torch.int32, device=rays_o.device) # id, offset, num_steps + get_backend().march_rays_train(rays_o, rays_d, density_bitfield, bound, contract, dt_gamma, max_steps, N, C, H, nears, fars, None, None, None, rays, step_counter, noises) + + # allocate based on M + M = step_counter.item() + # print(M, N) + # print(rays[:, 0].max()) + + xyzs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device) + dirs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device) + ts = torch.zeros(M, 2, dtype=rays_o.dtype, device=rays_o.device) + + # second pass: write outputs + get_backend().march_rays_train(rays_o, rays_d, density_bitfield, bound, contract, dt_gamma, max_steps, N, C, H, nears, fars, xyzs, dirs, ts, rays, step_counter, noises) + + return xyzs, dirs, ts, rays + +march_rays_train = _march_rays_train.apply + + +class _composite_rays_train(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, sigmas, rgbs, ts, rays, T_thresh=1e-4, binarize=False): + ''' composite rays' rgbs, according to the ray marching formula. + Args: + rgbs: float, [M, 3] + sigmas: float, [M,] + ts: float, [M, 2] + rays: int32, [N, 3] + Returns: + weights: float, [M] + weights_sum: float, [N,], the alpha channel + depth: float, [N, ], the Depth + image: float, [N, 3], the RGB channel (after multiplying alpha!) 
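+ Note: image is premultiplied by alpha; callers typically composite it over a
+ background as image + (1 - weights_sum).unsqueeze(-1) * bg_color.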
+ ''' + + sigmas = sigmas.float().contiguous() + rgbs = rgbs.float().contiguous() + + M = sigmas.shape[0] + N = rays.shape[0] + + weights = torch.zeros(M, dtype=sigmas.dtype, device=sigmas.device) # may leave unmodified, so init with 0 + weights_sum = torch.empty(N, dtype=sigmas.dtype, device=sigmas.device) + + depth = torch.empty(N, dtype=sigmas.dtype, device=sigmas.device) + image = torch.empty(N, 3, dtype=sigmas.dtype, device=sigmas.device) + + get_backend().composite_rays_train_forward(sigmas, rgbs, ts, rays, M, N, T_thresh, binarize, weights, weights_sum, depth, image) + + ctx.save_for_backward(sigmas, rgbs, ts, rays, weights_sum, depth, image) + ctx.dims = [M, N, T_thresh, binarize] + + return weights, weights_sum, depth, image + + @staticmethod + @custom_bwd + def backward(ctx, grad_weights, grad_weights_sum, grad_depth, grad_image): + + grad_weights = grad_weights.contiguous() + grad_weights_sum = grad_weights_sum.contiguous() + grad_depth = grad_depth.contiguous() + grad_image = grad_image.contiguous() + + sigmas, rgbs, ts, rays, weights_sum, depth, image = ctx.saved_tensors + M, N, T_thresh, binarize = ctx.dims + + grad_sigmas = torch.zeros_like(sigmas) + grad_rgbs = torch.zeros_like(rgbs) + + get_backend().composite_rays_train_backward(grad_weights, grad_weights_sum, grad_depth, grad_image, sigmas, rgbs, ts, rays, weights_sum, depth, image, M, N, T_thresh, binarize, grad_sigmas, grad_rgbs) + + return grad_sigmas, grad_rgbs, None, None, None, None + + +composite_rays_train = _composite_rays_train.apply + +# ---------------------------------------- +# infer functions +# ---------------------------------------- + +class _march_rays(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, bound, density_bitfield, C, H, near, far, perturb=False, dt_gamma=0, max_steps=1024, contract=False): + ''' march rays to generate points (forward only, for inference) + Args: + n_alive: int, number of alive rays + n_step: int, how many steps we march + rays_alive: int, [N], the alive rays' IDs in N (N >= n_alive, but we only use first n_alive) + rays_t: float, [N], the alive rays' time, we only use the first n_alive. + rays_o/d: float, [N, 3] + bound: float, scalar + density_bitfield: uint8: [CHHH // 8] + C: int + H: int + nears/fars: float, [N] + align: int, pad output so its size is dividable by align, set to -1 to disable. + perturb: bool/int, int > 0 is used as the random seed. + dt_gamma: float, called cone_angle in instant-ngp, exponentially accelerate ray marching if > 0. (very significant effect, but generally lead to worse performance) + max_steps: int, max number of sampled points along each ray, also affect min_stepsize. + Returns: + xyzs: float, [n_alive * n_step, 3], all generated points' coords + dirs: float, [n_alive * n_step, 3], all generated points' view dirs. 
+ ts: float, [n_alive * n_step, 2], all generated points' ts + ''' + + if not rays_o.is_cuda: rays_o = rays_o.cuda() + if not rays_d.is_cuda: rays_d = rays_d.cuda() + + rays_o = rays_o.float().contiguous().view(-1, 3) + rays_d = rays_d.float().contiguous().view(-1, 3) + + M = n_alive * n_step + + xyzs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device) + dirs = torch.zeros(M, 3, dtype=rays_o.dtype, device=rays_o.device) + ts = torch.zeros(M, 2, dtype=rays_o.dtype, device=rays_o.device) # 2 vals, one for rgb, one for depth + + if perturb: + # torch.manual_seed(perturb) # test_gui uses spp index as seed + noises = torch.rand(n_alive, dtype=rays_o.dtype, device=rays_o.device) + else: + noises = torch.zeros(n_alive, dtype=rays_o.dtype, device=rays_o.device) + + get_backend().march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, bound, contract, dt_gamma, max_steps, C, H, density_bitfield, near, far, xyzs, dirs, ts, noises) + + return xyzs, dirs, ts + +march_rays = _march_rays.apply + + +class _composite_rays(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) # need to cast sigmas & rgbs to float + def forward(ctx, n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh=1e-2, binarize=False): + ''' composite rays' rgbs, according to the ray marching formula. (for inference) + Args: + n_alive: int, number of alive rays + n_step: int, how many steps we march + rays_alive: int, [n_alive], the alive rays' IDs in N (N >= n_alive) + rays_t: float, [N], the alive rays' time + sigmas: float, [n_alive * n_step,] + rgbs: float, [n_alive * n_step, 3] + ts: float, [n_alive * n_step, 2] + In-place Outputs: + weights_sum: float, [N,], the alpha channel + depth: float, [N,], the depth value + image: float, [N, 3], the RGB channel (after multiplying alpha!) + ''' + sigmas = sigmas.float().contiguous() + rgbs = rgbs.float().contiguous() + get_backend().composite_rays(n_alive, n_step, T_thresh, binarize, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image) + return tuple() + + +composite_rays = _composite_rays.apply \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/raymarching/setup.py b/stable-dreamfusion-3DPortrait/raymarching/setup.py new file mode 100644 index 0000000..4d32fa7 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/raymarching/setup.py @@ -0,0 +1,63 @@ +import os +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + + # If cl.exe is not on path, try to find it. 
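+ # (same cl.exe lookup as raymarching/backend.py; backend.py JIT-builds the extension
+ # on first import, while this setup.py pre-builds and installs it, see Usage below.)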
+ if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +''' +Usage: + +python setup.py build_ext --inplace # build extensions locally, do not install (only can be used from the parent directory) + +python setup.py install # build extensions and install (copy) to PATH. +pip install . # ditto but better (e.g., dependency & metadata handling) + +python setup.py develop # build extensions and install (symbolic) to PATH. +pip install -e . # ditto but better (e.g., dependency & metadata handling) + +''' +setup( + name='raymarching', # package name, import this to use python API + ext_modules=[ + CUDAExtension( + name='_raymarching', # extension name, import this to use CUDA API + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'raymarching.cu', + 'bindings.cpp', + ]], + extra_compile_args={ + 'cxx': c_flags, + 'nvcc': nvcc_flags, + } + ), + ], + cmdclass={ + 'build_ext': BuildExtension, + } +) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/raymarching/src/bindings.cpp b/stable-dreamfusion-3DPortrait/raymarching/src/bindings.cpp new file mode 100644 index 0000000..eb8f122 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/raymarching/src/bindings.cpp @@ -0,0 +1,20 @@ +#include + +#include "raymarching.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + // utils + m.def("flatten_rays", &flatten_rays, "flatten_rays (CUDA)"); + m.def("packbits", &packbits, "packbits (CUDA)"); + m.def("near_far_from_aabb", &near_far_from_aabb, "near_far_from_aabb (CUDA)"); + m.def("sph_from_ray", &sph_from_ray, "sph_from_ray (CUDA)"); + m.def("morton3D", &morton3D, "morton3D (CUDA)"); + m.def("morton3D_invert", &morton3D_invert, "morton3D_invert (CUDA)"); + // train + m.def("march_rays_train", &march_rays_train, "march_rays_train (CUDA)"); + m.def("composite_rays_train_forward", &composite_rays_train_forward, "composite_rays_train_forward (CUDA)"); + m.def("composite_rays_train_backward", &composite_rays_train_backward, "composite_rays_train_backward (CUDA)"); + // infer + m.def("march_rays", &march_rays, "march rays (CUDA)"); + m.def("composite_rays", &composite_rays, "composite rays (CUDA)"); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/raymarching/src/raymarching.cu b/stable-dreamfusion-3DPortrait/raymarching/src/raymarching.cu new file mode 100644 index 0000000..0292f1c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/raymarching/src/raymarching.cu @@ -0,0 +1,934 @@ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor") +#define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor") +#define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor") + + +inline constexpr __device__ float SQRT3() { return 1.7320508075688772f; } +inline constexpr __device__ float RSQRT3() { return 0.5773502691896258f; } +inline constexpr __device__ float PI() { return 3.141592653589793f; } +inline constexpr __device__ float RPI() { return 0.3183098861837907f; } + + +template +inline __host__ __device__ T 
div_round_up(T val, T divisor) { + return (val + divisor - 1) / divisor; +} + +inline __host__ __device__ float signf(const float x) { + return copysignf(1.0, x); +} + +inline __host__ __device__ float clamp(const float x, const float min, const float max) { + return fminf(max, fmaxf(min, x)); +} + +inline __host__ __device__ void swapf(float& a, float& b) { + float c = a; a = b; b = c; +} + +inline __device__ int mip_from_pos(const float x, const float y, const float z, const float max_cascade) { + const float mx = fmaxf(fabsf(x), fmaxf(fabsf(y), fabsf(z))); + int exponent; + frexpf(mx, &exponent); // [0, 0.5) --> -1, [0.5, 1) --> 0, [1, 2) --> 1, [2, 4) --> 2, ... + return fminf(max_cascade - 1, fmaxf(0, exponent)); +} + +inline __device__ int mip_from_dt(const float dt, const float H, const float max_cascade) { + const float mx = dt * H * 0.5; + int exponent; + frexpf(mx, &exponent); + return fminf(max_cascade - 1, fmaxf(0, exponent)); +} + +inline __host__ __device__ uint32_t __expand_bits(uint32_t v) +{ + v = (v * 0x00010001u) & 0xFF0000FFu; + v = (v * 0x00000101u) & 0x0F00F00Fu; + v = (v * 0x00000011u) & 0xC30C30C3u; + v = (v * 0x00000005u) & 0x49249249u; + return v; +} + +inline __host__ __device__ uint32_t __morton3D(uint32_t x, uint32_t y, uint32_t z) +{ + uint32_t xx = __expand_bits(x); + uint32_t yy = __expand_bits(y); + uint32_t zz = __expand_bits(z); + return xx | (yy << 1) | (zz << 2); +} + +inline __host__ __device__ uint32_t __morton3D_invert(uint32_t x) +{ + x = x & 0x49249249; + x = (x | (x >> 2)) & 0xc30c30c3; + x = (x | (x >> 4)) & 0x0f00f00f; + x = (x | (x >> 8)) & 0xff0000ff; + x = (x | (x >> 16)) & 0x0000ffff; + return x; +} + + +//////////////////////////////////////////////////// +///////////// utils ///////////// +//////////////////////////////////////////////////// + +// rays_o/d: [N, 3] +// nears/fars: [N] +// scalar_t should always be float in use. 
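+// (standard slab test: intersect the ray with each pair of axis-aligned planes, keep the
+// latest entry and the earliest exit; if entry > exit the ray misses the box and nears/fars
+// are set to the float max, otherwise the entry time is clamped to min_near.)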
+template +__global__ void kernel_near_far_from_aabb( + const scalar_t * __restrict__ rays_o, + const scalar_t * __restrict__ rays_d, + const scalar_t * __restrict__ aabb, + const uint32_t N, + const float min_near, + scalar_t * nears, scalar_t * fars +) { + // parallel per ray + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + rays_o += n * 3; + rays_d += n * 3; + + const float ox = rays_o[0], oy = rays_o[1], oz = rays_o[2]; + const float dx = rays_d[0], dy = rays_d[1], dz = rays_d[2]; + const float rdx = 1 / dx, rdy = 1 / dy, rdz = 1 / dz; + + // get near far (assume cube scene) + float near = (aabb[0] - ox) * rdx; + float far = (aabb[3] - ox) * rdx; + if (near > far) swapf(near, far); + + float near_y = (aabb[1] - oy) * rdy; + float far_y = (aabb[4] - oy) * rdy; + if (near_y > far_y) swapf(near_y, far_y); + + if (near > far_y || near_y > far) { + nears[n] = fars[n] = std::numeric_limits::max(); + return; + } + + if (near_y > near) near = near_y; + if (far_y < far) far = far_y; + + float near_z = (aabb[2] - oz) * rdz; + float far_z = (aabb[5] - oz) * rdz; + if (near_z > far_z) swapf(near_z, far_z); + + if (near > far_z || near_z > far) { + nears[n] = fars[n] = std::numeric_limits::max(); + return; + } + + if (near_z > near) near = near_z; + if (far_z < far) far = far_z; + + if (near < min_near) near = min_near; + + nears[n] = near; + fars[n] = far; +} + + +void near_far_from_aabb(const at::Tensor rays_o, const at::Tensor rays_d, const at::Tensor aabb, const uint32_t N, const float min_near, at::Tensor nears, at::Tensor fars) { + + static constexpr uint32_t N_THREAD = 128; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + rays_o.scalar_type(), "near_far_from_aabb", ([&] { + kernel_near_far_from_aabb<<>>(rays_o.data_ptr(), rays_d.data_ptr(), aabb.data_ptr(), N, min_near, nears.data_ptr(), fars.data_ptr()); + })); +} + + +// rays_o/d: [N, 3] +// radius: float +// coords: [N, 2] +template +__global__ void kernel_sph_from_ray( + const scalar_t * __restrict__ rays_o, + const scalar_t * __restrict__ rays_d, + const float radius, + const uint32_t N, + scalar_t * coords +) { + // parallel per ray + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + rays_o += n * 3; + rays_d += n * 3; + coords += n * 2; + + const float ox = rays_o[0], oy = rays_o[1], oz = rays_o[2]; + const float dx = rays_d[0], dy = rays_d[1], dz = rays_d[2]; + // const float rdx = 1 / dx, rdy = 1 / dy, rdz = 1 / dz; + + // solve t from || o + td || = radius + const float A = dx * dx + dy * dy + dz * dz; + const float B = ox * dx + oy * dy + oz * dz; // in fact B / 2 + const float C = ox * ox + oy * oy + oz * oz - radius * radius; + + const float t = (- B + sqrtf(B * B - A * C)) / A; // always use the larger solution (positive) + + // solve theta, phi (assume y is the up axis) + const float x = ox + t * dx, y = oy + t * dy, z = oz + t * dz; + const float theta = atan2(sqrtf(x * x + z * z), y); // [0, PI) + const float phi = atan2(z, x); // [-PI, PI) + + // normalize to [-1, 1] + coords[0] = 2 * theta * RPI() - 1; + coords[1] = phi * RPI(); +} + + +void sph_from_ray(const at::Tensor rays_o, const at::Tensor rays_d, const float radius, const uint32_t N, at::Tensor coords) { + + static constexpr uint32_t N_THREAD = 128; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + rays_o.scalar_type(), "sph_from_ray", ([&] { + kernel_sph_from_ray<<>>(rays_o.data_ptr(), rays_d.data_ptr(), radius, N, coords.data_ptr()); + })); +} + + +// coords: int32, [N, 3] +// 
indices: int32, [N] +__global__ void kernel_morton3D( + const int * __restrict__ coords, + const uint32_t N, + int * indices +) { + // parallel + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + coords += n * 3; + indices[n] = __morton3D(coords[0], coords[1], coords[2]); +} + + +void morton3D(const at::Tensor coords, const uint32_t N, at::Tensor indices) { + static constexpr uint32_t N_THREAD = 128; + kernel_morton3D<<>>(coords.data_ptr(), N, indices.data_ptr()); +} + + +// indices: int32, [N] +// coords: int32, [N, 3] +__global__ void kernel_morton3D_invert( + const int * __restrict__ indices, + const uint32_t N, + int * coords +) { + // parallel + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + coords += n * 3; + + const int ind = indices[n]; + + coords[0] = __morton3D_invert(ind >> 0); + coords[1] = __morton3D_invert(ind >> 1); + coords[2] = __morton3D_invert(ind >> 2); +} + + +void morton3D_invert(const at::Tensor indices, const uint32_t N, at::Tensor coords) { + static constexpr uint32_t N_THREAD = 128; + kernel_morton3D_invert<<>>(indices.data_ptr(), N, coords.data_ptr()); +} + + +// grid: float, [C, H, H, H] +// N: int, C * H * H * H / 8 +// density_thresh: float +// bitfield: uint8, [N] +template +__global__ void kernel_packbits( + const scalar_t * __restrict__ grid, + const uint32_t N, + const float density_thresh, + uint8_t * bitfield +) { + // parallel per byte + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + grid += n * 8; + + uint8_t bits = 0; + + #pragma unroll + for (uint8_t i = 0; i < 8; i++) { + bits |= (grid[i] > density_thresh) ? ((uint8_t)1 << i) : 0; + } + + bitfield[n] = bits; +} + + +void packbits(const at::Tensor grid, const uint32_t N, const float density_thresh, at::Tensor bitfield) { + + static constexpr uint32_t N_THREAD = 128; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grid.scalar_type(), "packbits", ([&] { + kernel_packbits<<>>(grid.data_ptr(), N, density_thresh, bitfield.data_ptr()); + })); +} + + +__global__ void kernel_flatten_rays( + const int * __restrict__ rays, + const uint32_t N, const uint32_t M, + int * res +) { + // parallel per ray + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + uint32_t offset = rays[n * 2]; + uint32_t num_steps = rays[n * 2 + 1]; + + // write to res + res += offset; + for (int i = 0; i < num_steps; i++) res[i] = n; +} + +void flatten_rays(const at::Tensor rays, const uint32_t N, const uint32_t M, at::Tensor res) { + + static constexpr uint32_t N_THREAD = 128; + + kernel_flatten_rays<<>>(rays.data_ptr(), N, M, res.data_ptr()); +} + +//////////////////////////////////////////////////// +///////////// training ///////////// +//////////////////////////////////////////////////// + +// rays_o/d: [N, 3] +// grid: [CHHH / 8] +// xyzs, dirs, ts: [M, 3], [M, 3], [M, 2] +// dirs: [M, 3] +// rays: [N, 3], idx, offset, num_steps +template +__global__ void kernel_march_rays_train( + const scalar_t * __restrict__ rays_o, + const scalar_t * __restrict__ rays_d, + const uint8_t * __restrict__ grid, + const float bound, const bool contract, + const float dt_gamma, const uint32_t max_steps, + const uint32_t N, const uint32_t C, const uint32_t H, + const scalar_t* __restrict__ nears, + const scalar_t* __restrict__ fars, + scalar_t * xyzs, scalar_t * dirs, scalar_t * ts, + int * rays, + int * counter, + const scalar_t* __restrict__ noises +) { + // parallel 
per ray + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // is first pass running. + const bool first_pass = (xyzs == nullptr); + + // locate + rays_o += n * 3; + rays_d += n * 3; + rays += n * 2; + + uint32_t num_steps = max_steps; + + if (!first_pass) { + uint32_t point_index = rays[0]; + num_steps = rays[1]; + xyzs += point_index * 3; + dirs += point_index * 3; + ts += point_index * 2; + } + + // ray marching + const float ox = rays_o[0], oy = rays_o[1], oz = rays_o[2]; + const float dx = rays_d[0], dy = rays_d[1], dz = rays_d[2]; + const float rdx = 1 / dx, rdy = 1 / dy, rdz = 1 / dz; + const float rH = 1 / (float)H; + const float H3 = H * H * H; + + const float near = nears[n]; + const float far = fars[n]; + const float noise = noises[n]; + + const float dt_min = 2 * SQRT3() / max_steps; + const float dt_max = 2 * SQRT3() * bound / H; + // const float dt_max = 1e10f; + + float t0 = near; + t0 += clamp(t0 * dt_gamma, dt_min, dt_max) * noise; + float t = t0; + uint32_t step = 0; + + //if (t < far) printf("valid ray %d t=%f near=%f far=%f \n", n, t, near, far); + + while (t < far && step < num_steps) { + // current point + const float x = clamp(ox + t * dx, -bound, bound); + const float y = clamp(oy + t * dy, -bound, bound); + const float z = clamp(oz + t * dz, -bound, bound); + + float dt = clamp(t * dt_gamma, dt_min, dt_max); + + // get mip level + const int level = max(mip_from_pos(x, y, z, C), mip_from_dt(dt, H, C)); // range in [0, C - 1] + + const float mip_bound = fminf(scalbnf(1.0f, level), bound); + const float mip_rbound = 1 / mip_bound; + + // contraction + float cx = x, cy = y, cz = z; + const float mag = fmaxf(fabsf(x), fmaxf(fabsf(y), fabsf(z))); + if (contract && mag > 1) { + // L-INF norm + const float Linf_scale = (2 - 1 / mag) / mag; + cx *= Linf_scale; + cy *= Linf_scale; + cz *= Linf_scale; + } + + // convert to nearest grid position + const int nx = clamp(0.5 * (cx * mip_rbound + 1) * H, 0.0f, (float)(H - 1)); + const int ny = clamp(0.5 * (cy * mip_rbound + 1) * H, 0.0f, (float)(H - 1)); + const int nz = clamp(0.5 * (cz * mip_rbound + 1) * H, 0.0f, (float)(H - 1)); + + const uint32_t index = level * H3 + __morton3D(nx, ny, nz); + const bool occ = grid[index / 8] & (1 << (index % 8)); + + // if occpuied, advance a small step, and write to output + //if (n == 0) printf("t=%f density=%f vs thresh=%f step=%d\n", t, density, density_thresh, step); + + if (occ) { + step++; + t += dt; + if (!first_pass) { + xyzs[0] = cx; // write contracted coordinates! + xyzs[1] = cy; + xyzs[2] = cz; + dirs[0] = dx; + dirs[1] = dy; + dirs[2] = dz; + ts[0] = t; + ts[1] = dt; + xyzs += 3; + dirs += 3; + ts += 2; + } + // contraction case: cannot apply voxel skipping. 
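+ // (outside the unit box the contraction maps world space non-linearly, so the analytic
+ // jump to the next voxel boundary computed in the final else-branch is not valid there;
+ // the ray simply advances by dt instead.)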
+ } else if (contract && mag > 1) { + t += dt; + // else, skip a large step (basically skip a voxel grid) + } else { + // calc distance to next voxel + const float tx = (((nx + 0.5f + 0.5f * signf(dx)) * rH * 2 - 1) * mip_bound - cx) * rdx; + const float ty = (((ny + 0.5f + 0.5f * signf(dy)) * rH * 2 - 1) * mip_bound - cy) * rdy; + const float tz = (((nz + 0.5f + 0.5f * signf(dz)) * rH * 2 - 1) * mip_bound - cz) * rdz; + + const float tt = t + fmaxf(0.0f, fminf(tx, fminf(ty, tz))); + // step until next voxel + do { + dt = clamp(t * dt_gamma, dt_min, dt_max); + t += dt; + } while (t < tt); + } + } + + //printf("[n=%d] step=%d, near=%f, far=%f, dt=%f, num_steps=%f\n", n, step, near, far, dt_min, (far - near) / dt_min); + + // write rays + if (first_pass) { + uint32_t point_index = atomicAdd(counter, step); + rays[0] = point_index; + rays[1] = step; + } +} + +void march_rays_train(const at::Tensor rays_o, const at::Tensor rays_d, const at::Tensor grid, const float bound, const bool contract, const float dt_gamma, const uint32_t max_steps, const uint32_t N, const uint32_t C, const uint32_t H, const at::Tensor nears, const at::Tensor fars, at::optional xyzs, at::optional dirs, at::optional ts, at::Tensor rays, at::Tensor counter, at::Tensor noises) { + + static constexpr uint32_t N_THREAD = 128; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + rays_o.scalar_type(), "march_rays_train", ([&] { + kernel_march_rays_train<<>>(rays_o.data_ptr(), rays_d.data_ptr(), grid.data_ptr(), bound, contract, dt_gamma, max_steps, N, C, H, nears.data_ptr(), fars.data_ptr(), + xyzs.has_value() ? xyzs.value().data_ptr() : nullptr, + dirs.has_value() ? dirs.value().data_ptr() : nullptr, + ts.has_value() ? ts.value().data_ptr() : nullptr, + rays.data_ptr(), counter.data_ptr(), noises.data_ptr()); + })); +} + + +// sigmas: [M] +// rgbs: [M, 3] +// ts: [M, 2] +// rays: [N, 2], offset, num_steps +// weights: [M] +// weights_sum: [N], final pixel alpha +// depth: [N,] +// image: [N, 3] +template +__global__ void kernel_composite_rays_train_forward( + const scalar_t * __restrict__ sigmas, + const scalar_t * __restrict__ rgbs, + const scalar_t * __restrict__ ts, + const int * __restrict__ rays, + const uint32_t M, const uint32_t N, const float T_thresh, const bool binarize, + scalar_t * weights, + scalar_t * weights_sum, + scalar_t * depth, + scalar_t * image +) { + // parallel per ray + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + uint32_t offset = rays[n * 2]; + uint32_t num_steps = rays[n * 2 + 1]; + + // empty ray, or ray that exceed max step count. + if (num_steps == 0 || offset + num_steps > M) { + weights_sum[n] = 0; + depth[n] = 0; + image[n * 3] = 0; + image[n * 3 + 1] = 0; + image[n * 3 + 2] = 0; + return; + } + + ts += offset * 2; + weights += offset; + sigmas += offset; + rgbs += offset * 3; + + // accumulate + uint32_t step = 0; + + float T = 1.0f; + float r = 0, g = 0, b = 0, ws = 0, d = 0; + + while (step < num_steps) { + + const float real_alpha = 1.0f - __expf(- sigmas[0] * ts[1]); + const float alpha = binarize ? (real_alpha > 0.5 ? 
1.0 : 0.0) : real_alpha; + const float weight = alpha * T; + + weights[0] = weight; + + r += weight * rgbs[0]; + g += weight * rgbs[1]; + b += weight * rgbs[2]; + ws += weight; + d += weight * ts[0]; + + T *= 1.0f - alpha; + + // minimal remained transmittence + if (T < T_thresh) break; + + //printf("[n=%d] num_steps=%d, alpha=%f, w=%f, T=%f, sum_dt=%f, d=%f\n", n, step, alpha, weight, T, sum_delta, d); + + // locate + weights++; + sigmas++; + rgbs += 3; + ts += 2; + + step++; + } + + //printf("[n=%d] rgb=(%f, %f, %f), d=%f\n", n, r, g, b, d); + + // write + weights_sum[n] = ws; // weights_sum + depth[n] = d; + image[n * 3] = r; + image[n * 3 + 1] = g; + image[n * 3 + 2] = b; +} + + +void composite_rays_train_forward(const at::Tensor sigmas, const at::Tensor rgbs, const at::Tensor ts, const at::Tensor rays, const uint32_t M, const uint32_t N, const float T_thresh, const bool binarize, at::Tensor weights, at::Tensor weights_sum, at::Tensor depth, at::Tensor image) { + + static constexpr uint32_t N_THREAD = 128; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + sigmas.scalar_type(), "composite_rays_train_forward", ([&] { + kernel_composite_rays_train_forward<<>>(sigmas.data_ptr(), rgbs.data_ptr(), ts.data_ptr(), rays.data_ptr(), M, N, T_thresh, binarize, weights.data_ptr(), weights_sum.data_ptr(), depth.data_ptr(), image.data_ptr()); + })); +} + + +// grad_weights: [M,] +// grad_weights_sum: [N,] +// grad_image: [N, 3] +// grad_depth: [N,] +// sigmas: [M] +// rgbs: [M, 3] +// ts: [M, 2] +// rays: [N, 2], offset, num_steps +// weights_sum: [N,], weights_sum here +// image: [N, 3] +// grad_sigmas: [M] +// grad_rgbs: [M, 3] +template +__global__ void kernel_composite_rays_train_backward( + const scalar_t * __restrict__ grad_weights, + const scalar_t * __restrict__ grad_weights_sum, + const scalar_t * __restrict__ grad_depth, + const scalar_t * __restrict__ grad_image, + const scalar_t * __restrict__ sigmas, + const scalar_t * __restrict__ rgbs, + const scalar_t * __restrict__ ts, + const int * __restrict__ rays, + const scalar_t * __restrict__ weights_sum, + const scalar_t * __restrict__ depth, + const scalar_t * __restrict__ image, + const uint32_t M, const uint32_t N, const float T_thresh, const bool binarize, + scalar_t * grad_sigmas, + scalar_t * grad_rgbs +) { + // parallel per ray + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= N) return; + + // locate + uint32_t offset = rays[n * 2]; + uint32_t num_steps = rays[n * 2 + 1]; + + if (num_steps == 0 || offset + num_steps > M) return; + + grad_weights += offset; + grad_weights_sum += n; + grad_depth += n; + grad_image += n * 3; + weights_sum += n; + depth += n; + image += n * 3; + sigmas += offset; + rgbs += offset * 3; + ts += offset * 2; + grad_sigmas += offset; + grad_rgbs += offset * 3; + + // accumulate + uint32_t step = 0; + + float T = 1.0f; + const float r_final = image[0], g_final = image[1], b_final = image[2], ws_final = weights_sum[0], d_final = depth[0]; + float r = 0, g = 0, b = 0, ws = 0, d = 0; + + while (step < num_steps) { + + const float real_alpha = 1.0f - __expf(- sigmas[0] * ts[1]); + const float alpha = binarize ? (real_alpha > 0.5 ? 1.0 : 0.0) : real_alpha; + const float weight = alpha * T; + + r += weight * rgbs[0]; + g += weight * rgbs[1]; + b += weight * rgbs[2]; + ws += weight; + d += weight * ts[0]; + + T *= 1.0f - alpha; + + // check https://note.kiui.moe/others/nerf_gradient/ for the gradient calculation. 
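+        // Sketch of the derivation implemented below: with alpha_i = 1 - exp(-sigma_i * delta_i),
+        // w_i = alpha_i * T_i and C = sum_j w_j * c_j, one gets
+        //     dC/dsigma_i = delta_i * ( T_{i+1} * c_i - sum_{j>i} w_j * c_j ).
+        // At this point T has already been updated to T_{i+1} and r/g/b/ws/d already include
+        // step i, so "T * x - (x_final - x)" below is exactly that bracket for each accumulated
+        // quantity (color, weight sum, depth), and ts[1] supplies delta_i.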
+ // write grad_rgbs + grad_rgbs[0] = grad_image[0] * weight; + grad_rgbs[1] = grad_image[1] * weight; + grad_rgbs[2] = grad_image[2] * weight; + + // write grad_sigmas + grad_sigmas[0] = ts[1] * ( + grad_image[0] * (T * rgbs[0] - (r_final - r)) + + grad_image[1] * (T * rgbs[1] - (g_final - g)) + + grad_image[2] * (T * rgbs[2] - (b_final - b)) + + (grad_weights_sum[0] + grad_weights[0]) * (T - (ws_final - ws)) + + grad_depth[0] * (T * ts[0] - (d_final - d)) + ); + + //printf("[n=%d] num_steps=%d, T=%f, grad_sigmas=%f, r_final=%f, r=%f\n", n, step, T, grad_sigmas[0], r_final, r); + // minimal remained transmittence + if (T < T_thresh) break; + + // locate + sigmas++; + rgbs += 3; + ts += 2; + grad_weights++; + grad_sigmas++; + grad_rgbs += 3; + + step++; + } +} + + +void composite_rays_train_backward(const at::Tensor grad_weights, const at::Tensor grad_weights_sum, const at::Tensor grad_depth, const at::Tensor grad_image, const at::Tensor sigmas, const at::Tensor rgbs, const at::Tensor ts, const at::Tensor rays, const at::Tensor weights_sum, const at::Tensor depth, const at::Tensor image, const uint32_t M, const uint32_t N, const float T_thresh, const bool binarize, at::Tensor grad_sigmas, at::Tensor grad_rgbs) { + + static constexpr uint32_t N_THREAD = 128; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_image.scalar_type(), "composite_rays_train_backward", ([&] { + kernel_composite_rays_train_backward<<>>(grad_weights.data_ptr(), grad_weights_sum.data_ptr(), grad_depth.data_ptr(), grad_image.data_ptr(), sigmas.data_ptr(), rgbs.data_ptr(), ts.data_ptr(), rays.data_ptr(), weights_sum.data_ptr(), depth.data_ptr(), image.data_ptr(), M, N, T_thresh, binarize, grad_sigmas.data_ptr(), grad_rgbs.data_ptr()); + })); +} + + +//////////////////////////////////////////////////// +///////////// infernce ///////////// +//////////////////////////////////////////////////// + +template +__global__ void kernel_march_rays( + const uint32_t n_alive, + const uint32_t n_step, + const int* __restrict__ rays_alive, + const scalar_t* __restrict__ rays_t, + const scalar_t* __restrict__ rays_o, + const scalar_t* __restrict__ rays_d, + const float bound, const bool contract, + const float dt_gamma, const uint32_t max_steps, + const uint32_t C, const uint32_t H, + const uint8_t * __restrict__ grid, + const scalar_t* __restrict__ nears, + const scalar_t* __restrict__ fars, + scalar_t* xyzs, scalar_t* dirs, scalar_t* ts, + const scalar_t* __restrict__ noises +) { + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= n_alive) return; + + const int index = rays_alive[n]; // ray id + const float noise = noises[n]; + + // locate + rays_o += index * 3; + rays_d += index * 3; + xyzs += n * n_step * 3; + dirs += n * n_step * 3; + ts += n * n_step * 2; + + const float ox = rays_o[0], oy = rays_o[1], oz = rays_o[2]; + const float dx = rays_d[0], dy = rays_d[1], dz = rays_d[2]; + const float rdx = 1 / dx, rdy = 1 / dy, rdz = 1 / dz; + const float rH = 1 / (float)H; + const float H3 = H * H * H; + + const float near = nears[index], far = fars[index]; + + const float dt_min = 2 * SQRT3() / max_steps; + const float dt_max = 2 * SQRT3() * bound / H; + // const float dt_max = 1e10f; + + // march for n_step steps, record points + float t = rays_t[index]; + t += clamp(t * dt_gamma, dt_min, dt_max) * noise; + uint32_t step = 0; + + while (t < far && step < n_step) { + // current point + const float x = clamp(ox + t * dx, -bound, bound); + const float y = clamp(oy + t * dy, -bound, bound); + const float z = clamp(oz + t 
* dz, -bound, bound); + + float dt = clamp(t * dt_gamma, dt_min, dt_max); + + // get mip level + const int level = max(mip_from_pos(x, y, z, C), mip_from_dt(dt, H, C)); // range in [0, C - 1] + + const float mip_bound = fminf(scalbnf(1, level), bound); + const float mip_rbound = 1 / mip_bound; + + // contraction + float cx = x, cy = y, cz = z; + const float mag = fmaxf(fabsf(x), fmaxf(fabsf(y), fabsf(z))); + if (contract && mag > 1) { + // L-INF norm + const float Linf_scale = (2 - 1 / mag) / mag; + cx *= Linf_scale; + cy *= Linf_scale; + cz *= Linf_scale; + } + + // convert to nearest grid position + const int nx = clamp(0.5 * (cx * mip_rbound + 1) * H, 0.0f, (float)(H - 1)); + const int ny = clamp(0.5 * (cy * mip_rbound + 1) * H, 0.0f, (float)(H - 1)); + const int nz = clamp(0.5 * (cz * mip_rbound + 1) * H, 0.0f, (float)(H - 1)); + + const uint32_t index = level * H3 + __morton3D(nx, ny, nz); + const bool occ = grid[index / 8] & (1 << (index % 8)); + + // if occpuied, advance a small step, and write to output + if (occ) { + // write step + xyzs[0] = cx; + xyzs[1] = cy; + xyzs[2] = cz; + dirs[0] = dx; + dirs[1] = dy; + dirs[2] = dz; + // calc dt + t += dt; + ts[0] = t; + ts[1] = dt; + // step + xyzs += 3; + dirs += 3; + ts += 2; + step++; + + // contraction case + } else if (contract && mag > 1) { + t += dt; + // else, skip a large step (basically skip a voxel grid) + } else { + // calc distance to next voxel + const float tx = (((nx + 0.5f + 0.5f * signf(dx)) * rH * 2 - 1) * mip_bound - cx) * rdx; + const float ty = (((ny + 0.5f + 0.5f * signf(dy)) * rH * 2 - 1) * mip_bound - cy) * rdy; + const float tz = (((nz + 0.5f + 0.5f * signf(dz)) * rH * 2 - 1) * mip_bound - cz) * rdz; + const float tt = t + fmaxf(0.0f, fminf(tx, fminf(ty, tz))); + // step until next voxel + do { + dt = clamp(t * dt_gamma, dt_min, dt_max); + t += dt; + } while (t < tt); + } + } +} + + +void march_rays(const uint32_t n_alive, const uint32_t n_step, const at::Tensor rays_alive, const at::Tensor rays_t, const at::Tensor rays_o, const at::Tensor rays_d, const float bound, const bool contract, const float dt_gamma, const uint32_t max_steps, const uint32_t C, const uint32_t H, const at::Tensor grid, const at::Tensor near, const at::Tensor far, at::Tensor xyzs, at::Tensor dirs, at::Tensor ts, at::Tensor noises) { + static constexpr uint32_t N_THREAD = 128; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + rays_o.scalar_type(), "march_rays", ([&] { + kernel_march_rays<<>>(n_alive, n_step, rays_alive.data_ptr(), rays_t.data_ptr(), rays_o.data_ptr(), rays_d.data_ptr(), bound, contract, dt_gamma, max_steps, C, H, grid.data_ptr(), near.data_ptr(), far.data_ptr(), xyzs.data_ptr(), dirs.data_ptr(), ts.data_ptr(), noises.data_ptr()); + })); +} + + +template +__global__ void kernel_composite_rays( + const uint32_t n_alive, + const uint32_t n_step, + const float T_thresh, const bool binarize, + int* rays_alive, + scalar_t* rays_t, + const scalar_t* __restrict__ sigmas, + const scalar_t* __restrict__ rgbs, + const scalar_t* __restrict__ ts, + scalar_t* weights_sum, scalar_t* depth, scalar_t* image +) { + const uint32_t n = threadIdx.x + blockIdx.x * blockDim.x; + if (n >= n_alive) return; + + const int index = rays_alive[n]; // ray id + + // locate + sigmas += n * n_step; + rgbs += n * n_step * 3; + ts += n * n_step * 2; + + rays_t += index; + weights_sum += index; + depth += index; + image += index * 3; + + float t; + float d = depth[0], r = image[0], g = image[1], b = image[2], weight_sum = weights_sum[0]; + + // accumulate + uint32_t 
step = 0; + while (step < n_step) { + + // ray is terminated if t == 0 + if (ts[0] == 0) break; + + const float real_alpha = 1.0f - __expf(- sigmas[0] * ts[1]); + const float alpha = binarize ? (real_alpha > 0.5 ? 1.0 : 0.0) : real_alpha; + + /* + T_0 = 1; T_i = \prod_{j=0}^{i-1} (1 - alpha_j) + w_i = alpha_i * T_i + --> + T_i = 1 - \sum_{j=0}^{i-1} w_j + */ + const float T = 1 - weight_sum; + const float weight = alpha * T; + weight_sum += weight; + + t = ts[0]; + d += weight * t; // real depth + r += weight * rgbs[0]; + g += weight * rgbs[1]; + b += weight * rgbs[2]; + + //printf("[n=%d] num_steps=%d, alpha=%f, w=%f, T=%f, sum_dt=%f, d=%f\n", n, step, alpha, weight, T, sum_delta, d); + + // ray is terminated if T is too small + // use a larger bound to further accelerate inference + if (T < T_thresh) break; + + // locate + sigmas++; + rgbs += 3; + ts += 2; + step++; + } + + //printf("[n=%d] rgb=(%f, %f, %f), d=%f\n", n, r, g, b, d); + + // rays_alive = -1 means ray is terminated early. + if (step < n_step) { + rays_alive[n] = -1; + } else { + rays_t[0] = t; + } + + weights_sum[0] = weight_sum; // this is the thing I needed! + depth[0] = d; + image[0] = r; + image[1] = g; + image[2] = b; +} + + +void composite_rays(const uint32_t n_alive, const uint32_t n_step, const float T_thresh, const bool binarize, at::Tensor rays_alive, at::Tensor rays_t, at::Tensor sigmas, at::Tensor rgbs, at::Tensor ts, at::Tensor weights, at::Tensor depth, at::Tensor image) { + static constexpr uint32_t N_THREAD = 128; + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + image.scalar_type(), "composite_rays", ([&] { + kernel_composite_rays<<>>(n_alive, n_step, T_thresh, binarize, rays_alive.data_ptr(), rays_t.data_ptr(), sigmas.data_ptr(), rgbs.data_ptr(), ts.data_ptr(), weights.data_ptr(), depth.data_ptr(), image.data_ptr()); + })); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/raymarching/src/raymarching.h b/stable-dreamfusion-3DPortrait/raymarching/src/raymarching.h new file mode 100644 index 0000000..a9994d3 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/raymarching/src/raymarching.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include + + +void near_far_from_aabb(const at::Tensor rays_o, const at::Tensor rays_d, const at::Tensor aabb, const uint32_t N, const float min_near, at::Tensor nears, at::Tensor fars); +void sph_from_ray(const at::Tensor rays_o, const at::Tensor rays_d, const float radius, const uint32_t N, at::Tensor coords); +void morton3D(const at::Tensor coords, const uint32_t N, at::Tensor indices); +void morton3D_invert(const at::Tensor indices, const uint32_t N, at::Tensor coords); +void packbits(const at::Tensor grid, const uint32_t N, const float density_thresh, at::Tensor bitfield); +void flatten_rays(const at::Tensor rays, const uint32_t N, const uint32_t M, at::Tensor res); + +void march_rays_train(const at::Tensor rays_o, const at::Tensor rays_d, const at::Tensor grid, const float bound, const bool contract, const float dt_gamma, const uint32_t max_steps, const uint32_t N, const uint32_t C, const uint32_t H, const at::Tensor nears, const at::Tensor fars, at::optional xyzs, at::optional dirs, at::optional ts, at::Tensor rays, at::Tensor counter, at::Tensor noises); +void composite_rays_train_forward(const at::Tensor sigmas, const at::Tensor rgbs, const at::Tensor ts, const at::Tensor rays, const uint32_t M, const uint32_t N, const float T_thresh, const bool binarize, at::Tensor weights, at::Tensor weights_sum, at::Tensor depth, at::Tensor image); +void 
composite_rays_train_backward(const at::Tensor grad_weights, const at::Tensor grad_weights_sum, const at::Tensor grad_depth, const at::Tensor grad_image, const at::Tensor sigmas, const at::Tensor rgbs, const at::Tensor ts, const at::Tensor rays, const at::Tensor weights_sum, const at::Tensor depth, const at::Tensor image, const uint32_t M, const uint32_t N, const float T_thresh, const bool binarize, at::Tensor grad_sigmas, at::Tensor grad_rgbs); + +void march_rays(const uint32_t n_alive, const uint32_t n_step, const at::Tensor rays_alive, const at::Tensor rays_t, const at::Tensor rays_o, const at::Tensor rays_d, const float bound, const bool contract, const float dt_gamma, const uint32_t max_steps, const uint32_t C, const uint32_t H, const at::Tensor grid, const at::Tensor nears, const at::Tensor fars, at::Tensor xyzs, at::Tensor dirs, at::Tensor ts, at::Tensor noises); +void composite_rays(const uint32_t n_alive, const uint32_t n_step, const float T_thresh, const bool binarize, at::Tensor rays_alive, at::Tensor rays_t, at::Tensor sigmas, at::Tensor rgbs, at::Tensor ts, at::Tensor weights_sum, at::Tensor depth, at::Tensor image); \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/readme.md b/stable-dreamfusion-3DPortrait/readme.md new file mode 100644 index 0000000..1d9068d --- /dev/null +++ b/stable-dreamfusion-3DPortrait/readme.md @@ -0,0 +1,356 @@ +# Stable-Dreamfusion + +A pytorch implementation of the text-to-3D model **Dreamfusion**, powered by the [Stable Diffusion](https://github.com/CompVis/stable-diffusion) text-to-2D model. + +**ADVERTISEMENT: Please check out [threestudio](https://github.com/threestudio-project/threestudio) for recent improvements and better implementation in 3D content generation!** + +**NEWS (2023.6.12)**: + +* Support of [Perp-Neg](https://perp-neg.github.io/) to alleviate multi-head problem in Text-to-3D. +* Support of Perp-Neg for both [Stable Diffusion](https://github.com/CompVis/stable-diffusion) and [DeepFloyd-IF](https://github.com/deep-floyd/IF). + +https://user-images.githubusercontent.com/25863658/236712982-9f93bd32-83bf-423a-bb7c-f73df7ece2e3.mp4 + +https://user-images.githubusercontent.com/25863658/232403162-51b69000-a242-4b8c-9cd9-4242b09863fa.mp4 + +### [Update Logs](assets/update_logs.md) + +### Colab notebooks: +* Instant-NGP backbone (`-O`): [![Instant-NGP Backbone](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing) + +* Vanilla NeRF backbone (`-O2`): [![Vanilla Backbone](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1mvfxG-S_n_gZafWoattku7rLJ2kPoImL?usp=sharing) + +# Important Notice +This project is a **work-in-progress**, and contains lots of differences from the paper. 
**The current generation quality cannot match the results from the original paper, and many prompts still fail badly!** + +## Notable differences from the paper +* Since the Imagen model is not publicly available, we use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) to replace it (implementation from [diffusers](https://github.com/huggingface/diffusers)). Different from Imagen, Stable-Diffusion is a latent diffusion model, which diffuses in a latent space instead of the original image space. Therefore, we need the loss to propagate back from the VAE's encoder part too, which introduces extra time cost in training. +* We use the [multi-resolution grid encoder](https://github.com/NVlabs/instant-ngp/) to implement the NeRF backbone (implementation from [torch-ngp](https://github.com/ashawkey/torch-ngp)), which enables much faster rendering (~10FPS at 800x800). +* We use the [Adan](https://github.com/sail-sg/Adan) optimizer as default. + +# Install + +```bash +git clone https://github.com/ashawkey/stable-dreamfusion.git +cd stable-dreamfusion +``` + +### Optional: create a python virtual environment + +To avoid python package conflicts, we recommend using a virtual environment, e.g.: using conda or venv: + +```bash +python -m venv venv_stable-dreamfusion +source venv_stable-dreamfusion/bin/activate # you need to repeat this step for every new terminal +``` + +### Install with pip + +```bash +pip install -r requirements.txt +``` + +### Download pre-trained models + +To use image-conditioned 3D generation, you need to download some pretrained checkpoints manually: +* [Zero-1-to-3](https://github.com/cvlab-columbia/zero123) for diffusion backend. + We use `zero123-xl.ckpt` by default, and it is hard-coded in `guidance/zero123_utils.py`. + ```bash + cd pretrained/zero123 + wget https://zero123.cs.columbia.edu/assets/zero123-xl.ckpt + ``` +* [Omnidata](https://github.com/EPFL-VILAB/omnidata/tree/main/omnidata_tools/torch) for depth and normal prediction. + These ckpts are hardcoded in `preprocess_image.py`. + ```bash + mkdir pretrained/omnidata + cd pretrained/omnidata + # assume gdown is installed + gdown '1Jrh-bRnJEjyMCS7f-WsaFlccfPjJPPHI&confirm=t' # omnidata_dpt_depth_v2.ckpt + gdown '1wNxVO4vVbDEMEpnAi_jwQObf2MFodcBR&confirm=t' # omnidata_dpt_normal_v2.ckpt + ``` + +To use [DeepFloyd-IF](https://github.com/deep-floyd/IF), you need to accept the usage conditions from [hugging face](https://huggingface.co/DeepFloyd/IF-I-XL-v1.0), and login with `huggingface-cli login` in command line. + +For DMTet, we port the pre-generated `32/64/128` resolution tetrahedron grids under `tets`. +The 256 resolution one can be found [here](https://drive.google.com/file/d/1lgvEKNdsbW5RS4gVxJbgBS4Ac92moGSa/view?usp=sharing). 
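+
+As noted under "Notable differences from the paper", the score-distillation loss here is computed in Stable Diffusion's latent space, so gradients must flow back through the VAE encoder. Below is a minimal sketch of that idea; the names `vae`, `unet`, `scheduler`, `text_embeddings` and the helper `sds_loss` are illustrative and assume a diffusers-style pipeline, so this is not the repository's actual guidance code:
+
+```python
+import torch
+import torch.nn.functional as F
+
+def sds_loss(pred_rgb, vae, unet, scheduler, text_embeddings, guidance_scale=100.0):
+    # pred_rgb: [B, 3, H, W] rendered by the NeRF, values in [0, 1], still on the autograd graph.
+    pred_rgb = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False)
+
+    # Encode into the latent space; gradients propagate back through the VAE encoder.
+    latents = vae.encode(pred_rgb * 2 - 1).latent_dist.sample() * 0.18215  # SD latent scaling
+
+    # Perturb the latents at a random diffusion timestep.
+    t = torch.randint(20, 980, (latents.shape[0],), device=latents.device)
+    noise = torch.randn_like(latents)
+    noisy_latents = scheduler.add_noise(latents, noise, t)
+
+    # Classifier-free guidance; text_embeddings is assumed to be cat([uncond, cond]).
+    with torch.no_grad():
+        noise_pred = unet(torch.cat([noisy_latents] * 2), torch.cat([t] * 2),
+                          encoder_hidden_states=text_embeddings).sample
+    uncond, cond = noise_pred.chunk(2)
+    noise_pred = uncond + guidance_scale * (cond - uncond)
+
+    # SDS: use w(t) * (noise_pred - noise) as the gradient w.r.t. the latents.
+    w = (1 - scheduler.alphas_cumprod.to(latents.device)[t]).view(-1, 1, 1, 1)
+    grad = torch.nan_to_num(w * (noise_pred - noise))
+    target = (latents - grad).detach()
+    return 0.5 * F.mse_loss(latents, target, reduction='sum') / latents.shape[0]
+```
+
+The extra forward and backward pass through `vae.encode` is where the additional training cost mentioned above comes from, compared to Imagen-style pixel-space guidance.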
+ +### Build extension (optional) +By default, we use [`load`](https://pytorch.org/docs/stable/cpp_extension.html#torch.utils.cpp_extension.load) to build the extension at runtime. +We also provide `setup.py` to build each extension: +```bash +cd stable-dreamfusion + +# install all extension modules +bash scripts/install_ext.sh + +# if you want to install manually, here is an example: +pip install ./raymarching # install to python path (you still need the raymarching/ folder, since this only installs the built extension.) +``` + +### Taichi backend (optional) +Use the [Taichi](https://github.com/taichi-dev/taichi) backend for Instant-NGP. It achieves performance comparable to the CUDA implementation while requiring **no CUDA** build. Install Taichi with pip: +```bash +pip install -i https://pypi.taichi.graphics/simple/ taichi-nightly +``` + +### Troubleshooting: +* We assume you are working with the latest version of all dependencies; if you meet problems with a specific dependency, please try to upgrade it first (e.g., `pip install -U diffusers`). If the problem persists, [reporting a bug issue](https://github.com/ashawkey/stable-dreamfusion/issues/new?assignees=&labels=bug&template=bug_report.yaml&title=%3Ctitle%3E) will be appreciated! +* `[F glutil.cpp:338] eglInitialize() failed Aborted (core dumped)`: this usually indicates problems with the OpenGL installation. Try to re-install the Nvidia driver, or use nvidia-docker as suggested in https://github.com/ashawkey/stable-dreamfusion/issues/131 if you are using a headless server. +* `TypeError: xxx_forward(): incompatible function arguments`: this happens when we update the CUDA source and you used `setup.py` to install the extensions earlier. Try to re-install the corresponding extension (e.g., `pip install ./gridencoder`). + +### Tested environments +* Ubuntu 22 with torch 1.12 & CUDA 11.6 on a V100. + +# Usage + +The first run will take some time to compile the CUDA extensions. + +```bash +#### stable-dreamfusion setting + +### Instant-NGP NeRF Backbone +# + faster rendering speed +# + less GPU memory (~16G) +# - need to build CUDA extensions (a CUDA-free Taichi backend is available) + +## train with text prompt (with the default settings) +# `-O` equals `--cuda_ray --fp16` +# `--cuda_ray` enables instant-ngp-like occupancy grid based acceleration. +python main.py --text "a hamburger" --workspace trial -O + +# reduce stable-diffusion memory usage with `--vram_O` +# enable various vram savings (https://huggingface.co/docs/diffusers/optimization/fp16). +python main.py --text "a hamburger" --workspace trial -O --vram_O + +# You can collect arguments in a file and override them by specifying more arguments after `--file`. Note that quoted strings can't be loaded from .args files...
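+# For reference, an .args file is just a whitespace-separated list of flags; e.g. scripts/res64.args contains:
+#   -O --vram_O --w 64 --h 64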
+python main.py --file scripts/res64.args --workspace trial_awesome_hamburger --text "a photo of an awesome hamburger" + +# use CUDA-free Taichi backend with `--backbone grid_taichi` +python3 main.py --text "a hamburger" --workspace trial -O --backbone grid_taichi + +# choose stable-diffusion version (support 1.5, 2.0 and 2.1, default is 2.1 now) +python main.py --text "a hamburger" --workspace trial -O --sd_version 1.5 + +# use a custom stable-diffusion checkpoint from hugging face: +python main.py --text "a hamburger" --workspace trial -O --hf_key andite/anything-v4.0 + +# use DeepFloyd-IF for guidance (experimental): +python main.py --text "a hamburger" --workspace trial -O --IF +python main.py --text "a hamburger" --workspace trial -O --IF --vram_O # requires ~24G GPU memory + +# we also support negative text prompt now: +python main.py --text "a rose" --negative "red" --workspace trial -O + +## after the training is finished: +# test (exporting 360 degree video) +python main.py --workspace trial -O --test +# also save a mesh (with obj, mtl, and png texture) +python main.py --workspace trial -O --test --save_mesh +# test with a GUI (free view control!) +python main.py --workspace trial -O --test --gui + +### Vanilla NeRF backbone +# + pure pytorch, no need to build extensions! +# - slow rendering speed +# - more GPU memory + +## train +# `-O2` equals `--backbone vanilla` +python main.py --text "a hotdog" --workspace trial2 -O2 + +# if CUDA OOM, try to reduce NeRF sampling steps (--num_steps and --upsample_steps) +python main.py --text "a hotdog" --workspace trial2 -O2 --num_steps 64 --upsample_steps 0 + +## test +python main.py --workspace trial2 -O2 --test +python main.py --workspace trial2 -O2 --test --save_mesh +python main.py --workspace trial2 -O2 --test --gui # not recommended, FPS will be low. + +### DMTet finetuning + +## use --dmtet and --init_with to finetune the mesh at higher reslution +python main.py -O --text "a hamburger" --workspace trial_dmtet --dmtet --iters 5000 --init_with trial/checkpoints/df.pth + +## init dmtet with a mesh to generate texture +# require install of cubvh: pip install git+https://github.com/ashawkey/cubvh +# remove --lock_geo to also finetune geometry, but performance may be bad. +python main.py -O --text "a white bunny with red eyes" --workspace trial_dmtet_mesh --dmtet --iters 5000 --init_with ./data/bunny.obj --lock_geo + +## test & export the mesh +python main.py -O --text "a hamburger" --workspace trial_dmtet --dmtet --iters 5000 --test --save_mesh + +## gui to visualize dmtet +python main.py -O --text "a hamburger" --workspace trial_dmtet --dmtet --iters 5000 --test --gui + +### Image-conditioned 3D Generation + +## preprocess input image +# note: the results of image-to-3D is dependent on zero-1-to-3's capability. For best performance, the input image should contain a single front-facing object, it should have square aspect ratio, with <1024 pixel resolution. Check the examples under ./data. +# this will exports `_rgba.png`, `_depth.png`, and `_normal.png` to the directory containing the input image. +python preprocess_image.py .png +python preprocess_image.py .png --border_ratio 0.4 # increase border_ratio if the center object appears too large and results are unsatisfying. + +## zero123 train +# pass in the processed _rgba.png by --image and do NOT pass in --text to enable zero-1-to-3 backend. 
+python main.py -O --image _rgba.png --workspace trial_image --iters 5000 + +# if the image is not exactly front-view (elevation = 0), adjust default_polar (we use polar from 0 to 180 to represent elevation from 90 to -90) +python main.py -O --image _rgba.png --workspace trial_image --iters 5000 --default_polar 80 + +# by default we leverage monocular depth estimation to aid image-to-3d; if you find the depth estimation inaccurate and it harms results, turn it off with: +python main.py -O --image _rgba.png --workspace trial_image --iters 5000 --lambda_depth 0 + +python main.py -O --image _rgba.png --workspace trial_image_dmtet --dmtet --init_with trial_image/checkpoints/df.pth + +## zero123 with multiple images +python main.py -O --image_config config/.csv --workspace trial_image --iters 5000 + +## render images per batch (default 1) +python main.py -O --image_config config/.csv --workspace trial_image --iters 5000 --batch_size 4 + +# providing both --text and --image enables the stable-diffusion backend (similar to make-it-3d) +python main.py -O --image hamburger_rgba.png --text "a DSLR photo of a delicious hamburger" --workspace trial_image_text --iters 5000 + +python main.py -O --image hamburger_rgba.png --text "a DSLR photo of a delicious hamburger" --workspace trial_image_text_dmtet --dmtet --init_with trial_image_text/checkpoints/df.pth + +## test / visualize +python main.py -O --image _rgba.png --workspace trial_image_dmtet --dmtet --test --save_mesh +python main.py -O --image _rgba.png --workspace trial_image_dmtet --dmtet --test --gui + +### Debugging + +# You can save guidance images for debugging purposes. These get saved in trial_hamburger/guidance. +# Warning: this slows down training considerably and consumes lots of disk space! +python main.py --text "a hamburger" --workspace trial_hamburger -O --vram_O --save_guidance --save_guidance_interval 5 # save every 5 steps +``` + +For example commands, check [`scripts`](./scripts). + +For advanced tips and other development notes, check [Advanced Tips](./assets/advanced.md). + +# Evaluation + +Reproduce the paper's CLIP R-precision evaluation. + +After the test step in Usage above, a validation set containing projections from different angles is generated. Compute the R-precision between the prompt and these images (R=1): + +```bash +python r_precision.py --text "a snake is flying in the sky" --workspace snake_HQ --latest ep0100 --mode depth --clip clip-ViT-B-16 +``` + +# Acknowledgement + +This work is based on an increasing list of amazing research works and open-source projects; thanks a lot to all the authors for sharing! + +* [DreamFusion: Text-to-3D using 2D Diffusion](https://dreamfusion3d.github.io/) + ``` + @article{poole2022dreamfusion, + author = {Poole, Ben and Jain, Ajay and Barron, Jonathan T.
and Mildenhall, Ben}, + title = {DreamFusion: Text-to-3D using 2D Diffusion}, + journal = {arXiv}, + year = {2022}, + } + ``` + +* [Magic3D: High-Resolution Text-to-3D Content Creation](https://research.nvidia.com/labs/dir/magic3d/) + ``` + @inproceedings{lin2023magic3d, + title={Magic3D: High-Resolution Text-to-3D Content Creation}, + author={Lin, Chen-Hsuan and Gao, Jun and Tang, Luming and Takikawa, Towaki and Zeng, Xiaohui and Huang, Xun and Kreis, Karsten and Fidler, Sanja and Liu, Ming-Yu and Lin, Tsung-Yi}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition ({CVPR})}, + year={2023} + } + ``` + +* [Zero-1-to-3: Zero-shot One Image to 3D Object](https://github.com/cvlab-columbia/zero123) + ``` + @misc{liu2023zero1to3, + title={Zero-1-to-3: Zero-shot One Image to 3D Object}, + author={Ruoshi Liu and Rundi Wu and Basile Van Hoorick and Pavel Tokmakov and Sergey Zakharov and Carl Vondrick}, + year={2023}, + eprint={2303.11328}, + archivePrefix={arXiv}, + primaryClass={cs.CV} + } + ``` + +* [Perp-Neg: Re-imagine the Negative Prompt Algorithm: Transform 2D Diffusion into 3D, alleviate Janus problem and Beyond](https://perp-neg.github.io/) + ``` + @article{armandpour2023re, + title={Re-imagine the Negative Prompt Algorithm: Transform 2D Diffusion into 3D, alleviate Janus problem and Beyond}, + author={Armandpour, Mohammadreza and Zheng, Huangjie and Sadeghian, Ali and Sadeghian, Amir and Zhou, Mingyuan}, + journal={arXiv preprint arXiv:2304.04968}, + year={2023} + } + ``` + +* [RealFusion: 360° Reconstruction of Any Object from a Single Image](https://github.com/lukemelas/realfusion) + ``` + @inproceedings{melaskyriazi2023realfusion, + author = {Melas-Kyriazi, Luke and Rupprecht, Christian and Laina, Iro and Vedaldi, Andrea}, + title = {RealFusion: 360 Reconstruction of Any Object from a Single Image}, + booktitle={CVPR} + year = {2023}, + url = {https://arxiv.org/abs/2302.10663}, + } + ``` + +* [Fantasia3D: Disentangling Geometry and Appearance for High-quality Text-to-3D Content Creation](https://fantasia3d.github.io/) + ``` + @article{chen2023fantasia3d, + title={Fantasia3D: Disentangling Geometry and Appearance for High-quality Text-to-3D Content Creation}, + author={Rui Chen and Yongwei Chen and Ningxin Jiao and Kui Jia}, + journal={arXiv preprint arXiv:2303.13873}, + year={2023} + } + ``` + +* [Make-It-3D: High-Fidelity 3D Creation from A Single Image with Diffusion Prior](https://make-it-3d.github.io/) + ``` + @article{tang2023make, + title={Make-It-3D: High-Fidelity 3D Creation from A Single Image with Diffusion Prior}, + author={Tang, Junshu and Wang, Tengfei and Zhang, Bo and Zhang, Ting and Yi, Ran and Ma, Lizhuang and Chen, Dong}, + journal={arXiv preprint arXiv:2303.14184}, + year={2023} + } + ``` + +* [Stable Diffusion](https://github.com/CompVis/stable-diffusion) and the [diffusers](https://github.com/huggingface/diffusers) library. 
+ + ``` + @misc{rombach2021highresolution, + title={High-Resolution Image Synthesis with Latent Diffusion Models}, + author={Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer}, + year={2021}, + eprint={2112.10752}, + archivePrefix={arXiv}, + primaryClass={cs.CV} + } + + @misc{von-platen-etal-2022-diffusers, + author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Thomas Wolf}, + title = {Diffusers: State-of-the-art diffusion models}, + year = {2022}, + publisher = {GitHub}, + journal = {GitHub repository}, + howpublished = {\url{https://github.com/huggingface/diffusers}} + } + ``` + +* The GUI is developed with [DearPyGui](https://github.com/hoffstadt/DearPyGui). + +* Puppy image from : https://www.pexels.com/photo/high-angle-photo-of-a-corgi-looking-upwards-2664417/ + +* Anya images from : https://www.goodsmile.info/en/product/13301/POP+UP+PARADE+Anya+Forger.html + +# Citation + +If you find this work useful, a citation will be appreciated via: +``` +@misc{stable-dreamfusion, + Author = {Jiaxiang Tang}, + Year = {2022}, + Note = {https://github.com/ashawkey/stable-dreamfusion}, + Title = {Stable-dreamfusion: Text-to-3D with Stable-diffusion} +} +``` diff --git a/stable-dreamfusion-3DPortrait/requirements.txt b/stable-dreamfusion-3DPortrait/requirements.txt new file mode 100644 index 0000000..ea12bcc --- /dev/null +++ b/stable-dreamfusion-3DPortrait/requirements.txt @@ -0,0 +1,56 @@ +tqdm +rich +ninja +numpy +pandas +scipy +scikit-learn +matplotlib +opencv-python +imageio +imageio-ffmpeg + +torch +torch-ema +einops +tensorboard +tensorboardX + +# for gui +dearpygui + +# for grid_tcnn +# git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch + +# for stable-diffusion +huggingface_hub +diffusers >= 0.9.0 +accelerate +transformers + +# for dmtet and mesh export +xatlas +trimesh +PyMCubes +pymeshlab +git+https://github.com/NVlabs/nvdiffrast/ + +# for zero123 +carvekit-colab +omegaconf +pytorch-lightning +taming-transformers-rom1504 +kornia +git+https://github.com/openai/CLIP.git + +# for omnidata +gdown + +# for dpt +timm + +# for remote debugging +debugpy-run + +# for deepfloyd if +sentencepiece diff --git a/stable-dreamfusion-3DPortrait/rgb_finetune_command.py b/stable-dreamfusion-3DPortrait/rgb_finetune_command.py new file mode 100644 index 0000000..c2a2fe2 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/rgb_finetune_command.py @@ -0,0 +1,39 @@ +import os + +import glob +# import argparse +# +# parser = argparse.ArgumentParser() +# parser.add_argument('--trigrid_decoder_ckpt', type=str) +# parser.add_argument('--inversion_name', type=str) +# opt = parser.parse_args() +# trigrid_decoder_ckpt = opt.trigrid_decoder_ckpt +# inversion_name = opt.inversion_name + +count = 0 + +inversion_name = 'hierarchy_inversion_4000' +trigrid_decoder_ckpt ='F:\high_quality_3DPortraitGAN\exp/3DPortraitGAN-hierarchy\models/network-snapshot-004000_decoder.ckpt' +for prompt_file in 
glob.glob(f'F:/high_quality_3DPortraitGAN/exp/test_data/*/prompt.txt'): + + with open(prompt_file, 'r') as f: + prompt = f.read() + + prompt = prompt.replace('/n', '') + + dir_ = os.path.dirname(prompt_file) + name = dir_.split('/')[-1].split('\\')[-1] + #print(dir_.split('/'),dir_.split('/')[-1].split('\\')) + count_ = 0 + # if len(glob.glob(f'F:\high_quality_3DPortraitGAN\exp\stable-dreamfusion\output/2023-11-*-with-inversion-initialization-{name}_*')) > 0: + # continue + for inversion_trigrid in glob.glob(f'{dir_}/samples_new_crop/{inversion_name}/*/inversion_trigrid.pkl'): + name_ =name+ f'_{count_}' + cmd = f'python main_3DPortraitGAN.py --workspace output/2023-11-22-{name_}_{inversion_name} --save_guidance --backbone trigrid_heirarchy_aggregate --latent_iter_ratio 0 --t_range 0.02 0.4 --vram_O --w 128 --h 128 --H 512 --W 512 --iters 3000 --text "{prompt}" --hf_key F:\high_quality_3DPortraitGAN\exp\stable-dreamfusion\pretrained\SG161222Realistic_Vision_V5.1_noVAE --trigrid_path {inversion_trigrid} --trigrid_decoder_ckpt {trigrid_decoder_ckpt}' + print(cmd) + count_ += 1 + break + count += 1 + + + diff --git a/stable-dreamfusion-3DPortrait/scripts/install_ext.sh b/stable-dreamfusion-3DPortrait/scripts/install_ext.sh new file mode 100644 index 0000000..228190e --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/install_ext.sh @@ -0,0 +1,4 @@ +pip install ./raymarching +pip install ./shencoder +pip install ./freqencoder +pip install ./gridencoder \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/res64.args b/stable-dreamfusion-3DPortrait/scripts/res64.args new file mode 100644 index 0000000..1e0d300 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/res64.args @@ -0,0 +1 @@ +-O --vram_O --w 64 --h 64 \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run.sh b/stable-dreamfusion-3DPortrait/scripts/run.sh new file mode 100644 index 0000000..8d4fed4 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run.sh @@ -0,0 +1,15 @@ +#! 
/bin/bash +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a DSLR photo of a delicious hamburger" --workspace trial_hamburger --iters 5000 +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a DSLR photo of a delicious hamburger" --workspace trial2_hamburger --dmtet --iters 5000 --init_with trial_hamburger/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a highly detailed stone bust of Theodoros Kolokotronis" --workspace trial_stonehead --iters 5000 +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a highly detailed stone bust of Theodoros Kolokotronis" --workspace trial2_stonehead --dmtet --iters 5000 --init_with trial_stonehead/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "an astronaut, full body" --workspace trial_astronaut --iters 5000 +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "an astronaut, full body" --workspace trial2_astronaut --dmtet --iters 5000 --init_with trial_astronaut/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a DSLR photo of a squirrel-octopus hybrid" --workspace trial_squrrel_octopus --iters 5000 +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a DSLR photo of a squirrel-octopus hybrid" --workspace trial2_squrrel_octopus --dmtet --iters 5000 --init_with trial_squrrel_octopus/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a baby bunny sitting on top of a stack of pancakes" --workspace trial_rabbit_pancake --iters 5000 +CUDA_VISIBLE_DEVICES=1 python main.py -O --text "a metal bunny sitting on top of a stack of chocolate cookies" --workspace trial2_rabbit_pancake --dmtet --iters 5000 --init_with trial_rabbit_pancake/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run2.sh b/stable-dreamfusion-3DPortrait/scripts/run2.sh new file mode 100644 index 0000000..a958383 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run2.sh @@ -0,0 +1,10 @@ +#! /bin/bash + +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a DSLR photo of a shiba inu playing golf wearing tartan golf clothes and hat" --workspace trial_shiba --iters 10000 +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a DSLR photo of a shiba inu playing golf wearing tartan golf clothes and hat" --workspace trial2_shiba --dmtet --iters 5000 --init_with trial_shiba/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a banana peeling itself" --workspace trial_banana --iters 10000 +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a banana peeling itself" --workspace trial2_banana --dmtet --iters 5000 --init_with trial_banana/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a capybara wearing a top hat, low poly" --workspace trial_capybara --iters 10000 +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a capybara wearing a top hat, low poly" --workspace trial2_capybara --dmtet --iters 5000 --init_with trial_capybara/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run3.sh b/stable-dreamfusion-3DPortrait/scripts/run3.sh new file mode 100644 index 0000000..32d3457 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run3.sh @@ -0,0 +1,13 @@ +#! 
/bin/bash + +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "ironman, full body" --workspace trial_ironman --iters 10000 +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "ironman, full body" --workspace trial2_ironman --dmtet --iters 5000 --init_with trial_ironman/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "a DSLR photo of an ice cream sundae" --workspace trial_icecream --iters 10000 +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "a DSLR photo of an ice cream sundae" --workspace trial2_icecream --dmtet --iters 5000 --init_with trial_icecream/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "a DSLR photo of a kingfisher bird" --workspace trial_bird --iters 10000 +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "a DSLR photo of a kingfisher bird" --workspace trial2_bird --dmtet --iters 5000 --init_with trial_bird/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "a car made of sushi" --workspace trial_sushi --iters 10000 +CUDA_VISIBLE_DEVICES=7 python main.py -O --text "a car made of sushi" --workspace trial2_sushi --dmtet --iters 5000 --init_with trial_sushi/checkpoints/df.pth diff --git a/stable-dreamfusion-3DPortrait/scripts/run4.sh b/stable-dreamfusion-3DPortrait/scripts/run4.sh new file mode 100644 index 0000000..2308d0c --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run4.sh @@ -0,0 +1,13 @@ +#! /bin/bash + +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a rabbit, animated movie character, high detail 3d model" --workspace trial_rabbit2 --iters 10000 +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a rabbit, animated movie character, high detail 3d model" --workspace trial2_rabbit2 --dmtet --iters 5000 --init_with trial_rabbit2/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a corgi dog, highly detailed 3d model" --workspace trial_corgi --iters 10000 +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "a corgi dog, highly detailed 3d model" --workspace trial2_corgi --dmtet --iters 5000 --init_with trial_corgi/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=5 python main.py -O --text " a small saguaro cactus planted in a clay pot" --workspace trial_cactus --iters 10000 +CUDA_VISIBLE_DEVICES=5 python main.py -O --text " a small saguaro cactus planted in a clay pot" --workspace trial2_cactus --dmtet --iters 5000 --init_with trial_cactus/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "the leaning tower of Pisa" --workspace trial_pisa --iters 10000 +CUDA_VISIBLE_DEVICES=5 python main.py -O --text "the leaning tower of Pisa" --workspace trial2_pisa --dmtet --iters 5000 --init_with trial_pisa/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run5.sh b/stable-dreamfusion-3DPortrait/scripts/run5.sh new file mode 100644 index 0000000..fc2b7d1 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run5.sh @@ -0,0 +1,13 @@ +#! 
/bin/bash + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "Perched blue jay bird" --workspace trial_jay --iters 10000 +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "Perched blue jay bird" --workspace trial2_jay --dmtet --iters 5000 --init_with trial_jay/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "angel statue wings out" --workspace trial_angle --iters 10000 +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "angel statue wings out" --workspace trial2_angle --dmtet --iters 5000 --init_with trial_angle/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "devil statue" --workspace trial_devil --iters 10000 +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "devil statue" --workspace trial2_devil --dmtet --iters 5000 --init_with trial_devil/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "Einstein statue" --workspace trial_einstein --iters 10000 +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "Einstein statue" --workspace trial2_einstein --dmtet --iters 5000 --init_with trial_einstein/checkpoints/df.pth diff --git a/stable-dreamfusion-3DPortrait/scripts/run6.sh b/stable-dreamfusion-3DPortrait/scripts/run6.sh new file mode 100644 index 0000000..ad2b946 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run6.sh @@ -0,0 +1,18 @@ +#! /bin/bash +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a baby bunny sitting on top of a stack of pancakes" --workspace trial_rabbit_pancake --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a metal bunny sitting on top of a stack of chocolate cookies" --workspace trial2_rabbit_pancake --dmtet --iters 5000 --init_with trial_rabbit_pancake/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a DSLR photo of a blue jay standing on a large basket of rainbow macarons" --workspace trial_jay --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a DSLR photo of a blue jay standing on a large basket of rainbow macarons" --workspace trial2_jay --dmtet --iters 5000 --init_with trial_jay/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a DSLR photo of a fox taking a photograph using a DSLR" --workspace trial_fox --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a DSLR photo of a fox taking a photograph using a DSLR" --workspace trial2_fox --dmtet --iters 5000 --init_with trial_fox/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a DSLR photo of a peacock on a surfboard" --workspace trial_peacock --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a DSLR photo of a peacock on a surfboard" --workspace trial2_peacock --dmtet --iters 5000 --init_with trial_peacock/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a flower made out of metal" --workspace trial_metal_flower --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a flower made out of metal" --workspace trial2_metal_flower --dmtet --iters 5000 --init_with trial_metal_flower/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a zoomed out DSLR photo of an egg cracked open with a newborn chick hatching out of it" --workspace trial_chicken --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --text "a zoomed out DSLR photo of an egg cracked open with a newborn chick hatching out of it" --workspace trial2_chicken --dmtet --iters 5000 --init_with trial_chicken/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run_if.sh 
b/stable-dreamfusion-3DPortrait/scripts/run_if.sh new file mode 100644 index 0000000..07bb17f --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_if.sh @@ -0,0 +1,18 @@ +#! /bin/bash +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a baby bunny sitting on top of a stack of pancakes" --workspace trial_if_rabbit_pancake --iters 5000 --IF +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a metal bunny sitting on top of a stack of chocolate cookies" --workspace trial_if2_rabbit_pancake --dmtet --iters 5000 --init_with trial_if_rabbit_pancake/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a DSLR photo of a blue jay standing on a large basket of rainbow macarons" --workspace trial_if_jay --iters 5000 --IF +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a DSLR photo of a blue jay standing on a large basket of rainbow macarons" --workspace trial_if2_jay --dmtet --iters 5000 --init_with trial_if_jay/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a DSLR photo of a fox taking a photograph using a DSLR" --workspace trial_if_fox --iters 5000 --IF +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a DSLR photo of a fox taking a photograph using a DSLR" --workspace trial_if2_fox --dmtet --iters 5000 --init_with trial_if_fox/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a DSLR photo of a peacock on a surfboard" --workspace trial_if_peacock --iters 5000 --IF +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a DSLR photo of a peacock on a surfboard" --workspace trial_if2_peacock --dmtet --iters 5000 --init_with trial_if_peacock/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a flower made out of metal" --workspace trial_if_metal_flower --iters 5000 --IF +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a flower made out of metal" --workspace trial_if2_metal_flower --dmtet --iters 5000 --init_with trial_if_metal_flower/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a zoomed out DSLR photo of an egg cracked open with a newborn chick hatching out of it" --workspace trial_if_chicken --iters 5000 --IF +CUDA_VISIBLE_DEVICES=2 python main.py -O --text "a zoomed out DSLR photo of an egg cracked open with a newborn chick hatching out of it" --workspace trial_if2_chicken --dmtet --iters 5000 --init_with trial_if_chicken/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run_if2.sh b/stable-dreamfusion-3DPortrait/scripts/run_if2.sh new file mode 100644 index 0000000..b363925 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_if2.sh @@ -0,0 +1,18 @@ +#! 
/bin/bash +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a corgi taking a selfie" --workspace trial_if_corgi --iters 5000 --IF +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a corgi taking a selfie" --workspace trial_if2_corgi --dmtet --iters 5000 --init_with trial_if_corgi/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a DSLR photo of a ghost eating a hamburger" --workspace trial_if_ghost --iters 5000 --IF +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a DSLR photo of a ghost eating a hamburger" --workspace trial_if2_ghost --dmtet --iters 5000 --init_with trial_if_ghost/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a DSLR photo of an origami motorcycle" --workspace trial_if_motor --iters 5000 --IF +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a DSLR photo of an origami motorcycle" --workspace trial_if2_motor --dmtet --iters 5000 --init_with trial_if_motor/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a DSLR photo of a Space Shuttle" --workspace trial_if_spaceshuttle --iters 5000 --IF +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a DSLR photo of a Space Shuttle" --workspace trial_if2_spaceshuttle --dmtet --iters 5000 --init_with trial_if_spaceshuttle/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a palm tree, low poly 3d model" --workspace trial_if_palm --iters 5000 --IF +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a palm tree, low poly 3d model" --workspace trial_if2_palm --dmtet --iters 5000 --init_with trial_if_palm/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a zoomed out DSLR photo of a marble bust of a cat, a real mouse is sitting on its head" --workspace trial_if_cat_mouse --iters 5000 --IF +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a zoomed out DSLR photo of a marble bust of a cat, a real mouse is sitting on its head" --workspace trial_if2_cat_mouse --dmtet --iters 5000 --init_with trial_if_cat_mouse/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run_if2_perpneg.sh b/stable-dreamfusion-3DPortrait/scripts/run_if2_perpneg.sh new file mode 100644 index 0000000..2261027 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_if2_perpneg.sh @@ -0,0 +1,18 @@ +#! /bin/bash +# To avoid the Janus problem caused by the diffusion model's front view bias, utilize the Perp-Neg algorithm. To maximize its benefits, +# increase the absolute value of "negative_w" for improved Janus problem mitigation. If you encounter flat faces or divergence, consider +# reducing the absolute value of "negative_w". The value of "negative_w" should vary for each prompt due to the diffusion model's varying +# bias towards generating front views for different objects. Vary the weights within the range of 0 to -4. 
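+# (Illustrative sketch, not part of the original scripts) one simple way to sweep negative_w for a prompt:
+# for w in -1.0 -2.0 -3.0 -4.0; do
+#   CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a lion bust" --workspace trial_perpneg_sweep_${w} --iters 5000 --IF --batch_size 1 --perpneg --negative_w ${w}
+# done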
+CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a lion bust" --workspace trial_perpneg_if_lion --iters 5000 --IF --batch_size 1 --perpneg +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a marble lion head" --workspace trial_perpneg_if2_lion_p --dmtet --iters 5000 --perpneg --init_with trial_perpneg_if_lion/checkpoints/df.pth +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a marble lion head" --workspace trial_perpneg_if2_lion_nop --dmtet --iters 5000 --init_with trial_perpneg_if_lion/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a tiger cub" --workspace trial_perpneg_if_tiger --iters 5000 --IF --batch_size 1 --perpneg +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "tiger" --workspace trial_perpneg_if2_tiger_p --dmtet --iters 5000 --perpneg --init_with trial_perpneg_if_tiger/checkpoints/df.pth +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "tiger" --workspace trial_perpneg_if2_tiger_nop --dmtet --iters 5000 --init_with trial_perpneg_if_tiger/checkpoints/df.pth + +# larger absolute value of negative_w is used for the following command because the defult negative weight of -2 is not enough to make the diffusion model to produce the views as desired +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "a shiba dog wearing sunglasses" --workspace trial_perpneg_if_shiba --iters 5000 --IF --batch_size 1 --perpneg --negative_w -3.0 +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "shiba wearing sunglasses" --workspace trial_perpneg_if2_shiba_p --dmtet --iters 5000 --perpneg --negative_w -3.0 --init_with trial_perpneg_if_shiba/checkpoints/df.pth +CUDA_VISIBLE_DEVICES=3 python main.py -O --text "shiba wearing sunglasses" --workspace trial_perpneg_if2_shiba_nop --dmtet --iters 5000 --init_with trial_perpneg_if_shiba/checkpoints/df.pth + diff --git a/stable-dreamfusion-3DPortrait/scripts/run_image.sh b/stable-dreamfusion-3DPortrait/scripts/run_image.sh new file mode 100644 index 0000000..5c885e3 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_image.sh @@ -0,0 +1,25 @@ +# zero123 backend (single object, images like 3d model rendering) + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/teddy_rgba.png --workspace trial_image_teddy --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/teddy_rgba.png --workspace trial2_image_teddy --iters 5000 --dmtet --init_with trial_image_teddy/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/catstatue_rgba.png --workspace trial_image_catstatue --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/catstatue_rgba.png --workspace trial2_image_catstatue --iters 5000 --dmtet --init_with trial_image_catstatue/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/firekeeper_rgba.png --workspace trial_image_firekeeper --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/firekeeper_rgba.png --workspace trial2_image_firekeeper --iters 5000 --dmtet --init_with trial_image_firekeeper/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/hamburger_rgba.png --workspace trial_image_hamburger --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/hamburger_rgba.png --workspace trial2_image_hamburger --iters 5000 --dmtet --init_with trial_image_hamburger/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/corgi_rgba.png --workspace trial_image_corgi --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/corgi_rgba.png --workspace trial2_image_corgi --iters 5000 --dmtet 
--init_with trial_image_corgi/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/cactus_rgba.png --workspace trial_image_cactus --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/cactus_rgba.png --workspace trial2_image_cactus --iters 5000 --dmtet --init_with trial_image_cactus/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/cake_rgba.png --workspace trial_image_cake --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/cake_rgba.png --workspace trial2_image_cake --iters 5000 --dmtet --init_with trial_image_cake/checkpoints/df.pth + +# CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/warrior_rgba.png --workspace trial_image_warrior --iters 5000 +# CUDA_VISIBLE_DEVICES=6 python main.py -O --image data/warrior_rgba.png --workspace trial2_image_warrior --iters 5000 --dmtet --init_with trial_image_warrior/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/scripts/run_image_anya.sh b/stable-dreamfusion-3DPortrait/scripts/run_image_anya.sh new file mode 100644 index 0000000..e0d63ac --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_image_anya.sh @@ -0,0 +1,35 @@ +# Phase 1 - barely fits in A100 40GB. +# Conclusion: results in concave-ish face, no neck, excess hair in the back +CUDA_VISIBLE_DEVICES=0 python main.py -O --image data/anya_front_rgba.png --workspace trial_anya_1_refimage \ + --iters 10000 --save_guidance --save_guidance_interval 10 --ckpt scratch --batch_size 2 --test_interval 2 \ + --h 128 --w 128 --zero123_grad_scale None + +# Phase 2 - barely fits in A100 40GB. +# 20X smaller lambda_3d_normal_smooth, --known_view_interval 2, 3X LR +# Much higher jitter to increase disparity (and eliminate some of the flatness)... not too high either (to avoid cropping the face) +CUDA_VISIBLE_DEVICES=0 python main.py -O --image data/anya_front_rgba.png --workspace trial_anya_1_refimage_B_GPU2_reproduction1_GPU2 \ + --text "A DSLR 3D photo of a cute anime schoolgirl stands proudly with her arms in the air, pink hair ( unreal engine 5 trending on Artstation Ghibli 4k )" \ + --iters 12500 --ckpt trial_anya_1_refimage/checkpoints/df_ep0100.pth --save_guidance --save_guidance_interval 1 \ + --h 256 --w 256 --albedo_iter_ratio 0.0 --t_range 0.2 0.6 --batch_size 4 --radius_range 2.2 2.6 --test_interval 2 \ + --vram_O --guidance_scale 10 --jitter_pose --jitter_center 0.1 --jitter_target 0.1 --jitter_up 0.05 \ + --known_view_noise_scale 0 --lambda_depth 0 --lr 0.003 --progressive_view --known_view_interval 2 --dont_override_stuff --lambda_3d_normal_smooth 1 \ + --exp_start_iter 10000 --exp_end_iter 12500 + +# Phase 3 - increase resolution to 512 +# Disable textureless since they can cause catastrophic divergence +# Since radius range is inconsistent, increase it, and reduce the jitter to avoid excessively cropped renders. +# Learning rate may be set too high, since `--batch_size 1`. 
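+# Note: the command below keeps --lr 0.003 from Phase 2 while --batch_size drops to 1; if this
+# phase diverges, lowering the learning rate is a reasonable first adjustment to try.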
+CUDA_VISIBLE_DEVICES=0 python main.py -O --image data/anya_front_rgba.png --workspace trial_anya_1_refimage_B_GPU2_reproduction1_GPU2_refinedGPU2 \ + --text "A DSLR 3D photo of a cute anime schoolgirl stands proudly with her arms in the air, pink hair ( unreal engine 5 trending on Artstation Ghibli 4k )" \ + --iters 25000 --ckpt trial_anya_1_refimage_B_GPU2_reproduction1_GPU2/checkpoints/df_ep0125.pth --save_guidance --save_guidance_interval 1 \ + --h 512 --w 512 --albedo_iter_ratio 0.0 --t_range 0.0 0.5 --batch_size 1 --radius_range 3.2 3.6 --test_interval 2 \ + --vram_O --guidance_scale 10 --jitter_pose --jitter_center 0.015 --jitter_target 0.015 --jitter_up 0.05 \ + --known_view_noise_scale 0 --lambda_depth 0 --lr 0.003 --known_view_interval 2 --dont_override_stuff --lambda_3d_normal_smooth 0.5 --textureless_ratio 0.0 --min_ambient_ratio 0.3 \ + --exp_start_iter 12500 --exp_end_iter 25000 + +# Generate 6 views +CUDA_VISIBLE_DEVICES=0 python main.py -O --image data/anya_front_rgba.png --ckpt trial_anya_1_refimage_B_GPU2_reproduction1_GPU2_refinedGPU2/checkpoints/df_ep0250.pth --six_views + +# Phase 4 - untested, need to adjust +# CUDA_VISIBLE_DEVICES=0 python main.py -O --image data/anya_front_rgba.png --workspace trial_anya_1_refimage --iters 5000 --dmtet --init_with trial_anya_1_refimage/checkpoints/df.pth + diff --git a/stable-dreamfusion-3DPortrait/scripts/run_image_hard_examples.sh b/stable-dreamfusion-3DPortrait/scripts/run_image_hard_examples.sh new file mode 100644 index 0000000..4a8dd04 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_image_hard_examples.sh @@ -0,0 +1,11 @@ +bash scripts/run_image_procedure.sh 0 30 90 anya_front "A DSLR 3D photo of a cute anime schoolgirl stands proudly with her arms in the air, pink hair ( unreal engine 5 trending on Artstation Ghibli 4k )" +bash scripts/run_image_procedure.sh 1 30 70 baby_phoenix_on_ice "A DSLR 3D photo of an adorable baby phoenix made in Swarowski crystal highly detailed intricate concept art 8K ( unreal engine 5 trending on Artstation )" +bash scripts/run_image_procedure.sh 2 30 90 bollywood_actress "A DSLR 3D photo of a beautiful bollywood indian actress, pretty eyes, full body shot composition, sunny outdoor, seen from far away ( highly detailed intricate 8K unreal engine 5 trending on Artstation )" +bash scripts/run_image_procedure.sh 3 30 40 beach_house_1 "A DSLR 3D photo of a very beautiful small house on a beach ( highly detailed intricate 8K unreal engine 5 trending on Artstation )" +bash scripts/run_image_procedure.sh 4 30 60 beach_house_2 "A DSLR 3D photo of a very beautiful high-tech small house with solar panels and wildflowers on a beach ( highly detailed intricate 8K unreal engine 5 trending on Artstation )" +bash scripts/run_image_procedure.sh 5 30 90 mona_lisa "A DSLR 3D photo of a beautiful young woman dressed like Mona Lisa ( highly detailed intricate 8K unreal engine 5 trending on Artstation )" +bash scripts/run_image_procedure.sh 6 30 80 futuristic_car "A DSLR 3D photo of a crazily futuristic electric car ( highly detailed intricate 8K unreal engine 5 trending on Artstation )" +# the church ruins probably require a wider field of view... e.g. 90 degrees, maybe even more... so may not work with Zero123 etc. 
+bash scripts/run_image_procedure.sh 7 30 90 church_ruins "A DSLR 3D photo of the remains of an isolated old church ruin covered in ivy ( highly detailed intricate 8K unreal engine 5 trending on Artstation )"
+
+# young woman dressed like mona lisa
\ No newline at end of file
diff --git a/stable-dreamfusion-3DPortrait/scripts/run_image_procedure.sh b/stable-dreamfusion-3DPortrait/scripts/run_image_procedure.sh new file mode 100644 index 0000000..722da3f --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_image_procedure.sh @@ -0,0 +1,71 @@
+# Perform a 2D-to-3D reconstruction, similar to the Anya case study: https://github.com/ashawkey/stable-dreamfusion/issues/263
+# Args:
+# bash scripts/run_image_procedure.sh GPU_ID GUIDANCE_INTERVAL DEFAULT_POLAR image_name "prompt"
+# e.g.:
+# bash scripts/run_image_procedure.sh 1 30 70 baby_phoenix_on_ice "An adorable baby phoenix made in Swarowski crystal highly detailed intricate concept art 8K"
+GPU_ID=$1
+GUIDANCE_INTERVAL=$2
+DEFAULT_POLAR=$3
+PREFIX=$4
+PROMPT=$5
+EPOCHS1=100
+EPOCHS2=200
+EPOCHS3=300
+IMAGE=data/$PREFIX.png
+IMAGE_RGBA=data/${PREFIX}_rgba.png
+WS_PH1=trial_$PREFIX-ph1
+WS_PH2=trial_$PREFIX-ph2
+WS_PH3=trial_$PREFIX-ph3
+CKPT1=$WS_PH1/checkpoints/df_ep0${EPOCHS1}.pth
+CKPT2=$WS_PH2/checkpoints/df_ep0${EPOCHS2}.pth
+CKPT3=$WS_PH3/checkpoints/df_ep0${EPOCHS3}.pth
+
+# Can uncomment to clear up trial folders. Be careful - mistakes could erase important work!
+# rm -r $WS_PH1 $WS_PH2 $WS_PH3
+
+# Preprocess
+if [ ! -f $IMAGE_RGBA ]
+then
+    python preprocess_image.py $IMAGE
+fi
+
+if [ ! -f $CKPT1 ]
+then
+    # Phase 1 - zero123-guidance
+    # WARNING: claforte: constantly runs out of VRAM with resolution of 128x128 and batch_size 2... no longer able to reproduce Anya result because of this...
+    # I added these to try to reduce mem usage, but this might degrade the quality... `--lambda_depth 0 --lambda_3d_normal_smooth 0`
+    # Remove: --ckpt scratch
+    CUDA_VISIBLE_DEVICES=$GPU_ID python main.py -O --image $IMAGE_RGBA --workspace $WS_PH1 --default_polar $DEFAULT_POLAR \
+        --iters ${EPOCHS1}00 --save_guidance --save_guidance_interval $GUIDANCE_INTERVAL --batch_size 1 --test_interval 2 \
+        --h 96 --w 96 --zero123_grad_scale None --lambda_3d_normal_smooth 0 --dont_override_stuff \
+        --fovy_range 20 20 --guidance_scale 5
+fi
+
+GUIDANCE_INTERVAL=7
+if [ ! -f $CKPT2 ]
+then
+    # Phase 2 - SD-guidance at 256x256
+    CUDA_VISIBLE_DEVICES=$GPU_ID python main.py -O --image $IMAGE_RGBA --workspace $WS_PH2 \
+        --text "${PROMPT}" --default_polar $DEFAULT_POLAR \
+        --iters ${EPOCHS2}00 --ckpt $CKPT1 --save_guidance --save_guidance_interval 7 \
+        --h 128 --w 128 --albedo_iter_ratio 0.0 --t_range 0.2 0.6 --batch_size 4 --radius_range 2.2 2.6 --test_interval 2 \
+        --vram_O --guidance_scale 10 --jitter_pose --jitter_center 0.1 --jitter_target 0.1 --jitter_up 0.05 \
+        --known_view_noise_scale 0 --lambda_depth 0 --lr 0.003 --progressive_view --progressive_view_init_ratio 0.05 --known_view_interval 2 --dont_override_stuff --lambda_3d_normal_smooth 1 --textureless_ratio 0.0 --min_ambient_ratio 0.3 \
+        --exp_start_iter ${EPOCHS1}00 --exp_end_iter ${EPOCHS2}00
+fi
+
+if [ !
-f $CKPT3 ] +then + # # Phase 3 - increase resolution to 512 + CUDA_VISIBLE_DEVICES=$GPU_ID python main.py -O --image $IMAGE_RGBA --workspace $WS_PH3 \ + --text "${PROMPT}" --default_polar $DEFAULT_POLAR \ + --iters ${EPOCHS3}00 --ckpt $CKPT2 --save_guidance --save_guidance_interval 7 \ + --h 512 --w 512 --albedo_iter_ratio 0.0 --t_range 0.0 0.5 --batch_size 1 --radius_range 3.2 3.6 --test_interval 2 \ + --vram_O --guidance_scale 10 --jitter_pose --jitter_center 0.015 --jitter_target 0.015 --jitter_up 0.05 \ + --known_view_noise_scale 0 --lambda_depth 0 --lr 0.003 --known_view_interval 2 --dont_override_stuff --lambda_3d_normal_smooth 0.5 --textureless_ratio 0.0 --min_ambient_ratio 0.3 \ + --exp_start_iter ${EPOCHS2}00 --exp_end_iter ${EPOCHS3}00 +fi + +# Generate 6 views +CUDA_VISIBLE_DEVICES=$GPU_ID python main.py -O --image $IMAGE_RGBA --ckpt $CKPT3 --six_views + diff --git a/stable-dreamfusion-3DPortrait/scripts/run_image_text.sh b/stable-dreamfusion-3DPortrait/scripts/run_image_text.sh new file mode 100644 index 0000000..711dbf6 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_image_text.sh @@ -0,0 +1,13 @@ +# sd backend (realistic images) + +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/teddy_rgba.png --text "a brown teddy bear sitting on a ground" --workspace trial_imagetext_teddy --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/teddy_rgba.png --text "a brown teddy bear sitting on a ground" --workspace trial2_imagetext_teddy --iters 10000 --dmtet --init_with trial_imagetext_teddy/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/corgi_rgba.png --text "a corgi running" --workspace trial_imagetext_corgi --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/corgi_rgba.png --text "a corgi running" --workspace trial2_imagetext_corgi --iters 10000 --dmtet --init_with trial_imagetext_corgi/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/hamburger_rgba.png --text "a DSLR photo of a delicious hamburger" --workspace trial_imagetext_hamburger --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/hamburger_rgba.png --text "a DSLR photo of a delicious hamburger" --workspace trial2_imagetext_hamburger --iters 10000 --dmtet --init_with trial_imagetext_hamburger/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/cactus_rgba.png --text "a potted cactus plant" --workspace trial_imagetext_cactus --iters 5000 +CUDA_VISIBLE_DEVICES=4 python main.py -O --image data/cactus_rgba.png --text "a potted cactus plant" --workspace trial2_imagetext_cactus --iters 10000 --dmtet --init_with trial_imagetext_cactus/checkpoints/df.pth diff --git a/stable-dreamfusion-3DPortrait/scripts/run_images.sh b/stable-dreamfusion-3DPortrait/scripts/run_images.sh new file mode 100644 index 0000000..e41c981 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/scripts/run_images.sh @@ -0,0 +1,10 @@ +# zero123 backend (single object, images like 3d model rendering) + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image_config config/corgi.csv --workspace trial_images_corgi --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image_config config/corgi.csv --workspace trial2_images_corgi --iters 10000 --dmtet --init_with trial_images_corgi/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image_config config/car.csv --workspace trial_images_car --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image_config config/car.csv --workspace trial2_images_car --iters 10000 
--dmtet --init_with trial_images_car/checkpoints/df.pth + +CUDA_VISIBLE_DEVICES=6 python main.py -O --image_config config/anya.csv --workspace trial_images_anya --iters 5000 +CUDA_VISIBLE_DEVICES=6 python main.py -O --image_config config/anya.csv --workspace trial2_images_anya --iters 10000 --dmtet --init_with trial_images_anya/checkpoints/df.pth \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/shencoder/__init__.py b/stable-dreamfusion-3DPortrait/shencoder/__init__.py new file mode 100644 index 0000000..2b55c96 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/shencoder/__init__.py @@ -0,0 +1 @@ +from .sphere_harmonics import SHEncoder \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/shencoder/backend.py b/stable-dreamfusion-3DPortrait/shencoder/backend.py new file mode 100644 index 0000000..4971d5e --- /dev/null +++ b/stable-dreamfusion-3DPortrait/shencoder/backend.py @@ -0,0 +1,41 @@ +import os +from torch.utils.cpp_extension import load + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + + # If cl.exe is not on path, try to find it. + if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +_backend = load(name='_sh_encoder', + extra_cflags=c_flags, + extra_cuda_cflags=nvcc_flags, + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'shencoder.cu', + 'bindings.cpp', + ]], + ) + +__all__ = ['_backend'] \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/shencoder/setup.py b/stable-dreamfusion-3DPortrait/shencoder/setup.py new file mode 100644 index 0000000..4633ebd --- /dev/null +++ b/stable-dreamfusion-3DPortrait/shencoder/setup.py @@ -0,0 +1,51 @@ +import os +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +_src_path = os.path.dirname(os.path.abspath(__file__)) + +nvcc_flags = [ + '-O3', '-std=c++14', + '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', +] + +if os.name == "posix": + c_flags = ['-O3', '-std=c++14'] +elif os.name == "nt": + c_flags = ['/O2', '/std:c++17'] + + # find cl.exe + def find_cl_path(): + import glob + for program_files in [r"C:\\Program Files (x86)", r"C:\\Program Files"]: + for edition in ["Enterprise", "Professional", "BuildTools", "Community"]: + paths = sorted(glob.glob(r"%s\\Microsoft Visual Studio\\*\\%s\\VC\\Tools\\MSVC\\*\\bin\\Hostx64\\x64" % (program_files, edition)), reverse=True) + if paths: + return paths[0] + + # If cl.exe is not on path, try to find it. 
+ if os.system("where cl.exe >nul 2>nul") != 0: + cl_path = find_cl_path() + if cl_path is None: + raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation") + os.environ["PATH"] += ";" + cl_path + +setup( + name='shencoder', # package name, import this to use python API + ext_modules=[ + CUDAExtension( + name='_shencoder', # extension name, import this to use CUDA API + sources=[os.path.join(_src_path, 'src', f) for f in [ + 'shencoder.cu', + 'bindings.cpp', + ]], + extra_compile_args={ + 'cxx': c_flags, + 'nvcc': nvcc_flags, + } + ), + ], + cmdclass={ + 'build_ext': BuildExtension, + } +) \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/shencoder/sphere_harmonics.py b/stable-dreamfusion-3DPortrait/shencoder/sphere_harmonics.py new file mode 100644 index 0000000..7bab24e --- /dev/null +++ b/stable-dreamfusion-3DPortrait/shencoder/sphere_harmonics.py @@ -0,0 +1,87 @@ +import numpy as np + +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.cuda.amp import custom_bwd, custom_fwd + +try: + import _shencoder as _backend +except ImportError: + from .backend import _backend + +class _sh_encoder(Function): + @staticmethod + @custom_fwd(cast_inputs=torch.float32) # force float32 for better precision + def forward(ctx, inputs, degree, calc_grad_inputs=False): + # inputs: [B, input_dim], float in [-1, 1] + # RETURN: [B, F], float + + inputs = inputs.contiguous() + B, input_dim = inputs.shape # batch size, coord dim + output_dim = degree ** 2 + + outputs = torch.empty(B, output_dim, dtype=inputs.dtype, device=inputs.device) + + if calc_grad_inputs: + dy_dx = torch.empty(B, input_dim * output_dim, dtype=inputs.dtype, device=inputs.device) + else: + dy_dx = None + + _backend.sh_encode_forward(inputs, outputs, B, input_dim, degree, dy_dx) + + ctx.save_for_backward(inputs, dy_dx) + ctx.dims = [B, input_dim, degree] + + return outputs + + @staticmethod + #@once_differentiable + @custom_bwd + def backward(ctx, grad): + # grad: [B, C * C] + + inputs, dy_dx = ctx.saved_tensors + + if dy_dx is not None: + grad = grad.contiguous() + B, input_dim, degree = ctx.dims + grad_inputs = torch.zeros_like(inputs) + _backend.sh_encode_backward(grad, inputs, B, input_dim, degree, dy_dx, grad_inputs) + return grad_inputs, None, None + else: + return None, None, None + + + +sh_encode = _sh_encoder.apply + + +class SHEncoder(nn.Module): + def __init__(self, input_dim=3, degree=4): + super().__init__() + + self.input_dim = input_dim # coord dims, must be 3 + self.degree = degree # 0 ~ 4 + self.output_dim = degree ** 2 + + assert self.input_dim == 3, "SH encoder only support input dim == 3" + assert self.degree > 0 and self.degree <= 8, "SH encoder only supports degree in [1, 8]" + + def __repr__(self): + return f"SHEncoder: input_dim={self.input_dim} degree={self.degree}" + + def forward(self, inputs, size=1): + # inputs: [..., input_dim], normalized real world positions in [-size, size] + # return: [..., degree^2] + + inputs = inputs / size # [-1, 1] + + prefix_shape = list(inputs.shape[:-1]) + inputs = inputs.reshape(-1, self.input_dim) + + outputs = sh_encode(inputs, self.degree, inputs.requires_grad) + outputs = outputs.reshape(prefix_shape + [self.output_dim]) + + return outputs \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/shencoder/src/bindings.cpp b/stable-dreamfusion-3DPortrait/shencoder/src/bindings.cpp new file mode 100644 index 0000000..595b5b3 --- 
/dev/null +++ b/stable-dreamfusion-3DPortrait/shencoder/src/bindings.cpp @@ -0,0 +1,8 @@ +#include + +#include "shencoder.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("sh_encode_forward", &sh_encode_forward, "SH encode forward (CUDA)"); + m.def("sh_encode_backward", &sh_encode_backward, "SH encode backward (CUDA)"); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/shencoder/src/shencoder.cu b/stable-dreamfusion-3DPortrait/shencoder/src/shencoder.cu new file mode 100644 index 0000000..a92e4ab --- /dev/null +++ b/stable-dreamfusion-3DPortrait/shencoder/src/shencoder.cu @@ -0,0 +1,439 @@ +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include + + +#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor") +#define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor") +#define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor") + + +template +__host__ __device__ T div_round_up(T val, T divisor) { + return (val + divisor - 1) / divisor; +} + +template +__global__ void kernel_sh( + const scalar_t * __restrict__ inputs, + scalar_t * outputs, + uint32_t B, uint32_t D, uint32_t C, + scalar_t * dy_dx +) { + const uint32_t b = threadIdx.x + blockIdx.x * blockDim.x; + if (b >= B) return; + + const uint32_t C2 = C * C; + + // locate + inputs += b * D; + outputs += b * C2; + + scalar_t x = inputs[0], y = inputs[1], z = inputs[2]; + + scalar_t xy=x*y, xz=x*z, yz=y*z, x2=x*x, y2=y*y, z2=z*z, xyz=xy*z; + scalar_t x4=x2*x2, y4=y2*y2, z4=z2*z2; + scalar_t x6=x4*x2, y6=y4*y2, z6=z4*z2; + + auto write_sh = [&]() { + outputs[0] = 0.28209479177387814f ; // 1/(2*sqrt(pi)) + if (C <= 1) { return; } + outputs[1] = -0.48860251190291987f*y ; // -sqrt(3)*y/(2*sqrt(pi)) + outputs[2] = 0.48860251190291987f*z ; // sqrt(3)*z/(2*sqrt(pi)) + outputs[3] = -0.48860251190291987f*x ; // -sqrt(3)*x/(2*sqrt(pi)) + if (C <= 2) { return; } + outputs[4] = 1.0925484305920792f*xy ; // sqrt(15)*xy/(2*sqrt(pi)) + outputs[5] = -1.0925484305920792f*yz ; // -sqrt(15)*yz/(2*sqrt(pi)) + outputs[6] = 0.94617469575755997f*z2 - 0.31539156525251999f ; // sqrt(5)*(3*z2 - 1)/(4*sqrt(pi)) + outputs[7] = -1.0925484305920792f*xz ; // -sqrt(15)*xz/(2*sqrt(pi)) + outputs[8] = 0.54627421529603959f*x2 - 0.54627421529603959f*y2 ; // sqrt(15)*(x2 - y2)/(4*sqrt(pi)) + if (C <= 3) { return; } + outputs[9] = 0.59004358992664352f*y*(-3.0f*x2 + y2) ; // sqrt(70)*y*(-3*x2 + y2)/(8*sqrt(pi)) + outputs[10] = 2.8906114426405538f*xy*z ; // sqrt(105)*xy*z/(2*sqrt(pi)) + outputs[11] = 0.45704579946446572f*y*(1.0f - 5.0f*z2) ; // sqrt(42)*y*(1 - 5*z2)/(8*sqrt(pi)) + outputs[12] = 0.3731763325901154f*z*(5.0f*z2 - 3.0f) ; // sqrt(7)*z*(5*z2 - 3)/(4*sqrt(pi)) + outputs[13] = 0.45704579946446572f*x*(1.0f - 5.0f*z2) ; // sqrt(42)*x*(1 - 5*z2)/(8*sqrt(pi)) + outputs[14] = 1.4453057213202769f*z*(x2 - y2) ; // sqrt(105)*z*(x2 - y2)/(4*sqrt(pi)) + outputs[15] = 0.59004358992664352f*x*(-x2 + 3.0f*y2) ; // sqrt(70)*x*(-x2 + 3*y2)/(8*sqrt(pi)) + if (C <= 4) { return; } + outputs[16] = 2.5033429417967046f*xy*(x2 - y2) ; // 3*sqrt(35)*xy*(x2 - y2)/(4*sqrt(pi)) + outputs[17] = 1.7701307697799304f*yz*(-3.0f*x2 + y2) ; // 3*sqrt(70)*yz*(-3*x2 + y2)/(8*sqrt(pi)) + outputs[18] = 
0.94617469575756008f*xy*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*xy*(7*z2 - 1)/(4*sqrt(pi)) + outputs[19] = 0.66904654355728921f*yz*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*yz*(3 - 7*z2)/(8*sqrt(pi)) + outputs[20] = -3.1735664074561294f*z2 + 3.7024941420321507f*z4 + 0.31735664074561293f ; // 3*(-30*z2 + 35*z4 + 3)/(16*sqrt(pi)) + outputs[21] = 0.66904654355728921f*xz*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*xz*(3 - 7*z2)/(8*sqrt(pi)) + outputs[22] = 0.47308734787878004f*(x2 - y2)*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*(x2 - y2)*(7*z2 - 1)/(8*sqrt(pi)) + outputs[23] = 1.7701307697799304f*xz*(-x2 + 3.0f*y2) ; // 3*sqrt(70)*xz*(-x2 + 3*y2)/(8*sqrt(pi)) + outputs[24] = -3.7550144126950569f*x2*y2 + 0.62583573544917614f*x4 + 0.62583573544917614f*y4 ; // 3*sqrt(35)*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) + if (C <= 5) { return; } + outputs[25] = 0.65638205684017015f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(154)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) + outputs[26] = 8.3026492595241645f*xy*z*(x2 - y2) ; // 3*sqrt(385)*xy*z*(x2 - y2)/(4*sqrt(pi)) + outputs[27] = -0.48923829943525038f*y*(3.0f*x2 - y2)*(9.0f*z2 - 1.0f) ; // -sqrt(770)*y*(3*x2 - y2)*(9*z2 - 1)/(32*sqrt(pi)) + outputs[28] = 4.7935367849733241f*xy*z*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xy*z*(3*z2 - 1)/(4*sqrt(pi)) + outputs[29] = 0.45294665119569694f*y*(14.0f*z2 - 21.0f*z4 - 1.0f) ; // sqrt(165)*y*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) + outputs[30] = 0.1169503224534236f*z*(-70.0f*z2 + 63.0f*z4 + 15.0f) ; // sqrt(11)*z*(-70*z2 + 63*z4 + 15)/(16*sqrt(pi)) + outputs[31] = 0.45294665119569694f*x*(14.0f*z2 - 21.0f*z4 - 1.0f) ; // sqrt(165)*x*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) + outputs[32] = 2.3967683924866621f*z*(x2 - y2)*(3.0f*z2 - 1.0f) ; // sqrt(1155)*z*(x2 - y2)*(3*z2 - 1)/(8*sqrt(pi)) + outputs[33] = -0.48923829943525038f*x*(x2 - 3.0f*y2)*(9.0f*z2 - 1.0f) ; // -sqrt(770)*x*(x2 - 3*y2)*(9*z2 - 1)/(32*sqrt(pi)) + outputs[34] = 2.0756623148810411f*z*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(385)*z*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) + outputs[35] = 0.65638205684017015f*x*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(154)*x*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) + if (C <= 6) { return; } + outputs[36] = 1.3663682103838286f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // sqrt(6006)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) + outputs[37] = 2.3666191622317521f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(2002)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) + outputs[38] = 2.0182596029148963f*xy*(x2 - y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*xy*(x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) + outputs[39] = -0.92120525951492349f*yz*(3.0f*x2 - y2)*(11.0f*z2 - 3.0f) ; // -sqrt(2730)*yz*(3*x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) + outputs[40] = 0.92120525951492349f*xy*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*xy*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) + outputs[41] = 0.58262136251873131f*yz*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*yz*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) + outputs[42] = 6.6747662381009842f*z2 - 20.024298714302954f*z4 + 14.684485723822165f*z6 - 0.31784601133814211f ; // sqrt(13)*(105*z2 - 315*z4 + 231*z6 - 5)/(32*sqrt(pi)) + outputs[43] = 0.58262136251873131f*xz*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*xz*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) + outputs[44] = 0.46060262975746175f*(x2 - y2)*(11.0f*z2*(3.0f*z2 - 1.0f) - 7.0f*z2 + 1.0f) ; // sqrt(2730)*(x2 - y2)*(11*z2*(3*z2 - 1) - 7*z2 + 1)/(64*sqrt(pi)) + outputs[45] = -0.92120525951492349f*xz*(x2 - 3.0f*y2)*(11.0f*z2 - 3.0f) ; // -sqrt(2730)*xz*(x2 - 3*y2)*(11*z2 - 3)/(32*sqrt(pi)) + outputs[46] = 0.50456490072872406f*(11.0f*z2 - 1.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 
3*sqrt(91)*(11*z2 - 1)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) + outputs[47] = 2.3666191622317521f*xz*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(2002)*xz*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) + outputs[48] = 10.247761577878714f*x2*y4 - 10.247761577878714f*x4*y2 + 0.6831841051919143f*x6 - 0.6831841051919143f*y6 ; // sqrt(6006)*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) + if (C <= 7) { return; } + outputs[49] = 0.70716273252459627f*y*(-21.0f*x2*y4 + 35.0f*x4*y2 - 7.0f*x6 + y6) ; // 3*sqrt(715)*y*(-21*x2*y4 + 35*x4*y2 - 7*x6 + y6)/(64*sqrt(pi)) + outputs[50] = 5.2919213236038001f*xy*z*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 3*sqrt(10010)*xy*z*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) + outputs[51] = -0.51891557872026028f*y*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // -3*sqrt(385)*y*(13*z2 - 1)*(-10*x2*y2 + 5*x4 + y4)/(64*sqrt(pi)) + outputs[52] = 4.1513246297620823f*xy*z*(x2 - y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xy*z*(x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) + outputs[53] = -0.15645893386229404f*y*(3.0f*x2 - y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -3*sqrt(35)*y*(3*x2 - y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) + outputs[54] = 0.44253269244498261f*xy*z*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xy*z*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) + outputs[55] = 0.090331607582517306f*y*(-135.0f*z2 + 495.0f*z4 - 429.0f*z6 + 5.0f) ; // sqrt(105)*y*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) + outputs[56] = 0.068284276912004949f*z*(315.0f*z2 - 693.0f*z4 + 429.0f*z6 - 35.0f) ; // sqrt(15)*z*(315*z2 - 693*z4 + 429*z6 - 35)/(32*sqrt(pi)) + outputs[57] = 0.090331607582517306f*x*(-135.0f*z2 + 495.0f*z4 - 429.0f*z6 + 5.0f) ; // sqrt(105)*x*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) + outputs[58] = 0.07375544874083044f*z*(x2 - y2)*(143.0f*z2*(3.0f*z2 - 1.0f) - 187.0f*z2 + 45.0f) ; // sqrt(70)*z*(x2 - y2)*(143*z2*(3*z2 - 1) - 187*z2 + 45)/(64*sqrt(pi)) + outputs[59] = -0.15645893386229404f*x*(x2 - 3.0f*y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -3*sqrt(35)*x*(x2 - 3*y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) + outputs[60] = 1.0378311574405206f*z*(13.0f*z2 - 3.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(385)*z*(13*z2 - 3)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) + outputs[61] = -0.51891557872026028f*x*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // -3*sqrt(385)*x*(13*z2 - 1)*(-10*x2*y2 + x4 + 5*y4)/(64*sqrt(pi)) + outputs[62] = 2.6459606618019f*z*(15.0f*x2*y4 - 15.0f*x4*y2 + x6 - y6) ; // 3*sqrt(10010)*z*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) + outputs[63] = 0.70716273252459627f*x*(-35.0f*x2*y4 + 21.0f*x4*y2 - x6 + 7.0f*y6) ; // 3*sqrt(715)*x*(-35*x2*y4 + 21*x4*y2 - x6 + 7*y6)/(64*sqrt(pi)) + }; + + write_sh(); + + if (dy_dx) { + scalar_t *dx = dy_dx + b * D * C2; + scalar_t *dy = dx + C2; + scalar_t *dz = dy + C2; + + auto write_sh_dx = [&]() { + dx[0] = 0.0f ; // 0 + if (C <= 1) { return; } + dx[1] = 0.0f ; // 0 + dx[2] = 0.0f ; // 0 + dx[3] = -0.48860251190291992f ; // -sqrt(3)/(2*sqrt(pi)) + if (C <= 2) { return; } + dx[4] = 1.0925484305920792f*y ; // sqrt(15)*y/(2*sqrt(pi)) + dx[5] = 0.0f ; // 0 + dx[6] = 0.0f ; // 0 + dx[7] = -1.0925484305920792f*z ; // -sqrt(15)*z/(2*sqrt(pi)) + dx[8] = 1.0925484305920792f*x ; // sqrt(15)*x/(2*sqrt(pi)) + if (C <= 3) { return; } + dx[9] = -3.5402615395598609f*xy ; // -3*sqrt(70)*xy/(4*sqrt(pi)) + dx[10] = 2.8906114426405538f*yz ; // sqrt(105)*yz/(2*sqrt(pi)) + dx[11] = 0.0f ; // 0 + dx[12] = 0.0f ; // 0 + dx[13] = 0.45704579946446572f - 2.2852289973223288f*z2 ; // sqrt(42)*(1 - 5*z2)/(8*sqrt(pi)) + dx[14] = 
2.8906114426405538f*xz ; // sqrt(105)*xz/(2*sqrt(pi)) + dx[15] = -1.7701307697799304f*x2 + 1.7701307697799304f*y2 ; // 3*sqrt(70)*(-x2 + y2)/(8*sqrt(pi)) + if (C <= 4) { return; } + dx[16] = 2.5033429417967046f*y*(3.0f*x2 - y2) ; // 3*sqrt(35)*y*(3*x2 - y2)/(4*sqrt(pi)) + dx[17] = -10.620784618679583f*xy*z ; // -9*sqrt(70)*xy*z/(4*sqrt(pi)) + dx[18] = 0.94617469575756008f*y*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*y*(7*z2 - 1)/(4*sqrt(pi)) + dx[19] = 0.0f ; // 0 + dx[20] = 0.0f ; // 0 + dx[21] = 0.66904654355728921f*z*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*z*(3 - 7*z2)/(8*sqrt(pi)) + dx[22] = 0.94617469575756008f*x*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*x*(7*z2 - 1)/(4*sqrt(pi)) + dx[23] = 5.3103923093397913f*z*(-x2 + y2) ; // 9*sqrt(70)*z*(-x2 + y2)/(8*sqrt(pi)) + dx[24] = 2.5033429417967046f*x*(x2 - 3.0f*y2) ; // 3*sqrt(35)*x*(x2 - 3*y2)/(4*sqrt(pi)) + if (C <= 5) { return; } + dx[25] = 13.127641136803401f*xy*(-x2 + y2) ; // 15*sqrt(154)*xy*(-x2 + y2)/(8*sqrt(pi)) + dx[26] = 8.3026492595241645f*yz*(3.0f*x2 - y2) ; // 3*sqrt(385)*yz*(3*x2 - y2)/(4*sqrt(pi)) + dx[27] = 2.9354297966115022f*xy*(1.0f - 9.0f*z2) ; // 3*sqrt(770)*xy*(1 - 9*z2)/(16*sqrt(pi)) + dx[28] = 4.7935367849733241f*yz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*yz*(3*z2 - 1)/(4*sqrt(pi)) + dx[29] = 0.0f ; // 0 + dx[30] = 0.0f ; // 0 + dx[31] = 6.3412531167397574f*z2 - 9.5118796751096362f*z4 - 0.45294665119569694f ; // sqrt(165)*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) + dx[32] = 4.7935367849733241f*xz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xz*(3*z2 - 1)/(4*sqrt(pi)) + dx[33] = -13.209434084751759f*x2*z2 + 1.4677148983057511f*x2 + 13.209434084751759f*y2*z2 - 1.4677148983057511f*y2 ; // 3*sqrt(770)*(-9*x2*z2 + x2 + 9*y2*z2 - y2)/(32*sqrt(pi)) + dx[34] = 8.3026492595241645f*xz*(x2 - 3.0f*y2) ; // 3*sqrt(385)*xz*(x2 - 3*y2)/(4*sqrt(pi)) + dx[35] = 19.6914617052051f*x2*y2 - 3.2819102842008503f*x4 - 3.2819102842008503f*y4 ; // 15*sqrt(154)*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) + if (C <= 6) { return; } + dx[36] = 4.0991046311514854f*y*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // 3*sqrt(6006)*y*(-10*x2*y2 + 5*x4 + y4)/(32*sqrt(pi)) + dx[37] = 47.332383244635047f*xy*z*(-x2 + y2) ; // 15*sqrt(2002)*xy*z*(-x2 + y2)/(8*sqrt(pi)) + dx[38] = 2.0182596029148963f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*y*(3*x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) + dx[39] = 5.5272315570895412f*xy*z*(3.0f - 11.0f*z2) ; // 3*sqrt(2730)*xy*z*(3 - 11*z2)/(16*sqrt(pi)) + dx[40] = 0.92120525951492349f*y*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*y*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) + dx[41] = 0.0f ; // 0 + dx[42] = 0.0f ; // 0 + dx[43] = 0.58262136251873131f*z*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*z*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) + dx[44] = 0.92120525951492349f*x*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*x*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) + dx[45] = -2.7636157785447706f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // -3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) + dx[46] = 2.0182596029148963f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*x*(x2 - 3*y2)*(11*z2 - 1)/(8*sqrt(pi)) + dx[47] = 11.833095811158762f*z*(6.0f*x2*y2 - x4 - y4) ; // 15*sqrt(2002)*z*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) + dx[48] = 4.0991046311514854f*x*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 3*sqrt(6006)*x*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) + if (C <= 7) { return; } + dx[49] = 9.9002782553443485f*xy*(10.0f*x2*y2 - 3.0f*x4 - 3.0f*y4) ; // 21*sqrt(715)*xy*(10*x2*y2 - 3*x4 - 3*y4)/(32*sqrt(pi)) + dx[50] = 15.875763970811402f*yz*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // 9*sqrt(10010)*yz*(-10*x2*y2 + 5*x4 + y4)/(32*sqrt(pi)) + dx[51] = 
-10.378311574405206f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // -15*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(16*sqrt(pi)) + dx[52] = 4.1513246297620823f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*yz*(3*x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) + dx[53] = 0.93875360317376422f*xy*(66.0f*z2 - 143.0f*z4 - 3.0f) ; // 9*sqrt(35)*xy*(66*z2 - 143*z4 - 3)/(32*sqrt(pi)) + dx[54] = 0.44253269244498261f*yz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*yz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) + dx[55] = 0.0f ; // 0 + dx[56] = 0.0f ; // 0 + dx[57] = -12.194767023639836f*z2 + 44.714145753346067f*z4 - 38.752259652899923f*z6 + 0.45165803791258652f ; // sqrt(105)*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) + dx[58] = 0.44253269244498261f*xz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) + dx[59] = 30.97886890473422f*x2*z2 - 67.120882626924143f*x2*z4 - 1.4081304047606462f*x2 - 30.97886890473422f*y2*z2 + 67.120882626924143f*y2*z4 + 1.4081304047606462f*y2 ; // 9*sqrt(35)*(66*x2*z2 - 143*x2*z4 - 3*x2 - 66*y2*z2 + 143*y2*z4 + 3*y2)/(64*sqrt(pi)) + dx[60] = 4.1513246297620823f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xz*(x2 - 3*y2)*(13*z2 - 3)/(8*sqrt(pi)) + dx[61] = -0.51891557872026028f*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + 4.0f*x2*(x2 - 5.0f*y2) + x4 + 5.0f*y4) ; // -3*sqrt(385)*(13*z2 - 1)*(-10*x2*y2 + 4*x2*(x2 - 5*y2) + x4 + 5*y4)/(64*sqrt(pi)) + dx[62] = 15.875763970811402f*xz*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 9*sqrt(10010)*xz*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) + dx[63] = -74.252086915082614f*x2*y4 + 74.252086915082614f*x4*y2 - 4.9501391276721742f*x6 + 4.9501391276721742f*y6 ; // 21*sqrt(715)*(-15*x2*y4 + 15*x4*y2 - x6 + y6)/(64*sqrt(pi)) + }; + + auto write_sh_dy = [&]() { + dy[0] = 0.0f ; // 0 + if (C <= 1) { return; } + dy[1] = -0.48860251190291992f ; // -sqrt(3)/(2*sqrt(pi)) + dy[2] = 0.0f ; // 0 + dy[3] = 0.0f ; // 0 + if (C <= 2) { return; } + dy[4] = 1.0925484305920792f*x ; // sqrt(15)*x/(2*sqrt(pi)) + dy[5] = -1.0925484305920792f*z ; // -sqrt(15)*z/(2*sqrt(pi)) + dy[6] = 0.0f ; // 0 + dy[7] = 0.0f ; // 0 + dy[8] = -1.0925484305920792f*y ; // -sqrt(15)*y/(2*sqrt(pi)) + if (C <= 3) { return; } + dy[9] = -1.7701307697799304f*x2 + 1.7701307697799304f*y2 ; // 3*sqrt(70)*(-x2 + y2)/(8*sqrt(pi)) + dy[10] = 2.8906114426405538f*xz ; // sqrt(105)*xz/(2*sqrt(pi)) + dy[11] = 0.45704579946446572f - 2.2852289973223288f*z2 ; // sqrt(42)*(1 - 5*z2)/(8*sqrt(pi)) + dy[12] = 0.0f ; // 0 + dy[13] = 0.0f ; // 0 + dy[14] = -2.8906114426405538f*yz ; // -sqrt(105)*yz/(2*sqrt(pi)) + dy[15] = 3.5402615395598609f*xy ; // 3*sqrt(70)*xy/(4*sqrt(pi)) + if (C <= 4) { return; } + dy[16] = 2.5033429417967046f*x*(x2 - 3.0f*y2) ; // 3*sqrt(35)*x*(x2 - 3*y2)/(4*sqrt(pi)) + dy[17] = 5.3103923093397913f*z*(-x2 + y2) ; // 9*sqrt(70)*z*(-x2 + y2)/(8*sqrt(pi)) + dy[18] = 0.94617469575756008f*x*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*x*(7*z2 - 1)/(4*sqrt(pi)) + dy[19] = 0.66904654355728921f*z*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*z*(3 - 7*z2)/(8*sqrt(pi)) + dy[20] = 0.0f ; // 0 + dy[21] = 0.0f ; // 0 + dy[22] = 0.94617469575756008f*y*(1.0f - 7.0f*z2) ; // 3*sqrt(5)*y*(1 - 7*z2)/(4*sqrt(pi)) + dy[23] = 10.620784618679583f*xy*z ; // 9*sqrt(70)*xy*z/(4*sqrt(pi)) + dy[24] = 2.5033429417967046f*y*(-3.0f*x2 + y2) ; // 3*sqrt(35)*y*(-3*x2 + y2)/(4*sqrt(pi)) + if (C <= 5) { return; } + dy[25] = 19.6914617052051f*x2*y2 - 3.2819102842008503f*x4 - 3.2819102842008503f*y4 ; // 15*sqrt(154)*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) + dy[26] = 8.3026492595241645f*xz*(x2 - 3.0f*y2) ; // 3*sqrt(385)*xz*(x2 - 3*y2)/(4*sqrt(pi)) + 
dy[27] = -1.4677148983057511f*(x2 - y2)*(9.0f*z2 - 1.0f) ; // -3*sqrt(770)*(x2 - y2)*(9*z2 - 1)/(32*sqrt(pi)) + dy[28] = 4.7935367849733241f*xz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xz*(3*z2 - 1)/(4*sqrt(pi)) + dy[29] = 6.3412531167397574f*z2 - 9.5118796751096362f*z4 - 0.45294665119569694f ; // sqrt(165)*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) + dy[30] = 0.0f ; // 0 + dy[31] = 0.0f ; // 0 + dy[32] = 4.7935367849733241f*yz*(1.0f - 3.0f*z2) ; // sqrt(1155)*yz*(1 - 3*z2)/(4*sqrt(pi)) + dy[33] = 2.9354297966115022f*xy*(9.0f*z2 - 1.0f) ; // 3*sqrt(770)*xy*(9*z2 - 1)/(16*sqrt(pi)) + dy[34] = 8.3026492595241645f*yz*(-3.0f*x2 + y2) ; // 3*sqrt(385)*yz*(-3*x2 + y2)/(4*sqrt(pi)) + dy[35] = 13.127641136803401f*xy*(x2 - y2) ; // 15*sqrt(154)*xy*(x2 - y2)/(8*sqrt(pi)) + if (C <= 6) { return; } + dy[36] = 4.0991046311514854f*x*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 3*sqrt(6006)*x*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) + dy[37] = 11.833095811158762f*z*(6.0f*x2*y2 - x4 - y4) ; // 15*sqrt(2002)*z*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) + dy[38] = 2.0182596029148963f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*x*(x2 - 3*y2)*(11*z2 - 1)/(8*sqrt(pi)) + dy[39] = -2.7636157785447706f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // -3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) + dy[40] = 0.92120525951492349f*x*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*x*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) + dy[41] = 0.58262136251873131f*z*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*z*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) + dy[42] = 0.0f ; // 0 + dy[43] = 0.0f ; // 0 + dy[44] = 0.92120525951492349f*y*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // sqrt(2730)*y*(18*z2 - 33*z4 - 1)/(32*sqrt(pi)) + dy[45] = 5.5272315570895412f*xy*z*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*xy*z*(11*z2 - 3)/(16*sqrt(pi)) + dy[46] = -2.0182596029148963f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(91)*y*(3*x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) + dy[47] = 47.332383244635047f*xy*z*(x2 - y2) ; // 15*sqrt(2002)*xy*z*(x2 - y2)/(8*sqrt(pi)) + dy[48] = 4.0991046311514854f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(6006)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) + if (C <= 7) { return; } + dy[49] = -74.252086915082614f*x2*y4 + 74.252086915082614f*x4*y2 - 4.9501391276721742f*x6 + 4.9501391276721742f*y6 ; // 21*sqrt(715)*(-15*x2*y4 + 15*x4*y2 - x6 + y6)/(64*sqrt(pi)) + dy[50] = 15.875763970811402f*xz*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 9*sqrt(10010)*xz*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) + dy[51] = 0.51891557872026028f*(13.0f*z2 - 1.0f)*(10.0f*x2*y2 - 5.0f*x4 + 4.0f*y2*(5.0f*x2 - y2) - y4) ; // 3*sqrt(385)*(13*z2 - 1)*(10*x2*y2 - 5*x4 + 4*y2*(5*x2 - y2) - y4)/(64*sqrt(pi)) + dy[52] = 4.1513246297620823f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xz*(x2 - 3*y2)*(13*z2 - 3)/(8*sqrt(pi)) + dy[53] = -0.46937680158688211f*(x2 - y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -9*sqrt(35)*(x2 - y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) + dy[54] = 0.44253269244498261f*xz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) + dy[55] = -12.194767023639836f*z2 + 44.714145753346067f*z4 - 38.752259652899923f*z6 + 0.45165803791258652f ; // sqrt(105)*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) + dy[56] = 0.0f ; // 0 + dy[57] = 0.0f ; // 0 + dy[58] = 0.44253269244498261f*yz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 3*sqrt(70)*yz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) + dy[59] = 0.93875360317376422f*xy*(-66.0f*z2 + 143.0f*z4 + 3.0f) ; // 9*sqrt(35)*xy*(-66*z2 + 143*z4 + 3)/(32*sqrt(pi)) + dy[60] = -4.1513246297620823f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // 
-3*sqrt(385)*yz*(3*x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) + dy[61] = 10.378311574405206f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // 15*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(16*sqrt(pi)) + dy[62] = 15.875763970811402f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 9*sqrt(10010)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) + dy[63] = 9.9002782553443485f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 21*sqrt(715)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) + }; + + auto write_sh_dz = [&]() { + dz[0] = 0.0f ; // 0 + if (C <= 1) { return; } + dz[1] = 0.0f ; // 0 + dz[2] = 0.48860251190291992f ; // sqrt(3)/(2*sqrt(pi)) + dz[3] = 0.0f ; // 0 + if (C <= 2) { return; } + dz[4] = 0.0f ; // 0 + dz[5] = -1.0925484305920792f*y ; // -sqrt(15)*y/(2*sqrt(pi)) + dz[6] = 1.8923493915151202f*z ; // 3*sqrt(5)*z/(2*sqrt(pi)) + dz[7] = -1.0925484305920792f*x ; // -sqrt(15)*x/(2*sqrt(pi)) + dz[8] = 0.0f ; // 0 + if (C <= 3) { return; } + dz[9] = 0.0f ; // 0 + dz[10] = 2.8906114426405538f*xy ; // sqrt(105)*xy/(2*sqrt(pi)) + dz[11] = -4.5704579946446566f*yz ; // -5*sqrt(42)*yz/(4*sqrt(pi)) + dz[12] = 5.597644988851731f*z2 - 1.1195289977703462f ; // 3*sqrt(7)*(5*z2 - 1)/(4*sqrt(pi)) + dz[13] = -4.5704579946446566f*xz ; // -5*sqrt(42)*xz/(4*sqrt(pi)) + dz[14] = 1.4453057213202769f*x2 - 1.4453057213202769f*y2 ; // sqrt(105)*(x2 - y2)/(4*sqrt(pi)) + dz[15] = 0.0f ; // 0 + if (C <= 4) { return; } + dz[16] = 0.0f ; // 0 + dz[17] = 1.7701307697799304f*y*(-3.0f*x2 + y2) ; // 3*sqrt(70)*y*(-3*x2 + y2)/(8*sqrt(pi)) + dz[18] = 13.246445740605839f*xy*z ; // 21*sqrt(5)*xy*z/(2*sqrt(pi)) + dz[19] = 2.0071396306718676f*y*(1.0f - 7.0f*z2) ; // 9*sqrt(10)*y*(1 - 7*z2)/(8*sqrt(pi)) + dz[20] = 14.809976568128603f*pow(z, 3) - 6.3471328149122579f*z ; // (105*z**3 - 45*z)/(4*sqrt(pi)) + dz[21] = 2.0071396306718676f*x*(1.0f - 7.0f*z2) ; // 9*sqrt(10)*x*(1 - 7*z2)/(8*sqrt(pi)) + dz[22] = 6.6232228703029197f*z*(x2 - y2) ; // 21*sqrt(5)*z*(x2 - y2)/(4*sqrt(pi)) + dz[23] = 1.7701307697799304f*x*(-x2 + 3.0f*y2) ; // 3*sqrt(70)*x*(-x2 + 3*y2)/(8*sqrt(pi)) + dz[24] = 0.0f ; // 0 + if (C <= 5) { return; } + dz[25] = 0.0f ; // 0 + dz[26] = 8.3026492595241645f*xy*(x2 - y2) ; // 3*sqrt(385)*xy*(x2 - y2)/(4*sqrt(pi)) + dz[27] = 8.8062893898345074f*yz*(-3.0f*x2 + y2) ; // 9*sqrt(770)*yz*(-3*x2 + y2)/(16*sqrt(pi)) + dz[28] = 4.7935367849733241f*xy*(9.0f*z2 - 1.0f) ; // sqrt(1155)*xy*(9*z2 - 1)/(4*sqrt(pi)) + dz[29] = 12.682506233479513f*yz*(1.0f - 3.0f*z2) ; // 7*sqrt(165)*yz*(1 - 3*z2)/(4*sqrt(pi)) + dz[30] = -24.559567715218954f*z2 + 36.839351572828434f*z4 + 1.754254836801354f ; // 15*sqrt(11)*(-14*z2 + 21*z4 + 1)/(16*sqrt(pi)) + dz[31] = 12.682506233479513f*xz*(1.0f - 3.0f*z2) ; // 7*sqrt(165)*xz*(1 - 3*z2)/(4*sqrt(pi)) + dz[32] = 2.3967683924866621f*(x2 - y2)*(9.0f*z2 - 1.0f) ; // sqrt(1155)*(x2 - y2)*(9*z2 - 1)/(8*sqrt(pi)) + dz[33] = 8.8062893898345074f*xz*(-x2 + 3.0f*y2) ; // 9*sqrt(770)*xz*(-x2 + 3*y2)/(16*sqrt(pi)) + dz[34] = -12.453973889286246f*x2*y2 + 2.0756623148810411f*x4 + 2.0756623148810411f*y4 ; // 3*sqrt(385)*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) + dz[35] = 0.0f ; // 0 + if (C <= 6) { return; } + dz[36] = 0.0f ; // 0 + dz[37] = 2.3666191622317521f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(2002)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) + dz[38] = 44.401711264127719f*xy*z*(x2 - y2) ; // 33*sqrt(91)*xy*z*(x2 - y2)/(4*sqrt(pi)) + dz[39] = -2.7636157785447706f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(2730)*y*(3*x2 - y2)*(11*z2 - 1)/(32*sqrt(pi)) + dz[40] = 11.054463114179082f*xy*z*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*xy*z*(11*z2 - 3)/(8*sqrt(pi)) + dz[41] = 
2.9131068125936568f*y*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // 5*sqrt(273)*y*(18*z2 - 33*z4 - 1)/(16*sqrt(pi)) + dz[42] = 2.6699064952403937f*z*(-30.0f*z2 + 33.0f*z4 + 5.0f) ; // 21*sqrt(13)*z*(-30*z2 + 33*z4 + 5)/(16*sqrt(pi)) + dz[43] = 2.9131068125936568f*x*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // 5*sqrt(273)*x*(18*z2 - 33*z4 - 1)/(16*sqrt(pi)) + dz[44] = 5.5272315570895412f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(16*sqrt(pi)) + dz[45] = -2.7636157785447706f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(2730)*x*(x2 - 3*y2)*(11*z2 - 1)/(32*sqrt(pi)) + dz[46] = 11.10042781603193f*z*(-6.0f*x2*y2 + x4 + y4) ; // 33*sqrt(91)*z*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) + dz[47] = 2.3666191622317521f*x*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(2002)*x*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) + dz[48] = 0.0f ; // 0 + if (C <= 7) { return; } + dz[49] = 0.0f ; // 0 + dz[50] = 5.2919213236038001f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 3*sqrt(10010)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) + dz[51] = 13.491805046726766f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 39*sqrt(385)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) + dz[52] = 12.453973889286248f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // 9*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(8*sqrt(pi)) + dz[53] = -6.8841930899409371f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // -33*sqrt(35)*yz*(3*x2 - y2)*(13*z2 - 3)/(16*sqrt(pi)) + dz[54] = 2.2126634622249131f*xy*(-66.0f*z2 + 143.0f*z4 + 3.0f) ; // 15*sqrt(70)*xy*(-66*z2 + 143*z4 + 3)/(32*sqrt(pi)) + dz[55] = 1.6259689364853116f*yz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 9*sqrt(105)*yz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) + dz[56] = 64.528641681844675f*z2 - 236.60501950009714f*z4 + 205.05768356675085f*z6 - 2.3899496919201733f ; // 7*sqrt(15)*(135*z2 - 495*z4 + 429*z6 - 5)/(32*sqrt(pi)) + dz[57] = 1.6259689364853116f*xz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 9*sqrt(105)*xz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) + dz[58] = 0.07375544874083044f*(x2 - y2)*(143.0f*z2*(3.0f*z2 - 1.0f) + 132.0f*z2*(13.0f*z2 - 5.0f) - 187.0f*z2 + 45.0f) ; // sqrt(70)*(x2 - y2)*(143*z2*(3*z2 - 1) + 132*z2*(13*z2 - 5) - 187*z2 + 45)/(64*sqrt(pi)) + dz[59] = -6.8841930899409371f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // -33*sqrt(35)*xz*(x2 - 3*y2)*(13*z2 - 3)/(16*sqrt(pi)) + dz[60] = 3.1134934723215619f*(13.0f*z2 - 1.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 9*sqrt(385)*(13*z2 - 1)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) + dz[61] = 13.491805046726766f*xz*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 39*sqrt(385)*xz*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) + dz[62] = 39.6894099270285f*x2*y4 - 39.6894099270285f*x4*y2 + 2.6459606618019f*x6 - 2.6459606618019f*y6 ; // 3*sqrt(10010)*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) + dz[63] = 0.0f ; // 0 + }; + write_sh_dx(); + write_sh_dy(); + write_sh_dz(); + } +} + + +template +__global__ void kernel_sh_backward( + const scalar_t * __restrict__ grad, + const scalar_t * __restrict__ inputs, + uint32_t B, uint32_t D, uint32_t C, + const scalar_t * __restrict__ dy_dx, + scalar_t * grad_inputs +) { + const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x; + const uint32_t b = t / D; + if (b >= B) return; + + const uint32_t d = t - b * D; + const uint32_t C2 = C * C; + + // locate + grad += b * C2; + dy_dx += b * D * C2 + d * C2; + + for (int ch = 0; ch < C2; ch++) { + grad_inputs[t] += grad[ch] * dy_dx[ch]; + //printf("t=%d, b=%d, d=%d, ch=%d, grad=%f (+= %f * %f)\n", t, b, d, ch, grad_inputs[t], grad[ch], dy_dx[ch]); + } + +} + +// inputs: [B, D], float, in [0, 1] +// outputs: [B, L * C], float +template +void sh_encode_forward_cuda(const 
scalar_t *inputs, scalar_t *outputs, const uint32_t B, const uint32_t D, const uint32_t C, scalar_t *dy_dx) { + static constexpr uint32_t N_THREADS = 256; + kernel_sh<<>>(inputs, outputs, B, D, C, dy_dx); +} + + +template +void sh_encode_backward_cuda(const scalar_t *grad, const scalar_t *inputs, const uint32_t B, const uint32_t D, const uint32_t C, scalar_t *dy_dx, scalar_t *grad_inputs) { + static constexpr uint32_t N_THREADS = 256; + kernel_sh_backward<<>>(grad, inputs, B, D, C, dy_dx, grad_inputs); +} + + +void sh_encode_forward(at::Tensor inputs, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, at::optional dy_dx) { + CHECK_CUDA(inputs); + CHECK_CUDA(outputs); + // CHECK_CUDA(dy_dx); + + CHECK_CONTIGUOUS(inputs); + CHECK_CONTIGUOUS(outputs); + // CHECK_CONTIGUOUS(dy_dx); + + CHECK_IS_FLOATING(inputs); + CHECK_IS_FLOATING(outputs); + // CHECK_IS_FLOATING(dy_dx); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + inputs.scalar_type(), "sh_encode_forward_cuda", ([&] { + sh_encode_forward_cuda(inputs.data_ptr(), outputs.data_ptr(), B, D, C, dy_dx.has_value() ? dy_dx.value().data_ptr() : nullptr); + })); +} + +void sh_encode_backward(at::Tensor grad, at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t C, at::Tensor dy_dx, at::Tensor grad_inputs) { + CHECK_CUDA(grad); + CHECK_CUDA(inputs); + CHECK_CUDA(dy_dx); + CHECK_CUDA(grad_inputs); + + CHECK_CONTIGUOUS(grad); + CHECK_CONTIGUOUS(inputs); + CHECK_CONTIGUOUS(dy_dx); + CHECK_CONTIGUOUS(grad_inputs); + + CHECK_IS_FLOATING(grad); + CHECK_IS_FLOATING(inputs); + CHECK_IS_FLOATING(dy_dx); + CHECK_IS_FLOATING(grad_inputs); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "sh_encode_backward_cuda", ([&] { + sh_encode_backward_cuda(grad.data_ptr(), inputs.data_ptr(), B, D, C, dy_dx.data_ptr(), grad_inputs.data_ptr()); + })); +} \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/shencoder/src/shencoder.h b/stable-dreamfusion-3DPortrait/shencoder/src/shencoder.h new file mode 100644 index 0000000..f9e89fa --- /dev/null +++ b/stable-dreamfusion-3DPortrait/shencoder/src/shencoder.h @@ -0,0 +1,10 @@ +# pragma once + +#include +#include + +// inputs: [B, D], float, in [-1, 1] +// outputs: [B, F], float + +void sh_encode_forward(at::Tensor inputs, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, at::optional dy_dx); +void sh_encode_backward(at::Tensor grad, at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t C, at::Tensor dy_dx, at::Tensor grad_inputs); \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/taichi_modules/__init__.py b/stable-dreamfusion-3DPortrait/taichi_modules/__init__.py new file mode 100644 index 0000000..3270636 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/taichi_modules/__init__.py @@ -0,0 +1,5 @@ +from .ray_march import RayMarcherTaichi, raymarching_test +from .volume_train import VolumeRendererTaichi +from .intersection import RayAABBIntersector +from .volume_render_test import composite_test +from .utils import packbits \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/taichi_modules/hash_encoder.py b/stable-dreamfusion-3DPortrait/taichi_modules/hash_encoder.py new file mode 100644 index 0000000..9a1b7a7 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/taichi_modules/hash_encoder.py @@ -0,0 +1,305 @@ +import numpy as np +import taichi as ti +import torch +from taichi.math import uvec3 +from torch.cuda.amp import custom_bwd, custom_fwd + +from .utils import (data_type, 
ti2torch, ti2torch_grad, ti2torch_grad_vec, + ti2torch_vec, torch2ti, torch2ti_grad, torch2ti_grad_vec, + torch2ti_vec, torch_type) + +half2 = ti.types.vector(n=2, dtype=ti.f16) + + +@ti.kernel +def random_initialize(data: ti.types.ndarray()): + for I in ti.grouped(data): + data[I] = (ti.random() * 2.0 - 1.0) * 1e-4 + + +@ti.kernel +def ti_copy(data1: ti.template(), data2: ti.template()): + for I in ti.grouped(data1): + data1[I] = data2[I] + + +@ti.kernel +def ti_copy_array(data1: ti.types.ndarray(), data2: ti.types.ndarray()): + for I in ti.grouped(data1): + data1[I] = data2[I] + + +@ti.kernel +def ti_copy_field_array(data1: ti.template(), data2: ti.types.ndarray()): + for I in ti.grouped(data1): + data1[I] = data2[I] + + +@ti.func +def fast_hash(pos_grid_local): + result = ti.uint32(0) + # primes = uvec3(ti.uint32(1), ti.uint32(1958374283), ti.uint32(2654435761)) + primes = uvec3(ti.uint32(1), ti.uint32(2654435761), ti.uint32(805459861)) + for i in ti.static(range(3)): + result ^= ti.uint32(pos_grid_local[i]) * primes[i] + return result + + +@ti.func +def under_hash(pos_grid_local, resolution): + result = ti.uint32(0) + stride = ti.uint32(1) + for i in ti.static(range(3)): + result += ti.uint32(pos_grid_local[i] * stride) + stride *= resolution + return result + + +@ti.func +def grid_pos2hash_index(indicator, pos_grid_local, resolution, map_size): + hash_result = ti.uint32(0) + if indicator == 1: + hash_result = under_hash(pos_grid_local, resolution) + else: + hash_result = fast_hash(pos_grid_local) + + return hash_result % map_size + + +@ti.kernel +def hash_encode_kernel( + xyzs: ti.template(), table: ti.template(), + xyzs_embedding: ti.template(), hash_map_indicator: ti.template(), + hash_map_sizes_field: ti.template(), offsets: ti.template(), B: ti.i32, + per_level_scale: ti.f32): + + # get hash table embedding + ti.loop_config(block_dim=16) + for i, level in ti.ndrange(B, 16): + xyz = ti.Vector([xyzs[i, 0], xyzs[i, 1], xyzs[i, 2]]) + + scale = 16 * ti.exp(level * ti.log(per_level_scale)) - 1.0 + resolution = ti.cast(ti.ceil(scale), ti.uint32) + 1 + + offset = offsets[level] * 2 + + pos = xyz * scale + 0.5 + pos_grid_uint = ti.cast(ti.floor(pos), ti.uint32) + pos -= pos_grid_uint + + indicator = hash_map_indicator[level] + map_size = hash_map_sizes_field[level] + + local_feature_0 = 0.0 + local_feature_1 = 0.0 + + for idx in ti.static(range(8)): + w = 1. 
+ pos_grid_local = uvec3(0) + + for d in ti.static(range(3)): + if (idx & (1 << d)) == 0: + pos_grid_local[d] = pos_grid_uint[d] + w *= 1 - pos[d] + else: + pos_grid_local[d] = pos_grid_uint[d] + 1 + w *= pos[d] + + index = grid_pos2hash_index(indicator, pos_grid_local, resolution, + map_size) + index_table = offset + index * 2 + index_table_int = ti.cast(index_table, ti.int32) + local_feature_0 += w * table[index_table_int] + local_feature_1 += w * table[index_table_int + 1] + + xyzs_embedding[i, level * 2] = local_feature_0 + xyzs_embedding[i, level * 2 + 1] = local_feature_1 + + +@ti.kernel +def hash_encode_kernel_half2( + xyzs: ti.template(), table: ti.template(), + xyzs_embedding: ti.template(), hash_map_indicator: ti.template(), + hash_map_sizes_field: ti.template(), offsets: ti.template(), B: ti.i32, + per_level_scale: ti.f16): + + # get hash table embedding + ti.loop_config(block_dim=32) + for i, level in ti.ndrange(B, 16): + xyz = ti.Vector([xyzs[i, 0], xyzs[i, 1], xyzs[i, 2]]) + + scale = 16 * ti.exp(level * ti.log(per_level_scale)) - 1.0 + resolution = ti.cast(ti.ceil(scale), ti.uint32) + 1 + + offset = offsets[level] + + pos = xyz * scale + 0.5 + pos_grid_uint = ti.cast(ti.floor(pos), ti.uint32) + pos -= pos_grid_uint + + indicator = hash_map_indicator[level] + map_size = hash_map_sizes_field[level] + + local_feature = half2(0.0) + for idx in ti.static(range(8)): + w = ti.f32(1.0) + pos_grid_local = uvec3(0) + + for d in ti.static(range(3)): + if (idx & (1 << d)) == 0: + pos_grid_local[d] = pos_grid_uint[d] + w *= 1 - pos[d] + else: + pos_grid_local[d] = pos_grid_uint[d] + 1 + w *= pos[d] + + index = grid_pos2hash_index(indicator, pos_grid_local, resolution, + map_size) + + index_table = offset + index + index_table_int = ti.cast(index_table, ti.int32) + + local_feature += w * table[index_table_int] + xyzs_embedding[i, level] = local_feature + + +class HashEncoderTaichi(torch.nn.Module): + + def __init__(self, + b=1.3195079565048218, + batch_size=8192, + data_type=data_type, + half2_opt=False): + super(HashEncoderTaichi, self).__init__() + + self.per_level_scale = b + if batch_size < 2048: + batch_size = 2048 + + # per_level_scale = 1.3195079565048218 + print("per_level_scale: ", b) + self.offsets = ti.field(ti.i32, shape=(16, )) + self.hash_map_sizes_field = ti.field(ti.uint32, shape=(16, )) + self.hash_map_indicator = ti.field(ti.i32, shape=(16, )) + base_res = 16 + max_params = 2**19 + offset_ = 0 + hash_map_sizes = [] + for i in range(16): + resolution = int( + np.ceil(base_res * np.exp(i * np.log(self.per_level_scale)) - + 1.0)) + 1 + params_in_level = resolution**3 + params_in_level = int(resolution** + 3) if params_in_level % 8 == 0 else int( + (params_in_level + 8 - 1) / 8) * 8 + params_in_level = min(max_params, params_in_level) + self.offsets[i] = offset_ + hash_map_sizes.append(params_in_level) + self.hash_map_indicator[ + i] = 1 if resolution**3 <= params_in_level else 0 + offset_ += params_in_level + print("offset_: ", offset_) + size = np.uint32(np.array(hash_map_sizes)) + self.hash_map_sizes_field.from_numpy(size) + + self.total_hash_size = offset_ * 2 + print("total_hash_size: ", self.total_hash_size) + + self.hash_table = torch.nn.Parameter(torch.zeros(self.total_hash_size, + dtype=torch_type), + requires_grad=True) + random_initialize(self.hash_table) + + if half2_opt: + assert self.total_hash_size % 2 == 0 + self.parameter_fields = half2.field(shape=(self.total_hash_size // + 2, ), + needs_grad=True) + self.output_fields = half2.field(shape=(batch_size * 
1024, 16), + needs_grad=True) + + self.torch2ti = torch2ti_vec + self.ti2torch = ti2torch_vec + self.ti2torch_grad = ti2torch_grad_vec + self.torch2ti_grad = torch2ti_grad_vec + + self._hash_encode_kernel = hash_encode_kernel_half2 + else: + self.parameter_fields = ti.field(data_type, + shape=(self.total_hash_size, ), + needs_grad=True) + self.output_fields = ti.field(dtype=data_type, + shape=(batch_size * 1024, 32), + needs_grad=True) + self.torch2ti = torch2ti + self.ti2torch = ti2torch + self.ti2torch_grad = ti2torch_grad + self.torch2ti_grad = torch2ti_grad + + self._hash_encode_kernel = hash_encode_kernel + + self.input_fields = ti.field(dtype=data_type, + shape=(batch_size * 1024, 3), + needs_grad=True) + self.output_dim = 32 # the output dim: num levels (16) x level num (2) + self.register_buffer( + 'hash_grad', torch.zeros(self.total_hash_size, dtype=torch_type)) + self.register_buffer( + 'output_embedding', + torch.zeros(batch_size * 1024, 32, dtype=torch_type)) + + class _module_function(torch.autograd.Function): + + @staticmethod + @custom_fwd(cast_inputs=torch_type) + def forward(ctx, input_pos, params): + output_embedding = self.output_embedding[:input_pos. + shape[0]].contiguous( + ) + torch2ti(self.input_fields, input_pos.contiguous()) + self.torch2ti(self.parameter_fields, params.contiguous()) + + self._hash_encode_kernel( + self.input_fields, + self.parameter_fields, + self.output_fields, + self.hash_map_indicator, + self.hash_map_sizes_field, + self.offsets, + input_pos.shape[0], + self.per_level_scale, + ) + self.ti2torch(self.output_fields, output_embedding) + + return output_embedding + + @staticmethod + @custom_bwd + def backward(ctx, doutput): + + self.zero_grad() + + self.torch2ti_grad(self.output_fields, doutput.contiguous()) + self._hash_encode_kernel.grad( + self.input_fields, + self.parameter_fields, + self.output_fields, + self.hash_map_indicator, + self.hash_map_sizes_field, + self.offsets, + doutput.shape[0], + self.per_level_scale, + ) + self.ti2torch_grad(self.parameter_fields, + self.hash_grad.contiguous()) + return None, self.hash_grad + + self._module_function = _module_function + + def zero_grad(self): + self.parameter_fields.grad.fill(0.) 
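+
+    # Illustrative usage sketch (assumes Taichi was initialized beforehand,
+    # e.g. ti.init(arch=ti.cuda), and that at most batch_size * 1024 points
+    # are passed per call):
+    #   encoder = HashEncoderTaichi(batch_size=8192)
+    #   feats = encoder(xyz)  # xyz: (N, 3) in [-bound, bound] -> feats: (N, 32)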
+ + def forward(self, positions, bound=1): + positions = (positions + bound) / (2 * bound) + return self._module_function.apply(positions, self.hash_table) diff --git a/stable-dreamfusion-3DPortrait/taichi_modules/intersection.py b/stable-dreamfusion-3DPortrait/taichi_modules/intersection.py new file mode 100644 index 0000000..3879934 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/taichi_modules/intersection.py @@ -0,0 +1,68 @@ +import taichi as ti +import torch +from taichi.math import vec3 +from torch.cuda.amp import custom_fwd + +from .utils import NEAR_DISTANCE + + +@ti.kernel +def simple_ray_aabb_intersec_taichi_forward( + hits_t: ti.types.ndarray(ndim=2), + rays_o: ti.types.ndarray(ndim=2), + rays_d: ti.types.ndarray(ndim=2), + centers: ti.types.ndarray(ndim=2), + half_sizes: ti.types.ndarray(ndim=2)): + + for r in ti.ndrange(hits_t.shape[0]): + ray_o = vec3([rays_o[r, 0], rays_o[r, 1], rays_o[r, 2]]) + ray_d = vec3([rays_d[r, 0], rays_d[r, 1], rays_d[r, 2]]) + inv_d = 1.0 / ray_d + + center = vec3([centers[0, 0], centers[0, 1], centers[0, 2]]) + half_size = vec3( + [half_sizes[0, 0], half_sizes[0, 1], half_sizes[0, 1]]) + + t_min = (center - half_size - ray_o) * inv_d + t_max = (center + half_size - ray_o) * inv_d + + _t1 = ti.min(t_min, t_max) + _t2 = ti.max(t_min, t_max) + t1 = _t1.max() + t2 = _t2.min() + + if t2 > 0.0: + hits_t[r, 0, 0] = ti.max(t1, NEAR_DISTANCE) + hits_t[r, 0, 1] = t2 + + +class RayAABBIntersector(torch.autograd.Function): + """ + Computes the intersections of rays and axis-aligned voxels. + + Inputs: + rays_o: (N_rays, 3) ray origins + rays_d: (N_rays, 3) ray directions + centers: (N_voxels, 3) voxel centers + half_sizes: (N_voxels, 3) voxel half sizes + max_hits: maximum number of intersected voxels to keep for one ray + (for a cubic scene, this is at most 3*N_voxels^(1/3)-2) + + Outputs: + hits_cnt: (N_rays) number of hits for each ray + (followings are from near to far) + hits_t: (N_rays, max_hits, 2) hit t's (-1 if no hit) + hits_voxel_idx: (N_rays, max_hits) hit voxel indices (-1 if no hit) + """ + + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, rays_o, rays_d, center, half_size, max_hits): + hits_t = (torch.zeros( + rays_o.size(0), 1, 2, device=rays_o.device, dtype=torch.float32) - + 1).contiguous() + + simple_ray_aabb_intersec_taichi_forward(hits_t, rays_o, rays_d, center, + half_size) + + return None, hits_t, None diff --git a/stable-dreamfusion-3DPortrait/taichi_modules/ray_march.py b/stable-dreamfusion-3DPortrait/taichi_modules/ray_march.py new file mode 100644 index 0000000..d159d03 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/taichi_modules/ray_march.py @@ -0,0 +1,340 @@ +import taichi as ti +import torch +from taichi.math import vec3 +from torch.cuda.amp import custom_fwd + +from .utils import __morton3D, calc_dt, mip_from_dt, mip_from_pos + + +@ti.kernel +def raymarching_train(rays_o: ti.types.ndarray(ndim=2), + rays_d: ti.types.ndarray(ndim=2), + hits_t: ti.types.ndarray(ndim=2), + density_bitfield: ti.types.ndarray(ndim=1), + noise: ti.types.ndarray(ndim=1), + counter: ti.types.ndarray(ndim=1), + rays_a: ti.types.ndarray(ndim=2), + xyzs: ti.types.ndarray(ndim=2), + dirs: ti.types.ndarray(ndim=2), + deltas: ti.types.ndarray(ndim=1), + ts: ti.types.ndarray(ndim=1), cascades: int, + grid_size: int, scale: float, exp_step_factor: float, + max_samples: float): + + # ti.loop_config(block_dim=256) + for r in noise: + ray_o = vec3(rays_o[r, 0], rays_o[r, 1], rays_o[r, 2]) + ray_d = vec3(rays_d[r, 0], rays_d[r, 1], 
rays_d[r, 2]) + d_inv = 1.0 / ray_d + + t1, t2 = hits_t[r, 0], hits_t[r, 1] + + grid_size3 = grid_size**3 + grid_size_inv = 1.0 / grid_size + + if t1 >= 0: + dt = calc_dt(t1, exp_step_factor, grid_size, scale) + t1 += dt * noise[r] + + t = t1 + N_samples = 0 + + while (0 <= t) & (t < t2) & (N_samples < max_samples): + xyz = ray_o + t * ray_d + dt = calc_dt(t, exp_step_factor, grid_size, scale) + mip = ti.max(mip_from_pos(xyz, cascades), + mip_from_dt(dt, grid_size, cascades)) + + # mip_bound = 0.5 + # mip_bound = ti.min(ti.pow(2., mip - 1), scale) + mip_bound = scale + mip_bound_inv = 1 / mip_bound + + nxyz = ti.math.clamp(0.5 * (xyz * mip_bound_inv + 1) * grid_size, + 0.0, grid_size - 1.0) + # nxyz = ti.ceil(nxyz) + + idx = mip * grid_size3 + __morton3D(ti.cast(nxyz, ti.u32)) + occ = density_bitfield[ti.u32(idx // 8)] & (1 << ti.u32(idx % 8)) + # idx = __morton3D(ti.cast(nxyz, ti.uint32)) + # occ = density_bitfield[mip, idx//8] & (1 << ti.cast(idx%8, ti.uint32)) + + if occ: + t += dt + N_samples += 1 + else: + # t += dt + txyz = (((nxyz + 0.5 + 0.5 * ti.math.sign(ray_d)) * + grid_size_inv * 2 - 1) * mip_bound - xyz) * d_inv + + t_target = t + ti.max(0, txyz.min()) + t += calc_dt(t, exp_step_factor, grid_size, scale) + while t < t_target: + t += calc_dt(t, exp_step_factor, grid_size, scale) + + start_idx = ti.atomic_add(counter[0], N_samples) + ray_count = ti.atomic_add(counter[1], 1) + + rays_a[ray_count, 0] = r + rays_a[ray_count, 1] = start_idx + rays_a[ray_count, 2] = N_samples + + t = t1 + samples = 0 + + while (t < t2) & (samples < N_samples): + xyz = ray_o + t * ray_d + dt = calc_dt(t, exp_step_factor, grid_size, scale) + mip = ti.max(mip_from_pos(xyz, cascades), + mip_from_dt(dt, grid_size, cascades)) + + # mip_bound = 0.5 + # mip_bound = ti.min(ti.pow(2., mip - 1), scale) + mip_bound = scale + mip_bound_inv = 1 / mip_bound + + nxyz = ti.math.clamp(0.5 * (xyz * mip_bound_inv + 1) * grid_size, + 0.0, grid_size - 1.0) + # nxyz = ti.ceil(nxyz) + + idx = mip * grid_size3 + __morton3D(ti.cast(nxyz, ti.u32)) + occ = density_bitfield[ti.u32(idx // 8)] & (1 << ti.u32(idx % 8)) + # idx = __morton3D(ti.cast(nxyz, ti.uint32)) + # occ = density_bitfield[mip, idx//8] & (1 << ti.cast(idx%8, ti.uint32)) + + if occ: + s = start_idx + samples + xyzs[s, 0] = xyz[0] + xyzs[s, 1] = xyz[1] + xyzs[s, 2] = xyz[2] + dirs[s, 0] = ray_d[0] + dirs[s, 1] = ray_d[1] + dirs[s, 2] = ray_d[2] + ts[s] = t + deltas[s] = dt + t += dt + samples += 1 + else: + # t += dt + txyz = (((nxyz + 0.5 + 0.5 * ti.math.sign(ray_d)) * + grid_size_inv * 2 - 1) * mip_bound - xyz) * d_inv + + t_target = t + ti.max(0, txyz.min()) + t += calc_dt(t, exp_step_factor, grid_size, scale) + while t < t_target: + t += calc_dt(t, exp_step_factor, grid_size, scale) + + +@ti.kernel +def raymarching_train_backword(segments: ti.types.ndarray(ndim=2), + ts: ti.types.ndarray(ndim=1), + dL_drays_o: ti.types.ndarray(ndim=2), + dL_drays_d: ti.types.ndarray(ndim=2), + dL_dxyzs: ti.types.ndarray(ndim=2), + dL_ddirs: ti.types.ndarray(ndim=2)): + + for s in segments: + index = segments[s] + dxyz = dL_dxyzs[index] + ddir = dL_ddirs[index] + + dL_drays_o[s] = dxyz + dL_drays_d[s] = dxyz * ts[index] + ddir + + +class RayMarcherTaichi(torch.nn.Module): + + def __init__(self, batch_size=8192): + super(RayMarcherTaichi, self).__init__() + + self.register_buffer('rays_a', + torch.zeros(batch_size, 3, dtype=torch.int32)) + self.register_buffer( + 'xyzs', torch.zeros(batch_size * 1024, 3, dtype=torch.float32)) + self.register_buffer( + 'dirs', 
torch.zeros(batch_size * 1024, 3, dtype=torch.float32)) + self.register_buffer( + 'deltas', torch.zeros(batch_size * 1024, dtype=torch.float32)) + self.register_buffer( + 'ts', torch.zeros(batch_size * 1024, dtype=torch.float32)) + + # self.register_buffer('dL_drays_o', torch.zeros(batch_size, dtype=torch.float32)) + # self.register_buffer('dL_drays_d', torch.zeros(batch_size, dtype=torch.float32)) + + class _module_function(torch.autograd.Function): + + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, rays_o, rays_d, hits_t, density_bitfield, + cascades, scale, exp_step_factor, grid_size, + max_samples): + # noise to perturb the first sample of each ray + noise = torch.rand_like(rays_o[:, 0]) + counter = torch.zeros(2, + device=rays_o.device, + dtype=torch.int32) + + raymarching_train(\ + rays_o, rays_d, + hits_t.contiguous(), + density_bitfield, noise, counter, + self.rays_a.contiguous(), + self.xyzs.contiguous(), + self.dirs.contiguous(), + self.deltas.contiguous(), + self.ts.contiguous(), + cascades, grid_size, scale, + exp_step_factor, max_samples) + + # ti.sync() + + total_samples = counter[0] # total samples for all rays + # remove redundant output + xyzs = self.xyzs[:total_samples] + dirs = self.dirs[:total_samples] + deltas = self.deltas[:total_samples] + ts = self.ts[:total_samples] + + return self.rays_a, xyzs, dirs, deltas, ts, total_samples + + # @staticmethod + # @custom_bwd + # def backward(ctx, dL_drays_a, dL_dxyzs, dL_ddirs, dL_ddeltas, dL_dts, + # dL_dtotal_samples): + # rays_a, ts = ctx.saved_tensors + # # rays_a = rays_a.contiguous() + # ts = ts.contiguous() + # segments = torch.cat([rays_a[:, 1], rays_a[-1:, 1] + rays_a[-1:, 2]]) + # dL_drays_o = torch.zeros_like(rays_a[:, 0]) + # dL_drays_d = torch.zeros_like(rays_a[:, 0]) + # raymarching_train_backword(segments.contiguous(), ts, dL_drays_o, + # dL_drays_d, dL_dxyzs, dL_ddirs) + # # ti.sync() + # # dL_drays_o = segment_csr(dL_dxyzs, segments) + # # dL_drays_d = \ + # # segment_csr(dL_dxyzs*rearrange(ts, 'n -> n 1')+dL_ddirs, segments) + + # return dL_drays_o, dL_drays_d, None, None, None, None, None, None, None + + self._module_function = _module_function + + def forward(self, rays_o, rays_d, hits_t, density_bitfield, cascades, + scale, exp_step_factor, grid_size, max_samples): + return self._module_function.apply(rays_o, rays_d, hits_t, + density_bitfield, cascades, scale, + exp_step_factor, grid_size, + max_samples) + + +@ti.kernel +def raymarching_test_kernel( + rays_o: ti.types.ndarray(ndim=2), + rays_d: ti.types.ndarray(ndim=2), + hits_t: ti.types.ndarray(ndim=2), + alive_indices: ti.types.ndarray(ndim=1), + density_bitfield: ti.types.ndarray(ndim=1), + cascades: int, + grid_size: int, + scale: float, + exp_step_factor: float, + N_samples: int, + max_samples: int, + xyzs: ti.types.ndarray(ndim=2), + dirs: ti.types.ndarray(ndim=2), + deltas: ti.types.ndarray(ndim=1), + ts: ti.types.ndarray(ndim=1), + N_eff_samples: ti.types.ndarray(ndim=1), +): + + for n in alive_indices: + r = alive_indices[n] + grid_size3 = grid_size**3 + grid_size_inv = 1.0 / grid_size + + ray_o = vec3(rays_o[r, 0], rays_o[r, 1], rays_o[r, 2]) + ray_d = vec3(rays_d[r, 0], rays_d[r, 1], rays_d[r, 2]) + d_inv = 1.0 / ray_d + + t = hits_t[r, 0] + t2 = hits_t[r, 1] + + s = 0 + + while (0 <= t) & (t < t2) & (s < N_samples): + xyz = ray_o + t * ray_d + dt = calc_dt(t, exp_step_factor, grid_size, scale) + mip = ti.max(mip_from_pos(xyz, cascades), + mip_from_dt(dt, grid_size, cascades)) + + # mip_bound = 0.5 + # mip_bound = 
ti.min(ti.pow(2., mip - 1), scale) + mip_bound = scale + mip_bound_inv = 1 / mip_bound + + nxyz = ti.math.clamp(0.5 * (xyz * mip_bound_inv + 1) * grid_size, + 0.0, grid_size - 1.0) + # nxyz = ti.ceil(nxyz) + + idx = mip * grid_size3 + __morton3D(ti.cast(nxyz, ti.u32)) + occ = density_bitfield[ti.u32(idx // 8)] & (1 << ti.u32(idx % 8)) + + if occ: + xyzs[n, s, 0] = xyz[0] + xyzs[n, s, 1] = xyz[1] + xyzs[n, s, 2] = xyz[2] + dirs[n, s, 0] = ray_d[0] + dirs[n, s, 1] = ray_d[1] + dirs[n, s, 2] = ray_d[2] + ts[n, s] = t + deltas[n, s] = dt + t += dt + hits_t[r, 0] = t + s += 1 + + else: + txyz = (((nxyz + 0.5 + 0.5 * ti.math.sign(ray_d)) * + grid_size_inv * 2 - 1) * mip_bound - xyz) * d_inv + + t_target = t + ti.max(0, txyz.min()) + t += calc_dt(t, exp_step_factor, grid_size, scale) + while t < t_target: + t += calc_dt(t, exp_step_factor, grid_size, scale) + + N_eff_samples[n] = s + + +def raymarching_test(rays_o, rays_d, hits_t, alive_indices, density_bitfield, + cascades, scale, exp_step_factor, grid_size, max_samples, + N_samples): + + N_rays = alive_indices.size(0) + xyzs = torch.zeros(N_rays, + N_samples, + 3, + device=rays_o.device, + dtype=rays_o.dtype) + dirs = torch.zeros(N_rays, + N_samples, + 3, + device=rays_o.device, + dtype=rays_o.dtype) + deltas = torch.zeros(N_rays, + N_samples, + device=rays_o.device, + dtype=rays_o.dtype) + ts = torch.zeros(N_rays, + N_samples, + device=rays_o.device, + dtype=rays_o.dtype) + N_eff_samples = torch.zeros(N_rays, + device=rays_o.device, + dtype=torch.int32) + + raymarching_test_kernel(rays_o, rays_d, hits_t, alive_indices, + density_bitfield, cascades, grid_size, scale, + exp_step_factor, N_samples, max_samples, xyzs, + dirs, deltas, ts, N_eff_samples) + + # ti.sync() + + return xyzs, dirs, deltas, ts, N_eff_samples diff --git a/stable-dreamfusion-3DPortrait/taichi_modules/utils.py b/stable-dreamfusion-3DPortrait/taichi_modules/utils.py new file mode 100644 index 0000000..02c2f2a --- /dev/null +++ b/stable-dreamfusion-3DPortrait/taichi_modules/utils.py @@ -0,0 +1,224 @@ +import taichi as ti +import torch +from taichi.math import uvec3 + +taichi_block_size = 128 + +data_type = ti.f32 +torch_type = torch.float32 + +MAX_SAMPLES = 1024 +NEAR_DISTANCE = 0.01 +SQRT3 = 1.7320508075688772 +SQRT3_MAX_SAMPLES = SQRT3 / 1024 +SQRT3_2 = 1.7320508075688772 * 2 + + +@ti.func +def scalbn(x, exponent): + return x * ti.math.pow(2, exponent) + + +@ti.func +def calc_dt(t, exp_step_factor, grid_size, scale): + return ti.math.clamp(t * exp_step_factor, SQRT3_MAX_SAMPLES, + SQRT3_2 * scale / grid_size) + + +@ti.func +def frexp_bit(x): + exponent = 0 + if x != 0.0: + # frac = ti.abs(x) + bits = ti.bit_cast(x, ti.u32) + exponent = ti.i32((bits & ti.u32(0x7f800000)) >> 23) - 127 + # exponent = (ti.i32(bits & ti.u32(0x7f800000)) >> 23) - 127 + bits &= ti.u32(0x7fffff) + bits |= ti.u32(0x3f800000) + frac = ti.bit_cast(bits, ti.f32) + if frac < 0.5: + exponent -= 1 + elif frac > 1.0: + exponent += 1 + return exponent + + +@ti.func +def mip_from_pos(xyz, cascades): + mx = ti.abs(xyz).max() + # _, exponent = _frexp(mx) + exponent = frexp_bit(ti.f32(mx)) + 1 + # frac, exponent = ti.frexp(ti.f32(mx)) + return ti.min(cascades - 1, ti.max(0, exponent)) + + +@ti.func +def mip_from_dt(dt, grid_size, cascades): + # _, exponent = _frexp(dt*grid_size) + exponent = frexp_bit(ti.f32(dt * grid_size)) + # frac, exponent = ti.frexp(ti.f32(dt*grid_size)) + return ti.min(cascades - 1, ti.max(0, exponent)) + + +@ti.func +def __expand_bits(v): + v = (v * ti.uint32(0x00010001)) & 
ti.uint32(0xFF0000FF) + v = (v * ti.uint32(0x00000101)) & ti.uint32(0x0F00F00F) + v = (v * ti.uint32(0x00000011)) & ti.uint32(0xC30C30C3) + v = (v * ti.uint32(0x00000005)) & ti.uint32(0x49249249) + return v + + +@ti.func +def __morton3D(xyz): + xyz = __expand_bits(xyz) + return xyz[0] | (xyz[1] << 1) | (xyz[2] << 2) + + +@ti.func +def __morton3D_invert(x): + x = x & (0x49249249) + x = (x | (x >> 2)) & ti.uint32(0xc30c30c3) + x = (x | (x >> 4)) & ti.uint32(0x0f00f00f) + x = (x | (x >> 8)) & ti.uint32(0xff0000ff) + x = (x | (x >> 16)) & ti.uint32(0x0000ffff) + return ti.int32(x) + + +@ti.kernel +def morton3D_invert_kernel(indices: ti.types.ndarray(ndim=1), + coords: ti.types.ndarray(ndim=2)): + for i in indices: + ind = ti.uint32(indices[i]) + coords[i, 0] = __morton3D_invert(ind >> 0) + coords[i, 1] = __morton3D_invert(ind >> 1) + coords[i, 2] = __morton3D_invert(ind >> 2) + + +def morton3D_invert(indices): + coords = torch.zeros(indices.size(0), + 3, + device=indices.device, + dtype=torch.int32) + morton3D_invert_kernel(indices.contiguous(), coords) + ti.sync() + return coords + + +@ti.kernel +def morton3D_kernel(xyzs: ti.types.ndarray(ndim=2), + indices: ti.types.ndarray(ndim=1)): + for s in indices: + xyz = uvec3([xyzs[s, 0], xyzs[s, 1], xyzs[s, 2]]) + indices[s] = ti.cast(__morton3D(xyz), ti.int32) + + +def morton3D(coords1): + indices = torch.zeros(coords1.size(0), + device=coords1.device, + dtype=torch.int32) + morton3D_kernel(coords1.contiguous(), indices) + ti.sync() + return indices + + +@ti.kernel +def packbits(density_grid: ti.types.ndarray(ndim=1), + density_threshold: float, + density_bitfield: ti.types.ndarray(ndim=1)): + + for n in density_bitfield: + bits = ti.uint8(0) + + for i in ti.static(range(8)): + bits |= (ti.uint8(1) << i) if ( + density_grid[8 * n + i] > density_threshold) else ti.uint8(0) + + density_bitfield[n] = bits + + +@ti.kernel +def torch2ti(field: ti.template(), data: ti.types.ndarray()): + for I in ti.grouped(data): + field[I] = data[I] + + +@ti.kernel +def ti2torch(field: ti.template(), data: ti.types.ndarray()): + for I in ti.grouped(data): + data[I] = field[I] + + +@ti.kernel +def ti2torch_grad(field: ti.template(), grad: ti.types.ndarray()): + for I in ti.grouped(grad): + grad[I] = field.grad[I] + + +@ti.kernel +def torch2ti_grad(field: ti.template(), grad: ti.types.ndarray()): + for I in ti.grouped(grad): + field.grad[I] = grad[I] + + +@ti.kernel +def torch2ti_vec(field: ti.template(), data: ti.types.ndarray()): + for I in range(data.shape[0] // 2): + field[I] = ti.Vector([data[I * 2], data[I * 2 + 1]]) + + +@ti.kernel +def ti2torch_vec(field: ti.template(), data: ti.types.ndarray()): + for i, j in ti.ndrange(data.shape[0], data.shape[1] // 2): + data[i, j * 2] = field[i, j][0] + data[i, j * 2 + 1] = field[i, j][1] + + +@ti.kernel +def ti2torch_grad_vec(field: ti.template(), grad: ti.types.ndarray()): + for I in range(grad.shape[0] // 2): + grad[I * 2] = field.grad[I][0] + grad[I * 2 + 1] = field.grad[I][1] + + +@ti.kernel +def torch2ti_grad_vec(field: ti.template(), grad: ti.types.ndarray()): + for i, j in ti.ndrange(grad.shape[0], grad.shape[1] // 2): + field.grad[i, j][0] = grad[i, j * 2] + field.grad[i, j][1] = grad[i, j * 2 + 1] + + +def extract_model_state_dict(ckpt_path, + model_name='model', + prefixes_to_ignore=[]): + checkpoint = torch.load(ckpt_path, map_location='cpu') + checkpoint_ = {} + if 'state_dict' in checkpoint: # if it's a pytorch-lightning checkpoint + checkpoint = checkpoint['state_dict'] + for k, v in checkpoint.items(): + if 
not k.startswith(model_name): + continue + k = k[len(model_name) + 1:] + for prefix in prefixes_to_ignore: + if k.startswith(prefix): + break + else: + checkpoint_[k] = v + return checkpoint_ + + +def load_ckpt(model, ckpt_path, model_name='model', prefixes_to_ignore=[]): + if not ckpt_path: + return + model_dict = model.state_dict() + checkpoint_ = extract_model_state_dict(ckpt_path, model_name, + prefixes_to_ignore) + model_dict.update(checkpoint_) + model.load_state_dict(model_dict) + +def depth2img(depth): + depth = (depth - depth.min()) / (depth.max() - depth.min()) + depth_img = cv2.applyColorMap((depth * 255).astype(np.uint8), + cv2.COLORMAP_TURBO) + + return depth_img \ No newline at end of file diff --git a/stable-dreamfusion-3DPortrait/taichi_modules/volume_render_test.py b/stable-dreamfusion-3DPortrait/taichi_modules/volume_render_test.py new file mode 100644 index 0000000..1bdefb7 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/taichi_modules/volume_render_test.py @@ -0,0 +1,48 @@ +import taichi as ti + + +@ti.kernel +def composite_test( + sigmas: ti.types.ndarray(ndim=2), rgbs: ti.types.ndarray(ndim=3), + deltas: ti.types.ndarray(ndim=2), ts: ti.types.ndarray(ndim=2), + hits_t: ti.types.ndarray(ndim=2), + alive_indices: ti.types.ndarray(ndim=1), T_threshold: float, + N_eff_samples: ti.types.ndarray(ndim=1), + opacity: ti.types.ndarray(ndim=1), + depth: ti.types.ndarray(ndim=1), rgb: ti.types.ndarray(ndim=2)): + + for n in alive_indices: + samples = N_eff_samples[n] + if samples == 0: + alive_indices[n] = -1 + else: + r = alive_indices[n] + + T = 1 - opacity[r] + + rgb_temp_0 = 0.0 + rgb_temp_1 = 0.0 + rgb_temp_2 = 0.0 + depth_temp = 0.0 + opacity_temp = 0.0 + + for s in range(samples): + a = 1.0 - ti.exp(-sigmas[n, s] * deltas[n, s]) + w = a * T + + rgb_temp_0 += w * rgbs[n, s, 0] + rgb_temp_1 += w * rgbs[n, s, 1] + rgb_temp_2 += w * rgbs[n, s, 2] + depth[r] += w * ts[n, s] + opacity[r] += w + T *= 1.0 - a + + if T <= T_threshold: + alive_indices[n] = -1 + break + + rgb[r, 0] += rgb_temp_0 + rgb[r, 1] += rgb_temp_1 + rgb[r, 2] += rgb_temp_2 + depth[r] += depth_temp + opacity[r] += opacity_temp diff --git a/stable-dreamfusion-3DPortrait/taichi_modules/volume_train.py b/stable-dreamfusion-3DPortrait/taichi_modules/volume_train.py new file mode 100644 index 0000000..7a52bfe --- /dev/null +++ b/stable-dreamfusion-3DPortrait/taichi_modules/volume_train.py @@ -0,0 +1,239 @@ +import taichi as ti +import torch +from torch.cuda.amp import custom_bwd, custom_fwd + +from .utils import (data_type, ti2torch, ti2torch_grad, torch2ti, + torch2ti_grad, torch_type) + + +@ti.kernel +def composite_train_fw_array( + sigmas: ti.types.ndarray(), + rgbs: ti.types.ndarray(), + deltas: ti.types.ndarray(), + ts: ti.types.ndarray(), + rays_a: ti.types.ndarray(), + T_threshold: float, + total_samples: ti.types.ndarray(), + opacity: ti.types.ndarray(), + depth: ti.types.ndarray(), + rgb: ti.types.ndarray(), + ws: ti.types.ndarray(), +): + + for n in opacity: + ray_idx = rays_a[n, 0] + start_idx = rays_a[n, 1] + N_samples = rays_a[n, 2] + + T = 1.0 + samples = 0 + while samples < N_samples: + s = start_idx + samples + a = 1.0 - ti.exp(-sigmas[s] * deltas[s]) + w = a * T + + rgb[ray_idx, 0] += w * rgbs[s, 0] + rgb[ray_idx, 1] += w * rgbs[s, 1] + rgb[ray_idx, 2] += w * rgbs[s, 2] + depth[ray_idx] += w * ts[s] + opacity[ray_idx] += w + ws[s] = w + T *= 1.0 - a + + # if T T_threshold: + # s = start_idx + sample_ + a = 1.0 - ti.exp(-sigmas[s] * deltas[s]) + w = a * T_ + rgb[ray_idx, 0] += w * rgbs[s, 0] + 
rgb[ray_idx, 1] += w * rgbs[s, 1] + rgb[ray_idx, 2] += w * rgbs[s, 2] + depth[ray_idx] += w * ts[s] + opacity[ray_idx] += w + ws[s] = w + # T_ *= (1.0-a) + T[s + 1] = T_ * (1.0 - a) + # if T[s+1]>=T_threshold: + # samples += 1 + total_samples[ray_idx] += 1 + else: + T[s + 1] = 0.0 + + # total_samples[ray_idx] = N_samples + + +@ti.kernel +def check_value( + fields: ti.template(), + array: ti.types.ndarray(), + checker: ti.types.ndarray(), +): + for I in ti.grouped(array): + if fields[I] == array[I]: + checker[I] = 1 + + +class VolumeRendererTaichi(torch.nn.Module): + + def __init__(self, batch_size=8192, data_type=data_type): + super(VolumeRendererTaichi, self).__init__() + # samples level + self.sigmas_fields = ti.field(dtype=data_type, + shape=(batch_size * 1024, ), + needs_grad=True) + self.rgbs_fields = ti.field(dtype=data_type, + shape=(batch_size * 1024, 3), + needs_grad=True) + self.deltas_fields = ti.field(dtype=data_type, + shape=(batch_size * 1024, ), + needs_grad=True) + self.ts_fields = ti.field(dtype=data_type, + shape=(batch_size * 1024, ), + needs_grad=True) + self.ws_fields = ti.field(dtype=data_type, + shape=(batch_size * 1024, ), + needs_grad=True) + self.T = ti.field(dtype=data_type, + shape=(batch_size * 1024), + needs_grad=True) + + # rays level + self.rays_a_fields = ti.field(dtype=ti.i64, shape=(batch_size, 3)) + self.total_samples_fields = ti.field(dtype=ti.i64, + shape=(batch_size, )) + self.opacity_fields = ti.field(dtype=data_type, + shape=(batch_size, ), + needs_grad=True) + self.depth_fields = ti.field(dtype=data_type, + shape=(batch_size, ), + needs_grad=True) + self.rgb_fields = ti.field(dtype=data_type, + shape=(batch_size, 3), + needs_grad=True) + + # preallocate tensor + self.register_buffer('total_samples', + torch.zeros(batch_size, dtype=torch.int64)) + self.register_buffer('rgb', torch.zeros(batch_size, + 3, + dtype=torch_type)) + self.register_buffer('opacity', + torch.zeros(batch_size, dtype=torch_type)) + self.register_buffer('depth', torch.zeros(batch_size, + dtype=torch_type)) + self.register_buffer('ws', + torch.zeros(batch_size * 1024, dtype=torch_type)) + + self.register_buffer('sigma_grad', + torch.zeros(batch_size * 1024, dtype=torch_type)) + self.register_buffer( + 'rgb_grad', torch.zeros(batch_size * 1024, 3, dtype=torch_type)) + + class _module_function(torch.autograd.Function): + + @staticmethod + @custom_fwd(cast_inputs=torch_type) + def forward(ctx, sigmas, rgbs, deltas, ts, rays_a, T_threshold): + # If no output gradient is provided, no need to + # automatically materialize it as torch.zeros. 
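+                # Stage the torch inputs into Taichi fields, launch the forward
+                # compositing kernel, then copy the per-ray results back into the
+                # preallocated torch buffers.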
+ + ctx.T_threshold = T_threshold + ctx.samples_size = sigmas.shape[0] + + ws = self.ws[:sigmas.shape[0]] + + torch2ti(self.sigmas_fields, sigmas.contiguous()) + torch2ti(self.rgbs_fields, rgbs.contiguous()) + torch2ti(self.deltas_fields, deltas.contiguous()) + torch2ti(self.ts_fields, ts.contiguous()) + torch2ti(self.rays_a_fields, rays_a.contiguous()) + composite_train_fw(self.sigmas_fields, self.rgbs_fields, + self.deltas_fields, self.ts_fields, + self.rays_a_fields, T_threshold, self.T, + self.total_samples_fields, + self.opacity_fields, self.depth_fields, + self.rgb_fields, self.ws_fields) + ti2torch(self.total_samples_fields, self.total_samples) + ti2torch(self.opacity_fields, self.opacity) + ti2torch(self.depth_fields, self.depth) + ti2torch(self.rgb_fields, self.rgb) + + + return self.total_samples.sum( + ), self.opacity, self.depth, self.rgb, ws + + @staticmethod + @custom_bwd + def backward(ctx, dL_dtotal_samples, dL_dopacity, dL_ddepth, + dL_drgb, dL_dws): + + T_threshold = ctx.T_threshold + samples_size = ctx.samples_size + + sigma_grad = self.sigma_grad[:samples_size].contiguous() + rgb_grad = self.rgb_grad[:samples_size].contiguous() + + self.zero_grad() + + torch2ti_grad(self.opacity_fields, dL_dopacity.contiguous()) + torch2ti_grad(self.depth_fields, dL_ddepth.contiguous()) + torch2ti_grad(self.rgb_fields, dL_drgb.contiguous()) + torch2ti_grad(self.ws_fields, dL_dws.contiguous()) + composite_train_fw.grad(self.sigmas_fields, self.rgbs_fields, + self.deltas_fields, self.ts_fields, + self.rays_a_fields, T_threshold, + self.T, self.total_samples_fields, + self.opacity_fields, self.depth_fields, + self.rgb_fields, self.ws_fields) + ti2torch_grad(self.sigmas_fields, sigma_grad) + ti2torch_grad(self.rgbs_fields, rgb_grad) + + return sigma_grad, rgb_grad, None, None, None, None + + self._module_function = _module_function + + def zero_grad(self): + self.sigmas_fields.grad.fill(0.) + self.rgbs_fields.grad.fill(0.) + self.T.grad.fill(0.) + + + def forward(self, sigmas, rgbs, deltas, ts, rays_a, T_threshold): + return self._module_function.apply(sigmas, rgbs, deltas, ts, rays_a, + T_threshold) diff --git a/stable-dreamfusion-3DPortrait/tets/128_tets.npz b/stable-dreamfusion-3DPortrait/tets/128_tets.npz new file mode 100644 index 0000000..156b7fd Binary files /dev/null and b/stable-dreamfusion-3DPortrait/tets/128_tets.npz differ diff --git a/stable-dreamfusion-3DPortrait/tets/32_tets.npz b/stable-dreamfusion-3DPortrait/tets/32_tets.npz new file mode 100644 index 0000000..cffeeba Binary files /dev/null and b/stable-dreamfusion-3DPortrait/tets/32_tets.npz differ diff --git a/stable-dreamfusion-3DPortrait/tets/64_tets.npz b/stable-dreamfusion-3DPortrait/tets/64_tets.npz new file mode 100644 index 0000000..8515e35 Binary files /dev/null and b/stable-dreamfusion-3DPortrait/tets/64_tets.npz differ diff --git a/stable-dreamfusion-3DPortrait/tets/README.md b/stable-dreamfusion-3DPortrait/tets/README.md new file mode 100644 index 0000000..23719a7 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/tets/README.md @@ -0,0 +1,6 @@ +Place the tet grid files in this folder. +We provide a few example grids. See the main README.md for a download link. + +You can also generate your own grids using https://github.com/crawforddoran/quartet +Please see the `generate_tets.py` script for an example. 
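+
+For example, assuming Quartet has been compiled in a sibling `quartet/` directory (the script's default `--root ..`), a grid can be generated with something like:
+
+```shell
+python generate_tets.py --res 64 --root ..
+```
+
+Depending on where the script is run from, its hard-coded paths may need small adjustments; the converted grid is saved as `<res>_tets.npz`.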
+ diff --git a/stable-dreamfusion-3DPortrait/tets/generate_tets.py b/stable-dreamfusion-3DPortrait/tets/generate_tets.py new file mode 100644 index 0000000..94c5241 --- /dev/null +++ b/stable-dreamfusion-3DPortrait/tets/generate_tets.py @@ -0,0 +1,58 @@ +# Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import os +import numpy as np + + +''' +This code segment shows how to use Quartet: https://github.com/crawforddoran/quartet, +to generate a tet grid +1) Download, compile and run Quartet as described in the link above. Example usage `quartet meshes/cube.obj 0.5 cube_5.tet` +2) Run the function below to generate a file `cube_32_tet.tet` +''' + +def generate_tetrahedron_grid_file(res=32, root='..'): + frac = 1.0 / res + command = 'cd %s/quartet; ' % (root) + \ + './quartet_release meshes/cube.obj %f meshes/cube_%f_tet.tet -s meshes/cube_boundary_%f.obj' % (frac, res, res) + os.system(command) + + +''' +This code segment shows how to convert from a quartet .tet file to compressed npz file +''' +def convert_from_quartet_to_npz(quartetfile = 'cube_32_tet.tet', npzfile = '32_tets.npz'): + + file1 = open(quartetfile, 'r') + header = file1.readline() + numvertices = int(header.split(" ")[1]) + numtets = int(header.split(" ")[2]) + print(numvertices, numtets) + + # load vertices + vertices = np.loadtxt(quartetfile, skiprows=1, max_rows=numvertices) + vertices = vertices - 0.5 + print(vertices.shape, vertices.min(), vertices.max()) + + # load indices + indices = np.loadtxt(quartetfile, dtype=int, skiprows=1+numvertices, max_rows=numtets) + print(indices.shape) + + np.savez_compressed(npzfile, vertices=vertices, indices=indices) + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--res', type=int, default=32) + parser.add_argument('--root', type=str, default='..') + args = parser.parse_args() + + generate_tetrahedron_grid_file(res=args.res, root=args.root) + convert_from_quartet_to_npz(quartetfile=os.path.join(args.root, 'quartet', 'meshes', f'cube_{args.res}.000000_tet.tet'), npzfile=os.path.join('./tets', f'{args.res}_tets.npz')) \ No newline at end of file diff --git a/test_data/0/prompt.txt b/test_data/0/prompt.txt new file mode 100644 index 0000000..30c1ea2 --- /dev/null +++ b/test_data/0/prompt.txt @@ -0,0 +1 @@ +upper body photo, medieval, portrait photo of 25 y.o princess in blue dress, face, pale skin, intricate details, RAW, analog, sharp focus, 8k, HD, high quality, masterpiece \ No newline at end of file